Diffstat (limited to 'drivers')
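Each row below gives the file mode, the path (rename origins noted inline), and the total number of lines changed in that file. For reference, a summary of the same shape, restricted to the drivers/ subtree, can be produced locally with git; the commit range shown is a placeholder, not taken from this page:

    git diff --stat <old-commit>..<new-commit> -- drivers/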
-rw-r--r--  drivers/Makefile  1
-rw-r--r--  drivers/accel/Kconfig  1
-rw-r--r--  drivers/accel/Makefile  1
-rw-r--r--  drivers/accel/amdxdna/Makefile  1
-rw-r--r--  drivers/accel/amdxdna/aie2_ctx.c  67
-rw-r--r--  drivers/accel/amdxdna/aie2_message.c  21
-rw-r--r--  drivers/accel/amdxdna/aie2_pci.c  217
-rw-r--r--  drivers/accel/amdxdna/aie2_pci.h  5
-rw-r--r--  drivers/accel/amdxdna/amdxdna_ctx.c  52
-rw-r--r--  drivers/accel/amdxdna/amdxdna_ctx.h  10
-rw-r--r--  drivers/accel/amdxdna/amdxdna_gem.c  139
-rw-r--r--  drivers/accel/amdxdna/amdxdna_pci_drv.c  108
-rw-r--r--  drivers/accel/amdxdna/amdxdna_pci_drv.h  7
-rw-r--r--  drivers/accel/amdxdna/amdxdna_ubuf.c  232
-rw-r--r--  drivers/accel/amdxdna/amdxdna_ubuf.h  19
-rw-r--r--  drivers/accel/habanalabs/Kconfig  23
-rw-r--r--  drivers/accel/habanalabs/common/Makefile  5
-rw-r--r--  drivers/accel/habanalabs/common/debugfs.c  324
-rw-r--r--  drivers/accel/habanalabs/common/device.c  23
-rw-r--r--  drivers/accel/habanalabs/common/habanalabs.h  56
-rw-r--r--  drivers/accel/habanalabs/common/habanalabs_ioctl.c  6
-rw-r--r--  drivers/accel/habanalabs/common/hldio.c  437
-rw-r--r--  drivers/accel/habanalabs/common/hldio.h  146
-rw-r--r--  drivers/accel/habanalabs/common/memory.c  9
-rw-r--r--  drivers/accel/habanalabs/common/memory_mgr.c  5
-rw-r--r--  drivers/accel/habanalabs/common/sysfs.c  11
-rw-r--r--  drivers/accel/habanalabs/gaudi/gaudi.c  19
-rw-r--r--  drivers/accel/habanalabs/gaudi2/gaudi2.c  386
-rw-r--r--  drivers/accel/habanalabs/gaudi2/gaudi2P.h  9
-rw-r--r--  drivers/accel/habanalabs/gaudi2/gaudi2_coresight.c  2
-rw-r--r--  drivers/accel/ivpu/ivpu_fw.h  2
-rw-r--r--  drivers/accel/ivpu/ivpu_hw_btrs.c  3
-rw-r--r--  drivers/accel/ivpu/ivpu_hw_btrs.h  2
-rw-r--r--  drivers/accel/ivpu/ivpu_ipc.c  1
-rw-r--r--  drivers/accel/rocket/Kconfig  24
-rw-r--r--  drivers/accel/rocket/Makefile  10
-rw-r--r--  drivers/accel/rocket/rocket_core.c  110
-rw-r--r--  drivers/accel/rocket/rocket_core.h  64
-rw-r--r--  drivers/accel/rocket/rocket_device.c  60
-rw-r--r--  drivers/accel/rocket/rocket_device.h  30
-rw-r--r--  drivers/accel/rocket/rocket_drv.c  290
-rw-r--r--  drivers/accel/rocket/rocket_drv.h  32
-rw-r--r--  drivers/accel/rocket/rocket_gem.c  181
-rw-r--r--  drivers/accel/rocket/rocket_gem.h  34
-rw-r--r--  drivers/accel/rocket/rocket_job.c  637
-rw-r--r--  drivers/accel/rocket/rocket_job.h  52
-rw-r--r--  drivers/accel/rocket/rocket_registers.h  4404
-rw-r--r--  drivers/acpi/Kconfig  4
-rw-r--r--  drivers/acpi/acpi_dbg.c  26
-rw-r--r--  drivers/acpi/acpi_processor.c  4
-rw-r--r--  drivers/acpi/acpi_tad.c  3
-rw-r--r--  drivers/acpi/acpica/acdebug.h  2
-rw-r--r--  drivers/acpi/acpica/aclocal.h  2
-rw-r--r--  drivers/acpi/acpica/acpredef.h  3
-rw-r--r--  drivers/acpi/acpica/dsmethod.c  21
-rw-r--r--  drivers/acpi/acpica/evglock.c  4
-rw-r--r--  drivers/acpi/acpica/psopinfo.c  4
-rw-r--r--  drivers/acpi/acpica/tbprint.c  8
-rw-r--r--  drivers/acpi/apei/einj-core.c  51
-rw-r--r--  drivers/acpi/apei/erst-dbg.c  8
-rw-r--r--  drivers/acpi/device_sysfs.c  2
-rw-r--r--  drivers/acpi/fan_core.c  18
-rw-r--r--  drivers/acpi/internal.h  1
-rw-r--r--  drivers/acpi/pci_irq.c  3
-rw-r--r--  drivers/acpi/power.c  90
-rw-r--r--  drivers/acpi/prmt.c  19
-rw-r--r--  drivers/acpi/processor_driver.c  6
-rw-r--r--  drivers/acpi/processor_idle.c  139
-rw-r--r--  drivers/acpi/processor_thermal.c  52
-rw-r--r--  drivers/acpi/property.c  152
-rw-r--r--  drivers/acpi/resource.c  10
-rw-r--r--  drivers/acpi/riscv/Kconfig  7
-rw-r--r--  drivers/acpi/riscv/Makefile  1
-rw-r--r--  drivers/acpi/riscv/init.c  2
-rw-r--r--  drivers/acpi/riscv/init.h  1
-rw-r--r--  drivers/acpi/riscv/rimt.c  520
-rw-r--r--  drivers/acpi/scan.c  7
-rw-r--r--  drivers/acpi/spcr.c  13
-rw-r--r--  drivers/acpi/tables.c  2
-rw-r--r--  drivers/acpi/thermal.c  8
-rw-r--r--  drivers/acpi/video_detect.c  8
-rw-r--r--  drivers/ata/libata-scsi.c  4
-rw-r--r--  drivers/ata/libata-sff.c  6
-rw-r--r--  drivers/base/base.h  8
-rw-r--r--  drivers/base/devres.c  21
-rw-r--r--  drivers/base/node.c  4
-rw-r--r--  drivers/base/power/main.c  32
-rw-r--r--  drivers/base/power/runtime.c  3
-rw-r--r--  drivers/block/Kconfig  10
-rw-r--r--  drivers/block/Makefile  4
-rw-r--r--  drivers/block/amiflop.c  10
-rw-r--r--  drivers/block/aoe/aoeblk.c  4
-rw-r--r--  drivers/block/aoe/aoecmd.c  2
-rw-r--r--  drivers/block/aoe/aoemain.c  2
-rw-r--r--  drivers/block/brd.c  75
-rw-r--r--  drivers/block/floppy.c  59
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.c  6
-rw-r--r--  drivers/block/nbd.c  10
-rw-r--r--  drivers/block/null_blk/main.c  2
-rw-r--r--  drivers/block/rbd.c  2
-rw-r--r--  drivers/block/rnbd/rnbd-clt.c  6
-rw-r--r--  drivers/block/rnull.rs  80
-rw-r--r--  drivers/block/rnull/Kconfig  13
-rw-r--r--  drivers/block/rnull/Makefile  3
-rw-r--r--  drivers/block/rnull/configfs.rs  262
-rw-r--r--  drivers/block/rnull/rnull.rs  104
-rw-r--r--  drivers/block/sunvdc.c  7
-rw-r--r--  drivers/block/swim.c  4
-rw-r--r--  drivers/block/ublk_drv.c  242
-rw-r--r--  drivers/block/virtio_blk.c  8
-rw-r--r--  drivers/block/xen-blkfront.c  4
-rw-r--r--  drivers/block/zram/zram_drv.c  25
-rw-r--r--  drivers/bluetooth/bpa10x.c  2
-rw-r--r--  drivers/bluetooth/btintel.c  3
-rw-r--r--  drivers/bluetooth/btintel_pcie.c  328
-rw-r--r--  drivers/bluetooth/btintel_pcie.h  2
-rw-r--r--  drivers/bluetooth/btmtksdio.c  2
-rw-r--r--  drivers/bluetooth/btmtkuart.c  2
-rw-r--r--  drivers/bluetooth/btnxpuart.c  2
-rw-r--r--  drivers/bluetooth/btusb.c  23
-rw-r--r--  drivers/bluetooth/h4_recv.h  153
-rw-r--r--  drivers/bluetooth/hci_bcsp.c  3
-rw-r--r--  drivers/bus/fsl-mc/fsl-mc-bus.c  9
-rw-r--r--  drivers/cache/sifive_ccache.c  8
-rw-r--r--  drivers/char/hw_random/Kconfig  2
-rw-r--r--  drivers/char/ipmi/Kconfig  7
-rw-r--r--  drivers/char/ipmi/Makefile  1
-rw-r--r--  drivers/char/ipmi/ipmi_ipmb.c  4
-rw-r--r--  drivers/char/ipmi/ipmi_kcs_sm.c  16
-rw-r--r--  drivers/char/ipmi/ipmi_msghandler.c  605
-rw-r--r--  drivers/char/ipmi/ipmi_powernv.c  17
-rw-r--r--  drivers/char/ipmi/ipmi_si.h  7
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c  74
-rw-r--r--  drivers/char/ipmi/ipmi_si_ls2k.c  189
-rw-r--r--  drivers/char/ipmi/ipmi_ssif.c  4
-rw-r--r--  drivers/char/mem.c  21
-rw-r--r--  drivers/clk/clk-rp1.c  1022
-rw-r--r--  drivers/clk/clk-sp7021.c  22
-rw-r--r--  drivers/clk/tegra/clk-tegra30.c  1
-rw-r--r--  drivers/cpufreq/acpi-cpufreq.c  9
-rw-r--r--  drivers/cpufreq/airoha-cpufreq.c  1
-rw-r--r--  drivers/cpufreq/armada-37xx-cpufreq.c  4
-rw-r--r--  drivers/cpufreq/brcmstb-avs-cpufreq.c  4
-rw-r--r--  drivers/cpufreq/cppc_cpufreq.c  16
-rw-r--r--  drivers/cpufreq/cpufreq-dt-platdev.c  3
-rw-r--r--  drivers/cpufreq/cpufreq.c  44
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c  24
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c  25
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.h  23
-rw-r--r--  drivers/cpufreq/freq_table.c  22
-rw-r--r--  drivers/cpufreq/intel_pstate.c  182
-rw-r--r--  drivers/cpufreq/longhaul.c  3
-rw-r--r--  drivers/cpufreq/mediatek-cpufreq-hw.c  134
-rw-r--r--  drivers/cpufreq/mediatek-cpufreq.c  11
-rw-r--r--  drivers/cpufreq/qcom-cpufreq-nvmem.c  5
-rw-r--r--  drivers/cpufreq/s5pv210-cpufreq.c  4
-rw-r--r--  drivers/cpufreq/scmi-cpufreq.c  10
-rw-r--r--  drivers/cpufreq/sh-cpufreq.c  6
-rw-r--r--  drivers/cpufreq/speedstep-lib.c  12
-rw-r--r--  drivers/cpufreq/speedstep-lib.h  10
-rw-r--r--  drivers/cpufreq/tegra186-cpufreq.c  4
-rw-r--r--  drivers/cpufreq/ti-cpufreq.c  12
-rw-r--r--  drivers/cpufreq/virtual-cpufreq.c  2
-rw-r--r--  drivers/cpuidle/cpuidle-qcom-spm.c  11
-rw-r--r--  drivers/cpuidle/cpuidle.c  8
-rw-r--r--  drivers/cpuidle/governors/menu.c  70
-rw-r--r--  drivers/cpuidle/sysfs.c  34
-rw-r--r--  drivers/crypto/Kconfig  2
-rw-r--r--  drivers/crypto/ccp/sev-dev.c  10
-rw-r--r--  drivers/devfreq/event/rockchip-dfi.c  91
-rw-r--r--  drivers/devfreq/mtk-cci-devfreq.c  5
-rw-r--r--  drivers/dibs/Kconfig  23
-rw-r--r--  drivers/dibs/Makefile  8
-rw-r--r--  drivers/dibs/dibs_loopback.c  361
-rw-r--r--  drivers/dibs/dibs_loopback.h  57
-rw-r--r--  drivers/dibs/dibs_main.c  278
-rw-r--r--  drivers/dma-buf/dma-heap.c  4
-rw-r--r--  drivers/dpll/dpll_netlink.c  66
-rw-r--r--  drivers/dpll/dpll_nl.c  5
-rw-r--r--  drivers/dpll/zl3073x/Makefile  2
-rw-r--r--  drivers/dpll/zl3073x/core.c  392
-rw-r--r--  drivers/dpll/zl3073x/core.h  48
-rw-r--r--  drivers/dpll/zl3073x/devlink.c  155
-rw-r--r--  drivers/dpll/zl3073x/devlink.h  3
-rw-r--r--  drivers/dpll/zl3073x/dpll.c  58
-rw-r--r--  drivers/dpll/zl3073x/dpll.h  2
-rw-r--r--  drivers/dpll/zl3073x/flash.c  666
-rw-r--r--  drivers/dpll/zl3073x/flash.h  29
-rw-r--r--  drivers/dpll/zl3073x/fw.c  419
-rw-r--r--  drivers/dpll/zl3073x/fw.h  52
-rw-r--r--  drivers/dpll/zl3073x/regs.h  51
-rw-r--r--  drivers/firewire/core-card.c  490
-rw-r--r--  drivers/firewire/core-cdev.c  36
-rw-r--r--  drivers/firewire/core-device.c  27
-rw-r--r--  drivers/firewire/core-topology.c  91
-rw-r--r--  drivers/firewire/core-transaction.c  130
-rw-r--r--  drivers/firewire/core.h  22
-rw-r--r--  drivers/firewire/ohci.c  316
-rw-r--r--  drivers/firmware/arm_scmi/bus.c  13
-rw-r--r--  drivers/firmware/arm_scmi/quirks.c  15
-rw-r--r--  drivers/firmware/arm_scmi/transports/mailbox.c  7
-rw-r--r--  drivers/firmware/arm_scmi/transports/optee.c  2
-rw-r--r--  drivers/firmware/arm_scmi/transports/virtio.c  3
-rw-r--r--  drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c  111
-rw-r--r--  drivers/firmware/arm_scmi/vendors/imx/imx95.rst  25
-rw-r--r--  drivers/firmware/arm_scmi/voltage.c  2
-rw-r--r--  drivers/firmware/broadcom/bcm47xx_sprom.c  2
-rw-r--r--  drivers/firmware/efi/efi-init.c  29
-rw-r--r--  drivers/firmware/meson/Kconfig  2
-rw-r--r--  drivers/firmware/meson/meson_sm.c  7
-rw-r--r--  drivers/firmware/qcom/qcom_scm.c  125
-rw-r--r--  drivers/firmware/qcom/qcom_scm.h  7
-rw-r--r--  drivers/firmware/qcom/qcom_tzmem.c  64
-rw-r--r--  drivers/firmware/samsung/exynos-acpm-pmic.c  25
-rw-r--r--  drivers/firmware/ti_sci.c  57
-rw-r--r--  drivers/firmware/ti_sci.h  3
-rw-r--r--  drivers/fwctl/mlx5/main.c  9
-rw-r--r--  drivers/fwctl/pds/main.c  18
-rw-r--r--  drivers/gpio/gpio-rockchip.c  2
-rw-r--r--  drivers/gpu/drm/Kconfig  2
-rw-r--r--  drivers/gpu/drm/Makefile  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Makefile  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h  44
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c  53
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v12.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c  32
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c  34
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c  41
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c  107
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c  14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c  103
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c  275
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c  35
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c  12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c  41
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c  224
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h  17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c  9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h  6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h  15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c  11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c  18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c  66
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h  11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h  6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.h  19
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c  76
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h  10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c  65
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h  31
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c  16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c  20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c  348
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h  28
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c  15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h  13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h  30
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c  138
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h  11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c  358
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h  10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c  44
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_utils.h  91
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c  204
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h  15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c  55
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h  11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c  245
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h  60
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c  12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c  31
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c  87
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h  17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c  21
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atom.c  27
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atom.h  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c  56
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c  3818
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v6_0.c  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c  25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c  7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c  26
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c  12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c  36
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c  60
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c  60
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c  65
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c  10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c  69
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ih_v6_0.c  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ih_v6_1.c  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ih_v7_0.c  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c  58
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h  6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c  27
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mes_userqueue.c  144
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mes_v11_0.c  37
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mes_v12_0.c  112
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c  32
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c  47
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c  6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nv.h  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.c  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v12_0.c  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c  29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c  6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c  6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c  103
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c  134
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c  105
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c  114
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c  139
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c  108
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c  115
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c  171
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.c  7
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_chardev.c  31
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device.c  20
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c  7
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_migrate.c  93
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_priv.h  11
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_svm.c  40
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_topology.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c  56
-rw-r--r--  drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.h  1
-rw-r--r--  drivers/gpu/drm/amd/display/Makefile  1
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c  493
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h  26
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c  88
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c  1
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h  1
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c  24
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c  35
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h  1
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c  15
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h  1
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c  1
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c  1
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h  1
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h  1
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c  63
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h  3
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c  4
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h  1
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c  3
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h  1
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c  5
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/Makefile  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/basics/vector.c  6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c  5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c  16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c  144
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h  5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c  9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c  5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c  34
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.c  130
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.h  10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c  191
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c  10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_resource.c  51
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_stat.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_state.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_stream.c  8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc.h  89
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c  66
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h  36
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dp_types.h  17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_helper.c  7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_spl_translate.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_stream.h  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_types.h  19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c  24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_aux.c  17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c  14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_link_encoder.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dio/dcn20/dcn20_stream_encoder.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dio/dcn31/dcn31_dio_link_encoder.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_stream_encoder.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c  4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dm_services.h  13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dm_services_types.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c  24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c  6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c  417
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c  6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h  8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c  28
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h  12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c  21
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c  6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c  6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h  10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c  36
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c  5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c  31
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h  6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c  40
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h  9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c  26
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h  8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c  15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.h  6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c  41
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h  8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c  15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h  7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c  140
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c  15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c  50
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h  5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c  4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c  57
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h  4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/core_types.h  13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h  24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h  21
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h  26
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/link_service.h (renamed from drivers/gpu/drm/amd/display/dc/inc/link.h)  11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/resource.h  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/soc_and_ip_translator.h  24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c  17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_detection.c  6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_detection.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_dpms.c  14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_dpms.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_factory.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_factory.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_resource.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_validation.c  6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_validation.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c  51
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h  8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c  65
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c  18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c  26
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/mmhubbub/dcn20/dcn20_mmhubbub.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c  8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h  5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c  18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c  5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c  25
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.h  9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c  12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c  8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c  60
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c  60
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c  4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c  5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h  6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c  5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c  5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c  5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c  6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/Makefile  19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.c  304
-rw-r--r--  drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h  22
-rw-r--r--  drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.c  27
-rw-r--r--  drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.h  16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/soc_and_ip_translator.c  37
-rw-r--r--  drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c  7
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/dmub_srv.h  4
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h  189
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c  1
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c  53
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h  8
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c  7
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c  8
-rw-r--r--  drivers/gpu/drm/amd/display/include/dal_asic_id.h  5
-rw-r--r--  drivers/gpu/drm/amd/display/modules/freesync/freesync.c  4
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c  5
-rw-r--r--  drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h  1
-rw-r--r--  drivers/gpu/drm/amd/include/amd_shared.h  98
-rw-r--r--  drivers/gpu/drm/amd/include/atomfirmware.h  30
-rw-r--r--  drivers/gpu/drm/amd/include/dm_pp_interface.h  1
-rw-r--r--  drivers/gpu/drm/amd/include/kgd_pp_interface.h  85
-rw-r--r--  drivers/gpu/drm/amd/include/mes_v11_api_def.h  3
-rw-r--r--  drivers/gpu/drm/amd/include/mes_v12_api_def.h  33
-rw-r--r--  drivers/gpu/drm/amd/pm/amdgpu_dpm.c  79
-rw-r--r--  drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c  86
-rw-r--r--  drivers/gpu/drm/amd/pm/amdgpu_pm.c  370
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h  9
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/amdgpu_dpm_internal.h  6
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c  4
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c  9
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c  126
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c  26
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c  11
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c  4
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c  2
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c  5
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c  5
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c  5
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c  2
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c  5
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c  97
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h  129
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h  82
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h  7
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h  4
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h  4
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_0_pptable.h (renamed from drivers/gpu/drm/amd/pm/inc/smu_v13_0_0_pptable.h)  0
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c  21
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c  19
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c  19
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c  2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c  17
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c  19
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c  368
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c  214
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h  11
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c  19
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c  14
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h  26
-rw-r--r--  drivers/gpu/drm/amd/ras/rascore/Makefile  0
-rw-r--r--  drivers/gpu/drm/amd/ras/rascore/ras_core_status.h (renamed from drivers/gpu/drm/amd/amdgpu/dce_v11_0.h)  21
-rw-r--r--  drivers/gpu/drm/ast/ast_2100.c  46
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.h  20
-rw-r--r--  drivers/gpu/drm/ast/ast_main.c  126
-rw-r--r--  drivers/gpu/drm/bridge/Kconfig  29
-rw-r--r--  drivers/gpu/drm/bridge/Makefile  2
-rw-r--r--  drivers/gpu/drm/bridge/adv7511/adv7511.h  6
-rw-r--r--  drivers/gpu/drm/bridge/adv7511/adv7511_audio.c  23
-rw-r--r--  drivers/gpu/drm/bridge/adv7511/adv7511_drv.c  63
-rw-r--r--  drivers/gpu/drm/bridge/analogix/anx7625.c  1
-rw-r--r--  drivers/gpu/drm/bridge/cadence/Kconfig  1
-rw-r--r--  drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c  211
-rw-r--r--  drivers/gpu/drm/bridge/display-connector.c  7
-rw-r--r--  drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c  12
-rw-r--r--  drivers/gpu/drm/bridge/ite-it6263.c  64
-rw-r--r--  drivers/gpu/drm/bridge/ite-it6505.c  33
-rw-r--r--  drivers/gpu/drm/bridge/samsung-dsim.c  353
-rw-r--r--  drivers/gpu/drm/bridge/simple-bridge.c  10
-rw-r--r--  drivers/gpu/drm/bridge/ssd2825.c  775
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/Kconfig  7
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/Makefile  1
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/dw-dp.c  2095
-rw-r--r--  drivers/gpu/drm/bridge/waveshare-dsi.c  203
-rw-r--r--  drivers/gpu/drm/display/drm_bridge_connector.c  20
-rw-r--r--  drivers/gpu/drm/display/drm_dp_cec.c  2
-rw-r--r--  drivers/gpu/drm/display/drm_dp_helper.c  4
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c  7
-rw-r--r--  drivers/gpu/drm/drm_atomic_uapi.c  23
-rw-r--r--  drivers/gpu/drm/drm_bridge.c  7
-rw-r--r--  drivers/gpu/drm/drm_color_mgmt.c  34
-rw-r--r--  drivers/gpu/drm/drm_drv.c  4
-rw-r--r--  drivers/gpu/drm/drm_format_helper.c  111
-rw-r--r--  drivers/gpu/drm/drm_gem.c  92
-rw-r--r--  drivers/gpu/drm/drm_gpusvm.c  419
-rw-r--r--  drivers/gpu/drm/drm_gpuvm.c  317
-rw-r--r--  drivers/gpu/drm/drm_internal.h  4
-rw-r--r--  drivers/gpu/drm/drm_ioctl.c  1
-rw-r--r--  drivers/gpu/drm/drm_mipi_dsi.c  119
-rw-r--r--  drivers/gpu/drm/drm_of.c  7
-rw-r--r--  drivers/gpu/drm/drm_pagemap.c  138
-rw-r--r--  drivers/gpu/drm/drm_panel.c  73
-rw-r--r--  drivers/gpu/drm/drm_panel_backlight_quirks.c  113
-rw-r--r--  drivers/gpu/drm/drm_prime.c  6
-rw-r--r--  drivers/gpu/drm/drm_probe_helper.c  1
-rw-r--r--  drivers/gpu/drm/drm_sysfs.c  41
-rw-r--r--  drivers/gpu/drm/exynos/exynos7_drm_decon.c  36
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dsi.c  9
-rw-r--r--  drivers/gpu/drm/gma500/fbdev.c  2
-rw-r--r--  drivers/gpu/drm/gud/gud_connector.c  25
-rw-r--r--  drivers/gpu/drm/gud/gud_drv.c  54
-rw-r--r--  drivers/gpu/drm/gud/gud_internal.h  13
-rw-r--r--  drivers/gpu/drm/gud/gud_pipe.c  64
-rw-r--r--  drivers/gpu/drm/i915/Kconfig.debug  2
-rw-r--r--  drivers/gpu/drm/i915/Makefile  2
-rw-r--r--  drivers/gpu/drm/i915/display/g4x_dp.c  51
-rw-r--r--  drivers/gpu/drm/i915/display/g4x_hdmi.c  15
-rw-r--r--  drivers/gpu/drm/i915/display/i9xx_plane.c  62
-rw-r--r--  drivers/gpu/drm/i915/display/i9xx_plane.h  1
-rw-r--r--  drivers/gpu/drm/i915/display/i9xx_wm.c  32
-rw-r--r--  drivers/gpu/drm/i915/display/icl_dsi.c  59
-rw-r--r--  drivers/gpu/drm/i915/display/intel_alpm.c  133
-rw-r--r--  drivers/gpu/drm/i915/display/intel_backlight.c  3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bios.c  46
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bios.h  176
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bo.c  17
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bo.h  3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bw.c  2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cdclk.c  56
-rw-r--r--  drivers/gpu/drm/i915/display/intel_connector.c  2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_connector.h  1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crt.c  5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cursor.c  28
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.c  142
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c  9
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_conversion.c  2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_conversion.h  12
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_debugfs.c  6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_debugfs_params.c  7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_device.c  20
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_device.h  8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_driver.c  1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_irq.c  9
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_params.c  3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_params.h  1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power.c  24
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power_map.c  57
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power_well.c  52
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_regs.h  2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_types.h  18
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_wa.c  35
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_wa.h  11
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dmc.c  4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.c  137
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.h  3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c  16
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_link_training.c  156
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_link_training.h  2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_mst.c  11
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_test.c  4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll_mgr.c  20
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpt.c  4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsb.c  10
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsi_vbt.c  2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsi_vbt_defs.h  197
-rw-r--r--  drivers/gpu/drm/i915/display/intel_encoder.c  41
-rw-r--r--  drivers/gpu/drm/i915/display/intel_encoder.h  6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fb.c  24
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fb.h  3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fb_pin.c  39
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbc.c  19
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fdi.c  28
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fdi.h  1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_global_state.c  32
-rw-r--r--  drivers/gpu/drm/i915/display/intel_global_state.h  36
-rw-r--r--  drivers/gpu/drm/i915/display/intel_gmbus.c  53
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdcp.c  33
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdmi.c  24
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hotplug.c  11
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hotplug_irq.c  2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_link_bw.c  34
-rw-r--r--  drivers/gpu/drm/i915/display/intel_link_bw.h  1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lpe_audio.c  11
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lspcon.c  13
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lvds.c  3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_opregion.c  14
-rw-r--r--  drivers/gpu/drm/i915/display/intel_overlay.c  5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_panic.c  27
-rw-r--r--  drivers/gpu/drm/i915/display/intel_panic.h  14
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pch.h  4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pch_refclk.c  14
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pfit.c  11
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pfit.h  10
-rw-r--r--  drivers/gpu/drm/i915/display/intel_plane.c  12
-rw-r--r--  drivers/gpu/drm/i915/display/intel_plane.h  1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_plane_initial.c  2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pps.c  10
-rw-r--r--  drivers/gpu/drm/i915/display/intel_psr.c  190
-rw-r--r--  drivers/gpu/drm/i915/display/intel_psr.h  2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_quirks.c  9
-rw-r--r--  drivers/gpu/drm/i915/display/intel_quirks.h  1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sdvo.c  10
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sprite.c  51
-rw-r--r--  drivers/gpu/drm/i915/display/intel_tc.c  167
-rw-r--r--  drivers/gpu/drm/i915/display/intel_tc.h  72
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vblank.c  16
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vbt_defs.h  20
-rw-r--r--  drivers/gpu/drm/i915/display/intel_wm.c  9
-rw-r--r--  drivers/gpu/drm/i915/display/skl_scaler.c  53
-rw-r--r--  drivers/gpu/drm/i915/display/skl_scaler.h  13
-rw-r--r--  drivers/gpu/drm/i915/display/skl_universal_plane.c  64
-rw-r--r--  drivers/gpu/drm/i915/display/skl_watermark.c  20
-rw-r--r--  drivers/gpu/drm/i915/display/vlv_dsi.c  4
-rw-r--r--  drivers/gpu/drm/i915/display/vlv_dsi_pll.c  32
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_context.c  12
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c  59
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.c  4
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.h  11
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_pages.c  42
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_shrinker.c  4
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_wait.c  8
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gemfs.c  9
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c  5
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c  70
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context_types.h  1
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_execlists_submission.c  6
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c  1
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c  2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_debugfs.c  5
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_mcr.c  1
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rc6.c  2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_reset.c  7
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_reset_types.h  2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ring_submission.c  7
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rps.c  13
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_timeline.h  1
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_workarounds.c  29
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_hangcheck.c  2
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_tlb.c  6
-rw-r--r--  drivers/gpu/drm/i915/gt/sysfs_engines.c  1
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c  6
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c  2
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc.c  8
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c  14
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h  2
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c  12
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_log.c  12
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_log.h  8
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h  1
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c  13
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c  10
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c  2
-rw-r--r--  drivers/gpu/drm/i915/gvt/debugfs.c  12
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio_context.c  6
-rw-r--r--  drivers/gpu/drm/i915/i915_active.c  5
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c  23
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs_params.c  4
-rw-r--r--  drivers/gpu/drm/i915/i915_driver.c  18
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h  8
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c  3
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c  108
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.h  1
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c  19
-rw-r--r--  drivers/gpu/drm/i915/i915_list_util.h  23
-rw-r--r--  drivers/gpu/drm/i915/i915_ptr_util.h  66
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h  9
-rw-r--r--  drivers/gpu/drm/i915/i915_request.h  5
-rw-r--r--  drivers/gpu/drm/i915/i915_switcheroo.c  9
-rw-r--r--  drivers/gpu/drm/i915/i915_timer_util.c  36
-rw-r--r--  drivers/gpu/drm/i915/i915_timer_util.h  23
-rw-r--r--  drivers/gpu/drm/i915/i915_utils.c  30
-rw-r--r--  drivers/gpu/drm/i915/i915_utils.h  210
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.h  6
-rw-r--r--  drivers/gpu/drm/i915/i915_wait_util.h  119
-rw-r--r--  drivers/gpu/drm/i915/intel_clock_gating.c  35
-rw-r--r--  drivers/gpu/drm/i915/intel_gvt_mmio_table.c  266
-rw-r--r--  drivers/gpu/drm/i915/intel_pcode.c  1
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c  10
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp.c  4
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c  8
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_request.c  5
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_selftest.c  2
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_spinner.c  5
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_uncore.c  8
-rw-r--r--  drivers/gpu/drm/i915/soc/intel_dram.c  97
-rw-r--r--  drivers/gpu/drm/i915/soc/intel_dram.h  13
-rw-r--r--  drivers/gpu/drm/i915/soc/intel_gmch.c  3
-rw-r--r--  drivers/gpu/drm/i915/vlv_suspend.c  5
-rw-r--r--  drivers/gpu/drm/imagination/Kconfig  3
-rw-r--r--  drivers/gpu/drm/imagination/pvr_device.c  22
-rw-r--r--  drivers/gpu/drm/imagination/pvr_device.h  17
-rw-r--r--  drivers/gpu/drm/imagination/pvr_drv.c  23
-rw-r--r--  drivers/gpu/drm/imagination/pvr_power.c  158
-rw-r--r--  drivers/gpu/drm/imagination/pvr_power.h  15
-rw-r--r--  drivers/gpu/drm/imagination/pvr_vm.c  15
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_catalog.c  92
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.c  108
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.h  14
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu.c  242
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu.h  3
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c  10
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_hfi.c  34
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_preempt.c  44
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_device.c  13
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c  21
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.h  1
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c  35
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h  3
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c  17
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c  2
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c  5
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h  1
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c  2
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c  4
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c  23
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c  10
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c  29
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h  2
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c  2
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c  47
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c  2
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy.h  1
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c  16
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c  34
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c  21
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c  32
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c  95
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c  16
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c  16
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c  12
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c  1
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h  2
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c  21
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.h  6
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_prime.c  2
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_vma.c  67
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c  2
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.h  9
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu_trace.h  12
-rw-r--r--  drivers/gpu/drm/msm/msm_iommu.c  8
-rw-r--r--  drivers/gpu/drm/msm/msm_kms.c  14
-rw-r--r--  drivers/gpu/drm/msm/msm_mdss.c  3
-rw-r--r--  drivers/gpu/drm/msm/msm_submitqueue.c  4
-rw-r--r--  drivers/gpu/drm/msm/registers/adreno/a6xx.xml  718
-rw-r--r--  drivers/gpu/drm/msm/registers/adreno/a6xx_descriptors.xml  40
-rw-r--r--  drivers/gpu/drm/msm/registers/adreno/a6xx_enums.xml  50
-rw-r--r--  drivers/gpu/drm/msm/registers/adreno/a6xx_gmu.xml  11
-rw-r--r--  drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml  179
-rw-r--r--  drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml  11
-rw-r--r--  drivers/gpu/drm/msm/registers/gen_header.py  201
-rw-r--r--  drivers/gpu/drm/mxsfb/lcdif_kms.c  4
-rw-r--r--  drivers/gpu/drm/nouveau/Kconfig  8
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_chan.h  2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dma.h  1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_platform.c  5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_prime.c  12
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_uvmm.c  11
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/enum.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c  4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c  4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb100.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb202.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h  1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c  3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c  6
-rw-r--r--  drivers/gpu/drm/nova/driver.rs  4
-rw-r--r--  drivers/gpu/drm/nova/file.rs  24
-rw-r--r--  drivers/gpu/drm/nova/gem.rs  10
-rw-r--r--  drivers/gpu/drm/nova/nova.rs  1
-rw-r--r--  drivers/gpu/drm/nova/uapi.rs  61
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_drv.c  6
-rw-r--r--  drivers/gpu/drm/panel/Kconfig  26
-rw-r--r--  drivers/gpu/drm/panel/Makefile  2
-rw-r--r--  drivers/gpu/drm/panel/panel-edp.c  55
-rw-r--r--  drivers/gpu/drm/panel/panel-himax-hx8279.c  2
-rw-r--r--  drivers/gpu/drm/panel/panel-hydis-hv101hd1.c  188
-rw-r--r--  drivers/gpu/drm/panel/panel-ilitek-ili9881c.c  302
-rw-r--r--  drivers/gpu/drm/panel/panel-jdi-lpm102a188a.c  192
-rw-r--r--  drivers/gpu/drm/panel/panel-lvds.c  2
-rw-r--r--  drivers/gpu/drm/panel/panel-novatek-nt35560.c  198
-rw-r--r--  drivers/gpu/drm/panel/panel-novatek-nt36523.c  804
-rw-r--r--  drivers/gpu/drm/panel/panel-orisetech-ota5601a.c  7
-rw-r--r--  drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c  2
-rw-r--r--  drivers/gpu/drm/panel/panel-samsung-s6e8aa5x01-ams561ra01.c  981
-rw-r--r--  drivers/gpu/drm/panel/panel-simple.c  26
-rw-r--r--  drivers/gpu/drm/panel/panel-sitronix-st7703.c  2
-rw-r--r--  drivers/gpu/drm/panel/panel-summit.c  2
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_perfcnt.c  2
-rw-r--r--  drivers/gpu/drm/panthor/Makefile  1
-rw-r--r--  drivers/gpu/drm/panthor/panthor_device.c  5
-rw-r--r--  drivers/gpu/drm/panthor/panthor_drv.c  25
-rw-r--r--  drivers/gpu/drm/panthor/panthor_fw.c  5
-rw-r--r--  drivers/gpu/drm/panthor/panthor_gem.c  3
-rw-r--r--  drivers/gpu/drm/panthor/panthor_gem.h  12
-rw-r--r--  drivers/gpu/drm/panthor/panthor_gpu.c  100
-rw-r--r--  drivers/gpu/drm/panthor/panthor_hw.c  125
-rw-r--r--  drivers/gpu/drm/panthor/panthor_hw.h  11
-rw-r--r--  drivers/gpu/drm/panthor/panthor_mmu.c  76
-rw-r--r--  drivers/gpu/drm/panthor/panthor_regs.h  3
-rw-r--r--  drivers/gpu/drm/panthor/panthor_sched.c  43
-rw-r--r--  drivers/gpu/drm/radeon/atombios_encoders.c  2
-rw-r--r--  drivers/gpu/drm/radeon/ci_dpm.c  14
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_cs.c  523
-rw-r--r--  drivers/gpu/drm/radeon/ni_dpm.c  2
-rw-r--r--  drivers/gpu/drm/radeon/r100.c  215
-rw-r--r--  drivers/gpu/drm/radeon/r200.c  34
-rw-r--r--  drivers/gpu/drm/radeon/r300.c  66
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c  449
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c  2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c  4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c  4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c  8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_encoders.c  20
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c  3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_test.c  4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vce.c  6
-rw-r--r--  drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c  4
-rw-r--r--  drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c  230
-rw-r--r--  drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h  133
-rw-r--r--  drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c  8
-rw-r--r--  drivers/gpu/drm/rockchip/Kconfig  9
-rw-r--r--  drivers/gpu/drm/rockchip/Makefile  1
-rw-r--r--  drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c  142
-rw-r--r--  drivers/gpu/drm/rockchip/dw-mipi-dsi2-rockchip.c  21
-rw-r--r--  drivers/gpu/drm/rockchip/dw_dp-rockchip.c  150
-rw-r--r--  drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c  80
-rw-r--r--  drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c  68
-rw-r--r--  drivers/gpu/drm/rockchip/inno_hdmi.c  11
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_drv.c  1
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_drv.h  1
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop2.h  1
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_lvds.h  21
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_vop2_reg.c  15
-rw-r--r--  drivers/gpu/drm/scheduler/sched_entity.c  14
-rw-r--r--  drivers/gpu/drm/scheduler/sched_main.c  67
-rw-r--r--  drivers/gpu/drm/scheduler/tests/mock_scheduler.c  2
-rw-r--r--  drivers/gpu/drm/scheduler/tests/sched_tests.h  8
-rw-r--r--  drivers/gpu/drm/scheduler/tests/tests_basic.c  4
-rw-r--r--  drivers/gpu/drm/sitronix/st7571-i2c.c  45
-rw-r--r--  drivers/gpu/drm/solomon/ssd130x-spi.c  3
-rw-r--r--  drivers/gpu/drm/sti/sti_hqvdp.c  2
-rw-r--r--  drivers/gpu/drm/stm/drv.c  12
-rw-r--r--  drivers/gpu/drm/stm/ltdc.c  197
-rw-r--r--  drivers/gpu/drm/stm/ltdc.h  6
-rw-r--r--  drivers/gpu/drm/sysfb/drm_sysfb_helper.h  2
-rw-r--r--  drivers/gpu/drm/sysfb/drm_sysfb_modeset.c  23
-rw-r--r--  drivers/gpu/drm/sysfb/drm_sysfb_screen_info.c  21
-rw-r--r--  drivers/gpu/drm/sysfb/simpledrm.c  15
-rw-r--r--  drivers/gpu/drm/sysfb/vesadrm.c  153
-rw-r--r--  drivers/gpu/drm/tests/drm_exec_test.c  22
-rw-r--r--  drivers/gpu/drm/tidss/tidss_crtc.c  7
-rw-r--r--  drivers/gpu/drm/tidss/tidss_dispc.c  322
-rw-r--r--  drivers/gpu/drm/tidss/tidss_dispc.h  3
-rw-r--r--  drivers/gpu/drm/tidss/tidss_dispc_regs.h  76
-rw-r--r--  drivers/gpu/drm/tidss/tidss_drv.c  9
-rw-r--r--  drivers/gpu/drm/tidss/tidss_drv.h  2
-rw-r--r--  drivers/gpu/drm/tidss/tidss_oldi.c  1
-rw-r--r--  drivers/gpu/drm/tidss/tidss_plane.h  2
-rw-r--r--  drivers/gpu/drm/tidss/tidss_scale_coefs.h  2
-rw-r--r--  drivers/gpu/drm/tiny/Kconfig  15
-rw-r--r--  drivers/gpu/drm/tiny/Makefile  1
-rw-r--r--  drivers/gpu/drm/tiny/bochs.c  2
-rw-r--r--  drivers/gpu/drm/tiny/pixpaper.c  1165
-rw-r--r--  drivers/gpu/drm/tiny/repaper.c  16
-rw-r--r--  drivers/gpu/drm/tiny/sharp-memory.c  27
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c  15
-rw-r--r--  drivers/gpu/drm/tyr/Kconfig  19
-rw-r--r--  drivers/gpu/drm/tyr/Makefile  3
-rw-r--r--  drivers/gpu/drm/tyr/driver.rs  205
-rw-r--r--  drivers/gpu/drm/tyr/file.rs  56
-rw-r--r--  drivers/gpu/drm/tyr/gem.rs  18
-rw-r--r--  drivers/gpu/drm/tyr/gpu.rs  219
-rw-r--r--  drivers/gpu/drm/tyr/regs.rs  108
-rw-r--r--  drivers/gpu/drm/tyr/tyr.rs  22
-rw-r--r--  drivers/gpu/drm/v3d/v3d_drv.c  25
-rw-r--r--  drivers/gpu/drm/v3d/v3d_drv.h  33
-rw-r--r--  drivers/gpu/drm/v3d/v3d_fence.c  11
-rw-r--r--  drivers/gpu/drm/v3d/v3d_gem.c  10
-rw-r--r--  drivers/gpu/drm/v3d/v3d_gemfs.c  9
-rw-r--r--  drivers/gpu/drm/v3d/v3d_irq.c  68
-rw-r--r--  drivers/gpu/drm/v3d/v3d_sched.c  86
-rw-r--r--  drivers/gpu/drm/v3d/v3d_submit.c  2
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_display.c  3
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_kms.c  20
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_object.c  4
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_plane.c  2
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_vq.c  2
-rw-r--r--  drivers/gpu/drm/vkms/tests/vkms_config_test.c  51
-rw-r--r--  drivers/gpu/drm/vkms/tests/vkms_format_test.c  143
-rw-r--r--  drivers/gpu/drm/vkms/vkms_formats.c  331
-rw-r--r--  drivers/gpu/drm/vkms/vkms_formats.h  4
-rw-r--r--  drivers/gpu/drm/vkms/vkms_output.c  13
-rw-r--r--  drivers/gpu/drm/vkms/vkms_plane.c  13
-rw-r--r--  drivers/gpu/drm/vkms/vkms_writeback.c  2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.c  2
-rw-r--r--  drivers/gpu/drm/xe/Kconfig  2
-rw-r--r--  drivers/gpu/drm/xe/Kconfig.debug  1
-rw-r--r--  drivers/gpu/drm/xe/Makefile  16
-rw-r--r--  drivers/gpu/drm/xe/abi/guc_actions_abi.h  10
-rw-r--r--  drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h  5
-rw-r--r--  drivers/gpu/drm/xe/abi/guc_errors_abi.h  3
-rw-r--r--  drivers/gpu/drm/xe/abi/guc_klvs_abi.h  2
-rw-r--r--  drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h  15
-rw-r--r--  drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h  22
-rw-r--r--  drivers/gpu/drm/xe/display/ext/i915_utils.c  1
-rw-r--r--  drivers/gpu/drm/xe/display/intel_bo.c  91
-rw-r--r--  drivers/gpu/drm/xe/display/intel_fbdev_fb.c  20
-rw-r--r--  drivers/gpu/drm/xe/display/xe_display.c  39
-rw-r--r--  drivers/gpu/drm/xe/display/xe_display_wa.c  2
-rw-r--r--  drivers/gpu/drm/xe/display/xe_dsb_buffer.c  10
-rw-r--r--  drivers/gpu/drm/xe/display/xe_fb_pin.c  87
-rw-r--r--  drivers/gpu/drm/xe/display/xe_hdcp_gsc.c  8
-rw-r--r--  drivers/gpu/drm/xe/display/xe_panic.c  80
-rw-r--r--  drivers/gpu/drm/xe/display/xe_plane_initial.c  13
-rw-r--r--  drivers/gpu/drm/xe/instructions/xe_mi_commands.h  1
-rw-r--r--  drivers/gpu/drm/xe/regs/xe_engine_regs.h  3
-rw-r--r--  drivers/gpu/drm/xe/regs/xe_gsc_regs.h  2
-rw-r--r--  drivers/gpu/drm/xe/regs/xe_gt_regs.h  3
-rw-r--r--  drivers/gpu/drm/xe/regs/xe_hw_error_regs.h  20
-rw-r--r--drivers/gpu/drm/xe/regs/xe_irq_regs.h1
-rw-r--r--drivers/gpu/drm/xe/regs/xe_lrc_layout.h3
-rw-r--r--drivers/gpu/drm/xe/regs/xe_pmt.h10
-rw-r--r--drivers/gpu/drm/xe/tests/xe_bo.c36
-rw-r--r--drivers/gpu/drm/xe/tests/xe_dma_buf.c29
-rw-r--r--drivers/gpu/drm/xe/tests/xe_guc_g2g_test.c776
-rw-r--r--drivers/gpu/drm/xe/tests/xe_live_test_mod.c2
-rw-r--r--drivers/gpu/drm/xe/tests/xe_migrate.c66
-rw-r--r--drivers/gpu/drm/xe/tests/xe_pci.c246
-rw-r--r--drivers/gpu/drm/xe/tests/xe_pci_test.h15
-rw-r--r--drivers/gpu/drm/xe/tests/xe_wa_test.c90
-rw-r--r--drivers/gpu/drm/xe/xe_assert.h4
-rw-r--r--drivers/gpu/drm/xe/xe_bb.c35
-rw-r--r--drivers/gpu/drm/xe/xe_bb.h3
-rw-r--r--drivers/gpu/drm/xe/xe_bo.c868
-rw-r--r--drivers/gpu/drm/xe/xe_bo.h82
-rw-r--r--drivers/gpu/drm/xe/xe_bo_types.h25
-rw-r--r--drivers/gpu/drm/xe/xe_configfs.c778
-rw-r--r--drivers/gpu/drm/xe/xe_configfs.h16
-rw-r--r--drivers/gpu/drm/xe/xe_debugfs.c142
-rw-r--r--drivers/gpu/drm/xe/xe_dep_job_types.h29
-rw-r--r--drivers/gpu/drm/xe/xe_dep_scheduler.c143
-rw-r--r--drivers/gpu/drm/xe/xe_dep_scheduler.h21
-rw-r--r--drivers/gpu/drm/xe/xe_device.c121
-rw-r--r--drivers/gpu/drm/xe/xe_device.h1
-rw-r--r--drivers/gpu/drm/xe/xe_device_sysfs.c110
-rw-r--r--drivers/gpu/drm/xe/xe_device_types.h102
-rw-r--r--drivers/gpu/drm/xe/xe_dma_buf.c84
-rw-r--r--drivers/gpu/drm/xe/xe_eu_stall.c9
-rw-r--r--drivers/gpu/drm/xe/xe_exec.c31
-rw-r--r--drivers/gpu/drm/xe/xe_exec_queue.c111
-rw-r--r--drivers/gpu/drm/xe/xe_exec_queue.h5
-rw-r--r--drivers/gpu/drm/xe/xe_exec_queue_types.h15
-rw-r--r--drivers/gpu/drm/xe/xe_ggtt.c42
-rw-r--r--drivers/gpu/drm/xe/xe_ggtt.h5
-rw-r--r--drivers/gpu/drm/xe/xe_gpu_scheduler.c13
-rw-r--r--drivers/gpu/drm/xe/xe_gpu_scheduler.h1
-rw-r--r--drivers/gpu/drm/xe/xe_gsc.c14
-rw-r--r--drivers/gpu/drm/xe/xe_gt.c33
-rw-r--r--drivers/gpu/drm/xe/xe_gt_debugfs.c49
-rw-r--r--drivers/gpu/drm/xe/xe_gt_freq.c28
-rw-r--r--drivers/gpu/drm/xe/xe_gt_idle.c21
-rw-r--r--drivers/gpu/drm/xe/xe_gt_idle.h2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_mcr.c6
-rw-r--r--drivers/gpu/drm/xe/xe_gt_mcr.h3
-rw-r--r--drivers/gpu/drm/xe/xe_gt_pagefault.c57
-rw-r--r--drivers/gpu/drm/xe/xe_gt_printk.h32
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf.c24
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c34
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c24
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_vf.c14
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_vf.h1
-rw-r--r--drivers/gpu/drm/xe/xe_gt_stats.c57
-rw-r--r--drivers/gpu/drm/xe/xe_gt_stats.h1
-rw-r--r--drivers/gpu/drm/xe/xe_gt_stats_types.h33
-rw-r--r--drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c596
-rw-r--r--drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h40
-rw-r--r--drivers/gpu/drm/xe/xe_gt_tlb_invalidation_types.h32
-rw-r--r--drivers/gpu/drm/xe/xe_gt_topology.c48
-rw-r--r--drivers/gpu/drm/xe/xe_gt_topology.h4
-rw-r--r--drivers/gpu/drm/xe/xe_gt_types.h33
-rw-r--r--drivers/gpu/drm/xe/xe_guc.c89
-rw-r--r--drivers/gpu/drm/xe/xe_guc.h4
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ads.c127
-rw-r--r--drivers/gpu/drm/xe/xe_guc_buf.c2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ct.c63
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ct.h1
-rw-r--r--drivers/gpu/drm/xe/xe_guc_engine_activity.c13
-rw-r--r--drivers/gpu/drm/xe/xe_guc_fwif.h37
-rw-r--r--drivers/gpu/drm/xe/xe_guc_log.h2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pc.c90
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pc.h2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pc_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_submit.c215
-rw-r--r--drivers/gpu/drm/xe/xe_guc_submit.h10
-rw-r--r--drivers/gpu/drm/xe/xe_guc_tlb_inval.c242
-rw-r--r--drivers/gpu/drm/xe/xe_guc_tlb_inval.h19
-rw-r--r--drivers/gpu/drm/xe/xe_guc_types.h6
-rw-r--r--drivers/gpu/drm/xe/xe_heci_gsc.c2
-rw-r--r--drivers/gpu/drm/xe/xe_hmm.c325
-rw-r--r--drivers/gpu/drm/xe/xe_hmm.h18
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine.c2
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine_group.c4
-rw-r--r--drivers/gpu/drm/xe/xe_hw_error.c182
-rw-r--r--drivers/gpu/drm/xe/xe_hw_error.h15
-rw-r--r--drivers/gpu/drm/xe/xe_hwmon.c18
-rw-r--r--drivers/gpu/drm/xe/xe_i2c.c20
-rw-r--r--drivers/gpu/drm/xe/xe_i2c.h2
-rw-r--r--drivers/gpu/drm/xe/xe_irq.c4
-rw-r--r--drivers/gpu/drm/xe/xe_late_bind_fw.c464
-rw-r--r--drivers/gpu/drm/xe/xe_late_bind_fw.h17
-rw-r--r--drivers/gpu/drm/xe/xe_late_bind_fw_types.h75
-rw-r--r--drivers/gpu/drm/xe/xe_lmtt.c33
-rw-r--r--drivers/gpu/drm/xe/xe_lrc.c264
-rw-r--r--drivers/gpu/drm/xe/xe_lrc.h9
-rw-r--r--drivers/gpu/drm/xe/xe_migrate.c465
-rw-r--r--drivers/gpu/drm/xe/xe_migrate.h29
-rw-r--r--drivers/gpu/drm/xe/xe_mmio.c33
-rw-r--r--drivers/gpu/drm/xe/xe_mmio_gem.c226
-rw-r--r--drivers/gpu/drm/xe/xe_mmio_gem.h20
-rw-r--r--drivers/gpu/drm/xe/xe_module.c29
-rw-r--r--drivers/gpu/drm/xe/xe_nvm.c8
-rw-r--r--drivers/gpu/drm/xe/xe_oa.c14
-rw-r--r--drivers/gpu/drm/xe/xe_pci.c116
-rw-r--r--drivers/gpu/drm/xe/xe_pci_types.h3
-rw-r--r--drivers/gpu/drm/xe/xe_pm.c31
-rw-r--r--drivers/gpu/drm/xe/xe_printk.h129
-rw-r--r--drivers/gpu/drm/xe/xe_psmi.c294
-rw-r--r--drivers/gpu/drm/xe/xe_psmi.h14
-rw-r--r--drivers/gpu/drm/xe/xe_pt.c387
-rw-r--r--drivers/gpu/drm/xe/xe_pt.h3
-rw-r--r--drivers/gpu/drm/xe/xe_pt_types.h5
-rw-r--r--drivers/gpu/drm/xe/xe_pxp.c1
-rw-r--r--drivers/gpu/drm/xe/xe_pxp_submit.c34
-rw-r--r--drivers/gpu/drm/xe/xe_query.c14
-rw-r--r--drivers/gpu/drm/xe/xe_res_cursor.h10
-rw-r--r--drivers/gpu/drm/xe/xe_ring_ops.c22
-rw-r--r--drivers/gpu/drm/xe/xe_rtp.c13
-rw-r--r--drivers/gpu/drm/xe/xe_rtp.h6
-rw-r--r--drivers/gpu/drm/xe/xe_sa.c1
-rw-r--r--drivers/gpu/drm/xe/xe_sa.h15
-rw-r--r--drivers/gpu/drm/xe/xe_sa_types.h1
-rw-r--r--drivers/gpu/drm/xe/xe_sriov.c15
-rw-r--r--drivers/gpu/drm/xe/xe_sriov.h1
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_vf.c191
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_vf.h6
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_vf_ccs.c410
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_vf_ccs.h34
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_vf_ccs_types.h51
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_vf_types.h10
-rw-r--r--drivers/gpu/drm/xe/xe_survivability_mode.c178
-rw-r--r--drivers/gpu/drm/xe/xe_survivability_mode.h5
-rw-r--r--drivers/gpu/drm/xe/xe_survivability_mode_types.h8
-rw-r--r--drivers/gpu/drm/xe/xe_svm.c723
-rw-r--r--drivers/gpu/drm/xe/xe_svm.h86
-rw-r--r--drivers/gpu/drm/xe/xe_tile.c62
-rw-r--r--drivers/gpu/drm/xe/xe_tile.h14
-rw-r--r--drivers/gpu/drm/xe/xe_tile_debugfs.c135
-rw-r--r--drivers/gpu/drm/xe/xe_tile_debugfs.h13
-rw-r--r--drivers/gpu/drm/xe/xe_tile_printk.h127
-rw-r--r--drivers/gpu/drm/xe/xe_tlb_inval.c433
-rw-r--r--drivers/gpu/drm/xe/xe_tlb_inval.h46
-rw-r--r--drivers/gpu/drm/xe/xe_tlb_inval_job.c268
-rw-r--r--drivers/gpu/drm/xe/xe_tlb_inval_job.h33
-rw-r--r--drivers/gpu/drm/xe/xe_tlb_inval_types.h130
-rw-r--r--drivers/gpu/drm/xe/xe_trace.h40
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c12
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_vram_mgr.c22
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_vram_mgr.h3
-rw-r--r--drivers/gpu/drm/xe/xe_tuning.c2
-rw-r--r--drivers/gpu/drm/xe/xe_uc_fw.c29
-rw-r--r--drivers/gpu/drm/xe/xe_uc_fw_abi.h130
-rw-r--r--drivers/gpu/drm/xe/xe_uc_fw_types.h3
-rw-r--r--drivers/gpu/drm/xe/xe_userptr.c319
-rw-r--r--drivers/gpu/drm/xe/xe_userptr.h107
-rw-r--r--drivers/gpu/drm/xe/xe_validation.c278
-rw-r--r--drivers/gpu/drm/xe/xe_validation.h192
-rw-r--r--drivers/gpu/drm/xe/xe_vm.c1162
-rw-r--r--drivers/gpu/drm/xe/xe_vm.h69
-rw-r--r--drivers/gpu/drm/xe/xe_vm_madvise.c431
-rw-r--r--drivers/gpu/drm/xe/xe_vm_madvise.h15
-rw-r--r--drivers/gpu/drm/xe/xe_vm_types.h145
-rw-r--r--drivers/gpu/drm/xe/xe_vram.c209
-rw-r--r--drivers/gpu/drm/xe/xe_vram.h11
-rw-r--r--drivers/gpu/drm/xe/xe_vram_freq.c4
-rw-r--r--drivers/gpu/drm/xe/xe_vram_types.h85
-rw-r--r--drivers/gpu/drm/xe/xe_wa.c88
-rw-r--r--drivers/gpu/drm/xe/xe_wa.h8
-rw-r--r--drivers/gpu/drm/xe/xe_wa_oob.rules10
-rw-r--r--drivers/gpu/nova-core/driver.rs13
-rw-r--r--drivers/gpu/nova-core/falcon.rs113
-rw-r--r--drivers/gpu/nova-core/falcon/gsp.rs16
-rw-r--r--drivers/gpu/nova-core/falcon/hal.rs2
-rw-r--r--drivers/gpu/nova-core/falcon/hal/ga102.rs47
-rw-r--r--drivers/gpu/nova-core/falcon/sec2.rs13
-rw-r--r--drivers/gpu/nova-core/fb.rs2
-rw-r--r--drivers/gpu/nova-core/firmware.rs113
-rw-r--r--drivers/gpu/nova-core/firmware/booter.rs375
-rw-r--r--drivers/gpu/nova-core/firmware/fwsec.rs17
-rw-r--r--drivers/gpu/nova-core/firmware/gsp.rs243
-rw-r--r--drivers/gpu/nova-core/firmware/riscv.rs91
-rw-r--r--drivers/gpu/nova-core/gpu.rs206
-rw-r--r--drivers/gpu/nova-core/gsp.rs22
-rw-r--r--drivers/gpu/nova-core/gsp/boot.rs137
-rw-r--r--drivers/gpu/nova-core/gsp/fw.rs7
-rw-r--r--drivers/gpu/nova-core/gsp/fw/r570_144.rs29
-rw-r--r--drivers/gpu/nova-core/gsp/fw/r570_144/bindings.rs1
-rw-r--r--drivers/gpu/nova-core/nova_core.rs1
-rw-r--r--drivers/gpu/nova-core/regs.rs84
-rw-r--r--drivers/gpu/nova-core/regs/macros.rs751
-rw-r--r--drivers/gpu/nova-core/util.rs20
-rw-r--r--drivers/gpu/nova-core/vbios.rs176
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-core.c46
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-of-elan.c11
-rw-r--r--drivers/hsi/controllers/omap_ssi_port.c11
-rw-r--r--drivers/i2c/busses/Kconfig3
-rw-r--r--drivers/i2c/busses/i2c-designware-master.c9
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c15
-rw-r--r--drivers/i2c/busses/i2c-designware-slave.c9
-rw-r--r--drivers/i2c/busses/i2c-hix5hd2.c2
-rw-r--r--drivers/i2c/busses/i2c-i801.c3
-rw-r--r--drivers/i2c/busses/i2c-k1.c71
-rw-r--r--drivers/i2c/busses/i2c-mt65xx.c17
-rw-r--r--drivers/i2c/busses/i2c-qcom-geni.c8
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c1
-rw-r--r--drivers/i2c/busses/i2c-sprd.c2
-rw-r--r--drivers/i2c/busses/i2c-st.c2
-rw-r--r--drivers/i2c/busses/i2c-tegra.c26
-rw-r--r--drivers/i2c/busses/i2c-viperboard.c2
-rw-r--r--drivers/i2c/i2c-boardinfo.c4
-rw-r--r--drivers/i2c/i2c-core-base.c9
-rw-r--r--drivers/i2c/i2c-core-slave.c3
-rw-r--r--drivers/i2c/i2c-mux.c9
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca9541.c12
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca954x.c50
-rw-r--r--drivers/i3c/internals.h12
-rw-r--r--drivers/i3c/master.c78
-rw-r--r--drivers/i3c/master/Kconfig11
-rw-r--r--drivers/i3c/master/Makefile1
-rw-r--r--drivers/i3c/master/adi-i3c-master.c1019
-rw-r--r--drivers/i3c/master/dw-i3c-master.c23
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/cmd_v1.c9
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/cmd_v2.c7
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/core.c74
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/dma.c96
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/ext_caps.c11
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/hci.h6
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/mipi-i3c-hci-pci.c3
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/pio.c75
-rw-r--r--drivers/i3c/master/renesas-i3c.c2
-rw-r--r--drivers/i3c/master/svc-i3c-master.c31
-rw-r--r--drivers/idle/intel_idle.c256
-rw-r--r--drivers/infiniband/Kconfig1
-rw-r--r--drivers/infiniband/core/addr.c83
-rw-r--r--drivers/infiniband/core/agent.c3
-rw-r--r--drivers/infiniband/core/cm.c4
-rw-r--r--drivers/infiniband/core/cma.c136
-rw-r--r--drivers/infiniband/core/cma_priv.h4
-rw-r--r--drivers/infiniband/core/device.c2
-rw-r--r--drivers/infiniband/core/sa_query.c283
-rw-r--r--drivers/infiniband/core/ucma.c120
-rw-r--r--drivers/infiniband/hw/Makefile1
-rw-r--r--drivers/infiniband/hw/bnxt_re/bnxt_re.h19
-rw-r--r--drivers/infiniband/hw/bnxt_re/debugfs.c37
-rw-r--r--drivers/infiniband/hw/bnxt_re/hw_counters.c109
-rw-r--r--drivers/infiniband/hw/bnxt_re/hw_counters.h26
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c156
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.h10
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c378
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c13
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.h2
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.c10
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.h1
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.c38
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.h21
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.c98
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.h6
-rw-r--r--drivers/infiniband/hw/bnxt_re/roce_hsi.h44
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c5
-rw-r--r--drivers/infiniband/hw/efa/efa_com.c18
-rw-r--r--drivers/infiniband/hw/efa/efa_verbs.c6
-rw-r--r--drivers/infiniband/hw/erdma/erdma_verbs.c110
-rw-r--r--drivers/infiniband/hw/erdma/erdma_verbs.h4
-rw-r--r--drivers/infiniband/hw/hfi1/device.c4
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.c2
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.c4
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_mr.c8
-rw-r--r--drivers/infiniband/hw/ionic/Kconfig15
-rw-r--r--drivers/infiniband/hw/ionic/Makefile9
-rw-r--r--drivers/infiniband/hw/ionic/ionic_admin.c1229
-rw-r--r--drivers/infiniband/hw/ionic/ionic_controlpath.c2679
-rw-r--r--drivers/infiniband/hw/ionic/ionic_datapath.c1399
-rw-r--r--drivers/infiniband/hw/ionic/ionic_fw.h1029
-rw-r--r--drivers/infiniband/hw/ionic/ionic_hw_stats.c484
-rw-r--r--drivers/infiniband/hw/ionic/ionic_ibdev.c440
-rw-r--r--drivers/infiniband/hw/ionic/ionic_ibdev.h517
-rw-r--r--drivers/infiniband/hw/ionic/ionic_lif_cfg.c111
-rw-r--r--drivers/infiniband/hw/ionic/ionic_lif_cfg.h66
-rw-r--r--drivers/infiniband/hw/ionic/ionic_pgtbl.c143
-rw-r--r--drivers/infiniband/hw/ionic/ionic_queue.c52
-rw-r--r--drivers/infiniband/hw/ionic/ionic_queue.h234
-rw-r--r--drivers/infiniband/hw/ionic/ionic_res.h154
-rw-r--r--drivers/infiniband/hw/irdma/Kconfig7
-rw-r--r--drivers/infiniband/hw/irdma/Makefile4
-rw-r--r--drivers/infiniband/hw/irdma/ctrl.c1440
-rw-r--r--drivers/infiniband/hw/irdma/defs.h264
-rw-r--r--drivers/infiniband/hw/irdma/hmc.c18
-rw-r--r--drivers/infiniband/hw/irdma/hmc.h19
-rw-r--r--drivers/infiniband/hw/irdma/hw.c363
-rw-r--r--drivers/infiniband/hw/irdma/i40iw_hw.c2
-rw-r--r--drivers/infiniband/hw/irdma/i40iw_hw.h2
-rw-r--r--drivers/infiniband/hw/irdma/i40iw_if.c3
-rw-r--r--drivers/infiniband/hw/irdma/icrdma_hw.c3
-rw-r--r--drivers/infiniband/hw/irdma/icrdma_hw.h5
-rw-r--r--drivers/infiniband/hw/irdma/icrdma_if.c343
-rw-r--r--drivers/infiniband/hw/irdma/ig3rdma_hw.c170
-rw-r--r--drivers/infiniband/hw/irdma/ig3rdma_hw.h32
-rw-r--r--drivers/infiniband/hw/irdma/ig3rdma_if.c232
-rw-r--r--drivers/infiniband/hw/irdma/irdma.h22
-rw-r--r--drivers/infiniband/hw/irdma/main.c371
-rw-r--r--drivers/infiniband/hw/irdma/main.h35
-rw-r--r--drivers/infiniband/hw/irdma/pble.c20
-rw-r--r--drivers/infiniband/hw/irdma/protos.h1
-rw-r--r--drivers/infiniband/hw/irdma/puda.h4
-rw-r--r--drivers/infiniband/hw/irdma/type.h221
-rw-r--r--drivers/infiniband/hw/irdma/uda_d.h5
-rw-r--r--drivers/infiniband/hw/irdma/uk.c303
-rw-r--r--drivers/infiniband/hw/irdma/user.h267
-rw-r--r--drivers/infiniband/hw/irdma/utils.c112
-rw-r--r--drivers/infiniband/hw/irdma/verbs.c834
-rw-r--r--drivers/infiniband/hw/irdma/verbs.h50
-rw-r--r--drivers/infiniband/hw/irdma/virtchnl.c618
-rw-r--r--drivers/infiniband/hw/irdma/virtchnl.h176
-rw-r--r--drivers/infiniband/hw/mana/cq.c26
-rw-r--r--drivers/infiniband/hw/mana/device.c3
-rw-r--r--drivers/infiniband/hw/mana/main.c5
-rw-r--r--drivers/infiniband/hw/mana/mana_ib.h14
-rw-r--r--drivers/infiniband/hw/mana/mr.c6
-rw-r--r--drivers/infiniband/hw/mana/qp.c9
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c8
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c3
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c4
-rw-r--r--drivers/infiniband/hw/mlx5/data_direct.c2
-rw-r--r--drivers/infiniband/hw/mlx5/gsi.c15
-rw-r--r--drivers/infiniband/hw/mlx5/main.c113
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h7
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c11
-rw-r--r--drivers/infiniband/hw/mlx5/std_types.c27
-rw-r--r--drivers/infiniband/hw/mlx5/umr.c6
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c13
-rw-r--r--drivers/infiniband/sw/rxe/rxe_task.c8
-rw-r--r--drivers/infiniband/sw/siw/siw_verbs.c25
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c21
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c16
-rw-r--r--drivers/iommu/amd/amd_iommu_types.h5
-rw-r--r--drivers/iommu/amd/init.c284
-rw-r--r--drivers/iommu/amd/iommu.c5
-rw-r--r--drivers/iommu/apple-dart.c55
-rw-r--r--drivers/iommu/dma-iommu.c61
-rw-r--r--drivers/iommu/intel/debugfs.c29
-rw-r--r--drivers/iommu/intel/iommu.c2
-rw-r--r--drivers/iommu/intel/iommu.h7
-rw-r--r--drivers/iommu/intel/perf.c10
-rw-r--r--drivers/iommu/intel/perf.h5
-rw-r--r--drivers/iommu/intel/prq.c7
-rw-r--r--drivers/iommu/io-pgtable-dart.c139
-rw-r--r--drivers/iommu/iommu-priv.h2
-rw-r--r--drivers/iommu/iommu.c26
-rw-r--r--drivers/iommu/iommufd/selftest.c2
-rw-r--r--drivers/iommu/omap-iommu.c2
-rw-r--r--drivers/iommu/riscv/iommu-platform.c17
-rw-r--r--drivers/iommu/riscv/iommu.c10
-rw-r--r--drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c2
-rw-r--r--drivers/md/Kconfig31
-rw-r--r--drivers/md/Makefile5
-rw-r--r--drivers/md/bcache/debug.c3
-rw-r--r--drivers/md/bcache/io.c3
-rw-r--r--drivers/md/bcache/journal.c2
-rw-r--r--drivers/md/bcache/movinggc.c8
-rw-r--r--drivers/md/bcache/super.c2
-rw-r--r--drivers/md/bcache/writeback.c8
-rw-r--r--drivers/md/dm-bufio.c12
-rw-r--r--drivers/md/dm-cache-policy-smq.c2
-rw-r--r--drivers/md/dm-core.h2
-rw-r--r--drivers/md/dm-flakey.c2
-rw-r--r--drivers/md/dm-ima.c70
-rw-r--r--drivers/md/dm-integrity.c359
-rw-r--r--drivers/md/dm-log-writes.c2
-rw-r--r--drivers/md/dm-pcache/Kconfig17
-rw-r--r--drivers/md/dm-pcache/Makefile3
-rw-r--r--drivers/md/dm-pcache/backing_dev.c374
-rw-r--r--drivers/md/dm-pcache/backing_dev.h127
-rw-r--r--drivers/md/dm-pcache/cache.c445
-rw-r--r--drivers/md/dm-pcache/cache.h635
-rw-r--r--drivers/md/dm-pcache/cache_dev.c303
-rw-r--r--drivers/md/dm-pcache/cache_dev.h70
-rw-r--r--drivers/md/dm-pcache/cache_gc.c170
-rw-r--r--drivers/md/dm-pcache/cache_key.c888
-rw-r--r--drivers/md/dm-pcache/cache_req.c836
-rw-r--r--drivers/md/dm-pcache/cache_segment.c305
-rw-r--r--drivers/md/dm-pcache/cache_writeback.c261
-rw-r--r--drivers/md/dm-pcache/dm_pcache.c497
-rw-r--r--drivers/md/dm-pcache/dm_pcache.h67
-rw-r--r--drivers/md/dm-pcache/pcache_internal.h117
-rw-r--r--drivers/md/dm-pcache/segment.c61
-rw-r--r--drivers/md/dm-pcache/segment.h74
-rw-r--r--drivers/md/dm-raid.c31
-rw-r--r--drivers/md/dm-region-hash.c2
-rw-r--r--drivers/md/dm-switch.c4
-rw-r--r--drivers/md/dm-target.c3
-rw-r--r--drivers/md/dm-thin.c4
-rw-r--r--drivers/md/dm-vdo/data-vio.c17
-rw-r--r--drivers/md/dm-vdo/indexer/volume-index.c4
-rw-r--r--drivers/md/dm-vdo/vio.c2
-rw-r--r--drivers/md/dm.c49
-rw-r--r--drivers/md/md-bitmap.c89
-rw-r--r--drivers/md/md-bitmap.h107
-rw-r--r--drivers/md/md-cluster.c2
-rw-r--r--drivers/md/md-linear.c14
-rw-r--r--drivers/md/md-llbitmap.c1626
-rw-r--r--drivers/md/md.c382
-rw-r--r--drivers/md/md.h24
-rw-r--r--drivers/md/raid0.c30
-rw-r--r--drivers/md/raid1-10.c2
-rw-r--r--drivers/md/raid1.c119
-rw-r--r--drivers/md/raid1.h4
-rw-r--r--drivers/md/raid10.c107
-rw-r--r--drivers/md/raid10.h2
-rw-r--r--drivers/md/raid5.c74
-rw-r--r--drivers/media/cec/core/cec-core.c2
-rw-r--r--drivers/media/cec/platform/cec-gpio/cec-gpio.c2
-rw-r--r--drivers/media/cec/platform/stm32/stm32-cec.c1
-rw-r--r--drivers/media/cec/usb/extron-da-hd-4k-plus/Makefile6
-rw-r--r--drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.c6
-rw-r--r--drivers/media/cec/usb/pulse8/pulse8-cec.c4
-rw-r--r--drivers/media/cec/usb/rainshadow/rainshadow-cec.c4
-rw-r--r--drivers/media/common/b2c2/flexcop-sram.c2
-rw-r--r--drivers/media/common/b2c2/flexcop.c22
-rw-r--r--drivers/media/common/cx2341x.c2
-rw-r--r--drivers/media/common/videobuf2/videobuf2-v4l2.c12
-rw-r--r--drivers/media/dvb-frontends/Kconfig4
-rw-r--r--drivers/media/i2c/Kconfig50
-rw-r--r--drivers/media/i2c/Makefile5
-rw-r--r--drivers/media/i2c/adv7180.c338
-rw-r--r--drivers/media/i2c/adv7604.c2
-rw-r--r--drivers/media/i2c/adv7842.c2
-rw-r--r--drivers/media/i2c/ar0521.c9
-rw-r--r--drivers/media/i2c/cx25840/cx25840-core.c4
-rw-r--r--drivers/media/i2c/ds90ub913.c17
-rw-r--r--drivers/media/i2c/et8ek8/et8ek8_driver.c34
-rw-r--r--drivers/media/i2c/et8ek8/et8ek8_mode.c9
-rw-r--r--drivers/media/i2c/et8ek8/et8ek8_reg.h1
-rw-r--r--drivers/media/i2c/gc0310.c (renamed from drivers/staging/media/atomisp/i2c/atomisp-gc0310.c)0
-rw-r--r--drivers/media/i2c/gc05a2.c8
-rw-r--r--drivers/media/i2c/gc08a3.c8
-rw-r--r--drivers/media/i2c/gc2145.c2
-rw-r--r--drivers/media/i2c/hi556.c92
-rw-r--r--drivers/media/i2c/hi846.c11
-rw-r--r--drivers/media/i2c/hi847.c84
-rw-r--r--drivers/media/i2c/imx208.c91
-rw-r--r--drivers/media/i2c/imx214.c247
-rw-r--r--drivers/media/i2c/imx219.c6
-rw-r--r--drivers/media/i2c/imx258.c105
-rw-r--r--drivers/media/i2c/imx274.c2
-rw-r--r--drivers/media/i2c/imx283.c5
-rw-r--r--drivers/media/i2c/imx290.c27
-rw-r--r--drivers/media/i2c/imx296.c4
-rw-r--r--drivers/media/i2c/imx319.c92
-rw-r--r--drivers/media/i2c/imx334.c15
-rw-r--r--drivers/media/i2c/imx335.c9
-rw-r--r--drivers/media/i2c/imx355.c90
-rw-r--r--drivers/media/i2c/imx412.c9
-rw-r--r--drivers/media/i2c/imx415.c2
-rw-r--r--drivers/media/i2c/ir-kbd-i2c.c6
-rw-r--r--drivers/media/i2c/mt9m001.c5
-rw-r--r--drivers/media/i2c/mt9m111.c5
-rw-r--r--drivers/media/i2c/mt9m114.c75
-rw-r--r--drivers/media/i2c/mt9p031.c9
-rw-r--r--drivers/media/i2c/mt9t112.c11
-rw-r--r--drivers/media/i2c/mt9v032.c105
-rw-r--r--drivers/media/i2c/mt9v111.c9
-rw-r--r--drivers/media/i2c/og01a1b.c115
-rw-r--r--drivers/media/i2c/og0ve1b.c816
-rw-r--r--drivers/media/i2c/ov02a10.c45
-rw-r--r--drivers/media/i2c/ov02c10.c108
-rw-r--r--drivers/media/i2c/ov02e10.c107
-rw-r--r--drivers/media/i2c/ov08d10.c82
-rw-r--r--drivers/media/i2c/ov08x40.c95
-rw-r--r--drivers/media/i2c/ov13858.c69
-rw-r--r--drivers/media/i2c/ov13b10.c110
-rw-r--r--drivers/media/i2c/ov2659.c5
-rw-r--r--drivers/media/i2c/ov2680.c29
-rw-r--r--drivers/media/i2c/ov2685.c16
-rw-r--r--drivers/media/i2c/ov2735.c1109
-rw-r--r--drivers/media/i2c/ov2740.c91
-rw-r--r--drivers/media/i2c/ov4689.c12
-rw-r--r--drivers/media/i2c/ov5640.c9
-rw-r--r--drivers/media/i2c/ov5645.c13
-rw-r--r--drivers/media/i2c/ov5647.c9
-rw-r--r--drivers/media/i2c/ov5648.c10
-rw-r--r--drivers/media/i2c/ov5670.c105
-rw-r--r--drivers/media/i2c/ov5675.c89
-rw-r--r--drivers/media/i2c/ov5693.c16
-rw-r--r--drivers/media/i2c/ov5695.c16
-rw-r--r--drivers/media/i2c/ov6211.c793
-rw-r--r--drivers/media/i2c/ov64a40.c2
-rw-r--r--drivers/media/i2c/ov6650.c1149
-rw-r--r--drivers/media/i2c/ov7251.c26
-rw-r--r--drivers/media/i2c/ov7740.c11
-rw-r--r--drivers/media/i2c/ov8856.c95
-rw-r--r--drivers/media/i2c/ov8858.c2
-rw-r--r--drivers/media/i2c/ov8865.c50
-rw-r--r--drivers/media/i2c/ov9282.c9
-rw-r--r--drivers/media/i2c/ov9640.c5
-rw-r--r--drivers/media/i2c/ov9650.c5
-rw-r--r--drivers/media/i2c/ov9734.c82
-rw-r--r--drivers/media/i2c/rj54n1cb0c.c9
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3-core.c19
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3.h2
-rw-r--r--drivers/media/i2c/s5k5baf.c21
-rw-r--r--drivers/media/i2c/s5k6a3.c20
-rw-r--r--drivers/media/i2c/saa6752hs.c2
-rw-r--r--drivers/media/i2c/saa7115.c2
-rw-r--r--drivers/media/i2c/saa7127.c2
-rw-r--r--drivers/media/i2c/saa717x.c2
-rw-r--r--drivers/media/i2c/tc358743.c113
-rw-r--r--drivers/media/i2c/tc358743_regs.h57
-rw-r--r--drivers/media/i2c/tda9840.c2
-rw-r--r--drivers/media/i2c/tea6415c.c2
-rw-r--r--drivers/media/i2c/tea6420.c2
-rw-r--r--drivers/media/i2c/ths7303.c2
-rw-r--r--drivers/media/i2c/tlv320aic23b.c2
-rw-r--r--drivers/media/i2c/upd64031a.c2
-rw-r--r--drivers/media/i2c/upd64083.c2
-rw-r--r--drivers/media/i2c/vd55g1.c4
-rw-r--r--drivers/media/i2c/vd56g3.c2
-rw-r--r--drivers/media/i2c/vgxy61.c26
-rw-r--r--drivers/media/i2c/vp27smpx.c2
-rw-r--r--drivers/media/i2c/wm8739.c2
-rw-r--r--drivers/media/i2c/wm8775.c2
-rw-r--r--drivers/media/mc/mc-devnode.c6
-rw-r--r--drivers/media/mc/mc-entity.c6
-rw-r--r--drivers/media/mc/mc-request.c2
-rw-r--r--drivers/media/pci/b2c2/flexcop-pci.c2
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c14
-rw-r--r--drivers/media/pci/bt8xx/bttv-vbi.c6
-rw-r--r--drivers/media/pci/cobalt/cobalt-driver.c2
-rw-r--r--drivers/media/pci/cobalt/cobalt-v4l2.c60
-rw-r--r--drivers/media/pci/cx18/cx18-audio.c2
-rw-r--r--drivers/media/pci/cx18/cx18-audio.h2
-rw-r--r--drivers/media/pci/cx18/cx18-av-audio.c2
-rw-r--r--drivers/media/pci/cx18/cx18-av-core.c2
-rw-r--r--drivers/media/pci/cx18/cx18-av-core.h2
-rw-r--r--drivers/media/pci/cx18/cx18-av-firmware.c2
-rw-r--r--drivers/media/pci/cx18/cx18-av-vbi.c2
-rw-r--r--drivers/media/pci/cx18/cx18-cards.c2
-rw-r--r--drivers/media/pci/cx18/cx18-cards.h2
-rw-r--r--drivers/media/pci/cx18/cx18-controls.c2
-rw-r--r--drivers/media/pci/cx18/cx18-controls.h2
-rw-r--r--drivers/media/pci/cx18/cx18-driver.c2
-rw-r--r--drivers/media/pci/cx18/cx18-driver.h4
-rw-r--r--drivers/media/pci/cx18/cx18-fileops.c13
-rw-r--r--drivers/media/pci/cx18/cx18-fileops.h2
-rw-r--r--drivers/media/pci/cx18/cx18-firmware.c2
-rw-r--r--drivers/media/pci/cx18/cx18-firmware.h2
-rw-r--r--drivers/media/pci/cx18/cx18-gpio.c2
-rw-r--r--drivers/media/pci/cx18/cx18-gpio.h2
-rw-r--r--drivers/media/pci/cx18/cx18-i2c.c2
-rw-r--r--drivers/media/pci/cx18/cx18-i2c.h2
-rw-r--r--drivers/media/pci/cx18/cx18-io.c2
-rw-r--r--drivers/media/pci/cx18/cx18-io.h2
-rw-r--r--drivers/media/pci/cx18/cx18-ioctl.c66
-rw-r--r--drivers/media/pci/cx18/cx18-ioctl.h2
-rw-r--r--drivers/media/pci/cx18/cx18-irq.c2
-rw-r--r--drivers/media/pci/cx18/cx18-irq.h2
-rw-r--r--drivers/media/pci/cx18/cx18-mailbox.c2
-rw-r--r--drivers/media/pci/cx18/cx18-mailbox.h2
-rw-r--r--drivers/media/pci/cx18/cx18-queue.c15
-rw-r--r--drivers/media/pci/cx18/cx18-queue.h2
-rw-r--r--drivers/media/pci/cx18/cx18-scb.c2
-rw-r--r--drivers/media/pci/cx18/cx18-scb.h2
-rw-r--r--drivers/media/pci/cx18/cx18-streams.c2
-rw-r--r--drivers/media/pci/cx18/cx18-streams.h2
-rw-r--r--drivers/media/pci/cx18/cx18-vbi.c2
-rw-r--r--drivers/media/pci/cx18/cx18-vbi.h2
-rw-r--r--drivers/media/pci/cx18/cx18-version.h2
-rw-r--r--drivers/media/pci/cx18/cx18-video.c2
-rw-r--r--drivers/media/pci/cx18/cx18-video.h2
-rw-r--r--drivers/media/pci/cx18/cx23418.h2
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c2
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-subdev.c6
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-video.c1
-rw-r--r--drivers/media/pci/ivtv/ivtv-alsa-pcm.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-cards.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-cards.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-controls.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-controls.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-driver.c17
-rw-r--r--drivers/media/pci/ivtv/ivtv-driver.h24
-rw-r--r--drivers/media/pci/ivtv/ivtv-fileops.c42
-rw-r--r--drivers/media/pci/ivtv/ivtv-fileops.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-firmware.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-firmware.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-gpio.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-gpio.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-i2c.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-i2c.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-ioctl.c126
-rw-r--r--drivers/media/pci/ivtv/ivtv-ioctl.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-irq.c8
-rw-r--r--drivers/media/pci/ivtv/ivtv-irq.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-mailbox.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-mailbox.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-queue.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-queue.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-routing.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-routing.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-streams.c17
-rw-r--r--drivers/media/pci/ivtv/ivtv-streams.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-udma.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-udma.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-vbi.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-vbi.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-version.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-yuv.c8
-rw-r--r--drivers/media/pci/mgb4/mgb4_trigger.c2
-rw-r--r--drivers/media/pci/mgb4/mgb4_vin.c3
-rw-r--r--drivers/media/pci/saa7134/saa7134-video.c4
-rw-r--r--drivers/media/pci/saa7164/saa7164-encoder.c30
-rw-r--r--drivers/media/pci/saa7164/saa7164-vbi.c25
-rw-r--r--drivers/media/pci/saa7164/saa7164.h10
-rw-r--r--drivers/media/pci/tw68/tw68-core.c4
-rw-r--r--drivers/media/pci/tw68/tw68-reg.h2
-rw-r--r--drivers/media/pci/tw68/tw68-risc.c2
-rw-r--r--drivers/media/pci/tw68/tw68-video.c2
-rw-r--r--drivers/media/pci/tw68/tw68.h2
-rw-r--r--drivers/media/pci/zoran/zoran.h6
-rw-r--r--drivers/media/pci/zoran/zoran_card.c4
-rw-r--r--drivers/media/pci/zoran/zoran_card.h2
-rw-r--r--drivers/media/pci/zoran/zoran_driver.c35
-rw-r--r--drivers/media/platform/allegro-dvt/allegro-core.c33
-rw-r--r--drivers/media/platform/amlogic/c3/mipi-csi2/c3-mipi-csi2.c7
-rw-r--r--drivers/media/platform/amlogic/meson-ge2d/ge2d.c25
-rw-r--r--drivers/media/platform/amphion/vpu.h2
-rw-r--r--drivers/media/platform/amphion/vpu_v4l2.c22
-rw-r--r--drivers/media/platform/amphion/vpu_v4l2.h8
-rw-r--r--drivers/media/platform/aspeed/aspeed-video.c199
-rw-r--r--drivers/media/platform/cadence/cdns-csi2rx.c75
-rw-r--r--drivers/media/platform/chips-media/coda/coda-common.c50
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-helper.c10
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-helper.h2
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c23
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c31
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vpu.h5
-rw-r--r--drivers/media/platform/imagination/e5010-jpeg-enc.c23
-rw-r--r--drivers/media/platform/imagination/e5010-jpeg-enc.h5
-rw-r--r--drivers/media/platform/m2m-deinterlace.c26
-rw-r--r--drivers/media/platform/marvell/cafe-driver.c2
-rw-r--r--drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c37
-rw-r--r--drivers/media/platform/mediatek/mdp/mtk_mdp_m2m.c29
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c3
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c2
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c25
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-vpu.c2
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec.c36
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.c9
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h5
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c37
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.c9
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.h4
-rw-r--r--drivers/media/platform/nvidia/tegra-vde/v4l2.c35
-rw-r--r--drivers/media/platform/nxp/dw100/dw100.c7
-rw-r--r--drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c45
-rw-r--r--drivers/media/platform/nxp/imx-mipi-csis.c353
-rw-r--r--drivers/media/platform/nxp/imx-pxp.c7
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c8
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-core.h14
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-hw.c2
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c292
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-pipe.c2
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c156
-rw-r--r--drivers/media/platform/nxp/mx2_emmaprp.c24
-rw-r--r--drivers/media/platform/qcom/camss/Makefile6
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid-340.c190
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid-gen3.c (renamed from drivers/media/platform/qcom/camss/camss-csid-780.c)34
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid-gen3.h (renamed from drivers/media/platform/qcom/camss/camss-csid-780.h)8
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid.h3
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c175
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-340.c320
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-gen3.c (renamed from drivers/media/platform/qcom/camss/camss-vfe-780.c)76
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe.c28
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe.h3
-rw-r--r--drivers/media/platform/qcom/camss/camss-video.c43
-rw-r--r--drivers/media/platform/qcom/camss/camss.c705
-rw-r--r--drivers/media/platform/qcom/camss/camss.h4
-rw-r--r--drivers/media/platform/qcom/iris/Makefile5
-rw-r--r--drivers/media/platform/qcom/iris/iris_buffer.c222
-rw-r--r--drivers/media/platform/qcom/iris/iris_buffer.h7
-rw-r--r--drivers/media/platform/qcom/iris/iris_common.c232
-rw-r--r--drivers/media/platform/qcom/iris/iris_common.h18
-rw-r--r--drivers/media/platform/qcom/iris/iris_core.c10
-rw-r--r--drivers/media/platform/qcom/iris/iris_core.h20
-rw-r--r--drivers/media/platform/qcom/iris/iris_ctrls.c675
-rw-r--r--drivers/media/platform/qcom/iris/iris_ctrls.h15
-rw-r--r--drivers/media/platform/qcom/iris/iris_firmware.c15
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_common.h2
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen1_command.c482
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen1_defines.h112
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen1_response.c60
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen2_command.c359
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen2_defines.h44
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen2_response.c46
-rw-r--r--drivers/media/platform/qcom/iris/iris_instance.h24
-rw-r--r--drivers/media/platform/qcom/iris/iris_platform_common.h82
-rw-r--r--drivers/media/platform/qcom/iris/iris_platform_gen2.c609
-rw-r--r--drivers/media/platform/qcom/iris/iris_platform_qcs8300.h352
-rw-r--r--drivers/media/platform/qcom/iris/iris_platform_sm8250.c236
-rw-r--r--drivers/media/platform/qcom/iris/iris_platform_sm8750.h22
-rw-r--r--drivers/media/platform/qcom/iris/iris_probe.c37
-rw-r--r--drivers/media/platform/qcom/iris/iris_state.c9
-rw-r--r--drivers/media/platform/qcom/iris/iris_state.h1
-rw-r--r--drivers/media/platform/qcom/iris/iris_utils.c36
-rw-r--r--drivers/media/platform/qcom/iris/iris_utils.h2
-rw-r--r--drivers/media/platform/qcom/iris/iris_vb2.c58
-rw-r--r--drivers/media/platform/qcom/iris/iris_vdec.c251
-rw-r--r--drivers/media/platform/qcom/iris/iris_vdec.h13
-rw-r--r--drivers/media/platform/qcom/iris/iris_venc.c579
-rw-r--r--drivers/media/platform/qcom/iris/iris_venc.h27
-rw-r--r--drivers/media/platform/qcom/iris/iris_vidc.c335
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu2.c2
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu3x.c202
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu_buffer.c922
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu_buffer.h24
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu_common.c14
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu_common.h6
-rw-r--r--drivers/media/platform/qcom/venus/core.c113
-rw-r--r--drivers/media/platform/qcom/venus/core.h22
-rw-r--r--drivers/media/platform/qcom/venus/firmware.c42
-rw-r--r--drivers/media/platform/qcom/venus/firmware.h2
-rw-r--r--drivers/media/platform/qcom/venus/helpers.c12
-rw-r--r--drivers/media/platform/qcom/venus/hfi_msgs.c11
-rw-r--r--drivers/media/platform/qcom/venus/hfi_parser.c2
-rw-r--r--drivers/media/platform/qcom/venus/hfi_platform.c23
-rw-r--r--drivers/media/platform/qcom/venus/hfi_platform.h34
-rw-r--r--drivers/media/platform/qcom/venus/hfi_platform_v4.c188
-rw-r--r--drivers/media/platform/qcom/venus/hfi_platform_v6.c33
-rw-r--r--drivers/media/platform/qcom/venus/hfi_venus.c25
-rw-r--r--drivers/media/platform/qcom/venus/hfi_venus_io.h4
-rw-r--r--drivers/media/platform/qcom/venus/pm_helpers.c11
-rw-r--r--drivers/media/platform/qcom/venus/vdec.c5
-rw-r--r--drivers/media/platform/qcom/venus/venc.c5
-rw-r--r--drivers/media/platform/raspberrypi/rp1-cfe/csi2.c2
-rw-r--r--drivers/media/platform/renesas/rcar-vin/rcar-core.c8
-rw-r--r--drivers/media/platform/renesas/rcar-vin/rcar-v4l2.c2
-rw-r--r--drivers/media/platform/renesas/rcar_drif.c12
-rw-r--r--drivers/media/platform/renesas/rcar_fdp1.c27
-rw-r--r--drivers/media/platform/renesas/rcar_jpu.c29
-rw-r--r--drivers/media/platform/renesas/renesas-ceu.c10
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-core.c2
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-cru.h9
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c31
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_drv.c14
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_histo.c6
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_video.c18
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_vspx.c1
-rw-r--r--drivers/media/platform/rockchip/rga/rga.c30
-rw-r--r--drivers/media/platform/rockchip/rga/rga.h5
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-common.h17
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c123
-rw-r--r--drivers/media/platform/rockchip/rkvdec/rkvdec.c21
-rw-r--r--drivers/media/platform/rockchip/rkvdec/rkvdec.h4
-rw-r--r--drivers/media/platform/samsung/exynos-gsc/gsc-core.h6
-rw-r--r--drivers/media/platform/samsung/exynos-gsc/gsc-m2m.c37
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-core.h5
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-m2m.c19
-rw-r--r--drivers/media/platform/samsung/s3c-camif/camif-capture.c26
-rw-r--r--drivers/media/platform/samsung/s5p-g2d/g2d.c40
-rw-r--r--drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c33
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c17
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v6.c35
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_common.h6
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_dec.c34
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c38
-rw-r--r--drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c30
-rw-r--r--drivers/media/platform/st/sti/delta/delta-mjpeg-dec.c20
-rw-r--r--drivers/media/platform/st/sti/delta/delta-v4l2.c41
-rw-r--r--drivers/media/platform/st/sti/hva/hva-v4l2.c38
-rw-r--r--drivers/media/platform/st/sti/hva/hva.h2
-rw-r--r--drivers/media/platform/st/stm32/dma2d/dma2d.c28
-rw-r--r--drivers/media/platform/st/stm32/stm32-csi.c4
-rw-r--r--drivers/media/platform/st/stm32/stm32-dcmi.c4
-rw-r--r--drivers/media/platform/sunxi/sun6i-csi/sun6i_csi_capture.c16
-rw-r--r--drivers/media/platform/sunxi/sun8i-di/sun8i-di.c10
-rw-r--r--drivers/media/platform/sunxi/sun8i-rotate/sun8i_rotate.c10
-rw-r--r--drivers/media/platform/synopsys/hdmirx/snps_hdmirx.c8
-rw-r--r--drivers/media/platform/synopsys/hdmirx/snps_hdmirx.h6
-rw-r--r--drivers/media/platform/ti/Kconfig3
-rw-r--r--drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c67
-rw-r--r--drivers/media/platform/ti/omap/omap_vout.c6
-rw-r--r--drivers/media/platform/ti/omap3isp/ispccdc.c8
-rw-r--r--drivers/media/platform/ti/omap3isp/isph3a_aewb.c2
-rw-r--r--drivers/media/platform/ti/omap3isp/isph3a_af.c2
-rw-r--r--drivers/media/platform/ti/omap3isp/isphist.c2
-rw-r--r--drivers/media/platform/ti/omap3isp/ispstat.c7
-rw-r--r--drivers/media/platform/ti/omap3isp/ispstat.h3
-rw-r--r--drivers/media/platform/ti/omap3isp/ispvideo.c36
-rw-r--r--drivers/media/platform/ti/omap3isp/ispvideo.h6
-rw-r--r--drivers/media/platform/ti/vpe/vpe.c21
-rw-r--r--drivers/media/platform/verisilicon/hantro.h4
-rw-r--r--drivers/media/platform/verisilicon/hantro_drv.c12
-rw-r--r--drivers/media/platform/verisilicon/hantro_v4l2.c28
-rw-r--r--drivers/media/platform/verisilicon/imx8m_vpu_hw.c20
-rw-r--r--drivers/media/platform/xilinx/xilinx-dma.c10
-rw-r--r--drivers/media/radio/Kconfig17
-rw-r--r--drivers/media/radio/Makefile1
-rw-r--r--drivers/media/radio/radio-aimslab.c2
-rw-r--r--drivers/media/radio/radio-aztech.c2
-rw-r--r--drivers/media/radio/radio-gemtek.c2
-rw-r--r--drivers/media/radio/radio-isa.c2
-rw-r--r--drivers/media/radio/radio-isa.h2
-rw-r--r--drivers/media/radio/radio-keene.c4
-rw-r--r--drivers/media/radio/radio-miropcm20.c2
-rw-r--r--drivers/media/radio/radio-raremono.c4
-rw-r--r--drivers/media/radio/radio-rtrack2.c2
-rw-r--r--drivers/media/radio/radio-terratec.c2
-rw-r--r--drivers/media/radio/radio-wl1273.c2159
-rw-r--r--drivers/media/radio/radio-zoltrix.c2
-rw-r--r--drivers/media/radio/si4713/radio-platform-si4713.c10
-rw-r--r--drivers/media/rc/imon.c99
-rw-r--r--drivers/media/rc/lirc_dev.c9
-rw-r--r--drivers/media/rc/redrat3.c2
-rw-r--r--drivers/media/test-drivers/vicodec/vicodec-core.c23
-rw-r--r--drivers/media/test-drivers/vim2m.c23
-rw-r--r--drivers/media/test-drivers/vimc/vimc-capture.c4
-rw-r--r--drivers/media/test-drivers/vimc/vimc-core.c2
-rw-r--r--drivers/media/test-drivers/visl/visl-core.c5
-rw-r--r--drivers/media/test-drivers/visl/visl.h7
-rw-r--r--drivers/media/test-drivers/vivid/vivid-cec.c12
-rw-r--r--drivers/media/test-drivers/vivid/vivid-core.c100
-rw-r--r--drivers/media/test-drivers/vivid/vivid-radio-rx.c12
-rw-r--r--drivers/media/test-drivers/vivid/vivid-radio-rx.h8
-rw-r--r--drivers/media/test-drivers/vivid/vivid-radio-tx.c8
-rw-r--r--drivers/media/test-drivers/vivid/vivid-radio-tx.h4
-rw-r--r--drivers/media/test-drivers/vivid/vivid-sdr-cap.c18
-rw-r--r--drivers/media/test-drivers/vivid/vivid-sdr-cap.h18
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vbi-cap.c10
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vbi-cap.h8
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vbi-out.c8
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vbi-out.h6
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-cap.c24
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-cap.h24
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-common.c8
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-common.h8
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-out.c16
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-out.h16
-rw-r--r--drivers/media/tuners/xc4000.c8
-rw-r--r--drivers/media/tuners/xc5000.c14
-rw-r--r--drivers/media/usb/au0828/au0828-video.c5
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-417.c2
-rw-r--r--drivers/media/usb/em28xx/Kconfig1
-rw-r--r--drivers/media/usb/em28xx/em28xx-dvb.c4
-rw-r--r--drivers/media/usb/gspca/gspca.c18
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-video.c69
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-v4l2.c69
-rw-r--r--drivers/media/usb/stk1160/stk1160-core.c3
-rw-r--r--drivers/media/usb/stk1160/stk1160-video.c7
-rw-r--r--drivers/media/usb/uvc/uvc_ctrl.c56
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c115
-rw-r--r--drivers/media/usb/uvc/uvc_entity.c4
-rw-r--r--drivers/media/usb/uvc/uvc_metadata.c71
-rw-r--r--drivers/media/usb/uvc/uvc_status.c7
-rw-r--r--drivers/media/usb/uvc/uvc_v4l2.c128
-rw-r--r--drivers/media/usb/uvc/uvc_video.c10
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h21
-rw-r--r--drivers/media/v4l2-core/v4l2-common.c90
-rw-r--r--drivers/media/v4l2-core/v4l2-compat-ioctl32.c11
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-api.c13
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-core.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-defs.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-priv.h2
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-request.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c45
-rw-r--r--drivers/media/v4l2-core/v4l2-device.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-dv-timings.c4
-rw-r--r--drivers/media/v4l2-core/v4l2-fh.c16
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c456
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c50
-rw-r--r--drivers/media/v4l2-core/v4l2-subdev.c40
-rw-r--r--drivers/memory/samsung/exynos-srom.c10
-rw-r--r--drivers/memory/stm32_omm.c2
-rw-r--r--drivers/memory/tegra/tegra210.c146
-rw-r--r--drivers/memstick/core/ms_block.c4
-rw-r--r--drivers/memstick/core/mspro_block.c7
-rw-r--r--drivers/memstick/host/jmb38x_ms.c3
-rw-r--r--drivers/memstick/host/tifm_ms.c3
-rw-r--r--drivers/message/fusion/mptscsih.c2
-rw-r--r--drivers/message/fusion/mptscsih.h2
-rw-r--r--drivers/mfd/Kconfig2
-rw-r--r--drivers/mfd/max77705.c3
-rw-r--r--drivers/misc/mei/Kconfig13
-rw-r--r--drivers/misc/mei/Makefile1
-rw-r--r--drivers/misc/mei/bus.c13
-rw-r--r--drivers/misc/mei/mei_lb.c312
-rw-r--r--drivers/misc/vmw_balloon.c4
-rw-r--r--drivers/mmc/core/block.c4
-rw-r--r--drivers/mmc/host/dw_mmc-rockchip.c9
-rw-r--r--drivers/mmc/host/tifm_sd.c4
-rw-r--r--drivers/mmc/host/usdhi6rol0.c4
-rw-r--r--drivers/mtd/mtd_blkdevs.c4
-rw-r--r--drivers/mtd/ubi/block.c4
-rw-r--r--drivers/net/Space.c3
-rw-r--r--drivers/net/amt.c6
-rw-r--r--drivers/net/bonding/bond_3ad.c31
-rw-r--r--drivers/net/bonding/bond_main.c115
-rw-r--r--drivers/net/bonding/bond_netlink.c46
-rw-r--r--drivers/net/bonding/bond_options.c54
-rw-r--r--drivers/net/bonding/bond_sysfs.c6
-rw-r--r--drivers/net/can/dev/calc_bittiming.c10
-rw-r--r--drivers/net/can/dev/dev.c80
-rw-r--r--drivers/net/can/dev/netlink.c592
-rw-r--r--drivers/net/can/m_can/m_can.c6
-rw-r--r--drivers/net/can/peak_canfd/peak_canfd.c4
-rw-r--r--drivers/net/can/peak_canfd/peak_canfd_user.h4
-rw-r--r--drivers/net/can/peak_canfd/peak_pciefd_main.c6
-rw-r--r--drivers/net/can/rcar/rcar_can.c292
-rw-r--r--drivers/net/can/rcar/rcar_canfd.c84
-rw-r--r--drivers/net/can/sja1000/peak_pci.c6
-rw-r--r--drivers/net/can/sja1000/peak_pcmcia.c8
-rw-r--r--drivers/net/can/spi/hi311x.c3
-rw-r--r--drivers/net/can/spi/mcp251x.c3
-rw-r--r--drivers/net/can/usb/esd_usb.c64
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb.c6
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c6
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.h4
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_fd.c3
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.c4
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.h4
-rw-r--r--drivers/net/can/vcan.c2
-rw-r--r--drivers/net/can/vxcan.c2
-rw-r--r--drivers/net/dsa/Kconfig16
-rw-r--r--drivers/net/dsa/Makefile6
-rw-r--r--drivers/net/dsa/b53/b53_mmap.c35
-rw-r--r--drivers/net/dsa/dsa_loop.c77
-rw-r--r--drivers/net/dsa/dsa_loop.h20
-rw-r--r--drivers/net/dsa/dsa_loop_bdinfo.c36
-rw-r--r--drivers/net/dsa/ks8995.c (renamed from drivers/net/phy/spi_ks8995.c)453
-rw-r--r--drivers/net/dsa/lantiq/Kconfig7
-rw-r--r--drivers/net/dsa/lantiq/Makefile1
-rw-r--r--drivers/net/dsa/lantiq/lantiq_gswip.c (renamed from drivers/net/dsa/lantiq_gswip.c)469
-rw-r--r--drivers/net/dsa/lantiq/lantiq_gswip.h276
-rw-r--r--drivers/net/dsa/lantiq/lantiq_pce.h (renamed from drivers/net/dsa/lantiq_pce.h)9
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c45
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c17
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.h2
-rw-r--r--drivers/net/dsa/mv88e6xxx/hwtstamp.c2
-rw-r--r--drivers/net/dsa/mv88e6xxx/hwtstamp.h1
-rw-r--r--drivers/net/dsa/mv88e6xxx/ptp.c70
-rw-r--r--drivers/net/dsa/mv88e6xxx/ptp.h133
-rw-r--r--drivers/net/dsa/ocelot/felix.c4
-rw-r--r--drivers/net/dsa/ocelot/felix.h3
-rw-r--r--drivers/net/dsa/ocelot/felix_vsc9959.c3
-rw-r--r--drivers/net/dsa/realtek/realtek.h3
-rw-r--r--drivers/net/ethernet/Kconfig1
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/airoha/airoha_eth.c7
-rw-r--r--drivers/net/ethernet/airoha/airoha_eth.h27
-rw-r--r--drivers/net/ethernet/airoha/airoha_npu.c198
-rw-r--r--drivers/net/ethernet/airoha/airoha_npu.h36
-rw-r--r--drivers/net/ethernet/airoha/airoha_ppe.c234
-rw-r--r--drivers/net/ethernet/airoha/airoha_regs.h4
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_ethtool.c5
-rw-r--r--drivers/net/ethernet/amd/pds_core/main.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/Makefile2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-common.h22
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c15
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c30
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-i2c.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-pps.c74
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ptp.c26
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h17
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig1
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge.h27
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_core.c16
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_db.h34
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c482
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.h31
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_netdev.c2217
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_netdev.h250
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_resc.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_resc.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_rmem.c67
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_rmem.h14
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c85
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h15
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c15
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c152
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c35
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h7
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c21
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c7
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c66
-rw-r--r--drivers/net/ethernet/cadence/macb.h71
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c441
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_core.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c8
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_main.c3
-rw-r--r--drivers/net/ethernet/cavium/liquidio/request_manager.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/response_manager.c3
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c20
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h7
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c2
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c7
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c2
-rw-r--r--drivers/net/ethernet/freescale/enetc/Kconfig3
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.c209
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.h24
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc4_hw.h6
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc4_pf.c8
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ethtool.c86
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_hw.h1
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ptp.c5
-rw-r--r--drivers/net/ethernet/freescale/enetc/ntmp.c15
-rw-r--r--drivers/net/ethernet/freescale/fec.h11
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c68
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.c2
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth_ethtool.c3
-rw-r--r--drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c5
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx_dqo.c35
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c3
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c36
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c7
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_devlink.c10
-rw-r--r--drivers/net/ethernet/huawei/hinic3/Makefile6
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.c915
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.h156
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_common.c23
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_common.h27
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_csr.h79
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_eqs.c776
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_eqs.h122
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.c211
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.h4
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c394
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h34
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h151
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c541
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c417
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h32
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_irq.c138
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_lld.c9
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_main.c69
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c848
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h126
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.c21
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h2
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h119
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c426
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c152
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h20
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h19
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c870
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h39
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_pci_id_tbl.h9
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_rss.c336
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_rss.h14
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_rx.c226
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_rx.h38
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_tx.c190
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_tx.h30
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_wq.c109
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_wq.h19
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c59
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h6
-rw-r--r--drivers/net/ethernet/intel/Kconfig2
-rw-r--r--drivers/net/ethernet/intel/Makefile2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000.h2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_hw.c4
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c3
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h2
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c4
-rw-r--r--drivers/net/ethernet/intel/e1000e/nvm.c4
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_common.c5
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_common.h2
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c2
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pf.c2
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_vf.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c15
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c2
-rw-r--r--drivers/net/ethernet/intel/ice/Makefile9
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/health.c3
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h40
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h117
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c390
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c143
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_debugfs.c633
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c18
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fwlog.c474
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fwlog.h79
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hw_autogen.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.c1008
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.h22
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h41
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c154
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_trace.h10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c188
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h15
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.h14
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c153
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.h22
-rw-r--r--drivers/net/ethernet/intel/ice/virt/allowlist.c (renamed from drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c)2
-rw-r--r--drivers/net/ethernet/intel/ice/virt/allowlist.h (renamed from drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.h)0
-rw-r--r--drivers/net/ethernet/intel/ice/virt/fdir.c (renamed from drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c)0
-rw-r--r--drivers/net/ethernet/intel/ice/virt/fdir.h (renamed from drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h)0
-rw-r--r--drivers/net/ethernet/intel/ice/virt/queues.c973
-rw-r--r--drivers/net/ethernet/intel/ice/virt/queues.h20
-rw-r--r--drivers/net/ethernet/intel/ice/virt/rss.c719
-rw-r--r--drivers/net/ethernet/intel/ice/virt/rss.h18
-rw-r--r--drivers/net/ethernet/intel/ice/virt/virtchnl.c (renamed from drivers/net/ethernet/intel/ice/ice_virtchnl.c)1683
-rw-r--r--drivers/net/ethernet/intel/ice/virt/virtchnl.h (renamed from drivers/net/ethernet/intel/ice/ice_virtchnl.h)0
-rw-r--r--drivers/net/ethernet/intel/idpf/Kconfig2
-rw-r--r--drivers/net/ethernet/intel/idpf/Makefile3
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf.h57
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_dev.c11
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_ethtool.c64
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h6
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_lib.c179
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_main.c1
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_ptp.c11
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c110
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.c990
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.h210
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_vf_dev.c11
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl.c1233
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl.h33
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c4
-rw-r--r--drivers/net/ethernet/intel/idpf/xdp.c486
-rw-r--r--drivers/net/ethernet/intel/idpf/xdp.h175
-rw-r--r--drivers/net/ethernet/intel/idpf/xsk.c633
-rw-r--r--drivers/net/ethernet/intel/idpf/xsk.h33
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c4
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.c2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_nvm.c4
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h2
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c8
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c3
-rw-r--r--drivers/net/ethernet/intel/igbvf/ethtool.c5
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ethtool.c8
-rw-r--r--drivers/net/ethernet/intel/igc/igc_i225.c2
-rw-r--r--drivers/net/ethernet/intel/igc/igc_nvm.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c128
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c14
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c6
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c2
-rw-r--r--drivers/net/ethernet/intel/libie/Kconfig9
-rw-r--r--drivers/net/ethernet/intel/libie/Makefile4
-rw-r--r--drivers/net/ethernet/intel/libie/fwlog.c1115
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c15
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c19
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c10
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c10
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.h4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c32
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c16
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c1
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_main.c2
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_pci.c2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed.c33
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cq.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.c145
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/pcie_cong_event.c79
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rss.c91
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rss.h30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c43
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/trap.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h50
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c44
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.c952
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.h61
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.c200
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.h121
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_common.c49
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c110
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c80
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c127
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/adj_vport.c209
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c47
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/vporttbl.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c238
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h52
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c159
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c183
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c51
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c45
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c131
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c44
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c395
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.h15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.c567
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c40
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/diag/dev_tracepoint.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c1821
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.h60
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c89
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_cmd.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c58
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wc.c42
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c3
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic.h14
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_csr.h37
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_devlink.c249
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c209
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw.c482
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw.h92
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw_log.c2
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw_log.h2
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c66
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h28
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_mac.c57
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_mac.h6
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_netdev.c149
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_netdev.h13
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_pci.c61
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_rpc.c145
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_rpc.h4
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_txrx.c971
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_txrx.h33
-rw-r--r--drivers/net/ethernet/microchip/lan865x/lan865x.c2
-rw-r--r--drivers/net/ethernet/microchip/sparx5/Kconfig2
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c18
-rw-r--r--drivers/net/ethernet/microsoft/mana/hw_channel.c7
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_bpf.c46
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c161
-rw-r--r--drivers/net/ethernet/netronome/nfp/crypto/tls.c9
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/metadata.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/dp.c16
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfdk/dp.c16
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c2
-rw-r--r--drivers/net/ethernet/pensando/Kconfig1
-rw-r--r--drivers/net/ethernet/pensando/ionic/Makefile2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic.h7
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_api.h131
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_aux.c102
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_aux.h10
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c7
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.c270
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.h28
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_ethtool.c2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_if.h118
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c47
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.h3
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_main.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_devlink.c9
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ooo.c9
-rw-r--r--drivers/net/ethernet/qualcomm/Kconfig15
-rw-r--r--drivers/net/ethernet/qualcomm/Makefile1
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/Makefile7
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/ppe.c239
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/ppe.h39
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/ppe_config.c2034
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/ppe_config.h317
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.c847
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.h16
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/ppe_regs.h591
-rw-r--r--drivers/net/ethernet/realtek/Kconfig2
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c12
-rw-r--r--drivers/net/ethernet/renesas/Makefile1
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c3
-rw-r--r--drivers/net/ethernet/renesas/rcar_gen4_ptp.c76
-rw-r--r--drivers/net/ethernet/renesas/rcar_gen4_ptp.h33
-rw-r--r--drivers/net/ethernet/renesas/rswitch.h43
-rw-r--r--drivers/net/ethernet/renesas/rswitch_l2.c316
-rw-r--r--drivers/net/ethernet/renesas/rswitch_l2.h15
-rw-r--r--drivers/net/ethernet/renesas/rswitch_main.c (renamed from drivers/net/ethernet/renesas/rswitch.c)97
-rw-r--r--drivers/net/ethernet/renesas/rtsn.c3
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c34
-rw-r--r--drivers/net/ethernet/sfc/ef100_tx.c17
-rw-r--r--drivers/net/ethernet/sfc/efx_channels.c6
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c3
-rw-r--r--drivers/net/ethernet/sfc/falcon/efx.c5
-rw-r--r--drivers/net/ethernet/sfc/siena/efx_channels.c6
-rw-r--r--drivers/net/ethernet/sfc/siena/ethtool.c3
-rw-r--r--drivers/net/ethernet/sfc/tc_encap_actions.c4
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c14
-rw-r--r--drivers/net/ethernet/spacemit/Kconfig29
-rw-r--r--drivers/net/ethernet/spacemit/Makefile6
-rw-r--r--drivers/net/ethernet/spacemit/k1_emac.c2159
-rw-r--r--drivers/net/ethernet/spacemit/k1_emac.h416
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig24
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c30
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c25
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c86
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c85
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c108
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c63
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c94
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun55i.c159
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c51
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c24
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h17
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_est.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_est.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c31
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c28
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c338
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c391
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c78
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c94
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c54
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c7
-rw-r--r--drivers/net/ethernet/ti/Kconfig12
-rw-r--r--drivers/net/ethernet/ti/Makefile3
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-ethtool.c27
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c9
-rw-r--r--drivers/net/ethernet/ti/icssg/icss_iep.c101
-rw-r--r--drivers/net/ethernet/ti/icssm/icssm_prueth.c1746
-rw-r--r--drivers/net/ethernet/ti/icssm/icssm_prueth.h262
-rw-r--r--drivers/net/ethernet/ti/icssm/icssm_prueth_ptp.h85
-rw-r--r--drivers/net/ethernet/ti/icssm/icssm_switch.h257
-rw-r--r--drivers/net/ethernet/wangxun/Kconfig1
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ethtool.c224
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ethtool.h13
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.c133
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.h5
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_lib.c113
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_sriov.c22
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_type.h28
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_vf.h72
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_vf_lib.c2
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_vf_lib.h1
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c9
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_main.c6
-rw-r--r--drivers/net/ethernet/wangxun/ngbevf/ngbevf_main.c5
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c9
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_main.c1
-rw-r--r--drivers/net/ethernet/wangxun/txgbevf/txgbevf_main.c5
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c10
-rw-r--r--drivers/net/fjes/fjes_main.c5
-rw-r--r--drivers/net/geneve.c4
-rw-r--r--drivers/net/gtp.c7
-rw-r--r--drivers/net/hamradio/6pack.c57
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c4
-rw-r--r--drivers/net/macsec.c173
-rw-r--r--drivers/net/macvlan.c2
-rw-r--r--drivers/net/mdio/Kconfig5
-rw-r--r--drivers/net/mdio/mdio-bcm-unimac.c4
-rw-r--r--drivers/net/mdio/of_mdio.c3
-rw-r--r--drivers/net/netconsole.c91
-rw-r--r--drivers/net/netdevsim/Makefile4
-rw-r--r--drivers/net/netdevsim/dev.c6
-rw-r--r--drivers/net/netdevsim/ethtool.c25
-rw-r--r--drivers/net/netdevsim/health.c4
-rw-r--r--drivers/net/netdevsim/netdev.c43
-rw-r--r--drivers/net/netdevsim/netdevsim.h27
-rw-r--r--drivers/net/netdevsim/psp.c225
-rw-r--r--drivers/net/pcs/Kconfig11
-rw-r--r--drivers/net/pcs/pcs-lynx.c11
-rw-r--r--drivers/net/pcs/pcs-rzn1-miic.c317
-rw-r--r--drivers/net/phy/Kconfig11
-rw-r--r--drivers/net/phy/Makefile3
-rw-r--r--drivers/net/phy/aquantia/aquantia.h52
-rw-r--r--drivers/net/phy/aquantia/aquantia_main.c702
-rw-r--r--drivers/net/phy/as21xxx.c7
-rw-r--r--drivers/net/phy/ax88796b.c5
-rw-r--r--drivers/net/phy/broadcom.c147
-rw-r--r--drivers/net/phy/dp83640.c58
-rw-r--r--drivers/net/phy/fixed_phy.c217
-rw-r--r--drivers/net/phy/marvell-88x2222.c13
-rw-r--r--drivers/net/phy/marvell.c47
-rw-r--r--drivers/net/phy/marvell10g.c7
-rw-r--r--drivers/net/phy/mdio-boardinfo.c79
-rw-r--r--drivers/net/phy/mdio-boardinfo.h18
-rw-r--r--drivers/net/phy/mdio_bus_provider.c33
-rw-r--r--drivers/net/phy/mediatek/mtk-2p5ge.c104
-rw-r--r--drivers/net/phy/micrel.c1004
-rw-r--r--drivers/net/phy/motorcomm.c117
-rw-r--r--drivers/net/phy/mscc/mscc.h3
-rw-r--r--drivers/net/phy/mscc/mscc_main.c40
-rw-r--r--drivers/net/phy/mxl-86110.c392
-rw-r--r--drivers/net/phy/nxp-c45-tja11xx-macsec.c8
-rw-r--r--drivers/net/phy/phy-caps.h2
-rw-r--r--drivers/net/phy/phy.c15
-rw-r--r--drivers/net/phy/phy_caps.c2
-rw-r--r--drivers/net/phy/phy_device.c31
-rw-r--r--drivers/net/phy/phylink.c14
-rw-r--r--drivers/net/phy/qcom/at803x.c9
-rw-r--r--drivers/net/phy/qcom/qca807x.c7
-rw-r--r--drivers/net/phy/realtek/realtek_main.c263
-rw-r--r--drivers/net/phy/sfp-bus.c107
-rw-r--r--drivers/net/phy/sfp.c85
-rw-r--r--drivers/net/phy/sfp.h4
-rw-r--r--drivers/net/ppp/Kconfig3
-rw-r--r--drivers/net/ppp/bsd_comp.c4
-rw-r--r--drivers/net/ppp/ppp_generic.c120
-rw-r--r--drivers/net/ppp/ppp_mppe.c108
-rw-r--r--drivers/net/ppp/pppoe.c129
-rw-r--r--drivers/net/pse-pd/Kconfig11
-rw-r--r--drivers/net/pse-pd/Makefile1
-rw-r--r--drivers/net/pse-pd/si3474.c578
-rw-r--r--drivers/net/tun.c4
-rw-r--r--drivers/net/usb/Kconfig1
-rw-r--r--drivers/net/usb/lan78xx.c6
-rw-r--r--drivers/net/usb/rtl8150.c2
-rw-r--r--drivers/net/virtio_net.c22
-rw-r--r--drivers/net/vrf.c4
-rw-r--r--drivers/net/vxlan/vxlan_core.c7
-rw-r--r--drivers/net/wan/framer/pef2256/pef2256.c28
-rw-r--r--drivers/net/wireguard/device.c6
-rw-r--r--drivers/net/wireguard/queueing.h13
-rw-r--r--drivers/net/wireless/ath/ath10k/leds.c3
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c12
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.c14
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c39
-rw-r--r--drivers/net/wireless/ath/ath11k/ahb.c17
-rw-r--r--drivers/net/wireless/ath/ath11k/ce.c3
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c6
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.c1
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.c16
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.h1
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.c19
-rw-r--r--drivers/net/wireless/ath/ath12k/ahb.c2
-rw-r--r--drivers/net/wireless/ath/ath12k/ce.c5
-rw-r--r--drivers/net/wireless/ath/ath12k/core.h7
-rw-r--r--drivers/net/wireless/ath/ath12k/debug.h1
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.c2
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.h12
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_mon.c56
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_rx.c352
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_rx.h18
-rw-r--r--drivers/net/wireless/ath/ath12k/hal.h1
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_desc.h1
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_rx.c3
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_rx.h12
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.c117
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.h3
-rw-r--r--drivers/net/wireless/ath/ath12k/qmi.c24
-rw-r--r--drivers/net/wireless/ath/ath12k/qmi.h16
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.c158
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.h33
-rw-r--r--drivers/net/wireless/ath/carl9170/rx.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c23
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c14
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c8
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h1
-rw-r--r--drivers/net/wireless/intel/iwlegacy/iwl-spectrum.h24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/bz.c18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/dr.c13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-gf.c22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-hr.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/sc.c18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/power.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/d3.h113
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/offload.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/power.h34
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rs.h35
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c43
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dump.c54
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/error-dump.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/pnvm.c81
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/regulatory.c53
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/regulatory.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h40
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c47
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.c95
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c80
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h74
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.c71
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h81
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/sap.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/d3.c553
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/debugfs.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/iface.c39
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/iface.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/key.c38
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/key.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/link.c26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/link.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mac80211.c19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mld.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mlo.c34
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/notif.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/regulatory.c28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/roc.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/rx.c26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/scan.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/sta.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/stats.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tlc.c75
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/coex.c131
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/constants.h20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c384
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c94
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/link.c809
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c38
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c124
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c138
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h136
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c53
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c133
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c101
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c89
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tests/links.c433
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/internal.h53
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans-gen2.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c237
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/tests/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/tests/nvm_parse.c72
-rw-r--r--drivers/net/wireless/intersil/p54/txrx.c2
-rw-r--r--drivers/net/wireless/marvell/libertas/if_sdio.c3
-rw-r--r--drivers/net/wireless/marvell/libertas/if_spi.c3
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/main.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c7
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c5
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmd.c113
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_ioctl.c58
-rw-r--r--drivers/net/wireless/mediatek/mt76/agg-rx.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/channel.c13
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c231
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.h29
-rw-r--r--drivers/net/wireless/mediatek/mt76/eeprom.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c59
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h75
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/soc.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/init.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c25
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/dma.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/init.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.c29
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mmio.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/init.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/main.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/usb.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/init.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mac.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/main.c67
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mcu.c28
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/pci.c26
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/usb.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_core.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_dma.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/dma.c326
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/init.c356
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mac.c783
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/main.c507
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mcu.c314
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mcu.h17
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mmio.c97
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h106
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/pci.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/regs.h32
-rw-r--r--drivers/net/wireless/mediatek/mt76/scan.c13
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/wed.c8
-rw-r--r--drivers/net/wireless/microchip/wilc1000/cfg80211.c7
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00dev.c2
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/core.c27
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/ps.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c1
-rw-r--r--drivers/net/wireless/realtek/rtw88/led.c13
-rw-r--r--drivers/net/wireless/realtek/rtw88/sdio.c4
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.c11
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.h10
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.c5
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.c684
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.h148
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.c125
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.h1
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.c177
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.h77
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.c72
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.h1
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac80211.c35
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac_be.c1
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.c462
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.h128
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci_be.c18
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.c476
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.h24
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy_be.c9
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.c3
-rw-r--r--drivers/net/wireless/realtek/rtw89/reg.h56
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b.c4
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c159
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851be.c4
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851bu.c3
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.c46
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ae.c4
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b.c4
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852be.c4
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt.c1
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c14
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bte.c4
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bu.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c.c4
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ce.c4
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a.c11
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922ae.c4
-rw-r--r--drivers/net/wireless/realtek/rtw89/sar.c15
-rw-r--r--drivers/net/wireless/realtek/rtw89/sar.h1
-rw-r--r--drivers/net/wireless/realtek/rtw89/ser.c5
-rw-r--r--drivers/net/wireless/realtek/rtw89/txrx.h38
-rw-r--r--drivers/net/wireless/realtek/rtw89/wow.c79
-rw-r--r--drivers/net/wireless/realtek/rtw89/wow.h6
-rw-r--r--drivers/net/wireless/virtual/mac80211_hwsim.c259
-rw-r--r--drivers/net/wireless/virtual/mac80211_hwsim.h4
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_pcie.c2
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c3
-rw-r--r--drivers/net/wwan/wwan_hwsim.c2
-rw-r--r--drivers/nfc/pn533/pn533.c12
-rw-r--r--drivers/nfc/s3fwrn5/Kconfig3
-rw-r--r--drivers/nfc/s3fwrn5/firmware.c17
-rw-r--r--drivers/nvdimm/btt.c4
-rw-r--r--drivers/nvme/common/auth.c86
-rw-r--r--drivers/nvme/host/apple.c197
-rw-r--r--drivers/nvme/host/auth.c5
-rw-r--r--drivers/nvme/host/core.c23
-rw-r--r--drivers/nvme/host/fc.c10
-rw-r--r--drivers/nvme/host/ioctl.c7
-rw-r--r--drivers/nvme/host/nvme.h2
-rw-r--r--drivers/nvme/host/pci.c184
-rw-r--r--drivers/nvme/host/tcp.c3
-rw-r--r--drivers/nvme/target/core.c15
-rw-r--r--drivers/nvme/target/fc.c35
-rw-r--r--drivers/nvme/target/fcloop.c8
-rw-r--r--drivers/of/irq.c25
-rw-r--r--drivers/opp/core.c99
-rw-r--r--drivers/pci/controller/dwc/pcie-dw-rockchip.c42
-rw-r--r--drivers/pci/controller/pcie-rockchip.h35
-rw-r--r--drivers/pci/msi/irqdomain.c2
-rw-r--r--drivers/pci/vgaarb.c31
-rw-r--r--drivers/phy/rockchip/phy-rockchip-emmc.c3
-rw-r--r--drivers/phy/rockchip/phy-rockchip-pcie.c70
-rw-r--r--drivers/phy/rockchip/phy-rockchip-samsung-dcphy.c11
-rw-r--r--drivers/phy/rockchip/phy-rockchip-usb.c51
-rw-r--r--drivers/pinctrl/Kconfig23
-rw-r--r--drivers/pinctrl/Makefile1
-rw-r--r--drivers/pinctrl/bcm/Kconfig12
-rw-r--r--drivers/pinctrl/bcm/Kconfig.stb10
-rw-r--r--drivers/pinctrl/bcm/Makefile2
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c6
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm6358.c4
-rw-r--r--drivers/pinctrl/bcm/pinctrl-brcmstb-bcm2712.c747
-rw-r--r--drivers/pinctrl/bcm/pinctrl-brcmstb.c442
-rw-r--r--drivers/pinctrl/bcm/pinctrl-brcmstb.h93
-rw-r--r--drivers/pinctrl/cirrus/pinctrl-madera-core.c4
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.c45
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-airoha.c23
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-moore.c12
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-moore.h7
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt7622.c2
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt7623.c2
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt7629.c2
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt7981.c2
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt7986.c2
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt7988.c44
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h2
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common.c2
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-paris.c4
-rw-r--r--drivers/pinctrl/meson/pinctrl-amlogic-a4.c6
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-g12a.c8
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-gxl.c10
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.c6
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-37xx.c6
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-abx500.c6
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-ma35.c3
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c8
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c8
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-wpcm450.c2
-rw-r--r--drivers/pinctrl/pinconf-generic.c6
-rw-r--r--drivers/pinctrl/pinctrl-amd.c41
-rw-r--r--drivers/pinctrl/pinctrl-apple-gpio.c1
-rw-r--r--drivers/pinctrl/pinctrl-at91-pio4.c2
-rw-r--r--drivers/pinctrl/pinctrl-aw9523.c6
-rw-r--r--drivers/pinctrl/pinctrl-cy8c95x0.c2
-rw-r--r--drivers/pinctrl/pinctrl-eic7700.c2
-rw-r--r--drivers/pinctrl/pinctrl-equilibrium.c6
-rw-r--r--drivers/pinctrl/pinctrl-ingenic.c53
-rw-r--r--drivers/pinctrl/pinctrl-k210.c2
-rw-r--r--drivers/pinctrl/pinctrl-keembay.c30
-rw-r--r--drivers/pinctrl/pinctrl-microchip-sgpio.c6
-rw-r--r--drivers/pinctrl/pinctrl-ocelot.c4
-rw-r--r--drivers/pinctrl/pinctrl-pic32.c4
-rw-r--r--drivers/pinctrl/pinctrl-rk805.c4
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c6
-rw-r--r--drivers/pinctrl/pinctrl-rp1.c96
-rw-r--r--drivers/pinctrl/pinctrl-scmi.c2
-rw-r--r--drivers/pinctrl/pinctrl-single.c8
-rw-r--r--drivers/pinctrl/pinctrl-stmfx.c4
-rw-r--r--drivers/pinctrl/pinctrl-sx150x.c12
-rw-r--r--drivers/pinctrl/pinctrl-upboard.c1070
-rw-r--r--drivers/pinctrl/pinctrl-zynqmp.c2
-rw-r--r--drivers/pinctrl/pinmux.c70
-rw-r--r--drivers/pinctrl/pinmux.h9
-rw-r--r--drivers/pinctrl/qcom/Kconfig11
-rw-r--r--drivers/pinctrl/qcom/Kconfig.msm10
-rw-r--r--drivers/pinctrl/qcom/Makefile2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-glymur.c1777
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq5018.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq5332.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq5424.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq6018.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq8074.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq9574.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-lpass-lpi.c26
-rw-r--r--drivers/pinctrl/qcom/pinctrl-lpass-lpi.h18
-rw-r--r--drivers/pinctrl/qcom/pinctrl-mdm9607.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-mdm9615.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-milos.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c51
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.h5
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8226.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8660.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8909.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8916.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8917.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8953.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8960.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8976.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8994.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8996.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8998.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8x74.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qcm2290.c4
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qcs404.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qcs615.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qcs8300.c4
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qdu1000.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sa8775p.c4
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sar2130p.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc7180.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc7280.c4
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc8180x.c4
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc8280xp.c4
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdm660-lpass-lpi.c160
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdm660.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdm670.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdm845.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdx55.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdx65.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdx75.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm4450.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm6115.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm6125.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm6350.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm6375.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm7150.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8150.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8250.c83
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8350.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8450.c4
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8550.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8650.c4
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8750.c4
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c8
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-mpp.c8
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c4
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c4
-rw-r--r--drivers/pinctrl/qcom/pinctrl-x1e80100.c2
-rw-r--r--drivers/pinctrl/renesas/Kconfig13
-rw-r--r--drivers/pinctrl/renesas/Makefile1
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a779g0.c2
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rza1.c4
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rza2.c2
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzg2l.c220
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzt2h.c813
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzv2m.c2
-rw-r--r--drivers/pinctrl/renesas/pinctrl.c3
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos-arm64.c50
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.h10
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c2
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.h5
-rw-r--r--drivers/pinctrl/spacemit/pinctrl-k1.c4
-rw-r--r--drivers/pinctrl/sprd/pinctrl-sprd.c9
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32-hdp.c4
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c2
-rw-r--r--drivers/pinctrl/sunplus/sppctl.c4
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi-dt.c11
-rw-r--r--drivers/pinctrl/tegra/Kconfig4
-rw-r--r--drivers/pinctrl/tegra/Makefile1
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra186.c1979
-rw-r--r--drivers/pmdomain/apple/pmgr-pwrstate.c1
-rw-r--r--drivers/pnp/isapnp/core.c3
-rw-r--r--drivers/power/supply/88pm860x_charger.c8
-rw-r--r--drivers/power/supply/Kconfig16
-rw-r--r--drivers/power/supply/Makefile2
-rw-r--r--drivers/power/supply/ab8500_btemp.c3
-rw-r--r--drivers/power/supply/adc-battery-helper.c327
-rw-r--r--drivers/power/supply/adc-battery-helper.h62
-rw-r--r--drivers/power/supply/bq2415x_charger.c4
-rw-r--r--drivers/power/supply/bq24190_charger.c2
-rw-r--r--drivers/power/supply/bq27xxx_battery.c17
-rw-r--r--drivers/power/supply/cw2015_battery.c8
-rw-r--r--drivers/power/supply/gpio-charger.c7
-rw-r--r--drivers/power/supply/intel_dc_ti_battery.c389
-rw-r--r--drivers/power/supply/ipaq_micro_battery.c3
-rw-r--r--drivers/power/supply/max77705_charger.c332
-rw-r--r--drivers/power/supply/max77976_charger.c12
-rw-r--r--drivers/power/supply/mt6370-charger.c18
-rw-r--r--drivers/power/supply/power_supply_sysfs.c2
-rw-r--r--drivers/power/supply/qcom_battmgr.c324
-rw-r--r--drivers/power/supply/rk817_charger.c6
-rw-r--r--drivers/power/supply/rt9467-charger.c47
-rw-r--r--drivers/power/supply/rx51_battery.c2
-rw-r--r--drivers/power/supply/sbs-charger.c16
-rw-r--r--drivers/power/supply/sbs-manager.c2
-rw-r--r--drivers/power/supply/ucs1002_power.c2
-rw-r--r--drivers/power/supply/ug3105_battery.c346
-rw-r--r--drivers/powercap/idle_inject.c5
-rw-r--r--drivers/ptp/Kconfig13
-rw-r--r--drivers/ptp/Makefile5
-rw-r--r--drivers/ptp/ptp_chardev.c62
-rw-r--r--drivers/ptp/ptp_clock.c150
-rw-r--r--drivers/ptp/ptp_clockmatrix.c2
-rw-r--r--drivers/ptp/ptp_netc.c1043
-rw-r--r--drivers/ptp/ptp_ocp.c6
-rw-r--r--drivers/ptp/ptp_private.h3
-rw-r--r--drivers/ptp/ptp_qoriq.c24
-rw-r--r--drivers/ptp/ptp_qoriq_debugfs.c101
-rw-r--r--drivers/ptp/ptp_sysfs.c2
-rw-r--r--drivers/remoteproc/qcom_q6v5_adsp.c2
-rw-r--r--drivers/remoteproc/qcom_q6v5_pas.c7
-rw-r--r--drivers/remoteproc/qcom_q6v5_wcss.c2
-rw-r--r--drivers/reset/Kconfig7
-rw-r--r--drivers/reset/Makefile1
-rw-r--r--drivers/reset/reset-aspeed.c253
-rw-r--r--drivers/reset/reset-bcm6345.c1
-rw-r--r--drivers/reset/reset-intel-gw.c1
-rw-r--r--drivers/reset/reset-qcom-pdc.c1
-rw-r--r--drivers/reset/reset-th1520.c41
-rw-r--r--drivers/s390/block/dasd.c24
-rw-r--r--drivers/s390/net/Kconfig3
-rw-r--r--drivers/s390/net/ism.h53
-rw-r--r--drivers/s390/net/ism_drv.c573
-rw-r--r--drivers/scsi/3w-9xxx.c2
-rw-r--r--drivers/scsi/3w-sas.c2
-rw-r--r--drivers/scsi/3w-xxxx.c2
-rw-r--r--drivers/scsi/BusLogic.c8
-rw-r--r--drivers/scsi/BusLogic.h2
-rw-r--r--drivers/scsi/aacraid/linit.c6
-rw-r--r--drivers/scsi/advansys.c2
-rw-r--r--drivers/scsi/aha152x.c4
-rw-r--r--drivers/scsi/aha1542.c2
-rw-r--r--drivers/scsi/aha1740.c2
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm.c4
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm.c4
-rw-r--r--drivers/scsi/aic94xx/aic94xx_task.c1
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c6
-rw-r--r--drivers/scsi/atp870u.c2
-rw-r--r--drivers/scsi/bfa/bfa_core.c1
-rw-r--r--drivers/scsi/csiostor/csio_wr.c4
-rw-r--r--drivers/scsi/fdomain.c4
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c2
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c6
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c6
-rw-r--r--drivers/scsi/hpsa.c53
-rw-r--r--drivers/scsi/imm.c2
-rw-r--r--drivers/scsi/initio.c4
-rw-r--r--drivers/scsi/ipr.c16
-rw-r--r--drivers/scsi/ips.c2
-rw-r--r--drivers/scsi/ips.h2
-rw-r--r--drivers/scsi/isci/remote_device.c2
-rw-r--r--drivers/scsi/libfc/fc_encode.h2
-rw-r--r--drivers/scsi/libsas/sas_expander.c5
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c2
-rw-r--r--drivers/scsi/lpfc/lpfc.h52
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c632
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.h5
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c23
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c12
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c25
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c8
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c14
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c21
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/megaraid.c4
-rw-r--r--drivers/scsi/megaraid.h2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c4
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h38
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_pci.h2
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_sas.h1
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_transport.h2
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr.h8
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_fw.c13
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_os.c32
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_transport.c11
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c8
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h4
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c4
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_transport.c11
-rw-r--r--drivers/scsi/mvsas/mv_sas.c2
-rw-r--r--drivers/scsi/mvumi.c2
-rw-r--r--drivers/scsi/myrb.c2
-rw-r--r--drivers/scsi/myrs.c8
-rw-r--r--drivers/scsi/pcmcia/sym53c500_cs.c2
-rw-r--r--drivers/scsi/pm8001/pm8001_ctl.c24
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c11
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.h4
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c1
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c34
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h5
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c10
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.h4
-rw-r--r--drivers/scsi/ppa.c2
-rw-r--r--drivers/scsi/qla1280.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h10
-rw-r--r--drivers/scsi/qla2xxx/qla_edif.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c17
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c13
-rw-r--r--drivers/scsi/qlogicfas408.c2
-rw-r--r--drivers/scsi/qlogicfas408.h2
-rw-r--r--drivers/scsi/scsi_debug.c17
-rw-r--r--drivers/scsi/scsi_lib.c3
-rw-r--r--drivers/scsi/scsicam.c16
-rw-r--r--drivers/scsi/sd.c66
-rw-r--r--drivers/scsi/sg.c3
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c17
-rw-r--r--drivers/scsi/stex.c2
-rw-r--r--drivers/scsi/storvsc_drv.c6
-rw-r--r--drivers/scsi/wd719x.c2
-rw-r--r--drivers/soc/apple/Kconfig3
-rw-r--r--drivers/soc/apple/mailbox.c19
-rw-r--r--drivers/soc/apple/sart.c60
-rw-r--r--drivers/soc/aspeed/aspeed-lpc-ctrl.c14
-rw-r--r--drivers/soc/aspeed/aspeed-p2a-ctrl.c14
-rw-r--r--drivers/soc/aspeed/aspeed-socinfo.c4
-rw-r--r--drivers/soc/fsl/qbman/qman_test_stash.c2
-rw-r--r--drivers/soc/fsl/qe/gpio.c139
-rw-r--r--drivers/soc/fsl/qe/qmc.c44
-rw-r--r--drivers/soc/hisilicon/kunpeng_hccs.c2
-rw-r--r--drivers/soc/mediatek/mtk-svs.c23
-rw-r--r--drivers/soc/qcom/icc-bwmon.c3
-rw-r--r--drivers/soc/qcom/llcc-qcom.c1
-rw-r--r--drivers/soc/qcom/mdt_loader.c20
-rw-r--r--drivers/soc/qcom/qcom-geni-se.c506
-rw-r--r--drivers/soc/qcom/qcom_pd_mapper.c1
-rw-r--r--drivers/soc/qcom/ramp_controller.c1
-rw-r--r--drivers/soc/qcom/rpm_master_stats.c2
-rw-r--r--drivers/soc/qcom/rpmh-rsc.c7
-rw-r--r--drivers/soc/qcom/smem.c2
-rw-r--r--drivers/soc/renesas/Kconfig13
-rw-r--r--drivers/soc/renesas/r9a08g045-sysc.c1
-rw-r--r--drivers/soc/renesas/r9a09g047-sys.c1
-rw-r--r--drivers/soc/renesas/r9a09g057-sys.c1
-rw-r--r--drivers/soc/renesas/renesas-soc.c12
-rw-r--r--drivers/soc/renesas/rz-sysc.c30
-rw-r--r--drivers/soc/renesas/rz-sysc.h2
-rw-r--r--drivers/soc/rockchip/grf.c35
-rw-r--r--drivers/soc/samsung/exynos-pmu.c276
-rw-r--r--drivers/soc/sunxi/sunxi_sram.c14
-rw-r--r--drivers/soc/tegra/Kconfig1
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra30.c122
-rw-r--r--drivers/soc/ti/k3-socinfo.c10
-rw-r--r--drivers/soc/ti/pruss.c2
-rw-r--r--drivers/soundwire/bus.c12
-rw-r--r--drivers/soundwire/slave.c6
-rw-r--r--drivers/spi/Kconfig2
-rw-r--r--drivers/spi/spi-apple.c1
-rw-r--r--drivers/spi/spi-geni-qcom.c6
-rw-r--r--drivers/staging/media/atomisp/i2c/Kconfig9
-rw-r--r--drivers/staging/media/atomisp/i2c/Makefile1
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_subdev.c9
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c4
-rw-r--r--drivers/staging/media/imx/imx-media-csc-scaler.c26
-rw-r--r--drivers/staging/media/imx/imx-media-csi.c8
-rw-r--r--drivers/staging/media/ipu3/ipu3-css.c3
-rw-r--r--drivers/staging/media/ipu3/ipu3-v4l2.c5
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-csi2.c2
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-queue.c3
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-subdev.c35
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-subdev.h1
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-video.c37
-rw-r--r--drivers/staging/media/ipu7/ipu7.c29
-rw-r--r--drivers/staging/media/meson/vdec/vdec.c29
-rw-r--r--drivers/staging/media/meson/vdec/vdec.h5
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.c8
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.h5
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_video.c5
-rw-r--r--drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_capture.c16
-rw-r--r--drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_params.c6
-rw-r--r--drivers/staging/media/tegra-video/tegra20.c4
-rw-r--r--drivers/staging/most/video/video.c19
-rw-r--r--drivers/staging/octeon/ethernet-tx.c3
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c8
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c6
-rw-r--r--drivers/target/iscsi/iscsi_target_tmr.c3
-rw-r--r--drivers/target/target_core_pscsi.c2
-rw-r--r--drivers/tee/Kconfig9
-rw-r--r--drivers/tee/Makefile2
-rw-r--r--drivers/tee/optee/Kconfig5
-rw-r--r--drivers/tee/optee/Makefile1
-rw-r--r--drivers/tee/optee/core.c9
-rw-r--r--drivers/tee/optee/ffa_abi.c146
-rw-r--r--drivers/tee/optee/optee_ffa.h27
-rw-r--r--drivers/tee/optee/optee_msg.h84
-rw-r--r--drivers/tee/optee/optee_private.h15
-rw-r--r--drivers/tee/optee/optee_smc.h37
-rw-r--r--drivers/tee/optee/protmem.c335
-rw-r--r--drivers/tee/optee/smc_abi.c141
-rw-r--r--drivers/tee/qcomtee/Kconfig12
-rw-r--r--drivers/tee/qcomtee/Makefile9
-rw-r--r--drivers/tee/qcomtee/async.c182
-rw-r--r--drivers/tee/qcomtee/call.c820
-rw-r--r--drivers/tee/qcomtee/core.c915
-rw-r--r--drivers/tee/qcomtee/mem_obj.c169
-rw-r--r--drivers/tee/qcomtee/primordial_obj.c113
-rw-r--r--drivers/tee/qcomtee/qcomtee.h185
-rw-r--r--drivers/tee/qcomtee/qcomtee_msg.h304
-rw-r--r--drivers/tee/qcomtee/qcomtee_object.h316
-rw-r--r--drivers/tee/qcomtee/shm.c150
-rw-r--r--drivers/tee/qcomtee/user_obj.c692
-rw-r--r--drivers/tee/tee_core.c342
-rw-r--r--drivers/tee/tee_heap.c500
-rw-r--r--drivers/tee/tee_private.h20
-rw-r--r--drivers/tee/tee_shm.c165
-rw-r--r--drivers/thermal/gov_step_wise.c25
-rw-r--r--drivers/thermal/intel/int340x_thermal/Kconfig1
-rw-r--r--drivers/thermal/intel/int340x_thermal/Makefile1
-rw-r--r--drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c3
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device.c20
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device.h6
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c3
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_soc_slider.c284
-rw-r--r--drivers/thermal/k3_j72xx_bandgap.c4
-rw-r--r--drivers/thermal/mediatek/lvts_thermal.c2
-rw-r--r--drivers/thermal/qcom/Kconfig3
-rw-r--r--drivers/thermal/qcom/lmh.c4
-rw-r--r--drivers/thermal/renesas/Kconfig21
-rw-r--r--drivers/thermal/renesas/Makefile3
-rw-r--r--drivers/thermal/renesas/rcar_gen3_thermal.c63
-rw-r--r--drivers/thermal/renesas/rzg3e_thermal.c547
-rw-r--r--drivers/thermal/renesas/rzg3s_thermal.c272
-rw-r--r--drivers/thermal/rockchip_thermal.c50
-rw-r--r--drivers/thermal/tegra/Makefile1
-rw-r--r--drivers/thermal/tegra/soctherm-fuse.c18
-rw-r--r--drivers/thermal/tegra/soctherm.c13
-rw-r--r--drivers/thermal/tegra/soctherm.h11
-rw-r--r--drivers/thermal/tegra/tegra114-soctherm.c209
-rw-r--r--drivers/thermal/tegra/tegra124-soctherm.c4
-rw-r--r--drivers/thermal/tegra/tegra132-soctherm.c4
-rw-r--r--drivers/thermal/tegra/tegra210-soctherm.c4
-rw-r--r--drivers/thermal/testing/zone.c31
-rw-r--r--drivers/thermal/thermal-generic-adc.c55
-rw-r--r--drivers/thermal/thermal_hwmon.c2
-rw-r--r--drivers/tty/serial/Kconfig2
-rw-r--r--drivers/tty/serial/qcom_geni_serial.c8
-rw-r--r--drivers/ufs/core/ufs-mcq.c11
-rw-r--r--drivers/ufs/core/ufs-sysfs.c2
-rw-r--r--drivers/ufs/core/ufs_trace.h1
-rw-r--r--drivers/ufs/core/ufs_trace_types.h24
-rw-r--r--drivers/ufs/core/ufshcd.c60
-rw-r--r--drivers/ufs/host/ufs-exynos.c10
-rw-r--r--drivers/ufs/host/ufs-mediatek.c352
-rw-r--r--drivers/ufs/host/ufs-mediatek.h1
-rw-r--r--drivers/ufs/host/ufs-qcom.c226
-rw-r--r--drivers/ufs/host/ufs-qcom.h28
-rw-r--r--drivers/ufs/host/ufshcd-pltfrm.c33
-rw-r--r--drivers/ufs/host/ufshcd-pltfrm.h1
-rw-r--r--drivers/usb/gadget/function/uvc.h5
-rw-r--r--drivers/usb/gadget/function/uvc_v4l2.c8
-rw-r--r--drivers/vfio/pci/pds/lm.c3
-rw-r--r--drivers/vfio/pci/virtio/migrate.c3
-rw-r--r--drivers/vhost/vringh.c7
-rw-r--r--drivers/video/fbdev/core/fbcon.c9
-rw-r--r--drivers/video/screen_info_generic.c55
-rw-r--r--drivers/virtio/virtio_balloon.c10
-rw-r--r--drivers/virtio/virtio_ring.c4
-rw-r--r--drivers/xen/Kconfig1
-rw-r--r--drivers/xen/gntdev.c5
-rw-r--r--drivers/xen/swiotlb-xen.c21
3309 files changed, 169575 insertions, 53021 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index b5749cf67044..a104163b1353 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -195,4 +195,5 @@ obj-$(CONFIG_DRM_ACCEL) += accel/
obj-$(CONFIG_CDX_BUS) += cdx/
obj-$(CONFIG_DPLL) += dpll/
+obj-$(CONFIG_DIBS) += dibs/
obj-$(CONFIG_S390) += s390/
diff --git a/drivers/accel/Kconfig b/drivers/accel/Kconfig
index 5b9490367a39..bb01cebc42bf 100644
--- a/drivers/accel/Kconfig
+++ b/drivers/accel/Kconfig
@@ -28,5 +28,6 @@ source "drivers/accel/amdxdna/Kconfig"
source "drivers/accel/habanalabs/Kconfig"
source "drivers/accel/ivpu/Kconfig"
source "drivers/accel/qaic/Kconfig"
+source "drivers/accel/rocket/Kconfig"
endif
diff --git a/drivers/accel/Makefile b/drivers/accel/Makefile
index a301fb6089d4..ffc3fa588666 100644
--- a/drivers/accel/Makefile
+++ b/drivers/accel/Makefile
@@ -4,3 +4,4 @@ obj-$(CONFIG_DRM_ACCEL_AMDXDNA) += amdxdna/
obj-$(CONFIG_DRM_ACCEL_HABANALABS) += habanalabs/
obj-$(CONFIG_DRM_ACCEL_IVPU) += ivpu/
obj-$(CONFIG_DRM_ACCEL_QAIC) += qaic/
+obj-$(CONFIG_DRM_ACCEL_ROCKET) += rocket/
\ No newline at end of file
diff --git a/drivers/accel/amdxdna/Makefile b/drivers/accel/amdxdna/Makefile
index 0e9adf6890a0..6797dac65efa 100644
--- a/drivers/accel/amdxdna/Makefile
+++ b/drivers/accel/amdxdna/Makefile
@@ -15,6 +15,7 @@ amdxdna-y := \
amdxdna_mailbox_helper.o \
amdxdna_pci_drv.o \
amdxdna_sysfs.o \
+ amdxdna_ubuf.o \
npu1_regs.o \
npu2_regs.o \
npu4_regs.o \
diff --git a/drivers/accel/amdxdna/aie2_ctx.c b/drivers/accel/amdxdna/aie2_ctx.c
index 2cff5419bd2f..e9f9b1fa5dc1 100644
--- a/drivers/accel/amdxdna/aie2_ctx.c
+++ b/drivers/accel/amdxdna/aie2_ctx.c
@@ -46,6 +46,17 @@ static void aie2_job_put(struct amdxdna_sched_job *job)
kref_put(&job->refcnt, aie2_job_release);
}
+static void aie2_hwctx_status_shift_stop(struct amdxdna_hwctx *hwctx)
+{
+ hwctx->old_status = hwctx->status;
+ hwctx->status = HWCTX_STAT_STOP;
+}
+
+static void aie2_hwctx_status_restore(struct amdxdna_hwctx *hwctx)
+{
+ hwctx->status = hwctx->old_status;
+}
+
/* bad_job is only used by aie2_sched_job_timedout(); all other callers pass NULL */
static void aie2_hwctx_stop(struct amdxdna_dev *xdna, struct amdxdna_hwctx *hwctx,
struct drm_sched_job *bad_job)
@@ -89,25 +100,6 @@ out:
return ret;
}
-void aie2_restart_ctx(struct amdxdna_client *client)
-{
- struct amdxdna_dev *xdna = client->xdna;
- struct amdxdna_hwctx *hwctx;
- unsigned long hwctx_id;
-
- drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
- mutex_lock(&client->hwctx_lock);
- amdxdna_for_each_hwctx(client, hwctx_id, hwctx) {
- if (hwctx->status != HWCTX_STAT_STOP)
- continue;
-
- hwctx->status = hwctx->old_status;
- XDNA_DBG(xdna, "Resetting %s", hwctx->name);
- aie2_hwctx_restart(xdna, hwctx);
- }
- mutex_unlock(&client->hwctx_lock);
-}
-
static struct dma_fence *aie2_cmd_get_out_fence(struct amdxdna_hwctx *hwctx, u64 seq)
{
struct dma_fence *fence, *out_fence = NULL;
@@ -141,34 +133,49 @@ static void aie2_hwctx_wait_for_idle(struct amdxdna_hwctx *hwctx)
dma_fence_put(fence);
}
-void aie2_hwctx_suspend(struct amdxdna_hwctx *hwctx)
+static int aie2_hwctx_suspend_cb(struct amdxdna_hwctx *hwctx, void *arg)
{
struct amdxdna_dev *xdna = hwctx->client->xdna;
+ aie2_hwctx_wait_for_idle(hwctx);
+ aie2_hwctx_stop(xdna, hwctx, NULL);
+ aie2_hwctx_status_shift_stop(hwctx);
+
+ return 0;
+}
+
+void aie2_hwctx_suspend(struct amdxdna_client *client)
+{
+ struct amdxdna_dev *xdna = client->xdna;
+
/*
* Command timeout is unlikely. But if it happens, it doesn't
* break the system. aie2_hwctx_stop() will destroy the mailbox
* and abort all commands.
*/
drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
- aie2_hwctx_wait_for_idle(hwctx);
- aie2_hwctx_stop(xdna, hwctx, NULL);
- hwctx->old_status = hwctx->status;
- hwctx->status = HWCTX_STAT_STOP;
+ amdxdna_hwctx_walk(client, NULL, aie2_hwctx_suspend_cb);
}
-void aie2_hwctx_resume(struct amdxdna_hwctx *hwctx)
+static int aie2_hwctx_resume_cb(struct amdxdna_hwctx *hwctx, void *arg)
{
struct amdxdna_dev *xdna = hwctx->client->xdna;
+ aie2_hwctx_status_restore(hwctx);
+ return aie2_hwctx_restart(xdna, hwctx);
+}
+
+int aie2_hwctx_resume(struct amdxdna_client *client)
+{
+ struct amdxdna_dev *xdna = client->xdna;
+
/*
* The resume path cannot guarantee that the mailbox channel is
* regenerated. If that happens, submitting a message to the
* channel will return an error.
*/
drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
- hwctx->status = hwctx->old_status;
- aie2_hwctx_restart(xdna, hwctx);
+ return amdxdna_hwctx_walk(client, NULL, aie2_hwctx_resume_cb);
}
static void
@@ -192,7 +199,7 @@ aie2_sched_resp_handler(void *handle, void __iomem *data, size_t size)
{
struct amdxdna_sched_job *job = handle;
struct amdxdna_gem_obj *cmd_abo;
- u32 ret = 0;
+ int ret = 0;
u32 status;
cmd_abo = job->cmd_bo;
@@ -222,7 +229,7 @@ static int
aie2_sched_nocmd_resp_handler(void *handle, void __iomem *data, size_t size)
{
struct amdxdna_sched_job *job = handle;
- u32 ret = 0;
+ int ret = 0;
u32 status;
if (unlikely(!data))
@@ -250,7 +257,7 @@ aie2_sched_cmdlist_resp_handler(void *handle, void __iomem *data, size_t size)
u32 fail_cmd_status;
u32 fail_cmd_idx;
u32 cmd_status;
- u32 ret = 0;
+ int ret = 0;
cmd_abo = job->cmd_bo;
if (unlikely(!data) || unlikely(size != sizeof(u32) * 3)) {
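
The refactor above replaces the per-context suspend/resume entry points with per-client walks: each operation becomes a small callback handed to amdxdna_hwctx_walk(), the helper added later in this series. A minimal sketch of the pattern (the counting callback itself is illustrative, not part of the patch):

static int count_hwctx_cb(struct amdxdna_hwctx *hwctx, void *arg)
{
	u32 *count = arg;

	(*count)++;
	return 0;	/* returning non-zero stops the walk early */
}

static u32 client_hwctx_count(struct amdxdna_client *client)
{
	u32 count = 0;

	amdxdna_hwctx_walk(client, &count, count_hwctx_cb);
	return count;
}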
diff --git a/drivers/accel/amdxdna/aie2_message.c b/drivers/accel/amdxdna/aie2_message.c
index 82412eec9a4b..9caad083543d 100644
--- a/drivers/accel/amdxdna/aie2_message.c
+++ b/drivers/accel/amdxdna/aie2_message.c
@@ -290,18 +290,25 @@ int aie2_map_host_buf(struct amdxdna_dev_hdl *ndev, u32 context_id, u64 addr, u6
return 0;
}
+static int amdxdna_hwctx_col_map(struct amdxdna_hwctx *hwctx, void *arg)
+{
+ u32 *bitmap = arg;
+
+ *bitmap |= GENMASK(hwctx->start_col + hwctx->num_col - 1, hwctx->start_col);
+
+ return 0;
+}
+
int aie2_query_status(struct amdxdna_dev_hdl *ndev, char __user *buf,
u32 size, u32 *cols_filled)
{
DECLARE_AIE2_MSG(aie_column_info, MSG_OP_QUERY_COL_STATUS);
struct amdxdna_dev *xdna = ndev->xdna;
struct amdxdna_client *client;
- struct amdxdna_hwctx *hwctx;
- unsigned long hwctx_id;
dma_addr_t dma_addr;
u32 aie_bitmap = 0;
u8 *buff_addr;
- int ret, idx;
+ int ret;
buff_addr = dma_alloc_noncoherent(xdna->ddev.dev, size, &dma_addr,
DMA_FROM_DEVICE, GFP_KERNEL);
@@ -309,12 +316,8 @@ int aie2_query_status(struct amdxdna_dev_hdl *ndev, char __user *buf,
return -ENOMEM;
/* Go through each hardware context and mark the AIE columns that are active */
- list_for_each_entry(client, &xdna->client_list, node) {
- idx = srcu_read_lock(&client->hwctx_srcu);
- amdxdna_for_each_hwctx(client, hwctx_id, hwctx)
- aie_bitmap |= amdxdna_hwctx_col_map(hwctx);
- srcu_read_unlock(&client->hwctx_srcu, idx);
- }
+ list_for_each_entry(client, &xdna->client_list, node)
+ amdxdna_hwctx_walk(client, &aie_bitmap, amdxdna_hwctx_col_map);
*cols_filled = 0;
req.dump_buff_addr = dma_addr;
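
The new amdxdna_hwctx_col_map() callback relies on GENMASK(hi, lo) to build a contiguous bitmask of the columns a context occupies. A worked example, with illustrative values (a context at start_col = 2 spanning num_col = 3 columns):

#include <linux/bits.h>

/* GENMASK(2 + 3 - 1, 2) == GENMASK(4, 2) == 0b11100, i.e. columns 2, 3
 * and 4 are marked active. ORing each context's mask into aie_bitmap
 * therefore accumulates every active AIE column across all clients.
 */
static u32 col_mask(u32 start_col, u32 num_col)
{
	return GENMASK(start_col + num_col - 1, start_col);
}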
diff --git a/drivers/accel/amdxdna/aie2_pci.c b/drivers/accel/amdxdna/aie2_pci.c
index c6cf7068d23c..87c425e3d2b9 100644
--- a/drivers/accel/amdxdna/aie2_pci.c
+++ b/drivers/accel/amdxdna/aie2_pci.c
@@ -10,6 +10,7 @@
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>
+#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/firmware.h>
#include <linux/iommu.h>
@@ -440,6 +441,40 @@ disable_dev:
return ret;
}
+static int aie2_hw_suspend(struct amdxdna_dev *xdna)
+{
+ struct amdxdna_client *client;
+
+ guard(mutex)(&xdna->dev_lock);
+ list_for_each_entry(client, &xdna->client_list, node)
+ aie2_hwctx_suspend(client);
+
+ aie2_hw_stop(xdna);
+
+ return 0;
+}
+
+static int aie2_hw_resume(struct amdxdna_dev *xdna)
+{
+ struct amdxdna_client *client;
+ int ret;
+
+ guard(mutex)(&xdna->dev_lock);
+ ret = aie2_hw_start(xdna);
+ if (ret) {
+ XDNA_ERR(xdna, "Start hardware failed, %d", ret);
+ return ret;
+ }
+
+ list_for_each_entry(client, &xdna->client_list, node) {
+ ret = aie2_hwctx_resume(client);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
static int aie2_init(struct amdxdna_dev *xdna)
{
struct pci_dev *pdev = to_pci_dev(xdna->ddev.dev);
@@ -520,14 +555,14 @@ static int aie2_init(struct amdxdna_dev *xdna)
if (!ndev->psp_hdl) {
XDNA_ERR(xdna, "failed to create psp");
ret = -ENOMEM;
- goto free_irq;
+ goto release_fw;
}
xdna->dev_handle = ndev;
ret = aie2_hw_start(xdna);
if (ret) {
XDNA_ERR(xdna, "start npu failed, ret %d", ret);
- goto free_irq;
+ goto release_fw;
}
ret = aie2_mgmt_fw_query(ndev);
@@ -578,8 +613,6 @@ async_event_free:
aie2_error_async_events_free(ndev);
stop_hw:
aie2_hw_stop(xdna);
-free_irq:
- pci_free_irq_vectors(pdev);
release_fw:
release_firmware(fw);
@@ -588,12 +621,10 @@ release_fw:
static void aie2_fini(struct amdxdna_dev *xdna)
{
- struct pci_dev *pdev = to_pci_dev(xdna->ddev.dev);
struct amdxdna_dev_hdl *ndev = xdna->dev_handle;
aie2_hw_stop(xdna);
aie2_error_async_events_free(ndev);
- pci_free_irq_vectors(pdev);
}
static int aie2_get_aie_status(struct amdxdna_client *client,
@@ -752,65 +783,68 @@ static int aie2_get_clock_metadata(struct amdxdna_client *client,
return ret;
}
+static int aie2_hwctx_status_cb(struct amdxdna_hwctx *hwctx, void *arg)
+{
+ struct amdxdna_drm_hwctx_entry *tmp __free(kfree) = NULL;
+ struct amdxdna_drm_get_array *array_args = arg;
+ struct amdxdna_drm_hwctx_entry __user *buf;
+ u32 size;
+
+ if (!array_args->num_element)
+ return -EINVAL;
+
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ tmp->pid = hwctx->client->pid;
+ tmp->context_id = hwctx->id;
+ tmp->start_col = hwctx->start_col;
+ tmp->num_col = hwctx->num_col;
+ tmp->command_submissions = hwctx->priv->seq;
+ tmp->command_completions = hwctx->priv->completed;
+ tmp->pasid = hwctx->client->pasid;
+ tmp->priority = hwctx->qos.priority;
+ tmp->gops = hwctx->qos.gops;
+ tmp->fps = hwctx->qos.fps;
+ tmp->dma_bandwidth = hwctx->qos.dma_bandwidth;
+ tmp->latency = hwctx->qos.latency;
+ tmp->frame_exec_time = hwctx->qos.frame_exec_time;
+ tmp->state = AMDXDNA_HWCTX_STATE_ACTIVE;
+
+ buf = u64_to_user_ptr(array_args->buffer);
+ size = min(sizeof(*tmp), array_args->element_size);
+
+ if (copy_to_user(buf, tmp, size))
+ return -EFAULT;
+
+ array_args->buffer += size;
+ array_args->num_element--;
+
+ return 0;
+}
+
static int aie2_get_hwctx_status(struct amdxdna_client *client,
struct amdxdna_drm_get_info *args)
{
- struct amdxdna_drm_query_hwctx __user *buf;
+ struct amdxdna_drm_get_array array_args;
struct amdxdna_dev *xdna = client->xdna;
- struct amdxdna_drm_query_hwctx *tmp;
struct amdxdna_client *tmp_client;
- struct amdxdna_hwctx *hwctx;
- unsigned long hwctx_id;
- bool overflow = false;
- u32 req_bytes = 0;
- u32 hw_i = 0;
- int ret = 0;
- int idx;
+ int ret;
drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
- tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
- if (!tmp)
- return -ENOMEM;
-
- buf = u64_to_user_ptr(args->buffer);
+ array_args.element_size = sizeof(struct amdxdna_drm_query_hwctx);
+ array_args.buffer = args->buffer;
+ array_args.num_element = args->buffer_size / array_args.element_size;
list_for_each_entry(tmp_client, &xdna->client_list, node) {
- idx = srcu_read_lock(&tmp_client->hwctx_srcu);
- amdxdna_for_each_hwctx(tmp_client, hwctx_id, hwctx) {
- req_bytes += sizeof(*tmp);
- if (args->buffer_size < req_bytes) {
- /* Continue iterating to get the required size */
- overflow = true;
- continue;
- }
-
- memset(tmp, 0, sizeof(*tmp));
- tmp->pid = tmp_client->pid;
- tmp->context_id = hwctx->id;
- tmp->start_col = hwctx->start_col;
- tmp->num_col = hwctx->num_col;
- tmp->command_submissions = hwctx->priv->seq;
- tmp->command_completions = hwctx->priv->completed;
-
- if (copy_to_user(&buf[hw_i], tmp, sizeof(*tmp))) {
- ret = -EFAULT;
- srcu_read_unlock(&tmp_client->hwctx_srcu, idx);
- goto out;
- }
- hw_i++;
- }
- srcu_read_unlock(&tmp_client->hwctx_srcu, idx);
- }
-
- if (overflow) {
- XDNA_ERR(xdna, "Invalid buffer size. Given: %u Need: %u.",
- args->buffer_size, req_bytes);
- ret = -EINVAL;
+ ret = amdxdna_hwctx_walk(tmp_client, &array_args,
+ aie2_hwctx_status_cb);
+ if (ret)
+ break;
}
-out:
- kfree(tmp);
- args->buffer_size = req_bytes;
+ args->buffer_size -= (u32)(array_args.buffer - args->buffer);
return ret;
}
@@ -854,6 +888,58 @@ static int aie2_get_info(struct amdxdna_client *client, struct amdxdna_drm_get_i
return ret;
}
+static int aie2_query_ctx_status_array(struct amdxdna_client *client,
+ struct amdxdna_drm_get_array *args)
+{
+ struct amdxdna_drm_get_array array_args;
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_client *tmp_client;
+ int ret;
+
+ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+
+ array_args.element_size = min(args->element_size,
+ sizeof(struct amdxdna_drm_hwctx_entry));
+ array_args.buffer = args->buffer;
+ array_args.num_element = args->num_element * args->element_size /
+ array_args.element_size;
+ list_for_each_entry(tmp_client, &xdna->client_list, node) {
+ ret = amdxdna_hwctx_walk(tmp_client, &array_args,
+ aie2_hwctx_status_cb);
+ if (ret)
+ break;
+ }
+
+ args->element_size = array_args.element_size;
+ args->num_element = (u32)((array_args.buffer - args->buffer) /
+ args->element_size);
+
+ return ret;
+}
+
+static int aie2_get_array(struct amdxdna_client *client,
+ struct amdxdna_drm_get_array *args)
+{
+ struct amdxdna_dev *xdna = client->xdna;
+ int ret, idx;
+
+ if (!drm_dev_enter(&xdna->ddev, &idx))
+ return -ENODEV;
+
+ switch (args->param) {
+ case DRM_AMDXDNA_HW_CONTEXT_ALL:
+ ret = aie2_query_ctx_status_array(client, args);
+ break;
+ default:
+ XDNA_ERR(xdna, "Not supported request parameter %u", args->param);
+ ret = -EOPNOTSUPP;
+ }
+ XDNA_DBG(xdna, "Got param %d", args->param);
+
+ drm_dev_exit(idx);
+ return ret;
+}
+
static int aie2_set_power_mode(struct amdxdna_client *client,
struct amdxdna_drm_set_state *args)
{
@@ -903,17 +989,16 @@ static int aie2_set_state(struct amdxdna_client *client,
}
const struct amdxdna_dev_ops aie2_ops = {
- .init = aie2_init,
- .fini = aie2_fini,
- .resume = aie2_hw_start,
- .suspend = aie2_hw_stop,
- .get_aie_info = aie2_get_info,
- .set_aie_state = aie2_set_state,
- .hwctx_init = aie2_hwctx_init,
- .hwctx_fini = aie2_hwctx_fini,
- .hwctx_config = aie2_hwctx_config,
- .cmd_submit = aie2_cmd_submit,
+ .init = aie2_init,
+ .fini = aie2_fini,
+ .resume = aie2_hw_resume,
+ .suspend = aie2_hw_suspend,
+ .get_aie_info = aie2_get_info,
+ .set_aie_state = aie2_set_state,
+ .hwctx_init = aie2_hwctx_init,
+ .hwctx_fini = aie2_hwctx_fini,
+ .hwctx_config = aie2_hwctx_config,
+ .cmd_submit = aie2_cmd_submit,
.hmm_invalidate = aie2_hmm_invalidate,
- .hwctx_suspend = aie2_hwctx_suspend,
- .hwctx_resume = aie2_hwctx_resume,
+ .get_array = aie2_get_array,
};
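
aie2_hw_suspend() and aie2_hw_resume() above use the scope-based guard(mutex)() helper from <linux/cleanup.h>, which unlocks automatically on every return path. A hedged sketch of the idiom with generic names (the work callback is illustrative):

#include <linux/cleanup.h>
#include <linux/mutex.h>

static int do_work_locked(struct mutex *lock, int (*work)(void *), void *arg)
{
	guard(mutex)(lock);	/* unlocked when the scope is left */

	if (!work)
		return -EINVAL;	/* no explicit unlock needed on early return */

	return work(arg);
}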
diff --git a/drivers/accel/amdxdna/aie2_pci.h b/drivers/accel/amdxdna/aie2_pci.h
index 385914840eaa..91a8e948f82a 100644
--- a/drivers/accel/amdxdna/aie2_pci.h
+++ b/drivers/accel/amdxdna/aie2_pci.h
@@ -288,10 +288,9 @@ int aie2_sync_bo(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
int aie2_hwctx_init(struct amdxdna_hwctx *hwctx);
void aie2_hwctx_fini(struct amdxdna_hwctx *hwctx);
int aie2_hwctx_config(struct amdxdna_hwctx *hwctx, u32 type, u64 value, void *buf, u32 size);
-void aie2_hwctx_suspend(struct amdxdna_hwctx *hwctx);
-void aie2_hwctx_resume(struct amdxdna_hwctx *hwctx);
+void aie2_hwctx_suspend(struct amdxdna_client *client);
+int aie2_hwctx_resume(struct amdxdna_client *client);
int aie2_cmd_submit(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, u64 *seq);
void aie2_hmm_invalidate(struct amdxdna_gem_obj *abo, unsigned long cur_seq);
-void aie2_restart_ctx(struct amdxdna_client *client);
#endif /* _AIE2_PCI_H_ */
diff --git a/drivers/accel/amdxdna/amdxdna_ctx.c b/drivers/accel/amdxdna/amdxdna_ctx.c
index be073224bd69..4bfe4ef20550 100644
--- a/drivers/accel/amdxdna/amdxdna_ctx.c
+++ b/drivers/accel/amdxdna/amdxdna_ctx.c
@@ -60,32 +60,6 @@ static struct dma_fence *amdxdna_fence_create(struct amdxdna_hwctx *hwctx)
return &fence->base;
}
-void amdxdna_hwctx_suspend(struct amdxdna_client *client)
-{
- struct amdxdna_dev *xdna = client->xdna;
- struct amdxdna_hwctx *hwctx;
- unsigned long hwctx_id;
-
- drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
- mutex_lock(&client->hwctx_lock);
- amdxdna_for_each_hwctx(client, hwctx_id, hwctx)
- xdna->dev_info->ops->hwctx_suspend(hwctx);
- mutex_unlock(&client->hwctx_lock);
-}
-
-void amdxdna_hwctx_resume(struct amdxdna_client *client)
-{
- struct amdxdna_dev *xdna = client->xdna;
- struct amdxdna_hwctx *hwctx;
- unsigned long hwctx_id;
-
- drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
- mutex_lock(&client->hwctx_lock);
- amdxdna_for_each_hwctx(client, hwctx_id, hwctx)
- xdna->dev_info->ops->hwctx_resume(hwctx);
- mutex_unlock(&client->hwctx_lock);
-}
-
static void amdxdna_hwctx_destroy_rcu(struct amdxdna_hwctx *hwctx,
struct srcu_struct *ss)
{
@@ -94,14 +68,30 @@ static void amdxdna_hwctx_destroy_rcu(struct amdxdna_hwctx *hwctx,
synchronize_srcu(ss);
/* At this point, user is not able to submit new commands */
- mutex_lock(&xdna->dev_lock);
xdna->dev_info->ops->hwctx_fini(hwctx);
- mutex_unlock(&xdna->dev_lock);
kfree(hwctx->name);
kfree(hwctx);
}
+int amdxdna_hwctx_walk(struct amdxdna_client *client, void *arg,
+ int (*walk)(struct amdxdna_hwctx *hwctx, void *arg))
+{
+ struct amdxdna_hwctx *hwctx;
+ unsigned long hwctx_id;
+ int ret = 0, idx;
+
+ idx = srcu_read_lock(&client->hwctx_srcu);
+ amdxdna_for_each_hwctx(client, hwctx_id, hwctx) {
+ ret = walk(hwctx, arg);
+ if (ret)
+ break;
+ }
+ srcu_read_unlock(&client->hwctx_srcu, idx);
+
+ return ret;
+}
+
void *amdxdna_cmd_get_payload(struct amdxdna_gem_obj *abo, u32 *size)
{
struct amdxdna_cmd *cmd = abo->mem.kva;
@@ -152,16 +142,12 @@ void amdxdna_hwctx_remove_all(struct amdxdna_client *client)
struct amdxdna_hwctx *hwctx;
unsigned long hwctx_id;
- mutex_lock(&client->hwctx_lock);
amdxdna_for_each_hwctx(client, hwctx_id, hwctx) {
XDNA_DBG(client->xdna, "PID %d close HW context %d",
client->pid, hwctx->id);
xa_erase(&client->hwctx_xa, hwctx->id);
- mutex_unlock(&client->hwctx_lock);
amdxdna_hwctx_destroy_rcu(hwctx, &client->hwctx_srcu);
- mutex_lock(&client->hwctx_lock);
}
- mutex_unlock(&client->hwctx_lock);
}
int amdxdna_drm_create_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
@@ -251,6 +237,7 @@ int amdxdna_drm_destroy_hwctx_ioctl(struct drm_device *dev, void *data, struct d
if (!drm_dev_enter(dev, &idx))
return -ENODEV;
+ mutex_lock(&xdna->dev_lock);
hwctx = xa_erase(&client->hwctx_xa, args->handle);
if (!hwctx) {
ret = -EINVAL;
@@ -267,6 +254,7 @@ int amdxdna_drm_destroy_hwctx_ioctl(struct drm_device *dev, void *data, struct d
XDNA_DBG(xdna, "PID %d destroyed HW context %d", client->pid, args->handle);
out:
+ mutex_unlock(&xdna->dev_lock);
drm_dev_exit(idx);
return ret;
}
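
amdxdna_hwctx_walk() iterates under srcu_read_lock(), which pairs with the xa_erase() + synchronize_srcu() sequence in amdxdna_hwctx_destroy_rcu() above: a context is unpublished first, then the updater waits for in-flight readers before freeing. A condensed sketch of that pairing, with generic names:

#include <linux/slab.h>
#include <linux/srcu.h>
#include <linux/xarray.h>

struct entry { unsigned long id; };

static void remove_entry(struct xarray *xa, struct srcu_struct *ss,
			 struct entry *e)
{
	xa_erase(xa, e->id);	/* new walkers can no longer find it */
	synchronize_srcu(ss);	/* wait for walkers already inside */
	kfree(e);		/* now safe to free */
}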
diff --git a/drivers/accel/amdxdna/amdxdna_ctx.h b/drivers/accel/amdxdna/amdxdna_ctx.h
index f0a4a8586d85..7cd7a55936f0 100644
--- a/drivers/accel/amdxdna/amdxdna_ctx.h
+++ b/drivers/accel/amdxdna/amdxdna_ctx.h
@@ -139,16 +139,10 @@ amdxdna_cmd_get_state(struct amdxdna_gem_obj *abo)
void *amdxdna_cmd_get_payload(struct amdxdna_gem_obj *abo, u32 *size);
int amdxdna_cmd_get_cu_idx(struct amdxdna_gem_obj *abo);
-static inline u32 amdxdna_hwctx_col_map(struct amdxdna_hwctx *hwctx)
-{
- return GENMASK(hwctx->start_col + hwctx->num_col - 1,
- hwctx->start_col);
-}
-
void amdxdna_sched_job_cleanup(struct amdxdna_sched_job *job);
void amdxdna_hwctx_remove_all(struct amdxdna_client *client);
-void amdxdna_hwctx_suspend(struct amdxdna_client *client);
-void amdxdna_hwctx_resume(struct amdxdna_client *client);
+int amdxdna_hwctx_walk(struct amdxdna_client *client, void *arg,
+ int (*walk)(struct amdxdna_hwctx *hwctx, void *arg));
int amdxdna_cmd_submit(struct amdxdna_client *client,
u32 cmd_bo_hdls, u32 *arg_bo_hdls, u32 arg_bo_cnt,
diff --git a/drivers/accel/amdxdna/amdxdna_gem.c b/drivers/accel/amdxdna/amdxdna_gem.c
index 0f85a0105178..d407a36eb412 100644
--- a/drivers/accel/amdxdna/amdxdna_gem.c
+++ b/drivers/accel/amdxdna/amdxdna_gem.c
@@ -18,6 +18,7 @@
#include "amdxdna_ctx.h"
#include "amdxdna_gem.h"
#include "amdxdna_pci_drv.h"
+#include "amdxdna_ubuf.h"
#define XDNA_MAX_CMD_BO_SIZE SZ_32K
@@ -296,7 +297,7 @@ static int amdxdna_insert_pages(struct amdxdna_gem_obj *abo,
vma->vm_private_data = NULL;
vma->vm_ops = NULL;
- ret = dma_buf_mmap(to_gobj(abo)->dma_buf, vma, 0);
+ ret = dma_buf_mmap(abo->dma_buf, vma, 0);
if (ret) {
XDNA_ERR(xdna, "Failed to mmap dma buf %d", ret);
return ret;
@@ -391,10 +392,47 @@ static const struct dma_buf_ops amdxdna_dmabuf_ops = {
.vunmap = drm_gem_dmabuf_vunmap,
};
+static int amdxdna_gem_obj_vmap(struct drm_gem_object *obj, struct iosys_map *map)
+{
+ struct amdxdna_gem_obj *abo = to_xdna_obj(obj);
+
+ iosys_map_clear(map);
+
+ dma_resv_assert_held(obj->resv);
+
+ if (is_import_bo(abo))
+ dma_buf_vmap(abo->dma_buf, map);
+ else
+ drm_gem_shmem_object_vmap(obj, map);
+
+ if (!map->vaddr)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void amdxdna_gem_obj_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
+{
+ struct amdxdna_gem_obj *abo = to_xdna_obj(obj);
+
+ dma_resv_assert_held(obj->resv);
+
+ if (is_import_bo(abo))
+ dma_buf_vunmap(abo->dma_buf, map);
+ else
+ drm_gem_shmem_object_vunmap(obj, map);
+}
+
static struct dma_buf *amdxdna_gem_prime_export(struct drm_gem_object *gobj, int flags)
{
+ struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+ if (abo->dma_buf) {
+ get_dma_buf(abo->dma_buf);
+ return abo->dma_buf;
+ }
+
exp_info.ops = &amdxdna_dmabuf_ops;
exp_info.size = gobj->size;
exp_info.flags = flags;
@@ -451,8 +489,8 @@ static const struct drm_gem_object_funcs amdxdna_gem_shmem_funcs = {
.pin = drm_gem_shmem_object_pin,
.unpin = drm_gem_shmem_object_unpin,
.get_sg_table = drm_gem_shmem_object_get_sg_table,
- .vmap = drm_gem_shmem_object_vmap,
- .vunmap = drm_gem_shmem_object_vunmap,
+ .vmap = amdxdna_gem_obj_vmap,
+ .vunmap = amdxdna_gem_obj_vunmap,
.mmap = amdxdna_gem_obj_mmap,
.vm_ops = &drm_gem_shmem_vm_ops,
.export = amdxdna_gem_prime_export,
@@ -494,6 +532,68 @@ amdxdna_gem_create_object_cb(struct drm_device *dev, size_t size)
return to_gobj(abo);
}
+static struct amdxdna_gem_obj *
+amdxdna_gem_create_shmem_object(struct drm_device *dev, size_t size)
+{
+ struct drm_gem_shmem_object *shmem = drm_gem_shmem_create(dev, size);
+
+ if (IS_ERR(shmem))
+ return ERR_CAST(shmem);
+
+ shmem->map_wc = false;
+ return to_xdna_obj(&shmem->base);
+}
+
+static struct amdxdna_gem_obj *
+amdxdna_gem_create_ubuf_object(struct drm_device *dev, struct amdxdna_drm_create_bo *args)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ enum amdxdna_ubuf_flag flags = 0;
+ struct amdxdna_drm_va_tbl va_tbl;
+ struct drm_gem_object *gobj;
+ struct dma_buf *dma_buf;
+
+ if (copy_from_user(&va_tbl, u64_to_user_ptr(args->vaddr), sizeof(va_tbl))) {
+ XDNA_DBG(xdna, "Access va table failed");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (va_tbl.num_entries) {
+ if (args->type == AMDXDNA_BO_CMD)
+ flags |= AMDXDNA_UBUF_FLAG_MAP_DMA;
+
+ dma_buf = amdxdna_get_ubuf(dev, flags, va_tbl.num_entries,
+ u64_to_user_ptr(args->vaddr + sizeof(va_tbl)));
+ } else {
+ dma_buf = dma_buf_get(va_tbl.dmabuf_fd);
+ }
+
+ if (IS_ERR(dma_buf))
+ return ERR_CAST(dma_buf);
+
+ gobj = amdxdna_gem_prime_import(dev, dma_buf);
+ if (IS_ERR(gobj)) {
+ dma_buf_put(dma_buf);
+ return ERR_CAST(gobj);
+ }
+
+ dma_buf_put(dma_buf);
+
+ return to_xdna_obj(gobj);
+}
+
+static struct amdxdna_gem_obj *
+amdxdna_gem_create_object(struct drm_device *dev,
+ struct amdxdna_drm_create_bo *args)
+{
+ size_t aligned_sz = PAGE_ALIGN(args->size);
+
+ if (args->vaddr)
+ return amdxdna_gem_create_ubuf_object(dev, args);
+
+ return amdxdna_gem_create_shmem_object(dev, aligned_sz);
+}
+
struct drm_gem_object *
amdxdna_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf)
{
@@ -545,16 +645,12 @@ amdxdna_drm_alloc_shmem(struct drm_device *dev,
struct drm_file *filp)
{
struct amdxdna_client *client = filp->driver_priv;
- struct drm_gem_shmem_object *shmem;
struct amdxdna_gem_obj *abo;
- shmem = drm_gem_shmem_create(dev, args->size);
- if (IS_ERR(shmem))
- return ERR_CAST(shmem);
-
- shmem->map_wc = false;
+ abo = amdxdna_gem_create_object(dev, args);
+ if (IS_ERR(abo))
+ return ERR_CAST(abo);
- abo = to_xdna_obj(&shmem->base);
abo->client = client;
abo->type = AMDXDNA_BO_SHMEM;
@@ -569,7 +665,6 @@ amdxdna_drm_create_dev_heap(struct drm_device *dev,
struct amdxdna_client *client = filp->driver_priv;
struct iosys_map map = IOSYS_MAP_INIT_VADDR(NULL);
struct amdxdna_dev *xdna = to_xdna_dev(dev);
- struct drm_gem_shmem_object *shmem;
struct amdxdna_gem_obj *abo;
int ret;
@@ -586,14 +681,12 @@ amdxdna_drm_create_dev_heap(struct drm_device *dev,
goto mm_unlock;
}
- shmem = drm_gem_shmem_create(dev, args->size);
- if (IS_ERR(shmem)) {
- ret = PTR_ERR(shmem);
+ abo = amdxdna_gem_create_object(dev, args);
+ if (IS_ERR(abo)) {
+ ret = PTR_ERR(abo);
goto mm_unlock;
}
- shmem->map_wc = false;
- abo = to_xdna_obj(&shmem->base);
abo->type = AMDXDNA_BO_DEV_HEAP;
abo->client = client;
abo->mem.dev_addr = client->xdna->dev_info->dev_mem_base;
@@ -657,7 +750,6 @@ amdxdna_drm_create_cmd_bo(struct drm_device *dev,
{
struct iosys_map map = IOSYS_MAP_INIT_VADDR(NULL);
struct amdxdna_dev *xdna = to_xdna_dev(dev);
- struct drm_gem_shmem_object *shmem;
struct amdxdna_gem_obj *abo;
int ret;
@@ -671,12 +763,9 @@ amdxdna_drm_create_cmd_bo(struct drm_device *dev,
return ERR_PTR(-EINVAL);
}
- shmem = drm_gem_shmem_create(dev, args->size);
- if (IS_ERR(shmem))
- return ERR_CAST(shmem);
-
- shmem->map_wc = false;
- abo = to_xdna_obj(&shmem->base);
+ abo = amdxdna_gem_create_object(dev, args);
+ if (IS_ERR(abo))
+ return ERR_CAST(abo);
abo->type = AMDXDNA_BO_CMD;
abo->client = filp->driver_priv;
@@ -691,7 +780,7 @@ amdxdna_drm_create_cmd_bo(struct drm_device *dev,
return abo;
release_obj:
- drm_gem_shmem_free(shmem);
+ drm_gem_object_put(to_gobj(abo));
return ERR_PTR(ret);
}
@@ -702,7 +791,7 @@ int amdxdna_drm_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_f
struct amdxdna_gem_obj *abo;
int ret;
- if (args->flags || args->vaddr || !args->size)
+ if (args->flags)
return -EINVAL;
XDNA_DBG(xdna, "BO arg type %d vaddr 0x%llx size 0x%llx flags 0x%llx",
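
For the ubuf path above, userspace passes args->vaddr pointing at a struct amdxdna_drm_va_tbl immediately followed by num_entries address/length records, each page-aligned. A hedged sketch of that layout (the wrapper struct is illustrative; the member types are the uapi structs used by the patch):

#include <drm/amdxdna_accel.h>

struct ubuf_desc {
	struct amdxdna_drm_va_tbl tbl;		/* dmabuf_fd, or num_entries > 0 */
	struct amdxdna_drm_va_entry ent[];	/* num_entries {vaddr, len} pairs */
};

When num_entries is zero the table degenerates to a dma-buf import via tbl.dmabuf_fd; otherwise amdxdna_get_ubuf() pins the listed ranges and wraps them in a new dma-buf.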
diff --git a/drivers/accel/amdxdna/amdxdna_pci_drv.c b/drivers/accel/amdxdna/amdxdna_pci_drv.c
index f2bf1d374cc7..569cd703729d 100644
--- a/drivers/accel/amdxdna/amdxdna_pci_drv.c
+++ b/drivers/accel/amdxdna/amdxdna_pci_drv.c
@@ -27,6 +27,13 @@ MODULE_FIRMWARE("amdnpu/17f0_11/npu.sbin");
MODULE_FIRMWARE("amdnpu/17f0_20/npu.sbin");
/*
+ * 0.0: Initial version
+ * 0.1: Support getting all hardware contexts by DRM_IOCTL_AMDXDNA_GET_ARRAY
+ */
+#define AMDXDNA_DRIVER_MAJOR 0
+#define AMDXDNA_DRIVER_MINOR 1
+
+/*
* Bind the driver based on the (vendor_id, device_id) pair and later use the
* (device_id, rev_id) pair as a key to select the devices. Devices with the
* same device_id have a very similar interface to the host driver.
@@ -81,7 +88,6 @@ static int amdxdna_drm_open(struct drm_device *ddev, struct drm_file *filp)
ret = -ENODEV;
goto unbind_sva;
}
- mutex_init(&client->hwctx_lock);
init_srcu_struct(&client->hwctx_srcu);
xa_init_flags(&client->hwctx_xa, XA_FLAGS_ALLOC);
mutex_init(&client->mm_lock);
@@ -116,7 +122,6 @@ static void amdxdna_drm_close(struct drm_device *ddev, struct drm_file *filp)
xa_destroy(&client->hwctx_xa);
cleanup_srcu_struct(&client->hwctx_srcu);
- mutex_destroy(&client->hwctx_lock);
mutex_destroy(&client->mm_lock);
if (client->dev_heap)
drm_gem_object_put(to_gobj(client->dev_heap));
@@ -142,8 +147,8 @@ static int amdxdna_flush(struct file *f, fl_owner_t id)
mutex_lock(&xdna->dev_lock);
list_del_init(&client->node);
- mutex_unlock(&xdna->dev_lock);
amdxdna_hwctx_remove_all(client);
+ mutex_unlock(&xdna->dev_lock);
drm_dev_exit(idx);
return 0;
@@ -166,6 +171,23 @@ static int amdxdna_drm_get_info_ioctl(struct drm_device *dev, void *data, struct
return ret;
}
+static int amdxdna_drm_get_array_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct amdxdna_drm_get_array *args = data;
+
+ if (!xdna->dev_info->ops->get_array)
+ return -EOPNOTSUPP;
+
+ if (args->pad || !args->num_element || !args->element_size)
+ return -EINVAL;
+
+ guard(mutex)(&xdna->dev_lock);
+ return xdna->dev_info->ops->get_array(client, args);
+}
+
static int amdxdna_drm_set_state_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
struct amdxdna_client *client = filp->driver_priv;
@@ -197,6 +219,7 @@ static const struct drm_ioctl_desc amdxdna_drm_ioctls[] = {
DRM_IOCTL_DEF_DRV(AMDXDNA_EXEC_CMD, amdxdna_drm_submit_cmd_ioctl, 0),
/* AIE hardware */
DRM_IOCTL_DEF_DRV(AMDXDNA_GET_INFO, amdxdna_drm_get_info_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(AMDXDNA_GET_ARRAY, amdxdna_drm_get_array_ioctl, 0),
DRM_IOCTL_DEF_DRV(AMDXDNA_SET_STATE, amdxdna_drm_set_state_ioctl, DRM_ROOT_ONLY),
};
@@ -220,6 +243,8 @@ const struct drm_driver amdxdna_drm_drv = {
.fops = &amdxdna_fops,
.name = "amdxdna_accel_driver",
.desc = "AMD XDNA DRM implementation",
+ .major = AMDXDNA_DRIVER_MAJOR,
+ .minor = AMDXDNA_DRIVER_MINOR,
.open = amdxdna_drm_open,
.postclose = amdxdna_drm_close,
.ioctls = amdxdna_drm_ioctls,
@@ -330,11 +355,8 @@ static void amdxdna_remove(struct pci_dev *pdev)
struct amdxdna_client, node);
while (client) {
list_del_init(&client->node);
- mutex_unlock(&xdna->dev_lock);
-
amdxdna_hwctx_remove_all(client);
- mutex_lock(&xdna->dev_lock);
client = list_first_entry_or_null(&xdna->client_list,
struct amdxdna_client, node);
}
@@ -343,89 +365,29 @@ static void amdxdna_remove(struct pci_dev *pdev)
mutex_unlock(&xdna->dev_lock);
}
-static int amdxdna_dev_suspend_nolock(struct amdxdna_dev *xdna)
-{
- if (xdna->dev_info->ops->suspend)
- xdna->dev_info->ops->suspend(xdna);
-
- return 0;
-}
-
-static int amdxdna_dev_resume_nolock(struct amdxdna_dev *xdna)
-{
- if (xdna->dev_info->ops->resume)
- return xdna->dev_info->ops->resume(xdna);
-
- return 0;
-}
-
static int amdxdna_pmops_suspend(struct device *dev)
{
struct amdxdna_dev *xdna = pci_get_drvdata(to_pci_dev(dev));
- struct amdxdna_client *client;
-
- mutex_lock(&xdna->dev_lock);
- list_for_each_entry(client, &xdna->client_list, node)
- amdxdna_hwctx_suspend(client);
- amdxdna_dev_suspend_nolock(xdna);
- mutex_unlock(&xdna->dev_lock);
+ if (!xdna->dev_info->ops->suspend)
+ return -EOPNOTSUPP;
- return 0;
+ return xdna->dev_info->ops->suspend(xdna);
}
static int amdxdna_pmops_resume(struct device *dev)
{
struct amdxdna_dev *xdna = pci_get_drvdata(to_pci_dev(dev));
- struct amdxdna_client *client;
- int ret;
-
- XDNA_INFO(xdna, "firmware resuming...");
- mutex_lock(&xdna->dev_lock);
- ret = amdxdna_dev_resume_nolock(xdna);
- if (ret) {
- XDNA_ERR(xdna, "resume NPU firmware failed");
- mutex_unlock(&xdna->dev_lock);
- return ret;
- }
-
- XDNA_INFO(xdna, "hardware context resuming...");
- list_for_each_entry(client, &xdna->client_list, node)
- amdxdna_hwctx_resume(client);
- mutex_unlock(&xdna->dev_lock);
-
- return 0;
-}
-
-static int amdxdna_rpmops_suspend(struct device *dev)
-{
- struct amdxdna_dev *xdna = pci_get_drvdata(to_pci_dev(dev));
- int ret;
-
- mutex_lock(&xdna->dev_lock);
- ret = amdxdna_dev_suspend_nolock(xdna);
- mutex_unlock(&xdna->dev_lock);
- XDNA_DBG(xdna, "Runtime suspend done ret: %d", ret);
- return ret;
-}
-
-static int amdxdna_rpmops_resume(struct device *dev)
-{
- struct amdxdna_dev *xdna = pci_get_drvdata(to_pci_dev(dev));
- int ret;
-
- mutex_lock(&xdna->dev_lock);
- ret = amdxdna_dev_resume_nolock(xdna);
- mutex_unlock(&xdna->dev_lock);
+ if (!xdna->dev_info->ops->resume)
+ return -EOPNOTSUPP;
- XDNA_DBG(xdna, "Runtime resume done ret: %d", ret);
- return ret;
+ return xdna->dev_info->ops->resume(xdna);
}
static const struct dev_pm_ops amdxdna_pm_ops = {
SYSTEM_SLEEP_PM_OPS(amdxdna_pmops_suspend, amdxdna_pmops_resume)
- RUNTIME_PM_OPS(amdxdna_rpmops_suspend, amdxdna_rpmops_resume, NULL)
+ RUNTIME_PM_OPS(amdxdna_pmops_suspend, amdxdna_pmops_resume, NULL)
};
static struct pci_driver amdxdna_pci_driver = {
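
From userspace, the new query is reached through DRM_IOCTL_AMDXDNA_GET_ARRAY. A hedged sketch of a caller; the field names follow the checks in amdxdna_drm_get_array_ioctl(), and error handling is trimmed:

#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <drm/amdxdna_accel.h>

static void dump_hwctx(int fd)
{
	struct amdxdna_drm_hwctx_entry entries[16] = {};
	struct amdxdna_drm_get_array args = {
		.param = DRM_AMDXDNA_HW_CONTEXT_ALL,
		.element_size = sizeof(entries[0]),
		.num_element = 16,
		.buffer = (uintptr_t)entries,
	};

	/* On success, num_element holds how many entries were written */
	if (ioctl(fd, DRM_IOCTL_AMDXDNA_GET_ARRAY, &args) == 0)
		printf("%u hardware context(s)\n", args.num_element);
}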
diff --git a/drivers/accel/amdxdna/amdxdna_pci_drv.h b/drivers/accel/amdxdna/amdxdna_pci_drv.h
index ab79600911aa..72d6696d49da 100644
--- a/drivers/accel/amdxdna/amdxdna_pci_drv.h
+++ b/drivers/accel/amdxdna/amdxdna_pci_drv.h
@@ -50,16 +50,15 @@ struct amdxdna_dev_ops {
int (*init)(struct amdxdna_dev *xdna);
void (*fini)(struct amdxdna_dev *xdna);
int (*resume)(struct amdxdna_dev *xdna);
- void (*suspend)(struct amdxdna_dev *xdna);
+ int (*suspend)(struct amdxdna_dev *xdna);
int (*hwctx_init)(struct amdxdna_hwctx *hwctx);
void (*hwctx_fini)(struct amdxdna_hwctx *hwctx);
int (*hwctx_config)(struct amdxdna_hwctx *hwctx, u32 type, u64 value, void *buf, u32 size);
void (*hmm_invalidate)(struct amdxdna_gem_obj *abo, unsigned long cur_seq);
- void (*hwctx_suspend)(struct amdxdna_hwctx *hwctx);
- void (*hwctx_resume)(struct amdxdna_hwctx *hwctx);
int (*cmd_submit)(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, u64 *seq);
int (*get_aie_info)(struct amdxdna_client *client, struct amdxdna_drm_get_info *args);
int (*set_aie_state)(struct amdxdna_client *client, struct amdxdna_drm_set_state *args);
+ int (*get_array)(struct amdxdna_client *client, struct amdxdna_drm_get_array *args);
};
/*
@@ -118,8 +117,6 @@ struct amdxdna_device_id {
struct amdxdna_client {
struct list_head node;
pid_t pid;
- struct mutex hwctx_lock; /* protect hwctx */
- /* do NOT wait this srcu when hwctx_lock is held */
struct srcu_struct hwctx_srcu;
struct xarray hwctx_xa;
u32 next_hwctxid;
diff --git a/drivers/accel/amdxdna/amdxdna_ubuf.c b/drivers/accel/amdxdna/amdxdna_ubuf.c
new file mode 100644
index 000000000000..077b2261cf2a
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_ubuf.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
+#include <linux/dma-buf.h>
+#include <linux/pagemap.h>
+#include <linux/vmalloc.h>
+
+#include "amdxdna_pci_drv.h"
+#include "amdxdna_ubuf.h"
+
+struct amdxdna_ubuf_priv {
+ struct page **pages;
+ u64 nr_pages;
+ enum amdxdna_ubuf_flag flags;
+ struct mm_struct *mm;
+};
+
+static struct sg_table *amdxdna_ubuf_map(struct dma_buf_attachment *attach,
+ enum dma_data_direction direction)
+{
+ struct amdxdna_ubuf_priv *ubuf = attach->dmabuf->priv;
+ struct sg_table *sg;
+ int ret;
+
+ sg = kzalloc(sizeof(*sg), GFP_KERNEL);
+ if (!sg)
+ return ERR_PTR(-ENOMEM);
+
+ ret = sg_alloc_table_from_pages(sg, ubuf->pages, ubuf->nr_pages, 0,
+ ubuf->nr_pages << PAGE_SHIFT, GFP_KERNEL);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (ubuf->flags & AMDXDNA_UBUF_FLAG_MAP_DMA) {
+ ret = dma_map_sgtable(attach->dev, sg, direction, 0);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ return sg;
+}
+
+static void amdxdna_ubuf_unmap(struct dma_buf_attachment *attach,
+ struct sg_table *sg,
+ enum dma_data_direction direction)
+{
+ struct amdxdna_ubuf_priv *ubuf = attach->dmabuf->priv;
+
+ if (ubuf->flags & AMDXDNA_UBUF_FLAG_MAP_DMA)
+ dma_unmap_sgtable(attach->dev, sg, direction, 0);
+
+ sg_free_table(sg);
+ kfree(sg);
+}
+
+static void amdxdna_ubuf_release(struct dma_buf *dbuf)
+{
+ struct amdxdna_ubuf_priv *ubuf = dbuf->priv;
+
+ unpin_user_pages(ubuf->pages, ubuf->nr_pages);
+ kvfree(ubuf->pages);
+ atomic64_sub(ubuf->nr_pages, &ubuf->mm->pinned_vm);
+ mmdrop(ubuf->mm);
+ kfree(ubuf);
+}
+
+static vm_fault_t amdxdna_ubuf_vm_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct amdxdna_ubuf_priv *ubuf;
+ unsigned long pfn;
+ pgoff_t pgoff;
+
+ ubuf = vma->vm_private_data;
+ pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
+
+ pfn = page_to_pfn(ubuf->pages[pgoff]);
+ return vmf_insert_pfn(vma, vmf->address, pfn);
+}
+
+static const struct vm_operations_struct amdxdna_ubuf_vm_ops = {
+ .fault = amdxdna_ubuf_vm_fault,
+};
+
+static int amdxdna_ubuf_mmap(struct dma_buf *dbuf, struct vm_area_struct *vma)
+{
+ struct amdxdna_ubuf_priv *ubuf = dbuf->priv;
+
+ vma->vm_ops = &amdxdna_ubuf_vm_ops;
+ vma->vm_private_data = ubuf;
+ vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
+
+ return 0;
+}
+
+static int amdxdna_ubuf_vmap(struct dma_buf *dbuf, struct iosys_map *map)
+{
+ struct amdxdna_ubuf_priv *ubuf = dbuf->priv;
+ void *kva;
+
+ kva = vmap(ubuf->pages, ubuf->nr_pages, VM_MAP, PAGE_KERNEL);
+ if (!kva)
+ return -EINVAL;
+
+ iosys_map_set_vaddr(map, kva);
+ return 0;
+}
+
+static void amdxdna_ubuf_vunmap(struct dma_buf *dbuf, struct iosys_map *map)
+{
+ vunmap(map->vaddr);
+}
+
+static const struct dma_buf_ops amdxdna_ubuf_dmabuf_ops = {
+ .map_dma_buf = amdxdna_ubuf_map,
+ .unmap_dma_buf = amdxdna_ubuf_unmap,
+ .release = amdxdna_ubuf_release,
+ .mmap = amdxdna_ubuf_mmap,
+ .vmap = amdxdna_ubuf_vmap,
+ .vunmap = amdxdna_ubuf_vunmap,
+};
+
+struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
+ enum amdxdna_ubuf_flag flags,
+ u32 num_entries, void __user *va_entries)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ unsigned long lock_limit, new_pinned;
+ struct amdxdna_drm_va_entry *va_ent;
+ struct amdxdna_ubuf_priv *ubuf;
+ u32 npages, start = 0;
+ struct dma_buf *dbuf;
+ int i, ret;
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+ if (!can_do_mlock())
+ return ERR_PTR(-EPERM);
+
+ ubuf = kzalloc(sizeof(*ubuf), GFP_KERNEL);
+ if (!ubuf)
+ return ERR_PTR(-ENOMEM);
+
+ ubuf->flags = flags;
+ ubuf->mm = current->mm;
+ mmgrab(ubuf->mm);
+
+ va_ent = kvcalloc(num_entries, sizeof(*va_ent), GFP_KERNEL);
+ if (!va_ent) {
+ ret = -ENOMEM;
+ goto free_ubuf;
+ }
+
+ if (copy_from_user(va_ent, va_entries, sizeof(*va_ent) * num_entries)) {
+ XDNA_DBG(xdna, "Access va entries failed");
+ ret = -EINVAL;
+ goto free_ent;
+ }
+
+ for (i = 0, exp_info.size = 0; i < num_entries; i++) {
+ if (!IS_ALIGNED(va_ent[i].vaddr, PAGE_SIZE) ||
+ !IS_ALIGNED(va_ent[i].len, PAGE_SIZE)) {
+ XDNA_ERR(xdna, "Invalid address or len %llx, %llx",
+ va_ent[i].vaddr, va_ent[i].len);
+ ret = -EINVAL;
+ goto free_ent;
+ }
+
+ exp_info.size += va_ent[i].len;
+ }
+
+ ubuf->nr_pages = exp_info.size >> PAGE_SHIFT;
+ lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+ new_pinned = atomic64_add_return(ubuf->nr_pages, &ubuf->mm->pinned_vm);
+ if (new_pinned > lock_limit && !capable(CAP_IPC_LOCK)) {
+ XDNA_DBG(xdna, "New pin %ld, limit %ld, cap %d",
+ new_pinned, lock_limit, capable(CAP_IPC_LOCK));
+ ret = -ENOMEM;
+ goto sub_pin_cnt;
+ }
+
+ ubuf->pages = kvmalloc_array(ubuf->nr_pages, sizeof(*ubuf->pages), GFP_KERNEL);
+ if (!ubuf->pages) {
+ ret = -ENOMEM;
+ goto sub_pin_cnt;
+ }
+
+ for (i = 0; i < num_entries; i++) {
+ npages = va_ent[i].len >> PAGE_SHIFT;
+
+ ret = pin_user_pages_fast(va_ent[i].vaddr, npages,
+ FOLL_WRITE | FOLL_LONGTERM,
+ &ubuf->pages[start]);
+ if (ret < 0 || ret != npages) {
+			XDNA_ERR(xdna, "Failed to pin pages ret %d", ret);
+			ret = -ENOMEM;
+ goto destroy_pages;
+ }
+
+ start += ret;
+ }
+
+ exp_info.ops = &amdxdna_ubuf_dmabuf_ops;
+ exp_info.priv = ubuf;
+ exp_info.flags = O_RDWR | O_CLOEXEC;
+
+ dbuf = dma_buf_export(&exp_info);
+ if (IS_ERR(dbuf)) {
+ ret = PTR_ERR(dbuf);
+ goto destroy_pages;
+ }
+ kvfree(va_ent);
+
+ return dbuf;
+
+destroy_pages:
+ if (start)
+ unpin_user_pages(ubuf->pages, start);
+ kvfree(ubuf->pages);
+sub_pin_cnt:
+ atomic64_sub(ubuf->nr_pages, &ubuf->mm->pinned_vm);
+free_ent:
+ kvfree(va_ent);
+free_ubuf:
+ mmdrop(ubuf->mm);
+ kfree(ubuf);
+ return ERR_PTR(ret);
+}
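
The pinned-page accounting in amdxdna_get_ubuf() follows the usual RLIMIT_MEMLOCK pattern: charge mm->pinned_vm up front, reject the request if the limit would be exceeded without CAP_IPC_LOCK, and uncharge on every failure path. Condensed into a hedged helper (the function itself is illustrative):

#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>

static int charge_pinned_pages(struct mm_struct *mm, u64 nr_pages)
{
	unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	if (atomic64_add_return(nr_pages, &mm->pinned_vm) > limit &&
	    !capable(CAP_IPC_LOCK)) {
		atomic64_sub(nr_pages, &mm->pinned_vm);	/* uncharge on failure */
		return -ENOMEM;
	}
	return 0;
}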
diff --git a/drivers/accel/amdxdna/amdxdna_ubuf.h b/drivers/accel/amdxdna/amdxdna_ubuf.h
new file mode 100644
index 000000000000..e5cb3bdb3ec9
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_ubuf.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2025, Advanced Micro Devices, Inc.
+ */
+#ifndef _AMDXDNA_UBUF_H_
+#define _AMDXDNA_UBUF_H_
+
+#include <drm/drm_device.h>
+#include <linux/dma-buf.h>
+
+enum amdxdna_ubuf_flag {
+ AMDXDNA_UBUF_FLAG_MAP_DMA = 1,
+};
+
+struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
+ enum amdxdna_ubuf_flag flags,
+ u32 num_entries, void __user *va_entries);
+
+#endif /* _AMDXDNA_UBUF_H_ */
diff --git a/drivers/accel/habanalabs/Kconfig b/drivers/accel/habanalabs/Kconfig
index 1919fbb169c7..6d1506acbd72 100644
--- a/drivers/accel/habanalabs/Kconfig
+++ b/drivers/accel/habanalabs/Kconfig
@@ -27,3 +27,26 @@ config DRM_ACCEL_HABANALABS
To compile this driver as a module, choose M here: the
module will be called habanalabs.
+
+if DRM_ACCEL_HABANALABS
+
+config HL_HLDIO
+ bool "Habanalabs NVMe Direct I/O (HLDIO)"
+ depends on PCI_P2PDMA
+ depends on BLOCK
+ help
+ Enable NVMe peer-to-peer direct I/O support for Habanalabs AI
+ accelerators.
+
+ This allows direct data transfers between NVMe storage devices
+ and Habanalabs accelerators without involving system memory,
+ using PCI peer-to-peer DMA capabilities.
+
+ Requirements:
+ - CONFIG_PCI_P2PDMA=y
+ - NVMe device and Habanalabs accelerator under same PCI root complex
+ - IOMMU disabled or in passthrough mode
+ - Hardware supporting PCI P2P DMA
+
+	  If unsure, say N.
+endif # DRM_ACCEL_HABANALABS
diff --git a/drivers/accel/habanalabs/common/Makefile b/drivers/accel/habanalabs/common/Makefile
index e6abffea9f87..b6d00de09db5 100644
--- a/drivers/accel/habanalabs/common/Makefile
+++ b/drivers/accel/habanalabs/common/Makefile
@@ -13,3 +13,8 @@ HL_COMMON_FILES := common/habanalabs_drv.o common/device.o common/context.o \
common/command_submission.o common/firmware_if.o \
common/security.o common/state_dump.o \
common/memory_mgr.o common/decoder.o
+
+# Conditionally add HLDIO support
+ifdef CONFIG_HL_HLDIO
+HL_COMMON_FILES += common/hldio.o
+endif
\ No newline at end of file
diff --git a/drivers/accel/habanalabs/common/debugfs.c b/drivers/accel/habanalabs/common/debugfs.c
index 4b391807e5f2..5f0820b19ccb 100644
--- a/drivers/accel/habanalabs/common/debugfs.c
+++ b/drivers/accel/habanalabs/common/debugfs.c
@@ -6,6 +6,7 @@
*/
#include "habanalabs.h"
+#include "hldio.h"
#include "../include/hw_ip/mmu/mmu_general.h"
#include <linux/pci.h>
@@ -602,6 +603,198 @@ static int engines_show(struct seq_file *s, void *data)
return 0;
}
+#ifdef CONFIG_HL_HLDIO
+/* DIO debugfs functions following the standard pattern */
+static int dio_ssd2hl_show(struct seq_file *s, void *data)
+{
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_device *hdev = dev_entry->hdev;
+
+ if (!hdev->asic_prop.supports_nvme) {
+		seq_puts(s, "NVMe Direct I/O not supported\n");
+		return 0;
+	}
+
+	seq_puts(s, "Usage: echo \"fd=N va=0xADDR off=N len=N\" > dio_ssd2hl\n");
+	seq_printf(s, "Last transfer: %zu bytes\n", dev_entry->dio_stats.last_len_read);
+	seq_puts(s, "Note: All parameters must be page-aligned (4KB)\n");
+
+ return 0;
+}
+
+static ssize_t dio_ssd2hl_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct seq_file *s = file->private_data;
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_device *hdev = dev_entry->hdev;
+ struct hl_ctx *ctx = hdev->kernel_ctx;
+ char kbuf[128];
+ u64 device_va = 0, off_bytes = 0, len_bytes = 0;
+ u32 fd = 0;
+ size_t len_read = 0;
+ int rc, parsed;
+
+ if (!hdev->asic_prop.supports_nvme)
+ return -EOPNOTSUPP;
+
+ if (count >= sizeof(kbuf))
+ return -EINVAL;
+
+ if (copy_from_user(kbuf, buf, count))
+ return -EFAULT;
+
+ kbuf[count] = 0;
+
+ /* Parse: fd=N va=0xADDR off=N len=N */
+ parsed = sscanf(kbuf, "fd=%u va=0x%llx off=%llu len=%llu",
+ &fd, &device_va, &off_bytes, &len_bytes);
+ if (parsed != 4) {
+		dev_err(hdev->dev, "Invalid format. Expected: fd=N va=0xADDR off=N len=N\n");
+ return -EINVAL;
+ }
+
+ /* Validate file descriptor */
+ if (fd == 0) {
+		dev_err(hdev->dev, "Invalid file descriptor: %u\n", fd);
+ return -EINVAL;
+ }
+
+ /* Validate alignment requirements */
+ if (!IS_ALIGNED(device_va, PAGE_SIZE) ||
+ !IS_ALIGNED(off_bytes, PAGE_SIZE) ||
+ !IS_ALIGNED(len_bytes, PAGE_SIZE)) {
+ dev_err(hdev->dev,
+			"All parameters must be page-aligned (4KB)\n");
+ return -EINVAL;
+ }
+
+ /* Validate transfer size */
+ if (len_bytes == 0 || len_bytes > SZ_1G) {
+		dev_err(hdev->dev, "Invalid length: %llu (max 1GB)\n",
+ len_bytes);
+ return -EINVAL;
+ }
+
+	dev_dbg(hdev->dev, "DIO SSD2HL: fd=%u va=0x%llx off=%llu len=%llu\n",
+ fd, device_va, off_bytes, len_bytes);
+
+ rc = hl_dio_ssd2hl(hdev, ctx, fd, device_va, off_bytes, len_bytes, &len_read);
+ if (rc < 0) {
+ dev_entry->dio_stats.failed_ops++;
+		dev_err(hdev->dev, "SSD2HL operation failed: %d\n", rc);
+ return rc;
+ }
+
+ /* Update statistics */
+ dev_entry->dio_stats.total_ops++;
+ dev_entry->dio_stats.successful_ops++;
+ dev_entry->dio_stats.bytes_transferred += len_read;
+ dev_entry->dio_stats.last_len_read = len_read;
+
+	dev_dbg(hdev->dev, "DIO SSD2HL completed: %zu bytes transferred\n", len_read);
+
+ return count;
+}
+
+static int dio_hl2ssd_show(struct seq_file *s, void *data)
+{
+	seq_puts(s, "HL2SSD (device-to-SSD) transfers not implemented\n");
+ return 0;
+}
+
+static ssize_t dio_hl2ssd_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct seq_file *s = file->private_data;
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_device *hdev = dev_entry->hdev;
+
+ if (!hdev->asic_prop.supports_nvme)
+ return -EOPNOTSUPP;
+
+	dev_dbg(hdev->dev, "HL2SSD operation not implemented\n");
+ return -EOPNOTSUPP;
+}
+
+static int dio_stats_show(struct seq_file *s, void *data)
+{
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_device *hdev = dev_entry->hdev;
+ struct hl_dio_stats *stats = &dev_entry->dio_stats;
+ u64 avg_bytes_per_op = 0, success_rate = 0;
+
+ if (!hdev->asic_prop.supports_nvme) {
+		seq_puts(s, "NVMe Direct I/O not supported\n");
+ return 0;
+ }
+
+ if (stats->successful_ops > 0)
+ avg_bytes_per_op = stats->bytes_transferred / stats->successful_ops;
+
+ if (stats->total_ops > 0)
+ success_rate = (stats->successful_ops * 100) / stats->total_ops;
+
+	seq_puts(s, "=== Habanalabs Direct I/O Statistics ===\n");
+	seq_printf(s, "Total operations: %llu\n", stats->total_ops);
+	seq_printf(s, "Successful ops: %llu\n", stats->successful_ops);
+	seq_printf(s, "Failed ops: %llu\n", stats->failed_ops);
+	seq_printf(s, "Success rate: %llu%%\n", success_rate);
+	seq_printf(s, "Total bytes: %llu\n", stats->bytes_transferred);
+	seq_printf(s, "Avg bytes per op: %llu\n", avg_bytes_per_op);
+	seq_printf(s, "Last transfer: %zu bytes\n", stats->last_len_read);
+
+ return 0;
+}
+
+static int dio_reset_show(struct seq_file *s, void *data)
+{
+	seq_puts(s, "Write '1' to reset DIO statistics\n");
+ return 0;
+}
+
+static ssize_t dio_reset_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct seq_file *s = file->private_data;
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_device *hdev = dev_entry->hdev;
+ char kbuf[8];
+ unsigned long val;
+ int rc;
+
+ if (!hdev->asic_prop.supports_nvme)
+ return -EOPNOTSUPP;
+
+ if (count >= sizeof(kbuf))
+ return -EINVAL;
+
+ if (copy_from_user(kbuf, buf, count))
+ return -EFAULT;
+
+ kbuf[count] = 0;
+
+ rc = kstrtoul(kbuf, 0, &val);
+ if (rc)
+ return rc;
+
+ if (val == 1) {
+ memset(&dev_entry->dio_stats, 0, sizeof(dev_entry->dio_stats));
+		dev_dbg(hdev->dev, "DIO statistics reset\n");
+	} else {
+		dev_err(hdev->dev, "Write '1' to reset statistics\n");
+ return -EINVAL;
+ }
+
+ return count;
+}
+#endif
+
static ssize_t hl_memory_scrub(struct file *f, const char __user *buf,
size_t count, loff_t *ppos)
{
@@ -788,6 +981,113 @@ static void hl_access_host_mem(struct hl_device *hdev, u64 addr, u64 *val,
}
}
+static void dump_cfg_access_entry(struct hl_device *hdev,
+ struct hl_debugfs_cfg_access_entry *entry)
+{
+ char *access_type = "";
+ struct tm tm;
+
+ switch (entry->debugfs_type) {
+ case DEBUGFS_READ32:
+ access_type = "READ32 from";
+ break;
+ case DEBUGFS_WRITE32:
+ access_type = "WRITE32 to";
+ break;
+ case DEBUGFS_READ64:
+ access_type = "READ64 from";
+ break;
+ case DEBUGFS_WRITE64:
+ access_type = "WRITE64 to";
+ break;
+ default:
+ dev_err(hdev->dev, "Invalid DEBUGFS access type (%u)\n", entry->debugfs_type);
+ return;
+ }
+
+ time64_to_tm(entry->seconds_since_epoch, 0, &tm);
+ dev_info(hdev->dev,
+ "%ld-%02d-%02d %02d:%02d:%02d (UTC): %s %#llx\n", tm.tm_year + 1900, tm.tm_mon + 1,
+ tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec, access_type, entry->addr);
+}
+
+void hl_debugfs_cfg_access_history_dump(struct hl_device *hdev)
+{
+ struct hl_debugfs_cfg_access *dbgfs = &hdev->debugfs_cfg_accesses;
+ u32 i, head, count = 0;
+ time64_t entry_time, now;
+ unsigned long flags;
+
+ now = ktime_get_real_seconds();
+
+ spin_lock_irqsave(&dbgfs->lock, flags);
+ head = dbgfs->head;
+ if (head == 0)
+ i = HL_DBGFS_CFG_ACCESS_HIST_LEN - 1;
+ else
+ i = head - 1;
+
+ /* Walk back until timeout or invalid entry */
+ while (dbgfs->cfg_access_list[i].valid) {
+ entry_time = dbgfs->cfg_access_list[i].seconds_since_epoch;
+ /* Stop when entry is older than timeout */
+ if (now - entry_time > HL_DBGFS_CFG_ACCESS_HIST_TIMEOUT_SEC)
+ break;
+
+ /* print single entry under lock */
+		/* copy out and print a single entry */
+ struct hl_debugfs_cfg_access_entry entry = dbgfs->cfg_access_list[i];
+ /*
+ * We copy the entry out under lock and then print after
+ * releasing the lock to minimize time under lock.
+ */
+ spin_unlock_irqrestore(&dbgfs->lock, flags);
+ dump_cfg_access_entry(hdev, &entry);
+ spin_lock_irqsave(&dbgfs->lock, flags);
+ }
+
+ /* mark consumed */
+ dbgfs->cfg_access_list[i].valid = false;
+
+ if (i == 0)
+ i = HL_DBGFS_CFG_ACCESS_HIST_LEN - 1;
+ else
+ i--;
+ count++;
+ if (count >= HL_DBGFS_CFG_ACCESS_HIST_LEN)
+ break;
+ }
+ spin_unlock_irqrestore(&dbgfs->lock, flags);
+}
+
+static void check_if_cfg_access_and_log(struct hl_device *hdev, u64 addr, size_t access_size,
+ enum debugfs_access_type access_type)
+{
+ struct hl_debugfs_cfg_access *dbgfs_cfg_accesses = &hdev->debugfs_cfg_accesses;
+ struct pci_mem_region *mem_reg = &hdev->pci_mem_region[PCI_REGION_CFG];
+ struct hl_debugfs_cfg_access_entry *new_entry;
+ unsigned long flags;
+
+ /* Check if address is in config memory */
+ if (addr >= mem_reg->region_base &&
+ mem_reg->region_size >= access_size &&
+ addr <= mem_reg->region_base + mem_reg->region_size - access_size) {
+
+ spin_lock_irqsave(&dbgfs_cfg_accesses->lock, flags);
+
+ new_entry = &dbgfs_cfg_accesses->cfg_access_list[dbgfs_cfg_accesses->head];
+ new_entry->seconds_since_epoch = ktime_get_real_seconds();
+ new_entry->addr = addr;
+ new_entry->debugfs_type = access_type;
+ new_entry->valid = true;
+ dbgfs_cfg_accesses->head = (dbgfs_cfg_accesses->head + 1)
+ % HL_DBGFS_CFG_ACCESS_HIST_LEN;
+
+ spin_unlock_irqrestore(&dbgfs_cfg_accesses->lock, flags);
+
+ }
+}
+
static int hl_access_mem(struct hl_device *hdev, u64 addr, u64 *val,
enum debugfs_access_type acc_type)
{
@@ -805,6 +1105,7 @@ static int hl_access_mem(struct hl_device *hdev, u64 addr, u64 *val,
return rc;
}
+ check_if_cfg_access_and_log(hdev, addr, acc_size, acc_type);
rc = hl_access_dev_mem_by_region(hdev, addr, val, acc_type, &found);
if (rc) {
dev_err(hdev->dev,
@@ -1525,6 +1826,13 @@ static const struct hl_info_list hl_debugfs_list[] = {
{"mmu", mmu_show, mmu_asid_va_write},
{"mmu_error", mmu_ack_error, mmu_ack_error_value_write},
{"engines", engines_show, NULL},
+#ifdef CONFIG_HL_HLDIO
+ /* DIO entries - only created if NVMe is supported */
+ {"dio_ssd2hl", dio_ssd2hl_show, dio_ssd2hl_write},
+ {"dio_stats", dio_stats_show, NULL},
+ {"dio_reset", dio_reset_show, dio_reset_write},
+ {"dio_hl2ssd", dio_hl2ssd_show, dio_hl2ssd_write},
+#endif
};
static int hl_debugfs_open(struct inode *inode, struct file *file)
@@ -1723,6 +2031,11 @@ static void add_files_to_device(struct hl_device *hdev, struct hl_dbg_device_ent
&hdev->asic_prop.server_type);
for (i = 0, entry = dev_entry->entry_arr ; i < count ; i++, entry++) {
+ /* Skip DIO entries if NVMe is not supported */
+ if (strncmp(hl_debugfs_list[i].name, "dio_", 4) == 0 &&
+ !hdev->asic_prop.supports_nvme)
+ continue;
+
debugfs_create_file(hl_debugfs_list[i].name,
0644,
root,
@@ -1762,6 +2075,14 @@ int hl_debugfs_device_init(struct hl_device *hdev)
spin_lock_init(&dev_entry->userptr_spinlock);
mutex_init(&dev_entry->ctx_mem_hash_mutex);
+ spin_lock_init(&hdev->debugfs_cfg_accesses.lock);
+	hdev->debugfs_cfg_accesses.head = 0; /* already zeroed at allocation; explicit init for clarity */
+
+#ifdef CONFIG_HL_HLDIO
+ /* Initialize DIO statistics */
+ memset(&dev_entry->dio_stats, 0, sizeof(dev_entry->dio_stats));
+#endif
+
return 0;
}
@@ -1780,6 +2101,7 @@ void hl_debugfs_device_fini(struct hl_device *hdev)
vfree(entry->state_dump[i]);
kfree(entry->entry_arr);
+
}
void hl_debugfs_add_device(struct hl_device *hdev)
@@ -1792,6 +2114,7 @@ void hl_debugfs_add_device(struct hl_device *hdev)
if (!hdev->asic_prop.fw_security_enabled)
add_secured_nodes(dev_entry, dev_entry->root);
+
}
void hl_debugfs_add_file(struct hl_fpriv *hpriv)
@@ -1924,3 +2247,4 @@ void hl_debugfs_set_state_dump(struct hl_device *hdev, char *data,
up_write(&dev_entry->state_dump_sem);
}
+
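
The dio_ssd2hl node is driven by writing a command string that must match the sscanf() format in dio_ssd2hl_write() above. A hedged userspace sketch; the debugfs path is an assumption and varies with the accel device index:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int request_ssd2hl(int nvme_fd, unsigned long long va,
			  unsigned long long off, unsigned long long len)
{
	char cmd[128];
	int fd, n, rc;

	/* Format mirrors the kernel parser: fd=N va=0xADDR off=N len=N */
	n = snprintf(cmd, sizeof(cmd), "fd=%d va=0x%llx off=%llu len=%llu",
		     nvme_fd, va, off, len);
	fd = open("/sys/kernel/debug/accel/0/dio_ssd2hl", O_WRONLY);
	if (fd < 0)
		return -1;
	rc = (write(fd, cmd, n) == n) ? 0 : -1;
	close(fd);
	return rc;
}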
diff --git a/drivers/accel/habanalabs/common/device.c b/drivers/accel/habanalabs/common/device.c
index 80fa08bf57bd..999c92d7036e 100644
--- a/drivers/accel/habanalabs/common/device.c
+++ b/drivers/accel/habanalabs/common/device.c
@@ -1630,6 +1630,11 @@ int hl_device_reset(struct hl_device *hdev, u32 flags)
from_watchdog_thread = !!(flags & HL_DRV_RESET_FROM_WD_THR);
reset_upon_device_release = hdev->reset_upon_device_release && from_dev_release;
+ if (hdev->cpld_shutdown) {
+ dev_err(hdev->dev, "Cannot reset device, cpld is shutdown! Device is NOT usable\n");
+ return -EIO;
+ }
+
if (!hard_reset && (hl_device_status(hdev) == HL_DEVICE_STATUS_MALFUNCTION)) {
dev_dbg(hdev->dev, "soft-reset isn't supported on a malfunctioning device\n");
return 0;
@@ -2576,6 +2581,14 @@ void hl_device_fini(struct hl_device *hdev)
if (rc)
dev_err(hdev->dev, "hw_fini failed in device fini while removing device %d\n", rc);
+	/* Reset the H/W (if it is accessible). It will be in an idle state after this returns */
+ if (!hdev->cpld_shutdown) {
+ rc = hdev->asic_funcs->hw_fini(hdev, true, false);
+ if (rc)
+ dev_err(hdev->dev,
+ "hw_fini failed in device fini while removing device %d\n", rc);
+ }
+
hdev->fw_loader.fw_comp_loaded = FW_TYPE_NONE;
/* Release kernel context */
@@ -2943,3 +2956,13 @@ void hl_handle_clk_change_event(struct hl_device *hdev, u16 event_type, u64 *eve
mutex_unlock(&clk_throttle->lock);
}
+
+void hl_eq_cpld_shutdown_event_handle(struct hl_device *hdev, u16 event_id, u64 *event_mask)
+{
+ hl_handle_critical_hw_err(hdev, event_id, event_mask);
+ *event_mask |= HL_NOTIFIER_EVENT_DEVICE_UNAVAILABLE;
+
+ /* Avoid any new accesses to the H/W */
+ hdev->disabled = true;
+ hdev->cpld_shutdown = true;
+}
diff --git a/drivers/accel/habanalabs/common/habanalabs.h b/drivers/accel/habanalabs/common/habanalabs.h
index 6f27ce4fa01b..d94c2ba22a6a 100644
--- a/drivers/accel/habanalabs/common/habanalabs.h
+++ b/drivers/accel/habanalabs/common/habanalabs.h
@@ -90,7 +90,9 @@ struct hl_fpriv;
#define HL_COMMON_USER_CQ_INTERRUPT_ID 0xFFF
#define HL_COMMON_DEC_INTERRUPT_ID 0xFFE
-#define HL_STATE_DUMP_HIST_LEN 5
+#define HL_STATE_DUMP_HIST_LEN 5
+#define HL_DBGFS_CFG_ACCESS_HIST_LEN 20
+#define HL_DBGFS_CFG_ACCESS_HIST_TIMEOUT_SEC 2 /* 2s */
/* Default value for device reset trigger , an invalid value */
#define HL_RESET_TRIGGER_DEFAULT 0xFF
@@ -702,6 +704,7 @@ struct hl_hints_range {
* @supports_advanced_cpucp_rc: true if new cpucp opcodes are supported.
* @supports_engine_modes: true if changing engines/engine_cores modes is supported.
* @support_dynamic_resereved_fw_size: true if we support dynamic reserved size for fw.
+ * @supports_nvme: indicates whether the asic supports NVMe P2P DMA.
*/
struct asic_fixed_properties {
struct hw_queue_properties *hw_queues_props;
@@ -822,6 +825,7 @@ struct asic_fixed_properties {
u8 supports_advanced_cpucp_rc;
u8 supports_engine_modes;
u8 support_dynamic_resereved_fw_size;
+ u8 supports_nvme;
};
/**
@@ -2274,6 +2278,9 @@ struct hl_vm {
u8 init_done;
};
+#ifdef CONFIG_HL_HLDIO
+#include "hldio.h"
+#endif
/*
* DEBUG, PROFILING STRUCTURE
@@ -2344,7 +2351,6 @@ struct hl_fpriv {
struct mutex ctx_lock;
};
-
/*
* DebugFS
*/
@@ -2372,6 +2378,7 @@ struct hl_debugfs_entry {
struct hl_dbg_device_entry *dev_entry;
};
+
/**
* struct hl_dbg_device_entry - ASIC specific debugfs manager.
* @root: root dentry.
@@ -2403,6 +2410,7 @@ struct hl_debugfs_entry {
* @i2c_addr: generic u8 debugfs file for address value to use in i2c_data_read.
* @i2c_reg: generic u8 debugfs file for register value to use in i2c_data_read.
* @i2c_len: generic u8 debugfs file for length value to use in i2c_data_read.
+ * @dio_stats: Direct I/O statistics
*/
struct hl_dbg_device_entry {
struct dentry *root;
@@ -2434,6 +2442,35 @@ struct hl_dbg_device_entry {
u8 i2c_addr;
u8 i2c_reg;
u8 i2c_len;
+#ifdef CONFIG_HL_HLDIO
+ struct hl_dio_stats dio_stats;
+#endif
+};
+
+/**
+ * struct hl_debugfs_cfg_access_entry - single debugfs config access object, member of
+ * hl_debugfs_cfg_access.
+ * @seconds_since_epoch: seconds since January 1, 1970, used for time comparisons.
+ * @debugfs_type: the debugfs operation requested, can be READ32, WRITE32, READ64 or WRITE64.
+ * @addr: the requested address to access.
+ * @valid: if set, this entry has valid data for dumping at interrupt time.
+ */
+struct hl_debugfs_cfg_access_entry {
+ ktime_t seconds_since_epoch;
+ enum debugfs_access_type debugfs_type;
+ u64 addr;
+ bool valid;
+};
+
+/**
+ * struct hl_debugfs_cfg_access - saves debugfs config region access requests history.
+ * @cfg_access_list: list of objects describing config region access requests.
+ * @head: next valid index in cfg_access_list to add a new entry to.
+ * @lock: protects @head and the entries of @cfg_access_list.
+ */
+struct hl_debugfs_cfg_access {
+ struct hl_debugfs_cfg_access_entry cfg_access_list[HL_DBGFS_CFG_ACCESS_HIST_LEN];
+ u32 head;
+ spinlock_t lock; /* protects head and entries */
};
/**
@@ -3281,6 +3318,7 @@ struct eq_heartbeat_debug_info {
* @hl_chip_info: ASIC's sensors information.
* @device_status_description: device status description.
* @hl_debugfs: device's debugfs manager.
+ * @debugfs_cfg_accesses: list of last debugfs config region accesses.
* @cb_pool: list of pre allocated CBs.
* @cb_pool_lock: protects the CB pool.
* @internal_cb_pool_virt_addr: internal command buffer pool virtual address.
@@ -3305,6 +3343,7 @@ struct eq_heartbeat_debug_info {
* @captured_err_info: holds information about errors.
* @reset_info: holds current device reset information.
* @heartbeat_debug_info: counters used to debug heartbeat failures.
+ * @hldio: describes habanalabs direct storage interaction interface.
* @irq_affinity_mask: mask of available CPU cores for user and decoder interrupt handling.
* @stream_master_qid_arr: pointer to array with QIDs of master streams.
* @fw_inner_major_ver: the major of current loaded preboot inner version.
@@ -3357,6 +3396,7 @@ struct eq_heartbeat_debug_info {
* addresses.
* @is_in_dram_scrub: true if dram scrub operation is on going.
* @disabled: is device disabled.
+ * @cpld_shutdown: is cpld shutdown.
* @late_init_done: is late init stage was done during initialization.
* @hwmon_initialized: is H/W monitor sensors was initialized.
* @reset_on_lockup: true if a reset should be done in case of stuck CS, false
@@ -3461,6 +3501,7 @@ struct hl_device {
struct hwmon_chip_info *hl_chip_info;
struct hl_dbg_device_entry hl_debugfs;
+ struct hl_debugfs_cfg_access debugfs_cfg_accesses;
struct list_head cb_pool;
spinlock_t cb_pool_lock;
@@ -3496,7 +3537,9 @@ struct hl_device {
struct hl_reset_info reset_info;
struct eq_heartbeat_debug_info heartbeat_debug_info;
-
+#ifdef CONFIG_HL_HLDIO
+ struct hl_dio hldio;
+#endif
cpumask_t irq_affinity_mask;
u32 *stream_master_qid_arr;
@@ -3532,6 +3575,7 @@ struct hl_device {
u16 cpu_pci_msb_addr;
u8 is_in_dram_scrub;
u8 disabled;
+ u8 cpld_shutdown;
u8 late_init_done;
u8 hwmon_initialized;
u8 reset_on_lockup;
@@ -4089,6 +4133,7 @@ void hl_init_cpu_for_irq(struct hl_device *hdev);
void hl_set_irq_affinity(struct hl_device *hdev, int irq);
void hl_eq_heartbeat_event_handle(struct hl_device *hdev);
void hl_handle_clk_change_event(struct hl_device *hdev, u16 event_type, u64 *event_mask);
+void hl_eq_cpld_shutdown_event_handle(struct hl_device *hdev, u16 event_id, u64 *event_mask);
#ifdef CONFIG_DEBUG_FS
@@ -4110,6 +4155,7 @@ void hl_debugfs_add_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx);
void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx);
void hl_debugfs_set_state_dump(struct hl_device *hdev, char *data,
unsigned long length);
+void hl_debugfs_cfg_access_history_dump(struct hl_device *hdev);
#else
@@ -4185,6 +4231,10 @@ static inline void hl_debugfs_set_state_dump(struct hl_device *hdev,
{
}
+static inline void hl_debugfs_cfg_access_history_dump(struct hl_device *hdev)
+{
+}
+
#endif
/* Security */
diff --git a/drivers/accel/habanalabs/common/habanalabs_ioctl.c b/drivers/accel/habanalabs/common/habanalabs_ioctl.c
index dc80ca921d90..fdfdabc85e54 100644
--- a/drivers/accel/habanalabs/common/habanalabs_ioctl.c
+++ b/drivers/accel/habanalabs/common/habanalabs_ioctl.c
@@ -961,6 +961,12 @@ static int send_fw_generic_request(struct hl_device *hdev, struct hl_info_args *
case HL_PASSTHROUGH_VERSIONS:
need_input_buff = false;
break;
+ case HL_GET_ERR_COUNTERS_CMD:
+ need_input_buff = true;
+ break;
+ case HL_GET_P_STATE:
+ need_input_buff = false;
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/accel/habanalabs/common/hldio.c b/drivers/accel/habanalabs/common/hldio.c
new file mode 100644
index 000000000000..083ae5610875
--- /dev/null
+++ b/drivers/accel/habanalabs/common/hldio.c
@@ -0,0 +1,437 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2024 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "habanalabs.h"
+#include "hldio.h"
+#include <generated/uapi/linux/version.h>
+#include <linux/pci-p2pdma.h>
+#include <linux/blkdev.h>
+#include <linux/vmalloc.h>
+
+/*
+ * NVMe Direct I/O implementation for habanalabs driver
+ *
+ * ASSUMPTIONS
+ * ===========
+ * 1. No IOMMU (well, technically it can work with an IOMMU, but then it
+ *    is almost useless).
+ * 2. Only READ operations (can extend in the future).
+ * 3. No sparse files (can overcome this in the future).
+ * 4. Kernel version >= 6.9
+ * 5. Requiring page alignment is OK (no solution to this one right now;
+ *    how would we read partial pages?)
+ * 6. Kernel compiled with CONFIG_PCI_P2PDMA. This requires a CUSTOM kernel.
+ *    Theoretically there is a vague idea of how this could be solved, but
+ *    it is probably unacceptable for upstream. It also may not work in the
+ *    end.
+ * 7. Either make sure our cards and disks are under the same PCI bridge, or
+ * compile a custom kernel to hack around this.
+ */
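+
+/*
+ * High-level flow, as implemented below: at init time each P2P BAR region
+ * is published via pci_p2pdma_add_resource() and backed by
+ * pci_alloc_p2pmem(), and a page array is built for it. On each read the
+ * user's fd is validated, device VAs are translated to P2P pages, a bvec
+ * iterator is built over them, and the file's read_iter() moves the data
+ * directly from the SSD into device memory.
+ */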
+
+#define IO_STABILIZE_TIMEOUT 10000000 /* 10 seconds in microseconds */
+
+/*
+ * This struct contains all the useful data extracted from the file handle
+ * provided by the user.
+ * @TODO: right now it is retrieved on each IO, but it could be done once in
+ * a dedicated IOCTL, called for example HL_REGISTER_HANDLE.
+ */
+struct hl_dio_fd {
+ /* Back pointer in case we need it in async completion */
+ struct hl_ctx *ctx;
+ /* Associated fd struct */
+ struct file *filp;
+};
+
+/*
+ * This is a single IO descriptor
+ */
+struct hl_direct_io {
+ struct hl_dio_fd f;
+ struct kiocb kio;
+ struct bio_vec *bv;
+ struct iov_iter iter;
+ u64 device_va;
+ u64 off_bytes;
+ u64 len_bytes;
+ u32 type;
+};
+
+bool hl_device_supports_nvme(struct hl_device *hdev)
+{
+ return hdev->asic_prop.supports_nvme;
+}
+
+static int hl_dio_fd_register(struct hl_ctx *ctx, int fd, struct hl_dio_fd *f)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct block_device *bd;
+ struct super_block *sb;
+ struct inode *inode;
+ struct gendisk *gd;
+ struct device *disk_dev;
+ int rc;
+
+ f->filp = fget(fd);
+ if (!f->filp) {
+ rc = -ENOENT;
+ goto out;
+ }
+
+ if (!(f->filp->f_flags & O_DIRECT)) {
+		dev_err(hdev->dev, "file is not open in O_DIRECT mode\n");
+ rc = -EINVAL;
+ goto fput;
+ }
+
+ if (!f->filp->f_op->read_iter) {
+ dev_err(hdev->dev, "read iter is not supported, need to fall back to legacy\n");
+ rc = -EINVAL;
+ goto fput;
+ }
+
+	inode = file_inode(f->filp);
+	sb = inode->i_sb;
+	bd = sb->s_bdev;
+
+	/* Validate the block device before dereferencing it */
+	if (!bd || !bd->bd_disk) {
+		dev_err(hdev->dev, "invalid block device\n");
+		rc = -ENODEV;
+		goto fput;
+	}
+	gd = bd->bd_disk;
+
+	if (inode->i_blocks << sb->s_blocksize_bits < i_size_read(inode)) {
+		dev_err(hdev->dev, "sparse files are not currently supported\n");
+		rc = -EINVAL;
+		goto fput;
+	}
+ /* Get the underlying device from the block device */
+ disk_dev = disk_to_dev(gd);
+ if (!dma_pci_p2pdma_supported(disk_dev)) {
+ dev_err(hdev->dev, "device does not support PCI P2P DMA\n");
+ rc = -EOPNOTSUPP;
+ goto fput;
+ }
+
+ /*
+ * @TODO: Maybe we need additional checks here
+ */
+
+ f->ctx = ctx;
+ rc = 0;
+
+ goto out;
+fput:
+ fput(f->filp);
+out:
+ return rc;
+}
+
+static void hl_dio_fd_unregister(struct hl_dio_fd *f)
+{
+ fput(f->filp);
+}
+
+static long hl_dio_count_io(struct hl_device *hdev)
+{
+ s64 sum = 0;
+ int i;
+
+ for_each_possible_cpu(i)
+ sum += per_cpu(*hdev->hldio.inflight_ios, i);
+
+ return sum;
+}
+
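+/*
+ * IO path accounting: hl_dio_get_iopath() increments the percpu counter
+ * first and only then re-checks io_enabled, while hl_dio_stop() clears
+ * io_enabled before draining the counter. With this ordering, once the
+ * drain observes zero inflight IOs, no new IO can slip in.
+ */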
+static bool hl_dio_get_iopath(struct hl_ctx *ctx)
+{
+ struct hl_device *hdev = ctx->hdev;
+
+ if (hdev->hldio.io_enabled) {
+ this_cpu_inc(*hdev->hldio.inflight_ios);
+
+ /* Avoid race conditions */
+		/* Re-check after the increment to close the race with hl_dio_stop() */
+ this_cpu_dec(*hdev->hldio.inflight_ios);
+ return false;
+ }
+
+ hl_ctx_get(ctx);
+
+ return true;
+ }
+
+ return false;
+}
+
+static void hl_dio_put_iopath(struct hl_ctx *ctx)
+{
+ struct hl_device *hdev = ctx->hdev;
+
+ hl_ctx_put(ctx);
+ this_cpu_dec(*hdev->hldio.inflight_ios);
+}
+
+static void hl_dio_set_io_enabled(struct hl_device *hdev, bool enabled)
+{
+ hdev->hldio.io_enabled = enabled;
+}
+
+static bool hl_dio_validate_io(struct hl_device *hdev, struct hl_direct_io *io)
+{
+ if ((u64)io->device_va & ~PAGE_MASK) {
+ dev_dbg(hdev->dev, "device address must be 4K aligned\n");
+ return false;
+ }
+
+ if (io->len_bytes & ~PAGE_MASK) {
+ dev_dbg(hdev->dev, "IO length must be 4K aligned\n");
+ return false;
+ }
+
+ if (io->off_bytes & ~PAGE_MASK) {
+ dev_dbg(hdev->dev, "IO offset must be 4K aligned\n");
+ return false;
+ }
+
+ return true;
+}
+
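+/*
+ * Translate a device virtual address to the struct page backing it: resolve
+ * the VA to a device physical address through the device MMU, then look the
+ * PA up in the registered P2P regions.
+ */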
+static struct page *hl_dio_va2page(struct hl_device *hdev, struct hl_ctx *ctx, u64 device_va)
+{
+ struct hl_dio *hldio = &hdev->hldio;
+ u64 device_pa;
+ int rc, i;
+
+ rc = hl_mmu_va_to_pa(ctx, device_va, &device_pa);
+ if (rc) {
+ dev_err(hdev->dev, "device virtual address translation error: %#llx (%d)",
+ device_va, rc);
+ return NULL;
+ }
+
+ for (i = 0 ; i < hldio->np2prs ; ++i) {
+ if (device_pa >= hldio->p2prs[i].device_pa &&
+ device_pa < hldio->p2prs[i].device_pa + hldio->p2prs[i].size)
+ return hldio->p2prs[i].p2ppages[(device_pa - hldio->p2prs[i].device_pa) >>
+ PAGE_SHIFT];
+ }
+
+ return NULL;
+}
+
+static ssize_t hl_direct_io(struct hl_device *hdev, struct hl_direct_io *io)
+{
+ u64 npages, device_va;
+ ssize_t rc;
+ int i;
+
+ if (!hl_dio_validate_io(hdev, io))
+ return -EINVAL;
+
+ if (!hl_dio_get_iopath(io->f.ctx)) {
+ dev_info(hdev->dev, "can't schedule a new IO, IO is disabled\n");
+ return -ESHUTDOWN;
+ }
+
+ init_sync_kiocb(&io->kio, io->f.filp);
+ io->kio.ki_pos = io->off_bytes;
+
+ npages = (io->len_bytes >> PAGE_SHIFT);
+
+	/* @TODO: this can be implemented smarter, vmalloc in the iopath is not
+	 * ideal. Maybe some variation of genpool. The number of pages may
+	 * differ greatly, so maybe even use pools of different sizes and
+	 * choose the closest one.
+	 */
+	io->bv = vzalloc(npages * sizeof(struct bio_vec));
+	if (!io->bv) {
+		/* vfree(NULL) is a no-op, so the common cleanup path is safe */
+		rc = -ENOMEM;
+		goto cleanup;
+	}
+
+ for (i = 0, device_va = io->device_va; i < npages ; ++i, device_va += PAGE_SIZE) {
+ io->bv[i].bv_page = hl_dio_va2page(hdev, io->f.ctx, device_va);
+ if (!io->bv[i].bv_page) {
+ dev_err(hdev->dev, "error getting page struct for device va %#llx",
+ device_va);
+ rc = -EFAULT;
+ goto cleanup;
+ }
+ io->bv[i].bv_offset = 0;
+ io->bv[i].bv_len = PAGE_SIZE;
+ }
+
+	iov_iter_bvec(&io->iter, io->type, io->bv, npages, io->len_bytes);
+ if (io->f.filp->f_op && io->f.filp->f_op->read_iter)
+ rc = io->f.filp->f_op->read_iter(&io->kio, &io->iter);
+ else
+ rc = -EINVAL;
+
+cleanup:
+ vfree(io->bv);
+ hl_dio_put_iopath(io->f.ctx);
+
+	dev_dbg(hdev->dev, "IO ended with %zd\n", rc);
+
+ return rc;
+}
+
+/*
+ * @TODO: This function can be used as a callback for io completion under
+ * kio->ki_complete in order to implement async IO.
+ * Note that on more recent kernels there is no ret2.
+ */
+__maybe_unused static void hl_direct_io_complete(struct kiocb *kio, long ret, long ret2)
+{
+ struct hl_direct_io *io = container_of(kio, struct hl_direct_io, kio);
+
+ dev_dbg(io->f.ctx->hdev->dev, "IO completed with %ld\n", ret);
+
+ /* Do something to copy result to user / notify completion */
+
+ hl_dio_put_iopath(io->f.ctx);
+
+ hl_dio_fd_unregister(&io->f);
+}
+
+/*
+ * DMA disk to ASIC, wait for results. Must be invoked from the user context
+ */
+int hl_dio_ssd2hl(struct hl_device *hdev, struct hl_ctx *ctx, int fd,
+ u64 device_va, off_t off_bytes, size_t len_bytes,
+ size_t *len_read)
+{
+ struct hl_direct_io *io;
+ ssize_t rc;
+
+	dev_dbg(hdev->dev, "SSD2HL fd=%d va=%#llx len=%#zx\n", fd, device_va, len_bytes);
+
+ io = kzalloc(sizeof(*io), GFP_KERNEL);
+ if (!io) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ *io = (struct hl_direct_io){
+ .device_va = device_va,
+ .len_bytes = len_bytes,
+ .off_bytes = off_bytes,
+ .type = READ,
+ };
+
+ rc = hl_dio_fd_register(ctx, fd, &io->f);
+ if (rc)
+ goto kfree_io;
+
+ rc = hl_direct_io(hdev, io);
+ if (rc >= 0) {
+ *len_read = rc;
+ rc = 0;
+ }
+
+ /* This shall be called only in the case of a sync IO */
+ hl_dio_fd_unregister(&io->f);
+kfree_io:
+ kfree(io);
+out:
+ return rc;
+}
+
+static void hl_p2p_region_fini(struct hl_device *hdev, struct hl_p2p_region *p2pr)
+{
+ if (p2pr->p2ppages) {
+ vfree(p2pr->p2ppages);
+ p2pr->p2ppages = NULL;
+ }
+
+ if (p2pr->p2pmem) {
+ dev_dbg(hdev->dev, "freeing P2P mem from %p, size=%#llx\n",
+ p2pr->p2pmem, p2pr->size);
+ pci_free_p2pmem(hdev->pdev, p2pr->p2pmem, p2pr->size);
+ p2pr->p2pmem = NULL;
+ }
+}
+
+void hl_p2p_region_fini_all(struct hl_device *hdev)
+{
+ int i;
+
+ for (i = 0 ; i < hdev->hldio.np2prs ; ++i)
+ hl_p2p_region_fini(hdev, &hdev->hldio.p2prs[i]);
+
+ kvfree(hdev->hldio.p2prs);
+ hdev->hldio.p2prs = NULL;
+ hdev->hldio.np2prs = 0;
+}
+
+int hl_p2p_region_init(struct hl_device *hdev, struct hl_p2p_region *p2pr)
+{
+ void *addr;
+ int rc, i;
+
+ /* Start by publishing our p2p memory */
+ rc = pci_p2pdma_add_resource(hdev->pdev, p2pr->bar, p2pr->size, p2pr->bar_offset);
+ if (rc) {
+ dev_err(hdev->dev, "error adding p2p resource: %d\n", rc);
+ goto err;
+ }
+
+ /* Alloc all p2p mem */
+ p2pr->p2pmem = pci_alloc_p2pmem(hdev->pdev, p2pr->size);
+ if (!p2pr->p2pmem) {
+ dev_err(hdev->dev, "error allocating p2p memory\n");
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ p2pr->p2ppages = vmalloc((p2pr->size >> PAGE_SHIFT) * sizeof(struct page *));
+ if (!p2pr->p2ppages) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ for (i = 0, addr = p2pr->p2pmem ; i < (p2pr->size >> PAGE_SHIFT) ; ++i, addr += PAGE_SIZE) {
+ p2pr->p2ppages[i] = virt_to_page(addr);
+ if (!p2pr->p2ppages[i]) {
+ rc = -EFAULT;
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+ hl_p2p_region_fini(hdev, p2pr);
+ return rc;
+}
+
+int hl_dio_start(struct hl_device *hdev)
+{
+ dev_dbg(hdev->dev, "initializing HLDIO\n");
+
+ /* Initialize the IO counter and enable IO */
+ hdev->hldio.inflight_ios = alloc_percpu(s64);
+ if (!hdev->hldio.inflight_ios)
+ return -ENOMEM;
+
+ hl_dio_set_io_enabled(hdev, true);
+
+ return 0;
+}
+
+void hl_dio_stop(struct hl_device *hdev)
+{
+ dev_dbg(hdev->dev, "deinitializing HLDIO\n");
+
+	if (hdev->hldio.io_enabled) {
+		/* Disable new IO, then wait for all the inflight IO to finish */
+		hl_dio_set_io_enabled(hdev, false);
+		if (hl_poll_timeout_condition(hdev, !hl_dio_count_io(hdev), 1000,
+					      IO_STABILIZE_TIMEOUT))
+			dev_warn(hdev->dev, "timed out while draining inflight IO\n");
+	}
+
+ if (hdev->hldio.inflight_ios) {
+ free_percpu(hdev->hldio.inflight_ios);
+ hdev->hldio.inflight_ios = NULL;
+ }
+}
diff --git a/drivers/accel/habanalabs/common/hldio.h b/drivers/accel/habanalabs/common/hldio.h
new file mode 100644
index 000000000000..2874388f2851
--- /dev/null
+++ b/drivers/accel/habanalabs/common/hldio.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * hldio.h - NVMe Direct I/O (HLDIO) infrastructure for Habana Labs Driver
+ *
+ * This feature requires specific hardware setup and must not be built
+ * under COMPILE_TEST.
+ */
+
+#ifndef __HL_HLDIO_H__
+#define __HL_HLDIO_H__
+
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/seq_file.h>
+#include <linux/ktime.h> /* ktime functions */
+#include <linux/delay.h> /* usleep_range */
+#include <linux/kernel.h> /* might_sleep_if */
+#include <linux/errno.h> /* error codes */
+
+/* Forward declarations */
+struct hl_device;
+struct file;
+
+/* Enable only if Kconfig selected */
+#ifdef CONFIG_HL_HLDIO
+/**
+ * struct hl_p2p_region - describes a single P2P memory region
+ * @p2ppages: array of page structs for the P2P memory
+ * @p2pmem: virtual address of the P2P memory region
+ * @device_pa: physical address on the device
+ * @bar_offset: offset within the BAR
+ * @size: size of the region in bytes
+ * @bar: BAR number containing this region
+ */
+struct hl_p2p_region {
+ struct page **p2ppages;
+ void *p2pmem;
+ u64 device_pa;
+ u64 bar_offset;
+ u64 size;
+ int bar;
+};
+
+/**
+ * struct hl_dio_stats - Direct I/O statistics
+ * @total_ops: total number of operations attempted
+ * @successful_ops: number of successful operations
+ * @failed_ops: number of failed operations
+ * @bytes_transferred: total bytes successfully transferred
+ * @last_len_read: length of the last read operation
+ */
+struct hl_dio_stats {
+ u64 total_ops;
+ u64 successful_ops;
+ u64 failed_ops;
+ u64 bytes_transferred;
+ size_t last_len_read;
+};
+
+/**
+ * struct hl_dio - describes habanalabs direct storage interaction interface
+ * @p2prs: array of p2p regions
+ * @inflight_ios: percpu counter for inflight ios
+ * @np2prs: number of elements in p2prs
+ * @io_enabled: 1 if io is enabled 0 otherwise
+ */
+struct hl_dio {
+ struct hl_p2p_region *p2prs;
+ s64 __percpu *inflight_ios;
+ u8 np2prs;
+ u8 io_enabled;
+};
+
+int hl_dio_ssd2hl(struct hl_device *hdev, struct hl_ctx *ctx, int fd,
+ u64 device_va, off_t off_bytes, size_t len_bytes,
+ size_t *len_read);
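+/*
+ * Illustrative call from an ioctl handler (a sketch; the argument struct
+ * below is hypothetical and not part of this interface):
+ *	size_t len_read;
+ *	rc = hl_dio_ssd2hl(hdev, ctx, args->fd, args->device_va,
+ *			   args->offset, args->size, &len_read);
+ */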
+void hl_p2p_region_fini_all(struct hl_device *hdev);
+int hl_p2p_region_init(struct hl_device *hdev, struct hl_p2p_region *p2pr);
+int hl_dio_start(struct hl_device *hdev);
+void hl_dio_stop(struct hl_device *hdev);
+
+/* Init/teardown */
+int hl_hldio_init(struct hl_device *hdev);
+void hl_hldio_fini(struct hl_device *hdev);
+
+/* File operations */
+long hl_hldio_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
+
+/* DebugFS hooks */
+#ifdef CONFIG_DEBUG_FS
+void hl_hldio_debugfs_init(struct hl_device *hdev);
+void hl_hldio_debugfs_fini(struct hl_device *hdev);
+#else
+static inline void hl_hldio_debugfs_init(struct hl_device *hdev) { }
+static inline void hl_hldio_debugfs_fini(struct hl_device *hdev) { }
+#endif
+
+#else /* !CONFIG_HL_HLDIO */
+
+struct hl_p2p_region;
+/* Stubs when HLDIO is disabled */
+static inline int hl_dio_ssd2hl(struct hl_device *hdev, struct hl_ctx *ctx, int fd,
+ u64 device_va, off_t off_bytes, size_t len_bytes,
+ size_t *len_read)
+{ return -EOPNOTSUPP; }
+static inline void hl_p2p_region_fini_all(struct hl_device *hdev) {}
+static inline int hl_p2p_region_init(struct hl_device *hdev, struct hl_p2p_region *p2pr)
+{ return -EOPNOTSUPP; }
+static inline int hl_dio_start(struct hl_device *hdev) { return -EOPNOTSUPP; }
+static inline void hl_dio_stop(struct hl_device *hdev) {}
+
+static inline int hl_hldio_init(struct hl_device *hdev) { return 0; }
+static inline void hl_hldio_fini(struct hl_device *hdev) { }
+static inline long hl_hldio_ioctl(struct file *f, unsigned int c,
+ unsigned long a)
+{ return -ENOTTY; }
+static inline void hl_hldio_debugfs_init(struct hl_device *hdev) { }
+static inline void hl_hldio_debugfs_fini(struct hl_device *hdev) { }
+
+#endif /* CONFIG_HL_HLDIO */
+
+/* Simplified polling macro for HLDIO (no simulator support) */
+#define hl_poll_timeout_condition(hdev, cond, sleep_us, timeout_us) \
+({ \
+ ktime_t __timeout = ktime_add_us(ktime_get(), timeout_us); \
+ might_sleep_if(sleep_us); \
+ (void)(hdev); /* keep signature consistent, hdev unused */ \
+ for (;;) { \
+ mb(); /* ensure ordering of memory operations */ \
+ if (cond) \
+ break; \
+ if (timeout_us && ktime_compare(ktime_get(), __timeout) > 0) \
+ break; \
+ if (sleep_us) \
+ usleep_range((sleep_us >> 2) + 1, sleep_us); \
+ } \
+ (cond) ? 0 : -ETIMEDOUT; \
+})
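+
+/*
+ * Example usage (illustrative): evaluates to 0 once the condition holds,
+ * or -ETIMEDOUT if it did not within timeout_us:
+ *
+ *	if (hl_poll_timeout_condition(hdev, !hl_dio_count_io(hdev), 1000,
+ *				      IO_STABILIZE_TIMEOUT))
+ *		dev_warn(hdev->dev, "inflight IO did not drain in time\n");
+ */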
+
+#ifdef CONFIG_HL_HLDIO
+bool hl_device_supports_nvme(struct hl_device *hdev);
+#else
+static inline bool hl_device_supports_nvme(struct hl_device *hdev) { return false; }
+#endif
+
+#endif /* __HL_HLDIO_H__ */
diff --git a/drivers/accel/habanalabs/common/memory.c b/drivers/accel/habanalabs/common/memory.c
index 61472a381904..633db4bff46f 100644
--- a/drivers/accel/habanalabs/common/memory.c
+++ b/drivers/accel/habanalabs/common/memory.c
@@ -1837,7 +1837,12 @@ static void hl_release_dmabuf(struct dma_buf *dmabuf)
atomic_dec(&ctx->hdev->dmabuf_export_cnt);
hl_ctx_put(ctx);
- /* Paired with get_file() in export_dmabuf() */
+ /*
+ * Paired with get_file() in export_dmabuf().
+	 * 'ctx' can still be used here to get the file pointer, even after hl_ctx_put() was
+	 * called, because releasing the compute device file involves another reference
+	 * decrement, which can happen only after fput() is called.
+ */
fput(ctx->hpriv->file_priv->filp);
kfree(hl_dmabuf);
@@ -2332,7 +2337,7 @@ static int get_user_memory(struct hl_device *hdev, u64 addr, u64 size,
if (rc < 0)
goto destroy_pages;
npages = rc;
- rc = -EFAULT;
+ rc = -ENOMEM;
goto put_pages;
}
userptr->npages = npages;
diff --git a/drivers/accel/habanalabs/common/memory_mgr.c b/drivers/accel/habanalabs/common/memory_mgr.c
index 99cd83139d46..4401beb99e42 100644
--- a/drivers/accel/habanalabs/common/memory_mgr.c
+++ b/drivers/accel/habanalabs/common/memory_mgr.c
@@ -259,13 +259,8 @@ int hl_mem_mgr_mmap(struct hl_mem_mgr *mmg, struct vm_area_struct *vma,
goto put_mem;
}
-#ifdef _HAS_TYPE_ARG_IN_ACCESS_OK
- if (!access_ok(VERIFY_WRITE, (void __user *)(uintptr_t)vma->vm_start,
- user_mem_size)) {
-#else
if (!access_ok((void __user *)(uintptr_t)vma->vm_start,
user_mem_size)) {
-#endif
dev_err(mmg->dev, "%s: User pointer is invalid - 0x%lx\n",
buf->behavior->topic, vma->vm_start);
diff --git a/drivers/accel/habanalabs/common/sysfs.c b/drivers/accel/habanalabs/common/sysfs.c
index 82f66520ec18..8f55ba3b4e73 100644
--- a/drivers/accel/habanalabs/common/sysfs.c
+++ b/drivers/accel/habanalabs/common/sysfs.c
@@ -96,14 +96,21 @@ static ssize_t vrm_ver_show(struct device *dev, struct device_attribute *attr, c
infineon_second_stage_third_instance =
(infineon_second_stage_version >> 16) & mask;
- if (cpucp_info->infineon_second_stage_version)
+ if (cpucp_info->infineon_version && cpucp_info->infineon_second_stage_version)
return sprintf(buf, "%#04x %#04x:%#04x:%#04x\n",
le32_to_cpu(cpucp_info->infineon_version),
infineon_second_stage_first_instance,
infineon_second_stage_second_instance,
infineon_second_stage_third_instance);
- else
+ else if (cpucp_info->infineon_second_stage_version)
+ return sprintf(buf, "%#04x:%#04x:%#04x\n",
+ infineon_second_stage_first_instance,
+ infineon_second_stage_second_instance,
+ infineon_second_stage_third_instance);
+ else if (cpucp_info->infineon_version)
return sprintf(buf, "%#04x\n", le32_to_cpu(cpucp_info->infineon_version));
+
+ return 0;
}
static DEVICE_ATTR_RO(vrm_ver);
diff --git a/drivers/accel/habanalabs/gaudi/gaudi.c b/drivers/accel/habanalabs/gaudi/gaudi.c
index fa893a9b826e..34771d75da9d 100644
--- a/drivers/accel/habanalabs/gaudi/gaudi.c
+++ b/drivers/accel/habanalabs/gaudi/gaudi.c
@@ -4168,10 +4168,29 @@ static int gaudi_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
VM_DONTCOPY | VM_NORESERVE);
+#ifdef _HAS_DMA_MMAP_COHERENT
+ /*
+ * If dma_alloc_coherent() returns a vmalloc address, set VM_MIXEDMAP
+ * so vm_insert_page() can handle it safely. Without this, the kernel
+ * may BUG_ON due to VM_PFNMAP.
+ */
+ if (is_vmalloc_addr(cpu_addr))
+ vm_flags_set(vma, VM_MIXEDMAP);
+
rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr,
(dma_addr - HOST_PHYS_BASE), size);
if (rc)
dev_err(hdev->dev, "dma_mmap_coherent error %d", rc);
+#else
+
+ rc = remap_pfn_range(vma, vma->vm_start,
+ virt_to_phys(cpu_addr) >> PAGE_SHIFT,
+ size, vma->vm_page_prot);
+ if (rc)
+ dev_err(hdev->dev, "remap_pfn_range error %d", rc);
+
+#endif
+
return rc;
}
diff --git a/drivers/accel/habanalabs/gaudi2/gaudi2.c b/drivers/accel/habanalabs/gaudi2/gaudi2.c
index 5722e4128d3c..b8c0689dba64 100644
--- a/drivers/accel/habanalabs/gaudi2/gaudi2.c
+++ b/drivers/accel/habanalabs/gaudi2/gaudi2.c
@@ -728,6 +728,354 @@ static const int gaudi2_dma_core_async_event_id[] = {
[DMA_CORE_ID_KDMA] = GAUDI2_EVENT_KDMA0_CORE,
};
+const char *gaudi2_engine_id_str[] = {
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_EDMA_0),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_EDMA_1),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_MME),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_TPC_0),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_TPC_1),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_TPC_2),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_TPC_3),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_TPC_4),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_TPC_5),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_DEC_0),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_DEC_1),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_EDMA_0),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_EDMA_1),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_MME),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_TPC_0),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_TPC_1),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_TPC_2),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_TPC_3),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_TPC_4),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_TPC_5),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_DEC_0),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_DEC_1),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_EDMA_0),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_EDMA_1),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_MME),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_TPC_0),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_TPC_1),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_TPC_2),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_TPC_3),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_TPC_4),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_TPC_5),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_DEC_0),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_DEC_1),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_EDMA_0),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_EDMA_1),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_MME),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_TPC_0),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_TPC_1),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_TPC_2),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_TPC_3),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_TPC_4),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_TPC_5),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_DEC_0),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_DEC_1),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_TPC_6),
+ __stringify(GAUDI2_ENGINE_ID_PDMA_0),
+ __stringify(GAUDI2_ENGINE_ID_PDMA_1),
+ __stringify(GAUDI2_ENGINE_ID_ROT_0),
+ __stringify(GAUDI2_ENGINE_ID_ROT_1),
+ __stringify(GAUDI2_PCIE_ENGINE_ID_DEC_0),
+ __stringify(GAUDI2_PCIE_ENGINE_ID_DEC_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC0_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC0_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC1_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC1_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC2_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC2_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC3_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC3_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC4_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC4_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC5_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC5_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC6_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC6_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC7_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC7_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC8_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC8_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC9_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC9_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC10_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC10_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC11_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC11_1),
+ __stringify(GAUDI2_ENGINE_ID_PCIE),
+ __stringify(GAUDI2_ENGINE_ID_PSOC),
+ __stringify(GAUDI2_ENGINE_ID_ARC_FARM),
+ __stringify(GAUDI2_ENGINE_ID_KDMA),
+ __stringify(GAUDI2_ENGINE_ID_SIZE),
+};
+
+const char *gaudi2_queue_id_str[] = {
+ __stringify(GAUDI2_QUEUE_ID_PDMA_0_0),
+ __stringify(GAUDI2_QUEUE_ID_PDMA_0_1),
+ __stringify(GAUDI2_QUEUE_ID_PDMA_0_2),
+ __stringify(GAUDI2_QUEUE_ID_PDMA_0_3),
+ __stringify(GAUDI2_QUEUE_ID_PDMA_1_0),
+ __stringify(GAUDI2_QUEUE_ID_PDMA_1_1),
+ __stringify(GAUDI2_QUEUE_ID_PDMA_1_2),
+ __stringify(GAUDI2_QUEUE_ID_PDMA_1_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_EDMA_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_EDMA_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_EDMA_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_EDMA_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_EDMA_1_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_EDMA_1_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_EDMA_1_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_EDMA_1_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_MME_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_MME_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_MME_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_MME_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_1_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_1_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_1_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_1_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_2_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_2_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_2_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_2_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_3_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_3_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_3_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_3_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_4_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_4_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_4_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_4_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_5_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_5_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_5_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_5_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_6_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_6_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_6_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_6_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_EDMA_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_EDMA_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_EDMA_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_EDMA_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_EDMA_1_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_EDMA_1_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_EDMA_1_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_EDMA_1_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_MME_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_MME_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_MME_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_MME_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_1_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_1_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_1_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_1_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_2_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_2_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_2_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_2_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_3_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_3_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_3_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_3_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_4_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_4_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_4_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_4_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_5_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_5_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_5_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_5_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_EDMA_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_EDMA_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_EDMA_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_EDMA_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_EDMA_1_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_EDMA_1_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_EDMA_1_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_EDMA_1_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_MME_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_MME_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_MME_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_MME_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_1_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_1_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_1_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_1_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_2_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_2_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_2_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_2_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_3_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_3_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_3_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_3_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_4_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_4_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_4_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_4_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_5_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_5_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_5_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_5_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_EDMA_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_EDMA_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_EDMA_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_EDMA_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_EDMA_1_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_EDMA_1_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_EDMA_1_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_EDMA_1_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_MME_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_MME_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_MME_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_MME_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_1_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_1_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_1_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_1_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_2_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_2_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_2_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_2_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_3_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_3_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_3_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_3_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_4_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_4_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_4_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_4_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_5_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_5_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_5_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_5_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_0_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_0_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_0_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_0_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_1_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_1_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_1_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_1_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_2_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_2_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_2_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_2_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_3_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_3_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_3_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_3_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_4_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_4_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_4_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_4_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_5_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_5_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_5_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_5_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_6_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_6_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_6_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_6_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_7_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_7_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_7_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_7_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_8_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_8_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_8_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_8_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_9_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_9_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_9_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_9_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_10_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_10_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_10_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_10_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_11_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_11_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_11_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_11_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_12_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_12_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_12_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_12_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_13_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_13_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_13_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_13_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_14_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_14_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_14_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_14_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_15_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_15_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_15_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_15_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_16_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_16_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_16_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_16_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_17_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_17_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_17_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_17_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_18_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_18_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_18_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_18_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_19_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_19_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_19_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_19_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_20_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_20_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_20_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_20_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_21_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_21_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_21_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_21_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_22_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_22_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_22_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_22_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_23_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_23_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_23_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_23_3),
+ __stringify(GAUDI2_QUEUE_ID_ROT_0_0),
+ __stringify(GAUDI2_QUEUE_ID_ROT_0_1),
+ __stringify(GAUDI2_QUEUE_ID_ROT_0_2),
+ __stringify(GAUDI2_QUEUE_ID_ROT_0_3),
+ __stringify(GAUDI2_QUEUE_ID_ROT_1_0),
+ __stringify(GAUDI2_QUEUE_ID_ROT_1_1),
+ __stringify(GAUDI2_QUEUE_ID_ROT_1_2),
+ __stringify(GAUDI2_QUEUE_ID_ROT_1_3),
+ __stringify(GAUDI2_QUEUE_ID_CPU_PQ),
+ __stringify(GAUDI2_QUEUE_ID_SIZE),
+};
+
static const char * const gaudi2_qm_sei_error_cause[GAUDI2_NUM_OF_QM_SEI_ERR_CAUSE] = {
"qman sei intr",
"arc sei intr"
@@ -3150,7 +3498,6 @@ static int gaudi2_early_init(struct hl_device *hdev)
rc = hl_fw_read_preboot_status(hdev);
if (rc) {
if (hdev->reset_on_preboot_fail)
- /* we are already on failure flow, so don't check if hw_fini fails. */
hdev->asic_funcs->hw_fini(hdev, true, false);
goto pci_fini;
}
@@ -3162,6 +3509,13 @@ static int gaudi2_early_init(struct hl_device *hdev)
dev_err(hdev->dev, "failed to reset HW in dirty state (%d)\n", rc);
goto pci_fini;
}
+
+ rc = hl_fw_read_preboot_status(hdev);
+ if (rc) {
+ if (hdev->reset_on_preboot_fail)
+ hdev->asic_funcs->hw_fini(hdev, true, false);
+ goto pci_fini;
+ }
}
return 0;
@@ -4836,7 +5190,7 @@ static void gaudi2_halt_engines(struct hl_device *hdev, bool hard_reset, bool fw
else
wait_timeout_ms = GAUDI2_RESET_WAIT_MSEC;
- if (fw_reset)
+ if (fw_reset || hdev->cpld_shutdown)
goto skip_engines;
gaudi2_stop_dma_qmans(hdev);
@@ -6484,6 +6838,13 @@ static int gaudi2_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
VM_DONTCOPY | VM_NORESERVE);
#ifdef _HAS_DMA_MMAP_COHERENT
+ /*
+ * If dma_alloc_coherent() returns a vmalloc address, set VM_MIXEDMAP
+ * so vm_insert_page() can handle it safely. Without this, the kernel
+ * may BUG_ON due to VM_PFNMAP.
+ */
+ if (is_vmalloc_addr(cpu_addr))
+ vm_flags_set(vma, VM_MIXEDMAP);
rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr, dma_addr, size);
if (rc)
@@ -6774,7 +7135,8 @@ static int gaudi2_validate_cb_address(struct hl_device *hdev, struct hl_cs_parse
struct gaudi2_device *gaudi2 = hdev->asic_specific;
if (!gaudi2_is_queue_enabled(hdev, parser->hw_queue_id)) {
- dev_err(hdev->dev, "h/w queue %d is disabled\n", parser->hw_queue_id);
+ dev_err(hdev->dev, "h/w queue %s is disabled\n",
+ GAUDI2_QUEUE_ID_TO_STR(parser->hw_queue_id));
return -EINVAL;
}
@@ -7026,7 +7388,8 @@ static int gaudi2_test_queue_send_msg_short(struct hl_device *hdev, u32 hw_queue
rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id, pkt_size, msg_info->dma_addr);
if (rc)
dev_err(hdev->dev,
- "Failed to send msg_short packet to H/W queue %d\n", hw_queue_id);
+ "Failed to send msg_short packet to H/W queue %s\n",
+ GAUDI2_QUEUE_ID_TO_STR(hw_queue_id));
return rc;
}
@@ -7052,8 +7415,8 @@ static int gaudi2_test_queue_wait_completion(struct hl_device *hdev, u32 hw_queu
timeout_usec);
if (rc == -ETIMEDOUT) {
- dev_err(hdev->dev, "H/W queue %d test failed (SOB_OBJ_0 == 0x%x)\n",
- hw_queue_id, tmp);
+ dev_err(hdev->dev, "H/W queue %s test failed (SOB_OBJ_0 == 0x%x)\n",
+ GAUDI2_QUEUE_ID_TO_STR(hw_queue_id), tmp);
rc = -EIO;
}
@@ -9603,8 +9966,8 @@ static int hl_arc_event_handle(struct hl_device *hdev, u16 event_type,
q = (struct hl_engine_arc_dccm_queue_full_irq *) &payload;
gaudi2_print_event(hdev, event_type, true,
- "ARC DCCM Full event: EngId: %u, Intr_type: %u, Qidx: %u",
- engine_id, intr_type, q->queue_index);
+ "ARC DCCM Full event: Eng: %s, Intr_type: %u, Qidx: %u",
+ GAUDI2_ENG_ID_TO_STR(engine_id), intr_type, q->queue_index);
return 1;
default:
gaudi2_print_event(hdev, event_type, true, "Unknown ARC event type");
@@ -10172,7 +10535,7 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
dev_err(hdev->dev, "CPLD shutdown event, reset reason: 0x%llx\n",
le64_to_cpu(eq_entry->data[0]));
error_count = GAUDI2_NA_EVENT_CAUSE;
- event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
+ hl_eq_cpld_shutdown_event_handle(hdev, event_type, &event_mask);
break;
case GAUDI2_EVENT_CPU_PKT_SANITY_FAILED:
@@ -10260,6 +10623,7 @@ reset_device:
if (event_mask & HL_NOTIFIER_EVENT_GENERAL_HW_ERR)
hl_handle_critical_hw_err(hdev, event_type, &event_mask);
+ hl_debugfs_cfg_access_history_dump(hdev);
event_mask |= HL_NOTIFIER_EVENT_DEVICE_RESET;
hl_device_cond_reset(hdev, reset_flags, event_mask);
}
@@ -10296,8 +10660,8 @@ static int gaudi2_memset_memory_chunk_using_edma_qm(struct hl_device *hdev,
rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id, pkt_size, phys_addr);
if (rc)
- dev_err(hdev->dev, "Failed to send lin_dma packet to H/W queue %d\n",
- hw_queue_id);
+ dev_err(hdev->dev, "Failed to send lin_dma packet to H/W queue %s\n",
+ GAUDI2_QUEUE_ID_TO_STR(hw_queue_id));
return rc;
}
diff --git a/drivers/accel/habanalabs/gaudi2/gaudi2P.h b/drivers/accel/habanalabs/gaudi2/gaudi2P.h
index 05117272cac7..bdf5c1bd2d63 100644
--- a/drivers/accel/habanalabs/gaudi2/gaudi2P.h
+++ b/drivers/accel/habanalabs/gaudi2/gaudi2P.h
@@ -240,6 +240,15 @@
#define GAUDI2_NUM_TESTED_QS (GAUDI2_QUEUE_ID_CPU_PQ - GAUDI2_QUEUE_ID_PDMA_0_0)
+extern const char *gaudi2_engine_id_str[];
+extern const char *gaudi2_queue_id_str[];
+
+#define GAUDI2_ENG_ID_TO_STR(initiator) ((initiator) >= GAUDI2_ENGINE_ID_SIZE ? "not found" : \
+ gaudi2_engine_id_str[initiator])
+
+#define GAUDI2_QUEUE_ID_TO_STR(initiator) ((initiator) >= GAUDI2_QUEUE_ID_SIZE ? "not found" : \
+ gaudi2_queue_id_str[initiator])
+
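+/*
+ * Example usage (illustrative): out-of-range IDs map to "not found", so
+ * callers can print unconditionally, e.g.:
+ *	dev_err(hdev->dev, "H/W queue %s is disabled\n",
+ *		GAUDI2_QUEUE_ID_TO_STR(hw_queue_id));
+ */
+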
enum gaudi2_reserved_sob_id {
GAUDI2_RESERVED_SOB_CS_COMPLETION_FIRST,
GAUDI2_RESERVED_SOB_CS_COMPLETION_LAST =
diff --git a/drivers/accel/habanalabs/gaudi2/gaudi2_coresight.c b/drivers/accel/habanalabs/gaudi2/gaudi2_coresight.c
index 2423620ff358..bc3c57bda5cd 100644
--- a/drivers/accel/habanalabs/gaudi2/gaudi2_coresight.c
+++ b/drivers/accel/habanalabs/gaudi2/gaudi2_coresight.c
@@ -2426,7 +2426,7 @@ static int gaudi2_config_bmon(struct hl_device *hdev, struct hl_debug_params *pa
WREG32(base_reg + mmBMON_ADDRH_E3_OFFSET, 0);
WREG32(base_reg + mmBMON_REDUCTION_OFFSET, 0);
WREG32(base_reg + mmBMON_STM_TRC_OFFSET, 0x7 | (0xA << 8));
- WREG32(base_reg + mmBMON_CR_OFFSET, 0x77 | 0xf << 24);
+ WREG32(base_reg + mmBMON_CR_OFFSET, 0x41);
}
return 0;
diff --git a/drivers/accel/ivpu/ivpu_fw.h b/drivers/accel/ivpu/ivpu_fw.h
index 9a3935be1c05..7081913fb0dd 100644
--- a/drivers/accel/ivpu/ivpu_fw.h
+++ b/drivers/accel/ivpu/ivpu_fw.h
@@ -45,7 +45,7 @@ struct ivpu_fw_info {
int ivpu_fw_init(struct ivpu_device *vdev);
void ivpu_fw_fini(struct ivpu_device *vdev);
void ivpu_fw_load(struct ivpu_device *vdev);
-void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params *bp);
+void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params *boot_params);
static inline bool ivpu_fw_is_cold_boot(struct ivpu_device *vdev)
{
diff --git a/drivers/accel/ivpu/ivpu_hw_btrs.c b/drivers/accel/ivpu/ivpu_hw_btrs.c
index b236c7234daa..afdb3b2aa72a 100644
--- a/drivers/accel/ivpu/ivpu_hw_btrs.c
+++ b/drivers/accel/ivpu/ivpu_hw_btrs.c
@@ -33,7 +33,6 @@
#define PLL_CDYN_DEFAULT 0x80
#define PLL_EPP_DEFAULT 0x80
-#define PLL_CONFIG_DEFAULT 0x0
#define PLL_REF_CLK_FREQ 50000000ull
#define PLL_RATIO_TO_FREQ(x) ((x) * PLL_REF_CLK_FREQ)
@@ -303,7 +302,7 @@ static void prepare_wp_request(struct ivpu_device *vdev, struct wp_request *wp,
wp->epp = 0;
} else {
wp->target = hw->pll.pn_ratio;
- wp->cfg = enable ? PLL_CONFIG_DEFAULT : 0;
+ wp->cfg = 0;
wp->cdyn = enable ? PLL_CDYN_DEFAULT : 0;
wp->epp = enable ? PLL_EPP_DEFAULT : 0;
}
diff --git a/drivers/accel/ivpu/ivpu_hw_btrs.h b/drivers/accel/ivpu/ivpu_hw_btrs.h
index d2d82651976d..032c384ac3d4 100644
--- a/drivers/accel/ivpu/ivpu_hw_btrs.h
+++ b/drivers/accel/ivpu/ivpu_hw_btrs.h
@@ -36,7 +36,7 @@ u32 ivpu_hw_btrs_dpu_freq_get(struct ivpu_device *vdev);
bool ivpu_hw_btrs_irq_handler_mtl(struct ivpu_device *vdev, int irq);
bool ivpu_hw_btrs_irq_handler_lnl(struct ivpu_device *vdev, int irq);
int ivpu_hw_btrs_dct_get_request(struct ivpu_device *vdev, bool *enable);
-void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u32 dct_percent);
+void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u32 active_percent);
u32 ivpu_hw_btrs_telemetry_offset_get(struct ivpu_device *vdev);
u32 ivpu_hw_btrs_telemetry_size_get(struct ivpu_device *vdev);
u32 ivpu_hw_btrs_telemetry_enable_get(struct ivpu_device *vdev);
diff --git a/drivers/accel/ivpu/ivpu_ipc.c b/drivers/accel/ivpu/ivpu_ipc.c
index 39f83225c181..5f00809d448a 100644
--- a/drivers/accel/ivpu/ivpu_ipc.c
+++ b/drivers/accel/ivpu/ivpu_ipc.c
@@ -141,7 +141,6 @@ ivpu_ipc_rx_msg_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
struct ivpu_ipc_rx_msg *rx_msg;
lockdep_assert_held(&ipc->cons_lock);
- lockdep_assert_irqs_disabled();
rx_msg = kzalloc(sizeof(*rx_msg), GFP_ATOMIC);
if (!rx_msg) {
diff --git a/drivers/accel/rocket/Kconfig b/drivers/accel/rocket/Kconfig
new file mode 100644
index 000000000000..16465abe0660
--- /dev/null
+++ b/drivers/accel/rocket/Kconfig
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config DRM_ACCEL_ROCKET
+ tristate "Rocket (support for Rockchip NPUs)"
+ depends on DRM_ACCEL
+ depends on (ARCH_ROCKCHIP && ARM64) || COMPILE_TEST
+ depends on ROCKCHIP_IOMMU || COMPILE_TEST
+ depends on MMU
+ select DRM_SCHED
+ select DRM_GEM_SHMEM_HELPER
+ help
+	  Choose this option if you have a Rockchip SoC that contains a
+	  compatible Neural Processing Unit (NPU), such as the RK3588. Rockchip
+	  calls it either RKNN or RKNPU; it accelerates inference of neural
+	  networks.
+
+ The interface exposed to userspace is described in
+ include/uapi/drm/rocket_accel.h and is used by the Rocket userspace
+ driver in Mesa3D.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called rocket.
diff --git a/drivers/accel/rocket/Makefile b/drivers/accel/rocket/Makefile
new file mode 100644
index 000000000000..3713dfe223d6
--- /dev/null
+++ b/drivers/accel/rocket/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_DRM_ACCEL_ROCKET) := rocket.o
+
+rocket-y := \
+ rocket_core.o \
+ rocket_device.o \
+ rocket_drv.o \
+ rocket_gem.o \
+ rocket_job.o
diff --git a/drivers/accel/rocket/rocket_core.c b/drivers/accel/rocket/rocket_core.c
new file mode 100644
index 000000000000..abe7719c1db4
--- /dev/null
+++ b/drivers/accel/rocket/rocket_core.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dev_printk.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/iommu.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#include "rocket_core.h"
+#include "rocket_job.h"
+
+int rocket_core_init(struct rocket_core *core)
+{
+ struct device *dev = core->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ u32 version;
+ int err = 0;
+
+ core->resets[0].id = "srst_a";
+ core->resets[1].id = "srst_h";
+ err = devm_reset_control_bulk_get_exclusive(&pdev->dev, ARRAY_SIZE(core->resets),
+ core->resets);
+ if (err)
+ return dev_err_probe(dev, err, "failed to get resets for core %d\n", core->index);
+
+ err = devm_clk_bulk_get(dev, ARRAY_SIZE(core->clks), core->clks);
+ if (err)
+ return dev_err_probe(dev, err, "failed to get clocks for core %d\n", core->index);
+
+ core->pc_iomem = devm_platform_ioremap_resource_byname(pdev, "pc");
+ if (IS_ERR(core->pc_iomem)) {
+ dev_err(dev, "couldn't find PC registers %ld\n", PTR_ERR(core->pc_iomem));
+ return PTR_ERR(core->pc_iomem);
+ }
+
+ core->cna_iomem = devm_platform_ioremap_resource_byname(pdev, "cna");
+ if (IS_ERR(core->cna_iomem)) {
+ dev_err(dev, "couldn't find CNA registers %ld\n", PTR_ERR(core->cna_iomem));
+ return PTR_ERR(core->cna_iomem);
+ }
+
+ core->core_iomem = devm_platform_ioremap_resource_byname(pdev, "core");
+ if (IS_ERR(core->core_iomem)) {
+ dev_err(dev, "couldn't find CORE registers %ld\n", PTR_ERR(core->core_iomem));
+ return PTR_ERR(core->core_iomem);
+ }
+
+ dma_set_max_seg_size(dev, UINT_MAX);
+
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
+ if (err)
+ return err;
+
+ core->iommu_group = iommu_group_get(dev);
+
+ err = rocket_job_init(core);
+ if (err)
+ return err;
+
+ pm_runtime_use_autosuspend(dev);
+
+ /*
+ * As this NPU will be most often used as part of a media pipeline that
+ * ends presenting in a display, choose 50 ms (~3 frames at 60Hz) as an
+ * autosuspend delay as that will keep the device powered up while the
+ * pipeline is running.
+ */
+ pm_runtime_set_autosuspend_delay(dev, 50);
+
+ pm_runtime_enable(dev);
+
+ err = pm_runtime_resume_and_get(dev);
+ if (err) {
+ rocket_job_fini(core);
+ return err;
+ }
+
+ version = rocket_pc_readl(core, VERSION);
+ version += rocket_pc_readl(core, VERSION_NUM) & 0xffff;
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ dev_info(dev, "Rockchip NPU core %d version: %d\n", core->index, version);
+
+ return 0;
+}
+
+void rocket_core_fini(struct rocket_core *core)
+{
+ pm_runtime_dont_use_autosuspend(core->dev);
+ pm_runtime_disable(core->dev);
+ iommu_group_put(core->iommu_group);
+ core->iommu_group = NULL;
+ rocket_job_fini(core);
+}
+
+void rocket_core_reset(struct rocket_core *core)
+{
+ reset_control_bulk_assert(ARRAY_SIZE(core->resets), core->resets);
+
+ udelay(10);
+
+ reset_control_bulk_deassert(ARRAY_SIZE(core->resets), core->resets);
+}
diff --git a/drivers/accel/rocket/rocket_core.h b/drivers/accel/rocket/rocket_core.h
new file mode 100644
index 000000000000..f6d7382854ca
--- /dev/null
+++ b/drivers/accel/rocket/rocket_core.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#ifndef __ROCKET_CORE_H__
+#define __ROCKET_CORE_H__
+
+#include <drm/gpu_scheduler.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/mutex_types.h>
+#include <linux/reset.h>
+
+#include "rocket_registers.h"
+
+#define rocket_pc_readl(core, reg) \
+ readl((core)->pc_iomem + (REG_PC_##reg))
+#define rocket_pc_writel(core, reg, value) \
+ writel(value, (core)->pc_iomem + (REG_PC_##reg))
+
+#define rocket_cna_readl(core, reg) \
+ readl((core)->cna_iomem + (REG_CNA_##reg) - REG_CNA_S_STATUS)
+#define rocket_cna_writel(core, reg, value) \
+ writel(value, (core)->cna_iomem + (REG_CNA_##reg) - REG_CNA_S_STATUS)
+
+#define rocket_core_readl(core, reg) \
+ readl((core)->core_iomem + (REG_CORE_##reg) - REG_CORE_S_STATUS)
+#define rocket_core_writel(core, reg, value) \
+ writel(value, (core)->core_iomem + (REG_CORE_##reg) - REG_CORE_S_STATUS)
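+
+/*
+ * The "cna" and "core" MMIO resources are expected to be mapped starting at
+ * their S_STATUS registers rather than at block offset 0, hence register
+ * offsets are rebased by subtracting the block's S_STATUS address.
+ */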
+
+struct rocket_core {
+ struct device *dev;
+ struct rocket_device *rdev;
+ unsigned int index;
+
+ int irq;
+ void __iomem *pc_iomem;
+ void __iomem *cna_iomem;
+ void __iomem *core_iomem;
+ struct clk_bulk_data clks[4];
+ struct reset_control_bulk_data resets[2];
+
+ struct iommu_group *iommu_group;
+
+ struct mutex job_lock;
+ struct rocket_job *in_flight_job;
+
+ spinlock_t fence_lock;
+
+ struct {
+ struct workqueue_struct *wq;
+ struct work_struct work;
+ atomic_t pending;
+ } reset;
+
+ struct drm_gpu_scheduler sched;
+ u64 fence_context;
+ u64 emit_seqno;
+};
+
+int rocket_core_init(struct rocket_core *core);
+void rocket_core_fini(struct rocket_core *core);
+void rocket_core_reset(struct rocket_core *core);
+
+#endif
diff --git a/drivers/accel/rocket/rocket_device.c b/drivers/accel/rocket/rocket_device.c
new file mode 100644
index 000000000000..46e6ee1e72c5
--- /dev/null
+++ b/drivers/accel/rocket/rocket_device.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#include <drm/drm_drv.h>
+#include <linux/array_size.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+
+#include "rocket_device.h"
+
+struct rocket_device *rocket_device_init(struct platform_device *pdev,
+ const struct drm_driver *rocket_drm_driver)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *core_node;
+ struct rocket_device *rdev;
+ struct drm_device *ddev;
+ unsigned int num_cores = 0;
+ int err;
+
+ rdev = devm_drm_dev_alloc(dev, rocket_drm_driver, struct rocket_device, ddev);
+ if (IS_ERR(rdev))
+ return rdev;
+
+ ddev = &rdev->ddev;
+ dev_set_drvdata(dev, rdev);
+
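+ /* Size the core array after every enabled RKNN core node in the DT. */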
+ for_each_compatible_node(core_node, NULL, "rockchip,rk3588-rknn-core")
+ if (of_device_is_available(core_node))
+ num_cores++;
+
+ rdev->cores = devm_kcalloc(dev, num_cores, sizeof(*rdev->cores), GFP_KERNEL);
+ if (!rdev->cores)
+ return ERR_PTR(-ENOMEM);
+
+ dma_set_max_seg_size(dev, UINT_MAX);
+
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
+ if (err)
+ return ERR_PTR(err);
+
+ err = devm_mutex_init(dev, &rdev->sched_lock);
+ if (err)
+ return ERR_PTR(err);
+
+ err = drm_dev_register(ddev, 0);
+ if (err)
+ return ERR_PTR(err);
+
+ return rdev;
+}
+
+void rocket_device_fini(struct rocket_device *rdev)
+{
+ WARN_ON(rdev->num_cores > 0);
+
+ drm_dev_unregister(&rdev->ddev);
+}
diff --git a/drivers/accel/rocket/rocket_device.h b/drivers/accel/rocket/rocket_device.h
new file mode 100644
index 000000000000..ce662abc01d3
--- /dev/null
+++ b/drivers/accel/rocket/rocket_device.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#ifndef __ROCKET_DEVICE_H__
+#define __ROCKET_DEVICE_H__
+
+#include <drm/drm_device.h>
+#include <linux/clk.h>
+#include <linux/container_of.h>
+#include <linux/iommu.h>
+#include <linux/platform_device.h>
+
+#include "rocket_core.h"
+
+struct rocket_device {
+ struct drm_device ddev;
+
+ struct mutex sched_lock;
+
+ struct rocket_core *cores;
+ unsigned int num_cores;
+};
+
+struct rocket_device *rocket_device_init(struct platform_device *pdev,
+ const struct drm_driver *rocket_drm_driver);
+void rocket_device_fini(struct rocket_device *rdev);
+#define to_rocket_device(drm_dev) \
+ ((struct rocket_device *)(container_of((drm_dev), struct rocket_device, ddev)))
+
+#endif /* __ROCKET_DEVICE_H__ */
diff --git a/drivers/accel/rocket/rocket_drv.c b/drivers/accel/rocket/rocket_drv.c
new file mode 100644
index 000000000000..5c0b63f0a8f0
--- /dev/null
+++ b/drivers/accel/rocket/rocket_drv.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#include <drm/drm_accel.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_ioctl.h>
+#include <drm/rocket_accel.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/iommu.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "rocket_drv.h"
+#include "rocket_gem.h"
+#include "rocket_job.h"
+
+/*
+ * Facade device, used to expose a single DRM device to userspace, that
+ * schedules jobs to any RKNN cores in the system.
+ */
+static struct platform_device *drm_dev;
+static struct rocket_device *rdev;
+
+static void
+rocket_iommu_domain_destroy(struct kref *kref)
+{
+ struct rocket_iommu_domain *domain = container_of(kref, struct rocket_iommu_domain, kref);
+
+ iommu_domain_free(domain->domain);
+ domain->domain = NULL;
+ kfree(domain);
+}
+
+static struct rocket_iommu_domain *
+rocket_iommu_domain_create(struct device *dev)
+{
+ struct rocket_iommu_domain *domain = kmalloc(sizeof(*domain), GFP_KERNEL);
+ void *err;
+
+ if (!domain)
+ return ERR_PTR(-ENOMEM);
+
+ domain->domain = iommu_paging_domain_alloc(dev);
+ if (IS_ERR(domain->domain)) {
+ err = ERR_CAST(domain->domain);
+ kfree(domain);
+ return err;
+ }
+ kref_init(&domain->kref);
+
+ return domain;
+}
+
+struct rocket_iommu_domain *
+rocket_iommu_domain_get(struct rocket_file_priv *rocket_priv)
+{
+ kref_get(&rocket_priv->domain->kref);
+ return rocket_priv->domain;
+}
+
+void
+rocket_iommu_domain_put(struct rocket_iommu_domain *domain)
+{
+ kref_put(&domain->kref, rocket_iommu_domain_destroy);
+}
+
+static int
+rocket_open(struct drm_device *dev, struct drm_file *file)
+{
+ struct rocket_device *rdev = to_rocket_device(dev);
+ struct rocket_file_priv *rocket_priv;
+ u64 start, end;
+ int ret;
+
+ if (!try_module_get(THIS_MODULE))
+ return -EINVAL;
+
+ rocket_priv = kzalloc(sizeof(*rocket_priv), GFP_KERNEL);
+ if (!rocket_priv) {
+ ret = -ENOMEM;
+ goto err_put_mod;
+ }
+
+ rocket_priv->rdev = rdev;
+ rocket_priv->domain = rocket_iommu_domain_create(rdev->cores[0].dev);
+ if (IS_ERR(rocket_priv->domain)) {
+ ret = PTR_ERR(rocket_priv->domain);
+ goto err_free;
+ }
+
+ file->driver_priv = rocket_priv;
+
+ start = rocket_priv->domain->domain->geometry.aperture_start;
+ end = rocket_priv->domain->domain->geometry.aperture_end;
+ drm_mm_init(&rocket_priv->mm, start, end - start + 1);
+ mutex_init(&rocket_priv->mm_lock);
+
+ ret = rocket_job_open(rocket_priv);
+ if (ret)
+ goto err_mm_takedown;
+
+ return 0;
+
+err_mm_takedown:
+ mutex_destroy(&rocket_priv->mm_lock);
+ drm_mm_takedown(&rocket_priv->mm);
+ rocket_iommu_domain_put(rocket_priv->domain);
+err_free:
+ kfree(rocket_priv);
+err_put_mod:
+ module_put(THIS_MODULE);
+ return ret;
+}
+
+static void
+rocket_postclose(struct drm_device *dev, struct drm_file *file)
+{
+ struct rocket_file_priv *rocket_priv = file->driver_priv;
+
+ rocket_job_close(rocket_priv);
+ mutex_destroy(&rocket_priv->mm_lock);
+ drm_mm_takedown(&rocket_priv->mm);
+ rocket_iommu_domain_put(rocket_priv->domain);
+ kfree(rocket_priv);
+ module_put(THIS_MODULE);
+}
+
+static const struct drm_ioctl_desc rocket_drm_driver_ioctls[] = {
+#define ROCKET_IOCTL(n, func) \
+ DRM_IOCTL_DEF_DRV(ROCKET_##n, rocket_ioctl_##func, 0)
+
+ ROCKET_IOCTL(CREATE_BO, create_bo),
+ ROCKET_IOCTL(SUBMIT, submit),
+ ROCKET_IOCTL(PREP_BO, prep_bo),
+ ROCKET_IOCTL(FINI_BO, fini_bo),
+};
+
+DEFINE_DRM_ACCEL_FOPS(rocket_accel_driver_fops);
+
+/*
+ * Rocket driver version:
+ * - 1.0 - initial interface
+ */
+static const struct drm_driver rocket_drm_driver = {
+ .driver_features = DRIVER_COMPUTE_ACCEL | DRIVER_GEM,
+ .open = rocket_open,
+ .postclose = rocket_postclose,
+ .gem_create_object = rocket_gem_create_object,
+ .ioctls = rocket_drm_driver_ioctls,
+ .num_ioctls = ARRAY_SIZE(rocket_drm_driver_ioctls),
+ .fops = &rocket_accel_driver_fops,
+ .name = "rocket",
+ .desc = "rocket DRM",
+};
+
+static int rocket_probe(struct platform_device *pdev)
+{
+ if (rdev == NULL) {
+ /* First core probing, initialize DRM device. */
+ rdev = rocket_device_init(drm_dev, &rocket_drm_driver);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev, "failed to initialize rocket device\n");
+ return PTR_ERR(rdev);
+ }
+ }
+
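+ /* Each RKNN core binds as its own platform device; claim the next slot. */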
+ unsigned int core = rdev->num_cores;
+
+ dev_set_drvdata(&pdev->dev, rdev);
+
+ rdev->cores[core].rdev = rdev;
+ rdev->cores[core].dev = &pdev->dev;
+ rdev->cores[core].index = core;
+
+ rdev->num_cores++;
+
+ return rocket_core_init(&rdev->cores[core]);
+}
+
+static void rocket_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ for (unsigned int core = 0; core < rdev->num_cores; core++) {
+ if (rdev->cores[core].dev == dev) {
+ rocket_core_fini(&rdev->cores[core]);
+ rdev->num_cores--;
+ break;
+ }
+ }
+
+ if (rdev->num_cores == 0) {
+ /* Last core removed, deinitialize DRM device. */
+ rocket_device_fini(rdev);
+ rdev = NULL;
+ }
+}
+
+static const struct of_device_id dt_match[] = {
+ { .compatible = "rockchip,rk3588-rknn-core" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dt_match);
+
+static int find_core_for_dev(struct device *dev)
+{
+ struct rocket_device *rdev = dev_get_drvdata(dev);
+
+ for (unsigned int core = 0; core < rdev->num_cores; core++) {
+ if (dev == rdev->cores[core].dev)
+ return core;
+ }
+
+ return -1;
+}
+
+static int rocket_device_runtime_resume(struct device *dev)
+{
+ struct rocket_device *rdev = dev_get_drvdata(dev);
+ int core = find_core_for_dev(dev);
+ int err = 0;
+
+ if (core < 0)
+ return -ENODEV;
+
+ err = clk_bulk_prepare_enable(ARRAY_SIZE(rdev->cores[core].clks), rdev->cores[core].clks);
+ if (err) {
+ dev_err(dev, "failed to enable (%d) clocks for core %d\n", err, core);
+ return err;
+ }
+
+ return 0;
+}
+
+static int rocket_device_runtime_suspend(struct device *dev)
+{
+ struct rocket_device *rdev = dev_get_drvdata(dev);
+ int core = find_core_for_dev(dev);
+
+ if (core < 0)
+ return -ENODEV;
+
+ if (!rocket_job_is_idle(&rdev->cores[core]))
+ return -EBUSY;
+
+ clk_bulk_disable_unprepare(ARRAY_SIZE(rdev->cores[core].clks), rdev->cores[core].clks);
+
+ return 0;
+}
+
+EXPORT_GPL_DEV_PM_OPS(rocket_pm_ops) = {
+ RUNTIME_PM_OPS(rocket_device_runtime_suspend, rocket_device_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+};
+
+static struct platform_driver rocket_driver = {
+ .probe = rocket_probe,
+ .remove = rocket_remove,
+ .driver = {
+ .name = "rocket",
+ .pm = pm_ptr(&rocket_pm_ops),
+ .of_match_table = dt_match,
+ },
+};
+
+static int __init rocket_register(void)
+{
+ drm_dev = platform_device_register_simple("rknn", -1, NULL, 0);
+ if (IS_ERR(drm_dev))
+ return PTR_ERR(drm_dev);
+
+ return platform_driver_register(&rocket_driver);
+}
+
+static void __exit rocket_unregister(void)
+{
+ platform_driver_unregister(&rocket_driver);
+
+ platform_device_unregister(drm_dev);
+}
+
+module_init(rocket_register);
+module_exit(rocket_unregister);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("DRM driver for the Rockchip NPU IP");
+MODULE_AUTHOR("Tomeu Vizoso");
diff --git a/drivers/accel/rocket/rocket_drv.h b/drivers/accel/rocket/rocket_drv.h
new file mode 100644
index 000000000000..2c673bb99ccc
--- /dev/null
+++ b/drivers/accel/rocket/rocket_drv.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#ifndef __ROCKET_DRV_H__
+#define __ROCKET_DRV_H__
+
+#include <drm/drm_mm.h>
+#include <drm/gpu_scheduler.h>
+
+#include "rocket_device.h"
+
+extern const struct dev_pm_ops rocket_pm_ops;
+
+struct rocket_iommu_domain {
+ struct iommu_domain *domain;
+ struct kref kref;
+};
+
+struct rocket_file_priv {
+ struct rocket_device *rdev;
+
+ struct rocket_iommu_domain *domain;
+ struct drm_mm mm;
+ struct mutex mm_lock;
+
+ struct drm_sched_entity sched_entity;
+};
+
+struct rocket_iommu_domain *rocket_iommu_domain_get(struct rocket_file_priv *rocket_priv);
+void rocket_iommu_domain_put(struct rocket_iommu_domain *domain);
+
+#endif
diff --git a/drivers/accel/rocket/rocket_gem.c b/drivers/accel/rocket/rocket_gem.c
new file mode 100644
index 000000000000..0551e11cc184
--- /dev/null
+++ b/drivers/accel/rocket/rocket_gem.c
@@ -0,0 +1,181 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#include <drm/drm_device.h>
+#include <drm/drm_utils.h>
+#include <drm/rocket_accel.h>
+#include <linux/dma-mapping.h>
+#include <linux/iommu.h>
+
+#include "rocket_drv.h"
+#include "rocket_gem.h"
+
+static void rocket_gem_bo_free(struct drm_gem_object *obj)
+{
+ struct rocket_gem_object *bo = to_rocket_bo(obj);
+ struct rocket_file_priv *rocket_priv = bo->driver_priv;
+ size_t unmapped;
+
+ drm_WARN_ON(obj->dev, refcount_read(&bo->base.pages_use_count) > 1);
+
+ unmapped = iommu_unmap(bo->domain->domain, bo->mm.start, bo->size);
+ drm_WARN_ON(obj->dev, unmapped != bo->size);
+
+ mutex_lock(&rocket_priv->mm_lock);
+ drm_mm_remove_node(&bo->mm);
+ mutex_unlock(&rocket_priv->mm_lock);
+
+ rocket_iommu_domain_put(bo->domain);
+ bo->domain = NULL;
+
+ drm_gem_shmem_free(&bo->base);
+}
+
+static const struct drm_gem_object_funcs rocket_gem_funcs = {
+ .free = rocket_gem_bo_free,
+ .print_info = drm_gem_shmem_object_print_info,
+ .pin = drm_gem_shmem_object_pin,
+ .unpin = drm_gem_shmem_object_unpin,
+ .get_sg_table = drm_gem_shmem_object_get_sg_table,
+ .vmap = drm_gem_shmem_object_vmap,
+ .vunmap = drm_gem_shmem_object_vunmap,
+ .mmap = drm_gem_shmem_object_mmap,
+ .vm_ops = &drm_gem_shmem_vm_ops,
+};
+
+struct drm_gem_object *rocket_gem_create_object(struct drm_device *dev, size_t size)
+{
+ struct rocket_gem_object *obj;
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+
+ obj->base.base.funcs = &rocket_gem_funcs;
+
+ return &obj->base.base;
+}
+
+int rocket_ioctl_create_bo(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct rocket_file_priv *rocket_priv = file->driver_priv;
+ struct drm_rocket_create_bo *args = data;
+ struct drm_gem_shmem_object *shmem_obj;
+ struct rocket_gem_object *rkt_obj;
+ struct drm_gem_object *gem_obj;
+ struct sg_table *sgt;
+ int ret;
+
+ shmem_obj = drm_gem_shmem_create(dev, args->size);
+ if (IS_ERR(shmem_obj))
+ return PTR_ERR(shmem_obj);
+
+ gem_obj = &shmem_obj->base;
+ rkt_obj = to_rocket_bo(gem_obj);
+
+ rkt_obj->driver_priv = rocket_priv;
+ rkt_obj->domain = rocket_iommu_domain_get(rocket_priv);
+ rkt_obj->size = args->size;
+ rkt_obj->offset = 0;
+
+ ret = drm_gem_handle_create(file, gem_obj, &args->handle);
+ drm_gem_object_put(gem_obj);
+ if (ret)
+ goto err;
+
+ sgt = drm_gem_shmem_get_pages_sgt(shmem_obj);
+ if (IS_ERR(sgt)) {
+ ret = PTR_ERR(sgt);
+ goto err;
+ }
+
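+ /* Reserve a range in this client's NPU address space for the BO. */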
+ mutex_lock(&rocket_priv->mm_lock);
+ ret = drm_mm_insert_node_generic(&rocket_priv->mm, &rkt_obj->mm,
+ rkt_obj->size, PAGE_SIZE,
+ 0, 0);
+ mutex_unlock(&rocket_priv->mm_lock);
+ if (ret)
+ goto err;
+
+ ret = iommu_map_sgtable(rocket_priv->domain->domain,
+ rkt_obj->mm.start,
+ shmem_obj->sgt,
+ IOMMU_READ | IOMMU_WRITE);
+ if (ret < 0 || ret < args->size) {
+ drm_err(dev, "failed to map buffer: size=%d request_size=%u\n",
+ ret, args->size);
+ ret = -ENOMEM;
+ goto err_remove_node;
+ }
+
+ /* iommu_map_sgtable might have aligned the size */
+ rkt_obj->size = ret;
+ args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
+ args->dma_address = rkt_obj->mm.start;
+
+ return 0;
+
+err_remove_node:
+ mutex_lock(&rocket_priv->mm_lock);
+ drm_mm_remove_node(&rkt_obj->mm);
+ mutex_unlock(&rocket_priv->mm_lock);
+
+err:
+ drm_gem_shmem_object_free(gem_obj);
+
+ return ret;
+}
+
+int rocket_ioctl_prep_bo(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_rocket_prep_bo *args = data;
+ unsigned long timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);
+ struct drm_gem_object *gem_obj;
+ struct drm_gem_shmem_object *shmem_obj;
+ long ret = 0;
+
+ if (args->reserved != 0) {
+ drm_dbg(dev, "Reserved field in drm_rocket_prep_bo struct should be 0.\n");
+ return -EINVAL;
+ }
+
+ gem_obj = drm_gem_object_lookup(file, args->handle);
+ if (!gem_obj)
+ return -ENOENT;
+
+ ret = dma_resv_wait_timeout(gem_obj->resv, DMA_RESV_USAGE_WRITE, true, timeout);
+ if (!ret)
+ ret = timeout ? -ETIMEDOUT : -EBUSY;
+
+ shmem_obj = &to_rocket_bo(gem_obj)->base;
+
+ dma_sync_sgtable_for_cpu(dev->dev, shmem_obj->sgt, DMA_BIDIRECTIONAL);
+
+ drm_gem_object_put(gem_obj);
+
+ return ret;
+}
+
+int rocket_ioctl_fini_bo(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_rocket_fini_bo *args = data;
+ struct drm_gem_shmem_object *shmem_obj;
+ struct rocket_gem_object *rkt_obj;
+ struct drm_gem_object *gem_obj;
+
+ if (args->reserved != 0) {
+ drm_dbg(dev, "Reserved field in drm_rocket_fini_bo struct should be 0.\n");
+ return -EINVAL;
+ }
+
+ gem_obj = drm_gem_object_lookup(file, args->handle);
+ if (!gem_obj)
+ return -ENOENT;
+
+ rkt_obj = to_rocket_bo(gem_obj);
+ shmem_obj = &rkt_obj->base;
+
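+ /* Counterpart to PREP_BO: hand the pages back to the device after CPU access. */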
+ dma_sync_sgtable_for_device(dev->dev, shmem_obj->sgt, DMA_BIDIRECTIONAL);
+
+ drm_gem_object_put(gem_obj);
+
+ return 0;
+}
diff --git a/drivers/accel/rocket/rocket_gem.h b/drivers/accel/rocket/rocket_gem.h
new file mode 100644
index 000000000000..240430334509
--- /dev/null
+++ b/drivers/accel/rocket/rocket_gem.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#ifndef __ROCKET_GEM_H__
+#define __ROCKET_GEM_H__
+
+#include <drm/drm_gem_shmem_helper.h>
+
+struct rocket_gem_object {
+ struct drm_gem_shmem_object base;
+
+ struct rocket_file_priv *driver_priv;
+
+ struct rocket_iommu_domain *domain;
+ struct drm_mm_node mm;
+ size_t size;
+ u32 offset;
+};
+
+struct drm_gem_object *rocket_gem_create_object(struct drm_device *dev, size_t size);
+
+int rocket_ioctl_create_bo(struct drm_device *dev, void *data, struct drm_file *file);
+
+int rocket_ioctl_prep_bo(struct drm_device *dev, void *data, struct drm_file *file);
+
+int rocket_ioctl_fini_bo(struct drm_device *dev, void *data, struct drm_file *file);
+
+static inline
+struct rocket_gem_object *to_rocket_bo(struct drm_gem_object *obj)
+{
+ return container_of(to_drm_gem_shmem_obj(obj), struct rocket_gem_object, base);
+}
+
+#endif
diff --git a/drivers/accel/rocket/rocket_job.c b/drivers/accel/rocket/rocket_job.c
new file mode 100644
index 000000000000..acd606160dc9
--- /dev/null
+++ b/drivers/accel/rocket/rocket_job.c
@@ -0,0 +1,637 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
+/* Copyright 2019 Collabora ltd. */
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#include <drm/drm_print.h>
+#include <drm/drm_file.h>
+#include <drm/drm_gem.h>
+#include <drm/rocket_accel.h>
+#include <linux/interrupt.h>
+#include <linux/iommu.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "rocket_core.h"
+#include "rocket_device.h"
+#include "rocket_drv.h"
+#include "rocket_job.h"
+#include "rocket_registers.h"
+
+#define JOB_TIMEOUT_MS 500
+
+static struct rocket_job *
+to_rocket_job(struct drm_sched_job *sched_job)
+{
+ return container_of(sched_job, struct rocket_job, base);
+}
+
+static const char *rocket_fence_get_driver_name(struct dma_fence *fence)
+{
+ return "rocket";
+}
+
+static const char *rocket_fence_get_timeline_name(struct dma_fence *fence)
+{
+ return "rockchip-npu";
+}
+
+static const struct dma_fence_ops rocket_fence_ops = {
+ .get_driver_name = rocket_fence_get_driver_name,
+ .get_timeline_name = rocket_fence_get_timeline_name,
+};
+
+static struct dma_fence *rocket_fence_create(struct rocket_core *core)
+{
+ struct dma_fence *fence;
+
+ fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+ if (!fence)
+ return ERR_PTR(-ENOMEM);
+
+ dma_fence_init(fence, &rocket_fence_ops, &core->fence_lock,
+ core->fence_context, ++core->emit_seqno);
+
+ return fence;
+}
+
+static int
+rocket_copy_tasks(struct drm_device *dev,
+ struct drm_file *file_priv,
+ struct drm_rocket_job *job,
+ struct rocket_job *rjob)
+{
+ int ret = 0;
+
+ if (job->task_struct_size < sizeof(struct drm_rocket_task))
+ return -EINVAL;
+
+ rjob->task_count = job->task_count;
+
+ if (!rjob->task_count)
+ return 0;
+
+ rjob->tasks = kvmalloc_array(job->task_count, sizeof(*rjob->tasks), GFP_KERNEL);
+ if (!rjob->tasks) {
+ drm_dbg(dev, "Failed to allocate task array\n");
+ return -ENOMEM;
+ }
+
+ for (int i = 0; i < rjob->task_count; i++) {
+ struct drm_rocket_task task = {0};
+
+ if (copy_from_user(&task,
+ u64_to_user_ptr(job->tasks) + i * job->task_struct_size,
+ sizeof(task))) {
+ drm_dbg(dev, "Failed to copy incoming tasks\n");
+ ret = -EFAULT;
+ goto fail;
+ }
+
+ if (task.regcmd_count == 0) {
+ drm_dbg(dev, "regcmd_count field in drm_rocket_task should be > 0.\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ rjob->tasks[i].regcmd = task.regcmd;
+ rjob->tasks[i].regcmd_count = task.regcmd_count;
+ }
+
+ return 0;
+
+fail:
+ kvfree(rjob->tasks);
+ return ret;
+}
+
+static void rocket_job_hw_submit(struct rocket_core *core, struct rocket_job *job)
+{
+ struct rocket_task *task;
+ unsigned int extra_bit;
+
+ /* Don't queue the job if a reset is in progress */
+ if (atomic_read(&core->reset.pending))
+ return;
+
+ /* GO ! */
+
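+ /* Tasks are submitted one at a time; the IRQ handler queues the next one. */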
+ task = &job->tasks[job->next_task_idx];
+ job->next_task_idx++;
+
+ rocket_pc_writel(core, BASE_ADDRESS, 0x1);
+
+ /* Taken from the downstream rknpu driver; the TRM marks this bit as reserved. */
+ extra_bit = 0x10000000 * core->index;
+ rocket_cna_writel(core, S_POINTER, CNA_S_POINTER_POINTER_PP_EN(1) |
+ CNA_S_POINTER_EXECUTER_PP_EN(1) |
+ CNA_S_POINTER_POINTER_PP_MODE(1) |
+ extra_bit);
+
+ rocket_core_writel(core, S_POINTER, CORE_S_POINTER_POINTER_PP_EN(1) |
+ CORE_S_POINTER_EXECUTER_PP_EN(1) |
+ CORE_S_POINTER_POINTER_PP_MODE(1) |
+ extra_bit);
+
+ rocket_pc_writel(core, BASE_ADDRESS, task->regcmd);
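+ /* PC_DATA_AMOUNT counts pairs of 32-bit values, minus one, hence the rounding. */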
+ rocket_pc_writel(core, REGISTER_AMOUNTS,
+ PC_REGISTER_AMOUNTS_PC_DATA_AMOUNT((task->regcmd_count + 1) / 2 - 1));
+
+ rocket_pc_writel(core, INTERRUPT_MASK, PC_INTERRUPT_MASK_DPU_0 | PC_INTERRUPT_MASK_DPU_1);
+ rocket_pc_writel(core, INTERRUPT_CLEAR, PC_INTERRUPT_CLEAR_DPU_0 | PC_INTERRUPT_CLEAR_DPU_1);
+
+ rocket_pc_writel(core, TASK_CON, PC_TASK_CON_RESERVED_0(1) |
+ PC_TASK_CON_TASK_COUNT_CLEAR(1) |
+ PC_TASK_CON_TASK_NUMBER(1) |
+ PC_TASK_CON_TASK_PP_EN(1));
+
+ rocket_pc_writel(core, TASK_DMA_BASE_ADDR, PC_TASK_DMA_BASE_ADDR_DMA_BASE_ADDR(0x0));
+
+ rocket_pc_writel(core, OPERATION_ENABLE, PC_OPERATION_ENABLE_OP_EN(1));
+
+ dev_dbg(core->dev, "Submitted regcmd at 0x%llx to core %d", task->regcmd, core->index);
+}
+
+static int rocket_acquire_object_fences(struct drm_gem_object **bos,
+ int bo_count,
+ struct drm_sched_job *job,
+ bool is_write)
+{
+ int i, ret;
+
+ for (i = 0; i < bo_count; i++) {
+ ret = dma_resv_reserve_fences(bos[i]->resv, 1);
+ if (ret)
+ return ret;
+
+ ret = drm_sched_job_add_implicit_dependencies(job, bos[i],
+ is_write);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void rocket_attach_object_fences(struct drm_gem_object **bos,
+ int bo_count,
+ struct dma_fence *fence)
+{
+ int i;
+
+ for (i = 0; i < bo_count; i++)
+ dma_resv_add_fence(bos[i]->resv, fence, DMA_RESV_USAGE_WRITE);
+}
+
+static int rocket_job_push(struct rocket_job *job)
+{
+ struct rocket_device *rdev = job->rdev;
+ struct drm_gem_object **bos;
+ struct ww_acquire_ctx acquire_ctx;
+ int ret = 0;
+
+ bos = kvmalloc_array(job->in_bo_count + job->out_bo_count, sizeof(void *),
+ GFP_KERNEL);
+ if (!bos)
+ return -ENOMEM;
+
+ memcpy(bos, job->in_bos, job->in_bo_count * sizeof(void *));
+ memcpy(&bos[job->in_bo_count], job->out_bos, job->out_bo_count * sizeof(void *));
+
+ ret = drm_gem_lock_reservations(bos, job->in_bo_count + job->out_bo_count, &acquire_ctx);
+ if (ret)
+ goto err;
+
+ scoped_guard(mutex, &rdev->sched_lock) {
+ drm_sched_job_arm(&job->base);
+
+ job->inference_done_fence = dma_fence_get(&job->base.s_fence->finished);
+
+ ret = rocket_acquire_object_fences(job->in_bos, job->in_bo_count, &job->base, false);
+ if (ret)
+ goto err_unlock;
+
+ ret = rocket_acquire_object_fences(job->out_bos, job->out_bo_count, &job->base, true);
+ if (ret)
+ goto err_unlock;
+
+ kref_get(&job->refcount); /* put by scheduler job completion */
+
+ drm_sched_entity_push_job(&job->base);
+ }
+
+ rocket_attach_object_fences(job->out_bos, job->out_bo_count, job->inference_done_fence);
+
+err_unlock:
+ drm_gem_unlock_reservations(bos, job->in_bo_count + job->out_bo_count, &acquire_ctx);
+err:
+ kvfree(bos);
+
+ return ret;
+}
+
+static void rocket_job_cleanup(struct kref *ref)
+{
+ struct rocket_job *job = container_of(ref, struct rocket_job,
+ refcount);
+ unsigned int i;
+
+ rocket_iommu_domain_put(job->domain);
+
+ dma_fence_put(job->done_fence);
+ dma_fence_put(job->inference_done_fence);
+
+ if (job->in_bos) {
+ for (i = 0; i < job->in_bo_count; i++)
+ drm_gem_object_put(job->in_bos[i]);
+
+ kvfree(job->in_bos);
+ }
+
+ if (job->out_bos) {
+ for (i = 0; i < job->out_bo_count; i++)
+ drm_gem_object_put(job->out_bos[i]);
+
+ kvfree(job->out_bos);
+ }
+
+ kvfree(job->tasks);
+
+ kfree(job);
+}
+
+static void rocket_job_put(struct rocket_job *job)
+{
+ kref_put(&job->refcount, rocket_job_cleanup);
+}
+
+static void rocket_job_free(struct drm_sched_job *sched_job)
+{
+ struct rocket_job *job = to_rocket_job(sched_job);
+
+ drm_sched_job_cleanup(sched_job);
+
+ rocket_job_put(job);
+}
+
+static struct rocket_core *sched_to_core(struct rocket_device *rdev,
+ struct drm_gpu_scheduler *sched)
+{
+ unsigned int core;
+
+ for (core = 0; core < rdev->num_cores; core++) {
+ if (&rdev->cores[core].sched == sched)
+ return &rdev->cores[core];
+ }
+
+ return NULL;
+}
+
+static struct dma_fence *rocket_job_run(struct drm_sched_job *sched_job)
+{
+ struct rocket_job *job = to_rocket_job(sched_job);
+ struct rocket_device *rdev = job->rdev;
+ struct rocket_core *core = sched_to_core(rdev, sched_job->sched);
+ struct dma_fence *fence = NULL;
+ int ret;
+
+ if (unlikely(job->base.s_fence->finished.error))
+ return NULL;
+
+ /*
+ * Nothing to execute: can happen if the job has finished while
+ * we were resetting the NPU.
+ */
+ if (job->next_task_idx == job->task_count)
+ return NULL;
+
+ fence = rocket_fence_create(core);
+ if (IS_ERR(fence))
+ return fence;
+
+ if (job->done_fence)
+ dma_fence_put(job->done_fence);
+ job->done_fence = dma_fence_get(fence);
+
+ ret = pm_runtime_get_sync(core->dev);
+ if (ret < 0)
+ return fence;
+
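+ /* Switch the core's IOMMU to the submitting client's address space. */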
+ ret = iommu_attach_group(job->domain->domain, core->iommu_group);
+ if (ret < 0)
+ return fence;
+
+ scoped_guard(mutex, &core->job_lock) {
+ core->in_flight_job = job;
+ rocket_job_hw_submit(core, job);
+ }
+
+ return fence;
+}
+
+static void rocket_job_handle_irq(struct rocket_core *core)
+{
+ pm_runtime_mark_last_busy(core->dev);
+
+ rocket_pc_writel(core, OPERATION_ENABLE, 0x0);
+ rocket_pc_writel(core, INTERRUPT_CLEAR, 0x1ffff);
+
+ scoped_guard(mutex, &core->job_lock)
+ if (core->in_flight_job) {
+ if (core->in_flight_job->next_task_idx < core->in_flight_job->task_count) {
+ rocket_job_hw_submit(core, core->in_flight_job);
+ return;
+ }
+
+ iommu_detach_group(NULL, core->iommu_group);
+ dma_fence_signal(core->in_flight_job->done_fence);
+ pm_runtime_put_autosuspend(core->dev);
+ core->in_flight_job = NULL;
+ }
+}
+
+static void
+rocket_reset(struct rocket_core *core, struct drm_sched_job *bad)
+{
+ if (!atomic_read(&core->reset.pending))
+ return;
+
+ drm_sched_stop(&core->sched, bad);
+
+ /*
+ * Remaining interrupts have been handled, but we might still have
+ * stuck jobs. Let's make sure the PM counters stay balanced by
+ * manually calling pm_runtime_put_noidle().
+ */
+ scoped_guard(mutex, &core->job_lock) {
+ if (core->in_flight_job)
+ pm_runtime_put_noidle(core->dev);
+
+ iommu_detach_group(NULL, core->iommu_group);
+
+ core->in_flight_job = NULL;
+ }
+
+ /* Proceed with reset now. */
+ rocket_core_reset(core);
+
+ /* NPU has been reset, we can clear the reset pending bit. */
+ atomic_set(&core->reset.pending, 0);
+
+ /* Restart the scheduler */
+ drm_sched_start(&core->sched, 0);
+}
+
+static enum drm_gpu_sched_stat rocket_job_timedout(struct drm_sched_job *sched_job)
+{
+ struct rocket_job *job = to_rocket_job(sched_job);
+ struct rocket_device *rdev = job->rdev;
+ struct rocket_core *core = sched_to_core(rdev, sched_job->sched);
+
+ dev_err(core->dev, "NPU job timed out");
+
+ atomic_set(&core->reset.pending, 1);
+ rocket_reset(core, sched_job);
+
+ return DRM_GPU_SCHED_STAT_RESET;
+}
+
+static void rocket_reset_work(struct work_struct *work)
+{
+ struct rocket_core *core;
+
+ core = container_of(work, struct rocket_core, reset.work);
+ rocket_reset(core, NULL);
+}
+
+static const struct drm_sched_backend_ops rocket_sched_ops = {
+ .run_job = rocket_job_run,
+ .timedout_job = rocket_job_timedout,
+ .free_job = rocket_job_free
+};
+
+static irqreturn_t rocket_job_irq_handler_thread(int irq, void *data)
+{
+ struct rocket_core *core = data;
+
+ rocket_job_handle_irq(core);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rocket_job_irq_handler(int irq, void *data)
+{
+ struct rocket_core *core = data;
+ u32 raw_status = rocket_pc_readl(core, INTERRUPT_RAW_STATUS);
+
+ WARN_ON(raw_status & PC_INTERRUPT_RAW_STATUS_DMA_READ_ERROR);
+ WARN_ON(raw_status & PC_INTERRUPT_RAW_STATUS_DMA_WRITE_ERROR);
+
+ if (!(raw_status & PC_INTERRUPT_RAW_STATUS_DPU_0 ||
+ raw_status & PC_INTERRUPT_RAW_STATUS_DPU_1))
+ return IRQ_NONE;
+
+ rocket_pc_writel(core, INTERRUPT_MASK, 0x0);
+
+ return IRQ_WAKE_THREAD;
+}
+
+int rocket_job_init(struct rocket_core *core)
+{
+ struct drm_sched_init_args args = {
+ .ops = &rocket_sched_ops,
+ .num_rqs = DRM_SCHED_PRIORITY_COUNT,
+ .credit_limit = 1,
+ .timeout = msecs_to_jiffies(JOB_TIMEOUT_MS),
+ .name = dev_name(core->dev),
+ .dev = core->dev,
+ };
+ int ret;
+
+ INIT_WORK(&core->reset.work, rocket_reset_work);
+ spin_lock_init(&core->fence_lock);
+ mutex_init(&core->job_lock);
+
+ core->irq = platform_get_irq(to_platform_device(core->dev), 0);
+ if (core->irq < 0)
+ return core->irq;
+
+ ret = devm_request_threaded_irq(core->dev, core->irq,
+ rocket_job_irq_handler,
+ rocket_job_irq_handler_thread,
+ IRQF_SHARED, dev_name(core->dev),
+ core);
+ if (ret) {
+ dev_err(core->dev, "failed to request job irq");
+ return ret;
+ }
+
+ core->reset.wq = alloc_ordered_workqueue("rocket-reset-%d", 0, core->index);
+ if (!core->reset.wq)
+ return -ENOMEM;
+
+ core->fence_context = dma_fence_context_alloc(1);
+
+ args.timeout_wq = core->reset.wq;
+ ret = drm_sched_init(&core->sched, &args);
+ if (ret) {
+ dev_err(core->dev, "Failed to create scheduler: %d.", ret);
+ goto err_sched;
+ }
+
+ return 0;
+
+err_sched:
+ drm_sched_fini(&core->sched);
+
+ destroy_workqueue(core->reset.wq);
+ return ret;
+}
+
+void rocket_job_fini(struct rocket_core *core)
+{
+ drm_sched_fini(&core->sched);
+
+ cancel_work_sync(&core->reset.work);
+ destroy_workqueue(core->reset.wq);
+}
+
+int rocket_job_open(struct rocket_file_priv *rocket_priv)
+{
+ struct rocket_device *rdev = rocket_priv->rdev;
+ struct drm_gpu_scheduler **scheds = kmalloc_array(rdev->num_cores,
+ sizeof(*scheds),
+ GFP_KERNEL);
+ unsigned int core;
+ int ret;
+
+ if (!scheds)
+ return -ENOMEM;
+
+ for (core = 0; core < rdev->num_cores; core++)
+ scheds[core] = &rdev->cores[core].sched;
+
+ ret = drm_sched_entity_init(&rocket_priv->sched_entity,
+ DRM_SCHED_PRIORITY_NORMAL,
+ scheds,
+ rdev->num_cores, NULL);
+ if (WARN_ON(ret)) {
+ kfree(scheds);
+ return ret;
+ }
+
+ return 0;
+}
+
+void rocket_job_close(struct rocket_file_priv *rocket_priv)
+{
+ struct drm_sched_entity *entity = &rocket_priv->sched_entity;
+
+ kfree(entity->sched_list);
+ drm_sched_entity_destroy(entity);
+}
+
+int rocket_job_is_idle(struct rocket_core *core)
+{
+ /* If there are any jobs in this HW queue, we're not idle */
+ if (atomic_read(&core->sched.credit_count))
+ return false;
+
+ return true;
+}
+
+static int rocket_ioctl_submit_job(struct drm_device *dev, struct drm_file *file,
+ struct drm_rocket_job *job)
+{
+ struct rocket_device *rdev = to_rocket_device(dev);
+ struct rocket_file_priv *file_priv = file->driver_priv;
+ struct rocket_job *rjob = NULL;
+ int ret = 0;
+
+ if (job->task_count == 0)
+ return -EINVAL;
+
+ rjob = kzalloc(sizeof(*rjob), GFP_KERNEL);
+ if (!rjob)
+ return -ENOMEM;
+
+ kref_init(&rjob->refcount);
+
+ rjob->rdev = rdev;
+
+ ret = drm_sched_job_init(&rjob->base,
+ &file_priv->sched_entity,
+ 1, NULL, file->client_id);
+ if (ret)
+ goto out_put_job;
+
+ ret = rocket_copy_tasks(dev, file, job, rjob);
+ if (ret)
+ goto out_cleanup_job;
+
+ ret = drm_gem_objects_lookup(file, u64_to_user_ptr(job->in_bo_handles),
+ job->in_bo_handle_count, &rjob->in_bos);
+ if (ret)
+ goto out_cleanup_job;
+
+ rjob->in_bo_count = job->in_bo_handle_count;
+
+ ret = drm_gem_objects_lookup(file, u64_to_user_ptr(job->out_bo_handles),
+ job->out_bo_handle_count, &rjob->out_bos);
+ if (ret)
+ goto out_cleanup_job;
+
+ rjob->out_bo_count = job->out_bo_handle_count;
+
+ rjob->domain = rocket_iommu_domain_get(file_priv);
+
+ ret = rocket_job_push(rjob);
+
+out_cleanup_job:
+ if (ret)
+ drm_sched_job_cleanup(&rjob->base);
+out_put_job:
+ rocket_job_put(rjob);
+
+ return ret;
+}
+
+int rocket_ioctl_submit(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_rocket_submit *args = data;
+ struct drm_rocket_job *jobs;
+ int ret = 0;
+ unsigned int i = 0;
+
+ if (args->job_count == 0)
+ return 0;
+
+ if (args->job_struct_size < sizeof(struct drm_rocket_job)) {
+ drm_dbg(dev, "job_struct_size field in drm_rocket_submit struct is too small.\n");
+ return -EINVAL;
+ }
+
+ if (args->reserved != 0) {
+ drm_dbg(dev, "Reserved field in drm_rocket_submit struct should be 0.\n");
+ return -EINVAL;
+ }
+
+ jobs = kvmalloc_array(args->job_count, sizeof(*jobs), GFP_KERNEL);
+ if (!jobs) {
+ drm_dbg(dev, "Failed to allocate incoming job array\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < args->job_count; i++) {
+ if (copy_from_user(&jobs[i],
+ u64_to_user_ptr(args->jobs) + i * args->job_struct_size,
+ sizeof(*jobs))) {
+ ret = -EFAULT;
+ drm_dbg(dev, "Failed to copy incoming job array\n");
+ goto exit;
+ }
+ }
+
+ for (i = 0; i < args->job_count; i++) {
+ ret = rocket_ioctl_submit_job(dev, file, &jobs[i]);
+ if (ret)
+ goto exit;
+ }
+
+exit:
+ kvfree(jobs);
+
+ return ret;
+}
diff --git a/drivers/accel/rocket/rocket_job.h b/drivers/accel/rocket/rocket_job.h
new file mode 100644
index 000000000000..4ae00feec3b9
--- /dev/null
+++ b/drivers/accel/rocket/rocket_job.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#ifndef __ROCKET_JOB_H__
+#define __ROCKET_JOB_H__
+
+#include <drm/drm_drv.h>
+#include <drm/gpu_scheduler.h>
+
+#include "rocket_core.h"
+#include "rocket_drv.h"
+
+struct rocket_task {
+ u64 regcmd;
+ u32 regcmd_count;
+};
+
+struct rocket_job {
+ struct drm_sched_job base;
+
+ struct rocket_device *rdev;
+
+ struct drm_gem_object **in_bos;
+ struct drm_gem_object **out_bos;
+
+ u32 in_bo_count;
+ u32 out_bo_count;
+
+ struct rocket_task *tasks;
+ u32 task_count;
+ u32 next_task_idx;
+
+ /* Fence to be signaled by drm-sched once it's done with the job */
+ struct dma_fence *inference_done_fence;
+
+ /* Fence to be signaled by IRQ handler when the job is complete. */
+ struct dma_fence *done_fence;
+
+ struct rocket_iommu_domain *domain;
+
+ struct kref refcount;
+};
+
+int rocket_ioctl_submit(struct drm_device *dev, void *data, struct drm_file *file);
+
+int rocket_job_init(struct rocket_core *core);
+void rocket_job_fini(struct rocket_core *core);
+int rocket_job_open(struct rocket_file_priv *rocket_priv);
+void rocket_job_close(struct rocket_file_priv *rocket_priv);
+int rocket_job_is_idle(struct rocket_core *core);
+
+#endif
diff --git a/drivers/accel/rocket/rocket_registers.h b/drivers/accel/rocket/rocket_registers.h
new file mode 100644
index 000000000000..9aef614c3470
--- /dev/null
+++ b/drivers/accel/rocket/rocket_registers.h
@@ -0,0 +1,4404 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+
+#ifndef __ROCKET_REGISTERS_XML__
+#define __ROCKET_REGISTERS_XML__
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng gen_header.py tool in this git repository:
+http://gitlab.freedesktop.org/mesa/mesa/
+git clone https://gitlab.freedesktop.org/mesa/mesa.git
+
+The rules-ng-ng source files this header was generated from are:
+
+- /home/tomeu/src/mesa/src/gallium/drivers/rocket/registers.xml ( 60076 bytes, from Wed Jun 12 10:02:25 2024)
+
+Copyright (C) 2024-2025 by the following authors:
+- Tomeu Vizoso <tomeu@tomeuvizoso.net>
+*/
+
+#define REG_PC_VERSION 0x00000000
+#define PC_VERSION_VERSION__MASK 0xffffffff
+#define PC_VERSION_VERSION__SHIFT 0
+static inline uint32_t PC_VERSION_VERSION(uint32_t val)
+{
+ return ((val) << PC_VERSION_VERSION__SHIFT) & PC_VERSION_VERSION__MASK;
+}
+
+#define REG_PC_VERSION_NUM 0x00000004
+#define PC_VERSION_NUM_VERSION_NUM__MASK 0xffffffff
+#define PC_VERSION_NUM_VERSION_NUM__SHIFT 0
+static inline uint32_t PC_VERSION_NUM_VERSION_NUM(uint32_t val)
+{
+ return ((val) << PC_VERSION_NUM_VERSION_NUM__SHIFT) & PC_VERSION_NUM_VERSION_NUM__MASK;
+}
+
+#define REG_PC_OPERATION_ENABLE 0x00000008
+#define PC_OPERATION_ENABLE_RESERVED_0__MASK 0xfffffffe
+#define PC_OPERATION_ENABLE_RESERVED_0__SHIFT 1
+static inline uint32_t PC_OPERATION_ENABLE_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_OPERATION_ENABLE_RESERVED_0__SHIFT) & PC_OPERATION_ENABLE_RESERVED_0__MASK;
+}
+#define PC_OPERATION_ENABLE_OP_EN__MASK 0x00000001
+#define PC_OPERATION_ENABLE_OP_EN__SHIFT 0
+static inline uint32_t PC_OPERATION_ENABLE_OP_EN(uint32_t val)
+{
+ return ((val) << PC_OPERATION_ENABLE_OP_EN__SHIFT) & PC_OPERATION_ENABLE_OP_EN__MASK;
+}
+
+#define REG_PC_BASE_ADDRESS 0x00000010
+#define PC_BASE_ADDRESS_PC_SOURCE_ADDR__MASK 0xfffffff0
+#define PC_BASE_ADDRESS_PC_SOURCE_ADDR__SHIFT 4
+static inline uint32_t PC_BASE_ADDRESS_PC_SOURCE_ADDR(uint32_t val)
+{
+ return ((val) << PC_BASE_ADDRESS_PC_SOURCE_ADDR__SHIFT) & PC_BASE_ADDRESS_PC_SOURCE_ADDR__MASK;
+}
+#define PC_BASE_ADDRESS_RESERVED_0__MASK 0x0000000e
+#define PC_BASE_ADDRESS_RESERVED_0__SHIFT 1
+static inline uint32_t PC_BASE_ADDRESS_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_BASE_ADDRESS_RESERVED_0__SHIFT) & PC_BASE_ADDRESS_RESERVED_0__MASK;
+}
+#define PC_BASE_ADDRESS_PC_SEL__MASK 0x00000001
+#define PC_BASE_ADDRESS_PC_SEL__SHIFT 0
+static inline uint32_t PC_BASE_ADDRESS_PC_SEL(uint32_t val)
+{
+ return ((val) << PC_BASE_ADDRESS_PC_SEL__SHIFT) & PC_BASE_ADDRESS_PC_SEL__MASK;
+}
+
+#define REG_PC_REGISTER_AMOUNTS 0x00000014
+#define PC_REGISTER_AMOUNTS_RESERVED_0__MASK 0xffff0000
+#define PC_REGISTER_AMOUNTS_RESERVED_0__SHIFT 16
+static inline uint32_t PC_REGISTER_AMOUNTS_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_REGISTER_AMOUNTS_RESERVED_0__SHIFT) & PC_REGISTER_AMOUNTS_RESERVED_0__MASK;
+}
+#define PC_REGISTER_AMOUNTS_PC_DATA_AMOUNT__MASK 0x0000ffff
+#define PC_REGISTER_AMOUNTS_PC_DATA_AMOUNT__SHIFT 0
+static inline uint32_t PC_REGISTER_AMOUNTS_PC_DATA_AMOUNT(uint32_t val)
+{
+ return ((val) << PC_REGISTER_AMOUNTS_PC_DATA_AMOUNT__SHIFT) & PC_REGISTER_AMOUNTS_PC_DATA_AMOUNT__MASK;
+}
+
+#define REG_PC_INTERRUPT_MASK 0x00000020
+#define PC_INTERRUPT_MASK_RESERVED_0__MASK 0xffffc000
+#define PC_INTERRUPT_MASK_RESERVED_0__SHIFT 14
+static inline uint32_t PC_INTERRUPT_MASK_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_INTERRUPT_MASK_RESERVED_0__SHIFT) & PC_INTERRUPT_MASK_RESERVED_0__MASK;
+}
+#define PC_INTERRUPT_MASK_DMA_WRITE_ERROR 0x00002000
+#define PC_INTERRUPT_MASK_DMA_READ_ERROR 0x00001000
+#define PC_INTERRUPT_MASK_PPU_1 0x00000800
+#define PC_INTERRUPT_MASK_PPU_0 0x00000400
+#define PC_INTERRUPT_MASK_DPU_1 0x00000200
+#define PC_INTERRUPT_MASK_DPU_0 0x00000100
+#define PC_INTERRUPT_MASK_CORE_1 0x00000080
+#define PC_INTERRUPT_MASK_CORE_0 0x00000040
+#define PC_INTERRUPT_MASK_CNA_CSC_1 0x00000020
+#define PC_INTERRUPT_MASK_CNA_CSC_0 0x00000010
+#define PC_INTERRUPT_MASK_CNA_WEIGHT_1 0x00000008
+#define PC_INTERRUPT_MASK_CNA_WEIGHT_0 0x00000004
+#define PC_INTERRUPT_MASK_CNA_FEATURE_1 0x00000002
+#define PC_INTERRUPT_MASK_CNA_FEATURE_0 0x00000001
+
+#define REG_PC_INTERRUPT_CLEAR 0x00000024
+#define PC_INTERRUPT_CLEAR_RESERVED_0__MASK 0xffffc000
+#define PC_INTERRUPT_CLEAR_RESERVED_0__SHIFT 14
+static inline uint32_t PC_INTERRUPT_CLEAR_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_INTERRUPT_CLEAR_RESERVED_0__SHIFT) & PC_INTERRUPT_CLEAR_RESERVED_0__MASK;
+}
+#define PC_INTERRUPT_CLEAR_DMA_WRITE_ERROR 0x00002000
+#define PC_INTERRUPT_CLEAR_DMA_READ_ERROR 0x00001000
+#define PC_INTERRUPT_CLEAR_PPU_1 0x00000800
+#define PC_INTERRUPT_CLEAR_PPU_0 0x00000400
+#define PC_INTERRUPT_CLEAR_DPU_1 0x00000200
+#define PC_INTERRUPT_CLEAR_DPU_0 0x00000100
+#define PC_INTERRUPT_CLEAR_CORE_1 0x00000080
+#define PC_INTERRUPT_CLEAR_CORE_0 0x00000040
+#define PC_INTERRUPT_CLEAR_CNA_CSC_1 0x00000020
+#define PC_INTERRUPT_CLEAR_CNA_CSC_0 0x00000010
+#define PC_INTERRUPT_CLEAR_CNA_WEIGHT_1 0x00000008
+#define PC_INTERRUPT_CLEAR_CNA_WEIGHT_0 0x00000004
+#define PC_INTERRUPT_CLEAR_CNA_FEATURE_1 0x00000002
+#define PC_INTERRUPT_CLEAR_CNA_FEATURE_0 0x00000001
+
+#define REG_PC_INTERRUPT_STATUS 0x00000028
+#define PC_INTERRUPT_STATUS_RESERVED_0__MASK 0xffffc000
+#define PC_INTERRUPT_STATUS_RESERVED_0__SHIFT 14
+static inline uint32_t PC_INTERRUPT_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_INTERRUPT_STATUS_RESERVED_0__SHIFT) & PC_INTERRUPT_STATUS_RESERVED_0__MASK;
+}
+#define PC_INTERRUPT_STATUS_DMA_WRITE_ERROR 0x00002000
+#define PC_INTERRUPT_STATUS_DMA_READ_ERROR 0x00001000
+#define PC_INTERRUPT_STATUS_PPU_1 0x00000800
+#define PC_INTERRUPT_STATUS_PPU_0 0x00000400
+#define PC_INTERRUPT_STATUS_DPU_1 0x00000200
+#define PC_INTERRUPT_STATUS_DPU_0 0x00000100
+#define PC_INTERRUPT_STATUS_CORE_1 0x00000080
+#define PC_INTERRUPT_STATUS_CORE_0 0x00000040
+#define PC_INTERRUPT_STATUS_CNA_CSC_1 0x00000020
+#define PC_INTERRUPT_STATUS_CNA_CSC_0 0x00000010
+#define PC_INTERRUPT_STATUS_CNA_WEIGHT_1 0x00000008
+#define PC_INTERRUPT_STATUS_CNA_WEIGHT_0 0x00000004
+#define PC_INTERRUPT_STATUS_CNA_FEATURE_1 0x00000002
+#define PC_INTERRUPT_STATUS_CNA_FEATURE_0 0x00000001
+
+#define REG_PC_INTERRUPT_RAW_STATUS 0x0000002c
+#define PC_INTERRUPT_RAW_STATUS_RESERVED_0__MASK 0xffffc000
+#define PC_INTERRUPT_RAW_STATUS_RESERVED_0__SHIFT 14
+static inline uint32_t PC_INTERRUPT_RAW_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_INTERRUPT_RAW_STATUS_RESERVED_0__SHIFT) & PC_INTERRUPT_RAW_STATUS_RESERVED_0__MASK;
+}
+#define PC_INTERRUPT_RAW_STATUS_DMA_WRITE_ERROR 0x00002000
+#define PC_INTERRUPT_RAW_STATUS_DMA_READ_ERROR 0x00001000
+#define PC_INTERRUPT_RAW_STATUS_PPU_1 0x00000800
+#define PC_INTERRUPT_RAW_STATUS_PPU_0 0x00000400
+#define PC_INTERRUPT_RAW_STATUS_DPU_1 0x00000200
+#define PC_INTERRUPT_RAW_STATUS_DPU_0 0x00000100
+#define PC_INTERRUPT_RAW_STATUS_CORE_1 0x00000080
+#define PC_INTERRUPT_RAW_STATUS_CORE_0 0x00000040
+#define PC_INTERRUPT_RAW_STATUS_CNA_CSC_1 0x00000020
+#define PC_INTERRUPT_RAW_STATUS_CNA_CSC_0 0x00000010
+#define PC_INTERRUPT_RAW_STATUS_CNA_WEIGHT_1 0x00000008
+#define PC_INTERRUPT_RAW_STATUS_CNA_WEIGHT_0 0x00000004
+#define PC_INTERRUPT_RAW_STATUS_CNA_FEATURE_1 0x00000002
+#define PC_INTERRUPT_RAW_STATUS_CNA_FEATURE_0 0x00000001
+
+#define REG_PC_TASK_CON 0x00000030
+#define PC_TASK_CON_RESERVED_0__MASK 0xffffc000
+#define PC_TASK_CON_RESERVED_0__SHIFT 14
+static inline uint32_t PC_TASK_CON_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_TASK_CON_RESERVED_0__SHIFT) & PC_TASK_CON_RESERVED_0__MASK;
+}
+#define PC_TASK_CON_TASK_COUNT_CLEAR__MASK 0x00002000
+#define PC_TASK_CON_TASK_COUNT_CLEAR__SHIFT 13
+static inline uint32_t PC_TASK_CON_TASK_COUNT_CLEAR(uint32_t val)
+{
+ return ((val) << PC_TASK_CON_TASK_COUNT_CLEAR__SHIFT) & PC_TASK_CON_TASK_COUNT_CLEAR__MASK;
+}
+#define PC_TASK_CON_TASK_PP_EN__MASK 0x00001000
+#define PC_TASK_CON_TASK_PP_EN__SHIFT 12
+static inline uint32_t PC_TASK_CON_TASK_PP_EN(uint32_t val)
+{
+ return ((val) << PC_TASK_CON_TASK_PP_EN__SHIFT) & PC_TASK_CON_TASK_PP_EN__MASK;
+}
+#define PC_TASK_CON_TASK_NUMBER__MASK 0x00000fff
+#define PC_TASK_CON_TASK_NUMBER__SHIFT 0
+static inline uint32_t PC_TASK_CON_TASK_NUMBER(uint32_t val)
+{
+ return ((val) << PC_TASK_CON_TASK_NUMBER__SHIFT) & PC_TASK_CON_TASK_NUMBER__MASK;
+}
+
+#define REG_PC_TASK_DMA_BASE_ADDR 0x00000034
+#define PC_TASK_DMA_BASE_ADDR_DMA_BASE_ADDR__MASK 0xfffffff0
+#define PC_TASK_DMA_BASE_ADDR_DMA_BASE_ADDR__SHIFT 4
+static inline uint32_t PC_TASK_DMA_BASE_ADDR_DMA_BASE_ADDR(uint32_t val)
+{
+ return ((val) << PC_TASK_DMA_BASE_ADDR_DMA_BASE_ADDR__SHIFT) & PC_TASK_DMA_BASE_ADDR_DMA_BASE_ADDR__MASK;
+}
+#define PC_TASK_DMA_BASE_ADDR_RESERVED_0__MASK 0x0000000f
+#define PC_TASK_DMA_BASE_ADDR_RESERVED_0__SHIFT 0
+static inline uint32_t PC_TASK_DMA_BASE_ADDR_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_TASK_DMA_BASE_ADDR_RESERVED_0__SHIFT) & PC_TASK_DMA_BASE_ADDR_RESERVED_0__MASK;
+}
+
+#define REG_PC_TASK_STATUS 0x0000003c
+#define PC_TASK_STATUS_RESERVED_0__MASK 0xf0000000
+#define PC_TASK_STATUS_RESERVED_0__SHIFT 28
+static inline uint32_t PC_TASK_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_TASK_STATUS_RESERVED_0__SHIFT) & PC_TASK_STATUS_RESERVED_0__MASK;
+}
+#define PC_TASK_STATUS_TASK_STATUS__MASK 0x0fffffff
+#define PC_TASK_STATUS_TASK_STATUS__SHIFT 0
+static inline uint32_t PC_TASK_STATUS_TASK_STATUS(uint32_t val)
+{
+ return ((val) << PC_TASK_STATUS_TASK_STATUS__SHIFT) & PC_TASK_STATUS_TASK_STATUS__MASK;
+}
+
+#define REG_CNA_S_STATUS 0x00001000
+#define CNA_S_STATUS_RESERVED_0__MASK 0xfffc0000
+#define CNA_S_STATUS_RESERVED_0__SHIFT 18
+static inline uint32_t CNA_S_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_S_STATUS_RESERVED_0__SHIFT) & CNA_S_STATUS_RESERVED_0__MASK;
+}
+#define CNA_S_STATUS_STATUS_1__MASK 0x00030000
+#define CNA_S_STATUS_STATUS_1__SHIFT 16
+static inline uint32_t CNA_S_STATUS_STATUS_1(uint32_t val)
+{
+ return ((val) << CNA_S_STATUS_STATUS_1__SHIFT) & CNA_S_STATUS_STATUS_1__MASK;
+}
+#define CNA_S_STATUS_RESERVED_1__MASK 0x0000fffc
+#define CNA_S_STATUS_RESERVED_1__SHIFT 2
+static inline uint32_t CNA_S_STATUS_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_S_STATUS_RESERVED_1__SHIFT) & CNA_S_STATUS_RESERVED_1__MASK;
+}
+#define CNA_S_STATUS_STATUS_0__MASK 0x00000003
+#define CNA_S_STATUS_STATUS_0__SHIFT 0
+static inline uint32_t CNA_S_STATUS_STATUS_0(uint32_t val)
+{
+ return ((val) << CNA_S_STATUS_STATUS_0__SHIFT) & CNA_S_STATUS_STATUS_0__MASK;
+}
+
+#define REG_CNA_S_POINTER 0x00001004
+#define CNA_S_POINTER_RESERVED_0__MASK 0xfffe0000
+#define CNA_S_POINTER_RESERVED_0__SHIFT 17
+static inline uint32_t CNA_S_POINTER_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_S_POINTER_RESERVED_0__SHIFT) & CNA_S_POINTER_RESERVED_0__MASK;
+}
+#define CNA_S_POINTER_EXECUTER__MASK 0x00010000
+#define CNA_S_POINTER_EXECUTER__SHIFT 16
+static inline uint32_t CNA_S_POINTER_EXECUTER(uint32_t val)
+{
+ return ((val) << CNA_S_POINTER_EXECUTER__SHIFT) & CNA_S_POINTER_EXECUTER__MASK;
+}
+#define CNA_S_POINTER_RESERVED_1__MASK 0x0000ffc0
+#define CNA_S_POINTER_RESERVED_1__SHIFT 6
+static inline uint32_t CNA_S_POINTER_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_S_POINTER_RESERVED_1__SHIFT) & CNA_S_POINTER_RESERVED_1__MASK;
+}
+#define CNA_S_POINTER_EXECUTER_PP_CLEAR__MASK 0x00000020
+#define CNA_S_POINTER_EXECUTER_PP_CLEAR__SHIFT 5
+static inline uint32_t CNA_S_POINTER_EXECUTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << CNA_S_POINTER_EXECUTER_PP_CLEAR__SHIFT) & CNA_S_POINTER_EXECUTER_PP_CLEAR__MASK;
+}
+#define CNA_S_POINTER_POINTER_PP_CLEAR__MASK 0x00000010
+#define CNA_S_POINTER_POINTER_PP_CLEAR__SHIFT 4
+static inline uint32_t CNA_S_POINTER_POINTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << CNA_S_POINTER_POINTER_PP_CLEAR__SHIFT) & CNA_S_POINTER_POINTER_PP_CLEAR__MASK;
+}
+#define CNA_S_POINTER_POINTER_PP_MODE__MASK 0x00000008
+#define CNA_S_POINTER_POINTER_PP_MODE__SHIFT 3
+static inline uint32_t CNA_S_POINTER_POINTER_PP_MODE(uint32_t val)
+{
+ return ((val) << CNA_S_POINTER_POINTER_PP_MODE__SHIFT) & CNA_S_POINTER_POINTER_PP_MODE__MASK;
+}
+#define CNA_S_POINTER_EXECUTER_PP_EN__MASK 0x00000004
+#define CNA_S_POINTER_EXECUTER_PP_EN__SHIFT 2
+static inline uint32_t CNA_S_POINTER_EXECUTER_PP_EN(uint32_t val)
+{
+ return ((val) << CNA_S_POINTER_EXECUTER_PP_EN__SHIFT) & CNA_S_POINTER_EXECUTER_PP_EN__MASK;
+}
+#define CNA_S_POINTER_POINTER_PP_EN__MASK 0x00000002
+#define CNA_S_POINTER_POINTER_PP_EN__SHIFT 1
+static inline uint32_t CNA_S_POINTER_POINTER_PP_EN(uint32_t val)
+{
+ return ((val) << CNA_S_POINTER_POINTER_PP_EN__SHIFT) & CNA_S_POINTER_POINTER_PP_EN__MASK;
+}
+#define CNA_S_POINTER_POINTER__MASK 0x00000001
+#define CNA_S_POINTER_POINTER__SHIFT 0
+static inline uint32_t CNA_S_POINTER_POINTER(uint32_t val)
+{
+ return ((val) << CNA_S_POINTER_POINTER__SHIFT) & CNA_S_POINTER_POINTER__MASK;
+}
+
+#define REG_CNA_OPERATION_ENABLE 0x00001008
+#define CNA_OPERATION_ENABLE_RESERVED_0__MASK 0xfffffffe
+#define CNA_OPERATION_ENABLE_RESERVED_0__SHIFT 1
+static inline uint32_t CNA_OPERATION_ENABLE_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_OPERATION_ENABLE_RESERVED_0__SHIFT) & CNA_OPERATION_ENABLE_RESERVED_0__MASK;
+}
+#define CNA_OPERATION_ENABLE_OP_EN__MASK 0x00000001
+#define CNA_OPERATION_ENABLE_OP_EN__SHIFT 0
+static inline uint32_t CNA_OPERATION_ENABLE_OP_EN(uint32_t val)
+{
+ return ((val) << CNA_OPERATION_ENABLE_OP_EN__SHIFT) & CNA_OPERATION_ENABLE_OP_EN__MASK;
+}
+
+#define REG_CNA_CONV_CON1 0x0000100c
+#define CNA_CONV_CON1_RESERVED_0__MASK 0x80000000
+#define CNA_CONV_CON1_RESERVED_0__SHIFT 31
+static inline uint32_t CNA_CONV_CON1_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_RESERVED_0__SHIFT) & CNA_CONV_CON1_RESERVED_0__MASK;
+}
+#define CNA_CONV_CON1_NONALIGN_DMA__MASK 0x40000000
+#define CNA_CONV_CON1_NONALIGN_DMA__SHIFT 30
+static inline uint32_t CNA_CONV_CON1_NONALIGN_DMA(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_NONALIGN_DMA__SHIFT) & CNA_CONV_CON1_NONALIGN_DMA__MASK;
+}
+#define CNA_CONV_CON1_GROUP_LINE_OFF__MASK 0x20000000
+#define CNA_CONV_CON1_GROUP_LINE_OFF__SHIFT 29
+static inline uint32_t CNA_CONV_CON1_GROUP_LINE_OFF(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_GROUP_LINE_OFF__SHIFT) & CNA_CONV_CON1_GROUP_LINE_OFF__MASK;
+}
+#define CNA_CONV_CON1_RESERVED_1__MASK 0x1ffe0000
+#define CNA_CONV_CON1_RESERVED_1__SHIFT 17
+static inline uint32_t CNA_CONV_CON1_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_RESERVED_1__SHIFT) & CNA_CONV_CON1_RESERVED_1__MASK;
+}
+#define CNA_CONV_CON1_DECONV__MASK 0x00010000
+#define CNA_CONV_CON1_DECONV__SHIFT 16
+static inline uint32_t CNA_CONV_CON1_DECONV(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_DECONV__SHIFT) & CNA_CONV_CON1_DECONV__MASK;
+}
+#define CNA_CONV_CON1_ARGB_IN__MASK 0x0000f000
+#define CNA_CONV_CON1_ARGB_IN__SHIFT 12
+static inline uint32_t CNA_CONV_CON1_ARGB_IN(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_ARGB_IN__SHIFT) & CNA_CONV_CON1_ARGB_IN__MASK;
+}
+#define CNA_CONV_CON1_RESERVED_2__MASK 0x00000c00
+#define CNA_CONV_CON1_RESERVED_2__SHIFT 10
+static inline uint32_t CNA_CONV_CON1_RESERVED_2(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_RESERVED_2__SHIFT) & CNA_CONV_CON1_RESERVED_2__MASK;
+}
+#define CNA_CONV_CON1_PROC_PRECISION__MASK 0x00000380
+#define CNA_CONV_CON1_PROC_PRECISION__SHIFT 7
+static inline uint32_t CNA_CONV_CON1_PROC_PRECISION(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_PROC_PRECISION__SHIFT) & CNA_CONV_CON1_PROC_PRECISION__MASK;
+}
+#define CNA_CONV_CON1_IN_PRECISION__MASK 0x00000070
+#define CNA_CONV_CON1_IN_PRECISION__SHIFT 4
+static inline uint32_t CNA_CONV_CON1_IN_PRECISION(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_IN_PRECISION__SHIFT) & CNA_CONV_CON1_IN_PRECISION__MASK;
+}
+#define CNA_CONV_CON1_CONV_MODE__MASK 0x0000000f
+#define CNA_CONV_CON1_CONV_MODE__SHIFT 0
+static inline uint32_t CNA_CONV_CON1_CONV_MODE(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_CONV_MODE__SHIFT) & CNA_CONV_CON1_CONV_MODE__MASK;
+}
+
+#define REG_CNA_CONV_CON2 0x00001010
+#define CNA_CONV_CON2_RESERVED_0__MASK 0xff000000
+#define CNA_CONV_CON2_RESERVED_0__SHIFT 24
+static inline uint32_t CNA_CONV_CON2_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON2_RESERVED_0__SHIFT) & CNA_CONV_CON2_RESERVED_0__MASK;
+}
+#define CNA_CONV_CON2_KERNEL_GROUP__MASK 0x00ff0000
+#define CNA_CONV_CON2_KERNEL_GROUP__SHIFT 16
+static inline uint32_t CNA_CONV_CON2_KERNEL_GROUP(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON2_KERNEL_GROUP__SHIFT) & CNA_CONV_CON2_KERNEL_GROUP__MASK;
+}
+#define CNA_CONV_CON2_RESERVED_1__MASK 0x0000c000
+#define CNA_CONV_CON2_RESERVED_1__SHIFT 14
+static inline uint32_t CNA_CONV_CON2_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON2_RESERVED_1__SHIFT) & CNA_CONV_CON2_RESERVED_1__MASK;
+}
+#define CNA_CONV_CON2_FEATURE_GRAINS__MASK 0x00003ff0
+#define CNA_CONV_CON2_FEATURE_GRAINS__SHIFT 4
+static inline uint32_t CNA_CONV_CON2_FEATURE_GRAINS(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON2_FEATURE_GRAINS__SHIFT) & CNA_CONV_CON2_FEATURE_GRAINS__MASK;
+}
+#define CNA_CONV_CON2_RESERVED_2__MASK 0x00000008
+#define CNA_CONV_CON2_RESERVED_2__SHIFT 3
+static inline uint32_t CNA_CONV_CON2_RESERVED_2(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON2_RESERVED_2__SHIFT) & CNA_CONV_CON2_RESERVED_2__MASK;
+}
+#define CNA_CONV_CON2_CSC_WO_EN__MASK 0x00000004
+#define CNA_CONV_CON2_CSC_WO_EN__SHIFT 2
+static inline uint32_t CNA_CONV_CON2_CSC_WO_EN(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON2_CSC_WO_EN__SHIFT) & CNA_CONV_CON2_CSC_WO_EN__MASK;
+}
+#define CNA_CONV_CON2_CSC_DO_EN__MASK 0x00000002
+#define CNA_CONV_CON2_CSC_DO_EN__SHIFT 1
+static inline uint32_t CNA_CONV_CON2_CSC_DO_EN(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON2_CSC_DO_EN__SHIFT) & CNA_CONV_CON2_CSC_DO_EN__MASK;
+}
+#define CNA_CONV_CON2_CMD_FIFO_SRST__MASK 0x00000001
+#define CNA_CONV_CON2_CMD_FIFO_SRST__SHIFT 0
+static inline uint32_t CNA_CONV_CON2_CMD_FIFO_SRST(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON2_CMD_FIFO_SRST__SHIFT) & CNA_CONV_CON2_CMD_FIFO_SRST__MASK;
+}
+
+#define REG_CNA_CONV_CON3 0x00001014
+#define CNA_CONV_CON3_RESERVED_0__MASK 0x80000000
+#define CNA_CONV_CON3_RESERVED_0__SHIFT 31
+static inline uint32_t CNA_CONV_CON3_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_RESERVED_0__SHIFT) & CNA_CONV_CON3_RESERVED_0__MASK;
+}
+#define CNA_CONV_CON3_NN_MODE__MASK 0x70000000
+#define CNA_CONV_CON3_NN_MODE__SHIFT 28
+static inline uint32_t CNA_CONV_CON3_NN_MODE(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_NN_MODE__SHIFT) & CNA_CONV_CON3_NN_MODE__MASK;
+}
+#define CNA_CONV_CON3_RESERVED_1__MASK 0x0c000000
+#define CNA_CONV_CON3_RESERVED_1__SHIFT 26
+static inline uint32_t CNA_CONV_CON3_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_RESERVED_1__SHIFT) & CNA_CONV_CON3_RESERVED_1__MASK;
+}
+#define CNA_CONV_CON3_ATROUS_Y_DILATION__MASK 0x03e00000
+#define CNA_CONV_CON3_ATROUS_Y_DILATION__SHIFT 21
+static inline uint32_t CNA_CONV_CON3_ATROUS_Y_DILATION(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_ATROUS_Y_DILATION__SHIFT) & CNA_CONV_CON3_ATROUS_Y_DILATION__MASK;
+}
+#define CNA_CONV_CON3_ATROUS_X_DILATION__MASK 0x001f0000
+#define CNA_CONV_CON3_ATROUS_X_DILATION__SHIFT 16
+static inline uint32_t CNA_CONV_CON3_ATROUS_X_DILATION(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_ATROUS_X_DILATION__SHIFT) & CNA_CONV_CON3_ATROUS_X_DILATION__MASK;
+}
+#define CNA_CONV_CON3_RESERVED_2__MASK 0x0000c000
+#define CNA_CONV_CON3_RESERVED_2__SHIFT 14
+static inline uint32_t CNA_CONV_CON3_RESERVED_2(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_RESERVED_2__SHIFT) & CNA_CONV_CON3_RESERVED_2__MASK;
+}
+#define CNA_CONV_CON3_DECONV_Y_STRIDE__MASK 0x00003800
+#define CNA_CONV_CON3_DECONV_Y_STRIDE__SHIFT 11
+static inline uint32_t CNA_CONV_CON3_DECONV_Y_STRIDE(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_DECONV_Y_STRIDE__SHIFT) & CNA_CONV_CON3_DECONV_Y_STRIDE__MASK;
+}
+#define CNA_CONV_CON3_DECONV_X_STRIDE__MASK 0x00000700
+#define CNA_CONV_CON3_DECONV_X_STRIDE__SHIFT 8
+static inline uint32_t CNA_CONV_CON3_DECONV_X_STRIDE(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_DECONV_X_STRIDE__SHIFT) & CNA_CONV_CON3_DECONV_X_STRIDE__MASK;
+}
+#define CNA_CONV_CON3_RESERVED_3__MASK 0x000000c0
+#define CNA_CONV_CON3_RESERVED_3__SHIFT 6
+static inline uint32_t CNA_CONV_CON3_RESERVED_3(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_RESERVED_3__SHIFT) & CNA_CONV_CON3_RESERVED_3__MASK;
+}
+#define CNA_CONV_CON3_CONV_Y_STRIDE__MASK 0x00000038
+#define CNA_CONV_CON3_CONV_Y_STRIDE__SHIFT 3
+static inline uint32_t CNA_CONV_CON3_CONV_Y_STRIDE(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_CONV_Y_STRIDE__SHIFT) & CNA_CONV_CON3_CONV_Y_STRIDE__MASK;
+}
+#define CNA_CONV_CON3_CONV_X_STRIDE__MASK 0x00000007
+#define CNA_CONV_CON3_CONV_X_STRIDE__SHIFT 0
+static inline uint32_t CNA_CONV_CON3_CONV_X_STRIDE(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_CONV_X_STRIDE__SHIFT) & CNA_CONV_CON3_CONV_X_STRIDE__MASK;
+}
+
+#define REG_CNA_DATA_SIZE0 0x00001020
+#define CNA_DATA_SIZE0_RESERVED_0__MASK 0xf8000000
+#define CNA_DATA_SIZE0_RESERVED_0__SHIFT 27
+static inline uint32_t CNA_DATA_SIZE0_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE0_RESERVED_0__SHIFT) & CNA_DATA_SIZE0_RESERVED_0__MASK;
+}
+#define CNA_DATA_SIZE0_DATAIN_WIDTH__MASK 0x07ff0000
+#define CNA_DATA_SIZE0_DATAIN_WIDTH__SHIFT 16
+static inline uint32_t CNA_DATA_SIZE0_DATAIN_WIDTH(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE0_DATAIN_WIDTH__SHIFT) & CNA_DATA_SIZE0_DATAIN_WIDTH__MASK;
+}
+#define CNA_DATA_SIZE0_RESERVED_1__MASK 0x0000f800
+#define CNA_DATA_SIZE0_RESERVED_1__SHIFT 11
+static inline uint32_t CNA_DATA_SIZE0_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE0_RESERVED_1__SHIFT) & CNA_DATA_SIZE0_RESERVED_1__MASK;
+}
+#define CNA_DATA_SIZE0_DATAIN_HEIGHT__MASK 0x000007ff
+#define CNA_DATA_SIZE0_DATAIN_HEIGHT__SHIFT 0
+static inline uint32_t CNA_DATA_SIZE0_DATAIN_HEIGHT(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE0_DATAIN_HEIGHT__SHIFT) & CNA_DATA_SIZE0_DATAIN_HEIGHT__MASK;
+}
+
+#define REG_CNA_DATA_SIZE1 0x00001024
+#define CNA_DATA_SIZE1_RESERVED_0__MASK 0xc0000000
+#define CNA_DATA_SIZE1_RESERVED_0__SHIFT 30
+static inline uint32_t CNA_DATA_SIZE1_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE1_RESERVED_0__SHIFT) & CNA_DATA_SIZE1_RESERVED_0__MASK;
+}
+#define CNA_DATA_SIZE1_DATAIN_CHANNEL_REAL__MASK 0x3fff0000
+#define CNA_DATA_SIZE1_DATAIN_CHANNEL_REAL__SHIFT 16
+static inline uint32_t CNA_DATA_SIZE1_DATAIN_CHANNEL_REAL(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE1_DATAIN_CHANNEL_REAL__SHIFT) & CNA_DATA_SIZE1_DATAIN_CHANNEL_REAL__MASK;
+}
+#define CNA_DATA_SIZE1_DATAIN_CHANNEL__MASK 0x0000ffff
+#define CNA_DATA_SIZE1_DATAIN_CHANNEL__SHIFT 0
+static inline uint32_t CNA_DATA_SIZE1_DATAIN_CHANNEL(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE1_DATAIN_CHANNEL__SHIFT) & CNA_DATA_SIZE1_DATAIN_CHANNEL__MASK;
+}
+
+#define REG_CNA_DATA_SIZE2 0x00001028
+#define CNA_DATA_SIZE2_RESERVED_0__MASK 0xfffff800
+#define CNA_DATA_SIZE2_RESERVED_0__SHIFT 11
+static inline uint32_t CNA_DATA_SIZE2_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE2_RESERVED_0__SHIFT) & CNA_DATA_SIZE2_RESERVED_0__MASK;
+}
+#define CNA_DATA_SIZE2_DATAOUT_WIDTH__MASK 0x000007ff
+#define CNA_DATA_SIZE2_DATAOUT_WIDTH__SHIFT 0
+static inline uint32_t CNA_DATA_SIZE2_DATAOUT_WIDTH(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE2_DATAOUT_WIDTH__SHIFT) & CNA_DATA_SIZE2_DATAOUT_WIDTH__MASK;
+}
+
+#define REG_CNA_DATA_SIZE3 0x0000102c
+#define CNA_DATA_SIZE3_RESERVED_0__MASK 0xff000000
+#define CNA_DATA_SIZE3_RESERVED_0__SHIFT 24
+static inline uint32_t CNA_DATA_SIZE3_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE3_RESERVED_0__SHIFT) & CNA_DATA_SIZE3_RESERVED_0__MASK;
+}
+#define CNA_DATA_SIZE3_SURF_MODE__MASK 0x00c00000
+#define CNA_DATA_SIZE3_SURF_MODE__SHIFT 22
+static inline uint32_t CNA_DATA_SIZE3_SURF_MODE(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE3_SURF_MODE__SHIFT) & CNA_DATA_SIZE3_SURF_MODE__MASK;
+}
+#define CNA_DATA_SIZE3_DATAOUT_ATOMICS__MASK 0x003fffff
+#define CNA_DATA_SIZE3_DATAOUT_ATOMICS__SHIFT 0
+static inline uint32_t CNA_DATA_SIZE3_DATAOUT_ATOMICS(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE3_DATAOUT_ATOMICS__SHIFT) & CNA_DATA_SIZE3_DATAOUT_ATOMICS__MASK;
+}
+
+#define REG_CNA_WEIGHT_SIZE0 0x00001030
+#define CNA_WEIGHT_SIZE0_WEIGHT_BYTES__MASK 0xffffffff
+#define CNA_WEIGHT_SIZE0_WEIGHT_BYTES__SHIFT 0
+static inline uint32_t CNA_WEIGHT_SIZE0_WEIGHT_BYTES(uint32_t val)
+{
+ return ((val) << CNA_WEIGHT_SIZE0_WEIGHT_BYTES__SHIFT) & CNA_WEIGHT_SIZE0_WEIGHT_BYTES__MASK;
+}
+
+#define REG_CNA_WEIGHT_SIZE1 0x00001034
+#define CNA_WEIGHT_SIZE1_RESERVED_0__MASK 0xfff80000
+#define CNA_WEIGHT_SIZE1_RESERVED_0__SHIFT 19
+static inline uint32_t CNA_WEIGHT_SIZE1_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_WEIGHT_SIZE1_RESERVED_0__SHIFT) & CNA_WEIGHT_SIZE1_RESERVED_0__MASK;
+}
+#define CNA_WEIGHT_SIZE1_WEIGHT_BYTES_PER_KERNEL__MASK 0x0007ffff
+#define CNA_WEIGHT_SIZE1_WEIGHT_BYTES_PER_KERNEL__SHIFT 0
+static inline uint32_t CNA_WEIGHT_SIZE1_WEIGHT_BYTES_PER_KERNEL(uint32_t val)
+{
+ return ((val) << CNA_WEIGHT_SIZE1_WEIGHT_BYTES_PER_KERNEL__SHIFT) & CNA_WEIGHT_SIZE1_WEIGHT_BYTES_PER_KERNEL__MASK;
+}
+
+#define REG_CNA_WEIGHT_SIZE2 0x00001038
+#define CNA_WEIGHT_SIZE2_RESERVED_0__MASK 0xe0000000
+#define CNA_WEIGHT_SIZE2_RESERVED_0__SHIFT 29
+static inline uint32_t CNA_WEIGHT_SIZE2_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_WEIGHT_SIZE2_RESERVED_0__SHIFT) & CNA_WEIGHT_SIZE2_RESERVED_0__MASK;
+}
+#define CNA_WEIGHT_SIZE2_WEIGHT_WIDTH__MASK 0x1f000000
+#define CNA_WEIGHT_SIZE2_WEIGHT_WIDTH__SHIFT 24
+static inline uint32_t CNA_WEIGHT_SIZE2_WEIGHT_WIDTH(uint32_t val)
+{
+ return ((val) << CNA_WEIGHT_SIZE2_WEIGHT_WIDTH__SHIFT) & CNA_WEIGHT_SIZE2_WEIGHT_WIDTH__MASK;
+}
+#define CNA_WEIGHT_SIZE2_RESERVED_1__MASK 0x00e00000
+#define CNA_WEIGHT_SIZE2_RESERVED_1__SHIFT 21
+static inline uint32_t CNA_WEIGHT_SIZE2_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_WEIGHT_SIZE2_RESERVED_1__SHIFT) & CNA_WEIGHT_SIZE2_RESERVED_1__MASK;
+}
+#define CNA_WEIGHT_SIZE2_WEIGHT_HEIGHT__MASK 0x001f0000
+#define CNA_WEIGHT_SIZE2_WEIGHT_HEIGHT__SHIFT 16
+static inline uint32_t CNA_WEIGHT_SIZE2_WEIGHT_HEIGHT(uint32_t val)
+{
+ return ((val) << CNA_WEIGHT_SIZE2_WEIGHT_HEIGHT__SHIFT) & CNA_WEIGHT_SIZE2_WEIGHT_HEIGHT__MASK;
+}
+#define CNA_WEIGHT_SIZE2_RESERVED_2__MASK 0x0000c000
+#define CNA_WEIGHT_SIZE2_RESERVED_2__SHIFT 14
+static inline uint32_t CNA_WEIGHT_SIZE2_RESERVED_2(uint32_t val)
+{
+ return ((val) << CNA_WEIGHT_SIZE2_RESERVED_2__SHIFT) & CNA_WEIGHT_SIZE2_RESERVED_2__MASK;
+}
+#define CNA_WEIGHT_SIZE2_WEIGHT_KERNELS__MASK 0x00003fff
+#define CNA_WEIGHT_SIZE2_WEIGHT_KERNELS__SHIFT 0
+static inline uint32_t CNA_WEIGHT_SIZE2_WEIGHT_KERNELS(uint32_t val)
+{
+ return ((val) << CNA_WEIGHT_SIZE2_WEIGHT_KERNELS__SHIFT) & CNA_WEIGHT_SIZE2_WEIGHT_KERNELS__MASK;
+}
+
+#define REG_CNA_CBUF_CON0 0x00001040
+#define CNA_CBUF_CON0_RESERVED_0__MASK 0xffffc000
+#define CNA_CBUF_CON0_RESERVED_0__SHIFT 14
+static inline uint32_t CNA_CBUF_CON0_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_CBUF_CON0_RESERVED_0__SHIFT) & CNA_CBUF_CON0_RESERVED_0__MASK;
+}
+#define CNA_CBUF_CON0_WEIGHT_REUSE__MASK 0x00002000
+#define CNA_CBUF_CON0_WEIGHT_REUSE__SHIFT 13
+static inline uint32_t CNA_CBUF_CON0_WEIGHT_REUSE(uint32_t val)
+{
+ return ((val) << CNA_CBUF_CON0_WEIGHT_REUSE__SHIFT) & CNA_CBUF_CON0_WEIGHT_REUSE__MASK;
+}
+#define CNA_CBUF_CON0_DATA_REUSE__MASK 0x00001000
+#define CNA_CBUF_CON0_DATA_REUSE__SHIFT 12
+static inline uint32_t CNA_CBUF_CON0_DATA_REUSE(uint32_t val)
+{
+ return ((val) << CNA_CBUF_CON0_DATA_REUSE__SHIFT) & CNA_CBUF_CON0_DATA_REUSE__MASK;
+}
+#define CNA_CBUF_CON0_RESERVED_1__MASK 0x00000800
+#define CNA_CBUF_CON0_RESERVED_1__SHIFT 11
+static inline uint32_t CNA_CBUF_CON0_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_CBUF_CON0_RESERVED_1__SHIFT) & CNA_CBUF_CON0_RESERVED_1__MASK;
+}
+#define CNA_CBUF_CON0_FC_DATA_BANK__MASK 0x00000700
+#define CNA_CBUF_CON0_FC_DATA_BANK__SHIFT 8
+static inline uint32_t CNA_CBUF_CON0_FC_DATA_BANK(uint32_t val)
+{
+ return ((val) << CNA_CBUF_CON0_FC_DATA_BANK__SHIFT) & CNA_CBUF_CON0_FC_DATA_BANK__MASK;
+}
+#define CNA_CBUF_CON0_WEIGHT_BANK__MASK 0x000000f0
+#define CNA_CBUF_CON0_WEIGHT_BANK__SHIFT 4
+static inline uint32_t CNA_CBUF_CON0_WEIGHT_BANK(uint32_t val)
+{
+ return ((val) << CNA_CBUF_CON0_WEIGHT_BANK__SHIFT) & CNA_CBUF_CON0_WEIGHT_BANK__MASK;
+}
+#define CNA_CBUF_CON0_DATA_BANK__MASK 0x0000000f
+#define CNA_CBUF_CON0_DATA_BANK__SHIFT 0
+static inline uint32_t CNA_CBUF_CON0_DATA_BANK(uint32_t val)
+{
+ return ((val) << CNA_CBUF_CON0_DATA_BANK__SHIFT) & CNA_CBUF_CON0_DATA_BANK__MASK;
+}
+
+#define REG_CNA_CBUF_CON1 0x00001044
+#define CNA_CBUF_CON1_RESERVED_0__MASK 0xffffc000
+#define CNA_CBUF_CON1_RESERVED_0__SHIFT 14
+static inline uint32_t CNA_CBUF_CON1_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_CBUF_CON1_RESERVED_0__SHIFT) & CNA_CBUF_CON1_RESERVED_0__MASK;
+}
+#define CNA_CBUF_CON1_DATA_ENTRIES__MASK 0x00003fff
+#define CNA_CBUF_CON1_DATA_ENTRIES__SHIFT 0
+static inline uint32_t CNA_CBUF_CON1_DATA_ENTRIES(uint32_t val)
+{
+ return ((val) << CNA_CBUF_CON1_DATA_ENTRIES__SHIFT) & CNA_CBUF_CON1_DATA_ENTRIES__MASK;
+}
+
+#define REG_CNA_CVT_CON0 0x0000104c
+#define CNA_CVT_CON0_RESERVED_0__MASK 0xf0000000
+#define CNA_CVT_CON0_RESERVED_0__SHIFT 28
+static inline uint32_t CNA_CVT_CON0_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON0_RESERVED_0__SHIFT) & CNA_CVT_CON0_RESERVED_0__MASK;
+}
+#define CNA_CVT_CON0_CVT_TRUNCATE_3__MASK 0x0fc00000
+#define CNA_CVT_CON0_CVT_TRUNCATE_3__SHIFT 22
+static inline uint32_t CNA_CVT_CON0_CVT_TRUNCATE_3(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON0_CVT_TRUNCATE_3__SHIFT) & CNA_CVT_CON0_CVT_TRUNCATE_3__MASK;
+}
+#define CNA_CVT_CON0_CVT_TRUNCATE_2__MASK 0x003f0000
+#define CNA_CVT_CON0_CVT_TRUNCATE_2__SHIFT 16
+static inline uint32_t CNA_CVT_CON0_CVT_TRUNCATE_2(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON0_CVT_TRUNCATE_2__SHIFT) & CNA_CVT_CON0_CVT_TRUNCATE_2__MASK;
+}
+#define CNA_CVT_CON0_CVT_TRUNCATE_1__MASK 0x0000fc00
+#define CNA_CVT_CON0_CVT_TRUNCATE_1__SHIFT 10
+static inline uint32_t CNA_CVT_CON0_CVT_TRUNCATE_1(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON0_CVT_TRUNCATE_1__SHIFT) & CNA_CVT_CON0_CVT_TRUNCATE_1__MASK;
+}
+#define CNA_CVT_CON0_CVT_TRUNCATE_0__MASK 0x000003f0
+#define CNA_CVT_CON0_CVT_TRUNCATE_0__SHIFT 4
+static inline uint32_t CNA_CVT_CON0_CVT_TRUNCATE_0(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON0_CVT_TRUNCATE_0__SHIFT) & CNA_CVT_CON0_CVT_TRUNCATE_0__MASK;
+}
+#define CNA_CVT_CON0_DATA_SIGN__MASK 0x00000008
+#define CNA_CVT_CON0_DATA_SIGN__SHIFT 3
+static inline uint32_t CNA_CVT_CON0_DATA_SIGN(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON0_DATA_SIGN__SHIFT) & CNA_CVT_CON0_DATA_SIGN__MASK;
+}
+#define CNA_CVT_CON0_ROUND_TYPE__MASK 0x00000004
+#define CNA_CVT_CON0_ROUND_TYPE__SHIFT 2
+static inline uint32_t CNA_CVT_CON0_ROUND_TYPE(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON0_ROUND_TYPE__SHIFT) & CNA_CVT_CON0_ROUND_TYPE__MASK;
+}
+#define CNA_CVT_CON0_CVT_TYPE__MASK 0x00000002
+#define CNA_CVT_CON0_CVT_TYPE__SHIFT 1
+static inline uint32_t CNA_CVT_CON0_CVT_TYPE(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON0_CVT_TYPE__SHIFT) & CNA_CVT_CON0_CVT_TYPE__MASK;
+}
+#define CNA_CVT_CON0_CVT_BYPASS__MASK 0x00000001
+#define CNA_CVT_CON0_CVT_BYPASS__SHIFT 0
+static inline uint32_t CNA_CVT_CON0_CVT_BYPASS(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON0_CVT_BYPASS__SHIFT) & CNA_CVT_CON0_CVT_BYPASS__MASK;
+}
+
+#define REG_CNA_CVT_CON1 0x00001050
+#define CNA_CVT_CON1_CVT_SCALE0__MASK 0xffff0000
+#define CNA_CVT_CON1_CVT_SCALE0__SHIFT 16
+static inline uint32_t CNA_CVT_CON1_CVT_SCALE0(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON1_CVT_SCALE0__SHIFT) & CNA_CVT_CON1_CVT_SCALE0__MASK;
+}
+#define CNA_CVT_CON1_CVT_OFFSET0__MASK 0x0000ffff
+#define CNA_CVT_CON1_CVT_OFFSET0__SHIFT 0
+static inline uint32_t CNA_CVT_CON1_CVT_OFFSET0(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON1_CVT_OFFSET0__SHIFT) & CNA_CVT_CON1_CVT_OFFSET0__MASK;
+}
+
+#define REG_CNA_CVT_CON2 0x00001054
+#define CNA_CVT_CON2_CVT_SCALE1__MASK 0xffff0000
+#define CNA_CVT_CON2_CVT_SCALE1__SHIFT 16
+static inline uint32_t CNA_CVT_CON2_CVT_SCALE1(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON2_CVT_SCALE1__SHIFT) & CNA_CVT_CON2_CVT_SCALE1__MASK;
+}
+#define CNA_CVT_CON2_CVT_OFFSET1__MASK 0x0000ffff
+#define CNA_CVT_CON2_CVT_OFFSET1__SHIFT 0
+static inline uint32_t CNA_CVT_CON2_CVT_OFFSET1(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON2_CVT_OFFSET1__SHIFT) & CNA_CVT_CON2_CVT_OFFSET1__MASK;
+}
+
+#define REG_CNA_CVT_CON3 0x00001058
+#define CNA_CVT_CON3_CVT_SCALE2__MASK 0xffff0000
+#define CNA_CVT_CON3_CVT_SCALE2__SHIFT 16
+static inline uint32_t CNA_CVT_CON3_CVT_SCALE2(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON3_CVT_SCALE2__SHIFT) & CNA_CVT_CON3_CVT_SCALE2__MASK;
+}
+#define CNA_CVT_CON3_CVT_OFFSET2__MASK 0x0000ffff
+#define CNA_CVT_CON3_CVT_OFFSET2__SHIFT 0
+static inline uint32_t CNA_CVT_CON3_CVT_OFFSET2(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON3_CVT_OFFSET2__SHIFT) & CNA_CVT_CON3_CVT_OFFSET2__MASK;
+}
+
+#define REG_CNA_CVT_CON4 0x0000105c
+#define CNA_CVT_CON4_CVT_SCALE3__MASK 0xffff0000
+#define CNA_CVT_CON4_CVT_SCALE3__SHIFT 16
+static inline uint32_t CNA_CVT_CON4_CVT_SCALE3(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON4_CVT_SCALE3__SHIFT) & CNA_CVT_CON4_CVT_SCALE3__MASK;
+}
+#define CNA_CVT_CON4_CVT_OFFSET3__MASK 0x0000ffff
+#define CNA_CVT_CON4_CVT_OFFSET3__SHIFT 0
+static inline uint32_t CNA_CVT_CON4_CVT_OFFSET3(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON4_CVT_OFFSET3__SHIFT) & CNA_CVT_CON4_CVT_OFFSET3__MASK;
+}
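+
+/*
+ * Illustrative sketch: CVT_CON1..CVT_CON4 each carry one 16-bit
+ * scale/offset pair, presumably one per conversion channel 0..3;
+ * e.g. pair 0 might be programmed as
+ *
+ *   uint32_t cvt1 = CNA_CVT_CON1_CVT_SCALE0(scale) |
+ *                   CNA_CVT_CON1_CVT_OFFSET0(offset);
+ *
+ * where scale and offset are hypothetical quantization parameters.
+ */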
+
+#define REG_CNA_FC_CON0 0x00001060
+#define CNA_FC_CON0_FC_SKIP_DATA__MASK 0xffff0000
+#define CNA_FC_CON0_FC_SKIP_DATA__SHIFT 16
+static inline uint32_t CNA_FC_CON0_FC_SKIP_DATA(uint32_t val)
+{
+ return ((val) << CNA_FC_CON0_FC_SKIP_DATA__SHIFT) & CNA_FC_CON0_FC_SKIP_DATA__MASK;
+}
+#define CNA_FC_CON0_RESERVED_0__MASK 0x0000fffe
+#define CNA_FC_CON0_RESERVED_0__SHIFT 1
+static inline uint32_t CNA_FC_CON0_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_FC_CON0_RESERVED_0__SHIFT) & CNA_FC_CON0_RESERVED_0__MASK;
+}
+#define CNA_FC_CON0_FC_SKIP_EN__MASK 0x00000001
+#define CNA_FC_CON0_FC_SKIP_EN__SHIFT 0
+static inline uint32_t CNA_FC_CON0_FC_SKIP_EN(uint32_t val)
+{
+ return ((val) << CNA_FC_CON0_FC_SKIP_EN__SHIFT) & CNA_FC_CON0_FC_SKIP_EN__MASK;
+}
+
+#define REG_CNA_FC_CON1 0x00001064
+#define CNA_FC_CON1_RESERVED_0__MASK 0xfffe0000
+#define CNA_FC_CON1_RESERVED_0__SHIFT 17
+static inline uint32_t CNA_FC_CON1_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_FC_CON1_RESERVED_0__SHIFT) & CNA_FC_CON1_RESERVED_0__MASK;
+}
+#define CNA_FC_CON1_DATA_OFFSET__MASK 0x0001ffff
+#define CNA_FC_CON1_DATA_OFFSET__SHIFT 0
+static inline uint32_t CNA_FC_CON1_DATA_OFFSET(uint32_t val)
+{
+ return ((val) << CNA_FC_CON1_DATA_OFFSET__SHIFT) & CNA_FC_CON1_DATA_OFFSET__MASK;
+}
+
+#define REG_CNA_PAD_CON0 0x00001068
+#define CNA_PAD_CON0_RESERVED_0__MASK 0xffffff00
+#define CNA_PAD_CON0_RESERVED_0__SHIFT 8
+static inline uint32_t CNA_PAD_CON0_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_PAD_CON0_RESERVED_0__SHIFT) & CNA_PAD_CON0_RESERVED_0__MASK;
+}
+#define CNA_PAD_CON0_PAD_LEFT__MASK 0x000000f0
+#define CNA_PAD_CON0_PAD_LEFT__SHIFT 4
+static inline uint32_t CNA_PAD_CON0_PAD_LEFT(uint32_t val)
+{
+ return ((val) << CNA_PAD_CON0_PAD_LEFT__SHIFT) & CNA_PAD_CON0_PAD_LEFT__MASK;
+}
+#define CNA_PAD_CON0_PAD_TOP__MASK 0x0000000f
+#define CNA_PAD_CON0_PAD_TOP__SHIFT 0
+static inline uint32_t CNA_PAD_CON0_PAD_TOP(uint32_t val)
+{
+ return ((val) << CNA_PAD_CON0_PAD_TOP__SHIFT) & CNA_PAD_CON0_PAD_TOP__MASK;
+}
+
+#define REG_CNA_FEATURE_DATA_ADDR 0x00001070
+#define CNA_FEATURE_DATA_ADDR_FEATURE_BASE_ADDR__MASK 0xffffffff
+#define CNA_FEATURE_DATA_ADDR_FEATURE_BASE_ADDR__SHIFT 0
+static inline uint32_t CNA_FEATURE_DATA_ADDR_FEATURE_BASE_ADDR(uint32_t val)
+{
+ return ((val) << CNA_FEATURE_DATA_ADDR_FEATURE_BASE_ADDR__SHIFT) & CNA_FEATURE_DATA_ADDR_FEATURE_BASE_ADDR__MASK;
+}
+
+#define REG_CNA_FC_CON2 0x00001074
+#define CNA_FC_CON2_RESERVED_0__MASK 0xfffe0000
+#define CNA_FC_CON2_RESERVED_0__SHIFT 17
+static inline uint32_t CNA_FC_CON2_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_FC_CON2_RESERVED_0__SHIFT) & CNA_FC_CON2_RESERVED_0__MASK;
+}
+#define CNA_FC_CON2_WEIGHT_OFFSET__MASK 0x0001ffff
+#define CNA_FC_CON2_WEIGHT_OFFSET__SHIFT 0
+static inline uint32_t CNA_FC_CON2_WEIGHT_OFFSET(uint32_t val)
+{
+ return ((val) << CNA_FC_CON2_WEIGHT_OFFSET__SHIFT) & CNA_FC_CON2_WEIGHT_OFFSET__MASK;
+}
+
+#define REG_CNA_DMA_CON0 0x00001078
+#define CNA_DMA_CON0_OV4K_BYPASS__MASK 0x80000000
+#define CNA_DMA_CON0_OV4K_BYPASS__SHIFT 31
+static inline uint32_t CNA_DMA_CON0_OV4K_BYPASS(uint32_t val)
+{
+ return ((val) << CNA_DMA_CON0_OV4K_BYPASS__SHIFT) & CNA_DMA_CON0_OV4K_BYPASS__MASK;
+}
+#define CNA_DMA_CON0_RESERVED_0__MASK 0x7ff00000
+#define CNA_DMA_CON0_RESERVED_0__SHIFT 20
+static inline uint32_t CNA_DMA_CON0_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_DMA_CON0_RESERVED_0__SHIFT) & CNA_DMA_CON0_RESERVED_0__MASK;
+}
+#define CNA_DMA_CON0_WEIGHT_BURST_LEN__MASK 0x000f0000
+#define CNA_DMA_CON0_WEIGHT_BURST_LEN__SHIFT 16
+static inline uint32_t CNA_DMA_CON0_WEIGHT_BURST_LEN(uint32_t val)
+{
+ return ((val) << CNA_DMA_CON0_WEIGHT_BURST_LEN__SHIFT) & CNA_DMA_CON0_WEIGHT_BURST_LEN__MASK;
+}
+#define CNA_DMA_CON0_RESERVED_1__MASK 0x0000fff0
+#define CNA_DMA_CON0_RESERVED_1__SHIFT 4
+static inline uint32_t CNA_DMA_CON0_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_DMA_CON0_RESERVED_1__SHIFT) & CNA_DMA_CON0_RESERVED_1__MASK;
+}
+#define CNA_DMA_CON0_DATA_BURST_LEN__MASK 0x0000000f
+#define CNA_DMA_CON0_DATA_BURST_LEN__SHIFT 0
+static inline uint32_t CNA_DMA_CON0_DATA_BURST_LEN(uint32_t val)
+{
+ return ((val) << CNA_DMA_CON0_DATA_BURST_LEN__SHIFT) & CNA_DMA_CON0_DATA_BURST_LEN__MASK;
+}
+
+#define REG_CNA_DMA_CON1 0x0000107c
+#define CNA_DMA_CON1_RESERVED_0__MASK 0xf0000000
+#define CNA_DMA_CON1_RESERVED_0__SHIFT 28
+static inline uint32_t CNA_DMA_CON1_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_DMA_CON1_RESERVED_0__SHIFT) & CNA_DMA_CON1_RESERVED_0__MASK;
+}
+#define CNA_DMA_CON1_LINE_STRIDE__MASK 0x0fffffff
+#define CNA_DMA_CON1_LINE_STRIDE__SHIFT 0
+static inline uint32_t CNA_DMA_CON1_LINE_STRIDE(uint32_t val)
+{
+ return ((val) << CNA_DMA_CON1_LINE_STRIDE__SHIFT) & CNA_DMA_CON1_LINE_STRIDE__MASK;
+}
+
+#define REG_CNA_DMA_CON2 0x00001080
+#define CNA_DMA_CON2_RESERVED_0__MASK 0xf0000000
+#define CNA_DMA_CON2_RESERVED_0__SHIFT 28
+static inline uint32_t CNA_DMA_CON2_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_DMA_CON2_RESERVED_0__SHIFT) & CNA_DMA_CON2_RESERVED_0__MASK;
+}
+#define CNA_DMA_CON2_SURF_STRIDE__MASK 0x0fffffff
+#define CNA_DMA_CON2_SURF_STRIDE__SHIFT 0
+static inline uint32_t CNA_DMA_CON2_SURF_STRIDE(uint32_t val)
+{
+ return ((val) << CNA_DMA_CON2_SURF_STRIDE__SHIFT) & CNA_DMA_CON2_SURF_STRIDE__MASK;
+}
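+
+/*
+ * Illustrative sketch: LINE_STRIDE and SURF_STRIDE are 28-bit DMA
+ * strides (units not specified here); values wider than the field
+ * are silently truncated by the mask, e.g.
+ *
+ *   uint32_t dma1 = CNA_DMA_CON1_LINE_STRIDE(line_stride);
+ */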
+
+#define REG_CNA_FC_DATA_SIZE0 0x00001084
+#define CNA_FC_DATA_SIZE0_RESERVED_0__MASK 0xc0000000
+#define CNA_FC_DATA_SIZE0_RESERVED_0__SHIFT 30
+static inline uint32_t CNA_FC_DATA_SIZE0_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_FC_DATA_SIZE0_RESERVED_0__SHIFT) & CNA_FC_DATA_SIZE0_RESERVED_0__MASK;
+}
+#define CNA_FC_DATA_SIZE0_DMA_WIDTH__MASK 0x3fff0000
+#define CNA_FC_DATA_SIZE0_DMA_WIDTH__SHIFT 16
+static inline uint32_t CNA_FC_DATA_SIZE0_DMA_WIDTH(uint32_t val)
+{
+ return ((val) << CNA_FC_DATA_SIZE0_DMA_WIDTH__SHIFT) & CNA_FC_DATA_SIZE0_DMA_WIDTH__MASK;
+}
+#define CNA_FC_DATA_SIZE0_RESERVED_1__MASK 0x0000f800
+#define CNA_FC_DATA_SIZE0_RESERVED_1__SHIFT 11
+static inline uint32_t CNA_FC_DATA_SIZE0_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_FC_DATA_SIZE0_RESERVED_1__SHIFT) & CNA_FC_DATA_SIZE0_RESERVED_1__MASK;
+}
+#define CNA_FC_DATA_SIZE0_DMA_HEIGHT__MASK 0x000007ff
+#define CNA_FC_DATA_SIZE0_DMA_HEIGHT__SHIFT 0
+static inline uint32_t CNA_FC_DATA_SIZE0_DMA_HEIGHT(uint32_t val)
+{
+ return ((val) << CNA_FC_DATA_SIZE0_DMA_HEIGHT__SHIFT) & CNA_FC_DATA_SIZE0_DMA_HEIGHT__MASK;
+}
+
+#define REG_CNA_FC_DATA_SIZE1 0x00001088
+#define CNA_FC_DATA_SIZE1_RESERVED_0__MASK 0xffff0000
+#define CNA_FC_DATA_SIZE1_RESERVED_0__SHIFT 16
+static inline uint32_t CNA_FC_DATA_SIZE1_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_FC_DATA_SIZE1_RESERVED_0__SHIFT) & CNA_FC_DATA_SIZE1_RESERVED_0__MASK;
+}
+#define CNA_FC_DATA_SIZE1_DMA_CHANNEL__MASK 0x0000ffff
+#define CNA_FC_DATA_SIZE1_DMA_CHANNEL__SHIFT 0
+static inline uint32_t CNA_FC_DATA_SIZE1_DMA_CHANNEL(uint32_t val)
+{
+ return ((val) << CNA_FC_DATA_SIZE1_DMA_CHANNEL__SHIFT) & CNA_FC_DATA_SIZE1_DMA_CHANNEL__MASK;
+}
+
+#define REG_CNA_CLK_GATE 0x00001090
+#define CNA_CLK_GATE_RESERVED_0__MASK 0xffffffe0
+#define CNA_CLK_GATE_RESERVED_0__SHIFT 5
+static inline uint32_t CNA_CLK_GATE_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_CLK_GATE_RESERVED_0__SHIFT) & CNA_CLK_GATE_RESERVED_0__MASK;
+}
+#define CNA_CLK_GATE_CBUF_CS_DISABLE_CLKGATE__MASK 0x00000010
+#define CNA_CLK_GATE_CBUF_CS_DISABLE_CLKGATE__SHIFT 4
+static inline uint32_t CNA_CLK_GATE_CBUF_CS_DISABLE_CLKGATE(uint32_t val)
+{
+ return ((val) << CNA_CLK_GATE_CBUF_CS_DISABLE_CLKGATE__SHIFT) & CNA_CLK_GATE_CBUF_CS_DISABLE_CLKGATE__MASK;
+}
+#define CNA_CLK_GATE_RESERVED_1__MASK 0x00000008
+#define CNA_CLK_GATE_RESERVED_1__SHIFT 3
+static inline uint32_t CNA_CLK_GATE_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_CLK_GATE_RESERVED_1__SHIFT) & CNA_CLK_GATE_RESERVED_1__MASK;
+}
+#define CNA_CLK_GATE_CSC_DISABLE_CLKGATE__MASK 0x00000004
+#define CNA_CLK_GATE_CSC_DISABLE_CLKGATE__SHIFT 2
+static inline uint32_t CNA_CLK_GATE_CSC_DISABLE_CLKGATE(uint32_t val)
+{
+ return ((val) << CNA_CLK_GATE_CSC_DISABLE_CLKGATE__SHIFT) & CNA_CLK_GATE_CSC_DISABLE_CLKGATE__MASK;
+}
+#define CNA_CLK_GATE_CNA_WEIGHT_DISABLE_CLKGATE__MASK 0x00000002
+#define CNA_CLK_GATE_CNA_WEIGHT_DISABLE_CLKGATE__SHIFT 1
+static inline uint32_t CNA_CLK_GATE_CNA_WEIGHT_DISABLE_CLKGATE(uint32_t val)
+{
+ return ((val) << CNA_CLK_GATE_CNA_WEIGHT_DISABLE_CLKGATE__SHIFT) & CNA_CLK_GATE_CNA_WEIGHT_DISABLE_CLKGATE__MASK;
+}
+#define CNA_CLK_GATE_CNA_FEATURE_DISABLE_CLKGATE__MASK 0x00000001
+#define CNA_CLK_GATE_CNA_FEATURE_DISABLE_CLKGATE__SHIFT 0
+static inline uint32_t CNA_CLK_GATE_CNA_FEATURE_DISABLE_CLKGATE(uint32_t val)
+{
+ return ((val) << CNA_CLK_GATE_CNA_FEATURE_DISABLE_CLKGATE__SHIFT) & CNA_CLK_GATE_CNA_FEATURE_DISABLE_CLKGATE__MASK;
+}
+
+#define REG_CNA_DCOMP_CTRL 0x00001100
+#define CNA_DCOMP_CTRL_RESERVED_0__MASK 0xfffffff0
+#define CNA_DCOMP_CTRL_RESERVED_0__SHIFT 4
+static inline uint32_t CNA_DCOMP_CTRL_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_CTRL_RESERVED_0__SHIFT) & CNA_DCOMP_CTRL_RESERVED_0__MASK;
+}
+#define CNA_DCOMP_CTRL_WT_DEC_BYPASS__MASK 0x00000008
+#define CNA_DCOMP_CTRL_WT_DEC_BYPASS__SHIFT 3
+static inline uint32_t CNA_DCOMP_CTRL_WT_DEC_BYPASS(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_CTRL_WT_DEC_BYPASS__SHIFT) & CNA_DCOMP_CTRL_WT_DEC_BYPASS__MASK;
+}
+#define CNA_DCOMP_CTRL_DECOMP_CONTROL__MASK 0x00000007
+#define CNA_DCOMP_CTRL_DECOMP_CONTROL__SHIFT 0
+static inline uint32_t CNA_DCOMP_CTRL_DECOMP_CONTROL(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_CTRL_DECOMP_CONTROL__SHIFT) & CNA_DCOMP_CTRL_DECOMP_CONTROL__MASK;
+}
+
+#define REG_CNA_DCOMP_REGNUM 0x00001104
+#define CNA_DCOMP_REGNUM_DCOMP_REGNUM__MASK 0xffffffff
+#define CNA_DCOMP_REGNUM_DCOMP_REGNUM__SHIFT 0
+static inline uint32_t CNA_DCOMP_REGNUM_DCOMP_REGNUM(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_REGNUM_DCOMP_REGNUM__SHIFT) & CNA_DCOMP_REGNUM_DCOMP_REGNUM__MASK;
+}
+
+#define REG_CNA_DCOMP_ADDR0 0x00001110
+#define CNA_DCOMP_ADDR0_DECOMPRESS_ADDR0__MASK 0xffffffff
+#define CNA_DCOMP_ADDR0_DECOMPRESS_ADDR0__SHIFT 0
+static inline uint32_t CNA_DCOMP_ADDR0_DECOMPRESS_ADDR0(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_ADDR0_DECOMPRESS_ADDR0__SHIFT) & CNA_DCOMP_ADDR0_DECOMPRESS_ADDR0__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT0 0x00001140
+#define CNA_DCOMP_AMOUNT0_DCOMP_AMOUNT0__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT0_DCOMP_AMOUNT0__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT0_DCOMP_AMOUNT0(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT0_DCOMP_AMOUNT0__SHIFT) & CNA_DCOMP_AMOUNT0_DCOMP_AMOUNT0__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT1 0x00001144
+#define CNA_DCOMP_AMOUNT1_DCOMP_AMOUNT1__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT1_DCOMP_AMOUNT1__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT1_DCOMP_AMOUNT1(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT1_DCOMP_AMOUNT1__SHIFT) & CNA_DCOMP_AMOUNT1_DCOMP_AMOUNT1__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT2 0x00001148
+#define CNA_DCOMP_AMOUNT2_DCOMP_AMOUNT2__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT2_DCOMP_AMOUNT2__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT2_DCOMP_AMOUNT2(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT2_DCOMP_AMOUNT2__SHIFT) & CNA_DCOMP_AMOUNT2_DCOMP_AMOUNT2__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT3 0x0000114c
+#define CNA_DCOMP_AMOUNT3_DCOMP_AMOUNT3__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT3_DCOMP_AMOUNT3__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT3_DCOMP_AMOUNT3(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT3_DCOMP_AMOUNT3__SHIFT) & CNA_DCOMP_AMOUNT3_DCOMP_AMOUNT3__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT4 0x00001150
+#define CNA_DCOMP_AMOUNT4_DCOMP_AMOUNT4__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT4_DCOMP_AMOUNT4__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT4_DCOMP_AMOUNT4(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT4_DCOMP_AMOUNT4__SHIFT) & CNA_DCOMP_AMOUNT4_DCOMP_AMOUNT4__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT5 0x00001154
+#define CNA_DCOMP_AMOUNT5_DCOMP_AMOUNT5__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT5_DCOMP_AMOUNT5__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT5_DCOMP_AMOUNT5(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT5_DCOMP_AMOUNT5__SHIFT) & CNA_DCOMP_AMOUNT5_DCOMP_AMOUNT5__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT6 0x00001158
+#define CNA_DCOMP_AMOUNT6_DCOMP_AMOUNT6__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT6_DCOMP_AMOUNT6__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT6_DCOMP_AMOUNT6(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT6_DCOMP_AMOUNT6__SHIFT) & CNA_DCOMP_AMOUNT6_DCOMP_AMOUNT6__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT7 0x0000115c
+#define CNA_DCOMP_AMOUNT7_DCOMP_AMOUNT7__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT7_DCOMP_AMOUNT7__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT7_DCOMP_AMOUNT7(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT7_DCOMP_AMOUNT7__SHIFT) & CNA_DCOMP_AMOUNT7_DCOMP_AMOUNT7__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT8 0x00001160
+#define CNA_DCOMP_AMOUNT8_DCOMP_AMOUNT8__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT8_DCOMP_AMOUNT8__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT8_DCOMP_AMOUNT8(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT8_DCOMP_AMOUNT8__SHIFT) & CNA_DCOMP_AMOUNT8_DCOMP_AMOUNT8__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT9 0x00001164
+#define CNA_DCOMP_AMOUNT9_DCOMP_AMOUNT9__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT9_DCOMP_AMOUNT9__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT9_DCOMP_AMOUNT9(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT9_DCOMP_AMOUNT9__SHIFT) & CNA_DCOMP_AMOUNT9_DCOMP_AMOUNT9__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT10 0x00001168
+#define CNA_DCOMP_AMOUNT10_DCOMP_AMOUNT10__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT10_DCOMP_AMOUNT10__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT10_DCOMP_AMOUNT10(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT10_DCOMP_AMOUNT10__SHIFT) & CNA_DCOMP_AMOUNT10_DCOMP_AMOUNT10__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT11 0x0000116c
+#define CNA_DCOMP_AMOUNT11_DCOMP_AMOUNT11__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT11_DCOMP_AMOUNT11__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT11_DCOMP_AMOUNT11(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT11_DCOMP_AMOUNT11__SHIFT) & CNA_DCOMP_AMOUNT11_DCOMP_AMOUNT11__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT12 0x00001170
+#define CNA_DCOMP_AMOUNT12_DCOMP_AMOUNT12__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT12_DCOMP_AMOUNT12__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT12_DCOMP_AMOUNT12(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT12_DCOMP_AMOUNT12__SHIFT) & CNA_DCOMP_AMOUNT12_DCOMP_AMOUNT12__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT13 0x00001174
+#define CNA_DCOMP_AMOUNT13_DCOMP_AMOUNT13__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT13_DCOMP_AMOUNT13__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT13_DCOMP_AMOUNT13(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT13_DCOMP_AMOUNT13__SHIFT) & CNA_DCOMP_AMOUNT13_DCOMP_AMOUNT13__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT14 0x00001178
+#define CNA_DCOMP_AMOUNT14_DCOMP_AMOUNT14__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT14_DCOMP_AMOUNT14__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT14_DCOMP_AMOUNT14(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT14_DCOMP_AMOUNT14__SHIFT) & CNA_DCOMP_AMOUNT14_DCOMP_AMOUNT14__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT15 0x0000117c
+#define CNA_DCOMP_AMOUNT15_DCOMP_AMOUNT15__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT15_DCOMP_AMOUNT15__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT15_DCOMP_AMOUNT15(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT15_DCOMP_AMOUNT15__SHIFT) & CNA_DCOMP_AMOUNT15_DCOMP_AMOUNT15__MASK;
+}
+
+#define REG_CNA_CVT_CON5 0x00001180
+#define CNA_CVT_CON5_PER_CHANNEL_CVT_EN__MASK 0xffffffff
+#define CNA_CVT_CON5_PER_CHANNEL_CVT_EN__SHIFT 0
+static inline uint32_t CNA_CVT_CON5_PER_CHANNEL_CVT_EN(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON5_PER_CHANNEL_CVT_EN__SHIFT) & CNA_CVT_CON5_PER_CHANNEL_CVT_EN__MASK;
+}
+
+#define REG_CNA_PAD_CON1 0x00001184
+#define CNA_PAD_CON1_PAD_VALUE__MASK 0xffffffff
+#define CNA_PAD_CON1_PAD_VALUE__SHIFT 0
+static inline uint32_t CNA_PAD_CON1_PAD_VALUE(uint32_t val)
+{
+ return ((val) << CNA_PAD_CON1_PAD_VALUE__SHIFT) & CNA_PAD_CON1_PAD_VALUE__MASK;
+}
+
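+/*
+ * Registers from offset 0x3000 onward appear to belong to the CORE
+ * unit (comment added for orientation; not in the generated input).
+ */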
+#define REG_CORE_S_STATUS 0x00003000
+#define CORE_S_STATUS_RESERVED_0__MASK 0xfffc0000
+#define CORE_S_STATUS_RESERVED_0__SHIFT 18
+static inline uint32_t CORE_S_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << CORE_S_STATUS_RESERVED_0__SHIFT) & CORE_S_STATUS_RESERVED_0__MASK;
+}
+#define CORE_S_STATUS_STATUS_1__MASK 0x00030000
+#define CORE_S_STATUS_STATUS_1__SHIFT 16
+static inline uint32_t CORE_S_STATUS_STATUS_1(uint32_t val)
+{
+ return ((val) << CORE_S_STATUS_STATUS_1__SHIFT) & CORE_S_STATUS_STATUS_1__MASK;
+}
+#define CORE_S_STATUS_RESERVED_1__MASK 0x0000fffc
+#define CORE_S_STATUS_RESERVED_1__SHIFT 2
+static inline uint32_t CORE_S_STATUS_RESERVED_1(uint32_t val)
+{
+ return ((val) << CORE_S_STATUS_RESERVED_1__SHIFT) & CORE_S_STATUS_RESERVED_1__MASK;
+}
+#define CORE_S_STATUS_STATUS_0__MASK 0x00000003
+#define CORE_S_STATUS_STATUS_0__SHIFT 0
+static inline uint32_t CORE_S_STATUS_STATUS_0(uint32_t val)
+{
+ return ((val) << CORE_S_STATUS_STATUS_0__SHIFT) & CORE_S_STATUS_STATUS_0__MASK;
+}
+
+#define REG_CORE_S_POINTER 0x00003004
+#define CORE_S_POINTER_RESERVED_0__MASK 0xfffe0000
+#define CORE_S_POINTER_RESERVED_0__SHIFT 17
+static inline uint32_t CORE_S_POINTER_RESERVED_0(uint32_t val)
+{
+ return ((val) << CORE_S_POINTER_RESERVED_0__SHIFT) & CORE_S_POINTER_RESERVED_0__MASK;
+}
+#define CORE_S_POINTER_EXECUTER__MASK 0x00010000
+#define CORE_S_POINTER_EXECUTER__SHIFT 16
+static inline uint32_t CORE_S_POINTER_EXECUTER(uint32_t val)
+{
+ return ((val) << CORE_S_POINTER_EXECUTER__SHIFT) & CORE_S_POINTER_EXECUTER__MASK;
+}
+#define CORE_S_POINTER_RESERVED_1__MASK 0x0000ffc0
+#define CORE_S_POINTER_RESERVED_1__SHIFT 6
+static inline uint32_t CORE_S_POINTER_RESERVED_1(uint32_t val)
+{
+ return ((val) << CORE_S_POINTER_RESERVED_1__SHIFT) & CORE_S_POINTER_RESERVED_1__MASK;
+}
+#define CORE_S_POINTER_EXECUTER_PP_CLEAR__MASK 0x00000020
+#define CORE_S_POINTER_EXECUTER_PP_CLEAR__SHIFT 5
+static inline uint32_t CORE_S_POINTER_EXECUTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << CORE_S_POINTER_EXECUTER_PP_CLEAR__SHIFT) & CORE_S_POINTER_EXECUTER_PP_CLEAR__MASK;
+}
+#define CORE_S_POINTER_POINTER_PP_CLEAR__MASK 0x00000010
+#define CORE_S_POINTER_POINTER_PP_CLEAR__SHIFT 4
+static inline uint32_t CORE_S_POINTER_POINTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << CORE_S_POINTER_POINTER_PP_CLEAR__SHIFT) & CORE_S_POINTER_POINTER_PP_CLEAR__MASK;
+}
+#define CORE_S_POINTER_POINTER_PP_MODE__MASK 0x00000008
+#define CORE_S_POINTER_POINTER_PP_MODE__SHIFT 3
+static inline uint32_t CORE_S_POINTER_POINTER_PP_MODE(uint32_t val)
+{
+ return ((val) << CORE_S_POINTER_POINTER_PP_MODE__SHIFT) & CORE_S_POINTER_POINTER_PP_MODE__MASK;
+}
+#define CORE_S_POINTER_EXECUTER_PP_EN__MASK 0x00000004
+#define CORE_S_POINTER_EXECUTER_PP_EN__SHIFT 2
+static inline uint32_t CORE_S_POINTER_EXECUTER_PP_EN(uint32_t val)
+{
+ return ((val) << CORE_S_POINTER_EXECUTER_PP_EN__SHIFT) & CORE_S_POINTER_EXECUTER_PP_EN__MASK;
+}
+#define CORE_S_POINTER_POINTER_PP_EN__MASK 0x00000002
+#define CORE_S_POINTER_POINTER_PP_EN__SHIFT 1
+static inline uint32_t CORE_S_POINTER_POINTER_PP_EN(uint32_t val)
+{
+ return ((val) << CORE_S_POINTER_POINTER_PP_EN__SHIFT) & CORE_S_POINTER_POINTER_PP_EN__MASK;
+}
+#define CORE_S_POINTER_POINTER__MASK 0x00000001
+#define CORE_S_POINTER_POINTER__SHIFT 0
+static inline uint32_t CORE_S_POINTER_POINTER(uint32_t val)
+{
+ return ((val) << CORE_S_POINTER_POINTER__SHIFT) & CORE_S_POINTER_POINTER__MASK;
+}
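+
+/*
+ * Illustrative note: the PP_* fields implement a ping-pong register
+ * group scheme; a hypothetical driver write selecting and enabling
+ * the pointer group could look like
+ *
+ *   uint32_t ptr = CORE_S_POINTER_POINTER_PP_EN(1) |
+ *                  CORE_S_POINTER_POINTER_PP_MODE(1) |
+ *                  CORE_S_POINTER_POINTER(group);
+ *
+ * with group being 0 or 1 (the POINTER field is a single bit).
+ */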
+
+#define REG_CORE_OPERATION_ENABLE 0x00003008
+#define CORE_OPERATION_ENABLE_RESERVED_0__MASK 0xfffffffe
+#define CORE_OPERATION_ENABLE_RESERVED_0__SHIFT 1
+static inline uint32_t CORE_OPERATION_ENABLE_RESERVED_0(uint32_t val)
+{
+ return ((val) << CORE_OPERATION_ENABLE_RESERVED_0__SHIFT) & CORE_OPERATION_ENABLE_RESERVED_0__MASK;
+}
+#define CORE_OPERATION_ENABLE_OP_EN__MASK 0x00000001
+#define CORE_OPERATION_ENABLE_OP_EN__SHIFT 0
+static inline uint32_t CORE_OPERATION_ENABLE_OP_EN(uint32_t val)
+{
+ return ((val) << CORE_OPERATION_ENABLE_OP_EN__SHIFT) & CORE_OPERATION_ENABLE_OP_EN__MASK;
+}
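+
+/*
+ * Illustrative: OP_EN is the only non-reserved bit here, so kicking
+ * the unit presumably reduces to writing
+ * CORE_OPERATION_ENABLE_OP_EN(1).
+ */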
+
+#define REG_CORE_MAC_GATING 0x0000300c
+#define CORE_MAC_GATING_RESERVED_0__MASK 0xf8000000
+#define CORE_MAC_GATING_RESERVED_0__SHIFT 27
+static inline uint32_t CORE_MAC_GATING_RESERVED_0(uint32_t val)
+{
+ return ((val) << CORE_MAC_GATING_RESERVED_0__SHIFT) & CORE_MAC_GATING_RESERVED_0__MASK;
+}
+#define CORE_MAC_GATING_SLCG_OP_EN__MASK 0x07ffffff
+#define CORE_MAC_GATING_SLCG_OP_EN__SHIFT 0
+static inline uint32_t CORE_MAC_GATING_SLCG_OP_EN(uint32_t val)
+{
+ return ((val) << CORE_MAC_GATING_SLCG_OP_EN__SHIFT) & CORE_MAC_GATING_SLCG_OP_EN__MASK;
+}
+
+#define REG_CORE_MISC_CFG 0x00003010
+#define CORE_MISC_CFG_RESERVED_0__MASK 0xfff00000
+#define CORE_MISC_CFG_RESERVED_0__SHIFT 20
+static inline uint32_t CORE_MISC_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << CORE_MISC_CFG_RESERVED_0__SHIFT) & CORE_MISC_CFG_RESERVED_0__MASK;
+}
+#define CORE_MISC_CFG_SOFT_GATING__MASK 0x000fc000
+#define CORE_MISC_CFG_SOFT_GATING__SHIFT 14
+static inline uint32_t CORE_MISC_CFG_SOFT_GATING(uint32_t val)
+{
+ return ((val) << CORE_MISC_CFG_SOFT_GATING__SHIFT) & CORE_MISC_CFG_SOFT_GATING__MASK;
+}
+#define CORE_MISC_CFG_RESERVED_1__MASK 0x00003800
+#define CORE_MISC_CFG_RESERVED_1__SHIFT 11
+static inline uint32_t CORE_MISC_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << CORE_MISC_CFG_RESERVED_1__SHIFT) & CORE_MISC_CFG_RESERVED_1__MASK;
+}
+#define CORE_MISC_CFG_PROC_PRECISION__MASK 0x00000700
+#define CORE_MISC_CFG_PROC_PRECISION__SHIFT 8
+static inline uint32_t CORE_MISC_CFG_PROC_PRECISION(uint32_t val)
+{
+ return ((val) << CORE_MISC_CFG_PROC_PRECISION__SHIFT) & CORE_MISC_CFG_PROC_PRECISION__MASK;
+}
+#define CORE_MISC_CFG_RESERVED_2__MASK 0x000000fc
+#define CORE_MISC_CFG_RESERVED_2__SHIFT 2
+static inline uint32_t CORE_MISC_CFG_RESERVED_2(uint32_t val)
+{
+ return ((val) << CORE_MISC_CFG_RESERVED_2__SHIFT) & CORE_MISC_CFG_RESERVED_2__MASK;
+}
+#define CORE_MISC_CFG_DW_EN__MASK 0x00000002
+#define CORE_MISC_CFG_DW_EN__SHIFT 1
+static inline uint32_t CORE_MISC_CFG_DW_EN(uint32_t val)
+{
+ return ((val) << CORE_MISC_CFG_DW_EN__SHIFT) & CORE_MISC_CFG_DW_EN__MASK;
+}
+#define CORE_MISC_CFG_QD_EN__MASK 0x00000001
+#define CORE_MISC_CFG_QD_EN__SHIFT 0
+static inline uint32_t CORE_MISC_CFG_QD_EN(uint32_t val)
+{
+ return ((val) << CORE_MISC_CFG_QD_EN__SHIFT) & CORE_MISC_CFG_QD_EN__MASK;
+}
+
+#define REG_CORE_DATAOUT_SIZE_0 0x00003014
+#define CORE_DATAOUT_SIZE_0_DATAOUT_HEIGHT__MASK 0xffff0000
+#define CORE_DATAOUT_SIZE_0_DATAOUT_HEIGHT__SHIFT 16
+static inline uint32_t CORE_DATAOUT_SIZE_0_DATAOUT_HEIGHT(uint32_t val)
+{
+ return ((val) << CORE_DATAOUT_SIZE_0_DATAOUT_HEIGHT__SHIFT) & CORE_DATAOUT_SIZE_0_DATAOUT_HEIGHT__MASK;
+}
+#define CORE_DATAOUT_SIZE_0_DATAOUT_WIDTH__MASK 0x0000ffff
+#define CORE_DATAOUT_SIZE_0_DATAOUT_WIDTH__SHIFT 0
+static inline uint32_t CORE_DATAOUT_SIZE_0_DATAOUT_WIDTH(uint32_t val)
+{
+ return ((val) << CORE_DATAOUT_SIZE_0_DATAOUT_WIDTH__SHIFT) & CORE_DATAOUT_SIZE_0_DATAOUT_WIDTH__MASK;
+}
+
+#define REG_CORE_DATAOUT_SIZE_1 0x00003018
+#define CORE_DATAOUT_SIZE_1_RESERVED_0__MASK 0xffff0000
+#define CORE_DATAOUT_SIZE_1_RESERVED_0__SHIFT 16
+static inline uint32_t CORE_DATAOUT_SIZE_1_RESERVED_0(uint32_t val)
+{
+ return ((val) << CORE_DATAOUT_SIZE_1_RESERVED_0__SHIFT) & CORE_DATAOUT_SIZE_1_RESERVED_0__MASK;
+}
+#define CORE_DATAOUT_SIZE_1_DATAOUT_CHANNEL__MASK 0x0000ffff
+#define CORE_DATAOUT_SIZE_1_DATAOUT_CHANNEL__SHIFT 0
+static inline uint32_t CORE_DATAOUT_SIZE_1_DATAOUT_CHANNEL(uint32_t val)
+{
+ return ((val) << CORE_DATAOUT_SIZE_1_DATAOUT_CHANNEL__SHIFT) & CORE_DATAOUT_SIZE_1_DATAOUT_CHANNEL__MASK;
+}
+
+#define REG_CORE_CLIP_TRUNCATE 0x0000301c
+#define CORE_CLIP_TRUNCATE_RESERVED_0__MASK 0xffffff80
+#define CORE_CLIP_TRUNCATE_RESERVED_0__SHIFT 7
+static inline uint32_t CORE_CLIP_TRUNCATE_RESERVED_0(uint32_t val)
+{
+ return ((val) << CORE_CLIP_TRUNCATE_RESERVED_0__SHIFT) & CORE_CLIP_TRUNCATE_RESERVED_0__MASK;
+}
+#define CORE_CLIP_TRUNCATE_ROUND_TYPE__MASK 0x00000040
+#define CORE_CLIP_TRUNCATE_ROUND_TYPE__SHIFT 6
+static inline uint32_t CORE_CLIP_TRUNCATE_ROUND_TYPE(uint32_t val)
+{
+ return ((val) << CORE_CLIP_TRUNCATE_ROUND_TYPE__SHIFT) & CORE_CLIP_TRUNCATE_ROUND_TYPE__MASK;
+}
+#define CORE_CLIP_TRUNCATE_RESERVED_1__MASK 0x00000020
+#define CORE_CLIP_TRUNCATE_RESERVED_1__SHIFT 5
+static inline uint32_t CORE_CLIP_TRUNCATE_RESERVED_1(uint32_t val)
+{
+ return ((val) << CORE_CLIP_TRUNCATE_RESERVED_1__SHIFT) & CORE_CLIP_TRUNCATE_RESERVED_1__MASK;
+}
+#define CORE_CLIP_TRUNCATE_CLIP_TRUNCATE__MASK 0x0000001f
+#define CORE_CLIP_TRUNCATE_CLIP_TRUNCATE__SHIFT 0
+static inline uint32_t CORE_CLIP_TRUNCATE_CLIP_TRUNCATE(uint32_t val)
+{
+ return ((val) << CORE_CLIP_TRUNCATE_CLIP_TRUNCATE__SHIFT) & CORE_CLIP_TRUNCATE_CLIP_TRUNCATE__MASK;
+}
+
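+/*
+ * The DPU block at 0x4000 repeats the CORE_S_STATUS/CORE_S_POINTER
+ * field layout for its own status and pointer registers (comment
+ * added for orientation; not in the generated input).
+ */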
+#define REG_DPU_S_STATUS 0x00004000
+#define DPU_S_STATUS_RESERVED_0__MASK 0xfffc0000
+#define DPU_S_STATUS_RESERVED_0__SHIFT 18
+static inline uint32_t DPU_S_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_S_STATUS_RESERVED_0__SHIFT) & DPU_S_STATUS_RESERVED_0__MASK;
+}
+#define DPU_S_STATUS_STATUS_1__MASK 0x00030000
+#define DPU_S_STATUS_STATUS_1__SHIFT 16
+static inline uint32_t DPU_S_STATUS_STATUS_1(uint32_t val)
+{
+ return ((val) << DPU_S_STATUS_STATUS_1__SHIFT) & DPU_S_STATUS_STATUS_1__MASK;
+}
+#define DPU_S_STATUS_RESERVED_1__MASK 0x0000fffc
+#define DPU_S_STATUS_RESERVED_1__SHIFT 2
+static inline uint32_t DPU_S_STATUS_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_S_STATUS_RESERVED_1__SHIFT) & DPU_S_STATUS_RESERVED_1__MASK;
+}
+#define DPU_S_STATUS_STATUS_0__MASK 0x00000003
+#define DPU_S_STATUS_STATUS_0__SHIFT 0
+static inline uint32_t DPU_S_STATUS_STATUS_0(uint32_t val)
+{
+ return ((val) << DPU_S_STATUS_STATUS_0__SHIFT) & DPU_S_STATUS_STATUS_0__MASK;
+}
+
+#define REG_DPU_S_POINTER 0x00004004
+#define DPU_S_POINTER_RESERVED_0__MASK 0xfffe0000
+#define DPU_S_POINTER_RESERVED_0__SHIFT 17
+static inline uint32_t DPU_S_POINTER_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_S_POINTER_RESERVED_0__SHIFT) & DPU_S_POINTER_RESERVED_0__MASK;
+}
+#define DPU_S_POINTER_EXECUTER__MASK 0x00010000
+#define DPU_S_POINTER_EXECUTER__SHIFT 16
+static inline uint32_t DPU_S_POINTER_EXECUTER(uint32_t val)
+{
+ return ((val) << DPU_S_POINTER_EXECUTER__SHIFT) & DPU_S_POINTER_EXECUTER__MASK;
+}
+#define DPU_S_POINTER_RESERVED_1__MASK 0x0000ffc0
+#define DPU_S_POINTER_RESERVED_1__SHIFT 6
+static inline uint32_t DPU_S_POINTER_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_S_POINTER_RESERVED_1__SHIFT) & DPU_S_POINTER_RESERVED_1__MASK;
+}
+#define DPU_S_POINTER_EXECUTER_PP_CLEAR__MASK 0x00000020
+#define DPU_S_POINTER_EXECUTER_PP_CLEAR__SHIFT 5
+static inline uint32_t DPU_S_POINTER_EXECUTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << DPU_S_POINTER_EXECUTER_PP_CLEAR__SHIFT) & DPU_S_POINTER_EXECUTER_PP_CLEAR__MASK;
+}
+#define DPU_S_POINTER_POINTER_PP_CLEAR__MASK 0x00000010
+#define DPU_S_POINTER_POINTER_PP_CLEAR__SHIFT 4
+static inline uint32_t DPU_S_POINTER_POINTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << DPU_S_POINTER_POINTER_PP_CLEAR__SHIFT) & DPU_S_POINTER_POINTER_PP_CLEAR__MASK;
+}
+#define DPU_S_POINTER_POINTER_PP_MODE__MASK 0x00000008
+#define DPU_S_POINTER_POINTER_PP_MODE__SHIFT 3
+static inline uint32_t DPU_S_POINTER_POINTER_PP_MODE(uint32_t val)
+{
+ return ((val) << DPU_S_POINTER_POINTER_PP_MODE__SHIFT) & DPU_S_POINTER_POINTER_PP_MODE__MASK;
+}
+#define DPU_S_POINTER_EXECUTER_PP_EN__MASK 0x00000004
+#define DPU_S_POINTER_EXECUTER_PP_EN__SHIFT 2
+static inline uint32_t DPU_S_POINTER_EXECUTER_PP_EN(uint32_t val)
+{
+ return ((val) << DPU_S_POINTER_EXECUTER_PP_EN__SHIFT) & DPU_S_POINTER_EXECUTER_PP_EN__MASK;
+}
+#define DPU_S_POINTER_POINTER_PP_EN__MASK 0x00000002
+#define DPU_S_POINTER_POINTER_PP_EN__SHIFT 1
+static inline uint32_t DPU_S_POINTER_POINTER_PP_EN(uint32_t val)
+{
+ return ((val) << DPU_S_POINTER_POINTER_PP_EN__SHIFT) & DPU_S_POINTER_POINTER_PP_EN__MASK;
+}
+#define DPU_S_POINTER_POINTER__MASK 0x00000001
+#define DPU_S_POINTER_POINTER__SHIFT 0
+static inline uint32_t DPU_S_POINTER_POINTER(uint32_t val)
+{
+ return ((val) << DPU_S_POINTER_POINTER__SHIFT) & DPU_S_POINTER_POINTER__MASK;
+}
+
+#define REG_DPU_OPERATION_ENABLE 0x00004008
+#define DPU_OPERATION_ENABLE_RESERVED_0__MASK 0xfffffffe
+#define DPU_OPERATION_ENABLE_RESERVED_0__SHIFT 1
+static inline uint32_t DPU_OPERATION_ENABLE_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_OPERATION_ENABLE_RESERVED_0__SHIFT) & DPU_OPERATION_ENABLE_RESERVED_0__MASK;
+}
+#define DPU_OPERATION_ENABLE_OP_EN__MASK 0x00000001
+#define DPU_OPERATION_ENABLE_OP_EN__SHIFT 0
+static inline uint32_t DPU_OPERATION_ENABLE_OP_EN(uint32_t val)
+{
+ return ((val) << DPU_OPERATION_ENABLE_OP_EN__SHIFT) & DPU_OPERATION_ENABLE_OP_EN__MASK;
+}
+
+#define REG_DPU_FEATURE_MODE_CFG 0x0000400c
+#define DPU_FEATURE_MODE_CFG_COMB_USE__MASK 0x80000000
+#define DPU_FEATURE_MODE_CFG_COMB_USE__SHIFT 31
+static inline uint32_t DPU_FEATURE_MODE_CFG_COMB_USE(uint32_t val)
+{
+ return ((val) << DPU_FEATURE_MODE_CFG_COMB_USE__SHIFT) & DPU_FEATURE_MODE_CFG_COMB_USE__MASK;
+}
+#define DPU_FEATURE_MODE_CFG_TP_EN__MASK 0x40000000
+#define DPU_FEATURE_MODE_CFG_TP_EN__SHIFT 30
+static inline uint32_t DPU_FEATURE_MODE_CFG_TP_EN(uint32_t val)
+{
+ return ((val) << DPU_FEATURE_MODE_CFG_TP_EN__SHIFT) & DPU_FEATURE_MODE_CFG_TP_EN__MASK;
+}
+#define DPU_FEATURE_MODE_CFG_RGP_TYPE__MASK 0x3c000000
+#define DPU_FEATURE_MODE_CFG_RGP_TYPE__SHIFT 26
+static inline uint32_t DPU_FEATURE_MODE_CFG_RGP_TYPE(uint32_t val)
+{
+ return ((val) << DPU_FEATURE_MODE_CFG_RGP_TYPE__SHIFT) & DPU_FEATURE_MODE_CFG_RGP_TYPE__MASK;
+}
+#define DPU_FEATURE_MODE_CFG_NONALIGN__MASK 0x02000000
+#define DPU_FEATURE_MODE_CFG_NONALIGN__SHIFT 25
+static inline uint32_t DPU_FEATURE_MODE_CFG_NONALIGN(uint32_t val)
+{
+ return ((val) << DPU_FEATURE_MODE_CFG_NONALIGN__SHIFT) & DPU_FEATURE_MODE_CFG_NONALIGN__MASK;
+}
+#define DPU_FEATURE_MODE_CFG_SURF_LEN__MASK 0x01fffe00
+#define DPU_FEATURE_MODE_CFG_SURF_LEN__SHIFT 9
+static inline uint32_t DPU_FEATURE_MODE_CFG_SURF_LEN(uint32_t val)
+{
+ return ((val) << DPU_FEATURE_MODE_CFG_SURF_LEN__SHIFT) & DPU_FEATURE_MODE_CFG_SURF_LEN__MASK;
+}
+#define DPU_FEATURE_MODE_CFG_BURST_LEN__MASK 0x000001e0
+#define DPU_FEATURE_MODE_CFG_BURST_LEN__SHIFT 5
+static inline uint32_t DPU_FEATURE_MODE_CFG_BURST_LEN(uint32_t val)
+{
+ return ((val) << DPU_FEATURE_MODE_CFG_BURST_LEN__SHIFT) & DPU_FEATURE_MODE_CFG_BURST_LEN__MASK;
+}
+#define DPU_FEATURE_MODE_CFG_CONV_MODE__MASK 0x00000018
+#define DPU_FEATURE_MODE_CFG_CONV_MODE__SHIFT 3
+static inline uint32_t DPU_FEATURE_MODE_CFG_CONV_MODE(uint32_t val)
+{
+ return ((val) << DPU_FEATURE_MODE_CFG_CONV_MODE__SHIFT) & DPU_FEATURE_MODE_CFG_CONV_MODE__MASK;
+}
+#define DPU_FEATURE_MODE_CFG_OUTPUT_MODE__MASK 0x00000006
+#define DPU_FEATURE_MODE_CFG_OUTPUT_MODE__SHIFT 1
+static inline uint32_t DPU_FEATURE_MODE_CFG_OUTPUT_MODE(uint32_t val)
+{
+ return ((val) << DPU_FEATURE_MODE_CFG_OUTPUT_MODE__SHIFT) & DPU_FEATURE_MODE_CFG_OUTPUT_MODE__MASK;
+}
+#define DPU_FEATURE_MODE_CFG_FLYING_MODE__MASK 0x00000001
+#define DPU_FEATURE_MODE_CFG_FLYING_MODE__SHIFT 0
+static inline uint32_t DPU_FEATURE_MODE_CFG_FLYING_MODE(uint32_t val)
+{
+ return ((val) << DPU_FEATURE_MODE_CFG_FLYING_MODE__SHIFT) & DPU_FEATURE_MODE_CFG_FLYING_MODE__MASK;
+}
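+
+/*
+ * Illustrative sketch of a multi-field configuration word, with all
+ * mode values hypothetical:
+ *
+ *   uint32_t cfg = DPU_FEATURE_MODE_CFG_CONV_MODE(conv_mode) |
+ *                  DPU_FEATURE_MODE_CFG_BURST_LEN(burst_len) |
+ *                  DPU_FEATURE_MODE_CFG_OUTPUT_MODE(out_mode) |
+ *                  DPU_FEATURE_MODE_CFG_FLYING_MODE(0);
+ */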
+
+#define REG_DPU_DATA_FORMAT 0x00004010
+#define DPU_DATA_FORMAT_OUT_PRECISION__MASK 0xe0000000
+#define DPU_DATA_FORMAT_OUT_PRECISION__SHIFT 29
+static inline uint32_t DPU_DATA_FORMAT_OUT_PRECISION(uint32_t val)
+{
+ return ((val) << DPU_DATA_FORMAT_OUT_PRECISION__SHIFT) & DPU_DATA_FORMAT_OUT_PRECISION__MASK;
+}
+#define DPU_DATA_FORMAT_IN_PRECISION__MASK 0x1c000000
+#define DPU_DATA_FORMAT_IN_PRECISION__SHIFT 26
+static inline uint32_t DPU_DATA_FORMAT_IN_PRECISION(uint32_t val)
+{
+ return ((val) << DPU_DATA_FORMAT_IN_PRECISION__SHIFT) & DPU_DATA_FORMAT_IN_PRECISION__MASK;
+}
+#define DPU_DATA_FORMAT_EW_TRUNCATE_NEG__MASK 0x03ff0000
+#define DPU_DATA_FORMAT_EW_TRUNCATE_NEG__SHIFT 16
+static inline uint32_t DPU_DATA_FORMAT_EW_TRUNCATE_NEG(uint32_t val)
+{
+ return ((val) << DPU_DATA_FORMAT_EW_TRUNCATE_NEG__SHIFT) & DPU_DATA_FORMAT_EW_TRUNCATE_NEG__MASK;
+}
+#define DPU_DATA_FORMAT_BN_MUL_SHIFT_VALUE_NEG__MASK 0x0000fc00
+#define DPU_DATA_FORMAT_BN_MUL_SHIFT_VALUE_NEG__SHIFT 10
+static inline uint32_t DPU_DATA_FORMAT_BN_MUL_SHIFT_VALUE_NEG(uint32_t val)
+{
+ return ((val) << DPU_DATA_FORMAT_BN_MUL_SHIFT_VALUE_NEG__SHIFT) & DPU_DATA_FORMAT_BN_MUL_SHIFT_VALUE_NEG__MASK;
+}
+#define DPU_DATA_FORMAT_BS_MUL_SHIFT_VALUE_NEG__MASK 0x000003f0
+#define DPU_DATA_FORMAT_BS_MUL_SHIFT_VALUE_NEG__SHIFT 4
+static inline uint32_t DPU_DATA_FORMAT_BS_MUL_SHIFT_VALUE_NEG(uint32_t val)
+{
+ return ((val) << DPU_DATA_FORMAT_BS_MUL_SHIFT_VALUE_NEG__SHIFT) & DPU_DATA_FORMAT_BS_MUL_SHIFT_VALUE_NEG__MASK;
+}
+#define DPU_DATA_FORMAT_MC_SURF_OUT__MASK 0x00000008
+#define DPU_DATA_FORMAT_MC_SURF_OUT__SHIFT 3
+static inline uint32_t DPU_DATA_FORMAT_MC_SURF_OUT(uint32_t val)
+{
+ return ((val) << DPU_DATA_FORMAT_MC_SURF_OUT__SHIFT) & DPU_DATA_FORMAT_MC_SURF_OUT__MASK;
+}
+#define DPU_DATA_FORMAT_PROC_PRECISION__MASK 0x00000007
+#define DPU_DATA_FORMAT_PROC_PRECISION__SHIFT 0
+static inline uint32_t DPU_DATA_FORMAT_PROC_PRECISION(uint32_t val)
+{
+ return ((val) << DPU_DATA_FORMAT_PROC_PRECISION__SHIFT) & DPU_DATA_FORMAT_PROC_PRECISION__MASK;
+}
+
+#define REG_DPU_OFFSET_PEND 0x00004014
+#define DPU_OFFSET_PEND_RESERVED_0__MASK 0xffff0000
+#define DPU_OFFSET_PEND_RESERVED_0__SHIFT 16
+static inline uint32_t DPU_OFFSET_PEND_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_OFFSET_PEND_RESERVED_0__SHIFT) & DPU_OFFSET_PEND_RESERVED_0__MASK;
+}
+#define DPU_OFFSET_PEND_OFFSET_PEND__MASK 0x0000ffff
+#define DPU_OFFSET_PEND_OFFSET_PEND__SHIFT 0
+static inline uint32_t DPU_OFFSET_PEND_OFFSET_PEND(uint32_t val)
+{
+ return ((val) << DPU_OFFSET_PEND_OFFSET_PEND__SHIFT) & DPU_OFFSET_PEND_OFFSET_PEND__MASK;
+}
+
+#define REG_DPU_DST_BASE_ADDR 0x00004020
+#define DPU_DST_BASE_ADDR_DST_BASE_ADDR__MASK 0xffffffff
+#define DPU_DST_BASE_ADDR_DST_BASE_ADDR__SHIFT 0
+static inline uint32_t DPU_DST_BASE_ADDR_DST_BASE_ADDR(uint32_t val)
+{
+ return ((val) << DPU_DST_BASE_ADDR_DST_BASE_ADDR__SHIFT) & DPU_DST_BASE_ADDR_DST_BASE_ADDR__MASK;
+}
+
+#define REG_DPU_DST_SURF_STRIDE 0x00004024
+#define DPU_DST_SURF_STRIDE_DST_SURF_STRIDE__MASK 0xfffffff0
+#define DPU_DST_SURF_STRIDE_DST_SURF_STRIDE__SHIFT 4
+static inline uint32_t DPU_DST_SURF_STRIDE_DST_SURF_STRIDE(uint32_t val)
+{
+ return ((val) << DPU_DST_SURF_STRIDE_DST_SURF_STRIDE__SHIFT) & DPU_DST_SURF_STRIDE_DST_SURF_STRIDE__MASK;
+}
+#define DPU_DST_SURF_STRIDE_RESERVED_0__MASK 0x0000000f
+#define DPU_DST_SURF_STRIDE_RESERVED_0__SHIFT 0
+static inline uint32_t DPU_DST_SURF_STRIDE_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_DST_SURF_STRIDE_RESERVED_0__SHIFT) & DPU_DST_SURF_STRIDE_RESERVED_0__MASK;
+}
+
+#define REG_DPU_DATA_CUBE_WIDTH 0x00004030
+#define DPU_DATA_CUBE_WIDTH_RESERVED_0__MASK 0xffffe000
+#define DPU_DATA_CUBE_WIDTH_RESERVED_0__SHIFT 13
+static inline uint32_t DPU_DATA_CUBE_WIDTH_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_WIDTH_RESERVED_0__SHIFT) & DPU_DATA_CUBE_WIDTH_RESERVED_0__MASK;
+}
+#define DPU_DATA_CUBE_WIDTH_WIDTH__MASK 0x00001fff
+#define DPU_DATA_CUBE_WIDTH_WIDTH__SHIFT 0
+static inline uint32_t DPU_DATA_CUBE_WIDTH_WIDTH(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_WIDTH_WIDTH__SHIFT) & DPU_DATA_CUBE_WIDTH_WIDTH__MASK;
+}
+
+#define REG_DPU_DATA_CUBE_HEIGHT 0x00004034
+#define DPU_DATA_CUBE_HEIGHT_RESERVED_0__MASK 0xfe000000
+#define DPU_DATA_CUBE_HEIGHT_RESERVED_0__SHIFT 25
+static inline uint32_t DPU_DATA_CUBE_HEIGHT_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_HEIGHT_RESERVED_0__SHIFT) & DPU_DATA_CUBE_HEIGHT_RESERVED_0__MASK;
+}
+#define DPU_DATA_CUBE_HEIGHT_MINMAX_CTL__MASK 0x01c00000
+#define DPU_DATA_CUBE_HEIGHT_MINMAX_CTL__SHIFT 22
+static inline uint32_t DPU_DATA_CUBE_HEIGHT_MINMAX_CTL(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_HEIGHT_MINMAX_CTL__SHIFT) & DPU_DATA_CUBE_HEIGHT_MINMAX_CTL__MASK;
+}
+#define DPU_DATA_CUBE_HEIGHT_RESERVED_1__MASK 0x003fe000
+#define DPU_DATA_CUBE_HEIGHT_RESERVED_1__SHIFT 13
+static inline uint32_t DPU_DATA_CUBE_HEIGHT_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_HEIGHT_RESERVED_1__SHIFT) & DPU_DATA_CUBE_HEIGHT_RESERVED_1__MASK;
+}
+#define DPU_DATA_CUBE_HEIGHT_HEIGHT__MASK 0x00001fff
+#define DPU_DATA_CUBE_HEIGHT_HEIGHT__SHIFT 0
+static inline uint32_t DPU_DATA_CUBE_HEIGHT_HEIGHT(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_HEIGHT_HEIGHT__SHIFT) & DPU_DATA_CUBE_HEIGHT_HEIGHT__MASK;
+}
+
+#define REG_DPU_DATA_CUBE_NOTCH_ADDR 0x00004038
+#define DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_0__MASK 0xe0000000
+#define DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_0__SHIFT 29
+static inline uint32_t DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_0__SHIFT) & DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_0__MASK;
+}
+#define DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_1__MASK 0x1fff0000
+#define DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_1__SHIFT 16
+static inline uint32_t DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_1(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_1__SHIFT) & DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_1__MASK;
+}
+#define DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_1__MASK 0x0000e000
+#define DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_1__SHIFT 13
+static inline uint32_t DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_1__SHIFT) & DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_1__MASK;
+}
+#define DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_0__MASK 0x00001fff
+#define DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_0__SHIFT 0
+static inline uint32_t DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_0(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_0__SHIFT) & DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_0__MASK;
+}
+
+#define REG_DPU_DATA_CUBE_CHANNEL 0x0000403c
+#define DPU_DATA_CUBE_CHANNEL_RESERVED_0__MASK 0xe0000000
+#define DPU_DATA_CUBE_CHANNEL_RESERVED_0__SHIFT 29
+static inline uint32_t DPU_DATA_CUBE_CHANNEL_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_CHANNEL_RESERVED_0__SHIFT) & DPU_DATA_CUBE_CHANNEL_RESERVED_0__MASK;
+}
+#define DPU_DATA_CUBE_CHANNEL_ORIG_CHANNEL__MASK 0x1fff0000
+#define DPU_DATA_CUBE_CHANNEL_ORIG_CHANNEL__SHIFT 16
+static inline uint32_t DPU_DATA_CUBE_CHANNEL_ORIG_CHANNEL(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_CHANNEL_ORIG_CHANNEL__SHIFT) & DPU_DATA_CUBE_CHANNEL_ORIG_CHANNEL__MASK;
+}
+#define DPU_DATA_CUBE_CHANNEL_RESERVED_1__MASK 0x0000e000
+#define DPU_DATA_CUBE_CHANNEL_RESERVED_1__SHIFT 13
+static inline uint32_t DPU_DATA_CUBE_CHANNEL_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_CHANNEL_RESERVED_1__SHIFT) & DPU_DATA_CUBE_CHANNEL_RESERVED_1__MASK;
+}
+#define DPU_DATA_CUBE_CHANNEL_CHANNEL__MASK 0x00001fff
+#define DPU_DATA_CUBE_CHANNEL_CHANNEL__SHIFT 0
+static inline uint32_t DPU_DATA_CUBE_CHANNEL_CHANNEL(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_CHANNEL_CHANNEL__SHIFT) & DPU_DATA_CUBE_CHANNEL_CHANNEL__MASK;
+}
+
+#define REG_DPU_BS_CFG 0x00004040
+#define DPU_BS_CFG_RESERVED_0__MASK 0xfff00000
+#define DPU_BS_CFG_RESERVED_0__SHIFT 20
+static inline uint32_t DPU_BS_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_RESERVED_0__SHIFT) & DPU_BS_CFG_RESERVED_0__MASK;
+}
+#define DPU_BS_CFG_BS_ALU_ALGO__MASK 0x000f0000
+#define DPU_BS_CFG_BS_ALU_ALGO__SHIFT 16
+static inline uint32_t DPU_BS_CFG_BS_ALU_ALGO(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_BS_ALU_ALGO__SHIFT) & DPU_BS_CFG_BS_ALU_ALGO__MASK;
+}
+#define DPU_BS_CFG_RESERVED_1__MASK 0x0000fe00
+#define DPU_BS_CFG_RESERVED_1__SHIFT 9
+static inline uint32_t DPU_BS_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_RESERVED_1__SHIFT) & DPU_BS_CFG_RESERVED_1__MASK;
+}
+#define DPU_BS_CFG_BS_ALU_SRC__MASK 0x00000100
+#define DPU_BS_CFG_BS_ALU_SRC__SHIFT 8
+static inline uint32_t DPU_BS_CFG_BS_ALU_SRC(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_BS_ALU_SRC__SHIFT) & DPU_BS_CFG_BS_ALU_SRC__MASK;
+}
+#define DPU_BS_CFG_BS_RELUX_EN__MASK 0x00000080
+#define DPU_BS_CFG_BS_RELUX_EN__SHIFT 7
+static inline uint32_t DPU_BS_CFG_BS_RELUX_EN(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_BS_RELUX_EN__SHIFT) & DPU_BS_CFG_BS_RELUX_EN__MASK;
+}
+#define DPU_BS_CFG_BS_RELU_BYPASS__MASK 0x00000040
+#define DPU_BS_CFG_BS_RELU_BYPASS__SHIFT 6
+static inline uint32_t DPU_BS_CFG_BS_RELU_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_BS_RELU_BYPASS__SHIFT) & DPU_BS_CFG_BS_RELU_BYPASS__MASK;
+}
+#define DPU_BS_CFG_BS_MUL_PRELU__MASK 0x00000020
+#define DPU_BS_CFG_BS_MUL_PRELU__SHIFT 5
+static inline uint32_t DPU_BS_CFG_BS_MUL_PRELU(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_BS_MUL_PRELU__SHIFT) & DPU_BS_CFG_BS_MUL_PRELU__MASK;
+}
+#define DPU_BS_CFG_BS_MUL_BYPASS__MASK 0x00000010
+#define DPU_BS_CFG_BS_MUL_BYPASS__SHIFT 4
+static inline uint32_t DPU_BS_CFG_BS_MUL_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_BS_MUL_BYPASS__SHIFT) & DPU_BS_CFG_BS_MUL_BYPASS__MASK;
+}
+#define DPU_BS_CFG_RESERVED_2__MASK 0x0000000c
+#define DPU_BS_CFG_RESERVED_2__SHIFT 2
+static inline uint32_t DPU_BS_CFG_RESERVED_2(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_RESERVED_2__SHIFT) & DPU_BS_CFG_RESERVED_2__MASK;
+}
+#define DPU_BS_CFG_BS_ALU_BYPASS__MASK 0x00000002
+#define DPU_BS_CFG_BS_ALU_BYPASS__SHIFT 1
+static inline uint32_t DPU_BS_CFG_BS_ALU_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_BS_ALU_BYPASS__SHIFT) & DPU_BS_CFG_BS_ALU_BYPASS__MASK;
+}
+#define DPU_BS_CFG_BS_BYPASS__MASK 0x00000001
+#define DPU_BS_CFG_BS_BYPASS__SHIFT 0
+static inline uint32_t DPU_BS_CFG_BS_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_BS_BYPASS__SHIFT) & DPU_BS_CFG_BS_BYPASS__MASK;
+}
+
+#define REG_DPU_BS_ALU_CFG 0x00004044
+#define DPU_BS_ALU_CFG_BS_ALU_OPERAND__MASK 0xffffffff
+#define DPU_BS_ALU_CFG_BS_ALU_OPERAND__SHIFT 0
+static inline uint32_t DPU_BS_ALU_CFG_BS_ALU_OPERAND(uint32_t val)
+{
+ return ((val) << DPU_BS_ALU_CFG_BS_ALU_OPERAND__SHIFT) & DPU_BS_ALU_CFG_BS_ALU_OPERAND__MASK;
+}
+
+#define REG_DPU_BS_MUL_CFG 0x00004048
+#define DPU_BS_MUL_CFG_BS_MUL_OPERAND__MASK 0xffff0000
+#define DPU_BS_MUL_CFG_BS_MUL_OPERAND__SHIFT 16
+static inline uint32_t DPU_BS_MUL_CFG_BS_MUL_OPERAND(uint32_t val)
+{
+ return ((val) << DPU_BS_MUL_CFG_BS_MUL_OPERAND__SHIFT) & DPU_BS_MUL_CFG_BS_MUL_OPERAND__MASK;
+}
+#define DPU_BS_MUL_CFG_RESERVED_0__MASK 0x0000c000
+#define DPU_BS_MUL_CFG_RESERVED_0__SHIFT 14
+static inline uint32_t DPU_BS_MUL_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_BS_MUL_CFG_RESERVED_0__SHIFT) & DPU_BS_MUL_CFG_RESERVED_0__MASK;
+}
+#define DPU_BS_MUL_CFG_BS_MUL_SHIFT_VALUE__MASK 0x00003f00
+#define DPU_BS_MUL_CFG_BS_MUL_SHIFT_VALUE__SHIFT 8
+static inline uint32_t DPU_BS_MUL_CFG_BS_MUL_SHIFT_VALUE(uint32_t val)
+{
+ return ((val) << DPU_BS_MUL_CFG_BS_MUL_SHIFT_VALUE__SHIFT) & DPU_BS_MUL_CFG_BS_MUL_SHIFT_VALUE__MASK;
+}
+#define DPU_BS_MUL_CFG_RESERVED_1__MASK 0x000000fc
+#define DPU_BS_MUL_CFG_RESERVED_1__SHIFT 2
+static inline uint32_t DPU_BS_MUL_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_BS_MUL_CFG_RESERVED_1__SHIFT) & DPU_BS_MUL_CFG_RESERVED_1__MASK;
+}
+#define DPU_BS_MUL_CFG_BS_TRUNCATE_SRC__MASK 0x00000002
+#define DPU_BS_MUL_CFG_BS_TRUNCATE_SRC__SHIFT 1
+static inline uint32_t DPU_BS_MUL_CFG_BS_TRUNCATE_SRC(uint32_t val)
+{
+ return ((val) << DPU_BS_MUL_CFG_BS_TRUNCATE_SRC__SHIFT) & DPU_BS_MUL_CFG_BS_TRUNCATE_SRC__MASK;
+}
+#define DPU_BS_MUL_CFG_BS_MUL_SRC__MASK 0x00000001
+#define DPU_BS_MUL_CFG_BS_MUL_SRC__SHIFT 0
+static inline uint32_t DPU_BS_MUL_CFG_BS_MUL_SRC(uint32_t val)
+{
+ return ((val) << DPU_BS_MUL_CFG_BS_MUL_SRC__SHIFT) & DPU_BS_MUL_CFG_BS_MUL_SRC__MASK;
+}
+
+#define REG_DPU_BS_RELUX_CMP_VALUE 0x0000404c
+#define DPU_BS_RELUX_CMP_VALUE_BS_RELUX_CMP_DAT__MASK 0xffffffff
+#define DPU_BS_RELUX_CMP_VALUE_BS_RELUX_CMP_DAT__SHIFT 0
+static inline uint32_t DPU_BS_RELUX_CMP_VALUE_BS_RELUX_CMP_DAT(uint32_t val)
+{
+ return ((val) << DPU_BS_RELUX_CMP_VALUE_BS_RELUX_CMP_DAT__SHIFT) & DPU_BS_RELUX_CMP_VALUE_BS_RELUX_CMP_DAT__MASK;
+}
+
+#define REG_DPU_BS_OW_CFG 0x00004050
+#define DPU_BS_OW_CFG_RGP_CNTER__MASK 0xf0000000
+#define DPU_BS_OW_CFG_RGP_CNTER__SHIFT 28
+static inline uint32_t DPU_BS_OW_CFG_RGP_CNTER(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_CFG_RGP_CNTER__SHIFT) & DPU_BS_OW_CFG_RGP_CNTER__MASK;
+}
+#define DPU_BS_OW_CFG_TP_ORG_EN__MASK 0x08000000
+#define DPU_BS_OW_CFG_TP_ORG_EN__SHIFT 27
+static inline uint32_t DPU_BS_OW_CFG_TP_ORG_EN(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_CFG_TP_ORG_EN__SHIFT) & DPU_BS_OW_CFG_TP_ORG_EN__MASK;
+}
+#define DPU_BS_OW_CFG_RESERVED_0__MASK 0x07fff800
+#define DPU_BS_OW_CFG_RESERVED_0__SHIFT 11
+static inline uint32_t DPU_BS_OW_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_CFG_RESERVED_0__SHIFT) & DPU_BS_OW_CFG_RESERVED_0__MASK;
+}
+#define DPU_BS_OW_CFG_SIZE_E_2__MASK 0x00000700
+#define DPU_BS_OW_CFG_SIZE_E_2__SHIFT 8
+static inline uint32_t DPU_BS_OW_CFG_SIZE_E_2(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_CFG_SIZE_E_2__SHIFT) & DPU_BS_OW_CFG_SIZE_E_2__MASK;
+}
+#define DPU_BS_OW_CFG_SIZE_E_1__MASK 0x000000e0
+#define DPU_BS_OW_CFG_SIZE_E_1__SHIFT 5
+static inline uint32_t DPU_BS_OW_CFG_SIZE_E_1(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_CFG_SIZE_E_1__SHIFT) & DPU_BS_OW_CFG_SIZE_E_1__MASK;
+}
+#define DPU_BS_OW_CFG_SIZE_E_0__MASK 0x0000001c
+#define DPU_BS_OW_CFG_SIZE_E_0__SHIFT 2
+static inline uint32_t DPU_BS_OW_CFG_SIZE_E_0(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_CFG_SIZE_E_0__SHIFT) & DPU_BS_OW_CFG_SIZE_E_0__MASK;
+}
+#define DPU_BS_OW_CFG_OD_BYPASS__MASK 0x00000002
+#define DPU_BS_OW_CFG_OD_BYPASS__SHIFT 1
+static inline uint32_t DPU_BS_OW_CFG_OD_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_CFG_OD_BYPASS__SHIFT) & DPU_BS_OW_CFG_OD_BYPASS__MASK;
+}
+#define DPU_BS_OW_CFG_OW_SRC__MASK 0x00000001
+#define DPU_BS_OW_CFG_OW_SRC__SHIFT 0
+static inline uint32_t DPU_BS_OW_CFG_OW_SRC(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_CFG_OW_SRC__SHIFT) & DPU_BS_OW_CFG_OW_SRC__MASK;
+}
+
+#define REG_DPU_BS_OW_OP 0x00004054
+#define DPU_BS_OW_OP_RESERVED_0__MASK 0xffff0000
+#define DPU_BS_OW_OP_RESERVED_0__SHIFT 16
+static inline uint32_t DPU_BS_OW_OP_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_OP_RESERVED_0__SHIFT) & DPU_BS_OW_OP_RESERVED_0__MASK;
+}
+#define DPU_BS_OW_OP_OW_OP__MASK 0x0000ffff
+#define DPU_BS_OW_OP_OW_OP__SHIFT 0
+static inline uint32_t DPU_BS_OW_OP_OW_OP(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_OP_OW_OP__SHIFT) & DPU_BS_OW_OP_OW_OP__MASK;
+}
+
+#define REG_DPU_WDMA_SIZE_0 0x00004058
+#define DPU_WDMA_SIZE_0_RESERVED_0__MASK 0xf0000000
+#define DPU_WDMA_SIZE_0_RESERVED_0__SHIFT 28
+static inline uint32_t DPU_WDMA_SIZE_0_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_WDMA_SIZE_0_RESERVED_0__SHIFT) & DPU_WDMA_SIZE_0_RESERVED_0__MASK;
+}
+#define DPU_WDMA_SIZE_0_TP_PRECISION__MASK 0x08000000
+#define DPU_WDMA_SIZE_0_TP_PRECISION__SHIFT 27
+static inline uint32_t DPU_WDMA_SIZE_0_TP_PRECISION(uint32_t val)
+{
+ return ((val) << DPU_WDMA_SIZE_0_TP_PRECISION__SHIFT) & DPU_WDMA_SIZE_0_TP_PRECISION__MASK;
+}
+#define DPU_WDMA_SIZE_0_SIZE_C_WDMA__MASK 0x07ff0000
+#define DPU_WDMA_SIZE_0_SIZE_C_WDMA__SHIFT 16
+static inline uint32_t DPU_WDMA_SIZE_0_SIZE_C_WDMA(uint32_t val)
+{
+ return ((val) << DPU_WDMA_SIZE_0_SIZE_C_WDMA__SHIFT) & DPU_WDMA_SIZE_0_SIZE_C_WDMA__MASK;
+}
+#define DPU_WDMA_SIZE_0_RESERVED_1__MASK 0x0000e000
+#define DPU_WDMA_SIZE_0_RESERVED_1__SHIFT 13
+static inline uint32_t DPU_WDMA_SIZE_0_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_WDMA_SIZE_0_RESERVED_1__SHIFT) & DPU_WDMA_SIZE_0_RESERVED_1__MASK;
+}
+#define DPU_WDMA_SIZE_0_CHANNEL_WDMA__MASK 0x00001fff
+#define DPU_WDMA_SIZE_0_CHANNEL_WDMA__SHIFT 0
+static inline uint32_t DPU_WDMA_SIZE_0_CHANNEL_WDMA(uint32_t val)
+{
+ return ((val) << DPU_WDMA_SIZE_0_CHANNEL_WDMA__SHIFT) & DPU_WDMA_SIZE_0_CHANNEL_WDMA__MASK;
+}
+
+#define REG_DPU_WDMA_SIZE_1 0x0000405c
+#define DPU_WDMA_SIZE_1_RESERVED_0__MASK 0xe0000000
+#define DPU_WDMA_SIZE_1_RESERVED_0__SHIFT 29
+static inline uint32_t DPU_WDMA_SIZE_1_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_WDMA_SIZE_1_RESERVED_0__SHIFT) & DPU_WDMA_SIZE_1_RESERVED_0__MASK;
+}
+#define DPU_WDMA_SIZE_1_HEIGHT_WDMA__MASK 0x1fff0000
+#define DPU_WDMA_SIZE_1_HEIGHT_WDMA__SHIFT 16
+static inline uint32_t DPU_WDMA_SIZE_1_HEIGHT_WDMA(uint32_t val)
+{
+ return ((val) << DPU_WDMA_SIZE_1_HEIGHT_WDMA__SHIFT) & DPU_WDMA_SIZE_1_HEIGHT_WDMA__MASK;
+}
+#define DPU_WDMA_SIZE_1_RESERVED_1__MASK 0x0000e000
+#define DPU_WDMA_SIZE_1_RESERVED_1__SHIFT 13
+static inline uint32_t DPU_WDMA_SIZE_1_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_WDMA_SIZE_1_RESERVED_1__SHIFT) & DPU_WDMA_SIZE_1_RESERVED_1__MASK;
+}
+#define DPU_WDMA_SIZE_1_WIDTH_WDMA__MASK 0x00001fff
+#define DPU_WDMA_SIZE_1_WIDTH_WDMA__SHIFT 0
+static inline uint32_t DPU_WDMA_SIZE_1_WIDTH_WDMA(uint32_t val)
+{
+ return ((val) << DPU_WDMA_SIZE_1_WIDTH_WDMA__SHIFT) & DPU_WDMA_SIZE_1_WIDTH_WDMA__MASK;
+}
+
+#define REG_DPU_BN_CFG 0x00004060
+#define DPU_BN_CFG_RESERVED_0__MASK 0xfff00000
+#define DPU_BN_CFG_RESERVED_0__SHIFT 20
+static inline uint32_t DPU_BN_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_RESERVED_0__SHIFT) & DPU_BN_CFG_RESERVED_0__MASK;
+}
+#define DPU_BN_CFG_BN_ALU_ALGO__MASK 0x000f0000
+#define DPU_BN_CFG_BN_ALU_ALGO__SHIFT 16
+static inline uint32_t DPU_BN_CFG_BN_ALU_ALGO(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_BN_ALU_ALGO__SHIFT) & DPU_BN_CFG_BN_ALU_ALGO__MASK;
+}
+#define DPU_BN_CFG_RESERVED_1__MASK 0x0000fe00
+#define DPU_BN_CFG_RESERVED_1__SHIFT 9
+static inline uint32_t DPU_BN_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_RESERVED_1__SHIFT) & DPU_BN_CFG_RESERVED_1__MASK;
+}
+#define DPU_BN_CFG_BN_ALU_SRC__MASK 0x00000100
+#define DPU_BN_CFG_BN_ALU_SRC__SHIFT 8
+static inline uint32_t DPU_BN_CFG_BN_ALU_SRC(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_BN_ALU_SRC__SHIFT) & DPU_BN_CFG_BN_ALU_SRC__MASK;
+}
+#define DPU_BN_CFG_BN_RELUX_EN__MASK 0x00000080
+#define DPU_BN_CFG_BN_RELUX_EN__SHIFT 7
+static inline uint32_t DPU_BN_CFG_BN_RELUX_EN(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_BN_RELUX_EN__SHIFT) & DPU_BN_CFG_BN_RELUX_EN__MASK;
+}
+#define DPU_BN_CFG_BN_RELU_BYPASS__MASK 0x00000040
+#define DPU_BN_CFG_BN_RELU_BYPASS__SHIFT 6
+static inline uint32_t DPU_BN_CFG_BN_RELU_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_BN_RELU_BYPASS__SHIFT) & DPU_BN_CFG_BN_RELU_BYPASS__MASK;
+}
+#define DPU_BN_CFG_BN_MUL_PRELU__MASK 0x00000020
+#define DPU_BN_CFG_BN_MUL_PRELU__SHIFT 5
+static inline uint32_t DPU_BN_CFG_BN_MUL_PRELU(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_BN_MUL_PRELU__SHIFT) & DPU_BN_CFG_BN_MUL_PRELU__MASK;
+}
+#define DPU_BN_CFG_BN_MUL_BYPASS__MASK 0x00000010
+#define DPU_BN_CFG_BN_MUL_BYPASS__SHIFT 4
+static inline uint32_t DPU_BN_CFG_BN_MUL_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_BN_MUL_BYPASS__SHIFT) & DPU_BN_CFG_BN_MUL_BYPASS__MASK;
+}
+#define DPU_BN_CFG_RESERVED_2__MASK 0x0000000c
+#define DPU_BN_CFG_RESERVED_2__SHIFT 2
+static inline uint32_t DPU_BN_CFG_RESERVED_2(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_RESERVED_2__SHIFT) & DPU_BN_CFG_RESERVED_2__MASK;
+}
+#define DPU_BN_CFG_BN_ALU_BYPASS__MASK 0x00000002
+#define DPU_BN_CFG_BN_ALU_BYPASS__SHIFT 1
+static inline uint32_t DPU_BN_CFG_BN_ALU_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_BN_ALU_BYPASS__SHIFT) & DPU_BN_CFG_BN_ALU_BYPASS__MASK;
+}
+#define DPU_BN_CFG_BN_BYPASS__MASK 0x00000001
+#define DPU_BN_CFG_BN_BYPASS__SHIFT 0
+static inline uint32_t DPU_BN_CFG_BN_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_BN_BYPASS__SHIFT) & DPU_BN_CFG_BN_BYPASS__MASK;
+}
+
+#define REG_DPU_BN_ALU_CFG 0x00004064
+#define DPU_BN_ALU_CFG_BN_ALU_OPERAND__MASK 0xffffffff
+#define DPU_BN_ALU_CFG_BN_ALU_OPERAND__SHIFT 0
+static inline uint32_t DPU_BN_ALU_CFG_BN_ALU_OPERAND(uint32_t val)
+{
+ return ((val) << DPU_BN_ALU_CFG_BN_ALU_OPERAND__SHIFT) & DPU_BN_ALU_CFG_BN_ALU_OPERAND__MASK;
+}
+
+#define REG_DPU_BN_MUL_CFG 0x00004068
+#define DPU_BN_MUL_CFG_BN_MUL_OPERAND__MASK 0xffff0000
+#define DPU_BN_MUL_CFG_BN_MUL_OPERAND__SHIFT 16
+static inline uint32_t DPU_BN_MUL_CFG_BN_MUL_OPERAND(uint32_t val)
+{
+ return ((val) << DPU_BN_MUL_CFG_BN_MUL_OPERAND__SHIFT) & DPU_BN_MUL_CFG_BN_MUL_OPERAND__MASK;
+}
+#define DPU_BN_MUL_CFG_RESERVED_0__MASK 0x0000c000
+#define DPU_BN_MUL_CFG_RESERVED_0__SHIFT 14
+static inline uint32_t DPU_BN_MUL_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_BN_MUL_CFG_RESERVED_0__SHIFT) & DPU_BN_MUL_CFG_RESERVED_0__MASK;
+}
+#define DPU_BN_MUL_CFG_BN_MUL_SHIFT_VALUE__MASK 0x00003f00
+#define DPU_BN_MUL_CFG_BN_MUL_SHIFT_VALUE__SHIFT 8
+static inline uint32_t DPU_BN_MUL_CFG_BN_MUL_SHIFT_VALUE(uint32_t val)
+{
+ return ((val) << DPU_BN_MUL_CFG_BN_MUL_SHIFT_VALUE__SHIFT) & DPU_BN_MUL_CFG_BN_MUL_SHIFT_VALUE__MASK;
+}
+#define DPU_BN_MUL_CFG_RESERVED_1__MASK 0x000000fc
+#define DPU_BN_MUL_CFG_RESERVED_1__SHIFT 2
+static inline uint32_t DPU_BN_MUL_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_BN_MUL_CFG_RESERVED_1__SHIFT) & DPU_BN_MUL_CFG_RESERVED_1__MASK;
+}
+#define DPU_BN_MUL_CFG_BN_TRUNCATE_SRC__MASK 0x00000002
+#define DPU_BN_MUL_CFG_BN_TRUNCATE_SRC__SHIFT 1
+static inline uint32_t DPU_BN_MUL_CFG_BN_TRUNCATE_SRC(uint32_t val)
+{
+ return ((val) << DPU_BN_MUL_CFG_BN_TRUNCATE_SRC__SHIFT) & DPU_BN_MUL_CFG_BN_TRUNCATE_SRC__MASK;
+}
+#define DPU_BN_MUL_CFG_BN_MUL_SRC__MASK 0x00000001
+#define DPU_BN_MUL_CFG_BN_MUL_SRC__SHIFT 0
+static inline uint32_t DPU_BN_MUL_CFG_BN_MUL_SRC(uint32_t val)
+{
+ return ((val) << DPU_BN_MUL_CFG_BN_MUL_SRC__SHIFT) & DPU_BN_MUL_CFG_BN_MUL_SRC__MASK;
+}
+
+#define REG_DPU_BN_RELUX_CMP_VALUE 0x0000406c
+#define DPU_BN_RELUX_CMP_VALUE_BN_RELUX_CMP_DAT__MASK 0xffffffff
+#define DPU_BN_RELUX_CMP_VALUE_BN_RELUX_CMP_DAT__SHIFT 0
+static inline uint32_t DPU_BN_RELUX_CMP_VALUE_BN_RELUX_CMP_DAT(uint32_t val)
+{
+ return ((val) << DPU_BN_RELUX_CMP_VALUE_BN_RELUX_CMP_DAT__SHIFT) & DPU_BN_RELUX_CMP_VALUE_BN_RELUX_CMP_DAT__MASK;
+}
+
+#define REG_DPU_EW_CFG 0x00004070
+#define DPU_EW_CFG_EW_CVT_TYPE__MASK 0x80000000
+#define DPU_EW_CFG_EW_CVT_TYPE__SHIFT 31
+static inline uint32_t DPU_EW_CFG_EW_CVT_TYPE(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_CVT_TYPE__SHIFT) & DPU_EW_CFG_EW_CVT_TYPE__MASK;
+}
+#define DPU_EW_CFG_EW_CVT_ROUND__MASK 0x40000000
+#define DPU_EW_CFG_EW_CVT_ROUND__SHIFT 30
+static inline uint32_t DPU_EW_CFG_EW_CVT_ROUND(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_CVT_ROUND__SHIFT) & DPU_EW_CFG_EW_CVT_ROUND__MASK;
+}
+#define DPU_EW_CFG_EW_DATA_MODE__MASK 0x30000000
+#define DPU_EW_CFG_EW_DATA_MODE__SHIFT 28
+static inline uint32_t DPU_EW_CFG_EW_DATA_MODE(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_DATA_MODE__SHIFT) & DPU_EW_CFG_EW_DATA_MODE__MASK;
+}
+#define DPU_EW_CFG_RESERVED_0__MASK 0x0f000000
+#define DPU_EW_CFG_RESERVED_0__SHIFT 24
+static inline uint32_t DPU_EW_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_RESERVED_0__SHIFT) & DPU_EW_CFG_RESERVED_0__MASK;
+}
+#define DPU_EW_CFG_EDATA_SIZE__MASK 0x00c00000
+#define DPU_EW_CFG_EDATA_SIZE__SHIFT 22
+static inline uint32_t DPU_EW_CFG_EDATA_SIZE(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EDATA_SIZE__SHIFT) & DPU_EW_CFG_EDATA_SIZE__MASK;
+}
+#define DPU_EW_CFG_EW_EQUAL_EN__MASK 0x00200000
+#define DPU_EW_CFG_EW_EQUAL_EN__SHIFT 21
+static inline uint32_t DPU_EW_CFG_EW_EQUAL_EN(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_EQUAL_EN__SHIFT) & DPU_EW_CFG_EW_EQUAL_EN__MASK;
+}
+#define DPU_EW_CFG_EW_BINARY_EN__MASK 0x00100000
+#define DPU_EW_CFG_EW_BINARY_EN__SHIFT 20
+static inline uint32_t DPU_EW_CFG_EW_BINARY_EN(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_BINARY_EN__SHIFT) & DPU_EW_CFG_EW_BINARY_EN__MASK;
+}
+#define DPU_EW_CFG_EW_ALU_ALGO__MASK 0x000f0000
+#define DPU_EW_CFG_EW_ALU_ALGO__SHIFT 16
+static inline uint32_t DPU_EW_CFG_EW_ALU_ALGO(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_ALU_ALGO__SHIFT) & DPU_EW_CFG_EW_ALU_ALGO__MASK;
+}
+#define DPU_EW_CFG_RESERVED_1__MASK 0x0000f800
+#define DPU_EW_CFG_RESERVED_1__SHIFT 11
+static inline uint32_t DPU_EW_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_RESERVED_1__SHIFT) & DPU_EW_CFG_RESERVED_1__MASK;
+}
+#define DPU_EW_CFG_EW_RELUX_EN__MASK 0x00000400
+#define DPU_EW_CFG_EW_RELUX_EN__SHIFT 10
+static inline uint32_t DPU_EW_CFG_EW_RELUX_EN(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_RELUX_EN__SHIFT) & DPU_EW_CFG_EW_RELUX_EN__MASK;
+}
+#define DPU_EW_CFG_EW_RELU_BYPASS__MASK 0x00000200
+#define DPU_EW_CFG_EW_RELU_BYPASS__SHIFT 9
+static inline uint32_t DPU_EW_CFG_EW_RELU_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_RELU_BYPASS__SHIFT) & DPU_EW_CFG_EW_RELU_BYPASS__MASK;
+}
+#define DPU_EW_CFG_EW_OP_CVT_BYPASS__MASK 0x00000100
+#define DPU_EW_CFG_EW_OP_CVT_BYPASS__SHIFT 8
+static inline uint32_t DPU_EW_CFG_EW_OP_CVT_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_OP_CVT_BYPASS__SHIFT) & DPU_EW_CFG_EW_OP_CVT_BYPASS__MASK;
+}
+#define DPU_EW_CFG_EW_LUT_BYPASS__MASK 0x00000080
+#define DPU_EW_CFG_EW_LUT_BYPASS__SHIFT 7
+static inline uint32_t DPU_EW_CFG_EW_LUT_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_LUT_BYPASS__SHIFT) & DPU_EW_CFG_EW_LUT_BYPASS__MASK;
+}
+#define DPU_EW_CFG_EW_OP_SRC__MASK 0x00000040
+#define DPU_EW_CFG_EW_OP_SRC__SHIFT 6
+static inline uint32_t DPU_EW_CFG_EW_OP_SRC(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_OP_SRC__SHIFT) & DPU_EW_CFG_EW_OP_SRC__MASK;
+}
+#define DPU_EW_CFG_EW_MUL_PRELU__MASK 0x00000020
+#define DPU_EW_CFG_EW_MUL_PRELU__SHIFT 5
+static inline uint32_t DPU_EW_CFG_EW_MUL_PRELU(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_MUL_PRELU__SHIFT) & DPU_EW_CFG_EW_MUL_PRELU__MASK;
+}
+#define DPU_EW_CFG_RESERVED_2__MASK 0x00000018
+#define DPU_EW_CFG_RESERVED_2__SHIFT 3
+static inline uint32_t DPU_EW_CFG_RESERVED_2(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_RESERVED_2__SHIFT) & DPU_EW_CFG_RESERVED_2__MASK;
+}
+#define DPU_EW_CFG_EW_OP_TYPE__MASK 0x00000004
+#define DPU_EW_CFG_EW_OP_TYPE__SHIFT 2
+static inline uint32_t DPU_EW_CFG_EW_OP_TYPE(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_OP_TYPE__SHIFT) & DPU_EW_CFG_EW_OP_TYPE__MASK;
+}
+#define DPU_EW_CFG_EW_OP_BYPASS__MASK 0x00000002
+#define DPU_EW_CFG_EW_OP_BYPASS__SHIFT 1
+static inline uint32_t DPU_EW_CFG_EW_OP_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_OP_BYPASS__SHIFT) & DPU_EW_CFG_EW_OP_BYPASS__MASK;
+}
+#define DPU_EW_CFG_EW_BYPASS__MASK 0x00000001
+#define DPU_EW_CFG_EW_BYPASS__SHIFT 0
+static inline uint32_t DPU_EW_CFG_EW_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_BYPASS__SHIFT) & DPU_EW_CFG_EW_BYPASS__MASK;
+}
+
+#define REG_DPU_EW_CVT_OFFSET_VALUE 0x00004074
+#define DPU_EW_CVT_OFFSET_VALUE_EW_OP_CVT_OFFSET__MASK 0xffffffff
+#define DPU_EW_CVT_OFFSET_VALUE_EW_OP_CVT_OFFSET__SHIFT 0
+static inline uint32_t DPU_EW_CVT_OFFSET_VALUE_EW_OP_CVT_OFFSET(uint32_t val)
+{
+ return ((val) << DPU_EW_CVT_OFFSET_VALUE_EW_OP_CVT_OFFSET__SHIFT) & DPU_EW_CVT_OFFSET_VALUE_EW_OP_CVT_OFFSET__MASK;
+}
+
+#define REG_DPU_EW_CVT_SCALE_VALUE 0x00004078
+#define DPU_EW_CVT_SCALE_VALUE_EW_TRUNCATE__MASK 0xffc00000
+#define DPU_EW_CVT_SCALE_VALUE_EW_TRUNCATE__SHIFT 22
+static inline uint32_t DPU_EW_CVT_SCALE_VALUE_EW_TRUNCATE(uint32_t val)
+{
+ return ((val) << DPU_EW_CVT_SCALE_VALUE_EW_TRUNCATE__SHIFT) & DPU_EW_CVT_SCALE_VALUE_EW_TRUNCATE__MASK;
+}
+#define DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SHIFT__MASK 0x003f0000
+#define DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SHIFT__SHIFT 16
+static inline uint32_t DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SHIFT(uint32_t val)
+{
+ return ((val) << DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SHIFT__SHIFT) & DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SHIFT__MASK;
+}
+#define DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SCALE__MASK 0x0000ffff
+#define DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SCALE__SHIFT 0
+static inline uint32_t DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SCALE(uint32_t val)
+{
+ return ((val) << DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SCALE__SHIFT) & DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SCALE__MASK;
+}
+
+#define REG_DPU_EW_RELUX_CMP_VALUE 0x0000407c
+#define DPU_EW_RELUX_CMP_VALUE_EW_RELUX_CMP_DAT__MASK 0xffffffff
+#define DPU_EW_RELUX_CMP_VALUE_EW_RELUX_CMP_DAT__SHIFT 0
+static inline uint32_t DPU_EW_RELUX_CMP_VALUE_EW_RELUX_CMP_DAT(uint32_t val)
+{
+ return ((val) << DPU_EW_RELUX_CMP_VALUE_EW_RELUX_CMP_DAT__SHIFT) & DPU_EW_RELUX_CMP_VALUE_EW_RELUX_CMP_DAT__MASK;
+}
+
+#define REG_DPU_OUT_CVT_OFFSET 0x00004080
+#define DPU_OUT_CVT_OFFSET_OUT_CVT_OFFSET__MASK 0xffffffff
+#define DPU_OUT_CVT_OFFSET_OUT_CVT_OFFSET__SHIFT 0
+static inline uint32_t DPU_OUT_CVT_OFFSET_OUT_CVT_OFFSET(uint32_t val)
+{
+ return ((val) << DPU_OUT_CVT_OFFSET_OUT_CVT_OFFSET__SHIFT) & DPU_OUT_CVT_OFFSET_OUT_CVT_OFFSET__MASK;
+}
+
+#define REG_DPU_OUT_CVT_SCALE 0x00004084
+#define DPU_OUT_CVT_SCALE_RESERVED_0__MASK 0xfffe0000
+#define DPU_OUT_CVT_SCALE_RESERVED_0__SHIFT 17
+static inline uint32_t DPU_OUT_CVT_SCALE_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_OUT_CVT_SCALE_RESERVED_0__SHIFT) & DPU_OUT_CVT_SCALE_RESERVED_0__MASK;
+}
+#define DPU_OUT_CVT_SCALE_FP32TOFP16_EN__MASK 0x00010000
+#define DPU_OUT_CVT_SCALE_FP32TOFP16_EN__SHIFT 16
+static inline uint32_t DPU_OUT_CVT_SCALE_FP32TOFP16_EN(uint32_t val)
+{
+ return ((val) << DPU_OUT_CVT_SCALE_FP32TOFP16_EN__SHIFT) & DPU_OUT_CVT_SCALE_FP32TOFP16_EN__MASK;
+}
+#define DPU_OUT_CVT_SCALE_OUT_CVT_SCALE__MASK 0x0000ffff
+#define DPU_OUT_CVT_SCALE_OUT_CVT_SCALE__SHIFT 0
+static inline uint32_t DPU_OUT_CVT_SCALE_OUT_CVT_SCALE(uint32_t val)
+{
+ return ((val) << DPU_OUT_CVT_SCALE_OUT_CVT_SCALE__SHIFT) & DPU_OUT_CVT_SCALE_OUT_CVT_SCALE__MASK;
+}
+
+#define REG_DPU_OUT_CVT_SHIFT 0x00004088
+#define DPU_OUT_CVT_SHIFT_CVT_TYPE__MASK 0x80000000
+#define DPU_OUT_CVT_SHIFT_CVT_TYPE__SHIFT 31
+static inline uint32_t DPU_OUT_CVT_SHIFT_CVT_TYPE(uint32_t val)
+{
+ return ((val) << DPU_OUT_CVT_SHIFT_CVT_TYPE__SHIFT) & DPU_OUT_CVT_SHIFT_CVT_TYPE__MASK;
+}
+#define DPU_OUT_CVT_SHIFT_CVT_ROUND__MASK 0x40000000
+#define DPU_OUT_CVT_SHIFT_CVT_ROUND__SHIFT 30
+static inline uint32_t DPU_OUT_CVT_SHIFT_CVT_ROUND(uint32_t val)
+{
+ return ((val) << DPU_OUT_CVT_SHIFT_CVT_ROUND__SHIFT) & DPU_OUT_CVT_SHIFT_CVT_ROUND__MASK;
+}
+#define DPU_OUT_CVT_SHIFT_RESERVED_0__MASK 0x3ff00000
+#define DPU_OUT_CVT_SHIFT_RESERVED_0__SHIFT 20
+static inline uint32_t DPU_OUT_CVT_SHIFT_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_OUT_CVT_SHIFT_RESERVED_0__SHIFT) & DPU_OUT_CVT_SHIFT_RESERVED_0__MASK;
+}
+#define DPU_OUT_CVT_SHIFT_MINUS_EXP__MASK 0x000ff000
+#define DPU_OUT_CVT_SHIFT_MINUS_EXP__SHIFT 12
+static inline uint32_t DPU_OUT_CVT_SHIFT_MINUS_EXP(uint32_t val)
+{
+ return ((val) << DPU_OUT_CVT_SHIFT_MINUS_EXP__SHIFT) & DPU_OUT_CVT_SHIFT_MINUS_EXP__MASK;
+}
+#define DPU_OUT_CVT_SHIFT_OUT_CVT_SHIFT__MASK 0x00000fff
+#define DPU_OUT_CVT_SHIFT_OUT_CVT_SHIFT__SHIFT 0
+static inline uint32_t DPU_OUT_CVT_SHIFT_OUT_CVT_SHIFT(uint32_t val)
+{
+ return ((val) << DPU_OUT_CVT_SHIFT_OUT_CVT_SHIFT__SHIFT) & DPU_OUT_CVT_SHIFT_OUT_CVT_SHIFT__MASK;
+}
+
+#define REG_DPU_EW_OP_VALUE_0 0x00004090
+#define DPU_EW_OP_VALUE_0_EW_OPERAND_0__MASK 0xffffffff
+#define DPU_EW_OP_VALUE_0_EW_OPERAND_0__SHIFT 0
+static inline uint32_t DPU_EW_OP_VALUE_0_EW_OPERAND_0(uint32_t val)
+{
+ return ((val) << DPU_EW_OP_VALUE_0_EW_OPERAND_0__SHIFT) & DPU_EW_OP_VALUE_0_EW_OPERAND_0__MASK;
+}
+
+#define REG_DPU_EW_OP_VALUE_1 0x00004094
+#define DPU_EW_OP_VALUE_1_EW_OPERAND_1__MASK 0xffffffff
+#define DPU_EW_OP_VALUE_1_EW_OPERAND_1__SHIFT 0
+static inline uint32_t DPU_EW_OP_VALUE_1_EW_OPERAND_1(uint32_t val)
+{
+ return ((val) << DPU_EW_OP_VALUE_1_EW_OPERAND_1__SHIFT) & DPU_EW_OP_VALUE_1_EW_OPERAND_1__MASK;
+}
+
+#define REG_DPU_EW_OP_VALUE_2 0x00004098
+#define DPU_EW_OP_VALUE_2_EW_OPERAND_2__MASK 0xffffffff
+#define DPU_EW_OP_VALUE_2_EW_OPERAND_2__SHIFT 0
+static inline uint32_t DPU_EW_OP_VALUE_2_EW_OPERAND_2(uint32_t val)
+{
+ return ((val) << DPU_EW_OP_VALUE_2_EW_OPERAND_2__SHIFT) & DPU_EW_OP_VALUE_2_EW_OPERAND_2__MASK;
+}
+
+#define REG_DPU_EW_OP_VALUE_3 0x0000409c
+#define DPU_EW_OP_VALUE_3_EW_OPERAND_3__MASK 0xffffffff
+#define DPU_EW_OP_VALUE_3_EW_OPERAND_3__SHIFT 0
+static inline uint32_t DPU_EW_OP_VALUE_3_EW_OPERAND_3(uint32_t val)
+{
+ return ((val) << DPU_EW_OP_VALUE_3_EW_OPERAND_3__SHIFT) & DPU_EW_OP_VALUE_3_EW_OPERAND_3__MASK;
+}
+
+#define REG_DPU_EW_OP_VALUE_4 0x000040a0
+#define DPU_EW_OP_VALUE_4_EW_OPERAND_4__MASK 0xffffffff
+#define DPU_EW_OP_VALUE_4_EW_OPERAND_4__SHIFT 0
+static inline uint32_t DPU_EW_OP_VALUE_4_EW_OPERAND_4(uint32_t val)
+{
+ return ((val) << DPU_EW_OP_VALUE_4_EW_OPERAND_4__SHIFT) & DPU_EW_OP_VALUE_4_EW_OPERAND_4__MASK;
+}
+
+#define REG_DPU_EW_OP_VALUE_5 0x000040a4
+#define DPU_EW_OP_VALUE_5_EW_OPERAND_5__MASK 0xffffffff
+#define DPU_EW_OP_VALUE_5_EW_OPERAND_5__SHIFT 0
+static inline uint32_t DPU_EW_OP_VALUE_5_EW_OPERAND_5(uint32_t val)
+{
+ return ((val) << DPU_EW_OP_VALUE_5_EW_OPERAND_5__SHIFT) & DPU_EW_OP_VALUE_5_EW_OPERAND_5__MASK;
+}
+
+#define REG_DPU_EW_OP_VALUE_6 0x000040a8
+#define DPU_EW_OP_VALUE_6_EW_OPERAND_6__MASK 0xffffffff
+#define DPU_EW_OP_VALUE_6_EW_OPERAND_6__SHIFT 0
+static inline uint32_t DPU_EW_OP_VALUE_6_EW_OPERAND_6(uint32_t val)
+{
+ return ((val) << DPU_EW_OP_VALUE_6_EW_OPERAND_6__SHIFT) & DPU_EW_OP_VALUE_6_EW_OPERAND_6__MASK;
+}
+
+#define REG_DPU_EW_OP_VALUE_7 0x000040ac
+#define DPU_EW_OP_VALUE_7_EW_OPERAND_7__MASK 0xffffffff
+#define DPU_EW_OP_VALUE_7_EW_OPERAND_7__SHIFT 0
+static inline uint32_t DPU_EW_OP_VALUE_7_EW_OPERAND_7(uint32_t val)
+{
+ return ((val) << DPU_EW_OP_VALUE_7_EW_OPERAND_7__SHIFT) & DPU_EW_OP_VALUE_7_EW_OPERAND_7__MASK;
+}
+
+#define REG_DPU_SURFACE_ADD 0x000040c0
+#define DPU_SURFACE_ADD_SURF_ADD__MASK 0xfffffff0
+#define DPU_SURFACE_ADD_SURF_ADD__SHIFT 4
+static inline uint32_t DPU_SURFACE_ADD_SURF_ADD(uint32_t val)
+{
+ return ((val) << DPU_SURFACE_ADD_SURF_ADD__SHIFT) & DPU_SURFACE_ADD_SURF_ADD__MASK;
+}
+#define DPU_SURFACE_ADD_RESERVED_0__MASK 0x0000000f
+#define DPU_SURFACE_ADD_RESERVED_0__SHIFT 0
+static inline uint32_t DPU_SURFACE_ADD_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_SURFACE_ADD_RESERVED_0__SHIFT) & DPU_SURFACE_ADD_RESERVED_0__MASK;
+}
+
+#define REG_DPU_LUT_ACCESS_CFG 0x00004100
+#define DPU_LUT_ACCESS_CFG_RESERVED_0__MASK 0xfffc0000
+#define DPU_LUT_ACCESS_CFG_RESERVED_0__SHIFT 18
+static inline uint32_t DPU_LUT_ACCESS_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_LUT_ACCESS_CFG_RESERVED_0__SHIFT) & DPU_LUT_ACCESS_CFG_RESERVED_0__MASK;
+}
+#define DPU_LUT_ACCESS_CFG_LUT_ACCESS_TYPE__MASK 0x00020000
+#define DPU_LUT_ACCESS_CFG_LUT_ACCESS_TYPE__SHIFT 17
+static inline uint32_t DPU_LUT_ACCESS_CFG_LUT_ACCESS_TYPE(uint32_t val)
+{
+ return ((val) << DPU_LUT_ACCESS_CFG_LUT_ACCESS_TYPE__SHIFT) & DPU_LUT_ACCESS_CFG_LUT_ACCESS_TYPE__MASK;
+}
+#define DPU_LUT_ACCESS_CFG_LUT_TABLE_ID__MASK 0x00010000
+#define DPU_LUT_ACCESS_CFG_LUT_TABLE_ID__SHIFT 16
+static inline uint32_t DPU_LUT_ACCESS_CFG_LUT_TABLE_ID(uint32_t val)
+{
+ return ((val) << DPU_LUT_ACCESS_CFG_LUT_TABLE_ID__SHIFT) & DPU_LUT_ACCESS_CFG_LUT_TABLE_ID__MASK;
+}
+#define DPU_LUT_ACCESS_CFG_RESERVED_1__MASK 0x0000fc00
+#define DPU_LUT_ACCESS_CFG_RESERVED_1__SHIFT 10
+static inline uint32_t DPU_LUT_ACCESS_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_LUT_ACCESS_CFG_RESERVED_1__SHIFT) & DPU_LUT_ACCESS_CFG_RESERVED_1__MASK;
+}
+#define DPU_LUT_ACCESS_CFG_LUT_ADDR__MASK 0x000003ff
+#define DPU_LUT_ACCESS_CFG_LUT_ADDR__SHIFT 0
+static inline uint32_t DPU_LUT_ACCESS_CFG_LUT_ADDR(uint32_t val)
+{
+ return ((val) << DPU_LUT_ACCESS_CFG_LUT_ADDR__SHIFT) & DPU_LUT_ACCESS_CFG_LUT_ADDR__MASK;
+}
+
+#define REG_DPU_LUT_ACCESS_DATA 0x00004104
+#define DPU_LUT_ACCESS_DATA_RESERVED_0__MASK 0xffff0000
+#define DPU_LUT_ACCESS_DATA_RESERVED_0__SHIFT 16
+static inline uint32_t DPU_LUT_ACCESS_DATA_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_LUT_ACCESS_DATA_RESERVED_0__SHIFT) & DPU_LUT_ACCESS_DATA_RESERVED_0__MASK;
+}
+#define DPU_LUT_ACCESS_DATA_LUT_ACCESS_DATA__MASK 0x0000ffff
+#define DPU_LUT_ACCESS_DATA_LUT_ACCESS_DATA__SHIFT 0
+static inline uint32_t DPU_LUT_ACCESS_DATA_LUT_ACCESS_DATA(uint32_t val)
+{
+ return ((val) << DPU_LUT_ACCESS_DATA_LUT_ACCESS_DATA__SHIFT) & DPU_LUT_ACCESS_DATA_LUT_ACCESS_DATA__MASK;
+}
+
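+/*
+ * Programming sketch (an assumption inferred from the register names, not
+ * a documented sequence): LUT entries appear to be accessed indirectly by
+ * selecting a table, access type and entry index through
+ * REG_DPU_LUT_ACCESS_CFG, then moving 16-bit entries one at a time through
+ * REG_DPU_LUT_ACCESS_DATA:
+ *
+ *	writel(DPU_LUT_ACCESS_CFG_LUT_TABLE_ID(table) |
+ *	       DPU_LUT_ACCESS_CFG_LUT_ACCESS_TYPE(1) |
+ *	       DPU_LUT_ACCESS_CFG_LUT_ADDR(i),
+ *	       base + REG_DPU_LUT_ACCESS_CFG);
+ *	writel(DPU_LUT_ACCESS_DATA_LUT_ACCESS_DATA(lut[i]),
+ *	       base + REG_DPU_LUT_ACCESS_DATA);
+ *
+ * Whether ACCESS_TYPE = 1 selects a write is an assumption.
+ */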
+#define REG_DPU_LUT_CFG 0x00004108
+#define DPU_LUT_CFG_RESERVED_0__MASK 0xffffff00
+#define DPU_LUT_CFG_RESERVED_0__SHIFT 8
+static inline uint32_t DPU_LUT_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_LUT_CFG_RESERVED_0__SHIFT) & DPU_LUT_CFG_RESERVED_0__MASK;
+}
+#define DPU_LUT_CFG_LUT_CAL_SEL__MASK 0x00000080
+#define DPU_LUT_CFG_LUT_CAL_SEL__SHIFT 7
+static inline uint32_t DPU_LUT_CFG_LUT_CAL_SEL(uint32_t val)
+{
+ return ((val) << DPU_LUT_CFG_LUT_CAL_SEL__SHIFT) & DPU_LUT_CFG_LUT_CAL_SEL__MASK;
+}
+#define DPU_LUT_CFG_LUT_HYBRID_PRIORITY__MASK 0x00000040
+#define DPU_LUT_CFG_LUT_HYBRID_PRIORITY__SHIFT 6
+static inline uint32_t DPU_LUT_CFG_LUT_HYBRID_PRIORITY(uint32_t val)
+{
+ return ((val) << DPU_LUT_CFG_LUT_HYBRID_PRIORITY__SHIFT) & DPU_LUT_CFG_LUT_HYBRID_PRIORITY__MASK;
+}
+#define DPU_LUT_CFG_LUT_OFLOW_PRIORITY__MASK 0x00000020
+#define DPU_LUT_CFG_LUT_OFLOW_PRIORITY__SHIFT 5
+static inline uint32_t DPU_LUT_CFG_LUT_OFLOW_PRIORITY(uint32_t val)
+{
+ return ((val) << DPU_LUT_CFG_LUT_OFLOW_PRIORITY__SHIFT) & DPU_LUT_CFG_LUT_OFLOW_PRIORITY__MASK;
+}
+#define DPU_LUT_CFG_LUT_UFLOW_PRIORITY__MASK 0x00000010
+#define DPU_LUT_CFG_LUT_UFLOW_PRIORITY__SHIFT 4
+static inline uint32_t DPU_LUT_CFG_LUT_UFLOW_PRIORITY(uint32_t val)
+{
+ return ((val) << DPU_LUT_CFG_LUT_UFLOW_PRIORITY__SHIFT) & DPU_LUT_CFG_LUT_UFLOW_PRIORITY__MASK;
+}
+#define DPU_LUT_CFG_LUT_LO_LE_MUX__MASK 0x0000000c
+#define DPU_LUT_CFG_LUT_LO_LE_MUX__SHIFT 2
+static inline uint32_t DPU_LUT_CFG_LUT_LO_LE_MUX(uint32_t val)
+{
+ return ((val) << DPU_LUT_CFG_LUT_LO_LE_MUX__SHIFT) & DPU_LUT_CFG_LUT_LO_LE_MUX__MASK;
+}
+#define DPU_LUT_CFG_LUT_EXPAND_EN__MASK 0x00000002
+#define DPU_LUT_CFG_LUT_EXPAND_EN__SHIFT 1
+static inline uint32_t DPU_LUT_CFG_LUT_EXPAND_EN(uint32_t val)
+{
+ return ((val) << DPU_LUT_CFG_LUT_EXPAND_EN__SHIFT) & DPU_LUT_CFG_LUT_EXPAND_EN__MASK;
+}
+#define DPU_LUT_CFG_LUT_ROAD_SEL__MASK 0x00000001
+#define DPU_LUT_CFG_LUT_ROAD_SEL__SHIFT 0
+static inline uint32_t DPU_LUT_CFG_LUT_ROAD_SEL(uint32_t val)
+{
+ return ((val) << DPU_LUT_CFG_LUT_ROAD_SEL__SHIFT) & DPU_LUT_CFG_LUT_ROAD_SEL__MASK;
+}
+
+#define REG_DPU_LUT_INFO 0x0000410c
+#define DPU_LUT_INFO_RESERVED_0__MASK 0xff000000
+#define DPU_LUT_INFO_RESERVED_0__SHIFT 24
+static inline uint32_t DPU_LUT_INFO_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_LUT_INFO_RESERVED_0__SHIFT) & DPU_LUT_INFO_RESERVED_0__MASK;
+}
+#define DPU_LUT_INFO_LUT_LO_INDEX_SELECT__MASK 0x00ff0000
+#define DPU_LUT_INFO_LUT_LO_INDEX_SELECT__SHIFT 16
+static inline uint32_t DPU_LUT_INFO_LUT_LO_INDEX_SELECT(uint32_t val)
+{
+ return ((val) << DPU_LUT_INFO_LUT_LO_INDEX_SELECT__SHIFT) & DPU_LUT_INFO_LUT_LO_INDEX_SELECT__MASK;
+}
+#define DPU_LUT_INFO_LUT_LE_INDEX_SELECT__MASK 0x0000ff00
+#define DPU_LUT_INFO_LUT_LE_INDEX_SELECT__SHIFT 8
+static inline uint32_t DPU_LUT_INFO_LUT_LE_INDEX_SELECT(uint32_t val)
+{
+ return ((val) << DPU_LUT_INFO_LUT_LE_INDEX_SELECT__SHIFT) & DPU_LUT_INFO_LUT_LE_INDEX_SELECT__MASK;
+}
+#define DPU_LUT_INFO_RESERVED_1__MASK 0x000000ff
+#define DPU_LUT_INFO_RESERVED_1__SHIFT 0
+static inline uint32_t DPU_LUT_INFO_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_LUT_INFO_RESERVED_1__SHIFT) & DPU_LUT_INFO_RESERVED_1__MASK;
+}
+
+#define REG_DPU_LUT_LE_START 0x00004110
+#define DPU_LUT_LE_START_LUT_LE_START__MASK 0xffffffff
+#define DPU_LUT_LE_START_LUT_LE_START__SHIFT 0
+static inline uint32_t DPU_LUT_LE_START_LUT_LE_START(uint32_t val)
+{
+ return ((val) << DPU_LUT_LE_START_LUT_LE_START__SHIFT) & DPU_LUT_LE_START_LUT_LE_START__MASK;
+}
+
+#define REG_DPU_LUT_LE_END 0x00004114
+#define DPU_LUT_LE_END_LUT_LE_END__MASK 0xffffffff
+#define DPU_LUT_LE_END_LUT_LE_END__SHIFT 0
+static inline uint32_t DPU_LUT_LE_END_LUT_LE_END(uint32_t val)
+{
+ return ((val) << DPU_LUT_LE_END_LUT_LE_END__SHIFT) & DPU_LUT_LE_END_LUT_LE_END__MASK;
+}
+
+#define REG_DPU_LUT_LO_START 0x00004118
+#define DPU_LUT_LO_START_LUT_LO_START__MASK 0xffffffff
+#define DPU_LUT_LO_START_LUT_LO_START__SHIFT 0
+static inline uint32_t DPU_LUT_LO_START_LUT_LO_START(uint32_t val)
+{
+ return ((val) << DPU_LUT_LO_START_LUT_LO_START__SHIFT) & DPU_LUT_LO_START_LUT_LO_START__MASK;
+}
+
+#define REG_DPU_LUT_LO_END 0x0000411c
+#define DPU_LUT_LO_END_LUT_LO_END__MASK 0xffffffff
+#define DPU_LUT_LO_END_LUT_LO_END__SHIFT 0
+static inline uint32_t DPU_LUT_LO_END_LUT_LO_END(uint32_t val)
+{
+ return ((val) << DPU_LUT_LO_END_LUT_LO_END__SHIFT) & DPU_LUT_LO_END_LUT_LO_END__MASK;
+}
+
+#define REG_DPU_LUT_LE_SLOPE_SCALE 0x00004120
+#define DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_OFLOW_SCALE__MASK 0xffff0000
+#define DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_OFLOW_SCALE__SHIFT 16
+static inline uint32_t DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_OFLOW_SCALE(uint32_t val)
+{
+ return ((val) << DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_OFLOW_SCALE__SHIFT) & DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_OFLOW_SCALE__MASK;
+}
+#define DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_UFLOW_SCALE__MASK 0x0000ffff
+#define DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_UFLOW_SCALE__SHIFT 0
+static inline uint32_t DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_UFLOW_SCALE(uint32_t val)
+{
+ return ((val) << DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_UFLOW_SCALE__SHIFT) & DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_UFLOW_SCALE__MASK;
+}
+
+#define REG_DPU_LUT_LE_SLOPE_SHIFT 0x00004124
+#define DPU_LUT_LE_SLOPE_SHIFT_RESERVED_0__MASK 0xfffffc00
+#define DPU_LUT_LE_SLOPE_SHIFT_RESERVED_0__SHIFT 10
+static inline uint32_t DPU_LUT_LE_SLOPE_SHIFT_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_LUT_LE_SLOPE_SHIFT_RESERVED_0__SHIFT) & DPU_LUT_LE_SLOPE_SHIFT_RESERVED_0__MASK;
+}
+#define DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_OFLOW_SHIFT__MASK 0x000003e0
+#define DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_OFLOW_SHIFT__SHIFT 5
+static inline uint32_t DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_OFLOW_SHIFT(uint32_t val)
+{
+ return ((val) << DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_OFLOW_SHIFT__SHIFT) & DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_OFLOW_SHIFT__MASK;
+}
+#define DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_UFLOW_SHIFT__MASK 0x0000001f
+#define DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_UFLOW_SHIFT__SHIFT 0
+static inline uint32_t DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_UFLOW_SHIFT(uint32_t val)
+{
+ return ((val) << DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_UFLOW_SHIFT__SHIFT) & DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_UFLOW_SHIFT__MASK;
+}
+
+#define REG_DPU_LUT_LO_SLOPE_SCALE 0x00004128
+#define DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_OFLOW_SCALE__MASK 0xffff0000
+#define DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_OFLOW_SCALE__SHIFT 16
+static inline uint32_t DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_OFLOW_SCALE(uint32_t val)
+{
+ return ((val) << DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_OFLOW_SCALE__SHIFT) & DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_OFLOW_SCALE__MASK;
+}
+#define DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_UFLOW_SCALE__MASK 0x0000ffff
+#define DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_UFLOW_SCALE__SHIFT 0
+static inline uint32_t DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_UFLOW_SCALE(uint32_t val)
+{
+ return ((val) << DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_UFLOW_SCALE__SHIFT) & DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_UFLOW_SCALE__MASK;
+}
+
+#define REG_DPU_LUT_LO_SLOPE_SHIFT 0x0000412c
+#define DPU_LUT_LO_SLOPE_SHIFT_RESERVED_0__MASK 0xfffffc00
+#define DPU_LUT_LO_SLOPE_SHIFT_RESERVED_0__SHIFT 10
+static inline uint32_t DPU_LUT_LO_SLOPE_SHIFT_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_LUT_LO_SLOPE_SHIFT_RESERVED_0__SHIFT) & DPU_LUT_LO_SLOPE_SHIFT_RESERVED_0__MASK;
+}
+#define DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_OFLOW_SHIFT__MASK 0x000003e0
+#define DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_OFLOW_SHIFT__SHIFT 5
+static inline uint32_t DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_OFLOW_SHIFT(uint32_t val)
+{
+ return ((val) << DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_OFLOW_SHIFT__SHIFT) & DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_OFLOW_SHIFT__MASK;
+}
+#define DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_UFLOW_SHIFT__MASK 0x0000001f
+#define DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_UFLOW_SHIFT__SHIFT 0
+static inline uint32_t DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_UFLOW_SHIFT(uint32_t val)
+{
+ return ((val) << DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_UFLOW_SHIFT__SHIFT) & DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_UFLOW_SHIFT__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_S_STATUS 0x00005000
+#define DPU_RDMA_RDMA_S_STATUS_RESERVED_0__MASK 0xfffc0000
+#define DPU_RDMA_RDMA_S_STATUS_RESERVED_0__SHIFT 18
+static inline uint32_t DPU_RDMA_RDMA_S_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_STATUS_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_S_STATUS_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_S_STATUS_STATUS_1__MASK 0x00030000
+#define DPU_RDMA_RDMA_S_STATUS_STATUS_1__SHIFT 16
+static inline uint32_t DPU_RDMA_RDMA_S_STATUS_STATUS_1(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_STATUS_STATUS_1__SHIFT) & DPU_RDMA_RDMA_S_STATUS_STATUS_1__MASK;
+}
+#define DPU_RDMA_RDMA_S_STATUS_RESERVED_1__MASK 0x0000fffc
+#define DPU_RDMA_RDMA_S_STATUS_RESERVED_1__SHIFT 2
+static inline uint32_t DPU_RDMA_RDMA_S_STATUS_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_STATUS_RESERVED_1__SHIFT) & DPU_RDMA_RDMA_S_STATUS_RESERVED_1__MASK;
+}
+#define DPU_RDMA_RDMA_S_STATUS_STATUS_0__MASK 0x00000003
+#define DPU_RDMA_RDMA_S_STATUS_STATUS_0__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_S_STATUS_STATUS_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_STATUS_STATUS_0__SHIFT) & DPU_RDMA_RDMA_S_STATUS_STATUS_0__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_S_POINTER 0x00005004
+#define DPU_RDMA_RDMA_S_POINTER_RESERVED_0__MASK 0xfffe0000
+#define DPU_RDMA_RDMA_S_POINTER_RESERVED_0__SHIFT 17
+static inline uint32_t DPU_RDMA_RDMA_S_POINTER_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_POINTER_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_S_POINTER_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_S_POINTER_EXECUTER__MASK 0x00010000
+#define DPU_RDMA_RDMA_S_POINTER_EXECUTER__SHIFT 16
+static inline uint32_t DPU_RDMA_RDMA_S_POINTER_EXECUTER(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_POINTER_EXECUTER__SHIFT) & DPU_RDMA_RDMA_S_POINTER_EXECUTER__MASK;
+}
+#define DPU_RDMA_RDMA_S_POINTER_RESERVED_1__MASK 0x0000ffc0
+#define DPU_RDMA_RDMA_S_POINTER_RESERVED_1__SHIFT 6
+static inline uint32_t DPU_RDMA_RDMA_S_POINTER_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_POINTER_RESERVED_1__SHIFT) & DPU_RDMA_RDMA_S_POINTER_RESERVED_1__MASK;
+}
+#define DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR__MASK 0x00000020
+#define DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR__SHIFT 5
+static inline uint32_t DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR__SHIFT) & DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR__MASK;
+}
+#define DPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR__MASK 0x00000010
+#define DPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR__SHIFT 4
+static inline uint32_t DPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR__SHIFT) & DPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR__MASK;
+}
+#define DPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE__MASK 0x00000008
+#define DPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE__SHIFT 3
+static inline uint32_t DPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE__SHIFT) & DPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE__MASK;
+}
+#define DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN__MASK 0x00000004
+#define DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN__SHIFT 2
+static inline uint32_t DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN__SHIFT) & DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN__MASK;
+}
+#define DPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN__MASK 0x00000002
+#define DPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN__SHIFT 1
+static inline uint32_t DPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN__SHIFT) & DPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN__MASK;
+}
+#define DPU_RDMA_RDMA_S_POINTER_POINTER__MASK 0x00000001
+#define DPU_RDMA_RDMA_S_POINTER_POINTER__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_S_POINTER_POINTER(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_POINTER_POINTER__SHIFT) & DPU_RDMA_RDMA_S_POINTER_POINTER__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_OPERATION_ENABLE 0x00005008
+#define DPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0__MASK 0xfffffffe
+#define DPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0__SHIFT 1
+static inline uint32_t DPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN__MASK 0x00000001
+#define DPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN__SHIFT) & DPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN__MASK;
+}
+
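+/*
+ * Kick-off sketch (assumed flow; OP_EN is the only non-reserved bit in
+ * this register): once the RDMA descriptor registers that follow are
+ * programmed, the transfer is presumably started by setting OP_EN:
+ *
+ *	writel(DPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN(1),
+ *	       base + REG_DPU_RDMA_RDMA_OPERATION_ENABLE);
+ */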
+#define REG_DPU_RDMA_RDMA_DATA_CUBE_WIDTH 0x0000500c
+#define DPU_RDMA_RDMA_DATA_CUBE_WIDTH_RESERVED_0__MASK 0xffffe000
+#define DPU_RDMA_RDMA_DATA_CUBE_WIDTH_RESERVED_0__SHIFT 13
+static inline uint32_t DPU_RDMA_RDMA_DATA_CUBE_WIDTH_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_DATA_CUBE_WIDTH_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_DATA_CUBE_WIDTH_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_DATA_CUBE_WIDTH_WIDTH__MASK 0x00001fff
+#define DPU_RDMA_RDMA_DATA_CUBE_WIDTH_WIDTH__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_DATA_CUBE_WIDTH_WIDTH(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_DATA_CUBE_WIDTH_WIDTH__SHIFT) & DPU_RDMA_RDMA_DATA_CUBE_WIDTH_WIDTH__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_DATA_CUBE_HEIGHT 0x00005010
+#define DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_0__MASK 0xe0000000
+#define DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_0__SHIFT 29
+static inline uint32_t DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_EW_LINE_NOTCH_ADDR__MASK 0x1fff0000
+#define DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_EW_LINE_NOTCH_ADDR__SHIFT 16
+static inline uint32_t DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_EW_LINE_NOTCH_ADDR(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_EW_LINE_NOTCH_ADDR__SHIFT) & DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_EW_LINE_NOTCH_ADDR__MASK;
+}
+#define DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_1__MASK 0x0000e000
+#define DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_1__SHIFT 13
+static inline uint32_t DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_1__SHIFT) & DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_1__MASK;
+}
+#define DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_HEIGHT__MASK 0x00001fff
+#define DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_HEIGHT__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_HEIGHT(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_HEIGHT__SHIFT) & DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_HEIGHT__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_DATA_CUBE_CHANNEL 0x00005014
+#define DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_RESERVED_0__MASK 0xffffe000
+#define DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_RESERVED_0__SHIFT 13
+static inline uint32_t DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_CHANNEL__MASK 0x00001fff
+#define DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_CHANNEL__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_CHANNEL(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_CHANNEL__SHIFT) & DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_CHANNEL__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_SRC_BASE_ADDR 0x00005018
+#define DPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR__MASK 0xffffffff
+#define DPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR__SHIFT) & DPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_BRDMA_CFG 0x0000501c
+#define DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_0__MASK 0xffffffe0
+#define DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_0__SHIFT 5
+static inline uint32_t DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_BRDMA_CFG_BRDMA_DATA_USE__MASK 0x0000001e
+#define DPU_RDMA_RDMA_BRDMA_CFG_BRDMA_DATA_USE__SHIFT 1
+static inline uint32_t DPU_RDMA_RDMA_BRDMA_CFG_BRDMA_DATA_USE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_BRDMA_CFG_BRDMA_DATA_USE__SHIFT) & DPU_RDMA_RDMA_BRDMA_CFG_BRDMA_DATA_USE__MASK;
+}
+#define DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_1__MASK 0x00000001
+#define DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_1__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_1__SHIFT) & DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_1__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_BS_BASE_ADDR 0x00005020
+#define DPU_RDMA_RDMA_BS_BASE_ADDR_BS_BASE_ADDR__MASK 0xffffffff
+#define DPU_RDMA_RDMA_BS_BASE_ADDR_BS_BASE_ADDR__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_BS_BASE_ADDR_BS_BASE_ADDR(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_BS_BASE_ADDR_BS_BASE_ADDR__SHIFT) & DPU_RDMA_RDMA_BS_BASE_ADDR_BS_BASE_ADDR__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_NRDMA_CFG 0x00005028
+#define DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_0__MASK 0xffffffe0
+#define DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_0__SHIFT 5
+static inline uint32_t DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_NRDMA_CFG_NRDMA_DATA_USE__MASK 0x0000001e
+#define DPU_RDMA_RDMA_NRDMA_CFG_NRDMA_DATA_USE__SHIFT 1
+static inline uint32_t DPU_RDMA_RDMA_NRDMA_CFG_NRDMA_DATA_USE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_NRDMA_CFG_NRDMA_DATA_USE__SHIFT) & DPU_RDMA_RDMA_NRDMA_CFG_NRDMA_DATA_USE__MASK;
+}
+#define DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_1__MASK 0x00000001
+#define DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_1__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_1__SHIFT) & DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_1__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_BN_BASE_ADDR 0x0000502c
+#define DPU_RDMA_RDMA_BN_BASE_ADDR_BN_BASE_ADDR__MASK 0xffffffff
+#define DPU_RDMA_RDMA_BN_BASE_ADDR_BN_BASE_ADDR__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_BN_BASE_ADDR_BN_BASE_ADDR(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_BN_BASE_ADDR_BN_BASE_ADDR__SHIFT) & DPU_RDMA_RDMA_BN_BASE_ADDR_BN_BASE_ADDR__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_ERDMA_CFG 0x00005034
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_MODE__MASK 0xc0000000
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_MODE__SHIFT 30
+static inline uint32_t DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_MODE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_MODE__SHIFT) & DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_MODE__MASK;
+}
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_SURF_MODE__MASK 0x20000000
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_SURF_MODE__SHIFT 29
+static inline uint32_t DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_SURF_MODE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_SURF_MODE__SHIFT) & DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_SURF_MODE__MASK;
+}
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_NONALIGN__MASK 0x10000000
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_NONALIGN__SHIFT 28
+static inline uint32_t DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_NONALIGN(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_NONALIGN__SHIFT) & DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_NONALIGN__MASK;
+}
+#define DPU_RDMA_RDMA_ERDMA_CFG_RESERVED_0__MASK 0x0ffffff0
+#define DPU_RDMA_RDMA_ERDMA_CFG_RESERVED_0__SHIFT 4
+static inline uint32_t DPU_RDMA_RDMA_ERDMA_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_ERDMA_CFG_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_ERDMA_CFG_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_SIZE__MASK 0x0000000c
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_SIZE__SHIFT 2
+static inline uint32_t DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_SIZE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_SIZE__SHIFT) & DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_SIZE__MASK;
+}
+#define DPU_RDMA_RDMA_ERDMA_CFG_OV4K_BYPASS__MASK 0x00000002
+#define DPU_RDMA_RDMA_ERDMA_CFG_OV4K_BYPASS__SHIFT 1
+static inline uint32_t DPU_RDMA_RDMA_ERDMA_CFG_OV4K_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_ERDMA_CFG_OV4K_BYPASS__SHIFT) & DPU_RDMA_RDMA_ERDMA_CFG_OV4K_BYPASS__MASK;
+}
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DISABLE__MASK 0x00000001
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DISABLE__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DISABLE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DISABLE__SHIFT) & DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DISABLE__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_EW_BASE_ADDR 0x00005038
+#define DPU_RDMA_RDMA_EW_BASE_ADDR_EW_BASE_ADDR__MASK 0xffffffff
+#define DPU_RDMA_RDMA_EW_BASE_ADDR_EW_BASE_ADDR__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_EW_BASE_ADDR_EW_BASE_ADDR(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_EW_BASE_ADDR_EW_BASE_ADDR__SHIFT) & DPU_RDMA_RDMA_EW_BASE_ADDR_EW_BASE_ADDR__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_EW_SURF_STRIDE 0x00005040
+#define DPU_RDMA_RDMA_EW_SURF_STRIDE_EW_SURF_STRIDE__MASK 0xfffffff0
+#define DPU_RDMA_RDMA_EW_SURF_STRIDE_EW_SURF_STRIDE__SHIFT 4
+static inline uint32_t DPU_RDMA_RDMA_EW_SURF_STRIDE_EW_SURF_STRIDE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_EW_SURF_STRIDE_EW_SURF_STRIDE__SHIFT) & DPU_RDMA_RDMA_EW_SURF_STRIDE_EW_SURF_STRIDE__MASK;
+}
+#define DPU_RDMA_RDMA_EW_SURF_STRIDE_RESERVED_0__MASK 0x0000000f
+#define DPU_RDMA_RDMA_EW_SURF_STRIDE_RESERVED_0__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_EW_SURF_STRIDE_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_EW_SURF_STRIDE_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_EW_SURF_STRIDE_RESERVED_0__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_FEATURE_MODE_CFG 0x00005044
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_RESERVED_0__MASK 0xfffc0000
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_RESERVED_0__SHIFT 18
+static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_IN_PRECISION__MASK 0x00038000
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_IN_PRECISION__SHIFT 15
+static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_IN_PRECISION(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_IN_PRECISION__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_IN_PRECISION__MASK;
+}
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_BURST_LEN__MASK 0x00007800
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_BURST_LEN__SHIFT 11
+static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_BURST_LEN(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_BURST_LEN__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_BURST_LEN__MASK;
+}
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_COMB_USE__MASK 0x00000700
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_COMB_USE__SHIFT 8
+static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_COMB_USE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_COMB_USE__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_COMB_USE__MASK;
+}
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_PROC_PRECISION__MASK 0x000000e0
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_PROC_PRECISION__SHIFT 5
+static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_PROC_PRECISION(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_PROC_PRECISION__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_PROC_PRECISION__MASK;
+}
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_DISABLE__MASK 0x00000010
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_DISABLE__SHIFT 4
+static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_DISABLE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_DISABLE__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_DISABLE__MASK;
+}
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_FP16TOFP32_EN__MASK 0x00000008
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_FP16TOFP32_EN__SHIFT 3
+static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_FP16TOFP32_EN(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_FP16TOFP32_EN__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_FP16TOFP32_EN__MASK;
+}
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_CONV_MODE__MASK 0x00000006
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_CONV_MODE__SHIFT 1
+static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_CONV_MODE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_CONV_MODE__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_CONV_MODE__MASK;
+}
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_FLYING_MODE__MASK 0x00000001
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_FLYING_MODE__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_FLYING_MODE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_FLYING_MODE__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_FLYING_MODE__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_SRC_DMA_CFG 0x00005048
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_LINE_NOTCH_ADDR__MASK 0xfff80000
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_LINE_NOTCH_ADDR__SHIFT 19
+static inline uint32_t DPU_RDMA_RDMA_SRC_DMA_CFG_LINE_NOTCH_ADDR(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SRC_DMA_CFG_LINE_NOTCH_ADDR__SHIFT) & DPU_RDMA_RDMA_SRC_DMA_CFG_LINE_NOTCH_ADDR__MASK;
+}
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_RESERVED_0__MASK 0x0007c000
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_RESERVED_0__SHIFT 14
+static inline uint32_t DPU_RDMA_RDMA_SRC_DMA_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SRC_DMA_CFG_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_SRC_DMA_CFG_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_POOLING_METHOD__MASK 0x00002000
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_POOLING_METHOD__SHIFT 13
+static inline uint32_t DPU_RDMA_RDMA_SRC_DMA_CFG_POOLING_METHOD(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SRC_DMA_CFG_POOLING_METHOD__SHIFT) & DPU_RDMA_RDMA_SRC_DMA_CFG_POOLING_METHOD__MASK;
+}
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_UNPOOLING_EN__MASK 0x00001000
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_UNPOOLING_EN__SHIFT 12
+static inline uint32_t DPU_RDMA_RDMA_SRC_DMA_CFG_UNPOOLING_EN(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SRC_DMA_CFG_UNPOOLING_EN__SHIFT) & DPU_RDMA_RDMA_SRC_DMA_CFG_UNPOOLING_EN__MASK;
+}
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_HEIGHT__MASK 0x00000e00
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_HEIGHT__SHIFT 9
+static inline uint32_t DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_HEIGHT(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_HEIGHT__SHIFT) & DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_HEIGHT__MASK;
+}
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_WIDTH__MASK 0x000001c0
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_WIDTH__SHIFT 6
+static inline uint32_t DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_WIDTH(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_WIDTH__SHIFT) & DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_WIDTH__MASK;
+}
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_HEIGHT__MASK 0x00000038
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_HEIGHT__SHIFT 3
+static inline uint32_t DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_HEIGHT(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_HEIGHT__SHIFT) & DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_HEIGHT__MASK;
+}
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_WIDTH__MASK 0x00000007
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_WIDTH__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_WIDTH(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_WIDTH__SHIFT) & DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_WIDTH__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_SURF_NOTCH 0x0000504c
+#define DPU_RDMA_RDMA_SURF_NOTCH_SURF_NOTCH_ADDR__MASK 0xfffffff0
+#define DPU_RDMA_RDMA_SURF_NOTCH_SURF_NOTCH_ADDR__SHIFT 4
+static inline uint32_t DPU_RDMA_RDMA_SURF_NOTCH_SURF_NOTCH_ADDR(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SURF_NOTCH_SURF_NOTCH_ADDR__SHIFT) & DPU_RDMA_RDMA_SURF_NOTCH_SURF_NOTCH_ADDR__MASK;
+}
+#define DPU_RDMA_RDMA_SURF_NOTCH_RESERVED_0__MASK 0x0000000f
+#define DPU_RDMA_RDMA_SURF_NOTCH_RESERVED_0__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_SURF_NOTCH_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SURF_NOTCH_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_SURF_NOTCH_RESERVED_0__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_PAD_CFG 0x00005064
+#define DPU_RDMA_RDMA_PAD_CFG_PAD_VALUE__MASK 0xffff0000
+#define DPU_RDMA_RDMA_PAD_CFG_PAD_VALUE__SHIFT 16
+static inline uint32_t DPU_RDMA_RDMA_PAD_CFG_PAD_VALUE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_PAD_CFG_PAD_VALUE__SHIFT) & DPU_RDMA_RDMA_PAD_CFG_PAD_VALUE__MASK;
+}
+#define DPU_RDMA_RDMA_PAD_CFG_RESERVED_0__MASK 0x0000ff80
+#define DPU_RDMA_RDMA_PAD_CFG_RESERVED_0__SHIFT 7
+static inline uint32_t DPU_RDMA_RDMA_PAD_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_PAD_CFG_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_PAD_CFG_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_PAD_CFG_PAD_TOP__MASK 0x00000070
+#define DPU_RDMA_RDMA_PAD_CFG_PAD_TOP__SHIFT 4
+static inline uint32_t DPU_RDMA_RDMA_PAD_CFG_PAD_TOP(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_PAD_CFG_PAD_TOP__SHIFT) & DPU_RDMA_RDMA_PAD_CFG_PAD_TOP__MASK;
+}
+#define DPU_RDMA_RDMA_PAD_CFG_RESERVED_1__MASK 0x00000008
+#define DPU_RDMA_RDMA_PAD_CFG_RESERVED_1__SHIFT 3
+static inline uint32_t DPU_RDMA_RDMA_PAD_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_PAD_CFG_RESERVED_1__SHIFT) & DPU_RDMA_RDMA_PAD_CFG_RESERVED_1__MASK;
+}
+#define DPU_RDMA_RDMA_PAD_CFG_PAD_LEFT__MASK 0x00000007
+#define DPU_RDMA_RDMA_PAD_CFG_PAD_LEFT__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_PAD_CFG_PAD_LEFT(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_PAD_CFG_PAD_LEFT__SHIFT) & DPU_RDMA_RDMA_PAD_CFG_PAD_LEFT__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_WEIGHT 0x00005068
+#define DPU_RDMA_RDMA_WEIGHT_E_WEIGHT__MASK 0xff000000
+#define DPU_RDMA_RDMA_WEIGHT_E_WEIGHT__SHIFT 24
+static inline uint32_t DPU_RDMA_RDMA_WEIGHT_E_WEIGHT(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_WEIGHT_E_WEIGHT__SHIFT) & DPU_RDMA_RDMA_WEIGHT_E_WEIGHT__MASK;
+}
+#define DPU_RDMA_RDMA_WEIGHT_N_WEIGHT__MASK 0x00ff0000
+#define DPU_RDMA_RDMA_WEIGHT_N_WEIGHT__SHIFT 16
+static inline uint32_t DPU_RDMA_RDMA_WEIGHT_N_WEIGHT(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_WEIGHT_N_WEIGHT__SHIFT) & DPU_RDMA_RDMA_WEIGHT_N_WEIGHT__MASK;
+}
+#define DPU_RDMA_RDMA_WEIGHT_B_WEIGHT__MASK 0x0000ff00
+#define DPU_RDMA_RDMA_WEIGHT_B_WEIGHT__SHIFT 8
+static inline uint32_t DPU_RDMA_RDMA_WEIGHT_B_WEIGHT(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_WEIGHT_B_WEIGHT__SHIFT) & DPU_RDMA_RDMA_WEIGHT_B_WEIGHT__MASK;
+}
+#define DPU_RDMA_RDMA_WEIGHT_M_WEIGHT__MASK 0x000000ff
+#define DPU_RDMA_RDMA_WEIGHT_M_WEIGHT__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_WEIGHT_M_WEIGHT(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_WEIGHT_M_WEIGHT__SHIFT) & DPU_RDMA_RDMA_WEIGHT_M_WEIGHT__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_EW_SURF_NOTCH 0x0000506c
+#define DPU_RDMA_RDMA_EW_SURF_NOTCH_EW_SURF_NOTCH__MASK 0xfffffff0
+#define DPU_RDMA_RDMA_EW_SURF_NOTCH_EW_SURF_NOTCH__SHIFT 4
+static inline uint32_t DPU_RDMA_RDMA_EW_SURF_NOTCH_EW_SURF_NOTCH(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_EW_SURF_NOTCH_EW_SURF_NOTCH__SHIFT) & DPU_RDMA_RDMA_EW_SURF_NOTCH_EW_SURF_NOTCH__MASK;
+}
+#define DPU_RDMA_RDMA_EW_SURF_NOTCH_RESERVED_0__MASK 0x0000000f
+#define DPU_RDMA_RDMA_EW_SURF_NOTCH_RESERVED_0__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_EW_SURF_NOTCH_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_EW_SURF_NOTCH_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_EW_SURF_NOTCH_RESERVED_0__MASK;
+}
+
+#define REG_PPU_S_STATUS 0x00006000
+#define PPU_S_STATUS_RESERVED_0__MASK 0xfffc0000
+#define PPU_S_STATUS_RESERVED_0__SHIFT 18
+static inline uint32_t PPU_S_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_S_STATUS_RESERVED_0__SHIFT) & PPU_S_STATUS_RESERVED_0__MASK;
+}
+#define PPU_S_STATUS_STATUS_1__MASK 0x00030000
+#define PPU_S_STATUS_STATUS_1__SHIFT 16
+static inline uint32_t PPU_S_STATUS_STATUS_1(uint32_t val)
+{
+ return ((val) << PPU_S_STATUS_STATUS_1__SHIFT) & PPU_S_STATUS_STATUS_1__MASK;
+}
+#define PPU_S_STATUS_RESERVED_1__MASK 0x0000fffc
+#define PPU_S_STATUS_RESERVED_1__SHIFT 2
+static inline uint32_t PPU_S_STATUS_RESERVED_1(uint32_t val)
+{
+ return ((val) << PPU_S_STATUS_RESERVED_1__SHIFT) & PPU_S_STATUS_RESERVED_1__MASK;
+}
+#define PPU_S_STATUS_STATUS_0__MASK 0x00000003
+#define PPU_S_STATUS_STATUS_0__SHIFT 0
+static inline uint32_t PPU_S_STATUS_STATUS_0(uint32_t val)
+{
+ return ((val) << PPU_S_STATUS_STATUS_0__SHIFT) & PPU_S_STATUS_STATUS_0__MASK;
+}
+
+#define REG_PPU_S_POINTER 0x00006004
+#define PPU_S_POINTER_RESERVED_0__MASK 0xfffe0000
+#define PPU_S_POINTER_RESERVED_0__SHIFT 17
+static inline uint32_t PPU_S_POINTER_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_S_POINTER_RESERVED_0__SHIFT) & PPU_S_POINTER_RESERVED_0__MASK;
+}
+#define PPU_S_POINTER_EXECUTER__MASK 0x00010000
+#define PPU_S_POINTER_EXECUTER__SHIFT 16
+static inline uint32_t PPU_S_POINTER_EXECUTER(uint32_t val)
+{
+ return ((val) << PPU_S_POINTER_EXECUTER__SHIFT) & PPU_S_POINTER_EXECUTER__MASK;
+}
+#define PPU_S_POINTER_RESERVED_1__MASK 0x0000ffc0
+#define PPU_S_POINTER_RESERVED_1__SHIFT 6
+static inline uint32_t PPU_S_POINTER_RESERVED_1(uint32_t val)
+{
+ return ((val) << PPU_S_POINTER_RESERVED_1__SHIFT) & PPU_S_POINTER_RESERVED_1__MASK;
+}
+#define PPU_S_POINTER_EXECUTER_PP_CLEAR__MASK 0x00000020
+#define PPU_S_POINTER_EXECUTER_PP_CLEAR__SHIFT 5
+static inline uint32_t PPU_S_POINTER_EXECUTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << PPU_S_POINTER_EXECUTER_PP_CLEAR__SHIFT) & PPU_S_POINTER_EXECUTER_PP_CLEAR__MASK;
+}
+#define PPU_S_POINTER_POINTER_PP_CLEAR__MASK 0x00000010
+#define PPU_S_POINTER_POINTER_PP_CLEAR__SHIFT 4
+static inline uint32_t PPU_S_POINTER_POINTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << PPU_S_POINTER_POINTER_PP_CLEAR__SHIFT) & PPU_S_POINTER_POINTER_PP_CLEAR__MASK;
+}
+#define PPU_S_POINTER_POINTER_PP_MODE__MASK 0x00000008
+#define PPU_S_POINTER_POINTER_PP_MODE__SHIFT 3
+static inline uint32_t PPU_S_POINTER_POINTER_PP_MODE(uint32_t val)
+{
+ return ((val) << PPU_S_POINTER_POINTER_PP_MODE__SHIFT) & PPU_S_POINTER_POINTER_PP_MODE__MASK;
+}
+#define PPU_S_POINTER_EXECUTER_PP_EN__MASK 0x00000004
+#define PPU_S_POINTER_EXECUTER_PP_EN__SHIFT 2
+static inline uint32_t PPU_S_POINTER_EXECUTER_PP_EN(uint32_t val)
+{
+ return ((val) << PPU_S_POINTER_EXECUTER_PP_EN__SHIFT) & PPU_S_POINTER_EXECUTER_PP_EN__MASK;
+}
+#define PPU_S_POINTER_POINTER_PP_EN__MASK 0x00000002
+#define PPU_S_POINTER_POINTER_PP_EN__SHIFT 1
+static inline uint32_t PPU_S_POINTER_POINTER_PP_EN(uint32_t val)
+{
+ return ((val) << PPU_S_POINTER_POINTER_PP_EN__SHIFT) & PPU_S_POINTER_POINTER_PP_EN__MASK;
+}
+#define PPU_S_POINTER_POINTER__MASK 0x00000001
+#define PPU_S_POINTER_POINTER__SHIFT 0
+static inline uint32_t PPU_S_POINTER_POINTER(uint32_t val)
+{
+ return ((val) << PPU_S_POINTER_POINTER__SHIFT) & PPU_S_POINTER_POINTER__MASK;
+}
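+
+/*
+ * Note: the *_S_STATUS/*_S_POINTER pairs above and below appear to follow
+ * an NVDLA-style ping-pong register group scheme: each unit carries two
+ * register copies, POINTER selects the copy software programs next while
+ * EXECUTER reports the copy the hardware is currently consuming, and the
+ * PP_EN/PP_MODE/PP_CLEAR bits control the double buffering. This is an
+ * inference from the field names, not a statement from hardware docs.
+ */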
+
+#define REG_PPU_OPERATION_ENABLE 0x00006008
+#define PPU_OPERATION_ENABLE_RESERVED_0__MASK 0xfffffffe
+#define PPU_OPERATION_ENABLE_RESERVED_0__SHIFT 1
+static inline uint32_t PPU_OPERATION_ENABLE_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_ENABLE_RESERVED_0__SHIFT) & PPU_OPERATION_ENABLE_RESERVED_0__MASK;
+}
+#define PPU_OPERATION_ENABLE_OP_EN__MASK 0x00000001
+#define PPU_OPERATION_ENABLE_OP_EN__SHIFT 0
+static inline uint32_t PPU_OPERATION_ENABLE_OP_EN(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_ENABLE_OP_EN__SHIFT) & PPU_OPERATION_ENABLE_OP_EN__MASK;
+}
+
+#define REG_PPU_DATA_CUBE_IN_WIDTH 0x0000600c
+#define PPU_DATA_CUBE_IN_WIDTH_RESERVED_0__MASK 0xffffe000
+#define PPU_DATA_CUBE_IN_WIDTH_RESERVED_0__SHIFT 13
+static inline uint32_t PPU_DATA_CUBE_IN_WIDTH_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_IN_WIDTH_RESERVED_0__SHIFT) & PPU_DATA_CUBE_IN_WIDTH_RESERVED_0__MASK;
+}
+#define PPU_DATA_CUBE_IN_WIDTH_CUBE_IN_WIDTH__MASK 0x00001fff
+#define PPU_DATA_CUBE_IN_WIDTH_CUBE_IN_WIDTH__SHIFT 0
+static inline uint32_t PPU_DATA_CUBE_IN_WIDTH_CUBE_IN_WIDTH(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_IN_WIDTH_CUBE_IN_WIDTH__SHIFT) & PPU_DATA_CUBE_IN_WIDTH_CUBE_IN_WIDTH__MASK;
+}
+
+#define REG_PPU_DATA_CUBE_IN_HEIGHT 0x00006010
+#define PPU_DATA_CUBE_IN_HEIGHT_RESERVED_0__MASK 0xffffe000
+#define PPU_DATA_CUBE_IN_HEIGHT_RESERVED_0__SHIFT 13
+static inline uint32_t PPU_DATA_CUBE_IN_HEIGHT_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_IN_HEIGHT_RESERVED_0__SHIFT) & PPU_DATA_CUBE_IN_HEIGHT_RESERVED_0__MASK;
+}
+#define PPU_DATA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT__MASK 0x00001fff
+#define PPU_DATA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT__SHIFT 0
+static inline uint32_t PPU_DATA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT__SHIFT) & PPU_DATA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT__MASK;
+}
+
+#define REG_PPU_DATA_CUBE_IN_CHANNEL 0x00006014
+#define PPU_DATA_CUBE_IN_CHANNEL_RESERVED_0__MASK 0xffffe000
+#define PPU_DATA_CUBE_IN_CHANNEL_RESERVED_0__SHIFT 13
+static inline uint32_t PPU_DATA_CUBE_IN_CHANNEL_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_IN_CHANNEL_RESERVED_0__SHIFT) & PPU_DATA_CUBE_IN_CHANNEL_RESERVED_0__MASK;
+}
+#define PPU_DATA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL__MASK 0x00001fff
+#define PPU_DATA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL__SHIFT 0
+static inline uint32_t PPU_DATA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL__SHIFT) & PPU_DATA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL__MASK;
+}
+
+#define REG_PPU_DATA_CUBE_OUT_WIDTH 0x00006018
+#define PPU_DATA_CUBE_OUT_WIDTH_RESERVED_0__MASK 0xffffe000
+#define PPU_DATA_CUBE_OUT_WIDTH_RESERVED_0__SHIFT 13
+static inline uint32_t PPU_DATA_CUBE_OUT_WIDTH_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_OUT_WIDTH_RESERVED_0__SHIFT) & PPU_DATA_CUBE_OUT_WIDTH_RESERVED_0__MASK;
+}
+#define PPU_DATA_CUBE_OUT_WIDTH_CUBE_OUT_WIDTH__MASK 0x00001fff
+#define PPU_DATA_CUBE_OUT_WIDTH_CUBE_OUT_WIDTH__SHIFT 0
+static inline uint32_t PPU_DATA_CUBE_OUT_WIDTH_CUBE_OUT_WIDTH(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_OUT_WIDTH_CUBE_OUT_WIDTH__SHIFT) & PPU_DATA_CUBE_OUT_WIDTH_CUBE_OUT_WIDTH__MASK;
+}
+
+#define REG_PPU_DATA_CUBE_OUT_HEIGHT 0x0000601c
+#define PPU_DATA_CUBE_OUT_HEIGHT_RESERVED_0__MASK 0xffffe000
+#define PPU_DATA_CUBE_OUT_HEIGHT_RESERVED_0__SHIFT 13
+static inline uint32_t PPU_DATA_CUBE_OUT_HEIGHT_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_OUT_HEIGHT_RESERVED_0__SHIFT) & PPU_DATA_CUBE_OUT_HEIGHT_RESERVED_0__MASK;
+}
+#define PPU_DATA_CUBE_OUT_HEIGHT_CUBE_OUT_HEIGHT__MASK 0x00001fff
+#define PPU_DATA_CUBE_OUT_HEIGHT_CUBE_OUT_HEIGHT__SHIFT 0
+static inline uint32_t PPU_DATA_CUBE_OUT_HEIGHT_CUBE_OUT_HEIGHT(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_OUT_HEIGHT_CUBE_OUT_HEIGHT__SHIFT) & PPU_DATA_CUBE_OUT_HEIGHT_CUBE_OUT_HEIGHT__MASK;
+}
+
+#define REG_PPU_DATA_CUBE_OUT_CHANNEL 0x00006020
+#define PPU_DATA_CUBE_OUT_CHANNEL_RESERVED_0__MASK 0xffffe000
+#define PPU_DATA_CUBE_OUT_CHANNEL_RESERVED_0__SHIFT 13
+static inline uint32_t PPU_DATA_CUBE_OUT_CHANNEL_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_OUT_CHANNEL_RESERVED_0__SHIFT) & PPU_DATA_CUBE_OUT_CHANNEL_RESERVED_0__MASK;
+}
+#define PPU_DATA_CUBE_OUT_CHANNEL_CUBE_OUT_CHANNEL__MASK 0x00001fff
+#define PPU_DATA_CUBE_OUT_CHANNEL_CUBE_OUT_CHANNEL__SHIFT 0
+static inline uint32_t PPU_DATA_CUBE_OUT_CHANNEL_CUBE_OUT_CHANNEL(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_OUT_CHANNEL_CUBE_OUT_CHANNEL__SHIFT) & PPU_DATA_CUBE_OUT_CHANNEL_CUBE_OUT_CHANNEL__MASK;
+}
+
+#define REG_PPU_OPERATION_MODE_CFG 0x00006024
+#define PPU_OPERATION_MODE_CFG_RESERVED_0__MASK 0x80000000
+#define PPU_OPERATION_MODE_CFG_RESERVED_0__SHIFT 31
+static inline uint32_t PPU_OPERATION_MODE_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_MODE_CFG_RESERVED_0__SHIFT) & PPU_OPERATION_MODE_CFG_RESERVED_0__MASK;
+}
+#define PPU_OPERATION_MODE_CFG_INDEX_EN__MASK 0x40000000
+#define PPU_OPERATION_MODE_CFG_INDEX_EN__SHIFT 30
+static inline uint32_t PPU_OPERATION_MODE_CFG_INDEX_EN(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_MODE_CFG_INDEX_EN__SHIFT) & PPU_OPERATION_MODE_CFG_INDEX_EN__MASK;
+}
+#define PPU_OPERATION_MODE_CFG_RESERVED_1__MASK 0x20000000
+#define PPU_OPERATION_MODE_CFG_RESERVED_1__SHIFT 29
+static inline uint32_t PPU_OPERATION_MODE_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_MODE_CFG_RESERVED_1__SHIFT) & PPU_OPERATION_MODE_CFG_RESERVED_1__MASK;
+}
+#define PPU_OPERATION_MODE_CFG_NOTCH_ADDR__MASK 0x1fff0000
+#define PPU_OPERATION_MODE_CFG_NOTCH_ADDR__SHIFT 16
+static inline uint32_t PPU_OPERATION_MODE_CFG_NOTCH_ADDR(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_MODE_CFG_NOTCH_ADDR__SHIFT) & PPU_OPERATION_MODE_CFG_NOTCH_ADDR__MASK;
+}
+#define PPU_OPERATION_MODE_CFG_RESERVED_2__MASK 0x0000ff00
+#define PPU_OPERATION_MODE_CFG_RESERVED_2__SHIFT 8
+static inline uint32_t PPU_OPERATION_MODE_CFG_RESERVED_2(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_MODE_CFG_RESERVED_2__SHIFT) & PPU_OPERATION_MODE_CFG_RESERVED_2__MASK;
+}
+#define PPU_OPERATION_MODE_CFG_USE_CNT__MASK 0x000000e0
+#define PPU_OPERATION_MODE_CFG_USE_CNT__SHIFT 5
+static inline uint32_t PPU_OPERATION_MODE_CFG_USE_CNT(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_MODE_CFG_USE_CNT__SHIFT) & PPU_OPERATION_MODE_CFG_USE_CNT__MASK;
+}
+#define PPU_OPERATION_MODE_CFG_FLYING_MODE__MASK 0x00000010
+#define PPU_OPERATION_MODE_CFG_FLYING_MODE__SHIFT 4
+static inline uint32_t PPU_OPERATION_MODE_CFG_FLYING_MODE(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_MODE_CFG_FLYING_MODE__SHIFT) & PPU_OPERATION_MODE_CFG_FLYING_MODE__MASK;
+}
+#define PPU_OPERATION_MODE_CFG_RESERVED_3__MASK 0x0000000c
+#define PPU_OPERATION_MODE_CFG_RESERVED_3__SHIFT 2
+static inline uint32_t PPU_OPERATION_MODE_CFG_RESERVED_3(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_MODE_CFG_RESERVED_3__SHIFT) & PPU_OPERATION_MODE_CFG_RESERVED_3__MASK;
+}
+#define PPU_OPERATION_MODE_CFG_POOLING_METHOD__MASK 0x00000003
+#define PPU_OPERATION_MODE_CFG_POOLING_METHOD__SHIFT 0
+static inline uint32_t PPU_OPERATION_MODE_CFG_POOLING_METHOD(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_MODE_CFG_POOLING_METHOD__SHIFT) & PPU_OPERATION_MODE_CFG_POOLING_METHOD__MASK;
+}
+
+#define REG_PPU_POOLING_KERNEL_CFG 0x00006034
+#define PPU_POOLING_KERNEL_CFG_RESERVED_0__MASK 0xff000000
+#define PPU_POOLING_KERNEL_CFG_RESERVED_0__SHIFT 24
+static inline uint32_t PPU_POOLING_KERNEL_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_POOLING_KERNEL_CFG_RESERVED_0__SHIFT) & PPU_POOLING_KERNEL_CFG_RESERVED_0__MASK;
+}
+#define PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_HEIGHT__MASK 0x00f00000
+#define PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_HEIGHT__SHIFT 20
+static inline uint32_t PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_HEIGHT(uint32_t val)
+{
+ return ((val) << PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_HEIGHT__SHIFT) & PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_HEIGHT__MASK;
+}
+#define PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_WIDTH__MASK 0x000f0000
+#define PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_WIDTH__SHIFT 16
+static inline uint32_t PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_WIDTH(uint32_t val)
+{
+ return ((val) << PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_WIDTH__SHIFT) & PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_WIDTH__MASK;
+}
+#define PPU_POOLING_KERNEL_CFG_RESERVED_1__MASK 0x0000f000
+#define PPU_POOLING_KERNEL_CFG_RESERVED_1__SHIFT 12
+static inline uint32_t PPU_POOLING_KERNEL_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << PPU_POOLING_KERNEL_CFG_RESERVED_1__SHIFT) & PPU_POOLING_KERNEL_CFG_RESERVED_1__MASK;
+}
+#define PPU_POOLING_KERNEL_CFG_KERNEL_HEIGHT__MASK 0x00000f00
+#define PPU_POOLING_KERNEL_CFG_KERNEL_HEIGHT__SHIFT 8
+static inline uint32_t PPU_POOLING_KERNEL_CFG_KERNEL_HEIGHT(uint32_t val)
+{
+ return ((val) << PPU_POOLING_KERNEL_CFG_KERNEL_HEIGHT__SHIFT) & PPU_POOLING_KERNEL_CFG_KERNEL_HEIGHT__MASK;
+}
+#define PPU_POOLING_KERNEL_CFG_RESERVED_2__MASK 0x000000f0
+#define PPU_POOLING_KERNEL_CFG_RESERVED_2__SHIFT 4
+static inline uint32_t PPU_POOLING_KERNEL_CFG_RESERVED_2(uint32_t val)
+{
+ return ((val) << PPU_POOLING_KERNEL_CFG_RESERVED_2__SHIFT) & PPU_POOLING_KERNEL_CFG_RESERVED_2__MASK;
+}
+#define PPU_POOLING_KERNEL_CFG_KERNEL_WIDTH__MASK 0x0000000f
+#define PPU_POOLING_KERNEL_CFG_KERNEL_WIDTH__SHIFT 0
+static inline uint32_t PPU_POOLING_KERNEL_CFG_KERNEL_WIDTH(uint32_t val)
+{
+ return ((val) << PPU_POOLING_KERNEL_CFG_KERNEL_WIDTH__SHIFT) & PPU_POOLING_KERNEL_CFG_KERNEL_WIDTH__MASK;
+}
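+
+/*
+ * Illustrative sketch only (not part of the generated layout): each helper
+ * shifts and then masks its argument, so out-of-range values are silently
+ * truncated, and a full register word is composed by OR-ing the per-field
+ * helpers. A hypothetical 2x2 pooling window with stride 2 could be
+ * programmed as:
+ *
+ *	u32 v = PPU_POOLING_KERNEL_CFG_KERNEL_WIDTH(2) |
+ *		PPU_POOLING_KERNEL_CFG_KERNEL_HEIGHT(2) |
+ *		PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_WIDTH(2) |
+ *		PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_HEIGHT(2);
+ *	writel(v, base + REG_PPU_POOLING_KERNEL_CFG);
+ *
+ * where 'base' stands for the unit's ioremapped MMIO base and the raw
+ * (non-biased) field encoding are assumptions, not taken from this driver.
+ */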
+
+#define REG_PPU_RECIP_KERNEL_WIDTH 0x00006038
+#define PPU_RECIP_KERNEL_WIDTH_RESERVED_0__MASK 0xfffe0000
+#define PPU_RECIP_KERNEL_WIDTH_RESERVED_0__SHIFT 17
+static inline uint32_t PPU_RECIP_KERNEL_WIDTH_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RECIP_KERNEL_WIDTH_RESERVED_0__SHIFT) & PPU_RECIP_KERNEL_WIDTH_RESERVED_0__MASK;
+}
+#define PPU_RECIP_KERNEL_WIDTH_RECIP_KERNEL_WIDTH__MASK 0x0001ffff
+#define PPU_RECIP_KERNEL_WIDTH_RECIP_KERNEL_WIDTH__SHIFT 0
+static inline uint32_t PPU_RECIP_KERNEL_WIDTH_RECIP_KERNEL_WIDTH(uint32_t val)
+{
+ return ((val) << PPU_RECIP_KERNEL_WIDTH_RECIP_KERNEL_WIDTH__SHIFT) & PPU_RECIP_KERNEL_WIDTH_RECIP_KERNEL_WIDTH__MASK;
+}
+
+#define REG_PPU_RECIP_KERNEL_HEIGHT 0x0000603c
+#define PPU_RECIP_KERNEL_HEIGHT_RESERVED_0__MASK 0xfffe0000
+#define PPU_RECIP_KERNEL_HEIGHT_RESERVED_0__SHIFT 17
+static inline uint32_t PPU_RECIP_KERNEL_HEIGHT_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RECIP_KERNEL_HEIGHT_RESERVED_0__SHIFT) & PPU_RECIP_KERNEL_HEIGHT_RESERVED_0__MASK;
+}
+#define PPU_RECIP_KERNEL_HEIGHT_RECIP_KERNEL_HEIGHT__MASK 0x0001ffff
+#define PPU_RECIP_KERNEL_HEIGHT_RECIP_KERNEL_HEIGHT__SHIFT 0
+static inline uint32_t PPU_RECIP_KERNEL_HEIGHT_RECIP_KERNEL_HEIGHT(uint32_t val)
+{
+ return ((val) << PPU_RECIP_KERNEL_HEIGHT_RECIP_KERNEL_HEIGHT__SHIFT) & PPU_RECIP_KERNEL_HEIGHT_RECIP_KERNEL_HEIGHT__MASK;
+}
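+
+/*
+ * The two 17-bit RECIP_KERNEL_* fields above presumably hold fixed-point
+ * reciprocals of the pooling window dimensions, letting average pooling be
+ * computed as sum * (1/w) * (1/h) without a hardware divider. The exact
+ * fixed-point format is not described here; this is an inference from the
+ * field names.
+ */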
+
+#define REG_PPU_POOLING_PADDING_CFG 0x00006040
+#define PPU_POOLING_PADDING_CFG_RESERVED_0__MASK 0xffff8000
+#define PPU_POOLING_PADDING_CFG_RESERVED_0__SHIFT 15
+static inline uint32_t PPU_POOLING_PADDING_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_POOLING_PADDING_CFG_RESERVED_0__SHIFT) & PPU_POOLING_PADDING_CFG_RESERVED_0__MASK;
+}
+#define PPU_POOLING_PADDING_CFG_PAD_BOTTOM__MASK 0x00007000
+#define PPU_POOLING_PADDING_CFG_PAD_BOTTOM__SHIFT 12
+static inline uint32_t PPU_POOLING_PADDING_CFG_PAD_BOTTOM(uint32_t val)
+{
+ return ((val) << PPU_POOLING_PADDING_CFG_PAD_BOTTOM__SHIFT) & PPU_POOLING_PADDING_CFG_PAD_BOTTOM__MASK;
+}
+#define PPU_POOLING_PADDING_CFG_RESERVED_1__MASK 0x00000800
+#define PPU_POOLING_PADDING_CFG_RESERVED_1__SHIFT 11
+static inline uint32_t PPU_POOLING_PADDING_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << PPU_POOLING_PADDING_CFG_RESERVED_1__SHIFT) & PPU_POOLING_PADDING_CFG_RESERVED_1__MASK;
+}
+#define PPU_POOLING_PADDING_CFG_PAD_RIGHT__MASK 0x00000700
+#define PPU_POOLING_PADDING_CFG_PAD_RIGHT__SHIFT 8
+static inline uint32_t PPU_POOLING_PADDING_CFG_PAD_RIGHT(uint32_t val)
+{
+ return ((val) << PPU_POOLING_PADDING_CFG_PAD_RIGHT__SHIFT) & PPU_POOLING_PADDING_CFG_PAD_RIGHT__MASK;
+}
+#define PPU_POOLING_PADDING_CFG_RESERVED_2__MASK 0x00000080
+#define PPU_POOLING_PADDING_CFG_RESERVED_2__SHIFT 7
+static inline uint32_t PPU_POOLING_PADDING_CFG_RESERVED_2(uint32_t val)
+{
+ return ((val) << PPU_POOLING_PADDING_CFG_RESERVED_2__SHIFT) & PPU_POOLING_PADDING_CFG_RESERVED_2__MASK;
+}
+#define PPU_POOLING_PADDING_CFG_PAD_TOP__MASK 0x00000070
+#define PPU_POOLING_PADDING_CFG_PAD_TOP__SHIFT 4
+static inline uint32_t PPU_POOLING_PADDING_CFG_PAD_TOP(uint32_t val)
+{
+ return ((val) << PPU_POOLING_PADDING_CFG_PAD_TOP__SHIFT) & PPU_POOLING_PADDING_CFG_PAD_TOP__MASK;
+}
+#define PPU_POOLING_PADDING_CFG_RESERVED_3__MASK 0x00000008
+#define PPU_POOLING_PADDING_CFG_RESERVED_3__SHIFT 3
+static inline uint32_t PPU_POOLING_PADDING_CFG_RESERVED_3(uint32_t val)
+{
+ return ((val) << PPU_POOLING_PADDING_CFG_RESERVED_3__SHIFT) & PPU_POOLING_PADDING_CFG_RESERVED_3__MASK;
+}
+#define PPU_POOLING_PADDING_CFG_PAD_LEFT__MASK 0x00000007
+#define PPU_POOLING_PADDING_CFG_PAD_LEFT__SHIFT 0
+static inline uint32_t PPU_POOLING_PADDING_CFG_PAD_LEFT(uint32_t val)
+{
+ return ((val) << PPU_POOLING_PADDING_CFG_PAD_LEFT__SHIFT) & PPU_POOLING_PADDING_CFG_PAD_LEFT__MASK;
+}
+
+#define REG_PPU_PADDING_VALUE_1_CFG 0x00006044
+#define PPU_PADDING_VALUE_1_CFG_PAD_VALUE_0__MASK 0xffffffff
+#define PPU_PADDING_VALUE_1_CFG_PAD_VALUE_0__SHIFT 0
+static inline uint32_t PPU_PADDING_VALUE_1_CFG_PAD_VALUE_0(uint32_t val)
+{
+ return ((val) << PPU_PADDING_VALUE_1_CFG_PAD_VALUE_0__SHIFT) & PPU_PADDING_VALUE_1_CFG_PAD_VALUE_0__MASK;
+}
+
+#define REG_PPU_PADDING_VALUE_2_CFG 0x00006048
+#define PPU_PADDING_VALUE_2_CFG_RESERVED_0__MASK 0xfffffff8
+#define PPU_PADDING_VALUE_2_CFG_RESERVED_0__SHIFT 3
+static inline uint32_t PPU_PADDING_VALUE_2_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_PADDING_VALUE_2_CFG_RESERVED_0__SHIFT) & PPU_PADDING_VALUE_2_CFG_RESERVED_0__MASK;
+}
+#define PPU_PADDING_VALUE_2_CFG_PAD_VALUE_1__MASK 0x00000007
+#define PPU_PADDING_VALUE_2_CFG_PAD_VALUE_1__SHIFT 0
+static inline uint32_t PPU_PADDING_VALUE_2_CFG_PAD_VALUE_1(uint32_t val)
+{
+ return ((val) << PPU_PADDING_VALUE_2_CFG_PAD_VALUE_1__SHIFT) & PPU_PADDING_VALUE_2_CFG_PAD_VALUE_1__MASK;
+}
+
+#define REG_PPU_DST_BASE_ADDR 0x00006070
+#define PPU_DST_BASE_ADDR_DST_BASE_ADDR__MASK 0xfffffff0
+#define PPU_DST_BASE_ADDR_DST_BASE_ADDR__SHIFT 4
+static inline uint32_t PPU_DST_BASE_ADDR_DST_BASE_ADDR(uint32_t val)
+{
+ return ((val) << PPU_DST_BASE_ADDR_DST_BASE_ADDR__SHIFT) & PPU_DST_BASE_ADDR_DST_BASE_ADDR__MASK;
+}
+#define PPU_DST_BASE_ADDR_RESERVED_0__MASK 0x0000000f
+#define PPU_DST_BASE_ADDR_RESERVED_0__SHIFT 0
+static inline uint32_t PPU_DST_BASE_ADDR_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_DST_BASE_ADDR_RESERVED_0__SHIFT) & PPU_DST_BASE_ADDR_RESERVED_0__MASK;
+}
+
+#define REG_PPU_DST_SURF_STRIDE 0x0000607c
+#define PPU_DST_SURF_STRIDE_DST_SURF_STRIDE__MASK 0xfffffff0
+#define PPU_DST_SURF_STRIDE_DST_SURF_STRIDE__SHIFT 4
+static inline uint32_t PPU_DST_SURF_STRIDE_DST_SURF_STRIDE(uint32_t val)
+{
+ return ((val) << PPU_DST_SURF_STRIDE_DST_SURF_STRIDE__SHIFT) & PPU_DST_SURF_STRIDE_DST_SURF_STRIDE__MASK;
+}
+#define PPU_DST_SURF_STRIDE_RESERVED_0__MASK 0x0000000f
+#define PPU_DST_SURF_STRIDE_RESERVED_0__SHIFT 0
+static inline uint32_t PPU_DST_SURF_STRIDE_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_DST_SURF_STRIDE_RESERVED_0__SHIFT) & PPU_DST_SURF_STRIDE_RESERVED_0__MASK;
+}
+
+#define REG_PPU_DATA_FORMAT 0x00006084
+#define PPU_DATA_FORMAT_INDEX_ADD__MASK 0xfffffff0
+#define PPU_DATA_FORMAT_INDEX_ADD__SHIFT 4
+static inline uint32_t PPU_DATA_FORMAT_INDEX_ADD(uint32_t val)
+{
+ return ((val) << PPU_DATA_FORMAT_INDEX_ADD__SHIFT) & PPU_DATA_FORMAT_INDEX_ADD__MASK;
+}
+#define PPU_DATA_FORMAT_DPU_FLYIN__MASK 0x00000008
+#define PPU_DATA_FORMAT_DPU_FLYIN__SHIFT 3
+static inline uint32_t PPU_DATA_FORMAT_DPU_FLYIN(uint32_t val)
+{
+ return ((val) << PPU_DATA_FORMAT_DPU_FLYIN__SHIFT) & PPU_DATA_FORMAT_DPU_FLYIN__MASK;
+}
+#define PPU_DATA_FORMAT_PROC_PRECISION__MASK 0x00000007
+#define PPU_DATA_FORMAT_PROC_PRECISION__SHIFT 0
+static inline uint32_t PPU_DATA_FORMAT_PROC_PRECISION(uint32_t val)
+{
+ return ((val) << PPU_DATA_FORMAT_PROC_PRECISION__SHIFT) & PPU_DATA_FORMAT_PROC_PRECISION__MASK;
+}
+
+#define REG_PPU_MISC_CTRL 0x000060dc
+#define PPU_MISC_CTRL_SURF_LEN__MASK 0xffff0000
+#define PPU_MISC_CTRL_SURF_LEN__SHIFT 16
+static inline uint32_t PPU_MISC_CTRL_SURF_LEN(uint32_t val)
+{
+ return ((val) << PPU_MISC_CTRL_SURF_LEN__SHIFT) & PPU_MISC_CTRL_SURF_LEN__MASK;
+}
+#define PPU_MISC_CTRL_RESERVED_0__MASK 0x0000fe00
+#define PPU_MISC_CTRL_RESERVED_0__SHIFT 9
+static inline uint32_t PPU_MISC_CTRL_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_MISC_CTRL_RESERVED_0__SHIFT) & PPU_MISC_CTRL_RESERVED_0__MASK;
+}
+#define PPU_MISC_CTRL_MC_SURF_OUT__MASK 0x00000100
+#define PPU_MISC_CTRL_MC_SURF_OUT__SHIFT 8
+static inline uint32_t PPU_MISC_CTRL_MC_SURF_OUT(uint32_t val)
+{
+ return ((val) << PPU_MISC_CTRL_MC_SURF_OUT__SHIFT) & PPU_MISC_CTRL_MC_SURF_OUT__MASK;
+}
+#define PPU_MISC_CTRL_NONALIGN__MASK 0x00000080
+#define PPU_MISC_CTRL_NONALIGN__SHIFT 7
+static inline uint32_t PPU_MISC_CTRL_NONALIGN(uint32_t val)
+{
+ return ((val) << PPU_MISC_CTRL_NONALIGN__SHIFT) & PPU_MISC_CTRL_NONALIGN__MASK;
+}
+#define PPU_MISC_CTRL_RESERVED_1__MASK 0x00000070
+#define PPU_MISC_CTRL_RESERVED_1__SHIFT 4
+static inline uint32_t PPU_MISC_CTRL_RESERVED_1(uint32_t val)
+{
+ return ((val) << PPU_MISC_CTRL_RESERVED_1__SHIFT) & PPU_MISC_CTRL_RESERVED_1__MASK;
+}
+#define PPU_MISC_CTRL_BURST_LEN__MASK 0x0000000f
+#define PPU_MISC_CTRL_BURST_LEN__SHIFT 0
+static inline uint32_t PPU_MISC_CTRL_BURST_LEN(uint32_t val)
+{
+ return ((val) << PPU_MISC_CTRL_BURST_LEN__SHIFT) & PPU_MISC_CTRL_BURST_LEN__MASK;
+}
+
+#define REG_PPU_RDMA_RDMA_S_STATUS 0x00007000
+#define PPU_RDMA_RDMA_S_STATUS_RESERVED_0__MASK 0xfffc0000
+#define PPU_RDMA_RDMA_S_STATUS_RESERVED_0__SHIFT 18
+static inline uint32_t PPU_RDMA_RDMA_S_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_STATUS_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_S_STATUS_RESERVED_0__MASK;
+}
+#define PPU_RDMA_RDMA_S_STATUS_STATUS_1__MASK 0x00030000
+#define PPU_RDMA_RDMA_S_STATUS_STATUS_1__SHIFT 16
+static inline uint32_t PPU_RDMA_RDMA_S_STATUS_STATUS_1(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_STATUS_STATUS_1__SHIFT) & PPU_RDMA_RDMA_S_STATUS_STATUS_1__MASK;
+}
+#define PPU_RDMA_RDMA_S_STATUS_RESERVED_1__MASK 0x0000fffc
+#define PPU_RDMA_RDMA_S_STATUS_RESERVED_1__SHIFT 2
+static inline uint32_t PPU_RDMA_RDMA_S_STATUS_RESERVED_1(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_STATUS_RESERVED_1__SHIFT) & PPU_RDMA_RDMA_S_STATUS_RESERVED_1__MASK;
+}
+#define PPU_RDMA_RDMA_S_STATUS_STATUS_0__MASK 0x00000003
+#define PPU_RDMA_RDMA_S_STATUS_STATUS_0__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_S_STATUS_STATUS_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_STATUS_STATUS_0__SHIFT) & PPU_RDMA_RDMA_S_STATUS_STATUS_0__MASK;
+}
+
+#define REG_PPU_RDMA_RDMA_S_POINTER 0x00007004
+#define PPU_RDMA_RDMA_S_POINTER_RESERVED_0__MASK 0xfffe0000
+#define PPU_RDMA_RDMA_S_POINTER_RESERVED_0__SHIFT 17
+static inline uint32_t PPU_RDMA_RDMA_S_POINTER_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_POINTER_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_S_POINTER_RESERVED_0__MASK;
+}
+#define PPU_RDMA_RDMA_S_POINTER_EXECUTER__MASK 0x00010000
+#define PPU_RDMA_RDMA_S_POINTER_EXECUTER__SHIFT 16
+static inline uint32_t PPU_RDMA_RDMA_S_POINTER_EXECUTER(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_POINTER_EXECUTER__SHIFT) & PPU_RDMA_RDMA_S_POINTER_EXECUTER__MASK;
+}
+#define PPU_RDMA_RDMA_S_POINTER_RESERVED_1__MASK 0x0000ffc0
+#define PPU_RDMA_RDMA_S_POINTER_RESERVED_1__SHIFT 6
+static inline uint32_t PPU_RDMA_RDMA_S_POINTER_RESERVED_1(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_POINTER_RESERVED_1__SHIFT) & PPU_RDMA_RDMA_S_POINTER_RESERVED_1__MASK;
+}
+#define PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR__MASK 0x00000020
+#define PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR__SHIFT 5
+static inline uint32_t PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR__SHIFT) & PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR__MASK;
+}
+#define PPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR__MASK 0x00000010
+#define PPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR__SHIFT 4
+static inline uint32_t PPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR__SHIFT) & PPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR__MASK;
+}
+#define PPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE__MASK 0x00000008
+#define PPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE__SHIFT 3
+static inline uint32_t PPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE__SHIFT) & PPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE__MASK;
+}
+#define PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN__MASK 0x00000004
+#define PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN__SHIFT 2
+static inline uint32_t PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN__SHIFT) & PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN__MASK;
+}
+#define PPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN__MASK 0x00000002
+#define PPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN__SHIFT 1
+static inline uint32_t PPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN__SHIFT) & PPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN__MASK;
+}
+#define PPU_RDMA_RDMA_S_POINTER_POINTER__MASK 0x00000001
+#define PPU_RDMA_RDMA_S_POINTER_POINTER__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_S_POINTER_POINTER(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_POINTER_POINTER__SHIFT) & PPU_RDMA_RDMA_S_POINTER_POINTER__MASK;
+}
+
+#define REG_PPU_RDMA_RDMA_OPERATION_ENABLE 0x00007008
+#define PPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0__MASK 0xfffffffe
+#define PPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0__SHIFT 1
+static inline uint32_t PPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0__MASK;
+}
+#define PPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN__MASK 0x00000001
+#define PPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN__SHIFT) & PPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN__MASK;
+}
+
+#define REG_PPU_RDMA_RDMA_CUBE_IN_WIDTH 0x0000700c
+#define PPU_RDMA_RDMA_CUBE_IN_WIDTH_RESERVED_0__MASK 0xffffe000
+#define PPU_RDMA_RDMA_CUBE_IN_WIDTH_RESERVED_0__SHIFT 13
+static inline uint32_t PPU_RDMA_RDMA_CUBE_IN_WIDTH_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_CUBE_IN_WIDTH_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_CUBE_IN_WIDTH_RESERVED_0__MASK;
+}
+#define PPU_RDMA_RDMA_CUBE_IN_WIDTH_CUBE_IN_WIDTH__MASK 0x00001fff
+#define PPU_RDMA_RDMA_CUBE_IN_WIDTH_CUBE_IN_WIDTH__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_CUBE_IN_WIDTH_CUBE_IN_WIDTH(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_CUBE_IN_WIDTH_CUBE_IN_WIDTH__SHIFT) & PPU_RDMA_RDMA_CUBE_IN_WIDTH_CUBE_IN_WIDTH__MASK;
+}
+
+#define REG_PPU_RDMA_RDMA_CUBE_IN_HEIGHT 0x00007010
+#define PPU_RDMA_RDMA_CUBE_IN_HEIGHT_RESERVED_0__MASK 0xffffe000
+#define PPU_RDMA_RDMA_CUBE_IN_HEIGHT_RESERVED_0__SHIFT 13
+static inline uint32_t PPU_RDMA_RDMA_CUBE_IN_HEIGHT_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_CUBE_IN_HEIGHT_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_CUBE_IN_HEIGHT_RESERVED_0__MASK;
+}
+#define PPU_RDMA_RDMA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT__MASK 0x00001fff
+#define PPU_RDMA_RDMA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT__SHIFT) & PPU_RDMA_RDMA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT__MASK;
+}
+
+#define REG_PPU_RDMA_RDMA_CUBE_IN_CHANNEL 0x00007014
+#define PPU_RDMA_RDMA_CUBE_IN_CHANNEL_RESERVED_0__MASK 0xffffe000
+#define PPU_RDMA_RDMA_CUBE_IN_CHANNEL_RESERVED_0__SHIFT 13
+static inline uint32_t PPU_RDMA_RDMA_CUBE_IN_CHANNEL_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_CUBE_IN_CHANNEL_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_CUBE_IN_CHANNEL_RESERVED_0__MASK;
+}
+#define PPU_RDMA_RDMA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL__MASK 0x00001fff
+#define PPU_RDMA_RDMA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL__SHIFT) & PPU_RDMA_RDMA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL__MASK;
+}
+
+#define REG_PPU_RDMA_RDMA_SRC_BASE_ADDR 0x0000701c
+#define PPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR__MASK 0xffffffff
+#define PPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR__SHIFT) & PPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR__MASK;
+}
+
+#define REG_PPU_RDMA_RDMA_SRC_LINE_STRIDE 0x00007024
+#define PPU_RDMA_RDMA_SRC_LINE_STRIDE_SRC_LINE_STRIDE__MASK 0xfffffff0
+#define PPU_RDMA_RDMA_SRC_LINE_STRIDE_SRC_LINE_STRIDE__SHIFT 4
+static inline uint32_t PPU_RDMA_RDMA_SRC_LINE_STRIDE_SRC_LINE_STRIDE(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_SRC_LINE_STRIDE_SRC_LINE_STRIDE__SHIFT) & PPU_RDMA_RDMA_SRC_LINE_STRIDE_SRC_LINE_STRIDE__MASK;
+}
+#define PPU_RDMA_RDMA_SRC_LINE_STRIDE_RESERVED_0__MASK 0x0000000f
+#define PPU_RDMA_RDMA_SRC_LINE_STRIDE_RESERVED_0__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_SRC_LINE_STRIDE_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_SRC_LINE_STRIDE_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_SRC_LINE_STRIDE_RESERVED_0__MASK;
+}
+
+#define REG_PPU_RDMA_RDMA_SRC_SURF_STRIDE 0x00007028
+#define PPU_RDMA_RDMA_SRC_SURF_STRIDE_SRC_SURF_STRIDE__MASK 0xfffffff0
+#define PPU_RDMA_RDMA_SRC_SURF_STRIDE_SRC_SURF_STRIDE__SHIFT 4
+static inline uint32_t PPU_RDMA_RDMA_SRC_SURF_STRIDE_SRC_SURF_STRIDE(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_SRC_SURF_STRIDE_SRC_SURF_STRIDE__SHIFT) & PPU_RDMA_RDMA_SRC_SURF_STRIDE_SRC_SURF_STRIDE__MASK;
+}
+#define PPU_RDMA_RDMA_SRC_SURF_STRIDE_RESERVED_0__MASK 0x0000000f
+#define PPU_RDMA_RDMA_SRC_SURF_STRIDE_RESERVED_0__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_SRC_SURF_STRIDE_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_SRC_SURF_STRIDE_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_SRC_SURF_STRIDE_RESERVED_0__MASK;
+}
+
+#define REG_PPU_RDMA_RDMA_DATA_FORMAT 0x00007030
+#define PPU_RDMA_RDMA_DATA_FORMAT_RESERVED_0__MASK 0xfffffffc
+#define PPU_RDMA_RDMA_DATA_FORMAT_RESERVED_0__SHIFT 2
+static inline uint32_t PPU_RDMA_RDMA_DATA_FORMAT_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_DATA_FORMAT_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_DATA_FORMAT_RESERVED_0__MASK;
+}
+#define PPU_RDMA_RDMA_DATA_FORMAT_IN_PRECISION__MASK 0x00000003
+#define PPU_RDMA_RDMA_DATA_FORMAT_IN_PRECISION__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_DATA_FORMAT_IN_PRECISION(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_DATA_FORMAT_IN_PRECISION__SHIFT) & PPU_RDMA_RDMA_DATA_FORMAT_IN_PRECISION__MASK;
+}
+
+#define REG_DDMA_CFG_OUTSTANDING 0x00008000
+#define DDMA_CFG_OUTSTANDING_RESERVED_0__MASK 0xffff0000
+#define DDMA_CFG_OUTSTANDING_RESERVED_0__SHIFT 16
+static inline uint32_t DDMA_CFG_OUTSTANDING_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_CFG_OUTSTANDING_RESERVED_0__SHIFT) & DDMA_CFG_OUTSTANDING_RESERVED_0__MASK;
+}
+#define DDMA_CFG_OUTSTANDING_WR_OS_CNT__MASK 0x0000ff00
+#define DDMA_CFG_OUTSTANDING_WR_OS_CNT__SHIFT 8
+static inline uint32_t DDMA_CFG_OUTSTANDING_WR_OS_CNT(uint32_t val)
+{
+ return ((val) << DDMA_CFG_OUTSTANDING_WR_OS_CNT__SHIFT) & DDMA_CFG_OUTSTANDING_WR_OS_CNT__MASK;
+}
+#define DDMA_CFG_OUTSTANDING_RD_OS_CNT__MASK 0x000000ff
+#define DDMA_CFG_OUTSTANDING_RD_OS_CNT__SHIFT 0
+static inline uint32_t DDMA_CFG_OUTSTANDING_RD_OS_CNT(uint32_t val)
+{
+ return ((val) << DDMA_CFG_OUTSTANDING_RD_OS_CNT__SHIFT) & DDMA_CFG_OUTSTANDING_RD_OS_CNT__MASK;
+}
+
+#define REG_DDMA_RD_WEIGHT_0 0x00008004
+#define DDMA_RD_WEIGHT_0_RD_WEIGHT_PDP__MASK 0xff000000
+#define DDMA_RD_WEIGHT_0_RD_WEIGHT_PDP__SHIFT 24
+static inline uint32_t DDMA_RD_WEIGHT_0_RD_WEIGHT_PDP(uint32_t val)
+{
+ return ((val) << DDMA_RD_WEIGHT_0_RD_WEIGHT_PDP__SHIFT) & DDMA_RD_WEIGHT_0_RD_WEIGHT_PDP__MASK;
+}
+#define DDMA_RD_WEIGHT_0_RD_WEIGHT_DPU__MASK 0x00ff0000
+#define DDMA_RD_WEIGHT_0_RD_WEIGHT_DPU__SHIFT 16
+static inline uint32_t DDMA_RD_WEIGHT_0_RD_WEIGHT_DPU(uint32_t val)
+{
+ return ((val) << DDMA_RD_WEIGHT_0_RD_WEIGHT_DPU__SHIFT) & DDMA_RD_WEIGHT_0_RD_WEIGHT_DPU__MASK;
+}
+#define DDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL__MASK 0x0000ff00
+#define DDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL__SHIFT 8
+static inline uint32_t DDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL(uint32_t val)
+{
+ return ((val) << DDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL__SHIFT) & DDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL__MASK;
+}
+#define DDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE__MASK 0x000000ff
+#define DDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE__SHIFT 0
+static inline uint32_t DDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE(uint32_t val)
+{
+ return ((val) << DDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE__SHIFT) & DDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE__MASK;
+}
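+
+/*
+ * The RD_WEIGHT_* fields above presumably set per-client arbitration
+ * weights on the DMA read port, i.e. how many grants the feature, kernel,
+ * DPU and PDP clients receive relative to one another. This is an
+ * inference from the field names, not documented behaviour.
+ */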
+
+#define REG_DDMA_WR_WEIGHT_0 0x00008008
+#define DDMA_WR_WEIGHT_0_RESERVED_0__MASK 0xffff0000
+#define DDMA_WR_WEIGHT_0_RESERVED_0__SHIFT 16
+static inline uint32_t DDMA_WR_WEIGHT_0_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_WR_WEIGHT_0_RESERVED_0__SHIFT) & DDMA_WR_WEIGHT_0_RESERVED_0__MASK;
+}
+#define DDMA_WR_WEIGHT_0_WR_WEIGHT_PDP__MASK 0x0000ff00
+#define DDMA_WR_WEIGHT_0_WR_WEIGHT_PDP__SHIFT 8
+static inline uint32_t DDMA_WR_WEIGHT_0_WR_WEIGHT_PDP(uint32_t val)
+{
+ return ((val) << DDMA_WR_WEIGHT_0_WR_WEIGHT_PDP__SHIFT) & DDMA_WR_WEIGHT_0_WR_WEIGHT_PDP__MASK;
+}
+#define DDMA_WR_WEIGHT_0_WR_WEIGHT_DPU__MASK 0x000000ff
+#define DDMA_WR_WEIGHT_0_WR_WEIGHT_DPU__SHIFT 0
+static inline uint32_t DDMA_WR_WEIGHT_0_WR_WEIGHT_DPU(uint32_t val)
+{
+ return ((val) << DDMA_WR_WEIGHT_0_WR_WEIGHT_DPU__SHIFT) & DDMA_WR_WEIGHT_0_WR_WEIGHT_DPU__MASK;
+}
+
+#define REG_DDMA_CFG_ID_ERROR 0x0000800c
+#define DDMA_CFG_ID_ERROR_RESERVED_0__MASK 0xfffffc00
+#define DDMA_CFG_ID_ERROR_RESERVED_0__SHIFT 10
+static inline uint32_t DDMA_CFG_ID_ERROR_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_CFG_ID_ERROR_RESERVED_0__SHIFT) & DDMA_CFG_ID_ERROR_RESERVED_0__MASK;
+}
+#define DDMA_CFG_ID_ERROR_WR_RESP_ID__MASK 0x000003c0
+#define DDMA_CFG_ID_ERROR_WR_RESP_ID__SHIFT 6
+static inline uint32_t DDMA_CFG_ID_ERROR_WR_RESP_ID(uint32_t val)
+{
+ return ((val) << DDMA_CFG_ID_ERROR_WR_RESP_ID__SHIFT) & DDMA_CFG_ID_ERROR_WR_RESP_ID__MASK;
+}
+#define DDMA_CFG_ID_ERROR_RESERVED_1__MASK 0x00000020
+#define DDMA_CFG_ID_ERROR_RESERVED_1__SHIFT 5
+static inline uint32_t DDMA_CFG_ID_ERROR_RESERVED_1(uint32_t val)
+{
+ return ((val) << DDMA_CFG_ID_ERROR_RESERVED_1__SHIFT) & DDMA_CFG_ID_ERROR_RESERVED_1__MASK;
+}
+#define DDMA_CFG_ID_ERROR_RD_RESP_ID__MASK 0x0000001f
+#define DDMA_CFG_ID_ERROR_RD_RESP_ID__SHIFT 0
+static inline uint32_t DDMA_CFG_ID_ERROR_RD_RESP_ID(uint32_t val)
+{
+ return ((val) << DDMA_CFG_ID_ERROR_RD_RESP_ID__SHIFT) & DDMA_CFG_ID_ERROR_RD_RESP_ID__MASK;
+}
+
+#define REG_DDMA_RD_WEIGHT_1 0x00008010
+#define DDMA_RD_WEIGHT_1_RESERVED_0__MASK 0xffffff00
+#define DDMA_RD_WEIGHT_1_RESERVED_0__SHIFT 8
+static inline uint32_t DDMA_RD_WEIGHT_1_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_RD_WEIGHT_1_RESERVED_0__SHIFT) & DDMA_RD_WEIGHT_1_RESERVED_0__MASK;
+}
+#define DDMA_RD_WEIGHT_1_RD_WEIGHT_PC__MASK 0x000000ff
+#define DDMA_RD_WEIGHT_1_RD_WEIGHT_PC__SHIFT 0
+static inline uint32_t DDMA_RD_WEIGHT_1_RD_WEIGHT_PC(uint32_t val)
+{
+ return ((val) << DDMA_RD_WEIGHT_1_RD_WEIGHT_PC__SHIFT) & DDMA_RD_WEIGHT_1_RD_WEIGHT_PC__MASK;
+}
+
+#define REG_DDMA_CFG_DMA_FIFO_CLR 0x00008014
+#define DDMA_CFG_DMA_FIFO_CLR_RESERVED_0__MASK 0xfffffffe
+#define DDMA_CFG_DMA_FIFO_CLR_RESERVED_0__SHIFT 1
+static inline uint32_t DDMA_CFG_DMA_FIFO_CLR_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_FIFO_CLR_RESERVED_0__SHIFT) & DDMA_CFG_DMA_FIFO_CLR_RESERVED_0__MASK;
+}
+#define DDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR__MASK 0x00000001
+#define DDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR__SHIFT 0
+static inline uint32_t DDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR__SHIFT) & DDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR__MASK;
+}
+
+#define REG_DDMA_CFG_DMA_ARB 0x00008018
+#define DDMA_CFG_DMA_ARB_RESERVED_0__MASK 0xfffffc00
+#define DDMA_CFG_DMA_ARB_RESERVED_0__SHIFT 10
+static inline uint32_t DDMA_CFG_DMA_ARB_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_ARB_RESERVED_0__SHIFT) & DDMA_CFG_DMA_ARB_RESERVED_0__MASK;
+}
+#define DDMA_CFG_DMA_ARB_WR_ARBIT_MODEL__MASK 0x00000200
+#define DDMA_CFG_DMA_ARB_WR_ARBIT_MODEL__SHIFT 9
+static inline uint32_t DDMA_CFG_DMA_ARB_WR_ARBIT_MODEL(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_ARB_WR_ARBIT_MODEL__SHIFT) & DDMA_CFG_DMA_ARB_WR_ARBIT_MODEL__MASK;
+}
+#define DDMA_CFG_DMA_ARB_RD_ARBIT_MODEL__MASK 0x00000100
+#define DDMA_CFG_DMA_ARB_RD_ARBIT_MODEL__SHIFT 8
+static inline uint32_t DDMA_CFG_DMA_ARB_RD_ARBIT_MODEL(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_ARB_RD_ARBIT_MODEL__SHIFT) & DDMA_CFG_DMA_ARB_RD_ARBIT_MODEL__MASK;
+}
+#define DDMA_CFG_DMA_ARB_RESERVED_1__MASK 0x00000080
+#define DDMA_CFG_DMA_ARB_RESERVED_1__SHIFT 7
+static inline uint32_t DDMA_CFG_DMA_ARB_RESERVED_1(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_ARB_RESERVED_1__SHIFT) & DDMA_CFG_DMA_ARB_RESERVED_1__MASK;
+}
+#define DDMA_CFG_DMA_ARB_WR_FIX_ARB__MASK 0x00000070
+#define DDMA_CFG_DMA_ARB_WR_FIX_ARB__SHIFT 4
+static inline uint32_t DDMA_CFG_DMA_ARB_WR_FIX_ARB(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_ARB_WR_FIX_ARB__SHIFT) & DDMA_CFG_DMA_ARB_WR_FIX_ARB__MASK;
+}
+#define DDMA_CFG_DMA_ARB_RESERVED_2__MASK 0x00000008
+#define DDMA_CFG_DMA_ARB_RESERVED_2__SHIFT 3
+static inline uint32_t DDMA_CFG_DMA_ARB_RESERVED_2(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_ARB_RESERVED_2__SHIFT) & DDMA_CFG_DMA_ARB_RESERVED_2__MASK;
+}
+#define DDMA_CFG_DMA_ARB_RD_FIX_ARB__MASK 0x00000007
+#define DDMA_CFG_DMA_ARB_RD_FIX_ARB__SHIFT 0
+static inline uint32_t DDMA_CFG_DMA_ARB_RD_FIX_ARB(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_ARB_RD_FIX_ARB__SHIFT) & DDMA_CFG_DMA_ARB_RD_FIX_ARB__MASK;
+}
+
+#define REG_DDMA_CFG_DMA_RD_QOS 0x00008020
+#define DDMA_CFG_DMA_RD_QOS_RESERVED_0__MASK 0xfffffc00
+#define DDMA_CFG_DMA_RD_QOS_RESERVED_0__SHIFT 10
+static inline uint32_t DDMA_CFG_DMA_RD_QOS_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_QOS_RESERVED_0__SHIFT) & DDMA_CFG_DMA_RD_QOS_RESERVED_0__MASK;
+}
+#define DDMA_CFG_DMA_RD_QOS_RD_PC_QOS__MASK 0x00000300
+#define DDMA_CFG_DMA_RD_QOS_RD_PC_QOS__SHIFT 8
+static inline uint32_t DDMA_CFG_DMA_RD_QOS_RD_PC_QOS(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_QOS_RD_PC_QOS__SHIFT) & DDMA_CFG_DMA_RD_QOS_RD_PC_QOS__MASK;
+}
+#define DDMA_CFG_DMA_RD_QOS_RD_PPU_QOS__MASK 0x000000c0
+#define DDMA_CFG_DMA_RD_QOS_RD_PPU_QOS__SHIFT 6
+static inline uint32_t DDMA_CFG_DMA_RD_QOS_RD_PPU_QOS(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_QOS_RD_PPU_QOS__SHIFT) & DDMA_CFG_DMA_RD_QOS_RD_PPU_QOS__MASK;
+}
+#define DDMA_CFG_DMA_RD_QOS_RD_DPU_QOS__MASK 0x00000030
+#define DDMA_CFG_DMA_RD_QOS_RD_DPU_QOS__SHIFT 4
+static inline uint32_t DDMA_CFG_DMA_RD_QOS_RD_DPU_QOS(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_QOS_RD_DPU_QOS__SHIFT) & DDMA_CFG_DMA_RD_QOS_RD_DPU_QOS__MASK;
+}
+#define DDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS__MASK 0x0000000c
+#define DDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS__SHIFT 2
+static inline uint32_t DDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS__SHIFT) & DDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS__MASK;
+}
+#define DDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS__MASK 0x00000003
+#define DDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS__SHIFT 0
+static inline uint32_t DDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS__SHIFT) & DDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS__MASK;
+}
+
+#define REG_DDMA_CFG_DMA_RD_CFG 0x00008024
+#define DDMA_CFG_DMA_RD_CFG_RESERVED_0__MASK 0xffffe000
+#define DDMA_CFG_DMA_RD_CFG_RESERVED_0__SHIFT 13
+static inline uint32_t DDMA_CFG_DMA_RD_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_CFG_RESERVED_0__SHIFT) & DDMA_CFG_DMA_RD_CFG_RESERVED_0__MASK;
+}
+#define DDMA_CFG_DMA_RD_CFG_RD_ARLOCK__MASK 0x00001000
+#define DDMA_CFG_DMA_RD_CFG_RD_ARLOCK__SHIFT 12
+static inline uint32_t DDMA_CFG_DMA_RD_CFG_RD_ARLOCK(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_CFG_RD_ARLOCK__SHIFT) & DDMA_CFG_DMA_RD_CFG_RD_ARLOCK__MASK;
+}
+#define DDMA_CFG_DMA_RD_CFG_RD_ARCACHE__MASK 0x00000f00
+#define DDMA_CFG_DMA_RD_CFG_RD_ARCACHE__SHIFT 8
+static inline uint32_t DDMA_CFG_DMA_RD_CFG_RD_ARCACHE(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_CFG_RD_ARCACHE__SHIFT) & DDMA_CFG_DMA_RD_CFG_RD_ARCACHE__MASK;
+}
+#define DDMA_CFG_DMA_RD_CFG_RD_ARPROT__MASK 0x000000e0
+#define DDMA_CFG_DMA_RD_CFG_RD_ARPROT__SHIFT 5
+static inline uint32_t DDMA_CFG_DMA_RD_CFG_RD_ARPROT(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_CFG_RD_ARPROT__SHIFT) & DDMA_CFG_DMA_RD_CFG_RD_ARPROT__MASK;
+}
+#define DDMA_CFG_DMA_RD_CFG_RD_ARBURST__MASK 0x00000018
+#define DDMA_CFG_DMA_RD_CFG_RD_ARBURST__SHIFT 3
+static inline uint32_t DDMA_CFG_DMA_RD_CFG_RD_ARBURST(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_CFG_RD_ARBURST__SHIFT) & DDMA_CFG_DMA_RD_CFG_RD_ARBURST__MASK;
+}
+#define DDMA_CFG_DMA_RD_CFG_RD_ARSIZE__MASK 0x00000007
+#define DDMA_CFG_DMA_RD_CFG_RD_ARSIZE__SHIFT 0
+static inline uint32_t DDMA_CFG_DMA_RD_CFG_RD_ARSIZE(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_CFG_RD_ARSIZE__SHIFT) & DDMA_CFG_DMA_RD_CFG_RD_ARSIZE__MASK;
+}
+
+#define REG_DDMA_CFG_DMA_WR_CFG 0x00008028
+#define DDMA_CFG_DMA_WR_CFG_RESERVED_0__MASK 0xffffe000
+#define DDMA_CFG_DMA_WR_CFG_RESERVED_0__SHIFT 13
+static inline uint32_t DDMA_CFG_DMA_WR_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_WR_CFG_RESERVED_0__SHIFT) & DDMA_CFG_DMA_WR_CFG_RESERVED_0__MASK;
+}
+#define DDMA_CFG_DMA_WR_CFG_WR_AWLOCK__MASK 0x00001000
+#define DDMA_CFG_DMA_WR_CFG_WR_AWLOCK__SHIFT 12
+static inline uint32_t DDMA_CFG_DMA_WR_CFG_WR_AWLOCK(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_WR_CFG_WR_AWLOCK__SHIFT) & DDMA_CFG_DMA_WR_CFG_WR_AWLOCK__MASK;
+}
+#define DDMA_CFG_DMA_WR_CFG_WR_AWCACHE__MASK 0x00000f00
+#define DDMA_CFG_DMA_WR_CFG_WR_AWCACHE__SHIFT 8
+static inline uint32_t DDMA_CFG_DMA_WR_CFG_WR_AWCACHE(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_WR_CFG_WR_AWCACHE__SHIFT) & DDMA_CFG_DMA_WR_CFG_WR_AWCACHE__MASK;
+}
+#define DDMA_CFG_DMA_WR_CFG_WR_AWPROT__MASK 0x000000e0
+#define DDMA_CFG_DMA_WR_CFG_WR_AWPROT__SHIFT 5
+static inline uint32_t DDMA_CFG_DMA_WR_CFG_WR_AWPROT(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_WR_CFG_WR_AWPROT__SHIFT) & DDMA_CFG_DMA_WR_CFG_WR_AWPROT__MASK;
+}
+#define DDMA_CFG_DMA_WR_CFG_WR_AWBURST__MASK 0x00000018
+#define DDMA_CFG_DMA_WR_CFG_WR_AWBURST__SHIFT 3
+static inline uint32_t DDMA_CFG_DMA_WR_CFG_WR_AWBURST(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_WR_CFG_WR_AWBURST__SHIFT) & DDMA_CFG_DMA_WR_CFG_WR_AWBURST__MASK;
+}
+#define DDMA_CFG_DMA_WR_CFG_WR_AWSIZE__MASK 0x00000007
+#define DDMA_CFG_DMA_WR_CFG_WR_AWSIZE__SHIFT 0
+static inline uint32_t DDMA_CFG_DMA_WR_CFG_WR_AWSIZE(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_WR_CFG_WR_AWSIZE__SHIFT) & DDMA_CFG_DMA_WR_CFG_WR_AWSIZE__MASK;
+}
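+
+/*
+ * The ARLOCK/ARCACHE/ARPROT/ARBURST/ARSIZE fields and their AW*
+ * counterparts in the two CFG registers above mirror the AMBA AXI
+ * read/write address channel attributes, so they presumably configure the
+ * bus attributes this DMA master drives on its transactions.
+ */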
+
+#define REG_DDMA_CFG_DMA_WSTRB 0x0000802c
+#define DDMA_CFG_DMA_WSTRB_WR_WSTRB__MASK 0xffffffff
+#define DDMA_CFG_DMA_WSTRB_WR_WSTRB__SHIFT 0
+static inline uint32_t DDMA_CFG_DMA_WSTRB_WR_WSTRB(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_WSTRB_WR_WSTRB__SHIFT) & DDMA_CFG_DMA_WSTRB_WR_WSTRB__MASK;
+}
+
+#define REG_DDMA_CFG_STATUS 0x00008030
+#define DDMA_CFG_STATUS_RESERVED_0__MASK 0xfffffe00
+#define DDMA_CFG_STATUS_RESERVED_0__SHIFT 9
+static inline uint32_t DDMA_CFG_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_CFG_STATUS_RESERVED_0__SHIFT) & DDMA_CFG_STATUS_RESERVED_0__MASK;
+}
+#define DDMA_CFG_STATUS_IDEL__MASK 0x00000100
+#define DDMA_CFG_STATUS_IDEL__SHIFT 8
+static inline uint32_t DDMA_CFG_STATUS_IDEL(uint32_t val)
+{
+ return ((val) << DDMA_CFG_STATUS_IDEL__SHIFT) & DDMA_CFG_STATUS_IDEL__MASK;
+}
+#define DDMA_CFG_STATUS_RESERVED_1__MASK 0x000000ff
+#define DDMA_CFG_STATUS_RESERVED_1__SHIFT 0
+static inline uint32_t DDMA_CFG_STATUS_RESERVED_1(uint32_t val)
+{
+ return ((val) << DDMA_CFG_STATUS_RESERVED_1__SHIFT) & DDMA_CFG_STATUS_RESERVED_1__MASK;
+}
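+
+/*
+ * Note: the IDEL (sic) spelling in the DDMA/SDMA_CFG_STATUS fields is kept
+ * as found in the generated register description (see the
+ * __ROCKET_REGISTERS_XML__ guard).
+ */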
+
+#define REG_SDMA_CFG_OUTSTANDING 0x00009000
+#define SDMA_CFG_OUTSTANDING_RESERVED_0__MASK 0xffff0000
+#define SDMA_CFG_OUTSTANDING_RESERVED_0__SHIFT 16
+static inline uint32_t SDMA_CFG_OUTSTANDING_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_CFG_OUTSTANDING_RESERVED_0__SHIFT) & SDMA_CFG_OUTSTANDING_RESERVED_0__MASK;
+}
+#define SDMA_CFG_OUTSTANDING_WR_OS_CNT__MASK 0x0000ff00
+#define SDMA_CFG_OUTSTANDING_WR_OS_CNT__SHIFT 8
+static inline uint32_t SDMA_CFG_OUTSTANDING_WR_OS_CNT(uint32_t val)
+{
+ return ((val) << SDMA_CFG_OUTSTANDING_WR_OS_CNT__SHIFT) & SDMA_CFG_OUTSTANDING_WR_OS_CNT__MASK;
+}
+#define SDMA_CFG_OUTSTANDING_RD_OS_CNT__MASK 0x000000ff
+#define SDMA_CFG_OUTSTANDING_RD_OS_CNT__SHIFT 0
+static inline uint32_t SDMA_CFG_OUTSTANDING_RD_OS_CNT(uint32_t val)
+{
+ return ((val) << SDMA_CFG_OUTSTANDING_RD_OS_CNT__SHIFT) & SDMA_CFG_OUTSTANDING_RD_OS_CNT__MASK;
+}
+
+#define REG_SDMA_RD_WEIGHT_0 0x00009004
+#define SDMA_RD_WEIGHT_0_RD_WEIGHT_PDP__MASK 0xff000000
+#define SDMA_RD_WEIGHT_0_RD_WEIGHT_PDP__SHIFT 24
+static inline uint32_t SDMA_RD_WEIGHT_0_RD_WEIGHT_PDP(uint32_t val)
+{
+ return ((val) << SDMA_RD_WEIGHT_0_RD_WEIGHT_PDP__SHIFT) & SDMA_RD_WEIGHT_0_RD_WEIGHT_PDP__MASK;
+}
+#define SDMA_RD_WEIGHT_0_RD_WEIGHT_DPU__MASK 0x00ff0000
+#define SDMA_RD_WEIGHT_0_RD_WEIGHT_DPU__SHIFT 16
+static inline uint32_t SDMA_RD_WEIGHT_0_RD_WEIGHT_DPU(uint32_t val)
+{
+ return ((val) << SDMA_RD_WEIGHT_0_RD_WEIGHT_DPU__SHIFT) & SDMA_RD_WEIGHT_0_RD_WEIGHT_DPU__MASK;
+}
+#define SDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL__MASK 0x0000ff00
+#define SDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL__SHIFT 8
+static inline uint32_t SDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL(uint32_t val)
+{
+ return ((val) << SDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL__SHIFT) & SDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL__MASK;
+}
+#define SDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE__MASK 0x000000ff
+#define SDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE__SHIFT 0
+static inline uint32_t SDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE(uint32_t val)
+{
+ return ((val) << SDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE__SHIFT) & SDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE__MASK;
+}
+
+#define REG_SDMA_WR_WEIGHT_0 0x00009008
+#define SDMA_WR_WEIGHT_0_RESERVED_0__MASK 0xffff0000
+#define SDMA_WR_WEIGHT_0_RESERVED_0__SHIFT 16
+static inline uint32_t SDMA_WR_WEIGHT_0_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_WR_WEIGHT_0_RESERVED_0__SHIFT) & SDMA_WR_WEIGHT_0_RESERVED_0__MASK;
+}
+#define SDMA_WR_WEIGHT_0_WR_WEIGHT_PDP__MASK 0x0000ff00
+#define SDMA_WR_WEIGHT_0_WR_WEIGHT_PDP__SHIFT 8
+static inline uint32_t SDMA_WR_WEIGHT_0_WR_WEIGHT_PDP(uint32_t val)
+{
+ return ((val) << SDMA_WR_WEIGHT_0_WR_WEIGHT_PDP__SHIFT) & SDMA_WR_WEIGHT_0_WR_WEIGHT_PDP__MASK;
+}
+#define SDMA_WR_WEIGHT_0_WR_WEIGHT_DPU__MASK 0x000000ff
+#define SDMA_WR_WEIGHT_0_WR_WEIGHT_DPU__SHIFT 0
+static inline uint32_t SDMA_WR_WEIGHT_0_WR_WEIGHT_DPU(uint32_t val)
+{
+ return ((val) << SDMA_WR_WEIGHT_0_WR_WEIGHT_DPU__SHIFT) & SDMA_WR_WEIGHT_0_WR_WEIGHT_DPU__MASK;
+}
+
+#define REG_SDMA_CFG_ID_ERROR 0x0000900c
+#define SDMA_CFG_ID_ERROR_RESERVED_0__MASK 0xfffffc00
+#define SDMA_CFG_ID_ERROR_RESERVED_0__SHIFT 10
+static inline uint32_t SDMA_CFG_ID_ERROR_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_CFG_ID_ERROR_RESERVED_0__SHIFT) & SDMA_CFG_ID_ERROR_RESERVED_0__MASK;
+}
+#define SDMA_CFG_ID_ERROR_WR_RESP_ID__MASK 0x000003c0
+#define SDMA_CFG_ID_ERROR_WR_RESP_ID__SHIFT 6
+static inline uint32_t SDMA_CFG_ID_ERROR_WR_RESP_ID(uint32_t val)
+{
+ return ((val) << SDMA_CFG_ID_ERROR_WR_RESP_ID__SHIFT) & SDMA_CFG_ID_ERROR_WR_RESP_ID__MASK;
+}
+#define SDMA_CFG_ID_ERROR_RESERVED_1__MASK 0x00000020
+#define SDMA_CFG_ID_ERROR_RESERVED_1__SHIFT 5
+static inline uint32_t SDMA_CFG_ID_ERROR_RESERVED_1(uint32_t val)
+{
+ return ((val) << SDMA_CFG_ID_ERROR_RESERVED_1__SHIFT) & SDMA_CFG_ID_ERROR_RESERVED_1__MASK;
+}
+#define SDMA_CFG_ID_ERROR_RD_RESP_ID__MASK 0x0000001f
+#define SDMA_CFG_ID_ERROR_RD_RESP_ID__SHIFT 0
+static inline uint32_t SDMA_CFG_ID_ERROR_RD_RESP_ID(uint32_t val)
+{
+ return ((val) << SDMA_CFG_ID_ERROR_RD_RESP_ID__SHIFT) & SDMA_CFG_ID_ERROR_RD_RESP_ID__MASK;
+}
+
+#define REG_SDMA_RD_WEIGHT_1 0x00009010
+#define SDMA_RD_WEIGHT_1_RESERVED_0__MASK 0xffffff00
+#define SDMA_RD_WEIGHT_1_RESERVED_0__SHIFT 8
+static inline uint32_t SDMA_RD_WEIGHT_1_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_RD_WEIGHT_1_RESERVED_0__SHIFT) & SDMA_RD_WEIGHT_1_RESERVED_0__MASK;
+}
+#define SDMA_RD_WEIGHT_1_RD_WEIGHT_PC__MASK 0x000000ff
+#define SDMA_RD_WEIGHT_1_RD_WEIGHT_PC__SHIFT 0
+static inline uint32_t SDMA_RD_WEIGHT_1_RD_WEIGHT_PC(uint32_t val)
+{
+ return ((val) << SDMA_RD_WEIGHT_1_RD_WEIGHT_PC__SHIFT) & SDMA_RD_WEIGHT_1_RD_WEIGHT_PC__MASK;
+}
+
+#define REG_SDMA_CFG_DMA_FIFO_CLR 0x00009014
+#define SDMA_CFG_DMA_FIFO_CLR_RESERVED_0__MASK 0xfffffffe
+#define SDMA_CFG_DMA_FIFO_CLR_RESERVED_0__SHIFT 1
+static inline uint32_t SDMA_CFG_DMA_FIFO_CLR_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_FIFO_CLR_RESERVED_0__SHIFT) & SDMA_CFG_DMA_FIFO_CLR_RESERVED_0__MASK;
+}
+#define SDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR__MASK 0x00000001
+#define SDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR__SHIFT 0
+static inline uint32_t SDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR__SHIFT) & SDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR__MASK;
+}
+
+#define REG_SDMA_CFG_DMA_ARB 0x00009018
+#define SDMA_CFG_DMA_ARB_RESERVED_0__MASK 0xfffffc00
+#define SDMA_CFG_DMA_ARB_RESERVED_0__SHIFT 10
+static inline uint32_t SDMA_CFG_DMA_ARB_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_ARB_RESERVED_0__SHIFT) & SDMA_CFG_DMA_ARB_RESERVED_0__MASK;
+}
+#define SDMA_CFG_DMA_ARB_WR_ARBIT_MODEL__MASK 0x00000200
+#define SDMA_CFG_DMA_ARB_WR_ARBIT_MODEL__SHIFT 9
+static inline uint32_t SDMA_CFG_DMA_ARB_WR_ARBIT_MODEL(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_ARB_WR_ARBIT_MODEL__SHIFT) & SDMA_CFG_DMA_ARB_WR_ARBIT_MODEL__MASK;
+}
+#define SDMA_CFG_DMA_ARB_RD_ARBIT_MODEL__MASK 0x00000100
+#define SDMA_CFG_DMA_ARB_RD_ARBIT_MODEL__SHIFT 8
+static inline uint32_t SDMA_CFG_DMA_ARB_RD_ARBIT_MODEL(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_ARB_RD_ARBIT_MODEL__SHIFT) & SDMA_CFG_DMA_ARB_RD_ARBIT_MODEL__MASK;
+}
+#define SDMA_CFG_DMA_ARB_RESERVED_1__MASK 0x00000080
+#define SDMA_CFG_DMA_ARB_RESERVED_1__SHIFT 7
+static inline uint32_t SDMA_CFG_DMA_ARB_RESERVED_1(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_ARB_RESERVED_1__SHIFT) & SDMA_CFG_DMA_ARB_RESERVED_1__MASK;
+}
+#define SDMA_CFG_DMA_ARB_WR_FIX_ARB__MASK 0x00000070
+#define SDMA_CFG_DMA_ARB_WR_FIX_ARB__SHIFT 4
+static inline uint32_t SDMA_CFG_DMA_ARB_WR_FIX_ARB(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_ARB_WR_FIX_ARB__SHIFT) & SDMA_CFG_DMA_ARB_WR_FIX_ARB__MASK;
+}
+#define SDMA_CFG_DMA_ARB_RESERVED_2__MASK 0x00000008
+#define SDMA_CFG_DMA_ARB_RESERVED_2__SHIFT 3
+static inline uint32_t SDMA_CFG_DMA_ARB_RESERVED_2(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_ARB_RESERVED_2__SHIFT) & SDMA_CFG_DMA_ARB_RESERVED_2__MASK;
+}
+#define SDMA_CFG_DMA_ARB_RD_FIX_ARB__MASK 0x00000007
+#define SDMA_CFG_DMA_ARB_RD_FIX_ARB__SHIFT 0
+static inline uint32_t SDMA_CFG_DMA_ARB_RD_FIX_ARB(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_ARB_RD_FIX_ARB__SHIFT) & SDMA_CFG_DMA_ARB_RD_FIX_ARB__MASK;
+}
+
+#define REG_SDMA_CFG_DMA_RD_QOS 0x00009020
+#define SDMA_CFG_DMA_RD_QOS_RESERVED_0__MASK 0xfffffc00
+#define SDMA_CFG_DMA_RD_QOS_RESERVED_0__SHIFT 10
+static inline uint32_t SDMA_CFG_DMA_RD_QOS_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_QOS_RESERVED_0__SHIFT) & SDMA_CFG_DMA_RD_QOS_RESERVED_0__MASK;
+}
+#define SDMA_CFG_DMA_RD_QOS_RD_PC_QOS__MASK 0x00000300
+#define SDMA_CFG_DMA_RD_QOS_RD_PC_QOS__SHIFT 8
+static inline uint32_t SDMA_CFG_DMA_RD_QOS_RD_PC_QOS(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_QOS_RD_PC_QOS__SHIFT) & SDMA_CFG_DMA_RD_QOS_RD_PC_QOS__MASK;
+}
+#define SDMA_CFG_DMA_RD_QOS_RD_PPU_QOS__MASK 0x000000c0
+#define SDMA_CFG_DMA_RD_QOS_RD_PPU_QOS__SHIFT 6
+static inline uint32_t SDMA_CFG_DMA_RD_QOS_RD_PPU_QOS(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_QOS_RD_PPU_QOS__SHIFT) & SDMA_CFG_DMA_RD_QOS_RD_PPU_QOS__MASK;
+}
+#define SDMA_CFG_DMA_RD_QOS_RD_DPU_QOS__MASK 0x00000030
+#define SDMA_CFG_DMA_RD_QOS_RD_DPU_QOS__SHIFT 4
+static inline uint32_t SDMA_CFG_DMA_RD_QOS_RD_DPU_QOS(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_QOS_RD_DPU_QOS__SHIFT) & SDMA_CFG_DMA_RD_QOS_RD_DPU_QOS__MASK;
+}
+#define SDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS__MASK 0x0000000c
+#define SDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS__SHIFT 2
+static inline uint32_t SDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS__SHIFT) & SDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS__MASK;
+}
+#define SDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS__MASK 0x00000003
+#define SDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS__SHIFT 0
+static inline uint32_t SDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS__SHIFT) & SDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS__MASK;
+}
+
+#define REG_SDMA_CFG_DMA_RD_CFG 0x00009024
+#define SDMA_CFG_DMA_RD_CFG_RESERVED_0__MASK 0xffffe000
+#define SDMA_CFG_DMA_RD_CFG_RESERVED_0__SHIFT 13
+static inline uint32_t SDMA_CFG_DMA_RD_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_CFG_RESERVED_0__SHIFT) & SDMA_CFG_DMA_RD_CFG_RESERVED_0__MASK;
+}
+#define SDMA_CFG_DMA_RD_CFG_RD_ARLOCK__MASK 0x00001000
+#define SDMA_CFG_DMA_RD_CFG_RD_ARLOCK__SHIFT 12
+static inline uint32_t SDMA_CFG_DMA_RD_CFG_RD_ARLOCK(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_CFG_RD_ARLOCK__SHIFT) & SDMA_CFG_DMA_RD_CFG_RD_ARLOCK__MASK;
+}
+#define SDMA_CFG_DMA_RD_CFG_RD_ARCACHE__MASK 0x00000f00
+#define SDMA_CFG_DMA_RD_CFG_RD_ARCACHE__SHIFT 8
+static inline uint32_t SDMA_CFG_DMA_RD_CFG_RD_ARCACHE(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_CFG_RD_ARCACHE__SHIFT) & SDMA_CFG_DMA_RD_CFG_RD_ARCACHE__MASK;
+}
+#define SDMA_CFG_DMA_RD_CFG_RD_ARPROT__MASK 0x000000e0
+#define SDMA_CFG_DMA_RD_CFG_RD_ARPROT__SHIFT 5
+static inline uint32_t SDMA_CFG_DMA_RD_CFG_RD_ARPROT(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_CFG_RD_ARPROT__SHIFT) & SDMA_CFG_DMA_RD_CFG_RD_ARPROT__MASK;
+}
+#define SDMA_CFG_DMA_RD_CFG_RD_ARBURST__MASK 0x00000018
+#define SDMA_CFG_DMA_RD_CFG_RD_ARBURST__SHIFT 3
+static inline uint32_t SDMA_CFG_DMA_RD_CFG_RD_ARBURST(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_CFG_RD_ARBURST__SHIFT) & SDMA_CFG_DMA_RD_CFG_RD_ARBURST__MASK;
+}
+#define SDMA_CFG_DMA_RD_CFG_RD_ARSIZE__MASK 0x00000007
+#define SDMA_CFG_DMA_RD_CFG_RD_ARSIZE__SHIFT 0
+static inline uint32_t SDMA_CFG_DMA_RD_CFG_RD_ARSIZE(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_CFG_RD_ARSIZE__SHIFT) & SDMA_CFG_DMA_RD_CFG_RD_ARSIZE__MASK;
+}
+
+#define REG_SDMA_CFG_DMA_WR_CFG 0x00009028
+#define SDMA_CFG_DMA_WR_CFG_RESERVED_0__MASK 0xffffe000
+#define SDMA_CFG_DMA_WR_CFG_RESERVED_0__SHIFT 13
+static inline uint32_t SDMA_CFG_DMA_WR_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_WR_CFG_RESERVED_0__SHIFT) & SDMA_CFG_DMA_WR_CFG_RESERVED_0__MASK;
+}
+#define SDMA_CFG_DMA_WR_CFG_WR_AWLOCK__MASK 0x00001000
+#define SDMA_CFG_DMA_WR_CFG_WR_AWLOCK__SHIFT 12
+static inline uint32_t SDMA_CFG_DMA_WR_CFG_WR_AWLOCK(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_WR_CFG_WR_AWLOCK__SHIFT) & SDMA_CFG_DMA_WR_CFG_WR_AWLOCK__MASK;
+}
+#define SDMA_CFG_DMA_WR_CFG_WR_AWCACHE__MASK 0x00000f00
+#define SDMA_CFG_DMA_WR_CFG_WR_AWCACHE__SHIFT 8
+static inline uint32_t SDMA_CFG_DMA_WR_CFG_WR_AWCACHE(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_WR_CFG_WR_AWCACHE__SHIFT) & SDMA_CFG_DMA_WR_CFG_WR_AWCACHE__MASK;
+}
+#define SDMA_CFG_DMA_WR_CFG_WR_AWPROT__MASK 0x000000e0
+#define SDMA_CFG_DMA_WR_CFG_WR_AWPROT__SHIFT 5
+static inline uint32_t SDMA_CFG_DMA_WR_CFG_WR_AWPROT(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_WR_CFG_WR_AWPROT__SHIFT) & SDMA_CFG_DMA_WR_CFG_WR_AWPROT__MASK;
+}
+#define SDMA_CFG_DMA_WR_CFG_WR_AWBURST__MASK 0x00000018
+#define SDMA_CFG_DMA_WR_CFG_WR_AWBURST__SHIFT 3
+static inline uint32_t SDMA_CFG_DMA_WR_CFG_WR_AWBURST(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_WR_CFG_WR_AWBURST__SHIFT) & SDMA_CFG_DMA_WR_CFG_WR_AWBURST__MASK;
+}
+#define SDMA_CFG_DMA_WR_CFG_WR_AWSIZE__MASK 0x00000007
+#define SDMA_CFG_DMA_WR_CFG_WR_AWSIZE__SHIFT 0
+static inline uint32_t SDMA_CFG_DMA_WR_CFG_WR_AWSIZE(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_WR_CFG_WR_AWSIZE__SHIFT) & SDMA_CFG_DMA_WR_CFG_WR_AWSIZE__MASK;
+}
+
+#define REG_SDMA_CFG_DMA_WSTRB 0x0000902c
+#define SDMA_CFG_DMA_WSTRB_WR_WSTRB__MASK 0xffffffff
+#define SDMA_CFG_DMA_WSTRB_WR_WSTRB__SHIFT 0
+static inline uint32_t SDMA_CFG_DMA_WSTRB_WR_WSTRB(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_WSTRB_WR_WSTRB__SHIFT) & SDMA_CFG_DMA_WSTRB_WR_WSTRB__MASK;
+}
+
+#define REG_SDMA_CFG_STATUS 0x00009030
+#define SDMA_CFG_STATUS_RESERVED_0__MASK 0xfffffe00
+#define SDMA_CFG_STATUS_RESERVED_0__SHIFT 9
+static inline uint32_t SDMA_CFG_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_CFG_STATUS_RESERVED_0__SHIFT) & SDMA_CFG_STATUS_RESERVED_0__MASK;
+}
+#define SDMA_CFG_STATUS_IDEL__MASK 0x00000100
+#define SDMA_CFG_STATUS_IDEL__SHIFT 8
+static inline uint32_t SDMA_CFG_STATUS_IDEL(uint32_t val)
+{
+ return ((val) << SDMA_CFG_STATUS_IDEL__SHIFT) & SDMA_CFG_STATUS_IDEL__MASK;
+}
+#define SDMA_CFG_STATUS_RESERVED_1__MASK 0x000000ff
+#define SDMA_CFG_STATUS_RESERVED_1__SHIFT 0
+static inline uint32_t SDMA_CFG_STATUS_RESERVED_1(uint32_t val)
+{
+ return ((val) << SDMA_CFG_STATUS_RESERVED_1__SHIFT) & SDMA_CFG_STATUS_RESERVED_1__MASK;
+}
+
+#define REG_GLOBAL_OPERATION_ENABLE 0x0000f008
+#define GLOBAL_OPERATION_ENABLE_RESERVED_0__MASK 0xffffff80
+#define GLOBAL_OPERATION_ENABLE_RESERVED_0__SHIFT 7
+static inline uint32_t GLOBAL_OPERATION_ENABLE_RESERVED_0(uint32_t val)
+{
+ return ((val) << GLOBAL_OPERATION_ENABLE_RESERVED_0__SHIFT) & GLOBAL_OPERATION_ENABLE_RESERVED_0__MASK;
+}
+#define GLOBAL_OPERATION_ENABLE_PPU_RDMA_OP_EN__MASK 0x00000040
+#define GLOBAL_OPERATION_ENABLE_PPU_RDMA_OP_EN__SHIFT 6
+static inline uint32_t GLOBAL_OPERATION_ENABLE_PPU_RDMA_OP_EN(uint32_t val)
+{
+ return ((val) << GLOBAL_OPERATION_ENABLE_PPU_RDMA_OP_EN__SHIFT) & GLOBAL_OPERATION_ENABLE_PPU_RDMA_OP_EN__MASK;
+}
+#define GLOBAL_OPERATION_ENABLE_PPU_OP_EN__MASK 0x00000020
+#define GLOBAL_OPERATION_ENABLE_PPU_OP_EN__SHIFT 5
+static inline uint32_t GLOBAL_OPERATION_ENABLE_PPU_OP_EN(uint32_t val)
+{
+ return ((val) << GLOBAL_OPERATION_ENABLE_PPU_OP_EN__SHIFT) & GLOBAL_OPERATION_ENABLE_PPU_OP_EN__MASK;
+}
+#define GLOBAL_OPERATION_ENABLE_DPU_RDMA_OP_EN__MASK 0x00000010
+#define GLOBAL_OPERATION_ENABLE_DPU_RDMA_OP_EN__SHIFT 4
+static inline uint32_t GLOBAL_OPERATION_ENABLE_DPU_RDMA_OP_EN(uint32_t val)
+{
+ return ((val) << GLOBAL_OPERATION_ENABLE_DPU_RDMA_OP_EN__SHIFT) & GLOBAL_OPERATION_ENABLE_DPU_RDMA_OP_EN__MASK;
+}
+#define GLOBAL_OPERATION_ENABLE_DPU_OP_EN__MASK 0x00000008
+#define GLOBAL_OPERATION_ENABLE_DPU_OP_EN__SHIFT 3
+static inline uint32_t GLOBAL_OPERATION_ENABLE_DPU_OP_EN(uint32_t val)
+{
+ return ((val) << GLOBAL_OPERATION_ENABLE_DPU_OP_EN__SHIFT) & GLOBAL_OPERATION_ENABLE_DPU_OP_EN__MASK;
+}
+#define GLOBAL_OPERATION_ENABLE_CORE_OP_EN__MASK 0x00000004
+#define GLOBAL_OPERATION_ENABLE_CORE_OP_EN__SHIFT 2
+static inline uint32_t GLOBAL_OPERATION_ENABLE_CORE_OP_EN(uint32_t val)
+{
+ return ((val) << GLOBAL_OPERATION_ENABLE_CORE_OP_EN__SHIFT) & GLOBAL_OPERATION_ENABLE_CORE_OP_EN__MASK;
+}
+#define GLOBAL_OPERATION_ENABLE_RESERVED_1__MASK 0x00000002
+#define GLOBAL_OPERATION_ENABLE_RESERVED_1__SHIFT 1
+static inline uint32_t GLOBAL_OPERATION_ENABLE_RESERVED_1(uint32_t val)
+{
+ return ((val) << GLOBAL_OPERATION_ENABLE_RESERVED_1__SHIFT) & GLOBAL_OPERATION_ENABLE_RESERVED_1__MASK;
+}
+#define GLOBAL_OPERATION_ENABLE_CNA_OP_EN__MASK 0x00000001
+#define GLOBAL_OPERATION_ENABLE_CNA_OP_EN__SHIFT 0
+static inline uint32_t GLOBAL_OPERATION_ENABLE_CNA_OP_EN(uint32_t val)
+{
+ return ((val) << GLOBAL_OPERATION_ENABLE_CNA_OP_EN__SHIFT) & GLOBAL_OPERATION_ENABLE_CNA_OP_EN__MASK;
+}
+
+#endif /* __ROCKET_REGISTERS_XML__ */
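Each generated accessor above shifts a field value into position and masks off out-of-range bits, so a full register value can be composed by OR-ing packed fields together. A minimal sketch of how a driver might use these helpers (the function name and MMIO base are illustrative, not part of the patch):

static void rocket_enable_core_and_cna(void __iomem *base)
{
	/* Each helper masks its own field, so packed fields simply OR together. */
	u32 val = GLOBAL_OPERATION_ENABLE_CORE_OP_EN(1) |
		  GLOBAL_OPERATION_ENABLE_CNA_OP_EN(1);

	writel(val, base + REG_GLOBAL_OPERATION_ENABLE);
}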
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index b594780a57d7..2cdbd08b30e4 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -547,6 +547,10 @@ if ARM64
source "drivers/acpi/arm64/Kconfig"
endif
+if RISCV
+source "drivers/acpi/riscv/Kconfig"
+endif
+
config ACPI_PPTT
bool
diff --git a/drivers/acpi/acpi_dbg.c b/drivers/acpi/acpi_dbg.c
index d50261d05f3a..515b20d0b698 100644
--- a/drivers/acpi/acpi_dbg.c
+++ b/drivers/acpi/acpi_dbg.c
@@ -569,11 +569,11 @@ static int acpi_aml_release(struct inode *inode, struct file *file)
return 0;
}
-static int acpi_aml_read_user(char __user *buf, int len)
+static ssize_t acpi_aml_read_user(char __user *buf, size_t len)
{
- int ret;
struct circ_buf *crc = &acpi_aml_io.out_crc;
- int n;
+ ssize_t ret;
+ size_t n;
char *p;
ret = acpi_aml_lock_read(crc, ACPI_AML_OUT_USER);
@@ -582,7 +582,7 @@ static int acpi_aml_read_user(char __user *buf, int len)
/* sync head before removing logs */
smp_rmb();
p = &crc->buf[crc->tail];
- n = min(len, circ_count_to_end(crc));
+ n = min_t(size_t, len, circ_count_to_end(crc));
if (copy_to_user(buf, p, n)) {
ret = -EFAULT;
goto out;
@@ -599,8 +599,8 @@ out:
static ssize_t acpi_aml_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
- int ret = 0;
- int size = 0;
+ ssize_t ret = 0;
+ ssize_t size = 0;
if (!count)
return 0;
@@ -639,11 +639,11 @@ again:
return size > 0 ? size : ret;
}
-static int acpi_aml_write_user(const char __user *buf, int len)
+static ssize_t acpi_aml_write_user(const char __user *buf, size_t len)
{
- int ret;
struct circ_buf *crc = &acpi_aml_io.in_crc;
- int n;
+ ssize_t ret;
+ size_t n;
char *p;
ret = acpi_aml_lock_write(crc, ACPI_AML_IN_USER);
@@ -652,7 +652,7 @@ static int acpi_aml_write_user(const char __user *buf, int len)
/* sync tail before inserting cmds */
smp_mb();
p = &crc->buf[crc->head];
- n = min(len, circ_space_to_end(crc));
+ n = min_t(size_t, len, circ_space_to_end(crc));
if (copy_from_user(p, buf, n)) {
ret = -EFAULT;
goto out;
@@ -663,14 +663,14 @@ static int acpi_aml_write_user(const char __user *buf, int len)
ret = n;
out:
acpi_aml_unlock_fifo(ACPI_AML_IN_USER, ret >= 0);
- return n;
+ return ret;
}
static ssize_t acpi_aml_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
- int ret = 0;
- int size = 0;
+ ssize_t ret = 0;
+ ssize_t size = 0;
if (!count)
return 0;
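The conversion above widens the byte counts from int to size_t/ssize_t; once len is unsigned, a plain min() against the int-valued circular-buffer counts would trip the kernel's mixed-signedness check in min(), hence min_t(size_t, ...). A reduced sketch of the idiom (names illustrative):

static size_t clamp_to_avail(size_t len, int avail)
{
	/*
	 * min() rejects size_t vs. int at compile time; min_t() compares
	 * both as size_t. Safe here because avail is a non-negative
	 * circular-buffer count.
	 */
	return min_t(size_t, len, avail);
}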
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index 2a99f5eb6962..7ec1dc04fd11 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -815,7 +815,7 @@ bool acpi_processor_claim_cst_control(void)
cst_control_claimed = true;
return true;
}
-EXPORT_SYMBOL_GPL(acpi_processor_claim_cst_control);
+EXPORT_SYMBOL_NS_GPL(acpi_processor_claim_cst_control, "ACPI_PROCESSOR_IDLE");
/**
* acpi_processor_evaluate_cst - Evaluate the processor _CST control method.
@@ -994,5 +994,5 @@ end:
return ret;
}
-EXPORT_SYMBOL_GPL(acpi_processor_evaluate_cst);
+EXPORT_SYMBOL_NS_GPL(acpi_processor_evaluate_cst, "ACPI_PROCESSOR_IDLE");
#endif /* CONFIG_ACPI_PROCESSOR_CSTATE */
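EXPORT_SYMBOL_NS_GPL() moves the two _CST helpers into the ACPI_PROCESSOR_IDLE symbol namespace, so only modules that explicitly import that namespace can link against them; processor_idle.c gains exactly such an import later in this series. The consumer side looks like this:

/*
 * In any module calling acpi_processor_claim_cst_control() or
 * acpi_processor_evaluate_cst(); without this, modpost warns and the
 * module is rejected at load time.
 */
MODULE_IMPORT_NS("ACPI_PROCESSOR_IDLE");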
diff --git a/drivers/acpi/acpi_tad.c b/drivers/acpi/acpi_tad.c
index 91d7d90c47da..33418dd6768a 100644
--- a/drivers/acpi/acpi_tad.c
+++ b/drivers/acpi/acpi_tad.c
@@ -565,6 +565,9 @@ static void acpi_tad_remove(struct platform_device *pdev)
pm_runtime_get_sync(dev);
+ if (dd->capabilities & ACPI_TAD_RT)
+ sysfs_remove_group(&dev->kobj, &acpi_tad_time_attr_group);
+
if (dd->capabilities & ACPI_TAD_DC_WAKE)
sysfs_remove_group(&dev->kobj, &acpi_tad_dc_attr_group);
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index fe6d38b43c9a..91241bd6917a 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -37,7 +37,7 @@ struct acpi_db_argument_info {
struct acpi_db_execute_walk {
u32 count;
u32 max_count;
- char name_seg[ACPI_NAMESEG_SIZE + 1] ACPI_NONSTRING;
+ char name_seg[ACPI_NAMESEG_SIZE + 1];
};
#define PARAM_LIST(pl) pl
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 0c41f0097e8d..f98640086f4e 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -1141,7 +1141,7 @@ struct acpi_port_info {
#define ACPI_RESOURCE_NAME_PIN_GROUP_FUNCTION 0x91
#define ACPI_RESOURCE_NAME_PIN_GROUP_CONFIG 0x92
#define ACPI_RESOURCE_NAME_CLOCK_INPUT 0x93
-#define ACPI_RESOURCE_NAME_LARGE_MAX 0x94
+#define ACPI_RESOURCE_NAME_LARGE_MAX 0x93
/*****************************************************************************
*
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index 76c5ed02e916..da2c45880cc7 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -450,7 +450,8 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = {
{{"_DSM",
METHOD_4ARGS(ACPI_TYPE_BUFFER, ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER,
- ACPI_TYPE_ANY) | ARG_COUNT_IS_MINIMUM,
+ ACPI_TYPE_ANY | ACPI_TYPE_PACKAGE) |
+ ARG_COUNT_IS_MINIMUM,
METHOD_RETURNS(ACPI_RTYPE_ALL)}}, /* Must return a value, but it can be of any type */
{{"_DSS", METHOD_1ARGS(ACPI_TYPE_INTEGER),
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index fef6fb29ece4..45ec32e81903 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -462,7 +462,6 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
struct acpi_walk_state *next_walk_state = NULL;
union acpi_operand_object *obj_desc;
struct acpi_evaluate_info *info;
- u32 i;
ACPI_FUNCTION_TRACE_PTR(ds_call_control_method, this_walk_state);
@@ -484,10 +483,17 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
}
if (this_walk_state->num_operands < obj_desc->method.param_count) {
- ACPI_ERROR((AE_INFO, "Missing argument for method [%4.4s]",
+ ACPI_ERROR((AE_INFO, "Missing argument(s) for method [%4.4s]",
acpi_ut_get_node_name(method_node)));
- return_ACPI_STATUS(AE_AML_UNINITIALIZED_ARG);
+ return_ACPI_STATUS(AE_AML_TOO_FEW_ARGUMENTS);
+ } else if (this_walk_state->num_operands > obj_desc->method.param_count) {
+ ACPI_ERROR((AE_INFO, "Too many arguments for method [%4.4s]",
+ acpi_ut_get_node_name(method_node)));
+
+ return_ACPI_STATUS(AE_AML_TOO_MANY_ARGUMENTS);
}
/* Init for new method, possibly wait on method mutex */
@@ -546,14 +552,7 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
* Delete the operands on the previous walkstate operand stack
* (they were copied to new objects)
*/
- for (i = 0; i < obj_desc->method.param_count; i++) {
- acpi_ut_remove_reference(this_walk_state->operands[i]);
- this_walk_state->operands[i] = NULL;
- }
-
- /* Clear the operand stack */
-
- this_walk_state->num_operands = 0;
+ acpi_ds_clear_operands(this_walk_state);
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"**** Begin nested execution of [%4.4s] **** WalkState=%p\n",
diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c
index fa3e0d00d1ca..df2a4ab0e0da 100644
--- a/drivers/acpi/acpica/evglock.c
+++ b/drivers/acpi/acpica/evglock.c
@@ -42,6 +42,10 @@ acpi_status acpi_ev_init_global_lock_handler(void)
return_ACPI_STATUS(AE_OK);
}
+ if (!acpi_gbl_use_global_lock) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
/* Attempt installation of the global lock handler */
status = acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL,
diff --git a/drivers/acpi/acpica/psopinfo.c b/drivers/acpi/acpica/psopinfo.c
index 1c8044ffcb97..532ea307a675 100644
--- a/drivers/acpi/acpica/psopinfo.c
+++ b/drivers/acpi/acpica/psopinfo.c
@@ -34,7 +34,7 @@ static const u8 acpi_gbl_argument_count[] =
const struct acpi_opcode_info *acpi_ps_get_opcode_info(u16 opcode)
{
-#ifdef ACPI_DEBUG_OUTPUT
+#if defined ACPI_ASL_COMPILER && defined ACPI_DEBUG_OUTPUT
const char *opcode_name = "Unknown AML opcode";
#endif
@@ -102,11 +102,11 @@ const struct acpi_opcode_info *acpi_ps_get_opcode_info(u16 opcode)
default:
break;
}
-#endif
/* Unknown AML opcode */
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%s [%4.4X]\n", opcode_name, opcode));
+#endif
return (&acpi_gbl_aml_op_info[_UNK]);
}
diff --git a/drivers/acpi/acpica/tbprint.c b/drivers/acpi/acpica/tbprint.c
index fd64460a2e26..049f6c2f1e32 100644
--- a/drivers/acpi/acpica/tbprint.c
+++ b/drivers/acpi/acpica/tbprint.c
@@ -121,6 +121,14 @@ acpi_tb_print_table_header(acpi_physical_address address,
ACPI_CAST_PTR(struct acpi_table_rsdp,
header)->revision,
local_header.oem_id));
+ } else if (acpi_gbl_CDAT && !acpi_ut_valid_nameseg(header->signature)) {
+
+ /* CDAT does not use the common ACPI table header */
+
+ ACPI_INFO(("%-4.4s 0x%8.8X%8.8X %06X",
+ ACPI_SIG_CDAT, ACPI_FORMAT_UINT64(address),
+ ACPI_CAST_PTR(struct acpi_table_cdat,
+ header)->length));
} else {
/* Standard ACPI table with full common header */
diff --git a/drivers/acpi/apei/einj-core.c b/drivers/acpi/apei/einj-core.c
index 2561b045acc7..3c87953dbd19 100644
--- a/drivers/acpi/apei/einj-core.c
+++ b/drivers/acpi/apei/einj-core.c
@@ -656,6 +656,43 @@ static int __einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
return rc;
}
+/* Allow almost all types of address except MMIO. */
+static bool is_allowed_range(u64 base_addr, u64 size)
+{
+ int i;
+ /*
+ * MMIO regions are usually claimed with IORESOURCE_MEM + IORES_DESC_NONE.
+ * However, IORES_DESC_NONE is treated like a wildcard when checking
+ * whether a region intersects with a known resource, so do an allow-list
+ * check for IORES_DESCs that are definitely, or most likely, not MMIO.
+ */
+ int non_mmio_desc[] = {
+ IORES_DESC_CRASH_KERNEL,
+ IORES_DESC_ACPI_TABLES,
+ IORES_DESC_ACPI_NV_STORAGE,
+ IORES_DESC_PERSISTENT_MEMORY,
+ IORES_DESC_PERSISTENT_MEMORY_LEGACY,
+ /* Treat IORES_DESC_DEVICE_PRIVATE_MEMORY as MMIO. */
+ IORES_DESC_RESERVED,
+ IORES_DESC_SOFT_RESERVED,
+ };
+
+ if (region_intersects(base_addr, size, IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE)
+ == REGION_INTERSECTS)
+ return true;
+
+ for (i = 0; i < ARRAY_SIZE(non_mmio_desc); ++i) {
+ if (region_intersects(base_addr, size, IORESOURCE_MEM, non_mmio_desc[i])
+ == REGION_INTERSECTS)
+ return true;
+ }
+
+ if (arch_is_platform_page(base_addr))
+ return true;
+
+ return false;
+}
+
/* Inject the specified hardware error */
int einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2, u64 param3,
u64 param4)
@@ -702,19 +739,15 @@ int einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2, u64 param3,
* Disallow crazy address masks that give BIOS leeway to pick
* injection address almost anywhere. Insist on page or
* better granularity and that target address is normal RAM or
- * NVDIMM.
+ * anything else that is not MMIO.
*/
base_addr = param1 & param2;
size = ~param2 + 1;
- if (((param2 & PAGE_MASK) != PAGE_MASK) ||
- ((region_intersects(base_addr, size, IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE)
- != REGION_INTERSECTS) &&
- (region_intersects(base_addr, size, IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY)
- != REGION_INTERSECTS) &&
- (region_intersects(base_addr, size, IORESOURCE_MEM, IORES_DESC_SOFT_RESERVED)
- != REGION_INTERSECTS) &&
- !arch_is_platform_page(base_addr)))
+ if ((param2 & PAGE_MASK) != PAGE_MASK)
+ return -EINVAL;
+
+ if (!is_allowed_range(base_addr, size))
return -EINVAL;
if (is_zero_pfn(base_addr >> PAGE_SHIFT))
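region_intersects() reports whether a (start, size) window overlaps resources of the given type and descriptor, returning REGION_INTERSECTS, REGION_DISJOINT or REGION_MIXED; both the SYSTEM_RAM test and the per-descriptor allow-list above rely on that. The core of the check, reduced to a sketch:

static bool einj_target_is_ram(u64 base, u64 size)
{
	/* True only if the whole window lies in ordinary system RAM. */
	return region_intersects(base, size, IORESOURCE_SYSTEM_RAM,
				 IORES_DESC_NONE) == REGION_INTERSECTS;
}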
diff --git a/drivers/acpi/apei/erst-dbg.c b/drivers/acpi/apei/erst-dbg.c
index 246076341e8c..ff0e8bf8e97a 100644
--- a/drivers/acpi/apei/erst-dbg.c
+++ b/drivers/acpi/apei/erst-dbg.c
@@ -60,9 +60,8 @@ static long erst_dbg_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
switch (cmd) {
case APEI_ERST_CLEAR_RECORD:
- rc = copy_from_user(&record_id, (void __user *)arg,
- sizeof(record_id));
- if (rc)
+ if (copy_from_user(&record_id, (void __user *)arg,
+ sizeof(record_id)))
return -EFAULT;
return erst_clear(record_id);
case APEI_ERST_GET_RECORD_COUNT:
@@ -175,8 +174,7 @@ static ssize_t erst_dbg_write(struct file *filp, const char __user *ubuf,
erst_dbg_buf = p;
erst_dbg_buf_len = usize;
}
- rc = copy_from_user(erst_dbg_buf, ubuf, usize);
- if (rc) {
+ if (copy_from_user(erst_dbg_buf, ubuf, usize)) {
rc = -EFAULT;
goto out;
}
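copy_from_user() returns the number of bytes it could not copy (0 on success), never an errno, so storing its result in rc risked leaking a positive byte count to the caller. The canonical shape, as used above (names illustrative):

static int read_user_record(void *dst, const void __user *ubuf, size_t len)
{
	/* Non-zero means some bytes were not copied, not an error code. */
	if (copy_from_user(dst, ubuf, len))
		return -EFAULT;

	return 0;
}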
diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
index 3961fc47152c..cd199fbe4dc9 100644
--- a/drivers/acpi/device_sysfs.c
+++ b/drivers/acpi/device_sysfs.c
@@ -464,7 +464,7 @@ static ssize_t description_show(struct device *dev,
buf[result++] = '\n';
- kfree(str_obj);
+ ACPI_FREE(str_obj);
return result;
}
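The buffer released here was returned through a struct acpi_buffer set up with ACPI_ALLOCATE_BUFFER, i.e. it came from ACPICA's allocator, so it is freed with ACPI_FREE() to keep the allocate/free interfaces paired rather than calling kfree() directly. The usual pattern (handle and method are illustrative):

struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };

if (ACPI_SUCCESS(acpi_evaluate_object(handle, "_STR", NULL, &buf))) {
	/* ... use buf.pointer ... */
	ACPI_FREE(buf.pointer);		/* pairs with ACPI_ALLOCATE_BUFFER */
}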
diff --git a/drivers/acpi/fan_core.c b/drivers/acpi/fan_core.c
index 095502086b41..04ff608f2ff0 100644
--- a/drivers/acpi/fan_core.c
+++ b/drivers/acpi/fan_core.c
@@ -203,18 +203,6 @@ static const struct thermal_cooling_device_ops fan_cooling_ops = {
* --------------------------------------------------------------------------
*/
-static bool acpi_fan_has_fst(struct acpi_device *device)
-{
- return acpi_has_method(device->handle, "_FST");
-}
-
-static bool acpi_fan_is_acpi4(struct acpi_device *device)
-{
- return acpi_has_method(device->handle, "_FIF") &&
- acpi_has_method(device->handle, "_FPS") &&
- acpi_has_method(device->handle, "_FSL");
-}
-
static int acpi_fan_get_fif(struct acpi_device *device)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -331,9 +319,11 @@ static int acpi_fan_probe(struct platform_device *pdev)
device->driver_data = fan;
platform_set_drvdata(pdev, fan);
- if (acpi_fan_has_fst(device)) {
+ if (acpi_has_method(device->handle, "_FST")) {
fan->has_fst = true;
- fan->acpi4 = acpi_fan_is_acpi4(device);
+ fan->acpi4 = acpi_has_method(device->handle, "_FIF") &&
+ acpi_has_method(device->handle, "_FPS") &&
+ acpi_has_method(device->handle, "_FSL");
}
if (fan->acpi4) {
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index e2781864fdce..63354972ab0b 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -140,6 +140,7 @@ int __acpi_device_uevent_modalias(const struct acpi_device *adev,
/* --------------------------------------------------------------------------
Power Resource
-------------------------------------------------------------------------- */
+void acpi_power_resources_init(void);
void acpi_power_resources_list_free(struct list_head *list);
int acpi_extract_power_resources(union acpi_object *package, unsigned int start,
struct list_head *list);
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 630fe0a34bc6..ad81aa03fe2f 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -22,6 +22,7 @@
#include <linux/acpi.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
+#include <linux/string_choices.h>
struct acpi_prt_entry {
struct acpi_pci_id id;
@@ -468,7 +469,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
dev_dbg(&dev->dev, "PCI INT %c%s -> GSI %u (%s, %s) -> IRQ %d\n",
pin_name(pin), link_desc, gsi,
(triggering == ACPI_LEVEL_SENSITIVE) ? "level" : "edge",
- (polarity == ACPI_ACTIVE_LOW) ? "low" : "high", dev->irq);
+ str_low_high(polarity == ACPI_ACTIVE_LOW), dev->irq);
kfree(entry);
return 0;
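str_low_high() comes from <linux/string_choices.h>: it returns "low" when its argument is true and "high" otherwise, so the message is unchanged while the open-coded ternary goes away. Conceptually:

/* What the helper evaluates to at this call site: */
const char *s = (polarity == ACPI_ACTIVE_LOW) ? "low" : "high";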
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index b7243d7563b1..361a7721a6a8 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -23,6 +23,7 @@
#define pr_fmt(fmt) "ACPI: PM: " fmt
+#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -63,6 +64,9 @@ struct acpi_power_resource_entry {
struct acpi_power_resource *resource;
};
+static bool hp_eb_gp12pxp_quirk;
+static bool unused_power_resources_quirk;
+
static LIST_HEAD(acpi_power_resource_list);
static DEFINE_MUTEX(power_resource_list_lock);
@@ -992,6 +996,38 @@ struct acpi_device *acpi_add_power_resource(acpi_handle handle)
}
#ifdef CONFIG_ACPI_SLEEP
+static bool resource_is_gp12pxp(acpi_handle handle)
+{
+ const char *path;
+ bool ret;
+
+ path = acpi_handle_path(handle);
+ ret = path && strcmp(path, "\\_SB_.PCI0.GP12.PXP_") == 0;
+ kfree(path);
+
+ return ret;
+}
+
+static void acpi_resume_on_eb_gp12pxp(struct acpi_power_resource *resource)
+{
+ acpi_handle_notice(resource->device.handle,
+ "HP EB quirk - turning OFF then ON\n");
+
+ __acpi_power_off(resource);
+ __acpi_power_on(resource);
+
+ /*
+ * Use the same delay as DSDT uses in modem _RST method.
+ *
+ * Otherwise we get "Unable to change power state from unknown to D0,
+ * device inaccessible" error for the modem PCI device after thaw.
+ *
+ * This power resource is normally being enabled only during thaw (once)
+ * so this wait is not a performance issue.
+ */
+ msleep(200);
+}
+
void acpi_resume_power_resources(void)
{
struct acpi_power_resource *resource;
@@ -1013,8 +1049,14 @@ void acpi_resume_power_resources(void)
if (state == ACPI_POWER_RESOURCE_STATE_OFF
&& resource->ref_count) {
- acpi_handle_debug(resource->device.handle, "Turning ON\n");
- __acpi_power_on(resource);
+ if (hp_eb_gp12pxp_quirk &&
+ resource_is_gp12pxp(resource->device.handle)) {
+ acpi_resume_on_eb_gp12pxp(resource);
+ } else {
+ acpi_handle_debug(resource->device.handle,
+ "Turning ON\n");
+ __acpi_power_on(resource);
+ }
}
mutex_unlock(&resource->resource_lock);
@@ -1024,6 +1066,41 @@ void acpi_resume_power_resources(void)
}
#endif
+static const struct dmi_system_id dmi_hp_elitebook_gp12pxp_quirk[] = {
+/*
+ * This laptop (and possibly similar models too) has a power resource called
+ * "GP12.PXP_" for its WWAN modem.
+ *
+ * For this power resource to turn ON power for the modem, a certain
+ * internal flag called "ONEN" needs to be set.
+ * This flag only gets set from this power resource "_OFF" method, while the
+ * actual modem power gets turned off during suspend by "GP12.PTS" method
+ * called from the global "_PTS" (Prepare To Sleep) method.
+ * On the other hand, this power resource "_OFF" method implementation just
+ * sets the aforementioned flag without actually doing anything else (it
+ * doesn't contain any code to actually turn off power).
+ *
+ * The above means that when, at the end of hibernation, we try to turn this
+ * power resource back ON (because its "_STA" method returns 0 while the
+ * resource is still considered in use), its "_ON" method won't do anything,
+ * since the "ONEN" flag is not set.
+ * Overall, this means the modem is dead until the laptop is rebooted: its
+ * power has been cut by "_PTS", and its PCI configuration was lost and cannot
+ * be restored.
+ *
+ * The easiest way to work around the issue is to call this power resource
+ * "_OFF" method before calling the "_ON" method to make sure the "ONEN"
+ * flag gets properly set.
+ */
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 855 G7 Notebook PC"),
+ },
+ },
+ {}
+};
+
static const struct dmi_system_id dmi_leave_unused_power_resources_on[] = {
{
/*
@@ -1046,7 +1123,7 @@ void acpi_turn_off_unused_power_resources(void)
{
struct acpi_power_resource *resource;
- if (dmi_check_system(dmi_leave_unused_power_resources_on))
+ if (unused_power_resources_quirk)
return;
mutex_lock(&power_resource_list_lock);
@@ -1065,3 +1142,10 @@ void acpi_turn_off_unused_power_resources(void)
mutex_unlock(&power_resource_list_lock);
}
+
+void __init acpi_power_resources_init(void)
+{
+ hp_eb_gp12pxp_quirk = dmi_check_system(dmi_hp_elitebook_gp12pxp_quirk);
+ unused_power_resources_quirk =
+ dmi_check_system(dmi_leave_unused_power_resources_on);
+}
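Both DMI tables are now evaluated once at scan time and the results cached in booleans, so the resume and power-off paths above only test a flag instead of re-walking DMI. The general shape of the pattern (all names illustrative):

static const struct dmi_system_id my_quirk_table[] = {
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
		},
	},
	{}
};

static bool my_quirk;

static int __init my_driver_init(void)
{
	my_quirk = dmi_check_system(my_quirk_table);	/* cache once */
	return 0;
}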
diff --git a/drivers/acpi/prmt.c b/drivers/acpi/prmt.c
index be033bbb126a..6792d4385eee 100644
--- a/drivers/acpi/prmt.c
+++ b/drivers/acpi/prmt.c
@@ -150,15 +150,28 @@ acpi_parse_prmt(union acpi_subtable_headers *header, const unsigned long end)
th = &tm->handlers[cur_handler];
guid_copy(&th->guid, (guid_t *)handler_info->handler_guid);
+
+ /*
+ * If handler_address is NULL, print a message and skip the VA lookup
+ * for this handler.
+ */
+ if (unlikely(!handler_info->handler_address)) {
+ pr_info("Skipping handler with NULL address for GUID: %pUL",
+ (guid_t *)handler_info->handler_guid);
+ continue;
+ }
+
th->handler_addr =
(void *)efi_pa_va_lookup(&th->guid, handler_info->handler_address);
/*
- * Print a warning message if handler_addr is zero which is not expected to
- * ever happen.
+ * Print a warning message and skip the remaining VA lookups if
+ * handler_addr is zero, which is not expected to ever happen.
*/
- if (unlikely(!th->handler_addr))
+ if (unlikely(!th->handler_addr)) {
pr_warn("Failed to find VA of handler for GUID: %pUL, PA: 0x%llx",
&th->guid, handler_info->handler_address);
+ continue;
+ }
th->static_data_buffer_addr =
efi_pa_va_lookup(&th->guid, handler_info->static_data_buffer_address);
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 65e779be64ff..5d824435b26b 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -166,8 +166,7 @@ static int __acpi_processor_start(struct acpi_device *device)
if (result && !IS_ENABLED(CONFIG_ACPI_CPU_FREQ_PSS))
dev_dbg(&device->dev, "CPPC data invalid or not present\n");
- if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver)
- acpi_processor_power_init(pr);
+ acpi_processor_power_init(pr);
acpi_pss_perf_init(pr);
@@ -263,6 +262,8 @@ static int __init acpi_processor_driver_init(void)
if (result < 0)
return result;
+ acpi_processor_register_idle_driver();
+
result = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
"acpi/cpu-drv:online",
acpi_soft_cpu_online, NULL);
@@ -301,6 +302,7 @@ static void __exit acpi_processor_driver_exit(void)
cpuhp_remove_state_nocalls(hp_online);
cpuhp_remove_state_nocalls(CPUHP_ACPI_CPUDRV_DEAD);
+ acpi_processor_unregister_idle_driver();
driver_unregister(&acpi_processor_driver);
}
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 2c2dc559e0f8..22b051b94a86 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -51,7 +51,7 @@ module_param(latency_factor, uint, 0644);
static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device);
-struct cpuidle_driver acpi_idle_driver = {
+static struct cpuidle_driver acpi_idle_driver = {
.name = "acpi_idle",
.owner = THIS_MODULE,
};
@@ -998,11 +998,6 @@ end:
return ret;
}
-/*
- * flat_state_cnt - the number of composite LPI states after the process of flattening
- */
-static int flat_state_cnt;
-
/**
* combine_lpi_states - combine local and parent LPI states to form a composite LPI state
*
@@ -1045,9 +1040,10 @@ static void stash_composite_state(struct acpi_lpi_states_array *curr_level,
curr_level->composite_states[curr_level->composite_states_size++] = t;
}
-static int flatten_lpi_states(struct acpi_processor *pr,
- struct acpi_lpi_states_array *curr_level,
- struct acpi_lpi_states_array *prev_level)
+static unsigned int flatten_lpi_states(struct acpi_processor *pr,
+ unsigned int flat_state_cnt,
+ struct acpi_lpi_states_array *curr_level,
+ struct acpi_lpi_states_array *prev_level)
{
int i, j, state_count = curr_level->size;
struct acpi_lpi_state *p, *t = curr_level->entries;
@@ -1087,7 +1083,7 @@ static int flatten_lpi_states(struct acpi_processor *pr,
}
kfree(curr_level->entries);
- return 0;
+ return flat_state_cnt;
}
int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu)
@@ -1102,6 +1098,7 @@ static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
acpi_handle handle = pr->handle, pr_ahandle;
struct acpi_device *d = NULL;
struct acpi_lpi_states_array info[2], *tmp, *prev, *curr;
+ unsigned int state_count;
/* make sure our architecture has support */
ret = acpi_processor_ffh_lpi_probe(pr->id);
@@ -1114,14 +1111,13 @@ static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
if (!acpi_has_method(handle, "_LPI"))
return -EINVAL;
- flat_state_cnt = 0;
prev = &info[0];
curr = &info[1];
handle = pr->handle;
ret = acpi_processor_evaluate_lpi(handle, prev);
if (ret)
return ret;
- flatten_lpi_states(pr, prev, NULL);
+ state_count = flatten_lpi_states(pr, 0, prev, NULL);
status = acpi_get_parent(handle, &pr_ahandle);
while (ACPI_SUCCESS(status)) {
@@ -1143,18 +1139,19 @@ static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
break;
/* flatten all the LPI states in this level of hierarchy */
- flatten_lpi_states(pr, curr, prev);
+ state_count = flatten_lpi_states(pr, state_count, curr, prev);
tmp = prev, prev = curr, curr = tmp;
status = acpi_get_parent(handle, &pr_ahandle);
}
- pr->power.count = flat_state_cnt;
/* reset the index after flattening */
- for (i = 0; i < pr->power.count; i++)
+ for (i = 0; i < state_count; i++)
pr->power.lpi_states[i].index = i;
+ pr->power.count = state_count;
+
/* Tell driver that _LPI is supported. */
pr->flags.has_lpi = 1;
pr->flags.power = 1;
@@ -1360,74 +1357,102 @@ int acpi_processor_power_state_has_changed(struct acpi_processor *pr)
return 0;
}
-static int acpi_processor_registered;
+void acpi_processor_register_idle_driver(void)
+{
+ struct acpi_processor *pr;
+ int ret = -ENODEV;
+ int cpu;
+
+ /*
+ * The ACPI idle driver is used by all possible CPUs.
+ * Set up the idle handler using the processor power info of one of them.
+ * Note that the previously set idle handler will be used on
+ * platforms that only support C1.
+ */
+ for_each_possible_cpu(cpu) {
+ pr = per_cpu(processors, cpu);
+ if (!pr)
+ continue;
+
+ ret = acpi_processor_get_power_info(pr);
+ if (!ret) {
+ pr->flags.power_setup_done = 1;
+ acpi_processor_setup_cpuidle_states(pr);
+ break;
+ }
+ }
+
+ if (ret) {
+ pr_debug("No ACPI power information from any CPUs.\n");
+ return;
+ }
+
+ ret = cpuidle_register_driver(&acpi_idle_driver);
+ if (ret) {
+ pr_debug("register %s failed.\n", acpi_idle_driver.name);
+ return;
+ }
+ pr_debug("%s registered with cpuidle.\n", acpi_idle_driver.name);
+}
+
+void acpi_processor_unregister_idle_driver(void)
+{
+ cpuidle_unregister_driver(&acpi_idle_driver);
+}
-int acpi_processor_power_init(struct acpi_processor *pr)
+void acpi_processor_power_init(struct acpi_processor *pr)
{
- int retval;
struct cpuidle_device *dev;
+ /*
+ * The code below only works if the current cpuidle driver is the ACPI
+ * idle driver.
+ */
+ if (cpuidle_get_driver() != &acpi_idle_driver)
+ return;
+
if (disabled_by_idle_boot_param())
- return 0;
+ return;
acpi_processor_cstate_first_run_checks();
if (!acpi_processor_get_power_info(pr))
pr->flags.power_setup_done = 1;
- /*
- * Install the idle handler if processor power management is supported.
- * Note that we use previously set idle handler will be used on
- * platforms that only support C1.
- */
- if (pr->flags.power) {
- /* Register acpi_idle_driver if not already registered */
- if (!acpi_processor_registered) {
- acpi_processor_setup_cpuidle_states(pr);
- retval = cpuidle_register_driver(&acpi_idle_driver);
- if (retval)
- return retval;
- pr_debug("%s registered with cpuidle\n",
- acpi_idle_driver.name);
- }
+ if (!pr->flags.power)
+ return;
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev)
- return -ENOMEM;
- per_cpu(acpi_cpuidle_device, pr->id) = dev;
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return;
- acpi_processor_setup_cpuidle_dev(pr, dev);
+ per_cpu(acpi_cpuidle_device, pr->id) = dev;
- /* Register per-cpu cpuidle_device. Cpuidle driver
- * must already be registered before registering device
- */
- retval = cpuidle_register_device(dev);
- if (retval) {
- if (acpi_processor_registered == 0)
- cpuidle_unregister_driver(&acpi_idle_driver);
- return retval;
- }
- acpi_processor_registered++;
+ acpi_processor_setup_cpuidle_dev(pr, dev);
+
+ /*
+ * Register a cpuidle device for this CPU. The cpuidle driver using
+ * this device is expected to be registered.
+ */
+ if (cpuidle_register_device(dev)) {
+ per_cpu(acpi_cpuidle_device, pr->id) = NULL;
+ kfree(dev);
}
- return 0;
}
-int acpi_processor_power_exit(struct acpi_processor *pr)
+void acpi_processor_power_exit(struct acpi_processor *pr)
{
struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);
if (disabled_by_idle_boot_param())
- return 0;
+ return;
if (pr->flags.power) {
cpuidle_unregister_device(dev);
- acpi_processor_registered--;
- if (acpi_processor_registered == 0)
- cpuidle_unregister_driver(&acpi_idle_driver);
-
kfree(dev);
}
pr->flags.power_setup_done = 0;
- return 0;
}
+
+MODULE_IMPORT_NS("ACPI_PROCESSOR_IDLE");
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index 1219adb11ab9..c7b1dc5687ec 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -62,19 +62,14 @@ static int phys_package_first_cpu(int cpu)
return 0;
}
-static int cpu_has_cpufreq(unsigned int cpu)
+static bool cpu_has_cpufreq(unsigned int cpu)
{
- struct cpufreq_policy *policy;
-
if (!acpi_processor_cpufreq_init)
return 0;
- policy = cpufreq_cpu_get(cpu);
- if (policy) {
- cpufreq_cpu_put(policy);
- return 1;
- }
- return 0;
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
+
+ return policy != NULL;
}
static int cpufreq_get_max_state(unsigned int cpu)
@@ -93,12 +88,31 @@ static int cpufreq_get_cur_state(unsigned int cpu)
return reduction_step(cpu);
}
+static bool cpufreq_update_thermal_limit(unsigned int cpu, struct acpi_processor *pr)
+{
+ unsigned long max_freq;
+ int ret;
+
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
+ if (!policy)
+ return false;
+
+ max_freq = (policy->cpuinfo.max_freq *
+ (100 - reduction_step(cpu) * cpufreq_thermal_reduction_pctg)) / 100;
+
+ ret = freq_qos_update_request(&pr->thermal_req, max_freq);
+ if (ret < 0) {
+ pr_warn("Failed to update thermal freq constraint: CPU%d (%d)\n",
+ pr->id, ret);
+ }
+
+ return true;
+}
+
static int cpufreq_set_cur_state(unsigned int cpu, int state)
{
- struct cpufreq_policy *policy;
struct acpi_processor *pr;
- unsigned long max_freq;
- int i, ret;
+ int i;
if (!cpu_has_cpufreq(cpu))
return 0;
@@ -120,20 +134,8 @@ static int cpufreq_set_cur_state(unsigned int cpu, int state)
if (unlikely(!freq_qos_request_active(&pr->thermal_req)))
continue;
- policy = cpufreq_cpu_get(i);
- if (!policy)
+ if (!cpufreq_update_thermal_limit(i, pr))
return -EINVAL;
-
- max_freq = (policy->cpuinfo.max_freq *
- (100 - reduction_step(i) * cpufreq_thermal_reduction_pctg)) / 100;
-
- cpufreq_cpu_put(policy);
-
- ret = freq_qos_update_request(&pr->thermal_req, max_freq);
- if (ret < 0) {
- pr_warn("Failed to update thermal freq constraint: CPU%d (%d)\n",
- pr->id, ret);
- }
}
return 0;
}
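The __free(put_cpufreq_policy) annotation is the scope-based cleanup mechanism from <linux/cleanup.h>; cpufreq.h declares the hook with DEFINE_FREE(), and the annotated pointer is handed to it automatically when the variable leaves scope, so every return path drops the policy reference without an explicit cpufreq_cpu_put(). A minimal sketch of the idiom (function name illustrative):

static unsigned int example_cpu_max_freq(unsigned int cpu)
{
	struct cpufreq_policy *policy __free(put_cpufreq_policy) =
						cpufreq_cpu_get(cpu);

	if (!policy)
		return 0;

	/* The reference is released automatically on return. */
	return policy->cpuinfo.max_freq;
}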
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 436019d96027..54baa23a9e5a 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -83,6 +83,7 @@ static bool acpi_nondev_subnode_extract(union acpi_object *desc,
struct fwnode_handle *parent)
{
struct acpi_data_node *dn;
+ acpi_handle scope = NULL;
bool result;
if (acpi_graph_ignore_port(handle))
@@ -98,59 +99,45 @@ static bool acpi_nondev_subnode_extract(union acpi_object *desc,
INIT_LIST_HEAD(&dn->data.properties);
INIT_LIST_HEAD(&dn->data.subnodes);
- result = acpi_extract_properties(handle, desc, &dn->data);
-
- if (handle) {
- acpi_handle scope;
- acpi_status status;
+ /*
+ * The scope for the completion of relative pathname segments and
+ * subnode object lookup is the one of the namespace node (device)
+ * containing the object that has returned the package. That is, it's
+ * the scope of that object's parent device.
+ */
+ if (handle)
+ acpi_get_parent(handle, &scope);
- /*
- * The scope for the subnode object lookup is the one of the
- * namespace node (device) containing the object that has
- * returned the package. That is, it's the scope of that
- * object's parent.
- */
- status = acpi_get_parent(handle, &scope);
- if (ACPI_SUCCESS(status)
- && acpi_enumerate_nondev_subnodes(scope, desc, &dn->data,
- &dn->fwnode))
- result = true;
- } else if (acpi_enumerate_nondev_subnodes(NULL, desc, &dn->data,
- &dn->fwnode)) {
+ /*
+ * Extract properties from the _DSD-equivalent package pointed to by
+ * desc and use scope (if not NULL) for the completion of relative
+ * pathname segments.
+ *
+ * The extracted properties will be held in the new data node dn.
+ */
+ result = acpi_extract_properties(scope, desc, &dn->data);
+ /*
+ * Look for subnodes in the _DSD-equivalent package pointed to by desc
+ * and create child nodes of dn if there are any.
+ */
+ if (acpi_enumerate_nondev_subnodes(scope, desc, &dn->data, &dn->fwnode))
result = true;
- }
-
- if (result) {
- dn->handle = handle;
- dn->data.pointer = desc;
- list_add_tail(&dn->sibling, list);
- return true;
- }
-
- kfree(dn);
- acpi_handle_debug(handle, "Invalid properties/subnodes data, skipping\n");
- return false;
-}
-static bool acpi_nondev_subnode_data_ok(acpi_handle handle,
- const union acpi_object *link,
- struct list_head *list,
- struct fwnode_handle *parent)
-{
- struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
- acpi_status status;
-
- status = acpi_evaluate_object_typed(handle, NULL, NULL, &buf,
- ACPI_TYPE_PACKAGE);
- if (ACPI_FAILURE(status))
+ if (!result) {
+ kfree(dn);
+ acpi_handle_debug(handle, "Invalid properties/subnodes data, skipping\n");
return false;
+ }
- if (acpi_nondev_subnode_extract(buf.pointer, handle, link, list,
- parent))
- return true;
+ /*
+ * This will be NULL if the desc package is embedded in an outer
+ * _DSD-equivalent package and its scope cannot be determined.
+ */
+ dn->handle = handle;
+ dn->data.pointer = desc;
+ list_add_tail(&dn->sibling, list);
- ACPI_FREE(buf.pointer);
- return false;
+ return true;
}
static bool acpi_nondev_subnode_ok(acpi_handle scope,
@@ -158,9 +145,16 @@ static bool acpi_nondev_subnode_ok(acpi_handle scope,
struct list_head *list,
struct fwnode_handle *parent)
{
+ struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
acpi_handle handle;
acpi_status status;
+ /*
+ * If the scope is unknown, the _DSD-equivalent package being parsed
+ * was embedded in an outer _DSD-equivalent package as a result of
+ * direct evaluation of an object pointed to by a reference. In that
+ * case, using a pathname as the target object pointer is invalid.
+ */
if (!scope)
return false;
@@ -169,7 +163,17 @@ static bool acpi_nondev_subnode_ok(acpi_handle scope,
if (ACPI_FAILURE(status))
return false;
- return acpi_nondev_subnode_data_ok(handle, link, list, parent);
+ status = acpi_evaluate_object_typed(handle, NULL, NULL, &buf,
+ ACPI_TYPE_PACKAGE);
+ if (ACPI_FAILURE(status))
+ return false;
+
+ if (acpi_nondev_subnode_extract(buf.pointer, handle, link, list,
+ parent))
+ return true;
+
+ ACPI_FREE(buf.pointer);
+ return false;
}
static bool acpi_add_nondev_subnodes(acpi_handle scope,
@@ -180,9 +184,12 @@ static bool acpi_add_nondev_subnodes(acpi_handle scope,
bool ret = false;
int i;
+ /*
+ * Every element in the links package is expected to represent a link
+ * to a non-device node in a tree containing device-specific data.
+ */
for (i = 0; i < links->package.count; i++) {
union acpi_object *link, *desc;
- acpi_handle handle;
bool result;
link = &links->package.elements[i];
@@ -190,26 +197,53 @@ static bool acpi_add_nondev_subnodes(acpi_handle scope,
if (link->package.count != 2)
continue;
- /* The first one must be a string. */
+ /* The first one (the key) must be a string. */
if (link->package.elements[0].type != ACPI_TYPE_STRING)
continue;
- /* The second one may be a string, a reference or a package. */
+ /* The second one (the target) may be a string or a package. */
switch (link->package.elements[1].type) {
case ACPI_TYPE_STRING:
+ /*
+ * The string is expected to be a full pathname or a
+ * pathname segment relative to the given scope. That
+ * pathname is expected to point to an object returning
+ * a package that contains _DSD-equivalent information.
+ */
result = acpi_nondev_subnode_ok(scope, link, list,
parent);
break;
- case ACPI_TYPE_LOCAL_REFERENCE:
- handle = link->package.elements[1].reference.handle;
- result = acpi_nondev_subnode_data_ok(handle, link, list,
- parent);
- break;
case ACPI_TYPE_PACKAGE:
+ /*
+ * This happens when a reference is used in AML to
+ * point to the target. Since the target is expected
+ * to be a named object, a reference to it will cause it
+ * to be evaluated in place and its return package will
+ * be embedded in the links package at the location of
+ * the reference.
+ *
+ * The target package is expected to contain _DSD-
+ * equivalent information, but the scope in which it
+ * is located in the original AML is unknown. Thus
+ * it cannot contain pathname segments represented as
+ * strings because there is no way to build full
+ * pathnames out of them.
+ */
+ acpi_handle_debug(scope, "subnode %s: Unknown scope\n",
+ link->package.elements[0].string.pointer);
desc = &link->package.elements[1];
result = acpi_nondev_subnode_extract(desc, NULL, link,
list, parent);
break;
+ case ACPI_TYPE_LOCAL_REFERENCE:
+ /*
+ * It is not expected to see any local references in
+ * the links package because referencing a named object
+ * should cause it to be evaluated in place.
+ */
+ acpi_handle_info(scope, "subnode %s: Unexpected reference\n",
+ link->package.elements[0].string.pointer);
+ fallthrough;
default:
result = false;
break;
@@ -369,6 +403,9 @@ static void acpi_untie_nondev_subnodes(struct acpi_device_data *data)
struct acpi_data_node *dn;
list_for_each_entry(dn, &data->subnodes, sibling) {
+ if (!dn->handle)
+ continue;
+
acpi_detach_data(dn->handle, acpi_nondev_subnode_tag);
acpi_untie_nondev_subnodes(&dn->data);
@@ -383,6 +420,9 @@ static bool acpi_tie_nondev_subnodes(struct acpi_device_data *data)
acpi_status status;
bool ret;
+ if (!dn->handle)
+ continue;
+
status = acpi_attach_data(dn->handle, acpi_nondev_subnode_tag, dn);
if (ACPI_FAILURE(status) && status != AE_ALREADY_EXISTS) {
acpi_handle_err(dn->handle, "Can't tag data node\n");
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index b1ab192d7a08..d16906f46484 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -17,6 +17,7 @@
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/dmi.h>
+#include <linux/string_choices.h>
#ifdef CONFIG_X86
#define valid_IRQ(i) (((i) != 0) && ((i) != 2))
@@ -511,6 +512,13 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = {
},
},
{
+ /* Asus Vivobook Pro N6506CU* */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "N6506CU"),
+ },
+ },
+ {
/* LG Electronics 17U70P */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
@@ -773,7 +781,7 @@ static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
pr_warn("ACPI: IRQ %d override to %s%s, %s%s\n", gsi,
t ? "level" : "edge",
trig == triggering ? "" : "(!)",
- p ? "low" : "high",
+ str_low_high(p),
pol == polarity ? "" : "(!)");
triggering = trig;
polarity = pol;
diff --git a/drivers/acpi/riscv/Kconfig b/drivers/acpi/riscv/Kconfig
new file mode 100644
index 000000000000..046296a18d00
--- /dev/null
+++ b/drivers/acpi/riscv/Kconfig
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# ACPI Configuration for RISC-V
+#
+
+config ACPI_RIMT
+ bool
diff --git a/drivers/acpi/riscv/Makefile b/drivers/acpi/riscv/Makefile
index a96fdf1e2cb8..1284a076fa88 100644
--- a/drivers/acpi/riscv/Makefile
+++ b/drivers/acpi/riscv/Makefile
@@ -2,3 +2,4 @@
obj-y += rhct.o init.o irq.o
obj-$(CONFIG_ACPI_PROCESSOR_IDLE) += cpuidle.o
obj-$(CONFIG_ACPI_CPPC_LIB) += cppc.o
+obj-$(CONFIG_ACPI_RIMT) += rimt.o
diff --git a/drivers/acpi/riscv/init.c b/drivers/acpi/riscv/init.c
index 673e4d5dd752..7c00f7995e86 100644
--- a/drivers/acpi/riscv/init.c
+++ b/drivers/acpi/riscv/init.c
@@ -10,4 +10,6 @@
void __init acpi_arch_init(void)
{
riscv_acpi_init_gsi_mapping();
+ if (IS_ENABLED(CONFIG_ACPI_RIMT))
+ riscv_acpi_rimt_init();
}
diff --git a/drivers/acpi/riscv/init.h b/drivers/acpi/riscv/init.h
index 0b9a07e4031f..1680aa2aaf23 100644
--- a/drivers/acpi/riscv/init.h
+++ b/drivers/acpi/riscv/init.h
@@ -2,3 +2,4 @@
#include <linux/init.h>
void __init riscv_acpi_init_gsi_mapping(void);
+void __init riscv_acpi_rimt_init(void);
diff --git a/drivers/acpi/riscv/rimt.c b/drivers/acpi/riscv/rimt.c
new file mode 100644
index 000000000000..683fcfe35c31
--- /dev/null
+++ b/drivers/acpi/riscv/rimt.c
@@ -0,0 +1,520 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024-2025, Ventana Micro Systems Inc
+ * Author: Sunil V L <sunilvl@ventanamicro.com>
+ *
+ */
+
+#define pr_fmt(fmt) "ACPI: RIMT: " fmt
+
+#include <linux/acpi.h>
+#include <linux/acpi_rimt.h>
+#include <linux/iommu.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include "init.h"
+
+struct rimt_fwnode {
+ struct list_head list;
+ struct acpi_rimt_node *rimt_node;
+ struct fwnode_handle *fwnode;
+};
+
+static LIST_HEAD(rimt_fwnode_list);
+static DEFINE_SPINLOCK(rimt_fwnode_lock);
+
+#define RIMT_TYPE_MASK(type) (1 << (type))
+#define RIMT_IOMMU_TYPE BIT(0)
+
+/* Root pointer to the mapped RIMT table */
+static struct acpi_table_header *rimt_table;
+
+/**
+ * rimt_set_fwnode() - Create rimt_fwnode and use it to register
+ * iommu data in the rimt_fwnode_list
+ *
+ * @rimt_node: RIMT table node associated with the IOMMU
+ * @fwnode: fwnode associated with the RIMT node
+ *
+ * Returns: 0 on success
+ * <0 on failure
+ */
+static int rimt_set_fwnode(struct acpi_rimt_node *rimt_node,
+ struct fwnode_handle *fwnode)
+{
+ struct rimt_fwnode *np;
+
+ np = kzalloc(sizeof(*np), GFP_ATOMIC);
+
+ if (WARN_ON(!np))
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&np->list);
+ np->rimt_node = rimt_node;
+ np->fwnode = fwnode;
+
+ spin_lock(&rimt_fwnode_lock);
+ list_add_tail(&np->list, &rimt_fwnode_list);
+ spin_unlock(&rimt_fwnode_lock);
+
+ return 0;
+}
+
+/**
+ * rimt_get_fwnode() - Retrieve fwnode associated with an RIMT node
+ *
+ * @node: RIMT table node to be looked-up
+ *
+ * Returns: fwnode_handle pointer on success, NULL on failure
+ */
+static struct fwnode_handle *rimt_get_fwnode(struct acpi_rimt_node *node)
+{
+ struct fwnode_handle *fwnode = NULL;
+ struct rimt_fwnode *curr;
+
+ spin_lock(&rimt_fwnode_lock);
+ list_for_each_entry(curr, &rimt_fwnode_list, list) {
+ if (curr->rimt_node == node) {
+ fwnode = curr->fwnode;
+ break;
+ }
+ }
+ spin_unlock(&rimt_fwnode_lock);
+
+ return fwnode;
+}
+
+static acpi_status rimt_match_node_callback(struct acpi_rimt_node *node,
+ void *context)
+{
+ acpi_status status = AE_NOT_FOUND;
+ struct device *dev = context;
+
+ if (node->type == ACPI_RIMT_NODE_TYPE_IOMMU) {
+ struct acpi_rimt_iommu *iommu_node = (struct acpi_rimt_iommu *)&node->node_data;
+
+ if (dev_is_pci(dev)) {
+ struct pci_dev *pdev;
+ u16 bdf;
+
+ pdev = to_pci_dev(dev);
+ bdf = PCI_DEVID(pdev->bus->number, pdev->devfn);
+ if ((pci_domain_nr(pdev->bus) == iommu_node->pcie_segment_number) &&
+ bdf == iommu_node->pcie_bdf) {
+ status = AE_OK;
+ } else {
+ status = AE_NOT_FOUND;
+ }
+ } else {
+ struct platform_device *pdev = to_platform_device(dev);
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res && res->start == iommu_node->base_address)
+ status = AE_OK;
+ else
+ status = AE_NOT_FOUND;
+ }
+ } else if (node->type == ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX) {
+ struct acpi_rimt_pcie_rc *pci_rc;
+ struct pci_bus *bus;
+
+ bus = to_pci_bus(dev);
+ pci_rc = (struct acpi_rimt_pcie_rc *)node->node_data;
+
+ /*
+ * It is assumed that PCI segment numbers map one-to-one
+ * with root complexes. Each segment number can represent only
+ * one root complex.
+ */
+ status = pci_rc->pcie_segment_number == pci_domain_nr(bus) ?
+ AE_OK : AE_NOT_FOUND;
+ } else if (node->type == ACPI_RIMT_NODE_TYPE_PLAT_DEVICE) {
+ struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_rimt_platform_device *ncomp;
+ struct device *plat_dev = dev;
+ struct acpi_device *adev;
+
+ /*
+ * Walk the device tree to find a device with an
+ * ACPI companion; there is no point in scanning
+ * RIMT for a device matching a platform device if
+ * the device does not have an ACPI companion to
+ * start with.
+ */
+ do {
+ adev = ACPI_COMPANION(plat_dev);
+ if (adev)
+ break;
+
+ plat_dev = plat_dev->parent;
+ } while (plat_dev);
+
+ if (!adev)
+ return status;
+
+ status = acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &buf);
+ if (ACPI_FAILURE(status)) {
+ dev_warn(plat_dev, "Can't get device full path name\n");
+ return status;
+ }
+
+ ncomp = (struct acpi_rimt_platform_device *)node->node_data;
+ status = !strcmp(ncomp->device_name, buf.pointer) ?
+ AE_OK : AE_NOT_FOUND;
+ acpi_os_free(buf.pointer);
+ }
+
+ return status;
+}
+
+static struct acpi_rimt_node *rimt_scan_node(enum acpi_rimt_node_type type,
+ void *context)
+{
+ struct acpi_rimt_node *rimt_node, *rimt_end;
+ struct acpi_table_rimt *rimt;
+ int i;
+
+ if (!rimt_table)
+ return NULL;
+
+ /* Get the first RIMT node */
+ rimt = (struct acpi_table_rimt *)rimt_table;
+ rimt_node = ACPI_ADD_PTR(struct acpi_rimt_node, rimt,
+ rimt->node_offset);
+ rimt_end = ACPI_ADD_PTR(struct acpi_rimt_node, rimt_table,
+ rimt_table->length);
+
+ for (i = 0; i < rimt->num_nodes; i++) {
+ if (WARN_TAINT(rimt_node >= rimt_end, TAINT_FIRMWARE_WORKAROUND,
+ "RIMT node pointer overflows, bad table!\n"))
+ return NULL;
+
+ if (rimt_node->type == type &&
+ ACPI_SUCCESS(rimt_match_node_callback(rimt_node, context)))
+ return rimt_node;
+
+ rimt_node = ACPI_ADD_PTR(struct acpi_rimt_node, rimt_node,
+ rimt_node->length);
+ }
+
+ return NULL;
+}
+
+static bool rimt_pcie_rc_supports_ats(struct acpi_rimt_node *node)
+{
+ struct acpi_rimt_pcie_rc *pci_rc;
+
+ pci_rc = (struct acpi_rimt_pcie_rc *)node->node_data;
+ return pci_rc->flags & ACPI_RIMT_PCIE_ATS_SUPPORTED;
+}
+
+static int rimt_iommu_xlate(struct device *dev, struct acpi_rimt_node *node, u32 deviceid)
+{
+ struct fwnode_handle *rimt_fwnode;
+
+ if (!node)
+ return -ENODEV;
+
+ rimt_fwnode = rimt_get_fwnode(node);
+
+ /*
+ * The IOMMU drivers may not be probed yet.
+ * Defer the IOMMU configuration.
+ */
+ if (!rimt_fwnode)
+ return -EPROBE_DEFER;
+
+ return acpi_iommu_fwspec_init(dev, deviceid, rimt_fwnode);
+}
+
+struct rimt_pci_alias_info {
+ struct device *dev;
+ struct acpi_rimt_node *node;
+ const struct iommu_ops *ops;
+};
+
+static int rimt_id_map(struct acpi_rimt_id_mapping *map, u8 type, u32 rid_in, u32 *rid_out)
+{
+ if (rid_in < map->source_id_base ||
+ (rid_in > map->source_id_base + map->num_ids))
+ return -ENXIO;
+
+ *rid_out = map->dest_id_base + (rid_in - map->source_id_base);
+ return 0;
+}
+
+static struct acpi_rimt_node *rimt_node_get_id(struct acpi_rimt_node *node,
+ u32 *id_out, int index)
+{
+ struct acpi_rimt_platform_device *plat_node;
+ u32 id_mapping_offset, num_id_mapping;
+ struct acpi_rimt_pcie_rc *pci_node;
+ struct acpi_rimt_id_mapping *map;
+ struct acpi_rimt_node *parent;
+
+ if (node->type == ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX) {
+ pci_node = (struct acpi_rimt_pcie_rc *)&node->node_data;
+ id_mapping_offset = pci_node->id_mapping_offset;
+ num_id_mapping = pci_node->num_id_mappings;
+ } else if (node->type == ACPI_RIMT_NODE_TYPE_PLAT_DEVICE) {
+ plat_node = (struct acpi_rimt_platform_device *)&node->node_data;
+ id_mapping_offset = plat_node->id_mapping_offset;
+ num_id_mapping = plat_node->num_id_mappings;
+ } else {
+ return NULL;
+ }
+
+ if (!id_mapping_offset || !num_id_mapping || index >= num_id_mapping)
+ return NULL;
+
+ map = ACPI_ADD_PTR(struct acpi_rimt_id_mapping, node,
+ id_mapping_offset + index * sizeof(*map));
+
+ /* Firmware bug! */
+ if (!map->dest_offset) {
+ pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
+ node, node->type);
+ return NULL;
+ }
+
+ parent = ACPI_ADD_PTR(struct acpi_rimt_node, rimt_table, map->dest_offset);
+
+ if (node->type == ACPI_RIMT_NODE_TYPE_PLAT_DEVICE ||
+ node->type == ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX) {
+ *id_out = map->dest_id_base;
+ return parent;
+ }
+
+ return NULL;
+}
+
+/*
+ * RISC-V supports an IOMMU either as a PCI device or as a platform device.
+ * When it is a platform device, a matching ACPI namespace device should
+ * exist alongside the RIMT entry. To create the link between the RIMT
+ * information and the platform device, the IOMMU driver should register
+ * itself with the RIMT module. This is true for a PCI-based IOMMU as well.
+ */
+int rimt_iommu_register(struct device *dev)
+{
+ struct fwnode_handle *rimt_fwnode;
+ struct acpi_rimt_node *node;
+
+ node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_IOMMU, dev);
+ if (!node) {
+ pr_err("Could not find IOMMU node in RIMT\n");
+ return -ENODEV;
+ }
+
+ if (dev_is_pci(dev)) {
+ rimt_fwnode = acpi_alloc_fwnode_static();
+ if (!rimt_fwnode)
+ return -ENOMEM;
+
+ rimt_fwnode->dev = dev;
+ if (!dev->fwnode)
+ dev->fwnode = rimt_fwnode;
+
+ rimt_set_fwnode(node, rimt_fwnode);
+ } else {
+ rimt_set_fwnode(node, dev->fwnode);
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_IOMMU_API
+
+static struct acpi_rimt_node *rimt_node_map_id(struct acpi_rimt_node *node,
+ u32 id_in, u32 *id_out,
+ u8 type_mask)
+{
+ struct acpi_rimt_platform_device *plat_node;
+ u32 id_mapping_offset, num_id_mapping;
+ struct acpi_rimt_pcie_rc *pci_node;
+ u32 id = id_in;
+
+ /* Parse the ID mapping tree to find specified node type */
+ while (node) {
+ struct acpi_rimt_id_mapping *map;
+ int i, rc = 0;
+ u32 map_id = id;
+
+ if (RIMT_TYPE_MASK(node->type) & type_mask) {
+ if (id_out)
+ *id_out = id;
+ return node;
+ }
+
+ if (node->type == ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX) {
+ pci_node = (struct acpi_rimt_pcie_rc *)&node->node_data;
+ id_mapping_offset = pci_node->id_mapping_offset;
+ num_id_mapping = pci_node->num_id_mappings;
+ } else if (node->type == ACPI_RIMT_NODE_TYPE_PLAT_DEVICE) {
+ plat_node = (struct acpi_rimt_platform_device *)&node->node_data;
+ id_mapping_offset = plat_node->id_mapping_offset;
+ num_id_mapping = plat_node->num_id_mappings;
+ } else {
+ goto fail_map;
+ }
+
+ if (!id_mapping_offset || !num_id_mapping)
+ goto fail_map;
+
+ map = ACPI_ADD_PTR(struct acpi_rimt_id_mapping, node,
+ id_mapping_offset);
+
+ /* Firmware bug! */
+ if (!map->dest_offset) {
+ pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
+ node, node->type);
+ goto fail_map;
+ }
+
+ /* Do the ID translation */
+ for (i = 0; i < num_id_mapping; i++, map++) {
+ rc = rimt_id_map(map, node->type, map_id, &id);
+ if (!rc)
+ break;
+ }
+
+ if (i == num_id_mapping)
+ goto fail_map;
+
+ node = ACPI_ADD_PTR(struct acpi_rimt_node, rimt_table,
+ rc ? 0 : map->dest_offset);
+ }
+
+fail_map:
+ /* Map input ID to output ID unchanged on mapping failure */
+ if (id_out)
+ *id_out = id_in;
+
+ return NULL;
+}
+
+static struct acpi_rimt_node *rimt_node_map_platform_id(struct acpi_rimt_node *node, u32 *id_out,
+ u8 type_mask, int index)
+{
+ struct acpi_rimt_node *parent;
+ u32 id;
+
+ parent = rimt_node_get_id(node, &id, index);
+ if (!parent)
+ return NULL;
+
+ if (!(RIMT_TYPE_MASK(parent->type) & type_mask))
+ parent = rimt_node_map_id(parent, id, id_out, type_mask);
+ else
+ if (id_out)
+ *id_out = id;
+
+ return parent;
+}
+
+static int rimt_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
+{
+ struct rimt_pci_alias_info *info = data;
+ struct acpi_rimt_node *parent;
+ u32 deviceid;
+
+ parent = rimt_node_map_id(info->node, alias, &deviceid, RIMT_IOMMU_TYPE);
+ return rimt_iommu_xlate(info->dev, parent, deviceid);
+}
+
+static int rimt_plat_iommu_map(struct device *dev, struct acpi_rimt_node *node)
+{
+ struct acpi_rimt_node *parent;
+ int err = -ENODEV, i = 0;
+ u32 deviceid = 0;
+
+ do {
+ parent = rimt_node_map_platform_id(node, &deviceid,
+ RIMT_IOMMU_TYPE,
+ i++);
+
+ if (parent)
+ err = rimt_iommu_xlate(dev, parent, deviceid);
+ } while (parent && !err);
+
+ return err;
+}
+
+static int rimt_plat_iommu_map_id(struct device *dev,
+ struct acpi_rimt_node *node,
+ const u32 *in_id)
+{
+ struct acpi_rimt_node *parent;
+ u32 deviceid;
+
+ parent = rimt_node_map_id(node, *in_id, &deviceid, RIMT_IOMMU_TYPE);
+ if (parent)
+ return rimt_iommu_xlate(dev, parent, deviceid);
+
+ return -ENODEV;
+}
+
+/**
+ * rimt_iommu_configure_id - Set-up IOMMU configuration for a device.
+ *
+ * @dev: device to configure
+ * @id_in: optional input id const value pointer
+ *
+ * Returns: 0 on success, <0 on failure
+ */
+int rimt_iommu_configure_id(struct device *dev, const u32 *id_in)
+{
+ struct acpi_rimt_node *node;
+ int err = -ENODEV;
+
+ if (dev_is_pci(dev)) {
+ struct iommu_fwspec *fwspec;
+ struct pci_bus *bus = to_pci_dev(dev)->bus;
+ struct rimt_pci_alias_info info = { .dev = dev };
+
+ node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX, &bus->dev);
+ if (!node)
+ return -ENODEV;
+
+ info.node = node;
+ err = pci_for_each_dma_alias(to_pci_dev(dev),
+ rimt_pci_iommu_init, &info);
+
+ fwspec = dev_iommu_fwspec_get(dev);
+ if (fwspec && rimt_pcie_rc_supports_ats(node))
+ fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS;
+ } else {
+ node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_PLAT_DEVICE, dev);
+ if (!node)
+ return -ENODEV;
+
+ err = id_in ? rimt_plat_iommu_map_id(dev, node, id_in) :
+ rimt_plat_iommu_map(dev, node);
+ }
+
+ return err;
+}
+
+#endif
+
+void __init riscv_acpi_rimt_init(void)
+{
+ acpi_status status;
+
+ /*
+ * rimt_table will be used at runtime after the RIMT init,
+ * so we don't need to call acpi_put_table() to release
+ * the RIMT table mapping.
+ */
+ status = acpi_get_table(ACPI_SIG_RIMT, 0, &rimt_table);
+ if (ACPI_FAILURE(status)) {
+ if (status != AE_NOT_FOUND) {
+ const char *msg = acpi_format_exception(status);
+
+ pr_err("Failed to get table, %s\n", msg);
+ }
+
+ return;
+ }
+}
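rimt_iommu_register() is the hook an IOMMU driver calls once it has probed; until then rimt_iommu_xlate() keeps returning -EPROBE_DEFER because no fwnode has been recorded for the RIMT node. A sketch of the expected call site (the driver and probe signature are illustrative):

static int riscv_iommu_probe(struct platform_device *pdev)
{
	/* ... hardware bring-up ... */

	/* Link this device's fwnode to its RIMT IOMMU node. */
	return rimt_iommu_register(&pdev->dev);
}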
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index fb1fe9f3b1a3..065abe56f440 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/acpi_iort.h>
+#include <linux/acpi_rimt.h>
#include <linux/acpi_viot.h>
#include <linux/iommu.h>
#include <linux/signal.h>
@@ -845,6 +846,8 @@ static bool acpi_info_matches_ids(struct acpi_device_info *info,
static const char * const acpi_ignore_dep_ids[] = {
"PNP0D80", /* Windows-compatible System Power Management Controller */
"INT33BD", /* Intel Baytrail Mailbox Device */
+ "INTC10DE", /* Intel CVS LNL */
+ "INTC10E0", /* Intel CVS ARL */
"LATT2021", /* Lattice FW Update Client Driver */
NULL
};
@@ -1629,7 +1632,10 @@ static int acpi_iommu_configure_id(struct device *dev, const u32 *id_in)
err = iort_iommu_configure_id(dev, id_in);
if (err && err != -EPROBE_DEFER)
+ err = rimt_iommu_configure_id(dev, id_in);
+ if (err && err != -EPROBE_DEFER)
err = viot_iommu_configure(dev);
+
mutex_unlock(&iommu_probe_device_lock);
return err;
@@ -2702,6 +2708,7 @@ void __init acpi_scan_init(void)
acpi_memory_hotplug_init();
acpi_watchdog_init();
acpi_pnp_init();
+ acpi_power_resources_init();
acpi_int340x_thermal_init();
acpi_init_lpit();
diff --git a/drivers/acpi/spcr.c b/drivers/acpi/spcr.c
index cd36a97b0ea2..d4d52d5e9016 100644
--- a/drivers/acpi/spcr.c
+++ b/drivers/acpi/spcr.c
@@ -141,12 +141,23 @@ int __init acpi_parse_spcr(bool enable_earlycon, bool enable_console)
case ACPI_DBG2_16550_NVIDIA:
uart = "uart";
break;
+ case ACPI_DBG2_RISCV_SBI_CON:
+ uart = "sbi";
+ break;
default:
err = -ENOENT;
goto done;
}
- switch (table->baud_rate) {
+ /*
+ * SPCR 1.09 defines the Precise Baud Rate field, which contains
+ * a specific non-zero baud rate that overrides the value of the
+ * Configured Baud Rate field. If this field is zero or not
+ * present, the Configured Baud Rate is used.
+ */
+ if (table->precise_baudrate)
+ baud_rate = table->precise_baudrate;
+ else switch (table->baud_rate) {
case 0:
/*
* SPCR 1.04 defines 0 as a preconfigured state of UART.
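The added check reduces the baud selection to the sketch below, where spcr_configured_rate() is a hypothetical stand-in for the existing switch on table->baud_rate:

    u32 baud;

    if (table->precise_baudrate)    /* SPCR 1.09: the exact rate wins */
        baud = table->precise_baudrate;
    else
        baud = spcr_configured_rate(table->baud_rate); /* hypothetical helper */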
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index fa9bb8c8ce95..57fc8bc56166 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -408,7 +408,7 @@ static const char table_sigs[][ACPI_NAMESEG_SIZE] __nonstring_array __initconst
ACPI_SIG_PSDT, ACPI_SIG_RSDT, ACPI_SIG_XSDT, ACPI_SIG_SSDT,
ACPI_SIG_IORT, ACPI_SIG_NFIT, ACPI_SIG_HMAT, ACPI_SIG_PPTT,
ACPI_SIG_NHLT, ACPI_SIG_AEST, ACPI_SIG_CEDT, ACPI_SIG_AGDI,
- ACPI_SIG_NBFT };
+ ACPI_SIG_NBFT, ACPI_SIG_SWFT};
#define ACPI_HEADER_SIZE sizeof(struct acpi_table_header)
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 5c2defe55898..8537395b417b 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -924,7 +924,7 @@ static int acpi_thermal_suspend(struct device *dev)
static int acpi_thermal_resume(struct device *dev)
{
struct acpi_thermal *tz;
- int i, j, power_state;
+ int i, j;
if (!dev)
return -EINVAL;
@@ -939,10 +939,8 @@ static int acpi_thermal_resume(struct device *dev)
if (!acpi_thermal_trip_valid(acpi_trip))
break;
- for (j = 0; j < acpi_trip->devices.count; j++) {
- acpi_bus_update_power(acpi_trip->devices.handles[j],
- &power_state);
- }
+ for (j = 0; j < acpi_trip->devices.count; j++)
+ acpi_bus_update_power(acpi_trip->devices.handles[j], NULL);
}
acpi_queue_thermal_check(tz);
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index d507d5e08435..4cf74f173c78 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -948,6 +948,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Mipad2"),
},
},
+ /* https://gitlab.freedesktop.org/drm/amd/-/issues/4512 */
+ {
+ .callback = video_detect_force_native,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82K8"),
+ },
+ },
{ },
};
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 2ded5e476d6e..b43a3196e2be 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -351,7 +351,7 @@ EXPORT_SYMBOL_GPL(ata_common_sdev_groups);
/**
* ata_std_bios_param - generic bios head/sector/cylinder calculator used by sd.
* @sdev: SCSI device for which BIOS geometry is to be determined
- * @bdev: block device associated with @sdev
+ * @unused: gendisk associated with @sdev
* @capacity: capacity of SCSI device
* @geom: location to which geometry will be output
*
@@ -366,7 +366,7 @@ EXPORT_SYMBOL_GPL(ata_common_sdev_groups);
* RETURNS:
* Zero.
*/
-int ata_std_bios_param(struct scsi_device *sdev, struct block_device *bdev,
+int ata_std_bios_param(struct scsi_device *sdev, struct gendisk *unused,
sector_t capacity, int geom[])
{
geom[0] = 255;
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 7fc407255eb4..1e2a2c33cdc8 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -614,7 +614,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
offset = qc->cursg->offset + qc->cursg_ofs;
/* get the current page and offset */
- page = nth_page(page, (offset >> PAGE_SHIFT));
+ page += offset >> PAGE_SHIFT;
offset %= PAGE_SIZE;
/* don't overrun current sg */
@@ -631,7 +631,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
unsigned int split_len = PAGE_SIZE - offset;
ata_pio_xfer(qc, page, offset, split_len);
- ata_pio_xfer(qc, nth_page(page, 1), 0, count - split_len);
+ ata_pio_xfer(qc, page + 1, 0, count - split_len);
} else {
ata_pio_xfer(qc, page, offset, count);
}
@@ -751,7 +751,7 @@ next_sg:
offset = sg->offset + qc->cursg_ofs;
/* get the current page and offset */
- page = nth_page(page, (offset >> PAGE_SHIFT));
+ page += offset >> PAGE_SHIFT;
offset %= PAGE_SIZE;
/* don't overrun current sg */
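With the pages of an sg entry coming from one physically contiguous allocation, nth_page(page, n) is plain pointer arithmetic; the conversions above are equivalent to this sketch (offset assumed unsigned):

    page += offset >> PAGE_SHIFT;    /* advance by whole pages */
    offset &= PAGE_SIZE - 1;         /* same result as offset %= PAGE_SIZE */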
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 123031a757d9..700aecd22fd3 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -251,6 +251,14 @@ void device_links_unbind_consumers(struct device *dev);
void fw_devlink_drivers_done(void);
void fw_devlink_probing_done(void);
+#define dev_for_each_link_to_supplier(__link, __dev) \
+ list_for_each_entry_srcu(__link, &(__dev)->links.suppliers, c_node, \
+ device_links_read_lock_held())
+
+#define dev_for_each_link_to_consumer(__link, __dev) \
+ list_for_each_entry_srcu(__link, &(__dev)->links.consumers, s_node, \
+ device_links_read_lock_held())
+
/* device pm support */
void device_pm_move_to_tail(struct device *dev);
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index ff55e1bcfa30..c948c88d3956 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -1117,6 +1117,27 @@ void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp)
}
EXPORT_SYMBOL_GPL(devm_kmemdup);
+/**
+ * devm_kmemdup_const - conditionally duplicate and manage a region of memory
+ *
+ * @dev: Device this memory belongs to
+ * @src: memory region to duplicate
+ * @len: memory region length
+ * @gfp: GFP mask to use
+ *
+ * Return: the source address if it is in .rodata, otherwise the return
+ * value of devm_kmemdup(), to which the function falls back.
+ */
+const void *
+devm_kmemdup_const(struct device *dev, const void *src, size_t len, gfp_t gfp)
+{
+ if (is_kernel_rodata((unsigned long)src))
+ return src;
+
+ return devm_kmemdup(dev, src, len, gfp);
+}
+EXPORT_SYMBOL_GPL(devm_kmemdup_const);
+
struct pages_devres {
unsigned long addr;
unsigned int order;
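A usage sketch for devm_kmemdup_const(); the table name and contents are assumptions, not from this patch:

    static const u32 default_tbl[] = { 1, 2, 3 };    /* lives in .rodata */
    const u32 *tbl;

    tbl = devm_kmemdup_const(dev, default_tbl, sizeof(default_tbl),
                             GFP_KERNEL);
    if (!tbl)
        return -ENOMEM;
    /* tbl == default_tbl here: no copy is made for .rodata input. */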
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 3399594136b2..67b01d579737 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -885,6 +885,10 @@ int register_one_node(int nid)
node_devices[nid] = node;
error = register_node(node_devices[nid], nid);
+ if (error) {
+ node_devices[nid] = NULL;
+ return error;
+ }
/* link cpu under this node */
for_each_present_cpu(cpu) {
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 2ea6e05e6ec9..b9a34c3425ec 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -40,10 +40,6 @@
typedef int (*pm_callback_t)(struct device *);
-#define list_for_each_entry_rcu_locked(pos, head, member) \
- list_for_each_entry_rcu(pos, head, member, \
- device_links_read_lock_held())
-
/*
* The entries in the dpm_list list are in a depth first order, simply
* because children are guaranteed to be discovered after parents, and
@@ -281,7 +277,7 @@ static void dpm_wait_for_suppliers(struct device *dev, bool async)
* callbacks freeing the link objects for the links in the list we're
* walking.
*/
- list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
+ dev_for_each_link_to_supplier(link, dev)
if (READ_ONCE(link->status) != DL_STATE_DORMANT)
dpm_wait(link->supplier, async);
@@ -338,7 +334,7 @@ static void dpm_wait_for_consumers(struct device *dev, bool async)
* continue instead of trying to continue in parallel with its
* unregistration).
*/
- list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
+ dev_for_each_link_to_consumer(link, dev)
if (READ_ONCE(link->status) != DL_STATE_DORMANT)
dpm_wait(link->consumer, async);
@@ -675,7 +671,7 @@ static void dpm_async_resume_subordinate(struct device *dev, async_func_t func)
idx = device_links_read_lock();
/* Start processing the device's "async" consumers. */
- list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
+ dev_for_each_link_to_consumer(link, dev)
if (READ_ONCE(link->status) != DL_STATE_DORMANT)
dpm_async_with_cleanup(link->consumer, func);
@@ -724,8 +720,20 @@ static void device_resume_noirq(struct device *dev, pm_message_t state, bool asy
if (dev->power.syscore || dev->power.direct_complete)
goto Out;
- if (!dev->power.is_noirq_suspended)
+ if (!dev->power.is_noirq_suspended) {
+ /*
+ * This means that system suspend has been aborted in the noirq
+ * phase before invoking the noirq suspend callback for the
+ * device, so if device_suspend_late() has left it in suspend,
+ * device_resume_early() should leave it in suspend too, in
+ * case its early resume depends on the noirq resume that has
+ * not run.
+ */
+ if (dev_pm_skip_suspend(dev))
+ dev->power.must_resume = false;
+
goto Out;
+ }
if (!dpm_wait_for_superior(dev, async))
goto Out;
@@ -1330,7 +1338,7 @@ static void dpm_async_suspend_superior(struct device *dev, async_func_t func)
idx = device_links_read_lock();
/* Start processing the device's "async" suppliers. */
- list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
+ dev_for_each_link_to_supplier(link, dev)
if (READ_ONCE(link->status) != DL_STATE_DORMANT)
dpm_async_with_cleanup(link->supplier, func);
@@ -1384,7 +1392,7 @@ static void dpm_superior_set_must_resume(struct device *dev)
idx = device_links_read_lock();
- list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
+ dev_for_each_link_to_supplier(link, dev)
link->supplier->power.must_resume = true;
device_links_read_unlock(idx);
@@ -1813,7 +1821,7 @@ static void dpm_clear_superiors_direct_complete(struct device *dev)
idx = device_links_read_lock();
- list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
+ dev_for_each_link_to_supplier(link, dev) {
spin_lock_irq(&link->supplier->power.lock);
link->supplier->power.direct_complete = false;
spin_unlock_irq(&link->supplier->power.lock);
@@ -2065,7 +2073,7 @@ static bool device_prepare_smart_suspend(struct device *dev)
idx = device_links_read_lock();
- list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
+ dev_for_each_link_to_supplier(link, dev) {
if (!device_link_test(link, DL_FLAG_PM_RUNTIME))
continue;
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 3e84dc4122de..7420b9851fe0 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1903,8 +1903,7 @@ void pm_runtime_get_suppliers(struct device *dev)
idx = device_links_read_lock();
- list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
- device_links_read_lock_held())
+ dev_for_each_link_to_supplier(link, dev)
if (device_link_test(link, DL_FLAG_PM_RUNTIME)) {
link->supplier_preactivated = true;
pm_runtime_get_sync(link->supplier);
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index df38fb364904..77d694448990 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -17,6 +17,7 @@ menuconfig BLK_DEV
if BLK_DEV
source "drivers/block/null_blk/Kconfig"
+source "drivers/block/rnull/Kconfig"
config BLK_DEV_FD
tristate "Normal floppy disk support"
@@ -311,15 +312,6 @@ config VIRTIO_BLK
This is the virtual block driver for virtio. It can be used with
QEMU based VMMs (like KVM or Xen). Say Y or M.
-config BLK_DEV_RUST_NULL
- tristate "Rust null block driver (Experimental)"
- depends on RUST
- help
- This is the Rust implementation of the null block driver. For now it
- is only a minimal stub.
-
- If unsure, say N.
-
config BLK_DEV_RBD
tristate "Rados block device (RBD)"
depends on INET && BLOCK
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index a695ce74ef22..2d8096eb8cdf 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -9,9 +9,6 @@
# needed for trace events
ccflags-y += -I$(src)
-obj-$(CONFIG_BLK_DEV_RUST_NULL) += rnull_mod.o
-rnull_mod-y := rnull.o
-
obj-$(CONFIG_MAC_FLOPPY) += swim3.o
obj-$(CONFIG_BLK_DEV_SWIM) += swim_mod.o
obj-$(CONFIG_BLK_DEV_FD) += floppy.o
@@ -38,6 +35,7 @@ obj-$(CONFIG_ZRAM) += zram/
obj-$(CONFIG_BLK_DEV_RNBD) += rnbd/
obj-$(CONFIG_BLK_DEV_NULL_BLK) += null_blk/
+obj-$(CONFIG_BLK_DEV_RUST_NULL) += rnull/
obj-$(CONFIG_BLK_DEV_UBLK) += ublk_drv.o
obj-$(CONFIG_BLK_DEV_ZONED_LOOP) += zloop.o
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 6357d86eafdc..2932b6653b6f 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1523,13 +1523,13 @@ static blk_status_t amiflop_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_STS_OK;
}
-static int fd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+static int fd_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
- int drive = MINOR(bdev->bd_dev) & 3;
+ struct amiga_floppy_struct *p = disk->private_data;
- geo->heads = unit[drive].type->heads;
- geo->sectors = unit[drive].dtype->sects * unit[drive].type->sect_mult;
- geo->cylinders = unit[drive].type->tracks;
+ geo->heads = p->type->heads;
+ geo->sectors = p->dtype->sects * p->type->sect_mult;
+ geo->cylinders = p->type->tracks;
return 0;
}
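This and the following hunks convert ->getgeo() from taking a struct block_device to taking the struct gendisk directly; the per-driver pattern looks like this sketch (the foo_* names and geometry fields are assumed):

    static int foo_getgeo(struct gendisk *disk, struct hd_geometry *geo)
    {
        struct foo_device *d = disk->private_data;    /* no bd_disk hop */

        geo->heads = d->heads;        /* illustrative fields */
        geo->sectors = d->sectors;
        geo->cylinders = get_capacity(disk) >> 6;
        return 0;
    }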
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 00b74a845328..34ead75e7e02 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -269,9 +269,9 @@ static blk_status_t aoeblk_queue_rq(struct blk_mq_hw_ctx *hctx,
}
static int
-aoeblk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+aoeblk_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
- struct aoedev *d = bdev->bd_disk->private_data;
+ struct aoedev *d = disk->private_data;
if ((d->flags & DEVFL_UP) == 0) {
printk(KERN_ERR "aoe: disk not up\n");
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 6298f8e271e3..a9affb7c264d 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -1761,6 +1761,6 @@ aoecmd_exit(void)
kfree(kts);
kfree(ktiowq);
- free_page((unsigned long) page_address(empty_page));
+ __free_page(empty_page);
empty_page = NULL;
}
diff --git a/drivers/block/aoe/aoemain.c b/drivers/block/aoe/aoemain.c
index cdf6e4041bb9..3b21750038ee 100644
--- a/drivers/block/aoe/aoemain.c
+++ b/drivers/block/aoe/aoemain.c
@@ -44,7 +44,7 @@ aoe_init(void)
{
int ret;
- aoe_wq = alloc_workqueue("aoe_wq", 0, 0);
+ aoe_wq = alloc_workqueue("aoe_wq", WQ_PERCPU, 0);
if (!aoe_wq)
return -ENOMEM;
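Several hunks in this series pass WQ_PERCPU when allocating workqueues; the working assumption is that the flag now spells out the historical per-CPU default explicitly:

    /* was: wq = alloc_workqueue("foo_wq", 0, 0); */
    wq = alloc_workqueue("foo_wq", WQ_PERCPU, 0);
    if (!wq)
        return -ENOMEM;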
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 0c2eabe14af3..9778259b30d4 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -44,45 +44,74 @@ struct brd_device {
};
/*
- * Look up and return a brd's page for a given sector.
+ * Look up and return a brd's page for a given sector, with a reference grabbed.
*/
static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)
{
- return xa_load(&brd->brd_pages, sector >> PAGE_SECTORS_SHIFT);
+ struct page *page;
+ XA_STATE(xas, &brd->brd_pages, sector >> PAGE_SECTORS_SHIFT);
+
+ rcu_read_lock();
+repeat:
+ page = xas_load(&xas);
+ if (xas_retry(&xas, page)) {
+ xas_reset(&xas);
+ goto repeat;
+ }
+
+ if (!page)
+ goto out;
+
+ if (!get_page_unless_zero(page)) {
+ xas_reset(&xas);
+ goto repeat;
+ }
+
+ if (unlikely(page != xas_reload(&xas))) {
+ put_page(page);
+ xas_reset(&xas);
+ goto repeat;
+ }
+out:
+ rcu_read_unlock();
+
+ return page;
}
/*
* Insert a new page for a given sector, if one does not already exist.
+ * A reference is grabbed on the returned page.
*/
static struct page *brd_insert_page(struct brd_device *brd, sector_t sector,
blk_opf_t opf)
- __releases(rcu)
- __acquires(rcu)
{
gfp_t gfp = (opf & REQ_NOWAIT) ? GFP_NOWAIT : GFP_NOIO;
struct page *page, *ret;
- rcu_read_unlock();
page = alloc_page(gfp | __GFP_ZERO | __GFP_HIGHMEM);
- if (!page) {
- rcu_read_lock();
+ if (!page)
return ERR_PTR(-ENOMEM);
- }
xa_lock(&brd->brd_pages);
ret = __xa_cmpxchg(&brd->brd_pages, sector >> PAGE_SECTORS_SHIFT, NULL,
page, gfp);
- rcu_read_lock();
- if (ret) {
+ if (!ret) {
+ brd->brd_nr_pages++;
+ get_page(page);
+ xa_unlock(&brd->brd_pages);
+ return page;
+ }
+
+ if (!xa_is_err(ret)) {
+ get_page(ret);
xa_unlock(&brd->brd_pages);
- __free_page(page);
- if (xa_is_err(ret))
- return ERR_PTR(xa_err(ret));
+ put_page(page);
return ret;
}
- brd->brd_nr_pages++;
+
xa_unlock(&brd->brd_pages);
- return page;
+ put_page(page);
+ return ERR_PTR(xa_err(ret));
}
/*
@@ -95,7 +124,7 @@ static void brd_free_pages(struct brd_device *brd)
pgoff_t idx;
xa_for_each(&brd->brd_pages, idx, page) {
- __free_page(page);
+ put_page(page);
cond_resched();
}
@@ -117,7 +146,6 @@ static bool brd_rw_bvec(struct brd_device *brd, struct bio *bio)
bv.bv_len = min_t(u32, bv.bv_len, PAGE_SIZE - offset);
- rcu_read_lock();
page = brd_lookup_page(brd, sector);
if (!page && op_is_write(opf)) {
page = brd_insert_page(brd, sector, opf);
@@ -135,13 +163,13 @@ static bool brd_rw_bvec(struct brd_device *brd, struct bio *bio)
memset(kaddr, 0, bv.bv_len);
}
kunmap_local(kaddr);
- rcu_read_unlock();
bio_advance_iter_single(bio, &bio->bi_iter, bv.bv_len);
+ if (page)
+ put_page(page);
return true;
out_error:
- rcu_read_unlock();
if (PTR_ERR(page) == -ENOMEM && (opf & REQ_NOWAIT))
bio_wouldblock_error(bio);
else
@@ -149,13 +177,6 @@ out_error:
return false;
}
-static void brd_free_one_page(struct rcu_head *head)
-{
- struct page *page = container_of(head, struct page, rcu_head);
-
- __free_page(page);
-}
-
static void brd_do_discard(struct brd_device *brd, sector_t sector, u32 size)
{
sector_t aligned_sector = round_up(sector, PAGE_SECTORS);
@@ -170,7 +191,7 @@ static void brd_do_discard(struct brd_device *brd, sector_t sector, u32 size)
while (aligned_sector < aligned_end && aligned_sector < rd_size * 2) {
page = __xa_erase(&brd->brd_pages, aligned_sector >> PAGE_SECTORS_SHIFT);
if (page) {
- call_rcu(&page->rcu_head, brd_free_one_page);
+ put_page(page);
brd->brd_nr_pages--;
}
aligned_sector += PAGE_SECTORS;
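brd_lookup_page() above is the canonical RCU lookup-with-refcount pattern; stripped of the xas_retry() handling, it reduces to this sketch:

    rcu_read_lock();
    retry:
    page = xas_load(&xas);
    if (page && !get_page_unless_zero(page)) {
        /* Refcount already hit zero: the page is being freed. */
        xas_reset(&xas);
        goto retry;
    }
    if (page && page != xas_reload(&xas)) {
        /* The slot changed under us: drop the stale ref and retry. */
        put_page(page);
        xas_reset(&xas);
        goto retry;
    }
    rcu_read_unlock();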
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 24be0c2c4075..5336c3c5ca36 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -163,35 +163,35 @@
/* do print messages for unexpected interrupts */
static int print_unex = 1;
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/fs.h>
-#include <linux/kernel.h>
-#include <linux/timer.h>
-#include <linux/workqueue.h>
-#include <linux/fdreg.h>
-#include <linux/fd.h>
-#include <linux/hdreg.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
+#include <linux/async.h>
#include <linux/bio.h>
-#include <linux/string.h>
-#include <linux/jiffies.h>
-#include <linux/fcntl.h>
+#include <linux/compat.h>
#include <linux/delay.h>
-#include <linux/mc146818rtc.h> /* CMOS defines */
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/fcntl.h>
+#include <linux/fd.h>
+#include <linux/fdreg.h>
+#include <linux/fs.h>
+#include <linux/hdreg.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
#include <linux/major.h>
-#include <linux/platform_device.h>
+#include <linux/mc146818rtc.h> /* CMOS defines */
+#include <linux/mm.h>
#include <linux/mod_devicetable.h>
+#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
#include <linux/uaccess.h>
-#include <linux/async.h>
-#include <linux/compat.h>
+#include <linux/workqueue.h>
/*
* PS/2 floppies have much slower step rates than regular floppies.
@@ -233,8 +233,6 @@ static unsigned short virtual_dma_port = 0x3f0;
irqreturn_t floppy_interrupt(int irq, void *dev_id);
static int set_dor(int fdc, char mask, char data);
-#define K_64 0x10000 /* 64KB */
-
/* the following is the mask of allowed drives. By default units 2 and
* 3 of both floppy controllers are disabled, because switching on the
* motor of these drives causes system hangs on some PCI computers. drive
@@ -3092,16 +3090,13 @@ static int raw_cmd_copyin(int cmd, void __user *param,
*rcmd = NULL;
loop:
- ptr = kmalloc(sizeof(struct floppy_raw_cmd), GFP_KERNEL);
- if (!ptr)
- return -ENOMEM;
+ ptr = memdup_user(param, sizeof(*ptr));
+ if (IS_ERR(ptr))
+ return PTR_ERR(ptr);
*rcmd = ptr;
- ret = copy_from_user(ptr, param, sizeof(*ptr));
ptr->next = NULL;
ptr->buffer_length = 0;
ptr->kernel_data = NULL;
- if (ret)
- return -EFAULT;
param += sizeof(struct floppy_raw_cmd);
if (ptr->cmd_count > FD_RAW_CMD_FULLSIZE)
return -EINVAL;
@@ -3363,9 +3358,9 @@ static int get_floppy_geometry(int drive, int type, struct floppy_struct **g)
return 0;
}
-static int fd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+static int fd_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
- int drive = (long)bdev->bd_disk->private_data;
+ int drive = (long)disk->private_data;
int type = ITYPE(drive_state[drive].fd_device);
struct floppy_struct *g;
int ret;
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 8fc7761397bd..567192e371a8 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3148,17 +3148,17 @@ static int mtip_block_compat_ioctl(struct block_device *dev,
* that each partition is also 4KB aligned. Non-aligned partitions adversely
* affects performance.
*
- * @dev Pointer to the block_device strucutre.
+ * @disk Pointer to the gendisk structure.
* @geo Pointer to a hd_geometry structure.
*
* return value
* 0 Operation completed successfully.
* -ENOTTY An error occurred while reading the drive capacity.
*/
-static int mtip_block_getgeo(struct block_device *dev,
+static int mtip_block_getgeo(struct gendisk *disk,
struct hd_geometry *geo)
{
- struct driver_data *dd = dev->bd_disk->private_data;
+ struct driver_data *dd = disk->private_data;
sector_t capacity;
if (!dd)
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 6463d0e8d0ce..1188f32a5e5e 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -311,7 +311,7 @@ static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
if (args) {
INIT_WORK(&args->work, nbd_dead_link_work);
args->index = nbd->index;
- queue_work(system_wq, &args->work);
+ queue_work(system_percpu_wq, &args->work);
}
}
if (!nsock->dead) {
@@ -1217,6 +1217,14 @@ static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd,
if (!sock)
return NULL;
+ if (!sk_is_tcp(sock->sk) &&
+ !sk_is_stream_unix(sock->sk)) {
+ dev_err(disk_to_dev(nbd->disk), "Unsupported socket: should be TCP or UNIX.\n");
+ *err = -EINVAL;
+ sockfd_put(sock);
+ return NULL;
+ }
+
if (sock->ops->shutdown == sock_no_shutdown) {
dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n");
*err = -EINVAL;
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 91642c9a3b29..f982027e8c85 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -223,7 +223,7 @@ MODULE_PARM_DESC(discard, "Support discard operations (requires memory-backed nu
static unsigned long g_cache_size;
module_param_named(cache_size, g_cache_size, ulong, 0444);
-MODULE_PARM_DESC(mbps, "Cache size in MiB for memory-backed device. Default: 0 (none)");
+MODULE_PARM_DESC(cache_size, "Cache size in MiB for memory-backed device. Default: 0 (none)");
static bool g_fua = true;
module_param_named(fua, g_fua, bool, 0444);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index faafd7ff43d6..af0e21149dbc 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -7389,7 +7389,7 @@ static int __init rbd_init(void)
* The number of active work items is limited by the number of
* rbd devices * queue depth, so leave @max_active at default.
*/
- rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
+ rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!rbd_wq) {
rc = -ENOMEM;
goto err_out_slab;
diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
index 15627417f12e..f1409e54010a 100644
--- a/drivers/block/rnbd/rnbd-clt.c
+++ b/drivers/block/rnbd/rnbd-clt.c
@@ -942,11 +942,11 @@ static void rnbd_client_release(struct gendisk *gen)
rnbd_clt_put_dev(dev);
}
-static int rnbd_client_getgeo(struct block_device *block_device,
+static int rnbd_client_getgeo(struct gendisk *disk,
struct hd_geometry *geo)
{
u64 size;
- struct rnbd_clt_dev *dev = block_device->bd_disk->private_data;
+ struct rnbd_clt_dev *dev = disk->private_data;
struct queue_limits *limit = &dev->queue->limits;
size = dev->size * (limit->logical_block_size / SECTOR_SIZE);
@@ -1809,7 +1809,7 @@ static int __init rnbd_client_init(void)
unregister_blkdev(rnbd_client_major, "rnbd");
return err;
}
- rnbd_clt_wq = alloc_workqueue("rnbd_clt_wq", 0, 0);
+ rnbd_clt_wq = alloc_workqueue("rnbd_clt_wq", WQ_PERCPU, 0);
if (!rnbd_clt_wq) {
pr_err("Failed to load module, alloc_workqueue failed.\n");
rnbd_clt_destroy_sysfs_files();
diff --git a/drivers/block/rnull.rs b/drivers/block/rnull.rs
deleted file mode 100644
index 6366da12c5a5..000000000000
--- a/drivers/block/rnull.rs
+++ /dev/null
@@ -1,80 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-//! This is a Rust implementation of the C null block driver.
-//!
-//! Supported features:
-//!
-//! - blk-mq interface
-//! - direct completion
-//! - block size 4k
-//!
-//! The driver is not configurable.
-
-use kernel::{
- alloc::flags,
- block::mq::{
- self,
- gen_disk::{self, GenDisk},
- Operations, TagSet,
- },
- error::Result,
- new_mutex, pr_info,
- prelude::*,
- sync::{Arc, Mutex},
- types::ARef,
-};
-
-module! {
- type: NullBlkModule,
- name: "rnull_mod",
- authors: ["Andreas Hindborg"],
- description: "Rust implementation of the C null block driver",
- license: "GPL v2",
-}
-
-#[pin_data]
-struct NullBlkModule {
- #[pin]
- _disk: Mutex<GenDisk<NullBlkDevice>>,
-}
-
-impl kernel::InPlaceModule for NullBlkModule {
- fn init(_module: &'static ThisModule) -> impl PinInit<Self, Error> {
- pr_info!("Rust null_blk loaded\n");
-
- // Use a immediately-called closure as a stable `try` block
- let disk = /* try */ (|| {
- let tagset = Arc::pin_init(TagSet::new(1, 256, 1), flags::GFP_KERNEL)?;
-
- gen_disk::GenDiskBuilder::new()
- .capacity_sectors(4096 << 11)
- .logical_block_size(4096)?
- .physical_block_size(4096)?
- .rotational(false)
- .build(fmt!("rnullb{}", 0), tagset)
- })();
-
- try_pin_init!(Self {
- _disk <- new_mutex!(disk?, "nullb:disk"),
- })
- }
-}
-
-struct NullBlkDevice;
-
-#[vtable]
-impl Operations for NullBlkDevice {
- #[inline(always)]
- fn queue_rq(rq: ARef<mq::Request<Self>>, _is_last: bool) -> Result {
- mq::Request::end_ok(rq)
- .map_err(|_e| kernel::error::code::EIO)
- // We take no refcounts on the request, so we expect to be able to
- // end the request. The request reference must be unique at this
- // point, and so `end_ok` cannot fail.
- .expect("Fatal error - expected to be able to end request");
-
- Ok(())
- }
-
- fn commit_rqs() {}
-}
diff --git a/drivers/block/rnull/Kconfig b/drivers/block/rnull/Kconfig
new file mode 100644
index 000000000000..7bc5b376c128
--- /dev/null
+++ b/drivers/block/rnull/Kconfig
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Rust null block device driver configuration
+
+config BLK_DEV_RUST_NULL
+ tristate "Rust null block driver (Experimental)"
+ depends on RUST && CONFIGFS_FS
+ help
+ This is the Rust implementation of the null block driver. Like
+ the C version, the driver allows the user to create virtual block
+ devices that can be configured via configfs.
+
+ If unsure, say N.
diff --git a/drivers/block/rnull/Makefile b/drivers/block/rnull/Makefile
new file mode 100644
index 000000000000..11cfa5e615dc
--- /dev/null
+++ b/drivers/block/rnull/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_BLK_DEV_RUST_NULL) += rnull_mod.o
+rnull_mod-y := rnull.o
diff --git a/drivers/block/rnull/configfs.rs b/drivers/block/rnull/configfs.rs
new file mode 100644
index 000000000000..8498e9bae6fd
--- /dev/null
+++ b/drivers/block/rnull/configfs.rs
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use super::{NullBlkDevice, THIS_MODULE};
+use core::fmt::{Display, Write};
+use kernel::{
+ block::mq::gen_disk::{GenDisk, GenDiskBuilder},
+ c_str,
+ configfs::{self, AttributeOperations},
+ configfs_attrs, new_mutex,
+ page::PAGE_SIZE,
+ prelude::*,
+ str::{kstrtobool_bytes, CString},
+ sync::Mutex,
+};
+use pin_init::PinInit;
+
+pub(crate) fn subsystem() -> impl PinInit<kernel::configfs::Subsystem<Config>, Error> {
+ let item_type = configfs_attrs! {
+ container: configfs::Subsystem<Config>,
+ data: Config,
+ child: DeviceConfig,
+ attributes: [
+ features: 0,
+ ],
+ };
+
+ kernel::configfs::Subsystem::new(c_str!("rnull"), item_type, try_pin_init!(Config {}))
+}
+
+#[pin_data]
+pub(crate) struct Config {}
+
+#[vtable]
+impl AttributeOperations<0> for Config {
+ type Data = Config;
+
+ fn show(_this: &Config, page: &mut [u8; PAGE_SIZE]) -> Result<usize> {
+ let mut writer = kernel::str::Formatter::new(page);
+ writer.write_str("blocksize,size,rotational,irqmode\n")?;
+ Ok(writer.bytes_written())
+ }
+}
+
+#[vtable]
+impl configfs::GroupOperations for Config {
+ type Child = DeviceConfig;
+
+ fn make_group(
+ &self,
+ name: &CStr,
+ ) -> Result<impl PinInit<configfs::Group<DeviceConfig>, Error>> {
+ let item_type = configfs_attrs! {
+ container: configfs::Group<DeviceConfig>,
+ data: DeviceConfig,
+ attributes: [
+ // Named for compatibility with C null_blk
+ power: 0,
+ blocksize: 1,
+ rotational: 2,
+ size: 3,
+ irqmode: 4,
+ ],
+ };
+
+ Ok(configfs::Group::new(
+ name.try_into()?,
+ item_type,
+ // TODO: cannot coerce new_mutex!() to impl PinInit<_, Error>, so put mutex inside
+ try_pin_init!( DeviceConfig {
+ data <- new_mutex!(DeviceConfigInner {
+ powered: false,
+ block_size: 4096,
+ rotational: false,
+ disk: None,
+ capacity_mib: 4096,
+ irq_mode: IRQMode::None,
+ name: name.try_into()?,
+ }),
+ }),
+ ))
+ }
+}
+
+#[derive(Debug, Clone, Copy)]
+pub(crate) enum IRQMode {
+ None,
+ Soft,
+}
+
+impl TryFrom<u8> for IRQMode {
+ type Error = kernel::error::Error;
+
+ fn try_from(value: u8) -> Result<Self> {
+ match value {
+ 0 => Ok(Self::None),
+ 1 => Ok(Self::Soft),
+ _ => Err(EINVAL),
+ }
+ }
+}
+
+impl Display for IRQMode {
+ fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+ match self {
+ Self::None => f.write_str("0")?,
+ Self::Soft => f.write_str("1")?,
+ }
+ Ok(())
+ }
+}
+
+#[pin_data]
+pub(crate) struct DeviceConfig {
+ #[pin]
+ data: Mutex<DeviceConfigInner>,
+}
+
+#[pin_data]
+struct DeviceConfigInner {
+ powered: bool,
+ name: CString,
+ block_size: u32,
+ rotational: bool,
+ capacity_mib: u64,
+ irq_mode: IRQMode,
+ disk: Option<GenDisk<NullBlkDevice>>,
+}
+
+#[vtable]
+impl configfs::AttributeOperations<0> for DeviceConfig {
+ type Data = DeviceConfig;
+
+ fn show(this: &DeviceConfig, page: &mut [u8; PAGE_SIZE]) -> Result<usize> {
+ let mut writer = kernel::str::Formatter::new(page);
+
+ if this.data.lock().powered {
+ writer.write_str("1\n")?;
+ } else {
+ writer.write_str("0\n")?;
+ }
+
+ Ok(writer.bytes_written())
+ }
+
+ fn store(this: &DeviceConfig, page: &[u8]) -> Result {
+ let power_op = kstrtobool_bytes(page)?;
+ let mut guard = this.data.lock();
+
+ if !guard.powered && power_op {
+ guard.disk = Some(NullBlkDevice::new(
+ &guard.name,
+ guard.block_size,
+ guard.rotational,
+ guard.capacity_mib,
+ guard.irq_mode,
+ )?);
+ guard.powered = true;
+ } else if guard.powered && !power_op {
+ drop(guard.disk.take());
+ guard.powered = false;
+ }
+
+ Ok(())
+ }
+}
+
+#[vtable]
+impl configfs::AttributeOperations<1> for DeviceConfig {
+ type Data = DeviceConfig;
+
+ fn show(this: &DeviceConfig, page: &mut [u8; PAGE_SIZE]) -> Result<usize> {
+ let mut writer = kernel::str::Formatter::new(page);
+ writer.write_fmt(fmt!("{}\n", this.data.lock().block_size))?;
+ Ok(writer.bytes_written())
+ }
+
+ fn store(this: &DeviceConfig, page: &[u8]) -> Result {
+ if this.data.lock().powered {
+ return Err(EBUSY);
+ }
+
+ let text = core::str::from_utf8(page)?.trim();
+ let value = text.parse::<u32>().map_err(|_| EINVAL)?;
+
+ GenDiskBuilder::validate_block_size(value)?;
+ this.data.lock().block_size = value;
+ Ok(())
+ }
+}
+
+#[vtable]
+impl configfs::AttributeOperations<2> for DeviceConfig {
+ type Data = DeviceConfig;
+
+ fn show(this: &DeviceConfig, page: &mut [u8; PAGE_SIZE]) -> Result<usize> {
+ let mut writer = kernel::str::Formatter::new(page);
+
+ if this.data.lock().rotational {
+ writer.write_str("1\n")?;
+ } else {
+ writer.write_str("0\n")?;
+ }
+
+ Ok(writer.bytes_written())
+ }
+
+ fn store(this: &DeviceConfig, page: &[u8]) -> Result {
+ if this.data.lock().powered {
+ return Err(EBUSY);
+ }
+
+ this.data.lock().rotational = kstrtobool_bytes(page)?;
+
+ Ok(())
+ }
+}
+
+#[vtable]
+impl configfs::AttributeOperations<3> for DeviceConfig {
+ type Data = DeviceConfig;
+
+ fn show(this: &DeviceConfig, page: &mut [u8; PAGE_SIZE]) -> Result<usize> {
+ let mut writer = kernel::str::Formatter::new(page);
+ writer.write_fmt(fmt!("{}\n", this.data.lock().capacity_mib))?;
+ Ok(writer.bytes_written())
+ }
+
+ fn store(this: &DeviceConfig, page: &[u8]) -> Result {
+ if this.data.lock().powered {
+ return Err(EBUSY);
+ }
+
+ let text = core::str::from_utf8(page)?.trim();
+ let value = text.parse::<u64>().map_err(|_| EINVAL)?;
+
+ this.data.lock().capacity_mib = value;
+ Ok(())
+ }
+}
+
+#[vtable]
+impl configfs::AttributeOperations<4> for DeviceConfig {
+ type Data = DeviceConfig;
+
+ fn show(this: &DeviceConfig, page: &mut [u8; PAGE_SIZE]) -> Result<usize> {
+ let mut writer = kernel::str::Formatter::new(page);
+ writer.write_fmt(fmt!("{}\n", this.data.lock().irq_mode))?;
+ Ok(writer.bytes_written())
+ }
+
+ fn store(this: &DeviceConfig, page: &[u8]) -> Result {
+ if this.data.lock().powered {
+ return Err(EBUSY);
+ }
+
+ let text = core::str::from_utf8(page)?.trim();
+ let value = text.parse::<u8>().map_err(|_| EINVAL)?;
+
+ this.data.lock().irq_mode = IRQMode::try_from(value)?;
+ Ok(())
+ }
+}
diff --git a/drivers/block/rnull/rnull.rs b/drivers/block/rnull/rnull.rs
new file mode 100644
index 000000000000..1ec694d7f1a6
--- /dev/null
+++ b/drivers/block/rnull/rnull.rs
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! This is a Rust implementation of the C null block driver.
+
+mod configfs;
+
+use configfs::IRQMode;
+use kernel::{
+ block::{
+ self,
+ mq::{
+ self,
+ gen_disk::{self, GenDisk},
+ Operations, TagSet,
+ },
+ },
+ error::Result,
+ pr_info,
+ prelude::*,
+ sync::Arc,
+ types::ARef,
+};
+use pin_init::PinInit;
+
+module! {
+ type: NullBlkModule,
+ name: "rnull_mod",
+ authors: ["Andreas Hindborg"],
+ description: "Rust implementation of the C null block driver",
+ license: "GPL v2",
+}
+
+#[pin_data]
+struct NullBlkModule {
+ #[pin]
+ configfs_subsystem: kernel::configfs::Subsystem<configfs::Config>,
+}
+
+impl kernel::InPlaceModule for NullBlkModule {
+ fn init(_module: &'static ThisModule) -> impl PinInit<Self, Error> {
+ pr_info!("Rust null_blk loaded\n");
+
+ try_pin_init!(Self {
+ configfs_subsystem <- configfs::subsystem(),
+ })
+ }
+}
+
+struct NullBlkDevice;
+
+impl NullBlkDevice {
+ fn new(
+ name: &CStr,
+ block_size: u32,
+ rotational: bool,
+ capacity_mib: u64,
+ irq_mode: IRQMode,
+ ) -> Result<GenDisk<Self>> {
+ let tagset = Arc::pin_init(TagSet::new(1, 256, 1), GFP_KERNEL)?;
+
+ let queue_data = Box::new(QueueData { irq_mode }, GFP_KERNEL)?;
+
+ gen_disk::GenDiskBuilder::new()
+ .capacity_sectors(capacity_mib << (20 - block::SECTOR_SHIFT))
+ .logical_block_size(block_size)?
+ .physical_block_size(block_size)?
+ .rotational(rotational)
+ .build(fmt!("{}", name.to_str()?), tagset, queue_data)
+ }
+}
+
+struct QueueData {
+ irq_mode: IRQMode,
+}
+
+#[vtable]
+impl Operations for NullBlkDevice {
+ type QueueData = KBox<QueueData>;
+
+ #[inline(always)]
+ fn queue_rq(queue_data: &QueueData, rq: ARef<mq::Request<Self>>, _is_last: bool) -> Result {
+ match queue_data.irq_mode {
+ IRQMode::None => mq::Request::end_ok(rq)
+ .map_err(|_e| kernel::error::code::EIO)
+ // We take no refcounts on the request, so we expect to be able to
+ // end the request. The request reference must be unique at this
+ // point, and so `end_ok` cannot fail.
+ .expect("Fatal error - expected to be able to end request"),
+ IRQMode::Soft => mq::Request::complete(rq),
+ }
+ Ok(())
+ }
+
+ fn commit_rqs(_queue_data: &QueueData) {}
+
+ fn complete(rq: ARef<mq::Request<Self>>) {
+ mq::Request::end_ok(rq)
+ .map_err(|_e| kernel::error::code::EIO)
+ // We take no refcounts on the request, so we expect to be able to
+ // end the request. The request reference must be unique at this
+ // point, and so `end_ok` cannot fail.
+ .expect("Fatal error - expected to be able to end request");
+ }
+}
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 7af21fe67671..db1fe9772a4d 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -119,9 +119,8 @@ static inline u32 vdc_tx_dring_avail(struct vio_dring_state *dr)
return vio_dring_avail(dr, VDC_TX_RING_SIZE);
}
-static int vdc_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+static int vdc_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
- struct gendisk *disk = bdev->bd_disk;
sector_t nsect = get_capacity(disk);
sector_t cylinders = nsect;
@@ -1189,7 +1188,7 @@ static void vdc_ldc_reset(struct vdc_port *port)
}
if (port->ldc_timeout)
- mod_delayed_work(system_wq, &port->ldc_reset_timer_work,
+ mod_delayed_work(system_percpu_wq, &port->ldc_reset_timer_work,
round_jiffies(jiffies + HZ * port->ldc_timeout));
mod_timer(&port->vio.timer, round_jiffies(jiffies + HZ));
return;
@@ -1217,7 +1216,7 @@ static int __init vdc_init(void)
{
int err;
- sunvdc_wq = alloc_workqueue("sunvdc", 0, 0);
+ sunvdc_wq = alloc_workqueue("sunvdc", WQ_PERCPU, 0);
if (!sunvdc_wq)
return -ENOMEM;
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index eda33c5eb5e2..416015947ae6 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -711,9 +711,9 @@ static int floppy_ioctl(struct block_device *bdev, blk_mode_t mode,
return -ENOTTY;
}
-static int floppy_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+static int floppy_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
- struct floppy_state *fs = bdev->bd_disk->private_data;
+ struct floppy_state *fs = disk->private_data;
struct floppy_struct *g;
int ret;
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 67d4a867aec4..0c74a41a6753 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -201,7 +201,6 @@ struct ublk_queue {
bool force_abort;
bool canceling;
bool fail_io; /* copy of dev->state == UBLK_S_DEV_FAIL_IO */
- unsigned short nr_io_ready; /* how many ios setup */
spinlock_t cancel_lock;
struct ublk_device *dev;
struct ublk_io ios[];
@@ -234,7 +233,7 @@ struct ublk_device {
struct ublk_params params;
struct completion completion;
- unsigned int nr_queues_ready;
+ u32 nr_io_ready;
bool unprivileged_daemons;
struct mutex cancel_mutex;
bool canceling;
@@ -252,8 +251,7 @@ static void ublk_io_release(void *priv);
static void ublk_stop_dev_unlocked(struct ublk_device *ub);
static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq);
static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
- const struct ublk_queue *ubq, struct ublk_io *io,
- size_t offset);
+ u16 q_id, u16 tag, struct ublk_io *io, size_t offset);
static inline unsigned int ublk_req_build_flags(struct request *req);
static inline struct ublksrv_io_desc *
@@ -532,7 +530,8 @@ static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq,
#endif
-static inline void __ublk_complete_rq(struct request *req);
+static inline void __ublk_complete_rq(struct request *req, struct ublk_io *io,
+ bool need_map);
static dev_t ublk_chr_devt;
static const struct class ublk_chr_class = {
@@ -664,22 +663,44 @@ static inline bool ublk_support_zero_copy(const struct ublk_queue *ubq)
return ubq->flags & UBLK_F_SUPPORT_ZERO_COPY;
}
+static inline bool ublk_dev_support_zero_copy(const struct ublk_device *ub)
+{
+ return ub->dev_info.flags & UBLK_F_SUPPORT_ZERO_COPY;
+}
+
static inline bool ublk_support_auto_buf_reg(const struct ublk_queue *ubq)
{
return ubq->flags & UBLK_F_AUTO_BUF_REG;
}
+static inline bool ublk_dev_support_auto_buf_reg(const struct ublk_device *ub)
+{
+ return ub->dev_info.flags & UBLK_F_AUTO_BUF_REG;
+}
+
static inline bool ublk_support_user_copy(const struct ublk_queue *ubq)
{
return ubq->flags & UBLK_F_USER_COPY;
}
+static inline bool ublk_dev_support_user_copy(const struct ublk_device *ub)
+{
+ return ub->dev_info.flags & UBLK_F_USER_COPY;
+}
+
static inline bool ublk_need_map_io(const struct ublk_queue *ubq)
{
return !ublk_support_user_copy(ubq) && !ublk_support_zero_copy(ubq) &&
!ublk_support_auto_buf_reg(ubq);
}
+static inline bool ublk_dev_need_map_io(const struct ublk_device *ub)
+{
+ return !ublk_dev_support_user_copy(ub) &&
+ !ublk_dev_support_zero_copy(ub) &&
+ !ublk_dev_support_auto_buf_reg(ub);
+}
+
static inline bool ublk_need_req_ref(const struct ublk_queue *ubq)
{
/*
@@ -697,6 +718,13 @@ static inline bool ublk_need_req_ref(const struct ublk_queue *ubq)
ublk_support_auto_buf_reg(ubq);
}
+static inline bool ublk_dev_need_req_ref(const struct ublk_device *ub)
+{
+ return ublk_dev_support_user_copy(ub) ||
+ ublk_dev_support_zero_copy(ub) ||
+ ublk_dev_support_auto_buf_reg(ub);
+}
+
static inline void ublk_init_req_ref(const struct ublk_queue *ubq,
struct ublk_io *io)
{
@@ -711,8 +739,11 @@ static inline bool ublk_get_req_ref(struct ublk_io *io)
static inline void ublk_put_req_ref(struct ublk_io *io, struct request *req)
{
- if (refcount_dec_and_test(&io->ref))
- __ublk_complete_rq(req);
+ if (!refcount_dec_and_test(&io->ref))
+ return;
+
+ /* ublk_need_map_io() and ublk_need_req_ref() are mutually exclusive */
+ __ublk_complete_rq(req, io, false);
}
static inline bool ublk_sub_req_ref(struct ublk_io *io)
@@ -728,6 +759,11 @@ static inline bool ublk_need_get_data(const struct ublk_queue *ubq)
return ubq->flags & UBLK_F_NEED_GET_DATA;
}
+static inline bool ublk_dev_need_get_data(const struct ublk_device *ub)
+{
+ return ub->dev_info.flags & UBLK_F_NEED_GET_DATA;
+}
+
/* Called in slow path only, keep it noinline for trace purpose */
static noinline struct ublk_device *ublk_get_device(struct ublk_device *ub)
{
@@ -764,11 +800,9 @@ static inline int __ublk_queue_cmd_buf_size(int depth)
return round_up(depth * sizeof(struct ublksrv_io_desc), PAGE_SIZE);
}
-static inline int ublk_queue_cmd_buf_size(struct ublk_device *ub, int q_id)
+static inline int ublk_queue_cmd_buf_size(struct ublk_device *ub)
{
- struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
-
- return __ublk_queue_cmd_buf_size(ubq->q_depth);
+ return __ublk_queue_cmd_buf_size(ub->dev_info.queue_depth);
}
static int ublk_max_cmd_buf_size(void)
@@ -1019,13 +1053,13 @@ static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
return rq_bytes;
}
-static int ublk_unmap_io(const struct ublk_queue *ubq,
+static int ublk_unmap_io(bool need_map,
const struct request *req,
const struct ublk_io *io)
{
const unsigned int rq_bytes = blk_rq_bytes(req);
- if (!ublk_need_map_io(ubq))
+ if (!need_map)
return rq_bytes;
if (ublk_need_unmap_req(req)) {
@@ -1072,13 +1106,8 @@ static blk_status_t ublk_setup_iod(struct ublk_queue *ubq, struct request *req)
{
struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag);
struct ublk_io *io = &ubq->ios[req->tag];
- enum req_op op = req_op(req);
u32 ublk_op;
- if (!ublk_queue_is_zoned(ubq) &&
- (op_is_zone_mgmt(op) || op == REQ_OP_ZONE_APPEND))
- return BLK_STS_IOERR;
-
switch (req_op(req)) {
case REQ_OP_READ:
ublk_op = UBLK_IO_OP_READ;
@@ -1117,10 +1146,9 @@ static inline struct ublk_uring_cmd_pdu *ublk_get_uring_cmd_pdu(
}
/* todo: handle partial completion */
-static inline void __ublk_complete_rq(struct request *req)
+static inline void __ublk_complete_rq(struct request *req, struct ublk_io *io,
+ bool need_map)
{
- struct ublk_queue *ubq = req->mq_hctx->driver_data;
- struct ublk_io *io = &ubq->ios[req->tag];
unsigned int unmapped_bytes;
blk_status_t res = BLK_STS_OK;
@@ -1144,7 +1172,7 @@ static inline void __ublk_complete_rq(struct request *req)
goto exit;
/* for READ request, writing data in iod->addr to rq buffers */
- unmapped_bytes = ublk_unmap_io(ubq, req, io);
+ unmapped_bytes = ublk_unmap_io(need_map, req, io);
/*
* Extremely impossible since we got data filled in just before
@@ -1189,7 +1217,7 @@ static void ublk_complete_io_cmd(struct ublk_io *io, struct request *req,
struct io_uring_cmd *cmd = __ublk_prep_compl_io_cmd(io, req);
/* tell ublksrv one io request is coming */
- io_uring_cmd_done(cmd, res, 0, issue_flags);
+ io_uring_cmd_done(cmd, res, issue_flags);
}
#define UBLK_REQUEUE_DELAY_MS 3
@@ -1500,9 +1528,6 @@ static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
{
int i;
- /* All old ioucmds have to be completed */
- ubq->nr_io_ready = 0;
-
for (i = 0; i < ubq->q_depth; i++) {
struct ublk_io *io = &ubq->ios[i];
@@ -1551,7 +1576,7 @@ static void ublk_reset_ch_dev(struct ublk_device *ub)
/* set to NULL, otherwise new tasks cannot mmap io_cmd_buf */
ub->mm = NULL;
- ub->nr_queues_ready = 0;
+ ub->nr_io_ready = 0;
ub->unprivileged_daemons = false;
ub->ublksrv_tgid = -1;
}
@@ -1775,23 +1800,23 @@ static int ublk_ch_mmap(struct file *filp, struct vm_area_struct *vma)
__func__, q_id, current->pid, vma->vm_start,
phys_off, (unsigned long)sz);
- if (sz != ublk_queue_cmd_buf_size(ub, q_id))
+ if (sz != ublk_queue_cmd_buf_size(ub))
return -EINVAL;
pfn = virt_to_phys(ublk_queue_cmd_buf(ub, q_id)) >> PAGE_SHIFT;
return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
}
-static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
+static void __ublk_fail_req(struct ublk_device *ub, struct ublk_io *io,
struct request *req)
{
WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_ACTIVE);
- if (ublk_nosrv_should_reissue_outstanding(ubq->dev))
+ if (ublk_nosrv_should_reissue_outstanding(ub))
blk_mq_requeue_request(req, false);
else {
io->res = -EIO;
- __ublk_complete_rq(req);
+ __ublk_complete_rq(req, io, ublk_dev_need_map_io(ub));
}
}
@@ -1811,7 +1836,7 @@ static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
struct ublk_io *io = &ubq->ios[i];
if (io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)
- __ublk_fail_req(ubq, io, io->req);
+ __ublk_fail_req(ub, io, io->req);
}
}
@@ -1873,7 +1898,7 @@ static void ublk_cancel_cmd(struct ublk_queue *ubq, unsigned tag,
spin_unlock(&ubq->cancel_lock);
if (!done)
- io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, 0, issue_flags);
+ io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, issue_flags);
}
/*
@@ -1916,9 +1941,11 @@ static void ublk_uring_cmd_cancel_fn(struct io_uring_cmd *cmd,
ublk_cancel_cmd(ubq, pdu->tag, issue_flags);
}
-static inline bool ublk_queue_ready(struct ublk_queue *ubq)
+static inline bool ublk_dev_ready(const struct ublk_device *ub)
{
- return ubq->nr_io_ready == ubq->q_depth;
+ u32 total = (u32)ub->dev_info.nr_hw_queues * ub->dev_info.queue_depth;
+
+ return ub->nr_io_ready == total;
}
static void ublk_cancel_queue(struct ublk_queue *ubq)
@@ -2042,16 +2069,14 @@ static void ublk_reset_io_flags(struct ublk_device *ub)
}
/* device can only be started after all IOs are ready */
-static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
+static void ublk_mark_io_ready(struct ublk_device *ub)
__must_hold(&ub->mutex)
{
- ubq->nr_io_ready++;
- if (ublk_queue_ready(ubq))
- ub->nr_queues_ready++;
if (!ub->unprivileged_daemons && !capable(CAP_SYS_ADMIN))
ub->unprivileged_daemons = true;
- if (ub->nr_queues_ready == ub->dev_info.nr_hw_queues) {
+ ub->nr_io_ready++;
+ if (ublk_dev_ready(ub)) {
/* now we are ready for handling ublk io request */
ublk_reset_io_flags(ub);
complete_all(&ub->completion);
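Readiness is now tracked per device rather than per queue; a worked example with assumed values:

    /*
     * nr_hw_queues = 4 and queue_depth = 128 give 4 * 128 = 512 ios in
     * total, so ublk_dev_ready() returns true once 512 FETCH_REQ
     * commands have been accepted and ub->nr_io_ready reaches 512.
     */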
@@ -2122,11 +2147,11 @@ ublk_fill_io_cmd(struct ublk_io *io, struct io_uring_cmd *cmd)
}
static inline int
-ublk_config_io_buf(const struct ublk_queue *ubq, struct ublk_io *io,
+ublk_config_io_buf(const struct ublk_device *ub, struct ublk_io *io,
struct io_uring_cmd *cmd, unsigned long buf_addr,
u16 *buf_idx)
{
- if (ublk_support_auto_buf_reg(ubq))
+ if (ublk_dev_support_auto_buf_reg(ub))
return ublk_handle_auto_buf_reg(io, cmd, buf_idx);
io->addr = buf_addr;
@@ -2165,18 +2190,18 @@ static void ublk_io_release(void *priv)
}
static int ublk_register_io_buf(struct io_uring_cmd *cmd,
- const struct ublk_queue *ubq,
+ struct ublk_device *ub,
+ u16 q_id, u16 tag,
struct ublk_io *io,
unsigned int index, unsigned int issue_flags)
{
- struct ublk_device *ub = cmd->file->private_data;
struct request *req;
int ret;
- if (!ublk_support_zero_copy(ubq))
+ if (!ublk_dev_support_zero_copy(ub))
return -EINVAL;
- req = __ublk_check_and_get_req(ub, ubq, io, 0);
+ req = __ublk_check_and_get_req(ub, q_id, tag, io, 0);
if (!req)
return -EINVAL;
@@ -2192,7 +2217,8 @@ static int ublk_register_io_buf(struct io_uring_cmd *cmd,
static int
ublk_daemon_register_io_buf(struct io_uring_cmd *cmd,
- const struct ublk_queue *ubq, struct ublk_io *io,
+ struct ublk_device *ub,
+ u16 q_id, u16 tag, struct ublk_io *io,
unsigned index, unsigned issue_flags)
{
unsigned new_registered_buffers;
@@ -2205,9 +2231,10 @@ ublk_daemon_register_io_buf(struct io_uring_cmd *cmd,
*/
new_registered_buffers = io->task_registered_buffers + 1;
if (unlikely(new_registered_buffers >= UBLK_REFCOUNT_INIT))
- return ublk_register_io_buf(cmd, ubq, io, index, issue_flags);
+ return ublk_register_io_buf(cmd, ub, q_id, tag, io, index,
+ issue_flags);
- if (!ublk_support_zero_copy(ubq) || !ublk_rq_has_data(req))
+ if (!ublk_dev_support_zero_copy(ub) || !ublk_rq_has_data(req))
return -EINVAL;
ret = io_buffer_register_bvec(cmd, req, ublk_io_release, index,
@@ -2229,14 +2256,14 @@ static int ublk_unregister_io_buf(struct io_uring_cmd *cmd,
return io_buffer_unregister_bvec(cmd, index, issue_flags);
}
-static int ublk_check_fetch_buf(const struct ublk_queue *ubq, __u64 buf_addr)
+static int ublk_check_fetch_buf(const struct ublk_device *ub, __u64 buf_addr)
{
- if (ublk_need_map_io(ubq)) {
+ if (ublk_dev_need_map_io(ub)) {
/*
* FETCH_RQ has to provide IO buffer if NEED GET
* DATA is not enabled
*/
- if (!buf_addr && !ublk_need_get_data(ubq))
+ if (!buf_addr && !ublk_dev_need_get_data(ub))
return -EINVAL;
} else if (buf_addr) {
/* User copy requires addr to be unset */
@@ -2245,10 +2272,9 @@ static int ublk_check_fetch_buf(const struct ublk_queue *ubq, __u64 buf_addr)
return 0;
}
-static int ublk_fetch(struct io_uring_cmd *cmd, struct ublk_queue *ubq,
+static int ublk_fetch(struct io_uring_cmd *cmd, struct ublk_device *ub,
struct ublk_io *io, __u64 buf_addr)
{
- struct ublk_device *ub = ubq->dev;
int ret = 0;
/*
@@ -2257,8 +2283,8 @@ static int ublk_fetch(struct io_uring_cmd *cmd, struct ublk_queue *ubq,
* FETCH, so it is fine even for IO_URING_F_NONBLOCK.
*/
mutex_lock(&ub->mutex);
- /* UBLK_IO_FETCH_REQ is only allowed before queue is setup */
- if (ublk_queue_ready(ubq)) {
+ /* UBLK_IO_FETCH_REQ is only allowed before the dev is set up */
+ if (ublk_dev_ready(ub)) {
ret = -EBUSY;
goto out;
}
@@ -2272,28 +2298,28 @@ static int ublk_fetch(struct io_uring_cmd *cmd, struct ublk_queue *ubq,
WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV);
ublk_fill_io_cmd(io, cmd);
- ret = ublk_config_io_buf(ubq, io, cmd, buf_addr, NULL);
+ ret = ublk_config_io_buf(ub, io, cmd, buf_addr, NULL);
if (ret)
goto out;
WRITE_ONCE(io->task, get_task_struct(current));
- ublk_mark_io_ready(ub, ubq);
+ ublk_mark_io_ready(ub);
out:
mutex_unlock(&ub->mutex);
return ret;
}
-static int ublk_check_commit_and_fetch(const struct ublk_queue *ubq,
+static int ublk_check_commit_and_fetch(const struct ublk_device *ub,
struct ublk_io *io, __u64 buf_addr)
{
struct request *req = io->req;
- if (ublk_need_map_io(ubq)) {
+ if (ublk_dev_need_map_io(ub)) {
/*
* COMMIT_AND_FETCH_REQ has to provide IO buffer if
* NEED GET DATA is not enabled or it is Read IO.
*/
- if (!buf_addr && (!ublk_need_get_data(ubq) ||
+ if (!buf_addr && (!ublk_dev_need_get_data(ub) ||
req_op(req) == REQ_OP_READ))
return -EINVAL;
} else if (req_op(req) != REQ_OP_ZONE_APPEND && buf_addr) {
@@ -2307,10 +2333,10 @@ static int ublk_check_commit_and_fetch(const struct ublk_queue *ubq,
return 0;
}
-static bool ublk_need_complete_req(const struct ublk_queue *ubq,
+static bool ublk_need_complete_req(const struct ublk_device *ub,
struct ublk_io *io)
{
- if (ublk_need_req_ref(ubq))
+ if (ublk_dev_need_req_ref(ub))
return ublk_sub_req_ref(io);
return true;
}
@@ -2333,23 +2359,28 @@ static bool ublk_get_data(const struct ublk_queue *ubq, struct ublk_io *io,
return ublk_start_io(ubq, req, io);
}
-static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
- unsigned int issue_flags,
- const struct ublksrv_io_cmd *ub_cmd)
+static int ublk_ch_uring_cmd_local(struct io_uring_cmd *cmd,
+ unsigned int issue_flags)
{
+ /* May point to userspace-mapped memory */
+ const struct ublksrv_io_cmd *ub_src = io_uring_sqe_cmd(cmd->sqe);
u16 buf_idx = UBLK_INVALID_BUF_IDX;
struct ublk_device *ub = cmd->file->private_data;
struct ublk_queue *ubq;
struct ublk_io *io;
u32 cmd_op = cmd->cmd_op;
- unsigned tag = ub_cmd->tag;
+ u16 q_id = READ_ONCE(ub_src->q_id);
+ u16 tag = READ_ONCE(ub_src->tag);
+ s32 result = READ_ONCE(ub_src->result);
+ u64 addr = READ_ONCE(ub_src->addr); /* unioned with zone_append_lba */
struct request *req;
int ret;
bool compl;
+ WARN_ON_ONCE(issue_flags & IO_URING_F_UNLOCKED);
+
pr_devel("%s: received: cmd op %d queue %d tag %d result %d\n",
- __func__, cmd->cmd_op, ub_cmd->q_id, tag,
- ub_cmd->result);
+ __func__, cmd->cmd_op, q_id, tag, result);
ret = ublk_check_cmd_op(cmd_op);
if (ret)
@@ -2360,25 +2391,24 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
* so no need to validate the q_id, tag, or task
*/
if (_IOC_NR(cmd_op) == UBLK_IO_UNREGISTER_IO_BUF)
- return ublk_unregister_io_buf(cmd, ub, ub_cmd->addr,
- issue_flags);
+ return ublk_unregister_io_buf(cmd, ub, addr, issue_flags);
ret = -EINVAL;
- if (ub_cmd->q_id >= ub->dev_info.nr_hw_queues)
+ if (q_id >= ub->dev_info.nr_hw_queues)
goto out;
- ubq = ublk_get_queue(ub, ub_cmd->q_id);
+ ubq = ublk_get_queue(ub, q_id);
- if (tag >= ubq->q_depth)
+ if (tag >= ub->dev_info.queue_depth)
goto out;
io = &ubq->ios[tag];
/* UBLK_IO_FETCH_REQ can be handled on any task, which sets io->task */
if (unlikely(_IOC_NR(cmd_op) == UBLK_IO_FETCH_REQ)) {
- ret = ublk_check_fetch_buf(ubq, ub_cmd->addr);
+ ret = ublk_check_fetch_buf(ub, addr);
if (ret)
goto out;
- ret = ublk_fetch(cmd, ubq, io, ub_cmd->addr);
+ ret = ublk_fetch(cmd, ub, io, addr);
if (ret)
goto out;
@@ -2392,8 +2422,8 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
* so can be handled on any task
*/
if (_IOC_NR(cmd_op) == UBLK_IO_REGISTER_IO_BUF)
- return ublk_register_io_buf(cmd, ubq, io, ub_cmd->addr,
- issue_flags);
+ return ublk_register_io_buf(cmd, ub, q_id, tag, io,
+ addr, issue_flags);
goto out;
}
@@ -2414,24 +2444,24 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
switch (_IOC_NR(cmd_op)) {
case UBLK_IO_REGISTER_IO_BUF:
- return ublk_daemon_register_io_buf(cmd, ubq, io, ub_cmd->addr,
+ return ublk_daemon_register_io_buf(cmd, ub, q_id, tag, io, addr,
issue_flags);
case UBLK_IO_COMMIT_AND_FETCH_REQ:
- ret = ublk_check_commit_and_fetch(ubq, io, ub_cmd->addr);
+ ret = ublk_check_commit_and_fetch(ub, io, addr);
if (ret)
goto out;
- io->res = ub_cmd->result;
+ io->res = result;
req = ublk_fill_io_cmd(io, cmd);
- ret = ublk_config_io_buf(ubq, io, cmd, ub_cmd->addr, &buf_idx);
- compl = ublk_need_complete_req(ubq, io);
+ ret = ublk_config_io_buf(ub, io, cmd, addr, &buf_idx);
+ compl = ublk_need_complete_req(ub, io);
/* can't touch 'ublk_io' any more */
if (buf_idx != UBLK_INVALID_BUF_IDX)
io_buffer_unregister_bvec(cmd, buf_idx, issue_flags);
if (req_op(req) == REQ_OP_ZONE_APPEND)
- req->__sector = ub_cmd->zone_append_lba;
+ req->__sector = addr;
if (compl)
- __ublk_complete_rq(req);
+ __ublk_complete_rq(req, io, ublk_dev_need_map_io(ub));
if (ret)
goto out;
@@ -2443,7 +2473,7 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
* request
*/
req = ublk_fill_io_cmd(io, cmd);
- ret = ublk_config_io_buf(ubq, io, cmd, ub_cmd->addr, NULL);
+ ret = ublk_config_io_buf(ub, io, cmd, addr, NULL);
WARN_ON_ONCE(ret);
if (likely(ublk_get_data(ubq, io, req))) {
__ublk_prep_compl_io_cmd(io, req);
@@ -2463,16 +2493,15 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
}
static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
- const struct ublk_queue *ubq, struct ublk_io *io, size_t offset)
+ u16 q_id, u16 tag, struct ublk_io *io, size_t offset)
{
- unsigned tag = io - ubq->ios;
struct request *req;
/*
* can't use io->req in case of concurrent UBLK_IO_COMMIT_AND_FETCH_REQ,
* which would overwrite it with io->cmd
*/
- req = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], tag);
+ req = blk_mq_tag_to_rq(ub->tag_set.tags[q_id], tag);
if (!req)
return NULL;
@@ -2494,33 +2523,13 @@ fail_put:
return NULL;
}
-static inline int ublk_ch_uring_cmd_local(struct io_uring_cmd *cmd,
- unsigned int issue_flags)
-{
- /*
- * Not necessary for async retry, but let's keep it simple and always
- * copy the values to avoid any potential reuse.
- */
- const struct ublksrv_io_cmd *ub_src = io_uring_sqe_cmd(cmd->sqe);
- const struct ublksrv_io_cmd ub_cmd = {
- .q_id = READ_ONCE(ub_src->q_id),
- .tag = READ_ONCE(ub_src->tag),
- .result = READ_ONCE(ub_src->result),
- .addr = READ_ONCE(ub_src->addr)
- };
-
- WARN_ON_ONCE(issue_flags & IO_URING_F_UNLOCKED);
-
- return __ublk_ch_uring_cmd(cmd, issue_flags, &ub_cmd);
-}
-
static void ublk_ch_uring_cmd_cb(struct io_uring_cmd *cmd,
unsigned int issue_flags)
{
int ret = ublk_ch_uring_cmd_local(cmd, issue_flags);
if (ret != -EIOCBQUEUED)
- io_uring_cmd_done(cmd, ret, 0, issue_flags);
+ io_uring_cmd_done(cmd, ret, issue_flags);
}
static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
@@ -2583,17 +2592,14 @@ static struct request *ublk_check_and_get_req(struct kiocb *iocb,
return ERR_PTR(-EINVAL);
ubq = ublk_get_queue(ub, q_id);
- if (!ubq)
- return ERR_PTR(-EINVAL);
-
- if (!ublk_support_user_copy(ubq))
+ if (!ublk_dev_support_user_copy(ub))
return ERR_PTR(-EACCES);
- if (tag >= ubq->q_depth)
+ if (tag >= ub->dev_info.queue_depth)
return ERR_PTR(-EINVAL);
*io = &ubq->ios[tag];
- req = __ublk_check_and_get_req(ub, ubq, *io, buf_off);
+ req = __ublk_check_and_get_req(ub, q_id, tag, *io, buf_off);
if (!req)
return ERR_PTR(-EINVAL);
@@ -2656,7 +2662,7 @@ static const struct file_operations ublk_ch_fops = {
static void ublk_deinit_queue(struct ublk_device *ub, int q_id)
{
- int size = ublk_queue_cmd_buf_size(ub, q_id);
+ int size = ublk_queue_cmd_buf_size(ub);
struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
int i;
@@ -2683,7 +2689,7 @@ static int ublk_init_queue(struct ublk_device *ub, int q_id)
ubq->flags = ub->dev_info.flags;
ubq->q_id = q_id;
ubq->q_depth = ub->dev_info.queue_depth;
- size = ublk_queue_cmd_buf_size(ub, q_id);
+ size = ublk_queue_cmd_buf_size(ub);
ptr = (void *) __get_free_pages(gfp_flags, get_order(size));
if (!ptr)
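[Editor's note] The ublk rework above threads q_id/tag/result/addr through as plain values instead of re-reading the SQE at each use. A minimal sketch of the snapshot pattern it relies on (field names follow struct ublksrv_io_cmd, as in the removed ublk_ch_uring_cmd_local(); the local names are illustrative):

	const struct ublksrv_io_cmd *ub_src = io_uring_sqe_cmd(cmd->sqe);
	u16 q_id   = READ_ONCE(ub_src->q_id);	/* read each field exactly once */
	u16 tag    = READ_ONCE(ub_src->tag);
	s32 result = READ_ONCE(ub_src->result);
	u64 addr   = READ_ONCE(ub_src->addr);	/* doubles as zone_append_lba */

Reading each field once avoids time-of-check/time-of-use races with userspace, since the SQE lives in shared memory; all later validation operates on the stable local copies.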
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index e649fa67bac1..f061420dfb10 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -829,9 +829,9 @@ out:
}
/* We provide getgeo only to please some old bootloader/partitioning tools */
-static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
+static int virtblk_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
- struct virtio_blk *vblk = bd->bd_disk->private_data;
+ struct virtio_blk *vblk = disk->private_data;
int ret = 0;
mutex_lock(&vblk->vdev_mutex);
@@ -853,7 +853,7 @@ static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
/* some standard values, similar to sd */
geo->heads = 1 << 6;
geo->sectors = 1 << 5;
- geo->cylinders = get_capacity(bd->bd_disk) >> 11;
+ geo->cylinders = get_capacity(disk) >> 11;
}
out:
mutex_unlock(&vblk->vdev_mutex);
@@ -1682,7 +1682,7 @@ static int __init virtio_blk_init(void)
{
int error;
- virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
+ virtblk_wq = alloc_workqueue("virtio-blk", WQ_PERCPU, 0);
if (!virtblk_wq)
return -ENOMEM;
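[Editor's note] The getgeo hunk above and the xen-blkfront one below track a block_device_operations prototype change: ->getgeo() now receives the gendisk directly instead of a block_device. A minimal sketch under the new signature (demo_getgeo is a made-up name):

	static int demo_getgeo(struct gendisk *disk, struct hd_geometry *geo)
	{
		geo->heads     = 1 << 6;
		geo->sectors   = 1 << 5;
		geo->cylinders = get_capacity(disk) >> 11; /* 2^6 * 2^5 = 2^11 */
		return 0;
	}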
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 5babe575c288..04fc6b552c04 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -493,11 +493,11 @@ static void blkif_restart_queue_callback(void *arg)
schedule_work(&rinfo->work);
}
-static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
+static int blkif_getgeo(struct gendisk *disk, struct hd_geometry *hg)
{
/* We don't have real geometry info, but let's at least return
values consistent with the size of the device */
- sector_t nsect = get_capacity(bd->bd_disk);
+ sector_t nsect = get_capacity(disk);
sector_t cylinders = nsect;
hg->heads = 0xff;
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index f31652085adc..a43074657531 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1085,7 +1085,7 @@ static int read_from_bdev_sync(struct zram *zram, struct page *page,
work.entry = entry;
INIT_WORK_ONSTACK(&work.work, zram_sync_read);
- queue_work(system_unbound_wq, &work.work);
+ queue_work(system_dfl_wq, &work.work);
flush_work(&work.work);
destroy_work_on_stack(&work.work);
@@ -1225,18 +1225,6 @@ static void comp_algorithm_set(struct zram *zram, u32 prio, const char *alg)
zram->comp_algs[prio] = alg;
}
-static ssize_t __comp_algorithm_show(struct zram *zram, u32 prio,
- char *buf, ssize_t at)
-{
- ssize_t sz;
-
- down_read(&zram->init_lock);
- sz = zcomp_available_show(zram->comp_algs[prio], buf, at);
- up_read(&zram->init_lock);
-
- return sz;
-}
-
static int __comp_algorithm_store(struct zram *zram, u32 prio, const char *buf)
{
char *compressor;
@@ -1387,8 +1375,12 @@ static ssize_t comp_algorithm_show(struct device *dev,
char *buf)
{
struct zram *zram = dev_to_zram(dev);
+ ssize_t sz;
- return __comp_algorithm_show(zram, ZRAM_PRIMARY_COMP, buf, 0);
+ down_read(&zram->init_lock);
+ sz = zcomp_available_show(zram->comp_algs[ZRAM_PRIMARY_COMP], buf, 0);
+ up_read(&zram->init_lock);
+ return sz;
}
static ssize_t comp_algorithm_store(struct device *dev,
@@ -1412,14 +1404,15 @@ static ssize_t recomp_algorithm_show(struct device *dev,
ssize_t sz = 0;
u32 prio;
+ down_read(&zram->init_lock);
for (prio = ZRAM_SECONDARY_COMP; prio < ZRAM_MAX_COMPS; prio++) {
if (!zram->comp_algs[prio])
continue;
sz += sysfs_emit_at(buf, sz, "#%d: ", prio);
- sz += __comp_algorithm_show(zram, prio, buf, sz);
+ sz += zcomp_available_show(zram->comp_algs[prio], buf, sz);
}
-
+ up_read(&zram->init_lock);
return sz;
}
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index 8b43dfc755de..b7ba667a3d09 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -20,7 +20,7 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
-#include "h4_recv.h"
+#include "hci_uart.h"
#define VERSION "0.11"
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index be69d21c9aa7..9d29ab811f80 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -484,6 +484,7 @@ int btintel_version_info_tlv(struct hci_dev *hdev,
case 0x1d: /* BlazarU (BzrU) */
case 0x1e: /* BlazarI (Bzr) */
case 0x1f: /* Scorpious Peak */
+ case 0x22: /* BlazarIW (BzrIW) */
break;
default:
bt_dev_err(hdev, "Unsupported Intel hardware variant (0x%x)",
@@ -3253,6 +3254,7 @@ void btintel_set_msft_opcode(struct hci_dev *hdev, u8 hw_variant)
case 0x1d:
case 0x1e:
case 0x1f:
+ case 0x22:
hci_set_msft_opcode(hdev, 0xFC1E);
break;
default:
@@ -3593,6 +3595,7 @@ static int btintel_setup_combined(struct hci_dev *hdev)
case 0x1d:
case 0x1e:
case 0x1f:
+ case 0x22:
/* Display version information of TLV type */
btintel_version_info_tlv(hdev, &ver_tlv);
diff --git a/drivers/bluetooth/btintel_pcie.c b/drivers/bluetooth/btintel_pcie.c
index 6e7bbbd35279..6d3963bd56a9 100644
--- a/drivers/bluetooth/btintel_pcie.c
+++ b/drivers/bluetooth/btintel_pcie.c
@@ -15,6 +15,7 @@
#include <linux/interrupt.h>
#include <linux/unaligned.h>
+#include <linux/devcoredump.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -35,8 +36,13 @@
/* Intel Bluetooth PCIe device id table */
static const struct pci_device_id btintel_pcie_table[] = {
+ /* BlazarI, Wildcat Lake */
{ BTINTEL_PCI_DEVICE(0x4D76, PCI_ANY_ID) },
+ /* BlazarI, Lunar Lake */
{ BTINTEL_PCI_DEVICE(0xA876, PCI_ANY_ID) },
+ /* Scorpious, Panther Lake-H484 */
+ { BTINTEL_PCI_DEVICE(0xE376, PCI_ANY_ID) },
+ /* Scorpious, Panther Lake-H404 */
{ BTINTEL_PCI_DEVICE(0xE476, PCI_ANY_ID) },
{ 0 }
};
@@ -554,25 +560,6 @@ static void btintel_pcie_mac_init(struct btintel_pcie_data *data)
btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
}
-static int btintel_pcie_add_dmp_data(struct hci_dev *hdev, const void *data, int size)
-{
- struct sk_buff *skb;
- int err;
-
- skb = alloc_skb(size, GFP_ATOMIC);
- if (!skb)
- return -ENOMEM;
-
- skb_put_data(skb, data, size);
- err = hci_devcd_append(hdev, skb);
- if (err) {
- bt_dev_err(hdev, "Failed to append data in the coredump");
- return err;
- }
-
- return 0;
-}
-
static int btintel_pcie_get_mac_access(struct btintel_pcie_data *data)
{
u32 reg;
@@ -617,30 +604,35 @@ static void btintel_pcie_release_mac_access(struct btintel_pcie_data *data)
btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
}
-static void btintel_pcie_copy_tlv(struct sk_buff *skb, enum btintel_pcie_tlv_type type,
- void *data, int size)
+static void *btintel_pcie_copy_tlv(void *dest, enum btintel_pcie_tlv_type type,
+ void *data, size_t size)
{
struct intel_tlv *tlv;
- tlv = skb_put(skb, sizeof(*tlv) + size);
+ tlv = dest;
tlv->type = type;
tlv->len = size;
memcpy(tlv->val, data, tlv->len);
+ return dest + sizeof(*tlv) + size;
}
static int btintel_pcie_read_dram_buffers(struct btintel_pcie_data *data)
{
- u32 offset, prev_size, wr_ptr_status, dump_size, i;
+ u32 offset, prev_size, wr_ptr_status, dump_size, data_len;
struct btintel_pcie_dbgc *dbgc = &data->dbgc;
- u8 buf_idx, dump_time_len, fw_build;
struct hci_dev *hdev = data->hdev;
+ u8 *pdata, *p, buf_idx;
struct intel_tlv *tlv;
struct timespec64 now;
- struct sk_buff *skb;
struct tm tm_now;
- char buf[256];
- u16 hdr_len;
- int ret;
+ char fw_build[128];
+ char ts[128];
+ char vendor[64];
+ char driver[64];
+
+ if (!IS_ENABLED(CONFIG_DEV_COREDUMP))
+ return -EOPNOTSUPP;
+
wr_ptr_status = btintel_pcie_rd_dev_mem(data, BTINTEL_PCIE_DBGC_CUR_DBGBUFF_STATUS);
offset = wr_ptr_status & BTINTEL_PCIE_DBG_OFFSET_BIT_MASK;
@@ -657,88 +649,84 @@ static int btintel_pcie_read_dram_buffers(struct btintel_pcie_data *data)
else
return -EINVAL;
+ snprintf(vendor, sizeof(vendor), "Vendor: Intel\n");
+ snprintf(driver, sizeof(driver), "Driver: %s\n",
+ data->dmp_hdr.driver_name);
+
ktime_get_real_ts64(&now);
time64_to_tm(now.tv_sec, 0, &tm_now);
- dump_time_len = snprintf(buf, sizeof(buf), "Dump Time: %02d-%02d-%04ld %02d:%02d:%02d",
+ snprintf(ts, sizeof(ts), "Dump Time: %02d-%02d-%04ld %02d:%02d:%02d",
tm_now.tm_mday, tm_now.tm_mon + 1, tm_now.tm_year + 1900,
tm_now.tm_hour, tm_now.tm_min, tm_now.tm_sec);
- fw_build = snprintf(buf + dump_time_len, sizeof(buf) - dump_time_len,
+ snprintf(fw_build, sizeof(fw_build),
"Firmware Timestamp: Year %u WW %02u buildtype %u build %u",
2000 + (data->dmp_hdr.fw_timestamp >> 8),
data->dmp_hdr.fw_timestamp & 0xff, data->dmp_hdr.fw_build_type,
data->dmp_hdr.fw_build_num);
- hdr_len = sizeof(*tlv) + sizeof(data->dmp_hdr.cnvi_bt) +
- sizeof(*tlv) + sizeof(data->dmp_hdr.write_ptr) +
- sizeof(*tlv) + sizeof(data->dmp_hdr.wrap_ctr) +
- sizeof(*tlv) + sizeof(data->dmp_hdr.trigger_reason) +
- sizeof(*tlv) + sizeof(data->dmp_hdr.fw_git_sha1) +
- sizeof(*tlv) + sizeof(data->dmp_hdr.cnvr_top) +
- sizeof(*tlv) + sizeof(data->dmp_hdr.cnvi_top) +
- sizeof(*tlv) + dump_time_len +
- sizeof(*tlv) + fw_build;
+ data_len = sizeof(*tlv) + sizeof(data->dmp_hdr.cnvi_bt) +
+ sizeof(*tlv) + sizeof(data->dmp_hdr.write_ptr) +
+ sizeof(*tlv) + sizeof(data->dmp_hdr.wrap_ctr) +
+ sizeof(*tlv) + sizeof(data->dmp_hdr.trigger_reason) +
+ sizeof(*tlv) + sizeof(data->dmp_hdr.fw_git_sha1) +
+ sizeof(*tlv) + sizeof(data->dmp_hdr.cnvr_top) +
+ sizeof(*tlv) + sizeof(data->dmp_hdr.cnvi_top) +
+ sizeof(*tlv) + strlen(ts) +
+ sizeof(*tlv) + strlen(fw_build) +
+ sizeof(*tlv) + strlen(vendor) +
+ sizeof(*tlv) + strlen(driver);
- dump_size = hdr_len + sizeof(hdr_len);
+ /*
+ * sizeof(u32) - signature
+ * sizeof(data_len) - to store tlv data size
+ * data_len - TLV data
+ */
+ dump_size = sizeof(u32) + sizeof(data_len) + data_len;
- skb = alloc_skb(dump_size, GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
/* Add debug buffers data length to dump size */
dump_size += BTINTEL_PCIE_DBGC_BUFFER_SIZE * dbgc->count;
- ret = hci_devcd_init(hdev, dump_size);
- if (ret) {
- bt_dev_err(hdev, "Failed to init devcoredump, err %d", ret);
- kfree_skb(skb);
- return ret;
- }
+ pdata = vmalloc(dump_size);
+ if (!pdata)
+ return -ENOMEM;
+ p = pdata;
- skb_put_data(skb, &hdr_len, sizeof(hdr_len));
+ *(u32 *)p = BTINTEL_PCIE_MAGIC_NUM;
+ p += sizeof(u32);
- btintel_pcie_copy_tlv(skb, BTINTEL_CNVI_BT, &data->dmp_hdr.cnvi_bt,
- sizeof(data->dmp_hdr.cnvi_bt));
+ *(u32 *)p = data_len;
+ p += sizeof(u32);
- btintel_pcie_copy_tlv(skb, BTINTEL_WRITE_PTR, &data->dmp_hdr.write_ptr,
- sizeof(data->dmp_hdr.write_ptr));
+
+ p = btintel_pcie_copy_tlv(p, BTINTEL_VENDOR, vendor, strlen(vendor));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_DRIVER, driver, strlen(driver));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_DUMP_TIME, ts, strlen(ts));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_FW_BUILD, fw_build,
+ strlen(fw_build));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_CNVI_BT, &data->dmp_hdr.cnvi_bt,
+ sizeof(data->dmp_hdr.cnvi_bt));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_WRITE_PTR, &data->dmp_hdr.write_ptr,
+ sizeof(data->dmp_hdr.write_ptr));
data->dmp_hdr.wrap_ctr = btintel_pcie_rd_dev_mem(data,
BTINTEL_PCIE_DBGC_DBGBUFF_WRAP_ARND);
+ p = btintel_pcie_copy_tlv(p, BTINTEL_WRAP_CTR, &data->dmp_hdr.wrap_ctr,
+ sizeof(data->dmp_hdr.wrap_ctr));
- btintel_pcie_copy_tlv(skb, BTINTEL_WRAP_CTR, &data->dmp_hdr.wrap_ctr,
- sizeof(data->dmp_hdr.wrap_ctr));
-
- btintel_pcie_copy_tlv(skb, BTINTEL_TRIGGER_REASON, &data->dmp_hdr.trigger_reason,
- sizeof(data->dmp_hdr.trigger_reason));
-
- btintel_pcie_copy_tlv(skb, BTINTEL_FW_SHA, &data->dmp_hdr.fw_git_sha1,
- sizeof(data->dmp_hdr.fw_git_sha1));
-
- btintel_pcie_copy_tlv(skb, BTINTEL_CNVR_TOP, &data->dmp_hdr.cnvr_top,
- sizeof(data->dmp_hdr.cnvr_top));
-
- btintel_pcie_copy_tlv(skb, BTINTEL_CNVI_TOP, &data->dmp_hdr.cnvi_top,
- sizeof(data->dmp_hdr.cnvi_top));
-
- btintel_pcie_copy_tlv(skb, BTINTEL_DUMP_TIME, buf, dump_time_len);
-
- btintel_pcie_copy_tlv(skb, BTINTEL_FW_BUILD, buf + dump_time_len, fw_build);
-
- ret = hci_devcd_append(hdev, skb);
- if (ret)
- goto exit_err;
-
- for (i = 0; i < dbgc->count; i++) {
- ret = btintel_pcie_add_dmp_data(hdev, dbgc->bufs[i].data,
- BTINTEL_PCIE_DBGC_BUFFER_SIZE);
- if (ret)
- break;
- }
-
-exit_err:
- hci_devcd_complete(hdev);
- return ret;
+ p = btintel_pcie_copy_tlv(p, BTINTEL_TRIGGER_REASON, &data->dmp_hdr.trigger_reason,
+ sizeof(data->dmp_hdr.trigger_reason));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_FW_SHA, &data->dmp_hdr.fw_git_sha1,
+ sizeof(data->dmp_hdr.fw_git_sha1));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_CNVR_TOP, &data->dmp_hdr.cnvr_top,
+ sizeof(data->dmp_hdr.cnvr_top));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_CNVI_TOP, &data->dmp_hdr.cnvi_top,
+ sizeof(data->dmp_hdr.cnvi_top));
+
+ memcpy(p, dbgc->bufs[0].data, dbgc->count * BTINTEL_PCIE_DBGC_BUFFER_SIZE);
+ dev_coredumpv(&hdev->dev, pdata, dump_size, GFP_KERNEL);
+ return 0;
}
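[Editor's note] For orientation, the dump blob built above is laid out as: a u32 magic, a u32 data_len, data_len bytes of TLV records, then the raw DBGC buffers. A sketch of how a consumer might walk the TLV region (assumes struct intel_tlv is the type/len/val[] record this driver uses):

	const u8 *p   = pdata + 2 * sizeof(u32);	/* skip magic + data_len */
	const u8 *end = p + data_len;

	while (p < end) {
		const struct intel_tlv *tlv = (const void *)p;
		/* tlv->len bytes of payload for tlv->type start at tlv->val */
		p += sizeof(*tlv) + tlv->len;
	}
	/* the raw debug buffers follow at 'end' */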
static void btintel_pcie_dump_traces(struct hci_dev *hdev)
@@ -760,51 +748,6 @@ static void btintel_pcie_dump_traces(struct hci_dev *hdev)
bt_dev_err(hdev, "Failed to dump traces: (%d)", ret);
}
-static void btintel_pcie_dump_hdr(struct hci_dev *hdev, struct sk_buff *skb)
-{
- struct btintel_pcie_data *data = hci_get_drvdata(hdev);
- u16 len = skb->len;
- u16 *hdrlen_ptr;
- char buf[80];
-
- hdrlen_ptr = skb_put_zero(skb, sizeof(len));
-
- snprintf(buf, sizeof(buf), "Controller Name: 0x%X\n",
- INTEL_HW_VARIANT(data->dmp_hdr.cnvi_bt));
- skb_put_data(skb, buf, strlen(buf));
-
- snprintf(buf, sizeof(buf), "Firmware Build Number: %u\n",
- data->dmp_hdr.fw_build_num);
- skb_put_data(skb, buf, strlen(buf));
-
- snprintf(buf, sizeof(buf), "Driver: %s\n", data->dmp_hdr.driver_name);
- skb_put_data(skb, buf, strlen(buf));
-
- snprintf(buf, sizeof(buf), "Vendor: Intel\n");
- skb_put_data(skb, buf, strlen(buf));
-
- *hdrlen_ptr = skb->len - len;
-}
-
-static void btintel_pcie_dump_notify(struct hci_dev *hdev, int state)
-{
- struct btintel_pcie_data *data = hci_get_drvdata(hdev);
-
- switch (state) {
- case HCI_DEVCOREDUMP_IDLE:
- data->dmp_hdr.state = HCI_DEVCOREDUMP_IDLE;
- break;
- case HCI_DEVCOREDUMP_ACTIVE:
- data->dmp_hdr.state = HCI_DEVCOREDUMP_ACTIVE;
- break;
- case HCI_DEVCOREDUMP_TIMEOUT:
- case HCI_DEVCOREDUMP_ABORT:
- case HCI_DEVCOREDUMP_DONE:
- data->dmp_hdr.state = HCI_DEVCOREDUMP_IDLE;
- break;
- }
-}
-
/* This function enables BT function by setting BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT bit in
* BTINTEL_PCIE_CSR_FUNC_CTRL_REG register and wait for MSI-X with
* BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0.
@@ -1378,6 +1321,11 @@ static void btintel_pcie_rx_work(struct work_struct *work)
struct btintel_pcie_data, rx_work);
struct sk_buff *skb;
+ if (test_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags)) {
+ btintel_pcie_dump_traces(data->hdev);
+ clear_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags);
+ }
+
if (test_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags)) {
/* Unlike usb products, controller will not send hardware
* exception event on exception. Instead controller writes the
@@ -1390,11 +1338,6 @@ static void btintel_pcie_rx_work(struct work_struct *work)
clear_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags);
}
- if (test_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags)) {
- btintel_pcie_dump_traces(data->hdev);
- clear_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags);
- }
-
/* Process the sk_buf in queue and send to the HCI layer */
while ((skb = skb_dequeue(&data->rx_skb_q))) {
btintel_pcie_recv_frame(data, skb);
@@ -2149,6 +2092,7 @@ static int btintel_pcie_setup_internal(struct hci_dev *hdev)
switch (INTEL_HW_VARIANT(ver_tlv.cnvi_bt)) {
case 0x1e: /* BzrI */
case 0x1f: /* ScP */
+ case 0x22: /* BzrIW */
/* Display version information of TLV type */
btintel_version_info_tlv(hdev, &ver_tlv);
@@ -2184,13 +2128,6 @@ static int btintel_pcie_setup_internal(struct hci_dev *hdev)
if (ver_tlv.img_type == 0x02 || ver_tlv.img_type == 0x03)
data->dmp_hdr.fw_git_sha1 = ver_tlv.git_sha1;
- err = hci_devcd_register(hdev, btintel_pcie_dump_traces, btintel_pcie_dump_hdr,
- btintel_pcie_dump_notify);
- if (err) {
- bt_dev_err(hdev, "Failed to register coredump (%d)", err);
- goto exit_error;
- }
-
btintel_print_fseq_info(hdev);
exit_error:
kfree_skb(skb);
@@ -2236,6 +2173,7 @@ btintel_pcie_get_recovery(struct pci_dev *pdev, struct device *dev)
{
struct btintel_pcie_dev_recovery *tmp, *data = NULL;
const char *name = pci_name(pdev);
+ const size_t name_len = strlen(name) + 1;
struct hci_dev *hdev = to_hci_dev(dev);
spin_lock(&btintel_pcie_recovery_lock);
@@ -2252,11 +2190,11 @@ btintel_pcie_get_recovery(struct pci_dev *pdev, struct device *dev)
return data;
}
- data = kzalloc(struct_size(data, name, strlen(name) + 1), GFP_ATOMIC);
+ data = kzalloc(struct_size(data, name, name_len), GFP_ATOMIC);
if (!data)
return NULL;
- strscpy_pad(data->name, name, strlen(name) + 1);
+ strscpy(data->name, name, name_len);
spin_lock(&btintel_pcie_recovery_lock);
list_add_tail(&data->list, &btintel_pcie_recovery_list);
spin_unlock(&btintel_pcie_recovery_lock);
@@ -2319,7 +2257,6 @@ static void btintel_pcie_removal_work(struct work_struct *wk)
btintel_pcie_synchronize_irqs(data);
flush_work(&data->rx_work);
- flush_work(&data->hdev->dump.dump_rx);
bt_dev_dbg(data->hdev, "Release bluetooth interface");
btintel_pcie_release_hdev(data);
@@ -2410,6 +2347,13 @@ static void btintel_pcie_hw_error(struct hci_dev *hdev, u8 code)
btintel_pcie_reset(hdev);
}
+static bool btintel_pcie_wakeup(struct hci_dev *hdev)
+{
+ struct btintel_pcie_data *data = hci_get_drvdata(hdev);
+
+ return device_may_wakeup(&data->pdev->dev);
+}
+
static int btintel_pcie_setup_hdev(struct btintel_pcie_data *data)
{
int err;
@@ -2435,6 +2379,7 @@ static int btintel_pcie_setup_hdev(struct btintel_pcie_data *data)
hdev->set_diag = btintel_set_diag;
hdev->set_bdaddr = btintel_set_bdaddr;
hdev->reset = btintel_pcie_reset;
+ hdev->wakeup = btintel_pcie_wakeup;
err = hci_register_dev(hdev);
if (err < 0) {
@@ -2573,11 +2518,100 @@ static void btintel_pcie_coredump(struct device *dev)
}
#endif
+static int btintel_pcie_suspend_late(struct device *dev, pm_message_t mesg)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct btintel_pcie_data *data;
+ ktime_t start;
+ u32 dxstate;
+ int err;
+
+ data = pci_get_drvdata(pdev);
+
+ dxstate = (mesg.event == PM_EVENT_SUSPEND ?
+ BTINTEL_PCIE_STATE_D3_HOT : BTINTEL_PCIE_STATE_D3_COLD);
+
+ data->gp0_received = false;
+
+ start = ktime_get();
+
+ /* Refer: 6.4.11.7 -> Platform power management */
+ btintel_pcie_wr_sleep_cntrl(data, dxstate);
+ err = wait_event_timeout(data->gp0_wait_q, data->gp0_received,
+ msecs_to_jiffies(BTINTEL_DEFAULT_INTR_TIMEOUT_MS));
+ if (err == 0) {
+ bt_dev_err(data->hdev,
+ "Timeout (%u ms) on alive interrupt for D3 entry",
+ BTINTEL_DEFAULT_INTR_TIMEOUT_MS);
+ return -EBUSY;
+ }
+
+ bt_dev_dbg(data->hdev,
+ "device entered into d3 state from d0 in %lld us",
+ ktime_to_us(ktime_get() - start));
+
+ return 0;
+}
+
+static int btintel_pcie_suspend(struct device *dev)
+{
+ return btintel_pcie_suspend_late(dev, PMSG_SUSPEND);
+}
+
+static int btintel_pcie_hibernate(struct device *dev)
+{
+ return btintel_pcie_suspend_late(dev, PMSG_HIBERNATE);
+}
+
+static int btintel_pcie_freeze(struct device *dev)
+{
+ return btintel_pcie_suspend_late(dev, PMSG_FREEZE);
+}
+
+static int btintel_pcie_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct btintel_pcie_data *data;
+ ktime_t start;
+ int err;
+
+ data = pci_get_drvdata(pdev);
+ data->gp0_received = false;
+
+ start = ktime_get();
+
+ /* Refer: 6.4.11.7 -> Platform power management */
+ btintel_pcie_wr_sleep_cntrl(data, BTINTEL_PCIE_STATE_D0);
+ err = wait_event_timeout(data->gp0_wait_q, data->gp0_received,
+ msecs_to_jiffies(BTINTEL_DEFAULT_INTR_TIMEOUT_MS));
+ if (err == 0) {
+ bt_dev_err(data->hdev,
+ "Timeout (%u ms) on alive interrupt for D0 entry",
+ BTINTEL_DEFAULT_INTR_TIMEOUT_MS);
+ return -EBUSY;
+ }
+
+ bt_dev_dbg(data->hdev,
+ "device entered into d0 state from d3 in %lld us",
+ ktime_to_us(ktime_get() - start));
+ return 0;
+}
+
+static const struct dev_pm_ops btintel_pcie_pm_ops = {
+ .suspend = btintel_pcie_suspend,
+ .resume = btintel_pcie_resume,
+ .freeze = btintel_pcie_freeze,
+ .thaw = btintel_pcie_resume,
+ .poweroff = btintel_pcie_hibernate,
+ .restore = btintel_pcie_resume,
+};
+
static struct pci_driver btintel_pcie_driver = {
.name = KBUILD_MODNAME,
.id_table = btintel_pcie_table,
.probe = btintel_pcie_probe,
.remove = btintel_pcie_remove,
+ .driver.pm = pm_sleep_ptr(&btintel_pcie_pm_ops),
#ifdef CONFIG_DEV_COREDUMP
.driver.coredump = btintel_pcie_coredump
#endif
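[Editor's note] The suspend/resume paths above block on gp0_wait_q until gp0_received is set. The wake-up side is not part of this hunk; presumably the GP0 MSI-X handler does something along these lines (an assumption, not shown in the patch):

	/* assumed interrupt-side counterpart to the wait above */
	data->gp0_received = true;
	wake_up(&data->gp0_wait_q);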
diff --git a/drivers/bluetooth/btintel_pcie.h b/drivers/bluetooth/btintel_pcie.h
index 0fa876c5b954..04b21f968ad3 100644
--- a/drivers/bluetooth/btintel_pcie.h
+++ b/drivers/bluetooth/btintel_pcie.h
@@ -132,6 +132,8 @@ enum btintel_pcie_tlv_type {
BTINTEL_CNVI_TOP,
BTINTEL_DUMP_TIME,
BTINTEL_FW_BUILD,
+ BTINTEL_VENDOR,
+ BTINTEL_DRIVER
};
/* causes for the MBOX interrupts */
diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c
index 4fc673640bfc..50abefba6d04 100644
--- a/drivers/bluetooth/btmtksdio.c
+++ b/drivers/bluetooth/btmtksdio.c
@@ -29,7 +29,7 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
-#include "h4_recv.h"
+#include "hci_uart.h"
#include "btmtk.h"
#define VERSION "0.1"
diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c
index 76995cfcd534..d9b90ea2ad38 100644
--- a/drivers/bluetooth/btmtkuart.c
+++ b/drivers/bluetooth/btmtkuart.c
@@ -27,7 +27,7 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
-#include "h4_recv.h"
+#include "hci_uart.h"
#include "btmtk.h"
#define VERSION "0.2"
diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c
index 76e7f857fb7d..d5153fed0518 100644
--- a/drivers/bluetooth/btnxpuart.c
+++ b/drivers/bluetooth/btnxpuart.c
@@ -24,7 +24,7 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
-#include "h4_recv.h"
+#include "hci_uart.h"
#define MANUFACTURER_NXP 37
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 8085fabadde8..5e9ebf0c5312 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -66,6 +66,7 @@ static struct usb_driver btusb_driver;
#define BTUSB_INTEL_BROKEN_INITIAL_NCMD BIT(25)
#define BTUSB_INTEL_NO_WBS_SUPPORT BIT(26)
#define BTUSB_ACTIONS_SEMI BIT(27)
+#define BTUSB_BARROT BIT(28)
static const struct usb_device_id btusb_table[] = {
/* Generic Bluetooth USB device */
@@ -522,6 +523,8 @@ static const struct usb_device_id quirks_table[] = {
/* Realtek 8851BU Bluetooth devices */
{ USB_DEVICE(0x3625, 0x010b), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x2001, 0x332a), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
/* Realtek 8852AE Bluetooth devices */
{ USB_DEVICE(0x0bda, 0x2852), .driver_info = BTUSB_REALTEK |
@@ -698,6 +701,8 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3615), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3633), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x35f5, 0x7922), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
@@ -732,6 +737,8 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3613), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3627), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3628), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3630), .driver_info = BTUSB_MEDIATEK |
@@ -810,6 +817,10 @@ static const struct usb_device_id quirks_table[] = {
{ USB_DEVICE(0x0cb5, 0xc547), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ /* Barrot Technology Bluetooth devices */
+ { USB_DEVICE(0x33fa, 0x0010), .driver_info = BTUSB_BARROT },
+ { USB_DEVICE(0x33fa, 0x0012), .driver_info = BTUSB_BARROT },
+
/* Actions Semiconductor ATS2851 based devices */
{ USB_DEVICE(0x10d7, 0xb012), .driver_info = BTUSB_ACTIONS_SEMI },
@@ -1192,6 +1203,18 @@ static int btusb_recv_intr(struct btusb_data *data, void *buffer, int count)
}
if (!hci_skb_expect(skb)) {
+ /* Each chunk should correspond to one or more
+ * events, so if there are still bytes left that don't
+ * constitute a new event, this is likely a bug in the
+ * controller.
+ */
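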
+ if (count && count < HCI_EVENT_HDR_SIZE) {
+ bt_dev_warn(data->hdev,
+ "Unexpected continuation: %d bytes",
+ count);
+ count = 0;
+ }
+
/* Complete frame */
btusb_recv_event(data, skb);
skb = NULL;
diff --git a/drivers/bluetooth/h4_recv.h b/drivers/bluetooth/h4_recv.h
deleted file mode 100644
index 28cf2d8c2d48..000000000000
--- a/drivers/bluetooth/h4_recv.h
+++ /dev/null
@@ -1,153 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- *
- * Generic Bluetooth HCI UART driver
- *
- * Copyright (C) 2015-2018 Intel Corporation
- */
-
-#include <linux/unaligned.h>
-
-struct h4_recv_pkt {
- u8 type; /* Packet type */
- u8 hlen; /* Header length */
- u8 loff; /* Data length offset in header */
- u8 lsize; /* Data length field size */
- u16 maxlen; /* Max overall packet length */
- int (*recv)(struct hci_dev *hdev, struct sk_buff *skb);
-};
-
-#define H4_RECV_ACL \
- .type = HCI_ACLDATA_PKT, \
- .hlen = HCI_ACL_HDR_SIZE, \
- .loff = 2, \
- .lsize = 2, \
- .maxlen = HCI_MAX_FRAME_SIZE \
-
-#define H4_RECV_SCO \
- .type = HCI_SCODATA_PKT, \
- .hlen = HCI_SCO_HDR_SIZE, \
- .loff = 2, \
- .lsize = 1, \
- .maxlen = HCI_MAX_SCO_SIZE
-
-#define H4_RECV_EVENT \
- .type = HCI_EVENT_PKT, \
- .hlen = HCI_EVENT_HDR_SIZE, \
- .loff = 1, \
- .lsize = 1, \
- .maxlen = HCI_MAX_EVENT_SIZE
-
-#define H4_RECV_ISO \
- .type = HCI_ISODATA_PKT, \
- .hlen = HCI_ISO_HDR_SIZE, \
- .loff = 2, \
- .lsize = 2, \
- .maxlen = HCI_MAX_FRAME_SIZE
-
-static inline struct sk_buff *h4_recv_buf(struct hci_dev *hdev,
- struct sk_buff *skb,
- const unsigned char *buffer,
- int count,
- const struct h4_recv_pkt *pkts,
- int pkts_count)
-{
- /* Check for error from previous call */
- if (IS_ERR(skb))
- skb = NULL;
-
- while (count) {
- int i, len;
-
- if (!skb) {
- for (i = 0; i < pkts_count; i++) {
- if (buffer[0] != (&pkts[i])->type)
- continue;
-
- skb = bt_skb_alloc((&pkts[i])->maxlen,
- GFP_ATOMIC);
- if (!skb)
- return ERR_PTR(-ENOMEM);
-
- hci_skb_pkt_type(skb) = (&pkts[i])->type;
- hci_skb_expect(skb) = (&pkts[i])->hlen;
- break;
- }
-
- /* Check for invalid packet type */
- if (!skb)
- return ERR_PTR(-EILSEQ);
-
- count -= 1;
- buffer += 1;
- }
-
- len = min_t(uint, hci_skb_expect(skb) - skb->len, count);
- skb_put_data(skb, buffer, len);
-
- count -= len;
- buffer += len;
-
- /* Check for partial packet */
- if (skb->len < hci_skb_expect(skb))
- continue;
-
- for (i = 0; i < pkts_count; i++) {
- if (hci_skb_pkt_type(skb) == (&pkts[i])->type)
- break;
- }
-
- if (i >= pkts_count) {
- kfree_skb(skb);
- return ERR_PTR(-EILSEQ);
- }
-
- if (skb->len == (&pkts[i])->hlen) {
- u16 dlen;
-
- switch ((&pkts[i])->lsize) {
- case 0:
- /* No variable data length */
- dlen = 0;
- break;
- case 1:
- /* Single octet variable length */
- dlen = skb->data[(&pkts[i])->loff];
- hci_skb_expect(skb) += dlen;
-
- if (skb_tailroom(skb) < dlen) {
- kfree_skb(skb);
- return ERR_PTR(-EMSGSIZE);
- }
- break;
- case 2:
- /* Double octet variable length */
- dlen = get_unaligned_le16(skb->data +
- (&pkts[i])->loff);
- hci_skb_expect(skb) += dlen;
-
- if (skb_tailroom(skb) < dlen) {
- kfree_skb(skb);
- return ERR_PTR(-EMSGSIZE);
- }
- break;
- default:
- /* Unsupported variable length */
- kfree_skb(skb);
- return ERR_PTR(-EILSEQ);
- }
-
- if (!dlen) {
- /* No more data, complete frame */
- (&pkts[i])->recv(hdev, skb);
- skb = NULL;
- }
- } else {
- /* Complete frame */
- (&pkts[i])->recv(hdev, skb);
- skb = NULL;
- }
- }
-
- return skb;
-}
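[Editor's note] With h4_recv.h gone, its users now pick up h4_recv_buf() and the H4_RECV_* initializers from hci_uart.h, which provides the same interface. Typical usage, sketched (the demo_pkts table is illustrative):

	static const struct h4_recv_pkt demo_pkts[] = {
		{ H4_RECV_ACL,   .recv = hci_recv_frame },
		{ H4_RECV_SCO,   .recv = hci_recv_frame },
		{ H4_RECV_EVENT, .recv = hci_recv_frame },
	};

	skb = h4_recv_buf(hdev, skb, buffer, count,
			  demo_pkts, ARRAY_SIZE(demo_pkts));
	if (IS_ERR(skb))
		skb = NULL;	/* framing error: drop and resynchronize */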
diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
index 664d82d1e613..591abe6d63dd 100644
--- a/drivers/bluetooth/hci_bcsp.c
+++ b/drivers/bluetooth/hci_bcsp.c
@@ -582,6 +582,9 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
struct bcsp_struct *bcsp = hu->priv;
const unsigned char *ptr;
+ if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
+ return -EUNATCH;
+
BT_DBG("hu %p count %d rx_state %d rx_count %ld",
hu, count, bcsp->rx_state, bcsp->rx_count);
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index c1c0a4759c7e..25845c04e562 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -176,8 +176,8 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
{
struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
- return sprintf(buf, "fsl-mc:v%08Xd%s\n", mc_dev->obj_desc.vendor,
- mc_dev->obj_desc.type);
+ return sysfs_emit(buf, "fsl-mc:v%08Xd%s\n", mc_dev->obj_desc.vendor,
+ mc_dev->obj_desc.type);
}
static DEVICE_ATTR_RO(modalias);
@@ -203,7 +203,7 @@ static ssize_t driver_override_show(struct device *dev,
{
struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
- return snprintf(buf, PAGE_SIZE, "%s\n", mc_dev->driver_override);
+ return sysfs_emit(buf, "%s\n", mc_dev->driver_override);
}
static DEVICE_ATTR_RW(driver_override);
@@ -1104,6 +1104,9 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
* Get physical address of MC portal for the root DPRC:
*/
plat_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!plat_res)
+ return -EINVAL;
+
mc_portal_phys_addr = plat_res->start;
mc_portal_size = resource_size(plat_res);
mc_portal_base_phys_addr = mc_portal_phys_addr & ~0x3ffffff;
diff --git a/drivers/cache/sifive_ccache.c b/drivers/cache/sifive_ccache.c
index e1a283805ea7..a86800b123b9 100644
--- a/drivers/cache/sifive_ccache.c
+++ b/drivers/cache/sifive_ccache.c
@@ -151,16 +151,16 @@ static void ccache_flush_range(phys_addr_t start, size_t len)
if (!len)
return;
- mb();
+ mb(); /* complete earlier memory accesses before the cache flush */
for (line = ALIGN_DOWN(start, SIFIVE_CCACHE_LINE_SIZE); line < end;
line += SIFIVE_CCACHE_LINE_SIZE) {
#ifdef CONFIG_32BIT
- writel(line >> 4, ccache_base + SIFIVE_CCACHE_FLUSH32);
+ writel_relaxed(line >> 4, ccache_base + SIFIVE_CCACHE_FLUSH32);
#else
- writeq(line, ccache_base + SIFIVE_CCACHE_FLUSH64);
+ writeq_relaxed(line, ccache_base + SIFIVE_CCACHE_FLUSH64);
#endif
- mb();
}
+ mb(); /* issue later memory accesses after the cache flush */
}
static const struct riscv_nonstd_cache_ops ccache_mgmt_ops __initconst = {
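[Editor's note] The ccache change above is a common MMIO batching pattern: one full barrier before the loop, relaxed doorbell writes inside it, and one full barrier after, rather than a barrier per cache line. Sketched generically (LINE_SIZE and FLUSH_REG stand in for the driver's constants):

	mb();	/* order earlier memory accesses before the flush */
	for (line = start; line < end; line += LINE_SIZE)
		writeq_relaxed(line, base + FLUSH_REG);
	mb();	/* order later memory accesses after the flush */

The relaxed accessors skip the per-write barriers, so the loop issues back-to-back doorbells and pays the ordering cost only twice.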
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index c85827843447..e316cbc5baa9 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -77,7 +77,7 @@ config HW_RANDOM_AIROHA
config HW_RANDOM_ATMEL
tristate "Atmel Random Number Generator support"
- depends on (ARCH_AT91 || COMPILE_TEST)
+ depends on (ARCH_MICROCHIP || COMPILE_TEST)
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig
index f4adc6feb3b2..92bed266d07c 100644
--- a/drivers/char/ipmi/Kconfig
+++ b/drivers/char/ipmi/Kconfig
@@ -84,6 +84,13 @@ config IPMI_IPMB
bus, and it also supports direct messaging on the bus using
IPMB direct messages. This module requires I2C support.
+config IPMI_LS2K
+ bool 'Loongson-2K IPMI interface'
+ depends on LOONGARCH
+ select MFD_LS2K_BMC_CORE
+ help
+ Provides a driver for Loongson-2K IPMI interfaces.
+
config IPMI_POWERNV
depends on PPC_POWERNV
tristate 'POWERNV (OPAL firmware) IPMI interface'
diff --git a/drivers/char/ipmi/Makefile b/drivers/char/ipmi/Makefile
index e0944547c9d0..4ea450a82242 100644
--- a/drivers/char/ipmi/Makefile
+++ b/drivers/char/ipmi/Makefile
@@ -8,6 +8,7 @@ ipmi_si-y := ipmi_si_intf.o ipmi_kcs_sm.o ipmi_smic_sm.o ipmi_bt_sm.o \
ipmi_si_mem_io.o
ipmi_si-$(CONFIG_HAS_IOPORT) += ipmi_si_port_io.o
ipmi_si-$(CONFIG_PCI) += ipmi_si_pci.o
+ipmi_si-$(CONFIG_IPMI_LS2K) += ipmi_si_ls2k.o
ipmi_si-$(CONFIG_PARISC) += ipmi_si_parisc.o
obj-$(CONFIG_IPMI_HANDLER) += ipmi_msghandler.o
diff --git a/drivers/char/ipmi/ipmi_ipmb.c b/drivers/char/ipmi/ipmi_ipmb.c
index 6a4f279c7c1f..3a51e58b2487 100644
--- a/drivers/char/ipmi/ipmi_ipmb.c
+++ b/drivers/char/ipmi/ipmi_ipmb.c
@@ -404,8 +404,7 @@ static void ipmi_ipmb_shutdown(void *send_info)
ipmi_ipmb_stop_thread(iidev);
}
-static void ipmi_ipmb_sender(void *send_info,
- struct ipmi_smi_msg *msg)
+static int ipmi_ipmb_sender(void *send_info, struct ipmi_smi_msg *msg)
{
struct ipmi_ipmb_dev *iidev = send_info;
unsigned long flags;
@@ -417,6 +416,7 @@ static void ipmi_ipmb_sender(void *send_info,
spin_unlock_irqrestore(&iidev->lock, flags);
up(&iidev->wake_thread);
+ return IPMI_CC_NO_ERROR;
}
static void ipmi_ipmb_request_events(void *send_info)
diff --git a/drivers/char/ipmi/ipmi_kcs_sm.c b/drivers/char/ipmi/ipmi_kcs_sm.c
index ecfcb50302f6..efda90dcf5b3 100644
--- a/drivers/char/ipmi/ipmi_kcs_sm.c
+++ b/drivers/char/ipmi/ipmi_kcs_sm.c
@@ -122,10 +122,10 @@ struct si_sm_data {
unsigned long error0_timeout;
};
-static unsigned int init_kcs_data_with_state(struct si_sm_data *kcs,
- struct si_sm_io *io, enum kcs_states state)
+static unsigned int init_kcs_data(struct si_sm_data *kcs,
+ struct si_sm_io *io)
{
- kcs->state = state;
+ kcs->state = KCS_IDLE;
kcs->io = io;
kcs->write_pos = 0;
kcs->write_count = 0;
@@ -140,12 +140,6 @@ static unsigned int init_kcs_data_with_state(struct si_sm_data *kcs,
return 2;
}
-static unsigned int init_kcs_data(struct si_sm_data *kcs,
- struct si_sm_io *io)
-{
- return init_kcs_data_with_state(kcs, io, KCS_IDLE);
-}
-
static inline unsigned char read_status(struct si_sm_data *kcs)
{
return kcs->io->inputb(kcs->io, 1);
@@ -276,7 +270,7 @@ static int start_kcs_transaction(struct si_sm_data *kcs, unsigned char *data,
if (size > MAX_KCS_WRITE_SIZE)
return IPMI_REQ_LEN_EXCEEDED_ERR;
- if (kcs->state != KCS_IDLE) {
+ if ((kcs->state != KCS_IDLE) && (kcs->state != KCS_HOSED)) {
dev_warn(kcs->io->dev, "KCS in invalid state %d\n", kcs->state);
return IPMI_NOT_IN_MY_STATE_ERR;
}
@@ -501,7 +495,7 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
}
if (kcs->state == KCS_HOSED) {
- init_kcs_data_with_state(kcs, kcs->io, KCS_ERROR0);
+ init_kcs_data(kcs, kcs->io);
return SI_SM_HOSED;
}
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 8e9050f99e9e..a0b67a35a5f0 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -38,7 +38,9 @@
#define IPMI_DRIVER_VERSION "39.2"
-static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
+static struct ipmi_recv_msg *ipmi_alloc_recv_msg(struct ipmi_user *user);
+static void ipmi_set_recv_msg_user(struct ipmi_recv_msg *msg,
+ struct ipmi_user *user);
static int ipmi_init_msghandler(void);
static void smi_work(struct work_struct *t);
static void handle_new_recv_msgs(struct ipmi_smi *intf);
@@ -50,6 +52,8 @@ static void intf_free(struct kref *ref);
static bool initialized;
static bool drvregistered;
+static struct timer_list ipmi_timer;
+
/* Numbers in this enumerator should be mapped to ipmi_panic_event_str */
enum ipmi_panic_event_op {
IPMI_SEND_PANIC_EVENT_NONE,
@@ -432,6 +436,7 @@ struct ipmi_smi {
atomic_t nr_users;
struct device_attribute nr_users_devattr;
struct device_attribute nr_msgs_devattr;
+ struct device_attribute maintenance_mode_devattr;
/* Used for wake ups at startup. */
@@ -464,7 +469,7 @@ struct ipmi_smi {
* interface to match them up with their responses. A routine
* is called periodically to time the items in this list.
*/
- spinlock_t seq_lock;
+ struct mutex seq_lock;
struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
int curr_seq;
@@ -539,7 +544,11 @@ struct ipmi_smi {
/* For handling of maintenance mode. */
int maintenance_mode;
- bool maintenance_mode_enable;
+
+#define IPMI_MAINTENANCE_MODE_STATE_OFF 0
+#define IPMI_MAINTENANCE_MODE_STATE_FIRMWARE 1
+#define IPMI_MAINTENANCE_MODE_STATE_RESET 2
+ int maintenance_mode_state;
int auto_maintenance_timeout;
spinlock_t maintenance_mode_lock; /* Used in a timer... */
@@ -955,7 +964,6 @@ static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
* risk. At this moment, simply skip it in that case.
*/
ipmi_free_recv_msg(msg);
- atomic_dec(&msg->user->nr_msgs);
} else {
/*
* Deliver it in smi_work. The message will hold a
@@ -1116,12 +1124,11 @@ static int intf_find_seq(struct ipmi_smi *intf,
struct ipmi_recv_msg **recv_msg)
{
int rv = -ENODEV;
- unsigned long flags;
if (seq >= IPMI_IPMB_NUM_SEQ)
return -EINVAL;
- spin_lock_irqsave(&intf->seq_lock, flags);
+ mutex_lock(&intf->seq_lock);
if (intf->seq_table[seq].inuse) {
struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;
@@ -1134,7 +1141,7 @@ static int intf_find_seq(struct ipmi_smi *intf,
rv = 0;
}
}
- spin_unlock_irqrestore(&intf->seq_lock, flags);
+ mutex_unlock(&intf->seq_lock);
return rv;
}
@@ -1145,14 +1152,13 @@ static int intf_start_seq_timer(struct ipmi_smi *intf,
long msgid)
{
int rv = -ENODEV;
- unsigned long flags;
unsigned char seq;
unsigned long seqid;
GET_SEQ_FROM_MSGID(msgid, seq, seqid);
- spin_lock_irqsave(&intf->seq_lock, flags);
+ mutex_lock(&intf->seq_lock);
/*
* We do this verification because the user can be deleted
* while a message is outstanding.
@@ -1163,7 +1169,7 @@ static int intf_start_seq_timer(struct ipmi_smi *intf,
ent->timeout = ent->orig_timeout;
rv = 0;
}
- spin_unlock_irqrestore(&intf->seq_lock, flags);
+ mutex_unlock(&intf->seq_lock);
return rv;
}
@@ -1174,7 +1180,6 @@ static int intf_err_seq(struct ipmi_smi *intf,
unsigned int err)
{
int rv = -ENODEV;
- unsigned long flags;
unsigned char seq;
unsigned long seqid;
struct ipmi_recv_msg *msg = NULL;
@@ -1182,7 +1187,7 @@ static int intf_err_seq(struct ipmi_smi *intf,
GET_SEQ_FROM_MSGID(msgid, seq, seqid);
- spin_lock_irqsave(&intf->seq_lock, flags);
+ mutex_lock(&intf->seq_lock);
/*
* We do this verification because the user can be deleted
* while a message is outstanding.
@@ -1196,7 +1201,7 @@ static int intf_err_seq(struct ipmi_smi *intf,
msg = ent->recv_msg;
rv = 0;
}
- spin_unlock_irqrestore(&intf->seq_lock, flags);
+ mutex_unlock(&intf->seq_lock);
if (msg)
deliver_err_response(intf, msg, err);
@@ -1209,7 +1214,6 @@ int ipmi_create_user(unsigned int if_num,
void *handler_data,
struct ipmi_user **user)
{
- unsigned long flags;
struct ipmi_user *new_user = NULL;
int rv = 0;
struct ipmi_smi *intf;
@@ -1277,9 +1281,9 @@ int ipmi_create_user(unsigned int if_num,
new_user->gets_events = false;
mutex_lock(&intf->users_mutex);
- spin_lock_irqsave(&intf->seq_lock, flags);
+ mutex_lock(&intf->seq_lock);
list_add(&new_user->link, &intf->users);
- spin_unlock_irqrestore(&intf->seq_lock, flags);
+ mutex_unlock(&intf->seq_lock);
mutex_unlock(&intf->users_mutex);
if (handler->ipmi_watchdog_pretimeout)
@@ -1325,7 +1329,6 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
{
struct ipmi_smi *intf = user->intf;
int i;
- unsigned long flags;
struct cmd_rcvr *rcvr;
struct cmd_rcvr *rcvrs = NULL;
struct ipmi_recv_msg *msg, *msg2;
@@ -1346,7 +1349,7 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
list_del(&user->link);
atomic_dec(&intf->nr_users);
- spin_lock_irqsave(&intf->seq_lock, flags);
+ mutex_lock(&intf->seq_lock);
for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
if (intf->seq_table[i].inuse
&& (intf->seq_table[i].recv_msg->user == user)) {
@@ -1355,7 +1358,7 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
}
}
- spin_unlock_irqrestore(&intf->seq_lock, flags);
+ mutex_unlock(&intf->seq_lock);
/*
* Remove the user from the command receiver's table. First
@@ -1534,8 +1537,15 @@ EXPORT_SYMBOL(ipmi_get_maintenance_mode);
static void maintenance_mode_update(struct ipmi_smi *intf)
{
if (intf->handlers->set_maintenance_mode)
+ /*
+ * Lower level drivers only care about firmware mode
+ * as it affects their timing. They don't care about
+ * reset, which disables all commands for a while.
+ */
intf->handlers->set_maintenance_mode(
- intf->send_info, intf->maintenance_mode_enable);
+ intf->send_info,
+ (intf->maintenance_mode_state ==
+ IPMI_MAINTENANCE_MODE_STATE_FIRMWARE));
}
int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode)
@@ -1552,16 +1562,17 @@ int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode)
if (intf->maintenance_mode != mode) {
switch (mode) {
case IPMI_MAINTENANCE_MODE_AUTO:
- intf->maintenance_mode_enable
- = (intf->auto_maintenance_timeout > 0);
+ /* Just leave it alone. */
break;
case IPMI_MAINTENANCE_MODE_OFF:
- intf->maintenance_mode_enable = false;
+ intf->maintenance_mode_state =
+ IPMI_MAINTENANCE_MODE_STATE_OFF;
break;
case IPMI_MAINTENANCE_MODE_ON:
- intf->maintenance_mode_enable = true;
+ intf->maintenance_mode_state =
+ IPMI_MAINTENANCE_MODE_STATE_FIRMWARE;
break;
default:
@@ -1616,8 +1627,7 @@ int ipmi_set_gets_events(struct ipmi_user *user, bool val)
}
list_for_each_entry_safe(msg, msg2, &msgs, link) {
- msg->user = user;
- kref_get(&user->refcount);
+ ipmi_set_recv_msg_user(msg, user);
deliver_local_response(intf, msg);
}
}
@@ -1922,14 +1932,20 @@ static int i_ipmi_req_sysintf(struct ipmi_smi *intf,
if (is_maintenance_mode_cmd(msg)) {
unsigned long flags;
+ int newst;
+
+ if (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST)
+ newst = IPMI_MAINTENANCE_MODE_STATE_FIRMWARE;
+ else
+ newst = IPMI_MAINTENANCE_MODE_STATE_RESET;
spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
- intf->auto_maintenance_timeout
- = maintenance_mode_timeout_ms;
+ intf->auto_maintenance_timeout = maintenance_mode_timeout_ms;
if (!intf->maintenance_mode
- && !intf->maintenance_mode_enable) {
- intf->maintenance_mode_enable = true;
+ && intf->maintenance_mode_state < newst) {
+ intf->maintenance_mode_state = newst;
maintenance_mode_update(intf);
+ mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
}
spin_unlock_irqrestore(&intf->maintenance_mode_lock,
flags);
@@ -1943,7 +1959,7 @@ static int i_ipmi_req_sysintf(struct ipmi_smi *intf,
smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
smi_msg->data[1] = msg->cmd;
smi_msg->msgid = msgid;
- smi_msg->user_data = recv_msg;
+ smi_msg->recv_msg = recv_msg;
if (msg->data_len > 0)
memcpy(&smi_msg->data[2], msg->data, msg->data_len);
smi_msg->data_size = msg->data_len + 2;
@@ -2024,12 +2040,9 @@ static int i_ipmi_req_ipmb(struct ipmi_smi *intf,
* Save the receive message so we can use it
* to deliver the response.
*/
- smi_msg->user_data = recv_msg;
+ smi_msg->recv_msg = recv_msg;
} else {
- /* It's a command, so get a sequence for it. */
- unsigned long flags;
-
- spin_lock_irqsave(&intf->seq_lock, flags);
+ mutex_lock(&intf->seq_lock);
if (is_maintenance_mode_cmd(msg))
intf->ipmb_maintenance_mode_timeout =
@@ -2087,7 +2100,7 @@ static int i_ipmi_req_ipmb(struct ipmi_smi *intf,
* to be correct.
*/
out_err:
- spin_unlock_irqrestore(&intf->seq_lock, flags);
+ mutex_unlock(&intf->seq_lock);
}
return rv;
@@ -2140,7 +2153,7 @@ static int i_ipmi_req_ipmb_direct(struct ipmi_smi *intf,
memcpy(smi_msg->data + 4, msg->data, msg->data_len);
smi_msg->data_size = msg->data_len + 4;
- smi_msg->user_data = recv_msg;
+ smi_msg->recv_msg = recv_msg;
return 0;
}
@@ -2203,12 +2216,9 @@ static int i_ipmi_req_lan(struct ipmi_smi *intf,
* Save the receive message so we can use it
* to deliver the response.
*/
- smi_msg->user_data = recv_msg;
+ smi_msg->recv_msg = recv_msg;
} else {
- /* It's a command, so get a sequence for it. */
- unsigned long flags;
-
- spin_lock_irqsave(&intf->seq_lock, flags);
+ mutex_lock(&intf->seq_lock);
/*
* Create a sequence number with a 1 second
@@ -2257,7 +2267,7 @@ static int i_ipmi_req_lan(struct ipmi_smi *intf,
* to be correct.
*/
out_err:
- spin_unlock_irqrestore(&intf->seq_lock, flags);
+ mutex_unlock(&intf->seq_lock);
}
return rv;
@@ -2288,22 +2298,15 @@ static int i_ipmi_request(struct ipmi_user *user,
int run_to_completion = READ_ONCE(intf->run_to_completion);
int rv = 0;
- if (user) {
- if (atomic_add_return(1, &user->nr_msgs) > max_msgs_per_user) {
- /* Decrement will happen at the end of the routine. */
- rv = -EBUSY;
- goto out;
- }
- }
-
- if (supplied_recv)
+ if (supplied_recv) {
recv_msg = supplied_recv;
- else {
- recv_msg = ipmi_alloc_recv_msg();
- if (recv_msg == NULL) {
- rv = -ENOMEM;
- goto out;
- }
+ recv_msg->user = user;
+ if (user)
+ atomic_inc(&user->nr_msgs);
+ } else {
+ recv_msg = ipmi_alloc_recv_msg(user);
+ if (IS_ERR(recv_msg))
+ return PTR_ERR(recv_msg);
}
recv_msg->user_msg_data = user_msg_data;
@@ -2314,22 +2317,22 @@ static int i_ipmi_request(struct ipmi_user *user,
if (smi_msg == NULL) {
if (!supplied_recv)
ipmi_free_recv_msg(recv_msg);
- rv = -ENOMEM;
- goto out;
+ return -ENOMEM;
}
}
if (!run_to_completion)
mutex_lock(&intf->users_mutex);
+ if (intf->maintenance_mode_state == IPMI_MAINTENANCE_MODE_STATE_RESET) {
+ /* No messages while the BMC is in reset. */
+ rv = -EBUSY;
+ goto out_err;
+ }
if (intf->in_shutdown) {
rv = -ENODEV;
goto out_err;
}
- recv_msg->user = user;
- if (user)
- /* The put happens when the message is freed. */
- kref_get(&user->refcount);
recv_msg->msgid = msgid;
/*
* Store the message to send in the receive message so timeout
@@ -2358,8 +2361,10 @@ static int i_ipmi_request(struct ipmi_user *user,
if (rv) {
out_err:
- ipmi_free_smi_msg(smi_msg);
- ipmi_free_recv_msg(recv_msg);
+ if (!supplied_smi)
+ ipmi_free_smi_msg(smi_msg);
+ if (!supplied_recv)
+ ipmi_free_recv_msg(recv_msg);
} else {
dev_dbg(intf->si_dev, "Send: %*ph\n",
smi_msg->data_size, smi_msg->data);
@@ -2369,9 +2374,6 @@ out_err:
if (!run_to_completion)
mutex_unlock(&intf->users_mutex);
-out:
- if (rv && user)
- atomic_dec(&user->nr_msgs);
return rv;
}
@@ -2622,6 +2624,12 @@ retry_bmc_lock:
(bmc->dyn_id_set && time_is_after_jiffies(bmc->dyn_id_expiry)))
goto out_noprocessing;
+ /* Don't allow sysfs access when in maintenance mode. */
+ if (intf->maintenance_mode_state) {
+ rv = -EBUSY;
+ goto out_noprocessing;
+ }
+
prev_guid_set = bmc->dyn_guid_set;
__get_guid(intf);
@@ -3517,6 +3525,19 @@ static ssize_t nr_msgs_show(struct device *dev,
}
static DEVICE_ATTR_RO(nr_msgs);
+static ssize_t maintenance_mode_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ipmi_smi *intf = container_of(attr,
+ struct ipmi_smi,
+ maintenance_mode_devattr);
+
+ return sysfs_emit(buf, "%u %d\n", intf->maintenance_mode_state,
+ intf->auto_maintenance_timeout);
+}
+static DEVICE_ATTR_RO(maintenance_mode);
+
static void redo_bmc_reg(struct work_struct *work)
{
struct ipmi_smi *intf = container_of(work, struct ipmi_smi,
@@ -3575,7 +3596,7 @@ int ipmi_add_smi(struct module *owner,
atomic_set(&intf->nr_users, 0);
intf->handlers = handlers;
intf->send_info = send_info;
- spin_lock_init(&intf->seq_lock);
+ mutex_init(&intf->seq_lock);
for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
intf->seq_table[j].inuse = 0;
intf->seq_table[j].seqid = 0;
@@ -3653,6 +3674,14 @@ int ipmi_add_smi(struct module *owner,
goto out_err_bmc_reg;
}
+ intf->maintenance_mode_devattr = dev_attr_maintenance_mode;
+ sysfs_attr_init(&intf->maintenance_mode_devattr.attr);
+ rv = device_create_file(intf->si_dev, &intf->maintenance_mode_devattr);
+ if (rv) {
+ device_remove_file(intf->si_dev, &intf->nr_users_devattr);
+ goto out_err_bmc_reg;
+ }
+
intf->intf_num = i;
mutex_unlock(&ipmi_interfaces_mutex);
@@ -3760,6 +3789,7 @@ void ipmi_unregister_smi(struct ipmi_smi *intf)
if (intf->handlers->shutdown)
intf->handlers->shutdown(intf->send_info);
+ device_remove_file(intf->si_dev, &intf->maintenance_mode_devattr);
device_remove_file(intf->si_dev, &intf->nr_msgs_devattr);
device_remove_file(intf->si_dev, &intf->nr_users_devattr);
@@ -3862,7 +3892,7 @@ static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
unsigned char chan;
struct ipmi_user *user = NULL;
struct ipmi_ipmb_addr *ipmb_addr;
- struct ipmi_recv_msg *recv_msg;
+ struct ipmi_recv_msg *recv_msg = NULL;
if (msg->rsp_size < 10) {
/* Message not big enough, just ignore it. */
@@ -3883,9 +3913,8 @@ static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
if (rcvr) {
user = rcvr->user;
- kref_get(&user->refcount);
- } else
- user = NULL;
+ recv_msg = ipmi_alloc_recv_msg(user);
+ }
rcu_read_unlock();
if (user == NULL) {
@@ -3915,47 +3944,41 @@ static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
* causes it to not be freed or queued.
*/
rv = -1;
- } else {
- recv_msg = ipmi_alloc_recv_msg();
- if (!recv_msg) {
- /*
- * We couldn't allocate memory for the
- * message, so requeue it for handling
- * later.
- */
- rv = 1;
- kref_put(&user->refcount, free_ipmi_user);
- } else {
- /* Extract the source address from the data. */
- ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
- ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
- ipmb_addr->slave_addr = msg->rsp[6];
- ipmb_addr->lun = msg->rsp[7] & 3;
- ipmb_addr->channel = msg->rsp[3] & 0xf;
+ } else if (!IS_ERR(recv_msg)) {
+ /* Extract the source address from the data. */
+ ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
+ ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
+ ipmb_addr->slave_addr = msg->rsp[6];
+ ipmb_addr->lun = msg->rsp[7] & 3;
+ ipmb_addr->channel = msg->rsp[3] & 0xf;
- /*
- * Extract the rest of the message information
- * from the IPMB header.
- */
- recv_msg->user = user;
- recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
- recv_msg->msgid = msg->rsp[7] >> 2;
- recv_msg->msg.netfn = msg->rsp[4] >> 2;
- recv_msg->msg.cmd = msg->rsp[8];
- recv_msg->msg.data = recv_msg->msg_data;
+ /*
+ * Extract the rest of the message information
+ * from the IPMB header.
+ */
+ recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
+ recv_msg->msgid = msg->rsp[7] >> 2;
+ recv_msg->msg.netfn = msg->rsp[4] >> 2;
+ recv_msg->msg.cmd = msg->rsp[8];
+ recv_msg->msg.data = recv_msg->msg_data;
- /*
- * We chop off 10, not 9 bytes because the checksum
- * at the end also needs to be removed.
- */
- recv_msg->msg.data_len = msg->rsp_size - 10;
- memcpy(recv_msg->msg_data, &msg->rsp[9],
- msg->rsp_size - 10);
- if (deliver_response(intf, recv_msg))
- ipmi_inc_stat(intf, unhandled_commands);
- else
- ipmi_inc_stat(intf, handled_commands);
- }
+ /*
+ * We chop off 10, not 9 bytes because the checksum
+ * at the end also needs to be removed.
+ */
+ recv_msg->msg.data_len = msg->rsp_size - 10;
+ memcpy(recv_msg->msg_data, &msg->rsp[9],
+ msg->rsp_size - 10);
+ if (deliver_response(intf, recv_msg))
+ ipmi_inc_stat(intf, unhandled_commands);
+ else
+ ipmi_inc_stat(intf, handled_commands);
+ } else {
+ /*
+ * We couldn't allocate memory for the message, so
+ * requeue it for handling later.
+ */
+ rv = 1;
}
return rv;
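[Editor's note] The rework repeated across these handle_*_get_msg_cmd hunks moves to an allocator that binds the user at allocation time (apparently taking the user reference and nr_msgs accounting internally) and reports failure as an ERR_PTR rather than NULL. The resulting caller contract, sketched:

	recv_msg = ipmi_alloc_recv_msg(user);	/* may return ERR_PTR(-E...) */
	if (IS_ERR(recv_msg))
		return 1;	/* requeue the SMI message for later */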
@@ -3968,7 +3991,7 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf,
int rv = 0;
struct ipmi_user *user = NULL;
struct ipmi_ipmb_direct_addr *daddr;
- struct ipmi_recv_msg *recv_msg;
+ struct ipmi_recv_msg *recv_msg = NULL;
unsigned char netfn = msg->rsp[0] >> 2;
unsigned char cmd = msg->rsp[3];
@@ -3977,9 +4000,8 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf,
rcvr = find_cmd_rcvr(intf, netfn, cmd, 0);
if (rcvr) {
user = rcvr->user;
- kref_get(&user->refcount);
- } else
- user = NULL;
+ recv_msg = ipmi_alloc_recv_msg(user);
+ }
rcu_read_unlock();
if (user == NULL) {
@@ -4001,44 +4023,38 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf,
* causes it to not be freed or queued.
*/
rv = -1;
- } else {
- recv_msg = ipmi_alloc_recv_msg();
- if (!recv_msg) {
- /*
- * We couldn't allocate memory for the
- * message, so requeue it for handling
- * later.
- */
- rv = 1;
- kref_put(&user->refcount, free_ipmi_user);
- } else {
- /* Extract the source address from the data. */
- daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr;
- daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE;
- daddr->channel = 0;
- daddr->slave_addr = msg->rsp[1];
- daddr->rs_lun = msg->rsp[0] & 3;
- daddr->rq_lun = msg->rsp[2] & 3;
+ } else if (!IS_ERR(recv_msg)) {
+ /* Extract the source address from the data. */
+ daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr;
+ daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE;
+ daddr->channel = 0;
+ daddr->slave_addr = msg->rsp[1];
+ daddr->rs_lun = msg->rsp[0] & 3;
+ daddr->rq_lun = msg->rsp[2] & 3;
- /*
- * Extract the rest of the message information
- * from the IPMB header.
- */
- recv_msg->user = user;
- recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
- recv_msg->msgid = (msg->rsp[2] >> 2);
- recv_msg->msg.netfn = msg->rsp[0] >> 2;
- recv_msg->msg.cmd = msg->rsp[3];
- recv_msg->msg.data = recv_msg->msg_data;
-
- recv_msg->msg.data_len = msg->rsp_size - 4;
- memcpy(recv_msg->msg_data, msg->rsp + 4,
- msg->rsp_size - 4);
- if (deliver_response(intf, recv_msg))
- ipmi_inc_stat(intf, unhandled_commands);
- else
- ipmi_inc_stat(intf, handled_commands);
- }
+ /*
+ * Extract the rest of the message information
+ * from the IPMB header.
+ */
+ recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
+ recv_msg->msgid = (msg->rsp[2] >> 2);
+ recv_msg->msg.netfn = msg->rsp[0] >> 2;
+ recv_msg->msg.cmd = msg->rsp[3];
+ recv_msg->msg.data = recv_msg->msg_data;
+
+ recv_msg->msg.data_len = msg->rsp_size - 4;
+ memcpy(recv_msg->msg_data, msg->rsp + 4,
+ msg->rsp_size - 4);
+ if (deliver_response(intf, recv_msg))
+ ipmi_inc_stat(intf, unhandled_commands);
+ else
+ ipmi_inc_stat(intf, handled_commands);
+ } else {
+ /*
+ * We couldn't allocate memory for the message, so
+ * requeue it for handling later.
+ */
+ rv = 1;
}
return rv;
@@ -4050,7 +4066,7 @@ static int handle_ipmb_direct_rcv_rsp(struct ipmi_smi *intf,
struct ipmi_recv_msg *recv_msg;
struct ipmi_ipmb_direct_addr *daddr;
- recv_msg = msg->user_data;
+ recv_msg = msg->recv_msg;
if (recv_msg == NULL) {
dev_warn(intf->si_dev,
"IPMI direct message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n");
@@ -4152,7 +4168,7 @@ static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
unsigned char chan;
struct ipmi_user *user = NULL;
struct ipmi_lan_addr *lan_addr;
- struct ipmi_recv_msg *recv_msg;
+ struct ipmi_recv_msg *recv_msg = NULL;
if (msg->rsp_size < 12) {
/* Message not big enough, just ignore it. */
@@ -4173,9 +4189,8 @@ static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
if (rcvr) {
user = rcvr->user;
- kref_get(&user->refcount);
- } else
- user = NULL;
+ recv_msg = ipmi_alloc_recv_msg(user);
+ }
rcu_read_unlock();
if (user == NULL) {
@@ -4206,49 +4221,44 @@ static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
* causes it to not be freed or queued.
*/
rv = -1;
- } else {
- recv_msg = ipmi_alloc_recv_msg();
- if (!recv_msg) {
- /*
- * We couldn't allocate memory for the
- * message, so requeue it for handling later.
- */
- rv = 1;
- kref_put(&user->refcount, free_ipmi_user);
- } else {
- /* Extract the source address from the data. */
- lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
- lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
- lan_addr->session_handle = msg->rsp[4];
- lan_addr->remote_SWID = msg->rsp[8];
- lan_addr->local_SWID = msg->rsp[5];
- lan_addr->lun = msg->rsp[9] & 3;
- lan_addr->channel = msg->rsp[3] & 0xf;
- lan_addr->privilege = msg->rsp[3] >> 4;
+ } else if (!IS_ERR(recv_msg)) {
+ /* Extract the source address from the data. */
+ lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
+ lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
+ lan_addr->session_handle = msg->rsp[4];
+ lan_addr->remote_SWID = msg->rsp[8];
+ lan_addr->local_SWID = msg->rsp[5];
+ lan_addr->lun = msg->rsp[9] & 3;
+ lan_addr->channel = msg->rsp[3] & 0xf;
+ lan_addr->privilege = msg->rsp[3] >> 4;
- /*
- * Extract the rest of the message information
- * from the IPMB header.
- */
- recv_msg->user = user;
- recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
- recv_msg->msgid = msg->rsp[9] >> 2;
- recv_msg->msg.netfn = msg->rsp[6] >> 2;
- recv_msg->msg.cmd = msg->rsp[10];
- recv_msg->msg.data = recv_msg->msg_data;
+ /*
+ * Extract the rest of the message information
+ * from the IPMB header.
+ */
+ recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
+ recv_msg->msgid = msg->rsp[9] >> 2;
+ recv_msg->msg.netfn = msg->rsp[6] >> 2;
+ recv_msg->msg.cmd = msg->rsp[10];
+ recv_msg->msg.data = recv_msg->msg_data;
- /*
- * We chop off 12, not 11 bytes because the checksum
- * at the end also needs to be removed.
- */
- recv_msg->msg.data_len = msg->rsp_size - 12;
- memcpy(recv_msg->msg_data, &msg->rsp[11],
- msg->rsp_size - 12);
- if (deliver_response(intf, recv_msg))
- ipmi_inc_stat(intf, unhandled_commands);
- else
- ipmi_inc_stat(intf, handled_commands);
- }
+ /*
+ * We chop off 12, not 11 bytes because the checksum
+ * at the end also needs to be removed.
+ */
+ recv_msg->msg.data_len = msg->rsp_size - 12;
+ memcpy(recv_msg->msg_data, &msg->rsp[11],
+ msg->rsp_size - 12);
+ if (deliver_response(intf, recv_msg))
+ ipmi_inc_stat(intf, unhandled_commands);
+ else
+ ipmi_inc_stat(intf, handled_commands);
+ } else {
+ /*
+ * We couldn't allocate the message (out of memory, or the
+ * user is over its message limit), so requeue it for
+ * handling later.
+ */
+ rv = 1;
}
return rv;
@@ -4270,7 +4280,7 @@ static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
unsigned char chan;
struct ipmi_user *user = NULL;
struct ipmi_system_interface_addr *smi_addr;
- struct ipmi_recv_msg *recv_msg;
+ struct ipmi_recv_msg *recv_msg = NULL;
/*
* We expect the OEM SW to perform error checking
@@ -4299,9 +4309,8 @@ static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
if (rcvr) {
user = rcvr->user;
- kref_get(&user->refcount);
- } else
- user = NULL;
+ recv_msg = ipmi_alloc_recv_msg(user);
+ }
rcu_read_unlock();
if (user == NULL) {
@@ -4314,48 +4323,42 @@ static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
*/
rv = 0;
- } else {
- recv_msg = ipmi_alloc_recv_msg();
- if (!recv_msg) {
- /*
- * We couldn't allocate memory for the
- * message, so requeue it for handling
- * later.
- */
- rv = 1;
- kref_put(&user->refcount, free_ipmi_user);
- } else {
- /*
- * OEM Messages are expected to be delivered via
- * the system interface to SMS software. We might
- * need to visit this again depending on OEM
- * requirements
- */
- smi_addr = ((struct ipmi_system_interface_addr *)
- &recv_msg->addr);
- smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
- smi_addr->channel = IPMI_BMC_CHANNEL;
- smi_addr->lun = msg->rsp[0] & 3;
-
- recv_msg->user = user;
- recv_msg->user_msg_data = NULL;
- recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
- recv_msg->msg.netfn = msg->rsp[0] >> 2;
- recv_msg->msg.cmd = msg->rsp[1];
- recv_msg->msg.data = recv_msg->msg_data;
+ } else if (!IS_ERR(recv_msg)) {
+ /*
+ * OEM Messages are expected to be delivered via
+ * the system interface to SMS software. We might
+ * need to visit this again depending on OEM
+ * requirements
+ */
+ smi_addr = ((struct ipmi_system_interface_addr *)
+ &recv_msg->addr);
+ smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ smi_addr->channel = IPMI_BMC_CHANNEL;
+ smi_addr->lun = msg->rsp[0] & 3;
+
+ recv_msg->user_msg_data = NULL;
+ recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
+ recv_msg->msg.netfn = msg->rsp[0] >> 2;
+ recv_msg->msg.cmd = msg->rsp[1];
+ recv_msg->msg.data = recv_msg->msg_data;
- /*
- * The message starts at byte 4 which follows the
- * Channel Byte in the "GET MESSAGE" command
- */
- recv_msg->msg.data_len = msg->rsp_size - 4;
- memcpy(recv_msg->msg_data, &msg->rsp[4],
- msg->rsp_size - 4);
- if (deliver_response(intf, recv_msg))
- ipmi_inc_stat(intf, unhandled_commands);
- else
- ipmi_inc_stat(intf, handled_commands);
- }
+ /*
+ * The message starts at byte 4 which follows the
+ * Channel Byte in the "GET MESSAGE" command
+ */
+ recv_msg->msg.data_len = msg->rsp_size - 4;
+ memcpy(recv_msg->msg_data, &msg->rsp[4],
+ msg->rsp_size - 4);
+ if (deliver_response(intf, recv_msg))
+ ipmi_inc_stat(intf, unhandled_commands);
+ else
+ ipmi_inc_stat(intf, handled_commands);
+ } else {
+ /*
+ * We couldn't allocate the message (out of memory, or the
+ * user is over its message limit), so requeue it for
+ * handling later.
+ */
+ rv = 1;
}
return rv;
@@ -4413,8 +4416,8 @@ static int handle_read_event_rsp(struct ipmi_smi *intf,
if (!user->gets_events)
continue;
- recv_msg = ipmi_alloc_recv_msg();
- if (!recv_msg) {
+ recv_msg = ipmi_alloc_recv_msg(user);
+ if (IS_ERR(recv_msg)) {
mutex_unlock(&intf->users_mutex);
list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
link) {
@@ -4435,8 +4438,6 @@ static int handle_read_event_rsp(struct ipmi_smi *intf,
deliver_count++;
copy_event_into_recv_msg(recv_msg, msg);
- recv_msg->user = user;
- kref_get(&user->refcount);
list_add_tail(&recv_msg->link, &msgs);
}
mutex_unlock(&intf->users_mutex);
@@ -4452,8 +4453,8 @@ static int handle_read_event_rsp(struct ipmi_smi *intf,
* No one to receive the message, put it in queue if there's
* not already too many things in the queue.
*/
- recv_msg = ipmi_alloc_recv_msg();
- if (!recv_msg) {
+ recv_msg = ipmi_alloc_recv_msg(NULL);
+ if (IS_ERR(recv_msg)) {
/*
* We couldn't allocate memory for the
* message, so requeue it for handling
@@ -4488,7 +4489,7 @@ static int handle_bmc_rsp(struct ipmi_smi *intf,
struct ipmi_recv_msg *recv_msg;
struct ipmi_system_interface_addr *smi_addr;
- recv_msg = msg->user_data;
+ recv_msg = msg->recv_msg;
if (recv_msg == NULL) {
dev_warn(intf->si_dev,
"IPMI SMI message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n");
@@ -4529,9 +4530,10 @@ static int handle_one_recv_msg(struct ipmi_smi *intf,
if (msg->rsp_size < 2) {
/* Message is too small to be correct. */
- dev_warn(intf->si_dev,
- "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n",
- (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
+ dev_warn_ratelimited(intf->si_dev,
+ "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n",
+ (msg->data[0] >> 2) | 1,
+ msg->data[1], msg->rsp_size);
return_unspecified:
/* Generate an error response for the message. */
@@ -4561,14 +4563,14 @@ return_unspecified:
} else if ((msg->data_size >= 2)
&& (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
&& (msg->data[1] == IPMI_SEND_MSG_CMD)
- && (msg->user_data == NULL)) {
+ && (msg->recv_msg == NULL)) {
if (intf->in_shutdown || intf->run_to_completion)
goto out;
/*
* This is the local response to a command send, start
- * the timer for these. The user_data will not be
+ * the timer for these. The recv_msg will not be
* NULL if this is a response send, and we will let
* response sends just go through.
*/
@@ -4628,7 +4630,7 @@ return_unspecified:
requeue = handle_ipmb_direct_rcv_rsp(intf, msg);
} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
&& (msg->rsp[1] == IPMI_SEND_MSG_CMD)
- && (msg->user_data != NULL)) {
+ && (msg->recv_msg != NULL)) {
/*
* It's a response to a response we sent. For this we
* deliver a send message response to the user.
@@ -4645,7 +4647,7 @@ return_unspecified:
cc = msg->rsp[2];
process_response_response:
- recv_msg = msg->user_data;
+ recv_msg = msg->recv_msg;
requeue = 0;
if (!recv_msg)
@@ -4801,6 +4803,7 @@ static void smi_work(struct work_struct *t)
int run_to_completion = READ_ONCE(intf->run_to_completion);
struct ipmi_smi_msg *newmsg = NULL;
struct ipmi_recv_msg *msg, *msg2;
+ int cc;
/*
* Start the next message if available.
@@ -4809,7 +4812,7 @@ static void smi_work(struct work_struct *t)
* because the lower layer is allowed to hold locks while calling
* message delivery.
*/
-
+restart:
if (!run_to_completion)
spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
if (intf->curr_msg == NULL && !intf->in_shutdown) {
@@ -4830,8 +4833,17 @@ static void smi_work(struct work_struct *t)
if (!run_to_completion)
spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
- if (newmsg)
- intf->handlers->sender(intf->send_info, newmsg);
+ if (newmsg) {
+ cc = intf->handlers->sender(intf->send_info, newmsg);
+ if (cc) {
+ if (newmsg->recv_msg)
+ deliver_err_response(intf,
+ newmsg->recv_msg, cc);
+ else
+ ipmi_free_smi_msg(newmsg);
+ goto restart;
+ }
+ }
handle_new_recv_msgs(intf);
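
Note on the hunk above: the sender() handler now returns an IPMI completion code instead of void, and smi_work() reacts to a nonzero code by failing the message immediately (deliver_err_response() when a recv_msg is attached, otherwise freeing the SMI message) and restarting the transmit loop. A minimal sketch of the new driver-side contract; my_hw and my_hw_queue_msg() are hypothetical stand-ins, not a real backend:

    /* Sketch only: my_hw and my_hw_queue_msg() are assumed helpers. */
    static int my_sender(void *send_info, struct ipmi_smi_msg *msg)
    {
            struct my_hw *hw = send_info;

            if (hw->dead)
                    return IPMI_BUS_ERR;    /* core delivers the error */

            my_hw_queue_msg(hw, msg);       /* queued for transmission */
            return IPMI_CC_NO_ERROR;
    }
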
@@ -4868,12 +4880,10 @@ static void smi_work(struct work_struct *t)
list_del(&msg->link);
- if (refcount_read(&user->destroyed) == 0) {
+ if (refcount_read(&user->destroyed) == 0)
ipmi_free_recv_msg(msg);
- } else {
- atomic_dec(&user->nr_msgs);
+ else
user->handler->ipmi_recv_hndl(msg, user->handler_data);
- }
}
mutex_unlock(&intf->user_msgs_mutex);
@@ -4951,8 +4961,7 @@ smi_from_recv_msg(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg,
static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
struct list_head *timeouts,
unsigned long timeout_period,
- int slot, unsigned long *flags,
- bool *need_timer)
+ int slot, bool *need_timer)
{
struct ipmi_recv_msg *msg;
@@ -5004,7 +5013,7 @@ static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
return;
}
- spin_unlock_irqrestore(&intf->seq_lock, *flags);
+ mutex_unlock(&intf->seq_lock);
/*
* Send the new message. We send with a zero
@@ -5025,7 +5034,7 @@ static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
} else
ipmi_free_smi_msg(smi_msg);
- spin_lock_irqsave(&intf->seq_lock, *flags);
+ mutex_lock(&intf->seq_lock);
}
}
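
The dropped flags plumbing follows from intf->seq_lock being converted from a spinlock to a mutex in this series: check_msg_timeout() can simply release and retake the lock around the resend instead of threading saved IRQ flags through every caller. The pattern in isolation:

    mutex_unlock(&intf->seq_lock);
    /* rebuild and resend the timed-out request; this may sleep now */
    mutex_lock(&intf->seq_lock);
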
@@ -5052,7 +5061,7 @@ static bool ipmi_timeout_handler(struct ipmi_smi *intf,
* list.
*/
INIT_LIST_HEAD(&timeouts);
- spin_lock_irqsave(&intf->seq_lock, flags);
+ mutex_lock(&intf->seq_lock);
if (intf->ipmb_maintenance_mode_timeout) {
if (intf->ipmb_maintenance_mode_timeout <= timeout_period)
intf->ipmb_maintenance_mode_timeout = 0;
@@ -5062,8 +5071,8 @@ static bool ipmi_timeout_handler(struct ipmi_smi *intf,
for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
check_msg_timeout(intf, &intf->seq_table[i],
&timeouts, timeout_period, i,
- &flags, &need_timer);
- spin_unlock_irqrestore(&intf->seq_lock, flags);
+ &need_timer);
+ mutex_unlock(&intf->seq_lock);
list_for_each_entry_safe(msg, msg2, &timeouts, link)
deliver_err_response(intf, msg, IPMI_TIMEOUT_COMPLETION_CODE);
@@ -5083,7 +5092,9 @@ static bool ipmi_timeout_handler(struct ipmi_smi *intf,
-= timeout_period;
if (!intf->maintenance_mode
&& (intf->auto_maintenance_timeout <= 0)) {
- intf->maintenance_mode_enable = false;
+ intf->maintenance_mode_state =
+ IPMI_MAINTENANCE_MODE_STATE_OFF;
+ intf->auto_maintenance_timeout = 0;
maintenance_mode_update(intf);
}
}
@@ -5099,15 +5110,13 @@ static bool ipmi_timeout_handler(struct ipmi_smi *intf,
static void ipmi_request_event(struct ipmi_smi *intf)
{
/* No event requests when in maintenance mode. */
- if (intf->maintenance_mode_enable)
+ if (intf->maintenance_mode_state)
return;
if (!intf->in_shutdown)
intf->handlers->request_events(intf->send_info);
}
-static struct timer_list ipmi_timer;
-
static atomic_t stop_operation;
static void ipmi_timeout_work(struct work_struct *work)
@@ -5131,6 +5140,8 @@ static void ipmi_timeout_work(struct work_struct *work)
}
need_timer = true;
}
+ if (intf->maintenance_mode_state)
+ need_timer = true;
need_timer |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
}
@@ -5174,7 +5185,7 @@ struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
if (rv) {
rv->done = free_smi_msg;
- rv->user_data = NULL;
+ rv->recv_msg = NULL;
rv->type = IPMI_SMI_MSG_TYPE_NORMAL;
atomic_inc(&smi_msg_inuse_count);
}
@@ -5190,27 +5201,51 @@ static void free_recv_msg(struct ipmi_recv_msg *msg)
kfree(msg);
}
-static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
+static struct ipmi_recv_msg *ipmi_alloc_recv_msg(struct ipmi_user *user)
{
struct ipmi_recv_msg *rv;
+ if (user) {
+ if (atomic_add_return(1, &user->nr_msgs) > max_msgs_per_user) {
+ atomic_dec(&user->nr_msgs);
+ return ERR_PTR(-EBUSY);
+ }
+ }
+
rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
- if (rv) {
- rv->user = NULL;
- rv->done = free_recv_msg;
- atomic_inc(&recv_msg_inuse_count);
+ if (!rv) {
+ if (user)
+ atomic_dec(&user->nr_msgs);
+ return ERR_PTR(-ENOMEM);
}
+
+ rv->user = user;
+ rv->done = free_recv_msg;
+ if (user)
+ kref_get(&user->refcount);
+ atomic_inc(&recv_msg_inuse_count);
return rv;
}
void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
{
- if (msg->user && !oops_in_progress)
+ if (msg->user && !oops_in_progress) {
+ atomic_dec(&msg->user->nr_msgs);
kref_put(&msg->user->refcount, free_ipmi_user);
+ }
msg->done(msg);
}
EXPORT_SYMBOL(ipmi_free_recv_msg);
+static void ipmi_set_recv_msg_user(struct ipmi_recv_msg *msg,
+ struct ipmi_user *user)
+{
+ WARN_ON_ONCE(msg->user); /* User should not be set. */
+ msg->user = user;
+ atomic_inc(&user->nr_msgs);
+ kref_get(&user->refcount);
+}
+
static atomic_t panic_done_count = ATOMIC_INIT(0);
static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
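
With this rework, ipmi_alloc_recv_msg() takes the destination user up front: it enforces max_msgs_per_user, grabs the user refcount, and signals failure via ERR_PTR() (-EBUSY over quota, -ENOMEM on allocation failure) rather than NULL, while ipmi_free_recv_msg() undoes both counts. The command-handling paths earlier in this patch all collapse to the same caller shape; a minimal sketch:

    recv_msg = ipmi_alloc_recv_msg(user);
    if (IS_ERR(recv_msg))
            return 1;       /* requeue the SMI message for later */

    /* fill in recv_msg->addr and recv_msg->msg from the response */
    if (deliver_response(intf, recv_msg))
            ipmi_inc_stat(intf, unhandled_commands);
    else
            ipmi_inc_stat(intf, handled_commands);
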
diff --git a/drivers/char/ipmi/ipmi_powernv.c b/drivers/char/ipmi/ipmi_powernv.c
index 4a2efafcd1f8..52a1130defe5 100644
--- a/drivers/char/ipmi/ipmi_powernv.c
+++ b/drivers/char/ipmi/ipmi_powernv.c
@@ -51,7 +51,7 @@ static void send_error_reply(struct ipmi_smi_powernv *smi,
ipmi_smi_msg_received(smi->intf, msg);
}
-static void ipmi_powernv_send(void *send_info, struct ipmi_smi_msg *msg)
+static int ipmi_powernv_send(void *send_info, struct ipmi_smi_msg *msg)
{
struct ipmi_smi_powernv *smi = send_info;
struct opal_ipmi_msg *opal_msg;
@@ -93,18 +93,19 @@ static void ipmi_powernv_send(void *send_info, struct ipmi_smi_msg *msg)
smi->interface_id, opal_msg, size);
rc = opal_ipmi_send(smi->interface_id, opal_msg, size);
pr_devel("%s: -> %d\n", __func__, rc);
-
- if (!rc) {
- smi->cur_msg = msg;
- spin_unlock_irqrestore(&smi->msg_lock, flags);
- return;
+ if (rc) {
+ comp = IPMI_ERR_UNSPECIFIED;
+ goto err_unlock;
}
- comp = IPMI_ERR_UNSPECIFIED;
+ smi->cur_msg = msg;
+ spin_unlock_irqrestore(&smi->msg_lock, flags);
+ return IPMI_CC_NO_ERROR;
+
err_unlock:
spin_unlock_irqrestore(&smi->msg_lock, flags);
err:
- send_error_reply(smi, msg, comp);
+ return comp;
}
static int ipmi_powernv_recv(struct ipmi_smi_powernv *smi)
diff --git a/drivers/char/ipmi/ipmi_si.h b/drivers/char/ipmi/ipmi_si.h
index 508c3fd45877..687835b53da5 100644
--- a/drivers/char/ipmi/ipmi_si.h
+++ b/drivers/char/ipmi/ipmi_si.h
@@ -101,6 +101,13 @@ void ipmi_si_pci_shutdown(void);
static inline void ipmi_si_pci_init(void) { }
static inline void ipmi_si_pci_shutdown(void) { }
#endif
+#ifdef CONFIG_IPMI_LS2K
+void ipmi_si_ls2k_init(void);
+void ipmi_si_ls2k_shutdown(void);
+#else
+static inline void ipmi_si_ls2k_init(void) { }
+static inline void ipmi_si_ls2k_shutdown(void) { }
+#endif
#ifdef CONFIG_PARISC
void ipmi_si_parisc_init(void);
void ipmi_si_parisc_shutdown(void);
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 8b5524069c15..70e55f5ff85e 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -53,6 +53,7 @@
#define SI_TIMEOUT_JIFFIES (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
#define SI_SHORT_TIMEOUT_USEC 250 /* .25ms when the SM requests a
short timeout */
+#define SI_TIMEOUT_HOSED (HZ) /* 1 second when in hosed state. */
enum si_intf_state {
SI_NORMAL,
@@ -61,7 +62,8 @@ enum si_intf_state {
SI_CLEARING_FLAGS,
SI_GETTING_MESSAGES,
SI_CHECKING_ENABLES,
- SI_SETTING_ENABLES
+ SI_SETTING_ENABLES,
+ SI_HOSED
/* FIXME - add watchdog stuff. */
};
@@ -313,7 +315,7 @@ static void return_hosed_msg(struct smi_info *smi_info, int cCode)
static enum si_sm_result start_next_msg(struct smi_info *smi_info)
{
- int rv;
+ int rv;
if (!smi_info->waiting_msg) {
smi_info->curr_msg = NULL;
@@ -390,6 +392,17 @@ static void start_clear_flags(struct smi_info *smi_info)
smi_info->si_state = SI_CLEARING_FLAGS;
}
+static void start_get_flags(struct smi_info *smi_info)
+{
+ unsigned char msg[2];
+
+ msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ msg[1] = IPMI_GET_MSG_FLAGS_CMD;
+
+ start_new_msg(smi_info, msg, 2);
+ smi_info->si_state = SI_GETTING_FLAGS;
+}
+
static void start_getting_msg_queue(struct smi_info *smi_info)
{
smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
@@ -742,6 +755,8 @@ static void handle_transaction_done(struct smi_info *smi_info)
}
break;
}
+ case SI_HOSED: /* Shouldn't happen. */
+ break;
}
}
@@ -756,6 +771,10 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
enum si_sm_result si_sm_result;
restart:
+ if (smi_info->si_state == SI_HOSED)
+ /* Just in case; the hosed state is only left via the timeout handler. */
+ return SI_SM_HOSED;
+
/*
* There used to be a loop here that waited a little while
* (around 25us) before giving up. That turned out to be
@@ -779,18 +798,20 @@ restart:
/*
* Set the state before return_hosed_msg, because that
- * releases the lock.
+ * releases the lock. We just disable operations for
+ * a while and retry in hosed state.
*/
- smi_info->si_state = SI_NORMAL;
+ smi_info->si_state = SI_HOSED;
if (smi_info->curr_msg != NULL) {
/*
* If we were handling a user message, format
* a response to send to the upper layer to
* tell it about the error.
*/
- return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
+ return_hosed_msg(smi_info, IPMI_BUS_ERR);
}
- goto restart;
+ smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_HOSED);
+ goto out;
}
/*
@@ -798,8 +819,6 @@ restart:
* this if there is not yet an upper layer to handle anything.
*/
if (si_sm_result == SI_SM_ATTN || smi_info->got_attn) {
- unsigned char msg[2];
-
if (smi_info->si_state != SI_NORMAL) {
/*
* We got an ATTN, but we are doing something else.
@@ -817,11 +836,7 @@ restart:
* interrupts work with the SMI, that's not really
* possible.
*/
- msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
- msg[1] = IPMI_GET_MSG_FLAGS_CMD;
-
- start_new_msg(smi_info, msg, 2);
- smi_info->si_state = SI_GETTING_FLAGS;
+ start_get_flags(smi_info);
goto restart;
}
}
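
Taken together, the new SI_HOSED handling turns a dead BMC from a hot retry loop into a slow poll. A lifecycle summary of the pieces added across these hunks:

    /*
     * SI_SM_HOSED result:  si_state = SI_HOSED, fail curr_msg with
     *                      IPMI_BUS_ERR, arm timer at SI_TIMEOUT_HOSED
     * sender():            refuses new messages with IPMI_BUS_ERR
     * smi_timeout():       issues start_get_flags() as a probe; a
     *                      completed transaction rejoins the normal
     *                      SI_GETTING_FLAGS -> SI_NORMAL flow
     */
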
@@ -894,27 +909,29 @@ static void flush_messages(void *send_info)
* mode. This means we are single-threaded, no need for locks.
*/
result = smi_event_handler(smi_info, 0);
- while (result != SI_SM_IDLE) {
+ while (result != SI_SM_IDLE && result != SI_SM_HOSED) {
udelay(SI_SHORT_TIMEOUT_USEC);
result = smi_event_handler(smi_info, SI_SHORT_TIMEOUT_USEC);
}
}
-static void sender(void *send_info,
- struct ipmi_smi_msg *msg)
+static int sender(void *send_info, struct ipmi_smi_msg *msg)
{
struct smi_info *smi_info = send_info;
unsigned long flags;
debug_timestamp(smi_info, "Enqueue");
+ if (smi_info->si_state == SI_HOSED)
+ return IPMI_BUS_ERR;
+
if (smi_info->run_to_completion) {
/*
* If we are running to completion, start it. Upper
* layer will call flush_messages to clear it out.
*/
smi_info->waiting_msg = msg;
- return;
+ return IPMI_CC_NO_ERROR;
}
spin_lock_irqsave(&smi_info->si_lock, flags);
@@ -929,6 +946,7 @@ static void sender(void *send_info,
smi_info->waiting_msg = msg;
check_start_timer_thread(smi_info);
spin_unlock_irqrestore(&smi_info->si_lock, flags);
+ return IPMI_CC_NO_ERROR;
}
static void set_run_to_completion(void *send_info, bool i_run_to_completion)
@@ -1087,6 +1105,10 @@ static void smi_timeout(struct timer_list *t)
spin_lock_irqsave(&(smi_info->si_lock), flags);
debug_timestamp(smi_info, "Timer");
+ if (smi_info->si_state == SI_HOSED)
+ /* Try something to see if the BMC is now operational. */
+ start_get_flags(smi_info);
+
jiffies_now = jiffies;
time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
* SI_USEC_PER_JIFFY);
@@ -1096,14 +1118,11 @@ static void smi_timeout(struct timer_list *t)
/* Running with interrupts, only do long timeouts. */
timeout = jiffies + SI_TIMEOUT_JIFFIES;
smi_inc_stat(smi_info, long_timeouts);
- goto do_mod_timer;
- }
-
- /*
- * If the state machine asks for a short delay, then shorten
- * the timer timeout.
- */
- if (smi_result == SI_SM_CALL_WITH_DELAY) {
+ } else if (smi_result == SI_SM_CALL_WITH_DELAY) {
+ /*
+ * If the state machine asks for a short delay, then shorten
+ * the timer timeout.
+ */
smi_inc_stat(smi_info, short_timeouts);
timeout = jiffies + 1;
} else {
@@ -1111,7 +1130,6 @@ static void smi_timeout(struct timer_list *t)
timeout = jiffies + SI_TIMEOUT_JIFFIES;
}
-do_mod_timer:
if (smi_result != SI_SM_IDLE)
smi_mod_timer(smi_info, timeout);
else
@@ -2120,6 +2138,8 @@ static int __init init_ipmi_si(void)
ipmi_si_pci_init();
+ ipmi_si_ls2k_init();
+
ipmi_si_parisc_init();
mutex_lock(&smi_infos_lock);
@@ -2331,6 +2351,8 @@ static void cleanup_ipmi_si(void)
ipmi_si_pci_shutdown();
+ ipmi_si_ls2k_shutdown();
+
ipmi_si_parisc_shutdown();
ipmi_si_platform_shutdown();
diff --git a/drivers/char/ipmi/ipmi_si_ls2k.c b/drivers/char/ipmi/ipmi_si_ls2k.c
new file mode 100644
index 000000000000..45442c257efd
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si_ls2k.c
@@ -0,0 +1,189 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for Loongson-2K BMC IPMI interface
+ *
+ * Copyright (C) 2024-2025 Loongson Technology Corporation Limited.
+ *
+ * Authors:
+ * Chong Qiao <qiaochong@loongson.cn>
+ * Binbin Zhou <zhoubinbin@loongson.cn>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+#include "ipmi_si.h"
+
+#define LS2K_KCS_FIFO_IBFH 0x0
+#define LS2K_KCS_FIFO_IBFT 0x1
+#define LS2K_KCS_FIFO_OBFH 0x2
+#define LS2K_KCS_FIFO_OBFT 0x3
+
+/* KCS registers */
+#define LS2K_KCS_REG_STS 0x4
+#define LS2K_KCS_REG_DATA_OUT 0x5
+#define LS2K_KCS_REG_DATA_IN 0x6
+#define LS2K_KCS_REG_CMD 0x8
+
+#define LS2K_KCS_CMD_DATA 0xa
+#define LS2K_KCS_VERSION 0xb
+#define LS2K_KCS_WR_REQ 0xc
+#define LS2K_KCS_WR_ACK 0x10
+
+#define LS2K_KCS_STS_OBF BIT(0)
+#define LS2K_KCS_STS_IBF BIT(1)
+#define LS2K_KCS_STS_SMS_ATN BIT(2)
+#define LS2K_KCS_STS_CMD BIT(3)
+
+#define LS2K_KCS_DATA_MASK (LS2K_KCS_STS_OBF | LS2K_KCS_STS_IBF | LS2K_KCS_STS_CMD)
+
+static bool ls2k_registered;
+
+static unsigned char ls2k_mem_inb_v0(const struct si_sm_io *io, unsigned int offset)
+{
+ void __iomem *addr = io->addr;
+ int reg_offset;
+
+ if (offset & BIT(0)) {
+ reg_offset = LS2K_KCS_REG_STS;
+ } else {
+ writeb(readb(addr + LS2K_KCS_REG_STS) & ~LS2K_KCS_STS_OBF, addr + LS2K_KCS_REG_STS);
+ reg_offset = LS2K_KCS_REG_DATA_OUT;
+ }
+
+ return readb(addr + reg_offset);
+}
+
+static unsigned char ls2k_mem_inb_v1(const struct si_sm_io *io, unsigned int offset)
+{
+ void __iomem *addr = io->addr;
+ unsigned char inb = 0, cmd;
+ bool obf, ibf;
+
+ obf = readb(addr + LS2K_KCS_FIFO_OBFH) ^ readb(addr + LS2K_KCS_FIFO_OBFT);
+ ibf = readb(addr + LS2K_KCS_FIFO_IBFH) ^ readb(addr + LS2K_KCS_FIFO_IBFT);
+ cmd = readb(addr + LS2K_KCS_CMD_DATA);
+
+ if (offset & BIT(0)) {
+ inb = readb(addr + LS2K_KCS_REG_STS) & ~LS2K_KCS_DATA_MASK;
+ inb |= FIELD_PREP(LS2K_KCS_STS_OBF, obf)
+ | FIELD_PREP(LS2K_KCS_STS_IBF, ibf)
+ | FIELD_PREP(LS2K_KCS_STS_CMD, cmd);
+ } else {
+ inb = readb(addr + LS2K_KCS_REG_DATA_OUT);
+ writeb(readb(addr + LS2K_KCS_FIFO_OBFH), addr + LS2K_KCS_FIFO_OBFT);
+ }
+
+ return inb;
+}
+
+static void ls2k_mem_outb_v0(const struct si_sm_io *io, unsigned int offset,
+ unsigned char val)
+{
+ void __iomem *addr = io->addr;
+ unsigned char sts = readb(addr + LS2K_KCS_REG_STS);
+ int reg_offset;
+
+ if (sts & LS2K_KCS_STS_IBF)
+ return;
+
+ if (offset & BIT(0)) {
+ reg_offset = LS2K_KCS_REG_CMD;
+ sts |= LS2K_KCS_STS_CMD;
+ } else {
+ reg_offset = LS2K_KCS_REG_DATA_IN;
+ sts &= ~LS2K_KCS_STS_CMD;
+ }
+
+ writew(val, addr + reg_offset);
+ writeb(sts | LS2K_KCS_STS_IBF, addr + LS2K_KCS_REG_STS);
+ writel(readl(addr + LS2K_KCS_WR_REQ) + 1, addr + LS2K_KCS_WR_REQ);
+}
+
+static void ls2k_mem_outb_v1(const struct si_sm_io *io, unsigned int offset,
+ unsigned char val)
+{
+ void __iomem *addr = io->addr;
+ unsigned char ibfh, ibft;
+ int reg_offset;
+
+ ibfh = readb(addr + LS2K_KCS_FIFO_IBFH);
+ ibft = readb(addr + LS2K_KCS_FIFO_IBFT);
+
+ if (ibfh ^ ibft)
+ return;
+
+ reg_offset = (offset & BIT(0)) ? LS2K_KCS_REG_CMD : LS2K_KCS_REG_DATA_IN;
+ writew(val, addr + reg_offset);
+
+ writeb(offset & BIT(0), addr + LS2K_KCS_CMD_DATA);
+ writeb(!ibft, addr + LS2K_KCS_FIFO_IBFH);
+ writel(readl(addr + LS2K_KCS_WR_REQ) + 1, addr + LS2K_KCS_WR_REQ);
+}
+
+static void ls2k_mem_cleanup(struct si_sm_io *io)
+{
+ if (io->addr)
+ iounmap(io->addr);
+}
+
+static int ipmi_ls2k_mem_setup(struct si_sm_io *io)
+{
+ unsigned char version;
+
+ io->addr = ioremap(io->addr_data, io->regspacing);
+ if (!io->addr)
+ return -EIO;
+
+ version = readb(io->addr + LS2K_KCS_VERSION);
+
+ io->inputb = version ? ls2k_mem_inb_v1 : ls2k_mem_inb_v0;
+ io->outputb = version ? ls2k_mem_outb_v1 : ls2k_mem_outb_v0;
+ io->io_cleanup = ls2k_mem_cleanup;
+
+ return 0;
+}
+
+static int ipmi_ls2k_probe(struct platform_device *pdev)
+{
+ struct si_sm_io io;
+
+ memset(&io, 0, sizeof(io));
+
+ io.si_info = &ipmi_kcs_si_info;
+ io.io_setup = ipmi_ls2k_mem_setup;
+ io.addr_data = pdev->resource[0].start;
+ io.regspacing = resource_size(&pdev->resource[0]);
+ io.dev = &pdev->dev;
+
+ dev_dbg(&pdev->dev, "addr 0x%lx, spacing %d.\n", io.addr_data, io.regspacing);
+
+ return ipmi_si_add_smi(&io);
+}
+
+static void ipmi_ls2k_remove(struct platform_device *pdev)
+{
+ ipmi_si_remove_by_dev(&pdev->dev);
+}
+
+struct platform_driver ipmi_ls2k_platform_driver = {
+ .driver = {
+ .name = "ls2k-ipmi-si",
+ },
+ .probe = ipmi_ls2k_probe,
+ .remove = ipmi_ls2k_remove,
+};
+
+void ipmi_si_ls2k_init(void)
+{
+ platform_driver_register(&ipmi_ls2k_platform_driver);
+ ls2k_registered = true;
+}
+
+void ipmi_si_ls2k_shutdown(void)
+{
+ if (ls2k_registered)
+ platform_driver_unregister(&ipmi_ls2k_platform_driver);
+}
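
The LS2K mailbox replaces the classic KCS OBF/IBF status bits (the v0 interface) with one-byte head/tail toggles (v1): a slot is full while head and tail differ, and the reader acknowledges by copying head into tail, exactly as ls2k_mem_inb_v1() does above. The handshake in isolation, with ls2k_obf_full()/ls2k_obf_ack() as hypothetical helper names:

    /* A byte is pending while the head and tail toggles differ. */
    static bool ls2k_obf_full(void __iomem *addr)
    {
            return readb(addr + LS2K_KCS_FIFO_OBFH) ^
                   readb(addr + LS2K_KCS_FIFO_OBFT);
    }

    /* Consume: read the data, then make tail match head again. */
    static void ls2k_obf_ack(void __iomem *addr)
    {
            writeb(readb(addr + LS2K_KCS_FIFO_OBFH),
                   addr + LS2K_KCS_FIFO_OBFT);
    }
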
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 1bc42830444d..1b63f7d2fcda 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -1068,8 +1068,7 @@ static void start_next_msg(struct ssif_info *ssif_info, unsigned long *flags)
}
}
-static void sender(void *send_info,
- struct ipmi_smi_msg *msg)
+static int sender(void *send_info, struct ipmi_smi_msg *msg)
{
struct ssif_info *ssif_info = send_info;
unsigned long oflags, *flags;
@@ -1089,6 +1088,7 @@ static void sender(void *send_info,
msg->data[0], msg->data[1],
(long long)t.tv_sec, (long)t.tv_nsec / NSEC_PER_USEC);
}
+ return IPMI_CC_NO_ERROR;
}
static int get_smi_info(void *send_info, struct ipmi_smi_info *data)
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 48839958b0b1..34b815901b20 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -512,11 +512,18 @@ static int mmap_zero(struct file *file, struct vm_area_struct *vma)
return 0;
}
+#ifndef CONFIG_MMU
+static unsigned long get_unmapped_area_zero(struct file *file,
+ unsigned long addr, unsigned long len,
+ unsigned long pgoff, unsigned long flags)
+{
+ return -ENOSYS;
+}
+#else
static unsigned long get_unmapped_area_zero(struct file *file,
unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags)
{
-#ifdef CONFIG_MMU
if (flags & MAP_SHARED) {
/*
* mmap_zero() will call shmem_zero_setup() to create a file,
@@ -527,12 +534,18 @@ static unsigned long get_unmapped_area_zero(struct file *file,
return shmem_get_unmapped_area(NULL, addr, len, pgoff, flags);
}
- /* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */
- return mm_get_unmapped_area(current->mm, file, addr, len, pgoff, flags);
+ /*
+ * Otherwise flags & MAP_PRIVATE: with no shmem object beneath it,
+ * attempt to map aligned to huge page size if possible, otherwise we
+ * fall back to system page size mappings.
+ */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ return thp_get_unmapped_area(file, addr, len, pgoff, flags);
#else
- return -ENOSYS;
+ return mm_get_unmapped_area(current->mm, file, addr, len, pgoff, flags);
#endif
}
+#endif /* CONFIG_MMU */
static ssize_t write_full(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
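
The practical effect of the mem.c hunk: a MAP_PRIVATE mapping of /dev/zero can now come back PMD-aligned when CONFIG_TRANSPARENT_HUGEPAGE is enabled, making it eligible for huge pages from the first fault. A userspace illustration (hypothetical sizes, not part of the patch):

    #include <fcntl.h>
    #include <sys/mman.h>

    int main(void)
    {
            int fd = open("/dev/zero", O_RDWR);
            /* With THP, the kernel may now pick a 2 MiB-aligned address. */
            void *p = mmap(NULL, 4UL << 20, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE, fd, 0);
            return p == MAP_FAILED;
    }
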
diff --git a/drivers/clk/clk-rp1.c b/drivers/clk/clk-rp1.c
index afff90d48734..fd144755b879 100644
--- a/drivers/clk/clk-rp1.c
+++ b/drivers/clk/clk-rp1.c
@@ -368,6 +368,11 @@ struct rp1_clk_desc {
struct clk_divider div;
};
+static struct rp1_clk_desc *clk_audio_core;
+static struct rp1_clk_desc *clk_audio;
+static struct rp1_clk_desc *clk_i2s;
+static struct clk_hw *clk_xosc;
+
static inline
void clockman_write(struct rp1_clockman *clockman, u32 reg, u32 val)
{
@@ -475,7 +480,6 @@ static int rp1_pll_core_set_rate(struct clk_hw *hw,
struct rp1_clk_desc *pll_core = container_of(hw, struct rp1_clk_desc, hw);
struct rp1_clockman *clockman = pll_core->clockman;
const struct rp1_pll_core_data *data = pll_core->data;
- unsigned long calc_rate;
u32 fbdiv_int, fbdiv_frac;
/* Disable dividers to start with. */
@@ -484,8 +488,8 @@ static int rp1_pll_core_set_rate(struct clk_hw *hw,
clockman_write(clockman, data->fbdiv_frac_reg, 0);
spin_unlock(&clockman->regs_lock);
- calc_rate = get_pll_core_divider(hw, rate, parent_rate,
- &fbdiv_int, &fbdiv_frac);
+ get_pll_core_divider(hw, rate, parent_rate,
+ &fbdiv_int, &fbdiv_frac);
spin_lock(&clockman->regs_lock);
clockman_write(clockman, data->pwr_reg, fbdiv_frac ? 0 : PLL_PWR_DSMPD);
@@ -497,8 +501,6 @@ static int rp1_pll_core_set_rate(struct clk_hw *hw,
if (WARN_ON_ONCE(parent_rate > (rate / 16)))
return -ERANGE;
- pll_core->cached_rate = calc_rate;
-
spin_lock(&clockman->regs_lock);
/* Don't need to divide ref unless parent_rate > (output freq / 16) */
clockman_write(clockman, data->cs_reg,
@@ -530,13 +532,16 @@ static unsigned long rp1_pll_core_recalc_rate(struct clk_hw *hw,
return calc_rate;
}
-static long rp1_pll_core_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int rp1_pll_core_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
u32 fbdiv_int, fbdiv_frac;
- return get_pll_core_divider(hw, rate, *parent_rate,
- &fbdiv_int, &fbdiv_frac);
+ req->rate = get_pll_core_divider(hw, req->rate, req->best_parent_rate,
+ &fbdiv_int,
+ &fbdiv_frac);
+
+ return 0;
}
static void get_pll_prim_dividers(unsigned long rate, unsigned long parent_rate,
@@ -614,14 +619,20 @@ static unsigned long rp1_pll_recalc_rate(struct clk_hw *hw,
return DIV_ROUND_CLOSEST(parent_rate, prim_div1 * prim_div2);
}
-static long rp1_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int rp1_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
+ struct clk_hw *clk_audio_hw = &clk_audio->hw;
u32 div1, div2;
- get_pll_prim_dividers(rate, *parent_rate, &div1, &div2);
+ if (hw == clk_audio_hw && clk_audio->cached_rate == req->rate)
+ req->best_parent_rate = clk_audio_core->cached_rate;
- return DIV_ROUND_CLOSEST(*parent_rate, div1 * div2);
+ get_pll_prim_dividers(req->rate, req->best_parent_rate, &div1, &div2);
+
+ req->rate = DIV_ROUND_CLOSEST(req->best_parent_rate, div1 * div2);
+
+ return 0;
}
static int rp1_pll_ph_is_on(struct clk_hw *hw)
@@ -671,13 +682,15 @@ static unsigned long rp1_pll_ph_recalc_rate(struct clk_hw *hw,
return parent_rate / data->fixed_divider;
}
-static long rp1_pll_ph_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int rp1_pll_ph_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct rp1_clk_desc *pll_ph = container_of(hw, struct rp1_clk_desc, hw);
const struct rp1_pll_ph_data *data = pll_ph->data;
- return *parent_rate / data->fixed_divider;
+ req->rate = req->best_parent_rate / data->fixed_divider;
+
+ return 0;
}
static int rp1_pll_divider_is_on(struct clk_hw *hw)
@@ -754,11 +767,12 @@ static unsigned long rp1_pll_divider_recalc_rate(struct clk_hw *hw,
return clk_divider_ops.recalc_rate(hw, parent_rate);
}
-static long rp1_pll_divider_round_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long *parent_rate)
+static int rp1_pll_divider_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- return clk_divider_ops.round_rate(hw, rate, parent_rate);
+ return clk_divider_ops.determine_rate(hw, req);
}
static int rp1_clock_is_on(struct clk_hw *hw)
@@ -964,6 +978,59 @@ static int rp1_clock_set_rate(struct clk_hw *hw, unsigned long rate,
return rp1_clock_set_rate_and_parent(hw, rate, parent_rate, 0xff);
}
+static unsigned long calc_core_pll_rate(struct clk_hw *pll_hw,
+ unsigned long target_rate,
+ int *pdiv_prim, int *pdiv_clk)
+{
+ static const int prim_divs[] = {
+ 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 15, 16,
+ 18, 20, 21, 24, 25, 28, 30, 35, 36, 42, 49,
+ };
+ const unsigned long xosc_rate = clk_hw_get_rate(clk_xosc);
+ const unsigned long core_min = xosc_rate * 16;
+ const unsigned long core_max = 2400000000;
+ int best_div_prim = 1, best_div_clk = 1;
+ unsigned long best_rate = core_max + 1;
+ unsigned long core_rate = 0;
+ int div_int, div_frac;
+ u64 div;
+ int i;
+
+ /* Given the target rate, choose a set of divisors/multipliers */
+ for (i = 0; i < ARRAY_SIZE(prim_divs); i++) {
+ int div_prim = prim_divs[i];
+ int div_clk;
+
+ for (div_clk = 1; div_clk <= 256; div_clk++) {
+ core_rate = target_rate * div_clk * div_prim;
+ if (core_rate >= core_min) {
+ if (core_rate < best_rate) {
+ best_rate = core_rate;
+ best_div_prim = div_prim;
+ best_div_clk = div_clk;
+ }
+ break;
+ }
+ }
+ }
+
+ if (best_rate < core_max) {
+ div = ((best_rate << 24) + xosc_rate / 2) / xosc_rate;
+ div_int = div >> 24;
+ div_frac = div % (1 << 24);
+ core_rate = (xosc_rate * ((div_int << 24) + div_frac) + (1 << 23)) >> 24;
+ } else {
+ core_rate = 0;
+ }
+
+ if (pdiv_prim)
+ *pdiv_prim = best_div_prim;
+ if (pdiv_clk)
+ *pdiv_clk = best_div_clk;
+
+ return core_rate;
+}
+
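calc_core_pll_rate() walks the legal primary-divider values and, for each, the smallest post-divider that lifts the VCO above xosc * 16, keeping the lowest qualifying VCO rate; the 8.24 fixed-point feedback divider is then derived from that. rp1_clock_choose_div_and_prate() below caches the resulting core/audio/i2s rates so the audio PLL's determine_rate can replay them. A worked example, assuming the RP1's 50 MHz crystal (so core_min = 800 MHz):

    /*
     * Target clk_i2s = 12.288 MHz (256 * 48 kHz):
     *   div_prim = 2: smallest div_clk with
     *     12.288 MHz * 2 * div_clk >= 800 MHz  ->  div_clk = 33
     *     core_rate = 12.288 * 2 * 33 = 811.008 MHz (kept as best)
     *   feedback divider: 811.008 / 50 = 16.22016
     *     -> div_int = 16, div_frac ~= 0.22016 * 2^24 ~= 3693672
     * Replay: 811.008 MHz / 2 (prim) / 33 (clk) = 12.288 MHz exactly.
     */
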
static void rp1_clock_choose_div_and_prate(struct clk_hw *hw,
int parent_idx,
unsigned long rate,
@@ -972,12 +1039,35 @@ static void rp1_clock_choose_div_and_prate(struct clk_hw *hw,
{
struct rp1_clk_desc *clock = container_of(hw, struct rp1_clk_desc, hw);
const struct rp1_clock_data *data = clock->data;
+ struct clk_hw *clk_audio_hw = &clk_audio->hw;
+ struct clk_hw *clk_i2s_hw = &clk_i2s->hw;
struct clk_hw *parent;
u32 div;
u64 tmp;
parent = clk_hw_get_parent_by_index(hw, parent_idx);
+ if (hw == clk_i2s_hw && clk_i2s->cached_rate == rate && parent == clk_audio_hw) {
+ *prate = clk_audio->cached_rate;
+ *calc_rate = rate;
+ return;
+ }
+
+ if (hw == clk_i2s_hw && parent == clk_audio_hw) {
+ unsigned long core_rate, audio_rate, i2s_rate;
+ int div_prim, div_clk;
+
+ core_rate = calc_core_pll_rate(parent, rate, &div_prim, &div_clk);
+ audio_rate = DIV_ROUND_CLOSEST(core_rate, div_prim);
+ i2s_rate = DIV_ROUND_CLOSEST(audio_rate, div_clk);
+ clk_audio_core->cached_rate = core_rate;
+ clk_audio->cached_rate = audio_rate;
+ clk_i2s->cached_rate = i2s_rate;
+ *prate = audio_rate;
+ *calc_rate = i2s_rate;
+ return;
+ }
+
*prate = clk_hw_get_rate(parent);
div = rp1_clock_choose_div(rate, *prate, data);
@@ -1062,19 +1152,47 @@ static int rp1_clock_determine_rate(struct clk_hw *hw,
return 0;
}
+static int rp1_varsrc_set_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate)
+{
+ struct rp1_clk_desc *clock = container_of(hw, struct rp1_clk_desc, hw);
+
+ /*
+ * "varsrc" exists purely to let clock dividers know the frequency
+ * of an externally-managed clock source (such as MIPI DSI byte-clock)
+ * which may change at run-time as a side-effect of some other driver.
+ */
+ clock->cached_rate = rate;
+ return 0;
+}
+
+static unsigned long rp1_varsrc_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct rp1_clk_desc *clock = container_of(hw, struct rp1_clk_desc, hw);
+
+ return clock->cached_rate;
+}
+
+static int rp1_varsrc_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ return 0;
+}
+
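A varsrc clock is essentially a rate mailbox: set_rate records a frequency that some other driver controls, recalc_rate reports it back, and determine_rate accepts any request. A producer/consumer sketch, assuming byteclk is a struct clk * for clksrc_mipi0_dsi_byteclk held by the DSI driver (hypothetical handle, as is dsi_bit_rate_hz):

    /* Producer: the DSI driver publishes its externally-managed rate
     * (dsi_bit_rate_hz is an assumed variable). */
    clk_set_rate(byteclk, dsi_bit_rate_hz / 8);

    /* Consumer: clk_mipi0_dpi divider math now sees the real parent rate. */
    pr_debug("dsi byteclk: %lu Hz\n", clk_get_rate(byteclk));
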
static const struct clk_ops rp1_pll_core_ops = {
.is_prepared = rp1_pll_core_is_on,
.prepare = rp1_pll_core_on,
.unprepare = rp1_pll_core_off,
.set_rate = rp1_pll_core_set_rate,
.recalc_rate = rp1_pll_core_recalc_rate,
- .round_rate = rp1_pll_core_round_rate,
+ .determine_rate = rp1_pll_core_determine_rate,
};
static const struct clk_ops rp1_pll_ops = {
.set_rate = rp1_pll_set_rate,
.recalc_rate = rp1_pll_recalc_rate,
- .round_rate = rp1_pll_round_rate,
+ .determine_rate = rp1_pll_determine_rate,
};
static const struct clk_ops rp1_pll_ph_ops = {
@@ -1082,7 +1200,7 @@ static const struct clk_ops rp1_pll_ph_ops = {
.prepare = rp1_pll_ph_on,
.unprepare = rp1_pll_ph_off,
.recalc_rate = rp1_pll_ph_recalc_rate,
- .round_rate = rp1_pll_ph_round_rate,
+ .determine_rate = rp1_pll_ph_determine_rate,
};
static const struct clk_ops rp1_pll_divider_ops = {
@@ -1091,7 +1209,7 @@ static const struct clk_ops rp1_pll_divider_ops = {
.unprepare = rp1_pll_divider_off,
.set_rate = rp1_pll_divider_set_rate,
.recalc_rate = rp1_pll_divider_recalc_rate,
- .round_rate = rp1_pll_divider_round_rate,
+ .determine_rate = rp1_pll_divider_determine_rate,
};
static const struct clk_ops rp1_clk_ops = {
@@ -1106,6 +1224,12 @@ static const struct clk_ops rp1_clk_ops = {
.determine_rate = rp1_clock_determine_rate,
};
+static const struct clk_ops rp1_varsrc_ops = {
+ .set_rate = rp1_varsrc_set_rate,
+ .recalc_rate = rp1_varsrc_recalc_rate,
+ .determine_rate = rp1_varsrc_determine_rate,
+};
+
static struct clk_hw *rp1_register_pll(struct rp1_clockman *clockman,
struct rp1_clk_desc *desc)
{
@@ -1241,6 +1365,36 @@ static struct rp1_clk_desc pll_sys_desc = REGISTER_PLL(
)
);
+static struct rp1_clk_desc pll_audio_desc = REGISTER_PLL(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_audio",
+ (const struct clk_parent_data[]) {
+ { .hw = &pll_audio_core_desc.hw }
+ },
+ &rp1_pll_ops,
+ CLK_SET_RATE_PARENT
+ ),
+ CLK_DATA(rp1_pll_data,
+ .ctrl_reg = PLL_AUDIO_PRIM,
+ .fc0_src = FC_NUM(4, 2),
+ )
+);
+
+static struct rp1_clk_desc pll_video_desc = REGISTER_PLL(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_video",
+ (const struct clk_parent_data[]) {
+ { .hw = &pll_video_core_desc.hw }
+ },
+ &rp1_pll_ops,
+ 0
+ ),
+ CLK_DATA(rp1_pll_data,
+ .ctrl_reg = PLL_VIDEO_PRIM,
+ .fc0_src = FC_NUM(3, 2),
+ )
+);
+
static struct rp1_clk_desc pll_sys_sec_desc = REGISTER_PLL_DIV(
.hw.init = CLK_HW_INIT_PARENTS_DATA(
"pll_sys_sec",
@@ -1256,16 +1410,42 @@ static struct rp1_clk_desc pll_sys_sec_desc = REGISTER_PLL_DIV(
)
);
+static struct rp1_clk_desc pll_video_sec_desc = REGISTER_PLL_DIV(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_video_sec",
+ (const struct clk_parent_data[]) {
+ { .hw = &pll_video_core_desc.hw }
+ },
+ &rp1_pll_divider_ops,
+ 0
+ ),
+ CLK_DATA(rp1_pll_data,
+ .ctrl_reg = PLL_VIDEO_SEC,
+ .fc0_src = FC_NUM(5, 3),
+ )
+);
+
+static const struct clk_parent_data clk_eth_tsu_parents[] = {
+ { .index = 0 },
+ { .hw = &pll_video_sec_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+};
+
static struct rp1_clk_desc clk_eth_tsu_desc = REGISTER_CLK(
.hw.init = CLK_HW_INIT_PARENTS_DATA(
"clk_eth_tsu",
- (const struct clk_parent_data[]) { { .index = 0 } },
+ clk_eth_tsu_parents,
&rp1_clk_ops,
0
),
CLK_DATA(rp1_clock_data,
.num_std_parents = 0,
- .num_aux_parents = 1,
+ .num_aux_parents = 8,
.ctrl_reg = CLK_ETH_TSU_CTRL,
.div_int_reg = CLK_ETH_TSU_DIV_INT,
.sel_reg = CLK_ETH_TSU_SEL,
@@ -1278,6 +1458,7 @@ static struct rp1_clk_desc clk_eth_tsu_desc = REGISTER_CLK(
static const struct clk_parent_data clk_eth_parents[] = {
{ .hw = &pll_sys_sec_desc.div.hw },
{ .hw = &pll_sys_desc.hw },
+ { .hw = &pll_video_sec_desc.hw },
};
static struct rp1_clk_desc clk_eth_desc = REGISTER_CLK(
@@ -1289,7 +1470,7 @@ static struct rp1_clk_desc clk_eth_desc = REGISTER_CLK(
),
CLK_DATA(rp1_clock_data,
.num_std_parents = 0,
- .num_aux_parents = 2,
+ .num_aux_parents = 3,
.ctrl_reg = CLK_ETH_CTRL,
.div_int_reg = CLK_ETH_DIV_INT,
.sel_reg = CLK_ETH_SEL,
@@ -1342,6 +1523,756 @@ static struct rp1_clk_desc pll_sys_pri_ph_desc = REGISTER_PLL(
)
);
+static struct rp1_clk_desc pll_audio_pri_ph_desc = REGISTER_PLL(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_audio_pri_ph",
+ (const struct clk_parent_data[]) {
+ { .hw = &pll_audio_desc.hw }
+ },
+ &rp1_pll_ph_ops,
+ 0
+ ),
+ CLK_DATA(rp1_pll_ph_data,
+ .ph_reg = PLL_AUDIO_PRIM,
+ .fixed_divider = 2,
+ .phase = RP1_PLL_PHASE_0,
+ .fc0_src = FC_NUM(5, 1),
+ )
+);
+
+static struct rp1_clk_desc pll_video_pri_ph_desc = REGISTER_PLL(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_video_pri_ph",
+ (const struct clk_parent_data[]) {
+ { .hw = &pll_video_desc.hw }
+ },
+ &rp1_pll_ph_ops,
+ 0
+ ),
+ CLK_DATA(rp1_pll_ph_data,
+ .ph_reg = PLL_VIDEO_PRIM,
+ .fixed_divider = 2,
+ .phase = RP1_PLL_PHASE_0,
+ .fc0_src = FC_NUM(4, 3),
+ )
+);
+
+static struct rp1_clk_desc pll_audio_sec_desc = REGISTER_PLL_DIV(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_audio_sec",
+ (const struct clk_parent_data[]) {
+ { .hw = &pll_audio_core_desc.hw }
+ },
+ &rp1_pll_divider_ops,
+ 0
+ ),
+ CLK_DATA(rp1_pll_data,
+ .ctrl_reg = PLL_AUDIO_SEC,
+ .fc0_src = FC_NUM(6, 2),
+ )
+);
+
+static struct rp1_clk_desc pll_audio_tern_desc = REGISTER_PLL_DIV(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_audio_tern",
+ (const struct clk_parent_data[]) {
+ { .hw = &pll_audio_core_desc.hw }
+ },
+ &rp1_pll_divider_ops,
+ 0
+ ),
+ CLK_DATA(rp1_pll_data,
+ .ctrl_reg = PLL_AUDIO_TERN,
+ .fc0_src = FC_NUM(6, 2),
+ )
+);
+
+static struct rp1_clk_desc clk_slow_sys_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_slow_sys",
+ (const struct clk_parent_data[]) { { .index = 0 } },
+ &rp1_clk_ops,
+ CLK_IS_CRITICAL
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 1,
+ .num_aux_parents = 0,
+ .ctrl_reg = CLK_SLOW_SYS_CTRL,
+ .div_int_reg = CLK_SLOW_SYS_DIV_INT,
+ .sel_reg = CLK_SLOW_SYS_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 50 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(1, 4),
+ .clk_src_mask = 0x1,
+ )
+);
+
+static const struct clk_parent_data clk_dma_parents[] = {
+ { .hw = &pll_sys_pri_ph_desc.hw },
+ { .hw = &pll_video_desc.hw },
+ { .index = 0 },
+};
+
+static struct rp1_clk_desc clk_dma_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_dma",
+ clk_dma_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 3,
+ .ctrl_reg = CLK_DMA_CTRL,
+ .div_int_reg = CLK_DMA_DIV_INT,
+ .sel_reg = CLK_DMA_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 100 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(2, 2),
+ )
+);
+
+static const struct clk_parent_data clk_uart_parents[] = {
+ { .hw = &pll_sys_pri_ph_desc.hw },
+ { .hw = &pll_video_desc.hw },
+ { .index = 0 },
+};
+
+static struct rp1_clk_desc clk_uart_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_uart",
+ clk_uart_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 3,
+ .ctrl_reg = CLK_UART_CTRL,
+ .div_int_reg = CLK_UART_DIV_INT,
+ .sel_reg = CLK_UART_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 100 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(6, 7),
+ )
+);
+
+static const struct clk_parent_data clk_pwm0_parents[] = {
+ { .index = -1 },
+ { .hw = &pll_video_sec_desc.hw },
+ { .index = 0 },
+};
+
+static struct rp1_clk_desc clk_pwm0_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_pwm0",
+ clk_pwm0_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 3,
+ .ctrl_reg = CLK_PWM0_CTRL,
+ .div_int_reg = CLK_PWM0_DIV_INT,
+ .div_frac_reg = CLK_PWM0_DIV_FRAC,
+ .sel_reg = CLK_PWM0_SEL,
+ .div_int_max = DIV_INT_16BIT_MAX,
+ .max_freq = 76800 * HZ_PER_KHZ,
+ .fc0_src = FC_NUM(0, 5),
+ )
+);
+
+static const struct clk_parent_data clk_pwm1_parents[] = {
+ { .index = -1 },
+ { .hw = &pll_video_sec_desc.hw },
+ { .index = 0 },
+};
+
+static struct rp1_clk_desc clk_pwm1_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_pwm1",
+ clk_pwm1_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 3,
+ .ctrl_reg = CLK_PWM1_CTRL,
+ .div_int_reg = CLK_PWM1_DIV_INT,
+ .div_frac_reg = CLK_PWM1_DIV_FRAC,
+ .sel_reg = CLK_PWM1_SEL,
+ .div_int_max = DIV_INT_16BIT_MAX,
+ .max_freq = 76800 * HZ_PER_KHZ,
+ .fc0_src = FC_NUM(1, 5),
+ )
+);
+
+static const struct clk_parent_data clk_audio_in_parents[] = {
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &pll_video_sec_desc.hw },
+ { .index = 0 },
+};
+
+static struct rp1_clk_desc clk_audio_in_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_audio_in",
+ clk_audio_in_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 5,
+ .ctrl_reg = CLK_AUDIO_IN_CTRL,
+ .div_int_reg = CLK_AUDIO_IN_DIV_INT,
+ .sel_reg = CLK_AUDIO_IN_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 76800 * HZ_PER_KHZ,
+ .fc0_src = FC_NUM(2, 5),
+ )
+);
+
+static const struct clk_parent_data clk_audio_out_parents[] = {
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &pll_video_sec_desc.hw },
+ { .index = 0 },
+};
+
+static struct rp1_clk_desc clk_audio_out_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_audio_out",
+ clk_audio_out_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 4,
+ .ctrl_reg = CLK_AUDIO_OUT_CTRL,
+ .div_int_reg = CLK_AUDIO_OUT_DIV_INT,
+ .sel_reg = CLK_AUDIO_OUT_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 153600 * HZ_PER_KHZ,
+ .fc0_src = FC_NUM(3, 5),
+ )
+);
+
+static const struct clk_parent_data clk_i2s_parents[] = {
+ { .index = 0 },
+ { .hw = &pll_audio_desc.hw },
+ { .hw = &pll_audio_sec_desc.hw },
+};
+
+static struct rp1_clk_desc clk_i2s_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_i2s",
+ clk_i2s_parents,
+ &rp1_clk_ops,
+ CLK_SET_RATE_PARENT
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 3,
+ .ctrl_reg = CLK_I2S_CTRL,
+ .div_int_reg = CLK_I2S_DIV_INT,
+ .sel_reg = CLK_I2S_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 50 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(4, 4),
+ )
+);
+
+static struct rp1_clk_desc clk_mipi0_cfg_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_mipi0_cfg",
+ (const struct clk_parent_data[]) { { .index = 0 } },
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 1,
+ .ctrl_reg = CLK_MIPI0_CFG_CTRL,
+ .div_int_reg = CLK_MIPI0_CFG_DIV_INT,
+ .sel_reg = CLK_MIPI0_CFG_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 50 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(4, 5),
+ )
+);
+
+static struct rp1_clk_desc clk_mipi1_cfg_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_mipi1_cfg",
+ (const struct clk_parent_data[]) { { .index = 0 } },
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 1,
+ .ctrl_reg = CLK_MIPI1_CFG_CTRL,
+ .div_int_reg = CLK_MIPI1_CFG_DIV_INT,
+ .sel_reg = CLK_MIPI1_CFG_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 50 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(5, 6),
+ .clk_src_mask = 0x1,
+ )
+);
+
+static struct rp1_clk_desc clk_adc_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_adc",
+ (const struct clk_parent_data[]) { { .index = 0 } },
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 1,
+ .ctrl_reg = CLK_ADC_CTRL,
+ .div_int_reg = CLK_ADC_DIV_INT,
+ .sel_reg = CLK_ADC_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 50 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(5, 5),
+ )
+);
+
+static struct rp1_clk_desc clk_sdio_timer_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_sdio_timer",
+ (const struct clk_parent_data[]) { { .index = 0 } },
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 1,
+ .ctrl_reg = CLK_SDIO_TIMER_CTRL,
+ .div_int_reg = CLK_SDIO_TIMER_DIV_INT,
+ .sel_reg = CLK_SDIO_TIMER_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 50 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(3, 4),
+ )
+);
+
+static struct rp1_clk_desc clk_sdio_alt_src_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_sdio_alt_src",
+ (const struct clk_parent_data[]) {
+ { .hw = &pll_sys_desc.hw }
+ },
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 1,
+ .ctrl_reg = CLK_SDIO_ALT_SRC_CTRL,
+ .div_int_reg = CLK_SDIO_ALT_SRC_DIV_INT,
+ .sel_reg = CLK_SDIO_ALT_SRC_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 200 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(5, 4),
+ )
+);
+
+static const struct clk_parent_data clk_dpi_parents[] = {
+ { .hw = &pll_sys_desc.hw },
+ { .hw = &pll_video_sec_desc.hw },
+ { .hw = &pll_video_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+};
+
+static struct rp1_clk_desc clk_dpi_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_dpi",
+ clk_dpi_parents,
+ &rp1_clk_ops,
+ CLK_SET_RATE_NO_REPARENT /* Let DPI driver set parent */
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 8,
+ .ctrl_reg = VIDEO_CLK_DPI_CTRL,
+ .div_int_reg = VIDEO_CLK_DPI_DIV_INT,
+ .sel_reg = VIDEO_CLK_DPI_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 200 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(1, 6),
+ )
+);
+
+static const struct clk_parent_data clk_gp0_parents[] = {
+ { .index = 0 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &pll_sys_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &clk_i2s_desc.hw },
+ { .hw = &clk_adc_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &clk_sys_desc.hw },
+};
+
+static struct rp1_clk_desc clk_gp0_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_gp0",
+ clk_gp0_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 16,
+ .oe_mask = BIT(0),
+ .ctrl_reg = CLK_GP0_CTRL,
+ .div_int_reg = CLK_GP0_DIV_INT,
+ .div_frac_reg = CLK_GP0_DIV_FRAC,
+ .sel_reg = CLK_GP0_SEL,
+ .div_int_max = DIV_INT_16BIT_MAX,
+ .max_freq = 100 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(0, 1),
+ )
+);
+
+static const struct clk_parent_data clk_gp1_parents[] = {
+ { .hw = &clk_sdio_timer_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &pll_sys_pri_ph_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &clk_adc_desc.hw },
+ { .hw = &clk_dpi_desc.hw },
+ { .hw = &clk_pwm0_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+};
+
+static struct rp1_clk_desc clk_gp1_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_gp1",
+ clk_gp1_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 16,
+ .oe_mask = BIT(1),
+ .ctrl_reg = CLK_GP1_CTRL,
+ .div_int_reg = CLK_GP1_DIV_INT,
+ .div_frac_reg = CLK_GP1_DIV_FRAC,
+ .sel_reg = CLK_GP1_SEL,
+ .div_int_max = DIV_INT_16BIT_MAX,
+ .max_freq = 100 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(1, 1),
+ )
+);
+
+static struct rp1_clk_desc clksrc_mipi0_dsi_byteclk_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clksrc_mipi0_dsi_byteclk",
+ (const struct clk_parent_data[]) { { .index = 0 } },
+ &rp1_varsrc_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 1,
+ .num_aux_parents = 0,
+ )
+);
+
+static struct rp1_clk_desc clksrc_mipi1_dsi_byteclk_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clksrc_mipi1_dsi_byteclk",
+ (const struct clk_parent_data[]) { { .index = 0 } },
+ &rp1_varsrc_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 1,
+ .num_aux_parents = 0,
+ )
+);
+
+static const struct clk_parent_data clk_mipi0_dpi_parents[] = {
+ { .hw = &pll_sys_desc.hw },
+ { .hw = &pll_video_sec_desc.hw },
+ { .hw = &pll_video_desc.hw },
+ { .hw = &clksrc_mipi0_dsi_byteclk_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+};
+
+static struct rp1_clk_desc clk_mipi0_dpi_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_mipi0_dpi",
+ clk_mipi0_dpi_parents,
+ &rp1_clk_ops,
+ CLK_SET_RATE_NO_REPARENT /* Let DSI driver set parent */
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 8,
+ .ctrl_reg = VIDEO_CLK_MIPI0_DPI_CTRL,
+ .div_int_reg = VIDEO_CLK_MIPI0_DPI_DIV_INT,
+ .div_frac_reg = VIDEO_CLK_MIPI0_DPI_DIV_FRAC,
+ .sel_reg = VIDEO_CLK_MIPI0_DPI_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 200 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(2, 6),
+ )
+);
+
+static const struct clk_parent_data clk_mipi1_dpi_parents[] = {
+ { .hw = &pll_sys_desc.hw },
+ { .hw = &pll_video_sec_desc.hw },
+ { .hw = &pll_video_desc.hw },
+ { .hw = &clksrc_mipi1_dsi_byteclk_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+};
+
+static struct rp1_clk_desc clk_mipi1_dpi_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_mipi1_dpi",
+ clk_mipi1_dpi_parents,
+ &rp1_clk_ops,
+ CLK_SET_RATE_NO_REPARENT /* Let DSI driver set parent */
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 8,
+ .ctrl_reg = VIDEO_CLK_MIPI1_DPI_CTRL,
+ .div_int_reg = VIDEO_CLK_MIPI1_DPI_DIV_INT,
+ .div_frac_reg = VIDEO_CLK_MIPI1_DPI_DIV_FRAC,
+ .sel_reg = VIDEO_CLK_MIPI1_DPI_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 200 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(3, 6),
+ )
+);
+
+static const struct clk_parent_data clk_gp2_parents[] = {
+ { .hw = &clk_sdio_alt_src_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &pll_sys_sec_desc.hw },
+ { .index = -1 },
+ { .hw = &pll_video_desc.hw },
+ { .hw = &clk_audio_in_desc.hw },
+ { .hw = &clk_dpi_desc.hw },
+ { .hw = &clk_pwm0_desc.hw },
+ { .hw = &clk_pwm1_desc.hw },
+ { .hw = &clk_mipi0_dpi_desc.hw },
+ { .hw = &clk_mipi1_cfg_desc.hw },
+ { .hw = &clk_sys_desc.hw },
+};
+
+static struct rp1_clk_desc clk_gp2_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_gp2",
+ clk_gp2_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 16,
+ .oe_mask = BIT(2),
+ .ctrl_reg = CLK_GP2_CTRL,
+ .div_int_reg = CLK_GP2_DIV_INT,
+ .div_frac_reg = CLK_GP2_DIV_FRAC,
+ .sel_reg = CLK_GP2_SEL,
+ .div_int_max = DIV_INT_16BIT_MAX,
+ .max_freq = 100 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(2, 1),
+ )
+);
+
+static const struct clk_parent_data clk_gp3_parents[] = {
+ { .index = 0 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &pll_video_pri_ph_desc.hw },
+ { .hw = &clk_audio_out_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &clk_mipi1_dpi_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+};
+
+static struct rp1_clk_desc clk_gp3_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_gp3",
+ clk_gp3_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 16,
+ .oe_mask = BIT(3),
+ .ctrl_reg = CLK_GP3_CTRL,
+ .div_int_reg = CLK_GP3_DIV_INT,
+ .div_frac_reg = CLK_GP3_DIV_FRAC,
+ .sel_reg = CLK_GP3_SEL,
+ .div_int_max = DIV_INT_16BIT_MAX,
+ .max_freq = 100 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(3, 1),
+ )
+);
+
+static const struct clk_parent_data clk_gp4_parents[] = {
+ { .index = 0 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &pll_video_sec_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &clk_mipi0_cfg_desc.hw },
+ { .hw = &clk_uart_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &clk_sys_desc.hw },
+};
+
+static struct rp1_clk_desc clk_gp4_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_gp4",
+ clk_gp4_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 16,
+ .oe_mask = BIT(4),
+ .ctrl_reg = CLK_GP4_CTRL,
+ .div_int_reg = CLK_GP4_DIV_INT,
+ .div_frac_reg = CLK_GP4_DIV_FRAC,
+ .sel_reg = CLK_GP4_SEL,
+ .div_int_max = DIV_INT_16BIT_MAX,
+ .max_freq = 100 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(4, 1),
+ )
+);
+
+static const struct clk_parent_data clk_vec_parents[] = {
+ { .hw = &pll_sys_pri_ph_desc.hw },
+ { .hw = &pll_video_sec_desc.hw },
+ { .hw = &pll_video_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+};
+
+static struct rp1_clk_desc clk_vec_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_vec",
+ clk_vec_parents,
+ &rp1_clk_ops,
+ CLK_SET_RATE_NO_REPARENT /* Let VEC driver set parent */
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 8,
+ .ctrl_reg = VIDEO_CLK_VEC_CTRL,
+ .div_int_reg = VIDEO_CLK_VEC_DIV_INT,
+ .sel_reg = VIDEO_CLK_VEC_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 108 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(0, 6),
+ )
+);
+
+static const struct clk_parent_data clk_gp5_parents[] = {
+ { .index = 0 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &pll_video_sec_desc.hw },
+ { .hw = &clk_eth_tsu_desc.hw },
+ { .index = -1 },
+ { .hw = &clk_vec_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+};
+
+static struct rp1_clk_desc clk_gp5_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_gp5",
+ clk_gp5_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 16,
+ .oe_mask = BIT(5),
+ .ctrl_reg = CLK_GP5_CTRL,
+ .div_int_reg = CLK_GP5_DIV_INT,
+ .div_frac_reg = CLK_GP5_DIV_FRAC,
+ .sel_reg = CLK_GP5_SEL,
+ .div_int_max = DIV_INT_16BIT_MAX,
+ .max_freq = 100 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(5, 1),
+ )
+);
+
static struct rp1_clk_desc *const clk_desc_array[] = {
[RP1_PLL_SYS_CORE] = &pll_sys_core_desc,
[RP1_PLL_AUDIO_CORE] = &pll_audio_core_desc,
@@ -1352,6 +2283,38 @@ static struct rp1_clk_desc *const clk_desc_array[] = {
[RP1_CLK_SYS] = &clk_sys_desc,
[RP1_PLL_SYS_PRI_PH] = &pll_sys_pri_ph_desc,
[RP1_PLL_SYS_SEC] = &pll_sys_sec_desc,
+ [RP1_PLL_AUDIO] = &pll_audio_desc,
+ [RP1_PLL_VIDEO] = &pll_video_desc,
+ [RP1_PLL_AUDIO_PRI_PH] = &pll_audio_pri_ph_desc,
+ [RP1_PLL_VIDEO_PRI_PH] = &pll_video_pri_ph_desc,
+ [RP1_PLL_AUDIO_SEC] = &pll_audio_sec_desc,
+ [RP1_PLL_VIDEO_SEC] = &pll_video_sec_desc,
+ [RP1_PLL_AUDIO_TERN] = &pll_audio_tern_desc,
+ [RP1_CLK_SLOW_SYS] = &clk_slow_sys_desc,
+ [RP1_CLK_DMA] = &clk_dma_desc,
+ [RP1_CLK_UART] = &clk_uart_desc,
+ [RP1_CLK_PWM0] = &clk_pwm0_desc,
+ [RP1_CLK_PWM1] = &clk_pwm1_desc,
+ [RP1_CLK_AUDIO_IN] = &clk_audio_in_desc,
+ [RP1_CLK_AUDIO_OUT] = &clk_audio_out_desc,
+ [RP1_CLK_I2S] = &clk_i2s_desc,
+ [RP1_CLK_MIPI0_CFG] = &clk_mipi0_cfg_desc,
+ [RP1_CLK_MIPI1_CFG] = &clk_mipi1_cfg_desc,
+ [RP1_CLK_ADC] = &clk_adc_desc,
+ [RP1_CLK_SDIO_TIMER] = &clk_sdio_timer_desc,
+ [RP1_CLK_SDIO_ALT_SRC] = &clk_sdio_alt_src_desc,
+ [RP1_CLK_GP0] = &clk_gp0_desc,
+ [RP1_CLK_GP1] = &clk_gp1_desc,
+ [RP1_CLK_GP2] = &clk_gp2_desc,
+ [RP1_CLK_GP3] = &clk_gp3_desc,
+ [RP1_CLK_GP4] = &clk_gp4_desc,
+ [RP1_CLK_GP5] = &clk_gp5_desc,
+ [RP1_CLK_VEC] = &clk_vec_desc,
+ [RP1_CLK_DPI] = &clk_dpi_desc,
+ [RP1_CLK_MIPI0_DPI] = &clk_mipi0_dpi_desc,
+ [RP1_CLK_MIPI1_DPI] = &clk_mipi1_dpi_desc,
+ [RP1_CLK_MIPI0_DSI_BYTECLOCK] = &clksrc_mipi0_dsi_byteclk_desc,
+ [RP1_CLK_MIPI1_DSI_BYTECLOCK] = &clksrc_mipi1_dsi_byteclk_desc,
};
static const struct regmap_range rp1_reg_ranges[] = {
@@ -1466,6 +2429,11 @@ static int rp1_clk_probe(struct platform_device *pdev)
hws[i] = desc->clk_register(clockman, desc);
}
+ clk_audio_core = &pll_audio_core_desc;
+ clk_audio = &pll_audio_desc;
+ clk_i2s = &clk_i2s_desc;
+ clk_xosc = clk_hw_get_parent_by_index(&clk_i2s->hw, 0);
+
platform_set_drvdata(pdev, clockman);
return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
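A note on the parent tables above: every unused mux input is filled with { .index = -1 } so that an entry's position in the array lines up with the hardware SEL field value directly. A minimal standalone sketch of that convention (plain C, not the clk framework API; the table contents are invented for the demo):

#include <stdio.h>

/* Sketch only: -1 marks a mux input with nothing connected, so a SEL
 * register value indexes the table directly and unconnected inputs are
 * rejected rather than remapped. */
static const int mux_parents[8] = { 0, -1, -1, 1, -1, -1, 2, -1 };

static int parent_for_sel(const int *table, unsigned int sel, unsigned int n)
{
	if (sel >= n || table[sel] < 0)
		return -1;	/* unconnected input */
	return table[sel];
}

int main(void)
{
	printf("sel 3 -> parent %d\n", parent_for_sel(mux_parents, 3, 8));
	printf("sel 4 -> parent %d\n", parent_for_sel(mux_parents, 4, 8));
	return 0;
}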
diff --git a/drivers/clk/clk-sp7021.c b/drivers/clk/clk-sp7021.c
index 95d66191df4b..e902ba75e006 100644
--- a/drivers/clk/clk-sp7021.c
+++ b/drivers/clk/clk-sp7021.c
@@ -7,6 +7,7 @@
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/bitfield.h>
+#include <linux/hw_bitfield.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/err.h>
@@ -38,13 +39,6 @@ enum {
#define MASK_DIVN GENMASK(7, 0)
#define MASK_DIVM GENMASK(14, 8)
-/* HIWORD_MASK FIELD_PREP */
-#define HWM_FIELD_PREP(mask, value) \
-({ \
- u64 _m = mask; \
- (_m << 16) | FIELD_PREP(_m, value); \
-})
-
struct sp_pll {
struct clk_hw hw;
void __iomem *reg;
@@ -313,15 +307,15 @@ static int plltv_set_rate(struct sp_pll *clk)
u32 r0, r1, r2;
r0 = BIT(clk->bp_bit + 16);
- r0 |= HWM_FIELD_PREP(MASK_SEL_FRA, clk->p[SEL_FRA]);
- r0 |= HWM_FIELD_PREP(MASK_SDM_MOD, clk->p[SDM_MOD]);
- r0 |= HWM_FIELD_PREP(MASK_PH_SEL, clk->p[PH_SEL]);
- r0 |= HWM_FIELD_PREP(MASK_NFRA, clk->p[NFRA]);
+ r0 |= FIELD_PREP_WM16(MASK_SEL_FRA, clk->p[SEL_FRA]);
+ r0 |= FIELD_PREP_WM16(MASK_SDM_MOD, clk->p[SDM_MOD]);
+ r0 |= FIELD_PREP_WM16(MASK_PH_SEL, clk->p[PH_SEL]);
+ r0 |= FIELD_PREP_WM16(MASK_NFRA, clk->p[NFRA]);
- r1 = HWM_FIELD_PREP(MASK_DIVR, clk->p[DIVR]);
+ r1 = FIELD_PREP_WM16(MASK_DIVR, clk->p[DIVR]);
- r2 = HWM_FIELD_PREP(MASK_DIVN, clk->p[DIVN] - 1);
- r2 |= HWM_FIELD_PREP(MASK_DIVM, clk->p[DIVM] - 1);
+ r2 = FIELD_PREP_WM16(MASK_DIVN, clk->p[DIVN] - 1);
+ r2 |= FIELD_PREP_WM16(MASK_DIVM, clk->p[DIVM] - 1);
spin_lock_irqsave(&clk->lock, flags);
writel(r0, clk->reg);
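The sp7021 hunk above swaps the driver-local HWM_FIELD_PREP() for the generic FIELD_PREP_WM16() from the newly included <linux/hw_bitfield.h>. Both encode the "hiword write-enable" convention: the field value sits in the low 16 bits and the same mask is replicated in the high 16 bits, so the hardware latches only the masked field. A self-contained userspace sketch of those semantics (field_prep_wm16() here is a stand-in, not the kernel helper):

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for FIELD_PREP_WM16(): place the value into the
 * field described by mask and replicate the mask in the upper 16 bits,
 * the write-enable half of a "hiword mask" register. */
static uint32_t field_prep_wm16(uint32_t mask, uint32_t val)
{
	unsigned int shift = __builtin_ctz(mask);

	return (mask << 16) | ((val << shift) & mask);
}

int main(void)
{
	/* MASK_DIVN = GENMASK(7, 0), MASK_DIVM = GENMASK(14, 8) */
	uint32_t r2 = field_prep_wm16(0x00ff, 12 - 1);	/* DIVN */

	r2 |= field_prep_wm16(0x7f00, 4 - 1);		/* DIVM */
	printf("r2 = 0x%08x\n", (unsigned int)r2);
	return 0;
}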
diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
index 82a8cb9545eb..e7ebb63970d3 100644
--- a/drivers/clk/tegra/clk-tegra30.c
+++ b/drivers/clk/tegra/clk-tegra30.c
@@ -53,6 +53,7 @@
#define SYSTEM_CLK_RATE 0x030
#define TEGRA30_CLK_PERIPH_BANKS 5
+#define TEGRA30_CLK_CLK_MAX 311
#define PLLC_BASE 0x80
#define PLLC_MISC 0x8c
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 4f7f9201598d..083d8369a591 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -318,7 +318,6 @@ static u32 drv_read(struct acpi_cpufreq_data *data, const struct cpumask *mask)
return cmd.val;
}
-/* Called via smp_call_function_many(), on the target CPUs */
static void do_drv_write(void *_cmd)
{
struct drv_cmd *cmd = _cmd;
@@ -335,14 +334,8 @@ static void drv_write(struct acpi_cpufreq_data *data,
.val = val,
.func.write = data->cpu_freq_write,
};
- int this_cpu;
- this_cpu = get_cpu();
- if (cpumask_test_cpu(this_cpu, mask))
- do_drv_write(&cmd);
-
- smp_call_function_many(mask, do_drv_write, &cmd, 1);
- put_cpu();
+ on_each_cpu_mask(mask, do_drv_write, &cmd, true);
}
static u32 get_cur_val(const struct cpumask *mask, struct acpi_cpufreq_data *data)
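The acpi-cpufreq hunk collapses the old get_cpu()/self-call/smp_call_function_many()/put_cpu() sequence into a single on_each_cpu_mask() call, which runs the function on every CPU in the mask, the calling CPU included, and (with the final true) waits for completion. A rough userspace analogue with threads standing in for CPUs (the scaffolding is illustrative only; build with -pthread):

#include <pthread.h>
#include <stdio.h>

#define NCPUS 4

struct call { void (*fn)(void *); void *arg; };

static void do_drv_write(void *cmd)
{
	printf("write on cpu thread, cmd=%s\n", (const char *)cmd);
}

static void *cpu_thread(void *p)
{
	struct call *c = p;

	c->fn(c->arg);
	return NULL;
}

/* One call covers every set bit in the mask and, like passing true to
 * the kernel helper, waits for all of them to finish. */
static void on_each_cpu_mask_sim(unsigned int mask, void (*fn)(void *), void *arg)
{
	pthread_t t[NCPUS];
	struct call c = { fn, arg };
	int i;

	for (i = 0; i < NCPUS; i++)
		if (mask & (1u << i))
			pthread_create(&t[i], NULL, cpu_thread, &c);
	for (i = 0; i < NCPUS; i++)
		if (mask & (1u << i))
			pthread_join(t[i], NULL);
}

int main(void)
{
	on_each_cpu_mask_sim(0xb, do_drv_write, "set-frequency"); /* CPUs 0, 1, 3 */
	return 0;
}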
diff --git a/drivers/cpufreq/airoha-cpufreq.c b/drivers/cpufreq/airoha-cpufreq.c
index 4fe39eadd163..b6b1cdc4d11d 100644
--- a/drivers/cpufreq/airoha-cpufreq.c
+++ b/drivers/cpufreq/airoha-cpufreq.c
@@ -107,6 +107,7 @@ static struct platform_driver airoha_cpufreq_driver = {
};
static const struct of_device_id airoha_cpufreq_match_list[] __initconst = {
+ { .compatible = "airoha,an7583" },
{ .compatible = "airoha,en7581" },
{},
};
diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
index f28a4435fba7..0efe403a5980 100644
--- a/drivers/cpufreq/armada-37xx-cpufreq.c
+++ b/drivers/cpufreq/armada-37xx-cpufreq.c
@@ -265,7 +265,7 @@ static void __init armada37xx_cpufreq_avs_configure(struct regmap *base,
*/
target_vm = avs_map[l0_vdd_min] - 100;
- target_vm = target_vm > MIN_VOLT_MV ? target_vm : MIN_VOLT_MV;
+ target_vm = max(target_vm, MIN_VOLT_MV);
dvfs->avs[1] = armada_37xx_avs_val_match(target_vm);
/*
@@ -273,7 +273,7 @@ static void __init armada37xx_cpufreq_avs_configure(struct regmap *base,
* be larger than 1000mv
*/
target_vm = avs_map[l0_vdd_min] - 150;
- target_vm = target_vm > MIN_VOLT_MV ? target_vm : MIN_VOLT_MV;
+ target_vm = max(target_vm, MIN_VOLT_MV);
dvfs->avs[2] = dvfs->avs[3] = armada_37xx_avs_val_match(target_vm);
/*
diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
index 5940d262374f..71450cca8e9f 100644
--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
+++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
@@ -480,7 +480,7 @@ static bool brcm_avs_is_firmware_loaded(struct private_data *priv)
static unsigned int brcm_avs_cpufreq_get(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
struct private_data *priv;
if (!policy)
@@ -488,8 +488,6 @@ static unsigned int brcm_avs_cpufreq_get(unsigned int cpu)
priv = policy->driver_data;
- cpufreq_cpu_put(policy);
-
return brcm_avs_get_frequency(priv->base);
}
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 4a17162a392d..12de0ac7bbaf 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -50,8 +50,7 @@ struct cppc_freq_invariance {
static DEFINE_PER_CPU(struct cppc_freq_invariance, cppc_freq_inv);
static struct kthread_worker *kworker_fie;
-static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
- struct cppc_perf_fb_ctrs *fb_ctrs_t0,
+static int cppc_perf_from_fbctrs(struct cppc_perf_fb_ctrs *fb_ctrs_t0,
struct cppc_perf_fb_ctrs *fb_ctrs_t1);
/**
@@ -87,8 +86,7 @@ static void cppc_scale_freq_workfn(struct kthread_work *work)
return;
}
- perf = cppc_perf_from_fbctrs(cpu_data, &cppc_fi->prev_perf_fb_ctrs,
- &fb_ctrs);
+ perf = cppc_perf_from_fbctrs(&cppc_fi->prev_perf_fb_ctrs, &fb_ctrs);
if (!perf)
return;
@@ -684,8 +682,7 @@ static inline u64 get_delta(u64 t1, u64 t0)
return (u32)t1 - (u32)t0;
}
-static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
- struct cppc_perf_fb_ctrs *fb_ctrs_t0,
+static int cppc_perf_from_fbctrs(struct cppc_perf_fb_ctrs *fb_ctrs_t0,
struct cppc_perf_fb_ctrs *fb_ctrs_t1)
{
u64 delta_reference, delta_delivered;
@@ -725,8 +722,8 @@ static int cppc_get_perf_ctrs_sample(int cpu,
static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
{
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0};
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
struct cppc_cpudata *cpu_data;
u64 delivered_perf;
int ret;
@@ -736,8 +733,6 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
cpu_data = policy->driver_data;
- cpufreq_cpu_put(policy);
-
ret = cppc_get_perf_ctrs_sample(cpu, &fb_ctrs_t0, &fb_ctrs_t1);
if (ret) {
if (ret == -EFAULT)
@@ -747,8 +742,7 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
return 0;
}
- delivered_perf = cppc_perf_from_fbctrs(cpu_data, &fb_ctrs_t0,
- &fb_ctrs_t1);
+ delivered_perf = cppc_perf_from_fbctrs(&fb_ctrs_t0, &fb_ctrs_t1);
if (!delivered_perf)
goto out_invalid_counters;
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index 015dd393eaba..cd1816a12bb9 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -103,6 +103,7 @@ static const struct of_device_id allowlist[] __initconst = {
* platforms using "operating-points-v2" property.
*/
static const struct of_device_id blocklist[] __initconst = {
+ { .compatible = "airoha,an7583", },
{ .compatible = "airoha,en7581", },
{ .compatible = "allwinner,sun50i-a100" },
@@ -188,9 +189,11 @@ static const struct of_device_id blocklist[] __initconst = {
{ .compatible = "ti,omap3", },
{ .compatible = "ti,am625", },
{ .compatible = "ti,am62a7", },
+ { .compatible = "ti,am62d2", },
{ .compatible = "ti,am62p5", },
{ .compatible = "qcom,ipq5332", },
+ { .compatible = "qcom,ipq5424", },
{ .compatible = "qcom,ipq6018", },
{ .compatible = "qcom,ipq8064", },
{ .compatible = "qcom,ipq8074", },
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 58e3839a2140..852e024facc3 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -664,10 +664,10 @@ unlock:
static unsigned int cpufreq_parse_policy(char *str_governor)
{
- if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN))
+ if (!strncasecmp(str_governor, "performance", strlen("performance")))
return CPUFREQ_POLICY_PERFORMANCE;
- if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN))
+ if (!strncasecmp(str_governor, "powersave", strlen("powersave")))
return CPUFREQ_POLICY_POWERSAVE;
return CPUFREQ_POLICY_UNKNOWN;
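With CPUFREQ_NAME_LEN as the length, the comparison also covered the literal's terminating NUL, so any trailing character, such as the newline a sysfs write carries, defeated the match; using strlen() turns it into a case-insensitive prefix test. A quick demonstration of the difference (CPUFREQ_NAME_LEN assumed to be 16 for the demo):

#include <stdio.h>
#include <string.h>
#include <strings.h>

#define CPUFREQ_NAME_LEN 16	/* assumed value for the demo */

int main(void)
{
	const char *input = "performance\n";	/* typical sysfs write */

	printf("n = CPUFREQ_NAME_LEN: %s\n",
	       strncasecmp(input, "performance", CPUFREQ_NAME_LEN) == 0 ?
	       "match" : "no match");
	printf("n = strlen(...):      %s\n",
	       strncasecmp(input, "performance", strlen("performance")) == 0 ?
	       "match" : "no match");
	return 0;
}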
@@ -914,7 +914,7 @@ static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
const char *buf, size_t count)
{
unsigned int freq = 0;
- unsigned int ret;
+ int ret;
if (!policy->governor || !policy->governor->store_setspeed)
return -EINVAL;
@@ -1121,7 +1121,8 @@ static int cpufreq_init_policy(struct cpufreq_policy *policy)
if (has_target()) {
/* Update policy governor to the one used before hotplug. */
- gov = get_governor(policy->last_governor);
+ if (policy->last_governor[0] != '\0')
+ gov = get_governor(policy->last_governor);
if (gov) {
pr_debug("Restoring governor %s for cpu %d\n",
gov->name, policy->cpu);
@@ -1844,7 +1845,6 @@ static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, b
*/
unsigned int cpufreq_quick_get(unsigned int cpu)
{
- struct cpufreq_policy *policy __free(put_cpufreq_policy) = NULL;
unsigned long flags;
read_lock_irqsave(&cpufreq_driver_lock, flags);
@@ -1859,7 +1859,7 @@ unsigned int cpufreq_quick_get(unsigned int cpu)
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
- policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
if (policy)
return policy->cur;
@@ -1875,9 +1875,7 @@ EXPORT_SYMBOL(cpufreq_quick_get);
*/
unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
- struct cpufreq_policy *policy __free(put_cpufreq_policy);
-
- policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
if (policy)
return policy->max;
@@ -1893,9 +1891,7 @@ EXPORT_SYMBOL(cpufreq_quick_get_max);
*/
__weak unsigned int cpufreq_get_hw_max_freq(unsigned int cpu)
{
- struct cpufreq_policy *policy __free(put_cpufreq_policy);
-
- policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
if (policy)
return policy->cpuinfo.max_freq;
@@ -1919,9 +1915,7 @@ static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
*/
unsigned int cpufreq_get(unsigned int cpu)
{
- struct cpufreq_policy *policy __free(put_cpufreq_policy);
-
- policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
if (!policy)
return 0;
@@ -2750,9 +2744,7 @@ static void cpufreq_policy_refresh(struct cpufreq_policy *policy)
*/
void cpufreq_update_policy(unsigned int cpu)
{
- struct cpufreq_policy *policy __free(put_cpufreq_policy);
-
- policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
if (!policy)
return;
@@ -2769,9 +2761,7 @@ EXPORT_SYMBOL(cpufreq_update_policy);
*/
void cpufreq_update_limits(unsigned int cpu)
{
- struct cpufreq_policy *policy __free(put_cpufreq_policy);
-
- policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
if (!policy)
return;
@@ -2792,7 +2782,7 @@ int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
if (!policy->freq_table)
return -ENXIO;
- ret = cpufreq_frequency_table_cpuinfo(policy, policy->freq_table);
+ ret = cpufreq_frequency_table_cpuinfo(policy);
if (ret) {
pr_err("%s: Policy frequency update failed\n", __func__);
return ret;
@@ -2921,10 +2911,8 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
return -EPROBE_DEFER;
if (!driver_data || !driver_data->verify || !driver_data->init ||
- !(driver_data->setpolicy || driver_data->target_index ||
- driver_data->target) ||
- (driver_data->setpolicy && (driver_data->target_index ||
- driver_data->target)) ||
+ (driver_data->target_index && driver_data->target) ||
+ (!!driver_data->setpolicy == (driver_data->target_index || driver_data->target)) ||
(!driver_data->get_intermediate != !driver_data->target_intermediate) ||
(!driver_data->online != !driver_data->offline) ||
(driver_data->adjust_perf && !driver_data->fast_switch))
@@ -3058,9 +3046,7 @@ static int __init cpufreq_core_init(void)
static bool cpufreq_policy_is_good_for_eas(unsigned int cpu)
{
- struct cpufreq_policy *policy __free(put_cpufreq_policy);
-
- policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
if (!policy) {
pr_debug("cpufreq policy not set for CPU: %d\n", cpu);
return false;
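Most of the cpufreq.c churn converts "policy = cpufreq_cpu_get(cpu); ... cpufreq_cpu_put(policy);" pairs to the scope-based __free(put_cpufreq_policy) form, and merges declaration and initialization onto one line so no return path can slip between them. The kernel's __free() is built on the compiler's cleanup attribute; here is a self-contained userspace demonstration of the same mechanism (the names are invented for the demo):

#include <stdio.h>
#include <stdlib.h>

/* GCC/Clang run the cleanup function when the variable leaves scope, no
 * matter which return path is taken -- the mechanism behind the kernel's
 * __free()/DEFINE_FREE() helpers. */
static void put_buf(char **p)
{
	printf("releasing %p\n", (void *)*p);
	free(*p);
}

#define __free_buf __attribute__((cleanup(put_buf)))

static int use_buffer(int fail_early)
{
	char *buf __free_buf = malloc(64);

	if (!buf)
		return -1;
	if (fail_early)
		return -2;	/* buf is still freed on this path */

	snprintf(buf, 64, "hello");
	printf("%s\n", buf);
	return 0;		/* ...and on this one */
}

int main(void)
{
	use_buffer(0);
	use_buffer(1);
	return 0;
}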
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 56500b25d77c..cce6a8d113e1 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -152,9 +152,9 @@ static ssize_t sampling_down_factor_store(struct gov_attr_set *attr_set,
struct dbs_data *dbs_data = to_dbs_data(attr_set);
unsigned int input;
int ret;
- ret = sscanf(buf, "%u", &input);
+ ret = kstrtouint(buf, 0, &input);
- if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
+ if (ret || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
return -EINVAL;
dbs_data->sampling_down_factor = input;
@@ -168,9 +168,9 @@ static ssize_t up_threshold_store(struct gov_attr_set *attr_set,
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int input;
int ret;
- ret = sscanf(buf, "%u", &input);
+ ret = kstrtouint(buf, 0, &input);
- if (ret != 1 || input > 100 || input <= cs_tuners->down_threshold)
+ if (ret || input > 100 || input <= cs_tuners->down_threshold)
return -EINVAL;
dbs_data->up_threshold = input;
@@ -184,10 +184,10 @@ static ssize_t down_threshold_store(struct gov_attr_set *attr_set,
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int input;
int ret;
- ret = sscanf(buf, "%u", &input);
+ ret = kstrtouint(buf, 0, &input);
/* cannot be lower than 1 otherwise freq will not fall */
- if (ret != 1 || input < 1 || input >= dbs_data->up_threshold)
+ if (ret || input < 1 || input >= dbs_data->up_threshold)
return -EINVAL;
cs_tuners->down_threshold = input;
@@ -201,9 +201,9 @@ static ssize_t ignore_nice_load_store(struct gov_attr_set *attr_set,
unsigned int input;
int ret;
- ret = sscanf(buf, "%u", &input);
- if (ret != 1)
- return -EINVAL;
+ ret = kstrtouint(buf, 0, &input);
+ if (ret)
+ return ret;
if (input > 1)
input = 1;
@@ -226,10 +226,10 @@ static ssize_t freq_step_store(struct gov_attr_set *attr_set, const char *buf,
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int input;
int ret;
- ret = sscanf(buf, "%u", &input);
+ ret = kstrtouint(buf, 0, &input);
- if (ret != 1)
- return -EINVAL;
+ if (ret)
+ return ret;
if (input > 100)
input = 100;
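kstrtouint() differs from sscanf("%u") in rejecting trailing garbage and out-of-range values instead of silently parsing a prefix, which is why the error checks above simplify to if (ret). A userspace analogue of the stricter parse (parse_uint_strict() is illustrative; kstrtouint() itself also tolerates a single trailing newline):

#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Accept the string only if it is a complete unsigned integer, the way
 * kstrtouint() does; sscanf("%u") would happily parse "42junk" as 42. */
static int parse_uint_strict(const char *s, unsigned int *out)
{
	char *end;

	errno = 0;
	unsigned long v = strtoul(s, &end, 0);

	if (errno || end == s || *end != '\0' || v > UINT_MAX)
		return -EINVAL;
	*out = (unsigned int)v;
	return 0;
}

int main(void)
{
	unsigned int v;

	printf("\"42\"     -> %d\n", parse_uint_strict("42", &v));
	printf("\"42junk\" -> %d\n", parse_uint_strict("42junk", &v));
	return 0;
}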
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 0e65d37c9231..a6ecc203f7b7 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -30,29 +30,6 @@ static struct od_ops od_ops;
static unsigned int default_powersave_bias;
/*
- * Not all CPUs want IO time to be accounted as busy; this depends on how
- * efficient idling at a higher frequency/voltage is.
- * Pavel Machek says this is not so for various generations of AMD and old
- * Intel systems.
- * Mike Chan (android.com) claims this is also not true for ARM.
- * Because of this, whitelist specific known (series) of CPUs by default, and
- * leave all others up to the user.
- */
-static int should_io_be_busy(void)
-{
-#if defined(CONFIG_X86)
- /*
- * For Intel, Core 2 (model 15) and later have an efficient idle.
- */
- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
- boot_cpu_data.x86 == 6 &&
- boot_cpu_data.x86_model >= 15)
- return 1;
-#endif
- return 0;
-}
-
-/*
* Find right freq to be set now with powersave_bias on.
* Returns the freq_hi to be used right now and will set freq_hi_delay_us,
* freq_lo, and freq_lo_delay_us in percpu area for averaging freqs.
@@ -377,7 +354,7 @@ static int od_init(struct dbs_data *dbs_data)
dbs_data->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
dbs_data->ignore_nice_load = 0;
tuners->powersave_bias = default_powersave_bias;
- dbs_data->io_is_busy = should_io_be_busy();
+ dbs_data->io_is_busy = od_should_io_be_busy();
dbs_data->tuners = tuners;
return 0;
diff --git a/drivers/cpufreq/cpufreq_ondemand.h b/drivers/cpufreq/cpufreq_ondemand.h
index 1af8e5c4b86f..2ca8f1aaf2e3 100644
--- a/drivers/cpufreq/cpufreq_ondemand.h
+++ b/drivers/cpufreq/cpufreq_ondemand.h
@@ -24,3 +24,26 @@ static inline struct od_policy_dbs_info *to_dbs_info(struct policy_dbs_info *pol
struct od_dbs_tuners {
unsigned int powersave_bias;
};
+
+#ifdef CONFIG_X86
+#include <asm/cpu_device_id.h>
+
+/*
+ * Not all CPUs want IO time to be accounted as busy; this depends on
+ * how efficient idling at a higher frequency/voltage is.
+ *
+ * Pavel Machek says this is not so for various generations of AMD and
+ * old Intel systems. Mike Chan (android.com) claims this is also not
+ * true for ARM.
+ *
+ * Because of this, select a known series of Intel CPUs (Family 6 and
+ * later) by default, and leave all others up to the user.
+ */
+static inline bool od_should_io_be_busy(void)
+{
+ return (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+ boot_cpu_data.x86_vfm >= INTEL_PENTIUM_PRO);
+}
+#else
+static inline bool od_should_io_be_busy(void) { return false; }
+#endif
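boot_cpu_data.x86_vfm packs vendor, family and model into one integer, so a whole range of CPUs ("Pentium Pro and newer") reduces to a single >= comparison. A simplified sketch of the packing idea (the layout below is illustrative, not the kernel's exact encoding):

#include <stdio.h>

/* Simplified stand-in for the kernel's vendor/family/model packing: with
 * vendor in the highest bits, a single integer compare covers "this
 * vendor, this family, this model or newer". */
#define VFM(vendor, family, model) \
	(((unsigned int)(vendor) << 16) | \
	 ((unsigned int)(family) << 8) | (unsigned int)(model))

#define VENDOR_INTEL	0
#define PENTIUM_PRO	VFM(VENDOR_INTEL, 6, 1)

int main(void)
{
	unsigned int cpu = VFM(VENDOR_INTEL, 6, 15);	/* Core 2, say */

	printf("io_is_busy default: %d\n", cpu >= PENTIUM_PRO);
	return 0;
}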
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index 35de513af6c9..7f251daf03ce 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -28,22 +28,21 @@ static bool policy_has_boost_freq(struct cpufreq_policy *policy)
return false;
}
-int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
- struct cpufreq_frequency_table *table)
+int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy)
{
- struct cpufreq_frequency_table *pos;
+ struct cpufreq_frequency_table *pos, *table = policy->freq_table;
unsigned int min_freq = ~0;
unsigned int max_freq = 0;
- unsigned int freq;
+ unsigned int freq, i;
- cpufreq_for_each_valid_entry(pos, table) {
+ cpufreq_for_each_valid_entry_idx(pos, table, i) {
freq = pos->frequency;
if ((!cpufreq_boost_enabled() || !policy->boost_enabled)
&& (pos->flags & CPUFREQ_BOOST_FREQ))
continue;
- pr_debug("table entry %u: %u kHz\n", (int)(pos - table), freq);
+ pr_debug("table entry %u: %u kHz\n", i, freq);
if (freq < min_freq)
min_freq = freq;
if (freq > max_freq)
@@ -65,10 +64,9 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
return 0;
}
-int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy,
- struct cpufreq_frequency_table *table)
+int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy)
{
- struct cpufreq_frequency_table *pos;
+ struct cpufreq_frequency_table *pos, *table = policy->freq_table;
unsigned int freq, prev_smaller = 0;
bool found = false;
@@ -110,7 +108,7 @@ int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy)
if (!policy->freq_table)
return -ENODEV;
- return cpufreq_frequency_table_verify(policy, policy->freq_table);
+ return cpufreq_frequency_table_verify(policy);
}
EXPORT_SYMBOL_GPL(cpufreq_generic_frequency_table_verify);
@@ -128,7 +126,7 @@ int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
};
struct cpufreq_frequency_table *pos;
struct cpufreq_frequency_table *table = policy->freq_table;
- unsigned int freq, diff, i = 0;
+ unsigned int freq, diff, i;
int index;
pr_debug("request for target %u kHz (relation: %u) for cpu %u\n",
@@ -354,7 +352,7 @@ int cpufreq_table_validate_and_sort(struct cpufreq_policy *policy)
return 0;
}
- ret = cpufreq_frequency_table_cpuinfo(policy, policy->freq_table);
+ ret = cpufreq_frequency_table_cpuinfo(policy);
if (ret)
return ret;
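cpufreq_frequency_table_cpuinfo() now takes the table from policy->freq_table and walks it with the index-tracking iterator, but its job is unchanged: compute the usable min and max frequencies, skipping boost entries while boost is disabled. A standalone sketch of that scan (sentinel values and flags simplified from the kernel's):

#include <stdio.h>

#define F_INVALID	0xffffffffu	/* stand-in for CPUFREQ_ENTRY_INVALID */
#define F_END		0xfffffffeu	/* stand-in for CPUFREQ_TABLE_END */
#define FLAG_BOOST	0x1		/* stand-in for CPUFREQ_BOOST_FREQ */

struct entry { unsigned int freq; unsigned int flags; };

static void cpuinfo_scan(const struct entry *t, int boost_on,
			 unsigned int *min, unsigned int *max)
{
	int i;

	*min = ~0u;
	*max = 0;
	for (i = 0; t[i].freq != F_END; i++) {
		if (t[i].freq == F_INVALID)
			continue;
		if (!boost_on && (t[i].flags & FLAG_BOOST))
			continue;	/* boost entry, boost disabled */
		if (t[i].freq < *min)
			*min = t[i].freq;
		if (t[i].freq > *max)
			*max = t[i].freq;
	}
}

int main(void)
{
	const struct entry table[] = {
		{ 500000, 0 }, { F_INVALID, 0 }, { 1000000, 0 },
		{ 1400000, FLAG_BOOST }, { F_END, 0 },
	};
	unsigned int min, max;

	cpuinfo_scan(table, 0, &min, &max);
	printf("boost off: %u..%u kHz\n", min, max);
	cpuinfo_scan(table, 1, &min, &max);
	printf("boost on:  %u..%u kHz\n", min, max);
	return 0;
}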
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 0d5d283a5429..38897bb14a2c 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -620,24 +620,9 @@ static int min_perf_pct_min(void)
(cpu->pstate.min_pstate * 100 / turbo_pstate) : 0;
}
-static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
-{
- u64 epb;
- int ret;
-
- if (!boot_cpu_has(X86_FEATURE_EPB))
- return -ENXIO;
-
- ret = rdmsrq_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
- if (ret)
- return (s16)ret;
-
- return (s16)(epb & 0x0f);
-}
-
static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
{
- s16 epp;
+ s16 epp = -EOPNOTSUPP;
if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
/*
@@ -651,34 +636,13 @@ static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
return epp;
}
epp = (hwp_req_data >> 24) & 0xff;
- } else {
- /* When there is no EPP present, HWP uses EPB settings */
- epp = intel_pstate_get_epb(cpu_data);
}
return epp;
}
-static int intel_pstate_set_epb(int cpu, s16 pref)
-{
- u64 epb;
- int ret;
-
- if (!boot_cpu_has(X86_FEATURE_EPB))
- return -ENXIO;
-
- ret = rdmsrq_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
- if (ret)
- return ret;
-
- epb = (epb & ~0x0f) | pref;
- wrmsrq_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb);
-
- return 0;
-}
-
/*
- * EPP/EPB display strings corresponding to EPP index in the
+ * EPP display strings corresponding to EPP index in the
* energy_perf_strings[]
* index String
*-------------------------------------
@@ -782,7 +746,7 @@ static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
u32 raw_epp)
{
int epp = -EINVAL;
- int ret;
+ int ret = -EOPNOTSUPP;
if (!pref_index)
epp = cpu_data->epp_default;
@@ -802,10 +766,6 @@ static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
return -EBUSY;
ret = intel_pstate_set_epp(cpu_data, epp);
- } else {
- if (epp == -EINVAL)
- epp = (pref_index - 1) << 2;
- ret = intel_pstate_set_epb(cpu_data->cpu, epp);
}
return ret;
@@ -937,11 +897,19 @@ static ssize_t show_base_frequency(struct cpufreq_policy *policy, char *buf)
cpufreq_freq_attr_ro(base_frequency);
+enum hwp_cpufreq_attr_index {
+ HWP_BASE_FREQUENCY_INDEX = 0,
+ HWP_PERFORMANCE_PREFERENCE_INDEX,
+ HWP_PERFORMANCE_AVAILABLE_PREFERENCES_INDEX,
+ HWP_CPUFREQ_ATTR_COUNT,
+};
+
static struct freq_attr *hwp_cpufreq_attrs[] = {
- &energy_performance_preference,
- &energy_performance_available_preferences,
- &base_frequency,
- NULL,
+ [HWP_BASE_FREQUENCY_INDEX] = &base_frequency,
+ [HWP_PERFORMANCE_PREFERENCE_INDEX] = &energy_performance_preference,
+ [HWP_PERFORMANCE_AVAILABLE_PREFERENCES_INDEX] =
+ &energy_performance_available_preferences,
+ [HWP_CPUFREQ_ATTR_COUNT] = NULL,
};
static bool no_cas __ro_after_init;
@@ -1337,9 +1305,8 @@ static void intel_pstate_hwp_set(unsigned int cpu)
if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
value &= ~GENMASK_ULL(31, 24);
value |= (u64)epp << 24;
- } else {
- intel_pstate_set_epb(cpu, epp);
}
+
skip_epp:
WRITE_ONCE(cpu_data->hwp_req_cached, value);
wrmsrq_on_cpu(cpu, MSR_HWP_REQUEST, value);
@@ -1411,6 +1378,9 @@ static void intel_pstate_hwp_offline(struct cpudata *cpu)
#define POWER_CTL_EE_ENABLE 1
#define POWER_CTL_EE_DISABLE 2
+/* Enable bit for Dynamic Efficiency Control (DEC) */
+#define POWER_CTL_DEC_ENABLE 27
+
static int power_ctl_ee_state;
static void set_power_ctl_ee_state(bool input)
@@ -1502,9 +1472,7 @@ static void __intel_pstate_update_max_freq(struct cpufreq_policy *policy,
static bool intel_pstate_update_max_freq(struct cpudata *cpudata)
{
- struct cpufreq_policy *policy __free(put_cpufreq_policy);
-
- policy = cpufreq_cpu_get(cpudata->cpu);
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpudata->cpu);
if (!policy)
return false;
@@ -1695,41 +1663,40 @@ unlock_driver:
return count;
}
-static void update_qos_request(enum freq_qos_req_type type)
+static void update_cpu_qos_request(int cpu, enum freq_qos_req_type type)
{
+ struct cpudata *cpudata = all_cpu_data[cpu];
+ unsigned int freq = cpudata->pstate.turbo_freq;
struct freq_qos_request *req;
- struct cpufreq_policy *policy;
- int i;
-
- for_each_possible_cpu(i) {
- struct cpudata *cpu = all_cpu_data[i];
- unsigned int freq, perf_pct;
- policy = cpufreq_cpu_get(i);
- if (!policy)
- continue;
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
+ if (!policy)
+ return;
- req = policy->driver_data;
- cpufreq_cpu_put(policy);
+ req = policy->driver_data;
+ if (!req)
+ return;
- if (!req)
- continue;
+ if (hwp_active)
+ intel_pstate_get_hwp_cap(cpudata);
- if (hwp_active)
- intel_pstate_get_hwp_cap(cpu);
+ if (type == FREQ_QOS_MIN) {
+ freq = DIV_ROUND_UP(freq * global.min_perf_pct, 100);
+ } else {
+ req++;
+ freq = (freq * global.max_perf_pct) / 100;
+ }
- if (type == FREQ_QOS_MIN) {
- perf_pct = global.min_perf_pct;
- } else {
- req++;
- perf_pct = global.max_perf_pct;
- }
+ if (freq_qos_update_request(req, freq) < 0)
+ pr_warn("Failed to update freq constraint: CPU%d\n", cpu);
+}
- freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * perf_pct, 100);
+static void update_qos_requests(enum freq_qos_req_type type)
+{
+ int i;
- if (freq_qos_update_request(req, freq) < 0)
- pr_warn("Failed to update freq constraint: CPU%d\n", i);
- }
+ for_each_possible_cpu(i)
+ update_cpu_qos_request(i, type);
}
static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
@@ -1758,7 +1725,7 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
if (intel_pstate_driver == &intel_pstate)
intel_pstate_update_policies();
else
- update_qos_request(FREQ_QOS_MAX);
+ update_qos_requests(FREQ_QOS_MAX);
mutex_unlock(&intel_pstate_driver_lock);
@@ -1792,7 +1759,7 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
if (intel_pstate_driver == &intel_pstate)
intel_pstate_update_policies();
else
- update_qos_request(FREQ_QOS_MIN);
+ update_qos_requests(FREQ_QOS_MIN);
mutex_unlock(&intel_pstate_driver_lock);
@@ -2575,7 +2542,7 @@ static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
* that sample.time will always be reset before setting the utilization
* update hook and make the caller skip the sample then.
*/
- if (cpu->last_sample_time) {
+ if (likely(cpu->last_sample_time)) {
intel_pstate_calc_avg_perf(cpu);
return true;
}
@@ -3802,6 +3769,26 @@ static const struct x86_cpu_id intel_hybrid_scaling_factor[] = {
{}
};
+static bool hwp_check_epp(void)
+{
+ if (boot_cpu_has(X86_FEATURE_HWP_EPP))
+ return true;
+
+ /* Without EPP support, don't expose EPP-related sysfs attributes. */
+ hwp_cpufreq_attrs[HWP_PERFORMANCE_PREFERENCE_INDEX] = NULL;
+ hwp_cpufreq_attrs[HWP_PERFORMANCE_AVAILABLE_PREFERENCES_INDEX] = NULL;
+
+ return false;
+}
+
+static bool hwp_check_dec(void)
+{
+ u64 power_ctl;
+
+ rdmsrq(MSR_IA32_POWER_CTL, power_ctl);
+ return !!(power_ctl & BIT(POWER_CTL_DEC_ENABLE));
+}
+
static int __init intel_pstate_init(void)
{
static struct cpudata **_all_cpu_data;
@@ -3822,23 +3809,32 @@ static int __init intel_pstate_init(void)
id = x86_match_cpu(hwp_support_ids);
if (id) {
- hwp_forced = intel_pstate_hwp_is_enabled();
+ bool epp_present = hwp_check_epp();
- if (hwp_forced)
+ /*
+ * If HWP is enabled already, there is no choice but to deal
+ * with it.
+ */
+ hwp_forced = intel_pstate_hwp_is_enabled();
+ if (hwp_forced) {
pr_info("HWP enabled by BIOS\n");
- else if (no_load)
+ no_hwp = 0;
+ } else if (no_load) {
return -ENODEV;
+ } else if (!epp_present && !hwp_check_dec()) {
+ /*
+ * Avoid enabling HWP for processors without EPP support
+ * unless the Dynamic Efficiency Control (DEC) enable
+ * bit (MSR_IA32_POWER_CTL, bit 27) is set because that
+ * means incomplete HWP implementation which is a corner
+ * case and supporting it is generally problematic.
+ */
+ no_hwp = 1;
+ }
copy_cpu_funcs(&core_funcs);
- /*
- * Avoid enabling HWP for processors without EPP support,
- * because that means incomplete HWP implementation which is a
- * corner case and supporting it is generally problematic.
- *
- * If HWP is enabled already, though, there is no choice but to
- * deal with it.
- */
- if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || hwp_forced) {
+
+ if (!no_hwp) {
hwp_active = true;
hwp_mode_bdw = id->driver_data;
intel_pstate.attr = hwp_cpufreq_attrs;
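With the EPB fallback removed, the HWP enablement logic reduces to: firmware-forced HWP always wins; otherwise HWP is used only when EPP is present or the Dynamic Efficiency Control bit (MSR_IA32_POWER_CTL, bit 27) is set. A truth-table sketch of that core decision (the no_load and user no_hwp overrides are omitted; plain C, no MSR access):

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the restructured intel_pstate_init() policy: forced HWP always
 * wins; otherwise HWP without either EPP or DEC is treated as an
 * incomplete implementation and left disabled. */
static bool use_hwp(bool hwp_forced, bool epp_present, bool dec_enabled)
{
	if (hwp_forced)
		return true;
	return epp_present || dec_enabled;
}

int main(void)
{
	int f, e, d;

	for (f = 0; f < 2; f++)
		for (e = 0; e < 2; e++)
			for (d = 0; d < 2; d++)
				printf("forced=%d epp=%d dec=%d -> hwp=%d\n",
				       f, e, d, use_hwp(f, e, d));
	return 0;
}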
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index ba0e08c8486a..49e76b44468a 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -953,6 +953,9 @@ static void __exit longhaul_exit(void)
struct cpufreq_policy *policy = cpufreq_cpu_get(0);
int i;
+ if (unlikely(!policy))
+ return;
+
for (i = 0; i < numscales; i++) {
if (mults[i] == maxmult) {
struct cpufreq_freqs freqs;
diff --git a/drivers/cpufreq/mediatek-cpufreq-hw.c b/drivers/cpufreq/mediatek-cpufreq-hw.c
index 74f1b4c796e4..fce5aa5ceea0 100644
--- a/drivers/cpufreq/mediatek-cpufreq-hw.c
+++ b/drivers/cpufreq/mediatek-cpufreq-hw.c
@@ -24,6 +24,8 @@
#define POLL_USEC 1000
#define TIMEOUT_USEC 300000
+#define FDVFS_FDIV_HZ (26 * 1000)
+
enum {
REG_FREQ_LUT_TABLE,
REG_FREQ_ENABLE,
@@ -35,7 +37,14 @@ enum {
REG_ARRAY_SIZE,
};
-struct mtk_cpufreq_data {
+struct mtk_cpufreq_priv {
+ struct device *dev;
+ const struct mtk_cpufreq_variant *variant;
+ void __iomem *fdvfs;
+};
+
+struct mtk_cpufreq_domain {
+ struct mtk_cpufreq_priv *parent;
struct cpufreq_frequency_table *table;
void __iomem *reg_bases[REG_ARRAY_SIZE];
struct resource *res;
@@ -43,20 +52,51 @@ struct mtk_cpufreq_data {
int nr_opp;
};
-static const u16 cpufreq_mtk_offsets[REG_ARRAY_SIZE] = {
- [REG_FREQ_LUT_TABLE] = 0x0,
- [REG_FREQ_ENABLE] = 0x84,
- [REG_FREQ_PERF_STATE] = 0x88,
- [REG_FREQ_HW_STATE] = 0x8c,
- [REG_EM_POWER_TBL] = 0x90,
- [REG_FREQ_LATENCY] = 0x110,
+struct mtk_cpufreq_variant {
+ int (*init)(struct mtk_cpufreq_priv *priv);
+ const u16 reg_offsets[REG_ARRAY_SIZE];
+ const bool is_hybrid_dvfs;
+};
+
+static const struct mtk_cpufreq_variant cpufreq_mtk_base_variant = {
+ .reg_offsets = {
+ [REG_FREQ_LUT_TABLE] = 0x0,
+ [REG_FREQ_ENABLE] = 0x84,
+ [REG_FREQ_PERF_STATE] = 0x88,
+ [REG_FREQ_HW_STATE] = 0x8c,
+ [REG_EM_POWER_TBL] = 0x90,
+ [REG_FREQ_LATENCY] = 0x110,
+ },
+};
+
+static int mtk_cpufreq_hw_mt8196_init(struct mtk_cpufreq_priv *priv)
+{
+ priv->fdvfs = devm_of_iomap(priv->dev, priv->dev->of_node, 0, NULL);
+ if (IS_ERR(priv->fdvfs))
+ return dev_err_probe(priv->dev, PTR_ERR(priv->fdvfs),
+ "failed to get fdvfs iomem\n");
+
+ return 0;
+}
+
+static const struct mtk_cpufreq_variant cpufreq_mtk_mt8196_variant = {
+ .init = mtk_cpufreq_hw_mt8196_init,
+ .reg_offsets = {
+ [REG_FREQ_LUT_TABLE] = 0x0,
+ [REG_FREQ_ENABLE] = 0x84,
+ [REG_FREQ_PERF_STATE] = 0x88,
+ [REG_FREQ_HW_STATE] = 0x8c,
+ [REG_EM_POWER_TBL] = 0x90,
+ [REG_FREQ_LATENCY] = 0x114,
+ },
+ .is_hybrid_dvfs = true,
};
static int __maybe_unused
mtk_cpufreq_get_cpu_power(struct device *cpu_dev, unsigned long *uW,
unsigned long *KHz)
{
- struct mtk_cpufreq_data *data;
+ struct mtk_cpufreq_domain *data;
struct cpufreq_policy *policy;
int i;
@@ -80,19 +120,38 @@ mtk_cpufreq_get_cpu_power(struct device *cpu_dev, unsigned long *uW,
return 0;
}
+static void mtk_cpufreq_hw_fdvfs_switch(unsigned int target_freq,
+ struct cpufreq_policy *policy)
+{
+ struct mtk_cpufreq_domain *data = policy->driver_data;
+ struct mtk_cpufreq_priv *priv = data->parent;
+ unsigned int cpu;
+
+ target_freq = DIV_ROUND_UP(target_freq, FDVFS_FDIV_HZ);
+ for_each_cpu(cpu, policy->real_cpus) {
+ writel_relaxed(target_freq, priv->fdvfs + cpu * 4);
+ }
+}
+
static int mtk_cpufreq_hw_target_index(struct cpufreq_policy *policy,
unsigned int index)
{
- struct mtk_cpufreq_data *data = policy->driver_data;
-
- writel_relaxed(index, data->reg_bases[REG_FREQ_PERF_STATE]);
+ struct mtk_cpufreq_domain *data = policy->driver_data;
+ unsigned int target_freq;
+
+ if (data->parent->fdvfs) {
+ target_freq = policy->freq_table[index].frequency;
+ mtk_cpufreq_hw_fdvfs_switch(target_freq, policy);
+ } else {
+ writel_relaxed(index, data->reg_bases[REG_FREQ_PERF_STATE]);
+ }
return 0;
}
static unsigned int mtk_cpufreq_hw_get(unsigned int cpu)
{
- struct mtk_cpufreq_data *data;
+ struct mtk_cpufreq_domain *data;
struct cpufreq_policy *policy;
unsigned int index;
@@ -111,18 +170,21 @@ static unsigned int mtk_cpufreq_hw_get(unsigned int cpu)
static unsigned int mtk_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
unsigned int target_freq)
{
- struct mtk_cpufreq_data *data = policy->driver_data;
+ struct mtk_cpufreq_domain *data = policy->driver_data;
unsigned int index;
index = cpufreq_table_find_index_dl(policy, target_freq, false);
- writel_relaxed(index, data->reg_bases[REG_FREQ_PERF_STATE]);
+ if (data->parent->fdvfs)
+ mtk_cpufreq_hw_fdvfs_switch(target_freq, policy);
+ else
+ writel_relaxed(index, data->reg_bases[REG_FREQ_PERF_STATE]);
return policy->freq_table[index].frequency;
}
static int mtk_cpu_create_freq_table(struct platform_device *pdev,
- struct mtk_cpufreq_data *data)
+ struct mtk_cpufreq_domain *data)
{
struct device *dev = &pdev->dev;
u32 temp, i, freq, prev_freq = 0;
@@ -157,9 +219,9 @@ static int mtk_cpu_create_freq_table(struct platform_device *pdev,
static int mtk_cpu_resources_init(struct platform_device *pdev,
struct cpufreq_policy *policy,
- const u16 *offsets)
+ struct mtk_cpufreq_priv *priv)
{
- struct mtk_cpufreq_data *data;
+ struct mtk_cpufreq_domain *data;
struct device *dev = &pdev->dev;
struct resource *res;
struct of_phandle_args args;
@@ -180,6 +242,15 @@ static int mtk_cpu_resources_init(struct platform_device *pdev,
index = args.args[0];
of_node_put(args.np);
+ /*
+	 * On controllers with hybrid DVFS, such as the MT8196, the first declared

+ * register range is for FDVFS, followed by the frequency domain MMIOs.
+ */
+ if (priv->variant->is_hybrid_dvfs)
+ index++;
+
+ data->parent = priv;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, index);
if (!res) {
dev_err(dev, "failed to get mem resource %d\n", index);
@@ -202,7 +273,7 @@ static int mtk_cpu_resources_init(struct platform_device *pdev,
data->res = res;
for (i = REG_FREQ_LUT_TABLE; i < REG_ARRAY_SIZE; i++)
- data->reg_bases[i] = base + offsets[i];
+ data->reg_bases[i] = base + priv->variant->reg_offsets[i];
ret = mtk_cpu_create_freq_table(pdev, data);
if (ret) {
@@ -223,7 +294,7 @@ static int mtk_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
{
struct platform_device *pdev = cpufreq_get_driver_data();
int sig, pwr_hw = CPUFREQ_HW_STATUS | SVS_HW_STATUS;
- struct mtk_cpufreq_data *data;
+ struct mtk_cpufreq_domain *data;
unsigned int latency;
int ret;
@@ -262,7 +333,7 @@ static int mtk_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
static void mtk_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
{
- struct mtk_cpufreq_data *data = policy->driver_data;
+ struct mtk_cpufreq_domain *data = policy->driver_data;
struct resource *res = data->res;
void __iomem *base = data->base;
@@ -275,7 +346,7 @@ static void mtk_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
static void mtk_cpufreq_register_em(struct cpufreq_policy *policy)
{
struct em_data_callback em_cb = EM_DATA_CB(mtk_cpufreq_get_cpu_power);
- struct mtk_cpufreq_data *data = policy->driver_data;
+ struct mtk_cpufreq_domain *data = policy->driver_data;
em_dev_register_perf_domain(get_cpu_device(policy->cpu), data->nr_opp,
&em_cb, policy->cpus, true);
@@ -297,6 +368,7 @@ static struct cpufreq_driver cpufreq_mtk_hw_driver = {
static int mtk_cpufreq_hw_driver_probe(struct platform_device *pdev)
{
+ struct mtk_cpufreq_priv *priv;
const void *data;
int ret, cpu;
struct device *cpu_dev;
@@ -320,7 +392,20 @@ static int mtk_cpufreq_hw_driver_probe(struct platform_device *pdev)
if (!data)
return -EINVAL;
- platform_set_drvdata(pdev, (void *) data);
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->variant = data;
+ priv->dev = &pdev->dev;
+
+ if (priv->variant->init) {
+ ret = priv->variant->init(priv);
+ if (ret)
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, priv);
cpufreq_mtk_hw_driver.driver_data = pdev;
ret = cpufreq_register_driver(&cpufreq_mtk_hw_driver);
@@ -336,7 +421,8 @@ static void mtk_cpufreq_hw_driver_remove(struct platform_device *pdev)
}
static const struct of_device_id mtk_cpufreq_hw_match[] = {
- { .compatible = "mediatek,cpufreq-hw", .data = &cpufreq_mtk_offsets },
+ { .compatible = "mediatek,cpufreq-hw", .data = &cpufreq_mtk_base_variant },
+ { .compatible = "mediatek,mt8196-cpufreq-hw", .data = &cpufreq_mtk_mt8196_variant },
{}
};
MODULE_DEVICE_TABLE(of, mtk_cpufreq_hw_match);
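On the MT8196 path the target frequency is not written as a LUT index but as a multiple of the 26 MHz FDVFS reference: DIV_ROUND_UP(target_freq, FDVFS_FDIV_HZ), written once per CPU in the policy. A quick check of that arithmetic (assuming kHz units, as cpufreq tables use, so FDVFS_FDIV_HZ is 26 MHz expressed in kHz):

#include <stdio.h>

#define FDVFS_FDIV_HZ	(26 * 1000)	/* 26 MHz, in kHz units */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* cpufreq hands the driver kHz; the FDVFS register takes a
	 * multiple of the 26 MHz reference, rounded up so the requested
	 * rate is never undershot. */
	unsigned int khz[] = { 900000, 2000000, 3050000 };
	unsigned int i;

	for (i = 0; i < sizeof(khz) / sizeof(khz[0]); i++)
		printf("%u kHz -> FDVFS word %u\n",
		       khz[i], DIV_ROUND_UP(khz[i], FDVFS_FDIV_HZ));
	return 0;
}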
diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
index f3f02c4b6888..00de1166188a 100644
--- a/drivers/cpufreq/mediatek-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -123,7 +123,7 @@ static int mtk_cpufreq_voltage_tracking(struct mtk_cpu_dvfs_info *info,
soc_data->sram_max_volt);
return ret;
}
- } else if (pre_vproc > new_vproc) {
+ } else {
vproc = max(new_vproc,
pre_vsram - soc_data->max_volt_shift);
ret = regulator_set_voltage(proc_reg, vproc,
@@ -320,7 +320,6 @@ static int mtk_cpufreq_opp_notifier(struct notifier_block *nb,
struct dev_pm_opp *new_opp;
struct mtk_cpu_dvfs_info *info;
unsigned long freq, volt;
- struct cpufreq_policy *policy;
int ret = 0;
info = container_of(nb, struct mtk_cpu_dvfs_info, opp_nb);
@@ -353,12 +352,12 @@ static int mtk_cpufreq_opp_notifier(struct notifier_block *nb,
}
dev_pm_opp_put(new_opp);
- policy = cpufreq_cpu_get(info->opp_cpu);
- if (policy) {
+
+ struct cpufreq_policy *policy __free(put_cpufreq_policy)
+ = cpufreq_cpu_get(info->opp_cpu);
+ if (policy)
cpufreq_driver_target(policy, freq / 1000,
CPUFREQ_RELATION_L);
- cpufreq_cpu_put(policy);
- }
}
}
diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c
index 54f8117103c8..765a5bb81829 100644
--- a/drivers/cpufreq/qcom-cpufreq-nvmem.c
+++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c
@@ -200,6 +200,10 @@ static int qcom_cpufreq_kryo_name_version(struct device *cpu_dev,
case QCOM_ID_IPQ9574:
drv->versions = 1 << (unsigned int)(*speedbin);
break;
+ case QCOM_ID_IPQ5424:
+ case QCOM_ID_IPQ5404:
+ drv->versions = (*speedbin == 0x3b) ? BIT(1) : BIT(0);
+ break;
case QCOM_ID_MSM8996SG:
case QCOM_ID_APQ8096SG:
drv->versions = 1 << ((unsigned int)(*speedbin) + 4);
@@ -591,6 +595,7 @@ static const struct of_device_id qcom_cpufreq_match_list[] __initconst __maybe_u
{ .compatible = "qcom,msm8996", .data = &match_data_kryo },
{ .compatible = "qcom,qcs404", .data = &match_data_qcs404 },
{ .compatible = "qcom,ipq5332", .data = &match_data_kryo },
+ { .compatible = "qcom,ipq5424", .data = &match_data_kryo },
{ .compatible = "qcom,ipq6018", .data = &match_data_ipq6018 },
{ .compatible = "qcom,ipq8064", .data = &match_data_ipq8064 },
{ .compatible = "qcom,ipq8074", .data = &match_data_ipq8074 },
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
index 76c888ed8d16..4215621deb3f 100644
--- a/drivers/cpufreq/s5pv210-cpufreq.c
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -554,17 +554,15 @@ out_dmc0:
static int s5pv210_cpufreq_reboot_notifier_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(0);
int ret;
- struct cpufreq_policy *policy;
- policy = cpufreq_cpu_get(0);
if (!policy) {
pr_debug("cpufreq: get no policy for cpu0\n");
return NOTIFY_BAD;
}
ret = cpufreq_driver_target(policy, SLEEP_FREQ, 0);
- cpufreq_cpu_put(policy);
if (ret < 0)
return NOTIFY_BAD;
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index ef078426bfd5..38c165d526d1 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -15,6 +15,7 @@
#include <linux/energy_model.h>
#include <linux/export.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/pm_opp.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
@@ -424,6 +425,15 @@ static bool scmi_dev_used_by_cpus(struct device *scmi_dev)
return true;
}
+ /*
+ * Older Broadcom STB chips had a "clocks" property for CPU node(s)
+	 * that did not match the SCMI performance protocol node; if we get
+	 * here, it means we have such an older Device Tree, so return true
+	 * to preserve backwards compatibility.
+ */
+ if (of_machine_is_compatible("brcm,brcmstb"))
+ return true;
+
return false;
}
diff --git a/drivers/cpufreq/sh-cpufreq.c b/drivers/cpufreq/sh-cpufreq.c
index 9c0b01e00508..642ddb9ea217 100644
--- a/drivers/cpufreq/sh-cpufreq.c
+++ b/drivers/cpufreq/sh-cpufreq.c
@@ -89,11 +89,9 @@ static int sh_cpufreq_target(struct cpufreq_policy *policy,
static int sh_cpufreq_verify(struct cpufreq_policy_data *policy)
{
struct clk *cpuclk = &per_cpu(sh_cpuclk, policy->cpu);
- struct cpufreq_frequency_table *freq_table;
- freq_table = cpuclk->nr_freqs ? cpuclk->freq_table : NULL;
- if (freq_table)
- return cpufreq_frequency_table_verify(policy, freq_table);
+ if (policy->freq_table)
+ return cpufreq_frequency_table_verify(policy);
cpufreq_verify_within_cpu_limits(policy);
diff --git a/drivers/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c
index 0b66df4ed513..f8b42e981635 100644
--- a/drivers/cpufreq/speedstep-lib.c
+++ b/drivers/cpufreq/speedstep-lib.c
@@ -378,16 +378,16 @@ EXPORT_SYMBOL_GPL(speedstep_detect_processor);
* DETECT SPEEDSTEP SPEEDS *
*********************************************************************/
-unsigned int speedstep_get_freqs(enum speedstep_processor processor,
- unsigned int *low_speed,
- unsigned int *high_speed,
- unsigned int *transition_latency,
- void (*set_state) (unsigned int state))
+int speedstep_get_freqs(enum speedstep_processor processor,
+ unsigned int *low_speed,
+ unsigned int *high_speed,
+ unsigned int *transition_latency,
+ void (*set_state)(unsigned int state))
{
unsigned int prev_speed;
- unsigned int ret = 0;
unsigned long flags;
ktime_t tv1, tv2;
+ int ret = 0;
if ((!processor) || (!low_speed) || (!high_speed) || (!set_state))
return -EINVAL;
diff --git a/drivers/cpufreq/speedstep-lib.h b/drivers/cpufreq/speedstep-lib.h
index dc762ea786be..48329647d4c4 100644
--- a/drivers/cpufreq/speedstep-lib.h
+++ b/drivers/cpufreq/speedstep-lib.h
@@ -41,8 +41,8 @@ extern unsigned int speedstep_get_frequency(enum speedstep_processor processor);
* SPEEDSTEP_LOW; the second argument is zero so that no
* cpufreq_notify_transition calls are initiated.
*/
-extern unsigned int speedstep_get_freqs(enum speedstep_processor processor,
- unsigned int *low_speed,
- unsigned int *high_speed,
- unsigned int *transition_latency,
- void (*set_state) (unsigned int state));
+extern int speedstep_get_freqs(enum speedstep_processor processor,
+ unsigned int *low_speed,
+ unsigned int *high_speed,
+ unsigned int *transition_latency,
+ void (*set_state)(unsigned int state));
diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c
index cbabb726c664..4270686fc3e3 100644
--- a/drivers/cpufreq/tegra186-cpufreq.c
+++ b/drivers/cpufreq/tegra186-cpufreq.c
@@ -103,13 +103,12 @@ static int tegra186_cpufreq_set_target(struct cpufreq_policy *policy,
static unsigned int tegra186_cpufreq_get(unsigned int cpu)
{
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
struct tegra186_cpufreq_data *data = cpufreq_get_driver_data();
struct tegra186_cpufreq_cluster *cluster;
- struct cpufreq_policy *policy;
unsigned int edvd_offset, cluster_id;
u32 ndiv;
- policy = cpufreq_cpu_get(cpu);
if (!policy)
return 0;
@@ -117,7 +116,6 @@ static unsigned int tegra186_cpufreq_get(unsigned int cpu)
ndiv = readl(data->regs + edvd_offset) & EDVD_CORE_VOLT_FREQ_F_MASK;
cluster_id = data->cpus[policy->cpu].bpmp_cluster_id;
cluster = &data->clusters[cluster_id];
- cpufreq_cpu_put(policy);
return (cluster->ref_clk_khz * ndiv) / cluster->div;
}
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
index 5a5147277cd0..6ee76f5fe9c5 100644
--- a/drivers/cpufreq/ti-cpufreq.c
+++ b/drivers/cpufreq/ti-cpufreq.c
@@ -72,7 +72,9 @@ enum {
#define AM62P5_EFUSE_O_MPU_OPP 15
#define AM62P5_EFUSE_S_MPU_OPP 19
+#define AM62P5_EFUSE_T_MPU_OPP 20
#define AM62P5_EFUSE_U_MPU_OPP 21
+#define AM62P5_EFUSE_V_MPU_OPP 22
#define AM62P5_SUPPORT_O_MPU_OPP BIT(0)
#define AM62P5_SUPPORT_U_MPU_OPP BIT(2)
@@ -153,7 +155,9 @@ static unsigned long am62p5_efuse_xlate(struct ti_cpufreq_data *opp_data,
unsigned long calculated_efuse = AM62P5_SUPPORT_O_MPU_OPP;
switch (efuse) {
+ case AM62P5_EFUSE_V_MPU_OPP:
case AM62P5_EFUSE_U_MPU_OPP:
+ case AM62P5_EFUSE_T_MPU_OPP:
case AM62P5_EFUSE_S_MPU_OPP:
calculated_efuse |= AM62P5_SUPPORT_U_MPU_OPP;
fallthrough;
@@ -307,9 +311,10 @@ static struct ti_cpufreq_soc_data am3517_soc_data = {
};
static const struct soc_device_attribute k3_cpufreq_soc[] = {
- { .family = "AM62X", .revision = "SR1.0" },
- { .family = "AM62AX", .revision = "SR1.0" },
- { .family = "AM62PX", .revision = "SR1.0" },
+ { .family = "AM62X", },
+ { .family = "AM62AX", },
+ { .family = "AM62PX", },
+ { .family = "AM62DX", },
{ /* sentinel */ }
};
@@ -457,6 +462,7 @@ static const struct of_device_id ti_cpufreq_of_match[] __maybe_unused = {
{ .compatible = "ti,omap36xx", .data = &omap36xx_soc_data, },
{ .compatible = "ti,am625", .data = &am625_soc_data, },
{ .compatible = "ti,am62a7", .data = &am62a7_soc_data, },
+ { .compatible = "ti,am62d2", .data = &am62a7_soc_data, },
{ .compatible = "ti,am62p5", .data = &am62p5_soc_data, },
/* legacy */
{ .compatible = "ti,omap3430", .data = &omap34xx_soc_data, },
diff --git a/drivers/cpufreq/virtual-cpufreq.c b/drivers/cpufreq/virtual-cpufreq.c
index 7dd1b0c263c7..6ffa16d239b2 100644
--- a/drivers/cpufreq/virtual-cpufreq.c
+++ b/drivers/cpufreq/virtual-cpufreq.c
@@ -250,7 +250,7 @@ static int virt_cpufreq_offline(struct cpufreq_policy *policy)
static int virt_cpufreq_verify_policy(struct cpufreq_policy_data *policy)
{
if (policy->freq_table)
- return cpufreq_frequency_table_verify(policy, policy->freq_table);
+ return cpufreq_frequency_table_verify(policy);
cpufreq_verify_within_cpu_limits(policy);
return 0;
diff --git a/drivers/cpuidle/cpuidle-qcom-spm.c b/drivers/cpuidle/cpuidle-qcom-spm.c
index 5f386761b156..7ab6f68b96a8 100644
--- a/drivers/cpuidle/cpuidle-qcom-spm.c
+++ b/drivers/cpuidle/cpuidle-qcom-spm.c
@@ -86,9 +86,9 @@ static const struct of_device_id qcom_idle_state_match[] = {
static int spm_cpuidle_register(struct device *cpuidle_dev, int cpu)
{
- struct platform_device *pdev = NULL;
+ struct platform_device *pdev;
struct device_node *cpu_node, *saw_node;
- struct cpuidle_qcom_spm_data *data = NULL;
+ struct cpuidle_qcom_spm_data *data;
int ret;
cpu_node = of_cpu_device_node_get(cpu);
@@ -96,20 +96,23 @@ static int spm_cpuidle_register(struct device *cpuidle_dev, int cpu)
return -ENODEV;
saw_node = of_parse_phandle(cpu_node, "qcom,saw", 0);
+ of_node_put(cpu_node);
if (!saw_node)
return -ENODEV;
pdev = of_find_device_by_node(saw_node);
of_node_put(saw_node);
- of_node_put(cpu_node);
if (!pdev)
return -ENODEV;
data = devm_kzalloc(cpuidle_dev, sizeof(*data), GFP_KERNEL);
- if (!data)
+ if (!data) {
+ put_device(&pdev->dev);
return -ENOMEM;
+ }
data->spm = dev_get_drvdata(&pdev->dev);
+ put_device(&pdev->dev);
if (!data->spm)
return -EINVAL;
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 0835da449db8..56132e843c99 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -635,8 +635,14 @@ static void __cpuidle_device_init(struct cpuidle_device *dev)
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
+ unsigned int cpu = dev->cpu;
int i, ret;
+ if (per_cpu(cpuidle_devices, cpu)) {
+ pr_info("CPU%d: cpuidle device already registered\n", cpu);
+ return -EEXIST;
+ }
+
if (!try_module_get(drv->owner))
return -EINVAL;
@@ -648,7 +654,7 @@ static int __cpuidle_register_device(struct cpuidle_device *dev)
dev->states_usage[i].disable |= CPUIDLE_STATE_DISABLED_BY_USER;
}
- per_cpu(cpuidle_devices, dev->cpu) = dev;
+ per_cpu(cpuidle_devices, cpu) = dev;
list_add(&dev->device_list, &cpuidle_detected_devices);
ret = cpuidle_coupled_register_device(dev);
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index b2e3d0b0a116..4d9aa5ce31f0 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -314,45 +314,47 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
if (s->exit_latency_ns > latency_req)
break;
- if (s->target_residency_ns > predicted_ns) {
- /*
- * Use a physical idle state, not busy polling, unless
- * a timer is going to trigger soon enough.
- */
- if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) &&
- s->target_residency_ns <= data->next_timer_ns) {
- predicted_ns = s->target_residency_ns;
- idx = i;
- break;
- }
- if (predicted_ns < TICK_NSEC)
- break;
-
- if (!tick_nohz_tick_stopped()) {
- /*
- * If the state selected so far is shallow,
- * waking up early won't hurt, so retain the
- * tick in that case and let the governor run
- * again in the next iteration of the loop.
- */
- predicted_ns = drv->states[idx].target_residency_ns;
- break;
- }
+ if (s->target_residency_ns <= predicted_ns) {
+ idx = i;
+ continue;
+ }
+
+ /*
+ * Use a physical idle state, not busy polling, unless a timer
+ * is going to trigger soon enough.
+ */
+ if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) &&
+ s->target_residency_ns <= data->next_timer_ns) {
+ predicted_ns = s->target_residency_ns;
+ idx = i;
+ break;
+ }
+ if (predicted_ns < TICK_NSEC)
+ break;
+
+ if (!tick_nohz_tick_stopped()) {
/*
- * If the state selected so far is shallow and this
- * state's target residency matches the time till the
- * closest timer event, select this one to avoid getting
- * stuck in the shallow one for too long.
+ * If the state selected so far is shallow, waking up
+ * early won't hurt, so retain the tick in that case and
+ * let the governor run again in the next iteration of
+ * the idle loop.
*/
- if (drv->states[idx].target_residency_ns < TICK_NSEC &&
- s->target_residency_ns <= delta_tick)
- idx = i;
-
- return idx;
+ predicted_ns = drv->states[idx].target_residency_ns;
+ break;
}
- idx = i;
+ /*
+ * If the state selected so far is shallow and this state's
+ * target residency matches the time till the closest timer
+ * event, select this one to avoid getting stuck in the shallow
+ * one for too long.
+ */
+ if (drv->states[idx].target_residency_ns < TICK_NSEC &&
+ s->target_residency_ns <= delta_tick)
+ idx = i;
+
+ return idx;
}
if (idx == -1)
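The restructured loop now reads straight down: a state that still fits within predicted_ns simply becomes the new candidate, and the special cases (polling with a near timer, sub-tick predictions, a still-running tick, residency matching the next timer) exit early. A toy model of the core walk (state values invented; the tick and next-timer interactions are simplified away):

#include <stdio.h>

struct state { const char *name; long long exit_latency_ns, residency_ns; };

/* Toy model of the core selection: the deepest state whose exit latency
 * fits the latency limit and whose target residency fits the predicted
 * idle time. The real governor layers polling, tick and next-timer
 * special cases on top of this. */
static int pick_state(const struct state *s, int n,
		      long long latency_req, long long predicted_ns)
{
	int idx = 0;
	int i;

	for (i = 1; i < n; i++) {
		if (s[i].exit_latency_ns > latency_req)
			break;
		if (s[i].residency_ns <= predicted_ns)
			idx = i;	/* still fits: keep going deeper */
		else
			break;		/* too deep for the predicted idle */
	}
	return idx;
}

int main(void)
{
	const struct state states[] = {
		{ "POLL", 0, 0 },
		{ "C1", 2000, 2000 },
		{ "C6", 100000, 600000 },
	};

	printf("short idle -> %s\n",
	       states[pick_state(states, 3, 1000000, 5000)].name);
	printf("long idle  -> %s\n",
	       states[pick_state(states, 3, 1000000, 2000000)].name);
	return 0;
}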
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index d6f5da61cb7d..61de64817604 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -27,14 +27,14 @@ static ssize_t show_available_governors(struct device *dev,
mutex_lock(&cpuidle_lock);
list_for_each_entry(tmp, &cpuidle_governors, governor_list) {
- if (i >= (ssize_t) (PAGE_SIZE - (CPUIDLE_NAME_LEN + 2)))
+ if (i >= (ssize_t)(PAGE_SIZE - (CPUIDLE_NAME_LEN + 2)))
goto out;
- i += scnprintf(&buf[i], CPUIDLE_NAME_LEN + 1, "%s ", tmp->name);
+ i += sysfs_emit_at(buf, i, "%.*s ", CPUIDLE_NAME_LEN, tmp->name);
}
out:
- i+= sprintf(&buf[i], "\n");
+ i += sysfs_emit_at(buf, i, "\n");
mutex_unlock(&cpuidle_lock);
return i;
}
@@ -49,9 +49,9 @@ static ssize_t show_current_driver(struct device *dev,
spin_lock(&cpuidle_driver_lock);
drv = cpuidle_get_driver();
if (drv)
- ret = sprintf(buf, "%s\n", drv->name);
+ ret = sysfs_emit(buf, "%s\n", drv->name);
else
- ret = sprintf(buf, "none\n");
+ ret = sysfs_emit(buf, "none\n");
spin_unlock(&cpuidle_driver_lock);
return ret;
@@ -65,9 +65,9 @@ static ssize_t show_current_governor(struct device *dev,
mutex_lock(&cpuidle_lock);
if (cpuidle_curr_governor)
- ret = sprintf(buf, "%s\n", cpuidle_curr_governor->name);
+ ret = sysfs_emit(buf, "%s\n", cpuidle_curr_governor->name);
else
- ret = sprintf(buf, "none\n");
+ ret = sysfs_emit(buf, "none\n");
mutex_unlock(&cpuidle_lock);
return ret;
@@ -230,7 +230,7 @@ static struct cpuidle_state_attr attr_##_name = __ATTR(_name, 0644, show, store)
static ssize_t show_state_##_name(struct cpuidle_state *state, \
struct cpuidle_state_usage *state_usage, char *buf) \
{ \
- return sprintf(buf, "%u\n", state->_name);\
+ return sysfs_emit(buf, "%u\n", state->_name);\
}
#define define_show_state_ull_function(_name) \
@@ -238,7 +238,7 @@ static ssize_t show_state_##_name(struct cpuidle_state *state, \
struct cpuidle_state_usage *state_usage, \
char *buf) \
{ \
- return sprintf(buf, "%llu\n", state_usage->_name);\
+ return sysfs_emit(buf, "%llu\n", state_usage->_name);\
}
#define define_show_state_str_function(_name) \
@@ -247,8 +247,8 @@ static ssize_t show_state_##_name(struct cpuidle_state *state, \
char *buf) \
{ \
if (state->_name[0] == '\0')\
- return sprintf(buf, "<null>\n");\
- return sprintf(buf, "%s\n", state->_name);\
+ return sysfs_emit(buf, "<null>\n");\
+ return sysfs_emit(buf, "%s\n", state->_name);\
}
#define define_show_state_time_function(_name) \
@@ -256,7 +256,7 @@ static ssize_t show_state_##_name(struct cpuidle_state *state, \
struct cpuidle_state_usage *state_usage, \
char *buf) \
{ \
- return sprintf(buf, "%llu\n", ktime_to_us(state->_name##_ns)); \
+ return sysfs_emit(buf, "%llu\n", ktime_to_us(state->_name##_ns)); \
}
define_show_state_time_function(exit_latency)
@@ -273,14 +273,14 @@ static ssize_t show_state_time(struct cpuidle_state *state,
struct cpuidle_state_usage *state_usage,
char *buf)
{
- return sprintf(buf, "%llu\n", ktime_to_us(state_usage->time_ns));
+ return sysfs_emit(buf, "%llu\n", ktime_to_us(state_usage->time_ns));
}
static ssize_t show_state_disable(struct cpuidle_state *state,
struct cpuidle_state_usage *state_usage,
char *buf)
{
- return sprintf(buf, "%llu\n",
+ return sysfs_emit(buf, "%llu\n",
state_usage->disable & CPUIDLE_STATE_DISABLED_BY_USER);
}
@@ -310,7 +310,7 @@ static ssize_t show_state_default_status(struct cpuidle_state *state,
struct cpuidle_state_usage *state_usage,
char *buf)
{
- return sprintf(buf, "%s\n",
+ return sysfs_emit(buf, "%s\n",
state->flags & CPUIDLE_FLAG_OFF ? "disabled" : "enabled");
}
@@ -358,7 +358,7 @@ static ssize_t show_state_s2idle_##_name(struct cpuidle_state *state, \
struct cpuidle_state_usage *state_usage, \
char *buf) \
{ \
- return sprintf(buf, "%llu\n", state_usage->s2idle_##_name);\
+ return sysfs_emit(buf, "%llu\n", state_usage->s2idle_##_name);\
}
define_show_state_s2idle_ull_function(usage);
@@ -550,7 +550,7 @@ static ssize_t show_driver_name(struct cpuidle_driver *drv, char *buf)
ssize_t ret;
spin_lock(&cpuidle_driver_lock);
- ret = sprintf(buf, "%s\n", drv ? drv->name : "none");
+ ret = sysfs_emit(buf, "%s\n", drv ? drv->name : "none");
spin_unlock(&cpuidle_driver_lock);
return ret;
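
The sysfs_emit() conversions above are more than cosmetic: unlike sprintf(), sysfs_emit() verifies that the buffer is the page-aligned one handed in by the sysfs core and never writes past PAGE_SIZE, and sysfs_emit_at() gives the same guarantee when appending at an offset, as in show_available_governors(). A minimal sketch of the pattern; the attribute name is hypothetical, not part of this patch:

	#include <linux/device.h>
	#include <linux/sysfs.h>

	/* Hypothetical read-only attribute illustrating the sysfs_emit()
	 * pattern adopted above; sysfs_emit() bounds the write to PAGE_SIZE
	 * and warns if @buf is not the page-aligned buffer sysfs passed in.
	 */
	static ssize_t example_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
	{
		return sysfs_emit(buf, "%d\n", 42);
	}
	static DEVICE_ATTR_RO(example);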
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index c7a1060ba57a..76ea9273d1dc 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -439,7 +439,7 @@ config CRYPTO_DEV_ATMEL_AUTHENC
config CRYPTO_DEV_ATMEL_AES
tristate "Support for Atmel AES hw accelerator"
- depends on ARCH_AT91 || COMPILE_TEST
+ depends on ARCH_MICROCHIP || COMPILE_TEST
select CRYPTO_AES
select CRYPTO_AEAD
select CRYPTO_SKCIPHER
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index 65d6d0af140a..8dff5c2c40fd 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -28,6 +28,7 @@
#include <linux/fs_struct.h>
#include <linux/psp.h>
#include <linux/amd-iommu.h>
+#include <linux/crash_dump.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
@@ -1526,6 +1527,15 @@ static int _sev_platform_init_locked(struct sev_platform_init_args *args)
if (!psp_master || !psp_master->sev_data)
return -ENODEV;
+ /*
+ * Skip SNP/SEV initialization under a kdump kernel as SEV/SNP
+ * may already be initialized in the previous kernel. Since no
+ * SNP/SEV guests are run under a kdump kernel, there is no
+ * need to initialize SNP or SEV during kdump boot.
+ */
+ if (is_kdump_kernel())
+ return 0;
+
sev = psp_master->sev_data;
if (sev->state == SEV_STATE_INIT)
diff --git a/drivers/devfreq/event/rockchip-dfi.c b/drivers/devfreq/event/rockchip-dfi.c
index 0470d7c175f4..5a2c9badcc64 100644
--- a/drivers/devfreq/event/rockchip-dfi.c
+++ b/drivers/devfreq/event/rockchip-dfi.c
@@ -34,15 +34,18 @@
/* DDRMON_CTRL */
#define DDRMON_CTRL 0x04
+#define DDRMON_CTRL_LPDDR5 BIT(6)
#define DDRMON_CTRL_DDR4 BIT(5)
#define DDRMON_CTRL_LPDDR4 BIT(4)
#define DDRMON_CTRL_HARDWARE_EN BIT(3)
#define DDRMON_CTRL_LPDDR23 BIT(2)
#define DDRMON_CTRL_SOFTWARE_EN BIT(1)
#define DDRMON_CTRL_TIMER_CNT_EN BIT(0)
-#define DDRMON_CTRL_DDR_TYPE_MASK (DDRMON_CTRL_DDR4 | \
+#define DDRMON_CTRL_DDR_TYPE_MASK (DDRMON_CTRL_LPDDR5 | \
+ DDRMON_CTRL_DDR4 | \
DDRMON_CTRL_LPDDR4 | \
DDRMON_CTRL_LPDDR23)
+#define DDRMON_CTRL_LP5_BANK_MODE_MASK GENMASK(8, 7)
#define DDRMON_CH0_WR_NUM 0x20
#define DDRMON_CH0_RD_NUM 0x24
@@ -116,12 +119,60 @@ struct rockchip_dfi {
int buswidth[DMC_MAX_CHANNELS];
int ddrmon_stride;
bool ddrmon_ctrl_single;
+ u32 lp5_bank_mode;
+ bool lp5_ckr; /* true if in 4:1 command-to-data clock ratio mode */
+ unsigned int count_multiplier; /* number of data clocks per count */
};
+static int rockchip_dfi_ddrtype_to_ctrl(struct rockchip_dfi *dfi, u32 *ctrl,
+ u32 *mask)
+{
+ u32 ddrmon_ver;
+
+ *mask = DDRMON_CTRL_DDR_TYPE_MASK;
+
+ switch (dfi->ddr_type) {
+ case ROCKCHIP_DDRTYPE_LPDDR2:
+ case ROCKCHIP_DDRTYPE_LPDDR3:
+ *ctrl = DDRMON_CTRL_LPDDR23;
+ break;
+ case ROCKCHIP_DDRTYPE_LPDDR4:
+ case ROCKCHIP_DDRTYPE_LPDDR4X:
+ *ctrl = DDRMON_CTRL_LPDDR4;
+ break;
+ case ROCKCHIP_DDRTYPE_LPDDR5:
+ ddrmon_ver = readl_relaxed(dfi->regs);
+ if (ddrmon_ver < 0x40) {
+ *ctrl = DDRMON_CTRL_LPDDR5 | dfi->lp5_bank_mode;
+ *mask |= DDRMON_CTRL_LP5_BANK_MODE_MASK;
+ break;
+ }
+
+ /*
+ * As it is unknown whether the unpleasant special case
+ * behaviour used by the vendor kernel is needed for any
+ * shipping hardware, ask users to report if they have
+ * some of that hardware.
+ */
+ dev_err(&dfi->edev->dev,
+ "unsupported DDRMON version 0x%04X, please let linux-rockchip know!\n",
+ ddrmon_ver);
+ return -EOPNOTSUPP;
+ default:
+ dev_err(&dfi->edev->dev, "unsupported memory type 0x%X\n",
+ dfi->ddr_type);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static int rockchip_dfi_enable(struct rockchip_dfi *dfi)
{
void __iomem *dfi_regs = dfi->regs;
int i, ret = 0;
+ u32 ctrl;
+ u32 ctrl_mask;
mutex_lock(&dfi->mutex);
@@ -135,8 +186,11 @@ static int rockchip_dfi_enable(struct rockchip_dfi *dfi)
goto out;
}
+ ret = rockchip_dfi_ddrtype_to_ctrl(dfi, &ctrl, &ctrl_mask);
+ if (ret)
+ goto out;
+
for (i = 0; i < dfi->max_channels; i++) {
- u32 ctrl = 0;
if (!(dfi->channel_mask & BIT(i)))
continue;
@@ -146,21 +200,7 @@ static int rockchip_dfi_enable(struct rockchip_dfi *dfi)
DDRMON_CTRL_SOFTWARE_EN | DDRMON_CTRL_HARDWARE_EN),
dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL);
- /* set ddr type to dfi */
- switch (dfi->ddr_type) {
- case ROCKCHIP_DDRTYPE_LPDDR2:
- case ROCKCHIP_DDRTYPE_LPDDR3:
- ctrl = DDRMON_CTRL_LPDDR23;
- break;
- case ROCKCHIP_DDRTYPE_LPDDR4:
- case ROCKCHIP_DDRTYPE_LPDDR4X:
- ctrl = DDRMON_CTRL_LPDDR4;
- break;
- default:
- break;
- }
-
- writel_relaxed(HIWORD_UPDATE(ctrl, DDRMON_CTRL_DDR_TYPE_MASK),
+ writel_relaxed(HIWORD_UPDATE(ctrl, ctrl_mask),
dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL);
/* enable count, use software mode */
@@ -435,7 +475,7 @@ static u64 rockchip_ddr_perf_event_get_count(struct perf_event *event)
switch (event->attr.config) {
case PERF_EVENT_CYCLES:
- count = total.c[0].clock_cycles;
+ count = total.c[0].clock_cycles * dfi->count_multiplier;
break;
case PERF_EVENT_READ_BYTES:
for (i = 0; i < dfi->max_channels; i++)
@@ -651,10 +691,14 @@ static int rockchip_ddr_perf_init(struct rockchip_dfi *dfi)
break;
case ROCKCHIP_DDRTYPE_LPDDR4:
case ROCKCHIP_DDRTYPE_LPDDR4X:
+ case ROCKCHIP_DDRTYPE_LPDDR5:
dfi->burst_len = 16;
break;
}
+ if (!dfi->count_multiplier)
+ dfi->count_multiplier = 1;
+
ret = perf_pmu_register(pmu, "rockchip_ddr", -1);
if (ret)
return ret;
@@ -726,7 +770,7 @@ static int rk3568_dfi_init(struct rockchip_dfi *dfi)
static int rk3588_dfi_init(struct rockchip_dfi *dfi)
{
struct regmap *regmap_pmu = dfi->regmap_pmu;
- u32 reg2, reg3, reg4;
+ u32 reg2, reg3, reg4, reg6;
regmap_read(regmap_pmu, RK3588_PMUGRF_OS_REG2, &reg2);
regmap_read(regmap_pmu, RK3588_PMUGRF_OS_REG3, &reg3);
@@ -751,6 +795,15 @@ static int rk3588_dfi_init(struct rockchip_dfi *dfi)
dfi->max_channels = 4;
dfi->ddrmon_stride = 0x4000;
+ dfi->count_multiplier = 2;
+
+ if (dfi->ddr_type == ROCKCHIP_DDRTYPE_LPDDR5) {
+ regmap_read(regmap_pmu, RK3588_PMUGRF_OS_REG6, &reg6);
+ dfi->lp5_bank_mode = FIELD_GET(RK3588_PMUGRF_OS_REG6_LP5_BANK_MODE, reg6) << 7;
+ dfi->lp5_ckr = FIELD_GET(RK3588_PMUGRF_OS_REG6_LP5_CKR, reg6);
+ if (dfi->lp5_ckr)
+ dfi->count_multiplier *= 2;
+ }
return 0;
};
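
The writel_relaxed(HIWORD_UPDATE(ctrl, ctrl_mask), ...) calls above rely on the Rockchip "hiword-mask" register convention: the upper 16 bits of the written word select which of the lower 16 bits take effect, so no read-modify-write cycle is needed. A sketch, assuming the macro definition commonly used in Rockchip drivers (it is defined earlier in this file, outside the hunks shown):

	/* Hiword-mask write: bits 31:16 act as a write-enable mask for
	 * bits 15:0, so unmasked bits keep their current value.
	 */
	#define HIWORD_UPDATE(val, mask)	((val) | (mask) << 16)

	/* e.g. switch the DDR type field without touching the enable bits */
	writel_relaxed(HIWORD_UPDATE(DDRMON_CTRL_LPDDR5, DDRMON_CTRL_DDR_TYPE_MASK),
		       dfi_regs + DDRMON_CTRL);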
diff --git a/drivers/devfreq/mtk-cci-devfreq.c b/drivers/devfreq/mtk-cci-devfreq.c
index 22fe9e631f8a..4c22be728f6a 100644
--- a/drivers/devfreq/mtk-cci-devfreq.c
+++ b/drivers/devfreq/mtk-cci-devfreq.c
@@ -86,7 +86,7 @@ static int mtk_ccifreq_set_voltage(struct mtk_ccifreq_drv *drv, int new_voltage)
soc_data->sram_max_volt);
return ret;
}
- } else if (pre_voltage > new_voltage) {
+ } else {
voltage = max(new_voltage,
pre_vsram - soc_data->max_volt_shift);
ret = regulator_set_voltage(drv->proc_reg, voltage,
@@ -386,7 +386,8 @@ out_disable_cci_clk:
out_free_resources:
if (regulator_is_enabled(drv->proc_reg))
regulator_disable(drv->proc_reg);
- if (drv->sram_reg && regulator_is_enabled(drv->sram_reg))
+ if (!IS_ERR_OR_NULL(drv->sram_reg) &&
+ regulator_is_enabled(drv->sram_reg))
regulator_disable(drv->sram_reg);
return ret;
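
The IS_ERR_OR_NULL() fix above matters because the sram supply is optional: when it is absent, drv->sram_reg holds an ERR_PTR() rather than NULL, and the old `if (drv->sram_reg)` test would pass that error pointer straight into regulator_is_enabled(). A sketch of the pattern, assuming the supply is requested with devm_regulator_get_optional():

	/* Optional supply: an absent regulator yields ERR_PTR(-ENODEV),
	 * so a plain NULL check is not sufficient before using it.
	 */
	drv->sram_reg = devm_regulator_get_optional(dev, "sram");

	/* ... later, on the error path ... */
	if (!IS_ERR_OR_NULL(drv->sram_reg) && regulator_is_enabled(drv->sram_reg))
		regulator_disable(drv->sram_reg);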
diff --git a/drivers/dibs/Kconfig b/drivers/dibs/Kconfig
new file mode 100644
index 000000000000..5dc347b9b235
--- /dev/null
+++ b/drivers/dibs/Kconfig
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0
+config DIBS
+ tristate "DIBS support"
+ default n
+ help
+ Direct Internal Buffer Sharing (DIBS)
+ A communication method that uses common physical (internal) memory
+ for synchronous direct access into a remote buffer.
+
+ Select this option to provide the abstraction layer between
+ dibs devices and dibs clients like the SMC protocol.
+ The module name is dibs.
+
+config DIBS_LO
+ bool "intra-OS shortcut with dibs loopback"
+ depends on DIBS
+ default n
+ help
+	  DIBS_LO enables the creation of a software-emulated dibs device
+	  named lo, which can be used for transferring data when communication
+ occurs within the same OS. This helps in convenient testing of
+ dibs clients, since dibs loopback is independent of architecture or
+ hardware.
diff --git a/drivers/dibs/Makefile b/drivers/dibs/Makefile
new file mode 100644
index 000000000000..85805490c77f
--- /dev/null
+++ b/drivers/dibs/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# DIBS class module
+#
+
+dibs-y += dibs_main.o
+obj-$(CONFIG_DIBS) += dibs.o
+dibs-$(CONFIG_DIBS_LO) += dibs_loopback.o
\ No newline at end of file
diff --git a/drivers/dibs/dibs_loopback.c b/drivers/dibs/dibs_loopback.c
new file mode 100644
index 000000000000..aa029e29c6b2
--- /dev/null
+++ b/drivers/dibs/dibs_loopback.c
@@ -0,0 +1,361 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Functions for dibs loopback/loopback-ism device.
+ *
+ * Copyright (c) 2024, Alibaba Inc.
+ *
+ * Author: Wen Gu <guwen@linux.alibaba.com>
+ * Tony Lu <tonylu@linux.alibaba.com>
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/dibs.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include "dibs_loopback.h"
+
+#define DIBS_LO_SUPPORT_NOCOPY 0x1
+#define DIBS_DMA_ADDR_INVALID (~(dma_addr_t)0)
+
+static const char dibs_lo_dev_name[] = "lo";
+/* global loopback device */
+static struct dibs_lo_dev *lo_dev;
+
+static u16 dibs_lo_get_fabric_id(struct dibs_dev *dibs)
+{
+ return DIBS_LOOPBACK_FABRIC;
+}
+
+static int dibs_lo_query_rgid(struct dibs_dev *dibs, const uuid_t *rgid,
+ u32 vid_valid, u32 vid)
+{
+ /* rgid should be the same as lgid */
+ if (!uuid_equal(rgid, &dibs->gid))
+ return -ENETUNREACH;
+ return 0;
+}
+
+static int dibs_lo_max_dmbs(void)
+{
+ return DIBS_LO_MAX_DMBS;
+}
+
+static int dibs_lo_register_dmb(struct dibs_dev *dibs, struct dibs_dmb *dmb,
+ struct dibs_client *client)
+{
+ struct dibs_lo_dmb_node *dmb_node, *tmp_node;
+ struct dibs_lo_dev *ldev;
+ struct folio *folio;
+ unsigned long flags;
+ int sba_idx, rc;
+
+ ldev = dibs->drv_priv;
+ sba_idx = dmb->idx;
+ /* check space for new dmb */
+ for_each_clear_bit(sba_idx, ldev->sba_idx_mask, DIBS_LO_MAX_DMBS) {
+ if (!test_and_set_bit(sba_idx, ldev->sba_idx_mask))
+ break;
+ }
+ if (sba_idx == DIBS_LO_MAX_DMBS)
+ return -ENOSPC;
+
+ dmb_node = kzalloc(sizeof(*dmb_node), GFP_KERNEL);
+ if (!dmb_node) {
+ rc = -ENOMEM;
+ goto err_bit;
+ }
+
+ dmb_node->sba_idx = sba_idx;
+ dmb_node->len = dmb->dmb_len;
+
+ /* not critical; fail under memory pressure and fallback to TCP */
+ folio = folio_alloc(GFP_KERNEL | __GFP_NOWARN | __GFP_NOMEMALLOC |
+ __GFP_NORETRY | __GFP_ZERO,
+ get_order(dmb_node->len));
+ if (!folio) {
+ rc = -ENOMEM;
+ goto err_node;
+ }
+ dmb_node->cpu_addr = folio_address(folio);
+ dmb_node->dma_addr = DIBS_DMA_ADDR_INVALID;
+ refcount_set(&dmb_node->refcnt, 1);
+
+again:
+ /* add new dmb into hash table */
+ get_random_bytes(&dmb_node->token, sizeof(dmb_node->token));
+ write_lock_bh(&ldev->dmb_ht_lock);
+ hash_for_each_possible(ldev->dmb_ht, tmp_node, list, dmb_node->token) {
+ if (tmp_node->token == dmb_node->token) {
+ write_unlock_bh(&ldev->dmb_ht_lock);
+ goto again;
+ }
+ }
+ hash_add(ldev->dmb_ht, &dmb_node->list, dmb_node->token);
+ write_unlock_bh(&ldev->dmb_ht_lock);
+ atomic_inc(&ldev->dmb_cnt);
+
+ dmb->idx = dmb_node->sba_idx;
+ dmb->dmb_tok = dmb_node->token;
+ dmb->cpu_addr = dmb_node->cpu_addr;
+ dmb->dma_addr = dmb_node->dma_addr;
+ dmb->dmb_len = dmb_node->len;
+
+ spin_lock_irqsave(&dibs->lock, flags);
+ dibs->dmb_clientid_arr[sba_idx] = client->id;
+ spin_unlock_irqrestore(&dibs->lock, flags);
+
+ return 0;
+
+err_node:
+ kfree(dmb_node);
+err_bit:
+ clear_bit(sba_idx, ldev->sba_idx_mask);
+ return rc;
+}
+
+static void __dibs_lo_unregister_dmb(struct dibs_lo_dev *ldev,
+ struct dibs_lo_dmb_node *dmb_node)
+{
+ /* remove dmb from hash table */
+ write_lock_bh(&ldev->dmb_ht_lock);
+ hash_del(&dmb_node->list);
+ write_unlock_bh(&ldev->dmb_ht_lock);
+
+ clear_bit(dmb_node->sba_idx, ldev->sba_idx_mask);
+ folio_put(virt_to_folio(dmb_node->cpu_addr));
+ kfree(dmb_node);
+
+ if (atomic_dec_and_test(&ldev->dmb_cnt))
+ wake_up(&ldev->ldev_release);
+}
+
+static int dibs_lo_unregister_dmb(struct dibs_dev *dibs, struct dibs_dmb *dmb)
+{
+ struct dibs_lo_dmb_node *dmb_node = NULL, *tmp_node;
+ struct dibs_lo_dev *ldev;
+ unsigned long flags;
+
+ ldev = dibs->drv_priv;
+
+ /* find dmb from hash table */
+ read_lock_bh(&ldev->dmb_ht_lock);
+ hash_for_each_possible(ldev->dmb_ht, tmp_node, list, dmb->dmb_tok) {
+ if (tmp_node->token == dmb->dmb_tok) {
+ dmb_node = tmp_node;
+ break;
+ }
+ }
+ read_unlock_bh(&ldev->dmb_ht_lock);
+ if (!dmb_node)
+ return -EINVAL;
+
+ if (refcount_dec_and_test(&dmb_node->refcnt)) {
+ spin_lock_irqsave(&dibs->lock, flags);
+ dibs->dmb_clientid_arr[dmb_node->sba_idx] = NO_DIBS_CLIENT;
+ spin_unlock_irqrestore(&dibs->lock, flags);
+
+ __dibs_lo_unregister_dmb(ldev, dmb_node);
+ }
+ return 0;
+}
+
+static int dibs_lo_support_dmb_nocopy(struct dibs_dev *dibs)
+{
+ return DIBS_LO_SUPPORT_NOCOPY;
+}
+
+static int dibs_lo_attach_dmb(struct dibs_dev *dibs, struct dibs_dmb *dmb)
+{
+ struct dibs_lo_dmb_node *dmb_node = NULL, *tmp_node;
+ struct dibs_lo_dev *ldev;
+
+ ldev = dibs->drv_priv;
+
+ /* find dmb_node according to dmb->dmb_tok */
+ read_lock_bh(&ldev->dmb_ht_lock);
+ hash_for_each_possible(ldev->dmb_ht, tmp_node, list, dmb->dmb_tok) {
+ if (tmp_node->token == dmb->dmb_tok) {
+ dmb_node = tmp_node;
+ break;
+ }
+ }
+ if (!dmb_node) {
+ read_unlock_bh(&ldev->dmb_ht_lock);
+ return -EINVAL;
+ }
+ read_unlock_bh(&ldev->dmb_ht_lock);
+
+ if (!refcount_inc_not_zero(&dmb_node->refcnt))
+ /* the dmb is being unregistered, but has
+ * not been removed from the hash table.
+ */
+ return -EINVAL;
+
+ /* provide dmb information */
+ dmb->idx = dmb_node->sba_idx;
+ dmb->dmb_tok = dmb_node->token;
+ dmb->cpu_addr = dmb_node->cpu_addr;
+ dmb->dma_addr = dmb_node->dma_addr;
+ dmb->dmb_len = dmb_node->len;
+ return 0;
+}
+
+static int dibs_lo_detach_dmb(struct dibs_dev *dibs, u64 token)
+{
+ struct dibs_lo_dmb_node *dmb_node = NULL, *tmp_node;
+ struct dibs_lo_dev *ldev;
+
+ ldev = dibs->drv_priv;
+
+ /* find dmb_node according to dmb->dmb_tok */
+ read_lock_bh(&ldev->dmb_ht_lock);
+ hash_for_each_possible(ldev->dmb_ht, tmp_node, list, token) {
+ if (tmp_node->token == token) {
+ dmb_node = tmp_node;
+ break;
+ }
+ }
+ if (!dmb_node) {
+ read_unlock_bh(&ldev->dmb_ht_lock);
+ return -EINVAL;
+ }
+ read_unlock_bh(&ldev->dmb_ht_lock);
+
+ if (refcount_dec_and_test(&dmb_node->refcnt))
+ __dibs_lo_unregister_dmb(ldev, dmb_node);
+ return 0;
+}
+
+static int dibs_lo_move_data(struct dibs_dev *dibs, u64 dmb_tok,
+ unsigned int idx, bool sf, unsigned int offset,
+ void *data, unsigned int size)
+{
+ struct dibs_lo_dmb_node *rmb_node = NULL, *tmp_node;
+ struct dibs_lo_dev *ldev;
+ u16 s_mask;
+ u8 client_id;
+ u32 sba_idx;
+
+ ldev = dibs->drv_priv;
+
+ read_lock_bh(&ldev->dmb_ht_lock);
+ hash_for_each_possible(ldev->dmb_ht, tmp_node, list, dmb_tok) {
+ if (tmp_node->token == dmb_tok) {
+ rmb_node = tmp_node;
+ break;
+ }
+ }
+ if (!rmb_node) {
+ read_unlock_bh(&ldev->dmb_ht_lock);
+ return -EINVAL;
+ }
+ memcpy((char *)rmb_node->cpu_addr + offset, data, size);
+ sba_idx = rmb_node->sba_idx;
+ read_unlock_bh(&ldev->dmb_ht_lock);
+
+ if (!sf)
+ return 0;
+
+ spin_lock(&dibs->lock);
+ client_id = dibs->dmb_clientid_arr[sba_idx];
+ s_mask = ror16(0x1000, idx);
+ if (likely(client_id != NO_DIBS_CLIENT && dibs->subs[client_id]))
+ dibs->subs[client_id]->ops->handle_irq(dibs, sba_idx, s_mask);
+ spin_unlock(&dibs->lock);
+
+ return 0;
+}
+
+static const struct dibs_dev_ops dibs_lo_ops = {
+ .get_fabric_id = dibs_lo_get_fabric_id,
+ .query_remote_gid = dibs_lo_query_rgid,
+ .max_dmbs = dibs_lo_max_dmbs,
+ .register_dmb = dibs_lo_register_dmb,
+ .unregister_dmb = dibs_lo_unregister_dmb,
+ .move_data = dibs_lo_move_data,
+ .support_mmapped_rdmb = dibs_lo_support_dmb_nocopy,
+ .attach_dmb = dibs_lo_attach_dmb,
+ .detach_dmb = dibs_lo_detach_dmb,
+};
+
+static void dibs_lo_dev_init(struct dibs_lo_dev *ldev)
+{
+ rwlock_init(&ldev->dmb_ht_lock);
+ hash_init(ldev->dmb_ht);
+ atomic_set(&ldev->dmb_cnt, 0);
+ init_waitqueue_head(&ldev->ldev_release);
+}
+
+static void dibs_lo_dev_exit(struct dibs_lo_dev *ldev)
+{
+ if (atomic_read(&ldev->dmb_cnt))
+ wait_event(ldev->ldev_release, !atomic_read(&ldev->dmb_cnt));
+}
+
+static int dibs_lo_dev_probe(void)
+{
+ struct dibs_lo_dev *ldev;
+ struct dibs_dev *dibs;
+ int ret;
+
+ ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
+ if (!ldev)
+ return -ENOMEM;
+
+ dibs = dibs_dev_alloc();
+ if (!dibs) {
+ kfree(ldev);
+ return -ENOMEM;
+ }
+
+ ldev->dibs = dibs;
+ dibs->drv_priv = ldev;
+ dibs_lo_dev_init(ldev);
+ uuid_gen(&dibs->gid);
+ dibs->ops = &dibs_lo_ops;
+
+ dibs->dev.parent = NULL;
+ dev_set_name(&dibs->dev, "%s", dibs_lo_dev_name);
+
+ ret = dibs_dev_add(dibs);
+ if (ret)
+ goto err_reg;
+ lo_dev = ldev;
+ return 0;
+
+err_reg:
+ kfree(dibs->dmb_clientid_arr);
+ /* pairs with dibs_dev_alloc() */
+ put_device(&dibs->dev);
+ kfree(ldev);
+
+ return ret;
+}
+
+static void dibs_lo_dev_remove(void)
+{
+ if (!lo_dev)
+ return;
+
+ dibs_dev_del(lo_dev->dibs);
+ dibs_lo_dev_exit(lo_dev);
+ /* pairs with dibs_dev_alloc() */
+ put_device(&lo_dev->dibs->dev);
+ kfree(lo_dev);
+ lo_dev = NULL;
+}
+
+int dibs_loopback_init(void)
+{
+ return dibs_lo_dev_probe();
+}
+
+void dibs_loopback_exit(void)
+{
+ dibs_lo_dev_remove();
+}
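
The signalling mask in dibs_lo_move_data() is the one non-obvious expression in this file: ror16(0x1000, idx) starts from bit 12 and rotates it right by idx positions, wrapping modulo 16, producing the per-index bit layout the client's interrupt handler is expected to decode. A few worked values:

	/* ror16() rotates a 16-bit value right; starting from 0x1000 (bit 12): */
	u16 m0 = ror16(0x1000, 0);	/* 0x1000 - bit 12 */
	u16 m1 = ror16(0x1000, 1);	/* 0x0800 - bit 11 */
	u16 m5 = ror16(0x1000, 5);	/* 0x0080 - bit 7  */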
diff --git a/drivers/dibs/dibs_loopback.h b/drivers/dibs/dibs_loopback.h
new file mode 100644
index 000000000000..0664f6a8e662
--- /dev/null
+++ b/drivers/dibs/dibs_loopback.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * dibs loopback (aka loopback-ism) device structure definitions.
+ *
+ * Copyright (c) 2024, Alibaba Inc.
+ *
+ * Author: Wen Gu <guwen@linux.alibaba.com>
+ * Tony Lu <tonylu@linux.alibaba.com>
+ *
+ */
+
+#ifndef _DIBS_LOOPBACK_H
+#define _DIBS_LOOPBACK_H
+
+#include <linux/dibs.h>
+#include <linux/hashtable.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+#if IS_ENABLED(CONFIG_DIBS_LO)
+#define DIBS_LO_DMBS_HASH_BITS 12
+#define DIBS_LO_MAX_DMBS 5000
+
+struct dibs_lo_dmb_node {
+ struct hlist_node list;
+ u64 token;
+ u32 len;
+ u32 sba_idx;
+ void *cpu_addr;
+ dma_addr_t dma_addr;
+ refcount_t refcnt;
+};
+
+struct dibs_lo_dev {
+ struct dibs_dev *dibs;
+ atomic_t dmb_cnt;
+ rwlock_t dmb_ht_lock;
+ DECLARE_BITMAP(sba_idx_mask, DIBS_LO_MAX_DMBS);
+ DECLARE_HASHTABLE(dmb_ht, DIBS_LO_DMBS_HASH_BITS);
+ wait_queue_head_t ldev_release;
+};
+
+int dibs_loopback_init(void);
+void dibs_loopback_exit(void);
+#else
+static inline int dibs_loopback_init(void)
+{
+ return 0;
+}
+
+static inline void dibs_loopback_exit(void)
+{
+}
+#endif
+
+#endif /* _DIBS_LOOPBACK_H */
diff --git a/drivers/dibs/dibs_main.c b/drivers/dibs/dibs_main.c
new file mode 100644
index 000000000000..0374f8350ff7
--- /dev/null
+++ b/drivers/dibs/dibs_main.c
@@ -0,0 +1,278 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DIBS - Direct Internal Buffer Sharing
+ *
+ * Implementation of the DIBS class module
+ *
+ * Copyright IBM Corp. 2025
+ */
+#define KMSG_COMPONENT "dibs"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/dibs.h>
+
+#include "dibs_loopback.h"
+
+MODULE_DESCRIPTION("Direct Internal Buffer Sharing class");
+MODULE_LICENSE("GPL");
+
+static struct class *dibs_class;
+
+/* use an array rather than a list for fast mapping: */
+static struct dibs_client *clients[MAX_DIBS_CLIENTS];
+static u8 max_client;
+static DEFINE_MUTEX(clients_lock);
+struct dibs_dev_list {
+ struct list_head list;
+ struct mutex mutex; /* protects dibs device list */
+};
+
+static struct dibs_dev_list dibs_dev_list = {
+ .list = LIST_HEAD_INIT(dibs_dev_list.list),
+ .mutex = __MUTEX_INITIALIZER(dibs_dev_list.mutex),
+};
+
+static void dibs_setup_forwarding(struct dibs_client *client,
+ struct dibs_dev *dibs)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dibs->lock, flags);
+ dibs->subs[client->id] = client;
+ spin_unlock_irqrestore(&dibs->lock, flags);
+}
+
+int dibs_register_client(struct dibs_client *client)
+{
+ struct dibs_dev *dibs;
+ int i, rc = -ENOSPC;
+
+ mutex_lock(&dibs_dev_list.mutex);
+ mutex_lock(&clients_lock);
+ for (i = 0; i < MAX_DIBS_CLIENTS; ++i) {
+ if (!clients[i]) {
+ clients[i] = client;
+ client->id = i;
+ if (i == max_client)
+ max_client++;
+ rc = 0;
+ break;
+ }
+ }
+ mutex_unlock(&clients_lock);
+
+ if (i < MAX_DIBS_CLIENTS) {
+ /* initialize with all devices that we got so far */
+ list_for_each_entry(dibs, &dibs_dev_list.list, list) {
+ dibs->priv[i] = NULL;
+ client->ops->add_dev(dibs);
+ dibs_setup_forwarding(client, dibs);
+ }
+ }
+ mutex_unlock(&dibs_dev_list.mutex);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(dibs_register_client);
+
+int dibs_unregister_client(struct dibs_client *client)
+{
+ struct dibs_dev *dibs;
+ unsigned long flags;
+ int max_dmbs;
+ int rc = 0;
+
+ mutex_lock(&dibs_dev_list.mutex);
+ list_for_each_entry(dibs, &dibs_dev_list.list, list) {
+ spin_lock_irqsave(&dibs->lock, flags);
+ max_dmbs = dibs->ops->max_dmbs();
+ for (int i = 0; i < max_dmbs; ++i) {
+ if (dibs->dmb_clientid_arr[i] == client->id) {
+ WARN(1, "%s: attempt to unregister '%s' with registered dmb(s)\n",
+ __func__, client->name);
+ rc = -EBUSY;
+ goto err_reg_dmb;
+ }
+ }
+ /* Stop forwarding IRQs and events */
+ dibs->subs[client->id] = NULL;
+ spin_unlock_irqrestore(&dibs->lock, flags);
+ clients[client->id]->ops->del_dev(dibs);
+ dibs->priv[client->id] = NULL;
+ }
+
+ mutex_lock(&clients_lock);
+ clients[client->id] = NULL;
+ if (client->id + 1 == max_client)
+ max_client--;
+ mutex_unlock(&clients_lock);
+
+ mutex_unlock(&dibs_dev_list.mutex);
+ return rc;
+
+err_reg_dmb:
+ spin_unlock_irqrestore(&dibs->lock, flags);
+ mutex_unlock(&dibs_dev_list.mutex);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(dibs_unregister_client);
+
+static void dibs_dev_release(struct device *dev)
+{
+ struct dibs_dev *dibs;
+
+ dibs = container_of(dev, struct dibs_dev, dev);
+
+ kfree(dibs);
+}
+
+struct dibs_dev *dibs_dev_alloc(void)
+{
+ struct dibs_dev *dibs;
+
+ dibs = kzalloc(sizeof(*dibs), GFP_KERNEL);
+ if (!dibs)
+ return dibs;
+ dibs->dev.release = dibs_dev_release;
+ dibs->dev.class = dibs_class;
+ device_initialize(&dibs->dev);
+
+ return dibs;
+}
+EXPORT_SYMBOL_GPL(dibs_dev_alloc);
+
+static ssize_t gid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct dibs_dev *dibs;
+
+ dibs = container_of(dev, struct dibs_dev, dev);
+
+ return sysfs_emit(buf, "%pUb\n", &dibs->gid);
+}
+static DEVICE_ATTR_RO(gid);
+
+static ssize_t fabric_id_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct dibs_dev *dibs;
+ u16 fabric_id;
+
+ dibs = container_of(dev, struct dibs_dev, dev);
+ fabric_id = dibs->ops->get_fabric_id(dibs);
+
+ return sysfs_emit(buf, "0x%04x\n", fabric_id);
+}
+static DEVICE_ATTR_RO(fabric_id);
+
+static struct attribute *dibs_dev_attrs[] = {
+ &dev_attr_gid.attr,
+ &dev_attr_fabric_id.attr,
+ NULL,
+};
+
+static const struct attribute_group dibs_dev_attr_group = {
+ .attrs = dibs_dev_attrs,
+};
+
+int dibs_dev_add(struct dibs_dev *dibs)
+{
+ int max_dmbs;
+ int i, ret;
+
+ max_dmbs = dibs->ops->max_dmbs();
+ spin_lock_init(&dibs->lock);
+ dibs->dmb_clientid_arr = kzalloc(max_dmbs, GFP_KERNEL);
+ if (!dibs->dmb_clientid_arr)
+ return -ENOMEM;
+ memset(dibs->dmb_clientid_arr, NO_DIBS_CLIENT, max_dmbs);
+
+ ret = device_add(&dibs->dev);
+ if (ret)
+ goto free_client_arr;
+
+ ret = sysfs_create_group(&dibs->dev.kobj, &dibs_dev_attr_group);
+ if (ret) {
+ dev_err(&dibs->dev, "sysfs_create_group failed for dibs_dev\n");
+ goto err_device_del;
+ }
+ mutex_lock(&dibs_dev_list.mutex);
+ mutex_lock(&clients_lock);
+ for (i = 0; i < max_client; ++i) {
+ if (clients[i]) {
+ clients[i]->ops->add_dev(dibs);
+ dibs_setup_forwarding(clients[i], dibs);
+ }
+ }
+ mutex_unlock(&clients_lock);
+ list_add(&dibs->list, &dibs_dev_list.list);
+ mutex_unlock(&dibs_dev_list.mutex);
+
+ return 0;
+
+err_device_del:
+ device_del(&dibs->dev);
+free_client_arr:
+ kfree(dibs->dmb_clientid_arr);
+ return ret;
+
+}
+EXPORT_SYMBOL_GPL(dibs_dev_add);
+
+void dibs_dev_del(struct dibs_dev *dibs)
+{
+ unsigned long flags;
+ int i;
+
+ sysfs_remove_group(&dibs->dev.kobj, &dibs_dev_attr_group);
+
+ spin_lock_irqsave(&dibs->lock, flags);
+ for (i = 0; i < MAX_DIBS_CLIENTS; ++i)
+ dibs->subs[i] = NULL;
+ spin_unlock_irqrestore(&dibs->lock, flags);
+
+ mutex_lock(&dibs_dev_list.mutex);
+ mutex_lock(&clients_lock);
+ for (i = 0; i < max_client; ++i) {
+ if (clients[i])
+ clients[i]->ops->del_dev(dibs);
+ }
+ mutex_unlock(&clients_lock);
+ list_del_init(&dibs->list);
+ mutex_unlock(&dibs_dev_list.mutex);
+
+ device_del(&dibs->dev);
+ kfree(dibs->dmb_clientid_arr);
+}
+EXPORT_SYMBOL_GPL(dibs_dev_del);
+
+static int __init dibs_init(void)
+{
+ int rc;
+
+ memset(clients, 0, sizeof(clients));
+ max_client = 0;
+
+ dibs_class = class_create("dibs");
+ if (IS_ERR(dibs_class))
+ return PTR_ERR(dibs_class);
+
+ rc = dibs_loopback_init();
+ if (rc)
+ pr_err("%s fails with %d\n", __func__, rc);
+
+ return rc;
+}
+
+static void __exit dibs_exit(void)
+{
+ dibs_loopback_exit();
+ class_destroy(dibs_class);
+}
+
+module_init(dibs_init);
+module_exit(dibs_exit);
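
dibs_main.c gives clients a deliberately small surface: register a struct dibs_client and its ops are invoked for every dibs device that exists now or appears later, with interrupts forwarded through the subs[] table. A minimal client sketch; the ops names (add_dev, del_dev, handle_irq) are taken from the call sites above, but the exact structure layouts and the "dibs_client_ops" type name live in <linux/dibs.h>, outside this diff, so treat the signatures as assumptions:

	#include <linux/dibs.h>
	#include <linux/module.h>

	static void my_add_dev(struct dibs_dev *dibs)
	{
		pr_info("dibs device %s appeared\n", dev_name(&dibs->dev));
	}

	static void my_del_dev(struct dibs_dev *dibs)
	{
		pr_info("dibs device %s vanished\n", dev_name(&dibs->dev));
	}

	static void my_handle_irq(struct dibs_dev *dibs, unsigned int idx, u16 mask)
	{
		/* data arrived in the DMB at index @idx */
	}

	static const struct dibs_client_ops my_client_ops = {
		.add_dev	= my_add_dev,
		.del_dev	= my_del_dev,
		.handle_irq	= my_handle_irq,
	};

	static struct dibs_client my_client = {
		.name	= "my_client",
		.ops	= &my_client_ops,
	};

	static int __init my_init(void)
	{
		return dibs_register_client(&my_client);
	}

	static void __exit my_exit(void)
	{
		dibs_unregister_client(&my_client);
	}

	module_init(my_init);
	module_exit(my_exit);
	MODULE_LICENSE("GPL");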
diff --git a/drivers/dma-buf/dma-heap.c b/drivers/dma-buf/dma-heap.c
index 3cbe87d4a464..8ab49924f8b7 100644
--- a/drivers/dma-buf/dma-heap.c
+++ b/drivers/dma-buf/dma-heap.c
@@ -11,6 +11,7 @@
#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/err.h>
+#include <linux/export.h>
#include <linux/list.h>
#include <linux/nospec.h>
#include <linux/syscalls.h>
@@ -202,6 +203,7 @@ void *dma_heap_get_drvdata(struct dma_heap *heap)
{
return heap->priv;
}
+EXPORT_SYMBOL_NS_GPL(dma_heap_get_drvdata, "DMA_BUF_HEAP");
/**
* dma_heap_get_name - get heap name
@@ -214,6 +216,7 @@ const char *dma_heap_get_name(struct dma_heap *heap)
{
return heap->name;
}
+EXPORT_SYMBOL_NS_GPL(dma_heap_get_name, "DMA_BUF_HEAP");
/**
* dma_heap_add - adds a heap to dmabuf heaps
@@ -303,6 +306,7 @@ err0:
kfree(heap);
return err_ret;
}
+EXPORT_SYMBOL_NS_GPL(dma_heap_add, "DMA_BUF_HEAP");
static char *dma_heap_devnode(const struct device *dev, umode_t *mode)
{
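
With dma_heap_add() and the accessors now exported into the "DMA_BUF_HEAP" symbol namespace, a heap implementation built as a separate module must import that namespace, or modpost rejects the references at build time. The one line such a module needs, using the string-literal form of MODULE_IMPORT_NS() that matches the exports above:

	#include <linux/module.h>

	/* Required in any module that links against the namespaced
	 * dma-heap exports introduced above.
	 */
	MODULE_IMPORT_NS("DMA_BUF_HEAP");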
diff --git a/drivers/dpll/dpll_netlink.c b/drivers/dpll/dpll_netlink.c
index 0a852011653c..74c1f0ca95f2 100644
--- a/drivers/dpll/dpll_netlink.c
+++ b/drivers/dpll/dpll_netlink.c
@@ -165,6 +165,27 @@ dpll_msg_add_phase_offset_monitor(struct sk_buff *msg, struct dpll_device *dpll,
}
static int
+dpll_msg_add_phase_offset_avg_factor(struct sk_buff *msg,
+ struct dpll_device *dpll,
+ struct netlink_ext_ack *extack)
+{
+ const struct dpll_device_ops *ops = dpll_device_ops(dpll);
+ u32 factor;
+ int ret;
+
+ if (ops->phase_offset_avg_factor_get) {
+ ret = ops->phase_offset_avg_factor_get(dpll, dpll_priv(dpll),
+ &factor, extack);
+ if (ret)
+ return ret;
+ if (nla_put_u32(msg, DPLL_A_PHASE_OFFSET_AVG_FACTOR, factor))
+ return -EMSGSIZE;
+ }
+
+ return 0;
+}
+
+static int
dpll_msg_add_lock_status(struct sk_buff *msg, struct dpll_device *dpll,
struct netlink_ext_ack *extack)
{
@@ -677,6 +698,9 @@ dpll_device_get_one(struct dpll_device *dpll, struct sk_buff *msg,
ret = dpll_msg_add_phase_offset_monitor(msg, dpll, extack);
if (ret)
return ret;
+ ret = dpll_msg_add_phase_offset_avg_factor(msg, dpll, extack);
+ if (ret)
+ return ret;
return 0;
}
@@ -840,6 +864,23 @@ dpll_phase_offset_monitor_set(struct dpll_device *dpll, struct nlattr *a,
}
static int
+dpll_phase_offset_avg_factor_set(struct dpll_device *dpll, struct nlattr *a,
+ struct netlink_ext_ack *extack)
+{
+ const struct dpll_device_ops *ops = dpll_device_ops(dpll);
+ u32 factor = nla_get_u32(a);
+
+ if (!ops->phase_offset_avg_factor_set) {
+ NL_SET_ERR_MSG_ATTR(extack, a,
+ "device not capable of changing phase offset average factor");
+ return -EOPNOTSUPP;
+ }
+
+ return ops->phase_offset_avg_factor_set(dpll, dpll_priv(dpll), factor,
+ extack);
+}
+
+static int
dpll_pin_freq_set(struct dpll_pin *pin, struct nlattr *a,
struct netlink_ext_ack *extack)
{
@@ -1736,14 +1777,25 @@ int dpll_nl_device_get_doit(struct sk_buff *skb, struct genl_info *info)
static int
dpll_set_from_nlattr(struct dpll_device *dpll, struct genl_info *info)
{
- int ret;
-
- if (info->attrs[DPLL_A_PHASE_OFFSET_MONITOR]) {
- struct nlattr *a = info->attrs[DPLL_A_PHASE_OFFSET_MONITOR];
+ struct nlattr *a;
+ int rem, ret;
- ret = dpll_phase_offset_monitor_set(dpll, a, info->extack);
- if (ret)
- return ret;
+ nla_for_each_attr(a, genlmsg_data(info->genlhdr),
+ genlmsg_len(info->genlhdr), rem) {
+ switch (nla_type(a)) {
+ case DPLL_A_PHASE_OFFSET_MONITOR:
+ ret = dpll_phase_offset_monitor_set(dpll, a,
+ info->extack);
+ if (ret)
+ return ret;
+ break;
+ case DPLL_A_PHASE_OFFSET_AVG_FACTOR:
+ ret = dpll_phase_offset_avg_factor_set(dpll, a,
+ info->extack);
+ if (ret)
+ return ret;
+ break;
+ }
}
return 0;
diff --git a/drivers/dpll/dpll_nl.c b/drivers/dpll/dpll_nl.c
index 9f2efaf25268..3c6d570babf8 100644
--- a/drivers/dpll/dpll_nl.c
+++ b/drivers/dpll/dpll_nl.c
@@ -42,9 +42,10 @@ static const struct nla_policy dpll_device_get_nl_policy[DPLL_A_ID + 1] = {
};
/* DPLL_CMD_DEVICE_SET - do */
-static const struct nla_policy dpll_device_set_nl_policy[DPLL_A_PHASE_OFFSET_MONITOR + 1] = {
+static const struct nla_policy dpll_device_set_nl_policy[DPLL_A_PHASE_OFFSET_AVG_FACTOR + 1] = {
[DPLL_A_ID] = { .type = NLA_U32, },
[DPLL_A_PHASE_OFFSET_MONITOR] = NLA_POLICY_MAX(NLA_U32, 1),
+ [DPLL_A_PHASE_OFFSET_AVG_FACTOR] = { .type = NLA_U32, },
};
/* DPLL_CMD_PIN_ID_GET - do */
@@ -112,7 +113,7 @@ static const struct genl_split_ops dpll_nl_ops[] = {
.doit = dpll_nl_device_set_doit,
.post_doit = dpll_post_doit,
.policy = dpll_device_set_nl_policy,
- .maxattr = DPLL_A_PHASE_OFFSET_MONITOR,
+ .maxattr = DPLL_A_PHASE_OFFSET_AVG_FACTOR,
.flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
},
{
diff --git a/drivers/dpll/zl3073x/Makefile b/drivers/dpll/zl3073x/Makefile
index c3e2f02f319d..84e22aae57e5 100644
--- a/drivers/dpll/zl3073x/Makefile
+++ b/drivers/dpll/zl3073x/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_ZL3073X) += zl3073x.o
-zl3073x-objs := core.o devlink.o dpll.o prop.o
+zl3073x-objs := core.o devlink.o dpll.o flash.o fw.o prop.o
obj-$(CONFIG_ZL3073X_I2C) += zl3073x_i2c.o
zl3073x_i2c-objs := i2c.o
diff --git a/drivers/dpll/zl3073x/core.c b/drivers/dpll/zl3073x/core.c
index 7ebcfc5ec1f0..092e7027948a 100644
--- a/drivers/dpll/zl3073x/core.c
+++ b/drivers/dpll/zl3073x/core.c
@@ -95,9 +95,9 @@ EXPORT_SYMBOL_NS_GPL(zl30735_chip_info, "ZL3073X");
#define ZL_RANGE_OFFSET 0x80
#define ZL_PAGE_SIZE 0x80
-#define ZL_NUM_PAGES 15
+#define ZL_NUM_PAGES 256
#define ZL_PAGE_SEL 0x7F
-#define ZL_PAGE_SEL_MASK GENMASK(3, 0)
+#define ZL_PAGE_SEL_MASK GENMASK(7, 0)
#define ZL_NUM_REGS (ZL_NUM_PAGES * ZL_PAGE_SIZE)
/* Regmap range configuration */
@@ -174,9 +174,10 @@ static bool
zl3073x_check_reg(struct zl3073x_dev *zldev, unsigned int reg, size_t size)
{
/* Check that multiop lock is held when accessing registers
- * from page 10 and above.
+ * from page 10 and above except the page 255 that does not
+ * need this protection.
*/
- if (ZL_REG_PAGE(reg) >= 10)
+ if (ZL_REG_PAGE(reg) >= 10 && ZL_REG_PAGE(reg) < 255)
lockdep_assert_held(&zldev->multiop_lock);
/* Check the index is in valid range for indexed register */
@@ -447,6 +448,152 @@ int zl3073x_mb_op(struct zl3073x_dev *zldev, unsigned int op_reg, u8 op_val,
}
/**
+ * zl3073x_do_hwreg_op - Perform HW register read/write operation
+ * @zldev: zl3073x device pointer
+ * @op: operation to perform
+ *
+ * Performs requested operation and waits for its completion.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_do_hwreg_op(struct zl3073x_dev *zldev, u8 op)
+{
+ int rc;
+
+ /* Set requested operation and set pending bit */
+ rc = zl3073x_write_u8(zldev, ZL_REG_HWREG_OP, op | ZL_HWREG_OP_PENDING);
+ if (rc)
+ return rc;
+
+ /* Poll for completion - pending bit cleared */
+ return zl3073x_poll_zero_u8(zldev, ZL_REG_HWREG_OP,
+ ZL_HWREG_OP_PENDING);
+}
+
+/**
+ * zl3073x_read_hwreg - Read HW register
+ * @zldev: zl3073x device pointer
+ * @addr: HW register address
+ * @value: Value of the HW register
+ *
+ * Reads HW register value and stores it into @value.
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_read_hwreg(struct zl3073x_dev *zldev, u32 addr, u32 *value)
+{
+ int rc;
+
+ /* Set address to read data from */
+ rc = zl3073x_write_u32(zldev, ZL_REG_HWREG_ADDR, addr);
+ if (rc)
+ return rc;
+
+ /* Perform the read operation */
+ rc = zl3073x_do_hwreg_op(zldev, ZL_HWREG_OP_READ);
+ if (rc)
+ return rc;
+
+ /* Read the received data */
+ return zl3073x_read_u32(zldev, ZL_REG_HWREG_READ_DATA, value);
+}
+
+/**
+ * zl3073x_write_hwreg - Write value to HW register
+ * @zldev: zl3073x device pointer
+ * @addr: HW registers address
+ * @value: Value to be written to HW register
+ *
+ * Stores the requested value into HW register.
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_write_hwreg(struct zl3073x_dev *zldev, u32 addr, u32 value)
+{
+ int rc;
+
+ /* Set address to write data to */
+ rc = zl3073x_write_u32(zldev, ZL_REG_HWREG_ADDR, addr);
+ if (rc)
+ return rc;
+
+ /* Set data to be written */
+ rc = zl3073x_write_u32(zldev, ZL_REG_HWREG_WRITE_DATA, value);
+ if (rc)
+ return rc;
+
+ /* Perform the write operation */
+ return zl3073x_do_hwreg_op(zldev, ZL_HWREG_OP_WRITE);
+}
+
+/**
+ * zl3073x_update_hwreg - Update certain bits in HW register
+ * @zldev: zl3073x device pointer
+ * @addr: HW register address
+ * @value: Value to be written into HW register
+ * @mask: Bitmask indicating bits to be updated
+ *
+ * Reads given HW register, updates requested bits specified by value and
+ * mask and writes result back to HW register.
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_update_hwreg(struct zl3073x_dev *zldev, u32 addr, u32 value,
+ u32 mask)
+{
+ u32 tmp;
+ int rc;
+
+ rc = zl3073x_read_hwreg(zldev, addr, &tmp);
+ if (rc)
+ return rc;
+
+ tmp &= ~mask;
+ tmp |= value & mask;
+
+ return zl3073x_write_hwreg(zldev, addr, tmp);
+}
+
+/**
+ * zl3073x_write_hwreg_seq - Write HW registers sequence
+ * @zldev: pointer to device structure
+ * @seq: pointer to first sequence item
+ * @num_items: number of items in sequence
+ *
+ * Writes given HW registers sequence.
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_write_hwreg_seq(struct zl3073x_dev *zldev,
+ const struct zl3073x_hwreg_seq_item *seq,
+ size_t num_items)
+{
+ int i, rc = 0;
+
+ for (i = 0; i < num_items; i++) {
+ dev_dbg(zldev->dev, "Write 0x%0x [0x%0x] to 0x%0x",
+ seq[i].value, seq[i].mask, seq[i].addr);
+
+ if (seq[i].mask == U32_MAX)
+ /* Write value directly */
+ rc = zl3073x_write_hwreg(zldev, seq[i].addr,
+ seq[i].value);
+ else
+ /* Update only bits specified by the mask */
+ rc = zl3073x_update_hwreg(zldev, seq[i].addr,
+ seq[i].value, seq[i].mask);
+ if (rc)
+ return rc;
+
+		if (seq[i].wait)
+			msleep(seq[i].wait);
+ }
+
+ return rc;
+}
+
+/**
* zl3073x_ref_state_fetch - get input reference state
* @zldev: pointer to zl3073x_dev structure
* @index: input reference index to fetch state for
@@ -809,21 +956,169 @@ zl3073x_dev_periodic_work(struct kthread_work *work)
msecs_to_jiffies(500));
}
+int zl3073x_dev_phase_avg_factor_set(struct zl3073x_dev *zldev, u8 factor)
+{
+ u8 dpll_meas_ctrl, value;
+ int rc;
+
+ /* Read DPLL phase measurement control register */
+ rc = zl3073x_read_u8(zldev, ZL_REG_DPLL_MEAS_CTRL, &dpll_meas_ctrl);
+ if (rc)
+ return rc;
+
+ /* Convert requested factor to register value */
+ value = (factor + 1) & 0x0f;
+
+ /* Update phase measurement control register */
+ dpll_meas_ctrl &= ~ZL_DPLL_MEAS_CTRL_AVG_FACTOR;
+ dpll_meas_ctrl |= FIELD_PREP(ZL_DPLL_MEAS_CTRL_AVG_FACTOR, value);
+ rc = zl3073x_write_u8(zldev, ZL_REG_DPLL_MEAS_CTRL, dpll_meas_ctrl);
+ if (rc)
+ return rc;
+
+ /* Save the new factor */
+ zldev->phase_avg_factor = factor;
+
+ return 0;
+}
+
+/**
+ * zl3073x_dev_phase_meas_setup - setup phase offset measurement
+ * @zldev: pointer to zl3073x_dev structure
+ *
+ * Enable phase offset measurement block, set measurement averaging factor
+ * and enable DPLL-to-its-ref phase measurement for all DPLLs.
+ *
+ * Returns: 0 on success, <0 on error
+ */
+static int
+zl3073x_dev_phase_meas_setup(struct zl3073x_dev *zldev)
+{
+ struct zl3073x_dpll *zldpll;
+ u8 dpll_meas_ctrl, mask = 0;
+ int rc;
+
+ /* Setup phase measurement averaging factor */
+ rc = zl3073x_dev_phase_avg_factor_set(zldev, zldev->phase_avg_factor);
+ if (rc)
+ return rc;
+
+ /* Read DPLL phase measurement control register */
+ rc = zl3073x_read_u8(zldev, ZL_REG_DPLL_MEAS_CTRL, &dpll_meas_ctrl);
+ if (rc)
+ return rc;
+
+ /* Enable DPLL measurement block */
+ dpll_meas_ctrl |= ZL_DPLL_MEAS_CTRL_EN;
+
+ /* Update phase measurement control register */
+ rc = zl3073x_write_u8(zldev, ZL_REG_DPLL_MEAS_CTRL, dpll_meas_ctrl);
+ if (rc)
+ return rc;
+
+ /* Enable DPLL-to-connected-ref measurement for each channel */
+ list_for_each_entry(zldpll, &zldev->dplls, list)
+ mask |= BIT(zldpll->id);
+
+ return zl3073x_write_u8(zldev, ZL_REG_DPLL_PHASE_ERR_READ_MASK, mask);
+}
+
+/**
+ * zl3073x_dev_start - Start normal operation
+ * @zldev: zl3073x device pointer
+ * @full: perform full initialization
+ *
+ * The function starts normal operation, which means registering all DPLLs and
+ * their pins, and starting monitoring. If full initialization is requested,
+ * the function additionally initializes the phase offset measurement block and
+ * fetches hardware-invariant parameters.
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_dev_start(struct zl3073x_dev *zldev, bool full)
+{
+ struct zl3073x_dpll *zldpll;
+ int rc;
+
+ if (full) {
+ /* Fetch device state */
+ rc = zl3073x_dev_state_fetch(zldev);
+ if (rc)
+ return rc;
+
+ /* Setup phase offset measurement block */
+ rc = zl3073x_dev_phase_meas_setup(zldev);
+ if (rc) {
+ dev_err(zldev->dev,
+ "Failed to setup phase measurement\n");
+ return rc;
+ }
+ }
+
+ /* Register all DPLLs */
+ list_for_each_entry(zldpll, &zldev->dplls, list) {
+ rc = zl3073x_dpll_register(zldpll);
+ if (rc) {
+ dev_err_probe(zldev->dev, rc,
+ "Failed to register DPLL%u\n",
+ zldpll->id);
+ return rc;
+ }
+ }
+
+ /* Perform initial firmware fine phase correction */
+ rc = zl3073x_dpll_init_fine_phase_adjust(zldev);
+ if (rc) {
+ dev_err_probe(zldev->dev, rc,
+ "Failed to init fine phase correction\n");
+ return rc;
+ }
+
+ /* Start monitoring */
+ kthread_queue_delayed_work(zldev->kworker, &zldev->work, 0);
+
+ return 0;
+}
+
+/**
+ * zl3073x_dev_stop - Stop normal operation
+ * @zldev: zl3073x device pointer
+ *
+ * The function stops normal operation, which means deregistering all
+ * DPLLs and their pins and stopping monitoring.
+ */
+void zl3073x_dev_stop(struct zl3073x_dev *zldev)
+{
+ struct zl3073x_dpll *zldpll;
+
+ /* Stop monitoring */
+ kthread_cancel_delayed_work_sync(&zldev->work);
+
+ /* Unregister all DPLLs */
+ list_for_each_entry(zldpll, &zldev->dplls, list) {
+ if (zldpll->dpll_dev)
+ zl3073x_dpll_unregister(zldpll);
+ }
+}
+
static void zl3073x_dev_dpll_fini(void *ptr)
{
struct zl3073x_dpll *zldpll, *next;
struct zl3073x_dev *zldev = ptr;
- /* Stop monitoring thread */
+ /* Stop monitoring and unregister DPLLs */
+ zl3073x_dev_stop(zldev);
+
+ /* Destroy monitoring thread */
if (zldev->kworker) {
- kthread_cancel_delayed_work_sync(&zldev->work);
kthread_destroy_worker(zldev->kworker);
zldev->kworker = NULL;
}
- /* Release DPLLs */
+ /* Free all DPLLs */
list_for_each_entry_safe(zldpll, next, &zldev->dplls, list) {
- zl3073x_dpll_unregister(zldpll);
list_del(&zldpll->list);
zl3073x_dpll_free(zldpll);
}
@@ -839,7 +1134,7 @@ zl3073x_devm_dpll_init(struct zl3073x_dev *zldev, u8 num_dplls)
INIT_LIST_HEAD(&zldev->dplls);
- /* Initialize all DPLLs */
+ /* Allocate all DPLLs */
for (i = 0; i < num_dplls; i++) {
zldpll = zl3073x_dpll_alloc(zldev, i);
if (IS_ERR(zldpll)) {
@@ -849,25 +1144,9 @@ zl3073x_devm_dpll_init(struct zl3073x_dev *zldev, u8 num_dplls)
goto error;
}
- rc = zl3073x_dpll_register(zldpll);
- if (rc) {
- dev_err_probe(zldev->dev, rc,
- "Failed to register DPLL%u\n", i);
- zl3073x_dpll_free(zldpll);
- goto error;
- }
-
list_add_tail(&zldpll->list, &zldev->dplls);
}
- /* Perform initial firmware fine phase correction */
- rc = zl3073x_dpll_init_fine_phase_adjust(zldev);
- if (rc) {
- dev_err_probe(zldev->dev, rc,
- "Failed to init fine phase correction\n");
- goto error;
- }
-
/* Initialize monitoring thread */
kthread_init_delayed_work(&zldev->work, zl3073x_dev_periodic_work);
kworker = kthread_run_worker(0, "zl3073x-%s", dev_name(zldev->dev));
@@ -875,9 +1154,14 @@ zl3073x_devm_dpll_init(struct zl3073x_dev *zldev, u8 num_dplls)
rc = PTR_ERR(kworker);
goto error;
}
-
zldev->kworker = kworker;
- kthread_queue_delayed_work(zldev->kworker, &zldev->work, 0);
+
+ /* Start normal operation */
+ rc = zl3073x_dev_start(zldev, true);
+ if (rc) {
+ dev_err_probe(zldev->dev, rc, "Failed to start device\n");
+ goto error;
+ }
/* Add devres action to release DPLL related resources */
rc = devm_add_action_or_reset(zldev->dev, zl3073x_dev_dpll_fini, zldev);
@@ -893,46 +1177,6 @@ error:
}
/**
- * zl3073x_dev_phase_meas_setup - setup phase offset measurement
- * @zldev: pointer to zl3073x_dev structure
- * @num_channels: number of DPLL channels
- *
- * Enable phase offset measurement block, set measurement averaging factor
- * and enable DPLL-to-its-ref phase measurement for all DPLLs.
- *
- * Returns: 0 on success, <0 on error
- */
-static int
-zl3073x_dev_phase_meas_setup(struct zl3073x_dev *zldev, int num_channels)
-{
- u8 dpll_meas_ctrl, mask;
- int i, rc;
-
- /* Read DPLL phase measurement control register */
- rc = zl3073x_read_u8(zldev, ZL_REG_DPLL_MEAS_CTRL, &dpll_meas_ctrl);
- if (rc)
- return rc;
-
- /* Setup phase measurement averaging factor */
- dpll_meas_ctrl &= ~ZL_DPLL_MEAS_CTRL_AVG_FACTOR;
- dpll_meas_ctrl |= FIELD_PREP(ZL_DPLL_MEAS_CTRL_AVG_FACTOR, 3);
-
- /* Enable DPLL measurement block */
- dpll_meas_ctrl |= ZL_DPLL_MEAS_CTRL_EN;
-
- /* Update phase measurement control register */
- rc = zl3073x_write_u8(zldev, ZL_REG_DPLL_MEAS_CTRL, dpll_meas_ctrl);
- if (rc)
- return rc;
-
- /* Enable DPLL-to-connected-ref measurement for each channel */
- for (i = 0, mask = 0; i < num_channels; i++)
- mask |= BIT(i);
-
- return zl3073x_write_u8(zldev, ZL_REG_DPLL_PHASE_ERR_READ_MASK, mask);
-}
-
-/**
* zl3073x_dev_probe - initialize zl3073x device
* @zldev: pointer to zl3073x device
* @chip_info: chip info based on compatible
@@ -991,6 +1235,9 @@ int zl3073x_dev_probe(struct zl3073x_dev *zldev,
*/
zldev->clock_id = get_random_u64();
+ /* Default phase offset averaging factor */
+ zldev->phase_avg_factor = 2;
+
/* Initialize mutex for operations where multiple reads, writes
* and/or polls are required to be done atomically.
*/
@@ -999,17 +1246,6 @@ int zl3073x_dev_probe(struct zl3073x_dev *zldev,
return dev_err_probe(zldev->dev, rc,
"Failed to initialize mutex\n");
- /* Fetch device state */
- rc = zl3073x_dev_state_fetch(zldev);
- if (rc)
- return rc;
-
- /* Setup phase offset measurement block */
- rc = zl3073x_dev_phase_meas_setup(zldev, chip_info->num_channels);
- if (rc)
- return dev_err_probe(zldev->dev, rc,
- "Failed to setup phase measurement\n");
-
/* Register DPLL channels */
rc = zl3073x_devm_dpll_init(zldev, chip_info->num_channels);
if (rc)
diff --git a/drivers/dpll/zl3073x/core.h b/drivers/dpll/zl3073x/core.h
index 71af2c800110..1dca4ddcf235 100644
--- a/drivers/dpll/zl3073x/core.h
+++ b/drivers/dpll/zl3073x/core.h
@@ -3,6 +3,7 @@
#ifndef _ZL3073X_CORE_H
#define _ZL3073X_CORE_H
+#include <linux/bitfield.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/mutex.h>
@@ -67,19 +68,19 @@ struct zl3073x_synth {
* @dev: pointer to device
* @regmap: regmap to access device registers
* @multiop_lock: to serialize multiple register operations
- * @clock_id: clock id of the device
* @ref: array of input references' invariants
* @out: array of outs' invariants
* @synth: array of synths' invariants
* @dplls: list of DPLLs
* @kworker: thread for periodic work
* @work: periodic work
+ * @clock_id: clock id of the device
+ * @phase_avg_factor: phase offset measurement averaging factor
*/
struct zl3073x_dev {
struct device *dev;
struct regmap *regmap;
struct mutex multiop_lock;
- u64 clock_id;
/* Invariants */
struct zl3073x_ref ref[ZL3073X_NUM_REFS];
@@ -92,6 +93,10 @@ struct zl3073x_dev {
/* Monitor */
struct kthread_worker *kworker;
struct kthread_delayed_work work;
+
+ /* Devlink parameters */
+ u64 clock_id;
+ u8 phase_avg_factor;
};
struct zl3073x_chip_info {
@@ -111,10 +116,42 @@ struct zl3073x_dev *zl3073x_devm_alloc(struct device *dev);
int zl3073x_dev_probe(struct zl3073x_dev *zldev,
const struct zl3073x_chip_info *chip_info);
+int zl3073x_dev_start(struct zl3073x_dev *zldev, bool full);
+void zl3073x_dev_stop(struct zl3073x_dev *zldev);
+
+static inline u8 zl3073x_dev_phase_avg_factor_get(struct zl3073x_dev *zldev)
+{
+ return zldev->phase_avg_factor;
+}
+
+int zl3073x_dev_phase_avg_factor_set(struct zl3073x_dev *zldev, u8 factor);
+
/**********************
* Registers operations
**********************/
+/**
+ * struct zl3073x_hwreg_seq_item - HW register write sequence item
+ * @addr: HW register to be written
+ * @value: value to be written to HW register
+ * @mask: bitmask indicating bits to be updated
+ * @wait: number of ms to wait after register write
+ */
+struct zl3073x_hwreg_seq_item {
+ u32 addr;
+ u32 value;
+ u32 mask;
+ u32 wait;
+};
+
+#define HWREG_SEQ_ITEM(_addr, _value, _mask, _wait) \
+{ \
+ .addr = _addr, \
+ .value = FIELD_PREP_CONST(_mask, _value), \
+ .mask = _mask, \
+ .wait = _wait, \
+}
+
int zl3073x_mb_op(struct zl3073x_dev *zldev, unsigned int op_reg, u8 op_val,
unsigned int mask_reg, u16 mask_val);
int zl3073x_poll_zero_u8(struct zl3073x_dev *zldev, unsigned int reg, u8 mask);
@@ -126,6 +163,13 @@ int zl3073x_write_u8(struct zl3073x_dev *zldev, unsigned int reg, u8 val);
int zl3073x_write_u16(struct zl3073x_dev *zldev, unsigned int reg, u16 val);
int zl3073x_write_u32(struct zl3073x_dev *zldev, unsigned int reg, u32 val);
int zl3073x_write_u48(struct zl3073x_dev *zldev, unsigned int reg, u64 val);
+int zl3073x_read_hwreg(struct zl3073x_dev *zldev, u32 addr, u32 *value);
+int zl3073x_write_hwreg(struct zl3073x_dev *zldev, u32 addr, u32 value);
+int zl3073x_update_hwreg(struct zl3073x_dev *zldev, u32 addr, u32 value,
+ u32 mask);
+int zl3073x_write_hwreg_seq(struct zl3073x_dev *zldev,
+ const struct zl3073x_hwreg_seq_item *seq,
+ size_t num_items);
/*****************
* Misc operations
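
Together, struct zl3073x_hwreg_seq_item and the HWREG_SEQ_ITEM() initializer let callers express multi-register bring-up sequences as data and hand them to zl3073x_write_hwreg_seq(). A sketch; the addresses and masks below are invented for illustration and are not real zl3073x registers:

	/* Hypothetical sequence: addresses/masks are illustrative only.
	 * A full mask (U32_MAX) is written directly; a partial mask goes
	 * through zl3073x_update_hwreg()'s read-modify-write; a non-zero
	 * wait sleeps for that many ms after the write.
	 */
	static const struct zl3073x_hwreg_seq_item example_seq[] = {
		HWREG_SEQ_ITEM(0x10000024, 1, BIT(3), 0),
		HWREG_SEQ_ITEM(0x10000400, 0x7, GENMASK(2, 0), 50),
	};

	rc = zl3073x_write_hwreg_seq(zldev, example_seq, ARRAY_SIZE(example_seq));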
diff --git a/drivers/dpll/zl3073x/devlink.c b/drivers/dpll/zl3073x/devlink.c
index 7e7fe726ee37..ccc22332b346 100644
--- a/drivers/dpll/zl3073x/devlink.c
+++ b/drivers/dpll/zl3073x/devlink.c
@@ -9,6 +9,8 @@
#include "core.h"
#include "devlink.h"
#include "dpll.h"
+#include "flash.h"
+#include "fw.h"
#include "regs.h"
/**
@@ -86,14 +88,12 @@ zl3073x_devlink_reload_down(struct devlink *devlink, bool netns_change,
struct netlink_ext_ack *extack)
{
struct zl3073x_dev *zldev = devlink_priv(devlink);
- struct zl3073x_dpll *zldpll;
if (action != DEVLINK_RELOAD_ACTION_DRIVER_REINIT)
return -EOPNOTSUPP;
- /* Unregister all DPLLs */
- list_for_each_entry(zldpll, &zldev->dplls, list)
- zl3073x_dpll_unregister(zldpll);
+ /* Stop normal operation */
+ zl3073x_dev_stop(zldev);
return 0;
}
@@ -107,7 +107,6 @@ zl3073x_devlink_reload_up(struct devlink *devlink,
{
struct zl3073x_dev *zldev = devlink_priv(devlink);
union devlink_param_value val;
- struct zl3073x_dpll *zldpll;
int rc;
if (action != DEVLINK_RELOAD_ACTION_DRIVER_REINIT)
@@ -125,24 +124,156 @@ zl3073x_devlink_reload_up(struct devlink *devlink,
zldev->clock_id = val.vu64;
}
- /* Re-register all DPLLs */
- list_for_each_entry(zldpll, &zldev->dplls, list) {
- rc = zl3073x_dpll_register(zldpll);
- if (rc)
- dev_warn(zldev->dev,
- "Failed to re-register DPLL%u\n", zldpll->id);
- }
+ /* Restart normal operation */
+ rc = zl3073x_dev_start(zldev, false);
+ if (rc)
+ dev_warn(zldev->dev, "Failed to re-start normal operation\n");
*actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
return 0;
}
+void zl3073x_devlink_flash_notify(struct zl3073x_dev *zldev, const char *msg,
+ const char *component, u32 done, u32 total)
+{
+ struct devlink *devlink = priv_to_devlink(zldev);
+
+ devlink_flash_update_status_notify(devlink, msg, component, done,
+ total);
+}
+
+/**
+ * zl3073x_devlink_flash_prepare - Prepare and enter flash mode
+ * @zldev: zl3073x device pointer
+ * @zlfw: pointer to loaded firmware
+ * @extack: netlink extack pointer to report errors
+ *
+ * The function stops normal operation and switches the device to flash mode.
+ * If an error occurs, normal operation is resumed.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_devlink_flash_prepare(struct zl3073x_dev *zldev,
+ struct zl3073x_fw *zlfw,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_fw_component *util;
+ int rc;
+
+ util = zlfw->component[ZL_FW_COMPONENT_UTIL];
+ if (!util) {
+ zl3073x_devlink_flash_notify(zldev,
+ "Utility is missing in firmware",
+ NULL, 0, 0);
+ return -ENOEXEC;
+ }
+
+ /* Stop normal operation prior entering flash mode */
+ zl3073x_dev_stop(zldev);
+
+ rc = zl3073x_flash_mode_enter(zldev, util->data, util->size, extack);
+ if (rc) {
+ zl3073x_devlink_flash_notify(zldev,
+ "Failed to enter flash mode",
+ NULL, 0, 0);
+
+ /* Resume normal operation */
+ zl3073x_dev_start(zldev, true);
+
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * zl3073x_devlink_flash_finish - Leave flash mode and resume normal operation
+ * @zldev: zl3073x device pointer
+ * @extack: netlink extack pointer to report errors
+ *
+ * The function switches the device back to standard mode and resumes normal
+ * operation.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_devlink_flash_finish(struct zl3073x_dev *zldev,
+ struct netlink_ext_ack *extack)
+{
+ int rc;
+
+ /* Reset device CPU to normal mode */
+ zl3073x_flash_mode_leave(zldev, extack);
+
+ /* Resume normal operation */
+ rc = zl3073x_dev_start(zldev, true);
+ if (rc)
+ zl3073x_devlink_flash_notify(zldev,
+ "Failed to start normal operation",
+ NULL, 0, 0);
+
+ return rc;
+}
+
+/**
+ * zl3073x_devlink_flash_update - Devlink flash update callback
+ * @devlink: devlink structure pointer
+ * @params: flashing parameters pointer
+ * @extack: netlink extack pointer to report errors
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_devlink_flash_update(struct devlink *devlink,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dev *zldev = devlink_priv(devlink);
+ struct zl3073x_fw *zlfw;
+ int rc = 0;
+
+ zlfw = zl3073x_fw_load(zldev, params->fw->data, params->fw->size,
+ extack);
+ if (IS_ERR(zlfw)) {
+ zl3073x_devlink_flash_notify(zldev, "Failed to load firmware",
+ NULL, 0, 0);
+ rc = PTR_ERR(zlfw);
+ goto finish;
+ }
+
+ /* Stop normal operation and enter flash mode */
+ rc = zl3073x_devlink_flash_prepare(zldev, zlfw, extack);
+ if (rc)
+ goto finish;
+
+ rc = zl3073x_fw_flash(zldev, zlfw, extack);
+ if (rc) {
+ zl3073x_devlink_flash_finish(zldev, extack);
+ goto finish;
+ }
+
+ /* Resume normal mode */
+ rc = zl3073x_devlink_flash_finish(zldev, extack);
+
+finish:
+ if (!IS_ERR(zlfw))
+ zl3073x_fw_free(zlfw);
+
+ zl3073x_devlink_flash_notify(zldev,
+ rc ? "Flashing failed" : "Flashing done",
+ NULL, 0, 0);
+
+ return rc;
+}
+
static const struct devlink_ops zl3073x_devlink_ops = {
.info_get = zl3073x_devlink_info_get,
.reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT),
.reload_down = zl3073x_devlink_reload_down,
.reload_up = zl3073x_devlink_reload_up,
+ .flash_update = zl3073x_devlink_flash_update,
};
static void
diff --git a/drivers/dpll/zl3073x/devlink.h b/drivers/dpll/zl3073x/devlink.h
index 037720db204f..63dfd6fa1cd6 100644
--- a/drivers/dpll/zl3073x/devlink.h
+++ b/drivers/dpll/zl3073x/devlink.h
@@ -9,4 +9,7 @@ struct zl3073x_dev *zl3073x_devm_alloc(struct device *dev);
int zl3073x_devlink_register(struct zl3073x_dev *zldev);
+void zl3073x_devlink_flash_notify(struct zl3073x_dev *zldev, const char *msg,
+ const char *component, u32 done, u32 total);
+
#endif /* _ZL3073X_DEVLINK_H */
diff --git a/drivers/dpll/zl3073x/dpll.c b/drivers/dpll/zl3073x/dpll.c
index 3e42e9e7fd27..93dc93eec79e 100644
--- a/drivers/dpll/zl3073x/dpll.c
+++ b/drivers/dpll/zl3073x/dpll.c
@@ -1577,6 +1577,59 @@ zl3073x_dpll_mode_get(const struct dpll_device *dpll, void *dpll_priv,
}
static int
+zl3073x_dpll_phase_offset_avg_factor_get(const struct dpll_device *dpll,
+ void *dpll_priv, u32 *factor,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+
+ *factor = zl3073x_dev_phase_avg_factor_get(zldpll->dev);
+
+ return 0;
+}
+
+static void
+zl3073x_dpll_change_work(struct work_struct *work)
+{
+ struct zl3073x_dpll *zldpll;
+
+ zldpll = container_of(work, struct zl3073x_dpll, change_work);
+ dpll_device_change_ntf(zldpll->dpll_dev);
+}
+
+static int
+zl3073x_dpll_phase_offset_avg_factor_set(const struct dpll_device *dpll,
+ void *dpll_priv, u32 factor,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *item, *zldpll = dpll_priv;
+ int rc;
+
+ if (factor > 15) {
+ NL_SET_ERR_MSG_FMT(extack,
+				   "Phase offset average factor has to be in the range <0,15>");
+ return -EINVAL;
+ }
+
+ rc = zl3073x_dev_phase_avg_factor_set(zldpll->dev, factor);
+ if (rc) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "Failed to set phase offset averaging factor");
+ return rc;
+ }
+
+ /* The averaging factor is common for all DPLL channels so after change
+ * we have to send a notification for other DPLL devices.
+ */
+ list_for_each_entry(item, &zldpll->dev->dplls, list) {
+ if (item != zldpll)
+ schedule_work(&item->change_work);
+ }
+
+ return 0;
+}
+
+static int
zl3073x_dpll_phase_offset_monitor_get(const struct dpll_device *dpll,
void *dpll_priv,
enum dpll_feature_state *state,
@@ -1635,6 +1688,8 @@ static const struct dpll_pin_ops zl3073x_dpll_output_pin_ops = {
static const struct dpll_device_ops zl3073x_dpll_device_ops = {
.lock_status_get = zl3073x_dpll_lock_status_get,
.mode_get = zl3073x_dpll_mode_get,
+ .phase_offset_avg_factor_get = zl3073x_dpll_phase_offset_avg_factor_get,
+ .phase_offset_avg_factor_set = zl3073x_dpll_phase_offset_avg_factor_set,
.phase_offset_monitor_get = zl3073x_dpll_phase_offset_monitor_get,
.phase_offset_monitor_set = zl3073x_dpll_phase_offset_monitor_set,
};
@@ -1983,6 +2038,8 @@ zl3073x_dpll_device_unregister(struct zl3073x_dpll *zldpll)
{
WARN(!zldpll->dpll_dev, "DPLL device is not registered\n");
+ cancel_work_sync(&zldpll->change_work);
+
dpll_device_unregister(zldpll->dpll_dev, &zl3073x_dpll_device_ops,
zldpll);
dpll_device_put(zldpll->dpll_dev);
@@ -2258,6 +2315,7 @@ zl3073x_dpll_alloc(struct zl3073x_dev *zldev, u8 ch)
zldpll->dev = zldev;
zldpll->id = ch;
INIT_LIST_HEAD(&zldpll->pins);
+ INIT_WORK(&zldpll->change_work, zl3073x_dpll_change_work);
return zldpll;
}
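
The work item added above exists because the averaging factor is shared by all channels of the chip: after one DPLL's setter changes it, the sibling DPLL devices must emit their own change notifications, and bouncing dpll_device_change_ntf() to a work item keeps that out of the setter's calling context, which already runs under the dpll core's locking. The lifecycle of the pattern, collected from the hunks above:

	INIT_WORK(&zldpll->change_work, zl3073x_dpll_change_work);	/* at alloc */
	schedule_work(&item->change_work);		/* from the sibling loop */
	cancel_work_sync(&zldpll->change_work);		/* before unregistering */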
diff --git a/drivers/dpll/zl3073x/dpll.h b/drivers/dpll/zl3073x/dpll.h
index 304910ffc9c0..e8c39b44b356 100644
--- a/drivers/dpll/zl3073x/dpll.h
+++ b/drivers/dpll/zl3073x/dpll.h
@@ -20,6 +20,7 @@
* @dpll_dev: pointer to registered DPLL device
* @lock_status: last saved DPLL lock status
* @pins: list of pins
+ * @change_work: device change notification work
*/
struct zl3073x_dpll {
struct list_head list;
@@ -32,6 +33,7 @@ struct zl3073x_dpll {
struct dpll_device *dpll_dev;
enum dpll_lock_status lock_status;
struct list_head pins;
+ struct work_struct change_work;
};
struct zl3073x_dpll *zl3073x_dpll_alloc(struct zl3073x_dev *zldev, u8 ch);
diff --git a/drivers/dpll/zl3073x/flash.c b/drivers/dpll/zl3073x/flash.c
new file mode 100644
index 000000000000..83452a77e3e9
--- /dev/null
+++ b/drivers/dpll/zl3073x/flash.c
@@ -0,0 +1,666 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/array_size.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/dev_printk.h>
+#include <linux/errno.h>
+#include <linux/jiffies.h>
+#include <linux/minmax.h>
+#include <linux/netlink.h>
+#include <linux/sched/signal.h>
+#include <linux/sizes.h>
+#include <linux/sprintf.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/unaligned.h>
+#include <net/devlink.h>
+
+#include "core.h"
+#include "devlink.h"
+#include "flash.h"
+
+#define ZL_FLASH_ERR_PFX "FW update failed: "
+#define ZL_FLASH_ERR_MSG(_extack, _msg, ...) \
+ NL_SET_ERR_MSG_FMT_MOD((_extack), ZL_FLASH_ERR_PFX _msg, \
+ ## __VA_ARGS__)
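+
+/* Example (illustrative): ZL_FLASH_ERR_MSG(extack, "cannot flash page %u",
+ * page) reports "FW update failed: cannot flash page %u" via the extack.
+ */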
+
+/**
+ * zl3073x_flash_download - Download image block to device memory
+ * @zldev: zl3073x device structure
+ * @component: name of the component to be downloaded
+ * @addr: device memory target address
+ * @data: pointer to data to download
+ * @size: size of data to download
+ * @extack: netlink extack pointer to report errors
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_flash_download(struct zl3073x_dev *zldev, const char *component,
+ u32 addr, const void *data, size_t size,
+ struct netlink_ext_ack *extack)
+{
+#define ZL_CHECK_DELAY 5000 /* Check for interrupt every 5 seconds */
+ unsigned long check_time;
+ const void *ptr, *end;
+ int rc = 0;
+
+ dev_dbg(zldev->dev, "Downloading %zu bytes to device memory at 0x%0x\n",
+ size, addr);
+
+ check_time = jiffies + msecs_to_jiffies(ZL_CHECK_DELAY);
+
+ for (ptr = data, end = data + size; ptr < end; ptr += 4, addr += 4) {
+ /* Write current word to HW memory */
+ rc = zl3073x_write_hwreg(zldev, addr,
+ get_unaligned((u32 *)ptr));
+ if (rc) {
+ ZL_FLASH_ERR_MSG(extack,
+ "failed to write to memory at 0x%0x",
+ addr);
+ return rc;
+ }
+
+ if (time_is_before_jiffies(check_time)) {
+ if (signal_pending(current)) {
+ ZL_FLASH_ERR_MSG(extack,
+ "Flashing interrupted");
+ return -EINTR;
+ }
+
+ check_time = jiffies + msecs_to_jiffies(ZL_CHECK_DELAY);
+ }
+
+ /* Report status after each 1 kB block */
+ if ((ptr - data) % 1024 == 0)
+ zl3073x_devlink_flash_notify(zldev, "Downloading image",
+ component, ptr - data,
+ size);
+ }
+
+ zl3073x_devlink_flash_notify(zldev, "Downloading image", component,
+ ptr - data, size);
+
+ dev_dbg(zldev->dev, "%zu bytes downloaded to device memory\n", size);
+
+ return rc;
+}
+
+/**
+ * zl3073x_flash_error_check - Check for flash utility errors
+ * @zldev: zl3073x device structure
+ * @extack: netlink extack pointer to report errors
+ *
+ * The function checks for errors detected by the flash utility and
+ * reports them if any were found.
+ *
+ * Return: 0 on success, -EIO when errors are detected
+ */
+static int
+zl3073x_flash_error_check(struct zl3073x_dev *zldev,
+ struct netlink_ext_ack *extack)
+{
+ u32 count, cause;
+ int rc;
+
+ rc = zl3073x_read_u32(zldev, ZL_REG_ERROR_COUNT, &count);
+ if (rc)
+ return rc;
+ else if (!count)
+ return 0; /* No error */
+
+ rc = zl3073x_read_u32(zldev, ZL_REG_ERROR_CAUSE, &cause);
+ if (rc)
+ return rc;
+
+ /* Report errors */
+ ZL_FLASH_ERR_MSG(extack,
+ "utility error occurred: count=%u cause=0x%x", count,
+ cause);
+
+ return -EIO;
+}
+
+/**
+ * zl3073x_flash_wait_ready - Check or wait for utility to be ready to flash
+ * @zldev: zl3073x device structure
+ * @timeout_ms: timeout for the waiting
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_flash_wait_ready(struct zl3073x_dev *zldev, unsigned int timeout_ms)
+{
+#define ZL_FLASH_POLL_DELAY_MS 100
+ unsigned long timeout;
+ int rc, i;
+
+ dev_dbg(zldev->dev, "Waiting for flashing to be ready\n");
+
+ timeout = jiffies + msecs_to_jiffies(timeout_ms);
+
+ for (i = 0; time_is_after_jiffies(timeout); i++) {
+ u8 value;
+
+ /* Check for interrupt every second */
+ if (i > 9) {
+ if (signal_pending(current))
+ return -EINTR;
+ i = 0;
+ }
+
+ rc = zl3073x_read_u8(zldev, ZL_REG_WRITE_FLASH, &value);
+ if (rc)
+ return rc;
+
+ value = FIELD_GET(ZL_WRITE_FLASH_OP, value);
+
+ if (value == ZL_WRITE_FLASH_OP_DONE)
+ return 0; /* Successfully done */
+
+ msleep(ZL_FLASH_POLL_DELAY_MS);
+ }
+
+ return -ETIMEDOUT;
+}
+
+/**
+ * zl3073x_flash_cmd_wait - Perform flash operation and wait for finish
+ * @zldev: zl3073x device structure
+ * @operation: operation to perform
+ * @extack: netlink extack pointer to report errors
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_flash_cmd_wait(struct zl3073x_dev *zldev, u32 operation,
+ struct netlink_ext_ack *extack)
+{
+#define ZL_FLASH_PHASE1_TIMEOUT_MS 60000 /* up to 1 minute */
+#define ZL_FLASH_PHASE2_TIMEOUT_MS 120000 /* up to 2 minutes */
+ u8 value;
+ int rc;
+
+ dev_dbg(zldev->dev, "Sending flash command: 0x%x\n", operation);
+
+ rc = zl3073x_flash_wait_ready(zldev, ZL_FLASH_PHASE1_TIMEOUT_MS);
+ if (rc)
+ return rc;
+
+ /* Issue the requested operation */
+ rc = zl3073x_read_u8(zldev, ZL_REG_WRITE_FLASH, &value);
+ if (rc)
+ return rc;
+
+ value &= ~ZL_WRITE_FLASH_OP;
+ value |= FIELD_PREP(ZL_WRITE_FLASH_OP, operation);
+
+ rc = zl3073x_write_u8(zldev, ZL_REG_WRITE_FLASH, value);
+ if (rc)
+ return rc;
+
+ /* Wait for command completion */
+ rc = zl3073x_flash_wait_ready(zldev, ZL_FLASH_PHASE2_TIMEOUT_MS);
+ if (rc)
+ return rc;
+
+ return zl3073x_flash_error_check(zldev, extack);
+}
+
+/**
+ * zl3073x_flash_get_sector_size - Get flash sector size
+ * @zldev: zl3073x device structure
+ * @sector_size: sector size returned by the function
+ *
+ * The function reads the flash sector size detected by flash utility and
+ * stores it into @sector_size.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_flash_get_sector_size(struct zl3073x_dev *zldev, size_t *sector_size)
+{
+ u8 flash_info;
+ int rc;
+
+ rc = zl3073x_read_u8(zldev, ZL_REG_FLASH_INFO, &flash_info);
+ if (rc)
+ return rc;
+
+ switch (FIELD_GET(ZL_FLASH_INFO_SECTOR_SIZE, flash_info)) {
+ case ZL_FLASH_INFO_SECTOR_4K:
+ *sector_size = SZ_4K;
+ break;
+ case ZL_FLASH_INFO_SECTOR_64K:
+ *sector_size = SZ_64K;
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+
+ return rc;
+}
+
+/**
+ * zl3073x_flash_block - Download and flash memory block
+ * @zldev: zl3073x device structure
+ * @component: component name
+ * @operation: flash operation to perform
+ * @page: destination flash page
+ * @addr: device memory address to load data
+ * @data: pointer to data to be flashed
+ * @size: size of data
+ * @extack: netlink extack pointer to report errors
+ *
+ * The function downloads the memory block given by the @data pointer and
+ * the size @size and flashes it into internal memory on flash page @page.
+ * The internal flash operation performed by the firmware is specified by
+ * the @operation parameter.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_flash_block(struct zl3073x_dev *zldev, const char *component,
+ u32 operation, u32 page, u32 addr, const void *data,
+ size_t size, struct netlink_ext_ack *extack)
+{
+ int rc;
+
+ /* Download block to device memory */
+ rc = zl3073x_flash_download(zldev, component, addr, data, size, extack);
+ if (rc)
+ return rc;
+
+ /* Set address to flash from */
+ rc = zl3073x_write_u32(zldev, ZL_REG_IMAGE_START_ADDR, addr);
+ if (rc)
+ return rc;
+
+ /* Set size of block to flash */
+ rc = zl3073x_write_u32(zldev, ZL_REG_IMAGE_SIZE, size);
+ if (rc)
+ return rc;
+
+ /* Set destination page to flash */
+ rc = zl3073x_write_u32(zldev, ZL_REG_FLASH_INDEX_WRITE, page);
+ if (rc)
+ return rc;
+
+ /* Set filling pattern */
+ rc = zl3073x_write_u32(zldev, ZL_REG_FILL_PATTERN, U32_MAX);
+ if (rc)
+ return rc;
+
+ zl3073x_devlink_flash_notify(zldev, "Flashing image", component, 0,
+ size);
+
+ dev_dbg(zldev->dev, "Flashing %zu bytes to page %u\n", size, page);
+
+ /* Execute sectors flash operation */
+ rc = zl3073x_flash_cmd_wait(zldev, operation, extack);
+ if (rc)
+ return rc;
+
+ zl3073x_devlink_flash_notify(zldev, "Flashing image", component, size,
+ size);
+
+ return 0;
+}
+
+/**
+ * zl3073x_flash_sectors - Flash sectors
+ * @zldev: zl3073x device structure
+ * @component: component name
+ * @page: destination flash page
+ * @addr: device memory address to load data
+ * @data: pointer to data to be flashed
+ * @size: size of data
+ * @extack: netlink extack pointer to report errors
+ *
+ * The function flashes given @data with size of @size to the internal flash
+ * memory block starting from page @page. The function uses sector flash
+ * method and has to take into account the flash sector size reported by
+ * the flash utility. Input data are split into blocks according to this
+ * sector size and each block is flashed separately.
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_flash_sectors(struct zl3073x_dev *zldev, const char *component,
+ u32 page, u32 addr, const void *data, size_t size,
+ struct netlink_ext_ack *extack)
+{
+#define ZL_FLASH_MAX_BLOCK_SIZE 0x0001E000
+#define ZL_FLASH_PAGE_SIZE 256
+ size_t max_block_size, block_size, sector_size;
+ const void *ptr, *end;
+ int rc;
+
+ /* Get flash sector size */
+ rc = zl3073x_flash_get_sector_size(zldev, &sector_size);
+ if (rc) {
+ ZL_FLASH_ERR_MSG(extack, "Failed to get flash sector size");
+ return rc;
+ }
+
+ /* Determine max block size depending on sector size */
+ max_block_size = ALIGN_DOWN(ZL_FLASH_MAX_BLOCK_SIZE, sector_size);
+
+ for (ptr = data, end = data + size; ptr < end; ptr += block_size) {
+ char comp_str[32];
+
+ block_size = min_t(size_t, max_block_size, end - ptr);
+
+ /* Add suffix '-partN' if the requested component size is
+ * greater than max_block_size.
+ */
+ if (max_block_size < size)
+ snprintf(comp_str, sizeof(comp_str), "%s-part%zu",
+ component, (ptr - data) / max_block_size + 1);
+ else
+ strscpy(comp_str, component);
+
+ /* Flash the memory block */
+ rc = zl3073x_flash_block(zldev, comp_str,
+ ZL_WRITE_FLASH_OP_SECTORS, page, addr,
+ ptr, block_size, extack);
+ if (rc)
+ goto finish;
+
+ /* Move to next page */
+ page += block_size / ZL_FLASH_PAGE_SIZE;
+ }
+
+finish:
+ zl3073x_devlink_flash_notify(zldev,
+ rc ? "Flashing failed" : "Flashing done",
+ component, 0, 0);
+
+ return rc;
+}
+
+/**
+ * zl3073x_flash_page - Flash page
+ * @zldev: zl3073x device structure
+ * @component: component name
+ * @page: destination flash page
+ * @addr: device memory address to load data
+ * @data: pointer to data to be flashed
+ * @size: size of data
+ * @extack: netlink extack pointer to report errors
+ *
+ * The function flashes given @data with size of @size to the internal flash
+ * memory block starting with page @page.
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_flash_page(struct zl3073x_dev *zldev, const char *component,
+ u32 page, u32 addr, const void *data, size_t size,
+ struct netlink_ext_ack *extack)
+{
+ int rc;
+
+ /* Flash the memory block */
+ rc = zl3073x_flash_block(zldev, component, ZL_WRITE_FLASH_OP_PAGE, page,
+ addr, data, size, extack);
+
+ zl3073x_devlink_flash_notify(zldev,
+ rc ? "Flashing failed" : "Flashing done",
+ component, 0, 0);
+
+ return rc;
+}
+
+/**
+ * zl3073x_flash_page_copy - Copy flash page
+ * @zldev: zl3073x device structure
+ * @component: component name
+ * @src_page: source page to copy
+ * @dst_page: destination page
+ * @extack: netlink extack pointer to report errors
+ *
+ * The function copies one flash page specified by @src_page into the flash
+ * page specified by @dst_page.
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_flash_page_copy(struct zl3073x_dev *zldev, const char *component,
+ u32 src_page, u32 dst_page,
+ struct netlink_ext_ack *extack)
+{
+ int rc;
+
+ /* Set source page to be copied */
+ rc = zl3073x_write_u32(zldev, ZL_REG_FLASH_INDEX_READ, src_page);
+ if (rc)
+ return rc;
+
+ /* Set destination page for the copy */
+ rc = zl3073x_write_u32(zldev, ZL_REG_FLASH_INDEX_WRITE, dst_page);
+ if (rc)
+ return rc;
+
+ /* Perform copy operation */
+ rc = zl3073x_flash_cmd_wait(zldev, ZL_WRITE_FLASH_OP_COPY_PAGE, extack);
+ if (rc)
+ ZL_FLASH_ERR_MSG(extack, "Failed to copy page %u to page %u",
+ src_page, dst_page);
+
+ return rc;
+}
+
+/**
+ * zl3073x_flash_mode_verify - Check flash utility
+ * @zldev: zl3073x device structure
+ *
+ * Return: 0 if the flash utility is ready, <0 on error
+ */
+static int
+zl3073x_flash_mode_verify(struct zl3073x_dev *zldev)
+{
+ u8 family, release;
+ u32 hash;
+ int rc;
+
+ rc = zl3073x_read_u32(zldev, ZL_REG_FLASH_HASH, &hash);
+ if (rc)
+ return rc;
+
+ rc = zl3073x_read_u8(zldev, ZL_REG_FLASH_FAMILY, &family);
+ if (rc)
+ return rc;
+
+ rc = zl3073x_read_u8(zldev, ZL_REG_FLASH_RELEASE, &release);
+ if (rc)
+ return rc;
+
+ dev_dbg(zldev->dev,
+ "Flash utility check: hash 0x%08x, fam 0x%02x, rel 0x%02x\n",
+ hash, family, release);
+
+ /* Return success for correct family */
+ return (family == 0x21) ? 0 : -ENODEV;
+}
+
+static int
+zl3073x_flash_host_ctrl_enable(struct zl3073x_dev *zldev)
+{
+ u8 host_ctrl;
+ int rc;
+
+ /* Enable host control */
+ rc = zl3073x_read_u8(zldev, ZL_REG_HOST_CONTROL, &host_ctrl);
+ if (rc)
+ return rc;
+
+ host_ctrl |= ZL_HOST_CONTROL_ENABLE;
+
+ return zl3073x_write_u8(zldev, ZL_REG_HOST_CONTROL, host_ctrl);
+}
+
+/**
+ * zl3073x_flash_mode_enter - Switch the device to flash mode
+ * @zldev: zl3073x device structure
+ * @util_ptr: buffer with flash utility
+ * @util_size: size of buffer with flash utility
+ * @extack: netlink extack pointer to report errors
+ *
+ * The function prepares and switches the device into flash mode.
+ *
+ * The procedure:
+ * 1) Stop device CPU by specific HW register sequence
+ * 2) Download flash utility to device memory
+ * 3) Resume device CPU by specific HW register sequence
+ * 4) Check communication with flash utility
+ * 5) Enable host control necessary to access flash API
+ * 6) Check for potential error detected by the utility
+ *
+ * The API provided by normal firmware is not available in flash mode
+ * so the caller has to ensure that this API is not used in this mode.
+ *
+ * After performing the flash operation the caller should call
+ * @zl3073x_flash_mode_leave to return to normal operation.
+ *
+ * Return: 0 on success, <0 on error.
+ */
+int zl3073x_flash_mode_enter(struct zl3073x_dev *zldev, const void *util_ptr,
+ size_t util_size, struct netlink_ext_ack *extack)
+{
+ /* Sequence to be written prior to utility download */
+ static const struct zl3073x_hwreg_seq_item pre_seq[] = {
+ HWREG_SEQ_ITEM(0x80000400, 1, BIT(0), 0),
+ HWREG_SEQ_ITEM(0x80206340, 1, BIT(4), 0),
+ HWREG_SEQ_ITEM(0x10000000, 1, BIT(2), 0),
+ HWREG_SEQ_ITEM(0x10000024, 0x00000001, U32_MAX, 0),
+ HWREG_SEQ_ITEM(0x10000020, 0x00000001, U32_MAX, 0),
+ HWREG_SEQ_ITEM(0x10000000, 1, BIT(10), 1000),
+ };
+ /* Sequence to be written after utility download */
+ static const struct zl3073x_hwreg_seq_item post_seq[] = {
+ HWREG_SEQ_ITEM(0x10400004, 0x000000C0, U32_MAX, 0),
+ HWREG_SEQ_ITEM(0x10400008, 0x00000000, U32_MAX, 0),
+ HWREG_SEQ_ITEM(0x10400010, 0x20000000, U32_MAX, 0),
+ HWREG_SEQ_ITEM(0x10400014, 0x20000004, U32_MAX, 0),
+ HWREG_SEQ_ITEM(0x10000000, 1, GENMASK(10, 9), 0),
+ HWREG_SEQ_ITEM(0x10000020, 0x00000000, U32_MAX, 0),
+ HWREG_SEQ_ITEM(0x10000000, 0, BIT(0), 1000),
+ };
+ int rc;
+
+ zl3073x_devlink_flash_notify(zldev, "Prepare flash mode", "utility",
+ 0, 0);
+
+ /* Execute pre-load sequence */
+ rc = zl3073x_write_hwreg_seq(zldev, pre_seq, ARRAY_SIZE(pre_seq));
+ if (rc) {
+ ZL_FLASH_ERR_MSG(extack, "cannot execute pre-load sequence");
+ goto error;
+ }
+
+ /* Download utility image to device memory */
+ rc = zl3073x_flash_download(zldev, "utility", 0x20000000, util_ptr,
+ util_size, extack);
+ if (rc) {
+ ZL_FLASH_ERR_MSG(extack, "cannot download flash utility");
+ goto error;
+ }
+
+ /* Execute post-load sequence */
+ rc = zl3073x_write_hwreg_seq(zldev, post_seq, ARRAY_SIZE(post_seq));
+ if (rc) {
+ ZL_FLASH_ERR_MSG(extack, "cannot execute post-load sequence");
+ goto error;
+ }
+
+ /* Check that utility identifies itself correctly */
+ rc = zl3073x_flash_mode_verify(zldev);
+ if (rc) {
+ ZL_FLASH_ERR_MSG(extack, "flash utility check failed");
+ goto error;
+ }
+
+ /* Enable host control */
+ rc = zl3073x_flash_host_ctrl_enable(zldev);
+ if (rc) {
+ ZL_FLASH_ERR_MSG(extack, "cannot enable host control");
+ goto error;
+ }
+
+ zl3073x_devlink_flash_notify(zldev, "Flash mode enabled", "utility",
+ 0, 0);
+
+ return 0;
+
+error:
+ zl3073x_flash_mode_leave(zldev, extack);
+
+ return rc;
+}
+
+/**
+ * zl3073x_flash_mode_leave - Leave flash mode
+ * @zldev: zl3073x device structure
+ * @extack: netlink extack pointer to report errors
+ *
+ * The function instructs the device to leave the flash mode and
+ * to return to normal operation.
+ *
+ * The procedure:
+ * 1) Set reset flag
+ * 2) Reset the device CPU by specific HW register sequence
+ * 3) Wait for the device to be ready
+ * 4) Check the reset flag was cleared
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_flash_mode_leave(struct zl3073x_dev *zldev,
+ struct netlink_ext_ack *extack)
+{
+ /* Sequence to be written after flash */
+ static const struct zl3073x_hwreg_seq_item fw_reset_seq[] = {
+ HWREG_SEQ_ITEM(0x80000404, 1, BIT(0), 0),
+ HWREG_SEQ_ITEM(0x80000410, 1, BIT(0), 0),
+ };
+ u8 reset_status;
+ int rc;
+
+ zl3073x_devlink_flash_notify(zldev, "Leaving flash mode", "utility",
+ 0, 0);
+
+ /* Read reset status register */
+ rc = zl3073x_read_u8(zldev, ZL_REG_RESET_STATUS, &reset_status);
+ if (rc)
+ return rc;
+
+ /* Set reset bit */
+ reset_status |= ZL_REG_RESET_STATUS_RESET;
+
+ /* Update reset status register */
+ rc = zl3073x_write_u8(zldev, ZL_REG_RESET_STATUS, reset_status);
+ if (rc)
+ return rc;
+
+ /* We do not check the return value here as the sequence resets
+ * the device CPU and the last write always returns an error.
+ */
+ zl3073x_write_hwreg_seq(zldev, fw_reset_seq, ARRAY_SIZE(fw_reset_seq));
+
+ /* Wait for the device to be ready */
+ msleep(500);
+
+ /* Read again the reset status register */
+ rc = zl3073x_read_u8(zldev, ZL_REG_RESET_STATUS, &reset_status);
+ if (rc)
+ return rc;
+
+ /* Check the reset bit was cleared */
+ if (reset_status & ZL_REG_RESET_STATUS_RESET) {
+ dev_err(zldev->dev,
+ "Reset not confirmed after switch to normal mode\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
diff --git a/drivers/dpll/zl3073x/flash.h b/drivers/dpll/zl3073x/flash.h
new file mode 100644
index 000000000000..effe1b16b359
--- /dev/null
+++ b/drivers/dpll/zl3073x/flash.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __ZL3073X_FLASH_H
+#define __ZL3073X_FLASH_H
+
+#include <linux/types.h>
+
+struct netlink_ext_ack;
+struct zl3073x_dev;
+
+int zl3073x_flash_mode_enter(struct zl3073x_dev *zldev, const void *util_ptr,
+ size_t util_size, struct netlink_ext_ack *extack);
+
+int zl3073x_flash_mode_leave(struct zl3073x_dev *zldev,
+ struct netlink_ext_ack *extack);
+
+int zl3073x_flash_page(struct zl3073x_dev *zldev, const char *component,
+ u32 page, u32 addr, const void *data, size_t size,
+ struct netlink_ext_ack *extack);
+
+int zl3073x_flash_page_copy(struct zl3073x_dev *zldev, const char *component,
+ u32 src_page, u32 dst_page,
+ struct netlink_ext_ack *extack);
+
+int zl3073x_flash_sectors(struct zl3073x_dev *zldev, const char *component,
+ u32 page, u32 addr, const void *data, size_t size,
+ struct netlink_ext_ack *extack);
+
+#endif /* __ZL3073X_FLASH_H */
diff --git a/drivers/dpll/zl3073x/fw.c b/drivers/dpll/zl3073x/fw.c
new file mode 100644
index 000000000000..d5418ff74886
--- /dev/null
+++ b/drivers/dpll/zl3073x/fw.c
@@ -0,0 +1,419 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/array_size.h>
+#include <linux/build_bug.h>
+#include <linux/dev_printk.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/netlink.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include "core.h"
+#include "flash.h"
+#include "fw.h"
+
+#define ZL3073X_FW_ERR_PFX "FW load failed: "
+#define ZL3073X_FW_ERR_MSG(_extack, _msg, ...) \
+ NL_SET_ERR_MSG_FMT_MOD((_extack), ZL3073X_FW_ERR_PFX _msg, \
+ ## __VA_ARGS__)
+
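+/* How a component gets written to flash: not at all (the utility runs from
+ * device RAM), as a series of erased sectors, as a single page, or as a
+ * page that is subsequently copied to its final page (see
+ * zl3073x_fw_component_flash()).
+ */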
+enum zl3073x_flash_type {
+ ZL3073X_FLASH_TYPE_NONE = 0,
+ ZL3073X_FLASH_TYPE_SECTORS,
+ ZL3073X_FLASH_TYPE_PAGE,
+ ZL3073X_FLASH_TYPE_PAGE_AND_COPY,
+};
+
+struct zl3073x_fw_component_info {
+ const char *name;
+ size_t max_size;
+ enum zl3073x_flash_type flash_type;
+ u32 load_addr;
+ u32 dest_page;
+ u32 copy_page;
+};
+
+static const struct zl3073x_fw_component_info component_info[] = {
+ [ZL_FW_COMPONENT_UTIL] = {
+ .name = "utility",
+ .max_size = 0x2300,
+ .load_addr = 0x20000000,
+ .flash_type = ZL3073X_FLASH_TYPE_NONE,
+ },
+ [ZL_FW_COMPONENT_FW1] = {
+ .name = "firmware1",
+ .max_size = 0x35000,
+ .load_addr = 0x20002000,
+ .flash_type = ZL3073X_FLASH_TYPE_SECTORS,
+ .dest_page = 0x020,
+ },
+ [ZL_FW_COMPONENT_FW2] = {
+ .name = "firmware2",
+ .max_size = 0x0040,
+ .load_addr = 0x20000000,
+ .flash_type = ZL3073X_FLASH_TYPE_PAGE_AND_COPY,
+ .dest_page = 0x3e0,
+ .copy_page = 0x000,
+ },
+ [ZL_FW_COMPONENT_FW3] = {
+ .name = "firmware3",
+ .max_size = 0x0248,
+ .load_addr = 0x20000400,
+ .flash_type = ZL3073X_FLASH_TYPE_PAGE_AND_COPY,
+ .dest_page = 0x3e4,
+ .copy_page = 0x004,
+ },
+ [ZL_FW_COMPONENT_CFG0] = {
+ .name = "config0",
+ .max_size = 0x1000,
+ .load_addr = 0x20000000,
+ .flash_type = ZL3073X_FLASH_TYPE_PAGE,
+ .dest_page = 0x3d0,
+ },
+ [ZL_FW_COMPONENT_CFG1] = {
+ .name = "config1",
+ .max_size = 0x1000,
+ .load_addr = 0x20000000,
+ .flash_type = ZL3073X_FLASH_TYPE_PAGE,
+ .dest_page = 0x3c0,
+ },
+ [ZL_FW_COMPONENT_CFG2] = {
+ .name = "config2",
+ .max_size = 0x1000,
+ .load_addr = 0x20000000,
+ .flash_type = ZL3073X_FLASH_TYPE_PAGE,
+ .dest_page = 0x3b0,
+ },
+ [ZL_FW_COMPONENT_CFG3] = {
+ .name = "config3",
+ .max_size = 0x1000,
+ .load_addr = 0x20000000,
+ .flash_type = ZL3073X_FLASH_TYPE_PAGE,
+ .dest_page = 0x3a0,
+ },
+ [ZL_FW_COMPONENT_CFG4] = {
+ .name = "config4",
+ .max_size = 0x1000,
+ .load_addr = 0x20000000,
+ .flash_type = ZL3073X_FLASH_TYPE_PAGE,
+ .dest_page = 0x390,
+ },
+ [ZL_FW_COMPONENT_CFG5] = {
+ .name = "config5",
+ .max_size = 0x1000,
+ .load_addr = 0x20000000,
+ .flash_type = ZL3073X_FLASH_TYPE_PAGE,
+ .dest_page = 0x380,
+ },
+ [ZL_FW_COMPONENT_CFG6] = {
+ .name = "config6",
+ .max_size = 0x1000,
+ .load_addr = 0x20000000,
+ .flash_type = ZL3073X_FLASH_TYPE_PAGE,
+ .dest_page = 0x370,
+ },
+};
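+
+/* Flash page indices are in units of ZL_FLASH_PAGE_SIZE (256 bytes, see
+ * flash.c), so dest_page 0x020 presumably corresponds to flash byte offset
+ * 0x2000.
+ */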
+
+/* Sanity check */
+static_assert(ARRAY_SIZE(component_info) == ZL_FW_NUM_COMPONENTS);
+
+/**
+ * zl3073x_fw_component_alloc - Alloc structure to hold firmware component
+ * @size: size of buffer to store data
+ *
+ * Return: pointer to allocated component structure or NULL on error.
+ */
+static struct zl3073x_fw_component *
+zl3073x_fw_component_alloc(size_t size)
+{
+ struct zl3073x_fw_component *comp;
+
+ comp = kzalloc(sizeof(*comp), GFP_KERNEL);
+ if (!comp)
+ return NULL;
+
+ comp->size = size;
+ comp->data = kzalloc(size, GFP_KERNEL);
+ if (!comp->data) {
+ kfree(comp);
+ return NULL;
+ }
+
+ return comp;
+}
+
+/**
+ * zl3073x_fw_component_free - Free allocated component structure
+ * @comp: pointer to allocated component
+ */
+static void
+zl3073x_fw_component_free(struct zl3073x_fw_component *comp)
+{
+ if (comp)
+ kfree(comp->data);
+
+ kfree(comp);
+}
+
+/**
+ * zl3073x_fw_component_id_get - Get ID for firmware component name
+ * @name: input firmware component name
+ *
+ * Return:
+ * - ZL_FW_COMPONENT_* ID for a known component name
+ * - ZL_FW_COMPONENT_INVALID if the given name is unknown
+ */
+static enum zl3073x_fw_component_id
+zl3073x_fw_component_id_get(const char *name)
+{
+ enum zl3073x_fw_component_id id;
+
+ for (id = 0; id < ZL_FW_NUM_COMPONENTS; id++)
+ if (!strcasecmp(name, component_info[id].name))
+ return id;
+
+ return ZL_FW_COMPONENT_INVALID;
+}
+
+/**
+ * zl3073x_fw_component_load - Load component from firmware source
+ * @zldev: zl3073x device structure
+ * @pcomp: pointer to loaded component
+ * @psrc: data pointer to load component from
+ * @psize: remaining bytes in buffer
+ * @extack: netlink extack pointer to report errors
+ *
+ * The function allocates a single firmware component and loads the data
+ * from the buffer specified by @psrc and @psize. A pointer to the allocated
+ * component is stored in the output @pcomp. The source data pointer @psrc
+ * and the remaining byte count @psize are updated accordingly.
+ *
+ * Return:
+ * * 1 when component was allocated and loaded
+ * * 0 when there is no component to load
+ * * <0 on error
+ */
+static ssize_t
+zl3073x_fw_component_load(struct zl3073x_dev *zldev,
+ struct zl3073x_fw_component **pcomp,
+ const char **psrc, size_t *psize,
+ struct netlink_ext_ack *extack)
+{
+ const struct zl3073x_fw_component_info *info;
+ struct zl3073x_fw_component *comp = NULL;
+ struct device *dev = zldev->dev;
+ enum zl3073x_fw_component_id id;
+ char buf[32], name[16];
+ u32 count, size, *dest;
+ int pos, rc;
+
+ /* Fetch image name and size from input */
+ strscpy(buf, *psrc, min(sizeof(buf), *psize));
+ rc = sscanf(buf, "%15s %u %n", name, &count, &pos);
+ if (!rc) {
+ /* No more data */
+ return 0;
+ } else if (rc == 1 || count > U32_MAX / sizeof(u32)) {
+ ZL3073X_FW_ERR_MSG(extack, "invalid component size");
+ return -EINVAL;
+ }
+ *psrc += pos;
+ *psize -= pos;
+
+ dev_dbg(dev, "Firmware component '%s' found\n", name);
+
+ id = zl3073x_fw_component_id_get(name);
+ if (id == ZL_FW_COMPONENT_INVALID) {
+ ZL3073X_FW_ERR_MSG(extack, "unknown component type '%s'", name);
+ return -EINVAL;
+ }
+
+ info = &component_info[id];
+ size = count * sizeof(u32); /* get size in bytes */
+
+ /* Check image size validity */
+ if (size > component_info[id].max_size) {
+ ZL3073X_FW_ERR_MSG(extack,
+ "[%s] component is too big (%u bytes)\n",
+ info->name, size);
+ return -EINVAL;
+ }
+
+ dev_dbg(dev, "Indicated component image size: %u bytes\n", size);
+
+ /* Alloc component */
+ comp = zl3073x_fw_component_alloc(size);
+ if (!comp) {
+ ZL3073X_FW_ERR_MSG(extack, "failed to alloc memory");
+ return -ENOMEM;
+ }
+ comp->id = id;
+
+ /* Load component data from firmware source */
+ for (dest = comp->data; count; count--, dest++) {
+ strscpy(buf, *psrc, min(sizeof(buf), *psize));
+ rc = sscanf(buf, "%x %n", dest, &pos);
+ if (!rc)
+ goto err_data;
+
+ *psrc += pos;
+ *psize -= pos;
+ }
+
+ *pcomp = comp;
+
+ return 1;
+
+err_data:
+ ZL3073X_FW_ERR_MSG(extack, "[%s] invalid or missing data", info->name);
+
+ zl3073x_fw_component_free(comp);
+
+ return -ENODATA;
+}
+
+/**
+ * zl3073x_fw_free - Free allocated firmware
+ * @fw: firmware pointer
+ *
+ * The function frees existing firmware allocated by @zl3073x_fw_load.
+ */
+void zl3073x_fw_free(struct zl3073x_fw *fw)
+{
+ size_t i;
+
+ if (!fw)
+ return;
+
+ for (i = 0; i < ZL_FW_NUM_COMPONENTS; i++)
+ zl3073x_fw_component_free(fw->component[i]);
+
+ kfree(fw);
+}
+
+/**
+ * zl3073x_fw_load - Load all components from source
+ * @zldev: zl3073x device structure
+ * @data: source buffer pointer
+ * @size: size of source buffer
+ * @extack: netlink extack pointer to report errors
+ *
+ * The function allocates a firmware structure and loads all components from
+ * the given buffer specified by @data and @size.
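+ *
+ * The source is expected to be ASCII text where each component begins with
+ * a header line "<name> <word-count>" followed by <word-count> hexadecimal
+ * 32-bit words, matching the parser in zl3073x_fw_component_load(); any
+ * vendor file structure beyond what that parser consumes is an assumption.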
+ *
+ * Return: pointer to firmware on success, error pointer on error
+ */
+struct zl3073x_fw *zl3073x_fw_load(struct zl3073x_dev *zldev, const char *data,
+ size_t size, struct netlink_ext_ack *extack)
+{
+ struct zl3073x_fw_component *comp;
+ enum zl3073x_fw_component_id id;
+ struct zl3073x_fw *fw;
+ ssize_t rc;
+
+ /* Allocate firmware structure */
+ fw = kzalloc(sizeof(*fw), GFP_KERNEL);
+ if (!fw)
+ return ERR_PTR(-ENOMEM);
+
+ do {
+ /* Load single component */
+ rc = zl3073x_fw_component_load(zldev, &comp, &data, &size,
+ extack);
+ if (rc <= 0)
+ /* Everything was read or an error occurred */
+ break;
+
+ id = comp->id;
+
+ /* Report error if the given component is present twice
+ * or more.
+ */
+ if (fw->component[id]) {
+ ZL3073X_FW_ERR_MSG(extack,
+ "duplicate component '%s' detected",
+ component_info[id].name);
+ zl3073x_fw_component_free(comp);
+ rc = -EINVAL;
+ break;
+ }
+
+ fw->component[id] = comp;
+ } while (true);
+
+ if (rc) {
+ /* Free allocated firmware in case of error */
+ zl3073x_fw_free(fw);
+ return ERR_PTR(rc);
+ }
+
+ return fw;
+}
+
+/**
+ * zl3073x_fw_component_flash - Flash a single firmware component
+ * @zldev: zl3073x device structure
+ * @comp: pointer to the component to be flashed
+ * @extack: netlink extack pointer to report errors
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_fw_component_flash(struct zl3073x_dev *zldev,
+ struct zl3073x_fw_component *comp,
+ struct netlink_ext_ack *extack)
+{
+ const struct zl3073x_fw_component_info *info;
+ int rc;
+
+ info = &component_info[comp->id];
+
+ switch (info->flash_type) {
+ case ZL3073X_FLASH_TYPE_NONE:
+ /* Non-flashable component - used for utility */
+ return 0;
+ case ZL3073X_FLASH_TYPE_SECTORS:
+ rc = zl3073x_flash_sectors(zldev, info->name, info->dest_page,
+ info->load_addr, comp->data,
+ comp->size, extack);
+ break;
+ case ZL3073X_FLASH_TYPE_PAGE:
+ rc = zl3073x_flash_page(zldev, info->name, info->dest_page,
+ info->load_addr, comp->data, comp->size,
+ extack);
+ break;
+ case ZL3073X_FLASH_TYPE_PAGE_AND_COPY:
+ rc = zl3073x_flash_page(zldev, info->name, info->dest_page,
+ info->load_addr, comp->data, comp->size,
+ extack);
+ if (!rc)
+ rc = zl3073x_flash_page_copy(zldev, info->name,
+ info->dest_page,
+ info->copy_page, extack);
+ break;
+ }
+ if (rc)
+ ZL3073X_FW_ERR_MSG(extack, "Failed to flash component '%s'",
+ info->name);
+
+ return rc;
+}
+
+int zl3073x_fw_flash(struct zl3073x_dev *zldev, struct zl3073x_fw *zlfw,
+ struct netlink_ext_ack *extack)
+{
+ int i, rc = 0;
+
+ for (i = 0; i < ZL_FW_NUM_COMPONENTS; i++) {
+ if (!zlfw->component[i])
+ continue; /* Component is not present */
+
+ rc = zl3073x_fw_component_flash(zldev, zlfw->component[i],
+ extack);
+ if (rc)
+ break;
+ }
+
+ return rc;
+}
diff --git a/drivers/dpll/zl3073x/fw.h b/drivers/dpll/zl3073x/fw.h
new file mode 100644
index 000000000000..fcaa89ab075e
--- /dev/null
+++ b/drivers/dpll/zl3073x/fw.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _ZL3073X_FW_H
+#define _ZL3073X_FW_H
+
+/*
+ * enum zl3073x_fw_component_id - Identifiers for possible flash components
+ */
+enum zl3073x_fw_component_id {
+ ZL_FW_COMPONENT_INVALID = -1,
+ ZL_FW_COMPONENT_UTIL = 0,
+ ZL_FW_COMPONENT_FW1,
+ ZL_FW_COMPONENT_FW2,
+ ZL_FW_COMPONENT_FW3,
+ ZL_FW_COMPONENT_CFG0,
+ ZL_FW_COMPONENT_CFG1,
+ ZL_FW_COMPONENT_CFG2,
+ ZL_FW_COMPONENT_CFG3,
+ ZL_FW_COMPONENT_CFG4,
+ ZL_FW_COMPONENT_CFG5,
+ ZL_FW_COMPONENT_CFG6,
+ ZL_FW_NUM_COMPONENTS
+};
+
+/**
+ * struct zl3073x_fw_component - Firmware component
+ * @id: Flash component ID
+ * @size: Size of the buffer
+ * @data: Pointer to buffer with component data
+ */
+struct zl3073x_fw_component {
+ enum zl3073x_fw_component_id id;
+ size_t size;
+ void *data;
+};
+
+/**
+ * struct zl3073x_fw - Firmware bundle
+ * @component: firmware components array
+ */
+struct zl3073x_fw {
+ struct zl3073x_fw_component *component[ZL_FW_NUM_COMPONENTS];
+};
+
+struct zl3073x_fw *zl3073x_fw_load(struct zl3073x_dev *zldev, const char *data,
+ size_t size, struct netlink_ext_ack *extack);
+void zl3073x_fw_free(struct zl3073x_fw *fw);
+
+int zl3073x_fw_flash(struct zl3073x_dev *zldev, struct zl3073x_fw *zlfw,
+ struct netlink_ext_ack *extack);
+
+#endif /* _ZL3073X_FW_H */
diff --git a/drivers/dpll/zl3073x/regs.h b/drivers/dpll/zl3073x/regs.h
index 614e33128a5c..19a25325bd9c 100644
--- a/drivers/dpll/zl3073x/regs.h
+++ b/drivers/dpll/zl3073x/regs.h
@@ -72,6 +72,9 @@
#define ZL_REG_FW_VER ZL_REG(0, 0x05, 2)
#define ZL_REG_CUSTOM_CONFIG_VER ZL_REG(0, 0x07, 4)
+#define ZL_REG_RESET_STATUS ZL_REG(0, 0x18, 1)
+#define ZL_REG_RESET_STATUS_RESET BIT(0)
+
/*************************
* Register Page 2, Status
*************************/
@@ -260,4 +263,52 @@
#define ZL_REG_OUTPUT_ESYNC_WIDTH ZL_REG(14, 0x18, 4)
#define ZL_REG_OUTPUT_PHASE_COMP ZL_REG(14, 0x20, 4)
+/*
+ * Register Page 255 - HW registers access
+ */
+#define ZL_REG_HWREG_OP ZL_REG(0xff, 0x00, 1)
+#define ZL_HWREG_OP_WRITE 0x28
+#define ZL_HWREG_OP_READ 0x29
+#define ZL_HWREG_OP_PENDING BIT(1)
+
+#define ZL_REG_HWREG_ADDR ZL_REG(0xff, 0x04, 4)
+#define ZL_REG_HWREG_WRITE_DATA ZL_REG(0xff, 0x08, 4)
+#define ZL_REG_HWREG_READ_DATA ZL_REG(0xff, 0x0c, 4)
+
+/*
+ * Registers available in flash mode
+ */
+#define ZL_REG_FLASH_HASH ZL_REG(0, 0x78, 4)
+#define ZL_REG_FLASH_FAMILY ZL_REG(0, 0x7c, 1)
+#define ZL_REG_FLASH_RELEASE ZL_REG(0, 0x7d, 1)
+
+#define ZL_REG_HOST_CONTROL ZL_REG(1, 0x02, 1)
+#define ZL_HOST_CONTROL_ENABLE BIT(0)
+
+#define ZL_REG_IMAGE_START_ADDR ZL_REG(1, 0x04, 4)
+#define ZL_REG_IMAGE_SIZE ZL_REG(1, 0x08, 4)
+#define ZL_REG_FLASH_INDEX_READ ZL_REG(1, 0x0c, 4)
+#define ZL_REG_FLASH_INDEX_WRITE ZL_REG(1, 0x10, 4)
+#define ZL_REG_FILL_PATTERN ZL_REG(1, 0x14, 4)
+
+#define ZL_REG_WRITE_FLASH ZL_REG(1, 0x18, 1)
+#define ZL_WRITE_FLASH_OP GENMASK(2, 0)
+#define ZL_WRITE_FLASH_OP_DONE 0x0
+#define ZL_WRITE_FLASH_OP_SECTORS 0x2
+#define ZL_WRITE_FLASH_OP_PAGE 0x3
+#define ZL_WRITE_FLASH_OP_COPY_PAGE 0x4
+
+#define ZL_REG_FLASH_INFO ZL_REG(2, 0x00, 1)
+#define ZL_FLASH_INFO_SECTOR_SIZE GENMASK(3, 0)
+#define ZL_FLASH_INFO_SECTOR_4K 0
+#define ZL_FLASH_INFO_SECTOR_64K 1
+
+#define ZL_REG_ERROR_COUNT ZL_REG(2, 0x04, 4)
+#define ZL_REG_ERROR_CAUSE ZL_REG(2, 0x08, 4)
+
+#define ZL_REG_OP_STATE ZL_REG(2, 0x14, 1)
+#define ZL_OP_STATE_NO_COMMAND 0
+#define ZL_OP_STATE_PENDING 1
+#define ZL_OP_STATE_DONE 2
+
#endif /* _ZL3073X_REGS_H */
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index aae774e7a5c3..e5e0174a0335 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -229,8 +229,7 @@ void fw_schedule_bus_reset(struct fw_card *card, bool delayed, bool short_reset)
/* Use an arbitrary short delay to combine multiple reset requests. */
fw_card_get(card);
- if (!queue_delayed_work(fw_workqueue, &card->br_work,
- delayed ? DIV_ROUND_UP(HZ, 100) : 0))
+ if (!queue_delayed_work(fw_workqueue, &card->br_work, delayed ? msecs_to_jiffies(10) : 0))
fw_card_put(card);
}
EXPORT_SYMBOL(fw_schedule_bus_reset);
@@ -241,10 +240,10 @@ static void br_work(struct work_struct *work)
/* Delay for 2s after last reset per IEEE 1394 clause 8.2.1. */
if (card->reset_jiffies != 0 &&
- time_before64(get_jiffies_64(), card->reset_jiffies + 2 * HZ)) {
+ time_is_after_jiffies64(card->reset_jiffies + secs_to_jiffies(2))) {
trace_bus_reset_postpone(card->index, card->generation, card->br_short);
- if (!queue_delayed_work(fw_workqueue, &card->br_work, 2 * HZ))
+ if (!queue_delayed_work(fw_workqueue, &card->br_work, secs_to_jiffies(2)))
fw_card_put(card);
return;
}
@@ -280,225 +279,254 @@ void fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
fw_card_put(card);
}
-static void bm_work(struct work_struct *work)
+enum bm_contention_outcome {
+ // The bus management contention window is not expired.
+ BM_CONTENTION_OUTCOME_WITHIN_WINDOW = 0,
+ // The IRM node has link off.
+ BM_CONTENTION_OUTCOME_IRM_HAS_LINK_OFF,
+ // The IRM node complies IEEE 1394:1994 only.
+ BM_CONTENTION_OUTCOME_IRM_COMPLIES_1394_1995_ONLY,
+ // Another bus reset, BM work has been rescheduled.
+ BM_CONTENTION_OUTCOME_AT_NEW_GENERATION,
+ // We have been unable to send the lock request to the IRM node due to some local problem.
+ BM_CONTENTION_OUTCOME_LOCAL_PROBLEM_AT_TRANSACTION,
+ // The lock request failed, maybe the IRM isn't really IRM capable after all.
+ BM_CONTENTION_OUTCOME_IRM_IS_NOT_CAPABLE_FOR_IRM,
+ // Somebody else is BM.
+ BM_CONTENTION_OUTCOME_IRM_HOLDS_ANOTHER_NODE_AS_BM,
+ // The local node has won the contention and acts as bus manager.
+ BM_CONTENTION_OUTCOME_IRM_HOLDS_LOCAL_NODE_AS_BM,
+};
+
+static enum bm_contention_outcome contend_for_bm(struct fw_card *card)
+__must_hold(&card->lock)
{
- static const char gap_count_table[] = {
- 63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40
+ int generation = card->generation;
+ int local_id = card->local_node->node_id;
+ __be32 data[2] = {
+ cpu_to_be32(BUS_MANAGER_ID_NOT_REGISTERED),
+ cpu_to_be32(local_id),
};
- struct fw_card *card = from_work(card, work, bm_work.work);
- struct fw_device *root_device, *irm_device;
- struct fw_node *root_node;
- int root_id, new_root_id, irm_id, bm_id, local_id;
- int gap_count, generation, grace, rcode;
- bool do_reset = false;
- bool root_device_is_running;
- bool root_device_is_cmc;
- bool irm_is_1394_1995_only;
- bool keep_this_irm;
- __be32 transaction_data[2];
-
- spin_lock_irq(&card->lock);
+ bool grace = time_is_before_jiffies64(card->reset_jiffies + msecs_to_jiffies(125));
+ bool irm_is_1394_1995_only = false;
+ bool keep_this_irm = false;
+ struct fw_node *irm_node;
+ struct fw_device *irm_device;
+ int irm_node_id;
+ int rcode;
+
+ lockdep_assert_held(&card->lock);
+
+ if (!grace) {
+ if (!is_next_generation(generation, card->bm_generation) || card->bm_abdicate)
+ return BM_CONTENTION_OUTCOME_WITHIN_WINDOW;
+ }
- if (card->local_node == NULL) {
- spin_unlock_irq(&card->lock);
- goto out_put_card;
+ irm_node = card->irm_node;
+ if (!irm_node->link_on) {
+ fw_notice(card, "IRM has link off, making local node (%02x) root\n", local_id);
+ return BM_CONTENTION_OUTCOME_IRM_HAS_LINK_OFF;
}
- generation = card->generation;
+ irm_device = fw_node_get_device(irm_node);
+ if (irm_device && irm_device->config_rom) {
+ irm_is_1394_1995_only = (irm_device->config_rom[2] & 0x000000f0) == 0;
- root_node = card->root_node;
- fw_node_get(root_node);
- root_device = root_node->data;
- root_device_is_running = root_device &&
- atomic_read(&root_device->state) == FW_DEVICE_RUNNING;
- root_device_is_cmc = root_device && root_device->cmc;
+ // Canon MV5i works unreliably if it is not root node.
+ keep_this_irm = irm_device->config_rom[3] >> 8 == CANON_OUI;
+ }
- irm_device = card->irm_node->data;
- irm_is_1394_1995_only = irm_device && irm_device->config_rom &&
- (irm_device->config_rom[2] & 0x000000f0) == 0;
+ if (irm_is_1394_1995_only && !keep_this_irm) {
+ fw_notice(card, "IRM is not 1394a compliant, making local node (%02x) root\n",
+ local_id);
+ return BM_CONTENTION_OUTCOME_IRM_COMPLIES_1394_1995_ONLY;
+ }
- /* Canon MV5i works unreliably if it is not root node. */
- keep_this_irm = irm_device && irm_device->config_rom &&
- irm_device->config_rom[3] >> 8 == CANON_OUI;
+ irm_node_id = irm_node->node_id;
- root_id = root_node->node_id;
- irm_id = card->irm_node->node_id;
- local_id = card->local_node->node_id;
+ spin_unlock_irq(&card->lock);
- grace = time_after64(get_jiffies_64(),
- card->reset_jiffies + DIV_ROUND_UP(HZ, 8));
+ rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP, irm_node_id, generation,
+ SCODE_100, CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID, data,
+ sizeof(data));
- if ((is_next_generation(generation, card->bm_generation) &&
- !card->bm_abdicate) ||
- (card->bm_generation != generation && grace)) {
- /*
- * This first step is to figure out who is IRM and
- * then try to become bus manager. If the IRM is not
- * well defined (e.g. does not have an active link
- * layer or does not responds to our lock request, we
- * will have to do a little vigilante bus management.
- * In that case, we do a goto into the gap count logic
- * so that when we do the reset, we still optimize the
- * gap count. That could well save a reset in the
- * next generation.
- */
+ spin_lock_irq(&card->lock);
- if (!card->irm_node->link_on) {
- new_root_id = local_id;
- fw_notice(card, "%s, making local node (%02x) root\n",
- "IRM has link off", new_root_id);
- goto pick_me;
+ switch (rcode) {
+ case RCODE_GENERATION:
+ return BM_CONTENTION_OUTCOME_AT_NEW_GENERATION;
+ case RCODE_SEND_ERROR:
+ return BM_CONTENTION_OUTCOME_LOCAL_PROBLEM_AT_TRANSACTION;
+ case RCODE_COMPLETE:
+ {
+ int bm_id = be32_to_cpu(data[0]);
+
+ // Used by cdev layer for "struct fw_cdev_event_bus_reset".
+ if (bm_id != BUS_MANAGER_ID_NOT_REGISTERED) {
+ card->bm_node_id = 0xffc0 | bm_id;
+ return BM_CONTENTION_OUTCOME_IRM_HOLDS_ANOTHER_NODE_AS_BM;
+ }
+
+ card->bm_node_id = local_id;
+ return BM_CONTENTION_OUTCOME_IRM_HOLDS_LOCAL_NODE_AS_BM;
+ }
+ default:
+ if (!keep_this_irm) {
+ fw_notice(card, "BM lock failed (%s), making local node (%02x) root\n",
+ fw_rcode_string(rcode), local_id);
+ return BM_CONTENTION_OUTCOME_IRM_IS_NOT_CAPABLE_FOR_IRM;
+ }
+ // Keep the current IRM (e.g. Canon MV5i) and act as bus manager anyway.
+ return BM_CONTENTION_OUTCOME_IRM_HOLDS_LOCAL_NODE_AS_BM;
+ }
+}
- if (irm_is_1394_1995_only && !keep_this_irm) {
- new_root_id = local_id;
- fw_notice(card, "%s, making local node (%02x) root\n",
- "IRM is not 1394a compliant", new_root_id);
- goto pick_me;
- }
+DEFINE_FREE(node_unref, struct fw_node *, if (_T) fw_node_put(_T))
+DEFINE_FREE(card_unref, struct fw_card *, if (_T) fw_card_put(_T))
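+// Scope-based cleanup (linux/cleanup.h): pointers declared with
+// __free(node_unref)/__free(card_unref) drop their reference automatically
+// when they go out of scope, so bm_work() needs no explicit put on exit.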
+
+static void bm_work(struct work_struct *work)
+{
+ static const char gap_count_table[] = {
+ 63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40
+ };
+ struct fw_card *card __free(card_unref) = from_work(card, work, bm_work.work);
+ struct fw_node *root_node __free(node_unref) = NULL;
+ int root_id, new_root_id, irm_id, local_id;
+ int expected_gap_count, generation;
+ bool stand_for_root = false;
- transaction_data[0] = cpu_to_be32(0x3f);
- transaction_data[1] = cpu_to_be32(local_id);
+ spin_lock_irq(&card->lock);
+ if (card->local_node == NULL) {
spin_unlock_irq(&card->lock);
+ return;
+ }
- rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
- irm_id, generation, SCODE_100,
- CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID,
- transaction_data, 8);
+ generation = card->generation;
- if (rcode == RCODE_GENERATION)
- /* Another bus reset, BM work has been rescheduled. */
- goto out;
+ root_node = fw_node_get(card->root_node);
- bm_id = be32_to_cpu(transaction_data[0]);
+ root_id = root_node->node_id;
+ irm_id = card->irm_node->node_id;
+ local_id = card->local_node->node_id;
- scoped_guard(spinlock_irq, &card->lock) {
- if (rcode == RCODE_COMPLETE && generation == card->generation)
- card->bm_node_id =
- bm_id == 0x3f ? local_id : 0xffc0 | bm_id;
- }
+ if (card->bm_generation != generation) {
+ enum bm_contention_outcome result = contend_for_bm(card);
- if (rcode == RCODE_COMPLETE && bm_id != 0x3f) {
- /* Somebody else is BM. Only act as IRM. */
- if (local_id == irm_id)
+ switch (result) {
+ case BM_CONTENTION_OUTCOME_WITHIN_WINDOW:
+ spin_unlock_irq(&card->lock);
+ fw_schedule_bm_work(card, msecs_to_jiffies(125));
+ return;
+ case BM_CONTENTION_OUTCOME_IRM_HAS_LINK_OFF:
+ stand_for_root = true;
+ break;
+ case BM_CONTENTION_OUTCOME_IRM_COMPLIES_1394_1995_ONLY:
+ stand_for_root = true;
+ break;
+ case BM_CONTENTION_OUTCOME_AT_NEW_GENERATION:
+ // BM work has been rescheduled.
+ spin_unlock_irq(&card->lock);
+ return;
+ case BM_CONTENTION_OUTCOME_LOCAL_PROBLEM_AT_TRANSACTION:
+ // Let's try again later and hope that the local problem has gone away by
+ // then.
+ spin_unlock_irq(&card->lock);
+ fw_schedule_bm_work(card, msecs_to_jiffies(125));
+ return;
+ case BM_CONTENTION_OUTCOME_IRM_IS_NOT_CAPABLE_FOR_IRM:
+ // Let's do a bus reset and pick the local node as root, and thus, IRM.
+ stand_for_root = true;
+ break;
+ case BM_CONTENTION_OUTCOME_IRM_HOLDS_ANOTHER_NODE_AS_BM:
+ if (local_id == irm_id) {
+ // Only acts as IRM.
+ spin_unlock_irq(&card->lock);
allocate_broadcast_channel(card, generation);
-
- goto out;
- }
-
- if (rcode == RCODE_SEND_ERROR) {
- /*
- * We have been unable to send the lock request due to
- * some local problem. Let's try again later and hope
- * that the problem has gone away by then.
- */
- fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8));
- goto out;
+ spin_lock_irq(&card->lock);
+ }
+ fallthrough;
+ case BM_CONTENTION_OUTCOME_IRM_HOLDS_LOCAL_NODE_AS_BM:
+ default:
+ card->bm_generation = generation;
+ break;
}
+ }
- spin_lock_irq(&card->lock);
-
- if (rcode != RCODE_COMPLETE && !keep_this_irm) {
- /*
- * The lock request failed, maybe the IRM
- * isn't really IRM capable after all. Let's
- * do a bus reset and pick the local node as
- * root, and thus, IRM.
- */
- new_root_id = local_id;
- fw_notice(card, "BM lock failed (%s), making local node (%02x) root\n",
- fw_rcode_string(rcode), new_root_id);
- goto pick_me;
+ // We're bus manager for this generation, so next step is to make sure we have an active
+ // cycle master and do gap count optimization.
+ if (!stand_for_root) {
+ if (card->gap_count == GAP_COUNT_MISMATCHED) {
+ // If self IDs have inconsistent gap counts, do a
+ // bus reset ASAP. The config rom read might never
+ // complete, so don't wait for it. However, still
+ // send a PHY configuration packet prior to the
+ // bus reset. The PHY configuration packet might
+ // fail, but 1394-2008 8.4.5.2 explicitly permits
+ // it in this case, so it should be safe to try.
+ stand_for_root = true;
+
+ // We must always send a bus reset if the gap count
+ // is inconsistent, so bypass the 5-reset limit.
+ card->bm_retries = 0;
+ } else {
+ // Now investigate root node.
+ struct fw_device *root_device = fw_node_get_device(root_node);
+
+ if (root_device == NULL) {
+ // Either link_on is false, or we failed to read the
+ // config rom. In either case, pick another root.
+ stand_for_root = true;
+ } else {
+ bool root_device_is_running =
+ atomic_read(&root_device->state) == FW_DEVICE_RUNNING;
+
+ if (!root_device_is_running) {
+ // If we haven't probed this device yet, bail out now
+ // and let's try again once that's done.
+ spin_unlock_irq(&card->lock);
+ return;
+ } else if (!root_device->cmc) {
+ // Current root has an active link layer and we
+ // successfully read the config rom, but it's not
+ // cycle master capable.
+ stand_for_root = true;
+ }
+ }
}
- } else if (card->bm_generation != generation) {
- /*
- * We weren't BM in the last generation, and the last
- * bus reset is less than 125ms ago. Reschedule this job.
- */
- spin_unlock_irq(&card->lock);
- fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8));
- goto out;
}
- /*
- * We're bus manager for this generation, so next step is to
- * make sure we have an active cycle master and do gap count
- * optimization.
- */
- card->bm_generation = generation;
-
- if (card->gap_count == 0) {
- /*
- * If self IDs have inconsistent gap counts, do a
- * bus reset ASAP. The config rom read might never
- * complete, so don't wait for it. However, still
- * send a PHY configuration packet prior to the
- * bus reset. The PHY configuration packet might
- * fail, but 1394-2008 8.4.5.2 explicitly permits
- * it in this case, so it should be safe to try.
- */
- new_root_id = local_id;
- /*
- * We must always send a bus reset if the gap count
- * is inconsistent, so bypass the 5-reset limit.
- */
- card->bm_retries = 0;
- } else if (root_device == NULL) {
- /*
- * Either link_on is false, or we failed to read the
- * config rom. In either case, pick another root.
- */
+ if (stand_for_root) {
new_root_id = local_id;
- } else if (!root_device_is_running) {
- /*
- * If we haven't probed this device yet, bail out now
- * and let's try again once that's done.
- */
- spin_unlock_irq(&card->lock);
- goto out;
- } else if (root_device_is_cmc) {
- /*
- * We will send out a force root packet for this
- * node as part of the gap count optimization.
- */
- new_root_id = root_id;
} else {
- /*
- * Current root has an active link layer and we
- * successfully read the config rom, but it's not
- * cycle master capable.
- */
- new_root_id = local_id;
+ // We will send out a force root packet for this node as part of the gap count
+ // optimization on behalf of the node.
+ new_root_id = root_id;
}
- pick_me:
/*
* Pick a gap count from 1394a table E-1. The table doesn't cover
* the typically much larger 1394b beta repeater delays though.
*/
if (!card->beta_repeaters_present &&
root_node->max_hops < ARRAY_SIZE(gap_count_table))
- gap_count = gap_count_table[root_node->max_hops];
+ expected_gap_count = gap_count_table[root_node->max_hops];
else
- gap_count = 63;
+ expected_gap_count = 63;
- /*
- * Finally, figure out if we should do a reset or not. If we have
- * done less than 5 resets with the same physical topology and we
- * have either a new root or a new gap count setting, let's do it.
- */
-
- if (card->bm_retries++ < 5 &&
- (card->gap_count != gap_count || new_root_id != root_id))
- do_reset = true;
+ // Finally, figure out if we should do a reset or not. If we have done less than 5 resets
+ // with the same physical topology and we have either a new root or a new gap count
+ // setting, let's do it.
+ if (card->bm_retries++ < 5 &&
+ (card->gap_count != expected_gap_count || new_root_id != root_id)) {
+ int card_gap_count = card->gap_count;
- spin_unlock_irq(&card->lock);
+ spin_unlock_irq(&card->lock);
- if (do_reset) {
fw_notice(card, "phy config: new root=%x, gap_count=%d\n",
- new_root_id, gap_count);
- fw_send_phy_config(card, new_root_id, generation, gap_count);
+ new_root_id, expected_gap_count);
+ fw_send_phy_config(card, new_root_id, generation, expected_gap_count);
/*
* Where possible, use a short bus reset to minimize
* disruption to isochronous transfers. But in the event
@@ -511,31 +539,27 @@ static void bm_work(struct work_struct *work)
* may treat it as two, causing a gap count inconsistency
* again. Using a long bus reset prevents this.
*/
- reset_bus(card, card->gap_count != 0);
+ reset_bus(card, card_gap_count != 0);
/* Will allocate broadcast channel after the reset. */
- goto out;
- }
+ } else {
+ struct fw_device *root_device = fw_node_get_device(root_node);
- if (root_device_is_cmc) {
- /*
- * Make sure that the cycle master sends cycle start packets.
- */
- transaction_data[0] = cpu_to_be32(CSR_STATE_BIT_CMSTR);
- rcode = fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST,
- root_id, generation, SCODE_100,
- CSR_REGISTER_BASE + CSR_STATE_SET,
- transaction_data, 4);
- if (rcode == RCODE_GENERATION)
- goto out;
- }
+ spin_unlock_irq(&card->lock);
- if (local_id == irm_id)
- allocate_broadcast_channel(card, generation);
+ if (root_device && root_device->cmc) {
+ // Make sure that the cycle master sends cycle start packets.
+ __be32 data = cpu_to_be32(CSR_STATE_BIT_CMSTR);
+ int rcode = fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST,
+ root_id, generation, SCODE_100,
+ CSR_REGISTER_BASE + CSR_STATE_SET,
+ &data, sizeof(data));
+ if (rcode == RCODE_GENERATION)
+ return;
+ }
- out:
- fw_node_put(root_node);
- out_put_card:
- fw_card_put(card);
+ if (local_id == irm_id)
+ allocate_broadcast_channel(card, generation);
+ }
}
void fw_card_initialize(struct fw_card *card,
@@ -547,20 +571,24 @@ void fw_card_initialize(struct fw_card *card,
card->index = atomic_inc_return(&index);
card->driver = driver;
card->device = device;
- card->current_tlabel = 0;
- card->tlabel_mask = 0;
- card->split_timeout_hi = DEFAULT_SPLIT_TIMEOUT / 8000;
- card->split_timeout_lo = (DEFAULT_SPLIT_TIMEOUT % 8000) << 19;
- card->split_timeout_cycles = DEFAULT_SPLIT_TIMEOUT;
- card->split_timeout_jiffies =
- DIV_ROUND_UP(DEFAULT_SPLIT_TIMEOUT * HZ, 8000);
+
+ card->transactions.current_tlabel = 0;
+ card->transactions.tlabel_mask = 0;
+ INIT_LIST_HEAD(&card->transactions.list);
+ spin_lock_init(&card->transactions.lock);
+
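+ // The split timeout is kept in units of 8 kHz isochronous cycles
+ // (125 us each); hi/lo follow the SPLIT_TIMEOUT_HI/LO CSR layout.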
+ card->split_timeout.hi = DEFAULT_SPLIT_TIMEOUT / 8000;
+ card->split_timeout.lo = (DEFAULT_SPLIT_TIMEOUT % 8000) << 19;
+ card->split_timeout.cycles = DEFAULT_SPLIT_TIMEOUT;
+ card->split_timeout.jiffies = isoc_cycles_to_jiffies(DEFAULT_SPLIT_TIMEOUT);
+ spin_lock_init(&card->split_timeout.lock);
+
card->color = 0;
card->broadcast_channel = BROADCAST_CHANNEL_INITIAL;
kref_init(&card->kref);
init_completion(&card->done);
- INIT_LIST_HEAD(&card->transaction_list);
- INIT_LIST_HEAD(&card->phy_receiver_list);
+
spin_lock_init(&card->lock);
card->local_node = NULL;
@@ -570,9 +598,13 @@ void fw_card_initialize(struct fw_card *card,
}
EXPORT_SYMBOL(fw_card_initialize);
+DEFINE_FREE(workqueue_destroy, struct workqueue_struct *, if (_T) destroy_workqueue(_T))
+
int fw_card_add(struct fw_card *card, u32 max_receive, u32 link_speed, u64 guid,
unsigned int supported_isoc_contexts)
{
+ struct workqueue_struct *isoc_wq __free(workqueue_destroy) = NULL;
+ struct workqueue_struct *async_wq __free(workqueue_destroy) = NULL;
int ret;
// This workqueue should be:
@@ -587,10 +619,10 @@ int fw_card_add(struct fw_card *card, u32 max_receive, u32 link_speed, u64 guid,
// * == WQ_SYSFS Parameters are available via sysfs.
// * max_active == n_it + n_ir A hardIRQ could notify events for multiple isochronous
// contexts if they are scheduled to the same cycle.
- card->isoc_wq = alloc_workqueue("firewire-isoc-card%u",
- WQ_UNBOUND | WQ_FREEZABLE | WQ_HIGHPRI | WQ_SYSFS,
- supported_isoc_contexts, card->index);
- if (!card->isoc_wq)
+ isoc_wq = alloc_workqueue("firewire-isoc-card%u",
+ WQ_UNBOUND | WQ_FREEZABLE | WQ_HIGHPRI | WQ_SYSFS,
+ supported_isoc_contexts, card->index);
+ if (!isoc_wq)
return -ENOMEM;
// This workqueue should be:
@@ -602,14 +634,14 @@ int fw_card_add(struct fw_card *card, u32 max_receive, u32 link_speed, u64 guid,
// * == WQ_SYSFS Parameters are available via sysfs.
// * max_active == 4 A hardIRQ could notify events for a pair of requests and
// response AR/AT contexts.
- card->async_wq = alloc_workqueue("firewire-async-card%u",
- WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_HIGHPRI | WQ_SYSFS,
- 4, card->index);
- if (!card->async_wq) {
- ret = -ENOMEM;
- goto err_isoc;
- }
+ async_wq = alloc_workqueue("firewire-async-card%u",
+ WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_HIGHPRI | WQ_SYSFS,
+ 4, card->index);
+ if (!async_wq)
+ return -ENOMEM;
+ card->isoc_wq = isoc_wq;
+ card->async_wq = async_wq;
card->max_receive = max_receive;
card->link_speed = link_speed;
card->guid = guid;
@@ -617,18 +649,18 @@ int fw_card_add(struct fw_card *card, u32 max_receive, u32 link_speed, u64 guid,
scoped_guard(mutex, &card_mutex) {
generate_config_rom(card, tmp_config_rom);
ret = card->driver->enable(card, tmp_config_rom, config_rom_length);
- if (ret < 0)
- goto err_async;
+ if (ret < 0) {
+ card->isoc_wq = NULL;
+ card->async_wq = NULL;
+ return ret;
+ }
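+ // On success, disarm the scope-based cleanup: the card now owns the
+ // workqueues, so the locals must not destroy them on return.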
+ retain_and_null_ptr(isoc_wq);
+ retain_and_null_ptr(async_wq);
list_add_tail(&card->link, &card_list);
}
return 0;
-err_async:
- destroy_workqueue(card->async_wq);
-err_isoc:
- destroy_workqueue(card->isoc_wq);
- return ret;
}
EXPORT_SYMBOL(fw_card_add);
@@ -773,7 +805,7 @@ void fw_core_remove_card(struct fw_card *card)
destroy_workqueue(card->isoc_wq);
destroy_workqueue(card->async_wq);
- WARN_ON(!list_empty(&card->transaction_list));
+ WARN_ON(!list_empty(&card->transactions.list));
}
EXPORT_SYMBOL(fw_core_remove_card);
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 2e93189d7142..49dc1612c691 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -47,6 +47,9 @@
#define FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW 5
#define FW_CDEV_VERSION_EVENT_ASYNC_TSTAMP 6
+static DEFINE_SPINLOCK(phy_receiver_list_lock);
+static LIST_HEAD(phy_receiver_list);
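+// Replaces the former per-card phy_receiver_list: clients from all cards
+// share this list and are matched by client->device->card when dispatching
+// in fw_cdev_handle_phy_packet().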
+
struct client {
u32 version;
struct fw_device *device;
@@ -937,11 +940,12 @@ static int ioctl_add_descriptor(struct client *client, union ioctl_arg *arg)
if (a->length > 256)
return -EINVAL;
- r = kmalloc(sizeof(*r) + a->length * 4, GFP_KERNEL);
+ r = kmalloc(struct_size(r, data, a->length), GFP_KERNEL);
if (r == NULL)
return -ENOMEM;
- if (copy_from_user(r->data, u64_to_uptr(a->data), a->length * 4)) {
+ if (copy_from_user(r->data, u64_to_uptr(a->data),
+ flex_array_size(r, data, a->length))) {
ret = -EFAULT;
goto failed;
}
@@ -1324,8 +1328,8 @@ static void iso_resource_work(struct work_struct *work)
todo = r->todo;
// Allow 1000ms grace period for other reallocations.
if (todo == ISO_RES_ALLOC &&
- time_before64(get_jiffies_64(), client->device->card->reset_jiffies + HZ)) {
- schedule_iso_resource(r, DIV_ROUND_UP(HZ, 3));
+ time_is_after_jiffies64(client->device->card->reset_jiffies + secs_to_jiffies(1))) {
+ schedule_iso_resource(r, msecs_to_jiffies(333));
skip = true;
} else {
// We could be called twice within the same generation.
@@ -1669,15 +1673,16 @@ static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)
static int ioctl_receive_phy_packets(struct client *client, union ioctl_arg *arg)
{
struct fw_cdev_receive_phy_packets *a = &arg->receive_phy_packets;
- struct fw_card *card = client->device->card;
/* Access policy: Allow this ioctl only on local nodes' device files. */
if (!client->device->is_local)
return -ENOSYS;
- guard(spinlock_irq)(&card->lock);
+ // NOTE: The irq-disabled guard can be dropped once we can guarantee that
+ // __fw_send_request() for a local destination never runs in any type of IRQ context.
+ scoped_guard(spinlock_irq, &phy_receiver_list_lock)
+ list_move_tail(&client->phy_receiver_link, &phy_receiver_list);
- list_move_tail(&client->phy_receiver_link, &card->phy_receiver_list);
client->phy_receiver_closure = a->closure;
return 0;
@@ -1687,10 +1692,17 @@ void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p)
{
struct client *client;
- guard(spinlock_irqsave)(&card->lock);
+ // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for local
+ // destination never runs in any type of IRQ context.
+ guard(spinlock_irqsave)(&phy_receiver_list_lock);
+
+ list_for_each_entry(client, &phy_receiver_list, phy_receiver_link) {
+ struct inbound_phy_packet_event *e;
+
+ if (client->device->card != card)
+ continue;
- list_for_each_entry(client, &card->phy_receiver_list, phy_receiver_link) {
- struct inbound_phy_packet_event *e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC);
+ e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC);
if (e == NULL)
break;
@@ -1857,7 +1869,9 @@ static int fw_device_op_release(struct inode *inode, struct file *file)
struct client_resource *resource;
unsigned long index;
- scoped_guard(spinlock_irq, &client->device->card->lock)
+ // NOTE: This can be without irq when we can guarantee that __fw_send_request() for local
+ // destination never runs in any type of IRQ context.
+ scoped_guard(spinlock_irq, &phy_receiver_list_lock)
list_del(&client->phy_receiver_link);
scoped_guard(mutex, &client->device->client_list_mutex)
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index aeacd4cfd694..457a0da024a7 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -847,16 +847,15 @@ static void fw_schedule_device_work(struct fw_device *device,
*/
#define MAX_RETRIES 10
-#define RETRY_DELAY (3 * HZ)
-#define INITIAL_DELAY (HZ / 2)
-#define SHUTDOWN_DELAY (2 * HZ)
+#define RETRY_DELAY secs_to_jiffies(3)
+#define INITIAL_DELAY msecs_to_jiffies(500)
+#define SHUTDOWN_DELAY secs_to_jiffies(2)
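These helpers are straight unit conversions, so the delays are unchanged; a quick sanity check, assuming the usual integer HZ values:

	secs_to_jiffies(3)    == 3 * HZ   /* RETRY_DELAY */
	msecs_to_jiffies(500) == HZ / 2   /* INITIAL_DELAY, for HZ divisible by 2 */
	secs_to_jiffies(2)    == 2 * HZ   /* SHUTDOWN_DELAY */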
static void fw_device_shutdown(struct work_struct *work)
{
struct fw_device *device = from_work(device, work, work.work);
- if (time_before64(get_jiffies_64(),
- device->card->reset_jiffies + SHUTDOWN_DELAY)
+ if (time_is_after_jiffies64(device->card->reset_jiffies + SHUTDOWN_DELAY)
&& !list_empty(&device->card->link)) {
fw_schedule_device_work(device, SHUTDOWN_DELAY);
return;
@@ -887,7 +886,7 @@ static void fw_device_release(struct device *dev)
* bus manager work looks at this node.
*/
scoped_guard(spinlock_irqsave, &card->lock)
- device->node->data = NULL;
+ fw_node_set_device(device->node, NULL);
fw_node_put(device->node);
kfree(device->config_rom);
@@ -1007,7 +1006,7 @@ static void fw_device_init(struct work_struct *work)
int ret;
/*
- * All failure paths here set node->data to NULL, so that we
+ * All failure paths here call fw_node_set_device(node, NULL), so that we
* don't try to do device_for_each_child() on a kfree()'d
* device.
*/
@@ -1051,9 +1050,9 @@ static void fw_device_init(struct work_struct *work)
struct fw_node *obsolete_node = reused->node;
device->node = obsolete_node;
- device->node->data = device;
+ fw_node_set_device(device->node, device);
reused->node = current_node;
- reused->node->data = reused;
+ fw_node_set_device(reused->node, reused);
reused->max_speed = device->max_speed;
reused->node_id = current_node->node_id;
@@ -1292,7 +1291,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
* FW_NODE_UPDATED callbacks can update the node_id
* and generation for the device.
*/
- node->data = device;
+ fw_node_set_device(node, device);
/*
* Many devices are slow to respond after bus resets,
@@ -1307,7 +1306,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
case FW_NODE_INITIATED_RESET:
case FW_NODE_LINK_ON:
- device = node->data;
+ device = fw_node_get_device(node);
if (device == NULL)
goto create;
@@ -1324,7 +1323,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
break;
case FW_NODE_UPDATED:
- device = node->data;
+ device = fw_node_get_device(node);
if (device == NULL)
break;
@@ -1339,7 +1338,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
case FW_NODE_DESTROYED:
case FW_NODE_LINK_OFF:
- if (!node->data)
+ if (!fw_node_get_device(node))
break;
/*
@@ -1354,7 +1353,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
* the device in shutdown state to have that code fail
* to create the device.
*/
- device = node->data;
+ device = fw_node_get_device(node);
if (atomic_xchg(&device->state,
FW_DEVICE_GONE) == FW_DEVICE_RUNNING) {
device->workfn = fw_device_shutdown;
diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c
index 74a6aa7d8cc9..2f73bcd5696f 100644
--- a/drivers/firewire/core-topology.c
+++ b/drivers/firewire/core-topology.c
@@ -241,7 +241,7 @@ static struct fw_node *build_tree(struct fw_card *card, const u32 *sid, int self
// If PHYs report different gap counts, set an invalid count which will force a gap
// count reconfiguration and a reset.
if (phy_packet_self_id_zero_get_gap_count(self_id_sequence[0]) != gap_count)
- gap_count = 0;
+ gap_count = GAP_COUNT_MISMATCHED;
update_hop_count(node);
@@ -325,9 +325,11 @@ static void report_found_node(struct fw_card *card,
card->bm_retries = 0;
}
-/* Must be called with card->lock held */
void fw_destroy_nodes(struct fw_card *card)
+__must_hold(&card->lock)
{
+ lockdep_assert_held(&card->lock);
+
card->color++;
if (card->local_node != NULL)
for_each_fw_node(card, card->local_node, report_lost_node);
@@ -435,20 +437,22 @@ static void update_tree(struct fw_card *card, struct fw_node *root)
}
}
-static void update_topology_map(struct fw_card *card,
- u32 *self_ids, int self_id_count)
+static void update_topology_map(__be32 *buffer, size_t buffer_size, int root_node_id,
+ const u32 *self_ids, int self_id_count)
{
- int node_count = (card->root_node->node_id & 0x3f) + 1;
- __be32 *map = card->topology_map;
+ __be32 *map = buffer;
+ int node_count = (root_node_id & 0x3f) + 1;
+
+ memset(map, 0, buffer_size);
*map++ = cpu_to_be32((self_id_count + 2) << 16);
- *map++ = cpu_to_be32(be32_to_cpu(card->topology_map[1]) + 1);
+ *map++ = cpu_to_be32(be32_to_cpu(buffer[1]) + 1);
*map++ = cpu_to_be32((node_count << 16) | self_id_count);
while (self_id_count--)
*map++ = cpu_to_be32p(self_ids++);
- fw_compute_block_crc(card->topology_map);
+ fw_compute_block_crc(buffer);
}
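Read literally, the function lays out the TOPOLOGY_MAP CSR as follows; a worked sketch for a bus of three nodes, each reporting one self-ID quadlet (so root_node_id & 0x3f == 2 and self_id_count == 3):

	buffer[0] = (3 + 2) << 16     /* length in quadlets; the CRC-16 is
	                                 filled into the low half later by
	                                 fw_compute_block_crc() */
	buffer[1] += 1 (big endian)   /* the CSR's update counter */
	buffer[2] = (3 << 16) | 3     /* node_count | self_id_count */
	buffer[3..5]                  /* the raw self-ID quadlets */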
void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
@@ -458,46 +462,45 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
trace_bus_reset_handle(card->index, generation, node_id, bm_abdicate, self_ids, self_id_count);
- guard(spinlock_irqsave)(&card->lock);
-
- /*
- * If the selfID buffer is not the immediate successor of the
- * previously processed one, we cannot reliably compare the
- * old and new topologies.
- */
- if (!is_next_generation(generation, card->generation) &&
- card->local_node != NULL) {
- fw_destroy_nodes(card);
- card->bm_retries = 0;
+ scoped_guard(spinlock, &card->lock) {
+ // If the selfID buffer is not the immediate successor of the
+ // previously processed one, we cannot reliably compare the
+ // old and new topologies.
+ if (!is_next_generation(generation, card->generation) && card->local_node != NULL) {
+ fw_destroy_nodes(card);
+ card->bm_retries = 0;
+ }
+ card->broadcast_channel_allocated = card->broadcast_channel_auto_allocated;
+ card->node_id = node_id;
+ // Update node_id before generation to prevent anybody from using
+ // a stale node_id together with a current generation.
+ smp_wmb();
+ card->generation = generation;
+ card->reset_jiffies = get_jiffies_64();
+ card->bm_node_id = 0xffff;
+ card->bm_abdicate = bm_abdicate;
+
+ local_node = build_tree(card, self_ids, self_id_count, generation);
+
+ card->color++;
+
+ if (local_node == NULL) {
+ fw_err(card, "topology build failed\n");
+ // FIXME: We need to issue a bus reset in this case.
+ } else if (card->local_node == NULL) {
+ card->local_node = local_node;
+ for_each_fw_node(card, local_node, report_found_node);
+ } else {
+ update_tree(card, local_node);
+ }
}
- card->broadcast_channel_allocated = card->broadcast_channel_auto_allocated;
- card->node_id = node_id;
- /*
- * Update node_id before generation to prevent anybody from using
- * a stale node_id together with a current generation.
- */
- smp_wmb();
- card->generation = generation;
- card->reset_jiffies = get_jiffies_64();
- card->bm_node_id = 0xffff;
- card->bm_abdicate = bm_abdicate;
fw_schedule_bm_work(card, 0);
- local_node = build_tree(card, self_ids, self_id_count, generation);
-
- update_topology_map(card, self_ids, self_id_count);
-
- card->color++;
-
- if (local_node == NULL) {
- fw_err(card, "topology build failed\n");
- /* FIXME: We need to issue a bus reset in this case. */
- } else if (card->local_node == NULL) {
- card->local_node = local_node;
- for_each_fw_node(card, local_node, report_found_node);
- } else {
- update_tree(card, local_node);
+ // Just used by the transaction layer.
+ scoped_guard(spinlock, &card->topology_map.lock) {
+ update_topology_map(card->topology_map.buffer, sizeof(card->topology_map.buffer),
+ card->root_node->node_id, self_ids, self_id_count);
}
}
EXPORT_SYMBOL(fw_core_handle_bus_reset);
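The smp_wmb() in the scoped section orders the node_id store before the generation store. Any reader that wants a consistent pair has to mirror that with a read barrier; a sketch of the expected reader side (illustrative, not a quote of a specific call site):

	generation = card->generation;
	smp_rmb();	/* pairs with smp_wmb() in fw_core_handle_bus_reset() */
	node_id = card->node_id;
	/*
	 * If the generation read is current, the node_id read is at least
	 * as new, so a stale node_id is never used with a new generation.
	 */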
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index 1d1c2d8f85ae..dd3656a0c1ff 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -49,12 +49,14 @@ static int close_transaction(struct fw_transaction *transaction, struct fw_card
{
struct fw_transaction *t = NULL, *iter;
- scoped_guard(spinlock_irqsave, &card->lock) {
- list_for_each_entry(iter, &card->transaction_list, link) {
+ // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for
+ // local destination never runs in any type of IRQ context.
+ scoped_guard(spinlock_irqsave, &card->transactions.lock) {
+ list_for_each_entry(iter, &card->transactions.list, link) {
if (iter == transaction) {
if (try_cancel_split_timeout(iter)) {
list_del_init(&iter->link);
- card->tlabel_mask &= ~(1ULL << iter->tlabel);
+ card->transactions.tlabel_mask &= ~(1ULL << iter->tlabel);
t = iter;
}
break;
@@ -117,11 +119,11 @@ static void split_transaction_timeout_callback(struct timer_list *timer)
struct fw_transaction *t = timer_container_of(t, timer, split_timeout_timer);
struct fw_card *card = t->card;
- scoped_guard(spinlock_irqsave, &card->lock) {
+ scoped_guard(spinlock_irqsave, &card->transactions.lock) {
if (list_empty(&t->link))
return;
list_del(&t->link);
- card->tlabel_mask &= ~(1ULL << t->tlabel);
+ card->transactions.tlabel_mask &= ~(1ULL << t->tlabel);
}
if (!t->with_tstamp) {
@@ -135,14 +137,18 @@ static void split_transaction_timeout_callback(struct timer_list *timer)
static void start_split_transaction_timeout(struct fw_transaction *t,
struct fw_card *card)
{
- guard(spinlock_irqsave)(&card->lock);
+ unsigned long delta;
if (list_empty(&t->link) || WARN_ON(t->is_split_transaction))
return;
t->is_split_transaction = true;
- mod_timer(&t->split_timeout_timer,
- jiffies + card->split_timeout_jiffies);
+
+ // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for
+ // local destination never runs in any type of IRQ context.
+ scoped_guard(spinlock_irqsave, &card->split_timeout.lock)
+ delta = card->split_timeout.jiffies;
+ mod_timer(&t->split_timeout_timer, jiffies + delta);
}
static u32 compute_split_timeout_timestamp(struct fw_card *card, u32 request_timestamp);
@@ -162,8 +168,12 @@ static void transmit_complete_callback(struct fw_packet *packet,
break;
case ACK_PENDING:
{
- t->split_timeout_cycle =
- compute_split_timeout_timestamp(card, packet->timestamp) & 0xffff;
+ // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for
+ // local destination never runs in any type of IRQ context.
+ scoped_guard(spinlock_irqsave, &card->split_timeout.lock) {
+ t->split_timeout_cycle =
+ compute_split_timeout_timestamp(card, packet->timestamp) & 0xffff;
+ }
start_split_transaction_timeout(t, card);
break;
}
@@ -259,18 +269,21 @@ static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
}
static int allocate_tlabel(struct fw_card *card)
+__must_hold(&card->transactions.lock)
{
int tlabel;
- tlabel = card->current_tlabel;
- while (card->tlabel_mask & (1ULL << tlabel)) {
+ lockdep_assert_held(&card->transactions.lock);
+
+ tlabel = card->transactions.current_tlabel;
+ while (card->transactions.tlabel_mask & (1ULL << tlabel)) {
tlabel = (tlabel + 1) & 0x3f;
- if (tlabel == card->current_tlabel)
+ if (tlabel == card->transactions.current_tlabel)
return -EBUSY;
}
- card->current_tlabel = (tlabel + 1) & 0x3f;
- card->tlabel_mask |= 1ULL << tlabel;
+ card->transactions.current_tlabel = (tlabel + 1) & 0x3f;
+ card->transactions.tlabel_mask |= 1ULL << tlabel;
return tlabel;
}
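allocate_tlabel() is a 64-slot round-robin allocator over a bitmask. A worked trace of one call, assuming current_tlabel == 5 with bits 5 and 6 already set in tlabel_mask (two transactions in flight):

	/* bit 5 set -> try 6; bit 6 set -> try 7; bit 7 clear -> take it */
	return value                      -> 7
	card->transactions.tlabel_mask    |= 1ULL << 7
	card->transactions.current_tlabel  = (7 + 1) & 0x3f = 8
	/* only when all 64 bits are set does the scan wrap back to 5
	   and return -EBUSY */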
@@ -331,7 +344,6 @@ void __fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode
void *payload, size_t length, union fw_transaction_callback callback,
bool with_tstamp, void *callback_data)
{
- unsigned long flags;
int tlabel;
/*
@@ -339,11 +351,11 @@ void __fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode
* the list while holding the card spinlock.
*/
- spin_lock_irqsave(&card->lock, flags);
-
- tlabel = allocate_tlabel(card);
+ // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for
+ // local destination never runs in any type of IRQ context.
+ scoped_guard(spinlock_irqsave, &card->transactions.lock)
+ tlabel = allocate_tlabel(card);
if (tlabel < 0) {
- spin_unlock_irqrestore(&card->lock, flags);
if (!with_tstamp) {
callback.without_tstamp(card, RCODE_SEND_ERROR, NULL, 0, callback_data);
} else {
@@ -368,15 +380,22 @@ void __fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode
t->callback = callback;
t->with_tstamp = with_tstamp;
t->callback_data = callback_data;
-
- fw_fill_request(&t->packet, tcode, t->tlabel, destination_id, card->node_id, generation,
- speed, offset, payload, length);
t->packet.callback = transmit_complete_callback;
- list_add_tail(&t->link, &card->transaction_list);
+ // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for
+ // local destination never runs in any type of IRQ context.
+ scoped_guard(spinlock_irqsave, &card->lock) {
+ // The node_id field of fw_card can be updated when handling SelfIDComplete.
+ fw_fill_request(&t->packet, tcode, t->tlabel, destination_id, card->node_id,
+ generation, speed, offset, payload, length);
+ }
- spin_unlock_irqrestore(&card->lock, flags);
+ // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for
+ // local destination never runs in any type of IRQ context.
+ scoped_guard(spinlock_irqsave, &card->transactions.lock)
+ list_add_tail(&t->link, &card->transactions.list);
+ // Safe with no lock, since the index field of fw_card is immutable once assigned.
trace_async_request_outbound_initiate((uintptr_t)t, card->index, generation, speed,
t->packet.header, payload,
tcode_is_read_request(tcode) ? 0 : length / 4);
@@ -458,7 +477,7 @@ static struct fw_packet phy_config_packet = {
void fw_send_phy_config(struct fw_card *card,
int node_id, int generation, int gap_count)
{
- long timeout = DIV_ROUND_UP(HZ, 10);
+ long timeout = msecs_to_jiffies(100);
u32 data = 0;
phy_packet_set_packet_identifier(&data, PHY_PACKET_PACKET_IDENTIFIER_PHY_CONFIG);
@@ -779,11 +798,14 @@ EXPORT_SYMBOL(fw_fill_response);
static u32 compute_split_timeout_timestamp(struct fw_card *card,
u32 request_timestamp)
+__must_hold(&card->split_timeout.lock)
{
unsigned int cycles;
u32 timestamp;
- cycles = card->split_timeout_cycles;
+ lockdep_assert_held(&card->split_timeout.lock);
+
+ cycles = card->split_timeout.cycles;
cycles += request_timestamp & 0x1fff;
timestamp = request_timestamp & ~0x1fff;
@@ -834,9 +856,12 @@ static struct fw_request *allocate_request(struct fw_card *card,
return NULL;
kref_init(&request->kref);
+ // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for
+ // local destination never runs in any type of IRQ context.
+ scoped_guard(spinlock_irqsave, &card->split_timeout.lock)
+ request->response.timestamp = compute_split_timeout_timestamp(card, p->timestamp);
+
request->response.speed = p->speed;
- request->response.timestamp =
- compute_split_timeout_timestamp(card, p->timestamp);
request->response.generation = p->generation;
request->response.ack = 0;
request->response.callback = free_response_callback;
@@ -1111,12 +1136,14 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
break;
}
- scoped_guard(spinlock_irqsave, &card->lock) {
- list_for_each_entry(iter, &card->transaction_list, link) {
+ // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for
+ // local destination never runs in any type of IRQ context.
+ scoped_guard(spinlock_irqsave, &card->transactions.lock) {
+ list_for_each_entry(iter, &card->transactions.list, link) {
if (iter->node_id == source && iter->tlabel == tlabel) {
if (try_cancel_split_timeout(iter)) {
list_del_init(&iter->link);
- card->tlabel_mask &= ~(1ULL << iter->tlabel);
+ card->transactions.tlabel_mask &= ~(1ULL << iter->tlabel);
t = iter;
}
break;
@@ -1196,7 +1223,11 @@ static void handle_topology_map(struct fw_card *card, struct fw_request *request
}
start = (offset - topology_map_region.start) / 4;
- memcpy(payload, &card->topology_map[start], length);
+
+ // NOTE: This can be without irqsave when we can guarantee that fw_send_request() for local
+ // destination never runs in any type of IRQ context.
+ scoped_guard(spinlock_irqsave, &card->topology_map.lock)
+ memcpy(payload, &card->topology_map.buffer[start], length);
fw_send_response(card, request, RCODE_COMPLETE);
}
@@ -1211,16 +1242,17 @@ static const struct fw_address_region registers_region =
.end = CSR_REGISTER_BASE | CSR_CONFIG_ROM, };
static void update_split_timeout(struct fw_card *card)
+__must_hold(&card->split_timeout.lock)
{
unsigned int cycles;
- cycles = card->split_timeout_hi * 8000 + (card->split_timeout_lo >> 19);
+ cycles = card->split_timeout.hi * 8000 + (card->split_timeout.lo >> 19);
/* minimum per IEEE 1394, maximum which doesn't overflow OHCI */
cycles = clamp(cycles, 800u, 3u * 8000u);
- card->split_timeout_cycles = cycles;
- card->split_timeout_jiffies = DIV_ROUND_UP(cycles * HZ, 8000);
+ card->split_timeout.cycles = cycles;
+ card->split_timeout.jiffies = isoc_cycles_to_jiffies(cycles);
}
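A worked example, assuming the IEEE 1394 default split timeout of 100 ms (hi = 0 seconds, lo encoding 800 cycles in bits 31..19):

	cycles  = 0 * 8000 + ((800 << 19) >> 19) = 800   /* within 800..24000 */
	jiffies = isoc_cycles_to_jiffies(800)
	        = usecs_to_jiffies(800 * USEC_PER_SEC / 8000)
	        = usecs_to_jiffies(100000)               /* i.e. 100 ms */

which is the same 100 ms the old DIV_ROUND_UP(cycles * HZ, 8000) produced.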
static void handle_registers(struct fw_card *card, struct fw_request *request,
@@ -1270,12 +1302,15 @@ static void handle_registers(struct fw_card *card, struct fw_request *request,
case CSR_SPLIT_TIMEOUT_HI:
if (tcode == TCODE_READ_QUADLET_REQUEST) {
- *data = cpu_to_be32(card->split_timeout_hi);
+ *data = cpu_to_be32(card->split_timeout.hi);
} else if (tcode == TCODE_WRITE_QUADLET_REQUEST) {
- guard(spinlock_irqsave)(&card->lock);
-
- card->split_timeout_hi = be32_to_cpu(*data) & 7;
- update_split_timeout(card);
+ // NOTE: This can be without irqsave when we can guarantee that
+ // __fw_send_request() for local destination never runs in any type of IRQ
+ // context.
+ scoped_guard(spinlock_irqsave, &card->split_timeout.lock) {
+ card->split_timeout.hi = be32_to_cpu(*data) & 7;
+ update_split_timeout(card);
+ }
} else {
rcode = RCODE_TYPE_ERROR;
}
@@ -1283,12 +1318,15 @@ static void handle_registers(struct fw_card *card, struct fw_request *request,
case CSR_SPLIT_TIMEOUT_LO:
if (tcode == TCODE_READ_QUADLET_REQUEST) {
- *data = cpu_to_be32(card->split_timeout_lo);
+ *data = cpu_to_be32(card->split_timeout.lo);
} else if (tcode == TCODE_WRITE_QUADLET_REQUEST) {
- guard(spinlock_irqsave)(&card->lock);
-
- card->split_timeout_lo = be32_to_cpu(*data) & 0xfff80000;
- update_split_timeout(card);
+ // NOTE: This can be without irqsave when we can guarantee that
+ // __fw_send_request() for local destination never runs in any type of IRQ
+ // context.
+ scoped_guard(spinlock_irqsave, &card->split_timeout.lock) {
+ card->split_timeout.lo = be32_to_cpu(*data) & 0xfff80000;
+ update_split_timeout(card);
+ }
} else {
rcode = RCODE_TYPE_ERROR;
}
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index 9b298af1cac0..e67395ce26b5 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -27,6 +27,11 @@ struct fw_packet;
/* -card */
+// The invalid value used when PHYs report inconsistent gap counts; it forces
+// a gap count reconfiguration and a bus reset.
+#define GAP_COUNT_MISMATCHED 0
+
+#define isoc_cycles_to_jiffies(cycles) usecs_to_jiffies((u32)div_u64((u64)cycles * USEC_PER_SEC, 8000))
+
extern __printf(2, 3)
void fw_err(const struct fw_card *card, const char *fmt, ...);
extern __printf(2, 3)
@@ -167,6 +172,9 @@ static inline void fw_iso_context_init_work(struct fw_iso_context *ctx, work_fun
/* -topology */
+// The initial value of BUS_MANAGER_ID register, to express nothing registered.
+#define BUS_MANAGER_ID_NOT_REGISTERED 0x3f
+
enum {
FW_NODE_CREATED,
FW_NODE_UPDATED,
@@ -194,8 +202,8 @@ struct fw_node {
/* For serializing node topology into a list. */
struct list_head link;
- /* Upper layer specific data. */
- void *data;
+ // The device when already associated, else NULL.
+ struct fw_device *device;
struct fw_node *ports[] __counted_by(port_count);
};
@@ -219,6 +227,16 @@ static inline void fw_node_put(struct fw_node *node)
kref_put(&node->kref, release_node);
}
+static inline struct fw_device *fw_node_get_device(struct fw_node *node)
+{
+ return node->device;
+}
+
+static inline void fw_node_set_device(struct fw_node *node, struct fw_device *device)
+{
+ node->device = device;
+}
+
void fw_core_handle_bus_reset(struct fw_card *card, int node_id,
int generation, int self_id_count, u32 *self_ids, bool bm_abdicate);
void fw_destroy_nodes(struct fw_card *card);
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 5d8301b0f3aa..030aed5453a1 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -228,13 +228,10 @@ struct fw_ohci {
__le32 *self_id;
dma_addr_t self_id_bus;
- struct work_struct bus_reset_work;
u32 self_id_buffer[512];
};
-static struct workqueue_struct *selfid_workqueue;
-
static inline struct fw_ohci *fw_ohci(struct fw_card *card)
{
return container_of(card, struct fw_ohci, card);
@@ -393,225 +390,10 @@ MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
", IR wake unreliable = " __stringify(QUIRK_IR_WAKE)
")");
-#define OHCI_PARAM_DEBUG_AT_AR 1
-#define OHCI_PARAM_DEBUG_SELFIDS 2
-#define OHCI_PARAM_DEBUG_IRQS 4
-
-static int param_debug;
-module_param_named(debug, param_debug, int, 0644);
-MODULE_PARM_DESC(debug, "Verbose logging, deprecated in v6.11 kernel or later. (default = 0"
- ", AT/AR events = " __stringify(OHCI_PARAM_DEBUG_AT_AR)
- ", self-IDs = " __stringify(OHCI_PARAM_DEBUG_SELFIDS)
- ", IRQs = " __stringify(OHCI_PARAM_DEBUG_IRQS)
- ", or a combination, or all = -1)");
-
static bool param_remote_dma;
module_param_named(remote_dma, param_remote_dma, bool, 0444);
MODULE_PARM_DESC(remote_dma, "Enable unfiltered remote DMA (default = N)");
-static void log_irqs(struct fw_ohci *ohci, u32 evt)
-{
- if (likely(!(param_debug & OHCI_PARAM_DEBUG_IRQS)))
- return;
-
- ohci_notice(ohci, "IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
- evt & OHCI1394_selfIDComplete ? " selfID" : "",
- evt & OHCI1394_RQPkt ? " AR_req" : "",
- evt & OHCI1394_RSPkt ? " AR_resp" : "",
- evt & OHCI1394_reqTxComplete ? " AT_req" : "",
- evt & OHCI1394_respTxComplete ? " AT_resp" : "",
- evt & OHCI1394_isochRx ? " IR" : "",
- evt & OHCI1394_isochTx ? " IT" : "",
- evt & OHCI1394_postedWriteErr ? " postedWriteErr" : "",
- evt & OHCI1394_cycleTooLong ? " cycleTooLong" : "",
- evt & OHCI1394_cycle64Seconds ? " cycle64Seconds" : "",
- evt & OHCI1394_cycleInconsistent ? " cycleInconsistent" : "",
- evt & OHCI1394_regAccessFail ? " regAccessFail" : "",
- evt & OHCI1394_unrecoverableError ? " unrecoverableError" : "",
- evt & OHCI1394_busReset ? " busReset" : "",
- evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt |
- OHCI1394_RSPkt | OHCI1394_reqTxComplete |
- OHCI1394_respTxComplete | OHCI1394_isochRx |
- OHCI1394_isochTx | OHCI1394_postedWriteErr |
- OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds |
- OHCI1394_cycleInconsistent |
- OHCI1394_regAccessFail | OHCI1394_busReset)
- ? " ?" : "");
-}
-
-static void log_selfids(struct fw_ohci *ohci, int generation, int self_id_count)
-{
- static const char *const speed[] = {
- [0] = "S100", [1] = "S200", [2] = "S400", [3] = "beta",
- };
- static const char *const power[] = {
- [0] = "+0W", [1] = "+15W", [2] = "+30W", [3] = "+45W",
- [4] = "-3W", [5] = " ?W", [6] = "-3..-6W", [7] = "-3..-10W",
- };
- static const char port[] = {
- [PHY_PACKET_SELF_ID_PORT_STATUS_NONE] = '.',
- [PHY_PACKET_SELF_ID_PORT_STATUS_NCONN] = '-',
- [PHY_PACKET_SELF_ID_PORT_STATUS_PARENT] = 'p',
- [PHY_PACKET_SELF_ID_PORT_STATUS_CHILD] = 'c',
- };
- struct self_id_sequence_enumerator enumerator = {
- .cursor = ohci->self_id_buffer,
- .quadlet_count = self_id_count,
- };
-
- if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS)))
- return;
-
- ohci_notice(ohci, "%d selfIDs, generation %d, local node ID %04x\n",
- self_id_count, generation, ohci->node_id);
-
- while (enumerator.quadlet_count > 0) {
- unsigned int quadlet_count;
- unsigned int port_index;
- const u32 *s;
- int i;
-
- s = self_id_sequence_enumerator_next(&enumerator, &quadlet_count);
- if (IS_ERR(s))
- break;
-
- ohci_notice(ohci,
- "selfID 0: %08x, phy %d [%c%c%c] %s gc=%d %s %s%s%s\n",
- *s,
- phy_packet_self_id_get_phy_id(*s),
- port[self_id_sequence_get_port_status(s, quadlet_count, 0)],
- port[self_id_sequence_get_port_status(s, quadlet_count, 1)],
- port[self_id_sequence_get_port_status(s, quadlet_count, 2)],
- speed[*s >> 14 & 3], *s >> 16 & 63,
- power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "",
- *s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : "");
-
- port_index = 3;
- for (i = 1; i < quadlet_count; ++i) {
- ohci_notice(ohci,
- "selfID n: %08x, phy %d [%c%c%c%c%c%c%c%c]\n",
- s[i],
- phy_packet_self_id_get_phy_id(s[i]),
- port[self_id_sequence_get_port_status(s, quadlet_count, port_index)],
- port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 1)],
- port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 2)],
- port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 3)],
- port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 4)],
- port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 5)],
- port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 6)],
- port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 7)]
- );
-
- port_index += 8;
- }
- }
-}
-
-static const char *evts[] = {
- [0x00] = "evt_no_status", [0x01] = "-reserved-",
- [0x02] = "evt_long_packet", [0x03] = "evt_missing_ack",
- [0x04] = "evt_underrun", [0x05] = "evt_overrun",
- [0x06] = "evt_descriptor_read", [0x07] = "evt_data_read",
- [0x08] = "evt_data_write", [0x09] = "evt_bus_reset",
- [0x0a] = "evt_timeout", [0x0b] = "evt_tcode_err",
- [0x0c] = "-reserved-", [0x0d] = "-reserved-",
- [0x0e] = "evt_unknown", [0x0f] = "evt_flushed",
- [0x10] = "-reserved-", [0x11] = "ack_complete",
- [0x12] = "ack_pending ", [0x13] = "-reserved-",
- [0x14] = "ack_busy_X", [0x15] = "ack_busy_A",
- [0x16] = "ack_busy_B", [0x17] = "-reserved-",
- [0x18] = "-reserved-", [0x19] = "-reserved-",
- [0x1a] = "-reserved-", [0x1b] = "ack_tardy",
- [0x1c] = "-reserved-", [0x1d] = "ack_data_error",
- [0x1e] = "ack_type_error", [0x1f] = "-reserved-",
- [0x20] = "pending/cancelled",
-};
-
-static void log_ar_at_event(struct fw_ohci *ohci,
- char dir, int speed, u32 *header, int evt)
-{
- static const char *const tcodes[] = {
- [TCODE_WRITE_QUADLET_REQUEST] = "QW req",
- [TCODE_WRITE_BLOCK_REQUEST] = "BW req",
- [TCODE_WRITE_RESPONSE] = "W resp",
- [0x3] = "-reserved-",
- [TCODE_READ_QUADLET_REQUEST] = "QR req",
- [TCODE_READ_BLOCK_REQUEST] = "BR req",
- [TCODE_READ_QUADLET_RESPONSE] = "QR resp",
- [TCODE_READ_BLOCK_RESPONSE] = "BR resp",
- [TCODE_CYCLE_START] = "cycle start",
- [TCODE_LOCK_REQUEST] = "Lk req",
- [TCODE_STREAM_DATA] = "async stream packet",
- [TCODE_LOCK_RESPONSE] = "Lk resp",
- [0xc] = "-reserved-",
- [0xd] = "-reserved-",
- [TCODE_LINK_INTERNAL] = "link internal",
- [0xf] = "-reserved-",
- };
- int tcode = async_header_get_tcode(header);
- char specific[12];
-
- if (likely(!(param_debug & OHCI_PARAM_DEBUG_AT_AR)))
- return;
-
- if (unlikely(evt >= ARRAY_SIZE(evts)))
- evt = 0x1f;
-
- if (evt == OHCI1394_evt_bus_reset) {
- ohci_notice(ohci, "A%c evt_bus_reset, generation %d\n",
- dir, (header[2] >> 16) & 0xff);
- return;
- }
-
- switch (tcode) {
- case TCODE_WRITE_QUADLET_REQUEST:
- case TCODE_READ_QUADLET_RESPONSE:
- case TCODE_CYCLE_START:
- snprintf(specific, sizeof(specific), " = %08x",
- be32_to_cpu((__force __be32)header[3]));
- break;
- case TCODE_WRITE_BLOCK_REQUEST:
- case TCODE_READ_BLOCK_REQUEST:
- case TCODE_READ_BLOCK_RESPONSE:
- case TCODE_LOCK_REQUEST:
- case TCODE_LOCK_RESPONSE:
- snprintf(specific, sizeof(specific), " %x,%x",
- async_header_get_data_length(header),
- async_header_get_extended_tcode(header));
- break;
- default:
- specific[0] = '\0';
- }
-
- switch (tcode) {
- case TCODE_STREAM_DATA:
- ohci_notice(ohci, "A%c %s, %s\n",
- dir, evts[evt], tcodes[tcode]);
- break;
- case TCODE_LINK_INTERNAL:
- ohci_notice(ohci, "A%c %s, PHY %08x %08x\n",
- dir, evts[evt], header[1], header[2]);
- break;
- case TCODE_WRITE_QUADLET_REQUEST:
- case TCODE_WRITE_BLOCK_REQUEST:
- case TCODE_READ_QUADLET_REQUEST:
- case TCODE_READ_BLOCK_REQUEST:
- case TCODE_LOCK_REQUEST:
- ohci_notice(ohci,
- "A%c spd %x tl %02x, %04x -> %04x, %s, %s, %012llx%s\n",
- dir, speed, async_header_get_tlabel(header),
- async_header_get_source(header), async_header_get_destination(header),
- evts[evt], tcodes[tcode], async_header_get_offset(header), specific);
- break;
- default:
- ohci_notice(ohci,
- "A%c spd %x tl %02x, %04x -> %04x, %s, %s%s\n",
- dir, speed, async_header_get_tlabel(header),
- async_header_get_source(header), async_header_get_destination(header),
- evts[evt], tcodes[tcode], specific);
- }
-}
-
static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
{
writel(data, ohci->registers + offset);
@@ -957,8 +739,6 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
p.timestamp = status & 0xffff;
p.generation = ohci->request_generation;
- log_ar_at_event(ohci, 'R', p.speed, p.header, evt);
-
/*
* Several controllers, notably from NEC and VIA, forget to
* write ack_complete status at PHY packet reception.
@@ -977,7 +757,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
*
* Alas some chips sometimes emit bus reset packets with a
* wrong generation. We set the correct generation for these
- * at a slightly incorrect time (in bus_reset_work).
+ * at a slightly incorrect time (in handle_selfid_complete_event).
*/
if (evt == OHCI1394_evt_bus_reset) {
if (!(ohci->quirks & QUIRK_RESET_PACKET))
@@ -1566,8 +1346,6 @@ static int handle_at_packet(struct context *context,
evt = le16_to_cpu(last->transfer_status) & 0x1f;
packet->timestamp = le16_to_cpu(last->res_count);
- log_ar_at_event(ohci, 'T', packet->speed, packet->header, evt);
-
switch (evt) {
case OHCI1394_evt_timeout:
/* Async response transmit timed out. */
@@ -1772,6 +1550,25 @@ static void at_context_transmit(struct at_context *ctx, struct fw_packet *packet
static void detect_dead_context(struct fw_ohci *ohci,
const char *name, unsigned int regs)
{
+ static const char *const evts[] = {
+ [0x00] = "evt_no_status", [0x01] = "-reserved-",
+ [0x02] = "evt_long_packet", [0x03] = "evt_missing_ack",
+ [0x04] = "evt_underrun", [0x05] = "evt_overrun",
+ [0x06] = "evt_descriptor_read", [0x07] = "evt_data_read",
+ [0x08] = "evt_data_write", [0x09] = "evt_bus_reset",
+ [0x0a] = "evt_timeout", [0x0b] = "evt_tcode_err",
+ [0x0c] = "-reserved-", [0x0d] = "-reserved-",
+ [0x0e] = "evt_unknown", [0x0f] = "evt_flushed",
+ [0x10] = "-reserved-", [0x11] = "ack_complete",
+ [0x12] = "ack_pending ", [0x13] = "-reserved-",
+ [0x14] = "ack_busy_X", [0x15] = "ack_busy_A",
+ [0x16] = "ack_busy_B", [0x17] = "-reserved-",
+ [0x18] = "-reserved-", [0x19] = "-reserved-",
+ [0x1a] = "-reserved-", [0x1b] = "ack_tardy",
+ [0x1c] = "-reserved-", [0x1d] = "ack_data_error",
+ [0x1e] = "ack_type_error", [0x1f] = "-reserved-",
+ [0x20] = "pending/cancelled",
+ };
u32 ctl;
ctl = reg_read(ohci, CONTROL_SET(regs));
@@ -2030,9 +1827,9 @@ static int find_and_insert_self_id(struct fw_ohci *ohci, int self_id_count)
return self_id_count;
}
-static void bus_reset_work(struct work_struct *work)
+static irqreturn_t handle_selfid_complete_event(int irq, void *data)
{
- struct fw_ohci *ohci = from_work(ohci, work, bus_reset_work);
+ struct fw_ohci *ohci = data;
int self_id_count, generation, new_generation, i, j;
u32 reg, quadlet;
void *free_rom = NULL;
@@ -2043,11 +1840,11 @@ static void bus_reset_work(struct work_struct *work)
if (!(reg & OHCI1394_NodeID_idValid)) {
ohci_notice(ohci,
"node ID not valid, new bus reset in progress\n");
- return;
+ goto end;
}
if ((reg & OHCI1394_NodeID_nodeNumber) == 63) {
ohci_notice(ohci, "malconfigured bus\n");
- return;
+ goto end;
}
ohci->node_id = reg & (OHCI1394_NodeID_busNumber |
OHCI1394_NodeID_nodeNumber);
@@ -2061,8 +1858,11 @@ static void bus_reset_work(struct work_struct *work)
reg = reg_read(ohci, OHCI1394_SelfIDCount);
if (ohci1394_self_id_count_is_error(reg)) {
ohci_notice(ohci, "self ID receive error\n");
- return;
+ goto end;
}
+
+ trace_self_id_complete(ohci->card.index, reg, ohci->self_id, has_be_header_quirk(ohci));
+
/*
* The count in the SelfIDCount register is the number of
* bytes in the self ID receive buffer. Since we also receive
@@ -2073,7 +1873,7 @@ static void bus_reset_work(struct work_struct *work)
if (self_id_count > 252) {
ohci_notice(ohci, "bad selfIDSize (%08x)\n", reg);
- return;
+ goto end;
}
quadlet = cond_le32_to_cpu(ohci->self_id[0], has_be_header_quirk(ohci));
@@ -2100,7 +1900,7 @@ static void bus_reset_work(struct work_struct *work)
ohci_notice(ohci, "bad self ID %d/%d (%08x != ~%08x)\n",
j, self_id_count, id, id2);
- return;
+ goto end;
}
ohci->self_id_buffer[j] = id;
}
@@ -2110,13 +1910,13 @@ static void bus_reset_work(struct work_struct *work)
if (self_id_count < 0) {
ohci_notice(ohci,
"could not construct local self ID\n");
- return;
+ goto end;
}
}
if (self_id_count == 0) {
ohci_notice(ohci, "no self IDs\n");
- return;
+ goto end;
}
rmb();
@@ -2138,7 +1938,7 @@ static void bus_reset_work(struct work_struct *work)
new_generation = ohci1394_self_id_count_get_generation(reg);
if (new_generation != generation) {
ohci_notice(ohci, "new bus reset, discarding self ids\n");
- return;
+ goto end;
}
// FIXME: Document how the locking works.
@@ -2195,12 +1995,12 @@ static void bus_reset_work(struct work_struct *work)
if (free_rom)
dmam_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, free_rom, free_rom_bus);
- log_selfids(ohci, generation, self_id_count);
-
fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
self_id_count, ohci->self_id_buffer,
ohci->csr_state_setclear_abdicate);
ohci->csr_state_setclear_abdicate = false;
+end:
+ return IRQ_HANDLED;
}
static irqreturn_t irq_handler(int irq, void *data)
@@ -2214,11 +2014,6 @@ static irqreturn_t irq_handler(int irq, void *data)
if (!event || !~event)
return IRQ_NONE;
- if (unlikely(param_debug > 0)) {
- dev_notice_ratelimited(ohci->card.device,
- "The debug parameter is superseded by tracepoints events, and deprecated.");
- }
-
/*
* busReset and postedWriteErr events must not be cleared yet
* (OHCI 1.1 clauses 7.2.3.2 and 13.2.8.1)
@@ -2226,21 +2021,11 @@ static irqreturn_t irq_handler(int irq, void *data)
reg_write(ohci, OHCI1394_IntEventClear,
event & ~(OHCI1394_busReset | OHCI1394_postedWriteErr));
trace_irqs(ohci->card.index, event);
- log_irqs(ohci, event);
- // The flag is masked again at bus_reset_work() scheduled by selfID event.
+
+ // The flag is masked again in handle_selfid_complete_event(), which runs for the selfID event.
if (event & OHCI1394_busReset)
reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset);
- if (event & OHCI1394_selfIDComplete) {
- if (trace_self_id_complete_enabled()) {
- u32 reg = reg_read(ohci, OHCI1394_SelfIDCount);
-
- trace_self_id_complete(ohci->card.index, reg, ohci->self_id,
- has_be_header_quirk(ohci));
- }
- queue_work(selfid_workqueue, &ohci->bus_reset_work);
- }
-
if (event & OHCI1394_RQPkt)
queue_work(ohci->card.async_wq, &ohci->ar_request_ctx.work);
@@ -2311,7 +2096,10 @@ static irqreturn_t irq_handler(int irq, void *data)
} else
flush_writes(ohci);
- return IRQ_HANDLED;
+ if (event & OHCI1394_selfIDComplete)
+ return IRQ_WAKE_THREAD;
+ else
+ return IRQ_HANDLED;
}
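The return value change above is the core of the conversion from the self-ID workqueue to a threaded IRQ. A minimal sketch of the pattern, with illustrative names:

	/* hard handler: runs in hardIRQ context, must not sleep */
	static irqreturn_t hard_handler(int irq, void *data)
	{
		u32 event = read_and_ack_events(data);	/* illustrative */

		handle_fast_events(event);		/* AR/AT/isoc queueing */
		if (event & OHCI1394_selfIDComplete)
			return IRQ_WAKE_THREAD;		/* run thread_fn */
		return IRQ_HANDLED;
	}

	err = request_threaded_irq(irq, hard_handler, thread_fn, flags, name, dev);

Without IRQF_ONESHOT the line stays unmasked, so hard_handler keeps servicing new events while thread_fn (here handle_selfid_complete_event()) parses the self-ID buffer in sleepable context.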
static int software_reset(struct fw_ohci *ohci)
@@ -2624,7 +2412,7 @@ static int ohci_set_config_rom(struct fw_card *card,
* then set up the real values for the two registers.
*
* We use ohci->lock to avoid racing with the code that sets
- * ohci->next_config_rom to NULL (see bus_reset_work).
+ * ohci->next_config_rom to NULL (see handle_selfid_complete_event).
*/
next_config_rom = dmam_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
@@ -2705,7 +2493,6 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
dma_unmap_single(ohci->card.device, packet->payload_bus,
packet->payload_length, DMA_TO_DEVICE);
- log_ar_at_event(ohci, 'T', packet->speed, packet->header, 0x20);
driver_data->packet = NULL;
packet->ack = RCODE_CANCELLED;
@@ -3695,7 +3482,6 @@ static int pci_probe(struct pci_dev *dev,
u32 bus_options, max_receive, link_speed, version;
u64 guid;
int i, flags, irq, err;
- size_t size;
if (dev->vendor == PCI_VENDOR_ID_PINNACLE_SYSTEMS) {
dev_err(&dev->dev, "Pinnacle MovieBoard is not yet supported\n");
@@ -3722,8 +3508,6 @@ static int pci_probe(struct pci_dev *dev,
spin_lock_init(&ohci->lock);
mutex_init(&ohci->phy_reg_mutex);
- INIT_WORK(&ohci->bus_reset_work, bus_reset_work);
-
if (!(pci_resource_flags(dev, 0) & IORESOURCE_MEM) ||
pci_resource_len(dev, 0) < OHCI1394_REGISTER_SIZE) {
ohci_err(ohci, "invalid MMIO resource\n");
@@ -3791,8 +3575,7 @@ static int pci_probe(struct pci_dev *dev,
reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
ohci->ir_context_mask = ohci->ir_context_support;
ohci->n_ir = hweight32(ohci->ir_context_mask);
- size = sizeof(struct iso_context) * ohci->n_ir;
- ohci->ir_context_list = devm_kzalloc(&dev->dev, size, GFP_KERNEL);
+ ohci->ir_context_list = devm_kcalloc(&dev->dev, ohci->n_ir, sizeof(struct iso_context), GFP_KERNEL);
if (!ohci->ir_context_list)
return -ENOMEM;
@@ -3806,8 +3589,7 @@ static int pci_probe(struct pci_dev *dev,
reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
ohci->it_context_mask = ohci->it_context_support;
ohci->n_it = hweight32(ohci->it_context_mask);
- size = sizeof(struct iso_context) * ohci->n_it;
- ohci->it_context_list = devm_kzalloc(&dev->dev, size, GFP_KERNEL);
+ ohci->it_context_list = devm_kcalloc(&dev->dev, ohci->n_it, sizeof(struct iso_context), GFP_KERNEL);
if (!ohci->it_context_list)
return -ENOMEM;
@@ -3832,7 +3614,9 @@ static int pci_probe(struct pci_dev *dev,
goto fail_msi;
}
- err = request_threaded_irq(irq, irq_handler, NULL,
+ // IRQF_ONESHOT is not applied so that other events are still handled in the hardIRQ
+ // handler while the threaded IRQ handler runs for the SelfIDComplete event.
+ err = request_threaded_irq(irq, irq_handler, handle_selfid_complete_event,
pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED, ohci_driver_name,
ohci);
if (err < 0) {
@@ -3876,7 +3660,6 @@ static void pci_remove(struct pci_dev *dev)
reg_write(ohci, OHCI1394_IntMaskClear, ~0);
flush_writes(ohci);
}
- cancel_work_sync(&ohci->bus_reset_work);
fw_core_remove_card(&ohci->card);
/*
@@ -3949,17 +3732,12 @@ static struct pci_driver fw_ohci_pci_driver = {
static int __init fw_ohci_init(void)
{
- selfid_workqueue = alloc_workqueue(KBUILD_MODNAME, WQ_MEM_RECLAIM, 0);
- if (!selfid_workqueue)
- return -ENOMEM;
-
return pci_register_driver(&fw_ohci_pci_driver);
}
static void __exit fw_ohci_cleanup(void)
{
pci_unregister_driver(&fw_ohci_pci_driver);
- destroy_workqueue(selfid_workqueue);
}
module_init(fw_ohci_init);
diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
index 24e59ddf85e7..c7698cfaa4e8 100644
--- a/drivers/firmware/arm_scmi/bus.c
+++ b/drivers/firmware/arm_scmi/bus.c
@@ -401,8 +401,8 @@ static void scmi_device_release(struct device *dev)
static void __scmi_device_destroy(struct scmi_device *scmi_dev)
{
- pr_debug("(%s) Destroying SCMI device '%s' for protocol 0x%x (%s)\n",
- of_node_full_name(scmi_dev->dev.parent->of_node),
+ pr_debug("(%pOF) Destroying SCMI device '%s' for protocol 0x%x (%s)\n",
+ scmi_dev->dev.parent->of_node,
dev_name(&scmi_dev->dev), scmi_dev->protocol_id,
scmi_dev->name);
@@ -474,9 +474,8 @@ __scmi_device_create(struct device_node *np, struct device *parent,
if (retval)
goto put_dev;
- pr_debug("(%s) Created SCMI device '%s' for protocol 0x%x (%s)\n",
- of_node_full_name(parent->of_node),
- dev_name(&scmi_dev->dev), protocol, name);
+ pr_debug("(%pOF) Created SCMI device '%s' for protocol 0x%x (%s)\n",
+ parent->of_node, dev_name(&scmi_dev->dev), protocol, name);
return scmi_dev;
put_dev:
@@ -493,8 +492,8 @@ _scmi_device_create(struct device_node *np, struct device *parent,
sdev = __scmi_device_create(np, parent, protocol, name);
if (!sdev)
- pr_err("(%s) Failed to create device for protocol 0x%x (%s)\n",
- of_node_full_name(parent->of_node), protocol, name);
+ pr_err("(%pOF) Failed to create device for protocol 0x%x (%s)\n",
+ parent->of_node, protocol, name);
return sdev;
}
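These conversions use the printk extension for device-tree nodes; %pOF on a struct device_node pointer prints the node's full path directly:

	/* %pOF prints e.g. "/firmware/scmi" (path is illustrative),
	   replacing the manual of_node_full_name() calls. */
	pr_debug("(%pOF) ...\n", parent->of_node);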
diff --git a/drivers/firmware/arm_scmi/quirks.c b/drivers/firmware/arm_scmi/quirks.c
index 03960aca3610..03848283c2a0 100644
--- a/drivers/firmware/arm_scmi/quirks.c
+++ b/drivers/firmware/arm_scmi/quirks.c
@@ -71,6 +71,7 @@
*/
#include <linux/ctype.h>
+#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/hashtable.h>
@@ -89,9 +90,9 @@
struct scmi_quirk {
bool enabled;
const char *name;
- char *vendor;
- char *sub_vendor_id;
- char *impl_ver_range;
+ const char *vendor;
+ const char *sub_vendor_id;
+ const char *impl_ver_range;
u32 start_range;
u32 end_range;
struct static_key_false *key;
@@ -217,7 +218,7 @@ static unsigned int scmi_quirk_signature(const char *vend, const char *sub_vend)
static int scmi_quirk_range_parse(struct scmi_quirk *quirk)
{
- const char *last, *first = quirk->impl_ver_range;
+ const char *last, *first __free(kfree) = NULL;
size_t len;
char *sep;
int ret;
@@ -228,8 +229,12 @@ static int scmi_quirk_range_parse(struct scmi_quirk *quirk)
if (!len)
return 0;
+ first = kmemdup(quirk->impl_ver_range, len + 1, GFP_KERNEL);
+ if (!first)
+ return -ENOMEM;
+
last = first + len - 1;
- sep = strchr(quirk->impl_ver_range, '-');
+ sep = strchr(first, '-');
if (sep)
*sep = '\0';
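The duplication is what makes the const-ification above safe: impl_ver_range now typically points at a string literal, and the parser splits the range by writing a '\0' at the separator. A condensed sketch of the pattern, assuming <linux/cleanup.h> semantics (the "2.0-3.1" range string is only illustrative):

	const char *first __free(kfree) =
		kmemdup("2.0-3.1", strlen("2.0-3.1") + 1, GFP_KERNEL); /* writable copy */
	char *sep = strchr(first, '-');	/* points into the copy, not rodata */

	if (sep)
		*sep = '\0';	/* first == "2.0", sep + 1 == "3.1" */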
diff --git a/drivers/firmware/arm_scmi/transports/mailbox.c b/drivers/firmware/arm_scmi/transports/mailbox.c
index bd041c99b92b..ae0f67e6cc45 100644
--- a/drivers/firmware/arm_scmi/transports/mailbox.c
+++ b/drivers/firmware/arm_scmi/transports/mailbox.c
@@ -127,8 +127,8 @@ static int mailbox_chan_validate(struct device *cdev, int *a2p_rx_chan,
(num_mb == 1 && num_sh != 1) || (num_mb == 3 && num_sh != 2) ||
(num_mb == 4 && num_sh != 2)) {
dev_warn(cdev,
- "Invalid channel descriptor for '%s' - mbs:%d shm:%d\n",
- of_node_full_name(np), num_mb, num_sh);
+ "Invalid channel descriptor for '%pOF' - mbs:%d shm:%d\n",
+ np, num_mb, num_sh);
return -EINVAL;
}
@@ -140,8 +140,7 @@ static int mailbox_chan_validate(struct device *cdev, int *a2p_rx_chan,
of_parse_phandle(np, "shmem", 1);
if (!np_tx || !np_rx || np_tx == np_rx) {
- dev_warn(cdev, "Invalid shmem descriptor for '%s'\n",
- of_node_full_name(np));
+ dev_warn(cdev, "Invalid shmem descriptor for '%pOF'\n", np);
ret = -EINVAL;
}
}
diff --git a/drivers/firmware/arm_scmi/transports/optee.c b/drivers/firmware/arm_scmi/transports/optee.c
index 3949a877e17d..dc0f46340153 100644
--- a/drivers/firmware/arm_scmi/transports/optee.c
+++ b/drivers/firmware/arm_scmi/transports/optee.c
@@ -498,7 +498,7 @@ static void scmi_optee_mark_txdone(struct scmi_chan_info *cinfo, int ret,
mutex_unlock(&channel->mu);
}
-static struct scmi_transport_ops scmi_optee_ops = {
+static const struct scmi_transport_ops scmi_optee_ops = {
.chan_available = scmi_optee_chan_available,
.chan_setup = scmi_optee_chan_setup,
.chan_free = scmi_optee_chan_free,
diff --git a/drivers/firmware/arm_scmi/transports/virtio.c b/drivers/firmware/arm_scmi/transports/virtio.c
index cb934db9b2b4..326c4a93e44b 100644
--- a/drivers/firmware/arm_scmi/transports/virtio.c
+++ b/drivers/firmware/arm_scmi/transports/virtio.c
@@ -871,6 +871,9 @@ static int scmi_vio_probe(struct virtio_device *vdev)
/* Ensure initialized scmi_vdev is visible */
smp_store_mb(scmi_vdev, vdev);
+ /* Set device ready */
+ virtio_device_ready(vdev);
+
ret = platform_driver_register(&scmi_virtio_driver);
if (ret) {
vdev->priv = NULL;
diff --git a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c
index a8915d3b4df5..700a3f24f4ef 100644
--- a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c
+++ b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c
@@ -25,7 +25,10 @@
enum scmi_imx_misc_protocol_cmd {
SCMI_IMX_MISC_CTRL_SET = 0x3,
SCMI_IMX_MISC_CTRL_GET = 0x4,
+ SCMI_IMX_MISC_DISCOVER_BUILD_INFO = 0x6,
SCMI_IMX_MISC_CTRL_NOTIFY = 0x8,
+ SCMI_IMX_MISC_CFG_INFO_GET = 0xC,
+ SCMI_IMX_MISC_BOARD_INFO = 0xE,
};
struct scmi_imx_misc_info {
@@ -65,6 +68,27 @@ struct scmi_imx_misc_ctrl_get_out {
__le32 val[];
};
+struct scmi_imx_misc_buildinfo_out {
+ __le32 buildnum;
+ __le32 buildcommit;
+#define MISC_MAX_BUILDDATE 16
+ u8 builddate[MISC_MAX_BUILDDATE];
+#define MISC_MAX_BUILDTIME 16
+ u8 buildtime[MISC_MAX_BUILDTIME];
+};
+
+struct scmi_imx_misc_board_info_out {
+ __le32 attributes;
+#define MISC_MAX_BRDNAME 16
+ u8 brdname[MISC_MAX_BRDNAME];
+};
+
+struct scmi_imx_misc_cfg_info_out {
+ __le32 msel;
+#define MISC_MAX_CFGNAME 16
+ u8 cfgname[MISC_MAX_CFGNAME];
+};
+
static int scmi_imx_misc_attributes_get(const struct scmi_protocol_handle *ph,
struct scmi_imx_misc_info *mi)
{
@@ -272,6 +296,81 @@ static int scmi_imx_misc_ctrl_set(const struct scmi_protocol_handle *ph,
return ret;
}
+static int scmi_imx_misc_build_info_discover(const struct scmi_protocol_handle *ph)
+{
+ char date[MISC_MAX_BUILDDATE], time[MISC_MAX_BUILDTIME];
+ struct scmi_imx_misc_buildinfo_out *out;
+ struct scmi_xfer *t;
+ int ret;
+
+ ret = ph->xops->xfer_get_init(ph, SCMI_IMX_MISC_DISCOVER_BUILD_INFO, 0,
+ sizeof(*out), &t);
+ if (ret)
+ return ret;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ out = t->rx.buf;
+ strscpy(date, out->builddate, MISC_MAX_BUILDDATE);
+ strscpy(time, out->buildtime, MISC_MAX_BUILDTIME);
+ dev_info(ph->dev, "SM Version\t= Build %u, Commit %08x %s %s\n",
+ le32_to_cpu(out->buildnum), le32_to_cpu(out->buildcommit),
+ date, time);
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_imx_misc_board_info(const struct scmi_protocol_handle *ph)
+{
+ struct scmi_imx_misc_board_info_out *out;
+ char name[MISC_MAX_BRDNAME];
+ struct scmi_xfer *t;
+ int ret;
+
+ ret = ph->xops->xfer_get_init(ph, SCMI_IMX_MISC_BOARD_INFO, 0, sizeof(*out), &t);
+ if (ret)
+ return ret;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ out = t->rx.buf;
+ strscpy(name, out->brdname, MISC_MAX_BRDNAME);
+ dev_info(ph->dev, "Board\t\t= %s, attr=0x%08x\n",
+ name, le32_to_cpu(out->attributes));
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_imx_misc_cfg_info_get(const struct scmi_protocol_handle *ph)
+{
+ struct scmi_imx_misc_cfg_info_out *out;
+ char name[MISC_MAX_CFGNAME];
+ struct scmi_xfer *t;
+ int ret;
+
+ ret = ph->xops->xfer_get_init(ph, SCMI_IMX_MISC_CFG_INFO_GET, 0, sizeof(*out), &t);
+ if (ret)
+ return ret;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ out = t->rx.buf;
+ strscpy(name, out->cfgname, MISC_MAX_CFGNAME);
+ dev_info(ph->dev, "SM Config\t= %s, mSel = %u\n",
+ name, le32_to_cpu(out->msel));
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
static const struct scmi_imx_misc_proto_ops scmi_imx_misc_proto_ops = {
.misc_ctrl_set = scmi_imx_misc_ctrl_set,
.misc_ctrl_get = scmi_imx_misc_ctrl_get,
@@ -299,6 +398,18 @@ static int scmi_imx_misc_protocol_init(const struct scmi_protocol_handle *ph)
if (ret)
return ret;
+ ret = scmi_imx_misc_build_info_discover(ph);
+ if (ret && ret != -EOPNOTSUPP)
+ return ret;
+
+ ret = scmi_imx_misc_board_info(ph);
+ if (ret && ret != -EOPNOTSUPP)
+ return ret;
+
+ ret = scmi_imx_misc_cfg_info_get(ph);
+ if (ret && ret != -EOPNOTSUPP)
+ return ret;
+
return ph->set_priv(ph, minfo, version);
}
diff --git a/drivers/firmware/arm_scmi/vendors/imx/imx95.rst b/drivers/firmware/arm_scmi/vendors/imx/imx95.rst
index 4e246a78a042..741f4eace350 100644
--- a/drivers/firmware/arm_scmi/vendors/imx/imx95.rst
+++ b/drivers/firmware/arm_scmi/vendors/imx/imx95.rst
@@ -1660,6 +1660,7 @@ protocol_id: 0x84
|Name |Description |
+--------------------+---------------------------------------------------------+
|int32 status |SUCCESS: system log return |
+| |NOT_SUPPORTED: system log not available |
+--------------------+---------------------------------------------------------+
|uint32 numLogflags |Descriptor for the log data returned by this call. |
| |Bits[31:20] Number of remaining log words. |
@@ -1670,6 +1671,30 @@ protocol_id: 0x84
|uint32 syslog[N] |Log data array, N is defined in bits[11:0] of numLogflags|
+--------------------+---------------------------------------------------------+
+MISC_BOARD_INFO
+~~~~~~~~~~~~~~~
+
+message_id: 0xE
+protocol_id: 0x84
+
++--------------------+---------------------------------------------------------+
+|Return values |
++--------------------+---------------------------------------------------------+
+|Name |Description |
++--------------------+---------------------------------------------------------+
+|int32 status |SUCCESS: board info return |
+| |NOT_SUPPORTED: name not available |
++--------------------+---------------------------------------------------------+
+|uint32 attributes |Board-specific attributes reserved for future expansion |
+| |without breaking backwards compatibility. The firmware |
+| |sets the value to 0 |
++--------------------+---------------------------------------------------------+
+|uint8 boardname[16] |Board name. NULL terminated ASCII string, up to 16 bytes |
+| |in length. This is the System Manager (SM) firmware- |
+| |exported board name and may not align with the board |
+| |name in the device tree. |
++--------------------+---------------------------------------------------------+
+
NEGOTIATE_PROTOCOL_VERSION
~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/drivers/firmware/arm_scmi/voltage.c b/drivers/firmware/arm_scmi/voltage.c
index fda6a1573609..17127880e10a 100644
--- a/drivers/firmware/arm_scmi/voltage.c
+++ b/drivers/firmware/arm_scmi/voltage.c
@@ -393,7 +393,7 @@ static int scmi_voltage_domains_num_get(const struct scmi_protocol_handle *ph)
return vinfo->num_domains;
}
-static struct scmi_voltage_proto_ops voltage_proto_ops = {
+static const struct scmi_voltage_proto_ops voltage_proto_ops = {
.num_domains_get = scmi_voltage_domains_num_get,
.info_get = scmi_voltage_info_get,
.config_set = scmi_voltage_config_set,
diff --git a/drivers/firmware/broadcom/bcm47xx_sprom.c b/drivers/firmware/broadcom/bcm47xx_sprom.c
index 14fbcd11657c..fdcd3a07abcd 100644
--- a/drivers/firmware/broadcom/bcm47xx_sprom.c
+++ b/drivers/firmware/broadcom/bcm47xx_sprom.c
@@ -404,7 +404,7 @@ static void bcm47xx_sprom_fill_auto(struct ssb_sprom *sprom,
ENTRY(0x00000700, u8, pre, "noiselvl5gua1", noiselvl5gua[1], 0, fb);
ENTRY(0x00000700, u8, pre, "noiselvl5gua2", noiselvl5gua[2], 0, fb);
}
-#undef ENTRY /* It's specififc, uses local variable, don't use it (again). */
+#undef ENTRY /* It's specific, uses local variable, don't use it (again). */
static void bcm47xx_fill_sprom_path_r4589(struct ssb_sprom *sprom,
const char *prefix, bool fallback)
diff --git a/drivers/firmware/efi/efi-init.c b/drivers/firmware/efi/efi-init.c
index a00e07b853f2..a65c2d5b9e7b 100644
--- a/drivers/firmware/efi/efi-init.c
+++ b/drivers/firmware/efi/efi-init.c
@@ -12,6 +12,7 @@
#include <linux/efi.h>
#include <linux/fwnode.h>
#include <linux/init.h>
+#include <linux/kexec_handover.h>
#include <linux/memblock.h>
#include <linux/mm_types.h>
#include <linux/of.h>
@@ -164,12 +165,32 @@ static __init void reserve_regions(void)
pr_info("Processing EFI memory map:\n");
/*
- * Discard memblocks discovered so far: if there are any at this
- * point, they originate from memory nodes in the DT, and UEFI
- * uses its own memory map instead.
+ * Discard memblocks discovered so far except for KHO scratch
+ * regions. Most memblocks at this point originate from memory nodes
+ * in the DT, and UEFI uses its own memory map instead. However, if
+ * KHO is enabled, the scratch regions, which are known-good memory,
+ * must be preserved.
*/
memblock_dump_all();
- memblock_remove(0, PHYS_ADDR_MAX);
+
+ if (is_kho_boot()) {
+ struct memblock_region *r;
+
+ /* Remove all non-KHO regions */
+ for_each_mem_region(r) {
+ if (!memblock_is_kho_scratch(r)) {
+ memblock_remove(r->base, r->size);
+ r--;
+ }
+ }
+ } else {
+ /*
+ * KHO is disabled. Discard memblocks discovered so far:
+ * if there are any at this point, they originate from memory
+ * nodes in the DT, and UEFI uses its own memory map instead.
+ */
+ memblock_remove(0, PHYS_ADDR_MAX);
+ }
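One subtlety in the removal loop above is worth spelling out:

	/*
	 * Note on the r-- in the loop: memblock_remove() deletes the
	 * matching entry from memblock.memory and shifts the remaining
	 * regions down one slot, so the region that followed the removed
	 * one now sits at the cursor's current index. Stepping the cursor
	 * back lets the for_each_mem_region() increment land on it instead
	 * of skipping it.
	 */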
for_each_efi_memory_desc(md) {
paddr = md->phys_addr;
diff --git a/drivers/firmware/meson/Kconfig b/drivers/firmware/meson/Kconfig
index f2fdd3756648..179f5d46d8dd 100644
--- a/drivers/firmware/meson/Kconfig
+++ b/drivers/firmware/meson/Kconfig
@@ -5,7 +5,7 @@
config MESON_SM
tristate "Amlogic Secure Monitor driver"
depends on ARCH_MESON || COMPILE_TEST
- default y
+ default ARCH_MESON
depends on ARM64_4K_PAGES
help
Say y here to enable the Amlogic secure monitor driver
diff --git a/drivers/firmware/meson/meson_sm.c b/drivers/firmware/meson/meson_sm.c
index f25a9746249b..3ab67aaa9e5d 100644
--- a/drivers/firmware/meson/meson_sm.c
+++ b/drivers/firmware/meson/meson_sm.c
@@ -232,11 +232,16 @@ EXPORT_SYMBOL(meson_sm_call_write);
struct meson_sm_firmware *meson_sm_get(struct device_node *sm_node)
{
struct platform_device *pdev = of_find_device_by_node(sm_node);
+ struct meson_sm_firmware *fw;
if (!pdev)
return NULL;
- return platform_get_drvdata(pdev);
+ fw = platform_get_drvdata(pdev);
+
+ put_device(&pdev->dev);
+
+ return fw;
}
EXPORT_SYMBOL_GPL(meson_sm_get);
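The put_device() added above balances a reference that was previously leaked on every lookup:

	/*
	 * of_find_device_by_node() returns its result with the embedded
	 * struct device's refcount raised (get_device() under the hood).
	 * The drvdata pointer is read while that reference is held, then
	 * the reference is dropped, so the caller gets the firmware
	 * handle without pinning the platform device.
	 */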
diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c
index 26cd0458aacd..e777b7cb9b12 100644
--- a/drivers/firmware/qcom/qcom_scm.c
+++ b/drivers/firmware/qcom/qcom_scm.c
@@ -1119,7 +1119,7 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
if (ret) {
dev_err(__scm->dev,
"Assign memory protection call failed %d\n", ret);
- return -EINVAL;
+ return ret;
}
*srcvm = next_vm;
@@ -1994,11 +1994,14 @@ static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = {
{ .compatible = "asus,vivobook-s15" },
{ .compatible = "asus,zenbook-a14-ux3407qa" },
{ .compatible = "asus,zenbook-a14-ux3407ra" },
+ { .compatible = "dell,inspiron-14-plus-7441" },
+ { .compatible = "dell,latitude-7455" },
{ .compatible = "dell,xps13-9345" },
{ .compatible = "hp,elitebook-ultra-g1q" },
{ .compatible = "hp,omnibook-x14" },
{ .compatible = "huawei,gaokun3" },
{ .compatible = "lenovo,flex-5g" },
+ { .compatible = "lenovo,thinkbook-16" },
{ .compatible = "lenovo,thinkpad-t14s" },
{ .compatible = "lenovo,thinkpad-x13s", },
{ .compatible = "lenovo,yoga-slim7x" },
@@ -2006,6 +2009,7 @@ static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = {
{ .compatible = "microsoft,blackrock" },
{ .compatible = "microsoft,romulus13", },
{ .compatible = "microsoft,romulus15", },
+ { .compatible = "qcom,hamoa-iot-evk" },
{ .compatible = "qcom,sc8180x-primus" },
{ .compatible = "qcom,x1e001de-devkit" },
{ .compatible = "qcom,x1e80100-crd" },
@@ -2094,6 +2098,122 @@ static int qcom_scm_qseecom_init(struct qcom_scm *scm)
#endif /* CONFIG_QCOM_QSEECOM */
/**
+ * qcom_scm_qtee_invoke_smc() - Invoke a QTEE object.
+ * @inbuf: start address of memory area used for inbound buffer.
+ * @inbuf_size: size of the memory area used for inbound buffer.
+ * @outbuf: start address of memory area used for outbound buffer.
+ * @outbuf_size: size of the memory area used for outbound buffer.
+ * @result: result of QTEE object invocation.
+ * @response_type: response type returned by QTEE.
+ *
+ * @response_type determines how the contents of @inbuf and @outbuf
+ * should be processed.
+ *
+ * Return: 0 on success, or a negative errno on failure.
+ */
+int qcom_scm_qtee_invoke_smc(phys_addr_t inbuf, size_t inbuf_size,
+ phys_addr_t outbuf, size_t outbuf_size,
+ u64 *result, u64 *response_type)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_SMCINVOKE,
+ .cmd = QCOM_SCM_SMCINVOKE_INVOKE,
+ .owner = ARM_SMCCC_OWNER_TRUSTED_OS,
+ .args[0] = inbuf,
+ .args[1] = inbuf_size,
+ .args[2] = outbuf,
+ .args[3] = outbuf_size,
+ .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RW, QCOM_SCM_VAL,
+ QCOM_SCM_RW, QCOM_SCM_VAL),
+ };
+ struct qcom_scm_res res;
+ int ret;
+
+ ret = qcom_scm_call(__scm->dev, &desc, &res);
+ if (ret)
+ return ret;
+
+ if (response_type)
+ *response_type = res.result[0];
+
+ if (result)
+ *result = res.result[1];
+
+ return 0;
+}
+EXPORT_SYMBOL(qcom_scm_qtee_invoke_smc);
+
+/**
+ * qcom_scm_qtee_callback_response() - Submit response for callback request.
+ * @buf: start address of memory area used for outbound buffer.
+ * @buf_size: size of the memory area used for outbound buffer.
+ * @result: Result of QTEE object invocation.
+ * @response_type: Response type returned by QTEE.
+ *
+ * @response_type determines how the contents of @buf should be processed.
+ *
+ * Return: 0 on success, or a negative errno on failure.
+ */
+int qcom_scm_qtee_callback_response(phys_addr_t buf, size_t buf_size,
+ u64 *result, u64 *response_type)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_SMCINVOKE,
+ .cmd = QCOM_SCM_SMCINVOKE_CB_RSP,
+ .owner = ARM_SMCCC_OWNER_TRUSTED_OS,
+ .args[0] = buf,
+ .args[1] = buf_size,
+ .arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL),
+ };
+ struct qcom_scm_res res;
+ int ret;
+
+ ret = qcom_scm_call(__scm->dev, &desc, &res);
+ if (ret)
+ return ret;
+
+ if (response_type)
+ *response_type = res.result[0];
+
+ if (result)
+ *result = res.result[1];
+
+ return 0;
+}
+EXPORT_SYMBOL(qcom_scm_qtee_callback_response);
+
+static void qcom_scm_qtee_free(void *data)
+{
+ struct platform_device *qtee_dev = data;
+
+ platform_device_unregister(qtee_dev);
+}
+
+static void qcom_scm_qtee_init(struct qcom_scm *scm)
+{
+ struct platform_device *qtee_dev;
+ u64 result, response_type;
+ int ret;
+
+ /*
+ * Probe for smcinvoke support. The probe call is made with invalid
+ * (null) buffers and is expected to fail, but QTEE first checks
+ * whether the call is supported by its syscall handler at all; if it
+ * is not, -EIO is returned.
+ */
+ ret = qcom_scm_qtee_invoke_smc(0, 0, 0, 0, &result, &response_type);
+ if (ret == -EIO)
+ return;
+
+ /* Setup QTEE interface device. */
+ qtee_dev = platform_device_register_data(scm->dev, "qcomtee",
+ PLATFORM_DEVID_NONE, NULL, 0);
+ if (IS_ERR(qtee_dev))
+ return;
+
+ devm_add_action_or_reset(scm->dev, qcom_scm_qtee_free, qtee_dev);
+}
+
+/**
* qcom_scm_is_available() - Checks if SCM is available
*/
bool qcom_scm_is_available(void)
@@ -2325,6 +2445,9 @@ static int qcom_scm_probe(struct platform_device *pdev)
ret = qcom_scm_qseecom_init(scm);
WARN(ret < 0, "failed to initialize qseecom: %d\n", ret);
+ /* Initialize the QTEE object interface. */
+ qcom_scm_qtee_init(scm);
+
return 0;
}
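A hedged sketch of how a QTEE transport driver might drive the new export (buffer setup is hypothetical; real callers must pass memory shareable with the secure world, e.g. from the tzmem pool):

	static int example_qtee_roundtrip(phys_addr_t inbuf, phys_addr_t outbuf)
	{
		u64 result, response_type;
		int ret;

		ret = qcom_scm_qtee_invoke_smc(inbuf, SZ_4K, outbuf, SZ_4K,
					       &result, &response_type);
		if (ret)
			return ret;		/* SMC transport error */

		/*
		 * response_type distinguishes a direct reply from a callback
		 * request, which would be answered with
		 * qcom_scm_qtee_callback_response().
		 */
		return result ? -EIO : 0;
	}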
diff --git a/drivers/firmware/qcom/qcom_scm.h b/drivers/firmware/qcom/qcom_scm.h
index 0e8dd838099e..a56c8212cc0c 100644
--- a/drivers/firmware/qcom/qcom_scm.h
+++ b/drivers/firmware/qcom/qcom_scm.h
@@ -156,6 +156,13 @@ int qcom_scm_shm_bridge_enable(struct device *scm_dev);
#define QCOM_SCM_SVC_GPU 0x28
#define QCOM_SCM_SVC_GPU_INIT_REGS 0x01
+/* ARM_SMCCC_OWNER_TRUSTED_OS calls */
+
+#define QCOM_SCM_SVC_SMCINVOKE 0x06
+#define QCOM_SCM_SMCINVOKE_INVOKE_LEGACY 0x00
+#define QCOM_SCM_SMCINVOKE_CB_RSP 0x01
+#define QCOM_SCM_SMCINVOKE_INVOKE 0x02
+
/* common error codes */
#define QCOM_SCM_V2_EBUSY -12
#define QCOM_SCM_ENOMEM -5
diff --git a/drivers/firmware/qcom/qcom_tzmem.c b/drivers/firmware/qcom/qcom_tzmem.c
index ea0a35355657..9f232e53115e 100644
--- a/drivers/firmware/qcom/qcom_tzmem.c
+++ b/drivers/firmware/qcom/qcom_tzmem.c
@@ -77,6 +77,7 @@ static bool qcom_tzmem_using_shm_bridge;
/* List of machines that are known to not support SHM bridge correctly. */
static const char *const qcom_tzmem_blacklist[] = {
+ "qcom,sc7180", /* hang in rmtfs memory assignment */
"qcom,sc8180x",
"qcom,sdm670", /* failure in GPU firmware loading */
"qcom,sdm845", /* reset in rmtfs memory assignment */
@@ -109,7 +110,19 @@ notsupp:
return 0;
}
-static int qcom_tzmem_init_area(struct qcom_tzmem_area *area)
+/**
+ * qcom_tzmem_shm_bridge_create() - Create a SHM bridge.
+ * @paddr: Physical address of the memory to share.
+ * @size: Size of the memory to share.
+ * @handle: Handle to the SHM bridge.
+ *
+ * On platforms that support SHM bridge, this function creates a SHM bridge
+ * for the given memory region with QTEE. The handle returned by this function
+ * must be passed to qcom_tzmem_shm_bridge_delete() to free the SHM bridge.
+ *
+ * Return: 0 on success, or a negative errno on failure.
+ */
+int qcom_tzmem_shm_bridge_create(phys_addr_t paddr, size_t size, u64 *handle)
{
u64 pfn_and_ns_perm, ipfn_and_s_perm, size_and_flags;
int ret;
@@ -117,17 +130,49 @@ static int qcom_tzmem_init_area(struct qcom_tzmem_area *area)
if (!qcom_tzmem_using_shm_bridge)
return 0;
- pfn_and_ns_perm = (u64)area->paddr | QCOM_SCM_PERM_RW;
- ipfn_and_s_perm = (u64)area->paddr | QCOM_SCM_PERM_RW;
- size_and_flags = area->size | (1 << QCOM_SHM_BRIDGE_NUM_VM_SHIFT);
+ pfn_and_ns_perm = paddr | QCOM_SCM_PERM_RW;
+ ipfn_and_s_perm = paddr | QCOM_SCM_PERM_RW;
+ size_and_flags = size | (1 << QCOM_SHM_BRIDGE_NUM_VM_SHIFT);
+
+ ret = qcom_scm_shm_bridge_create(pfn_and_ns_perm, ipfn_and_s_perm,
+ size_and_flags, QCOM_SCM_VMID_HLOS,
+ handle);
+ if (ret) {
+ dev_err(qcom_tzmem_dev,
+ "SHM Bridge failed: ret %d paddr 0x%pa, size %zu\n",
+ ret, &paddr, size);
+
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qcom_tzmem_shm_bridge_create);
+
+/**
+ * qcom_tzmem_shm_bridge_delete() - Delete a SHM bridge.
+ * @handle: Handle to the SHM bridge.
+ *
+ * On platforms that support SHM bridge, this function deletes the SHM bridge
+ * for the given memory region. The handle must be the same as the one
+ * returned by qcom_tzmem_shm_bridge_create().
+ */
+void qcom_tzmem_shm_bridge_delete(u64 handle)
+{
+ if (qcom_tzmem_using_shm_bridge)
+ qcom_scm_shm_bridge_delete(handle);
+}
+EXPORT_SYMBOL_GPL(qcom_tzmem_shm_bridge_delete);
+
+static int qcom_tzmem_init_area(struct qcom_tzmem_area *area)
+{
+ int ret;
u64 *handle __free(kfree) = kzalloc(sizeof(*handle), GFP_KERNEL);
if (!handle)
return -ENOMEM;
- ret = qcom_scm_shm_bridge_create(pfn_and_ns_perm, ipfn_and_s_perm,
- size_and_flags, QCOM_SCM_VMID_HLOS,
- handle);
+ ret = qcom_tzmem_shm_bridge_create(area->paddr, area->size, handle);
if (ret)
return ret;
@@ -140,10 +185,7 @@ static void qcom_tzmem_cleanup_area(struct qcom_tzmem_area *area)
{
u64 *handle = area->priv;
- if (!qcom_tzmem_using_shm_bridge)
- return;
-
- qcom_scm_shm_bridge_delete(*handle);
+ qcom_tzmem_shm_bridge_delete(*handle);
kfree(handle);
}
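With the bridge calls split out of the area helpers, other allocators can pair them directly; a minimal sketch (hypothetical caller, physically contiguous buffer assumed):

	static int example_share_with_qtee(phys_addr_t paddr, size_t size)
	{
		u64 handle = 0;
		int ret;

		ret = qcom_tzmem_shm_bridge_create(paddr, size, &handle);
		if (ret)
			return ret;

		/* ... memory is now accessible to the secure world ... */

		/* no-op on platforms without SHM bridge, mirroring create() */
		qcom_tzmem_shm_bridge_delete(handle);
		return 0;
	}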
diff --git a/drivers/firmware/samsung/exynos-acpm-pmic.c b/drivers/firmware/samsung/exynos-acpm-pmic.c
index 39b33a356ebd..961d7599e422 100644
--- a/drivers/firmware/samsung/exynos-acpm-pmic.c
+++ b/drivers/firmware/samsung/exynos-acpm-pmic.c
@@ -4,7 +4,9 @@
* Copyright 2020 Google LLC.
* Copyright 2024 Linaro Ltd.
*/
+#include <linux/array_size.h>
#include <linux/bitfield.h>
+#include <linux/errno.h>
#include <linux/firmware/samsung/exynos-acpm-protocol.h>
#include <linux/ktime.h>
#include <linux/types.h>
@@ -33,6 +35,19 @@ enum exynos_acpm_pmic_func {
ACPM_PMIC_BULK_WRITE,
};
+static const int acpm_pmic_linux_errmap[] = {
+ [0] = 0, /* ACPM_PMIC_SUCCESS */
+ [1] = -EACCES, /* Register could not be accessed for reading. */
+ [2] = -EACCES, /* Register could not be accessed for writing. */
+};
+
+static int acpm_pmic_to_linux_err(int err)
+{
+ if (err >= 0 && err < ARRAY_SIZE(acpm_pmic_linux_errmap))
+ return acpm_pmic_linux_errmap[err];
+ return -EIO;
+}
+
static inline u32 acpm_pmic_set_bulk(u32 data, unsigned int i)
{
return (data & ACPM_PMIC_BULK_MASK) << (ACPM_PMIC_BULK_SHIFT * i);
@@ -79,7 +94,7 @@ int acpm_pmic_read_reg(const struct acpm_handle *handle,
*buf = FIELD_GET(ACPM_PMIC_VALUE, xfer.rxd[1]);
- return FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]);
+ return acpm_pmic_to_linux_err(FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]));
}
static void acpm_pmic_init_bulk_read_cmd(u32 cmd[4], u8 type, u8 reg, u8 chan,
@@ -110,7 +125,7 @@ int acpm_pmic_bulk_read(const struct acpm_handle *handle,
if (ret)
return ret;
- ret = FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]);
+ ret = acpm_pmic_to_linux_err(FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]));
if (ret)
return ret;
@@ -150,7 +165,7 @@ int acpm_pmic_write_reg(const struct acpm_handle *handle,
if (ret)
return ret;
- return FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]);
+ return acpm_pmic_to_linux_err(FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]));
}
static void acpm_pmic_init_bulk_write_cmd(u32 cmd[4], u8 type, u8 reg, u8 chan,
@@ -190,7 +205,7 @@ int acpm_pmic_bulk_write(const struct acpm_handle *handle,
if (ret)
return ret;
- return FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]);
+ return acpm_pmic_to_linux_err(FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]));
}
static void acpm_pmic_init_update_cmd(u32 cmd[4], u8 type, u8 reg, u8 chan,
@@ -220,5 +235,5 @@ int acpm_pmic_update_reg(const struct acpm_handle *handle,
if (ret)
return ret;
- return FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]);
+ return acpm_pmic_to_linux_err(FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]));
}
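The translation table collapses the firmware's positive status codes into errnos and treats anything out of range as -EIO; concretely:

	/*
	 * acpm_pmic_to_linux_err(0)  ->  0        ACPM_PMIC_SUCCESS
	 * acpm_pmic_to_linux_err(1)  -> -EACCES   read access failure
	 * acpm_pmic_to_linux_err(2)  -> -EACCES   write access failure
	 * acpm_pmic_to_linux_err(9)  -> -EIO      unknown positive code
	 * acpm_pmic_to_linux_err(-1) -> -EIO      negative codes as well
	 */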
diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
index ae5fd1936ad3..49fd2ae01055 100644
--- a/drivers/firmware/ti_sci.c
+++ b/drivers/firmware/ti_sci.c
@@ -2015,6 +2015,47 @@ fail:
return ret;
}
+/**
+ * ti_sci_cmd_lpm_abort() - Abort entry to LPM by clearing the selected low-power mode
+ * @dev: Device pointer corresponding to the SCI entity
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_lpm_abort(struct device *dev)
+{
+ struct ti_sci_info *info = dev_get_drvdata(dev);
+ struct ti_sci_msg_hdr *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ int ret = 0;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_LPM_ABORT,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp))
+ ret = -ENODEV;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle)
{
struct ti_sci_info *info;
@@ -3739,11 +3780,22 @@ static int __maybe_unused ti_sci_resume_noirq(struct device *dev)
return 0;
}
+static void __maybe_unused ti_sci_pm_complete(struct device *dev)
+{
+ struct ti_sci_info *info = dev_get_drvdata(dev);
+
+ if (info->fw_caps & MSG_FLAG_CAPS_LPM_ABORT) {
+ if (ti_sci_cmd_lpm_abort(dev))
+ dev_err(dev, "LPM clear selection failed.\n");
+ }
+}
+
static const struct dev_pm_ops ti_sci_pm_ops = {
#ifdef CONFIG_PM_SLEEP
.suspend = ti_sci_suspend,
.suspend_noirq = ti_sci_suspend_noirq,
.resume_noirq = ti_sci_resume_noirq,
+ .complete = ti_sci_pm_complete,
#endif
};
@@ -3876,10 +3928,11 @@ static int ti_sci_probe(struct platform_device *pdev)
}
ti_sci_msg_cmd_query_fw_caps(&info->handle, &info->fw_caps);
- dev_dbg(dev, "Detected firmware capabilities: %s%s%s\n",
+ dev_dbg(dev, "Detected firmware capabilities: %s%s%s%s\n",
info->fw_caps & MSG_FLAG_CAPS_GENERIC ? "Generic" : "",
info->fw_caps & MSG_FLAG_CAPS_LPM_PARTIAL_IO ? " Partial-IO" : "",
- info->fw_caps & MSG_FLAG_CAPS_LPM_DM_MANAGED ? " DM-Managed" : ""
+ info->fw_caps & MSG_FLAG_CAPS_LPM_DM_MANAGED ? " DM-Managed" : "",
+ info->fw_caps & MSG_FLAG_CAPS_LPM_ABORT ? " LPM-Abort" : ""
);
ti_sci_setup_ops(info);
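Hooking .complete is what makes the abort robust: the PM core calls it at the end of every suspend/resume cycle, including attempts aborted after the suspend callbacks already ran, so the LPM selection never stays armed. A generic sketch of the pattern (hypothetical driver names):

	static int example_suspend(struct device *dev)
	{
		/* arm a one-shot low-power mode in firmware */
		return 0;
	}

	static void example_complete(struct device *dev)
	{
		/* runs even if the suspend attempt was aborted; disarm here */
	}

	static const struct dev_pm_ops example_pm_ops = {
		.suspend  = example_suspend,
		.complete = example_complete,
	};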
diff --git a/drivers/firmware/ti_sci.h b/drivers/firmware/ti_sci.h
index 053387d7baa0..701c416b2e78 100644
--- a/drivers/firmware/ti_sci.h
+++ b/drivers/firmware/ti_sci.h
@@ -42,6 +42,7 @@
#define TI_SCI_MSG_SET_IO_ISOLATION 0x0307
#define TI_SCI_MSG_LPM_SET_DEVICE_CONSTRAINT 0x0309
#define TI_SCI_MSG_LPM_SET_LATENCY_CONSTRAINT 0x030A
+#define TI_SCI_MSG_LPM_ABORT 0x0311
/* Resource Management Requests */
#define TI_SCI_MSG_GET_RESOURCE_RANGE 0x1500
@@ -147,6 +148,7 @@ struct ti_sci_msg_req_reboot {
* MSG_FLAG_CAPS_GENERIC: Generic capability (LPM not supported)
* MSG_FLAG_CAPS_LPM_PARTIAL_IO: Partial IO in LPM
* MSG_FLAG_CAPS_LPM_DM_MANAGED: LPM can be managed by DM
+ * MSG_FLAG_CAPS_LPM_ABORT: Abort entry to LPM
*
* Response to a generic message with message type TI_SCI_MSG_QUERY_FW_CAPS
* providing currently available SOC/firmware capabilities. SoC that don't
@@ -157,6 +159,7 @@ struct ti_sci_msg_resp_query_fw_caps {
#define MSG_FLAG_CAPS_GENERIC TI_SCI_MSG_FLAG(0)
#define MSG_FLAG_CAPS_LPM_PARTIAL_IO TI_SCI_MSG_FLAG(4)
#define MSG_FLAG_CAPS_LPM_DM_MANAGED TI_SCI_MSG_FLAG(5)
+#define MSG_FLAG_CAPS_LPM_ABORT TI_SCI_MSG_FLAG(9)
#define MSG_MASK_CAPS_LPM GENMASK_ULL(4, 1)
u64 fw_caps;
} __packed;
diff --git a/drivers/fwctl/mlx5/main.c b/drivers/fwctl/mlx5/main.c
index f93aa0cecdb9..3dacccf7855c 100644
--- a/drivers/fwctl/mlx5/main.c
+++ b/drivers/fwctl/mlx5/main.c
@@ -58,6 +58,9 @@ enum {
MLX5_CMD_OP_QUERY_DC_CNAK_TRACE = 0x716,
MLX5_CMD_OP_QUERY_NVMF_BACKEND_CONTROLLER = 0x722,
MLX5_CMD_OP_QUERY_NVMF_NAMESPACE_CONTEXT = 0x728,
+ MLX5_CMD_OP_QUERY_ADJACENT_FUNCTIONS_ID = 0x730,
+ MLX5_CMD_OP_DELEGATE_VHCA_MANAGEMENT = 0x731,
+ MLX5_CMD_OP_QUERY_DELEGATED_VHCA = 0x732,
MLX5_CMD_OP_QUERY_BURST_SIZE = 0x813,
MLX5_CMD_OP_QUERY_DIAGNOSTIC_PARAMS = 0x819,
MLX5_CMD_OP_SET_DIAGNOSTIC_PARAMS = 0x820,
@@ -188,6 +191,7 @@ static bool mlx5ctl_validate_rpc(const void *in, enum fwctl_rpc_scope scope)
* filter commands manually for now.
*/
switch (opcode) {
+ case MLX5_CMD_OP_MODIFY_CONG_STATUS:
case MLX5_CMD_OP_POSTPONE_CONNECTED_QP_TIMEOUT:
case MLX5_CMD_OP_QUERY_ADAPTER:
case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
@@ -196,6 +200,7 @@ static bool mlx5ctl_validate_rpc(const void *in, enum fwctl_rpc_scope scope)
case MLX5_CMD_OP_QUERY_OTHER_HCA_CAP:
case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
case MLX5_CMD_OPCODE_QUERY_VUID:
+ case MLX5_CMD_OP_DELEGATE_VHCA_MANAGEMENT:
/*
* FW limits SET_HCA_CAP on the tools UID to only the other function
* mode which is used for function pre-configuration
@@ -281,6 +286,8 @@ static bool mlx5ctl_validate_rpc(const void *in, enum fwctl_rpc_scope scope)
case MLX5_CMD_OP_QUERY_XRQ:
case MLX5_CMD_OP_USER_QUERY_XRQ_DC_PARAMS_ENTRY:
case MLX5_CMD_OP_USER_QUERY_XRQ_ERROR_PARAMS:
+ case MLX5_CMD_OP_QUERY_ADJACENT_FUNCTIONS_ID:
+ case MLX5_CMD_OP_QUERY_DELEGATED_VHCA:
return scope >= FWCTL_RPC_DEBUG_READ_ONLY;
case MLX5_CMD_OP_SET_DIAGNOSTIC_PARAMS:
@@ -345,7 +352,7 @@ static void *mlx5ctl_fw_rpc(struct fwctl_uctx *uctx, enum fwctl_rpc_scope scope,
*/
if (ret && ret != -EREMOTEIO) {
if (rpc_out != rpc_in)
- kfree(rpc_out);
+ kvfree(rpc_out);
return ERR_PTR(ret);
}
return rpc_out;
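The one-character fix matters because rpc_out can be allocated with kvzalloc(), which may be vmalloc-backed for large RPCs; such buffers must be released with kvfree(). The general rule, sketched:

	static int example(size_t len)
	{
		void *buf = kvzalloc(len, GFP_KERNEL);	/* kmalloc or vmalloc */

		if (!buf)
			return -ENOMEM;
		/* ... */
		kvfree(buf);	/* correct for either backing; kfree() is not */
		return 0;
	}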
diff --git a/drivers/fwctl/pds/main.c b/drivers/fwctl/pds/main.c
index 9b9d1f6b5556..1809853f6353 100644
--- a/drivers/fwctl/pds/main.c
+++ b/drivers/fwctl/pds/main.c
@@ -6,6 +6,7 @@
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/bitfield.h>
+#include <linux/string.h>
#include <uapi/fwctl/fwctl.h>
#include <uapi/fwctl/pds.h>
@@ -366,18 +367,10 @@ static void *pdsfc_fw_rpc(struct fwctl_uctx *uctx, enum fwctl_rpc_scope scope,
return ERR_PTR(err);
if (rpc->in.len > 0) {
- in_payload = kzalloc(rpc->in.len, GFP_KERNEL);
- if (!in_payload) {
- dev_err(dev, "Failed to allocate in_payload\n");
- err = -ENOMEM;
- goto err_out;
- }
-
- if (copy_from_user(in_payload, u64_to_user_ptr(rpc->in.payload),
- rpc->in.len)) {
+ in_payload = memdup_user(u64_to_user_ptr(rpc->in.payload), rpc->in.len);
+ if (IS_ERR(in_payload)) {
dev_dbg(dev, "Failed to copy in_payload from user\n");
- err = -EFAULT;
- goto err_in_payload;
+ return in_payload;
}
in_payload_dma_addr = dma_map_single(dev->parent, in_payload,
@@ -453,7 +446,6 @@ err_out_payload:
rpc->in.len, DMA_TO_DEVICE);
err_in_payload:
kfree(in_payload);
-err_out:
if (err)
return ERR_PTR(err);
@@ -481,7 +473,7 @@ static int pdsfc_probe(struct auxiliary_device *adev,
pdsfc = fwctl_alloc_device(&padev->vf_pdev->dev, &pdsfc_ops,
struct pdsfc_dev, fwctl);
if (!pdsfc)
- return dev_err_probe(dev, -ENOMEM, "Failed to allocate fwctl device struct\n");
+ return -ENOMEM;
pdsfc->padev = padev;
err = pdsfc_identify(pdsfc);
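memdup_user() folds the allocate-and-copy idiom into one call that returns an ERR_PTR, which is also why the err_out label could go away; a hedged sketch of the shape of the conversion:

	static int example_copy_in(const void __user *ubuf, size_t len)
	{
		/* replaces kzalloc() + copy_from_user() + manual unwinding */
		void *buf = memdup_user(ubuf, len);

		if (IS_ERR(buf))
			return PTR_ERR(buf);	/* -ENOMEM or -EFAULT */

		/* ... use buf ... */
		kfree(buf);	/* memdup_user() allocates with kmalloc */
		return 0;
	}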
diff --git a/drivers/gpio/gpio-rockchip.c b/drivers/gpio/gpio-rockchip.c
index bcfc323a8315..47174eb3ba76 100644
--- a/drivers/gpio/gpio-rockchip.c
+++ b/drivers/gpio/gpio-rockchip.c
@@ -769,7 +769,7 @@ static int rockchip_gpio_probe(struct platform_device *pdev)
list_del(&cfg->head);
switch (cfg->param) {
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
ret = rockchip_gpio_direction_output(&bank->gpio_chip, cfg->pin, cfg->arg);
if (ret)
dev_warn(dev, "setting output pin %u to %u failed\n", cfg->pin,
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index f7ea8e895c0c..fda170730468 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -396,6 +396,8 @@ source "drivers/gpu/drm/sprd/Kconfig"
source "drivers/gpu/drm/imagination/Kconfig"
+source "drivers/gpu/drm/tyr/Kconfig"
+
config DRM_HYPERV
tristate "DRM Support for Hyper-V synthetic video device"
depends on DRM && PCI && HYPERV
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 4dafbdc8f86a..4b2f7d794275 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -220,6 +220,7 @@ obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo/
obj-$(CONFIG_DRM_LIMA) += lima/
obj-$(CONFIG_DRM_PANFROST) += panfrost/
obj-$(CONFIG_DRM_PANTHOR) += panthor/
+obj-$(CONFIG_DRM_TYR) += tyr/
obj-$(CONFIG_DRM_ASPEED_GFX) += aspeed/
obj-$(CONFIG_DRM_MCDE) += mcde/
obj-$(CONFIG_DRM_TIDSS) += tidss/
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 930de203d533..64e7acff8f18 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -84,7 +84,8 @@ amdgpu-y += \
vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o arct_reg_init.o mxgpu_nv.o \
nbio_v7_2.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o aldebaran.o soc21.o soc24.o \
sienna_cichlid.o smu_v13_0_10.o nbio_v4_3.o hdp_v6_0.o nbio_v7_7.o hdp_v5_2.o lsdma_v6_0.o \
- nbio_v7_9.o aqua_vanjaram.o nbio_v7_11.o lsdma_v7_0.o hdp_v7_0.o nbif_v6_3_1.o
+ nbio_v7_9.o aqua_vanjaram.o nbio_v7_11.o lsdma_v7_0.o hdp_v7_0.o nbif_v6_3_1.o \
+ cyan_skillfish_reg_init.o
# add DF block
amdgpu-y += \
@@ -137,7 +138,6 @@ amdgpu-y += \
# add DCE block
amdgpu-y += \
dce_v10_0.o \
- dce_v11_0.o \
amdgpu_vkms.o
# add GFX block
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index ef3af170dda4..2a0df4cabb99 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -63,6 +63,7 @@
#include "kgd_pp_interface.h"
#include "amd_shared.h"
+#include "amdgpu_utils.h"
#include "amdgpu_mode.h"
#include "amdgpu_ih.h"
#include "amdgpu_irq.h"
@@ -434,7 +435,6 @@ struct amdgpu_clock {
uint32_t default_mclk;
uint32_t default_sclk;
uint32_t default_dispclk;
- uint32_t current_dispclk;
uint32_t dp_extclk;
uint32_t max_pixel_clock;
};
@@ -545,7 +545,7 @@ struct amdgpu_wb {
* this value can be accessed directly by using the offset as an index.
* For the GPU address, it is necessary to use gpu_addr and the offset.
*/
- volatile uint32_t *wb;
+ uint32_t *wb;
/**
* @gpu_addr:
@@ -721,7 +721,7 @@ int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
/* VRAM scratch page for HDP bug, default vram page */
struct amdgpu_mem_scratch {
struct amdgpu_bo *robj;
- volatile uint32_t *ptr;
+ uint32_t *ptr;
u64 gpu_addr;
};
@@ -752,6 +752,7 @@ typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, u
struct amdgpu_mmio_remap {
u32 reg_offset;
resource_size_t bus_addr;
+ struct amdgpu_bo *bo;
};
/* Define the HW IP blocks will be used in driver , add more if necessary */
@@ -819,6 +820,20 @@ struct amdgpu_ip_map_info {
uint32_t mask);
};
+enum amdgpu_uid_type {
+ AMDGPU_UID_TYPE_XCD,
+ AMDGPU_UID_TYPE_AID,
+ AMDGPU_UID_TYPE_SOC,
+ AMDGPU_UID_TYPE_MAX
+};
+
+#define AMDGPU_UID_INST_MAX 8 /* max number of instances for each UID type */
+
+struct amdgpu_uid {
+ uint64_t uid[AMDGPU_UID_TYPE_MAX][AMDGPU_UID_INST_MAX];
+ struct amdgpu_device *adev;
+};
+
struct amd_powerplay {
void *pp_handle;
const struct amd_pm_funcs *pp_funcs;
@@ -896,6 +911,9 @@ struct amdgpu_pcie_reset_ctx {
bool in_link_reset;
bool occurs_dpc;
bool audio_suspended;
+ struct pci_dev *swus;
+ struct pci_saved_state *swus_pcistate;
+ struct pci_saved_state *swds_pcistate;
};
/*
@@ -929,12 +947,6 @@ enum amdgpu_enforce_isolation_mode {
AMDGPU_ENFORCE_ISOLATION_NO_CLEANER_SHADER = 3,
};
-
-/*
- * Non-zero (true) if the GPU has VRAM. Zero (false) otherwise.
- */
-#define AMDGPU_HAS_VRAM(_adev) ((_adev)->gmc.real_vram_size)
-
struct amdgpu_device {
struct device *dev;
struct pci_dev *pdev;
@@ -1138,9 +1150,6 @@ struct amdgpu_device {
/* for userq and VM fences */
struct amdgpu_seq64 seq64;
- /* KFD */
- struct amdgpu_kfd_dev kfd;
-
/* UMC */
struct amdgpu_umc umc;
@@ -1302,6 +1311,12 @@ struct amdgpu_device {
struct list_head userq_mgr_list;
struct mutex userq_mutex;
bool userq_halt_for_enforce_isolation;
+ struct amdgpu_uid *uid_info;
+
+ /* KFD
+ * Must be last -- it ends in a flexible-array member.
+ */
+ struct amdgpu_kfd_dev kfd;
};
static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev,
@@ -1785,4 +1800,9 @@ static inline int amdgpu_device_bus_status_check(struct amdgpu_device *adev)
return 0;
}
+void amdgpu_device_set_uid(struct amdgpu_uid *uid_info,
+ enum amdgpu_uid_type type, uint8_t inst,
+ uint64_t uid);
+uint64_t amdgpu_device_get_uid(struct amdgpu_uid *uid_info,
+ enum amdgpu_uid_type type, uint8_t inst);
#endif
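The reshuffle is forced by struct amdgpu_kfd_dev now ending in a flexible-array member (its embedded dev_pagemap does), and a struct containing a flexible array may only be embedded as the final field. Schematically (toy types, not the real layouts):

	struct tail {
		int n;
		int data[];		/* flexible-array member */
	};

	struct container {
		long other;
		struct tail t;		/* allowed only as the last member;
					 * a field after it trips
					 * -Wflex-array-member-not-at-end */
	};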
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
index cbc40cad581b..9b3180449150 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
@@ -76,6 +76,7 @@ static void aca_banks_release(struct aca_banks *banks)
list_for_each_entry_safe(node, tmp, &banks->list, node) {
list_del(&node->node);
kvfree(node);
+ banks->nr_banks--;
}
}
@@ -130,6 +131,27 @@ static void aca_smu_bank_dump(struct amdgpu_device *adev, int idx, int total, st
RAS_EVENT_LOG(adev, event_id, HW_ERR "hardware error logged by the scrubber\n");
}
+static bool aca_bank_hwip_is_matched(struct aca_bank *bank, enum aca_hwip_type type)
+{
+
+ struct aca_hwip *hwip;
+ int hwid, mcatype;
+ u64 ipid;
+
+ if (!bank || type == ACA_HWIP_TYPE_UNKNOW)
+ return false;
+
+ hwip = &aca_hwid_mcatypes[type];
+ if (!hwip->hwid)
+ return false;
+
+ ipid = bank->regs[ACA_REG_IDX_IPID];
+ hwid = ACA_REG__IPID__HARDWAREID(ipid);
+ mcatype = ACA_REG__IPID__MCATYPE(ipid);
+
+ return hwip->hwid == hwid && hwip->mcatype == mcatype;
+}
+
static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_smu_type type,
int start, int count,
struct aca_banks *banks, struct ras_query_context *qctx)
@@ -168,6 +190,15 @@ static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_smu_
bank.smu_err_type = type;
+ /*
+ * A UE injection while background workloads are running can surface
+ * poison-consumption banks from non-UMC IPs; such records are
+ * unexpected, so skip them.
+ */
+ if (type == ACA_SMU_TYPE_UE &&
+ ACA_REG__STATUS__POISON(bank.regs[ACA_REG_IDX_STATUS]) &&
+ !aca_bank_hwip_is_matched(&bank, ACA_HWIP_TYPE_UMC))
+ continue;
+
aca_smu_bank_dump(adev, i, count, &bank, qctx);
ret = aca_banks_add_bank(banks, &bank);
@@ -178,27 +209,6 @@ static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_smu_
return 0;
}
-static bool aca_bank_hwip_is_matched(struct aca_bank *bank, enum aca_hwip_type type)
-{
-
- struct aca_hwip *hwip;
- int hwid, mcatype;
- u64 ipid;
-
- if (!bank || type == ACA_HWIP_TYPE_UNKNOW)
- return false;
-
- hwip = &aca_hwid_mcatypes[type];
- if (!hwip->hwid)
- return false;
-
- ipid = bank->regs[ACA_REG_IDX_IPID];
- hwid = ACA_REG__IPID__HARDWAREID(ipid);
- mcatype = ACA_REG__IPID__MCATYPE(ipid);
-
- return hwip->hwid == hwid && hwip->mcatype == mcatype;
-}
-
static bool aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank, enum aca_smu_type type)
{
const struct aca_bank_ops *bank_ops = handle->bank_ops;
@@ -229,6 +239,7 @@ static struct aca_bank_error *new_bank_error(struct aca_error *aerr, struct aca_
mutex_lock(&aerr->lock);
list_add_tail(&bank_error->node, &aerr->list);
+ aerr->nr_errors++;
mutex_unlock(&aerr->lock);
return bank_error;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index aa88bad7416b..9e120c934cc1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -107,11 +107,13 @@ struct amdgpu_kfd_dev {
bool init_complete;
struct work_struct reset_work;
- /* HMM page migration MEMORY_DEVICE_PRIVATE mapping */
- struct dev_pagemap pgmap;
-
/* Client for KFD BO GEM handle allocations */
struct drm_client_dev client;
+
+ /* HMM page migration MEMORY_DEVICE_PRIVATE mapping
+ * Must be last -- it ends in a flexible-array member.
+ */
+ struct dev_pagemap pgmap;
};
enum kgd_engine_type {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
index 04ef0ca10541..0239114fb6c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
@@ -352,7 +352,7 @@ static int kgd_hqd_dump(struct amdgpu_device *adev,
(*dump)[i++][1] = RREG32_SOC15_IP(GC, addr); \
} while (0)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
@@ -449,7 +449,7 @@ static int kgd_hqd_sdma_dump(struct amdgpu_device *adev,
#undef HQD_N_REGS
#define HQD_N_REGS (19+6+7+10)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
index 6d08bc2781a3..f2278a0937ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
@@ -338,7 +338,7 @@ static int hqd_dump_v10_3(struct amdgpu_device *adev,
(*dump)[i++][1] = RREG32_SOC15_IP(GC, addr); \
} while (0)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
@@ -435,7 +435,7 @@ static int hqd_sdma_dump_v10_3(struct amdgpu_device *adev,
#undef HQD_N_REGS
#define HQD_N_REGS (19+6+7+12)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c
index e0e6a6a49d90..aaccf0b9947d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c
@@ -323,7 +323,7 @@ static int hqd_dump_v11(struct amdgpu_device *adev,
(*dump)[i++][1] = RREG32(addr); \
} while (0)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
@@ -420,7 +420,7 @@ static int hqd_sdma_dump_v11(struct amdgpu_device *adev,
#undef HQD_N_REGS
#define HQD_N_REGS (7+11+1+12+12)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v12.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v12.c
index 6f0dc23c901b..e0ceab400b2d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v12.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v12.c
@@ -115,7 +115,7 @@ static int hqd_dump_v12(struct amdgpu_device *adev,
(*dump)[i++][1] = RREG32(addr); \
} while (0)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
@@ -146,7 +146,7 @@ static int hqd_sdma_dump_v12(struct amdgpu_device *adev,
#undef HQD_N_REGS
#define HQD_N_REGS (last_reg - first_reg + 1)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
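All of these dump conversions are size-neutral: *dump has type uint32_t (*)[2], so sizeof(**dump) equals 2 * sizeof(uint32_t), and kmalloc_array() additionally refuses allocations whose count * size multiplication would overflow. Sketch (register count hypothetical):

	#define EXAMPLE_N_REGS 56		/* stand-in for HQD_N_REGS */

	static int example_alloc(uint32_t (**dump)[2])
	{
		/* old: kmalloc(EXAMPLE_N_REGS * 2 * sizeof(uint32_t), ...) */
		*dump = kmalloc_array(EXAMPLE_N_REGS, sizeof(**dump), GFP_KERNEL);
		if (!*dump)
			return -ENOMEM;	/* also on multiplication overflow */
		return 0;
	}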
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index b16cce7c22c3..7c54fe6b0f5d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -510,7 +510,8 @@ static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
return amdgpu_sync_fence(sync, vm->last_update, GFP_KERNEL);
}
-static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
+static uint64_t get_pte_flags(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct kgd_mem *mem)
{
uint32_t mapping_flags = AMDGPU_VM_PAGE_READABLE |
AMDGPU_VM_MTYPE_DEFAULT;
@@ -520,7 +521,7 @@ static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)
mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
- return amdgpu_gem_va_map_flags(adev, mapping_flags);
+ return mapping_flags;
}
/**
@@ -977,7 +978,7 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
goto unwind;
}
attachment[i]->va = va;
- attachment[i]->pte_flags = get_pte_flags(adev, mem);
+ attachment[i]->pte_flags = get_pte_flags(adev, vm, mem);
attachment[i]->adev = adev;
list_add(&attachment[i]->list, &mem->attachments);
@@ -1088,7 +1089,7 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
return 0;
}
- ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages, &range);
+ ret = amdgpu_ttm_tt_get_user_pages(bo, &range);
if (ret) {
if (ret == -EAGAIN)
pr_debug("Failed to get user pages, try again\n");
@@ -1102,6 +1103,9 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
pr_err("%s: Failed to reserve BO\n", __func__);
goto release_out;
}
+
+ amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, range);
+
amdgpu_bo_placement_from_domain(bo, mem->domain);
ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (ret)
@@ -2564,8 +2568,7 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
}
/* Get updated user pages */
- ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages,
- &mem->range);
+ ret = amdgpu_ttm_tt_get_user_pages(bo, &mem->range);
if (ret) {
pr_debug("Failed %d to get user pages\n", ret);
@@ -2594,6 +2597,8 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
ret = 0;
}
+ amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, mem->range);
+
mutex_lock(&process_info->notifier_lock);
/* Mark the BO as valid unless it was invalidated
@@ -2989,9 +2994,22 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
struct amdgpu_device *adev = amdgpu_ttm_adev(
peer_vm->root.bo->tbo.bdev);
+ struct amdgpu_fpriv *fpriv =
+ container_of(peer_vm, struct amdgpu_fpriv, vm);
+
+ ret = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
+ if (ret) {
+ dev_dbg(adev->dev,
+ "Memory eviction: handle PRT moved failed, pid %8d. Try again.\n",
+ pid_nr(process_info->pid));
+ goto validate_map_fail;
+ }
+
ret = amdgpu_vm_handle_moved(adev, peer_vm, &exec.ticket);
if (ret) {
- pr_debug("Memory eviction: handle moved failed. Try again\n");
+ dev_dbg(adev->dev,
+ "Memory eviction: handle moved failed, pid %8d. Try again.\n",
+ pid_nr(process_info->pid));
goto validate_map_fail;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index e476e45b996a..763f2b8dcf13 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -706,7 +706,6 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev)
}
adev->clock.dp_extclk =
le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
- adev->clock.current_dispclk = adev->clock.default_dispclk;
adev->clock.max_pixel_clock = le16_to_cpu(firmware_info->info.usMaxPixelClock);
if (adev->clock.max_pixel_clock == 0)
@@ -1816,16 +1815,43 @@ static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev,
return sysfs_emit(buf, "%s\n", ctx->vbios_pn);
}
+static ssize_t amdgpu_atombios_get_vbios_build(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ struct atom_context *ctx = adev->mode_info.atom_context;
+
+ return sysfs_emit(buf, "%s\n", ctx->build_num);
+}
+
static DEVICE_ATTR(vbios_version, 0444, amdgpu_atombios_get_vbios_version,
NULL);
+static DEVICE_ATTR(vbios_build, 0444, amdgpu_atombios_get_vbios_build, NULL);
static struct attribute *amdgpu_vbios_version_attrs[] = {
- &dev_attr_vbios_version.attr,
- NULL
+ &dev_attr_vbios_version.attr, &dev_attr_vbios_build.attr, NULL
};
+static umode_t amdgpu_vbios_version_attrs_is_visible(struct kobject *kobj,
+ struct attribute *attr,
+ int index)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ struct atom_context *ctx = adev->mode_info.atom_context;
+
+ if (attr == &dev_attr_vbios_build.attr && !strlen(ctx->build_num))
+ return 0;
+
+ return attr->mode;
+}
+
const struct attribute_group amdgpu_vbios_version_attr_group = {
- .attrs = amdgpu_vbios_version_attrs
+ .attrs = amdgpu_vbios_version_attrs,
+ .is_visible = amdgpu_vbios_version_attrs_is_visible,
};
int amdgpu_atombios_sysfs_init(struct amdgpu_device *adev)
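The .is_visible hook lets the group stay statically registered while sysfs decides exposure per attribute at creation time: return 0 to hide a node, or attr->mode to keep it. A generic sketch (all example_* names and dev_attr_optional are placeholders):

	static umode_t example_attrs_visible(struct kobject *kobj,
					     struct attribute *attr, int index)
	{
		/* hide the optional file when its backing data is absent */
		if (attr == &dev_attr_optional.attr && !example_has_data(kobj))
			return 0;

		return attr->mode;
	}

	static const struct attribute_group example_attr_group = {
		.attrs      = example_attrs,
		.is_visible = example_attrs_visible,
	};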
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index 702f6610d024..66fb37b64388 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -184,43 +184,36 @@ void amdgpu_bo_list_put(struct amdgpu_bo_list *list)
int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
struct drm_amdgpu_bo_list_entry **info_param)
{
- const void __user *uptr = u64_to_user_ptr(in->bo_info_ptr);
const uint32_t info_size = sizeof(struct drm_amdgpu_bo_list_entry);
+ const void __user *uptr = u64_to_user_ptr(in->bo_info_ptr);
+ const uint32_t bo_info_size = in->bo_info_size;
+ const uint32_t bo_number = in->bo_number;
struct drm_amdgpu_bo_list_entry *info;
- int r;
-
- info = kvmalloc_array(in->bo_number, info_size, GFP_KERNEL);
- if (!info)
- return -ENOMEM;
/* copy the handle array from userspace to a kernel buffer */
- r = -EFAULT;
- if (likely(info_size == in->bo_info_size)) {
- unsigned long bytes = in->bo_number *
- in->bo_info_size;
-
- if (copy_from_user(info, uptr, bytes))
- goto error_free;
-
+ if (likely(info_size == bo_info_size)) {
+ info = vmemdup_array_user(uptr, bo_number, info_size);
+ if (IS_ERR(info))
+ return PTR_ERR(info);
} else {
- unsigned long bytes = min(in->bo_info_size, info_size);
+ const uint32_t bytes = min(bo_info_size, info_size);
unsigned i;
- memset(info, 0, in->bo_number * info_size);
- for (i = 0; i < in->bo_number; ++i) {
- if (copy_from_user(&info[i], uptr, bytes))
- goto error_free;
+ info = kvmalloc_array(bo_number, info_size, GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
- uptr += in->bo_info_size;
+ memset(info, 0, bo_number * info_size);
+ for (i = 0; i < bo_number; ++i, uptr += bo_info_size) {
+ if (copy_from_user(&info[i], uptr, bytes)) {
+ kvfree(info);
+ return -EFAULT;
+ }
}
}
*info_param = info;
return 0;
-
-error_free:
- kvfree(info);
- return r;
}
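vmemdup_array_user() bundles the overflow-checked size computation, the kvmalloc() fallback for large arrays, and the user copy into a single ERR_PTR-returning call; the fast path above reduces to roughly:

	static int example(const void __user *uptr, u32 count)
	{
		struct drm_amdgpu_bo_list_entry *info;

		/* checks count * sizeof(*info) for overflow before copying */
		info = vmemdup_array_user(uptr, count, sizeof(*info));
		if (IS_ERR(info))
			return PTR_ERR(info);

		/* ... */
		kvfree(info);	/* may be vmalloc-backed */
		return 0;
	}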
int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
index 555cd6d877c3..a716c9886c74 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
@@ -38,7 +38,6 @@ struct amdgpu_bo_list_entry {
struct amdgpu_bo *bo;
struct amdgpu_bo_va *bo_va;
uint32_t priority;
- struct page **user_pages;
struct hmm_range *range;
bool user_invalidated;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 5e375e9c4f5d..47e9bfba0642 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -398,30 +398,28 @@ static void amdgpu_connector_add_common_modes(struct drm_encoder *encoder,
struct drm_display_mode *mode = NULL;
struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
int i;
- static const struct mode_size {
+ int n;
+ struct mode_size {
+ char name[DRM_DISPLAY_MODE_LEN];
int w;
int h;
- } common_modes[17] = {
- { 640, 480},
- { 720, 480},
- { 800, 600},
- { 848, 480},
- {1024, 768},
- {1152, 768},
- {1280, 720},
- {1280, 800},
- {1280, 854},
- {1280, 960},
- {1280, 1024},
- {1440, 900},
- {1400, 1050},
- {1680, 1050},
- {1600, 1200},
- {1920, 1080},
- {1920, 1200}
+ } common_modes[] = {
+ { "640x480", 640, 480},
+ { "800x600", 800, 600},
+ { "1024x768", 1024, 768},
+ { "1280x720", 1280, 720},
+ { "1280x800", 1280, 800},
+ {"1280x1024", 1280, 1024},
+ { "1440x900", 1440, 900},
+ {"1680x1050", 1680, 1050},
+ {"1600x1200", 1600, 1200},
+ {"1920x1080", 1920, 1080},
+ {"1920x1200", 1920, 1200}
};
- for (i = 0; i < 17; i++) {
+ n = ARRAY_SIZE(common_modes);
+
+ for (i = 0; i < n; i++) {
if (amdgpu_encoder->devices & (ATOM_DEVICE_TV_SUPPORT)) {
if (common_modes[i].w > 1024 ||
common_modes[i].h > 768)
@@ -434,12 +432,11 @@ static void amdgpu_connector_add_common_modes(struct drm_encoder *encoder,
common_modes[i].h == native_mode->vdisplay))
continue;
}
- if (common_modes[i].w < 320 || common_modes[i].h < 200)
- continue;
mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
if (!mode)
return;
+ strscpy(mode->name, common_modes[i].name, DRM_DISPLAY_MODE_LEN);
drm_mode_probed_add(connector, mode);
}
@@ -1195,29 +1192,69 @@ static void amdgpu_connector_dvi_force(struct drm_connector *connector)
amdgpu_connector->use_digital = true;
}
+/**
+ * amdgpu_max_hdmi_pixel_clock - Return max supported HDMI (TMDS) pixel clock
+ * @adev: pointer to amdgpu_device
+ *
+ * Return: maximum supported HDMI (TMDS) pixel clock in KHz.
+ */
+static int amdgpu_max_hdmi_pixel_clock(const struct amdgpu_device *adev)
+{
+ if (adev->asic_type >= CHIP_POLARIS10)
+ return 600000;
+ else if (adev->asic_type >= CHIP_TONGA)
+ return 300000;
+ else
+ return 297000;
+}
+
+/**
+ * amdgpu_connector_dvi_mode_valid - Validate a mode on DVI/HDMI connectors
+ * @connector: DRM connector to validate the mode on
+ * @mode: display mode to validate
+ *
+ * Validate the given display mode on DVI and HDMI connectors, including
+ * analog signals on DVI-I.
+ *
+ * Return: drm_mode_status indicating whether the mode is valid.
+ */
static enum drm_mode_status amdgpu_connector_dvi_mode_valid(struct drm_connector *connector,
const struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+ const int max_hdmi_pixel_clock = amdgpu_max_hdmi_pixel_clock(adev);
+ const int max_dvi_single_link_pixel_clock = 165000;
+ int max_digital_pixel_clock_khz;
/* XXX check mode bandwidth */
- if (amdgpu_connector->use_digital && (mode->clock > 165000)) {
- if ((amdgpu_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I) ||
- (amdgpu_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) ||
- (amdgpu_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_B)) {
- return MODE_OK;
- } else if (connector->display_info.is_hdmi) {
- /* HDMI 1.3+ supports max clock of 340 Mhz */
- if (mode->clock > 340000)
- return MODE_CLOCK_HIGH;
- else
- return MODE_OK;
- } else {
- return MODE_CLOCK_HIGH;
+ if (amdgpu_connector->use_digital) {
+ switch (amdgpu_connector->connector_object_id) {
+ case CONNECTOR_OBJECT_ID_HDMI_TYPE_A:
+ max_digital_pixel_clock_khz = max_hdmi_pixel_clock;
+ break;
+ case CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I:
+ case CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D:
+ max_digital_pixel_clock_khz = max_dvi_single_link_pixel_clock;
+ break;
+ case CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I:
+ case CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D:
+ case CONNECTOR_OBJECT_ID_HDMI_TYPE_B:
+ max_digital_pixel_clock_khz = max_dvi_single_link_pixel_clock * 2;
+ break;
}
+
+ /* When the display EDID claims that it's an HDMI display,
+ * we use the HDMI encoder mode of the display HW,
+ * so we should verify against the max HDMI clock here.
+ */
+ if (connector->display_info.is_hdmi)
+ max_digital_pixel_clock_khz = max_hdmi_pixel_clock;
+
+ if (mode->clock > max_digital_pixel_clock_khz)
+ return MODE_CLOCK_HIGH;
}
/* check against the max pixel clock */
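A worked example of the new limits (hypothetical setup): a dual-link DVI-D connector is normally capped at 165000 * 2 = 330000 kHz, but if the sink's EDID sets display_info.is_hdmi the limit becomes amdgpu_max_hdmi_pixel_clock(), i.e. 600000 kHz on Polaris10 and newer, so a 594000 kHz 4K@60 HDMI mode now returns MODE_OK instead of MODE_CLOCK_HIGH.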
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
index 25252231a68a..ef996493115f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
@@ -68,7 +68,6 @@ void amdgpu_cper_entry_fill_hdr(struct amdgpu_device *adev,
hdr->error_severity = sev;
hdr->valid_bits.platform_id = 1;
- hdr->valid_bits.partition_id = 1;
hdr->valid_bits.timestamp = 1;
amdgpu_cper_get_timestamp(&hdr->timestamp);
@@ -174,7 +173,7 @@ int amdgpu_cper_entry_fill_runtime_section(struct amdgpu_device *adev,
struct cper_sec_nonstd_err *section;
bool poison;
- poison = (sev == CPER_SEV_NON_FATAL_CORRECTED) ? false : true;
+ poison = sev != CPER_SEV_NON_FATAL_CORRECTED;
section_desc = (struct cper_sec_desc *)((uint8_t *)hdr + SEC_DESC_OFFSET(idx));
section = (struct cper_sec_nonstd_err *)((uint8_t *)hdr +
NONSTD_SEC_OFFSET(hdr->sec_cnt, idx));
@@ -206,6 +205,7 @@ int amdgpu_cper_entry_fill_bad_page_threshold_section(struct amdgpu_device *adev
{
struct cper_sec_desc *section_desc;
struct cper_sec_nonstd_err *section;
+ uint32_t socket_id;
section_desc = (struct cper_sec_desc *)((uint8_t *)hdr + SEC_DESC_OFFSET(idx));
section = (struct cper_sec_nonstd_err *)((uint8_t *)hdr +
@@ -219,11 +219,17 @@ int amdgpu_cper_entry_fill_bad_page_threshold_section(struct amdgpu_device *adev
section->hdr.valid_bits.err_context_cnt = 1;
section->info.error_type = RUNTIME;
+ section->info.valid_bits.ms_chk = 1;
section->info.ms_chk_bits.err_type_valid = 1;
+ section->info.ms_chk_bits.err_type = 1;
+ section->info.ms_chk_bits.pcc = 1;
section->ctx.reg_ctx_type = CPER_CTX_TYPE_CRASH;
section->ctx.reg_arr_size = sizeof(section->ctx.reg_dump);
/* Hardcoded Reg dump for bad page threshold CPER */
+ socket_id = (adev->smuio.funcs && adev->smuio.funcs->get_socket_id) ?
+ adev->smuio.funcs->get_socket_id(adev) :
+ 0;
section->ctx.reg_dump[CPER_ACA_REG_CTL_LO] = 0x1;
section->ctx.reg_dump[CPER_ACA_REG_CTL_HI] = 0x0;
section->ctx.reg_dump[CPER_ACA_REG_STATUS_LO] = 0x137;
@@ -234,8 +240,8 @@ int amdgpu_cper_entry_fill_bad_page_threshold_section(struct amdgpu_device *adev
section->ctx.reg_dump[CPER_ACA_REG_MISC0_HI] = 0x0;
section->ctx.reg_dump[CPER_ACA_REG_CONFIG_LO] = 0x2;
section->ctx.reg_dump[CPER_ACA_REG_CONFIG_HI] = 0x1ff;
- section->ctx.reg_dump[CPER_ACA_REG_IPID_LO] = 0x0;
- section->ctx.reg_dump[CPER_ACA_REG_IPID_HI] = 0x96;
+ section->ctx.reg_dump[CPER_ACA_REG_IPID_LO] = (socket_id / 4) & 0x01;
+ section->ctx.reg_dump[CPER_ACA_REG_IPID_HI] = 0x096 | (((socket_id % 4) & 0x3) << 12);
section->ctx.reg_dump[CPER_ACA_REG_SYND_LO] = 0x0;
section->ctx.reg_dump[CPER_ACA_REG_SYND_HI] = 0x0;
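The IPID words now encode the socket: bit 0 of IPID_LO carries socket_id / 4 and bits 13:12 of IPID_HI carry socket_id % 4 on top of the fixed base 0x096. Worked example:

	/*
	 * socket_id = 5:
	 *   IPID_LO = (5 / 4) & 0x1           = 0x1
	 *   IPID_HI = 0x096 | ((5 % 4) << 12) = 0x096 | 0x1000 = 0x1096
	 * socket_id = 0 reproduces the previous fixed 0x0 / 0x096.
	 */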
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index d3f220be2ef9..9cd7741d2254 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -29,6 +29,7 @@
#include <linux/pagemap.h>
#include <linux/sync_file.h>
#include <linux/dma-buf.h>
+#include <linux/hmm.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_syncobj.h>
@@ -178,25 +179,17 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
unsigned int num_ibs[AMDGPU_CS_GANG_SIZE] = { };
struct amdgpu_vm *vm = &fpriv->vm;
- uint64_t *chunk_array_user;
uint64_t *chunk_array;
uint32_t uf_offset = 0;
size_t size;
int ret;
int i;
- chunk_array = kvmalloc_array(cs->in.num_chunks, sizeof(uint64_t),
- GFP_KERNEL);
- if (!chunk_array)
- return -ENOMEM;
-
- /* get chunks */
- chunk_array_user = u64_to_user_ptr(cs->in.chunks);
- if (copy_from_user(chunk_array, chunk_array_user,
- sizeof(uint64_t)*cs->in.num_chunks)) {
- ret = -EFAULT;
- goto free_chunk;
- }
+ chunk_array = memdup_array_user(u64_to_user_ptr(cs->in.chunks),
+ cs->in.num_chunks,
+ sizeof(uint64_t));
+ if (IS_ERR(chunk_array))
+ return PTR_ERR(chunk_array);
p->nchunks = cs->in.num_chunks;
p->chunks = kvmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
@@ -209,7 +202,6 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
for (i = 0; i < p->nchunks; i++) {
struct drm_amdgpu_cs_chunk __user *chunk_ptr = NULL;
struct drm_amdgpu_cs_chunk user_chunk;
- uint32_t __user *cdata;
chunk_ptr = u64_to_user_ptr(chunk_array[i]);
if (copy_from_user(&user_chunk, chunk_ptr,
@@ -222,20 +214,16 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
p->chunks[i].length_dw = user_chunk.length_dw;
size = p->chunks[i].length_dw;
- cdata = u64_to_user_ptr(user_chunk.chunk_data);
- p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t),
- GFP_KERNEL);
- if (p->chunks[i].kdata == NULL) {
- ret = -ENOMEM;
+ p->chunks[i].kdata = vmemdup_array_user(u64_to_user_ptr(user_chunk.chunk_data),
+ size,
+ sizeof(uint32_t));
+ if (IS_ERR(p->chunks[i].kdata)) {
+ ret = PTR_ERR(p->chunks[i].kdata);
i--;
goto free_partial_kdata;
}
size *= sizeof(uint32_t);
- if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
- ret = -EFAULT;
- goto free_partial_kdata;
- }
/* Assume the worst on the following checks */
ret = -EINVAL;
@@ -286,7 +274,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
}
}
- if (!p->gang_size) {
+ if (!p->gang_size || (amdgpu_sriov_vf(p->adev) && p->gang_size > 1)) {
ret = -EINVAL;
goto free_all_kdata;
}
@@ -396,7 +384,7 @@ static int amdgpu_cs_p2_ib(struct amdgpu_cs_parser *p,
chunk_ib->ib_bytes : 0,
AMDGPU_IB_POOL_DELAYED, ib);
if (r) {
- DRM_ERROR("Failed to get ib !\n");
+ drm_err(adev_to_drm(p->adev), "Failed to get ib !\n");
return r;
}
@@ -468,7 +456,7 @@ static int amdgpu_syncobj_lookup_and_add(struct amdgpu_cs_parser *p,
r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence);
if (r) {
- DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n",
+ drm_err(adev_to_drm(p->adev), "syncobj %u failed to find fence @ %llu (%d)!\n",
handle, point, r);
return r;
}
@@ -896,26 +884,13 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
bool userpage_invalidated = false;
struct amdgpu_bo *bo = e->bo;
- int i;
-
- e->user_pages = kvcalloc(bo->tbo.ttm->num_pages,
- sizeof(struct page *),
- GFP_KERNEL);
- if (!e->user_pages) {
- DRM_ERROR("kvmalloc_array failure\n");
- r = -ENOMEM;
- goto out_free_user_pages;
- }
- r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages, &e->range);
- if (r) {
- kvfree(e->user_pages);
- e->user_pages = NULL;
+ r = amdgpu_ttm_tt_get_user_pages(bo, &e->range);
+ if (r)
goto out_free_user_pages;
- }
for (i = 0; i < bo->tbo.ttm->num_pages; i++) {
- if (bo->tbo.ttm->pages[i] != e->user_pages[i]) {
+ if (bo->tbo.ttm->pages[i] != hmm_pfn_to_page(e->range->hmm_pfns[i])) {
userpage_invalidated = true;
break;
}
@@ -959,7 +934,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
}
if (amdgpu_ttm_tt_is_userptr(e->bo->tbo.ttm) &&
- e->user_invalidated && e->user_pages) {
+ e->user_invalidated) {
amdgpu_bo_placement_from_domain(e->bo,
AMDGPU_GEM_DOMAIN_CPU);
r = ttm_bo_validate(&e->bo->tbo, &e->bo->placement,
@@ -968,11 +943,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
goto out_free_user_pages;
amdgpu_ttm_tt_set_user_pages(e->bo->tbo.ttm,
- e->user_pages);
+ e->range);
}
-
- kvfree(e->user_pages);
- e->user_pages = NULL;
}
amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
@@ -983,7 +955,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
r = amdgpu_vm_validate(p->adev, &fpriv->vm, NULL,
amdgpu_cs_bo_validate, p);
if (r) {
- DRM_ERROR("amdgpu_vm_validate() failed.\n");
+ drm_err(adev_to_drm(p->adev), "amdgpu_vm_validate() failed.\n");
goto out_free_user_pages;
}
@@ -1014,11 +986,7 @@ out_free_user_pages:
amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
struct amdgpu_bo *bo = e->bo;
- if (!e->user_pages)
- continue;
amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range);
- kvfree(e->user_pages);
- e->user_pages = NULL;
e->range = NULL;
}
mutex_unlock(&p->bo_list->bo_list_mutex);
@@ -1061,13 +1029,13 @@ static int amdgpu_cs_patch_ibs(struct amdgpu_cs_parser *p,
va_start = ib->gpu_addr & AMDGPU_GMC_HOLE_MASK;
r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
if (r) {
- DRM_ERROR("IB va_start is invalid\n");
+ drm_err(adev_to_drm(p->adev), "IB va_start is invalid\n");
return r;
}
if ((va_start + ib->length_dw * 4) >
(m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
- DRM_ERROR("IB va_start+ib_bytes is invalid\n");
+ drm_err(adev_to_drm(p->adev), "IB va_start+ib_bytes is invalid\n");
return -EINVAL;
}
@@ -1238,7 +1206,7 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_leader_idx]);
if (r) {
if (r != -ERESTARTSYS)
- DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");
+ drm_err(adev_to_drm(p->adev), "amdgpu_ctx_wait_prev_fence failed.\n");
return r;
}
@@ -1451,7 +1419,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
r = amdgpu_cs_parser_init(&parser, adev, filp, data);
if (r) {
- DRM_ERROR_RATELIMITED("Failed to initialize parser %d!\n", r);
+ drm_err_ratelimited(dev, "Failed to initialize parser %d!\n", r);
return r;
}
@@ -1466,9 +1434,9 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
r = amdgpu_cs_parser_bos(&parser, data);
if (r) {
if (r == -ENOMEM)
- DRM_ERROR("Not enough memory for command submission!\n");
+ drm_err(dev, "Not enough memory for command submission!\n");
else if (r != -ERESTARTSYS && r != -EAGAIN)
- DRM_DEBUG("Failed to process the buffer list %d!\n", r);
+ drm_dbg(dev, "Failed to process the buffer list %d!\n", r);
goto error_fini;
}
@@ -1767,30 +1735,21 @@ int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
{
struct amdgpu_device *adev = drm_to_adev(dev);
union drm_amdgpu_wait_fences *wait = data;
- uint32_t fence_count = wait->in.fence_count;
- struct drm_amdgpu_fence *fences_user;
struct drm_amdgpu_fence *fences;
int r;
/* Get the fences from userspace */
- fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
- GFP_KERNEL);
- if (fences == NULL)
- return -ENOMEM;
-
- fences_user = u64_to_user_ptr(wait->in.fences);
- if (copy_from_user(fences, fences_user,
- sizeof(struct drm_amdgpu_fence) * fence_count)) {
- r = -EFAULT;
- goto err_free_fences;
- }
+ fences = memdup_array_user(u64_to_user_ptr(wait->in.fences),
+ wait->in.fence_count,
+ sizeof(struct drm_amdgpu_fence));
+ if (IS_ERR(fences))
+ return PTR_ERR(fences);
if (wait->in.wait_all)
r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
else
r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);
-err_free_fences:
kfree(fences);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 0e6e2e2acf5b..a70651050acf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -2136,12 +2136,14 @@ static int amdgpu_pt_info_read(struct seq_file *m, void *unused)
struct drm_file *file;
struct amdgpu_fpriv *fpriv;
struct amdgpu_bo *root_bo;
+ struct amdgpu_device *adev;
int r;
file = m->private;
if (!file)
return -EINVAL;
+ adev = drm_to_adev(file->minor->dev);
fpriv = file->driver_priv;
if (!fpriv || !fpriv->vm.root.bo)
return -ENODEV;
@@ -2153,7 +2155,11 @@ static int amdgpu_pt_info_read(struct seq_file *m, void *unused)
return -EINVAL;
}
- seq_printf(m, "gpu_address: 0x%llx\n", amdgpu_bo_gpu_offset(fpriv->vm.root.bo));
+ seq_printf(m, "pd_address: 0x%llx\n", amdgpu_gmc_pd_addr(fpriv->vm.root.bo));
+ seq_printf(m, "max_pfn: 0x%llx\n", adev->vm_manager.max_pfn);
+ seq_printf(m, "num_level: 0x%x\n", adev->vm_manager.num_level);
+ seq_printf(m, "block_size: 0x%x\n", adev->vm_manager.block_size);
+ seq_printf(m, "fragment_size: 0x%x\n", adev->vm_manager.fragment_size);
amdgpu_bo_unreserve(root_bo);
amdgpu_bo_unref(&root_bo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index c8459337fcb8..a77000c2e0bb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -95,6 +95,7 @@ MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
+MODULE_FIRMWARE("amdgpu/cyan_skillfish_gpu_info.bin");
#define AMDGPU_RESUME_MS 2000
#define AMDGPU_MAX_RETRY_LIMIT 2
@@ -178,6 +179,8 @@ struct amdgpu_init_level amdgpu_init_minimal_xgmi = {
BIT(AMD_IP_BLOCK_TYPE_PSP)
};
+static void amdgpu_device_load_switch_state(struct amdgpu_device *adev);
+
static inline bool amdgpu_ip_member_of_hwini(struct amdgpu_device *adev,
enum amd_ip_block_type block)
{
@@ -2445,6 +2448,33 @@ int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
return 1;
}
+static const char *ip_block_names[] = {
+ [AMD_IP_BLOCK_TYPE_COMMON] = "common",
+ [AMD_IP_BLOCK_TYPE_GMC] = "gmc",
+ [AMD_IP_BLOCK_TYPE_IH] = "ih",
+ [AMD_IP_BLOCK_TYPE_SMC] = "smu",
+ [AMD_IP_BLOCK_TYPE_PSP] = "psp",
+ [AMD_IP_BLOCK_TYPE_DCE] = "dce",
+ [AMD_IP_BLOCK_TYPE_GFX] = "gfx",
+ [AMD_IP_BLOCK_TYPE_SDMA] = "sdma",
+ [AMD_IP_BLOCK_TYPE_UVD] = "uvd",
+ [AMD_IP_BLOCK_TYPE_VCE] = "vce",
+ [AMD_IP_BLOCK_TYPE_ACP] = "acp",
+ [AMD_IP_BLOCK_TYPE_VCN] = "vcn",
+ [AMD_IP_BLOCK_TYPE_MES] = "mes",
+ [AMD_IP_BLOCK_TYPE_JPEG] = "jpeg",
+ [AMD_IP_BLOCK_TYPE_VPE] = "vpe",
+ [AMD_IP_BLOCK_TYPE_UMSCH_MM] = "umsch_mm",
+ [AMD_IP_BLOCK_TYPE_ISP] = "isp",
+};
+
+static const char *ip_block_name(struct amdgpu_device *adev, enum amd_ip_block_type type)
+{
+ int idx = (int)type;
+
+ return idx < ARRAY_SIZE(ip_block_names) ? ip_block_names[idx] : "unknown";
+}
+
/**
* amdgpu_device_ip_block_add
*
@@ -2473,8 +2503,13 @@ int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
break;
}
- dev_info(adev->dev, "detected ip block number %d <%s>\n",
- adev->num_ip_blocks, ip_block_version->funcs->name);
+ dev_info(adev->dev, "detected ip block number %d <%s_v%d_%d_%d> (%s)\n",
+ adev->num_ip_blocks,
+ ip_block_name(adev, ip_block_version->type),
+ ip_block_version->major,
+ ip_block_version->minor,
+ ip_block_version->rev,
+ ip_block_version->funcs->name);
adev->ip_blocks[adev->num_ip_blocks].adev = adev;
@@ -2595,6 +2630,9 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
return 0;
chip_name = "navi12";
break;
+ case CHIP_CYAN_SKILLFISH:
+ chip_name = "cyan_skillfish";
+ break;
}
err = amdgpu_ucode_request(adev, &adev->firmware.gpu_info_fw,
@@ -2674,6 +2712,24 @@ out:
return err;
}
+static void amdgpu_uid_init(struct amdgpu_device *adev)
+{
+ /* Initialize the UID for the device */
+ adev->uid_info = kzalloc(sizeof(struct amdgpu_uid), GFP_KERNEL);
+ if (!adev->uid_info) {
+ dev_warn(adev->dev, "Failed to allocate memory for UID\n");
+ return;
+ }
+ adev->uid_info->adev = adev;
+}
+
+static void amdgpu_uid_fini(struct amdgpu_device *adev)
+{
+ /* Free the UID memory */
+ kfree(adev->uid_info);
+ adev->uid_info = NULL;
+}
+
/**
* amdgpu_device_ip_early_init - run early init for hardware IPs
*
@@ -2857,6 +2913,8 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
if (adev->gmc.xgmi.supported)
amdgpu_xgmi_early_init(adev);
+ if (amdgpu_is_multi_aid(adev))
+ amdgpu_uid_init(adev);
ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
if (ip_block->status.valid != false)
amdgpu_amdkfd_device_probe(adev);
@@ -3389,7 +3447,7 @@ static int amdgpu_device_enable_mgpu_fan_boost(void)
for (i = 0; i < mgpu_info.num_dgpu; i++) {
gpu_ins = &(mgpu_info.gpu_ins[i]);
adev = gpu_ins->adev;
- if (!(adev->flags & AMD_IS_APU) &&
+ if (!(adev->flags & AMD_IS_APU || amdgpu_sriov_multi_vf_mode(adev)) &&
!gpu_ins->mgpu_fan_enabled) {
ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
if (ret)
@@ -3648,6 +3706,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
}
amdgpu_ras_fini(adev);
+ amdgpu_uid_fini(adev);
return 0;
}
@@ -4992,7 +5051,8 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
adev->reset_domain = NULL;
kfree(adev->pci_state);
-
+ kfree(adev->pcie_reset_ctx.swds_pcistate);
+ kfree(adev->pcie_reset_ctx.swus_pcistate);
}
/**
@@ -5012,6 +5072,10 @@ static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
if (!adev->in_s4 && (adev->flags & AMD_IS_APU))
return 0;
+ /* No need to evict when going to S5 through S4 callbacks */
+ if (system_state == SYSTEM_POWER_OFF)
+ return 0;
+
ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
if (ret) {
dev_warn(adev->dev, "evicting device resources failed\n");
@@ -5697,7 +5761,7 @@ int amdgpu_device_link_reset(struct amdgpu_device *adev)
dev_info(adev->dev, "GPU link reset\n");
- if (!adev->pcie_reset_ctx.occurs_dpc)
+ if (!amdgpu_reset_in_dpc(adev))
ret = amdgpu_dpm_link_reset(adev);
if (ret)
@@ -5826,6 +5890,7 @@ int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context)
amdgpu_set_init_level(tmp_adev, init_level);
if (full_reset) {
/* post card */
+ amdgpu_reset_set_dpc_status(tmp_adev, false);
amdgpu_ras_clear_err_state(tmp_adev);
r = amdgpu_device_asic_init(tmp_adev);
if (r) {
@@ -6132,12 +6197,11 @@ static int amdgpu_device_health_check(struct list_head *device_list_handle)
return ret;
}
-static int amdgpu_device_recovery_prepare(struct amdgpu_device *adev,
+static void amdgpu_device_recovery_prepare(struct amdgpu_device *adev,
struct list_head *device_list,
struct amdgpu_hive_info *hive)
{
struct amdgpu_device *tmp_adev = NULL;
- int r;
/*
* Build list of devices to reset.
@@ -6149,7 +6213,7 @@ static int amdgpu_device_recovery_prepare(struct amdgpu_device *adev,
list_add_tail(&tmp_adev->reset_list, device_list);
if (adev->shutdown)
tmp_adev->shutdown = true;
- if (adev->pcie_reset_ctx.occurs_dpc)
+ if (amdgpu_reset_in_dpc(adev))
tmp_adev->pcie_reset_ctx.in_link_reset = true;
}
if (!list_is_first(&adev->reset_list, device_list))
@@ -6157,14 +6221,6 @@ static int amdgpu_device_recovery_prepare(struct amdgpu_device *adev,
} else {
list_add_tail(&adev->reset_list, device_list);
}
-
- if (!amdgpu_sriov_vf(adev) && (!adev->pcie_reset_ctx.occurs_dpc)) {
- r = amdgpu_device_health_check(device_list);
- if (r)
- return r;
- }
-
- return 0;
}
static void amdgpu_device_recovery_get_reset_lock(struct amdgpu_device *adev,
@@ -6233,9 +6289,8 @@ static void amdgpu_device_halt_activities(struct amdgpu_device *adev,
drm_client_dev_suspend(adev_to_drm(tmp_adev), false);
/* disable ras on ALL IPs */
- if (!need_emergency_restart &&
- (!adev->pcie_reset_ctx.occurs_dpc) &&
- amdgpu_device_ip_need_full_reset(tmp_adev))
+ if (!need_emergency_restart && !amdgpu_reset_in_dpc(adev) &&
+ amdgpu_device_ip_need_full_reset(tmp_adev))
amdgpu_ras_suspend(tmp_adev);
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
@@ -6263,11 +6318,7 @@ static int amdgpu_device_asic_reset(struct amdgpu_device *adev,
retry: /* Rest of adevs pre asic reset from XGMI hive. */
list_for_each_entry(tmp_adev, device_list, reset_list) {
- if (adev->pcie_reset_ctx.occurs_dpc)
- tmp_adev->no_hw_access = true;
r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
- if (adev->pcie_reset_ctx.occurs_dpc)
- tmp_adev->no_hw_access = false;
/*TODO Should we stop ?*/
if (r) {
dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
@@ -6445,8 +6496,9 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
emergency_restart();
}
- dev_info(adev->dev, "GPU %s begin!\n",
- need_emergency_restart ? "jobs stop":"reset");
+	dev_info(adev->dev, "GPU %s begin! Source: %d\n",
+ need_emergency_restart ? "jobs stop" : "reset",
+ reset_context->src);
if (!amdgpu_sriov_vf(adev))
hive = amdgpu_get_xgmi_hive(adev);
@@ -6457,8 +6509,13 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
reset_context->hive = hive;
INIT_LIST_HEAD(&device_list);
- if (amdgpu_device_recovery_prepare(adev, &device_list, hive))
- goto end_reset;
+ amdgpu_device_recovery_prepare(adev, &device_list, hive);
+
+ if (!amdgpu_sriov_vf(adev)) {
+ r = amdgpu_device_health_check(&device_list);
+ if (r)
+ goto end_reset;
+ }
/* We need to lock reset domain only once both for XGMI and single device */
amdgpu_device_recovery_get_reset_lock(adev, &device_list);
@@ -6880,17 +6937,13 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
{
struct drm_device *dev = pci_get_drvdata(pdev);
struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
+ struct amdgpu_hive_info *hive __free(xgmi_put_hive) =
+ amdgpu_get_xgmi_hive(adev);
struct amdgpu_reset_context reset_context;
struct list_head device_list;
dev_info(adev->dev, "PCI error: detected callback!!\n");
- if (!amdgpu_dpm_is_link_reset_supported(adev)) {
- dev_warn(adev->dev, "No support for XGMI hive yet...\n");
- return PCI_ERS_RESULT_DISCONNECT;
- }
-
adev->pci_channel_state = state;
switch (state) {
@@ -6900,10 +6953,23 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
case pci_channel_io_frozen:
/* Fatal error, prepare for slot reset */
dev_info(adev->dev, "pci_channel_io_frozen: state(%d)!!\n", state);
+ if (hive) {
+		/* Devices in a hive must support FW-based link reset
+		 * on the other hive members; if they don't, bail out.
+		 */
+ if (!amdgpu_dpm_is_link_reset_supported(adev)) {
+ dev_warn(adev->dev,
+ "No support for XGMI hive yet...\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+		/* Set the DPC status only if the device is part of a hive.
+		 * Non-hive devices should be able to recover on their own
+		 * after a link reset.
+		 */
+ amdgpu_reset_set_dpc_status(adev, true);
- if (hive)
mutex_lock(&hive->hive_lock);
- adev->pcie_reset_ctx.occurs_dpc = true;
+ }
memset(&reset_context, 0, sizeof(reset_context));
INIT_LIST_HEAD(&device_list);
@@ -6911,10 +6977,8 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
amdgpu_device_recovery_get_reset_lock(adev, &device_list);
amdgpu_device_halt_activities(adev, NULL, &reset_context, &device_list,
hive, false);
- if (hive) {
+ if (hive)
mutex_unlock(&hive->hive_lock);
- amdgpu_put_xgmi_hive(hive);
- }
return PCI_ERS_RESULT_NEED_RESET;
case pci_channel_io_perm_failure:
/* Permanent error, prepare for device removal */
@@ -6962,22 +7026,34 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
struct amdgpu_device *tmp_adev;
struct amdgpu_hive_info *hive;
struct list_head device_list;
- int r = 0, i;
+ struct pci_dev *link_dev;
+ int r = 0, i, timeout;
u32 memsize;
-
- /* PCI error slot reset should be skipped During RAS recovery */
- if ((amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)) &&
- amdgpu_ras_in_recovery(adev))
- return PCI_ERS_RESULT_RECOVERED;
+ u16 status;
dev_info(adev->dev, "PCI error: slot reset callback!!\n");
memset(&reset_context, 0, sizeof(reset_context));
- /* wait for asic to come out of reset */
- msleep(700);
+ if (adev->pcie_reset_ctx.swus)
+ link_dev = adev->pcie_reset_ctx.swus;
+ else
+ link_dev = adev->pdev;
+ /* wait for asic to come out of reset, timeout = 10s */
+ timeout = 10000;
+ do {
+ usleep_range(10000, 10500);
+ r = pci_read_config_word(link_dev, PCI_VENDOR_ID, &status);
+ timeout -= 10;
+ } while (timeout > 0 && (status != PCI_VENDOR_ID_ATI) &&
+ (status != PCI_VENDOR_ID_AMD));
+ if ((status != PCI_VENDOR_ID_ATI) && (status != PCI_VENDOR_ID_AMD)) {
+ r = -ETIME;
+ goto out;
+ }
+
+ amdgpu_device_load_switch_state(adev);
/* Restore PCI confspace */
amdgpu_device_load_pci_state(pdev);
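
The poll above is the usual "wait for config space to come back" idiom:
vendor-ID reads return all-ones while the link is down and the real vendor ID
once the device re-enumerates. A generic, minimal sketch of the same
technique (the helper name is made up; pci_read_config_word() and
usleep_range() are the real kernel interfaces):

	static bool wait_for_config_space(struct pci_dev *pdev, int timeout_ms)
	{
		u16 vendor = 0xffff;

		while (timeout_ms > 0) {
			usleep_range(10000, 10500);	/* ~10 ms per probe */
			pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor);
			if (vendor != 0xffff)		/* link is back up */
				return true;
			timeout_ms -= 10;
		}
		return false;
	}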
@@ -7072,7 +7148,6 @@ void amdgpu_pci_resume(struct pci_dev *pdev)
amdgpu_device_sched_resume(&device_list, NULL, NULL);
amdgpu_device_gpu_resume(adev, &device_list, false);
amdgpu_device_recovery_put_reset_lock(adev, &device_list);
- adev->pcie_reset_ctx.occurs_dpc = false;
if (hive) {
mutex_unlock(&hive->hive_lock);
@@ -7080,6 +7155,58 @@ void amdgpu_pci_resume(struct pci_dev *pdev)
}
}
+static void amdgpu_device_cache_switch_state(struct amdgpu_device *adev)
+{
+ struct pci_dev *parent = pci_upstream_bridge(adev->pdev);
+ int r;
+
+ if (!parent || parent->vendor != PCI_VENDOR_ID_ATI)
+ return;
+
+ /* If already saved, return */
+ if (adev->pcie_reset_ctx.swus)
+ return;
+ /* Upstream bridge is ATI, assume it's SWUS/DS architecture */
+ r = pci_save_state(parent);
+ if (r)
+ return;
+ adev->pcie_reset_ctx.swds_pcistate = pci_store_saved_state(parent);
+
+ parent = pci_upstream_bridge(parent);
+ r = pci_save_state(parent);
+ if (r)
+ return;
+ adev->pcie_reset_ctx.swus_pcistate = pci_store_saved_state(parent);
+
+ adev->pcie_reset_ctx.swus = parent;
+}
+
+static void amdgpu_device_load_switch_state(struct amdgpu_device *adev)
+{
+ struct pci_dev *pdev;
+ int r;
+
+ if (!adev->pcie_reset_ctx.swds_pcistate ||
+ !adev->pcie_reset_ctx.swus_pcistate)
+ return;
+
+ pdev = adev->pcie_reset_ctx.swus;
+ r = pci_load_saved_state(pdev, adev->pcie_reset_ctx.swus_pcistate);
+ if (!r) {
+ pci_restore_state(pdev);
+ } else {
+ dev_warn(adev->dev, "Failed to load SWUS state, err:%d\n", r);
+ return;
+ }
+
+ pdev = pci_upstream_bridge(adev->pdev);
+ r = pci_load_saved_state(pdev, adev->pcie_reset_ctx.swds_pcistate);
+ if (!r)
+ pci_restore_state(pdev);
+ else
+ dev_warn(adev->dev, "Failed to load SWDS state, err:%d\n", r);
+}
+
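
The save/store/load/restore calls above follow the standard PCI core pairing;
a condensed sketch of the lifecycle (real PCI APIs, bridge lookup and error
handling elided):

	pci_save_state(bridge);			/* snapshot into bridge->saved_state */
	state = pci_store_saved_state(bridge);	/* detach a caller-owned copy */
	/* ... device goes through DPC / link reset ... */
	pci_load_saved_state(bridge, state);	/* stage the copy for restore */
	pci_restore_state(bridge);		/* write it back to the hardware */
	kfree(state);				/* here done in amdgpu_device_fini_sw() */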
bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
@@ -7104,6 +7231,8 @@ bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
return false;
}
+ amdgpu_device_cache_switch_state(adev);
+
return true;
}
@@ -7490,3 +7619,53 @@ ssize_t amdgpu_show_reset_mask(char *buf, uint32_t supported_reset)
size += sysfs_emit_at(buf, size, "\n");
return size;
}
+
+void amdgpu_device_set_uid(struct amdgpu_uid *uid_info,
+ enum amdgpu_uid_type type, uint8_t inst,
+ uint64_t uid)
+{
+ if (!uid_info)
+ return;
+
+ if (type >= AMDGPU_UID_TYPE_MAX) {
+ dev_err_once(uid_info->adev->dev, "Invalid UID type %d\n",
+ type);
+ return;
+ }
+
+ if (inst >= AMDGPU_UID_INST_MAX) {
+ dev_err_once(uid_info->adev->dev, "Invalid UID instance %d\n",
+ inst);
+ return;
+ }
+
+ if (uid_info->uid[type][inst] != 0) {
+ dev_warn_once(
+ uid_info->adev->dev,
+ "Overwriting existing UID %llu for type %d instance %d\n",
+ uid_info->uid[type][inst], type, inst);
+ }
+
+ uid_info->uid[type][inst] = uid;
+}
+
+u64 amdgpu_device_get_uid(struct amdgpu_uid *uid_info,
+ enum amdgpu_uid_type type, uint8_t inst)
+{
+ if (!uid_info)
+ return 0;
+
+ if (type >= AMDGPU_UID_TYPE_MAX) {
+ dev_err_once(uid_info->adev->dev, "Invalid UID type %d\n",
+ type);
+ return 0;
+ }
+
+ if (inst >= AMDGPU_UID_INST_MAX) {
+ dev_err_once(uid_info->adev->dev, "Invalid UID instance %d\n",
+ inst);
+ return 0;
+ }
+
+ return uid_info->uid[type][inst];
+}
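
Typical usage of the pair (a sketch; the AMDGPU_UID_TYPE_* value and the
firmware-supplied fw_uid variable are illustrative, not taken from this
patch):

	/* record the UID reported by firmware for instance 0 */
	amdgpu_device_set_uid(adev->uid_info, AMDGPU_UID_TYPE_SOC, 0, fw_uid);

	/* later lookups return 0 when nothing was recorded */
	u64 uid = amdgpu_device_get_uid(adev->uid_info, AMDGPU_UID_TYPE_SOC, 0);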
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index efe0058b48ca..73401f0aeb34 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -2124,7 +2124,6 @@ static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(11, 0, 5):
case IP_VERSION(11, 0, 9):
case IP_VERSION(11, 0, 7):
- case IP_VERSION(11, 0, 8):
case IP_VERSION(11, 0, 11):
case IP_VERSION(11, 0, 12):
case IP_VERSION(11, 0, 13):
@@ -2132,6 +2131,10 @@ static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(11, 5, 2):
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
break;
+ case IP_VERSION(11, 0, 8):
+ if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2)
+ amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
+ break;
case IP_VERSION(12, 0, 0):
case IP_VERSION(12, 0, 1):
amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
@@ -2746,6 +2749,36 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0);
adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
break;
+ case CHIP_CYAN_SKILLFISH:
+ if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) {
+ r = amdgpu_discovery_reg_base_init(adev);
+ if (r)
+ return -EINVAL;
+
+ amdgpu_discovery_harvest_ip(adev);
+ amdgpu_discovery_get_gfx_info(adev);
+ amdgpu_discovery_get_mall_info(adev);
+ amdgpu_discovery_get_vcn_info(adev);
+ } else {
+ cyan_skillfish_reg_base_init(adev);
+ adev->sdma.num_instances = 2;
+ adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(2, 0, 3);
+ adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(2, 0, 3);
+ adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(5, 0, 1);
+ adev->ip_versions[HDP_HWIP][0] = IP_VERSION(5, 0, 1);
+ adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(5, 0, 1);
+ adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(5, 0, 1);
+ adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 5, 0);
+ adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(2, 1, 1);
+ adev->ip_versions[UMC_HWIP][0] = IP_VERSION(8, 1, 1);
+ adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 8);
+ adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 8);
+ adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 1);
+ adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 8);
+ adev->ip_versions[GC_HWIP][0] = IP_VERSION(10, 1, 3);
+ adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 0, 3);
+ }
+ break;
default:
r = amdgpu_discovery_reg_base_init(adev);
if (r) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index ce27cb5bb05e..8561ad7f6180 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -343,11 +343,23 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj,
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
struct dma_buf *buf;
+ struct ttm_operation_ctx ctx = {
+ .interruptible = true,
+ .no_wait_gpu = true,
+		/* Let system page allocations fail rather than trigger OOM */
+ .gfp_retry_mayfail = true,
+ .allow_res_evict = false,
+ };
+ int ret;
if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
return ERR_PTR(-EPERM);
+ ret = ttm_bo_setup_export(&bo->tbo, &ctx);
+ if (ret)
+ return ERR_PTR(ret);
+
buf = drm_gem_prime_export(gobj, flags);
if (!IS_ERR(buf))
buf->ops = &amdgpu_dmabuf_ops;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 395c6be901ce..bff25ef3e2d0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -886,7 +886,7 @@ module_param_named(dcfeaturemask, amdgpu_dc_feature_mask, uint, 0444);
/**
* DOC: dcdebugmask (uint)
- * Override display features enabled. See enum DC_DEBUG_MASK in drivers/gpu/drm/amd/include/amd_shared.h.
+ * Display debug options. See enum DC_DEBUG_MASK in drivers/gpu/drm/amd/include/amd_shared.h.
*/
MODULE_PARM_DESC(dcdebugmask, "all debug options disabled (default)");
module_param_named(dcdebugmask, amdgpu_dc_debug_mask, uint, 0444);
@@ -960,7 +960,7 @@ module_param_named(tmz, amdgpu_tmz, int, 0444);
*/
MODULE_PARM_DESC(
freesync_video,
- "Enable freesync modesetting optimization feature (0 = off (default), 1 = on)");
+ "Adds additional modes via VRR for refresh changes without a full modeset (0 = off (default), 1 = on)");
module_param_named(freesync_video, amdgpu_freesync_vid_mode, uint, 0444);
/**
@@ -2172,6 +2172,11 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x7410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN},
/* CYAN_SKILLFISH */
+ {0x1002, 0x13DB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
+ {0x1002, 0x13F9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
+ {0x1002, 0x13FA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
+ {0x1002, 0x13FB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
+ {0x1002, 0x13FC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
{0x1002, 0x13FE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
{0x1002, 0x143F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
@@ -2597,6 +2602,7 @@ static int amdgpu_pmops_suspend(struct device *dev)
else if (amdgpu_acpi_is_s3_active(adev))
adev->in_s3 = true;
if (!adev->in_s0ix && !adev->in_s3) {
+#if IS_ENABLED(CONFIG_SUSPEND)
/* don't allow going deep first time followed by s2idle the next time */
if (adev->last_suspend_state != PM_SUSPEND_ON &&
adev->last_suspend_state != pm_suspend_target_state) {
@@ -2604,11 +2610,14 @@ static int amdgpu_pmops_suspend(struct device *dev)
pm_suspend_target_state);
return -EINVAL;
}
+#endif
return 0;
}
+#if IS_ENABLED(CONFIG_SUSPEND)
/* cache the state last used for suspend */
adev->last_suspend_state = pm_suspend_target_state;
+#endif
return amdgpu_device_suspend(drm_dev, true);
}
@@ -2665,7 +2674,7 @@ static int amdgpu_pmops_thaw(struct device *dev)
struct drm_device *drm_dev = dev_get_drvdata(dev);
/* do not resume device if it's normal hibernation */
- if (!pm_hibernate_is_recovering())
+ if (!pm_hibernate_is_recovering() && !pm_hibernation_mode_is_suspend())
return 0;
return amdgpu_device_resume(drm_dev, true);
@@ -2933,11 +2942,14 @@ static int amdgpu_drm_release(struct inode *inode, struct file *filp)
{
struct drm_file *file_priv = filp->private_data;
struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
+ struct drm_device *dev = file_priv->minor->dev;
+ int idx;
- if (fpriv) {
+ if (fpriv && drm_dev_enter(dev, &idx)) {
fpriv->evf_mgr.fd_closing = true;
amdgpu_eviction_fence_destroy(&fpriv->evf_mgr);
amdgpu_userq_mgr_fini(&fpriv->userq_mgr);
+ drm_dev_exit(idx);
}
return drm_release(inode, filp);
@@ -2964,15 +2976,15 @@ out:
}
static const struct dev_pm_ops amdgpu_pm_ops = {
- .prepare = amdgpu_pmops_prepare,
- .complete = amdgpu_pmops_complete,
- .suspend = amdgpu_pmops_suspend,
- .suspend_noirq = amdgpu_pmops_suspend_noirq,
- .resume = amdgpu_pmops_resume,
- .freeze = amdgpu_pmops_freeze,
- .thaw = amdgpu_pmops_thaw,
- .poweroff = amdgpu_pmops_poweroff,
- .restore = amdgpu_pmops_restore,
+ .prepare = pm_sleep_ptr(amdgpu_pmops_prepare),
+ .complete = pm_sleep_ptr(amdgpu_pmops_complete),
+ .suspend = pm_sleep_ptr(amdgpu_pmops_suspend),
+ .suspend_noirq = pm_sleep_ptr(amdgpu_pmops_suspend_noirq),
+ .resume = pm_sleep_ptr(amdgpu_pmops_resume),
+ .freeze = pm_sleep_ptr(amdgpu_pmops_freeze),
+ .thaw = pm_sleep_ptr(amdgpu_pmops_thaw),
+ .poweroff = pm_sleep_ptr(amdgpu_pmops_poweroff),
+ .restore = pm_sleep_ptr(amdgpu_pmops_restore),
.runtime_suspend = amdgpu_pmops_runtime_suspend,
.runtime_resume = amdgpu_pmops_runtime_resume,
.runtime_idle = amdgpu_pmops_runtime_idle,
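
For reference, pm_sleep_ptr() (include/linux/pm.h) reduces to its argument
only when CONFIG_PM_SLEEP is enabled and to NULL otherwise, so the sleep
callbacks can be garbage-collected on !PM_SLEEP builds while the references
stay type-checked:

	#define pm_sleep_ptr(_ptr)	PTR_IF(IS_ENABLED(CONFIG_PM_SLEEP), (_ptr))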
@@ -3044,6 +3056,7 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
DRM_IOCTL_DEF_DRV(AMDGPU_USERQ, amdgpu_userq_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_USERQ_SIGNAL, amdgpu_userq_signal_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_USERQ_WAIT, amdgpu_userq_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_GEM_LIST_HANDLES, amdgpu_gem_list_handles_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
};
static const struct drm_driver amdgpu_kms_driver = {
@@ -3117,7 +3130,7 @@ static struct pci_driver amdgpu_kms_pci_driver = {
.probe = amdgpu_pci_probe,
.remove = amdgpu_pci_remove,
.shutdown = amdgpu_pci_shutdown,
- .driver.pm = &amdgpu_pm_ops,
+ .driver.pm = pm_ptr(&amdgpu_pm_ops),
.err_handler = &amdgpu_pci_err_handler,
.dev_groups = amdgpu_sysfs_groups,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
index 91d638098889..b349bb3676d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
@@ -70,6 +70,7 @@ void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file)
[AMDGPU_PL_GWS] = "gws",
[AMDGPU_PL_OA] = "oa",
[AMDGPU_PL_DOORBELL] = "doorbell",
+ [AMDGPU_PL_MMIO_REMAP] = "mmioremap",
};
unsigned int hw_ip, i;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 9e7506965cab..fd8cca241da6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -120,7 +120,6 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
am_fence = kzalloc(sizeof(*am_fence), GFP_KERNEL);
if (!am_fence)
return -ENOMEM;
- am_fence->context = 0;
} else {
am_fence = af;
}
@@ -738,7 +737,7 @@ void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
}
-/**
+/*
* Kernel queue reset handling
*
* The driver can reset individual queues for most engines, but those queues
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index d1ccbfcf21fa..b7ebae289bea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -443,15 +443,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
int r;
/* reject invalid gem flags */
- if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
- AMDGPU_GEM_CREATE_CPU_GTT_USWC |
- AMDGPU_GEM_CREATE_VRAM_CLEARED |
- AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
- AMDGPU_GEM_CREATE_EXPLICIT_SYNC |
- AMDGPU_GEM_CREATE_ENCRYPTED |
- AMDGPU_GEM_CREATE_GFX12_DCC |
- AMDGPU_GEM_CREATE_DISCARDABLE))
+ if (flags & ~AMDGPU_GEM_CREATE_SETTABLE_MASK)
return -EINVAL;
/* reject invalid gem domains */
@@ -466,6 +458,9 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
/* always clear VRAM */
flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;
+ if (args->in.domains & AMDGPU_GEM_DOMAIN_MMIO_REMAP)
+ return -EINVAL;
+
/* create a gem object to contain this object in */
if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
@@ -577,8 +572,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
goto release_object;
if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
- r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages,
- &range);
+ r = amdgpu_ttm_tt_get_user_pages(bo, &range);
if (r)
goto release_object;
@@ -586,6 +580,8 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
if (r)
goto user_pages_done;
+ amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, range);
+
amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
amdgpu_bo_unreserve(bo);
@@ -791,36 +787,6 @@ error:
return fence;
}
-/**
- * amdgpu_gem_va_map_flags - map GEM UAPI flags into hardware flags
- *
- * @adev: amdgpu_device pointer
- * @flags: GEM UAPI flags
- *
- * Returns the GEM UAPI flags mapped into hardware for the ASIC.
- */
-uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags)
-{
- uint64_t pte_flag = 0;
-
- if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
- pte_flag |= AMDGPU_PTE_EXECUTABLE;
- if (flags & AMDGPU_VM_PAGE_READABLE)
- pte_flag |= AMDGPU_PTE_READABLE;
- if (flags & AMDGPU_VM_PAGE_WRITEABLE)
- pte_flag |= AMDGPU_PTE_WRITEABLE;
- if (flags & AMDGPU_VM_PAGE_PRT)
- pte_flag |= AMDGPU_PTE_PRT_FLAG(adev);
- if (flags & AMDGPU_VM_PAGE_NOALLOC)
- pte_flag |= AMDGPU_PTE_NOALLOC;
-
- if (adev->gmc.gmc_funcs->map_mtype)
- pte_flag |= amdgpu_gmc_map_mtype(adev,
- flags & AMDGPU_VM_MTYPE_MASK);
-
- return pte_flag;
-}
-
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
@@ -841,7 +807,6 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct dma_fence_chain *timeline_chain = NULL;
struct dma_fence *fence;
struct drm_exec exec;
- uint64_t va_flags;
uint64_t vm_size;
int r = 0;
@@ -945,10 +910,9 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
switch (args->operation) {
case AMDGPU_VA_OP_MAP:
- va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
args->offset_in_bo, args->map_size,
- va_flags);
+ args->flags);
break;
case AMDGPU_VA_OP_UNMAP:
r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
@@ -960,10 +924,9 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
args->map_size);
break;
case AMDGPU_VA_OP_REPLACE:
- va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
args->offset_in_bo, args->map_size,
- va_flags);
+ args->flags);
break;
default:
break;
@@ -997,17 +960,34 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
struct drm_gem_object *gobj;
struct amdgpu_vm_bo_base *base;
struct amdgpu_bo *robj;
+ struct drm_exec exec;
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
int r;
+ if (args->padding)
+ return -EINVAL;
+
gobj = drm_gem_object_lookup(filp, args->handle);
if (!gobj)
return -ENOENT;
robj = gem_to_amdgpu_bo(gobj);
- r = amdgpu_bo_reserve(robj, false);
- if (unlikely(r))
- goto out;
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
+ DRM_EXEC_IGNORE_DUPLICATES, 0);
+ drm_exec_until_all_locked(&exec) {
+ r = drm_exec_lock_obj(&exec, gobj);
+ drm_exec_retry_on_contention(&exec);
+ if (r)
+ goto out_exec;
+
+ if (args->op == AMDGPU_GEM_OP_GET_MAPPING_INFO) {
+ r = amdgpu_vm_lock_pd(&fpriv->vm, &exec, 0);
+ drm_exec_retry_on_contention(&exec);
+ if (r)
+ goto out_exec;
+ }
+ }
switch (args->op) {
case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
@@ -1018,7 +998,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
info.alignment = robj->tbo.page_alignment << PAGE_SHIFT;
info.domains = robj->preferred_domains;
info.domain_flags = robj->flags;
- amdgpu_bo_unreserve(robj);
+ drm_exec_fini(&exec);
if (copy_to_user(out, &info, sizeof(info)))
r = -EFAULT;
break;
@@ -1027,20 +1007,17 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
if (drm_gem_is_imported(&robj->tbo.base) &&
args->value & AMDGPU_GEM_DOMAIN_VRAM) {
r = -EINVAL;
- amdgpu_bo_unreserve(robj);
- break;
+ goto out_exec;
}
if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
r = -EPERM;
- amdgpu_bo_unreserve(robj);
- break;
+ goto out_exec;
}
for (base = robj->vm_bo; base; base = base->next)
if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev),
amdgpu_ttm_adev(base->vm->root.bo->tbo.bdev))) {
r = -EINVAL;
- amdgpu_bo_unreserve(robj);
- goto out;
+ goto out_exec;
}
@@ -1053,17 +1030,146 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
amdgpu_vm_bo_invalidate(robj, true);
+ drm_exec_fini(&exec);
+ break;
+ case AMDGPU_GEM_OP_GET_MAPPING_INFO: {
+ struct amdgpu_bo_va *bo_va = amdgpu_vm_bo_find(&fpriv->vm, robj);
+ struct drm_amdgpu_gem_vm_entry *vm_entries;
+ struct amdgpu_bo_va_mapping *mapping;
+ int num_mappings = 0;
+		/*
+		 * On input, num_entries is the size of the user-allocated
+		 * array of drm_amdgpu_gem_vm_entry stored at args->value.
+		 * On output, it is the number of mappings the BO has. If
+		 * that number is larger than the array size, the ioctl must
+		 * be retried with a bigger array.
+		 */
+ vm_entries = kvcalloc(args->num_entries, sizeof(*vm_entries), GFP_KERNEL);
+		if (!vm_entries) {
+			r = -ENOMEM;
+			goto out_exec;
+		}
+
+ amdgpu_vm_bo_va_for_each_valid_mapping(bo_va, mapping) {
+ if (num_mappings < args->num_entries) {
+ vm_entries[num_mappings].addr = mapping->start * AMDGPU_GPU_PAGE_SIZE;
+ vm_entries[num_mappings].size = (mapping->last - mapping->start + 1) * AMDGPU_GPU_PAGE_SIZE;
+ vm_entries[num_mappings].offset = mapping->offset;
+ vm_entries[num_mappings].flags = mapping->flags;
+ }
+ num_mappings += 1;
+ }
+
+ amdgpu_vm_bo_va_for_each_invalid_mapping(bo_va, mapping) {
+ if (num_mappings < args->num_entries) {
+ vm_entries[num_mappings].addr = mapping->start * AMDGPU_GPU_PAGE_SIZE;
+ vm_entries[num_mappings].size = (mapping->last - mapping->start + 1) * AMDGPU_GPU_PAGE_SIZE;
+ vm_entries[num_mappings].offset = mapping->offset;
+ vm_entries[num_mappings].flags = mapping->flags;
+ }
+ num_mappings += 1;
+ }
+
+ drm_exec_fini(&exec);
+
+ if (num_mappings > 0 && num_mappings <= args->num_entries)
+ if (copy_to_user(u64_to_user_ptr(args->value), vm_entries, num_mappings * sizeof(*vm_entries)))
+ r = -EFAULT;
+
+ args->num_entries = num_mappings;
- amdgpu_bo_unreserve(robj);
+ kvfree(vm_entries);
break;
+ }
default:
- amdgpu_bo_unreserve(robj);
+ drm_exec_fini(&exec);
r = -EINVAL;
}
-out:
drm_gem_object_put(gobj);
return r;
+out_exec:
+ drm_exec_fini(&exec);
+ drm_gem_object_put(gobj);
+ return r;
+}
+
+/**
+ * amdgpu_gem_list_handles_ioctl - get information about a process' buffer objects
+ *
+ * @dev: drm device pointer
+ * @data: drm_amdgpu_gem_list_handles
+ * @filp: drm file pointer
+ *
+ * On input, num_entries is the size of the entries array; on output it is
+ * the number of BOs the process owns. If that number is larger than the
+ * array size, the ioctl must be retried with a bigger array.
+ *
+ * Returns:
+ * 0 for success, -errno for errors.
+ */
+int amdgpu_gem_list_handles_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct drm_amdgpu_gem_list_handles *args = data;
+ struct drm_amdgpu_gem_list_handles_entry *bo_entries;
+ struct drm_gem_object *gobj;
+ int id, ret = 0;
+ int bo_index = 0;
+ int num_bos = 0;
+
+ spin_lock(&filp->table_lock);
+ idr_for_each_entry(&filp->object_idr, gobj, id)
+ num_bos += 1;
+ spin_unlock(&filp->table_lock);
+
+ if (args->num_entries < num_bos) {
+ args->num_entries = num_bos;
+ return 0;
+ }
+
+ if (num_bos == 0) {
+ args->num_entries = 0;
+ return 0;
+ }
+
+ bo_entries = kvcalloc(num_bos, sizeof(*bo_entries), GFP_KERNEL);
+ if (!bo_entries)
+ return -ENOMEM;
+
+ spin_lock(&filp->table_lock);
+ idr_for_each_entry(&filp->object_idr, gobj, id) {
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
+ struct drm_amdgpu_gem_list_handles_entry *bo_entry;
+
+ if (bo_index >= num_bos) {
+ ret = -EAGAIN;
+ break;
+ }
+
+ bo_entry = &bo_entries[bo_index];
+
+ bo_entry->size = amdgpu_bo_size(bo);
+ bo_entry->alloc_flags = bo->flags & AMDGPU_GEM_CREATE_SETTABLE_MASK;
+ bo_entry->preferred_domains = bo->preferred_domains;
+ bo_entry->gem_handle = id;
+ bo_entry->alignment = bo->tbo.page_alignment;
+
+ if (bo->tbo.base.import_attach)
+ bo_entry->flags |= AMDGPU_GEM_LIST_HANDLES_FLAG_IS_IMPORT;
+
+ bo_index += 1;
+ }
+ spin_unlock(&filp->table_lock);
+
+ args->num_entries = bo_index;
+
+ if (!ret)
+ if (copy_to_user(u64_to_user_ptr(args->entries), bo_entries, num_bos * sizeof(*bo_entries)))
+ ret = -EFAULT;
+
+ kvfree(bo_entries);
+
+ return ret;
}
static int amdgpu_gem_align_pitch(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
index 3a8f57900a3a..b558336bc4c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
@@ -63,13 +63,28 @@ int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
-uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags);
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
+int amdgpu_gem_list_handles_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
+#define AMDGPU_GEM_CREATE_SETTABLE_MASK (AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | \
+ AMDGPU_GEM_CREATE_NO_CPU_ACCESS | \
+ AMDGPU_GEM_CREATE_CPU_GTT_USWC | \
+ AMDGPU_GEM_CREATE_VRAM_CLEARED | \
+ AMDGPU_GEM_CREATE_VM_ALWAYS_VALID | \
+ AMDGPU_GEM_CREATE_EXPLICIT_SYNC | \
+ AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE | \
+ AMDGPU_GEM_CREATE_ENCRYPTED | \
+ AMDGPU_GEM_CREATE_GFX12_DCC | \
+ AMDGPU_GEM_CREATE_DISCARDABLE | \
+ AMDGPU_GEM_CREATE_COHERENT | \
+ AMDGPU_GEM_CREATE_UNCACHED | \
+ AMDGPU_GEM_CREATE_EXT_COHERENT)
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index c80c8f543532..a09ccf7d8aa2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -1474,7 +1474,8 @@ static int amdgpu_gfx_run_cleaner_shader_job(struct amdgpu_ring *ring)
owner = (void *)(unsigned long)atomic_inc_return(&counter);
r = amdgpu_job_alloc_with_ib(ring->adev, &entity, owner,
- 64, 0, &job);
+ 64, 0, &job,
+ AMDGPU_KERNEL_JOB_ID_CLEANER_SHADER);
if (r)
goto err;
@@ -2279,7 +2280,7 @@ void amdgpu_gfx_profile_ring_end_use(struct amdgpu_ring *ring)
* Return:
* return the latest index.
*/
-u32 amdgpu_gfx_csb_preamble_start(volatile u32 *buffer)
+u32 amdgpu_gfx_csb_preamble_start(u32 *buffer)
{
u32 count = 0;
@@ -2303,7 +2304,7 @@ u32 amdgpu_gfx_csb_preamble_start(volatile u32 *buffer)
* Return:
* return the latest index.
*/
-u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, volatile u32 *buffer, u32 count)
+u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, u32 *buffer, u32 count)
{
const struct cs_section_def *sect = NULL;
const struct cs_extent_def *ext = NULL;
@@ -2330,7 +2331,7 @@ u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, volatile u32 *buffer,
* @buffer: This is an output variable that gets the PACKET3 preamble end.
* @count: Index to start set the preemble end.
*/
-void amdgpu_gfx_csb_preamble_end(volatile u32 *buffer, u32 count)
+void amdgpu_gfx_csb_preamble_end(u32 *buffer, u32 count)
{
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 08f268dab8f5..fb5f7a0ee029 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -642,9 +642,9 @@ void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring);
void amdgpu_gfx_profile_idle_work_handler(struct work_struct *work);
void amdgpu_gfx_profile_ring_begin_use(struct amdgpu_ring *ring);
void amdgpu_gfx_profile_ring_end_use(struct amdgpu_ring *ring);
-u32 amdgpu_gfx_csb_preamble_start(volatile u32 *buffer);
-u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, volatile u32 *buffer, u32 count);
-void amdgpu_gfx_csb_preamble_end(volatile u32 *buffer, u32 count);
+u32 amdgpu_gfx_csb_preamble_start(u32 *buffer);
+u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, u32 *buffer, u32 count);
+void amdgpu_gfx_csb_preamble_end(u32 *buffer, u32 count);
void amdgpu_debugfs_gfx_sched_mask_init(struct amdgpu_device *adev);
void amdgpu_debugfs_compute_sched_mask_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 97b562a79ea8..9dcf51991b5b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -690,7 +690,7 @@ void amdgpu_gmc_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
r = amdgpu_job_alloc_with_ib(ring->adev, &adev->mman.high_pr,
AMDGPU_FENCE_OWNER_UNDEFINED,
16 * 4, AMDGPU_IB_POOL_IMMEDIATE,
- &job);
+ &job, AMDGPU_KERNEL_JOB_ID_FLUSH_GPU_TLB);
if (r)
goto error_alloc;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index 397c6ccdb903..55097ca10738 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -154,15 +154,15 @@ struct amdgpu_gmc_funcs {
unsigned pasid);
/* enable/disable PRT support */
void (*set_prt)(struct amdgpu_device *adev, bool enable);
- /* map mtype to hardware flags */
- uint64_t (*map_mtype)(struct amdgpu_device *adev, uint32_t flags);
/* get the pde for a given mc addr */
void (*get_vm_pde)(struct amdgpu_device *adev, int level,
u64 *dst, u64 *flags);
- /* get the pte flags to use for a BO VA mapping */
+ /* get the pte flags to use for PTEs */
void (*get_vm_pte)(struct amdgpu_device *adev,
- struct amdgpu_bo_va_mapping *mapping,
- uint64_t *flags);
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo,
+ uint32_t vm_flags,
+ uint64_t *pte_flags);
/* override per-page pte flags */
void (*override_vm_pte_flags)(struct amdgpu_device *dev,
struct amdgpu_vm *vm,
@@ -356,9 +356,10 @@ struct amdgpu_gmc {
#define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr))
#define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
-#define amdgpu_gmc_map_mtype(adev, flags) (adev)->gmc.gmc_funcs->map_mtype((adev),(flags))
#define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags))
-#define amdgpu_gmc_get_vm_pte(adev, mapping, flags) (adev)->gmc.gmc_funcs->get_vm_pte((adev), (mapping), (flags))
+#define amdgpu_gmc_get_vm_pte(adev, vm, bo, vm_flags, pte_flags) \
+ ((adev)->gmc.gmc_funcs->get_vm_pte((adev), (vm), (bo), (vm_flags), \
+ (pte_flags)))
#define amdgpu_gmc_override_vm_pte_flags(adev, vm, addr, pte_flags) \
(adev)->gmc.gmc_funcs->override_vm_pte_flags \
((adev), (vm), (addr), (pte_flags))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
index e36fede7f74c..2c6a6b858112 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
@@ -167,13 +167,12 @@ void amdgpu_hmm_unregister(struct amdgpu_bo *bo)
int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
uint64_t start, uint64_t npages, bool readonly,
- void *owner, struct page **pages,
+ void *owner,
struct hmm_range **phmm_range)
{
struct hmm_range *hmm_range;
unsigned long end;
unsigned long timeout;
- unsigned long i;
unsigned long *pfns;
int r = 0;
@@ -222,14 +221,6 @@ retry:
hmm_range->start = start;
hmm_range->hmm_pfns = pfns;
- /*
- * Due to default_flags, all pages are HMM_PFN_VALID or
- * hmm_range_fault() fails. FIXME: The pages cannot be touched outside
- * the notifier_lock, and mmu_interval_read_retry() must be done first.
- */
- for (i = 0; pages && i < npages; i++)
- pages[i] = hmm_pfn_to_page(pfns[i]);
-
*phmm_range = hmm_range;
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h
index e2edcd010ccc..953e1d06de20 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h
@@ -33,7 +33,7 @@
int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
uint64_t start, uint64_t npages, bool readonly,
- void *owner, struct page **pages,
+ void *owner,
struct hmm_range **phmm_range);
bool amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
index 57101d24422f..9cb72f0c5277 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
@@ -184,7 +184,7 @@ struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev,
snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
"AMDGPU i2c hw bus %s", name);
i2c->adapter.algo = &amdgpu_atombios_i2c_algo;
- ret = i2c_add_adapter(&i2c->adapter);
+ ret = devm_i2c_add_adapter(dev->dev, &i2c->adapter);
if (ret)
goto out_free;
} else {
@@ -215,15 +215,6 @@ out_free:
}
-void amdgpu_i2c_destroy(struct amdgpu_i2c_chan *i2c)
-{
- if (!i2c)
- return;
- WARN_ON(i2c->has_aux);
- i2c_del_adapter(&i2c->adapter);
- kfree(i2c);
-}
-
void amdgpu_i2c_init(struct amdgpu_device *adev)
{
if (!adev->is_atom_fw) {
@@ -248,12 +239,9 @@ void amdgpu_i2c_fini(struct amdgpu_device *adev)
{
int i;
- for (i = 0; i < AMDGPU_MAX_I2C_BUS; i++) {
- if (adev->i2c_bus[i]) {
- amdgpu_i2c_destroy(adev->i2c_bus[i]);
+ for (i = 0; i < AMDGPU_MAX_I2C_BUS; i++)
+ if (adev->i2c_bus[i])
adev->i2c_bus[i] = NULL;
- }
- }
}
/* looks up bus based on id */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 5dd78a9cb12d..3ef5bc95642c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -275,13 +275,12 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
{
struct amdgpu_device *adev = ring->adev;
unsigned vmhub = ring->vm_hub;
- struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
uint64_t fence_context = adev->fence_context + ring->idx;
bool needs_flush = vm->use_cpu_for_update;
uint64_t updates = amdgpu_vm_tlb_seq(vm);
int r;
- *id = id_mgr->reserved;
+ *id = vm->reserved_vmid[vmhub];
if ((*id)->owner != vm->immediate.fence_context ||
!amdgpu_vmid_compatible(*id, job) ||
(*id)->flushed_updates < updates ||
@@ -474,40 +473,61 @@ bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub)
return vm->reserved_vmid[vmhub];
}
-int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
+/*
+ * amdgpu_vmid_alloc_reserved - reserve a specific VMID for this vm
+ * @adev: amdgpu device structure
+ * @vm: the VM to reserve an ID for
+ * @vmhub: the VMHUB which should be used
+ *
+ * Mostly used to have a reserved VMID for debugging and SPM.
+ *
+ * Returns: 0 for success, -ENOENT if an ID is already reserved.
+ */
+int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
unsigned vmhub)
{
struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+ struct amdgpu_vmid *id;
+ int r = 0;
mutex_lock(&id_mgr->lock);
-
- ++id_mgr->reserved_use_count;
- if (!id_mgr->reserved) {
- struct amdgpu_vmid *id;
-
- id = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid,
- list);
- /* Remove from normal round robin handling */
- list_del_init(&id->list);
- id_mgr->reserved = id;
+ if (vm->reserved_vmid[vmhub])
+ goto unlock;
+ if (id_mgr->reserved_vmid) {
+ r = -ENOENT;
+ goto unlock;
}
-
+ /* Remove from normal round robin handling */
+ id = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid, list);
+ list_del_init(&id->list);
+ vm->reserved_vmid[vmhub] = id;
+ id_mgr->reserved_vmid = true;
mutex_unlock(&id_mgr->lock);
+
return 0;
+unlock:
+ mutex_unlock(&id_mgr->lock);
+ return r;
}
-void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
+/*
+ * amdgpu_vmid_free_reserved - free up a reserved VMID again
+ * @adev: amdgpu device structure
+ * @vm: the VM with the reserved ID
+ * @vmhub: the VMHUB which should be used
+ */
+void amdgpu_vmid_free_reserved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
unsigned vmhub)
{
struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
mutex_lock(&id_mgr->lock);
- if (!--id_mgr->reserved_use_count) {
- /* give the reserved ID back to normal round robin */
- list_add(&id_mgr->reserved->list, &id_mgr->ids_lru);
- id_mgr->reserved = NULL;
+ if (vm->reserved_vmid[vmhub]) {
+ list_add(&vm->reserved_vmid[vmhub]->list,
+ &id_mgr->ids_lru);
+ vm->reserved_vmid[vmhub] = NULL;
+ id_mgr->reserved_vmid = false;
}
-
mutex_unlock(&id_mgr->lock);
}
@@ -574,7 +594,6 @@ void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
mutex_init(&id_mgr->lock);
INIT_LIST_HEAD(&id_mgr->ids_lru);
- id_mgr->reserved_use_count = 0;
/* for GC <10, SDMA uses MMHUB so use first_kfd_vmid for both GC and MM */
if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 0, 0))
@@ -594,11 +613,6 @@ void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
}
}
- /* alloc a default reserved vmid to enforce isolation */
- for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) {
- if (adev->enforce_isolation[i] != AMDGPU_ENFORCE_ISOLATION_DISABLE)
- amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(i));
- }
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
index 240fa6751260..b3649cd3af56 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
@@ -67,8 +67,7 @@ struct amdgpu_vmid_mgr {
unsigned num_ids;
struct list_head ids_lru;
struct amdgpu_vmid ids[AMDGPU_NUM_VMID];
- struct amdgpu_vmid *reserved;
- unsigned int reserved_use_count;
+ bool reserved_vmid;
};
int amdgpu_pasid_alloc(unsigned int bits);
@@ -79,10 +78,10 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv,
bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
struct amdgpu_vmid *id);
bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub);
-int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
- unsigned vmhub);
-void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
- unsigned vmhub);
+int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ unsigned vmhub);
+void amdgpu_vmid_free_reserved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ unsigned vmhub);
int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
struct amdgpu_job *job, struct dma_fence **fence);
void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
index 7f7ea046e209..f58b6be7fccc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -56,14 +56,14 @@ struct amdgpu_ih_ring {
bool use_bus_addr;
struct amdgpu_bo *ring_obj;
- volatile uint32_t *ring;
+ uint32_t *ring;
uint64_t gpu_addr;
uint64_t wptr_addr;
- volatile uint32_t *wptr_cpu;
+ uint32_t *wptr_cpu;
uint64_t rptr_addr;
- volatile uint32_t *rptr_cpu;
+ uint32_t *rptr_cpu;
bool enabled;
unsigned rptr;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 9b1c55115921..d020a890a0ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -209,11 +209,12 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
struct drm_sched_entity *entity, void *owner,
size_t size, enum amdgpu_ib_pool_type pool_type,
- struct amdgpu_job **job)
+ struct amdgpu_job **job, u64 k_job_id)
{
int r;
- r = amdgpu_job_alloc(adev, NULL, entity, owner, 1, job, 0);
+ r = amdgpu_job_alloc(adev, NULL, entity, owner, 1, job,
+ k_job_id);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
index 2f302266662b..4a6487eb6cb5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -44,6 +44,22 @@
struct amdgpu_fence;
enum amdgpu_ib_pool_type;
+/* Internal kernel job IDs (decreasing values, starting from U64_MAX). */
+#define AMDGPU_KERNEL_JOB_ID_VM_UPDATE (18446744073709551615ULL)
+#define AMDGPU_KERNEL_JOB_ID_VM_UPDATE_PDES (18446744073709551614ULL)
+#define AMDGPU_KERNEL_JOB_ID_VM_UPDATE_RANGE (18446744073709551613ULL)
+#define AMDGPU_KERNEL_JOB_ID_VM_PT_CLEAR (18446744073709551612ULL)
+#define AMDGPU_KERNEL_JOB_ID_TTM_MAP_BUFFER (18446744073709551611ULL)
+#define AMDGPU_KERNEL_JOB_ID_TTM_ACCESS_MEMORY_SDMA (18446744073709551610ULL)
+#define AMDGPU_KERNEL_JOB_ID_TTM_COPY_BUFFER (18446744073709551609ULL)
+#define AMDGPU_KERNEL_JOB_ID_CLEAR_ON_RELEASE (18446744073709551608ULL)
+#define AMDGPU_KERNEL_JOB_ID_MOVE_BLIT (18446744073709551607ULL)
+#define AMDGPU_KERNEL_JOB_ID_TTM_CLEAR_BUFFER (18446744073709551606ULL)
+#define AMDGPU_KERNEL_JOB_ID_CLEANER_SHADER (18446744073709551605ULL)
+#define AMDGPU_KERNEL_JOB_ID_FLUSH_GPU_TLB (18446744073709551604ULL)
+#define AMDGPU_KERNEL_JOB_ID_KFD_GART_MAP (18446744073709551603ULL)
+#define AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST (18446744073709551602ULL)
+
struct amdgpu_job {
struct drm_sched_job base;
struct amdgpu_vm *vm;
@@ -96,7 +112,8 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
struct drm_sched_entity *entity, void *owner,
size_t size, enum amdgpu_ib_pool_type pool_type,
- struct amdgpu_job **job);
+ struct amdgpu_job **job,
+ u64 k_job_id);
void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds,
struct amdgpu_bo *gws, struct amdgpu_bo *oa);
void amdgpu_job_free_resources(struct amdgpu_job *job);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
index 82d58ac7afb0..6b7d66b6d4cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
@@ -121,10 +121,12 @@ static void amdgpu_jpeg_idle_work_handler(struct work_struct *work)
fences += amdgpu_fence_count_emitted(&adev->jpeg.inst[i].ring_dec[j]);
}
- if (!fences && !atomic_read(&adev->jpeg.total_submission_cnt))
+ if (!fences && !atomic_read(&adev->jpeg.total_submission_cnt)) {
+ mutex_lock(&adev->jpeg.jpeg_pg_lock);
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_JPEG,
AMD_PG_STATE_GATE);
- else
+ mutex_unlock(&adev->jpeg.jpeg_pg_lock);
+ } else
schedule_delayed_work(&adev->jpeg.idle_work, JPEG_IDLE_TIMEOUT);
}
@@ -194,7 +196,8 @@ static int amdgpu_jpeg_dec_set_reg(struct amdgpu_ring *ring, uint32_t handle,
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
- AMDGPU_IB_POOL_DIRECT, &job);
+ AMDGPU_IB_POOL_DIRECT, &job,
+ AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
@@ -368,7 +371,7 @@ static int amdgpu_debugfs_jpeg_sched_mask_set(void *data, u64 val)
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
ring = &adev->jpeg.inst[i].ring_dec[j];
- if (val & (1 << ((i * adev->jpeg.num_jpeg_rings) + j)))
+			if (val & BIT_ULL((i * adev->jpeg.num_jpeg_rings) + j))
ring->sched.ready = true;
else
ring->sched.ready = false;
@@ -537,3 +540,68 @@ void amdgpu_jpeg_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_pri
drm_printf(p, "\nInactive Instance:JPEG%d\n", i);
}
}
+
+static inline bool amdgpu_jpeg_reg_valid(u32 reg)
+{
+ if (reg < JPEG_REG_RANGE_START || reg > JPEG_REG_RANGE_END ||
+ (reg >= JPEG_ATOMIC_RANGE_START && reg <= JPEG_ATOMIC_RANGE_END))
+ return false;
+ else
+ return true;
+}
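+/*
+ * The helper accepts registers in [0x4000, 0x41c2] while carving out the
+ * atomic window [0x4120, 0x412a] that lies inside it, hence the two-sided
+ * check; e.g. amdgpu_jpeg_reg_valid(0x4100) is true while
+ * amdgpu_jpeg_reg_valid(0x4125) is false.
+ */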
+
+/**
+ * amdgpu_jpeg_dec_parse_cs - command submission parser
+ *
+ * @parser: Command submission parser context
+ * @job: the job to parse
+ * @ib: the IB to parse
+ *
+ * Parse the command stream, return -EINVAL for invalid packet,
+ * 0 otherwise
+ */
+
+int amdgpu_jpeg_dec_parse_cs(struct amdgpu_cs_parser *parser,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib)
+{
+ u32 i, reg, res, cond, type;
+ struct amdgpu_device *adev = parser->adev;
+
+ for (i = 0; i < ib->length_dw ; i += 2) {
+ reg = CP_PACKETJ_GET_REG(ib->ptr[i]);
+ res = CP_PACKETJ_GET_RES(ib->ptr[i]);
+ cond = CP_PACKETJ_GET_COND(ib->ptr[i]);
+ type = CP_PACKETJ_GET_TYPE(ib->ptr[i]);
+
+ if (res) /* only support 0 at the moment */
+ return -EINVAL;
+
+ switch (type) {
+ case PACKETJ_TYPE0:
+ if (cond != PACKETJ_CONDITION_CHECK0 ||
+ !amdgpu_jpeg_reg_valid(reg)) {
+ dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
+ return -EINVAL;
+ }
+ break;
+ case PACKETJ_TYPE3:
+ if (cond != PACKETJ_CONDITION_CHECK3 ||
+ !amdgpu_jpeg_reg_valid(reg)) {
+ dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
+ return -EINVAL;
+ }
+ break;
+ case PACKETJ_TYPE6:
+ if (ib->ptr[i] == CP_PACKETJ_NOP)
+ continue;
+ dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
+ return -EINVAL;
+ default:
+ dev_err(adev->dev, "Unknown packet type %d !\n", type);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
index 4f0775e39b54..346ae0ab09d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
@@ -25,11 +25,18 @@
#define __AMDGPU_JPEG_H__
#include "amdgpu_ras.h"
+#include "amdgpu_cs.h"
#define AMDGPU_MAX_JPEG_INSTANCES 4
#define AMDGPU_MAX_JPEG_RINGS 10
#define AMDGPU_MAX_JPEG_RINGS_4_0_3 8
+#define JPEG_REG_RANGE_START 0x4000
+#define JPEG_REG_RANGE_END 0x41c2
+#define JPEG_ATOMIC_RANGE_START 0x4120
+#define JPEG_ATOMIC_RANGE_END 0x412a
+
#define AMDGPU_JPEG_HARVEST_JPEG0 (1 << 0)
#define AMDGPU_JPEG_HARVEST_JPEG1 (1 << 1)
@@ -170,5 +177,8 @@ int amdgpu_jpeg_reg_dump_init(struct amdgpu_device *adev,
const struct amdgpu_hwip_reg_entry *reg, u32 count);
void amdgpu_jpeg_dump_ip_state(struct amdgpu_ip_block *ip_block);
void amdgpu_jpeg_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p);
+int amdgpu_jpeg_dec_parse_cs(struct amdgpu_cs_parser *parser,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib);
#endif /*__AMDGPU_JPEG_H__*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 8a76960803c6..8676400834fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -939,6 +939,10 @@ out:
if (adev->gfx.config.ta_cntl2_truncate_coord_mode)
dev_info->ids_flags |= AMDGPU_IDS_FLAGS_CONFORMANT_TRUNC_COORD;
+ /* Gang submit is not supported under SRIOV currently */
+ if (!amdgpu_sriov_vf(adev))
+ dev_info->ids_flags |= AMDGPU_IDS_FLAGS_GANG_SUBMIT;
+
if (amdgpu_passthrough(adev))
dev_info->ids_flags |= (AMDGPU_IDS_FLAGS_MODE_PT <<
AMDGPU_IDS_FLAGS_MODE_SHIFT) &
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index 135598502c8d..5bf9be073cdd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -191,6 +191,20 @@ int amdgpu_mes_init(struct amdgpu_device *adev)
if (r)
goto error_doorbell;
+ if (adev->mes.hung_queue_db_array_size) {
+ r = amdgpu_bo_create_kernel(adev,
+ adev->mes.hung_queue_db_array_size * sizeof(u32),
+ PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->mes.hung_queue_db_array_gpu_obj,
+ &adev->mes.hung_queue_db_array_gpu_addr,
+ &adev->mes.hung_queue_db_array_cpu_addr);
+ if (r) {
+ dev_warn(adev->dev, "failed to create MES hung db array buffer (%d)", r);
+ goto error_doorbell;
+ }
+ }
+
return 0;
error_doorbell:
@@ -216,6 +230,10 @@ void amdgpu_mes_fini(struct amdgpu_device *adev)
{
int i;
+ amdgpu_bo_free_kernel(&adev->mes.hung_queue_db_array_gpu_obj,
+ &adev->mes.hung_queue_db_array_gpu_addr,
+ &adev->mes.hung_queue_db_array_cpu_addr);
+
amdgpu_bo_free_kernel(&adev->mes.event_log_gpu_obj,
&adev->mes.event_log_gpu_addr,
&adev->mes.event_log_cpu_addr);
@@ -366,6 +384,53 @@ int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
return r;
}
+int amdgpu_mes_get_hung_queue_db_array_size(struct amdgpu_device *adev)
+{
+ return adev->mes.hung_queue_db_array_size;
+}
+
+int amdgpu_mes_detect_and_reset_hung_queues(struct amdgpu_device *adev,
+ int queue_type,
+ bool detect_only,
+ unsigned int *hung_db_num,
+ u32 *hung_db_array)
+{
+ struct mes_detect_and_reset_queue_input input;
+ u32 *db_array = adev->mes.hung_queue_db_array_cpu_addr;
+ int r, i;
+
+ if (!hung_db_num || !hung_db_array)
+ return -EINVAL;
+
+ if ((queue_type != AMDGPU_RING_TYPE_GFX) &&
+ (queue_type != AMDGPU_RING_TYPE_COMPUTE) &&
+ (queue_type != AMDGPU_RING_TYPE_SDMA))
+ return -EINVAL;
+
+ /* Clear the doorbell array before detection */
+ memset(adev->mes.hung_queue_db_array_cpu_addr, 0,
+ adev->mes.hung_queue_db_array_size * sizeof(u32));
+ input.queue_type = queue_type;
+ input.detect_only = detect_only;
+
+ r = adev->mes.funcs->detect_and_reset_hung_queues(&adev->mes,
+ &input);
+ if (r) {
+ dev_err(adev->dev, "failed to detect and reset\n");
+ } else {
+ *hung_db_num = 0;
+ for (i = 0; i < adev->mes.hung_queue_db_array_size; i++) {
+ if (db_array[i] != AMDGPU_MES_INVALID_DB_OFFSET) {
+ hung_db_array[i] = db_array[i];
+ *hung_db_num += 1;
+ }
+ }
+ }
+
+ return r;
+}
+
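
A kernel-side caller sketch for the new helper (buffer sized from the query
function added above; a detect-only pass leaves the queues running):

	unsigned int num = 0;
	int size = amdgpu_mes_get_hung_queue_db_array_size(adev);
	u32 *dbs = kcalloc(size, sizeof(*dbs), GFP_KERNEL);

	if (dbs && !amdgpu_mes_detect_and_reset_hung_queues(adev,
			AMDGPU_RING_TYPE_COMPUTE, true /* detect only */,
			&num, dbs)) {
		/* dbs[0..num-1] now holds the hung queues' doorbell offsets */
	}
	kfree(dbs);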
uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
{
struct mes_misc_op_input op_input;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
index c0d2c195fe2e..6b506fc72f58 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
@@ -41,6 +41,7 @@
#define AMDGPU_MES_API_VERSION_MASK 0x00fff000
#define AMDGPU_MES_FEAT_VERSION_MASK 0xff000000
#define AMDGPU_MES_MSCRATCH_SIZE 0x40000
+#define AMDGPU_MES_INVALID_DB_OFFSET 0xffffffff
enum amdgpu_mes_priority_level {
AMDGPU_MES_PRIORITY_LEVEL_LOW = 0,
@@ -147,6 +148,10 @@ struct amdgpu_mes {
uint64_t resource_1_gpu_addr[AMDGPU_MAX_MES_PIPES];
void *resource_1_addr[AMDGPU_MAX_MES_PIPES];
+ int hung_queue_db_array_size;
+ struct amdgpu_bo *hung_queue_db_array_gpu_obj;
+ uint64_t hung_queue_db_array_gpu_addr;
+ void *hung_queue_db_array_cpu_addr;
};
struct amdgpu_mes_gang {
@@ -280,6 +285,18 @@ struct mes_reset_queue_input {
bool is_kq;
};
+struct mes_detect_and_reset_queue_input {
+ uint32_t queue_type;
+ bool detect_only;
+};
+
+struct mes_inv_tlbs_pasid_input {
+ uint32_t xcc_id;
+ uint16_t pasid;
+ uint8_t hub_id;
+ uint8_t flush_type;
+};
+
enum mes_misc_opcode {
MES_MISC_OP_WRITE_REG,
MES_MISC_OP_READ_REG,
@@ -367,6 +384,13 @@ struct amdgpu_mes_funcs {
int (*reset_hw_queue)(struct amdgpu_mes *mes,
struct mes_reset_queue_input *input);
+
+ int (*detect_and_reset_hung_queues)(struct amdgpu_mes *mes,
+ struct mes_detect_and_reset_queue_input *input);
+
+ int (*invalidate_tlbs_pasid)(struct amdgpu_mes *mes,
+ struct mes_inv_tlbs_pasid_input *input);
};
#define amdgpu_mes_kiq_hw_init(adev) (adev)->mes.kiq_hw_init((adev))
@@ -390,6 +414,13 @@ int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
unsigned int vmid,
bool use_mmio);
+int amdgpu_mes_get_hung_queue_db_array_size(struct amdgpu_device *adev);
+int amdgpu_mes_detect_and_reset_hung_queues(struct amdgpu_device *adev,
+ int queue_type,
+ bool detect_only,
+ unsigned int *hung_db_num,
+ u32 *hung_db_array);
+
uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg);
int amdgpu_mes_wreg(struct amdgpu_device *adev,
uint32_t reg, uint32_t val);
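On the backend side, an IP-specific MES implementation would populate the new hook in its amdgpu_mes_funcs table. A hypothetical stub (the function names are illustrative; the real mes_v11_0/mes_v12_0 wiring is not part of this hunk):

/* Hypothetical backend stub for the new hook. */
static int example_detect_and_reset(struct amdgpu_mes *mes,
				    struct mes_detect_and_reset_queue_input *input)
{
	/* a real backend packs input into a MES packet and submits it,
	 * letting firmware write hung doorbell offsets into the
	 * hung_queue_db_array buffer allocated in amdgpu_mes_init() */
	return 0;
}

static const struct amdgpu_mes_funcs example_mes_funcs = {
	.detect_and_reset_hung_queues = example_detect_and_reset,
};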
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 6da4f946cac0..20460cfd09bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -496,8 +496,6 @@ struct amdgpu_crtc {
struct drm_connector *connector;
/* for dpm */
u32 line_time;
- u32 wm_low;
- u32 wm_high;
u32 lb_vblank_lead_lines;
struct drm_display_mode hw_mode;
/* for virtual dce */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 122a88294883..e08f58de4b17 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -153,6 +153,14 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
c++;
}
+ if (domain & AMDGPU_GEM_DOMAIN_MMIO_REMAP) {
+ places[c].fpfn = 0;
+ places[c].lpfn = 0;
+ places[c].mem_type = AMDGPU_PL_MMIO_REMAP;
+ places[c].flags = 0;
+ c++;
+ }
+
if (domain & AMDGPU_GEM_DOMAIN_GTT) {
places[c].fpfn = 0;
places[c].lpfn = 0;
@@ -1313,7 +1321,8 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
if (r)
goto out;
- r = amdgpu_fill_buffer(abo, 0, &bo->base._resv, &fence, true);
+ r = amdgpu_fill_buffer(abo, 0, &bo->base._resv, &fence, true,
+ AMDGPU_KERNEL_JOB_ID_CLEAR_ON_RELEASE);
if (WARN_ON(r))
goto out;
@@ -1545,6 +1554,8 @@ uint32_t amdgpu_bo_mem_stats_placement(struct amdgpu_bo *bo)
return AMDGPU_PL_OA;
case AMDGPU_GEM_DOMAIN_DOORBELL:
return AMDGPU_PL_DOORBELL;
+ case AMDGPU_GEM_DOMAIN_MMIO_REMAP:
+ return AMDGPU_PL_MMIO_REMAP;
default:
return TTM_PL_SYSTEM;
}
@@ -1628,6 +1639,9 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
case AMDGPU_PL_DOORBELL:
placement = "DOORBELL";
break;
+ case AMDGPU_PL_MMIO_REMAP:
+ placement = "MMIO REMAP";
+ break;
case TTM_PL_SYSTEM:
default:
placement = "CPU";
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index c316920f3450..656b8a931dae 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -69,7 +69,7 @@ struct amdgpu_bo_va_mapping {
uint64_t last;
uint64_t __subtree_last;
uint64_t offset;
- uint64_t flags;
+ uint32_t flags;
};
/* User space allocated BO in a VM */
@@ -167,6 +167,8 @@ static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type)
return AMDGPU_GEM_DOMAIN_OA;
case AMDGPU_PL_DOORBELL:
return AMDGPU_GEM_DOMAIN_DOORBELL;
+ case AMDGPU_PL_MMIO_REMAP:
+ return AMDGPU_GEM_DOMAIN_MMIO_REMAP;
default:
break;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 693357caa9a8..1578e4e2bf84 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -666,6 +666,10 @@ static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id)
return "FB_FW_RESERV_ADDR";
case GFX_CMD_ID_FB_FW_RESERV_EXT_ADDR:
return "FB_FW_RESERV_EXT_ADDR";
+ case GFX_CMD_ID_SRIOV_SPATIAL_PART:
+ return "SPATIAL_PARTITION";
+ case GFX_CMD_ID_FB_NPS_MODE:
+ return "NPS_MODE_CHANGE";
default:
return "UNKNOWN CMD";
}
@@ -877,9 +881,7 @@ static int psp_tmr_init(struct psp_context *psp)
pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
ret = amdgpu_bo_create_kernel(psp->adev, tmr_size,
PSP_TMR_ALIGNMENT,
- AMDGPU_HAS_VRAM(psp->adev) ?
- AMDGPU_GEM_DOMAIN_VRAM :
- AMDGPU_GEM_DOMAIN_GTT,
+ AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_VRAM,
&psp->tmr_bo, &psp->tmr_mc_addr,
pptr);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
index 38face981c3e..6e8aad91bcd3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
@@ -171,13 +171,9 @@ static ssize_t ta_if_load_debugfs_write(struct file *fp, const char *buf, size_t
copy_pos += sizeof(uint32_t);
- ta_bin = kzalloc(ta_bin_len, GFP_KERNEL);
- if (!ta_bin)
- return -ENOMEM;
- if (copy_from_user((void *)ta_bin, &buf[copy_pos], ta_bin_len)) {
- ret = -EFAULT;
- goto err_free_bin;
- }
+ ta_bin = memdup_user(&buf[copy_pos], ta_bin_len);
+ if (IS_ERR(ta_bin))
+ return PTR_ERR(ta_bin);
/* Set TA context and functions */
set_ta_context_funcs(psp, ta_type, &context);
@@ -327,13 +323,9 @@ static ssize_t ta_if_invoke_debugfs_write(struct file *fp, const char *buf, size
return -EFAULT;
copy_pos += sizeof(uint32_t);
- shared_buf = kzalloc(shared_buf_len, GFP_KERNEL);
- if (!shared_buf)
- return -ENOMEM;
- if (copy_from_user((void *)shared_buf, &buf[copy_pos], shared_buf_len)) {
- ret = -EFAULT;
- goto err_free_shared_buf;
- }
+ shared_buf = memdup_user(&buf[copy_pos], shared_buf_len);
+ if (IS_ERR(shared_buf))
+ return PTR_ERR(shared_buf);
set_ta_context_funcs(psp, ta_type, &context);
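Both hunks replace the open-coded allocate-and-copy sequence with memdup_user(), which allocates, copies, and returns an ERR_PTR on failure in one call. A standalone illustration of the pattern (helper name is illustrative):

/* Illustration: memdup_user() folds kzalloc() + copy_from_user() into one. */
static void *example_dup_blob(const void __user *ubuf, size_t len)
{
	void *blob = memdup_user(ubuf, len);

	if (IS_ERR(blob))
		return blob;		/* caller unwraps with PTR_ERR() */

	/* ... consume blob, then kfree(blob) ... */
	return blob;
}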
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 540817e296da..e0ee21150860 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -122,12 +122,15 @@ const char *get_ras_block_str(struct ras_common_if *ras_block)
/* typical ECC bad page rate is 1 bad page per 100MB VRAM */
#define RAS_BAD_PAGE_COVER (100 * 1024 * 1024ULL)
-#define MAX_UMC_POISON_POLLING_TIME_ASYNC 300 //ms
+#define MAX_UMC_POISON_POLLING_TIME_ASYNC 10
#define AMDGPU_RAS_RETIRE_PAGE_INTERVAL 100 //ms
#define MAX_FLUSH_RETIRE_DWORK_TIMES 100
+#define BYPASS_ALLOCATED_ADDRESS 0x0
+#define BYPASS_INITIALIZATION_ADDRESS 0x1
+
enum amdgpu_ras_retire_page_reservation {
AMDGPU_RAS_RETIRE_PAGE_RESERVED,
AMDGPU_RAS_RETIRE_PAGE_PENDING,
@@ -136,10 +139,14 @@ enum amdgpu_ras_retire_page_reservation {
atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);
-static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
+static int amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
uint64_t addr);
-static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
+static int amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
uint64_t addr);
+
+static void amdgpu_ras_critical_region_init(struct amdgpu_device *adev);
+static void amdgpu_ras_critical_region_fini(struct amdgpu_device *adev);
+
#ifdef CONFIG_X86_MCE_AMD
static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev);
struct mce_notifier_adev_list {
@@ -169,18 +176,16 @@ static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t addre
struct eeprom_table_record err_rec;
int ret;
- if ((address >= adev->gmc.mc_vram_size) ||
- (address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
+ ret = amdgpu_ras_check_bad_page(adev, address);
+ if (ret == -EINVAL) {
dev_warn(adev->dev,
- "RAS WARN: input address 0x%llx is invalid.\n",
- address);
+ "RAS WARN: input address 0x%llx is invalid.\n",
+ address);
return -EINVAL;
- }
-
- if (amdgpu_ras_check_bad_page(adev, address)) {
+ } else if (ret == 1) {
dev_warn(adev->dev,
- "RAS WARN: 0x%llx has already been marked as bad page!\n",
- address);
+ "RAS WARN: 0x%llx has already been marked as bad page!\n",
+ address);
return 0;
}
@@ -207,6 +212,56 @@ static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t addre
return 0;
}
+static int amdgpu_check_address_validity(struct amdgpu_device *adev,
+ uint64_t address, uint64_t flags)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct amdgpu_vram_block_info blk_info;
+ uint64_t page_pfns[32] = {0};
+ int i, ret, count;
+ bool hit = false;
+
+ if (amdgpu_ip_version(adev, UMC_HWIP, 0) < IP_VERSION(12, 0, 0))
+ return 0;
+
+ if (amdgpu_sriov_vf(adev)) {
+ if (amdgpu_virt_check_vf_critical_region(adev, address, &hit))
+ return -EPERM;
+ return hit ? -EACCES : 0;
+ }
+
+ if ((address >= adev->gmc.mc_vram_size) ||
+ (address >= RAS_UMC_INJECT_ADDR_LIMIT))
+ return -EFAULT;
+
+ count = amdgpu_umc_lookup_bad_pages_in_a_row(adev,
+ address, page_pfns, ARRAY_SIZE(page_pfns));
+ if (count <= 0)
+ return -EPERM;
+
+ for (i = 0; i < count; i++) {
+ memset(&blk_info, 0, sizeof(blk_info));
+ ret = amdgpu_vram_mgr_query_address_block_info(&adev->mman.vram_mgr,
+ page_pfns[i] << AMDGPU_GPU_PAGE_SHIFT, &blk_info);
+ if (!ret) {
+ /* In BYPASS_ALLOCATED_ADDRESS mode the address must have been
+ * allocated by the calling process itself, so reject blocks
+ * owned by any other task.
+ */
+ if ((flags == BYPASS_ALLOCATED_ADDRESS) &&
+ ((blk_info.task.pid != task_pid_nr(current)) ||
+ strncmp(blk_info.task.comm, current->comm, TASK_COMM_LEN)))
+ return -EACCES;
+ else if ((flags == BYPASS_INITIALIZATION_ADDRESS) &&
+ (blk_info.task.pid == con->init_task_pid) &&
+ !strncmp(blk_info.task.comm, con->init_task_comm, TASK_COMM_LEN))
+ return -EACCES;
+ }
+ }
+
+ return 0;
+}
+
static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
@@ -297,6 +352,8 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
op = 2;
else if (strstr(str, "retire_page") != NULL)
op = 3;
+ else if (strstr(str, "check_address") != NULL)
+ op = 4;
else if (str[0] && str[1] && str[2] && str[3])
/* ascii string, but commands are not matched. */
return -EINVAL;
@@ -311,6 +368,15 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
data->inject.address = address;
return 0;
+ } else if (op == 4) {
+ if (sscanf(str, "%*s 0x%llx 0x%llx", &address, &value) != 2 &&
+ sscanf(str, "%*s %llu %llu", &address, &value) != 2)
+ return -EINVAL;
+
+ data->op = op;
+ data->inject.address = address;
+ data->inject.value = value;
+ return 0;
}
if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))
@@ -500,6 +566,9 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
return size;
else
return ret;
+ } else if (data.op == 4) {
+ ret = amdgpu_check_address_validity(adev, data.inject.address, data.inject.value);
+ return ret ? ret : size;
}
if (!amdgpu_ras_is_supported(adev, data.head.block))
@@ -513,22 +582,16 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
break;
case 2:
- if ((data.inject.address >= adev->gmc.mc_vram_size &&
- adev->gmc.mc_vram_size) ||
- (data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
- dev_warn(adev->dev, "RAS WARN: input address "
- "0x%llx is invalid.",
+ /* umc ce/ue error injection for a bad page is not allowed */
+ if (data.head.block == AMDGPU_RAS_BLOCK__UMC)
+ ret = amdgpu_ras_check_bad_page(adev, data.inject.address);
+ if (ret == -EINVAL) {
+ dev_warn(adev->dev, "RAS WARN: input address 0x%llx is invalid.",
data.inject.address);
- ret = -EINVAL;
break;
- }
-
- /* umc ce/ue error injection for a bad page is not allowed */
- if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
- amdgpu_ras_check_bad_page(adev, data.inject.address)) {
- dev_warn(adev->dev, "RAS WARN: inject: 0x%llx has "
- "already been marked as bad!\n",
- data.inject.address);
+ } else if (ret == 1) {
+ dev_warn(adev->dev, "RAS WARN: inject: 0x%llx has already been marked as bad!\n",
+ data.inject.address);
break;
}
@@ -2566,18 +2629,26 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
goto out;
}
- *bps = kmalloc(sizeof(struct ras_badpage) * data->count, GFP_KERNEL);
+ *bps = kmalloc_array(data->count, sizeof(struct ras_badpage), GFP_KERNEL);
if (!*bps) {
ret = -ENOMEM;
goto out;
}
for (; i < data->count; i++) {
+ if (!data->bps[i].ts)
+ continue;
+
(*bps)[i] = (struct ras_badpage){
.bp = data->bps[i].retired_page,
.size = AMDGPU_GPU_PAGE_SIZE,
.flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
};
+
+ if (amdgpu_ras_check_critical_address(adev,
+ data->bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT))
+ continue;
+
status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr,
data->bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT);
if (status == -EBUSY)
@@ -2586,7 +2657,7 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
}
- *count = data->count;
+ *count = con->bad_page_num;
out:
mutex_unlock(&con->recovery_lock);
return ret;
@@ -2638,6 +2709,7 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
struct amdgpu_device *adev = ras->adev;
struct list_head device_list, *device_list_handle = NULL;
struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
+ unsigned int error_query_mode;
enum ras_event_type type;
if (hive) {
@@ -2666,6 +2738,13 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
device_list_handle = &device_list;
}
+ if (amdgpu_ras_get_error_query_mode(adev, &error_query_mode)) {
+ if (error_query_mode == AMDGPU_RAS_FIRMWARE_ERROR_QUERY) {
+ /* wait 500ms to ensure pmfw polling mca bank info done */
+ msleep(500);
+ }
+ }
+
type = amdgpu_ras_get_fatal_error_event(adev);
list_for_each_entry(remote_adev,
device_list_handle, gmc.xgmi.head) {
@@ -2722,7 +2801,7 @@ static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
unsigned int old_space = data->count + data->space_left;
unsigned int new_space = old_space + pages;
unsigned int align_space = ALIGN(new_space, 512);
- void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL);
+ void *bps = kmalloc_array(align_space, sizeof(*data->bps), GFP_KERNEL);
if (!bps) {
return -ENOMEM;
@@ -2814,8 +2893,11 @@ static int __amdgpu_ras_restore_bad_pages(struct amdgpu_device *adev,
for (j = 0; j < count; j++) {
if (amdgpu_ras_check_bad_page_unlock(con,
- bps[j].retired_page << AMDGPU_GPU_PAGE_SHIFT))
+ bps[j].retired_page << AMDGPU_GPU_PAGE_SHIFT)) {
+ data->count++;
+ data->space_left--;
continue;
+ }
if (!data->space_left &&
amdgpu_ras_realloc_eh_data_space(adev, data, 256)) {
@@ -2828,6 +2910,7 @@ static int __amdgpu_ras_restore_bad_pages(struct amdgpu_device *adev,
sizeof(struct eeprom_table_record));
data->count++;
data->space_left--;
+ con->bad_page_num++;
}
return 0;
@@ -2974,7 +3057,7 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
ret = __amdgpu_ras_convert_rec_array_from_rom(adev,
&bps[i], &err_data, nps);
if (ret)
- control->ras_num_bad_pages -= adev->umc.retire_unit;
+ con->bad_page_num -= adev->umc.retire_unit;
i += (adev->umc.retire_unit - 1);
} else {
break;
@@ -2988,8 +3071,10 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
ret = __amdgpu_ras_convert_rec_from_rom(adev,
&bps[i], &err_data, nps);
if (ret)
- control->ras_num_bad_pages -= adev->umc.retire_unit;
+ con->bad_page_num -= adev->umc.retire_unit;
}
+
+ con->eh_data->count_saved = con->eh_data->count;
} else {
ret = __amdgpu_ras_restore_bad_pages(adev, bps, pages);
}
@@ -3012,7 +3097,7 @@ int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_err_handler_data *data;
struct amdgpu_ras_eeprom_control *control;
- int save_count, unit_num, bad_page_num, i;
+ int save_count, unit_num, i;
if (!con || !con->eh_data) {
if (new_cnt)
@@ -3033,27 +3118,26 @@ int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
mutex_lock(&con->recovery_lock);
control = &con->eeprom_control;
data = con->eh_data;
- bad_page_num = control->ras_num_bad_pages;
- save_count = data->count - bad_page_num;
+ unit_num = data->count / adev->umc.retire_unit - control->ras_num_recs;
+ save_count = con->bad_page_num - control->ras_num_bad_pages;
mutex_unlock(&con->recovery_lock);
- unit_num = save_count / adev->umc.retire_unit;
if (new_cnt)
*new_cnt = unit_num;
/* only new entries are saved */
- if (save_count > 0) {
+ if (unit_num > 0) {
/*old asics only save pa to eeprom like before*/
if (IP_VERSION_MAJ(amdgpu_ip_version(adev, UMC_HWIP, 0)) < 12) {
if (amdgpu_ras_eeprom_append(control,
- &data->bps[bad_page_num], save_count)) {
+ &data->bps[data->count_saved], unit_num)) {
dev_err(adev->dev, "Failed to save EEPROM table data!");
return -EIO;
}
} else {
for (i = 0; i < unit_num; i++) {
if (amdgpu_ras_eeprom_append(control,
- &data->bps[bad_page_num +
+ &data->bps[data->count_saved +
i * adev->umc.retire_unit], 1)) {
dev_err(adev->dev, "Failed to save EEPROM table data!");
return -EIO;
@@ -3062,6 +3146,7 @@ int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
}
dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count);
+ data->count_saved = data->count;
}
return 0;
@@ -3116,17 +3201,17 @@ static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
}
}
+ ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs, true);
+ if (ret)
+ goto out;
+
ret = amdgpu_ras_eeprom_check(control);
if (ret)
goto out;
/* HW not usable */
- if (amdgpu_ras_is_rma(adev)) {
+ if (amdgpu_ras_is_rma(adev))
ret = -EHWPOISON;
- goto out;
- }
-
- ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs, true);
}
out:
@@ -3134,18 +3219,24 @@ out:
return ret;
}
-static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
+static int amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
uint64_t addr)
{
struct ras_err_handler_data *data = con->eh_data;
+ struct amdgpu_device *adev = con->adev;
int i;
+ if ((addr >= adev->gmc.mc_vram_size &&
+ adev->gmc.mc_vram_size) ||
+ (addr >= RAS_UMC_INJECT_ADDR_LIMIT))
+ return -EINVAL;
+
addr >>= AMDGPU_GPU_PAGE_SHIFT;
for (i = 0; i < data->count; i++)
if (addr == data->bps[i].retired_page)
- return true;
+ return 1;
- return false;
+ return 0;
}
/*
@@ -3153,11 +3244,11 @@ static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
*
* Note: this check is only for umc block
*/
-static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
+static int amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
uint64_t addr)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
- bool ret = false;
+ int ret = 0;
if (!con || !con->eh_data)
return ret;
@@ -3241,7 +3332,7 @@ static void amdgpu_ras_ecc_log_init(struct ras_ecc_log_info *ecc_log)
INIT_RADIX_TREE(&ecc_log->de_page_tree, GFP_KERNEL);
ecc_log->de_queried_count = 0;
- ecc_log->prev_de_queried_count = 0;
+ ecc_log->consumption_q_count = 0;
}
static void amdgpu_ras_ecc_log_fini(struct ras_ecc_log_info *ecc_log)
@@ -3261,7 +3352,7 @@ static void amdgpu_ras_ecc_log_fini(struct ras_ecc_log_info *ecc_log)
mutex_destroy(&ecc_log->lock);
ecc_log->de_queried_count = 0;
- ecc_log->prev_de_queried_count = 0;
+ ecc_log->consumption_q_count = 0;
}
static bool amdgpu_ras_schedule_retirement_dwork(struct amdgpu_ras *con,
@@ -3287,7 +3378,6 @@ static void amdgpu_ras_do_page_retirement(struct work_struct *work)
page_retirement_dwork.work);
struct amdgpu_device *adev = con->adev;
struct ras_err_data err_data;
- unsigned long err_cnt;
/* If gpu reset is ongoing, delay retiring the bad pages */
if (amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) {
@@ -3299,13 +3389,9 @@ static void amdgpu_ras_do_page_retirement(struct work_struct *work)
amdgpu_ras_error_data_init(&err_data);
amdgpu_umc_handle_bad_pages(adev, &err_data);
- err_cnt = err_data.err_addr_cnt;
amdgpu_ras_error_data_fini(&err_data);
- if (err_cnt && amdgpu_ras_is_rma(adev))
- amdgpu_ras_reset_gpu(adev);
-
amdgpu_ras_schedule_retirement_dwork(con,
AMDGPU_RAS_RETIRE_PAGE_INTERVAL);
}
@@ -3316,49 +3402,39 @@ static int amdgpu_ras_poison_creation_handler(struct amdgpu_device *adev,
int ret = 0;
struct ras_ecc_log_info *ecc_log;
struct ras_query_if info;
- uint32_t timeout = 0;
+ u32 timeout = MAX_UMC_POISON_POLLING_TIME_ASYNC;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
- uint64_t de_queried_count;
- uint32_t new_detect_count, total_detect_count;
- uint32_t need_query_count = poison_creation_count;
+ u64 de_queried_count;
+ u64 consumption_q_count;
enum ras_event_type type = RAS_EVENT_TYPE_POISON_CREATION;
memset(&info, 0, sizeof(info));
info.head.block = AMDGPU_RAS_BLOCK__UMC;
ecc_log = &ras->umc_ecc_log;
- total_detect_count = 0;
+ ecc_log->de_queried_count = 0;
+ ecc_log->consumption_q_count = 0;
+
do {
ret = amdgpu_ras_query_error_status_with_event(adev, &info, type);
if (ret)
return ret;
de_queried_count = ecc_log->de_queried_count;
- if (de_queried_count > ecc_log->prev_de_queried_count) {
- new_detect_count = de_queried_count - ecc_log->prev_de_queried_count;
- ecc_log->prev_de_queried_count = de_queried_count;
- timeout = 0;
- } else {
- new_detect_count = 0;
- }
+ consumption_q_count = ecc_log->consumption_q_count;
- if (new_detect_count) {
- total_detect_count += new_detect_count;
- } else {
- if (!timeout && need_query_count)
- timeout = MAX_UMC_POISON_POLLING_TIME_ASYNC;
+ if (de_queried_count && consumption_q_count)
+ break;
- if (timeout) {
- if (!--timeout)
- break;
- msleep(1);
- }
- }
- } while (total_detect_count < need_query_count);
+ msleep(100);
+ } while (--timeout);
- if (total_detect_count)
+ if (de_queried_count)
schedule_delayed_work(&ras->page_retirement_dwork, 0);
+ if (amdgpu_ras_is_rma(adev) && atomic_cmpxchg(&ras->rma_in_recovery, 0, 1) == 0)
+ amdgpu_ras_reset_gpu(adev);
+
return 0;
}
@@ -3394,6 +3470,12 @@ static int amdgpu_ras_poison_consumption_handler(struct amdgpu_device *adev,
reset_flags |= msg.reset;
}
+ /*
+ * Try to ensure the poison creation handler completes first,
+ * so that RMA is set if the bad-page count exceeds the threshold.
+ */
+ flush_delayed_work(&con->page_retirement_dwork);
+
/* for RMA, amdgpu_ras_poison_creation_handler will trigger gpu reset */
if (reset_flags && !amdgpu_ras_is_rma(adev)) {
if (reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET)
@@ -3403,8 +3485,6 @@ static int amdgpu_ras_poison_consumption_handler(struct amdgpu_device *adev,
else
reset = reset_flags;
- flush_delayed_work(&con->page_retirement_dwork);
-
con->gpu_reset_flags |= reset;
amdgpu_ras_reset_gpu(adev);
@@ -3434,6 +3514,7 @@ static int amdgpu_ras_page_retirement_thread(void *param)
if (kthread_should_stop())
break;
+ mutex_lock(&con->poison_lock);
gpu_reset = 0;
do {
@@ -3446,7 +3527,8 @@ static int amdgpu_ras_page_retirement_thread(void *param)
atomic_sub(poison_creation_count, &con->poison_creation_count);
atomic_sub(poison_creation_count, &con->page_retirement_req_cnt);
}
- } while (atomic_read(&con->poison_creation_count));
+ } while (atomic_read(&con->poison_creation_count) &&
+ !atomic_read(&con->poison_consumption_count));
if (ret != -EIO) {
msg_count = kfifo_len(&con->poison_fifo);
@@ -3463,6 +3545,7 @@ static int amdgpu_ras_page_retirement_thread(void *param)
/* gpu mode-1 reset is ongoing or just completed ras mode-1 reset */
/* Clear poison creation request */
atomic_set(&con->poison_creation_count, 0);
+ atomic_set(&con->poison_consumption_count, 0);
/* Clear poison fifo */
amdgpu_ras_clear_poison_fifo(adev);
@@ -3487,9 +3570,12 @@ static int amdgpu_ras_page_retirement_thread(void *param)
atomic_sub(msg_count, &con->page_retirement_req_cnt);
}
+ atomic_set(&con->poison_consumption_count, 0);
+
/* Wake up work to save bad pages to eeprom */
schedule_delayed_work(&con->page_retirement_dwork, 0);
}
+ mutex_unlock(&con->poison_lock);
}
return 0;
@@ -3570,8 +3656,10 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev, bool init_bp_info)
}
mutex_init(&con->recovery_lock);
+ mutex_init(&con->poison_lock);
INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
atomic_set(&con->in_recovery, 0);
+ atomic_set(&con->rma_in_recovery, 0);
con->eeprom_control.bad_channel_bitmap = 0;
max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count(&con->eeprom_control);
@@ -3589,6 +3677,7 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev, bool init_bp_info)
init_waitqueue_head(&con->page_retirement_wq);
atomic_set(&con->page_retirement_req_cnt, 0);
atomic_set(&con->poison_creation_count, 0);
+ atomic_set(&con->poison_consumption_count, 0);
con->page_retirement_thread =
kthread_run(amdgpu_ras_page_retirement_thread, adev, "umc_page_retirement");
if (IS_ERR(con->page_retirement_thread)) {
@@ -3661,6 +3750,8 @@ static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
kfree(data);
mutex_unlock(&con->recovery_lock);
+ amdgpu_ras_critical_region_init(adev);
+
return 0;
}
/* recovery end */
@@ -4087,6 +4178,12 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
goto release_con;
}
+ con->init_task_pid = task_pid_nr(current);
+ get_task_comm(con->init_task_comm, current);
+
+ mutex_init(&con->critical_region_lock);
+ INIT_LIST_HEAD(&con->critical_region_head);
+
dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
"hardware ability[%x] ras_mask[%x]\n",
adev->ras_hw_enabled, adev->ras_enabled);
@@ -4366,6 +4463,9 @@ int amdgpu_ras_fini(struct amdgpu_device *adev)
if (!adev->ras_enabled || !con)
return 0;
+ amdgpu_ras_critical_region_fini(adev);
+ mutex_destroy(&con->critical_region_lock);
+
list_for_each_entry_safe(ras_node, tmp, &adev->ras_list, node) {
if (ras_node->ras_obj) {
obj = ras_node->ras_obj;
@@ -5274,6 +5374,9 @@ int amdgpu_ras_reserve_page(struct amdgpu_device *adev, uint64_t pfn)
uint64_t start = pfn << AMDGPU_GPU_PAGE_SHIFT;
int ret = 0;
+ if (amdgpu_ras_check_critical_address(adev, start))
+ return 0;
+
mutex_lock(&con->page_rsv_lock);
ret = amdgpu_vram_mgr_query_page_status(mgr, start);
if (ret == -ENOENT)
@@ -5310,3 +5413,80 @@ bool amdgpu_ras_is_rma(struct amdgpu_device *adev)
return con->is_rma;
}
+
+int amdgpu_ras_add_critical_region(struct amdgpu_device *adev,
+ struct amdgpu_bo *bo)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct amdgpu_vram_mgr_resource *vres;
+ struct ras_critical_region *region;
+ struct drm_buddy_block *block;
+ int ret = 0;
+
+ if (!bo || !bo->tbo.resource)
+ return -EINVAL;
+
+ vres = to_amdgpu_vram_mgr_resource(bo->tbo.resource);
+
+ mutex_lock(&con->critical_region_lock);
+
+ /* Check if the bo had been recorded */
+ list_for_each_entry(region, &con->critical_region_head, node)
+ if (region->bo == bo)
+ goto out;
+
+ /* Record new critical amdgpu bo */
+ list_for_each_entry(block, &vres->blocks, link) {
+ region = kzalloc(sizeof(*region), GFP_KERNEL);
+ if (!region) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ region->bo = bo;
+ region->start = amdgpu_vram_mgr_block_start(block);
+ region->size = amdgpu_vram_mgr_block_size(block);
+ list_add_tail(&region->node, &con->critical_region_head);
+ }
+
+out:
+ mutex_unlock(&con->critical_region_lock);
+
+ return ret;
+}
+
+static void amdgpu_ras_critical_region_init(struct amdgpu_device *adev)
+{
+ amdgpu_ras_add_critical_region(adev, adev->mman.fw_reserved_memory);
+}
+
+static void amdgpu_ras_critical_region_fini(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_critical_region *region, *tmp;
+
+ mutex_lock(&con->critical_region_lock);
+ list_for_each_entry_safe(region, tmp, &con->critical_region_head, node) {
+ list_del(&region->node);
+ kfree(region);
+ }
+ mutex_unlock(&con->critical_region_lock);
+}
+
+bool amdgpu_ras_check_critical_address(struct amdgpu_device *adev, uint64_t addr)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_critical_region *region;
+ bool ret = false;
+
+ mutex_lock(&con->critical_region_lock);
+ list_for_each_entry(region, &con->critical_region_head, node) {
+ if ((region->start <= addr) &&
+ (addr < (region->start + region->size))) {
+ ret = true;
+ break;
+ }
+ }
+ mutex_unlock(&con->critical_region_lock);
+
+ return ret;
+}
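Together these helpers let RAS paths refuse to retire or reserve pages that overlap kernel-critical VRAM, such as the FW-reserved region registered in amdgpu_ras_critical_region_init(). A hedged caller sketch mirroring the amdgpu_ras_reserve_page() check above:

/* Sketch: guard a page-retirement path with the critical-address check. */
static bool example_page_is_retirable(struct amdgpu_device *adev, uint64_t pfn)
{
	uint64_t addr = pfn << AMDGPU_GPU_PAGE_SHIFT;

	/* never retire pages backing fw_reserved_memory and friends */
	return !amdgpu_ras_check_critical_address(adev, addr);
}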
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index 927d6bff734a..6cf0dfd38be8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -492,8 +492,15 @@ struct ras_ecc_err {
struct ras_ecc_log_info {
struct mutex lock;
struct radix_tree_root de_page_tree;
- uint64_t de_queried_count;
- uint64_t prev_de_queried_count;
+ uint64_t de_queried_count;
+ uint64_t consumption_q_count;
+};
+
+struct ras_critical_region {
+ struct list_head node;
+ struct amdgpu_bo *bo;
+ uint64_t start;
+ uint64_t size;
};
struct amdgpu_ras {
@@ -515,6 +522,7 @@ struct amdgpu_ras {
/* gpu recovery */
struct work_struct recovery_work;
atomic_t in_recovery;
+ atomic_t rma_in_recovery;
struct amdgpu_device *adev;
/* error handler data */
struct ras_err_handler_data *eh_data;
@@ -557,6 +565,7 @@ struct amdgpu_ras {
struct mutex page_retirement_lock;
atomic_t page_retirement_req_cnt;
atomic_t poison_creation_count;
+ atomic_t poison_consumption_count;
struct mutex page_rsv_lock;
DECLARE_KFIFO(poison_fifo, struct ras_poison_msg, 128);
struct ras_ecc_log_info umc_ecc_log;
@@ -570,6 +579,17 @@ struct amdgpu_ras {
struct ras_event_manager *event_mgr;
uint64_t reserved_pages_in_bytes;
+
+ pid_t init_task_pid;
+ char init_task_comm[TASK_COMM_LEN];
+
+ int bad_page_num;
+
+ struct list_head critical_region_head;
+ struct mutex critical_region_lock;
+
+ /* Protect poison injection */
+ struct mutex poison_lock;
};
struct ras_fs_data {
@@ -608,6 +628,7 @@ struct ras_err_handler_data {
struct eeprom_table_record *bps;
/* the count of entries */
int count;
+ int count_saved;
/* the space can place new entries */
int space_left;
};
@@ -973,6 +994,9 @@ int amdgpu_ras_mark_ras_event_caller(struct amdgpu_device *adev, enum ras_event_
int amdgpu_ras_reserve_page(struct amdgpu_device *adev, uint64_t pfn);
+int amdgpu_ras_add_critical_region(struct amdgpu_device *adev, struct amdgpu_bo *bo);
+bool amdgpu_ras_check_critical_address(struct amdgpu_device *adev, uint64_t addr);
+
int amdgpu_ras_put_poison_req(struct amdgpu_device *adev,
enum amdgpu_ras_block block, uint16_t pasid,
pasid_notify pasid_fn, void *data, uint32_t reset);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index 9bda9ad13f88..3eb3fb55ccb0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -743,8 +743,7 @@ amdgpu_ras_eeprom_append_table(struct amdgpu_ras_eeprom_control *control,
else
control->ras_num_mca_recs += num;
- control->ras_num_bad_pages = control->ras_num_pa_recs +
- control->ras_num_mca_recs * adev->umc.retire_unit;
+ control->ras_num_bad_pages = con->bad_page_num;
Out:
kfree(buf);
return res;
@@ -766,6 +765,10 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
dev_warn(adev->dev,
"Saved bad pages %d reaches threshold value %d\n",
control->ras_num_bad_pages, ras->bad_page_cnt_threshold);
+
+ if (adev->cper.enabled && amdgpu_cper_generate_bp_threshold_record(adev))
+ dev_warn(adev->dev, "fail to generate bad page threshold cper records\n");
+
if ((amdgpu_bad_page_threshold != -1) &&
(amdgpu_bad_page_threshold != -2)) {
control->tbl_hdr.header = RAS_TABLE_HDR_BAD;
@@ -774,9 +777,10 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
control->tbl_rai.health_percent = 0;
}
ras->is_rma = true;
- /* ignore the -ENOTSUPP return value */
- amdgpu_dpm_send_rma_reason(adev);
}
+
+ /* ignore the -ENOTSUPP return value */
+ amdgpu_dpm_send_rma_reason(adev);
}
if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1)
@@ -1457,8 +1461,7 @@ int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control)
if (!__get_eeprom_i2c_addr(adev, control))
return -EINVAL;
- control->ras_num_bad_pages = control->ras_num_pa_recs +
- control->ras_num_mca_recs * adev->umc.retire_unit;
+ control->ras_num_bad_pages = ras->bad_page_num;
if (hdr->header == RAS_TABLE_HDR_VAL) {
dev_dbg(adev->dev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
index 50fcd86e1033..be2e56ce1355 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
@@ -91,6 +91,7 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
break;
case TTM_PL_TT:
case AMDGPU_PL_DOORBELL:
+ case AMDGPU_PL_MMIO_REMAP:
node = to_ttm_range_mgr_node(res)->mm_nodes;
while (start >= node->size << PAGE_SHIFT)
start -= node++->size << PAGE_SHIFT;
@@ -153,6 +154,7 @@ static inline void amdgpu_res_next(struct amdgpu_res_cursor *cur, uint64_t size)
break;
case TTM_PL_TT:
case AMDGPU_PL_DOORBELL:
+ case AMDGPU_PL_MMIO_REMAP:
node = cur->node;
cur->node = ++node;
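AMDGPU_PL_MMIO_REMAP now takes the same mm_nodes path as TT and DOORBELL, so the cursor helpers walk it chunk by chunk. A minimal iteration sketch (applies to any range-manager-backed pool):

/* Sketch: iterate a resource with the cursor API. */
static void example_walk_resource(struct ttm_resource *res, u64 size)
{
	struct amdgpu_res_cursor cursor;

	amdgpu_res_first(res, 0, size, &cursor);
	while (cursor.remaining) {
		/* cursor.start/cursor.size describe one contiguous chunk */
		amdgpu_res_next(&cursor, cursor.size);
	}
}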
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
index dabfbdf6f1ce..28c4ad62f50e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
@@ -340,6 +340,9 @@ void amdgpu_reset_get_desc(struct amdgpu_reset_context *rst_ctxt, char *buf,
case AMDGPU_RESET_SRC_USER:
strscpy(buf, "user trigger", len);
break;
+ case AMDGPU_RESET_SRC_USERQ:
+ strscpy(buf, "user queue trigger", len);
+ break;
default:
strscpy(buf, "unknown", len);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
index 4d9b9701139b..07b4d37f1db6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
@@ -43,6 +43,7 @@ enum AMDGPU_RESET_SRCS {
AMDGPU_RESET_SRC_MES,
AMDGPU_RESET_SRC_HWS,
AMDGPU_RESET_SRC_USER,
+ AMDGPU_RESET_SRC_USERQ,
};
struct amdgpu_reset_context {
@@ -160,4 +161,16 @@ int amdgpu_reset_do_xgmi_reset_on_init(
bool amdgpu_reset_in_recovery(struct amdgpu_device *adev);
+static inline void amdgpu_reset_set_dpc_status(struct amdgpu_device *adev,
+ bool status)
+{
+ adev->pcie_reset_ctx.occurs_dpc = status;
+ adev->no_hw_access = status;
+}
+
+static inline bool amdgpu_reset_in_dpc(struct amdgpu_device *adev)
+{
+ return adev->pcie_reset_ctx.occurs_dpc;
+}
+
#endif
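The two inlines keep occurs_dpc and no_hw_access in lockstep, so every register accessor that honors no_hw_access is automatically fenced off for the whole DPC window. A hypothetical PCIe error-handler sketch:

/* Hypothetical sketch: bracket DPC recovery with the new helpers. */
static void example_dpc_recovery(struct amdgpu_device *adev)
{
	amdgpu_reset_set_dpc_status(adev, true);	/* blocks HW access */

	/* link recovery runs here; paths can test amdgpu_reset_in_dpc() */

	amdgpu_reset_set_dpc_status(adev, false);	/* re-enable access */
}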
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 486c3646710c..8f6ce948c684 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -364,7 +364,8 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
/* Allocate ring buffer */
if (ring->ring_obj == NULL) {
- r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_dw, PAGE_SIZE,
+ r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_bytes,
+ PAGE_SIZE,
AMDGPU_GEM_DOMAIN_GTT,
&ring->ring_obj,
&ring->gpu_addr,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 7670f5d82b9e..b6b649179776 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -114,7 +114,7 @@ struct amdgpu_sched {
*/
struct amdgpu_fence_driver {
uint64_t gpu_addr;
- volatile uint32_t *cpu_addr;
+ uint32_t *cpu_addr;
/* sync_seq is protected by ring emission lock */
uint32_t sync_seq;
atomic_t last_seq;
@@ -211,7 +211,18 @@ struct amdgpu_ring_funcs {
bool support_64bit_ptrs;
bool no_user_fence;
bool secure_submission_supported;
- unsigned extra_dw;
+
+ /**
+ * @extra_bytes:
+ *
+ * Optional extra space in bytes that is added to the ring size
+ * when allocating the BO that holds the contents of the ring.
+ * This space isn't used for command submission to the ring,
+ * but is just there to satisfy some hardware requirements or
+ * implement workarounds. It's up to the implementation of each
+ * specific ring to initialize this space.
+ */
+ unsigned extra_bytes;
/* ring read/write ptr handling */
u64 (*get_rptr)(struct amdgpu_ring *ring);
@@ -298,7 +309,7 @@ struct amdgpu_ring {
unsigned int ring_backup_entries_to_copy;
unsigned rptr_offs;
u64 rptr_gpu_addr;
- volatile u32 *rptr_cpu_addr;
+ u32 *rptr_cpu_addr;
/**
* @wptr:
@@ -378,19 +389,19 @@ struct amdgpu_ring {
* This is the CPU address pointer in the writeback slot. This is used
* to commit changes to the GPU.
*/
- volatile u32 *wptr_cpu_addr;
+ u32 *wptr_cpu_addr;
unsigned fence_offs;
u64 fence_gpu_addr;
- volatile u32 *fence_cpu_addr;
+ u32 *fence_cpu_addr;
uint64_t current_ctx;
char name[16];
u32 trail_seq;
unsigned trail_fence_offs;
u64 trail_fence_gpu_addr;
- volatile u32 *trail_fence_cpu_addr;
+ u32 *trail_fence_cpu_addr;
unsigned cond_exe_offs;
u64 cond_exe_gpu_addr;
- volatile u32 *cond_exe_cpu_addr;
+ u32 *cond_exe_cpu_addr;
unsigned int set_q_mode_offs;
u32 *set_q_mode_ptr;
u64 set_q_mode_token;
@@ -470,10 +481,7 @@ static inline void amdgpu_ring_set_preempt_cond_exec(struct amdgpu_ring *ring,
static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring)
{
- int i = 0;
- while (i <= ring->buf_mask)
- ring->ring[i++] = ring->funcs->nop;
-
+ memset32(ring->ring, ring->funcs->nop, ring->buf_mask + 1);
}
static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
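The memset32() form is behavior-preserving: it writes buf_mask + 1 copies of the nop opcode, exactly what the removed while loop did, one u32 at a time. Equivalence sketch:

/* memset32(dst, v, n) is equivalent to: */
static void example_fill32(u32 *dst, u32 v, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		dst[i] = v;
}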
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
index db5791e1a7ce..5aa830a02d80 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
@@ -89,7 +89,7 @@ void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev, int xcc_id)
int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws)
{
const u32 *src_ptr;
- volatile u32 *dst_ptr;
+ u32 *dst_ptr;
u32 i;
int r;
@@ -189,7 +189,7 @@ int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev)
void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev)
{
const __le32 *fw_data;
- volatile u32 *dst_ptr;
+ u32 *dst_ptr;
int me, i, max_me;
u32 bo_offset = 0;
u32 table_offset, table_size;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
index c210625be220..2ce310b31942 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
@@ -251,7 +251,7 @@ struct amdgpu_rlc_funcs {
* and it also provides a pointer to it which is used by the firmware
* to load the clear state in some cases.
*/
- void (*get_csb_buffer)(struct amdgpu_device *adev, volatile u32 *buffer);
+ void (*get_csb_buffer)(struct amdgpu_device *adev, u32 *buffer);
int (*get_cp_table_num)(struct amdgpu_device *adev);
int (*resume)(struct amdgpu_device *adev);
void (*stop)(struct amdgpu_device *adev);
@@ -275,19 +275,19 @@ struct amdgpu_rlc {
/* for power gating */
struct amdgpu_bo *save_restore_obj;
uint64_t save_restore_gpu_addr;
- volatile uint32_t *sr_ptr;
+ uint32_t *sr_ptr;
const u32 *reg_list;
u32 reg_list_size;
/* for clear state */
struct amdgpu_bo *clear_state_obj;
uint64_t clear_state_gpu_addr;
- volatile uint32_t *cs_ptr;
+ uint32_t *cs_ptr;
const struct cs_section_def *cs_data;
u32 clear_state_size;
/* for cp tables */
struct amdgpu_bo *cp_table_obj;
uint64_t cp_table_gpu_addr;
- volatile uint32_t *cp_table_ptr;
+ uint32_t *cp_table_ptr;
u32 cp_table_size;
/* safe mode for updating CG/PG state */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
index d45ebfb642ca..a0b479d5fff1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
@@ -67,9 +67,9 @@ static inline u64 amdgpu_seq64_get_va_base(struct amdgpu_device *adev)
int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amdgpu_bo_va **bo_va)
{
- u64 seq64_addr, va_flags;
struct amdgpu_bo *bo;
struct drm_exec exec;
+ u64 seq64_addr;
int r;
bo = adev->seq64.sbo;
@@ -94,9 +94,9 @@ int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm,
seq64_addr = amdgpu_seq64_get_va_base(adev) & AMDGPU_GMC_HOLE_MASK;
- va_flags = amdgpu_gem_va_map_flags(adev, AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_MTYPE_UC);
- r = amdgpu_vm_bo_map(adev, *bo_va, seq64_addr, 0, AMDGPU_VA_RESERVED_SEQ64_SIZE,
- va_flags);
+ r = amdgpu_vm_bo_map(adev, *bo_va, seq64_addr, 0,
+ AMDGPU_VA_RESERVED_SEQ64_SIZE,
+ AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_MTYPE_UC);
if (r) {
DRM_ERROR("failed to do bo_map on userq sem, err=%d\n", r);
amdgpu_vm_bo_del(adev, *bo_va);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 27ab4e754b2a..aa9ee5dffa45 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -123,6 +123,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
case AMDGPU_PL_GWS:
case AMDGPU_PL_OA:
case AMDGPU_PL_DOORBELL:
+ case AMDGPU_PL_MMIO_REMAP:
placement->num_placement = 0;
return;
@@ -226,7 +227,8 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr,
AMDGPU_FENCE_OWNER_UNDEFINED,
num_dw * 4 + num_bytes,
- AMDGPU_IB_POOL_DELAYED, &job);
+ AMDGPU_IB_POOL_DELAYED, &job,
+ AMDGPU_KERNEL_JOB_ID_TTM_MAP_BUFFER);
if (r)
return r;
@@ -406,7 +408,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
struct dma_fence *wipe_fence = NULL;
r = amdgpu_fill_buffer(abo, 0, NULL, &wipe_fence,
- false);
+ false, AMDGPU_KERNEL_JOB_ID_MOVE_BLIT);
if (r) {
goto error;
} else if (wipe_fence) {
@@ -447,7 +449,8 @@ bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
return false;
if (res->mem_type == TTM_PL_SYSTEM || res->mem_type == TTM_PL_TT ||
- res->mem_type == AMDGPU_PL_PREEMPT || res->mem_type == AMDGPU_PL_DOORBELL)
+ res->mem_type == AMDGPU_PL_PREEMPT || res->mem_type == AMDGPU_PL_DOORBELL ||
+ res->mem_type == AMDGPU_PL_MMIO_REMAP)
return true;
if (res->mem_type != TTM_PL_VRAM)
@@ -538,10 +541,12 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
old_mem->mem_type == AMDGPU_PL_GWS ||
old_mem->mem_type == AMDGPU_PL_OA ||
old_mem->mem_type == AMDGPU_PL_DOORBELL ||
+ old_mem->mem_type == AMDGPU_PL_MMIO_REMAP ||
new_mem->mem_type == AMDGPU_PL_GDS ||
new_mem->mem_type == AMDGPU_PL_GWS ||
new_mem->mem_type == AMDGPU_PL_OA ||
- new_mem->mem_type == AMDGPU_PL_DOORBELL) {
+ new_mem->mem_type == AMDGPU_PL_DOORBELL ||
+ new_mem->mem_type == AMDGPU_PL_MMIO_REMAP) {
/* Nothing to save here */
amdgpu_bo_move_notify(bo, evict, new_mem);
ttm_bo_move_null(bo, new_mem);
@@ -629,6 +634,12 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
mem->bus.is_iomem = true;
mem->bus.caching = ttm_uncached;
break;
+ case AMDGPU_PL_MMIO_REMAP:
+ mem->bus.offset = mem->start << PAGE_SHIFT;
+ mem->bus.offset += adev->rmmio_remap.bus_addr;
+ mem->bus.is_iomem = true;
+ mem->bus.caching = ttm_uncached;
+ break;
default:
return -EINVAL;
}
@@ -646,6 +657,8 @@ static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
if (bo->resource->mem_type == AMDGPU_PL_DOORBELL)
return ((uint64_t)(adev->doorbell.base + cursor.start)) >> PAGE_SHIFT;
+ else if (bo->resource->mem_type == AMDGPU_PL_MMIO_REMAP)
+ return ((uint64_t)(adev->rmmio_remap.bus_addr + cursor.start)) >> PAGE_SHIFT;
return (adev->gmc.aper_base + cursor.start) >> PAGE_SHIFT;
}
@@ -695,7 +708,7 @@ struct amdgpu_ttm_tt {
* Calling function must call amdgpu_ttm_tt_userptr_range_done() once and only
* once afterwards to stop HMM tracking
*/
-int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages,
+int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
struct hmm_range **range)
{
struct ttm_tt *ttm = bo->tbo.ttm;
@@ -732,7 +745,7 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages,
readonly = amdgpu_ttm_tt_is_readonly(ttm);
r = amdgpu_hmm_range_get_pages(&bo->notifier, start, ttm->num_pages,
- readonly, NULL, pages, range);
+ readonly, NULL, range);
out_unlock:
mmap_read_unlock(mm);
if (r)
@@ -784,12 +797,12 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
* that backs user memory and will ultimately be mapped into the device
* address space.
*/
-void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
+void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct hmm_range *range)
{
unsigned long i;
for (i = 0; i < ttm->num_pages; ++i)
- ttm->pages[i] = pages ? pages[i] : NULL;
+ ttm->pages[i] = range ? hmm_pfn_to_page(range->hmm_pfns[i]) : NULL;
}
/*
@@ -1355,7 +1368,8 @@ uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
if (mem && (mem->mem_type == TTM_PL_TT ||
mem->mem_type == AMDGPU_PL_DOORBELL ||
- mem->mem_type == AMDGPU_PL_PREEMPT)) {
+ mem->mem_type == AMDGPU_PL_PREEMPT ||
+ mem->mem_type == AMDGPU_PL_MMIO_REMAP)) {
flags |= AMDGPU_PTE_SYSTEM;
if (ttm->caching == ttm_cached)
@@ -1510,7 +1524,8 @@ static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr,
AMDGPU_FENCE_OWNER_UNDEFINED,
num_dw * 4, AMDGPU_IB_POOL_DELAYED,
- &job);
+ &job,
+ AMDGPU_KERNEL_JOB_ID_TTM_ACCESS_MEMORY_SDMA);
if (r)
goto out;
@@ -1841,6 +1856,59 @@ static void amdgpu_ttm_pools_fini(struct amdgpu_device *adev)
adev->mman.ttm_pools = NULL;
}
+/**
+ * amdgpu_ttm_mmio_remap_bo_init - Allocate the singleton 4K MMIO_REMAP BO
+ * @adev: amdgpu device
+ *
+ * Allocates a one-page (4K) GEM BO in AMDGPU_GEM_DOMAIN_MMIO_REMAP when the
+ * hardware exposes a remap base (adev->rmmio_remap.bus_addr) and the host
+ * PAGE_SIZE is <= AMDGPU_GPU_PAGE_SIZE (4K). The BO is created as a regular
+ * GEM object (amdgpu_bo_create).
+ *
+ * Return:
+ * * 0 on success or intentional skip (feature not present/unsupported)
+ * * negative errno on allocation failure
+ */
+static int amdgpu_ttm_mmio_remap_bo_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_bo_param bp;
+ int r;
+
+ /* Skip if HW doesn't expose remap, or if PAGE_SIZE > AMDGPU_GPU_PAGE_SIZE (4K). */
+ if (!adev->rmmio_remap.bus_addr || PAGE_SIZE > AMDGPU_GPU_PAGE_SIZE)
+ return 0;
+
+ memset(&bp, 0, sizeof(bp));
+
+ /* Create exactly one GEM BO in the MMIO_REMAP domain. */
+ bp.type = ttm_bo_type_device; /* userspace-mappable GEM */
+ bp.size = AMDGPU_GPU_PAGE_SIZE; /* 4K */
+ bp.byte_align = AMDGPU_GPU_PAGE_SIZE;
+ bp.domain = AMDGPU_GEM_DOMAIN_MMIO_REMAP;
+ bp.flags = 0;
+ bp.resv = NULL;
+ bp.bo_ptr_size = sizeof(struct amdgpu_bo);
+
+ r = amdgpu_bo_create(adev, &bp, &adev->rmmio_remap.bo);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+/**
+ * amdgpu_ttm_mmio_remap_bo_fini - Free the singleton MMIO_REMAP BO
+ * @adev: amdgpu device
+ *
+ * Frees the kernel-owned MMIO_REMAP BO if it was allocated by
+ * amdgpu_ttm_mmio_remap_bo_init().
+ */
+static void amdgpu_ttm_mmio_remap_bo_fini(struct amdgpu_device *adev)
+{
+ amdgpu_bo_unref(&adev->rmmio_remap.bo);
+ adev->rmmio_remap.bo = NULL;
+}
+
/*
* amdgpu_ttm_init - Init the memory management (ttm) as well as various
* gtt/vram related fields.
@@ -1877,11 +1945,13 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
}
adev->mman.initialized = true;
- /* Initialize VRAM pool with all of VRAM divided into pages */
- r = amdgpu_vram_mgr_init(adev);
- if (r) {
- dev_err(adev->dev, "Failed initializing VRAM heap.\n");
- return r;
+ if (!adev->gmc.is_app_apu) {
+ /* Initialize VRAM pool with all of VRAM divided into pages */
+ r = amdgpu_vram_mgr_init(adev);
+ if (r) {
+ dev_err(adev->dev, "Failed initializing VRAM heap.\n");
+ return r;
+ }
}
/* Change the size here instead of the init above so only lpfn is affected */
@@ -2008,6 +2078,18 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
return r;
}
+ /* Initialize MMIO-remap pool (single page 4K) */
+ r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_MMIO_REMAP, 1);
+ if (r) {
+ dev_err(adev->dev, "Failed initializing MMIO-remap heap.\n");
+ return r;
+ }
+
+ /* Allocate the singleton MMIO_REMAP BO (4K) if supported */
+ r = amdgpu_ttm_mmio_remap_bo_init(adev);
+ if (r)
+ return r;
+
/* Initialize preemptible memory pool */
r = amdgpu_preempt_mgr_init(adev);
if (r) {
@@ -2070,6 +2152,8 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
}
amdgpu_bo_free_kernel(&adev->mman.sdma_access_bo, NULL,
&adev->mman.sdma_access_ptr);
+
+ amdgpu_ttm_mmio_remap_bo_fini(adev);
amdgpu_ttm_fw_reserve_vram_fini(adev);
amdgpu_ttm_drv_reserve_vram_fini(adev);
@@ -2082,7 +2166,8 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
drm_dev_exit(idx);
}
- amdgpu_vram_mgr_fini(adev);
+ if (!adev->gmc.is_app_apu)
+ amdgpu_vram_mgr_fini(adev);
amdgpu_gtt_mgr_fini(adev);
amdgpu_preempt_mgr_fini(adev);
amdgpu_doorbell_fini(adev);
@@ -2091,6 +2176,7 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_DOORBELL);
+ ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_MMIO_REMAP);
ttm_device_fini(&adev->mman.bdev);
adev->mman.initialized = false;
dev_info(adev->dev, "amdgpu: ttm finalized\n");
@@ -2167,7 +2253,7 @@ static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
struct dma_resv *resv,
bool vm_needs_flush,
struct amdgpu_job **job,
- bool delayed)
+ bool delayed, u64 k_job_id)
{
enum amdgpu_ib_pool_type pool = direct_submit ?
AMDGPU_IB_POOL_DIRECT :
@@ -2177,7 +2263,7 @@ static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
&adev->mman.high_pr;
r = amdgpu_job_alloc_with_ib(adev, entity,
AMDGPU_FENCE_OWNER_UNDEFINED,
- num_dw * 4, pool, job);
+ num_dw * 4, pool, job, k_job_id);
if (r)
return r;
@@ -2217,7 +2303,8 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
num_loops = DIV_ROUND_UP(byte_count, max_bytes);
num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
r = amdgpu_ttm_prepare_job(adev, direct_submit, num_dw,
- resv, vm_needs_flush, &job, false);
+ resv, vm_needs_flush, &job, false,
+ AMDGPU_KERNEL_JOB_ID_TTM_COPY_BUFFER);
if (r)
return r;
@@ -2252,7 +2339,8 @@ static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data,
uint64_t dst_addr, uint32_t byte_count,
struct dma_resv *resv,
struct dma_fence **fence,
- bool vm_needs_flush, bool delayed)
+ bool vm_needs_flush, bool delayed,
+ u64 k_job_id)
{
struct amdgpu_device *adev = ring->adev;
unsigned int num_loops, num_dw;
@@ -2265,7 +2353,7 @@ static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data,
num_loops = DIV_ROUND_UP_ULL(byte_count, max_bytes);
num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->fill_num_dw, 8);
r = amdgpu_ttm_prepare_job(adev, false, num_dw, resv, vm_needs_flush,
- &job, delayed);
+ &job, delayed, k_job_id);
if (r)
return r;
@@ -2335,7 +2423,8 @@ int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
goto err;
r = amdgpu_ttm_fill_mem(ring, 0, addr, size, resv,
- &next, true, true);
+ &next, true, true,
+ AMDGPU_KERNEL_JOB_ID_TTM_CLEAR_BUFFER);
if (r)
goto err;
@@ -2354,7 +2443,8 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
uint32_t src_data,
struct dma_resv *resv,
struct dma_fence **f,
- bool delayed)
+ bool delayed,
+ u64 k_job_id)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
@@ -2384,7 +2474,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
goto error;
r = amdgpu_ttm_fill_mem(ring, src_data, to, cur_size, resv,
- &next, true, delayed);
+ &next, true, delayed, k_job_id);
if (r)
goto error;
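Every fill/copy path now threads a k_job_id down to amdgpu_job_alloc_with_ib(), letting kernel-internal jobs be attributed in traces. A hedged caller sketch of the widened amdgpu_fill_buffer() signature (job-id macro as used earlier in this diff):

/* Sketch: clear a BO, tagging the SDMA job with a kernel job id. */
static int example_clear_bo(struct amdgpu_bo *bo)
{
	struct dma_fence *fence = NULL;
	int r;

	r = amdgpu_fill_buffer(bo, 0, NULL, &fence, false,
			       AMDGPU_KERNEL_JOB_ID_CLEAR_ON_RELEASE);
	if (r)
		return r;

	if (fence) {
		dma_fence_wait(fence, false);
		dma_fence_put(fence);
	}
	return 0;
}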
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 2309df3f68a9..0be2728aa872 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -34,7 +34,8 @@
#define AMDGPU_PL_OA (TTM_PL_PRIV + 2)
#define AMDGPU_PL_PREEMPT (TTM_PL_PRIV + 3)
#define AMDGPU_PL_DOORBELL (TTM_PL_PRIV + 4)
-#define __AMDGPU_PL_NUM (TTM_PL_PRIV + 5)
+#define AMDGPU_PL_MMIO_REMAP (TTM_PL_PRIV + 5)
+#define __AMDGPU_PL_NUM (TTM_PL_PRIV + 6)
#define AMDGPU_GTT_MAX_TRANSFER_SIZE 512
#define AMDGPU_GTT_NUM_TRANSFER_WINDOWS 2
@@ -182,14 +183,15 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
uint32_t src_data,
struct dma_resv *resv,
struct dma_fence **fence,
- bool delayed);
+ bool delayed,
+ u64 k_job_id);
int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo);
void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo);
uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type);
#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
-int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages,
+int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
struct hmm_range **range);
void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm,
struct hmm_range *range);
@@ -197,7 +199,6 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
struct hmm_range *range);
#else
static inline int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
- struct page **pages,
struct hmm_range **range)
{
return -EPERM;
@@ -213,7 +214,7 @@ static inline bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
}
#endif
-void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages);
+void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct hmm_range *range);
int amdgpu_ttm_tt_get_userptr(const struct ttm_buffer_object *tbo,
uint64_t *user_addr);
int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
index c92b8794aa73..2e039fb778ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -252,6 +252,7 @@ int amdgpu_umc_pasid_poison_handler(struct amdgpu_device *adev,
block, pasid, pasid_fn, data, reset);
if (!ret) {
atomic_inc(&con->page_retirement_req_cnt);
+ atomic_inc(&con->poison_consumption_count);
wake_up(&con->page_retirement_wq);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
index 8190c24a649a..48e0932f5b62 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
@@ -44,8 +44,41 @@ u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev)
return userq_ip_mask;
}
+int amdgpu_userq_input_va_validate(struct amdgpu_vm *vm, u64 addr,
+ u64 expected_size)
+{
+ struct amdgpu_bo_va_mapping *va_map;
+ u64 user_addr;
+ u64 size;
+ int r = 0;
+
+ user_addr = (addr & AMDGPU_GMC_HOLE_MASK) >> AMDGPU_GPU_PAGE_SHIFT;
+ size = expected_size >> AMDGPU_GPU_PAGE_SHIFT;
+
+ r = amdgpu_bo_reserve(vm->root.bo, false);
+ if (r)
+ return r;
+
+ va_map = amdgpu_vm_bo_lookup_mapping(vm, user_addr);
+ if (!va_map) {
+ r = -EINVAL;
+ goto out_err;
+ }
+ /* Only check that the userq address range resides within the VM mapping */
+ if (user_addr >= va_map->start &&
+ va_map->last - user_addr + 1 >= size) {
+ amdgpu_bo_unreserve(vm->root.bo);
+ return 0;
+ }
+
+ r = -EINVAL;
+out_err:
+ amdgpu_bo_unreserve(vm->root.bo);
+ return r;
+}
+
static int
-amdgpu_userq_unmap_helper(struct amdgpu_userq_mgr *uq_mgr,
+amdgpu_userq_preempt_helper(struct amdgpu_userq_mgr *uq_mgr,
struct amdgpu_usermode_queue *queue)
{
struct amdgpu_device *adev = uq_mgr->adev;
@@ -54,6 +87,49 @@ amdgpu_userq_unmap_helper(struct amdgpu_userq_mgr *uq_mgr,
int r = 0;
if (queue->state == AMDGPU_USERQ_STATE_MAPPED) {
+ r = userq_funcs->preempt(uq_mgr, queue);
+ if (r) {
+ queue->state = AMDGPU_USERQ_STATE_HUNG;
+ } else {
+ queue->state = AMDGPU_USERQ_STATE_PREEMPTED;
+ }
+ }
+
+ return r;
+}
+
+static int
+amdgpu_userq_restore_helper(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ const struct amdgpu_userq_funcs *userq_funcs =
+ adev->userq_funcs[queue->queue_type];
+ int r = 0;
+
+ if (queue->state == AMDGPU_USERQ_STATE_PREEMPTED) {
+ r = userq_funcs->restore(uq_mgr, queue);
+ if (r) {
+ queue->state = AMDGPU_USERQ_STATE_HUNG;
+ } else {
+ queue->state = AMDGPU_USERQ_STATE_MAPPED;
+ }
+ }
+
+ return r;
+}
+
+static int
+amdgpu_userq_unmap_helper(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ const struct amdgpu_userq_funcs *userq_funcs =
+ adev->userq_funcs[queue->queue_type];
+ int r = 0;
+
+ if ((queue->state == AMDGPU_USERQ_STATE_MAPPED) ||
+ (queue->state == AMDGPU_USERQ_STATE_PREEMPTED)) {
r = userq_funcs->unmap(uq_mgr, queue);
if (r)
queue->state = AMDGPU_USERQ_STATE_HUNG;
@@ -112,22 +188,6 @@ amdgpu_userq_cleanup(struct amdgpu_userq_mgr *uq_mgr,
kfree(queue);
}
-int
-amdgpu_userq_active(struct amdgpu_userq_mgr *uq_mgr)
-{
- struct amdgpu_usermode_queue *queue;
- int queue_id;
- int ret = 0;
-
- mutex_lock(&uq_mgr->userq_mutex);
- /* Resume all the queues for this process */
- idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id)
- ret += queue->state == AMDGPU_USERQ_STATE_MAPPED;
-
- mutex_unlock(&uq_mgr->userq_mutex);
- return ret;
-}
-
static struct amdgpu_usermode_queue *
amdgpu_userq_find(struct amdgpu_userq_mgr *uq_mgr, int qid)
{
@@ -323,6 +383,11 @@ amdgpu_userq_destroy(struct drm_file *filp, int queue_id)
debugfs_remove_recursive(queue->debugfs_queue);
#endif
r = amdgpu_userq_unmap_helper(uq_mgr, queue);
+ /* TODO: a userq HW unmap error requires a reset */
+ if (unlikely(r != AMDGPU_USERQ_STATE_UNMAPPED)) {
+ drm_warn(adev_to_drm(uq_mgr->adev), "destroying a userq that is still mapped in HW\n");
+ queue->state = AMDGPU_USERQ_STATE_HUNG;
+ }
amdgpu_userq_cleanup(uq_mgr, queue, queue_id);
mutex_unlock(&uq_mgr->userq_mutex);
@@ -364,7 +429,7 @@ static int amdgpu_mqd_info_read(struct seq_file *m, void *unused)
return -EINVAL;
}
- seq_printf(m, "queue_type %d\n", queue->queue_type);
+ seq_printf(m, "queue_type: %d\n", queue->queue_type);
seq_printf(m, "mqd_gpu_address: 0x%llx\n", amdgpu_bo_gpu_offset(queue->mqd.obj));
amdgpu_bo_unreserve(bo);
@@ -404,27 +469,10 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
(args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK) >>
AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_SHIFT;
- /* Usermode queues are only supported for GFX IP as of now */
- if (args->in.ip_type != AMDGPU_HW_IP_GFX &&
- args->in.ip_type != AMDGPU_HW_IP_DMA &&
- args->in.ip_type != AMDGPU_HW_IP_COMPUTE) {
- drm_file_err(uq_mgr->file, "Usermode queue doesn't support IP type %u\n",
- args->in.ip_type);
- return -EINVAL;
- }
-
r = amdgpu_userq_priority_permit(filp, priority);
if (r)
return r;
- if ((args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE) &&
- (args->in.ip_type != AMDGPU_HW_IP_GFX) &&
- (args->in.ip_type != AMDGPU_HW_IP_COMPUTE) &&
- !amdgpu_is_tmz(adev)) {
- drm_file_err(uq_mgr->file, "Secure only supported on GFX/Compute queues\n");
- return -EINVAL;
- }
-
r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
drm_file_err(uq_mgr->file, "pm_runtime_get_sync() failed for userqueue create\n");
@@ -456,6 +504,15 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
r = -ENOMEM;
goto unlock;
}
+
+	/* Validate the userq virtual addresses */
+ if (amdgpu_userq_input_va_validate(&fpriv->vm, args->in.queue_va, args->in.queue_size) ||
+ amdgpu_userq_input_va_validate(&fpriv->vm, args->in.rptr_va, AMDGPU_GPU_PAGE_SIZE) ||
+ amdgpu_userq_input_va_validate(&fpriv->vm, args->in.wptr_va, AMDGPU_GPU_PAGE_SIZE)) {
+ r = -EINVAL;
+ kfree(queue);
+ goto unlock;
+ }
queue->doorbell_handle = args->in.doorbell_handle;
queue->queue_type = args->in.ip_type;
queue->vm = &fpriv->vm;
@@ -543,22 +600,45 @@ unlock:
return r;
}
-int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
- struct drm_file *filp)
+static int amdgpu_userq_input_args_validate(struct drm_device *dev,
+ union drm_amdgpu_userq *args,
+ struct drm_file *filp)
{
- union drm_amdgpu_userq *args = data;
- int r;
+ struct amdgpu_device *adev = drm_to_adev(dev);
switch (args->in.op) {
case AMDGPU_USERQ_OP_CREATE:
if (args->in.flags & ~(AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK |
AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE))
return -EINVAL;
- r = amdgpu_userq_create(filp, args);
- if (r)
- drm_file_err(filp, "Failed to create usermode queue\n");
- break;
+		/* Usermode queues are only supported for GFX, compute and SDMA IPs for now */
+ if (args->in.ip_type != AMDGPU_HW_IP_GFX &&
+ args->in.ip_type != AMDGPU_HW_IP_DMA &&
+ args->in.ip_type != AMDGPU_HW_IP_COMPUTE) {
+ drm_file_err(filp, "Usermode queue doesn't support IP type %u\n",
+ args->in.ip_type);
+ return -EINVAL;
+ }
+
+ if ((args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE) &&
+ (args->in.ip_type != AMDGPU_HW_IP_GFX) &&
+ (args->in.ip_type != AMDGPU_HW_IP_COMPUTE) &&
+ !amdgpu_is_tmz(adev)) {
+ drm_file_err(filp, "Secure only supported on GFX/Compute queues\n");
+ return -EINVAL;
+ }
+ if (args->in.queue_va == AMDGPU_BO_INVALID_OFFSET ||
+ args->in.queue_va == 0 ||
+ args->in.queue_size == 0) {
+ drm_file_err(filp, "invalidate userq queue va or size\n");
+ return -EINVAL;
+ }
+ if (!args->in.wptr_va || !args->in.rptr_va) {
+ drm_file_err(filp, "invalidate userq queue rptr or wptr\n");
+ return -EINVAL;
+ }
+ break;
case AMDGPU_USERQ_OP_FREE:
if (args->in.ip_type ||
args->in.doorbell_handle ||
@@ -568,10 +648,34 @@ int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
args->in.queue_size ||
args->in.rptr_va ||
args->in.wptr_va ||
- args->in.wptr_va ||
args->in.mqd ||
args->in.mqd_size)
return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
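
Editor's note: for the FREE op the validator insists that every creation-only field is zero, which keeps the UAPI extensible — a userspace that starts populating those fields gets a clean -EINVAL instead of being silently ignored. A standalone sketch of the idiom, with hypothetical struct and field names:

	#include <errno.h>
	#include <stdint.h>

	struct my_free_args {
		uint32_t queue_id;                 /* the only meaningful field */
		uint32_t ip_type, doorbell_handle; /* creation-only fields */
		uint64_t queue_va, queue_size, rptr_va, wptr_va;
	};

	static int validate_free(const struct my_free_args *a)
	{
		/* reject any creation-only field that is set for FREE */
		if (a->ip_type || a->doorbell_handle || a->queue_va ||
		    a->queue_size || a->rptr_va || a->wptr_va)
			return -EINVAL;
		return 0;
	}
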
+
+int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ union drm_amdgpu_userq *args = data;
+ int r;
+
+ if (amdgpu_userq_input_args_validate(dev, args, filp) < 0)
+ return -EINVAL;
+
+ switch (args->in.op) {
+ case AMDGPU_USERQ_OP_CREATE:
+ r = amdgpu_userq_create(filp, args);
+ if (r)
+ drm_file_err(filp, "Failed to create usermode queue\n");
+ break;
+
+ case AMDGPU_USERQ_OP_FREE:
r = amdgpu_userq_destroy(filp, args->in.queue_id);
if (r)
drm_file_err(filp, "Failed to destroy usermode queue\n");
@@ -594,7 +698,7 @@ amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr)
/* Resume all the queues for this process */
idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
- r = amdgpu_userq_map_helper(uq_mgr, queue);
+ r = amdgpu_userq_restore_helper(uq_mgr, queue);
if (r)
ret = r;
}
@@ -604,108 +708,106 @@ amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr)
return ret;
}
+static int amdgpu_userq_validate_vm(void *param, struct amdgpu_bo *bo)
+{
+ struct ttm_operation_ctx ctx = { false, false };
+
+ amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
+ return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+}
+
+/* Handle all BOs on the invalidated list, validate them and update the PTs */
static int
-amdgpu_userq_validate_vm_bo(void *_unused, struct amdgpu_bo *bo)
+amdgpu_userq_bo_validate(struct amdgpu_device *adev, struct drm_exec *exec,
+ struct amdgpu_vm *vm)
{
struct ttm_operation_ctx ctx = { false, false };
+ struct amdgpu_bo_va *bo_va;
+ struct amdgpu_bo *bo;
int ret;
- amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
+ spin_lock(&vm->invalidated_lock);
+ while (!list_empty(&vm->invalidated)) {
+ bo_va = list_first_entry(&vm->invalidated,
+ struct amdgpu_bo_va,
+ base.vm_status);
+ spin_unlock(&vm->invalidated_lock);
- ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
- if (ret)
- DRM_ERROR("Fail to validate\n");
+ bo = bo_va->base.bo;
+ ret = drm_exec_prepare_obj(exec, &bo->tbo.base, 2);
+ if (unlikely(ret))
+ return ret;
- return ret;
+ amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
+ ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (ret)
+ return ret;
+
+ /* This moves the bo_va to the done list */
+ ret = amdgpu_vm_bo_update(adev, bo_va, false);
+ if (ret)
+ return ret;
+
+ spin_lock(&vm->invalidated_lock);
+ }
+ spin_unlock(&vm->invalidated_lock);
+
+ return 0;
}
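
Editor's note: the drain loop above follows the usual take-pop-drop shape — the spinlock protecting vm->invalidated cannot be held across drm_exec_prepare_obj() or ttm_bo_validate() because both may sleep, and restarting from list_first_entry() is safe because amdgpu_vm_bo_update() moves each processed bo_va off the list. A kernel-style fragment of the pattern, names illustrative:

	/* assumes: struct item { struct list_head node; }, a spinlock 'lock'
	 * guarding 'pending', and do_sleeping_work() moving the item off
	 * 'pending' before it returns */
	spin_lock(&lock);
	while (!list_empty(&pending)) {
		struct item *it = list_first_entry(&pending, struct item, node);

		spin_unlock(&lock);	/* the work below may sleep */
		do_sleeping_work(it);
		spin_lock(&lock);
	}
	spin_unlock(&lock);
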
+/* Make sure the whole VM is ready to be used */
static int
-amdgpu_userq_validate_bos(struct amdgpu_userq_mgr *uq_mgr)
+amdgpu_userq_vm_validate(struct amdgpu_userq_mgr *uq_mgr)
{
struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
- struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_device *adev = uq_mgr->adev;
+ struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_va *bo_va;
- struct ww_acquire_ctx *ticket;
struct drm_exec exec;
- struct amdgpu_bo *bo;
- struct dma_resv *resv;
- bool clear, unlock;
- int ret = 0;
+ int ret;
drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
drm_exec_until_all_locked(&exec) {
- ret = amdgpu_vm_lock_pd(vm, &exec, 2);
+ ret = amdgpu_vm_lock_pd(vm, &exec, 1);
drm_exec_retry_on_contention(&exec);
- if (unlikely(ret)) {
- drm_file_err(uq_mgr->file, "Failed to lock PD\n");
+ if (unlikely(ret))
goto unlock_all;
- }
-
- /* Lock the done list */
- list_for_each_entry(bo_va, &vm->done, base.vm_status) {
- bo = bo_va->base.bo;
- if (!bo)
- continue;
-
- ret = drm_exec_lock_obj(&exec, &bo->tbo.base);
- drm_exec_retry_on_contention(&exec);
- if (unlikely(ret))
- goto unlock_all;
- }
- }
- spin_lock(&vm->status_lock);
- while (!list_empty(&vm->moved)) {
- bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va,
- base.vm_status);
- spin_unlock(&vm->status_lock);
-
- /* Per VM BOs never need to bo cleared in the page tables */
- ret = amdgpu_vm_bo_update(adev, bo_va, false);
- if (ret)
+ ret = amdgpu_vm_lock_done_list(vm, &exec, 1);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(ret))
goto unlock_all;
- spin_lock(&vm->status_lock);
- }
-
- ticket = &exec.ticket;
- while (!list_empty(&vm->invalidated)) {
- bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
- base.vm_status);
- resv = bo_va->base.bo->tbo.base.resv;
- spin_unlock(&vm->status_lock);
- bo = bo_va->base.bo;
- ret = amdgpu_userq_validate_vm_bo(NULL, bo);
- if (ret) {
- drm_file_err(uq_mgr->file, "Failed to validate BO\n");
+ /* This validates PDs, PTs and per VM BOs */
+ ret = amdgpu_vm_validate(adev, vm, NULL,
+ amdgpu_userq_validate_vm,
+ NULL);
+ if (unlikely(ret))
goto unlock_all;
- }
- /* Try to reserve the BO to avoid clearing its ptes */
- if (!adev->debug_vm && dma_resv_trylock(resv)) {
- clear = false;
- unlock = true;
- /* The caller is already holding the reservation lock */
- } else if (dma_resv_locking_ctx(resv) == ticket) {
- clear = false;
- unlock = false;
- /* Somebody else is using the BO right now */
- } else {
- clear = true;
- unlock = false;
- }
+ /* This locks and validates the remaining evicted BOs */
+ ret = amdgpu_userq_bo_validate(adev, &exec, vm);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(ret))
+ goto unlock_all;
+ }
- ret = amdgpu_vm_bo_update(adev, bo_va, clear);
+ ret = amdgpu_vm_handle_moved(adev, vm, NULL);
+ if (ret)
+ goto unlock_all;
- if (unlock)
- dma_resv_unlock(resv);
- if (ret)
- goto unlock_all;
+ ret = amdgpu_vm_update_pdes(adev, vm, false);
+ if (ret)
+ goto unlock_all;
- spin_lock(&vm->status_lock);
- }
- spin_unlock(&vm->status_lock);
+ /*
+ * We need to wait for all VM updates to finish before restarting the
+	 * queues. Walking the done list like this is safe now because
+	 * everything is locked in place.
+ */
+ list_for_each_entry(bo_va, &vm->done, base.vm_status)
+ dma_fence_wait(bo_va->last_pt_update, false);
+ dma_fence_wait(vm->last_update, false);
ret = amdgpu_eviction_fence_replace_fence(&fpriv->evf_mgr, &exec);
if (ret)
@@ -726,7 +828,7 @@ static void amdgpu_userq_restore_worker(struct work_struct *work)
mutex_lock(&uq_mgr->userq_mutex);
- ret = amdgpu_userq_validate_bos(uq_mgr);
+ ret = amdgpu_userq_vm_validate(uq_mgr);
if (ret) {
drm_file_err(uq_mgr->file, "Failed to validate BOs to restore\n");
goto unlock;
@@ -751,7 +853,7 @@ amdgpu_userq_evict_all(struct amdgpu_userq_mgr *uq_mgr)
/* Try to unmap all the queues in this process ctx */
idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
- r = amdgpu_userq_unmap_helper(uq_mgr, queue);
+ r = amdgpu_userq_preempt_helper(uq_mgr, queue);
if (r)
ret = r;
}
@@ -877,7 +979,10 @@ int amdgpu_userq_suspend(struct amdgpu_device *adev)
cancel_delayed_work_sync(&uqm->resume_work);
mutex_lock(&uqm->userq_mutex);
idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
- r = amdgpu_userq_unmap_helper(uqm, queue);
+ if (adev->in_s0ix)
+ r = amdgpu_userq_preempt_helper(uqm, queue);
+ else
+ r = amdgpu_userq_unmap_helper(uqm, queue);
if (r)
ret = r;
}
@@ -902,7 +1007,10 @@ int amdgpu_userq_resume(struct amdgpu_device *adev)
list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
mutex_lock(&uqm->userq_mutex);
idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
- r = amdgpu_userq_map_helper(uqm, queue);
+ if (adev->in_s0ix)
+ r = amdgpu_userq_restore_helper(uqm, queue);
+ else
+ r = amdgpu_userq_map_helper(uqm, queue);
if (r)
ret = r;
}
@@ -936,7 +1044,7 @@ int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
(queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
(queue->xcp_id == idx)) {
- r = amdgpu_userq_unmap_helper(uqm, queue);
+ r = amdgpu_userq_preempt_helper(uqm, queue);
if (r)
ret = r;
}
@@ -970,7 +1078,7 @@ int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
(queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
(queue->xcp_id == idx)) {
- r = amdgpu_userq_map_helper(uqm, queue);
+ r = amdgpu_userq_restore_helper(uqm, queue);
if (r)
ret = r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
index b1ca91b7cda4..c027dd916672 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
@@ -78,6 +78,12 @@ struct amdgpu_userq_funcs {
struct amdgpu_usermode_queue *queue);
int (*map)(struct amdgpu_userq_mgr *uq_mgr,
struct amdgpu_usermode_queue *queue);
+ int (*preempt)(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue);
+ int (*restore)(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue);
+ int (*detect_and_reset)(struct amdgpu_device *adev,
+ int queue_type);
};
/* Usermode queues for gfx */
@@ -114,8 +120,6 @@ void amdgpu_userq_destroy_object(struct amdgpu_userq_mgr *uq_mgr,
void amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr,
struct amdgpu_eviction_fence *ev_fence);
-int amdgpu_userq_active(struct amdgpu_userq_mgr *uq_mgr);
-
void amdgpu_userq_ensure_ev_fence(struct amdgpu_userq_mgr *userq_mgr,
struct amdgpu_eviction_fence_mgr *evf_mgr);
@@ -133,4 +137,6 @@ int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
u32 idx);
+int amdgpu_userq_input_va_validate(struct amdgpu_vm *vm, u64 addr,
+ u64 expected_size);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
index c2a983ff23c9..761bad98da3e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
@@ -67,6 +67,14 @@ static u64 amdgpu_userq_fence_read(struct amdgpu_userq_fence_driver *fence_drv)
return le64_to_cpu(*fence_drv->cpu_addr);
}
+static void
+amdgpu_userq_fence_write(struct amdgpu_userq_fence_driver *fence_drv,
+ u64 seq)
+{
+ if (fence_drv->cpu_addr)
+ *fence_drv->cpu_addr = cpu_to_le64(seq);
+}
+
int amdgpu_userq_fence_driver_alloc(struct amdgpu_device *adev,
struct amdgpu_usermode_queue *userq)
{
@@ -276,7 +284,7 @@ static int amdgpu_userq_fence_create(struct amdgpu_usermode_queue *userq,
/* Check if hardware has already processed the job */
spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
- if (!dma_fence_is_signaled_locked(fence))
+ if (!dma_fence_is_signaled(fence))
list_add_tail(&userq_fence->link, &fence_drv->fences);
else
dma_fence_put(fence);
@@ -408,6 +416,40 @@ static void amdgpu_userq_fence_cleanup(struct dma_fence *fence)
dma_fence_put(fence);
}
+static void
+amdgpu_userq_fence_driver_set_error(struct amdgpu_userq_fence *fence,
+ int error)
+{
+ struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
+ unsigned long flags;
+ struct dma_fence *f;
+
+ spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
+
+ f = rcu_dereference_protected(&fence->base,
+ lockdep_is_held(&fence_drv->fence_list_lock));
+ if (f && !dma_fence_is_signaled_locked(f))
+ dma_fence_set_error(f, error);
+ spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
+}
+
+void
+amdgpu_userq_fence_driver_force_completion(struct amdgpu_usermode_queue *userq)
+{
+ struct dma_fence *f = userq->last_fence;
+
+ if (f) {
+ struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);
+ struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
+ u64 wptr = fence->base.seqno;
+
+ amdgpu_userq_fence_driver_set_error(fence, -ECANCELED);
+ amdgpu_userq_fence_write(fence_drv, wptr);
+ amdgpu_userq_fence_driver_process(fence_drv);
+
+}
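
Editor's note: forced completion mimics what the hardware would have done — the error is recorded first (a dma_fence error must be set before the fence signals), the fence's seqno is published through the shared slot, and the normal processing path then wakes waiters. A toy model of that ordering, with illustrative names:

	#include <errno.h>
	#include <stdbool.h>
	#include <stdint.h>

	struct seqfence { uint64_t seqno; int error; bool signaled; };

	static void force_complete(struct seqfence *f, uint64_t *hw_seq_slot)
	{
		if (f->signaled)
			return;
		f->error = -ECANCELED;	 /* must be recorded before signaling */
		*hw_seq_slot = f->seqno; /* publish the seqno as if HW wrote it */
		f->signaled = true;	 /* normal processing now wakes waiters */
	}
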
+
int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h
index 97a125ab8a78..d76add2afc77 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h
@@ -67,6 +67,7 @@ int amdgpu_userq_fence_driver_alloc(struct amdgpu_device *adev,
struct amdgpu_usermode_queue *userq);
void amdgpu_userq_fence_driver_free(struct amdgpu_usermode_queue *userq);
void amdgpu_userq_fence_driver_process(struct amdgpu_userq_fence_driver *fence_drv);
+void amdgpu_userq_fence_driver_force_completion(struct amdgpu_usermode_queue *userq);
void amdgpu_userq_fence_driver_destroy(struct kref *ref);
int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_utils.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_utils.h
new file mode 100644
index 000000000000..1e40ca3b1584
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_utils.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef AMDGPU_UTILS_H_
+#define AMDGPU_UTILS_H_
+
+/* ---------- Generic 2-bit capability attribute encoding ----------
+ * 00 INVALID, 01 RO, 10 WO, 11 RW
+ */
+enum amdgpu_cap_attr {
+ AMDGPU_CAP_ATTR_INVALID = 0,
+ AMDGPU_CAP_ATTR_RO = 1 << 0,
+ AMDGPU_CAP_ATTR_WO = 1 << 1,
+ AMDGPU_CAP_ATTR_RW = (AMDGPU_CAP_ATTR_RO | AMDGPU_CAP_ATTR_WO),
+};
+
+#define AMDGPU_CAP_ATTR_BITS 2
+#define AMDGPU_CAP_ATTR_MAX ((1U << AMDGPU_CAP_ATTR_BITS) - 1)
+
+/* Internal helper to build helpers for a given enum NAME */
+#define DECLARE_ATTR_CAP_CLASS_HELPERS(NAME) \
+enum { NAME##_BITMAP_BITS = NAME##_COUNT * AMDGPU_CAP_ATTR_BITS }; \
+struct NAME##_caps { \
+ DECLARE_BITMAP(bmap, NAME##_BITMAP_BITS); \
+}; \
+static inline unsigned int NAME##_ATTR_START(enum NAME##_cap_id cap) \
+{ return (unsigned int)cap * AMDGPU_CAP_ATTR_BITS; } \
+static inline void NAME##_attr_init(struct NAME##_caps *c) \
+{ if (c) bitmap_zero(c->bmap, NAME##_BITMAP_BITS); } \
+static inline int NAME##_attr_set(struct NAME##_caps *c, \
+ enum NAME##_cap_id cap, enum amdgpu_cap_attr attr) \
+{ \
+ if (!c) \
+ return -EINVAL; \
+ if (cap >= NAME##_COUNT) \
+ return -EINVAL; \
+ if ((unsigned int)attr > AMDGPU_CAP_ATTR_MAX) \
+ return -EINVAL; \
+ bitmap_write(c->bmap, (unsigned long)attr, \
+ NAME##_ATTR_START(cap), AMDGPU_CAP_ATTR_BITS); \
+ return 0; \
+} \
+static inline int NAME##_attr_get(const struct NAME##_caps *c, \
+ enum NAME##_cap_id cap, enum amdgpu_cap_attr *out) \
+{ \
+ unsigned long v; \
+ if (!c || !out) \
+ return -EINVAL; \
+ if (cap >= NAME##_COUNT) \
+ return -EINVAL; \
+ v = bitmap_read(c->bmap, NAME##_ATTR_START(cap), AMDGPU_CAP_ATTR_BITS); \
+ *out = (enum amdgpu_cap_attr)v; \
+ return 0; \
+} \
+static inline bool NAME##_cap_is_ro(const struct NAME##_caps *c, enum NAME##_cap_id id) \
+{ enum amdgpu_cap_attr a; return !NAME##_attr_get(c, id, &a) && a == AMDGPU_CAP_ATTR_RO; } \
+static inline bool NAME##_cap_is_wo(const struct NAME##_caps *c, enum NAME##_cap_id id) \
+{ enum amdgpu_cap_attr a; return !NAME##_attr_get(c, id, &a) && a == AMDGPU_CAP_ATTR_WO; } \
+static inline bool NAME##_cap_is_rw(const struct NAME##_caps *c, enum NAME##_cap_id id) \
+{ enum amdgpu_cap_attr a; return !NAME##_attr_get(c, id, &a) && a == AMDGPU_CAP_ATTR_RW; }
+
+/* Element expander for enum creation */
+#define _CAP_ENUM_ELEM(x) x,
+
+/* Public macro: declare enum + helpers from an X-macro list */
+#define DECLARE_ATTR_CAP_CLASS(NAME, LIST_MACRO) \
+ enum NAME##_cap_id { LIST_MACRO(_CAP_ENUM_ELEM) NAME##_COUNT }; \
+ DECLARE_ATTR_CAP_CLASS_HELPERS(NAME)
+
+#endif /* AMDGPU_UTILS_H_ */
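
Editor's note: a hedged usage example of the helpers this header generates; the IP name and cap IDs below are hypothetical, only DECLARE_ATTR_CAP_CLASS and the amdgpu_cap_attr values come from the header:

	#include "amdgpu_utils.h"

	#define MYIP_CAPS_LIST(X) \
		X(MYIP_CAP_DOORBELL) \
		X(MYIP_CAP_PREEMPT)

	DECLARE_ATTR_CAP_CLASS(myip, MYIP_CAPS_LIST);

	static bool doorbell_is_rw(void)
	{
		struct myip_caps caps;

		myip_attr_init(&caps);
		myip_attr_set(&caps, MYIP_CAP_DOORBELL, AMDGPU_CAP_ATTR_RW);
		/* each cap occupies 2 bits: 00 INVALID, 01 RO, 10 WO, 11 RW */
		return myip_cap_is_rw(&caps, MYIP_CAP_DOORBELL);
	}
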
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 74758b5ffc6c..5c38f0d30c87 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1136,7 +1136,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
r = amdgpu_job_alloc_with_ib(ring->adev, &adev->uvd.entity,
AMDGPU_FENCE_OWNER_UNDEFINED,
64, direct ? AMDGPU_IB_POOL_DIRECT :
- AMDGPU_IB_POOL_DELAYED, &job);
+ AMDGPU_IB_POOL_DELAYED, &job,
+ AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index b9060bcd4806..ce318f5de047 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -449,7 +449,7 @@ static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
r = amdgpu_job_alloc_with_ib(ring->adev, &ring->adev->vce.entity,
AMDGPU_FENCE_OWNER_UNDEFINED,
ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
- &job);
+ &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
@@ -540,7 +540,8 @@ static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
AMDGPU_FENCE_OWNER_UNDEFINED,
ib_size_dw * 4,
direct ? AMDGPU_IB_POOL_DIRECT :
- AMDGPU_IB_POOL_DELAYED, &job);
+ AMDGPU_IB_POOL_DELAYED, &job,
+ AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index f1f67521c29c..5e0786ea911b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -92,6 +92,7 @@ MODULE_FIRMWARE(FIRMWARE_VCN5_0_0);
MODULE_FIRMWARE(FIRMWARE_VCN5_0_1);
static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
+static void amdgpu_vcn_reg_dump_fini(struct amdgpu_device *adev);
int amdgpu_vcn_early_init(struct amdgpu_device *adev, int i)
{
@@ -184,16 +185,16 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev, int i)
dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
dev_info(adev->dev,
- "Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
- enc_major, enc_minor, dec_ver, vep, fw_rev);
+ "[VCN instance %d] Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
+ i, enc_major, enc_minor, dec_ver, vep, fw_rev);
} else {
unsigned int version_major, version_minor, family_id;
family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
- dev_info(adev->dev, "Found VCN firmware Version: %u.%u Family ID: %u\n",
- version_major, version_minor, family_id);
+ dev_info(adev->dev, "[VCN instance %d] Found VCN firmware Version: %u.%u Family ID: %u\n",
+ i, version_major, version_minor, family_id);
}
bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
@@ -256,12 +257,12 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev, int i)
return 0;
}
-int amdgpu_vcn_sw_fini(struct amdgpu_device *adev, int i)
+void amdgpu_vcn_sw_fini(struct amdgpu_device *adev, int i)
{
int j;
if (adev->vcn.harvest_config & (1 << i))
- return 0;
+ return;
amdgpu_bo_free_kernel(
&adev->vcn.inst[i].dpg_sram_bo,
@@ -285,10 +286,12 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev, int i)
amdgpu_ucode_release(&adev->vcn.inst[0].fw);
adev->vcn.inst[i].fw = NULL;
}
+
+ if (adev->vcn.reg_list)
+ amdgpu_vcn_reg_dump_fini(adev);
+
mutex_destroy(&adev->vcn.inst[i].vcn_pg_lock);
mutex_destroy(&adev->vcn.inst[i].vcn1_jpeg1_workaround);
-
- return 0;
}
bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance)
@@ -352,8 +355,6 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev, int i)
if (adev->vcn.harvest_config & (1 << i))
return 0;
- cancel_delayed_work_sync(&adev->vcn.inst[i].idle_work);
-
/* err_event_athub and dpc recovery will corrupt VCPU buffer, so we need to
* restore fw data and clear buffer in amdgpu_vcn_resume() */
if (in_ras_intr || adev->pcie_reset_ctx.in_link_reset)
@@ -405,6 +406,54 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev, int i)
return 0;
}
+void amdgpu_vcn_get_profile(struct amdgpu_device *adev)
+{
+ int r;
+
+ mutex_lock(&adev->vcn.workload_profile_mutex);
+
+ if (adev->vcn.workload_profile_active) {
+ mutex_unlock(&adev->vcn.workload_profile_mutex);
+ return;
+ }
+ r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
+ true);
+ if (r)
+ dev_warn(adev->dev,
+ "(%d) failed to enable video power profile mode\n", r);
+ else
+ adev->vcn.workload_profile_active = true;
+ mutex_unlock(&adev->vcn.workload_profile_mutex);
+}
+
+void amdgpu_vcn_put_profile(struct amdgpu_device *adev)
+{
+ bool pg = true;
+ int r, i;
+
+ mutex_lock(&adev->vcn.workload_profile_mutex);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.inst[i].cur_state != AMD_PG_STATE_GATE) {
+ pg = false;
+ break;
+ }
+ }
+
+ if (pg) {
+		r = amdgpu_dpm_switch_power_profile(adev,
+						    PP_SMC_POWER_PROFILE_VIDEO, false);
+		if (r)
+			dev_warn(adev->dev,
+				 "(%d) failed to disable video power profile mode\n", r);
+ else
+ adev->vcn.workload_profile_active = false;
+ }
+
+ mutex_unlock(&adev->vcn.workload_profile_mutex);
+}
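
Editor's note: get/put are not reference counted — put re-derives whether the VIDEO profile can be dropped by checking that every VCN instance is power-gated, so a put racing with a fresh submission is harmless. A sketch of the intended pairing, mirroring the ring hooks changed below (illustrative wrappers around the real helpers):

	static void begin_use(struct amdgpu_vcn_inst *inst)
	{
		mutex_lock(&inst->vcn_pg_lock);
		inst->set_pg_state(inst, AMD_PG_STATE_UNGATE);
		mutex_unlock(&inst->vcn_pg_lock);
		amdgpu_vcn_get_profile(inst->adev); /* no-op if already active */
	}

	static void on_idle(struct amdgpu_vcn_inst *inst)
	{
		mutex_lock(&inst->vcn_pg_lock);
		inst->set_pg_state(inst, AMD_PG_STATE_GATE);
		mutex_unlock(&inst->vcn_pg_lock);
		amdgpu_vcn_put_profile(inst->adev); /* drops only if all gated */
	}
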
+
static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
struct amdgpu_vcn_inst *vcn_inst =
@@ -412,7 +461,6 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
struct amdgpu_device *adev = vcn_inst->adev;
unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
unsigned int i = vcn_inst->inst, j;
- int r = 0;
if (adev->vcn.harvest_config & (1 << i))
return;
@@ -438,16 +486,11 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
fences += fence[i];
if (!fences && !atomic_read(&vcn_inst->total_submission_cnt)) {
+ mutex_lock(&vcn_inst->vcn_pg_lock);
vcn_inst->set_pg_state(vcn_inst, AMD_PG_STATE_GATE);
- mutex_lock(&adev->vcn.workload_profile_mutex);
- if (adev->vcn.workload_profile_active) {
- r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
- false);
- if (r)
- dev_warn(adev->dev, "(%d) failed to disable video power profile mode\n", r);
- adev->vcn.workload_profile_active = false;
- }
- mutex_unlock(&adev->vcn.workload_profile_mutex);
+ mutex_unlock(&vcn_inst->vcn_pg_lock);
+ amdgpu_vcn_put_profile(adev);
} else {
schedule_delayed_work(&vcn_inst->idle_work, VCN_IDLE_TIMEOUT);
}
@@ -457,30 +500,11 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_vcn_inst *vcn_inst = &adev->vcn.inst[ring->me];
- int r = 0;
atomic_inc(&vcn_inst->total_submission_cnt);
cancel_delayed_work_sync(&vcn_inst->idle_work);
- /* We can safely return early here because we've cancelled the
- * the delayed work so there is no one else to set it to false
- * and we don't care if someone else sets it to true.
- */
- if (adev->vcn.workload_profile_active)
- goto pg_lock;
-
- mutex_lock(&adev->vcn.workload_profile_mutex);
- if (!adev->vcn.workload_profile_active) {
- r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
- true);
- if (r)
- dev_warn(adev->dev, "(%d) failed to switch to video power profile mode\n", r);
- adev->vcn.workload_profile_active = true;
- }
- mutex_unlock(&adev->vcn.workload_profile_mutex);
-
-pg_lock:
mutex_lock(&vcn_inst->vcn_pg_lock);
vcn_inst->set_pg_state(vcn_inst, AMD_PG_STATE_UNGATE);
@@ -508,6 +532,7 @@ pg_lock:
vcn_inst->pause_dpg_mode(vcn_inst, &new_state);
}
mutex_unlock(&vcn_inst->vcn_pg_lock);
+ amdgpu_vcn_get_profile(adev);
}
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
@@ -601,7 +626,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
64, AMDGPU_IB_POOL_DIRECT,
- &job);
+ &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
goto err;
@@ -781,7 +806,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
- &job);
+ &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
goto err;
@@ -911,7 +936,7 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
- &job);
+ &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
@@ -978,7 +1003,7 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
- &job);
+ &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
@@ -1132,7 +1157,7 @@ static ssize_t amdgpu_debugfs_vcn_fwlog_read(struct file *f, char __user *buf,
{
struct amdgpu_vcn_inst *vcn;
void *log_buf;
- volatile struct amdgpu_vcn_fwlog *plog;
+ struct amdgpu_vcn_fwlog *plog;
unsigned int read_pos, write_pos, available, i, read_bytes = 0;
unsigned int read_num[2] = {0};
@@ -1145,7 +1170,7 @@ static ssize_t amdgpu_debugfs_vcn_fwlog_read(struct file *f, char __user *buf,
log_buf = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;
- plog = (volatile struct amdgpu_vcn_fwlog *)log_buf;
+ plog = (struct amdgpu_vcn_fwlog *)log_buf;
read_pos = plog->rptr;
write_pos = plog->wptr;
@@ -1212,11 +1237,11 @@ void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev, uint8_t i,
void amdgpu_vcn_fwlog_init(struct amdgpu_vcn_inst *vcn)
{
#if defined(CONFIG_DEBUG_FS)
- volatile uint32_t *flag = vcn->fw_shared.cpu_addr;
+ uint32_t *flag = vcn->fw_shared.cpu_addr;
void *fw_log_cpu_addr = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;
uint64_t fw_log_gpu_addr = vcn->fw_shared.gpu_addr + vcn->fw_shared.mem_size;
- volatile struct amdgpu_vcn_fwlog *log_buf = fw_log_cpu_addr;
- volatile struct amdgpu_fw_shared_fw_logging *fw_log = vcn->fw_shared.cpu_addr
+ struct amdgpu_vcn_fwlog *log_buf = fw_log_cpu_addr;
+ struct amdgpu_fw_shared_fw_logging *fw_log = vcn->fw_shared.cpu_addr
+ vcn->fw_shared.log_offset;
*flag |= cpu_to_le32(AMDGPU_VCN_FW_LOGGING_FLAG);
fw_log->is_enabled = 1;
@@ -1527,3 +1552,86 @@ int amdgpu_vcn_ring_reset(struct amdgpu_ring *ring,
return amdgpu_vcn_reset_engine(adev, ring->me);
}
+
+int amdgpu_vcn_reg_dump_init(struct amdgpu_device *adev,
+ const struct amdgpu_hwip_reg_entry *reg, u32 count)
+{
+ adev->vcn.ip_dump = kcalloc(adev->vcn.num_vcn_inst * count,
+ sizeof(uint32_t), GFP_KERNEL);
+ if (!adev->vcn.ip_dump)
+ return -ENOMEM;
+ adev->vcn.reg_list = reg;
+ adev->vcn.reg_count = count;
+
+ return 0;
+}
+
+static void amdgpu_vcn_reg_dump_fini(struct amdgpu_device *adev)
+{
+ kfree(adev->vcn.ip_dump);
+ adev->vcn.ip_dump = NULL;
+ adev->vcn.reg_list = NULL;
+ adev->vcn.reg_count = 0;
+}
+
+void amdgpu_vcn_dump_ip_state(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int i, j;
+ bool is_powered;
+ u32 inst_off;
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ inst_off = i * adev->vcn.reg_count;
+ /* mmUVD_POWER_STATUS is always readable and is the first in reg_list */
+ adev->vcn.ip_dump[inst_off] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(adev->vcn.reg_list[0], i));
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF) !=
+ UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
+
+ if (is_powered)
+ for (j = 1; j < adev->vcn.reg_count; j++)
+ adev->vcn.ip_dump[inst_off + j] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(adev->vcn.reg_list[j], i));
+ }
+}
+
+void amdgpu_vcn_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int i, j;
+ bool is_powered;
+ u32 inst_off;
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i)) {
+ drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
+ continue;
+ }
+
+ inst_off = i * adev->vcn.reg_count;
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF) !=
+ UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
+
+ if (is_powered) {
+ drm_printf(p, "\nActive Instance:VCN%d\n", i);
+ for (j = 0; j < adev->vcn.reg_count; j++)
+ drm_printf(p, "%-50s \t 0x%08x\n", adev->vcn.reg_list[j].reg_name,
+ adev->vcn.ip_dump[inst_off + j]);
+ } else {
+ drm_printf(p, "\nInactive Instance:VCN%d\n", i);
+ }
+ }
+}
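
Editor's note: a hedged example of wiring an IP version into these shared dump helpers; the register list below is illustrative and assumes the existing SOC15_REG_ENTRY_STR() helper. Per the comment in amdgpu_vcn_dump_ip_state(), the always-readable UVD_POWER_STATUS must come first:

	static const struct amdgpu_hwip_reg_entry my_vcn_reg_list[] = {
		SOC15_REG_ENTRY_STR(VCN, 0, regUVD_POWER_STATUS), /* must be first */
		SOC15_REG_ENTRY_STR(VCN, 0, regUVD_STATUS),
	};

	static int my_vcn_sw_init(struct amdgpu_ip_block *ip_block)
	{
		/* allocates num_vcn_inst * count dump slots and registers the list */
		return amdgpu_vcn_reg_dump_init(ip_block->adev, my_vcn_reg_list,
						ARRAY_SIZE(my_vcn_reg_list));
	}
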
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 0bc0a94d7cf0..dc8a17bcc3c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -237,6 +237,8 @@
#define AMDGPU_DRM_KEY_INJECT_WORKAROUND_VCNFW_ASD_HANDSHAKING 2
+struct amdgpu_hwip_reg_entry;
+
enum amdgpu_vcn_caps {
AMDGPU_VCN_RRMT_ENABLED,
};
@@ -362,6 +364,8 @@ struct amdgpu_vcn {
bool workload_profile_active;
struct mutex workload_profile_mutex;
+ u32 reg_count;
+ const struct amdgpu_hwip_reg_entry *reg_list;
};
struct amdgpu_fw_shared_rb_ptrs_struct {
@@ -497,7 +501,7 @@ struct amdgpu_vcn5_fw_shared {
struct amdgpu_fw_shared_rb_setup rb_setup;
struct amdgpu_fw_shared_smu_interface_info smu_dpm_interface;
struct amdgpu_fw_shared_drm_key_wa drm_key_wa;
- uint8_t pad3[9];
+ uint8_t pad3[404];
};
#define VCN_BLOCK_ENCODE_DISABLE_MASK 0x80
@@ -512,7 +516,7 @@ enum vcn_ring_type {
int amdgpu_vcn_early_init(struct amdgpu_device *adev, int i);
int amdgpu_vcn_sw_init(struct amdgpu_device *adev, int i);
-int amdgpu_vcn_sw_fini(struct amdgpu_device *adev, int i);
+void amdgpu_vcn_sw_fini(struct amdgpu_device *adev, int i);
int amdgpu_vcn_suspend(struct amdgpu_device *adev, int i);
int amdgpu_vcn_resume(struct amdgpu_device *adev, int i);
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring);
@@ -557,4 +561,11 @@ int vcn_set_powergating_state(struct amdgpu_ip_block *ip_block,
int amdgpu_vcn_ring_reset(struct amdgpu_ring *ring,
unsigned int vmid,
struct amdgpu_fence *guilty_fence);
+int amdgpu_vcn_reg_dump_init(struct amdgpu_device *adev,
+ const struct amdgpu_hwip_reg_entry *reg, u32 count);
+void amdgpu_vcn_dump_ip_state(struct amdgpu_ip_block *ip_block);
+void amdgpu_vcn_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p);
+void amdgpu_vcn_get_profile(struct amdgpu_device *adev);
+void amdgpu_vcn_put_profile(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 13f0cdeb59c4..3328ab63376b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -828,11 +828,14 @@ static void amdgpu_virt_init_ras(struct amdgpu_device *adev)
{
ratelimit_state_init(&adev->virt.ras.ras_error_cnt_rs, 5 * HZ, 1);
ratelimit_state_init(&adev->virt.ras.ras_cper_dump_rs, 5 * HZ, 1);
+ ratelimit_state_init(&adev->virt.ras.ras_chk_criti_rs, 5 * HZ, 1);
ratelimit_set_flags(&adev->virt.ras.ras_error_cnt_rs,
RATELIMIT_MSG_ON_RELEASE);
ratelimit_set_flags(&adev->virt.ras.ras_cper_dump_rs,
RATELIMIT_MSG_ON_RELEASE);
+ ratelimit_set_flags(&adev->virt.ras.ras_chk_criti_rs,
+ RATELIMIT_MSG_ON_RELEASE);
mutex_init(&adev->virt.ras.ras_telemetry_mutex);
@@ -1501,3 +1504,55 @@ void amdgpu_virt_request_bad_pages(struct amdgpu_device *adev)
if (virt->ops && virt->ops->req_bad_pages)
virt->ops->req_bad_pages(adev);
}
+
+static int amdgpu_virt_cache_chk_criti_hit(struct amdgpu_device *adev,
+ struct amdsriov_ras_telemetry *host_telemetry,
+ bool *hit)
+{
+ struct amd_sriov_ras_chk_criti *tmp = NULL;
+ uint32_t checksum, used_size;
+
+ checksum = host_telemetry->header.checksum;
+ used_size = host_telemetry->header.used_size;
+
+ if (used_size > (AMD_SRIOV_RAS_TELEMETRY_SIZE_KB << 10))
+ return 0;
+
+ tmp = kmemdup(&host_telemetry->body.chk_criti, used_size, GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ if (checksum != amd_sriov_msg_checksum(tmp, used_size, 0, 0))
+ goto out;
+
+ if (hit)
+ *hit = tmp->hit ? true : false;
+
+out:
+ kfree(tmp);
+
+ return 0;
+}
+
+int amdgpu_virt_check_vf_critical_region(struct amdgpu_device *adev, u64 addr, bool *hit)
+{
+ struct amdgpu_virt *virt = &adev->virt;
+ int r = -EPERM;
+
+ if (!virt->ops || !virt->ops->req_ras_chk_criti)
+ return -EOPNOTSUPP;
+
+	/* The host allows 15 RAS telemetry requests per 60 seconds, after which
+	 * it ignores incoming guest messages. Ratelimit the guest messages to
+	 * prevent guest self-DoS.
+ */
+ if (__ratelimit(&virt->ras.ras_chk_criti_rs)) {
+ mutex_lock(&virt->ras.ras_telemetry_mutex);
+ if (!virt->ops->req_ras_chk_criti(adev, addr))
+ r = amdgpu_virt_cache_chk_criti_hit(
+ adev, virt->fw_reserve.ras_telemetry, hit);
+ mutex_unlock(&virt->ras.ras_telemetry_mutex);
+ }
+
+ return r;
+}
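
Editor's note: __ratelimit() returns nonzero while the caller is under budget, so the 5-second interval initialized in amdgpu_virt_init_ras() keeps the guest comfortably below the host's 15-per-60 s allowance. The bare pattern, with a hypothetical message sender:

	#include <linux/ratelimit.h>

	static int guarded_request(void)
	{
		static DEFINE_RATELIMIT_STATE(rs, 5 * HZ, 1); /* 1 event per 5 s */

		if (!__ratelimit(&rs))
			return -EPERM;		/* over budget: fail, don't spam */
		return send_host_message();	/* hypothetical transport call */
	}
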
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 3da3ebb1d9a1..d1172c8e58c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -98,6 +98,7 @@ struct amdgpu_virt_ops {
int (*req_ras_err_count)(struct amdgpu_device *adev);
int (*req_ras_cper_dump)(struct amdgpu_device *adev, u64 vf_rptr);
int (*req_bad_pages)(struct amdgpu_device *adev);
+ int (*req_ras_chk_criti)(struct amdgpu_device *adev, u64 addr);
};
/*
@@ -252,10 +253,15 @@ struct amdgpu_virt_ras_err_handler_data {
struct amdgpu_virt_ras {
struct ratelimit_state ras_error_cnt_rs;
struct ratelimit_state ras_cper_dump_rs;
+ struct ratelimit_state ras_chk_criti_rs;
struct mutex ras_telemetry_mutex;
uint64_t cper_rptr;
};
+#define AMDGPU_VIRT_CAPS_LIST(X) X(AMDGPU_VIRT_CAP_POWER_LIMIT)
+
+DECLARE_ATTR_CAP_CLASS(amdgpu_virt, AMDGPU_VIRT_CAPS_LIST);
+
/* GPU virtualization */
struct amdgpu_virt {
uint32_t caps;
@@ -267,12 +273,14 @@ struct amdgpu_virt {
struct amdgpu_irq_src rcv_irq;
struct work_struct flr_work;
- struct work_struct bad_pages_work;
+ struct work_struct req_bad_pages_work;
+ struct work_struct handle_bad_pages_work;
struct amdgpu_mm_table mm_table;
const struct amdgpu_virt_ops *ops;
struct amdgpu_vf_error_buffer vf_errors;
struct amdgpu_virt_fw_reserve fw_reserve;
+ struct amdgpu_virt_caps virt_caps;
uint32_t gim_feature;
uint32_t reg_access_mode;
int req_init_data_ver;
@@ -447,4 +455,5 @@ int amdgpu_virt_ras_telemetry_post_reset(struct amdgpu_device *adev);
bool amdgpu_virt_ras_telemetry_block_en(struct amdgpu_device *adev,
enum amdgpu_ras_block block);
void amdgpu_virt_request_bad_pages(struct amdgpu_device *adev);
+int amdgpu_virt_check_vf_critical_region(struct amdgpu_device *adev, u64 addr, bool *hit);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
index 155bb9891a17..79bad9cbe2ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
@@ -14,7 +14,6 @@
#include "dce_v8_0.h"
#endif
#include "dce_v10_0.h"
-#include "dce_v11_0.h"
#include "ivsrcid/ivsrcid_vislands30.h"
#include "amdgpu_vkms.h"
#include "amdgpu_display.h"
@@ -581,13 +580,6 @@ static int amdgpu_vkms_hw_init(struct amdgpu_ip_block *ip_block)
case CHIP_TONGA:
dce_v10_0_disable_dce(adev);
break;
- case CHIP_CARRIZO:
- case CHIP_STONEY:
- case CHIP_POLARIS10:
- case CHIP_POLARIS11:
- case CHIP_VEGAM:
- dce_v11_0_disable_dce(adev);
- break;
case CHIP_TOPAZ:
#ifdef CONFIG_DRM_AMDGPU_SI
case CHIP_HAINAN:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index c39bb06ebda1..8c28e8923f02 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -128,6 +128,17 @@ struct amdgpu_vm_tlb_seq_struct {
};
/**
+ * amdgpu_vm_assert_locked - check if VM is correctly locked
+ * @vm: the VM which should be checked
+ *
+ * Asserts that the VM root PD is locked.
+ */
+static void amdgpu_vm_assert_locked(struct amdgpu_vm *vm)
+{
+ dma_resv_assert_held(vm->root.bo->tbo.base.resv);
+}
+
+/**
* amdgpu_vm_set_pasid - manage pasid and vm ptr mapping
*
* @adev: amdgpu_device pointer
@@ -143,6 +154,8 @@ int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
{
int r;
+ amdgpu_vm_assert_locked(vm);
+
if (vm->pasid == pasid)
return 0;
@@ -181,12 +194,11 @@ static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
struct amdgpu_bo *bo = vm_bo->bo;
vm_bo->moved = true;
- spin_lock(&vm_bo->vm->status_lock);
+ amdgpu_vm_assert_locked(vm);
if (bo->tbo.type == ttm_bo_type_kernel)
list_move(&vm_bo->vm_status, &vm->evicted);
else
list_move_tail(&vm_bo->vm_status, &vm->evicted);
- spin_unlock(&vm_bo->vm->status_lock);
}
/**
* amdgpu_vm_bo_moved - vm_bo is moved
@@ -198,9 +210,8 @@ static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
{
- spin_lock(&vm_bo->vm->status_lock);
+ amdgpu_vm_assert_locked(vm_bo->vm);
list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
- spin_unlock(&vm_bo->vm->status_lock);
}
/**
@@ -213,9 +224,8 @@ static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
{
- spin_lock(&vm_bo->vm->status_lock);
+ amdgpu_vm_assert_locked(vm_bo->vm);
list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
- spin_unlock(&vm_bo->vm->status_lock);
vm_bo->moved = false;
}
@@ -229,9 +239,9 @@ static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
{
- spin_lock(&vm_bo->vm->status_lock);
+ spin_lock(&vm_bo->vm->invalidated_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
- spin_unlock(&vm_bo->vm->status_lock);
+ spin_unlock(&vm_bo->vm->invalidated_lock);
}
/**
@@ -244,10 +254,9 @@ static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_evicted_user(struct amdgpu_vm_bo_base *vm_bo)
{
+ amdgpu_vm_assert_locked(vm_bo->vm);
vm_bo->moved = true;
- spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->evicted_user);
- spin_unlock(&vm_bo->vm->status_lock);
}
/**
@@ -260,13 +269,11 @@ static void amdgpu_vm_bo_evicted_user(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
{
- if (vm_bo->bo->parent) {
- spin_lock(&vm_bo->vm->status_lock);
+ amdgpu_vm_assert_locked(vm_bo->vm);
+ if (vm_bo->bo->parent)
list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
- spin_unlock(&vm_bo->vm->status_lock);
- } else {
+ else
amdgpu_vm_bo_idle(vm_bo);
- }
}
/**
@@ -279,9 +286,8 @@ static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
{
- spin_lock(&vm_bo->vm->status_lock);
+ amdgpu_vm_assert_locked(vm_bo->vm);
list_move(&vm_bo->vm_status, &vm_bo->vm->done);
- spin_unlock(&vm_bo->vm->status_lock);
}
/**
@@ -295,10 +301,13 @@ static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)
{
struct amdgpu_vm_bo_base *vm_bo, *tmp;
- spin_lock(&vm->status_lock);
+ spin_lock(&vm->invalidated_lock);
list_splice_init(&vm->done, &vm->invalidated);
list_for_each_entry(vm_bo, &vm->invalidated, vm_status)
vm_bo->moved = true;
+ spin_unlock(&vm->invalidated_lock);
+
+	amdgpu_vm_assert_locked(vm);
list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) {
struct amdgpu_bo *bo = vm_bo->bo;
@@ -308,14 +317,13 @@ static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)
else if (bo->parent)
list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
}
- spin_unlock(&vm->status_lock);
}
/**
* amdgpu_vm_update_shared - helper to update shared memory stat
* @base: base structure for tracking BO usage in a VM
*
- * Takes the vm status_lock and updates the shared memory stat. If the basic
+ * Takes the vm stats_lock and updates the shared memory stat. If the basic
* stat changed (e.g. buffer was moved) amdgpu_vm_update_stats need to be called
* as well.
*/
@@ -327,7 +335,8 @@ static void amdgpu_vm_update_shared(struct amdgpu_vm_bo_base *base)
uint32_t bo_memtype = amdgpu_bo_mem_stats_placement(bo);
bool shared;
- spin_lock(&vm->status_lock);
+ dma_resv_assert_held(bo->tbo.base.resv);
+ spin_lock(&vm->stats_lock);
shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base);
if (base->shared != shared) {
base->shared = shared;
@@ -339,7 +348,7 @@ static void amdgpu_vm_update_shared(struct amdgpu_vm_bo_base *base)
vm->stats[bo_memtype].drm.private += size;
}
}
- spin_unlock(&vm->status_lock);
+ spin_unlock(&vm->stats_lock);
}
/**
@@ -364,11 +373,11 @@ void amdgpu_vm_bo_update_shared(struct amdgpu_bo *bo)
* be bo->tbo.resource
* @sign: if we should add (+1) or subtract (-1) from the stat
*
- * Caller need to have the vm status_lock held. Useful for when multiple update
+ * Caller needs to hold the vm stats_lock. Useful when multiple updates
* need to happen at the same time.
*/
static void amdgpu_vm_update_stats_locked(struct amdgpu_vm_bo_base *base,
- struct ttm_resource *res, int sign)
+ struct ttm_resource *res, int sign)
{
struct amdgpu_vm *vm = base->vm;
struct amdgpu_bo *bo = base->bo;
@@ -392,7 +401,8 @@ static void amdgpu_vm_update_stats_locked(struct amdgpu_vm_bo_base *base,
*/
if (bo->flags & AMDGPU_GEM_CREATE_DISCARDABLE)
vm->stats[res_memtype].drm.purgeable += size;
- if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(res_memtype)))
+ if (!(bo->preferred_domains &
+ amdgpu_mem_type_to_domain(res_memtype)))
vm->stats[bo_memtype].evicted += size;
}
}
@@ -411,9 +421,9 @@ void amdgpu_vm_update_stats(struct amdgpu_vm_bo_base *base,
{
struct amdgpu_vm *vm = base->vm;
- spin_lock(&vm->status_lock);
+ spin_lock(&vm->stats_lock);
amdgpu_vm_update_stats_locked(base, res, sign);
- spin_unlock(&vm->status_lock);
+ spin_unlock(&vm->stats_lock);
}
/**
@@ -439,10 +449,10 @@ void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
base->next = bo->vm_bo;
bo->vm_bo = base;
- spin_lock(&vm->status_lock);
+ spin_lock(&vm->stats_lock);
base->shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base);
amdgpu_vm_update_stats_locked(base, bo->tbo.resource, +1);
- spin_unlock(&vm->status_lock);
+ spin_unlock(&vm->stats_lock);
if (!amdgpu_vm_is_bo_always_valid(vm, bo))
return;
@@ -485,6 +495,42 @@ int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
}
/**
+ * amdgpu_vm_lock_done_list - lock all BOs on the done list
+ * @vm: vm providing the BOs
+ * @exec: drm execution context
+ * @num_fences: number of extra fences to reserve
+ *
+ * Lock the BOs on the done list in the DRM execution context.
+ */
+int amdgpu_vm_lock_done_list(struct amdgpu_vm *vm, struct drm_exec *exec,
+ unsigned int num_fences)
+{
+ struct list_head *prev = &vm->done;
+ struct amdgpu_bo_va *bo_va;
+ struct amdgpu_bo *bo;
+ int ret;
+
+ /* We can only trust prev->next while holding the lock */
+ spin_lock(&vm->invalidated_lock);
+ while (!list_is_head(prev->next, &vm->done)) {
+ bo_va = list_entry(prev->next, typeof(*bo_va), base.vm_status);
+ spin_unlock(&vm->invalidated_lock);
+
+ bo = bo_va->base.bo;
+ if (bo) {
+ ret = drm_exec_prepare_obj(exec, &bo->tbo.base, 1);
+ if (unlikely(ret))
+ return ret;
+ }
+ spin_lock(&vm->invalidated_lock);
+ prev = prev->next;
+ }
+ spin_unlock(&vm->invalidated_lock);
+
+ return 0;
+}
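
Editor's note: unlike the invalidated-list drains elsewhere in this series, entries stay on vm->done, so restarting from the head after dropping the lock would revisit the same BO forever; instead a prev cursor remembers the last processed node and only prev->next is trusted under the lock. The bare pattern, names illustrative:

	/* walk a list whose entries are NOT removed, dropping the lock per item */
	struct list_head *prev = &head;

	spin_lock(&lock);
	while (!list_is_head(prev->next, &head)) {
		struct item *it = list_entry(prev->next, struct item, node);

		spin_unlock(&lock);	/* sleeping work; 'it' stays on the list */
		sleeping_work(it);
		spin_lock(&lock);
		prev = prev->next;	/* advance only while holding the lock */
	}
	spin_unlock(&lock);
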
+
+/**
* amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
*
* @adev: amdgpu device pointer
@@ -575,7 +621,7 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
void *param)
{
uint64_t new_vm_generation = amdgpu_vm_generation(adev, vm);
- struct amdgpu_vm_bo_base *bo_base;
+ struct amdgpu_vm_bo_base *bo_base, *tmp;
struct amdgpu_bo *bo;
int r;
@@ -588,13 +634,7 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
return r;
}
- spin_lock(&vm->status_lock);
- while (!list_empty(&vm->evicted)) {
- bo_base = list_first_entry(&vm->evicted,
- struct amdgpu_vm_bo_base,
- vm_status);
- spin_unlock(&vm->status_lock);
-
+ list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
bo = bo_base->bo;
r = validate(param, bo);
@@ -607,37 +647,21 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
vm->update_funcs->map_table(to_amdgpu_bo_vm(bo));
amdgpu_vm_bo_relocated(bo_base);
}
- spin_lock(&vm->status_lock);
}
- while (ticket && !list_empty(&vm->evicted_user)) {
- bo_base = list_first_entry(&vm->evicted_user,
- struct amdgpu_vm_bo_base,
- vm_status);
- spin_unlock(&vm->status_lock);
-
- bo = bo_base->bo;
- if (dma_resv_locking_ctx(bo->tbo.base.resv) != ticket) {
- struct amdgpu_task_info *ti = amdgpu_vm_get_task_info_vm(vm);
+ if (ticket) {
+ list_for_each_entry_safe(bo_base, tmp, &vm->evicted_user,
+ vm_status) {
+ bo = bo_base->bo;
+ dma_resv_assert_held(bo->tbo.base.resv);
- pr_warn_ratelimited("Evicted user BO is not reserved\n");
- if (ti) {
- pr_warn_ratelimited("pid %d\n", ti->task.pid);
- amdgpu_vm_put_task_info(ti);
- }
+ r = validate(param, bo);
+ if (r)
+ return r;
- return -EINVAL;
+ amdgpu_vm_bo_invalidated(bo_base);
}
-
- r = validate(param, bo);
- if (r)
- return r;
-
- amdgpu_vm_bo_invalidated(bo_base);
-
- spin_lock(&vm->status_lock);
}
- spin_unlock(&vm->status_lock);
amdgpu_vm_eviction_lock(vm);
vm->evicting = false;
@@ -660,13 +684,13 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
{
bool ret;
+ amdgpu_vm_assert_locked(vm);
+
amdgpu_vm_eviction_lock(vm);
ret = !vm->evicting;
amdgpu_vm_eviction_unlock(vm);
- spin_lock(&vm->status_lock);
ret &= list_empty(&vm->evicted);
- spin_unlock(&vm->status_lock);
spin_lock(&vm->immediate.lock);
ret &= !vm->immediate.stopped;
@@ -957,16 +981,13 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
struct amdgpu_vm *vm, bool immediate)
{
struct amdgpu_vm_update_params params;
- struct amdgpu_vm_bo_base *entry;
+ struct amdgpu_vm_bo_base *entry, *tmp;
bool flush_tlb_needed = false;
- LIST_HEAD(relocated);
int r, idx;
- spin_lock(&vm->status_lock);
- list_splice_init(&vm->relocated, &relocated);
- spin_unlock(&vm->status_lock);
+ amdgpu_vm_assert_locked(vm);
- if (list_empty(&relocated))
+ if (list_empty(&vm->relocated))
return 0;
if (!drm_dev_enter(adev_to_drm(adev), &idx))
@@ -977,11 +998,12 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
params.vm = vm;
params.immediate = immediate;
- r = vm->update_funcs->prepare(&params, NULL);
+ r = vm->update_funcs->prepare(&params, NULL,
+ AMDGPU_KERNEL_JOB_ID_VM_UPDATE_PDES);
if (r)
goto error;
- list_for_each_entry(entry, &relocated, vm_status) {
+ list_for_each_entry(entry, &vm->relocated, vm_status) {
/* vm_flush_needed after updating moved PDEs */
flush_tlb_needed |= entry->moved;
@@ -997,9 +1019,7 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
if (flush_tlb_needed)
atomic64_inc(&vm->tlb_seq);
- while (!list_empty(&relocated)) {
- entry = list_first_entry(&relocated, struct amdgpu_vm_bo_base,
- vm_status);
+ list_for_each_entry_safe(entry, tmp, &vm->relocated, vm_status) {
amdgpu_vm_bo_idle(entry);
}
@@ -1146,7 +1166,8 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
dma_fence_put(tmp);
}
- r = vm->update_funcs->prepare(&params, sync);
+ r = vm->update_funcs->prepare(&params, sync,
+ AMDGPU_KERNEL_JOB_ID_VM_UPDATE_RANGE);
if (r)
goto error_free;
@@ -1225,9 +1246,9 @@ error_free:
void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM])
{
- spin_lock(&vm->status_lock);
+ spin_lock(&vm->stats_lock);
memcpy(stats, vm->stats, sizeof(*stats) * __AMDGPU_PL_NUM);
- spin_unlock(&vm->status_lock);
+ spin_unlock(&vm->stats_lock);
}
/**
@@ -1339,13 +1360,14 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
/* Normally bo_va->flags only contains the READABLE and WRITEABLE bits,
 * but just in case we filter the flags here first.
 */
- if (!(mapping->flags & AMDGPU_PTE_READABLE))
+ if (!(mapping->flags & AMDGPU_VM_PAGE_READABLE))
update_flags &= ~AMDGPU_PTE_READABLE;
- if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
+ if (!(mapping->flags & AMDGPU_VM_PAGE_WRITEABLE))
update_flags &= ~AMDGPU_PTE_WRITEABLE;
/* Apply ASIC specific mapping flags */
- amdgpu_gmc_get_vm_pte(adev, mapping, &update_flags);
+ amdgpu_gmc_get_vm_pte(adev, vm, bo, mapping->flags,
+ &update_flags);
trace_amdgpu_vm_bo_update(mapping);
@@ -1486,7 +1508,7 @@ static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *mapping,
struct dma_fence *fence)
{
- if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev))
+ if (mapping->flags & AMDGPU_VM_PAGE_PRT)
amdgpu_vm_add_prt_cb(adev, fence);
kfree(mapping);
}
@@ -1593,29 +1615,24 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct ww_acquire_ctx *ticket)
{
- struct amdgpu_bo_va *bo_va;
+ struct amdgpu_bo_va *bo_va, *tmp;
struct dma_resv *resv;
bool clear, unlock;
int r;
- spin_lock(&vm->status_lock);
- while (!list_empty(&vm->moved)) {
- bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va,
- base.vm_status);
- spin_unlock(&vm->status_lock);
-
+ list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
/* Per VM BOs never need to bo cleared in the page tables */
r = amdgpu_vm_bo_update(adev, bo_va, false);
if (r)
return r;
- spin_lock(&vm->status_lock);
}
+ spin_lock(&vm->invalidated_lock);
while (!list_empty(&vm->invalidated)) {
bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
base.vm_status);
resv = bo_va->base.bo->tbo.base.resv;
- spin_unlock(&vm->status_lock);
+ spin_unlock(&vm->invalidated_lock);
/* Try to reserve the BO to avoid clearing its ptes */
if (!adev->debug_vm && dma_resv_trylock(resv)) {
@@ -1647,9 +1664,9 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
bo_va->base.bo->tbo.resource->mem_type == TTM_PL_SYSTEM))
amdgpu_vm_bo_evicted_user(&bo_va->base);
- spin_lock(&vm->status_lock);
+ spin_lock(&vm->invalidated_lock);
}
- spin_unlock(&vm->status_lock);
+ spin_unlock(&vm->invalidated_lock);
return 0;
}
@@ -1765,7 +1782,7 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
list_add(&mapping->list, &bo_va->invalids);
amdgpu_vm_it_insert(mapping, &vm->va);
- if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev))
+ if (mapping->flags & AMDGPU_VM_PAGE_PRT)
amdgpu_vm_prt_get(adev);
if (amdgpu_vm_is_bo_always_valid(vm, bo) && !bo_va->base.moved)
@@ -1825,7 +1842,7 @@ static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev,
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
uint64_t saddr, uint64_t offset,
- uint64_t size, uint64_t flags)
+ uint64_t size, uint32_t flags)
{
struct amdgpu_bo_va_mapping *mapping, *tmp;
struct amdgpu_bo *bo = bo_va->base.bo;
@@ -1884,7 +1901,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
uint64_t saddr, uint64_t offset,
- uint64_t size, uint64_t flags)
+ uint64_t size, uint32_t flags)
{
struct amdgpu_bo_va_mapping *mapping;
struct amdgpu_bo *bo = bo_va->base.bo;
@@ -2178,9 +2195,9 @@ void amdgpu_vm_bo_del(struct amdgpu_device *adev,
}
}
- spin_lock(&vm->status_lock);
+ spin_lock(&vm->invalidated_lock);
list_del(&bo_va->base.vm_status);
- spin_unlock(&vm->status_lock);
+ spin_unlock(&vm->invalidated_lock);
list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
list_del(&mapping->list);
@@ -2288,10 +2305,10 @@ void amdgpu_vm_bo_move(struct amdgpu_bo *bo, struct ttm_resource *new_mem,
for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
struct amdgpu_vm *vm = bo_base->vm;
- spin_lock(&vm->status_lock);
+ spin_lock(&vm->stats_lock);
amdgpu_vm_update_stats_locked(bo_base, bo->tbo.resource, -1);
amdgpu_vm_update_stats_locked(bo_base, new_mem, +1);
- spin_unlock(&vm->status_lock);
+ spin_unlock(&vm->stats_lock);
}
amdgpu_vm_bo_invalidate(bo, evicted);
@@ -2558,11 +2575,12 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
INIT_LIST_HEAD(&vm->relocated);
INIT_LIST_HEAD(&vm->moved);
INIT_LIST_HEAD(&vm->idle);
+ spin_lock_init(&vm->invalidated_lock);
INIT_LIST_HEAD(&vm->invalidated);
- spin_lock_init(&vm->status_lock);
INIT_LIST_HEAD(&vm->freed);
INIT_LIST_HEAD(&vm->done);
INIT_KFIFO(vm->faults);
+ spin_lock_init(&vm->stats_lock);
r = amdgpu_vm_init_entities(adev, vm);
if (r)
@@ -2741,7 +2759,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
dma_fence_put(vm->last_tlb_flush);
list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
- if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev) && prt_fini_needed) {
+ if (mapping->flags & AMDGPU_VM_PAGE_PRT && prt_fini_needed) {
amdgpu_vm_prt_fini(adev, vm);
prt_fini_needed = false;
}
@@ -2772,10 +2790,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
dma_fence_put(vm->last_update);
for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) {
- if (vm->reserved_vmid[i]) {
- amdgpu_vmid_free_reserved(adev, i);
- vm->reserved_vmid[i] = false;
- }
+ amdgpu_vmid_free_reserved(adev, vm, i);
}
ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move);
@@ -2871,6 +2886,7 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
union drm_amdgpu_vm *args = data;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ struct amdgpu_vm *vm = &fpriv->vm;
/* No valid flags defined yet */
if (args->in.flags)
@@ -2879,17 +2895,10 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
switch (args->in.op) {
case AMDGPU_VM_OP_RESERVE_VMID:
/* We only need to reserve a vmid from the gfxhub */
- if (!fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) {
- amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(0));
- fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = true;
- }
-
+ amdgpu_vmid_alloc_reserved(adev, vm, AMDGPU_GFXHUB(0));
break;
case AMDGPU_VM_OP_UNRESERVE_VMID:
- if (fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) {
- amdgpu_vmid_free_reserved(adev, AMDGPU_GFXHUB(0));
- fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = false;
- }
+ amdgpu_vmid_free_reserved(adev, vm, AMDGPU_GFXHUB(0));
break;
default:
return -EINVAL;
@@ -3027,7 +3036,8 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
unsigned int total_done_objs = 0;
unsigned int id = 0;
- spin_lock(&vm->status_lock);
+ amdgpu_vm_assert_locked(vm);
+
seq_puts(m, "\tIdle BOs:\n");
list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
if (!bo_va->base.bo)
@@ -3065,11 +3075,13 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
id = 0;
seq_puts(m, "\tInvalidated BOs:\n");
+ spin_lock(&vm->invalidated_lock);
list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
if (!bo_va->base.bo)
continue;
total_invalidated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
}
+ spin_unlock(&vm->invalidated_lock);
total_invalidated_objs = id;
id = 0;
@@ -3079,7 +3091,6 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
continue;
total_done += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
}
- spin_unlock(&vm->status_lock);
total_done_objs = id;
seq_printf(m, "\tTotal idle size: %12lld\tobjs:\t%d\n", total_idle,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index fd086efd8457..adc5c9161fa8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -203,11 +203,11 @@ struct amdgpu_vm_bo_base {
/* protected by bo being reserved */
struct amdgpu_vm_bo_base *next;
- /* protected by vm status_lock */
+ /* protected by vm reservation and invalidated_lock */
struct list_head vm_status;
/* if the bo is counted as shared in mem stats
- * protected by vm status_lock */
+ * protected by vm BO being reserved */
bool shared;
/* protected by the BO being reserved */
@@ -308,7 +308,7 @@ struct amdgpu_vm_update_params {
struct amdgpu_vm_update_funcs {
int (*map_table)(struct amdgpu_bo_vm *bo);
int (*prepare)(struct amdgpu_vm_update_params *p,
- struct amdgpu_sync *sync);
+ struct amdgpu_sync *sync, u64 k_job_id);
int (*update)(struct amdgpu_vm_update_params *p,
struct amdgpu_bo_vm *bo, uint64_t pe, uint64_t addr,
unsigned count, uint32_t incr, uint64_t flags);
@@ -343,18 +343,22 @@ struct amdgpu_vm {
bool evicting;
unsigned int saved_flags;
- /* Lock to protect vm_bo add/del/move on all lists of vm */
- spinlock_t status_lock;
-
- /* Memory statistics for this vm, protected by status_lock */
+ /* Memory statistics for this vm, protected by stats_lock */
+ spinlock_t stats_lock;
struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM];
+ /*
+ * The following lists contain amdgpu_vm_bo_base objects for either
+	 * PDs, PTs or per-VM BOs. The state transitions are:
+ *
+ * evicted -> relocated (PDs, PTs) or moved (per VM BOs) -> idle
+ *
+ * Lists are protected by the root PD dma_resv lock.
+ */
+
/* Per-VM and PT BOs that need a validation */
struct list_head evicted;
- /* BOs for user mode queues that need a validation */
- struct list_head evicted_user;
-
/* PT BOs which relocated and their parent need an update */
struct list_head relocated;
@@ -364,15 +368,32 @@ struct amdgpu_vm {
/* All BOs of this VM not currently in the state machine */
struct list_head idle;
+ /*
+ * The following lists contain amdgpu_vm_bo_base objects for BOs which
+	 * have their own dma_resv object and do not depend on the root PD. Their
+	 * state transitions are:
+ *
+ * evicted_user or invalidated -> done
+ *
+ * Lists are protected by the invalidated_lock.
+ */
+ spinlock_t invalidated_lock;
+
+ /* BOs for user mode queues that need a validation */
+ struct list_head evicted_user;
+
/* regular invalidated BOs, but not yet updated in the PT */
struct list_head invalidated;
- /* BO mappings freed, but not yet updated in the PT */
- struct list_head freed;
-
/* BOs which were invalidated and have been updated in the PTs */
struct list_head done;
+ /*
+ * This list contains amdgpu_bo_va_mapping objects which have been freed
+	 * but not yet updated in the PTs
+ */
+ struct list_head freed;
+
/* contains the page directory */
struct amdgpu_vm_bo_base root;
struct dma_fence *last_update;
@@ -394,7 +415,7 @@ struct amdgpu_vm {
struct dma_fence *last_unlocked;
unsigned int pasid;
- bool reserved_vmid[AMDGPU_MAX_VMHUBS];
+ struct amdgpu_vmid *reserved_vmid[AMDGPU_MAX_VMHUBS];
/* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */
bool use_cpu_for_update;
@@ -491,6 +512,8 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
unsigned int num_fences);
+int amdgpu_vm_lock_done_list(struct amdgpu_vm *vm, struct drm_exec *exec,
+ unsigned int num_fences);
bool amdgpu_vm_ready(struct amdgpu_vm *vm);
uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm);
int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
@@ -538,11 +561,11 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
uint64_t addr, uint64_t offset,
- uint64_t size, uint64_t flags);
+ uint64_t size, uint32_t flags);
int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
uint64_t addr, uint64_t offset,
- uint64_t size, uint64_t flags);
+ uint64_t size, uint32_t flags);
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
uint64_t addr);
@@ -670,4 +693,9 @@ void amdgpu_vm_tlb_fence_create(struct amdgpu_device *adev,
void amdgpu_vm_print_task_info(struct amdgpu_device *adev,
struct amdgpu_task_info *task_info);
+#define amdgpu_vm_bo_va_for_each_valid_mapping(bo_va, mapping) \
+ list_for_each_entry(mapping, &(bo_va)->valids, list)
+#define amdgpu_vm_bo_va_for_each_invalid_mapping(bo_va, mapping) \
+ list_for_each_entry(mapping, &(bo_va)->invalids, list)
+
#endif
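A hedged usage sketch for the two iterator macros added above; the trace call is only an example consumer (trace_amdgpu_vm_bo_mapping already exists in amdgpu_trace.h), and a real caller would hold the reservation protecting the valids/invalids lists:

static void example_dump_mappings(struct amdgpu_bo_va *bo_va)
{
	struct amdgpu_bo_va_mapping *mapping;

	/* mappings already written to the page tables */
	amdgpu_vm_bo_va_for_each_valid_mapping(bo_va, mapping)
		trace_amdgpu_vm_bo_mapping(mapping);

	/* mappings still waiting for a page-table update */
	amdgpu_vm_bo_va_for_each_invalid_mapping(bo_va, mapping)
		trace_amdgpu_vm_bo_mapping(mapping);
}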
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
index 0c1ef5850a5e..22e2e5b47341 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
@@ -40,12 +40,14 @@ static int amdgpu_vm_cpu_map_table(struct amdgpu_bo_vm *table)
*
* @p: see amdgpu_vm_update_params definition
* @sync: sync obj with fences to wait on
+ * @k_job_id: the id for tracing/debug purposes
*
* Returns:
* Negative errno, 0 for success.
*/
static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p,
- struct amdgpu_sync *sync)
+ struct amdgpu_sync *sync,
+ u64 k_job_id)
{
if (!sync)
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
index 30022123b0bf..7a4c12ff9b18 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
@@ -26,6 +26,7 @@
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_vm.h"
+#include "amdgpu_job.h"
/*
* amdgpu_vm_pt_cursor - state for for_each_amdgpu_vm_pt
@@ -395,7 +396,8 @@ int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
params.vm = vm;
params.immediate = immediate;
- r = vm->update_funcs->prepare(&params, NULL);
+ r = vm->update_funcs->prepare(&params, NULL,
+ AMDGPU_KERNEL_JOB_ID_VM_PT_CLEAR);
if (r)
goto exit;
@@ -541,9 +543,7 @@ static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
entry->bo->vm_bo = NULL;
ttm_bo_set_bulk_move(&entry->bo->tbo, NULL);
- spin_lock(&entry->vm->status_lock);
list_del(&entry->vm_status);
- spin_unlock(&entry->vm->status_lock);
amdgpu_bo_unref(&entry->bo);
}
@@ -587,7 +587,6 @@ static void amdgpu_vm_pt_add_list(struct amdgpu_vm_update_params *params,
struct amdgpu_vm_pt_cursor seek;
struct amdgpu_vm_bo_base *entry;
- spin_lock(&params->vm->status_lock);
for_each_amdgpu_vm_pt_dfs_safe(params->adev, params->vm, cursor, seek, entry) {
if (entry && entry->bo)
list_move(&entry->vm_status, &params->tlb_flush_waitlist);
@@ -595,7 +594,6 @@ static void amdgpu_vm_pt_add_list(struct amdgpu_vm_update_params *params,
/* enter start node now */
list_move(&cursor->entry->vm_status, &params->tlb_flush_waitlist);
- spin_unlock(&params->vm->status_lock);
}
/**
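The status_lock removals in the hunks above rely on the PT state lists now being covered by the root PD reservation, which the series checks with amdgpu_vm_assert_locked() (seen in the debugfs hunk earlier). A hedged sketch of what such an assertion can look like; this body is an assumption, not taken from the patch:

static void example_vm_assert_locked(struct amdgpu_vm *vm)
{
	/* trip lockdep if the root PD reservation is not held */
	dma_resv_assert_held(vm->root.bo->tbo.base.resv);
}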
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index 46d9fb433ab2..36805dcfa159 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -40,7 +40,7 @@ static int amdgpu_vm_sdma_map_table(struct amdgpu_bo_vm *table)
/* Allocate a new job for @count PTE updates */
static int amdgpu_vm_sdma_alloc_job(struct amdgpu_vm_update_params *p,
- unsigned int count)
+ unsigned int count, u64 k_job_id)
{
enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
: AMDGPU_IB_POOL_DELAYED;
@@ -56,7 +56,7 @@ static int amdgpu_vm_sdma_alloc_job(struct amdgpu_vm_update_params *p,
ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW);
r = amdgpu_job_alloc_with_ib(p->adev, entity, AMDGPU_FENCE_OWNER_VM,
- ndw * 4, pool, &p->job);
+ ndw * 4, pool, &p->job, k_job_id);
if (r)
return r;
@@ -69,16 +69,17 @@ static int amdgpu_vm_sdma_alloc_job(struct amdgpu_vm_update_params *p,
*
* @p: see amdgpu_vm_update_params definition
* @sync: amdgpu_sync object with fences to wait for
+ * @k_job_id: identifier of the job, for tracing purposes
*
* Returns:
* Negative errno, 0 for success.
*/
static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
- struct amdgpu_sync *sync)
+ struct amdgpu_sync *sync, u64 k_job_id)
{
int r;
- r = amdgpu_vm_sdma_alloc_job(p, 0);
+ r = amdgpu_vm_sdma_alloc_job(p, 0, k_job_id);
if (r)
return r;
@@ -249,7 +250,8 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
if (r)
return r;
- r = amdgpu_vm_sdma_alloc_job(p, count);
+ r = amdgpu_vm_sdma_alloc_job(p, count,
+ AMDGPU_KERNEL_JOB_ID_VM_UPDATE);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
index 121ee17b522b..474bfe36c0c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
@@ -379,9 +379,10 @@ static int vpe_sw_init(struct amdgpu_ip_block *ip_block)
if (ret)
goto out;
- /* TODO: Add queue reset mask when FW fully supports it */
adev->vpe.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->vpe.ring);
+ if (!amdgpu_sriov_vf(adev))
+ adev->vpe.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
ret = amdgpu_vpe_sysfs_reset_mask_init(adev);
if (ret)
goto out;
@@ -435,6 +436,8 @@ static int vpe_hw_fini(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_vpe *vpe = &adev->vpe;
+ cancel_delayed_work_sync(&adev->vpe.idle_work);
+
vpe_ring_stop(vpe);
/* Power off VPE */
@@ -445,10 +448,6 @@ static int vpe_hw_fini(struct amdgpu_ip_block *ip_block)
static int vpe_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
-
- cancel_delayed_work_sync(&adev->vpe.idle_work);
-
return vpe_hw_fini(ip_block);
}
@@ -874,6 +873,27 @@ static void vpe_ring_end_use(struct amdgpu_ring *ring)
schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT);
}
+static int vpe_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int r;
+
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+
+ r = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE,
+ AMD_PG_STATE_GATE);
+ if (r)
+ return r;
+ r = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE,
+ AMD_PG_STATE_UNGATE);
+ if (r)
+ return r;
+
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
+}
+
static ssize_t amdgpu_get_vpe_reset_mask(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -942,6 +962,7 @@ static const struct amdgpu_ring_funcs vpe_ring_funcs = {
.preempt_ib = vpe_ring_preempt_ib,
.begin_use = vpe_ring_begin_use,
.end_use = vpe_ring_end_use,
+ .reset = vpe_ring_reset,
};
static void vpe_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 78f9e86ccc09..a5adb2ed9b3c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -396,43 +396,33 @@ out:
return ret;
}
-static void amdgpu_dummy_vram_mgr_debug(struct ttm_resource_manager *man,
- struct drm_printer *printer)
-{
- DRM_DEBUG_DRIVER("Dummy vram mgr debug\n");
-}
-
-static bool amdgpu_dummy_vram_mgr_compatible(struct ttm_resource_manager *man,
- struct ttm_resource *res,
- const struct ttm_place *place,
- size_t size)
+int amdgpu_vram_mgr_query_address_block_info(struct amdgpu_vram_mgr *mgr,
+ uint64_t address, struct amdgpu_vram_block_info *info)
{
- DRM_DEBUG_DRIVER("Dummy vram mgr compatible\n");
- return false;
-}
+ struct amdgpu_vram_mgr_resource *vres;
+ struct drm_buddy_block *block;
+ u64 start, size;
+ int ret = -ENOENT;
-static bool amdgpu_dummy_vram_mgr_intersects(struct ttm_resource_manager *man,
- struct ttm_resource *res,
- const struct ttm_place *place,
- size_t size)
-{
- DRM_DEBUG_DRIVER("Dummy vram mgr intersects\n");
- return true;
-}
+ mutex_lock(&mgr->lock);
+ list_for_each_entry(vres, &mgr->allocated_vres_list, vres_node) {
+ list_for_each_entry(block, &vres->blocks, link) {
+ start = amdgpu_vram_mgr_block_start(block);
+ size = amdgpu_vram_mgr_block_size(block);
+ if ((start <= address) && (address < (start + size))) {
+ info->start = start;
+ info->size = size;
+ memcpy(&info->task, &vres->task, sizeof(vres->task));
+ ret = 0;
+ goto out;
+ }
+ }
+ }
-static void amdgpu_dummy_vram_mgr_del(struct ttm_resource_manager *man,
- struct ttm_resource *res)
-{
- DRM_DEBUG_DRIVER("Dummy vram mgr deleted\n");
-}
+out:
+ mutex_unlock(&mgr->lock);
-static int amdgpu_dummy_vram_mgr_new(struct ttm_resource_manager *man,
- struct ttm_buffer_object *tbo,
- const struct ttm_place *place,
- struct ttm_resource **res)
-{
- DRM_DEBUG_DRIVER("Dummy vram mgr new\n");
- return -ENOSPC;
+ return ret;
}
/**
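A hedged usage sketch for the new lookup helper; fault_addr and the surrounding handler are placeholders, not part of the patch:

void example_report_owner(struct amdgpu_device *adev, u64 fault_addr)
{
	struct amdgpu_vram_block_info info;

	if (!amdgpu_vram_mgr_query_address_block_info(&adev->mman.vram_mgr,
						      fault_addr, &info))
		dev_info(adev->dev,
			 "0x%llx is in block [0x%llx, +0x%llx) allocated by %s (pid %d)\n",
			 fault_addr, info.start, info.size,
			 info.task.comm, info.task.pid);
}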
@@ -568,6 +558,10 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
remaining_size -= size;
}
+ vres->task.pid = task_pid_nr(current);
+ get_task_comm(vres->task.comm, current);
+ list_add_tail(&vres->vres_node, &mgr->allocated_vres_list);
+
if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS && adjust_dcc_size) {
struct drm_buddy_block *dcc_block;
unsigned long dcc_start;
@@ -645,6 +639,10 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
uint64_t vis_usage = 0;
mutex_lock(&mgr->lock);
+
+ list_del(&vres->vres_node);
+ memset(&vres->task, 0, sizeof(vres->task));
+
list_for_each_entry(block, &vres->blocks, link)
vis_usage += amdgpu_vram_mgr_vis_size(adev, block);
@@ -895,14 +893,6 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
mutex_unlock(&mgr->lock);
}
-static const struct ttm_resource_manager_func amdgpu_dummy_vram_mgr_func = {
- .alloc = amdgpu_dummy_vram_mgr_new,
- .free = amdgpu_dummy_vram_mgr_del,
- .intersects = amdgpu_dummy_vram_mgr_intersects,
- .compatible = amdgpu_dummy_vram_mgr_compatible,
- .debug = amdgpu_dummy_vram_mgr_debug
-};
-
static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = {
.alloc = amdgpu_vram_mgr_new,
.free = amdgpu_vram_mgr_del,
@@ -933,18 +923,13 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev)
mutex_init(&mgr->lock);
INIT_LIST_HEAD(&mgr->reservations_pending);
INIT_LIST_HEAD(&mgr->reserved_pages);
+ INIT_LIST_HEAD(&mgr->allocated_vres_list);
mgr->default_page_size = PAGE_SIZE;
- if (!adev->gmc.is_app_apu) {
- man->func = &amdgpu_vram_mgr_func;
-
- err = drm_buddy_init(&mgr->mm, man->size, PAGE_SIZE);
- if (err)
- return err;
- } else {
- man->func = &amdgpu_dummy_vram_mgr_func;
- DRM_INFO("Setup dummy vram mgr\n");
- }
+ man->func = &amdgpu_vram_mgr_func;
+ err = drm_buddy_init(&mgr->mm, man->size, PAGE_SIZE);
+ if (err)
+ return err;
ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, &mgr->manager);
ttm_resource_manager_set_used(man, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
index 2c88d5fd87da..5f5fd9a911c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
@@ -35,12 +35,26 @@ struct amdgpu_vram_mgr {
struct list_head reserved_pages;
atomic64_t vis_usage;
u64 default_page_size;
+ struct list_head allocated_vres_list;
+};
+
+struct amdgpu_vres_task {
+ pid_t pid;
+ char comm[TASK_COMM_LEN];
+};
+
+struct amdgpu_vram_block_info {
+ u64 start;
+ u64 size;
+ struct amdgpu_vres_task task;
};
struct amdgpu_vram_mgr_resource {
struct ttm_resource base;
struct list_head blocks;
unsigned long flags;
+ struct list_head vres_node;
+ struct amdgpu_vres_task task;
};
static inline u64 amdgpu_vram_mgr_block_start(struct drm_buddy_block *block)
@@ -72,4 +86,7 @@ static inline void amdgpu_vram_mgr_set_cleared(struct ttm_resource *res)
ares->flags |= DRM_BUDDY_CLEARED;
}
+int amdgpu_vram_mgr_query_address_block_info(struct amdgpu_vram_mgr *mgr,
+ uint64_t address, struct amdgpu_vram_block_info *info);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
index c417f8689220..1083db8cea2e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
@@ -120,6 +120,25 @@ static void __amdgpu_xcp_add_block(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
xcp->valid = true;
}
+static void __amdgpu_xcp_set_unique_id(struct amdgpu_xcp_mgr *xcp_mgr,
+ int xcp_id)
+{
+ struct amdgpu_xcp *xcp = &xcp_mgr->xcp[xcp_id];
+ struct amdgpu_device *adev = xcp_mgr->adev;
+ uint32_t inst_mask;
+ uint64_t uid;
+ int i;
+
+ if (!amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask) &&
+ inst_mask) {
+ i = GET_INST(GC, (ffs(inst_mask) - 1));
+ uid = amdgpu_device_get_uid(xcp_mgr->adev->uid_info,
+ AMDGPU_UID_TYPE_XCD, i);
+ if (uid)
+ xcp->unique_id = uid;
+ }
+}
+
int amdgpu_xcp_init(struct amdgpu_xcp_mgr *xcp_mgr, int num_xcps, int mode)
{
struct amdgpu_device *adev = xcp_mgr->adev;
@@ -158,6 +177,7 @@ int amdgpu_xcp_init(struct amdgpu_xcp_mgr *xcp_mgr, int num_xcps, int mode)
else
xcp_mgr->xcp[i].mem_id = mem_id;
}
+ __amdgpu_xcp_set_unique_id(xcp_mgr, i);
}
xcp_mgr->num_xcps = num_xcps;
@@ -406,6 +426,7 @@ void amdgpu_xcp_dev_unplug(struct amdgpu_device *adev)
p_ddev->primary->dev = adev->xcp_mgr->xcp[i].pdev;
p_ddev->driver = adev->xcp_mgr->xcp[i].driver;
p_ddev->vma_offset_manager = adev->xcp_mgr->xcp[i].vma_offset_manager;
+ amdgpu_xcp_drm_dev_free(p_ddev);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
index 70a0f8400b57..1928d9e224fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
@@ -112,6 +112,7 @@ struct amdgpu_xcp {
struct amdgpu_sched gpu_sched[AMDGPU_HW_IP_NUM][AMDGPU_RING_PRIO_MAX];
struct amdgpu_xcp_mgr *xcp_mgr;
struct kobject kobj;
+ uint64_t unique_id;
};
struct amdgpu_xcp_mgr {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
index bba0b26fee8f..5f36aff17e79 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -126,4 +126,8 @@ uint32_t amdgpu_xgmi_get_max_bandwidth(struct amdgpu_device *adev);
void amgpu_xgmi_set_max_speed_width(struct amdgpu_device *adev,
uint16_t max_speed, uint8_t max_width);
+
+/* Cleanup macro for use with __free(xgmi_put_hive) */
+DEFINE_FREE(xgmi_put_hive, struct amdgpu_hive_info *, if (_T) amdgpu_put_xgmi_hive(_T))
+
#endif
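A hedged usage sketch of the new cleanup class: amdgpu_get_xgmi_hive() is the matching acquire, and the __free(xgmi_put_hive) annotation drops the reference on every return path (the function itself is illustrative):

static int example_with_hive(struct amdgpu_device *adev)
{
	struct amdgpu_hive_info *hive __free(xgmi_put_hive) =
		amdgpu_get_xgmi_hive(adev);

	if (!hive)
		return -ENODEV;

	/* use hive; amdgpu_put_xgmi_hive() runs automatically on return */
	return 0;
}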
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
index 33edad1f9dcd..3a79ed7d8031 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
@@ -405,12 +405,17 @@ struct amd_sriov_ras_cper_dump {
uint32_t buf[];
};
+struct amd_sriov_ras_chk_criti {
+ uint32_t hit;
+};
+
struct amdsriov_ras_telemetry {
struct amd_sriov_ras_telemetry_header header;
union {
struct amd_sriov_ras_telemetry_error_count error_count;
struct amd_sriov_ras_cper_dump cper_dump;
+ struct amd_sriov_ras_chk_criti chk_criti;
} body;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c
index 427b073de2fc..7a063e44d429 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.c
+++ b/drivers/gpu/drm/amd/amdgpu/atom.c
@@ -1246,6 +1246,10 @@ static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index,
ectx.last_jump_jiffies = 0;
if (ws) {
ectx.ws = kcalloc(4, ws, GFP_KERNEL);
+ if (!ectx.ws) {
+ ret = -ENOMEM;
+ goto free;
+ }
ectx.ws_size = ws;
} else {
ectx.ws = NULL;
@@ -1494,6 +1498,28 @@ static void atom_get_vbios_version(struct atom_context *ctx)
}
}
+static void atom_get_vbios_build(struct atom_context *ctx)
+{
+ unsigned char *atom_rom_hdr;
+ unsigned char *str;
+ uint16_t base, len;
+
+ base = CU16(ATOM_ROM_TABLE_PTR);
+ atom_rom_hdr = CSTR(base);
+
+ str = CSTR(CU16(base + ATOM_ROM_CFG_PTR));
+ /* Skip config string */
+ while (str < atom_rom_hdr && *str++)
+ ;
+ /* Skip change list string */
+ while (str < atom_rom_hdr && *str++)
+ ;
+
+ len = min(atom_rom_hdr - str, STRLEN_NORMAL);
+ if (len)
+ strscpy(ctx->build_num, str, len);
+}
+
struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios)
{
int base;
@@ -1554,6 +1580,7 @@ struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios)
atom_get_vbios_pn(ctx);
atom_get_vbios_date(ctx);
atom_get_vbios_version(ctx);
+ atom_get_vbios_build(ctx);
return ctx;
}
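The two while loops in atom_get_vbios_build() above step over NUL-terminated strings that sit between the config pointer and the ROM header. The same idiom as a small standalone helper, shown only for illustration:

static const unsigned char *skip_cstr(const unsigned char *s,
				      const unsigned char *end)
{
	/* advance past one NUL-terminated string without passing 'end' */
	while (s < end && *s++)
		;
	return s;
}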
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.h b/drivers/gpu/drm/amd/amdgpu/atom.h
index b807f6639a4c..825ff28731f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.h
+++ b/drivers/gpu/drm/amd/amdgpu/atom.h
@@ -37,6 +37,7 @@ struct drm_device;
#define ATOM_ROM_MAGIC "ATOM"
#define ATOM_ROM_MAGIC_PTR 4
+#define ATOM_ROM_CFG_PTR 0xC
#define ATOM_ROM_MSG_PTR 0x10
#define ATOM_ROM_CMD_PTR 0x1E
#define ATOM_ROM_DATA_PTR 0x20
@@ -151,6 +152,7 @@ struct atom_context {
uint32_t version;
uint8_t vbios_ver_str[STRLEN_NORMAL];
uint8_t date[STRLEN_NORMAL];
+ uint8_t build_num[STRLEN_NORMAL];
};
extern int amdgpu_atom_debug;
diff --git a/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c b/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c
new file mode 100644
index 000000000000..96616a865aac
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "nv.h"
+
+#include "soc15_common.h"
+#include "soc15_hw_ip.h"
+#include "cyan_skillfish_ip_offset.h"
+
+int cyan_skillfish_reg_base_init(struct amdgpu_device *adev)
+{
+	/* HW has more IP blocks; only initialize the blocks needed by the driver */
+ uint32_t i;
+
+ adev->gfx.xcc_mask = 1;
+ for (i = 0 ; i < MAX_INSTANCE ; ++i) {
+ adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
+ adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i]));
+ adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i]));
+ adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i]));
+ adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
+ adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i]));
+ adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i]));
+ adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(UVD0_BASE.instance[i]));
+ adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i]));
+ adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DMU_BASE.instance[i]));
+ adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i]));
+ adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
+ adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
+ adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
+ adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
+ adev->reg_offset[CLK_HWIP][i] = (uint32_t *)(&(CLK_BASE.instance[i]));
+ }
+ return 0;
+}
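The table filled in above is what the SOC15 register accessors index at runtime. A simplified sketch of the shape of that lookup (the real macros live in soc15_common.h; this EXAMPLE_ variant is illustrative only):

/* per-IP, per-instance base plus the register's segment-relative offset */
#define EXAMPLE_RREG32_SOC15(adev, ip, inst, reg) \
	RREG32((adev)->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)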
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index ba73518f5cdf..72ca6538b2e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -1141,8 +1141,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
/* save values for DPM */
amdgpu_crtc->line_time = line_time;
- amdgpu_crtc->wm_high = latency_watermark_a;
- amdgpu_crtc->wm_low = latency_watermark_b;
+
/* Save number of lines the linebuffer leads before the scanout */
amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
deleted file mode 100644
index b01d88d078fa..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ /dev/null
@@ -1,3818 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <drm/drm_edid.h>
-#include <drm/drm_fourcc.h>
-#include <drm/drm_modeset_helper.h>
-#include <drm/drm_modeset_helper_vtables.h>
-#include <drm/drm_vblank.h>
-
-#include "amdgpu.h"
-#include "amdgpu_pm.h"
-#include "amdgpu_i2c.h"
-#include "vid.h"
-#include "atom.h"
-#include "amdgpu_atombios.h"
-#include "atombios_crtc.h"
-#include "atombios_encoders.h"
-#include "amdgpu_pll.h"
-#include "amdgpu_connectors.h"
-#include "amdgpu_display.h"
-#include "dce_v11_0.h"
-
-#include "dce/dce_11_0_d.h"
-#include "dce/dce_11_0_sh_mask.h"
-#include "dce/dce_11_0_enum.h"
-#include "oss/oss_3_0_d.h"
-#include "oss/oss_3_0_sh_mask.h"
-#include "gmc/gmc_8_1_d.h"
-#include "gmc/gmc_8_1_sh_mask.h"
-
-#include "ivsrcid/ivsrcid_vislands30.h"
-
-static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev);
-static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev);
-static void dce_v11_0_hpd_int_ack(struct amdgpu_device *adev, int hpd);
-
-static const u32 crtc_offsets[] =
-{
- CRTC0_REGISTER_OFFSET,
- CRTC1_REGISTER_OFFSET,
- CRTC2_REGISTER_OFFSET,
- CRTC3_REGISTER_OFFSET,
- CRTC4_REGISTER_OFFSET,
- CRTC5_REGISTER_OFFSET,
- CRTC6_REGISTER_OFFSET
-};
-
-static const u32 hpd_offsets[] =
-{
- HPD0_REGISTER_OFFSET,
- HPD1_REGISTER_OFFSET,
- HPD2_REGISTER_OFFSET,
- HPD3_REGISTER_OFFSET,
- HPD4_REGISTER_OFFSET,
- HPD5_REGISTER_OFFSET
-};
-
-static const uint32_t dig_offsets[] = {
- DIG0_REGISTER_OFFSET,
- DIG1_REGISTER_OFFSET,
- DIG2_REGISTER_OFFSET,
- DIG3_REGISTER_OFFSET,
- DIG4_REGISTER_OFFSET,
- DIG5_REGISTER_OFFSET,
- DIG6_REGISTER_OFFSET,
- DIG7_REGISTER_OFFSET,
- DIG8_REGISTER_OFFSET
-};
-
-static const struct {
- uint32_t reg;
- uint32_t vblank;
- uint32_t vline;
- uint32_t hpd;
-
-} interrupt_status_offsets[] = { {
- .reg = mmDISP_INTERRUPT_STATUS,
- .vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
- .vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
- .hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
-}, {
- .reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
- .vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
- .vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
- .hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
-}, {
- .reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
- .vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
- .vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
- .hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
-}, {
- .reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
- .vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
- .vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
- .hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
-}, {
- .reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
- .vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
- .vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
- .hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
-}, {
- .reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
- .vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
- .vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
- .hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
-} };
-
-static const u32 cz_golden_settings_a11[] =
-{
- mmCRTC_DOUBLE_BUFFER_CONTROL, 0x00010101, 0x00010000,
- mmFBC_MISC, 0x1f311fff, 0x14300000,
-};
-
-static const u32 cz_mgcg_cgcg_init[] =
-{
- mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
- mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
-};
-
-static const u32 stoney_golden_settings_a11[] =
-{
- mmCRTC_DOUBLE_BUFFER_CONTROL, 0x00010101, 0x00010000,
- mmFBC_MISC, 0x1f311fff, 0x14302000,
-};
-
-static const u32 polaris11_golden_settings_a11[] =
-{
- mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
- mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
- mmFBC_DEBUG1, 0xffffffff, 0x00000008,
- mmFBC_MISC, 0x9f313fff, 0x14302008,
- mmHDMI_CONTROL, 0x313f031f, 0x00000011,
-};
-
-static const u32 polaris10_golden_settings_a11[] =
-{
- mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
- mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
- mmFBC_MISC, 0x9f313fff, 0x14302008,
- mmHDMI_CONTROL, 0x313f031f, 0x00000011,
-};
-
-static void dce_v11_0_init_golden_registers(struct amdgpu_device *adev)
-{
- switch (adev->asic_type) {
- case CHIP_CARRIZO:
- amdgpu_device_program_register_sequence(adev,
- cz_mgcg_cgcg_init,
- ARRAY_SIZE(cz_mgcg_cgcg_init));
- amdgpu_device_program_register_sequence(adev,
- cz_golden_settings_a11,
- ARRAY_SIZE(cz_golden_settings_a11));
- break;
- case CHIP_STONEY:
- amdgpu_device_program_register_sequence(adev,
- stoney_golden_settings_a11,
- ARRAY_SIZE(stoney_golden_settings_a11));
- break;
- case CHIP_POLARIS11:
- case CHIP_POLARIS12:
- amdgpu_device_program_register_sequence(adev,
- polaris11_golden_settings_a11,
- ARRAY_SIZE(polaris11_golden_settings_a11));
- break;
- case CHIP_POLARIS10:
- case CHIP_VEGAM:
- amdgpu_device_program_register_sequence(adev,
- polaris10_golden_settings_a11,
- ARRAY_SIZE(polaris10_golden_settings_a11));
- break;
- default:
- break;
- }
-}
-
-static u32 dce_v11_0_audio_endpt_rreg(struct amdgpu_device *adev,
- u32 block_offset, u32 reg)
-{
- unsigned long flags;
- u32 r;
-
- spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
- WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
- r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
- spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
-
- return r;
-}
-
-static void dce_v11_0_audio_endpt_wreg(struct amdgpu_device *adev,
- u32 block_offset, u32 reg, u32 v)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
- WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
- WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
- spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
-}
-
-static u32 dce_v11_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
-{
- if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
- return 0;
- else
- return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
-}
-
-static void dce_v11_0_pageflip_interrupt_init(struct amdgpu_device *adev)
-{
- unsigned i;
-
- /* Enable pflip interrupts */
- for (i = 0; i < adev->mode_info.num_crtc; i++)
- amdgpu_irq_get(adev, &adev->pageflip_irq, i);
-}
-
-static void dce_v11_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
-{
- unsigned i;
-
- /* Disable pflip interrupts */
- for (i = 0; i < adev->mode_info.num_crtc; i++)
- amdgpu_irq_put(adev, &adev->pageflip_irq, i);
-}
-
-/**
- * dce_v11_0_page_flip - pageflip callback.
- *
- * @adev: amdgpu_device pointer
- * @crtc_id: crtc to cleanup pageflip on
- * @crtc_base: new address of the crtc (GPU MC address)
- * @async: asynchronous flip
- *
- * Triggers the actual pageflip by updating the primary
- * surface base address.
- */
-static void dce_v11_0_page_flip(struct amdgpu_device *adev,
- int crtc_id, u64 crtc_base, bool async)
-{
- struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
- struct drm_framebuffer *fb = amdgpu_crtc->base.primary->fb;
- u32 tmp;
-
- /* flip immediate for async, default is vsync */
- tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
- GRPH_SURFACE_UPDATE_IMMEDIATE_EN, async ? 1 : 0);
- WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
- /* update pitch */
- WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset,
- fb->pitches[0] / fb->format->cpp[0]);
- /* update the scanout addresses */
- WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
- upper_32_bits(crtc_base));
- /* writing to the low address triggers the update */
- WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
- lower_32_bits(crtc_base));
- /* post the write */
- RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
-}
-
-static int dce_v11_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
- u32 *vbl, u32 *position)
-{
- if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
- return -EINVAL;
-
- *vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
- *position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
-
- return 0;
-}
-
-/**
- * dce_v11_0_hpd_sense - hpd sense callback.
- *
- * @adev: amdgpu_device pointer
- * @hpd: hpd (hotplug detect) pin
- *
- * Checks if a digital monitor is connected (evergreen+).
- * Returns true if connected, false if not connected.
- */
-static bool dce_v11_0_hpd_sense(struct amdgpu_device *adev,
- enum amdgpu_hpd_id hpd)
-{
- bool connected = false;
-
- if (hpd >= adev->mode_info.num_hpd)
- return connected;
-
- if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[hpd]) &
- DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK)
- connected = true;
-
- return connected;
-}
-
-/**
- * dce_v11_0_hpd_set_polarity - hpd set polarity callback.
- *
- * @adev: amdgpu_device pointer
- * @hpd: hpd (hotplug detect) pin
- *
- * Set the polarity of the hpd pin (evergreen+).
- */
-static void dce_v11_0_hpd_set_polarity(struct amdgpu_device *adev,
- enum amdgpu_hpd_id hpd)
-{
- u32 tmp;
- bool connected = dce_v11_0_hpd_sense(adev, hpd);
-
- if (hpd >= adev->mode_info.num_hpd)
- return;
-
- tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
- if (connected)
- tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 0);
- else
- tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 1);
- WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
-}
-
-/**
- * dce_v11_0_hpd_init - hpd setup callback.
- *
- * @adev: amdgpu_device pointer
- *
- * Setup the hpd pins used by the card (evergreen+).
- * Enable the pin, set the polarity, and enable the hpd interrupts.
- */
-static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
-{
- struct drm_device *dev = adev_to_drm(adev);
- struct drm_connector *connector;
- struct drm_connector_list_iter iter;
- u32 tmp;
-
- drm_connector_list_iter_begin(dev, &iter);
- drm_for_each_connector_iter(connector, &iter) {
- struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
-
- if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
- continue;
-
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
- connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
- /* don't try to enable hpd on eDP or LVDS avoid breaking the
- * aux dp channel on imac and help (but not completely fix)
- * https://bugzilla.redhat.com/show_bug.cgi?id=726143
- * also avoid interrupt storms during dpms.
- */
- tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
- tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
- WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
- continue;
- }
-
- tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
- tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1);
- WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
-
- tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd]);
- tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
- DC_HPD_CONNECT_INT_DELAY,
- AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS);
- tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
- DC_HPD_DISCONNECT_INT_DELAY,
- AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS);
- WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
-
- dce_v11_0_hpd_int_ack(adev, amdgpu_connector->hpd.hpd);
- dce_v11_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
- amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
- }
- drm_connector_list_iter_end(&iter);
-}
-
-/**
- * dce_v11_0_hpd_fini - hpd tear down callback.
- *
- * @adev: amdgpu_device pointer
- *
- * Tear down the hpd pins used by the card (evergreen+).
- * Disable the hpd interrupts.
- */
-static void dce_v11_0_hpd_fini(struct amdgpu_device *adev)
-{
- struct drm_device *dev = adev_to_drm(adev);
- struct drm_connector *connector;
- struct drm_connector_list_iter iter;
- u32 tmp;
-
- drm_connector_list_iter_begin(dev, &iter);
- drm_for_each_connector_iter(connector, &iter) {
- struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
-
- if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
- continue;
-
- tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
- tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 0);
- WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
-
- amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
- }
- drm_connector_list_iter_end(&iter);
-}
-
-static u32 dce_v11_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
-{
- return mmDC_GPIO_HPD_A;
-}
-
-static bool dce_v11_0_is_display_hung(struct amdgpu_device *adev)
-{
- u32 crtc_hung = 0;
- u32 crtc_status[6];
- u32 i, j, tmp;
-
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN)) {
- crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
- crtc_hung |= (1 << i);
- }
- }
-
- for (j = 0; j < 10; j++) {
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- if (crtc_hung & (1 << i)) {
- tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
- if (tmp != crtc_status[i])
- crtc_hung &= ~(1 << i);
- }
- }
- if (crtc_hung == 0)
- return false;
- udelay(100);
- }
-
- return true;
-}
-
-static void dce_v11_0_set_vga_render_state(struct amdgpu_device *adev,
- bool render)
-{
- u32 tmp;
-
- /* Lockout access through VGA aperture*/
- tmp = RREG32(mmVGA_HDP_CONTROL);
- if (render)
- tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0);
- else
- tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
- WREG32(mmVGA_HDP_CONTROL, tmp);
-
- /* disable VGA render */
- tmp = RREG32(mmVGA_RENDER_CONTROL);
- if (render)
- tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1);
- else
- tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
- WREG32(mmVGA_RENDER_CONTROL, tmp);
-}
-
-static int dce_v11_0_get_num_crtc (struct amdgpu_device *adev)
-{
- int num_crtc = 0;
-
- switch (adev->asic_type) {
- case CHIP_CARRIZO:
- num_crtc = 3;
- break;
- case CHIP_STONEY:
- num_crtc = 2;
- break;
- case CHIP_POLARIS10:
- case CHIP_VEGAM:
- num_crtc = 6;
- break;
- case CHIP_POLARIS11:
- case CHIP_POLARIS12:
- num_crtc = 5;
- break;
- default:
- num_crtc = 0;
- }
- return num_crtc;
-}
-
-void dce_v11_0_disable_dce(struct amdgpu_device *adev)
-{
- /*Disable VGA render and enabled crtc, if has DCE engine*/
- if (amdgpu_atombios_has_dce_engine_info(adev)) {
- u32 tmp;
- int crtc_enabled, i;
-
- dce_v11_0_set_vga_render_state(adev, false);
-
- /*Disable crtc*/
- for (i = 0; i < dce_v11_0_get_num_crtc(adev); i++) {
- crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
- CRTC_CONTROL, CRTC_MASTER_EN);
- if (crtc_enabled) {
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
- tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
- tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
- WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
- }
- }
- }
-}
-
-static void dce_v11_0_program_fmt(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
- struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
- int bpc = 0;
- u32 tmp = 0;
- enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;
-
- if (connector) {
- struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- bpc = amdgpu_connector_get_monitor_bpc(connector);
- dither = amdgpu_connector->dither;
- }
-
- /* LVDS/eDP FMT is set up by atom */
- if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
- return;
-
- /* not needed for analog */
- if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
- (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
- return;
-
- if (bpc == 0)
- return;
-
- switch (bpc) {
- case 6:
- if (dither == AMDGPU_FMT_DITHER_ENABLE) {
- /* XXX sort out optimal dither settings */
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 0);
- } else {
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 0);
- }
- break;
- case 8:
- if (dither == AMDGPU_FMT_DITHER_ENABLE) {
- /* XXX sort out optimal dither settings */
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 1);
- } else {
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 1);
- }
- break;
- case 10:
- if (dither == AMDGPU_FMT_DITHER_ENABLE) {
- /* XXX sort out optimal dither settings */
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 2);
- } else {
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 2);
- }
- break;
- default:
- /* not needed */
- break;
- }
-
- WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-}
-
-
-/* display watermark setup */
-/**
- * dce_v11_0_line_buffer_adjust - Set up the line buffer
- *
- * @adev: amdgpu_device pointer
- * @amdgpu_crtc: the selected display controller
- * @mode: the current display mode on the selected display
- * controller
- *
- * Setup up the line buffer allocation for
- * the selected display controller (CIK).
- * Returns the line buffer size in pixels.
- */
-static u32 dce_v11_0_line_buffer_adjust(struct amdgpu_device *adev,
- struct amdgpu_crtc *amdgpu_crtc,
- struct drm_display_mode *mode)
-{
- u32 tmp, buffer_alloc, i, mem_cfg;
- u32 pipe_offset = amdgpu_crtc->crtc_id;
- /*
- * Line Buffer Setup
- * There are 6 line buffers, one for each display controllers.
- * There are 3 partitions per LB. Select the number of partitions
- * to enable based on the display width. For display widths larger
- * than 4096, you need use to use 2 display controllers and combine
- * them using the stereo blender.
- */
- if (amdgpu_crtc->base.enabled && mode) {
- if (mode->crtc_hdisplay < 1920) {
- mem_cfg = 1;
- buffer_alloc = 2;
- } else if (mode->crtc_hdisplay < 2560) {
- mem_cfg = 2;
- buffer_alloc = 2;
- } else if (mode->crtc_hdisplay < 4096) {
- mem_cfg = 0;
- buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
- } else {
- DRM_DEBUG_KMS("Mode too big for LB!\n");
- mem_cfg = 0;
- buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
- }
- } else {
- mem_cfg = 1;
- buffer_alloc = 0;
- }
-
- tmp = RREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, LB_MEMORY_CTRL, LB_MEMORY_CONFIG, mem_cfg);
- WREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset, tmp);
-
- tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset);
- tmp = REG_SET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATED, buffer_alloc);
- WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset, tmp);
-
- for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset);
- if (REG_GET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATION_COMPLETED))
- break;
- udelay(1);
- }
-
- if (amdgpu_crtc->base.enabled && mode) {
- switch (mem_cfg) {
- case 0:
- default:
- return 4096 * 2;
- case 1:
- return 1920 * 2;
- case 2:
- return 2560 * 2;
- }
- }
-
- /* controller not enabled, so no lb used */
- return 0;
-}
-
-/**
- * cik_get_number_of_dram_channels - get the number of dram channels
- *
- * @adev: amdgpu_device pointer
- *
- * Look up the number of video ram channels (CIK).
- * Used for display watermark bandwidth calculations
- * Returns the number of dram channels
- */
-static u32 cik_get_number_of_dram_channels(struct amdgpu_device *adev)
-{
- u32 tmp = RREG32(mmMC_SHARED_CHMAP);
-
- switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
- case 0:
- default:
- return 1;
- case 1:
- return 2;
- case 2:
- return 4;
- case 3:
- return 8;
- case 4:
- return 3;
- case 5:
- return 6;
- case 6:
- return 10;
- case 7:
- return 12;
- case 8:
- return 16;
- }
-}
-
-struct dce10_wm_params {
- u32 dram_channels; /* number of dram channels */
- u32 yclk; /* bandwidth per dram data pin in kHz */
- u32 sclk; /* engine clock in kHz */
- u32 disp_clk; /* display clock in kHz */
- u32 src_width; /* viewport width */
- u32 active_time; /* active display time in ns */
- u32 blank_time; /* blank time in ns */
- bool interlaced; /* mode is interlaced */
- fixed20_12 vsc; /* vertical scale ratio */
- u32 num_heads; /* number of active crtcs */
- u32 bytes_per_pixel; /* bytes per pixel display + overlay */
- u32 lb_size; /* line buffer allocated to pipe */
- u32 vtaps; /* vertical scaler taps */
-};
-
-/**
- * dce_v11_0_dram_bandwidth - get the dram bandwidth
- *
- * @wm: watermark calculation data
- *
- * Calculate the raw dram bandwidth (CIK).
- * Used for display watermark bandwidth calculations
- * Returns the dram bandwidth in MBytes/s
- */
-static u32 dce_v11_0_dram_bandwidth(struct dce10_wm_params *wm)
-{
- /* Calculate raw DRAM Bandwidth */
- fixed20_12 dram_efficiency; /* 0.7 */
- fixed20_12 yclk, dram_channels, bandwidth;
- fixed20_12 a;
-
- a.full = dfixed_const(1000);
- yclk.full = dfixed_const(wm->yclk);
- yclk.full = dfixed_div(yclk, a);
- dram_channels.full = dfixed_const(wm->dram_channels * 4);
- a.full = dfixed_const(10);
- dram_efficiency.full = dfixed_const(7);
- dram_efficiency.full = dfixed_div(dram_efficiency, a);
- bandwidth.full = dfixed_mul(dram_channels, yclk);
- bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
-
- return dfixed_trunc(bandwidth);
-}
-
-/**
- * dce_v11_0_dram_bandwidth_for_display - get the dram bandwidth for display
- *
- * @wm: watermark calculation data
- *
- * Calculate the dram bandwidth used for display (CIK).
- * Used for display watermark bandwidth calculations
- * Returns the dram bandwidth for display in MBytes/s
- */
-static u32 dce_v11_0_dram_bandwidth_for_display(struct dce10_wm_params *wm)
-{
- /* Calculate DRAM Bandwidth and the part allocated to display. */
- fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
- fixed20_12 yclk, dram_channels, bandwidth;
- fixed20_12 a;
-
- a.full = dfixed_const(1000);
- yclk.full = dfixed_const(wm->yclk);
- yclk.full = dfixed_div(yclk, a);
- dram_channels.full = dfixed_const(wm->dram_channels * 4);
- a.full = dfixed_const(10);
- disp_dram_allocation.full = dfixed_const(3); /* XXX worse case value 0.3 */
- disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
- bandwidth.full = dfixed_mul(dram_channels, yclk);
- bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
-
- return dfixed_trunc(bandwidth);
-}
-
-/**
- * dce_v11_0_data_return_bandwidth - get the data return bandwidth
- *
- * @wm: watermark calculation data
- *
- * Calculate the data return bandwidth used for display (CIK).
- * Used for display watermark bandwidth calculations
- * Returns the data return bandwidth in MBytes/s
- */
-static u32 dce_v11_0_data_return_bandwidth(struct dce10_wm_params *wm)
-{
- /* Calculate the display Data return Bandwidth */
- fixed20_12 return_efficiency; /* 0.8 */
- fixed20_12 sclk, bandwidth;
- fixed20_12 a;
-
- a.full = dfixed_const(1000);
- sclk.full = dfixed_const(wm->sclk);
- sclk.full = dfixed_div(sclk, a);
- a.full = dfixed_const(10);
- return_efficiency.full = dfixed_const(8);
- return_efficiency.full = dfixed_div(return_efficiency, a);
- a.full = dfixed_const(32);
- bandwidth.full = dfixed_mul(a, sclk);
- bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
-
- return dfixed_trunc(bandwidth);
-}
-
-/**
- * dce_v11_0_dmif_request_bandwidth - get the dmif bandwidth
- *
- * @wm: watermark calculation data
- *
- * Calculate the dmif bandwidth used for display (CIK).
- * Used for display watermark bandwidth calculations
- * Returns the dmif bandwidth in MBytes/s
- */
-static u32 dce_v11_0_dmif_request_bandwidth(struct dce10_wm_params *wm)
-{
- /* Calculate the DMIF Request Bandwidth */
- fixed20_12 disp_clk_request_efficiency; /* 0.8 */
- fixed20_12 disp_clk, bandwidth;
- fixed20_12 a, b;
-
- a.full = dfixed_const(1000);
- disp_clk.full = dfixed_const(wm->disp_clk);
- disp_clk.full = dfixed_div(disp_clk, a);
- a.full = dfixed_const(32);
- b.full = dfixed_mul(a, disp_clk);
-
- a.full = dfixed_const(10);
- disp_clk_request_efficiency.full = dfixed_const(8);
- disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
-
- bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);
-
- return dfixed_trunc(bandwidth);
-}
-
-/**
- * dce_v11_0_available_bandwidth - get the min available bandwidth
- *
- * @wm: watermark calculation data
- *
- * Calculate the min available bandwidth used for display (CIK).
- * Used for display watermark bandwidth calculations
- * Returns the min available bandwidth in MBytes/s
- */
-static u32 dce_v11_0_available_bandwidth(struct dce10_wm_params *wm)
-{
- /* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
- u32 dram_bandwidth = dce_v11_0_dram_bandwidth(wm);
- u32 data_return_bandwidth = dce_v11_0_data_return_bandwidth(wm);
- u32 dmif_req_bandwidth = dce_v11_0_dmif_request_bandwidth(wm);
-
- return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
-}
-
-/**
- * dce_v11_0_average_bandwidth - get the average available bandwidth
- *
- * @wm: watermark calculation data
- *
- * Calculate the average available bandwidth used for display (CIK).
- * Used for display watermark bandwidth calculations
- * Returns the average available bandwidth in MBytes/s
- */
-static u32 dce_v11_0_average_bandwidth(struct dce10_wm_params *wm)
-{
- /* Calculate the display mode Average Bandwidth
- * DisplayMode should contain the source and destination dimensions,
- * timing, etc.
- */
- fixed20_12 bpp;
- fixed20_12 line_time;
- fixed20_12 src_width;
- fixed20_12 bandwidth;
- fixed20_12 a;
-
- a.full = dfixed_const(1000);
- line_time.full = dfixed_const(wm->active_time + wm->blank_time);
- line_time.full = dfixed_div(line_time, a);
- bpp.full = dfixed_const(wm->bytes_per_pixel);
- src_width.full = dfixed_const(wm->src_width);
- bandwidth.full = dfixed_mul(src_width, bpp);
- bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
- bandwidth.full = dfixed_div(bandwidth, line_time);
-
- return dfixed_trunc(bandwidth);
-}
-
-/**
- * dce_v11_0_latency_watermark - get the latency watermark
- *
- * @wm: watermark calculation data
- *
- * Calculate the latency watermark (CIK).
- * Used for display watermark bandwidth calculations
- * Returns the latency watermark in ns
- */
-static u32 dce_v11_0_latency_watermark(struct dce10_wm_params *wm)
-{
- /* First calculate the latency in ns */
- u32 mc_latency = 2000; /* 2000 ns. */
- u32 available_bandwidth = dce_v11_0_available_bandwidth(wm);
- u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
- u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
- u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
- u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
- (wm->num_heads * cursor_line_pair_return_time);
- u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
- u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
- u32 tmp, dmif_size = 12288;
- fixed20_12 a, b, c;
-
- if (wm->num_heads == 0)
- return 0;
-
- a.full = dfixed_const(2);
- b.full = dfixed_const(1);
- if ((wm->vsc.full > a.full) ||
- ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
- (wm->vtaps >= 5) ||
- ((wm->vsc.full >= a.full) && wm->interlaced))
- max_src_lines_per_dst_line = 4;
- else
- max_src_lines_per_dst_line = 2;
-
- a.full = dfixed_const(available_bandwidth);
- b.full = dfixed_const(wm->num_heads);
- a.full = dfixed_div(a, b);
- tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
- tmp = min(dfixed_trunc(a), tmp);
-
- lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
-
- a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
- b.full = dfixed_const(1000);
- c.full = dfixed_const(lb_fill_bw);
- b.full = dfixed_div(c, b);
- a.full = dfixed_div(a, b);
- line_fill_time = dfixed_trunc(a);
-
- if (line_fill_time < wm->active_time)
- return latency;
- else
- return latency + (line_fill_time - wm->active_time);
-
-}
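-
-/*
- * In short: the watermark is the worst-case wait for data, i.e. memory
- * latency plus the dc pipe latency plus the data return time of the other
- * heads; if the line buffer cannot be filled within the active display
- * time, the shortfall is added on top.
- */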
-
-/**
- * dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display - check
- * average and available dram bandwidth
- *
- * @wm: watermark calculation data
- *
- * Check if the display average bandwidth fits in the display
- * dram bandwidth (CIK).
- * Used for display watermark bandwidth calculations
- * Returns true if the display fits, false if not.
- */
-static bool dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce10_wm_params *wm)
-{
- if (dce_v11_0_average_bandwidth(wm) <=
- (dce_v11_0_dram_bandwidth_for_display(wm) / wm->num_heads))
- return true;
- else
- return false;
-}
-
-/**
- * dce_v11_0_average_bandwidth_vs_available_bandwidth - check
- * average and available bandwidth
- *
- * @wm: watermark calculation data
- *
- * Check if the display average bandwidth fits in the display
- * available bandwidth (CIK).
- * Used for display watermark bandwidth calculations
- * Returns true if the display fits, false if not.
- */
-static bool dce_v11_0_average_bandwidth_vs_available_bandwidth(struct dce10_wm_params *wm)
-{
- if (dce_v11_0_average_bandwidth(wm) <=
- (dce_v11_0_available_bandwidth(wm) / wm->num_heads))
- return true;
- else
- return false;
-}
-
-/**
- * dce_v11_0_check_latency_hiding - check latency hiding
- *
- * @wm: watermark calculation data
- *
- * Check latency hiding (CIK).
- * Used for display watermark bandwidth calculations
- * Returns true if the display fits, false if not.
- */
-static bool dce_v11_0_check_latency_hiding(struct dce10_wm_params *wm)
-{
- u32 lb_partitions = wm->lb_size / wm->src_width;
- u32 line_time = wm->active_time + wm->blank_time;
- u32 latency_tolerant_lines;
- u32 latency_hiding;
- fixed20_12 a;
-
- a.full = dfixed_const(1);
- if (wm->vsc.full > a.full)
- latency_tolerant_lines = 1;
- else {
- if (lb_partitions <= (wm->vtaps + 1))
- latency_tolerant_lines = 1;
- else
- latency_tolerant_lines = 2;
- }
-
- latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
-
- if (dce_v11_0_latency_watermark(wm) <= latency_hiding)
- return true;
- else
- return false;
-}
-
-/**
- * dce_v11_0_program_watermarks - program display watermarks
- *
- * @adev: amdgpu_device pointer
- * @amdgpu_crtc: the selected display controller
- * @lb_size: line buffer size
- * @num_heads: number of display controllers in use
- *
- * Calculate and program the display watermarks for the
- * selected display controller (CIK).
- */
-static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
- struct amdgpu_crtc *amdgpu_crtc,
- u32 lb_size, u32 num_heads)
-{
- struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
- struct dce10_wm_params wm_low, wm_high;
- u32 active_time;
- u32 line_time = 0;
- u32 latency_watermark_a = 0, latency_watermark_b = 0;
- u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
-
- if (amdgpu_crtc->base.enabled && num_heads && mode) {
- active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
- (u32)mode->clock);
- line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
- (u32)mode->clock);
- line_time = min_t(u32, line_time, 65535);
-
- /* watermark for high clocks */
- if (adev->pm.dpm_enabled) {
- wm_high.yclk =
- amdgpu_dpm_get_mclk(adev, false) * 10;
- wm_high.sclk =
- amdgpu_dpm_get_sclk(adev, false) * 10;
- } else {
- wm_high.yclk = adev->pm.current_mclk * 10;
- wm_high.sclk = adev->pm.current_sclk * 10;
- }
-
- wm_high.disp_clk = mode->clock;
- wm_high.src_width = mode->crtc_hdisplay;
- wm_high.active_time = active_time;
- wm_high.blank_time = line_time - wm_high.active_time;
- wm_high.interlaced = false;
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- wm_high.interlaced = true;
- wm_high.vsc = amdgpu_crtc->vsc;
- wm_high.vtaps = 1;
- if (amdgpu_crtc->rmx_type != RMX_OFF)
- wm_high.vtaps = 2;
- wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
- wm_high.lb_size = lb_size;
- wm_high.dram_channels = cik_get_number_of_dram_channels(adev);
- wm_high.num_heads = num_heads;
-
- /* set for high clocks */
- latency_watermark_a = min_t(u32, dce_v11_0_latency_watermark(&wm_high), 65535);
-
- /* possibly force display priority to high */
- /* should really do this at mode validation time... */
- if (!dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
- !dce_v11_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
- !dce_v11_0_check_latency_hiding(&wm_high) ||
- (adev->mode_info.disp_priority == 2)) {
- DRM_DEBUG_KMS("force priority to high\n");
- }
-
- /* watermark for low clocks */
- if (adev->pm.dpm_enabled) {
- wm_low.yclk =
- amdgpu_dpm_get_mclk(adev, true) * 10;
- wm_low.sclk =
- amdgpu_dpm_get_sclk(adev, true) * 10;
- } else {
- wm_low.yclk = adev->pm.current_mclk * 10;
- wm_low.sclk = adev->pm.current_sclk * 10;
- }
-
- wm_low.disp_clk = mode->clock;
- wm_low.src_width = mode->crtc_hdisplay;
- wm_low.active_time = active_time;
- wm_low.blank_time = line_time - wm_low.active_time;
- wm_low.interlaced = false;
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- wm_low.interlaced = true;
- wm_low.vsc = amdgpu_crtc->vsc;
- wm_low.vtaps = 1;
- if (amdgpu_crtc->rmx_type != RMX_OFF)
- wm_low.vtaps = 2;
- wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
- wm_low.lb_size = lb_size;
- wm_low.dram_channels = cik_get_number_of_dram_channels(adev);
- wm_low.num_heads = num_heads;
-
- /* set for low clocks */
- latency_watermark_b = min_t(u32, dce_v11_0_latency_watermark(&wm_low), 65535);
-
- /* possibly force display priority to high */
- /* should really do this at mode validation time... */
- if (!dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
- !dce_v11_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
- !dce_v11_0_check_latency_hiding(&wm_low) ||
- (adev->mode_info.disp_priority == 2)) {
- DRM_DEBUG_KMS("force priority to high\n");
- }
- lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
- }
-
- /* select wm A */
- wm_mask = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 1);
- WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
- tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_a);
- tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
- WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);
- /* select wm B */
- tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 2);
- WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
- tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_b);
- tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
- WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);
- /* restore original selection */
- WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, wm_mask);
-
- /* save values for DPM */
- amdgpu_crtc->line_time = line_time;
- amdgpu_crtc->wm_high = latency_watermark_a;
- amdgpu_crtc->wm_low = latency_watermark_b;
- /* Save number of lines the linebuffer leads before the scanout */
- amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
-}
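-
-/*
- * Note: watermark A is derived from the high-clock parameters and watermark
- * B from the low-clock ones; both are clamped to 16 bits and saved in
- * amdgpu_crtc->wm_high/wm_low for DPM.
- */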
-
-/**
- * dce_v11_0_bandwidth_update - program display watermarks
- *
- * @adev: amdgpu_device pointer
- *
- * Calculate and program the display watermarks and line
- * buffer allocation (CIK).
- */
-static void dce_v11_0_bandwidth_update(struct amdgpu_device *adev)
-{
- struct drm_display_mode *mode = NULL;
- u32 num_heads = 0, lb_size;
- int i;
-
- amdgpu_display_update_priority(adev);
-
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- if (adev->mode_info.crtcs[i]->base.enabled)
- num_heads++;
- }
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- mode = &adev->mode_info.crtcs[i]->base.mode;
- lb_size = dce_v11_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode);
- dce_v11_0_program_watermarks(adev, adev->mode_info.crtcs[i],
- lb_size, num_heads);
- }
-}
-
-static void dce_v11_0_audio_get_connected_pins(struct amdgpu_device *adev)
-{
- int i;
- u32 offset, tmp;
-
- for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
- offset = adev->mode_info.audio.pin[i].offset;
- tmp = RREG32_AUDIO_ENDPT(offset,
- ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
- if (((tmp &
- AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK) >>
- AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT) == 1)
- adev->mode_info.audio.pin[i].connected = false;
- else
- adev->mode_info.audio.pin[i].connected = true;
- }
-}
-
-static struct amdgpu_audio_pin *dce_v11_0_audio_get_pin(struct amdgpu_device *adev)
-{
- int i;
-
- dce_v11_0_audio_get_connected_pins(adev);
-
- for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
- if (adev->mode_info.audio.pin[i].connected)
- return &adev->mode_info.audio.pin[i];
- }
- DRM_ERROR("No connected audio pins found!\n");
- return NULL;
-}
-
-static void dce_v11_0_afmt_audio_select_pin(struct drm_encoder *encoder)
-{
- struct amdgpu_device *adev = drm_to_adev(encoder->dev);
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
- u32 tmp;
-
- if (!dig || !dig->afmt || !dig->afmt->pin)
- return;
-
- tmp = RREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT, dig->afmt->pin->id);
- WREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset, tmp);
-}
-
-static void dce_v11_0_audio_write_latency_fields(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
-{
- struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
- struct drm_connector *connector;
- struct drm_connector_list_iter iter;
- struct amdgpu_connector *amdgpu_connector = NULL;
- u32 tmp;
- int interlace = 0;
-
- if (!dig || !dig->afmt || !dig->afmt->pin)
- return;
-
- drm_connector_list_iter_begin(dev, &iter);
- drm_for_each_connector_iter(connector, &iter) {
- if (connector->encoder == encoder) {
- amdgpu_connector = to_amdgpu_connector(connector);
- break;
- }
- }
- drm_connector_list_iter_end(&iter);
-
- if (!amdgpu_connector) {
- DRM_ERROR("Couldn't find encoder's connector\n");
- return;
- }
-
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- interlace = 1;
- if (connector->latency_present[interlace]) {
- tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
- VIDEO_LIPSYNC, connector->video_latency[interlace]);
-		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
-				    AUDIO_LIPSYNC, connector->audio_latency[interlace]);
- } else {
- tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
- VIDEO_LIPSYNC, 0);
-		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
-				    AUDIO_LIPSYNC, 0);
- }
- WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
- ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
-}
-
-static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
- struct drm_connector *connector;
- struct drm_connector_list_iter iter;
- struct amdgpu_connector *amdgpu_connector = NULL;
- u32 tmp;
- u8 *sadb = NULL;
- int sad_count;
-
- if (!dig || !dig->afmt || !dig->afmt->pin)
- return;
-
- drm_connector_list_iter_begin(dev, &iter);
- drm_for_each_connector_iter(connector, &iter) {
- if (connector->encoder == encoder) {
- amdgpu_connector = to_amdgpu_connector(connector);
- break;
- }
- }
- drm_connector_list_iter_end(&iter);
-
- if (!amdgpu_connector) {
- DRM_ERROR("Couldn't find encoder's connector\n");
- return;
- }
-
- sad_count = drm_edid_to_speaker_allocation(amdgpu_connector->edid, &sadb);
- if (sad_count < 0) {
- DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
- sad_count = 0;
- }
-
- /* program the speaker allocation */
- tmp = RREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
- ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
- DP_CONNECTION, 0);
- /* set HDMI mode */
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
- HDMI_CONNECTION, 1);
- if (sad_count)
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
- SPEAKER_ALLOCATION, sadb[0]);
- else
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
- SPEAKER_ALLOCATION, 5); /* stereo */
- WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
- ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
-
- kfree(sadb);
-}
-
-static void dce_v11_0_audio_write_sad_regs(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
- struct drm_connector *connector;
- struct drm_connector_list_iter iter;
- struct amdgpu_connector *amdgpu_connector = NULL;
- struct cea_sad *sads;
- int i, sad_count;
-
- static const u16 eld_reg_to_type[][2] = {
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
- };
-
- if (!dig || !dig->afmt || !dig->afmt->pin)
- return;
-
- drm_connector_list_iter_begin(dev, &iter);
- drm_for_each_connector_iter(connector, &iter) {
- if (connector->encoder == encoder) {
- amdgpu_connector = to_amdgpu_connector(connector);
- break;
- }
- }
- drm_connector_list_iter_end(&iter);
-
- if (!amdgpu_connector) {
- DRM_ERROR("Couldn't find encoder's connector\n");
- return;
- }
-
- sad_count = drm_edid_to_sad(amdgpu_connector->edid, &sads);
- if (sad_count < 0)
- DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
- if (sad_count <= 0)
- return;
- BUG_ON(!sads);
-
- for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
- u32 tmp = 0;
- u8 stereo_freqs = 0;
- int max_channels = -1;
- int j;
-
- for (j = 0; j < sad_count; j++) {
- struct cea_sad *sad = &sads[j];
-
- if (sad->format == eld_reg_to_type[i][1]) {
- if (sad->channels > max_channels) {
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
- MAX_CHANNELS, sad->channels);
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
- DESCRIPTOR_BYTE_2, sad->byte2);
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
- SUPPORTED_FREQUENCIES, sad->freq);
- max_channels = sad->channels;
- }
-
- if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
- stereo_freqs |= sad->freq;
- else
- break;
- }
- }
-
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
- SUPPORTED_FREQUENCIES_STEREO, stereo_freqs);
- WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, eld_reg_to_type[i][0], tmp);
- }
-
- kfree(sads);
-}
-
-static void dce_v11_0_audio_enable(struct amdgpu_device *adev,
- struct amdgpu_audio_pin *pin,
- bool enable)
-{
- if (!pin)
- return;
-
- WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
- enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
-}
-
-static const u32 pin_offsets[] =
-{
- AUD0_REGISTER_OFFSET,
- AUD1_REGISTER_OFFSET,
- AUD2_REGISTER_OFFSET,
- AUD3_REGISTER_OFFSET,
- AUD4_REGISTER_OFFSET,
- AUD5_REGISTER_OFFSET,
- AUD6_REGISTER_OFFSET,
- AUD7_REGISTER_OFFSET,
-};
-
-static int dce_v11_0_audio_init(struct amdgpu_device *adev)
-{
- int i;
-
- if (!amdgpu_audio)
- return 0;
-
- adev->mode_info.audio.enabled = true;
-
- switch (adev->asic_type) {
- case CHIP_CARRIZO:
- case CHIP_STONEY:
- adev->mode_info.audio.num_pins = 7;
- break;
- case CHIP_POLARIS10:
- case CHIP_VEGAM:
- adev->mode_info.audio.num_pins = 8;
- break;
- case CHIP_POLARIS11:
- case CHIP_POLARIS12:
- adev->mode_info.audio.num_pins = 6;
- break;
- default:
- return -EINVAL;
- }
-
- for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
- adev->mode_info.audio.pin[i].channels = -1;
- adev->mode_info.audio.pin[i].rate = -1;
- adev->mode_info.audio.pin[i].bits_per_sample = -1;
- adev->mode_info.audio.pin[i].status_bits = 0;
- adev->mode_info.audio.pin[i].category_code = 0;
- adev->mode_info.audio.pin[i].connected = false;
- adev->mode_info.audio.pin[i].offset = pin_offsets[i];
- adev->mode_info.audio.pin[i].id = i;
- /* disable audio. it will be set up later */
- /* XXX remove once we switch to ip funcs */
- dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
- }
-
- return 0;
-}
-
-static void dce_v11_0_audio_fini(struct amdgpu_device *adev)
-{
- if (!amdgpu_audio)
- return;
-
- if (!adev->mode_info.audio.enabled)
- return;
-
- adev->mode_info.audio.enabled = false;
-}
-
-/*
- * update the N and CTS parameters for a given pixel clock rate
- */
-static void dce_v11_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
-{
- struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
- u32 tmp;
-
- tmp = RREG32(mmHDMI_ACR_32_0 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_0, HDMI_ACR_CTS_32, acr.cts_32khz);
- WREG32(mmHDMI_ACR_32_0 + dig->afmt->offset, tmp);
- tmp = RREG32(mmHDMI_ACR_32_1 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_1, HDMI_ACR_N_32, acr.n_32khz);
- WREG32(mmHDMI_ACR_32_1 + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmHDMI_ACR_44_0 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_0, HDMI_ACR_CTS_44, acr.cts_44_1khz);
- WREG32(mmHDMI_ACR_44_0 + dig->afmt->offset, tmp);
- tmp = RREG32(mmHDMI_ACR_44_1 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_1, HDMI_ACR_N_44, acr.n_44_1khz);
- WREG32(mmHDMI_ACR_44_1 + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmHDMI_ACR_48_0 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_0, HDMI_ACR_CTS_48, acr.cts_48khz);
- WREG32(mmHDMI_ACR_48_0 + dig->afmt->offset, tmp);
- tmp = RREG32(mmHDMI_ACR_48_1 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_1, HDMI_ACR_N_48, acr.n_48khz);
- WREG32(mmHDMI_ACR_48_1 + dig->afmt->offset, tmp);
-
-}
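-
-/*
- * For reference, HDMI audio clock regeneration ties N and CTS to the TMDS
- * clock: 128 * fs = f_TMDS * N / CTS. E.g. at fs = 48 kHz with N = 6144 on
- * a 148.5 MHz TMDS clock, CTS = 148500000 * 6144 / (128 * 48000) = 148500.
- */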
-
-/*
- * build a HDMI Video Info Frame
- */
-static void dce_v11_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
- void *buffer, size_t size)
-{
- struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
- uint8_t *frame = buffer + 3;
- uint8_t *header = buffer;
-
- WREG32(mmAFMT_AVI_INFO0 + dig->afmt->offset,
- frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
- WREG32(mmAFMT_AVI_INFO1 + dig->afmt->offset,
- frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
- WREG32(mmAFMT_AVI_INFO2 + dig->afmt->offset,
- frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
- WREG32(mmAFMT_AVI_INFO3 + dig->afmt->offset,
- frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
-}
-
-static void dce_v11_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
-{
- struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
- u32 dto_phase = 24 * 1000;
- u32 dto_modulo = clock;
- u32 tmp;
-
- if (!dig || !dig->afmt)
- return;
-
- /* XXX two dtos; generally use dto0 for hdmi */
-	/* Express [24MHz / target pixel clock] as an exact rational
-	 * number (a ratio of two integers): DCCG_AUDIO_DTOx_PHASE is
-	 * the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator.
-	 */
- tmp = RREG32(mmDCCG_AUDIO_DTO_SOURCE);
- tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL,
- amdgpu_crtc->crtc_id);
- WREG32(mmDCCG_AUDIO_DTO_SOURCE, tmp);
- WREG32(mmDCCG_AUDIO_DTO0_PHASE, dto_phase);
- WREG32(mmDCCG_AUDIO_DTO0_MODULE, dto_modulo);
-}
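-
-/*
- * Illustrative example (assumed numbers): for a 148500 kHz pixel clock,
- * phase = 24000 and module = 148500, so the DTO output is
- * pixel_clock * 24000 / 148500 = 24 MHz, the audio reference clock.
- */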
-
-/*
- * update the info frames with the data from the current display mode
- */
-static void dce_v11_0_afmt_setmode(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
-{
- struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
- struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
- u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
- struct hdmi_avi_infoframe frame;
- ssize_t err;
- u32 tmp;
- int bpc = 8;
-
- if (!dig || !dig->afmt)
- return;
-
- /* Silent, r600_hdmi_enable will raise WARN for us */
- if (!dig->afmt->enabled)
- return;
-
- /* hdmi deep color mode general control packets setup, if bpc > 8 */
- if (encoder->crtc) {
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
- bpc = amdgpu_crtc->bpc;
- }
-
- /* disable audio prior to setting up hw */
- dig->afmt->pin = dce_v11_0_audio_get_pin(adev);
- dce_v11_0_audio_enable(adev, dig->afmt->pin, false);
-
- dce_v11_0_audio_set_dto(encoder, mode->clock);
-
- tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1);
- WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp); /* send null packets when required */
-
- WREG32(mmAFMT_AUDIO_CRC_CONTROL + dig->afmt->offset, 0x1000);
-
- tmp = RREG32(mmHDMI_CONTROL + dig->afmt->offset);
- switch (bpc) {
- case 0:
- case 6:
- case 8:
- case 16:
- default:
- tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 0);
- tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 0);
- DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n",
- connector->name, bpc);
- break;
- case 10:
- tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 1);
- DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n",
- connector->name);
- break;
- case 12:
- tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 2);
- DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n",
- connector->name);
- break;
- }
- WREG32(mmHDMI_CONTROL + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1); /* send null packets when required */
- tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, 1); /* send general control packets */
- tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, 1); /* send general control packets every frame */
- WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
- /* enable audio info frames (frames won't be set until audio is enabled) */
- tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1);
- /* required for audio info values to be updated */
- tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 1);
- WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset);
- /* required for audio info values to be updated */
- tmp = REG_SET_FIELD(tmp, AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
- WREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
- /* anything other than 0 */
- tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, 2);
- WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
-
- WREG32(mmHDMI_GC + dig->afmt->offset, 0); /* unset HDMI_GC_AVMUTE */
-
- tmp = RREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset);
- /* set the default audio delay */
- tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, 1);
-	/* should be sufficient for all audio modes and small enough for all hblanks */
- tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, 3);
- WREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
- /* allow 60958 channel status fields to be updated */
- tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
- WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset);
- if (bpc > 8)
- /* clear SW CTS value */
- tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 0);
- else
- /* select SW CTS value */
- tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 1);
-	/* allow hw to send ACR packets when required */
- tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, 1);
- WREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset, tmp);
-
- dce_v11_0_afmt_update_ACR(encoder, mode->clock);
-
- tmp = RREG32(mmAFMT_60958_0 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, 1);
- WREG32(mmAFMT_60958_0 + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmAFMT_60958_1 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2);
- WREG32(mmAFMT_60958_1 + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmAFMT_60958_2 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, 3);
- tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, 4);
- tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, 5);
- tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, 6);
- tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, 7);
- tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, 8);
- WREG32(mmAFMT_60958_2 + dig->afmt->offset, tmp);
-
- dce_v11_0_audio_write_speaker_allocation(encoder);
-
- WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset,
- (0xff << AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT));
-
- dce_v11_0_afmt_audio_select_pin(encoder);
- dce_v11_0_audio_write_sad_regs(encoder);
- dce_v11_0_audio_write_latency_fields(encoder, mode);
-
- err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
- if (err < 0) {
- DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
- return;
- }
-
- err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
- if (err < 0) {
- DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
- return;
- }
-
- dce_v11_0_afmt_update_avi_infoframe(encoder, buffer, sizeof(buffer));
-
- tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
- /* enable AVI info frames */
- tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 1);
- /* required for audio info values to be updated */
- tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 1);
- WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, 2);
- WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
- /* send audio packets */
- tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1);
- WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
-
- WREG32(mmAFMT_RAMP_CONTROL0 + dig->afmt->offset, 0x00FFFFFF);
- WREG32(mmAFMT_RAMP_CONTROL1 + dig->afmt->offset, 0x007FFFFF);
- WREG32(mmAFMT_RAMP_CONTROL2 + dig->afmt->offset, 0x00000001);
- WREG32(mmAFMT_RAMP_CONTROL3 + dig->afmt->offset, 0x00000001);
-
-	/* enable audio after setting up hw */
- dce_v11_0_audio_enable(adev, dig->afmt->pin, true);
-}
-
-static void dce_v11_0_afmt_enable(struct drm_encoder *encoder, bool enable)
-{
- struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
-
- if (!dig || !dig->afmt)
- return;
-
- /* Silent, r600_hdmi_enable will raise WARN for us */
- if (enable && dig->afmt->enabled)
- return;
- if (!enable && !dig->afmt->enabled)
- return;
-
- if (!enable && dig->afmt->pin) {
- dce_v11_0_audio_enable(adev, dig->afmt->pin, false);
- dig->afmt->pin = NULL;
- }
-
- dig->afmt->enabled = enable;
-
- DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
- enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
-}
-
-static int dce_v11_0_afmt_init(struct amdgpu_device *adev)
-{
- int i;
-
- for (i = 0; i < adev->mode_info.num_dig; i++)
- adev->mode_info.afmt[i] = NULL;
-
- /* DCE11 has audio blocks tied to DIG encoders */
- for (i = 0; i < adev->mode_info.num_dig; i++) {
- adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
- if (adev->mode_info.afmt[i]) {
- adev->mode_info.afmt[i]->offset = dig_offsets[i];
- adev->mode_info.afmt[i]->id = i;
- } else {
- int j;
- for (j = 0; j < i; j++) {
- kfree(adev->mode_info.afmt[j]);
- adev->mode_info.afmt[j] = NULL;
- }
- return -ENOMEM;
- }
- }
- return 0;
-}
-
-static void dce_v11_0_afmt_fini(struct amdgpu_device *adev)
-{
- int i;
-
- for (i = 0; i < adev->mode_info.num_dig; i++) {
- kfree(adev->mode_info.afmt[i]);
- adev->mode_info.afmt[i] = NULL;
- }
-}
-
-static const u32 vga_control_regs[6] =
-{
- mmD1VGA_CONTROL,
- mmD2VGA_CONTROL,
- mmD3VGA_CONTROL,
- mmD4VGA_CONTROL,
- mmD5VGA_CONTROL,
- mmD6VGA_CONTROL,
-};
-
-static void dce_v11_0_vga_enable(struct drm_crtc *crtc, bool enable)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- u32 vga_control;
-
- vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
- if (enable)
- WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1);
- else
- WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control);
-}
-
-static void dce_v11_0_grph_enable(struct drm_crtc *crtc, bool enable)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
-
- if (enable)
- WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
- else
- WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0);
-}
-
-static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int x, int y, int atomic)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct drm_framebuffer *target_fb;
- struct drm_gem_object *obj;
- struct amdgpu_bo *abo;
- uint64_t fb_location, tiling_flags;
- uint32_t fb_format, fb_pitch_pixels;
- u32 fb_swap = REG_SET_FIELD(0, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, ENDIAN_NONE);
- u32 pipe_config;
- u32 tmp, viewport_w, viewport_h;
- int r;
- bool bypass_lut = false;
-
- /* no fb bound */
- if (!atomic && !crtc->primary->fb) {
- DRM_DEBUG_KMS("No FB bound\n");
- return 0;
- }
-
- if (atomic)
- target_fb = fb;
- else
- target_fb = crtc->primary->fb;
-
- /* If atomic, assume fb object is pinned & idle & fenced and
- * just update base pointers
- */
- obj = target_fb->obj[0];
- abo = gem_to_amdgpu_bo(obj);
- r = amdgpu_bo_reserve(abo, false);
- if (unlikely(r != 0))
- return r;
-
- if (!atomic) {
- abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
- r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
- if (unlikely(r != 0)) {
- amdgpu_bo_unreserve(abo);
- return -EINVAL;
- }
- }
- fb_location = amdgpu_bo_gpu_offset(abo);
-
- amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
- amdgpu_bo_unreserve(abo);
-
- pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
-
- switch (target_fb->format->format) {
- case DRM_FORMAT_C8:
- fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 0);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
- break;
- case DRM_FORMAT_XRGB4444:
- case DRM_FORMAT_ARGB4444:
- fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 2);
-#ifdef __BIG_ENDIAN
- fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
- ENDIAN_8IN16);
-#endif
- break;
- case DRM_FORMAT_XRGB1555:
- case DRM_FORMAT_ARGB1555:
- fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
-#ifdef __BIG_ENDIAN
- fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
- ENDIAN_8IN16);
-#endif
- break;
- case DRM_FORMAT_BGRX5551:
- case DRM_FORMAT_BGRA5551:
- fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 5);
-#ifdef __BIG_ENDIAN
- fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
- ENDIAN_8IN16);
-#endif
- break;
- case DRM_FORMAT_RGB565:
- fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1);
-#ifdef __BIG_ENDIAN
- fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
- ENDIAN_8IN16);
-#endif
- break;
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_ARGB8888:
- fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
-#ifdef __BIG_ENDIAN
- fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
- ENDIAN_8IN32);
-#endif
- break;
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_ARGB2101010:
- fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1);
-#ifdef __BIG_ENDIAN
- fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
- ENDIAN_8IN32);
-#endif
-		/* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
- bypass_lut = true;
- break;
- case DRM_FORMAT_BGRX1010102:
- case DRM_FORMAT_BGRA1010102:
- fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 4);
-#ifdef __BIG_ENDIAN
- fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
- ENDIAN_8IN32);
-#endif
-		/* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
- bypass_lut = true;
- break;
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_ABGR8888:
- fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
- fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_RED_CROSSBAR, 2);
- fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_BLUE_CROSSBAR, 2);
-#ifdef __BIG_ENDIAN
- fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
- ENDIAN_8IN32);
-#endif
- break;
- default:
- DRM_ERROR("Unsupported screen format %p4cc\n",
- &target_fb->format->format);
- return -EINVAL;
- }
-
- if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
- unsigned bankw, bankh, mtaspect, tile_split, num_banks;
-
- bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
- bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
- mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
- tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
- num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
-
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_NUM_BANKS, num_banks);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE,
- ARRAY_2D_TILED_THIN1);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_TILE_SPLIT,
- tile_split);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_WIDTH, bankw);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_HEIGHT, bankh);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MACRO_TILE_ASPECT,
- mtaspect);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MICRO_TILE_MODE,
- ADDR_SURF_MICRO_TILING_DISPLAY);
- } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE,
- ARRAY_1D_TILED_THIN1);
- }
-
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_PIPE_CONFIG,
- pipe_config);
-
- dce_v11_0_vga_enable(crtc, false);
-
- /* Make sure surface address is updated at vertical blank rather than
- * horizontal blank
- */
- tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
- GRPH_SURFACE_UPDATE_H_RETRACE_EN, 0);
- WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-
- WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
- upper_32_bits(fb_location));
- WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
- upper_32_bits(fb_location));
- WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
- (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
- WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
- (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK);
- WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
- WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
-
- /*
-	 * The LUT only has 256 slots for indexing by an 8 bpc fb. Bypass the LUT
-	 * for > 8 bpc scanout to avoid truncating fb indices to their 8 MSBs and
-	 * to retain the full precision throughout the pipeline.
- */
- tmp = RREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset);
- if (bypass_lut)
- tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 1);
- else
- tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 0);
- WREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset, tmp);
-
- if (bypass_lut)
- DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
-
- WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
- WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
- WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
- WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
- WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
- WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
-
- fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0];
- WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
-
- dce_v11_0_grph_enable(crtc, true);
-
- WREG32(mmLB_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
- target_fb->height);
-
- x &= ~3;
- y &= ~1;
- WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
- (x << 16) | y);
- viewport_w = crtc->mode.hdisplay;
- viewport_h = (crtc->mode.vdisplay + 1) & ~1;
- WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
- (viewport_w << 16) | viewport_h);
-
- /* set pageflip to happen anywhere in vblank interval */
- WREG32(mmCRTC_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
-
- if (!atomic && fb && fb != crtc->primary->fb) {
- abo = gem_to_amdgpu_bo(fb->obj[0]);
- r = amdgpu_bo_reserve(abo, true);
- if (unlikely(r != 0))
- return r;
- amdgpu_bo_unpin(abo);
- amdgpu_bo_unreserve(abo);
- }
-
- /* Bytes per pixel may have changed */
- dce_v11_0_bandwidth_update(adev);
-
- return 0;
-}
-
-static void dce_v11_0_set_interleave(struct drm_crtc *crtc,
- struct drm_display_mode *mode)
-{
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- u32 tmp;
-
- tmp = RREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset);
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 1);
- else
- tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 0);
- WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, tmp);
-}
-
-static void dce_v11_0_crtc_load_lut(struct drm_crtc *crtc)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- u16 *r, *g, *b;
- int i;
- u32 tmp;
-
- DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
-
- tmp = RREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, INPUT_CSC_CONTROL, INPUT_CSC_GRPH_MODE, 0);
- WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-
- tmp = RREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_BYPASS, 1);
- WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-
- tmp = RREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, INPUT_GAMMA_CONTROL, GRPH_INPUT_GAMMA_MODE, 0);
- WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-
- WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
-
- WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
- WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
- WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
-
- WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
- WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
- WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
-
- WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
- WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
-
- WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
- r = crtc->gamma_store;
- g = r + crtc->gamma_size;
- b = g + crtc->gamma_size;
- for (i = 0; i < 256; i++) {
- WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
- ((*r++ & 0xffc0) << 14) |
- ((*g++ & 0xffc0) << 4) |
- (*b++ >> 6));
- }
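-
-	/* DC_LUT_30_COLOR packs 10 bits per channel: the top 10 bits of each
-	 * 16-bit gamma value land in bits 29:20 (red), 19:10 (green) and
-	 * 9:0 (blue).
-	 */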
-
- tmp = RREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, GRPH_DEGAMMA_MODE, 0);
- tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, CURSOR_DEGAMMA_MODE, 0);
- tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, CURSOR2_DEGAMMA_MODE, 0);
- WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-
- tmp = RREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, GAMUT_REMAP_CONTROL, GRPH_GAMUT_REMAP_MODE, 0);
- WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-
- tmp = RREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, REGAMMA_CONTROL, GRPH_REGAMMA_MODE, 0);
- WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-
- tmp = RREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, OUTPUT_CSC_CONTROL, OUTPUT_CSC_GRPH_MODE, 0);
- WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-
- /* XXX match this to the depth of the crtc fmt block, move to modeset? */
- WREG32(mmDENORM_CONTROL + amdgpu_crtc->crtc_offset, 0);
- /* XXX this only needs to be programmed once per crtc at startup,
- * not sure where the best place for it is
- */
- tmp = RREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, ALPHA_CONTROL, CURSOR_ALPHA_BLND_ENA, 1);
- WREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-}
-
-static int dce_v11_0_pick_dig_encoder(struct drm_encoder *encoder)
-{
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
-
- switch (amdgpu_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- if (dig->linkb)
- return 1;
- else
- return 0;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- if (dig->linkb)
- return 3;
- else
- return 2;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- if (dig->linkb)
- return 5;
- else
- return 4;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
- return 6;
- default:
- DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
- return 0;
- }
-}
-
-/**
- * dce_v11_0_pick_pll - Allocate a PPLL for use by the crtc.
- *
- * @crtc: drm crtc
- *
- * Returns the PPLL (Pixel PLL) to be used by the crtc. For DP monitors
- * a single PPLL can be used for all DP crtcs/encoders. For non-DP
- * monitors a dedicated PPLL must be used. If a particular board has
- * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
- * as there is no need to program the PLL itself. If we are not able to
- * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
- * avoid messing up an existing monitor.
- *
- * Asic specific PLL information
- *
- * DCE 10.x
- * Tonga
- * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP)
- * CI
- * - PPLL0, PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
- *
- */
-static u32 dce_v11_0_pick_pll(struct drm_crtc *crtc)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- u32 pll_in_use;
- int pll;
-
- if ((adev->asic_type == CHIP_POLARIS10) ||
- (adev->asic_type == CHIP_POLARIS11) ||
- (adev->asic_type == CHIP_POLARIS12) ||
- (adev->asic_type == CHIP_VEGAM)) {
- struct amdgpu_encoder *amdgpu_encoder =
- to_amdgpu_encoder(amdgpu_crtc->encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
-
- if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
- return ATOM_DP_DTO;
-
- switch (amdgpu_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- if (dig->linkb)
- return ATOM_COMBOPHY_PLL1;
- else
- return ATOM_COMBOPHY_PLL0;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- if (dig->linkb)
- return ATOM_COMBOPHY_PLL3;
- else
- return ATOM_COMBOPHY_PLL2;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- if (dig->linkb)
- return ATOM_COMBOPHY_PLL5;
- else
- return ATOM_COMBOPHY_PLL4;
- default:
- DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
- return ATOM_PPLL_INVALID;
- }
- }
-
- if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
- if (adev->clock.dp_extclk)
- /* skip PPLL programming if using ext clock */
- return ATOM_PPLL_INVALID;
- else {
- /* use the same PPLL for all DP monitors */
- pll = amdgpu_pll_get_shared_dp_ppll(crtc);
- if (pll != ATOM_PPLL_INVALID)
- return pll;
- }
- } else {
- /* use the same PPLL for all monitors with the same clock */
- pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
- if (pll != ATOM_PPLL_INVALID)
- return pll;
- }
-
- /* XXX need to determine what plls are available on each DCE11 part */
- pll_in_use = amdgpu_pll_get_use_mask(crtc);
- if (adev->flags & AMD_IS_APU) {
- if (!(pll_in_use & (1 << ATOM_PPLL1)))
- return ATOM_PPLL1;
- if (!(pll_in_use & (1 << ATOM_PPLL0)))
- return ATOM_PPLL0;
- DRM_ERROR("unable to allocate a PPLL\n");
- return ATOM_PPLL_INVALID;
- } else {
- if (!(pll_in_use & (1 << ATOM_PPLL2)))
- return ATOM_PPLL2;
- if (!(pll_in_use & (1 << ATOM_PPLL1)))
- return ATOM_PPLL1;
- if (!(pll_in_use & (1 << ATOM_PPLL0)))
- return ATOM_PPLL0;
- DRM_ERROR("unable to allocate a PPLL\n");
- return ATOM_PPLL_INVALID;
- }
- return ATOM_PPLL_INVALID;
-}
-
-static void dce_v11_0_lock_cursor(struct drm_crtc *crtc, bool lock)
-{
- struct amdgpu_device *adev = drm_to_adev(crtc->dev);
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- uint32_t cur_lock;
-
- cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
- if (lock)
- cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 1);
- else
- cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 0);
- WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
-}
-
-static void dce_v11_0_hide_cursor(struct drm_crtc *crtc)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = drm_to_adev(crtc->dev);
- u32 tmp;
-
- tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 0);
- WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-}
-
-static void dce_v11_0_show_cursor(struct drm_crtc *crtc)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = drm_to_adev(crtc->dev);
- u32 tmp;
-
- WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
- upper_32_bits(amdgpu_crtc->cursor_addr));
- WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
- lower_32_bits(amdgpu_crtc->cursor_addr));
-
- tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1);
- tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2);
- WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-}
-
-static int dce_v11_0_cursor_move_locked(struct drm_crtc *crtc,
- int x, int y)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = drm_to_adev(crtc->dev);
- int xorigin = 0, yorigin = 0;
-
- amdgpu_crtc->cursor_x = x;
- amdgpu_crtc->cursor_y = y;
-
-	/* avivo cursors are offset into the total surface */
- x += crtc->x;
- y += crtc->y;
- DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
-
- if (x < 0) {
- xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
- x = 0;
- }
- if (y < 0) {
- yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
- y = 0;
- }
-
- WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
- WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
- WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
- ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
-
- return 0;
-}
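-
-/*
- * Illustrative example (assumed values): a cursor requested at x = -5 is
- * programmed at x = 0 with a hot spot offset of 5, so the visible part of
- * the cursor image still lines up with the intended screen position.
- */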
-
-static int dce_v11_0_crtc_cursor_move(struct drm_crtc *crtc,
- int x, int y)
-{
- int ret;
-
- dce_v11_0_lock_cursor(crtc, true);
- ret = dce_v11_0_cursor_move_locked(crtc, x, y);
- dce_v11_0_lock_cursor(crtc, false);
-
- return ret;
-}
-
-static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
- struct drm_file *file_priv,
- uint32_t handle,
- uint32_t width,
- uint32_t height,
- int32_t hot_x,
- int32_t hot_y)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_gem_object *obj;
- struct amdgpu_bo *aobj;
- int ret;
-
- if (!handle) {
- /* turn off cursor */
- dce_v11_0_hide_cursor(crtc);
- obj = NULL;
- goto unpin;
- }
-
- if ((width > amdgpu_crtc->max_cursor_width) ||
- (height > amdgpu_crtc->max_cursor_height)) {
- DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
- return -EINVAL;
- }
-
- obj = drm_gem_object_lookup(file_priv, handle);
- if (!obj) {
- DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
- return -ENOENT;
- }
-
- aobj = gem_to_amdgpu_bo(obj);
- ret = amdgpu_bo_reserve(aobj, false);
- if (ret != 0) {
- drm_gem_object_put(obj);
- return ret;
- }
-
- aobj->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
- ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
- amdgpu_bo_unreserve(aobj);
- if (ret) {
- DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
- drm_gem_object_put(obj);
- return ret;
- }
- amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
-
- dce_v11_0_lock_cursor(crtc, true);
-
- if (width != amdgpu_crtc->cursor_width ||
- height != amdgpu_crtc->cursor_height ||
- hot_x != amdgpu_crtc->cursor_hot_x ||
- hot_y != amdgpu_crtc->cursor_hot_y) {
- int x, y;
-
- x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
- y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
-
- dce_v11_0_cursor_move_locked(crtc, x, y);
-
- amdgpu_crtc->cursor_width = width;
- amdgpu_crtc->cursor_height = height;
- amdgpu_crtc->cursor_hot_x = hot_x;
- amdgpu_crtc->cursor_hot_y = hot_y;
- }
-
- dce_v11_0_show_cursor(crtc);
- dce_v11_0_lock_cursor(crtc, false);
-
-unpin:
- if (amdgpu_crtc->cursor_bo) {
- struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
- ret = amdgpu_bo_reserve(aobj, true);
- if (likely(ret == 0)) {
- amdgpu_bo_unpin(aobj);
- amdgpu_bo_unreserve(aobj);
- }
- drm_gem_object_put(amdgpu_crtc->cursor_bo);
- }
-
- amdgpu_crtc->cursor_bo = obj;
- return 0;
-}
-
-static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-
- if (amdgpu_crtc->cursor_bo) {
- dce_v11_0_lock_cursor(crtc, true);
-
- dce_v11_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
- amdgpu_crtc->cursor_y);
-
- dce_v11_0_show_cursor(crtc);
-
- dce_v11_0_lock_cursor(crtc, false);
- }
-}
-
-static int dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t size,
- struct drm_modeset_acquire_ctx *ctx)
-{
- dce_v11_0_crtc_load_lut(crtc);
-
- return 0;
-}
-
-static void dce_v11_0_crtc_destroy(struct drm_crtc *crtc)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-
- drm_crtc_cleanup(crtc);
- kfree(amdgpu_crtc);
-}
-
-static const struct drm_crtc_funcs dce_v11_0_crtc_funcs = {
- .cursor_set2 = dce_v11_0_crtc_cursor_set2,
- .cursor_move = dce_v11_0_crtc_cursor_move,
- .gamma_set = dce_v11_0_crtc_gamma_set,
- .set_config = amdgpu_display_crtc_set_config,
- .destroy = dce_v11_0_crtc_destroy,
- .page_flip_target = amdgpu_display_crtc_page_flip_target,
- .get_vblank_counter = amdgpu_get_vblank_counter_kms,
- .enable_vblank = amdgpu_enable_vblank_kms,
- .disable_vblank = amdgpu_disable_vblank_kms,
- .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
-};
-
-static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- unsigned type;
-
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- amdgpu_crtc->enabled = true;
- amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
- dce_v11_0_vga_enable(crtc, true);
- amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
- dce_v11_0_vga_enable(crtc, false);
- /* Make sure VBLANK and PFLIP interrupts are still enabled */
- type = amdgpu_display_crtc_idx_to_irq_type(adev,
- amdgpu_crtc->crtc_id);
- amdgpu_irq_update(adev, &adev->crtc_irq, type);
- amdgpu_irq_update(adev, &adev->pageflip_irq, type);
- drm_crtc_vblank_on(crtc);
- dce_v11_0_crtc_load_lut(crtc);
- break;
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- case DRM_MODE_DPMS_OFF:
- drm_crtc_vblank_off(crtc);
- if (amdgpu_crtc->enabled) {
- dce_v11_0_vga_enable(crtc, true);
- amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
- dce_v11_0_vga_enable(crtc, false);
- }
- amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
- amdgpu_crtc->enabled = false;
- break;
- }
- /* adjust pm to dpms */
- amdgpu_dpm_compute_clocks(adev);
-}
-
-static void dce_v11_0_crtc_prepare(struct drm_crtc *crtc)
-{
- /* disable crtc pair power gating before programming */
- amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
- amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
- dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
-}
-
-static void dce_v11_0_crtc_commit(struct drm_crtc *crtc)
-{
- dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
- amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
-}
-
-static void dce_v11_0_crtc_disable(struct drm_crtc *crtc)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_atom_ss ss;
- int i;
-
- dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
- if (crtc->primary->fb) {
- int r;
- struct amdgpu_bo *abo;
-
- abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
- r = amdgpu_bo_reserve(abo, true);
- if (unlikely(r))
- DRM_ERROR("failed to reserve abo before unpin\n");
- else {
- amdgpu_bo_unpin(abo);
- amdgpu_bo_unreserve(abo);
- }
- }
- /* disable the GRPH */
- dce_v11_0_grph_enable(crtc, false);
-
- amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);
-
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- if (adev->mode_info.crtcs[i] &&
- adev->mode_info.crtcs[i]->enabled &&
- i != amdgpu_crtc->crtc_id &&
- amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
-			/* another enabled crtc is using this pll, so
-			 * don't turn off the pll
-			 */
- goto done;
- }
- }
-
- switch (amdgpu_crtc->pll_id) {
- case ATOM_PPLL0:
- case ATOM_PPLL1:
- case ATOM_PPLL2:
- /* disable the ppll */
- amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
- 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
- break;
- case ATOM_COMBOPHY_PLL0:
- case ATOM_COMBOPHY_PLL1:
- case ATOM_COMBOPHY_PLL2:
- case ATOM_COMBOPHY_PLL3:
- case ATOM_COMBOPHY_PLL4:
- case ATOM_COMBOPHY_PLL5:
- /* disable the ppll */
- amdgpu_atombios_crtc_program_pll(crtc, ATOM_CRTC_INVALID, amdgpu_crtc->pll_id,
- 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
- break;
- default:
- break;
- }
-done:
- amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
- amdgpu_crtc->adjusted_clock = 0;
- amdgpu_crtc->encoder = NULL;
- amdgpu_crtc->connector = NULL;
-}
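
The loop above keeps the PLL powered whenever another enabled CRTC shares it. A minimal sketch of that check as a standalone predicate (the helper name is hypothetical; the fields match the code above):

/* Hypothetical helper: true if another enabled CRTC still uses this PLL. */
static bool dce_v11_0_pll_in_use_elsewhere(struct amdgpu_device *adev,
					   struct amdgpu_crtc *amdgpu_crtc)
{
	int i;

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		struct amdgpu_crtc *other = adev->mode_info.crtcs[i];

		if (other && other->enabled && i != amdgpu_crtc->crtc_id &&
		    other->pll_id == amdgpu_crtc->pll_id)
			return true;
	}
	return false;
}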
-
-static int dce_v11_0_crtc_mode_set(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode,
- int x, int y, struct drm_framebuffer *old_fb)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
-
- if (!amdgpu_crtc->adjusted_clock)
- return -EINVAL;
-
- if ((adev->asic_type == CHIP_POLARIS10) ||
- (adev->asic_type == CHIP_POLARIS11) ||
- (adev->asic_type == CHIP_POLARIS12) ||
- (adev->asic_type == CHIP_VEGAM)) {
- struct amdgpu_encoder *amdgpu_encoder =
- to_amdgpu_encoder(amdgpu_crtc->encoder);
- int encoder_mode =
- amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder);
-
- /* SetPixelClock calculates the plls and ss values now */
- amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id,
- amdgpu_crtc->pll_id,
- encoder_mode, amdgpu_encoder->encoder_id,
- adjusted_mode->clock, 0, 0, 0, 0,
- amdgpu_crtc->bpc, amdgpu_crtc->ss_enabled, &amdgpu_crtc->ss);
- } else {
- amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
- }
- amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
- dce_v11_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
- amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
- amdgpu_atombios_crtc_scaler_setup(crtc);
- dce_v11_0_cursor_reset(crtc);
-	/* update the hw mode for dpm */
- amdgpu_crtc->hw_mode = *adjusted_mode;
-
- return 0;
-}
-
-static bool dce_v11_0_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct drm_encoder *encoder;
-
- /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (encoder->crtc == crtc) {
- amdgpu_crtc->encoder = encoder;
- amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
- break;
- }
- }
- if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
- amdgpu_crtc->encoder = NULL;
- amdgpu_crtc->connector = NULL;
- return false;
- }
- if (!amdgpu_display_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
- return false;
- if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
- return false;
- /* pick pll */
- amdgpu_crtc->pll_id = dce_v11_0_pick_pll(crtc);
- /* if we can't get a PPLL for a non-DP encoder, fail */
- if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
- !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
- return false;
-
- return true;
-}
-
-static int dce_v11_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
-{
- return dce_v11_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
-}
-
-static int dce_v11_0_crtc_set_base_atomic(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int x, int y, enum mode_set_atomic state)
-{
- return dce_v11_0_crtc_do_set_base(crtc, fb, x, y, 1);
-}
-
-static const struct drm_crtc_helper_funcs dce_v11_0_crtc_helper_funcs = {
- .dpms = dce_v11_0_crtc_dpms,
- .mode_fixup = dce_v11_0_crtc_mode_fixup,
- .mode_set = dce_v11_0_crtc_mode_set,
- .mode_set_base = dce_v11_0_crtc_set_base,
- .mode_set_base_atomic = dce_v11_0_crtc_set_base_atomic,
- .prepare = dce_v11_0_crtc_prepare,
- .commit = dce_v11_0_crtc_commit,
- .disable = dce_v11_0_crtc_disable,
- .get_scanout_position = amdgpu_crtc_get_scanout_position,
-};
-
-static void dce_v11_0_panic_flush(struct drm_plane *plane)
-{
- struct drm_framebuffer *fb;
- struct amdgpu_crtc *amdgpu_crtc;
- struct amdgpu_device *adev;
- uint32_t fb_format;
-
- if (!plane->fb)
- return;
-
- fb = plane->fb;
- amdgpu_crtc = to_amdgpu_crtc(plane->crtc);
- adev = drm_to_adev(fb->dev);
-
- /* Disable DC tiling */
- fb_format = RREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset);
- fb_format &= ~GRPH_CONTROL__GRPH_ARRAY_MODE_MASK;
- WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
-}
-
-static const struct drm_plane_helper_funcs dce_v11_0_drm_primary_plane_helper_funcs = {
- .get_scanout_buffer = amdgpu_display_get_scanout_buffer,
- .panic_flush = dce_v11_0_panic_flush,
-};
-
-static int dce_v11_0_crtc_init(struct amdgpu_device *adev, int index)
-{
- struct amdgpu_crtc *amdgpu_crtc;
-
- amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
- (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
- if (amdgpu_crtc == NULL)
- return -ENOMEM;
-
- drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v11_0_crtc_funcs);
-
- drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
- amdgpu_crtc->crtc_id = index;
- adev->mode_info.crtcs[index] = amdgpu_crtc;
-
- amdgpu_crtc->max_cursor_width = 128;
- amdgpu_crtc->max_cursor_height = 128;
- adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
- adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
-
- switch (amdgpu_crtc->crtc_id) {
- case 0:
- default:
- amdgpu_crtc->crtc_offset = CRTC0_REGISTER_OFFSET;
- break;
- case 1:
- amdgpu_crtc->crtc_offset = CRTC1_REGISTER_OFFSET;
- break;
- case 2:
- amdgpu_crtc->crtc_offset = CRTC2_REGISTER_OFFSET;
- break;
- case 3:
- amdgpu_crtc->crtc_offset = CRTC3_REGISTER_OFFSET;
- break;
- case 4:
- amdgpu_crtc->crtc_offset = CRTC4_REGISTER_OFFSET;
- break;
- case 5:
- amdgpu_crtc->crtc_offset = CRTC5_REGISTER_OFFSET;
- break;
- }
-
- amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
- amdgpu_crtc->adjusted_clock = 0;
- amdgpu_crtc->encoder = NULL;
- amdgpu_crtc->connector = NULL;
- drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v11_0_crtc_helper_funcs);
- drm_plane_helper_add(amdgpu_crtc->base.primary, &dce_v11_0_drm_primary_plane_helper_funcs);
-
- return 0;
-}
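
The crtc_offset switch above is a one-to-one index-to-offset mapping; the interrupt helpers later in the file index a crtc_offsets[] array for the same purpose. An equivalent table form, assuming the six CRTCn_REGISTER_OFFSET values sit in index order:

	/* Sketch: table form of the crtc_offset switch; index is already
	 * bounded by adev->mode_info.num_crtc in the caller. */
	static const u32 dce_v11_0_crtc_reg_offsets[] = {
		CRTC0_REGISTER_OFFSET, CRTC1_REGISTER_OFFSET,
		CRTC2_REGISTER_OFFSET, CRTC3_REGISTER_OFFSET,
		CRTC4_REGISTER_OFFSET, CRTC5_REGISTER_OFFSET,
	};
	amdgpu_crtc->crtc_offset = dce_v11_0_crtc_reg_offsets[index];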
-
-static int dce_v11_0_early_init(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
-
- adev->audio_endpt_rreg = &dce_v11_0_audio_endpt_rreg;
- adev->audio_endpt_wreg = &dce_v11_0_audio_endpt_wreg;
-
- dce_v11_0_set_display_funcs(adev);
-
- adev->mode_info.num_crtc = dce_v11_0_get_num_crtc(adev);
-
- switch (adev->asic_type) {
-	case CHIP_CARRIZO:
-	case CHIP_STONEY:
-		adev->mode_info.num_hpd = 6;
-		adev->mode_info.num_dig = 9;
-		break;
- case CHIP_POLARIS10:
- case CHIP_VEGAM:
- adev->mode_info.num_hpd = 6;
- adev->mode_info.num_dig = 6;
- break;
- case CHIP_POLARIS11:
- case CHIP_POLARIS12:
- adev->mode_info.num_hpd = 5;
- adev->mode_info.num_dig = 5;
- break;
- default:
- /* FIXME: not supported yet */
- return -EINVAL;
- }
-
- dce_v11_0_set_irq_funcs(adev);
-
- return 0;
-}
-
-static int dce_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
-{
- int r, i;
- struct amdgpu_device *adev = ip_block->adev;
-
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
- if (r)
- return r;
- }
-
- for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; i < 20; i += 2) {
- r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq);
- if (r)
- return r;
- }
-
- /* HPD hotplug */
- r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
- if (r)
- return r;
-
- adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs;
-
- adev_to_drm(adev)->mode_config.async_page_flip = true;
-
- adev_to_drm(adev)->mode_config.max_width = 16384;
- adev_to_drm(adev)->mode_config.max_height = 16384;
-
- adev_to_drm(adev)->mode_config.preferred_depth = 24;
- adev_to_drm(adev)->mode_config.prefer_shadow = 1;
-
- adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
-
- r = amdgpu_display_modeset_create_props(adev);
- if (r)
- return r;
-
- /* allocate crtcs */
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- r = dce_v11_0_crtc_init(adev, i);
- if (r)
- return r;
- }
-
- if (amdgpu_atombios_get_connector_info_from_object_table(adev))
- amdgpu_display_print_display_setup(adev_to_drm(adev));
- else
- return -EINVAL;
-
- /* setup afmt */
- r = dce_v11_0_afmt_init(adev);
- if (r)
- return r;
-
- r = dce_v11_0_audio_init(adev);
- if (r)
- return r;
-
- /* Disable vblank IRQs aggressively for power-saving */
- /* XXX: can this be enabled for DC? */
- adev_to_drm(adev)->vblank_disable_immediate = true;
-
- r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
- if (r)
- return r;
-
- INIT_DELAYED_WORK(&adev->hotplug_work,
- amdgpu_display_hotplug_work_func);
-
- drm_kms_helper_poll_init(adev_to_drm(adev));
-
- adev->mode_info.mode_config_initialized = true;
- return 0;
-}
-
-static int dce_v11_0_sw_fini(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
-
- drm_edid_free(adev->mode_info.bios_hardcoded_edid);
-
- drm_kms_helper_poll_fini(adev_to_drm(adev));
-
- dce_v11_0_audio_fini(adev);
-
- dce_v11_0_afmt_fini(adev);
-
- drm_mode_config_cleanup(adev_to_drm(adev));
- adev->mode_info.mode_config_initialized = false;
-
- return 0;
-}
-
-static int dce_v11_0_hw_init(struct amdgpu_ip_block *ip_block)
-{
- int i;
- struct amdgpu_device *adev = ip_block->adev;
-
- dce_v11_0_init_golden_registers(adev);
-
- /* disable vga render */
- dce_v11_0_set_vga_render_state(adev, false);
- /* init dig PHYs, disp eng pll */
- amdgpu_atombios_crtc_powergate_init(adev);
- amdgpu_atombios_encoder_init_dig(adev);
- if ((adev->asic_type == CHIP_POLARIS10) ||
- (adev->asic_type == CHIP_POLARIS11) ||
- (adev->asic_type == CHIP_POLARIS12) ||
- (adev->asic_type == CHIP_VEGAM)) {
- amdgpu_atombios_crtc_set_dce_clock(adev, adev->clock.default_dispclk,
- DCE_CLOCK_TYPE_DISPCLK, ATOM_GCK_DFS);
- amdgpu_atombios_crtc_set_dce_clock(adev, 0,
- DCE_CLOCK_TYPE_DPREFCLK, ATOM_GCK_DFS);
- } else {
- amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
- }
-
- /* initialize hpd */
- dce_v11_0_hpd_init(adev);
-
- for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
- dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
- }
-
- dce_v11_0_pageflip_interrupt_init(adev);
-
- return 0;
-}
-
-static int dce_v11_0_hw_fini(struct amdgpu_ip_block *ip_block)
-{
- int i;
- struct amdgpu_device *adev = ip_block->adev;
-
- dce_v11_0_hpd_fini(adev);
-
- for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
- dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
- }
-
- dce_v11_0_pageflip_interrupt_fini(adev);
-
- flush_delayed_work(&adev->hotplug_work);
-
- return 0;
-}
-
-static int dce_v11_0_suspend(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int r;
-
- r = amdgpu_display_suspend_helper(adev);
- if (r)
- return r;
-
- adev->mode_info.bl_level =
- amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
-
- return dce_v11_0_hw_fini(ip_block);
-}
-
-static int dce_v11_0_resume(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int ret;
-
- amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
- adev->mode_info.bl_level);
-
- ret = dce_v11_0_hw_init(ip_block);
-
- /* turn on the BL */
- if (adev->mode_info.bl_encoder) {
- u8 bl_level = amdgpu_display_backlight_get_level(adev,
- adev->mode_info.bl_encoder);
- amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
- bl_level);
- }
- if (ret)
- return ret;
-
- return amdgpu_display_resume_helper(adev);
-}
-
-static bool dce_v11_0_is_idle(struct amdgpu_ip_block *ip_block)
-{
- return true;
-}
-
-static int dce_v11_0_soft_reset(struct amdgpu_ip_block *ip_block)
-{
- u32 srbm_soft_reset = 0, tmp;
- struct amdgpu_device *adev = ip_block->adev;
-
- if (dce_v11_0_is_display_hung(adev))
- srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
-
- if (srbm_soft_reset) {
- tmp = RREG32(mmSRBM_SOFT_RESET);
- tmp |= srbm_soft_reset;
- dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32(mmSRBM_SOFT_RESET, tmp);
- tmp = RREG32(mmSRBM_SOFT_RESET);
-
- udelay(50);
-
- tmp &= ~srbm_soft_reset;
- WREG32(mmSRBM_SOFT_RESET, tmp);
- tmp = RREG32(mmSRBM_SOFT_RESET);
-
- /* Wait a little for things to settle down */
- udelay(50);
- }
- return 0;
-}
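
The body above is the usual assert/deassert soft-reset dance: set the reset bits, read back to post the write, wait, clear the bits, read back, wait again. A minimal generic sketch (hypothetical helper, assuming only that a read of mmSRBM_SOFT_RESET posts the preceding write):

static void srbm_soft_reset_toggle(struct amdgpu_device *adev, u32 mask)
{
	u32 tmp = RREG32(mmSRBM_SOFT_RESET);

	WREG32(mmSRBM_SOFT_RESET, tmp | mask);	/* assert reset */
	RREG32(mmSRBM_SOFT_RESET);		/* post the write */
	udelay(50);
	WREG32(mmSRBM_SOFT_RESET, tmp & ~mask);	/* deassert reset */
	RREG32(mmSRBM_SOFT_RESET);
	udelay(50);				/* let things settle down */
}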
-
-static void dce_v11_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
- int crtc,
- enum amdgpu_interrupt_state state)
-{
- u32 lb_interrupt_mask;
-
- if (crtc >= adev->mode_info.num_crtc) {
- DRM_DEBUG("invalid crtc %d\n", crtc);
- return;
- }
-
- switch (state) {
- case AMDGPU_IRQ_STATE_DISABLE:
- lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
- lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
- VBLANK_INTERRUPT_MASK, 0);
- WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
- break;
- case AMDGPU_IRQ_STATE_ENABLE:
- lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
- lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
- VBLANK_INTERRUPT_MASK, 1);
- WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
- break;
- default:
- break;
- }
-}
-
-static void dce_v11_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
- int crtc,
- enum amdgpu_interrupt_state state)
-{
- u32 lb_interrupt_mask;
-
- if (crtc >= adev->mode_info.num_crtc) {
- DRM_DEBUG("invalid crtc %d\n", crtc);
- return;
- }
-
- switch (state) {
- case AMDGPU_IRQ_STATE_DISABLE:
- lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
- lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
- VLINE_INTERRUPT_MASK, 0);
- WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
- break;
- case AMDGPU_IRQ_STATE_ENABLE:
- lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
- lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
- VLINE_INTERRUPT_MASK, 1);
- WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
- break;
- default:
- break;
- }
-}
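
Both interrupt-state helpers use the same read-modify-write shape. Roughly what one REG_SET_FIELD() call expands to, assuming the usual <REG>__<FIELD>_MASK / <REG>__<FIELD>__SHIFT register-header layout:

	/* REG_SET_FIELD(tmp, LB_INTERRUPT_MASK, VBLANK_INTERRUPT_MASK, 1): */
	tmp &= ~LB_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK_MASK;
	tmp |= (1 << LB_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK__SHIFT) &
	       LB_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK_MASK;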
-
-static int dce_v11_0_set_hpd_irq_state(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- unsigned hpd,
- enum amdgpu_interrupt_state state)
-{
- u32 tmp;
-
- if (hpd >= adev->mode_info.num_hpd) {
- DRM_DEBUG("invalid hpd %d\n", hpd);
- return 0;
- }
-
- switch (state) {
- case AMDGPU_IRQ_STATE_DISABLE:
- tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
- tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
- WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
- break;
- case AMDGPU_IRQ_STATE_ENABLE:
- tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
- tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 1);
- WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
- break;
- default:
- break;
- }
-
- return 0;
-}
-
-static int dce_v11_0_set_crtc_irq_state(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- unsigned type,
- enum amdgpu_interrupt_state state)
-{
- switch (type) {
- case AMDGPU_CRTC_IRQ_VBLANK1:
- dce_v11_0_set_crtc_vblank_interrupt_state(adev, 0, state);
- break;
- case AMDGPU_CRTC_IRQ_VBLANK2:
- dce_v11_0_set_crtc_vblank_interrupt_state(adev, 1, state);
- break;
- case AMDGPU_CRTC_IRQ_VBLANK3:
- dce_v11_0_set_crtc_vblank_interrupt_state(adev, 2, state);
- break;
- case AMDGPU_CRTC_IRQ_VBLANK4:
- dce_v11_0_set_crtc_vblank_interrupt_state(adev, 3, state);
- break;
- case AMDGPU_CRTC_IRQ_VBLANK5:
- dce_v11_0_set_crtc_vblank_interrupt_state(adev, 4, state);
- break;
- case AMDGPU_CRTC_IRQ_VBLANK6:
- dce_v11_0_set_crtc_vblank_interrupt_state(adev, 5, state);
- break;
- case AMDGPU_CRTC_IRQ_VLINE1:
- dce_v11_0_set_crtc_vline_interrupt_state(adev, 0, state);
- break;
- case AMDGPU_CRTC_IRQ_VLINE2:
- dce_v11_0_set_crtc_vline_interrupt_state(adev, 1, state);
- break;
- case AMDGPU_CRTC_IRQ_VLINE3:
- dce_v11_0_set_crtc_vline_interrupt_state(adev, 2, state);
- break;
- case AMDGPU_CRTC_IRQ_VLINE4:
- dce_v11_0_set_crtc_vline_interrupt_state(adev, 3, state);
- break;
- case AMDGPU_CRTC_IRQ_VLINE5:
- dce_v11_0_set_crtc_vline_interrupt_state(adev, 4, state);
- break;
- case AMDGPU_CRTC_IRQ_VLINE6:
- dce_v11_0_set_crtc_vline_interrupt_state(adev, 5, state);
- break;
- default:
- break;
- }
- return 0;
-}
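
Since the VBLANK1..6 and VLINE1..6 interrupt types map to CRTC indices 0..5, the switch reduces to arithmetic. A sketch, assuming both runs of enum values are consecutive (the num_types computation in dce_v11_0_set_irq_funcs below relies on the same ordering):

	if (type >= AMDGPU_CRTC_IRQ_VBLANK1 && type <= AMDGPU_CRTC_IRQ_VBLANK6)
		dce_v11_0_set_crtc_vblank_interrupt_state(adev,
				type - AMDGPU_CRTC_IRQ_VBLANK1, state);
	else if (type >= AMDGPU_CRTC_IRQ_VLINE1 && type <= AMDGPU_CRTC_IRQ_VLINE6)
		dce_v11_0_set_crtc_vline_interrupt_state(adev,
				type - AMDGPU_CRTC_IRQ_VLINE1, state);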
-
-static int dce_v11_0_set_pageflip_irq_state(struct amdgpu_device *adev,
- struct amdgpu_irq_src *src,
- unsigned type,
- enum amdgpu_interrupt_state state)
-{
- u32 reg;
-
- if (type >= adev->mode_info.num_crtc) {
- DRM_ERROR("invalid pageflip crtc %d\n", type);
- return -EINVAL;
- }
-
- reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
- if (state == AMDGPU_IRQ_STATE_DISABLE)
- WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
- reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
- else
- WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
- reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
-
- return 0;
-}
-
-static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry)
-{
- unsigned long flags;
- unsigned crtc_id;
- struct amdgpu_crtc *amdgpu_crtc;
- struct amdgpu_flip_work *works;
-
- crtc_id = (entry->src_id - 8) >> 1;
- amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
-
- if (crtc_id >= adev->mode_info.num_crtc) {
- DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
- return -EINVAL;
- }
-
- if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
- GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
- WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
- GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
-
-	/* the IRQ can occur during the initial stage */
-	if (amdgpu_crtc == NULL)
- return 0;
-
- spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
- works = amdgpu_crtc->pflip_works;
-	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
- DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
- "AMDGPU_FLIP_SUBMITTED(%d)\n",
- amdgpu_crtc->pflip_status,
- AMDGPU_FLIP_SUBMITTED);
- spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
- return 0;
- }
-
- /* page flip completed. clean up */
- amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
- amdgpu_crtc->pflip_works = NULL;
-
-	/* wake up userspace */
-	if (works->event)
- drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
-
- spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
-
- drm_crtc_vblank_put(&amdgpu_crtc->base);
- schedule_work(&works->unpin_work);
-
- return 0;
-}
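
The crtc_id decode inverts the registration loop in sw_init, which walks pageflip source IDs upward from VISLANDS30_IV_SRCID_D1_GRPH_PFLIP in steps of two. A worked example, assuming that constant is 8 (consistent with the `(entry->src_id - 8) >> 1` above):

	/*   src_id  8 -> crtc_id (8  - 8) >> 1 = 0  (D1)
	 *   src_id 10 -> crtc_id (10 - 8) >> 1 = 1  (D2)
	 *   ...
	 *   src_id 18 -> crtc_id (18 - 8) >> 1 = 5  (D6)
	 */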
-
-static void dce_v11_0_hpd_int_ack(struct amdgpu_device *adev,
- int hpd)
-{
- u32 tmp;
-
- if (hpd >= adev->mode_info.num_hpd) {
- DRM_DEBUG("invalid hpd %d\n", hpd);
- return;
- }
-
- tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
- tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_ACK, 1);
- WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
-}
-
-static void dce_v11_0_crtc_vblank_int_ack(struct amdgpu_device *adev,
- int crtc)
-{
- u32 tmp;
-
- if (crtc < 0 || crtc >= adev->mode_info.num_crtc) {
- DRM_DEBUG("invalid crtc %d\n", crtc);
- return;
- }
-
- tmp = RREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc]);
- tmp = REG_SET_FIELD(tmp, LB_VBLANK_STATUS, VBLANK_ACK, 1);
- WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], tmp);
-}
-
-static void dce_v11_0_crtc_vline_int_ack(struct amdgpu_device *adev,
- int crtc)
-{
- u32 tmp;
-
- if (crtc < 0 || crtc >= adev->mode_info.num_crtc) {
- DRM_DEBUG("invalid crtc %d\n", crtc);
- return;
- }
-
- tmp = RREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc]);
- tmp = REG_SET_FIELD(tmp, LB_VLINE_STATUS, VLINE_ACK, 1);
- WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], tmp);
-}
-
-static int dce_v11_0_crtc_irq(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry)
-{
- unsigned crtc = entry->src_id - 1;
- uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
- unsigned int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev,
- crtc);
-
- switch (entry->src_data[0]) {
- case 0: /* vblank */
- if (disp_int & interrupt_status_offsets[crtc].vblank)
- dce_v11_0_crtc_vblank_int_ack(adev, crtc);
- else
- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
-
- if (amdgpu_irq_enabled(adev, source, irq_type)) {
- drm_handle_vblank(adev_to_drm(adev), crtc);
- }
- DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
-
- break;
- case 1: /* vline */
- if (disp_int & interrupt_status_offsets[crtc].vline)
- dce_v11_0_crtc_vline_int_ack(adev, crtc);
- else
- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
-
- DRM_DEBUG("IH: D%d vline\n", crtc + 1);
-
- break;
- default:
- DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
- break;
- }
-
- return 0;
-}
-
-static int dce_v11_0_hpd_irq(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry)
-{
- uint32_t disp_int, mask;
- unsigned hpd;
-
- if (entry->src_data[0] >= adev->mode_info.num_hpd) {
- DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
- return 0;
- }
-
- hpd = entry->src_data[0];
- disp_int = RREG32(interrupt_status_offsets[hpd].reg);
- mask = interrupt_status_offsets[hpd].hpd;
-
- if (disp_int & mask) {
- dce_v11_0_hpd_int_ack(adev, hpd);
- schedule_delayed_work(&adev->hotplug_work, 0);
- DRM_DEBUG("IH: HPD%d\n", hpd + 1);
- }
-
- return 0;
-}
-
-static int dce_v11_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
- enum amd_clockgating_state state)
-{
- return 0;
-}
-
-static int dce_v11_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state)
-{
- return 0;
-}
-
-static const struct amd_ip_funcs dce_v11_0_ip_funcs = {
- .name = "dce_v11_0",
- .early_init = dce_v11_0_early_init,
- .sw_init = dce_v11_0_sw_init,
- .sw_fini = dce_v11_0_sw_fini,
- .hw_init = dce_v11_0_hw_init,
- .hw_fini = dce_v11_0_hw_fini,
- .suspend = dce_v11_0_suspend,
- .resume = dce_v11_0_resume,
- .is_idle = dce_v11_0_is_idle,
- .soft_reset = dce_v11_0_soft_reset,
- .set_clockgating_state = dce_v11_0_set_clockgating_state,
- .set_powergating_state = dce_v11_0_set_powergating_state,
-};
-
-static void dce_v11_0_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
-
- amdgpu_encoder->pixel_clock = adjusted_mode->clock;
-
- /* need to call this here rather than in prepare() since we need some crtc info */
- amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
-
-	/* setting the scaler clears this on some chips */
- dce_v11_0_set_interleave(encoder->crtc, mode);
-
- if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
- dce_v11_0_afmt_enable(encoder, true);
- dce_v11_0_afmt_setmode(encoder, adjusted_mode);
- }
-}
-
-static void dce_v11_0_encoder_prepare(struct drm_encoder *encoder)
-{
- struct amdgpu_device *adev = drm_to_adev(encoder->dev);
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
-
- if ((amdgpu_encoder->active_device &
- (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
- (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
- ENCODER_OBJECT_ID_NONE)) {
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
- if (dig) {
- dig->dig_encoder = dce_v11_0_pick_dig_encoder(encoder);
- if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
- dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
- }
- }
-
- amdgpu_atombios_scratch_regs_lock(adev, true);
-
- if (connector) {
- struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
-
- /* select the clock/data port if it uses a router */
- if (amdgpu_connector->router.cd_valid)
- amdgpu_i2c_router_select_cd_port(amdgpu_connector);
-
- /* turn eDP panel on for mode set */
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
- amdgpu_atombios_encoder_set_edp_panel_power(connector,
- ATOM_TRANSMITTER_ACTION_POWER_ON);
- }
-
- /* this is needed for the pll/ss setup to work correctly in some cases */
- amdgpu_atombios_encoder_set_crtc_source(encoder);
- /* set up the FMT blocks */
- dce_v11_0_program_fmt(encoder);
-}
-
-static void dce_v11_0_encoder_commit(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
-
- /* need to call this here as we need the crtc set up */
- amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
- amdgpu_atombios_scratch_regs_lock(adev, false);
-}
-
-static void dce_v11_0_encoder_disable(struct drm_encoder *encoder)
-{
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig;
-
- amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
-
- if (amdgpu_atombios_encoder_is_digital(encoder)) {
- if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
- dce_v11_0_afmt_enable(encoder, false);
- dig = amdgpu_encoder->enc_priv;
- dig->dig_encoder = -1;
- }
- amdgpu_encoder->active_device = 0;
-}
-
-/* these are handled by the primary encoders */
-static void dce_v11_0_ext_prepare(struct drm_encoder *encoder)
-{
-
-}
-
-static void dce_v11_0_ext_commit(struct drm_encoder *encoder)
-{
-
-}
-
-static void
-dce_v11_0_ext_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
-
-}
-
-static void dce_v11_0_ext_disable(struct drm_encoder *encoder)
-{
-
-}
-
-static void
-dce_v11_0_ext_dpms(struct drm_encoder *encoder, int mode)
-{
-
-}
-
-static const struct drm_encoder_helper_funcs dce_v11_0_ext_helper_funcs = {
- .dpms = dce_v11_0_ext_dpms,
- .prepare = dce_v11_0_ext_prepare,
- .mode_set = dce_v11_0_ext_mode_set,
- .commit = dce_v11_0_ext_commit,
- .disable = dce_v11_0_ext_disable,
- /* no detect for TMDS/LVDS yet */
-};
-
-static const struct drm_encoder_helper_funcs dce_v11_0_dig_helper_funcs = {
- .dpms = amdgpu_atombios_encoder_dpms,
- .mode_fixup = amdgpu_atombios_encoder_mode_fixup,
- .prepare = dce_v11_0_encoder_prepare,
- .mode_set = dce_v11_0_encoder_mode_set,
- .commit = dce_v11_0_encoder_commit,
- .disable = dce_v11_0_encoder_disable,
- .detect = amdgpu_atombios_encoder_dig_detect,
-};
-
-static const struct drm_encoder_helper_funcs dce_v11_0_dac_helper_funcs = {
- .dpms = amdgpu_atombios_encoder_dpms,
- .mode_fixup = amdgpu_atombios_encoder_mode_fixup,
- .prepare = dce_v11_0_encoder_prepare,
- .mode_set = dce_v11_0_encoder_mode_set,
- .commit = dce_v11_0_encoder_commit,
- .detect = amdgpu_atombios_encoder_dac_detect,
-};
-
-static void dce_v11_0_encoder_destroy(struct drm_encoder *encoder)
-{
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
- amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
- kfree(amdgpu_encoder->enc_priv);
- drm_encoder_cleanup(encoder);
- kfree(amdgpu_encoder);
-}
-
-static const struct drm_encoder_funcs dce_v11_0_encoder_funcs = {
- .destroy = dce_v11_0_encoder_destroy,
-};
-
-static void dce_v11_0_encoder_add(struct amdgpu_device *adev,
- uint32_t encoder_enum,
- uint32_t supported_device,
- u16 caps)
-{
- struct drm_device *dev = adev_to_drm(adev);
- struct drm_encoder *encoder;
- struct amdgpu_encoder *amdgpu_encoder;
-
- /* see if we already added it */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- amdgpu_encoder = to_amdgpu_encoder(encoder);
- if (amdgpu_encoder->encoder_enum == encoder_enum) {
- amdgpu_encoder->devices |= supported_device;
- return;
- }
-	}
-
- /* add a new one */
- amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
- if (!amdgpu_encoder)
- return;
-
- encoder = &amdgpu_encoder->base;
- switch (adev->mode_info.num_crtc) {
- case 1:
- encoder->possible_crtcs = 0x1;
- break;
- case 2:
- default:
- encoder->possible_crtcs = 0x3;
- break;
- case 3:
- encoder->possible_crtcs = 0x7;
- break;
- case 4:
- encoder->possible_crtcs = 0xf;
- break;
- case 5:
- encoder->possible_crtcs = 0x1f;
- break;
- case 6:
- encoder->possible_crtcs = 0x3f;
- break;
- }
-
- amdgpu_encoder->enc_priv = NULL;
-
- amdgpu_encoder->encoder_enum = encoder_enum;
- amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
- amdgpu_encoder->devices = supported_device;
- amdgpu_encoder->rmx_type = RMX_OFF;
- amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
- amdgpu_encoder->is_ext_encoder = false;
- amdgpu_encoder->caps = caps;
-
- switch (amdgpu_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
- drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
- DRM_MODE_ENCODER_DAC, NULL);
- drm_encoder_helper_add(encoder, &dce_v11_0_dac_helper_funcs);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
- if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
- amdgpu_encoder->rmx_type = RMX_FULL;
- drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
- DRM_MODE_ENCODER_LVDS, NULL);
- amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
- } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
- drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
- DRM_MODE_ENCODER_DAC, NULL);
- amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
- } else {
- drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
- amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
- }
- drm_encoder_helper_add(encoder, &dce_v11_0_dig_helper_funcs);
- break;
- case ENCODER_OBJECT_ID_SI170B:
- case ENCODER_OBJECT_ID_CH7303:
- case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
- case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
- case ENCODER_OBJECT_ID_TITFP513:
- case ENCODER_OBJECT_ID_VT1623:
- case ENCODER_OBJECT_ID_HDMI_SI1930:
- case ENCODER_OBJECT_ID_TRAVIS:
- case ENCODER_OBJECT_ID_NUTMEG:
- /* these are handled by the primary encoders */
- amdgpu_encoder->is_ext_encoder = true;
- if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
- drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
- DRM_MODE_ENCODER_LVDS, NULL);
- else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
- drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
- DRM_MODE_ENCODER_DAC, NULL);
- else
- drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
- drm_encoder_helper_add(encoder, &dce_v11_0_ext_helper_funcs);
- break;
- }
-}
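
The possible_crtcs switch fills in a low bitmask with one bit per usable CRTC (0x1, 0x3, ... 0x3f). An equivalent closed form, valid for 1 <= num_crtc <= 6 (the switch's default arm additionally maps unexpected counts to 0x3):

	encoder->possible_crtcs = (1U << adev->mode_info.num_crtc) - 1;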
-
-static const struct amdgpu_display_funcs dce_v11_0_display_funcs = {
- .bandwidth_update = &dce_v11_0_bandwidth_update,
- .vblank_get_counter = &dce_v11_0_vblank_get_counter,
- .backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
- .backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
- .hpd_sense = &dce_v11_0_hpd_sense,
- .hpd_set_polarity = &dce_v11_0_hpd_set_polarity,
- .hpd_get_gpio_reg = &dce_v11_0_hpd_get_gpio_reg,
- .page_flip = &dce_v11_0_page_flip,
- .page_flip_get_scanoutpos = &dce_v11_0_crtc_get_scanoutpos,
- .add_encoder = &dce_v11_0_encoder_add,
- .add_connector = &amdgpu_connector_add,
-};
-
-static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev)
-{
- adev->mode_info.funcs = &dce_v11_0_display_funcs;
-}
-
-static const struct amdgpu_irq_src_funcs dce_v11_0_crtc_irq_funcs = {
- .set = dce_v11_0_set_crtc_irq_state,
- .process = dce_v11_0_crtc_irq,
-};
-
-static const struct amdgpu_irq_src_funcs dce_v11_0_pageflip_irq_funcs = {
- .set = dce_v11_0_set_pageflip_irq_state,
- .process = dce_v11_0_pageflip_irq,
-};
-
-static const struct amdgpu_irq_src_funcs dce_v11_0_hpd_irq_funcs = {
- .set = dce_v11_0_set_hpd_irq_state,
- .process = dce_v11_0_hpd_irq,
-};
-
-static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev)
-{
- if (adev->mode_info.num_crtc > 0)
- adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
- else
- adev->crtc_irq.num_types = 0;
- adev->crtc_irq.funcs = &dce_v11_0_crtc_irq_funcs;
-
- adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
- adev->pageflip_irq.funcs = &dce_v11_0_pageflip_irq_funcs;
-
- adev->hpd_irq.num_types = adev->mode_info.num_hpd;
- adev->hpd_irq.funcs = &dce_v11_0_hpd_irq_funcs;
-}
-
-const struct amdgpu_ip_block_version dce_v11_0_ip_block =
-{
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 11,
- .minor = 0,
- .rev = 0,
- .funcs = &dce_v11_0_ip_funcs,
-};
-
-const struct amdgpu_ip_block_version dce_v11_2_ip_block =
-{
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 11,
- .minor = 2,
- .rev = 0,
- .funcs = &dce_v11_0_ip_funcs,
-};
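
Both version structs share dce_v11_0_ip_funcs and differ only in the minor number. A sketch of how an ASIC setup path would register one of them, assuming amdgpu_device_ip_block_add() as the hook (the chip-to-block mapping shown is illustrative):

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
		break;
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
		amdgpu_device_ip_block_add(adev, &dce_v11_2_ip_block);
		break;
	default:
		break;
	}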
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 81760a26f2ff..acc887a58518 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -1034,7 +1034,6 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
/* save values for DPM */
amdgpu_crtc->line_time = line_time;
- amdgpu_crtc->wm_high = latency_watermark_a;
/* Save number of lines the linebuffer leads before the scanout */
amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 19a265bd4d19..2ccd6aad8dd6 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -1096,8 +1096,7 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
/* save values for DPM */
amdgpu_crtc->line_time = line_time;
- amdgpu_crtc->wm_high = latency_watermark_a;
- amdgpu_crtc->wm_low = latency_watermark_b;
+
/* Save number of lines the linebuffer leads before the scanout */
amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 7bd506f06eb1..8841d7213de4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -4075,7 +4075,7 @@ static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
struct dma_fence *f = NULL;
unsigned int index;
uint64_t gpu_addr;
- volatile uint32_t *cpu_ptr;
+ uint32_t *cpu_ptr;
long r;
memset(&ib, 0, sizeof(ib));
@@ -4322,8 +4322,7 @@ static u32 gfx_v10_0_get_csb_size(struct amdgpu_device *adev)
return count;
}
-static void gfx_v10_0_get_csb_buffer(struct amdgpu_device *adev,
- volatile u32 *buffer)
+static void gfx_v10_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer)
{
u32 count = 0;
int ctx_reg_offset;
@@ -7668,19 +7667,17 @@ static int gfx_v10_0_soft_reset(struct amdgpu_ip_block *ip_block)
/* Disable MEC parsing/prefetching */
gfx_v10_0_cp_compute_enable(adev, false);
- if (grbm_soft_reset) {
- tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
- tmp |= grbm_soft_reset;
- dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+ tmp |= grbm_soft_reset;
+ dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
- udelay(50);
+ udelay(50);
- tmp &= ~grbm_soft_reset;
- WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
- }
+ tmp &= ~grbm_soft_reset;
+ WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
/* Wait a little for things to settle down */
udelay(50);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index c37527704d43..66c47c466532 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -603,7 +603,7 @@ static int gfx_v11_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
struct dma_fence *f = NULL;
unsigned index;
uint64_t gpu_addr;
- volatile uint32_t *cpu_ptr;
+ uint32_t *cpu_ptr;
long r;
/* MES KIQ fw hasn't indirect buffer support for now */
@@ -850,8 +850,7 @@ static u32 gfx_v11_0_get_csb_size(struct amdgpu_device *adev)
return count;
}
-static void gfx_v11_0_get_csb_buffer(struct amdgpu_device *adev,
- volatile u32 *buffer)
+static void gfx_v11_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer)
{
u32 count = 0;
int ctx_reg_offset;
@@ -4658,8 +4657,7 @@ static int gfx_v11_0_gfxhub_enable(struct amdgpu_device *adev)
amdgpu_device_flush_hdp(adev, NULL);
- value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
- false : true;
+ value = amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS;
adev->gfxhub.funcs->set_fault_enable_default(adev, value);
/* TODO investigate why this and the hdp flush above is needed,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
index fd44d5503e28..710ec9c34e43 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
@@ -497,7 +497,7 @@ static int gfx_v12_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
struct dma_fence *f = NULL;
unsigned index;
uint64_t gpu_addr;
- volatile uint32_t *cpu_ptr;
+ uint32_t *cpu_ptr;
long r;
/* MES KIQ fw hasn't indirect buffer support for now */
@@ -685,8 +685,7 @@ static u32 gfx_v12_0_get_csb_size(struct amdgpu_device *adev)
return count;
}
-static void gfx_v12_0_get_csb_buffer(struct amdgpu_device *adev,
- volatile u32 *buffer)
+static void gfx_v12_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer)
{
u32 count = 0, clustercount = 0, i;
const struct cs_section_def *sect = NULL;
@@ -3524,8 +3523,7 @@ static int gfx_v12_0_gfxhub_enable(struct amdgpu_device *adev)
amdgpu_device_flush_hdp(adev, NULL);
- value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
- false : true;
+ value = amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS;
adev->gfxhub.funcs->set_fault_enable_default(adev, value);
/* TODO investigate why this and the hdp flush above is needed,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 70d7a1f434c4..7693b7953426 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -86,7 +86,7 @@ MODULE_FIRMWARE("amdgpu/hainan_ce.bin");
MODULE_FIRMWARE("amdgpu/hainan_rlc.bin");
static u32 gfx_v6_0_get_csb_size(struct amdgpu_device *adev);
-static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
+static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer);
//static void gfx_v6_0_init_cp_pg_table(struct amdgpu_device *adev);
static void gfx_v6_0_init_pg(struct amdgpu_device *adev);
@@ -2354,7 +2354,7 @@ static void gfx_v6_0_ring_emit_wreg(struct amdgpu_ring *ring,
static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
{
const u32 *src_ptr;
- volatile u32 *dst_ptr;
+ u32 *dst_ptr;
u32 dws;
u64 reg_list_mc_addr;
const struct cs_section_def *cs_data;
@@ -2855,8 +2855,7 @@ static u32 gfx_v6_0_get_csb_size(struct amdgpu_device *adev)
return count;
}
-static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev,
- volatile u32 *buffer)
+static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer)
{
u32 count = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 2aa323dab34e..5976ed55d9db 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -883,7 +883,7 @@ static const u32 kalindi_rlc_save_restore_register_list[] = {
};
static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev);
-static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
+static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer);
static void gfx_v7_0_init_pg(struct amdgpu_device *adev);
static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev);
@@ -3882,8 +3882,7 @@ static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev)
return count;
}
-static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev,
- volatile u32 *buffer)
+static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer)
{
u32 count = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 367449d8061b..0856ff65288c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1220,8 +1220,7 @@ out:
return err;
}
-static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev,
- volatile u32 *buffer)
+static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer)
{
u32 count = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 20b30f4b3c7d..dd19a97436db 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1648,8 +1648,7 @@ static u32 gfx_v9_0_get_csb_size(struct amdgpu_device *adev)
return count;
}
-static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev,
- volatile u32 *buffer)
+static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer)
{
u32 count = 0;
@@ -2650,6 +2649,9 @@ static void gfx_v9_0_init_sq_config(struct amdgpu_device *adev)
!READ_ONCE(adev->barrier_has_auto_waitcnt));
WREG32_SOC15(GC, 0, mmSQ_CONFIG, tmp);
break;
+ case IP_VERSION(9, 4, 2):
+ gfx_v9_4_2_init_sq(adev);
+ break;
default:
break;
}
@@ -4172,19 +4174,17 @@ static int gfx_v9_0_soft_reset(struct amdgpu_ip_block *ip_block)
/* Disable MEC parsing/prefetching */
gfx_v9_0_cp_compute_enable(adev, false);
- if (grbm_soft_reset) {
- tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
- tmp |= grbm_soft_reset;
- dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+ tmp |= grbm_soft_reset;
+ dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
- udelay(50);
+ udelay(50);
- tmp &= ~grbm_soft_reset;
- WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
- }
+ tmp &= ~grbm_soft_reset;
+ WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
/* Wait a little for things to settle down */
udelay(50);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
index c48cd47b531f..8058ea91ecaf 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
@@ -748,6 +748,18 @@ void gfx_v9_4_2_init_golden_registers(struct amdgpu_device *adev,
}
}
+void gfx_v9_4_2_init_sq(struct amdgpu_device *adev)
+{
+ uint32_t data;
+
+ if (adev->gfx.mec_fw_version >= 98) {
+ adev->gmc.xnack_flags |= AMDGPU_GMC_XNACK_FLAG_CHAIN;
+ data = RREG32_SOC15(GC, 0, regSQ_CONFIG1);
+ data = REG_SET_FIELD(data, SQ_CONFIG1, DISABLE_XNACK_CHECK_IN_RETRY_DISABLE, 1);
+ WREG32_SOC15(GC, 0, regSQ_CONFIG1, data);
+ }
+}
+
void gfx_v9_4_2_debug_trap_config_init(struct amdgpu_device *adev,
uint32_t first_vmid,
uint32_t last_vmid)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h
index 7584624b641c..a603724c1dfc 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h
@@ -28,6 +28,7 @@ void gfx_v9_4_2_debug_trap_config_init(struct amdgpu_device *adev,
uint32_t first_vmid, uint32_t last_vmid);
void gfx_v9_4_2_init_golden_registers(struct amdgpu_device *adev,
uint32_t die_id);
+void gfx_v9_4_2_init_sq(struct amdgpu_device *adev);
void gfx_v9_4_2_set_power_brake_sequence(struct amdgpu_device *adev);
int gfx_v9_4_2_do_edc_gpr_workarounds(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index 51babf5c78c8..77f9d5b9a556 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -2461,19 +2461,17 @@ static int gfx_v9_4_3_soft_reset(struct amdgpu_ip_block *ip_block)
/* Disable MEC parsing/prefetching */
gfx_v9_4_3_xcc_cp_compute_enable(adev, false, 0);
- if (grbm_soft_reset) {
- tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
- tmp |= grbm_soft_reset;
- dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
-
- udelay(50);
-
- tmp &= ~grbm_soft_reset;
- WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
- }
+ tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
+ tmp |= grbm_soft_reset;
+ dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~grbm_soft_reset;
+ WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
/* Wait a little for things to settle down */
udelay(50);
@@ -3562,6 +3560,7 @@ static int gfx_v9_4_3_reset_kcq(struct amdgpu_ring *ring,
struct amdgpu_device *adev = ring->adev;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[ring->xcc_id];
struct amdgpu_ring *kiq_ring = &kiq->ring;
+ int reset_mode = AMDGPU_RESET_TYPE_PER_QUEUE;
unsigned long flags;
int r;
@@ -3599,6 +3598,7 @@ pipe_reset:
if (!(adev->gfx.compute_supported_reset & AMDGPU_RESET_TYPE_PER_PIPE))
return -EOPNOTSUPP;
r = gfx_v9_4_3_reset_hw_pipe(ring);
+ reset_mode = AMDGPU_RESET_TYPE_PER_PIPE;
dev_info(adev->dev, "ring: %s pipe reset :%s\n", ring->name,
r ? "failed" : "successfully");
if (r)
@@ -3621,10 +3621,20 @@ pipe_reset:
r = amdgpu_ring_test_ring(kiq_ring);
spin_unlock_irqrestore(&kiq->ring_lock, flags);
if (r) {
+ if (reset_mode == AMDGPU_RESET_TYPE_PER_QUEUE)
+ goto pipe_reset;
+
dev_err(adev->dev, "fail to remap queue\n");
return r;
}
+ if (reset_mode == AMDGPU_RESET_TYPE_PER_QUEUE) {
+ r = amdgpu_ring_test_ring(ring);
+ if (r)
+ goto pipe_reset;
+ }
+
return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
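
The new reset_mode tracking turns the KCQ reset into an escalation: try a per-queue reset first, and only fall back to the per-pipe path when the queue-level attempt cannot be verified. A control-flow sketch of the hunk above:

	/* per-queue reset -> remap via KIQ -> amdgpu_ring_test_ring(ring)
	 *      | (any step fails while reset_mode == AMDGPU_RESET_TYPE_PER_QUEUE)
	 *      v
	 * pipe_reset: gfx_v9_4_3_reset_hw_pipe(ring),
	 *             reset_mode = AMDGPU_RESET_TYPE_PER_PIPE
	 */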
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 7923f491cf73..d7499be8c4bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -466,24 +466,6 @@ static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int
* 0 valid
*/
-static uint64_t gmc_v10_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
-{
- switch (flags) {
- case AMDGPU_VM_MTYPE_DEFAULT:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_NC);
- case AMDGPU_VM_MTYPE_NC:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_NC);
- case AMDGPU_VM_MTYPE_WC:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_WC);
- case AMDGPU_VM_MTYPE_CC:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_CC);
- case AMDGPU_VM_MTYPE_UC:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_UC);
- default:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_NC);
- }
-}
-
static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
uint64_t *addr, uint64_t *flags)
{
@@ -508,21 +490,39 @@ static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
}
static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev,
- struct amdgpu_bo_va_mapping *mapping,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo,
+ uint32_t vm_flags,
uint64_t *flags)
{
- struct amdgpu_bo *bo = mapping->bo_va->base.bo;
-
- *flags &= ~AMDGPU_PTE_EXECUTABLE;
- *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
+ if (vm_flags & AMDGPU_VM_PAGE_EXECUTABLE)
+ *flags |= AMDGPU_PTE_EXECUTABLE;
+ else
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
- *flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
- *flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);
+ switch (vm_flags & AMDGPU_VM_MTYPE_MASK) {
+ case AMDGPU_VM_MTYPE_DEFAULT:
+ case AMDGPU_VM_MTYPE_NC:
+ default:
+ *flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_NC);
+ break;
+ case AMDGPU_VM_MTYPE_WC:
+ *flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_WC);
+ break;
+ case AMDGPU_VM_MTYPE_CC:
+ *flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_CC);
+ break;
+ case AMDGPU_VM_MTYPE_UC:
+ *flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_UC);
+ break;
+ }
- *flags &= ~AMDGPU_PTE_NOALLOC;
- *flags |= (mapping->flags & AMDGPU_PTE_NOALLOC);
+ if (vm_flags & AMDGPU_VM_PAGE_NOALLOC)
+ *flags |= AMDGPU_PTE_NOALLOC;
+ else
+ *flags &= ~AMDGPU_PTE_NOALLOC;
- if (mapping->flags & AMDGPU_PTE_PRT) {
+ if (vm_flags & AMDGPU_VM_PAGE_PRT) {
*flags |= AMDGPU_PTE_PRT;
*flags |= AMDGPU_PTE_SNOOPED;
*flags |= AMDGPU_PTE_LOG;
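
With map_mtype removed, the MTYPE bits are now derived directly from vm_flags while the PTE is built. A worked example under the new signature (values follow the switch above):

	/* vm_flags = AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_WC:
	 *   *flags |= AMDGPU_PTE_EXECUTABLE;
	 *   *flags  = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_WC);
	 * AMDGPU_PTE_NOALLOC is cleared since AMDGPU_VM_PAGE_NOALLOC
	 * is not set, and no PRT bits are added.
	 */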
@@ -563,7 +563,6 @@ static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
.flush_gpu_tlb_pasid = gmc_v10_0_flush_gpu_tlb_pasid,
.emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping,
- .map_mtype = gmc_v10_0_map_mtype,
.get_vm_pde = gmc_v10_0_get_vm_pde,
.get_vm_pte = gmc_v10_0_get_vm_pte,
.get_vbios_fb_size = gmc_v10_0_get_vbios_fb_size,
@@ -964,8 +963,7 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
/* Flush HDP after it is initialized */
amdgpu_device_flush_hdp(adev, NULL);
- value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
- false : true;
+ value = amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS;
if (!adev->in_s0ix)
adev->gfxhub.funcs->set_fault_enable_default(adev, value);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
index f15d691e9a20..7bc389d9f5c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
@@ -430,24 +430,6 @@ static void gmc_v11_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int
* 0 valid
*/
-static uint64_t gmc_v11_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
-{
- switch (flags) {
- case AMDGPU_VM_MTYPE_DEFAULT:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_NC);
- case AMDGPU_VM_MTYPE_NC:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_NC);
- case AMDGPU_VM_MTYPE_WC:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_WC);
- case AMDGPU_VM_MTYPE_CC:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_CC);
- case AMDGPU_VM_MTYPE_UC:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_UC);
- default:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_NC);
- }
-}
-
static void gmc_v11_0_get_vm_pde(struct amdgpu_device *adev, int level,
uint64_t *addr, uint64_t *flags)
{
@@ -472,21 +454,39 @@ static void gmc_v11_0_get_vm_pde(struct amdgpu_device *adev, int level,
}
static void gmc_v11_0_get_vm_pte(struct amdgpu_device *adev,
- struct amdgpu_bo_va_mapping *mapping,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo,
+ uint32_t vm_flags,
uint64_t *flags)
{
- struct amdgpu_bo *bo = mapping->bo_va->base.bo;
-
- *flags &= ~AMDGPU_PTE_EXECUTABLE;
- *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
+ if (vm_flags & AMDGPU_VM_PAGE_EXECUTABLE)
+ *flags |= AMDGPU_PTE_EXECUTABLE;
+ else
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
- *flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
- *flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);
+ switch (vm_flags & AMDGPU_VM_MTYPE_MASK) {
+ case AMDGPU_VM_MTYPE_DEFAULT:
+ case AMDGPU_VM_MTYPE_NC:
+ default:
+ *flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_NC);
+ break;
+ case AMDGPU_VM_MTYPE_WC:
+ *flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_WC);
+ break;
+ case AMDGPU_VM_MTYPE_CC:
+ *flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_CC);
+ break;
+ case AMDGPU_VM_MTYPE_UC:
+ *flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_UC);
+ break;
+ }
- *flags &= ~AMDGPU_PTE_NOALLOC;
- *flags |= (mapping->flags & AMDGPU_PTE_NOALLOC);
+ if (vm_flags & AMDGPU_VM_PAGE_NOALLOC)
+ *flags |= AMDGPU_PTE_NOALLOC;
+ else
+ *flags &= ~AMDGPU_PTE_NOALLOC;
- if (mapping->flags & AMDGPU_PTE_PRT) {
+ if (vm_flags & AMDGPU_VM_PAGE_PRT) {
*flags |= AMDGPU_PTE_PRT;
*flags |= AMDGPU_PTE_SNOOPED;
*flags |= AMDGPU_PTE_LOG;
@@ -527,7 +527,6 @@ static const struct amdgpu_gmc_funcs gmc_v11_0_gmc_funcs = {
.flush_gpu_tlb_pasid = gmc_v11_0_flush_gpu_tlb_pasid,
.emit_flush_gpu_tlb = gmc_v11_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v11_0_emit_pasid_mapping,
- .map_mtype = gmc_v11_0_map_mtype,
.get_vm_pde = gmc_v11_0_get_vm_pde,
.get_vm_pte = gmc_v11_0_get_vm_pte,
.get_vbios_fb_size = gmc_v11_0_get_vbios_fb_size,
@@ -906,8 +905,7 @@ static int gmc_v11_0_gart_enable(struct amdgpu_device *adev)
/* Flush HDP after it is initialized */
amdgpu_device_flush_hdp(adev, NULL);
- value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
- false : true;
+ value = amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS;
adev->mmhub.funcs->set_fault_enable_default(adev, value);
gmc_v11_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB0(0), 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
index de763105fdfd..404cc8c2ff2c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
@@ -336,6 +336,22 @@ static void gmc_v12_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
uint16_t queried;
int vmid, i;
+ if (adev->enable_uni_mes && adev->mes.ring[AMDGPU_MES_SCHED_PIPE].sched.ready &&
+ (adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x83) {
+ struct mes_inv_tlbs_pasid_input input = {0};
+ input.pasid = pasid;
+ input.flush_type = flush_type;
+ input.hub_id = AMDGPU_GFXHUB(0);
+		/* MES invalidates all GC hubs on the device from the master */
+ adev->mes.funcs->invalidate_tlbs_pasid(&adev->mes, &input);
+ if (all_hub) {
+			/* Only need to invalidate the mm_hub now, gfx12 only supports one mmhub */
+ input.hub_id = AMDGPU_MMHUB0(0);
+ adev->mes.funcs->invalidate_tlbs_pasid(&adev->mes, &input);
+ }
+ return;
+ }
+
for (vmid = 1; vmid < 16; vmid++) {
bool valid;
@@ -453,20 +469,6 @@ static void gmc_v12_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid
* 0 valid
*/
-static uint64_t gmc_v12_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
-{
- switch (flags) {
- case AMDGPU_VM_MTYPE_DEFAULT:
- return AMDGPU_PTE_MTYPE_GFX12(0ULL, MTYPE_NC);
- case AMDGPU_VM_MTYPE_NC:
- return AMDGPU_PTE_MTYPE_GFX12(0ULL, MTYPE_NC);
- case AMDGPU_VM_MTYPE_UC:
- return AMDGPU_PTE_MTYPE_GFX12(0ULL, MTYPE_UC);
- default:
- return AMDGPU_PTE_MTYPE_GFX12(0ULL, MTYPE_NC);
- }
-}
-
static void gmc_v12_0_get_vm_pde(struct amdgpu_device *adev, int level,
uint64_t *addr, uint64_t *flags)
{
@@ -490,18 +492,35 @@ static void gmc_v12_0_get_vm_pde(struct amdgpu_device *adev, int level,
}
static void gmc_v12_0_get_vm_pte(struct amdgpu_device *adev,
- struct amdgpu_bo_va_mapping *mapping,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo,
+ uint32_t vm_flags,
uint64_t *flags)
{
- struct amdgpu_bo *bo = mapping->bo_va->base.bo;
+ if (vm_flags & AMDGPU_VM_PAGE_EXECUTABLE)
+ *flags |= AMDGPU_PTE_EXECUTABLE;
+ else
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
- *flags &= ~AMDGPU_PTE_EXECUTABLE;
- *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
+ switch (vm_flags & AMDGPU_VM_MTYPE_MASK) {
+ case AMDGPU_VM_MTYPE_DEFAULT:
+ *flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_NC);
+ break;
+ case AMDGPU_VM_MTYPE_NC:
+ default:
+ *flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_NC);
+ break;
+ case AMDGPU_VM_MTYPE_UC:
+ *flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_UC);
+ break;
+ }
- *flags &= ~AMDGPU_PTE_MTYPE_GFX12_MASK;
- *flags |= (mapping->flags & AMDGPU_PTE_MTYPE_GFX12_MASK);
+ if (vm_flags & AMDGPU_VM_PAGE_NOALLOC)
+ *flags |= AMDGPU_PTE_NOALLOC;
+ else
+ *flags &= ~AMDGPU_PTE_NOALLOC;
- if (mapping->flags & AMDGPU_PTE_PRT_GFX12) {
+ if (vm_flags & AMDGPU_VM_PAGE_PRT) {
*flags |= AMDGPU_PTE_PRT_GFX12;
*flags |= AMDGPU_PTE_SNOOPED;
*flags |= AMDGPU_PTE_SYSTEM;
@@ -543,7 +562,6 @@ static const struct amdgpu_gmc_funcs gmc_v12_0_gmc_funcs = {
.flush_gpu_tlb_pasid = gmc_v12_0_flush_gpu_tlb_pasid,
.emit_flush_gpu_tlb = gmc_v12_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v12_0_emit_pasid_mapping,
- .map_mtype = gmc_v12_0_map_mtype,
.get_vm_pde = gmc_v12_0_get_vm_pde,
.get_vm_pte = gmc_v12_0_get_vm_pte,
.get_vbios_fb_size = gmc_v12_0_get_vbios_fb_size,
@@ -876,8 +894,7 @@ static int gmc_v12_0_gart_enable(struct amdgpu_device *adev)
/* Flush HDP after it is initialized */
amdgpu_device_flush_hdp(adev, NULL);
- value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
- false : true;
+ value = amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS;
adev->mmhub.funcs->set_fault_enable_default(adev, value);
gmc_v12_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB0(0), 0);
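The gmc_v11/gmc_v12 hunks above share one contract: get_vm_pte() now derives every PTE bit from the per-mapping vm_flags instead of copying pre-translated bits out of mapping->flags, and each bit is explicitly set or cleared so nothing stale survives a retranslation. A minimal standalone sketch of that set-or-clear pattern; the bit values below are illustrative stand-ins, not the real AMDGPU_VM_PAGE_*/AMDGPU_PTE_* layout:

#include <stdint.h>
#include <stdio.h>

#define VM_PAGE_EXECUTABLE (1u << 0)   /* stand-in for AMDGPU_VM_PAGE_EXECUTABLE */
#define VM_PAGE_NOALLOC    (1u << 1)   /* stand-in for AMDGPU_VM_PAGE_NOALLOC */
#define PTE_EXECUTABLE     (1ull << 4) /* stand-in for AMDGPU_PTE_EXECUTABLE */
#define PTE_NOALLOC        (1ull << 5) /* stand-in for AMDGPU_PTE_NOALLOC */

static void get_vm_pte_sketch(uint32_t vm_flags, uint64_t *flags)
{
	/* Set-or-clear, never a bare OR: *flags may still carry bits from
	 * an earlier translation of the same PTE. */
	if (vm_flags & VM_PAGE_EXECUTABLE)
		*flags |= PTE_EXECUTABLE;
	else
		*flags &= ~PTE_EXECUTABLE;

	if (vm_flags & VM_PAGE_NOALLOC)
		*flags |= PTE_NOALLOC;
	else
		*flags &= ~PTE_NOALLOC;
}

int main(void)
{
	uint64_t pte = PTE_NOALLOC; /* stale bit from a previous mapping */

	get_vm_pte_sketch(VM_PAGE_EXECUTABLE, &pte);
	printf("pte = 0x%llx\n", (unsigned long long)pte); /* NOALLOC is cleared */
	return 0;
}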
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 8030fcd64210..f6ad7911f1e6 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -382,7 +382,9 @@ static void gmc_v6_0_get_vm_pde(struct amdgpu_device *adev, int level,
}
static void gmc_v6_0_get_vm_pte(struct amdgpu_device *adev,
- struct amdgpu_bo_va_mapping *mapping,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo,
+ uint32_t vm_flags,
uint64_t *flags)
{
*flags &= ~AMDGPU_PTE_EXECUTABLE;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index a8d5795084fc..93d7ccb7d013 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -504,7 +504,9 @@ static void gmc_v7_0_get_vm_pde(struct amdgpu_device *adev, int level,
}
static void gmc_v7_0_get_vm_pte(struct amdgpu_device *adev,
- struct amdgpu_bo_va_mapping *mapping,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo,
+ uint32_t vm_flags,
uint64_t *flags)
{
*flags &= ~AMDGPU_PTE_EXECUTABLE;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index b45fa0cea9d2..c5e2a2c41e06 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -716,11 +716,15 @@ static void gmc_v8_0_get_vm_pde(struct amdgpu_device *adev, int level,
}
static void gmc_v8_0_get_vm_pte(struct amdgpu_device *adev,
- struct amdgpu_bo_va_mapping *mapping,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo,
+ uint32_t vm_flags,
uint64_t *flags)
{
- *flags &= ~AMDGPU_PTE_EXECUTABLE;
- *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
+ if (vm_flags & AMDGPU_VM_PAGE_EXECUTABLE)
+ *flags |= AMDGPU_PTE_EXECUTABLE;
+ else
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
*flags &= ~AMDGPU_PTE_PRT;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index c4d69cf4e06c..0d1dd587db5f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1073,27 +1073,6 @@ static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int v
* 0 valid
*/
-static uint64_t gmc_v9_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
-
-{
- switch (flags) {
- case AMDGPU_VM_MTYPE_DEFAULT:
- return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_NC);
- case AMDGPU_VM_MTYPE_NC:
- return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_NC);
- case AMDGPU_VM_MTYPE_WC:
- return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_WC);
- case AMDGPU_VM_MTYPE_RW:
- return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_RW);
- case AMDGPU_VM_MTYPE_CC:
- return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_CC);
- case AMDGPU_VM_MTYPE_UC:
- return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_UC);
- default:
- return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_NC);
- }
-}
-
static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
uint64_t *addr, uint64_t *flags)
{
@@ -1123,6 +1102,7 @@ static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct amdgpu_bo *bo,
+ uint32_t vm_flags,
uint64_t *flags)
{
struct amdgpu_device *bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
@@ -1236,25 +1216,43 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
}
static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
- struct amdgpu_bo_va_mapping *mapping,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo,
+ uint32_t vm_flags,
uint64_t *flags)
{
- struct amdgpu_bo *bo = mapping->bo_va->base.bo;
-
- *flags &= ~AMDGPU_PTE_EXECUTABLE;
- *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
+ if (vm_flags & AMDGPU_VM_PAGE_EXECUTABLE)
+ *flags |= AMDGPU_PTE_EXECUTABLE;
+ else
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
- *flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
- *flags |= mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK;
+ switch (vm_flags & AMDGPU_VM_MTYPE_MASK) {
+ case AMDGPU_VM_MTYPE_DEFAULT:
+ case AMDGPU_VM_MTYPE_NC:
+ default:
+ *flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_NC);
+ break;
+ case AMDGPU_VM_MTYPE_WC:
+ *flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_WC);
+ break;
+ case AMDGPU_VM_MTYPE_RW:
+ *flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_RW);
+ break;
+ case AMDGPU_VM_MTYPE_CC:
+ *flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_CC);
+ break;
+ case AMDGPU_VM_MTYPE_UC:
+ *flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_UC);
+ break;
+ }
- if (mapping->flags & AMDGPU_PTE_PRT) {
+ if (vm_flags & AMDGPU_VM_PAGE_PRT) {
*flags |= AMDGPU_PTE_PRT;
*flags &= ~AMDGPU_PTE_VALID;
}
if ((*flags & AMDGPU_PTE_VALID) && bo)
- gmc_v9_0_get_coherence_flags(adev, mapping->bo_va->base.vm, bo,
- flags);
+ gmc_v9_0_get_coherence_flags(adev, vm, bo, vm_flags, flags);
}
static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev,
@@ -1391,7 +1389,6 @@ static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
.flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid,
.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
- .map_mtype = gmc_v9_0_map_mtype,
.get_vm_pde = gmc_v9_0_get_vm_pde,
.get_vm_pte = gmc_v9_0_get_vm_pte,
.override_vm_pte_flags = gmc_v9_0_override_vm_pte_flags,
@@ -1837,11 +1834,19 @@ static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev)
{
+ static const u32 regBIF_BIOS_SCRATCH_4 = 0x50;
+ u32 vram_info;
+
adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM;
adev->gmc.vram_width = 128 * 64;
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM3E;
+
+ if (!(adev->flags & AMD_IS_APU) && !amdgpu_sriov_vf(adev)) {
+ vram_info = RREG32(regBIF_BIOS_SCRATCH_4);
+ adev->gmc.vram_vendor = vram_info & 0xF;
+ }
}
static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
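The vram_info addition above reads a BIOS scratch register on bare-metal parts (skipped on APUs and SR-IOV VFs, presumably because the register is not meaningful there) and keeps only the low nibble as the vendor code. A trivial helper restating the decode; the 0x50 offset and the low-4-bit field come from the hunk itself, and no vendor-name mapping is assumed:

#include <stdint.h>

static inline uint8_t vram_vendor_from_scratch(uint32_t vram_info)
{
	return vram_info & 0xF; /* low 4 bits of BIF_BIOS_SCRATCH_4 */
}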
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
index 5900b560b7de..333e9c30c091 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
@@ -587,8 +587,7 @@ static int ih_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
/* use gpu virtual address for ih ring
* until ih_checken is programmed to allow
* use bus address for ih ring by psp bl */
- use_bus_addr =
- (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) ? false : true;
+ use_bus_addr = adev->firmware.load_type != AMDGPU_FW_LOAD_PSP;
r = amdgpu_ih_ring_init(adev, &adev->irq.ih, IH_RING_SIZE, use_bus_addr);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
index 068ed849dbad..95b3f4e55ec3 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
@@ -562,8 +562,7 @@ static int ih_v6_1_sw_init(struct amdgpu_ip_block *ip_block)
/* use gpu virtual address for ih ring
* until ih_checken is programmed to allow
* use bus address for ih ring by psp bl */
- use_bus_addr =
- (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) ? false : true;
+ use_bus_addr = adev->firmware.load_type != AMDGPU_FW_LOAD_PSP;
r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, use_bus_addr);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
index 40a3530e0453..b32ea4129c61 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
@@ -552,8 +552,7 @@ static int ih_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
/* use gpu virtual address for ih ring
* until ih_checken is programmed to allow
* use bus address for ih ring by psp bl */
- use_bus_addr =
- (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) ? false : true;
+ use_bus_addr = adev->firmware.load_type != AMDGPU_FW_LOAD_PSP;
r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, use_bus_addr);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
index 9e428e669ada..b5bb7f4d607c 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
@@ -557,7 +557,7 @@ static const struct amdgpu_ring_funcs jpeg_v1_0_decode_ring_vm_funcs = {
.nop = PACKET0(0x81ff, 0),
.support_64bit_ptrs = false,
.no_user_fence = true,
- .extra_dw = 64,
+ .extra_bytes = 256,
.get_rptr = jpeg_v1_0_decode_ring_get_rptr,
.get_wptr = jpeg_v1_0_decode_ring_get_wptr,
.set_wptr = jpeg_v1_0_decode_ring_set_wptr,
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
index 58239c405fda..27c76bd424cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
@@ -23,7 +23,6 @@
#include "amdgpu.h"
#include "amdgpu_jpeg.h"
-#include "amdgpu_cs.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
@@ -806,7 +805,7 @@ static const struct amdgpu_ring_funcs jpeg_v2_0_dec_ring_vm_funcs = {
.get_rptr = jpeg_v2_0_dec_ring_get_rptr,
.get_wptr = jpeg_v2_0_dec_ring_get_wptr,
.set_wptr = jpeg_v2_0_dec_ring_set_wptr,
- .parse_cs = jpeg_v2_dec_ring_parse_cs,
+ .parse_cs = amdgpu_jpeg_dec_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
@@ -854,58 +853,3 @@ const struct amdgpu_ip_block_version jpeg_v2_0_ip_block = {
.rev = 0,
.funcs = &jpeg_v2_0_ip_funcs,
};
-
-/**
- * jpeg_v2_dec_ring_parse_cs - command submission parser
- *
- * @parser: Command submission parser context
- * @job: the job to parse
- * @ib: the IB to parse
- *
- * Parse the command stream, return -EINVAL for invalid packet,
- * 0 otherwise
- */
-int jpeg_v2_dec_ring_parse_cs(struct amdgpu_cs_parser *parser,
- struct amdgpu_job *job,
- struct amdgpu_ib *ib)
-{
- u32 i, reg, res, cond, type;
- struct amdgpu_device *adev = parser->adev;
-
- for (i = 0; i < ib->length_dw ; i += 2) {
- reg = CP_PACKETJ_GET_REG(ib->ptr[i]);
- res = CP_PACKETJ_GET_RES(ib->ptr[i]);
- cond = CP_PACKETJ_GET_COND(ib->ptr[i]);
- type = CP_PACKETJ_GET_TYPE(ib->ptr[i]);
-
- if (res) /* only support 0 at the moment */
- return -EINVAL;
-
- switch (type) {
- case PACKETJ_TYPE0:
- if (cond != PACKETJ_CONDITION_CHECK0 || reg < JPEG_REG_RANGE_START ||
- reg > JPEG_REG_RANGE_END) {
- dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
- return -EINVAL;
- }
- break;
- case PACKETJ_TYPE3:
- if (cond != PACKETJ_CONDITION_CHECK3 || reg < JPEG_REG_RANGE_START ||
- reg > JPEG_REG_RANGE_END) {
- dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
- return -EINVAL;
- }
- break;
- case PACKETJ_TYPE6:
- if (ib->ptr[i] == CP_PACKETJ_NOP)
- continue;
- dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
- return -EINVAL;
- default:
- dev_err(adev->dev, "Unknown packet type %d !\n", type);
- return -EINVAL;
- }
- }
-
- return 0;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h
index 63fadda7a673..654e43e83e2c 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h
@@ -45,9 +45,6 @@
#define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR 0x18000
-#define JPEG_REG_RANGE_START 0x4000
-#define JPEG_REG_RANGE_END 0x41c2
-
void jpeg_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring);
void jpeg_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring);
void jpeg_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
@@ -60,9 +57,6 @@ void jpeg_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr);
void jpeg_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
void jpeg_v2_0_dec_ring_nop(struct amdgpu_ring *ring, uint32_t count);
-int jpeg_v2_dec_ring_parse_cs(struct amdgpu_cs_parser *parser,
- struct amdgpu_job *job,
- struct amdgpu_ib *ib);
extern const struct amdgpu_ip_block_version jpeg_v2_0_ip_block;
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
index 3e2c389242db..20983f126b49 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
@@ -696,7 +696,7 @@ static const struct amdgpu_ring_funcs jpeg_v2_5_dec_ring_vm_funcs = {
.get_rptr = jpeg_v2_5_dec_ring_get_rptr,
.get_wptr = jpeg_v2_5_dec_ring_get_wptr,
.set_wptr = jpeg_v2_5_dec_ring_set_wptr,
- .parse_cs = jpeg_v2_dec_ring_parse_cs,
+ .parse_cs = amdgpu_jpeg_dec_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
@@ -727,7 +727,7 @@ static const struct amdgpu_ring_funcs jpeg_v2_6_dec_ring_vm_funcs = {
.get_rptr = jpeg_v2_5_dec_ring_get_rptr,
.get_wptr = jpeg_v2_5_dec_ring_get_wptr,
.set_wptr = jpeg_v2_5_dec_ring_set_wptr,
- .parse_cs = jpeg_v2_dec_ring_parse_cs,
+ .parse_cs = amdgpu_jpeg_dec_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
index a44eb2667664..d1a011c40ba2 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
@@ -597,7 +597,7 @@ static const struct amdgpu_ring_funcs jpeg_v3_0_dec_ring_vm_funcs = {
.get_rptr = jpeg_v3_0_dec_ring_get_rptr,
.get_wptr = jpeg_v3_0_dec_ring_get_wptr,
.set_wptr = jpeg_v3_0_dec_ring_set_wptr,
- .parse_cs = jpeg_v2_dec_ring_parse_cs,
+ .parse_cs = amdgpu_jpeg_dec_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
index da3ee69f1a3b..33db2c1ae6cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
@@ -762,7 +762,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_dec_ring_vm_funcs = {
.get_rptr = jpeg_v4_0_dec_ring_get_rptr,
.get_wptr = jpeg_v4_0_dec_ring_get_wptr,
.set_wptr = jpeg_v4_0_dec_ring_set_wptr,
- .parse_cs = jpeg_v2_dec_ring_parse_cs,
+ .parse_cs = amdgpu_jpeg_dec_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
index b86288a69e7b..aae7328973d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
@@ -444,7 +444,7 @@ static int jpeg_v4_0_3_hw_fini(struct amdgpu_ip_block *ip_block)
ret = jpeg_v4_0_3_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
}
- if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG))
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG) && !amdgpu_sriov_vf(adev))
amdgpu_irq_put(adev, &adev->jpeg.inst->ras_poison_irq, 0);
return ret;
@@ -1177,7 +1177,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_3_dec_ring_vm_funcs = {
.get_rptr = jpeg_v4_0_3_dec_ring_get_rptr,
.get_wptr = jpeg_v4_0_3_dec_ring_get_wptr,
.set_wptr = jpeg_v4_0_3_dec_ring_set_wptr,
- .parse_cs = jpeg_v2_dec_ring_parse_cs,
+ .parse_cs = amdgpu_jpeg_dec_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
index 481d1a2dbe5a..54fd9c800c40 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
@@ -686,7 +686,7 @@ static int jpeg_v4_0_5_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = ip_block->adev;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ bool enable = state == AMD_CG_STATE_GATE;
int i;
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
@@ -807,7 +807,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_5_dec_ring_vm_funcs = {
.get_rptr = jpeg_v4_0_5_dec_ring_get_rptr,
.get_wptr = jpeg_v4_0_5_dec_ring_get_wptr,
.set_wptr = jpeg_v4_0_5_dec_ring_set_wptr,
- .parse_cs = jpeg_v2_dec_ring_parse_cs,
+ .parse_cs = amdgpu_jpeg_dec_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
index e0a71909252b..46bf15dce2bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
@@ -584,7 +584,7 @@ static int jpeg_v5_0_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = ip_block->adev;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ bool enable = state == AMD_CG_STATE_GATE;
if (enable) {
if (!jpeg_v5_0_0_is_idle(ip_block))
@@ -683,7 +683,7 @@ static const struct amdgpu_ring_funcs jpeg_v5_0_0_dec_ring_vm_funcs = {
.get_rptr = jpeg_v5_0_0_dec_ring_get_rptr,
.get_wptr = jpeg_v5_0_0_dec_ring_get_wptr,
.set_wptr = jpeg_v5_0_0_dec_ring_set_wptr,
- .parse_cs = jpeg_v2_dec_ring_parse_cs,
+ .parse_cs = amdgpu_jpeg_dec_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
index 54523dc1f702..baf097d2e1ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
@@ -196,6 +196,14 @@ static int jpeg_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
}
}
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG)) {
+ r = amdgpu_jpeg_ras_sw_init(adev);
+ if (r) {
+ dev_err(adev->dev, "Failed to initialize jpeg ras block!\n");
+ return r;
+ }
+ }
+
r = amdgpu_jpeg_reg_dump_init(adev, jpeg_reg_list_5_0_1, ARRAY_SIZE(jpeg_reg_list_5_0_1));
if (r)
return r;
@@ -307,7 +315,7 @@ static int jpeg_v5_0_1_hw_fini(struct amdgpu_ip_block *ip_block)
ret = jpeg_v5_0_1_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
}
- if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG))
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG) && !amdgpu_sriov_vf(adev))
amdgpu_irq_put(adev, &adev->jpeg.inst->ras_poison_irq, 0);
return ret;
@@ -689,7 +697,7 @@ static int jpeg_v5_0_1_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = ip_block->adev;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ bool enable = state == AMD_CG_STATE_GATE;
int i;
@@ -1016,8 +1024,9 @@ static int jpeg_v5_0_1_aca_bank_parser(struct aca_handle *handle, struct aca_ban
/* reference to smu driver if header file */
static int jpeg_v5_0_1_err_codes[] = {
- 16, 17, 18, 19, 20, 21, 22, 23, /* JPEG[0-7][S|D] */
- 24, 25, 26, 27, 28, 29, 30, 31
+ 16, 17, 18, 19, 20, 21, 22, 23, /* JPEG[0-9][S|D] */
+ 24, 25, 26, 27, 28, 29, 30, 31,
+ 48, 49, 50, 51,
};
static bool jpeg_v5_0_1_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
@@ -1058,6 +1067,11 @@ static int jpeg_v5_0_1_ras_late_init(struct amdgpu_device *adev, struct ras_comm
if (r)
return r;
+ r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__JPEG,
+ &jpeg_v5_0_1_aca_info, NULL);
+ if (r)
+ goto late_fini;
+
if (amdgpu_ras_is_supported(adev, ras_block->block) &&
adev->jpeg.inst->ras_poison_irq.funcs) {
r = amdgpu_irq_get(adev, &adev->jpeg.inst->ras_poison_irq, 0);
@@ -1065,11 +1079,6 @@ static int jpeg_v5_0_1_ras_late_init(struct amdgpu_device *adev, struct ras_comm
goto late_fini;
}
- r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__JPEG,
- &jpeg_v5_0_1_aca_info, NULL);
- if (r)
- goto late_fini;
-
return 0;
late_fini:
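The jpeg_v5_0_1 reorder above moves the ACA bind ahead of the poison-IRQ grab so that every failure after the common RAS init can funnel through the single late_fini label. A small standalone sketch of that acquire-in-unwindable-order pattern; all step names are illustrative, and do_step() merely simulates success or failure:

#include <stdio.h>

/* Pretend resource-acquisition step; flip `fail` to exercise the unwind. */
static int do_step(const char *what, int fail)
{
	printf("%s: %s\n", what, fail ? "failed" : "ok");
	return fail ? -1 : 0;
}

static int ras_late_init_sketch(void)
{
	int r;

	r = do_step("common ras late init", 0);
	if (r)
		return r;               /* nothing to unwind yet */

	r = do_step("bind aca", 0);     /* now ordered before irq_get */
	if (r)
		goto late_fini;

	r = do_step("get poison irq", 0);
	if (r)
		goto late_fini;

	return 0;

late_fini:
	do_step("ras block late fini", 0); /* single unwind point */
	return r;
}

int main(void)
{
	return ras_late_init_sketch() ? 1 : 0;
}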
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
index d6f50b13e2ba..2db9b2c63693 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
@@ -21,6 +21,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
+#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "mes_userqueue.h"
@@ -198,6 +199,53 @@ static int mes_userq_create_ctx_space(struct amdgpu_userq_mgr *uq_mgr,
return 0;
}
+static int mes_userq_detect_and_reset(struct amdgpu_device *adev,
+ int queue_type)
+{
+ int db_array_size = amdgpu_mes_get_hung_queue_db_array_size(adev);
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_userq_mgr *uqm, *tmp;
+ unsigned int hung_db_num = 0;
+ int queue_id, r, i;
+ u32 db_array[4];
+
+ if (db_array_size > 4) {
+ dev_err(adev->dev, "DB array size (%d vs 4) too small\n",
+ db_array_size);
+ return -EINVAL;
+ }
+
+ memset(&input, 0x0, sizeof(struct mes_detect_and_reset_queue_input));
+
+ input.queue_type = queue_type;
+
+ amdgpu_mes_lock(&adev->mes);
+ r = amdgpu_mes_detect_and_reset_hung_queues(adev, queue_type, false,
+ &hung_db_num, db_array);
+ amdgpu_mes_unlock(&adev->mes);
+ if (r) {
+ dev_err(adev->dev, "Failed to detect and reset queues, err (%d)\n", r);
+ } else if (hung_db_num) {
+ list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+ idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
+ if (queue->queue_type == queue_type) {
+ for (i = 0; i < hung_db_num; i++) {
+ if (queue->doorbell_index == db_array[i]) {
+ queue->state = AMDGPU_USERQ_STATE_HUNG;
+ atomic_inc(&adev->gpu_reset_counter);
+ amdgpu_userq_fence_driver_force_completion(queue);
+ drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE, NULL);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ return r;
+}
+
static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
struct drm_amdgpu_userq_in *args_in,
struct amdgpu_usermode_queue *queue)
@@ -206,6 +254,7 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
struct amdgpu_mqd *mqd_hw_default = &adev->mqds[queue->queue_type];
struct drm_amdgpu_userq_in *mqd_user = args_in;
struct amdgpu_mqd_prop *userq_props;
+ struct amdgpu_gfx_shadow_info shadow_info = {0}; /* stays zeroed if get_gfx_shadow_info is absent */
int r;
/* Structure to initialize MQD for userqueue using generic MQD init function */
@@ -215,13 +264,6 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
return -ENOMEM;
}
- if (!mqd_user->wptr_va || !mqd_user->rptr_va ||
- !mqd_user->queue_va || mqd_user->queue_size == 0) {
- DRM_ERROR("Invalid MQD parameters for userqueue\n");
- r = -EINVAL;
- goto free_props;
- }
-
r = amdgpu_userq_create_object(uq_mgr, &queue->mqd, mqd_hw_default->mqd_size);
if (r) {
DRM_ERROR("Failed to create MQD object for userqueue\n");
@@ -238,6 +280,8 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
userq_props->doorbell_index = queue->doorbell_index;
userq_props->fence_address = queue->fence_drv->gpu_addr;
+ if (adev->gfx.funcs->get_gfx_shadow_info)
+ adev->gfx.funcs->get_gfx_shadow_info(adev, &shadow_info, true);
if (queue->queue_type == AMDGPU_HW_IP_COMPUTE) {
struct drm_amdgpu_userq_mqd_compute_gfx11 *compute_mqd;
@@ -254,6 +298,10 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
goto free_mqd;
}
+ r = amdgpu_userq_input_va_validate(queue->vm, compute_mqd->eop_va,
+ max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE));
+ if (r)
+ goto free_mqd;
+
userq_props->eop_gpu_addr = compute_mqd->eop_va;
userq_props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
userq_props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
@@ -281,6 +329,11 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
userq_props->csa_addr = mqd_gfx_v11->csa_va;
userq_props->tmz_queue =
mqd_user->flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE;
+
+ r = amdgpu_userq_input_va_validate(queue->vm, mqd_gfx_v11->shadow_va,
+ shadow_info.shadow_size);
+ if (r)
+ goto free_mqd;
+
kfree(mqd_gfx_v11);
} else if (queue->queue_type == AMDGPU_HW_IP_DMA) {
struct drm_amdgpu_userq_mqd_sdma_gfx11 *mqd_sdma_v11;
@@ -298,6 +351,10 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
goto free_mqd;
}
+ r = amdgpu_userq_input_va_validate(queue->vm, mqd_sdma_v11->csa_va,
+ shadow_info.csa_size);
+ if (r)
+ goto free_mqd;
+
userq_props->csa_addr = mqd_sdma_v11->csa_va;
kfree(mqd_sdma_v11);
}
@@ -347,9 +404,82 @@ mes_userq_mqd_destroy(struct amdgpu_userq_mgr *uq_mgr,
amdgpu_userq_destroy_object(uq_mgr, &queue->mqd);
}
+static int mes_userq_preempt(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ struct mes_suspend_gang_input queue_input;
+ struct amdgpu_userq_obj *ctx = &queue->fw_obj;
+ signed long timeout = 2100000; /* 2100 ms */
+ u64 fence_gpu_addr;
+ u32 fence_offset;
+ u64 *fence_ptr;
+ int i, r;
+
+ if (queue->state != AMDGPU_USERQ_STATE_MAPPED)
+ return 0;
+
+ r = amdgpu_device_wb_get(adev, &fence_offset);
+ if (r)
+ return r;
+
+ fence_gpu_addr = adev->wb.gpu_addr + (fence_offset * 4);
+ fence_ptr = (u64 *)&adev->wb.wb[fence_offset];
+ *fence_ptr = 0;
+
+ memset(&queue_input, 0x0, sizeof(struct mes_suspend_gang_input));
+ queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ;
+ queue_input.suspend_fence_addr = fence_gpu_addr;
+ queue_input.suspend_fence_value = 1;
+ amdgpu_mes_lock(&adev->mes);
+ r = adev->mes.funcs->suspend_gang(&adev->mes, &queue_input);
+ amdgpu_mes_unlock(&adev->mes);
+ if (r) {
+ DRM_ERROR("Failed to suspend gang: %d\n", r);
+ goto out;
+ }
+
+ for (i = 0; i < timeout; i++) {
+ if (*fence_ptr == 1)
+ goto out;
+ udelay(1);
+ }
+ r = -ETIMEDOUT;
+
+out:
+ amdgpu_device_wb_free(adev, fence_offset);
+ return r;
+}
+
+static int mes_userq_restore(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ struct mes_resume_gang_input queue_input;
+ struct amdgpu_userq_obj *ctx = &queue->fw_obj;
+ int r;
+
+ if (queue->state == AMDGPU_USERQ_STATE_HUNG)
+ return -EINVAL;
+ if (queue->state != AMDGPU_USERQ_STATE_PREEMPTED)
+ return 0;
+
+ memset(&queue_input, 0x0, sizeof(struct mes_resume_gang_input));
+ queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ;
+
+ amdgpu_mes_lock(&adev->mes);
+ r = adev->mes.funcs->resume_gang(&adev->mes, &queue_input);
+ amdgpu_mes_unlock(&adev->mes);
+ if (r)
+ dev_err(adev->dev, "Failed to resume queue, err (%d)\n", r);
+ return r;
+}
+
const struct amdgpu_userq_funcs userq_mes_funcs = {
.mqd_create = mes_userq_mqd_create,
.mqd_destroy = mes_userq_mqd_destroy,
.unmap = mes_userq_unmap,
.map = mes_userq_map,
+ .detect_and_reset = mes_userq_detect_and_reset,
+ .preempt = mes_userq_preempt,
+ .restore = mes_userq_restore,
};
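mes_userq_preempt() above demonstrates the suspend handshake: the driver hands MES a fence address plus an expected value, then busy-polls the write-back slot until firmware stores that value or the 2100 ms budget expires. A userspace simulation of the same handshake, with a thread standing in for the MES firmware; all names here are illustrative, not driver API:

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static _Atomic uint64_t suspend_fence; /* stands in for the WB slot */

static void *fake_mes(void *arg)
{
	(void)arg;
	usleep(1000);                     /* pretend preemption takes 1 ms */
	atomic_store(&suspend_fence, 1);  /* firmware writes the fence value */
	return NULL;
}

int main(void)
{
	const long timeout_us = 2100000;  /* same 2100 ms budget as the patch */
	pthread_t t;
	long i;

	pthread_create(&t, NULL, fake_mes, NULL);
	for (i = 0; i < timeout_us; i++) {
		if (atomic_load(&suspend_fence) == 1)
			break;
		usleep(1);                /* udelay(1) in the kernel version */
	}
	pthread_join(t, NULL);
	puts(i < timeout_us ? "gang preempted" : "timed out");
	return 0;
}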
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index 3f6a828cad8a..e82188431f79 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -66,6 +66,8 @@ static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev);
#define GFX_MES_DRAM_SIZE 0x80000
#define MES11_HW_RESOURCE_1_SIZE (128 * AMDGPU_GPU_PAGE_SIZE)
+#define MES11_HUNG_DB_OFFSET_ARRAY_SIZE 4
+
static void mes_v11_0_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
@@ -711,6 +713,12 @@ static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes)
mes_set_hw_res_pkt.enable_reg_active_poll = 1;
mes_set_hw_res_pkt.enable_level_process_quantum_check = 1;
mes_set_hw_res_pkt.oversubscription_timer = 50;
+ if ((mes->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x7f)
+ mes_set_hw_res_pkt.enable_lr_compute_wa = 1;
+ else
+ dev_info_once(mes->adev->dev,
+ "MES FW version must be >= 0x7f to enable LR compute workaround.\n");
+
if (amdgpu_mes_log_enable) {
mes_set_hw_res_pkt.enable_mes_event_int_logging = 1;
mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr =
@@ -784,6 +792,32 @@ static int mes_v11_0_reset_hw_queue(struct amdgpu_mes *mes,
offsetof(union MESAPI__RESET, api_status));
}
+static int mes_v11_0_detect_and_reset_hung_queues(struct amdgpu_mes *mes,
+ struct mes_detect_and_reset_queue_input *input)
+{
+ union MESAPI__RESET mes_reset_queue_pkt;
+
+ memset(&mes_reset_queue_pkt, 0, sizeof(mes_reset_queue_pkt));
+
+ mes_reset_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
+ mes_reset_queue_pkt.header.opcode = MES_SCH_API_RESET;
+ mes_reset_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
+
+ mes_reset_queue_pkt.queue_type =
+ convert_to_mes_queue_type(input->queue_type);
+ mes_reset_queue_pkt.doorbell_offset_addr =
+ mes->hung_queue_db_array_gpu_addr;
+
+ if (input->detect_only)
+ mes_reset_queue_pkt.hang_detect_only = 1;
+ else
+ mes_reset_queue_pkt.hang_detect_then_reset = 1;
+
+ return mes_v11_0_submit_pkt_and_poll_completion(mes,
+ &mes_reset_queue_pkt, sizeof(mes_reset_queue_pkt),
+ offsetof(union MESAPI__RESET, api_status));
+}
+
static const struct amdgpu_mes_funcs mes_v11_0_funcs = {
.add_hw_queue = mes_v11_0_add_hw_queue,
.remove_hw_queue = mes_v11_0_remove_hw_queue,
@@ -793,6 +827,7 @@ static const struct amdgpu_mes_funcs mes_v11_0_funcs = {
.resume_gang = mes_v11_0_resume_gang,
.misc_op = mes_v11_0_misc_op,
.reset_hw_queue = mes_v11_0_reset_hw_queue,
+ .detect_and_reset_hung_queues = mes_v11_0_detect_and_reset_hung_queues,
};
static int mes_v11_0_allocate_ucode_buffer(struct amdgpu_device *adev,
@@ -1685,6 +1720,8 @@ static int mes_v11_0_early_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
int pipe, r;
+ adev->mes.hung_queue_db_array_size =
+ MES11_HUNG_DB_OFFSET_ARRAY_SIZE;
for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE)
continue;
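Both MES11 and MES12 gate the long-running-compute workaround on the scheduler firmware version (>= 0x7f and >= 0x82 respectively, per the hunks above). A one-liner capturing that check; the mask value is an illustrative stand-in for AMDGPU_MES_VERSION_MASK:

#include <stdbool.h>
#include <stdint.h>

#define MES_VERSION_MASK 0xffu /* stand-in; the real mask lives in the driver headers */

static bool lr_compute_wa_supported(uint32_t sched_version, uint32_t min_version)
{
	return (sched_version & MES_VERSION_MASK) >= min_version;
}

/* e.g. lr_compute_wa_supported(ver, 0x7f) for MES11, 0x82 for MES12 */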
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
index 6b222630f3fa..aff06f06aeee 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
@@ -47,6 +47,8 @@ static int mes_v12_0_kiq_hw_fini(struct amdgpu_device *adev);
#define MES_EOP_SIZE 2048
+#define MES12_HUNG_DB_OFFSET_ARRAY_SIZE 4
+
static void mes_v12_0_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
@@ -108,6 +110,7 @@ static const char *mes_v12_0_opcodes[] = {
"SET_SE_MODE",
"SET_GANG_SUBMIT",
"SET_HW_RSRC_1",
+ "INVALIDATE_TLBS",
};
static const char *mes_v12_0_misc_opcodes[] = {
@@ -567,13 +570,41 @@ static int mes_v12_0_unmap_legacy_queue(struct amdgpu_mes *mes,
static int mes_v12_0_suspend_gang(struct amdgpu_mes *mes,
struct mes_suspend_gang_input *input)
{
- return 0;
+ union MESAPI__SUSPEND mes_suspend_gang_pkt;
+
+ memset(&mes_suspend_gang_pkt, 0, sizeof(mes_suspend_gang_pkt));
+
+ mes_suspend_gang_pkt.header.type = MES_API_TYPE_SCHEDULER;
+ mes_suspend_gang_pkt.header.opcode = MES_SCH_API_SUSPEND;
+ mes_suspend_gang_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
+
+ mes_suspend_gang_pkt.suspend_all_gangs = input->suspend_all_gangs;
+ mes_suspend_gang_pkt.gang_context_addr = input->gang_context_addr;
+ mes_suspend_gang_pkt.suspend_fence_addr = input->suspend_fence_addr;
+ mes_suspend_gang_pkt.suspend_fence_value = input->suspend_fence_value;
+
+ return mes_v12_0_submit_pkt_and_poll_completion(mes, AMDGPU_MES_SCHED_PIPE,
+ &mes_suspend_gang_pkt, sizeof(mes_suspend_gang_pkt),
+ offsetof(union MESAPI__SUSPEND, api_status));
}
static int mes_v12_0_resume_gang(struct amdgpu_mes *mes,
struct mes_resume_gang_input *input)
{
- return 0;
+ union MESAPI__RESUME mes_resume_gang_pkt;
+
+ memset(&mes_resume_gang_pkt, 0, sizeof(mes_resume_gang_pkt));
+
+ mes_resume_gang_pkt.header.type = MES_API_TYPE_SCHEDULER;
+ mes_resume_gang_pkt.header.opcode = MES_SCH_API_RESUME;
+ mes_resume_gang_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
+
+ mes_resume_gang_pkt.resume_all_gangs = input->resume_all_gangs;
+ mes_resume_gang_pkt.gang_context_addr = input->gang_context_addr;
+
+ return mes_v12_0_submit_pkt_and_poll_completion(mes, AMDGPU_MES_SCHED_PIPE,
+ &mes_resume_gang_pkt, sizeof(mes_resume_gang_pkt),
+ offsetof(union MESAPI__RESUME, api_status));
}
static int mes_v12_0_query_sched_status(struct amdgpu_mes *mes, int pipe)
@@ -738,6 +769,11 @@ static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes, int pipe)
mes_set_hw_res_pkt.use_different_vmid_compute = 1;
mes_set_hw_res_pkt.enable_reg_active_poll = 1;
mes_set_hw_res_pkt.enable_level_process_quantum_check = 1;
+ if ((mes->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x82)
+ mes_set_hw_res_pkt.enable_lr_compute_wa = 1;
+ else
+ dev_info_once(adev->dev,
+ "MES FW version must be >= 0x82 to enable LR compute workaround.\n");
/*
* Keep oversubscribe timer for sdma . When we have unmapped doorbell
@@ -879,6 +915,74 @@ static int mes_v12_0_reset_hw_queue(struct amdgpu_mes *mes,
offsetof(union MESAPI__RESET, api_status));
}
+static int mes_v12_0_detect_and_reset_hung_queues(struct amdgpu_mes *mes,
+ struct mes_detect_and_reset_queue_input *input)
+{
+ union MESAPI__RESET mes_reset_queue_pkt;
+
+ memset(&mes_reset_queue_pkt, 0, sizeof(mes_reset_queue_pkt));
+
+ mes_reset_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
+ mes_reset_queue_pkt.header.opcode = MES_SCH_API_RESET;
+ mes_reset_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
+
+ mes_reset_queue_pkt.queue_type =
+ convert_to_mes_queue_type(input->queue_type);
+ mes_reset_queue_pkt.doorbell_offset_addr =
+ mes->hung_queue_db_array_gpu_addr;
+
+ if (input->detect_only)
+ mes_reset_queue_pkt.hang_detect_only = 1;
+ else
+ mes_reset_queue_pkt.hang_detect_then_reset = 1;
+
+ return mes_v12_0_submit_pkt_and_poll_completion(mes, AMDGPU_MES_SCHED_PIPE,
+ &mes_reset_queue_pkt, sizeof(mes_reset_queue_pkt),
+ offsetof(union MESAPI__RESET, api_status));
+}
+
+static int mes_v12_inv_tlb_convert_hub_id(uint8_t id)
+{
+ /*
+ * MES doesn't support invalidating the gc_hub on a slave XCC individually;
+ * the master XCC invalidates all gc_hubs for the partition
+ */
+ if (AMDGPU_IS_GFXHUB(id))
+ return 0;
+ else if (AMDGPU_IS_MMHUB0(id))
+ return 1;
+ else
+ return -EINVAL;
+}
+
+static int mes_v12_0_inv_tlbs_pasid(struct amdgpu_mes *mes,
+ struct mes_inv_tlbs_pasid_input *input)
+{
+ union MESAPI__INV_TLBS mes_inv_tlbs;
+ int ret;
+
+ memset(&mes_inv_tlbs, 0, sizeof(mes_inv_tlbs));
+
+ mes_inv_tlbs.header.type = MES_API_TYPE_SCHEDULER;
+ mes_inv_tlbs.header.opcode = MES_SCH_API_INV_TLBS;
+ mes_inv_tlbs.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
+
+ mes_inv_tlbs.invalidate_tlbs.inv_sel = 0;
+ mes_inv_tlbs.invalidate_tlbs.flush_type = input->flush_type;
+ mes_inv_tlbs.invalidate_tlbs.inv_sel_id = input->pasid;
+
+ /* convert the amdgpu hub_id to the hub_id MES expects */
+ ret = mes_v12_inv_tlb_convert_hub_id(input->hub_id);
+ if (ret < 0)
+ return -EINVAL;
+ mes_inv_tlbs.invalidate_tlbs.hub_id = ret;
+ return mes_v12_0_submit_pkt_and_poll_completion(mes, AMDGPU_MES_KIQ_PIPE,
+ &mes_inv_tlbs, sizeof(mes_inv_tlbs),
+ offsetof(union MESAPI__INV_TLBS, api_status));
+}
+
static const struct amdgpu_mes_funcs mes_v12_0_funcs = {
.add_hw_queue = mes_v12_0_add_hw_queue,
.remove_hw_queue = mes_v12_0_remove_hw_queue,
@@ -888,6 +992,8 @@ static const struct amdgpu_mes_funcs mes_v12_0_funcs = {
.resume_gang = mes_v12_0_resume_gang,
.misc_op = mes_v12_0_misc_op,
.reset_hw_queue = mes_v12_0_reset_hw_queue,
+ .invalidate_tlbs_pasid = mes_v12_0_inv_tlbs_pasid,
+ .detect_and_reset_hung_queues = mes_v12_0_detect_and_reset_hung_queues,
};
static int mes_v12_0_allocate_ucode_buffer(struct amdgpu_device *adev,
@@ -1793,6 +1899,8 @@ static int mes_v12_0_early_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
int pipe, r;
+ adev->mes.hung_queue_db_array_size =
+ MES12_HUNG_DB_OFFSET_ARRAY_SIZE;
for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
r = amdgpu_mes_init_microcode(adev, pipe);
if (r)
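mes_v12_inv_tlb_convert_hub_id() above collapses the driver's hub identifiers to the two values the INV_TLBS packet understands: gfx hub to 0, first mmhub to 1, everything else rejected. A standalone restatement of that mapping; the AMDGPU_IS_* checks are modeled as a plain enum here, purely for illustration:

#include <errno.h>

enum hub { HUB_GFX, HUB_MM0, HUB_OTHER }; /* illustrative driver-side ids */

static int mes_hub_id_sketch(enum hub id)
{
	switch (id) {
	case HUB_GFX: return 0;  /* master XCC covers all gfx hubs */
	case HUB_MM0: return 1;
	default:      return -EINVAL;
	}
}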
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 48101a34e049..9a40107a0869 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -292,14 +292,32 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
}
}
-static void xgpu_ai_mailbox_bad_pages_work(struct work_struct *work)
+static void xgpu_ai_mailbox_req_bad_pages_work(struct work_struct *work)
{
- struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, bad_pages_work);
+ struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, req_bad_pages_work);
struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
if (down_read_trylock(&adev->reset_domain->sem)) {
amdgpu_virt_fini_data_exchange(adev);
amdgpu_virt_request_bad_pages(adev);
+ up_read(&adev->reset_domain->sem);
+ }
+}
+
+/**
+ * xgpu_ai_mailbox_handle_bad_pages_work - Reinitialize the data exchange region to get fresh bad page information
+ * @work: pointer to the work_struct
+ *
+ * This work handler is triggered when bad pages are ready, and it reinitializes
+ * the data exchange region to retrieve updated bad page information from the host.
+ */
+static void xgpu_ai_mailbox_handle_bad_pages_work(struct work_struct *work)
+{
+ struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, handle_bad_pages_work);
+ struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
+
+ if (down_read_trylock(&adev->reset_domain->sem)) {
+ amdgpu_virt_fini_data_exchange(adev);
amdgpu_virt_init_data_exchange(adev);
up_read(&adev->reset_domain->sem);
}
@@ -327,10 +345,15 @@ static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
switch (event) {
+ case IDH_RAS_BAD_PAGES_READY:
+ xgpu_ai_mailbox_send_ack(adev);
+ if (amdgpu_sriov_runtime(adev))
+ schedule_work(&adev->virt.handle_bad_pages_work);
+ break;
case IDH_RAS_BAD_PAGES_NOTIFICATION:
xgpu_ai_mailbox_send_ack(adev);
if (amdgpu_sriov_runtime(adev))
- schedule_work(&adev->virt.bad_pages_work);
+ schedule_work(&adev->virt.req_bad_pages_work);
break;
case IDH_UNRECOV_ERR_NOTIFICATION:
xgpu_ai_mailbox_send_ack(adev);
@@ -415,7 +438,8 @@ int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev)
}
INIT_WORK(&adev->virt.flr_work, xgpu_ai_mailbox_flr_work);
- INIT_WORK(&adev->virt.bad_pages_work, xgpu_ai_mailbox_bad_pages_work);
+ INIT_WORK(&adev->virt.req_bad_pages_work, xgpu_ai_mailbox_req_bad_pages_work);
+ INIT_WORK(&adev->virt.handle_bad_pages_work, xgpu_ai_mailbox_handle_bad_pages_work);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
index f6d8597452ed..e5282a5d05d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -202,8 +202,8 @@ send_request:
case IDH_REQ_RAS_CPER_DUMP:
event = IDH_RAS_CPER_DUMP_READY;
break;
- case IDH_REQ_RAS_BAD_PAGES:
- event = IDH_RAS_BAD_PAGES_READY;
+ case IDH_REQ_RAS_CHK_CRITI:
+ event = IDH_REQ_RAS_CHK_CRITI_READY;
break;
default:
break;
@@ -359,14 +359,32 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
}
}
-static void xgpu_nv_mailbox_bad_pages_work(struct work_struct *work)
+static void xgpu_nv_mailbox_req_bad_pages_work(struct work_struct *work)
{
- struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, bad_pages_work);
+ struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, req_bad_pages_work);
struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
if (down_read_trylock(&adev->reset_domain->sem)) {
amdgpu_virt_fini_data_exchange(adev);
amdgpu_virt_request_bad_pages(adev);
+ up_read(&adev->reset_domain->sem);
+ }
+}
+
+/**
+ * xgpu_nv_mailbox_handle_bad_pages_work - Reinitialize the data exchange region to get fresh bad page information
+ * @work: pointer to the work_struct
+ *
+ * This work handler is triggered when bad pages are ready, and it reinitializes
+ * the data exchange region to retrieve updated bad page information from the host.
+ */
+static void xgpu_nv_mailbox_handle_bad_pages_work(struct work_struct *work)
+{
+ struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, handle_bad_pages_work);
+ struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
+
+ if (down_read_trylock(&adev->reset_domain->sem)) {
+ amdgpu_virt_fini_data_exchange(adev);
amdgpu_virt_init_data_exchange(adev);
up_read(&adev->reset_domain->sem);
}
@@ -397,10 +415,15 @@ static int xgpu_nv_mailbox_rcv_irq(struct amdgpu_device *adev,
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
switch (event) {
+ case IDH_RAS_BAD_PAGES_READY:
+ xgpu_nv_mailbox_send_ack(adev);
+ if (amdgpu_sriov_runtime(adev))
+ schedule_work(&adev->virt.handle_bad_pages_work);
+ break;
case IDH_RAS_BAD_PAGES_NOTIFICATION:
xgpu_nv_mailbox_send_ack(adev);
if (amdgpu_sriov_runtime(adev))
- schedule_work(&adev->virt.bad_pages_work);
+ schedule_work(&adev->virt.req_bad_pages_work);
break;
case IDH_UNRECOV_ERR_NOTIFICATION:
xgpu_nv_mailbox_send_ack(adev);
@@ -485,7 +508,8 @@ int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev)
}
INIT_WORK(&adev->virt.flr_work, xgpu_nv_mailbox_flr_work);
- INIT_WORK(&adev->virt.bad_pages_work, xgpu_nv_mailbox_bad_pages_work);
+ INIT_WORK(&adev->virt.req_bad_pages_work, xgpu_nv_mailbox_req_bad_pages_work);
+ INIT_WORK(&adev->virt.handle_bad_pages_work, xgpu_nv_mailbox_handle_bad_pages_work);
return 0;
}
@@ -535,6 +559,16 @@ static int xgpu_nv_req_ras_bad_pages(struct amdgpu_device *adev)
return xgpu_nv_send_access_requests(adev, IDH_REQ_RAS_BAD_PAGES);
}
+static int xgpu_nv_check_vf_critical_region(struct amdgpu_device *adev, u64 addr)
+{
+ uint32_t addr_hi, addr_lo;
+
+ addr_hi = (uint32_t)(addr >> 32);
+ addr_lo = (uint32_t)(addr & 0xFFFFFFFF);
+ return xgpu_nv_send_access_requests_with_param(
+ adev, IDH_REQ_RAS_CHK_CRITI, addr_hi, addr_lo, 0);
+}
+
const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
.req_full_gpu = xgpu_nv_request_full_gpu_access,
.rel_full_gpu = xgpu_nv_release_full_gpu_access,
@@ -548,4 +582,5 @@ const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
.req_ras_err_count = xgpu_nv_req_ras_err_count,
.req_ras_cper_dump = xgpu_nv_req_ras_cper_dump,
.req_bad_pages = xgpu_nv_req_ras_bad_pages,
+ .req_ras_chk_criti = xgpu_nv_check_vf_critical_region,
};
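xgpu_nv_check_vf_critical_region() above ships a 64-bit GPU address to the host as two 32-bit mailbox parameters. The split, plus the reassembly the receiving side would perform (the join helper is illustrative; the host-side code is not part of this patch):

#include <stdint.h>

static inline void split_addr(uint64_t addr, uint32_t *hi, uint32_t *lo)
{
	*hi = (uint32_t)(addr >> 32);
	*lo = (uint32_t)(addr & 0xFFFFFFFF);
}

static inline uint64_t join_addr(uint32_t hi, uint32_t lo)
{
	return ((uint64_t)hi << 32) | lo; /* lossless round trip */
}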
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
index 5808689562cc..c1083e5e41e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
@@ -43,6 +43,7 @@ enum idh_request {
IDH_REQ_RAS_ERROR_COUNT = 203,
IDH_REQ_RAS_CPER_DUMP = 204,
IDH_REQ_RAS_BAD_PAGES = 205,
+ IDH_REQ_RAS_CHK_CRITI = 206
};
enum idh_event {
@@ -62,6 +63,7 @@ enum idh_event {
IDH_RAS_BAD_PAGES_READY = 15,
IDH_RAS_BAD_PAGES_NOTIFICATION = 16,
IDH_UNRECOV_ERR_NOTIFICATION = 17,
+ IDH_REQ_RAS_CHK_CRITI_READY = 18,
IDH_TEXT_MESSAGE = 255,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index d5002ff931d8..860bc5cb03c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -151,9 +151,9 @@ static void nbio_v7_4_sdma_doorbell_range(struct amdgpu_device *adev, int instan
* BIF_SDMA0_DOORBELL_RANGE: 0x3bc0
* BIF_SDMA1_DOORBELL_RANGE: 0x3bc4
* BIF_SDMA2_DOORBELL_RANGE: 0x3bd8
-+ * BIF_SDMA4_DOORBELL_RANGE:
-+ * ARCTURUS: 0x3be0
-+ * ALDEBARAN: 0x3be4
+ * BIF_SDMA4_DOORBELL_RANGE:
+ * ARCTURUS: 0x3be0
+ * ALDEBARAN: 0x3be4
*/
if (adev->asic_type == CHIP_ALDEBARAN && instance == 4)
reg = instance + 0x4 + 0x1 +
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.h b/drivers/gpu/drm/amd/amdgpu/nv.h
index 83e9782aef39..8f4817404f10 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.h
+++ b/drivers/gpu/drm/amd/amdgpu/nv.h
@@ -31,5 +31,6 @@ extern const struct amdgpu_ip_block_version nv_common_ip_block;
void nv_grbm_select(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 queue, u32 vmid);
void nv_set_virt_ops(struct amdgpu_device *adev);
+int cyan_skillfish_reg_base_init(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
index dd2d66090d23..68aef47254a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
@@ -743,7 +743,7 @@ int smu_v11_0_i2c_control_init(struct amdgpu_device *adev)
adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
- res = i2c_add_adapter(control);
+ res = devm_i2c_add_adapter(adev->dev, control);
if (res)
DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
@@ -752,9 +752,6 @@ int smu_v11_0_i2c_control_init(struct amdgpu_device *adev)
void smu_v11_0_i2c_control_fini(struct amdgpu_device *adev)
{
- struct i2c_adapter *control = adev->pm.ras_eeprom_i2c_bus;
-
- i2c_del_adapter(control);
adev->pm.ras_eeprom_i2c_bus = NULL;
adev->pm.fru_eeprom_i2c_bus = NULL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 9e74c9822e62..9785fada4fa7 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -741,7 +741,6 @@ static void soc15_reg_base_init(struct amdgpu_device *adev)
void soc15_set_virt_ops(struct amdgpu_device *adev)
{
adev->virt.ops = &xgpu_ai_virt_ops;
-
/* init soc15 reg base early enough so we can
* request full access for sriov before
* set_ip_blocks. */
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
index e590cbdd8de9..8dc32787d625 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
@@ -536,8 +536,11 @@ static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev,
hwid = REG_GET_FIELD(ipid, MCMP1_IPIDT0, HardwareID);
mcatype = REG_GET_FIELD(ipid, MCMP1_IPIDT0, McaType);
- if ((hwid != MCA_UMC_HWID_V12_0) || (mcatype != MCA_UMC_MCATYPE_V12_0))
+ /* The IP block decode of consumption is SMU */
+ if (hwid != MCA_UMC_HWID_V12_0 || mcatype != MCA_UMC_MCATYPE_V12_0) {
+ con->umc_ecc_log.consumption_q_count++;
return 0;
+ }
if (!status)
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
index 5dbaebb592b3..2e79a3afc774 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
@@ -623,7 +623,22 @@ static void uvd_v3_1_enable_mgcg(struct amdgpu_device *adev,
*
* @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
- * Initialize the hardware, boot up the VCPU and do some testing
+ * Initialize the hardware, boot up the VCPU and do some testing.
+ *
+ * On SI, the UVD is meant to be used in a specific power state,
+ * or alternatively the driver can manually enable its clock.
+ * In amdgpu we use the dedicated UVD power state when DPM is enabled.
+ * Calling amdgpu_dpm_enable_uvd makes DPM select the UVD power state
+ * for the SMU and afterwards enables the UVD clock.
+ * This is automatically done by amdgpu_uvd_ring_begin_use when work
+ * is submitted to the UVD ring. Here, we have to call it manually
+ * in order to power up UVD before firmware validation.
+ *
+ * Note that we must not disable the UVD clock here, as that would
+ * cause the ring test to fail. However, UVD is powered off
+ * automatically after the ring test: amdgpu_uvd_ring_end_use calls
+ * the UVD idle work handler which will disable the UVD clock when
+ * all fences are signalled.
*/
static int uvd_v3_1_hw_init(struct amdgpu_ip_block *ip_block)
{
@@ -633,6 +648,15 @@ static int uvd_v3_1_hw_init(struct amdgpu_ip_block *ip_block)
int r;
uvd_v3_1_mc_resume(adev);
+ uvd_v3_1_enable_mgcg(adev, true);
+
+ /* Make sure UVD is powered during FW validation.
+ * It's going to be automatically powered off after the ring test.
+ */
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_uvd(adev, true);
+ else
+ amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
r = uvd_v3_1_fw_validate(adev);
if (r) {
@@ -640,9 +664,6 @@ static int uvd_v3_1_hw_init(struct amdgpu_ip_block *ip_block)
return r;
}
- uvd_v3_1_enable_mgcg(adev, true);
- amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
-
uvd_v3_1_start(adev);
r = amdgpu_ring_test_helper(ring);
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 1c07b701d0e4..ceb94bbb03a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -217,7 +217,8 @@ static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
- AMDGPU_IB_POOL_DIRECT, &job);
+ AMDGPU_IB_POOL_DIRECT, &job,
+ AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
@@ -281,7 +282,8 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
- AMDGPU_IB_POOL_DIRECT, &job);
+ AMDGPU_IB_POOL_DIRECT, &job,
+ AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 9d237b5937fb..1f8866f3f63c 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -225,7 +225,8 @@ static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, u32 handle,
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
- AMDGPU_IB_POOL_DIRECT, &job);
+ AMDGPU_IB_POOL_DIRECT, &job,
+ AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
@@ -288,7 +289,8 @@ static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, u32 handle,
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
- AMDGPU_IB_POOL_DIRECT, &job);
+ AMDGPU_IB_POOL_DIRECT, &job,
+ AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index c74947705d77..a316797875a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -193,7 +193,7 @@ static int vcn_v1_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->vcn.inst[0].pause_dpg_mode = vcn_v1_0_pause_dpg_mode;
if (amdgpu_vcnfw_log) {
- volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
+ struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
fw_shared->present_flag_0 = 0;
amdgpu_vcn_fwlog_init(adev->vcn.inst);
@@ -230,11 +230,11 @@ static int vcn_v1_0_sw_fini(struct amdgpu_ip_block *ip_block)
jpeg_v1_0_sw_fini(ip_block);
- r = amdgpu_vcn_sw_fini(adev, 0);
+ amdgpu_vcn_sw_fini(adev, 0);
kfree(adev->vcn.ip_dump);
- return r;
+ return 0;
}
/**
@@ -1338,7 +1338,6 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
- ring = &adev->vcn.inst->ring_dec;
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
@@ -1399,7 +1398,6 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL,
UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
- ring = &adev->vcn.inst->ring_dec;
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index 68b4371df0f1..8897dcc9c1a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -136,10 +136,8 @@ static int vcn_v2_0_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
int i, r;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_0);
- uint32_t *ptr;
struct amdgpu_device *adev = ip_block->adev;
- volatile struct amdgpu_fw_shared *fw_shared;
+ struct amdgpu_fw_shared *fw_shared;
/* VCN DEC TRAP */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
@@ -232,14 +230,9 @@ static int vcn_v2_0_sw_init(struct amdgpu_ip_block *ip_block)
if (amdgpu_vcnfw_log)
amdgpu_vcn_fwlog_init(adev->vcn.inst);
- /* Allocate memory for VCN IP Dump buffer */
- ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
- if (!ptr) {
- DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
- adev->vcn.ip_dump = NULL;
- } else {
- adev->vcn.ip_dump = ptr;
- }
+ r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_2_0, ARRAY_SIZE(vcn_reg_list_2_0));
+ if (r)
+ return r;
r = amdgpu_vcn_sysfs_reset_mask_init(adev);
if (r)
@@ -259,7 +252,7 @@ static int vcn_v2_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
int r, idx;
struct amdgpu_device *adev = ip_block->adev;
- volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
+ struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
fw_shared->present_flag_0 = 0;
@@ -274,11 +267,9 @@ static int vcn_v2_0_sw_fini(struct amdgpu_ip_block *ip_block)
amdgpu_vcn_sysfs_reset_mask_fini(adev);
- r = amdgpu_vcn_sw_fini(adev, 0);
-
- kfree(adev->vcn.ip_dump);
+ amdgpu_vcn_sw_fini(adev, 0);
- return r;
+ return 0;
}
/**
@@ -862,9 +853,10 @@ static void vcn_v2_0_enable_static_power_gating(struct amdgpu_vcn_inst *vinst)
static int vcn_v2_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
{
struct amdgpu_device *adev = vinst->adev;
- volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
+ struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
uint32_t rb_bufsz, tmp;
+ int ret;
vcn_v2_0_enable_static_power_gating(vinst);
@@ -948,8 +940,13 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
UVD, 0, mmUVD_MASTINT_EN),
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
- if (indirect)
- amdgpu_vcn_psp_update_sram(adev, 0, 0);
+ if (indirect) {
+ ret = amdgpu_vcn_psp_update_sram(adev, 0, 0);
+ if (ret) {
+ dev_err(adev->dev, "vcn sram load failed %d\n", ret);
+ return ret;
+ }
+ }
/* force RBC into idle state */
rb_bufsz = order_base_2(ring->ring_size);
@@ -1004,7 +1001,7 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
static int vcn_v2_0_start(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
- volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
+ struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
uint32_t rb_bufsz, tmp;
uint32_t lmi_swap_cntl;
@@ -1311,7 +1308,7 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
if (!ret_code) {
- volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
+ struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
/* pause DPG */
reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
@@ -2095,66 +2092,6 @@ static int vcn_v2_0_start_sriov(struct amdgpu_device *adev)
return vcn_v2_0_start_mmsch(adev, &adev->virt.mm_table);
}
-static void vcn_v2_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_0);
- uint32_t inst_off, is_powered;
-
- if (!adev->vcn.ip_dump)
- return;
-
- drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i)) {
- drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
- continue;
- }
-
- inst_off = i * reg_count;
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered) {
- drm_printf(p, "\nActive Instance:VCN%d\n", i);
- for (j = 0; j < reg_count; j++)
- drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_2_0[j].reg_name,
- adev->vcn.ip_dump[inst_off + j]);
- } else {
- drm_printf(p, "\nInactive Instance:VCN%d\n", i);
- }
- }
-}
-
-static void vcn_v2_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- bool is_powered;
- uint32_t inst_off;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_0);
-
- if (!adev->vcn.ip_dump)
- return;
-
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
-
- inst_off = i * reg_count;
- /* mmUVD_POWER_STATUS is always readable and is first element of the array */
- adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, mmUVD_POWER_STATUS);
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered)
- for (j = 1; j < reg_count; j++)
- adev->vcn.ip_dump[inst_off + j] =
- RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_2_0[j], i));
- }
-}
-
static const struct amd_ip_funcs vcn_v2_0_ip_funcs = {
.name = "vcn_v2_0",
.early_init = vcn_v2_0_early_init,
@@ -2168,8 +2105,8 @@ static const struct amd_ip_funcs vcn_v2_0_ip_funcs = {
.wait_for_idle = vcn_v2_0_wait_for_idle,
.set_clockgating_state = vcn_v2_0_set_clockgating_state,
.set_powergating_state = vcn_set_powergating_state,
- .dump_ip_state = vcn_v2_0_dump_ip_state,
- .print_ip_state = vcn_v2_0_print_ip_state,
+ .dump_ip_state = amdgpu_vcn_dump_ip_state,
+ .print_ip_state = amdgpu_vcn_print_ip_state,
};
static const struct amdgpu_ring_funcs vcn_v2_0_dec_ring_vm_funcs = {
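The per-version print/dump callbacks removed above are duplicated almost verbatim for 2.5, 3.0, 4.x and 5.0 further down; all of them collapse into the shared amdgpu_vcn_print_ip_state()/amdgpu_vcn_dump_ip_state(). A hedged sketch of the print side, reconstructed from the deleted vcn_v2_0 body — the shared version has to read the register list and count stashed by amdgpu_vcn_reg_dump_init() instead of a per-file array, and the reg_list/reg_count field names here are assumptions:

	/* Sketch only, not the real implementation in amdgpu_vcn.c. */
	void amdgpu_vcn_print_ip_state(struct amdgpu_ip_block *ip_block,
				       struct drm_printer *p)
	{
		struct amdgpu_device *adev = ip_block->adev;
		uint32_t reg_count = adev->vcn.reg_count;	/* assumed field */
		uint32_t inst_off, is_powered;
		int i, j;

		if (!adev->vcn.ip_dump)
			return;

		drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->vcn.harvest_config & (1 << i)) {
				drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
				continue;
			}

			inst_off = i * reg_count;
			is_powered = (adev->vcn.ip_dump[inst_off] &
				      UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
			if (!is_powered) {
				drm_printf(p, "\nInactive Instance:VCN%d\n", i);
				continue;
			}

			drm_printf(p, "\nActive Instance:VCN%d\n", i);
			for (j = 0; j < reg_count; j++)
				drm_printf(p, "%-50s \t 0x%08x\n",
					   adev->vcn.reg_list[j].reg_name,	/* assumed field */
					   adev->vcn.ip_dump[inst_off + j]);
		}
	}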
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index bc30a5326866..cebee453871c 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -116,7 +116,6 @@ static void vcn_v2_5_idle_work_handler(struct work_struct *work)
struct amdgpu_device *adev = vcn_inst->adev;
unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
unsigned int i, j;
- int r = 0;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
struct amdgpu_vcn_inst *v = &adev->vcn.inst[i];
@@ -149,15 +148,7 @@ static void vcn_v2_5_idle_work_handler(struct work_struct *work)
if (!fences && !atomic_read(&adev->vcn.inst[0].total_submission_cnt)) {
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
AMD_PG_STATE_GATE);
- mutex_lock(&adev->vcn.workload_profile_mutex);
- if (adev->vcn.workload_profile_active) {
- r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
- false);
- if (r)
- dev_warn(adev->dev, "(%d) failed to disable video power profile mode\n", r);
- adev->vcn.workload_profile_active = false;
- }
- mutex_unlock(&adev->vcn.workload_profile_mutex);
+ amdgpu_vcn_put_profile(adev);
} else {
schedule_delayed_work(&adev->vcn.inst[0].idle_work, VCN_IDLE_TIMEOUT);
}
@@ -167,7 +158,6 @@ static void vcn_v2_5_ring_begin_use(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_vcn_inst *v = &adev->vcn.inst[ring->me];
- int r = 0;
atomic_inc(&adev->vcn.inst[0].total_submission_cnt);
@@ -177,20 +167,6 @@ static void vcn_v2_5_ring_begin_use(struct amdgpu_ring *ring)
* the delayed work so there is no one else to set it to false
* and we don't care if someone else sets it to true.
*/
- if (adev->vcn.workload_profile_active)
- goto pg_lock;
-
- mutex_lock(&adev->vcn.workload_profile_mutex);
- if (!adev->vcn.workload_profile_active) {
- r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
- true);
- if (r)
- dev_warn(adev->dev, "(%d) failed to switch to video power profile mode\n", r);
- adev->vcn.workload_profile_active = true;
- }
- mutex_unlock(&adev->vcn.workload_profile_mutex);
-
-pg_lock:
mutex_lock(&adev->vcn.inst[0].vcn_pg_lock);
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
AMD_PG_STATE_UNGATE);
@@ -218,6 +194,7 @@ pg_lock:
v->pause_dpg_mode(v, &new_state);
}
mutex_unlock(&adev->vcn.inst[0].vcn_pg_lock);
+ amdgpu_vcn_get_profile(adev);
}
static void vcn_v2_5_ring_end_use(struct amdgpu_ring *ring)
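The open-coded video power-profile toggling deleted above (and its mirror image in ring_begin_use below) moves behind amdgpu_vcn_get_profile()/amdgpu_vcn_put_profile(). A sketch of the put side, reconstructed from the deleted idle-work logic; the real helper in amdgpu_vcn.c may differ, for example by refcounting across instances:

	/* Sketch reconstructed from the deleted open-coded logic. */
	void amdgpu_vcn_put_profile(struct amdgpu_device *adev)
	{
		int r;

		mutex_lock(&adev->vcn.workload_profile_mutex);
		if (adev->vcn.workload_profile_active) {
			r = amdgpu_dpm_switch_power_profile(adev,
					PP_SMC_POWER_PROFILE_VIDEO, false);
			if (r)
				dev_warn(adev->dev,
					 "(%d) failed to disable video power profile mode\n", r);
			adev->vcn.workload_profile_active = false;
		}
		mutex_unlock(&adev->vcn.workload_profile_mutex);
	}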
@@ -297,12 +274,10 @@ static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
int i, j, r;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_5);
- uint32_t *ptr;
struct amdgpu_device *adev = ip_block->adev;
for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
- volatile struct amdgpu_fw_shared *fw_shared;
+ struct amdgpu_fw_shared *fw_shared;
if (adev->vcn.harvest_config & (1 << j))
continue;
@@ -423,14 +398,9 @@ static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- /* Allocate memory for VCN IP Dump buffer */
- ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
- if (!ptr) {
- DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
- adev->vcn.ip_dump = NULL;
- } else {
- adev->vcn.ip_dump = ptr;
- }
+ r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_2_5, ARRAY_SIZE(vcn_reg_list_2_5));
+ if (r)
+ return r;
r = amdgpu_vcn_sysfs_reset_mask_init(adev);
if (r)
@@ -450,7 +420,7 @@ static int vcn_v2_5_sw_fini(struct amdgpu_ip_block *ip_block)
{
int i, r, idx;
struct amdgpu_device *adev = ip_block->adev;
- volatile struct amdgpu_fw_shared *fw_shared;
+ struct amdgpu_fw_shared *fw_shared;
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
@@ -472,13 +442,9 @@ static int vcn_v2_5_sw_fini(struct amdgpu_ip_block *ip_block)
r = amdgpu_vcn_suspend(adev, i);
if (r)
return r;
- r = amdgpu_vcn_sw_fini(adev, i);
- if (r)
- return r;
+ amdgpu_vcn_sw_fini(adev, i);
}
- kfree(adev->vcn.ip_dump);
-
return 0;
}
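Every sw_init in this series now calls amdgpu_vcn_reg_dump_init() instead of open-coding the kcalloc, and every sw_fini drops its kfree(adev->vcn.ip_dump) — the buffer is presumably freed inside amdgpu_vcn_sw_fini() now. A hedged sketch of the init helper, inferred from the deleted allocation code; note that an allocation failure is now propagated as -ENOMEM rather than silently leaving ip_dump NULL, and the reg_list/reg_count fields are assumptions:

	/* Sketch only; the real helper lives in amdgpu_vcn.c. */
	int amdgpu_vcn_reg_dump_init(struct amdgpu_device *adev,
				     const struct amdgpu_hwip_reg_entry *reg_list,
				     u32 count)
	{
		adev->vcn.ip_dump = kcalloc(adev->vcn.num_vcn_inst * count,
					    sizeof(uint32_t), GFP_KERNEL);
		if (!adev->vcn.ip_dump)
			return -ENOMEM;

		adev->vcn.reg_list = reg_list;	/* assumed field, consumed by the */
		adev->vcn.reg_count = count;	/* common dump/print callbacks    */
		return 0;
	}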
@@ -1032,9 +998,10 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
{
struct amdgpu_device *adev = vinst->adev;
int inst_idx = vinst->inst;
- volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
+ struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
uint32_t rb_bufsz, tmp;
+ int ret;
/* disable register anti-hang mechanism */
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 1,
@@ -1125,8 +1092,13 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
VCN, 0, mmUVD_MASTINT_EN),
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
- if (indirect)
- amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (indirect) {
+ ret = amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (ret) {
+ dev_err(adev->dev, "vcn sram load failed %d\n", ret);
+ return ret;
+ }
+ }
ring = &adev->vcn.inst[inst_idx].ring_dec;
/* force RBC into idle state */
@@ -1183,7 +1155,7 @@ static int vcn_v2_5_start(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
- volatile struct amdgpu_fw_shared *fw_shared =
+ struct amdgpu_fw_shared *fw_shared =
adev->vcn.inst[i].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
uint32_t rb_bufsz, tmp;
@@ -1695,7 +1667,7 @@ static int vcn_v2_5_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
if (!ret_code) {
- volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
+ struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
/* pause DPG */
reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
@@ -2127,66 +2099,6 @@ static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev)
}
}
-static void vcn_v2_5_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_5);
- uint32_t inst_off, is_powered;
-
- if (!adev->vcn.ip_dump)
- return;
-
- drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i)) {
- drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
- continue;
- }
-
- inst_off = i * reg_count;
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered) {
- drm_printf(p, "\nActive Instance:VCN%d\n", i);
- for (j = 0; j < reg_count; j++)
- drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_2_5[j].reg_name,
- adev->vcn.ip_dump[inst_off + j]);
- } else {
- drm_printf(p, "\nInactive Instance:VCN%d\n", i);
- }
- }
-}
-
-static void vcn_v2_5_dump_ip_state(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- bool is_powered;
- uint32_t inst_off;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_5);
-
- if (!adev->vcn.ip_dump)
- return;
-
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
-
- inst_off = i * reg_count;
- /* mmUVD_POWER_STATUS is always readable and is first element of the array */
- adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, mmUVD_POWER_STATUS);
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered)
- for (j = 1; j < reg_count; j++)
- adev->vcn.ip_dump[inst_off + j] =
- RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_2_5[j], i));
- }
-}
-
static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {
.name = "vcn_v2_5",
.early_init = vcn_v2_5_early_init,
@@ -2200,8 +2112,8 @@ static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {
.wait_for_idle = vcn_v2_5_wait_for_idle,
.set_clockgating_state = vcn_v2_5_set_clockgating_state,
.set_powergating_state = vcn_set_powergating_state,
- .dump_ip_state = vcn_v2_5_dump_ip_state,
- .print_ip_state = vcn_v2_5_print_ip_state,
+ .dump_ip_state = amdgpu_vcn_dump_ip_state,
+ .print_ip_state = amdgpu_vcn_print_ip_state,
};
static const struct amd_ip_funcs vcn_v2_6_ip_funcs = {
@@ -2217,8 +2129,8 @@ static const struct amd_ip_funcs vcn_v2_6_ip_funcs = {
.wait_for_idle = vcn_v2_5_wait_for_idle,
.set_clockgating_state = vcn_v2_5_set_clockgating_state,
.set_powergating_state = vcn_set_powergating_state,
- .dump_ip_state = vcn_v2_5_dump_ip_state,
- .print_ip_state = vcn_v2_5_print_ip_state,
+ .dump_ip_state = amdgpu_vcn_dump_ip_state,
+ .print_ip_state = amdgpu_vcn_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v2_5_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 2811226b0ea5..d9cf8f0feeb3 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -175,8 +175,6 @@ static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_ring *ring;
int i, j, r;
int vcn_doorbell_index = 0;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_3_0);
- uint32_t *ptr;
struct amdgpu_device *adev = ip_block->adev;
/*
@@ -193,7 +191,7 @@ static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
}
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_fw_shared *fw_shared;
+ struct amdgpu_fw_shared *fw_shared;
if (adev->vcn.harvest_config & (1 << i))
continue;
@@ -304,14 +302,9 @@ static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
return r;
}
- /* Allocate memory for VCN IP Dump buffer */
- ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
- if (ptr == NULL) {
- DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
- adev->vcn.ip_dump = NULL;
- } else {
- adev->vcn.ip_dump = ptr;
- }
+ r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_3_0, ARRAY_SIZE(vcn_reg_list_3_0));
+ if (r)
+ return r;
r = amdgpu_vcn_sysfs_reset_mask_init(adev);
if (r)
@@ -334,7 +327,7 @@ static int vcn_v3_0_sw_fini(struct amdgpu_ip_block *ip_block)
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_fw_shared *fw_shared;
+ struct amdgpu_fw_shared *fw_shared;
if (adev->vcn.harvest_config & (1 << i))
continue;
@@ -356,12 +349,9 @@ static int vcn_v3_0_sw_fini(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- r = amdgpu_vcn_sw_fini(adev, i);
- if (r)
- return r;
+ amdgpu_vcn_sw_fini(adev, i);
}
- kfree(adev->vcn.ip_dump);
return 0;
}
@@ -1039,9 +1029,10 @@ static int vcn_v3_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
{
struct amdgpu_device *adev = vinst->adev;
int inst_idx = vinst->inst;
- volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
+ struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
uint32_t rb_bufsz, tmp;
+ int ret;
/* disable register anti-hang mechanism */
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 1,
@@ -1134,8 +1125,13 @@ static int vcn_v3_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect);
- if (indirect)
- amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (indirect) {
+ ret = amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (ret) {
+ dev_err(adev->dev, "vcn sram load failed %d\n", ret);
+ return ret;
+ }
+ }
ring = &adev->vcn.inst[inst_idx].ring_dec;
/* force RBC into idle state */
@@ -1198,7 +1194,7 @@ static int vcn_v3_0_start(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
- volatile struct amdgpu_fw_shared *fw_shared;
+ struct amdgpu_fw_shared *fw_shared;
struct amdgpu_ring *ring;
uint32_t rb_bufsz, tmp;
int j, k, r;
@@ -1719,7 +1715,7 @@ static int vcn_v3_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
{
struct amdgpu_device *adev = vinst->adev;
int inst_idx = vinst->inst;
- volatile struct amdgpu_fw_shared *fw_shared;
+ struct amdgpu_fw_shared *fw_shared;
struct amdgpu_ring *ring;
uint32_t reg_data = 0;
int ret_code;
@@ -1838,7 +1834,7 @@ static uint64_t vcn_v3_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
static void vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- volatile struct amdgpu_fw_shared *fw_shared;
+ struct amdgpu_fw_shared *fw_shared;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
/*whenever update RBC_RB_WPTR, we save the wptr in shared rb.wptr and scratch2 */
@@ -2346,67 +2342,6 @@ static void vcn_v3_0_set_irq_funcs(struct amdgpu_device *adev)
}
}
-static void vcn_v3_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_3_0);
- uint32_t inst_off;
- bool is_powered;
-
- if (!adev->vcn.ip_dump)
- return;
-
- drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i)) {
- drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
- continue;
- }
-
- inst_off = i * reg_count;
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered) {
- drm_printf(p, "\nActive Instance:VCN%d\n", i);
- for (j = 0; j < reg_count; j++)
- drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_3_0[j].reg_name,
- adev->vcn.ip_dump[inst_off + j]);
- } else {
- drm_printf(p, "\nInactive Instance:VCN%d\n", i);
- }
- }
-}
-
-static void vcn_v3_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- bool is_powered;
- uint32_t inst_off;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_3_0);
-
- if (!adev->vcn.ip_dump)
- return;
-
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
-
- inst_off = i * reg_count;
- /* mmUVD_POWER_STATUS is always readable and is first element of the array */
- adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, mmUVD_POWER_STATUS);
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered)
- for (j = 1; j < reg_count; j++)
- adev->vcn.ip_dump[inst_off + j] =
- RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_3_0[j], i));
- }
-}
-
static const struct amd_ip_funcs vcn_v3_0_ip_funcs = {
.name = "vcn_v3_0",
.early_init = vcn_v3_0_early_init,
@@ -2420,8 +2355,8 @@ static const struct amd_ip_funcs vcn_v3_0_ip_funcs = {
.wait_for_idle = vcn_v3_0_wait_for_idle,
.set_clockgating_state = vcn_v3_0_set_clockgating_state,
.set_powergating_state = vcn_set_powergating_state,
- .dump_ip_state = vcn_v3_0_dump_ip_state,
- .print_ip_state = vcn_v3_0_print_ip_state,
+ .dump_ip_state = amdgpu_vcn_dump_ip_state,
+ .print_ip_state = amdgpu_vcn_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v3_0_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index 706f3b2f484f..3ae666522d57 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -148,7 +148,7 @@ static int vcn_v4_0_early_init(struct amdgpu_ip_block *ip_block)
static int vcn_v4_0_fw_shared_init(struct amdgpu_device *adev, int inst_idx)
{
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
@@ -183,8 +183,6 @@ static int vcn_v4_0_sw_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_ring *ring;
struct amdgpu_device *adev = ip_block->adev;
int i, r;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0);
- uint32_t *ptr;
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
if (adev->vcn.harvest_config & (1 << i))
@@ -255,14 +253,9 @@ static int vcn_v4_0_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- /* Allocate memory for VCN IP Dump buffer */
- ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
- if (!ptr) {
- DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
- adev->vcn.ip_dump = NULL;
- } else {
- adev->vcn.ip_dump = ptr;
- }
+ r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_4_0, ARRAY_SIZE(vcn_reg_list_4_0));
+ if (r)
+ return r;
r = amdgpu_vcn_sysfs_reset_mask_init(adev);
if (r)
@@ -285,7 +278,7 @@ static int vcn_v4_0_sw_fini(struct amdgpu_ip_block *ip_block)
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
if (adev->vcn.harvest_config & (1 << i))
continue;
@@ -309,13 +302,8 @@ static int vcn_v4_0_sw_fini(struct amdgpu_ip_block *ip_block)
amdgpu_vcn_sysfs_reset_mask_fini(adev);
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- r = amdgpu_vcn_sw_fini(adev, i);
- if (r)
- return r;
- }
-
- kfree(adev->vcn.ip_dump);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+ amdgpu_vcn_sw_fini(adev, i);
return 0;
}
@@ -1009,9 +997,10 @@ static int vcn_v4_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
{
struct amdgpu_device *adev = vinst->adev;
int inst_idx = vinst->inst;
- volatile struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
+ struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
uint32_t tmp;
+ int ret;
/* disable register anti-hang mechanism */
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 1,
@@ -1094,8 +1083,13 @@ static int vcn_v4_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
- if (indirect)
- amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (indirect) {
+ ret = amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (ret) {
+ dev_err(adev->dev, "vcn sram load failed %d\n", ret);
+ return ret;
+ }
+ }
ring = &adev->vcn.inst[inst_idx].ring_enc[0];
@@ -1143,7 +1137,7 @@ static int vcn_v4_0_start(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
struct amdgpu_ring *ring;
uint32_t tmp;
int j, k, r;
@@ -1360,8 +1354,8 @@ static int vcn_v4_0_start_sriov(struct amdgpu_device *adev)
struct mmsch_v4_0_cmd_end end = { {0} };
struct mmsch_v4_0_init_header header;
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
- volatile struct amdgpu_fw_shared_rb_setup *rb_setup;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_fw_shared_rb_setup *rb_setup;
direct_wt.cmd_header.command_type =
MMSCH_COMMAND__DIRECT_REG_WRITE;
@@ -1612,7 +1606,7 @@ static int vcn_v4_0_stop(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
uint32_t tmp;
int r = 0;
@@ -1624,7 +1618,6 @@ static int vcn_v4_0_stop(struct amdgpu_vcn_inst *vinst)
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
vcn_v4_0_stop_dpg_mode(vinst);
- r = 0;
goto done;
}
@@ -1984,7 +1977,7 @@ static struct amdgpu_ring_funcs vcn_v4_0_unified_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_ENC,
.align_mask = 0x3f,
.nop = VCN_ENC_CMD_NO_OP,
- .extra_dw = sizeof(struct amdgpu_vcn_rb_metadata),
+ .extra_bytes = sizeof(struct amdgpu_vcn_rb_metadata),
.get_rptr = vcn_v4_0_unified_ring_get_rptr,
.get_wptr = vcn_v4_0_unified_ring_get_wptr,
.set_wptr = vcn_v4_0_unified_ring_set_wptr,
@@ -2240,67 +2233,6 @@ static void vcn_v4_0_set_irq_funcs(struct amdgpu_device *adev)
}
}
-static void vcn_v4_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0);
- uint32_t inst_off, is_powered;
-
- if (!adev->vcn.ip_dump)
- return;
-
- drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i)) {
- drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
- continue;
- }
-
- inst_off = i * reg_count;
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered) {
- drm_printf(p, "\nActive Instance:VCN%d\n", i);
- for (j = 0; j < reg_count; j++)
- drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_4_0[j].reg_name,
- adev->vcn.ip_dump[inst_off + j]);
- } else {
- drm_printf(p, "\nInactive Instance:VCN%d\n", i);
- }
- }
-}
-
-static void vcn_v4_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- bool is_powered;
- uint32_t inst_off;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0);
-
- if (!adev->vcn.ip_dump)
- return;
-
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
-
- inst_off = i * reg_count;
- /* mmUVD_POWER_STATUS is always readable and is first element of the array */
- adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, regUVD_POWER_STATUS);
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered)
- for (j = 1; j < reg_count; j++)
- adev->vcn.ip_dump[inst_off + j] =
- RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_4_0[j],
- i));
- }
-}
-
static const struct amd_ip_funcs vcn_v4_0_ip_funcs = {
.name = "vcn_v4_0",
.early_init = vcn_v4_0_early_init,
@@ -2314,8 +2246,8 @@ static const struct amd_ip_funcs vcn_v4_0_ip_funcs = {
.wait_for_idle = vcn_v4_0_wait_for_idle,
.set_clockgating_state = vcn_v4_0_set_clockgating_state,
.set_powergating_state = vcn_set_powergating_state,
- .dump_ip_state = vcn_v4_0_dump_ip_state,
- .print_ip_state = vcn_v4_0_print_ip_state,
+ .dump_ip_state = amdgpu_vcn_dump_ip_state,
+ .print_ip_state = amdgpu_vcn_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v4_0_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
index 2a3663b551af..eacf4e93ba2f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
@@ -134,6 +134,19 @@ static int vcn_v4_0_3_early_init(struct amdgpu_ip_block *ip_block)
return 0;
}
+static int vcn_v4_0_3_late_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+
+ adev->vcn.supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+
+ if (amdgpu_dpm_reset_vcn_is_supported(adev))
+ adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+
+ return 0;
+}
+
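Advertising the queue-reset capability moves out of sw_init into this new late_init (5.0.1 gets the same treatment further down), presumably because amdgpu_dpm_reset_vcn_is_supported() needs the SMU/DPM stack initialized, which is only guaranteed once hw_init has run. An illustrative consumer-side check, not taken from this patch:

	/* Illustrative only. */
	if (adev->vcn.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE)
		drm_info(adev_to_drm(adev), "VCN supports per-queue reset\n");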
static int vcn_v4_0_3_fw_shared_init(struct amdgpu_device *adev, int inst_idx)
{
struct amdgpu_vcn4_fw_shared *fw_shared;
@@ -160,8 +173,6 @@ static int vcn_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
int i, r, vcn_inst;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_3);
- uint32_t *ptr;
/* VCN DEC TRAP */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
@@ -201,7 +212,11 @@ static int vcn_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
ring->vm_hub = AMDGPU_MMHUB0(adev->vcn.inst[i].aid_id);
sprintf(ring->name, "vcn_unified_%d", adev->vcn.inst[i].aid_id);
- r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
+
+ /* There are no per-instance irq source IDs on 4.0.3; the IH
+ * packets use a separate field to differentiate instances.
+ */
+ r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[0].irq, 0,
AMDGPU_RING_PRIO_DEFAULT,
&adev->vcn.inst[i].sched_score);
if (r)
@@ -213,10 +228,6 @@ static int vcn_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
adev->vcn.inst[i].pause_dpg_mode = vcn_v4_0_3_pause_dpg_mode;
}
- /* TODO: Add queue reset mask when FW fully supports it */
- adev->vcn.supported_reset =
- amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
-
if (amdgpu_sriov_vf(adev)) {
r = amdgpu_virt_alloc_mm_table(adev);
if (r)
@@ -231,20 +242,11 @@ static int vcn_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
}
}
- /* Allocate memory for VCN IP Dump buffer */
- ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
- if (!ptr) {
- DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
- adev->vcn.ip_dump = NULL;
- } else {
- adev->vcn.ip_dump = ptr;
- }
-
- r = amdgpu_vcn_sysfs_reset_mask_init(adev);
+ r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_4_0_3, ARRAY_SIZE(vcn_reg_list_4_0_3));
if (r)
return r;
- return 0;
+ return amdgpu_vcn_sysfs_reset_mask_init(adev);
}
/**
@@ -261,7 +263,7 @@ static int vcn_v4_0_3_sw_fini(struct amdgpu_ip_block *ip_block)
if (drm_dev_enter(&adev->ddev, &idx)) {
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
fw_shared->present_flag_0 = 0;
@@ -281,13 +283,8 @@ static int vcn_v4_0_3_sw_fini(struct amdgpu_ip_block *ip_block)
amdgpu_vcn_sysfs_reset_mask_fini(adev);
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- r = amdgpu_vcn_sw_fini(adev, i);
- if (r)
- return r;
- }
-
- kfree(adev->vcn.ip_dump);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+ amdgpu_vcn_sw_fini(adev, i);
return 0;
}
@@ -391,7 +388,7 @@ static int vcn_v4_0_3_hw_fini(struct amdgpu_ip_block *ip_block)
vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
}
- if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN) && !amdgpu_sriov_vf(adev))
amdgpu_irq_put(adev, &adev->vcn.inst->ras_poison_irq, 0);
return 0;
@@ -848,10 +845,10 @@ static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
{
struct amdgpu_device *adev = vinst->adev;
int inst_idx = vinst->inst;
- volatile struct amdgpu_vcn4_fw_shared *fw_shared =
+ struct amdgpu_vcn4_fw_shared *fw_shared =
adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
- int vcn_inst;
+ int vcn_inst, ret;
uint32_t tmp;
vcn_inst = GET_INST(VCN, inst_idx);
@@ -944,8 +941,13 @@ static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
VCN, 0, regUVD_MASTINT_EN),
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
- if (indirect)
- amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM);
+ if (indirect) {
+ ret = amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM);
+ if (ret) {
+ dev_err(adev->dev, "vcn sram load failed %d\n", ret);
+ return ret;
+ }
+ }
ring = &adev->vcn.inst[inst_idx].ring_enc[0];
@@ -1010,8 +1012,8 @@ static int vcn_v4_0_3_start_sriov(struct amdgpu_device *adev)
struct mmsch_v4_0_cmd_end end = { {0} };
struct mmsch_v4_0_3_init_header header;
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
- volatile struct amdgpu_fw_shared_rb_setup *rb_setup;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_fw_shared_rb_setup *rb_setup;
direct_wt.cmd_header.command_type =
MMSCH_COMMAND__DIRECT_REG_WRITE;
@@ -1185,7 +1187,7 @@ static int vcn_v4_0_3_start(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
struct amdgpu_ring *ring;
int j, k, r, vcn_inst;
uint32_t tmp;
@@ -1395,7 +1397,7 @@ static int vcn_v4_0_3_stop(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
int r = 0, vcn_inst;
uint32_t tmp;
@@ -1872,71 +1874,10 @@ static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev)
adev->vcn.inst->ras_poison_irq.funcs = &vcn_v4_0_3_ras_irq_funcs;
}
-static void vcn_v4_0_3_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_3);
- uint32_t inst_off, is_powered;
-
- if (!adev->vcn.ip_dump)
- return;
-
- drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i)) {
- drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
- continue;
- }
-
- inst_off = i * reg_count;
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered) {
- drm_printf(p, "\nActive Instance:VCN%d\n", i);
- for (j = 0; j < reg_count; j++)
- drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_4_0_3[j].reg_name,
- adev->vcn.ip_dump[inst_off + j]);
- } else {
- drm_printf(p, "\nInactive Instance:VCN%d\n", i);
- }
- }
-}
-
-static void vcn_v4_0_3_dump_ip_state(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- bool is_powered;
- uint32_t inst_off, inst_id;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_3);
-
- if (!adev->vcn.ip_dump)
- return;
-
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
-
- inst_id = GET_INST(VCN, i);
- inst_off = i * reg_count;
- /* mmUVD_POWER_STATUS is always readable and is first element of the array */
- adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, inst_id, regUVD_POWER_STATUS);
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered)
- for (j = 1; j < reg_count; j++)
- adev->vcn.ip_dump[inst_off + j] =
- RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_4_0_3[j],
- inst_id));
- }
-}
-
static const struct amd_ip_funcs vcn_v4_0_3_ip_funcs = {
.name = "vcn_v4_0_3",
.early_init = vcn_v4_0_3_early_init,
+ .late_init = vcn_v4_0_3_late_init,
.sw_init = vcn_v4_0_3_sw_init,
.sw_fini = vcn_v4_0_3_sw_fini,
.hw_init = vcn_v4_0_3_hw_init,
@@ -1947,8 +1888,8 @@ static const struct amd_ip_funcs vcn_v4_0_3_ip_funcs = {
.wait_for_idle = vcn_v4_0_3_wait_for_idle,
.set_clockgating_state = vcn_v4_0_3_set_clockgating_state,
.set_powergating_state = vcn_set_powergating_state,
- .dump_ip_state = vcn_v4_0_3_dump_ip_state,
- .print_ip_state = vcn_v4_0_3_print_ip_state,
+ .dump_ip_state = amdgpu_vcn_dump_ip_state,
+ .print_ip_state = amdgpu_vcn_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v4_0_3_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
index caf2d95a85d4..b107ee80e472 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
@@ -147,12 +147,9 @@ static int vcn_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_ring *ring;
struct amdgpu_device *adev = ip_block->adev;
int i, r;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_5);
- uint32_t *ptr;
-
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
if (adev->vcn.harvest_config & (1 << i))
continue;
@@ -233,15 +230,9 @@ static int vcn_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block)
return r;
}
- /* Allocate memory for VCN IP Dump buffer */
- ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
- if (!ptr) {
- DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
- adev->vcn.ip_dump = NULL;
- } else {
- adev->vcn.ip_dump = ptr;
- }
- return 0;
+ r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_4_0_5, ARRAY_SIZE(vcn_reg_list_4_0_5));
+
+ return r;
}
/**
@@ -258,7 +249,7 @@ static int vcn_v4_0_5_sw_fini(struct amdgpu_ip_block *ip_block)
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
if (adev->vcn.harvest_config & (1 << i))
continue;
@@ -279,13 +270,9 @@ static int vcn_v4_0_5_sw_fini(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- r = amdgpu_vcn_sw_fini(adev, i);
- if (r)
- return r;
+ amdgpu_vcn_sw_fini(adev, i);
}
- kfree(adev->vcn.ip_dump);
-
return 0;
}
@@ -923,9 +910,10 @@ static int vcn_v4_0_5_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
{
struct amdgpu_device *adev = vinst->adev;
int inst_idx = vinst->inst;
- volatile struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
+ struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
uint32_t tmp;
+ int ret;
/* disable register anti-hang mechanism */
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 1,
@@ -1006,8 +994,13 @@ static int vcn_v4_0_5_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
VCN, inst_idx, regUVD_MASTINT_EN),
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
- if (indirect)
- amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (indirect) {
+ ret = amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (ret) {
+ dev_err(adev->dev, "vcn sram load failed %d\n", ret);
+ return ret;
+ }
+ }
ring = &adev->vcn.inst[inst_idx].ring_enc[0];
@@ -1054,7 +1047,7 @@ static int vcn_v4_0_5_start(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
struct amdgpu_ring *ring;
uint32_t tmp;
int j, k, r;
@@ -1273,7 +1266,7 @@ static int vcn_v4_0_5_stop(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
uint32_t tmp;
int r = 0;
@@ -1596,7 +1589,7 @@ static int vcn_v4_0_5_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = ip_block->adev;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ bool enable = state == AMD_CG_STATE_GATE;
int i;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
@@ -1704,67 +1697,6 @@ static void vcn_v4_0_5_set_irq_funcs(struct amdgpu_device *adev)
}
}
-static void vcn_v4_0_5_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_5);
- uint32_t inst_off, is_powered;
-
- if (!adev->vcn.ip_dump)
- return;
-
- drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i)) {
- drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
- continue;
- }
-
- inst_off = i * reg_count;
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered) {
- drm_printf(p, "\nActive Instance:VCN%d\n", i);
- for (j = 0; j < reg_count; j++)
- drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_4_0_5[j].reg_name,
- adev->vcn.ip_dump[inst_off + j]);
- } else {
- drm_printf(p, "\nInactive Instance:VCN%d\n", i);
- }
- }
-}
-
-static void vcn_v4_0_5_dump_ip_state(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- bool is_powered;
- uint32_t inst_off;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_5);
-
- if (!adev->vcn.ip_dump)
- return;
-
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
-
- inst_off = i * reg_count;
- /* mmUVD_POWER_STATUS is always readable and is first element of the array */
- adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, regUVD_POWER_STATUS);
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered)
- for (j = 1; j < reg_count; j++)
- adev->vcn.ip_dump[inst_off + j] =
- RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_4_0_5[j],
- i));
- }
-}
-
static const struct amd_ip_funcs vcn_v4_0_5_ip_funcs = {
.name = "vcn_v4_0_5",
.early_init = vcn_v4_0_5_early_init,
@@ -1778,8 +1710,8 @@ static const struct amd_ip_funcs vcn_v4_0_5_ip_funcs = {
.wait_for_idle = vcn_v4_0_5_wait_for_idle,
.set_clockgating_state = vcn_v4_0_5_set_clockgating_state,
.set_powergating_state = vcn_set_powergating_state,
- .dump_ip_state = vcn_v4_0_5_dump_ip_state,
- .print_ip_state = vcn_v4_0_5_print_ip_state,
+ .dump_ip_state = amdgpu_vcn_dump_ip_state,
+ .print_ip_state = amdgpu_vcn_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v4_0_5_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
index 07a6e9582880..0202df5db1e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
@@ -115,21 +115,6 @@ static int vcn_v5_0_0_early_init(struct amdgpu_ip_block *ip_block)
return 0;
}
-void vcn_v5_0_0_alloc_ip_dump(struct amdgpu_device *adev)
-{
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0);
- uint32_t *ptr;
-
- /* Allocate memory for VCN IP Dump buffer */
- ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
- if (!ptr) {
- DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
- adev->vcn.ip_dump = NULL;
- } else {
- adev->vcn.ip_dump = ptr;
- }
-}
-
/**
* vcn_v5_0_0_sw_init - sw init for VCN block
*
@@ -144,7 +129,7 @@ static int vcn_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block)
int i, r;
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_vcn5_fw_shared *fw_shared;
+ struct amdgpu_vcn5_fw_shared *fw_shared;
if (adev->vcn.harvest_config & (1 << i))
continue;
@@ -201,7 +186,9 @@ static int vcn_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block)
if (!amdgpu_sriov_vf(adev))
adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
- vcn_v5_0_0_alloc_ip_dump(adev);
+ r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_5_0, ARRAY_SIZE(vcn_reg_list_5_0));
+ if (r)
+ return r;
r = amdgpu_vcn_sysfs_reset_mask_init(adev);
if (r)
@@ -224,7 +211,7 @@ static int vcn_v5_0_0_sw_fini(struct amdgpu_ip_block *ip_block)
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_vcn5_fw_shared *fw_shared;
+ struct amdgpu_vcn5_fw_shared *fw_shared;
if (adev->vcn.harvest_config & (1 << i))
continue;
@@ -245,13 +232,8 @@ static int vcn_v5_0_0_sw_fini(struct amdgpu_ip_block *ip_block)
amdgpu_vcn_sysfs_reset_mask_fini(adev);
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- r = amdgpu_vcn_sw_fini(adev, i);
- if (r)
- return r;
- }
-
- kfree(adev->vcn.ip_dump);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+ amdgpu_vcn_sw_fini(adev, i);
return 0;
}
@@ -710,9 +692,10 @@ static int vcn_v5_0_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
{
struct amdgpu_device *adev = vinst->adev;
int inst_idx = vinst->inst;
- volatile struct amdgpu_vcn5_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
+ struct amdgpu_vcn5_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
uint32_t tmp;
+ int ret;
/* disable register anti-hang mechanism */
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 1,
@@ -766,8 +749,13 @@ static int vcn_v5_0_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
VCN, inst_idx, regUVD_MASTINT_EN),
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
- if (indirect)
- amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (indirect) {
+ ret = amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (ret) {
+ dev_err(adev->dev, "%s: vcn sram load failed %d\n", __func__, ret);
+ return ret;
+ }
+ }
ring = &adev->vcn.inst[inst_idx].ring_enc[0];
@@ -814,7 +802,7 @@ static int vcn_v5_0_0_start(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
- volatile struct amdgpu_vcn5_fw_shared *fw_shared;
+ struct amdgpu_vcn5_fw_shared *fw_shared;
struct amdgpu_ring *ring;
uint32_t tmp;
int j, k, r;
@@ -1007,7 +995,7 @@ static int vcn_v5_0_0_stop(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
- volatile struct amdgpu_vcn5_fw_shared *fw_shared;
+ struct amdgpu_vcn5_fw_shared *fw_shared;
uint32_t tmp;
int r = 0;
@@ -1320,7 +1308,7 @@ static int vcn_v5_0_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = ip_block->adev;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ bool enable = state == AMD_CG_STATE_GATE;
int i;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
@@ -1428,67 +1416,6 @@ static void vcn_v5_0_0_set_irq_funcs(struct amdgpu_device *adev)
}
}
-void vcn_v5_0_0_print_ip_state(struct amdgpu_ip_block *ip_block,
- struct drm_printer *p)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0);
- uint32_t inst_off, is_powered;
-
- if (!adev->vcn.ip_dump)
- return;
-
- drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i)) {
- drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
- continue;
- }
-
- inst_off = i * reg_count;
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered) {
- drm_printf(p, "\nActive Instance:VCN%d\n", i);
- for (j = 0; j < reg_count; j++)
- drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_5_0[j].reg_name,
- adev->vcn.ip_dump[inst_off + j]);
- } else {
- drm_printf(p, "\nInactive Instance:VCN%d\n", i);
- }
- }
-}
-
-void vcn_v5_0_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- bool is_powered;
- uint32_t inst_off;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0);
-
- if (!adev->vcn.ip_dump)
- return;
-
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
-
- inst_off = i * reg_count;
- /* mmUVD_POWER_STATUS is always readable and is first element of the array */
- adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, regUVD_POWER_STATUS);
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered)
- for (j = 1; j < reg_count; j++)
- adev->vcn.ip_dump[inst_off + j] =
- RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_5_0[j], i));
- }
-}
-
static const struct amd_ip_funcs vcn_v5_0_0_ip_funcs = {
.name = "vcn_v5_0_0",
.early_init = vcn_v5_0_0_early_init,
@@ -1502,8 +1429,8 @@ static const struct amd_ip_funcs vcn_v5_0_0_ip_funcs = {
.wait_for_idle = vcn_v5_0_0_wait_for_idle,
.set_clockgating_state = vcn_v5_0_0_set_clockgating_state,
.set_powergating_state = vcn_set_powergating_state,
- .dump_ip_state = vcn_v5_0_0_dump_ip_state,
- .print_ip_state = vcn_v5_0_0_print_ip_state,
+ .dump_ip_state = amdgpu_vcn_dump_ip_state,
+ .print_ip_state = amdgpu_vcn_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v5_0_0_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h
index b8927652bc50..51bbccd4360f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h
@@ -32,11 +32,6 @@
#define VCN_VID_IP_ADDRESS 0x0
#define VCN_AON_IP_ADDRESS 0x30000
-void vcn_v5_0_0_alloc_ip_dump(struct amdgpu_device *adev);
-void vcn_v5_0_0_print_ip_state(struct amdgpu_ip_block *ip_block,
- struct drm_printer *p);
-void vcn_v5_0_0_dump_ip_state(struct amdgpu_ip_block *ip_block);
-
extern const struct amdgpu_ip_block_version vcn_v5_0_0_ip_block;
#endif /* __VCN_V5_0_0_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
index cdefd7fcb0da..714350cabf2f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
@@ -40,6 +40,40 @@
#include <drm/drm_drv.h>
+static const struct amdgpu_hwip_reg_entry vcn_reg_list_5_0_1[] = {
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_POWER_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA0),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA1),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_CMD),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_CTL),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_DATA),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_MASK),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_PAUSE)
+};
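VCN 5.0.1 previously reused the 5.0.0 dump callbacks; with the move to the common helpers it carries its own register list. Each entry pairs a register offset with its printable name so the shared print callback can label the values; SOC15_REG_ENTRY_STR presumably expands along these lines (a sketch — see the real definition in the soc15 headers):

	/* Assumed expansion, for orientation only. */
	#define SOC15_REG_ENTRY_STR(ip, inst, reg) \
		{ ip##_HWIP, inst, reg##_BASE_IDX, reg, #reg }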
+
static int vcn_v5_0_1_start_sriov(struct amdgpu_device *adev);
static void vcn_v5_0_1_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v5_0_1_set_irq_funcs(struct amdgpu_device *adev);
@@ -79,6 +113,25 @@ static int vcn_v5_0_1_early_init(struct amdgpu_ip_block *ip_block)
return 0;
}
+static int vcn_v5_0_1_late_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+
+ adev->vcn.supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+
+ switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
+ case IP_VERSION(13, 0, 12):
+ if ((adev->psp.sos.fw_version >= 0x00450025) && amdgpu_dpm_reset_vcn_is_supported(adev))
+ adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
static void vcn_v5_0_1_fw_shared_init(struct amdgpu_device *adev, int inst_idx)
{
struct amdgpu_vcn5_fw_shared *fw_shared;
@@ -153,17 +206,23 @@ static int vcn_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
vcn_v5_0_1_fw_shared_init(adev, i);
}
- /* TODO: Add queue reset mask when FW fully supports it */
- adev->vcn.supported_reset =
- amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
-
if (amdgpu_sriov_vf(adev)) {
r = amdgpu_virt_alloc_mm_table(adev);
if (r)
return r;
}
- vcn_v5_0_0_alloc_ip_dump(adev);
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN)) {
+ r = amdgpu_vcn_ras_sw_init(adev);
+ if (r) {
+ dev_err(adev->dev, "Failed to initialize vcn ras block!\n");
+ return r;
+ }
+ }
+
+ r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_5_0_1, ARRAY_SIZE(vcn_reg_list_5_0_1));
+ if (r)
+ return r;
return amdgpu_vcn_sysfs_reset_mask_init(adev);
}
@@ -182,7 +241,7 @@ static int vcn_v5_0_1_sw_fini(struct amdgpu_ip_block *ip_block)
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_vcn5_fw_shared *fw_shared;
+ struct amdgpu_vcn5_fw_shared *fw_shared;
fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
fw_shared->present_flag_0 = 0;
@@ -201,15 +260,27 @@ static int vcn_v5_0_1_sw_fini(struct amdgpu_ip_block *ip_block)
return r;
}
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- r = amdgpu_vcn_sw_fini(adev, i);
- if (r)
- return r;
- }
-
amdgpu_vcn_sysfs_reset_mask_fini(adev);
- kfree(adev->vcn.ip_dump);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+ amdgpu_vcn_sw_fini(adev, i);
+
+ return 0;
+}
+
+static int vcn_v5_0_1_hw_init_inst(struct amdgpu_device *adev, int i)
+{
+ struct amdgpu_ring *ring;
+ int vcn_inst;
+
+ vcn_inst = GET_INST(VCN, i);
+ ring = &adev->vcn.inst[i].ring_enc[0];
+
+ if (ring->use_doorbell)
+ adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+ ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
+ 11 * vcn_inst),
+ adev->vcn.inst[i].aid_id);
return 0;
}
@@ -225,7 +296,7 @@ static int vcn_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
- int i, r, vcn_inst;
+ int i, r;
if (amdgpu_sriov_vf(adev)) {
r = vcn_v5_0_1_start_sriov(adev);
@@ -243,14 +314,8 @@ static int vcn_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block)
if (RREG32_SOC15(VCN, GET_INST(VCN, 0), regVCN_RRMT_CNTL) & 0x100)
adev->vcn.caps |= AMDGPU_VCN_CAPS(RRMT_ENABLED);
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- vcn_inst = GET_INST(VCN, i);
ring = &adev->vcn.inst[i].ring_enc[0];
-
- if (ring->use_doorbell)
- adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
- ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
- 11 * vcn_inst),
- adev->vcn.inst[i].aid_id);
+ vcn_v5_0_1_hw_init_inst(adev, i);
/* Re-init fw_shared, if required */
vcn_v5_0_1_fw_shared_init(adev, i);
@@ -284,7 +349,7 @@ static int vcn_v5_0_1_hw_fini(struct amdgpu_ip_block *ip_block)
vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
}
- if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN) && !amdgpu_sriov_vf(adev))
amdgpu_irq_put(adev, &adev->vcn.inst->ras_poison_irq, 0);
return 0;
@@ -601,11 +666,11 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
{
struct amdgpu_device *adev = vinst->adev;
int inst_idx = vinst->inst;
- volatile struct amdgpu_vcn5_fw_shared *fw_shared =
+ struct amdgpu_vcn5_fw_shared *fw_shared =
adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__PAUSE};
- int vcn_inst;
+ int vcn_inst, ret;
uint32_t tmp;
vcn_inst = GET_INST(VCN, inst_idx);
@@ -666,8 +731,13 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
VCN, 0, regUVD_MASTINT_EN),
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
- if (indirect)
- amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM);
+ if (indirect) {
+ ret = amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM);
+ if (ret) {
+ dev_err(adev->dev, "vcn sram load failed %d\n", ret);
+ return ret;
+ }
+ }
/* resetting ring, fw should not check RB ring */
fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
@@ -732,8 +802,8 @@ static int vcn_v5_0_1_start_sriov(struct amdgpu_device *adev)
struct mmsch_v5_0_cmd_end end = { {0} };
struct mmsch_v5_0_init_header header;
- volatile struct amdgpu_vcn5_fw_shared *fw_shared;
- volatile struct amdgpu_fw_shared_rb_setup *rb_setup;
+ struct amdgpu_vcn5_fw_shared *fw_shared;
+ struct amdgpu_fw_shared_rb_setup *rb_setup;
direct_wt.cmd_header.command_type =
MMSCH_COMMAND__DIRECT_REG_WRITE;
@@ -907,7 +977,7 @@ static int vcn_v5_0_1_start(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
- volatile struct amdgpu_vcn5_fw_shared *fw_shared;
+ struct amdgpu_vcn5_fw_shared *fw_shared;
struct amdgpu_ring *ring;
uint32_t tmp;
int j, k, r, vcn_inst;
@@ -1099,7 +1169,7 @@ static int vcn_v5_0_1_stop(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
- volatile struct amdgpu_vcn5_fw_shared *fw_shared;
+ struct amdgpu_vcn5_fw_shared *fw_shared;
uint32_t tmp;
int r = 0, vcn_inst;
@@ -1229,6 +1299,31 @@ static void vcn_v5_0_1_unified_ring_set_wptr(struct amdgpu_ring *ring)
}
}
+static int vcn_v5_0_1_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
+{
+ int r = 0;
+ int vcn_inst;
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me];
+
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+
+ vcn_inst = GET_INST(VCN, ring->me);
+ r = amdgpu_dpm_reset_vcn(adev, 1 << vcn_inst);
+
+ if (r) {
+ DRM_DEV_ERROR(adev->dev, "VCN reset failed: %d\n", r);
+ return r;
+ }
+
+ vcn_v5_0_1_hw_init_inst(adev, ring->me);
+ vcn_v5_0_1_start_dpg_mode(vinst, vinst->indirect_sram);
+
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
+}
+
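The new callback follows the common amdgpu per-queue reset pattern: bracket the reset with the scheduler helpers, perform the SMU-side reset, then bring just this instance back up via vcn_v5_0_1_hw_init_inst() and vcn_v5_0_1_start_dpg_mode(). The helper semantics below are assumptions, not their real bodies:

	/* Assumed semantics of the reset helpers (hedged):
	 *   amdgpu_ring_reset_helper_begin() - stop scheduling on the ring
	 *       and record the timed-out fence;
	 *   amdgpu_ring_reset_helper_end()   - clean up the timed-out fence,
	 *       verify the ring (e.g. a ring test) and restart scheduling,
	 *       returning an error if the ring did not come back.
	 */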
static const struct amdgpu_ring_funcs vcn_v5_0_1_unified_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_ENC,
.align_mask = 0x3f,
@@ -1257,6 +1352,7 @@ static const struct amdgpu_ring_funcs vcn_v5_0_1_unified_ring_vm_funcs = {
.emit_wreg = vcn_v4_0_3_enc_ring_emit_wreg,
.emit_reg_wait = vcn_v4_0_3_enc_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = vcn_v5_0_1_ring_reset,
};
/**
@@ -1460,7 +1556,7 @@ static void vcn_v5_0_1_set_irq_funcs(struct amdgpu_device *adev)
static const struct amd_ip_funcs vcn_v5_0_1_ip_funcs = {
.name = "vcn_v5_0_1",
.early_init = vcn_v5_0_1_early_init,
- .late_init = NULL,
+ .late_init = vcn_v5_0_1_late_init,
.sw_init = vcn_v5_0_1_sw_init,
.sw_fini = vcn_v5_0_1_sw_fini,
.hw_init = vcn_v5_0_1_hw_init,
@@ -1475,8 +1571,8 @@ static const struct amd_ip_funcs vcn_v5_0_1_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = vcn_v5_0_1_set_clockgating_state,
.set_powergating_state = vcn_set_powergating_state,
- .dump_ip_state = vcn_v5_0_0_dump_ip_state,
- .print_ip_state = vcn_v5_0_0_print_ip_state,
+ .dump_ip_state = amdgpu_vcn_dump_ip_state,
+ .print_ip_state = amdgpu_vcn_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v5_0_1_ip_block = {
@@ -1557,7 +1653,7 @@ static int vcn_v5_0_1_aca_bank_parser(struct aca_handle *handle, struct aca_bank
/* reference to smu driver if header file */
static int vcn_v5_0_1_err_codes[] = {
- 14, 15, /* VCN */
+ 14, 15, 47, /* VCN [D|V|S] */
};
static bool vcn_v5_0_1_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
@@ -1603,6 +1699,13 @@ static int vcn_v5_0_1_ras_late_init(struct amdgpu_device *adev, struct ras_commo
if (r)
goto late_fini;
+ if (amdgpu_ras_is_supported(adev, ras_block->block) &&
+ adev->vcn.inst->ras_poison_irq.funcs) {
+ r = amdgpu_irq_get(adev, &adev->vcn.inst->ras_poison_irq, 0);
+ if (r)
+ goto late_fini;
+ }
+
return 0;
late_fini:
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 9b3510e53112..a611a7345125 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -67,7 +67,6 @@
#include "sdma_v2_4.h"
#include "sdma_v3_0.h"
#include "dce_v10_0.h"
-#include "dce_v11_0.h"
#include "iceland_ih.h"
#include "tonga_ih.h"
#include "cz_ih.h"
@@ -2124,8 +2123,6 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
else if (amdgpu_device_has_dc_support(adev))
amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
- else
- amdgpu_device_ip_block_add(adev, &dce_v11_2_ip_block);
amdgpu_device_ip_block_add(adev, &uvd_v6_3_ip_block);
amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
break;
@@ -2142,8 +2139,6 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
else if (amdgpu_device_has_dc_support(adev))
amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
- else
- amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
amdgpu_device_ip_block_add(adev, &vce_v3_1_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
@@ -2163,8 +2158,6 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
else if (amdgpu_device_has_dc_support(adev))
amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
- else
- amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
amdgpu_device_ip_block_add(adev, &uvd_v6_2_ip_block);
amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 828a9ceef1e7..0f0719528bcc 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -521,15 +521,10 @@ static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p,
cu_mask_size = sizeof(uint32_t) * (max_num_cus/32);
}
- minfo.cu_mask.ptr = kzalloc(cu_mask_size, GFP_KERNEL);
- if (!minfo.cu_mask.ptr)
- return -ENOMEM;
-
- retval = copy_from_user(minfo.cu_mask.ptr, cu_mask_ptr, cu_mask_size);
- if (retval) {
+ minfo.cu_mask.ptr = memdup_user(cu_mask_ptr, cu_mask_size);
+ if (IS_ERR(minfo.cu_mask.ptr)) {
pr_debug("Could not copy CU mask from userspace");
- retval = -EFAULT;
- goto out;
+ return PTR_ERR(minfo.cu_mask.ptr);
}
mutex_lock(&p->mutex);
@@ -538,7 +533,6 @@ static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p,
mutex_unlock(&p->mutex);
-out:
kfree(minfo.cu_mask.ptr);
return retval;
}
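The rewrite above replaces a kzalloc()/copy_from_user() pair with memdup_user(), which allocates and copies in one step and reports failure through ERR_PTR(). A minimal sketch of the pattern (demo_copy_mask is illustrative, not a driver symbol):

static int demo_copy_mask(const void __user *uptr, size_t size)
{
	void *buf = memdup_user(uptr, size);	/* kmalloc() + copy_from_user() */

	if (IS_ERR(buf))
		return PTR_ERR(buf);		/* -ENOMEM or -EFAULT */

	/* ... consume buf ... */
	kfree(buf);
	return 0;
}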
@@ -1070,7 +1064,12 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
svm_range_list_lock_and_flush_work(&p->svms, current->mm);
mutex_lock(&p->svms.lock);
mmap_write_unlock(current->mm);
- if (interval_tree_iter_first(&p->svms.objects,
+
+ /* Skip the special case that allocates VRAM without a VA;
+ * the VA is then 0, which is not a valid address.
+ */
+ if (!(!args->va_addr && (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM)) &&
+ interval_tree_iter_first(&p->svms.objects,
args->va_addr >> PAGE_SHIFT,
(args->va_addr + args->size - 1) >> PAGE_SHIFT)) {
pr_err("Address: 0x%llx already allocated by SVM\n",
@@ -2566,8 +2565,8 @@ static int criu_restore(struct file *filep,
pr_debug("CRIU restore (num_devices:%u num_bos:%u num_objects:%u priv_data_size:%llu)\n",
args->num_devices, args->num_bos, args->num_objects, args->priv_data_size);
- if (!args->bos || !args->devices || !args->priv_data || !args->priv_data_size ||
- !args->num_devices || !args->num_bos)
+ if ((args->num_bos > 0 && !args->bos) || !args->devices || !args->priv_data ||
+ !args->priv_data_size || !args->num_devices)
return -EINVAL;
mutex_lock(&p->mutex);
@@ -3252,8 +3251,10 @@ static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
int retcode = -EINVAL;
bool ptrace_attached = false;
- if (nr >= AMDKFD_CORE_IOCTL_COUNT)
+ if (nr >= AMDKFD_CORE_IOCTL_COUNT) {
+ retcode = -ENOTTY;
goto err_i1;
+ }
if ((nr >= AMDKFD_COMMAND_START) && (nr < AMDKFD_COMMAND_END)) {
u32 amdkfd_size;
@@ -3266,8 +3267,10 @@ static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
asize = amdkfd_size;
cmd = ioctl->cmd;
- } else
+ } else {
+ retcode = -ENOTTY;
goto err_i1;
+ }
dev_dbg(kfd_device, "ioctl cmd 0x%x (#0x%x), arg 0x%lx\n", cmd, nr, arg);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 349c351e242b..e9cfb80bd436 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -495,6 +495,7 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
mutex_init(&kfd->doorbell_mutex);
ida_init(&kfd->doorbell_ida);
+ atomic_set(&kfd->kfd_processes_count, 0);
return kfd;
}
@@ -1133,7 +1134,15 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
}
for (i = 0; i < kfd->num_nodes; i++) {
- node = kfd->nodes[i];
+ /* Guard against a race with another thread running between
+ * kfd_cleanup_nodes() and kfree(kfd), where kfd->nodes[i]
+ * has already been set to NULL.
+ */
+ if (kfd->nodes[i])
+ node = kfd->nodes[i];
+ else
+ return;
+
spin_lock_irqsave(&node->interrupt_lock, flags);
if (node->interrupts_active
@@ -1485,6 +1494,15 @@ int kgd2kfd_check_and_lock_kfd(struct kfd_dev *kfd)
mutex_lock(&kfd_processes_mutex);
+ /* kfd_processes_count is per kfd_dev; if it is non-zero, return
+ * -EBUSY without any further checks.
+ */
+ if (atomic_read(&kfd->kfd_processes_count)) {
+ pr_debug("process_wq_release not finished\n");
+ r = -EBUSY;
+ goto out;
+ }
+
if (hash_empty(kfd_processes_table) && !kfd_is_locked(kfd))
goto out;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index 2b0a830f5b29..fb3129883a4c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -46,11 +46,7 @@ static bool kq_initialize(struct kernel_queue *kq, struct kfd_node *dev,
int retval;
union PM4_MES_TYPE_3_HEADER nop;
- if (WARN_ON(type != KFD_QUEUE_TYPE_DIQ && type != KFD_QUEUE_TYPE_HIQ))
- return false;
-
- pr_debug("Initializing queue type %d size %d\n", KFD_QUEUE_TYPE_HIQ,
- queue_size);
+ pr_debug("Initializing queue type %d size %d\n", type, queue_size);
memset(&prop, 0, sizeof(prop));
memset(&nop, 0, sizeof(nop));
@@ -69,6 +65,7 @@ static bool kq_initialize(struct kernel_queue *kq, struct kfd_node *dev,
kq->mqd_mgr = dev->dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ];
break;
default:
+ WARN(1, "Invalid queue type %d\n", type);
dev_err(dev->adev->dev, "Invalid queue type %d\n", type);
return false;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 79251f22b702..59a5a3fea65d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -39,22 +39,22 @@
#endif
#define dev_fmt(fmt) "kfd_migrate: " fmt
-static uint64_t
-svm_migrate_direct_mapping_addr(struct amdgpu_device *adev, uint64_t addr)
+static u64
+svm_migrate_direct_mapping_addr(struct amdgpu_device *adev, u64 addr)
{
return addr + amdgpu_ttm_domain_start(adev, TTM_PL_VRAM);
}
static int
-svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
- dma_addr_t *addr, uint64_t *gart_addr, uint64_t flags)
+svm_migrate_gart_map(struct amdgpu_ring *ring, u64 npages,
+ dma_addr_t *addr, u64 *gart_addr, u64 flags)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_job *job;
unsigned int num_dw, num_bytes;
struct dma_fence *fence;
- uint64_t src_addr, dst_addr;
- uint64_t pte_flags;
+ u64 src_addr, dst_addr;
+ u64 pte_flags;
void *cpu_addr;
int r;
@@ -68,7 +68,8 @@ svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
AMDGPU_FENCE_OWNER_UNDEFINED,
num_dw * 4 + num_bytes,
AMDGPU_IB_POOL_DELAYED,
- &job);
+ &job,
+ AMDGPU_KERNEL_JOB_ID_KFD_GART_MAP);
if (r)
return r;
@@ -122,15 +123,15 @@ svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
static int
svm_migrate_copy_memory_gart(struct amdgpu_device *adev, dma_addr_t *sys,
- uint64_t *vram, uint64_t npages,
+ u64 *vram, u64 npages,
enum MIGRATION_COPY_DIR direction,
struct dma_fence **mfence)
{
- const uint64_t GTT_MAX_PAGES = AMDGPU_GTT_MAX_TRANSFER_SIZE;
+ const u64 GTT_MAX_PAGES = AMDGPU_GTT_MAX_TRANSFER_SIZE;
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
- uint64_t gart_s, gart_d;
+ u64 gart_s, gart_d;
struct dma_fence *next;
- uint64_t size;
+ u64 size;
int r;
mutex_lock(&adev->mman.gtt_window_lock);
@@ -260,39 +261,39 @@ static void svm_migrate_put_sys_page(unsigned long addr)
put_page(page);
}
-static unsigned long svm_migrate_unsuccessful_pages(struct migrate_vma *migrate)
+static unsigned long svm_migrate_successful_pages(struct migrate_vma *migrate)
{
- unsigned long upages = 0;
+ unsigned long mpages = 0;
unsigned long i;
for (i = 0; i < migrate->npages; i++) {
- if (migrate->src[i] & MIGRATE_PFN_VALID &&
- !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
- upages++;
+ if (migrate->dst[i] & MIGRATE_PFN_VALID &&
+ migrate->src[i] & MIGRATE_PFN_MIGRATE)
+ mpages++;
}
- return upages;
+ return mpages;
}
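A page counts as migrated only when both sides of the handshake succeeded: the core mm flagged the source for migration and a destination page was actually allocated. A hypothetical helper capturing the predicate used in the loop above (demo_page_migrated is not a driver symbol):

static inline bool demo_page_migrated(unsigned long src, unsigned long dst)
{
	return (dst & MIGRATE_PFN_VALID) && (src & MIGRATE_PFN_MIGRATE);
}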
static int
svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange,
struct migrate_vma *migrate, struct dma_fence **mfence,
- dma_addr_t *scratch, uint64_t ttm_res_offset)
+ dma_addr_t *scratch, u64 ttm_res_offset)
{
- uint64_t npages = migrate->npages;
+ u64 npages = migrate->npages;
struct amdgpu_device *adev = node->adev;
struct device *dev = adev->dev;
struct amdgpu_res_cursor cursor;
- uint64_t mpages = 0;
+ u64 mpages = 0;
dma_addr_t *src;
- uint64_t *dst;
- uint64_t i, j;
+ u64 *dst;
+ u64 i, j;
int r;
pr_debug("svms 0x%p [0x%lx 0x%lx 0x%llx]\n", prange->svms, prange->start,
prange->last, ttm_res_offset);
src = scratch;
- dst = (uint64_t *)(scratch + npages);
+ dst = (u64 *)(scratch + npages);
amdgpu_res_first(prange->ttm_res, ttm_res_offset,
npages << PAGE_SHIFT, &cursor);
@@ -385,11 +386,11 @@ out_free_vram_pages:
static long
svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
- struct vm_area_struct *vma, uint64_t start,
- uint64_t end, uint32_t trigger, uint64_t ttm_res_offset)
+ struct vm_area_struct *vma, u64 start,
+ u64 end, uint32_t trigger, u64 ttm_res_offset)
{
struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
- uint64_t npages = (end - start) >> PAGE_SHIFT;
+ u64 npages = (end - start) >> PAGE_SHIFT;
struct amdgpu_device *adev = node->adev;
struct kfd_process_device *pdd;
struct dma_fence *mfence = NULL;
@@ -408,7 +409,7 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev);
buf = kvcalloc(npages,
- 2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t),
+ 2 * sizeof(*migrate.src) + sizeof(u64) + sizeof(dma_addr_t),
GFP_KERNEL);
if (!buf)
goto out;
@@ -447,9 +448,9 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
svm_migrate_copy_done(adev, mfence);
migrate_vma_finalize(&migrate);
- mpages = cpages - svm_migrate_unsuccessful_pages(&migrate);
- pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
- mpages, cpages, migrate.npages);
+ mpages = svm_migrate_successful_pages(&migrate);
+ pr_debug("migrated/collected/requested 0x%lx/0x%lx/0x%lx\n",
+ mpages, cpages, migrate.npages);
svm_range_dma_unmap_dev(adev->dev, scratch, 0, npages);
@@ -490,7 +491,7 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
{
unsigned long addr, start, end;
struct vm_area_struct *vma;
- uint64_t ttm_res_offset;
+ u64 ttm_res_offset;
struct kfd_node *node;
unsigned long mpages = 0;
long r = 0;
@@ -580,14 +581,14 @@ static void svm_migrate_page_free(struct page *page)
static int
svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
struct migrate_vma *migrate, struct dma_fence **mfence,
- dma_addr_t *scratch, uint64_t npages)
+ dma_addr_t *scratch, u64 npages)
{
struct device *dev = adev->dev;
- uint64_t *src;
+ u64 *src;
dma_addr_t *dst;
struct page *dpage;
- uint64_t i = 0, j;
- uint64_t addr;
+ u64 i = 0, j;
+ u64 addr;
int r = 0;
pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
@@ -595,7 +596,7 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
addr = migrate->start;
- src = (uint64_t *)(scratch + npages);
+ src = (u64 *)(scratch + npages);
dst = scratch;
for (i = 0, j = 0; i < npages; i++, addr += PAGE_SIZE) {
@@ -683,12 +684,11 @@ out_oom:
*/
static long
svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
- struct vm_area_struct *vma, uint64_t start, uint64_t end,
+ struct vm_area_struct *vma, u64 start, u64 end,
uint32_t trigger, struct page *fault_page)
{
struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
- uint64_t npages = (end - start) >> PAGE_SHIFT;
- unsigned long upages = npages;
+ u64 npages = (end - start) >> PAGE_SHIFT;
unsigned long cpages = 0;
unsigned long mpages = 0;
struct amdgpu_device *adev = node->adev;
@@ -710,7 +710,7 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
migrate.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
buf = kvcalloc(npages,
- 2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t),
+ 2 * sizeof(*migrate.src) + sizeof(u64) + sizeof(dma_addr_t),
GFP_KERNEL);
if (!buf)
goto out;
@@ -736,7 +736,6 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
if (!cpages) {
pr_debug("failed collect migrate device pages [0x%lx 0x%lx]\n",
prange->start, prange->last);
- upages = svm_migrate_unsuccessful_pages(&migrate);
goto out_free;
}
if (cpages != npages)
@@ -749,9 +748,9 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
scratch, npages);
migrate_vma_pages(&migrate);
- upages = svm_migrate_unsuccessful_pages(&migrate);
- pr_debug("unsuccessful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
- upages, cpages, migrate.npages);
+ mpages = svm_migrate_successful_pages(&migrate);
+ pr_debug("migrated/collected/requested 0x%lx/0x%lx/0x%lx\n",
+ mpages, cpages, migrate.npages);
svm_migrate_copy_done(adev, mfence);
migrate_vma_finalize(&migrate);
@@ -764,8 +763,7 @@ out_free:
start >> PAGE_SHIFT, end >> PAGE_SHIFT,
node->id, 0, trigger, r);
out:
- if (!r && cpages) {
- mpages = cpages - upages;
+ if (!r && mpages) {
pdd = svm_range_get_pdd_by_node(prange, node);
if (pdd)
WRITE_ONCE(pdd->page_out, pdd->page_out + mpages);
@@ -848,6 +846,9 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
}
if (r >= 0) {
+ WARN_ONCE(prange->vram_pages < mpages,
+ "Recorded vram pages(0x%llx) should not be less than migration pages(0x%lx).",
+ prange->vram_pages, mpages);
prange->vram_pages -= mpages;
/* prange does not have vram page set its actual_loc to system
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 67694bcd9464..70ef051511bb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -111,7 +111,14 @@
#define KFD_KERNEL_QUEUE_SIZE 2048
-#define KFD_UNMAP_LATENCY_MS (4000)
+/* KFD_UNMAP_LATENCY_MS is the timeout the CP spends waiting for SDMA
+ * preemption. One XCC can be associated with 2 SDMA engines.
+ * queue_preemption_timeout_ms is the time the driver waits for the CP to
+ * return the UNMAP_QUEUE fence, so:
+ * queue_preemption_timeout_ms = sdma_preemption_time * 2 + CP workload
+ * The formula below budgets 10% of the total timeout for the CP workload.
+ */
+#define KFD_UNMAP_LATENCY_MS \
+ ((queue_preemption_timeout_ms - queue_preemption_timeout_ms / 10) >> 1)
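Worked example, assuming the module parameter's stock default of queue_preemption_timeout_ms = 9000: the CP workload budget is 9000 / 10 = 900 ms, leaving 9000 - 900 = 8100 ms, so KFD_UNMAP_LATENCY_MS = 8100 >> 1 = 4050 ms per SDMA engine, close to the previous hard-coded 4000 ms.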
#define KFD_MAX_SDMA_QUEUES 128
@@ -375,6 +382,8 @@ struct kfd_dev {
/* for dynamic partitioning */
int kfd_dev_lock;
+
+ atomic_t kfd_processes_count;
};
enum kfd_mempool {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 5be28c6c4f6a..ddfe30c13e9d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -1088,6 +1088,8 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
pdd->runtime_inuse = false;
}
+ atomic_dec(&pdd->dev->kfd->kfd_processes_count);
+
kfree(pdd);
p->pdds[i] = NULL;
}
@@ -1649,6 +1651,8 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev,
/* Init idr used for memory handle translation */
idr_init(&pdd->alloc_idr);
+ atomic_inc(&dev->kfd->kfd_processes_count);
+
return pdd;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index a0f22ea6d15a..273f42e3afdd 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -1189,7 +1189,7 @@ svm_nodes_in_same_hive(struct kfd_node *node_a, struct kfd_node *node_b)
}
static uint64_t
-svm_range_get_pte_flags(struct kfd_node *node,
+svm_range_get_pte_flags(struct kfd_node *node, struct amdgpu_vm *vm,
struct svm_range *prange, int domain)
{
struct kfd_node *bo_node;
@@ -1292,10 +1292,6 @@ svm_range_get_pte_flags(struct kfd_node *node,
AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
}
- mapping_flags |= AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE;
-
- if (flags & KFD_IOCTL_SVM_FLAG_GPU_RO)
- mapping_flags &= ~AMDGPU_VM_PAGE_WRITEABLE;
if (flags & KFD_IOCTL_SVM_FLAG_GPU_EXEC)
mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
@@ -1305,7 +1301,10 @@ svm_range_get_pte_flags(struct kfd_node *node,
if (gc_ip_version >= IP_VERSION(12, 0, 0))
pte_flags |= AMDGPU_PTE_IS_PTE;
- pte_flags |= amdgpu_gem_va_map_flags(node->adev, mapping_flags);
+ amdgpu_gmc_get_vm_pte(node->adev, vm, NULL, mapping_flags, &pte_flags);
+ pte_flags |= AMDGPU_PTE_READABLE;
+ if (!(flags & KFD_IOCTL_SVM_FLAG_GPU_RO))
+ pte_flags |= AMDGPU_PTE_WRITEABLE;
return pte_flags;
}
@@ -1412,7 +1411,7 @@ svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
pr_debug("Mapping range [0x%lx 0x%llx] on domain: %s\n",
last_start, prange->start + i, last_domain ? "GPU" : "CPU");
- pte_flags = svm_range_get_pte_flags(pdd->dev, prange, last_domain);
+ pte_flags = svm_range_get_pte_flags(pdd->dev, vm, prange, last_domain);
if (readonly)
pte_flags &= ~AMDGPU_PTE_WRITEABLE;
@@ -1714,9 +1713,32 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
next = min(vma->vm_end, end);
npages = (next - addr) >> PAGE_SHIFT;
+ /* HMM requires at least READ permission. If provided with PROT_NONE,
+ * unmap the memory; if it's not already mapped, this is a no-op.
+ * If PROT_WRITE is provided without READ, warn first, then unmap.
+ */
+ if (!(vma->vm_flags & VM_READ)) {
+ unsigned long e, s;
+
+ svm_range_lock(prange);
+ if (vma->vm_flags & VM_WRITE)
+ pr_debug("VM_WRITE without VM_READ is not supported");
+ s = max(start, prange->start);
+ e = min(end, prange->last);
+ if (e >= s)
+ r = svm_range_unmap_from_gpus(prange, s, e,
+ KFD_SVM_UNMAP_TRIGGER_UNMAP_FROM_CPU);
+ svm_range_unlock(prange);
+ /* If unmap returned non-zero, the next loop iteration will bail
+ * out, so leave r as-is and continue.
+ */
+ addr = next;
+ continue;
+ }
+
WRITE_ONCE(p->svms.faulting_task, current);
r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
- readonly, owner, NULL,
+ readonly, owner,
&hmm_range);
WRITE_ONCE(p->svms.faulting_task, NULL);
if (r)
@@ -4239,7 +4261,7 @@ svm_ioctl(struct kfd_process *p, enum kfd_ioctl_svm_op op, uint64_t start,
r = svm_range_get_attr(p, mm, start, size, nattrs, attrs);
break;
default:
- r = EINVAL;
+ r = -EINVAL;
break;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 720b20e842ba..5c98746eb72d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -530,6 +530,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
sysfs_show_32bit_prop(buffer, offs, "sdma_fw_version",
dev->gpu->kfd->sdma_fw_version);
sysfs_show_64bit_prop(buffer, offs, "unique_id",
+ dev->gpu->xcp ?
+ dev->gpu->xcp->unique_id :
dev->gpu->adev->unique_id);
sysfs_show_32bit_prop(buffer, offs, "num_xcc",
NUM_XCC(dev->gpu->xcc_mask));
diff --git a/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c b/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c
index 8bc36f04b1b7..44009aa8216e 100644
--- a/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c
+++ b/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c
@@ -46,18 +46,29 @@ static const struct drm_driver amdgpu_xcp_driver = {
static int8_t pdev_num;
static struct xcp_device *xcp_dev[MAX_XCP_PLATFORM_DEVICE];
+static DEFINE_MUTEX(xcp_mutex);
int amdgpu_xcp_drm_dev_alloc(struct drm_device **ddev)
{
struct platform_device *pdev;
struct xcp_device *pxcp_dev;
char dev_name[20];
- int ret;
+ int ret, i;
+
+ guard(mutex)(&xcp_mutex);
if (pdev_num >= MAX_XCP_PLATFORM_DEVICE)
return -ENODEV;
- snprintf(dev_name, sizeof(dev_name), "amdgpu_xcp_%d", pdev_num);
+ for (i = 0; i < MAX_XCP_PLATFORM_DEVICE; i++) {
+ if (!xcp_dev[i])
+ break;
+ }
+
+ if (i >= MAX_XCP_PLATFORM_DEVICE)
+ return -ENODEV;
+
+ snprintf(dev_name, sizeof(dev_name), "amdgpu_xcp_%d", i);
pdev = platform_device_register_simple(dev_name, -1, NULL, 0);
if (IS_ERR(pdev))
return PTR_ERR(pdev);
@@ -73,8 +84,8 @@ int amdgpu_xcp_drm_dev_alloc(struct drm_device **ddev)
goto out_devres;
}
- xcp_dev[pdev_num] = pxcp_dev;
- xcp_dev[pdev_num]->pdev = pdev;
+ xcp_dev[i] = pxcp_dev;
+ xcp_dev[i]->pdev = pdev;
*ddev = &pxcp_dev->drm;
pdev_num++;
@@ -89,16 +100,43 @@ out_unregister:
}
EXPORT_SYMBOL(amdgpu_xcp_drm_dev_alloc);
-void amdgpu_xcp_drv_release(void)
+static void free_xcp_dev(int8_t index)
{
- for (--pdev_num; pdev_num >= 0; --pdev_num) {
- struct platform_device *pdev = xcp_dev[pdev_num]->pdev;
+ if (index < MAX_XCP_PLATFORM_DEVICE && xcp_dev[index]) {
+ struct platform_device *pdev = xcp_dev[index]->pdev;
devres_release_group(&pdev->dev, NULL);
platform_device_unregister(pdev);
- xcp_dev[pdev_num] = NULL;
+
+ xcp_dev[index] = NULL;
+ pdev_num--;
+ }
+}
+
+void amdgpu_xcp_drm_dev_free(struct drm_device *ddev)
+{
+ int8_t i;
+
+ guard(mutex)(&xcp_mutex);
+
+ for (i = 0; i < MAX_XCP_PLATFORM_DEVICE; i++) {
+ if (xcp_dev[i] && &xcp_dev[i]->drm == ddev) {
+ free_xcp_dev(i);
+ break;
+ }
+ }
+}
+EXPORT_SYMBOL(amdgpu_xcp_drm_dev_free);
+
+void amdgpu_xcp_drv_release(void)
+{
+ int8_t i;
+
+ guard(mutex)(&xcp_mutex);
+
+ for (i = 0; pdev_num && i < MAX_XCP_PLATFORM_DEVICE; i++) {
+ free_xcp_dev(i);
}
- pdev_num = 0;
}
EXPORT_SYMBOL(amdgpu_xcp_drv_release);
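The guard(mutex)(&xcp_mutex) statements above use the kernel's scope-based cleanup helpers from <linux/cleanup.h>: the lock is taken at the declaration and released automatically on every exit path, which is why the early return -ENODEV branches need no explicit unlock. A minimal, self-contained illustration (demo_* names are made up):

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);

static int demo_locked_op(bool fail)
{
	guard(mutex)(&demo_lock);	/* dropped automatically at scope exit */

	if (fail)
		return -EINVAL;		/* no mutex_unlock() needed */
	return 0;
}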
diff --git a/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.h b/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.h
index c1c4b679bf95..580a1602c8e3 100644
--- a/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.h
+++ b/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.h
@@ -25,5 +25,6 @@
#define _AMDGPU_XCP_DRV_H_
int amdgpu_xcp_drm_dev_alloc(struct drm_device **ddev);
+void amdgpu_xcp_drm_dev_free(struct drm_device *ddev);
void amdgpu_xcp_drv_release(void);
#endif /* _AMDGPU_XCP_DRV_H_ */
diff --git a/drivers/gpu/drm/amd/display/Makefile b/drivers/gpu/drm/amd/display/Makefile
index 89d605de0595..0084a8d55254 100644
--- a/drivers/gpu/drm/amd/display/Makefile
+++ b/drivers/gpu/drm/amd/display/Makefile
@@ -44,6 +44,7 @@ subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/mmhubbub
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/mpc
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/opp
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/pg
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/soc_and_ip_translator
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/inc
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/freesync
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/color
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index ef026143dc1c..8e1622bf7a42 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
@@ -39,13 +40,11 @@
#include "dc/dc_stat.h"
#include "dc/dc_state.h"
#include "amdgpu_dm_trace.h"
-#include "dpcd_defs.h"
#include "link/protocols/link_dpcd.h"
#include "link_service_types.h"
#include "link/protocols/link_dp_capability.h"
#include "link/protocols/link_ddc.h"
-#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
@@ -56,7 +55,6 @@
#include "amdgpu_dm_hdcp.h"
#include <drm/display/drm_hdcp_helper.h>
#include "amdgpu_dm_wb.h"
-#include "amdgpu_pm.h"
#include "amdgpu_atombios.h"
#include "amd_shared.h"
@@ -82,6 +80,7 @@
#include <linux/component.h>
#include <linux/sort.h>
+#include <drm/drm_privacy_screen_consumer.h>
#include <drm/display/drm_dp_mst_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/drm_atomic.h>
@@ -102,15 +101,6 @@
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
-#include "dcn/dcn_1_0_offset.h"
-#include "dcn/dcn_1_0_sh_mask.h"
-#include "soc15_hw_ip.h"
-#include "soc15_common.h"
-#include "vega10_ip_offset.h"
-
-#include "gc/gc_11_0_0_offset.h"
-#include "gc/gc_11_0_0_sh_mask.h"
-
#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
@@ -243,6 +233,7 @@ static int amdgpu_dm_encoder_init(struct drm_device *dev,
static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
+static int amdgpu_dm_atomic_setup_commit(struct drm_atomic_state *state);
static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
static int amdgpu_dm_atomic_check(struct drm_device *dev,
@@ -427,8 +418,7 @@ static inline bool update_planes_and_stream_adapter(struct dc *dc,
/*
* Previous frame finished and HW is ready for optimization.
*/
- if (update_type == UPDATE_TYPE_FAST)
- dc_post_update_surfaces_to_stream(dc);
+ dc_post_update_surfaces_to_stream(dc);
return dc_update_planes_and_stream(dc,
array_of_surface_update,
@@ -541,6 +531,50 @@ static void dm_pflip_high_irq(void *interrupt_params)
amdgpu_crtc->crtc_id, amdgpu_crtc, vrr_active, (int)!e);
}
+static void dm_handle_vmin_vmax_update(struct work_struct *offload_work)
+{
+ struct vupdate_offload_work *work = container_of(offload_work, struct vupdate_offload_work, work);
+ struct amdgpu_device *adev = work->adev;
+ struct dc_stream_state *stream = work->stream;
+ struct dc_crtc_timing_adjust *adjust = work->adjust;
+
+ mutex_lock(&adev->dm.dc_lock);
+ dc_stream_adjust_vmin_vmax(adev->dm.dc, stream, adjust);
+ mutex_unlock(&adev->dm.dc_lock);
+
+ dc_stream_release(stream);
+ kfree(work->adjust);
+ kfree(work);
+}
+
+static void schedule_dc_vmin_vmax(struct amdgpu_device *adev,
+ struct dc_stream_state *stream,
+ struct dc_crtc_timing_adjust *adjust)
+{
+ struct vupdate_offload_work *offload_work = kzalloc(sizeof(*offload_work), GFP_KERNEL);
+ if (!offload_work) {
+ drm_dbg_driver(adev_to_drm(adev), "Failed to allocate vupdate_offload_work\n");
+ return;
+ }
+
+ struct dc_crtc_timing_adjust *adjust_copy = kzalloc(sizeof(*adjust_copy), GFP_KERNEL);
+ if (!adjust_copy) {
+ drm_dbg_driver(adev_to_drm(adev), "Failed to allocate adjust_copy\n");
+ kfree(offload_work);
+ return;
+ }
+
+ dc_stream_retain(stream);
+ memcpy(adjust_copy, adjust, sizeof(*adjust_copy));
+
+ INIT_WORK(&offload_work->work, dm_handle_vmin_vmax_update);
+ offload_work->adev = adev;
+ offload_work->stream = stream;
+ offload_work->adjust = adjust_copy;
+
+ queue_work(system_wq, &offload_work->work);
+}
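The two helpers above exist because dc_stream_adjust_vmin_vmax() has to run under the dm.dc_lock mutex, while the vupdate/vblank handlers execute in interrupt context under a spinlock where sleeping is forbidden; the adjustment is therefore captured and replayed from a work item. A stripped-down sketch of the same deferral shape (all demo_* names are illustrative):

struct demo_work {
	struct work_struct work;
	int payload;
};

static void demo_work_fn(struct work_struct *w)
{
	struct demo_work *dw = container_of(w, struct demo_work, work);

	/* process context: sleeping and mutexes are allowed here */
	kfree(dw);
}

static void demo_defer(int payload)
{
	struct demo_work *dw = kzalloc(sizeof(*dw), GFP_ATOMIC);

	if (!dw)
		return;
	dw->payload = payload;
	INIT_WORK(&dw->work, demo_work_fn);
	queue_work(system_wq, &dw->work);
}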
+
static void dm_vupdate_high_irq(void *interrupt_params)
{
struct common_irq_params *irq_params = interrupt_params;
@@ -578,22 +612,27 @@ static void dm_vupdate_high_irq(void *interrupt_params)
* page-flip completion events that have been queued to us
* if a pageflip happened inside front-porch.
*/
- if (vrr_active) {
+ if (vrr_active && acrtc->dm_irq_params.stream) {
+ bool replay_en = acrtc->dm_irq_params.stream->link->replay_settings.replay_feature_enabled;
+ bool psr_en = acrtc->dm_irq_params.stream->link->psr_settings.psr_feature_enabled;
+ bool fs_active_var_en = acrtc->dm_irq_params.freesync_config.state
+ == VRR_STATE_ACTIVE_VARIABLE;
+
amdgpu_dm_crtc_handle_vblank(acrtc);
/* BTR processing for pre-DCE12 ASICs */
- if (acrtc->dm_irq_params.stream &&
- adev->family < AMDGPU_FAMILY_AI) {
+ if (adev->family < AMDGPU_FAMILY_AI) {
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
mod_freesync_handle_v_update(
adev->dm.freesync_module,
acrtc->dm_irq_params.stream,
&acrtc->dm_irq_params.vrr_params);
- dc_stream_adjust_vmin_vmax(
- adev->dm.dc,
- acrtc->dm_irq_params.stream,
- &acrtc->dm_irq_params.vrr_params.adjust);
+ if (fs_active_var_en || (!fs_active_var_en && !replay_en && !psr_en)) {
+ schedule_dc_vmin_vmax(adev,
+ acrtc->dm_irq_params.stream,
+ &acrtc->dm_irq_params.vrr_params.adjust);
+ }
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
}
@@ -676,15 +715,20 @@ static void dm_crtc_high_irq(void *interrupt_params)
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
if (acrtc->dm_irq_params.stream &&
- acrtc->dm_irq_params.vrr_params.supported &&
- acrtc->dm_irq_params.freesync_config.state ==
- VRR_STATE_ACTIVE_VARIABLE) {
+ acrtc->dm_irq_params.vrr_params.supported) {
+ bool replay_en = acrtc->dm_irq_params.stream->link->replay_settings.replay_feature_enabled;
+ bool psr_en = acrtc->dm_irq_params.stream->link->psr_settings.psr_feature_enabled;
+ bool fs_active_var_en = acrtc->dm_irq_params.freesync_config.state == VRR_STATE_ACTIVE_VARIABLE;
+
mod_freesync_handle_v_update(adev->dm.freesync_module,
acrtc->dm_irq_params.stream,
&acrtc->dm_irq_params.vrr_params);
- dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
- &acrtc->dm_irq_params.vrr_params.adjust);
+ /* update vmin_vmax only if freesync is active, or when both PSR and Replay are disabled */
+ if (fs_active_var_en || (!fs_active_var_en && !replay_en && !psr_en)) {
+ schedule_dc_vmin_vmax(adev, acrtc->dm_irq_params.stream,
+ &acrtc->dm_irq_params.vrr_params.adjust);
+ }
}
/*
@@ -2144,7 +2188,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
drm_err(adev_to_drm(adev),
- "failed to initialize sw for display support.\n");
+ "failed to initialize vblank for display support.\n");
goto error;
}
@@ -2903,7 +2947,7 @@ static int dm_oem_i2c_hw_init(struct amdgpu_device *adev)
return -ENOMEM;
}
- r = i2c_add_adapter(&oem_i2c->base);
+ r = devm_i2c_add_adapter(adev->dev, &oem_i2c->base);
if (r) {
drm_info(adev_to_drm(adev), "Failed to register oem i2c\n");
kfree(oem_i2c);
@@ -2915,17 +2959,6 @@ static int dm_oem_i2c_hw_init(struct amdgpu_device *adev)
return 0;
}
-static void dm_oem_i2c_hw_fini(struct amdgpu_device *adev)
-{
- struct amdgpu_display_manager *dm = &adev->dm;
-
- if (dm->oem_i2c) {
- i2c_del_adapter(&dm->oem_i2c->base);
- kfree(dm->oem_i2c);
- dm->oem_i2c = NULL;
- }
-}
-
/**
* dm_hw_init() - Initialize DC device
* @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
@@ -2976,8 +3009,6 @@ static int dm_hw_fini(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
- dm_oem_i2c_hw_fini(adev);
-
amdgpu_dm_hpd_fini(adev);
amdgpu_dm_irq_fini(adev);
@@ -3005,14 +3036,20 @@ static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
drm_warn(adev_to_drm(adev), "Failed to %s pflip interrupts\n",
enable ? "enable" : "disable");
- if (enable) {
- if (amdgpu_dm_crtc_vrr_active(to_dm_crtc_state(acrtc->base.state)))
- rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, true);
- } else
- rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, false);
-
- if (rc)
- drm_warn(adev_to_drm(adev), "Failed to %sable vupdate interrupt\n", enable ? "en" : "dis");
+ if (dc_supports_vrr(adev->dm.dc->ctx->dce_version)) {
+ if (enable) {
+ if (amdgpu_dm_crtc_vrr_active(
+ to_dm_crtc_state(acrtc->base.state)))
+ rc = amdgpu_dm_crtc_set_vupdate_irq(
+ &acrtc->base, true);
+ } else
+ rc = amdgpu_dm_crtc_set_vupdate_irq(
+ &acrtc->base, false);
+
+ if (rc)
+ drm_warn(adev_to_drm(adev), "Failed to %sable vupdate interrupt\n",
+ enable ? "en" : "dis");
+ }
irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
/* During gpu-reset we disable and then enable vblank irq, so
@@ -3600,23 +3637,25 @@ static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
.atomic_commit_tail = amdgpu_dm_atomic_commit_tail,
- .atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
+ .atomic_commit_setup = amdgpu_dm_atomic_setup_commit,
};
static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
{
+ const struct drm_panel_backlight_quirk *panel_backlight_quirk;
struct amdgpu_dm_backlight_caps *caps;
struct drm_connector *conn_base;
struct amdgpu_device *adev;
struct drm_luminance_range_info *luminance_range;
- int min_input_signal_override;
+ struct drm_device *drm;
if (aconnector->bl_idx == -1 ||
aconnector->dc_link->connector_signal != SIGNAL_TYPE_EDP)
return;
conn_base = &aconnector->base;
- adev = drm_to_adev(conn_base->dev);
+ drm = conn_base->dev;
+ adev = drm_to_adev(drm);
caps = &adev->dm.backlight_caps[aconnector->bl_idx];
caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
@@ -3649,9 +3688,24 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
else
caps->aux_min_input_signal = 1;
- min_input_signal_override = drm_get_panel_min_brightness_quirk(aconnector->drm_edid);
- if (min_input_signal_override >= 0)
- caps->min_input_signal = min_input_signal_override;
+ panel_backlight_quirk =
+ drm_get_panel_backlight_quirk(aconnector->drm_edid);
+ if (!IS_ERR_OR_NULL(panel_backlight_quirk)) {
+ if (panel_backlight_quirk->min_brightness) {
+ caps->min_input_signal =
+ panel_backlight_quirk->min_brightness - 1;
+ drm_info(drm,
+ "Applying panel backlight quirk, min_brightness: %d\n",
+ caps->min_input_signal);
+ }
+ if (panel_backlight_quirk->brightness_mask) {
+ drm_info(drm,
+ "Applying panel backlight quirk, brightness_mask: 0x%X\n",
+ panel_backlight_quirk->brightness_mask);
+ caps->brightness_mask =
+ panel_backlight_quirk->brightness_mask;
+ }
+ }
}
DEFINE_FREE(sink_release, struct dc_sink *, if (_T) dc_sink_release(_T))
@@ -4766,8 +4820,8 @@ static void convert_custom_brightness(const struct amdgpu_dm_backlight_caps *cap
uint32_t *user_brightness)
{
u32 brightness = scale_input_to_fw(min, max, *user_brightness);
- u8 prev_signal = 0, prev_lum = 0;
- int i = 0;
+ u8 lower_signal, upper_signal, upper_lum, lower_lum, lum;
+ int left, right;
if (amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE)
return;
@@ -4775,32 +4829,54 @@ static void convert_custom_brightness(const struct amdgpu_dm_backlight_caps *cap
if (!caps->data_points)
return;
- /* choose start to run less interpolation steps */
- if (caps->luminance_data[caps->data_points/2].input_signal > brightness)
- i = caps->data_points/2;
- do {
- u8 signal = caps->luminance_data[i].input_signal;
- u8 lum = caps->luminance_data[i].luminance;
+ /*
+ * Handle the case where brightness is below the first data point.
+ * Interpolate between (0,0) and (first_signal, first_lum).
+ */
+ if (brightness < caps->luminance_data[0].input_signal) {
+ lum = DIV_ROUND_CLOSEST(caps->luminance_data[0].luminance * brightness,
+ caps->luminance_data[0].input_signal);
+ goto scale;
+ }
- /*
- * brightness == signal: luminance is percent numerator
- * brightness < signal: interpolate between previous and current luminance numerator
- * brightness > signal: find next data point
- */
- if (brightness > signal) {
- prev_signal = signal;
- prev_lum = lum;
- i++;
- continue;
+ left = 0;
+ right = caps->data_points - 1;
+ while (left <= right) {
+ int mid = left + (right - left) / 2;
+ u8 signal = caps->luminance_data[mid].input_signal;
+
+ /* Exact match found */
+ if (signal == brightness) {
+ lum = caps->luminance_data[mid].luminance;
+ goto scale;
}
- if (brightness < signal)
- lum = prev_lum + DIV_ROUND_CLOSEST((lum - prev_lum) *
- (brightness - prev_signal),
- signal - prev_signal);
- *user_brightness = scale_fw_to_input(min, max,
- DIV_ROUND_CLOSEST(lum * brightness, 101));
- return;
- } while (i < caps->data_points);
+
+ if (signal < brightness)
+ left = mid + 1;
+ else
+ right = mid - 1;
+ }
+
+ /* clamp left to the last data point */
+ if (left >= caps->data_points)
+ left = caps->data_points - 1;
+
+ /* At this point, left > right */
+ lower_signal = caps->luminance_data[right].input_signal;
+ upper_signal = caps->luminance_data[left].input_signal;
+ lower_lum = caps->luminance_data[right].luminance;
+ upper_lum = caps->luminance_data[left].luminance;
+
+ /* interpolate */
+ if (right == left || !lower_lum)
+ lum = upper_lum;
+ else
+ lum = lower_lum + DIV_ROUND_CLOSEST((upper_lum - lower_lum) *
+ (brightness - lower_signal),
+ upper_signal - lower_signal);
+scale:
+ *user_brightness = scale_fw_to_input(min, max,
+ DIV_ROUND_CLOSEST(lum * brightness, 101));
}
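Worked example of the interpolation above, with hypothetical data points (input_signal, luminance) = (40, 30) and (60, 70) and brightness = 45: the binary search terminates with right on (40, 30) and left on (60, 70), so lum = 30 + DIV_ROUND_CLOSEST((70 - 30) * (45 - 40), 60 - 40) = 30 + 10 = 40, and the final value is scale_fw_to_input(min, max, DIV_ROUND_CLOSEST(40 * 45, 101)) = scale_fw_to_input(min, max, 18).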
static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
@@ -4851,6 +4927,10 @@ static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
brightness = convert_brightness_from_user(caps, dm->brightness[bl_idx]);
link = (struct dc_link *)dm->backlight_link[bl_idx];
+ /* Apply brightness quirk */
+ if (caps->brightness_mask)
+ brightness |= caps->brightness_mask;
+
/* Change brightness based on AUX property */
mutex_lock(&dm->dc_lock);
if (dm->dc->caps.ips_support && dm->dc->ctx->dmub_srv->idle_allowed) {
@@ -4919,10 +4999,8 @@ static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
if (caps.aux_support) {
u32 avg, peak;
- bool rc;
- rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
- if (!rc)
+ if (!dc_link_get_backlight_level_nits(link, &avg, &peak))
return dm->brightness[bl_idx];
return convert_brightness_to_user(&caps, avg);
}
@@ -4988,8 +5066,11 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
} else
props.brightness = props.max_brightness = MAX_BACKLIGHT_LEVEL;
- if (caps->data_points && !(amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE))
+ if (caps->data_points && !(amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE)) {
drm_info(drm, "Using custom brightness curve\n");
+ props.scale = BACKLIGHT_SCALE_NON_LINEAR;
+ } else
+ props.scale = BACKLIGHT_SCALE_LINEAR;
props.type = BACKLIGHT_RAW;
snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
@@ -6352,6 +6433,10 @@ static void fill_stream_properties_from_drm_display_mode(
&& aconnector
&& aconnector->force_yuv420_output)
timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
+ else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR422)
+ && aconnector
+ && aconnector->force_yuv422_output)
+ timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR422;
else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
@@ -6383,13 +6468,15 @@ static void fill_stream_properties_from_drm_display_mode(
(struct drm_connector *)connector,
mode_in);
if (err < 0)
- drm_warn_once(connector->dev, "Failed to setup avi infoframe on connector %s: %zd \n", connector->name, err);
+ drm_warn_once(connector->dev, "Failed to setup avi infoframe on connector %s: %zd\n",
+ connector->name, err);
timing_out->vic = avi_frame.video_code;
err = drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame,
(struct drm_connector *)connector,
mode_in);
if (err < 0)
- drm_warn_once(connector->dev, "Failed to setup vendor infoframe on connector %s: %zd \n", connector->name, err);
+ drm_warn_once(connector->dev, "Failed to setup vendor infoframe on connector %s: %zd\n",
+ connector->name, err);
timing_out->hdmi_vic = hv_frame.vic;
}
@@ -7307,10 +7394,6 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
- if (aconnector->i2c) {
- i2c_del_adapter(&aconnector->i2c->base);
- kfree(aconnector->i2c);
- }
kfree(aconnector->dm_dp_aux.aux.name);
kfree(connector);
@@ -7610,6 +7693,7 @@ create_validate_stream_for_sink(struct drm_connector *connector,
bpc_limit = 8;
do {
+ drm_dbg_kms(connector->dev, "Trying with %d bpc\n", requested_bpc);
stream = create_stream_for_sink(connector, drm_mode,
dm_state, old_stream,
requested_bpc);
@@ -7645,16 +7729,41 @@ create_validate_stream_for_sink(struct drm_connector *connector,
} while (stream == NULL && requested_bpc >= bpc_limit);
- if ((dc_result == DC_FAIL_ENC_VALIDATE ||
- dc_result == DC_EXCEED_DONGLE_CAP) &&
- !aconnector->force_yuv420_output) {
- DRM_DEBUG_KMS("%s:%d Retry forcing yuv420 encoding\n",
- __func__, __LINE__);
-
- aconnector->force_yuv420_output = true;
+ switch (dc_result) {
+ /*
+ * If we failed to validate the DP bandwidth of the stream with the
+ * requested RGB color depth, try to fall back and configure, in order:
+ * YUV422 (8bpc, 6bpc)
+ * YUV420 (8bpc, 6bpc)
+ */
+ case DC_FAIL_ENC_VALIDATE:
+ case DC_EXCEED_DONGLE_CAP:
+ case DC_NO_DP_LINK_BANDWIDTH:
+ /* recursively entered twice and already tried both YUV422 and YUV420 */
+ if (aconnector->force_yuv422_output && aconnector->force_yuv420_output)
+ break;
+ /* first failure; try YUV422 */
+ if (!aconnector->force_yuv422_output) {
+ drm_dbg_kms(connector->dev, "%s:%d Validation failed with %d, retrying w/ YUV422\n",
+ __func__, __LINE__, dc_result);
+ aconnector->force_yuv422_output = true;
+ /* recursively entered and YUV422 failed, try YUV420 */
+ } else if (!aconnector->force_yuv420_output) {
+ drm_dbg_kms(connector->dev, "%s:%d Validation failed with %d, retrying w/ YUV420\n",
+ __func__, __LINE__, dc_result);
+ aconnector->force_yuv420_output = true;
+ }
stream = create_validate_stream_for_sink(connector, drm_mode,
- dm_state, old_stream);
+ dm_state, old_stream);
+ aconnector->force_yuv422_output = false;
aconnector->force_yuv420_output = false;
+ break;
+ case DC_OK:
+ break;
+ default:
+ drm_dbg_kms(connector->dev, "%s:%d Unhandled validation failure %d\n",
+ __func__, __LINE__, dc_result);
+ break;
}
return stream;
@@ -7799,6 +7908,14 @@ amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
if (!crtc)
return 0;
+ if (new_con_state->privacy_screen_sw_state != old_con_state->privacy_screen_sw_state) {
+ new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(new_crtc_state))
+ return PTR_ERR(new_crtc_state);
+
+ new_crtc_state->mode_changed = true;
+ }
+
if (new_con_state->colorspace != old_con_state->colorspace) {
new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
if (IS_ERR(new_crtc_state))
@@ -7930,7 +8047,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
if (IS_ERR(mst_state))
return PTR_ERR(mst_state);
- mst_state->pbn_div.full = dfixed_const(dm_mst_get_pbn_divider(aconnector->mst_root->dc_link));
+ mst_state->pbn_div.full = dm_mst_get_pbn_divider(aconnector->mst_root->dc_link);
if (!state->duplicated) {
int max_bpc = conn_state->max_requested_bpc;
@@ -8154,6 +8271,10 @@ static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
{"1920x1200", 1920, 1200}
};
+ if ((connector->connector_type != DRM_MODE_CONNECTOR_eDP) &&
+ (connector->connector_type != DRM_MODE_CONNECTOR_LVDS))
+ return;
+
n = ARRAY_SIZE(common_modes);
for (i = 0; i < n; i++) {
@@ -8493,6 +8614,18 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
if (adev->dm.hdcp_workqueue)
drm_connector_attach_content_protection_property(&aconnector->base, true);
}
+
+ if (connector_type == DRM_MODE_CONNECTOR_eDP) {
+ struct drm_privacy_screen *privacy_screen;
+
+ privacy_screen = drm_privacy_screen_get(adev_to_drm(adev)->dev, NULL);
+ if (!IS_ERR(privacy_screen)) {
+ drm_connector_attach_privacy_screen_provider(&aconnector->base,
+ privacy_screen);
+ } else if (PTR_ERR(privacy_screen) != -ENODEV) {
+ drm_warn(adev_to_drm(adev), "Error getting privacy-screen\n");
+ }
+ }
}
static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
@@ -8622,7 +8755,7 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
}
aconnector->i2c = i2c;
- res = i2c_add_adapter(&i2c->base);
+ res = devm_i2c_add_adapter(dm->adev->dev, &i2c->base);
if (res) {
drm_err(adev_to_drm(dm->adev), "Failed to register hw i2c %d\n", link->link_index);
@@ -10119,69 +10252,40 @@ static void dm_set_writeback(struct amdgpu_display_manager *dm,
drm_writeback_queue_job(wb_conn, new_con_state);
}
-/**
- * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
- * @state: The atomic state to commit
- *
- * This will tell DC to commit the constructed DC state from atomic_check,
- * programming the hardware. Any failures here implies a hardware failure, since
- * atomic check should have filtered anything non-kosher.
- */
-static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+static void amdgpu_dm_update_hdcp(struct drm_atomic_state *state)
{
+ struct drm_connector_state *old_con_state, *new_con_state;
struct drm_device *dev = state->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_display_manager *dm = &adev->dm;
- struct dm_atomic_state *dm_state;
- struct dc_state *dc_state = NULL;
- u32 i, j;
- struct drm_crtc *crtc;
- struct drm_crtc_state *old_crtc_state, *new_crtc_state;
- unsigned long flags;
- bool wait_for_vblank = true;
struct drm_connector *connector;
- struct drm_connector_state *old_con_state, *new_con_state;
- struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
- int crtc_disable_count = 0;
-
- trace_amdgpu_dm_atomic_commit_tail_begin(state);
-
- drm_atomic_helper_update_legacy_modeset_state(dev, state);
- drm_dp_mst_atomic_wait_for_dependencies(state);
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ int i;
- dm_state = dm_atomic_get_new_state(state);
- if (dm_state && dm_state->context) {
- dc_state = dm_state->context;
- amdgpu_dm_commit_streams(state, dc_state);
- }
+ if (!adev->dm.hdcp_workqueue)
+ return;
for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct dm_crtc_state *dm_new_crtc_state;
struct amdgpu_dm_connector *aconnector;
- if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ if (!connector || connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
continue;
aconnector = to_amdgpu_dm_connector(connector);
- if (!adev->dm.hdcp_workqueue)
- continue;
+ drm_dbg(dev, "[HDCP_DM] -------------- i : %x ----------\n", i);
- pr_debug("[HDCP_DM] -------------- i : %x ----------\n", i);
-
- if (!connector)
- continue;
-
- pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
+ drm_dbg(dev, "[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
connector->index, connector->status, connector->dpms);
- pr_debug("[HDCP_DM] state protection old: %x new: %x\n",
+ drm_dbg(dev, "[HDCP_DM] state protection old: %x new: %x\n",
old_con_state->content_protection, new_con_state->content_protection);
if (aconnector->dc_sink) {
if (aconnector->dc_sink->sink_signal != SIGNAL_TYPE_VIRTUAL &&
aconnector->dc_sink->sink_signal != SIGNAL_TYPE_NONE) {
- pr_debug("[HDCP_DM] pipe_ctx dispname=%s\n",
+ drm_dbg(dev, "[HDCP_DM] pipe_ctx dispname=%s\n",
aconnector->dc_sink->edid_caps.display_name);
}
}
@@ -10195,7 +10299,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
}
if (old_crtc_state)
- pr_debug("old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+ drm_dbg(dev, "old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
old_crtc_state->enable,
old_crtc_state->active,
old_crtc_state->mode_changed,
@@ -10203,29 +10307,13 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
old_crtc_state->connectors_changed);
if (new_crtc_state)
- pr_debug("NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+ drm_dbg(dev, "NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
new_crtc_state->enable,
new_crtc_state->active,
new_crtc_state->mode_changed,
new_crtc_state->active_changed,
new_crtc_state->connectors_changed);
- }
-
- for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
- struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
- struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
- struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
- if (!adev->dm.hdcp_workqueue)
- continue;
-
- new_crtc_state = NULL;
- old_crtc_state = NULL;
-
- if (acrtc) {
- new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
- old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
- }
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
@@ -10269,7 +10357,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
new_con_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED)
enable_encryption = true;
- drm_info(adev_to_drm(adev), "[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption);
+ drm_info(dev, "[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption);
if (aconnector->dc_link)
hdcp_update_display(
@@ -10277,6 +10365,78 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
new_con_state->hdcp_content_type, enable_encryption);
}
}
+}
+
+static int amdgpu_dm_atomic_setup_commit(struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+ int i, ret;
+
+ ret = drm_dp_mst_atomic_setup_commit(state);
+ if (ret)
+ return ret;
+
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ /*
+ * Color management settings. We also update color properties
+ * when a modeset is needed, to ensure it gets reprogrammed.
+ */
+ if (dm_new_crtc_state->base.active && dm_new_crtc_state->stream &&
+ (dm_new_crtc_state->base.color_mgmt_changed ||
+ dm_old_crtc_state->regamma_tf != dm_new_crtc_state->regamma_tf ||
+ drm_atomic_crtc_needs_modeset(new_crtc_state))) {
+ ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
+ if (ret) {
+ drm_dbg_atomic(state->dev, "Failed to update color state\n");
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
+ * @state: The atomic state to commit
+ *
+ * This will tell DC to commit the constructed DC state from atomic_check,
+ * programming the hardware. Any failure here implies a hardware failure, since
+ * atomic check should have filtered anything non-kosher.
+ */
+static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+{
+ struct drm_device *dev = state->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct dm_atomic_state *dm_state;
+ struct dc_state *dc_state = NULL;
+ u32 i, j;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ unsigned long flags;
+ bool wait_for_vblank = true;
+ struct drm_connector *connector;
+ struct drm_connector_state *old_con_state = NULL, *new_con_state = NULL;
+ struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+ int crtc_disable_count = 0;
+
+ trace_amdgpu_dm_atomic_commit_tail_begin(state);
+
+ drm_atomic_helper_update_legacy_modeset_state(dev, state);
+ drm_dp_mst_atomic_wait_for_dependencies(state);
+
+ dm_state = dm_atomic_get_new_state(state);
+ if (dm_state && dm_state->context) {
+ dc_state = dm_state->context;
+ amdgpu_dm_commit_streams(state, dc_state);
+ }
+
+ amdgpu_dm_update_hdcp(state);
/* Handle connector state changes */
for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
@@ -10379,6 +10539,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
&stream_update);
mutex_unlock(&dm->dc_lock);
kfree(dummy_updates);
+
+ drm_connector_update_privacy_screen(new_con_state);
}
/**
@@ -10430,6 +10592,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
if (amdgpu_dm_crc_window_is_activated(crtc)) {
uint8_t cnt;
+
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
for (cnt = 0; cnt < MAX_CRC_WINDOW_NUM; cnt++) {
if (acrtc->dm_irq_params.window_param[cnt].enable) {
@@ -10732,6 +10895,8 @@ static void get_freesync_config_for_crtc(
} else {
config.state = VRR_STATE_INACTIVE;
}
+ } else {
+ config.state = VRR_STATE_UNSUPPORTED;
}
out:
new_crtc_state->freesync_config = config;
@@ -11049,7 +11214,7 @@ skip_modeset:
if (dm_new_crtc_state->base.color_mgmt_changed ||
dm_old_crtc_state->regamma_tf != dm_new_crtc_state->regamma_tf ||
drm_atomic_crtc_needs_modeset(new_crtc_state)) {
- ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
+ ret = amdgpu_dm_check_crtc_color_mgmt(dm_new_crtc_state, true);
if (ret)
goto fail;
}
@@ -12633,7 +12798,7 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
dm_con_state = to_dm_connector_state(connector->state);
- if (!adev->dm.freesync_module)
+ if (!adev->dm.freesync_module || !dc_supports_vrr(sink->ctx->dce_version))
goto update;
edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw()
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 6aae51c1beb3..009f206226f0 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright (C) 2015-2020 Advanced Micro Devices, Inc. All rights reserved.
*
@@ -152,6 +153,20 @@ struct idle_workqueue {
bool running;
};
+/**
+ * struct vupdate_offload_work - Work data for offloading a task from the vupdate handler
+ * @work: Kernel work data for the work event
+ * @adev: amdgpu_device back pointer
+ * @stream: DC stream associated with the crtc
+ * @adjust: DC CRTC timing adjust to be applied to the crtc
+ */
+struct vupdate_offload_work {
+ struct work_struct work;
+ struct amdgpu_device *adev;
+ struct dc_stream_state *stream;
+ struct dc_crtc_timing_adjust *adjust;
+};
+
#define MAX_LUMINANCE_DATA_POINTS 99
/**
@@ -201,6 +216,11 @@ struct amdgpu_dm_backlight_caps {
*/
bool aux_support;
/**
+ * @brightness_mask: After deriving brightness, OR it with this mask.
+ * Workaround for panels that misbehave at certain brightness values.
+ */
+ u32 brightness_mask;
+ /**
* @ac_level: the default brightness if booted on AC
*/
u8 ac_level;
@@ -760,6 +780,9 @@ struct amdgpu_dm_connector {
uint16_t vc_full_pbn;
struct mutex handle_mst_msg_ready;
+ /* branch device specific data */
+ uint32_t branch_ieee_oui;
+
/* TODO see if we can merge with ddc_bus or make a dm_connector */
struct amdgpu_i2c_adapter *i2c;
@@ -783,6 +806,7 @@ struct amdgpu_dm_connector {
bool fake_enable;
bool force_yuv420_output;
+ bool force_yuv422_output;
struct dsc_preferred_settings dsc_settings;
union dp_downstream_port_present mst_downstream_port_present;
/* Cached display modes */
@@ -1030,6 +1054,8 @@ void amdgpu_dm_init_color_mod(void);
int amdgpu_dm_create_color_properties(struct amdgpu_device *adev);
int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state);
int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc);
+int amdgpu_dm_check_crtc_color_mgmt(struct dm_crtc_state *crtc,
+ bool check_only);
int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
struct drm_plane_state *plane_state,
struct dc_plane_state *dc_plane_state);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index c0dfe2d8b3be..a4ac6d442278 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
@@ -27,7 +28,6 @@
#include "amdgpu_dm.h"
#include "dc.h"
#include "modules/color/color_gamma.h"
-#include "basics/conversion.h"
/**
* DOC: overview
@@ -566,12 +566,11 @@ static int __set_output_tf(struct dc_transfer_func *func,
return res ? 0 : -ENOMEM;
}
-static int amdgpu_dm_set_atomic_regamma(struct dc_stream_state *stream,
+static int amdgpu_dm_set_atomic_regamma(struct dc_transfer_func *out_tf,
const struct drm_color_lut *regamma_lut,
uint32_t regamma_size, bool has_rom,
enum dc_transfer_func_predefined tf)
{
- struct dc_transfer_func *out_tf = &stream->out_transfer_func;
int ret = 0;
if (regamma_size || tf != TRANSFER_FUNCTION_LINEAR) {
@@ -885,33 +884,33 @@ int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state)
}
/**
- * amdgpu_dm_update_crtc_color_mgmt: Maps DRM color management to DC stream.
+ * amdgpu_dm_check_crtc_color_mgmt: Check if DRM color props are programmable by DC.
* @crtc: amdgpu_dm crtc state
+ * @check_only: only validate the color state without updating the DC stream
*
- * With no plane level color management properties we're free to use any
- * of the HW blocks as long as the CRTC CTM always comes before the
- * CRTC RGM and after the CRTC DGM.
- *
- * - The CRTC RGM block will be placed in the RGM LUT block if it is non-linear.
- * - The CRTC DGM block will be placed in the DGM LUT block if it is non-linear.
- * - The CRTC CTM will be placed in the gamut remap block if it is non-linear.
+ * This function verifies the CRTC LUT sizes, that there is enough space for
+ * the output transfer function, and that its parameters can be calculated by
+ * the AMD color module. It also adjusts some settings for programming the
+ * CRTC degamma at the plane stage, using the plane DGM block.
*
* The RGM block is typically more fully featured and accurate across
* all ASICs - DCE can't support a custom non-linear CRTC DGM.
*
* For supporting both plane level color management and CRTC level color
- * management at once we have to either restrict the usage of CRTC properties
- * or blend adjustments together.
+ * management at once we have to either restrict the usage of some CRTC
+ * properties or blend adjustments together.
*
* Returns:
- * 0 on success. Error code if setup fails.
+ * 0 on success. Error code if validation fails.
*/
-int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
+int amdgpu_dm_check_crtc_color_mgmt(struct dm_crtc_state *crtc,
+ bool check_only)
{
struct dc_stream_state *stream = crtc->stream;
struct amdgpu_device *adev = drm_to_adev(crtc->base.state->dev);
bool has_rom = adev->asic_type <= CHIP_RAVEN;
- struct drm_color_ctm *ctm = NULL;
+ struct dc_transfer_func *out_tf;
const struct drm_color_lut *degamma_lut, *regamma_lut;
uint32_t degamma_size, regamma_size;
bool has_regamma, has_degamma;
@@ -940,6 +939,14 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
crtc->cm_has_degamma = false;
crtc->cm_is_degamma_srgb = false;
+ if (check_only) {
+ out_tf = kvzalloc(sizeof(*out_tf), GFP_KERNEL);
+ if (!out_tf)
+ return -ENOMEM;
+ } else {
+ out_tf = &stream->out_transfer_func;
+ }
+
/* Setup regamma and degamma. */
if (is_legacy) {
/*
@@ -954,8 +961,8 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
* inverse color ramp in legacy userspace.
*/
crtc->cm_is_degamma_srgb = true;
- stream->out_transfer_func.type = TF_TYPE_DISTRIBUTED_POINTS;
- stream->out_transfer_func.tf = TRANSFER_FUNCTION_SRGB;
+ out_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
+ out_tf->tf = TRANSFER_FUNCTION_SRGB;
/*
* Note: although we pass has_rom as parameter here, we never
* actually use ROM because the color module only takes the ROM
@@ -963,16 +970,12 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
*
* See more in mod_color_calculate_regamma_params()
*/
- r = __set_legacy_tf(&stream->out_transfer_func, regamma_lut,
+ r = __set_legacy_tf(out_tf, regamma_lut,
regamma_size, has_rom);
- if (r)
- return r;
} else {
regamma_size = has_regamma ? regamma_size : 0;
- r = amdgpu_dm_set_atomic_regamma(stream, regamma_lut,
+ r = amdgpu_dm_set_atomic_regamma(out_tf, regamma_lut,
regamma_size, has_rom, tf);
- if (r)
- return r;
}
/*
@@ -981,6 +984,43 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
* have to place the CTM in the OCSC in that case.
*/
crtc->cm_has_degamma = has_degamma;
+ if (check_only)
+ kvfree(out_tf);
+
+ return r;
+}
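The check_only path runs the same output transfer-function computation against throwaway storage, so a validation pass can reject unsupported color state without mutating the committed stream. A minimal sketch of the pattern, with the kernel types and helpers replaced by hypothetical stand-ins:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdlib.h>

    struct transfer_func { int type; };

    /* Sketch: compute into scratch storage when only validating,
     * into the live object when actually committing. */
    static int color_mgmt(struct transfer_func *live, bool check_only)
    {
            struct transfer_func *out;
            int ret = 0;

            out = check_only ? calloc(1, sizeof(*out)) : live;
            if (!out)
                    return -ENOMEM;

            out->type = 1;          /* stand-in for the real LUT setup */

            if (check_only)
                    free(out);      /* validation result is discarded */
            return ret;
    }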
+
+/**
+ * amdgpu_dm_update_crtc_color_mgmt: Maps DRM color management to DC stream.
+ * @crtc: amdgpu_dm crtc state
+ *
+ * With no plane level color management properties we're free to use any
+ * of the HW blocks as long as the CRTC CTM always comes before the
+ * CRTC RGM and after the CRTC DGM.
+ *
+ * - The CRTC RGM block will be placed in the RGM LUT block if it is non-linear.
+ * - The CRTC DGM block will be placed in the DGM LUT block if it is non-linear.
+ * - The CRTC CTM will be placed in the gamut remap block if it is non-linear.
+ *
+ * The RGM block is typically more fully featured and accurate across
+ * all ASICs - DCE can't support a custom non-linear CRTC DGM.
+ *
+ * For supporting both plane level color management and CRTC level color
+ * management at once we have to either restrict the usage of CRTC properties
+ * or blend adjustments together.
+ *
+ * Returns:
+ * 0 on success. Error code if setup fails.
+ */
+int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
+{
+ struct dc_stream_state *stream = crtc->stream;
+ struct drm_color_ctm *ctm = NULL;
+ int ret;
+
+ ret = amdgpu_dm_check_crtc_color_mgmt(crtc, false);
+ if (ret)
+ return ret;
/* Setup CRTC CTM. */
if (crtc->base.ctm) {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
index 033bd817d871..e20aa7438066 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
index 3da056c8d20b..95bdb8699d7f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2019 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index 45feb404b097..1ec9d03ad747 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -218,8 +218,10 @@ static void amdgpu_dm_idle_worker(struct work_struct *work)
break;
}
- if (idle_work->enable)
+ if (idle_work->enable) {
+ dc_post_update_surfaces_to_stream(idle_work->dm->dc);
dc_allow_idle_optimizations(idle_work->dm->dc, true);
+ }
mutex_unlock(&idle_work->dm->dc_lock);
}
idle_work->dm->idle_workqueue->running = false;
@@ -273,8 +275,10 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
vblank_work->acrtc->dm_irq_params.allow_sr_entry);
}
- if (dm->active_vblank_irq_count == 0)
+ if (dm->active_vblank_irq_count == 0) {
+ dc_post_update_surfaces_to_stream(dm->dc);
dc_allow_idle_optimizations(dm->dc, true);
+ }
mutex_unlock(&dm->dc_lock);
@@ -317,13 +321,17 @@ static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable)
dc->config.disable_ips != DMUB_IPS_DISABLE_ALL &&
sr_supported && vblank->config.disable_immediate)
drm_crtc_vblank_restore(crtc);
+ }
- /* vblank irq on -> Only need vupdate irq in vrr mode */
- if (amdgpu_dm_crtc_vrr_active(acrtc_state))
- rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, true);
- } else {
- /* vblank irq off -> vupdate irq off */
- rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, false);
+ if (dc_supports_vrr(dm->dc->ctx->dce_version)) {
+ if (enable) {
+ /* vblank irq on -> Only need vupdate irq in vrr mode */
+ if (amdgpu_dm_crtc_vrr_active(acrtc_state))
+ rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, true);
+ } else {
+ /* vblank irq off -> vupdate irq off */
+ rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, false);
+ }
}
if (rc)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index b726bcd18e29..f263e1a4537e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
@@ -3106,6 +3107,35 @@ static int replay_get_state(void *data, u64 *val)
}
/*
+ * Start / stop Replay residency capture
+ */
+static int replay_set_residency(void *data, u64 val)
+{
+ struct amdgpu_dm_connector *connector = data;
+ struct dc_link *link = connector->dc_link;
+ bool is_start = (val != 0);
+ u32 residency = 0;
+
+ link->dc->link_srv->edp_replay_residency(link, &residency, is_start, PR_RESIDENCY_MODE_PHY);
+ return 0;
+}
+
+/*
+ * Read Replay residency
+ */
+static int replay_get_residency(void *data, u64 *val)
+{
+ struct amdgpu_dm_connector *connector = data;
+ struct dc_link *link = connector->dc_link;
+ u32 residency = 0;
+
+ link->dc->link_srv->edp_replay_residency(link, &residency, false, PR_RESIDENCY_MODE_PHY);
+ *val = (u64)residency;
+
+ return 0;
+}
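For reference, a userspace consumer would start capture by writing a non-zero value to the new node, stop it with zero, then read back the accumulated PHY residency. A hedged sketch (run as root; the connector directory name below is an assumption and varies per system):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            /* hypothetical path; the connector name depends on enumeration */
            const char *node = "/sys/kernel/debug/dri/0/eDP-1/replay_residency";
            char buf[32] = {0};
            int fd = open(node, O_RDWR);

            if (fd < 0)
                    return 1;
            write(fd, "1", 1);                      /* start residency capture */
            sleep(5);                               /* accumulate for a while */
            write(fd, "0", 1);                      /* stop capture */
            pread(fd, buf, sizeof(buf) - 1, 0);     /* read the counter back */
            printf("replay residency: %s", buf);
            close(fd);
            return 0;
    }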
+
+/*
* Read PSR state
*/
static int psr_get(void *data, u64 *val)
@@ -3324,7 +3354,8 @@ DEFINE_DEBUGFS_ATTRIBUTE(dmcub_trace_event_state_fops, dmcub_trace_event_state_g
dmcub_trace_event_state_set, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(replay_state_fops, replay_get_state, NULL, "%llu\n");
-
+DEFINE_DEBUGFS_ATTRIBUTE(replay_residency_fops, replay_get_residency, replay_set_residency,
+ "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(psr_fops, psr_get, NULL, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(psr_residency_fops, psr_read_residency, NULL,
"%llu\n");
@@ -3502,6 +3533,8 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector)
debugfs_create_file("replay_capability", 0444, dir, connector,
&replay_capability_fops);
debugfs_create_file("replay_state", 0444, dir, connector, &replay_state_fops);
+ debugfs_create_file_unsafe("replay_residency", 0444, dir,
+ connector, &replay_residency_fops);
debugfs_create_file_unsafe("psr_capability", 0444, dir, connector, &psr_capability_fops);
debugfs_create_file_unsafe("psr_state", 0444, dir, connector, &psr_fops);
debugfs_create_file_unsafe("psr_residency", 0444, dir,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h
index 071200473c27..122cdc124b3b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index b1d1897f5eaf..19038f336155 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2019 Advanced Micro Devices, Inc.
*
@@ -222,6 +223,7 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
display_adjust.disable = MOD_HDCP_DISPLAY_NOT_DISABLE;
link_adjust.auth_delay = 2;
+ link_adjust.retry_limit = MAX_NUM_OF_ATTEMPTS;
if (content_type == DRM_MODE_HDCP_CONTENT_TYPE0) {
link_adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0;
@@ -571,6 +573,7 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
link->dp.usb4_enabled = config->usb4_enabled;
display->adjust.disable = MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION;
link->adjust.auth_delay = 2;
+ link->adjust.retry_limit = MAX_NUM_OF_ATTEMPTS;
link->adjust.hdcp1.disable = 0;
hdcp_w->encryption_status[display->index] = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
@@ -765,14 +768,18 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev,
struct mod_hdcp_ddc_funcs *ddc_funcs = &config->ddc.funcs;
config->psp.handle = &adev->psp;
- if (dc->ctx->dce_version == DCN_VERSION_3_1 ||
+ if (dc->ctx->dce_version == DCN_VERSION_3_1 ||
dc->ctx->dce_version == DCN_VERSION_3_14 ||
dc->ctx->dce_version == DCN_VERSION_3_15 ||
- dc->ctx->dce_version == DCN_VERSION_3_5 ||
+ dc->ctx->dce_version == DCN_VERSION_3_16 ||
+ dc->ctx->dce_version == DCN_VERSION_3_2 ||
+ dc->ctx->dce_version == DCN_VERSION_3_21 ||
+ dc->ctx->dce_version == DCN_VERSION_3_5 ||
dc->ctx->dce_version == DCN_VERSION_3_51 ||
- dc->ctx->dce_version == DCN_VERSION_3_6 ||
- dc->ctx->dce_version == DCN_VERSION_3_16)
+ dc->ctx->dce_version == DCN_VERSION_3_6 ||
+ dc->ctx->dce_version == DCN_VERSION_4_01)
config->psp.caps.dtm_v3_supported = 1;
+
config->ddc.handle = dc_get_link_at_index(dc, i);
ddc_funcs->write_i2c = lp_write_i2c;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
index 69b445b011c8..4faa344f196e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2019 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 9e3e51a2dc49..fe100e4c9801 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index b61e210f6246..a1c722112c22 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h
index ba17c23b2706..4f6b58f4f90d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h
index 6c9de834455b..3c9995275cbd 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 77a9d2c7d318..5e92eaa67aa3 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
@@ -329,6 +330,34 @@ static bool retrieve_downstream_port_device(struct amdgpu_dm_connector *aconnect
return true;
}
+static bool retrieve_branch_specific_data(struct amdgpu_dm_connector *aconnector)
+{
+ struct drm_connector *connector = &aconnector->base;
+ struct drm_dp_mst_port *port = aconnector->mst_output_port;
+ struct drm_dp_mst_port *port_parent;
+ struct drm_dp_aux *immediate_upstream_aux;
+ struct drm_dp_desc branch_desc;
+
+ if (!port->parent)
+ return false;
+
+ port_parent = port->parent->port_parent;
+
+ immediate_upstream_aux = port_parent ? &port_parent->aux : port->mgr->aux;
+
+ if (drm_dp_read_desc(immediate_upstream_aux, &branch_desc, true))
+ return false;
+
+ aconnector->branch_ieee_oui = (branch_desc.ident.oui[0] << 16) +
+ (branch_desc.ident.oui[1] << 8) +
+ (branch_desc.ident.oui[2]);
+
+ drm_dbg_dp(port->aux.drm_dev, "MST branch oui 0x%x detected at %s\n",
+ aconnector->branch_ieee_oui, connector->name);
+
+ return true;
+}
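The three DPCD OUI bytes are packed big-endian into a single 24-bit identifier, which is what the DP_BRANCH_DEVICE_ID_90CC24 comparison later in this patch relies on. A standalone illustration with example input bytes:

    #include <stdio.h>

    int main(void)
    {
            /* example bytes as drm_dp_read_desc() would report them */
            unsigned char oui[3] = { 0x90, 0xCC, 0x24 };
            unsigned int id = (oui[0] << 16) + (oui[1] << 8) + oui[2];

            printf("branch OUI: 0x%06X\n", id);     /* prints 0x90CC24 */
            return 0;
    }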
+
static int dm_dp_mst_get_modes(struct drm_connector *connector)
{
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
@@ -668,6 +697,9 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
drm_connector_set_path_property(connector, pathprop);
+ if (!retrieve_branch_specific_data(aconnector))
+ aconnector->branch_ieee_oui = 0;
+
/*
* Initialize connector state before adding the connector to drm and
* framebuffer lists
@@ -822,13 +854,20 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
drm_connector_attach_dp_subconnector_property(&aconnector->base);
}
-int dm_mst_get_pbn_divider(struct dc_link *link)
+uint32_t dm_mst_get_pbn_divider(struct dc_link *link)
{
+ uint32_t pbn_div_x100;
+ uint64_t dividend, divisor;
+
if (!link)
return 0;
- return dc_link_bandwidth_kbps(link,
- dc_link_get_link_cap(link)) / (8 * 1000 * 54);
+ dividend = (uint64_t)dc_link_bandwidth_kbps(link, dc_link_get_link_cap(link)) * 100;
+ divisor = 8 * 1000 * 54;
+
+ pbn_div_x100 = div64_u64(dividend, divisor);
+
+ return dfixed_const(pbn_div_x100) / 100;
}
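The reworked divider keeps two decimal digits of precision by scaling the quotient by 100 before converting it to the 20.12 fixed-point format produced by dfixed_const(). A worked example under an assumed link capability of 17,280,000 kbps (HBR2 x 4 lanes after 8b/10b framing):

    #include <stdint.h>
    #include <stdio.h>

    #define FP_SHIFT 12     /* mirrors dfixed_const(): 20.12 fixed point */

    int main(void)
    {
            uint64_t link_kbps = 17280000;          /* assumed link bandwidth */
            uint64_t divisor = 8 * 1000 * 54;       /* PBN granularity divisor */
            uint32_t pbn_div_x100 = (link_kbps * 100) / divisor;    /* 4000 */
            uint32_t fixed = ((uint64_t)pbn_div_x100 << FP_SHIFT) / 100;

            /* prints "pbn divider: 40.00 (fixed 0x28000)" */
            printf("pbn divider: %u.%02u (fixed 0x%X)\n",
                   pbn_div_x100 / 100, pbn_div_x100 % 100, fixed);
            return 0;
    }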
struct dsc_mst_fairness_params {
@@ -1763,14 +1802,20 @@ static bool dp_get_link_current_set_bw(struct drm_dp_aux *aux, uint32_t *cur_lin
union lane_count_set lane_count;
u8 dp_link_encoding;
u8 link_bw_set = 0;
+ u8 data[16] = {0};
*cur_link_bw = 0;
- if (drm_dp_dpcd_read(aux, DP_MAIN_LINK_CHANNEL_CODING_SET, &dp_link_encoding, 1) != 1 ||
- drm_dp_dpcd_read(aux, DP_LANE_COUNT_SET, &lane_count.raw, 1) != 1 ||
- drm_dp_dpcd_read(aux, DP_LINK_BW_SET, &link_bw_set, 1) != 1)
+ if (drm_dp_dpcd_read(aux, DP_LINK_BW_SET, data, 16) != 16)
return false;
+ dp_link_encoding = data[DP_MAIN_LINK_CHANNEL_CODING_SET - DP_LINK_BW_SET];
+ link_bw_set = data[DP_LINK_BW_SET - DP_LINK_BW_SET];
+ lane_count.raw = data[DP_LANE_COUNT_SET - DP_LINK_BW_SET];
+
+ drm_dbg_dp(aux->drm_dev, "MST_DSC downlink setting: %d, 0x%x x %d\n",
+ dp_link_encoding, link_bw_set, lane_count.bits.LANE_COUNT_SET);
+
switch (dp_link_encoding) {
case DP_8b_10b_ENCODING:
link_rate = link_bw_set;
@@ -1867,8 +1912,10 @@ enum dc_status dm_dp_mst_is_port_support_mode(
end_link_bw = aconnector->mst_local_bw;
}
- if (end_link_bw > 0 && stream_kbps > end_link_bw) {
- DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link."
+ if (end_link_bw > 0 &&
+ stream_kbps > end_link_bw &&
+ aconnector->branch_ieee_oui != DP_BRANCH_DEVICE_ID_90CC24) {
+ DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link. "
"Mode required bw can't fit into last link\n");
return DC_FAIL_BANDWIDTH_VALIDATE;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
index 600d6e221011..6f7ea684b555 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
@@ -59,7 +60,7 @@ enum mst_msg_ready_type {
struct amdgpu_display_manager;
struct amdgpu_dm_connector;
-int dm_mst_get_pbn_divider(struct dc_link *link);
+uint32_t dm_mst_get_pbn_divider(struct dc_link *link);
void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
struct amdgpu_dm_connector *aconnector,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
index 3d2f8eedeef2..e027798ece03 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
@@ -146,7 +146,7 @@ static void amdgpu_dm_plane_add_modifier(uint64_t **mods, uint64_t *size, uint64
if (*cap - *size < 1) {
uint64_t new_cap = *cap * 2;
- uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
+ uint64_t *new_mods = kmalloc_array(new_cap, sizeof(uint64_t), GFP_KERNEL);
if (!new_mods) {
kfree(*mods);
@@ -732,7 +732,7 @@ static int amdgpu_dm_plane_get_plane_modifiers(struct amdgpu_device *adev, unsig
if (adev->family < AMDGPU_FAMILY_AI)
return 0;
- *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
+ *mods = kmalloc_array(capacity, sizeof(uint64_t), GFP_KERNEL);
if (plane_type == DRM_PLANE_TYPE_CURSOR) {
amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
index 848c5b4bb301..11b2ea6edf95 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
@@ -97,6 +98,7 @@ bool dm_pp_apply_display_requirements(
const struct dm_pp_single_disp_config *dc_cfg =
&pp_display_cfg->disp_configs[i];
adev->pm.pm_display_cfg.displays[i].controller_id = dc_cfg->pipe_idx + 1;
+ adev->pm.pm_display_cfg.displays[i].pixel_clock = dc_cfg->pixel_clock;
}
amdgpu_dpm_display_configuration_change(adev, &adev->pm.pm_display_cfg);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index ff7b867ae98b..fd491b7a3cd7 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
@@ -26,7 +27,6 @@
#include "amdgpu_dm_psr.h"
#include "dc_dmub_srv.h"
#include "dc.h"
-#include "dm_helpers.h"
#include "amdgpu_dm.h"
#include "modules/power/power_helpers.h"
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
index e2366321a3c1..4fb8626913cf 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c
index 41f07f13a7b5..80704d709e44 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2023 Advanced Micro Devices, Inc.
*
@@ -30,7 +31,7 @@
#include "amdgpu_dm.h"
#include "modules/power/power_helpers.h"
#include "dmub/inc/dmub_cmd.h"
-#include "dc/inc/link.h"
+#include "dc/inc/link_service.h"
/*
* amdgpu_dm_link_supports_replay() - check if the link supports replay
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h
index 8126bdb1eb6b..73b6c67ae5e7 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
index 0005f5f8f34f..8550d5e8b753 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
@@ -52,11 +53,11 @@ void dm_perf_trace_timestamp(const char *func_name, unsigned int line, struct dc
func_name, line);
}
-void dm_trace_smu_msg(uint32_t msg_id, uint32_t param_in, struct dc_context *ctx)
+void dm_trace_smu_enter(uint32_t msg_id, uint32_t param_in, unsigned int delay, struct dc_context *ctx)
{
}
-void dm_trace_smu_delay(uint32_t delay, struct dc_context *ctx)
+void dm_trace_smu_exit(bool success, uint32_t response, struct dc_context *ctx)
{
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
index 95f890fda8aa..aa56fd6d56c3 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
index 3c9ecea7eebc..dc943abd6dba 100644
--- a/drivers/gpu/drm/amd/display/dc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -37,6 +37,7 @@ DC_LIBS += dcn301
DC_LIBS += dcn31
DC_LIBS += dml
DC_LIBS += dml2
+DC_LIBS += soc_and_ip_translator
endif
DC_LIBS += dce120
diff --git a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c
index d897f8a30ede..4da5adab799c 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c
@@ -1136,7 +1136,7 @@ static void calculate_bandwidth(
}
}
}
- data->total_dmifmc_urgent_trips = bw_ceil2(bw_div(data->total_requests_for_adjusted_dmif_size, (bw_add(dceip->dmif_request_buffer_size, bw_int_to_fixed(vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel * data->number_of_dram_channels)))), bw_int_to_fixed(1));
+ data->total_dmifmc_urgent_trips = bw_ceil2(bw_div(data->total_requests_for_adjusted_dmif_size, (bw_add(dceip->dmif_request_buffer_size, bw_int_to_fixed((uint64_t)vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel * data->number_of_dram_channels)))), bw_int_to_fixed(1));
data->total_dmifmc_urgent_latency = bw_mul(vbios->dmifmc_urgent_latency, data->total_dmifmc_urgent_trips);
data->total_display_reads_required_data = bw_int_to_fixed(0);
data->total_display_reads_required_dram_access_data = bw_int_to_fixed(0);
diff --git a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
index 452206b5095e..6073cadde76c 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
@@ -284,7 +284,7 @@ struct fixed31_32 dc_fixpt_cos(struct fixed31_32 arg)
dc_fixpt_mul(
square,
res),
- n * (n - 1)));
+ (long long)n * (n - 1)));
n -= 2;
} while (n != 0);
diff --git a/drivers/gpu/drm/amd/display/dc/basics/vector.c b/drivers/gpu/drm/amd/display/dc/basics/vector.c
index 6d2924114a3e..b413a672c2c0 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/vector.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/vector.c
@@ -170,7 +170,7 @@ bool dal_vector_remove_at_index(
memmove(
vector->container + (index * vector->struct_size),
vector->container + ((index + 1) * vector->struct_size),
- (vector->count - index - 1) * vector->struct_size);
+ (size_t)(vector->count - index - 1) * vector->struct_size);
vector->count -= 1;
return true;
@@ -219,7 +219,7 @@ bool dal_vector_insert_at(
memmove(
insert_address + vector->struct_size,
insert_address,
- vector->struct_size * (vector->count - position));
+ (size_t)vector->struct_size * (vector->count - position));
memmove(
insert_address,
@@ -271,7 +271,7 @@ struct vector *dal_vector_clone(
/* copy vector's data */
memmove(vec_cloned->container, vector->container,
- vec_cloned->struct_size * vec_cloned->capacity);
+ (size_t)vec_cloned->struct_size * vec_cloned->capacity);
return vec_cloned;
}
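The added (size_t) casts matter because the counts and struct sizes here are 32-bit, so their product is computed in 32-bit arithmetic and can wrap before memmove() ever sees it; widening one operand first forces the full-width multiply on LP64 targets. A self-contained illustration with deliberately oversized values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t count = 70000, struct_size = 70000;    /* product > 2^32 */

            /* the 32-bit product wraps before being widened */
            printf("wrapped: %zu\n", (size_t)(count * struct_size));
            /* casting one operand first keeps the full product */
            printf("correct: %zu\n", (size_t)count * struct_size);
            return 0;
    }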
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index 4071851f9e86..15cf13ec5302 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -28,7 +28,7 @@
#include "dccg.h"
#include "clk_mgr_internal.h"
#include "dc_state_priv.h"
-#include "link.h"
+#include "link_service.h"
#include "dce100/dce_clk_mgr.h"
#include "dce110/dce110_clk_mgr.h"
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
index dbd6ef1b60a0..6131ede2db7a 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
@@ -463,6 +463,9 @@ void dce_clk_mgr_construct(
clk_mgr->max_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
clk_mgr->cur_min_clks_state = DM_PP_CLOCKS_STATE_INVALID;
+ base->clks.max_supported_dispclk_khz =
+ clk_mgr->max_clks_by_state[DM_PP_CLOCKS_STATE_PERFORMANCE].display_clk_khz;
+
dce_clock_read_integrated_info(clk_mgr);
dce_clock_read_ss_info(clk_mgr);
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
index 13cf415e38e5..d50b9440210e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
@@ -164,7 +164,7 @@ void dce110_fill_display_configs(
stream->link->cur_link_settings.link_rate;
cfg->link_settings.link_spread =
stream->link->cur_link_settings.link_spread;
- cfg->sym_clock = stream->phy_pix_clk;
+ cfg->pixel_clock = stream->phy_pix_clk;
/* Round v_refresh*/
cfg->v_refresh = stream->timing.pix_clk_100hz * 100;
cfg->v_refresh /= stream->timing.h_total;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c
index a39641a0ff09..69dd80d9f738 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c
@@ -147,6 +147,8 @@ void dce60_clk_mgr_construct(
struct dc_context *ctx,
struct clk_mgr_internal *clk_mgr)
{
+ struct clk_mgr *base = &clk_mgr->base;
+
dce_clk_mgr_construct(ctx, clk_mgr);
memcpy(clk_mgr->max_clks_by_state,
@@ -157,5 +159,8 @@ void dce60_clk_mgr_construct(
clk_mgr->clk_mgr_shift = &disp_clk_shift;
clk_mgr->clk_mgr_mask = &disp_clk_mask;
clk_mgr->base.funcs = &dce60_funcs;
+
+ base->clks.max_supported_dispclk_khz =
+ clk_mgr->max_clks_by_state[DM_PP_CLOCKS_STATE_PERFORMANCE].display_clk_khz;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c
index 3253115a153d..827bc2431d5d 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c
@@ -69,7 +69,7 @@ static uint32_t dcn30_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, un
/* handle DALSMC_Result_CmdRejectedBusy? */
- TRACE_SMU_DELAY(delay_us * (initial_max_retries - max_retries), clk_mgr->base.ctx);
+ TRACE_SMU_MSG_DELAY(0, 0, delay_us * (initial_max_retries - max_retries), clk_mgr->base.ctx);
return reg;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
index 9e2ef0e724fc..7aee02d56292 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
@@ -563,6 +563,7 @@ static void vg_clk_mgr_helper_populate_bw_params(
{
int i, j;
struct clk_bw_params *bw_params = clk_mgr->base.bw_params;
+ uint32_t max_dispclk = 0, max_dppclk = 0;
j = -1;
@@ -584,6 +585,15 @@ static void vg_clk_mgr_helper_populate_bw_params(
return;
}
+ /* dispclk and dppclk can be max at any voltage, same number of levels for both */
+ if (clock_table->NumDispClkLevelsEnabled <= VG_NUM_DISPCLK_DPM_LEVELS &&
+ clock_table->NumDispClkLevelsEnabled <= VG_NUM_DPPCLK_DPM_LEVELS) {
+ max_dispclk = find_max_clk_value(clock_table->DispClocks, clock_table->NumDispClkLevelsEnabled);
+ max_dppclk = find_max_clk_value(clock_table->DppClocks, clock_table->NumDispClkLevelsEnabled);
+ } else {
+ ASSERT(0);
+ }
+
bw_params->clk_table.num_entries = j + 1;
for (i = 0; i < bw_params->clk_table.num_entries - 1; i++, j--) {
@@ -591,11 +601,17 @@ static void vg_clk_mgr_helper_populate_bw_params(
bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[j].memclk;
bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[j].voltage;
bw_params->clk_table.entries[i].dcfclk_mhz = find_dcfclk_for_voltage(clock_table, clock_table->DfPstateTable[j].voltage);
+
+ /* Now update clocks we do read */
+ bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk;
+ bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk;
}
bw_params->clk_table.entries[i].fclk_mhz = clock_table->DfPstateTable[j].fclk;
bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[j].memclk;
bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[j].voltage;
bw_params->clk_table.entries[i].dcfclk_mhz = find_max_clk_value(clock_table->DcfClocks, VG_NUM_DCFCLK_DPM_LEVELS);
+ bw_params->clk_table.entries[i].dispclk_mhz = find_max_clk_value(clock_table->DispClocks, VG_NUM_DISPCLK_DPM_LEVELS);
+ bw_params->clk_table.entries[i].dppclk_mhz = find_max_clk_value(clock_table->DppClocks, VG_NUM_DPPCLK_DPM_LEVELS);
bw_params->vram_type = bios_info->memory_type;
bw_params->num_channels = bios_info->ma_channel_number;
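The new dispclk/dppclk entries are filled from the largest value the PMFW clock table reports. find_max_clk_value() is assumed here to be a simple linear scan over the enabled levels, along these lines:

    #include <stdint.h>
    #include <stdio.h>

    /* hedged sketch of the helper's assumed behavior */
    static uint32_t find_max_clk_value(const uint32_t *clocks, uint32_t num_levels)
    {
            uint32_t max_clk = 0, i;

            for (i = 0; i < num_levels; i++)
                    max_clk = clocks[i] > max_clk ? clocks[i] : max_clk;

            return max_clk;
    }

    int main(void)
    {
            uint32_t disp_clocks[4] = { 600, 1200, 960, 300 };  /* MHz, made up */

            printf("max dispclk: %u MHz\n", find_max_clk_value(disp_clocks, 4));
            return 0;
    }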
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
index bc123f1884da..051052bd10c9 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
@@ -47,7 +47,7 @@
#include "dcn30/dcn30_clk_mgr.h"
#include "dc_dmub_srv.h"
-#include "link.h"
+#include "link_service.h"
#include "logger_types.h"
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
index 91d872d6d392..9e63fa72101c 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
@@ -48,7 +48,7 @@
#include "dcn31/dcn31_clk_mgr.h"
#include "dc_dmub_srv.h"
-#include "link.h"
+#include "link_service.h"
#include "dcn314_smu.h"
@@ -77,6 +77,7 @@ static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x02401800, 0, 0, 0,
#undef DC_LOGGER
#define DC_LOGGER \
clk_mgr->base.base.ctx->logger
+
#define regCLK1_CLK_PLL_REQ 0x0237
#define regCLK1_CLK_PLL_REQ_BASE_IDX 0
@@ -87,8 +88,70 @@ static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x02401800, 0, 0, 0,
#define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L
#define CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L
+#define regCLK1_CLK0_DFS_CNTL 0x0269
+#define regCLK1_CLK0_DFS_CNTL_BASE_IDX 0
+#define regCLK1_CLK1_DFS_CNTL 0x026c
+#define regCLK1_CLK1_DFS_CNTL_BASE_IDX 0
+#define regCLK1_CLK2_DFS_CNTL 0x026f
+#define regCLK1_CLK2_DFS_CNTL_BASE_IDX 0
+#define regCLK1_CLK3_DFS_CNTL 0x0272
+#define regCLK1_CLK3_DFS_CNTL_BASE_IDX 0
+#define regCLK1_CLK4_DFS_CNTL 0x0275
+#define regCLK1_CLK4_DFS_CNTL_BASE_IDX 0
+#define regCLK1_CLK5_DFS_CNTL 0x0278
+#define regCLK1_CLK5_DFS_CNTL_BASE_IDX 0
+
+#define regCLK1_CLK0_CURRENT_CNT 0x02fb
+#define regCLK1_CLK0_CURRENT_CNT_BASE_IDX 0
+#define regCLK1_CLK1_CURRENT_CNT 0x02fc
+#define regCLK1_CLK1_CURRENT_CNT_BASE_IDX 0
+#define regCLK1_CLK2_CURRENT_CNT 0x02fd
+#define regCLK1_CLK2_CURRENT_CNT_BASE_IDX 0
+#define regCLK1_CLK3_CURRENT_CNT 0x02fe
+#define regCLK1_CLK3_CURRENT_CNT_BASE_IDX 0
+#define regCLK1_CLK4_CURRENT_CNT 0x02ff
+#define regCLK1_CLK4_CURRENT_CNT_BASE_IDX 0
+#define regCLK1_CLK5_CURRENT_CNT 0x0300
+#define regCLK1_CLK5_CURRENT_CNT_BASE_IDX 0
+
+#define regCLK1_CLK0_BYPASS_CNTL 0x028a
+#define regCLK1_CLK0_BYPASS_CNTL_BASE_IDX 0
+#define regCLK1_CLK1_BYPASS_CNTL 0x0293
+#define regCLK1_CLK1_BYPASS_CNTL_BASE_IDX 0
#define regCLK1_CLK2_BYPASS_CNTL 0x029c
#define regCLK1_CLK2_BYPASS_CNTL_BASE_IDX 0
+#define regCLK1_CLK3_BYPASS_CNTL 0x02a5
+#define regCLK1_CLK3_BYPASS_CNTL_BASE_IDX 0
+#define regCLK1_CLK4_BYPASS_CNTL 0x02ae
+#define regCLK1_CLK4_BYPASS_CNTL_BASE_IDX 0
+#define regCLK1_CLK5_BYPASS_CNTL 0x02b7
+#define regCLK1_CLK5_BYPASS_CNTL_BASE_IDX 0
+
+#define regCLK1_CLK0_DS_CNTL 0x0283
+#define regCLK1_CLK0_DS_CNTL_BASE_IDX 0
+#define regCLK1_CLK1_DS_CNTL 0x028c
+#define regCLK1_CLK1_DS_CNTL_BASE_IDX 0
+#define regCLK1_CLK2_DS_CNTL 0x0295
+#define regCLK1_CLK2_DS_CNTL_BASE_IDX 0
+#define regCLK1_CLK3_DS_CNTL 0x029e
+#define regCLK1_CLK3_DS_CNTL_BASE_IDX 0
+#define regCLK1_CLK4_DS_CNTL 0x02a7
+#define regCLK1_CLK4_DS_CNTL_BASE_IDX 0
+#define regCLK1_CLK5_DS_CNTL 0x02b0
+#define regCLK1_CLK5_DS_CNTL_BASE_IDX 0
+
+#define regCLK1_CLK0_ALLOW_DS 0x0284
+#define regCLK1_CLK0_ALLOW_DS_BASE_IDX 0
+#define regCLK1_CLK1_ALLOW_DS 0x028d
+#define regCLK1_CLK1_ALLOW_DS_BASE_IDX 0
+#define regCLK1_CLK2_ALLOW_DS 0x0296
+#define regCLK1_CLK2_ALLOW_DS_BASE_IDX 0
+#define regCLK1_CLK3_ALLOW_DS 0x029f
+#define regCLK1_CLK3_ALLOW_DS_BASE_IDX 0
+#define regCLK1_CLK4_ALLOW_DS 0x02a8
+#define regCLK1_CLK4_ALLOW_DS_BASE_IDX 0
+#define regCLK1_CLK5_ALLOW_DS 0x02b1
+#define regCLK1_CLK5_ALLOW_DS_BASE_IDX 0
#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL__SHIFT 0x0
#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV__SHIFT 0x10
@@ -185,6 +248,8 @@ void dcn314_init_clocks(struct clk_mgr *clk_mgr)
{
struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz;
+ struct clk_mgr_dcn314 *clk_mgr_dcn314 = TO_CLK_MGR_DCN314(clk_mgr_int);
+ struct clk_log_info log_info = {0};
memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
// Assumption is that boot state always supports pstate
@@ -200,6 +265,9 @@ void dcn314_init_clocks(struct clk_mgr *clk_mgr)
dce_adjust_dp_ref_freq_for_ss(clk_mgr_int, clk_mgr->dprefclk_khz);
else
clk_mgr->dp_dto_source_clock_in_khz = clk_mgr->dprefclk_khz;
+
+ dcn314_dump_clk_registers(&clk_mgr->boot_snapshot, &clk_mgr_dcn314->base.base, &log_info);
+ clk_mgr->clks.dispclk_khz = clk_mgr->boot_snapshot.dispclk * 1000;
}
void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
@@ -218,6 +286,8 @@ void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
if (dc->work_arounds.skip_clock_update)
return;
+ display_count = dcn314_get_active_display_cnt_wa(dc, context);
+
/*
* if it is safe to lower, but we are already in the lower state, we don't have to do anything
* also if safe to lower is false, we just go in the higher state
@@ -236,7 +306,6 @@ void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
}
/* check that we're not already in lower */
if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
- display_count = dcn314_get_active_display_cnt_wa(dc, context);
/* if we can go lower, go lower */
if (display_count == 0) {
union display_idle_optimization_u idle_info = { 0 };
@@ -293,11 +362,19 @@ void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
update_dppclk = true;
}
- if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
+ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz) &&
+ (new_clocks->dispclk_khz > 0 || (safe_to_lower && display_count == 0))) {
+ int requested_dispclk_khz = new_clocks->dispclk_khz;
+
dcn314_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true);
+ /* Clamp the requested clock to the PMFW minimum, if one is set. */
+ if (dc->debug.min_disp_clk_khz > 0 && requested_dispclk_khz < dc->debug.min_disp_clk_khz)
+ requested_dispclk_khz = dc->debug.min_disp_clk_khz;
+
+ dcn314_smu_set_dispclk(clk_mgr, requested_dispclk_khz);
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
- dcn314_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
+
dcn314_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false);
update_dispclk = true;
@@ -385,10 +462,65 @@ bool dcn314_are_clock_states_equal(struct dc_clocks *a,
return true;
}
-static void dcn314_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
+
+static void dcn314_dump_clk_registers_internal(struct dcn35_clk_internal *internal, struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ // read dtbclk
+ internal->CLK1_CLK4_CURRENT_CNT = REG_READ(CLK1_CLK4_CURRENT_CNT);
+ internal->CLK1_CLK4_BYPASS_CNTL = REG_READ(CLK1_CLK4_BYPASS_CNTL);
+
+ // read dcfclk
+ internal->CLK1_CLK3_CURRENT_CNT = REG_READ(CLK1_CLK3_CURRENT_CNT);
+ internal->CLK1_CLK3_BYPASS_CNTL = REG_READ(CLK1_CLK3_BYPASS_CNTL);
+
+ // read dcf deep sleep divider
+ internal->CLK1_CLK3_DS_CNTL = REG_READ(CLK1_CLK3_DS_CNTL);
+ internal->CLK1_CLK3_ALLOW_DS = REG_READ(CLK1_CLK3_ALLOW_DS);
+
+ // read dppclk
+ internal->CLK1_CLK1_CURRENT_CNT = REG_READ(CLK1_CLK1_CURRENT_CNT);
+ internal->CLK1_CLK1_BYPASS_CNTL = REG_READ(CLK1_CLK1_BYPASS_CNTL);
+
+ // read dprefclk
+ internal->CLK1_CLK2_CURRENT_CNT = REG_READ(CLK1_CLK2_CURRENT_CNT);
+ internal->CLK1_CLK2_BYPASS_CNTL = REG_READ(CLK1_CLK2_BYPASS_CNTL);
+
+ // read dispclk
+ internal->CLK1_CLK0_CURRENT_CNT = REG_READ(CLK1_CLK0_CURRENT_CNT);
+ internal->CLK1_CLK0_BYPASS_CNTL = REG_READ(CLK1_CLK0_BYPASS_CNTL);
+}
+
+void dcn314_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info)
{
- return;
+ struct dcn35_clk_internal internal = {0};
+
+ dcn314_dump_clk_registers_internal(&internal, clk_mgr_base);
+
+ regs_and_bypass->dcfclk = internal.CLK1_CLK3_CURRENT_CNT / 10;
+ regs_and_bypass->dcf_deep_sleep_divider = internal.CLK1_CLK3_DS_CNTL / 10;
+ regs_and_bypass->dcf_deep_sleep_allow = internal.CLK1_CLK3_ALLOW_DS;
+ regs_and_bypass->dprefclk = internal.CLK1_CLK2_CURRENT_CNT / 10;
+ regs_and_bypass->dispclk = internal.CLK1_CLK0_CURRENT_CNT / 10;
+ regs_and_bypass->dppclk = internal.CLK1_CLK1_CURRENT_CNT / 10;
+ regs_and_bypass->dtbclk = internal.CLK1_CLK4_CURRENT_CNT / 10;
+
+ /* bypass select is a 3-bit field; the driver treats values above 4 as invalid */
+ regs_and_bypass->dppclk_bypass = internal.CLK1_CLK1_BYPASS_CNTL & 0x0007;
+ if (regs_and_bypass->dppclk_bypass > 4)
+ regs_and_bypass->dppclk_bypass = 0;
+ regs_and_bypass->dcfclk_bypass = internal.CLK1_CLK3_BYPASS_CNTL & 0x0007;
+ if (regs_and_bypass->dcfclk_bypass > 4)
+ regs_and_bypass->dcfclk_bypass = 0;
+ regs_and_bypass->dispclk_bypass = internal.CLK1_CLK0_BYPASS_CNTL & 0x0007;
+ if (regs_and_bypass->dispclk_bypass > 4)
+ regs_and_bypass->dispclk_bypass = 0;
+ regs_and_bypass->dprefclk_bypass = internal.CLK1_CLK2_BYPASS_CNTL & 0x0007;
+ if (regs_and_bypass->dprefclk_bypass > 4)
+ regs_and_bypass->dprefclk_bypass = 0;
}
static struct clk_bw_params dcn314_bw_params = {
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h
index 002c28e80720..0577eb527bc3 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h
@@ -65,4 +65,9 @@ void dcn314_clk_mgr_construct(struct dc_context *ctx,
void dcn314_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int);
+
+void dcn314_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
+ struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info);
+
#endif //__DCN314_CLK_MGR_H__
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
index e4d22f74f986..b315ed91e010 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
@@ -46,7 +46,7 @@
#define DC_LOGGER \
clk_mgr->base.base.ctx->logger
-#include "link.h"
+#include "link_service.h"
#define TO_CLK_MGR_DCN315(clk_mgr)\
container_of(clk_mgr, struct clk_mgr_dcn315, base)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
index 49efea0c8fcf..1769b1f26e75 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
@@ -39,7 +39,7 @@
#include "dcn316_smu.h"
#include "dm_helpers.h"
#include "dc_dmub_srv.h"
-#include "link.h"
+#include "link_service.h"
// DCN316 this is CLK1 instance
#define MAX_INSTANCE 7
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
index 084994c650c4..7da7b41bd092 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
@@ -33,7 +33,7 @@
#include "reg_helper.h"
#include "core_types.h"
#include "dm_helpers.h"
-#include "link.h"
+#include "link_service.h"
#include "dc_state_priv.h"
#include "atomfirmware.h"
#include "dcn32_smu13_driver_if.h"
@@ -1047,11 +1047,8 @@ static void dcn32_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
&num_entries_per_clk->num_fclk_levels);
clk_mgr_base->bw_params->dc_mode_limit.fclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_FCLK);
- if (num_entries_per_clk->num_memclk_levels >= num_entries_per_clk->num_fclk_levels) {
- num_levels = num_entries_per_clk->num_memclk_levels;
- } else {
- num_levels = num_entries_per_clk->num_fclk_levels;
- }
+ num_levels = max(num_entries_per_clk->num_memclk_levels, num_entries_per_clk->num_fclk_levels);
+
clk_mgr_base->bw_params->max_memclk_mhz =
clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_memclk_levels - 1].memclk_mhz;
clk_mgr_base->bw_params->clk_table.num_entries = num_levels ? num_levels : 1;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c
index cf2d35363e8b..5d80fdf63ffc 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c
@@ -63,7 +63,8 @@ static uint32_t dcn32_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, un
udelay(delay_us);
} while (max_retries--);
- TRACE_SMU_DELAY(delay_us * (initial_max_retries - max_retries), clk_mgr->base.ctx);
+ TRACE_SMU_MSG_DELAY(0, 0, delay_us * (initial_max_retries - max_retries), clk_mgr->base.ctx);
+
return reg;
}
@@ -120,7 +121,7 @@ static uint32_t dcn32_smu_wait_for_response_delay(struct clk_mgr_internal *clk_m
*total_delay_us += delay_us;
} while (max_retries--);
- TRACE_SMU_DELAY(*total_delay_us, clk_mgr->base.ctx);
+ TRACE_SMU_MSG_DELAY(0, 0, *total_delay_us, clk_mgr->base.ctx);
return reg;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
index 0e638bc6bf77..b11383fba35f 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
@@ -44,7 +44,7 @@
#include "dcn31/dcn31_clk_mgr.h"
#include "dc_dmub_srv.h"
-#include "link.h"
+#include "link_service.h"
#include "logger_types.h"
#undef DC_LOGGER
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
index b59703467128..306016c1f109 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
@@ -13,7 +13,7 @@
#include "reg_helper.h"
#include "core_types.h"
#include "dm_helpers.h"
-#include "link.h"
+#include "link_service.h"
#include "dc_state_priv.h"
#include "atomfirmware.h"
@@ -162,7 +162,7 @@ static void dcn401_init_single_clock(struct clk_mgr_internal *clk_mgr, PPCLK_e c
unsigned int i;
char *entry_i = (char *)entry_0;
- uint32_t ret = dcn30_smu_get_dpm_freq_by_index(clk_mgr, clk, 0xFF);
+ uint32_t ret = dcn401_smu_get_dpm_freq_by_index(clk_mgr, clk, 0xFF);
if (ret & (1 << 31))
/* fine-grained, only min and max */
@@ -174,7 +174,7 @@ static void dcn401_init_single_clock(struct clk_mgr_internal *clk_mgr, PPCLK_e c
/* if the initial message failed, num_levels will be 0 */
for (i = 0; i < *num_levels && i < ARRAY_SIZE(clk_mgr->base.bw_params->clk_table.entries); i++) {
- *((unsigned int *)entry_i) = (dcn30_smu_get_dpm_freq_by_index(clk_mgr, clk, i) & 0xFFFF);
+ *((unsigned int *)entry_i) = (dcn401_smu_get_dpm_freq_by_index(clk_mgr, clk, i) & 0xFFFF);
entry_i += sizeof(clk_mgr->base.bw_params->clk_table.entries[0]);
}
}
@@ -231,20 +231,20 @@ void dcn401_init_clocks(struct clk_mgr *clk_mgr_base)
clk_mgr->smu_present = false;
clk_mgr->dpm_present = false;
- if (!clk_mgr_base->force_smu_not_present && dcn30_smu_get_smu_version(clk_mgr, &clk_mgr->smu_ver))
+ if (!clk_mgr_base->force_smu_not_present && dcn401_smu_get_smu_version(clk_mgr, &clk_mgr->smu_ver))
clk_mgr->smu_present = true;
if (!clk_mgr->smu_present)
return;
- dcn30_smu_check_driver_if_version(clk_mgr);
- dcn30_smu_check_msg_header_version(clk_mgr);
+ dcn401_smu_check_driver_if_version(clk_mgr);
+ dcn401_smu_check_msg_header_version(clk_mgr);
/* DCFCLK */
dcn401_init_single_clock(clk_mgr, PPCLK_DCFCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].dcfclk_mhz,
&num_entries_per_clk->num_dcfclk_levels);
- clk_mgr_base->bw_params->dc_mode_limit.dcfclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DCFCLK);
+ clk_mgr_base->bw_params->dc_mode_limit.dcfclk_mhz = dcn401_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DCFCLK);
if (num_entries_per_clk->num_dcfclk_levels && clk_mgr_base->bw_params->dc_mode_limit.dcfclk_mhz ==
clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_dcfclk_levels - 1].dcfclk_mhz)
clk_mgr_base->bw_params->dc_mode_limit.dcfclk_mhz = 0;
@@ -253,7 +253,7 @@ void dcn401_init_clocks(struct clk_mgr *clk_mgr_base)
dcn401_init_single_clock(clk_mgr, PPCLK_SOCCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].socclk_mhz,
&num_entries_per_clk->num_socclk_levels);
- clk_mgr_base->bw_params->dc_mode_limit.socclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_SOCCLK);
+ clk_mgr_base->bw_params->dc_mode_limit.socclk_mhz = dcn401_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_SOCCLK);
if (num_entries_per_clk->num_socclk_levels && clk_mgr_base->bw_params->dc_mode_limit.socclk_mhz ==
clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_socclk_levels - 1].socclk_mhz)
clk_mgr_base->bw_params->dc_mode_limit.socclk_mhz = 0;
@@ -263,7 +263,7 @@ void dcn401_init_clocks(struct clk_mgr *clk_mgr_base)
dcn401_init_single_clock(clk_mgr, PPCLK_DTBCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].dtbclk_mhz,
&num_entries_per_clk->num_dtbclk_levels);
- clk_mgr_base->bw_params->dc_mode_limit.dtbclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DTBCLK);
+ clk_mgr_base->bw_params->dc_mode_limit.dtbclk_mhz = dcn401_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DTBCLK);
if (num_entries_per_clk->num_dtbclk_levels && clk_mgr_base->bw_params->dc_mode_limit.dtbclk_mhz ==
clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_dtbclk_levels - 1].dtbclk_mhz)
clk_mgr_base->bw_params->dc_mode_limit.dtbclk_mhz = 0;
@@ -273,7 +273,7 @@ void dcn401_init_clocks(struct clk_mgr *clk_mgr_base)
dcn401_init_single_clock(clk_mgr, PPCLK_DISPCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].dispclk_mhz,
&num_entries_per_clk->num_dispclk_levels);
- clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DISPCLK);
+ clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz = dcn401_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DISPCLK);
if (num_entries_per_clk->num_dispclk_levels && clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz ==
clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_dispclk_levels - 1].dispclk_mhz)
clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz = 0;
@@ -1318,8 +1318,8 @@ static void dcn401_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
table->Watermarks.WatermarkRow[i].WmSetting = i;
table->Watermarks.WatermarkRow[i].Flags = clk_mgr->base.bw_params->wm_table.nv_entries[i].pmfw_breakdown.wm_type;
}
- dcn30_smu_set_dram_addr_high(clk_mgr, clk_mgr->wm_range_table_addr >> 32);
- dcn30_smu_set_dram_addr_low(clk_mgr, clk_mgr->wm_range_table_addr & 0xFFFFFFFF);
+ dcn401_smu_set_dram_addr_high(clk_mgr, clk_mgr->wm_range_table_addr >> 32);
+ dcn401_smu_set_dram_addr_low(clk_mgr, clk_mgr->wm_range_table_addr & 0xFFFFFFFF);
dcn401_smu_transfer_wm_table_dram_2_smu(clk_mgr);
}
@@ -1390,7 +1390,7 @@ static void dcn401_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_memclk_levels - 1].memclk_mhz;
}
- clk_mgr_base->bw_params->dc_mode_limit.memclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_UCLK);
+ clk_mgr_base->bw_params->dc_mode_limit.memclk_mhz = dcn401_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_UCLK);
if (num_entries_per_clk->num_memclk_levels && clk_mgr_base->bw_params->dc_mode_limit.memclk_mhz ==
clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_memclk_levels - 1].memclk_mhz)
clk_mgr_base->bw_params->dc_mode_limit.memclk_mhz = 0;
@@ -1399,16 +1399,12 @@ static void dcn401_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
dcn401_init_single_clock(clk_mgr, PPCLK_FCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].fclk_mhz,
&num_entries_per_clk->num_fclk_levels);
- clk_mgr_base->bw_params->dc_mode_limit.fclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_FCLK);
+ clk_mgr_base->bw_params->dc_mode_limit.fclk_mhz = dcn401_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_FCLK);
if (num_entries_per_clk->num_fclk_levels && clk_mgr_base->bw_params->dc_mode_limit.fclk_mhz ==
clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_fclk_levels - 1].fclk_mhz)
clk_mgr_base->bw_params->dc_mode_limit.fclk_mhz = 0;
- if (num_entries_per_clk->num_memclk_levels >= num_entries_per_clk->num_fclk_levels) {
- num_levels = num_entries_per_clk->num_memclk_levels;
- } else {
- num_levels = num_entries_per_clk->num_fclk_levels;
- }
+ num_levels = max(num_entries_per_clk->num_memclk_levels, num_entries_per_clk->num_fclk_levels);
clk_mgr_base->bw_params->clk_table.num_entries = num_levels ? num_levels : 1;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.c
index 21c35528f61f..3a263840893e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.c
@@ -57,6 +57,8 @@ static bool dcn401_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, uin
/* Wait for response register to be ready */
dcn401_smu_wait_for_response(clk_mgr, 10, 200000);
+ TRACE_SMU_MSG_ENTER(msg_id, param_in, clk_mgr->base.ctx);
+
/* Clear response register */
REG_WRITE(DAL_RESP_REG, 0);
@@ -71,9 +73,11 @@ static bool dcn401_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, uin
if (param_out)
*param_out = REG_READ(DAL_ARG_REG);
+ TRACE_SMU_MSG_EXIT(true, param_out ? *param_out : 0, clk_mgr->base.ctx);
return true;
}
+ TRACE_SMU_MSG_EXIT(false, 0, clk_mgr->base.ctx);
return false;
}
@@ -102,8 +106,6 @@ static uint32_t dcn401_smu_wait_for_response_delay(struct clk_mgr_internal *clk_
*total_delay_us += delay_us;
} while (max_retries--);
- TRACE_SMU_DELAY(*total_delay_us, clk_mgr->base.ctx);
-
return reg;
}
@@ -115,6 +117,8 @@ static bool dcn401_smu_send_msg_with_param_delay(struct clk_mgr_internal *clk_mg
/* Wait for response register to be ready */
dcn401_smu_wait_for_response_delay(clk_mgr, 10, 200000, &delay1_us);
+ TRACE_SMU_MSG_ENTER(msg_id, param_in, clk_mgr->base.ctx);
+
/* Clear response register */
REG_WRITE(DAL_RESP_REG, 0);
@@ -124,18 +128,71 @@ static bool dcn401_smu_send_msg_with_param_delay(struct clk_mgr_internal *clk_mg
/* Trigger the message transaction by writing the message ID */
REG_WRITE(DAL_MSG_REG, msg_id);
- TRACE_SMU_MSG(msg_id, param_in, clk_mgr->base.ctx);
-
/* Wait for response */
if (dcn401_smu_wait_for_response_delay(clk_mgr, 10, 200000, &delay2_us) == DALSMC_Result_OK) {
if (param_out)
*param_out = REG_READ(DAL_ARG_REG);
*total_delay_us = delay1_us + delay2_us;
+ TRACE_SMU_MSG_EXIT(true, param_out ? *param_out : 0, clk_mgr->base.ctx);
return true;
}
*total_delay_us = delay1_us + 2000000;
+ TRACE_SMU_MSG_EXIT(false, 0, clk_mgr->base.ctx);
+ return false;
+}
+
+bool dcn401_smu_get_smu_version(struct clk_mgr_internal *clk_mgr, unsigned int *version)
+{
+ smu_print("SMU Get SMU version\n");
+
+ if (dcn401_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_GetSmuVersion, 0, version)) {
+
+ smu_print("SMU version: %d\n", *version);
+
+ return true;
+ }
+
+ return false;
+}
+
+/* Message output should match SMU14_DRIVER_IF_VERSION in smu14_driver_if.h */
+bool dcn401_smu_check_driver_if_version(struct clk_mgr_internal *clk_mgr)
+{
+ uint32_t response = 0;
+
+ smu_print("SMU Check driver if version\n");
+
+ if (dcn401_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_GetDriverIfVersion, 0, &response)) {
+
+ smu_print("SMU driver if version: %d\n", response);
+
+ if (response == SMU14_DRIVER_IF_VERSION)
+ return true;
+ }
+
+ return false;
+}
+
+/* Message output should match DALSMC_VERSION in dalsmc.h */
+bool dcn401_smu_check_msg_header_version(struct clk_mgr_internal *clk_mgr)
+{
+ uint32_t response = 0;
+
+ smu_print("SMU Check msg header version\n");
+
+ if (dcn401_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_GetMsgHeaderVersion, 0, &response)) {
+
+ smu_print("SMU msg header version: %d\n", response);
+
+ if (response == DALSMC_VERSION)
+ return true;
+ }
+
return false;
}
@@ -163,6 +220,22 @@ void dcn401_smu_send_cab_for_uclk_message(struct clk_mgr_internal *clk_mgr, unsi
smu_print("Numways for SubVP : %d\n", num_ways);
}
+void dcn401_smu_set_dram_addr_high(struct clk_mgr_internal *clk_mgr, uint32_t addr_high)
+{
+ smu_print("SMU Set DRAM addr high: %d\n", addr_high);
+
+ dcn401_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_SetDalDramAddrHigh, addr_high, NULL);
+}
+
+void dcn401_smu_set_dram_addr_low(struct clk_mgr_internal *clk_mgr, uint32_t addr_low)
+{
+ smu_print("SMU Set DRAM addr low: %d\n", addr_low);
+
+ dcn401_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_SetDalDramAddrLow, addr_low, NULL);
+}
+
void dcn401_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr)
{
smu_print("SMU Transfer WM table DRAM 2 SMU\n");
@@ -348,3 +421,52 @@ unsigned int dcn401_smu_get_num_of_umc_channels(struct clk_mgr_internal *clk_mgr
return response;
}
+
+/*
+ * Frequency in MHz returned in lower 16 bits for valid DPM level
+ *
+ * Call with dpm_level = 0xFF to query features, return value will be:
+ * Bits 7:0 - number of DPM levels
+ * Bit 28 - 1 = auto DPM on
+ * Bit 29 - 1 = sweep DPM on
+ * Bit 30 - 1 = forced DPM on
+ * Bit 31 - 0 = discrete, 1 = fine-grained
+ *
+ * With fine-grained DPM, only min and max frequencies will be reported
+ *
+ * Returns 0 on failure
+ */
+unsigned int dcn401_smu_get_dpm_freq_by_index(struct clk_mgr_internal *clk_mgr, uint32_t clk, uint8_t dpm_level)
+{
+ uint32_t response = 0;
+
+ /* bits 23:16 for clock type, lower 8 bits for DPM level */
+ uint32_t param = (clk << 16) | dpm_level;
+
+ smu_print("SMU Get dpm freq by index: clk = %d, dpm_level = %d\n", clk, dpm_level);
+
+ dcn401_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_GetDpmFreqByIndex, param, &response);
+
+ smu_print("SMU dpm freq: %d MHz\n", response);
+
+ return response;
+}
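Given the bit layout documented above, decoding a dpm_level == 0xFF response is mechanical. A small example with a fabricated response word:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t resp = 0x80000002;     /* fabricated: fine-grained, 2 levels */

            printf("DPM levels:   %u\n", resp & 0xFF);
            printf("auto DPM:     %u\n", (resp >> 28) & 1);
            printf("sweep DPM:    %u\n", (resp >> 29) & 1);
            printf("forced DPM:   %u\n", (resp >> 30) & 1);
            printf("fine-grained: %u\n", (resp >> 31) & 1);
            return 0;
    }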
+
+/* Returns the max DPM frequency in DC mode in MHz, 0 on failure */
+unsigned int dcn401_smu_get_dc_mode_max_dpm_freq(struct clk_mgr_internal *clk_mgr, uint32_t clk)
+{
+ uint32_t response = 0;
+
+ /* bits 23:16 for clock type */
+ uint32_t param = clk << 16;
+
+ smu_print("SMU Get DC mode max DPM freq: clk = %d\n", clk);
+
+ dcn401_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_GetDcModeMaxDpmFreq, param, &response);
+
+ smu_print("SMU DC mode max DMP freq: %d MHz\n", response);
+
+ return response;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.h
index e02eb1294b37..4f5ac603e822 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.h
@@ -7,11 +7,17 @@
#include "os_types.h"
#include "core_types.h"
-#include "dcn32/dcn32_clk_mgr_smu_msg.h"
+struct clk_mgr_internal;
+
+bool dcn401_smu_get_smu_version(struct clk_mgr_internal *clk_mgr, unsigned int *version);
+bool dcn401_smu_check_driver_if_version(struct clk_mgr_internal *clk_mgr);
+bool dcn401_smu_check_msg_header_version(struct clk_mgr_internal *clk_mgr);
void dcn401_smu_send_fclk_pstate_message(struct clk_mgr_internal *clk_mgr, bool support);
void dcn401_smu_send_uclk_pstate_message(struct clk_mgr_internal *clk_mgr, bool support);
void dcn401_smu_send_cab_for_uclk_message(struct clk_mgr_internal *clk_mgr, unsigned int num_ways);
+void dcn401_smu_set_dram_addr_high(struct clk_mgr_internal *clk_mgr, uint32_t addr_high);
+void dcn401_smu_set_dram_addr_low(struct clk_mgr_internal *clk_mgr, uint32_t addr_low);
void dcn401_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr);
void dcn401_smu_set_pme_workaround(struct clk_mgr_internal *clk_mgr);
unsigned int dcn401_smu_set_hard_min_by_freq(struct clk_mgr_internal *clk_mgr, uint32_t clk, uint16_t freq_mhz);
@@ -29,5 +35,7 @@ bool dcn401_smu_set_subvp_uclk_fclk_hardmin(struct clk_mgr_internal *clk_mgr,
void dcn401_smu_set_min_deep_sleep_dcef_clk(struct clk_mgr_internal *clk_mgr, uint32_t freq_mhz);
void dcn401_smu_set_num_of_displays(struct clk_mgr_internal *clk_mgr, uint32_t num_displays);
unsigned int dcn401_smu_get_num_of_umc_channels(struct clk_mgr_internal *clk_mgr);
+unsigned int dcn401_smu_get_dc_mode_max_dpm_freq(struct clk_mgr_internal *clk_mgr, uint32_t clk);
+unsigned int dcn401_smu_get_dpm_freq_by_index(struct clk_mgr_internal *clk_mgr, uint32_t clk, uint8_t dpm_level);
#endif /* __DCN401_CLK_MGR_SMU_MSG_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index dcc48b5238e5..5f2d5638c819 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -60,7 +60,7 @@
#include "link_encoder.h"
#include "link_enc_cfg.h"
-#include "link.h"
+#include "link_service.h"
#include "dm_helpers.h"
#include "mem_input.h"
@@ -84,6 +84,7 @@
#if defined(CONFIG_DRM_AMD_DC_FP)
#include "dml2/dml2_internal_types.h"
+#include "soc_and_ip_translator.h"
#endif
#include "dce/dmub_outbox.h"
@@ -459,7 +460,9 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
* avoid conflicting with firmware updates.
*/
if (dc->ctx->dce_version > DCE_VERSION_MAX) {
- if (dc->optimized_required || dc->wm_optimized_required) {
+ if (dc->optimized_required &&
+ (stream->adjust.v_total_max != adjust->v_total_max ||
+ stream->adjust.v_total_min != adjust->v_total_min)) {
stream->adjust.timing_adjust_pending = true;
return false;
}
@@ -947,7 +950,9 @@ static void dc_destruct(struct dc *dc)
}
dc_destroy_resource_pool(dc);
-
+#ifdef CONFIG_DRM_AMD_DC_FP
+ dc_destroy_soc_and_ip_translator(&dc->soc_and_ip_translator);
+#endif
if (dc->link_srv)
link_destroy_link_service(&dc->link_srv);
@@ -1151,6 +1156,9 @@ static bool dc_construct(struct dc *dc,
dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
DC_FP_END();
}
+ dc->soc_and_ip_translator = dc_create_soc_and_ip_translator(dc_ctx->dce_version);
+ if (!dc->soc_and_ip_translator)
+ goto fail;
#endif
if (!create_links(dc, init_params->num_virtual_links))
@@ -2411,6 +2419,18 @@ enum dc_status dc_commit_streams(struct dc *dc, struct dc_commit_streams_params
goto fail;
}
+ /*
+ * If the transition is not already seamless, make it seamless by inserting an intermediate minimal transition
+ */
+ if (dc->hwss.is_pipe_topology_transition_seamless &&
+ !dc->hwss.is_pipe_topology_transition_seamless(dc, dc->current_state, context)) {
+ res = commit_minimal_transition_state(dc, context);
+ if (res != DC_OK) {
+ BREAK_TO_DEBUGGER();
+ goto fail;
+ }
+ }
+
res = dc_commit_state_no_check(dc, context);
for (i = 0; i < params->stream_count; i++) {
@@ -2557,7 +2577,6 @@ void dc_post_update_surfaces_to_stream(struct dc *dc)
}
dc->optimized_required = false;
- dc->wm_optimized_required = false;
}
bool dc_set_generic_gpio_for_stereo(bool enable,
@@ -3036,8 +3055,6 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
} else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
dc->optimized_required = true;
}
-
- dc->optimized_required |= dc->wm_optimized_required;
}
return type;
@@ -3293,6 +3310,9 @@ static void copy_stream_update_to_stream(struct dc *dc,
if (update->adaptive_sync_infopacket)
stream->adaptive_sync_infopacket = *update->adaptive_sync_infopacket;
+ if (update->avi_infopacket)
+ stream->avi_infopacket = *update->avi_infopacket;
+
if (update->dither_option)
stream->dither_option = *update->dither_option;
@@ -3390,7 +3410,7 @@ static void update_seamless_boot_flags(struct dc *dc,
int surface_count,
struct dc_stream_state *stream)
{
- if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {
+ if (get_seamless_boot_stream_count(context) > 0 && (surface_count > 0 || stream->dpms_off)) {
/* Optimize seamless boot flag keeps clocks and watermarks high until
* first flip. After first flip, optimization is required to lower
* bandwidth. Important to note that it is expected UEFI will
@@ -3587,7 +3607,8 @@ static void commit_planes_do_stream_update(struct dc *dc,
stream_update->vsp_infopacket ||
stream_update->hfvsif_infopacket ||
stream_update->adaptive_sync_infopacket ||
- stream_update->vtem_infopacket) {
+ stream_update->vtem_infopacket ||
+ stream_update->avi_infopacket) {
resource_build_info_frame(pipe_ctx);
dc->hwss.update_info_frame(pipe_ctx);
@@ -4149,7 +4170,7 @@ static void commit_planes_for_stream(struct dc *dc,
}
if (dc->hwseq->funcs.wait_for_pipe_update_if_needed)
- dc->hwseq->funcs.wait_for_pipe_update_if_needed(dc, top_pipe_to_program, update_type == UPDATE_TYPE_FAST);
+ dc->hwseq->funcs.wait_for_pipe_update_if_needed(dc, top_pipe_to_program, update_type < UPDATE_TYPE_FULL);
if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
if (dc->hwss.subvp_pipe_control_lock)
@@ -5059,6 +5080,7 @@ static bool full_update_required(struct dc *dc,
stream_update->hfvsif_infopacket ||
stream_update->vtem_infopacket ||
stream_update->adaptive_sync_infopacket ||
+ stream_update->avi_infopacket ||
stream_update->dpms_off ||
stream_update->allow_freesync ||
stream_update->vrr_active_variable ||
@@ -5102,129 +5124,6 @@ static bool fast_update_only(struct dc *dc,
&& !full_update_required(dc, srf_updates, surface_count, stream_update, stream);
}
-static bool update_planes_and_stream_v1(struct dc *dc,
- struct dc_surface_update *srf_updates, int surface_count,
- struct dc_stream_state *stream,
- struct dc_stream_update *stream_update,
- struct dc_state *state)
-{
- const struct dc_stream_status *stream_status;
- enum surface_update_type update_type;
- struct dc_state *context;
- struct dc_context *dc_ctx = dc->ctx;
- int i, j;
- struct dc_fast_update fast_update[MAX_SURFACES] = {0};
-
- dc_exit_ips_for_hw_access(dc);
-
- populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
- stream_status = dc_stream_get_status(stream);
- context = dc->current_state;
-
- update_type = dc_check_update_surfaces_for_stream(
- dc, srf_updates, surface_count, stream_update, stream_status);
- /* It is possible to receive a flip for one plane while there are multiple flip_immediate planes in the same stream.
- * E.g. Desktop and MPO plane are flip_immediate but only the MPO plane received a flip
- * Force the other flip_immediate planes to flip so GSL doesn't wait for a flip that won't come.
- */
- force_immediate_gsl_plane_flip(dc, srf_updates, surface_count);
-
- if (update_type >= UPDATE_TYPE_FULL) {
-
- /* initialize scratch memory for building context */
- context = dc_state_create_copy(state);
- if (context == NULL) {
- DC_ERROR("Failed to allocate new validate context!\n");
- return false;
- }
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
- struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
-
- if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
- new_pipe->plane_state->force_full_update = true;
- }
- } else if (update_type == UPDATE_TYPE_FAST) {
- /*
- * Previous frame finished and HW is ready for optimization.
- */
- dc_post_update_surfaces_to_stream(dc);
- }
-
- for (i = 0; i < surface_count; i++) {
- struct dc_plane_state *surface = srf_updates[i].surface;
-
- copy_surface_update_to_plane(surface, &srf_updates[i]);
-
- if (update_type >= UPDATE_TYPE_MED) {
- for (j = 0; j < dc->res_pool->pipe_count; j++) {
- struct pipe_ctx *pipe_ctx =
- &context->res_ctx.pipe_ctx[j];
-
- if (pipe_ctx->plane_state != surface)
- continue;
-
- resource_build_scaling_params(pipe_ctx);
- }
- }
- }
-
- copy_stream_update_to_stream(dc, context, stream, stream_update);
-
- if (update_type >= UPDATE_TYPE_FULL) {
- if (dc->res_pool->funcs->validate_bandwidth(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING) != DC_OK) {
- DC_ERROR("Mode validation failed for stream update!\n");
- dc_state_release(context);
- return false;
- }
- }
-
- TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
-
- if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) &&
- !dc->debug.enable_legacy_fast_update) {
- commit_planes_for_stream_fast(dc,
- srf_updates,
- surface_count,
- stream,
- stream_update,
- update_type,
- context);
- } else {
- commit_planes_for_stream(
- dc,
- srf_updates,
- surface_count,
- stream,
- stream_update,
- update_type,
- context);
- }
- /*update current_State*/
- if (dc->current_state != context) {
-
- struct dc_state *old = dc->current_state;
-
- dc->current_state = context;
- dc_state_release(old);
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-
- if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
- pipe_ctx->plane_state->force_full_update = false;
- }
- }
-
- /* Legacy optimization path for DCE. */
- if (update_type >= UPDATE_TYPE_FULL && dc_ctx->dce_version < DCE_VERSION_MAX) {
- dc_post_update_surfaces_to_stream(dc);
- TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
- }
- return true;
-}
-
static bool update_planes_and_stream_v2(struct dc *dc,
struct dc_surface_update *srf_updates, int surface_count,
struct dc_stream_state *stream,
@@ -5482,12 +5381,10 @@ void dc_commit_updates_for_stream(struct dc *dc,
if (dc->ctx->dce_version >= DCN_VERSION_4_01) {
ret = update_planes_and_stream_v3(dc, srf_updates, surface_count,
stream, stream_update);
- } else if (dc->ctx->dce_version >= DCN_VERSION_3_2) {
+ } else {
ret = update_planes_and_stream_v2(dc, srf_updates, surface_count,
stream, stream_update);
- } else
- ret = update_planes_and_stream_v1(dc, srf_updates, surface_count, stream,
- stream_update, state);
+ }
if (ret && dc->ctx->dce_version >= DCN_VERSION_3_2)
clear_update_flags(srf_updates, surface_count, stream);
@@ -5727,8 +5624,8 @@ void dc_allow_idle_optimizations_internal(struct dc *dc, bool allow, char const
subvp_pipe_type[i] = dc_state_get_pipe_subvp_type(context, pipe);
}
}
-
- DC_LOG_DC("%s: allow_idle=%d\n HardMinUClk_Khz=%d HardMinDramclk_Khz=%d\n Pipe_0=%d Pipe_1=%d Pipe_2=%d Pipe_3=%d Pipe_4=%d Pipe_5=%d (caller=%s)\n",
+ if (!dc->caps.is_apu)
+ DC_LOG_DC("%s: allow_idle=%d\n HardMinUClk_Khz=%d HardMinDramclk_Khz=%d\n Pipe_0=%d Pipe_1=%d Pipe_2=%d Pipe_3=%d Pipe_4=%d Pipe_5=%d (caller=%s)\n",
__func__, allow, idle_fclk_khz, idle_dramclk_khz, subvp_pipe_type[0], subvp_pipe_type[1], subvp_pipe_type[2],
subvp_pipe_type[3], subvp_pipe_type[4], subvp_pipe_type[5], caller_name);
@@ -6463,3 +6360,21 @@ bool dc_can_clear_cursor_limit(struct dc *dc)
return false;
}
+
+void dc_get_underflow_debug_data_for_otg(struct dc *dc, int primary_otg_inst,
+ struct dc_underflow_debug_data *out_data)
+{
+ struct timing_generator *tg = NULL;
+
+ for (int i = 0; i < MAX_PIPES; i++) {
+ if (dc->res_pool->timing_generators[i] &&
+ dc->res_pool->timing_generators[i]->inst == primary_otg_inst) {
+ tg = dc->res_pool->timing_generators[i];
+ break;
+ }
+ }
+
+ dc_exit_ips_for_hw_access(dc);
+ if (dc->hwss.get_underflow_debug_data)
+ dc->hwss.get_underflow_debug_data(dc, tg, out_data);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
index ec4e80e5b6eb..d82b1cb467f4 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -1177,6 +1177,8 @@ void hwss_wait_for_odm_update_pending_complete(struct dc *dc, struct dc_state *c
tg = otg_master->stream_res.tg;
if (tg->funcs->wait_odm_doublebuffer_pending_clear)
tg->funcs->wait_odm_doublebuffer_pending_clear(tg);
+ if (tg->funcs->wait_otg_disable)
+ tg->funcs->wait_otg_disable(tg);
}
/* ODM update may require to reprogram blank pattern for each OPP */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
index 814f68d76257..a180f68f711c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
@@ -24,7 +24,7 @@
#include "link_enc_cfg.h"
#include "resource.h"
-#include "link.h"
+#include "link_service.h"
#define DC_LOGGER dc->ctx->logger
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
index 130455f2802a..9acd30019717 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
@@ -33,8 +33,9 @@
* dc.h with detail interface documentation, then add function implementation
* in this file which calls link functions.
*/
-#include "link.h"
+#include "link_service.h"
#include "dce/dce_i2c.h"
+
struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_index)
{
if (link_index >= MAX_LINKS)
@@ -520,3 +521,10 @@ enum dc_status dc_link_validate_dp_tunneling_bandwidth(const struct dc *dc, cons
return dc->link_srv->validate_dp_tunnel_bandwidth(dc, new_ctx);
}
+void dc_link_get_alpm_support(struct dc_link *link,
+ bool *auxless_support,
+ bool *auxwake_support)
+{
+ link->dc->link_srv->edp_get_alpm_support(link, auxless_support, auxwake_support);
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 4d6181e7c612..bc5dedf5f60c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -40,7 +40,7 @@
#include "virtual/virtual_stream_encoder.h"
#include "dpcd_defs.h"
#include "link_enc_cfg.h"
-#include "link.h"
+#include "link_service.h"
#include "clk_mgr.h"
#include "dc_state_priv.h"
#include "dc_stream_priv.h"
@@ -95,7 +95,6 @@
#define DC_LOGGER \
dc->ctx->logger
#define DC_LOGGER_INIT(logger)
-
#include "dml2/dml2_wrapper.h"
#define UNABLE_TO_SPLIT -1
@@ -165,7 +164,13 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
case FAMILY_NV:
dc_version = DCN_VERSION_2_0;
- if (asic_id.chip_id == DEVICE_ID_NV_13FE || asic_id.chip_id == DEVICE_ID_NV_143F) {
+ if (asic_id.chip_id == DEVICE_ID_NV_13FE ||
+ asic_id.chip_id == DEVICE_ID_NV_143F ||
+ asic_id.chip_id == DEVICE_ID_NV_13F9 ||
+ asic_id.chip_id == DEVICE_ID_NV_13FA ||
+ asic_id.chip_id == DEVICE_ID_NV_13FB ||
+ asic_id.chip_id == DEVICE_ID_NV_13FC ||
+ asic_id.chip_id == DEVICE_ID_NV_13DB) {
dc_version = DCN_VERSION_2_01;
break;
}
@@ -2143,7 +2148,7 @@ int resource_get_odm_slice_dst_width(struct pipe_ctx *otg_master,
h_active = timing->h_addressable +
timing->h_border_left +
timing->h_border_right +
- otg_master->hblank_borrow;
+ otg_master->dsc_padding_params.dsc_hactive_padding;
width = h_active / count;
if (otg_master->stream_res.tg)
@@ -4261,39 +4266,33 @@ fail:
return res;
}
+
/**
- * decide_hblank_borrow - Decides the horizontal blanking borrow value for a given pipe context.
+ * calculate_timing_params_for_dsc_with_padding - Calculates timing parameters for DSC with padding.
* @pipe_ctx: Pointer to the pipe context structure.
*
- * This function calculates the horizontal blanking borrow value for a given pipe context based on the
+ * This function calculates the timing parameters for a given pipe context based on the
* display stream compression (DSC) configuration. If the horizontal active pixels (hactive) are less
- * than the total width of the DSC slices, it sets the hblank_borrow value to the difference. If the
- * total horizontal timing minus the hblank_borrow value is less than 32, it resets the hblank_borrow
+ * than the total width of the DSC slices, it sets the dsc_hactive_padding value to the difference. If the
+ * total horizontal timing minus the dsc_hactive_padding value is less than 32, it resets the dsc_hactive_padding
* value to 0.
*/
-static void decide_hblank_borrow(struct pipe_ctx *pipe_ctx)
+static void calculate_timing_params_for_dsc_with_padding(struct pipe_ctx *pipe_ctx)
{
- uint32_t hactive;
- uint32_t ceil_slice_width;
struct dc_stream_state *stream = NULL;
if (!pipe_ctx)
return;
stream = pipe_ctx->stream;
+ pipe_ctx->dsc_padding_params.dsc_hactive_padding = 0;
+ pipe_ctx->dsc_padding_params.dsc_htotal_padding = 0;
- if (stream->timing.flags.DSC) {
- hactive = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
-
- /* Assume if determined slices does not divide Hactive evenly, Hborrow is needed for padding*/
- if (hactive % stream->timing.dsc_cfg.num_slices_h != 0) {
- ceil_slice_width = (hactive / stream->timing.dsc_cfg.num_slices_h) + 1;
- pipe_ctx->hblank_borrow = ceil_slice_width * stream->timing.dsc_cfg.num_slices_h - hactive;
+ if (stream)
+ pipe_ctx->dsc_padding_params.dsc_pix_clk_100hz = stream->timing.pix_clk_100hz;
- if (stream->timing.h_total - hactive - pipe_ctx->hblank_borrow < 32)
- pipe_ctx->hblank_borrow = 0;
- }
- }
}
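Worked example of the padding arithmetic described in the kernel-doc above (made-up numbers, illustrative only):

    /*
     * hactive = 1000 with num_slices_h = 3:
     *   1000 % 3 != 0, so ceil_slice_width = 1000 / 3 + 1 = 334
     *   dsc_hactive_padding = 334 * 3 - 1000 = 2
     * If h_total - hactive - padding < 32, the padding is reset to 0.
     */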
/**
@@ -4336,7 +4335,7 @@ enum dc_status dc_validate_global_state(
/* Decide whether hblank borrow is needed and save it in pipe_ctx */
if (dc->debug.enable_hblank_borrow)
- decide_hblank_borrow(pipe_ctx);
+ calculate_timing_params_for_dsc_with_padding(pipe_ctx);
if (dc->res_pool->funcs->patch_unknown_plane_state &&
pipe_ctx->plane_state &&
@@ -4411,8 +4410,14 @@ static void set_avi_info_frame(
unsigned int fr_ind = pipe_ctx->stream->timing.fr_index;
enum dc_timing_3d_format format;
+ if (stream->avi_infopacket.valid) {
+ *info_packet = stream->avi_infopacket;
+ return;
+ }
+
memset(&hdmi_info, 0, sizeof(union hdmi_info_packet));
+
color_space = pipe_ctx->stream->output_color_space;
if (color_space == COLOR_SPACE_UNKNOWN)
color_space = (stream->timing.pixel_encoding == PIXEL_ENCODING_RGB) ?
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stat.c b/drivers/gpu/drm/amd/display/dc/core/dc_stat.c
index fe9f99f1bdf9..f976ffd6d466 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stat.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stat.c
@@ -65,7 +65,7 @@ void dc_stat_get_dmub_notification(const struct dc *dc, struct dmub_notification
notify->type == DMUB_NOTIFICATION_DPIA_NOTIFICATION ||
notify->type == DMUB_NOTIFICATION_SET_CONFIG_REPLY) {
notify->link_index =
- get_link_index_from_dpia_port_index(dc, notify->link_index);
+ get_link_index_from_dpia_port_index(dc, notify->instance);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
index 883054bb18e7..c61300a7cb1c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
@@ -211,7 +211,7 @@ struct dc_state *dc_state_create(struct dc *dc, struct dc_state_create_params *p
return NULL;
}
- if (!dml2_create(dc, &dc->dml2_dc_power_options, &state->bw_ctx.dml2_dc_power_source)) {
+ if (dc->caps.dcmode_power_limits_present && !dml2_create(dc, &dc->dml2_dc_power_options, &state->bw_ctx.dml2_dc_power_source)) {
dc_state_release(state);
return NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 4d6bc9fd4faa..9ac2d41f8fca 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -316,6 +316,9 @@ bool dc_stream_set_cursor_attributes(
{
bool result = false;
+ if (!stream)
+ return false;
+
if (dc_stream_check_cursor_attributes(stream, stream->ctx->dc->current_state, attributes)) {
stream->cursor_attributes = *attributes;
result = true;
@@ -331,7 +334,10 @@ bool dc_stream_program_cursor_attributes(
struct dc *dc;
bool reset_idle_optimizations = false;
- dc = stream ? stream->ctx->dc : NULL;
+ if (!stream)
+ return false;
+
+ dc = stream->ctx->dc;
if (dc_stream_set_cursor_attributes(stream, attributes)) {
dc_z10_restore(dc);
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 8c230cf8939b..98f0b6b3c213 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -55,7 +55,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.340"
+#define DC_VER "3.2.351"
/**
* MAX_SURFACES - representative of the upper bound of surfaces that can be piped to a single CRTC
@@ -234,6 +234,7 @@ struct lut3d_caps {
* @ogam_ram: programmable out gamma LUT
* @ocsc: output color space conversion matrix
* @num_3dluts: MPC 3D LUT; always assumes a preceding shaper LUT
+ * @num_rmcm_3dluts: number of RMCM 3D LUTs; always assumes a preceding shaper LUT
* @shared_3d_lut: shared 3D LUT flag. Can be either DPP or MPC, but single
* instance
* @ogam_rom_caps: pre-definied curve caps for regamma 1D LUT
@@ -694,6 +695,15 @@ struct dc_clocks {
int idle_fclk_khz;
int subvp_prefetch_dramclk_khz;
int subvp_prefetch_fclk_khz;
+
+ /* Stutter efficiencies are technically not clock values,
+ * but they are stored here so that they are passed along in the
+ * update_clocks call, similar to num_ways.
+ * Efficiencies are stored as percentages (0-100).
+ */
+ struct {
+ uint8_t base_efficiency; //LP1
+ uint8_t low_power_efficiency; //LP2
+ } stutter_efficiency;
};
struct dc_bw_validation_profile {
@@ -839,7 +849,8 @@ union dpia_debug_options {
uint32_t enable_force_tbt3_work_around:1; /* bit 4 */
uint32_t disable_usb4_pm_support:1; /* bit 5 */
uint32_t enable_usb4_bw_zero_alloc_patch:1; /* bit 6 */
- uint32_t reserved:25;
+ uint32_t enable_bw_allocation_mode:1; /* bit 7 */
+ uint32_t reserved:24;
} bits;
uint32_t raw;
};
@@ -1072,6 +1083,7 @@ struct dc_debug_options {
unsigned int force_mall_ss_num_ways;
bool alloc_extra_way_for_cursor;
uint32_t subvp_extra_lines;
+ bool disable_force_pstate_allow_on_hw_release;
bool force_usr_allow;
/* uses value at boot and disables switch */
bool disable_dtb_ref_clk_switch;
@@ -1146,6 +1158,12 @@ struct dc_debug_options {
bool force_subvp_df_throttle;
uint32_t acpi_transition_bitmasks[MAX_PIPES];
bool enable_pg_cntl_debug_logs;
+ unsigned int auxless_alpm_lfps_setup_ns;
+ unsigned int auxless_alpm_lfps_period_ns;
+ unsigned int auxless_alpm_lfps_silence_ns;
+ unsigned int auxless_alpm_lfps_t1t2_us;
+ short auxless_alpm_lfps_t1t2_offset_us;
+ bool disable_stutter_for_wm_program;
};
@@ -1306,6 +1324,32 @@ union dc_3dlut_state {
};
+#define MATRIX_9C__DIM_128_ALIGNED_LEN 16 // 9+7:  (9 * 8)  + (7 * 8)  =  72 + 56  = 128 bytes, 128-byte aligned
+#define MATRIX_17C__DIM_128_ALIGNED_LEN 32 //17+15: (17 * 8) + (15 * 8) = 136 + 120 = 256 bytes, 128-byte aligned
+#define MATRIX_33C__DIM_128_ALIGNED_LEN 64 //33+31: (33 * 8) + (31 * 8) = 264 + 248 = 512 bytes, 128-byte aligned
+
+struct lut_rgb {
+ uint16_t b;
+ uint16_t g;
+ uint16_t r;
+ uint16_t padding;
+};
+
+//this structure maps directly to how the lut will read it from memory
+struct lut_mem_mapping {
+ union {
+ //NATIVE MODE 1, 2
+ //RGB layout [b][g][r] //red is 128 byte aligned
+ //BGR layout [r][g][b] //blue is 128 byte aligned
+ struct lut_rgb rgb_17c[17][17][MATRIX_17C__DIM_128_ALIGNED_LEN];
+ struct lut_rgb rgb_33c[33][33][MATRIX_33C__DIM_128_ALIGNED_LEN];
+
+ //TRANSFORMED
+ uint16_t linear_rgb[(33*33*33*4/128+1)*128];
+ };
+ uint16_t size;
+};
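Illustrative compile-time checks (not part of the patch) showing why the padded inner dimensions keep each row a multiple of 128 bytes; assumes C11 _Static_assert is available:

    _Static_assert(sizeof(struct lut_rgb) == 8, "lut_rgb packs four uint16_t");
    _Static_assert(MATRIX_17C__DIM_128_ALIGNED_LEN * sizeof(struct lut_rgb) % 128 == 0,
                   "17-cube row stride is 128-byte aligned");
    _Static_assert(MATRIX_33C__DIM_128_ALIGNED_LEN * sizeof(struct lut_rgb) % 128 == 0,
                   "33-cube row stride is 128-byte aligned");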
+
struct dc_rmcm_3dlut {
bool isInUse;
const struct dc_stream_state *stream;
@@ -1691,7 +1735,6 @@ struct dc {
/* Require to optimize clocks and bandwidth for added/removed planes */
bool optimized_required;
- bool wm_optimized_required;
bool idle_optimizations_allowed;
bool enable_c20_dtm_b0;
@@ -1733,7 +1776,7 @@ struct dc {
struct dml2_configuration_options dml2_options;
struct dml2_configuration_options dml2_dc_power_options;
enum dc_acpi_cm_power_state power_state;
-
+ struct soc_and_ip_translator *soc_and_ip_translator;
};
struct dc_scaling_info {
@@ -1786,6 +1829,23 @@ struct dc_surface_update {
struct dc_bias_and_scale bias_and_scale;
};
+struct dc_underflow_debug_data {
+ uint32_t otg_inst;
+ uint32_t otg_underflow;
+ uint32_t h_position;
+ uint32_t v_position;
+ uint32_t otg_frame_count;
+ struct dc_underflow_per_hubp_debug_data {
+ uint32_t hubp_underflow;
+ uint32_t hubp_in_blank;
+ uint32_t hubp_readline;
+ uint32_t det_config_error;
+ } hubps[MAX_PIPES];
+ uint32_t curr_det_sizes[MAX_PIPES];
+ uint32_t target_det_sizes[MAX_PIPES];
+ uint32_t compbuf_config_error;
+};
+
/*
* Create a new surface with default parameters;
*/
@@ -1804,8 +1864,6 @@ void dc_3dlut_func_retain(struct dc_3dlut *lut);
void dc_post_update_surfaces_to_stream(
struct dc *dc);
-#include "dc_stream.h"
-
/**
* struct dc_validation_set - Struct to store surface/stream associations for validation
*/
@@ -2447,6 +2505,12 @@ void dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link(
*/
enum dc_status dc_link_validate_dp_tunneling_bandwidth(const struct dc *dc, const struct dc_state *new_ctx);
+/*
+ * Get if ALPM is supported by the link
+ */
+void dc_link_get_alpm_support(struct dc_link *link, bool *auxless_support,
+ bool *auxwake_support);
+
/* Sink Interfaces - A sink corresponds to a display output device */
struct dc_container_id {
@@ -2674,4 +2738,17 @@ bool dc_is_timing_changed(struct dc_stream_state *cur_stream,
bool dc_is_cursor_limit_pending(struct dc *dc);
bool dc_can_clear_cursor_limit(struct dc *dc);
+/**
+ * dc_get_underflow_debug_data_for_otg() - Retrieve underflow debug data.
+ *
+ * @dc: Pointer to the display core context.
+ * @primary_otg_inst: Instance index of the primary OTG that underflowed.
+ * @out_data: Pointer to a dc_underflow_debug_data struct to be filled with debug information.
+ *
+ * This function collects and logs underflow-related HW state when an underflow occurs,
+ * including OTG underflow status, current read positions, frame count, and per-HUBP debug data.
+ * The results are stored in the provided out_data structure for further analysis or logging.
+ */
+void dc_get_underflow_debug_data_for_otg(struct dc *dc, int primary_otg_inst, struct dc_underflow_debug_data *out_data);
+
#endif /* DC_INTERFACE_H_ */
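Hypothetical call site for the new debug hook (everything except the new API is illustrative), e.g. from an underflow interrupt handler:

    struct dc_underflow_debug_data data = {0};

    dc_get_underflow_debug_data_for_otg(dc, primary_otg_inst, &data);
    if (data.otg_underflow)
        DC_LOG_DEBUG("OTG%u underflowed at frame %u (h=%u, v=%u)\n",
                     data.otg_inst, data.otg_frame_count,
                     data.h_position, data.v_position);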
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index f5ef1a07078e..53a088ebddef 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -2010,11 +2010,12 @@ bool dmub_lsdma_init(struct dc_dmub_srv *dc_dmub_srv)
return result;
}
-bool dmub_lsdma_send_linear_copy_packet(
+bool dmub_lsdma_send_linear_copy_command(
struct dc_dmub_srv *dc_dmub_srv,
uint64_t src_addr,
uint64_t dst_addr,
- uint32_t count)
+ uint32_t count
+)
{
struct dc_context *dc_ctx = dc_dmub_srv->ctx;
union dmub_rb_cmd cmd;
@@ -2042,9 +2043,54 @@ bool dmub_lsdma_send_linear_copy_packet(
return result;
}
+bool dmub_lsdma_send_linear_sub_window_copy_command(
+ struct dc_dmub_srv *dc_dmub_srv,
+ struct lsdma_linear_sub_window_copy_params copy_data
+)
+{
+ struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ union dmub_rb_cmd cmd;
+ enum dm_dmub_wait_type wait_type;
+ struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data;
+ bool result;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.cmd_common.header.type = DMUB_CMD__LSDMA;
+ cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_LINEAR_SUB_WINDOW_COPY;
+ wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT;
+
+ lsdma_data->u.linear_sub_window_copy_data.tmz = copy_data.tmz;
+ lsdma_data->u.linear_sub_window_copy_data.element_size = copy_data.element_size;
+ lsdma_data->u.linear_sub_window_copy_data.src_lo = copy_data.src_lo;
+ lsdma_data->u.linear_sub_window_copy_data.src_hi = copy_data.src_hi;
+ lsdma_data->u.linear_sub_window_copy_data.src_x = copy_data.src_x;
+ lsdma_data->u.linear_sub_window_copy_data.src_y = copy_data.src_y;
+ lsdma_data->u.linear_sub_window_copy_data.src_pitch = copy_data.src_pitch;
+ lsdma_data->u.linear_sub_window_copy_data.src_slice_pitch = copy_data.src_slice_pitch;
+ lsdma_data->u.linear_sub_window_copy_data.dst_lo = copy_data.dst_lo;
+ lsdma_data->u.linear_sub_window_copy_data.dst_hi = copy_data.dst_hi;
+ lsdma_data->u.linear_sub_window_copy_data.dst_x = copy_data.dst_x;
+ lsdma_data->u.linear_sub_window_copy_data.dst_y = copy_data.dst_y;
+ lsdma_data->u.linear_sub_window_copy_data.dst_pitch = copy_data.dst_pitch;
+ lsdma_data->u.linear_sub_window_copy_data.dst_slice_pitch = copy_data.dst_slice_pitch;
+ lsdma_data->u.linear_sub_window_copy_data.rect_x = copy_data.rect_x;
+ lsdma_data->u.linear_sub_window_copy_data.rect_y = copy_data.rect_y;
+ lsdma_data->u.linear_sub_window_copy_data.src_cache_policy = copy_data.src_cache_policy;
+ lsdma_data->u.linear_sub_window_copy_data.dst_cache_policy = copy_data.dst_cache_policy;
+
+ result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type);
+
+ if (!result)
+ DC_ERROR("LSDMA Linear Sub Window Copy failed in DMUB");
+
+ return result;
+}
+
bool dmub_lsdma_send_tiled_to_tiled_copy_command(
struct dc_dmub_srv *dc_dmub_srv,
- struct lsdma_send_tiled_to_tiled_copy_command_params params)
+ struct lsdma_send_tiled_to_tiled_copy_command_params params
+)
{
struct dc_context *dc_ctx = dc_dmub_srv->ctx;
union dmub_rb_cmd cmd;
@@ -2066,8 +2112,8 @@ bool dmub_lsdma_send_tiled_to_tiled_copy_command(
lsdma_data->u.tiled_copy_data.src_y = params.src_y;
lsdma_data->u.tiled_copy_data.dst_x = params.dst_x;
lsdma_data->u.tiled_copy_data.dst_y = params.dst_y;
- lsdma_data->u.tiled_copy_data.src_width = params.src_width - 1; // LSDMA controller expects width -1
- lsdma_data->u.tiled_copy_data.dst_width = params.dst_width - 1; // LSDMA controller expects width -1
+ lsdma_data->u.tiled_copy_data.src_width = params.src_width;
+ lsdma_data->u.tiled_copy_data.dst_width = params.dst_width;
lsdma_data->u.tiled_copy_data.src_swizzle_mode = params.swizzle_mode;
lsdma_data->u.tiled_copy_data.dst_swizzle_mode = params.swizzle_mode;
lsdma_data->u.tiled_copy_data.src_element_size = params.element_size;
@@ -2078,8 +2124,8 @@ bool dmub_lsdma_send_tiled_to_tiled_copy_command(
lsdma_data->u.tiled_copy_data.tmz = params.tmz;
lsdma_data->u.tiled_copy_data.read_compress = params.read_compress;
lsdma_data->u.tiled_copy_data.write_compress = params.write_compress;
- lsdma_data->u.tiled_copy_data.src_height = params.src_height - 1; // LSDMA controller expects height -1
- lsdma_data->u.tiled_copy_data.dst_height = params.dst_height - 1; // LSDMA controller expects height -1
+ lsdma_data->u.tiled_copy_data.src_height = params.src_height;
+ lsdma_data->u.tiled_copy_data.dst_height = params.dst_height;
lsdma_data->u.tiled_copy_data.data_format = params.data_format;
lsdma_data->u.tiled_copy_data.max_com = params.max_com;
lsdma_data->u.tiled_copy_data.max_uncom = params.max_uncom;
@@ -2097,7 +2143,8 @@ bool dmub_lsdma_send_pio_copy_command(
uint64_t src_addr,
uint64_t dst_addr,
uint32_t byte_count,
- uint32_t overlap_disable)
+ uint32_t overlap_disable
+)
{
struct dc_context *dc_ctx = dc_dmub_srv->ctx;
union dmub_rb_cmd cmd;
@@ -2130,7 +2177,8 @@ bool dmub_lsdma_send_pio_constfill_command(
struct dc_dmub_srv *dc_dmub_srv,
uint64_t dst_addr,
uint32_t byte_count,
- uint32_t data)
+ uint32_t data
+)
{
struct dc_context *dc_ctx = dc_dmub_srv->ctx;
union dmub_rb_cmd cmd;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
index 8ea320f21269..7ef93444ef3c 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
@@ -211,11 +211,45 @@ void dc_dmub_srv_fams2_passthrough_flip(
int surface_count);
bool dmub_lsdma_init(struct dc_dmub_srv *dc_dmub_srv);
-bool dmub_lsdma_send_linear_copy_packet(
+bool dmub_lsdma_send_linear_copy_command(
struct dc_dmub_srv *dc_dmub_srv,
uint64_t src_addr,
uint64_t dst_addr,
uint32_t count);
+
+struct lsdma_linear_sub_window_copy_params {
+ uint32_t src_lo;
+ uint32_t src_hi;
+
+ uint32_t dst_lo;
+ uint32_t dst_hi;
+
+ uint32_t src_x : 16;
+ uint32_t src_y : 16;
+
+ uint32_t dst_x : 16;
+ uint32_t dst_y : 16;
+
+ uint32_t rect_x : 16;
+ uint32_t rect_y : 16;
+
+ uint32_t src_pitch : 16;
+ uint32_t dst_pitch : 16;
+
+ uint32_t src_slice_pitch;
+ uint32_t dst_slice_pitch;
+
+ uint32_t tmz : 1;
+ uint32_t element_size : 3;
+ uint32_t src_cache_policy : 3;
+ uint32_t dst_cache_policy : 3;
+ uint32_t padding : 22;
+};
+
+bool dmub_lsdma_send_linear_sub_window_copy_command(
+ struct dc_dmub_srv *dc_dmub_srv,
+ struct lsdma_linear_sub_window_copy_params copy_data
+);
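Illustrative only: filling the new params struct for a hypothetical 64x64 sub-rectangle copy between two linear surfaces. All field values are made up, the element_size encoding is assumed to be log2(bytes per element), and lower_32_bits()/upper_32_bits() are the usual kernel helpers:

    struct lsdma_linear_sub_window_copy_params p = {
        .src_lo = lower_32_bits(src_addr),
        .src_hi = upper_32_bits(src_addr),
        .dst_lo = lower_32_bits(dst_addr),
        .dst_hi = upper_32_bits(dst_addr),
        .src_x = 0,   .src_y = 0,
        .dst_x = 16,  .dst_y = 16,
        .rect_x = 64, .rect_y = 64,   /* assumed: width x height of the rect */
        .src_pitch = 1920,
        .dst_pitch = 1920,
        .element_size = 2,            /* assumed: log2(4 bytes per pixel) */
    };

    dmub_lsdma_send_linear_sub_window_copy_command(dc_dmub_srv, p);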
bool dmub_lsdma_send_pio_copy_command(
struct dc_dmub_srv *dc_dmub_srv,
uint64_t src_addr,
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index 5ce1be362534..db669ccb1d58 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -1021,7 +1021,8 @@ union dp_128b_132b_supported_lttpr_link_rates {
union dp_alpm_lttpr_cap {
struct {
uint8_t AUX_LESS_ALPM_SUPPORTED :1;
- uint8_t RESERVED :7;
+ uint8_t ASSR_SUPPORTED :1;
+ uint8_t RESERVED :6;
} bits;
uint8_t raw;
};
@@ -1119,10 +1120,11 @@ union dp_128b_132b_training_aux_rd_interval {
union edp_alpm_caps {
struct {
- uint8_t AUX_WAKE_ALPM_CAP :1;
- uint8_t PM_STATE_2A_SUPPORT :1;
- uint8_t AUX_LESS_ALPM_CAP :1;
- uint8_t RESERVED :5;
+ uint8_t AUX_WAKE_ALPM_CAP :1;
+ uint8_t PM_STATE_2A_SUPPORT :1;
+ uint8_t AUX_LESS_ALPM_CAP :1;
+ uint8_t AUX_LESS_ALPM_ML_PHY_SLEEP_STATUS_SUPPORTED :1;
+ uint8_t RESERVED :4;
} bits;
uint8_t raw;
};
@@ -1282,6 +1284,7 @@ struct dpcd_caps {
union dp_receive_port0_cap receive_port0_cap;
/* Indicates the number of SST links supported by MSO (Multi-Stream Output) */
uint8_t mso_cap_sst_links_supported;
+ uint8_t dp_edp_general_cap_2;
};
union dpcd_sink_ext_caps {
@@ -1347,7 +1350,9 @@ union dpcd_alpm_configuration {
struct {
unsigned char ENABLE : 1;
unsigned char IRQ_HPD_ENABLE : 1;
- unsigned char RESERVED : 6;
+ unsigned char ALPM_MODE_SEL : 1;
+ unsigned char ACDS_PERIOD_DURATION : 1;
+ unsigned char RESERVED : 4;
} bits;
unsigned char raw;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c
index 7217de258851..5a365bd19933 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c
@@ -732,7 +732,7 @@ char *dce_version_to_string(const int version)
case DCN_VERSION_3_03:
return "DCN 3.0.3";
case DCN_VERSION_3_1:
- return "DCN 3.1";
+ return "DCN 3.1.2";
case DCN_VERSION_3_14:
return "DCN 3.1.4";
case DCN_VERSION_3_15:
@@ -755,3 +755,8 @@ char *dce_version_to_string(const int version)
return "Unknown";
}
}
+
+bool dc_supports_vrr(const enum dce_version v)
+{
+ return v >= DCE_VERSION_8_0;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
index 7f57661433eb..55704d4457ef 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
@@ -128,7 +128,7 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl
spl_in->odm_slice_index = resource_get_odm_slice_index(pipe_ctx);
// Make spl input basic out info output_size width point to stream h active
spl_in->basic_out.output_size.width =
- stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right + pipe_ctx->hblank_borrow;
+ stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right + pipe_ctx->dsc_padding_params.dsc_hactive_padding;
// Make spl input basic out info output_size height point to v active
spl_in->basic_out.output_size.height =
stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index 5fc6fea211de..76cf9fdedab0 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -203,6 +203,7 @@ struct dc_stream_state {
struct dc_info_packet hfvsif_infopacket;
struct dc_info_packet vtem_infopacket;
struct dc_info_packet adaptive_sync_infopacket;
+ struct dc_info_packet avi_infopacket;
uint8_t dsc_packed_pps[128];
struct rect src; /* composition area */
struct rect dst; /* stream addressable area */
@@ -335,6 +336,8 @@ struct dc_stream_update {
struct dc_info_packet *hfvsif_infopacket;
struct dc_info_packet *vtem_infopacket;
struct dc_info_packet *adaptive_sync_infopacket;
+ struct dc_info_packet *avi_infopacket;
+
bool *dpms_off;
bool integer_scaling_update;
bool *allow_freesync;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 375ca2f13b7a..b5aa03a3e39c 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -563,6 +563,12 @@ struct dc_info_packet_128 {
uint8_t sb[128];
};
+struct dc_edid_read_policy {
+ uint32_t max_retry_count;
+ uint32_t delay_time_ms;
+ uint32_t ignore_checksum;
+};
+
#define DC_PLANE_UPDATE_TIMES_MAX 10
struct dc_plane_flip_time {
@@ -571,6 +577,12 @@ struct dc_plane_flip_time {
unsigned int prev_update_time_in_us;
};
+enum dc_alpm_mode {
+ DC_ALPM_AUXWAKE = 0,
+ DC_ALPM_AUXLESS = 1,
+ DC_ALPM_UNSUPPORTED = 0xF,
+};
+
enum dc_psr_state {
PSR_STATE0 = 0x0,
PSR_STATE1,
@@ -616,6 +628,7 @@ struct psr_config {
unsigned int line_time_in_us;
uint8_t rate_control_caps;
uint16_t dsc_slice_height;
+ bool os_request_force_ffu;
};
union dmcu_psr_level {
@@ -728,6 +741,7 @@ struct psr_context {
unsigned int line_time_in_us;
uint8_t rate_control_caps;
uint16_t dsc_slice_height;
+ bool os_request_force_ffu;
};
struct colorspace_transform {
@@ -1137,6 +1151,10 @@ struct replay_config {
bool low_rr_supported;
/* Replay Video Conferencing Optimization Enabled */
bool replay_video_conferencing_optimization_enabled;
+ /* Replay alpm mode */
+ enum dc_alpm_mode alpm_mode;
+ /* Replay full screen only */
+ bool os_request_force_ffu;
};
/* Replay feature flags*/
@@ -1199,6 +1217,7 @@ struct dc_panel_config {
bool rc_disable;
bool rc_allow_static_screen;
bool rc_allow_fullscreen_VPB;
+ bool read_psrcap_again;
unsigned int replay_enable_option;
} psr;
/* ABM */
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
index 0ce9489ac6b7..de6d62401362 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
@@ -39,6 +39,7 @@
#define CTX \
dccg_dcn->base.ctx
+#include "logger_types.h"
#define DC_LOGGER \
dccg->ctx->logger
@@ -1136,7 +1137,7 @@ static void dcn35_set_dppclk_enable(struct dccg *dccg,
default:
break;
}
- //DC_LOG_DEBUG("%s: dpp_inst(%d) DPPCLK_EN = %d\n", __func__, dpp_inst, enable);
+ DC_LOG_DEBUG("%s: dpp_inst(%d) DPPCLK_EN = %d\n", __func__, dpp_inst, enable);
}
@@ -1406,6 +1407,10 @@ static void dccg35_set_dtbclk_dto(
* PIPEx_DTO_SRC_SEL should not be programmed during DTBCLK update since OTG may still be on, and the
* programming is handled in program_pix_clk() regardless, so it can be removed from here.
*/
+ DC_LOG_DEBUG("%s: OTG%d DTBCLK DTO enabled: pixclk_khz=%d, ref_dtbclk_khz=%d, req_dtbclk_khz=%d, phase=%d, modulo=%d\n",
+ __func__, params->otg_inst, params->pixclk_khz,
+ params->ref_dtbclk_khz, req_dtbclk_khz, phase, modulo);
+
} else {
switch (params->otg_inst) {
case 0:
@@ -1431,6 +1436,8 @@ static void dccg35_set_dtbclk_dto(
REG_WRITE(DTBCLK_DTO_MODULO[params->otg_inst], 0);
REG_WRITE(DTBCLK_DTO_PHASE[params->otg_inst], 0);
+
+ DC_LOG_DEBUG("%s: OTG%d DTBCLK DTO disabled\n", __func__, params->otg_inst);
}
}
@@ -1475,6 +1482,8 @@ static void dccg35_set_dpstreamclk(
BREAK_TO_DEBUGGER();
return;
}
+ DC_LOG_DEBUG("%s: dp_hpo_inst(%d) DPSTREAMCLK_EN = %d, DPSTREAMCLK_SRC_SEL = %d\n",
+ __func__, dp_hpo_inst, (src == REFCLK) ? 0 : 1, otg_inst);
}
@@ -1514,6 +1523,8 @@ static void dccg35_set_dpstreamclk_root_clock_gating(
BREAK_TO_DEBUGGER();
return;
}
+ DC_LOG_DEBUG("%s: dp_hpo_inst(%d) DPSTREAMCLK_ROOT_GATE_DISABLE = %d\n",
+ __func__, dp_hpo_inst, enable ? 1 : 0);
}
@@ -1553,7 +1564,7 @@ static void dccg35_set_physymclk_root_clock_gating(
BREAK_TO_DEBUGGER();
return;
}
- //DC_LOG_DEBUG("%s: dpp_inst(%d) PHYESYMCLK_ROOT_GATE_DISABLE:\n", __func__, phy_inst, enable ? 0 : 1);
+ DC_LOG_DEBUG("%s: dpp_inst(%d) PHYESYMCLK_ROOT_GATE_DISABLE: %d\n", __func__, phy_inst, enable ? 0 : 1);
}
@@ -1626,6 +1637,8 @@ static void dccg35_set_physymclk(
BREAK_TO_DEBUGGER();
return;
}
+ DC_LOG_DEBUG("%s: phy_inst(%d) PHYxSYMCLK_EN = %d, PHYxSYMCLK_SRC_SEL = %d\n",
+ __func__, phy_inst, force_enable ? 1 : 0, clk_src);
}
static void dccg35_set_valid_pixel_rate(
@@ -1673,6 +1686,7 @@ static void dccg35_dpp_root_clock_control(
}
dccg->dpp_clock_gated[dpp_inst] = !clock_on;
+ DC_LOG_DEBUG("%s: dpp_inst(%d) clock_on = %d\n", __func__, dpp_inst, clock_on);
}
static void dccg35_disable_symclk32_se(
@@ -1731,6 +1745,7 @@ static void dccg35_disable_symclk32_se(
BREAK_TO_DEBUGGER();
return;
}
+
}
static void dccg35_init_cb(struct dccg *dccg)
@@ -1738,7 +1753,6 @@ static void dccg35_init_cb(struct dccg *dccg)
(void)dccg;
/* Any RCG should be done when driver enter low power mode*/
}
-
void dccg35_init(struct dccg *dccg)
{
int otg_inst;
@@ -1753,6 +1767,8 @@ void dccg35_init(struct dccg *dccg)
for (otg_inst = 0; otg_inst < 2; otg_inst++) {
dccg31_disable_symclk32_le(dccg, otg_inst);
dccg31_set_symclk32_le_root_clock_gating(dccg, otg_inst, false);
+ DC_LOG_DEBUG("%s: OTG%d SYMCLK32_LE disabled and root clock gating disabled\n",
+ __func__, otg_inst);
}
// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
@@ -1765,6 +1781,8 @@ void dccg35_init(struct dccg *dccg)
dccg35_set_dpstreamclk(dccg, REFCLK, otg_inst,
otg_inst);
dccg35_set_dpstreamclk_root_clock_gating(dccg, otg_inst, false);
+ DC_LOG_DEBUG("%s: OTG%d DPSTREAMCLK disabled and root clock gating disabled\n",
+ __func__, otg_inst);
}
/*
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
index 668ee2d405fd..0b8ed9b94d3c 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
@@ -619,7 +619,7 @@ void dccg401_set_dp_dto(
dto_integer = div_u64(params->pixclk_hz, dto_modulo_hz);
dto_phase_hz = params->pixclk_hz - dto_integer * dto_modulo_hz;
- if (dto_phase_hz <= 0) {
+ if (dto_phase_hz <= 0 && dto_integer <= 0) {
/* negative pixel rate should never happen */
BREAK_TO_DEBUGGER();
return;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index bb4ac5042c80..673bb87d2c17 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -725,14 +725,18 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
for (i = 0; i < AUX_MAX_RETRIES; i++) {
DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
LOG_FLAG_I2cAux_DceAux,
- "dce_aux_transfer_with_retries: link_index=%u: START: retry %d of %d: address=0x%04x length=%u write=%d mot=%d",
+ "dce_aux_transfer_with_retries: link_index=%u: START: retry %d of %d: "
+ "address=0x%04x length=%u write=%d mot=%d is_i2c=%d is_dpia=%d ddc_hw_inst=%d",
ddc && ddc->link ? ddc->link->link_index : UINT_MAX,
i + 1,
(int)AUX_MAX_RETRIES,
payload->address,
payload->length,
(unsigned int) payload->write,
- (unsigned int) payload->mot);
+ (unsigned int) payload->mot,
+ payload->i2c_over_aux,
+ (ddc->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) ? true : false,
+ ddc->link->ddc_hw_inst);
if (payload->write)
dce_aux_log_payload(" write", payload->data, payload->length, 16);
@@ -746,7 +750,9 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
LOG_FLAG_I2cAux_DceAux,
- "dce_aux_transfer_with_retries: link_index=%u: END: retry %d of %d: address=0x%04x length=%u write=%d mot=%d: ret=%d operation_result=%d payload->reply=%u",
+ "dce_aux_transfer_with_retries: link_index=%u: END: retry %d of %d: "
+ "address=0x%04x length=%u write=%d mot=%d: ret=%d operation_result=%d "
+ "payload->reply=%u is_i2c=%d is_dpia=%d ddc_hw_inst=%d",
ddc && ddc->link ? ddc->link->link_index : UINT_MAX,
i + 1,
(int)AUX_MAX_RETRIES,
@@ -756,7 +762,10 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
(unsigned int) payload->mot,
ret,
(int)operation_result,
- (unsigned int) *payload->reply);
+ (unsigned int) *payload->reply,
+ payload->i2c_over_aux,
+ (ddc->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) ? true : false,
+ ddc->link->ddc_hw_inst);
if (!payload->write)
dce_aux_log_payload(" read", payload->data, ret > 0 ? ret : 0, 16);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
index 0421b267a0b5..365dd2e37aea 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
@@ -591,7 +591,7 @@ static bool dce_i2c_hw_engine_submit_payload(struct dce_i2c_hw *dce_i2c_hw,
DCE_I2C_TRANSACTION_ACTION_I2C_WRITE;
- request.address = (uint8_t) ((payload->address << 1) | !payload->write);
+ request.address = (uint8_t) ((payload->address << 1) | (payload->write ? 0 : 1));
request.length = payload->length;
request.data = payload->data;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
index e188447c8156..2d73b94c515c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
@@ -451,7 +451,7 @@ static bool dce_i2c_sw_engine_submit_payload(struct dce_i2c_sw *engine,
DCE_I2C_TRANSACTION_ACTION_I2C_WRITE_MOT :
DCE_I2C_TRANSACTION_ACTION_I2C_WRITE;
- request.address = (uint8_t) ((payload->address << 1) | !payload->write);
+ request.address = (uint8_t) ((payload->address << 1) | (payload->write ? 0 : 1));
request.length = payload->length;
request.data = payload->data;
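Both I2C hunks build the standard first byte on the wire: the 7-bit address shifted left by one, with the R/W# flag in bit 0 (0 = write, 1 = read). For a device at 7-bit address 0x50:

    /* write: (0x50 << 1) | 0 = 0xA0 */
    /* read:  (0x50 << 1) | 1 = 0xA1 */

For a boolean write flag the explicit ternary is behaviorally identical to !payload->write; it just makes the read/write polarity obvious at a glance.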
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
index ff3b8244ba3d..87af4fdc04a6 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
@@ -391,7 +391,7 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
sizeof(DP_SINK_DEVICE_STR_ID_1)))
link->psr_settings.force_ffu_mode = 1;
- copy_settings_data->force_ffu_mode = link->psr_settings.force_ffu_mode;
+ copy_settings_data->force_ffu_mode = link->psr_settings.force_ffu_mode || psr_context->os_request_force_ffu;
if (((link->dpcd_caps.fec_cap.bits.FEC_CAPABLE &&
!link->dc->debug.disable_fec) &&
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
index fcd3d86ad517..f9542edff14b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
@@ -3,6 +3,7 @@
// Copyright 2024 Advanced Micro Devices, Inc.
#include "dc.h"
+#include "link_service.h"
#include "dc_dmub_srv.h"
#include "dmub/dmub_srv.h"
#include "core_types.h"
@@ -168,6 +169,7 @@ static bool dmub_replay_copy_settings(struct dmub_replay *dmub,
copy_settings_data->max_deviation_line = link->dpcd_caps.pr_info.max_deviation_line;
copy_settings_data->smu_optimizations_en = link->replay_settings.replay_smu_opt_enable;
copy_settings_data->replay_timing_sync_supported = link->replay_settings.config.replay_timing_sync_supported;
+ copy_settings_data->replay_support_fast_resync_in_ultra_sleep_mode = link->replay_settings.config.replay_support_fast_resync_in_ultra_sleep_mode;
copy_settings_data->debug.bitfields.enable_ips_visual_confirm = dc->dc->debug.enable_ips_visual_confirm;
@@ -189,6 +191,18 @@ static bool dmub_replay_copy_settings(struct dmub_replay *dmub,
else
copy_settings_data->flags.bitfields.force_wakeup_by_tps3 = 0;
+ copy_settings_data->flags.bitfields.alpm_mode = (enum dmub_alpm_mode)link->replay_settings.config.alpm_mode;
+ if (link->replay_settings.config.alpm_mode == DC_ALPM_AUXLESS) {
+ copy_settings_data->auxless_alpm_data.lfps_setup_ns = dc->dc->debug.auxless_alpm_lfps_setup_ns;
+ copy_settings_data->auxless_alpm_data.lfps_period_ns = dc->dc->debug.auxless_alpm_lfps_period_ns;
+ copy_settings_data->auxless_alpm_data.lfps_silence_ns = dc->dc->debug.auxless_alpm_lfps_silence_ns;
+ copy_settings_data->auxless_alpm_data.lfps_t1_t2_override_us =
+ dc->dc->debug.auxless_alpm_lfps_t1t2_us;
+ copy_settings_data->auxless_alpm_data.lfps_t1_t2_offset_us =
+ dc->dc->debug.auxless_alpm_lfps_t1t2_offset_us;
+ copy_settings_data->auxless_alpm_data.lttpr_count = link->dc->link_srv->dp_get_lttpr_count(link);
+ }
+
dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_link_encoder.c
index e0558a78b11c..1c1228116487 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_link_encoder.c
@@ -812,7 +812,7 @@ bool dcn10_link_encoder_validate_output_with_stream(
enc10, &stream->timing);
break;
case SIGNAL_TYPE_EDP:
- is_valid = (stream->timing.pixel_encoding == PIXEL_ENCODING_RGB) ? true : false;
+ is_valid = stream->timing.pixel_encoding == PIXEL_ENCODING_RGB;
break;
case SIGNAL_TYPE_VIRTUAL:
is_valid = true;
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c
index 22e66b375a7f..d928b4dcf6b8 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c
@@ -28,7 +28,7 @@
#include "dcn10_stream_encoder.h"
#include "reg_helper.h"
#include "hw_shared.h"
-#include "link.h"
+#include "link_service.h"
#include "dpcd_defs.h"
#include "dcn30/dcn30_afmt.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn20/dcn20_stream_encoder.c
index 0b47aeb60e79..bec0b4aaeb2b 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn20/dcn20_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn20/dcn20_stream_encoder.c
@@ -29,7 +29,7 @@
#include "dcn20_stream_encoder.h"
#include "reg_helper.h"
#include "hw_shared.h"
-#include "link.h"
+#include "link_service.h"
#include "dpcd_defs.h"
#define DC_LOGGER \
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn31/dcn31_dio_link_encoder.c
index 9a92f73d5b7f..84cc2ddc52fe 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn31/dcn31_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn31/dcn31_dio_link_encoder.c
@@ -37,7 +37,7 @@
#include "link_enc_cfg.h"
#include "dc_dmub_srv.h"
#include "dal_asic_id.h"
-#include "link.h"
+#include "link_service.h"
#define CTX \
enc10->base.ctx
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c
index ae81451a3a72..3e85e9c3d2cb 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c
@@ -30,7 +30,7 @@
#include "dcn314_dio_stream_encoder.h"
#include "reg_helper.h"
#include "hw_shared.h"
-#include "link.h"
+#include "link_service.h"
#include "dpcd_defs.h"
#define DC_LOGGER \
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_stream_encoder.c
index 1a9bb614c41e..3523d1cdc1a3 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_stream_encoder.c
@@ -29,7 +29,7 @@
#include "dcn32_dio_stream_encoder.h"
#include "reg_helper.h"
#include "hw_shared.h"
-#include "link.h"
+#include "link_service.h"
#include "dpcd_defs.h"
#define DC_LOGGER \
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c
index 6ab2a218b769..fd5d1dbf9dc6 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c
@@ -29,7 +29,7 @@
#include "dcn35_dio_stream_encoder.h"
#include "reg_helper.h"
#include "hw_shared.h"
-#include "link.h"
+#include "link_service.h"
#include "dpcd_defs.h"
#define DC_LOGGER \
@@ -397,7 +397,7 @@ static bool enc35_is_fifo_enabled(struct stream_encoder *enc)
uint32_t reset_val;
REG_GET(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, &reset_val);
- return (reset_val == 0) ? false : true;
+ return reset_val != 0;
}
void enc35_disable_fifo(struct stream_encoder *enc)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c
index d5fa551dd3c9..99aab70ef3e1 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c
@@ -32,7 +32,7 @@
#include "dcn401_dio_stream_encoder.h"
#include "reg_helper.h"
#include "hw_shared.h"
-#include "link.h"
+#include "link_service.h"
#include "dpcd_defs.h"
#define DC_LOGGER \
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h
index 7b9c22c45453..fbbf9c757b3c 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services.h
@@ -277,12 +277,13 @@ void dm_perf_trace_timestamp(const char *func_name, unsigned int line, struct dc
/*
* SMU message tracing
*/
-void dm_trace_smu_msg(uint32_t msg_id, uint32_t param_in, struct dc_context *ctx);
-void dm_trace_smu_delay(uint32_t delay, struct dc_context *ctx);
-
-#define TRACE_SMU_MSG(msg_id, param_in, ctx) dm_trace_smu_msg(msg_id, param_in, ctx)
-#define TRACE_SMU_DELAY(response_delay, ctx) dm_trace_smu_delay(response_delay, ctx)
+void dm_trace_smu_enter(uint32_t msg_id, uint32_t param_in, unsigned int delay, struct dc_context *ctx);
+void dm_trace_smu_exit(bool success, uint32_t response, struct dc_context *ctx);
+#define TRACE_SMU_MSG_DELAY(msg_id, param_in, delay, ctx) dm_trace_smu_enter(msg_id, param_in, delay, ctx)
+#define TRACE_SMU_MSG(msg_id, param_in, ctx) dm_trace_smu_enter(msg_id, param_in, 0, ctx)
+#define TRACE_SMU_MSG_ENTER(msg_id, param_in, ctx) dm_trace_smu_enter(msg_id, param_in, 0, ctx)
+#define TRACE_SMU_MSG_EXIT(success, response, ctx) dm_trace_smu_exit(success, response, ctx)
/*
* DMUB Interfaces
@@ -311,4 +312,6 @@ void dm_dtn_log_end(struct dc_context *ctx,
char *dce_version_to_string(const int version);
+bool dc_supports_vrr(const enum dce_version v);
+
#endif /* __DM_SERVICES_H__ */
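Minimal usage sketch for the renamed enter/exit trace pair, mirroring the dcn401 call sites earlier in this patch; wait_for_smu_response() is a hypothetical stand-in for the driver's polling helper:

    TRACE_SMU_MSG_ENTER(msg_id, param_in, ctx);
    success = wait_for_smu_response(clk_mgr, &response);  /* hypothetical */
    TRACE_SMU_MSG_EXIT(success, success ? response : 0, ctx);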
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services_types.h b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
index bf63da266a18..3b093b8699ab 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
@@ -127,7 +127,7 @@ struct dm_pp_single_disp_config {
uint32_t src_height;
uint32_t src_width;
uint32_t v_refresh;
- uint32_t sym_clock; /* HDMI only */
+ uint32_t pixel_clock; /* Pixel clock in kHz (for HDMI, normalized) */
struct dc_link_settings link_settings; /* DP only */
};
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
index 2a2eaf6adf26..7aaf13bbd4e4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
@@ -30,8 +30,7 @@
#include "dcn20/dcn20_resource.h"
#include "dcn21/dcn21_resource.h"
#include "clk_mgr/dcn21/rn_clk_mgr.h"
-
-#include "link.h"
+#include "link_service.h"
#include "dcn20_fpu.h"
#include "dc_state_priv.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
index 390c1a77fda6..9c58ff1069d6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
@@ -646,7 +646,7 @@ static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib,
// the dpte_group_bytes is reduced for the specific case of vertical
// access of a tile surface that has dpte request of 8x1 ptes.
- if (!surf_linear & (log2_dpte_req_height_ptes == 0) & surf_vert) //reduced, in this case, will have page fault within a group
+ if (!surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert) //reduced, in this case, will have page fault within a group
rq_sizing_param->dpte_group_bytes = 512;
else
//full size
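
This `&` to `&&` conversion (repeated in the dcn20v2/dcn21/dcn30/dcn31/dcn314 copies of this function below) is more than style: the flags are plain ints, and bitwise AND only coincides with logical AND while every operand is exactly 0 or 1, and it never short-circuits. A standalone demonstration of how the bitwise form misfires once a flag holds a non-zero value other than 1:

#include <stdio.h>

int main(void)
{
    int surf_linear = 0;
    int log2_dpte_req_height_ptes = 0;
    int surf_vert = 2;  /* "true" flag stored as 2, not 1 */

    /* 1 & 1 & 2 == 0: the reduced-group case is wrongly skipped */
    int bitwise = !surf_linear & (log2_dpte_req_height_ptes == 0) & surf_vert;

    /* 1 && 1 && 2 == 1: any non-zero operand counts as true */
    int logical = !surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert;

    printf("bitwise=%d logical=%d\n", bitwise, logical);  /* 0 vs 1 */
    return 0;
}
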
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
index 843d6004258c..570e6e39eb45 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
@@ -646,7 +646,7 @@ static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib,
// the dpte_group_bytes is reduced for the specific case of vertical
// access of a tile surface that has dpte request of 8x1 ptes.
- if (!surf_linear & (log2_dpte_req_height_ptes == 0) & surf_vert) //reduced, in this case, will have page fault within a group
+ if (!surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert) //reduced, in this case, will have page fault within a group
rq_sizing_param->dpte_group_bytes = 512;
else
//full size
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
index 5718000627b0..f549da082c01 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
@@ -652,7 +652,7 @@ static void get_meta_and_pte_attr(
if (hostvm_enable)
rq_sizing_param->dpte_group_bytes = 512;
else {
- if (!surf_linear & (log2_dpte_req_height_ptes == 0) & surf_vert) //reduced, in this case, will have page fault within a group
+ if (!surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert) //reduced, in this case, will have page fault within a group
rq_sizing_param->dpte_group_bytes = 512;
else
//full size
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
index 8d4873f80df0..4fb37df54d59 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
@@ -620,7 +620,7 @@ static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib,
if (hostvm_enable)
rq_sizing_param->dpte_group_bytes = 512;
else {
- if (!surf_linear & (log2_dpte_req_height_ptes == 0) & surf_vert) //reduced, in this case, will have page fault within a group
+ if (!surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert) //reduced, in this case, will have page fault within a group
rq_sizing_param->dpte_group_bytes = 512;
else
rq_sizing_param->dpte_group_bytes = 2048;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
index 0c0b2d67c9cd..1aaa77265eed 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
@@ -326,7 +326,7 @@ void dcn301_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_p
struct dcn301_resource_pool *pool = TO_DCN301_RES_POOL(dc->res_pool);
struct clk_limit_table *clk_table = &bw_params->clk_table;
unsigned int i, closest_clk_lvl;
- int j;
+ int j = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0;
dc_assert_fp_enabled();
@@ -338,6 +338,15 @@ void dcn301_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_p
dcn3_01_soc.num_chans = bw_params->num_channels;
ASSERT(clk_table->num_entries);
+
+ /* Prepass to find max clocks independent of voltage level. */
+ for (i = 0; i < clk_table->num_entries; ++i) {
+ if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz)
+ max_dispclk_mhz = clk_table->entries[i].dispclk_mhz;
+ if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz)
+ max_dppclk_mhz = clk_table->entries[i].dppclk_mhz;
+ }
+
for (i = 0; i < clk_table->num_entries; i++) {
/* loop backwards*/
for (closest_clk_lvl = 0, j = dcn3_01_soc.num_states - 1; j >= 0; j--) {
@@ -353,8 +362,13 @@ void dcn301_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_p
s[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
s[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
- s[i].dispclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
- s[i].dppclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
+ /* Clocks independent of voltage level. */
+ s[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz :
+ dcn3_01_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
+
+ s[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz :
+ dcn3_01_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
+
s[i].dram_bw_per_chan_gbps =
dcn3_01_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
s[i].dscclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
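
The shape of this change: dispclk and dppclk on this SoC do not scale with the voltage level, so instead of inheriting whatever the matched clock-limit state happens to carry, the table is scanned once up front and every state gets the table-wide maximum, keeping the old per-state value only as a fallback when the table yields nothing. A condensed sketch of the prepass, with a simplified entry type standing in for clk_limit_table:

struct clk_entry { int dispclk_mhz; int dppclk_mhz; };

/* One pass over the DPM table; 0 means "no usable value, keep the
 * per-state clock limit as a fallback". */
static int max_dispclk_of(const struct clk_entry *e, int n)
{
    int i, max_mhz = 0;

    for (i = 0; i < n; i++)
        if (e[i].dispclk_mhz > max_mhz)
            max_mhz = e[i].dispclk_mhz;
    return max_mhz;
}
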
@@ -435,12 +449,12 @@ void dcn301_fpu_calculate_wm_and_dlg(struct dc *dc,
&context->bw_ctx.dml, pipes, pipe_cnt);
/* WM Set C */
table_entry = &bw_params->wm_table.entries[WM_C];
- vlevel = min(max(vlevel_req, 2), vlevel_max);
+ vlevel = clamp(vlevel_req, 2, vlevel_max);
calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.c,
&context->bw_ctx.dml, pipes, pipe_cnt);
/* WM Set B */
table_entry = &bw_params->wm_table.entries[WM_B];
- vlevel = min(max(vlevel_req, 1), vlevel_max);
+ vlevel = clamp(vlevel_req, 1, vlevel_max);
calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.b,
&context->bw_ctx.dml, pipes, pipe_cnt);
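
clamp(val, lo, hi) is the kernel's named spelling of min(max(val, lo), hi): same result, clearer intent, and the kernel macro additionally type-checks its arguments (newer trees also reject constant bounds with lo > hi at build time). A standalone sketch of the equivalence using local stand-in macros rather than linux/minmax.h:

#include <stdio.h>

#define my_max(a, b)        ((a) > (b) ? (a) : (b))
#define my_min(a, b)        ((a) < (b) ? (a) : (b))
#define my_clamp(v, lo, hi) my_min(my_max(v, lo), hi)

int main(void)
{
    int vlevel_max = 3;

    /* WM Set C: at least level 2, never above vlevel_max */
    printf("%d\n", my_clamp(0, 2, vlevel_max)); /* 2: raised to the floor */
    printf("%d\n", my_clamp(5, 2, vlevel_max)); /* 3: capped at the ceiling */
    return 0;
}
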
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c
index 8da97a96b1ce..8d7c59ec701d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c
@@ -280,7 +280,7 @@ void dcn302_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_p
j = 0;
/* create the final dcfclk and uclk table */
while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) {
- if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) {
+ if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) {
dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
} else {
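
The deleted `i < num_dcfclk_sta_targets` is guaranteed by the while condition one line up, so it is dead logic (the same cleanup lands in the dcn303, dcn32 and dcn321 copies of this loop below). The surrounding loop is a two-index merge of two ascending clock lists; reduced to its skeleton, with simplified arrays standing in for the driver tables:

/* Merge two sorted lists into one table, capped at max_states. The
 * loop condition already bounds both indices, which is why re-testing
 * i inside the branch could never fail. */
static int merge_clk_tables(const int *sta, int n_sta,
                            const int *opt, int n_opt,
                            int *out, int max_states)
{
    int i = 0, j = 0, n = 0;

    while (i < n_sta && j < n_opt && n < max_states) {
        if (sta[i] < opt[j])
            out[n++] = sta[i++];
        else
            out[n++] = opt[j++];
    }
    return n;
}
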
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c
index e968870a4b81..b5d3fd4c3694 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c
@@ -285,7 +285,7 @@ void dcn303_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_p
j = 0;
/* create the final dcfclk and uclk table */
while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) {
- if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) {
+ if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) {
dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
} else {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
index c46bda2141ac..bfeb01477f0c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
@@ -615,7 +615,7 @@ static void get_meta_and_pte_attr(
if (hostvm_enable)
rq_sizing_param->dpte_group_bytes = 512;
else {
- if (!surf_linear & (log2_dpte_req_height_ptes == 0) & surf_vert) //reduced, in this case, will have page fault within a group
+ if (!surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert) //reduced, in this case, will have page fault within a group
rq_sizing_param->dpte_group_bytes = 512;
else
rq_sizing_param->dpte_group_bytes = 2048;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c
index b7d2a0caec11..04df263ff65e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c
@@ -703,7 +703,7 @@ static void get_meta_and_pte_attr(
if (hostvm_enable)
rq_sizing_param->dpte_group_bytes = 512;
else {
- if (!surf_linear & (log2_dpte_req_height_ptes == 0) & surf_vert) //reduced, in this case, will have page fault within a group
+ if (!surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert) //reduced, in this case, will have page fault within a group
rq_sizing_param->dpte_group_bytes = 512;
else
rq_sizing_param->dpte_group_bytes = 2048;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index 6160952245b4..8a0f128722b0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -31,7 +31,7 @@
// We need this includes for WATERMARKS_* defines
#include "clk_mgr/dcn32/dcn32_smu13_driver_if.h"
#include "dcn30/dcn30_resource.h"
-#include "link.h"
+#include "link_service.h"
#include "dc_state_priv.h"
#define DC_LOGGER_INIT(logger)
@@ -3229,7 +3229,7 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa
j = 0;
// create the final dcfclk and uclk table
while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) {
- if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) {
+ if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) {
dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
} else {
@@ -3401,7 +3401,7 @@ bool dcn32_allow_subvp_with_active_margin(struct pipe_ctx *pipe)
uint32_t height = subvp_active_margin_list.res[i].height;
refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 +
- pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1);
+ (uint64_t)pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1);
refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total);
refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total);
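
The added cast fixes a narrow-multiply overflow: v_total and h_total are 32-bit fields, so their product was computed in 32 bits and could wrap before being combined with the already-64-bit first term. Promoting one factor to uint64_t forces the multiplication itself into 64 bits. A standalone demonstration with values chosen to overflow (unsigned wraparound is well defined, which is exactly what makes the bug quiet):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t v_total = 70000, h_total = 70000; /* product: 4.9e9 > 2^32 */

    uint64_t wrapped = v_total * h_total;           /* 32-bit mul, wraps */
    uint64_t correct = (uint64_t)v_total * h_total; /* 64-bit mul */

    printf("wrapped=%llu correct=%llu\n",
           (unsigned long long)wrapped,
           (unsigned long long)correct); /* 605032704 vs 4900000000 */
    return 0;
}
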
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c
index 9ba6cb67655f..6c75aa82327a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c
@@ -139,7 +139,6 @@ void dml32_rq_dlg_get_rq_reg(display_rq_regs_st *rq_regs,
if (dual_plane) {
unsigned int p1_pte_row_height_linear = get_dpte_row_height_linear_c(mode_lib, e2e_pipe_param,
num_pipes, pipe_idx);
- ;
if (src->sw_mode == dm_sw_linear)
ASSERT(p1_pte_row_height_linear >= 8);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
index 8839faf42207..e0a1dc89ce43 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
@@ -779,7 +779,7 @@ void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p
j = 0;
// create the final dcfclk and uclk table
while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) {
- if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) {
+ if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) {
dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
} else {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
index 5d73efa2f0c9..c9dd920744c9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
@@ -31,7 +31,7 @@
#include "dml/dcn31/dcn31_fpu.h"
#include "dml/dml_inline_defs.h"
-#include "link.h"
+#include "link_service.h"
#define DC_LOGGER_INIT(logger)
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
index 6f516af82956..8cda18ce1a76 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
@@ -10,7 +10,7 @@
#include "dml/dcn35/dcn35_fpu.h"
#include "dml/dml_inline_defs.h"
-#include "link.h"
+#include "link_service.h"
#define DC_LOGGER_INIT(logger)
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
index 715f9019a33e..4b9b2e84d381 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
@@ -6529,7 +6529,7 @@ static noinline_for_stack void dml_prefetch_check(struct display_mode_lib_st *mo
mode_lib->ms.TotImmediateFlipBytes = 0;
for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
if (!(mode_lib->ms.policy.ImmediateFlipRequirement[k] == dml_immediate_flip_not_required)) {
- mode_lib->ms.TotImmediateFlipBytes = mode_lib->ms.TotImmediateFlipBytes + mode_lib->ms.NoOfDPP[j][k] * mode_lib->ms.PDEAndMetaPTEBytesPerFrame[j][k] + mode_lib->ms.MetaRowBytes[j][k];
+ mode_lib->ms.TotImmediateFlipBytes = mode_lib->ms.TotImmediateFlipBytes + mode_lib->ms.NoOfDPP[j][k] * (mode_lib->ms.PDEAndMetaPTEBytesPerFrame[j][k] + mode_lib->ms.MetaRowBytes[j][k]);
if (mode_lib->ms.use_one_row_for_frame_flip[j][k]) {
mode_lib->ms.TotImmediateFlipBytes = mode_lib->ms.TotImmediateFlipBytes + mode_lib->ms.NoOfDPP[j][k] * (2 * mode_lib->ms.DPTEBytesPerRow[j][k]);
} else {
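
This one is an arithmetic fix, not a tidy-up: each plane contributes NoOfDPP * (PDE/meta-PTE frame bytes + meta row bytes) to TotImmediateFlipBytes, but the old expression bound the multiply only to the first term, so the meta row bytes were counted once instead of once per DPP. A tiny illustration of the difference:

#include <stdio.h>

int main(void)
{
    unsigned no_of_dpp = 2, pde_meta_pte = 1000, meta_row = 300;

    unsigned old_total = no_of_dpp * pde_meta_pte + meta_row;   /* 2300 */
    unsigned new_total = no_of_dpp * (pde_meta_pte + meta_row); /* 2600 */

    printf("old=%u new=%u\n", old_total, new_total);
    return 0;
}
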
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
index a06217a9eef6..bf5e7f4e0416 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
@@ -8,7 +8,7 @@
#include "dml2_internal_types.h"
#include "dml21_utils.h"
#include "dml21_translation_helper.h"
-#include "bounding_boxes/dcn4_soc_bb.h"
+#include "soc_and_ip_translator.h"
static void dml21_populate_pmo_options(struct dml2_pmo_options *pmo_options,
const struct dc *in_dc,
@@ -38,375 +38,37 @@ static void dml21_populate_pmo_options(struct dml2_pmo_options *pmo_options,
pmo_options->disable_drr_clamped_when_var_active = in_dc->debug.disable_fams_gaming == INGAME_FAMS_DISABLE;
}
-/*
- * Populate dml_init based on default static values in soc bb. The default
- * values are for reference and support at least minimal operation of current
- * SoC and DCN hardware. The values could be modified by subsequent override
- * functions to reflect our true hardware capability.
- */
-static void populate_default_dml_init_params(struct dml2_initialize_instance_in_out *dml_init,
- const struct dml2_configuration_options *config,
- const struct dc *in_dc)
+static enum dml2_project_id dml21_dcn_revision_to_dml2_project_id(enum dce_version dcn_version)
{
- switch (in_dc->ctx->dce_version) {
+ enum dml2_project_id project_id;
+ switch (dcn_version) {
case DCN_VERSION_4_01:
- dml_init->options.project_id = dml2_project_dcn4x_stage2_auto_drr_svp;
- dml21_populate_pmo_options(&dml_init->options.pmo_options, in_dc, config);
- dml_init->soc_bb = dml2_socbb_dcn401;
- dml_init->soc_bb.qos_parameters = dml_dcn4_variant_a_soc_qos_params;
- dml_init->ip_caps = dml2_dcn401_max_ip_caps;
+ project_id = dml2_project_dcn4x_stage2_auto_drr_svp;
break;
default:
- memset(dml_init, 0, sizeof(*dml_init));
+ project_id = dml2_project_invalid;
DC_ERR("unsupported dcn version for DML21!");
- return;
- }
-}
-
-static void override_dml_init_with_values_from_hardware_default(struct dml2_initialize_instance_in_out *dml_init,
- const struct dml2_configuration_options *config,
- const struct dc *in_dc)
-{
- dml_init->soc_bb.dchub_refclk_mhz = in_dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
- dml_init->soc_bb.dprefclk_mhz = in_dc->clk_mgr->dprefclk_khz / 1000;
- dml_init->soc_bb.dispclk_dppclk_vco_speed_mhz = in_dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
-}
-
-/*
- * SMU stands for System Management Unit. It is a power management processor.
- * It owns the initialization of dc's clock table and programming of clock values
- * based on dc's requests.
- * Our clock values in base soc bb is a dummy placeholder. The real clock values
- * are retrieved from SMU firmware to dc clock table at runtime.
- * This function overrides our dummy placeholder values with real values in dc
- * clock table.
- */
-static void override_dml_init_with_values_from_smu(
- struct dml2_initialize_instance_in_out *dml_init,
- const struct dml2_configuration_options *config,
- const struct dc *in_dc)
-{
- int i;
- const struct clk_bw_params *dc_bw_params = in_dc->clk_mgr->bw_params;
- const struct clk_limit_table *dc_clk_table = &dc_bw_params->clk_table;
- struct dml2_soc_state_table *dml_clk_table = &dml_init->soc_bb.clk_table;
-
- if (!in_dc->clk_mgr->funcs->is_smu_present ||
- !in_dc->clk_mgr->funcs->is_smu_present(in_dc->clk_mgr))
- /* skip if smu is not present */
- return;
-
- /* dcfclk */
- if (dc_clk_table->num_entries_per_clk.num_dcfclk_levels) {
- dml_clk_table->dcfclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dcfclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->dcfclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dcfclk_mhz &&
- dc_clk_table->entries[i].dcfclk_mhz > dc_bw_params->dc_mode_limit.dcfclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].dcfclk_mhz < dc_bw_params->dc_mode_limit.dcfclk_mhz) {
- dml_clk_table->dcfclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dcfclk_mhz * 1000;
- dml_clk_table->dcfclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->dcfclk.clk_values_khz[i] = 0;
- dml_clk_table->dcfclk.num_clk_values = i;
- }
- } else {
- dml_clk_table->dcfclk.clk_values_khz[i] = dc_clk_table->entries[i].dcfclk_mhz * 1000;
- }
- } else {
- dml_clk_table->dcfclk.clk_values_khz[i] = 0;
- }
- }
- }
-
- /* fclk */
- if (dc_clk_table->num_entries_per_clk.num_fclk_levels) {
- dml_clk_table->fclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_fclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->fclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.fclk_mhz &&
- dc_clk_table->entries[i].fclk_mhz > dc_bw_params->dc_mode_limit.fclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].fclk_mhz < dc_bw_params->dc_mode_limit.fclk_mhz) {
- dml_clk_table->fclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.fclk_mhz * 1000;
- dml_clk_table->fclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->fclk.clk_values_khz[i] = 0;
- dml_clk_table->fclk.num_clk_values = i;
- }
- } else {
- dml_clk_table->fclk.clk_values_khz[i] = dc_clk_table->entries[i].fclk_mhz * 1000;
- }
- } else {
- dml_clk_table->fclk.clk_values_khz[i] = 0;
- }
- }
- }
-
- /* uclk */
- if (dc_clk_table->num_entries_per_clk.num_memclk_levels) {
- dml_clk_table->uclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_memclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->uclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.memclk_mhz &&
- dc_clk_table->entries[i].memclk_mhz > dc_bw_params->dc_mode_limit.memclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].memclk_mhz < dc_bw_params->dc_mode_limit.memclk_mhz) {
- dml_clk_table->uclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.memclk_mhz * 1000;
- dml_clk_table->uclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->uclk.clk_values_khz[i] = 0;
- dml_clk_table->uclk.num_clk_values = i;
- }
- } else {
- dml_clk_table->uclk.clk_values_khz[i] = dc_clk_table->entries[i].memclk_mhz * 1000;
- }
- } else {
- dml_clk_table->uclk.clk_values_khz[i] = 0;
- }
- }
- }
-
- /* dispclk */
- if (dc_clk_table->num_entries_per_clk.num_dispclk_levels) {
- dml_clk_table->dispclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dispclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->dispclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dispclk_mhz &&
- dc_clk_table->entries[i].dispclk_mhz > dc_bw_params->dc_mode_limit.dispclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].dispclk_mhz < dc_bw_params->dc_mode_limit.dispclk_mhz) {
- dml_clk_table->dispclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dispclk_mhz * 1000;
- dml_clk_table->dispclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->dispclk.clk_values_khz[i] = 0;
- dml_clk_table->dispclk.num_clk_values = i;
- }
- } else {
- dml_clk_table->dispclk.clk_values_khz[i] = dc_clk_table->entries[i].dispclk_mhz * 1000;
- }
- } else {
- dml_clk_table->dispclk.clk_values_khz[i] = 0;
- }
- }
- }
-
- /* dppclk */
- if (dc_clk_table->num_entries_per_clk.num_dppclk_levels) {
- dml_clk_table->dppclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dppclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->dppclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dppclk_mhz &&
- dc_clk_table->entries[i].dppclk_mhz > dc_bw_params->dc_mode_limit.dppclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].dppclk_mhz < dc_bw_params->dc_mode_limit.dppclk_mhz) {
- dml_clk_table->dppclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dppclk_mhz * 1000;
- dml_clk_table->dppclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->dppclk.clk_values_khz[i] = 0;
- dml_clk_table->dppclk.num_clk_values = i;
- }
- } else {
- dml_clk_table->dppclk.clk_values_khz[i] = dc_clk_table->entries[i].dppclk_mhz * 1000;
- }
- } else {
- dml_clk_table->dppclk.clk_values_khz[i] = 0;
- }
- }
- }
-
- /* dtbclk */
- if (dc_clk_table->num_entries_per_clk.num_dtbclk_levels) {
- dml_clk_table->dtbclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dtbclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->dtbclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dtbclk_mhz &&
- dc_clk_table->entries[i].dtbclk_mhz > dc_bw_params->dc_mode_limit.dtbclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].dtbclk_mhz < dc_bw_params->dc_mode_limit.dtbclk_mhz) {
- dml_clk_table->dtbclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dtbclk_mhz * 1000;
- dml_clk_table->dtbclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->dtbclk.clk_values_khz[i] = 0;
- dml_clk_table->dtbclk.num_clk_values = i;
- }
- } else {
- dml_clk_table->dtbclk.clk_values_khz[i] = dc_clk_table->entries[i].dtbclk_mhz * 1000;
- }
- } else {
- dml_clk_table->dtbclk.clk_values_khz[i] = 0;
- }
- }
- }
-
- /* socclk */
- if (dc_clk_table->num_entries_per_clk.num_socclk_levels) {
- dml_clk_table->socclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_socclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->socclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.socclk_mhz &&
- dc_clk_table->entries[i].socclk_mhz > dc_bw_params->dc_mode_limit.socclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].socclk_mhz < dc_bw_params->dc_mode_limit.socclk_mhz) {
- dml_clk_table->socclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.socclk_mhz * 1000;
- dml_clk_table->socclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->socclk.clk_values_khz[i] = 0;
- dml_clk_table->socclk.num_clk_values = i;
- }
- } else {
- dml_clk_table->socclk.clk_values_khz[i] = dc_clk_table->entries[i].socclk_mhz * 1000;
- }
- } else {
- dml_clk_table->socclk.clk_values_khz[i] = 0;
- }
- }
- }
-}
-
-static void override_dml_init_with_values_from_vbios(
- struct dml2_initialize_instance_in_out *dml_init,
- const struct dml2_configuration_options *config,
- const struct dc *in_dc)
-{
- const struct clk_bw_params *dc_bw_params = in_dc->clk_mgr->bw_params;
- struct dml2_soc_bb *dml_soc_bb = &dml_init->soc_bb;
- struct dml2_soc_state_table *dml_clk_table = &dml_init->soc_bb.clk_table;
-
- if (in_dc->ctx->dc_bios->bb_info.dram_clock_change_latency_100ns > 0)
- dml_soc_bb->power_management_parameters.dram_clk_change_blackout_us =
- (in_dc->ctx->dc_bios->bb_info.dram_clock_change_latency_100ns + 9) / 10;
-
- if (in_dc->ctx->dc_bios->bb_info.dram_sr_enter_exit_latency_100ns > 0)
- dml_soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us =
- (in_dc->ctx->dc_bios->bb_info.dram_sr_enter_exit_latency_100ns + 9) / 10;
-
- if (in_dc->ctx->dc_bios->bb_info.dram_sr_exit_latency_100ns > 0)
- dml_soc_bb->power_management_parameters.stutter_exit_latency_us =
- (in_dc->ctx->dc_bios->bb_info.dram_sr_exit_latency_100ns + 9) / 10;
-
- if (dc_bw_params->num_channels) {
- dml_clk_table->dram_config.channel_count = dc_bw_params->num_channels;
- dml_soc_bb->mall_allocated_for_dcn_mbytes = in_dc->caps.mall_size_total / 1048576;
- } else if (in_dc->ctx->dc_bios->vram_info.num_chans) {
- dml_clk_table->dram_config.channel_count = in_dc->ctx->dc_bios->vram_info.num_chans;
- dml_soc_bb->mall_allocated_for_dcn_mbytes = in_dc->caps.mall_size_total / 1048576;
- }
-
- if (dc_bw_params->dram_channel_width_bytes) {
- dml_clk_table->dram_config.channel_width_bytes = dc_bw_params->dram_channel_width_bytes;
- } else if (in_dc->ctx->dc_bios->vram_info.dram_channel_width_bytes) {
- dml_clk_table->dram_config.channel_width_bytes = in_dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
+ break;
}
- dml_init->soc_bb.xtalclk_mhz = in_dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency / 1000;
+ return project_id;
}
-
-static void override_dml_init_with_values_from_dmub(struct dml2_initialize_instance_in_out *dml_init,
+void dml21_populate_dml_init_params(struct dml2_initialize_instance_in_out *dml_init,
const struct dml2_configuration_options *config,
const struct dc *in_dc)
{
- /*
- * TODO - There seem to be overlaps between the values overridden from
- * dmub and vbios. Investigate and identify the values that DMUB needs
- * to own.
- */
-// const struct dmub_soc_bb_params *dmub_bb_params =
-// (const struct dmub_soc_bb_params *)config->bb_from_dmub;
-
-// if (dmub_bb_params == NULL)
-// return;
-
-// if (dmub_bb_params->dram_clk_change_blackout_ns > 0)
-// dml_init->soc_bb.power_management_parameters.dram_clk_change_blackout_us =
-// (double) dmub_bb_params->dram_clk_change_blackout_ns / 1000.0;
-// if (dmub_bb_params->dram_clk_change_read_only_ns > 0)
-// dml_init->soc_bb.power_management_parameters.dram_clk_change_read_only_us =
-// (double) dmub_bb_params->dram_clk_change_read_only_ns / 1000.0;
-// if (dmub_bb_params->dram_clk_change_write_only_ns > 0)
-// dml_init->soc_bb.power_management_parameters.dram_clk_change_write_only_us =
-// (double) dmub_bb_params->dram_clk_change_write_only_ns / 1000.0;
-// if (dmub_bb_params->fclk_change_blackout_ns > 0)
-// dml_init->soc_bb.power_management_parameters.fclk_change_blackout_us =
-// (double) dmub_bb_params->fclk_change_blackout_ns / 1000.0;
-// if (dmub_bb_params->g7_ppt_blackout_ns > 0)
-// dml_init->soc_bb.power_management_parameters.g7_ppt_blackout_us =
-// (double) dmub_bb_params->g7_ppt_blackout_ns / 1000.0;
-// if (dmub_bb_params->stutter_enter_plus_exit_latency_ns > 0)
-// dml_init->soc_bb.power_management_parameters.stutter_enter_plus_exit_latency_us =
-// (double) dmub_bb_params->stutter_enter_plus_exit_latency_ns / 1000.0;
-// if (dmub_bb_params->stutter_exit_latency_ns > 0)
-// dml_init->soc_bb.power_management_parameters.stutter_exit_latency_us =
-// (double) dmub_bb_params->stutter_exit_latency_ns / 1000.0;
-// if (dmub_bb_params->z8_stutter_enter_plus_exit_latency_ns > 0)
-// dml_init->soc_bb.power_management_parameters.z8_stutter_enter_plus_exit_latency_us =
-// (double) dmub_bb_params->z8_stutter_enter_plus_exit_latency_ns / 1000.0;
-// if (dmub_bb_params->z8_stutter_exit_latency_ns > 0)
-// dml_init->soc_bb.power_management_parameters.z8_stutter_exit_latency_us =
-// (double) dmub_bb_params->z8_stutter_exit_latency_ns / 1000.0;
-// if (dmub_bb_params->z8_min_idle_time_ns > 0)
-// dml_init->soc_bb.power_management_parameters.z8_min_idle_time =
-// (double) dmub_bb_params->z8_min_idle_time_ns / 1000.0;
-// #ifndef TRIM_DML2_DCN6B_IP_SENSITIVE
-// if (dmub_bb_params->type_b_dram_clk_change_blackout_ns > 0)
-// dml_init->soc_bb.power_management_parameters.lpddr5_dram_clk_change_blackout_us =
-// (double) dmub_bb_params->type_b_dram_clk_change_blackout_ns / 1000.0;
-// if (dmub_bb_params->type_b_ppt_blackout_ns > 0)
-// dml_init->soc_bb.power_management_parameters.lpddr5_ppt_blackout_us =
-// (double) dmub_bb_params->type_b_ppt_blackout_ns / 1000.0;
-// #else
-// if (dmub_bb_params->type_b_dram_clk_change_blackout_ns > 0)
-// dml_init->soc_bb.power_management_parameters.type_b_dram_clk_change_blackout_us =
-// (double) dmub_bb_params->type_b_dram_clk_change_blackout_ns / 1000.0;
-// if (dmub_bb_params->type_b_ppt_blackout_ns > 0)
-// dml_init->soc_bb.power_management_parameters.type_b_ppt_blackout_us =
-// (double) dmub_bb_params->type_b_ppt_blackout_ns / 1000.0;
-// #endif
-// if (dmub_bb_params->vmin_limit_dispclk_khz > 0)
-// dml_init->soc_bb.vmin_limit.dispclk_khz = dmub_bb_params->vmin_limit_dispclk_khz;
-// if (dmub_bb_params->vmin_limit_dcfclk_khz > 0)
-// dml_init->soc_bb.vmin_limit.dcfclk_khz = dmub_bb_params->vmin_limit_dcfclk_khz;
-// if (dmub_bb_params->g7_temperature_read_blackout_ns > 0)
-// dml_init->soc_bb.power_management_parameters.g7_temperature_read_blackout_us =
-// (double) dmub_bb_params->g7_temperature_read_blackout_ns / 1000.0;
-}
+ dml_init->options.project_id = dml21_dcn_revision_to_dml2_project_id(in_dc->ctx->dce_version);
-static void override_dml_init_with_values_from_software_policy(struct dml2_initialize_instance_in_out *dml_init,
- const struct dml2_configuration_options *config,
- const struct dc *in_dc)
-{
- if (!config->use_native_soc_bb_construction) {
+ if (config->use_native_soc_bb_construction) {
+ in_dc->soc_and_ip_translator->translator_funcs->get_soc_bb(&dml_init->soc_bb, in_dc, config);
+ in_dc->soc_and_ip_translator->translator_funcs->get_ip_caps(&dml_init->ip_caps);
+ } else {
dml_init->soc_bb = config->external_socbb_ip_params->soc_bb;
dml_init->ip_caps = config->external_socbb_ip_params->ip_params;
}
- if (in_dc->bb_overrides.sr_exit_time_ns)
- dml_init->soc_bb.power_management_parameters.stutter_exit_latency_us =
- in_dc->bb_overrides.sr_exit_time_ns / 1000.0;
-
- if (in_dc->bb_overrides.sr_enter_plus_exit_time_ns)
- dml_init->soc_bb.power_management_parameters.stutter_enter_plus_exit_latency_us =
- in_dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
-
- if (in_dc->bb_overrides.dram_clock_change_latency_ns)
- dml_init->soc_bb.power_management_parameters.dram_clk_change_blackout_us =
- in_dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
-
- if (in_dc->bb_overrides.fclk_clock_change_latency_ns)
- dml_init->soc_bb.power_management_parameters.fclk_change_blackout_us =
- in_dc->bb_overrides.fclk_clock_change_latency_ns / 1000.0;
-}
-
-void dml21_populate_dml_init_params(struct dml2_initialize_instance_in_out *dml_init,
- const struct dml2_configuration_options *config,
- const struct dc *in_dc)
-{
- populate_default_dml_init_params(dml_init, config, in_dc);
-
- override_dml_init_with_values_from_hardware_default(dml_init, config, in_dc);
-
- override_dml_init_with_values_from_smu(dml_init, config, in_dc);
-
- override_dml_init_with_values_from_vbios(dml_init, config, in_dc);
-
- override_dml_init_with_values_from_dmub(dml_init, config, in_dc);
-
- override_dml_init_with_values_from_software_policy(dml_init, config, in_dc);
+ dml21_populate_pmo_options(&dml_init->options.pmo_options, in_dc, config);
}
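
The net effect of this refactor: the per-ASIC default bounding box and the whole stack of SMU/VBIOS/DMUB/software-policy override passes deleted above now live behind the soc_and_ip_translator, and dml21 itself only picks the project id and decides between native construction and an externally supplied box. A hedged sketch of the dispatch shape; only the two callback names come from the code above, the struct layout is an assumption for illustration:

/* Assumed layout, for illustration only; the real definition lives in
 * soc_and_ip_translator.h. */
struct soc_and_ip_translator_funcs {
    void (*get_soc_bb)(struct dml2_soc_bb *soc_bb, const struct dc *dc,
                       const struct dml2_configuration_options *config);
    void (*get_ip_caps)(struct dml2_ip_capabilities *ip_caps);
};

static void populate_native_bb(struct dml2_initialize_instance_in_out *init,
                               const struct dml2_configuration_options *cfg,
                               const struct dc *dc)
{
    const struct soc_and_ip_translator_funcs *f =
        dc->soc_and_ip_translator->translator_funcs;

    f->get_soc_bb(&init->soc_bb, dc, cfg);  /* defaults plus all overrides */
    f->get_ip_caps(&init->ip_caps);
}
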
static unsigned int calc_max_hardware_v_total(const struct dc_stream_state *stream)
@@ -422,25 +84,29 @@ static unsigned int calc_max_hardware_v_total(const struct dc_stream_state *stre
static void populate_dml21_timing_config_from_stream_state(struct dml2_timing_cfg *timing,
struct dc_stream_state *stream,
+ struct pipe_ctx *pipe_ctx,
struct dml2_context *dml_ctx)
{
unsigned int hblank_start, vblank_start, min_hardware_refresh_in_uhz;
+ uint32_t pix_clk_100hz;
- timing->h_active = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
+ timing->h_active = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right + pipe_ctx->dsc_padding_params.dsc_hactive_padding;
timing->v_active = stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top;
timing->h_front_porch = stream->timing.h_front_porch;
timing->v_front_porch = stream->timing.v_front_porch;
timing->pixel_clock_khz = stream->timing.pix_clk_100hz / 10;
+ if (pipe_ctx->dsc_padding_params.dsc_hactive_padding != 0)
+ timing->pixel_clock_khz = pipe_ctx->dsc_padding_params.dsc_pix_clk_100hz / 10;
if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
timing->pixel_clock_khz *= 2;
- timing->h_total = stream->timing.h_total;
+ timing->h_total = stream->timing.h_total + pipe_ctx->dsc_padding_params.dsc_htotal_padding;
timing->v_total = stream->timing.v_total;
timing->h_sync_width = stream->timing.h_sync_width;
timing->interlaced = stream->timing.flags.INTERLACE;
hblank_start = stream->timing.h_total - stream->timing.h_front_porch;
- timing->h_blank_end = hblank_start - stream->timing.h_addressable
+ timing->h_blank_end = hblank_start - stream->timing.h_addressable - pipe_ctx->dsc_padding_params.dsc_hactive_padding
- stream->timing.h_border_left - stream->timing.h_border_right;
if (hblank_start < stream->timing.h_addressable)
@@ -459,15 +125,16 @@ static void populate_dml21_timing_config_from_stream_state(struct dml2_timing_cf
/* limit min refresh rate to DC cap */
min_hardware_refresh_in_uhz = stream->timing.min_refresh_in_uhz;
if (stream->ctx->dc->caps.max_v_total != 0) {
- min_hardware_refresh_in_uhz = div64_u64((stream->timing.pix_clk_100hz * 100000000ULL),
- (stream->timing.h_total * (long long)calc_max_hardware_v_total(stream)));
+ if (pipe_ctx->dsc_padding_params.dsc_hactive_padding != 0) {
+ pix_clk_100hz = pipe_ctx->dsc_padding_params.dsc_pix_clk_100hz;
+ } else {
+ pix_clk_100hz = stream->timing.pix_clk_100hz;
+ }
+ min_hardware_refresh_in_uhz = div64_u64((pix_clk_100hz * 100000000ULL),
+ (timing->h_total * (long long)calc_max_hardware_v_total(stream)));
}
- if (stream->timing.min_refresh_in_uhz > min_hardware_refresh_in_uhz) {
- timing->drr_config.min_refresh_uhz = stream->timing.min_refresh_in_uhz;
- } else {
- timing->drr_config.min_refresh_uhz = min_hardware_refresh_in_uhz;
- }
+ timing->drr_config.min_refresh_uhz = max(stream->timing.min_refresh_in_uhz, min_hardware_refresh_in_uhz);
if (dml_ctx->config.callbacks.get_max_flickerless_instant_vtotal_increase &&
stream->ctx->dc->config.enable_fpo_flicker_detection == 1)
@@ -515,21 +182,6 @@ static void populate_dml21_timing_config_from_stream_state(struct dml2_timing_cf
timing->vblank_nom = timing->v_total - timing->v_active;
}
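
Two changes meet in this hunk: when DSC padding is active, the refresh floor is computed from the padded pixel clock and the padded h_total (the timing-> value now includes dsc_htotal_padding), and the trailing if/else collapses into max(). The floor itself is refresh_uhz = pix_clk_100hz * 1e8 / (h_total * max_v_total), i.e. the slowest rate reachable by stretching v_total to the hardware cap. A standalone sketch of the computation, with plain 64-bit division standing in for div64_u64():

#include <stdint.h>
#include <stdio.h>

/* Slowest refresh the hardware can reach, in micro-Hz: stretch v_total
 * to its cap and divide the (possibly DSC-padded) pixel clock by the
 * resulting frame size. */
static uint64_t min_hw_refresh_uhz(uint32_t pix_clk_100hz,
                                   uint32_t h_total, uint32_t max_v_total)
{
    return ((uint64_t)pix_clk_100hz * 100000000ULL) /
           ((uint64_t)h_total * max_v_total);
}

int main(void)
{
    /* 148.5 MHz pixel clock, h_total 2200, v_total capped at 4095 */
    uint64_t floor_uhz = min_hw_refresh_uhz(1485000, 2200, 4095);

    printf("floor = %llu uHz\n", (unsigned long long)floor_uhz); /* ~16.5 Hz */
    return 0;
}
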
-/**
- * adjust_dml21_hblank_timing_config_from_pipe_ctx - Adjusts the horizontal blanking timing configuration
- * based on the pipe context.
- * @timing: Pointer to the dml2_timing_cfg structure to be adjusted.
- * @pipe: Pointer to the pipe_ctx structure containing the horizontal blanking borrow value.
- *
- * This function modifies the horizontal active and blank end timings by adding and subtracting
- * the horizontal blanking borrow value from the pipe context, respectively.
- */
-static void adjust_dml21_hblank_timing_config_from_pipe_ctx(struct dml2_timing_cfg *timing, struct pipe_ctx *pipe)
-{
- timing->h_active += pipe->hblank_borrow;
- timing->h_blank_end -= pipe->hblank_borrow;
-}
-
static void populate_dml21_output_config_from_stream_state(struct dml2_link_output_cfg *output,
struct dc_stream_state *stream, const struct pipe_ctx *pipe)
{
@@ -829,7 +481,9 @@ static const struct scaler_data *get_scaler_data_for_plane(
temp_pipe->plane_state = pipe->plane_state;
temp_pipe->plane_res.scl_data.taps = pipe->plane_res.scl_data.taps;
temp_pipe->stream_res = pipe->stream_res;
- temp_pipe->hblank_borrow = pipe->hblank_borrow;
+ temp_pipe->dsc_padding_params.dsc_hactive_padding = pipe->dsc_padding_params.dsc_hactive_padding;
+ temp_pipe->dsc_padding_params.dsc_htotal_padding = pipe->dsc_padding_params.dsc_htotal_padding;
+ temp_pipe->dsc_padding_params.dsc_pix_clk_100hz = pipe->dsc_padding_params.dsc_pix_clk_100hz;
dml_ctx->config.callbacks.build_scaling_params(temp_pipe);
break;
}
@@ -1097,8 +751,7 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s
disp_cfg_stream_location = dml_dispcfg->num_streams++;
ASSERT(disp_cfg_stream_location >= 0 && disp_cfg_stream_location < __DML2_WRAPPER_MAX_STREAMS_PLANES__);
- populate_dml21_timing_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].timing, context->streams[stream_index], dml_ctx);
- adjust_dml21_hblank_timing_config_from_pipe_ctx(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].timing, &context->res_ctx.pipe_ctx[stream_index]);
+ populate_dml21_timing_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].timing, context->streams[stream_index], &context->res_ctx.pipe_ctx[stream_index], dml_ctx);
populate_dml21_output_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].output, context->streams[stream_index], &context->res_ctx.pipe_ctx[stream_index]);
populate_dml21_stream_overrides_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location], context->streams[stream_index], &context->stream_status[stream_index]);
@@ -1165,6 +818,8 @@ void dml21_copy_clocks_to_dc_state(struct dml2_context *in_ctx, struct dc_state
context->bw_ctx.bw.dcn.clk.socclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.socclk_khz;
context->bw_ctx.bw.dcn.clk.subvp_prefetch_dramclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.svp_prefetch_no_throttle.uclk_khz;
context->bw_ctx.bw.dcn.clk.subvp_prefetch_fclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.svp_prefetch_no_throttle.fclk_khz;
+ context->bw_ctx.bw.dcn.clk.stutter_efficiency.base_efficiency = in_ctx->v21.mode_programming.programming->stutter.base_percent_efficiency;
+ context->bw_ctx.bw.dcn.clk.stutter_efficiency.low_power_efficiency = in_ctx->v21.mode_programming.programming->stutter.low_power_percent_efficiency;
}
static struct dml2_dchub_watermark_regs *wm_set_index_to_dc_wm_set(union dcn_watermark_set *watermarks, const enum dml2_dchub_watermark_reg_set_index wm_index)
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
index 03de3cf06ae5..08f7f03b1023 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
@@ -60,7 +60,7 @@ static void dml21_init(const struct dc *in_dc, struct dml2_context *dml_ctx, con
DC_FP_START();
- dml21_populate_dml_init_params(&dml_ctx->v21.dml_init, config, in_dc);
+ dml21_populate_dml_init_params(&dml_ctx->v21.dml_init, &dml_ctx->config, in_dc);
dml2_initialize_instance(&dml_ctx->v21.dml_init);
@@ -224,7 +224,9 @@ static bool dml21_mode_check_and_programming(const struct dc *in_dc, struct dc_s
dml_ctx->config.svp_pstate.callbacks.release_phantom_streams_and_planes(in_dc, context);
/* Populate stream, plane mappings and other fields in display config. */
+ DC_FP_START();
result = dml21_map_dc_state_into_dml_display_cfg(in_dc, context, dml_ctx);
+ DC_FP_END();
if (!result)
return false;
@@ -279,7 +281,9 @@ static bool dml21_check_mode_support(const struct dc *in_dc, struct dc_state *co
dml_ctx->config.svp_pstate.callbacks.release_phantom_streams_and_planes(in_dc, context);
mode_support->dml2_instance = dml_init->dml2_instance;
+ DC_FP_START();
dml21_map_dc_state_into_dml_display_cfg(in_dc, context, dml_ctx);
+ DC_FP_END();
dml_ctx->v21.mode_programming.dml2_instance->scratch.build_mode_programming_locals.mode_programming_params.programming = dml_ctx->v21.mode_programming.programming;
DC_FP_START();
is_supported = dml2_check_mode_supported(mode_support);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h
index b05030926ce8..91955bbe24b8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h
@@ -159,6 +159,8 @@ struct dml2_dchub_watermark_regs {
uint32_t sr_exit;
uint32_t sr_enter_z8;
uint32_t sr_exit_z8;
+ uint32_t sr_enter_low_power;
+ uint32_t sr_exit_low_power;
uint32_t uclk_pstate;
uint32_t fclk_pstate;
uint32_t temp_read_or_ppt;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h
index 8c9f414aa6bf..176f55947664 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h
@@ -96,6 +96,8 @@ struct dml2_soc_power_management_parameters {
double g7_temperature_read_blackout_us;
double stutter_enter_plus_exit_latency_us;
double stutter_exit_latency_us;
+ double low_power_stutter_enter_plus_exit_latency_us;
+ double low_power_stutter_exit_latency_us;
double z8_stutter_enter_plus_exit_latency_us;
double z8_stutter_exit_latency_us;
double z8_min_idle_time;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h
index 98c0234e2f47..41adb1104d0f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h
@@ -16,9 +16,9 @@ struct dml2_instance;
enum dml2_project_id {
dml2_project_invalid = 0,
- dml2_project_dcn4x_stage1 = 1,
- dml2_project_dcn4x_stage2 = 2,
- dml2_project_dcn4x_stage2_auto_drr_svp = 3,
+ dml2_project_dcn4x_stage1,
+ dml2_project_dcn4x_stage2,
+ dml2_project_dcn4x_stage2_auto_drr_svp,
};
enum dml2_pstate_change_support {
@@ -417,6 +417,8 @@ struct dml2_display_cfg_programming {
struct {
bool supported_in_blank; // Changing to configurations where this is false requires stutter to be disabled during the transition
+ uint8_t base_percent_efficiency; //LP1
+ uint8_t low_power_percent_efficiency; //LP2
} stutter;
struct {
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
index b9cff2198511..bf62d42b3f78 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
@@ -1238,18 +1238,27 @@ static void CalculateDETBufferSize(
static double CalculateRequiredDispclk(
enum dml2_odm_mode ODMMode,
- double PixelClock)
+ double PixelClock,
+ bool isTMDS420)
{
+ double DispClk;
if (ODMMode == dml2_odm_mode_combine_4to1) {
- return PixelClock / 4.0;
+ DispClk = PixelClock / 4.0;
} else if (ODMMode == dml2_odm_mode_combine_3to1) {
- return PixelClock / 3.0;
+ DispClk = PixelClock / 3.0;
} else if (ODMMode == dml2_odm_mode_combine_2to1) {
- return PixelClock / 2.0;
+ DispClk = PixelClock / 2.0;
} else {
- return PixelClock;
+ DispClk = PixelClock;
+ }
+
+ if (isTMDS420) {
+ double TMDS420MinPixClock = PixelClock / 2.0;
+ DispClk = math_max2(DispClk, TMDS420MinPixClock);
}
+
+ return DispClk;
}
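
The new isTMDS420 leg gives HDMI 4:2:0 a DISPCLK floor of half the pixel clock: ODM combine may divide DISPCLK by up to 4, but TMDS 4:2:0 output cannot run the display clock below PixelClock / 2, so the deeper divides are clamped back up. A condensed standalone version with the ODM divisor folded into one parameter:

/* DISPCLK = PixelClock / odm_div, floored at PixelClock / 2 when the
 * output is HDMI TMDS 4:2:0. */
static double required_dispclk(double pixel_clock, int odm_div, int is_tmds420)
{
    double dispclk = pixel_clock / odm_div;

    if (is_tmds420 && dispclk < pixel_clock / 2.0)
        dispclk = pixel_clock / 2.0;

    return dispclk;
}
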
static double TruncToValidBPP(
@@ -4122,11 +4131,12 @@ static noinline_for_stack void CalculateODMMode(
bool success;
bool UseDSC = DSCEnable && (NumberOfDSCSlices > 0);
enum dml2_odm_mode DecidedODMMode;
+ bool isTMDS420 = (OutFormat == dml2_420 && Output == dml2_hdmi);
- SurfaceRequiredDISPCLKWithoutODMCombine = CalculateRequiredDispclk(dml2_odm_mode_bypass, PixelClock);
- SurfaceRequiredDISPCLKWithODMCombineTwoToOne = CalculateRequiredDispclk(dml2_odm_mode_combine_2to1, PixelClock);
- SurfaceRequiredDISPCLKWithODMCombineThreeToOne = CalculateRequiredDispclk(dml2_odm_mode_combine_3to1, PixelClock);
- SurfaceRequiredDISPCLKWithODMCombineFourToOne = CalculateRequiredDispclk(dml2_odm_mode_combine_4to1, PixelClock);
+ SurfaceRequiredDISPCLKWithoutODMCombine = CalculateRequiredDispclk(dml2_odm_mode_bypass, PixelClock, isTMDS420);
+ SurfaceRequiredDISPCLKWithODMCombineTwoToOne = CalculateRequiredDispclk(dml2_odm_mode_combine_2to1, PixelClock, isTMDS420);
+ SurfaceRequiredDISPCLKWithODMCombineThreeToOne = CalculateRequiredDispclk(dml2_odm_mode_combine_3to1, PixelClock, isTMDS420);
+ SurfaceRequiredDISPCLKWithODMCombineFourToOne = CalculateRequiredDispclk(dml2_odm_mode_combine_4to1, PixelClock, isTMDS420);
#ifdef __DML_VBA_DEBUG__
DML_LOG_VERBOSE("DML::%s: ODMUse = %d\n", __func__, ODMUse);
DML_LOG_VERBOSE("DML::%s: Output = %d\n", __func__, Output);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.c
index 28394de02885..640087e862f8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.c
@@ -10,7 +10,7 @@ bool dml2_core_create(enum dml2_project_id project_id, struct dml2_core_instance
{
bool result = false;
- if (out == 0)
+ if (!out)
return false;
memset(out, 0, sizeof(struct dml2_core_instance));
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
index 28687565ac22..ffb8c09f37a5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
@@ -201,6 +201,8 @@ struct dml2_core_internal_watermarks {
double WritebackFCLKChangeWatermark;
double StutterExitWatermark;
double StutterEnterPlusExitWatermark;
+ double LowPowerStutterExitWatermark;
+ double LowPowerStutterEnterPlusExitWatermark;
double Z8StutterExitWatermark;
double Z8StutterEnterPlusExitWatermark;
double USRRetrainingWatermark;
@@ -877,6 +879,9 @@ struct dml2_core_internal_mode_program {
double Z8StutterEfficiency;
unsigned int Z8NumberOfStutterBurstsPerFrame;
double Z8StutterEfficiencyNotIncludingVBlank;
+ double LowPowerStutterEfficiency;
+ double LowPowerStutterEfficiencyNotIncludingVBlank;
+ unsigned int LowPowerNumberOfStutterBurstsPerFrame;
double StutterPeriod;
double Z8StutterEfficiencyBestCase;
unsigned int Z8NumberOfStutterBurstsPerFrameBestCase;
@@ -1016,6 +1021,8 @@ struct dml2_core_internal_SOCParametersList {
double FCLKChangeLatency;
double SRExitTime;
double SREnterPlusExitTime;
+ double SRExitTimeLowPower;
+ double SREnterPlusExitTimeLowPower;
double SRExitZ8Time;
double SREnterPlusExitZ8Time;
double USRRetrainingLatency;
@@ -1851,9 +1858,11 @@ struct dml2_core_calcs_CalculateStutterEfficiency_params {
unsigned int CompbufReservedSpaceZs;
bool hw_debug5;
double SRExitTime;
+ double SRExitTimeLowPower;
double SRExitZ8Time;
bool SynchronizeTimings;
double StutterEnterPlusExitWatermark;
+ double LowPowerStutterEnterPlusExitWatermark;
double Z8StutterEnterPlusExitWatermark;
bool ProgressiveToInterlaceUnitInOPP;
double *MinTTUVBlank;
@@ -1879,7 +1888,10 @@ struct dml2_core_calcs_CalculateStutterEfficiency_params {
// output
double *StutterEfficiencyNotIncludingVBlank;
double *StutterEfficiency;
+ double *LowPowerStutterEfficiencyNotIncludingVBlank;
+ double *LowPowerStutterEfficiency;
unsigned int *NumberOfStutterBurstsPerFrame;
+ unsigned int *LowPowerNumberOfStutterBurstsPerFrame;
double *Z8StutterEfficiencyNotIncludingVBlank;
double *Z8StutterEfficiency;
unsigned int *Z8NumberOfStutterBurstsPerFrame;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.c
index 3861bc6c9621..dfd01440737d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.c
@@ -20,7 +20,7 @@ bool dml2_dpmm_create(enum dml2_project_id project_id, struct dml2_dpmm_instance
{
bool result = false;
- if (out == 0)
+ if (!out)
return false;
memset(out, 0, sizeof(struct dml2_dpmm_instance));
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c
index cd3fbc0591d8..c60b8fe90819 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c
@@ -15,7 +15,7 @@ bool dml2_mcg_create(enum dml2_project_id project_id, struct dml2_mcg_instance *
{
bool result = false;
- if (out == 0)
+ if (!out)
return false;
memset(out, 0, sizeof(struct dml2_mcg_instance));
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c
index e763c8e45da8..1b9579a32ff2 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c
@@ -48,18 +48,19 @@ static void set_reserved_time_on_all_planes_with_stream_index(struct display_con
static void remove_duplicates(double *list_a, int *list_a_size)
{
- int cur_element = 0;
- // For all elements b[i] in list_b[]
- while (cur_element < *list_a_size - 1) {
- if (list_a[cur_element] == list_a[cur_element + 1]) {
- for (int j = cur_element + 1; j < *list_a_size - 1; j++) {
- list_a[j] = list_a[j + 1];
- }
- *list_a_size = *list_a_size - 1;
- } else {
- cur_element++;
+ int j = 0;
+
+ if (*list_a_size == 0)
+ return;
+
+ for (int i = 1; i < *list_a_size; i++) {
+ if (list_a[j] != list_a[i]) {
+ j++;
+ list_a[j] = list_a[i];
}
}
+
+ *list_a_size = j + 1;
}
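
The rewrite replaces an O(n^2) shift-everything-left dedup with the standard single-pass, in-place dedup of a sorted array: write index j trails read index i and only advances when a new value appears. A standalone version with a small driver:

#include <stdio.h>

/* Collapse runs of equal values in a sorted array, in place, O(n). */
static void remove_duplicates(double *a, int *n)
{
    int j = 0;

    if (*n == 0)
        return;

    for (int i = 1; i < *n; i++) {
        if (a[j] != a[i]) {
            j++;
            a[j] = a[i];
        }
    }
    *n = j + 1;
}

int main(void)
{
    double v[] = { 1.0, 1.0, 2.5, 2.5, 2.5, 3.0 };
    int n = 6;

    remove_duplicates(v, &n);
    for (int i = 0; i < n; i++)
        printf("%g ", v[i]);  /* 1 2.5 3 */
    printf("\n");
    return 0;
}
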
static bool increase_mpc_combine_factor(unsigned int *mpc_combine_factor, unsigned int limit)
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c
index 7ed0242a4b33..55d2464365d0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c
@@ -26,7 +26,7 @@ bool dml2_pmo_create(enum dml2_project_id project_id, struct dml2_pmo_instance *
{
bool result = false;
- if (out == 0)
+ if (!out)
return false;
memset(out, 0, sizeof(struct dml2_pmo_instance));
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
index 5f1b49a50049..4cfe64aa8492 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
@@ -473,7 +473,6 @@ static void sort_pipes_for_splitting(struct dc_plane_pipe_pool *pipes)
{
bool sorted, swapped;
unsigned int cur_index;
- unsigned int temp;
int odm_slice_index;
for (odm_slice_index = 0; odm_slice_index < pipes->num_pipes_assigned_to_plane_for_odm_combine; odm_slice_index++) {
@@ -489,9 +488,8 @@ static void sort_pipes_for_splitting(struct dc_plane_pipe_pool *pipes)
swapped = false;
while (!sorted) {
if (pipes->pipes_assigned_to_plane[odm_slice_index][cur_index] > pipes->pipes_assigned_to_plane[odm_slice_index][cur_index + 1]) {
- temp = pipes->pipes_assigned_to_plane[odm_slice_index][cur_index];
- pipes->pipes_assigned_to_plane[odm_slice_index][cur_index] = pipes->pipes_assigned_to_plane[odm_slice_index][cur_index + 1];
- pipes->pipes_assigned_to_plane[odm_slice_index][cur_index + 1] = temp;
+ swap(pipes->pipes_assigned_to_plane[odm_slice_index][cur_index + 1],
+ pipes->pipes_assigned_to_plane[odm_slice_index][cur_index]);
swapped = true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c
index a56e75cdf712..c59f825cfae9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c
@@ -654,14 +654,14 @@ static void set_phantom_stream_timing(struct dml2_context *ctx, struct dc_state
unsigned int svp_height,
unsigned int svp_vstartup)
{
- unsigned int i, pipe_idx;
+ unsigned int i;
double line_time, fp_and_sync_width_time;
struct pipe_ctx *pipe;
uint32_t phantom_vactive, phantom_bp, pstate_width_fw_delay_lines;
static const double cvt_rb_vblank_max = ((double) 460 / (1000 * 1000));
// Find DML pipe index (pipe_idx) using dc_pipe_idx
- for (i = 0, pipe_idx = 0; i < ctx->config.dcn_pipe_count; i++) {
+ for (i = 0; i < ctx->config.dcn_pipe_count; i++) {
pipe = &state->res_ctx.pipe_ctx[i];
if (!pipe->stream)
@@ -669,8 +669,6 @@ static void set_phantom_stream_timing(struct dml2_context *ctx, struct dc_state
if (i == dc_pipe_idx)
break;
-
- pipe_idx++;
}
// Calculate lines required for pstate allow width and FW processing delays
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
index 0318260370ed..9deb03a18ccc 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
@@ -535,7 +535,7 @@ static bool dml2_validate_only(struct dc_state *context, enum dc_validate_mode v
if (result)
result = does_configuration_meet_sw_policies(dml2, &dml2->v20.scratch.cur_display_config, &dml2->v20.scratch.mode_support_info);
- return (result == 1) ? true : false;
+ return result == 1;
}
static void dml2_apply_debug_options(const struct dc *dc, struct dml2_context *dml2)
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h
index 5a6a861402b3..5f6b431ec398 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h
@@ -673,6 +673,16 @@ struct dcn401_dpp {
struct pwl_params pwl_data;
};
+enum dcn401_dscl_mode_sel {
+ DCN401_DSCL_MODE_SCALING_444_BYPASS = 0,
+ DCN401_DSCL_MODE_SCALING_444_RGB_ENABLE = 1,
+ DCN401_DSCL_MODE_SCALING_444_YCBCR_ENABLE = 2,
+ DCN401_DSCL_MODE_SCALING_420_YCBCR_ENABLE = 3,
+ DCN401_DSCL_MODE_SCALING_420_LUMA_BYPASS = 4,
+ DCN401_DSCL_MODE_SCALING_420_CHROMA_BYPASS = 5,
+ DCN401_DSCL_MODE_DSCL_BYPASS = 6
+};
+
bool dpp401_construct(struct dcn401_dpp *dpp401,
struct dc_context *ctx,
uint32_t inst,
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c
index 2f92e7d4981b..6df3419f825f 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c
@@ -78,16 +78,6 @@ enum dscl_autocal_mode {
AUTOCAL_MODE_AUTOREPLICATE = 3
};
-enum dscl_mode_sel {
- DSCL_MODE_SCALING_444_BYPASS = 0,
- DSCL_MODE_SCALING_444_RGB_ENABLE = 1,
- DSCL_MODE_SCALING_444_YCBCR_ENABLE = 2,
- DSCL_MODE_SCALING_420_YCBCR_ENABLE = 3,
- DSCL_MODE_SCALING_420_LUMA_BYPASS = 4,
- DSCL_MODE_SCALING_420_CHROMA_BYPASS = 5,
- DSCL_MODE_DSCL_BYPASS = 6
-};
-
static int dpp401_dscl_get_pixel_depth_val(enum lb_pixel_depth depth)
{
if (depth == LB_PIXEL_DEPTH_30BPP)
@@ -122,7 +112,7 @@ static bool dpp401_dscl_is_420_format(enum pixel_format format)
return false;
}
-static enum dscl_mode_sel dpp401_dscl_get_dscl_mode(
+static enum dcn401_dscl_mode_sel dpp401_dscl_get_dscl_mode(
struct dpp *dpp_base,
const struct scaler_data *data,
bool dbg_always_scale)
@@ -132,7 +122,7 @@ static enum dscl_mode_sel dpp401_dscl_get_dscl_mode(
if (dpp_base->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT) {
/* DSCL is processing data in fixed format */
if (data->format == PIXEL_FORMAT_FP16)
- return DSCL_MODE_DSCL_BYPASS;
+ return DCN401_DSCL_MODE_DSCL_BYPASS;
}
if (data->ratios.horz.value == one
@@ -140,20 +130,20 @@ static enum dscl_mode_sel dpp401_dscl_get_dscl_mode(
&& data->ratios.horz_c.value == one
&& data->ratios.vert_c.value == one
&& !dbg_always_scale)
- return DSCL_MODE_SCALING_444_BYPASS;
+ return DCN401_DSCL_MODE_SCALING_444_BYPASS;
if (!dpp401_dscl_is_420_format(data->format)) {
if (dpp401_dscl_is_video_format(data->format))
- return DSCL_MODE_SCALING_444_YCBCR_ENABLE;
+ return DCN401_DSCL_MODE_SCALING_444_YCBCR_ENABLE;
else
- return DSCL_MODE_SCALING_444_RGB_ENABLE;
+ return DCN401_DSCL_MODE_SCALING_444_RGB_ENABLE;
}
if (data->ratios.horz.value == one && data->ratios.vert.value == one)
- return DSCL_MODE_SCALING_420_LUMA_BYPASS;
+ return DCN401_DSCL_MODE_SCALING_420_LUMA_BYPASS;
if (data->ratios.horz_c.value == one && data->ratios.vert_c.value == one)
- return DSCL_MODE_SCALING_420_CHROMA_BYPASS;
+ return DCN401_DSCL_MODE_SCALING_420_CHROMA_BYPASS;
- return DSCL_MODE_SCALING_420_YCBCR_ENABLE;
+ return DCN401_DSCL_MODE_SCALING_420_YCBCR_ENABLE;
}
static void dpp401_power_on_dscl(
@@ -1071,7 +1061,7 @@ void dpp401_dscl_set_scaler_manual_scale(struct dpp *dpp_base,
uint32_t v_num_taps_c = scl_data->taps.v_taps_c - 1;
uint32_t h_num_taps = scl_data->taps.h_taps - 1;
uint32_t h_num_taps_c = scl_data->taps.h_taps_c - 1;
- enum dscl_mode_sel dscl_mode = dpp401_dscl_get_dscl_mode(
+ enum dcn401_dscl_mode_sel dscl_mode = dpp401_dscl_get_dscl_mode(
dpp_base, scl_data, dpp_base->ctx->dc->debug.always_scale);
bool ycbcr = scl_data->format >= PIXEL_FORMAT_VIDEO_BEGIN
&& scl_data->format <= PIXEL_FORMAT_VIDEO_END;
@@ -1102,7 +1092,7 @@ void dpp401_dscl_set_scaler_manual_scale(struct dpp *dpp_base,
dpp->scl_data = *scl_data;
if ((dpp->base.ctx->dc->config.use_spl) && (!dpp->base.ctx->dc->debug.disable_spl)) {
- dscl_mode = (enum dscl_mode_sel) scl_data->dscl_prog_data.dscl_mode;
+ dscl_mode = (enum dcn401_dscl_mode_sel) scl_data->dscl_prog_data.dscl_mode;
rect = (struct rect *)&scl_data->dscl_prog_data.recout;
mpc_width = scl_data->dscl_prog_data.mpc_size.width;
mpc_height = scl_data->dscl_prog_data.mpc_size.height;
@@ -1112,7 +1102,7 @@ void dpp401_dscl_set_scaler_manual_scale(struct dpp *dpp_base,
h_num_taps_c = scl_data->dscl_prog_data.taps.h_taps_c;
}
if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.dscl) {
- if (dscl_mode != DSCL_MODE_DSCL_BYPASS)
+ if (dscl_mode != DCN401_DSCL_MODE_DSCL_BYPASS)
dpp401_power_on_dscl(dpp_base, true);
}
@@ -1139,7 +1129,7 @@ void dpp401_dscl_set_scaler_manual_scale(struct dpp *dpp_base,
/* SCL mode */
REG_UPDATE(SCL_MODE, DSCL_MODE, dscl_mode);
- if (dscl_mode == DSCL_MODE_DSCL_BYPASS) {
+ if (dscl_mode == DCN401_DSCL_MODE_DSCL_BYPASS) {
if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.dscl)
dpp401_power_on_dscl(dpp_base, false);
return;
@@ -1149,7 +1139,7 @@ void dpp401_dscl_set_scaler_manual_scale(struct dpp *dpp_base,
lb_config = dpp401_dscl_find_lb_memory_config(dpp, scl_data);
dpp401_dscl_set_lb(dpp, &scl_data->lb_params, lb_config);
- if (dscl_mode == DSCL_MODE_SCALING_444_BYPASS) {
+ if (dscl_mode == DCN401_DSCL_MODE_SCALING_444_BYPASS) {
if (dpp->base.ctx->dc->config.prefer_easf)
dpp401_dscl_disable_easf(dpp_base, scl_data);
dpp401_dscl_program_isharp(dpp_base, scl_data, program_isharp_1dlut, &bs_coeffs_updated);
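
The rename leaves the selection logic intact; as a condensed reference, here is a standalone model of the precedence dpp401_dscl_get_dscl_mode() implements. The boolean inputs stand in for the fixed-point ratio comparisons, and all_unity also requires !dbg_always_scale in the driver:

/* Standalone model of the mode precedence; not driver code. */
enum mode { BYPASS, S444_BYPASS, S444_YCBCR, S444_RGB,
	    S420_LUMA_BYPASS, S420_CHROMA_BYPASS, S420_YCBCR };

static enum mode pick_mode(int fp16_fixed, int all_unity, int is_420,
			   int is_video, int luma_unity, int chroma_unity)
{
	if (fp16_fixed)			/* fixed-format DSCL cannot scale FP16 */
		return BYPASS;
	if (all_unity)			/* no scaling requested at all */
		return S444_BYPASS;
	if (!is_420)
		return is_video ? S444_YCBCR : S444_RGB;
	if (luma_unity)
		return S420_LUMA_BYPASS;
	if (chroma_unity)
		return S420_CHROMA_BYPASS;
	return S420_YCBCR;		/* full 4:2:0 scaling */
}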
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index 1f53a9f0c0ac..e4144b244332 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -1157,6 +1157,11 @@ static bool setup_dsc_config(
if (!is_dsc_possible)
goto done;
+ /* increase minimum slice count to meet sink slice width limitations */
+ min_slices_h = dc_fixpt_ceil(dc_fixpt_max(
+ dc_fixpt_div_int(dc_fixpt_from_int(pic_width), dsc_common_caps.max_slice_width), // sink min
+ dc_fixpt_from_int(min_slices_h))); // source min
+
min_slices_h = fit_num_slices_up(dsc_common_caps.slice_caps, min_slices_h);
/* increase minimum slice count to meet sink throughput limitations */
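
The dc_fixpt_* calls are DC's fixed-point helpers; a plain-integer sketch of the same sink-minimum computation, assuming exact arithmetic:

#include <stdint.h>

/* ceil(pic_width / max_slice_width) is the fewest slices the sink can
 * accept; the result must not drop below the source-side minimum. */
static uint32_t min_slices_for_sink(uint32_t pic_width,
				    uint32_t max_slice_width,
				    uint32_t src_min_slices)
{
	uint32_t sink_min = (pic_width + max_slice_width - 1) / max_slice_width;

	return sink_min > src_min_slices ? sink_min : src_min_slices;
}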
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c
index bd1b9aef6d5c..89f0d999bf35 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c
@@ -406,9 +406,10 @@ bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_values
dsc_reg_vals->alternate_ich_encoding_en = dsc_reg_vals->pps.dsc_version_minor == 1 ? 0 : 1;
dsc_reg_vals->ich_reset_at_eol = (dsc_cfg->is_odm || dsc_reg_vals->num_slices_h > 1) ? 0xF : 0;
+ // Need to find the ceiling value for the slice width
+ dsc_reg_vals->pps.slice_width = (dsc_cfg->pic_width + dsc_cfg->dc_dsc_cfg.num_slices_h - 1) / dsc_cfg->dc_dsc_cfg.num_slices_h;
// TODO: in addition to validating slice height (pic height must be divisible by slice height),
// see what happens when the same condition doesn't apply for slice_width/pic_width.
- dsc_reg_vals->pps.slice_width = dsc_cfg->pic_width / dsc_cfg->dc_dsc_cfg.num_slices_h;
dsc_reg_vals->pps.slice_height = dsc_cfg->pic_height / dsc_cfg->dc_dsc_cfg.num_slices_v;
ASSERT(dsc_reg_vals->pps.slice_height * dsc_cfg->dc_dsc_cfg.num_slices_v == dsc_cfg->pic_height);
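
The open-coded (a + b - 1) / b above is integer ceiling division, normally spelled DIV_ROUND_UP() in kernel code (linux/math.h); a quick check of why rounding up matters for slice width:

#include <assert.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* 3841 px across 4 slices: 961-px slices cover the picture
	 * (961 * 4 >= 3841); plain division would leave 1 px uncovered. */
	assert(DIV_ROUND_UP(3841, 4) == 961);
	assert(3841 / 4 == 960);
	return 0;
}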
diff --git a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
index 1313a7c5d87b..73a1e6a03719 100644
--- a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
+++ b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
@@ -28,7 +28,7 @@
#include "include/hdcp_msg_types.h"
#include "include/signal_types.h"
#include "core_types.h"
-#include "link.h"
+#include "link_service.h"
#include "link_hwss.h"
#include "link/protocols/link_dpcd.h"
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c
index d347bb06577a..e7e5f6d4778e 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c
@@ -440,6 +440,35 @@ void hubbub3_init_watermarks(struct hubbub *hubbub)
REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, reg);
}
+void hubbub3_get_det_sizes(struct hubbub *hubbub, uint32_t *curr_det_sizes, uint32_t *target_det_sizes)
+{
+ struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
+
+ REG_GET_2(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT, &curr_det_sizes[0],
+ DET0_SIZE, &target_det_sizes[0]);
+
+ REG_GET_2(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT, &curr_det_sizes[1],
+ DET1_SIZE, &target_det_sizes[1]);
+
+ REG_GET_2(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT, &curr_det_sizes[2],
+ DET2_SIZE, &target_det_sizes[2]);
+
+ REG_GET_2(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT, &curr_det_sizes[3],
+ DET3_SIZE, &target_det_sizes[3]);
+
+}
+
+uint32_t hubbub3_compbuf_config_error(struct hubbub *hubbub)
+{
+ struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
+ uint32_t compbuf_config_error = 0;
+
+ REG_GET(DCHUBBUB_COMPBUF_CTRL, CONFIG_ERROR,
+ &compbuf_config_error);
+
+ return compbuf_config_error;
+}
+
static const struct hubbub_funcs hubbub30_funcs = {
.update_dchub = hubbub2_update_dchub,
.init_dchub_sys_ctx = hubbub3_init_dchub_sys_ctx,
@@ -457,6 +486,8 @@ static const struct hubbub_funcs hubbub30_funcs = {
.force_pstate_change_control = hubbub3_force_pstate_change_control,
.init_watermarks = hubbub3_init_watermarks,
.hubbub_read_state = hubbub2_read_state,
+ .get_det_sizes = hubbub3_get_det_sizes,
+ .compbuf_config_error = hubbub3_compbuf_config_error,
};
void hubbub3_construct(struct dcn20_hubbub *hubbub3,
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h
index ca6233e8f1f4..49a469969d36 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h
@@ -133,4 +133,10 @@ void hubbub3_force_pstate_change_control(struct hubbub *hubbub,
void hubbub3_init_watermarks(struct hubbub *hubbub);
+void hubbub3_get_det_sizes(struct hubbub *hubbub,
+ uint32_t *curr_det_sizes,
+ uint32_t *target_det_sizes);
+
+uint32_t hubbub3_compbuf_config_error(struct hubbub *hubbub);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c
index b98505b240a7..cdb20251a154 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c
@@ -1071,6 +1071,8 @@ static const struct hubbub_funcs hubbub31_funcs = {
.program_compbuf_size = dcn31_program_compbuf_size,
.init_crb = dcn31_init_crb,
.hubbub_read_state = hubbub2_read_state,
+ .get_det_sizes = hubbub3_get_det_sizes,
+ .compbuf_config_error = hubbub3_compbuf_config_error,
};
void hubbub31_construct(struct dcn20_hubbub *hubbub31,
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c
index 32a6be543105..4d4ca6d77bbd 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c
@@ -28,6 +28,7 @@
#include "dcn32_hubbub.h"
#include "dm_services.h"
#include "reg_helper.h"
+#include "dal_asic_id.h"
#define CTX \
@@ -72,6 +73,14 @@ static void dcn32_init_crb(struct hubbub *hubbub)
REG_UPDATE(DCHUBBUB_DEBUG_CTRL_0, DET_DEPTH, 0x47F);
}
+static void hubbub32_set_sdp_control(struct hubbub *hubbub, bool dc_control)
+{
+ struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
+
+ REG_UPDATE(DCHUBBUB_SDPIF_CFG0,
+ SDPIF_PORT_CONTROL, dc_control);
+}
+
void hubbub32_set_request_limit(struct hubbub *hubbub, int memory_channel_count, int words_per_channel)
{
struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
@@ -754,8 +763,18 @@ static bool hubbub32_program_watermarks(
unsigned int refclk_mhz,
bool safe_to_lower)
{
+ struct dc *dc = hubbub->ctx->dc;
bool wm_pending = false;
+ if (!safe_to_lower && dc->debug.disable_stutter_for_wm_program &&
+ (ASICREV_IS_GC_11_0_0(dc->ctx->asic_id.hw_internal_rev) ||
+ ASICREV_IS_GC_11_0_3(dc->ctx->asic_id.hw_internal_rev))) {
+ /* before raising watermarks, SDP control is given to DF and stutter must be disabled */
+ wm_pending = true;
+ hubbub32_set_sdp_control(hubbub, false);
+ hubbub1_allow_self_refresh_control(hubbub, false);
+ }
+
if (hubbub32_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
wm_pending = true;
@@ -786,10 +805,20 @@ static bool hubbub32_program_watermarks(
REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF);*/
- if (safe_to_lower || hubbub->ctx->dc->debug.disable_stutter)
- hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
+ if (safe_to_lower) {
+ /* after lowering watermarks, the stutter setting is restored and SDP control is given back to DC */
+ hubbub1_allow_self_refresh_control(hubbub, !dc->debug.disable_stutter);
+
+ if (dc->debug.disable_stutter_for_wm_program &&
+ (ASICREV_IS_GC_11_0_0(dc->ctx->asic_id.hw_internal_rev) ||
+ ASICREV_IS_GC_11_0_3(dc->ctx->asic_id.hw_internal_rev))) {
+ hubbub32_set_sdp_control(hubbub, true);
+ }
+ } else if (dc->debug.disable_stutter) {
+ hubbub1_allow_self_refresh_control(hubbub, !dc->debug.disable_stutter);
+ }
- hubbub32_force_usr_retraining_allow(hubbub, hubbub->ctx->dc->debug.force_usr_allow);
+ hubbub32_force_usr_retraining_allow(hubbub, dc->debug.force_usr_allow);
return wm_pending;
}
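
A minimal model of the ordering this hunk enforces on GC 11.0.0/11.0.3 when disable_stutter_for_wm_program is set (an illustration, not driver code): protections are dropped before watermarks are raised, and restored only on the safe_to_lower pass.

#include <stdbool.h>

struct wm_model { bool sdp_to_dc; bool stutter_allowed; };

static void program_watermarks_model(struct wm_model *m, bool safe_to_lower)
{
	if (!safe_to_lower) {			/* raising watermarks */
		m->sdp_to_dc = false;		/* hand SDP port control to DF */
		m->stutter_allowed = false;
		/* ... program urgent/pstate watermarks ... */
	} else {				/* lowering watermarks */
		/* ... program watermarks first ... */
		m->stutter_allowed = true;	/* unless debug.disable_stutter */
		m->sdp_to_dc = true;
	}
}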
@@ -974,8 +1003,7 @@ void hubbub32_init(struct hubbub *hubbub)
ignore the "df_pre_cstate_req" from the SDP port control.
only the DCN will determine when to connect the SDP port
*/
- REG_UPDATE(DCHUBBUB_SDPIF_CFG0,
- SDPIF_PORT_CONTROL, 1);
+ hubbub32_set_sdp_control(hubbub, true);
/*Set SDP's max outstanding request to 512
must set the register back to 0 (max outstanding = 256) in zero frame buffer mode*/
REG_UPDATE(DCHUBBUB_SDPIF_CFG1,
@@ -1009,6 +1037,8 @@ static const struct hubbub_funcs hubbub32_funcs = {
.force_usr_retraining_allow = hubbub32_force_usr_retraining_allow,
.set_request_limit = hubbub32_set_request_limit,
.get_mall_en = hubbub32_get_mall_en,
+ .get_det_sizes = hubbub3_get_det_sizes,
+ .compbuf_config_error = hubbub3_compbuf_config_error,
};
void hubbub32_construct(struct dcn20_hubbub *hubbub2,
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c
index 6d41953011f5..a443722a8632 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c
@@ -589,6 +589,8 @@ static const struct hubbub_funcs hubbub35_funcs = {
.hubbub_read_state = hubbub2_read_state,
.force_usr_retraining_allow = hubbub32_force_usr_retraining_allow,
.dchubbub_init = hubbub35_init,
+ .get_det_sizes = hubbub3_get_det_sizes,
+ .compbuf_config_error = hubbub3_compbuf_config_error,
};
void hubbub35_construct(struct dcn20_hubbub *hubbub2,
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c
index 92fab471b183..a36273a52880 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c
@@ -1247,6 +1247,8 @@ static const struct hubbub_funcs hubbub4_01_funcs = {
.program_compbuf_segments = dcn401_program_compbuf_segments,
.wait_for_det_update = dcn401_wait_for_det_update,
.program_arbiter = dcn401_program_arbiter,
+ .get_det_sizes = hubbub3_get_det_sizes,
+ .compbuf_config_error = hubbub3_compbuf_config_error,
};
void hubbub401_construct(struct dcn20_hubbub *hubbub2,
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h
index f8f991785d4f..cf2eb9793008 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h
@@ -104,7 +104,8 @@
SRI(DCN_SURF1_TTU_CNTL1, HUBPREQ, id),\
SRI(DCN_CUR0_TTU_CNTL0, HUBPREQ, id),\
SRI(DCN_CUR0_TTU_CNTL1, HUBPREQ, id),\
- SRI(HUBP_CLK_CNTL, HUBP, id)
+ SRI(HUBP_CLK_CNTL, HUBP, id),\
+ SRI(HUBPRET_READ_LINE_VALUE, HUBPRET, id)
/* Register address initialization macro for ASICs with VM */
#define HUBP_REG_LIST_DCN_VM(id)\
@@ -249,7 +250,8 @@
uint32_t CURSOR_POSITION; \
uint32_t CURSOR_HOT_SPOT; \
uint32_t CURSOR_DST_OFFSET; \
- uint32_t HUBP_CLK_CNTL
+ uint32_t HUBP_CLK_CNTL; \
+ uint32_t HUBPRET_READ_LINE_VALUE
#define HUBP_SF(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
@@ -622,6 +624,8 @@
type DCN_VM_SYSTEM_APERTURE_DEFAULT_SYSTEM;\
type DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB;\
type DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB;\
+ type PIPE_READ_LINE;\
+ type HUBP_SEG_ALLOC_ERR_STATUS;\
/* todo: get these from GVM instead of reading registers ourselves */\
type PAGE_DIRECTORY_ENTRY_HI32;\
type PAGE_DIRECTORY_ENTRY_LO32;\
@@ -671,6 +675,7 @@ struct dcn_fl_regs_st {
uint32_t lut_done;
uint32_t lut_addr_mode;
uint32_t lut_width;
+ uint32_t lut_mpc_width;
uint32_t lut_tmz;
uint32_t lut_crossbar_sel_r;
uint32_t lut_crossbar_sel_g;
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h
index 62369be070ea..f325db555102 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h
@@ -264,6 +264,7 @@
type HUBP_3DLUT_DONE;\
type HUBP_3DLUT_ADDRESSING_MODE;\
type HUBP_3DLUT_WIDTH;\
+ type HUBP_3DLUT_MPC_WIDTH;\
type HUBP_3DLUT_TMZ;\
type HUBP_3DLUT_CROSSBAR_SELECT_Y_G;\
type HUBP_3DLUT_CROSSBAR_SELECT_CB_B;\
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c
index 0da70b50e86d..556214b2227d 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c
@@ -505,6 +505,30 @@ void hubp3_init(struct hubp *hubp)
hubp_reset(hubp);
}
+uint32_t hubp3_get_current_read_line(struct hubp *hubp)
+{
+ uint32_t read_line = 0;
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ REG_GET(HUBPRET_READ_LINE_VALUE,
+ PIPE_READ_LINE,
+ &read_line);
+
+ return read_line;
+}
+
+uint32_t hubp3_get_underflow_status(struct hubp *hubp)
+{
+ uint32_t hubp_underflow = 0;
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ REG_GET(DCHUBP_CNTL,
+ HUBP_UNDERFLOW_STATUS,
+ &hubp_underflow);
+
+ return hubp_underflow;
+}
+
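
REG_GET in these accessors is DC's register helper: one MMIO read plus a field extract driven by the per-ASIC shift/mask tables that the HUBP_SF() entries populate. A standalone sketch of the extract step only (the macro machinery is left out):

#include <stdint.h>

static uint32_t field_get(uint32_t reg_val, uint32_t mask, uint32_t shift)
{
	return (reg_val & mask) >> shift;
}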
static struct hubp_funcs dcn30_hubp_funcs = {
.hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
.hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled,
@@ -534,6 +558,8 @@ static struct hubp_funcs dcn30_hubp_funcs = {
.hubp_soft_reset = hubp1_soft_reset,
.hubp_set_flip_int = hubp1_set_flip_int,
.hubp_clear_tiling = hubp3_clear_tiling,
+ .hubp_get_underflow_status = hubp3_get_underflow_status,
+ .hubp_get_current_read_line = hubp3_get_current_read_line,
};
bool hubp3_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h
index b7d7adf0b58c..842f4eb72cc8 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h
@@ -243,7 +243,8 @@
HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_6, REFCYC_PER_META_CHUNK_FLIP_C, mask_sh),\
HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_5, REFCYC_PER_VM_GROUP_VBLANK, mask_sh),\
HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_6, REFCYC_PER_VM_REQ_VBLANK, mask_sh),\
- HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, VM_GROUP_SIZE, mask_sh)
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, VM_GROUP_SIZE, mask_sh),\
+ HUBP_SF(HUBPRET0_HUBPRET_READ_LINE_VALUE, PIPE_READ_LINE, mask_sh)
bool hubp3_construct(
struct dcn20_hubp *hubp2,
@@ -299,6 +300,11 @@ void hubp3_init(struct hubp *hubp);
void hubp3_clear_tiling(struct hubp *hubp);
+uint32_t hubp3_get_current_read_line(struct hubp *hubp);
+
+uint32_t hubp3_get_underflow_status(struct hubp *hubp);
+
+
#endif /* __DC_HUBP_DCN30_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c
index 7fd582a8a4ba..47101847c2b7 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c
@@ -68,6 +68,18 @@ void hubp31_program_extended_blank_value(
hubp31_program_extended_blank(hubp, min_dst_y_next_start_optimized);
}
+uint32_t hubp31_get_det_config_error(struct hubp *hubp)
+{
+ uint32_t config_error = 0;
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ REG_GET(DCHUBP_CNTL,
+ HUBP_SEG_ALLOC_ERR_STATUS,
+ &config_error);
+
+ return config_error;
+}
+
static struct hubp_funcs dcn31_hubp_funcs = {
.hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
.hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled,
@@ -98,6 +110,9 @@ static struct hubp_funcs dcn31_hubp_funcs = {
.hubp_in_blank = hubp1_in_blank,
.program_extended_blank = hubp31_program_extended_blank,
.hubp_clear_tiling = hubp3_clear_tiling,
+ .hubp_get_underflow_status = hubp3_get_underflow_status,
+ .hubp_get_current_read_line = hubp3_get_current_read_line,
+ .hubp_get_det_config_error = hubp31_get_det_config_error,
};
bool hubp31_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.h
index d688db79b750..5952c4671507 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.h
@@ -228,7 +228,9 @@
HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_6, REFCYC_PER_META_CHUNK_FLIP_C, mask_sh),\
HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_5, REFCYC_PER_VM_GROUP_VBLANK, mask_sh),\
HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_6, REFCYC_PER_VM_REQ_VBLANK, mask_sh),\
- HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, VM_GROUP_SIZE, mask_sh)
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, VM_GROUP_SIZE, mask_sh),\
+ HUBP_SF(HUBPRET0_HUBPRET_READ_LINE_VALUE, PIPE_READ_LINE, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_SEG_ALLOC_ERR_STATUS, mask_sh)
bool hubp31_construct(
@@ -246,4 +248,6 @@ void hubp31_set_unbounded_requesting(struct hubp *hubp, bool enable);
void hubp31_program_extended_blank_value(
struct hubp *hubp, unsigned int min_dst_y_next_start_optimized);
+uint32_t hubp31_get_det_config_error(struct hubp *hubp);
+
#endif /* __DC_HUBP_DCN31_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c
index f3a21c623f44..a5f23bb2a76a 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c
@@ -206,6 +206,9 @@ static struct hubp_funcs dcn32_hubp_funcs = {
.hubp_update_mall_sel = hubp32_update_mall_sel,
.hubp_prepare_subvp_buffering = hubp32_prepare_subvp_buffering,
.hubp_clear_tiling = hubp3_clear_tiling,
+ .hubp_get_underflow_status = hubp3_get_underflow_status,
+ .hubp_get_current_read_line = hubp3_get_current_read_line,
+ .hubp_get_det_config_error = hubp31_get_det_config_error,
};
bool hubp32_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c
index 6d060ba12da8..b140808f21af 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c
@@ -218,6 +218,9 @@ static struct hubp_funcs dcn35_hubp_funcs = {
.hubp_in_blank = hubp1_in_blank,
.program_extended_blank = hubp31_program_extended_blank_value,
.hubp_clear_tiling = hubp3_clear_tiling,
+ .hubp_get_underflow_status = hubp3_get_underflow_status,
+ .hubp_get_current_read_line = hubp3_get_current_read_line,
+ .hubp_get_det_config_error = hubp31_get_det_config_error,
};
bool hubp35_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
index 705b98b1b6cc..0fcbc6a35be6 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
@@ -127,6 +127,43 @@ void hubp401_program_3dlut_fl_format(struct hubp *hubp, enum hubp_3dlut_fl_forma
REG_UPDATE(_3DLUT_FL_CONFIG, HUBP0_3DLUT_FL_FORMAT, format);
}
+void hubp401_program_3dlut_fl_config(
+ struct hubp *hubp,
+ struct hubp_fl_3dlut_config *cfg)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ uint32_t mpc_width = (cfg->width == 17) ? 0 : 1;
+ uint32_t width = cfg->width;
+
+ if (cfg->layout == DC_CM2_GPU_MEM_LAYOUT_1D_PACKED_LINEAR)
+ width = (cfg->width == 17) ? 4916 : 35940;
+
+ REG_UPDATE_2(_3DLUT_FL_CONFIG,
+ HUBP0_3DLUT_FL_MODE, cfg->mode,
+ HUBP0_3DLUT_FL_FORMAT, cfg->format);
+
+ REG_UPDATE_2(_3DLUT_FL_BIAS_SCALE,
+ HUBP0_3DLUT_FL_BIAS, cfg->bias,
+ HUBP0_3DLUT_FL_SCALE, cfg->scale);
+
+ REG_UPDATE(HUBP_3DLUT_ADDRESS_HIGH,
+ HUBP_3DLUT_ADDRESS_HIGH, cfg->address.lut3d.addr.high_part);
+ REG_UPDATE(HUBP_3DLUT_ADDRESS_LOW,
+ HUBP_3DLUT_ADDRESS_LOW, cfg->address.lut3d.addr.low_part);
+
+ /* crossbar selects and remaining 3DLUT control fields */
+ REG_UPDATE_8(HUBP_3DLUT_CONTROL,
+ HUBP_3DLUT_MPC_WIDTH, mpc_width,
+ HUBP_3DLUT_WIDTH, width,
+ HUBP_3DLUT_CROSSBAR_SELECT_CR_R, cfg->crossbar_bit_slice_cr_r,
+ HUBP_3DLUT_CROSSBAR_SELECT_Y_G, cfg->crossbar_bit_slice_y_g,
+ HUBP_3DLUT_CROSSBAR_SELECT_CB_B, cfg->crossbar_bit_slice_cb_b,
+ HUBP_3DLUT_ADDRESSING_MODE, cfg->addr_mode,
+ HUBP_3DLUT_TMZ, cfg->protection_bits,
+ HUBP_3DLUT_ENABLE, cfg->enabled ? 1 : 0);
+}
+
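
The 4916/35940 packed-linear widths look like the LUT entry counts 17^3 = 4913 and 33^3 = 35937 rounded up to a multiple of 4; that derivation is an inference from the values, not documented behavior:

#include <assert.h>
#include <stdint.h>

static uint32_t packed_lut_width(uint32_t edge)
{
	uint32_t entries = edge * edge * edge;	/* full 3D LUT entry count */

	return (entries + 3) & ~3u;		/* round up to multiple of 4 */
}

int main(void)
{
	assert(packed_lut_width(17) == 4916);
	assert(packed_lut_width(33) == 35940);
	return 0;
}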
void hubp401_update_mall_sel(struct hubp *hubp, uint32_t mall_sel, bool c_cursor)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
@@ -1033,6 +1070,10 @@ static struct hubp_funcs dcn401_hubp_funcs = {
.hubp_program_3dlut_fl_crossbar = hubp401_program_3dlut_fl_crossbar,
.hubp_get_3dlut_fl_done = hubp401_get_3dlut_fl_done,
.hubp_clear_tiling = hubp401_clear_tiling,
+ .hubp_program_3dlut_fl_config = hubp401_program_3dlut_fl_config,
+ .hubp_get_underflow_status = hubp3_get_underflow_status,
+ .hubp_get_current_read_line = hubp3_get_current_read_line,
+ .hubp_get_det_config_error = hubp31_get_det_config_error,
};
bool hubp401_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h
index 608e6153fa68..fdabbeec8ffa 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h
@@ -252,7 +252,9 @@
HUBP_SF(HUBP0_DCHUBP_MCACHEID_CONFIG, MCACHEID_MALL_PREF_1H_P0, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_MCACHEID_CONFIG, MCACHEID_MALL_PREF_2H_P0, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_MCACHEID_CONFIG, MCACHEID_MALL_PREF_1H_P1, mask_sh),\
- HUBP_SF(HUBP0_DCHUBP_MCACHEID_CONFIG, MCACHEID_MALL_PREF_2H_P1, mask_sh)
+ HUBP_SF(HUBP0_DCHUBP_MCACHEID_CONFIG, MCACHEID_MALL_PREF_2H_P1, mask_sh),\
+ HUBP_SF(HUBPRET0_HUBPRET_READ_LINE_VALUE, PIPE_READ_LINE, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_SEG_ALLOC_ERR_STATUS, mask_sh)
void hubp401_update_mall_sel(struct hubp *hubp, uint32_t mall_sel, bool c_cursor);
@@ -349,6 +351,10 @@ void hubp401_program_3dlut_fl_format(struct hubp *hubp, enum hubp_3dlut_fl_forma
void hubp401_program_3dlut_fl_mode(struct hubp *hubp, enum hubp_3dlut_fl_mode mode);
+void hubp401_program_3dlut_fl_config(
+ struct hubp *hubp,
+ struct hubp_fl_3dlut_config *cfg);
+
void hubp401_clear_tiling(struct hubp *hubp);
void hubp401_vready_at_or_After_vsync(struct hubp *hubp,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
index 4ea13d0bf815..24184b4eb352 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
@@ -48,7 +48,7 @@
#include "link_encoder.h"
#include "link_enc_cfg.h"
#include "link_hwss.h"
-#include "link.h"
+#include "link_service.h"
#include "dccg.h"
#include "clock_source.h"
#include "clk_mgr.h"
@@ -671,6 +671,7 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
uint32_t early_control = 0;
struct timing_generator *tg = pipe_ctx->stream_res.tg;
+ link_hwss->setup_stream_attribute(pipe_ctx);
link_hwss->setup_stream_encoder(pipe_ctx);
dc->hwss.update_info_frame(pipe_ctx);
@@ -1269,7 +1270,7 @@ void dce110_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
pipe_ctx->stream_res.stream_enc->funcs->set_avmute(pipe_ctx->stream_res.stream_enc, enable);
}
-static enum audio_dto_source translate_to_dto_source(enum controller_id crtc_id)
+enum audio_dto_source translate_to_dto_source(enum controller_id crtc_id)
{
switch (crtc_id) {
case CONTROLLER_ID_D0:
@@ -1289,7 +1290,7 @@ static enum audio_dto_source translate_to_dto_source(enum controller_id crtc_id)
}
}
-static void populate_audio_dp_link_info(
+void populate_audio_dp_link_info(
const struct pipe_ctx *pipe_ctx,
struct audio_dp_link_info *dp_link_info)
{
@@ -1924,10 +1925,8 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
get_edp_streams(context, edp_streams, &edp_stream_num);
- // Check fastboot support, disable on DCE8 because of blank screens
- if (edp_num && edp_stream_num && dc->ctx->dce_version != DCE_VERSION_8_0 &&
- dc->ctx->dce_version != DCE_VERSION_8_1 &&
- dc->ctx->dce_version != DCE_VERSION_8_3) {
+ /* Check fastboot support, disable on DCE 6-8 because of blank screens */
+ if (edp_num && edp_stream_num && dc->ctx->dce_version < DCE_VERSION_10_0) {
for (i = 0; i < edp_num; i++) {
edp_link = edp_links[i];
if (edp_link != edp_streams[0]->link)
@@ -2254,7 +2253,7 @@ static bool should_enable_fbc(struct dc *dc,
/*
* Enable FBC
*/
-static void enable_fbc(
+void enable_fbc(
struct dc *dc,
struct dc_state *context)
{
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h
index 7cd8c1576988..9c032e449481 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h
@@ -114,5 +114,12 @@ void build_audio_output(
struct dc_state *state,
const struct pipe_ctx *pipe_ctx,
struct audio_output *audio_output);
+enum audio_dto_source translate_to_dto_source(enum controller_id crtc_id);
+void populate_audio_dp_link_info(
+ const struct pipe_ctx *pipe_ctx,
+ struct audio_dp_link_info *dp_link_info);
+void enable_fbc(
+ struct dc *dc,
+ struct dc_state *context);
#endif /* __DC_HWSS_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
index 39910f73ecd0..e9fe97f0c4ea 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
@@ -55,7 +55,7 @@
#include "dce/dmub_hw_lock_mgr.h"
#include "dc_trace.h"
#include "dce/dmub_outbox.h"
-#include "link.h"
+#include "link_service.h"
#include "dc_state_priv.h"
#define DC_LOGGER \
@@ -328,19 +328,25 @@ static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
}
DTN_INFO("\n=======HUBP FL======\n");
- DTN_INFO(
- "HUBP FL: Enabled Done adr_mode width tmz xbar_sel_R xbar_sel_G xbar_sel_B adr_hi adr_low REFCYC Bias Scale Mode Format\n");
+ static const char * const pLabels[] = {
+ "inst", "Enabled ", "Done ", "adr_mode ", "width ", "mpc_width ",
+ "tmz", "xbar_sel_R", "xbar_sel_G", "xbar_sel_B", "adr_hi ",
+ "adr_low", "REFCYC", "Bias", "Scale", "Mode",
+ "Format", "prefetch"};
+
for (i = 0; i < pool->pipe_count; i++) {
struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
struct dcn_fl_regs_st *fl_regs = &s->fl_regs;
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;
if (!s->blank_en) {
- DTN_INFO("[%2d]: %5xh %6xh %5d %6d %8xh %2xh %6xh %6d %8d %8d %7d %8xh %5x %5x %5x",
+ uint32_t values[] = {
pool->hubps[i]->inst,
fl_regs->lut_enable,
fl_regs->lut_done,
fl_regs->lut_addr_mode,
fl_regs->lut_width,
+ fl_regs->lut_mpc_width,
fl_regs->lut_tmz,
fl_regs->lut_crossbar_sel_r,
fl_regs->lut_crossbar_sel_g,
@@ -351,8 +357,13 @@ static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
fl_regs->lut_fl_bias,
fl_regs->lut_fl_scale,
fl_regs->lut_fl_mode,
- fl_regs->lut_fl_format);
- DTN_INFO("\n");
+ fl_regs->lut_fl_format,
+ dlg_regs->dst_y_prefetch};
+
+ int num_elements = ARRAY_SIZE(values);
+
+ for (int j = 0; j < num_elements; j++)
+ DTN_INFO("%s \t %8xh\n", pLabels[j], values[j]);
}
}
@@ -541,19 +552,43 @@ static void dcn10_log_color_state(struct dc *dc,
dc->caps.color.mpc.ogam_ram,
dc->caps.color.mpc.ocsc);
DTN_INFO("===== MPC RMCM 3DLUT =====\n");
- DTN_INFO("MPCC: SIZE MODE MODE_CUR RD_SEL 30BIT_EN WR_EN_MASK RAM_SEL OUT_NORM_FACTOR FL_SEL OUT_OFFSET OUT_SCALE FL_DONE SOFT_UNDERFLOW HARD_UNDERFLOW MEM_PWR_ST FORCE DIS MODE\n");
+ static const char * const pLabels[] = {
+ "MPCC", "SIZE", "MODE", "MODE_CUR", "RD_SEL",
+ "30BIT_EN", "WR_EN_MASK", "RAM_SEL", "OUT_NORM_FACTOR", "FL_SEL",
+ "OUT_OFFSET", "OUT_SCALE", "FL_DONE", "SOFT_UNDERFLOW", "HARD_UNDERFLOW",
+ "MEM_PWR_ST", "FORCE", "DIS", "MODE"};
+
for (i = 0; i < pool->mpcc_count; i++) {
struct mpcc_state s = {0};
pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
- if (s.opp_id != 0xf)
- DTN_INFO("[%2d]: %4xh %4xh %6xh %4x %4x %4x %4x %4x %4xh %4xh %6xh %4x %4x %4x %4x %4x %4x %4x\n",
- i, s.rmcm_regs.rmcm_3dlut_size, s.rmcm_regs.rmcm_3dlut_mode, s.rmcm_regs.rmcm_3dlut_mode_cur,
- s.rmcm_regs.rmcm_3dlut_read_sel, s.rmcm_regs.rmcm_3dlut_30bit_en, s.rmcm_regs.rmcm_3dlut_wr_en_mask,
- s.rmcm_regs.rmcm_3dlut_ram_sel, s.rmcm_regs.rmcm_3dlut_out_norm_factor, s.rmcm_regs.rmcm_3dlut_fl_sel,
- s.rmcm_regs.rmcm_3dlut_out_offset_r, s.rmcm_regs.rmcm_3dlut_out_scale_r, s.rmcm_regs.rmcm_3dlut_fl_done,
- s.rmcm_regs.rmcm_3dlut_fl_soft_underflow, s.rmcm_regs.rmcm_3dlut_fl_hard_underflow, s.rmcm_regs.rmcm_3dlut_mem_pwr_state,
- s.rmcm_regs.rmcm_3dlut_mem_pwr_force, s.rmcm_regs.rmcm_3dlut_mem_pwr_dis, s.rmcm_regs.rmcm_3dlut_mem_pwr_mode);
+ if (s.opp_id != 0xf) {
+ uint32_t values[] = {
+ i,
+ s.rmcm_regs.rmcm_3dlut_size,
+ s.rmcm_regs.rmcm_3dlut_mode,
+ s.rmcm_regs.rmcm_3dlut_mode_cur,
+ s.rmcm_regs.rmcm_3dlut_read_sel,
+ s.rmcm_regs.rmcm_3dlut_30bit_en,
+ s.rmcm_regs.rmcm_3dlut_wr_en_mask,
+ s.rmcm_regs.rmcm_3dlut_ram_sel,
+ s.rmcm_regs.rmcm_3dlut_out_norm_factor,
+ s.rmcm_regs.rmcm_3dlut_fl_sel,
+ s.rmcm_regs.rmcm_3dlut_out_offset_r,
+ s.rmcm_regs.rmcm_3dlut_out_scale_r,
+ s.rmcm_regs.rmcm_3dlut_fl_done,
+ s.rmcm_regs.rmcm_3dlut_fl_soft_underflow,
+ s.rmcm_regs.rmcm_3dlut_fl_hard_underflow,
+ s.rmcm_regs.rmcm_3dlut_mem_pwr_state,
+ s.rmcm_regs.rmcm_3dlut_mem_pwr_force,
+ s.rmcm_regs.rmcm_3dlut_mem_pwr_dis,
+ s.rmcm_regs.rmcm_3dlut_mem_pwr_mode};
+
+ int num_elements = ARRAY_SIZE(values);
+
+ for (int j = 0; j < num_elements; j++)
+ DTN_INFO("%s \t %8xh\n", pLabels[j], values[j]);
+ }
}
DTN_INFO("\n");
DTN_INFO("===== MPC RMCM Shaper =====\n");
@@ -3312,7 +3347,7 @@ void dcn10_prepare_bandwidth(
context,
false);
- dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
+ dc->optimized_required = hubbub->funcs->program_watermarks(hubbub,
&context->bw_ctx.bw.dcn.watermarks,
dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
true);
@@ -3628,6 +3663,8 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
int y_plane = pipe_ctx->plane_state->dst_rect.y;
int x_pos = pos_cpy.x;
int y_pos = pos_cpy.y;
+ int clip_x = pipe_ctx->plane_state->clip_rect.x;
+ int clip_width = pipe_ctx->plane_state->clip_rect.width;
if ((pipe_ctx->top_pipe != NULL) || (pipe_ctx->bottom_pipe != NULL)) {
if ((pipe_ctx->plane_state->src_rect.width != pipe_ctx->plane_res.scl_data.viewport.width) ||
@@ -3646,7 +3683,7 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
*/
/**
- * Translate cursor from stream space to plane space.
+ * Translate cursor and clip offset from stream space to plane space.
*
* If the cursor is scaled then we need to scale the position
* to be in the approximately correct place. We can't do anything
@@ -3663,6 +3700,10 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_state->dst_rect.width;
y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
pipe_ctx->plane_state->dst_rect.height;
+ clip_x = (clip_x - x_plane) * pipe_ctx->plane_state->src_rect.width /
+ pipe_ctx->plane_state->dst_rect.width;
+ clip_width = clip_width * pipe_ctx->plane_state->src_rect.width /
+ pipe_ctx->plane_state->dst_rect.width;
}
/**
@@ -3709,30 +3750,18 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
if (param.rotation == ROTATION_ANGLE_0) {
- int viewport_width =
- pipe_ctx->plane_res.scl_data.viewport.width;
- int viewport_x =
- pipe_ctx->plane_res.scl_data.viewport.x;
if (param.mirror) {
- if (pipe_split_on || odm_combine_on) {
- if (pos_cpy.x >= viewport_width + viewport_x) {
- pos_cpy.x = 2 * viewport_width
- - pos_cpy.x + 2 * viewport_x;
- } else {
- uint32_t temp_x = pos_cpy.x;
-
- pos_cpy.x = 2 * viewport_x - pos_cpy.x;
- if (temp_x >= viewport_x +
- (int)hubp->curs_attr.width || pos_cpy.x
- <= (int)hubp->curs_attr.width +
- pipe_ctx->plane_state->src_rect.x) {
- pos_cpy.x = 2 * viewport_width - temp_x;
- }
- }
- } else {
- pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
- }
+ /*
+ * The plane is split into multiple viewports.
+ * The combination of all viewports spans the
+ * entirety of the clip rect.
+ *
+ * For no pipe_split, viewport_width represents
+ * the full width of the clip_rect, so we can just
+ * mirror it.
+ */
+ pos_cpy.x = clip_width - pos_cpy.x + 2 * clip_x;
}
}
// Swap axis and mirror horizontally
@@ -3802,30 +3831,17 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
}
// Mirror horizontally and vertically
else if (param.rotation == ROTATION_ANGLE_180) {
- int viewport_width =
- pipe_ctx->plane_res.scl_data.viewport.width;
- int viewport_x =
- pipe_ctx->plane_res.scl_data.viewport.x;
-
if (!param.mirror) {
- if (pipe_split_on || odm_combine_on) {
- if (pos_cpy.x >= viewport_width + viewport_x) {
- pos_cpy.x = 2 * viewport_width
- - pos_cpy.x + 2 * viewport_x;
- } else {
- uint32_t temp_x = pos_cpy.x;
-
- pos_cpy.x = 2 * viewport_x - pos_cpy.x;
- if (temp_x >= viewport_x +
- (int)hubp->curs_attr.width || pos_cpy.x
- <= (int)hubp->curs_attr.width +
- pipe_ctx->plane_state->src_rect.x) {
- pos_cpy.x = temp_x + viewport_width;
- }
- }
- } else {
- pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
- }
+ /*
+ * The plane is split into multiple viewports.
+ * The combination of all viewports spans the
+ * entirety of the clip rect.
+ *
+ * For no pipe_split, viewport_width represents
+ * the full width of the clip_rect, so we can just
+ * mirror it.
+ */
+ pos_cpy.x = clip_width - pos_cpy.x + 2 * clip_x;
}
/**
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
index 5e57bd1a08e7..9477c9f9e196 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
@@ -54,7 +54,7 @@
#include "dpcd_defs.h"
#include "inc/link_enc_cfg.h"
#include "link_hwss.h"
-#include "link.h"
+#include "link_service.h"
#include "dc_state_priv.h"
#define DC_LOGGER \
@@ -2388,10 +2388,10 @@ void dcn20_prepare_bandwidth(
}
/* program dchubbub watermarks:
- * For assigning wm_optimized_required, use |= operator since we don't want
+ * For assigning optimized_required, use |= operator since we don't want
* to clear the value if the optimize has not happened yet
*/
- dc->wm_optimized_required |= hubbub->funcs->program_watermarks(hubbub,
+ dc->optimized_required |= hubbub->funcs->program_watermarks(hubbub,
&context->bw_ctx.bw.dcn.watermarks,
dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
false);
@@ -2404,10 +2404,10 @@ void dcn20_prepare_bandwidth(
if (hubbub->funcs->program_compbuf_size) {
if (context->bw_ctx.dml.ip.min_comp_buffer_size_kbytes) {
compbuf_size_kb = context->bw_ctx.dml.ip.min_comp_buffer_size_kbytes;
- dc->wm_optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.dml.ip.min_comp_buffer_size_kbytes);
+ dc->optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.dml.ip.min_comp_buffer_size_kbytes);
} else {
compbuf_size_kb = context->bw_ctx.bw.dcn.compbuf_size_kb;
- dc->wm_optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.bw.dcn.compbuf_size_kb);
+ dc->optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.bw.dcn.compbuf_size_kb);
}
hubbub->funcs->program_compbuf_size(hubbub, compbuf_size_kb, false);
@@ -3052,6 +3052,8 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
link_enc->transmitter - TRANSMITTER_UNIPHY_A);
}
+ link_hwss->setup_stream_attribute(pipe_ctx);
+
if (dc->res_pool->dccg->funcs->set_pixel_rate_div)
dc->res_pool->dccg->funcs->set_pixel_rate_div(
dc->res_pool->dccg,
@@ -3127,7 +3129,8 @@ void dcn20_fpga_init_hw(struct dc *dc)
res_pool->dccg->funcs->dccg_init(res_pool->dccg);
//Enable ability to power gate / don't force power on permanently
- hws->funcs.enable_power_gating_plane(hws, true);
+ if (hws->funcs.enable_power_gating_plane)
+ hws->funcs.enable_power_gating_plane(hws, true);
// Specific to FPGA dccg and registers
REG_WRITE(RBBMIF_TIMEOUT_DIS, 0xFFFFFFFF);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
index 61efb15572ff..e2269211553c 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
@@ -35,7 +35,7 @@
#include "hw/clk_mgr.h"
#include "dc_dmub_srv.h"
#include "abm.h"
-#include "link.h"
+#include "link_service.h"
#define DC_LOGGER_INIT(logger)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
index 37a239219dfe..e47ed5571dfd 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
@@ -50,7 +50,7 @@
#include "dpcd_defs.h"
#include "dcn20/dcn20_hwseq.h"
#include "dcn30/dcn30_resource.h"
-#include "link.h"
+#include "link_service.h"
#include "dc_state_priv.h"
@@ -1228,3 +1228,51 @@ void dcn30_wait_for_all_pending_updates(const struct pipe_ctx *pipe_ctx)
}
}
}
+
+void dcn30_get_underflow_debug_data(const struct dc *dc,
+ struct timing_generator *tg,
+ struct dc_underflow_debug_data *out_data)
+{
+ struct hubbub *hubbub = dc->res_pool->hubbub;
+
+ if (tg) {
+ uint32_t v_blank_start = 0, v_blank_end = 0;
+
+ out_data->otg_inst = tg->inst;
+
+ tg->funcs->get_scanoutpos(tg,
+ &v_blank_start,
+ &v_blank_end,
+ &out_data->h_position,
+ &out_data->v_position);
+
+ out_data->otg_frame_count = tg->funcs->get_frame_count(tg);
+
+ out_data->otg_underflow = tg->funcs->is_optc_underflow_occurred(tg);
+ }
+
+ for (int i = 0; i < MAX_PIPES; i++) {
+ struct hubp *hubp = dc->res_pool->hubps[i];
+
+ if (hubp) {
+ if (hubp->funcs->hubp_get_underflow_status)
+ out_data->hubps[i].hubp_underflow = hubp->funcs->hubp_get_underflow_status(hubp);
+
+ if (hubp->funcs->hubp_in_blank)
+ out_data->hubps[i].hubp_in_blank = hubp->funcs->hubp_in_blank(hubp);
+
+ if (hubp->funcs->hubp_get_current_read_line)
+ out_data->hubps[i].hubp_readline = hubp->funcs->hubp_get_current_read_line(hubp);
+
+ if (hubp->funcs->hubp_get_det_config_error)
+ out_data->hubps[i].det_config_error = hubp->funcs->hubp_get_det_config_error(hubp);
+ }
+ }
+
+ if (hubbub->funcs->get_det_sizes)
+ hubbub->funcs->get_det_sizes(hubbub, out_data->curr_det_sizes, out_data->target_det_sizes);
+
+ if (hubbub->funcs->compbuf_config_error)
+ out_data->compbuf_config_error = hubbub->funcs->compbuf_config_error(hubbub);
+
+}
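
A hypothetical call-site sketch for the new hook; collect_underflow_snapshot and its trigger are illustrative, only the hwss function pointer comes from this series:

static void collect_underflow_snapshot(struct dc *dc, struct timing_generator *tg)
{
	struct dc_underflow_debug_data data = {0};

	if (dc->hwss.get_underflow_debug_data)
		dc->hwss.get_underflow_debug_data(dc, tg, &data);

	/* data now holds OTG position/underflow state, per-HUBP underflow
	 * status and read lines, DET sizes, and the compbuf config error. */
}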
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h
index 4b90b781c4f2..40afbbfb5b9c 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h
@@ -29,6 +29,7 @@
#include "hw_sequencer_private.h"
struct dc;
+struct dc_underflow_debug_data;
void dcn30_init_hw(struct dc *dc);
void dcn30_program_all_writeback_pipes_in_tree(
@@ -98,4 +99,8 @@ void dcn30_prepare_bandwidth(struct dc *dc,
void dcn30_wait_for_all_pending_updates(const struct pipe_ctx *pipe_ctx);
+void dcn30_get_underflow_debug_data(const struct dc *dc,
+ struct timing_generator *tg,
+ struct dc_underflow_debug_data *out_data);
+
#endif /* __DC_HWSS_DCN30_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c
index 2ac5d54d1626..d7ff55669bac 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c
@@ -110,6 +110,7 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
.update_visual_confirm_color = dcn10_update_visual_confirm_color,
.is_abm_supported = dcn21_is_abm_supported,
.wait_for_all_pending_updates = dcn30_wait_for_all_pending_updates,
+ .get_underflow_debug_data = dcn30_get_underflow_debug_data,
};
static const struct hwseq_private_funcs dcn30_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
index 8ba934b83957..b822f2dffff0 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
@@ -45,7 +45,7 @@
#include "link_hwss.h"
#include "dpcd_defs.h"
#include "dce/dmub_outbox.h"
-#include "link.h"
+#include "link_service.h"
#include "dcn10/dcn10_hwseq.h"
#include "dcn21/dcn21_hwseq.h"
#include "inc/link_enc_cfg.h"
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c
index 556f4fe57eda..5a6a459da224 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c
@@ -112,6 +112,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
.update_visual_confirm_color = dcn10_update_visual_confirm_color,
.setup_hpo_hw_control = dcn31_setup_hpo_hw_control,
+ .get_underflow_debug_data = dcn30_get_underflow_debug_data,
};
static const struct hwseq_private_funcs dcn31_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
index 560984533950..f925f669f2a4 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
@@ -46,7 +46,7 @@
#include "link_hwss.h"
#include "dpcd_defs.h"
#include "dce/dmub_outbox.h"
-#include "link.h"
+#include "link_service.h"
#include "dcn10/dcn10_hwseq.h"
#include "inc/link_enc_cfg.h"
#include "dcn30/dcn30_vpg.h"
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c
index 9f454fa90e65..79faab1125d4 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c
@@ -115,6 +115,7 @@ static const struct hw_sequencer_funcs dcn314_funcs = {
.update_visual_confirm_color = dcn10_update_visual_confirm_color,
.calculate_pix_rate_divider = dcn314_calculate_pix_rate_divider,
.setup_hpo_hw_control = dcn31_setup_hpo_hw_control,
+ .get_underflow_debug_data = dcn30_get_underflow_debug_data,
};
static const struct hwseq_private_funcs dcn314_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
index 416b1dca3dac..f39292952702 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
@@ -49,7 +49,7 @@
#include "dcn20/dcn20_optc.h"
#include "dce/dmub_hw_lock_mgr.h"
#include "dcn32/dcn32_resource.h"
-#include "link.h"
+#include "link_service.h"
#include "../dcn20/dcn20_hwseq.h"
#include "dc_state_priv.h"
@@ -1052,7 +1052,7 @@ void dcn32_update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
}
/* Enable DSC hw block */
- dsc_cfg.pic_width = (stream->timing.h_addressable + pipe_ctx->hblank_borrow +
+ dsc_cfg.pic_width = (stream->timing.h_addressable + pipe_ctx->dsc_padding_params.dsc_hactive_padding +
stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt;
dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
index b971356d30b1..c19ef075c882 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
@@ -121,6 +121,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
.calculate_pix_rate_divider = dcn32_calculate_pix_rate_divider,
.program_outstanding_updates = dcn32_program_outstanding_updates,
.wait_for_all_pending_updates = dcn30_wait_for_all_pending_updates,
+ .get_underflow_debug_data = dcn30_get_underflow_debug_data,
};
static const struct hwseq_private_funcs dcn32_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
index 764eff6a4ec6..05011061822c 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
@@ -46,7 +46,7 @@
#include "link_hwss.h"
#include "dpcd_defs.h"
#include "dce/dmub_outbox.h"
-#include "link.h"
+#include "link_service.h"
#include "dcn10/dcn10_hwseq.h"
#include "inc/link_enc_cfg.h"
#include "dcn30/dcn30_vpg.h"
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
index aefb7c473741..f2f16a0bdb4f 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
@@ -127,6 +127,7 @@ static const struct hw_sequencer_funcs dcn35_funcs = {
.enable_plane = dcn20_enable_plane,
.update_dchubp_dpp = dcn20_update_dchubp_dpp,
.post_unlock_reset_opp = dcn20_post_unlock_reset_opp,
+ .get_underflow_debug_data = dcn30_get_underflow_debug_data,
};
static const struct hwseq_private_funcs dcn35_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
index a580a55695c3..09e60158f0b5 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
@@ -122,6 +122,7 @@ static const struct hw_sequencer_funcs dcn351_funcs = {
.set_long_vtotal = dcn35_set_long_vblank,
.calculate_pix_rate_divider = dcn32_calculate_pix_rate_divider,
.setup_hpo_hw_control = dcn35_setup_hpo_hw_control,
+ .get_underflow_debug_data = dcn30_get_underflow_debug_data,
};
static const struct hwseq_private_funcs dcn351_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
index 61167c19359d..7c276c319086 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
@@ -25,7 +25,7 @@
#include "dpcd_defs.h"
#include "clk_mgr.h"
#include "dsc.h"
-#include "link.h"
+#include "link_service.h"
#include "dce/dmub_hw_lock_mgr.h"
#include "dcn10/dcn10_cm_common.h"
@@ -810,9 +810,12 @@ enum dc_status dcn401_enable_stream_timing(
if (dc->hwseq->funcs.PLAT_58856_wa && (!dc_is_dp_signal(stream->signal)))
dc->hwseq->funcs.PLAT_58856_wa(context, pipe_ctx);
- /* if we are borrowing from hblank, h_addressable needs to be adjusted */
- if (dc->debug.enable_hblank_borrow)
- patched_crtc_timing.h_addressable = patched_crtc_timing.h_addressable + pipe_ctx->hblank_borrow;
+ /* if we are padding, h_addressable needs to be adjusted */
+ if (dc->debug.enable_hblank_borrow) {
+ patched_crtc_timing.h_addressable = patched_crtc_timing.h_addressable + pipe_ctx->dsc_padding_params.dsc_hactive_padding;
+ patched_crtc_timing.h_total = patched_crtc_timing.h_total + pipe_ctx->dsc_padding_params.dsc_htotal_padding;
+ patched_crtc_timing.pix_clk_100hz = pipe_ctx->dsc_padding_params.dsc_pix_clk_100hz;
+ }
pipe_ctx->stream_res.tg->funcs->program_timing(
pipe_ctx->stream_res.tg,
@@ -965,6 +968,8 @@ void dcn401_enable_stream(struct pipe_ctx *pipe_ctx)
}
}
+ link_hwss->setup_stream_attribute(pipe_ctx);
+
if (dc->res_pool->dccg->funcs->set_pixel_rate_div) {
dc->res_pool->dccg->funcs->set_pixel_rate_div(
dc->res_pool->dccg,
@@ -1378,22 +1383,22 @@ void dcn401_prepare_bandwidth(struct dc *dc,
false);
/* program dchubbub watermarks:
- * For assigning wm_optimized_required, use |= operator since we don't want
+ * For assigning optimized_required, use |= operator since we don't want
* to clear the value if the optimize has not happened yet
*/
- dc->wm_optimized_required |= hubbub->funcs->program_watermarks(hubbub,
+ dc->optimized_required |= hubbub->funcs->program_watermarks(hubbub,
&context->bw_ctx.bw.dcn.watermarks,
dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
false);
/* update timeout thresholds */
if (hubbub->funcs->program_arbiter) {
- dc->wm_optimized_required |= hubbub->funcs->program_arbiter(hubbub, &context->bw_ctx.bw.dcn.arb_regs, false);
+ dc->optimized_required |= hubbub->funcs->program_arbiter(hubbub, &context->bw_ctx.bw.dcn.arb_regs, false);
}
/* decrease compbuf size */
if (hubbub->funcs->program_compbuf_segments) {
compbuf_size = context->bw_ctx.bw.dcn.arb_regs.compbuf_size;
- dc->wm_optimized_required |= (compbuf_size != dc->current_state->bw_ctx.bw.dcn.arb_regs.compbuf_size);
+ dc->optimized_required |= (compbuf_size != dc->current_state->bw_ctx.bw.dcn.arb_regs.compbuf_size);
hubbub->funcs->program_compbuf_segments(hubbub, compbuf_size, false);
}
@@ -1619,20 +1624,28 @@ void dcn401_unblank_stream(struct pipe_ctx *pipe_ctx,
void dcn401_hardware_release(struct dc *dc)
{
- dc_dmub_srv_fams2_update_config(dc, dc->current_state, false);
-
- /* If pstate unsupported, or still supported
- * by firmware, force it supported by dcn
- */
- if (dc->current_state) {
- if ((!dc->clk_mgr->clks.p_state_change_support ||
- dc->current_state->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable) &&
- dc->res_pool->hubbub->funcs->force_pstate_change_control)
- dc->res_pool->hubbub->funcs->force_pstate_change_control(
- dc->res_pool->hubbub, true, true);
-
- dc->current_state->bw_ctx.bw.dcn.clk.p_state_change_support = true;
- dc->clk_mgr->funcs->update_clocks(dc->clk_mgr, dc->current_state, true);
+ if (!dc->debug.disable_force_pstate_allow_on_hw_release) {
+ dc_dmub_srv_fams2_update_config(dc, dc->current_state, false);
+
+ /* If pstate unsupported, or still supported
+ * by firmware, force it supported by dcn
+ */
+ if (dc->current_state) {
+ if ((!dc->clk_mgr->clks.p_state_change_support ||
+ dc->current_state->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable) &&
+ dc->res_pool->hubbub->funcs->force_pstate_change_control)
+ dc->res_pool->hubbub->funcs->force_pstate_change_control(
+ dc->res_pool->hubbub, true, true);
+
+ dc->current_state->bw_ctx.bw.dcn.clk.p_state_change_support = true;
+ dc->clk_mgr->funcs->update_clocks(dc->clk_mgr, dc->current_state, true);
+ }
+ } else {
+ if (dc->current_state) {
+ dc->clk_mgr->clks.p_state_change_support = false;
+ dc->clk_mgr->funcs->update_clocks(dc->clk_mgr, dc->current_state, true);
+ }
+ dc_dmub_srv_fams2_update_config(dc, dc->current_state, false);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
index fe7aceb2f510..d6e11b7e4fce 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
@@ -104,6 +104,7 @@ static const struct hw_sequencer_funcs dcn401_funcs = {
.enable_plane = dcn20_enable_plane,
.update_dchubp_dpp = dcn20_update_dchubp_dpp,
.post_unlock_reset_opp = dcn20_post_unlock_reset_opp,
+ .get_underflow_debug_data = dcn30_get_underflow_debug_data,
};
static const struct hwseq_private_funcs dcn401_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
index 9df8030e37f7..1723bbcf2c46 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
@@ -47,6 +47,7 @@ struct link_resource;
struct dc_dmub_cmd;
struct pg_block_update;
struct drr_params;
+struct dc_underflow_debug_data;
struct subvp_pipe_control_lock_fast_params {
struct dc *dc;
@@ -475,6 +476,9 @@ struct hw_sequencer_funcs {
struct dc_state *context);
void (*post_unlock_reset_opp)(struct dc *dc,
struct pipe_ctx *opp_head);
+ void (*get_underflow_debug_data)(const struct dc *dc,
+ struct timing_generator *tg,
+ struct dc_underflow_debug_data *out_data);
};
void color_space_to_black_color(
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index f0d7185153b2..d11893f8c916 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -433,7 +433,14 @@ enum p_state_switch_method {
P_STATE_V_ACTIVE,
P_STATE_SUB_VP,
P_STATE_DRR_SUB_VP,
- P_STATE_V_BLANK_SUB_VP
+ P_STATE_V_BLANK_SUB_VP,
+};
+
+struct dsc_padding_params {
+ /* pixels borrowed from hblank to hactive */
+ uint8_t dsc_hactive_padding;
+ uint32_t dsc_htotal_padding;
+ uint32_t dsc_pix_clk_100hz;
};
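
dsc_pix_clk_100hz arrives precomputed; presumably it scales the nominal clock by (h_total + dsc_htotal_padding) / h_total so the refresh rate is unchanged by the padding. A sketch under that assumption:

#include <stdint.h>

static uint32_t padded_pix_clk_100hz(uint32_t pix_clk_100hz,
				     uint32_t h_total,
				     uint32_t htotal_padding)
{
	/* widen before multiplying to avoid 32-bit overflow */
	uint64_t clk = (uint64_t)pix_clk_100hz * (h_total + htotal_padding);

	return (uint32_t)(clk / h_total);
}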
struct pipe_ctx {
@@ -493,8 +500,7 @@ struct pipe_ctx {
/* subvp_index: only valid if the pipe is a SUBVP_MAIN*/
uint8_t subvp_index;
struct pixel_rate_divider pixel_rate_divider;
- /* pixels borrowed from hblank to hactive */
- uint8_t hblank_borrow;
+ struct dsc_padding_params dsc_padding_params;
/* next vupdate */
uint32_t next_vupdate;
uint32_t wait_frame_count;
@@ -683,6 +689,7 @@ struct replay_context {
/* Controller Id used for Dig Fe source select */
enum controller_id controllerId;
unsigned int line_time_in_ns;
+ bool os_request_force_ffu;
};
enum dc_replay_enable {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
index 52b745667ef7..843a18287c83 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
@@ -137,6 +137,19 @@ struct dcn_hubbub_state {
uint32_t dram_state_cntl;
};
+struct hubbub_system_latencies {
+ uint32_t max_latency_ns;
+ uint32_t avg_latency_ns;
+ uint32_t min_latency_ns;
+};
+
+struct hubbub_urgent_latency_params {
+ uint32_t refclk_mhz;
+ uint32_t t_win_ns;
+ uint32_t bandwidth_mbps;
+ uint32_t bw_factor_x1000;
+};
+
struct hubbub_funcs {
void (*update_dchub)(
struct hubbub *hubbub,
@@ -229,6 +242,17 @@ struct hubbub_funcs {
void (*program_compbuf_segments)(struct hubbub *hubbub, unsigned compbuf_size_seg, bool safe_to_increase);
void (*wait_for_det_update)(struct hubbub *hubbub, int hubp_inst);
bool (*program_arbiter)(struct hubbub *hubbub, struct dml2_display_arb_regs *arb_regs, bool safe_to_lower);
+ void (*get_det_sizes)(struct hubbub *hubbub, uint32_t *curr_det_sizes, uint32_t *target_det_sizes);
+ uint32_t (*compbuf_config_error)(struct hubbub *hubbub);
+ struct hubbub_perfmon_funcs{
+ void (*start_system_latency_measurement)(struct hubbub *hubbub);
+ void (*get_system_latency_result)(struct hubbub *hubbub, uint32_t refclk_mhz, struct hubbub_system_latencies *latencies);
+ void (*start_in_order_bandwidth_measurement)(struct hubbub *hubbub);
+ void (*get_in_order_bandwidth_result)(struct hubbub *hubbub, uint32_t refclk_mhz, uint32_t *bandwidth_mbps);
+ void (*start_urgent_ramp_latency_measurement)(struct hubbub *hubbub, const struct hubbub_urgent_latency_params *params);
+ void (*get_urgent_ramp_latency_result)(struct hubbub *hubbub, uint32_t refclk_mhz, uint32_t *latency_ns);
+ void (*reset)(struct hubbub *hubbub);
+ } perfmon;
};
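The perfmon hooks come in start/get pairs, so a measurement brackets the workload of interest; reset() rearms the counters between runs. A hedged usage sketch (the refclk value is illustrative, and callers should NULL-check the pointers as with other optional hubbub funcs):

	/* Sketch: measure in-order bandwidth across a workload window. */
	uint32_t refclk_mhz = 100;	/* illustrative */
	uint32_t bandwidth_mbps = 0;

	hubbub->funcs->perfmon.reset(hubbub);
	hubbub->funcs->perfmon.start_in_order_bandwidth_measurement(hubbub);
	/* ... run the workload to be measured ... */
	hubbub->funcs->perfmon.get_in_order_bandwidth_result(hubbub,
			refclk_mhz, &bandwidth_mbps);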
struct hubbub {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index cee29e89ec5c..2b874d2cc61c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -89,7 +89,7 @@ enum hubp_3dlut_fl_addressing_mode {
enum hubp_3dlut_fl_width {
hubp_3dlut_fl_width_17 = 17,
hubp_3dlut_fl_width_33 = 33,
- hubp_3dlut_fl_width_transformed = 4916
+ hubp_3dlut_fl_width_transformed = 4916, // MPC default
};
enum hubp_3dlut_fl_crossbar_bit_slice {
@@ -99,6 +99,22 @@ enum hubp_3dlut_fl_crossbar_bit_slice {
hubp_3dlut_fl_crossbar_bit_slice_48_63 = 3
};
+struct hubp_fl_3dlut_config {
+ bool enabled;
+ enum hubp_3dlut_fl_width width;
+ enum hubp_3dlut_fl_mode mode;
+ enum hubp_3dlut_fl_format format;
+ uint16_t bias;
+ uint16_t scale;
+ struct dc_plane_address address;
+ enum hubp_3dlut_fl_addressing_mode addr_mode;
+ enum dc_cm2_gpu_mem_layout layout;
+ uint8_t protection_bits;
+ enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_y_g;
+ enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cb_b;
+ enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cr_r;
+};
+
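hubp_fl_3dlut_config consolidates the parameters that were previously spread across several per-field hubp calls into one struct, so fast-load 3D LUT programming becomes a single vfunc call. A hedged sketch (field values illustrative; unset members take their zero defaults):

	struct hubp_fl_3dlut_config cfg = {
		.enabled = true,
		.width = hubp_3dlut_fl_width_17,
		.bias = 0,
		.scale = 0xffff,	/* illustrative fixed-point scale */
	};

	if (hubp->funcs->hubp_program_3dlut_fl_config)
		hubp->funcs->hubp_program_3dlut_fl_config(hubp, &cfg);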
struct hubp {
const struct hubp_funcs *funcs;
struct dc_context *ctx;
@@ -288,7 +304,10 @@ struct hubp_funcs {
enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cb_b,
enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cr_r);
int (*hubp_get_3dlut_fl_done)(struct hubp *hubp);
+ void (*hubp_program_3dlut_fl_config)(struct hubp *hubp, struct hubp_fl_3dlut_config *cfg);
void (*hubp_clear_tiling)(struct hubp *hubp);
+ uint32_t (*hubp_get_current_read_line)(struct hubp *hubp);
+ uint32_t (*hubp_get_det_config_error)(struct hubp *hubp);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
index 7641439f6ca0..22960ee03dee 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
@@ -115,6 +115,16 @@ enum MCM_LUT_ID {
MCM_LUT_SHAPER
};
+struct mpc_fl_3dlut_config {
+ bool enabled;
+ uint16_t width;
+ bool select_lut_bank_a;
+ uint16_t bit_depth;
+ int hubp_index;
+ uint16_t bias;
+ uint16_t scale;
+};
+
union mcm_lut_params {
const struct pwl_params *pwl;
const struct tetrahedral_params *lut3d;
@@ -1059,21 +1069,6 @@ struct mpc_funcs {
*/
void (*program_lut_mode)(struct mpc *mpc, const enum MCM_LUT_ID id, const enum MCM_LUT_XABLE xable,
bool lut_bank_a, int mpcc_id);
- /**
- * @program_3dlut_size:
- *
- * Program 3D LUT size.
- *
- * Parameters:
- * - [in/out] mpc - MPC context.
- * - [in] is_17x17x17 - is 3dlut 17x17x17
- * - [in] mpcc_id
- *
- * Return:
- *
- * void
- */
- void (*program_3dlut_size)(struct mpc *mpc, bool is_17x17x17, int mpcc_id);
/**
* @mcm:
@@ -1098,6 +1093,7 @@ struct mpc_funcs {
* MPC RMCM new HW sequential programming functions
*/
struct {
+ void (*fl_3dlut_configure)(struct mpc *mpc, struct mpc_fl_3dlut_config *cfg, int mpcc_id);
void (*enable_3dlut_fl)(struct mpc *mpc, bool enable, int mpcc_id);
void (*update_3dlut_fast_load_select)(struct mpc *mpc, int mpcc_id, int hubp_idx);
void (*program_lut_read_write_control)(struct mpc *mpc, const enum MCM_LUT_ID id,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 267ace4eef8a..f2de2cf23859 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -374,6 +374,7 @@ struct timing_generator_funcs {
void (*wait_drr_doublebuffer_pending_clear)(struct timing_generator *tg);
void (*set_long_vtotal)(struct timing_generator *optc, const struct long_vtotal_params *params);
void (*wait_odm_doublebuffer_pending_clear)(struct timing_generator *tg);
+ void (*wait_otg_disable)(struct timing_generator *optc);
bool (*get_optc_double_buffer_pending)(struct timing_generator *tg);
bool (*get_otg_double_buffer_pending)(struct timing_generator *tg);
bool (*get_pipe_update_pending)(struct timing_generator *tg);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link.h b/drivers/gpu/drm/amd/display/dc/inc/link_service.h
index f2503402c10e..1e34e84160aa 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/link.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/link_service.h
@@ -42,8 +42,8 @@
* dc_link_exports.c or other dc files implement dc.h
*
* DC to Link:
- * dc_link_exports.c or other dc files include link.h
- * link_factory.c implements link.h
+ * dc_link_exports.c or other dc files include link_service.h
+ * link_factory.c implements link_service.h
*
* Link sub-component to Link sub-component:
* link_factory.c includes --> link_xxx.h
@@ -73,7 +73,7 @@
* 2. Implement your function in the suitable link_xxx.c file.
* 3. Assign the function to link_service in link_factory.c
* 4. NEVER include link_xxx.h headers outside link component.
- * 5. NEVER include link.h on DM side.
+ * 5. NEVER include link_service.h on DM side.
*/
#include "core_types.h"
@@ -218,7 +218,10 @@ struct link_service {
bool (*dp_overwrite_extended_receiver_cap)(struct dc_link *link);
enum lttpr_mode (*dp_decide_lttpr_mode)(struct dc_link *link,
struct dc_link_settings *link_setting);
-
+ uint8_t (*dp_get_lttpr_count)(struct dc_link *link);
+ void (*edp_get_alpm_support)(struct dc_link *link,
+ bool *auxless_support,
+ bool *auxwake_support);
/*************************** DP DPIA/PHY ******************************/
void (*dpia_handle_usb4_bandwidth_allocation_for_link)(
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index a890f581f4e8..4e26a16a8743 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -45,6 +45,7 @@ enum dce_version resource_parse_asic_id(
struct resource_caps {
int num_timing_generator;
int num_opp;
+ int num_dpp;
int num_video_plane;
int num_audio;
int num_stream_encoder;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/soc_and_ip_translator.h b/drivers/gpu/drm/amd/display/dc/inc/soc_and_ip_translator.h
new file mode 100644
index 000000000000..23daf98b8aa8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/soc_and_ip_translator.h
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2025 Advanced Micro Devices, Inc.
+
+#ifndef __SOC_AND_IP_TRANSLATOR_H__
+#define __SOC_AND_IP_TRANSLATOR_H__
+
+#include "dc.h"
+#include "dml_top_soc_parameter_types.h"
+
+struct soc_and_ip_translator_funcs {
+ void (*get_soc_bb)(struct dml2_soc_bb *soc_bb, const struct dc *dc, const struct dml2_configuration_options *config);
+ void (*get_ip_caps)(struct dml2_ip_capabilities *dml_ip_caps);
+};
+
+struct soc_and_ip_translator {
+ const struct soc_and_ip_translator_funcs *translator_funcs;
+};
+
+struct soc_and_ip_translator *dc_create_soc_and_ip_translator(enum dce_version dc_version);
+void dc_destroy_soc_and_ip_translator(struct soc_and_ip_translator **soc_and_ip_translator);
+
+
+#endif // __SOC_AND_IP_TRANSLATOR_H__
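The translator is a small per-ASIC vtable: resource/DML code asks it for the SoC bounding box and IP capabilities instead of hard-coding them per family. A hedged lifecycle sketch, assuming a dml2_configuration_options pointer named config is in scope and eliding error handling:

	struct dml2_soc_bb soc_bb;
	struct dml2_ip_capabilities ip_caps;
	struct soc_and_ip_translator *xlat =
		dc_create_soc_and_ip_translator(dc->ctx->dce_version);

	if (xlat) {
		xlat->translator_funcs->get_soc_bb(&soc_bb, dc, config);
		xlat->translator_funcs->get_ip_caps(&ip_caps);
		dc_destroy_soc_and_ip_translator(&xlat);
	}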
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
index 2956c2b3ad1a..9e33bf937a69 100644
--- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
@@ -76,6 +76,8 @@ static void dp_retrain_link_dp_test(struct dc_link *link,
uint8_t count;
int i;
struct audio_output audio_output[MAX_PIPES];
+ struct dc_stream_state *streams_on_link[MAX_PIPES];
+ int num_streams_on_link = 0;
needs_divider_update = (link->dc->link_srv->dp_get_encoding_format(link_setting) !=
link->dc->link_srv->dp_get_encoding_format((const struct dc_link_settings *) &link->cur_link_settings));
@@ -138,12 +140,19 @@ static void dp_retrain_link_dp_test(struct dc_link *link,
pipes[i]->stream_res.tg->funcs->enable_crtc(pipes[i]->stream_res.tg);
// Set DPMS on with stream update
- for (i = 0; i < state->stream_count; i++)
- if (state->streams[i] && state->streams[i]->link && state->streams[i]->link == link) {
- stream_update.stream = state->streams[i];
+ // Cache all streams on current link since dc_update_planes_and_stream might kill current_state
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (state->streams[i] && state->streams[i]->link && state->streams[i]->link == link)
+ streams_on_link[num_streams_on_link++] = state->streams[i];
+ }
+
+ for (i = 0; i < num_streams_on_link; i++) {
+ if (streams_on_link[i] && streams_on_link[i]->link && streams_on_link[i]->link == link) {
+ stream_update.stream = streams_on_link[i];
stream_update.dpms_off = &dpms_off;
- dc_update_planes_and_stream(state->clk_mgr->ctx->dc, NULL, 0, state->streams[i], &stream_update);
+ dc_update_planes_and_stream(state->clk_mgr->ctx->dc, NULL, 0, streams_on_link[i], &stream_update);
}
+ }
}
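This is the usual snapshot-before-mutate pattern: dc_update_planes_and_stream() can replace the state being iterated, so the streams of interest are collected up front and the updates run against the private copy. In general form (update_one_stream() is hypothetical, and the snapshot assumes the streams stay valid for the duration):

	struct dc_stream_state *snapshot[MAX_PIPES];
	int i, n = 0;

	for (i = 0; i < MAX_PIPES; i++)
		if (state->streams[i] && state->streams[i]->link == link)
			snapshot[n++] = state->streams[i];

	for (i = 0; i < n; i++)
		update_one_stream(snapshot[i]);	/* may invalidate 'state' */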
static void dp_test_send_link_training(struct dc_link *link)
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h
index eae23ea7f6ec..033650cdb811 100644
--- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h
@@ -24,7 +24,7 @@
*/
#ifndef __LINK_DP_CTS_H__
#define __LINK_DP_CTS_H__
-#include "link.h"
+#include "link_service.h"
void dp_handle_automated_test(struct dc_link *link);
bool dp_set_test_pattern(
struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h
index ab437a0c9101..9ff4a6c46a2b 100644
--- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h
@@ -24,7 +24,7 @@
*/
#ifndef __LINK_DP_TRACE_H__
#define __LINK_DP_TRACE_H__
-#include "link.h"
+#include "link_service.h"
void dp_trace_init(struct dc_link *link);
void dp_trace_reset(struct dc_link *link);
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
index b68bcc9fca0a..892907991f91 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
@@ -138,8 +138,7 @@ void setup_dio_stream_attribute(struct pipe_ctx *pipe_ctx)
stream_encoder->funcs->dvi_set_stream_attribute(
stream_encoder,
&stream->timing,
- (stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK) ?
- true : false);
+ stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK);
else if (dc_is_lvds_signal(stream->signal))
stream_encoder->funcs->lvds_set_stream_attribute(
stream_encoder,
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h
index 45f0e091fcb0..4a25210a344f 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h
@@ -27,7 +27,7 @@
#define __LINK_HWSS_DIO_H__
#include "link_hwss.h"
-#include "link.h"
+#include "link_service.h"
const struct link_hwss *get_dio_link_hwss(void);
bool can_use_dio_link_hwss(const struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h
index 9ac08a332540..cf578a8662a4 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h
@@ -25,7 +25,7 @@
#ifndef __LINK_HWSS_DIO_FIXED_VS_PE_RETIMER_H__
#define __LINK_HWSS_DIO_FIXED_VS_PE_RETIMER_H__
-#include "link.h"
+#include "link_service.h"
uint32_t dp_dio_fixed_vs_pe_retimer_get_lttpr_write_address(struct dc_link *link);
uint8_t dp_dio_fixed_vs_pe_retimer_lane_cfg_to_hw_cfg(struct dc_link *link);
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h
index 1d3ed8ca83b5..7c9005bc2587 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h
@@ -26,7 +26,7 @@
#define __LINK_HWSS_HPO_DP_H__
#include "link_hwss.h"
-#include "link.h"
+#include "link_service.h"
void set_hpo_dp_throttled_vcp_size(struct pipe_ctx *pipe_ctx,
struct fixed31_32 throttled_vcp_size);
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h
index 82301187bc7c..8bf36827ecfb 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h
@@ -25,7 +25,7 @@
#ifndef __LINK_HWSS_HPO_FIXED_VS_PE_RETIMER_DP_H__
#define __LINK_HWSS_HPO_FIXED_VS_PE_RETIMER_DP_H__
-#include "link.h"
+#include "link_service.h"
bool requires_fixed_vs_pe_retimer_hpo_link_hwss(const struct dc_link *link);
const struct link_hwss *get_hpo_fixed_vs_pe_retimer_dp_link_hwss(void);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
index 827b630daf49..85303167a553 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
@@ -656,7 +656,7 @@ static bool wait_for_entering_dp_alt_mode(struct dc_link *link)
return true;
is_in_alt_mode = link->link_enc->funcs->is_in_alt_mode(link->link_enc);
- DC_LOG_DC("DP Alt mode state on HPD: %d\n", is_in_alt_mode);
+ DC_LOG_DC("DP Alt mode state on HPD: %d Link=%d\n", is_in_alt_mode, link->link_index);
if (is_in_alt_mode)
return true;
@@ -1140,6 +1140,10 @@ static bool detect_link_and_local_sink(struct dc_link *link,
if (sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A &&
!sink->edid_caps.edid_hdmi)
sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ else if (dc_is_dvi_signal(sink->sink_signal) &&
+ aud_support->hdmi_audio_native &&
+ sink->edid_caps.edid_hdmi)
+ sink->sink_signal = SIGNAL_TYPE_HDMI_TYPE_A;
if (link->local_sink && dc_is_dp_signal(sink_caps.signal))
dp_trace_init(link);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.h b/drivers/gpu/drm/amd/display/dc/link/link_detection.h
index 7da05078721e..1ab29476060b 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_detection.h
+++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.h
@@ -25,7 +25,7 @@
#ifndef __DC_LINK_DETECTION_H__
#define __DC_LINK_DETECTION_H__
-#include "link.h"
+#include "link_service.h"
bool link_detect(struct dc_link *link, enum dc_detect_reason reason);
bool link_detect_connection_type(struct dc_link *link,
enum dc_connection_type *type);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
index 8c8682f743d6..83419e1a9036 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
@@ -832,7 +832,7 @@ void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
enum optc_dsc_mode optc_dsc_mode;
/* Enable DSC hw block */
- dsc_cfg.pic_width = (stream->timing.h_addressable + pipe_ctx->hblank_borrow +
+ dsc_cfg.pic_width = (stream->timing.h_addressable + pipe_ctx->dsc_padding_params.dsc_hactive_padding +
stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt;
dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
@@ -2358,9 +2358,9 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
if (pipe_ctx->stream->sink) {
if (pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_VIRTUAL &&
pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_NONE) {
- DC_LOG_DC("%s pipe_ctx dispname=%s signal=%x\n", __func__,
+ DC_LOG_DC("%s pipe_ctx dispname=%s signal=%x link=%d\n", __func__,
pipe_ctx->stream->sink->edid_caps.display_name,
- pipe_ctx->stream->signal);
+ pipe_ctx->stream->signal, link->link_index);
}
}
@@ -2458,7 +2458,6 @@ void link_set_dpms_on(
struct link_encoder *link_enc = pipe_ctx->link_res.dio_link_enc;
enum otg_out_mux_dest otg_out_dest = OUT_MUX_DIO;
struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg;
- const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
bool apply_edp_fast_boot_optimization =
pipe_ctx->stream->apply_edp_fast_boot_optimization;
@@ -2474,9 +2473,10 @@ void link_set_dpms_on(
if (pipe_ctx->stream->sink) {
if (pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_VIRTUAL &&
pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_NONE) {
- DC_LOG_DC("%s pipe_ctx dispname=%s signal=%x\n", __func__,
+ DC_LOG_DC("%s pipe_ctx dispname=%s signal=%x link=%d\n", __func__,
pipe_ctx->stream->sink->edid_caps.display_name,
- pipe_ctx->stream->signal);
+ pipe_ctx->stream->signal,
+ link->link_index);
}
}
@@ -2502,8 +2502,6 @@ void link_set_dpms_on(
pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, otg_out_dest);
}
- link_hwss->setup_stream_attribute(pipe_ctx);
-
pipe_ctx->stream->apply_edp_fast_boot_optimization = false;
// Enable VPG before building infoframe
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.h b/drivers/gpu/drm/amd/display/dc/link/link_dpms.h
index 9398f9c1666a..bd6fc63064a3 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.h
+++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.h
@@ -26,7 +26,7 @@
#ifndef __DC_LINK_DPMS_H__
#define __DC_LINK_DPMS_H__
-#include "link.h"
+#include "link_service.h"
void link_set_dpms_on(
struct dc_state *state,
struct pipe_ctx *pipe_ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
index de1143dbbd25..31a73867cd4c 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
@@ -165,6 +165,8 @@ static void construct_link_service_dp_capability(struct link_service *link_srv)
link_srv->dp_overwrite_extended_receiver_cap =
dp_overwrite_extended_receiver_cap;
link_srv->dp_decide_lttpr_mode = dp_decide_lttpr_mode;
+ link_srv->dp_get_lttpr_count = dp_get_lttpr_count;
+ link_srv->edp_get_alpm_support = edp_get_alpm_support;
}
/* link dp phy/dpia implements basic dp phy/dpia functionality such as
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.h b/drivers/gpu/drm/amd/display/dc/link/link_factory.h
index e96220d48d03..aad36ca1a31c 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_factory.h
+++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.h
@@ -24,7 +24,7 @@
*/
#ifndef __LINK_FACTORY_H__
#define __LINK_FACTORY_H__
-#include "link.h"
+#include "link_service.h"
struct dc_link *link_create(const struct link_init_data *init_params);
void link_destroy(struct dc_link **link);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_resource.h b/drivers/gpu/drm/amd/display/dc/link/link_resource.h
index 1907bda3cb6e..f7aa3bc3a93a 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/link/link_resource.h
@@ -24,7 +24,7 @@
*/
#ifndef __LINK_RESOURCE_H__
#define __LINK_RESOURCE_H__
-#include "link.h"
+#include "link_service.h"
void link_get_cur_res_map(const struct dc *dc, uint32_t *map);
void link_restore_res_map(const struct dc *dc, uint32_t *map);
void link_get_cur_link_res(const struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.c b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
index aecaf37eee35..acdc162de535 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_validation.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
@@ -408,8 +408,10 @@ enum dc_status link_validate_dp_tunnel_bandwidth(const struct dc *dc, const stru
link = stream->link;
if (!(link && (stream->signal == SIGNAL_TYPE_DISPLAY_PORT
- || stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
- && link->hpd_status))
+ || stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)))
+ continue;
+
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && !link->hpd_status)
continue;
dp_tunnel_settings = get_dp_tunnel_settings(new_ctx, stream);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.h b/drivers/gpu/drm/amd/display/dc/link/link_validation.h
index 9553c81053fe..595774e76453 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_validation.h
+++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.h
@@ -24,7 +24,7 @@
*/
#ifndef __LINK_VALIDATION_H__
#define __LINK_VALIDATION_H__
-#include "link.h"
+#include "link_service.h"
enum dc_status link_validate_mode_timing(
const struct dc_stream_state *stream,
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h
index a3e25e55bed6..d3e6f01a6a90 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h
@@ -26,7 +26,7 @@
#ifndef __DAL_DDC_SERVICE_H__
#define __DAL_DDC_SERVICE_H__
-#include "link.h"
+#include "link_service.h"
#define AUX_POWER_UP_WA_DELAY 500
#define I2C_OVER_AUX_DEFER_WA_DELAY 70
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
index 651926e547b9..b12c11bd6a14 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
@@ -1525,8 +1525,8 @@ bool read_is_mst_supported(struct dc_link *link)
return false;
}
- rev.raw = 0;
- cap.raw = 0;
+ rev.raw = 0;
+ cap.raw = 0;
st = core_link_read_dpcd(link, DP_DPCD_REV, &rev.raw,
sizeof(rev));
@@ -2125,13 +2125,13 @@ void detect_edp_sink_caps(struct dc_link *link)
&backlight_adj_cap, sizeof(backlight_adj_cap));
link->dpcd_caps.dynamic_backlight_capable_edp =
- (backlight_adj_cap & DP_EDP_DYNAMIC_BACKLIGHT_CAP) ? true:false;
+ (backlight_adj_cap & DP_EDP_DYNAMIC_BACKLIGHT_CAP) ? true : false;
core_link_read_dpcd(link, DP_EDP_GENERAL_CAP_1,
&general_edp_cap, sizeof(general_edp_cap));
link->dpcd_caps.set_power_state_capable_edp =
- (general_edp_cap & DP_EDP_SET_POWER_CAP) ? true:false;
+ (general_edp_cap & DP_EDP_SET_POWER_CAP) ? true : false;
set_default_brightness_aux(link);
@@ -2195,6 +2195,12 @@ void detect_edp_sink_caps(struct dc_link *link)
DP_EDP_MSO_LINK_CAPABILITIES,
(uint8_t *)&link->dpcd_caps.mso_cap_sst_links_supported,
sizeof(link->dpcd_caps.mso_cap_sst_links_supported));
+ /*
+ * Read eDP general capability 2
+ */
+ core_link_read_dpcd(link, DP_EDP_GENERAL_CAP_2,
+ (uint8_t *)&link->dpcd_caps.dp_edp_general_cap_2,
+ sizeof(link->dpcd_caps.dp_edp_general_cap_2));
}
bool dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_settings *max_link_enc_cap)
@@ -2506,3 +2512,40 @@ bool dp_is_sink_present(struct dc_link *link)
return present;
}
+
+uint8_t dp_get_lttpr_count(struct dc_link *link)
+{
+ if (dp_is_lttpr_present(link))
+ return dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+
+ return 0;
+}
+
+void edp_get_alpm_support(struct dc_link *link,
+ bool *auxless_support,
+ bool *auxwake_support)
+{
+ bool lttpr_present = dp_is_lttpr_present(link);
+
+ if (auxless_support == NULL || auxwake_support == NULL)
+ return;
+
+ *auxless_support = false;
+ *auxwake_support = false;
+
+ if (!dc_is_embedded_signal(link->connector_signal))
+ return;
+
+ if (link->dpcd_caps.alpm_caps.bits.AUX_LESS_ALPM_CAP) {
+ if (lttpr_present) {
+ if (link->dpcd_caps.lttpr_caps.alpm.bits.AUX_LESS_ALPM_SUPPORTED)
+ *auxless_support = true;
+ } else {
+ *auxless_support = true;
+ }
+ }
+
+ if (link->dpcd_caps.alpm_caps.bits.AUX_WAKE_ALPM_CAP) {
+ if (!lttpr_present)
+ *auxwake_support = true;
+ }
+}
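Both helpers are published only through the link_service vtable, consistent with the link_service.h rule that link-internal headers never leak outside the component. A caller-side sketch:

	/* DM/DC code reaches these via link_srv, not link_dp_capability.h. */
	bool auxless = false, auxwake = false;
	uint8_t lttpr_count = link->dc->link_srv->dp_get_lttpr_count(link);

	link->dc->link_srv->edp_get_alpm_support(link, &auxless, &auxwake);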
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h
index 940b147cc5d4..6e17f72a752f 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h
@@ -26,7 +26,7 @@
#ifndef __DC_LINK_DP_CAPABILITY_H__
#define __DC_LINK_DP_CAPABILITY_H__
-#include "link.h"
+#include "link_service.h"
bool detect_dp_sink_caps(struct dc_link *link);
@@ -108,4 +108,10 @@ uint32_t link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw);
bool dp_overwrite_extended_receiver_cap(struct dc_link *link);
+uint8_t dp_get_lttpr_count(struct dc_link *link);
+
+void edp_get_alpm_support(struct dc_link *link,
+ bool *auxless_support,
+ bool *auxwake_support);
+
#endif /* __DC_LINK_DP_CAPABILITY_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h
index a61edfc9ca7a..7cd03fa4892b 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h
@@ -27,7 +27,7 @@
#ifndef __DC_LINK_DPIA_H__
#define __DC_LINK_DPIA_H__
-#include "link.h"
+#include "link_service.h"
/* Read tunneling device capability from DPCD and update link capability
* accordingly.
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
index 819bf2d8ba53..8a3c18ae97a7 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
@@ -48,8 +48,7 @@
*/
static bool link_dp_is_bw_alloc_available(struct dc_link *link)
{
- return (link && link->hpd_status
- && link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling
+ return (link && link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling
&& link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc
&& link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.bits.driver_bw_alloc_support);
}
@@ -226,35 +225,40 @@ bool link_dpia_enable_usb4_dp_bw_alloc_mode(struct dc_link *link)
bool ret = false;
uint8_t val;
- if (link->hpd_status) {
- val = DPTX_BW_ALLOC_MODE_ENABLE | DPTX_BW_ALLOC_UNMASK_IRQ;
+ if (!link->dc->debug.dpia_debug.bits.enable_bw_allocation_mode) {
+ DC_LOG_DEBUG("%s: link[%d] DPTX BW allocation mode disabled", __func__, link->link_index);
+ return false;
+ }
- if (core_link_write_dpcd(link, DPTX_BW_ALLOCATION_MODE_CONTROL, &val, sizeof(uint8_t)) == DC_OK) {
- DC_LOG_DEBUG("%s: link[%d] DPTX BW allocation mode enabled", __func__, link->link_index);
+ val = DPTX_BW_ALLOC_MODE_ENABLE | DPTX_BW_ALLOC_UNMASK_IRQ;
- retrieve_usb4_dp_bw_allocation_info(link);
+ if (core_link_write_dpcd(link, DPTX_BW_ALLOCATION_MODE_CONTROL, &val, sizeof(uint8_t)) == DC_OK) {
+ DC_LOG_DEBUG("%s: link[%d] DPTX BW allocation mode enabled", __func__, link->link_index);
- if (link->dpia_bw_alloc_config.nrd_max_link_rate && link->dpia_bw_alloc_config.nrd_max_lane_count) {
- link->reported_link_cap.link_rate = link->dpia_bw_alloc_config.nrd_max_link_rate;
- link->reported_link_cap.lane_count = link->dpia_bw_alloc_config.nrd_max_lane_count;
- }
+ retrieve_usb4_dp_bw_allocation_info(link);
- link->dpia_bw_alloc_config.bw_alloc_enabled = true;
- ret = true;
-
- if (link->dc->debug.dpia_debug.bits.enable_usb4_bw_zero_alloc_patch) {
- /*
- * During DP tunnel creation, the CM preallocates BW
- * and reduces the estimated BW of other DPIAs.
- * The CM releases the preallocation only when the allocation is complete.
- * Perform a zero allocation to make the CM release the preallocation
- * and correctly update the estimated BW for all DPIAs per host router.
- */
- link_dp_dpia_allocate_usb4_bandwidth_for_stream(link, 0);
- }
- } else
- DC_LOG_DEBUG("%s: link[%d] failed to enable DPTX BW allocation mode", __func__, link->link_index);
- }
+ if (link->dpia_bw_alloc_config.nrd_max_link_rate &&
+     link->dpia_bw_alloc_config.nrd_max_lane_count) {
+ link->reported_link_cap.link_rate = link->dpia_bw_alloc_config.nrd_max_link_rate;
+ link->reported_link_cap.lane_count = link->dpia_bw_alloc_config.nrd_max_lane_count;
+ }
+
+ link->dpia_bw_alloc_config.bw_alloc_enabled = true;
+ ret = true;
+
+ if (link->dc->debug.dpia_debug.bits.enable_usb4_bw_zero_alloc_patch) {
+ /*
+ * During DP tunnel creation, the CM preallocates BW
+ * and reduces the estimated BW of other DPIAs.
+ * The CM releases the preallocation only when the allocation is complete.
+ * Perform a zero allocation to make the CM release the preallocation
+ * and correctly update the estimated BW for all DPIAs per host router.
+ */
+ link_dp_dpia_allocate_usb4_bandwidth_for_stream(link, 0);
+ }
+ } else
+ DC_LOG_DEBUG("%s: link[%d] failed to enable DPTX BW allocation mode", __func__, link->link_index);
return ret;
}
@@ -297,15 +301,12 @@ void dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int pe
{
if (link && link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling
&& link->dpia_bw_alloc_config.bw_alloc_enabled) {
- //1. Hot Plug
- if (link->hpd_status && peak_bw > 0) {
+ if (peak_bw > 0) {
// If DP over USB4 then we need to check BW allocation
link->dpia_bw_alloc_config.link_max_bw = peak_bw;
link_dpia_send_bw_alloc_request(link, peak_bw);
- }
- //2. Cold Unplug
- else if (!link->hpd_status)
+ } else
dpia_bw_alloc_unplug(link);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
index 41efcb3e44e2..30cd8e2b9d35 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
@@ -26,7 +26,7 @@
#ifndef DC_INC_LINK_DP_DPIA_BW_H_
#define DC_INC_LINK_DP_DPIA_BW_H_
-#include "link.h"
+#include "link_service.h"
/*
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.h
index ac33730fedd4..87516fb3b45a 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.h
@@ -26,7 +26,7 @@
#ifndef __DC_LINK_DP_IRQ_HANDLER_H__
#define __DC_LINK_DP_IRQ_HANDLER_H__
-#include "link.h"
+#include "link_service.h"
bool dp_parse_link_loss_status(
struct dc_link *link,
union hpd_irq_data *hpd_irq_dpcd_data);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h
index ab1c1f8f1f8b..58e154494582 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h
@@ -26,7 +26,7 @@
#ifndef __DC_LINK_DP_PHY_H__
#define __DC_LINK_DP_PHY_H__
-#include "link.h"
+#include "link_service.h"
void dp_enable_link_phy(
struct dc_link *link,
const struct link_resource *link_res,
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
index 2dc1a660e504..08e2b572e0ff 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
@@ -1018,7 +1018,12 @@ static enum link_training_result dpcd_exit_training_mode(struct dc_link *link, e
{
enum dc_status status;
uint8_t sink_status = 0;
- uint8_t i;
+ uint32_t i;
+ uint8_t lttpr_count = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+ uint32_t intra_hop_disable_time_ms = (lttpr_count > 0 ? lttpr_count * 300 : 10);
+
+ // Each hop could theoretically take over 256ms (max 128b/132b AUX RD INTERVAL)
+ // To be safe, allow 300ms per LTTPR and 10ms for no LTTPR case
/* clear training pattern set */
status = dpcd_set_training_pattern(link, DP_TRAINING_PATTERN_VIDEOIDLE);
@@ -1028,7 +1033,7 @@ static enum link_training_result dpcd_exit_training_mode(struct dc_link *link, e
if (encoding == DP_128b_132b_ENCODING) {
/* poll for intra-hop disable */
- for (i = 0; i < 10; i++) {
+ for (i = 0; i < intra_hop_disable_time_ms; i++) {
if ((core_link_read_dpcd(link, DP_SINK_STATUS, &sink_status, 1) == DC_OK) &&
(sink_status & DP_INTRA_HOP_AUX_REPLY_INDICATION) == 0)
break;
@@ -1724,6 +1729,15 @@ bool perform_link_training_with_retries(
break;
}
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
+ stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST &&
+ !link->dc->config.enable_dpia_pre_training) {
+ if (j == (attempts - 1))
+ do_fallback = true;
+ else
+ do_fallback = false;
+ }
+
if (j == (attempts - 1)) {
DC_LOG_WARNING(
"%s: Link(%d) training attempt %u of %d failed @ rate(%d) x lane(%d) @ spread = %x : fail reason:(%d)\n",
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h
index 574b083e0936..ce52de22ab7a 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h
@@ -26,7 +26,7 @@
#ifndef __DC_LINK_DP_TRAINING_H__
#define __DC_LINK_DP_TRAINING_H__
-#include "link.h"
+#include "link_service.h"
bool perform_link_training_with_retries(
const struct dc_link_settings *link_setting,
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.h
index 08d787a1e451..c2717c678c72 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.h
@@ -25,7 +25,7 @@
#ifndef __LINK_DPCD_H__
#define __LINK_DPCD_H__
-#include "link.h"
+#include "link_service.h"
#include "dpcd_defs.h"
enum dc_status core_link_read_dpcd(
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
index 98ec9b5a559c..5e806edbb9f6 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
@@ -703,6 +703,20 @@ bool edp_setup_psr(struct dc_link *link,
if (!link)
return false;
+ /* This is a workaround: some vendors require the source to
+ * read the PSR cap; otherwise, the vendor's PSR feature will
+ * fall back to its default behavior, causing a misconfiguration
+ * of this feature.
+ */
+ if (link->panel_config.psr.read_psrcap_again) {
+ dm_helpers_dp_read_dpcd(
+ link->ctx,
+ link,
+ DP_PSR_SUPPORT,
+ &link->dpcd_caps.psr_info.psr_version,
+ sizeof(link->dpcd_caps.psr_info.psr_version));
+ }
+
//Clear PSR cfg
memset(&psr_configuration, 0, sizeof(psr_configuration));
dm_helpers_dp_write_dpcd(
@@ -870,6 +884,8 @@ bool edp_setup_psr(struct dc_link *link,
psr_context->dsc_slice_height = psr_config->dsc_slice_height;
+ psr_context->os_request_force_ffu = psr_config->os_request_force_ffu;
+
if (psr) {
link->psr_settings.psr_feature_enabled = psr->funcs->psr_copy_settings(psr,
link, psr_context, panel_inst);
@@ -1029,6 +1045,8 @@ bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream
replay_context.line_time_in_ns = lineTimeInNs;
+ replay_context.os_request_force_ffu = link->replay_settings.config.os_request_force_ffu;
+
link->replay_settings.replay_feature_enabled =
replay->funcs->replay_copy_settings(replay, link, &replay_context, panel_inst);
if (link->replay_settings.replay_feature_enabled) {
@@ -1042,7 +1060,13 @@ bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream
(uint8_t *)&(replay_config.raw), sizeof(uint8_t));
memset(&alpm_config, 0, sizeof(alpm_config));
- alpm_config.bits.ENABLE = 1;
+ alpm_config.bits.ENABLE = link->replay_settings.config.alpm_mode != DC_ALPM_UNSUPPORTED ? 1 : 0;
+
+ if (link->replay_settings.config.alpm_mode == DC_ALPM_AUXLESS) {
+ alpm_config.bits.ALPM_MODE_SEL = 1;
+ alpm_config.bits.ACDS_PERIOD_DURATION = 0;
+ }
+
dm_helpers_dp_write_dpcd(
link->ctx,
link,
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
index 4a475d5b9dde..62a6344e613e 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
@@ -25,7 +25,7 @@
#ifndef __DC_LINK_EDP_PANEL_CONTROL_H__
#define __DC_LINK_EDP_PANEL_CONTROL_H__
-#include "link.h"
+#include "link_service.h"
enum dp_panel_mode dp_get_panel_mode(struct dc_link *link);
void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h
index 4fb526b264f9..af529328ba17 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h
@@ -26,7 +26,7 @@
#ifndef __DC_LINK_HPD_H__
#define __DC_LINK_HPD_H__
-#include "link.h"
+#include "link_service.h"
enum hpd_source_id get_hpd_line(struct dc_link *link);
/*
diff --git a/drivers/gpu/drm/amd/display/dc/mmhubbub/dcn20/dcn20_mmhubbub.c b/drivers/gpu/drm/amd/display/dc/mmhubbub/dcn20/dcn20_mmhubbub.c
index 259a98e4ee2c..2a422e223bf2 100644
--- a/drivers/gpu/drm/amd/display/dc/mmhubbub/dcn20/dcn20_mmhubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/mmhubbub/dcn20/dcn20_mmhubbub.c
@@ -284,7 +284,7 @@ void mcifwb2_dump_frame(struct mcif_wb *mcif_wb,
REG_UPDATE(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_LOCK, 0xf);
- memcpy(dest_luma_buffer, luma_buffer, mcif_params->luma_pitch * dest_height);
+ memcpy(dest_luma_buffer, luma_buffer, (size_t)mcif_params->luma_pitch * dest_height);
memcpy(dest_chroma_buffer, chroma_buffer, mcif_params->chroma_pitch * dest_height / 2);
REG_UPDATE(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_LOCK, 0x0);
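The (size_t) cast forces the size computation into the wider native type before the multiply; without it, two 32-bit operands are multiplied in 32-bit arithmetic and only the wrapped result reaches memcpy. An illustrative wrap, assuming 32-bit operand types:

	uint32_t pitch = 131072, height = 32768;	/* illustrative */
	/* pitch * height == 2^32: wraps to 0 in 32-bit arithmetic */
	memcpy(dst, src, (size_t)pitch * height);	/* 4 GiB when computed in 64-bit */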
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c
index f3fb3fe13757..e1a0308dee57 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c
@@ -287,13 +287,6 @@ void mpc401_program_lut_read_write_control(struct mpc *mpc, const enum MCM_LUT_I
}
}
-void mpc401_program_3dlut_size(struct mpc *mpc, bool is_17x17x17, int mpcc_id)
-{
- struct dcn401_mpc *mpc401 = TO_DCN401_MPC(mpc);
-
- REG_UPDATE(MPCC_MCM_3DLUT_MODE[mpcc_id], MPCC_MCM_3DLUT_SIZE, is_17x17x17 ? 0 : 1);
-}
-
void mpc_program_gamut_remap(
struct mpc *mpc,
unsigned int mpcc_id,
@@ -611,7 +604,6 @@ static const struct mpc_funcs dcn401_mpc_funcs = {
.populate_lut = mpc401_populate_lut,
.program_lut_read_write_control = mpc401_program_lut_read_write_control,
.program_lut_mode = mpc401_program_lut_mode,
- .program_3dlut_size = mpc401_program_3dlut_size,
};
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h
index eb0c68d0b0c7..fdc42f8ab3ff 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h
@@ -221,11 +221,6 @@ void mpc401_program_lut_read_write_control(
bool lut_bank_a,
int mpcc_id);
-void mpc401_program_3dlut_size(
- struct mpc *mpc,
- bool is_17x17x17,
- int mpcc_id);
-
void mpc401_set_gamut_remap(
struct mpc *mpc,
int mpcc_id,
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h
index d159e3ed3bb3..ead92ad78a23 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h
@@ -62,6 +62,7 @@
SF(OTG0_OTG_CONTROL, OTG_DISABLE_POINT_CNTL, mask_sh),\
SF(OTG0_OTG_CONTROL, OTG_FIELD_NUMBER_CNTL, mask_sh),\
SF(OTG0_OTG_CONTROL, OTG_OUT_MUX, mask_sh),\
+ SF(OTG0_OTG_CONTROL, OTG_CURRENT_MASTER_EN_STATE, mask_sh),\
SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_EN, mask_sh),\
SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_SYNC_OUTPUT_LINE_NUM, mask_sh),\
SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_SYNC_OUTPUT_POLARITY, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
index 72bff94cb57d..52d5ea98c86b 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
@@ -162,6 +162,8 @@ static bool optc35_disable_crtc(struct timing_generator *optc)
REG_WAIT(OTG_CLOCK_CONTROL,
OTG_BUSY, 0,
1, 100000);
+ REG_WAIT(OTG_CONTROL, OTG_CURRENT_MASTER_EN_STATE, 0, 1, 100000);
+
optc1_clear_optc_underflow(optc);
return true;
@@ -428,6 +430,21 @@ static void optc35_set_long_vtotal(
}
}
+static void optc35_wait_otg_disable(struct timing_generator *optc)
+{
+ struct optc *optc1;
+ uint32_t is_master_en;
+
+ if (!optc || !optc->ctx)
+ return;
+
+ optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_GET(OTG_CONTROL, OTG_MASTER_EN, &is_master_en);
+ if (!is_master_en)
+ REG_WAIT(OTG_CLOCK_CONTROL, OTG_CURRENT_MASTER_EN_STATE, 0, 1, 100000);
+}
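wait_otg_disable complements disable_crtc: the master-enable write takes effect asynchronously, so code that needs the OTG actually stopped polls the *_CURRENT_MASTER_EN_STATE status field rather than the bit it just wrote. A hedged caller sketch:

	/* Sketch: make sure the OTG is fully stopped before gating it. */
	tg->funcs->disable_crtc(tg);
	if (tg->funcs->wait_otg_disable)
		tg->funcs->wait_otg_disable(tg);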
+
static const struct timing_generator_funcs dcn35_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
@@ -479,6 +496,7 @@ static const struct timing_generator_funcs dcn35_tg_funcs = {
.set_odm_bypass = optc32_set_odm_bypass,
.set_odm_combine = optc35_set_odm_combine,
.get_optc_source = optc2_get_optc_source,
+ .wait_otg_disable = optc35_wait_otg_disable,
.set_h_timing_div_manual_mode = optc32_set_h_timing_div_manual_mode,
.set_out_mux = optc3_set_out_mux,
.set_drr_trigger_window = optc3_set_drr_trigger_window,
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c
index ff79c38287df..5af13706e601 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c
@@ -226,6 +226,11 @@ bool optc401_disable_crtc(struct timing_generator *optc)
REG_UPDATE(CONTROL,
VTG0_ENABLE, 0);
+ // wait until OTG_CURRENT_MASTER_EN_STATE == 0
+ REG_WAIT(OTG_CONTROL,
+ OTG_CURRENT_MASTER_EN_STATE,
+ 0, 10, 15000);
+
/* CRTC disabled, so disable clock. */
REG_WAIT(OTG_CLOCK_CONTROL,
OTG_BUSY, 0,
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c
index 3a51be63f020..c4b4dc3ad8c9 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c
@@ -29,6 +29,7 @@
#include "stream_encoder.h"
#include "resource.h"
+#include "clk_mgr.h"
#include "include/irq_service_interface.h"
#include "virtual/virtual_stream_encoder.h"
#include "dce110/dce110_resource.h"
@@ -836,17 +837,24 @@ static enum dc_status build_mapped_resource(
return DC_OK;
}
-static enum dc_status dce100_validate_bandwidth(
+enum dc_status dce100_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
enum dc_validate_mode validate_mode)
{
int i;
bool at_least_one_pipe = false;
+ struct dc_stream_state *stream = NULL;
+ const uint32_t max_pix_clk_khz = max(dc->clk_mgr->clks.max_supported_dispclk_khz, 400000);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
- if (context->res_ctx.pipe_ctx[i].stream)
+ stream = context->res_ctx.pipe_ctx[i].stream;
+ if (stream) {
at_least_one_pipe = true;
+
+ if (stream->timing.pix_clk_100hz >= max_pix_clk_khz * 10)
+ return DC_FAIL_BANDWIDTH_VALIDATE;
+ }
}
if (at_least_one_pipe) {
@@ -854,7 +862,16 @@ static enum dc_status dce100_validate_bandwidth(
context->bw_ctx.bw.dce.dispclk_khz = 681000;
context->bw_ctx.bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ;
} else {
- context->bw_ctx.bw.dce.dispclk_khz = 0;
+ /* On DCE 6.0 and 6.4, PLL0 drives both the display engine clock
+  * and the DP clock, so it must not be turned off. Select the
+  * display clock value for its low-power mode instead.
+  */
+ if (dc->ctx->dce_version == DCE_VERSION_6_0 ||
+ dc->ctx->dce_version == DCE_VERSION_6_4)
+ context->bw_ctx.bw.dce.dispclk_khz = 352000;
+ else
+ context->bw_ctx.bw.dce.dispclk_khz = 0;
+
context->bw_ctx.bw.dce.yclk_khz = 0;
}
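The pixel-clock guard above mixes two units: pix_clk_100hz counts in 100 Hz steps while max_pix_clk_khz is in kHz, hence the factor of 10. Worked through with the 400 MHz floor from the max() above:

	/* max_pix_clk_khz = 400,000 kHz -> limit = 400,000 * 10 = 4,000,000
	 * in 100 Hz units, i.e. 4,000,000 * 100 Hz = 400 MHz
	 */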
@@ -881,7 +898,7 @@ static bool dce100_validate_surface_sets(
return true;
}
-static enum dc_status dce100_validate_global(
+enum dc_status dce100_validate_global(
struct dc *dc,
struct dc_state *context)
{
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.h
index fecab7c560f5..dd150a4b4610 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.h
@@ -41,6 +41,15 @@ struct resource_pool *dce100_create_resource_pool(
enum dc_status dce100_validate_plane(const struct dc_plane_state *plane_state, struct dc_caps *caps);
+enum dc_status dce100_validate_global(
+ struct dc *dc,
+ struct dc_state *context);
+
+enum dc_status dce100_validate_bandwidth(
+ struct dc *dc,
+ struct dc_state *context,
+ enum dc_validate_mode validate_mode);
+
enum dc_status dce100_add_stream_to_ctx(
struct dc *dc,
struct dc_state *new_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
index 164ba796f64c..869a8e515fc0 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
@@ -1111,12 +1111,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
&clks);
dc->bw_vbios->low_yclk = bw_frc_to_fixed(
- clks.clocks_in_khz[0] * memory_type_multiplier, 1000);
+ (int64_t)clks.clocks_in_khz[0] * memory_type_multiplier, 1000);
dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
- clks.clocks_in_khz[clks.num_levels>>1] * memory_type_multiplier,
+ (int64_t)clks.clocks_in_khz[clks.num_levels>>1] * memory_type_multiplier,
1000);
dc->bw_vbios->high_yclk = bw_frc_to_fixed(
- clks.clocks_in_khz[clks.num_levels-1] * memory_type_multiplier,
+ (int64_t)clks.clocks_in_khz[clks.num_levels-1] * memory_type_multiplier,
1000);
return;
@@ -1152,12 +1152,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
* YCLK = UMACLK*m_memoryTypeMultiplier
*/
dc->bw_vbios->low_yclk = bw_frc_to_fixed(
- mem_clks.data[0].clocks_in_khz * memory_type_multiplier, 1000);
+ (int64_t)mem_clks.data[0].clocks_in_khz * memory_type_multiplier, 1000);
dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
- mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * memory_type_multiplier,
+ (int64_t)mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * memory_type_multiplier,
1000);
dc->bw_vbios->high_yclk = bw_frc_to_fixed(
- mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * memory_type_multiplier,
+ (int64_t)mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * memory_type_multiplier,
1000);
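The (int64_t) cast matters because both operands are plain 32-bit values: without it the product is computed in int and only then widened by bw_frc_to_fixed's 64-bit parameter, after any overflow has already occurred. An illustrative case:

	int khz = 600000, mult = 4000;		/* illustrative: product is 2.4e9 */
	int64_t bad  = khz * mult;		/* 32-bit multiply overflows first */
	int64_t good = (int64_t)khz * mult;	/* full 64-bit product: 2400000000 */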
/* Now notify PPLib/SMU about which Watermarks sets they should select
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c
index eb1e158d3436..540e04ec1e2d 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c
@@ -67,7 +67,7 @@
#include "reg_helper.h"
#include "dce100/dce100_resource.h"
-#include "link.h"
+#include "link_service.h"
#ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
#define mmDP0_DP_DPHY_INTERNAL_CTRL 0x210f
@@ -990,12 +990,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
memory_type_multiplier = MEMORY_TYPE_HBM;
dc->bw_vbios->low_yclk = bw_frc_to_fixed(
- mem_clks.data[0].clocks_in_khz * memory_type_multiplier, 1000);
+ (int64_t)mem_clks.data[0].clocks_in_khz * memory_type_multiplier, 1000);
dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
- mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * memory_type_multiplier,
+ (int64_t)mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * memory_type_multiplier,
1000);
dc->bw_vbios->high_yclk = bw_frc_to_fixed(
- mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * memory_type_multiplier,
+ (int64_t)mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * memory_type_multiplier,
1000);
/* Now notify PPLib/SMU about which Watermarks sets they should select
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c
index 53b60044653f..53c67ebe779f 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c
@@ -34,6 +34,7 @@
#include "stream_encoder.h"
#include "resource.h"
+#include "clk_mgr.h"
#include "include/irq_service_interface.h"
#include "irq/dce60/irq_service_dce60.h"
#include "dce110/dce110_timing_generator.h"
@@ -863,61 +864,6 @@ static void dce60_resource_destruct(struct dce110_resource_pool *pool)
}
}
-static enum dc_status dce60_validate_bandwidth(
- struct dc *dc,
- struct dc_state *context,
- enum dc_validate_mode validate_mode)
-{
- int i;
- bool at_least_one_pipe = false;
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- if (context->res_ctx.pipe_ctx[i].stream)
- at_least_one_pipe = true;
- }
-
- if (at_least_one_pipe) {
- /* TODO implement when needed but for now hardcode max value*/
- context->bw_ctx.bw.dce.dispclk_khz = 681000;
- context->bw_ctx.bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ;
- } else {
- context->bw_ctx.bw.dce.dispclk_khz = 0;
- context->bw_ctx.bw.dce.yclk_khz = 0;
- }
-
- return DC_OK;
-}
-
-static bool dce60_validate_surface_sets(
- struct dc_state *context)
-{
- int i;
-
- for (i = 0; i < context->stream_count; i++) {
- if (context->stream_status[i].plane_count == 0)
- continue;
-
- if (context->stream_status[i].plane_count > 1)
- return false;
-
- if (context->stream_status[i].plane_states[0]->format
- >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
- return false;
- }
-
- return true;
-}
-
-static enum dc_status dce60_validate_global(
- struct dc *dc,
- struct dc_state *context)
-{
- if (!dce60_validate_surface_sets(context))
- return DC_FAIL_SURFACE_VALIDATE;
-
- return DC_OK;
-}
-
static void dce60_destroy_resource_pool(struct resource_pool **pool)
{
struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool);
@@ -931,10 +877,10 @@ static const struct resource_funcs dce60_res_pool_funcs = {
.destroy = dce60_destroy_resource_pool,
.link_enc_create = dce60_link_encoder_create,
.panel_cntl_create = dce60_panel_cntl_create,
- .validate_bandwidth = dce60_validate_bandwidth,
+ .validate_bandwidth = dce100_validate_bandwidth,
.validate_plane = dce100_validate_plane,
.add_stream_to_ctx = dce100_add_stream_to_ctx,
- .validate_global = dce60_validate_global,
+ .validate_global = dce100_validate_global,
.find_first_free_match_stream_enc_for_link = dce100_find_first_free_match_stream_enc_for_link
};
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
index 3e8b0ac11d90..5b7769745202 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
@@ -32,6 +32,7 @@
#include "stream_encoder.h"
#include "resource.h"
+#include "clk_mgr.h"
#include "include/irq_service_interface.h"
#include "irq/dce80/irq_service_dce80.h"
#include "dce110/dce110_timing_generator.h"
@@ -869,61 +870,6 @@ static void dce80_resource_destruct(struct dce110_resource_pool *pool)
}
}
-static enum dc_status dce80_validate_bandwidth(
- struct dc *dc,
- struct dc_state *context,
- enum dc_validate_mode validate_mode)
-{
- int i;
- bool at_least_one_pipe = false;
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- if (context->res_ctx.pipe_ctx[i].stream)
- at_least_one_pipe = true;
- }
-
- if (at_least_one_pipe) {
- /* TODO implement when needed but for now hardcode max value*/
- context->bw_ctx.bw.dce.dispclk_khz = 681000;
- context->bw_ctx.bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ;
- } else {
- context->bw_ctx.bw.dce.dispclk_khz = 0;
- context->bw_ctx.bw.dce.yclk_khz = 0;
- }
-
- return DC_OK;
-}
-
-static bool dce80_validate_surface_sets(
- struct dc_state *context)
-{
- int i;
-
- for (i = 0; i < context->stream_count; i++) {
- if (context->stream_status[i].plane_count == 0)
- continue;
-
- if (context->stream_status[i].plane_count > 1)
- return false;
-
- if (context->stream_status[i].plane_states[0]->format
- >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
- return false;
- }
-
- return true;
-}
-
-static enum dc_status dce80_validate_global(
- struct dc *dc,
- struct dc_state *context)
-{
- if (!dce80_validate_surface_sets(context))
- return DC_FAIL_SURFACE_VALIDATE;
-
- return DC_OK;
-}
-
static void dce80_destroy_resource_pool(struct resource_pool **pool)
{
struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool);
@@ -937,10 +883,10 @@ static const struct resource_funcs dce80_res_pool_funcs = {
.destroy = dce80_destroy_resource_pool,
.link_enc_create = dce80_link_encoder_create,
.panel_cntl_create = dce80_panel_cntl_create,
- .validate_bandwidth = dce80_validate_bandwidth,
+ .validate_bandwidth = dce100_validate_bandwidth,
.validate_plane = dce100_validate_plane,
.add_stream_to_ctx = dce100_add_stream_to_ctx,
- .validate_global = dce80_validate_global,
+ .validate_global = dce100_validate_global,
.find_first_free_match_stream_enc_for_link = dce100_find_first_free_match_stream_enc_for_link
};
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
index f9cbdad3ef37..84b38d2d6967 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
@@ -85,7 +85,7 @@
#include "vm_helper.h"
#include "link_enc_cfg.h"
-#include "link.h"
+#include "link_service.h"
#define DC_LOGGER_INIT(logger)
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
index 895349d9ca07..ff63f59ff928 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
@@ -60,7 +60,7 @@
#include "dml/display_mode_vba.h"
#include "dcn30/dcn30_dccg.h"
#include "dcn10/dcn10_resource.h"
-#include "link.h"
+#include "link_service.h"
#include "dce/dce_panel_cntl.h"
#include "dcn30/dcn30_dwb.h"
@@ -2192,7 +2192,7 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
j = 0;
// create the final dcfclk and uclk table
while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) {
- if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) {
+ if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) {
dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
} else {
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
index 3345068a878c..61623cb518d9 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
@@ -47,7 +47,8 @@
#include "dcn10/dcn10_resource.h"
-#include "link.h"
+#include "link_service.h"
+
#include "dce/dce_abm.h"
#include "dce/dce_audio.h"
#include "dce/dce_aux.h"
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
index 3479e1eab4cd..02b9a84f2db3 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
@@ -47,7 +47,7 @@
#include "dcn10/dcn10_resource.h"
-#include "link.h"
+#include "link_service.h"
#include "dce/dce_abm.h"
#include "dce/dce_audio.h"
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
index 663c49cce4aa..d4917a35b991 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
@@ -927,6 +927,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.enable_legacy_fast_update = true,
.using_dml2 = false,
.disable_dsc_power_gate = true,
+ .min_disp_clk_khz = 100000,
};
static const struct dc_panel_config panel_config_defaults = {
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
index 9917b366f00c..3965a7f1b64b 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
@@ -69,7 +69,7 @@
#include "dml/display_mode_vba.h"
#include "dcn32/dcn32_dccg.h"
#include "dcn10/dcn10_resource.h"
-#include "link.h"
+#include "link_service.h"
#include "dcn31/dcn31_panel_cntl.h"
#include "dcn30/dcn30_dwb.h"
@@ -739,6 +739,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.fpo_vactive_min_active_margin_us = 200,
.fpo_vactive_max_blank_us = 1000,
.enable_legacy_fast_update = false,
+ .disable_stutter_for_wm_program = true
};
static struct dce_aux *dcn32_aux_engine_create(
@@ -2852,7 +2853,7 @@ struct pipe_ctx *dcn32_acquire_free_pipe_as_secondary_opp_head(
free_pipe->plane_res.xfm = pool->transforms[free_pipe_idx];
free_pipe->plane_res.dpp = pool->dpps[free_pipe_idx];
free_pipe->plane_res.mpcc_inst = pool->dpps[free_pipe_idx]->inst;
- free_pipe->hblank_borrow = otg_master->hblank_borrow;
+ free_pipe->dsc_padding_params = otg_master->dsc_padding_params;
if (free_pipe->stream->timing.flags.DSC == 1) {
dcn20_acquire_dsc(free_pipe->stream->ctx->dc,
&new_ctx->res_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
index 82f966cf4ed2..99f0432288b4 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
@@ -1141,7 +1141,8 @@ unsigned int dcn32_get_max_hw_cursor_size(const struct dc *dc,
SRI_ARR(DCN_SURF1_TTU_CNTL1, HUBPREQ, id), \
SRI_ARR(DCN_CUR0_TTU_CNTL0, HUBPREQ, id), \
SRI_ARR(DCN_CUR0_TTU_CNTL1, HUBPREQ, id), \
- SRI_ARR(HUBP_CLK_CNTL, HUBP, id)
+ SRI_ARR(HUBP_CLK_CNTL, HUBP, id), \
+ SRI_ARR(HUBPRET_READ_LINE_VALUE, HUBPRET, id)
#define HUBP_REG_LIST_DCN2_COMMON_RI(id) \
HUBP_REG_LIST_DCN_RI(id), HUBP_REG_LIST_DCN_VM_RI(id), \
SRI_ARR(PREFETCH_SETTINGS, HUBPREQ, id), \
@@ -1229,7 +1230,8 @@ unsigned int dcn32_get_max_hw_cursor_size(const struct dc *dc,
SR(DCHUBBUB_ARB_MALL_CNTL), \
SR(DCN_VM_FAULT_ADDR_MSB), SR(DCN_VM_FAULT_ADDR_LSB), \
SR(DCN_VM_FAULT_CNTL), SR(DCN_VM_FAULT_STATUS), \
- SR(SDPIF_REQUEST_RATE_LIMIT)
+ SR(SDPIF_REQUEST_RATE_LIMIT), \
+ SR(DCHUBBUB_SDPIF_CFG0)
/* DCCG */
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
index 061c0907d802..ad214986f7ac 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
@@ -72,7 +72,7 @@
#include "dml/display_mode_vba.h"
#include "dcn32/dcn32_dccg.h"
#include "dcn10/dcn10_resource.h"
-#include "link.h"
+#include "link_service.h"
#include "dcn31/dcn31_panel_cntl.h"
#include "dcn30/dcn30_dwb.h"
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
index 8475c6eec547..07552445e424 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
@@ -61,7 +61,7 @@
#include "dcn31/dcn31_hpo_dp_stream_encoder.h"
#include "dcn31/dcn31_hpo_dp_link_encoder.h"
#include "dcn32/dcn32_hpo_dp_link_encoder.h"
-#include "link.h"
+#include "link_service.h"
#include "dcn31/dcn31_apg.h"
#include "dcn32/dcn32_dio_link_encoder.h"
#include "dcn31/dcn31_vpg.h"
@@ -1900,9 +1900,6 @@ static bool dcn35_resource_construct(
dc->caps.num_of_host_routers = 2;
dc->caps.num_of_dpias_per_host_router = 2;
- dc->caps.num_of_host_routers = 2;
- dc->caps.num_of_dpias_per_host_router = 2;
-
/* max_disp_clock_khz_at_vmin is slightly lower than the STA value in order
* to provide some margin.
 * It's expected for future ASICs to have an equal or higher value, in order to
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
index 0971c0f74186..cb0478a9a34d 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
@@ -40,7 +40,7 @@
#include "dcn31/dcn31_hpo_dp_stream_encoder.h"
#include "dcn31/dcn31_hpo_dp_link_encoder.h"
#include "dcn32/dcn32_hpo_dp_link_encoder.h"
-#include "link.h"
+#include "link_service.h"
#include "dcn31/dcn31_apg.h"
#include "dcn32/dcn32_dio_link_encoder.h"
#include "dcn31/dcn31_vpg.h"
@@ -1872,9 +1872,6 @@ static bool dcn351_resource_construct(
dc->caps.num_of_host_routers = 2;
dc->caps.num_of_dpias_per_host_router = 2;
- dc->caps.num_of_host_routers = 2;
- dc->caps.num_of_dpias_per_host_router = 2;
-
/* max_disp_clock_khz_at_vmin is slightly lower than the STA value in order
* to provide some margin.
 * It's expected for future ASICs to have an equal or higher value, in order to
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c
index 8bae7fcedc22..126090c9bb8a 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c
@@ -40,7 +40,7 @@
#include "dcn31/dcn31_hpo_dp_stream_encoder.h"
#include "dcn31/dcn31_hpo_dp_link_encoder.h"
#include "dcn32/dcn32_hpo_dp_link_encoder.h"
-#include "link.h"
+#include "link_service.h"
#include "dcn31/dcn31_apg.h"
#include "dcn32/dcn32_dio_link_encoder.h"
#include "dcn31/dcn31_vpg.h"
@@ -1873,9 +1873,6 @@ static bool dcn36_resource_construct(
dc->caps.num_of_host_routers = 2;
dc->caps.num_of_dpias_per_host_router = 2;
- dc->caps.num_of_host_routers = 2;
- dc->caps.num_of_dpias_per_host_router = 2;
-
/* max_disp_clock_khz_at_vmin is slightly lower than the STA value in order
* to provide some margin.
 * It's expected for future ASICs to have an equal or higher value, in order to
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
index b3988e38d0a6..1d18807e4749 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
@@ -50,7 +50,7 @@
#include "dml/display_mode_vba.h"
#include "dcn401/dcn401_dccg.h"
#include "dcn10/dcn10_resource.h"
-#include "link.h"
+#include "link_service.h"
#include "link_enc_cfg.h"
#include "dcn31/dcn31_panel_cntl.h"
@@ -708,6 +708,7 @@ static const struct dc_debug_options debug_defaults_drv = {
},
.use_max_lb = true,
.force_disable_subvp = false,
+ .disable_force_pstate_allow_on_hw_release = false,
.exit_idle_opt_for_cursor_updates = true,
.using_dml2 = true,
.using_dml21 = true,
@@ -1698,6 +1699,9 @@ static void dcn401_build_pipe_pix_clk_params(struct pipe_ctx *pipe_ctx)
pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz;
+ if (pipe_ctx->dsc_padding_params.dsc_hactive_padding != 0)
+ pixel_clk_params->requested_pix_clk_100hz = pipe_ctx->dsc_padding_params.dsc_pix_clk_100hz;
+
if (!pipe_ctx->stream->ctx->dc->config.unify_link_enc_assignment)
link_enc = link_enc_cfg_get_link_enc(link);
if (link_enc)
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
index 2ae6831c31ef..0fc66487d800 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
@@ -140,7 +140,8 @@ void dcn401_prepare_mcache_programming(struct dc *dc, struct dc_state *context);
SRI_ARR(UCLK_PSTATE_FORCE, HUBPREQ, id), \
HUBP_3DLUT_FL_REG_LIST_DCN401(id), \
SRI_ARR(DCSURF_VIEWPORT_MCACHE_SPLIT_COORDINATE, HUBP, id), \
- SRI_ARR(DCHUBP_MCACHEID_CONFIG, HUBP, id)
+ SRI_ARR(DCHUBP_MCACHEID_CONFIG, HUBP, id), \
+ SRI_ARR(HUBPRET_READ_LINE_VALUE, HUBPRET, id)
/* ABM */
#define ABM_DCN401_REG_LIST_RI(id) \
diff --git a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/Makefile b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/Makefile
new file mode 100644
index 000000000000..bc93356a0b5b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/Makefile
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: MIT
+#
+# Copyright 2025 Advanced Micro Devices, Inc.
+# Makefile for the bounding box component.
+# Floating point is required due to the nature of bounding box values.
+
+soc_and_ip_translator_ccflags := $(CC_FLAGS_FPU)
+soc_and_ip_translator_rcflags := $(CC_FLAGS_NO_FPU)
+
+CFLAGS_$(AMDDALPATH)/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.o := $(soc_and_ip_translator_ccflags)
+
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.o := $(soc_and_ip_translator_rcflags)
+
+soc_and_ip_translator := soc_and_ip_translator.o
+soc_and_ip_translator += dcn401/dcn401_soc_and_ip_translator.o
+
+AMD_DAL_soc_and_ip_translator := $(addprefix $(AMDDALPATH)/dc/soc_and_ip_translator/, $(soc_and_ip_translator))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_soc_and_ip_translator)
diff --git a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.c b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.c
new file mode 100644
index 000000000000..3190c76eb482
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.c
@@ -0,0 +1,304 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2025 Advanced Micro Devices, Inc.
+
+#include "dcn401_soc_and_ip_translator.h"
+#include "bounding_boxes/dcn4_soc_bb.h"
+
+/* The soc_and_ip_translator component is used to obtain up-to-date bounding box
+ * values. Those values are stored in several locations, and the locations vary
+ * with the DCN revision; this component provides a single interface for
+ * retrieving the DCN-specific bounding box values.
+ */
+
+static void get_default_soc_bb(struct dml2_soc_bb *soc_bb)
+{
+ memcpy(soc_bb, &dml2_socbb_dcn401, sizeof(struct dml2_soc_bb));
+ memcpy(&soc_bb->qos_parameters, &dml_dcn4_variant_a_soc_qos_params, sizeof(struct dml2_soc_qos_parameters));
+}
+
+/*
+ * The DC clock table is obtained from the SMU at runtime.
+ * SMU stands for System Management Unit; it is a power management processor
+ * that owns the initialization of dc's clock table and the programming of
+ * clock values based on dc's requests.
+ * The clock values in the base SoC bounding box are dummy placeholders; the
+ * real clock values are retrieved from SMU firmware into dc's clock table at
+ * runtime. This function overrides the dummy placeholder values with the real
+ * values from dc's clock table.
+ */
+static void dcn401_convert_dc_clock_table_to_soc_bb_clock_table(
+ struct dml2_soc_state_table *dml_clk_table,
+ const struct clk_bw_params *dc_bw_params,
+ bool use_clock_dc_limits)
+{
+ int i;
+ const struct clk_limit_table *dc_clk_table;
+
+ /* skip if bw params could not be obtained from the SMU */
+ if (dc_bw_params == NULL)
+ return;
+
+ dc_clk_table = &dc_bw_params->clk_table;
+
+ /* dcfclk */
+ if (dc_clk_table->num_entries_per_clk.num_dcfclk_levels) {
+ dml_clk_table->dcfclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dcfclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->dcfclk.num_clk_values) {
+ if (use_clock_dc_limits && dc_bw_params->dc_mode_limit.dcfclk_mhz &&
+ dc_clk_table->entries[i].dcfclk_mhz > dc_bw_params->dc_mode_limit.dcfclk_mhz) {
+ if (i == 0 || dc_clk_table->entries[i-1].dcfclk_mhz < dc_bw_params->dc_mode_limit.dcfclk_mhz) {
+ dml_clk_table->dcfclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dcfclk_mhz * 1000;
+ dml_clk_table->dcfclk.num_clk_values = i + 1;
+ } else {
+ dml_clk_table->dcfclk.clk_values_khz[i] = 0;
+ dml_clk_table->dcfclk.num_clk_values = i;
+ }
+ } else {
+ dml_clk_table->dcfclk.clk_values_khz[i] = dc_clk_table->entries[i].dcfclk_mhz * 1000;
+ }
+ } else {
+ dml_clk_table->dcfclk.clk_values_khz[i] = 0;
+ }
+ }
+ }
+
+ /* fclk */
+ if (dc_clk_table->num_entries_per_clk.num_fclk_levels) {
+ dml_clk_table->fclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_fclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->fclk.num_clk_values) {
+ if (use_clock_dc_limits && dc_bw_params->dc_mode_limit.fclk_mhz &&
+ dc_clk_table->entries[i].fclk_mhz > dc_bw_params->dc_mode_limit.fclk_mhz) {
+ if (i == 0 || dc_clk_table->entries[i-1].fclk_mhz < dc_bw_params->dc_mode_limit.fclk_mhz) {
+ dml_clk_table->fclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.fclk_mhz * 1000;
+ dml_clk_table->fclk.num_clk_values = i + 1;
+ } else {
+ dml_clk_table->fclk.clk_values_khz[i] = 0;
+ dml_clk_table->fclk.num_clk_values = i;
+ }
+ } else {
+ dml_clk_table->fclk.clk_values_khz[i] = dc_clk_table->entries[i].fclk_mhz * 1000;
+ }
+ } else {
+ dml_clk_table->fclk.clk_values_khz[i] = 0;
+ }
+ }
+ }
+
+ /* uclk */
+ if (dc_clk_table->num_entries_per_clk.num_memclk_levels) {
+ dml_clk_table->uclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_memclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->uclk.num_clk_values) {
+ if (use_clock_dc_limits && dc_bw_params->dc_mode_limit.memclk_mhz &&
+ dc_clk_table->entries[i].memclk_mhz > dc_bw_params->dc_mode_limit.memclk_mhz) {
+ if (i == 0 || dc_clk_table->entries[i-1].memclk_mhz < dc_bw_params->dc_mode_limit.memclk_mhz) {
+ dml_clk_table->uclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.memclk_mhz * 1000;
+ dml_clk_table->uclk.num_clk_values = i + 1;
+ } else {
+ dml_clk_table->uclk.clk_values_khz[i] = 0;
+ dml_clk_table->uclk.num_clk_values = i;
+ }
+ } else {
+ dml_clk_table->uclk.clk_values_khz[i] = dc_clk_table->entries[i].memclk_mhz * 1000;
+ }
+ } else {
+ dml_clk_table->uclk.clk_values_khz[i] = 0;
+ }
+ }
+ }
+
+ /* dispclk */
+ if (dc_clk_table->num_entries_per_clk.num_dispclk_levels) {
+ dml_clk_table->dispclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dispclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->dispclk.num_clk_values) {
+ if (use_clock_dc_limits && dc_bw_params->dc_mode_limit.dispclk_mhz &&
+ dc_clk_table->entries[i].dispclk_mhz > dc_bw_params->dc_mode_limit.dispclk_mhz) {
+ if (i == 0 || dc_clk_table->entries[i-1].dispclk_mhz < dc_bw_params->dc_mode_limit.dispclk_mhz) {
+ dml_clk_table->dispclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dispclk_mhz * 1000;
+ dml_clk_table->dispclk.num_clk_values = i + 1;
+ } else {
+ dml_clk_table->dispclk.clk_values_khz[i] = 0;
+ dml_clk_table->dispclk.num_clk_values = i;
+ }
+ } else {
+ dml_clk_table->dispclk.clk_values_khz[i] = dc_clk_table->entries[i].dispclk_mhz * 1000;
+ }
+ } else {
+ dml_clk_table->dispclk.clk_values_khz[i] = 0;
+ }
+ }
+ }
+
+ /* dppclk */
+ if (dc_clk_table->num_entries_per_clk.num_dppclk_levels) {
+ dml_clk_table->dppclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dppclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->dppclk.num_clk_values) {
+ if (use_clock_dc_limits && dc_bw_params->dc_mode_limit.dppclk_mhz &&
+ dc_clk_table->entries[i].dppclk_mhz > dc_bw_params->dc_mode_limit.dppclk_mhz) {
+ if (i == 0 || dc_clk_table->entries[i-1].dppclk_mhz < dc_bw_params->dc_mode_limit.dppclk_mhz) {
+ dml_clk_table->dppclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dppclk_mhz * 1000;
+ dml_clk_table->dppclk.num_clk_values = i + 1;
+ } else {
+ dml_clk_table->dppclk.clk_values_khz[i] = 0;
+ dml_clk_table->dppclk.num_clk_values = i;
+ }
+ } else {
+ dml_clk_table->dppclk.clk_values_khz[i] = dc_clk_table->entries[i].dppclk_mhz * 1000;
+ }
+ } else {
+ dml_clk_table->dppclk.clk_values_khz[i] = 0;
+ }
+ }
+ }
+
+ /* dtbclk */
+ if (dc_clk_table->num_entries_per_clk.num_dtbclk_levels) {
+ dml_clk_table->dtbclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dtbclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->dtbclk.num_clk_values) {
+ if (use_clock_dc_limits && dc_bw_params->dc_mode_limit.dtbclk_mhz &&
+ dc_clk_table->entries[i].dtbclk_mhz > dc_bw_params->dc_mode_limit.dtbclk_mhz) {
+ if (i == 0 || dc_clk_table->entries[i-1].dtbclk_mhz < dc_bw_params->dc_mode_limit.dtbclk_mhz) {
+ dml_clk_table->dtbclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dtbclk_mhz * 1000;
+ dml_clk_table->dtbclk.num_clk_values = i + 1;
+ } else {
+ dml_clk_table->dtbclk.clk_values_khz[i] = 0;
+ dml_clk_table->dtbclk.num_clk_values = i;
+ }
+ } else {
+ dml_clk_table->dtbclk.clk_values_khz[i] = dc_clk_table->entries[i].dtbclk_mhz * 1000;
+ }
+ } else {
+ dml_clk_table->dtbclk.clk_values_khz[i] = 0;
+ }
+ }
+ }
+
+ /* socclk */
+ if (dc_clk_table->num_entries_per_clk.num_socclk_levels) {
+ dml_clk_table->socclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_socclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->socclk.num_clk_values) {
+ if (use_clock_dc_limits && dc_bw_params->dc_mode_limit.socclk_mhz &&
+ dc_clk_table->entries[i].socclk_mhz > dc_bw_params->dc_mode_limit.socclk_mhz) {
+ if (i == 0 || dc_clk_table->entries[i-1].socclk_mhz < dc_bw_params->dc_mode_limit.socclk_mhz) {
+ dml_clk_table->socclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.socclk_mhz * 1000;
+ dml_clk_table->socclk.num_clk_values = i + 1;
+ } else {
+ dml_clk_table->socclk.clk_values_khz[i] = 0;
+ dml_clk_table->socclk.num_clk_values = i;
+ }
+ } else {
+ dml_clk_table->socclk.clk_values_khz[i] = dc_clk_table->entries[i].socclk_mhz * 1000;
+ }
+ } else {
+ dml_clk_table->socclk.clk_values_khz[i] = 0;
+ }
+ }
+ }
+
+ /* dram config */
+ dml_clk_table->dram_config.channel_count = dc_bw_params->num_channels;
+ dml_clk_table->dram_config.channel_width_bytes = dc_bw_params->dram_channel_width_bytes;
+}
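+
+/*
+ * The seven per-clock blocks above all apply the same rule. A minimal sketch
+ * of that rule over plain arrays (hypothetical helper, not part of this
+ * patch): copy each DPM level from MHz to kHz and, when a dc-mode limit
+ * applies, clamp the first level that exceeds it and truncate the table there.
+ */
+static inline void clamp_clk_levels_sketch(uint32_t *khz, uint32_t *num_values,
+		const uint32_t *mhz, uint32_t table_size,
+		uint32_t limit_mhz, bool use_limit)
+{
+	uint32_t i;
+
+	for (i = 0; i < table_size; i++) {
+		if (i >= *num_values) {
+			khz[i] = 0; /* zero entries past the truncated table */
+		} else if (use_limit && limit_mhz && mhz[i] > limit_mhz) {
+			if (i == 0 || mhz[i - 1] < limit_mhz) {
+				/* first level above the limit: clamp it */
+				khz[i] = limit_mhz * 1000;
+				*num_values = i + 1;
+			} else {
+				/* an earlier level already reached the limit */
+				khz[i] = 0;
+				*num_values = i;
+			}
+		} else {
+			khz[i] = mhz[i] * 1000; /* MHz -> kHz */
+		}
+	}
+}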
+
+void dcn401_update_soc_bb_with_values_from_clk_mgr(struct dml2_soc_bb *soc_bb, const struct dc *dc, const struct dml2_configuration_options *config)
+{
+ soc_bb->dprefclk_mhz = dc->clk_mgr->dprefclk_khz / 1000;
+ soc_bb->dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
+ soc_bb->mall_allocated_for_dcn_mbytes = dc->caps.mall_size_total / (1024 * 1024);
+
+ if (dc->clk_mgr->funcs->is_smu_present &&
+ dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr)) {
+ dcn401_convert_dc_clock_table_to_soc_bb_clock_table(&soc_bb->clk_table,
+ dc->clk_mgr->bw_params,
+ config->use_clock_dc_limits);
+ }
+}
+
+void dcn401_update_soc_bb_with_values_from_vbios(struct dml2_soc_bb *soc_bb, const struct dc *dc)
+{
+ soc_bb->dchub_refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
+ soc_bb->xtalclk_mhz = dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency / 1000;
+
+ /* latencies in vbios are platform specific and should be used if provided */
+ if (dc->ctx->dc_bios->bb_info.dram_clock_change_latency_100ns)
+ soc_bb->power_management_parameters.dram_clk_change_blackout_us =
+ dc->ctx->dc_bios->bb_info.dram_clock_change_latency_100ns / 10.0;
+
+ if (dc->ctx->dc_bios->bb_info.dram_sr_enter_exit_latency_100ns)
+ soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us =
+ dc->ctx->dc_bios->bb_info.dram_sr_enter_exit_latency_100ns / 10.0;
+
+ if (dc->ctx->dc_bios->bb_info.dram_sr_exit_latency_100ns)
+ soc_bb->power_management_parameters.stutter_exit_latency_us =
+ dc->ctx->dc_bios->bb_info.dram_sr_exit_latency_100ns / 10.0;
+}
+
+void dcn401_update_soc_bb_with_values_from_software_policy(struct dml2_soc_bb *soc_bb, const struct dc *dc)
+{
+ /* set if the value is provided */
+ if (dc->bb_overrides.sr_exit_time_ns)
+ soc_bb->power_management_parameters.stutter_exit_latency_us =
+ dc->bb_overrides.sr_exit_time_ns / 1000.0;
+
+ if (dc->bb_overrides.sr_enter_plus_exit_time_ns)
+ soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us =
+ dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
+
+ if (dc->bb_overrides.dram_clock_change_latency_ns)
+ soc_bb->power_management_parameters.dram_clk_change_blackout_us =
+ dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
+
+ if (dc->bb_overrides.fclk_clock_change_latency_ns)
+ soc_bb->power_management_parameters.fclk_change_blackout_us =
+ dc->bb_overrides.fclk_clock_change_latency_ns / 1000.0;
+
+ // Z8 values are neither expected nor used on DCN401, but are added for completeness
+ if (dc->bb_overrides.sr_exit_z8_time_ns)
+ soc_bb->power_management_parameters.z8_stutter_exit_latency_us =
+ dc->bb_overrides.sr_exit_z8_time_ns / 1000.0;
+
+ if (dc->bb_overrides.sr_enter_plus_exit_z8_time_ns)
+ soc_bb->power_management_parameters.z8_stutter_enter_plus_exit_latency_us =
+ dc->bb_overrides.sr_enter_plus_exit_z8_time_ns / 1000.0;
+}
+
+static void apply_soc_bb_updates(struct dml2_soc_bb *soc_bb, const struct dc *dc, const struct dml2_configuration_options *config)
+{
+ /* Each update below may overwrite values set by an earlier function.
+ * Updates are applied in order of priority (lowest to highest), so the
+ * highest-priority source wins.
+ */
+ dc_assert_fp_enabled();
+
+ dcn401_update_soc_bb_with_values_from_clk_mgr(soc_bb, dc, config);
+ dcn401_update_soc_bb_with_values_from_vbios(soc_bb, dc);
+ dcn401_update_soc_bb_with_values_from_software_policy(soc_bb, dc);
+}
+
+void dcn401_get_soc_bb(struct dml2_soc_bb *soc_bb, const struct dc *dc, const struct dml2_configuration_options *config)
+{
+ // get the default soc_bb with static values
+ get_default_soc_bb(soc_bb);
+ // update the soc_bb with more accurate runtime values
+ apply_soc_bb_updates(soc_bb, dc, config);
+}
+
+static void dcn401_get_ip_caps(struct dml2_ip_capabilities *ip_caps)
+{
+ *ip_caps = dml2_dcn401_max_ip_caps;
+}
+
+static struct soc_and_ip_translator_funcs dcn401_translator_funcs = {
+ .get_soc_bb = dcn401_get_soc_bb,
+ .get_ip_caps = dcn401_get_ip_caps,
+};
+
+void dcn401_construct_soc_and_ip_translator(struct soc_and_ip_translator *soc_and_ip_translator)
+{
+ soc_and_ip_translator->translator_funcs = &dcn401_translator_funcs;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h
new file mode 100644
index 000000000000..21d842857601
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2025 Advanced Micro Devices, Inc.
+
+#ifndef _DCN401_SOC_AND_IP_TRANSLATOR_H_
+#define _DCN401_SOC_AND_IP_TRANSLATOR_H_
+
+#include "core_types.h"
+#include "dc.h"
+#include "clk_mgr.h"
+#include "soc_and_ip_translator.h"
+#include "dml2/dml21/inc/dml_top_soc_parameter_types.h"
+
+void dcn401_construct_soc_and_ip_translator(struct soc_and_ip_translator *soc_and_ip_translator);
+
+/* Functions that can be re-used by higher DCN revisions of this component */
+void dcn401_get_soc_bb(struct dml2_soc_bb *soc_bb, const struct dc *dc, const struct dml2_configuration_options *config);
+void dcn401_update_soc_bb_with_values_from_clk_mgr(struct dml2_soc_bb *soc_bb, const struct dc *dc, const struct dml2_configuration_options *config);
+void dcn401_update_soc_bb_with_values_from_vbios(struct dml2_soc_bb *soc_bb, const struct dc *dc);
+void dcn401_update_soc_bb_with_values_from_software_policy(struct dml2_soc_bb *soc_bb, const struct dc *dc);
+
+#endif /* _DCN401_SOC_AND_IP_TRANSLATOR_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.c b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.c
new file mode 100644
index 000000000000..c9e224d262c9
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.c
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2025 Advanced Micro Devices, Inc.
+
+#include "dcn42_soc_and_ip_translator.h"
+#include "soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h"
+#include "bounding_boxes/dcn42_soc_bb.h"
+
+/* The soc_and_ip_translator component is used to obtain up-to-date bounding box
+ * values. Those values are stored in several locations, and the locations vary
+ * with the DCN revision; this component provides a single interface for
+ * retrieving the DCN-specific bounding box values.
+ */
+
+static void dcn42_get_ip_caps(struct dml2_ip_capabilities *ip_caps)
+{
+ *ip_caps = dml2_dcn42_max_ip_caps;
+}
+
+static struct soc_and_ip_translator_funcs dcn42_translator_funcs = {
+ .get_soc_bb = dcn401_get_soc_bb,
+ .get_ip_caps = dcn42_get_ip_caps,
+};
+
+void dcn42_construct_soc_and_ip_translator(struct soc_and_ip_translator *soc_and_ip_translator)
+{
+ soc_and_ip_translator->translator_funcs = &dcn42_translator_funcs;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.h b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.h
new file mode 100644
index 000000000000..914dcbb369a7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.h
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2025 Advanced Micro Devices, Inc.
+
+#ifndef _DCN42_SOC_AND_IP_TRANSLATOR_H_
+#define _DCN42_SOC_AND_IP_TRANSLATOR_H_
+
+#include "core_types.h"
+#include "dc.h"
+#include "clk_mgr.h"
+#include "dml_top_soc_parameter_types.h"
+#include "soc_and_ip_translator.h"
+
+void dcn42_construct_soc_and_ip_translator(struct soc_and_ip_translator *soc_and_ip_translator);
+
+#endif /* _DCN42_SOC_AND_IP_TRANSLATOR_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/soc_and_ip_translator.c b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/soc_and_ip_translator.c
new file mode 100644
index 000000000000..0fc0e5a6c171
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/soc_and_ip_translator.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2025 Advanced Micro Devices, Inc.
+
+#include "soc_and_ip_translator.h"
+#include "soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h"
+
+static void dc_construct_soc_and_ip_translator(struct soc_and_ip_translator *soc_and_ip_translator,
+ enum dce_version dc_version)
+{
+ switch (dc_version) {
+ case DCN_VERSION_4_01:
+ dcn401_construct_soc_and_ip_translator(soc_and_ip_translator);
+ break;
+ default:
+ break;
+ }
+}
+
+struct soc_and_ip_translator *dc_create_soc_and_ip_translator(enum dce_version dc_version)
+{
+ struct soc_and_ip_translator *soc_and_ip_translator;
+
+ soc_and_ip_translator = kzalloc(sizeof(*soc_and_ip_translator), GFP_KERNEL);
+ if (!soc_and_ip_translator)
+ return NULL;
+
+ dc_construct_soc_and_ip_translator(soc_and_ip_translator, dc_version);
+
+ return soc_and_ip_translator;
+}
+
+void dc_destroy_soc_and_ip_translator(struct soc_and_ip_translator **soc_and_ip_translator)
+{
+ kfree(*soc_and_ip_translator);
+ *soc_and_ip_translator = NULL;
+}
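+
+/*
+ * Minimal usage sketch (hypothetical caller). Creation dispatches on the DCN
+ * revision; for revisions without a construct function, translator_funcs
+ * stays NULL, hence the check below. Bounding box math uses floating point,
+ * so get_soc_bb() is expected to run inside the DC FPU context (see
+ * dc_assert_fp_enabled() in the dcn401 implementation):
+ *
+ *	struct soc_and_ip_translator *xlat;
+ *	struct dml2_soc_bb soc_bb;
+ *
+ *	xlat = dc_create_soc_and_ip_translator(DCN_VERSION_4_01);
+ *	if (xlat && xlat->translator_funcs)
+ *		xlat->translator_funcs->get_soc_bb(&soc_bb, dc, config);
+ *	dc_destroy_soc_and_ip_translator(&xlat);
+ */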
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
index ad088d70e189..6ffc74fc9dcd 100644
--- a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
@@ -44,6 +44,11 @@ static void virtual_stream_encoder_dvi_set_stream_attribute(
struct dc_crtc_timing *crtc_timing,
bool is_dual_link) {}
+static void virtual_stream_encoder_lvds_set_stream_attribute(
+ struct stream_encoder *enc,
+ struct dc_crtc_timing *crtc_timing)
+{}
+
static void virtual_stream_encoder_set_throttled_vcp_size(
struct stream_encoder *enc,
struct fixed31_32 avg_time_slots_per_mtp)
@@ -115,6 +120,8 @@ static const struct stream_encoder_funcs virtual_str_enc_funcs = {
virtual_stream_encoder_hdmi_set_stream_attribute,
.dvi_set_stream_attribute =
virtual_stream_encoder_dvi_set_stream_attribute,
+ .lvds_set_stream_attribute =
+ virtual_stream_encoder_lvds_set_stream_attribute,
.set_throttled_vcp_size =
virtual_stream_encoder_set_throttled_vcp_size,
.update_hdmi_info_packets =
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index 0bafb6710761..338fdc651f2c 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -316,6 +316,7 @@ struct dmub_srv_hw_params {
bool disable_sldo_opt;
bool enable_non_transparent_setconfig;
bool lower_hbr3_phy_ssc;
+ bool override_hbr3_pll_vco;
};
/**
@@ -567,6 +568,7 @@ struct dmub_srv {
bool sw_init;
bool hw_init;
+ bool dpia_supported;
uint64_t fb_base;
uint64_t fb_offset;
@@ -597,6 +599,8 @@ struct dmub_notification {
enum dmub_notification_type type;
uint8_t link_index;
uint8_t result;
+ /* notify instance from DMUB */
+ uint8_t instance;
bool pending_notification;
union {
struct aux_reply_data aux_reply;
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index 6a69a788abe8..92248224b713 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -843,7 +843,8 @@ union dmub_fw_boot_options {
uint32_t ips_sequential_ono: 1; /**< 1 to enable sequential ONO IPS sequence */
uint32_t disable_sldo_opt: 1; /**< 1 to disable SLDO optimizations */
uint32_t lower_hbr3_phy_ssc: 1; /**< 1 to lower hbr3 phy ssc to 0.125 percent */
- uint32_t reserved : 6; /**< reserved */
+ uint32_t override_hbr3_pll_vco: 1; /**< 1 to override the hbr3 pll vco to 0 */
+ uint32_t reserved : 5; /**< reserved */
} bits; /**< boot bits */
uint32_t all; /**< 32-bit access to bits */
};
@@ -882,7 +883,7 @@ enum dmub_shared_state_feature_id {
/**
* struct dmub_shared_state_ips_fw - Firmware signals for IPS.
*/
- union dmub_shared_state_ips_fw_signals {
+union dmub_shared_state_ips_fw_signals {
struct {
uint32_t ips1_commit : 1; /**< 1 if in IPS1 or IPS0 RCG */
uint32_t ips2_commit : 1; /**< 1 if in IPS2 */
@@ -897,7 +898,7 @@ enum dmub_shared_state_feature_id {
/**
* struct dmub_shared_state_ips_signals - Firmware signals for IPS.
*/
- union dmub_shared_state_ips_driver_signals {
+union dmub_shared_state_ips_driver_signals {
struct {
uint32_t allow_pg : 1; /**< 1 if PG is allowed */
 uint32_t allow_ips1 : 1; /**< 1 if IPS1 is allowed */
@@ -1990,18 +1991,19 @@ struct dmub_cmd_lsdma_data {
struct lsdma_tiled_copy_data {
uint32_t src_addr_lo;
uint32_t src_addr_hi;
+
uint32_t dst_addr_lo;
uint32_t dst_addr_hi;
uint32_t src_x : 16;
uint32_t src_y : 16;
- uint32_t src_width : 16;
- uint32_t src_height : 16;
-
uint32_t dst_x : 16;
uint32_t dst_y : 16;
+ uint32_t src_width : 16;
+ uint32_t src_height : 16;
+
uint32_t dst_width : 16;
uint32_t dst_height : 16;
@@ -2034,23 +2036,58 @@ struct dmub_cmd_lsdma_data {
uint32_t padding : 30;
} tiled_copy_data;
struct lsdma_linear_copy_data {
+ uint32_t src_lo;
+ uint32_t src_hi;
+
+ uint32_t dst_lo;
+ uint32_t dst_hi;
+
uint32_t count : 30;
uint32_t cache_policy_dst : 2;
uint32_t tmz : 1;
uint32_t cache_policy_src : 2;
uint32_t padding : 29;
-
+ } linear_copy_data;
+ struct lsdma_linear_sub_window_copy_data {
uint32_t src_lo;
uint32_t src_hi;
+
uint32_t dst_lo;
uint32_t dst_hi;
- } linear_copy_data;
+
+ uint32_t src_x : 16;
+ uint32_t src_y : 16;
+
+ uint32_t dst_x : 16;
+ uint32_t dst_y : 16;
+
+ uint32_t rect_x : 16;
+ uint32_t rect_y : 16;
+
+ uint32_t src_pitch : 16;
+ uint32_t dst_pitch : 16;
+
+ uint32_t src_slice_pitch;
+ uint32_t dst_slice_pitch;
+
+ uint32_t tmz : 1;
+ uint32_t element_size : 3;
+ uint32_t src_cache_policy : 3;
+ uint32_t dst_cache_policy : 3;
+ uint32_t reserved0 : 22;
+ } linear_sub_window_copy_data;
struct lsdma_reg_write_data {
uint32_t reg_addr;
uint32_t reg_data;
} reg_write_data;
struct lsdma_pio_copy_data {
+ uint32_t src_lo;
+ uint32_t src_hi;
+
+ uint32_t dst_lo;
+ uint32_t dst_hi;
+
union {
struct {
uint32_t byte_count : 26;
@@ -2063,12 +2100,11 @@ struct dmub_cmd_lsdma_data {
} fields;
uint32_t raw;
} packet;
- uint32_t src_lo;
- uint32_t src_hi;
- uint32_t dst_lo;
- uint32_t dst_hi;
} pio_copy_data;
struct lsdma_pio_constfill_data {
+ uint32_t dst_lo;
+ uint32_t dst_hi;
+
union {
struct {
uint32_t byte_count : 26;
@@ -2081,14 +2117,12 @@ struct dmub_cmd_lsdma_data {
} fields;
uint32_t raw;
} packet;
- uint32_t dst_lo;
- uint32_t dst_hi;
+
uint32_t data;
} pio_constfill_data;
uint32_t all[14];
} u;
-
};
struct dmub_rb_cmd_lsdma {
@@ -2330,6 +2364,7 @@ struct dmub_cmd_fams2_global_config {
union dmub_fams2_global_feature_config features;
uint32_t recovery_timeout_us;
uint32_t hwfq_flip_programming_delay_us;
+ uint32_t max_allow_to_target_delta_us; // how early DCN could assert P-State allow compared to the P-State target
};
union dmub_cmd_fams2_config {
@@ -3986,6 +4021,10 @@ enum dmub_cmd_replay_type {
*/
DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP = 8,
/**
+ * Set version
+ */
+ DMUB_CMD__REPLAY_SET_VERSION = 9,
+ /**
* Set Replay General command.
*/
DMUB_CMD__REPLAY_SET_GENERAL_CMD = 16,
@@ -4015,6 +4054,10 @@ struct dmub_alpm_auxless_data {
uint16_t lfps_t1_t2_override_us;
short lfps_t1_t2_offset_us;
uint8_t lttpr_count;
+ /*
+ * Padding to align structure to 4 byte boundary.
+ */
+ uint8_t pad[1];
};
/**
@@ -4091,14 +4134,82 @@ struct dmub_cmd_replay_copy_settings_data {
* Use for AUX-less ALPM LFPS wake operation
*/
struct dmub_alpm_auxless_data auxless_alpm_data;
-
+ /**
+ * @hpo_stream_enc_inst: HPO stream encoder instance
+ */
+ uint8_t hpo_stream_enc_inst;
+ /**
+ * @hpo_link_enc_inst: HPO link encoder instance
+ */
+ uint8_t hpo_link_enc_inst;
+ /**
+ * Determines if fast resync in ultra sleep mode is enabled/disabled.
+ */
+ uint8_t replay_support_fast_resync_in_ultra_sleep_mode;
/**
* @pad: Align structure to 4 byte boundary.
*/
+ uint8_t pad[1];
+};
+
+
+/**
+ * Replay versions.
+ */
+enum replay_version {
+ /**
+ * FreeSync Replay
+ */
+ REPLAY_VERSION_FREESYNC_REPLAY = 0,
+ /**
+ * Panel Replay
+ */
+ REPLAY_VERSION_PANEL_REPLAY = 1,
+ /**
+ * Replay not supported.
+ */
+ REPLAY_VERSION_UNSUPPORTED = 0xFF,
+};
+
+/**
+ * Data passed from driver to FW in a DMUB_CMD__REPLAY_SET_VERSION command.
+ */
+struct dmub_cmd_replay_set_version_data {
+ /**
+ * Panel instance, used to identify which psr_state to use.
+ * Currently only instances 0 and 1 are supported.
+ */
+ uint8_t panel_inst;
+ /**
+ * Replay version that FW should implement.
+ */
+ enum replay_version version;
+ /**
+ * Replay control version.
+ */
+ uint8_t cmd_version;
+ /**
+ * Explicit padding to 4 byte boundary.
+ */
uint8_t pad[2];
};
/**
+ * Definition of a DMUB_CMD__REPLAY_SET_VERSION command.
+ */
+struct dmub_rb_cmd_replay_set_version {
+ /**
+ * Command header.
+ */
+ struct dmub_cmd_header header;
+ /**
+ * Data passed from driver to FW in a DMUB_CMD__REPLAY_SET_VERSION command.
+ */
+ struct dmub_cmd_replay_set_version_data replay_set_version_data;
+};
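+
+/*
+ * A hypothetical driver-side fill for this command, following the usual DMUB
+ * pattern; DMUB_CMD__REPLAY as the top-level type and the command execution
+ * helper are assumptions, not defined in this header:
+ *
+ *	union dmub_rb_cmd cmd = { 0 };
+ *
+ *	cmd.replay_set_version.header.type = DMUB_CMD__REPLAY;
+ *	cmd.replay_set_version.header.sub_type = DMUB_CMD__REPLAY_SET_VERSION;
+ *	cmd.replay_set_version.header.payload_bytes =
+ *		sizeof(struct dmub_cmd_replay_set_version_data);
+ *	cmd.replay_set_version.replay_set_version_data.panel_inst = 0;
+ *	cmd.replay_set_version.replay_set_version_data.version =
+ *		REPLAY_VERSION_PANEL_REPLAY;
+ */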
+
+/**
* Definition of a DMUB_CMD__REPLAY_COPY_SETTINGS command.
*/
struct dmub_rb_cmd_replay_copy_settings {
@@ -4450,6 +4561,10 @@ union dmub_replay_cmd_set {
*/
struct dmub_cmd_replay_disabled_adaptive_sync_sdp_data disabled_adaptive_sync_sdp_data;
/**
+ * Definition of DMUB_CMD__REPLAY_SET_VERSION command data.
+ */
+ struct dmub_cmd_replay_set_version_data version_data;
+ /**
* Definition of DMUB_CMD__REPLAY_SET_GENERAL_CMD command data.
*/
struct dmub_cmd_replay_set_general_cmd_data set_general_cmd_data;
@@ -4665,21 +4780,25 @@ enum dmub_cmd_lsdma_type {
*/
DMUB_CMD__LSDMA_LINEAR_COPY = 1,
/**
+ * LSDMA copies data from source to destination linearly in sub window
+ */
+ DMUB_CMD__LSDMA_LINEAR_SUB_WINDOW_COPY = 2,
+ /**
* Send the tiled-to-tiled copy command
*/
- DMUB_CMD__LSDMA_TILED_TO_TILED_COPY = 2,
+ DMUB_CMD__LSDMA_TILED_TO_TILED_COPY = 3,
/**
* Send the poll reg write command
*/
- DMUB_CMD__LSDMA_POLL_REG_WRITE = 3,
+ DMUB_CMD__LSDMA_POLL_REG_WRITE = 4,
/**
* Send the pio copy command
*/
- DMUB_CMD__LSDMA_PIO_COPY = 4,
+ DMUB_CMD__LSDMA_PIO_COPY = 5,
/**
* Send the pio constfill command
*/
- DMUB_CMD__LSDMA_PIO_CONSTFILL = 5,
+ DMUB_CMD__LSDMA_PIO_CONSTFILL = 6,
};
struct abm_ace_curve {
@@ -5894,6 +6013,9 @@ enum ips_residency_mode {
IPS_RESIDENCY__IPS2,
IPS_RESIDENCY__IPS1_RCG,
IPS_RESIDENCY__IPS1_ONO2_ON,
+ IPS_RESIDENCY__IPS1_Z8_RETENTION,
+ IPS_RESIDENCY__PG_ONO_LAST_SEEN_IN_IPS,
+ IPS_RESIDENCY__PG_ONO_CURRENT_STATE
};
#define NUM_IPS_HISTOGRAM_BUCKETS 16
@@ -5907,6 +6029,8 @@ struct dmub_ips_residency_info {
uint32_t histogram[NUM_IPS_HISTOGRAM_BUCKETS];
uint64_t total_time_us;
uint64_t total_inactive_time_us;
+ uint32_t ono_pg_state_at_collection;
+ uint32_t ono_pg_state_last_seen_in_ips;
};
/**
@@ -6203,6 +6327,10 @@ union dmub_rb_cmd {
* Definition of a DMUB_CMD__IDLE_OPT_SET_DC_POWER_STATE command.
*/
struct dmub_rb_cmd_idle_opt_set_dc_power_state idle_opt_set_dc_power_state;
+ /**
+ * Definition of a DMUB_CMD__REPLAY_SET_VERSION command.
+ */
+ struct dmub_rb_cmd_replay_set_version replay_set_version;
/*
* Definition of a DMUB_CMD__REPLAY_COPY_SETTINGS command.
*/
@@ -6411,15 +6539,18 @@ static inline bool dmub_rb_full(struct dmub_rb *rb)
static inline bool dmub_rb_push_front(struct dmub_rb *rb,
const union dmub_rb_cmd *cmd)
{
- uint64_t volatile *dst = (uint64_t volatile *)((uint8_t *)(rb->base_address) + rb->wrpt);
- const uint64_t *src = (const uint64_t *)cmd;
+ uint8_t *dst = (uint8_t *)(rb->base_address) + rb->wrpt;
+ const uint8_t *src = (const uint8_t *)cmd;
uint8_t i;
+ if (rb->capacity == 0)
+ return false;
+
if (dmub_rb_full(rb))
return false;
// copying data
- for (i = 0; i < DMUB_RB_CMD_SIZE / sizeof(uint64_t); i++)
+ for (i = 0; i < DMUB_RB_CMD_SIZE; i++)
*dst++ = *src++;
rb->wrpt += DMUB_RB_CMD_SIZE;
@@ -6444,6 +6575,9 @@ static inline bool dmub_rb_out_push_front(struct dmub_rb *rb,
uint8_t *dst = (uint8_t *)(rb->base_address) + rb->wrpt;
const uint8_t *src = (const uint8_t *)cmd;
+ if (rb->capacity == 0)
+ return false;
+
if (dmub_rb_full(rb))
return false;
@@ -6489,6 +6623,9 @@ static inline void dmub_rb_get_rptr_with_offset(struct dmub_rb *rb,
uint32_t num_cmds,
uint32_t *next_rptr)
{
+ if (rb->capacity == 0)
+ return;
+
*next_rptr = rb->rptr + DMUB_RB_CMD_SIZE * num_cmds;
if (*next_rptr >= rb->capacity)
@@ -6552,6 +6689,9 @@ static inline bool dmub_rb_out_front(struct dmub_rb *rb,
*/
static inline bool dmub_rb_pop_front(struct dmub_rb *rb)
{
+ if (rb->capacity == 0)
+ return false;
+
if (dmub_rb_empty(rb))
return false;
@@ -6576,6 +6716,9 @@ static inline void dmub_rb_flush_pending(const struct dmub_rb *rb)
uint32_t rptr = rb->rptr;
uint32_t wptr = rb->wrpt;
+ if (rb->capacity == 0)
+ return;
+
while (rptr != wptr) {
uint64_t *data = (uint64_t *)((uint8_t *)(rb->base_address) + rptr);
uint8_t i;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
index 3f38db752b84..4777c7203b2c 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
@@ -377,6 +377,7 @@ void dmub_dcn31_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmu
boot_options.bits.dpia_hpd_int_enable_supported = params->dpia_hpd_int_enable_supported;
boot_options.bits.power_optimization = params->power_optimization;
boot_options.bits.lower_hbr3_phy_ssc = params->lower_hbr3_phy_ssc;
+ boot_options.bits.override_hbr3_pll_vco = params->override_hbr3_pll_vco;
boot_options.bits.sel_mux_phy_c_d_phy_f_g = (dmub->asic == DMUB_ASIC_DCN31B) ? 1 : 0;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
index e7056205b050..ce041f6239dc 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
@@ -89,44 +89,50 @@ static inline void dmub_dcn32_translate_addr(const union dmub_addr *addr_in,
void dmub_dcn32_reset(struct dmub_srv *dmub)
{
union dmub_gpint_data_register cmd;
- const uint32_t timeout = 30;
- uint32_t in_reset, scratch, i;
+ const uint32_t timeout = 100000;
+ uint32_t in_reset, is_enabled, scratch, i, pwait_mode;
REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset);
+ REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_enabled);
- if (in_reset == 0) {
+ if (in_reset == 0 && is_enabled != 0) {
cmd.bits.status = 1;
cmd.bits.command_code = DMUB_GPINT__STOP_FW;
cmd.bits.param = 0;
dmub->hw_funcs.set_gpint(dmub, cmd);
- /**
- * Timeout covers both the ACK and the wait
- * for remaining work to finish.
- *
- * This is mostly bound by the PHY disable sequence.
- * Each register check will be greater than 1us, so
- * don't bother using udelay.
- */
-
for (i = 0; i < timeout; ++i) {
if (dmub->hw_funcs.is_gpint_acked(dmub, cmd))
break;
+
+ udelay(1);
}
for (i = 0; i < timeout; ++i) {
- scratch = dmub->hw_funcs.get_gpint_response(dmub);
+ scratch = REG_READ(DMCUB_SCRATCH7);
if (scratch == DMUB_GPINT__STOP_FW_RESPONSE)
break;
+
+ udelay(1);
}
+ for (i = 0; i < timeout; ++i) {
+ REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &pwait_mode);
+ if (pwait_mode & (1 << 0))
+ break;
+
+ udelay(1);
+ }
/* Force reset in case we timed out, DMCUB is likely hung. */
}
- REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1);
- REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
- REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
+ if (is_enabled) {
+ REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1);
+ udelay(1);
+ REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
+ }
+
REG_WRITE(DMCUB_INBOX1_RPTR, 0);
REG_WRITE(DMCUB_INBOX1_WPTR, 0);
REG_WRITE(DMCUB_OUTBOX1_RPTR, 0);
@@ -135,7 +141,7 @@ void dmub_dcn32_reset(struct dmub_srv *dmub)
REG_WRITE(DMCUB_OUTBOX0_WPTR, 0);
REG_WRITE(DMCUB_SCRATCH0, 0);
- /* Clear the GPINT command manually so we don't reset again. */
+ /* Clear the GPINT command manually so we don't send anything during boot. */
cmd.all = 0;
dmub->hw_funcs.set_gpint(dmub, cmd);
}
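
/*
 * Summary of the handshake above: only when the DMCUB is enabled and not
 * already in reset does the driver send DMUB_GPINT__STOP_FW; it then polls,
 * for up to ~100 ms in 1 us steps, for the GPINT ack, for the STOP_FW
 * response in SCRATCH7, and for DMCUB_PWAIT_MODE_STATUS bit 0 before
 * asserting soft reset and clearing the enable bit.
 */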
@@ -419,8 +425,8 @@ uint32_t dmub_dcn32_get_current_time(struct dmub_srv *dmub)
void dmub_dcn32_get_diagnostic_data(struct dmub_srv *dmub)
{
- uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset;
- uint32_t is_traceport_enabled, is_cw0_enabled, is_cw6_enabled;
+ uint32_t is_dmub_enabled, is_soft_reset, is_pwait;
+ uint32_t is_traceport_enabled, is_cw6_enabled;
struct dmub_timeout_info timeout = {0};
if (!dmub)
@@ -470,18 +476,15 @@ void dmub_dcn32_get_diagnostic_data(struct dmub_srv *dmub)
REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled);
dmub->debug.is_dmcub_enabled = is_dmub_enabled;
+ REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &is_pwait);
+ dmub->debug.is_pwait = is_pwait;
+
REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset);
dmub->debug.is_dmcub_soft_reset = is_soft_reset;
- REG_GET(DMCUB_SEC_CNTL, DMCUB_SEC_RESET_STATUS, &is_sec_reset);
- dmub->debug.is_dmcub_secure_reset = is_sec_reset;
-
REG_GET(DMCUB_CNTL, DMCUB_TRACEPORT_EN, &is_traceport_enabled);
dmub->debug.is_traceport_en = is_traceport_enabled;
- REG_GET(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_ENABLE, &is_cw0_enabled);
- dmub->debug.is_cw0_enabled = is_cw0_enabled;
-
REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled);
dmub->debug.is_cw6_enabled = is_cw6_enabled;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h
index 1a229450c53d..daf81027d663 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h
@@ -89,6 +89,9 @@ struct dmub_srv;
DMUB_SR(DMCUB_REGION5_OFFSET) \
DMUB_SR(DMCUB_REGION5_OFFSET_HIGH) \
DMUB_SR(DMCUB_REGION5_TOP_ADDRESS) \
+ DMUB_SR(DMCUB_REGION6_OFFSET) \
+ DMUB_SR(DMCUB_REGION6_OFFSET_HIGH) \
+ DMUB_SR(DMCUB_REGION6_TOP_ADDRESS) \
DMUB_SR(DMCUB_SCRATCH0) \
DMUB_SR(DMCUB_SCRATCH1) \
DMUB_SR(DMCUB_SCRATCH2) \
@@ -155,6 +158,8 @@ struct dmub_srv;
DMUB_SF(DMCUB_REGION4_TOP_ADDRESS, DMCUB_REGION4_ENABLE) \
DMUB_SF(DMCUB_REGION5_TOP_ADDRESS, DMCUB_REGION5_TOP_ADDRESS) \
DMUB_SF(DMCUB_REGION5_TOP_ADDRESS, DMCUB_REGION5_ENABLE) \
+ DMUB_SF(DMCUB_REGION6_TOP_ADDRESS, DMCUB_REGION6_TOP_ADDRESS) \
+ DMUB_SF(DMCUB_REGION6_TOP_ADDRESS, DMCUB_REGION6_ENABLE) \
DMUB_SF(CC_DC_PIPE_DIS, DC_DMCUB_ENABLE) \
DMUB_SF(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET) \
DMUB_SF(DCN_VM_FB_LOCATION_BASE, FB_BASE) \
@@ -162,7 +167,8 @@ struct dmub_srv;
DMUB_SF(DMCUB_INBOX0_WPTR, DMCUB_INBOX0_WPTR) \
DMUB_SF(DMCUB_REGION3_TMR_AXI_SPACE, DMCUB_REGION3_TMR_AXI_SPACE) \
DMUB_SF(DMCUB_INTERRUPT_ENABLE, DMCUB_GPINT_IH_INT_EN) \
- DMUB_SF(DMCUB_INTERRUPT_ACK, DMCUB_GPINT_IH_INT_ACK)
+ DMUB_SF(DMCUB_INTERRUPT_ACK, DMCUB_GPINT_IH_INT_ACK) \
+ DMUB_SF(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS)
struct dmub_srv_dcn32_reg_offset {
#define DMUB_SR(reg) uint32_t reg;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
index 2228d62adc7e..834e5434ccb8 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
@@ -400,13 +400,14 @@ union dmub_fw_boot_options dmub_dcn35_get_fw_boot_option(struct dmub_srv *dmub)
void dmub_dcn35_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmub_srv_hw_params *params)
{
union dmub_fw_boot_options boot_options = {0};
- union dmub_fw_boot_options cur_boot_options = {0};
- cur_boot_options = dmub_dcn35_get_fw_boot_option(dmub);
+ if (!dmub->dpia_supported) {
+ dmub->dpia_supported = dmub_dcn35_get_fw_boot_option(dmub).bits.enable_dpia;
+ }
boot_options.bits.z10_disable = params->disable_z10;
boot_options.bits.dpia_supported = params->dpia_supported;
- boot_options.bits.enable_dpia = cur_boot_options.bits.enable_dpia && !params->disable_dpia;
+ boot_options.bits.enable_dpia = dmub->dpia_supported && !params->disable_dpia;
boot_options.bits.usb4_cm_version = params->usb4_cm_version;
boot_options.bits.dpia_hpd_int_enable_supported = params->dpia_hpd_int_enable_supported;
boot_options.bits.power_optimization = params->power_optimization;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c
index 567c5b1aeb7a..e7a58b140388 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c
@@ -71,7 +71,7 @@ enum dmub_status dmub_srv_stat_get_notification(struct dmub_srv *dmub,
switch (cmd.cmd_common.header.type) {
case DMUB_OUT_CMD__DP_AUX_REPLY:
notify->type = DMUB_NOTIFICATION_AUX_REPLY;
- notify->link_index = cmd.dp_aux_reply.control.instance;
+ notify->instance = cmd.dp_aux_reply.control.instance;
notify->result = cmd.dp_aux_reply.control.result;
dmub_memcpy((void *)&notify->aux_reply,
(void *)&cmd.dp_aux_reply.reply_data, sizeof(struct aux_reply_data));
@@ -84,17 +84,17 @@ enum dmub_status dmub_srv_stat_get_notification(struct dmub_srv *dmub,
notify->type = DMUB_NOTIFICATION_HPD_IRQ;
}
- notify->link_index = cmd.dp_hpd_notify.hpd_data.instance;
+ notify->instance = cmd.dp_hpd_notify.hpd_data.instance;
notify->result = AUX_RET_SUCCESS;
break;
case DMUB_OUT_CMD__SET_CONFIG_REPLY:
notify->type = DMUB_NOTIFICATION_SET_CONFIG_REPLY;
- notify->link_index = cmd.set_config_reply.set_config_reply_control.instance;
+ notify->instance = cmd.set_config_reply.set_config_reply_control.instance;
notify->sc_status = cmd.set_config_reply.set_config_reply_control.status;
break;
case DMUB_OUT_CMD__DPIA_NOTIFICATION:
notify->type = DMUB_NOTIFICATION_DPIA_NOTIFICATION;
- notify->link_index = cmd.dpia_notification.payload.header.instance;
+ notify->instance = cmd.dpia_notification.payload.header.instance;
break;
case DMUB_OUT_CMD__HPD_SENSE_NOTIFY:
notify->type = DMUB_NOTIFICATION_HPD_SENSE_NOTIFY;
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index 5fc29164e4b4..8aea50aa9533 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -213,6 +213,11 @@ enum {
#endif
#define DEVICE_ID_NV_13FE 0x13FE // CYAN_SKILLFISH
#define DEVICE_ID_NV_143F 0x143F
+#define DEVICE_ID_NV_13F9 0x13F9
+#define DEVICE_ID_NV_13FA 0x13FA
+#define DEVICE_ID_NV_13FB 0x13FB
+#define DEVICE_ID_NV_13FC 0x13FC
+#define DEVICE_ID_NV_13DB 0x13DB
#define FAMILY_VGH 144
#define DEVICE_ID_VGH_163F 0x163F
#define DEVICE_ID_VGH_1435 0x1435
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index 71efd2770c99..ce421bcddcb0 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -226,8 +226,8 @@ static void update_v_total_for_static_ramp(
unsigned int target_duration_in_us =
calc_duration_in_us_from_refresh_in_uhz(
in_out_vrr->fixed.target_refresh_in_uhz);
- bool ramp_direction_is_up = (current_duration_in_us >
- target_duration_in_us) ? true : false;
+ bool ramp_direction_is_up = current_duration_in_us >
+ target_duration_in_us;
/* Calculate ratio between new and current frame duration with 3 digit */
unsigned int frame_duration_ratio = div64_u64(1000000,
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
index 5e01c6e24cbc..c760216a6240 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
@@ -29,6 +29,7 @@ static void push_error_status(struct mod_hdcp *hdcp,
enum mod_hdcp_status status)
{
struct mod_hdcp_trace *trace = &hdcp->connection.trace;
+ const uint8_t retry_limit = hdcp->connection.link.adjust.retry_limit;
if (trace->error_count < MAX_NUM_OF_ERROR_TRACE) {
trace->errors[trace->error_count].status = status;
@@ -39,11 +40,11 @@ static void push_error_status(struct mod_hdcp *hdcp,
if (is_hdcp1(hdcp)) {
hdcp->connection.hdcp1_retry_count++;
- if (hdcp->connection.hdcp1_retry_count == MAX_NUM_OF_ATTEMPTS)
+ if (hdcp->connection.hdcp1_retry_count == retry_limit)
hdcp->connection.link.adjust.hdcp1.disable = 1;
} else if (is_hdcp2(hdcp)) {
hdcp->connection.hdcp2_retry_count++;
- if (hdcp->connection.hdcp2_retry_count == MAX_NUM_OF_ATTEMPTS)
+ if (hdcp->connection.hdcp2_retry_count == retry_limit)
hdcp->connection.link.adjust.hdcp2.disable = 1;
}
}
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
index c42468bb70ac..b51ddf2846df 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
@@ -220,6 +220,7 @@ struct mod_hdcp_link_adjustment_hdcp2 {
struct mod_hdcp_link_adjustment {
uint8_t auth_delay;
+ uint8_t retry_limit;
struct mod_hdcp_link_adjustment_hdcp1 hdcp1;
struct mod_hdcp_link_adjustment_hdcp2 hdcp2;
};
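
/*
 * With retry_limit, the retry cap checked in push_error_status() becomes
 * per-link policy instead of the compile-time MAX_NUM_OF_ATTEMPTS. A
 * hypothetical caller-side setup (the surrounding context is assumed):
 *
 *	struct mod_hdcp_link_adjustment adjust = { 0 };
 *
 *	adjust.auth_delay = 2;
 *	adjust.retry_limit = 3;	// give up and disable HDCP after 3 failures
 */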
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index bfb446736ca8..75efda2969cf 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -239,18 +239,51 @@ enum amd_harvest_ip_mask {
AMD_HARVEST_IP_DMU_MASK = 0x4,
};
+/**
+ * enum DC_FEATURE_MASK - Bits that control DC feature defaults
+ */
enum DC_FEATURE_MASK {
//Default value can be found at "uint amdgpu_dc_feature_mask"
- DC_FBC_MASK = (1 << 0), //0x1, disabled by default
- DC_MULTI_MON_PP_MCLK_SWITCH_MASK = (1 << 1), //0x2, enabled by default
- DC_DISABLE_FRACTIONAL_PWM_MASK = (1 << 2), //0x4, disabled by default
- DC_PSR_MASK = (1 << 3), //0x8, disabled by default for dcn < 3.1
- DC_EDP_NO_POWER_SEQUENCING = (1 << 4), //0x10, disabled by default
- DC_DISABLE_LTTPR_DP1_4A = (1 << 5), //0x20, disabled by default
- DC_DISABLE_LTTPR_DP2_0 = (1 << 6), //0x40, disabled by default
- DC_PSR_ALLOW_SMU_OPT = (1 << 7), //0x80, disabled by default
- DC_PSR_ALLOW_MULTI_DISP_OPT = (1 << 8), //0x100, disabled by default
- DC_REPLAY_MASK = (1 << 9), //0x200, disabled by default for dcn < 3.1.4
+ /**
+ * @DC_FBC_MASK: (0x1) disabled by default
+ */
+ DC_FBC_MASK = (1 << 0),
+ /**
+ * @DC_MULTI_MON_PP_MCLK_SWITCH_MASK: (0x2) enabled by default
+ */
+ DC_MULTI_MON_PP_MCLK_SWITCH_MASK = (1 << 1),
+ /**
+ * @DC_DISABLE_FRACTIONAL_PWM_MASK: (0x4) disabled by default
+ */
+ DC_DISABLE_FRACTIONAL_PWM_MASK = (1 << 2),
+ /**
+ * @DC_PSR_MASK: (0x8) disabled by default for DCN < 3.1
+ */
+ DC_PSR_MASK = (1 << 3),
+ /**
+ * @DC_EDP_NO_POWER_SEQUENCING: (0x10) disabled by default
+ */
+ DC_EDP_NO_POWER_SEQUENCING = (1 << 4),
+ /**
+ * @DC_DISABLE_LTTPR_DP1_4A: (0x20) disabled by default
+ */
+ DC_DISABLE_LTTPR_DP1_4A = (1 << 5),
+ /**
+ * @DC_DISABLE_LTTPR_DP2_0: (0x40) disabled by default
+ */
+ DC_DISABLE_LTTPR_DP2_0 = (1 << 6),
+ /**
+ * @DC_PSR_ALLOW_SMU_OPT: (0x80) disabled by default
+ */
+ DC_PSR_ALLOW_SMU_OPT = (1 << 7),
+ /**
+ * @DC_PSR_ALLOW_MULTI_DISP_OPT: (0x100) disabled by default
+ */
+ DC_PSR_ALLOW_MULTI_DISP_OPT = (1 << 8),
+ /**
+ * @DC_REPLAY_MASK: (0x200) disabled by default for DCN < 3.1.4
+ */
+ DC_REPLAY_MASK = (1 << 9),
};
/**
@@ -258,64 +291,64 @@ enum DC_FEATURE_MASK {
*/
enum DC_DEBUG_MASK {
/**
- * @DC_DISABLE_PIPE_SPLIT: If set, disable pipe-splitting
+ * @DC_DISABLE_PIPE_SPLIT: (0x1) If set, disable pipe-splitting
*/
DC_DISABLE_PIPE_SPLIT = 0x1,
/**
- * @DC_DISABLE_STUTTER: If set, disable memory stutter mode
+ * @DC_DISABLE_STUTTER: (0x2) If set, disable memory stutter mode
*/
DC_DISABLE_STUTTER = 0x2,
/**
- * @DC_DISABLE_DSC: If set, disable display stream compression
+ * @DC_DISABLE_DSC: (0x4) If set, disable display stream compression
*/
DC_DISABLE_DSC = 0x4,
/**
- * @DC_DISABLE_CLOCK_GATING: If set, disable clock gating optimizations
+ * @DC_DISABLE_CLOCK_GATING: (0x8) If set, disable clock gating optimizations
*/
DC_DISABLE_CLOCK_GATING = 0x8,
/**
- * @DC_DISABLE_PSR: If set, disable Panel self refresh v1 and PSR-SU
+ * @DC_DISABLE_PSR: (0x10) If set, disable Panel self refresh v1 and PSR-SU
*/
DC_DISABLE_PSR = 0x10,
/**
- * @DC_FORCE_SUBVP_MCLK_SWITCH: If set, force mclk switch in subvp, even
+ * @DC_FORCE_SUBVP_MCLK_SWITCH: (0x20) If set, force mclk switch in subvp, even
* if mclk switch in vblank is possible
*/
DC_FORCE_SUBVP_MCLK_SWITCH = 0x20,
/**
- * @DC_DISABLE_MPO: If set, disable multi-plane offloading
+ * @DC_DISABLE_MPO: (0x40) If set, disable multi-plane offloading
*/
DC_DISABLE_MPO = 0x40,
/**
- * @DC_ENABLE_DPIA_TRACE: If set, enable trace logging for DPIA
+ * @DC_ENABLE_DPIA_TRACE: (0x80) If set, enable trace logging for DPIA
*/
DC_ENABLE_DPIA_TRACE = 0x80,
/**
- * @DC_ENABLE_DML2: If set, force usage of DML2, even if the DCN version
+ * @DC_ENABLE_DML2: (0x100) If set, force usage of DML2, even if the DCN version
* does not default to it.
*/
DC_ENABLE_DML2 = 0x100,
/**
- * @DC_DISABLE_PSR_SU: If set, disable PSR SU
+ * @DC_DISABLE_PSR_SU: (0x200) If set, disable PSR SU
*/
DC_DISABLE_PSR_SU = 0x200,
/**
- * @DC_DISABLE_REPLAY: If set, disable Panel Replay
+ * @DC_DISABLE_REPLAY: (0x400) If set, disable Panel Replay
*/
DC_DISABLE_REPLAY = 0x400,
/**
- * @DC_DISABLE_IPS: If set, disable all Idle Power States, all the time.
+ * @DC_DISABLE_IPS: (0x800) If set, disable all Idle Power States, all the time.
* If more than one IPS debug bit is set, the lowest bit takes
* precedence. For example, if DC_FORCE_IPS_ENABLE and
* DC_DISABLE_IPS_DYNAMIC are set, then DC_DISABLE_IPS_DYNAMIC takes
@@ -324,56 +357,57 @@ enum DC_DEBUG_MASK {
DC_DISABLE_IPS = 0x800,
/**
- * @DC_DISABLE_IPS_DYNAMIC: If set, disable all IPS, all the time,
+ * @DC_DISABLE_IPS_DYNAMIC: (0x1000) If set, disable all IPS, all the time,
* *except* when driver goes into suspend.
*/
DC_DISABLE_IPS_DYNAMIC = 0x1000,
/**
- * @DC_DISABLE_IPS2_DYNAMIC: If set, disable IPS2 (IPS1 allowed) if
+ * @DC_DISABLE_IPS2_DYNAMIC: (0x2000) If set, disable IPS2 (IPS1 allowed) if
* there is an enabled display. Otherwise, enable all IPS.
*/
DC_DISABLE_IPS2_DYNAMIC = 0x2000,
/**
- * @DC_FORCE_IPS_ENABLE: If set, force enable all IPS, all the time.
+ * @DC_FORCE_IPS_ENABLE: (0x4000) If set, force enable all IPS, all the time.
*/
DC_FORCE_IPS_ENABLE = 0x4000,
/**
- * @DC_DISABLE_ACPI_EDID: If set, don't attempt to fetch EDID for
+ * @DC_DISABLE_ACPI_EDID: (0x8000) If set, don't attempt to fetch EDID for
* eDP display from ACPI _DDC method.
*/
DC_DISABLE_ACPI_EDID = 0x8000,
/**
- * @DC_DISABLE_HDMI_CEC: If set, disable HDMI-CEC feature in amdgpu driver.
+ * @DC_DISABLE_HDMI_CEC: (0x10000) If set, disable HDMI-CEC feature in amdgpu driver.
*/
DC_DISABLE_HDMI_CEC = 0x10000,
/**
- * @DC_DISABLE_SUBVP_FAMS: If set, disable DCN Sub-Viewport & Firmware Assisted
+ * @DC_DISABLE_SUBVP_FAMS: (0x20000) If set, disable DCN Sub-Viewport & Firmware Assisted
* Memory Clock Switching (FAMS) feature in amdgpu driver.
*/
DC_DISABLE_SUBVP_FAMS = 0x20000,
/**
- * @DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE: If set, disable support for custom brightness curves
+ * @DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE: (0x40000) If set, disable support for custom
+ * brightness curves
*/
DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE = 0x40000,
/**
- * @DC_HDCP_LC_FORCE_FW_ENABLE: If set, use HDCP Locality Check FW
+ * @DC_HDCP_LC_FORCE_FW_ENABLE: (0x80000) If set, use HDCP Locality Check FW
* path regardless of reported HW capabilities.
*/
DC_HDCP_LC_FORCE_FW_ENABLE = 0x80000,
/**
- * @DC_HDCP_LC_ENABLE_SW_FALLBACK: If set, upon HDCP Locality Check FW
+ * @DC_HDCP_LC_ENABLE_SW_FALLBACK: (0x100000) If set, upon HDCP Locality Check FW
* path failure, retry using legacy SW path.
*/
DC_HDCP_LC_ENABLE_SW_FALLBACK = 0x100000,
/**
- * @DC_SKIP_DETECTION_LT: If set, skip detection link training
+ * @DC_SKIP_DETECTION_LT: (0x200000) If set, skip detection link training
*/
DC_SKIP_DETECTION_LT = 0x200000,
};
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index 5c86423c2e92..3d083010e734 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -211,7 +211,7 @@ atom_bios_string = "ATOM"
};
*/
-#pragma pack(1) /* BIOS data must use byte aligment*/
+#pragma pack(1) /* BIOS data must use byte alignment*/
enum atombios_image_offset{
OFFSET_TO_ATOM_ROM_HEADER_POINTER = 0x00000048,
@@ -255,8 +255,8 @@ struct atom_rom_header_v2_2
uint16_t subsystem_vendor_id;
uint16_t subsystem_id;
uint16_t pci_info_offset;
- uint16_t masterhwfunction_offset; //Offest for SW to get all command function offsets, Don't change the position
- uint16_t masterdatatable_offset; //Offest for SW to get all data table offsets, Don't change the position
+ uint16_t masterhwfunction_offset; //Offset for SW to get all command function offsets, Don't change the position
+ uint16_t masterdatatable_offset; //Offset for SW to get all data table offsets, Don't change the position
uint16_t reserved;
uint32_t pspdirtableoffset;
};
@@ -453,7 +453,7 @@ struct atom_dtd_format
uint8_t refreshrate;
};
-/* atom_dtd_format.modemiscinfo defintion */
+/* atom_dtd_format.modemiscinfo definition */
enum atom_dtd_format_modemiscinfo{
ATOM_HSYNC_POLARITY = 0x0002,
ATOM_VSYNC_POLARITY = 0x0004,
@@ -678,7 +678,7 @@ struct lcd_info_v2_1
uint32_t reserved1[8];
};
-/* lcd_info_v2_1.panel_misc defintion */
+/* lcd_info_v2_1.panel_misc definition */
enum atom_lcd_info_panel_misc{
ATOM_PANEL_MISC_FPDI =0x0002,
};
@@ -716,7 +716,7 @@ enum atom_gpio_pin_assignment_gpio_id {
/* gpio_id pre-define id for multiple usage */
/* GPIO use to control PCIE_VDDC in certain SLT board */
PCIE_VDDC_CONTROL_GPIO_PINID = 56,
- /* if PP_AC_DC_SWITCH_GPIO_PINID in Gpio_Pin_LutTable, AC/DC swithing feature is enable */
+ /* if PP_AC_DC_SWITCH_GPIO_PINID in Gpio_Pin_LutTable, AC/DC switching feature is enabled */
PP_AC_DC_SWITCH_GPIO_PINID = 60,
/* VDDC_REGULATOR_VRHOT_GPIO_PINID in Gpio_Pin_LutTable, VRHot feature is enable */
VDDC_VRHOT_GPIO_PINID = 61,
@@ -734,7 +734,7 @@ enum atom_gpio_pin_assignment_gpio_id {
struct atom_gpio_pin_lut_v2_1
{
struct atom_common_table_header table_header;
- /*the real number of this included in the structure is calcualted by using the (whole structure size - the header size)/size of atom_gpio_pin_lut */
+ /*the real number of this included in the structure is calculated by using the (whole structure size - the header size)/size of atom_gpio_pin_lut */
struct atom_gpio_pin_assignment gpio_pin[];
};
@@ -997,7 +997,7 @@ enum atom_connector_layout_info_mini_type_def {
enum atom_display_device_tag_def{
ATOM_DISPLAY_LCD1_SUPPORT = 0x0002, //an embedded display is either an LVDS or eDP signal type of display
- ATOM_DISPLAY_LCD2_SUPPORT = 0x0020, //second edp device tag 0x0020 for backward compability
+ ATOM_DISPLAY_LCD2_SUPPORT = 0x0020, //second edp device tag 0x0020 for backward compatibility
ATOM_DISPLAY_DFP1_SUPPORT = 0x0008,
ATOM_DISPLAY_DFP2_SUPPORT = 0x0080,
ATOM_DISPLAY_DFP3_SUPPORT = 0x0200,
@@ -1011,7 +1011,7 @@ struct atom_display_object_path_v2
{
uint16_t display_objid; //Connector Object ID or Misc Object ID
uint16_t disp_recordoffset;
- uint16_t encoderobjid; //first encoder closer to the connector, could be either an external or intenal encoder
+ uint16_t encoderobjid; //first encoder closer to the connector, could be either an external or internal encoder
uint16_t extencoderobjid; //2nd encoder after the first encoder, from the connector point of view;
uint16_t encoder_recordoffset;
uint16_t extencoder_recordoffset;
@@ -1023,7 +1023,7 @@ struct atom_display_object_path_v2
struct atom_display_object_path_v3 {
uint16_t display_objid; //Connector Object ID or Misc Object ID
uint16_t disp_recordoffset;
- uint16_t encoderobjid; //first encoder closer to the connector, could be either an external or intenal encoder
+ uint16_t encoderobjid; //first encoder closer to the connector, could be either an external or internal encoder
uint16_t reserved1; //only on USBC case, otherwise always = 0
uint16_t reserved2; //reserved and always = 0
uint16_t reserved3; //reserved and always = 0
@@ -3547,7 +3547,7 @@ struct atom_voltage_object_header_v4{
enum atom_voltage_object_mode
{
VOLTAGE_OBJ_GPIO_LUT = 0, //VOLTAGE and GPIO Lookup table ->atom_gpio_voltage_object_v4
- VOLTAGE_OBJ_VR_I2C_INIT_SEQ = 3, //VOLTAGE REGULATOR INIT sequece through I2C -> atom_i2c_voltage_object_v4
+ VOLTAGE_OBJ_VR_I2C_INIT_SEQ = 3, //VOLTAGE REGULATOR INIT sequence through I2C -> atom_i2c_voltage_object_v4
VOLTAGE_OBJ_PHASE_LUT = 4, //Set Vregulator Phase lookup table ->atom_gpio_voltage_object_v4
VOLTAGE_OBJ_SVID2 = 7, //Indicate voltage control by SVID2 ->atom_svid2_voltage_object_v4
VOLTAGE_OBJ_EVV = 8,
@@ -3585,7 +3585,7 @@ struct atom_gpio_voltage_object_v4
{
struct atom_voltage_object_header_v4 header; // voltage mode = VOLTAGE_OBJ_GPIO_LUT or VOLTAGE_OBJ_PHASE_LUT
uint8_t gpio_control_id; // default is 0 which indicate control through CG VID mode
- uint8_t gpio_entry_num; // indiate the entry numbers of Votlage/Gpio value Look up table
+ uint8_t gpio_entry_num; // indicates the number of entries in the Voltage/Gpio value look-up table
uint8_t phase_delay_us; // phase delay in unit of micro second
uint8_t reserved;
uint32_t gpio_mask_val; // GPIO Mask value
@@ -4507,8 +4507,8 @@ struct amd_acpi_description_header{
struct uefi_acpi_vfct{
struct amd_acpi_description_header sheader;
uint8_t tableUUID[16]; //0x24
- uint32_t vbiosimageoffset; //0x34. Offset to the first GOP_VBIOS_CONTENT block from the beginning of the stucture.
- uint32_t lib1Imageoffset; //0x38. Offset to the first GOP_LIB1_CONTENT block from the beginning of the stucture.
+ uint32_t vbiosimageoffset; //0x34. Offset to the first GOP_VBIOS_CONTENT block from the beginning of the structure.
+ uint32_t lib1Imageoffset; //0x38. Offset to the first GOP_LIB1_CONTENT block from the beginning of the structure.
uint32_t reserved[4]; //0x3C
};
@@ -4540,7 +4540,7 @@ struct gop_lib1_content {
/*
***************************************************************************
Scratch Register definitions
- Each number below indicates which scratch regiser request, Active and
+ Each number below indicates which scratch register request, Active and
Connect all share the same definitions as display_device_tag defines
***************************************************************************
*/
diff --git a/drivers/gpu/drm/amd/include/dm_pp_interface.h b/drivers/gpu/drm/amd/include/dm_pp_interface.h
index acd1cef61b7c..349544504c93 100644
--- a/drivers/gpu/drm/amd/include/dm_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/dm_pp_interface.h
@@ -65,6 +65,7 @@ struct single_display_configuration {
uint32_t view_resolution_cy;
enum amd_pp_display_config_type displayconfigtype;
uint32_t vertical_refresh; /* for active display */
+ uint32_t pixel_clock; /* Pixel clock in kHz (for HDMI only: normalized) */
};
#define MAX_NUM_DISPLAY 32
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index e2b1ea7467b0..2b0cdb2a2775 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -30,6 +30,12 @@ extern const struct amdgpu_ip_block_version smu_v12_0_ip_block;
extern const struct amdgpu_ip_block_version smu_v13_0_ip_block;
extern const struct amdgpu_ip_block_version smu_v14_0_ip_block;
+enum smu_temp_metric_type {
+ SMU_TEMP_METRIC_BASEBOARD,
+ SMU_TEMP_METRIC_GPUBOARD,
+ SMU_TEMP_METRIC_MAX,
+};
+
enum smu_event_type {
SMU_EVENT_RESET_COMPLETE = 0,
};
@@ -156,6 +162,10 @@ enum amd_pp_sensors {
AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK,
AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK,
AMDGPU_PP_SENSOR_VCN_LOAD,
+ AMDGPU_PP_SENSOR_NODEPOWERLIMIT,
+ AMDGPU_PP_SENSOR_NODEPOWER,
+ AMDGPU_PP_SENSOR_GPPTRESIDENCY,
+ AMDGPU_PP_SENSOR_MAXNODEPOWERLIMIT,
};
enum amd_pp_task {
@@ -496,6 +506,8 @@ struct amd_pm_funcs {
int (*set_df_cstate)(void *handle, enum pp_df_cstate state);
int (*set_xgmi_pstate)(void *handle, uint32_t pstate);
ssize_t (*get_gpu_metrics)(void *handle, void **table);
+ ssize_t (*get_temp_metrics)(void *handle, enum smu_temp_metric_type type, void *table);
+ bool (*temp_metrics_is_supported)(void *handle, enum smu_temp_metric_type type);
ssize_t (*get_xcp_metrics)(void *handle, int xcp_id, void *table);
ssize_t (*get_pm_metrics)(void *handle, void *pmmetrics, size_t size);
int (*set_watermarks_for_clock_ranges)(void *handle,
@@ -1595,6 +1607,79 @@ struct amdgpu_pm_metrics {
uint8_t data[];
};
+enum amdgpu_vr_temp {
+ AMDGPU_VDDCR_VDD0_TEMP,
+ AMDGPU_VDDCR_VDD1_TEMP,
+ AMDGPU_VDDCR_VDD2_TEMP,
+ AMDGPU_VDDCR_VDD3_TEMP,
+ AMDGPU_VDDCR_SOC_A_TEMP,
+ AMDGPU_VDDCR_SOC_C_TEMP,
+ AMDGPU_VDDCR_SOCIO_A_TEMP,
+ AMDGPU_VDDCR_SOCIO_C_TEMP,
+ AMDGPU_VDD_085_HBM_TEMP,
+ AMDGPU_VDDCR_11_HBM_B_TEMP,
+ AMDGPU_VDDCR_11_HBM_D_TEMP,
+ AMDGPU_VDD_USR_TEMP,
+ AMDGPU_VDDIO_11_E32_TEMP,
+ AMDGPU_VR_MAX_TEMP_ENTRIES,
+};
+
+enum amdgpu_system_temp {
+ AMDGPU_UBB_FPGA_TEMP,
+ AMDGPU_UBB_FRONT_TEMP,
+ AMDGPU_UBB_BACK_TEMP,
+ AMDGPU_UBB_OAM7_TEMP,
+ AMDGPU_UBB_IBC_TEMP,
+ AMDGPU_UBB_UFPGA_TEMP,
+ AMDGPU_UBB_OAM1_TEMP,
+ AMDGPU_OAM_0_1_HSC_TEMP,
+ AMDGPU_OAM_2_3_HSC_TEMP,
+ AMDGPU_OAM_4_5_HSC_TEMP,
+ AMDGPU_OAM_6_7_HSC_TEMP,
+ AMDGPU_UBB_FPGA_0V72_VR_TEMP,
+ AMDGPU_UBB_FPGA_3V3_VR_TEMP,
+ AMDGPU_RETIMER_0_1_2_3_1V2_VR_TEMP,
+ AMDGPU_RETIMER_4_5_6_7_1V2_VR_TEMP,
+ AMDGPU_RETIMER_0_1_0V9_VR_TEMP,
+ AMDGPU_RETIMER_4_5_0V9_VR_TEMP,
+ AMDGPU_RETIMER_2_3_0V9_VR_TEMP,
+ AMDGPU_RETIMER_6_7_0V9_VR_TEMP,
+ AMDGPU_OAM_0_1_2_3_3V3_VR_TEMP,
+ AMDGPU_OAM_4_5_6_7_3V3_VR_TEMP,
+ AMDGPU_IBC_HSC_TEMP,
+ AMDGPU_IBC_TEMP,
+ AMDGPU_SYSTEM_MAX_TEMP_ENTRIES = 32,
+};
+
+enum amdgpu_node_temp {
+ AMDGPU_RETIMER_X_TEMP,
+ AMDGPU_OAM_X_IBC_TEMP,
+ AMDGPU_OAM_X_IBC_2_TEMP,
+ AMDGPU_OAM_X_VDD18_VR_TEMP,
+ AMDGPU_OAM_X_04_HBM_B_VR_TEMP,
+ AMDGPU_OAM_X_04_HBM_D_VR_TEMP,
+ AMDGPU_NODE_MAX_TEMP_ENTRIES = 12,
+};
+
+struct amdgpu_gpuboard_temp_metrics_v1_0 {
+ struct metrics_table_header common_header;
+ uint16_t label_version;
+ uint16_t node_id;
+ uint64_t accumulation_counter;
+ /* Encoded temperature in Celsius: bits 24:31 are the sensor id, bits 0:23 the temp value */
+ uint32_t node_temp[AMDGPU_NODE_MAX_TEMP_ENTRIES];
+ uint32_t vr_temp[AMDGPU_VR_MAX_TEMP_ENTRIES];
+};
+
+struct amdgpu_baseboard_temp_metrics_v1_0 {
+ struct metrics_table_header common_header;
+ uint16_t label_version;
+ uint16_t node_id;
+ uint64_t accumulation_counter;
+ /* Encoded temperature in Celsius: bits 24:31 are the sensor id, bits 0:23 the temp value */
+ uint32_t system_temp[AMDGPU_SYSTEM_MAX_TEMP_ENTRIES];
+};
+
struct amdgpu_partition_metrics_v1_0 {
struct metrics_table_header common_header;
/* Current clocks (Mhz) */
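The packed temperature entries above encode a sensor id in bits 24:31 and the temperature value in bits 0:23. A minimal decode sketch, with hypothetical helper names; the bit layout is taken from the struct comments:

    #include <stdint.h>

    /* Hypothetical helpers: bits 24:31 hold the sensor id,
     * bits 0:23 hold the temperature in Celsius, as noted in
     * the struct comments above. */
    static inline uint8_t temp_entry_sensor_id(uint32_t entry)
    {
            return (entry >> 24) & 0xff;
    }

    static inline uint32_t temp_entry_temp(uint32_t entry)
    {
            return entry & 0xffffff;
    }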
diff --git a/drivers/gpu/drm/amd/include/mes_v11_api_def.h b/drivers/gpu/drm/amd/include/mes_v11_api_def.h
index 15680c3f4970..ab1cfc92dbeb 100644
--- a/drivers/gpu/drm/amd/include/mes_v11_api_def.h
+++ b/drivers/gpu/drm/amd/include/mes_v11_api_def.h
@@ -238,7 +238,8 @@ union MESAPI_SET_HW_RESOURCES {
uint32_t enable_mes_sch_stb_log : 1;
uint32_t limit_single_process : 1;
uint32_t is_strix_tmz_wa_enabled :1;
- uint32_t reserved : 13;
+ uint32_t enable_lr_compute_wa : 1;
+ uint32_t reserved : 12;
};
uint32_t uint32_t_all;
};
diff --git a/drivers/gpu/drm/amd/include/mes_v12_api_def.h b/drivers/gpu/drm/amd/include/mes_v12_api_def.h
index d85ffab2aff9..69611c7e30e3 100644
--- a/drivers/gpu/drm/amd/include/mes_v12_api_def.h
+++ b/drivers/gpu/drm/amd/include/mes_v12_api_def.h
@@ -66,6 +66,7 @@ enum MES_SCH_API_OPCODE {
MES_SCH_API_SET_SE_MODE = 17,
MES_SCH_API_SET_GANG_SUBMIT = 18,
MES_SCH_API_SET_HW_RSRC_1 = 19,
+ MES_SCH_API_INV_TLBS = 20,
MES_SCH_API_MAX = 0xFF
};
@@ -286,7 +287,8 @@ union MESAPI_SET_HW_RESOURCES {
uint32_t limit_single_process : 1;
uint32_t unmapped_doorbell_handling: 2;
uint32_t enable_mes_fence_int: 1;
- uint32_t reserved : 10;
+ uint32_t enable_lr_compute_wa : 1;
+ uint32_t reserved : 9;
};
uint32_t uint32_all;
};
@@ -870,6 +872,35 @@ union MESAPI__SET_GANG_SUBMIT {
uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS];
};
+/*
+ * @inv_sel 0-select pasid as input to do the invalidation, 1-select vmid
+ * @flush_type 0-old style, 1-lightweight, 2-heavyweight, 3-heavyweight2
+ * @inv_sel_id specific pasid when inv_sel is 0 and specific vmid if inv_sel is 1
+ * @hub_id 0-gc_hub, 1-mm_hub
+ */
+struct INV_TLBS {
+ uint8_t inv_sel;
+ uint8_t flush_type;
+ uint16_t inv_sel_id;
+ uint32_t hub_id;
+ /* If the following two inv_range settings are both 0, the whole VM will be
+ * invalidated; otherwise only the required range is invalidated.
+ */
+ uint64_t inv_range_va_start;
+ uint64_t inv_range_size;
+ uint64_t reserved;
+};
+
+union MESAPI__INV_TLBS {
+ struct {
+ union MES_API_HEADER header;
+ struct MES_API_STATUS api_status;
+ struct INV_TLBS invalidate_tlbs;
+ };
+
+ uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS];
+};
+
#pragma pack(pop)
#endif
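For illustration, a hedged sketch of how a MES_SCH_API_INV_TLBS request could be populated, following the field comments on struct INV_TLBS above; the header/api_status setup and the submission path are driver-specific and omitted:

    /* Sketch only: invalidate a 2 MiB VA range for pasid 0x10 on the
     * GC hub with a light-weight flush. Values follow the INV_TLBS
     * field comments; the VA start address here is made up. */
    union MESAPI__INV_TLBS msg = {0};

    msg.invalidate_tlbs.inv_sel            = 0;     /* select by pasid */
    msg.invalidate_tlbs.flush_type         = 1;     /* light weight */
    msg.invalidate_tlbs.inv_sel_id         = 0x10;  /* the pasid */
    msg.invalidate_tlbs.hub_id             = 0;     /* gc_hub */
    msg.invalidate_tlbs.inv_range_va_start = 0x100000000ULL;
    msg.invalidate_tlbs.inv_range_size     = 2 * 1024 * 1024;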
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 71d986dd7a6e..518d07afc7df 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -764,10 +764,6 @@ int amdgpu_dpm_send_rma_reason(struct amdgpu_device *adev)
ret = smu_send_rma_reason(smu);
mutex_unlock(&adev->pm.mutex);
- if (adev->cper.enabled)
- if (amdgpu_cper_generate_bp_threshold_record(adev))
- dev_warn(adev->dev, "fail to generate bad page threshold cper records\n");
-
return ret;
}
@@ -824,6 +820,21 @@ int amdgpu_dpm_reset_vcn(struct amdgpu_device *adev, uint32_t inst_mask)
return ret;
}
+bool amdgpu_dpm_reset_vcn_is_supported(struct amdgpu_device *adev)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ bool ret;
+
+ if (!is_support_sw_smu(adev))
+ return false;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = smu_reset_vcn_is_supported(smu);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
enum pp_clock_type type,
uint32_t *min,
@@ -2038,6 +2049,66 @@ int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
}
/**
+ * amdgpu_dpm_get_temp_metrics - Retrieve metrics for a specific temperature
+ * type
+ * @adev: Pointer to the device.
+ * @type: Identifier for the temperature type metrics to be fetched.
+ * @table: Pointer to a buffer where the metrics will be stored. If NULL, the
+ * function returns the size of the metrics structure.
+ *
+ * This function retrieves metrics for a specific temperature type. If the
+ * table parameter is NULL, the function returns the size of the metrics
+ * structure without populating it.
+ *
+ * Return: Size of the metrics structure on success, or a negative error code on failure.
+ */
+ssize_t amdgpu_dpm_get_temp_metrics(struct amdgpu_device *adev,
+ enum smu_temp_metric_type type, void *table)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret;
+
+ if (!pp_funcs->get_temp_metrics ||
+ !amdgpu_dpm_is_temp_metrics_supported(adev, type))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->get_temp_metrics(adev->powerplay.pp_handle, type, table);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
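The NULL-table convention documented above implies a two-step caller pattern: query the size first, then fetch into a buffer. A minimal sketch, with error handling trimmed and a sleepable context assumed:

    /* Sketch: size query with table == NULL, then the actual fetch. */
    ssize_t size = amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_BASEBOARD, NULL);

    if (size > 0) {
            void *table = kmalloc(size, GFP_KERNEL);

            if (table)
                    size = amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_BASEBOARD,
                                                       table);
    }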
+/**
+ * amdgpu_dpm_is_temp_metrics_supported - Check whether a specific temperature
+ * metrics type is supported
+ * @adev: Pointer to the device.
+ * @type: Identifier for the temperature type metrics to be fetched.
+ *
+ * This function reports whether the given temperature metrics type is supported.
+ *
+ * Return: True if the metrics type is supported, false otherwise.
+ */
+bool amdgpu_dpm_is_temp_metrics_supported(struct amdgpu_device *adev,
+ enum smu_temp_metric_type type)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ bool support_temp_metrics = false;
+
+ if (!pp_funcs->temp_metrics_is_supported)
+ return support_temp_metrics;
+
+ if (is_support_sw_smu(adev)) {
+ mutex_lock(&adev->pm.mutex);
+ support_temp_metrics =
+ pp_funcs->temp_metrics_is_supported(adev->powerplay.pp_handle, type);
+ mutex_unlock(&adev->pm.mutex);
+ }
+
+ return support_temp_metrics;
+}
+
+/**
* amdgpu_dpm_get_xcp_metrics - Retrieve metrics for a specific compute
* partition
* @adev: Pointer to the device.
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c
index 42efe838fa85..b5e9c3ecf703 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c
@@ -27,69 +27,69 @@
#include "amdgpu_smu.h"
#include "amdgpu_dpm_internal.h"
-void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev)
+void amdgpu_dpm_get_display_cfg(struct amdgpu_device *adev)
{
struct drm_device *ddev = adev_to_drm(adev);
+ struct amd_pp_display_configuration *cfg = &adev->pm.pm_display_cfg;
+ struct single_display_configuration *display_cfg;
struct drm_crtc *crtc;
struct amdgpu_crtc *amdgpu_crtc;
+ struct amdgpu_connector *conn;
+ int num_crtcs = 0;
+ int vrefresh;
+ u32 vblank_in_pixels, vblank_time_us;
+
+ cfg->min_vblank_time = 0xffffffff; /* if the displays are off, vblank time is max */
- adev->pm.dpm.new_active_crtcs = 0;
- adev->pm.dpm.new_active_crtc_count = 0;
if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
- list_for_each_entry(crtc,
- &ddev->mode_config.crtc_list, head) {
+ list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
amdgpu_crtc = to_amdgpu_crtc(crtc);
- if (amdgpu_crtc->enabled) {
- adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
- adev->pm.dpm.new_active_crtc_count++;
- }
- }
- }
-}
-u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
-{
- struct drm_device *dev = adev_to_drm(adev);
- struct drm_crtc *crtc;
- struct amdgpu_crtc *amdgpu_crtc;
- u32 vblank_in_pixels;
- u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
+ /* The array should only contain active displays. */
+ if (!amdgpu_crtc->enabled)
+ continue;
+
+ conn = to_amdgpu_connector(amdgpu_crtc->connector);
+ display_cfg = &adev->pm.pm_display_cfg.displays[num_crtcs++];
+
+ if (amdgpu_crtc->hw_mode.clock) {
+ vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
- if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- amdgpu_crtc = to_amdgpu_crtc(crtc);
- if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
vblank_in_pixels =
amdgpu_crtc->hw_mode.crtc_htotal *
(amdgpu_crtc->hw_mode.crtc_vblank_end -
amdgpu_crtc->hw_mode.crtc_vdisplay +
(amdgpu_crtc->v_border * 2));
- vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
- break;
- }
- }
- }
+ vblank_time_us =
+ vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
- return vblank_time_us;
-}
+ /* The legacy (non-DC) code has issues with mclk switching
+ * with refresh rates over 120 Hz. Disable mclk switching.
+ */
+ if (vrefresh > 120)
+ vblank_time_us = 0;
-u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
-{
- struct drm_device *dev = adev_to_drm(adev);
- struct drm_crtc *crtc;
- struct amdgpu_crtc *amdgpu_crtc;
- u32 vrefresh = 0;
+ /* Find minimum vblank time. */
+ if (vblank_time_us < cfg->min_vblank_time)
+ cfg->min_vblank_time = vblank_time_us;
- if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- amdgpu_crtc = to_amdgpu_crtc(crtc);
- if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
- vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
- break;
+ /* Find vertical refresh rate of first active display. */
+ if (!cfg->vrefresh)
+ cfg->vrefresh = vrefresh;
}
+
+ if (amdgpu_crtc->crtc_id < cfg->crtc_index) {
+ /* Find first active CRTC and its line time. */
+ cfg->crtc_index = amdgpu_crtc->crtc_id;
+ cfg->line_time_in_us = amdgpu_crtc->line_time;
+ }
+
+ display_cfg->controller_id = amdgpu_crtc->crtc_id;
+ display_cfg->pixel_clock = conn->pixelclock_for_modeset;
}
}
- return vrefresh;
+ cfg->display_clk = adev->clock.default_dispclk;
+ cfg->num_display = num_crtcs;
}
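As a worked example of the vblank-time computation above, take standard CEA 1080p60 timing (htotal 2200, vblank_end 1125, vdisplay 1080, pixel clock 148500 kHz, no borders):

    /* vblank_in_pixels = 2200 * (1125 - 1080 + 0) = 99000
     * vblank_time_us   = 99000 * 1000 / 148500    = 666 us
     * 666 us is above the 450 us GDDR5 switch limit used by the SI
     * code below, so MCLK switching stays enabled for this display. */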
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 5fbfe7333b54..b5fbb0fd1dc0 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -110,9 +110,10 @@ static int amdgpu_pm_dev_state_check(struct amdgpu_device *adev, bool runpm)
bool runpm_check = runpm ? adev->in_runpm : false;
if (amdgpu_in_reset(adev))
- return -EPERM;
+ return -EBUSY;
+
if (adev->in_suspend && !runpm_check)
- return -EPERM;
+ return -EBUSY;
return 0;
}
@@ -1420,9 +1421,9 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
return -EINVAL;
}
-static int amdgpu_hwmon_get_sensor_generic(struct amdgpu_device *adev,
- enum amd_pp_sensors sensor,
- void *query)
+static int amdgpu_pm_get_sensor_generic(struct amdgpu_device *adev,
+ enum amd_pp_sensors sensor,
+ void *query)
{
int r, size = sizeof(uint32_t);
@@ -1455,7 +1456,7 @@ static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
unsigned int value;
int r;
- r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_LOAD, &value);
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_LOAD, &value);
if (r)
return r;
@@ -1479,7 +1480,7 @@ static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
unsigned int value;
int r;
- r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_LOAD, &value);
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_LOAD, &value);
if (r)
return r;
@@ -1503,7 +1504,7 @@ static ssize_t amdgpu_get_vcn_busy_percent(struct device *dev,
unsigned int value;
int r;
- r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VCN_LOAD, &value);
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VCN_LOAD, &value);
if (r)
return r;
@@ -1782,7 +1783,7 @@ static int amdgpu_show_powershift_percent(struct device *dev,
uint32_t ss_power;
int r = 0, i;
- r = amdgpu_hwmon_get_sensor_generic(adev, sensor, (void *)&ss_power);
+ r = amdgpu_pm_get_sensor_generic(adev, sensor, (void *)&ss_power);
if (r == -EOPNOTSUPP) {
/* sensor not available on dGPU, try to read from APU */
adev = NULL;
@@ -1795,7 +1796,7 @@ static int amdgpu_show_powershift_percent(struct device *dev,
}
mutex_unlock(&mgpu_info.mutex);
if (adev)
- r = amdgpu_hwmon_get_sensor_generic(adev, sensor, (void *)&ss_power);
+ r = amdgpu_pm_get_sensor_generic(adev, sensor, (void *)&ss_power);
}
if (r)
@@ -1905,11 +1906,11 @@ static int ss_bias_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
if (!amdgpu_device_supports_smart_shift(adev))
*states = ATTR_STATE_UNSUPPORTED;
- else if (amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE,
- (void *)&ss_power))
+ else if (amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE,
+ (void *)&ss_power))
*states = ATTR_STATE_UNSUPPORTED;
- else if (amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE,
- (void *)&ss_power))
+ else if (amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE,
+ (void *)&ss_power))
*states = ATTR_STATE_UNSUPPORTED;
return 0;
@@ -2073,6 +2074,265 @@ static int pp_dpm_clk_default_attr_update(struct amdgpu_device *adev, struct amd
return 0;
}
+/**
+ * DOC: board
+ *
+ * Certain SOCs can support reporting of various board attributes. This is useful
+ * for user applications to monitor various board-related attributes.
+ *
+ * The amdgpu driver provides a sysfs API for reporting board attributes. Presently,
+ * seven types of attributes are reported. Baseboard temperature and
+ * GPU board temperature are reported as binary files. NPM status, current node power limit,
+ * max node power limit, node power and global PPT residency are reported as ASCII text files.
+ *
+ * .. code-block:: console
+ *
+ * hexdump /sys/bus/pci/devices/.../board/baseboard_temp
+ *
+ * hexdump /sys/bus/pci/devices/.../board/gpuboard_temp
+ *
+ * hexdump /sys/bus/pci/devices/.../board/npm_status
+ *
+ * hexdump /sys/bus/pci/devices/.../board/cur_node_power_limit
+ *
+ * hexdump /sys/bus/pci/devices/.../board/max_node_power_limit
+ *
+ * hexdump /sys/bus/pci/devices/.../board/node_power
+ *
+ * hexdump /sys/bus/pci/devices/.../board/global_ppt_resid
+ */
+
+/**
+ * DOC: baseboard_temp
+ *
+ * The amdgpu driver provides a sysfs API for retrieving current baseboard
+ * temperature metrics data. The file baseboard_temp is used for this.
+ * Reading the file will dump all the current baseboard temperature metrics data.
+ */
+static ssize_t amdgpu_get_baseboard_temp_metrics(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ ssize_t size;
+ int ret;
+
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
+
+ size = amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_BASEBOARD, NULL);
+ if (size <= 0)
+ goto out;
+ if (size >= PAGE_SIZE) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_BASEBOARD, buf);
+
+out:
+ amdgpu_pm_put_access(adev);
+
+ if (ret)
+ return ret;
+
+ return size;
+}
+
+/**
+ * DOC: gpuboard_temp
+ *
+ * The amdgpu driver provides a sysfs API for retrieving current gpuboard
+ * temperature metrics data. The file gpuboard_temp is used for this.
+ * Reading the file will dump all the current gpuboard temperature metrics data.
+ */
+static ssize_t amdgpu_get_gpuboard_temp_metrics(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ ssize_t size;
+ int ret;
+
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
+
+ size = amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_GPUBOARD, NULL);
+ if (size <= 0)
+ goto out;
+ if (size >= PAGE_SIZE) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_GPUBOARD, buf);
+
+out:
+ amdgpu_pm_put_access(adev);
+
+ if (ret)
+ return ret;
+
+ return size;
+}
+
+/**
+ * DOC: cur_node_power_limit
+ *
+ * The amdgpu driver provides a sysfs API for retrieving current node power limit.
+ * The file cur_node_power_limit is used for this.
+ */
+static ssize_t amdgpu_show_cur_node_power_limit(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ u32 nplimit;
+ int r;
+
+ /* get the current node power limit */
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_NODEPOWERLIMIT,
+ (void *)&nplimit);
+ if (r)
+ return r;
+
+ return sysfs_emit(buf, "%u\n", nplimit);
+}
+
+/**
+ * DOC: node_power
+ *
+ * The amdgpu driver provides a sysfs API for retrieving current node power.
+ * The file node_power is used for this.
+ */
+static ssize_t amdgpu_show_node_power(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ u32 npower;
+ int r;
+
+ /* get the node power */
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_NODEPOWER,
+ (void *)&npower);
+ if (r)
+ return r;
+
+ return sysfs_emit(buf, "%u\n", npower);
+}
+
+/**
+ * DOC: npm_status
+ *
+ * The amdgpu driver provides a sysfs API for retrieving current node power management status.
+ * The file npm_status is used for this. It shows the status as enabled or disabled based on
+ * the current node power value. If node power is zero, the status is disabled; otherwise it is enabled.
+ */
+static ssize_t amdgpu_show_npm_status(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ u32 npower;
+ int r;
+
+ /* get the node power */
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_NODEPOWER,
+ (void *)&npower);
+ if (r)
+ return r;
+
+ return sysfs_emit(buf, "%s\n", npower ? "enabled" : "disabled");
+}
+
+/**
+ * DOC: global_ppt_resid
+ *
+ * The amdgpu driver provides a sysfs API for retrieving global ppt residency.
+ * The file global_ppt_resid is used for this.
+ */
+static ssize_t amdgpu_show_global_ppt_resid(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ u32 gpptresid;
+ int r;
+
+ /* get the global ppt residency */
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPPTRESIDENCY,
+ (void *)&gpptresid);
+ if (r)
+ return r;
+
+ return sysfs_emit(buf, "%u\n", gpptresid);
+}
+
+/**
+ * DOC: max_node_power_limit
+ *
+ * The amdgpu driver provides a sysfs API for retrieving maximum node power limit.
+ * The file max_node_power_limit is used for this.
+ */
+static ssize_t amdgpu_show_max_node_power_limit(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ u32 max_nplimit;
+ int r;
+
+ /* get the max node power limit */
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAXNODEPOWERLIMIT,
+ (void *)&max_nplimit);
+ if (r)
+ return r;
+
+ return sysfs_emit(buf, "%u\n", max_nplimit);
+}
+
+static DEVICE_ATTR(baseboard_temp, 0444, amdgpu_get_baseboard_temp_metrics, NULL);
+static DEVICE_ATTR(gpuboard_temp, 0444, amdgpu_get_gpuboard_temp_metrics, NULL);
+static DEVICE_ATTR(cur_node_power_limit, 0444, amdgpu_show_cur_node_power_limit, NULL);
+static DEVICE_ATTR(node_power, 0444, amdgpu_show_node_power, NULL);
+static DEVICE_ATTR(global_ppt_resid, 0444, amdgpu_show_global_ppt_resid, NULL);
+static DEVICE_ATTR(max_node_power_limit, 0444, amdgpu_show_max_node_power_limit, NULL);
+static DEVICE_ATTR(npm_status, 0444, amdgpu_show_npm_status, NULL);
+
+static struct attribute *board_attrs[] = {
+ &dev_attr_baseboard_temp.attr,
+ &dev_attr_gpuboard_temp.attr,
+ NULL
+};
+
+static umode_t amdgpu_board_attr_visible(struct kobject *kobj, struct attribute *attr, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+
+ if (attr == &dev_attr_baseboard_temp.attr) {
+ if (!amdgpu_dpm_is_temp_metrics_supported(adev, SMU_TEMP_METRIC_BASEBOARD))
+ return 0;
+ }
+
+ if (attr == &dev_attr_gpuboard_temp.attr) {
+ if (!amdgpu_dpm_is_temp_metrics_supported(adev, SMU_TEMP_METRIC_GPUBOARD))
+ return 0;
+ }
+
+ return attr->mode;
+}
+
+const struct attribute_group amdgpu_board_attr_group = {
+ .name = "board",
+ .attrs = board_attrs,
+ .is_visible = amdgpu_board_attr_visible,
+};
+
/* pm policy attributes */
struct amdgpu_pm_policy_attr {
struct device_attribute dev_attr;
@@ -2507,18 +2767,18 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
switch (channel) {
case PP_TEMP_JUNCTION:
/* get current junction temperature */
- r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
- (void *)&temp);
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
+ (void *)&temp);
break;
case PP_TEMP_EDGE:
/* get current edge temperature */
- r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_EDGE_TEMP,
- (void *)&temp);
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_EDGE_TEMP,
+ (void *)&temp);
break;
case PP_TEMP_MEM:
/* get current memory temperature */
- r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_TEMP,
- (void *)&temp);
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_TEMP,
+ (void *)&temp);
break;
default:
r = -EINVAL;
@@ -2780,8 +3040,8 @@ static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
u32 min_rpm = 0;
int r;
- r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
- (void *)&min_rpm);
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
+ (void *)&min_rpm);
if (r)
return r;
@@ -2797,8 +3057,8 @@ static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
u32 max_rpm = 0;
int r;
- r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
- (void *)&max_rpm);
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
+ (void *)&max_rpm);
if (r)
return r;
@@ -2931,8 +3191,8 @@ static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
int r;
/* get the voltage */
- r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDGFX,
- (void *)&vddgfx);
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDGFX,
+ (void *)&vddgfx);
if (r)
return r;
@@ -2948,8 +3208,8 @@ static ssize_t amdgpu_hwmon_show_vddboard(struct device *dev,
int r;
/* get the voltage */
- r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDBOARD,
- (void *)&vddboard);
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDBOARD,
+ (void *)&vddboard);
if (r)
return r;
@@ -2982,8 +3242,8 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
return -EINVAL;
/* get the voltage */
- r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDNB,
- (void *)&vddnb);
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDNB,
+ (void *)&vddnb);
if (r)
return r;
@@ -3005,7 +3265,7 @@ static int amdgpu_hwmon_get_power(struct device *dev,
u32 query = 0;
int r;
- r = amdgpu_hwmon_get_sensor_generic(adev, sensor, (void *)&query);
+ r = amdgpu_pm_get_sensor_generic(adev, sensor, (void *)&query);
if (r)
return r;
@@ -3125,9 +3385,6 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
int err;
u32 value;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
-
err = kstrtou32(buf, 10, &value);
if (err)
return err;
@@ -3158,8 +3415,8 @@ static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
int r;
/* get the sclk */
- r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
- (void *)&sclk);
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
+ (void *)&sclk);
if (r)
return r;
@@ -3182,8 +3439,8 @@ static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
int r;
/* get the sclk */
- r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
- (void *)&mclk);
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
+ (void *)&mclk);
if (r)
return r;
@@ -3469,6 +3726,10 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
return 0;
}
+ if (attr == &sensor_dev_attr_power1_cap.dev_attr.attr &&
+ amdgpu_virt_cap_is_rw(&adev->virt.virt_caps, AMDGPU_VIRT_CAP_POWER_LIMIT))
+ effective_mode |= S_IWUSR;
+
/* not implemented yet for APUs having < GC 9.3.0 (Renoir) */
if (((adev->family == AMDGPU_FAMILY_SI) ||
((adev->flags & AMD_IS_APU) && (gc_ver < IP_VERSION(9, 3, 0)))) &&
@@ -3477,10 +3738,12 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
/* not all products support both average and instantaneous */
if (attr == &sensor_dev_attr_power1_average.dev_attr.attr &&
- amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, (void *)&tmp) == -EOPNOTSUPP)
+ amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER,
+ (void *)&tmp) == -EOPNOTSUPP)
return 0;
if (attr == &sensor_dev_attr_power1_input.dev_attr.attr &&
- amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER, (void *)&tmp) == -EOPNOTSUPP)
+ amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER,
+ (void *)&tmp) == -EOPNOTSUPP)
return 0;
/* hide max/min values if we can't both query and manage the fan */
@@ -3519,8 +3782,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
/* only few boards support vddboard */
if ((attr == &sensor_dev_attr_in2_input.dev_attr.attr ||
attr == &sensor_dev_attr_in2_label.dev_attr.attr) &&
- amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDBOARD,
- (void *)&tmp) == -EOPNOTSUPP)
+ amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDBOARD,
+ (void *)&tmp) == -EOPNOTSUPP)
return 0;
/* no mclk on APUs other than gc 9,4,3*/
@@ -4402,6 +4665,7 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
{
enum amdgpu_sriov_vf_mode mode;
uint32_t mask = 0;
+ uint32_t tmp;
int ret;
if (adev->pm.sysfs_initialized)
@@ -4463,6 +4727,28 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
goto err_out0;
}
+ if (amdgpu_dpm_is_temp_metrics_supported(adev, SMU_TEMP_METRIC_GPUBOARD)) {
+ ret = devm_device_add_group(adev->dev,
+ &amdgpu_board_attr_group);
+ if (ret)
+ goto err_out0;
+ if (amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAXNODEPOWERLIMIT,
+ (void *)&tmp) != -EOPNOTSUPP) {
+ sysfs_add_file_to_group(&adev->dev->kobj,
+ &dev_attr_cur_node_power_limit.attr,
+ amdgpu_board_attr_group.name);
+ sysfs_add_file_to_group(&adev->dev->kobj, &dev_attr_node_power.attr,
+ amdgpu_board_attr_group.name);
+ sysfs_add_file_to_group(&adev->dev->kobj, &dev_attr_global_ppt_resid.attr,
+ amdgpu_board_attr_group.name);
+ sysfs_add_file_to_group(&adev->dev->kobj,
+ &dev_attr_max_node_power_limit.attr,
+ amdgpu_board_attr_group.name);
+ sysfs_add_file_to_group(&adev->dev->kobj, &dev_attr_npm_status.attr,
+ amdgpu_board_attr_group.name);
+ }
+ }
+
adev->pm.sysfs_initialized = true;
return 0;
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
index 768317ee1486..65c1d98af26c 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
@@ -263,10 +263,6 @@ struct amdgpu_dpm {
u32 voltage_response_time;
u32 backbias_response_time;
void *priv;
- u32 new_active_crtcs;
- int new_active_crtc_count;
- u32 current_active_crtcs;
- int current_active_crtc_count;
struct amdgpu_dpm_dynamic_state dyn_state;
struct amdgpu_dpm_fan fan;
u32 tdp_limit;
@@ -526,6 +522,8 @@ int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table);
ssize_t amdgpu_dpm_get_xcp_metrics(struct amdgpu_device *adev, int xcp_id,
void *table);
+ssize_t amdgpu_dpm_get_temp_metrics(struct amdgpu_device *adev,
+ enum smu_temp_metric_type type, void *table);
/**
* @get_pm_metrics: Get one snapshot of power management metrics from PMFW. The
@@ -613,5 +611,8 @@ ssize_t amdgpu_dpm_get_pm_policy_info(struct amdgpu_device *adev,
int amdgpu_dpm_reset_sdma(struct amdgpu_device *adev, uint32_t inst_mask);
bool amdgpu_dpm_reset_sdma_is_supported(struct amdgpu_device *adev);
int amdgpu_dpm_reset_vcn(struct amdgpu_device *adev, uint32_t inst_mask);
+bool amdgpu_dpm_reset_vcn_is_supported(struct amdgpu_device *adev);
+bool amdgpu_dpm_is_temp_metrics_supported(struct amdgpu_device *adev,
+ enum smu_temp_metric_type type);
#endif
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm_internal.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm_internal.h
index 5c2a89f0d5d5..cc6d7ba040e9 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm_internal.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm_internal.h
@@ -23,10 +23,6 @@
#ifndef __AMDGPU_DPM_INTERNAL_H__
#define __AMDGPU_DPM_INTERNAL_H__
-void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev);
-
-u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev);
-
-u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev);
+void amdgpu_dpm_get_display_cfg(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
index 307ebf7e3226..33eb85dd68e9 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
@@ -2299,7 +2299,7 @@ static void kv_apply_state_adjust_rules(struct amdgpu_device *adev,
if (pi->sys_info.nb_dpm_enable) {
force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) ||
- pi->video_start || (adev->pm.dpm.new_active_crtc_count >= 3) ||
+ pi->video_start || (adev->pm.pm_display_cfg.num_display >= 3) ||
pi->disable_nb_ps3_in_battery;
ps->dpm0_pg_nb_ps_lo = force_high ? 0x2 : 0x3;
ps->dpm0_pg_nb_ps_hi = 0x2;
@@ -2358,7 +2358,7 @@ static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev)
return 0;
force_high = ((mclk >= pi->sys_info.nbp_memory_clock[3]) ||
- (adev->pm.dpm.new_active_crtc_count >= 3) || pi->video_start);
+ (adev->pm.pm_display_cfg.num_display >= 3) || pi->video_start);
if (force_high) {
for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
index ea3ace882a10..c7ed0b457129 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
@@ -771,8 +771,7 @@ static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
int i;
struct amdgpu_ps *ps;
u32 ui_class;
- bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ?
- true : false;
+ bool single_display = adev->pm.pm_display_cfg.num_display < 2;
/* check if the vblank period is too short to adjust the mclk */
if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
@@ -945,9 +944,6 @@ static int amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
amdgpu_dpm_post_set_power_state(adev);
- adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
- adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
-
if (pp_funcs->force_performance_level) {
if (adev->pm.dpm.thermal_active) {
enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
@@ -968,7 +964,8 @@ void amdgpu_legacy_dpm_compute_clocks(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- amdgpu_dpm_get_active_displays(adev);
+ if (!adev->dc_enabled)
+ amdgpu_dpm_get_display_cfg(adev);
amdgpu_dpm_change_power_state_locked(adev);
}
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
index 52e732be59e3..cf9932e68055 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
@@ -3081,11 +3081,17 @@ static int si_get_vce_clock_voltage(struct amdgpu_device *adev,
static bool si_dpm_vblank_too_short(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
+ u32 vblank_time = adev->pm.pm_display_cfg.min_vblank_time;
/* we never hit the non-gddr5 limit so disable it */
u32 switch_limit = adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 0;
- if (vblank_time < switch_limit)
+ /* Consider zero vblank time too short and disable MCLK switching.
+ * Note that the vblank time is set to maximum when no displays are attached,
+ * so we'll still enable MCLK switching in that case.
+ */
+ if (vblank_time == 0)
+ return true;
+ else if (vblank_time < switch_limit)
return true;
else
return false;
@@ -3441,6 +3447,8 @@ static void rv770_get_engine_memory_ss(struct amdgpu_device *adev)
static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
struct amdgpu_ps *rps)
{
+ const struct amd_pp_display_configuration *display_cfg =
+ &adev->pm.pm_display_cfg;
struct si_ps *ps = si_get_ps(rps);
struct amdgpu_clock_and_voltage_limits *max_limits;
bool disable_mclk_switching = false;
@@ -3449,6 +3457,7 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
u16 vddc, vddci, min_vce_voltage = 0;
u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
u32 max_sclk = 0, max_mclk = 0;
+ u32 high_pixelclock_count = 0;
int i;
if (adev->asic_type == CHIP_HAINAN) {
@@ -3476,6 +3485,30 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
}
}
+ /* We define "high pixelclock" for SI as higher than necessary for 4K 30Hz.
+ * For example, 4K 60Hz and 1080p 144Hz fall into this category.
+ * Find the number of such displays connected.
+ */
+ for (i = 0; i < display_cfg->num_display; i++) {
+ /* The array only contains active displays. */
+ if (display_cfg->displays[i].pixel_clock > 297000)
+ high_pixelclock_count++;
+ }
+
+ /* These are some ad-hoc fixes to some issues observed with SI GPUs.
+ * They are necessary because we don't have something like dce_calcs
+ * for these GPUs to calculate bandwidth requirements.
+ */
+ if (high_pixelclock_count) {
+ /* On Oland, we observe some flickering when two 4K 60Hz
+ * displays are connected, possibly because voltage is too low.
+ * Raise the voltage by requiring a higher SCLK.
+ * (Voltage cannot be adjusted independently of SCLK.)
+ */
+ if (high_pixelclock_count > 1 && adev->asic_type == CHIP_OLAND)
+ disable_sclk_switching = true;
+ }
+
if (rps->vce_active) {
rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
@@ -3486,7 +3519,7 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
rps->ecclk = 0;
}
- if ((adev->pm.dpm.new_active_crtc_count > 1) ||
+ if ((adev->pm.pm_display_cfg.num_display > 1) ||
si_dpm_vblank_too_short(adev))
disable_mclk_switching = true;
@@ -3634,7 +3667,7 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
ps->performance_levels[i].mclk,
max_limits->vddc, &ps->performance_levels[i].vddc);
btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk,
- adev->clock.current_dispclk,
+ display_cfg->display_clk,
max_limits->vddc, &ps->performance_levels[i].vddc);
}
@@ -4159,16 +4192,16 @@ static void si_program_ds_registers(struct amdgpu_device *adev)
static void si_program_display_gap(struct amdgpu_device *adev)
{
+ const struct amd_pp_display_configuration *cfg = &adev->pm.pm_display_cfg;
u32 tmp, pipe;
- int i;
tmp = RREG32(mmCG_DISPLAY_GAP_CNTL) & ~(CG_DISPLAY_GAP_CNTL__DISP1_GAP_MASK | CG_DISPLAY_GAP_CNTL__DISP2_GAP_MASK);
- if (adev->pm.dpm.new_active_crtc_count > 0)
+ if (cfg->num_display > 0)
tmp |= R600_PM_DISPLAY_GAP_VBLANK_OR_WM << CG_DISPLAY_GAP_CNTL__DISP1_GAP__SHIFT;
else
tmp |= R600_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP1_GAP__SHIFT;
- if (adev->pm.dpm.new_active_crtc_count > 1)
+ if (cfg->num_display > 1)
tmp |= R600_PM_DISPLAY_GAP_VBLANK_OR_WM << CG_DISPLAY_GAP_CNTL__DISP2_GAP__SHIFT;
else
tmp |= R600_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP2_GAP__SHIFT;
@@ -4178,17 +4211,8 @@ static void si_program_display_gap(struct amdgpu_device *adev)
tmp = RREG32(DCCG_DISP_SLOW_SELECT_REG);
pipe = (tmp & DCCG_DISP1_SLOW_SELECT_MASK) >> DCCG_DISP1_SLOW_SELECT_SHIFT;
- if ((adev->pm.dpm.new_active_crtc_count > 0) &&
- (!(adev->pm.dpm.new_active_crtcs & (1 << pipe)))) {
- /* find the first active crtc */
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- if (adev->pm.dpm.new_active_crtcs & (1 << i))
- break;
- }
- if (i == adev->mode_info.num_crtc)
- pipe = 0;
- else
- pipe = i;
+ if (cfg->num_display > 0 && pipe != cfg->crtc_index) {
+ pipe = cfg->crtc_index;
tmp &= ~DCCG_DISP1_SLOW_SELECT_MASK;
tmp |= DCCG_DISP1_SLOW_SELECT(pipe);
@@ -4199,7 +4223,7 @@ static void si_program_display_gap(struct amdgpu_device *adev)
* This can be a problem on PowerXpress systems or if you want to use the card
* for offscreen rendering or compute if there are no crtcs enabled.
*/
- si_notify_smc_display_change(adev, adev->pm.dpm.new_active_crtc_count > 0);
+ si_notify_smc_display_change(adev, cfg->num_display > 0);
}
static void si_enable_spread_spectrum(struct amdgpu_device *adev, bool enable)
@@ -5508,7 +5532,7 @@ static int si_convert_power_level_to_smc(struct amdgpu_device *adev,
(pl->mclk <= pi->mclk_stutter_mode_threshold) &&
!eg_pi->uvd_enabled &&
(RREG32(mmDPG_PIPE_STUTTER_CONTROL) & DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK) &&
- (adev->pm.dpm.new_active_crtc_count <= 2)) {
+ (adev->pm.pm_display_cfg.num_display <= 2)) {
level->mcFlags |= SISLANDS_SMC_MC_STUTTER_EN;
}
@@ -5637,14 +5661,10 @@ static int si_populate_smc_t(struct amdgpu_device *adev,
static int si_disable_ulv(struct amdgpu_device *adev)
{
- struct si_power_info *si_pi = si_get_pi(adev);
- struct si_ulv_param *ulv = &si_pi->ulv;
-
- if (ulv->supported)
- return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
- 0 : -EINVAL;
+ PPSMC_Result r;
- return 0;
+ r = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableULV);
+ return (r == PPSMC_Result_OK) ? 0 : -EINVAL;
}
static bool si_is_state_ulv_compatible(struct amdgpu_device *adev,
@@ -5661,7 +5681,7 @@ static bool si_is_state_ulv_compatible(struct amdgpu_device *adev,
/* XXX validate against display requirements! */
for (i = 0; i < adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count; i++) {
- if (adev->clock.current_dispclk <=
+ if (adev->pm.pm_display_cfg.display_clk <=
adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].clk) {
if (ulv->pl.vddc <
adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].v)
@@ -5815,39 +5835,36 @@ static int si_upload_ulv_state(struct amdgpu_device *adev)
static int si_upload_smc_data(struct amdgpu_device *adev)
{
- struct amdgpu_crtc *amdgpu_crtc = NULL;
- int i;
+ const struct amd_pp_display_configuration *cfg = &adev->pm.pm_display_cfg;
+ u32 crtc_index = 0;
+ u32 mclk_change_block_cp_min = 0;
+ u32 mclk_change_block_cp_max = 0;
- if (adev->pm.dpm.new_active_crtc_count == 0)
- return 0;
+ /* When a display is plugged in, program these so that the SMC
+ * performs MCLK switching when it doesn't cause flickering.
+ * When no display is plugged in, there is no need to restrict
+ * MCLK switching, so program them to zero.
+ */
+ if (cfg->num_display) {
+ crtc_index = cfg->crtc_index;
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- if (adev->pm.dpm.new_active_crtcs & (1 << i)) {
- amdgpu_crtc = adev->mode_info.crtcs[i];
- break;
+ if (cfg->line_time_in_us) {
+ mclk_change_block_cp_min = 200 / cfg->line_time_in_us;
+ mclk_change_block_cp_max = 100 / cfg->line_time_in_us;
}
}
- if (amdgpu_crtc == NULL)
- return 0;
-
- if (amdgpu_crtc->line_time <= 0)
- return 0;
-
- if (si_write_smc_soft_register(adev,
- SI_SMC_SOFT_REGISTER_crtc_index,
- amdgpu_crtc->crtc_id) != PPSMC_Result_OK)
- return 0;
+ si_write_smc_soft_register(adev,
+ SI_SMC_SOFT_REGISTER_crtc_index,
+ crtc_index);
- if (si_write_smc_soft_register(adev,
- SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min,
- amdgpu_crtc->wm_high / amdgpu_crtc->line_time) != PPSMC_Result_OK)
- return 0;
+ si_write_smc_soft_register(adev,
+ SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min,
+ mclk_change_block_cp_min);
- if (si_write_smc_soft_register(adev,
- SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max,
- amdgpu_crtc->wm_low / amdgpu_crtc->line_time) != PPSMC_Result_OK)
- return 0;
+ si_write_smc_soft_register(adev,
+ SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max,
+ mclk_change_block_cp_max);
return 0;
}
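To make the new soft-register values concrete: for the same 1080p60 timing, one line takes 2200 / 148500 kHz, roughly 14 us, so with cfg->line_time_in_us == 14 the code programs:

    /* mclk_change_block_cp_min = 200 / 14 = 14 lines (~200 us window)
     * mclk_change_block_cp_max = 100 / 14 = 7 lines  (~100 us window)
     * With no display attached both stay 0, leaving MCLK switching
     * unrestricted, as the comment in si_upload_smc_data() notes. */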
@@ -7954,6 +7971,7 @@ static void si_dpm_print_power_state(void *handle,
amdgpu_dpm_dbg_print_class_info(adev, rps->class, rps->class2);
amdgpu_dpm_dbg_print_cap_info(adev, rps->caps);
drm_dbg(adev_to_drm(adev), "\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+ drm_dbg(adev_to_drm(adev), "\tvce evclk: %d ecclk: %d\n", rps->evclk, rps->ecclk);
for (i = 0; i < ps->performance_level_count; i++) {
pl = &ps->performance_levels[i];
drm_dbg(adev_to_drm(adev), "\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c
index 4e65ab9e931c..281a5e377aee 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c
@@ -172,20 +172,42 @@ PPSMC_Result amdgpu_si_send_msg_to_smc(struct amdgpu_device *adev,
{
u32 tmp;
int i;
+ int usec_timeout;
+
+ /* SMC seems to process some messages exceptionally slowly. */
+ switch (msg) {
+ case PPSMC_MSG_NoForcedLevel:
+ case PPSMC_MSG_SetEnabledLevels:
+ case PPSMC_MSG_SetForcedLevels:
+ case PPSMC_MSG_DisableULV:
+ case PPSMC_MSG_SwitchToSwState:
+ usec_timeout = 1000000; /* 1 sec */
+ break;
+ default:
+ usec_timeout = 200000; /* 200 ms */
+ break;
+ }
if (!amdgpu_si_is_smc_running(adev))
return PPSMC_Result_Failed;
WREG32(mmSMC_MESSAGE_0, msg);
- for (i = 0; i < adev->usec_timeout; i++) {
+ for (i = 0; i < usec_timeout; i++) {
tmp = RREG32(mmSMC_RESP_0);
if (tmp != 0)
break;
udelay(1);
}
- return (PPSMC_Result)RREG32(mmSMC_RESP_0);
+ tmp = RREG32(mmSMC_RESP_0);
+ if (tmp == 0) {
+ drm_warn(adev_to_drm(adev),
+ "%s timeout on message: %x (SMC_SCRATCH0: %x)\n",
+ __func__, msg, RREG32(mmSMC_SCRATCH0));
+ }
+
+ return (PPSMC_Result)tmp;
}
PPSMC_Result amdgpu_si_wait_for_smc_inactive(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
index b48a031cbba0..554492dfa3c0 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
@@ -1554,16 +1554,7 @@ static void pp_pm_compute_clocks(void *handle)
struct amdgpu_device *adev = hwmgr->adev;
if (!adev->dc_enabled) {
- amdgpu_dpm_get_active_displays(adev);
- adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
- adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
- adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
- /* we have issues with mclk switching with
- * refresh rates over 120 hz on the non-DC code.
- */
- if (adev->pm.pm_display_cfg.vrefresh > 120)
- adev->pm.pm_display_cfg.min_vblank_time = 0;
-
+ amdgpu_dpm_get_display_cfg(adev);
pp_display_configuration_change(handle,
&adev->pm.pm_display_cfg);
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
index 8d40ed0f0e83..ce166a7f8e42 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
@@ -563,8 +563,8 @@ bool atomctrl_is_voltage_controlled_by_gpio_v3(
PP_ASSERT_WITH_CODE((NULL != voltage_info),
"Could not find Voltage Table in BIOS.", return false;);
- ret = (NULL != atomctrl_lookup_voltage_type_v3
- (voltage_info, voltage_type, voltage_mode)) ? true : false;
+ ret = atomctrl_lookup_voltage_type_v3
+ (voltage_info, voltage_type, voltage_mode) != NULL;
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
index 9a821563bc8e..14ccd743ca1d 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
@@ -1032,7 +1032,7 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
data->clock_vol_info.vdd_dep_on_fclk;
uint32_t i, now, size = 0;
uint32_t min_freq, max_freq = 0;
- uint32_t ret = 0;
+ int ret = 0;
switch (type) {
case PP_SCLK:
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
index 5e43ad2b2956..d2dbd90bb427 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
@@ -2540,9 +2540,8 @@ static int fiji_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
static bool fiji_is_dpm_running(struct pp_hwmgr *hwmgr)
{
- return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
- CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
- ? true : false;
+ return PHM_READ_INDIRECT_FIELD(hwmgr->device,
+ CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON) == 1;
}
static int fiji_update_dpm_settings(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
index 17d2f5bff4a7..1f50f1e74c48 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
@@ -2655,9 +2655,8 @@ static int iceland_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
static bool iceland_is_dpm_running(struct pp_hwmgr *hwmgr)
{
- return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
- CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
- ? true : false;
+ return PHM_READ_INDIRECT_FIELD(hwmgr->device,
+ CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON) == 1;
}
const struct pp_smumgr_func iceland_smu_funcs = {
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
index ff6b563ecbf5..bf6d09572cfc 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
@@ -2578,9 +2578,8 @@ static int polaris10_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
static bool polaris10_is_dpm_running(struct pp_hwmgr *hwmgr)
{
- return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
- CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
- ? true : false;
+ return PHM_READ_INDIRECT_FIELD(hwmgr->device,
+ CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON) == 1;
}
static int polaris10_update_dpm_settings(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c
index baf51cd82a35..0d4cbe4113a0 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c
@@ -401,7 +401,7 @@ failed:
int smu7_check_fw_load_finish(struct pp_hwmgr *hwmgr, uint32_t fw_type)
{
struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
- uint32_t ret;
+ int ret;
ret = phm_wait_on_indirect_register(hwmgr, mmSMC_IND_INDEX_11,
smu_data->soft_regs_start + smum_get_offsetof(hwmgr,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
index 6fe6e6abb5d8..2e21f9d066cb 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
@@ -3139,9 +3139,8 @@ static int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
static bool tonga_is_dpm_running(struct pp_hwmgr *hwmgr)
{
- return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
- CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
- ? true : false;
+ return PHM_READ_INDIRECT_FIELD(hwmgr->device,
+ CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON) == 1;
}
static int tonga_update_dpm_settings(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 408f05dfab90..fb8086859857 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -1315,6 +1315,33 @@ static void smu_init_power_profile(struct smu_context *smu)
smu_power_profile_mode_get(smu, smu->power_profile_mode);
}
+void smu_feature_cap_set(struct smu_context *smu, enum smu_feature_cap_id fea_id)
+{
+ struct smu_feature_cap *fea_cap = &smu->fea_cap;
+
+ if (fea_id >= SMU_FEATURE_CAP_ID__COUNT)
+ return;
+
+ set_bit(fea_id, fea_cap->cap_map);
+}
+
+bool smu_feature_cap_test(struct smu_context *smu, enum smu_feature_cap_id fea_id)
+{
+ struct smu_feature_cap *fea_cap = &smu->fea_cap;
+
+ if (fea_id >= SMU_FEATURE_CAP_ID__COUNT)
+ return false;
+
+ return test_bit(fea_id, fea_cap->cap_map);
+}
+
+static void smu_feature_cap_init(struct smu_context *smu)
+{
+ struct smu_feature_cap *fea_cap = &smu->fea_cap;
+
+ bitmap_zero(fea_cap->cap_map, SMU_FEATURE_CAP_ID__COUNT);
+}
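Drivers set these bits once (for example from a post_init hook, as the smu_v13_0_6 changes below do) and the generic layer only tests them. A hedged usage sketch; the predicate name is hypothetical:

/* Illustrative flow: record a capability at init, gate the feature on it. */
static void example_init_caps(struct smu_context *smu)
{
	if (fw_supports_sdma_reset(smu))	/* hypothetical predicate */
		smu_feature_cap_set(smu, SMU_FEATURE_CAP_ID__SDMA_RESET);
}

static int example_try_sdma_reset(struct smu_context *smu, uint32_t inst_mask)
{
	if (!smu_feature_cap_test(smu, SMU_FEATURE_CAP_ID__SDMA_RESET))
		return -EOPNOTSUPP;		/* bit was never set */
	return smu_reset_sdma(smu, inst_mask);
}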
+
static int smu_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
@@ -1347,6 +1374,8 @@ static int smu_sw_init(struct amdgpu_ip_block *ip_block)
INIT_DELAYED_WORK(&smu->swctf_delayed_work,
smu_swctf_delayed_work_handler);
+ smu_feature_cap_init(smu);
+
ret = smu_smc_table_sw_init(smu);
if (ret) {
dev_err(adev->dev, "Failed to sw init smc table!\n");
@@ -1896,7 +1925,6 @@ static int smu_hw_init(struct amdgpu_ip_block *ip_block)
for (i = 0; i < adev->vcn.num_vcn_inst; i++)
smu_dpm_set_vcn_enable(smu, true, i);
smu_dpm_set_jpeg_enable(smu, true);
- smu_dpm_set_vpe_enable(smu, true);
smu_dpm_set_umsch_mm_enable(smu, true);
smu_set_mall_enable(smu);
smu_set_gfx_cgpg(smu, true);
@@ -2104,7 +2132,6 @@ static int smu_hw_fini(struct amdgpu_ip_block *ip_block)
}
smu_dpm_set_jpeg_enable(smu, false);
adev->jpeg.cur_state = AMD_PG_STATE_GATE;
- smu_dpm_set_vpe_enable(smu, false);
smu_dpm_set_umsch_mm_enable(smu, false);
if (!smu->pm_enabled)
@@ -3507,15 +3534,10 @@ bool smu_mode1_reset_is_support(struct smu_context *smu)
bool smu_link_reset_is_support(struct smu_context *smu)
{
- bool ret = false;
-
if (!smu->pm_enabled)
return false;
- if (smu->ppt_funcs && smu->ppt_funcs->link_reset_is_support)
- ret = smu->ppt_funcs->link_reset_is_support(smu);
-
- return ret;
+ return smu_feature_cap_test(smu, SMU_FEATURE_CAP_ID__LINK_RESET);
}
int smu_mode1_reset(struct smu_context *smu)
@@ -3831,6 +3853,51 @@ int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type,
return ret;
}
+static ssize_t smu_sys_get_temp_metrics(void *handle, enum smu_temp_metric_type type, void *table)
+{
+ struct smu_context *smu = handle;
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *tables = smu_table->tables;
+ enum smu_table_id table_id;
+
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
+
+ if (!smu->smu_temp.temp_funcs || !smu->smu_temp.temp_funcs->get_temp_metrics)
+ return -EOPNOTSUPP;
+
+ table_id = smu_metrics_get_temp_table_id(type);
+
+ if (table_id == SMU_TABLE_COUNT)
+ return -EINVAL;
+
+ /* If the request is to get size alone, return the cached table size */
+ if (!table && tables[table_id].cache.size)
+ return tables[table_id].cache.size;
+
+ if (smu_table_cache_is_valid(&tables[table_id])) {
+ memcpy(table, tables[table_id].cache.buffer,
+ tables[table_id].cache.size);
+ return tables[table_id].cache.size;
+ }
+
+ return smu->smu_temp.temp_funcs->get_temp_metrics(smu, type, table);
+}
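Passing a NULL table turns the call into a size query, so consumers can size a buffer before fetching. A hedged caller sketch (in-tree users go through the amd_pm_funcs table; the direct call here is only for brevity):

/* Illustrative two-step read: query size, allocate, then fetch the data. */
static int example_read_temp_metrics(void *handle)
{
	ssize_t size = smu_sys_get_temp_metrics(handle,
						SMU_TEMP_METRIC_GPUBOARD, NULL);
	void *buf;

	if (size < 0)
		return size;		/* unsupported or invalid type */

	buf = kzalloc(size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	size = smu_sys_get_temp_metrics(handle, SMU_TEMP_METRIC_GPUBOARD, buf);
	/* ... consume buf ... */
	kfree(buf);
	return size < 0 ? size : 0;
}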
+
+static bool smu_temp_metrics_is_supported(void *handle, enum smu_temp_metric_type type)
+{
+ struct smu_context *smu = handle;
+ bool ret = false;
+
+ if (!smu->pm_enabled)
+ return false;
+
+ if (smu->smu_temp.temp_funcs && smu->smu_temp.temp_funcs->temp_metrics_is_supported)
+ ret = smu->smu_temp.temp_funcs->temp_metrics_is_supported(smu, type);
+
+ return ret;
+}
+
static ssize_t smu_sys_get_xcp_metrics(void *handle, int xcp_id, void *table)
{
struct smu_context *smu = handle;
@@ -3903,6 +3970,8 @@ static const struct amd_pm_funcs swsmu_pm_funcs = {
.get_dpm_clock_table = smu_get_dpm_clock_table,
.get_smu_prv_buf_details = smu_get_prv_buffer_details,
.get_xcp_metrics = smu_sys_get_xcp_metrics,
+ .get_temp_metrics = smu_sys_get_temp_metrics,
+ .temp_metrics_is_supported = smu_temp_metrics_is_supported,
};
int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event,
@@ -4058,12 +4127,7 @@ int smu_send_rma_reason(struct smu_context *smu)
*/
bool smu_reset_sdma_is_supported(struct smu_context *smu)
{
- bool ret = false;
-
- if (smu->ppt_funcs && smu->ppt_funcs->reset_sdma_is_supported)
- ret = smu->ppt_funcs->reset_sdma_is_supported(smu);
-
- return ret;
+ return smu_feature_cap_test(smu, SMU_FEATURE_CAP_ID__SDMA_RESET);
}
int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask)
@@ -4076,6 +4140,11 @@ int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask)
return ret;
}
+bool smu_reset_vcn_is_supported(struct smu_context *smu)
+{
+ return smu_feature_cap_test(smu, SMU_FEATURE_CAP_ID__VCN_RESET);
+}
+
int smu_reset_vcn(struct smu_context *smu, uint32_t inst_mask)
{
if (smu->ppt_funcs && smu->ppt_funcs->dpm_reset_vcn)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index b52e194397e2..582c186d8b62 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -249,6 +249,14 @@ struct smu_user_dpm_profile {
tables[table_id].domain = d; \
} while (0)
+struct smu_table_cache {
+ void *buffer;
+ size_t size;
+ /* interval in ms */
+ uint32_t interval;
+ unsigned long last_cache_time;
+};
+
struct smu_table {
uint64_t size;
uint32_t align;
@@ -257,6 +265,7 @@ struct smu_table {
void *cpu_addr;
struct amdgpu_bo *bo;
uint32_t version;
+ struct smu_table_cache cache;
};
enum smu_perf_level_designation {
@@ -322,6 +331,9 @@ enum smu_table_id {
SMU_TABLE_ECCINFO,
SMU_TABLE_COMBO_PPTABLE,
SMU_TABLE_WIFIBAND,
+ SMU_TABLE_GPUBOARD_TEMP_METRICS,
+ SMU_TABLE_BASEBOARD_TEMP_METRICS,
+ SMU_TABLE_PMFW_SYSTEM_METRICS,
SMU_TABLE_COUNT,
};
@@ -396,6 +408,10 @@ struct smu_dpm_context {
struct smu_dpm_policy_ctxt *dpm_policies;
};
+struct smu_temp_context {
+ const struct smu_temp_funcs *temp_funcs;
+};
+
struct smu_power_gate {
bool uvd_gated;
bool vce_gated;
@@ -512,6 +528,17 @@ enum smu_fw_status {
*/
#define SMU_WBRF_EVENT_HANDLING_PACE 10
+enum smu_feature_cap_id {
+ SMU_FEATURE_CAP_ID__LINK_RESET = 0,
+ SMU_FEATURE_CAP_ID__SDMA_RESET,
+ SMU_FEATURE_CAP_ID__VCN_RESET,
+ SMU_FEATURE_CAP_ID__COUNT,
+};
+
+struct smu_feature_cap {
+ DECLARE_BITMAP(cap_map, SMU_FEATURE_CAP_ID__COUNT);
+};
+
struct smu_context {
struct amdgpu_device *adev;
struct amdgpu_irq_src irq_source;
@@ -529,10 +556,12 @@ struct smu_context {
struct smu_table_context smu_table;
struct smu_dpm_context smu_dpm;
struct smu_power_context smu_power;
+ struct smu_temp_context smu_temp;
struct smu_feature smu_feature;
struct amd_pp_display_configuration *display_config;
struct smu_baco_context smu_baco;
struct smu_temperature_range thermal_range;
+ struct smu_feature_cap fea_cap;
void *od_settings;
struct smu_umd_pstate_table pstate_table;
@@ -624,6 +653,28 @@ struct smu_context {
struct i2c_adapter;
/**
+ * struct smu_temp_funcs - Callbacks used to get temperature data.
+ */
+struct smu_temp_funcs {
+ /**
+ * @get_temp_metrics: Fill @table with the temperature metrics of the
+ * requested @type (baseboard/gpuboard).
+ * Return: Size of the data copied to @table
+ */
+ ssize_t (*get_temp_metrics)(struct smu_context *smu,
+ enum smu_temp_metric_type type, void *table);
+
+ /**
+ * @temp_metrics_is_supported: Check whether the given temperature metrics
+ * @type (baseboard/gpuboard) is supported.
+ * Return: true if supported, false otherwise
+ */
+ bool (*temp_metrics_is_supported)(struct smu_context *smu, enum smu_temp_metric_type type);
+};
+
+/**
* struct pptable_funcs - Callbacks used to interact with the SMU.
*/
struct pptable_funcs {
@@ -1234,11 +1285,6 @@ struct pptable_funcs {
bool (*mode1_reset_is_support)(struct smu_context *smu);
/**
- * @link_reset_is_support: Check if GPU supports link reset.
- */
- bool (*link_reset_is_support)(struct smu_context *smu);
-
- /**
* @mode1_reset: Perform mode1 reset.
*
* Complete GPU reset.
@@ -1388,10 +1434,6 @@ struct pptable_funcs {
* @reset_sdma: message SMU to soft reset sdma instance.
*/
int (*reset_sdma)(struct smu_context *smu, uint32_t inst_mask);
- /**
- * @reset_sdma_is_supported: Check if support resets the SDMA engine.
- */
- bool (*reset_sdma_is_supported)(struct smu_context *smu);
/**
* @reset_vcn: message SMU to soft reset vcn instance.
@@ -1622,6 +1664,71 @@ typedef struct {
struct smu_dpm_policy *smu_get_pm_policy(struct smu_context *smu,
enum pp_pm_policy p_type);
+static inline enum smu_table_id
+smu_metrics_get_temp_table_id(enum smu_temp_metric_type type)
+{
+ switch (type) {
+ case SMU_TEMP_METRIC_BASEBOARD:
+ return SMU_TABLE_BASEBOARD_TEMP_METRICS;
+ case SMU_TEMP_METRIC_GPUBOARD:
+ return SMU_TABLE_GPUBOARD_TEMP_METRICS;
+ default:
+ return SMU_TABLE_COUNT;
+ }
+}
+
+static inline void smu_table_cache_update_time(struct smu_table *table,
+ unsigned long time)
+{
+ table->cache.last_cache_time = time;
+}
+
+static inline bool smu_table_cache_is_valid(struct smu_table *table)
+{
+ if (!table->cache.buffer || !table->cache.last_cache_time ||
+ !table->cache.interval || !table->cache.size ||
+ time_after(jiffies,
+ table->cache.last_cache_time +
+ msecs_to_jiffies(table->cache.interval)))
+ return false;
+
+ return true;
+}
+
+static inline int smu_table_cache_init(struct smu_context *smu,
+ enum smu_table_id table_id, size_t size,
+ uint32_t cache_interval)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *tables = smu_table->tables;
+
+ tables[table_id].cache.buffer = kzalloc(size, GFP_KERNEL);
+ if (!tables[table_id].cache.buffer)
+ return -ENOMEM;
+
+ tables[table_id].cache.last_cache_time = 0;
+ tables[table_id].cache.interval = cache_interval;
+ tables[table_id].cache.size = size;
+
+ return 0;
+}
+
+static inline void smu_table_cache_fini(struct smu_context *smu,
+ enum smu_table_id table_id)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *tables = smu_table->tables;
+
+ if (tables[table_id].cache.buffer) {
+ kfree(tables[table_id].cache.buffer);
+ tables[table_id].cache.buffer = NULL;
+ tables[table_id].cache.last_cache_time = 0;
+ tables[table_id].cache.interval = 0;
+ }
+}
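A cache entry is fresh for cache.interval milliseconds after last_cache_time, with jiffies and time_after() doing the bookkeeping. A hedged consumer sketch of the refresh-on-expiry pattern these helpers enable (fetch_fn is hypothetical):

/* Illustrative read-through cache: refresh only when the entry has expired. */
static int example_read_cached(struct smu_context *smu, enum smu_table_id id,
			       int (*fetch_fn)(struct smu_context *smu),
			       void *out)
{
	struct smu_table *t = &smu->smu_table.tables[id];
	int ret;

	if (!smu_table_cache_is_valid(t)) {
		ret = fetch_fn(smu);	/* repopulates t->cache.buffer */
		if (ret)
			return ret;
		smu_table_cache_update_time(t, jiffies);
	}
	memcpy(out, t->cache.buffer, t->cache.size);
	return 0;
}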
+
#if !defined(SWSMU_CODE_LAYER_L2) && !defined(SWSMU_CODE_LAYER_L3) && !defined(SWSMU_CODE_LAYER_L4)
int smu_get_power_limit(void *handle,
uint32_t *limit,
@@ -1673,10 +1780,14 @@ int smu_send_rma_reason(struct smu_context *smu);
int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask);
bool smu_reset_sdma_is_supported(struct smu_context *smu);
int smu_reset_vcn(struct smu_context *smu, uint32_t inst_mask);
+bool smu_reset_vcn_is_supported(struct smu_context *smu);
int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type,
int level);
ssize_t smu_get_pm_policy_info(struct smu_context *smu,
enum pp_pm_policy p_type, char *sysbuf);
#endif
+
+void smu_feature_cap_set(struct smu_context *smu, enum smu_feature_cap_id fea_id);
+bool smu_feature_cap_test(struct smu_context *smu, enum smu_feature_cap_id fea_id);
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h
index 0a2ca544f4e3..bf6aa9620911 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h
@@ -135,7 +135,63 @@ typedef enum {
GFX_DVM_MARGIN_COUNT
} GFX_DVM_MARGIN_e;
-#define SMU_METRICS_TABLE_VERSION 0x13
+typedef enum {
+ SYSTEM_TEMP_UBB_FPGA,
+ SYSTEM_TEMP_UBB_FRONT,
+ SYSTEM_TEMP_UBB_BACK,
+ SYSTEM_TEMP_UBB_OAM7,
+ SYSTEM_TEMP_UBB_IBC,
+ SYSTEM_TEMP_UBB_UFPGA,
+ SYSTEM_TEMP_UBB_OAM1,
+ SYSTEM_TEMP_OAM_0_1_HSC,
+ SYSTEM_TEMP_OAM_2_3_HSC,
+ SYSTEM_TEMP_OAM_4_5_HSC,
+ SYSTEM_TEMP_OAM_6_7_HSC,
+ SYSTEM_TEMP_UBB_FPGA_0V72_VR,
+ SYSTEM_TEMP_UBB_FPGA_3V3_VR,
+ SYSTEM_TEMP_RETIMER_0_1_2_3_1V2_VR,
+ SYSTEM_TEMP_RETIMER_4_5_6_7_1V2_VR,
+ SYSTEM_TEMP_RETIMER_0_1_0V9_VR,
+ SYSTEM_TEMP_RETIMER_4_5_0V9_VR,
+ SYSTEM_TEMP_RETIMER_2_3_0V9_VR,
+ SYSTEM_TEMP_RETIMER_6_7_0V9_VR,
+ SYSTEM_TEMP_OAM_0_1_2_3_3V3_VR,
+ SYSTEM_TEMP_OAM_4_5_6_7_3V3_VR,
+ SYSTEM_TEMP_IBC_HSC,
+ SYSTEM_TEMP_IBC,
+ SYSTEM_TEMP_MAX_ENTRIES = 32
+} SYSTEM_TEMP_e;
+
+typedef enum {
+ NODE_TEMP_RETIMER,
+ NODE_TEMP_IBC_TEMP,
+ NODE_TEMP_IBC_2_TEMP,
+ NODE_TEMP_VDD18_VR_TEMP,
+ NODE_TEMP_04_HBM_B_VR_TEMP,
+ NODE_TEMP_04_HBM_D_VR_TEMP,
+ NODE_TEMP_MAX_TEMP_ENTRIES = 12
+} NODE_TEMP_e;
+
+typedef enum {
+ SVI_VDDCR_VDD0_TEMP,
+ SVI_VDDCR_VDD1_TEMP,
+ SVI_VDDCR_VDD2_TEMP,
+ SVI_VDDCR_VDD3_TEMP,
+ SVI_VDDCR_SOC_A_TEMP,
+ SVI_VDDCR_SOC_C_TEMP,
+ SVI_VDDCR_SOCIO_A_TEMP,
+ SVI_VDDCR_SOCIO_C_TEMP,
+ SVI_VDD_085_HBM_TEMP,
+ SVI_VDDCR_11_HBM_B_TEMP,
+ SVI_VDDCR_11_HBM_D_TEMP,
+ SVI_VDD_USR_TEMP,
+ SVI_VDDIO_11_E32_TEMP,
+ SVI_MAX_TEMP_ENTRIES, // 13
+} SVI_TEMP_e;
+
+#define SMU_METRICS_TABLE_VERSION 0x14
+
+#define SMU_SYSTEM_METRICS_TABLE_VERSION 0x1
typedef struct __attribute__((packed, aligned(4))) {
uint64_t AccumulationCounter;
@@ -231,11 +287,32 @@ typedef struct __attribute__((packed, aligned(4))) {
uint64_t GfxclkBelowHostLimitThmAcc[8];
uint64_t GfxclkBelowHostLimitTotalAcc[8];
uint64_t GfxclkLowUtilizationAcc[8];
+
+ uint32_t AidTemperature[4];
+ uint32_t XcdTemperature[8];
+ uint32_t HbmTemperature[8];
} MetricsTable_t;
#define SMU_VF_METRICS_TABLE_MASK (1 << 31)
#define SMU_VF_METRICS_TABLE_VERSION (0x6 | SMU_VF_METRICS_TABLE_MASK)
+#pragma pack(push, 4)
+typedef struct {
+ uint64_t AccumulationCounter; // Last update timestamp
+ uint16_t LabelVersion; // Defaults to 0.
+ uint16_t NodeIdentifier; // Unique identifier to each node on system.
+ int16_t SystemTemperatures[SYSTEM_TEMP_MAX_ENTRIES]; // Signed integer temperature value in Celsius, unused fields are set to 0xFFFF
+ int16_t NodeTemperatures[NODE_TEMP_MAX_TEMP_ENTRIES]; // Signed integer temperature value in Celsius, unused fields are set to 0xFFFF
+ int16_t VrTemperatures[SVI_MAX_TEMP_ENTRIES]; // Signed integer temperature value in Celsius
+ int16_t spare[7];
+
+ //NPM: NODE POWER MANAGEMENT
+ uint32_t NodePowerLimit;
+ uint32_t NodePower;
+ uint32_t GlobalPPTResidencyAcc;
+} SystemMetricsTable_t;
+#pragma pack(pop)
+
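Unused slots in the temperature arrays are set to 0xFFFF, which reads back as -1 through the signed int16_t fields. A hedged sketch of skipping those slots:

/* Illustrative scan: -1 marks a sensor slot not populated on this board. */
static void example_walk_system_temps(const SystemMetricsTable_t *m)
{
	int i;

	for (i = 0; i < SYSTEM_TEMP_MAX_ENTRIES; i++) {
		if (m->SystemTemperatures[i] == -1)
			continue;	/* sensor absent */
		pr_debug("system temp[%d] = %d C\n",
			 i, m->SystemTemperatures[i]);
	}
}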
typedef struct __attribute__((packed, aligned(4))) {
uint32_t AccumulationCounter;
uint32_t InstGfxclk_TargFreq;
@@ -287,6 +364,9 @@ typedef struct {
// General info
uint32_t pldmVersion[2];
+
+ //Node Power Limit
+ uint32_t MaxNodePowerLimit;
} StaticMetricsTable_t;
#pragma pack(pop)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h
index e1f490b6ce64..4b066c42e0ec 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h
@@ -116,7 +116,12 @@
#define PPSMC_MSG_DumpErrorRecord 0x57
#define PPSMC_MSG_EraseRasTable 0x58
#define PPSMC_MSG_GetStaticMetricsTable 0x59
-#define PPSMC_Message_Count 0x5A
+#define PPSMC_MSG_ResetVfArbitersByIndex 0x5A
+#define PPSMC_MSG_GetBadPageSeverity 0x5B
+#define PPSMC_MSG_GetSystemMetricsTable 0x5C
+#define PPSMC_MSG_GetSystemMetricsVersion 0x5D
+#define PPSMC_MSG_ResetVCN 0x5E
+#define PPSMC_Message_Count 0x5F
//PPSMC Reset Types for driver msg argument
#define PPSMC_RESET_TYPE_DRIVER_MODE_1_RESET 0x1
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
index 41f268313613..63a088ef7169 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
@@ -94,9 +94,9 @@
#define PPSMC_MSG_RmaDueToBadPageThreshold 0x43
#define PPSMC_MSG_SetThrottlingPolicy 0x44
#define PPSMC_MSG_ResetSDMA 0x4D
-#define PPSMC_MSG_ResetVCN 0x4E
#define PPSMC_MSG_GetStaticMetricsTable 0x59
-#define PPSMC_Message_Count 0x5A
+#define PPSMC_MSG_ResetVCN 0x5B
+#define PPSMC_Message_Count 0x5C
//PPSMC Reset Types for driver msg argument
#define PPSMC_RESET_TYPE_DRIVER_MODE_1_RESET 0x1
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
index d7a9e41820fa..2256c77da636 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
@@ -278,7 +278,8 @@
__SMU_DUMMY_MAP(MALLPowerState), \
__SMU_DUMMY_MAP(ResetSDMA), \
__SMU_DUMMY_MAP(ResetVCN), \
- __SMU_DUMMY_MAP(GetStaticMetricsTable),
+ __SMU_DUMMY_MAP(GetStaticMetricsTable), \
+ __SMU_DUMMY_MAP(GetSystemMetricsTable),
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
@@ -469,6 +470,7 @@ enum smu_feature_mask {
/* Message category flags */
#define SMU_MSG_VF_FLAG (1U << 0)
#define SMU_MSG_RAS_PRI (1U << 1)
+#define SMU_MSG_NO_PRECHECK (1U << 2)
/* Firmware capability flags */
#define SMU_FW_CAP_RAS_PRI (1U << 0)
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v13_0_0_pptable.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_0_pptable.h
index 251ed011b3b0..251ed011b3b0 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v13_0_0_pptable.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_0_pptable.h
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index 9ad46f545d15..4fff78da81ff 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -1745,10 +1745,10 @@ static int arcturus_i2c_control_init(struct smu_context *smu)
snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i);
i2c_set_adapdata(control, smu_i2c);
- res = i2c_add_adapter(control);
+ res = devm_i2c_add_adapter(adev->dev, control);
if (res) {
DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
- goto Out_err;
+ return res;
}
}
@@ -1756,27 +1756,12 @@ static int arcturus_i2c_control_init(struct smu_context *smu)
adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[1].adapter;
return 0;
-Out_err:
- for ( ; i >= 0; i--) {
- struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
- struct i2c_adapter *control = &smu_i2c->adapter;
-
- i2c_del_adapter(control);
- }
- return res;
}
static void arcturus_i2c_control_fini(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- int i;
-
- for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
- struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
- struct i2c_adapter *control = &smu_i2c->adapter;
- i2c_del_adapter(control);
- }
adev->pm.ras_eeprom_i2c_bus = NULL;
adev->pm.fru_eeprom_i2c_bus = NULL;
}
@@ -1897,7 +1882,7 @@ static ssize_t arcturus_get_gpu_metrics(struct smu_context *smu,
ret = smu_cmn_get_metrics_table(smu,
&metrics,
- true);
+ false);
if (ret)
return ret;
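These i2c conversions work because devm_i2c_add_adapter() registers a devres action that deletes the adapter when the device goes away, which is why both the error-unwinding loops and the _fini teardown loops disappear. A hedged sketch of the idiom:

/* Illustrative devm-managed registration: no explicit teardown path. */
static int example_register_bus(struct device *dev, struct i2c_adapter *adap)
{
	int res = devm_i2c_add_adapter(dev, adap);

	if (res)
		return res;	/* nothing to undo; devres owns the adapter */

	/* The adapter is deleted automatically when dev is released. */
	return 0;
}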
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index aac202d0c30e..0028f10ead42 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -3145,10 +3145,10 @@ static int navi10_i2c_control_init(struct smu_context *smu)
control->quirks = &navi10_i2c_control_quirks;
i2c_set_adapdata(control, smu_i2c);
- res = i2c_add_adapter(control);
+ res = devm_i2c_add_adapter(adev->dev, control);
if (res) {
DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
- goto Out_err;
+ return res;
}
}
@@ -3156,27 +3156,12 @@ static int navi10_i2c_control_init(struct smu_context *smu)
adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[1].adapter;
return 0;
-Out_err:
- for ( ; i >= 0; i--) {
- struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
- struct i2c_adapter *control = &smu_i2c->adapter;
-
- i2c_del_adapter(control);
- }
- return res;
}
static void navi10_i2c_control_fini(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- int i;
- for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
- struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
- struct i2c_adapter *control = &smu_i2c->adapter;
-
- i2c_del_adapter(control);
- }
adev->pm.ras_eeprom_i2c_bus = NULL;
adev->pm.fru_eeprom_i2c_bus = NULL;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index d57591509aed..31c2c0386b1f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -2648,10 +2648,10 @@ static int sienna_cichlid_i2c_control_init(struct smu_context *smu)
control->quirks = &sienna_cichlid_i2c_control_quirks;
i2c_set_adapdata(control, smu_i2c);
- res = i2c_add_adapter(control);
+ res = devm_i2c_add_adapter(adev->dev, control);
if (res) {
DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
- goto Out_err;
+ return res;
}
}
/* assign the buses used for the FRU EEPROM and RAS EEPROM */
@@ -2660,27 +2660,12 @@ static int sienna_cichlid_i2c_control_init(struct smu_context *smu)
adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
return 0;
-Out_err:
- for ( ; i >= 0; i--) {
- struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
- struct i2c_adapter *control = &smu_i2c->adapter;
-
- i2c_del_adapter(control);
- }
- return res;
}
static void sienna_cichlid_i2c_control_fini(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- int i;
- for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
- struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
- struct i2c_adapter *control = &smu_i2c->adapter;
-
- i2c_del_adapter(control);
- }
adev->pm.ras_eeprom_i2c_bus = NULL;
adev->pm.fru_eeprom_i2c_bus = NULL;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index e97b0cf19197..3baf20f4c373 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -470,7 +470,7 @@ static int renoir_od_edit_dpm_table(struct smu_context *smu,
static int renoir_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
{
uint32_t min = 0, max = 0;
- uint32_t ret = 0;
+ int ret = 0;
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_GetMinGfxclkFrequency,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index c63d2e28954d..18d5d0704509 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -1641,33 +1641,22 @@ static int aldebaran_i2c_control_init(struct smu_context *smu)
control->quirks = &aldebaran_i2c_control_quirks;
i2c_set_adapdata(control, smu_i2c);
- res = i2c_add_adapter(control);
+ res = devm_i2c_add_adapter(adev->dev, control);
if (res) {
DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
- goto Out_err;
+ return res;
}
adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
return 0;
-Out_err:
- i2c_del_adapter(control);
-
- return res;
}
static void aldebaran_i2c_control_fini(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- int i;
-
- for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
- struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
- struct i2c_adapter *control = &smu_i2c->adapter;
- i2c_del_adapter(control);
- }
adev->pm.ras_eeprom_i2c_bus = NULL;
adev->pm.fru_eeprom_i2c_bus = NULL;
}
@@ -1781,7 +1770,7 @@ static ssize_t aldebaran_get_gpu_metrics(struct smu_context *smu,
ret = smu_cmn_get_metrics_table(smu,
&metrics,
- true);
+ false);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index e084ed99ec0e..c1062e5f0393 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -2825,10 +2825,10 @@ static int smu_v13_0_0_i2c_control_init(struct smu_context *smu)
control->quirks = &smu_v13_0_0_i2c_control_quirks;
i2c_set_adapdata(control, smu_i2c);
- res = i2c_add_adapter(control);
+ res = devm_i2c_add_adapter(adev->dev, control);
if (res) {
DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
- goto Out_err;
+ return res;
}
}
@@ -2838,27 +2838,12 @@ static int smu_v13_0_0_i2c_control_init(struct smu_context *smu)
adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
return 0;
-Out_err:
- for ( ; i >= 0; i--) {
- struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
- struct i2c_adapter *control = &smu_i2c->adapter;
-
- i2c_del_adapter(control);
- }
- return res;
}
static void smu_v13_0_0_i2c_control_fini(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- int i;
- for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
- struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
- struct i2c_adapter *control = &smu_i2c->adapter;
-
- i2c_del_adapter(control);
- }
adev->pm.ras_eeprom_i2c_bus = NULL;
adev->pm.fru_eeprom_i2c_bus = NULL;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
index 02a455a31c25..cb3fea9e8cf3 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
@@ -83,7 +83,6 @@ const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[SMU_FEATURE_COUNT] =
SMU_13_0_12_FEA_MAP(SMU_FEATURE_PIT_BIT, FEATURE_PIT),
};
-// clang-format off
const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0),
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
@@ -106,7 +105,7 @@ const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1),
MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0),
MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 1),
- MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDriverReset, SMU_MSG_RAS_PRI),
+ MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDriverReset, SMU_MSG_RAS_PRI | SMU_MSG_NO_PRECHECK),
MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0),
MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0),
MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize, 0),
@@ -137,9 +136,57 @@ const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(RmaDueToBadPageThreshold, PPSMC_MSG_RmaDueToBadPageThreshold, 0),
MSG_MAP(SetThrottlingPolicy, PPSMC_MSG_SetThrottlingPolicy, 0),
MSG_MAP(ResetSDMA, PPSMC_MSG_ResetSDMA, 0),
+ MSG_MAP(ResetVCN, PPSMC_MSG_ResetVCN, 0),
MSG_MAP(GetStaticMetricsTable, PPSMC_MSG_GetStaticMetricsTable, 1),
+ MSG_MAP(GetSystemMetricsTable, PPSMC_MSG_GetSystemMetricsTable, 1),
};
+int smu_v13_0_12_tables_init(struct smu_context *smu)
+{
+ struct amdgpu_baseboard_temp_metrics_v1_0 *baseboard_temp_metrics;
+ struct amdgpu_gpuboard_temp_metrics_v1_0 *gpuboard_temp_metrics;
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *tables = smu_table->tables;
+ struct smu_table_cache *cache;
+ int ret;
+
+ ret = smu_table_cache_init(smu, SMU_TABLE_PMFW_SYSTEM_METRICS,
+ smu_v13_0_12_get_system_metrics_size(), 5);
+ if (ret)
+ return ret;
+
+ ret = smu_table_cache_init(smu, SMU_TABLE_BASEBOARD_TEMP_METRICS,
+ sizeof(*baseboard_temp_metrics), 50);
+ if (ret) {
+ smu_table_cache_fini(smu, SMU_TABLE_PMFW_SYSTEM_METRICS);
+ return ret;
+ }
+ /* Initialize base board temperature metrics */
+ cache = &(tables[SMU_TABLE_BASEBOARD_TEMP_METRICS].cache);
+ baseboard_temp_metrics =
+ (struct amdgpu_baseboard_temp_metrics_v1_0 *) cache->buffer;
+ smu_cmn_init_baseboard_temp_metrics(baseboard_temp_metrics, 1, 0);
+ /* Initialize GPU board temperature metrics */
+ ret = smu_table_cache_init(smu, SMU_TABLE_GPUBOARD_TEMP_METRICS,
+ sizeof(*gpuboard_temp_metrics), 50);
+ if (ret) {
+ smu_table_cache_fini(smu, SMU_TABLE_PMFW_SYSTEM_METRICS);
+ smu_table_cache_fini(smu, SMU_TABLE_BASEBOARD_TEMP_METRICS);
+ return ret;
+ }
+ cache = &(tables[SMU_TABLE_GPUBOARD_TEMP_METRICS].cache);
+ gpuboard_temp_metrics = (struct amdgpu_gpuboard_temp_metrics_v1_0 *)cache->buffer;
+ smu_cmn_init_gpuboard_temp_metrics(gpuboard_temp_metrics, 1, 0);
+
+ return 0;
+}
+
+void smu_v13_0_12_tables_fini(struct smu_context *smu)
+{
+ smu_table_cache_fini(smu, SMU_TABLE_BASEBOARD_TEMP_METRICS);
+ smu_table_cache_fini(smu, SMU_TABLE_GPUBOARD_TEMP_METRICS);
+ smu_table_cache_fini(smu, SMU_TABLE_PMFW_SYSTEM_METRICS);
+}
+
static int smu_v13_0_12_get_enabled_mask(struct smu_context *smu,
uint64_t *feature_mask)
{
@@ -187,6 +234,11 @@ int smu_v13_0_12_get_max_metrics_size(void)
return max(sizeof(StaticMetricsTable_t), sizeof(MetricsTable_t));
}
+size_t smu_v13_0_12_get_system_metrics_size(void)
+{
+ return sizeof(SystemMetricsTable_t);
+}
+
static void smu_v13_0_12_init_xgmi_data(struct smu_context *smu,
StaticMetricsTable_t *static_metrics)
{
@@ -220,7 +272,7 @@ int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu)
struct PPTable_t *pptable =
(struct PPTable_t *)smu_table->driver_pptable;
uint32_t table_version;
- int ret, i;
+ int ret, i, n;
if (!pptable->Init) {
ret = smu_v13_0_6_get_static_metrics_table(smu);
@@ -259,6 +311,22 @@ int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu)
/* use AID0 serial number by default */
pptable->PublicSerialNumber_AID =
static_metrics->PublicSerialNumber_AID[0];
+
+ amdgpu_device_set_uid(smu->adev->uid_info, AMDGPU_UID_TYPE_SOC,
+ 0, pptable->PublicSerialNumber_AID);
+ n = ARRAY_SIZE(static_metrics->PublicSerialNumber_AID);
+ for (i = 0; i < n; i++) {
+ amdgpu_device_set_uid(
+ smu->adev->uid_info, AMDGPU_UID_TYPE_AID, i,
+ static_metrics->PublicSerialNumber_AID[i]);
+ }
+ n = ARRAY_SIZE(static_metrics->PublicSerialNumber_XCD);
+ for (i = 0; i < n; i++) {
+ amdgpu_device_set_uid(
+ smu->adev->uid_info, AMDGPU_UID_TYPE_XCD, i,
+ static_metrics->PublicSerialNumber_XCD[i]);
+ }
+
ret = smu_v13_0_12_fru_get_product_info(smu, static_metrics);
if (ret)
return ret;
@@ -274,6 +342,9 @@ int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu)
static_metrics->pldmVersion[0] != 0xFFFFFFFF)
smu->adev->firmware.pldm_version =
static_metrics->pldmVersion[0];
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(NPM_METRICS)))
+ pptable->MaxNodePowerLimit =
+ SMUQ10_ROUND(static_metrics->MaxNodePowerLimit);
smu_v13_0_12_init_xgmi_data(smu, static_metrics);
pptable->Init = true;
}
@@ -359,6 +430,292 @@ int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu,
return 0;
}
+static int smu_v13_0_12_get_system_metrics_table(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *table = &smu_table->driver_table;
+ struct smu_table *tables = smu_table->tables;
+ struct smu_table *sys_table;
+ int ret;
+
+ sys_table = &tables[SMU_TABLE_PMFW_SYSTEM_METRICS];
+ if (smu_table_cache_is_valid(sys_table))
+ return 0;
+
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSystemMetricsTable, NULL);
+ if (ret) {
+ dev_info(smu->adev->dev,
+ "Failed to export system metrics table!\n");
+ return ret;
+ }
+
+ amdgpu_asic_invalidate_hdp(smu->adev, NULL);
+ smu_table_cache_update_time(sys_table, jiffies);
+ memcpy(sys_table->cache.buffer, table->cpu_addr,
+ smu_v13_0_12_get_system_metrics_size());
+
+ return 0;
+}
+
+static enum amdgpu_node_temp smu_v13_0_12_get_node_sensor_type(NODE_TEMP_e type)
+{
+ switch (type) {
+ case NODE_TEMP_RETIMER:
+ return AMDGPU_RETIMER_X_TEMP;
+ case NODE_TEMP_IBC_TEMP:
+ return AMDGPU_OAM_X_IBC_TEMP;
+ case NODE_TEMP_IBC_2_TEMP:
+ return AMDGPU_OAM_X_IBC_2_TEMP;
+ case NODE_TEMP_VDD18_VR_TEMP:
+ return AMDGPU_OAM_X_VDD18_VR_TEMP;
+ case NODE_TEMP_04_HBM_B_VR_TEMP:
+ return AMDGPU_OAM_X_04_HBM_B_VR_TEMP;
+ case NODE_TEMP_04_HBM_D_VR_TEMP:
+ return AMDGPU_OAM_X_04_HBM_D_VR_TEMP;
+ default:
+ return -EINVAL;
+ }
+}
+
+static enum amdgpu_vr_temp smu_v13_0_12_get_vr_sensor_type(SVI_TEMP_e type)
+{
+ switch (type) {
+ case SVI_VDDCR_VDD0_TEMP:
+ return AMDGPU_VDDCR_VDD0_TEMP;
+ case SVI_VDDCR_VDD1_TEMP:
+ return AMDGPU_VDDCR_VDD1_TEMP;
+ case SVI_VDDCR_VDD2_TEMP:
+ return AMDGPU_VDDCR_VDD2_TEMP;
+ case SVI_VDDCR_VDD3_TEMP:
+ return AMDGPU_VDDCR_VDD3_TEMP;
+ case SVI_VDDCR_SOC_A_TEMP:
+ return AMDGPU_VDDCR_SOC_A_TEMP;
+ case SVI_VDDCR_SOC_C_TEMP:
+ return AMDGPU_VDDCR_SOC_C_TEMP;
+ case SVI_VDDCR_SOCIO_A_TEMP:
+ return AMDGPU_VDDCR_SOCIO_A_TEMP;
+ case SVI_VDDCR_SOCIO_C_TEMP:
+ return AMDGPU_VDDCR_SOCIO_C_TEMP;
+ case SVI_VDD_085_HBM_TEMP:
+ return AMDGPU_VDD_085_HBM_TEMP;
+ case SVI_VDDCR_11_HBM_B_TEMP:
+ return AMDGPU_VDDCR_11_HBM_B_TEMP;
+ case SVI_VDDCR_11_HBM_D_TEMP:
+ return AMDGPU_VDDCR_11_HBM_D_TEMP;
+ case SVI_VDD_USR_TEMP:
+ return AMDGPU_VDD_USR_TEMP;
+ case SVI_VDDIO_11_E32_TEMP:
+ return AMDGPU_VDDIO_11_E32_TEMP;
+ default:
+ return -EINVAL;
+ }
+}
+
+static enum amdgpu_system_temp smu_v13_0_12_get_system_sensor_type(SYSTEM_TEMP_e type)
+{
+ switch (type) {
+ case SYSTEM_TEMP_UBB_FPGA:
+ return AMDGPU_UBB_FPGA_TEMP;
+ case SYSTEM_TEMP_UBB_FRONT:
+ return AMDGPU_UBB_FRONT_TEMP;
+ case SYSTEM_TEMP_UBB_BACK:
+ return AMDGPU_UBB_BACK_TEMP;
+ case SYSTEM_TEMP_UBB_OAM7:
+ return AMDGPU_UBB_OAM7_TEMP;
+ case SYSTEM_TEMP_UBB_IBC:
+ return AMDGPU_UBB_IBC_TEMP;
+ case SYSTEM_TEMP_UBB_UFPGA:
+ return AMDGPU_UBB_UFPGA_TEMP;
+ case SYSTEM_TEMP_UBB_OAM1:
+ return AMDGPU_UBB_OAM1_TEMP;
+ case SYSTEM_TEMP_OAM_0_1_HSC:
+ return AMDGPU_OAM_0_1_HSC_TEMP;
+ case SYSTEM_TEMP_OAM_2_3_HSC:
+ return AMDGPU_OAM_2_3_HSC_TEMP;
+ case SYSTEM_TEMP_OAM_4_5_HSC:
+ return AMDGPU_OAM_4_5_HSC_TEMP;
+ case SYSTEM_TEMP_OAM_6_7_HSC:
+ return AMDGPU_OAM_6_7_HSC_TEMP;
+ case SYSTEM_TEMP_UBB_FPGA_0V72_VR:
+ return AMDGPU_UBB_FPGA_0V72_VR_TEMP;
+ case SYSTEM_TEMP_UBB_FPGA_3V3_VR:
+ return AMDGPU_UBB_FPGA_3V3_VR_TEMP;
+ case SYSTEM_TEMP_RETIMER_0_1_2_3_1V2_VR:
+ return AMDGPU_RETIMER_0_1_2_3_1V2_VR_TEMP;
+ case SYSTEM_TEMP_RETIMER_4_5_6_7_1V2_VR:
+ return AMDGPU_RETIMER_4_5_6_7_1V2_VR_TEMP;
+ case SYSTEM_TEMP_RETIMER_0_1_0V9_VR:
+ return AMDGPU_RETIMER_0_1_0V9_VR_TEMP;
+ case SYSTEM_TEMP_RETIMER_4_5_0V9_VR:
+ return AMDGPU_RETIMER_4_5_0V9_VR_TEMP;
+ case SYSTEM_TEMP_RETIMER_2_3_0V9_VR:
+ return AMDGPU_RETIMER_2_3_0V9_VR_TEMP;
+ case SYSTEM_TEMP_RETIMER_6_7_0V9_VR:
+ return AMDGPU_RETIMER_6_7_0V9_VR_TEMP;
+ case SYSTEM_TEMP_OAM_0_1_2_3_3V3_VR:
+ return AMDGPU_OAM_0_1_2_3_3V3_VR_TEMP;
+ case SYSTEM_TEMP_OAM_4_5_6_7_3V3_VR:
+ return AMDGPU_OAM_4_5_6_7_3V3_VR_TEMP;
+ case SYSTEM_TEMP_IBC_HSC:
+ return AMDGPU_IBC_HSC_TEMP;
+ case SYSTEM_TEMP_IBC:
+ return AMDGPU_IBC_TEMP;
+ default:
+ return -EINVAL;
+ }
+}
+
+static bool smu_v13_0_12_is_temp_metrics_supported(struct smu_context *smu,
+ enum smu_temp_metric_type type)
+{
+ switch (type) {
+ case SMU_TEMP_METRIC_BASEBOARD:
+ if (smu->adev->gmc.xgmi.physical_node_id == 0 &&
+ smu->adev->gmc.xgmi.num_physical_nodes > 1 &&
+ smu_v13_0_6_cap_supported(smu, SMU_CAP(TEMP_METRICS)))
+ return true;
+ break;
+ case SMU_TEMP_METRIC_GPUBOARD:
+ return smu_v13_0_6_cap_supported(smu, SMU_CAP(TEMP_METRICS));
+ default:
+ break;
+ }
+
+ return false;
+}
+
+int smu_v13_0_12_get_npm_data(struct smu_context *smu,
+ enum amd_pp_sensors sensor,
+ uint32_t *value)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct PPTable_t *pptable =
+ (struct PPTable_t *)smu_table->driver_pptable;
+ struct smu_table *tables = smu_table->tables;
+ SystemMetricsTable_t *metrics;
+ struct smu_table *sys_table;
+ int ret;
+
+ if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(NPM_METRICS)))
+ return -EOPNOTSUPP;
+
+ if (sensor == AMDGPU_PP_SENSOR_MAXNODEPOWERLIMIT) {
+ *value = pptable->MaxNodePowerLimit;
+ return 0;
+ }
+
+ ret = smu_v13_0_12_get_system_metrics_table(smu);
+ if (ret)
+ return ret;
+
+ sys_table = &tables[SMU_TABLE_PMFW_SYSTEM_METRICS];
+ metrics = (SystemMetricsTable_t *)sys_table->cache.buffer;
+
+ switch (sensor) {
+ case AMDGPU_PP_SENSOR_NODEPOWERLIMIT:
+ *value = SMUQ10_ROUND(metrics->NodePowerLimit);
+ break;
+ case AMDGPU_PP_SENSOR_NODEPOWER:
+ *value = SMUQ10_ROUND(metrics->NodePower);
+ break;
+ case AMDGPU_PP_SENSOR_GPPTRESIDENCY:
+ *value = SMUQ10_ROUND(metrics->GlobalPPTResidencyAcc);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static ssize_t smu_v13_0_12_get_temp_metrics(struct smu_context *smu,
+ enum smu_temp_metric_type type, void *table)
+{
+ struct amdgpu_baseboard_temp_metrics_v1_0 *baseboard_temp_metrics;
+ struct amdgpu_gpuboard_temp_metrics_v1_0 *gpuboard_temp_metrics;
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *tables = smu_table->tables;
+ SystemMetricsTable_t *metrics;
+ struct smu_table *data_table;
+ struct smu_table *sys_table;
+ int ret, sensor_type;
+ u32 idx, sensors;
+ ssize_t size;
+
+ if (type == SMU_TEMP_METRIC_BASEBOARD) {
+ /* Initialize base board temperature metrics */
+ data_table =
+ &smu->smu_table.tables[SMU_TABLE_BASEBOARD_TEMP_METRICS];
+ baseboard_temp_metrics =
+ (struct amdgpu_baseboard_temp_metrics_v1_0 *)
+ data_table->cache.buffer;
+ size = sizeof(*baseboard_temp_metrics);
+ } else {
+ data_table =
+ &smu->smu_table.tables[SMU_TABLE_GPUBOARD_TEMP_METRICS];
+ gpuboard_temp_metrics =
+ (struct amdgpu_gpuboard_temp_metrics_v1_0 *)
+ data_table->cache.buffer;
+ size = sizeof(*gpuboard_temp_metrics);
+ }
+
+ ret = smu_v13_0_12_get_system_metrics_table(smu);
+ if (ret)
+ return ret;
+
+ sys_table = &tables[SMU_TABLE_PMFW_SYSTEM_METRICS];
+ metrics = (SystemMetricsTable_t *)sys_table->cache.buffer;
+ smu_table_cache_update_time(data_table, jiffies);
+
+ if (type == SMU_TEMP_METRIC_GPUBOARD) {
+ gpuboard_temp_metrics->accumulation_counter = metrics->AccumulationCounter;
+ gpuboard_temp_metrics->label_version = metrics->LabelVersion;
+ gpuboard_temp_metrics->node_id = metrics->NodeIdentifier;
+
+ idx = 0;
+ for (sensors = 0; sensors < NODE_TEMP_MAX_TEMP_ENTRIES; sensors++) {
+ if (metrics->NodeTemperatures[sensors] != -1) {
+ sensor_type = smu_v13_0_12_get_node_sensor_type(sensors);
+ gpuboard_temp_metrics->node_temp[idx] =
+ ((int)metrics->NodeTemperatures[sensors]) & 0xFFFFFF;
+ gpuboard_temp_metrics->node_temp[idx] |= (sensor_type << 24);
+ idx++;
+ }
+ }
+
+ idx = 0;
+
+ for (sensors = 0; sensors < SVI_MAX_TEMP_ENTRIES; sensors++) {
+ if (metrics->VrTemperatures[sensors] != -1) {
+ sensor_type = smu_v13_0_12_get_vr_sensor_type(sensors);
+ gpuboard_temp_metrics->vr_temp[idx] =
+ ((int)metrics->VrTemperatures[sensors]) & 0xFFFFFF;
+ gpuboard_temp_metrics->vr_temp[idx] |= (sensor_type << 24);
+ idx++;
+ }
+ }
+ } else if (type == SMU_TEMP_METRIC_BASEBOARD) {
+ baseboard_temp_metrics->accumulation_counter = metrics->AccumulationCounter;
+ baseboard_temp_metrics->label_version = metrics->LabelVersion;
+ baseboard_temp_metrics->node_id = metrics->NodeIdentifier;
+
+ idx = 0;
+ for (sensors = 0; sensors < SYSTEM_TEMP_MAX_ENTRIES; sensors++) {
+ if (metrics->SystemTemperatures[sensors] != -1) {
+ sensor_type = smu_v13_0_12_get_system_sensor_type(sensors);
+ baseboard_temp_metrics->system_temp[idx] =
+ ((int)metrics->SystemTemperatures[sensors]) & 0xFFFFFF;
+ baseboard_temp_metrics->system_temp[idx] |= (sensor_type << 24);
+ idx++;
+ }
+ }
+ }
+
+ memcpy(table, data_table->cache.buffer, size);
+
+ return size;
+}
+
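Each node_temp/vr_temp/system_temp entry above packs the sensor type into bits 31:24 on top of a 24-bit two's-complement temperature. A hedged decode sketch:

/* Illustrative decode of the packed (type << 24 | temp) entries. */
static inline int example_entry_type(uint32_t entry)
{
	return entry >> 24;		/* enum amdgpu_*_temp value */
}

static inline int example_entry_temp(uint32_t entry)
{
	int t = entry & 0xFFFFFF;	/* low 24 bits */

	return (t & 0x800000) ? (t | ~0xFFFFFF) : t;	/* sign-extend */
}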
ssize_t smu_v13_0_12_get_xcp_metrics(struct smu_context *smu, struct amdgpu_xcp *xcp, void *table, void *smu_metrics)
{
const u8 num_jpeg_rings = NUM_JPEG_RINGS_FW;
@@ -572,3 +929,8 @@ ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table, void
return sizeof(*gpu_metrics);
}
+
+const struct smu_temp_funcs smu_v13_0_12_temp_funcs = {
+ .temp_metrics_is_supported = smu_v13_0_12_is_temp_metrics_supported,
+ .get_temp_metrics = smu_v13_0_12_get_temp_metrics,
+};
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index 9cc294f4708b..cbe5b06438c1 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -143,9 +143,9 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU
MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1),
MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 1),
MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1),
- MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0),
+ MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 1),
MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 1),
- MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDriverReset, SMU_MSG_RAS_PRI),
+ MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDriverReset, SMU_MSG_RAS_PRI | SMU_MSG_NO_PRECHECK),
MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0),
MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0),
MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize, 0),
@@ -177,7 +177,7 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU
MSG_MAP(SetThrottlingPolicy, PPSMC_MSG_SetThrottlingPolicy, 0),
MSG_MAP(ResetSDMA, PPSMC_MSG_ResetSDMA, 0),
MSG_MAP(ResetVCN, PPSMC_MSG_ResetVCN, 0),
- MSG_MAP(GetStaticMetricsTable, PPSMC_MSG_GetStaticMetricsTable, 0),
+ MSG_MAP(GetStaticMetricsTable, PPSMC_MSG_GetStaticMetricsTable, 1),
};
// clang-format on
@@ -312,6 +312,8 @@ static void smu_v13_0_14_init_caps(struct smu_context *smu)
smu_v13_0_6_cap_set(smu, SMU_CAP(PER_INST_METRICS));
if (fw_ver >= 0x5551200)
smu_v13_0_6_cap_set(smu, SMU_CAP(SDMA_RESET));
+ if (fw_ver >= 0x5551800)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(VCN_RESET));
if (fw_ver >= 0x5551600) {
smu_v13_0_6_cap_set(smu, SMU_CAP(STATIC_METRICS));
smu_v13_0_6_cap_set(smu, SMU_CAP(BOARD_VOLTAGE));
@@ -350,6 +352,20 @@ static void smu_v13_0_12_init_caps(struct smu_context *smu)
smu_v13_0_6_cap_set(smu, SMU_CAP(BOARD_VOLTAGE));
smu_v13_0_6_cap_set(smu, SMU_CAP(PLDM_VERSION));
}
+
+ if (fw_ver > 0x04560900)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(VCN_RESET));
+
+ if (fw_ver >= 0x04560700) {
+ if (fw_ver >= 0x04560900) {
+ smu_v13_0_6_cap_set(smu, SMU_CAP(TEMP_METRICS));
+ if (smu->adev->gmc.xgmi.physical_node_id == 0)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(NPM_METRICS));
+ } else if (!amdgpu_sriov_vf(smu->adev))
+ smu_v13_0_6_cap_set(smu, SMU_CAP(TEMP_METRICS));
+ } else {
+ smu_v13_0_12_tables_fini(smu);
+ }
}
static void smu_v13_0_6_init_caps(struct smu_context *smu)
@@ -402,19 +418,41 @@ static void smu_v13_0_6_init_caps(struct smu_context *smu)
if ((pgm == 7 && fw_ver >= 0x7550E00) ||
(pgm == 0 && fw_ver >= 0x00557E00))
smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS));
- if ((pgm == 0 && fw_ver >= 0x00557F01) ||
- (pgm == 7 && fw_ver >= 0x7551000)) {
- smu_v13_0_6_cap_set(smu, SMU_CAP(STATIC_METRICS));
- smu_v13_0_6_cap_set(smu, SMU_CAP(BOARD_VOLTAGE));
+
+ if (amdgpu_sriov_vf(adev)) {
+ if (fw_ver >= 0x00558200)
+ amdgpu_virt_attr_set(&adev->virt.virt_caps,
+ AMDGPU_VIRT_CAP_POWER_LIMIT,
+ AMDGPU_CAP_ATTR_RW);
+ if ((pgm == 0 && fw_ver >= 0x00558000) ||
+ (pgm == 7 && fw_ver >= 0x7551000)) {
+ smu_v13_0_6_cap_set(smu,
+ SMU_CAP(STATIC_METRICS));
+ smu_v13_0_6_cap_set(smu,
+ SMU_CAP(BOARD_VOLTAGE));
+ smu_v13_0_6_cap_set(smu, SMU_CAP(PLDM_VERSION));
+ }
+ } else {
+ if ((pgm == 0 && fw_ver >= 0x00557F01) ||
+ (pgm == 7 && fw_ver >= 0x7551000)) {
+ smu_v13_0_6_cap_set(smu,
+ SMU_CAP(STATIC_METRICS));
+ smu_v13_0_6_cap_set(smu,
+ SMU_CAP(BOARD_VOLTAGE));
+ }
+ if ((pgm == 0 && fw_ver >= 0x00558000) ||
+ (pgm == 7 && fw_ver >= 0x7551000))
+ smu_v13_0_6_cap_set(smu, SMU_CAP(PLDM_VERSION));
}
- if ((pgm == 0 && fw_ver >= 0x00558000) ||
- (pgm == 7 && fw_ver >= 0x7551000))
- smu_v13_0_6_cap_set(smu, SMU_CAP(PLDM_VERSION));
}
if (((pgm == 7) && (fw_ver >= 0x7550700)) ||
((pgm == 0) && (fw_ver >= 0x00557900)) ||
((pgm == 4) && (fw_ver >= 0x4557000)))
smu_v13_0_6_cap_set(smu, SMU_CAP(SDMA_RESET));
+
+ if (((pgm == 0) && (fw_ver >= 0x00558200)) ||
+ ((pgm == 4) && (fw_ver >= 0x04557100)))
+ smu_v13_0_6_cap_set(smu, SMU_CAP(VCN_RESET));
}
static void smu_v13_0_x_init_caps(struct smu_context *smu)
@@ -511,8 +549,12 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *tables = smu_table->tables;
+ void *gpu_metrics_table __free(kfree) = NULL;
+ void *driver_pptable __free(kfree) = NULL;
+ void *metrics_table __free(kfree) = NULL;
struct amdgpu_device *adev = smu->adev;
int gpu_metrcs_size = METRICS_TABLE_SIZE;
+ int ret;
if (!(adev->flags & AMD_IS_APU))
SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU13_TOOL_SIZE,
@@ -528,27 +570,36 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu)
PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT);
- smu_table->metrics_table = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL);
- if (!smu_table->metrics_table)
+ SMU_TABLE_INIT(tables, SMU_TABLE_PMFW_SYSTEM_METRICS,
+ smu_v13_0_12_get_system_metrics_size(), PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT);
+
+ metrics_table = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL);
+ if (!metrics_table)
return -ENOMEM;
smu_table->metrics_time = 0;
smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_8);
- smu_table->gpu_metrics_table =
+ gpu_metrics_table =
kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
- if (!smu_table->gpu_metrics_table) {
- kfree(smu_table->metrics_table);
+ if (!gpu_metrics_table)
return -ENOMEM;
- }
- smu_table->driver_pptable =
- kzalloc(sizeof(struct PPTable_t), GFP_KERNEL);
- if (!smu_table->driver_pptable) {
- kfree(smu_table->metrics_table);
- kfree(smu_table->gpu_metrics_table);
+ driver_pptable = kzalloc(sizeof(struct PPTable_t), GFP_KERNEL);
+ if (!driver_pptable)
return -ENOMEM;
+
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) ==
+ IP_VERSION(13, 0, 12)) {
+ ret = smu_v13_0_12_tables_init(smu);
+ if (ret)
+ return ret;
}
+ smu_table->gpu_metrics_table = no_free_ptr(gpu_metrics_table);
+ smu_table->metrics_table = no_free_ptr(metrics_table);
+ smu_table->driver_pptable = no_free_ptr(driver_pptable);
+
return 0;
}
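The rewrite above drops the manual kfree() unwinding in favor of scope-based cleanup from <linux/cleanup.h>: pointers marked __free(kfree) are freed on any early return, and no_free_ptr() hands ownership out once every allocation has succeeded. A hedged standalone sketch of the idiom:

/* Illustrative __free(kfree)/no_free_ptr() error unwinding. */
static int example_alloc_two(void **out_a, void **out_b)
{
	void *a __free(kfree) = kzalloc(64, GFP_KERNEL);
	void *b __free(kfree) = kzalloc(64, GFP_KERNEL);

	if (!a || !b)
		return -ENOMEM;		/* both freed automatically here */

	*out_a = no_free_ptr(a);	/* ownership out, kfree suppressed */
	*out_b = no_free_ptr(b);
	return 0;
}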
@@ -677,6 +728,13 @@ static int smu_v13_0_6_init_smc_tables(struct smu_context *smu)
return ret;
}
+static int smu_v13_0_6_fini_smc_tables(struct smu_context *smu)
+{
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12))
+ smu_v13_0_12_tables_fini(smu);
+ return smu_v13_0_fini_smc_tables(smu);
+}
+
static int smu_v13_0_6_get_allowed_feature_mask(struct smu_context *smu,
uint32_t *feature_mask,
uint32_t num)
@@ -803,7 +861,7 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
struct PPTable_t *pptable =
(struct PPTable_t *)smu_table->driver_pptable;
int version = smu_v13_0_6_get_metrics_version(smu);
- int ret, i, retry = 100;
+ int ret, i, retry = 100, n;
uint32_t table_version;
uint16_t max_speed;
uint8_t max_width;
@@ -865,6 +923,23 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
pptable->PublicSerialNumber_AID =
GET_METRIC_FIELD(PublicSerialNumber_AID, version)[0];
+ amdgpu_device_set_uid(smu->adev->uid_info, AMDGPU_UID_TYPE_SOC,
+ 0, pptable->PublicSerialNumber_AID);
+ n = ARRAY_SIZE(metrics_v0->PublicSerialNumber_AID);
+ for (i = 0; i < n; i++) {
+ amdgpu_device_set_uid(
+ smu->adev->uid_info, AMDGPU_UID_TYPE_AID, i,
+ GET_METRIC_FIELD(PublicSerialNumber_AID,
+ version)[i]);
+ }
+ n = ARRAY_SIZE(metrics_v0->PublicSerialNumber_XCD);
+ for (i = 0; i < n; i++) {
+ amdgpu_device_set_uid(
+ smu->adev->uid_info, AMDGPU_UID_TYPE_XCD, i,
+ GET_METRIC_FIELD(PublicSerialNumber_XCD,
+ version)[i]);
+ }
+
pptable->Init = true;
if (smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) {
ret = smu_v13_0_6_get_static_metrics_table(smu);
@@ -1731,6 +1806,15 @@ static int smu_v13_0_6_read_sensor(struct smu_context *smu,
ret = -EOPNOTSUPP;
break;
}
+ case AMDGPU_PP_SENSOR_NODEPOWERLIMIT:
+ case AMDGPU_PP_SENSOR_NODEPOWER:
+ case AMDGPU_PP_SENSOR_GPPTRESIDENCY:
+ case AMDGPU_PP_SENSOR_MAXNODEPOWERLIMIT:
+ ret = smu_v13_0_12_get_npm_data(smu, sensor, (uint32_t *)data);
+ if (ret)
+ return ret;
+ *size = 4;
+ break;
case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
default:
ret = -EOPNOTSUPP;
@@ -2426,10 +2510,10 @@ static int smu_v13_0_6_i2c_control_init(struct smu_context *smu)
control->quirks = &smu_v13_0_6_i2c_control_quirks;
i2c_set_adapdata(control, smu_i2c);
- res = i2c_add_adapter(control);
+ res = devm_i2c_add_adapter(adev->dev, control);
if (res) {
DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
- goto Out_err;
+ return res;
}
}
@@ -2437,27 +2521,12 @@ static int smu_v13_0_6_i2c_control_init(struct smu_context *smu)
adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
return 0;
-Out_err:
- for ( ; i >= 0; i--) {
- struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
- struct i2c_adapter *control = &smu_i2c->adapter;
-
- i2c_del_adapter(control);
- }
- return res;
}
static void smu_v13_0_6_i2c_control_fini(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- int i;
- for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
- struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
- struct i2c_adapter *control = &smu_i2c->adapter;
-
- i2c_del_adapter(control);
- }
adev->pm.ras_eeprom_i2c_bus = NULL;
adev->pm.fru_eeprom_i2c_bus = NULL;
}
@@ -2560,9 +2629,9 @@ static ssize_t smu_v13_0_6_get_xcp_metrics(struct smu_context *smu, int xcp_id,
const u8 num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS_4_0_3;
int version = smu_v13_0_6_get_metrics_version(smu);
struct amdgpu_partition_metrics_v1_0 *xcp_metrics;
+ MetricsTableV0_t *metrics_v0 __free(kfree) = NULL;
struct amdgpu_device *adev = smu->adev;
int ret, inst, i, j, k, idx;
- MetricsTableV0_t *metrics_v0;
MetricsTableV1_t *metrics_v1;
MetricsTableV2_t *metrics_v2;
struct amdgpu_xcp *xcp;
@@ -2587,17 +2656,14 @@ static ssize_t smu_v13_0_6_get_xcp_metrics(struct smu_context *smu, int xcp_id,
return -ENOMEM;
ret = smu_v13_0_6_get_metrics_table(smu, metrics_v0, false);
- if (ret) {
- kfree(metrics_v0);
+ if (ret)
return ret;
- }
if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) ==
IP_VERSION(13, 0, 12) &&
- smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) {
- ret = smu_v13_0_12_get_xcp_metrics(smu, xcp, table, metrics_v0);
- goto out;
- }
+ smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS)))
+ return smu_v13_0_12_get_xcp_metrics(smu, xcp, table,
+ metrics_v0);
metrics_v1 = (MetricsTableV1_t *)metrics_v0;
metrics_v2 = (MetricsTableV2_t *)metrics_v0;
@@ -2668,8 +2734,6 @@ static ssize_t smu_v13_0_6_get_xcp_metrics(struct smu_context *smu, int xcp_id,
idx++;
}
}
-out:
- kfree(metrics_v0);
return sizeof(*xcp_metrics);
}
@@ -2680,31 +2744,26 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
struct gpu_metrics_v1_8 *gpu_metrics =
(struct gpu_metrics_v1_8 *)smu_table->gpu_metrics_table;
int version = smu_v13_0_6_get_metrics_version(smu);
+ MetricsTableV0_t *metrics_v0 __free(kfree) = NULL;
int ret = 0, xcc_id, inst, i, j, k, idx;
struct amdgpu_device *adev = smu->adev;
- MetricsTableV0_t *metrics_v0;
MetricsTableV1_t *metrics_v1;
MetricsTableV2_t *metrics_v2;
struct amdgpu_xcp *xcp;
u16 link_width_level;
- ssize_t num_bytes;
u8 num_jpeg_rings;
u32 inst_mask;
bool per_inst;
metrics_v0 = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL);
ret = smu_v13_0_6_get_metrics_table(smu, metrics_v0, false);
- if (ret) {
- kfree(metrics_v0);
+ if (ret)
return ret;
- }
- if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) &&
- smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) {
- num_bytes = smu_v13_0_12_get_gpu_metrics(smu, table, metrics_v0);
- kfree(metrics_v0);
- return num_bytes;
- }
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) ==
+ IP_VERSION(13, 0, 12) &&
+ smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS)))
+ return smu_v13_0_12_get_gpu_metrics(smu, table, metrics_v0);
metrics_v1 = (MetricsTableV1_t *)metrics_v0;
metrics_v2 = (MetricsTableV2_t *)metrics_v0;
@@ -2890,7 +2949,6 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
gpu_metrics->firmware_timestamp = GET_METRIC_FIELD(Timestamp, version);
*table = (void *)gpu_metrics;
- kfree(metrics_v0);
return sizeof(*gpu_metrics);
}
@@ -3076,7 +3134,7 @@ static inline bool smu_v13_0_6_is_link_reset_supported(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev;
int var = (adev->pdev->device & 0xF);
- if (var == 0x1)
+ if (var == 0x0 || var == 0x1 || var == 0x3)
return true;
return false;
@@ -3152,6 +3210,11 @@ static int smu_v13_0_6_reset_sdma(struct smu_context *smu, uint32_t inst_mask)
return ret;
}
+static bool smu_v13_0_6_reset_vcn_is_supported(struct smu_context *smu)
+{
+ return smu_v13_0_6_cap_supported(smu, SMU_CAP(VCN_RESET));
+}
+
static int smu_v13_0_6_reset_vcn(struct smu_context *smu, uint32_t inst_mask)
{
int ret = 0;
@@ -3165,6 +3228,20 @@ static int smu_v13_0_6_reset_vcn(struct smu_context *smu, uint32_t inst_mask)
}
+static int smu_v13_0_6_post_init(struct smu_context *smu)
+{
+ if (smu_v13_0_6_is_link_reset_supported(smu))
+ smu_feature_cap_set(smu, SMU_FEATURE_CAP_ID__LINK_RESET);
+
+ if (smu_v13_0_6_reset_sdma_is_supported(smu))
+ smu_feature_cap_set(smu, SMU_FEATURE_CAP_ID__SDMA_RESET);
+
+ if (smu_v13_0_6_reset_vcn_is_supported(smu))
+ smu_feature_cap_set(smu, SMU_FEATURE_CAP_ID__VCN_RESET);
+
+ return 0;
+}
+
static int mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable)
{
struct smu_context *smu = adev->powerplay.pp_handle;
@@ -3781,6 +3858,12 @@ static const struct aca_smu_funcs smu_v13_0_6_aca_smu_funcs = {
.parse_error_code = aca_smu_parse_error_code,
};
+static void smu_v13_0_6_set_temp_funcs(struct smu_context *smu)
+{
+ smu->smu_temp.temp_funcs = (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)
+ == IP_VERSION(13, 0, 12)) ? &smu_v13_0_12_temp_funcs : NULL;
+}
+
static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
/* init dpm */
.get_allowed_feature_mask = smu_v13_0_6_get_allowed_feature_mask,
@@ -3797,7 +3880,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
.init_microcode = smu_v13_0_6_init_microcode,
.fini_microcode = smu_v13_0_fini_microcode,
.init_smc_tables = smu_v13_0_6_init_smc_tables,
- .fini_smc_tables = smu_v13_0_fini_smc_tables,
+ .fini_smc_tables = smu_v13_0_6_fini_smc_tables,
.init_power = smu_v13_0_init_power,
.fini_power = smu_v13_0_fini_power,
.check_fw_status = smu_v13_0_6_check_fw_status,
@@ -3828,7 +3911,6 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
.get_xcp_metrics = smu_v13_0_6_get_xcp_metrics,
.get_thermal_temperature_range = smu_v13_0_6_get_thermal_temperature_range,
.mode1_reset_is_support = smu_v13_0_6_is_mode1_reset_supported,
- .link_reset_is_support = smu_v13_0_6_is_link_reset_supported,
.mode1_reset = smu_v13_0_6_mode1_reset,
.mode2_reset = smu_v13_0_6_mode2_reset,
.link_reset = smu_v13_0_6_link_reset,
@@ -3838,8 +3920,8 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
.send_hbm_bad_pages_num = smu_v13_0_6_smu_send_hbm_bad_page_num,
.send_rma_reason = smu_v13_0_6_send_rma_reason,
.reset_sdma = smu_v13_0_6_reset_sdma,
- .reset_sdma_is_supported = smu_v13_0_6_reset_sdma_is_supported,
.dpm_reset_vcn = smu_v13_0_6_reset_vcn,
+ .post_init = smu_v13_0_6_post_init,
};
void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu)
@@ -3854,6 +3936,8 @@ void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu)
smu->smc_driver_if_version = SMU13_0_6_DRIVER_IF_VERSION;
smu->smc_fw_caps |= SMU_FW_CAP_RAS_PRI;
smu_v13_0_set_smu_mailbox_registers(smu);
+ smu_v13_0_6_set_temp_funcs(smu);
amdgpu_mca_smu_init_funcs(smu->adev, &smu_v13_0_6_mca_smu_funcs);
amdgpu_aca_set_smu_funcs(smu->adev, &smu_v13_0_6_aca_smu_funcs);
}
+
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
index 67b30674fd31..7ef5f3e66c27 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
@@ -49,6 +49,7 @@ struct PPTable_t {
uint32_t MaxLclkDpmRange;
uint32_t MinLclkDpmRange;
uint64_t PublicSerialNumber_AID;
+ uint32_t MaxNodePowerLimit;
bool Init;
};
@@ -64,10 +65,13 @@ enum smu_v13_0_6_caps {
SMU_CAP(RMA_MSG),
SMU_CAP(ACA_SYND),
SMU_CAP(SDMA_RESET),
+ SMU_CAP(VCN_RESET),
SMU_CAP(STATIC_METRICS),
SMU_CAP(HST_LIMIT_METRICS),
SMU_CAP(BOARD_VOLTAGE),
SMU_CAP(PLDM_VERSION),
+ SMU_CAP(TEMP_METRICS),
+ SMU_CAP(NPM_METRICS),
SMU_CAP(ALL),
};
@@ -79,6 +83,7 @@ int smu_v13_0_6_get_metrics_table(struct smu_context *smu, void *metrics_table,
bool smu_v13_0_12_is_dpm_running(struct smu_context *smu);
int smu_v13_0_12_get_max_metrics_size(void);
+size_t smu_v13_0_12_get_system_metrics_size(void);
int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu);
int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu,
MetricsMember_t member, uint32_t *value);
@@ -86,6 +91,12 @@ ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table, void
ssize_t smu_v13_0_12_get_xcp_metrics(struct smu_context *smu,
struct amdgpu_xcp *xcp, void *table,
void *smu_metrics);
+int smu_v13_0_12_tables_init(struct smu_context *smu);
+void smu_v13_0_12_tables_fini(struct smu_context *smu);
+int smu_v13_0_12_get_npm_data(struct smu_context *smu,
+ enum amd_pp_sensors sensor,
+ uint32_t *value);
extern const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[];
extern const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[];
+extern const struct smu_temp_funcs smu_v13_0_12_temp_funcs;
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
index f32474af90b3..086501cc5213 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
@@ -2087,10 +2087,10 @@ static int smu_v14_0_2_i2c_control_init(struct smu_context *smu)
control->quirks = &smu_v14_0_2_i2c_control_quirks;
i2c_set_adapdata(control, smu_i2c);
- res = i2c_add_adapter(control);
+ res = devm_i2c_add_adapter(adev->dev, control);
if (res) {
DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
- goto Out_err;
+ return res;
}
}
@@ -2100,27 +2100,12 @@ static int smu_v14_0_2_i2c_control_init(struct smu_context *smu)
adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
return 0;
-Out_err:
- for ( ; i >= 0; i--) {
- struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
- struct i2c_adapter *control = &smu_i2c->adapter;
-
- i2c_del_adapter(control);
- }
- return res;
}
static void smu_v14_0_2_i2c_control_fini(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- int i;
- for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
- struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
- struct i2c_adapter *control = &smu_i2c->adapter;
-
- i2c_del_adapter(control);
- }
adev->pm.ras_eeprom_i2c_bus = NULL;
adev->pm.fru_eeprom_i2c_bus = NULL;
}
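Switching to devm_i2c_add_adapter() is what allows both the Out_err unwind loop and the i2c_del_adapter() loop in _fini to be dropped: each adapter is now deleted automatically when the owning struct device is released, including on the partial-failure path. A hedged sketch of the idiom:

	/* devm-managed: no manual i2c_del_adapter() on any path */
	res = devm_i2c_add_adapter(adev->dev, control);
	if (res)
		return res;	/* earlier adapters are unwound by devres */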
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index 59f9abd0f7b8..f532f7c69259 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -256,11 +256,12 @@ static int __smu_cmn_ras_filter_msg(struct smu_context *smu,
{
struct amdgpu_device *adev = smu->adev;
uint32_t flags, resp;
- bool fed_status;
+ bool fed_status, pri;
flags = __smu_cmn_get_msg_flags(smu, msg);
*poll = true;
+ pri = !!(flags & SMU_MSG_NO_PRECHECK);
	/* When there is a RAS fatal error, FW won't process non-RAS priority
* messages. Don't allow any messages other than RAS priority messages.
*/
@@ -272,15 +273,18 @@ static int __smu_cmn_ras_filter_msg(struct smu_context *smu,
smu_get_message_name(smu, msg));
return -EACCES;
}
+ }
+ if (pri || fed_status) {
/* FW will ignore non-priority messages when a RAS fatal error
- * is detected. Hence it is possible that a previous message
- * wouldn't have got response. Allow to continue without polling
- * for response status for priority messages.
+		 * or reset condition is detected. Hence it is possible that a
+		 * previous message wouldn't have got a response. Allow
+		 * continuing without polling for response status for priority
+		 * messages.
*/
resp = RREG32(smu->resp_reg);
dev_dbg(adev->dev,
- "Sending RAS priority message %s response status: %x",
+ "Sending priority message %s response status: %x",
smu_get_message_name(smu, msg), resp);
if (resp == 0)
*poll = false;
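Net effect of the hunk above: the poll-skip that used to apply only under a latched RAS fatal error now also applies to any message flagged SMU_MSG_NO_PRECHECK. A condensed sketch of the resulting flow (shape only):

	pri = !!(flags & SMU_MSG_NO_PRECHECK);

	if (pri || fed_status) {
		/* a prior message may never have been answered, so a
		 * stale response register is tolerated rather than
		 * treated as busy
		 */
		if (RREG32(smu->resp_reg) == 0)
			*poll = false;
	}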
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
index a608cdbdada4..d588f74b98de 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
@@ -65,6 +65,32 @@
header->structure_size = sizeof(*tmp); \
} while (0)
+#define smu_cmn_init_baseboard_temp_metrics(ptr, fr, cr) \
+ do { \
+ typecheck(struct amdgpu_baseboard_temp_metrics_v##fr##_##cr *, \
+ (ptr)); \
+ struct amdgpu_baseboard_temp_metrics_v##fr##_##cr *tmp = (ptr); \
+ struct metrics_table_header *header = \
+ (struct metrics_table_header *)tmp; \
+ memset(header, 0xFF, sizeof(*tmp)); \
+ header->format_revision = fr; \
+ header->content_revision = cr; \
+ header->structure_size = sizeof(*tmp); \
+ } while (0)
+
+#define smu_cmn_init_gpuboard_temp_metrics(ptr, fr, cr) \
+ do { \
+ typecheck(struct amdgpu_gpuboard_temp_metrics_v##fr##_##cr *, \
+ (ptr)); \
+ struct amdgpu_gpuboard_temp_metrics_v##fr##_##cr *tmp = (ptr); \
+ struct metrics_table_header *header = \
+ (struct metrics_table_header *)tmp; \
+ memset(header, 0xFF, sizeof(*tmp)); \
+ header->format_revision = fr; \
+ header->content_revision = cr; \
+ header->structure_size = sizeof(*tmp); \
+ } while (0)
+
extern const int link_speed[];
/* Helper to Convert from PCIE Gen 1/2/3/4/5/6 to 0.1 GT/s speed units */
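For reference, smu_cmn_init_baseboard_temp_metrics(ptr, 1, 0) would expand to roughly the following (hedged: the v1_0 structure name comes from the macro's token pasting and is assumed, not shown in this patch):

	struct amdgpu_baseboard_temp_metrics_v1_0 *tmp = (ptr);
	struct metrics_table_header *header =
		(struct metrics_table_header *)tmp;

	memset(header, 0xFF, sizeof(*tmp));	/* 0xFF = field not populated */
	header->format_revision = 1;
	header->content_revision = 0;
	header->structure_size = sizeof(*tmp);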
diff --git a/drivers/gpu/drm/amd/ras/rascore/Makefile b/drivers/gpu/drm/amd/ras/rascore/Makefile
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/Makefile
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h b/drivers/gpu/drm/amd/ras/rascore/ras_core_status.h
index 0d878ca3acba..144fbe4ceb9a 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_core_status.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: MIT */
/*
- * Copyright 2014 Advanced Micro Devices, Inc.
+ * Copyright 2025 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -21,12 +22,16 @@
*
*/
-#ifndef __DCE_V11_0_H__
-#define __DCE_V11_0_H__
-
-extern const struct amdgpu_ip_block_version dce_v11_0_ip_block;
-extern const struct amdgpu_ip_block_version dce_v11_2_ip_block;
-
-void dce_v11_0_disable_dce(struct amdgpu_device *adev);
+#ifndef __RAS_CORE_STATUS_H__
+#define __RAS_CORE_STATUS_H__
+#define RAS_CORE_OK 0
+#define RAS_CORE_NOT_SUPPORTED 248
+#define RAS_CORE_FAIL_ERROR_QUERY 249
+#define RAS_CORE_FAIL_ERROR_INJECTION 250
+#define RAS_CORE_FAIL_FATAL_RECOVERY 251
+#define RAS_CORE_FAIL_POISON_CONSUMPTION 252
+#define RAS_CORE_FAIL_POISON_CREATION 253
+#define RAS_CORE_FAIL_NO_VALID_BANKS 254
+#define RAS_CORE_GPU_IN_MODE1_RESET 255
#endif
diff --git a/drivers/gpu/drm/ast/ast_2100.c b/drivers/gpu/drm/ast/ast_2100.c
index 477ee15eff5d..829e3b8b0d19 100644
--- a/drivers/gpu/drm/ast/ast_2100.c
+++ b/drivers/gpu/drm/ast/ast_2100.c
@@ -32,6 +32,39 @@
#include "ast_post.h"
/*
+ * DRAM type
+ */
+
+static enum ast_dram_layout ast_2100_get_dram_layout_p2a(struct ast_device *ast)
+{
+ u32 mcr_cfg;
+ enum ast_dram_layout dram_layout;
+
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+ mcr_cfg = ast_read32(ast, 0x10004);
+
+ switch (mcr_cfg & 0x0c) {
+ case 0:
+ case 4:
+ default:
+ dram_layout = AST_DRAM_512Mx16;
+ break;
+ case 8:
+ if (mcr_cfg & 0x40)
+ dram_layout = AST_DRAM_1Gx16;
+ else
+ dram_layout = AST_DRAM_512Mx32;
+ break;
+ case 0xc:
+ dram_layout = AST_DRAM_1Gx32;
+ break;
+ }
+
+ return dram_layout;
+}
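A worked decode of the switch above, with illustrative register values:

/*
 *   mcr_cfg = 0x48: (0x48 & 0x0c) == 0x08, bit 6 set -> AST_DRAM_1Gx16
 *   mcr_cfg = 0x0c: (0x0c & 0x0c) == 0x0c            -> AST_DRAM_1Gx32
 */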
+
+/*
* POST
*/
@@ -266,6 +299,7 @@ static void ast_post_chip_2100(struct ast_device *ast)
u8 j;
u32 data, temp, i;
const struct ast_dramstruct *dram_reg_info;
+ enum ast_dram_layout dram_layout = ast_2100_get_dram_layout_p2a(ast);
j = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
@@ -292,11 +326,17 @@ static void ast_post_chip_2100(struct ast_device *ast)
for (i = 0; i < 15; i++)
udelay(dram_reg_info->data);
} else if (AST_DRAMSTRUCT_IS(dram_reg_info, DRAM_TYPE)) {
- data = dram_reg_info->data;
- if (ast->dram_type == AST_DRAM_1Gx16)
+ switch (dram_layout) {
+ case AST_DRAM_1Gx16:
data = 0x00000d89;
- else if (ast->dram_type == AST_DRAM_1Gx32)
+ break;
+ case AST_DRAM_1Gx32:
data = 0x00000c8d;
+ break;
+ default:
+ data = dram_reg_info->data;
+ break;
+ }
temp = ast_read32(ast, 0x12070);
temp &= 0xc;
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index e37a55295ed7..c15aef014f69 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -98,13 +98,15 @@ enum ast_config_mode {
ast_use_defaults
};
-#define AST_DRAM_512Mx16 0
-#define AST_DRAM_1Gx16 1
-#define AST_DRAM_512Mx32 2
-#define AST_DRAM_1Gx32 3
-#define AST_DRAM_2Gx16 6
-#define AST_DRAM_4Gx16 7
-#define AST_DRAM_8Gx16 8
+enum ast_dram_layout {
+ AST_DRAM_512Mx16 = 0,
+ AST_DRAM_1Gx16 = 1,
+ AST_DRAM_512Mx32 = 2,
+ AST_DRAM_1Gx32 = 3,
+ AST_DRAM_2Gx16 = 6,
+ AST_DRAM_4Gx16 = 7,
+ AST_DRAM_8Gx16 = 8,
+};
/*
* Hardware cursor
@@ -172,10 +174,6 @@ struct ast_device {
enum ast_config_mode config_mode;
enum ast_chip chip;
- uint32_t dram_bus_width;
- uint32_t dram_type;
- uint32_t mclk;
-
void __iomem *vram;
unsigned long vram_base;
unsigned long vram_size;
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 44b9b5f659fc..3eea6a6cdacd 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -210,126 +210,6 @@ static void ast_detect_tx_chip(struct ast_device *ast, bool need_post)
drm_info(dev, "Using %s\n", info_str[ast->tx_chip]);
}
-static int ast_get_dram_info(struct ast_device *ast)
-{
- struct drm_device *dev = &ast->base;
- struct device_node *np = dev->dev->of_node;
- uint32_t mcr_cfg, mcr_scu_mpll, mcr_scu_strap;
- uint32_t denum, num, div, ref_pll, dsel;
-
- switch (ast->config_mode) {
- case ast_use_dt:
- /*
- * If some properties are missing, use reasonable
- * defaults for GEN5
- */
- if (of_property_read_u32(np, "aspeed,mcr-configuration",
- &mcr_cfg))
- mcr_cfg = 0x00000577;
- if (of_property_read_u32(np, "aspeed,mcr-scu-mpll",
- &mcr_scu_mpll))
- mcr_scu_mpll = 0x000050C0;
- if (of_property_read_u32(np, "aspeed,mcr-scu-strap",
- &mcr_scu_strap))
- mcr_scu_strap = 0;
- break;
- case ast_use_p2a:
- ast_write32(ast, 0xf004, 0x1e6e0000);
- ast_write32(ast, 0xf000, 0x1);
- mcr_cfg = ast_read32(ast, 0x10004);
- mcr_scu_mpll = ast_read32(ast, 0x10120);
- mcr_scu_strap = ast_read32(ast, 0x10170);
- break;
- case ast_use_defaults:
- default:
- ast->dram_bus_width = 16;
- ast->dram_type = AST_DRAM_1Gx16;
- if (IS_AST_GEN6(ast))
- ast->mclk = 800;
- else
- ast->mclk = 396;
- return 0;
- }
-
- if (mcr_cfg & 0x40)
- ast->dram_bus_width = 16;
- else
- ast->dram_bus_width = 32;
-
- if (IS_AST_GEN6(ast)) {
- switch (mcr_cfg & 0x03) {
- case 0:
- ast->dram_type = AST_DRAM_1Gx16;
- break;
- default:
- case 1:
- ast->dram_type = AST_DRAM_2Gx16;
- break;
- case 2:
- ast->dram_type = AST_DRAM_4Gx16;
- break;
- case 3:
- ast->dram_type = AST_DRAM_8Gx16;
- break;
- }
- } else if (IS_AST_GEN4(ast) || IS_AST_GEN5(ast)) {
- switch (mcr_cfg & 0x03) {
- case 0:
- ast->dram_type = AST_DRAM_512Mx16;
- break;
- default:
- case 1:
- ast->dram_type = AST_DRAM_1Gx16;
- break;
- case 2:
- ast->dram_type = AST_DRAM_2Gx16;
- break;
- case 3:
- ast->dram_type = AST_DRAM_4Gx16;
- break;
- }
- } else {
- switch (mcr_cfg & 0x0c) {
- case 0:
- case 4:
- ast->dram_type = AST_DRAM_512Mx16;
- break;
- case 8:
- if (mcr_cfg & 0x40)
- ast->dram_type = AST_DRAM_1Gx16;
- else
- ast->dram_type = AST_DRAM_512Mx32;
- break;
- case 0xc:
- ast->dram_type = AST_DRAM_1Gx32;
- break;
- }
- }
-
- if (mcr_scu_strap & 0x2000)
- ref_pll = 14318;
- else
- ref_pll = 12000;
-
- denum = mcr_scu_mpll & 0x1f;
- num = (mcr_scu_mpll & 0x3fe0) >> 5;
- dsel = (mcr_scu_mpll & 0xc000) >> 14;
- switch (dsel) {
- case 3:
- div = 0x4;
- break;
- case 2:
- case 1:
- div = 0x2;
- break;
- default:
- div = 0x1;
- break;
- }
- ast->mclk = ref_pll * (num + 2) / ((denum + 2) * (div * 1000));
- return 0;
-}
-
struct drm_device *ast_device_create(struct pci_dev *pdev,
const struct drm_driver *drv,
enum ast_chip chip,
@@ -352,12 +232,6 @@ struct drm_device *ast_device_create(struct pci_dev *pdev,
ast->regs = regs;
ast->ioregs = ioregs;
- ret = ast_get_dram_info(ast);
- if (ret)
- return ERR_PTR(ret);
- drm_info(dev, "dram MCLK=%u Mhz type=%d bus_width=%d\n",
- ast->mclk, ast->dram_type, ast->dram_bus_width);
-
ast_detect_tx_chip(ast, need_post);
switch (ast->tx_chip) {
case AST_TX_ASTDP:
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index b9e0ca85226a..a250afd8d662 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -120,8 +120,8 @@ config DRM_ITE_IT6505
select DRM_DISPLAY_DP_AUX_BUS
select DRM_KMS_HELPER
select EXTCON
- select CRYPTO
- select CRYPTO_HASH
+ select CRYPTO_LIB_SHA1
+ select REGMAP_I2C
help
ITE IT6505 DisplayPort bridge chip driver.
@@ -316,6 +316,19 @@ config DRM_SIMPLE_BRIDGE
Support for non-programmable DRM bridges, such as ADI ADV7123, TI
THS8134 and THS8135 or passive resistor ladder DACs.
+config DRM_SOLOMON_SSD2825
+ tristate "SSD2825 RGB/DSI bridge"
+ depends on SPI_MASTER && OF
+ select DRM_MIPI_DSI
+ select DRM_KMS_HELPER
+ select DRM_PANEL
+ help
+ Say Y here if you want support for the Solomon SSD2825 RGB/DSI
+ SPI bridge driver.
+
+ Say M here if you want to support this hardware as a module.
+ The module will be named "ssd2825".
+
config DRM_THINE_THC63LVD1024
tristate "Thine THC63LVD1024 LVDS decoder bridge"
depends on OF
@@ -438,6 +451,18 @@ config DRM_TI_TPD12S015
Texas Instruments TPD12S015 HDMI level shifter and ESD protection
driver.
+config DRM_WAVESHARE_BRIDGE
+ tristate "Waveshare DSI bridge"
+ depends on OF
+ depends on BACKLIGHT_CLASS_DEVICE
+ select DRM_PANEL_BRIDGE
+ select DRM_KMS_HELPER
+ select DRM_MIPI_DSI
+ select REGMAP_I2C
+ help
+	  Driver for the Waveshare DSI to DPI bridge board.
+	  Please say Y if you have such hardware.
+
source "drivers/gpu/drm/bridge/analogix/Kconfig"
source "drivers/gpu/drm/bridge/adv7511/Kconfig"
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index 245e8a27e3fc..c7dc03182e59 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_DRM_SIL_SII8620) += sil-sii8620.o
obj-$(CONFIG_DRM_SII902X) += sii902x.o
obj-$(CONFIG_DRM_SII9234) += sii9234.o
obj-$(CONFIG_DRM_SIMPLE_BRIDGE) += simple-bridge.o
+obj-$(CONFIG_DRM_SOLOMON_SSD2825) += ssd2825.o
obj-$(CONFIG_DRM_THINE_THC63LVD1024) += thc63lvd1024.o
obj-$(CONFIG_DRM_TOSHIBA_TC358762) += tc358762.o
obj-$(CONFIG_DRM_TOSHIBA_TC358764) += tc358764.o
@@ -40,6 +41,7 @@ obj-$(CONFIG_DRM_TI_SN65DSI86) += ti-sn65dsi86.o
obj-$(CONFIG_DRM_TI_TDP158) += ti-tdp158.o
obj-$(CONFIG_DRM_TI_TFP410) += ti-tfp410.o
obj-$(CONFIG_DRM_TI_TPD12S015) += ti-tpd12s015.o
+obj-$(CONFIG_DRM_WAVESHARE_BRIDGE) += waveshare-dsi.o
obj-$(CONFIG_DRM_NWL_MIPI_DSI) += nwl-dsi.o
obj-$(CONFIG_DRM_ITE_IT66121) += ite-it66121.o
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h
index 85ebead9809c..8be7266fd4f4 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511.h
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h
@@ -195,13 +195,14 @@
#define ADV7511_I2S_IEC958_DIRECT 3
#define ADV7511_PACKET(p, x) ((p) * 0x20 + (x))
-#define ADV7511_PACKET_SDP(x) ADV7511_PACKET(0, x)
+#define ADV7511_PACKET_SPD(x) ADV7511_PACKET(0, x)
#define ADV7511_PACKET_MPEG(x) ADV7511_PACKET(1, x)
#define ADV7511_PACKET_ACP(x) ADV7511_PACKET(2, x)
#define ADV7511_PACKET_ISRC1(x) ADV7511_PACKET(3, x)
#define ADV7511_PACKET_ISRC2(x) ADV7511_PACKET(4, x)
#define ADV7511_PACKET_GM(x) ADV7511_PACKET(5, x)
-#define ADV7511_PACKET_SPARE(x) ADV7511_PACKET(6, x)
+#define ADV7511_PACKET_SPARE1(x) ADV7511_PACKET(6, x)
+#define ADV7511_PACKET_SPARE2(x) ADV7511_PACKET(7, x)
#define ADV7511_REG_CEC_TX_FRAME_HDR 0x00
#define ADV7511_REG_CEC_TX_FRAME_DATA0 0x01
@@ -348,6 +349,7 @@ struct adv7511 {
struct i2c_client *i2c_cec;
struct regmap *regmap;
+ struct regmap *regmap_packet;
struct regmap *regmap_cec;
enum drm_connector_status status;
bool powered;
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
index 766b1c96bc88..87e7e820810a 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
@@ -12,6 +12,8 @@
#include <sound/soc.h>
#include <linux/of_graph.h>
+#include <drm/display/drm_hdmi_state_helper.h>
+
#include "adv7511.h"
static void adv7511_calc_cts_n(unsigned int f_tmds, unsigned int fs,
@@ -155,17 +157,8 @@ int adv7511_hdmi_audio_prepare(struct drm_bridge *bridge,
regmap_update_bits(adv7511->regmap, ADV7511_REG_I2C_FREQ_ID_CFG,
ADV7511_I2C_FREQ_ID_CFG_RATE_MASK, rate << 4);
- /* send current Audio infoframe values while updating */
- regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
- BIT(5), BIT(5));
-
- regmap_write(adv7511->regmap, ADV7511_REG_AUDIO_INFOFRAME(0), 0x1);
-
- /* use Audio infoframe updated info */
- regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
- BIT(5), 0);
-
- return 0;
+ return drm_atomic_helper_connector_hdmi_update_audio_infoframe(connector,
+ &hparms->cea);
}
int adv7511_hdmi_audio_startup(struct drm_bridge *bridge,
@@ -188,15 +181,9 @@ int adv7511_hdmi_audio_startup(struct drm_bridge *bridge,
/* not copyrighted */
regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CFG1,
BIT(5), BIT(5));
- /* enable audio infoframes */
- regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE1,
- BIT(3), BIT(3));
/* AV mute disable */
regmap_update_bits(adv7511->regmap, ADV7511_REG_GC(0),
BIT(7) | BIT(6), BIT(7));
- /* use Audio infoframe updated info */
- regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
- BIT(5), 0);
/* enable SPDIF receiver */
if (adv7511->audio_source == ADV7511_AUDIO_SOURCE_SPDIF)
@@ -214,4 +201,6 @@ void adv7511_hdmi_audio_shutdown(struct drm_bridge *bridge,
if (adv7511->audio_source == ADV7511_AUDIO_SOURCE_SPDIF)
regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG,
BIT(7), 0);
+
+ drm_atomic_helper_connector_hdmi_clear_audio_infoframe(connector);
}
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index 00d6417c177b..b9be86541307 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -132,6 +132,13 @@ static const struct regmap_config adv7511_regmap_config = {
.volatile_reg = adv7511_register_volatile,
};
+static const struct regmap_config adv7511_packet_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = 0xff,
+};
+
/* -----------------------------------------------------------------------------
* Hardware configuration
*/
@@ -886,9 +893,18 @@ static int adv7511_bridge_hdmi_clear_infoframe(struct drm_bridge *bridge,
struct adv7511 *adv7511 = bridge_to_adv7511(bridge);
switch (type) {
+ case HDMI_INFOFRAME_TYPE_AUDIO:
+ adv7511_packet_disable(adv7511, ADV7511_PACKET_ENABLE_AUDIO_INFOFRAME);
+ break;
case HDMI_INFOFRAME_TYPE_AVI:
adv7511_packet_disable(adv7511, ADV7511_PACKET_ENABLE_AVI_INFOFRAME);
break;
+ case HDMI_INFOFRAME_TYPE_SPD:
+ adv7511_packet_disable(adv7511, ADV7511_PACKET_ENABLE_SPD);
+ break;
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ adv7511_packet_disable(adv7511, ADV7511_PACKET_ENABLE_SPARE1);
+ break;
default:
drm_dbg_driver(adv7511->bridge.dev, "Unsupported HDMI InfoFrame %x\n", type);
break;
@@ -903,16 +919,52 @@ static int adv7511_bridge_hdmi_write_infoframe(struct drm_bridge *bridge,
{
struct adv7511 *adv7511 = bridge_to_adv7511(bridge);
- adv7511_bridge_hdmi_clear_infoframe(bridge, type);
-
switch (type) {
+ case HDMI_INFOFRAME_TYPE_AUDIO:
+ /* send current Audio infoframe values while updating */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
+ BIT(5), BIT(5));
+
+ /* The Audio infoframe id is not configurable */
+ regmap_bulk_write(adv7511->regmap, ADV7511_REG_AUDIO_INFOFRAME_VERSION,
+ buffer + 1, len - 1);
+
+ /* use Audio infoframe updated info */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
+ BIT(5), 0);
+
+ adv7511_packet_enable(adv7511, ADV7511_PACKET_ENABLE_AUDIO_INFOFRAME);
+ break;
case HDMI_INFOFRAME_TYPE_AVI:
+ /* send current AVI infoframe values while updating */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
+ BIT(6), BIT(6));
+
/* The AVI infoframe id is not configurable */
regmap_bulk_write(adv7511->regmap, ADV7511_REG_AVI_INFOFRAME_VERSION,
buffer + 1, len - 1);
+ regmap_write(adv7511->regmap, ADV7511_REG_AUDIO_INFOFRAME_LENGTH, 0x2);
+ regmap_write(adv7511->regmap, ADV7511_REG_AUDIO_INFOFRAME(1), 0x1);
+
+ /* use AVI infoframe updated info */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
+ BIT(6), 0);
+
adv7511_packet_enable(adv7511, ADV7511_PACKET_ENABLE_AVI_INFOFRAME);
break;
+ case HDMI_INFOFRAME_TYPE_SPD:
+ adv7511_packet_disable(adv7511, ADV7511_PACKET_ENABLE_SPD);
+ regmap_bulk_write(adv7511->regmap_packet, ADV7511_PACKET_SPD(0),
+ buffer, len);
+ adv7511_packet_enable(adv7511, ADV7511_PACKET_ENABLE_SPD);
+ break;
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ adv7511_packet_disable(adv7511, ADV7511_PACKET_ENABLE_SPARE1);
+ regmap_bulk_write(adv7511->regmap_packet, ADV7511_PACKET_SPARE1(0),
+ buffer, len);
+ adv7511_packet_enable(adv7511, ADV7511_PACKET_ENABLE_SPARE1);
+ break;
default:
drm_dbg_driver(adv7511->bridge.dev, "Unsupported HDMI InfoFrame %x\n", type);
break;
@@ -1242,6 +1294,13 @@ static int adv7511_probe(struct i2c_client *i2c)
goto err_i2c_unregister_edid;
}
+ adv7511->regmap_packet = devm_regmap_init_i2c(adv7511->i2c_packet,
+ &adv7511_packet_config);
+ if (IS_ERR(adv7511->regmap_packet)) {
+ ret = PTR_ERR(adv7511->regmap_packet);
+ goto err_i2c_unregister_packet;
+ }
+
regmap_write(adv7511->regmap, ADV7511_REG_PACKET_I2C_ADDR,
adv7511->i2c_packet->addr << 1);
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
index 8b3304dedcd9..6f3fdcb6afdb 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
@@ -2604,6 +2604,7 @@ static int anx7625_link_bridge(struct drm_dp_aux *aux)
platform->bridge.type = platform->pdata.panel_bridge ?
DRM_MODE_CONNECTOR_eDP :
DRM_MODE_CONNECTOR_DisplayPort;
+ platform->bridge.support_hdcp = true;
drm_bridge_add(&platform->bridge);
diff --git a/drivers/gpu/drm/bridge/cadence/Kconfig b/drivers/gpu/drm/bridge/cadence/Kconfig
index cced81633ddc..f1d8a8a151d8 100644
--- a/drivers/gpu/drm/bridge/cadence/Kconfig
+++ b/drivers/gpu/drm/bridge/cadence/Kconfig
@@ -6,6 +6,7 @@ config DRM_CDNS_DSI
select DRM_PANEL_BRIDGE
select GENERIC_PHY
select GENERIC_PHY_MIPI_DPHY
+ select VIDEOMODE_HELPERS
depends on OF
help
Support Cadence DPI to DSI bridge. This is an internal
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
index a57ca8c3bdae..09b289f0fcbf 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
@@ -9,6 +9,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_probe_helper.h>
#include <video/mipi_display.h>
+#include <video/videomode.h>
#include <linux/clk.h>
#include <linux/interrupt.h>
@@ -417,7 +418,8 @@
#define DSI_OUTPUT_PORT 0
#define DSI_INPUT_PORT(inputid) (1 + (inputid))
-#define DSI_HBP_FRAME_OVERHEAD 12
+#define DSI_HBP_FRAME_PULSE_OVERHEAD 12
+#define DSI_HBP_FRAME_EVENT_OVERHEAD 16
#define DSI_HSA_FRAME_OVERHEAD 14
#define DSI_HFP_FRAME_OVERHEAD 6
#define DSI_HSS_VSS_VSE_FRAME_OVERHEAD 4
@@ -452,15 +454,6 @@ bridge_to_cdns_dsi_input(struct drm_bridge *bridge)
return container_of(bridge, struct cdns_dsi_input, bridge);
}
-static unsigned int mode_to_dpi_hfp(const struct drm_display_mode *mode,
- bool mode_valid_check)
-{
- if (mode_valid_check)
- return mode->hsync_start - mode->hdisplay;
-
- return mode->crtc_hsync_start - mode->crtc_hdisplay;
-}
-
static unsigned int dpi_to_dsi_timing(unsigned int dpi_timing,
unsigned int dpi_bpp,
unsigned int dsi_pkt_overhead)
@@ -476,145 +469,77 @@ static unsigned int dpi_to_dsi_timing(unsigned int dpi_timing,
}
static int cdns_dsi_mode2cfg(struct cdns_dsi *dsi,
- const struct drm_display_mode *mode,
- struct cdns_dsi_cfg *dsi_cfg,
- bool mode_valid_check)
+ const struct videomode *vm,
+ struct cdns_dsi_cfg *dsi_cfg)
{
struct cdns_dsi_output *output = &dsi->output;
- unsigned int tmp;
- bool sync_pulse = false;
+ u32 dpi_hsa, dpi_hbp, dpi_hfp, dpi_hact;
+ bool sync_pulse;
int bpp;
+ dpi_hsa = vm->hsync_len;
+ dpi_hbp = vm->hback_porch;
+ dpi_hfp = vm->hfront_porch;
+ dpi_hact = vm->hactive;
+
memset(dsi_cfg, 0, sizeof(*dsi_cfg));
- if (output->dev->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
- sync_pulse = true;
+ sync_pulse = output->dev->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
bpp = mipi_dsi_pixel_format_to_bpp(output->dev->format);
- if (mode_valid_check)
- tmp = mode->htotal -
- (sync_pulse ? mode->hsync_end : mode->hsync_start);
- else
- tmp = mode->crtc_htotal -
- (sync_pulse ?
- mode->crtc_hsync_end : mode->crtc_hsync_start);
-
- dsi_cfg->hbp = dpi_to_dsi_timing(tmp, bpp, DSI_HBP_FRAME_OVERHEAD);
-
if (sync_pulse) {
- if (mode_valid_check)
- tmp = mode->hsync_end - mode->hsync_start;
- else
- tmp = mode->crtc_hsync_end - mode->crtc_hsync_start;
+ dsi_cfg->hbp = dpi_to_dsi_timing(dpi_hbp, bpp,
+ DSI_HBP_FRAME_PULSE_OVERHEAD);
- dsi_cfg->hsa = dpi_to_dsi_timing(tmp, bpp,
+ dsi_cfg->hsa = dpi_to_dsi_timing(dpi_hsa, bpp,
DSI_HSA_FRAME_OVERHEAD);
- }
-
- dsi_cfg->hact = dpi_to_dsi_timing(mode_valid_check ?
- mode->hdisplay : mode->crtc_hdisplay,
- bpp, 0);
- dsi_cfg->hfp = dpi_to_dsi_timing(mode_to_dpi_hfp(mode, mode_valid_check),
- bpp, DSI_HFP_FRAME_OVERHEAD);
-
- return 0;
-}
-
-static int cdns_dsi_adjust_phy_config(struct cdns_dsi *dsi,
- struct cdns_dsi_cfg *dsi_cfg,
- struct phy_configure_opts_mipi_dphy *phy_cfg,
- const struct drm_display_mode *mode,
- bool mode_valid_check)
-{
- struct cdns_dsi_output *output = &dsi->output;
- unsigned long long dlane_bps;
- unsigned long adj_dsi_htotal;
- unsigned long dsi_htotal;
- unsigned long dpi_htotal;
- unsigned long dpi_hz;
- unsigned int dsi_hfp_ext;
- unsigned int lanes = output->dev->lanes;
-
- dsi_htotal = dsi_cfg->hbp + DSI_HBP_FRAME_OVERHEAD;
- if (output->dev->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
- dsi_htotal += dsi_cfg->hsa + DSI_HSA_FRAME_OVERHEAD;
+ } else {
+ dsi_cfg->hbp = dpi_to_dsi_timing(dpi_hbp + dpi_hsa, bpp,
+ DSI_HBP_FRAME_EVENT_OVERHEAD);
- dsi_htotal += dsi_cfg->hact;
- dsi_htotal += dsi_cfg->hfp + DSI_HFP_FRAME_OVERHEAD;
-
- /*
- * Make sure DSI htotal is aligned on a lane boundary when calculating
- * the expected data rate. This is done by extending HFP in case of
- * misalignment.
- */
- adj_dsi_htotal = dsi_htotal;
- if (dsi_htotal % lanes)
- adj_dsi_htotal += lanes - (dsi_htotal % lanes);
+ dsi_cfg->hsa = 0;
+ }
- dpi_hz = (mode_valid_check ? mode->clock : mode->crtc_clock) * 1000;
- dlane_bps = (unsigned long long)dpi_hz * adj_dsi_htotal;
+ dsi_cfg->hact = dpi_to_dsi_timing(dpi_hact, bpp, 0);
- /* data rate in bytes/sec is not an integer, refuse the mode. */
- dpi_htotal = mode_valid_check ? mode->htotal : mode->crtc_htotal;
- if (do_div(dlane_bps, lanes * dpi_htotal))
- return -EINVAL;
+ dsi_cfg->hfp = dpi_to_dsi_timing(dpi_hfp, bpp, DSI_HFP_FRAME_OVERHEAD);
- /* data rate was in bytes/sec, convert to bits/sec. */
- phy_cfg->hs_clk_rate = dlane_bps * 8;
+ dsi_cfg->htotal = dsi_cfg->hact + dsi_cfg->hfp + DSI_HFP_FRAME_OVERHEAD;
- dsi_hfp_ext = adj_dsi_htotal - dsi_htotal;
- dsi_cfg->hfp += dsi_hfp_ext;
- dsi_cfg->htotal = dsi_htotal + dsi_hfp_ext;
+ if (sync_pulse) {
+ dsi_cfg->htotal += dsi_cfg->hbp + DSI_HBP_FRAME_PULSE_OVERHEAD;
+ dsi_cfg->htotal += dsi_cfg->hsa + DSI_HSA_FRAME_OVERHEAD;
+ } else {
+ dsi_cfg->htotal += dsi_cfg->hbp + DSI_HBP_FRAME_EVENT_OVERHEAD;
+ }
return 0;
}
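A worked example of the conversion above, assuming dpi_to_dsi_timing() computes ceil(px * bpp / 8) minus the packet overhead (its body is elided from this hunk). For RGB888 (24 bpp) in event mode with hbp = 40 px, hsa = 10 px, hfp = 100 px:

/*
 *   dsi hbp = (40 + 10) * 3 - DSI_HBP_FRAME_EVENT_OVERHEAD(16) = 134 bytes
 *   dsi hfp = 100 * 3 - DSI_HFP_FRAME_OVERHEAD(6)              = 294 bytes
 *   dsi hsa = 0 (folded into hbp in event mode)
 */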
static int cdns_dsi_check_conf(struct cdns_dsi *dsi,
- const struct drm_display_mode *mode,
- struct cdns_dsi_cfg *dsi_cfg,
- bool mode_valid_check)
+ const struct videomode *vm,
+ struct cdns_dsi_cfg *dsi_cfg)
{
struct cdns_dsi_output *output = &dsi->output;
struct phy_configure_opts_mipi_dphy *phy_cfg = &output->phy_opts.mipi_dphy;
- unsigned long dsi_hss_hsa_hse_hbp;
unsigned int nlanes = output->dev->lanes;
- int mode_clock = (mode_valid_check ? mode->clock : mode->crtc_clock);
int ret;
- ret = cdns_dsi_mode2cfg(dsi, mode, dsi_cfg, mode_valid_check);
+ ret = cdns_dsi_mode2cfg(dsi, vm, dsi_cfg);
if (ret)
return ret;
- ret = phy_mipi_dphy_get_default_config(mode_clock * 1000,
+ ret = phy_mipi_dphy_get_default_config(vm->pixelclock,
mipi_dsi_pixel_format_to_bpp(output->dev->format),
nlanes, phy_cfg);
if (ret)
return ret;
- ret = cdns_dsi_adjust_phy_config(dsi, dsi_cfg, phy_cfg, mode, mode_valid_check);
- if (ret)
- return ret;
-
ret = phy_validate(dsi->dphy, PHY_MODE_MIPI_DPHY, 0, &output->phy_opts);
if (ret)
return ret;
- dsi_hss_hsa_hse_hbp = dsi_cfg->hbp + DSI_HBP_FRAME_OVERHEAD;
- if (output->dev->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
- dsi_hss_hsa_hse_hbp += dsi_cfg->hsa + DSI_HSA_FRAME_OVERHEAD;
-
- /*
- * Make sure DPI(HFP) > DSI(HSS+HSA+HSE+HBP) to guarantee that the FIFO
- * is empty before we start a receiving a new line on the DPI
- * interface.
- */
- if ((u64)phy_cfg->hs_clk_rate *
- mode_to_dpi_hfp(mode, mode_valid_check) * nlanes <
- (u64)dsi_hss_hsa_hse_hbp *
- (mode_valid_check ? mode->clock : mode->crtc_clock) * 1000)
- return -EINVAL;
-
return 0;
}
@@ -644,8 +569,7 @@ cdns_dsi_bridge_mode_valid(struct drm_bridge *bridge,
struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
struct cdns_dsi *dsi = input_to_dsi(input);
struct cdns_dsi_output *output = &dsi->output;
- struct cdns_dsi_cfg dsi_cfg;
- int bpp, ret;
+ int bpp;
/*
* VFP_DSI should be less than VFP_DPI and VFP_DSI should be at
@@ -663,10 +587,6 @@ cdns_dsi_bridge_mode_valid(struct drm_bridge *bridge,
if ((mode->hdisplay * bpp) % 32)
return MODE_H_ILLEGAL;
- ret = cdns_dsi_check_conf(dsi, mode, &dsi_cfg, true);
- if (ret)
- return MODE_BAD;
-
return MODE_OK;
}
@@ -882,7 +802,13 @@ static void cdns_dsi_bridge_atomic_pre_enable(struct drm_bridge *bridge,
tx_byte_period = DIV_ROUND_DOWN_ULL((u64)NSEC_PER_SEC * 8,
phy_cfg->hs_clk_rate);
- reg_wakeup = (phy_cfg->hs_prepare + phy_cfg->hs_zero) / tx_byte_period;
+
+ /*
+ * Estimated time [in clock cycles] to perform LP->HS on D-PHY.
+ * It is not clear how to calculate this, so for now,
+ * set it to 1/10 of the total number of clocks in a line.
+ */
+ reg_wakeup = dsi_cfg.htotal / nlanes / 10;
writel(REG_WAKEUP_TIME(reg_wakeup) | REG_LINE_DURATION(tmp),
dsi->regs + VID_DPHY_TIME);
@@ -989,6 +915,28 @@ static u32 *cdns_dsi_bridge_get_input_bus_fmts(struct drm_bridge *bridge,
return input_fmts;
}
+static long cdns_dsi_round_pclk(struct cdns_dsi *dsi, unsigned long pclk)
+{
+ struct cdns_dsi_output *output = &dsi->output;
+ unsigned int nlanes = output->dev->lanes;
+ union phy_configure_opts phy_opts = { 0 };
+ u32 bitspp;
+ int ret;
+
+ bitspp = mipi_dsi_pixel_format_to_bpp(output->dev->format);
+
+ ret = phy_mipi_dphy_get_default_config(pclk, bitspp, nlanes,
+ &phy_opts.mipi_dphy);
+ if (ret)
+ return ret;
+
+ ret = phy_validate(dsi->dphy, PHY_MODE_MIPI_DPHY, 0, &phy_opts);
+ if (ret)
+ return ret;
+
+ return div_u64((u64)phy_opts.mipi_dphy.hs_clk_rate * nlanes, bitspp);
+}
+
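A hedged numeric walk through cdns_dsi_round_pclk() (the rounded PHY rate is invented; real granularity depends on the D-PHY driver):

/*
 *   pclk = 148500000 Hz, RGB888 (bitspp = 24), nlanes = 4
 *   default config asks for hs_clk_rate = 148.5 MHz * 24 / 4 = 891 Mbit/s
 *   PHY validates/rounds to, say, 900 Mbit/s
 *   returned pclk = 900e6 * 4 / 24 = 150000000 Hz (150 MHz)
 */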
static int cdns_dsi_bridge_atomic_check(struct drm_bridge *bridge,
struct drm_bridge_state *bridge_state,
struct drm_crtc_state *crtc_state,
@@ -997,10 +945,32 @@ static int cdns_dsi_bridge_atomic_check(struct drm_bridge *bridge,
struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
struct cdns_dsi *dsi = input_to_dsi(input);
struct cdns_dsi_bridge_state *dsi_state = to_cdns_dsi_bridge_state(bridge_state);
- const struct drm_display_mode *mode = &crtc_state->mode;
+ struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
struct cdns_dsi_cfg *dsi_cfg = &dsi_state->dsi_cfg;
+ struct videomode vm;
+ long pclk;
+
+ /* cdns-dsi requires negative syncs */
+ adjusted_mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
+ adjusted_mode->flags |= DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC;
- return cdns_dsi_check_conf(dsi, mode, dsi_cfg, false);
+ /*
+	 * The DPHY PLL has quite coarsely grained clock rate options. See
+	 * what hsclk rate we can achieve based on the pixel clock, convert
+	 * it back to a pixel clock, and set that as adjusted_mode->clock.
+	 * This is all in the hope that the CRTC will be able to provide us
+	 * the requested clock, as otherwise the DPI and DSI clocks will be
+	 * out of sync.
+ */
+
+ pclk = cdns_dsi_round_pclk(dsi, adjusted_mode->clock * 1000);
+ if (pclk < 0)
+ return (int)pclk;
+
+ adjusted_mode->clock = pclk / 1000;
+
+ drm_display_mode_to_videomode(adjusted_mode, &vm);
+
+ return cdns_dsi_check_conf(dsi, &vm, dsi_cfg);
}
static struct drm_bridge_state *
@@ -1082,10 +1052,6 @@ static int cdns_dsi_attach(struct mipi_dsi_host *host,
if (output->dev)
return -EBUSY;
- /* We do not support burst mode yet. */
- if (dev->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
- return -ENOTSUPP;
-
/*
* The host <-> device link might be described using an OF-graph
* representation, in this case we extract the device of_node from
@@ -1442,4 +1408,3 @@ MODULE_AUTHOR("Boris Brezillon <boris.brezillon@bootlin.com>");
MODULE_DESCRIPTION("Cadence DSI driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:cdns-dsi");
-
diff --git a/drivers/gpu/drm/bridge/display-connector.c b/drivers/gpu/drm/bridge/display-connector.c
index 52b7b5889e6f..e9f16dbc9535 100644
--- a/drivers/gpu/drm/bridge/display-connector.c
+++ b/drivers/gpu/drm/bridge/display-connector.c
@@ -108,7 +108,7 @@ static u32 *display_connector_get_output_bus_fmts(struct drm_bridge *bridge,
struct drm_connector_state *conn_state,
unsigned int *num_output_fmts)
{
- struct drm_bridge *prev_bridge = drm_bridge_get_prev_bridge(bridge);
+ struct drm_bridge *prev_bridge __free(drm_bridge_put) = drm_bridge_get_prev_bridge(bridge);
struct drm_bridge_state *prev_bridge_state;
if (!prev_bridge || !prev_bridge->funcs->atomic_get_output_bus_fmts) {
@@ -151,7 +151,7 @@ static u32 *display_connector_get_input_bus_fmts(struct drm_bridge *bridge,
u32 output_fmt,
unsigned int *num_input_fmts)
{
- struct drm_bridge *prev_bridge = drm_bridge_get_prev_bridge(bridge);
+ struct drm_bridge *prev_bridge __free(drm_bridge_put) = drm_bridge_get_prev_bridge(bridge);
struct drm_bridge_state *prev_bridge_state;
if (!prev_bridge || !prev_bridge->funcs->atomic_get_input_bus_fmts) {
@@ -373,7 +373,8 @@ static int display_connector_probe(struct platform_device *pdev)
if (conn->bridge.ddc)
conn->bridge.ops |= DRM_BRIDGE_OP_EDID
| DRM_BRIDGE_OP_DETECT;
- if (conn->hpd_gpio)
+ /* Detecting the monitor requires reading DPCD */
+ if (conn->hpd_gpio && type != DRM_MODE_CONNECTOR_DisplayPort)
conn->bridge.ops |= DRM_BRIDGE_OP_DETECT;
if (conn->hpd_irq >= 0)
conn->bridge.ops |= DRM_BRIDGE_OP_HPD;
diff --git a/drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c b/drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c
index bea8346515b8..8f7a0d46601a 100644
--- a/drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c
+++ b/drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c
@@ -492,14 +492,12 @@ static int imx93_dsi_get_phy_configure_opts(struct imx93_dsi *dsi,
static enum drm_mode_status
imx93_dsi_validate_mode(struct imx93_dsi *dsi, const struct drm_display_mode *mode)
{
- struct drm_bridge *bridge = dw_mipi_dsi_get_bridge(dsi->dmd);
+ struct drm_bridge *dmd_bridge = dw_mipi_dsi_get_bridge(dsi->dmd);
+ struct drm_bridge *last_bridge __free(drm_bridge_put) =
+ drm_bridge_chain_get_last_bridge(dmd_bridge->encoder);
- /* Get the last bridge */
- while (drm_bridge_get_next_bridge(bridge))
- bridge = drm_bridge_get_next_bridge(bridge);
-
- if ((bridge->ops & DRM_BRIDGE_OP_DETECT) &&
- (bridge->ops & DRM_BRIDGE_OP_EDID)) {
+ if ((last_bridge->ops & DRM_BRIDGE_OP_DETECT) &&
+ (last_bridge->ops & DRM_BRIDGE_OP_EDID)) {
unsigned long pixel_clock_rate = mode->clock * 1000;
unsigned long rounded_rate;
diff --git a/drivers/gpu/drm/bridge/ite-it6263.c b/drivers/gpu/drm/bridge/ite-it6263.c
index cf813672b4ff..2eb8fba7016c 100644
--- a/drivers/gpu/drm/bridge/ite-it6263.c
+++ b/drivers/gpu/drm/bridge/ite-it6263.c
@@ -146,6 +146,7 @@
#define HDMI_COLOR_DEPTH_24 FIELD_PREP(HDMI_COLOR_DEPTH, 4)
#define HDMI_REG_PKT_GENERAL_CTRL 0xc6
+#define HDMI_REG_PKT_NULL_CTRL 0xc9
#define HDMI_REG_AVI_INFOFRM_CTRL 0xcd
#define ENABLE_PKT BIT(0)
#define REPEAT_PKT BIT(1)
@@ -154,6 +155,12 @@
* 3) HDMI register bank1: 0x130 ~ 0x1ff (HDMI packet registers)
*/
+/* NULL packet registers */
+/* Header Byte(HB): n = 0 ~ 2 */
+#define HDMI_REG_PKT_HB(n) (0x138 + (n))
+/* Packet Byte(PB): n = 0 ~ 27(HDMI_MAX_INFOFRAME_SIZE), n = 0 for checksum */
+#define HDMI_REG_PKT_PB(n) (0x13b + (n))
+
/* AVI packet registers */
#define HDMI_REG_AVI_DB1 0x158
#define HDMI_REG_AVI_DB2 0x159
@@ -224,7 +231,9 @@ static bool it6263_hdmi_writeable_reg(struct device *dev, unsigned int reg)
case HDMI_REG_HDMI_MODE:
case HDMI_REG_GCP:
case HDMI_REG_PKT_GENERAL_CTRL:
+ case HDMI_REG_PKT_NULL_CTRL:
case HDMI_REG_AVI_INFOFRM_CTRL:
+ case HDMI_REG_PKT_HB(0) ... HDMI_REG_PKT_PB(HDMI_MAX_INFOFRAME_SIZE):
case HDMI_REG_AVI_DB1:
case HDMI_REG_AVI_DB2:
case HDMI_REG_AVI_DB3:
@@ -755,10 +764,16 @@ static int it6263_hdmi_clear_infoframe(struct drm_bridge *bridge,
{
struct it6263 *it = bridge_to_it6263(bridge);
- if (type == HDMI_INFOFRAME_TYPE_AVI)
+ switch (type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
regmap_write(it->hdmi_regmap, HDMI_REG_AVI_INFOFRM_CTRL, 0);
- else
+ break;
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ regmap_write(it->hdmi_regmap, HDMI_REG_PKT_NULL_CTRL, 0);
+ break;
+ default:
dev_dbg(it->dev, "unsupported HDMI infoframe 0x%x\n", type);
+ }
return 0;
}
@@ -770,27 +785,36 @@ static int it6263_hdmi_write_infoframe(struct drm_bridge *bridge,
struct it6263 *it = bridge_to_it6263(bridge);
struct regmap *regmap = it->hdmi_regmap;
- if (type != HDMI_INFOFRAME_TYPE_AVI) {
+ switch (type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
+ /* write the first AVI infoframe data byte chunk(DB1-DB5) */
+ regmap_bulk_write(regmap, HDMI_REG_AVI_DB1,
+ &buffer[HDMI_INFOFRAME_HEADER_SIZE],
+ HDMI_AVI_DB_CHUNK1_SIZE);
+
+ /* write the second AVI infoframe data byte chunk(DB6-DB13) */
+ regmap_bulk_write(regmap, HDMI_REG_AVI_DB6,
+ &buffer[HDMI_INFOFRAME_HEADER_SIZE +
+ HDMI_AVI_DB_CHUNK1_SIZE],
+ HDMI_AVI_DB_CHUNK2_SIZE);
+
+ /* write checksum */
+ regmap_write(regmap, HDMI_REG_AVI_CSUM, buffer[3]);
+
+ regmap_write(regmap, HDMI_REG_AVI_INFOFRM_CTRL,
+ ENABLE_PKT | REPEAT_PKT);
+ break;
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ /* write header and payload */
+ regmap_bulk_write(regmap, HDMI_REG_PKT_HB(0), buffer, len);
+
+ regmap_write(regmap, HDMI_REG_PKT_NULL_CTRL,
+ ENABLE_PKT | REPEAT_PKT);
+ break;
+ default:
dev_dbg(it->dev, "unsupported HDMI infoframe 0x%x\n", type);
- return 0;
}
- /* write the first AVI infoframe data byte chunk(DB1-DB5) */
- regmap_bulk_write(regmap, HDMI_REG_AVI_DB1,
- &buffer[HDMI_INFOFRAME_HEADER_SIZE],
- HDMI_AVI_DB_CHUNK1_SIZE);
-
- /* write the second AVI infoframe data byte chunk(DB6-DB13) */
- regmap_bulk_write(regmap, HDMI_REG_AVI_DB6,
- &buffer[HDMI_INFOFRAME_HEADER_SIZE +
- HDMI_AVI_DB_CHUNK1_SIZE],
- HDMI_AVI_DB_CHUNK2_SIZE);
-
- /* write checksum */
- regmap_write(regmap, HDMI_REG_AVI_CSUM, buffer[3]);
-
- regmap_write(regmap, HDMI_REG_AVI_INFOFRM_CTRL, ENABLE_PKT | REPEAT_PKT);
-
return 0;
}
diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c
index 89649c17ffad..a094803ba7aa 100644
--- a/drivers/gpu/drm/bridge/ite-it6505.c
+++ b/drivers/gpu/drm/bridge/ite-it6505.c
@@ -21,7 +21,7 @@
#include <linux/wait.h>
#include <linux/bitfield.h>
-#include <crypto/hash.h>
+#include <crypto/sha1.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_hdcp_helper.h>
@@ -2107,35 +2107,6 @@ static void it6505_hdcp_part1_auth(struct it6505 *it6505)
it6505->hdcp_status = HDCP_AUTH_GOING;
}
-static int it6505_sha1_digest(struct it6505 *it6505, u8 *sha1_input,
- unsigned int size, u8 *output_av)
-{
- struct shash_desc *desc;
- struct crypto_shash *tfm;
- int err;
- struct device *dev = it6505->dev;
-
- tfm = crypto_alloc_shash("sha1", 0, 0);
- if (IS_ERR(tfm)) {
- dev_err(dev, "crypto_alloc_shash sha1 failed");
- return PTR_ERR(tfm);
- }
- desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
- if (!desc) {
- crypto_free_shash(tfm);
- return -ENOMEM;
- }
-
- desc->tfm = tfm;
- err = crypto_shash_digest(desc, sha1_input, size, output_av);
- if (err)
- dev_err(dev, "crypto_shash_digest sha1 failed");
-
- crypto_free_shash(tfm);
- kfree(desc);
- return err;
-}
-
static int it6505_setup_sha1_input(struct it6505 *it6505, u8 *sha1_input)
{
struct device *dev = it6505->dev;
@@ -2205,7 +2176,7 @@ static bool it6505_hdcp_part2_ksvlist_check(struct it6505 *it6505)
return false;
}
- it6505_sha1_digest(it6505, it6505->sha1_input, i, (u8 *)av);
+ sha1(it6505->sha1_input, i, (u8 *)av);
/*1B-05 V' must retry 3 times */
for (retry = 0; retry < 3; retry++) {
err = it6505_get_dpcd(it6505, DP_AUX_HDCP_V_PRIME(0), (u8 *)bv,
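The replacement is the one-shot SHA-1 helper from lib/crypto (hence the CRYPTO_LIB_SHA1 Kconfig change above): it is synchronous, allocation-free, and cannot fail. A minimal hedged usage sketch (buffer names illustrative):

#include <crypto/sha1.h>

u8 digest[SHA1_DIGEST_SIZE];

sha1(sha1_input, input_len, digest);	/* no tfm, no error path */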
diff --git a/drivers/gpu/drm/bridge/samsung-dsim.c b/drivers/gpu/drm/bridge/samsung-dsim.c
index b5dd71f6a990..eabc4c32f6ab 100644
--- a/drivers/gpu/drm/bridge/samsung-dsim.c
+++ b/drivers/gpu/drm/bridge/samsung-dsim.c
@@ -31,11 +31,10 @@
/* returns true iff both arguments logically differ */
#define NEQV(a, b) (!(a) ^ !(b))
-/* DSIM_STATUS */
+/* DSIM_STATUS or DSIM_DPHY_STATUS */
#define DSIM_STOP_STATE_DAT(x) (((x) & 0xf) << 0)
#define DSIM_STOP_STATE_CLK BIT(8)
#define DSIM_TX_READY_HS_CLK BIT(10)
-#define DSIM_PLL_STABLE BIT(31)
/* DSIM_SWRST */
#define DSIM_FUNCRST BIT(16)
@@ -46,17 +45,13 @@
#define DSIM_BTA_TIMEOUT(x) ((x) << 16)
/* DSIM_CLKCTRL */
-#define DSIM_ESC_PRESCALER(x) (((x) & 0xffff) << 0)
-#define DSIM_ESC_PRESCALER_MASK (0xffff << 0)
-#define DSIM_LANE_ESC_CLK_EN_CLK BIT(19)
-#define DSIM_LANE_ESC_CLK_EN_DATA(x) (((x) & 0xf) << 20)
-#define DSIM_LANE_ESC_CLK_EN_DATA_MASK (0xf << 20)
-#define DSIM_BYTE_CLKEN BIT(24)
-#define DSIM_BYTE_CLK_SRC(x) (((x) & 0x3) << 25)
-#define DSIM_BYTE_CLK_SRC_MASK (0x3 << 25)
-#define DSIM_PLL_BYPASS BIT(27)
-#define DSIM_ESC_CLKEN BIT(28)
-#define DSIM_TX_REQUEST_HSCLK BIT(31)
+#define DSIM_ESC_PRESCALER(x) (((x) & 0xffff) << 0)
+#define DSIM_ESC_PRESCALER_MASK (0xffff << 0)
+#define DSIM_LANE_ESC_CLK_EN_DATA(x, offset) (((x) & 0xf) << offset)
+#define DSIM_LANE_ESC_CLK_EN_DATA_MASK(offset) (0xf << offset)
+#define DSIM_BYTE_CLK_SRC(x) (((x) & 0x3) << 25)
+#define DSIM_BYTE_CLK_SRC_MASK (0x3 << 25)
+#define DSIM_PLL_BYPASS BIT(27)
/* DSIM_CONFIG */
#define DSIM_LANE_EN_CLK BIT(0)
@@ -91,7 +86,6 @@
*/
#define DSIM_HSE_DISABLE_MODE BIT(23)
#define DSIM_AUTO_MODE BIT(24)
-#define DSIM_VIDEO_MODE BIT(25)
#define DSIM_BURST_MODE BIT(26)
#define DSIM_SYNC_INFORM BIT(27)
#define DSIM_EOT_DISABLE BIT(28)
@@ -129,9 +123,9 @@
#define DSIM_MAIN_HBP_MASK ((0xffff) << 0)
/* DSIM_MSYNC */
-#define DSIM_MAIN_VSA(x) ((x) << 22)
+#define DSIM_MAIN_VSA(x, offset) ((x) << offset)
#define DSIM_MAIN_HSA(x) ((x) << 0)
-#define DSIM_MAIN_VSA_MASK ((0x3ff) << 22)
+#define DSIM_MAIN_VSA_MASK(offset) ((0x3ff) << offset)
#define DSIM_MAIN_HSA_MASK ((0xffff) << 0)
/* DSIM_SDRESOL */
@@ -157,6 +151,11 @@
#define DSIM_INT_RX_ECC_ERR BIT(15)
#define DSIM_INT_RX_CRC_ERR BIT(14)
+/* DSIM_SFRCTRL */
+#define DSIM_SFR_CTRL_STAND_BY BIT(4)
+#define DSIM_SFR_CTRL_SHADOW_UPDATE BIT(1)
+#define DSIM_SFR_CTRL_SHADOW_EN BIT(0)
+
/* DSIM_FIFOCTRL */
#define DSIM_RX_DATA_FULL BIT(25)
#define DSIM_RX_DATA_EMPTY BIT(24)
@@ -191,9 +190,7 @@
#define DSIM_PLL_DPDNSWAP_DAT (1 << 24)
#define DSIM_FREQ_BAND(x) ((x) << 24)
#define DSIM_PLL_EN BIT(23)
-#define DSIM_PLL_P(x, offset) ((x) << (offset))
-#define DSIM_PLL_M(x) ((x) << 4)
-#define DSIM_PLL_S(x) ((x) << 1)
+#define DSIM_PLL(x, offset) ((x) << (offset))
/* DSIM_PHYCTRL */
#define DSIM_PHYCTRL_ULPS_EXIT(x) (((x) & 0x1ff) << 0)
@@ -222,25 +219,42 @@
#define DSI_XFER_TIMEOUT_MS 100
#define DSI_RX_FIFO_EMPTY 0x30800002
-#define OLD_SCLK_MIPI_CLK_NAME "pll_clk"
-
#define PS_TO_CYCLE(ps, hz) DIV64_U64_ROUND_CLOSEST(((ps) * (hz)), 1000000000000ULL)
-static const char *const clk_names[5] = {
- "bus_clk",
- "sclk_mipi",
- "phyclk_mipidphy0_bitclkdiv8",
- "phyclk_mipidphy0_rxclkesc0",
- "sclk_rgb_vclk_to_dsim0"
-};
-
enum samsung_dsim_transfer_type {
EXYNOS_DSI_TX,
EXYNOS_DSI_RX,
};
+static struct clk_bulk_data exynos3_clk_bulk_data[] = {
+ { .id = "bus_clk" },
+ { .id = "pll_clk" },
+};
+
+static struct clk_bulk_data exynos4_clk_bulk_data[] = {
+ { .id = "bus_clk" },
+ { .id = "sclk_mipi" },
+};
+
+static struct clk_bulk_data exynos5433_clk_bulk_data[] = {
+ { .id = "bus_clk" },
+ { .id = "sclk_mipi" },
+ { .id = "phyclk_mipidphy0_bitclkdiv8" },
+ { .id = "phyclk_mipidphy0_rxclkesc0" },
+ { .id = "sclk_rgb_vclk_to_dsim0" },
+};
+
+static struct clk_bulk_data exynos7870_clk_bulk_data[] = {
+ { .id = "bus" },
+ { .id = "pll" },
+ { .id = "byte" },
+ { .id = "esc" },
+};
+
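With per-variant clk_bulk_data tables, clock handling collapses onto the bulk API. A hedged sketch of how such tables are typically consumed at probe time (this driver's actual probe path is outside the hunk):

	ret = devm_clk_bulk_get(dev, driver_data->num_clks,
				driver_data->clk_data);
	if (ret)
		return ret;

	ret = clk_bulk_prepare_enable(driver_data->num_clks,
				      driver_data->clk_data);
	if (ret)
		return ret;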
enum reg_idx {
- DSIM_STATUS_REG, /* Status register */
+ DSIM_STATUS_REG, /* Status register (legacy) */
+ DSIM_LINK_STATUS_REG, /* Link status register */
+ DSIM_DPHY_STATUS_REG, /* D-PHY status register */
DSIM_SWRST_REG, /* Software reset register */
DSIM_CLKCTRL_REG, /* Clock control register */
DSIM_TIMEOUT_REG, /* Time out register */
@@ -255,6 +269,7 @@ enum reg_idx {
DSIM_PKTHDR_REG, /* Packet Header FIFO register */
DSIM_PAYLOAD_REG, /* Payload FIFO register */
DSIM_RXFIFO_REG, /* Read FIFO register */
+ DSIM_SFRCTRL_REG, /* SFR standby and shadow control register */
DSIM_FIFOCTRL_REG, /* FIFO status and control register */
DSIM_PLLCTRL_REG, /* PLL control register */
DSIM_PHYCTRL_REG,
@@ -312,6 +327,32 @@ static const unsigned int exynos5433_reg_ofs[] = {
[DSIM_PHYTIMING2_REG] = 0xBC,
};
+static const unsigned int exynos7870_reg_ofs[] = {
+ [DSIM_LINK_STATUS_REG] = 0x04,
+ [DSIM_DPHY_STATUS_REG] = 0x08,
+ [DSIM_SWRST_REG] = 0x0C,
+ [DSIM_CLKCTRL_REG] = 0x10,
+ [DSIM_TIMEOUT_REG] = 0x14,
+ [DSIM_ESCMODE_REG] = 0x1C,
+ [DSIM_MDRESOL_REG] = 0x20,
+ [DSIM_MVPORCH_REG] = 0x24,
+ [DSIM_MHPORCH_REG] = 0x28,
+ [DSIM_MSYNC_REG] = 0x2C,
+ [DSIM_CONFIG_REG] = 0x30,
+ [DSIM_INTSRC_REG] = 0x34,
+ [DSIM_INTMSK_REG] = 0x38,
+ [DSIM_PKTHDR_REG] = 0x3C,
+ [DSIM_PAYLOAD_REG] = 0x40,
+ [DSIM_RXFIFO_REG] = 0x44,
+ [DSIM_SFRCTRL_REG] = 0x48,
+ [DSIM_FIFOCTRL_REG] = 0x4C,
+ [DSIM_PLLCTRL_REG] = 0x94,
+ [DSIM_PHYCTRL_REG] = 0xA4,
+ [DSIM_PHYTIMING_REG] = 0xB4,
+ [DSIM_PHYTIMING1_REG] = 0xB8,
+ [DSIM_PHYTIMING2_REG] = 0xBC,
+};
+
enum reg_value_idx {
RESET_TYPE,
PLL_TIMER,
@@ -384,6 +425,24 @@ static const unsigned int exynos5433_reg_values[] = {
[PHYTIMING_HS_TRAIL] = DSIM_PHYTIMING2_HS_TRAIL(0x0c),
};
+static const unsigned int exynos7870_reg_values[] = {
+ [RESET_TYPE] = DSIM_SWRST,
+ [PLL_TIMER] = 80000,
+ [STOP_STATE_CNT] = 0xa,
+ [PHYCTRL_ULPS_EXIT] = DSIM_PHYCTRL_ULPS_EXIT(0x177),
+ [PHYCTRL_VREG_LP] = 0,
+ [PHYCTRL_SLEW_UP] = 0,
+ [PHYTIMING_LPX] = DSIM_PHYTIMING_LPX(0x07),
+ [PHYTIMING_HS_EXIT] = DSIM_PHYTIMING_HS_EXIT(0x0c),
+ [PHYTIMING_CLK_PREPARE] = DSIM_PHYTIMING1_CLK_PREPARE(0x08),
+ [PHYTIMING_CLK_ZERO] = DSIM_PHYTIMING1_CLK_ZERO(0x2b),
+ [PHYTIMING_CLK_POST] = DSIM_PHYTIMING1_CLK_POST(0x0d),
+ [PHYTIMING_CLK_TRAIL] = DSIM_PHYTIMING1_CLK_TRAIL(0x09),
+ [PHYTIMING_HS_PREPARE] = DSIM_PHYTIMING2_HS_PREPARE(0x09),
+ [PHYTIMING_HS_ZERO] = DSIM_PHYTIMING2_HS_ZERO(0x0f),
+ [PHYTIMING_HS_TRAIL] = DSIM_PHYTIMING2_HS_TRAIL(0x0c),
+};
+
static const unsigned int imx8mm_dsim_reg_values[] = {
[RESET_TYPE] = DSIM_SWRST,
[PLL_TIMER] = 500,
@@ -405,13 +464,26 @@ static const unsigned int imx8mm_dsim_reg_values[] = {
static const struct samsung_dsim_driver_data exynos3_dsi_driver_data = {
.reg_ofs = exynos_reg_ofs,
.plltmr_reg = 0x50,
+ .has_legacy_status_reg = 1,
.has_freqband = 1,
.has_clklane_stop = 1,
- .num_clks = 2,
+ .clk_data = exynos3_clk_bulk_data,
+ .num_clks = ARRAY_SIZE(exynos3_clk_bulk_data),
.max_freq = 1000,
+ .wait_for_hdr_fifo = 1,
.wait_for_reset = 1,
.num_bits_resol = 11,
+ .video_mode_bit = 25,
+ .pll_stable_bit = 31,
+ .esc_clken_bit = 28,
+ .byte_clken_bit = 24,
+ .tx_req_hsclk_bit = 31,
+ .lane_esc_clk_bit = 19,
+ .lane_esc_data_offset = 20,
.pll_p_offset = 13,
+ .pll_m_offset = 4,
+ .pll_s_offset = 1,
+ .main_vsa_offset = 22,
.reg_values = reg_values,
.pll_fin_min = 6,
.pll_fin_max = 12,
@@ -424,13 +496,26 @@ static const struct samsung_dsim_driver_data exynos3_dsi_driver_data = {
static const struct samsung_dsim_driver_data exynos4_dsi_driver_data = {
.reg_ofs = exynos_reg_ofs,
.plltmr_reg = 0x50,
+ .has_legacy_status_reg = 1,
.has_freqband = 1,
.has_clklane_stop = 1,
- .num_clks = 2,
+ .clk_data = exynos4_clk_bulk_data,
+ .num_clks = ARRAY_SIZE(exynos4_clk_bulk_data),
.max_freq = 1000,
+ .wait_for_hdr_fifo = 1,
.wait_for_reset = 1,
.num_bits_resol = 11,
+ .video_mode_bit = 25,
+ .pll_stable_bit = 31,
+ .esc_clken_bit = 28,
+ .byte_clken_bit = 24,
+ .tx_req_hsclk_bit = 31,
+ .lane_esc_clk_bit = 19,
+ .lane_esc_data_offset = 20,
.pll_p_offset = 13,
+ .pll_m_offset = 4,
+ .pll_s_offset = 1,
+ .main_vsa_offset = 22,
.reg_values = reg_values,
.pll_fin_min = 6,
.pll_fin_max = 12,
@@ -443,11 +528,24 @@ static const struct samsung_dsim_driver_data exynos4_dsi_driver_data = {
static const struct samsung_dsim_driver_data exynos5_dsi_driver_data = {
.reg_ofs = exynos_reg_ofs,
.plltmr_reg = 0x58,
- .num_clks = 2,
+ .has_legacy_status_reg = 1,
+ .clk_data = exynos3_clk_bulk_data,
+ .num_clks = ARRAY_SIZE(exynos3_clk_bulk_data),
.max_freq = 1000,
+ .wait_for_hdr_fifo = 1,
.wait_for_reset = 1,
.num_bits_resol = 11,
+ .video_mode_bit = 25,
+ .pll_stable_bit = 31,
+ .esc_clken_bit = 28,
+ .byte_clken_bit = 24,
+ .tx_req_hsclk_bit = 31,
+ .lane_esc_clk_bit = 19,
+ .lane_esc_data_offset = 20,
.pll_p_offset = 13,
+ .pll_m_offset = 4,
+ .pll_s_offset = 1,
+ .main_vsa_offset = 22,
.reg_values = reg_values,
.pll_fin_min = 6,
.pll_fin_max = 12,
@@ -459,12 +557,25 @@ static const struct samsung_dsim_driver_data exynos5_dsi_driver_data = {
static const struct samsung_dsim_driver_data exynos5433_dsi_driver_data = {
.reg_ofs = exynos5433_reg_ofs,
.plltmr_reg = 0xa0,
+ .has_legacy_status_reg = 1,
.has_clklane_stop = 1,
- .num_clks = 5,
+ .clk_data = exynos5433_clk_bulk_data,
+ .num_clks = ARRAY_SIZE(exynos5433_clk_bulk_data),
.max_freq = 1500,
+ .wait_for_hdr_fifo = 1,
.wait_for_reset = 0,
.num_bits_resol = 12,
+ .video_mode_bit = 25,
+ .pll_stable_bit = 31,
+ .esc_clken_bit = 28,
+ .byte_clken_bit = 24,
+ .tx_req_hsclk_bit = 31,
+ .lane_esc_clk_bit = 19,
+ .lane_esc_data_offset = 20,
.pll_p_offset = 13,
+ .pll_m_offset = 4,
+ .pll_s_offset = 1,
+ .main_vsa_offset = 22,
.reg_values = exynos5433_reg_values,
.pll_fin_min = 6,
.pll_fin_max = 12,
@@ -476,12 +587,25 @@ static const struct samsung_dsim_driver_data exynos5433_dsi_driver_data = {
static const struct samsung_dsim_driver_data exynos5422_dsi_driver_data = {
.reg_ofs = exynos5433_reg_ofs,
.plltmr_reg = 0xa0,
+ .has_legacy_status_reg = 1,
.has_clklane_stop = 1,
- .num_clks = 2,
+ .clk_data = exynos3_clk_bulk_data,
+ .num_clks = ARRAY_SIZE(exynos3_clk_bulk_data),
.max_freq = 1500,
+ .wait_for_hdr_fifo = 1,
.wait_for_reset = 1,
.num_bits_resol = 12,
+ .video_mode_bit = 25,
+ .pll_stable_bit = 31,
+ .esc_clken_bit = 28,
+ .byte_clken_bit = 24,
+ .tx_req_hsclk_bit = 31,
+ .lane_esc_clk_bit = 19,
+ .lane_esc_data_offset = 20,
.pll_p_offset = 13,
+ .pll_m_offset = 4,
+ .pll_s_offset = 1,
+ .main_vsa_offset = 22,
.reg_values = exynos5422_reg_values,
.pll_fin_min = 6,
.pll_fin_max = 12,
@@ -490,19 +614,62 @@ static const struct samsung_dsim_driver_data exynos5422_dsi_driver_data = {
.min_freq = 500,
};
+static const struct samsung_dsim_driver_data exynos7870_dsi_driver_data = {
+ .reg_ofs = exynos7870_reg_ofs,
+ .plltmr_reg = 0xa0,
+ .has_clklane_stop = 1,
+ .has_sfrctrl = 1,
+ .clk_data = exynos7870_clk_bulk_data,
+ .num_clks = ARRAY_SIZE(exynos7870_clk_bulk_data),
+ .max_freq = 1500,
+ .wait_for_hdr_fifo = 0,
+ .wait_for_reset = 1,
+ .num_bits_resol = 12,
+ .video_mode_bit = 18,
+ .pll_stable_bit = 24,
+ .esc_clken_bit = 16,
+ .byte_clken_bit = 17,
+ .tx_req_hsclk_bit = 20,
+ .lane_esc_clk_bit = 8,
+ .lane_esc_data_offset = 9,
+ .pll_p_offset = 13,
+ .pll_m_offset = 3,
+ .pll_s_offset = 0,
+ .main_vsa_offset = 16,
+ .reg_values = exynos7870_reg_values,
+ .pll_fin_min = 6,
+ .pll_fin_max = 12,
+ .m_min = 41,
+ .m_max = 125,
+ .min_freq = 500,
+};
+
static const struct samsung_dsim_driver_data imx8mm_dsi_driver_data = {
.reg_ofs = exynos5433_reg_ofs,
.plltmr_reg = 0xa0,
+ .has_legacy_status_reg = 1,
.has_clklane_stop = 1,
- .num_clks = 2,
+ .clk_data = exynos4_clk_bulk_data,
+ .num_clks = ARRAY_SIZE(exynos4_clk_bulk_data),
.max_freq = 2100,
+ .wait_for_hdr_fifo = 1,
.wait_for_reset = 0,
.num_bits_resol = 12,
+ .video_mode_bit = 25,
+ .pll_stable_bit = 31,
+ .esc_clken_bit = 28,
+ .byte_clken_bit = 24,
+ .tx_req_hsclk_bit = 31,
+ .lane_esc_clk_bit = 19,
+ .lane_esc_data_offset = 20,
/*
* Unlike Exynos, PLL_P(PMS_P) offset 14 is used in i.MX8M Mini/Nano/Plus
* downstream driver - drivers/gpu/drm/bridge/sec-dsim.c
*/
.pll_p_offset = 14,
+ .pll_m_offset = 4,
+ .pll_s_offset = 1,
+ .main_vsa_offset = 22,
.reg_values = imx8mm_dsim_reg_values,
.pll_fin_min = 2,
.pll_fin_max = 30,
@@ -518,6 +685,7 @@ samsung_dsim_types[DSIM_TYPE_COUNT] = {
[DSIM_TYPE_EXYNOS5410] = &exynos5_dsi_driver_data,
[DSIM_TYPE_EXYNOS5422] = &exynos5422_dsi_driver_data,
[DSIM_TYPE_EXYNOS5433] = &exynos5433_dsi_driver_data,
+ [DSIM_TYPE_EXYNOS7870] = &exynos7870_dsi_driver_data,
[DSIM_TYPE_IMX8MM] = &imx8mm_dsi_driver_data,
[DSIM_TYPE_IMX8MP] = &imx8mm_dsi_driver_data,
};
@@ -653,8 +821,9 @@ static unsigned long samsung_dsim_set_pll(struct samsung_dsim *dsi,
writel(driver_data->reg_values[PLL_TIMER],
dsi->reg_base + driver_data->plltmr_reg);
- reg = DSIM_PLL_EN | DSIM_PLL_P(p, driver_data->pll_p_offset) |
- DSIM_PLL_M(m) | DSIM_PLL_S(s);
+ reg = DSIM_PLL_EN | DSIM_PLL(p, driver_data->pll_p_offset)
+ | DSIM_PLL(m, driver_data->pll_m_offset)
+ | DSIM_PLL(s, driver_data->pll_s_offset);
if (driver_data->has_freqband) {
static const unsigned long freq_bands[] = {
@@ -682,14 +851,17 @@ static unsigned long samsung_dsim_set_pll(struct samsung_dsim *dsi,
samsung_dsim_write(dsi, DSIM_PLLCTRL_REG, reg);
- timeout = 1000;
+ timeout = 3000;
do {
if (timeout-- == 0) {
dev_err(dsi->dev, "PLL failed to stabilize\n");
return 0;
}
- reg = samsung_dsim_read(dsi, DSIM_STATUS_REG);
- } while ((reg & DSIM_PLL_STABLE) == 0);
+ if (driver_data->has_legacy_status_reg)
+ reg = samsung_dsim_read(dsi, DSIM_STATUS_REG);
+ else
+ reg = samsung_dsim_read(dsi, DSIM_LINK_STATUS_REG);
+ } while ((reg & BIT(driver_data->pll_stable_bit)) == 0);
dsi->hs_clock = fout;
@@ -698,6 +870,7 @@ static unsigned long samsung_dsim_set_pll(struct samsung_dsim *dsi,
static int samsung_dsim_enable_clock(struct samsung_dsim *dsi)
{
+ const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
unsigned long hs_clk, byte_clk, esc_clk, pix_clk;
unsigned long esc_div;
u32 reg;
@@ -731,15 +904,17 @@ static int samsung_dsim_enable_clock(struct samsung_dsim *dsi)
hs_clk, byte_clk, esc_clk);
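+ /* CLKCTRL bit positions vary per SoC; take them from the driver data */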
reg = samsung_dsim_read(dsi, DSIM_CLKCTRL_REG);
- reg &= ~(DSIM_ESC_PRESCALER_MASK | DSIM_LANE_ESC_CLK_EN_CLK
- | DSIM_LANE_ESC_CLK_EN_DATA_MASK | DSIM_PLL_BYPASS
- | DSIM_BYTE_CLK_SRC_MASK);
- reg |= DSIM_ESC_CLKEN | DSIM_BYTE_CLKEN
- | DSIM_ESC_PRESCALER(esc_div)
- | DSIM_LANE_ESC_CLK_EN_CLK
- | DSIM_LANE_ESC_CLK_EN_DATA(BIT(dsi->lanes) - 1)
- | DSIM_BYTE_CLK_SRC(0)
- | DSIM_TX_REQUEST_HSCLK;
+ reg &= ~(DSIM_ESC_PRESCALER_MASK | BIT(driver_data->lane_esc_clk_bit)
+ | DSIM_LANE_ESC_CLK_EN_DATA_MASK(driver_data->lane_esc_data_offset)
+ | DSIM_PLL_BYPASS
+ | DSIM_BYTE_CLK_SRC_MASK);
+ reg |= BIT(driver_data->esc_clken_bit) | BIT(driver_data->byte_clken_bit)
+ | DSIM_ESC_PRESCALER(esc_div)
+ | BIT(driver_data->lane_esc_clk_bit)
+ | DSIM_LANE_ESC_CLK_EN_DATA(BIT(dsi->lanes) - 1,
+ driver_data->lane_esc_data_offset)
+ | DSIM_BYTE_CLK_SRC(0)
+ | BIT(driver_data->tx_req_hsclk_bit);
samsung_dsim_write(dsi, DSIM_CLKCTRL_REG, reg);
return 0;
@@ -843,11 +1018,14 @@ static void samsung_dsim_set_phy_ctrl(struct samsung_dsim *dsi)
static void samsung_dsim_disable_clock(struct samsung_dsim *dsi)
{
+ const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
u32 reg;
reg = samsung_dsim_read(dsi, DSIM_CLKCTRL_REG);
- reg &= ~(DSIM_LANE_ESC_CLK_EN_CLK | DSIM_LANE_ESC_CLK_EN_DATA_MASK
- | DSIM_ESC_CLKEN | DSIM_BYTE_CLKEN);
+ reg &= ~(BIT(driver_data->lane_esc_clk_bit)
+ | DSIM_LANE_ESC_CLK_EN_DATA_MASK(driver_data->lane_esc_data_offset)
+ | BIT(driver_data->esc_clken_bit)
+ | BIT(driver_data->byte_clken_bit));
samsung_dsim_write(dsi, DSIM_CLKCTRL_REG, reg);
reg = samsung_dsim_read(dsi, DSIM_PLLCTRL_REG);
@@ -891,7 +1069,7 @@ static int samsung_dsim_init_link(struct samsung_dsim *dsi)
* mode, otherwise it will support command mode.
*/
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
- reg |= DSIM_VIDEO_MODE;
+ reg |= BIT(driver_data->video_mode_bit);
/*
* The user manual describes that following bits are ignored in
@@ -962,7 +1140,10 @@ static int samsung_dsim_init_link(struct samsung_dsim *dsi)
return -EFAULT;
}
- reg = samsung_dsim_read(dsi, DSIM_STATUS_REG);
+ if (driver_data->has_legacy_status_reg)
+ reg = samsung_dsim_read(dsi, DSIM_STATUS_REG);
+ else
+ reg = samsung_dsim_read(dsi, DSIM_DPHY_STATUS_REG);
if ((reg & DSIM_STOP_STATE_DAT(lanes_mask))
!= DSIM_STOP_STATE_DAT(lanes_mask))
continue;
@@ -983,6 +1164,7 @@ static void samsung_dsim_set_display_mode(struct samsung_dsim *dsi)
{
struct drm_display_mode *m = &dsi->mode;
unsigned int num_bits_resol = dsi->driver_data->num_bits_resol;
+ unsigned int main_vsa_offset = dsi->driver_data->main_vsa_offset;
u32 reg;
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
@@ -1009,7 +1191,7 @@ static void samsung_dsim_set_display_mode(struct samsung_dsim *dsi)
reg = DSIM_MAIN_HFP(hfp) | DSIM_MAIN_HBP(hbp);
samsung_dsim_write(dsi, DSIM_MHPORCH_REG, reg);
- reg = DSIM_MAIN_VSA(m->vsync_end - m->vsync_start)
+ reg = DSIM_MAIN_VSA(m->vsync_end - m->vsync_start, main_vsa_offset)
| DSIM_MAIN_HSA(hsa);
samsung_dsim_write(dsi, DSIM_MSYNC_REG, reg);
}
@@ -1023,6 +1205,7 @@ static void samsung_dsim_set_display_mode(struct samsung_dsim *dsi)
static void samsung_dsim_set_display_enable(struct samsung_dsim *dsi, bool enable)
{
+ const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
u32 reg;
reg = samsung_dsim_read(dsi, DSIM_MDRESOL_REG);
@@ -1031,6 +1214,15 @@ static void samsung_dsim_set_display_enable(struct samsung_dsim *dsi, bool enabl
else
reg &= ~DSIM_MAIN_STAND_BY;
samsung_dsim_write(dsi, DSIM_MDRESOL_REG, reg);
+
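+ /* variants with SFR control mirror the standby bit in DSIM_SFRCTRL_REG */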
+ if (driver_data->has_sfrctrl) {
+ reg = samsung_dsim_read(dsi, DSIM_SFRCTRL_REG);
+ if (enable)
+ reg |= DSIM_SFR_CTRL_STAND_BY;
+ else
+ reg &= ~DSIM_SFR_CTRL_STAND_BY;
+ samsung_dsim_write(dsi, DSIM_SFRCTRL_REG, reg);
+ }
}
static int samsung_dsim_wait_for_hdr_fifo(struct samsung_dsim *dsi)
@@ -1087,6 +1279,7 @@ static void samsung_dsim_send_to_fifo(struct samsung_dsim *dsi,
{
struct device *dev = dsi->dev;
struct mipi_dsi_packet *pkt = &xfer->packet;
+ const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
const u8 *payload = pkt->payload + xfer->tx_done;
u16 length = pkt->payload_length - xfer->tx_done;
bool first = !xfer->tx_done;
@@ -1127,9 +1320,11 @@ static void samsung_dsim_send_to_fifo(struct samsung_dsim *dsi,
return;
reg = get_unaligned_le32(pkt->header);
- if (samsung_dsim_wait_for_hdr_fifo(dsi)) {
- dev_err(dev, "waiting for header FIFO timed out\n");
- return;
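+ /* only poll the header FIFO level where the variant supports it */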
+ if (driver_data->wait_for_hdr_fifo) {
+ if (samsung_dsim_wait_for_hdr_fifo(dsi)) {
+ dev_err(dev, "waiting for header FIFO timed out\n");
+ return;
+ }
}
if (NEQV(xfer->flags & MIPI_DSI_MSG_USE_LPM,
@@ -1922,7 +2117,7 @@ int samsung_dsim_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct samsung_dsim *dsi;
- int ret, i;
+ int ret;
dsi = devm_drm_bridge_alloc(dev, struct samsung_dsim, bridge, &samsung_dsim_bridge_funcs);
if (IS_ERR(dsi))
@@ -1946,23 +2141,11 @@ int samsung_dsim_probe(struct platform_device *pdev)
if (ret)
return dev_err_probe(dev, ret, "failed to get regulators\n");
- dsi->clks = devm_kcalloc(dev, dsi->driver_data->num_clks,
- sizeof(*dsi->clks), GFP_KERNEL);
- if (!dsi->clks)
- return -ENOMEM;
-
- for (i = 0; i < dsi->driver_data->num_clks; i++) {
- dsi->clks[i] = devm_clk_get(dev, clk_names[i]);
- if (IS_ERR(dsi->clks[i])) {
- if (strcmp(clk_names[i], "sclk_mipi") == 0) {
- dsi->clks[i] = devm_clk_get(dev, OLD_SCLK_MIPI_CLK_NAME);
- if (!IS_ERR(dsi->clks[i]))
- continue;
- }
-
- dev_info(dev, "failed to get the clock: %s\n", clk_names[i]);
- return PTR_ERR(dsi->clks[i]);
- }
+ ret = devm_clk_bulk_get(dev, dsi->driver_data->num_clks,
+ dsi->driver_data->clk_data);
+ if (ret) {
+ dev_err(dev, "failed to get clocks in bulk (%d)\n", ret);
+ return ret;
}
dsi->reg_base = devm_platform_ioremap_resource(pdev, 0);
@@ -2035,7 +2218,7 @@ static int samsung_dsim_suspend(struct device *dev)
{
struct samsung_dsim *dsi = dev_get_drvdata(dev);
const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
- int ret, i;
+ int ret;
usleep_range(10000, 20000);
@@ -2051,8 +2234,7 @@ static int samsung_dsim_suspend(struct device *dev)
phy_power_off(dsi->phy);
- for (i = driver_data->num_clks - 1; i > -1; i--)
- clk_disable_unprepare(dsi->clks[i]);
+ clk_bulk_disable_unprepare(driver_data->num_clks, driver_data->clk_data);
ret = regulator_bulk_disable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
if (ret < 0)
@@ -2065,7 +2247,7 @@ static int samsung_dsim_resume(struct device *dev)
{
struct samsung_dsim *dsi = dev_get_drvdata(dev);
const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
- int ret, i;
+ int ret;
ret = regulator_bulk_enable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
if (ret < 0) {
@@ -2073,11 +2255,9 @@ static int samsung_dsim_resume(struct device *dev)
return ret;
}
- for (i = 0; i < driver_data->num_clks; i++) {
- ret = clk_prepare_enable(dsi->clks[i]);
- if (ret < 0)
- goto err_clk;
- }
+ ret = clk_bulk_prepare_enable(driver_data->num_clks, driver_data->clk_data);
+ if (ret < 0)
+ goto err_clk;
ret = phy_power_on(dsi->phy);
if (ret < 0) {
@@ -2088,8 +2268,7 @@ static int samsung_dsim_resume(struct device *dev)
return 0;
err_clk:
- while (--i > -1)
- clk_disable_unprepare(dsi->clks[i]);
+ clk_bulk_disable_unprepare(driver_data->num_clks, driver_data->clk_data);
regulator_bulk_disable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
return ret;
diff --git a/drivers/gpu/drm/bridge/simple-bridge.c b/drivers/gpu/drm/bridge/simple-bridge.c
index 3d15ddd39470..e4d0bc2200f8 100644
--- a/drivers/gpu/drm/bridge/simple-bridge.c
+++ b/drivers/gpu/drm/bridge/simple-bridge.c
@@ -262,6 +262,16 @@ static const struct of_device_id simple_bridge_match[] = {
.connector_type = DRM_MODE_CONNECTOR_VGA,
},
}, {
+ .compatible = "radxa,ra620",
+ .data = &(const struct simple_bridge_info) {
+ .connector_type = DRM_MODE_CONNECTOR_HDMIA,
+ },
+ }, {
+ .compatible = "realtek,rtd2171",
+ .data = &(const struct simple_bridge_info) {
+ .connector_type = DRM_MODE_CONNECTOR_HDMIA,
+ },
+ }, {
.compatible = "ti,opa362",
.data = &(const struct simple_bridge_info) {
.connector_type = DRM_MODE_CONNECTOR_Composite,
diff --git a/drivers/gpu/drm/bridge/ssd2825.c b/drivers/gpu/drm/bridge/ssd2825.c
new file mode 100644
index 000000000000..f2fdbf7c117d
--- /dev/null
+++ b/drivers/gpu/drm/bridge/ssd2825.c
@@ -0,0 +1,775 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+#include <linux/units.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <video/mipi_display.h>
+
+#define SSD2825_DEVICE_ID_REG 0xb0
+#define SSD2825_RGB_INTERFACE_CTRL_REG_1 0xb1
+#define SSD2825_RGB_INTERFACE_CTRL_REG_2 0xb2
+#define SSD2825_RGB_INTERFACE_CTRL_REG_3 0xb3
+#define SSD2825_RGB_INTERFACE_CTRL_REG_4 0xb4
+#define SSD2825_RGB_INTERFACE_CTRL_REG_5 0xb5
+#define SSD2825_RGB_INTERFACE_CTRL_REG_6 0xb6
+#define SSD2825_NON_BURST_EV BIT(2)
+#define SSD2825_BURST BIT(3)
+#define SSD2825_PCKL_HIGH BIT(13)
+#define SSD2825_HSYNC_HIGH BIT(14)
+#define SSD2825_VSYNC_HIGH BIT(15)
+#define SSD2825_CONFIGURATION_REG 0xb7
+#define SSD2825_CONF_REG_HS BIT(0)
+#define SSD2825_CONF_REG_CKE BIT(1)
+#define SSD2825_CONF_REG_SLP BIT(2)
+#define SSD2825_CONF_REG_VEN BIT(3)
+#define SSD2825_CONF_REG_HCLK BIT(4)
+#define SSD2825_CONF_REG_CSS BIT(5)
+#define SSD2825_CONF_REG_DCS BIT(6)
+#define SSD2825_CONF_REG_REN BIT(7)
+#define SSD2825_CONF_REG_ECD BIT(8)
+#define SSD2825_CONF_REG_EOT BIT(9)
+#define SSD2825_CONF_REG_LPE BIT(10)
+#define SSD2825_VC_CTRL_REG 0xb8
+#define SSD2825_PLL_CTRL_REG 0xb9
+#define SSD2825_PLL_CONFIGURATION_REG 0xba
+#define SSD2825_CLOCK_CTRL_REG 0xbb
+#define SSD2825_PACKET_SIZE_CTRL_REG_1 0xbc
+#define SSD2825_PACKET_SIZE_CTRL_REG_2 0xbd
+#define SSD2825_PACKET_SIZE_CTRL_REG_3 0xbe
+#define SSD2825_PACKET_DROP_REG 0xbf
+#define SSD2825_OPERATION_CTRL_REG 0xc0
+#define SSD2825_MAX_RETURN_SIZE_REG 0xc1
+#define SSD2825_RETURN_DATA_COUNT_REG 0xc2
+#define SSD2825_ACK_RESPONSE_REG 0xc3
+#define SSD2825_LINE_CTRL_REG 0xc4
+#define SSD2825_INTERRUPT_CTRL_REG 0xc5
+#define SSD2825_INTERRUPT_STATUS_REG 0xc6
+#define SSD2825_ERROR_STATUS_REG 0xc7
+#define SSD2825_DATA_FORMAT_REG 0xc8
+#define SSD2825_DELAY_ADJ_REG_1 0xc9
+#define SSD2825_DELAY_ADJ_REG_2 0xca
+#define SSD2825_DELAY_ADJ_REG_3 0xcb
+#define SSD2825_DELAY_ADJ_REG_4 0xcc
+#define SSD2825_DELAY_ADJ_REG_5 0xcd
+#define SSD2825_DELAY_ADJ_REG_6 0xce
+#define SSD2825_HS_TX_TIMER_REG_1 0xcf
+#define SSD2825_HS_TX_TIMER_REG_2 0xd0
+#define SSD2825_LP_RX_TIMER_REG_1 0xd1
+#define SSD2825_LP_RX_TIMER_REG_2 0xd2
+#define SSD2825_TE_STATUS_REG 0xd3
+#define SSD2825_SPI_READ_REG 0xd4
+#define SSD2825_SPI_READ_REG_RESET 0xfa
+#define SSD2825_PLL_LOCK_REG 0xd5
+#define SSD2825_TEST_REG 0xd6
+#define SSD2825_TE_COUNT_REG 0xd7
+#define SSD2825_ANALOG_CTRL_REG_1 0xd8
+#define SSD2825_ANALOG_CTRL_REG_2 0xd9
+#define SSD2825_ANALOG_CTRL_REG_3 0xda
+#define SSD2825_ANALOG_CTRL_REG_4 0xdb
+#define SSD2825_INTERRUPT_OUT_CTRL_REG 0xdc
+#define SSD2825_RGB_INTERFACE_CTRL_REG_7 0xdd
+#define SSD2825_LANE_CONFIGURATION_REG 0xde
+#define SSD2825_DELAY_ADJ_REG_7 0xdf
+#define SSD2825_INPUT_PIN_CTRL_REG_1 0xe0
+#define SSD2825_INPUT_PIN_CTRL_REG_2 0xe1
+#define SSD2825_BIDIR_PIN_CTRL_REG_1 0xe2
+#define SSD2825_BIDIR_PIN_CTRL_REG_2 0xe3
+#define SSD2825_BIDIR_PIN_CTRL_REG_3 0xe4
+#define SSD2825_BIDIR_PIN_CTRL_REG_4 0xe5
+#define SSD2825_BIDIR_PIN_CTRL_REG_5 0xe6
+#define SSD2825_BIDIR_PIN_CTRL_REG_6 0xe7
+#define SSD2825_BIDIR_PIN_CTRL_REG_7 0xe8
+#define SSD2825_CABC_BRIGHTNESS_CTRL_REG_1 0xe9
+#define SSD2825_CABC_BRIGHTNESS_CTRL_REG_2 0xea
+#define SSD2825_CABC_BRIGHTNESS_STATUS_REG 0xeb
+#define SSD2825_READ_REG 0xff
+
+#define SSD2825_COM_BYTE 0x00
+#define SSD2825_DAT_BYTE 0x01
+
+#define SSD2828_LP_CLOCK_DIVIDER(n) (((n) - 1) & 0x3f)
+#define SSD2825_LP_MIN_CLK 5000 /* kHz */
+#define SSD2825_REF_MIN_CLK 2000 /* kHz */
+
+static const struct regulator_bulk_data ssd2825_supplies[] = {
+ { .supply = "dvdd" },
+ { .supply = "avdd" },
+ { .supply = "vddio" },
+};
+
+struct ssd2825_dsi_output {
+ struct mipi_dsi_device *dev;
+ struct drm_panel *panel;
+ struct drm_bridge *bridge;
+};
+
+struct ssd2825_priv {
+ struct spi_device *spi;
+ struct device *dev;
+
+ struct gpio_desc *reset_gpio;
+ struct regulator_bulk_data *supplies;
+
+ struct clk *tx_clk;
+
+ struct mipi_dsi_host dsi_host;
+ struct drm_bridge bridge;
+ struct ssd2825_dsi_output output;
+
+ struct mutex mlock; /* for host transfer operations */
+
+ u32 pd_lines; /* number of Parallel Port Input Data Lines */
+ u32 dsi_lanes; /* number of DSI Lanes */
+
+ /* Parameters for PLL programming */
+ u32 pll_freq_kbps; /* PLL in kbps */
+ u32 nibble_freq_khz; /* PLL div by 4 */
+
+ u32 hzd; /* HS Zero Delay in ns */
+ u32 hpd; /* HS Prepare Delay in ns */
+};
+
+static inline struct ssd2825_priv *dsi_host_to_ssd2825(struct mipi_dsi_host *host)
+{
+ return container_of(host, struct ssd2825_priv, dsi_host);
+}
+
+static inline struct ssd2825_priv *bridge_to_ssd2825(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct ssd2825_priv, bridge);
+}
+
+static int ssd2825_write_raw(struct ssd2825_priv *priv, u8 high_byte, u8 low_byte)
+{
+ struct spi_device *spi = priv->spi;
+ u8 tx_buf[2];
+
+ /*
+ * The low byte carries the value; the high byte selects the type of
+ * write cycle: 0 for command, 1 for data.
+ */
+ tx_buf[0] = low_byte;
+ tx_buf[1] = high_byte;
+
+ return spi_write(spi, tx_buf, 2);
+}
+
+static int ssd2825_write_reg(struct ssd2825_priv *priv, u8 reg, u16 command)
+{
+ u8 datal = (command & 0x00FF);
+ u8 datah = (command & 0xFF00) >> 8;
+ int ret;
+
+ /* Command write cycle */
+ ret = ssd2825_write_raw(priv, SSD2825_COM_BYTE, reg);
+ if (ret)
+ return ret;
+
+ /* Data write cycle bits 7-0 */
+ ret = ssd2825_write_raw(priv, SSD2825_DAT_BYTE, datal);
+ if (ret)
+ return ret;
+
+ /* Data write cycle bits 15-8 */
+ ret = ssd2825_write_raw(priv, SSD2825_DAT_BYTE, datah);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ssd2825_write_dsi(struct ssd2825_priv *priv, const u8 *command, int len)
+{
+ int ret, i;
+
+ ret = ssd2825_write_reg(priv, SSD2825_PACKET_SIZE_CTRL_REG_1, len);
+ if (ret)
+ return ret;
+
+ ret = ssd2825_write_raw(priv, SSD2825_COM_BYTE, SSD2825_PACKET_DROP_REG);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < len; i++) {
+ ret = ssd2825_write_raw(priv, SSD2825_DAT_BYTE, command[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ssd2825_read_raw(struct ssd2825_priv *priv, u8 cmd, u16 *data)
+{
+ struct spi_device *spi = priv->spi;
+ struct spi_message msg;
+ struct spi_transfer xfer[2];
+ u8 tx_buf[2];
+ u8 rx_buf[2];
+ int ret;
+
+ memset(&xfer, 0, sizeof(xfer));
+
+ tx_buf[1] = (cmd & 0xFF00) >> 8;
+ tx_buf[0] = (cmd & 0x00FF);
+
+ xfer[0].tx_buf = tx_buf;
+ xfer[0].bits_per_word = 9;
+ xfer[0].len = 2;
+
+ xfer[1].rx_buf = rx_buf;
+ xfer[1].bits_per_word = 16;
+ xfer[1].len = 2;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer[0], &msg);
+ spi_message_add_tail(&xfer[1], &msg);
+
+ ret = spi_sync(spi, &msg);
+ if (ret) {
+ dev_err(&spi->dev, "ssd2825 read raw failed %d\n", ret);
+ return ret;
+ }
+
+ *data = rx_buf[1] | (rx_buf[0] << 8);
+
+ return 0;
+}
+
+static int ssd2825_read_reg(struct ssd2825_priv *priv, u8 reg, u16 *data)
+{
+ int ret;
+
+ /* Reset the read register */
+ ret = ssd2825_write_reg(priv, SSD2825_SPI_READ_REG, SSD2825_SPI_READ_REG_RESET);
+ if (ret)
+ return ret;
+
+ /* Push the address to read */
+ ret = ssd2825_write_raw(priv, SSD2825_COM_BYTE, reg);
+ if (ret)
+ return ret;
+
+ /* Perform a reading cycle */
+ ret = ssd2825_read_raw(priv, SSD2825_SPI_READ_REG_RESET, data);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ssd2825_dsi_host_attach(struct mipi_dsi_host *host, struct mipi_dsi_device *dev)
+{
+ struct ssd2825_priv *priv = dsi_host_to_ssd2825(host);
+ struct drm_bridge *bridge;
+ struct drm_panel *panel;
+ struct device_node *ep;
+ int ret;
+
+ if (dev->lanes > 4) {
+ dev_err(priv->dev, "unsupported number of data lanes(%u)\n", dev->lanes);
+ return -EINVAL;
+ }
+
+ /*
+ * The SSD2825 supports both sync-pulse and sync-event video modes, but
+ * the driver currently implements only the sync-event variant.
+ */
+ if (!(dev->mode_flags & MIPI_DSI_MODE_VIDEO)) {
+ dev_err(priv->dev, "Only MIPI_DSI_MODE_VIDEO is supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ ret = drm_of_find_panel_or_bridge(host->dev->of_node, 1, 0, &panel, &bridge);
+ if (ret)
+ return ret;
+
+ if (panel) {
+ bridge = drm_panel_bridge_add_typed(panel, DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(bridge))
+ return PTR_ERR(bridge);
+ }
+
+ priv->output.dev = dev;
+ priv->output.bridge = bridge;
+ priv->output.panel = panel;
+
+ priv->dsi_lanes = dev->lanes;
+
+ /* get input ep (port0/endpoint0) */
+ ret = -EINVAL;
+ ep = of_graph_get_endpoint_by_regs(host->dev->of_node, 0, 0);
+ if (ep) {
+ ret = of_property_read_u32(ep, "bus-width", &priv->pd_lines);
+ of_node_put(ep);
+ }
+
+ if (ret)
+ priv->pd_lines = mipi_dsi_pixel_format_to_bpp(dev->format);
+
+ drm_bridge_add(&priv->bridge);
+
+ return 0;
+}
+
+static int ssd2825_dsi_host_detach(struct mipi_dsi_host *host, struct mipi_dsi_device *dev)
+{
+ struct ssd2825_priv *priv = dsi_host_to_ssd2825(host);
+
+ drm_bridge_remove(&priv->bridge);
+ if (priv->output.panel)
+ drm_panel_bridge_remove(priv->output.bridge);
+
+ return 0;
+}
+
+static ssize_t ssd2825_dsi_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct ssd2825_priv *priv = dsi_host_to_ssd2825(host);
+ u16 config;
+ int ret;
+
+ if (msg->rx_len) {
+ dev_warn(priv->dev, "MIPI rx is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ guard(mutex)(&priv->mlock);
+
+ ret = ssd2825_read_reg(priv, SSD2825_CONFIGURATION_REG, &config);
+ if (ret)
+ return ret;
+
+ switch (msg->type) {
+ case MIPI_DSI_DCS_SHORT_WRITE:
+ case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
+ case MIPI_DSI_DCS_LONG_WRITE:
+ config |= SSD2825_CONF_REG_DCS;
+ break;
+ case MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM:
+ case MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM:
+ case MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM:
+ case MIPI_DSI_GENERIC_LONG_WRITE:
+ config &= ~SSD2825_CONF_REG_DCS;
+ break;
+ case MIPI_DSI_DCS_READ:
+ case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM:
+ case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM:
+ case MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM:
+ default:
+ return 0;
+ }
+
+ ret = ssd2825_write_reg(priv, SSD2825_CONFIGURATION_REG, config);
+ if (ret)
+ return ret;
+
+ ret = ssd2825_write_reg(priv, SSD2825_VC_CTRL_REG, 0x0000);
+ if (ret)
+ return ret;
+
+ ret = ssd2825_write_dsi(priv, msg->tx_buf, msg->tx_len);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct mipi_dsi_host_ops ssd2825_dsi_host_ops = {
+ .attach = ssd2825_dsi_host_attach,
+ .detach = ssd2825_dsi_host_detach,
+ .transfer = ssd2825_dsi_host_transfer,
+};
+
+static void ssd2825_hw_reset(struct ssd2825_priv *priv)
+{
+ gpiod_set_value_cansleep(priv->reset_gpio, 1);
+ usleep_range(5000, 6000);
+ gpiod_set_value_cansleep(priv->reset_gpio, 0);
+ usleep_range(5000, 6000);
+}
+
+/*
+ * PLL configuration register settings.
+ *
+ * See the "PLL Configuration Register Description" in the SSD2825 datasheet.
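+ *
+ * Illustrative example (input numbers assumed, not from the datasheet):
+ * with a 24 MHz reference and a desired 500000 kbps PLL rate the loop
+ * settles on div_factor = 12 (24000 / 12 = 2000 kHz, the minimum
+ * reference), mul_factor = DIV_ROUND_UP(500000 * 12, 24000) = 250, so
+ * pll_freq_kbps = 24000 * 250 / 12 = 500000 and fr = 2.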
+ */
+static u16 construct_pll_config(struct ssd2825_priv *priv,
+ u32 desired_pll_freq_kbps, u32 reference_freq_khz)
+{
+ u32 div_factor = 1, mul_factor, fr = 0;
+
+ while (reference_freq_khz / (div_factor + 1) >= SSD2825_REF_MIN_CLK)
+ div_factor++;
+ if (div_factor > 31)
+ div_factor = 31;
+
+ mul_factor = DIV_ROUND_UP(desired_pll_freq_kbps * div_factor,
+ reference_freq_khz);
+
+ priv->pll_freq_kbps = reference_freq_khz * mul_factor / div_factor;
+ priv->nibble_freq_khz = priv->pll_freq_kbps / 4;
+
+ if (priv->pll_freq_kbps >= 501000)
+ fr = 3;
+ else if (priv->pll_freq_kbps >= 251000)
+ fr = 2;
+ else if (priv->pll_freq_kbps >= 126000)
+ fr = 1;
+
+ return (fr << 14) | (div_factor << 8) | mul_factor;
+}
+
+static int ssd2825_setup_pll(struct ssd2825_priv *priv,
+ const struct drm_display_mode *mode)
+{
+ u16 pll_config, lp_div;
+ u32 nibble_delay, pclk_mult, tx_freq_khz;
+ u8 hzd, hpd;
+
+ tx_freq_khz = clk_get_rate(priv->tx_clk) / KILO;
+ if (!tx_freq_khz)
+ tx_freq_khz = SSD2825_REF_MIN_CLK;
+
+ pclk_mult = priv->pd_lines / priv->dsi_lanes + 1;
+ pll_config = construct_pll_config(priv, pclk_mult * mode->clock,
+ tx_freq_khz);
+
+ lp_div = priv->pll_freq_kbps / (SSD2825_LP_MIN_CLK * 8);
+
+ /* nibble_delay in nanoseconds */
+ nibble_delay = MICRO / priv->nibble_freq_khz;
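+ /* e.g. a 500000 kbps PLL gives a 125000 kHz nibble clock, so 8 ns */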
+
+ hzd = priv->hzd / nibble_delay;
+ hpd = (priv->hpd - 4 * nibble_delay) / nibble_delay;
+
+ /* Disable PLL */
+ ssd2825_write_reg(priv, SSD2825_PLL_CTRL_REG, 0x0000);
+ ssd2825_write_reg(priv, SSD2825_LINE_CTRL_REG, 0x0001);
+
+ /* Set delays */
+ ssd2825_write_reg(priv, SSD2825_DELAY_ADJ_REG_1, (hzd << 8) | hpd);
+
+ /* Set PLL coefficients */
+ ssd2825_write_reg(priv, SSD2825_PLL_CONFIGURATION_REG, pll_config);
+
+ /* Clock Control Register */
+ ssd2825_write_reg(priv, SSD2825_CLOCK_CTRL_REG,
+ SSD2828_LP_CLOCK_DIVIDER(lp_div));
+
+ /* Enable PLL */
+ ssd2825_write_reg(priv, SSD2825_PLL_CTRL_REG, 0x0001);
+ ssd2825_write_reg(priv, SSD2825_VC_CTRL_REG, 0);
+
+ return 0;
+}
+
+static void ssd2825_bridge_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ struct ssd2825_priv *priv = bridge_to_ssd2825(bridge);
+ struct mipi_dsi_device *dsi_dev = priv->output.dev;
+ const struct drm_crtc_state *crtc_state;
+ const struct drm_display_mode *mode;
+ struct drm_connector *connector;
+ struct drm_crtc *crtc;
+ u32 input_bus_flags = bridge->timings->input_bus_flags;
+ u16 flags = 0, config;
+ u8 pixel_format;
+ int ret;
+
+ /* Power Sequence */
+ ret = clk_prepare_enable(priv->tx_clk);
+ if (ret)
+ dev_err(priv->dev, "error enabling tx_clk (%d)\n", ret);
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ssd2825_supplies), priv->supplies);
+ if (ret)
+ dev_err(priv->dev, "error enabling regulators (%d)\n", ret);
+
+ usleep_range(1000, 2000);
+
+ ssd2825_hw_reset(priv);
+
+ /* Perform SW reset */
+ ssd2825_write_reg(priv, SSD2825_OPERATION_CTRL_REG, 0x0100);
+
+ /* Set pixel format */
+ switch (dsi_dev->format) {
+ case MIPI_DSI_FMT_RGB565:
+ pixel_format = 0x00;
+ break;
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ pixel_format = 0x01;
+ break;
+ case MIPI_DSI_FMT_RGB666:
+ pixel_format = 0x02;
+ break;
+ case MIPI_DSI_FMT_RGB888:
+ default:
+ pixel_format = 0x03;
+ break;
+ }
+
+ connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
+ crtc = drm_atomic_get_new_connector_state(state, connector)->crtc;
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ mode = &crtc_state->adjusted_mode;
+
+ /* Set panel timings */
+ ssd2825_write_reg(priv, SSD2825_RGB_INTERFACE_CTRL_REG_1,
+ ((mode->vtotal - mode->vsync_end) << 8) |
+ (mode->htotal - mode->hsync_end));
+ ssd2825_write_reg(priv, SSD2825_RGB_INTERFACE_CTRL_REG_2,
+ ((mode->vtotal - mode->vsync_start) << 8) |
+ (mode->htotal - mode->hsync_start));
+ ssd2825_write_reg(priv, SSD2825_RGB_INTERFACE_CTRL_REG_3,
+ ((mode->vsync_start - mode->vdisplay) << 8) |
+ (mode->hsync_start - mode->hdisplay));
+ ssd2825_write_reg(priv, SSD2825_RGB_INTERFACE_CTRL_REG_4, mode->hdisplay);
+ ssd2825_write_reg(priv, SSD2825_RGB_INTERFACE_CTRL_REG_5, mode->vdisplay);
+
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ flags |= SSD2825_HSYNC_HIGH;
+
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ flags |= SSD2825_VSYNC_HIGH;
+
+ if (dsi_dev->mode_flags & MIPI_DSI_MODE_VIDEO)
+ flags |= SSD2825_NON_BURST_EV;
+
+ if (input_bus_flags & DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE)
+ flags |= SSD2825_PCKL_HIGH;
+
+ ssd2825_write_reg(priv, SSD2825_RGB_INTERFACE_CTRL_REG_6, flags | pixel_format);
+ ssd2825_write_reg(priv, SSD2825_LANE_CONFIGURATION_REG, dsi_dev->lanes - 1);
+ ssd2825_write_reg(priv, SSD2825_TEST_REG, 0x0004);
+
+ /* Call PLL configuration */
+ ssd2825_setup_pll(priv, mode);
+
+ usleep_range(10000, 11000);
+
+ config = SSD2825_CONF_REG_HS | SSD2825_CONF_REG_CKE | SSD2825_CONF_REG_DCS |
+ SSD2825_CONF_REG_ECD | SSD2825_CONF_REG_EOT;
+
+ if (dsi_dev->mode_flags & MIPI_DSI_MODE_LPM)
+ config &= ~SSD2825_CONF_REG_HS;
+
+ if (dsi_dev->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET)
+ config &= ~SSD2825_CONF_REG_EOT;
+
+ /* Initial DSI configuration register set */
+ ssd2825_write_reg(priv, SSD2825_CONFIGURATION_REG, config);
+ ssd2825_write_reg(priv, SSD2825_VC_CTRL_REG, 0);
+
+ if (priv->output.panel)
+ drm_panel_enable(priv->output.panel);
+}
+
+static void ssd2825_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ struct ssd2825_priv *priv = bridge_to_ssd2825(bridge);
+ struct mipi_dsi_device *dsi_dev = priv->output.dev;
+ u16 config;
+
+ config = SSD2825_CONF_REG_HS | SSD2825_CONF_REG_DCS |
+ SSD2825_CONF_REG_ECD | SSD2825_CONF_REG_EOT;
+
+ if (dsi_dev->mode_flags & MIPI_DSI_MODE_VIDEO)
+ config |= SSD2825_CONF_REG_VEN;
+
+ if (dsi_dev->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET)
+ config &= ~SSD2825_CONF_REG_EOT;
+
+ /* Complete configuration after DSI commands were sent */
+ ssd2825_write_reg(priv, SSD2825_CONFIGURATION_REG, config);
+ ssd2825_write_reg(priv, SSD2825_PLL_CTRL_REG, 0x0001);
+ ssd2825_write_reg(priv, SSD2825_VC_CTRL_REG, 0x0000);
+}
+
+static void ssd2825_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ struct ssd2825_priv *priv = bridge_to_ssd2825(bridge);
+ int ret;
+
+ msleep(100);
+
+ /* Exit DSI configuration register set */
+ ssd2825_write_reg(priv, SSD2825_CONFIGURATION_REG,
+ SSD2825_CONF_REG_ECD | SSD2825_CONF_REG_EOT);
+ ssd2825_write_reg(priv, SSD2825_VC_CTRL_REG, 0);
+
+ /* HW disable */
+ gpiod_set_value_cansleep(priv->reset_gpio, 1);
+ usleep_range(5000, 6000);
+
+ ret = regulator_bulk_disable(ARRAY_SIZE(ssd2825_supplies),
+ priv->supplies);
+ if (ret < 0)
+ dev_err(priv->dev, "error disabling regulators (%d)\n", ret);
+
+ clk_disable_unprepare(priv->tx_clk);
+}
+
+static int ssd2825_bridge_attach(struct drm_bridge *bridge, struct drm_encoder *encoder,
+ enum drm_bridge_attach_flags flags)
+{
+ struct ssd2825_priv *priv = bridge_to_ssd2825(bridge);
+
+ return drm_bridge_attach(bridge->encoder, priv->output.bridge, bridge,
+ flags);
+}
+
+static enum drm_mode_status
+ssd2825_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
+{
+ if (mode->hdisplay > 1366)
+ return MODE_H_ILLEGAL;
+
+ if (mode->vdisplay > 1366)
+ return MODE_V_ILLEGAL;
+
+ return MODE_OK;
+}
+
+static bool ssd2825_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ /* Default to positive sync */
+
+ if (!(adjusted_mode->flags &
+ (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
+ adjusted_mode->flags |= DRM_MODE_FLAG_PHSYNC;
+
+ if (!(adjusted_mode->flags &
+ (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
+ adjusted_mode->flags |= DRM_MODE_FLAG_PVSYNC;
+
+ return true;
+}
+
+static const struct drm_bridge_funcs ssd2825_bridge_funcs = {
+ .attach = ssd2825_bridge_attach,
+ .mode_valid = ssd2825_bridge_mode_valid,
+ .mode_fixup = ssd2825_mode_fixup,
+
+ .atomic_pre_enable = ssd2825_bridge_atomic_pre_enable,
+ .atomic_enable = ssd2825_bridge_atomic_enable,
+ .atomic_disable = ssd2825_bridge_atomic_disable,
+
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+};
+
+static const struct drm_bridge_timings default_ssd2825_timings = {
+ .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE
+ | DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE
+ | DRM_BUS_FLAG_DE_HIGH,
+};
+
+static int ssd2825_probe(struct spi_device *spi)
+{
+ struct ssd2825_priv *priv;
+ struct device *dev = &spi->dev;
+ struct device_node *np = dev->of_node;
+ int ret;
+
+ /* The driver supports only the 9-bit 3-wire SPI mode (8 data bits plus D/C) */
+ spi->bits_per_word = 9;
+
+ ret = spi_setup(spi);
+ if (ret)
+ return ret;
+
+ priv = devm_drm_bridge_alloc(dev, struct ssd2825_priv, bridge, &ssd2825_bridge_funcs);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
+
+ spi_set_drvdata(spi, priv);
+
+ priv->spi = spi;
+ priv->dev = dev;
+
+ mutex_init(&priv->mlock);
+
+ priv->tx_clk = devm_clk_get_optional(dev, NULL);
+ if (IS_ERR(priv->tx_clk))
+ return dev_err_probe(dev, PTR_ERR(priv->tx_clk),
+ "can't retrieve bridge tx_clk\n");
+
+ priv->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(priv->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(priv->reset_gpio),
+ "failed to get reset GPIO\n");
+
+ ret = devm_regulator_bulk_get_const(dev, ARRAY_SIZE(ssd2825_supplies),
+ ssd2825_supplies, &priv->supplies);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to get regulators\n");
+
+ priv->hzd = 133; /* ns */
+ device_property_read_u32(dev, "solomon,hs-zero-delay-ns", &priv->hzd);
+
+ priv->hpd = 40; /* ns */
+ device_property_read_u32(dev, "solomon,hs-prep-delay-ns", &priv->hpd);
+
+ priv->dsi_host.dev = dev;
+ priv->dsi_host.ops = &ssd2825_dsi_host_ops;
+
+ priv->bridge.timings = &default_ssd2825_timings;
+ priv->bridge.of_node = np;
+
+ return mipi_dsi_host_register(&priv->dsi_host);
+}
+
+static void ssd2825_remove(struct spi_device *spi)
+{
+ struct ssd2825_priv *priv = spi_get_drvdata(spi);
+
+ mipi_dsi_host_unregister(&priv->dsi_host);
+}
+
+static const struct of_device_id ssd2825_of_match[] = {
+ { .compatible = "solomon,ssd2825" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ssd2825_of_match);
+
+static struct spi_driver ssd2825_driver = {
+ .driver = {
+ .name = "ssd2825",
+ .of_match_table = ssd2825_of_match,
+ },
+ .probe = ssd2825_probe,
+ .remove = ssd2825_remove,
+};
+module_spi_driver(ssd2825_driver);
+
+MODULE_AUTHOR("Svyatoslav Ryhel <clamor95@gmail.com>");
+MODULE_DESCRIPTION("Solomon SSD2825 RGB to MIPI-DSI bridge SPI driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/bridge/synopsys/Kconfig b/drivers/gpu/drm/bridge/synopsys/Kconfig
index f3ab2f985f8c..2c5e532410de 100644
--- a/drivers/gpu/drm/bridge/synopsys/Kconfig
+++ b/drivers/gpu/drm/bridge/synopsys/Kconfig
@@ -1,4 +1,11 @@
# SPDX-License-Identifier: GPL-2.0-only
+config DRM_DW_DP
+ tristate
+ select DRM_DISPLAY_HELPER
+ select DRM_DISPLAY_DP_HELPER
+ select DRM_KMS_HELPER
+ select REGMAP_MMIO
+
config DRM_DW_HDMI
tristate
select DRM_DISPLAY_HDMI_HELPER
diff --git a/drivers/gpu/drm/bridge/synopsys/Makefile b/drivers/gpu/drm/bridge/synopsys/Makefile
index 9dc376d220ad..4dada44029ac 100644
--- a/drivers/gpu/drm/bridge/synopsys/Makefile
+++ b/drivers/gpu/drm/bridge/synopsys/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_DRM_DW_DP) += dw-dp.o
obj-$(CONFIG_DRM_DW_HDMI) += dw-hdmi.o
obj-$(CONFIG_DRM_DW_HDMI_AHB_AUDIO) += dw-hdmi-ahb-audio.o
obj-$(CONFIG_DRM_DW_HDMI_GP_AUDIO) += dw-hdmi-gp-audio.o
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-dp.c b/drivers/gpu/drm/bridge/synopsys/dw-dp.c
new file mode 100644
index 000000000000..9bbfe8da3de0
--- /dev/null
+++ b/drivers/gpu/drm/bridge/synopsys/dw-dp.c
@@ -0,0 +1,2095 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Synopsys DesignWare Cores DisplayPort Transmitter Controller
+ *
+ * Copyright (c) 2025 Rockchip Electronics Co., Ltd.
+ *
+ * Author: Andy Yan <andy.yan@rock-chips.com>
+ */
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/iopoll.h>
+#include <linux/irq.h>
+#include <linux/media-bus-format.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/phy/phy.h>
+#include <linux/unaligned.h>
+
+#include <drm/bridge/dw_dp.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
+#include <drm/display/drm_dp_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_of.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+
+#define DW_DP_VERSION_NUMBER 0x0000
+#define DW_DP_VERSION_TYPE 0x0004
+#define DW_DP_ID 0x0008
+
+#define DW_DP_CONFIG_REG1 0x0100
+#define DW_DP_CONFIG_REG2 0x0104
+#define DW_DP_CONFIG_REG3 0x0108
+
+#define DW_DP_CCTL 0x0200
+#define FORCE_HPD BIT(4)
+#define DEFAULT_FAST_LINK_TRAIN_EN BIT(2)
+#define ENHANCE_FRAMING_EN BIT(1)
+#define SCRAMBLE_DIS BIT(0)
+#define DW_DP_SOFT_RESET_CTRL 0x0204
+#define VIDEO_RESET BIT(5)
+#define AUX_RESET BIT(4)
+#define AUDIO_SAMPLER_RESET BIT(3)
+#define HDCP_MODULE_RESET BIT(2)
+#define PHY_SOFT_RESET BIT(1)
+#define CONTROLLER_RESET BIT(0)
+
+#define DW_DP_VSAMPLE_CTRL 0x0300
+#define PIXEL_MODE_SELECT GENMASK(22, 21)
+#define VIDEO_MAPPING GENMASK(20, 16)
+#define VIDEO_STREAM_ENABLE BIT(5)
+
+#define DW_DP_VSAMPLE_STUFF_CTRL1 0x0304
+
+#define DW_DP_VSAMPLE_STUFF_CTRL2 0x0308
+
+#define DW_DP_VINPUT_POLARITY_CTRL 0x030c
+#define DE_IN_POLARITY BIT(2)
+#define HSYNC_IN_POLARITY BIT(1)
+#define VSYNC_IN_POLARITY BIT(0)
+
+#define DW_DP_VIDEO_CONFIG1 0x0310
+#define HACTIVE GENMASK(31, 16)
+#define HBLANK GENMASK(15, 2)
+#define I_P BIT(1)
+#define R_V_BLANK_IN_OSC BIT(0)
+
+#define DW_DP_VIDEO_CONFIG2 0x0314
+#define VBLANK GENMASK(31, 16)
+#define VACTIVE GENMASK(15, 0)
+
+#define DW_DP_VIDEO_CONFIG3 0x0318
+#define H_SYNC_WIDTH GENMASK(31, 16)
+#define H_FRONT_PORCH GENMASK(15, 0)
+
+#define DW_DP_VIDEO_CONFIG4 0x031c
+#define V_SYNC_WIDTH GENMASK(31, 16)
+#define V_FRONT_PORCH GENMASK(15, 0)
+
+#define DW_DP_VIDEO_CONFIG5 0x0320
+#define INIT_THRESHOLD_HI GENMASK(22, 21)
+#define AVERAGE_BYTES_PER_TU_FRAC GENMASK(19, 16)
+#define INIT_THRESHOLD GENMASK(13, 7)
+#define AVERAGE_BYTES_PER_TU GENMASK(6, 0)
+
+#define DW_DP_VIDEO_MSA1 0x0324
+#define VSTART GENMASK(31, 16)
+#define HSTART GENMASK(15, 0)
+
+#define DW_DP_VIDEO_MSA2 0x0328
+#define MISC0 GENMASK(31, 24)
+
+#define DW_DP_VIDEO_MSA3 0x032c
+#define MISC1 GENMASK(31, 24)
+
+#define DW_DP_VIDEO_HBLANK_INTERVAL 0x0330
+#define HBLANK_INTERVAL_EN BIT(16)
+#define HBLANK_INTERVAL GENMASK(15, 0)
+
+#define DW_DP_AUD_CONFIG1 0x0400
+#define AUDIO_TIMESTAMP_VERSION_NUM GENMASK(29, 24)
+#define AUDIO_PACKET_ID GENMASK(23, 16)
+#define AUDIO_MUTE BIT(15)
+#define NUM_CHANNELS GENMASK(14, 12)
+#define HBR_MODE_ENABLE BIT(10)
+#define AUDIO_DATA_WIDTH GENMASK(9, 5)
+#define AUDIO_DATA_IN_EN GENMASK(4, 1)
+#define AUDIO_INF_SELECT BIT(0)
+
+#define DW_DP_SDP_VERTICAL_CTRL 0x0500
+#define EN_VERTICAL_SDP BIT(2)
+#define EN_AUDIO_STREAM_SDP BIT(1)
+#define EN_AUDIO_TIMESTAMP_SDP BIT(0)
+#define DW_DP_SDP_HORIZONTAL_CTRL 0x0504
+#define EN_HORIZONTAL_SDP BIT(2)
+#define DW_DP_SDP_STATUS_REGISTER 0x0508
+#define DW_DP_SDP_MANUAL_CTRL 0x050c
+#define DW_DP_SDP_STATUS_EN 0x0510
+
+#define DW_DP_SDP_REGISTER_BANK 0x0600
+#define SDP_REGS GENMASK(31, 0)
+
+#define DW_DP_PHYIF_CTRL 0x0a00
+#define PHY_WIDTH BIT(25)
+#define PHY_POWERDOWN GENMASK(20, 17)
+#define PHY_BUSY GENMASK(15, 12)
+#define SSC_DIS BIT(16)
+#define XMIT_ENABLE GENMASK(11, 8)
+#define PHY_LANES GENMASK(7, 6)
+#define PHY_RATE GENMASK(5, 4)
+#define TPS_SEL GENMASK(3, 0)
+
+#define DW_DP_PHY_TX_EQ 0x0a04
+#define DW_DP_CUSTOMPAT0 0x0a08
+#define DW_DP_CUSTOMPAT1 0x0a0c
+#define DW_DP_CUSTOMPAT2 0x0a10
+#define DW_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET 0x0a14
+#define DW_DP_PHYIF_PWRDOWN_CTRL 0x0a18
+
+#define DW_DP_AUX_CMD 0x0b00
+#define AUX_CMD_TYPE GENMASK(31, 28)
+#define AUX_ADDR GENMASK(27, 8)
+#define I2C_ADDR_ONLY BIT(4)
+#define AUX_LEN_REQ GENMASK(3, 0)
+
+#define DW_DP_AUX_STATUS 0x0b04
+#define AUX_TIMEOUT BIT(17)
+#define AUX_BYTES_READ GENMASK(23, 19)
+#define AUX_STATUS GENMASK(7, 4)
+
+#define DW_DP_AUX_DATA0 0x0b08
+#define DW_DP_AUX_DATA1 0x0b0c
+#define DW_DP_AUX_DATA2 0x0b10
+#define DW_DP_AUX_DATA3 0x0b14
+
+#define DW_DP_GENERAL_INTERRUPT 0x0d00
+#define VIDEO_FIFO_OVERFLOW_STREAM0 BIT(6)
+#define AUDIO_FIFO_OVERFLOW_STREAM0 BIT(5)
+#define SDP_EVENT_STREAM0 BIT(4)
+#define AUX_CMD_INVALID BIT(3)
+#define HDCP_EVENT BIT(2)
+#define AUX_REPLY_EVENT BIT(1)
+#define HPD_EVENT BIT(0)
+
+#define DW_DP_GENERAL_INTERRUPT_ENABLE 0x0d04
+#define HDCP_EVENT_EN BIT(2)
+#define AUX_REPLY_EVENT_EN BIT(1)
+#define HPD_EVENT_EN BIT(0)
+
+#define DW_DP_HPD_STATUS 0x0d08
+#define HPD_STATE GENMASK(11, 9)
+#define HPD_STATUS BIT(8)
+#define HPD_HOT_UNPLUG BIT(2)
+#define HPD_HOT_PLUG BIT(1)
+#define HPD_IRQ BIT(0)
+
+#define DW_DP_HPD_INTERRUPT_ENABLE 0x0d0c
+#define HPD_UNPLUG_ERR_EN BIT(3)
+#define HPD_UNPLUG_EN BIT(2)
+#define HPD_PLUG_EN BIT(1)
+#define HPD_IRQ_EN BIT(0)
+
+#define DW_DP_HDCP_CFG 0x0e00
+#define DPCD12PLUS BIT(7)
+#define CP_IRQ BIT(6)
+#define BYPENCRYPTION BIT(5)
+#define HDCP_LOCK BIT(4)
+#define ENCRYPTIONDISABLE BIT(3)
+#define ENABLE_HDCP_13 BIT(2)
+#define ENABLE_HDCP BIT(1)
+
+#define DW_DP_HDCP_OBS 0x0e04
+#define HDCP22_RE_AUTHENTICATION_REQ BIT(31)
+#define HDCP22_AUTHENTICATION_FAILED BIT(30)
+#define HDCP22_AUTHENTICATION_SUCCESS BIT(29)
+#define HDCP22_CAPABLE_SINK BIT(28)
+#define HDCP22_SINK_CAP_CHECK_COMPLETE BIT(27)
+#define HDCP22_STATE GENMASK(26, 24)
+#define HDCP22_BOOTED BIT(23)
+#define HDCP13_BSTATUS GENMASK(22, 19)
+#define REPEATER BIT(18)
+#define HDCP_CAPABLE BIT(17)
+#define STATEE GENMASK(16, 14)
+#define STATEOEG GENMASK(13, 11)
+#define STATER GENMASK(10, 8)
+#define STATEA GENMASK(7, 4)
+#define SUBSTATEA GENMASK(3, 1)
+#define HDCPENGAGED BIT(0)
+
+#define DW_DP_HDCP_APIINTCLR 0x0e08
+#define DW_DP_HDCP_APIINTSTAT 0x0e0c
+#define DW_DP_HDCP_APIINTMSK 0x0e10
+#define HDCP22_GPIOINT BIT(8)
+#define HDCP_ENGAGED BIT(7)
+#define HDCP_FAILED BIT(6)
+#define KSVSHA1CALCDONEINT BIT(5)
+#define AUXRESPNACK7TIMES BIT(4)
+#define AUXRESPTIMEOUT BIT(3)
+#define AUXRESPDEFER7TIMES BIT(2)
+#define KSVACCESSINT BIT(0)
+
+#define DW_DP_HDCP_KSVMEMCTRL 0x0e18
+#define KSVSHA1STATUS BIT(4)
+#define KSVMEMACCESS BIT(1)
+#define KSVMEMREQUEST BIT(0)
+
+#define DW_DP_HDCP_REG_BKSV0 0x3600
+#define DW_DP_HDCP_REG_BKSV1 0x3604
+#define DW_DP_HDCP_REG_ANCONF 0x3608
+#define AN_BYPASS BIT(0)
+
+#define DW_DP_HDCP_REG_AN0 0x360c
+#define DW_DP_HDCP_REG_AN1 0x3610
+#define DW_DP_HDCP_REG_RMLCTL 0x3614
+#define ODPK_DECRYPT_ENABLE BIT(0)
+
+#define DW_DP_HDCP_REG_RMLSTS 0x3618
+#define IDPK_WR_OK_STS BIT(6)
+#define IDPK_DATA_INDEX GENMASK(5, 0)
+#define DW_DP_HDCP_REG_SEED 0x361c
+#define DW_DP_HDCP_REG_DPK0 0x3620
+#define DW_DP_HDCP_REG_DPK1 0x3624
+#define DW_DP_HDCP22_GPIOSTS 0x3628
+#define DW_DP_HDCP22_GPIOCHNGSTS 0x362c
+#define DW_DP_HDCP_REG_DPK_CRC 0x3630
+
+#define DW_DP_MAX_REGISTER DW_DP_HDCP_REG_DPK_CRC
+
+#define SDP_REG_BANK_SIZE 16
+
+struct dw_dp_link_caps {
+ bool enhanced_framing;
+ bool tps3_supported;
+ bool tps4_supported;
+ bool fast_training;
+ bool channel_coding;
+ bool ssc;
+};
+
+struct dw_dp_link_train_set {
+ unsigned int voltage_swing[4];
+ unsigned int pre_emphasis[4];
+ bool voltage_max_reached[4];
+ bool pre_max_reached[4];
+};
+
+struct dw_dp_link_train {
+ struct dw_dp_link_train_set adjust;
+ bool clock_recovered;
+ bool channel_equalized;
+};
+
+struct dw_dp_link {
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
+ unsigned char revision;
+ unsigned int rate;
+ unsigned int lanes;
+ u8 sink_count;
+ u8 vsc_sdp_supported;
+ struct dw_dp_link_caps caps;
+ struct dw_dp_link_train train;
+ struct drm_dp_desc desc;
+};
+
+struct dw_dp_bridge_state {
+ struct drm_bridge_state base;
+ struct drm_display_mode mode;
+ u8 video_mapping;
+ u8 color_format;
+ u8 bpc;
+ u8 bpp;
+};
+
+struct dw_dp_sdp {
+ struct dp_sdp base;
+ unsigned long flags;
+};
+
+struct dw_dp_hotplug {
+ bool long_hpd;
+};
+
+struct dw_dp {
+ struct drm_bridge bridge;
+ struct device *dev;
+ struct regmap *regmap;
+ struct phy *phy;
+ struct clk *apb_clk;
+ struct clk *aux_clk;
+ struct clk *i2s_clk;
+ struct clk *spdif_clk;
+ struct clk *hdcp_clk;
+ struct reset_control *rstc;
+ struct completion complete;
+ int irq;
+ struct work_struct hpd_work;
+ struct dw_dp_hotplug hotplug;
+ /* Serialize hpd status access */
+ struct mutex irq_lock;
+
+ struct drm_dp_aux aux;
+
+ struct dw_dp_link link;
+ struct dw_dp_plat_data plat_data;
+ u8 pixel_mode;
+
+ DECLARE_BITMAP(sdp_reg_bank, SDP_REG_BANK_SIZE);
+};
+
+enum {
+ DW_DP_RGB_6BIT,
+ DW_DP_RGB_8BIT,
+ DW_DP_RGB_10BIT,
+ DW_DP_RGB_12BIT,
+ DW_DP_RGB_16BIT,
+ DW_DP_YCBCR444_8BIT,
+ DW_DP_YCBCR444_10BIT,
+ DW_DP_YCBCR444_12BIT,
+ DW_DP_YCBCR444_16BIT,
+ DW_DP_YCBCR422_8BIT,
+ DW_DP_YCBCR422_10BIT,
+ DW_DP_YCBCR422_12BIT,
+ DW_DP_YCBCR422_16BIT,
+ DW_DP_YCBCR420_8BIT,
+ DW_DP_YCBCR420_10BIT,
+ DW_DP_YCBCR420_12BIT,
+ DW_DP_YCBCR420_16BIT,
+};
+
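+/* pixels fed to the controller per pixel-clock cycle */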
+enum {
+ DW_DP_MP_SINGLE_PIXEL,
+ DW_DP_MP_DUAL_PIXEL,
+ DW_DP_MP_QUAD_PIXEL,
+};
+
+enum {
+ DW_DP_SDP_VERTICAL_INTERVAL = BIT(0),
+ DW_DP_SDP_HORIZONTAL_INTERVAL = BIT(1),
+};
+
+enum {
+ DW_DP_HPD_STATE_IDLE,
+ DW_DP_HPD_STATE_UNPLUG,
+ DW_DP_HPD_STATE_TIMEOUT = 4,
+ DW_DP_HPD_STATE_PLUG = 7
+};
+
+enum {
+ DW_DP_PHY_PATTERN_NONE,
+ DW_DP_PHY_PATTERN_TPS_1,
+ DW_DP_PHY_PATTERN_TPS_2,
+ DW_DP_PHY_PATTERN_TPS_3,
+ DW_DP_PHY_PATTERN_TPS_4,
+ DW_DP_PHY_PATTERN_SERM,
+ DW_DP_PHY_PATTERN_PBRS7,
+ DW_DP_PHY_PATTERN_CUSTOM_80BIT,
+ DW_DP_PHY_PATTERN_CP2520_1,
+ DW_DP_PHY_PATTERN_CP2520_2,
+};
+
+struct dw_dp_output_format {
+ u32 bus_format;
+ u32 color_format;
+ u8 video_mapping;
+ u8 bpc;
+ u8 bpp;
+};
+
+#define to_dw_dp_bridge_state(s) container_of(s, struct dw_dp_bridge_state, base)
+
+static const struct dw_dp_output_format dw_dp_output_formats[] = {
+ { MEDIA_BUS_FMT_RGB101010_1X30, DRM_COLOR_FORMAT_RGB444, DW_DP_RGB_10BIT, 10, 30 },
+ { MEDIA_BUS_FMT_RGB888_1X24, DRM_COLOR_FORMAT_RGB444, DW_DP_RGB_8BIT, 8, 24 },
+ { MEDIA_BUS_FMT_YUV10_1X30, DRM_COLOR_FORMAT_YCBCR444, DW_DP_YCBCR444_10BIT, 10, 30 },
+ { MEDIA_BUS_FMT_YUV8_1X24, DRM_COLOR_FORMAT_YCBCR444, DW_DP_YCBCR444_8BIT, 8, 24 },
+ { MEDIA_BUS_FMT_YUYV10_1X20, DRM_COLOR_FORMAT_YCBCR422, DW_DP_YCBCR422_10BIT, 10, 20 },
+ { MEDIA_BUS_FMT_YUYV8_1X16, DRM_COLOR_FORMAT_YCBCR422, DW_DP_YCBCR422_8BIT, 8, 16 },
+ { MEDIA_BUS_FMT_UYYVYY10_0_5X30, DRM_COLOR_FORMAT_YCBCR420, DW_DP_YCBCR420_10BIT, 10, 15 },
+ { MEDIA_BUS_FMT_UYYVYY8_0_5X24, DRM_COLOR_FORMAT_YCBCR420, DW_DP_YCBCR420_8BIT, 8, 12 },
+ { MEDIA_BUS_FMT_RGB666_1X24_CPADHI, DRM_COLOR_FORMAT_RGB444, DW_DP_RGB_6BIT, 6, 18 },
+};
+
+static const struct dw_dp_output_format *dw_dp_get_output_format(u32 bus_format)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(dw_dp_output_formats); i++)
+ if (dw_dp_output_formats[i].bus_format == bus_format)
+ return &dw_dp_output_formats[i];
+
+ return NULL;
+}
+
+static inline struct dw_dp *bridge_to_dp(struct drm_bridge *b)
+{
+ return container_of(b, struct dw_dp, bridge);
+}
+
+static struct dw_dp_bridge_state *dw_dp_get_bridge_state(struct dw_dp *dp)
+{
+ struct dw_dp_bridge_state *dw_bridge_state;
+ struct drm_bridge_state *state;
+
+ state = drm_priv_to_bridge_state(dp->bridge.base.state);
+ if (!state)
+ return NULL;
+
+ dw_bridge_state = to_dw_dp_bridge_state(state);
+ if (!dw_bridge_state)
+ return NULL;
+
+ return dw_bridge_state;
+}
+
+static inline void dw_dp_phy_set_pattern(struct dw_dp *dp, u32 pattern)
+{
+ regmap_update_bits(dp->regmap, DW_DP_PHYIF_CTRL, TPS_SEL,
+ FIELD_PREP(TPS_SEL, pattern));
+}
+
+static void dw_dp_phy_xmit_enable(struct dw_dp *dp, u32 lanes)
+{
+ u32 xmit_enable;
+
+ switch (lanes) {
+ case 4:
+ case 2:
+ case 1:
+ xmit_enable = GENMASK(lanes - 1, 0);
+ break;
+ case 0:
+ default:
+ xmit_enable = 0;
+ break;
+ }
+
+ regmap_update_bits(dp->regmap, DW_DP_PHYIF_CTRL, XMIT_ENABLE,
+ FIELD_PREP(XMIT_ENABLE, xmit_enable));
+}
+
+static bool dw_dp_bandwidth_ok(struct dw_dp *dp,
+ const struct drm_display_mode *mode, u32 bpp,
+ unsigned int lanes, unsigned int rate)
+{
+ u32 max_bw, req_bw;
+
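+ /* mode->clock is in kHz; rate is per-lane kByte/s (one byte per 8b/10b symbol) */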
+ req_bw = mode->clock * bpp / 8;
+ max_bw = lanes * rate;
+ if (req_bw > max_bw)
+ return false;
+
+ return true;
+}
+
+static bool dw_dp_hpd_detect(struct dw_dp *dp)
+{
+ u32 value;
+
+ regmap_read(dp->regmap, DW_DP_HPD_STATUS, &value);
+
+ return FIELD_GET(HPD_STATE, value) == DW_DP_HPD_STATE_PLUG;
+}
+
+static void dw_dp_link_caps_reset(struct dw_dp_link_caps *caps)
+{
+ caps->enhanced_framing = false;
+ caps->tps3_supported = false;
+ caps->tps4_supported = false;
+ caps->fast_training = false;
+ caps->channel_coding = false;
+}
+
+static void dw_dp_link_reset(struct dw_dp_link *link)
+{
+ link->vsc_sdp_supported = 0;
+ link->sink_count = 0;
+ link->revision = 0;
+ link->rate = 0;
+ link->lanes = 0;
+
+ dw_dp_link_caps_reset(&link->caps);
+ memset(link->dpcd, 0, sizeof(link->dpcd));
+}
+
+static int dw_dp_link_parse(struct dw_dp *dp, struct drm_connector *connector)
+{
+ struct dw_dp_link *link = &dp->link;
+ int ret;
+
+ dw_dp_link_reset(link);
+
+ ret = drm_dp_read_dpcd_caps(&dp->aux, link->dpcd);
+ if (ret < 0)
+ return ret;
+
+ drm_dp_read_desc(&dp->aux, &link->desc, drm_dp_is_branch(link->dpcd));
+
+ if (drm_dp_read_sink_count_cap(connector, link->dpcd, &link->desc)) {
+ ret = drm_dp_read_sink_count(&dp->aux);
+ if (ret < 0)
+ return ret;
+
+ link->sink_count = ret;
+
+ /* Dongle connected, but no display */
+ if (!link->sink_count)
+ return -ENODEV;
+ }
+
+ link->vsc_sdp_supported = drm_dp_vsc_sdp_supported(&dp->aux, link->dpcd);
+
+ link->revision = link->dpcd[DP_DPCD_REV];
+ link->rate = min_t(u32, min(dp->plat_data.max_link_rate,
+ dp->phy->attrs.max_link_rate * 100),
+ drm_dp_max_link_rate(link->dpcd));
+ link->lanes = min_t(u8, phy_get_bus_width(dp->phy),
+ drm_dp_max_lane_count(link->dpcd));
+
+ link->caps.enhanced_framing = drm_dp_enhanced_frame_cap(link->dpcd);
+ link->caps.tps3_supported = drm_dp_tps3_supported(link->dpcd);
+ link->caps.tps4_supported = drm_dp_tps4_supported(link->dpcd);
+ link->caps.fast_training = drm_dp_fast_training_cap(link->dpcd);
+ link->caps.channel_coding = drm_dp_channel_coding_supported(link->dpcd);
+ link->caps.ssc = !!(link->dpcd[DP_MAX_DOWNSPREAD] & DP_MAX_DOWNSPREAD_0_5);
+
+ return 0;
+}
+
+static int dw_dp_link_train_update_vs_emph(struct dw_dp *dp)
+{
+ struct dw_dp_link *link = &dp->link;
+ struct dw_dp_link_train_set *train_set = &link->train.adjust;
+ unsigned int lanes = dp->link.lanes;
+ union phy_configure_opts phy_cfg;
+ unsigned int *vs, *pe;
+ int i, ret;
+ u8 buf[4];
+
+ vs = train_set->voltage_swing;
+ pe = train_set->pre_emphasis;
+
+ for (i = 0; i < lanes; i++) {
+ phy_cfg.dp.voltage[i] = vs[i];
+ phy_cfg.dp.pre[i] = pe[i];
+ }
+
+ phy_cfg.dp.set_lanes = false;
+ phy_cfg.dp.set_rate = false;
+ phy_cfg.dp.set_voltages = true;
+
+ ret = phy_configure(dp->phy, &phy_cfg);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < lanes; i++) {
+ buf[i] = (vs[i] << DP_TRAIN_VOLTAGE_SWING_SHIFT) |
+ (pe[i] << DP_TRAIN_PRE_EMPHASIS_SHIFT);
+ if (train_set->voltage_max_reached[i])
+ buf[i] |= DP_TRAIN_MAX_SWING_REACHED;
+ if (train_set->pre_max_reached[i])
+ buf[i] |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+ }
+
+ ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET, buf, lanes);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int dw_dp_phy_configure(struct dw_dp *dp, unsigned int rate,
+ unsigned int lanes, bool ssc)
+{
+ union phy_configure_opts phy_cfg;
+ int ret;
+
+ /* Move PHY to P3 */
+ regmap_update_bits(dp->regmap, DW_DP_PHYIF_CTRL, PHY_POWERDOWN,
+ FIELD_PREP(PHY_POWERDOWN, 0x3));
+
+ phy_cfg.dp.lanes = lanes;
+ phy_cfg.dp.link_rate = rate / 100;
+ phy_cfg.dp.ssc = ssc;
+ phy_cfg.dp.set_lanes = true;
+ phy_cfg.dp.set_rate = true;
+ phy_cfg.dp.set_voltages = false;
+ ret = phy_configure(dp->phy, &phy_cfg);
+ if (ret)
+ return ret;
+
+ regmap_update_bits(dp->regmap, DW_DP_PHYIF_CTRL, PHY_LANES,
+ FIELD_PREP(PHY_LANES, lanes / 2));
+
+ /* Move PHY to P0 */
+ regmap_update_bits(dp->regmap, DW_DP_PHYIF_CTRL, PHY_POWERDOWN,
+ FIELD_PREP(PHY_POWERDOWN, 0x0));
+
+ dw_dp_phy_xmit_enable(dp, lanes);
+
+ return 0;
+}
+
+static int dw_dp_link_configure(struct dw_dp *dp)
+{
+ struct dw_dp_link *link = &dp->link;
+ u8 buf[2];
+ int ret;
+
+ ret = dw_dp_phy_configure(dp, link->rate, link->lanes, link->caps.ssc);
+ if (ret)
+ return ret;
+
+ buf[0] = drm_dp_link_rate_to_bw_code(link->rate);
+ buf[1] = link->lanes;
+
+ if (link->caps.enhanced_framing) {
+ buf[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+ regmap_update_bits(dp->regmap, DW_DP_CCTL, ENHANCE_FRAMING_EN,
+ FIELD_PREP(ENHANCE_FRAMING_EN, 1));
+ } else {
+ regmap_update_bits(dp->regmap, DW_DP_CCTL, ENHANCE_FRAMING_EN,
+ FIELD_PREP(ENHANCE_FRAMING_EN, 0));
+ }
+
+ ret = drm_dp_dpcd_write(&dp->aux, DP_LINK_BW_SET, buf, sizeof(buf));
+ if (ret < 0)
+ return ret;
+
+ buf[0] = link->caps.ssc ? DP_SPREAD_AMP_0_5 : 0;
+ buf[1] = link->caps.channel_coding ? DP_SET_ANSI_8B10B : 0;
+
+ ret = drm_dp_dpcd_write(&dp->aux, DP_DOWNSPREAD_CTRL, buf, sizeof(buf));
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void dw_dp_link_train_init(struct dw_dp_link_train *train)
+{
+ struct dw_dp_link_train_set *adj = &train->adjust;
+ unsigned int i;
+
+ for (i = 0; i < 4; i++) {
+ adj->voltage_swing[i] = 0;
+ adj->pre_emphasis[i] = 0;
+ adj->voltage_max_reached[i] = false;
+ adj->pre_max_reached[i] = false;
+ }
+
+ train->clock_recovered = false;
+ train->channel_equalized = false;
+}
+
+static bool dw_dp_link_train_valid(const struct dw_dp_link_train *train)
+{
+ return train->clock_recovered && train->channel_equalized;
+}
+
+static int dw_dp_link_train_set_pattern(struct dw_dp *dp, u32 pattern)
+{
+ u8 buf = 0;
+ int ret;
+
+ if (pattern && pattern != DP_TRAINING_PATTERN_4) {
+ buf |= DP_LINK_SCRAMBLING_DISABLE;
+
+ regmap_update_bits(dp->regmap, DW_DP_CCTL, SCRAMBLE_DIS,
+ FIELD_PREP(SCRAMBLE_DIS, 1));
+ } else {
+ regmap_update_bits(dp->regmap, DW_DP_CCTL, SCRAMBLE_DIS,
+ FIELD_PREP(SCRAMBLE_DIS, 0));
+ }
+
+ switch (pattern) {
+ case DP_TRAINING_PATTERN_DISABLE:
+ dw_dp_phy_set_pattern(dp, DW_DP_PHY_PATTERN_NONE);
+ break;
+ case DP_TRAINING_PATTERN_1:
+ dw_dp_phy_set_pattern(dp, DW_DP_PHY_PATTERN_TPS_1);
+ break;
+ case DP_TRAINING_PATTERN_2:
+ dw_dp_phy_set_pattern(dp, DW_DP_PHY_PATTERN_TPS_2);
+ break;
+ case DP_TRAINING_PATTERN_3:
+ dw_dp_phy_set_pattern(dp, DW_DP_PHY_PATTERN_TPS_3);
+ break;
+ case DP_TRAINING_PATTERN_4:
+ dw_dp_phy_set_pattern(dp, DW_DP_PHY_PATTERN_TPS_4);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
+ buf | pattern);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
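+/*
+ * The DP spec caps the combined drive strength: the higher the requested
+ * pre-emphasis level, the lower the maximum allowed voltage swing.
+ */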
+static u8 dw_dp_voltage_max(u8 preemph)
+{
+ switch (preemph & DP_TRAIN_PRE_EMPHASIS_MASK) {
+ case DP_TRAIN_PRE_EMPH_LEVEL_0:
+ return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
+ case DP_TRAIN_PRE_EMPH_LEVEL_1:
+ return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
+ case DP_TRAIN_PRE_EMPH_LEVEL_2:
+ return DP_TRAIN_VOLTAGE_SWING_LEVEL_1;
+ case DP_TRAIN_PRE_EMPH_LEVEL_3:
+ default:
+ return DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
+ }
+}
+
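+/*
+ * Record the sink's requested swing/pre-emphasis (clamped to level 3)
+ * and report whether the request changed since the last read.
+ */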
+static bool dw_dp_link_get_adjustments(struct dw_dp_link *link,
+ u8 status[DP_LINK_STATUS_SIZE])
+{
+ struct dw_dp_link_train_set *adj = &link->train.adjust;
+ unsigned int i;
+ bool changed = false;
+ u8 v = 0;
+ u8 p = 0;
+
+ for (i = 0; i < link->lanes; i++) {
+ v = drm_dp_get_adjust_request_voltage(status, i);
+ v >>= DP_TRAIN_VOLTAGE_SWING_SHIFT;
+ p = drm_dp_get_adjust_request_pre_emphasis(status, i);
+ p >>= DP_TRAIN_PRE_EMPHASIS_SHIFT;
+
+ if (v != adj->voltage_swing[i] || p != adj->pre_emphasis[i])
+ changed = true;
+
+ if (p >= (DP_TRAIN_PRE_EMPH_LEVEL_3 >> DP_TRAIN_PRE_EMPHASIS_SHIFT)) {
+ adj->pre_emphasis[i] = DP_TRAIN_PRE_EMPH_LEVEL_3 >>
+ DP_TRAIN_PRE_EMPHASIS_SHIFT;
+ adj->pre_max_reached[i] = true;
+ } else {
+ adj->pre_emphasis[i] = p;
+ adj->pre_max_reached[i] = false;
+ }
+
+ v = min(v, dw_dp_voltage_max(p));
+ if (v >= (DP_TRAIN_VOLTAGE_SWING_LEVEL_3 >> DP_TRAIN_VOLTAGE_SWING_SHIFT)) {
+ adj->voltage_swing[i] = DP_TRAIN_VOLTAGE_SWING_LEVEL_3 >>
+ DP_TRAIN_VOLTAGE_SWING_SHIFT;
+ adj->voltage_max_reached[i] = true;
+ } else {
+ adj->voltage_swing[i] = v;
+ adj->voltage_max_reached[i] = false;
+ }
+ }
+
+ return changed;
+}
+
+static int dw_dp_link_clock_recovery(struct dw_dp *dp)
+{
+ struct dw_dp_link *link = &dp->link;
+ u8 status[DP_LINK_STATUS_SIZE];
+ unsigned int tries = 0;
+ int ret;
+ bool adj_changed;
+
+ ret = dw_dp_link_train_set_pattern(dp, DP_TRAINING_PATTERN_1);
+ if (ret)
+ return ret;
+
+ for (;;) {
+ ret = dw_dp_link_train_update_vs_emph(dp);
+ if (ret)
+ return ret;
+
+ drm_dp_link_train_clock_recovery_delay(&dp->aux, link->dpcd);
+
+ ret = drm_dp_dpcd_read_link_status(&dp->aux, status);
+ if (ret < 0) {
+ dev_err(dp->dev, "failed to read link status: %d\n", ret);
+ return ret;
+ }
+
+ if (drm_dp_clock_recovery_ok(status, link->lanes)) {
+ link->train.clock_recovered = true;
+ break;
+ }
+
+ /*
+ * According to DP spec 1.4, if the current adjustment request is the
+ * same as the previous one, retry at most 5 times before giving up.
+ */
+ adj_changed = dw_dp_link_get_adjustments(link, status);
+ if (!adj_changed)
+ tries++;
+ else
+ tries = 0;
+
+ if (tries == 5)
+ break;
+ }
+
+ return 0;
+}
+
+static int dw_dp_link_channel_equalization(struct dw_dp *dp)
+{
+ struct dw_dp_link *link = &dp->link;
+ u8 status[DP_LINK_STATUS_SIZE], pattern;
+ unsigned int tries;
+ int ret;
+
+ if (link->caps.tps4_supported)
+ pattern = DP_TRAINING_PATTERN_4;
+ else if (link->caps.tps3_supported)
+ pattern = DP_TRAINING_PATTERN_3;
+ else
+ pattern = DP_TRAINING_PATTERN_2;
+ ret = dw_dp_link_train_set_pattern(dp, pattern);
+ if (ret)
+ return ret;
+
+ for (tries = 1; tries < 5; tries++) {
+ ret = dw_dp_link_train_update_vs_emph(dp);
+ if (ret)
+ return ret;
+
+ drm_dp_link_train_channel_eq_delay(&dp->aux, link->dpcd);
+
+ ret = drm_dp_dpcd_read_link_status(&dp->aux, status);
+ if (ret < 0)
+ return ret;
+
+ if (!drm_dp_clock_recovery_ok(status, link->lanes)) {
+ dev_err(dp->dev, "clock recovery lost while equalizing channel\n");
+ link->train.clock_recovered = false;
+ break;
+ }
+
+ if (drm_dp_channel_eq_ok(status, link->lanes)) {
+ link->train.channel_equalized = true;
+ break;
+ }
+
+ dw_dp_link_get_adjustments(link, status);
+ }
+
+ return 0;
+}
+
+static int dw_dp_link_downgrade(struct dw_dp *dp)
+{
+ struct dw_dp_link *link = &dp->link;
+ struct dw_dp_bridge_state *state;
+
+ state = dw_dp_get_bridge_state(dp);
+
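+ /* step down 8.1 -> 5.4 -> 2.7 -> 1.62 Gbps; RBR is the floor */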
+ switch (link->rate) {
+ case 162000:
+ return -EINVAL;
+ case 270000:
+ link->rate = 162000;
+ break;
+ case 540000:
+ link->rate = 270000;
+ break;
+ case 810000:
+ link->rate = 540000;
+ break;
+ }
+
+ if (!dw_dp_bandwidth_ok(dp, &state->mode, state->bpp, link->lanes,
+ link->rate))
+ return -E2BIG;
+
+ return 0;
+}
+
+static int dw_dp_link_train_full(struct dw_dp *dp)
+{
+ struct dw_dp_link *link = &dp->link;
+ int ret;
+
+retry:
+ dw_dp_link_train_init(&link->train);
+
+ dev_dbg(dp->dev, "full-training link: %u lane%s at %u MHz\n",
+ link->lanes, (link->lanes > 1) ? "s" : "", link->rate / 100);
+
+ ret = dw_dp_link_configure(dp);
+ if (ret < 0) {
+ dev_err(dp->dev, "failed to configure DP link: %d\n", ret);
+ return ret;
+ }
+
+ ret = dw_dp_link_clock_recovery(dp);
+ if (ret < 0) {
+ dev_err(dp->dev, "clock recovery failed: %d\n", ret);
+ goto out;
+ }
+
+ if (!link->train.clock_recovered) {
+ dev_err(dp->dev, "clock recovery failed, downgrading link\n");
+
+ ret = dw_dp_link_downgrade(dp);
+ if (ret < 0)
+ goto out;
+ else
+ goto retry;
+ }
+
+ dev_dbg(dp->dev, "clock recovery succeeded\n");
+
+ ret = dw_dp_link_channel_equalization(dp);
+ if (ret < 0) {
+ dev_err(dp->dev, "channel equalization failed: %d\n", ret);
+ goto out;
+ }
+
+ if (!link->train.channel_equalized) {
+ dev_err(dp->dev, "channel equalization failed, downgrading link\n");
+
+ ret = dw_dp_link_downgrade(dp);
+ if (ret < 0)
+ goto out;
+ else
+ goto retry;
+ }
+
+ dev_dbg(dp->dev, "channel equalization succeeded\n");
+
+out:
+ dw_dp_link_train_set_pattern(dp, DP_TRAINING_PATTERN_DISABLE);
+ return ret;
+}
+
+static int dw_dp_link_train_fast(struct dw_dp *dp)
+{
+ struct dw_dp_link *link = &dp->link;
+ int ret;
+ u8 status[DP_LINK_STATUS_SIZE];
+ u8 pattern;
+
+ dw_dp_link_train_init(&link->train);
+
+ dev_dbg(dp->dev, "fast-training link: %u lane%s at %u MHz\n",
+ link->lanes, (link->lanes > 1) ? "s" : "", link->rate / 100);
+
+ ret = dw_dp_link_configure(dp);
+ if (ret < 0) {
+ dev_err(dp->dev, "failed to configure DP link: %d\n", ret);
+ return ret;
+ }
+
+ ret = dw_dp_link_train_set_pattern(dp, DP_TRAINING_PATTERN_1);
+ if (ret)
+ goto out;
+
+ usleep_range(500, 1000);
+
+ if (link->caps.tps4_supported)
+ pattern = DP_TRAINING_PATTERN_4;
+ else if (link->caps.tps3_supported)
+ pattern = DP_TRAINING_PATTERN_3;
+ else
+ pattern = DP_TRAINING_PATTERN_2;
+ ret = dw_dp_link_train_set_pattern(dp, pattern);
+ if (ret)
+ goto out;
+
+ usleep_range(500, 1000);
+
+ ret = drm_dp_dpcd_read_link_status(&dp->aux, status);
+ if (ret < 0) {
+ dev_err(dp->dev, "failed to read link status: %d\n", ret);
+ goto out;
+ }
+
+ if (!drm_dp_clock_recovery_ok(status, link->lanes)) {
+ dev_err(dp->dev, "clock recovery failed\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ if (!drm_dp_channel_eq_ok(status, link->lanes)) {
+ dev_err(dp->dev, "channel equalization failed\n");
+ ret = -EIO;
+ goto out;
+ }
+
+out:
+ dw_dp_link_train_set_pattern(dp, DP_TRAINING_PATTERN_DISABLE);
+ return ret;
+}
+
+static int dw_dp_link_train(struct dw_dp *dp)
+{
+ struct dw_dp_link *link = &dp->link;
+ int ret;
+
+ if (link->caps.fast_training) {
+ if (dw_dp_link_train_valid(&link->train)) {
+ ret = dw_dp_link_train_fast(dp);
+ if (ret < 0)
+ dev_err(dp->dev, "fast link training failed: %d\n", ret);
+ else
+ return 0;
+ }
+ }
+
+ ret = dw_dp_link_train_full(dp);
+ if (ret < 0) {
+ dev_err(dp->dev, "full link training failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dw_dp_send_sdp(struct dw_dp *dp, struct dw_dp_sdp *sdp)
+{
+ const u8 *payload = sdp->base.db;
+ u32 reg;
+ int i, nr;
+
+ nr = find_first_zero_bit(dp->sdp_reg_bank, SDP_REG_BANK_SIZE);
+ if (nr < SDP_REG_BANK_SIZE)
+ set_bit(nr, dp->sdp_reg_bank);
+ else
+ return -EBUSY;
+
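+	/*
+	 * Each slot in the SDP register bank is 9 32-bit words: one
+	 * word of SDP header followed by 32 bytes of payload.
+	 */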
+ reg = DW_DP_SDP_REGISTER_BANK + nr * 9 * 4;
+
+ /* SDP header */
+ regmap_write(dp->regmap, reg, get_unaligned_le32(&sdp->base.sdp_header));
+
+ /* SDP data payload */
+ for (i = 1; i < 9; i++, payload += 4)
+ regmap_write(dp->regmap, reg + i * 4,
+ FIELD_PREP(SDP_REGS, get_unaligned_le32(payload)));
+
+ if (sdp->flags & DW_DP_SDP_VERTICAL_INTERVAL)
+ regmap_update_bits(dp->regmap, DW_DP_SDP_VERTICAL_CTRL,
+ EN_VERTICAL_SDP << nr,
+ EN_VERTICAL_SDP << nr);
+
+ if (sdp->flags & DW_DP_SDP_HORIZONTAL_INTERVAL)
+ regmap_update_bits(dp->regmap, DW_DP_SDP_HORIZONTAL_CTRL,
+ EN_HORIZONTAL_SDP << nr,
+ EN_HORIZONTAL_SDP << nr);
+
+ return 0;
+}
+
+static int dw_dp_send_vsc_sdp(struct dw_dp *dp)
+{
+ struct dw_dp_bridge_state *state;
+ struct dw_dp_sdp sdp = {};
+ struct drm_dp_vsc_sdp vsc = {};
+
+ state = dw_dp_get_bridge_state(dp);
+ if (!state)
+ return -EINVAL;
+
+ vsc.bpc = state->bpc;
+
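+	/*
+	 * Revision 5, length 0x13: a VSC SDP carrying pixel encoding
+	 * and colorimetry format information, as defined by DP 1.4.
+	 */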
+ vsc.sdp_type = DP_SDP_VSC;
+ vsc.revision = 0x5;
+ vsc.length = 0x13;
+ vsc.content_type = DP_CONTENT_TYPE_NOT_DEFINED;
+
+ sdp.flags = DW_DP_SDP_VERTICAL_INTERVAL;
+
+ switch (state->color_format) {
+ case DRM_COLOR_FORMAT_YCBCR444:
+ vsc.pixelformat = DP_PIXELFORMAT_YUV444;
+ break;
+ case DRM_COLOR_FORMAT_YCBCR420:
+ vsc.pixelformat = DP_PIXELFORMAT_YUV420;
+ break;
+ case DRM_COLOR_FORMAT_YCBCR422:
+ vsc.pixelformat = DP_PIXELFORMAT_YUV422;
+ break;
+ case DRM_COLOR_FORMAT_RGB444:
+ default:
+ vsc.pixelformat = DP_PIXELFORMAT_RGB;
+ break;
+ }
+
+ if (state->color_format == DRM_COLOR_FORMAT_RGB444) {
+ vsc.colorimetry = DP_COLORIMETRY_DEFAULT;
+ vsc.dynamic_range = DP_DYNAMIC_RANGE_VESA;
+ } else {
+ vsc.colorimetry = DP_COLORIMETRY_BT709_YCC;
+ vsc.dynamic_range = DP_DYNAMIC_RANGE_CTA;
+ }
+
+ drm_dp_vsc_sdp_pack(&vsc, &sdp.base);
+
+ return dw_dp_send_sdp(dp, &sdp);
+}
+
+static int dw_dp_video_set_pixel_mode(struct dw_dp *dp)
+{
+ switch (dp->pixel_mode) {
+ case DW_DP_MP_SINGLE_PIXEL:
+ case DW_DP_MP_DUAL_PIXEL:
+ case DW_DP_MP_QUAD_PIXEL:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ regmap_update_bits(dp->regmap, DW_DP_VSAMPLE_CTRL, PIXEL_MODE_SELECT,
+ FIELD_PREP(PIXEL_MODE_SELECT, dp->pixel_mode));
+
+ return 0;
+}
+
+static bool dw_dp_video_need_vsc_sdp(struct dw_dp *dp)
+{
+ struct dw_dp_link *link = &dp->link;
+ struct dw_dp_bridge_state *state;
+
+ state = dw_dp_get_bridge_state(dp);
+ if (!state)
+		return false;
+
+ if (!link->vsc_sdp_supported)
+ return false;
+
+ if (state->color_format == DRM_COLOR_FORMAT_YCBCR420)
+ return true;
+
+ return false;
+}
+
+static int dw_dp_video_set_msa(struct dw_dp *dp, u8 color_format, u8 bpc,
+ u16 vstart, u16 hstart)
+{
+ u16 misc = 0;
+
+ if (dw_dp_video_need_vsc_sdp(dp))
+ misc |= DP_MSA_MISC_COLOR_VSC_SDP;
+
+ switch (color_format) {
+ case DRM_COLOR_FORMAT_RGB444:
+ misc |= DP_MSA_MISC_COLOR_RGB;
+ break;
+ case DRM_COLOR_FORMAT_YCBCR444:
+ misc |= DP_MSA_MISC_COLOR_YCBCR_444_BT709;
+ break;
+ case DRM_COLOR_FORMAT_YCBCR422:
+ misc |= DP_MSA_MISC_COLOR_YCBCR_422_BT709;
+ break;
+ case DRM_COLOR_FORMAT_YCBCR420:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (bpc) {
+ case 6:
+ misc |= DP_MSA_MISC_6_BPC;
+ break;
+ case 8:
+ misc |= DP_MSA_MISC_8_BPC;
+ break;
+ case 10:
+ misc |= DP_MSA_MISC_10_BPC;
+ break;
+ case 12:
+ misc |= DP_MSA_MISC_12_BPC;
+ break;
+ case 16:
+ misc |= DP_MSA_MISC_16_BPC;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ regmap_write(dp->regmap, DW_DP_VIDEO_MSA1,
+ FIELD_PREP(VSTART, vstart) | FIELD_PREP(HSTART, hstart));
+ regmap_write(dp->regmap, DW_DP_VIDEO_MSA2, FIELD_PREP(MISC0, misc));
+ regmap_write(dp->regmap, DW_DP_VIDEO_MSA3, FIELD_PREP(MISC1, misc >> 8));
+
+ return 0;
+}
+
+static void dw_dp_video_disable(struct dw_dp *dp)
+{
+ regmap_update_bits(dp->regmap, DW_DP_VSAMPLE_CTRL, VIDEO_STREAM_ENABLE,
+ FIELD_PREP(VIDEO_STREAM_ENABLE, 0));
+}
+
+static int dw_dp_video_enable(struct dw_dp *dp)
+{
+ struct dw_dp_link *link = &dp->link;
+ struct dw_dp_bridge_state *state;
+ struct drm_display_mode *mode;
+ u8 color_format, bpc, bpp;
+ u8 init_threshold, vic;
+ u32 hstart, hactive, hblank, h_sync_width, h_front_porch;
+ u32 vstart, vactive, vblank, v_sync_width, v_front_porch;
+ u32 peak_stream_bandwidth, link_bandwidth;
+ u32 average_bytes_per_tu, average_bytes_per_tu_frac;
+ u32 ts, hblank_interval;
+ u32 value;
+ int ret;
+
+ state = dw_dp_get_bridge_state(dp);
+ if (!state)
+ return -EINVAL;
+
+ bpc = state->bpc;
+ bpp = state->bpp;
+ color_format = state->color_format;
+ mode = &state->mode;
+
+ vstart = mode->vtotal - mode->vsync_start;
+ hstart = mode->htotal - mode->hsync_start;
+
+ ret = dw_dp_video_set_pixel_mode(dp);
+ if (ret)
+ return ret;
+
+ ret = dw_dp_video_set_msa(dp, color_format, bpc, vstart, hstart);
+ if (ret)
+ return ret;
+
+ regmap_update_bits(dp->regmap, DW_DP_VSAMPLE_CTRL, VIDEO_MAPPING,
+ FIELD_PREP(VIDEO_MAPPING, state->video_mapping));
+
+ /* Configure DW_DP_VINPUT_POLARITY_CTRL register */
+ value = 0;
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ value |= FIELD_PREP(HSYNC_IN_POLARITY, 1);
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ value |= FIELD_PREP(VSYNC_IN_POLARITY, 1);
+ regmap_write(dp->regmap, DW_DP_VINPUT_POLARITY_CTRL, value);
+
+ /* Configure DW_DP_VIDEO_CONFIG1 register */
+ hactive = mode->hdisplay;
+ hblank = mode->htotal - mode->hdisplay;
+ value = FIELD_PREP(HACTIVE, hactive) | FIELD_PREP(HBLANK, hblank);
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ value |= FIELD_PREP(I_P, 1);
+ vic = drm_match_cea_mode(mode);
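+	/* These VICs are the interlaced CEA-861 modes */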
+ if (vic == 5 || vic == 6 || vic == 7 ||
+ vic == 10 || vic == 11 || vic == 20 ||
+ vic == 21 || vic == 22 || vic == 39 ||
+ vic == 25 || vic == 26 || vic == 40 ||
+ vic == 44 || vic == 45 || vic == 46 ||
+ vic == 50 || vic == 51 || vic == 54 ||
+ vic == 55 || vic == 58 || vic == 59)
+ value |= R_V_BLANK_IN_OSC;
+ regmap_write(dp->regmap, DW_DP_VIDEO_CONFIG1, value);
+
+ /* Configure DW_DP_VIDEO_CONFIG2 register */
+ vblank = mode->vtotal - mode->vdisplay;
+ vactive = mode->vdisplay;
+ regmap_write(dp->regmap, DW_DP_VIDEO_CONFIG2,
+ FIELD_PREP(VBLANK, vblank) | FIELD_PREP(VACTIVE, vactive));
+
+ /* Configure DW_DP_VIDEO_CONFIG3 register */
+ h_sync_width = mode->hsync_end - mode->hsync_start;
+ h_front_porch = mode->hsync_start - mode->hdisplay;
+ regmap_write(dp->regmap, DW_DP_VIDEO_CONFIG3,
+ FIELD_PREP(H_SYNC_WIDTH, h_sync_width) |
+ FIELD_PREP(H_FRONT_PORCH, h_front_porch));
+
+ /* Configure DW_DP_VIDEO_CONFIG4 register */
+ v_sync_width = mode->vsync_end - mode->vsync_start;
+ v_front_porch = mode->vsync_start - mode->vdisplay;
+ regmap_write(dp->regmap, DW_DP_VIDEO_CONFIG4,
+ FIELD_PREP(V_SYNC_WIDTH, v_sync_width) |
+ FIELD_PREP(V_FRONT_PORCH, v_front_porch));
+
+ /* Configure DW_DP_VIDEO_CONFIG5 register */
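+	/*
+	 * A transfer unit (TU) carries 64 link symbols per lane, so the
+	 * average TU payload is 64 * (stream bandwidth / link bandwidth),
+	 * programmed as an integer plus a fraction in tenths. Worked
+	 * example: a 148500 kHz mode at 24 bpp gives 445500 kB/s; with
+	 * 4 lanes at 2.7 Gbps, ts = 445500 * 64 / 1080 = 26400, i.e.
+	 * 26 bytes per TU and a fraction of 4 (26.4 bytes).
+	 */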
+ peak_stream_bandwidth = mode->clock * bpp / 8;
+ link_bandwidth = (link->rate / 1000) * link->lanes;
+ ts = peak_stream_bandwidth * 64 / link_bandwidth;
+ average_bytes_per_tu = ts / 1000;
+ average_bytes_per_tu_frac = ts / 100 - average_bytes_per_tu * 10;
+ if (dp->pixel_mode == DW_DP_MP_SINGLE_PIXEL) {
+ if (average_bytes_per_tu < 6)
+ init_threshold = 32;
+ else if (hblank <= 80 && color_format != DRM_COLOR_FORMAT_YCBCR420)
+ init_threshold = 12;
+ else if (hblank <= 40 && color_format == DRM_COLOR_FORMAT_YCBCR420)
+ init_threshold = 3;
+ else
+ init_threshold = 16;
+ } else {
+ u32 t1 = 0, t2 = 0, t3 = 0;
+
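+		/*
+		 * init_threshold = t1 * t2 * t3 / 10^6, where t1 is a
+		 * bpc/lane dependent scale factor (x1000), t2 is
+		 * proportional to the link-rate/pixel-clock ratio
+		 * (x1000), and t3 is the average TU payload rounded up
+		 * to whole bytes.
+		 */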
+ switch (bpc) {
+ case 6:
+ t1 = (4 * 1000 / 9) * link->lanes;
+ break;
+ case 8:
+ if (color_format == DRM_COLOR_FORMAT_YCBCR422) {
+ t1 = (1000 / 2) * link->lanes;
+ } else {
+ if (dp->pixel_mode == DW_DP_MP_DUAL_PIXEL)
+ t1 = (1000 / 3) * link->lanes;
+ else
+ t1 = (3000 / 16) * link->lanes;
+ }
+ break;
+ case 10:
+ if (color_format == DRM_COLOR_FORMAT_YCBCR422)
+ t1 = (2000 / 5) * link->lanes;
+ else
+ t1 = (4000 / 15) * link->lanes;
+ break;
+ case 12:
+ if (color_format == DRM_COLOR_FORMAT_YCBCR422) {
+ if (dp->pixel_mode == DW_DP_MP_DUAL_PIXEL)
+ t1 = (1000 / 6) * link->lanes;
+ else
+ t1 = (1000 / 3) * link->lanes;
+ } else {
+ t1 = (2000 / 9) * link->lanes;
+ }
+ break;
+ case 16:
+ if (color_format != DRM_COLOR_FORMAT_YCBCR422 &&
+ dp->pixel_mode == DW_DP_MP_DUAL_PIXEL)
+ t1 = (1000 / 6) * link->lanes;
+ else
+ t1 = (1000 / 4) * link->lanes;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (color_format == DRM_COLOR_FORMAT_YCBCR420)
+ t2 = (link->rate / 4) * 1000 / (mode->clock / 2);
+ else
+ t2 = (link->rate / 4) * 1000 / mode->clock;
+
+ if (average_bytes_per_tu_frac)
+ t3 = average_bytes_per_tu + 1;
+ else
+ t3 = average_bytes_per_tu;
+ init_threshold = t1 * t2 * t3 / (1000 * 1000);
+ if (init_threshold <= 16 || average_bytes_per_tu < 10)
+ init_threshold = 40;
+ }
+
+ regmap_write(dp->regmap, DW_DP_VIDEO_CONFIG5,
+ FIELD_PREP(INIT_THRESHOLD_HI, init_threshold >> 6) |
+ FIELD_PREP(AVERAGE_BYTES_PER_TU_FRAC, average_bytes_per_tu_frac) |
+ FIELD_PREP(INIT_THRESHOLD, init_threshold) |
+ FIELD_PREP(AVERAGE_BYTES_PER_TU, average_bytes_per_tu));
+
+ /* Configure DW_DP_VIDEO_HBLANK_INTERVAL register */
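+	/*
+	 * Convert the horizontal blanking duration from pixel clock
+	 * cycles into link clock (link->rate / 4) cycles.
+	 */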
+ hblank_interval = hblank * (link->rate / 4) / mode->clock;
+ regmap_write(dp->regmap, DW_DP_VIDEO_HBLANK_INTERVAL,
+ FIELD_PREP(HBLANK_INTERVAL_EN, 1) |
+ FIELD_PREP(HBLANK_INTERVAL, hblank_interval));
+
+ /* Video stream enable */
+ regmap_update_bits(dp->regmap, DW_DP_VSAMPLE_CTRL, VIDEO_STREAM_ENABLE,
+ FIELD_PREP(VIDEO_STREAM_ENABLE, 1));
+
+ if (dw_dp_video_need_vsc_sdp(dp))
+ dw_dp_send_vsc_sdp(dp);
+
+ return 0;
+}
+
+static void dw_dp_hpd_init(struct dw_dp *dp)
+{
+ /* Enable all HPD interrupts */
+ regmap_update_bits(dp->regmap, DW_DP_HPD_INTERRUPT_ENABLE,
+ HPD_UNPLUG_EN | HPD_PLUG_EN | HPD_IRQ_EN,
+ FIELD_PREP(HPD_UNPLUG_EN, 1) |
+ FIELD_PREP(HPD_PLUG_EN, 1) |
+ FIELD_PREP(HPD_IRQ_EN, 1));
+
+ /* Enable all top-level interrupts */
+ regmap_update_bits(dp->regmap, DW_DP_GENERAL_INTERRUPT_ENABLE,
+ HPD_EVENT_EN, FIELD_PREP(HPD_EVENT_EN, 1));
+}
+
+static void dw_dp_aux_init(struct dw_dp *dp)
+{
+ regmap_update_bits(dp->regmap, DW_DP_GENERAL_INTERRUPT_ENABLE,
+ AUX_REPLY_EVENT_EN, FIELD_PREP(AUX_REPLY_EVENT_EN, 1));
+}
+
+static void dw_dp_init_hw(struct dw_dp *dp)
+{
+ regmap_update_bits(dp->regmap, DW_DP_CCTL, DEFAULT_FAST_LINK_TRAIN_EN,
+ FIELD_PREP(DEFAULT_FAST_LINK_TRAIN_EN, 0));
+
+ dw_dp_hpd_init(dp);
+ dw_dp_aux_init(dp);
+}
+
+static int dw_dp_aux_write_data(struct dw_dp *dp, const u8 *buffer, size_t size)
+{
+ size_t i, j;
+
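+	/*
+	 * Pack the payload little-endian into the 32-bit AUX data
+	 * registers: byte 0 lands in bits [7:0] of DW_DP_AUX_DATA0,
+	 * byte 4 in bits [7:0] of DW_DP_AUX_DATA1, and so on.
+	 */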
+ for (i = 0; i < DIV_ROUND_UP(size, 4); i++) {
+ size_t num = min_t(size_t, size - i * 4, 4);
+ u32 value = 0;
+
+ for (j = 0; j < num; j++)
+ value |= buffer[i * 4 + j] << (j * 8);
+
+ regmap_write(dp->regmap, DW_DP_AUX_DATA0 + i * 4, value);
+ }
+
+ return size;
+}
+
+static int dw_dp_aux_read_data(struct dw_dp *dp, u8 *buffer, size_t size)
+{
+ size_t i, j;
+
+ for (i = 0; i < DIV_ROUND_UP(size, 4); i++) {
+ size_t num = min_t(size_t, size - i * 4, 4);
+ u32 value;
+
+ regmap_read(dp->regmap, DW_DP_AUX_DATA0 + i * 4, &value);
+
+ for (j = 0; j < num; j++)
+ buffer[i * 4 + j] = value >> (j * 8);
+ }
+
+ return size;
+}
+
+static ssize_t dw_dp_aux_transfer(struct drm_dp_aux *aux,
+ struct drm_dp_aux_msg *msg)
+{
+ struct dw_dp *dp = container_of(aux, struct dw_dp, aux);
+ unsigned long timeout = msecs_to_jiffies(10);
+ u32 status, value;
+ ssize_t ret = 0;
+
+ if (WARN_ON(msg->size > 16))
+ return -E2BIG;
+
+ switch (msg->request & ~DP_AUX_I2C_MOT) {
+ case DP_AUX_NATIVE_WRITE:
+ case DP_AUX_I2C_WRITE:
+ case DP_AUX_I2C_WRITE_STATUS_UPDATE:
+ ret = dw_dp_aux_write_data(dp, msg->buffer, msg->size);
+ if (ret < 0)
+ return ret;
+ break;
+ case DP_AUX_NATIVE_READ:
+ case DP_AUX_I2C_READ:
+ break;
+ default:
+ return -EINVAL;
+ }
+
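+	/*
+	 * The controller encodes the request length as size - 1; a
+	 * zero-length request is sent as an address-only transaction.
+	 */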
+ if (msg->size > 0)
+ value = FIELD_PREP(AUX_LEN_REQ, msg->size - 1);
+ else
+ value = FIELD_PREP(I2C_ADDR_ONLY, 1);
+ value |= FIELD_PREP(AUX_CMD_TYPE, msg->request);
+ value |= FIELD_PREP(AUX_ADDR, msg->address);
+ regmap_write(dp->regmap, DW_DP_AUX_CMD, value);
+
+ status = wait_for_completion_timeout(&dp->complete, timeout);
+ if (!status) {
+ dev_err(dp->dev, "timeout waiting for AUX reply\n");
+ return -ETIMEDOUT;
+ }
+
+ regmap_read(dp->regmap, DW_DP_AUX_STATUS, &value);
+ if (value & AUX_TIMEOUT)
+ return -ETIMEDOUT;
+
+ msg->reply = FIELD_GET(AUX_STATUS, value);
+
+ if (msg->size > 0 && msg->reply == DP_AUX_NATIVE_REPLY_ACK) {
+ if (msg->request & DP_AUX_I2C_READ) {
+ size_t count = FIELD_GET(AUX_BYTES_READ, value) - 1;
+
+ if (count != msg->size)
+ return -EBUSY;
+
+ ret = dw_dp_aux_read_data(dp, msg->buffer, count);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * Limits for the video timing for DP:
+ * 1. the hfp must be aligned to 2 pixels;
+ * 2. the hsync must be at least 9 pixels;
+ * 3. the hbp must be at least 16 pixels.
+ */
+static int dw_dp_bridge_atomic_check(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
+ struct dw_dp *dp = bridge_to_dp(bridge);
+ struct dw_dp_bridge_state *state;
+ const struct dw_dp_output_format *fmt;
+ struct drm_display_mode *mode;
+ int min_hbp = 16;
+ int min_hsync = 9;
+
+ state = to_dw_dp_bridge_state(bridge_state);
+ mode = &state->mode;
+
+ fmt = dw_dp_get_output_format(bridge_state->output_bus_cfg.format);
+ if (!fmt)
+ return -EINVAL;
+
+ state->video_mapping = fmt->video_mapping;
+ state->color_format = fmt->color_format;
+ state->bpc = fmt->bpc;
+ state->bpp = fmt->bpp;
+
+ if ((adjusted_mode->hsync_start - adjusted_mode->hdisplay) & 0x1) {
+ adjusted_mode->hsync_start += 1;
+ dev_warn(dp->dev, "hfp is not 2 pixeel aligned, fixup to aligned hfp\n");
+ }
+
+ if (adjusted_mode->hsync_end - adjusted_mode->hsync_start < min_hsync) {
+ adjusted_mode->hsync_end = adjusted_mode->hsync_start + min_hsync;
+ dev_warn(dp->dev, "hsync is too narrow, fixup to min hsync:%d\n", min_hsync);
+ }
+
+ if (adjusted_mode->htotal - adjusted_mode->hsync_end < min_hbp) {
+ adjusted_mode->htotal = adjusted_mode->hsync_end + min_hbp;
+ dev_warn(dp->dev, "hbp is too narrow, fixup to min hbp:%d\n", min_hbp);
+ }
+
+ drm_mode_copy(mode, adjusted_mode);
+
+ return 0;
+}
+
+static enum drm_mode_status dw_dp_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
+{
+ struct dw_dp *dp = bridge_to_dp(bridge);
+ struct dw_dp_link *link = &dp->link;
+ u32 min_bpp;
+
+ if (info->color_formats & DRM_COLOR_FORMAT_YCBCR420 &&
+ link->vsc_sdp_supported &&
+ (drm_mode_is_420_only(info, mode) || drm_mode_is_420_also(info, mode)))
+ min_bpp = 12;
+ else if (info->color_formats & DRM_COLOR_FORMAT_YCBCR422)
+ min_bpp = 16;
+ else if (info->color_formats & DRM_COLOR_FORMAT_RGB444)
+ min_bpp = 18;
+ else
+ min_bpp = 24;
+
+ if (!link->vsc_sdp_supported &&
+ drm_mode_is_420_only(info, mode))
+ return MODE_NO_420;
+
+ if (!dw_dp_bandwidth_ok(dp, mode, min_bpp, link->lanes, link->rate))
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static bool dw_dp_needs_link_retrain(struct dw_dp *dp)
+{
+ struct dw_dp_link *link = &dp->link;
+ u8 link_status[DP_LINK_STATUS_SIZE];
+
+ if (!dw_dp_link_train_valid(&link->train))
+ return false;
+
+ if (drm_dp_dpcd_read_link_status(&dp->aux, link_status) < 0)
+ return false;
+
+ /* Retrain if Channel EQ or CR not ok */
+ return !drm_dp_channel_eq_ok(link_status, dp->link.lanes);
+}
+
+static void dw_dp_link_disable(struct dw_dp *dp)
+{
+ struct dw_dp_link *link = &dp->link;
+
+ if (dw_dp_hpd_detect(dp))
+ drm_dp_link_power_down(&dp->aux, dp->link.revision);
+
+ dw_dp_phy_xmit_enable(dp, 0);
+
+ phy_power_off(dp->phy);
+
+ link->train.clock_recovered = false;
+ link->train.channel_equalized = false;
+}
+
+static int dw_dp_link_enable(struct dw_dp *dp)
+{
+ int ret;
+
+ ret = phy_power_on(dp->phy);
+ if (ret)
+ return ret;
+
+ ret = drm_dp_link_power_up(&dp->aux, dp->link.revision);
+ if (ret < 0)
+ return ret;
+
+	return dw_dp_link_train(dp);
+}
+
+static void dw_dp_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ struct dw_dp *dp = bridge_to_dp(bridge);
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
+ int ret;
+
+ connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
+ if (!connector) {
+ dev_err(dp->dev, "failed to get connector\n");
+ return;
+ }
+
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+ if (!conn_state) {
+ dev_err(dp->dev, "failed to get connector state\n");
+ return;
+ }
+
+ set_bit(0, dp->sdp_reg_bank);
+
+ ret = dw_dp_link_enable(dp);
+ if (ret < 0) {
+ dev_err(dp->dev, "failed to enable link: %d\n", ret);
+ return;
+ }
+
+ ret = dw_dp_video_enable(dp);
+ if (ret < 0) {
+ dev_err(dp->dev, "failed to enable video: %d\n", ret);
+ return;
+ }
+}
+
+static void dw_dp_reset(struct dw_dp *dp)
+{
+ int val;
+
+ disable_irq(dp->irq);
+ regmap_update_bits(dp->regmap, DW_DP_SOFT_RESET_CTRL, CONTROLLER_RESET,
+ FIELD_PREP(CONTROLLER_RESET, 1));
+ usleep_range(10, 20);
+ regmap_update_bits(dp->regmap, DW_DP_SOFT_RESET_CTRL, CONTROLLER_RESET,
+ FIELD_PREP(CONTROLLER_RESET, 0));
+
+ dw_dp_init_hw(dp);
+ regmap_read_poll_timeout(dp->regmap, DW_DP_HPD_STATUS, val,
+ FIELD_GET(HPD_HOT_PLUG, val), 200, 200000);
+ regmap_write(dp->regmap, DW_DP_HPD_STATUS, HPD_HOT_PLUG);
+ enable_irq(dp->irq);
+}
+
+static void dw_dp_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ struct dw_dp *dp = bridge_to_dp(bridge);
+
+ dw_dp_video_disable(dp);
+ dw_dp_link_disable(dp);
+ bitmap_zero(dp->sdp_reg_bank, SDP_REG_BANK_SIZE);
+ dw_dp_reset(dp);
+}
+
+static bool dw_dp_hpd_detect_link(struct dw_dp *dp, struct drm_connector *connector)
+{
+ int ret;
+
+ ret = phy_power_on(dp->phy);
+ if (ret < 0)
+ return false;
+ ret = dw_dp_link_parse(dp, connector);
+ phy_power_off(dp->phy);
+
+ return !ret;
+}
+
+static enum drm_connector_status dw_dp_bridge_detect(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ struct dw_dp *dp = bridge_to_dp(bridge);
+
+ if (!dw_dp_hpd_detect(dp))
+ return connector_status_disconnected;
+
+ if (!dw_dp_hpd_detect_link(dp, connector))
+ return connector_status_disconnected;
+
+ return connector_status_connected;
+}
+
+static const struct drm_edid *dw_dp_bridge_edid_read(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ struct dw_dp *dp = bridge_to_dp(bridge);
+ const struct drm_edid *edid;
+ int ret;
+
+ ret = phy_power_on(dp->phy);
+ if (ret)
+ return NULL;
+
+ edid = drm_edid_read_ddc(connector, &dp->aux.ddc);
+
+ phy_power_off(dp->phy);
+
+ return edid;
+}
+
+static u32 *dw_dp_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ unsigned int *num_output_fmts)
+{
+ struct dw_dp *dp = bridge_to_dp(bridge);
+ struct dw_dp_link *link = &dp->link;
+ struct drm_display_info *di = &conn_state->connector->display_info;
+ struct drm_display_mode mode = crtc_state->mode;
+ const struct dw_dp_output_format *fmt;
+ u32 i, j = 0;
+ u32 *output_fmts;
+
+ *num_output_fmts = 0;
+
+ output_fmts = kcalloc(ARRAY_SIZE(dw_dp_output_formats), sizeof(*output_fmts), GFP_KERNEL);
+ if (!output_fmts)
+ return NULL;
+
+ for (i = 0; i < ARRAY_SIZE(dw_dp_output_formats); i++) {
+ fmt = &dw_dp_output_formats[i];
+
+ if (fmt->bpc > conn_state->max_bpc)
+ continue;
+
+ if (!(fmt->color_format & di->color_formats))
+ continue;
+
+ if (fmt->color_format == DRM_COLOR_FORMAT_YCBCR420 &&
+ !link->vsc_sdp_supported)
+ continue;
+
+ if (fmt->color_format != DRM_COLOR_FORMAT_YCBCR420 &&
+ drm_mode_is_420_only(di, &mode))
+ continue;
+
+ if (!dw_dp_bandwidth_ok(dp, &mode, fmt->bpp, link->lanes, link->rate))
+ continue;
+
+ output_fmts[j++] = fmt->bus_format;
+ }
+
+ *num_output_fmts = j;
+
+ return output_fmts;
+}
+
+static struct drm_bridge_state *dw_dp_bridge_atomic_duplicate_state(struct drm_bridge *bridge)
+{
+ struct dw_dp_bridge_state *state;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ __drm_atomic_helper_bridge_duplicate_state(bridge, &state->base);
+
+ return &state->base;
+}
+
+static const struct drm_bridge_funcs dw_dp_bridge_funcs = {
+ .atomic_duplicate_state = dw_dp_bridge_atomic_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_get_input_bus_fmts = drm_atomic_helper_bridge_propagate_bus_fmt,
+ .atomic_get_output_bus_fmts = dw_dp_bridge_atomic_get_output_bus_fmts,
+ .atomic_check = dw_dp_bridge_atomic_check,
+ .mode_valid = dw_dp_bridge_mode_valid,
+ .atomic_enable = dw_dp_bridge_atomic_enable,
+ .atomic_disable = dw_dp_bridge_atomic_disable,
+ .detect = dw_dp_bridge_detect,
+ .edid_read = dw_dp_bridge_edid_read,
+};
+
+static int dw_dp_link_retrain(struct dw_dp *dp)
+{
+ struct drm_device *dev = dp->bridge.dev;
+ struct drm_modeset_acquire_ctx ctx;
+ int ret;
+
+ if (!dw_dp_needs_link_retrain(dp))
+ return 0;
+
+ dev_dbg(dp->dev, "Retraining link\n");
+
+ drm_modeset_acquire_init(&ctx, 0);
+ for (;;) {
+ ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx);
+ if (ret != -EDEADLK)
+ break;
+
+ drm_modeset_backoff(&ctx);
+ }
+
+ if (!ret)
+ ret = dw_dp_link_train(dp);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ return ret;
+}
+
+static void dw_dp_hpd_work(struct work_struct *work)
+{
+ struct dw_dp *dp = container_of(work, struct dw_dp, hpd_work);
+ bool long_hpd;
+ int ret;
+
+ mutex_lock(&dp->irq_lock);
+ long_hpd = dp->hotplug.long_hpd;
+ mutex_unlock(&dp->irq_lock);
+
+ dev_dbg(dp->dev, "[drm] Get hpd irq - %s\n", long_hpd ? "long" : "short");
+
+ if (!long_hpd) {
+ if (dw_dp_needs_link_retrain(dp)) {
+ ret = dw_dp_link_retrain(dp);
+ if (ret)
+ dev_warn(dp->dev, "Retrain link failed\n");
+ }
+ } else {
+ drm_helper_hpd_irq_event(dp->bridge.dev);
+ }
+}
+
+static void dw_dp_handle_hpd_event(struct dw_dp *dp)
+{
+ u32 value;
+
+ mutex_lock(&dp->irq_lock);
+ regmap_read(dp->regmap, DW_DP_HPD_STATUS, &value);
+
+ if (value & HPD_IRQ) {
+ dev_dbg(dp->dev, "IRQ from the HPD\n");
+ dp->hotplug.long_hpd = false;
+ regmap_write(dp->regmap, DW_DP_HPD_STATUS, HPD_IRQ);
+ }
+
+ if (value & HPD_HOT_PLUG) {
+ dev_dbg(dp->dev, "Hot plug detected\n");
+ dp->hotplug.long_hpd = true;
+ regmap_write(dp->regmap, DW_DP_HPD_STATUS, HPD_HOT_PLUG);
+ }
+
+ if (value & HPD_HOT_UNPLUG) {
+ dev_dbg(dp->dev, "Unplug detected\n");
+ dp->hotplug.long_hpd = true;
+ regmap_write(dp->regmap, DW_DP_HPD_STATUS, HPD_HOT_UNPLUG);
+ }
+ mutex_unlock(&dp->irq_lock);
+
+ schedule_work(&dp->hpd_work);
+}
+
+static irqreturn_t dw_dp_irq(int irq, void *data)
+{
+ struct dw_dp *dp = data;
+ u32 value;
+
+ regmap_read(dp->regmap, DW_DP_GENERAL_INTERRUPT, &value);
+ if (!value)
+ return IRQ_NONE;
+
+ if (value & HPD_EVENT)
+ dw_dp_handle_hpd_event(dp);
+
+ if (value & AUX_REPLY_EVENT) {
+ regmap_write(dp->regmap, DW_DP_GENERAL_INTERRUPT, AUX_REPLY_EVENT);
+ complete(&dp->complete);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static const struct regmap_range dw_dp_readable_ranges[] = {
+ regmap_reg_range(DW_DP_VERSION_NUMBER, DW_DP_ID),
+ regmap_reg_range(DW_DP_CONFIG_REG1, DW_DP_CONFIG_REG3),
+ regmap_reg_range(DW_DP_CCTL, DW_DP_SOFT_RESET_CTRL),
+ regmap_reg_range(DW_DP_VSAMPLE_CTRL, DW_DP_VIDEO_HBLANK_INTERVAL),
+ regmap_reg_range(DW_DP_AUD_CONFIG1, DW_DP_AUD_CONFIG1),
+ regmap_reg_range(DW_DP_SDP_VERTICAL_CTRL, DW_DP_SDP_STATUS_EN),
+ regmap_reg_range(DW_DP_PHYIF_CTRL, DW_DP_PHYIF_PWRDOWN_CTRL),
+ regmap_reg_range(DW_DP_AUX_CMD, DW_DP_AUX_DATA3),
+ regmap_reg_range(DW_DP_GENERAL_INTERRUPT, DW_DP_HPD_INTERRUPT_ENABLE),
+};
+
+static const struct regmap_access_table dw_dp_readable_table = {
+ .yes_ranges = dw_dp_readable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dw_dp_readable_ranges),
+};
+
+static const struct regmap_config dw_dp_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .max_register = DW_DP_MAX_REGISTER,
+ .rd_table = &dw_dp_readable_table,
+};
+
+static void dw_dp_phy_exit(void *data)
+{
+ struct dw_dp *dp = data;
+
+ phy_exit(dp->phy);
+}
+
+struct dw_dp *dw_dp_bind(struct device *dev, struct drm_encoder *encoder,
+ const struct dw_dp_plat_data *plat_data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dw_dp *dp;
+ struct drm_bridge *bridge;
+ void __iomem *res;
+ int ret;
+
+ dp = devm_drm_bridge_alloc(dev, struct dw_dp, bridge, &dw_dp_bridge_funcs);
+ if (IS_ERR(dp))
+ return ERR_CAST(dp);
+
+ dp->dev = dev;
+ dp->pixel_mode = DW_DP_MP_QUAD_PIXEL;
+
+ dp->plat_data.max_link_rate = plat_data->max_link_rate;
+ bridge = &dp->bridge;
+ mutex_init(&dp->irq_lock);
+ INIT_WORK(&dp->hpd_work, dw_dp_hpd_work);
+ init_completion(&dp->complete);
+
+ res = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(res))
+ return ERR_CAST(res);
+
+ dp->regmap = devm_regmap_init_mmio(dev, res, &dw_dp_regmap_config);
+ if (IS_ERR(dp->regmap)) {
+ dev_err_probe(dev, PTR_ERR(dp->regmap), "failed to create regmap\n");
+ return ERR_CAST(dp->regmap);
+ }
+
+ dp->phy = devm_of_phy_get(dev, dev->of_node, NULL);
+ if (IS_ERR(dp->phy)) {
+ dev_err_probe(dev, PTR_ERR(dp->phy), "failed to get phy\n");
+ return ERR_CAST(dp->phy);
+ }
+
+ dp->apb_clk = devm_clk_get_enabled(dev, "apb");
+ if (IS_ERR(dp->apb_clk)) {
+ dev_err_probe(dev, PTR_ERR(dp->apb_clk), "failed to get apb clock\n");
+ return ERR_CAST(dp->apb_clk);
+ }
+
+ dp->aux_clk = devm_clk_get_enabled(dev, "aux");
+ if (IS_ERR(dp->aux_clk)) {
+ dev_err_probe(dev, PTR_ERR(dp->aux_clk), "failed to get aux clock\n");
+ return ERR_CAST(dp->aux_clk);
+ }
+
+ dp->i2s_clk = devm_clk_get(dev, "i2s");
+ if (IS_ERR(dp->i2s_clk)) {
+ dev_err_probe(dev, PTR_ERR(dp->i2s_clk), "failed to get i2s clock\n");
+ return ERR_CAST(dp->i2s_clk);
+ }
+
+ dp->spdif_clk = devm_clk_get(dev, "spdif");
+ if (IS_ERR(dp->spdif_clk)) {
+ dev_err_probe(dev, PTR_ERR(dp->spdif_clk), "failed to get spdif clock\n");
+ return ERR_CAST(dp->spdif_clk);
+ }
+
+ dp->hdcp_clk = devm_clk_get(dev, "hdcp");
+ if (IS_ERR(dp->hdcp_clk)) {
+ dev_err_probe(dev, PTR_ERR(dp->hdcp_clk), "failed to get hdcp clock\n");
+ return ERR_CAST(dp->hdcp_clk);
+ }
+
+ dp->rstc = devm_reset_control_get(dev, NULL);
+ if (IS_ERR(dp->rstc)) {
+ dev_err_probe(dev, PTR_ERR(dp->rstc), "failed to get reset control\n");
+ return ERR_CAST(dp->rstc);
+ }
+
+ bridge->of_node = dev->of_node;
+ bridge->ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_HPD;
+ bridge->type = DRM_MODE_CONNECTOR_DisplayPort;
+ bridge->ycbcr_420_allowed = true;
+
+ dp->aux.dev = dev;
+ dp->aux.drm_dev = encoder->dev;
+ dp->aux.name = dev_name(dev);
+ dp->aux.transfer = dw_dp_aux_transfer;
+ ret = drm_dp_aux_register(&dp->aux);
+ if (ret) {
+ dev_err_probe(dev, ret, "Aux register failed\n");
+ return ERR_PTR(ret);
+ }
+
+	ret = drm_bridge_attach(encoder, bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+	if (ret) {
+		dev_err_probe(dev, ret, "Failed to attach bridge\n");
+		return ERR_PTR(ret);
+	}
+
+ dw_dp_init_hw(dp);
+
+ ret = phy_init(dp->phy);
+ if (ret) {
+ dev_err_probe(dev, ret, "phy init failed\n");
+ return ERR_PTR(ret);
+ }
+
+ ret = devm_add_action_or_reset(dev, dw_dp_phy_exit, dp);
+ if (ret)
+ return ERR_PTR(ret);
+
+ dp->irq = platform_get_irq(pdev, 0);
+	if (dp->irq < 0)
+		return ERR_PTR(dp->irq);
+
+ ret = devm_request_threaded_irq(dev, dp->irq, NULL, dw_dp_irq,
+ IRQF_ONESHOT, dev_name(dev), dp);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to request irq\n");
+ return ERR_PTR(ret);
+ }
+
+ return dp;
+}
+EXPORT_SYMBOL_GPL(dw_dp_bind);
+
+MODULE_AUTHOR("Andy Yan <andyshrk@163.com>");
+MODULE_DESCRIPTION("DW DP Core Library");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/bridge/waveshare-dsi.c b/drivers/gpu/drm/bridge/waveshare-dsi.c
new file mode 100644
index 000000000000..43f4e7412d72
--- /dev/null
+++ b/drivers/gpu/drm/bridge/waveshare-dsi.c
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2025 NXP
+ * Based on panel-raspberrypi-touchscreen by Broadcom
+ */
+
+#include <linux/backlight.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/regmap.h>
+
+#include <drm/drm_bridge.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+
+struct ws_bridge {
+ struct drm_bridge bridge;
+ struct drm_bridge *next_bridge;
+ struct backlight_device *backlight;
+ struct device *dev;
+ struct regmap *reg_map;
+};
+
+static const struct regmap_config ws_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xff,
+};
+
+static struct ws_bridge *bridge_to_ws_bridge(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct ws_bridge, bridge);
+}
+
+static int ws_bridge_attach_dsi(struct ws_bridge *ws)
+{
+ const struct mipi_dsi_device_info info = {
+ .type = "ws-bridge",
+ .channel = 0,
+ .node = NULL,
+ };
+ struct device_node *dsi_host_node;
+ struct device *dev = ws->dev;
+ struct mipi_dsi_device *dsi;
+ struct mipi_dsi_host *host;
+ int ret;
+
+ dsi_host_node = of_graph_get_remote_node(dev->of_node, 0, 0);
+ if (!dsi_host_node) {
+ dev_err(dev, "Failed to get remote port\n");
+ return -ENODEV;
+ }
+ host = of_find_mipi_dsi_host_by_node(dsi_host_node);
+ of_node_put(dsi_host_node);
+ if (!host)
+ return dev_err_probe(dev, -EPROBE_DEFER, "Failed to find dsi_host\n");
+
+ dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
+ if (IS_ERR(dsi))
+ return dev_err_probe(dev, PTR_ERR(dsi), "Failed to create dsi device\n");
+
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO_HSE | MIPI_DSI_MODE_VIDEO |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->lanes = 2;
+
+ ret = devm_mipi_dsi_attach(dev, dsi);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to attach dsi to host\n");
+
+ return 0;
+}
+
+static int ws_bridge_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
+ enum drm_bridge_attach_flags flags)
+{
+ struct ws_bridge *ws = bridge_to_ws_bridge(bridge);
+ int ret;
+
+ ret = ws_bridge_attach_dsi(ws);
+ if (ret)
+ return ret;
+
+ return drm_bridge_attach(encoder, ws->next_bridge,
+ &ws->bridge, flags);
+}
+
+static void ws_bridge_bridge_enable(struct drm_bridge *bridge)
+{
+ struct ws_bridge *ws = bridge_to_ws_bridge(bridge);
+
+ regmap_write(ws->reg_map, 0xad, 0x01);
+ backlight_enable(ws->backlight);
+}
+
+static void ws_bridge_bridge_disable(struct drm_bridge *bridge)
+{
+ struct ws_bridge *ws = bridge_to_ws_bridge(bridge);
+
+ backlight_disable(ws->backlight);
+ regmap_write(ws->reg_map, 0xad, 0x00);
+}
+
+static const struct drm_bridge_funcs ws_bridge_bridge_funcs = {
+ .enable = ws_bridge_bridge_enable,
+ .disable = ws_bridge_bridge_disable,
+ .attach = ws_bridge_bridge_attach,
+};
+
+static int ws_bridge_bl_update_status(struct backlight_device *bl)
+{
+ struct ws_bridge *ws = bl_get_data(bl);
+
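+	/*
+	 * Register 0xab takes the inverted 8-bit brightness; writing
+	 * 0xaa presumably latches the new value (vendor register
+	 * semantics, not publicly documented).
+	 */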
+ regmap_write(ws->reg_map, 0xab, 0xff - backlight_get_brightness(bl));
+ regmap_write(ws->reg_map, 0xaa, 0x01);
+
+ return 0;
+}
+
+static const struct backlight_ops ws_bridge_bl_ops = {
+ .update_status = ws_bridge_bl_update_status,
+};
+
+static struct backlight_device *ws_bridge_create_backlight(struct ws_bridge *ws)
+{
+ const struct backlight_properties props = {
+ .type = BACKLIGHT_RAW,
+ .brightness = 255,
+ .max_brightness = 255,
+ };
+ struct device *dev = ws->dev;
+
+ return devm_backlight_device_register(dev, dev_name(dev), dev, ws,
+ &ws_bridge_bl_ops, &props);
+}
+
+static int ws_bridge_probe(struct i2c_client *i2c)
+{
+ struct device *dev = &i2c->dev;
+ struct drm_panel *panel;
+ struct ws_bridge *ws;
+ int ret;
+
+ ws = devm_drm_bridge_alloc(dev, struct ws_bridge, bridge, &ws_bridge_bridge_funcs);
+ if (IS_ERR(ws))
+ return PTR_ERR(ws);
+
+ ws->dev = dev;
+
+ ws->reg_map = devm_regmap_init_i2c(i2c, &ws_regmap_config);
+ if (IS_ERR(ws->reg_map))
+ return dev_err_probe(dev, PTR_ERR(ws->reg_map), "Failed to allocate regmap\n");
+
+ ret = drm_of_find_panel_or_bridge(dev->of_node, 1, -1, &panel, NULL);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to find remote panel\n");
+
+ ws->next_bridge = devm_drm_panel_bridge_add(dev, panel);
+ if (IS_ERR(ws->next_bridge))
+ return PTR_ERR(ws->next_bridge);
+
+ ws->backlight = ws_bridge_create_backlight(ws);
+ if (IS_ERR(ws->backlight)) {
+ ret = PTR_ERR(ws->backlight);
+ dev_err(dev, "Failed to create backlight: %d\n", ret);
+ return ret;
+ }
+
+ regmap_write(ws->reg_map, 0xc0, 0x01);
+ regmap_write(ws->reg_map, 0xc2, 0x01);
+ regmap_write(ws->reg_map, 0xac, 0x01);
+
+ ws->bridge.type = DRM_MODE_CONNECTOR_DPI;
+ ws->bridge.of_node = dev->of_node;
+ devm_drm_bridge_add(dev, &ws->bridge);
+
+ return 0;
+}
+
+static const struct of_device_id ws_bridge_of_ids[] = {
+ {.compatible = "waveshare,dsi2dpi",},
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, ws_bridge_of_ids);
+
+static struct i2c_driver ws_bridge_driver = {
+ .driver = {
+ .name = "ws_dsi2dpi",
+ .of_match_table = ws_bridge_of_ids,
+ },
+ .probe = ws_bridge_probe,
+};
+module_i2c_driver(ws_bridge_driver);
+
+MODULE_AUTHOR("Joseph Guo <qijian.guo@nxp.com>");
+MODULE_DESCRIPTION("Waveshare DSI2DPI bridge driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/display/drm_bridge_connector.c b/drivers/gpu/drm/display/drm_bridge_connector.c
index 5eb7e9bfe361..baacd21e7341 100644
--- a/drivers/gpu/drm/display/drm_bridge_connector.c
+++ b/drivers/gpu/drm/display/drm_bridge_connector.c
@@ -20,6 +20,7 @@
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/display/drm_hdcp_helper.h>
#include <drm/display/drm_hdmi_audio_helper.h>
#include <drm/display/drm_hdmi_cec_helper.h>
#include <drm/display/drm_hdmi_helper.h>
@@ -641,6 +642,7 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
struct drm_bridge *bridge, *panel_bridge = NULL;
unsigned int supported_formats = BIT(HDMI_COLORSPACE_RGB);
unsigned int max_bpc = 8;
+ bool support_hdcp = false;
int connector_type;
int ret;
@@ -749,12 +751,11 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
return ERR_PTR(-EINVAL);
}
- if (!drm_bridge_get_next_bridge(bridge))
+ if (drm_bridge_is_last(bridge))
connector_type = bridge->type;
#ifdef CONFIG_OF
- if (!drm_bridge_get_next_bridge(bridge) &&
- bridge->of_node)
+ if (drm_bridge_is_last(bridge) && bridge->of_node)
connector->fwnode = fwnode_handle_get(of_fwnode_handle(bridge->of_node));
#endif
@@ -763,6 +764,9 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
if (drm_bridge_is_panel(bridge))
panel_bridge = bridge;
+
+ if (bridge->support_hdcp)
+ support_hdcp = true;
}
if (connector_type == DRM_MODE_CONNECTOR_Unknown)
@@ -772,8 +776,6 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
if (!connector->ycbcr_420_allowed)
supported_formats &= ~BIT(HDMI_COLORSPACE_YUV420);
- bridge = bridge_connector->bridge_hdmi;
-
ret = drmm_connector_hdmi_init(drm, connector,
bridge_connector->bridge_hdmi->vendor,
bridge_connector->bridge_hdmi->product,
@@ -816,6 +818,8 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
if (bridge_connector->bridge_hdmi_cec &&
bridge_connector->bridge_hdmi_cec->ops & DRM_BRIDGE_OP_HDMI_CEC_NOTIFIER) {
+ bridge = bridge_connector->bridge_hdmi_cec;
+
ret = drmm_connector_hdmi_cec_notifier_register(connector,
NULL,
bridge->hdmi_cec_dev);
@@ -825,6 +829,8 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
if (bridge_connector->bridge_hdmi_cec &&
bridge_connector->bridge_hdmi_cec->ops & DRM_BRIDGE_OP_HDMI_CEC_ADAPTER) {
+ bridge = bridge_connector->bridge_hdmi_cec;
+
ret = drmm_connector_hdmi_cec_register(connector,
&drm_bridge_connector_hdmi_cec_funcs,
bridge->hdmi_cec_adapter_name,
@@ -845,6 +851,10 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
if (panel_bridge)
drm_panel_bridge_set_orientation(connector, panel_bridge);
+ if (support_hdcp && IS_REACHABLE(CONFIG_DRM_DISPLAY_HELPER) &&
+ IS_ENABLED(CONFIG_DRM_DISPLAY_HDCP_HELPER))
+ drm_connector_attach_content_protection_property(connector, true);
+
return connector;
}
EXPORT_SYMBOL_GPL(drm_bridge_connector_init);
diff --git a/drivers/gpu/drm/display/drm_dp_cec.c b/drivers/gpu/drm/display/drm_dp_cec.c
index 3b50d817c839..436bfe9f9081 100644
--- a/drivers/gpu/drm/display/drm_dp_cec.c
+++ b/drivers/gpu/drm/display/drm_dp_cec.c
@@ -42,7 +42,7 @@
*
* https://hverkuil.home.xs4all.nl/cec-status.txt
*
- * Please mail me (hverkuil@xs4all.nl) if you find an adapter that works
+ * Please mail me (hverkuil@kernel.org) if you find an adapter that works
* and is not yet listed there.
*
* Note that the current implementation does not support CEC over an MST hub.
diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c
index 1ecc3df7e316..4aaeae4fa03c 100644
--- a/drivers/gpu/drm/display/drm_dp_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_helper.c
@@ -3962,6 +3962,7 @@ int drm_edp_backlight_set_level(struct drm_dp_aux *aux, const struct drm_edp_bac
int ret;
unsigned int offset = DP_EDP_BACKLIGHT_BRIGHTNESS_MSB;
u8 buf[3] = { 0 };
+ size_t len = 2;
/* The panel uses the PWM for controlling brightness levels */
if (!(bl->aux_set || bl->luminance_set))
@@ -3974,6 +3975,7 @@ int drm_edp_backlight_set_level(struct drm_dp_aux *aux, const struct drm_edp_bac
buf[1] = (level & 0x00ff00) >> 8;
buf[2] = (level & 0xff0000) >> 16;
offset = DP_EDP_PANEL_TARGET_LUMINANCE_VALUE;
+ len = 3;
} else if (bl->lsb_reg_used) {
buf[0] = (level & 0xff00) >> 8;
buf[1] = (level & 0x00ff);
@@ -3981,7 +3983,7 @@ int drm_edp_backlight_set_level(struct drm_dp_aux *aux, const struct drm_edp_bac
buf[0] = level;
}
- ret = drm_dp_dpcd_write_data(aux, offset, buf, sizeof(buf));
+ ret = drm_dp_dpcd_write_data(aux, offset, buf, len);
if (ret < 0) {
drm_err(aux->drm_dev,
"%s: Failed to write aux backlight level: %d\n",
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index ef56b474acf5..d5ebe6ea0acb 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -456,6 +456,7 @@ mode_fixup(struct drm_atomic_state *state)
ret = drm_atomic_bridge_chain_check(bridge,
new_crtc_state,
new_conn_state);
+ drm_bridge_put(bridge);
if (ret) {
drm_dbg_atomic(encoder->dev, "Bridge atomic check failed\n");
return ret;
@@ -527,6 +528,7 @@ static enum drm_mode_status mode_valid_path(struct drm_connector *connector,
bridge = drm_bridge_chain_get_first_bridge(encoder);
ret = drm_bridge_chain_mode_valid(bridge, &connector->display_info,
mode);
+ drm_bridge_put(bridge);
if (ret != MODE_OK) {
drm_dbg_atomic(encoder->dev, "[BRIDGE] mode_valid() failed\n");
return ret;
@@ -1212,6 +1214,7 @@ encoder_bridge_disable(struct drm_device *dev, struct drm_atomic_state *state)
*/
bridge = drm_bridge_chain_get_first_bridge(encoder);
drm_atomic_bridge_chain_disable(bridge, state);
+ drm_bridge_put(bridge);
/* Right function depends upon target state. */
if (funcs) {
@@ -1329,6 +1332,7 @@ encoder_bridge_post_disable(struct drm_device *dev, struct drm_atomic_state *sta
*/
bridge = drm_bridge_chain_get_first_bridge(encoder);
drm_atomic_bridge_chain_post_disable(bridge, state);
+ drm_bridge_put(bridge);
}
}
@@ -1501,6 +1505,7 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *state)
bridge = drm_bridge_chain_get_first_bridge(encoder);
drm_bridge_chain_mode_set(bridge, mode, adjusted_mode);
+ drm_bridge_put(bridge);
}
}
@@ -1580,6 +1585,7 @@ encoder_bridge_pre_enable(struct drm_device *dev, struct drm_atomic_state *state
*/
bridge = drm_bridge_chain_get_first_bridge(encoder);
drm_atomic_bridge_chain_pre_enable(bridge, state);
+ drm_bridge_put(bridge);
}
}
@@ -1655,6 +1661,7 @@ encoder_bridge_enable(struct drm_device *dev, struct drm_atomic_state *state)
}
drm_atomic_bridge_chain_enable(bridge, state);
+ drm_bridge_put(bridge);
}
}
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index ecc73d52bfae..85dbdaa4a2e2 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -1078,19 +1078,20 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
}
if (async_flip) {
- /* check if the prop does a nop change */
- if ((prop != config->prop_fb_id &&
- prop != config->prop_in_fence_fd &&
- prop != config->prop_fb_damage_clips)) {
- ret = drm_atomic_plane_get_property(plane, plane_state,
- prop, &old_val);
- ret = drm_atomic_check_prop_changes(ret, old_val, prop_value, prop);
- }
+ /* no-op changes are always allowed */
+ ret = drm_atomic_plane_get_property(plane, plane_state,
+ prop, &old_val);
+ ret = drm_atomic_check_prop_changes(ret, old_val, prop_value, prop);
- /* ask the driver if this non-primary plane is supported */
- if (plane->type != DRM_PLANE_TYPE_PRIMARY) {
- ret = -EINVAL;
+ /* fail everything that isn't no-op or a pure flip */
+ if (ret && prop != config->prop_fb_id &&
+ prop != config->prop_in_fence_fd &&
+ prop != config->prop_fb_damage_clips) {
+ break;
+ }
+ if (ret && plane->type != DRM_PLANE_TYPE_PRIMARY) {
+ /* ask the driver if this non-primary plane is supported */
if (plane_funcs && plane_funcs->atomic_async_check)
ret = plane_funcs->atomic_async_check(plane, state, true);
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index 4bde00083047..d031447eebc9 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -941,11 +941,11 @@ static int select_bus_fmt_recursive(struct drm_bridge *first_bridge,
{
unsigned int i, num_in_bus_fmts = 0;
struct drm_bridge_state *cur_state;
- struct drm_bridge *prev_bridge;
+ struct drm_bridge *prev_bridge __free(drm_bridge_put) =
+ drm_bridge_get_prev_bridge(cur_bridge);
u32 *in_bus_fmts;
int ret;
- prev_bridge = drm_bridge_get_prev_bridge(cur_bridge);
cur_state = drm_atomic_get_new_bridge_state(crtc_state->state,
cur_bridge);
@@ -1435,6 +1435,9 @@ static void drm_bridge_debugfs_show_bridge(struct drm_printer *p,
unsigned int idx)
{
drm_printf(p, "bridge[%u]: %ps\n", idx, bridge->funcs);
+
+ drm_printf(p, "\trefcount: %u\n", kref_read(&bridge->refcount));
+
drm_printf(p, "\ttype: [%d] %s\n",
bridge->type,
drm_get_connector_type_name(bridge->type));
diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c
index 37a3270bc3c2..131c1c9ae92f 100644
--- a/drivers/gpu/drm/drm_color_mgmt.c
+++ b/drivers/gpu/drm/drm_color_mgmt.c
@@ -817,6 +817,40 @@ void drm_crtc_load_palette_8(struct drm_crtc *crtc, const struct drm_color_lut *
}
EXPORT_SYMBOL(drm_crtc_load_palette_8);
+static void fill_palette_332(struct drm_crtc *crtc, u16 r, u16 g, u16 b,
+ drm_crtc_set_lut_func set_palette)
+{
+ unsigned int i = (r << 5) | (g << 2) | b; /* 8-bit palette index */
+
+ /* Expand R (3-bit) G (3-bit) and B (2-bit) values to 16-bit values */
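+	/* e.g. r = 0b101 replicates to 0b1011011011011011 */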
+ r = (r << 13) | (r << 10) | (r << 7) | (r << 4) | (r << 1) | (r >> 2);
+ g = (g << 13) | (g << 10) | (g << 7) | (g << 4) | (g << 1) | (g >> 2);
+ b = (b << 14) | (b << 12) | (b << 10) | (b << 8) | (b << 6) | (b << 4) | (b << 2) | b;
+
+ set_palette(crtc, i, r, g, b);
+}
+
+/**
+ * drm_crtc_fill_palette_332 - Programs a default palette for R332-like formats
+ * @crtc: The displaying CRTC
+ * @set_palette: Callback for programming the hardware gamma LUT
+ *
+ * Programs an RGB332 palette to hardware.
+ */
+void drm_crtc_fill_palette_332(struct drm_crtc *crtc, drm_crtc_set_lut_func set_palette)
+{
+ unsigned int r, g, b;
+
+	/*
+	 * The bounds 8, 8 and 4 are the number of values of the 3-bit
+	 * red, 3-bit green and 2-bit blue channels.
+	 */
+ for (r = 0; r < 8; ++r) {
+ for (g = 0; g < 8; ++g) {
+ for (b = 0; b < 4; ++b)
+ fill_palette_332(crtc, r, g, b, set_palette);
+ }
+ }
+}
+EXPORT_SYMBOL(drm_crtc_fill_palette_332);
+
static void fill_palette_8(struct drm_crtc *crtc, unsigned int i,
drm_crtc_set_lut_func set_palette)
{
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index cdd591b11488..8e3cb08241c8 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -532,6 +532,8 @@ static const char *drm_get_wedge_recovery(unsigned int opt)
return "rebind";
case DRM_WEDGE_RECOVERY_BUS_RESET:
return "bus-reset";
+ case DRM_WEDGE_RECOVERY_VENDOR:
+ return "vendor-specific";
default:
return NULL;
}
@@ -694,7 +696,6 @@ static void drm_dev_init_release(struct drm_device *dev, void *res)
mutex_destroy(&dev->master_mutex);
mutex_destroy(&dev->clientlist_mutex);
mutex_destroy(&dev->filelist_mutex);
- mutex_destroy(&dev->struct_mutex);
}
static int drm_dev_init(struct drm_device *dev,
@@ -735,7 +736,6 @@ static int drm_dev_init(struct drm_device *dev,
INIT_LIST_HEAD(&dev->vblank_event_list);
spin_lock_init(&dev->event_lock);
- mutex_init(&dev->struct_mutex);
mutex_init(&dev->filelist_mutex);
mutex_init(&dev->clientlist_mutex);
mutex_init(&dev->master_mutex);
diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c
index 8f3daf38ca63..006836554cc2 100644
--- a/drivers/gpu/drm/drm_format_helper.c
+++ b/drivers/gpu/drm/drm_format_helper.c
@@ -1243,6 +1243,9 @@ int drm_fb_blit(struct iosys_map *dst, const unsigned int *dst_pitch, uint32_t d
} else if (dst_format == DRM_FORMAT_BGRX8888) {
drm_fb_swab(dst, dst_pitch, src, fb, clip, false, state);
return 0;
+ } else if (dst_format == DRM_FORMAT_RGB332) {
+ drm_fb_xrgb8888_to_rgb332(dst, dst_pitch, src, fb, clip, state);
+ return 0;
}
}
@@ -1253,6 +1256,25 @@ int drm_fb_blit(struct iosys_map *dst, const unsigned int *dst_pitch, uint32_t d
}
EXPORT_SYMBOL(drm_fb_blit);
+static void drm_fb_gray8_to_gray2_line(void *dbuf, const void *sbuf, unsigned int pixels)
+{
+ u8 *dbuf8 = dbuf;
+ const u8 *sbuf8 = sbuf;
+ u8 px;
+
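+	/*
+	 * Pack four 2-bit pixels per byte, first pixel in the LSBs:
+	 * the byte is shifted right by two before each new pixel is
+	 * OR'ed into bits [7:6], so after four pixels the first one
+	 * ends up in bits [1:0].
+	 */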
+ while (pixels) {
+ unsigned int i, bits = min(pixels, 4U);
+ u8 byte = 0;
+
+ for (i = 0; i < bits; i++, pixels--) {
+ byte >>= 2;
+ px = (*sbuf8++ * 3 + 127) / 255;
+			byte |= (px & 0x03) << 6;
+ }
+ *dbuf8++ = byte;
+ }
+}
+
static void drm_fb_gray8_to_mono_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
u8 *dbuf8 = dbuf;
@@ -1359,3 +1381,92 @@ void drm_fb_xrgb8888_to_mono(struct iosys_map *dst, const unsigned int *dst_pitc
}
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_mono);
+
+/**
+ * drm_fb_xrgb8888_to_gray2 - Convert XRGB8888 to gray2
+ * @dst: Array of gray2 destination buffer
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of XRGB8888 source buffers
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ * @state: Transform and conversion state
+ *
+ * This function copies parts of a framebuffer to display memory and converts the
+ * color format during the process. Destination and framebuffer formats must match. The
+ * parameters @dst, @dst_pitch and @src refer to arrays. Each array must have at
+ * least as many entries as there are planes in @fb's format. Each entry stores the
+ * value for the format's respective color plane at the same index.
+ *
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner). The first pixel (upper left corner of the clip rectangle) will
+ * be converted and copied to the two first bits (LSB) in the first byte of the gray2
+ * destination buffer. If the caller requires that the first pixel in a byte must
+ * be located at an x-coordinate that is a multiple of 8, then the caller must take
+ * care itself of supplying a suitable clip rectangle.
+ *
+ * DRM doesn't have native gray2 support. Drivers can use this function for
+ * gray2 devices that don't support XRGB8888 natively. Such drivers can
+ * announce the commonly supported XR24 format to userspace and use this function
+ * to convert to the native format.
+ *
+ */
+void drm_fb_xrgb8888_to_gray2(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state)
+{
+ static const unsigned int default_dst_pitch[DRM_FORMAT_MAX_PLANES] = {
+ 0, 0, 0, 0
+ };
+ unsigned int linepixels = drm_rect_width(clip);
+ unsigned int lines = drm_rect_height(clip);
+ unsigned int cpp = fb->format->cpp[0];
+ unsigned int len_src32 = linepixels * cpp;
+ struct drm_device *dev = fb->dev;
+ void *vaddr = src[0].vaddr;
+ unsigned int dst_pitch_0;
+ unsigned int y;
+ u8 *gray2 = dst[0].vaddr, *gray8;
+ u32 *src32;
+
+ if (drm_WARN_ON(dev, fb->format->format != DRM_FORMAT_XRGB8888))
+ return;
+
+ if (!dst_pitch)
+ dst_pitch = default_dst_pitch;
+ dst_pitch_0 = dst_pitch[0];
+
+	/*
+	 * The gray2 destination buffer packs 2 bits per pixel,
+	 * i.e. 4 pixels per byte.
+	 */
+ if (!dst_pitch_0)
+ dst_pitch_0 = DIV_ROUND_UP(linepixels, 4);
+
+ /*
+ * The dma memory is write-combined so reads are uncached.
+ * Speed up by fetching one line at a time.
+ *
+	 * Also, the format conversion from XR24 to gray2 is done
+	 * line-by-line, with 8-bit grayscale as an intermediate step.
+ *
+ * Allocate a buffer to be used for both copying from the cma
+ * memory and to store the intermediate grayscale line pixels.
+ */
+ src32 = drm_format_conv_state_reserve(state, len_src32 + linepixels, GFP_KERNEL);
+ if (!src32)
+ return;
+
+ gray8 = (u8 *)src32 + len_src32;
+
+ vaddr += clip_offset(clip, fb->pitches[0], cpp);
+ for (y = 0; y < lines; y++) {
+ src32 = memcpy(src32, vaddr, len_src32);
+ drm_fb_xrgb8888_to_gray8_line(gray8, src32, linepixels);
+ drm_fb_gray8_to_gray2_line(gray2, gray8, linepixels);
+ vaddr += fb->pitches[0];
+ gray2 += dst_pitch_0;
+ }
+}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_gray2);
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 6a44351e58b7..f884d155a832 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -187,6 +187,7 @@ void drm_gem_private_object_init(struct drm_device *dev,
kref_init(&obj->refcount);
obj->handle_count = 0;
obj->size = size;
+ mutex_init(&obj->gpuva.lock);
dma_resv_init(&obj->_resv);
if (!obj->resv)
obj->resv = &obj->_resv;
@@ -210,6 +211,7 @@ void drm_gem_private_object_fini(struct drm_gem_object *obj)
WARN_ON(obj->dma_buf);
dma_resv_fini(&obj->_resv);
+ mutex_destroy(&obj->gpuva.lock);
}
EXPORT_SYMBOL(drm_gem_private_object_fini);
@@ -332,7 +334,12 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
if (obj->funcs->close)
obj->funcs->close(obj, file_priv);
+ mutex_lock(&file_priv->prime.lock);
+
drm_prime_remove_buf_handle(&file_priv->prime, id);
+
+ mutex_unlock(&file_priv->prime.lock);
+
drm_vma_node_revoke(&obj->vma_node, file_priv);
drm_gem_object_handle_put_unlocked(obj);
@@ -621,7 +628,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
struct page **pages;
struct folio *folio;
struct folio_batch fbatch;
- long i, j, npages;
+ unsigned long i, j, npages;
if (WARN_ON(!obj->filp))
return ERR_PTR(-EINVAL);
@@ -645,7 +652,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
i = 0;
while (i < npages) {
- long nr;
+ unsigned long nr;
folio = shmem_read_folio_gfp(mapping, i,
mapping_gfp_mask(mapping));
if (IS_ERR(folio))
@@ -870,14 +877,6 @@ long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
}
EXPORT_SYMBOL(drm_gem_dma_resv_wait);
-/**
- * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
- * @dev: drm_device
- * @data: ioctl data
- * @file_priv: drm file-private structure
- *
- * Releases the handle to an mm object.
- */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -893,17 +892,6 @@ drm_gem_close_ioctl(struct drm_device *dev, void *data,
return ret;
}
-/**
- * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
- * @dev: drm_device
- * @data: ioctl data
- * @file_priv: drm file-private structure
- *
- * Create a global name for an object, returning the name.
- *
- * Note that the name does not hold a reference; when the object
- * is freed, the name goes away.
- */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -943,17 +931,6 @@ err:
return ret;
}
-/**
- * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
- * @dev: drm_device
- * @data: ioctl data
- * @file_priv: drm file-private structure
- *
- * Open an object using the global name, returning a handle and the size.
- *
- * This handle (of course) holds a reference to the object, so the object
- * will not go away until the handle is deleted.
- */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -988,6 +965,57 @@ err:
return ret;
}
+int drm_gem_change_handle_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_gem_change_handle *args = data;
+ struct drm_gem_object *obj;
+ int ret;
+
+ if (!drm_core_check_feature(dev, DRIVER_GEM))
+ return -EOPNOTSUPP;
+
+ obj = drm_gem_object_lookup(file_priv, args->handle);
+ if (!obj)
+ return -ENOENT;
+
+	if (args->handle == args->new_handle) {
+		drm_gem_object_put(obj);
+		return 0;
+	}
+
+ mutex_lock(&file_priv->prime.lock);
+
+ spin_lock(&file_priv->table_lock);
+ ret = idr_alloc(&file_priv->object_idr, obj,
+ args->new_handle, args->new_handle + 1, GFP_NOWAIT);
+ spin_unlock(&file_priv->table_lock);
+
+ if (ret < 0)
+ goto out_unlock;
+
+ if (obj->dma_buf) {
+ ret = drm_prime_add_buf_handle(&file_priv->prime, obj->dma_buf, args->new_handle);
+ if (ret < 0) {
+ spin_lock(&file_priv->table_lock);
+ idr_remove(&file_priv->object_idr, args->new_handle);
+ spin_unlock(&file_priv->table_lock);
+ goto out_unlock;
+ }
+
+ drm_prime_remove_buf_handle(&file_priv->prime, args->handle);
+ }
+
+ ret = 0;
+
+ spin_lock(&file_priv->table_lock);
+ idr_remove(&file_priv->object_idr, args->handle);
+ spin_unlock(&file_priv->table_lock);
+
+out_unlock:
+	mutex_unlock(&file_priv->prime.lock);
+
+	drm_gem_object_put(obj);
+
+	return ret;
+}
+
/**
* drm_gem_open - initializes GEM file-private structures at devnode open time
* @dev: drm_device which is being opened by userspace
diff --git a/drivers/gpu/drm/drm_gpusvm.c b/drivers/gpu/drm/drm_gpusvm.c
index 5bb4c77db2c3..eeeeb99cfdf6 100644
--- a/drivers/gpu/drm/drm_gpusvm.c
+++ b/drivers/gpu/drm/drm_gpusvm.c
@@ -271,107 +271,50 @@ npages_in_range(unsigned long start, unsigned long end)
}
/**
- * drm_gpusvm_range_find() - Find GPU SVM range from GPU SVM notifier
- * @notifier: Pointer to the GPU SVM notifier structure.
- * @start: Start address of the range
- * @end: End address of the range
+ * drm_gpusvm_notifier_find() - Find GPU SVM notifier from GPU SVM
+ * @gpusvm: Pointer to the GPU SVM structure.
+ * @start: Start address of the notifier
+ * @end: End address of the notifier
*
- * Return: A pointer to the drm_gpusvm_range if found or NULL
+ * Return: A pointer to the drm_gpusvm_notifier if found or NULL
*/
-struct drm_gpusvm_range *
-drm_gpusvm_range_find(struct drm_gpusvm_notifier *notifier, unsigned long start,
- unsigned long end)
+struct drm_gpusvm_notifier *
+drm_gpusvm_notifier_find(struct drm_gpusvm *gpusvm, unsigned long start,
+ unsigned long end)
{
struct interval_tree_node *itree;
- itree = interval_tree_iter_first(&notifier->root, start, end - 1);
+ itree = interval_tree_iter_first(&gpusvm->root, start, end - 1);
if (itree)
- return container_of(itree, struct drm_gpusvm_range, itree);
+ return container_of(itree, struct drm_gpusvm_notifier, itree);
else
return NULL;
}
-EXPORT_SYMBOL_GPL(drm_gpusvm_range_find);
+EXPORT_SYMBOL_GPL(drm_gpusvm_notifier_find);
/**
- * drm_gpusvm_for_each_range_safe() - Safely iterate over GPU SVM ranges in a notifier
- * @range__: Iterator variable for the ranges
- * @next__: Iterator variable for the ranges temporay storage
- * @notifier__: Pointer to the GPU SVM notifier
- * @start__: Start address of the range
- * @end__: End address of the range
- *
- * This macro is used to iterate over GPU SVM ranges in a notifier while
- * removing ranges from it.
- */
-#define drm_gpusvm_for_each_range_safe(range__, next__, notifier__, start__, end__) \
- for ((range__) = drm_gpusvm_range_find((notifier__), (start__), (end__)), \
- (next__) = __drm_gpusvm_range_next(range__); \
- (range__) && (drm_gpusvm_range_start(range__) < (end__)); \
- (range__) = (next__), (next__) = __drm_gpusvm_range_next(range__))
-
-/**
- * __drm_gpusvm_notifier_next() - get the next drm_gpusvm_notifier in the list
- * @notifier: a pointer to the current drm_gpusvm_notifier
+ * drm_gpusvm_range_find() - Find GPU SVM range from GPU SVM notifier
+ * @notifier: Pointer to the GPU SVM notifier structure.
+ * @start: Start address of the range
+ * @end: End address of the range
*
- * Return: A pointer to the next drm_gpusvm_notifier if available, or NULL if
- * the current notifier is the last one or if the input notifier is
- * NULL.
+ * Return: A pointer to the drm_gpusvm_range if found or NULL
*/
-static struct drm_gpusvm_notifier *
-__drm_gpusvm_notifier_next(struct drm_gpusvm_notifier *notifier)
-{
- if (notifier && !list_is_last(&notifier->entry,
- &notifier->gpusvm->notifier_list))
- return list_next_entry(notifier, entry);
-
- return NULL;
-}
-
-static struct drm_gpusvm_notifier *
-notifier_iter_first(struct rb_root_cached *root, unsigned long start,
- unsigned long last)
+struct drm_gpusvm_range *
+drm_gpusvm_range_find(struct drm_gpusvm_notifier *notifier, unsigned long start,
+ unsigned long end)
{
struct interval_tree_node *itree;
- itree = interval_tree_iter_first(root, start, last);
+ itree = interval_tree_iter_first(&notifier->root, start, end - 1);
if (itree)
- return container_of(itree, struct drm_gpusvm_notifier, itree);
+ return container_of(itree, struct drm_gpusvm_range, itree);
else
return NULL;
}
-
-/**
- * drm_gpusvm_for_each_notifier() - Iterate over GPU SVM notifiers in a gpusvm
- * @notifier__: Iterator variable for the notifiers
- * @notifier__: Pointer to the GPU SVM notifier
- * @start__: Start address of the notifier
- * @end__: End address of the notifier
- *
- * This macro is used to iterate over GPU SVM notifiers in a gpusvm.
- */
-#define drm_gpusvm_for_each_notifier(notifier__, gpusvm__, start__, end__) \
- for ((notifier__) = notifier_iter_first(&(gpusvm__)->root, (start__), (end__) - 1); \
- (notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__)); \
- (notifier__) = __drm_gpusvm_notifier_next(notifier__))
-
-/**
- * drm_gpusvm_for_each_notifier_safe() - Safely iterate over GPU SVM notifiers in a gpusvm
- * @notifier__: Iterator variable for the notifiers
- * @next__: Iterator variable for the notifiers temporay storage
- * @notifier__: Pointer to the GPU SVM notifier
- * @start__: Start address of the notifier
- * @end__: End address of the notifier
- *
- * This macro is used to iterate over GPU SVM notifiers in a gpusvm while
- * removing notifiers from it.
- */
-#define drm_gpusvm_for_each_notifier_safe(notifier__, next__, gpusvm__, start__, end__) \
- for ((notifier__) = notifier_iter_first(&(gpusvm__)->root, (start__), (end__) - 1), \
- (next__) = __drm_gpusvm_notifier_next(notifier__); \
- (notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__)); \
- (notifier__) = (next__), (next__) = __drm_gpusvm_notifier_next(notifier__))
+EXPORT_SYMBOL_GPL(drm_gpusvm_range_find);
/**
* drm_gpusvm_notifier_invalidate() - Invalidate a GPU SVM notifier.
@@ -430,6 +373,12 @@ static const struct mmu_interval_notifier_ops drm_gpusvm_notifier_ops = {
*
* This function initializes the GPU SVM.
*
+ * Note: If only the simple drm_gpusvm_pages API (get/unmap/free) is used,
+ * then only @gpusvm, @name, and @drm are required. However, the same base
+ * @gpusvm can also be used with both modes together, in which case the full
+ * setup is needed, and the core drm_gpusvm_pages API will simply never use
+ * the other fields.
+ *
* Return: 0 on success, a negative error code on failure.
*/
int drm_gpusvm_init(struct drm_gpusvm *gpusvm,
@@ -440,8 +389,16 @@ int drm_gpusvm_init(struct drm_gpusvm *gpusvm,
const struct drm_gpusvm_ops *ops,
const unsigned long *chunk_sizes, int num_chunks)
{
- if (!ops->invalidate || !num_chunks)
- return -EINVAL;
+ if (mm) {
+ if (!ops->invalidate || !num_chunks)
+ return -EINVAL;
+ mmgrab(mm);
+ } else {
+ /* No full SVM mode, only core drm_gpusvm_pages API. */
+ if (ops || num_chunks || mm_range || notifier_size ||
+ device_private_page_owner)
+ return -EINVAL;
+ }
gpusvm->name = name;
gpusvm->drm = drm;
@@ -454,7 +411,6 @@ int drm_gpusvm_init(struct drm_gpusvm *gpusvm,
gpusvm->chunk_sizes = chunk_sizes;
gpusvm->num_chunks = num_chunks;
- mmgrab(mm);
gpusvm->root = RB_ROOT_CACHED;
INIT_LIST_HEAD(&gpusvm->notifier_list);
@@ -473,22 +429,6 @@ int drm_gpusvm_init(struct drm_gpusvm *gpusvm,
EXPORT_SYMBOL_GPL(drm_gpusvm_init);
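/*
 * Minimal init sketch, assuming the full drm_gpusvm_init() signature from
 * drm_gpusvm.h: in pages-only mode every argument after @drm is left NULL/0,
 * matching the validation above.
 */
static int example_init_pages_only(struct drm_gpusvm *gpusvm,
				   struct drm_device *drm)
{
	return drm_gpusvm_init(gpusvm, "pages-only", drm,
			       NULL, NULL,	/* no mm, no private page owner */
			       0, 0, 0,		/* no VA start/range/notifier size */
			       NULL, NULL, 0);	/* no ops, no chunk sizes */
}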
/**
- * drm_gpusvm_notifier_find() - Find GPU SVM notifier
- * @gpusvm: Pointer to the GPU SVM structure
- * @fault_addr: Fault address
- *
- * This function finds the GPU SVM notifier associated with the fault address.
- *
- * Return: Pointer to the GPU SVM notifier on success, NULL otherwise.
- */
-static struct drm_gpusvm_notifier *
-drm_gpusvm_notifier_find(struct drm_gpusvm *gpusvm,
- unsigned long fault_addr)
-{
- return notifier_iter_first(&gpusvm->root, fault_addr, fault_addr + 1);
-}
-
-/**
* to_drm_gpusvm_notifier() - retrieve the container struct for a given rbtree node
* @node: a pointer to the rbtree node embedded within a drm_gpusvm_notifier struct
*
@@ -562,7 +502,8 @@ void drm_gpusvm_fini(struct drm_gpusvm *gpusvm)
drm_gpusvm_range_remove(gpusvm, range);
}
- mmdrop(gpusvm->mm);
+ if (gpusvm->mm)
+ mmdrop(gpusvm->mm);
WARN_ON(!RB_EMPTY_ROOT(&gpusvm->root.rb_root));
}
EXPORT_SYMBOL_GPL(drm_gpusvm_fini);
@@ -702,13 +643,42 @@ drm_gpusvm_range_alloc(struct drm_gpusvm *gpusvm,
range->itree.start = ALIGN_DOWN(fault_addr, chunk_size);
range->itree.last = ALIGN(fault_addr + 1, chunk_size) - 1;
INIT_LIST_HEAD(&range->entry);
- range->notifier_seq = LONG_MAX;
- range->flags.migrate_devmem = migrate_devmem ? 1 : 0;
+ range->pages.notifier_seq = LONG_MAX;
+ range->pages.flags.migrate_devmem = migrate_devmem ? 1 : 0;
return range;
}
/**
+ * drm_gpusvm_hmm_pfn_to_order() - Get the largest CPU mapping order.
+ * @hmm_pfn: The current hmm_pfn.
+ * @hmm_pfn_index: Index of the @hmm_pfn within the pfn array.
+ * @npages: Number of pages within the pfn array, i.e. the hmm range size.
+ *
+ * To allow skipping PFNs with the same flags (like when they belong to
+ * the same huge PTE) when looping over the pfn array, take a given hmm_pfn
+ * and return the largest order that will fit inside the CPU PTE, while also
+ * accounting for the original hmm range boundaries.
+ *
+ * Return: The largest order that will safely fit within the size of the hmm_pfn
+ * CPU PTE.
+ */
+static unsigned int drm_gpusvm_hmm_pfn_to_order(unsigned long hmm_pfn,
+ unsigned long hmm_pfn_index,
+ unsigned long npages)
+{
+ unsigned long size;
+
+ size = 1UL << hmm_pfn_to_map_order(hmm_pfn);
+ size -= (hmm_pfn & ~HMM_PFN_FLAGS) & (size - 1);
+ hmm_pfn_index += size;
+ if (hmm_pfn_index > npages)
+ size -= (hmm_pfn_index - npages);
+
+ return ilog2(size);
+}
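/*
 * Worked example of the clamping above (values illustrative): for a pfn in a
 * 2M THP (map order 9, 512 pages) that sits 500 pages into the huge mapping,
 * with only 8 pages left before the end of the hmm range, size starts at 512,
 * drops to 12 after subtracting the in-PTE offset, and is clamped to 8 by the
 * range boundary, so ilog2(8) = 3 is returned.
 */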
+
+/**
* drm_gpusvm_check_pages() - Check pages
* @gpusvm: Pointer to the GPU SVM structure
* @notifier: Pointer to the GPU SVM notifier structure
@@ -766,7 +736,7 @@ static bool drm_gpusvm_check_pages(struct drm_gpusvm *gpusvm,
err = -EFAULT;
goto err_free;
}
- i += 0x1 << hmm_pfn_to_map_order(pfns[i]);
+ i += 0x1 << drm_gpusvm_hmm_pfn_to_order(pfns[i], i, npages);
}
err_free:
@@ -943,7 +913,7 @@ drm_gpusvm_range_find_or_insert(struct drm_gpusvm *gpusvm,
if (!mmget_not_zero(mm))
return ERR_PTR(-EFAULT);
- notifier = drm_gpusvm_notifier_find(gpusvm, fault_addr);
+ notifier = drm_gpusvm_notifier_find(gpusvm, fault_addr, fault_addr + 1);
if (!notifier) {
notifier = drm_gpusvm_notifier_alloc(gpusvm, fault_addr);
if (IS_ERR(notifier)) {
@@ -1024,31 +994,31 @@ err_mmunlock:
EXPORT_SYMBOL_GPL(drm_gpusvm_range_find_or_insert);
/**
- * __drm_gpusvm_range_unmap_pages() - Unmap pages associated with a GPU SVM range (internal)
+ * __drm_gpusvm_unmap_pages() - Unmap pages associated with GPU SVM pages (internal)
* @gpusvm: Pointer to the GPU SVM structure
- * @range: Pointer to the GPU SVM range structure
+ * @svm_pages: Pointer to the GPU SVM pages structure
* @npages: Number of pages to unmap
*
- * This function unmap pages associated with a GPU SVM range. Assumes and
+ * This function unmaps pages associated with a GPU SVM pages struct. Assumes and
* asserts correct locking is in place when called.
*/
-static void __drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm,
- struct drm_gpusvm_range *range,
- unsigned long npages)
+static void __drm_gpusvm_unmap_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_pages *svm_pages,
+ unsigned long npages)
{
- unsigned long i, j;
- struct drm_pagemap *dpagemap = range->dpagemap;
+ struct drm_pagemap *dpagemap = svm_pages->dpagemap;
struct device *dev = gpusvm->drm->dev;
+ unsigned long i, j;
lockdep_assert_held(&gpusvm->notifier_lock);
- if (range->flags.has_dma_mapping) {
- struct drm_gpusvm_range_flags flags = {
- .__flags = range->flags.__flags,
+ if (svm_pages->flags.has_dma_mapping) {
+ struct drm_gpusvm_pages_flags flags = {
+ .__flags = svm_pages->flags.__flags,
};
for (i = 0, j = 0; i < npages; j++) {
- struct drm_pagemap_device_addr *addr = &range->dma_addr[j];
+ struct drm_pagemap_addr *addr = &svm_pages->dma_addr[j];
if (addr->proto == DRM_INTERCONNECT_SYSTEM)
dma_unmap_page(dev,
@@ -1064,31 +1034,52 @@ static void __drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm,
/* WRITE_ONCE pairs with READ_ONCE for opportunistic checks */
flags.has_devmem_pages = false;
flags.has_dma_mapping = false;
- WRITE_ONCE(range->flags.__flags, flags.__flags);
+ WRITE_ONCE(svm_pages->flags.__flags, flags.__flags);
- range->dpagemap = NULL;
+ svm_pages->dpagemap = NULL;
}
}
/**
- * drm_gpusvm_range_free_pages() - Free pages associated with a GPU SVM range
+ * __drm_gpusvm_free_pages() - Free dma array associated with GPU SVM pages
* @gpusvm: Pointer to the GPU SVM structure
- * @range: Pointer to the GPU SVM range structure
+ * @svm_pages: Pointer to the GPU SVM pages structure
*
- * This function frees the dma address array associated with a GPU SVM range.
+ * This function frees the dma address array associated with a GPU SVM pages
+ * struct.
*/
-static void drm_gpusvm_range_free_pages(struct drm_gpusvm *gpusvm,
- struct drm_gpusvm_range *range)
+static void __drm_gpusvm_free_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_pages *svm_pages)
{
lockdep_assert_held(&gpusvm->notifier_lock);
- if (range->dma_addr) {
- kvfree(range->dma_addr);
- range->dma_addr = NULL;
+ if (svm_pages->dma_addr) {
+ kvfree(svm_pages->dma_addr);
+ svm_pages->dma_addr = NULL;
}
}
/**
+ * drm_gpusvm_free_pages() - Free dma-mapping associated with GPU SVM pages
+ * struct
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @svm_pages: Pointer to the GPU SVM pages structure
+ * @npages: Number of mapped pages
+ *
+ * This function unmaps and frees the dma address array associated with a GPU
+ * SVM pages struct.
+ */
+void drm_gpusvm_free_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_pages *svm_pages,
+ unsigned long npages)
+{
+ drm_gpusvm_notifier_lock(gpusvm);
+ __drm_gpusvm_unmap_pages(gpusvm, svm_pages, npages);
+ __drm_gpusvm_free_pages(gpusvm, svm_pages);
+ drm_gpusvm_notifier_unlock(gpusvm);
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_free_pages);
+
+/**
* drm_gpusvm_range_remove() - Remove GPU SVM range
* @gpusvm: Pointer to the GPU SVM structure
* @range: Pointer to the GPU SVM range to be removed
@@ -1107,13 +1098,14 @@ void drm_gpusvm_range_remove(struct drm_gpusvm *gpusvm,
drm_gpusvm_driver_lock_held(gpusvm);
notifier = drm_gpusvm_notifier_find(gpusvm,
- drm_gpusvm_range_start(range));
+ drm_gpusvm_range_start(range),
+ drm_gpusvm_range_start(range) + 1);
if (WARN_ON_ONCE(!notifier))
return;
drm_gpusvm_notifier_lock(gpusvm);
- __drm_gpusvm_range_unmap_pages(gpusvm, range, npages);
- drm_gpusvm_range_free_pages(gpusvm, range);
+ __drm_gpusvm_unmap_pages(gpusvm, &range->pages, npages);
+ __drm_gpusvm_free_pages(gpusvm, &range->pages);
__drm_gpusvm_range_remove(notifier, range);
drm_gpusvm_notifier_unlock(gpusvm);
@@ -1179,6 +1171,28 @@ void drm_gpusvm_range_put(struct drm_gpusvm_range *range)
EXPORT_SYMBOL_GPL(drm_gpusvm_range_put);
/**
+ * drm_gpusvm_pages_valid() - GPU SVM range pages valid
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @svm_pages: Pointer to the GPU SVM pages structure
+ *
+ * This function determines whether the pages backing a GPU SVM range are
+ * valid. It is expected to be called holding gpusvm->notifier_lock, as the
+ * last step before committing a GPU binding. This is akin to a notifier seqno
+ * check in the HMM documentation, but because notifiers here are wider (i.e.,
+ * a notifier may span multiple ranges) this function is required for
+ * finer-grained, per-range checking of whether pages are valid.
+ *
+ * Return: True if GPU SVM range has valid pages, False otherwise
+ */
+static bool drm_gpusvm_pages_valid(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_pages *svm_pages)
+{
+ lockdep_assert_held(&gpusvm->notifier_lock);
+
+ return svm_pages->flags.has_devmem_pages || svm_pages->flags.has_dma_mapping;
+}
+
+/**
* drm_gpusvm_range_pages_valid() - GPU SVM range pages valid
* @gpusvm: Pointer to the GPU SVM structure
* @range: Pointer to the GPU SVM range structure
@@ -1195,9 +1209,7 @@ EXPORT_SYMBOL_GPL(drm_gpusvm_range_put);
bool drm_gpusvm_range_pages_valid(struct drm_gpusvm *gpusvm,
struct drm_gpusvm_range *range)
{
- lockdep_assert_held(&gpusvm->notifier_lock);
-
- return range->flags.has_devmem_pages || range->flags.has_dma_mapping;
+ return drm_gpusvm_pages_valid(gpusvm, &range->pages);
}
EXPORT_SYMBOL_GPL(drm_gpusvm_range_pages_valid);
@@ -1211,66 +1223,71 @@ EXPORT_SYMBOL_GPL(drm_gpusvm_range_pages_valid);
*
* Return: True if GPU SVM range has valid pages, False otherwise
*/
-static bool
-drm_gpusvm_range_pages_valid_unlocked(struct drm_gpusvm *gpusvm,
- struct drm_gpusvm_range *range)
+static bool drm_gpusvm_pages_valid_unlocked(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_pages *svm_pages)
{
bool pages_valid;
- if (!range->dma_addr)
+ if (!svm_pages->dma_addr)
return false;
drm_gpusvm_notifier_lock(gpusvm);
- pages_valid = drm_gpusvm_range_pages_valid(gpusvm, range);
+ pages_valid = drm_gpusvm_pages_valid(gpusvm, svm_pages);
if (!pages_valid)
- drm_gpusvm_range_free_pages(gpusvm, range);
+ __drm_gpusvm_free_pages(gpusvm, svm_pages);
drm_gpusvm_notifier_unlock(gpusvm);
return pages_valid;
}
/**
- * drm_gpusvm_range_get_pages() - Get pages for a GPU SVM range
+ * drm_gpusvm_get_pages() - Get pages and populate GPU SVM pages struct
* @gpusvm: Pointer to the GPU SVM structure
- * @range: Pointer to the GPU SVM range structure
+ * @svm_pages: The SVM pages to populate. This will contain the DMA addresses.
+ * @mm: The mm corresponding to the CPU range
+ * @notifier: The corresponding notifier for the given CPU range
+ * @pages_start: Start CPU address for the pages
+ * @pages_end: End CPU address for the pages (exclusive)
* @ctx: GPU SVM context
*
- * This function gets pages for a GPU SVM range and ensures they are mapped for
- * DMA access.
+ * This function gets pages for a CPU virtual address range and ensures they
+ * are mapped for DMA access.
*
* Return: 0 on success, negative error code on failure.
*/
-int drm_gpusvm_range_get_pages(struct drm_gpusvm *gpusvm,
- struct drm_gpusvm_range *range,
- const struct drm_gpusvm_ctx *ctx)
+int drm_gpusvm_get_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_pages *svm_pages,
+ struct mm_struct *mm,
+ struct mmu_interval_notifier *notifier,
+ unsigned long pages_start, unsigned long pages_end,
+ const struct drm_gpusvm_ctx *ctx)
{
- struct mmu_interval_notifier *notifier = &range->notifier->notifier;
struct hmm_range hmm_range = {
.default_flags = HMM_PFN_REQ_FAULT | (ctx->read_only ? 0 :
HMM_PFN_REQ_WRITE),
.notifier = notifier,
- .start = drm_gpusvm_range_start(range),
- .end = drm_gpusvm_range_end(range),
+ .start = pages_start,
+ .end = pages_end,
.dev_private_owner = gpusvm->device_private_page_owner,
};
- struct mm_struct *mm = gpusvm->mm;
void *zdd;
unsigned long timeout =
jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
unsigned long i, j;
- unsigned long npages = npages_in_range(drm_gpusvm_range_start(range),
- drm_gpusvm_range_end(range));
+ unsigned long npages = npages_in_range(pages_start, pages_end);
unsigned long num_dma_mapped;
unsigned int order = 0;
unsigned long *pfns;
int err = 0;
struct dev_pagemap *pagemap;
struct drm_pagemap *dpagemap;
- struct drm_gpusvm_range_flags flags;
+ struct drm_gpusvm_pages_flags flags;
+ enum dma_data_direction dma_dir = ctx->read_only ? DMA_TO_DEVICE :
+ DMA_BIDIRECTIONAL;
retry:
hmm_range.notifier_seq = mmu_interval_read_begin(notifier);
- if (drm_gpusvm_range_pages_valid_unlocked(gpusvm, range))
+ if (drm_gpusvm_pages_valid_unlocked(gpusvm, svm_pages))
goto set_seqno;
pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);
@@ -1310,7 +1327,7 @@ map_pages:
*/
drm_gpusvm_notifier_lock(gpusvm);
- flags.__flags = range->flags.__flags;
+ flags.__flags = svm_pages->flags.__flags;
if (flags.unmapped) {
drm_gpusvm_notifier_unlock(gpusvm);
err = -EFAULT;
@@ -1323,13 +1340,12 @@ map_pages:
goto retry;
}
- if (!range->dma_addr) {
+ if (!svm_pages->dma_addr) {
/* Unlock and restart mapping to allocate memory. */
drm_gpusvm_notifier_unlock(gpusvm);
- range->dma_addr = kvmalloc_array(npages,
- sizeof(*range->dma_addr),
- GFP_KERNEL);
- if (!range->dma_addr) {
+ svm_pages->dma_addr =
+ kvmalloc_array(npages, sizeof(*svm_pages->dma_addr), GFP_KERNEL);
+ if (!svm_pages->dma_addr) {
err = -ENOMEM;
goto err_free;
}
@@ -1342,7 +1358,7 @@ map_pages:
for (i = 0, j = 0; i < npages; ++j) {
struct page *page = hmm_pfn_to_page(pfns[i]);
- order = hmm_pfn_to_map_order(pfns[i]);
+ order = drm_gpusvm_hmm_pfn_to_order(pfns[i], i, npages);
if (is_device_private_page(page) ||
is_device_coherent_page(page)) {
if (zdd != page->zone_device_data && i > 0) {
@@ -1368,13 +1384,13 @@ map_pages:
goto err_unmap;
}
}
- range->dma_addr[j] =
+ svm_pages->dma_addr[j] =
dpagemap->ops->device_map(dpagemap,
gpusvm->drm->dev,
page, order,
- DMA_BIDIRECTIONAL);
+ dma_dir);
if (dma_mapping_error(gpusvm->drm->dev,
- range->dma_addr[j].addr)) {
+ svm_pages->dma_addr[j].addr)) {
err = -EFAULT;
goto err_unmap;
}
@@ -1394,15 +1410,15 @@ map_pages:
addr = dma_map_page(gpusvm->drm->dev,
page, 0,
PAGE_SIZE << order,
- DMA_BIDIRECTIONAL);
+ dma_dir);
if (dma_mapping_error(gpusvm->drm->dev, addr)) {
err = -EFAULT;
goto err_unmap;
}
- range->dma_addr[j] = drm_pagemap_device_addr_encode
+ svm_pages->dma_addr[j] = drm_pagemap_addr_encode
(addr, DRM_INTERCONNECT_SYSTEM, order,
- DMA_BIDIRECTIONAL);
+ dma_dir);
}
i += 1 << order;
num_dma_mapped = i;
@@ -1411,21 +1427,21 @@ map_pages:
if (pagemap) {
flags.has_devmem_pages = true;
- range->dpagemap = dpagemap;
+ svm_pages->dpagemap = dpagemap;
}
/* WRITE_ONCE pairs with READ_ONCE for opportunistic checks */
- WRITE_ONCE(range->flags.__flags, flags.__flags);
+ WRITE_ONCE(svm_pages->flags.__flags, flags.__flags);
drm_gpusvm_notifier_unlock(gpusvm);
kvfree(pfns);
set_seqno:
- range->notifier_seq = hmm_range.notifier_seq;
+ svm_pages->notifier_seq = hmm_range.notifier_seq;
return 0;
err_unmap:
- __drm_gpusvm_range_unmap_pages(gpusvm, range, num_dma_mapped);
+ __drm_gpusvm_unmap_pages(gpusvm, svm_pages, num_dma_mapped);
drm_gpusvm_notifier_unlock(gpusvm);
err_free:
kvfree(pfns);
@@ -1433,11 +1449,62 @@ err_free:
goto retry;
return err;
}
+EXPORT_SYMBOL_GPL(drm_gpusvm_get_pages);
+
+/**
+ * drm_gpusvm_range_get_pages() - Get pages for a GPU SVM range
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @range: Pointer to the GPU SVM range structure
+ * @ctx: GPU SVM context
+ *
+ * This function gets pages for a GPU SVM range and ensures they are mapped for
+ * DMA access.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int drm_gpusvm_range_get_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_range *range,
+ const struct drm_gpusvm_ctx *ctx)
+{
+ return drm_gpusvm_get_pages(gpusvm, &range->pages, gpusvm->mm,
+ &range->notifier->notifier,
+ drm_gpusvm_range_start(range),
+ drm_gpusvm_range_end(range), ctx);
+}
EXPORT_SYMBOL_GPL(drm_gpusvm_range_get_pages);
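/*
 * Illustrative driver-side flow (function names hypothetical) for the
 * pages-only API: populate a drm_gpusvm_pages struct against the driver's own
 * mmu_interval_notifier, then tear it down again. Error handling trimmed.
 */
static int example_bind_cpu_range(struct drm_gpusvm *gpusvm,
				  struct drm_gpusvm_pages *svm_pages,
				  struct mm_struct *mm,
				  struct mmu_interval_notifier *notifier,
				  unsigned long start, unsigned long end)
{
	struct drm_gpusvm_ctx ctx = {};
	int err;

	err = drm_gpusvm_get_pages(gpusvm, svm_pages, mm, notifier,
				   start, end, &ctx);
	if (err)
		return err;

	/* ... program GPU page tables from svm_pages->dma_addr ... */

	return 0;
}

static void example_unbind_cpu_range(struct drm_gpusvm *gpusvm,
				     struct drm_gpusvm_pages *svm_pages,
				     unsigned long npages)
{
	/* Unmaps and frees the dma address array in one go. */
	drm_gpusvm_free_pages(gpusvm, svm_pages, npages);
}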
/**
+ * drm_gpusvm_unmap_pages() - Unmap GPU svm pages
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @svm_pages: Pointer to the GPU SVM pages structure
+ * @npages: Number of pages in @svm_pages.
+ * @ctx: GPU SVM context
+ *
+ * This function unmaps pages associated with a GPU SVM pages struct. If
+ * @ctx->in_notifier is set, it is assumed that gpusvm->notifier_lock is held
+ * in write mode; if it is clear, it acquires gpusvm->notifier_lock in read
+ * mode. It must be called from the invalidate() callback of the corresponding
+ * notifier to satisfy the IOMMU security model.
+ */
+void drm_gpusvm_unmap_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_pages *svm_pages,
+ unsigned long npages,
+ const struct drm_gpusvm_ctx *ctx)
+{
+ if (ctx->in_notifier)
+ lockdep_assert_held_write(&gpusvm->notifier_lock);
+ else
+ drm_gpusvm_notifier_lock(gpusvm);
+
+ __drm_gpusvm_unmap_pages(gpusvm, svm_pages, npages);
+
+ if (!ctx->in_notifier)
+ drm_gpusvm_notifier_unlock(gpusvm);
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_unmap_pages);
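/*
 * Sketch of the invalidate() path implied by the locking rules above: the
 * notifier callback already holds notifier_lock in write mode, so it sets
 * ctx.in_notifier to skip re-taking it. Names are illustrative.
 */
static void example_invalidate(struct drm_gpusvm *gpusvm,
			       struct drm_gpusvm_pages *svm_pages,
			       unsigned long npages)
{
	struct drm_gpusvm_ctx ctx = { .in_notifier = true };

	drm_gpusvm_unmap_pages(gpusvm, svm_pages, npages, &ctx);
}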
+
+/**
* drm_gpusvm_range_unmap_pages() - Unmap pages associated with a GPU SVM range
- * drm_gpusvm_range_evict() - Evict GPU SVM range
* @gpusvm: Pointer to the GPU SVM structure
* @range: Pointer to the GPU SVM range structure
* @ctx: GPU SVM context
@@ -1455,15 +1522,7 @@ void drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm,
unsigned long npages = npages_in_range(drm_gpusvm_range_start(range),
drm_gpusvm_range_end(range));
- if (ctx->in_notifier)
- lockdep_assert_held_write(&gpusvm->notifier_lock);
- else
- drm_gpusvm_notifier_lock(gpusvm);
-
- __drm_gpusvm_range_unmap_pages(gpusvm, range, npages);
-
- if (!ctx->in_notifier)
- drm_gpusvm_notifier_unlock(gpusvm);
+ return drm_gpusvm_unmap_pages(gpusvm, &range->pages, npages, ctx);
}
EXPORT_SYMBOL_GPL(drm_gpusvm_range_unmap_pages);
@@ -1561,10 +1620,10 @@ void drm_gpusvm_range_set_unmapped(struct drm_gpusvm_range *range,
{
lockdep_assert_held_write(&range->gpusvm->notifier_lock);
- range->flags.unmapped = true;
+ range->pages.flags.unmapped = true;
if (drm_gpusvm_range_start(range) < mmu_range->start ||
drm_gpusvm_range_end(range) > mmu_range->end)
- range->flags.partial_unmap = true;
+ range->pages.flags.partial_unmap = true;
}
EXPORT_SYMBOL_GPL(drm_gpusvm_range_set_unmapped);
diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c
index 86853535fb7b..af63f4d00315 100644
--- a/drivers/gpu/drm/drm_gpuvm.c
+++ b/drivers/gpu/drm/drm_gpuvm.c
@@ -421,6 +421,71 @@
*/
/**
+ * DOC: Madvise Logic - Splitting and Traversal
+ *
+ * This logic handles GPU VA range updates by generating remap and map operations
+ * without performing unmaps or merging existing mappings.
+ *
+ * 1) The requested range lies entirely within a single drm_gpuva. The logic splits
+ * the existing mapping at the start and end boundaries and inserts a new map.
+ *
+ * ::
+ * a start end b
+ * pre: |-----------------------|
+ * drm_gpuva1
+ *
+ * a start end b
+ * new: |-----|=========|-------|
+ * remap map remap
+ *
+ * one REMAP and one MAP: same behaviour as SPLIT and MERGE
+ *
+ * 2) The requested range spans multiple drm_gpuva regions. The logic traverses
+ * across boundaries, remapping the start and end segments, and inserting two
+ * map operations to cover the full range.
+ *
+ * ::
+ * a start b c end d
+ * pre: |------------------|--------------|------------------|
+ * drm_gpuva1 drm_gpuva2 drm_gpuva3
+ *
+ * a start b c end d
+ * new: |-------|==========|--------------|========|---------|
+ * remap1 map1 drm_gpuva2 map2 remap2
+ *
+ * two REMAPS and two MAPS
+ *
+ * 3) Either start or end lies within a drm_gpuva. A single remap and a single
+ * map operation are generated to update the affected portion.
+ *
+ *
+ * ::
+ * a/start b c end d
+ * pre: |------------------|--------------|------------------|
+ * drm_gpuva1 drm_gpuva2 drm_gpuva3
+ *
+ * a/start b c end d
+ * new: |------------------|--------------|========|---------|
+ * drm_gpuva1 drm_gpuva2 map1 remap1
+ *
+ * ::
+ * a start b c/end d
+ * pre: |------------------|--------------|------------------|
+ * drm_gpuva1 drm_gpuva2 drm_gpuva3
+ *
+ * a start b c/end d
+ * new: |-------|==========|--------------|------------------|
+ * remap1 map1 drm_gpuva2 drm_gpuva3
+ *
+ * one REMAP and one MAP
+ *
+ * 4) Both start and end align with existing drm_gpuva boundaries. No operations
+ * are needed as the range is already covered.
+ *
+ * 5) No existing drm_gpuvas. No operations.
+ *
+ * Unlike drm_gpuvm_sm_map_ops_create, this logic avoids unmaps and merging,
+ * focusing solely on remap and map operations for efficient traversal and update.
+ */
+
+/**
* DOC: Locking
*
* In terms of managing &drm_gpuva entries DRM GPUVM does not take care of
@@ -432,8 +497,7 @@
* DRM GPUVM also does not take care of the locking of the backing
* &drm_gem_object buffers GPU VA lists and &drm_gpuvm_bo abstractions by
* itself; drivers are responsible to enforce mutual exclusion using either the
- * GEMs dma_resv lock or alternatively a driver specific external lock. For the
- * latter see also drm_gem_gpuva_set_lock().
+ * GEMs dma_resv lock or the GEMs gpuva.lock mutex.
*
* However, DRM GPUVM contains lockdep checks to ensure callers of its API hold
* the corresponding lock whenever the &drm_gem_objects GPU VA list is accessed
@@ -486,13 +550,18 @@
* u64 addr, u64 range,
* struct drm_gem_object *obj, u64 offset)
* {
+ * struct drm_gpuvm_map_req map_req = {
+ * .map.va.addr = addr,
+ * .map.va.range = range,
+ * .map.gem.obj = obj,
+ * .map.gem.offset = offset,
+ * };
* struct drm_gpuva_ops *ops;
* struct drm_gpuva_op *op
* struct drm_gpuvm_bo *vm_bo;
*
* driver_lock_va_space();
- * ops = drm_gpuvm_sm_map_ops_create(gpuvm, addr, range,
- * obj, offset);
+ * ops = drm_gpuvm_sm_map_ops_create(gpuvm, &map_req);
* if (IS_ERR(ops))
* return PTR_ERR(ops);
*
@@ -1512,7 +1581,7 @@ drm_gpuvm_bo_destroy(struct kref *kref)
drm_gpuvm_bo_list_del(vm_bo, extobj, lock);
drm_gpuvm_bo_list_del(vm_bo, evict, lock);
- drm_gem_gpuva_assert_lock_held(obj);
+ drm_gem_gpuva_assert_lock_held(gpuvm, obj);
list_del(&vm_bo->list.entry.gem);
if (ops && ops->vm_bo_free)
@@ -1533,7 +1602,8 @@ drm_gpuvm_bo_destroy(struct kref *kref)
* If the reference count drops to zero, the &gpuvm_bo is destroyed, which
* includes removing it from the GEMs gpuva list. Hence, if a call to this
* function can potentially let the reference count drop to zero the caller must
- * hold the dma-resv or driver specific GEM gpuva lock.
+ * hold the lock that the GEM uses for its gpuva list (either the GEM's
+ * dma-resv or gpuva.lock mutex).
*
* This function may only be called from non-atomic context.
*
@@ -1557,7 +1627,7 @@ __drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm,
{
struct drm_gpuvm_bo *vm_bo;
- drm_gem_gpuva_assert_lock_held(obj);
+ drm_gem_gpuva_assert_lock_held(gpuvm, obj);
drm_gem_for_each_gpuvm_bo(vm_bo, obj)
if (vm_bo->vm == gpuvm)
return vm_bo;
@@ -1616,7 +1686,7 @@ drm_gpuvm_bo_obtain(struct drm_gpuvm *gpuvm,
if (!vm_bo)
return ERR_PTR(-ENOMEM);
- drm_gem_gpuva_assert_lock_held(obj);
+ drm_gem_gpuva_assert_lock_held(gpuvm, obj);
list_add_tail(&vm_bo->list.entry.gem, &obj->gpuva.list);
return vm_bo;
@@ -1652,7 +1722,7 @@ drm_gpuvm_bo_obtain_prealloc(struct drm_gpuvm_bo *__vm_bo)
return vm_bo;
}
- drm_gem_gpuva_assert_lock_held(obj);
+ drm_gem_gpuva_assert_lock_held(gpuvm, obj);
list_add_tail(&__vm_bo->list.entry.gem, &obj->gpuva.list);
return __vm_bo;
@@ -1824,8 +1894,7 @@ EXPORT_SYMBOL_GPL(drm_gpuva_remove);
* reference of the latter is taken.
*
* This function expects the caller to protect the GEM's GPUVA list against
- * concurrent access using either the GEMs dma_resv lock or a driver specific
- * lock set through drm_gem_gpuva_set_lock().
+ * concurrent access using either the GEM's dma-resv or gpuva.lock mutex.
*/
void
drm_gpuva_link(struct drm_gpuva *va, struct drm_gpuvm_bo *vm_bo)
@@ -1840,7 +1909,7 @@ drm_gpuva_link(struct drm_gpuva *va, struct drm_gpuvm_bo *vm_bo)
va->vm_bo = drm_gpuvm_bo_get(vm_bo);
- drm_gem_gpuva_assert_lock_held(obj);
+ drm_gem_gpuva_assert_lock_held(gpuvm, obj);
list_add_tail(&va->gem.entry, &vm_bo->list.gpuva);
}
EXPORT_SYMBOL_GPL(drm_gpuva_link);
@@ -1860,8 +1929,7 @@ EXPORT_SYMBOL_GPL(drm_gpuva_link);
* the latter is dropped.
*
* This function expects the caller to protect the GEM's GPUVA list against
- * concurrent access using either the GEMs dma_resv lock or a driver specific
- * lock set through drm_gem_gpuva_set_lock().
+ * concurrent access using either the GEM's dma-resv or gpuva.lock mutex.
*/
void
drm_gpuva_unlink(struct drm_gpuva *va)
@@ -1872,7 +1940,7 @@ drm_gpuva_unlink(struct drm_gpuva *va)
if (unlikely(!obj))
return;
- drm_gem_gpuva_assert_lock_held(obj);
+ drm_gem_gpuva_assert_lock_held(va->vm, obj);
list_del_init(&va->gem.entry);
va->vm_bo = NULL;
@@ -2054,16 +2122,18 @@ EXPORT_SYMBOL_GPL(drm_gpuva_unmap);
static int
op_map_cb(const struct drm_gpuvm_ops *fn, void *priv,
- u64 addr, u64 range,
- struct drm_gem_object *obj, u64 offset)
+ const struct drm_gpuvm_map_req *req)
{
struct drm_gpuva_op op = {};
+ if (!req)
+ return 0;
+
op.op = DRM_GPUVA_OP_MAP;
- op.map.va.addr = addr;
- op.map.va.range = range;
- op.map.gem.obj = obj;
- op.map.gem.offset = offset;
+ op.map.va.addr = req->map.va.addr;
+ op.map.va.range = req->map.va.range;
+ op.map.gem.obj = req->map.gem.obj;
+ op.map.gem.offset = req->map.gem.offset;
return fn->sm_step_map(&op, priv);
}
@@ -2088,10 +2158,13 @@ op_remap_cb(const struct drm_gpuvm_ops *fn, void *priv,
static int
op_unmap_cb(const struct drm_gpuvm_ops *fn, void *priv,
- struct drm_gpuva *va, bool merge)
+ struct drm_gpuva *va, bool merge, bool madvise)
{
struct drm_gpuva_op op = {};
+ if (madvise)
+ return 0;
+
op.op = DRM_GPUVA_OP_UNMAP;
op.unmap.va = va;
op.unmap.keep = merge;
@@ -2102,10 +2175,15 @@ op_unmap_cb(const struct drm_gpuvm_ops *fn, void *priv,
static int
__drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
const struct drm_gpuvm_ops *ops, void *priv,
- u64 req_addr, u64 req_range,
- struct drm_gem_object *req_obj, u64 req_offset)
+ const struct drm_gpuvm_map_req *req,
+ bool madvise)
{
+ struct drm_gem_object *req_obj = req->map.gem.obj;
+ const struct drm_gpuvm_map_req *op_map = madvise ? NULL : req;
struct drm_gpuva *va, *next;
+ u64 req_offset = req->map.gem.offset;
+ u64 req_range = req->map.va.range;
+ u64 req_addr = req->map.va.addr;
u64 req_end = req_addr + req_range;
int ret;
@@ -2120,19 +2198,22 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
u64 end = addr + range;
bool merge = !!va->gem.obj;
+ if (madvise && obj)
+ continue;
+
if (addr == req_addr) {
merge &= obj == req_obj &&
offset == req_offset;
if (end == req_end) {
- ret = op_unmap_cb(ops, priv, va, merge);
+ ret = op_unmap_cb(ops, priv, va, merge, madvise);
if (ret)
return ret;
break;
}
if (end < req_end) {
- ret = op_unmap_cb(ops, priv, va, merge);
+ ret = op_unmap_cb(ops, priv, va, merge, madvise);
if (ret)
return ret;
continue;
@@ -2153,6 +2234,9 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
ret = op_remap_cb(ops, priv, NULL, &n, &u);
if (ret)
return ret;
+
+ if (madvise)
+ op_map = req;
break;
}
} else if (addr < req_addr) {
@@ -2173,6 +2257,9 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
ret = op_remap_cb(ops, priv, &p, NULL, &u);
if (ret)
return ret;
+
+ if (madvise)
+ op_map = req;
break;
}
@@ -2180,6 +2267,18 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
ret = op_remap_cb(ops, priv, &p, NULL, &u);
if (ret)
return ret;
+
+ if (madvise) {
+ struct drm_gpuvm_map_req map_req = {
+ .map.va.addr = req_addr,
+ .map.va.range = end - req_addr,
+ };
+
+ ret = op_map_cb(ops, priv, &map_req);
+ if (ret)
+ return ret;
+ }
+
continue;
}
@@ -2195,6 +2294,9 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
ret = op_remap_cb(ops, priv, &p, &n, &u);
if (ret)
return ret;
+
+ if (madvise)
+ op_map = req;
break;
}
} else if (addr > req_addr) {
@@ -2203,16 +2305,18 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
(addr - req_addr);
if (end == req_end) {
- ret = op_unmap_cb(ops, priv, va, merge);
+ ret = op_unmap_cb(ops, priv, va, merge, madvise);
if (ret)
return ret;
+
break;
}
if (end < req_end) {
- ret = op_unmap_cb(ops, priv, va, merge);
+ ret = op_unmap_cb(ops, priv, va, merge, madvise);
if (ret)
return ret;
+
continue;
}
@@ -2231,14 +2335,20 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
ret = op_remap_cb(ops, priv, NULL, &n, &u);
if (ret)
return ret;
+
+ if (madvise) {
+ struct drm_gpuvm_map_req map_req = {
+ .map.va.addr = addr,
+ .map.va.range = req_end - addr,
+ };
+
+ return op_map_cb(ops, priv, &map_req);
+ }
break;
}
}
}
-
- return op_map_cb(ops, priv,
- req_addr, req_range,
- req_obj, req_offset);
+ return op_map_cb(ops, priv, op_map);
}
static int
@@ -2290,7 +2400,7 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
if (ret)
return ret;
} else {
- ret = op_unmap_cb(ops, priv, va, false);
+ ret = op_unmap_cb(ops, priv, va, false, false);
if (ret)
return ret;
}
@@ -2303,10 +2413,7 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
* drm_gpuvm_sm_map() - calls the &drm_gpuva_op split/merge steps
* @gpuvm: the &drm_gpuvm representing the GPU VA space
* @priv: pointer to a driver private data structure
- * @req_addr: the start address of the new mapping
- * @req_range: the range of the new mapping
- * @req_obj: the &drm_gem_object to map
- * @req_offset: the offset within the &drm_gem_object
+ * @req: pointer to the &struct drm_gpuvm_map_req describing the new mapping
*
* This function iterates the given range of the GPU VA space. It utilizes the
* &drm_gpuvm_ops to call back into the driver providing the split and merge
@@ -2333,8 +2440,7 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
*/
int
drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
- u64 req_addr, u64 req_range,
- struct drm_gem_object *req_obj, u64 req_offset)
+ const struct drm_gpuvm_map_req *req)
{
const struct drm_gpuvm_ops *ops = gpuvm->ops;
@@ -2343,9 +2449,7 @@ drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
ops->sm_step_unmap)))
return -EINVAL;
- return __drm_gpuvm_sm_map(gpuvm, ops, priv,
- req_addr, req_range,
- req_obj, req_offset);
+ return __drm_gpuvm_sm_map(gpuvm, ops, priv, req, false);
}
EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map);
@@ -2421,10 +2525,7 @@ static const struct drm_gpuvm_ops lock_ops = {
* @gpuvm: the &drm_gpuvm representing the GPU VA space
* @exec: the &drm_exec locking context
* @num_fences: for newly mapped objects, the # of fences to reserve
- * @req_addr: the start address of the range to unmap
- * @req_range: the range of the mappings to unmap
- * @req_obj: the &drm_gem_object to map
- * @req_offset: the offset within the &drm_gem_object
+ * @req: pointer to the &struct drm_gpuvm_map_req describing the new mapping
*
* This function locks (drm_exec_lock_obj()) objects that will be unmapped/
* remapped, and locks+prepares (drm_exec_prepare_object()) objects that
@@ -2445,9 +2546,7 @@ static const struct drm_gpuvm_ops lock_ops = {
* ret = drm_gpuvm_sm_unmap_exec_lock(gpuvm, &exec, op->addr, op->range);
* break;
* case DRIVER_OP_MAP:
- * ret = drm_gpuvm_sm_map_exec_lock(gpuvm, &exec, num_fences,
- * op->addr, op->range,
- * obj, op->obj_offset);
+ * ret = drm_gpuvm_sm_map_exec_lock(gpuvm, &exec, num_fences, &req);
* break;
* }
*
@@ -2478,18 +2577,17 @@ static const struct drm_gpuvm_ops lock_ops = {
int
drm_gpuvm_sm_map_exec_lock(struct drm_gpuvm *gpuvm,
struct drm_exec *exec, unsigned int num_fences,
- u64 req_addr, u64 req_range,
- struct drm_gem_object *req_obj, u64 req_offset)
+ struct drm_gpuvm_map_req *req)
{
+ struct drm_gem_object *req_obj = req->map.gem.obj;
+
if (req_obj) {
int ret = drm_exec_prepare_obj(exec, req_obj, num_fences);
if (ret)
return ret;
}
- return __drm_gpuvm_sm_map(gpuvm, &lock_ops, exec,
- req_addr, req_range,
- req_obj, req_offset);
+ return __drm_gpuvm_sm_map(gpuvm, &lock_ops, exec, req, false);
}
EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map_exec_lock);
@@ -2608,13 +2706,42 @@ static const struct drm_gpuvm_ops gpuvm_list_ops = {
.sm_step_unmap = drm_gpuva_sm_step,
};
+static struct drm_gpuva_ops *
+__drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
+ const struct drm_gpuvm_map_req *req,
+ bool madvise)
+{
+ struct drm_gpuva_ops *ops;
+ struct {
+ struct drm_gpuvm *vm;
+ struct drm_gpuva_ops *ops;
+ } args;
+ int ret;
+
+ ops = kzalloc(sizeof(*ops), GFP_KERNEL);
+ if (unlikely(!ops))
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&ops->list);
+
+ args.vm = gpuvm;
+ args.ops = ops;
+
+ ret = __drm_gpuvm_sm_map(gpuvm, &gpuvm_list_ops, &args, req, madvise);
+ if (ret)
+ goto err_free_ops;
+
+ return ops;
+
+err_free_ops:
+ drm_gpuva_ops_free(gpuvm, ops);
+ return ERR_PTR(ret);
+}
+
/**
* drm_gpuvm_sm_map_ops_create() - creates the &drm_gpuva_ops to split and merge
* @gpuvm: the &drm_gpuvm representing the GPU VA space
- * @req_addr: the start address of the new mapping
- * @req_range: the range of the new mapping
- * @req_obj: the &drm_gem_object to map
- * @req_offset: the offset within the &drm_gem_object
+ * @req: map request arguments
*
* This function creates a list of operations to perform splitting and merging
* of existing mapping(s) with the newly requested one.
@@ -2642,40 +2769,50 @@ static const struct drm_gpuvm_ops gpuvm_list_ops = {
*/
struct drm_gpuva_ops *
drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
- u64 req_addr, u64 req_range,
- struct drm_gem_object *req_obj, u64 req_offset)
+ const struct drm_gpuvm_map_req *req)
{
- struct drm_gpuva_ops *ops;
- struct {
- struct drm_gpuvm *vm;
- struct drm_gpuva_ops *ops;
- } args;
- int ret;
-
- ops = kzalloc(sizeof(*ops), GFP_KERNEL);
- if (unlikely(!ops))
- return ERR_PTR(-ENOMEM);
-
- INIT_LIST_HEAD(&ops->list);
-
- args.vm = gpuvm;
- args.ops = ops;
-
- ret = __drm_gpuvm_sm_map(gpuvm, &gpuvm_list_ops, &args,
- req_addr, req_range,
- req_obj, req_offset);
- if (ret)
- goto err_free_ops;
-
- return ops;
-
-err_free_ops:
- drm_gpuva_ops_free(gpuvm, ops);
- return ERR_PTR(ret);
+ return __drm_gpuvm_sm_map_ops_create(gpuvm, req, false);
}
EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map_ops_create);
/**
+ * drm_gpuvm_madvise_ops_create() - creates the &drm_gpuva_ops to split
+ * @gpuvm: the &drm_gpuvm representing the GPU VA space
+ * @req: map request arguments
+ *
+ * This function creates a list of operations to perform splitting
+ * of existing mapping(s) at the start or end, based on the requested mapping.
+ *
+ * The list can be iterated with &drm_gpuva_for_each_op and must be processed
+ * in the given order. It can contain map and remap operations, but it
+ * can also be empty if no operation is required, e.g. if the requested mapping
+ * already exists in the exact same way.
+ *
+ * There will be no unmap operations, at most two remap operations and at most
+ * two map operations. The two map operations correspond to one mapping from
+ * the requested start to the end of the drm_gpuva containing it, and another
+ * from the start of the drm_gpuva containing the requested end to that end.
+ *
+ * Note that before calling this function again with another mapping request it
+ * is necessary to update the &drm_gpuvm's view of the GPU VA space. The
+ * previously obtained operations must be either processed or abandoned. To
+ * update the &drm_gpuvm's view of the GPU VA space drm_gpuva_insert(),
+ * drm_gpuva_destroy_locked() and/or drm_gpuva_destroy_unlocked() should be
+ * used.
+ *
+ * After the caller finished processing the returned &drm_gpuva_ops, they must
+ * be freed with &drm_gpuva_ops_free.
+ *
+ * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
+ */
+struct drm_gpuva_ops *
+drm_gpuvm_madvise_ops_create(struct drm_gpuvm *gpuvm,
+ const struct drm_gpuvm_map_req *req)
+{
+ return __drm_gpuvm_sm_map_ops_create(gpuvm, req, true);
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_madvise_ops_create);
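/*
 * Minimal sketch of consuming madvise ops (driver names hypothetical): per
 * the documentation above, only DRM_GPUVA_OP_MAP and DRM_GPUVA_OP_REMAP can
 * appear in the returned list.
 */
static int example_apply_madvise(struct drm_gpuvm *gpuvm,
				 const struct drm_gpuvm_map_req *req)
{
	struct drm_gpuva_ops *ops;
	struct drm_gpuva_op *op;

	ops = drm_gpuvm_madvise_ops_create(gpuvm, req);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	drm_gpuva_for_each_op(op, ops) {
		switch (op->op) {
		case DRM_GPUVA_OP_MAP:
			/* apply the new attributes to the op->map range */
			break;
		case DRM_GPUVA_OP_REMAP:
			/* split the existing mapping at the boundary */
			break;
		default:
			break;
		}
	}

	drm_gpuva_ops_free(gpuvm, ops);
	return 0;
}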
+
+/**
* drm_gpuvm_sm_unmap_ops_create() - creates the &drm_gpuva_ops to split on
* unmap
* @gpuvm: the &drm_gpuvm representing the GPU VA space
@@ -2804,8 +2941,8 @@ EXPORT_SYMBOL_GPL(drm_gpuvm_prefetch_ops_create);
* After the caller finished processing the returned &drm_gpuva_ops, they must
* be freed with &drm_gpuva_ops_free.
*
- * It is the callers responsibility to protect the GEMs GPUVA list against
- * concurrent access using the GEMs dma_resv lock.
+ * This function expects the caller to protect the GEM's GPUVA list against
+ * concurrent access using either the GEM's dma-resv or gpuva.lock mutex.
*
* Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
*/
@@ -2817,7 +2954,7 @@ drm_gpuvm_bo_unmap_ops_create(struct drm_gpuvm_bo *vm_bo)
struct drm_gpuva *va;
int ret;
- drm_gem_gpuva_assert_lock_held(vm_bo->obj);
+ drm_gem_gpuva_assert_lock_held(vm_bo->vm, vm_bo->obj);
ops = kzalloc(sizeof(*ops), GFP_KERNEL);
if (!ops)
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index e79c3c623c9a..5a3bed48ab1f 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -85,6 +85,8 @@ int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv);
void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
+int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
+ struct dma_buf *dma_buf, uint32_t handle);
void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
uint32_t handle);
@@ -170,6 +172,8 @@ int drm_gem_close_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_gem_flink_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+int drm_gem_change_handle_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
int drm_gem_open_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void drm_gem_open(struct drm_device *dev, struct drm_file *file_private);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index f593dc569d31..d8a24875a7ba 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -653,6 +653,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_CHANGE_HANDLE, drm_gem_change_handle_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, 0),
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index 3a9b3278a6e3..a712e177b350 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -773,41 +773,13 @@ ssize_t mipi_dsi_generic_write(struct mipi_dsi_device *dsi, const void *payload,
EXPORT_SYMBOL(mipi_dsi_generic_write);
/**
- * mipi_dsi_generic_write_chatty() - mipi_dsi_generic_write() w/ an error log
- * @dsi: DSI peripheral device
- * @payload: buffer containing the payload
- * @size: size of payload buffer
- *
- * Like mipi_dsi_generic_write() but includes a dev_err()
- * call for you and returns 0 upon success, not the number of bytes sent.
- *
- * Return: 0 on success or a negative error code on failure.
- */
-int mipi_dsi_generic_write_chatty(struct mipi_dsi_device *dsi,
- const void *payload, size_t size)
-{
- struct device *dev = &dsi->dev;
- ssize_t ret;
-
- ret = mipi_dsi_generic_write(dsi, payload, size);
- if (ret < 0) {
- dev_err(dev, "sending generic data %*ph failed: %zd\n",
- (int)size, payload, ret);
- return ret;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(mipi_dsi_generic_write_chatty);
-
-/**
- * mipi_dsi_generic_write_multi() - mipi_dsi_generic_write_chatty() w/ accum_err
+ * mipi_dsi_generic_write_multi() - mipi_dsi_generic_write() w/ accum_err
* @ctx: Context for multiple DSI transactions
* @payload: buffer containing the payload
* @size: size of payload buffer
*
- * Like mipi_dsi_generic_write_chatty() but deals with errors in a way that
- * makes it convenient to make several calls in a row.
+ * A wrapper around mipi_dsi_generic_write() that deals with errors in a way
+ * that makes it convenient to make several calls in a row.
*/
void mipi_dsi_generic_write_multi(struct mipi_dsi_multi_context *ctx,
const void *payload, size_t size)
@@ -829,6 +801,30 @@ void mipi_dsi_generic_write_multi(struct mipi_dsi_multi_context *ctx,
EXPORT_SYMBOL(mipi_dsi_generic_write_multi);
/**
+ * mipi_dsi_dual_generic_write_multi() - mipi_dsi_generic_write_multi() for
+ * two dsi channels, one after the other
+ * @ctx: Context for multiple DSI transactions
+ * @dsi1: First dsi channel to write buffer to
+ * @dsi2: Second dsi channel to write buffer to
+ * @payload: Buffer containing the payload
+ * @size: Size of payload buffer
+ *
+ * A wrapper around mipi_dsi_generic_write_multi() that allows the user to
+ * conveniently write to two dsi channels, one after the other.
+ */
+void mipi_dsi_dual_generic_write_multi(struct mipi_dsi_multi_context *ctx,
+ struct mipi_dsi_device *dsi1,
+ struct mipi_dsi_device *dsi2,
+ const void *payload, size_t size)
+{
+ ctx->dsi = dsi1;
+ mipi_dsi_generic_write_multi(ctx, payload, size);
+ ctx->dsi = dsi2;
+ mipi_dsi_generic_write_multi(ctx, payload, size);
+}
+EXPORT_SYMBOL(mipi_dsi_dual_generic_write_multi);
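/*
 * Usage sketch for a dual-link panel driver (identifiers and payload bytes
 * hypothetical): one context accumulates errors across both links, so a
 * single check at the end suffices.
 */
static int example_dual_panel_init(struct mipi_dsi_device *dsi0,
				   struct mipi_dsi_device *dsi1)
{
	struct mipi_dsi_multi_context ctx = { .dsi = dsi0 };
	static const u8 seq[] = { 0xb0, 0x11 };

	mipi_dsi_dual_generic_write_multi(&ctx, dsi0, dsi1, seq, sizeof(seq));

	return ctx.accum_err;
}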
+
+/**
* mipi_dsi_generic_read() - receive data using a generic read packet
* @dsi: DSI peripheral device
* @params: buffer containing the request parameters
@@ -1008,6 +1004,30 @@ void mipi_dsi_dcs_write_buffer_multi(struct mipi_dsi_multi_context *ctx,
EXPORT_SYMBOL(mipi_dsi_dcs_write_buffer_multi);
/**
+ * mipi_dsi_dual_dcs_write_buffer_multi() - mipi_dsi_dcs_write_buffer_multi() for
+ * two dsi channels, one after the other
+ * @ctx: Context for multiple DSI transactions
+ * @dsi1: First dsi channel to write buffer to
+ * @dsi2: Second dsi channel to write buffer to
+ * @data: Buffer containing data to be transmitted
+ * @len: Size of transmission buffer
+ *
+ * A wrapper around mipi_dsi_dcs_write_buffer_multi() that allows the user to
+ * conveniently write to two dsi channels, one after the other.
+ */
+void mipi_dsi_dual_dcs_write_buffer_multi(struct mipi_dsi_multi_context *ctx,
+ struct mipi_dsi_device *dsi1,
+ struct mipi_dsi_device *dsi2,
+ const void *data, size_t len)
+{
+ ctx->dsi = dsi1;
+ mipi_dsi_dcs_write_buffer_multi(ctx, data, len);
+ ctx->dsi = dsi2;
+ mipi_dsi_dcs_write_buffer_multi(ctx, data, len);
+}
+EXPORT_SYMBOL(mipi_dsi_dual_dcs_write_buffer_multi);
+
+/**
* mipi_dsi_dcs_write() - send DCS write command
* @dsi: DSI peripheral device
* @cmd: DCS command
@@ -1077,6 +1097,43 @@ ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, u8 cmd, void *data,
EXPORT_SYMBOL(mipi_dsi_dcs_read);
/**
+ * mipi_dsi_dcs_read_multi() - mipi_dsi_dcs_read() w/ accum_err
+ * @ctx: Context for multiple DSI transactions
+ * @cmd: DCS command
+ * @data: buffer in which to receive data
+ * @len: size of receive buffer
+ *
+ * Like mipi_dsi_dcs_read() but deals with errors in a way that makes it
+ * convenient to make several calls in a row.
+ */
+void mipi_dsi_dcs_read_multi(struct mipi_dsi_multi_context *ctx, u8 cmd,
+ void *data, size_t len)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ struct mipi_dsi_msg msg = {
+ .channel = dsi->channel,
+ .type = MIPI_DSI_DCS_READ,
+ .tx_buf = &cmd,
+ .tx_len = 1,
+ .rx_buf = data,
+ .rx_len = len
+ };
+ ssize_t ret;
+
+ if (ctx->accum_err)
+ return;
+
+ ret = mipi_dsi_device_transfer(dsi, &msg);
+ if (ret < 0) {
+ ctx->accum_err = ret;
+ dev_err(dev, "dcs read with command %#x failed: %d\n", cmd,
+ ctx->accum_err);
+ }
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_read_multi);
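/*
 * Usage sketch: reading the DCS power mode as part of a multi-transaction
 * sequence. The read is silently skipped if an earlier call already failed,
 * matching the accum_err convention.
 */
static int example_read_power_mode(struct mipi_dsi_device *dsi)
{
	struct mipi_dsi_multi_context ctx = { .dsi = dsi };
	u8 power_mode;

	mipi_dsi_dcs_read_multi(&ctx, MIPI_DCS_GET_POWER_MODE,
				&power_mode, sizeof(power_mode));

	return ctx.accum_err;
}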
+
+/**
* mipi_dsi_dcs_nop() - send DCS nop packet
* @dsi: DSI peripheral device
*
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index d0183dea7703..4f65ce729a47 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -55,7 +55,8 @@ EXPORT_SYMBOL(drm_of_crtc_port_mask);
* and generate the DRM mask of CRTCs which may be attached to this
* encoder.
*
- * See Documentation/devicetree/bindings/graph.txt for the bindings.
+ * See https://github.com/devicetree-org/dt-schema/blob/main/dtschema/schemas/graph.yaml
+ * for the bindings.
*/
uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
struct device_node *port)
@@ -106,7 +107,9 @@ EXPORT_SYMBOL_GPL(drm_of_component_match_add);
* Parse the platform device OF node and bind all the components associated
* with the master. Interface ports are added before the encoders in order to
* satisfy their .bind requirements
- * See Documentation/devicetree/bindings/graph.txt for the bindings.
+ *
+ * See https://github.com/devicetree-org/dt-schema/blob/main/dtschema/schemas/graph.yaml
+ * for the bindings.
*
* Returns zero if successful, or one of the standard error codes if it fails.
*/
diff --git a/drivers/gpu/drm/drm_pagemap.c b/drivers/gpu/drm/drm_pagemap.c
index 1da55322af12..22c44807e3fe 100644
--- a/drivers/gpu/drm/drm_pagemap.c
+++ b/drivers/gpu/drm/drm_pagemap.c
@@ -202,7 +202,7 @@ static void drm_pagemap_get_devmem_page(struct page *page,
/**
* drm_pagemap_migrate_map_pages() - Map migration pages for GPU SVM migration
* @dev: The device for which the pages are being mapped
- * @dma_addr: Array to store DMA addresses corresponding to mapped pages
+ * @pagemap_addr: Array to store DMA information corresponding to mapped pages
* @migrate_pfn: Array of migrate page frame numbers to map
* @npages: Number of pages to map
* @dir: Direction of data transfer (e.g., DMA_BIDIRECTIONAL)
@@ -215,25 +215,39 @@ static void drm_pagemap_get_devmem_page(struct page *page,
* Returns: 0 on success, -EFAULT if an error occurs during mapping.
*/
static int drm_pagemap_migrate_map_pages(struct device *dev,
- dma_addr_t *dma_addr,
+ struct drm_pagemap_addr *pagemap_addr,
unsigned long *migrate_pfn,
unsigned long npages,
enum dma_data_direction dir)
{
unsigned long i;
- for (i = 0; i < npages; ++i) {
+ for (i = 0; i < npages;) {
struct page *page = migrate_pfn_to_page(migrate_pfn[i]);
+ dma_addr_t dma_addr;
+ struct folio *folio;
+ unsigned int order = 0;
if (!page)
- continue;
+ goto next;
if (WARN_ON_ONCE(is_zone_device_page(page)))
return -EFAULT;
- dma_addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
- if (dma_mapping_error(dev, dma_addr[i]))
+ folio = page_folio(page);
+ order = folio_order(folio);
+
+ dma_addr = dma_map_page(dev, page, 0, page_size(page), dir);
+ if (dma_mapping_error(dev, dma_addr))
return -EFAULT;
+
+ pagemap_addr[i] =
+ drm_pagemap_addr_encode(dma_addr,
+ DRM_INTERCONNECT_SYSTEM,
+ order, dir);
+
+next:
+ i += NR_PAGES(order);
}
return 0;
@@ -242,7 +256,7 @@ static int drm_pagemap_migrate_map_pages(struct device *dev,
/**
* drm_pagemap_migrate_unmap_pages() - Unmap pages previously mapped for GPU SVM migration
* @dev: The device for which the pages were mapped
- * @dma_addr: Array of DMA addresses corresponding to mapped pages
+ * @pagemap_addr: Array of DMA information corresponding to mapped pages
* @npages: Number of pages to unmap
* @dir: Direction of data transfer (e.g., DMA_BIDIRECTIONAL)
*
@@ -251,17 +265,20 @@ static int drm_pagemap_migrate_map_pages(struct device *dev,
* if it's valid and not already unmapped, and unmaps the corresponding page.
*/
static void drm_pagemap_migrate_unmap_pages(struct device *dev,
- dma_addr_t *dma_addr,
+ struct drm_pagemap_addr *pagemap_addr,
unsigned long npages,
enum dma_data_direction dir)
{
unsigned long i;
- for (i = 0; i < npages; ++i) {
- if (!dma_addr[i] || dma_mapping_error(dev, dma_addr[i]))
- continue;
+ for (i = 0; i < npages;) {
+ if (!pagemap_addr[i].addr || dma_mapping_error(dev, pagemap_addr[i].addr))
+ goto next;
- dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir);
+ dma_unmap_page(dev, pagemap_addr[i].addr, PAGE_SIZE << pagemap_addr[i].order, dir);
+
+next:
+ i += NR_PAGES(pagemap_addr[i].order);
}
}
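/*
 * Worked example of the stride in the loops above: a 2M THP mapped at index 0
 * stores one drm_pagemap_addr entry with order 9, so both map and unmap loops
 * advance i by NR_PAGES(9) = 512 and skip the 511 unused slots that follow it.
 */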
@@ -314,7 +331,7 @@ int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
struct vm_area_struct *vas;
struct drm_pagemap_zdd *zdd = NULL;
struct page **pages;
- dma_addr_t *dma_addr;
+ struct drm_pagemap_addr *pagemap_addr;
void *buf;
int err;
@@ -340,14 +357,14 @@ int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
goto err_out;
}
- buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*dma_addr) +
+ buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*pagemap_addr) +
sizeof(*pages), GFP_KERNEL);
if (!buf) {
err = -ENOMEM;
goto err_out;
}
- dma_addr = buf + (2 * sizeof(*migrate.src) * npages);
- pages = buf + (2 * sizeof(*migrate.src) + sizeof(*dma_addr)) * npages;
+ pagemap_addr = buf + (2 * sizeof(*migrate.src) * npages);
+ pages = buf + (2 * sizeof(*migrate.src) + sizeof(*pagemap_addr)) * npages;
zdd = drm_pagemap_zdd_alloc(pgmap_owner);
if (!zdd) {
@@ -377,8 +394,9 @@ int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
if (err)
goto err_finalize;
- err = drm_pagemap_migrate_map_pages(devmem_allocation->dev, dma_addr,
+ err = drm_pagemap_migrate_map_pages(devmem_allocation->dev, pagemap_addr,
migrate.src, npages, DMA_TO_DEVICE);
if (err)
goto err_finalize;
@@ -390,7 +408,7 @@ int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
drm_pagemap_get_devmem_page(page, zdd);
}
- err = ops->copy_to_devmem(pages, dma_addr, npages);
+ err = ops->copy_to_devmem(pages, pagemap_addr, npages);
if (err)
goto err_finalize;
@@ -404,7 +422,7 @@ err_finalize:
drm_pagemap_migration_unlock_put_pages(npages, migrate.dst);
migrate_vma_pages(&migrate);
migrate_vma_finalize(&migrate);
- drm_pagemap_migrate_unmap_pages(devmem_allocation->dev, dma_addr, npages,
+ drm_pagemap_migrate_unmap_pages(devmem_allocation->dev, pagemap_addr, npages,
DMA_TO_DEVICE);
err_free:
if (zdd)
@@ -442,54 +460,80 @@ static int drm_pagemap_migrate_populate_ram_pfn(struct vm_area_struct *vas,
{
unsigned long i;
- for (i = 0; i < npages; ++i, addr += PAGE_SIZE) {
- struct page *page, *src_page;
+ for (i = 0; i < npages;) {
+ struct page *page = NULL, *src_page;
+ struct folio *folio;
+ unsigned int order = 0;
if (!(src_mpfn[i] & MIGRATE_PFN_MIGRATE))
- continue;
+ goto next;
src_page = migrate_pfn_to_page(src_mpfn[i]);
if (!src_page)
- continue;
+ goto next;
if (fault_page) {
if (src_page->zone_device_data !=
fault_page->zone_device_data)
- continue;
+ goto next;
}
+ order = folio_order(page_folio(src_page));
+
+ /* TODO: Support fallback to single pages if THP allocation fails */
if (vas)
- page = alloc_page_vma(GFP_HIGHUSER, vas, addr);
+ folio = vma_alloc_folio(GFP_HIGHUSER, order, vas, addr);
else
- page = alloc_page(GFP_HIGHUSER);
+ folio = folio_alloc(GFP_HIGHUSER, order);
- if (!page)
+ if (!folio)
goto free_pages;
+ page = folio_page(folio, 0);
mpfn[i] = migrate_pfn(page_to_pfn(page));
+
+next:
+ if (page)
+ addr += page_size(page);
+ else
+ addr += PAGE_SIZE;
+
+ i += NR_PAGES(order);
}
- for (i = 0; i < npages; ++i) {
+ for (i = 0; i < npages;) {
struct page *page = migrate_pfn_to_page(mpfn[i]);
+ unsigned int order = 0;
if (!page)
- continue;
+ goto next_lock;
- WARN_ON_ONCE(!trylock_page(page));
- ++*mpages;
+ WARN_ON_ONCE(!folio_trylock(page_folio(page)));
+
+ order = folio_order(page_folio(page));
+ *mpages += NR_PAGES(order);
+
+next_lock:
+ i += NR_PAGES(order);
}
return 0;
free_pages:
- for (i = 0; i < npages; ++i) {
+ for (i = 0; i < npages;) {
struct page *page = migrate_pfn_to_page(mpfn[i]);
+ unsigned int order = 0;
if (!page)
- continue;
+ goto next_put;
+ /* Read the folio order before dropping our reference to the page. */
+ order = folio_order(page_folio(page));
put_page(page);
mpfn[i] = 0;
+
+next_put:
+ i += NR_PAGES(order);
}
return -ENOMEM;
}
@@ -509,7 +553,7 @@ int drm_pagemap_evict_to_ram(struct drm_pagemap_devmem *devmem_allocation)
unsigned long npages, mpages = 0;
struct page **pages;
unsigned long *src, *dst;
- dma_addr_t *dma_addr;
+ struct drm_pagemap_addr *pagemap_addr;
void *buf;
int i, err = 0;
unsigned int retry_count = 2;
@@ -520,7 +564,7 @@ retry:
if (!mmget_not_zero(devmem_allocation->mm))
return -EFAULT;
- buf = kvcalloc(npages, 2 * sizeof(*src) + sizeof(*dma_addr) +
+ buf = kvcalloc(npages, 2 * sizeof(*src) + sizeof(*pagemap_addr) +
sizeof(*pages), GFP_KERNEL);
if (!buf) {
err = -ENOMEM;
@@ -528,8 +572,8 @@ retry:
}
src = buf;
dst = buf + (sizeof(*src) * npages);
- dma_addr = buf + (2 * sizeof(*src) * npages);
- pages = buf + (2 * sizeof(*src) + sizeof(*dma_addr)) * npages;
+ pagemap_addr = buf + (2 * sizeof(*src) * npages);
+ pages = buf + (2 * sizeof(*src) + sizeof(*pagemap_addr)) * npages;
err = ops->populate_devmem_pfn(devmem_allocation, npages, src);
if (err)
@@ -544,7 +588,7 @@ retry:
if (err || !mpages)
goto err_finalize;
- err = drm_pagemap_migrate_map_pages(devmem_allocation->dev, dma_addr,
+ err = drm_pagemap_migrate_map_pages(devmem_allocation->dev, pagemap_addr,
dst, npages, DMA_FROM_DEVICE);
if (err)
goto err_finalize;
@@ -552,7 +596,7 @@ retry:
for (i = 0; i < npages; ++i)
pages[i] = migrate_pfn_to_page(src[i]);
- err = ops->copy_to_ram(pages, dma_addr, npages);
+ err = ops->copy_to_ram(pages, pagemap_addr, npages);
if (err)
goto err_finalize;
@@ -561,7 +605,7 @@ err_finalize:
drm_pagemap_migration_unlock_put_pages(npages, dst);
migrate_device_pages(src, dst, npages);
migrate_device_finalize(src, dst, npages);
- drm_pagemap_migrate_unmap_pages(devmem_allocation->dev, dma_addr, npages,
+ drm_pagemap_migrate_unmap_pages(devmem_allocation->dev, pagemap_addr, npages,
DMA_FROM_DEVICE);
err_free:
kvfree(buf);
@@ -612,7 +656,7 @@ static int __drm_pagemap_migrate_to_ram(struct vm_area_struct *vas,
struct device *dev = NULL;
unsigned long npages, mpages = 0;
struct page **pages;
- dma_addr_t *dma_addr;
+ struct drm_pagemap_addr *pagemap_addr;
unsigned long start, end;
void *buf;
int i, err = 0;
@@ -637,14 +681,14 @@ static int __drm_pagemap_migrate_to_ram(struct vm_area_struct *vas,
migrate.end = end;
npages = npages_in_range(start, end);
- buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*dma_addr) +
+ buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*pagemap_addr) +
sizeof(*pages), GFP_KERNEL);
if (!buf) {
err = -ENOMEM;
goto err_out;
}
- dma_addr = buf + (2 * sizeof(*migrate.src) * npages);
- pages = buf + (2 * sizeof(*migrate.src) + sizeof(*dma_addr)) * npages;
+ pagemap_addr = buf + (2 * sizeof(*migrate.src) * npages);
+ pages = buf + (2 * sizeof(*migrate.src) + sizeof(*pagemap_addr)) * npages;
migrate.vma = vas;
migrate.src = buf;
@@ -680,7 +724,7 @@ static int __drm_pagemap_migrate_to_ram(struct vm_area_struct *vas,
if (err)
goto err_finalize;
- err = drm_pagemap_migrate_map_pages(dev, dma_addr, migrate.dst, npages,
+ err = drm_pagemap_migrate_map_pages(dev, pagemap_addr, migrate.dst, npages,
DMA_FROM_DEVICE);
if (err)
goto err_finalize;
@@ -688,7 +732,7 @@ static int __drm_pagemap_migrate_to_ram(struct vm_area_struct *vas,
for (i = 0; i < npages; ++i)
pages[i] = migrate_pfn_to_page(migrate.src[i]);
- err = ops->copy_to_ram(pages, dma_addr, npages);
+ err = ops->copy_to_ram(pages, pagemap_addr, npages);
if (err)
goto err_finalize;
@@ -698,7 +742,7 @@ err_finalize:
migrate_vma_pages(&migrate);
migrate_vma_finalize(&migrate);
if (dev)
- drm_pagemap_migrate_unmap_pages(dev, dma_addr, npages,
+ drm_pagemap_migrate_unmap_pages(dev, pagemap_addr, npages,
DMA_FROM_DEVICE);
err_free:
kvfree(buf);
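
For reference, a minimal sketch (not part of the patch) of the pattern these drm_pagemap hunks repeat: one kvcalloc() backs four arrays, and the loops now advance by the folio's page count instead of one. NR_PAGES() and struct drm_pagemap_addr are assumed to be introduced earlier in this series.

/* Sketch only; mirrors the allocation layout and folio walk used above. */
static int sketch_carve_and_walk(unsigned long npages)
{
	unsigned long *src, *dst, i;
	struct drm_pagemap_addr *pagemap_addr;
	struct page **pages;
	void *buf;

	buf = kvcalloc(npages, 2 * sizeof(*src) + sizeof(*pagemap_addr) +
		       sizeof(*pages), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* dst, pagemap_addr and pages are carved from the same buffer */
	src = buf;
	dst = buf + (sizeof(*src) * npages);
	pagemap_addr = buf + (2 * sizeof(*src) * npages);
	pages = buf + (2 * sizeof(*src) + sizeof(*pagemap_addr)) * npages;

	for (i = 0; i < npages;) {
		struct page *page = migrate_pfn_to_page(src[i]);
		unsigned int order = page ? folio_order(page_folio(page)) : 0;

		/* per-folio work goes here; step by the folio's page count */
		i += NR_PAGES(order);
	}

	kvfree(buf);
	return 0;
}
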
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
index c8bb28dccdc1..d1e6598ea3bc 100644
--- a/drivers/gpu/drm/drm_panel.c
+++ b/drivers/gpu/drm/drm_panel.c
@@ -134,6 +134,9 @@ void drm_panel_prepare(struct drm_panel *panel)
panel->prepared = true;
list_for_each_entry(follower, &panel->followers, list) {
+ if (!follower->funcs->panel_prepared)
+ continue;
+
ret = follower->funcs->panel_prepared(follower);
if (ret < 0)
dev_info(panel->dev, "%ps failed: %d\n",
@@ -179,6 +182,9 @@ void drm_panel_unprepare(struct drm_panel *panel)
mutex_lock(&panel->follower_lock);
list_for_each_entry(follower, &panel->followers, list) {
+ if (!follower->funcs->panel_unpreparing)
+ continue;
+
ret = follower->funcs->panel_unpreparing(follower);
if (ret < 0)
dev_info(panel->dev, "%ps failed: %d\n",
@@ -209,6 +215,7 @@ EXPORT_SYMBOL(drm_panel_unprepare);
*/
void drm_panel_enable(struct drm_panel *panel)
{
+ struct drm_panel_follower *follower;
int ret;
if (!panel)
@@ -219,10 +226,12 @@ void drm_panel_enable(struct drm_panel *panel)
return;
}
+ mutex_lock(&panel->follower_lock);
+
if (panel->funcs && panel->funcs->enable) {
ret = panel->funcs->enable(panel);
if (ret < 0)
- return;
+ goto exit;
}
panel->enabled = true;
@@ -230,6 +239,19 @@ void drm_panel_enable(struct drm_panel *panel)
if (ret < 0)
DRM_DEV_INFO(panel->dev, "failed to enable backlight: %d\n",
ret);
+
+ list_for_each_entry(follower, &panel->followers, list) {
+ if (!follower->funcs->panel_enabled)
+ continue;
+
+ ret = follower->funcs->panel_enabled(follower);
+ if (ret < 0)
+ dev_info(panel->dev, "%ps failed: %d\n",
+ follower->funcs->panel_enabled, ret);
+ }
+
+exit:
+ mutex_unlock(&panel->follower_lock);
}
EXPORT_SYMBOL(drm_panel_enable);
@@ -243,6 +265,7 @@ EXPORT_SYMBOL(drm_panel_enable);
*/
void drm_panel_disable(struct drm_panel *panel)
{
+ struct drm_panel_follower *follower;
int ret;
if (!panel)
@@ -262,6 +285,18 @@ void drm_panel_disable(struct drm_panel *panel)
return;
}
+ mutex_lock(&panel->follower_lock);
+
+ list_for_each_entry(follower, &panel->followers, list) {
+ if (!follower->funcs->panel_disabling)
+ continue;
+
+ ret = follower->funcs->panel_disabling(follower);
+ if (ret < 0)
+ dev_info(panel->dev, "%ps failed: %d\n",
+ follower->funcs->panel_disabling, ret);
+ }
+
ret = backlight_disable(panel->backlight);
if (ret < 0)
DRM_DEV_INFO(panel->dev, "failed to disable backlight: %d\n",
@@ -270,9 +305,12 @@ void drm_panel_disable(struct drm_panel *panel)
if (panel->funcs && panel->funcs->disable) {
ret = panel->funcs->disable(panel);
if (ret < 0)
- return;
+ goto exit;
}
panel->enabled = false;
+
+exit:
+ mutex_unlock(&panel->follower_lock);
}
EXPORT_SYMBOL(drm_panel_disable);
@@ -539,13 +577,13 @@ EXPORT_SYMBOL(drm_is_panel_follower);
* @follower_dev: The 'struct device' for the follower.
* @follower: The panel follower descriptor for the follower.
*
- * A panel follower is called right after preparing the panel and right before
- * unpreparing the panel. It's primary intention is to power on an associated
- * touchscreen, though it could be used for any similar devices. Multiple
- * devices are allowed the follow the same panel.
+ * A panel follower is called right after preparing/enabling the panel and right
+ * before unpreparing/disabling the panel. Its primary intention is to power on
+ * an associated touchscreen, though it could be used for any similar device.
+ * Multiple devices are allowed to follow the same panel.
*
- * If a follower is added to a panel that's already been turned on, the
- * follower's prepare callback is called right away.
+ * If a follower is added to a panel that's already been prepared/enabled, the
+ * follower's prepared/enabled callback is called right away.
*
* The "panel" property of the follower points to the panel to be followed.
*
@@ -569,12 +607,18 @@ int drm_panel_add_follower(struct device *follower_dev,
mutex_lock(&panel->follower_lock);
list_add_tail(&follower->list, &panel->followers);
- if (panel->prepared) {
+ if (panel->prepared && follower->funcs->panel_prepared) {
ret = follower->funcs->panel_prepared(follower);
if (ret < 0)
dev_info(panel->dev, "%ps failed: %d\n",
follower->funcs->panel_prepared, ret);
}
+ if (panel->enabled && follower->funcs->panel_enabled) {
+ ret = follower->funcs->panel_enabled(follower);
+ if (ret < 0)
+ dev_info(panel->dev, "%ps failed: %d\n",
+ follower->funcs->panel_enabled, ret);
+ }
mutex_unlock(&panel->follower_lock);
@@ -587,7 +631,8 @@ EXPORT_SYMBOL(drm_panel_add_follower);
* @follower: The panel follower descriptor for the follower.
*
* Undo drm_panel_add_follower(). This includes calling the follower's
- * unprepare function if we're removed from a panel that's currently prepared.
+ * unpreparing/disabling function if we're removed from a panel that's currently
+ * prepared/enabled.
*
* Return: 0 or an error code.
*/
@@ -598,7 +643,13 @@ void drm_panel_remove_follower(struct drm_panel_follower *follower)
mutex_lock(&panel->follower_lock);
- if (panel->prepared) {
+ if (panel->enabled && follower->funcs->panel_disabling) {
+ ret = follower->funcs->panel_disabling(follower);
+ if (ret < 0)
+ dev_info(panel->dev, "%ps failed: %d\n",
+ follower->funcs->panel_disabling, ret);
+ }
+ if (panel->prepared && follower->funcs->panel_unpreparing) {
ret = follower->funcs->panel_unpreparing(follower);
if (ret < 0)
dev_info(panel->dev, "%ps failed: %d\n",
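
A hedged sketch of what a follower looks like after this change: every hook is optional now that the core NULL-checks them, and the new enabled/disabling pair rides alongside the existing prepared/unpreparing one. "my_ts" is a made-up consumer, not a real driver.

static int my_ts_panel_prepared(struct drm_panel_follower *follower)
{
	/* panel power is up; safe to power the touchscreen */
	return 0;
}

static int my_ts_panel_enabled(struct drm_panel_follower *follower)
{
	/* new hook: panel fully enabled, display pipe running */
	return 0;
}

static const struct drm_panel_follower_funcs my_ts_follower_funcs = {
	.panel_prepared = my_ts_panel_prepared,
	.panel_enabled = my_ts_panel_enabled,
	/* .panel_disabling / .panel_unpreparing may now be left NULL */
};
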
diff --git a/drivers/gpu/drm/drm_panel_backlight_quirks.c b/drivers/gpu/drm/drm_panel_backlight_quirks.c
index 598f812b7cb3..537dc6dd0534 100644
--- a/drivers/gpu/drm/drm_panel_backlight_quirks.c
+++ b/drivers/gpu/drm/drm_panel_backlight_quirks.c
@@ -8,23 +8,26 @@
#include <drm/drm_edid.h>
#include <drm/drm_utils.h>
-struct drm_panel_min_backlight_quirk {
- struct {
- enum dmi_field field;
- const char * const value;
- } dmi_match;
+struct drm_panel_match {
+ enum dmi_field field;
+ const char * const value;
+};
+
+struct drm_get_panel_backlight_quirk {
+ struct drm_panel_match dmi_match;
+ struct drm_panel_match dmi_match_other;
struct drm_edid_ident ident;
- u8 min_brightness;
+ struct drm_panel_backlight_quirk quirk;
};
-static const struct drm_panel_min_backlight_quirk drm_panel_min_backlight_quirks[] = {
+static const struct drm_get_panel_backlight_quirk drm_panel_min_backlight_quirks[] = {
/* 13 inch matte panel */
{
.dmi_match.field = DMI_BOARD_VENDOR,
.dmi_match.value = "Framework",
.ident.panel_id = drm_edid_encode_panel_id('B', 'O', 'E', 0x0bca),
.ident.name = "NE135FBM-N41",
- .min_brightness = 0,
+ .quirk = { .min_brightness = 1, },
},
/* 13 inch glossy panel */
{
@@ -32,7 +35,7 @@ static const struct drm_panel_min_backlight_quirk drm_panel_min_backlight_quirks
.dmi_match.value = "Framework",
.ident.panel_id = drm_edid_encode_panel_id('B', 'O', 'E', 0x095f),
.ident.name = "NE135FBM-N41",
- .min_brightness = 0,
+ .quirk = { .min_brightness = 1, },
},
/* 13 inch 2.8k panel */
{
@@ -40,56 +43,114 @@ static const struct drm_panel_min_backlight_quirk drm_panel_min_backlight_quirks
.dmi_match.value = "Framework",
.ident.panel_id = drm_edid_encode_panel_id('B', 'O', 'E', 0x0cb4),
.ident.name = "NE135A1M-NY1",
- .min_brightness = 0,
+ .quirk = { .min_brightness = 1, },
+ },
+ /* Steam Deck models */
+ {
+ .dmi_match.field = DMI_SYS_VENDOR,
+ .dmi_match.value = "Valve",
+ .dmi_match_other.field = DMI_PRODUCT_NAME,
+ .dmi_match_other.value = "Jupiter",
+ .quirk = { .min_brightness = 1, },
+ },
+ {
+ .dmi_match.field = DMI_SYS_VENDOR,
+ .dmi_match.value = "Valve",
+ .dmi_match_other.field = DMI_PRODUCT_NAME,
+ .dmi_match_other.value = "Galileo",
+ .quirk = { .min_brightness = 1, },
+ },
+ /* Devices with OLED panels that show brightness issues when the last byte is 0/1 */
+ {
+ .dmi_match.field = DMI_SYS_VENDOR,
+ .dmi_match.value = "AYANEO",
+ .dmi_match_other.field = DMI_PRODUCT_NAME,
+ .dmi_match_other.value = "AYANEO 3",
+ .quirk = { .brightness_mask = 3, },
+ },
+ {
+ .dmi_match.field = DMI_SYS_VENDOR,
+ .dmi_match.value = "ZOTAC",
+ .dmi_match_other.field = DMI_BOARD_NAME,
+ .dmi_match_other.value = "G0A1W",
+ .quirk = { .brightness_mask = 3, },
+ },
+ {
+ .dmi_match.field = DMI_SYS_VENDOR,
+ .dmi_match.value = "ZOTAC",
+ .dmi_match_other.field = DMI_BOARD_NAME,
+ .dmi_match_other.value = "G1A1W",
+ .quirk = { .brightness_mask = 3, },
+ },
+ {
+ .dmi_match.field = DMI_SYS_VENDOR,
+ .dmi_match.value = "ONE-NETBOOK",
+ .dmi_match_other.field = DMI_PRODUCT_NAME,
+ .dmi_match_other.value = "ONEXPLAYER F1Pro",
+ .quirk = { .brightness_mask = 3, },
+ },
+ {
+ .dmi_match.field = DMI_SYS_VENDOR,
+ .dmi_match.value = "ONE-NETBOOK",
+ .dmi_match_other.field = DMI_PRODUCT_NAME,
+ .dmi_match_other.value = "ONEXPLAYER F1 EVA-02",
+ .quirk = { .brightness_mask = 3, },
},
};
-static bool drm_panel_min_backlight_quirk_matches(const struct drm_panel_min_backlight_quirk *quirk,
- const struct drm_edid *edid)
+static bool drm_panel_min_backlight_quirk_matches(
+ const struct drm_get_panel_backlight_quirk *quirk,
+ const struct drm_edid *edid)
{
- if (!dmi_match(quirk->dmi_match.field, quirk->dmi_match.value))
+ if (quirk->dmi_match.field &&
+ !dmi_match(quirk->dmi_match.field, quirk->dmi_match.value))
+ return false;
+
+ if (quirk->dmi_match_other.field &&
+ !dmi_match(quirk->dmi_match_other.field,
+ quirk->dmi_match_other.value))
return false;
- if (!drm_edid_match(edid, &quirk->ident))
+ if (quirk->ident.panel_id && !drm_edid_match(edid, &quirk->ident))
return false;
return true;
}
/**
- * drm_get_panel_min_brightness_quirk - Get minimum supported brightness level for a panel.
+ * drm_get_panel_backlight_quirk - Get backlight quirks for a panel
* @edid: EDID of the panel to check
*
* This function checks for platform specific (e.g. DMI based) quirks
* providing info on the minimum backlight brightness for systems where this
- * cannot be probed correctly from the hard-/firm-ware.
+ * cannot be probed correctly from the hard-/firm-ware and other sources.
*
* Returns:
- * A negative error value or
- * an override value in the range [0, 255] representing 0-100% to be scaled to
- * the drivers target range.
+ * a drm_panel_backlight_quirk struct if a quirk was found, otherwise an
+ * error pointer.
*/
-int drm_get_panel_min_brightness_quirk(const struct drm_edid *edid)
+const struct drm_panel_backlight_quirk *
+drm_get_panel_backlight_quirk(const struct drm_edid *edid)
{
- const struct drm_panel_min_backlight_quirk *quirk;
+ const struct drm_get_panel_backlight_quirk *quirk;
size_t i;
if (!IS_ENABLED(CONFIG_DMI))
- return -ENODATA;
+ return ERR_PTR(-ENODATA);
if (!edid)
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
for (i = 0; i < ARRAY_SIZE(drm_panel_min_backlight_quirks); i++) {
quirk = &drm_panel_min_backlight_quirks[i];
if (drm_panel_min_backlight_quirk_matches(quirk, edid))
- return quirk->min_brightness;
+ return &quirk->quirk;
}
- return -ENODATA;
+ return ERR_PTR(-ENODATA);
}
-EXPORT_SYMBOL(drm_get_panel_min_brightness_quirk);
+EXPORT_SYMBOL(drm_get_panel_backlight_quirk);
MODULE_DESCRIPTION("Quirks for panel backlight overrides");
MODULE_LICENSE("GPL");
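
Callers move from an int return to an ERR_PTR convention. An illustrative consumer (not from this patch; the brightness_mask adjustment below is a guess at the intended use, steering the quirked low bits away from the bad 0/1 values):

static u32 sketch_apply_backlight_quirk(const struct drm_edid *edid,
					u32 brightness, u32 min_brightness)
{
	const struct drm_panel_backlight_quirk *quirk;

	quirk = drm_get_panel_backlight_quirk(edid);
	if (IS_ERR(quirk))
		return max(brightness, min_brightness);

	if (quirk->min_brightness)
		min_brightness = quirk->min_brightness;

	/* guess: keep the masked low bits away from 0 and 1 */
	if (quirk->brightness_mask &&
	    (brightness & quirk->brightness_mask) <= 1)
		brightness = (brightness & ~quirk->brightness_mask) | 2;

	return max(brightness, min_brightness);
}
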
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index a23fc712a8b7..43a10b4af43a 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -93,7 +93,7 @@ struct drm_prime_member {
struct rb_node handle_rb;
};
-static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
+int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
struct dma_buf *dma_buf, uint32_t handle)
{
struct drm_prime_member *member;
@@ -190,8 +190,6 @@ void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
{
struct rb_node *rb;
- mutex_lock(&prime_fpriv->lock);
-
rb = prime_fpriv->handles.rb_node;
while (rb) {
struct drm_prime_member *member;
@@ -210,8 +208,6 @@ void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
rb = rb->rb_left;
}
}
-
- mutex_unlock(&prime_fpriv->lock);
}
void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
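
The lock did not go away: with the unlock dropped from drm_prime_remove_buf_handle() and drm_prime_add_buf_handle() un-static'd, callers are expected to hold prime_fpriv->lock across the rbtree operations themselves. A sketch of the implied calling pattern (the wrapper function is hypothetical):

static int sketch_track_dmabuf(struct drm_prime_file_private *prime_fpriv,
			       struct dma_buf *dma_buf, uint32_t handle)
{
	int ret;

	mutex_lock(&prime_fpriv->lock);
	ret = drm_prime_add_buf_handle(prime_fpriv, dma_buf, handle);
	mutex_unlock(&prime_fpriv->lock);

	return ret;
}
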
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 6b3541159c0f..09b12c30df69 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -119,6 +119,7 @@ drm_mode_validate_pipeline(struct drm_display_mode *mode,
*status = drm_bridge_chain_mode_valid(bridge,
&connector->display_info,
mode);
+ drm_bridge_put(bridge);
if (*status != MODE_OK) {
/* There is also no point in continuing for crtc check
* here. */
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index a455c56dbbeb..b01ffa4d6509 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -18,6 +18,7 @@
#include <linux/gfp.h>
#include <linux/i2c.h>
#include <linux/kdev_t.h>
+#include <linux/pci.h>
#include <linux/property.h>
#include <linux/slab.h>
@@ -30,6 +31,8 @@
#include <drm/drm_property.h>
#include <drm/drm_sysfs.h>
+#include <asm/video.h>
+
#include "drm_internal.h"
#include "drm_crtc_internal.h"
@@ -508,6 +511,43 @@ void drm_sysfs_connector_property_event(struct drm_connector *connector,
}
EXPORT_SYMBOL(drm_sysfs_connector_property_event);
+static ssize_t boot_display_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "1\n");
+}
+static DEVICE_ATTR_RO(boot_display);
+
+static struct attribute *display_attrs[] = {
+ &dev_attr_boot_display.attr,
+ NULL
+};
+
+static umode_t boot_display_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct device *dev = kobj_to_dev(kobj)->parent;
+
+ if (dev_is_pci(dev)) {
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ if (video_is_primary_device(&pdev->dev))
+ return a->mode;
+ }
+
+ return 0;
+}
+
+static const struct attribute_group display_attr_group = {
+ .attrs = display_attrs,
+ .is_visible = boot_display_visible,
+};
+
+static const struct attribute_group *card_dev_groups[] = {
+ &display_attr_group,
+ NULL
+};
+
struct device *drm_sysfs_minor_alloc(struct drm_minor *minor)
{
const char *minor_str;
@@ -531,6 +571,7 @@ struct device *drm_sysfs_minor_alloc(struct drm_minor *minor)
kdev->devt = MKDEV(DRM_MAJOR, minor->index);
kdev->class = drm_class;
+ kdev->groups = card_dev_groups;
kdev->type = &drm_sysfs_device_minor;
}
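
From userspace, the attribute only becomes visible on the PCI device that owns the firmware framebuffer, so existence alone is the signal (the file always reads "1\n"). A trivial probe, for illustration:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/drm/card0/boot_display", "r");

	if (!f)
		return 1;	/* hidden: card0 did not drive the boot display */
	fclose(f);
	return 0;
}
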
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 805aa28c1723..b8d9b7251319 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -69,7 +69,6 @@ struct decon_context {
void __iomem *regs;
unsigned long irq_flags;
bool i80_if;
- bool suspended;
wait_queue_head_t wait_vsync_queue;
atomic_t wait_vsync_event;
@@ -132,9 +131,6 @@ static void decon_shadow_protect_win(struct decon_context *ctx,
static void decon_wait_for_vblank(struct decon_context *ctx)
{
- if (ctx->suspended)
- return;
-
atomic_set(&ctx->wait_vsync_event, 1);
/*
@@ -210,9 +206,6 @@ static void decon_commit(struct exynos_drm_crtc *crtc)
struct drm_display_mode *mode = &crtc->base.state->adjusted_mode;
u32 val, clkdiv;
- if (ctx->suspended)
- return;
-
/* nothing to do if we haven't set the mode yet */
if (mode->htotal == 0 || mode->vtotal == 0)
return;
@@ -274,9 +267,6 @@ static int decon_enable_vblank(struct exynos_drm_crtc *crtc)
struct decon_context *ctx = crtc->ctx;
u32 val;
- if (ctx->suspended)
- return -EPERM;
-
if (!test_and_set_bit(0, &ctx->irq_flags)) {
val = readl(ctx->regs + VIDINTCON0);
@@ -299,9 +289,6 @@ static void decon_disable_vblank(struct exynos_drm_crtc *crtc)
struct decon_context *ctx = crtc->ctx;
u32 val;
- if (ctx->suspended)
- return;
-
if (test_and_clear_bit(0, &ctx->irq_flags)) {
val = readl(ctx->regs + VIDINTCON0);
@@ -404,9 +391,6 @@ static void decon_atomic_begin(struct exynos_drm_crtc *crtc)
struct decon_context *ctx = crtc->ctx;
int i;
- if (ctx->suspended)
- return;
-
for (i = 0; i < WINDOWS_NR; i++)
decon_shadow_protect_win(ctx, i, true);
}
@@ -427,9 +411,6 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
unsigned int pitch = fb->pitches[0];
unsigned int vidw_addr0_base = ctx->data->vidw_buf_start_base;
- if (ctx->suspended)
- return;
-
/*
* SHADOWCON/PRTCON register is used for enabling timing.
*
@@ -517,9 +498,6 @@ static void decon_disable_plane(struct exynos_drm_crtc *crtc,
unsigned int win = plane->index;
u32 val;
- if (ctx->suspended)
- return;
-
/* protect windows */
decon_shadow_protect_win(ctx, win, true);
@@ -538,9 +516,6 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
struct decon_context *ctx = crtc->ctx;
int i;
- if (ctx->suspended)
- return;
-
for (i = 0; i < WINDOWS_NR; i++)
decon_shadow_protect_win(ctx, i, false);
exynos_crtc_handle_event(crtc);
@@ -568,9 +543,6 @@ static void decon_atomic_enable(struct exynos_drm_crtc *crtc)
struct decon_context *ctx = crtc->ctx;
int ret;
- if (!ctx->suspended)
- return;
-
ret = pm_runtime_resume_and_get(ctx->dev);
if (ret < 0) {
DRM_DEV_ERROR(ctx->dev, "failed to enable DECON device.\n");
@@ -584,8 +556,6 @@ static void decon_atomic_enable(struct exynos_drm_crtc *crtc)
decon_enable_vblank(ctx->crtc);
decon_commit(ctx->crtc);
-
- ctx->suspended = false;
}
static void decon_atomic_disable(struct exynos_drm_crtc *crtc)
@@ -593,9 +563,6 @@ static void decon_atomic_disable(struct exynos_drm_crtc *crtc)
struct decon_context *ctx = crtc->ctx;
int i;
- if (ctx->suspended)
- return;
-
/*
* We need to make sure that all windows are disabled before we
* suspend that connector. Otherwise we might try to scan from
@@ -605,8 +572,6 @@ static void decon_atomic_disable(struct exynos_drm_crtc *crtc)
decon_disable_plane(crtc, &ctx->planes[i]);
pm_runtime_put_sync(ctx->dev);
-
- ctx->suspended = true;
}
static const struct exynos_drm_crtc_ops decon_crtc_ops = {
@@ -727,7 +692,6 @@ static int decon_probe(struct platform_device *pdev)
return -ENOMEM;
ctx->dev = dev;
- ctx->suspended = true;
ctx->data = of_device_get_match_data(dev);
i80_if_timings = of_get_child_by_name(dev->of_node, "i80-if-timings");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 896a03639e2d..c4d098ab7863 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -154,6 +154,11 @@ static const struct samsung_dsim_plat_data exynos5433_dsi_pdata = {
.host_ops = &exynos_dsi_exynos_host_ops,
};
+static const struct samsung_dsim_plat_data exynos7870_dsi_pdata = {
+ .hw_type = DSIM_TYPE_EXYNOS7870,
+ .host_ops = &exynos_dsi_exynos_host_ops,
+};
+
static const struct of_device_id exynos_dsi_of_match[] = {
{
.compatible = "samsung,exynos3250-mipi-dsi",
@@ -175,6 +180,10 @@ static const struct of_device_id exynos_dsi_of_match[] = {
.compatible = "samsung,exynos5433-mipi-dsi",
.data = &exynos5433_dsi_pdata,
},
+ {
+ .compatible = "samsung,exynos7870-mipi-dsi",
+ .data = &exynos7870_dsi_pdata,
+ },
{ /* sentinel. */ }
};
MODULE_DEVICE_TABLE(of, exynos_dsi_of_match);
diff --git a/drivers/gpu/drm/gma500/fbdev.c b/drivers/gpu/drm/gma500/fbdev.c
index 4a37136f90f4..32d31e5f5f1a 100644
--- a/drivers/gpu/drm/gma500/fbdev.c
+++ b/drivers/gpu/drm/gma500/fbdev.c
@@ -120,7 +120,6 @@ static void psb_fbdev_fb_destroy(struct fb_info *info)
drm_fb_helper_fini(fb_helper);
drm_framebuffer_unregister_private(fb);
- fb->obj[0] = NULL;
drm_framebuffer_cleanup(fb);
kfree(fb);
@@ -245,7 +244,6 @@ int psb_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
err_drm_framebuffer_unregister_private:
drm_framebuffer_unregister_private(fb);
- fb->obj[0] = NULL;
drm_framebuffer_cleanup(fb);
kfree(fb);
err_drm_gem_object_put:
diff --git a/drivers/gpu/drm/gud/gud_connector.c b/drivers/gpu/drm/gud/gud_connector.c
index 0f07d77c5d52..4a15695fa933 100644
--- a/drivers/gpu/drm/gud/gud_connector.c
+++ b/drivers/gpu/drm/gud/gud_connector.c
@@ -16,7 +16,6 @@
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_simple_kms_helper.h>
#include <drm/gud.h>
#include "gud_internal.h"
@@ -607,13 +606,16 @@ int gud_connector_fill_properties(struct drm_connector_state *connector_state,
return gconn->num_properties;
}
+static const struct drm_encoder_funcs gud_drm_simple_encoder_funcs_cleanup = {
+ .destroy = drm_encoder_cleanup,
+};
+
static int gud_connector_create(struct gud_device *gdrm, unsigned int index,
struct gud_connector_descriptor_req *desc)
{
struct drm_device *drm = &gdrm->drm;
struct gud_connector *gconn;
struct drm_connector *connector;
- struct drm_encoder *encoder;
int ret, connector_type;
u32 flags;
@@ -681,20 +683,13 @@ static int gud_connector_create(struct gud_device *gdrm, unsigned int index,
return ret;
}
- /* The first connector is attached to the existing simple pipe encoder */
- if (!connector->index) {
- encoder = &gdrm->pipe.encoder;
- } else {
- encoder = &gconn->encoder;
-
- ret = drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_NONE);
- if (ret)
- return ret;
-
- encoder->possible_crtcs = 1;
- }
+ gconn->encoder.possible_crtcs = drm_crtc_mask(&gdrm->crtc);
+ ret = drm_encoder_init(drm, &gconn->encoder, &gud_drm_simple_encoder_funcs_cleanup,
+ DRM_MODE_ENCODER_NONE, NULL);
+ if (ret)
+ return ret;
- return drm_connector_attach_encoder(connector, encoder);
+ return drm_connector_attach_encoder(connector, &gconn->encoder);
}
int gud_get_connectors(struct gud_device *gdrm)
diff --git a/drivers/gpu/drm/gud/gud_drv.c b/drivers/gpu/drm/gud/gud_drv.c
index 5385a2126e45..b7345c8d823d 100644
--- a/drivers/gpu/drm/gud/gud_drv.c
+++ b/drivers/gpu/drm/gud/gud_drv.c
@@ -16,6 +16,7 @@
#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
+#include <drm/drm_crtc_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
@@ -27,7 +28,6 @@
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_simple_kms_helper.h>
#include <drm/gud.h>
#include "gud_internal.h"
@@ -289,7 +289,7 @@ static int gud_get_properties(struct gud_device *gdrm)
* but mask out any additions on future devices.
*/
val &= GUD_ROTATION_MASK;
- ret = drm_plane_create_rotation_property(&gdrm->pipe.plane,
+ ret = drm_plane_create_rotation_property(&gdrm->plane,
DRM_MODE_ROTATE_0, val);
break;
default:
@@ -338,10 +338,30 @@ static int gud_stats_debugfs(struct seq_file *m, void *data)
return 0;
}
-static const struct drm_simple_display_pipe_funcs gud_pipe_funcs = {
- .check = gud_pipe_check,
- .update = gud_pipe_update,
- DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS
+static const struct drm_crtc_helper_funcs gud_crtc_helper_funcs = {
+ .atomic_check = drm_crtc_helper_atomic_check
+};
+
+static const struct drm_crtc_funcs gud_crtc_funcs = {
+ .reset = drm_atomic_helper_crtc_reset,
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
+static const struct drm_plane_helper_funcs gud_plane_helper_funcs = {
+ DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
+ .atomic_check = gud_plane_atomic_check,
+ .atomic_update = gud_plane_atomic_update,
+};
+
+static const struct drm_plane_funcs gud_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ DRM_GEM_SHADOW_PLANE_FUNCS,
};
static const struct drm_mode_config_funcs gud_mode_config_funcs = {
@@ -350,7 +370,7 @@ static const struct drm_mode_config_funcs gud_mode_config_funcs = {
.atomic_commit = drm_atomic_helper_commit,
};
-static const u64 gud_pipe_modifiers[] = {
+static const u64 gud_plane_modifiers[] = {
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
};
@@ -567,12 +587,17 @@ static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id)
return -ENOMEM;
}
- ret = drm_simple_display_pipe_init(drm, &gdrm->pipe, &gud_pipe_funcs,
- formats, num_formats,
- gud_pipe_modifiers, NULL);
+ ret = drm_universal_plane_init(drm, &gdrm->plane, 0,
+ &gud_plane_funcs,
+ formats, num_formats,
+ gud_plane_modifiers,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret)
return ret;
+ drm_plane_helper_add(&gdrm->plane, &gud_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(&gdrm->plane);
+
devm_kfree(dev, formats);
devm_kfree(dev, formats_dev);
@@ -582,7 +607,12 @@ static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id)
return ret;
}
- drm_plane_enable_fb_damage_clips(&gdrm->pipe.plane);
+ ret = drm_crtc_init_with_planes(drm, &gdrm->crtc, &gdrm->plane, NULL,
+ &gud_crtc_funcs, NULL);
+ if (ret)
+ return ret;
+
+ drm_crtc_helper_add(&gdrm->crtc, &gud_crtc_helper_funcs);
ret = gud_get_connectors(gdrm);
if (ret) {
@@ -620,8 +650,6 @@ static void gud_disconnect(struct usb_interface *interface)
struct gud_device *gdrm = usb_get_intfdata(interface);
struct drm_device *drm = &gdrm->drm;
- drm_dbg(drm, "%s:\n", __func__);
-
drm_kms_helper_poll_fini(drm);
drm_dev_unplug(drm);
drm_atomic_helper_shutdown(drm);
diff --git a/drivers/gpu/drm/gud/gud_internal.h b/drivers/gpu/drm/gud/gud_internal.h
index d6fb25388722..d27c31648341 100644
--- a/drivers/gpu/drm/gud/gud_internal.h
+++ b/drivers/gpu/drm/gud/gud_internal.h
@@ -11,11 +11,11 @@
#include <uapi/drm/drm_fourcc.h>
#include <drm/drm_modes.h>
-#include <drm/drm_simple_kms_helper.h>
struct gud_device {
struct drm_device drm;
- struct drm_simple_display_pipe pipe;
+ struct drm_plane plane;
+ struct drm_crtc crtc;
struct work_struct work;
u32 flags;
const struct drm_format_info *xrgb8888_emulation_format;
@@ -62,11 +62,10 @@ int gud_usb_set_u8(struct gud_device *gdrm, u8 request, u8 val);
void gud_clear_damage(struct gud_device *gdrm);
void gud_flush_work(struct work_struct *work);
-int gud_pipe_check(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *new_plane_state,
- struct drm_crtc_state *new_crtc_state);
-void gud_pipe_update(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *old_state);
+int gud_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state);
+void gud_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *atomic_state);
int gud_connector_fill_properties(struct drm_connector_state *connector_state,
struct gud_property_req *properties);
int gud_get_connectors(struct gud_device *gdrm);
diff --git a/drivers/gpu/drm/gud/gud_pipe.c b/drivers/gpu/drm/gud/gud_pipe.c
index 8d548d08f127..54d9aa9998e5 100644
--- a/drivers/gpu/drm/gud/gud_pipe.c
+++ b/drivers/gpu/drm/gud/gud_pipe.c
@@ -20,7 +20,6 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_rect.h>
-#include <drm/drm_simple_kms_helper.h>
#include <drm/gud.h>
#include "gud_internal.h"
@@ -451,14 +450,15 @@ static void gud_fb_handle_damage(struct gud_device *gdrm, struct drm_framebuffer
gud_flush_damage(gdrm, fb, src, !fb->obj[0]->import_attach, damage);
}
-int gud_pipe_check(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *new_plane_state,
- struct drm_crtc_state *new_crtc_state)
+int gud_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
{
- struct gud_device *gdrm = to_gud_device(pipe->crtc.dev);
- struct drm_plane_state *old_plane_state = pipe->plane.state;
- const struct drm_display_mode *mode = &new_crtc_state->mode;
- struct drm_atomic_state *state = new_plane_state->state;
+ struct gud_device *gdrm = to_gud_device(plane->dev);
+ struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_crtc *crtc = new_plane_state->crtc;
+ struct drm_crtc_state *crtc_state;
+ const struct drm_display_mode *mode;
struct drm_framebuffer *old_fb = old_plane_state->fb;
struct drm_connector_state *connector_state = NULL;
struct drm_framebuffer *fb = new_plane_state->fb;
@@ -469,20 +469,37 @@ int gud_pipe_check(struct drm_simple_display_pipe *pipe,
int idx, ret;
size_t len;
- if (WARN_ON_ONCE(!fb))
+ if (drm_WARN_ON_ONCE(plane->dev, !fb))
return -EINVAL;
+ if (drm_WARN_ON_ONCE(plane->dev, !crtc))
+ return -EINVAL;
+
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+
+ mode = &crtc_state->mode;
+
+ ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ false, false);
+ if (ret)
+ return ret;
+
+ if (!new_plane_state->visible)
+ return 0;
+
if (old_plane_state->rotation != new_plane_state->rotation)
- new_crtc_state->mode_changed = true;
+ crtc_state->mode_changed = true;
if (old_fb && old_fb->format != format)
- new_crtc_state->mode_changed = true;
+ crtc_state->mode_changed = true;
- if (!new_crtc_state->mode_changed && !new_crtc_state->connectors_changed)
+ if (!crtc_state->mode_changed && !crtc_state->connectors_changed)
return 0;
/* Only one connector is supported */
- if (hweight32(new_crtc_state->connector_mask) != 1)
+ if (hweight32(crtc_state->connector_mask) != 1)
return -EINVAL;
if (format->format == DRM_FORMAT_XRGB8888 && gdrm->xrgb8888_emulation_format)
@@ -500,7 +517,7 @@ int gud_pipe_check(struct drm_simple_display_pipe *pipe,
if (!connector_state) {
struct drm_connector_list_iter conn_iter;
- drm_connector_list_iter_begin(pipe->crtc.dev, &conn_iter);
+ drm_connector_list_iter_begin(plane->dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
if (connector->state->crtc) {
connector_state = connector->state;
@@ -567,16 +584,18 @@ out:
return ret;
}
-void gud_pipe_update(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *old_state)
+void gud_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *atomic_state)
{
- struct drm_device *drm = pipe->crtc.dev;
+ struct drm_device *drm = plane->dev;
struct gud_device *gdrm = to_gud_device(drm);
- struct drm_plane_state *state = pipe->plane.state;
- struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(state);
- struct drm_framebuffer *fb = state->fb;
- struct drm_crtc *crtc = &pipe->crtc;
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(atomic_state, plane);
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(atomic_state, plane);
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(new_state);
+ struct drm_framebuffer *fb = new_state->fb;
+ struct drm_crtc *crtc = new_state->crtc;
struct drm_rect damage;
+ struct drm_atomic_helper_damage_iter iter;
int ret, idx;
if (crtc->state->mode_changed || !crtc->state->enable) {
@@ -611,7 +630,8 @@ void gud_pipe_update(struct drm_simple_display_pipe *pipe,
if (ret)
goto ctrl_disable;
- if (drm_atomic_helper_damage_merged(old_state, state, &damage))
+ drm_atomic_helper_damage_iter_init(&iter, old_state, new_state);
+ drm_atomic_for_each_plane_damage(&iter, &damage)
gud_fb_handle_damage(gdrm, fb, &shadow_plane_state->data[0], &damage);
drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
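
Taken together, the gud hunks follow the standard recipe for leaving drm_simple_display_pipe behind. A condensed sketch of the init order, with fields as in the patch and error handling elided:

	/* 1. primary plane, shadow-plane helpers, damage clips */
	drm_universal_plane_init(drm, &gdrm->plane, 0, &gud_plane_funcs,
				 formats, num_formats, gud_plane_modifiers,
				 DRM_PLANE_TYPE_PRIMARY, NULL);
	drm_plane_helper_add(&gdrm->plane, &gud_plane_helper_funcs);
	drm_plane_enable_fb_damage_clips(&gdrm->plane);

	/* 2. a CRTC on top of that plane */
	drm_crtc_init_with_planes(drm, &gdrm->crtc, &gdrm->plane, NULL,
				  &gud_crtc_funcs, NULL);
	drm_crtc_helper_add(&gdrm->crtc, &gud_crtc_helper_funcs);

	/* 3. one encoder per connector, all pointing at the single CRTC */
	gconn->encoder.possible_crtcs = drm_crtc_mask(&gdrm->crtc);
	drm_encoder_init(drm, &gconn->encoder,
			 &gud_drm_simple_encoder_funcs_cleanup,
			 DRM_MODE_ENCODER_NONE, NULL);
	drm_connector_attach_encoder(connector, &gconn->encoder);
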
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index 1852e0804942..3562a02ef7ad 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -50,7 +50,7 @@ config DRM_I915_DEBUG
select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
select DRM_DEBUG_MM if DRM=y
select DRM_EXPORT_FOR_TESTS if m
- select DRM_DEBUG_SELFTEST
+ select DRM_KUNIT_TEST if KUNIT
select DMABUF_SELFTESTS
select SW_SYNC # signaling validation framework (igt/syncobj*)
select DRM_I915_WERROR
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 853543443072..e58c0c158b3a 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -32,6 +32,7 @@ i915-y += \
i915_scatterlist.o \
i915_switcheroo.o \
i915_sysfs.o \
+ i915_timer_util.o \
i915_utils.o \
intel_clock_gating.o \
intel_cpu_info.o \
@@ -280,6 +281,7 @@ i915-y += \
display/intel_modeset_setup.o \
display/intel_modeset_verify.o \
display/intel_overlay.o \
+ display/intel_panic.o \
display/intel_pch.o \
display/intel_pch_display.o \
display/intel_pch_refclk.o \
diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c
index 87f6b9602b16..aa159f9ce12f 100644
--- a/drivers/gpu/drm/i915/display/g4x_dp.c
+++ b/drivers/gpu/drm/i915/display/g4x_dp.c
@@ -424,17 +424,6 @@ intel_dp_link_down(struct intel_encoder *encoder,
drm_dbg_kms(display->drm, "\n");
- if ((display->platform.ivybridge && port == PORT_A) ||
- (HAS_PCH_CPT(display) && port != PORT_A)) {
- intel_dp->DP &= ~DP_LINK_TRAIN_MASK_CPT;
- intel_dp->DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
- } else {
- intel_dp->DP &= ~DP_LINK_TRAIN_MASK;
- intel_dp->DP |= DP_LINK_TRAIN_PAT_IDLE;
- }
- intel_de_write(display, intel_dp->output_reg, intel_dp->DP);
- intel_de_posting_read(display, intel_dp->output_reg);
-
intel_dp->DP &= ~DP_PORT_EN;
intel_de_write(display, intel_dp->output_reg, intel_dp->DP);
intel_de_posting_read(display, intel_dp->output_reg);
@@ -612,6 +601,19 @@ cpt_set_link_train(struct intel_dp *intel_dp,
}
static void
+cpt_set_idle_link_train(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+
+ intel_dp->DP &= ~DP_LINK_TRAIN_MASK_CPT;
+ intel_dp->DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
+
+ intel_de_write(display, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(display, intel_dp->output_reg);
+}
+
+static void
g4x_set_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
u8 dp_train_pat)
@@ -639,6 +641,19 @@ g4x_set_link_train(struct intel_dp *intel_dp,
intel_de_posting_read(display, intel_dp->output_reg);
}
+static void
+g4x_set_idle_link_train(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+
+ intel_dp->DP &= ~DP_LINK_TRAIN_MASK;
+ intel_dp->DP |= DP_LINK_TRAIN_PAT_IDLE;
+
+ intel_de_write(display, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(display, intel_dp->output_reg);
+}
+
static void intel_dp_enable_port(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
@@ -1285,12 +1300,10 @@ bool g4x_dp_init(struct intel_display *display,
drm_dbg_kms(display->drm, "No VBT child device for DP-%c\n",
port_name(port));
- dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
+ dig_port = intel_dig_port_alloc();
if (!dig_port)
return false;
- dig_port->aux_ch = AUX_CH_NONE;
-
intel_connector = intel_connector_alloc();
if (!intel_connector)
goto err_connector_alloc;
@@ -1300,8 +1313,6 @@ bool g4x_dp_init(struct intel_display *display,
intel_encoder->devdata = devdata;
- mutex_init(&dig_port->hdcp.mutex);
-
if (drm_encoder_init(display->drm, &intel_encoder->base,
&intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS,
"DP %c", port_name(port)))
@@ -1342,10 +1353,13 @@ bool g4x_dp_init(struct intel_display *display,
intel_encoder->audio_disable = g4x_dp_audio_disable;
if ((display->platform.ivybridge && port == PORT_A) ||
- (HAS_PCH_CPT(display) && port != PORT_A))
+ (HAS_PCH_CPT(display) && port != PORT_A)) {
dig_port->dp.set_link_train = cpt_set_link_train;
- else
+ dig_port->dp.set_idle_link_train = cpt_set_idle_link_train;
+ } else {
dig_port->dp.set_link_train = g4x_set_link_train;
+ dig_port->dp.set_idle_link_train = g4x_set_idle_link_train;
+ }
if (display->platform.cherryview)
intel_encoder->set_signal_levels = chv_set_signal_levels;
@@ -1368,7 +1382,6 @@ bool g4x_dp_init(struct intel_display *display,
}
dig_port->dp.output_reg = output_reg;
- dig_port->max_lanes = 4;
intel_encoder->type = INTEL_OUTPUT_DP;
intel_encoder->power_domain = intel_display_power_ddi_lanes_domain(display, port);
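
The idle-pattern programming removed from intel_dp_link_down() becomes a per-platform hook; presumably the link-training core now invokes it along these lines (the call site is hypothetical, the hook name is from the patch):

	/* sketch: after link training completes, switch to the idle pattern */
	if (intel_dp->set_idle_link_train)
		intel_dp->set_idle_link_train(intel_dp, crtc_state);
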
diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c
index 2610f5702fb9..f6e2d1ed5639 100644
--- a/drivers/gpu/drm/i915/display/g4x_hdmi.c
+++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c
@@ -19,7 +19,7 @@
#include "intel_display_types.h"
#include "intel_dp_aux.h"
#include "intel_dpio_phy.h"
-#include "intel_fdi.h"
+#include "intel_encoder.h"
#include "intel_fifo_underrun.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
@@ -135,11 +135,8 @@ static int g4x_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- if (HAS_PCH_SPLIT(display)) {
+ if (HAS_PCH_SPLIT(display))
crtc_state->has_pch_encoder = true;
- if (!intel_fdi_compute_pipe_bpp(crtc_state))
- return -EINVAL;
- }
if (display->platform.g4x)
crtc_state->has_hdmi_sink = g4x_compute_has_hdmi_sink(state, crtc);
@@ -690,12 +687,10 @@ bool g4x_hdmi_init(struct intel_display *display,
drm_dbg_kms(display->drm, "No VBT child device for HDMI-%c\n",
port_name(port));
- dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
+ dig_port = intel_dig_port_alloc();
if (!dig_port)
return false;
- dig_port->aux_ch = AUX_CH_NONE;
-
intel_connector = intel_connector_alloc();
if (!intel_connector)
goto err_connector_alloc;
@@ -704,8 +699,6 @@ bool g4x_hdmi_init(struct intel_display *display,
intel_encoder->devdata = devdata;
- mutex_init(&dig_port->hdcp.mutex);
-
if (drm_encoder_init(display->drm, &intel_encoder->base,
&intel_hdmi_enc_funcs, DRM_MODE_ENCODER_TMDS,
"HDMI %c", port_name(port)))
@@ -767,8 +760,6 @@ bool g4x_hdmi_init(struct intel_display *display,
intel_encoder->cloneable |= BIT(INTEL_OUTPUT_HDMI);
dig_port->hdmi.hdmi_reg = hdmi_reg;
- dig_port->dp.output_reg = INVALID_MMIO_REG;
- dig_port->max_lanes = 4;
intel_infoframe_init(dig_port);
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c
index f291ced989dc..407deb5dfb57 100644
--- a/drivers/gpu/drm/i915/display/i9xx_plane.c
+++ b/drivers/gpu/drm/i915/display/i9xx_plane.c
@@ -15,7 +15,6 @@
#include "i9xx_plane.h"
#include "i9xx_plane_regs.h"
#include "intel_atomic.h"
-#include "intel_bo.h"
#include "intel_de.h"
#include "intel_display_irq.h"
#include "intel_display_regs.h"
@@ -23,6 +22,7 @@
#include "intel_fb.h"
#include "intel_fbc.h"
#include "intel_frontbuffer.h"
+#include "intel_panic.h"
#include "intel_plane.h"
#include "intel_sprite.h"
@@ -155,8 +155,7 @@ static bool i9xx_plane_has_windowing(struct intel_plane *plane)
i9xx_plane == PLANE_C;
}
-static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+static u32 i9xx_plane_ctl(const struct intel_plane_state *plane_state)
{
struct intel_display *display = to_intel_display(plane_state);
const struct drm_framebuffer *fb = plane_state->hw.fb;
@@ -355,11 +354,24 @@ i9xx_plane_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
- plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);
+ plane_state->ctl = i9xx_plane_ctl(plane_state);
return 0;
}
+static u32 i8xx_plane_surf_offset(const struct intel_plane_state *plane_state)
+{
+ int x = plane_state->view.color_plane[0].x;
+ int y = plane_state->view.color_plane[0].y;
+
+ return intel_fb_xy_to_linear(x, y, plane_state, 0);
+}
+
+u32 i965_plane_surf_offset(const struct intel_plane_state *plane_state)
+{
+ return plane_state->view.color_plane[0].offset;
+}
+
static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
@@ -463,7 +475,7 @@ static void i9xx_plane_update_arm(struct intel_dsb *dsb,
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
int x = plane_state->view.color_plane[0].x;
int y = plane_state->view.color_plane[0].y;
- u32 dspcntr, dspaddr_offset, linear_offset;
+ u32 dspcntr;
dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);
@@ -472,13 +484,6 @@ static void i9xx_plane_update_arm(struct intel_dsb *dsb,
crtc_state->async_flip_planes & BIT(plane->id))
dspcntr |= DISP_ASYNC_FLIP;
- linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
-
- if (DISPLAY_VER(display) >= 4)
- dspaddr_offset = plane_state->view.color_plane[0].offset;
- else
- dspaddr_offset = linear_offset;
-
if (display->platform.cherryview && i9xx_plane == PLANE_B) {
int crtc_x = plane_state->uapi.dst.x1;
int crtc_y = plane_state->uapi.dst.y1;
@@ -498,7 +503,7 @@ static void i9xx_plane_update_arm(struct intel_dsb *dsb,
DISP_OFFSET_Y(y) | DISP_OFFSET_X(x));
} else if (DISPLAY_VER(display) >= 4) {
intel_de_write_fw(display, DSPLINOFF(display, i9xx_plane),
- linear_offset);
+ intel_fb_xy_to_linear(x, y, plane_state, 0));
intel_de_write_fw(display, DSPTILEOFF(display, i9xx_plane),
DISP_OFFSET_Y(y) | DISP_OFFSET_X(x));
}
@@ -511,11 +516,9 @@ static void i9xx_plane_update_arm(struct intel_dsb *dsb,
intel_de_write_fw(display, DSPCNTR(display, i9xx_plane), dspcntr);
if (DISPLAY_VER(display) >= 4)
- intel_de_write_fw(display, DSPSURF(display, i9xx_plane),
- intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
+ intel_de_write_fw(display, DSPSURF(display, i9xx_plane), plane_state->surf);
else
- intel_de_write_fw(display, DSPADDR(display, i9xx_plane),
- intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
+ intel_de_write_fw(display, DSPADDR(display, i9xx_plane), plane_state->surf);
}
static void i830_plane_update_arm(struct intel_dsb *dsb,
@@ -604,16 +607,13 @@ g4x_primary_async_flip(struct intel_dsb *dsb,
{
struct intel_display *display = to_intel_display(plane);
u32 dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);
- u32 dspaddr_offset = plane_state->view.color_plane[0].offset;
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
if (async_flip)
dspcntr |= DISP_ASYNC_FLIP;
intel_de_write_fw(display, DSPCNTR(display, i9xx_plane), dspcntr);
-
- intel_de_write_fw(display, DSPSURF(display, i9xx_plane),
- intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
+ intel_de_write_fw(display, DSPSURF(display, i9xx_plane), plane_state->surf);
}
static void
@@ -624,11 +624,9 @@ vlv_primary_async_flip(struct intel_dsb *dsb,
bool async_flip)
{
struct intel_display *display = to_intel_display(plane);
- u32 dspaddr_offset = plane_state->view.color_plane[0].offset;
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
- intel_de_write_fw(display, DSPADDR_VLV(display, i9xx_plane),
- intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
+ intel_de_write_fw(display, DSPADDR_VLV(display, i9xx_plane), plane_state->surf);
}
static void
@@ -1037,6 +1035,11 @@ intel_primary_plane_create(struct intel_display *display, enum pipe pipe)
plane->get_hw_state = i9xx_plane_get_hw_state;
plane->check_plane = i9xx_plane_check;
+ if (DISPLAY_VER(display) >= 4)
+ plane->surf_offset = i965_plane_surf_offset;
+ else
+ plane->surf_offset = i8xx_plane_surf_offset;
+
if (DISPLAY_VER(display) >= 5 || display->platform.g4x)
plane->capture_error = g4x_primary_capture_error;
else if (DISPLAY_VER(display) >= 4)
@@ -1175,7 +1178,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
drm_WARN_ON(display->drm, pipe != crtc->pipe);
- intel_fb = intel_bo_alloc_framebuffer();
+ intel_fb = intel_framebuffer_alloc();
if (!intel_fb) {
drm_dbg_kms(display->drm, "failed to alloc fb\n");
return;
@@ -1254,24 +1257,21 @@ bool i9xx_fixup_initial_plane_config(struct intel_crtc *crtc,
const struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
- u32 base;
if (!plane_state->uapi.visible)
return false;
- base = intel_plane_ggtt_offset(plane_state);
-
/*
* We may have moved the surface to a different
* part of ggtt, make the plane aware of that.
*/
- if (plane_config->base == base)
+ if (plane_config->base == plane_state->surf)
return false;
if (DISPLAY_VER(display) >= 4)
- intel_de_write(display, DSPSURF(display, i9xx_plane), base);
+ intel_de_write(display, DSPSURF(display, i9xx_plane), plane_state->surf);
else
- intel_de_write(display, DSPADDR(display, i9xx_plane), base);
+ intel_de_write(display, DSPADDR(display, i9xx_plane), plane_state->surf);
return true;
}
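
These hunks replace the open-coded "ggtt offset + dspaddr offset" math with a precomputed plane_state->surf. Illustrative only: how the core presumably derives it from the new per-plane hook (the helper below is not from this hunk):

static void sketch_compute_surf(struct intel_plane *plane,
				struct intel_plane_state *plane_state)
{
	plane_state->surf = intel_plane_ggtt_offset(plane_state) +
			    plane->surf_offset(plane_state);
}
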
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.h b/drivers/gpu/drm/i915/display/i9xx_plane.h
index d90546d60855..565dab751301 100644
--- a/drivers/gpu/drm/i915/display/i9xx_plane.h
+++ b/drivers/gpu/drm/i915/display/i9xx_plane.h
@@ -24,6 +24,7 @@ unsigned int vlv_plane_min_alignment(struct intel_plane *plane,
const struct drm_framebuffer *fb,
int color_plane);
int i9xx_check_plane_surface(struct intel_plane_state *plane_state);
+u32 i965_plane_surf_offset(const struct intel_plane_state *plane_state);
struct intel_plane *
intel_primary_plane_create(struct intel_display *display, enum pipe pipe);
diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.c b/drivers/gpu/drm/i915/display/i9xx_wm.c
index 1f9db5118777..fd3b7b35f351 100644
--- a/drivers/gpu/drm/i915/display/i9xx_wm.c
+++ b/drivers/gpu/drm/i915/display/i9xx_wm.c
@@ -3,6 +3,10 @@
* Copyright © 2023 Intel Corporation
*/
+#include <linux/iopoll.h>
+
+#include "soc/intel_dram.h"
+
#include "i915_drv.h"
#include "i915_reg.h"
#include "i9xx_wm.h"
@@ -85,7 +89,8 @@ static const struct cxsr_latency cxsr_latency_table[] = {
static const struct cxsr_latency *pnv_get_cxsr_latency(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
+ const struct dram_info *dram_info = intel_dram_info(display->drm);
+ bool is_ddr3 = dram_info->type == INTEL_DRAM_DDR3;
int i;
for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
@@ -93,15 +98,16 @@ static const struct cxsr_latency *pnv_get_cxsr_latency(struct intel_display *dis
bool is_desktop = !display->platform.mobile;
if (is_desktop == latency->is_desktop &&
- i915->is_ddr3 == latency->is_ddr3 &&
- DIV_ROUND_CLOSEST(i915->fsb_freq, 1000) == latency->fsb_freq &&
- DIV_ROUND_CLOSEST(i915->mem_freq, 1000) == latency->mem_freq)
+ is_ddr3 == latency->is_ddr3 &&
+ DIV_ROUND_CLOSEST(dram_info->fsb_freq, 1000) == latency->fsb_freq &&
+ DIV_ROUND_CLOSEST(dram_info->mem_freq, 1000) == latency->mem_freq)
return latency;
}
drm_dbg_kms(display->drm,
- "Could not find CxSR latency for DDR%s, FSB %u kHz, MEM %u kHz\n",
- i915->is_ddr3 ? "3" : "2", i915->fsb_freq, i915->mem_freq);
+ "Could not find CxSR latency for %s, FSB %u kHz, MEM %u kHz\n",
+ intel_dram_type_str(dram_info->type),
+ dram_info->fsb_freq, dram_info->mem_freq);
return NULL;
}
@@ -109,6 +115,7 @@ static const struct cxsr_latency *pnv_get_cxsr_latency(struct intel_display *dis
static void chv_set_memory_dvfs(struct intel_display *display, bool enable)
{
u32 val;
+ int ret;
vlv_punit_get(display->drm);
@@ -121,8 +128,10 @@ static void chv_set_memory_dvfs(struct intel_display *display, bool enable)
val |= FORCE_DDR_FREQ_REQ_ACK;
vlv_punit_write(display->drm, PUNIT_REG_DDR_SETUP2, val);
- if (wait_for((vlv_punit_read(display->drm, PUNIT_REG_DDR_SETUP2) &
- FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
+ ret = poll_timeout_us(val = vlv_punit_read(display->drm, PUNIT_REG_DDR_SETUP2),
+ (val & FORCE_DDR_FREQ_REQ_ACK) == 0,
+ 500, 3000, false);
+ if (ret)
drm_err(display->drm,
"timed out waiting for Punit DDR DVFS request\n");
@@ -3902,6 +3911,7 @@ static void vlv_wm_get_hw_state(struct intel_display *display)
struct vlv_wm_values *wm = &display->wm.vlv;
struct intel_crtc *crtc;
u32 val;
+ int ret;
vlv_read_wm_values(display, wm);
@@ -3928,8 +3938,10 @@ static void vlv_wm_get_hw_state(struct intel_display *display)
val |= FORCE_DDR_FREQ_REQ_ACK;
vlv_punit_write(display->drm, PUNIT_REG_DDR_SETUP2, val);
- if (wait_for((vlv_punit_read(display->drm, PUNIT_REG_DDR_SETUP2) &
- FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
+ ret = poll_timeout_us(val = vlv_punit_read(display->drm, PUNIT_REG_DDR_SETUP2),
+ (val & FORCE_DDR_FREQ_REQ_ACK) == 0,
+ 500, 3000, false);
+ if (ret) {
drm_dbg_kms(display->drm,
"Punit not acking DDR DVFS request, "
"assuming DDR DVFS is disabled\n");
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 8d9cb73a93a7..37faa8f19f6e 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -25,6 +25,8 @@
* Jani Nikula <jani.nikula@intel.com>
*/
+#include <linux/iopoll.h>
+
#include <drm/display/drm_dsc_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fixed.h>
@@ -72,8 +74,12 @@ static int payload_credits_available(struct intel_display *display,
static bool wait_for_header_credits(struct intel_display *display,
enum transcoder dsi_trans, int hdr_credit)
{
- if (wait_for_us(header_credits_available(display, dsi_trans) >=
- hdr_credit, 100)) {
+ int ret, available;
+
+ ret = poll_timeout_us(available = header_credits_available(display, dsi_trans),
+ available >= hdr_credit,
+ 10, 100, false);
+ if (ret) {
drm_err(display->drm, "DSI header credits not released\n");
return false;
}
@@ -84,8 +90,12 @@ static bool wait_for_header_credits(struct intel_display *display,
static bool wait_for_payload_credits(struct intel_display *display,
enum transcoder dsi_trans, int payld_credit)
{
- if (wait_for_us(payload_credits_available(display, dsi_trans) >=
- payld_credit, 100)) {
+ int ret, available;
+
+ ret = poll_timeout_us(available = payload_credits_available(display, dsi_trans),
+ available >= payld_credit,
+ 10, 100, false);
+ if (ret) {
drm_err(display->drm, "DSI payload credits not released\n");
return false;
}
@@ -137,8 +147,11 @@ static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder)
/* wait for LP TX in progress bit to be cleared */
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- if (wait_for_us(!(intel_de_read(display, DSI_LP_MSG(dsi_trans)) &
- LPTX_IN_PROGRESS), 20))
+
+ ret = intel_de_wait_custom(display, DSI_LP_MSG(dsi_trans),
+ LPTX_IN_PROGRESS, 0,
+ 20, 0, NULL);
+ if (ret)
drm_err(display->drm, "LPTX bit not cleared\n");
}
}
@@ -516,13 +529,15 @@ static void gen11_dsi_enable_ddi_buffer(struct intel_encoder *encoder)
struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
+ int ret;
for_each_dsi_port(port, intel_dsi->ports) {
intel_de_rmw(display, DDI_BUF_CTL(port), 0, DDI_BUF_CTL_ENABLE);
- if (wait_for_us(!(intel_de_read(display, DDI_BUF_CTL(port)) &
- DDI_BUF_IS_IDLE),
- 500))
+ ret = intel_de_wait_custom(display, DDI_BUF_CTL(port),
+ DDI_BUF_IS_IDLE, 0,
+ 500, 0, NULL);
+ if (ret)
drm_err(display->drm, "DDI port:%c buffer idle\n",
port_name(port));
}
@@ -838,9 +853,14 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
/* wait for link ready */
for_each_dsi_port(port, intel_dsi->ports) {
+ int ret;
+
dsi_trans = dsi_port_to_transcoder(port);
- if (wait_for_us((intel_de_read(display, DSI_TRANS_FUNC_CONF(dsi_trans)) &
- LINK_READY), 2500))
+
+ ret = intel_de_wait_custom(display, DSI_TRANS_FUNC_CONF(dsi_trans),
+ LINK_READY, LINK_READY,
+ 2500, 0, NULL);
+ if (ret)
drm_err(display->drm, "DSI link not ready\n");
}
}
@@ -1321,6 +1341,7 @@ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder)
enum port port;
enum transcoder dsi_trans;
u32 tmp;
+ int ret;
/* disable periodic update mode */
if (is_cmd_mode(intel_dsi)) {
@@ -1337,9 +1358,10 @@ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder)
tmp &= ~LINK_ULPS_TYPE_LP11;
intel_de_write(display, DSI_LP_MSG(dsi_trans), tmp);
- if (wait_for_us((intel_de_read(display, DSI_LP_MSG(dsi_trans)) &
- LINK_IN_ULPS),
- 10))
+ ret = intel_de_wait_custom(display, DSI_LP_MSG(dsi_trans),
+ LINK_IN_ULPS, LINK_IN_ULPS,
+ 10, 0, NULL);
+ if (ret)
drm_err(display->drm, "DSI link not in ULPS\n");
}
@@ -1367,14 +1389,17 @@ static void gen11_dsi_disable_port(struct intel_encoder *encoder)
struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
+ int ret;
gen11_dsi_ungate_clocks(encoder);
for_each_dsi_port(port, intel_dsi->ports) {
intel_de_rmw(display, DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE, 0);
- if (wait_for_us((intel_de_read(display, DDI_BUF_CTL(port)) &
- DDI_BUF_IS_IDLE),
- 8))
+ ret = intel_de_wait_custom(display, DDI_BUF_CTL(port),
+ DDI_BUF_IS_IDLE, DDI_BUF_IS_IDLE,
+ 8, 0, NULL);
+
+ if (ret)
drm_err(display->drm,
"DDI port:%c buffer not idle\n",
port_name(port));
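
All the open-coded DSI register waits collapse onto one helper. A hedged reading of its parameter order from the call sites above: mask and expected value, a fast timeout in microseconds, a slow timeout in milliseconds, and an optional pointer receiving the final register value:

	ret = intel_de_wait_custom(display, DDI_BUF_CTL(port),
				   DDI_BUF_IS_IDLE, 0,	/* mask, expected value */
				   500, 0,		/* fast us / slow ms timeouts */
				   NULL);		/* final reg value not needed */
	if (ret)
		drm_err(display->drm, "DDI port:%c buffer not idle\n",
			port_name(port));
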
diff --git a/drivers/gpu/drm/i915/display/intel_alpm.c b/drivers/gpu/drm/i915/display/intel_alpm.c
index dfdde8e4eabe..ed7a7ed486b5 100644
--- a/drivers/gpu/drm/i915/display/intel_alpm.c
+++ b/drivers/gpu/drm/i915/display/intel_alpm.c
@@ -16,6 +16,14 @@
#include "intel_psr.h"
#include "intel_psr_regs.h"
+#define SILENCE_PERIOD_MIN_TIME 80
+#define SILENCE_PERIOD_MAX_TIME 180
+#define SILENCE_PERIOD_TIME (SILENCE_PERIOD_MIN_TIME + \
+ (SILENCE_PERIOD_MAX_TIME - \
+ SILENCE_PERIOD_MIN_TIME) / 2)
+
+#define LFPS_CYCLE_COUNT 10
+
bool intel_alpm_aux_wake_supported(struct intel_dp *intel_dp)
{
return intel_dp->alpm_dpcd & DP_ALPM_CAP;
@@ -44,72 +52,49 @@ void intel_alpm_init(struct intel_dp *intel_dp)
mutex_init(&intel_dp->alpm_parameters.lock);
}
-/*
- * See Bspec: 71632 for the table
- *
- * Silence_period = tSilence,Min + ((tSilence,Max - tSilence,Min) / 2)
- *
- * Half cycle duration:
- *
- * Link rates 1.62 - 4.32 and tLFPS_Cycle = 70 ns
- * FLOOR( (Link Rate * tLFPS_Cycle) / (2 * 10) )
- *
- * Link rates 5.4 - 8.1
- * PORT_ALPM_LFPS_CTL[ LFPS Cycle Count ] = 10
- * LFPS Period chosen is the mid-point of the min:max values from the table
- * FLOOR( LFPS Period in Symbol clocks /
- * (2 * PORT_ALPM_LFPS_CTL[ LFPS Cycle Count ]) )
- */
-static bool _lnl_get_silence_period_and_lfps_half_cycle(int link_rate,
- int *silence_period,
- int *lfps_half_cycle)
+static int get_silence_period_symbols(const struct intel_crtc_state *crtc_state)
{
- switch (link_rate) {
- case 162000:
- *silence_period = 20;
- *lfps_half_cycle = 5;
- break;
- case 216000:
- *silence_period = 27;
- *lfps_half_cycle = 7;
- break;
- case 243000:
- *silence_period = 31;
- *lfps_half_cycle = 8;
- break;
- case 270000:
- *silence_period = 34;
- *lfps_half_cycle = 9;
- break;
- case 324000:
- *silence_period = 41;
- *lfps_half_cycle = 11;
- break;
- case 432000:
- *silence_period = 56;
- *lfps_half_cycle = 15;
- break;
- case 540000:
- *silence_period = 69;
- *lfps_half_cycle = 12;
- break;
- case 648000:
- *silence_period = 84;
- *lfps_half_cycle = 15;
- break;
- case 675000:
- *silence_period = 87;
- *lfps_half_cycle = 15;
- break;
- case 810000:
- *silence_period = 104;
- *lfps_half_cycle = 19;
- break;
- default:
- *silence_period = *lfps_half_cycle = -1;
- return false;
+ return SILENCE_PERIOD_TIME * intel_dp_link_symbol_clock(crtc_state->port_clock) /
+ 1000 / 1000;
+}
+
+static int get_lfps_cycle_min_max_time(const struct intel_crtc_state *crtc_state,
+ int *min, int *max)
+{
+ if (crtc_state->port_clock < 540000) {
+ *min = 65 * LFPS_CYCLE_COUNT;
+ *max = 75 * LFPS_CYCLE_COUNT;
+ } else if (crtc_state->port_clock <= 810000) {
+ *min = 140;
+ *max = 800;
+ } else {
+ *min = *max = -1;
+ return -1;
}
- return true;
+
+ return 0;
+}
+
+static int get_lfps_cycle_time(const struct intel_crtc_state *crtc_state)
+{
+ int tlfps_cycle_min, tlfps_cycle_max, ret;
+
+ ret = get_lfps_cycle_min_max_time(crtc_state, &tlfps_cycle_min,
+ &tlfps_cycle_max);
+ if (ret)
+ return ret;
+
+ return tlfps_cycle_min + (tlfps_cycle_max - tlfps_cycle_min) / 2;
+}
+
+static int get_lfps_half_cycle_clocks(const struct intel_crtc_state *crtc_state)
+{
+ int lfps_cycle_time = get_lfps_cycle_time(crtc_state);
+
+ if (lfps_cycle_time < 0)
+ return -1;
+
+ return lfps_cycle_time * crtc_state->port_clock / 1000 / 1000 / (2 * LFPS_CYCLE_COUNT);
}
/*
@@ -131,21 +116,19 @@ static bool _lnl_get_silence_period_and_lfps_half_cycle(int link_rate,
* tML_PHY_LOCK = TPS4 Length * ( 10 / (Link Rate in MHz) )
* TPS4 Length = 252 Symbols
*/
-static int _lnl_compute_aux_less_wake_time(int port_clock)
+static int _lnl_compute_aux_less_wake_time(const struct intel_crtc_state *crtc_state)
{
int tphy2_p2_to_p0 = 12 * 1000;
- int tlfps_period_max = 800;
- int tsilence_max = 180;
int t1 = 50 * 1000;
int tps4 = 252;
/* port_clock is link rate in 10kbit/s units */
- int tml_phy_lock = 1000 * 1000 * tps4 / port_clock;
+ int tml_phy_lock = 1000 * 1000 * tps4 / crtc_state->port_clock;
int num_ml_phy_lock = 7 + DIV_ROUND_UP(6500, tml_phy_lock) + 1;
int t2 = num_ml_phy_lock * tml_phy_lock;
int tcds = 1 * t2;
- return DIV_ROUND_UP(tphy2_p2_to_p0 + tlfps_period_max + tsilence_max +
- t1 + tcds, 1000);
+ return DIV_ROUND_UP(tphy2_p2_to_p0 + get_lfps_cycle_time(crtc_state) +
+ SILENCE_PERIOD_TIME + t1 + tcds, 1000);
}
static int
@@ -157,13 +140,13 @@ _lnl_compute_aux_less_alpm_params(struct intel_dp *intel_dp,
lfps_half_cycle;
aux_less_wake_time =
- _lnl_compute_aux_less_wake_time(crtc_state->port_clock);
+ _lnl_compute_aux_less_wake_time(crtc_state);
aux_less_wake_lines = intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode,
aux_less_wake_time);
+ silence_period = get_silence_period_symbols(crtc_state);
- if (!_lnl_get_silence_period_and_lfps_half_cycle(crtc_state->port_clock,
- &silence_period,
- &lfps_half_cycle))
+ lfps_half_cycle = get_lfps_half_cycle_clocks(crtc_state);
+ if (lfps_half_cycle < 0)
return false;
if (aux_less_wake_lines > ALPM_CTL_AUX_LESS_WAKE_TIME_MASK ||
@@ -406,7 +389,7 @@ void intel_alpm_port_configure(struct intel_dp *intel_dp,
PORT_ALPM_CTL_MAX_PHY_SWING_HOLD(0) |
PORT_ALPM_CTL_SILENCE_PERIOD(
intel_dp->alpm_parameters.silence_period_sym_clocks);
- lfps_ctl_val = PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT(10) |
+ lfps_ctl_val = PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT(LFPS_CYCLE_COUNT) |
PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION(
intel_dp->alpm_parameters.lfps_half_cycle_num_of_syms) |
PORT_ALPM_LFPS_CTL_FIRST_LFPS_HALF_CYCLE_DURATION(
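
Note: a quick cross-check of the computed values against the removed Bspec table, using C integer division as in the code. Take port_clock = 162000 (1.62 Gbps, 8b/10b, so a 162000 kHz symbol clock) and SILENCE_PERIOD_TIME = 80 + (180 - 80) / 2 = 130 ns:

        silence_period  = 130 * 162000 / 1000 / 1000      = 21 symbols   (table: 20)
        lfps_cycle_time = 650 + (750 - 650) / 2           = 700 ns
        lfps_half_cycle = 700 * 162000 / 1000 / 1000 / 20 = 5 clocks     (table: 5)

where 650/750 ns are the min/max for link rates below 540000 (65 ns and 75 ns per cycle times LFPS_CYCLE_COUNT) and 20 is 2 * LFPS_CYCLE_COUNT. The one-symbol silence-period delta comes from computing the tSilence mid-point directly rather than using the table entry.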
diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c
index e007380e9a63..3b14f929825a 100644
--- a/drivers/gpu/drm/i915/display/intel_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_backlight.c
@@ -236,7 +236,8 @@ static void i9xx_set_backlight(const struct drm_connector_state *conn_state, u32
struct intel_panel *panel = &connector->panel;
u32 tmp, mask;
- drm_WARN_ON(display->drm, panel->backlight.pwm_level_max == 0);
+ if (drm_WARN_ON(display->drm, panel->backlight.pwm_level_max == 0))
+ return;
if (panel->backlight.combination_mode) {
struct pci_dev *pdev = to_pci_dev(display->drm->dev);
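
Note: drm_WARN_ON() evaluates to the condition it tests, so the rewritten form both logs a backtrace and bails out; without the early return, a zero pwm_level_max would still reach the level-scaling math further down the function. The idiom, in isolation:

        /* drm_WARN_ON(drm, cond) returns cond: log a backtrace, then bail */
        if (drm_WARN_ON(display->drm, panel->backlight.pwm_level_max == 0))
                return;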
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 9c268bed091d..3596dce84c28 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -36,6 +36,7 @@
#include "soc/intel_rom.h"
#include "i915_drv.h"
+#include "i915_utils.h"
#include "intel_display.h"
#include "intel_display_core.h"
#include "intel_display_rpm.h"
@@ -1566,10 +1567,7 @@ parse_psr(struct intel_display *display,
panel->vbt.psr.full_link = psr_table->full_link;
panel->vbt.psr.require_aux_wakeup = psr_table->require_aux_to_wakeup;
-
- /* Allowed VBT values goes from 0 to 15 */
- panel->vbt.psr.idle_frames = psr_table->idle_frames < 0 ? 0 :
- psr_table->idle_frames > 15 ? 15 : psr_table->idle_frames;
+ panel->vbt.psr.idle_frames = psr_table->idle_frames;
/*
* New psr options 0=500us, 1=100us, 2=2500us, 3=0us
@@ -2480,6 +2478,25 @@ static int parse_bdb_216_dp_max_link_rate(const int vbt_max_link_rate)
}
}
+static u32 edp_rate_override_mask(int rate)
+{
+ switch (rate) {
+ case 2000000: return BDB_263_VBT_EDP_LINK_RATE_20;
+ case 1350000: return BDB_263_VBT_EDP_LINK_RATE_13_5;
+ case 1000000: return BDB_263_VBT_EDP_LINK_RATE_10;
+ case 810000: return BDB_263_VBT_EDP_LINK_RATE_8_1;
+ case 675000: return BDB_263_VBT_EDP_LINK_RATE_6_75;
+ case 540000: return BDB_263_VBT_EDP_LINK_RATE_5_4;
+ case 432000: return BDB_263_VBT_EDP_LINK_RATE_4_32;
+ case 324000: return BDB_263_VBT_EDP_LINK_RATE_3_24;
+ case 270000: return BDB_263_VBT_EDP_LINK_RATE_2_7;
+ case 243000: return BDB_263_VBT_EDP_LINK_RATE_2_43;
+ case 216000: return BDB_263_VBT_EDP_LINK_RATE_2_16;
+ case 162000: return BDB_263_VBT_EDP_LINK_RATE_1_62;
+ default: return 0;
+ }
+}
+
int intel_bios_dp_max_link_rate(const struct intel_bios_encoder_data *devdata)
{
if (!devdata || devdata->display->vbt.version < 216)
@@ -2499,6 +2516,19 @@ int intel_bios_dp_max_lane_count(const struct intel_bios_encoder_data *devdata)
return devdata->child.dp_max_lane_count + 1;
}
+bool
+intel_bios_encoder_reject_edp_rate(const struct intel_bios_encoder_data *devdata,
+ int rate)
+{
+ if (!devdata || devdata->display->vbt.version < 263)
+ return false;
+
+ if (devdata->child.edp_data_rate_override == BDB_263_VBT_EDP_RATES_MASK)
+ return false;
+
+ return devdata->child.edp_data_rate_override & edp_rate_override_mask(rate);
+}
+
static void sanitize_device_type(struct intel_bios_encoder_data *devdata,
enum port port)
{
@@ -2747,8 +2777,10 @@ static int child_device_expected_size(u16 version)
{
BUILD_BUG_ON(sizeof(struct child_device_config) < 40);
- if (version > 256)
+ if (version > 263)
return -ENOENT;
+ else if (version >= 263)
+ return 44;
else if (version >= 256)
return 40;
else if (version >= 216)
@@ -3743,8 +3775,6 @@ DEFINE_SHOW_ATTRIBUTE(intel_bios_vbt);
void intel_bios_debugfs_register(struct intel_display *display)
{
- struct drm_minor *minor = display->drm->primary;
-
- debugfs_create_file("i915_vbt", 0444, minor->debugfs_root,
+ debugfs_create_file("i915_vbt", 0444, display->drm->debugfs_root,
display, &intel_bios_vbt_fops);
}
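
Note: intel_bios_encoder_reject_edp_rate() reports true only when a v263+ VBT programs the eDP rate-override bitmap (an all-ones field means no override) and the bit for the given rate is set. A usage sketch for a caller filtering a rate table; the loop and array names here are illustrative, not from the patch:

        for (i = 0; i < num_rates; i++) {
                /* skip link rates the VBT explicitly rejects */
                if (intel_bios_encoder_reject_edp_rate(devdata, rates[i]))
                        continue;
                /* rates[i] remains eligible for link training */
        }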
diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h
index 6cd7a011b8c4..f9e438b2787b 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.h
+++ b/drivers/gpu/drm/i915/display/intel_bios.h
@@ -50,180 +50,6 @@ enum intel_backlight_type {
INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE,
};
-/*
- * MIPI Sequence Block definitions
- *
- * Note the VBT spec has AssertReset / DeassertReset swapped from their
- * usual naming, we use the proper names here to avoid confusion when
- * reading the code.
- */
-enum mipi_seq {
- MIPI_SEQ_END = 0,
- MIPI_SEQ_DEASSERT_RESET, /* Spec says MipiAssertResetPin */
- MIPI_SEQ_INIT_OTP,
- MIPI_SEQ_DISPLAY_ON,
- MIPI_SEQ_DISPLAY_OFF,
- MIPI_SEQ_ASSERT_RESET, /* Spec says MipiDeassertResetPin */
- MIPI_SEQ_BACKLIGHT_ON, /* sequence block v2+ */
- MIPI_SEQ_BACKLIGHT_OFF, /* sequence block v2+ */
- MIPI_SEQ_TEAR_ON, /* sequence block v2+ */
- MIPI_SEQ_TEAR_OFF, /* sequence block v3+ */
- MIPI_SEQ_POWER_ON, /* sequence block v3+ */
- MIPI_SEQ_POWER_OFF, /* sequence block v3+ */
- MIPI_SEQ_MAX
-};
-
-enum mipi_seq_element {
- MIPI_SEQ_ELEM_END = 0,
- MIPI_SEQ_ELEM_SEND_PKT,
- MIPI_SEQ_ELEM_DELAY,
- MIPI_SEQ_ELEM_GPIO,
- MIPI_SEQ_ELEM_I2C, /* sequence block v2+ */
- MIPI_SEQ_ELEM_SPI, /* sequence block v3+ */
- MIPI_SEQ_ELEM_PMIC, /* sequence block v3+ */
- MIPI_SEQ_ELEM_MAX
-};
-
-#define MIPI_DSI_UNDEFINED_PANEL_ID 0
-#define MIPI_DSI_GENERIC_PANEL_ID 1
-
-struct mipi_config {
- u16 panel_id;
-
- /* General Params */
- u32 enable_dithering:1;
- u32 rsvd1:1;
- u32 is_bridge:1;
-
- u32 panel_arch_type:2;
- u32 is_cmd_mode:1;
-
-#define NON_BURST_SYNC_PULSE 0x1
-#define NON_BURST_SYNC_EVENTS 0x2
-#define BURST_MODE 0x3
- u32 video_transfer_mode:2;
-
- u32 cabc_supported:1;
-#define PPS_BLC_PMIC 0
-#define PPS_BLC_SOC 1
- u32 pwm_blc:1;
-
- /* Bit 13:10 */
-#define PIXEL_FORMAT_RGB565 0x1
-#define PIXEL_FORMAT_RGB666 0x2
-#define PIXEL_FORMAT_RGB666_LOOSELY_PACKED 0x3
-#define PIXEL_FORMAT_RGB888 0x4
- u32 videomode_color_format:4;
-
- /* Bit 15:14 */
-#define ENABLE_ROTATION_0 0x0
-#define ENABLE_ROTATION_90 0x1
-#define ENABLE_ROTATION_180 0x2
-#define ENABLE_ROTATION_270 0x3
- u32 rotation:2;
- u32 bta_enabled:1;
- u32 rsvd2:15;
-
- /* 2 byte Port Description */
-#define DUAL_LINK_NOT_SUPPORTED 0
-#define DUAL_LINK_FRONT_BACK 1
-#define DUAL_LINK_PIXEL_ALT 2
- u16 dual_link:2;
- u16 lane_cnt:2;
- u16 pixel_overlap:3;
- u16 rgb_flip:1;
-#define DL_DCS_PORT_A 0x00
-#define DL_DCS_PORT_C 0x01
-#define DL_DCS_PORT_A_AND_C 0x02
- u16 dl_dcs_cabc_ports:2;
- u16 dl_dcs_backlight_ports:2;
- u16 rsvd3:4;
-
- u16 rsvd4;
-
- u8 rsvd5;
- u32 target_burst_mode_freq;
- u32 dsi_ddr_clk;
- u32 bridge_ref_clk;
-
-#define BYTE_CLK_SEL_20MHZ 0
-#define BYTE_CLK_SEL_10MHZ 1
-#define BYTE_CLK_SEL_5MHZ 2
- u8 byte_clk_sel:2;
-
- u8 rsvd6:6;
-
- /* DPHY Flags */
- u16 dphy_param_valid:1;
- u16 eot_pkt_disabled:1;
- u16 enable_clk_stop:1;
- u16 rsvd7:13;
-
- u32 hs_tx_timeout;
- u32 lp_rx_timeout;
- u32 turn_around_timeout;
- u32 device_reset_timer;
- u32 master_init_timer;
- u32 dbi_bw_timer;
- u32 lp_byte_clk_val;
-
- /* 4 byte Dphy Params */
- u32 prepare_cnt:6;
- u32 rsvd8:2;
- u32 clk_zero_cnt:8;
- u32 trail_cnt:5;
- u32 rsvd9:3;
- u32 exit_zero_cnt:6;
- u32 rsvd10:2;
-
- u32 clk_lane_switch_cnt;
- u32 hl_switch_cnt;
-
- u32 rsvd11[6];
-
- /* timings based on dphy spec */
- u8 tclk_miss;
- u8 tclk_post;
- u8 rsvd12;
- u8 tclk_pre;
- u8 tclk_prepare;
- u8 tclk_settle;
- u8 tclk_term_enable;
- u8 tclk_trail;
- u16 tclk_prepare_clkzero;
- u8 rsvd13;
- u8 td_term_enable;
- u8 teot;
- u8 ths_exit;
- u8 ths_prepare;
- u16 ths_prepare_hszero;
- u8 rsvd14;
- u8 ths_settle;
- u8 ths_skip;
- u8 ths_trail;
- u8 tinit;
- u8 tlpx;
- u8 rsvd15[3];
-
- /* GPIOs */
- u8 panel_enable;
- u8 bl_enable;
- u8 pwm_enable;
- u8 reset_r_n;
- u8 pwr_down_r;
- u8 stdby_r_n;
-
-} __packed;
-
-/* all delays have a unit of 100us */
-struct mipi_pps_data {
- u16 panel_on_delay;
- u16 bl_enable_delay;
- u16 bl_disable_delay;
- u16 panel_off_delay;
- u16 panel_power_cycle_delay;
-} __packed;
-
void intel_bios_init(struct intel_display *display);
void intel_bios_init_panel_early(struct intel_display *display,
struct intel_panel *panel,
@@ -259,6 +85,8 @@ bool intel_bios_encoder_is_lspcon(const struct intel_bios_encoder_data *devdata)
bool intel_bios_encoder_lane_reversal(const struct intel_bios_encoder_data *devdata);
bool intel_bios_encoder_hpd_invert(const struct intel_bios_encoder_data *devdata);
enum port intel_bios_encoder_port(const struct intel_bios_encoder_data *devdata);
+bool intel_bios_encoder_reject_edp_rate(const struct intel_bios_encoder_data *devdata,
+ int rate);
enum aux_ch intel_bios_dp_aux_ch(const struct intel_bios_encoder_data *devdata);
int intel_bios_dp_boost_level(const struct intel_bios_encoder_data *devdata);
int intel_bios_dp_max_lane_count(const struct intel_bios_encoder_data *devdata);
diff --git a/drivers/gpu/drm/i915/display/intel_bo.c b/drivers/gpu/drm/i915/display/intel_bo.c
index 65d64f79a4bd..6ae1374d5c2b 100644
--- a/drivers/gpu/drm/i915/display/intel_bo.c
+++ b/drivers/gpu/drm/i915/display/intel_bo.c
@@ -2,7 +2,7 @@
/* Copyright © 2024 Intel Corporation */
#include <drm/drm_panic.h>
-#include "display/intel_display_types.h"
+
#include "gem/i915_gem_mman.h"
#include "gem/i915_gem_object.h"
#include "gem/i915_gem_object_frontbuffer.h"
@@ -59,18 +59,3 @@ void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj)
{
i915_debugfs_describe_obj(m, to_intel_bo(obj));
}
-
-struct intel_framebuffer *intel_bo_alloc_framebuffer(void)
-{
- return i915_gem_object_alloc_framebuffer();
-}
-
-int intel_bo_panic_setup(struct drm_scanout_buffer *sb)
-{
- return i915_gem_object_panic_setup(sb);
-}
-
-void intel_bo_panic_finish(struct intel_framebuffer *fb)
-{
- return i915_gem_object_panic_finish(fb);
-}
diff --git a/drivers/gpu/drm/i915/display/intel_bo.h b/drivers/gpu/drm/i915/display/intel_bo.h
index 97087a64d23b..48d87019e48a 100644
--- a/drivers/gpu/drm/i915/display/intel_bo.h
+++ b/drivers/gpu/drm/i915/display/intel_bo.h
@@ -25,8 +25,5 @@ struct intel_frontbuffer *intel_bo_set_frontbuffer(struct drm_gem_object *obj,
struct intel_frontbuffer *front);
void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj);
-struct intel_framebuffer *intel_bo_alloc_framebuffer(void);
-int intel_bo_panic_setup(struct drm_scanout_buffer *sb);
-void intel_bo_panic_finish(struct intel_framebuffer *fb);
#endif /* __INTEL_BO__ */
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index d29a755612de..ac6da20d9529 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -359,7 +359,7 @@ static int icl_get_qgv_points(struct intel_display *display,
for (i = 0; i < qi->num_psf_points; i++)
drm_dbg_kms(display->drm,
- "PSF GV %d: CLK=%d \n",
+ "PSF GV %d: CLK=%d\n",
i, qi->psf_points[i].clk);
}
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 228aa64c1349..9725eebe5706 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -22,6 +22,7 @@
*/
#include <linux/debugfs.h>
+#include <linux/iopoll.h>
#include <linux/time.h>
#include <drm/drm_fixed.h>
@@ -31,6 +32,7 @@
#include "hsw_ips.h"
#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_bw.h"
@@ -672,6 +674,7 @@ static void vlv_set_cdclk(struct intel_display *display,
int cdclk = cdclk_config->cdclk;
u32 val, cmd = cdclk_config->voltage_level;
intel_wakeref_t wakeref;
+ int ret;
switch (cdclk) {
case 400000:
@@ -702,12 +705,12 @@ static void vlv_set_cdclk(struct intel_display *display,
val &= ~DSPFREQGUAR_MASK;
val |= (cmd << DSPFREQGUAR_SHIFT);
vlv_punit_write(display->drm, PUNIT_REG_DSPSSPM, val);
- if (wait_for((vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM) &
- DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
- 50)) {
- drm_err(display->drm,
- "timed out waiting for CDclk change\n");
- }
+
+ ret = poll_timeout_us(val = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM),
+ (val & DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
+ 500, 50 * 1000, false);
+ if (ret)
+ drm_err(display->drm, "timed out waiting for CDCLK change\n");
if (cdclk == 400000) {
u32 divider;
@@ -721,11 +724,11 @@ static void vlv_set_cdclk(struct intel_display *display,
val |= divider;
vlv_cck_write(display->drm, CCK_DISPLAY_CLOCK_CONTROL, val);
- if (wait_for((vlv_cck_read(display->drm, CCK_DISPLAY_CLOCK_CONTROL) &
- CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
- 50))
- drm_err(display->drm,
- "timed out waiting for CDclk change\n");
+ ret = poll_timeout_us(val = vlv_cck_read(display->drm, CCK_DISPLAY_CLOCK_CONTROL),
+ (val & CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
+ 500, 50 * 1000, false);
+ if (ret)
+ drm_err(display->drm, "timed out waiting for CDCLK change\n");
}
/* adjust self-refresh exit latency value */
@@ -761,6 +764,7 @@ static void chv_set_cdclk(struct intel_display *display,
int cdclk = cdclk_config->cdclk;
u32 val, cmd = cdclk_config->voltage_level;
intel_wakeref_t wakeref;
+ int ret;
switch (cdclk) {
case 333333:
@@ -786,12 +790,12 @@ static void chv_set_cdclk(struct intel_display *display,
val &= ~DSPFREQGUAR_MASK_CHV;
val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
vlv_punit_write(display->drm, PUNIT_REG_DSPSSPM, val);
- if (wait_for((vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM) &
- DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
- 50)) {
- drm_err(display->drm,
- "timed out waiting for CDclk change\n");
- }
+
+ ret = poll_timeout_us(val = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM),
+ (val & DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
+ 500, 50 * 1000, false);
+ if (ret)
+ drm_err(display->drm, "timed out waiting for CDCLK change\n");
vlv_punit_put(display->drm);
@@ -903,8 +907,10 @@ static void bdw_set_cdclk(struct intel_display *display,
* According to the spec, it should be enough to poll for this 1 us.
* However, extensive testing shows that this can take longer.
*/
- if (wait_for_us(intel_de_read(display, LCPLL_CTL) &
- LCPLL_CD_SOURCE_FCLK_DONE, 100))
+ ret = intel_de_wait_custom(display, LCPLL_CTL,
+ LCPLL_CD_SOURCE_FCLK_DONE, LCPLL_CD_SOURCE_FCLK_DONE,
+ 100, 0, NULL);
+ if (ret)
drm_err(display->drm, "Switching to FCLK failed\n");
intel_de_rmw(display, LCPLL_CTL,
@@ -913,8 +919,10 @@ static void bdw_set_cdclk(struct intel_display *display,
intel_de_rmw(display, LCPLL_CTL,
LCPLL_CD_SOURCE_FCLK, 0);
- if (wait_for_us((intel_de_read(display, LCPLL_CTL) &
- LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
+ ret = intel_de_wait_custom(display, LCPLL_CTL,
+ LCPLL_CD_SOURCE_FCLK_DONE, 0,
+ 1, 0, NULL);
+ if (ret)
drm_err(display->drm, "Switching back to LCPLL failed\n");
intel_pcode_write(display->drm, HSW_PCODE_DE_WRITE_FREQ_REQ,
@@ -3569,7 +3577,7 @@ static int i9xx_hrawclk(struct intel_display *display)
struct drm_i915_private *i915 = to_i915(display->drm);
/* hrawclock is 1/4 the FSB frequency */
- return DIV_ROUND_CLOSEST(i9xx_fsb_freq(i915), 4);
+ return DIV_ROUND_CLOSEST(intel_fsb_freq(i915), 4);
}
/**
@@ -3622,9 +3630,7 @@ DEFINE_SHOW_ATTRIBUTE(i915_cdclk_info);
void intel_cdclk_debugfs_register(struct intel_display *display)
{
- struct drm_minor *minor = display->drm->primary;
-
- debugfs_create_file("i915_cdclk_info", 0444, minor->debugfs_root,
+ debugfs_create_file("i915_cdclk_info", 0444, display->drm->debugfs_root,
display, &i915_cdclk_info_fops);
}
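
Note: the poll_timeout_us() conversions in this file (hence the new <linux/iopoll.h> include) all follow one pattern; judging by the call sites, the arguments are (read expression, success condition, poll interval in us, total timeout in us, sleep-before-first-read flag), returning non-zero on timeout. One call restated with the parameters labelled:

        ret = poll_timeout_us(val = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM),
                              (val & DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
                              500,              /* re-read every 500 us */
                              50 * 1000,        /* give up after 50 ms */
                              false);           /* don't sleep before the first read */
        if (ret)
                drm_err(display->drm, "timed out waiting for CDCLK change\n");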
diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c
index 42c923f416b3..6a55854db5b6 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.c
+++ b/drivers/gpu/drm/i915/display/intel_connector.c
@@ -77,7 +77,7 @@ void intel_connector_cancel_modeset_retry_work(struct intel_connector *connector
drm_connector_put(&connector->base);
}
-int intel_connector_init(struct intel_connector *connector)
+static int intel_connector_init(struct intel_connector *connector)
{
struct intel_digital_connector_state *conn_state;
diff --git a/drivers/gpu/drm/i915/display/intel_connector.h b/drivers/gpu/drm/i915/display/intel_connector.h
index aafb25a814fa..0aa86626e646 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.h
+++ b/drivers/gpu/drm/i915/display/intel_connector.h
@@ -14,7 +14,6 @@ struct i2c_adapter;
struct intel_connector;
struct intel_encoder;
-int intel_connector_init(struct intel_connector *connector);
struct intel_connector *intel_connector_alloc(void);
void intel_connector_free(struct intel_connector *connector);
void intel_connector_destroy(struct drm_connector *connector);
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 898c5d9e8f7a..31e68047f217 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -50,6 +50,7 @@
#include "intel_gmbus.h"
#include "intel_hotplug.h"
#include "intel_hotplug_irq.h"
+#include "intel_link_bw.h"
#include "intel_load_detect.h"
#include "intel_pch_display.h"
#include "intel_pch_refclk.h"
@@ -421,7 +422,7 @@ static int pch_crt_compute_config(struct intel_encoder *encoder,
return -EINVAL;
crtc_state->has_pch_encoder = true;
- if (!intel_fdi_compute_pipe_bpp(crtc_state))
+ if (!intel_link_bw_compute_pipe_bpp(crtc_state))
return -EINVAL;
crtc_state->output_format = INTEL_OUTPUT_FORMAT_RGB;
@@ -446,7 +447,7 @@ static int hsw_crt_compute_config(struct intel_encoder *encoder,
return -EINVAL;
crtc_state->has_pch_encoder = true;
- if (!intel_fdi_compute_pipe_bpp(crtc_state))
+ if (!intel_link_bw_compute_pipe_bpp(crtc_state))
return -EINVAL;
crtc_state->output_format = INTEL_OUTPUT_FORMAT_RGB;
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c
index 198e69efe9ac..d4d181f9dca5 100644
--- a/drivers/gpu/drm/i915/display/intel_cursor.c
+++ b/drivers/gpu/drm/i915/display/intel_cursor.c
@@ -33,17 +33,9 @@ static const u32 intel_cursor_formats[] = {
DRM_FORMAT_ARGB8888,
};
-static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
+static u32 intel_cursor_surf_offset(const struct intel_plane_state *plane_state)
{
- struct intel_display *display = to_intel_display(plane_state);
- u32 base;
-
- if (DISPLAY_INFO(display)->cursor_needs_physical)
- base = plane_state->phys_dma_addr;
- else
- base = intel_plane_ggtt_offset(plane_state);
-
- return base + plane_state->view.color_plane[0].offset;
+ return plane_state->view.color_plane[0].offset;
}
static u32 intel_cursor_position(const struct intel_crtc_state *crtc_state,
@@ -213,8 +205,7 @@ static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
return cntl;
}
-static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+static u32 i845_cursor_ctl(const struct intel_plane_state *plane_state)
{
return CURSOR_ENABLE |
CURSOR_FORMAT_ARGB |
@@ -274,7 +265,7 @@ static int i845_check_cursor(struct intel_crtc_state *crtc_state,
return -EINVAL;
}
- plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);
+ plane_state->ctl = i845_cursor_ctl(plane_state);
return 0;
}
@@ -297,7 +288,7 @@ static void i845_cursor_update_arm(struct intel_dsb *dsb,
size = CURSOR_HEIGHT(height) | CURSOR_WIDTH(width);
- base = intel_cursor_base(plane_state);
+ base = plane_state->surf;
pos = intel_cursor_position(crtc_state, plane_state, false);
}
@@ -406,8 +397,7 @@ static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
return cntl;
}
-static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+static u32 i9xx_cursor_ctl(const struct intel_plane_state *plane_state)
{
struct intel_display *display = to_intel_display(plane_state);
u32 cntl = 0;
@@ -534,7 +524,7 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
return -EINVAL;
}
- plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);
+ plane_state->ctl = i9xx_cursor_ctl(plane_state);
return 0;
}
@@ -675,7 +665,7 @@ static void i9xx_cursor_update_arm(struct intel_dsb *dsb,
if (width != height)
fbc_ctl = CUR_FBC_EN | CUR_FBC_HEIGHT(height - 1);
- base = intel_cursor_base(plane_state);
+ base = plane_state->surf;
pos = intel_cursor_position(crtc_state, plane_state, false);
}
@@ -1051,6 +1041,8 @@ intel_cursor_plane_create(struct intel_display *display,
cursor->check_plane = i9xx_check_cursor;
}
+ cursor->surf_offset = intel_cursor_surf_offset;
+
if (DISPLAY_VER(display) >= 5 || display->platform.g4x)
cursor->capture_error = g4x_cursor_capture_error;
else
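
Note: the cursor code no longer chooses between a physical address and a GGTT offset itself; intel_cursor_base() shrinks to intel_cursor_surf_offset(), wired up as the plane's new ->surf_offset() hook, and both arm paths read a precomputed plane_state->surf. The base-address selection (cursor_needs_physical, the dropped phys_dma_addr field) presumably moves to common plane code along the lines of:

        /* assumed common-plane side, not shown in these hunks */
        plane_state->surf = base + plane->surf_offset(plane_state);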
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 9ecbb4b99c37..c09aa759f4d4 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -26,6 +26,7 @@
*/
#include <linux/iopoll.h>
+#include <linux/seq_buf.h>
#include <linux/string_helpers.h>
#include <drm/display/drm_dp_helper.h>
@@ -2167,7 +2168,8 @@ icl_program_mg_dp_mode(struct intel_digital_port *dig_port,
{
struct intel_display *display = to_intel_display(crtc_state);
enum tc_port tc_port = intel_encoder_to_tc(&dig_port->base);
- u32 ln0, ln1, pin_assignment;
+ enum intel_tc_pin_assignment pin_assignment;
+ u32 ln0, ln1;
u8 width;
if (DISPLAY_VER(display) >= 14)
@@ -2189,11 +2191,11 @@ icl_program_mg_dp_mode(struct intel_digital_port *dig_port,
ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
/* DPPATC */
- pin_assignment = intel_tc_port_get_pin_assignment_mask(dig_port);
+ pin_assignment = intel_tc_port_get_pin_assignment(dig_port);
width = crtc_state->lane_count;
switch (pin_assignment) {
- case 0x0:
+ case INTEL_TC_PIN_ASSIGNMENT_NONE:
drm_WARN_ON(display->drm,
!intel_tc_port_in_legacy_mode(dig_port));
if (width == 1) {
@@ -2203,20 +2205,20 @@ icl_program_mg_dp_mode(struct intel_digital_port *dig_port,
ln1 |= MG_DP_MODE_CFG_DP_X2_MODE;
}
break;
- case 0x1:
+ case INTEL_TC_PIN_ASSIGNMENT_A:
if (width == 4) {
ln0 |= MG_DP_MODE_CFG_DP_X2_MODE;
ln1 |= MG_DP_MODE_CFG_DP_X2_MODE;
}
break;
- case 0x2:
+ case INTEL_TC_PIN_ASSIGNMENT_B:
if (width == 2) {
ln0 |= MG_DP_MODE_CFG_DP_X2_MODE;
ln1 |= MG_DP_MODE_CFG_DP_X2_MODE;
}
break;
- case 0x3:
- case 0x5:
+ case INTEL_TC_PIN_ASSIGNMENT_C:
+ case INTEL_TC_PIN_ASSIGNMENT_E:
if (width == 1) {
ln0 |= MG_DP_MODE_CFG_DP_X1_MODE;
ln1 |= MG_DP_MODE_CFG_DP_X1_MODE;
@@ -2225,8 +2227,8 @@ icl_program_mg_dp_mode(struct intel_digital_port *dig_port,
ln1 |= MG_DP_MODE_CFG_DP_X2_MODE;
}
break;
- case 0x4:
- case 0x6:
+ case INTEL_TC_PIN_ASSIGNMENT_D:
+ case INTEL_TC_PIN_ASSIGNMENT_F:
if (width == 1) {
ln0 |= MG_DP_MODE_CFG_DP_X1_MODE;
ln1 |= MG_DP_MODE_CFG_DP_X1_MODE;
@@ -2340,34 +2342,24 @@ static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp,
drm_dbg_kms(display->drm, "Failed to clear FEC detected flags\n");
}
-static int read_fec_detected_status(struct drm_dp_aux *aux)
-{
- int ret;
- u8 status;
-
- ret = drm_dp_dpcd_readb(aux, DP_FEC_STATUS, &status);
- if (ret < 0)
- return ret;
-
- return status;
-}
-
static int wait_for_fec_detected(struct drm_dp_aux *aux, bool enabled)
{
struct intel_display *display = to_intel_display(aux->drm_dev);
int mask = enabled ? DP_FEC_DECODE_EN_DETECTED : DP_FEC_DECODE_DIS_DETECTED;
- int status;
- int err;
+ u8 status = 0;
+ int ret, err;
- err = readx_poll_timeout(read_fec_detected_status, aux, status,
- status & mask || status < 0,
- 10000, 200000);
+ ret = poll_timeout_us(err = drm_dp_dpcd_read_byte(aux, DP_FEC_STATUS, &status),
+ err || (status & mask),
+ 10 * 1000, 200 * 1000, false);
- if (err || status < 0) {
+ /* Either can be non-zero, but not both */
+ ret = ret ?: err;
+ if (ret) {
drm_dbg_kms(display->drm,
- "Failed waiting for FEC %s to get detected: %d (status %d)\n",
- str_enabled_disabled(enabled), err, status);
- return err ? err : status;
+ "Failed waiting for FEC %s to get detected: %d (status 0x%02x)\n",
+ str_enabled_disabled(enabled), ret, status);
+ return ret;
}
return 0;
@@ -2562,6 +2554,7 @@ mtl_ddi_enable_d2d(struct intel_encoder *encoder)
enum port port = encoder->port;
i915_reg_t reg;
u32 set_bits, wait_bits;
+ int ret;
if (DISPLAY_VER(display) < 14)
return;
@@ -2577,7 +2570,11 @@ mtl_ddi_enable_d2d(struct intel_encoder *encoder)
}
intel_de_rmw(display, reg, 0, set_bits);
- if (wait_for_us(intel_de_read(display, reg) & wait_bits, 100)) {
+
+ ret = intel_de_wait_custom(display, reg,
+ wait_bits, wait_bits,
+ 100, 0, NULL);
+ if (ret) {
drm_err(display->drm, "Timeout waiting for D2D Link enable for DDI/PORT_BUF_CTL %c\n",
port_name(port));
}
@@ -3059,6 +3056,7 @@ mtl_ddi_disable_d2d(struct intel_encoder *encoder)
enum port port = encoder->port;
i915_reg_t reg;
u32 clr_bits, wait_bits;
+ int ret;
if (DISPLAY_VER(display) < 14)
return;
@@ -3074,7 +3072,11 @@ mtl_ddi_disable_d2d(struct intel_encoder *encoder)
}
intel_de_rmw(display, reg, clr_bits, 0);
- if (wait_for_us(!(intel_de_read(display, reg) & wait_bits), 100))
+
+ ret = intel_de_wait_custom(display, reg,
+ wait_bits, 0,
+ 100, 0, NULL);
+ if (ret)
drm_err(display->drm, "Timeout waiting for D2D Link disable for DDI/PORT_BUF_CTL %c\n",
port_name(port));
}
@@ -5067,11 +5069,45 @@ static bool port_in_use(struct intel_display *display, enum port port)
return false;
}
+static const char *intel_ddi_encoder_name(struct intel_display *display,
+ enum port port, enum phy phy,
+ struct seq_buf *s)
+{
+ if (DISPLAY_VER(display) >= 13 && port >= PORT_D_XELPD) {
+ seq_buf_printf(s, "DDI %c/PHY %c",
+ port_name(port - PORT_D_XELPD + PORT_D),
+ phy_name(phy));
+ } else if (DISPLAY_VER(display) >= 12) {
+ enum tc_port tc_port = intel_port_to_tc(display, port);
+
+ seq_buf_printf(s, "DDI %s%c/PHY %s%c",
+ port >= PORT_TC1 ? "TC" : "",
+ port >= PORT_TC1 ? port_tc_name(port) : port_name(port),
+ tc_port != TC_PORT_NONE ? "TC" : "",
+ tc_port != TC_PORT_NONE ? tc_port_name(tc_port) : phy_name(phy));
+ } else if (DISPLAY_VER(display) >= 11) {
+ enum tc_port tc_port = intel_port_to_tc(display, port);
+
+ seq_buf_printf(s, "DDI %c%s/PHY %s%c",
+ port_name(port),
+ port >= PORT_C ? " (TC)" : "",
+ tc_port != TC_PORT_NONE ? "TC" : "",
+ tc_port != TC_PORT_NONE ? tc_port_name(tc_port) : phy_name(phy));
+ } else {
+ seq_buf_printf(s, "DDI %c/PHY %c", port_name(port), phy_name(phy));
+ }
+
+ drm_WARN_ON(display->drm, seq_buf_has_overflowed(s));
+
+ return seq_buf_str(s);
+}
+
void intel_ddi_init(struct intel_display *display,
const struct intel_bios_encoder_data *devdata)
{
struct intel_digital_port *dig_port;
struct intel_encoder *encoder;
+ DECLARE_SEQ_BUF(encoder_name, 20);
bool init_hdmi, init_dp;
enum port port;
enum phy phy;
@@ -5149,52 +5185,19 @@ void intel_ddi_init(struct intel_display *display,
phy_name(phy));
}
- dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
+ dig_port = intel_dig_port_alloc();
if (!dig_port)
return;
- dig_port->aux_ch = AUX_CH_NONE;
-
encoder = &dig_port->base;
encoder->devdata = devdata;
- if (DISPLAY_VER(display) >= 13 && port >= PORT_D_XELPD) {
- drm_encoder_init(display->drm, &encoder->base, &intel_ddi_funcs,
- DRM_MODE_ENCODER_TMDS,
- "DDI %c/PHY %c",
- port_name(port - PORT_D_XELPD + PORT_D),
- phy_name(phy));
- } else if (DISPLAY_VER(display) >= 12) {
- enum tc_port tc_port = intel_port_to_tc(display, port);
-
- drm_encoder_init(display->drm, &encoder->base, &intel_ddi_funcs,
- DRM_MODE_ENCODER_TMDS,
- "DDI %s%c/PHY %s%c",
- port >= PORT_TC1 ? "TC" : "",
- port >= PORT_TC1 ? port_tc_name(port) : port_name(port),
- tc_port != TC_PORT_NONE ? "TC" : "",
- tc_port != TC_PORT_NONE ? tc_port_name(tc_port) : phy_name(phy));
- } else if (DISPLAY_VER(display) >= 11) {
- enum tc_port tc_port = intel_port_to_tc(display, port);
-
- drm_encoder_init(display->drm, &encoder->base, &intel_ddi_funcs,
- DRM_MODE_ENCODER_TMDS,
- "DDI %c%s/PHY %s%c",
- port_name(port),
- port >= PORT_C ? " (TC)" : "",
- tc_port != TC_PORT_NONE ? "TC" : "",
- tc_port != TC_PORT_NONE ? tc_port_name(tc_port) : phy_name(phy));
- } else {
- drm_encoder_init(display->drm, &encoder->base, &intel_ddi_funcs,
- DRM_MODE_ENCODER_TMDS,
- "DDI %c/PHY %c", port_name(port), phy_name(phy));
- }
+ drm_encoder_init(display->drm, &encoder->base, &intel_ddi_funcs,
+ DRM_MODE_ENCODER_TMDS, "%s",
+ intel_ddi_encoder_name(display, port, phy, &encoder_name));
intel_encoder_link_check_init(encoder, intel_ddi_link_check);
- mutex_init(&dig_port->hdcp.mutex);
- dig_port->hdcp.num_streams = 0;
-
encoder->hotplug = intel_ddi_hotplug;
encoder->compute_output_type = intel_ddi_compute_output_type;
encoder->compute_config = intel_ddi_compute_config;
@@ -5332,7 +5335,6 @@ void intel_ddi_init(struct intel_display *display,
dig_port->ddi_a_4_lanes = DISPLAY_VER(display) < 11 && ddi_buf_ctl & DDI_A_4_LANES;
- dig_port->dp.output_reg = INVALID_MMIO_REG;
dig_port->max_lanes = intel_ddi_max_lanes(dig_port);
if (need_aux_ch(encoder, init_dp)) {
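
Note: the encoder-name refactor formats the name once into a stack-backed seq_buf and hands drm_encoder_init() a plain "%s", instead of four near-duplicate drm_encoder_init() calls. The same pattern in isolation, reduced to the pre-gen11 format:

        DECLARE_SEQ_BUF(name, 20);      /* 20-byte buffer on the stack */

        seq_buf_printf(&name, "DDI %c/PHY %c", port_name(port), phy_name(phy));
        drm_WARN_ON(display->drm, seq_buf_has_overflowed(&name));
        drm_encoder_init(display->drm, &encoder->base, &intel_ddi_funcs,
                         DRM_MODE_ENCODER_TMDS, "%s", seq_buf_str(&name));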
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 7035c1fc9033..5dca7f96b425 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -76,6 +76,7 @@
#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
+#include "intel_display_wa.h"
#include "intel_dmc.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
@@ -1081,6 +1082,11 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
if (audio_enabling(old_crtc_state, new_crtc_state))
intel_encoders_audio_enable(state, crtc);
+ if (intel_display_wa(display, 14011503117)) {
+ if (old_crtc_state->pch_pfit.enabled != new_crtc_state->pch_pfit.enabled)
+ adl_scaler_ecc_unmask(new_crtc_state);
+ }
+
intel_alpm_post_plane_update(state, crtc);
intel_psr_post_plane_update(state, crtc);
@@ -7265,6 +7271,9 @@ static void intel_atomic_dsb_finish(struct intel_atomic_state *state,
intel_psr_trigger_frame_change_event(new_crtc_state->dsb_commit,
state, crtc);
+ intel_psr_wait_for_idle_dsb(new_crtc_state->dsb_commit,
+ new_crtc_state);
+
if (new_crtc_state->use_dsb)
intel_dsb_vblank_evade(state, new_crtc_state->dsb_commit);
diff --git a/drivers/gpu/drm/i915/display/intel_display_conversion.c b/drivers/gpu/drm/i915/display/intel_display_conversion.c
index 4d565935e2cc..d56065f22655 100644
--- a/drivers/gpu/drm/i915/display/intel_display_conversion.c
+++ b/drivers/gpu/drm/i915/display/intel_display_conversion.c
@@ -4,7 +4,7 @@
#include "i915_drv.h"
#include "intel_display_conversion.h"
-struct intel_display *__i915_to_display(struct drm_i915_private *i915)
+static struct intel_display *__i915_to_display(struct drm_i915_private *i915)
{
return i915->display;
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_conversion.h b/drivers/gpu/drm/i915/display/intel_display_conversion.h
index 46c7208d42ba..d497bc58a73f 100644
--- a/drivers/gpu/drm/i915/display/intel_display_conversion.h
+++ b/drivers/gpu/drm/i915/display/intel_display_conversion.h
@@ -9,20 +9,8 @@
#define __INTEL_DISPLAY_CONVERSION__
struct drm_device;
-struct drm_i915_private;
struct intel_display;
-struct intel_display *__i915_to_display(struct drm_i915_private *i915);
struct intel_display *__drm_to_display(struct drm_device *drm);
-/*
- * Transitional macro to optionally convert struct drm_i915_private * to struct
- * intel_display *, also accepting the latter.
- */
-#define __to_intel_display(p) \
- _Generic(p, \
- const struct drm_i915_private *: __i915_to_display((struct drm_i915_private *)(p)), \
- struct drm_i915_private *: __i915_to_display((struct drm_i915_private *)(p)), \
- const struct intel_display *: (p), \
- struct intel_display *: (p))
#endif /* __INTEL_DISPLAY_CONVERSION__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index ce3f9810c42d..10dddec3796f 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -820,14 +820,14 @@ static const struct drm_info_list intel_display_debugfs_list[] = {
void intel_display_debugfs_register(struct intel_display *display)
{
- struct drm_minor *minor = display->drm->primary;
+ struct dentry *debugfs_root = display->drm->debugfs_root;
- debugfs_create_file("i915_fifo_underrun_reset", 0644, minor->debugfs_root,
+ debugfs_create_file("i915_fifo_underrun_reset", 0644, debugfs_root,
display, &i915_fifo_underrun_reset_ops);
drm_debugfs_create_files(intel_display_debugfs_list,
ARRAY_SIZE(intel_display_debugfs_list),
- minor->debugfs_root, minor);
+ debugfs_root, display->drm->primary);
intel_bios_debugfs_register(display);
intel_cdclk_debugfs_register(display);
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c b/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c
index 88914a1f3f62..de62b774272d 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c
@@ -7,7 +7,6 @@
#include <linux/kernel.h>
#include <drm/drm_drv.h>
-#include <drm/drm_file.h>
#include "intel_display_core.h"
#include "intel_display_debugfs_params.h"
@@ -154,14 +153,14 @@ intel_display_debugfs_create_uint(const char *name, umode_t mode,
/* add a subdirectory with files for each intel display param */
void intel_display_debugfs_params(struct intel_display *display)
{
- struct drm_minor *minor = display->drm->primary;
+ struct dentry *debugfs_root = display->drm->debugfs_root;
struct dentry *dir;
char dirname[16];
snprintf(dirname, sizeof(dirname), "%s_params", display->drm->driver->name);
- dir = debugfs_lookup(dirname, minor->debugfs_root);
+ dir = debugfs_lookup(dirname, debugfs_root);
if (!dir)
- dir = debugfs_create_dir(dirname, minor->debugfs_root);
+ dir = debugfs_create_dir(dirname, debugfs_root);
if (IS_ERR(dir))
return;
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.c b/drivers/gpu/drm/i915/display/intel_display_device.c
index 089cffabbad5..a002bc6ce7b0 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.c
+++ b/drivers/gpu/drm/i915/display/intel_display_device.c
@@ -1354,6 +1354,19 @@ static const struct intel_display_device_info xe2_lpd_display = {
.__runtime_defaults.has_dbuf_overlap_detection = true,
};
+static const struct intel_display_device_info wcl_display = {
+ XE_LPDP_FEATURES,
+
+ .__runtime_defaults.cpu_transcoder_mask =
+ BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C),
+ .__runtime_defaults.pipe_mask =
+ BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
+ .__runtime_defaults.fbc_mask =
+ BIT(INTEL_FBC_A) | BIT(INTEL_FBC_B) | BIT(INTEL_FBC_C),
+ .__runtime_defaults.port_mask =
+ BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_TC1) | BIT(PORT_TC2),
+};
+
static const struct intel_display_device_info xe2_hpd_display = {
XE_LPDP_FEATURES,
.__runtime_defaults.port_mask = BIT(PORT_A) |
@@ -1480,7 +1493,7 @@ static const struct {
{ 14, 1, &xe2_hpd_display },
{ 20, 0, &xe2_lpd_display },
{ 30, 0, &xe2_lpd_display },
- { 30, 2, &xe2_lpd_display },
+ { 30, 2, &wcl_display },
};
static const struct intel_display_device_info *
@@ -1931,6 +1944,11 @@ void intel_display_device_info_print(const struct intel_display_device_info *inf
drm_printf(p, "rawclk rate: %u kHz\n", runtime->rawclk_freq);
}
+bool intel_display_device_present(struct intel_display *display)
+{
+ return display && HAS_DISPLAY(display);
+}
+
/*
* Assuming the device has display hardware, should it be enabled?
*
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.h b/drivers/gpu/drm/i915/display/intel_display_device.h
index 4308822f0415..f329f1beafef 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.h
+++ b/drivers/gpu/drm/i915/display/intel_display_device.h
@@ -9,7 +9,6 @@
#include <linux/bitops.h>
#include <linux/types.h>
-#include "intel_display_conversion.h"
#include "intel_display_limits.h"
struct drm_printer;
@@ -224,8 +223,8 @@ struct intel_display_platforms {
(IS_DISPLAY_VERx100((__display), (ipver), (ipver)) && \
IS_DISPLAY_STEP((__display), (from), (until)))
-#define DISPLAY_INFO(__display) (__to_intel_display(__display)->info.__device_info)
-#define DISPLAY_RUNTIME_INFO(__display) (&__to_intel_display(__display)->info.__runtime_info)
+#define DISPLAY_INFO(__display) ((__display)->info.__device_info)
+#define DISPLAY_RUNTIME_INFO(__display) (&(__display)->info.__runtime_info)
#define DISPLAY_VER(__display) (DISPLAY_RUNTIME_INFO(__display)->ip.ver)
#define DISPLAY_VERx100(__display) (DISPLAY_RUNTIME_INFO(__display)->ip.ver * 100 + \
@@ -236,7 +235,7 @@ struct intel_display_platforms {
#define INTEL_DISPLAY_STEP(__display) (DISPLAY_RUNTIME_INFO(__display)->step)
#define IS_DISPLAY_STEP(__display, since, until) \
- (drm_WARN_ON(__to_intel_display(__display)->drm, INTEL_DISPLAY_STEP(__display) == STEP_NONE), \
+ (drm_WARN_ON((__display)->drm, INTEL_DISPLAY_STEP(__display) == STEP_NONE), \
INTEL_DISPLAY_STEP(__display) >= (since) && INTEL_DISPLAY_STEP(__display) < (until))
#define ARLS_HOST_BRIDGE_PCI_ID1 0x7D1C
@@ -307,6 +306,7 @@ struct intel_display_device_info {
} color;
};
+bool intel_display_device_present(struct intel_display *display);
bool intel_display_device_enabled(struct intel_display *display);
struct intel_display *intel_display_device_probe(struct pci_dev *pdev);
void intel_display_device_remove(struct intel_display *display);
diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c
index 8586ba102605..cf1c14412abe 100644
--- a/drivers/gpu/drm/i915/display/intel_display_driver.c
+++ b/drivers/gpu/drm/i915/display/intel_display_driver.c
@@ -18,6 +18,7 @@
#include <drm/drm_vblank.h>
#include "i915_drv.h"
+#include "i915_utils.h"
#include "i9xx_wm.h"
#include "intel_acpi.h"
#include "intel_atomic.h"
diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.c b/drivers/gpu/drm/i915/display/intel_display_irq.c
index 68157f177b6a..123e054affbe 100644
--- a/drivers/gpu/drm/i915/display/intel_display_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_display_irq.c
@@ -1990,20 +1990,17 @@ void vlv_display_irq_postinstall(struct intel_display *display)
void ibx_display_irq_reset(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
- if (HAS_PCH_NOP(i915))
+ if (HAS_PCH_NOP(display))
return;
gen2_irq_reset(to_intel_uncore(display->drm), SDE_IRQ_REGS);
- if (HAS_PCH_CPT(i915) || HAS_PCH_LPT(i915))
+ if (HAS_PCH_CPT(display) || HAS_PCH_LPT(display))
intel_de_write(display, SERR_INT, 0xffffffff);
}
void gen8_display_irq_reset(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
enum pipe pipe;
if (!HAS_DISPLAY(display))
@@ -2020,7 +2017,7 @@ void gen8_display_irq_reset(struct intel_display *display)
intel_display_irq_regs_reset(display, GEN8_DE_PORT_IRQ_REGS);
intel_display_irq_regs_reset(display, GEN8_DE_MISC_IRQ_REGS);
- if (HAS_PCH_SPLIT(i915))
+ if (HAS_PCH_SPLIT(display))
ibx_display_irq_reset(display);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_params.c b/drivers/gpu/drm/i915/display/intel_display_params.c
index 75316247ee8a..2aed110c5b09 100644
--- a/drivers/gpu/drm/i915/display/intel_display_params.c
+++ b/drivers/gpu/drm/i915/display/intel_display_params.c
@@ -120,6 +120,9 @@ intel_display_param_named_unsafe(enable_psr, int, 0400,
"(0=disabled, 1=enable up to PSR1, 2=enable up to PSR2) "
"Default: -1 (use per-chip default)");
+intel_display_param_named_unsafe(enable_panel_replay, int, 0400,
+ "Enable Panel Replay (0=disabled, 1=enabled). Default: -1 (use per-chip default)");
+
intel_display_param_named(psr_safest_params, bool, 0400,
"Replace PSR VBT parameters by the safest and not optimal ones. This "
"is helpful to detect if PSR issues are related to bad values set in "
diff --git a/drivers/gpu/drm/i915/display/intel_display_params.h b/drivers/gpu/drm/i915/display/intel_display_params.h
index 784e6bae8615..b01bc5700c52 100644
--- a/drivers/gpu/drm/i915/display/intel_display_params.h
+++ b/drivers/gpu/drm/i915/display/intel_display_params.h
@@ -46,6 +46,7 @@ struct drm_printer;
param(bool, enable_dp_mst, true, 0600) \
param(int, enable_fbc, -1, 0600) \
param(int, enable_psr, -1, 0600) \
+ param(int, enable_panel_replay, -1, 0600) \
param(bool, psr_safest_params, false, 0400) \
param(bool, enable_psr2_sel_fetch, true, 0400) \
param(int, enable_dmc_wl, -1, 0400) \
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index c92f3e736228..da4babfd6bcb 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -3,6 +3,7 @@
* Copyright © 2019 Intel Corporation
*/
+#include <linux/iopoll.h>
#include <linux/string_helpers.h>
#include "soc/intel_dram.h"
@@ -10,6 +11,7 @@
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_backlight_regs.h"
#include "intel_cdclk.h"
#include "intel_clock_gating.h"
@@ -1278,6 +1280,7 @@ static void hsw_disable_lcpll(struct intel_display *display,
bool switch_to_fclk, bool allow_power_down)
{
u32 val;
+ int ret;
assert_can_disable_lcpll(display);
@@ -1287,8 +1290,10 @@ static void hsw_disable_lcpll(struct intel_display *display,
val |= LCPLL_CD_SOURCE_FCLK;
intel_de_write(display, LCPLL_CTL, val);
- if (wait_for_us(intel_de_read(display, LCPLL_CTL) &
- LCPLL_CD_SOURCE_FCLK_DONE, 1))
+ ret = intel_de_wait_custom(display, LCPLL_CTL,
+ LCPLL_CD_SOURCE_FCLK_DONE, LCPLL_CD_SOURCE_FCLK_DONE,
+ 1, 0, NULL);
+ if (ret)
drm_err(display->drm, "Switching to FCLK failed\n");
val = intel_de_read(display, LCPLL_CTL);
@@ -1306,8 +1311,10 @@ static void hsw_disable_lcpll(struct intel_display *display,
hsw_write_dcomp(display, val);
ndelay(100);
- if (wait_for((hsw_read_dcomp(display) &
- D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
+ ret = poll_timeout_us(val = hsw_read_dcomp(display),
+ (val & D_COMP_RCOMP_IN_PROGRESS) == 0,
+ 100, 1000, false);
+ if (ret)
drm_err(display->drm, "D_COMP RCOMP still in progress\n");
if (allow_power_down) {
@@ -1324,6 +1331,7 @@ static void hsw_restore_lcpll(struct intel_display *display)
{
struct drm_i915_private __maybe_unused *dev_priv = to_i915(display->drm);
u32 val;
+ int ret;
val = intel_de_read(display, LCPLL_CTL);
@@ -1358,8 +1366,10 @@ static void hsw_restore_lcpll(struct intel_display *display)
if (val & LCPLL_CD_SOURCE_FCLK) {
intel_de_rmw(display, LCPLL_CTL, LCPLL_CD_SOURCE_FCLK, 0);
- if (wait_for_us((intel_de_read(display, LCPLL_CTL) &
- LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
+ ret = intel_de_wait_custom(display, LCPLL_CTL,
+ LCPLL_CD_SOURCE_FCLK_DONE, 0,
+ 1, 0, NULL);
+ if (ret)
drm_err(display->drm,
"Switching back to LCPLL failed\n");
}
@@ -2155,8 +2165,6 @@ void intel_power_domains_resume(struct intel_display *display)
power_domains->init_wakeref =
intel_display_power_get(display, POWER_DOMAIN_INIT);
}
-
- intel_power_domains_verify_state(display);
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c
index 77268802b55e..39b71fffa2cd 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_map.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_map.c
@@ -1717,6 +1717,59 @@ static const struct i915_power_well_desc_list xe3lpd_power_wells[] = {
I915_PW_DESCRIPTORS(xe2lpd_power_wells_pica),
};
+static const struct i915_power_well_desc wcl_power_wells_main[] = {
+ {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_2", &xe3lpd_pwdoms_pw_2,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_2,
+ .id = SKL_DISP_PW_2),
+ ),
+ .ops = &hsw_power_well_ops,
+ .has_vga = true,
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_A", &xelpd_pwdoms_pw_a,
+ .hsw.idx = XELPD_PW_CTL_IDX_PW_A),
+ ),
+ .ops = &hsw_power_well_ops,
+ .irq_pipe_mask = BIT(PIPE_A),
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_B", &xe3lpd_pwdoms_pw_b,
+ .hsw.idx = XELPD_PW_CTL_IDX_PW_B),
+ ),
+ .ops = &hsw_power_well_ops,
+ .irq_pipe_mask = BIT(PIPE_B),
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_C", &xe3lpd_pwdoms_pw_c,
+ .hsw.idx = XELPD_PW_CTL_IDX_PW_C),
+ ),
+ .ops = &hsw_power_well_ops,
+ .irq_pipe_mask = BIT(PIPE_C),
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("AUX_A", &icl_pwdoms_aux_a, .xelpdp.aux_ch = AUX_CH_A),
+ I915_PW("AUX_B", &icl_pwdoms_aux_b, .xelpdp.aux_ch = AUX_CH_B),
+ I915_PW("AUX_TC1", &xelpdp_pwdoms_aux_tc1, .xelpdp.aux_ch = AUX_CH_USBC1),
+ I915_PW("AUX_TC2", &xelpdp_pwdoms_aux_tc2, .xelpdp.aux_ch = AUX_CH_USBC2),
+ ),
+ .ops = &xelpdp_aux_power_well_ops,
+ },
+};
+
+static const struct i915_power_well_desc_list wcl_power_wells[] = {
+ I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
+ I915_PW_DESCRIPTORS(icl_power_wells_pw_1),
+ I915_PW_DESCRIPTORS(xe3lpd_power_wells_dcoff),
+ I915_PW_DESCRIPTORS(wcl_power_wells_main),
+ I915_PW_DESCRIPTORS(xe2lpd_power_wells_pica),
+};
+
static void init_power_well_domains(const struct i915_power_well_instance *inst,
struct i915_power_well *power_well)
{
@@ -1824,7 +1877,9 @@ int intel_display_power_map_init(struct i915_power_domains *power_domains)
return 0;
}
- if (DISPLAY_VER(display) >= 30)
+ if (DISPLAY_VERx100(display) == 3002)
+ return set_power_wells(power_domains, wcl_power_wells);
+ else if (DISPLAY_VER(display) >= 30)
return set_power_wells(power_domains, xe3lpd_power_wells);
else if (DISPLAY_VER(display) >= 20)
return set_power_wells(power_domains, xe2lpd_power_wells);
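
Note: DISPLAY_VERx100() is the IP version times 100 plus (per the truncated intel_display_device.h hunk above, presumably) the release field, so 3002 selects the trimmed WCL power-well list exactly for the { 30, 2, &wcl_display } device-table entry added earlier, while 30.00 still falls through to the generic xe3lpd list:

        /* ver 30, rel 2  ->  30 * 100 + 2 == 3002 (WCL) */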
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c
index 48cac225a809..5e88b930f5aa 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c
@@ -3,6 +3,8 @@
* Copyright © 2022 Intel Corporation
*/
+#include <linux/iopoll.h>
+
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
@@ -499,7 +501,6 @@ static void icl_tc_port_assert_ref_held(struct intel_display *display,
static void icl_tc_cold_exit(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
int ret, tries = 0;
while (1) {
@@ -514,7 +515,7 @@ static void icl_tc_cold_exit(struct intel_display *display)
msleep(1);
/* TODO: turn failure into an error as soon as i915 CI updates ICL IFWI */
- drm_dbg_kms(&i915->drm, "TC cold block %s\n", ret ? "failed" :
+ drm_dbg_kms(display->drm, "TC cold block %s\n", ret ? "failed" :
"succeeded");
}
@@ -527,6 +528,8 @@ icl_tc_phy_aux_power_well_enable(struct intel_display *display,
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
bool is_tbt = power_well->desc->is_tc_tbt;
bool timeout_expected;
+ u32 val;
+ int ret;
icl_tc_port_assert_ref_held(display, power_well, dig_port);
@@ -553,10 +556,11 @@ icl_tc_phy_aux_power_well_enable(struct intel_display *display,
tc_port = TGL_AUX_PW_TO_TC_PORT(i915_power_well_instance(power_well)->hsw.idx);
- if (wait_for(intel_dkl_phy_read(display, DKL_CMN_UC_DW_27(tc_port)) &
- DKL_CMN_UC_DW27_UC_HEALTH, 1))
- drm_warn(display->drm,
- "Timeout waiting TC uC health\n");
+ ret = poll_timeout_us(val = intel_dkl_phy_read(display, DKL_CMN_UC_DW_27(tc_port)),
+ val & DKL_CMN_UC_DW27_UC_HEALTH,
+ 100, 1000, false);
+ if (ret)
+ drm_warn(display->drm, "Timeout waiting TC uC health\n");
}
}
@@ -1122,6 +1126,8 @@ static void vlv_set_power_well(struct intel_display *display,
u32 mask;
u32 state;
u32 ctrl;
+ u32 val;
+ int ret;
mask = PUNIT_PWRGT_MASK(pw_idx);
state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
@@ -1129,10 +1135,8 @@ static void vlv_set_power_well(struct intel_display *display,
vlv_punit_get(display->drm);
-#define COND \
- ((vlv_punit_read(display->drm, PUNIT_REG_PWRGT_STATUS) & mask) == state)
-
- if (COND)
+ val = vlv_punit_read(display->drm, PUNIT_REG_PWRGT_STATUS);
+ if ((val & mask) == state)
goto out;
ctrl = vlv_punit_read(display->drm, PUNIT_REG_PWRGT_CTRL);
@@ -1140,14 +1144,15 @@ static void vlv_set_power_well(struct intel_display *display,
ctrl |= state;
vlv_punit_write(display->drm, PUNIT_REG_PWRGT_CTRL, ctrl);
- if (wait_for(COND, 100))
+ ret = poll_timeout_us(val = vlv_punit_read(display->drm, PUNIT_REG_PWRGT_STATUS),
+ (val & mask) == state,
+ 500, 100 * 1000, false);
+ if (ret)
drm_err(display->drm,
"timeout setting power well state %08x (%08x)\n",
state,
vlv_punit_read(display->drm, PUNIT_REG_PWRGT_CTRL));
-#undef COND
-
out:
vlv_punit_put(display->drm);
}
@@ -1208,7 +1213,7 @@ static void vlv_init_display_clock_gating(struct intel_display *display)
* (and never recovering) in this case. intel_dsi_post_disable() will
* clear it when we turn off the display.
*/
- intel_de_rmw(display, DSPCLK_GATE_D(display),
+ intel_de_rmw(display, VLV_DSPCLK_GATE_D,
~DPOUNIT_CLOCK_GATE_DISABLE, VRHUNIT_CLOCK_GATE_DISABLE);
/*
@@ -1711,23 +1716,24 @@ static void chv_set_pipe_power_well(struct intel_display *display,
enum pipe pipe = PIPE_A;
u32 state;
u32 ctrl;
+ int ret;
state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
vlv_punit_get(display->drm);
-#define COND \
- ((vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)
-
- if (COND)
+ ctrl = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM);
+ if ((ctrl & DP_SSS_MASK(pipe)) == state)
goto out;
- ctrl = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM);
ctrl &= ~DP_SSC_MASK(pipe);
ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
vlv_punit_write(display->drm, PUNIT_REG_DSPSSPM, ctrl);
- if (wait_for(COND, 100))
+ ret = poll_timeout_us(ctrl = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM),
+ (ctrl & DP_SSS_MASK(pipe)) == state,
+ 500, 100 * 1000, false);
+ if (ret)
drm_err(display->drm,
"timeout setting power well state %08x (%08x)\n",
state,
@@ -1765,7 +1771,6 @@ static void chv_pipe_power_well_disable(struct intel_display *display,
static void
tgl_tc_cold_request(struct intel_display *display, bool block)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
u8 tries = 0;
int ret;
@@ -1798,10 +1803,9 @@ tgl_tc_cold_request(struct intel_display *display, bool block)
}
if (ret)
- drm_err(&i915->drm, "TC cold %sblock failed\n",
- block ? "" : "un");
+ drm_err(display->drm, "TC cold %sblock failed\n", block ? "" : "un");
else
- drm_dbg_kms(&i915->drm, "TC cold %sblock succeeded\n",
+ drm_dbg_kms(display->drm, "TC cold %sblock succeeded\n",
block ? "" : "un");
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_regs.h b/drivers/gpu/drm/i915/display/intel_display_regs.h
index 7bd09d981cd2..9d71e26a4fa2 100644
--- a/drivers/gpu/drm/i915/display/intel_display_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_display_regs.h
@@ -2890,6 +2890,7 @@ enum skl_power_gate {
#define DP_PIN_ASSIGNMENT_SHIFT(idx) ((idx) * 4)
#define DP_PIN_ASSIGNMENT_MASK(idx) (0xf << ((idx) * 4))
#define DP_PIN_ASSIGNMENT(idx, x) ((x) << ((idx) * 4))
+/* See enum intel_tc_pin_assignment for the pin assignment field values. */
#define _TCSS_DDI_STATUS_1 0x161500
#define _TCSS_DDI_STATUS_2 0x161504
@@ -2897,6 +2898,7 @@ enum skl_power_gate {
_TCSS_DDI_STATUS_1, \
_TCSS_DDI_STATUS_2))
#define TCSS_DDI_STATUS_PIN_ASSIGNMENT_MASK REG_GENMASK(28, 25)
+/* See enum intel_tc_pin_assignment for the pin assignment field values. */
#define TCSS_DDI_STATUS_READY REG_BIT(2)
#define TCSS_DDI_STATUS_HPD_LIVE_STATUS_TBT REG_BIT(1)
#define TCSS_DDI_STATUS_HPD_LIVE_STATUS_ALT REG_BIT(0)
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index ce45261c4a8f..358ab922d7a7 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -50,15 +50,17 @@
#include "intel_display_limits.h"
#include "intel_display_power.h"
#include "intel_dpll_mgr.h"
+#include "intel_dsi_vbt_defs.h"
#include "intel_wm_types.h"
struct cec_notifier;
struct drm_printer;
-struct __intel_global_objs_state;
struct intel_connector;
struct intel_ddi_buf_trans;
struct intel_fbc;
+struct intel_global_objs_state;
struct intel_hdcp_shim;
+struct intel_panic;
struct intel_tc_port;
/*
@@ -148,6 +150,7 @@ struct intel_framebuffer {
unsigned int vtd_guard;
unsigned int (*panic_tiling)(unsigned int x, unsigned int y, unsigned int width);
+ struct intel_panic *panic;
};
enum intel_hotplug_state {
@@ -593,7 +596,7 @@ struct intel_atomic_state {
struct ref_tracker *wakeref;
- struct __intel_global_objs_state *global_objs;
+ struct intel_global_objs_state *global_objs;
int num_global_objs;
/* Internal commit, as opposed to userspace/client initiated one */
@@ -642,7 +645,6 @@ struct intel_plane_state {
#define PLANE_HAS_FENCE BIT(0)
struct intel_fb_view view;
- u32 phys_dma_addr; /* for cursor_needs_physical */
/* for legacy cursor fb unpin */
struct drm_vblank_work unpin_work;
@@ -665,6 +667,9 @@ struct intel_plane_state {
/* chroma upsampler control register */
u32 cus_ctl;
+ /* surface address register */
+ u32 surf;
+
/*
* scaler_id
* = -1 : not using a scaler
@@ -941,10 +946,6 @@ struct intel_csc_matrix {
u16 postoff[3];
};
-void intel_io_mmio_fw_write(void *ctx, i915_reg_t reg, u32 val);
-
-typedef void (*intel_io_reg_write)(void *ctx, i915_reg_t reg, u32 val);
-
struct intel_crtc_state {
/*
* uapi (drm) state. This is the software state shown to userspace.
@@ -1122,6 +1123,7 @@ struct intel_crtc_state {
bool req_psr2_sdp_prior_scanline;
bool has_panel_replay;
bool wm_level_disabled;
+ bool pkg_c_latency_used;
u32 dc3co_exitline;
u16 su_y_granularity;
u8 active_non_psr_pipes;
@@ -1534,6 +1536,7 @@ struct intel_plane {
bool (*get_hw_state)(struct intel_plane *plane, enum pipe *pipe);
int (*check_plane)(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state);
+ u32 (*surf_offset)(const struct intel_plane_state *plane_state);
int (*min_cdclk)(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
void (*async_flip)(struct intel_dsb *dsb,
@@ -1683,6 +1686,7 @@ struct intel_psr {
u8 entry_setup_frames;
bool link_ok;
+ bool pkg_c_latency_used;
u8 active_non_psr_pipes;
};
diff --git a/drivers/gpu/drm/i915/display/intel_display_wa.c b/drivers/gpu/drm/i915/display/intel_display_wa.c
index f57280e9d041..31cd2c9cd488 100644
--- a/drivers/gpu/drm/i915/display/intel_display_wa.c
+++ b/drivers/gpu/drm/i915/display/intel_display_wa.c
@@ -3,6 +3,8 @@
* Copyright © 2023 Intel Corporation
*/
+#include <drm/drm_print.h>
+
#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_core.h"
@@ -39,3 +41,36 @@ void intel_display_wa_apply(struct intel_display *display)
else if (DISPLAY_VER(display) == 11)
gen11_display_wa_apply(display);
}
+
+/*
+ * Wa_16025573575:
+ * Fixes: Issue with bit-bashing on Xe3-based platforms.
+ * Workaround: Set mask bits in GPIO CTL and preserve them during the
+ * bit-bashing sequence.
+ */
+static bool intel_display_needs_wa_16025573575(struct intel_display *display)
+{
+ return DISPLAY_VERx100(display) == 3000 || DISPLAY_VERx100(display) == 3002;
+}
+
+/*
+ * Wa_14011503117:
+ * Fixes: The DE fatal error is masked before the scaler is enabled
+ * Workaround: Unmask the DE fatal error register after enabling the scaler
+ * and after waiting for at least 1 frame.
+ */
+bool __intel_display_wa(struct intel_display *display, enum intel_display_wa wa, const char *name)
+{
+ switch (wa) {
+ case INTEL_DISPLAY_WA_16023588340:
+ return intel_display_needs_wa_16023588340(display);
+ case INTEL_DISPLAY_WA_16025573575:
+ return intel_display_needs_wa_16025573575(display);
+ case INTEL_DISPLAY_WA_14011503117:
+ return DISPLAY_VER(display) == 13;
+ default:
+ drm_WARN(display->drm, 1, "Missing Wa number: %s\n", name);
+ break;
+ }
+
+ return false;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_display_wa.h b/drivers/gpu/drm/i915/display/intel_display_wa.h
index babd9d16603d..abc1df83f066 100644
--- a/drivers/gpu/drm/i915/display/intel_display_wa.h
+++ b/drivers/gpu/drm/i915/display/intel_display_wa.h
@@ -21,4 +21,15 @@ static inline bool intel_display_needs_wa_16023588340(struct intel_display *disp
bool intel_display_needs_wa_16023588340(struct intel_display *display);
#endif
+enum intel_display_wa {
+ INTEL_DISPLAY_WA_16023588340,
+ INTEL_DISPLAY_WA_16025573575,
+ INTEL_DISPLAY_WA_14011503117,
+};
+
+bool __intel_display_wa(struct intel_display *display, enum intel_display_wa wa, const char *name);
+
+#define intel_display_wa(__display, __wa) \
+ __intel_display_wa((__display), INTEL_DISPLAY_WA_##__wa, __stringify(__wa))
+
#endif
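For context on the new lookup: the macro pastes the bare workaround number onto the enum prefix and stringifies it for the drm_WARN path, so callers pass just the number. A minimal sketch of the expansion, using the real intel_fbc.c caller further below:

	/* intel_display_wa(display, 16023588340) expands to
	 * __intel_display_wa((display), INTEL_DISPLAY_WA_16023588340, "16023588340"),
	 * so an enum value missing from the switch trips the drm_WARN with a
	 * readable name. */
	if (intel_display_wa(display, 16023588340))
		plane_state->no_fbc_reason = "Wa_16023588340";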
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index 744f51c0eab8..77a0199f9ea5 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -1603,9 +1603,7 @@ DEFINE_SHOW_ATTRIBUTE(intel_dmc_debugfs_status);
void intel_dmc_debugfs_register(struct intel_display *display)
{
- struct drm_minor *minor = display->drm->primary;
-
- debugfs_create_file("i915_dmc_info", 0444, minor->debugfs_root,
+ debugfs_create_file("i915_dmc_info", 0444, display->drm->debugfs_root,
display, &intel_dmc_debugfs_status_fops);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 7976fec88606..2eab591a8ef5 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -27,6 +27,7 @@
#include <linux/export.h>
#include <linux/i2c.h>
+#include <linux/iopoll.h>
#include <linux/log2.h>
#include <linux/math.h>
#include <linux/notifier.h>
@@ -174,7 +175,6 @@ int intel_dp_link_symbol_clock(int rate)
static int max_dprx_rate(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
int max_rate;
if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
@@ -183,16 +183,13 @@ static int max_dprx_rate(struct intel_dp *intel_dp)
max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
/*
- * Some broken eDP sinks illegally declare support for
- * HBR3 without TPS4, and are unable to produce a stable
- * output. Reject HBR3 when TPS4 is not available.
+ * Some platform + eDP panel combinations may not reliably support HBR3
+ * due to signal integrity limitations, despite advertising it. Cap the
+ * link rate to HBR2 to avoid unstable configurations on known-affected
+ * machines.
*/
- if (max_rate >= 810000 && !drm_dp_tps4_supported(intel_dp->dpcd)) {
- drm_dbg_kms(display->drm,
- "[ENCODER:%d:%s] Rejecting HBR3 due to missing TPS4 support\n",
- encoder->base.base.id, encoder->base.name);
- max_rate = 540000;
- }
+ if (intel_dp_is_edp(intel_dp) && intel_has_quirk(display, QUIRK_EDP_LIMIT_RATE_HBR2))
+ max_rate = min(max_rate, 540000);
return max_rate;
}
@@ -1418,6 +1415,7 @@ intel_dp_mode_valid(struct drm_connector *_connector,
struct intel_display *display = to_intel_display(_connector->dev);
struct intel_connector *connector = to_intel_connector(_connector);
struct intel_dp *intel_dp = intel_attached_dp(connector);
+ enum intel_output_format sink_format, output_format;
const struct drm_display_mode *fixed_mode;
int target_clock = mode->clock;
int max_rate, mode_rate, max_lanes, max_link_clock;
@@ -1451,6 +1449,13 @@ intel_dp_mode_valid(struct drm_connector *_connector,
mode->hdisplay, target_clock);
max_dotclk *= num_joined_pipes;
+ sink_format = intel_dp_sink_format(connector, mode);
+ output_format = intel_dp_output_format(connector, sink_format);
+
+ status = intel_pfit_mode_valid(display, mode, output_format, num_joined_pipes);
+ if (status != MODE_OK)
+ return status;
+
if (target_clock > max_dotclk)
return MODE_CLOCK_HIGH;
@@ -1466,11 +1471,8 @@ intel_dp_mode_valid(struct drm_connector *_connector,
intel_dp_mode_min_output_bpp(connector, mode));
if (intel_dp_has_dsc(connector)) {
- enum intel_output_format sink_format, output_format;
int pipe_bpp;
- sink_format = intel_dp_sink_format(connector, mode);
- output_format = intel_dp_output_format(connector, sink_format);
/*
* TBD pass the connector BPC,
* for now U8_MAX so that max BPC on that platform would be picked
@@ -2535,13 +2537,15 @@ intel_dp_dsc_compute_pipe_bpp_limits(struct intel_dp *intel_dp,
bool
intel_dp_compute_config_limits(struct intel_dp *intel_dp,
- struct intel_connector *connector,
+ struct drm_connector_state *conn_state,
struct intel_crtc_state *crtc_state,
bool respect_downstream_limits,
bool dsc,
struct link_config_limits *limits)
{
bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
+ struct intel_connector *connector =
+ to_intel_connector(conn_state->connector);
limits->min_rate = intel_dp_min_link_rate(intel_dp);
limits->max_rate = intel_dp_max_link_rate(intel_dp);
@@ -2551,7 +2555,8 @@ intel_dp_compute_config_limits(struct intel_dp *intel_dp,
limits->min_lane_count = intel_dp_min_lane_count(intel_dp);
limits->max_lane_count = intel_dp_max_lane_count(intel_dp);
- limits->pipe.min_bpp = intel_dp_min_bpp(crtc_state->output_format);
+ limits->pipe.min_bpp = intel_dp_in_hdr_mode(conn_state) ? 30 :
+ intel_dp_min_bpp(crtc_state->output_format);
if (is_mst) {
/*
* FIXME: If all the streams can't fit into the link with their
@@ -2650,7 +2655,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
joiner_needs_dsc = intel_dp_joiner_needs_dsc(display, num_joined_pipes);
dsc_needed = joiner_needs_dsc || intel_dp->force_dsc_en ||
- !intel_dp_compute_config_limits(intel_dp, connector, pipe_config,
+ !intel_dp_compute_config_limits(intel_dp, conn_state, pipe_config,
respect_downstream_limits,
false,
&limits);
@@ -2684,7 +2689,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
str_yes_no(ret), str_yes_no(joiner_needs_dsc),
str_yes_no(intel_dp->force_dsc_en));
- if (!intel_dp_compute_config_limits(intel_dp, connector, pipe_config,
+ if (!intel_dp_compute_config_limits(intel_dp, conn_state, pipe_config,
respect_downstream_limits,
true,
&limits))
@@ -2916,6 +2921,19 @@ static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp,
}
}
+bool
+intel_dp_in_hdr_mode(const struct drm_connector_state *conn_state)
+{
+ struct hdr_output_metadata *hdr_metadata;
+
+ if (!conn_state->hdr_output_metadata)
+ return false;
+
+ hdr_metadata = conn_state->hdr_output_metadata->data;
+
+ return hdr_metadata->hdmi_metadata_type1.eotf == HDMI_EOTF_SMPTE_ST2084;
+}
+
static void
intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state,
@@ -3181,7 +3199,26 @@ int intel_dp_compute_min_hblank(struct intel_crtc_state *crtc_state,
*/
min_hblank = min_hblank - 2;
- min_hblank = min(10, min_hblank);
+ /*
+ * The min_hblank formula is undergoing a change; to avoid underruns,
+ * compare the value recommended in the spec with the calculated one
+ * and use the minimum of the two.
+ */
+ if (intel_dp_is_uhbr(crtc_state)) {
+ /*
+ * Note: Bspec requires a min_hblank of 2 for YCBCR420
+ * with compressed bpp 6, but the minimum compressed bpp
+ * supported by the driver is 8.
+ */
+ drm_WARN_ON(display->drm,
+ (crtc_state->dsc.compression_enable &&
+ crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 &&
+ crtc_state->dsc.compressed_bpp_x16 < fxp_q4_from_int(8)));
+ min_hblank = min(3, min_hblank);
+ } else {
+ min_hblank = min(10, min_hblank);
+ }
+
crtc_state->min_hblank = min_hblank;
return 0;
@@ -3842,10 +3879,11 @@ static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp)
if (ret < 0)
return ret;
/* Wait for PCON to be FRL Ready */
- wait_for(is_active = drm_dp_pcon_is_frl_ready(&intel_dp->aux) == true, TIMEOUT_FRL_READY_MS);
-
- if (!is_active)
- return -ETIMEDOUT;
+ ret = poll_timeout_us(is_active = drm_dp_pcon_is_frl_ready(&intel_dp->aux),
+ is_active,
+ 1000, TIMEOUT_FRL_READY_MS * 1000, false);
+ if (ret)
+ return ret;
ret = drm_dp_pcon_frl_configure_1(&intel_dp->aux, max_frl_bw,
DP_PCON_ENABLE_SEQUENTIAL_LINK);
@@ -3862,12 +3900,11 @@ static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp)
* Wait for FRL to be completed
* Check if the HDMI Link is up and active.
*/
- wait_for(is_active =
- intel_dp_pcon_is_frl_trained(intel_dp, max_frl_bw_mask, &frl_trained_mask),
- TIMEOUT_HDMI_LINK_ACTIVE_MS);
-
- if (!is_active)
- return -ETIMEDOUT;
+ ret = poll_timeout_us(is_active = intel_dp_pcon_is_frl_trained(intel_dp, max_frl_bw_mask, &frl_trained_mask),
+ is_active,
+ 1000, TIMEOUT_HDMI_LINK_ACTIVE_MS * 1000, false);
+ if (ret)
+ return ret;
frl_trained:
drm_dbg(display->drm, "FRL_TRAINED_MASK = %u\n", frl_trained_mask);
@@ -4277,10 +4314,26 @@ static void intel_edp_mso_init(struct intel_dp *intel_dp)
}
static void
+intel_edp_set_data_override_rates(struct intel_dp *intel_dp)
+{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ int *sink_rates = intel_dp->sink_rates;
+ int i, count = 0;
+
+ for (i = 0; i < intel_dp->num_sink_rates; i++) {
+ if (intel_bios_encoder_reject_edp_rate(encoder->devdata,
+ intel_dp->sink_rates[i]))
+ continue;
+
+ sink_rates[count++] = intel_dp->sink_rates[i];
+ }
+ intel_dp->num_sink_rates = count;
+}
+
+static void
intel_edp_set_sink_rates(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
intel_dp->num_sink_rates = 0;
@@ -4306,16 +4359,13 @@ intel_edp_set_sink_rates(struct intel_dp *intel_dp)
break;
/*
- * Some broken eDP sinks illegally declare support for
- * HBR3 without TPS4, and are unable to produce a stable
- * output. Reject HBR3 when TPS4 is not available.
+ * Some platforms cannot reliably drive HBR3 rates due to PHY limitations,
+ * even if the sink advertises support. Reject any sink rates above HBR2
+ * on known-affected machines to keep the output stable.
*/
- if (rate >= 810000 && !drm_dp_tps4_supported(intel_dp->dpcd)) {
- drm_dbg_kms(display->drm,
- "[ENCODER:%d:%s] Rejecting HBR3 due to missing TPS4 support\n",
- encoder->base.base.id, encoder->base.name);
+ if (rate > 540000 &&
+ intel_has_quirk(display, QUIRK_EDP_LIMIT_RATE_HBR2))
break;
- }
intel_dp->sink_rates[i] = rate;
}
@@ -4330,6 +4380,8 @@ intel_edp_set_sink_rates(struct intel_dp *intel_dp)
intel_dp->use_rate_select = true;
else
intel_dp_set_sink_rates(intel_dp);
+
+ intel_edp_set_data_override_rates(intel_dp);
}
static bool
@@ -5611,14 +5663,9 @@ bool intel_digital_port_connected_locked(struct intel_encoder *encoder)
intel_wakeref_t wakeref;
with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref) {
- unsigned long wait_expires = jiffies + msecs_to_jiffies_timeout(4);
-
- do {
- is_connected = dig_port->connected(encoder);
- if (is_connected || is_glitch_free)
- break;
- usleep_range(10, 30);
- } while (time_before(jiffies, wait_expires));
+ poll_timeout_us(is_connected = dig_port->connected(encoder),
+ is_connected || is_glitch_free,
+ 30, 4000, false);
}
return is_connected;
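The wait_for()/__wait_for() conversions in this file all adopt the same iopoll shape. A standalone sketch of the semantics, with argument meanings as used at these call sites (polled expression, success condition, sleep interval in us, timeout in us, and whether to sleep before the first evaluation):

	bool is_active;
	int ret;

	/* Poll every 1 ms for up to 10 ms; returns 0 once the condition
	 * holds, -ETIMEDOUT otherwise. The trailing false disables sleeping
	 * before the first evaluation. */
	ret = poll_timeout_us(is_active = drm_dp_pcon_is_frl_ready(&intel_dp->aux),
			      is_active,
			      1000, 10 * 1000, false);
	if (ret)
		return ret;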
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index 0657f5681196..f90cfd1dbbd0 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -193,7 +193,7 @@ void intel_dp_wait_source_oui(struct intel_dp *intel_dp);
int intel_dp_output_bpp(enum intel_output_format output_format, int bpp);
bool intel_dp_compute_config_limits(struct intel_dp *intel_dp,
- struct intel_connector *connector,
+ struct drm_connector_state *conn_state,
struct intel_crtc_state *crtc_state,
bool respect_downstream_limits,
bool dsc,
@@ -214,5 +214,6 @@ int intel_dp_compute_min_hblank(struct intel_crtc_state *crtc_state,
int intel_dp_dsc_bpp_step_x16(const struct intel_connector *connector);
void intel_dp_dpcd_set_probe(struct intel_dp *intel_dp, bool force_on_external);
+bool intel_dp_in_hdr_mode(const struct drm_connector_state *conn_state);
#endif /* __INTEL_DP_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 0a3a3f6a5f9d..eb05ef4bd9f6 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -225,19 +225,6 @@ intel_dp_aux_hdr_set_aux_backlight(const struct drm_connector_state *conn_state,
connector->base.base.id, connector->base.name);
}
-static bool
-intel_dp_in_hdr_mode(const struct drm_connector_state *conn_state)
-{
- struct hdr_output_metadata *hdr_metadata;
-
- if (!conn_state->hdr_output_metadata)
- return false;
-
- hdr_metadata = conn_state->hdr_output_metadata->data;
-
- return hdr_metadata->hdmi_metadata_type1.eotf == HDMI_EOTF_SMPTE_ST2084;
-}
-
static void
intel_dp_aux_hdr_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
@@ -521,9 +508,6 @@ static void intel_dp_aux_vesa_disable_backlight(const struct drm_connector_state
struct intel_panel *panel = &connector->panel;
struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
- if (panel->backlight.edp.vesa.luminance_control_support)
- return;
-
drm_edp_backlight_disable(&intel_dp->aux, &panel->backlight.edp.vesa.info);
if (!panel->backlight.edp.vesa.info.aux_enable)
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index a479b63112ea..27f3716bdc1f 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -22,6 +22,7 @@
*/
#include <linux/debugfs.h>
+#include <linux/iopoll.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_print.h>
@@ -478,12 +479,13 @@ static u8 intel_dp_get_lane_adjust_train(struct intel_dp *intel_dp,
_TRAIN_REQ_TX_FFE_ARGS(link_status, 2), \
_TRAIN_REQ_TX_FFE_ARGS(link_status, 3)
-void
+bool
intel_dp_get_adjust_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy,
const u8 link_status[DP_LINK_STATUS_SIZE])
{
+ bool changed = false;
int lane;
if (intel_dp_is_uhbr(crtc_state)) {
@@ -502,10 +504,17 @@ intel_dp_get_adjust_train(struct intel_dp *intel_dp,
TRAIN_REQ_PREEMPH_ARGS(link_status));
}
- for (lane = 0; lane < 4; lane++)
- intel_dp->train_set[lane] =
- intel_dp_get_lane_adjust_train(intel_dp, crtc_state,
- dp_phy, link_status, lane);
+ for (lane = 0; lane < 4; lane++) {
+ u8 new = intel_dp_get_lane_adjust_train(intel_dp, crtc_state,
+ dp_phy, link_status, lane);
+ if (intel_dp->train_set[lane] == new)
+ continue;
+
+ intel_dp->train_set[lane] = new;
+ changed = true;
+ }
+
+ return changed;
}
static int intel_dp_training_pattern_set_reg(struct intel_dp *intel_dp,
@@ -758,6 +767,63 @@ void intel_dp_link_training_set_bw(struct intel_dp *intel_dp,
}
}
+/*
+ * Pick Training Pattern Sequence (TPS) for channel equalization. 128b/132b TPS2
+ * for UHBR+, TPS4 for HBR3 or for 1.4 devices that support it, TPS3 for HBR2 or
+ * 1.2 devices that support it, TPS2 otherwise.
+ */
+static u32 intel_dp_training_pattern(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ enum drm_dp_phy dp_phy)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+ bool source_tps3, sink_tps3, source_tps4, sink_tps4;
+
+ /* UHBR+ use separate 128b/132b TPS2 */
+ if (intel_dp_is_uhbr(crtc_state))
+ return DP_TRAINING_PATTERN_2;
+
+ /*
+ * TPS4 support is mandatory for all downstream devices that
+ * support HBR3. There are no known eDP panels that support
+ * TPS4 as of Feb 2018 as per VESA eDP_v1.4b_E1 specification.
+ * LTTPRs must support TPS4.
+ */
+ source_tps4 = intel_dp_source_supports_tps4(display);
+ sink_tps4 = dp_phy != DP_PHY_DPRX ||
+ drm_dp_tps4_supported(intel_dp->dpcd);
+ if (source_tps4 && sink_tps4) {
+ return DP_TRAINING_PATTERN_4;
+ } else if (crtc_state->port_clock == 810000) {
+ if (!source_tps4)
+ lt_dbg(intel_dp, dp_phy,
+ "8.1 Gbps link rate without source TPS4 support\n");
+ if (!sink_tps4)
+ lt_dbg(intel_dp, dp_phy,
+ "8.1 Gbps link rate without sink TPS4 support\n");
+ }
+
+ /*
+ * TPS3 support is mandatory for downstream devices that
+ * support HBR2. However, not all sinks follow the spec.
+ */
+ source_tps3 = intel_dp_source_supports_tps3(display);
+ sink_tps3 = dp_phy != DP_PHY_DPRX ||
+ drm_dp_tps3_supported(intel_dp->dpcd);
+ if (source_tps3 && sink_tps3) {
+ return DP_TRAINING_PATTERN_3;
+ } else if (crtc_state->port_clock >= 540000) {
+ if (!source_tps3)
+ lt_dbg(intel_dp, dp_phy,
+ ">=5.4/6.48 Gbps link rate without source TPS3 support\n");
+ if (!sink_tps3)
+ lt_dbg(intel_dp, dp_phy,
+ ">=5.4/6.48 Gbps link rate without sink TPS3 support\n");
+ }
+
+ return DP_TRAINING_PATTERN_2;
+}
+
static void intel_dp_update_link_bw_set(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
u8 link_bw, u8 rate_select)
@@ -950,63 +1016,6 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp,
}
/*
- * Pick Training Pattern Sequence (TPS) for channel equalization. 128b/132b TPS2
- * for UHBR+, TPS4 for HBR3 or for 1.4 devices that support it, TPS3 for HBR2 or
- * 1.2 devices that support it, TPS2 otherwise.
- */
-static u32 intel_dp_training_pattern(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- enum drm_dp_phy dp_phy)
-{
- struct intel_display *display = to_intel_display(intel_dp);
- bool source_tps3, sink_tps3, source_tps4, sink_tps4;
-
- /* UHBR+ use separate 128b/132b TPS2 */
- if (intel_dp_is_uhbr(crtc_state))
- return DP_TRAINING_PATTERN_2;
-
- /*
- * TPS4 support is mandatory for all downstream devices that
- * support HBR3. There are no known eDP panels that support
- * TPS4 as of Feb 2018 as per VESA eDP_v1.4b_E1 specification.
- * LTTPRs must support TPS4.
- */
- source_tps4 = intel_dp_source_supports_tps4(display);
- sink_tps4 = dp_phy != DP_PHY_DPRX ||
- drm_dp_tps4_supported(intel_dp->dpcd);
- if (source_tps4 && sink_tps4) {
- return DP_TRAINING_PATTERN_4;
- } else if (crtc_state->port_clock == 810000) {
- if (!source_tps4)
- lt_dbg(intel_dp, dp_phy,
- "8.1 Gbps link rate without source TPS4 support\n");
- if (!sink_tps4)
- lt_dbg(intel_dp, dp_phy,
- "8.1 Gbps link rate without sink TPS4 support\n");
- }
-
- /*
- * TPS3 support is mandatory for downstream devices that
- * support HBR2. However, not all sinks follow the spec.
- */
- source_tps3 = intel_dp_source_supports_tps3(display);
- sink_tps3 = dp_phy != DP_PHY_DPRX ||
- drm_dp_tps3_supported(intel_dp->dpcd);
- if (source_tps3 && sink_tps3) {
- return DP_TRAINING_PATTERN_3;
- } else if (crtc_state->port_clock >= 540000) {
- if (!source_tps3)
- lt_dbg(intel_dp, dp_phy,
- ">=5.4/6.48 Gbps link rate without source TPS3 support\n");
- if (!sink_tps3)
- lt_dbg(intel_dp, dp_phy,
- ">=5.4/6.48 Gbps link rate without sink TPS3 support\n");
- }
-
- return DP_TRAINING_PATTERN_2;
-}
-
-/*
* Perform the link training channel equalization phase on the given DP PHY
* using one of training pattern 2, 3 or 4 depending on the source and
* sink capabilities.
@@ -1127,16 +1136,19 @@ void intel_dp_stop_link_train(struct intel_dp *intel_dp,
{
struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ int ret;
intel_dp->link.active = true;
- intel_dp_disable_dpcd_training_pattern(intel_dp, DP_PHY_DPRX);
intel_dp_program_link_training_pattern(intel_dp, crtc_state, DP_PHY_DPRX,
DP_TRAINING_PATTERN_DISABLE);
- if (intel_dp_is_uhbr(crtc_state) &&
- wait_for(intel_dp_128b132b_intra_hop(intel_dp, crtc_state) == 0, 500)) {
- lt_dbg(intel_dp, DP_PHY_DPRX, "128b/132b intra-hop not clearing\n");
+ if (intel_dp_is_uhbr(crtc_state)) {
+ ret = poll_timeout_us(ret = intel_dp_128b132b_intra_hop(intel_dp, crtc_state),
+ ret == 0,
+ 500, 500 * 1000, false);
+ if (ret)
+ lt_dbg(intel_dp, DP_PHY_DPRX, "128b/132b intra-hop not clearing\n");
}
intel_hpd_unblock(encoder);
@@ -1371,8 +1383,8 @@ intel_dp_link_train_all_phys(struct intel_dp *intel_dp,
if (ret)
ret = intel_dp_link_train_phy(intel_dp, crtc_state, DP_PHY_DPRX);
- if (intel_dp->set_idle_link_train)
- intel_dp->set_idle_link_train(intel_dp, crtc_state);
+ intel_dp_disable_dpcd_training_pattern(intel_dp, DP_PHY_DPRX);
+ intel_dp->set_idle_link_train(intel_dp, crtc_state);
return ret;
}
@@ -1574,8 +1586,12 @@ intel_dp_128b132b_link_train(struct intel_dp *intel_dp,
int lttpr_count)
{
bool passed = false;
+ int ret;
- if (wait_for(intel_dp_128b132b_intra_hop(intel_dp, crtc_state) == 0, 500)) {
+ ret = poll_timeout_us(ret = intel_dp_128b132b_intra_hop(intel_dp, crtc_state),
+ ret == 0,
+ 500, 500 * 1000, false);
+ if (ret) {
lt_err(intel_dp, DP_PHY_DPRX, "128b/132b intra-hop not clear\n");
goto out;
}
@@ -1602,6 +1618,8 @@ out:
intel_dp_program_link_training_pattern(intel_dp, crtc_state,
DP_PHY_DPRX, DP_TRAINING_PATTERN_2);
+ intel_dp_disable_dpcd_training_pattern(intel_dp, DP_PHY_DPRX);
+
return passed;
}
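The void-to-bool change to intel_dp_get_adjust_train() lets callers notice when the sink requested the same drive settings as before. A hypothetical caller sketch (the early-out below is illustrative, not taken from this series):

	/* Hypothetical: skip rewriting TRAINING_LANEx_SET when none of the
	 * requested voltage swing / pre-emphasis values changed. */
	if (!intel_dp_get_adjust_train(intel_dp, crtc_state, dp_phy, link_status))
		return true;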
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.h b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
index 46614124569f..1ba22ed6db08 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
@@ -23,7 +23,7 @@ void intel_dp_link_training_set_bw(struct intel_dp *intel_dp,
int link_bw, int rate_select, int lane_count,
bool enhanced_framing);
-void intel_dp_get_adjust_train(struct intel_dp *intel_dp,
+bool intel_dp_get_adjust_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy,
const u8 link_status[DP_LINK_STATUS_SIZE]);
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 74497c9a0554..352f7ef29c28 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -611,12 +611,15 @@ adjust_limits_for_dsc_hblank_expansion_quirk(struct intel_dp *intel_dp,
static bool
mst_stream_compute_config_limits(struct intel_dp *intel_dp,
- struct intel_connector *connector,
+ struct drm_connector_state *conn_state,
struct intel_crtc_state *crtc_state,
bool dsc,
struct link_config_limits *limits)
{
- if (!intel_dp_compute_config_limits(intel_dp, connector,
+ struct intel_connector *connector =
+ to_intel_connector(conn_state->connector);
+
+ if (!intel_dp_compute_config_limits(intel_dp, conn_state,
crtc_state, false, dsc,
limits))
return false;
@@ -665,7 +668,7 @@ static int mst_stream_compute_config(struct intel_encoder *encoder,
joiner_needs_dsc = intel_dp_joiner_needs_dsc(display, num_joined_pipes);
dsc_needed = joiner_needs_dsc || intel_dp->force_dsc_en ||
- !mst_stream_compute_config_limits(intel_dp, connector,
+ !mst_stream_compute_config_limits(intel_dp, conn_state,
pipe_config, false, &limits);
if (!dsc_needed) {
@@ -691,7 +694,7 @@ static int mst_stream_compute_config(struct intel_encoder *encoder,
str_yes_no(intel_dp->force_dsc_en));
- if (!mst_stream_compute_config_limits(intel_dp, connector,
+ if (!mst_stream_compute_config_limits(intel_dp, conn_state,
pipe_config, true,
&limits))
return -EINVAL;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_test.c b/drivers/gpu/drm/i915/display/intel_dp_test.c
index 6ed5012c5fac..5cfa1dd411da 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_test.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_test.c
@@ -6,7 +6,6 @@
#include <drm/display/drm_dp.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_edid.h>
-#include <drm/drm_file.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
@@ -753,13 +752,12 @@ static const struct {
void intel_dp_test_debugfs_register(struct intel_display *display)
{
- struct drm_minor *minor = display->drm->primary;
int i;
for (i = 0; i < ARRAY_SIZE(intel_display_debugfs_files); i++) {
debugfs_create_file(intel_display_debugfs_files[i].name,
0644,
- minor->debugfs_root,
+ display->drm->debugfs_root,
display,
intel_display_debugfs_files[i].fops);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index 33e0398120c8..8ea96cc524a1 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -2046,6 +2046,7 @@ static void bxt_ddi_pll_enable(struct intel_display *display,
enum dpio_phy phy = DPIO_PHY0;
enum dpio_channel ch = DPIO_CH0;
u32 temp;
+ int ret;
bxt_port_to_phy_channel(display, port, &phy, &ch);
@@ -2056,8 +2057,10 @@ static void bxt_ddi_pll_enable(struct intel_display *display,
intel_de_rmw(display, BXT_PORT_PLL_ENABLE(port),
0, PORT_PLL_POWER_ENABLE);
- if (wait_for_us((intel_de_read(display, BXT_PORT_PLL_ENABLE(port)) &
- PORT_PLL_POWER_STATE), 200))
+ ret = intel_de_wait_custom(display, BXT_PORT_PLL_ENABLE(port),
+ PORT_PLL_POWER_STATE, PORT_PLL_POWER_STATE,
+ 200, 0, NULL);
+ if (ret)
drm_err(display->drm,
"Power state not set for PLL:%d\n", port);
}
@@ -2119,8 +2122,10 @@ static void bxt_ddi_pll_enable(struct intel_display *display,
intel_de_rmw(display, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_ENABLE);
intel_de_posting_read(display, BXT_PORT_PLL_ENABLE(port));
- if (wait_for_us((intel_de_read(display, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
- 200))
+ ret = intel_de_wait_custom(display, BXT_PORT_PLL_ENABLE(port),
+ PORT_PLL_LOCK, PORT_PLL_LOCK,
+ 200, 0, NULL);
+ if (ret)
drm_err(display->drm, "PLL %d not locked\n", port);
if (display->platform.geminilake) {
@@ -2144,6 +2149,7 @@ static void bxt_ddi_pll_disable(struct intel_display *display,
struct intel_dpll *pll)
{
enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
+ int ret;
intel_de_rmw(display, BXT_PORT_PLL_ENABLE(port), PORT_PLL_ENABLE, 0);
intel_de_posting_read(display, BXT_PORT_PLL_ENABLE(port));
@@ -2152,8 +2158,10 @@ static void bxt_ddi_pll_disable(struct intel_display *display,
intel_de_rmw(display, BXT_PORT_PLL_ENABLE(port),
PORT_PLL_POWER_ENABLE, 0);
- if (wait_for_us(!(intel_de_read(display, BXT_PORT_PLL_ENABLE(port)) &
- PORT_PLL_POWER_STATE), 200))
+ ret = intel_de_wait_custom(display, BXT_PORT_PLL_ENABLE(port),
+ PORT_PLL_POWER_STATE, 0,
+ 200, 0, NULL);
+ if (ret)
drm_err(display->drm,
"Power state not reset for PLL:%d\n", port);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dpt.c b/drivers/gpu/drm/i915/display/intel_dpt.c
index aea249e2699f..c0a817018d08 100644
--- a/drivers/gpu/drm/i915/display/intel_dpt.c
+++ b/drivers/gpu/drm/i915/display/intel_dpt.c
@@ -33,8 +33,6 @@ i915_vm_to_dpt(struct i915_address_space *vm)
return container_of(vm, struct i915_dpt, vm);
}
-#define dpt_total_entries(dpt) ((dpt)->vm.total >> PAGE_SHIFT)
-
static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
{
writeq(pte, addr);
@@ -322,5 +320,5 @@ void intel_dpt_destroy(struct i915_address_space *vm)
u64 intel_dpt_offset(struct i915_vma *dpt_vma)
{
- return dpt_vma->node.start;
+ return i915_vma_offset(dpt_vma);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index 53d8ae3a70e9..dee44d45b668 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -4,10 +4,11 @@
*
*/
+#include <linux/iopoll.h>
+
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
-#include "i915_utils.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_regs.h"
@@ -871,8 +872,13 @@ void intel_dsb_wait(struct intel_dsb *dsb)
struct intel_crtc *crtc = dsb->crtc;
struct intel_display *display = to_intel_display(crtc->base.dev);
enum pipe pipe = crtc->pipe;
+ bool is_busy;
+ int ret;
- if (wait_for(!is_dsb_busy(display, pipe, dsb->id), 1)) {
+ ret = poll_timeout_us(is_busy = is_dsb_busy(display, pipe, dsb->id),
+ !is_busy,
+ 100, 1000, false);
+ if (ret) {
u32 offset = intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf);
intel_de_write_fw(display, DSB_CTRL(pipe, dsb->id),
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index e6a851d276f8..23402408e172 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -777,7 +777,7 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
intel_dsi->init_count = mipi_config->master_init_timer;
intel_dsi->bw_timer = mipi_config->dbi_bw_timer;
intel_dsi->video_frmt_cfg_bits =
- mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0;
+ mipi_config->bta_disable ? DISABLE_VIDEO_BTA : 0;
intel_dsi->bgr_enabled = mipi_config->rgb_flip;
/* Starting point, adjusted depending on dual link and burst mode */
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_dsi_vbt_defs.h
new file mode 100644
index 000000000000..edc7331dcca2
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt_defs.h
@@ -0,0 +1,197 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __INTEL_DSI_VBT_DEFS_H__
+#define __INTEL_DSI_VBT_DEFS_H__
+
+#include <linux/types.h>
+
+/*
+ * MIPI Sequence Block definitions
+ *
+ * Note the VBT spec has AssertReset / DeassertReset swapped from their
+ * usual naming, we use the proper names here to avoid confusion when
+ * reading the code.
+ */
+enum mipi_seq {
+ MIPI_SEQ_END = 0,
+ MIPI_SEQ_DEASSERT_RESET, /* Spec says MipiAssertResetPin */
+ MIPI_SEQ_INIT_OTP,
+ MIPI_SEQ_DISPLAY_ON,
+ MIPI_SEQ_DISPLAY_OFF,
+ MIPI_SEQ_ASSERT_RESET, /* Spec says MipiDeassertResetPin */
+ MIPI_SEQ_BACKLIGHT_ON, /* sequence block v2+ */
+ MIPI_SEQ_BACKLIGHT_OFF, /* sequence block v2+ */
+ MIPI_SEQ_TEAR_ON, /* sequence block v2+ */
+ MIPI_SEQ_TEAR_OFF, /* sequence block v3+ */
+ MIPI_SEQ_POWER_ON, /* sequence block v3+ */
+ MIPI_SEQ_POWER_OFF, /* sequence block v3+ */
+ MIPI_SEQ_MAX
+};
+
+enum mipi_seq_element {
+ MIPI_SEQ_ELEM_END = 0,
+ MIPI_SEQ_ELEM_SEND_PKT,
+ MIPI_SEQ_ELEM_DELAY,
+ MIPI_SEQ_ELEM_GPIO,
+ MIPI_SEQ_ELEM_I2C, /* sequence block v2+ */
+ MIPI_SEQ_ELEM_SPI, /* sequence block v3+ */
+ MIPI_SEQ_ELEM_PMIC, /* sequence block v3+ */
+ MIPI_SEQ_ELEM_MAX
+};
+
+#define MIPI_DSI_UNDEFINED_PANEL_ID 0
+#define MIPI_DSI_GENERIC_PANEL_ID 1
+
+struct mipi_config {
+ u16 panel_id;
+
+ /* General Params */
+ struct {
+ u32 enable_dithering:1;
+ u32 rsvd1:1;
+ u32 is_bridge:1;
+
+ u32 panel_arch_type:2;
+ u32 is_cmd_mode:1;
+
+#define NON_BURST_SYNC_PULSE 0x1
+#define NON_BURST_SYNC_EVENTS 0x2
+#define BURST_MODE 0x3
+ u32 video_transfer_mode:2;
+
+ u32 cabc_supported:1;
+#define PPS_BLC_PMIC 0
+#define PPS_BLC_SOC 1
+ u32 pwm_blc:1;
+
+#define PIXEL_FORMAT_RGB565 0x1
+#define PIXEL_FORMAT_RGB666 0x2
+#define PIXEL_FORMAT_RGB666_LOOSELY_PACKED 0x3
+#define PIXEL_FORMAT_RGB888 0x4
+ u32 videomode_color_format:4;
+
+#define ENABLE_ROTATION_0 0x0
+#define ENABLE_ROTATION_90 0x1
+#define ENABLE_ROTATION_180 0x2
+#define ENABLE_ROTATION_270 0x3
+ u32 rotation:2;
+ u32 bta_disable:1;
+ u32 rsvd2:15;
+ } __packed;
+
+ /* Port Desc */
+ struct {
+#define DUAL_LINK_NOT_SUPPORTED 0
+#define DUAL_LINK_FRONT_BACK 1
+#define DUAL_LINK_PIXEL_ALT 2
+ u16 dual_link:2;
+ u16 lane_cnt:2;
+ u16 pixel_overlap:3;
+ u16 rgb_flip:1;
+#define DL_DCS_PORT_A 0x00
+#define DL_DCS_PORT_C 0x01
+#define DL_DCS_PORT_A_AND_C 0x02
+ u16 dl_dcs_cabc_ports:2;
+ u16 dl_dcs_backlight_ports:2;
+ u16 port_sync:1; /* 219-230 */
+ u16 rsvd3:3;
+ } __packed;
+
+ /* DSI Controller Parameters */
+ struct {
+ u16 dsi_usage:1;
+ u16 rsvd4:15;
+ } __packed;
+
+ u8 rsvd5;
+ u32 target_burst_mode_freq;
+ u32 dsi_ddr_clk;
+ u32 bridge_ref_clk;
+
+ /* LP Byte Clock */
+ struct {
+#define BYTE_CLK_SEL_20MHZ 0
+#define BYTE_CLK_SEL_10MHZ 1
+#define BYTE_CLK_SEL_5MHZ 2
+ u8 byte_clk_sel:2;
+ u8 rsvd6:6;
+ } __packed;
+
+ /* DPhy Flags */
+ struct {
+ u16 dphy_param_valid:1;
+ u16 eot_pkt_disabled:1;
+ u16 enable_clk_stop:1;
+ u16 blanking_packets_during_bllp:1; /* 219+ */
+ u16 lp_clock_during_lpm:1; /* 219+ */
+ u16 rsvd7:11;
+ } __packed;
+
+ u32 hs_tx_timeout;
+ u32 lp_rx_timeout;
+ u32 turn_around_timeout;
+ u32 device_reset_timer;
+ u32 master_init_timer;
+ u32 dbi_bw_timer;
+ u32 lp_byte_clk_val;
+
+ /* DPhy Params */
+ struct {
+ u32 prepare_cnt:6;
+ u32 rsvd8:2;
+ u32 clk_zero_cnt:8;
+ u32 trail_cnt:5;
+ u32 rsvd9:3;
+ u32 exit_zero_cnt:6;
+ u32 rsvd10:2;
+ } __packed;
+
+ u32 clk_lane_switch_cnt;
+ u32 hl_switch_cnt;
+
+ u32 rsvd11[6];
+
+ /* timings based on dphy spec */
+ u8 tclk_miss;
+ u8 tclk_post;
+ u8 rsvd12;
+ u8 tclk_pre;
+ u8 tclk_prepare;
+ u8 tclk_settle;
+ u8 tclk_term_enable;
+ u8 tclk_trail;
+ u16 tclk_prepare_clkzero;
+ u8 rsvd13;
+ u8 td_term_enable;
+ u8 teot;
+ u8 ths_exit;
+ u8 ths_prepare;
+ u16 ths_prepare_hszero;
+ u8 rsvd14;
+ u8 ths_settle;
+ u8 ths_skip;
+ u8 ths_trail;
+ u8 tinit;
+ u8 tlpx;
+ u8 rsvd15[3];
+
+ /* GPIOs */
+ u8 panel_enable;
+ u8 bl_enable;
+ u8 pwm_enable;
+ u8 reset_r_n;
+ u8 pwr_down_r;
+ u8 stdby_r_n;
+} __packed;
+
+/* all delays have a unit of 100us */
+struct mipi_pps_data {
+ u16 panel_on_delay;
+ u16 bl_enable_delay;
+ u16 bl_disable_delay;
+ u16 panel_off_delay;
+ u16 panel_power_cycle_delay;
+} __packed;
+
+#endif /* __INTEL_DSI_VBT_DEFS_H__ */
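As a quick orientation to the new header, the bitfields decode directly against the #define values it carries. A minimal sketch, assuming a parsed struct mipi_config as intel_dsi_vbt.c obtains from the VBT:

	/* Sketch only: classify the panel's video transfer mode. */
	static bool example_uses_burst_mode(const struct mipi_config *config)
	{
		return config->video_transfer_mode == BURST_MODE;
	}

	/* The bta_disable fix above reads the same way: the bit is set when
	 * BTA is to be disabled, so it maps 1:1 onto DISABLE_VIDEO_BTA. */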
diff --git a/drivers/gpu/drm/i915/display/intel_encoder.c b/drivers/gpu/drm/i915/display/intel_encoder.c
index 0b7bd26f4339..2ffe1f251ef8 100644
--- a/drivers/gpu/drm/i915/display/intel_encoder.c
+++ b/drivers/gpu/drm/i915/display/intel_encoder.c
@@ -8,6 +8,7 @@
#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_encoder.h"
+#include "intel_hotplug.h"
static void intel_encoder_link_check_work_fn(struct work_struct *work)
{
@@ -37,6 +38,28 @@ void intel_encoder_link_check_queue_work(struct intel_encoder *encoder, int dela
&encoder->link_check_work, msecs_to_jiffies(delay_ms));
}
+void intel_encoder_unblock_all_hpds(struct intel_display *display)
+{
+ struct intel_encoder *encoder;
+
+ if (!HAS_DISPLAY(display))
+ return;
+
+ for_each_intel_encoder(display->drm, encoder)
+ intel_hpd_unblock(encoder);
+}
+
+void intel_encoder_block_all_hpds(struct intel_display *display)
+{
+ struct intel_encoder *encoder;
+
+ if (!HAS_DISPLAY(display))
+ return;
+
+ for_each_intel_encoder(display->drm, encoder)
+ intel_hpd_block(encoder);
+}
+
void intel_encoder_suspend_all(struct intel_display *display)
{
struct intel_encoder *encoder;
@@ -80,3 +103,21 @@ void intel_encoder_shutdown_all(struct intel_display *display)
if (encoder->shutdown_complete)
encoder->shutdown_complete(encoder);
}
+
+struct intel_digital_port *intel_dig_port_alloc(void)
+{
+ struct intel_digital_port *dig_port;
+
+ dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
+ if (!dig_port)
+ return NULL;
+
+ dig_port->hdmi.hdmi_reg = INVALID_MMIO_REG;
+ dig_port->dp.output_reg = INVALID_MMIO_REG;
+ dig_port->aux_ch = AUX_CH_NONE;
+ dig_port->max_lanes = 4;
+
+ mutex_init(&dig_port->hdcp.mutex);
+
+ return dig_port;
+}
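Encoder init paths can now share these defaults instead of open-coding the kzalloc() plus field setup. A hypothetical caller sketch:

	struct intel_digital_port *dig_port;

	/* Hypothetical caller: the helper pre-sets INVALID_MMIO_REG for the
	 * HDMI/DP registers, AUX_CH_NONE and max_lanes = 4, so encoder init
	 * only fills in what differs. */
	dig_port = intel_dig_port_alloc();
	if (!dig_port)
		return;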
diff --git a/drivers/gpu/drm/i915/display/intel_encoder.h b/drivers/gpu/drm/i915/display/intel_encoder.h
index 3fa5589f0b1c..ace0fe1a8f27 100644
--- a/drivers/gpu/drm/i915/display/intel_encoder.h
+++ b/drivers/gpu/drm/i915/display/intel_encoder.h
@@ -6,6 +6,7 @@
#ifndef __INTEL_ENCODER_H__
#define __INTEL_ENCODER_H__
+struct intel_digital_port;
struct intel_display;
struct intel_encoder;
@@ -17,4 +18,9 @@ void intel_encoder_link_check_flush_work(struct intel_encoder *encoder);
void intel_encoder_suspend_all(struct intel_display *display);
void intel_encoder_shutdown_all(struct intel_display *display);
+void intel_encoder_block_all_hpds(struct intel_display *display);
+void intel_encoder_unblock_all_hpds(struct intel_display *display);
+
+struct intel_digital_port *intel_dig_port_alloc(void);
+
#endif /* __INTEL_ENCODER_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index 0da842bd2f2f..22a4a1575d22 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -11,6 +11,7 @@
#include <drm/drm_modeset_helper.h>
#include "i915_drv.h"
+#include "i915_utils.h"
#include "intel_bo.h"
#include "intel_display.h"
#include "intel_display_core.h"
@@ -19,6 +20,7 @@
#include "intel_fb.h"
#include "intel_fb_bo.h"
#include "intel_frontbuffer.h"
+#include "intel_panic.h"
#include "intel_plane.h"
#define check_array_bounds(display, a, i) drm_WARN_ON((display)->drm, (i) >= ARRAY_SIZE(a))
@@ -2342,6 +2344,26 @@ intel_user_framebuffer_create(struct drm_device *dev,
return fb;
}
+struct intel_framebuffer *intel_framebuffer_alloc(void)
+{
+ struct intel_framebuffer *intel_fb;
+ struct intel_panic *panic;
+
+ intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
+ if (!intel_fb)
+ return NULL;
+
+ panic = intel_panic_alloc();
+ if (!panic) {
+ kfree(intel_fb);
+ return NULL;
+ }
+
+ intel_fb->panic = panic;
+
+ return intel_fb;
+}
+
struct drm_framebuffer *
intel_framebuffer_create(struct drm_gem_object *obj,
const struct drm_format_info *info,
@@ -2350,7 +2372,7 @@ intel_framebuffer_create(struct drm_gem_object *obj,
struct intel_framebuffer *intel_fb;
int ret;
- intel_fb = intel_bo_alloc_framebuffer();
+ intel_fb = intel_framebuffer_alloc();
if (!intel_fb)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/drm/i915/display/intel_fb.h b/drivers/gpu/drm/i915/display/intel_fb.h
index 403b8b63721a..22514d5f2bb6 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.h
+++ b/drivers/gpu/drm/i915/display/intel_fb.h
@@ -104,6 +104,9 @@ int intel_framebuffer_init(struct intel_framebuffer *ifb,
struct drm_gem_object *obj,
const struct drm_format_info *info,
struct drm_mode_fb_cmd2 *mode_cmd);
+
+struct intel_framebuffer *intel_framebuffer_alloc(void);
+
struct drm_framebuffer *
intel_framebuffer_create(struct drm_gem_object *obj,
const struct drm_format_info *info,
diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c
index 5a0151775a3a..45af04cb0fb2 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_pin.c
+++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c
@@ -11,6 +11,7 @@
#include "gem/i915_gem_object.h"
#include "i915_drv.h"
+#include "i915_vma.h"
#include "intel_display_core.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
@@ -151,7 +152,7 @@ intel_fb_pin_to_ggtt(const struct drm_framebuffer *fb,
* happy to scanout from anywhere within its global aperture.
*/
pinctl = 0;
- if (HAS_GMCH(dev_priv))
+ if (HAS_GMCH(display))
pinctl |= PIN_MAPPABLE;
i915_gem_ww_ctx_init(&ww, true);
@@ -192,7 +193,7 @@ retry:
* mode that matches the user configuration.
*/
ret = i915_vma_pin_fence(vma);
- if (ret != 0 && DISPLAY_VER(dev_priv) < 4) {
+ if (ret != 0 && DISPLAY_VER(display) < 4) {
i915_vma_unpin(vma);
goto err_unpin;
}
@@ -260,6 +261,7 @@ intel_plane_fb_vtd_guard(const struct intel_plane_state *plane_state)
int intel_plane_pin_fb(struct intel_plane_state *plane_state,
const struct intel_plane_state *old_plane_state)
{
+ struct intel_display *display = to_intel_display(plane_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
const struct intel_framebuffer *fb =
to_intel_framebuffer(plane_state->hw.fb);
@@ -277,17 +279,6 @@ int intel_plane_pin_fb(struct intel_plane_state *plane_state,
plane_state->ggtt_vma = vma;
- /*
- * Pre-populate the dma address before we enter the vblank
- * evade critical section as i915_gem_object_get_dma_address()
- * will trigger might_sleep() even if it won't actually sleep,
- * which is the case when the fb has already been pinned.
- */
- if (intel_plane_needs_physical(plane)) {
- struct drm_i915_gem_object *obj = to_intel_bo(intel_fb_bo(&fb->base));
-
- plane_state->phys_dma_addr = i915_gem_object_get_dma_address(obj, 0);
- }
} else {
unsigned int alignment = intel_plane_fb_min_alignment(plane_state);
@@ -309,6 +300,28 @@ int intel_plane_pin_fb(struct intel_plane_state *plane_state,
plane_state->dpt_vma = vma;
WARN_ON(plane_state->ggtt_vma == plane_state->dpt_vma);
+
+ /*
+ * The DPT object contains only one vma, and there is no VT-d
+ * guard, so the VMA's offset within the DPT is always 0.
+ */
+ drm_WARN_ON(display->drm, intel_dpt_offset(plane_state->dpt_vma));
+ }
+
+ /*
+ * Pre-populate the dma address before we enter the vblank
+ * evade critical section as i915_gem_object_get_dma_address()
+ * will trigger might_sleep() even if it won't actually sleep,
+ * which is the case when the fb has already been pinned.
+ */
+ if (intel_plane_needs_physical(plane)) {
+ struct drm_i915_gem_object *obj = to_intel_bo(intel_fb_bo(&fb->base));
+
+ plane_state->surf = i915_gem_object_get_dma_address(obj, 0) +
+ plane->surf_offset(plane_state);
+ } else {
+ plane_state->surf = i915_ggtt_offset(plane_state->ggtt_vma) +
+ plane->surf_offset(plane_state);
}
return 0;
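The new plane_state->surf precomputation pairs with the surf_offset() plane hook added in intel_display_types.h above. A hypothetical hook implementation sketch, with the field name taken from the existing intel_fb_view layout:

	/* Hypothetical hook: report where the scanout surface starts within
	 * the fb, which intel_plane_pin_fb() adds to the GGTT or physical
	 * base to form the final SURF register value. */
	static u32 example_surf_offset(const struct intel_plane_state *plane_state)
	{
		return plane_state->view.color_plane[0].offset;
	}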
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 685ac98bd001..0d380c825791 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -98,11 +98,7 @@ struct intel_fbc {
struct intel_display *display;
const struct intel_fbc_funcs *funcs;
- /*
- * This is always the inner lock when overlapping with
- * struct_mutex and it's the outer lock when overlapping
- * with stolen_lock.
- */
+ /* This is always the outer lock when overlapping with stolen_lock */
struct mutex lock;
unsigned int busy_bits;
@@ -383,11 +379,11 @@ static void i8xx_fbc_program_cfb(struct intel_fbc *fbc)
struct drm_i915_private *i915 = to_i915(display->drm);
drm_WARN_ON(display->drm,
- range_overflows_end_t(u64, i915_gem_stolen_area_address(i915),
+ range_end_overflows_t(u64, i915_gem_stolen_area_address(i915),
i915_gem_stolen_node_offset(&fbc->compressed_fb),
U32_MAX));
drm_WARN_ON(display->drm,
- range_overflows_end_t(u64, i915_gem_stolen_area_address(i915),
+ range_end_overflows_t(u64, i915_gem_stolen_area_address(i915),
i915_gem_stolen_node_offset(&fbc->compressed_llb),
U32_MAX));
intel_de_write(display, FBC_CFB_BASE,
@@ -1460,7 +1456,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
return 0;
}
- if (intel_display_needs_wa_16023588340(display)) {
+ if (intel_display_wa(display, 16023588340)) {
plane_state->no_fbc_reason = "Wa_16023588340";
return 0;
}
@@ -1550,14 +1546,14 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
* having a Y offset that isn't divisible by 4 causes FIFO underrun
* and screen flicker.
*/
- if (DISPLAY_VER(display) >= 9 &&
+ if (IS_DISPLAY_VER(display, 9, 12) &&
plane_state->view.color_plane[0].y & 3) {
plane_state->no_fbc_reason = "plane start Y offset misaligned";
return 0;
}
/* Wa_22010751166: icl, ehl, tgl, dg1, rkl */
- if (DISPLAY_VER(display) >= 11 &&
+ if (IS_DISPLAY_VER(display, 9, 12) &&
(plane_state->view.color_plane[0].y +
(drm_rect_height(&plane_state->uapi.src) >> 16)) & 3) {
plane_state->no_fbc_reason = "plane end Y offset misaligned";
@@ -2240,10 +2236,9 @@ void intel_fbc_crtc_debugfs_add(struct intel_crtc *crtc)
/* FIXME: remove this once igt is on board with per-crtc stuff */
void intel_fbc_debugfs_register(struct intel_display *display)
{
- struct drm_minor *minor = display->drm->primary;
struct intel_fbc *fbc;
fbc = display->fbc[INTEL_FBC_A];
if (fbc)
- intel_fbc_debugfs_add(fbc, minor->debugfs_root);
+ intel_fbc_debugfs_add(fbc, display->drm->debugfs_root);
}
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c
index 8039a84671cc..59a36b3a22c1 100644
--- a/drivers/gpu/drm/i915/display/intel_fdi.c
+++ b/drivers/gpu/drm/i915/display/intel_fdi.c
@@ -292,34 +292,6 @@ int intel_fdi_link_freq(struct intel_display *display,
return display->fdi.pll_freq;
}
-/**
- * intel_fdi_compute_pipe_bpp - compute pipe bpp limited by max link bpp
- * @crtc_state: the crtc state
- *
- * Compute the pipe bpp limited by the CRTC's maximum link bpp. Encoders can
- * call this function during state computation in the simple case where the
- * link bpp will always match the pipe bpp. This is the case for all non-DP
- * encoders, while DP encoders will use a link bpp lower than pipe bpp in case
- * of DSC compression.
- *
- * Returns %true in case of success, %false if pipe bpp would need to be
- * reduced below its valid range.
- */
-bool intel_fdi_compute_pipe_bpp(struct intel_crtc_state *crtc_state)
-{
- int pipe_bpp = min(crtc_state->pipe_bpp,
- fxp_q4_to_int(crtc_state->max_link_bpp_x16));
-
- pipe_bpp = rounddown(pipe_bpp, 2 * 3);
-
- if (pipe_bpp < 6 * 3)
- return false;
-
- crtc_state->pipe_bpp = pipe_bpp;
-
- return true;
-}
-
int ilk_fdi_compute_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.h b/drivers/gpu/drm/i915/display/intel_fdi.h
index ad5e103c38a8..1cd08df9b0c2 100644
--- a/drivers/gpu/drm/i915/display/intel_fdi.h
+++ b/drivers/gpu/drm/i915/display/intel_fdi.h
@@ -20,7 +20,6 @@ struct intel_link_bw_limits;
int intel_fdi_add_affected_crtcs(struct intel_atomic_state *state);
int intel_fdi_link_freq(struct intel_display *display,
const struct intel_crtc_state *pipe_config);
-bool intel_fdi_compute_pipe_bpp(struct intel_crtc_state *crtc_state);
int ilk_fdi_compute_config(struct intel_crtc *intel_crtc,
struct intel_crtc_state *pipe_config);
int intel_fdi_atomic_check_link(struct intel_atomic_state *state,
diff --git a/drivers/gpu/drm/i915/display/intel_global_state.c b/drivers/gpu/drm/i915/display/intel_global_state.c
index 000a898c9480..30eff6009e87 100644
--- a/drivers/gpu/drm/i915/display/intel_global_state.c
+++ b/drivers/gpu/drm/i915/display/intel_global_state.c
@@ -13,6 +13,36 @@
#include "intel_display_types.h"
#include "intel_global_state.h"
+#define for_each_new_global_obj_in_state(__state, obj, new_obj_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->num_global_objs && \
+ ((obj) = (__state)->global_objs[__i].ptr, \
+ (new_obj_state) = (__state)->global_objs[__i].new_state, 1); \
+ (__i)++) \
+ for_each_if(obj)
+
+#define for_each_old_global_obj_in_state(__state, obj, old_obj_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->num_global_objs && \
+ ((obj) = (__state)->global_objs[__i].ptr, \
+ (old_obj_state) = (__state)->global_objs[__i].old_state, 1); \
+ (__i)++) \
+ for_each_if(obj)
+
+#define for_each_oldnew_global_obj_in_state(__state, obj, old_obj_state, new_obj_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->num_global_objs && \
+ ((obj) = (__state)->global_objs[__i].ptr, \
+ (old_obj_state) = (__state)->global_objs[__i].old_state, \
+ (new_obj_state) = (__state)->global_objs[__i].new_state, 1); \
+ (__i)++) \
+ for_each_if(obj)
+
+struct intel_global_objs_state {
+ struct intel_global_obj *ptr;
+ struct intel_global_state *state, *old_state, *new_state;
+};
+
struct intel_global_commit {
struct kref ref;
struct completion done;
@@ -148,7 +178,7 @@ intel_atomic_get_global_obj_state(struct intel_atomic_state *state,
struct intel_display *display = to_intel_display(state);
int index, num_objs, i;
size_t size;
- struct __intel_global_objs_state *arr;
+ struct intel_global_objs_state *arr;
struct intel_global_state *obj_state;
for (i = 0; i < state->num_global_objs; i++)
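With the iterators and the objs-state struct now private to intel_global_state.c, code outside this file goes through intel_atomic_get_global_obj_state() instead; inside the file the loop shape is unchanged. Illustrative use, reading only the bool documented in intel_global_state.h:

	struct intel_global_obj *obj;
	struct intel_global_state *new_obj_state;
	int i, num_changed = 0;

	/* Illustrative: count the global objects this commit duplicated. */
	for_each_new_global_obj_in_state(state, obj, new_obj_state, i)
		if (new_obj_state->changed)
			num_changed++;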
diff --git a/drivers/gpu/drm/i915/display/intel_global_state.h b/drivers/gpu/drm/i915/display/intel_global_state.h
index d42fb2547ee9..e1efa530cc86 100644
--- a/drivers/gpu/drm/i915/display/intel_global_state.h
+++ b/drivers/gpu/drm/i915/display/intel_global_state.h
@@ -11,6 +11,7 @@
struct intel_atomic_state;
struct intel_display;
+struct intel_global_commit;
struct intel_global_obj;
struct intel_global_state;
@@ -26,36 +27,6 @@ struct intel_global_obj {
const struct intel_global_state_funcs *funcs;
};
-#define intel_for_each_global_obj(obj, dev_priv) \
- list_for_each_entry(obj, &(dev_priv)->display.global.obj_list, head)
-
-#define for_each_new_global_obj_in_state(__state, obj, new_obj_state, __i) \
- for ((__i) = 0; \
- (__i) < (__state)->num_global_objs && \
- ((obj) = (__state)->global_objs[__i].ptr, \
- (new_obj_state) = (__state)->global_objs[__i].new_state, 1); \
- (__i)++) \
- for_each_if(obj)
-
-#define for_each_old_global_obj_in_state(__state, obj, old_obj_state, __i) \
- for ((__i) = 0; \
- (__i) < (__state)->num_global_objs && \
- ((obj) = (__state)->global_objs[__i].ptr, \
- (old_obj_state) = (__state)->global_objs[__i].old_state, 1); \
- (__i)++) \
- for_each_if(obj)
-
-#define for_each_oldnew_global_obj_in_state(__state, obj, old_obj_state, new_obj_state, __i) \
- for ((__i) = 0; \
- (__i) < (__state)->num_global_objs && \
- ((obj) = (__state)->global_objs[__i].ptr, \
- (old_obj_state) = (__state)->global_objs[__i].old_state, \
- (new_obj_state) = (__state)->global_objs[__i].new_state, 1); \
- (__i)++) \
- for_each_if(obj)
-
-struct intel_global_commit;
-
struct intel_global_state {
struct intel_global_obj *obj;
struct intel_atomic_state *state;
@@ -64,11 +35,6 @@ struct intel_global_state {
bool changed, serialized;
};
-struct __intel_global_objs_state {
- struct intel_global_obj *ptr;
- struct intel_global_state *state, *old_state, *new_state;
-};
-
void intel_atomic_global_obj_init(struct intel_display *display,
struct intel_global_obj *obj,
struct intel_global_state *state,
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
index 0d73f32fe7f1..358210adb8f8 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.c
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -30,6 +30,7 @@
#include <linux/export.h>
#include <linux/i2c-algo-bit.h>
#include <linux/i2c.h>
+#include <linux/iopoll.h>
#include <drm/display/drm_hdcp_helper.h>
@@ -39,6 +40,7 @@
#include "intel_de.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_wa.h"
#include "intel_gmbus.h"
#include "intel_gmbus_regs.h"
@@ -217,7 +219,7 @@ static void pnv_gmbus_clock_gating(struct intel_display *display,
bool enable)
{
/* When using bit bashing for I2C, this bit needs to be set to 1 */
- intel_de_rmw(display, DSPCLK_GATE_D(display),
+ intel_de_rmw(display, DSPCLK_GATE_D,
PNV_GMBUSUNIT_CLOCK_GATE_DISABLE,
!enable ? PNV_GMBUSUNIT_CLOCK_GATE_DISABLE : 0);
}
@@ -240,14 +242,20 @@ static void bxt_gmbus_clock_gating(struct intel_display *display,
static u32 get_reserved(struct intel_gmbus *bus)
{
struct intel_display *display = bus->display;
- u32 reserved = 0;
+ u32 preserve_bits = 0;
+
+ if (display->platform.i830 || display->platform.i845g)
+ return 0;
/* On most chips, these bits must be preserved in software. */
- if (!display->platform.i830 && !display->platform.i845g)
- reserved = intel_de_read_notrace(display, bus->gpio_reg) &
- (GPIO_DATA_PULLUP_DISABLE | GPIO_CLOCK_PULLUP_DISABLE);
+ preserve_bits |= GPIO_DATA_PULLUP_DISABLE | GPIO_CLOCK_PULLUP_DISABLE;
+
+ /* Wa_16025573575: the mask bits need to be preserved throughout */
+ if (intel_display_wa(display, 16025573575))
+ preserve_bits |= GPIO_CLOCK_DIR_MASK | GPIO_CLOCK_VAL_MASK |
+ GPIO_DATA_DIR_MASK | GPIO_DATA_VAL_MASK;
- return reserved;
+ return intel_de_read_notrace(display, bus->gpio_reg) & preserve_bits;
}
static int get_clock(void *data)
@@ -308,6 +316,22 @@ static void set_data(void *data, int state_high)
intel_de_posting_read(display, bus->gpio_reg);
}
+static void
+ptl_handle_mask_bits(struct intel_gmbus *bus, bool set)
+{
+ struct intel_display *display = bus->display;
+ u32 reg_val = intel_de_read_notrace(display, bus->gpio_reg);
+ u32 mask_bits = GPIO_CLOCK_DIR_MASK | GPIO_CLOCK_VAL_MASK |
+ GPIO_DATA_DIR_MASK | GPIO_DATA_VAL_MASK;
+ if (set)
+ reg_val |= mask_bits;
+ else
+ reg_val &= ~mask_bits;
+
+ intel_de_write_notrace(display, bus->gpio_reg, reg_val);
+ intel_de_posting_read(display, bus->gpio_reg);
+}
+
static int
intel_gpio_pre_xfer(struct i2c_adapter *adapter)
{
@@ -319,6 +343,9 @@ intel_gpio_pre_xfer(struct i2c_adapter *adapter)
if (display->platform.pineview)
pnv_gmbus_clock_gating(display, false);
+ if (intel_display_wa(display, 16025573575))
+ ptl_handle_mask_bits(bus, true);
+
set_data(bus, 1);
set_clock(bus, 1);
udelay(I2C_RISEFALL_TIME);
@@ -336,6 +363,9 @@ intel_gpio_post_xfer(struct i2c_adapter *adapter)
if (display->platform.pineview)
pnv_gmbus_clock_gating(display, true);
+
+ if (intel_display_wa(display, 16025573575))
+ ptl_handle_mask_bits(bus, false);
}
static void
@@ -385,11 +415,14 @@ static int gmbus_wait(struct intel_display *display, u32 status, u32 irq_en)
intel_de_write_fw(display, GMBUS4(display), irq_en);
status |= GMBUS_SATOER;
- ret = wait_for_us((gmbus2 = intel_de_read_fw(display, GMBUS2(display))) & status,
- 2);
+
+ ret = poll_timeout_us_atomic(gmbus2 = intel_de_read_fw(display, GMBUS2(display)),
+ gmbus2 & status,
+ 0, 2, false);
if (ret)
- ret = wait_for((gmbus2 = intel_de_read_fw(display, GMBUS2(display))) & status,
- 50);
+ ret = poll_timeout_us(gmbus2 = intel_de_read_fw(display, GMBUS2(display)),
+ gmbus2 & status,
+ 500, 50 * 1000, false);
intel_de_write_fw(display, GMBUS4(display), 0);
remove_wait_queue(&display->gmbus.wait_queue, &wait);
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 42202c8bb066..531ee122bf82 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -11,6 +11,7 @@
#include <linux/component.h>
#include <linux/debugfs.h>
#include <linux/i2c.h>
+#include <linux/iopoll.h>
#include <linux/random.h>
#include <drm/display/drm_hdcp_helper.h>
@@ -326,16 +327,13 @@ static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port,
bool ksv_ready;
/* Poll for ksv list ready (spec says max time allowed is 5s) */
- ret = __wait_for(read_ret = shim->read_ksv_ready(dig_port,
- &ksv_ready),
- read_ret || ksv_ready, 5 * 1000 * 1000, 1000,
- 100 * 1000);
+ ret = poll_timeout_us(read_ret = shim->read_ksv_ready(dig_port, &ksv_ready),
+ read_ret || ksv_ready,
+ 100 * 1000, 5 * 1000 * 1000, false);
if (ret)
return ret;
if (read_ret)
return read_ret;
- if (!ksv_ready)
- return -ETIMEDOUT;
return 0;
}
@@ -817,6 +815,7 @@ static int intel_hdcp_auth(struct intel_connector *connector)
enum port port = dig_port->base.port;
unsigned long r0_prime_gen_start;
int ret, i, tries = 2;
+ u32 val;
union {
u32 reg[2];
u8 shim[DRM_HDCP_AN_LEN];
@@ -905,8 +904,10 @@ static int intel_hdcp_auth(struct intel_connector *connector)
HDCP_CONF_AUTH_AND_ENC);
/* Wait for R0 ready */
- if (wait_for(intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port)) &
- (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
+ ret = poll_timeout_us(val = intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port)),
+ val & (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC),
+ 100, 1000, false);
+ if (ret) {
drm_err(display->drm, "Timed out waiting for R0 ready\n");
return -ETIMEDOUT;
}
@@ -938,16 +939,16 @@ static int intel_hdcp_auth(struct intel_connector *connector)
ri.reg);
/* Wait for Ri prime match */
- if (!wait_for(intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port)) &
- (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
+ ret = poll_timeout_us(val = intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port)),
+ val & (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC),
+ 100, 1000, false);
+ if (!ret)
break;
}
if (i == tries) {
drm_dbg_kms(display->drm,
- "Timed out waiting for Ri prime match (%x)\n",
- intel_de_read(display,
- HDCP_STATUS(display, cpu_transcoder, port)));
+ "Timed out waiting for Ri prime match (%x)\n", val);
return -ETIMEDOUT;
}
@@ -2446,12 +2447,6 @@ static int _intel_hdcp_enable(struct intel_atomic_state *state,
if (!hdcp->shim)
return -ENOENT;
- if (!connector->encoder) {
- drm_err(display->drm, "[CONNECTOR:%d:%s] encoder is not initialized\n",
- connector->base.base.id, connector->base.name);
- return -ENODEV;
- }
-
mutex_lock(&hdcp->mutex);
mutex_lock(&dig_port->hdcp.mutex);
drm_WARN_ON(display->drm,
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 9961ff259298..4ab7e2e3bfd4 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -29,6 +29,7 @@
#include <linux/delay.h>
#include <linux/hdmi.h>
#include <linux/i2c.h>
+#include <linux/iopoll.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>
@@ -60,6 +61,7 @@
#include "intel_hdcp_regs.h"
#include "intel_hdcp_shim.h"
#include "intel_hdmi.h"
+#include "intel_link_bw.h"
#include "intel_lspcon.h"
#include "intel_panel.h"
#include "intel_pfit.h"
@@ -1582,9 +1584,9 @@ bool intel_hdmi_hdcp_check_link_once(struct intel_digital_port *dig_port,
intel_de_write(display, HDCP_RPRIME(display, cpu_transcoder, port), ri.reg);
/* Wait for Ri prime match */
- if (wait_for((intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port)) &
- (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC)) ==
- (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) {
+ ret = intel_de_wait_for_set(display, HDCP_STATUS(display, cpu_transcoder, port),
+ HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC, 1);
+ if (ret) {
drm_dbg_kms(display->drm, "Ri' mismatch detected (%x)\n",
intel_de_read(display, HDCP_STATUS(display, cpu_transcoder,
port)));
@@ -1689,11 +1691,10 @@ intel_hdmi_hdcp2_wait_for_msg(struct intel_digital_port *dig_port,
if (timeout < 0)
return timeout;
- ret = __wait_for(ret = hdcp2_detect_msg_availability(dig_port,
- msg_id, &msg_ready,
- &msg_sz),
- !ret && msg_ready && msg_sz, timeout * 1000,
- 1000, 5 * 1000);
+ ret = poll_timeout_us(ret = hdcp2_detect_msg_availability(dig_port, msg_id,
+ &msg_ready, &msg_sz),
+ !ret && msg_ready && msg_sz,
+ 4000, timeout * 1000, false);
if (ret)
drm_dbg_kms(display->drm,
"msg_id: %d, ret: %d, timeout: %d\n",
@@ -2053,6 +2054,10 @@ intel_hdmi_mode_valid(struct drm_connector *_connector,
else
sink_format = INTEL_OUTPUT_FORMAT_RGB;
+ status = intel_pfit_mode_valid(display, mode, sink_format, 0);
+ if (status != MODE_OK)
+ return status;
+
status = intel_hdmi_mode_clock_valid(&connector->base, clock, has_hdmi_sink, sink_format);
if (status != MODE_OK) {
if (ycbcr_420_only ||
@@ -2341,6 +2346,9 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
pipe_config->pixel_multiplier = 2;
+ if (!intel_link_bw_compute_pipe_bpp(pipe_config))
+ return -EINVAL;
+
pipe_config->has_audio =
intel_hdmi_has_audio(encoder, pipe_config, conn_state) &&
intel_audio_compute_config(encoder, pipe_config, conn_state);
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index 265aa97fcc75..4451a792600a 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -28,6 +28,7 @@
#include "i915_drv.h"
#include "i915_irq.h"
+#include "i915_utils.h"
#include "intel_connector.h"
#include "intel_display_power.h"
#include "intel_display_core.h"
@@ -971,8 +972,6 @@ void intel_hpd_cancel_work(struct intel_display *display)
spin_lock_irq(&display->irq.lock);
- drm_WARN_ON(display->drm, get_blocked_hpd_pin_mask(display));
-
display->hotplug.long_hpd_pin_mask = 0;
display->hotplug.short_hpd_pin_mask = 0;
display->hotplug.event_bits = 0;
@@ -1333,12 +1332,12 @@ static const struct file_operations i915_hpd_short_storm_ctl_fops = {
void intel_hpd_debugfs_register(struct intel_display *display)
{
- struct drm_minor *minor = display->drm->primary;
+ struct dentry *debugfs_root = display->drm->debugfs_root;
- debugfs_create_file("i915_hpd_storm_ctl", 0644, minor->debugfs_root,
+ debugfs_create_file("i915_hpd_storm_ctl", 0644, debugfs_root,
display, &i915_hpd_storm_ctl_fops);
- debugfs_create_file("i915_hpd_short_storm_ctl", 0644, minor->debugfs_root,
+ debugfs_create_file("i915_hpd_short_storm_ctl", 0644, debugfs_root,
display, &i915_hpd_short_storm_ctl_fops);
- debugfs_create_bool("i915_ignore_long_hpd", 0644, minor->debugfs_root,
+ debugfs_create_bool("i915_ignore_long_hpd", 0644, debugfs_root,
&display->hotplug.ignore_long_hpd);
}
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
index 43aee70597bf..4f72f3fb9af5 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
@@ -1025,7 +1025,7 @@ static void mtp_tc_hpd_enable_detection(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
- intel_de_rmw(display, SHOTPLUG_CTL_DDI,
+ intel_de_rmw(display, SHOTPLUG_CTL_TC,
mtp_tc_hotplug_mask(encoder->hpd_pin),
mtp_tc_hotplug_enables(encoder));
}
diff --git a/drivers/gpu/drm/i915/display/intel_link_bw.c b/drivers/gpu/drm/i915/display/intel_link_bw.c
index 3caef7f9c7c4..f52dee0ea412 100644
--- a/drivers/gpu/drm/i915/display/intel_link_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_link_bw.c
@@ -165,6 +165,34 @@ int intel_link_bw_reduce_bpp(struct intel_atomic_state *state,
}
/**
+ * intel_link_bw_compute_pipe_bpp - compute pipe bpp limited by max link bpp
+ * @crtc_state: the crtc state
+ *
+ * Compute the pipe bpp limited by the CRTC's maximum link bpp. Encoders can
+ * call this function during state computation in the simple case where the
+ * link bpp will always match the pipe bpp. This is the case for all non-DP
+ * encoders, while DP encoders will use a link bpp lower than pipe bpp in case
+ * of DSC compression.
+ *
+ * Returns %true in case of success, %false if pipe bpp would need to be
+ * reduced below its valid range.
+ */
+bool intel_link_bw_compute_pipe_bpp(struct intel_crtc_state *crtc_state)
+{
+ int pipe_bpp = min(crtc_state->pipe_bpp,
+ fxp_q4_to_int(crtc_state->max_link_bpp_x16));
+
+ pipe_bpp = rounddown(pipe_bpp, 2 * 3);
+
+ if (pipe_bpp < 6 * 3)
+ return false;
+
+ crtc_state->pipe_bpp = pipe_bpp;
+
+ return true;
+}
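
The clamp-and-round above is plain integer arithmetic: max_link_bpp_x16 is a .4 fixed-point value, fxp_q4_to_int() drops the fraction, and the result is rounded down to a multiple of 6 because three color components are carried at even bit depths. A standalone sketch of the same math, with invented example values:

	#include <stdio.h>

	static int fxp_q4_to_int(int x16) { return x16 >> 4; }	/* drop the .4 fraction */
	static int round_down6(int x) { return x - x % 6; }	/* positive x only */

	int main(void)
	{
		int pipe_bpp = 30;		/* 10 bpc x 3 components */
		int max_link_bpp_x16 = 424;	/* 26.5 bpp in .4 fixed point */
		int bpp = fxp_q4_to_int(max_link_bpp_x16);

		if (pipe_bpp < bpp)
			bpp = pipe_bpp;

		bpp = round_down6(bpp);		/* 26 -> 24 */
		printf("%s: %d\n", bpp < 6 * 3 ? "fail" : "ok", bpp);
		return 0;
	}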
+
+/**
* intel_link_bw_set_bpp_limit_for_pipe - set link bpp limit for a pipe to its minimum
* @state: atomic state
* @old_limits: link BW limits
@@ -449,6 +477,7 @@ void intel_link_bw_connector_debugfs_add(struct intel_connector *connector)
switch (connector->base.connector_type) {
case DRM_MODE_CONNECTOR_DisplayPort:
case DRM_MODE_CONNECTOR_eDP:
+ case DRM_MODE_CONNECTOR_HDMIA:
break;
case DRM_MODE_CONNECTOR_VGA:
case DRM_MODE_CONNECTOR_SVIDEO:
@@ -458,11 +487,6 @@ void intel_link_bw_connector_debugfs_add(struct intel_connector *connector)
break;
return;
- case DRM_MODE_CONNECTOR_HDMIA:
- if (HAS_FDI(display) && !HAS_DDI(display))
- break;
-
- return;
default:
return;
}
diff --git a/drivers/gpu/drm/i915/display/intel_link_bw.h b/drivers/gpu/drm/i915/display/intel_link_bw.h
index b499042e62b1..95ab7c50c61d 100644
--- a/drivers/gpu/drm/i915/display/intel_link_bw.h
+++ b/drivers/gpu/drm/i915/display/intel_link_bw.h
@@ -27,6 +27,7 @@ int intel_link_bw_reduce_bpp(struct intel_atomic_state *state,
struct intel_link_bw_limits *limits,
u8 pipe_mask,
const char *reason);
+bool intel_link_bw_compute_pipe_bpp(struct intel_crtc_state *crtc_state);
bool intel_link_bw_set_bpp_limit_for_pipe(struct intel_atomic_state *state,
const struct intel_link_bw_limits *old_limits,
struct intel_link_bw_limits *new_limits,
diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.c b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
index 666148a14522..42284e9928f2 100644
--- a/drivers/gpu/drm/i915/display/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
@@ -68,9 +68,9 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <drm/drm_print.h>
#include <drm/intel/intel_lpe_audio.h>
-#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_audio_regs.h"
#include "intel_de.h"
@@ -170,14 +170,11 @@ static struct irq_chip lpe_audio_irqchip = {
static int lpe_audio_irq_init(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
int irq = display->audio.lpe.irq;
- drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv));
- irq_set_chip_and_handler_name(irq,
- &lpe_audio_irqchip,
- handle_simple_irq,
- "hdmi_lpe_audio_irq_handler");
+ irq_set_chip_and_handler_name(irq, &lpe_audio_irqchip,
+ handle_simple_irq,
+ "hdmi_lpe_audio_irq_handler");
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c
index abc4b562083d..d56026c4efdd 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.c
@@ -23,6 +23,8 @@
*
*/
+#include <linux/iopoll.h>
+
#include <drm/display/drm_dp_dual_mode_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/drm_atomic_helper.h>
@@ -181,6 +183,8 @@ static enum drm_lspcon_mode lspcon_wait_mode(struct intel_lspcon *lspcon,
struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon);
struct intel_display *display = to_intel_display(intel_dp);
enum drm_lspcon_mode current_mode;
+ int timeout_us;
+ int ret;
current_mode = lspcon_get_current_mode(lspcon);
if (current_mode == mode)
@@ -189,9 +193,12 @@ static enum drm_lspcon_mode lspcon_wait_mode(struct intel_lspcon *lspcon,
drm_dbg_kms(display->drm, "Waiting for LSPCON mode %s to settle\n",
lspcon_mode_name(mode));
- wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode,
- lspcon_get_mode_settle_timeout(lspcon));
- if (current_mode != mode)
+ timeout_us = lspcon_get_mode_settle_timeout(lspcon) * 1000;
+
+ ret = poll_timeout_us(current_mode = lspcon_get_current_mode(lspcon),
+ current_mode == mode,
+ 5000, timeout_us, false);
+ if (ret)
drm_err(display->drm, "LSPCON mode hasn't settled\n");
out:
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index 7e48a235c99f..48f4d8ed4f15 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -48,6 +48,7 @@
#include "intel_dpll.h"
#include "intel_fdi.h"
#include "intel_gmbus.h"
+#include "intel_link_bw.h"
#include "intel_lvds.h"
#include "intel_lvds_regs.h"
#include "intel_panel.h"
@@ -433,7 +434,7 @@ static int intel_lvds_compute_config(struct intel_encoder *encoder,
if (HAS_PCH_SPLIT(display)) {
crtc_state->has_pch_encoder = true;
- if (!intel_fdi_compute_pipe_bpp(crtc_state))
+ if (!intel_link_bw_compute_pipe_bpp(crtc_state))
return -EINVAL;
}
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
index 81efdb17fc0c..cbc220310813 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.c
+++ b/drivers/gpu/drm/i915/display/intel_opregion.c
@@ -28,13 +28,13 @@
#include <linux/acpi.h>
#include <linux/debugfs.h>
#include <linux/dmi.h>
+#include <linux/iopoll.h>
#include <acpi/video.h>
#include <drm/drm_edid.h>
#include <drm/drm_file.h>
#include <drm/drm_print.h>
-#include "i915_utils.h"
#include "intel_acpi.h"
#include "intel_backlight.h"
#include "intel_display_core.h"
@@ -357,10 +357,12 @@ static int swsci(struct intel_display *display,
pci_write_config_word(pdev, SWSCI, swsci_val);
/* Poll for the result. */
-#define C (((scic = swsci->scic) & SWSCI_SCIC_INDICATOR) == 0)
- if (wait_for(C, dslp)) {
+ ret = poll_timeout_us(scic = swsci->scic,
+ (scic & SWSCI_SCIC_INDICATOR) == 0,
+ 1000, dslp * 1000, false);
+ if (ret) {
drm_dbg(display->drm, "SWSCI request timed out\n");
- return -ETIMEDOUT;
+ return ret;
}
scic = (scic & SWSCI_SCIC_EXIT_STATUS_MASK) >>
@@ -1299,8 +1301,6 @@ DEFINE_SHOW_ATTRIBUTE(intel_opregion);
void intel_opregion_debugfs_register(struct intel_display *display)
{
- struct drm_minor *minor = display->drm->primary;
-
- debugfs_create_file("i915_opregion", 0444, minor->debugfs_root,
+ debugfs_create_file("i915_opregion", 0444, display->drm->debugfs_root,
display, &intel_opregion_fops);
}
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 159a5f998ea0..272f9e7af4d4 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -217,10 +217,9 @@ static void i830_overlay_clock_gating(struct intel_display *display,
/* WA_OVERLAY_CLKGATE:alm */
if (enable)
- intel_de_write(display, DSPCLK_GATE_D(display), 0);
+ intel_de_write(display, DSPCLK_GATE_D, 0);
else
- intel_de_write(display, DSPCLK_GATE_D(display),
- OVRUNIT_CLOCK_GATE_DISABLE);
+ intel_de_write(display, DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
/* WA_DISABLE_L2CACHE_CLOCK_GATING:alm */
pci_bus_read_config_byte(pdev->bus,
diff --git a/drivers/gpu/drm/i915/display/intel_panic.c b/drivers/gpu/drm/i915/display/intel_panic.c
new file mode 100644
index 000000000000..7311ce4e8b6c
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_panic.c
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: MIT
+/* Copyright © 2025 Intel Corporation */
+
+#include <drm/drm_panic.h>
+
+#include "gem/i915_gem_object.h"
+#include "intel_display_types.h"
+#include "intel_fb.h"
+#include "intel_panic.h"
+
+struct intel_panic *intel_panic_alloc(void)
+{
+ return i915_gem_object_alloc_panic();
+}
+
+int intel_panic_setup(struct intel_panic *panic, struct drm_scanout_buffer *sb)
+{
+ struct intel_framebuffer *fb = sb->private;
+ struct drm_gem_object *obj = intel_fb_bo(&fb->base);
+
+ return i915_gem_object_panic_setup(panic, sb, obj, fb->panic_tiling);
+}
+
+void intel_panic_finish(struct intel_panic *panic)
+{
+ return i915_gem_object_panic_finish(panic);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_panic.h b/drivers/gpu/drm/i915/display/intel_panic.h
new file mode 100644
index 000000000000..afb472e924aa
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_panic.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __INTEL_PANIC_H__
+#define __INTEL_PANIC_H__
+
+struct drm_scanout_buffer;
+struct intel_panic;
+
+struct intel_panic *intel_panic_alloc(void);
+int intel_panic_setup(struct intel_panic *panic, struct drm_scanout_buffer *sb);
+void intel_panic_finish(struct intel_panic *panic);
+
+#endif /* __INTEL_PANIC_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_pch.h b/drivers/gpu/drm/i915/display/intel_pch.h
index cf4dab1b98bf..19cac7412d0a 100644
--- a/drivers/gpu/drm/i915/display/intel_pch.h
+++ b/drivers/gpu/drm/i915/display/intel_pch.h
@@ -6,8 +6,6 @@
#ifndef __INTEL_PCH__
#define __INTEL_PCH__
-#include "intel_display_conversion.h"
-
struct intel_display;
/*
@@ -36,7 +34,7 @@ enum intel_pch {
PCH_LNL,
};
-#define INTEL_PCH_TYPE(_display) (__to_intel_display(_display)->pch_type)
+#define INTEL_PCH_TYPE(_display) ((_display)->pch_type)
#define HAS_PCH_DG2(display) (INTEL_PCH_TYPE(display) == PCH_DG2)
#define HAS_PCH_ADP(display) (INTEL_PCH_TYPE(display) == PCH_ADP)
#define HAS_PCH_DG1(display) (INTEL_PCH_TYPE(display) == PCH_DG1)
diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.c b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
index d3c5255bf1a8..9ae53679a041 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_refclk.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
@@ -17,16 +17,22 @@
static void lpt_fdi_reset_mphy(struct intel_display *display)
{
+ int ret;
+
intel_de_rmw(display, SOUTH_CHICKEN2, 0, FDI_MPHY_IOSFSB_RESET_CTL);
- if (wait_for_us(intel_de_read(display, SOUTH_CHICKEN2) &
- FDI_MPHY_IOSFSB_RESET_STATUS, 100))
+ ret = intel_de_wait_custom(display, SOUTH_CHICKEN2,
+ FDI_MPHY_IOSFSB_RESET_STATUS, FDI_MPHY_IOSFSB_RESET_STATUS,
+ 100, 0, NULL);
+ if (ret)
drm_err(display->drm, "FDI mPHY reset assert timeout\n");
intel_de_rmw(display, SOUTH_CHICKEN2, FDI_MPHY_IOSFSB_RESET_CTL, 0);
- if (wait_for_us((intel_de_read(display, SOUTH_CHICKEN2) &
- FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
+ ret = intel_de_wait_custom(display, SOUTH_CHICKEN2,
+ FDI_MPHY_IOSFSB_RESET_STATUS, 0,
+ 100, 0, NULL);
+ if (ret)
drm_err(display->drm, "FDI mPHY reset de-assert timeout\n");
}
diff --git a/drivers/gpu/drm/i915/display/intel_pfit.c b/drivers/gpu/drm/i915/display/intel_pfit.c
index 13541be4d6df..68539e7c2a24 100644
--- a/drivers/gpu/drm/i915/display/intel_pfit.c
+++ b/drivers/gpu/drm/i915/display/intel_pfit.c
@@ -14,6 +14,7 @@
#include "intel_lvds_regs.h"
#include "intel_pfit.h"
#include "intel_pfit_regs.h"
+#include "skl_scaler.h"
static int intel_pch_pfit_check_dst_window(const struct intel_crtc_state *crtc_state)
{
@@ -546,6 +547,16 @@ out:
return intel_gmch_pfit_check_timings(crtc_state);
}
+enum drm_mode_status
+intel_pfit_mode_valid(struct intel_display *display,
+ const struct drm_display_mode *mode,
+ enum intel_output_format output_format,
+ int num_joined_pipes)
+{
+ return skl_scaler_mode_valid(display, mode, output_format,
+ num_joined_pipes);
+}
+
int intel_pfit_compute_config(struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
diff --git a/drivers/gpu/drm/i915/display/intel_pfit.h b/drivers/gpu/drm/i915/display/intel_pfit.h
index ef34f9b49d09..c1bb0d1f344e 100644
--- a/drivers/gpu/drm/i915/display/intel_pfit.h
+++ b/drivers/gpu/drm/i915/display/intel_pfit.h
@@ -6,8 +6,12 @@
#ifndef __INTEL_PFIT_H__
#define __INTEL_PFIT_H__
+enum drm_mode_status;
+struct drm_display_mode;
struct drm_connector_state;
struct intel_crtc_state;
+struct intel_display;
+enum intel_output_format;
int intel_pfit_compute_config(struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
@@ -17,5 +21,9 @@ void ilk_pfit_get_config(struct intel_crtc_state *crtc_state);
void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state);
void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state);
void i9xx_pfit_get_config(struct intel_crtc_state *crtc_state);
-
+enum drm_mode_status
+intel_pfit_mode_valid(struct intel_display *display,
+ const struct drm_display_mode *mode,
+ enum intel_output_format output_format,
+ int num_joined_pipes);
#endif /* __INTEL_PFIT_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_plane.c b/drivers/gpu/drm/i915/display/intel_plane.c
index 36fb07471deb..2329f09d413d 100644
--- a/drivers/gpu/drm/i915/display/intel_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_plane.c
@@ -46,9 +46,7 @@
#include "gem/i915_gem_object.h"
#include "i915_scheduler_types.h"
-#include "i915_vma.h"
#include "i9xx_plane_regs.h"
-#include "intel_bo.h"
#include "intel_cdclk.h"
#include "intel_cursor.h"
#include "intel_display_rps.h"
@@ -57,6 +55,7 @@
#include "intel_fb.h"
#include "intel_fb_pin.h"
#include "intel_fbdev.h"
+#include "intel_panic.h"
#include "intel_plane.h"
#include "intel_psr.h"
#include "skl_scaler.h"
@@ -1327,7 +1326,7 @@ static void intel_panic_flush(struct drm_plane *plane)
struct drm_framebuffer *fb = plane_state->hw.fb;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- intel_bo_panic_finish(intel_fb);
+ intel_panic_finish(intel_fb->panic);
if (crtc_state->enable_psr2_sel_fetch) {
/* Force a full update for psr2 */
@@ -1410,7 +1409,7 @@ static int intel_get_scanout_buffer(struct drm_plane *plane,
return -EOPNOTSUPP;
}
sb->private = intel_fb;
- ret = intel_bo_panic_setup(sb);
+ ret = intel_panic_setup(intel_fb->panic, sb);
if (ret)
return ret;
}
@@ -1749,8 +1748,3 @@ int intel_plane_atomic_check(struct intel_atomic_state *state)
return 0;
}
-
-u32 intel_plane_ggtt_offset(const struct intel_plane_state *plane_state)
-{
- return i915_ggtt_offset(plane_state->ggtt_vma);
-}
diff --git a/drivers/gpu/drm/i915/display/intel_plane.h b/drivers/gpu/drm/i915/display/intel_plane.h
index 4ef012c08fa4..8af41ccc0a69 100644
--- a/drivers/gpu/drm/i915/display/intel_plane.h
+++ b/drivers/gpu/drm/i915/display/intel_plane.h
@@ -87,7 +87,6 @@ int intel_plane_add_affected(struct intel_atomic_state *state,
struct intel_crtc *crtc);
int intel_plane_atomic_check(struct intel_atomic_state *state);
-u32 intel_plane_ggtt_offset(const struct intel_plane_state *plane_state);
bool intel_plane_format_mod_supported_async(struct drm_plane *plane,
u32 format,
u64 modifier);
diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.c b/drivers/gpu/drm/i915/display/intel_plane_initial.c
index 4246173ed311..a9f36b1b50c1 100644
--- a/drivers/gpu/drm/i915/display/intel_plane_initial.c
+++ b/drivers/gpu/drm/i915/display/intel_plane_initial.c
@@ -360,6 +360,8 @@ valid_fb:
i915_vma_pin_fence(vma) == 0 && vma->fence)
plane_state->flags |= PLANE_HAS_FENCE;
+ plane_state->surf = i915_ggtt_offset(plane_state->ggtt_vma);
+
plane_state->uapi.src_x = 0;
plane_state->uapi.src_y = 0;
plane_state->uapi.src_w = fb->width << 16;
diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
index b64d0b30f5b1..327e0de86f1e 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.c
+++ b/drivers/gpu/drm/i915/display/intel_pps.c
@@ -4,6 +4,7 @@
*/
#include <linux/debugfs.h>
+#include <linux/iopoll.h>
#include <drm/drm_print.h>
@@ -608,6 +609,8 @@ static void wait_panel_status(struct intel_dp *intel_dp,
struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
i915_reg_t pp_stat_reg, pp_ctrl_reg;
+ int ret;
+ u32 val;
lockdep_assert_held(&display->pps.mutex);
@@ -624,13 +627,18 @@ static void wait_panel_status(struct intel_dp *intel_dp,
intel_de_read(display, pp_stat_reg),
intel_de_read(display, pp_ctrl_reg));
- if (intel_de_wait(display, pp_stat_reg, mask, value, 5000))
+ ret = poll_timeout_us(val = intel_de_read(display, pp_stat_reg),
+ (val & mask) == value,
+ 10 * 1000, 5000 * 1000, true);
+ if (ret) {
drm_err(display->drm,
"[ENCODER:%d:%s] %s panel status timeout: PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
dig_port->base.base.base.id, dig_port->base.base.name,
pps_name(intel_dp),
intel_de_read(display, pp_stat_reg),
intel_de_read(display, pp_ctrl_reg));
+ return;
+ }
drm_dbg_kms(display->drm, "Wait complete\n");
}
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 41988e193a41..01bf304c705f 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -42,6 +42,7 @@
#include "intel_dmc.h"
#include "intel_dp.h"
#include "intel_dp_aux.h"
+#include "intel_dsb.h"
#include "intel_frontbuffer.h"
#include "intel_hdmi.h"
#include "intel_psr.h"
@@ -233,16 +234,12 @@ bool intel_psr_needs_aux_io_power(struct intel_encoder *encoder,
static bool psr_global_enabled(struct intel_dp *intel_dp)
{
- struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
case I915_PSR_DEBUG_DEFAULT:
- if (display->params.enable_psr == -1)
- return intel_dp_is_edp(intel_dp) ?
- connector->panel.vbt.psr.enable :
- true;
- return display->params.enable_psr;
+ return intel_dp_is_edp(intel_dp) ?
+ connector->panel.vbt.psr.enable : true;
case I915_PSR_DEBUG_DISABLE:
return false;
default:
@@ -250,39 +247,23 @@ static bool psr_global_enabled(struct intel_dp *intel_dp)
}
}
-static bool psr2_global_enabled(struct intel_dp *intel_dp)
+static bool sel_update_global_enabled(struct intel_dp *intel_dp)
{
- struct intel_display *display = to_intel_display(intel_dp);
-
switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
case I915_PSR_DEBUG_DISABLE:
case I915_PSR_DEBUG_FORCE_PSR1:
return false;
default:
- if (display->params.enable_psr == 1)
- return false;
return true;
}
}
-static bool psr2_su_region_et_global_enabled(struct intel_dp *intel_dp)
-{
- struct intel_display *display = to_intel_display(intel_dp);
-
- if (display->params.enable_psr != -1)
- return false;
-
- return true;
-}
-
static bool panel_replay_global_enabled(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- if ((display->params.enable_psr != -1) ||
- (intel_dp->psr.debug & I915_PSR_DEBUG_PANEL_REPLAY_DISABLE))
- return false;
- return true;
+ return !(intel_dp->psr.debug & I915_PSR_DEBUG_PANEL_REPLAY_DISABLE) &&
+ display->params.enable_panel_replay;
}
static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp)
@@ -514,12 +495,14 @@ static u8 intel_dp_get_su_capability(struct intel_dp *intel_dp)
{
u8 su_capability = 0;
- if (intel_dp->psr.sink_panel_replay_su_support)
- drm_dp_dpcd_readb(&intel_dp->aux,
- DP_PANEL_REPLAY_CAP_CAPABILITY,
- &su_capability);
- else
+ if (intel_dp->psr.sink_panel_replay_su_support) {
+ if (drm_dp_dpcd_read_byte(&intel_dp->aux,
+ DP_PANEL_REPLAY_CAP_CAPABILITY,
+ &su_capability) < 0)
+ return 0;
+ } else {
su_capability = intel_dp->psr_dpcd[1];
+ }
return su_capability;
}
@@ -600,6 +583,16 @@ exit:
static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
+ int ret;
+
+ ret = drm_dp_dpcd_read_data(&intel_dp->aux, DP_PANEL_REPLAY_CAP_SUPPORT,
+ &intel_dp->pr_dpcd, sizeof(intel_dp->pr_dpcd));
+ if (ret < 0)
+ return;
+
+ if (!(intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
+ DP_PANEL_REPLAY_SUPPORT))
+ return;
if (intel_dp_is_edp(intel_dp)) {
if (!intel_alpm_aux_less_wake_supported(intel_dp)) {
@@ -631,6 +624,15 @@ static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
static void _psr_init_dpcd(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
+ int ret;
+
+ ret = drm_dp_dpcd_read_data(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
+ sizeof(intel_dp->psr_dpcd));
+ if (ret < 0)
+ return;
+
+ if (!intel_dp->psr_dpcd[0])
+ return;
drm_dbg_kms(display->drm, "eDP panel supports PSR version %x\n",
intel_dp->psr_dpcd[0]);
@@ -676,18 +678,9 @@ static void _psr_init_dpcd(struct intel_dp *intel_dp)
void intel_psr_init_dpcd(struct intel_dp *intel_dp)
{
- drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
- sizeof(intel_dp->psr_dpcd));
+ _psr_init_dpcd(intel_dp);
- drm_dp_dpcd_read(&intel_dp->aux, DP_PANEL_REPLAY_CAP_SUPPORT,
- &intel_dp->pr_dpcd, sizeof(intel_dp->pr_dpcd));
-
- if (intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
- DP_PANEL_REPLAY_SUPPORT)
- _panel_replay_init_dpcd(intel_dp);
-
- if (intel_dp->psr_dpcd[0])
- _psr_init_dpcd(intel_dp);
+ _panel_replay_init_dpcd(intel_dp);
if (intel_dp->psr.sink_psr2_support ||
intel_dp->psr.sink_panel_replay_su_support)
@@ -742,8 +735,7 @@ static bool psr2_su_region_et_valid(struct intel_dp *intel_dp, bool panel_replay
return panel_replay ?
intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT :
- intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED &&
- psr2_su_region_et_global_enabled(intel_dp);
+ intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED;
}
static void _panel_replay_enable_sink(struct intel_dp *intel_dp,
@@ -936,7 +928,7 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
/* Wa_16025596647 */
if ((DISPLAY_VER(display) == 20 ||
IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
- is_dc5_dc6_blocked(intel_dp))
+ is_dc5_dc6_blocked(intel_dp) && intel_dp->psr.pkg_c_latency_used)
intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display,
intel_dp->psr.pipe,
true);
@@ -1026,7 +1018,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
/* Wa_16025596647 */
if ((DISPLAY_VER(display) == 20 ||
IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
- is_dc5_dc6_blocked(intel_dp))
+ is_dc5_dc6_blocked(intel_dp) && intel_dp->psr.pkg_c_latency_used)
idle_frames = 0;
else
idle_frames = psr_compute_idle_frames(intel_dp);
@@ -1423,7 +1415,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
- if (!intel_dp->psr.sink_psr2_support)
+ if (!intel_dp->psr.sink_psr2_support || display->params.enable_psr == 1)
return false;
/* JSL and EHL only supports eDP 1.3 */
@@ -1528,7 +1520,7 @@ static bool intel_sel_update_config_valid(struct intel_dp *intel_dp,
goto unsupported;
}
- if (!psr2_global_enabled(intel_dp)) {
+ if (!sel_update_global_enabled(intel_dp)) {
drm_dbg_kms(display->drm,
"Selective update disabled by flag\n");
goto unsupported;
@@ -1576,7 +1568,7 @@ static bool _psr_compute_config(struct intel_dp *intel_dp,
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
int entry_setup_frames;
- if (!CAN_PSR(intel_dp))
+ if (!CAN_PSR(intel_dp) || !display->params.enable_psr)
return false;
/*
@@ -1808,6 +1800,8 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
drm_WARN_ON(display->drm, intel_dp->psr.active);
+ drm_WARN_ON(display->drm, !intel_dp->psr.enabled);
+
lockdep_assert_held(&intel_dp->psr.lock);
/* psr1, psr2 and panel-replay are mutually exclusive. */
@@ -2027,6 +2021,7 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
intel_dp->psr.req_psr2_sdp_prior_scanline =
crtc_state->req_psr2_sdp_prior_scanline;
intel_dp->psr.active_non_psr_pipes = crtc_state->active_non_psr_pipes;
+ intel_dp->psr.pkg_c_latency_used = crtc_state->pkg_c_latency_used;
if (!psr_interrupt_error_check(intel_dp))
return;
@@ -2103,8 +2098,9 @@ static void intel_psr_exit(struct intel_dp *intel_dp)
drm_WARN_ON(display->drm, !(val & EDP_PSR2_ENABLE));
} else {
- if (DISPLAY_VER(display) == 20 ||
- IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
+ if ((DISPLAY_VER(display) == 20 ||
+ IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
+ intel_dp->psr.pkg_c_latency_used)
intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display,
intel_dp->psr.pipe,
false);
@@ -2207,6 +2203,7 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
intel_dp->psr.su_region_et_enabled = false;
intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
intel_dp->psr.active_non_psr_pipes = 0;
+ intel_dp->psr.pkg_c_latency_used = 0;
}
/**
@@ -3003,35 +3000,57 @@ void intel_psr_post_plane_update(struct intel_atomic_state *state,
}
}
-static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
+/*
+ * From bspec: Panel Self Refresh (BDW+)
+ * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
+ * exit training time + 1.5 ms of aux channel handshake. 50 ms is
+ * defensive enough to cover everything.
+ */
+#define PSR_IDLE_TIMEOUT_MS 50
+
+static int
+_psr2_ready_for_pipe_update_locked(const struct intel_crtc_state *new_crtc_state,
+ struct intel_dsb *dsb)
{
- struct intel_display *display = to_intel_display(intel_dp);
- enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
+ struct intel_display *display = to_intel_display(new_crtc_state);
+ enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
/*
* Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough.
* As all higher states have bit 4 of the PSR2 state set, we can just wait for
* EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared.
*/
+ if (dsb) {
+ intel_dsb_poll(dsb, EDP_PSR2_STATUS(display, cpu_transcoder),
+ EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 0, 200,
+ PSR_IDLE_TIMEOUT_MS * 1000 / 200);
+ return true;
+ }
+
return intel_de_wait_for_clear(display,
EDP_PSR2_STATUS(display, cpu_transcoder),
- EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 50);
+ EDP_PSR2_STATUS_STATE_DEEP_SLEEP,
+ PSR_IDLE_TIMEOUT_MS);
}
-static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
+static int
+_psr1_ready_for_pipe_update_locked(const struct intel_crtc_state *new_crtc_state,
+ struct intel_dsb *dsb)
{
- struct intel_display *display = to_intel_display(intel_dp);
- enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
+ struct intel_display *display = to_intel_display(new_crtc_state);
+ enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
+
+ if (dsb) {
+ intel_dsb_poll(dsb, psr_status_reg(display, cpu_transcoder),
+ EDP_PSR_STATUS_STATE_MASK, 0, 200,
+ PSR_IDLE_TIMEOUT_MS * 1000 / 200);
+ return true;
+ }
- /*
- * From bspec: Panel Self Refresh (BDW+)
- * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
- * exit training time + 1.5 ms of aux channel handshake. 50 ms is
- * defensive enough to cover everything.
- */
return intel_de_wait_for_clear(display,
psr_status_reg(display, cpu_transcoder),
- EDP_PSR_STATUS_STATE_MASK, 50);
+ EDP_PSR_STATUS_STATE_MASK,
+ PSR_IDLE_TIMEOUT_MS);
}
/**
@@ -3060,9 +3079,11 @@ void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_stat
continue;
if (intel_dp->psr.sel_update_enabled)
- ret = _psr2_ready_for_pipe_update_locked(intel_dp);
+ ret = _psr2_ready_for_pipe_update_locked(new_crtc_state,
+ NULL);
else
- ret = _psr1_ready_for_pipe_update_locked(intel_dp);
+ ret = _psr1_ready_for_pipe_update_locked(new_crtc_state,
+ NULL);
if (ret)
drm_err(display->drm,
@@ -3070,6 +3091,18 @@ void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_stat
}
}
+void intel_psr_wait_for_idle_dsb(struct intel_dsb *dsb,
+ const struct intel_crtc_state *new_crtc_state)
+{
+ if (!new_crtc_state->has_psr || new_crtc_state->has_panel_replay)
+ return;
+
+ if (new_crtc_state->has_sel_update)
+ _psr2_ready_for_pipe_update_locked(new_crtc_state, dsb);
+ else
+ _psr1_ready_for_pipe_update_locked(new_crtc_state, dsb);
+}
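
The DSB variants trade the CPU-side wait for a poll executed by the display hardware itself, with the same 50 ms budget expressed as iterations of 200 us each. A compile-time check of that arithmetic (the interval macro name is local to this sketch):

	#define PSR_IDLE_TIMEOUT_MS	50
	#define DSB_POLL_INTERVAL_US	200

	/* 50 ms budget / 200 us interval = 250 iterations handed to the DSB */
	_Static_assert(PSR_IDLE_TIMEOUT_MS * 1000 / DSB_POLL_INTERVAL_US == 250,
		       "poll count must cover the PSR idle budget");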
+
static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
@@ -3099,7 +3132,7 @@ static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
/* After the unlocked wait, verify that PSR is still wanted! */
mutex_lock(&intel_dp->psr.lock);
- return err == 0 && intel_dp->psr.enabled;
+ return err == 0 && intel_dp->psr.enabled && !intel_dp->psr.pause_counter;
}
static int intel_psr_fastset_force(struct intel_display *display)
@@ -3228,8 +3261,13 @@ static void intel_psr_work(struct work_struct *work)
if (!intel_dp->psr.enabled)
goto unlock;
- if (READ_ONCE(intel_dp->psr.irq_aux_error))
+ if (READ_ONCE(intel_dp->psr.irq_aux_error)) {
intel_psr_handle_irq(intel_dp);
+ goto unlock;
+ }
+
+ if (intel_dp->psr.pause_counter)
+ goto unlock;
/*
* We have to make sure PSR is ready for re-enable
@@ -3723,7 +3761,7 @@ static void intel_psr_apply_underrun_on_idle_wa_locked(struct intel_dp *intel_dp
struct intel_display *display = to_intel_display(intel_dp);
bool dc5_dc6_blocked;
- if (!intel_dp->psr.active)
+ if (!intel_dp->psr.active || !intel_dp->psr.pkg_c_latency_used)
return;
dc5_dc6_blocked = is_dc5_dc6_blocked(intel_dp);
@@ -3748,7 +3786,8 @@ static void psr_dc5_dc6_wa_work(struct work_struct *work)
mutex_lock(&intel_dp->psr.lock);
- if (intel_dp->psr.enabled && !intel_dp->psr.panel_replay_enabled)
+ if (intel_dp->psr.enabled && !intel_dp->psr.panel_replay_enabled &&
+ !intel_dp->psr.pkg_c_latency_used)
intel_psr_apply_underrun_on_idle_wa_locked(intel_dp);
mutex_unlock(&intel_dp->psr.lock);
@@ -3826,7 +3865,8 @@ void intel_psr_notify_pipe_change(struct intel_atomic_state *state,
goto unlock;
if ((enable && intel_dp->psr.active_non_psr_pipes) ||
- (!enable && !intel_dp->psr.active_non_psr_pipes)) {
+ (!enable && !intel_dp->psr.active_non_psr_pipes) ||
+ !intel_dp->psr.pkg_c_latency_used) {
intel_dp->psr.active_non_psr_pipes = active_non_psr_pipes;
goto unlock;
}
@@ -3861,7 +3901,7 @@ void intel_psr_notify_vblank_enable_disable(struct intel_display *display,
break;
}
- if (intel_dp->psr.enabled)
+ if (intel_dp->psr.enabled && intel_dp->psr.pkg_c_latency_used)
intel_psr_apply_underrun_on_idle_wa_locked(intel_dp);
mutex_unlock(&intel_dp->psr.lock);
@@ -4157,12 +4197,12 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
void intel_psr_debugfs_register(struct intel_display *display)
{
- struct drm_minor *minor = display->drm->primary;
+ struct dentry *debugfs_root = display->drm->debugfs_root;
- debugfs_create_file("i915_edp_psr_debug", 0644, minor->debugfs_root,
+ debugfs_create_file("i915_edp_psr_debug", 0644, debugfs_root,
display, &i915_edp_psr_debug_fops);
- debugfs_create_file("i915_edp_psr_status", 0444, minor->debugfs_root,
+ debugfs_create_file("i915_edp_psr_status", 0444, debugfs_root,
display, &i915_edp_psr_status_fops);
}
diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
index 9b061a22361f..077751aa599f 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.h
+++ b/drivers/gpu/drm/i915/display/intel_psr.h
@@ -52,6 +52,8 @@ void intel_psr_get_config(struct intel_encoder *encoder,
void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir);
void intel_psr_short_pulse(struct intel_dp *intel_dp);
void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state);
+void intel_psr_wait_for_idle_dsb(struct intel_dsb *dsb,
+ const struct intel_crtc_state *new_crtc_state);
bool intel_psr_enabled(struct intel_dp *intel_dp);
int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c
index a32fae510ed2..d2e16b79d6be 100644
--- a/drivers/gpu/drm/i915/display/intel_quirks.c
+++ b/drivers/gpu/drm/i915/display/intel_quirks.c
@@ -80,6 +80,12 @@ static void quirk_fw_sync_len(struct intel_dp *intel_dp)
drm_info(display->drm, "Applying Fast Wake sync pulse count quirk\n");
}
+static void quirk_edp_limit_rate_hbr2(struct intel_display *display)
+{
+ intel_set_quirk(display, QUIRK_EDP_LIMIT_RATE_HBR2);
+ drm_info(display->drm, "Applying eDP Limit rate to HBR2 quirk\n");
+}
+
struct intel_quirk {
int device;
int subsystem_vendor;
@@ -231,6 +237,9 @@ static struct intel_quirk intel_quirks[] = {
{ 0x3184, 0x1019, 0xa94d, quirk_increase_ddi_disabled_time },
/* HP Notebook - 14-r206nv */
{ 0x0f31, 0x103c, 0x220f, quirk_invert_brightness },
+
+ /* Dell XPS 13 7390 2-in-1 */
+ { 0x8a12, 0x1028, 0x08b0, quirk_edp_limit_rate_hbr2 },
};
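
The new entry is keyed on PCI device id 0x8a12 plus Dell's subsystem vendor/device pair, so only that one SKU picks up the rate limit. A hypothetical sketch of the table walk such entries imply; the struct and function names here are invented for illustration, not the driver's:

	struct quirk_entry {
		int device, subsystem_vendor, subsystem_device;
		void (*hook)(void);
	};

	static void apply_quirks(const struct quirk_entry *q, int n,
				 int device, int sv, int sd)
	{
		for (int i = 0; i < n; i++)
			if (q[i].device == device &&
			    q[i].subsystem_vendor == sv &&
			    q[i].subsystem_device == sd)
				q[i].hook();	/* e.g. the HBR2 limit quirk */
	}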
static const struct intel_dpcd_quirk intel_dpcd_quirks[] = {
diff --git a/drivers/gpu/drm/i915/display/intel_quirks.h b/drivers/gpu/drm/i915/display/intel_quirks.h
index cafdebda7535..06da0e286c67 100644
--- a/drivers/gpu/drm/i915/display/intel_quirks.h
+++ b/drivers/gpu/drm/i915/display/intel_quirks.h
@@ -20,6 +20,7 @@ enum intel_quirk_id {
QUIRK_LVDS_SSC_DISABLE,
QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK,
QUIRK_FW_SYNC_LEN,
+ QUIRK_EDP_LIMIT_RATE_HBR2,
};
void intel_init_quirks(struct intel_display *display);
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index 87aff2754f69..6c032d81e7ee 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -47,11 +47,11 @@
#include "intel_display_driver.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
-#include "intel_fdi.h"
#include "intel_fifo_underrun.h"
#include "intel_gmbus.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
+#include "intel_link_bw.h"
#include "intel_panel.h"
#include "intel_sdvo.h"
#include "intel_sdvo_regs.h"
@@ -1367,7 +1367,7 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder,
if (HAS_PCH_SPLIT(display)) {
pipe_config->has_pch_encoder = true;
- if (!intel_fdi_compute_pipe_bpp(pipe_config))
+ if (!intel_link_bw_compute_pipe_bpp(pipe_config))
return -EINVAL;
}
@@ -2052,8 +2052,10 @@ static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder)
{
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
- intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG,
- &intel_sdvo->hotplug_active, 2);
+ if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG,
+ &intel_sdvo->hotplug_active, 2))
+ drm_warn(intel_sdvo->base.base.dev,
+ "Failed to enable hotplug on SDVO encoder\n");
}
static enum intel_hotplug_state
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index e6844df837af..75bbaa923204 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -264,8 +264,7 @@ static u32 vlv_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
return sprctl;
}
-static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+static u32 vlv_sprite_ctl(const struct intel_plane_state *plane_state)
{
const struct drm_framebuffer *fb = plane_state->hw.fb;
unsigned int rotation = plane_state->hw.rotation;
@@ -395,15 +394,12 @@ vlv_sprite_update_arm(struct intel_dsb *dsb,
enum pipe pipe = plane->pipe;
enum plane_id plane_id = plane->id;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
- u32 sprsurf_offset = plane_state->view.color_plane[0].offset;
u32 x = plane_state->view.color_plane[0].x;
u32 y = plane_state->view.color_plane[0].y;
- u32 sprctl, linear_offset;
+ u32 sprctl;
sprctl = plane_state->ctl | vlv_sprite_ctl_crtc(crtc_state);
- linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
-
if (display->platform.cherryview && pipe == PIPE_B)
chv_sprite_update_csc(plane_state);
@@ -418,7 +414,8 @@ vlv_sprite_update_arm(struct intel_dsb *dsb,
intel_de_write_fw(display, SPCONSTALPHA(pipe, plane_id), 0);
- intel_de_write_fw(display, SPLINOFF(pipe, plane_id), linear_offset);
+ intel_de_write_fw(display, SPLINOFF(pipe, plane_id),
+ intel_fb_xy_to_linear(x, y, plane_state, 0));
intel_de_write_fw(display, SPTILEOFF(pipe, plane_id),
SP_OFFSET_Y(y) | SP_OFFSET_X(x));
@@ -428,8 +425,7 @@ vlv_sprite_update_arm(struct intel_dsb *dsb,
* the control register just before the surface register.
*/
intel_de_write_fw(display, SPCNTR(pipe, plane_id), sprctl);
- intel_de_write_fw(display, SPSURF(pipe, plane_id),
- intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
+ intel_de_write_fw(display, SPSURF(pipe, plane_id), plane_state->surf);
vlv_sprite_update_clrc(plane_state);
vlv_sprite_update_gamma(plane_state);
@@ -663,8 +659,7 @@ static bool ivb_need_sprite_gamma(const struct intel_plane_state *plane_state)
(display->platform.ivybridge || display->platform.haswell);
}
-static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+static u32 ivb_sprite_ctl(const struct intel_plane_state *plane_state)
{
struct intel_display *display = to_intel_display(plane_state);
const struct drm_framebuffer *fb = plane_state->hw.fb;
@@ -830,15 +825,12 @@ ivb_sprite_update_arm(struct intel_dsb *dsb,
struct intel_display *display = to_intel_display(plane);
enum pipe pipe = plane->pipe;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
- u32 sprsurf_offset = plane_state->view.color_plane[0].offset;
u32 x = plane_state->view.color_plane[0].x;
u32 y = plane_state->view.color_plane[0].y;
- u32 sprctl, linear_offset;
+ u32 sprctl;
sprctl = plane_state->ctl | ivb_sprite_ctl_crtc(crtc_state);
- linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
-
if (key->flags) {
intel_de_write_fw(display, SPRKEYVAL(pipe), key->min_value);
intel_de_write_fw(display, SPRKEYMSK(pipe),
@@ -852,7 +844,8 @@ ivb_sprite_update_arm(struct intel_dsb *dsb,
intel_de_write_fw(display, SPROFFSET(pipe),
SPRITE_OFFSET_Y(y) | SPRITE_OFFSET_X(x));
} else {
- intel_de_write_fw(display, SPRLINOFF(pipe), linear_offset);
+ intel_de_write_fw(display, SPRLINOFF(pipe),
+ intel_fb_xy_to_linear(x, y, plane_state, 0));
intel_de_write_fw(display, SPRTILEOFF(pipe),
SPRITE_OFFSET_Y(y) | SPRITE_OFFSET_X(x));
}
@@ -863,8 +856,7 @@ ivb_sprite_update_arm(struct intel_dsb *dsb,
* the control register just before the surface register.
*/
intel_de_write_fw(display, SPRCTL(pipe), sprctl);
- intel_de_write_fw(display, SPRSURF(pipe),
- intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
+ intel_de_write_fw(display, SPRSURF(pipe), plane_state->surf);
ivb_sprite_update_gamma(plane_state);
}
@@ -1016,8 +1008,7 @@ static u32 g4x_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
return dvscntr;
}
-static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+static u32 g4x_sprite_ctl(const struct intel_plane_state *plane_state)
{
struct intel_display *display = to_intel_display(plane_state);
const struct drm_framebuffer *fb = plane_state->hw.fb;
@@ -1181,15 +1172,12 @@ g4x_sprite_update_arm(struct intel_dsb *dsb,
struct intel_display *display = to_intel_display(plane);
enum pipe pipe = plane->pipe;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
- u32 dvssurf_offset = plane_state->view.color_plane[0].offset;
u32 x = plane_state->view.color_plane[0].x;
u32 y = plane_state->view.color_plane[0].y;
- u32 dvscntr, linear_offset;
+ u32 dvscntr;
dvscntr = plane_state->ctl | g4x_sprite_ctl_crtc(crtc_state);
- linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
-
if (key->flags) {
intel_de_write_fw(display, DVSKEYVAL(pipe), key->min_value);
intel_de_write_fw(display, DVSKEYMSK(pipe),
@@ -1197,7 +1185,8 @@ g4x_sprite_update_arm(struct intel_dsb *dsb,
intel_de_write_fw(display, DVSKEYMAX(pipe), key->max_value);
}
- intel_de_write_fw(display, DVSLINOFF(pipe), linear_offset);
+ intel_de_write_fw(display, DVSLINOFF(pipe),
+ intel_fb_xy_to_linear(x, y, plane_state, 0));
intel_de_write_fw(display, DVSTILEOFF(pipe),
DVS_OFFSET_Y(y) | DVS_OFFSET_X(x));
@@ -1207,8 +1196,7 @@ g4x_sprite_update_arm(struct intel_dsb *dsb,
* the control register just before the surface register.
*/
intel_de_write_fw(display, DVSCNTR(pipe), dvscntr);
- intel_de_write_fw(display, DVSSURF(pipe),
- intel_plane_ggtt_offset(plane_state) + dvssurf_offset);
+ intel_de_write_fw(display, DVSSURF(pipe), plane_state->surf);
if (display->platform.g4x)
g4x_sprite_update_gamma(plane_state);
@@ -1387,9 +1375,9 @@ g4x_sprite_check(struct intel_crtc_state *crtc_state,
return ret;
if (DISPLAY_VER(display) >= 7)
- plane_state->ctl = ivb_sprite_ctl(crtc_state, plane_state);
+ plane_state->ctl = ivb_sprite_ctl(plane_state);
else
- plane_state->ctl = g4x_sprite_ctl(crtc_state, plane_state);
+ plane_state->ctl = g4x_sprite_ctl(plane_state);
return 0;
}
@@ -1439,7 +1427,7 @@ vlv_sprite_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
- plane_state->ctl = vlv_sprite_ctl(crtc_state, plane_state);
+ plane_state->ctl = vlv_sprite_ctl(plane_state);
return 0;
}
@@ -1624,6 +1612,7 @@ intel_sprite_plane_create(struct intel_display *display,
plane->capture_error = vlv_sprite_capture_error;
plane->get_hw_state = vlv_sprite_get_hw_state;
plane->check_plane = vlv_sprite_check;
+ plane->surf_offset = i965_plane_surf_offset;
plane->max_stride = i965_plane_max_stride;
plane->min_alignment = vlv_plane_min_alignment;
plane->min_cdclk = vlv_plane_min_cdclk;
@@ -1648,6 +1637,7 @@ intel_sprite_plane_create(struct intel_display *display,
plane->capture_error = ivb_sprite_capture_error;
plane->get_hw_state = ivb_sprite_get_hw_state;
plane->check_plane = g4x_sprite_check;
+ plane->surf_offset = i965_plane_surf_offset;
if (display->platform.broadwell || display->platform.haswell) {
plane->max_stride = hsw_sprite_max_stride;
@@ -1673,6 +1663,7 @@ intel_sprite_plane_create(struct intel_display *display,
plane->capture_error = g4x_sprite_capture_error;
plane->get_hw_state = g4x_sprite_get_hw_state;
plane->check_plane = g4x_sprite_check;
+ plane->surf_offset = i965_plane_surf_offset;
plane->max_stride = g4x_sprite_max_stride;
plane->min_alignment = g4x_sprite_min_alignment;
plane->min_cdclk = g4x_sprite_min_cdclk;
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index 668ef139391b..c4a5601c5107 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -3,6 +3,8 @@
* Copyright © 2019 Intel Corporation
*/
+#include <linux/iopoll.h>
+
#include <drm/drm_print.h>
#include "i915_reg.h"
@@ -23,11 +25,6 @@
#include "intel_modeset_lock.h"
#include "intel_tc.h"
-#define DP_PIN_ASSIGNMENT_NONE 0x0
-#define DP_PIN_ASSIGNMENT_C 0x3
-#define DP_PIN_ASSIGNMENT_D 0x4
-#define DP_PIN_ASSIGNMENT_E 0x5
-
enum tc_port_mode {
TC_PORT_DISCONNECTED,
TC_PORT_TBT_ALT,
@@ -66,6 +63,7 @@ struct intel_tc_port {
enum tc_port_mode mode;
enum tc_port_mode init_mode;
enum phy_fia phy_fia;
+ enum intel_tc_pin_assignment pin_assignment;
u8 phy_fia_idx;
u8 max_lane_count;
};
@@ -253,6 +251,9 @@ tc_port_power_domain(struct intel_tc_port *tc)
{
enum tc_port tc_port = intel_encoder_to_tc(&tc->dig_port->base);
+ if (tc_port == TC_PORT_NONE)
+ return POWER_DOMAIN_INVALID;
+
return POWER_DOMAIN_PORT_DDI_LANES_TC1 + tc_port - TC_PORT_1;
}
@@ -265,13 +266,14 @@ assert_tc_port_power_enabled(struct intel_tc_port *tc)
!intel_display_power_is_enabled(display, tc_port_power_domain(tc)));
}
-static u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
+static u32 get_lane_mask(struct intel_tc_port *tc)
{
- struct intel_display *display = to_intel_display(dig_port);
- struct intel_tc_port *tc = to_tc_port(dig_port);
+ struct intel_display *display = to_intel_display(tc->dig_port);
+ intel_wakeref_t wakeref;
u32 lane_mask;
- lane_mask = intel_de_read(display, PORT_TX_DFLEXDPSP(tc->phy_fia));
+ with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref)
+ lane_mask = intel_de_read(display, PORT_TX_DFLEXDPSP(tc->phy_fia));
drm_WARN_ON(display->drm, lane_mask == 0xffffffff);
assert_tc_cold_blocked(tc);
@@ -280,77 +282,87 @@ static u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(tc->phy_fia_idx);
}
-u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
+static char pin_assignment_name(enum intel_tc_pin_assignment pin_assignment)
{
- struct intel_display *display = to_intel_display(dig_port);
- struct intel_tc_port *tc = to_tc_port(dig_port);
- u32 pin_mask;
+ if (pin_assignment == INTEL_TC_PIN_ASSIGNMENT_NONE)
+ return '-';
- pin_mask = intel_de_read(display, PORT_TX_DFLEXPA1(tc->phy_fia));
-
- drm_WARN_ON(display->drm, pin_mask == 0xffffffff);
- assert_tc_cold_blocked(tc);
-
- return (pin_mask & DP_PIN_ASSIGNMENT_MASK(tc->phy_fia_idx)) >>
- DP_PIN_ASSIGNMENT_SHIFT(tc->phy_fia_idx);
+ return 'A' + pin_assignment - INTEL_TC_PIN_ASSIGNMENT_A;
}
-static int lnl_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
+static enum intel_tc_pin_assignment
+get_pin_assignment(struct intel_tc_port *tc)
{
- struct intel_display *display = to_intel_display(dig_port);
- enum tc_port tc_port = intel_encoder_to_tc(&dig_port->base);
+ struct intel_display *display = to_intel_display(tc->dig_port);
+ enum tc_port tc_port = intel_encoder_to_tc(&tc->dig_port->base);
+ enum intel_tc_pin_assignment pin_assignment;
intel_wakeref_t wakeref;
- u32 val, pin_assignment;
+ i915_reg_t reg;
+ u32 mask;
+ u32 val;
+
+ if (tc->mode == TC_PORT_TBT_ALT)
+ return INTEL_TC_PIN_ASSIGNMENT_NONE;
+
+ if (DISPLAY_VER(display) >= 20) {
+ reg = TCSS_DDI_STATUS(tc_port);
+ mask = TCSS_DDI_STATUS_PIN_ASSIGNMENT_MASK;
+ } else {
+ reg = PORT_TX_DFLEXPA1(tc->phy_fia);
+ mask = DP_PIN_ASSIGNMENT_MASK(tc->phy_fia_idx);
+ }
with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref)
- val = intel_de_read(display, TCSS_DDI_STATUS(tc_port));
+ val = intel_de_read(display, reg);
+
+ drm_WARN_ON(display->drm, val == 0xffffffff);
+ assert_tc_cold_blocked(tc);
- pin_assignment =
- REG_FIELD_GET(TCSS_DDI_STATUS_PIN_ASSIGNMENT_MASK, val);
+ pin_assignment = (val & mask) >> (ffs(mask) - 1);
switch (pin_assignment) {
- case DP_PIN_ASSIGNMENT_NONE:
- return 0;
+ case INTEL_TC_PIN_ASSIGNMENT_A:
+ case INTEL_TC_PIN_ASSIGNMENT_B:
+ case INTEL_TC_PIN_ASSIGNMENT_F:
+ drm_WARN_ON(display->drm, DISPLAY_VER(display) > 11);
+ break;
+ case INTEL_TC_PIN_ASSIGNMENT_NONE:
+ case INTEL_TC_PIN_ASSIGNMENT_C:
+ case INTEL_TC_PIN_ASSIGNMENT_D:
+ case INTEL_TC_PIN_ASSIGNMENT_E:
+ break;
default:
MISSING_CASE(pin_assignment);
- fallthrough;
- case DP_PIN_ASSIGNMENT_D:
- return 2;
- case DP_PIN_ASSIGNMENT_C:
- case DP_PIN_ASSIGNMENT_E:
- return 4;
}
+
+ return pin_assignment;
}
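
Both register layouts are decoded by one expression, (val & mask) >> (ffs(mask) - 1), which shifts by the position of the mask's lowest set bit instead of keeping a shift constant per register generation. A userspace sketch of that extract, with an invented field position:

	#include <stdio.h>
	#include <strings.h>	/* ffs() */

	static unsigned int field_get(unsigned int val, unsigned int mask)
	{
		return (val & mask) >> (ffs(mask) - 1);
	}

	int main(void)
	{
		/* a 4-bit field at bits 7:4 holding pin assignment C (0x3) */
		printf("%u\n", field_get(0x0030, 0x00f0));	/* prints 3 */
		return 0;
	}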
-static int mtl_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
+static int mtl_get_max_lane_count(struct intel_tc_port *tc)
{
- struct intel_display *display = to_intel_display(dig_port);
- intel_wakeref_t wakeref;
- u32 pin_mask;
+ enum intel_tc_pin_assignment pin_assignment;
- with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref)
- pin_mask = intel_tc_port_get_pin_assignment_mask(dig_port);
+ pin_assignment = get_pin_assignment(tc);
- switch (pin_mask) {
+ switch (pin_assignment) {
+ case INTEL_TC_PIN_ASSIGNMENT_NONE:
+ return 0;
default:
- MISSING_CASE(pin_mask);
+ MISSING_CASE(pin_assignment);
fallthrough;
- case DP_PIN_ASSIGNMENT_D:
+ case INTEL_TC_PIN_ASSIGNMENT_D:
return 2;
- case DP_PIN_ASSIGNMENT_C:
- case DP_PIN_ASSIGNMENT_E:
+ case INTEL_TC_PIN_ASSIGNMENT_C:
+ case INTEL_TC_PIN_ASSIGNMENT_E:
return 4;
}
}
-static int intel_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
+static int icl_get_max_lane_count(struct intel_tc_port *tc)
{
- struct intel_display *display = to_intel_display(dig_port);
- intel_wakeref_t wakeref;
u32 lane_mask = 0;
- with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref)
- lane_mask = intel_tc_port_get_lane_mask(dig_port);
+ lane_mask = get_lane_mask(tc);
switch (lane_mask) {
default:
@@ -372,41 +384,43 @@ static int intel_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
static int get_max_lane_count(struct intel_tc_port *tc)
{
struct intel_display *display = to_intel_display(tc->dig_port);
- struct intel_digital_port *dig_port = tc->dig_port;
if (tc->mode != TC_PORT_DP_ALT)
return 4;
- assert_tc_cold_blocked(tc);
-
- if (DISPLAY_VER(display) >= 20)
- return lnl_tc_port_get_max_lane_count(dig_port);
-
if (DISPLAY_VER(display) >= 14)
- return mtl_tc_port_get_max_lane_count(dig_port);
+ return mtl_get_max_lane_count(tc);
- return intel_tc_port_get_max_lane_count(dig_port);
+ return icl_get_max_lane_count(tc);
}
static void read_pin_configuration(struct intel_tc_port *tc)
{
+ tc->pin_assignment = get_pin_assignment(tc);
tc->max_lane_count = get_max_lane_count(tc);
}
int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port)
{
- struct intel_display *display = to_intel_display(dig_port);
struct intel_tc_port *tc = to_tc_port(dig_port);
if (!intel_encoder_is_tc(&dig_port->base))
return 4;
- if (DISPLAY_VER(display) < 20)
- return get_max_lane_count(tc);
-
return tc->max_lane_count;
}
+enum intel_tc_pin_assignment
+intel_tc_port_get_pin_assignment(struct intel_digital_port *dig_port)
+{
+ struct intel_tc_port *tc = to_tc_port(dig_port);
+
+ if (!intel_encoder_is_tc(&dig_port->base))
+ return INTEL_TC_PIN_ASSIGNMENT_NONE;
+
+ return tc->pin_assignment;
+}
+
void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
int required_lanes)
{
@@ -1038,8 +1052,13 @@ static bool
xelpdp_tc_phy_wait_for_tcss_power(struct intel_tc_port *tc, bool enabled)
{
struct intel_display *display = to_intel_display(tc->dig_port);
+ bool is_enabled;
+ int ret;
- if (wait_for(xelpdp_tc_phy_tcss_power_is_enabled(tc) == enabled, 5)) {
+ ret = poll_timeout_us(is_enabled = xelpdp_tc_phy_tcss_power_is_enabled(tc),
+ is_enabled == enabled,
+ 200, 5000, false);
+ if (ret) {
drm_dbg_kms(display->drm,
"Port %s: timeout waiting for TCSS power to get %s\n",
str_enabled_disabled(enabled),
@@ -1320,8 +1339,13 @@ static bool tc_phy_is_connected(struct intel_tc_port *tc,
static bool tc_phy_wait_for_ready(struct intel_tc_port *tc)
{
struct intel_display *display = to_intel_display(tc->dig_port);
+ bool is_ready;
+ int ret;
- if (wait_for(tc_phy_is_ready(tc), 500)) {
+ ret = poll_timeout_us(is_ready = tc_phy_is_ready(tc),
+ is_ready,
+ 1000, 500 * 1000, false);
+ if (ret) {
drm_err(display->drm, "Port %s: timeout waiting for PHY ready\n",
tc->port_name);
@@ -1509,10 +1533,13 @@ static void intel_tc_port_reset_mode(struct intel_tc_port *tc,
if (!force_disconnect)
tc_phy_connect(tc, required_lanes);
- drm_dbg_kms(display->drm, "Port %s: TC port mode reset (%s -> %s)\n",
+ drm_dbg_kms(display->drm,
+ "Port %s: TC port mode reset (%s -> %s) pin assignment: %c max lanes: %d\n",
tc->port_name,
tc_port_mode_name(old_tc_mode),
- tc_port_mode_name(tc->mode));
+ tc_port_mode_name(tc->mode),
+ pin_assignment_name(tc->pin_assignment),
+ tc->max_lane_count);
}
static bool intel_tc_port_needs_reset(struct intel_tc_port *tc)
@@ -1667,9 +1694,11 @@ void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port,
__intel_tc_port_put_link(tc);
}
- drm_dbg_kms(display->drm, "Port %s: sanitize mode (%s)\n",
+ drm_dbg_kms(display->drm, "Port %s: sanitize mode (%s) pin assignment: %c max lanes: %d\n",
tc->port_name,
- tc_port_mode_name(tc->mode));
+ tc_port_mode_name(tc->mode),
+ pin_assignment_name(tc->pin_assignment),
+ tc->max_lane_count);
mutex_unlock(&tc->lock);
}
diff --git a/drivers/gpu/drm/i915/display/intel_tc.h b/drivers/gpu/drm/i915/display/intel_tc.h
index 26c4265368c1..fff8b96e4972 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.h
+++ b/drivers/gpu/drm/i915/display/intel_tc.h
@@ -12,6 +12,75 @@ struct intel_crtc_state;
struct intel_digital_port;
struct intel_encoder;
+/*
+ * The following enum values must stay fixed, as they match the corresponding
+ * pin assignment fields in the PORT_TX_DFLEXPA1 and TCSS_DDI_STATUS registers.
+ */
+enum intel_tc_pin_assignment {            /* Lanes (a)  Signal/   Cable   Notes   */
+                                          /* DP   USB   Rate (b)  type            */
+	INTEL_TC_PIN_ASSIGNMENT_NONE = 0, /* 4    -     -         -       (c)     */
+	INTEL_TC_PIN_ASSIGNMENT_A,        /* 2/4  0     GEN2      TC->TC  (d,e)   */
+	INTEL_TC_PIN_ASSIGNMENT_B,        /* 1/2  1     GEN2      TC->TC  (d,f,g) */
+	INTEL_TC_PIN_ASSIGNMENT_C,        /* 4    0     DP2       TC->TC  (h)     */
+	INTEL_TC_PIN_ASSIGNMENT_D,        /* 2    1     DP2       TC->TC  (h,g)   */
+	INTEL_TC_PIN_ASSIGNMENT_E,        /* 4    0     DP2       TC->DP          */
+	INTEL_TC_PIN_ASSIGNMENT_F,        /* 2    1     GEN1/DP1  TC->DP  (d,g,i) */
+ /*
+ * (a) - DP unidirectional lanes, each lane using 1 differential signal
+ * pair.
+ * - USB SuperSpeed bidirectional lane, using 2 differential (TX and
+ * RX) signal pairs.
+ * - USB 2.0 (HighSpeed) unidirectional lane, using 1 differential
+ * signal pair. Not indicated, this lane is always present on pin
+ * assignments A-D and never present on pin assignments E/F.
+ * (b) - GEN1: USB 3.1 GEN1 bit rate (5 Gbps) and signaling. This
+ * is used for transferring only a USB stream.
+ * - GEN2: USB 3.1 GEN2 bit rate (10 Gbps) and signaling. This
+ * allows transferring an HBR3 (8.1 Gbps) DP stream.
+ * - DP1: Display Port signaling defined by the DP v1.3 Standard,
+ * with a maximum bit rate of HBR3.
+ * - DP2: Display Port signaling defined by the DP v2.1 Standard,
+ * with a maximum bit rate defined by the DP Alt Mode
+ * v2.1a Standard depending on the cable type as follows:
+ * - Passive (Full-Featured) USB 3.2 GEN1
+ * TC->TC cables (CC3G1-X) : UHBR10
+ * - Passive (Full-Featured) USB 3.2/4 GEN2 and
+ * Thunderbolt Alt Mode GEN2
+ * TC->TC cables (CC3G2-X) all : UHBR10
+ * DP54 logo : UHBR13.5
+ * - Passive (Full-Featured) USB4 GEN3+ and
+ * Thunderbolt Alt Mode GEN3+
+ * TC->TC cables (CC4G3-X) all : UHBR13.5
+ * DP80 logo : UHBR20
+ * - Active Re-Timed or
+ * Active Linear Re-driven (LRD)
+ * USB3.2 GEN1/2 and USB4 GEN2+
+ * TC->TC cables all : HBR3
+ * with DP_BR CTS : UHBR10
+ * DP54 logo : UHBR13.5
+ * DP80 logo : UHBR20
+ * - Passive/Active Re-Timed or
+ * Active Linear Re-driven (LRD)
+ * TC->DP cables with DP_BR CTS/DP8K logo : HBR3
+ * with DP_BR CTS : UHBR10
+ * DP54 logo : UHBR13.5
+ * DP80 logo : UHBR20
+ * (c) Used in TBT-alt/legacy modes and on LNL+ after the sink
+ * disconnected in DP-alt mode.
+ * (d) Only defined by the DP Alt Standard v1.0a, deprecated by v1.0b,
+ * only supported on ICL.
+ * (e) GEN2 passive 1 m cable: 4 DP lanes, GEN2 active cable: 2 DP lanes.
+ * (f) GEN2 passive 1 m cable: 2 DP lanes, GEN2 active cable: 1 DP lane.
+ * (g) These pin assignments are also referred to as (USB/DP)
+ * multifunction or Multifunction Display Port (MFD) modes.
+ * (h) Also used where one end of the cable is a captive connector,
+ * attached to a DP->HDMI/DVI/VGA converter.
+ * (i) The DP end of the cable is a captive connector attached to a
+ * (DP/USB) multifunction dock as defined by the DockPort v1.0a
+ * specification.
+ */
+};
+
bool intel_tc_port_in_tbt_alt_mode(struct intel_digital_port *dig_port);
bool intel_tc_port_in_dp_alt_mode(struct intel_digital_port *dig_port);
bool intel_tc_port_in_legacy_mode(struct intel_digital_port *dig_port);
@@ -19,7 +88,8 @@ bool intel_tc_port_handles_hpd_glitches(struct intel_digital_port *dig_port);
bool intel_tc_port_connected(struct intel_encoder *encoder);
-u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port);
+enum intel_tc_pin_assignment
+intel_tc_port_get_pin_assignment(struct intel_digital_port *dig_port);
int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port);
void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
int required_lanes);
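A hypothetical helper, not part of this patch, showing how the DP-lane column of the table above could be consumed; for assignments A and B the active-cable (lower) counts from notes (e) and (f) are assumed:

static int pin_assignment_max_dp_lanes(enum intel_tc_pin_assignment pin)
{
	switch (pin) {
	case INTEL_TC_PIN_ASSIGNMENT_C:
	case INTEL_TC_PIN_ASSIGNMENT_E:
		return 4;
	case INTEL_TC_PIN_ASSIGNMENT_A:	/* 4 on GEN2 passive cables, note (e) */
	case INTEL_TC_PIN_ASSIGNMENT_D:
	case INTEL_TC_PIN_ASSIGNMENT_F:
		return 2;
	case INTEL_TC_PIN_ASSIGNMENT_B:	/* 2 on GEN2 passive cables, note (f) */
		return 1;
	default:			/* INTEL_TC_PIN_ASSIGNMENT_NONE, note (c) */
		return 4;
	}
}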
diff --git a/drivers/gpu/drm/i915/display/intel_vblank.c b/drivers/gpu/drm/i915/display/intel_vblank.c
index 70ba7aa26bf4..c15234c1d96e 100644
--- a/drivers/gpu/drm/i915/display/intel_vblank.c
+++ b/drivers/gpu/drm/i915/display/intel_vblank.c
@@ -3,9 +3,12 @@
* Copyright © 2022-2023 Intel Corporation
*/
+#include <linux/iopoll.h>
+
#include <drm/drm_vblank.h>
#include "i915_drv.h"
+#include "i915_utils.h"
#include "intel_color.h"
#include "intel_crtc.h"
#include "intel_de.h"
@@ -492,9 +495,14 @@ static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
struct intel_display *display = to_intel_display(crtc);
enum pipe pipe = crtc->pipe;
+ bool is_moving;
+ int ret;
/* Wait for the display line to settle/start moving */
- if (wait_for(pipe_scanline_is_moving(display, pipe) == state, 100))
+ ret = poll_timeout_us(is_moving = pipe_scanline_is_moving(display, pipe),
+ is_moving == state,
+ 500, 100 * 1000, false);
+ if (ret)
drm_err(display->drm,
"pipe %c scanline %s wait timed out\n",
pipe_name(pipe), str_on_off(state));
@@ -724,9 +732,9 @@ int intel_vblank_evade(struct intel_vblank_evade_ctx *evade)
break;
if (!timeout) {
- drm_err(display->drm,
- "Potential atomic update failure on pipe %c\n",
- pipe_name(crtc->pipe));
+ drm_dbg_kms(display->drm,
+ "Potential atomic update failure on pipe %c\n",
+ pipe_name(crtc->pipe));
break;
}
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index 92c04811aa28..70e31520c560 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -37,7 +37,7 @@
#ifndef _INTEL_VBT_DEFS_H_
#define _INTEL_VBT_DEFS_H_
-#include "intel_bios.h"
+#include "intel_dsi_vbt_defs.h"
/* EDID derived structures */
struct bdb_edid_pnp_id {
@@ -437,6 +437,22 @@ enum vbt_gmbus_ddi {
#define BDB_230_VBT_DP_MAX_LINK_RATE_UHBR13P5 6
#define BDB_230_VBT_DP_MAX_LINK_RATE_UHBR20 7
+/* EDP link rate 263+ */
+#define BDB_263_VBT_EDP_LINK_RATE_1_62 BIT_U32(0)
+#define BDB_263_VBT_EDP_LINK_RATE_2_16 BIT_U32(1)
+#define BDB_263_VBT_EDP_LINK_RATE_2_43 BIT_U32(2)
+#define BDB_263_VBT_EDP_LINK_RATE_2_7 BIT_U32(3)
+#define BDB_263_VBT_EDP_LINK_RATE_3_24 BIT_U32(4)
+#define BDB_263_VBT_EDP_LINK_RATE_4_32 BIT_U32(5)
+#define BDB_263_VBT_EDP_LINK_RATE_5_4 BIT_U32(6)
+#define BDB_263_VBT_EDP_LINK_RATE_6_75 BIT_U32(7)
+#define BDB_263_VBT_EDP_LINK_RATE_8_1 BIT_U32(8)
+#define BDB_263_VBT_EDP_LINK_RATE_10 BIT_U32(9)
+#define BDB_263_VBT_EDP_LINK_RATE_13_5 BIT_U32(10)
+#define BDB_263_VBT_EDP_LINK_RATE_20 BIT_U32(11)
+#define BDB_263_VBT_EDP_NUM_RATES 12
+#define BDB_263_VBT_EDP_RATES_MASK GENMASK(BDB_263_VBT_EDP_NUM_RATES - 1, 0)
+
/*
* The child device config, aka the display device data structure, provides a
* description of a port and its configuration on the platform.
@@ -547,6 +563,8 @@ struct child_device_config {
u8 dp_max_link_rate:3; /* 216+ */
u8 dp_max_link_rate_reserved:5; /* 216+ */
u8 efp_index; /* 256+ */
+ u32 edp_data_rate_override:12; /* 263+ */
+ u32 edp_data_rate_override_reserved:20; /* 263+ */
} __packed;
struct bdb_general_definitions {
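A sketch of how the new 263+ field might be decoded, assuming a struct child_device_config *child in hand; the bitfield is a mask of the BDB_263_VBT_EDP_LINK_RATE_* values defined above:

	u32 rates = child->edp_data_rate_override & BDB_263_VBT_EDP_RATES_MASK;

	/* e.g. does the panel override include the 8.1 Gbps rate? */
	if (rates & BDB_263_VBT_EDP_LINK_RATE_8_1)
		; /* ... */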
diff --git a/drivers/gpu/drm/i915/display/intel_wm.c b/drivers/gpu/drm/i915/display/intel_wm.c
index bba82e888db2..f887a664fe22 100644
--- a/drivers/gpu/drm/i915/display/intel_wm.c
+++ b/drivers/gpu/drm/i915/display/intel_wm.c
@@ -5,7 +5,6 @@
#include <linux/debugfs.h>
-#include <drm/drm_file.h>
#include <drm/drm_print.h>
#include "i9xx_wm.h"
@@ -390,15 +389,15 @@ static const struct file_operations i915_cur_wm_latency_fops = {
void intel_wm_debugfs_register(struct intel_display *display)
{
- struct drm_minor *minor = display->drm->primary;
+ struct dentry *debugfs_root = display->drm->debugfs_root;
- debugfs_create_file("i915_pri_wm_latency", 0644, minor->debugfs_root,
+ debugfs_create_file("i915_pri_wm_latency", 0644, debugfs_root,
display, &i915_pri_wm_latency_fops);
- debugfs_create_file("i915_spr_wm_latency", 0644, minor->debugfs_root,
+ debugfs_create_file("i915_spr_wm_latency", 0644, debugfs_root,
display, &i915_spr_wm_latency_fops);
- debugfs_create_file("i915_cur_wm_latency", 0644, minor->debugfs_root,
+ debugfs_create_file("i915_cur_wm_latency", 0644, debugfs_root,
display, &i915_cur_wm_latency_fops);
skl_watermark_debugfs_register(display);
diff --git a/drivers/gpu/drm/i915/display/skl_scaler.c b/drivers/gpu/drm/i915/display/skl_scaler.c
index d77798499c57..c6cccf170ff1 100644
--- a/drivers/gpu/drm/i915/display/skl_scaler.c
+++ b/drivers/gpu/drm/i915/display/skl_scaler.c
@@ -10,6 +10,7 @@
#include "intel_display_regs.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
+#include "intel_display_wa.h"
#include "intel_fb.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
@@ -91,11 +92,9 @@ static void skl_scaler_min_src_size(const struct drm_format_info *format,
}
}
-static void skl_scaler_max_src_size(struct intel_crtc *crtc,
+static void skl_scaler_max_src_size(struct intel_display *display,
int *max_w, int *max_h)
{
- struct intel_display *display = to_intel_display(crtc);
-
if (DISPLAY_VER(display) >= 14) {
*max_w = 4096;
*max_h = 8192;
@@ -134,6 +133,23 @@ static void skl_scaler_max_dst_size(struct intel_crtc *crtc,
}
}
+enum drm_mode_status
+skl_scaler_mode_valid(struct intel_display *display,
+ const struct drm_display_mode *mode,
+ enum intel_output_format output_format,
+ int num_joined_pipes)
+{
+ int max_h, max_w;
+
+ if (num_joined_pipes < 2 && output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
+ skl_scaler_max_src_size(display, &max_w, &max_h);
+ if (mode->hdisplay > max_h)
+ return MODE_NO_420;
+ }
+
+ return MODE_OK;
+}
+
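A hypothetical caller, for illustration only: an encoder .mode_valid() hook would pass its output format and joiner count through, letting single-pipe 4:2:0 modes that exceed the scaler source limits be rejected early:

static enum drm_mode_status
example_mode_valid(struct intel_display *display,
		   const struct drm_display_mode *mode)
{
	/* 4:2:0 output always needs a scaler, so the limit applies */
	return skl_scaler_mode_valid(display, mode,
				     INTEL_OUTPUT_FORMAT_YCBCR420, 1);
}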
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
unsigned int scaler_user, int *scaler_id,
@@ -201,7 +217,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
}
skl_scaler_min_src_size(format, modifier, &min_src_w, &min_src_h);
- skl_scaler_max_src_size(crtc, &max_src_w, &max_src_h);
+ skl_scaler_max_src_size(display, &max_src_w, &max_src_h);
skl_scaler_min_dst_size(&min_dst_w, &min_dst_h);
skl_scaler_max_dst_size(crtc, &max_dst_w, &max_dst_h);
@@ -747,6 +763,9 @@ void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
crtc_state->scaler_state.scaler_id < 0))
return;
+ if (intel_display_wa(display, 14011503117))
+ adl_scaler_ecc_mask(crtc_state);
+
drm_rect_init(&src, 0, 0,
drm_rect_width(&crtc_state->pipe_src) << 16,
drm_rect_height(&crtc_state->pipe_src) << 16);
@@ -923,3 +942,29 @@ void skl_scaler_get_config(struct intel_crtc_state *crtc_state)
else
scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
}
+
+void adl_scaler_ecc_mask(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ if (!crtc_state->pch_pfit.enabled)
+ return;
+
+ intel_de_write(display, XELPD_DISPLAY_ERR_FATAL_MASK, ~0);
+}
+
+void adl_scaler_ecc_unmask(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct intel_crtc_scaler_state *scaler_state =
+ &crtc_state->scaler_state;
+
+ if (scaler_state->scaler_id < 0)
+ return;
+
+ intel_de_write_fw(display,
+ SKL_PS_ECC_STAT(crtc->pipe, scaler_state->scaler_id),
+ 1);
+ intel_de_write(display, XELPD_DISPLAY_ERR_FATAL_MASK, 0);
+}
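The mask/unmask pair presumably brackets scaler programming; the unmask call site is outside this hunk, so the ordering below is an assumption, not a quote of the patch:

	/* Assumed Wa_14011503117 flow (illustration only):
	 *
	 *	adl_scaler_ecc_mask(crtc_state);	// mask fatal ECC errors
	 *	... skl_pfit_enable() scaler writes ...
	 *	adl_scaler_ecc_unmask(crtc_state);	// clear status, unmask
	 */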
diff --git a/drivers/gpu/drm/i915/display/skl_scaler.h b/drivers/gpu/drm/i915/display/skl_scaler.h
index 355ea15260ca..12a19016c5f6 100644
--- a/drivers/gpu/drm/i915/display/skl_scaler.h
+++ b/drivers/gpu/drm/i915/display/skl_scaler.h
@@ -5,10 +5,14 @@
#ifndef INTEL_SCALER_H
#define INTEL_SCALER_H
+enum drm_mode_status;
+struct drm_display_mode;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_display;
struct intel_dsb;
+enum intel_output_format;
struct intel_plane;
struct intel_plane_state;
@@ -32,4 +36,13 @@ void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state);
void skl_scaler_get_config(struct intel_crtc_state *crtc_state);
+enum drm_mode_status
+skl_scaler_mode_valid(struct intel_display *display,
+ const struct drm_display_mode *mode,
+ enum intel_output_format output_format,
+ int num_joined_pipes);
+
+void adl_scaler_ecc_mask(const struct intel_crtc_state *crtc_state);
+
+void adl_scaler_ecc_unmask(const struct intel_crtc_state *crtc_state);
#endif
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
index e20972ddfa09..e13fb781e7b2 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -10,6 +10,7 @@
#include "pxp/intel_pxp.h"
#include "i915_drv.h"
+#include "i915_utils.h"
#include "intel_bo.h"
#include "intel_de.h"
#include "intel_display_irq.h"
@@ -19,6 +20,7 @@
#include "intel_fb.h"
#include "intel_fbc.h"
#include "intel_frontbuffer.h"
+#include "intel_panic.h"
#include "intel_plane.h"
#include "intel_psr.h"
#include "intel_psr_regs.h"
@@ -1166,8 +1168,7 @@ static u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
return plane_ctl;
}
-static u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+static u32 skl_plane_ctl(const struct intel_plane_state *plane_state)
{
struct intel_display *display = to_intel_display(plane_state);
const struct drm_framebuffer *fb = plane_state->hw.fb;
@@ -1225,8 +1226,7 @@ static u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
return plane_color_ctl;
}
-static u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+static u32 glk_plane_color_ctl(const struct intel_plane_state *plane_state)
{
struct intel_display *display = to_intel_display(plane_state);
const struct drm_framebuffer *fb = plane_state->hw.fb;
@@ -1271,12 +1271,6 @@ static u32 skl_surf_address(const struct intel_plane_state *plane_state,
u32 offset = plane_state->view.color_plane[color_plane].offset;
if (intel_fb_uses_dpt(fb)) {
- /*
- * The DPT object contains only one vma, so the VMA's offset
- * within the DPT is always 0.
- */
- drm_WARN_ON(display->drm, plane_state->dpt_vma &&
- intel_dpt_offset(plane_state->dpt_vma));
drm_WARN_ON(display->drm, offset & 0x1fffff);
return offset >> 9;
} else {
@@ -1285,13 +1279,20 @@ static u32 skl_surf_address(const struct intel_plane_state *plane_state,
}
}
-static u32 skl_plane_surf(const struct intel_plane_state *plane_state,
- int color_plane)
+static int icl_plane_color_plane(const struct intel_plane_state *plane_state)
+{
+ if (plane_state->planar_linked_plane && !plane_state->is_y_plane)
+ return 1;
+ else
+ return 0;
+}
+
+static u32 skl_plane_surf_offset(const struct intel_plane_state *plane_state)
{
+ int color_plane = icl_plane_color_plane(plane_state);
u32 plane_surf;
- plane_surf = intel_plane_ggtt_offset(plane_state) +
- skl_surf_address(plane_state, color_plane);
+ plane_surf = skl_surf_address(plane_state, color_plane);
if (plane_state->decrypt)
plane_surf |= PLANE_SURF_DECRYPT;
@@ -1373,14 +1374,6 @@ static void icl_plane_csc_load_black(struct intel_dsb *dsb,
intel_de_write_dsb(display, dsb, PLANE_CSC_POSTOFF(pipe, plane_id, 2), 0);
}
-static int icl_plane_color_plane(const struct intel_plane_state *plane_state)
-{
- if (plane_state->planar_linked_plane && !plane_state->is_y_plane)
- return 1;
- else
- return 0;
-}
-
static void
skl_plane_update_noarm(struct intel_dsb *dsb,
struct intel_plane *plane,
@@ -1476,7 +1469,7 @@ skl_plane_update_arm(struct intel_dsb *dsb,
intel_de_write_dsb(display, dsb, PLANE_CTL(pipe, plane_id),
plane_ctl);
intel_de_write_dsb(display, dsb, PLANE_SURF(pipe, plane_id),
- skl_plane_surf(plane_state, 0));
+ plane_state->surf);
}
static void icl_plane_update_sel_fetch_noarm(struct intel_dsb *dsb,
@@ -1632,7 +1625,6 @@ icl_plane_update_arm(struct intel_dsb *dsb,
struct intel_display *display = to_intel_display(plane);
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
- int color_plane = icl_plane_color_plane(plane_state);
u32 plane_ctl;
plane_ctl = plane_state->ctl |
@@ -1658,7 +1650,7 @@ icl_plane_update_arm(struct intel_dsb *dsb,
intel_de_write_dsb(display, dsb, PLANE_CTL(pipe, plane_id),
plane_ctl);
intel_de_write_dsb(display, dsb, PLANE_SURF(pipe, plane_id),
- skl_plane_surf(plane_state, color_plane));
+ plane_state->surf);
}
static void skl_plane_capture_error(struct intel_crtc *crtc,
@@ -1682,10 +1674,10 @@ skl_plane_async_flip(struct intel_dsb *dsb,
struct intel_display *display = to_intel_display(plane);
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
- u32 plane_ctl = plane_state->ctl, plane_surf;
+ u32 plane_ctl = plane_state->ctl;
+ u32 plane_surf = plane_state->surf;
plane_ctl |= skl_plane_ctl_crtc(crtc_state);
- plane_surf = skl_plane_surf(plane_state, 0);
if (async_flip) {
if (DISPLAY_VER(display) >= 30)
@@ -2363,11 +2355,10 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state,
plane_state->damage = DRM_RECT_INIT(0, 0, 0, 0);
}
- plane_state->ctl = skl_plane_ctl(crtc_state, plane_state);
+ plane_state->ctl = skl_plane_ctl(plane_state);
if (DISPLAY_VER(display) >= 10)
- plane_state->color_ctl = glk_plane_color_ctl(crtc_state,
- plane_state);
+ plane_state->color_ctl = glk_plane_color_ctl(plane_state);
if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
icl_is_hdr_plane(display, plane->id))
@@ -2814,7 +2805,7 @@ static void skl_disable_tiling(struct intel_plane *plane)
intel_de_write_fw(display, PLANE_CTL(plane->pipe, plane->id), plane_ctl);
intel_de_write_fw(display, PLANE_SURF(plane->pipe, plane->id),
- skl_plane_surf(state, 0));
+ state->surf);
}
struct intel_plane *
@@ -2865,6 +2856,8 @@ skl_universal_plane_create(struct intel_display *display,
}
plane->disable_tiling = skl_disable_tiling;
+ plane->surf_offset = skl_plane_surf_offset;
+
if (DISPLAY_VER(display) >= 13)
plane->max_stride = adl_plane_max_stride;
else
@@ -3036,7 +3029,7 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
return;
}
- intel_fb = intel_bo_alloc_framebuffer();
+ intel_fb = intel_framebuffer_alloc();
if (!intel_fb) {
drm_dbg_kms(display->drm, "failed to alloc fb\n");
return;
@@ -3191,21 +3184,18 @@ bool skl_fixup_initial_plane_config(struct intel_crtc *crtc,
to_intel_plane_state(plane->base.state);
enum plane_id plane_id = plane->id;
enum pipe pipe = crtc->pipe;
- u32 base;
if (!plane_state->uapi.visible)
return false;
- base = intel_plane_ggtt_offset(plane_state);
-
/*
* We may have moved the surface to a different
* part of ggtt, make the plane aware of that.
*/
- if (plane_config->base == base)
+ if (plane_config->base == plane_state->surf)
return false;
- intel_de_write(display, PLANE_SURF(pipe, plane_id), base);
+ intel_de_write(display, PLANE_SURF(pipe, plane_id), plane_state->surf);
return true;
}
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
index 222c069fdadb..d74cbb43ae6f 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.c
+++ b/drivers/gpu/drm/i915/display/skl_watermark.c
@@ -6,7 +6,6 @@
#include <linux/debugfs.h>
#include <drm/drm_blend.h>
-#include <drm/drm_file.h>
#include <drm/drm_print.h>
#include "soc/intel_dram.h"
@@ -1389,7 +1388,7 @@ skl_allocate_plane_ddb(struct skl_plane_ddb_iter *iter,
{
u16 size, extra = 0;
- if (data_rate) {
+ if (data_rate && iter->data_rate) {
extra = min_t(u16, iter->size,
DIV64_U64_ROUND_UP(iter->size * data_rate,
iter->data_rate));
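The added iter->data_rate test guards the division feeding extra; a note on the failure mode, as a sketch:

	/* With data_rate != 0 but iter->data_rate == 0 the old code evaluated
	 *	DIV64_U64_ROUND_UP(iter->size * data_rate, 0)
	 * i.e. a divide by zero; the combined test now leaves extra == 0. */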
@@ -2273,6 +2272,11 @@ static int skl_max_wm0_lines(const struct intel_crtc_state *crtc_state)
return wm0_lines;
}
+/*
+ * TODO: If we use PKG_C_LATENCY to allow C-states when the delayed vblank
+ * size is too small for the package C exit latency, we need to notify PSR
+ * about this scenario so that Wa_16025596647 can be applied.
+ */
static int skl_max_wm_level_for_vblank(struct intel_crtc_state *crtc_state,
int wm0_lines)
{
@@ -3205,12 +3209,12 @@ adjust_wm_latency(struct intel_display *display,
}
/*
- * WA Level-0 adjustment for 16GB DIMMs: SKL+
+ * WA Level-0 adjustment for 16Gb DIMMs: SKL+
* If we could not get dimm info enable this WA to prevent from
- * any underrun. If not able to get Dimm info assume 16GB dimm
+ * any underrun. If not able to get DIMM info assume 16Gb DIMM
* to avoid any underrun.
*/
- if (!display->platform.dg2 && dram_info->wm_lv_0_adjust_needed)
+ if (!display->platform.dg2 && dram_info->has_16gb_dimms)
wm[0] += 1;
}
@@ -4033,14 +4037,14 @@ DEFINE_SHOW_ATTRIBUTE(intel_sagv_status);
void skl_watermark_debugfs_register(struct intel_display *display)
{
- struct drm_minor *minor = display->drm->primary;
+ struct dentry *debugfs_root = display->drm->debugfs_root;
if (HAS_IPC(display))
- debugfs_create_file("i915_ipc_status", 0644, minor->debugfs_root,
+ debugfs_create_file("i915_ipc_status", 0644, debugfs_root,
display, &skl_watermark_ipc_status_fops);
if (HAS_SAGV(display))
- debugfs_create_file("i915_sagv_status", 0444, minor->debugfs_root,
+ debugfs_create_file("i915_sagv_status", 0444, debugfs_root,
display, &intel_sagv_status_fops);
}
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index 6d9f3312de7e..c9a53fde79c4 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -761,7 +761,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
if (display->platform.valleyview || display->platform.cherryview) {
/* Disable DPOunit clock gating, can stall pipe */
- intel_de_rmw(display, DSPCLK_GATE_D(display),
+ intel_de_rmw(display, VLV_DSPCLK_GATE_D,
0, DPOUNIT_CLOCK_GATE_DISABLE);
}
@@ -918,7 +918,7 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
} else {
vlv_dsi_pll_disable(encoder);
- intel_de_rmw(display, DSPCLK_GATE_D(display),
+ intel_de_rmw(display, VLV_DSPCLK_GATE_D,
DPOUNIT_CLOCK_GATE_DISABLE, 0);
}
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
index d42b61e6f076..f078b9cda96c 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
@@ -25,12 +25,12 @@
* Yogesh Mohan Marimuthu <yogesh.mohan.marimuthu@intel.com>
*/
+#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/string_helpers.h>
#include <drm/drm_print.h>
-#include "i915_utils.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dsi.h"
@@ -142,11 +142,9 @@ static int vlv_dsi_pclk(struct intel_encoder *encoder,
pll_div &= DSI_PLL_M1_DIV_MASK;
pll_div = pll_div >> DSI_PLL_M1_DIV_SHIFT;
- while (pll_ctl) {
- pll_ctl = pll_ctl >> 1;
- p++;
- }
- p--;
+ p = fls(pll_ctl);
+ if (p)
+ p--;
if (!p) {
drm_err(display->drm, "wrong P1 divisor\n");
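The rewrite relies on fls(x) returning the 1-based index of the most significant set bit, or 0 for x == 0; a worked check of both forms, as an illustration:

	/* pll_ctl == 0x4: old loop shifts three times (p = 3), then p-- gives
	 * p = 2; new form: fls(0x4) == 3, minus one, p = 2. Same divisor.
	 *
	 * pll_ctl == 0: the old loop never ran, so p-- left p == -1 and the
	 * !p sanity check was bypassed; fls(0) == 0 keeps p == 0, so the
	 * "wrong P1 divisor" error path is now taken as intended. */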
@@ -216,6 +214,8 @@ void vlv_dsi_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *config)
{
struct intel_display *display = to_intel_display(encoder);
+ u32 val;
+ int ret;
drm_dbg_kms(display->drm, "\n");
@@ -233,9 +233,10 @@ void vlv_dsi_pll_enable(struct intel_encoder *encoder,
vlv_cck_write(display->drm, CCK_REG_DSI_PLL_CONTROL, config->dsi_pll.ctrl);
- if (wait_for(vlv_cck_read(display->drm, CCK_REG_DSI_PLL_CONTROL) &
- DSI_PLL_LOCK, 20)) {
-
+ ret = poll_timeout_us(val = vlv_cck_read(display->drm, CCK_REG_DSI_PLL_CONTROL),
+ val & DSI_PLL_LOCK,
+ 500, 20 * 1000, false);
+ if (ret) {
vlv_cck_put(display->drm);
drm_err(display->drm, "DSI PLL lock failed\n");
return;
@@ -262,6 +263,11 @@ void vlv_dsi_pll_disable(struct intel_encoder *encoder)
vlv_cck_put(display->drm);
}
+static bool has_dsic_clock(struct intel_display *display)
+{
+ return display->platform.broxton;
+}
+
bool bxt_dsi_pll_is_enabled(struct intel_display *display)
{
bool enabled;
@@ -284,7 +290,7 @@ bool bxt_dsi_pll_is_enabled(struct intel_display *display)
* causes a system hang.
*/
val = intel_de_read(display, BXT_DSI_PLL_CTL);
- if (display->platform.geminilake) {
+ if (!has_dsic_clock(display)) {
if (!(val & BXT_DSIA_16X_MASK)) {
drm_dbg_kms(display->drm,
"Invalid PLL divider (%08x)\n", val);
@@ -358,6 +364,8 @@ u32 bxt_dsi_get_pclk(struct intel_encoder *encoder,
u32 pclk;
config->dsi_pll.ctrl = intel_de_read(display, BXT_DSI_PLL_CTL);
+ if (!has_dsic_clock(display))
+ config->dsi_pll.ctrl &= ~BXT_DSIC_16X_MASK;
pclk = bxt_dsi_pclk(encoder, config);
@@ -514,7 +522,9 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder,
* Spec says both have to be programmed, even if one is not getting
* used. Configure MIPI_CLOCK_CTL dividers in modeset
*/
- config->dsi_pll.ctrl = dsi_ratio | BXT_DSIA_16X_BY2 | BXT_DSIC_16X_BY2;
+ config->dsi_pll.ctrl = dsi_ratio | BXT_DSIA_16X_BY2;
+ if (has_dsic_clock(display))
+ config->dsi_pll.ctrl |= BXT_DSIC_16X_BY2;
/* As per recommendation from hardware team,
* Prog PVD ratio =1 if dsi ratio <= 50
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 15835952352e..ed6599694835 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -2158,18 +2158,12 @@ static int set_context_image(struct i915_gem_context *ctx,
goto out_ce;
}
- state = kmalloc(ce->engine->context_size, GFP_KERNEL);
- if (!state) {
- ret = -ENOMEM;
+ state = memdup_user(u64_to_user_ptr(user.image), ce->engine->context_size);
+ if (IS_ERR(state)) {
+ ret = PTR_ERR(state);
goto out_ce;
}
- if (copy_from_user(state, u64_to_user_ptr(user.image),
- ce->engine->context_size)) {
- ret = -EFAULT;
- goto out_state;
- }
-
shmem_state = shmem_create_from_data(ce->engine->name,
state, ce->engine->context_size);
if (IS_ERR(shmem_state)) {
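memdup_user() folds the kmalloc()/copy_from_user()/unwind triple into one call; the general shape of the conversion, as a sketch (size stands in for ce->engine->context_size):

	void *buf;

	/* Returns a kernel copy of the user buffer, or ERR_PTR() on
	 * allocation failure or fault; no separate kfree() on error. */
	buf = memdup_user(u64_to_user_ptr(user.image), size);
	if (IS_ERR(buf))
		return PTR_ERR(buf);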
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index ca7e9216934a..39c7c32e1e74 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -182,7 +182,7 @@ enum {
* the object. Simple! ... The relocation entries are stored in user memory
* and so to access them we have to copy them into a local buffer. That copy
* has to avoid taking any pagefaults as they may lead back to a GEM object
- * requiring the struct_mutex (i.e. recursive deadlock). So once again we split
+ * requiring the vm->mutex (i.e. recursive deadlock). So once again we split
* the relocation into multiple passes. First we try to do everything within an
* atomic context (avoid the pagefaults) which requires that we never wait. If
* we detect that we may wait, or if we need to fault, then we have to fallback
@@ -1382,8 +1382,9 @@ static void clflush_write32(u32 *addr, u32 value, unsigned int flushes)
*/
if (flushes & CLFLUSH_AFTER)
drm_clflush_virt_range(addr, sizeof(*addr));
- } else
+ } else {
*addr = value;
+ }
}
static u64
@@ -1567,36 +1568,36 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
do {
u64 offset = eb_relocate_entry(eb, ev, r);
- if (likely(offset == 0)) {
- } else if ((s64)offset < 0) {
+ if (likely(offset == 0))
+ continue;
+
+ if ((s64)offset < 0) {
remain = (int)offset;
goto out;
- } else {
- /*
- * Note that reporting an error now
- * leaves everything in an inconsistent
- * state as we have *already* changed
- * the relocation value inside the
- * object. As we have not changed the
- * reloc.presumed_offset or will not
- * change the execobject.offset, on the
- * call we may not rewrite the value
- * inside the object, leaving it
- * dangling and causing a GPU hang. Unless
- * userspace dynamically rebuilds the
- * relocations on each execbuf rather than
- * presume a static tree.
- *
- * We did previously check if the relocations
- * were writable (access_ok), an error now
- * would be a strange race with mprotect,
- * having already demonstrated that we
- * can read from this userspace address.
- */
- offset = gen8_canonical_addr(offset & ~UPDATE);
- __put_user(offset,
- &urelocs[r - stack].presumed_offset);
}
+ /*
+ * Note that reporting an error now
+ * leaves everything in an inconsistent
+ * state as we have *already* changed
+ * the relocation value inside the
+ * object. As we have not changed the
+ * reloc.presumed_offset or will not
+ * change the execobject.offset, on the
+ * call we may not rewrite the value
+ * inside the object, leaving it
+ * dangling and causing a GPU hang. Unless
+ * userspace dynamically rebuilds the
+ * relocations on each execbuf rather than
+ * presume a static tree.
+ *
+ * We did previously check if the relocations
+ * were writable (access_ok), an error now
+ * would be a strange race with mprotect,
+ * having already demonstrated that we
+ * can read from this userspace address.
+ */
+ offset = gen8_canonical_addr(offset & ~UPDATE);
+ __put_user(offset, &urelocs[r - stack].presumed_offset);
} while (r++, --count);
urelocs += ARRAY_SIZE(stack);
} while (remain);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 1f38e367c60b..478011e5ecb3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -459,8 +459,8 @@ static void i915_gem_free_object(struct drm_gem_object *gem_obj)
atomic_inc(&i915->mm.free_count);
/*
- * Since we require blocking on struct_mutex to unbind the freed
- * object from the GPU before releasing resources back to the
+ * Since we require blocking on drm_i915_gem_object->vma.lock to unbind
+ * the freed object from the GPU before releasing resources back to the
* system, we can not do that directly from the RCU callback (which may
* be a softirq context), but must instead then defer that work onto a
* kthread. We use the RCU callback rather than move the freed object
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 565f8fa330db..148034ef504d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -16,9 +16,9 @@
#include "i915_gem_ww.h"
#include "i915_vma_types.h"
-struct drm_scanout_buffer;
enum intel_region_id;
-struct intel_framebuffer;
+struct drm_scanout_buffer;
+struct intel_panic;
#define obj_to_i915(obj__) to_i915((obj__)->base.dev)
@@ -693,9 +693,10 @@ i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
int i915_gem_object_truncate(struct drm_i915_gem_object *obj);
-struct intel_framebuffer *i915_gem_object_alloc_framebuffer(void);
-int i915_gem_object_panic_setup(struct drm_scanout_buffer *sb);
-void i915_gem_object_panic_finish(struct intel_framebuffer *fb);
+struct intel_panic *i915_gem_object_alloc_panic(void);
+int i915_gem_object_panic_setup(struct intel_panic *panic, struct drm_scanout_buffer *sb,
+ struct drm_gem_object *_obj, bool panic_tiling);
+void i915_gem_object_panic_finish(struct intel_panic *panic);
/**
* i915_gem_object_pin_map - return a contiguous mapping of the entire object
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index c16a57160b26..3f09cbce05bb 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -357,23 +357,13 @@ static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj,
return vaddr ?: ERR_PTR(-ENOMEM);
}
-struct i915_panic_data {
+struct intel_panic {
struct page **pages;
int page;
void *vaddr;
};
-struct i915_framebuffer {
- struct intel_framebuffer base;
- struct i915_panic_data panic;
-};
-
-static inline struct i915_panic_data *to_i915_panic_data(struct intel_framebuffer *fb)
-{
- return &container_of_const(fb, struct i915_framebuffer, base)->panic;
-}
-
-static void i915_panic_kunmap(struct i915_panic_data *panic)
+static void i915_panic_kunmap(struct intel_panic *panic)
{
if (panic->vaddr) {
drm_clflush_virt_range(panic->vaddr, PAGE_SIZE);
@@ -420,7 +410,7 @@ static void i915_gem_object_panic_page_set_pixel(struct drm_scanout_buffer *sb,
unsigned int new_page;
unsigned int offset;
struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private;
- struct i915_panic_data *panic = to_i915_panic_data(fb);
+ struct intel_panic *panic = fb->panic;
if (fb->panic_tiling)
offset = fb->panic_tiling(sb->width, x, y);
@@ -441,14 +431,13 @@ static void i915_gem_object_panic_page_set_pixel(struct drm_scanout_buffer *sb,
}
}
-struct intel_framebuffer *i915_gem_object_alloc_framebuffer(void)
+struct intel_panic *i915_gem_object_alloc_panic(void)
{
- struct i915_framebuffer *i915_fb;
+ struct intel_panic *panic;
+
+ panic = kzalloc(sizeof(*panic), GFP_KERNEL);
- i915_fb = kzalloc(sizeof(*i915_fb), GFP_KERNEL);
- if (i915_fb)
- return &i915_fb->base;
- return NULL;
+ return panic;
}
/*
@@ -456,12 +445,11 @@ struct intel_framebuffer *i915_gem_object_alloc_framebuffer(void)
* Use current vaddr if it exists, or setup a list of pages.
* pfn is not supported yet.
*/
-int i915_gem_object_panic_setup(struct drm_scanout_buffer *sb)
+int i915_gem_object_panic_setup(struct intel_panic *panic, struct drm_scanout_buffer *sb,
+ struct drm_gem_object *_obj, bool panic_tiling)
{
enum i915_map_type has_type;
- struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private;
- struct i915_panic_data *panic = to_i915_panic_data(fb);
- struct drm_i915_gem_object *obj = to_intel_bo(intel_fb_bo(&fb->base));
+ struct drm_i915_gem_object *obj = to_intel_bo(_obj);
void *ptr;
ptr = page_unpack_bits(obj->mm.mapping, &has_type);
@@ -471,7 +459,7 @@ int i915_gem_object_panic_setup(struct drm_scanout_buffer *sb)
else
iosys_map_set_vaddr(&sb->map[0], ptr);
- if (fb->panic_tiling)
+ if (panic_tiling)
sb->set_pixel = i915_gem_object_panic_map_set_pixel;
return 0;
}
@@ -486,10 +474,8 @@ int i915_gem_object_panic_setup(struct drm_scanout_buffer *sb)
return -EOPNOTSUPP;
}
-void i915_gem_object_panic_finish(struct intel_framebuffer *fb)
+void i915_gem_object_panic_finish(struct intel_panic *panic)
{
- struct i915_panic_data *panic = to_i915_panic_data(fb);
-
i915_panic_kunmap(panic);
panic->page = -1;
kfree(panic->pages);
@@ -779,7 +765,7 @@ __i915_gem_object_get_page(struct drm_i915_gem_object *obj, pgoff_t n)
GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
sg = i915_gem_object_get_sg(obj, n, &offset);
- return nth_page(sg_page(sg), offset);
+ return sg_page(sg) + offset;
}
/* Like i915_gem_object_get_page(), but mark the returned page dirty */
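The assumption behind dropping nth_page(), as an illustration: the pages backing a single scatterlist entry have a contiguous memmap, so plain pointer arithmetic suffices:

	/* For an sg entry spanning N pages and 0 <= offset < N:
	 *	sg_page(sg) + offset == nth_page(sg_page(sg), offset)
	 * which is what the lookup above relies on. */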
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index b81e67504bbe..7a3e74a6676e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -170,7 +170,7 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww,
* Also note that although these lists do not hold a reference to
* the object we can safely grab one here: The final object
* unreferencing and the bound_list are both protected by the
- * dev->struct_mutex and so we won't ever be able to observe an
+ * i915->mm.obj_lock and so we won't ever be able to observe an
* object on the bound_list with a reference count equals 0.
*/
for (phase = phases; phase->list; phase++) {
@@ -185,7 +185,7 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww,
/*
* We serialize our access to unreferenced objects through
- * the use of the struct_mutex. While the objects are not
+ * the use of the obj_lock. While the objects are not
* yet freed (due to RCU then a workqueue) we still want
* to be able to shrink their pages, so they remain on
* the unbound/bound list until actually freed.
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
index 991666fd9f85..54829801d3f7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
@@ -217,10 +217,10 @@ static unsigned long to_wait_timeout(s64 timeout_ns)
*
* The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
* non-zero timeout parameter the wait ioctl will wait for the given number of
- * nanoseconds on an object becoming unbusy. Since the wait itself does so
- * without holding struct_mutex the object may become re-busied before this
- * function completes. A similar but shorter * race condition exists in the busy
- * ioctl
+ * nanoseconds on an object becoming unbusy. Since the wait occurs without
+ * holding a global or exclusive lock, the object may become re-busied before
+ * this function completes. A similar but shorter race condition exists
+ * in the busy ioctl.
*/
int
i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
diff --git a/drivers/gpu/drm/i915/gem/i915_gemfs.c b/drivers/gpu/drm/i915/gem/i915_gemfs.c
index a09e2eb47175..8f13ec4ff0d0 100644
--- a/drivers/gpu/drm/i915/gem/i915_gemfs.c
+++ b/drivers/gpu/drm/i915/gem/i915_gemfs.c
@@ -11,11 +11,6 @@
#include "i915_gemfs.h"
#include "i915_utils.h"
-static int add_param(struct fs_context *fc, const char *key, const char *val)
-{
- return vfs_parse_fs_string(fc, key, val, strlen(val));
-}
-
void i915_gemfs_init(struct drm_i915_private *i915)
{
struct file_system_type *type;
@@ -48,9 +43,9 @@ void i915_gemfs_init(struct drm_i915_private *i915)
fc = fs_context_for_mount(type, SB_KERNMOUNT);
if (IS_ERR(fc))
goto err;
- ret = add_param(fc, "source", "tmpfs");
+ ret = vfs_parse_fs_string(fc, "source", "tmpfs");
if (!ret)
- ret = add_param(fc, "huge", "within_size");
+ ret = vfs_parse_fs_string(fc, "huge", "within_size");
if (!ret)
gemfs = fc_mount_longterm(fc);
put_fs_context(fc);
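The add_param() wrapper existed only to supply strlen(val); it can go because vfs_parse_fs_string() now takes the NUL-terminated value directly. Signature assumed from the call sites above:

	int vfs_parse_fs_string(struct fs_context *fc, const char *key,
				const char *value);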
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index 86d9d2fcb6a6..539c620364e3 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -5,7 +5,7 @@
#include "i915_selftest.h"
-#include "display/intel_display_core.h"
+#include "display/intel_display_device.h"
#include "gt/intel_context.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_engine_user.h"
@@ -110,6 +110,7 @@ struct tiled_blits {
static bool fastblit_supports_x_tiling(const struct drm_i915_private *i915)
{
+ struct intel_display *display = i915->display;
int gen = GRAPHICS_VER(i915);
/* XY_FAST_COPY_BLT does not exist on pre-gen9 platforms */
@@ -121,7 +122,7 @@ static bool fastblit_supports_x_tiling(const struct drm_i915_private *i915)
if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 55))
return false;
- return HAS_DISPLAY(i915);
+ return intel_display_device_present(display);
}
static bool fast_blit_ok(const struct blit_buffer *buf)
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 9c3f17e51885..78734c404a6d 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -1096,32 +1096,20 @@ static int ___igt_mmap_migrate(struct drm_i915_private *i915,
unsigned long addr,
bool unfaultable)
{
- struct vm_area_struct *area;
- int err = 0, i;
+ int i;
pr_info("igt_mmap(%s, %d) @ %lx\n",
obj->mm.region->name, I915_MMAP_TYPE_FIXED, addr);
- mmap_read_lock(current->mm);
- area = vma_lookup(current->mm, addr);
- mmap_read_unlock(current->mm);
- if (!area) {
- pr_err("%s: Did not create a vm_area_struct for the mmap\n",
- obj->mm.region->name);
- err = -EINVAL;
- goto out_unmap;
- }
-
for (i = 0; i < obj->base.size / sizeof(u32); i++) {
u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux)));
u32 x;
if (get_user(x, ux)) {
- err = -EFAULT;
if (!unfaultable) {
pr_err("%s: Unable to read from mmap, offset:%zd\n",
obj->mm.region->name, i * sizeof(x));
- goto out_unmap;
+ return -EFAULT;
}
continue;
@@ -1130,37 +1118,29 @@ static int ___igt_mmap_migrate(struct drm_i915_private *i915,
if (unfaultable) {
pr_err("%s: Faulted unmappable memory\n",
obj->mm.region->name);
- err = -EINVAL;
- goto out_unmap;
+ return -EINVAL;
}
if (x != expand32(POISON_INUSE)) {
pr_err("%s: Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n",
obj->mm.region->name,
i * sizeof(x), x, expand32(POISON_INUSE));
- err = -EINVAL;
- goto out_unmap;
+ return -EINVAL;
}
x = expand32(POISON_FREE);
if (put_user(x, ux)) {
pr_err("%s: Unable to write to mmap, offset:%zd\n",
obj->mm.region->name, i * sizeof(x));
- err = -EFAULT;
- goto out_unmap;
+ return -EFAULT;
}
}
- if (unfaultable) {
- if (err == -EFAULT)
- err = 0;
- } else {
- obj->flags &= ~I915_BO_ALLOC_GPU_ONLY;
- err = wc_check(obj);
- }
-out_unmap:
- vm_munmap(addr, obj->base.size);
- return err;
+ if (unfaultable)
+ return 0;
+
+ obj->flags &= ~I915_BO_ALLOC_GPU_ONLY;
+ return wc_check(obj);
}
#define IGT_MMAP_MIGRATE_TOPDOWN (1 << 0)
@@ -1176,6 +1156,7 @@ static int __igt_mmap_migrate(struct intel_memory_region **placements,
struct drm_i915_private *i915 = placements[0]->i915;
struct drm_i915_gem_object *obj;
struct i915_request *rq = NULL;
+ struct vm_area_struct *area;
unsigned long addr;
LIST_HEAD(objects);
u64 offset;
@@ -1207,20 +1188,30 @@ static int __igt_mmap_migrate(struct intel_memory_region **placements,
goto out_put;
}
+ mmap_read_lock(current->mm);
+ area = vma_lookup(current->mm, addr);
+ mmap_read_unlock(current->mm);
+ if (!area) {
+ pr_err("%s: Did not create a vm_area_struct for the mmap\n",
+ obj->mm.region->name);
+ err = -EINVAL;
+ goto out_addr;
+ }
+
if (flags & IGT_MMAP_MIGRATE_FILL) {
err = igt_fill_mappable(placements[0], &objects);
if (err)
- goto out_put;
+ goto out_addr;
}
err = i915_gem_object_lock(obj, NULL);
if (err)
- goto out_put;
+ goto out_addr;
err = i915_gem_object_pin_pages(obj);
if (err) {
i915_gem_object_unlock(obj);
- goto out_put;
+ goto out_addr;
}
err = intel_context_migrate_clear(to_gt(i915)->migrate.context, NULL,
@@ -1228,7 +1219,7 @@ static int __igt_mmap_migrate(struct intel_memory_region **placements,
i915_gem_object_is_lmem(obj),
expand32(POISON_INUSE), &rq);
i915_gem_object_unpin_pages(obj);
- if (rq) {
+ if (rq && !err) {
err = dma_resv_reserve_fences(obj->base.resv, 1);
if (!err)
dma_resv_add_fence(obj->base.resv, &rq->fence,
@@ -1237,7 +1228,7 @@ static int __igt_mmap_migrate(struct intel_memory_region **placements,
}
i915_gem_object_unlock(obj);
if (err)
- goto out_put;
+ goto out_addr;
if (flags & IGT_MMAP_MIGRATE_EVICTABLE)
igt_make_evictable(&objects);
@@ -1245,16 +1236,16 @@ static int __igt_mmap_migrate(struct intel_memory_region **placements,
if (flags & IGT_MMAP_MIGRATE_FAIL_GPU) {
err = i915_gem_object_lock(obj, NULL);
if (err)
- goto out_put;
+ goto out_addr;
/*
- * Ensure we only simulate the gpu failuire when faulting the
+ * Ensure we only simulate the gpu failure when faulting the
* pages.
*/
err = i915_gem_object_wait_moving_fence(obj, true);
i915_gem_object_unlock(obj);
if (err)
- goto out_put;
+ goto out_addr;
i915_ttm_migrate_set_failure_modes(true, false);
}
@@ -1298,6 +1289,9 @@ static int __igt_mmap_migrate(struct intel_memory_region **placements,
}
}
+out_addr:
+ vm_munmap(addr, obj->base.size);
+
out_put:
i915_gem_object_put(obj);
igt_close_objects(i915, &objects);
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index 98c7f6052069..10070ee4d74c 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -14,7 +14,6 @@
#include "i915_active_types.h"
#include "i915_sw_fence.h"
-#include "i915_utils.h"
#include "intel_engine_types.h"
#include "intel_sseu.h"
#include "intel_wakeref.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 03baa7fa0a27..7f389cb0bde4 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -106,14 +106,18 @@
* preemption, but just sampling the new tail pointer).
*
*/
+
#include <linux/interrupt.h>
#include <linux/string_helpers.h>
+#include "gen8_engine_cs.h"
#include "i915_drv.h"
+#include "i915_list_util.h"
#include "i915_reg.h"
+#include "i915_timer_util.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
-#include "gen8_engine_cs.h"
+#include "i915_wait_util.h"
#include "intel_breadcrumbs.h"
#include "intel_context.h"
#include "intel_engine_heartbeat.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
index 86b5a9ba323d..c7befc5c20d0 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
@@ -7,6 +7,7 @@
#include "gem/i915_gem_object.h"
#include "i915_drv.h"
+#include "i915_list_util.h"
#include "intel_engine_pm.h"
#include "intel_gt_buffer_pool.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
index 6c499692d61e..88b147fa5cb1 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
@@ -148,7 +148,7 @@ static u32 gen4_read_clock_frequency(struct intel_uncore *uncore)
*
* Testing on actual hardware has shown there is no /16.
*/
- return DIV_ROUND_CLOSEST(i9xx_fsb_freq(uncore->i915), 4) * 1000;
+ return DIV_ROUND_CLOSEST(intel_fsb_freq(uncore->i915), 4) * 1000;
}
static u32 read_clock_frequency(struct intel_uncore *uncore)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c
index 4dc23b8d3aa2..dcd40b30a96b 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c
@@ -82,14 +82,15 @@ static void gt_debugfs_register(struct intel_gt *gt, struct dentry *root)
void intel_gt_debugfs_register(struct intel_gt *gt)
{
+ struct dentry *debugfs_root = gt->i915->drm.debugfs_root;
struct dentry *root;
char gtname[4];
- if (!gt->i915->drm.primary->debugfs_root)
+ if (!debugfs_root)
return;
snprintf(gtname, sizeof(gtname), "gt%u", gt->info.id);
- root = debugfs_create_dir(gtname, gt->i915->drm.primary->debugfs_root);
+ root = debugfs_create_dir(gtname, debugfs_root);
if (IS_ERR(root))
return;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
index a60822e2b5d4..c3afa321fe30 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
@@ -4,6 +4,7 @@
*/
#include "i915_drv.h"
+#include "i915_wait_util.h"
#include "intel_gt.h"
#include "intel_gt_mcr.h"
#include "intel_gt_print.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index 9ca42589da4d..bf38cc5fe872 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -341,7 +341,7 @@ static int vlv_rc6_init(struct intel_rc6 *rc6)
return PTR_ERR(pctx);
}
- GEM_BUG_ON(range_overflows_end_t(u64,
+ GEM_BUG_ON(range_end_overflows_t(u64,
i915->dsm.stolen.start,
pctx->stolen->start,
U32_MAX));
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 4a1675dea1c7..41b5036dc538 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -9,18 +9,17 @@
#include "display/intel_display_reset.h"
#include "display/intel_overlay.h"
-
#include "gem/i915_gem_context.h"
-
#include "gt/intel_gt_regs.h"
-
#include "gt/uc/intel_gsc_fw.h"
+#include "uc/intel_guc.h"
#include "i915_drv.h"
#include "i915_file_private.h"
#include "i915_gpu_error.h"
#include "i915_irq.h"
#include "i915_reg.h"
+#include "i915_wait_util.h"
#include "intel_breadcrumbs.h"
#include "intel_engine_pm.h"
#include "intel_engine_regs.h"
@@ -32,8 +31,6 @@
#include "intel_pci_config.h"
#include "intel_reset.h"
-#include "uc/intel_guc.h"
-
#define RESET_MAX_RETRIES 3
static void client_mark_guilty(struct i915_gem_context *ctx, bool banned)
diff --git a/drivers/gpu/drm/i915/gt/intel_reset_types.h b/drivers/gpu/drm/i915/gt/intel_reset_types.h
index 4f5fd393af6f..ee4eb574a219 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_reset_types.h
@@ -20,7 +20,7 @@ struct intel_reset {
* FENCE registers).
*
* #I915_RESET_ENGINE[num_engines] - Since the driver doesn't need to
- * acquire the struct_mutex to reset an engine, we need an explicit
+ * acquire a global lock to reset an engine, we need an explicit
* flag to prevent two concurrent reset attempts in the same engine.
* As the number of engines continues to grow, allocate the flags from
* the most significant bits.
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 2a6d79abf25b..8314a4b0505e 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -15,18 +15,19 @@
#include "i915_irq.h"
#include "i915_mitigations.h"
#include "i915_reg.h"
+#include "i915_wait_util.h"
#include "intel_breadcrumbs.h"
#include "intel_context.h"
+#include "intel_engine_heartbeat.h"
+#include "intel_engine_pm.h"
#include "intel_engine_regs.h"
#include "intel_gt.h"
#include "intel_gt_irq.h"
+#include "intel_gt_print.h"
#include "intel_gt_regs.h"
#include "intel_reset.h"
#include "intel_ring.h"
#include "shmem_utils.h"
-#include "intel_engine_heartbeat.h"
-#include "intel_engine_pm.h"
-#include "intel_gt_print.h"
/* Rough estimate of the typical request size, performing a flush,
* set-context and then emitting the batch.
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index 0b35fdd461d4..4da94098bd3e 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -9,9 +9,12 @@
#include "display/intel_display.h"
#include "display/intel_display_rps.h"
+#include "soc/intel_dram.h"
+
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
+#include "i915_wait_util.h"
#include "intel_breadcrumbs.h"
#include "intel_gt.h"
#include "intel_gt_clock_utils.h"
@@ -276,20 +279,24 @@ static void gen5_rps_init(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
struct intel_uncore *uncore = rps_to_uncore(rps);
+ unsigned int fsb_freq, mem_freq;
u8 fmax, fmin, fstart;
u32 rgvmodectl;
int c_m, i;
- if (i915->fsb_freq <= 3200000)
+ fsb_freq = intel_fsb_freq(i915);
+ mem_freq = intel_mem_freq(i915);
+
+ if (fsb_freq <= 3200000)
c_m = 0;
- else if (i915->fsb_freq <= 4800000)
+ else if (fsb_freq <= 4800000)
c_m = 1;
else
c_m = 2;
for (i = 0; i < ARRAY_SIZE(cparams); i++) {
if (cparams[i].i == c_m &&
- cparams[i].t == DIV_ROUND_CLOSEST(i915->mem_freq, 1000)) {
+ cparams[i].t == DIV_ROUND_CLOSEST(mem_freq, 1000)) {
rps->ips.m = cparams[i].m;
rps->ips.c = cparams[i].c;
break;
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.h b/drivers/gpu/drm/i915/gt/intel_timeline.h
index 57308c4d664a..85b43f9b9d95 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.h
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.h
@@ -9,6 +9,7 @@
#include <linux/lockdep.h>
#include "i915_active.h"
+#include "i915_list_util.h"
#include "i915_syncmap.h"
#include "intel_timeline_types.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 5a95f06900b5..7d486dfa2fc1 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -337,12 +337,26 @@ static void gen6_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
+
+ /* WaDisable_RenderCache_OperationalFlush:snb */
+ wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE);
}
static void gen7_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
+ /* WaDisable_RenderCache_OperationalFlush:ivb,vlv,hsw */
+ wa_masked_dis(wal, CACHE_MODE_0_GEN7, RC_OP_FLUSH_ENABLE);
+
+ /*
+ * BSpec says this must be set, even though
+ * WaDisable4x2SubspanOptimization:ivb,hsw
+ * WaDisable4x2SubspanOptimization isn't listed for VLV.
+ */
+ wa_masked_en(wal,
+ CACHE_MODE_1,
+ PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
}
static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
@@ -2567,18 +2581,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
RING_MODE_GEN7(RENDER_RING_BASE),
GFX_TLB_INVALIDATE_EXPLICIT | GFX_REPLAY_MODE);
- /* WaDisable_RenderCache_OperationalFlush:ivb,vlv,hsw */
- wa_masked_dis(wal, CACHE_MODE_0_GEN7, RC_OP_FLUSH_ENABLE);
-
- /*
- * BSpec says this must be set, even though
- * WaDisable4x2SubspanOptimization:ivb,hsw
- * WaDisable4x2SubspanOptimization isn't listed for VLV.
- */
- wa_masked_en(wal,
- CACHE_MODE_1,
- PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
-
/*
* BSpec recommends 8x4 when MSAA is used,
* however in practice 16x4 seems fastest.
@@ -2645,9 +2647,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN6_WIZ_HASHING_MASK,
GEN6_WIZ_HASHING_16x4);
- /* WaDisable_RenderCache_OperationalFlush:snb */
- wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE);
-
/*
* From the Sandybridge PRM, volume 1 part 3, page 24:
* "If this bit is set, STCunit will have LRA as replacement
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index f057c16410e7..4f252f704975 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -904,7 +904,7 @@ static void active_engine(struct kthread_work *work)
arg->result = PTR_ERR(ce[count]);
pr_err("[%s] Create context #%ld failed: %d!\n",
engine->name, count, arg->result);
- while (--count)
+ while (count--)
intel_context_put(ce[count]);
return;
}
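The predecrement form was an off-by-one in the unwind path; a minimal demonstration, assuming count contexts ce[0..count-1] were created before the failure:

	while (count--)			/* visits ce[count-1] .. ce[0] */
		intel_context_put(ce[count]);

	/* 'while (--count)' stopped before ce[0], leaking one context, and
	 * for count == 0 the unsigned decrement would wrap and walk wild
	 * indices instead of doing nothing. */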
diff --git a/drivers/gpu/drm/i915/gt/selftest_tlb.c b/drivers/gpu/drm/i915/gt/selftest_tlb.c
index 69ed946a39e5..a5184f09d1de 100644
--- a/drivers/gpu/drm/i915/gt/selftest_tlb.c
+++ b/drivers/gpu/drm/i915/gt/selftest_tlb.c
@@ -3,17 +3,17 @@
* Copyright © 2022 Intel Corporation
*/
-#include "i915_selftest.h"
-
#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "gen8_engine_cs.h"
#include "i915_gem_ww.h"
+#include "i915_selftest.h"
+#include "i915_wait_util.h"
+#include "intel_context.h"
#include "intel_engine_regs.h"
#include "intel_gpu_commands.h"
-#include "intel_context.h"
#include "intel_gt.h"
#include "intel_ring.h"
diff --git a/drivers/gpu/drm/i915/gt/sysfs_engines.c b/drivers/gpu/drm/i915/gt/sysfs_engines.c
index aab2759067d2..4a81bc396b21 100644
--- a/drivers/gpu/drm/i915/gt/sysfs_engines.c
+++ b/drivers/gpu/drm/i915/gt/sysfs_engines.c
@@ -7,6 +7,7 @@
#include <linux/sysfs.h>
#include "i915_drv.h"
+#include "i915_timer_util.h"
#include "intel_engine.h"
#include "intel_engine_heartbeat.h"
#include "sysfs_engines.h"
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c
index d8edd7c054c8..e7444ebc373e 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c
@@ -10,11 +10,13 @@
#include "gt/intel_gt.h"
#include "gt/intel_gt_print.h"
+
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "i915_wait_util.h"
#include "intel_gsc_proxy.h"
#include "intel_gsc_uc.h"
#include "intel_gsc_uc_heci_cmd_submit.h"
-#include "i915_drv.h"
-#include "i915_reg.h"
/*
* GSC proxy:
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
index 2fde5c360cff..9bd29be7656f 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
@@ -8,6 +8,8 @@
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_ring.h"
+
+#include "i915_wait_util.h"
#include "intel_gsc_uc_heci_cmd_submit.h"
struct gsc_heci_pkt {
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index f360f020d8f1..52ec4421a211 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -8,15 +8,17 @@
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"
#include "gt/intel_gt_regs.h"
+
+#include "i915_drv.h"
+#include "i915_irq.h"
+#include "i915_reg.h"
+#include "i915_wait_util.h"
#include "intel_guc.h"
#include "intel_guc_ads.h"
#include "intel_guc_capture.h"
#include "intel_guc_print.h"
#include "intel_guc_slpc.h"
#include "intel_guc_submission.h"
-#include "i915_drv.h"
-#include "i915_irq.h"
-#include "i915_reg.h"
/**
* DOC: GuC
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
index 0d5197c0824a..3e7e5badcc2b 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
@@ -5,15 +5,16 @@
#include <linux/circ_buf.h>
#include <linux/ktime.h>
-#include <linux/time64.h>
#include <linux/string_helpers.h>
+#include <linux/time64.h>
#include <linux/timekeeping.h>
#include "i915_drv.h"
+#include "i915_wait_util.h"
#include "intel_guc_ct.h"
#include "intel_guc_print.h"
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GUC)
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
enum {
CT_DEAD_ALIVE = 0,
CT_DEAD_SETUP,
@@ -144,7 +145,7 @@ void intel_guc_ct_init_early(struct intel_guc_ct *ct)
spin_lock_init(&ct->requests.lock);
INIT_LIST_HEAD(&ct->requests.pending);
INIT_LIST_HEAD(&ct->requests.incoming);
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GUC)
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
INIT_WORK(&ct->dead_ct_worker, ct_dead_ct_worker_func);
#endif
INIT_WORK(&ct->requests.worker, ct_incoming_request_worker_func);
@@ -373,7 +374,7 @@ int intel_guc_ct_enable(struct intel_guc_ct *ct)
ct->enabled = true;
ct->stall_time = KTIME_MAX;
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GUC)
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
ct->dead_ct_reported = false;
ct->dead_ct_reason = CT_DEAD_ALIVE;
#endif
@@ -1377,7 +1378,7 @@ void intel_guc_ct_print_info(struct intel_guc_ct *ct,
ct->ctbs.recv.desc->tail);
}
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GUC)
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
static void ct_dead_ct_worker_func(struct work_struct *w)
{
struct intel_guc_ct *ct = container_of(w, struct intel_guc_ct, dead_ct_worker);
@@ -1386,6 +1387,9 @@ static void ct_dead_ct_worker_func(struct work_struct *w)
if (ct->dead_ct_reported)
return;
+ if (i915_error_injected())
+ return;
+
ct->dead_ct_reported = true;
guc_info(guc, "CTB is dead - reason=0x%X\n", ct->dead_ct_reason);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
index 2c4bb9a941be..e9a6ec4e6d38 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
@@ -97,7 +97,7 @@ struct intel_guc_ct {
/** @stall_time: timestamp of the first stalled CTB submission */
ktime_t stall_time;
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GUC)
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
int dead_ct_reason;
bool dead_ct_reported;
struct work_struct dead_ct_worker;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
index e7ccfa520df3..b1bda1b84f0a 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
@@ -13,9 +13,11 @@
#include "gt/intel_gt_mcr.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_rps.h"
+
+#include "i915_drv.h"
+#include "i915_wait_util.h"
#include "intel_guc_fw.h"
#include "intel_guc_print.h"
-#include "i915_drv.h"
static void guc_prepare_xfer(struct intel_gt *gt)
{
@@ -46,6 +48,14 @@ static void guc_prepare_xfer(struct intel_gt *gt)
/* allows for 5us (in 10ns units) before GT can go to RC6 */
intel_uncore_write(uncore, GUC_ARAT_C6DIS, 0x1FF);
}
+
+ /*
+ * Starting from IP 12.50 we need to enable the mirroring of GuC
+ * internal state to debug registers. This is always enabled on previous
+ * IPs.
+ */
+ if (GRAPHICS_VER_FULL(uncore->i915) >= IP_VER(12, 50))
+ intel_uncore_rmw(uncore, GUC_SHIM_CONTROL2, 0, GUC_ENABLE_DEBUG_REG);
}
static int guc_xfer_rsa_mmio(struct intel_uc_fw *guc_fw,
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
index 09a64f224c49..cdff48920ee6 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
@@ -6,6 +6,8 @@
#include <linux/debugfs.h>
#include <linux/string_helpers.h>
+#include <drm/drm_managed.h>
+
#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_irq.h"
@@ -511,7 +513,11 @@ static void guc_log_relay_unmap(struct intel_guc_log *log)
void intel_guc_log_init_early(struct intel_guc_log *log)
{
- mutex_init(&log->relay.lock);
+ struct intel_guc *guc = log_to_guc(log);
+ struct drm_i915_private *i915 = guc_to_i915(guc);
+
+ drmm_mutex_init(&i915->drm, &log->relay.lock);
+ drmm_mutex_init(&i915->drm, &log->guc_lock);
INIT_WORK(&log->relay.flush_work, copy_debug_logs_work);
log->relay.started = false;
}
@@ -677,7 +683,7 @@ int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
if (level < GUC_LOG_LEVEL_DISABLED || level > GUC_LOG_LEVEL_MAX)
return -EINVAL;
- mutex_lock(&i915->drm.struct_mutex);
+ mutex_lock(&log->guc_lock);
if (log->level == level)
goto out_unlock;
@@ -695,7 +701,7 @@ int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
log->level = level;
out_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
+ mutex_unlock(&log->guc_lock);
return ret;
}
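The conversion to drmm_mutex_init() ties both locks to the drm_device lifetime. A minimal sketch of the drm-managed pattern, with illustrative names (not from the patch); the mutex is destroyed automatically when the device is released, so no mutex_destroy() is needed on the unload path:

#include <drm/drm_managed.h>

static int example_locks_init(struct drm_device *drm, struct mutex *lock)
{
	/* registered as a drmm release action; torn down with the device */
	return drmm_mutex_init(drm, lock);
}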
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
index 02127703be80..13cb93ad0710 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
@@ -42,6 +42,14 @@ enum {
struct intel_guc_log {
u32 level;
+ /*
+ * Protects concurrent access and modification of intel_guc_log->level.
+ *
+ * This lock replaces the legacy struct_mutex usage in the
+ * intel_guc_log code.
+ */
+ struct mutex guc_lock;
+
/* Allocation settings */
struct {
s32 bytes; /* Size in bytes */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
index 3fd798837502..f73dab527547 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
@@ -96,6 +96,7 @@
#define GUC_GEN10_SHIM_WC_ENABLE (1<<21)
#define GUC_SHIM_CONTROL2 _MMIO(0xc068)
+#define GUC_ENABLE_DEBUG_REG (1<<11)
#define GUC_IS_PRIVILEGED (1<<29)
#define GSC_LOADS_HUC (1<<30)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
index d5ee6e5e1443..fa9af08f9708 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
@@ -3,17 +3,20 @@
* Copyright © 2021 Intel Corporation
*/
-#include <drm/drm_cache.h>
#include <linux/string_helpers.h>
+#include <drm/drm_cache.h>
+
+#include "gt/intel_gt.h"
+#include "gt/intel_gt_regs.h"
+#include "gt/intel_rps.h"
+
#include "i915_drv.h"
#include "i915_reg.h"
-#include "intel_guc_slpc.h"
+#include "i915_wait_util.h"
#include "intel_guc_print.h"
+#include "intel_guc_slpc.h"
#include "intel_mchbar_regs.h"
-#include "gt/intel_gt.h"
-#include "gt/intel_gt_regs.h"
-#include "gt/intel_rps.h"
/**
* DOC: SLPC - Dynamic Frequency management
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 127316d2c8aa..68f2b8d363ac 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -25,16 +25,16 @@
#include "gt/intel_mocs.h"
#include "gt/intel_ring.h"
+#include "i915_drv.h"
+#include "i915_irq.h"
+#include "i915_reg.h"
+#include "i915_trace.h"
+#include "i915_wait_util.h"
#include "intel_guc_ads.h"
#include "intel_guc_capture.h"
#include "intel_guc_print.h"
#include "intel_guc_submission.h"
-#include "i915_drv.h"
-#include "i915_reg.h"
-#include "i915_irq.h"
-#include "i915_trace.h"
-
/**
* DOC: GuC-based command submission
*
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index a91e23c22ea1..d432fdd69833 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1921,7 +1921,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
if (!bb)
return -ENOMEM;
- bb->ppgtt = (s->buf_addr_type == GTT_BUFFER) ? false : true;
+ bb->ppgtt = s->buf_addr_type != GTT_BUFFER;
/*
* The start_offset stores the batch buffer's start gma's
diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c
index 673534f061ef..415422b5943c 100644
--- a/drivers/gpu/drm/i915/gvt/debugfs.c
+++ b/drivers/gpu/drm/i915/gvt/debugfs.c
@@ -194,9 +194,9 @@ void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu)
void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
- struct drm_minor *minor = gvt->gt->i915->drm.primary;
+ struct dentry *debugfs_root = gvt->gt->i915->drm.debugfs_root;
- if (minor->debugfs_root && gvt->debugfs_root) {
+ if (debugfs_root && gvt->debugfs_root) {
debugfs_remove_recursive(vgpu->debugfs);
vgpu->debugfs = NULL;
}
@@ -208,9 +208,9 @@ void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu)
*/
void intel_gvt_debugfs_init(struct intel_gvt *gvt)
{
- struct drm_minor *minor = gvt->gt->i915->drm.primary;
+ struct dentry *debugfs_root = gvt->gt->i915->drm.debugfs_root;
- gvt->debugfs_root = debugfs_create_dir("gvt", minor->debugfs_root);
+ gvt->debugfs_root = debugfs_create_dir("gvt", debugfs_root);
debugfs_create_ulong("num_tracked_mmio", 0444, gvt->debugfs_root,
&gvt->mmio.num_tracked_mmio);
@@ -222,9 +222,9 @@ void intel_gvt_debugfs_init(struct intel_gvt *gvt)
*/
void intel_gvt_debugfs_clean(struct intel_gvt *gvt)
{
- struct drm_minor *minor = gvt->gt->i915->drm.primary;
+ struct dentry *debugfs_root = gvt->gt->i915->drm.debugfs_root;
- if (minor->debugfs_root) {
+ if (debugfs_root) {
debugfs_remove_recursive(gvt->debugfs_root);
gvt->debugfs_root = NULL;
}
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 2f7208843367..0b810baad20a 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -33,14 +33,16 @@
*
*/
-#include "i915_drv.h"
-#include "i915_reg.h"
#include "gt/intel_context.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_ring.h"
+
#include "gvt.h"
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "i915_wait_util.h"
#include "trace.h"
#define GEN9_MOCS_SIZE 64
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 0dbc4e289300..6b0c1162505a 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -257,10 +257,9 @@ static struct active_node *__active_lookup(struct i915_active *ref, u64 idx)
* claimed the cache and we know that it does not match our
* idx. If, and only if, the timeline is currently zero is it
* worth competing to claim it atomically for ourselves (for
- * only the winner of that race will cmpxchg return the old
- * value of 0).
+ * only the winner of that race will cmpxchg succeed).
*/
- if (!cached && !cmpxchg64(&it->timeline, 0, idx))
+ if (!cached && try_cmpxchg64(&it->timeline, &cached, idx))
return it;
}
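try_cmpxchg64() keeps the cmpxchg64() semantics while folding the success test into one call: it succeeds only for the thread that observes the expected value, and on failure it writes the current value back through the second argument. A hypothetical helper showing just that contract (not driver code):

static u64 claim_timeline(u64 *slot, u64 idx)
{
	u64 expected = 0;

	/* only the winner of the race sees *slot == 0 and claims it */
	if (try_cmpxchg64(slot, &expected, idx))
		return idx;

	/* on failure, 'expected' now holds the winner's value */
	return expected;
}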
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 967c0501e91e..c2e38d4bcd01 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -26,11 +26,11 @@
*
*/
+#include <linux/debugfs.h>
#include <linux/sched/mm.h>
#include <linux/sort.h>
#include <linux/string_helpers.h>
-#include <linux/debugfs.h>
#include <drm/drm_debugfs.h>
#include "gem/i915_gem_context.h"
@@ -54,6 +54,7 @@
#include "i915_irq.h"
#include "i915_reg.h"
#include "i915_scheduler.h"
+#include "i915_wait_util.h"
#include "intel_mchbar_regs.h"
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
@@ -720,26 +721,24 @@ static const struct i915_debugfs_files {
{"i915_gem_drop_caches", &i915_drop_caches_fops},
};
-void i915_debugfs_register(struct drm_i915_private *dev_priv)
+void i915_debugfs_register(struct drm_i915_private *i915)
{
- struct drm_minor *minor = dev_priv->drm.primary;
+ struct dentry *debugfs_root = i915->drm.debugfs_root;
int i;
- i915_debugfs_params(dev_priv);
+ i915_debugfs_params(i915);
- debugfs_create_file("i915_forcewake_user", S_IRUSR, minor->debugfs_root,
- to_i915(minor->dev), &i915_forcewake_fops);
+ debugfs_create_file("i915_forcewake_user", S_IRUSR, debugfs_root,
+ i915, &i915_forcewake_fops);
for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
- debugfs_create_file(i915_debugfs_files[i].name,
- S_IRUGO | S_IWUSR,
- minor->debugfs_root,
- to_i915(minor->dev),
+ debugfs_create_file(i915_debugfs_files[i].name, S_IRUGO | S_IWUSR,
+ debugfs_root, i915,
i915_debugfs_files[i].fops);
}
drm_debugfs_create_files(i915_debugfs_list,
ARRAY_SIZE(i915_debugfs_list),
- minor->debugfs_root, minor);
+ debugfs_root, i915->drm.primary);
- i915_gpu_error_debugfs_register(dev_priv);
+ i915_gpu_error_debugfs_register(i915);
}
diff --git a/drivers/gpu/drm/i915/i915_debugfs_params.c b/drivers/gpu/drm/i915/i915_debugfs_params.c
index 33d2dcb0de65..89ab5eb14779 100644
--- a/drivers/gpu/drm/i915/i915_debugfs_params.c
+++ b/drivers/gpu/drm/i915/i915_debugfs_params.c
@@ -248,11 +248,11 @@ i915_debugfs_create_charp(const char *name, umode_t mode,
/* add a subdirectory with files for each i915 param */
struct dentry *i915_debugfs_params(struct drm_i915_private *i915)
{
- struct drm_minor *minor = i915->drm.primary;
+ struct dentry *debugfs_root = i915->drm.debugfs_root;
struct i915_params *params = &i915->params;
struct dentry *dir;
- dir = debugfs_create_dir("i915_params", minor->debugfs_root);
+ dir = debugfs_create_dir("i915_params", debugfs_root);
if (IS_ERR(dir))
return dir;
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index c6263c6d3384..a28c3710c4d5 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -51,13 +51,15 @@
#include "display/intel_bw.h"
#include "display/intel_cdclk.h"
#include "display/intel_crtc.h"
-#include "display/intel_display_core.h"
+#include "display/intel_display_device.h"
#include "display/intel_display_driver.h"
+#include "display/intel_display_power.h"
#include "display/intel_dmc.h"
#include "display/intel_dp.h"
#include "display/intel_dpt.h"
#include "display/intel_encoder.h"
#include "display/intel_fbdev.h"
+#include "display/intel_gmbus.h"
#include "display/intel_hotplug.h"
#include "display/intel_opregion.h"
#include "display/intel_overlay.h"
@@ -977,7 +979,7 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
intel_power_domains_disable(display);
drm_client_dev_suspend(&i915->drm, false);
- if (HAS_DISPLAY(i915)) {
+ if (intel_display_device_present(display)) {
drm_kms_helper_poll_disable(&i915->drm);
intel_display_driver_disable_user_access(display);
@@ -989,7 +991,7 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
intel_irq_suspend(i915);
intel_hpd_cancel_work(display);
- if (HAS_DISPLAY(i915))
+ if (intel_display_device_present(display))
intel_display_driver_suspend_access(display);
intel_encoder_suspend_all(display);
@@ -1060,7 +1062,7 @@ static int i915_drm_suspend(struct drm_device *dev)
* properly. */
intel_power_domains_disable(display);
drm_client_dev_suspend(dev, false);
- if (HAS_DISPLAY(dev_priv)) {
+ if (intel_display_device_present(display)) {
drm_kms_helper_poll_disable(dev);
intel_display_driver_disable_user_access(display);
}
@@ -1072,7 +1074,7 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_irq_suspend(dev_priv);
intel_hpd_cancel_work(display);
- if (HAS_DISPLAY(dev_priv))
+ if (intel_display_device_present(display))
intel_display_driver_suspend_access(display);
intel_encoder_suspend_all(display);
@@ -1219,7 +1221,7 @@ static int i915_drm_resume(struct drm_device *dev)
*/
intel_irq_resume(dev_priv);
- if (HAS_DISPLAY(dev_priv))
+ if (intel_display_device_present(display))
drm_mode_config_reset(dev);
i915_gem_resume(dev_priv);
@@ -1228,14 +1230,14 @@ static int i915_drm_resume(struct drm_device *dev)
intel_clock_gating_init(dev_priv);
- if (HAS_DISPLAY(dev_priv))
+ if (intel_display_device_present(display))
intel_display_driver_resume_access(display);
intel_hpd_init(display);
intel_display_driver_resume(display);
- if (HAS_DISPLAY(dev_priv)) {
+ if (intel_display_device_present(display)) {
intel_display_driver_enable_user_access(display);
drm_kms_helper_poll_enable(dev);
}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 4e4e89746aa6..6a768aad8edd 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -114,8 +114,7 @@ struct i915_gem_mm {
struct intel_memory_region *stolen_region;
/** Memory allocator for GTT stolen memory */
struct drm_mm stolen;
- /** Protects the usage of the GTT stolen memory allocator. This is
- * always the inner lock when overlapping with struct_mutex. */
+ /** Protects the usage of the GTT stolen memory allocator */
struct mutex stolen_lock;
/* Protects bound_list/unbound_list and #drm_i915_gem_object.mm.link */
@@ -222,6 +221,9 @@ struct drm_i915_private {
bool irqs_enabled;
+ /* LPT/WPT IOSF sideband protection */
+ struct mutex sbi_lock;
+
/* VLV/CHV IOSF sideband */
struct {
struct mutex lock; /* protect sideband access */
@@ -237,8 +239,6 @@ struct drm_i915_private {
bool preserve_bios_swizzle;
- unsigned int fsb_freq, mem_freq, is_ddr3;
-
unsigned int hpll_freq;
unsigned int czclk_freq;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8c8d43451f35..e14a0c3db999 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -847,8 +847,7 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915)
/*
* Only called during RPM suspend. All users of the userfault_list
* must be holding an RPM wakeref to ensure that this can not
- * run concurrently with themselves (and use the struct_mutex for
- * protection between themselves).
+ * run concurrently with themselves.
*/
list_for_each_entry_safe(obj, on,
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 0e4b832dff84..7582ef34bf3f 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -685,6 +685,74 @@ static void err_print_guc_ctb(struct drm_i915_error_state_buf *m,
ctb->head, ctb->tail, ctb->desc_offset, ctb->cmds_offset, ctb->size);
}
+/* This list includes registers that are useful in debugging GuC hangs. */
+static const struct {
+ u32 start;
+ u32 count;
+} guc_hw_reg_state[] = {
+ { 0xc0b0, 2 },
+ { 0xc000, 65 },
+ { 0xc140, 1 },
+ { 0xc180, 16 },
+ { 0xc1dc, 10 },
+ { 0xc300, 79 },
+ { 0xc4b4, 47 },
+ { 0xc574, 1 },
+ { 0xc57c, 1 },
+ { 0xc584, 11 },
+ { 0xc5c0, 8 },
+ { 0xc5e4, 1 },
+ { 0xc5ec, 103 },
+ { 0xc7c0, 1 },
+ { 0xc0b0, 2 }
+};
+
+static u32 print_range_line(struct drm_i915_error_state_buf *m, u32 start, u32 *dump, u32 count)
+{
+ if (count >= 8) {
+ err_printf(m, "[0x%04x] 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ start, dump[0], dump[1], dump[2], dump[3],
+ dump[4], dump[5], dump[6], dump[7]);
+ return 8;
+ } else if (count >= 4) {
+ err_printf(m, "[0x%04x] 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ start, dump[0], dump[1], dump[2], dump[3]);
+ return 4;
+ } else if (count >= 2) {
+ err_printf(m, "[0x%04x] 0x%08x 0x%08x\n", start, dump[0], dump[1]);
+ return 2;
+ }
+
+ err_printf(m, "[0x%04x] 0x%08x\n", start, dump[0]);
+ return 1;
+}
+
+static void err_print_guc_hw_state(struct drm_i915_error_state_buf *m, u32 *hw_state)
+{
+ u32 total = 0;
+ int i;
+
+ if (!hw_state)
+ return;
+
+ err_printf(m, "GuC Register State:\n");
+
+ for (i = 0; i < ARRAY_SIZE(guc_hw_reg_state); i++) {
+ u32 entry = 0;
+
+ while (entry < guc_hw_reg_state[i].count) {
+ u32 start = guc_hw_reg_state[i].start + entry * sizeof(u32);
+ u32 count = guc_hw_reg_state[i].count - entry;
+ u32 *values = hw_state + total + entry;
+
+ entry += print_range_line(m, start, values, count);
+ }
+
+ GEM_BUG_ON(entry != guc_hw_reg_state[i].count);
+ total += entry;
+ }
+}
+
static void err_print_uc(struct drm_i915_error_state_buf *m,
const struct intel_uc_coredump *error_uc)
{
@@ -693,6 +761,7 @@ static void err_print_uc(struct drm_i915_error_state_buf *m,
intel_uc_fw_dump(&error_uc->guc_fw, &p);
intel_uc_fw_dump(&error_uc->huc_fw, &p);
err_printf(m, "GuC timestamp: 0x%08x\n", error_uc->guc.timestamp);
+ err_print_guc_hw_state(m, error_uc->guc.hw_state);
intel_gpu_error_print_vma(m, NULL, error_uc->guc.vma_log);
err_printf(m, "GuC CTB fence: %d\n", error_uc->guc.last_fence);
err_print_guc_ctb(m, "Send", error_uc->guc.ctb + 0);
@@ -1025,6 +1094,7 @@ static void cleanup_uc(struct intel_uc_coredump *uc)
kfree(uc->huc_fw.file_wanted.path);
i915_vma_coredump_free(uc->guc.vma_log);
i915_vma_coredump_free(uc->guc.vma_ctb);
+ kfree(uc->guc.hw_state);
kfree(uc);
}
@@ -1721,6 +1791,37 @@ static void gt_record_guc_ctb(struct intel_ctb_coredump *saved,
saved->cmds_offset = ((void *)ctb->cmds) - blob_ptr;
}
+static u32 read_guc_state_reg(struct intel_uncore *uncore, int range, int count)
+{
+ GEM_BUG_ON(range >= ARRAY_SIZE(guc_hw_reg_state));
+ GEM_BUG_ON(count >= guc_hw_reg_state[range].count);
+
+ return intel_uncore_read(uncore,
+ _MMIO(guc_hw_reg_state[range].start + count * sizeof(u32)));
+}
+
+static void gt_record_guc_hw_state(struct intel_uncore *uncore,
+ struct intel_uc_coredump *error_uc)
+{
+ u32 *hw_state;
+ u32 count = 0;
+ int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(guc_hw_reg_state); i++)
+ count += guc_hw_reg_state[i].count;
+
+ hw_state = kcalloc(count, sizeof(u32), ALLOW_FAIL);
+ if (!hw_state)
+ return;
+
+ count = 0;
+ for (i = 0; i < ARRAY_SIZE(guc_hw_reg_state); i++)
+ for (j = 0; j < guc_hw_reg_state[i].count; j++)
+ hw_state[count++] = read_guc_state_reg(uncore, i, j);
+
+ error_uc->guc.hw_state = hw_state;
+}
+
static struct intel_uc_coredump *
gt_record_uc(struct intel_gt_coredump *gt,
struct i915_vma_compress *compress)
@@ -1755,6 +1856,7 @@ gt_record_uc(struct intel_gt_coredump *gt,
uc->guc.ct.ctbs.send.desc, (struct intel_guc *)&uc->guc);
gt_record_guc_ctb(error_uc->guc.ctb + 1, &uc->guc.ct.ctbs.recv,
uc->guc.ct.ctbs.send.desc, (struct intel_guc *)&uc->guc);
+ gt_record_guc_hw_state(gt->_gt->uncore, error_uc);
return error_uc;
}
@@ -2445,11 +2547,11 @@ static const struct file_operations i915_error_state_fops = {
void i915_gpu_error_debugfs_register(struct drm_i915_private *i915)
{
- struct drm_minor *minor = i915->drm.primary;
+ struct dentry *debugfs_root = i915->drm.debugfs_root;
- debugfs_create_file("i915_error_state", 0644, minor->debugfs_root, i915,
+ debugfs_create_file("i915_error_state", 0644, debugfs_root, i915,
&i915_error_state_fops);
- debugfs_create_file("i915_gpu_info", 0644, minor->debugfs_root, i915,
+ debugfs_create_file("i915_gpu_info", 0644, debugfs_root, i915,
&i915_gpu_info_fops);
}
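print_range_line() above emits each register range as greedy power-of-two rows: 8, 4, 2, then 1 words per line, largest chunks first. A standalone userspace sketch of the same splitting, assuming a 10-word range (which becomes one row of 8 followed by one row of 2):

#include <stdio.h>

static unsigned int chunk(unsigned int remaining)
{
	if (remaining >= 8)
		return 8;
	if (remaining >= 4)
		return 4;
	if (remaining >= 2)
		return 2;
	return 1;
}

int main(void)
{
	unsigned int done = 0, count = 10;

	while (done < count) {
		unsigned int n = chunk(count - done);

		printf("row at word %2u: %u values\n", done, n);
		done += n;
	}
	return 0;
}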
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 182324979278..91b3df621a49 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -177,6 +177,7 @@ struct intel_gt_coredump {
struct intel_ctb_coredump ctb[2];
struct i915_vma_coredump *vma_ctb;
struct i915_vma_coredump *vma_log;
+ u32 *hw_state;
u32 timestamp;
u16 last_fence;
bool is_guc_capture;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 191ed8bb1d9c..8d5da222a187 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -163,11 +163,6 @@ static void ivb_parity_work(struct work_struct *work)
u32 misccpctl;
u8 slice = 0;
- /* We must turn off DOP level clock gating to access the L3 registers.
- * In order to prevent a get/put style interface, acquire struct mutex
- * any time we access those registers.
- */
- mutex_lock(&dev_priv->drm.struct_mutex);
/* If we've screwed up tracking, just let the interrupt fire again */
if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice))
@@ -225,7 +220,6 @@ out:
gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
spin_unlock_irq(gt->irq_lock);
- mutex_unlock(&dev_priv->drm.struct_mutex);
}
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
@@ -439,7 +433,7 @@ static irqreturn_t ilk_irq_handler(int irq, void *arg)
* able to process them after we restore SDEIER (as soon as we restore
* it, we'll get an interrupt if SDEIIR still has something to process
* due to its back queue). */
- if (!HAS_PCH_NOP(i915)) {
+ if (!HAS_PCH_NOP(display)) {
sde_ier = raw_reg_read(regs, SDEIER);
raw_reg_write(regs, SDEIER, 0);
}
@@ -459,7 +453,7 @@ static irqreturn_t ilk_irq_handler(int irq, void *arg)
de_iir = raw_reg_read(regs, DEIIR);
if (de_iir) {
raw_reg_write(regs, DEIIR, de_iir);
- if (DISPLAY_VER(i915) >= 7)
+ if (DISPLAY_VER(display) >= 7)
ivb_display_irq_handler(display, de_iir);
else
ilk_display_irq_handler(display, de_iir);
@@ -834,6 +828,7 @@ static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
static u32 i9xx_error_mask(struct drm_i915_private *i915)
{
+ struct intel_display *display = i915->display;
/*
* On gen2/3 FBC generates (seemingly spurious)
* display INVALID_GTT/INVALID_GTT_PTE table errors.
@@ -846,7 +841,7 @@ static u32 i9xx_error_mask(struct drm_i915_private *i915)
* Unfortunately we can't mask off individual PGTBL_ER bits,
* so we just have to mask off all page table errors via EMR.
*/
- if (HAS_FBC(i915))
+ if (HAS_FBC(display))
return I915_ERROR_MEMORY_REFRESH;
else
return I915_ERROR_PAGE_TABLE |
@@ -924,12 +919,12 @@ static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
I915_MASTER_ERROR_INTERRUPT |
I915_USER_INTERRUPT;
- if (DISPLAY_VER(dev_priv) >= 3) {
+ if (DISPLAY_VER(display) >= 3) {
dev_priv->irq_mask &= ~I915_ASLE_INTERRUPT;
enable_mask |= I915_ASLE_INTERRUPT;
}
- if (HAS_HOTPLUG(dev_priv)) {
+ if (HAS_HOTPLUG(display)) {
dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
}
@@ -963,7 +958,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
ret = IRQ_HANDLED;
- if (HAS_HOTPLUG(dev_priv) &&
+ if (HAS_HOTPLUG(display) &&
iir & I915_DISPLAY_PORT_INTERRUPT)
hotplug_status = i9xx_hpd_irq_ack(display);
diff --git a/drivers/gpu/drm/i915/i915_list_util.h b/drivers/gpu/drm/i915/i915_list_util.h
new file mode 100644
index 000000000000..4e515dc8a3e0
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_list_util.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __I915_LIST_UTIL_H__
+#define __I915_LIST_UTIL_H__
+
+#include <linux/list.h>
+#include <asm/rwonce.h>
+
+static inline void __list_del_many(struct list_head *head,
+ struct list_head *first)
+{
+ first->prev = head;
+ WRITE_ONCE(head->next, first);
+}
+
+static inline int list_is_last_rcu(const struct list_head *list,
+ const struct list_head *head)
+{
+ return READ_ONCE(list->next) == head;
+}
+
+#endif /* __I915_LIST_UTIL_H__ */
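A hedged usage sketch for the two relocated helpers, with illustrative names: __list_del_many() unlinks every entry between 'head' and 'first' in one step, leaving 'first' as the new first element (the detached run stays chained together), while list_is_last_rcu() is the READ_ONCE form of list_is_last() for lockless readers:

static LIST_HEAD(retire_queue);

/* drop all entries queued ahead of 'first' in one shot */
static void retire_up_to(struct list_head *first)
{
	__list_del_many(&retire_queue, first);
}

/* tail check usable under rcu_read_lock() */
static bool is_tail(struct list_head *entry)
{
	return list_is_last_rcu(entry, &retire_queue);
}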
diff --git a/drivers/gpu/drm/i915/i915_ptr_util.h b/drivers/gpu/drm/i915/i915_ptr_util.h
new file mode 100644
index 000000000000..9f8931d7d99b
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_ptr_util.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __I915_PTR_UTIL_H__
+#define __I915_PTR_UTIL_H__
+
+#include <linux/types.h>
+
+#define ptr_mask_bits(ptr, n) ({ \
+ unsigned long __v = (unsigned long)(ptr); \
+ (typeof(ptr))(__v & -BIT(n)); \
+})
+
+#define ptr_unmask_bits(ptr, n) ((unsigned long)(ptr) & (BIT(n) - 1))
+
+#define ptr_unpack_bits(ptr, bits, n) ({ \
+ unsigned long __v = (unsigned long)(ptr); \
+ *(bits) = __v & (BIT(n) - 1); \
+ (typeof(ptr))(__v & -BIT(n)); \
+})
+
+#define ptr_pack_bits(ptr, bits, n) ({ \
+ unsigned long __bits = (bits); \
+ GEM_BUG_ON(__bits & -BIT(n)); \
+ ((typeof(ptr))((unsigned long)(ptr) | __bits)); \
+})
+
+#define ptr_dec(ptr) ({ \
+ unsigned long __v = (unsigned long)(ptr); \
+ (typeof(ptr))(__v - 1); \
+})
+
+#define ptr_inc(ptr) ({ \
+ unsigned long __v = (unsigned long)(ptr); \
+ (typeof(ptr))(__v + 1); \
+})
+
+#define page_mask_bits(ptr) ptr_mask_bits(ptr, PAGE_SHIFT)
+#define page_unmask_bits(ptr) ptr_unmask_bits(ptr, PAGE_SHIFT)
+#define page_pack_bits(ptr, bits) ptr_pack_bits(ptr, bits, PAGE_SHIFT)
+#define page_unpack_bits(ptr, bits) ptr_unpack_bits(ptr, bits, PAGE_SHIFT)
+
+static __always_inline ptrdiff_t ptrdiff(const void *a, const void *b)
+{
+ return a - b;
+}
+
+#define u64_to_ptr(T, x) ({ \
+ typecheck(u64, x); \
+ (T *)(uintptr_t)(x); \
+})
+
+/*
+ * container_of_user: Extract the superclass from a pointer to a member.
+ *
+ * Exactly like container_of() with the exception that it plays nicely
+ * with sparse for __user @ptr.
+ */
+#define container_of_user(ptr, type, member) ({ \
+ void __user *__mptr = (void __user *)(ptr); \
+ BUILD_BUG_ON_MSG(!__same_type(*(ptr), typeof_member(type, member)) && \
+ !__same_type(*(ptr), void), \
+ "pointer type mismatch in container_of()"); \
+ ((type __user *)(__mptr - offsetof(type, member))); })
+
+#endif /* __I915_PTR_UTIL_H__ */
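These macros implement pointer tagging: small values ride in the low bits that a pointer's alignment guarantees to be zero. A sketch assuming a pointee with at least 4-byte alignment (two free bits); the names are hypothetical:

struct item;	/* assumed at least 4-byte aligned */

static struct item *tag_item(struct item *p, unsigned long flags)
{
	/* the GEM_BUG_ON() inside ptr_pack_bits() rejects flags >= BIT(2) */
	return ptr_pack_bits(p, flags, 2);
}

static struct item *untag_item(struct item *tagged, unsigned long *flags)
{
	/* returns the real pointer and writes the low two bits to *flags */
	return ptr_unpack_bits(tagged, flags, 2);
}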
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 03b895897f60..354ef75ef6a5 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -412,9 +412,9 @@
#define FW_BLC _MMIO(0x20d8)
#define FW_BLC2 _MMIO(0x20dc)
#define FW_BLC_SELF _MMIO(0x20e0) /* 915+ only */
-#define FW_BLC_SELF_EN_MASK (1 << 31)
-#define FW_BLC_SELF_FIFO_MASK (1 << 16) /* 945 only */
-#define FW_BLC_SELF_EN (1 << 15) /* 945 only */
+#define FW_BLC_SELF_EN_MASK REG_BIT(31)
+#define FW_BLC_SELF_FIFO_MASK REG_BIT(16) /* 945 only */
+#define FW_BLC_SELF_EN REG_BIT(15) /* 945 only */
#define MM_BURST_LENGTH 0x00700000
#define MM_FIFO_WATERMARK 0x0001F000
#define LM_BURST_LENGTH 0x00000700
@@ -613,7 +613,8 @@
#define DSTATE_GFX_CLOCK_GATING (1 << 1)
#define DSTATE_DOT_CLOCK_GATING (1 << 0)
-#define DSPCLK_GATE_D(__i915) _MMIO(DISPLAY_MMIO_BASE(__i915) + 0x6200)
+#define DSPCLK_GATE_D _MMIO(0x6200)
+#define VLV_DSPCLK_GATE_D _MMIO(VLV_DISPLAY_BASE + 0x6200)
# define DPUNIT_B_CLOCK_GATE_DISABLE (1 << 30) /* 965 */
# define VSUNIT_CLOCK_GATE_DISABLE (1 << 29) /* 965 */
# define VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) /* 965 */
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 5f7e8138ec14..b09135301f39 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -31,19 +31,20 @@
#include <linux/llist.h>
#include <linux/lockdep.h>
+#include <uapi/drm/i915_drm.h>
+
#include "gem/i915_gem_context_types.h"
#include "gt/intel_context_types.h"
#include "gt/intel_engine_types.h"
#include "gt/intel_timeline_types.h"
#include "i915_gem.h"
+#include "i915_ptr_util.h"
#include "i915_scheduler.h"
#include "i915_selftest.h"
#include "i915_sw_fence.h"
#include "i915_vma_resource.h"
-#include <uapi/drm/i915_drm.h>
-
struct drm_file;
struct drm_i915_gem_object;
struct drm_printer;
diff --git a/drivers/gpu/drm/i915/i915_switcheroo.c b/drivers/gpu/drm/i915/i915_switcheroo.c
index 4c02a04be681..d5b6d8ab31a2 100644
--- a/drivers/gpu/drm/i915/i915_switcheroo.c
+++ b/drivers/gpu/drm/i915/i915_switcheroo.c
@@ -5,7 +5,7 @@
#include <linux/vga_switcheroo.h>
-#include "display/intel_display_core.h"
+#include "display/intel_display_device.h"
#include "i915_driver.h"
#include "i915_drv.h"
@@ -15,13 +15,14 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev,
enum vga_switcheroo_state state)
{
struct drm_i915_private *i915 = pdev_to_i915(pdev);
+ struct intel_display *display = i915 ? i915->display : NULL;
pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
if (!i915) {
dev_err(&pdev->dev, "DRM not initialized, aborting switch.\n");
return;
}
- if (!HAS_DISPLAY(i915)) {
+ if (!intel_display_device_present(display)) {
dev_err(&pdev->dev, "Device state not initialized, aborting switch.\n");
return;
}
@@ -44,13 +45,15 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev,
static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
struct drm_i915_private *i915 = pdev_to_i915(pdev);
+ struct intel_display *display = i915 ? i915->display : NULL;
/*
* FIXME: open_count is protected by drm_global_mutex but that would lead to
* locking inversion with the driver load path. And the access here is
* completely racy anyway. So don't bother with locking for now.
*/
- return i915 && HAS_DISPLAY(i915) && atomic_read(&i915->drm.open_count) == 0;
+ return i915 && intel_display_device_present(display) &&
+ atomic_read(&i915->drm.open_count) == 0;
}
static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
diff --git a/drivers/gpu/drm/i915/i915_timer_util.c b/drivers/gpu/drm/i915/i915_timer_util.c
new file mode 100644
index 000000000000..ee4cfd8b3c07
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_timer_util.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: MIT
+/* Copyright © 2025 Intel Corporation */
+
+#include <linux/jiffies.h>
+
+#include "i915_timer_util.h"
+
+void cancel_timer(struct timer_list *t)
+{
+ if (!timer_active(t))
+ return;
+
+ timer_delete(t);
+ WRITE_ONCE(t->expires, 0);
+}
+
+void set_timer_ms(struct timer_list *t, unsigned long timeout)
+{
+ if (!timeout) {
+ cancel_timer(t);
+ return;
+ }
+
+ timeout = msecs_to_jiffies(timeout);
+
+ /*
+ * Paranoia to make sure the compiler computes the timeout before
+ * loading 'jiffies' as jiffies is volatile and may be updated in
+ * the background by a timer tick. All to reduce the complexity
+ * of the addition and reduce the risk of losing a jiffy.
+ */
+ barrier();
+
+ /* Keep t->expires = 0 reserved to indicate a canceled timer. */
+ mod_timer(t, jiffies + timeout ?: 1);
+}
diff --git a/drivers/gpu/drm/i915/i915_timer_util.h b/drivers/gpu/drm/i915/i915_timer_util.h
new file mode 100644
index 000000000000..f35ad730820c
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_timer_util.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __I915_TIMER_UTIL_H__
+#define __I915_TIMER_UTIL_H__
+
+#include <linux/timer.h>
+#include <asm/rwonce.h>
+
+void cancel_timer(struct timer_list *t);
+void set_timer_ms(struct timer_list *t, unsigned long timeout);
+
+static inline bool timer_active(const struct timer_list *t)
+{
+ return READ_ONCE(t->expires);
+}
+
+static inline bool timer_expired(const struct timer_list *t)
+{
+ return timer_active(t) && !timer_pending(t);
+}
+
+#endif /* __I915_TIMER_UTIL_H__ */
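The expires == 0 convention gives these helpers a watchdog-style API: arming with a zero timeout cancels, and timer_expired() distinguishes "armed and fired" from "never armed". A brief sketch of the intended calling pattern (hypothetical names):

static void watchdog_arm(struct timer_list *t, unsigned long delay_ms)
{
	/* delay_ms == 0 cancels and marks the timer off (expires == 0) */
	set_timer_ms(t, delay_ms);
}

static bool watchdog_fired(const struct timer_list *t)
{
	/* true only if armed at some point and no longer pending */
	return timer_expired(t);
}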
diff --git a/drivers/gpu/drm/i915/i915_utils.c b/drivers/gpu/drm/i915/i915_utils.c
index b60c28fbd207..49f7ed413132 100644
--- a/drivers/gpu/drm/i915/i915_utils.c
+++ b/drivers/gpu/drm/i915/i915_utils.c
@@ -47,36 +47,6 @@ bool i915_error_injected(void)
#endif
-void cancel_timer(struct timer_list *t)
-{
- if (!timer_active(t))
- return;
-
- timer_delete(t);
- WRITE_ONCE(t->expires, 0);
-}
-
-void set_timer_ms(struct timer_list *t, unsigned long timeout)
-{
- if (!timeout) {
- cancel_timer(t);
- return;
- }
-
- timeout = msecs_to_jiffies(timeout);
-
- /*
- * Paranoia to make sure the compiler computes the timeout before
- * loading 'jiffies' as jiffies is volatile and may be updated in
- * the background by a timer tick. All to reduce the complexity
- * of the addition and reduce the risk of losing a jiffy.
- */
- barrier();
-
- /* Keep t->expires = 0 reserved to indicate a canceled timer. */
- mod_timer(t, jiffies + timeout ?: 1);
-}
-
bool i915_vtd_active(struct drm_i915_private *i915)
{
if (device_iommu_mapped(i915->drm.dev))
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index f7fb40cfdb70..a0c892e4c40d 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -25,7 +25,6 @@
#ifndef __I915_UTILS_H
#define __I915_UTILS_H
-#include <linux/list.h>
#include <linux/overflow.h>
#include <linux/sched.h>
#include <linux/string_helpers.h>
@@ -38,7 +37,6 @@
#endif
struct drm_i915_private;
-struct timer_list;
#define MISSING_CASE(x) WARN(1, "Missing case (%s == %ld)\n", \
__stringify(x), (long)(x))
@@ -67,88 +65,12 @@ bool i915_error_injected(void);
drm_err(&(i915)->drm, fmt, ##__VA_ARGS__); \
})
-#define range_overflows(start, size, max) ({ \
- typeof(start) start__ = (start); \
- typeof(size) size__ = (size); \
- typeof(max) max__ = (max); \
- (void)(&start__ == &size__); \
- (void)(&start__ == &max__); \
- start__ >= max__ || size__ > max__ - start__; \
-})
-
-#define range_overflows_t(type, start, size, max) \
- range_overflows((type)(start), (type)(size), (type)(max))
-
-#define range_overflows_end(start, size, max) ({ \
- typeof(start) start__ = (start); \
- typeof(size) size__ = (size); \
- typeof(max) max__ = (max); \
- (void)(&start__ == &size__); \
- (void)(&start__ == &max__); \
- start__ > max__ || size__ > max__ - start__; \
-})
-
-#define range_overflows_end_t(type, start, size, max) \
- range_overflows_end((type)(start), (type)(size), (type)(max))
-
-#define ptr_mask_bits(ptr, n) ({ \
- unsigned long __v = (unsigned long)(ptr); \
- (typeof(ptr))(__v & -BIT(n)); \
-})
-
-#define ptr_unmask_bits(ptr, n) ((unsigned long)(ptr) & (BIT(n) - 1))
-
-#define ptr_unpack_bits(ptr, bits, n) ({ \
- unsigned long __v = (unsigned long)(ptr); \
- *(bits) = __v & (BIT(n) - 1); \
- (typeof(ptr))(__v & -BIT(n)); \
-})
-
-#define ptr_pack_bits(ptr, bits, n) ({ \
- unsigned long __bits = (bits); \
- GEM_BUG_ON(__bits & -BIT(n)); \
- ((typeof(ptr))((unsigned long)(ptr) | __bits)); \
-})
-
-#define ptr_dec(ptr) ({ \
- unsigned long __v = (unsigned long)(ptr); \
- (typeof(ptr))(__v - 1); \
-})
-
-#define ptr_inc(ptr) ({ \
- unsigned long __v = (unsigned long)(ptr); \
- (typeof(ptr))(__v + 1); \
-})
-
-#define page_mask_bits(ptr) ptr_mask_bits(ptr, PAGE_SHIFT)
-#define page_unmask_bits(ptr) ptr_unmask_bits(ptr, PAGE_SHIFT)
-#define page_pack_bits(ptr, bits) ptr_pack_bits(ptr, bits, PAGE_SHIFT)
-#define page_unpack_bits(ptr, bits) ptr_unpack_bits(ptr, bits, PAGE_SHIFT)
-
#define fetch_and_zero(ptr) ({ \
typeof(*ptr) __T = *(ptr); \
*(ptr) = (typeof(*ptr))0; \
__T; \
})
-static __always_inline ptrdiff_t ptrdiff(const void *a, const void *b)
-{
- return a - b;
-}
-
-/*
- * container_of_user: Extract the superclass from a pointer to a member.
- *
- * Exactly like container_of() with the exception that it plays nicely
- * with sparse for __user @ptr.
- */
-#define container_of_user(ptr, type, member) ({ \
- void __user *__mptr = (void __user *)(ptr); \
- BUILD_BUG_ON_MSG(!__same_type(*(ptr), typeof_member(type, member)) && \
- !__same_type(*(ptr), void), \
- "pointer type mismatch in container_of()"); \
- ((type __user *)(__mptr - offsetof(type, member))); })
-
/*
* check_user_mbz: Check that a user value exists and is zero
*
@@ -167,11 +89,6 @@ static __always_inline ptrdiff_t ptrdiff(const void *a, const void *b)
get_user(mbz__, (U)) ? -EFAULT : mbz__ ? -EINVAL : 0; \
})
-#define u64_to_ptr(T, x) ({ \
- typecheck(u64, x); \
- (T *)(uintptr_t)(x); \
-})
-
#define __mask_next_bit(mask) ({ \
int __idx = ffs(mask) - 1; \
mask &= ~BIT(__idx); \
@@ -183,19 +100,6 @@ static inline bool is_power_of_2_u64(u64 n)
return (n != 0 && ((n & (n - 1)) == 0));
}
-static inline void __list_del_many(struct list_head *head,
- struct list_head *first)
-{
- first->prev = head;
- WRITE_ONCE(head->next, first);
-}
-
-static inline int list_is_last_rcu(const struct list_head *list,
- const struct list_head *head)
-{
- return READ_ONCE(list->next) == head;
-}
-
static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
{
unsigned long j = msecs_to_jiffies(m);
@@ -230,107 +134,6 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
}
}
-/*
- * __wait_for - magic wait macro
- *
- * Macro to help avoid open coding check/wait/timeout patterns. Note that it's
- * important that we check the condition again after having timed out, since the
- * timeout could be due to preemption or similar and we've never had a chance to
- * check the condition before the timeout.
- */
-#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \
- const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \
- long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \
- int ret__; \
- might_sleep(); \
- for (;;) { \
- const bool expired__ = ktime_after(ktime_get_raw(), end__); \
- OP; \
- /* Guarantee COND check prior to timeout */ \
- barrier(); \
- if (COND) { \
- ret__ = 0; \
- break; \
- } \
- if (expired__) { \
- ret__ = -ETIMEDOUT; \
- break; \
- } \
- usleep_range(wait__, wait__ * 2); \
- if (wait__ < (Wmax)) \
- wait__ <<= 1; \
- } \
- ret__; \
-})
-
-#define _wait_for(COND, US, Wmin, Wmax) __wait_for(, (COND), (US), (Wmin), \
- (Wmax))
-#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000)
-
-/* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. */
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG) && IS_ENABLED(CONFIG_PREEMPT_COUNT)
-# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) WARN_ON_ONCE((ATOMIC) && !in_atomic())
-#else
-# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) do { } while (0)
-#endif
-
-#define _wait_for_atomic(COND, US, ATOMIC) \
-({ \
- int cpu, ret, timeout = (US) * 1000; \
- u64 base; \
- _WAIT_FOR_ATOMIC_CHECK(ATOMIC); \
- if (!(ATOMIC)) { \
- preempt_disable(); \
- cpu = smp_processor_id(); \
- } \
- base = local_clock(); \
- for (;;) { \
- u64 now = local_clock(); \
- if (!(ATOMIC)) \
- preempt_enable(); \
- /* Guarantee COND check prior to timeout */ \
- barrier(); \
- if (COND) { \
- ret = 0; \
- break; \
- } \
- if (now - base >= timeout) { \
- ret = -ETIMEDOUT; \
- break; \
- } \
- cpu_relax(); \
- if (!(ATOMIC)) { \
- preempt_disable(); \
- if (unlikely(cpu != smp_processor_id())) { \
- timeout -= now - base; \
- cpu = smp_processor_id(); \
- base = local_clock(); \
- } \
- } \
- } \
- ret; \
-})
-
-#define wait_for_us(COND, US) \
-({ \
- int ret__; \
- BUILD_BUG_ON(!__builtin_constant_p(US)); \
- if ((US) > 10) \
- ret__ = _wait_for((COND), (US), 10, 10); \
- else \
- ret__ = _wait_for_atomic((COND), (US), 0); \
- ret__; \
-})
-
-#define wait_for_atomic_us(COND, US) \
-({ \
- BUILD_BUG_ON(!__builtin_constant_p(US)); \
- BUILD_BUG_ON((US) > 50000); \
- _wait_for_atomic((COND), (US), 1); \
-})
-
-#define wait_for_atomic(COND, MS) wait_for_atomic_us((COND), (MS) * 1000)
-
#define KHz(x) (1000 * (x))
#define MHz(x) KHz(1000 * (x))
@@ -346,19 +149,6 @@ static inline void __add_taint_for_CI(unsigned int taint)
add_taint(taint, LOCKDEP_STILL_OK);
}
-void cancel_timer(struct timer_list *t);
-void set_timer_ms(struct timer_list *t, unsigned long timeout);
-
-static inline bool timer_active(const struct timer_list *t)
-{
- return READ_ONCE(t->expires);
-}
-
-static inline bool timer_expired(const struct timer_list *t)
-{
- return timer_active(t) && !timer_pending(t);
-}
-
static inline bool i915_run_as_guest(void)
{
#if IS_ENABLED(CONFIG_X86)
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 0f9eee6d18d2..8054047840aa 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -30,12 +30,12 @@
#include <drm/drm_mm.h>
-#include "gt/intel_ggtt_fencing.h"
#include "gem/i915_gem_object.h"
-
-#include "i915_gem_gtt.h"
+#include "gt/intel_ggtt_fencing.h"
#include "i915_active.h"
+#include "i915_gem_gtt.h"
+#include "i915_ptr_util.h"
#include "i915_request.h"
#include "i915_vma_resource.h"
#include "i915_vma_types.h"
diff --git a/drivers/gpu/drm/i915/i915_wait_util.h b/drivers/gpu/drm/i915/i915_wait_util.h
new file mode 100644
index 000000000000..7376898e3bf8
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_wait_util.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __I915_WAIT_UTIL_H__
+#define __I915_WAIT_UTIL_H__
+
+#include <linux/compiler.h>
+#include <linux/delay.h>
+#include <linux/ktime.h>
+#include <linux/sched/clock.h>
+#include <linux/smp.h>
+
+/*
+ * __wait_for - magic wait macro
+ *
+ * Macro to help avoid open coding check/wait/timeout patterns. Note that it's
+ * important that we check the condition again after having timed out, since the
+ * timeout could be due to preemption or similar and we've never had a chance to
+ * check the condition before the timeout.
+ */
+#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \
+ const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \
+ long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \
+ int ret__; \
+ might_sleep(); \
+ for (;;) { \
+ const bool expired__ = ktime_after(ktime_get_raw(), end__); \
+ OP; \
+ /* Guarantee COND check prior to timeout */ \
+ barrier(); \
+ if (COND) { \
+ ret__ = 0; \
+ break; \
+ } \
+ if (expired__) { \
+ ret__ = -ETIMEDOUT; \
+ break; \
+ } \
+ usleep_range(wait__, wait__ * 2); \
+ if (wait__ < (Wmax)) \
+ wait__ <<= 1; \
+ } \
+ ret__; \
+})
+
+#define _wait_for(COND, US, Wmin, Wmax) __wait_for(, (COND), (US), (Wmin), \
+ (Wmax))
+#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000)
+
+/*
+ * If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false.
+ * On PREEMPT_RT a context does not become atomic merely because it runs in
+ * an interrupt handler or acquires a spinlock_t, so the check would emit
+ * warnings that don't occur otherwise and is therefore disabled.
+ */
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG) && IS_ENABLED(CONFIG_PREEMPT_COUNT) && !defined(CONFIG_PREEMPT_RT)
+# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) WARN_ON_ONCE((ATOMIC) && !in_atomic())
+#else
+# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) do { } while (0)
+#endif
+
+#define _wait_for_atomic(COND, US, ATOMIC) \
+({ \
+ int cpu, ret, timeout = (US) * 1000; \
+ u64 base; \
+ _WAIT_FOR_ATOMIC_CHECK(ATOMIC); \
+ if (!(ATOMIC)) { \
+ preempt_disable(); \
+ cpu = smp_processor_id(); \
+ } \
+ base = local_clock(); \
+ for (;;) { \
+ u64 now = local_clock(); \
+ if (!(ATOMIC)) \
+ preempt_enable(); \
+ /* Guarantee COND check prior to timeout */ \
+ barrier(); \
+ if (COND) { \
+ ret = 0; \
+ break; \
+ } \
+ if (now - base >= timeout) { \
+ ret = -ETIMEDOUT; \
+ break; \
+ } \
+ cpu_relax(); \
+ if (!(ATOMIC)) { \
+ preempt_disable(); \
+ if (unlikely(cpu != smp_processor_id())) { \
+ timeout -= now - base; \
+ cpu = smp_processor_id(); \
+ base = local_clock(); \
+ } \
+ } \
+ } \
+ ret; \
+})
+
+#define wait_for_us(COND, US) \
+({ \
+ int ret__; \
+ BUILD_BUG_ON(!__builtin_constant_p(US)); \
+ if ((US) > 10) \
+ ret__ = _wait_for((COND), (US), 10, 10); \
+ else \
+ ret__ = _wait_for_atomic((COND), (US), 0); \
+ ret__; \
+})
+
+#define wait_for_atomic_us(COND, US) \
+({ \
+ BUILD_BUG_ON(!__builtin_constant_p(US)); \
+ BUILD_BUG_ON((US) > 50000); \
+ _wait_for_atomic((COND), (US), 1); \
+})
+
+#define wait_for_atomic(COND, MS) wait_for_atomic_us((COND), (MS) * 1000)
+
+#endif /* __I915_WAIT_UTIL_H__ */
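A hedged usage sketch for the relocated wait macros; the register and bit below are placeholders, not real hardware definitions. wait_for() may sleep, so atomic contexts must use the wait_for_atomic_us() variant instead:

#define EXAMPLE_STATUS_REG	_MMIO(0x1234)	/* placeholder offset */
#define EXAMPLE_IDLE_BIT	REG_BIT(0)	/* placeholder bit */

static int wait_for_unit_idle(struct intel_uncore *uncore)
{
	/* polls with backoff for up to 10 ms; -ETIMEDOUT on failure */
	return wait_for(intel_uncore_read(uncore, EXAMPLE_STATUS_REG) &
			EXAMPLE_IDLE_BIT, 10);
}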
diff --git a/drivers/gpu/drm/i915/intel_clock_gating.c b/drivers/gpu/drm/i915/intel_clock_gating.c
index f86a3629ae9e..467740969431 100644
--- a/drivers/gpu/drm/i915/intel_clock_gating.c
+++ b/drivers/gpu/drm/i915/intel_clock_gating.c
@@ -132,16 +132,17 @@ static void ibx_init_clock_gating(struct drm_i915_private *i915)
static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = dev_priv->display;
enum pipe pipe;
- for_each_pipe(dev_priv, pipe) {
- intel_uncore_rmw(&dev_priv->uncore, DSPCNTR(dev_priv, pipe),
+ for_each_pipe(display, pipe) {
+ intel_uncore_rmw(&dev_priv->uncore, DSPCNTR(display, pipe),
0, DISP_TRICKLE_FEED_DISABLE);
- intel_uncore_rmw(&dev_priv->uncore, DSPSURF(dev_priv, pipe),
+ intel_uncore_rmw(&dev_priv->uncore, DSPSURF(display, pipe),
0, 0);
intel_uncore_posting_read(&dev_priv->uncore,
- DSPSURF(dev_priv, pipe));
+ DSPSURF(display, pipe));
}
}
@@ -218,7 +219,7 @@ static void cpt_init_clock_gating(struct drm_i915_private *i915)
/* The below fixes the weird display corruption, a few pixels shifted
* downward, on (only) LVDS of some HP laptops with IVY.
*/
- for_each_pipe(i915, pipe) {
+ for_each_pipe(display, pipe) {
val = intel_uncore_read(&i915->uncore, TRANS_CHICKEN2(pipe));
val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
@@ -229,7 +230,7 @@ static void cpt_init_clock_gating(struct drm_i915_private *i915)
intel_uncore_write(&i915->uncore, TRANS_CHICKEN2(pipe), val);
}
/* WADP0ClockGatingDisable */
- for_each_pipe(i915, pipe) {
+ for_each_pipe(display, pipe) {
intel_uncore_write(&i915->uncore, TRANS_CHICKEN1(pipe),
TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
}
@@ -307,11 +308,13 @@ static void gen6_init_clock_gating(struct drm_i915_private *i915)
static void lpt_init_clock_gating(struct drm_i915_private *i915)
{
+ struct intel_display *display = i915->display;
+
/*
* TODO: this bit should only be enabled when really needed, then
* disabled when not needed anymore in order to save power.
*/
- if (HAS_PCH_LPT_LP(i915))
+ if (HAS_PCH_LPT_LP(display))
intel_uncore_rmw(&i915->uncore, SOUTH_DSPCLK_GATE_D,
0, PCH_LP_PARTITION_LEVEL_DISABLE);
@@ -355,7 +358,9 @@ static void dg2_init_clock_gating(struct drm_i915_private *i915)
static void cnp_init_clock_gating(struct drm_i915_private *i915)
{
- if (!HAS_PCH_CNP(i915))
+ struct intel_display *display = i915->display;
+
+ if (!HAS_PCH_CNP(display))
return;
/* Display WA #1181 WaSouthDisplayDisablePWMCGEGating: cnp */
@@ -421,6 +426,7 @@ static void skl_init_clock_gating(struct drm_i915_private *i915)
static void bdw_init_clock_gating(struct drm_i915_private *i915)
{
+ struct intel_display *display = i915->display;
enum pipe pipe;
/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
@@ -432,7 +438,7 @@ static void bdw_init_clock_gating(struct drm_i915_private *i915)
/* WaPsrDPAMaskVBlankInSRD:bdw */
intel_uncore_rmw(&i915->uncore, CHICKEN_PAR1_1, 0, HSW_MASK_VBL_TO_PIPE_IN_SRD);
- for_each_pipe(i915, pipe) {
+ for_each_pipe(display, pipe) {
/* WaPsrDPRSUnmaskVBlankInSRD:bdw */
intel_uncore_rmw(&i915->uncore, CHICKEN_PIPESL_1(pipe),
0, BDW_UNMASK_VBL_TO_REGS_IN_SRD);
@@ -468,6 +474,7 @@ static void bdw_init_clock_gating(struct drm_i915_private *i915)
static void hsw_init_clock_gating(struct drm_i915_private *i915)
{
+ struct intel_display *display = i915->display;
enum pipe pipe;
/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
@@ -476,7 +483,7 @@ static void hsw_init_clock_gating(struct drm_i915_private *i915)
/* WaPsrDPAMaskVBlankInSRD:hsw */
intel_uncore_rmw(&i915->uncore, CHICKEN_PAR1_1, 0, HSW_MASK_VBL_TO_PIPE_IN_SRD);
- for_each_pipe(i915, pipe) {
+ for_each_pipe(display, pipe) {
/* WaPsrDPRSUnmaskVBlankInSRD:hsw */
intel_uncore_rmw(&i915->uncore, CHICKEN_PIPESL_1(pipe),
0, HSW_UNMASK_VBL_TO_REGS_IN_SRD);
@@ -494,6 +501,8 @@ static void hsw_init_clock_gating(struct drm_i915_private *i915)
static void ivb_init_clock_gating(struct drm_i915_private *i915)
{
+ struct intel_display *display = i915->display;
+
intel_uncore_write(&i915->uncore, ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
/* WaFbcAsynchFlipDisableFbcQueue:ivb */
@@ -531,7 +540,7 @@ static void ivb_init_clock_gating(struct drm_i915_private *i915)
intel_uncore_rmw(&i915->uncore, GEN6_MBCUNIT_SNPCR, GEN6_MBC_SNPCR_MASK,
GEN6_MBC_SNPCR_MED);
- if (!HAS_PCH_NOP(i915))
+ if (!HAS_PCH_NOP(display))
cpt_init_clock_gating(i915);
gen6_check_mch_setup(i915);
@@ -611,7 +620,7 @@ static void g4x_init_clock_gating(struct drm_i915_private *i915)
OVCUNIT_CLOCK_GATE_DISABLE;
if (IS_GM45(i915))
dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
- intel_uncore_write(&i915->uncore, DSPCLK_GATE_D(i915), dspclk_gate);
+ intel_uncore_write(&i915->uncore, DSPCLK_GATE_D, dspclk_gate);
g4x_disable_trickle_feed(i915);
}
@@ -622,7 +631,7 @@ static void i965gm_init_clock_gating(struct drm_i915_private *i915)
intel_uncore_write(uncore, RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
intel_uncore_write(uncore, RENCLK_GATE_D2, 0);
- intel_uncore_write(uncore, DSPCLK_GATE_D(i915), 0);
+ intel_uncore_write(uncore, DSPCLK_GATE_D, 0);
intel_uncore_write(uncore, RAMCLK_GATE_D, 0);
intel_uncore_write16(uncore, DEUC, 0);
intel_uncore_write(uncore,
diff --git a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
index 87ac4446d306..ca57a3dd3148 100644
--- a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
+++ b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
@@ -62,6 +62,7 @@
static int iterate_generic_mmio(struct intel_gvt_mmio_table_iter *iter)
{
struct drm_i915_private *dev_priv = iter->i915;
+ struct intel_display *display = dev_priv->display;
MMIO_RING_D(RING_IMR);
MMIO_D(SDEIMR);
@@ -133,38 +134,38 @@ static int iterate_generic_mmio(struct intel_gvt_mmio_table_iter *iter)
MMIO_D(_MMIO(0x650b4));
MMIO_D(_MMIO(0xc4040));
MMIO_D(DERRMR);
- MMIO_D(PIPEDSL(dev_priv, PIPE_A));
- MMIO_D(PIPEDSL(dev_priv, PIPE_B));
- MMIO_D(PIPEDSL(dev_priv, PIPE_C));
- MMIO_D(PIPEDSL(dev_priv, _PIPE_EDP));
- MMIO_D(TRANSCONF(dev_priv, TRANSCODER_A));
- MMIO_D(TRANSCONF(dev_priv, TRANSCODER_B));
- MMIO_D(TRANSCONF(dev_priv, TRANSCODER_C));
- MMIO_D(TRANSCONF(dev_priv, TRANSCODER_EDP));
- MMIO_D(PIPESTAT(dev_priv, PIPE_A));
- MMIO_D(PIPESTAT(dev_priv, PIPE_B));
- MMIO_D(PIPESTAT(dev_priv, PIPE_C));
- MMIO_D(PIPESTAT(dev_priv, _PIPE_EDP));
- MMIO_D(PIPE_FLIPCOUNT_G4X(dev_priv, PIPE_A));
- MMIO_D(PIPE_FLIPCOUNT_G4X(dev_priv, PIPE_B));
- MMIO_D(PIPE_FLIPCOUNT_G4X(dev_priv, PIPE_C));
- MMIO_D(PIPE_FLIPCOUNT_G4X(dev_priv, _PIPE_EDP));
- MMIO_D(PIPE_FRMCOUNT_G4X(dev_priv, PIPE_A));
- MMIO_D(PIPE_FRMCOUNT_G4X(dev_priv, PIPE_B));
- MMIO_D(PIPE_FRMCOUNT_G4X(dev_priv, PIPE_C));
- MMIO_D(PIPE_FRMCOUNT_G4X(dev_priv, _PIPE_EDP));
- MMIO_D(CURCNTR(dev_priv, PIPE_A));
- MMIO_D(CURCNTR(dev_priv, PIPE_B));
- MMIO_D(CURCNTR(dev_priv, PIPE_C));
- MMIO_D(CURPOS(dev_priv, PIPE_A));
- MMIO_D(CURPOS(dev_priv, PIPE_B));
- MMIO_D(CURPOS(dev_priv, PIPE_C));
- MMIO_D(CURBASE(dev_priv, PIPE_A));
- MMIO_D(CURBASE(dev_priv, PIPE_B));
- MMIO_D(CURBASE(dev_priv, PIPE_C));
- MMIO_D(CUR_FBC_CTL(dev_priv, PIPE_A));
- MMIO_D(CUR_FBC_CTL(dev_priv, PIPE_B));
- MMIO_D(CUR_FBC_CTL(dev_priv, PIPE_C));
+ MMIO_D(PIPEDSL(display, PIPE_A));
+ MMIO_D(PIPEDSL(display, PIPE_B));
+ MMIO_D(PIPEDSL(display, PIPE_C));
+ MMIO_D(PIPEDSL(display, _PIPE_EDP));
+ MMIO_D(TRANSCONF(display, TRANSCODER_A));
+ MMIO_D(TRANSCONF(display, TRANSCODER_B));
+ MMIO_D(TRANSCONF(display, TRANSCODER_C));
+ MMIO_D(TRANSCONF(display, TRANSCODER_EDP));
+ MMIO_D(PIPESTAT(display, PIPE_A));
+ MMIO_D(PIPESTAT(display, PIPE_B));
+ MMIO_D(PIPESTAT(display, PIPE_C));
+ MMIO_D(PIPESTAT(display, _PIPE_EDP));
+ MMIO_D(PIPE_FLIPCOUNT_G4X(display, PIPE_A));
+ MMIO_D(PIPE_FLIPCOUNT_G4X(display, PIPE_B));
+ MMIO_D(PIPE_FLIPCOUNT_G4X(display, PIPE_C));
+ MMIO_D(PIPE_FLIPCOUNT_G4X(display, _PIPE_EDP));
+ MMIO_D(PIPE_FRMCOUNT_G4X(display, PIPE_A));
+ MMIO_D(PIPE_FRMCOUNT_G4X(display, PIPE_B));
+ MMIO_D(PIPE_FRMCOUNT_G4X(display, PIPE_C));
+ MMIO_D(PIPE_FRMCOUNT_G4X(display, _PIPE_EDP));
+ MMIO_D(CURCNTR(display, PIPE_A));
+ MMIO_D(CURCNTR(display, PIPE_B));
+ MMIO_D(CURCNTR(display, PIPE_C));
+ MMIO_D(CURPOS(display, PIPE_A));
+ MMIO_D(CURPOS(display, PIPE_B));
+ MMIO_D(CURPOS(display, PIPE_C));
+ MMIO_D(CURBASE(display, PIPE_A));
+ MMIO_D(CURBASE(display, PIPE_B));
+ MMIO_D(CURBASE(display, PIPE_C));
+ MMIO_D(CUR_FBC_CTL(display, PIPE_A));
+ MMIO_D(CUR_FBC_CTL(display, PIPE_B));
+ MMIO_D(CUR_FBC_CTL(display, PIPE_C));
MMIO_D(_MMIO(0x700ac));
MMIO_D(_MMIO(0x710ac));
MMIO_D(_MMIO(0x720ac));
@@ -172,32 +173,32 @@ static int iterate_generic_mmio(struct intel_gvt_mmio_table_iter *iter)
MMIO_D(_MMIO(0x70094));
MMIO_D(_MMIO(0x70098));
MMIO_D(_MMIO(0x7009c));
- MMIO_D(DSPCNTR(dev_priv, PIPE_A));
- MMIO_D(DSPADDR(dev_priv, PIPE_A));
- MMIO_D(DSPSTRIDE(dev_priv, PIPE_A));
- MMIO_D(DSPPOS(dev_priv, PIPE_A));
- MMIO_D(DSPSIZE(dev_priv, PIPE_A));
- MMIO_D(DSPSURF(dev_priv, PIPE_A));
- MMIO_D(DSPOFFSET(dev_priv, PIPE_A));
- MMIO_D(DSPSURFLIVE(dev_priv, PIPE_A));
+ MMIO_D(DSPCNTR(display, PIPE_A));
+ MMIO_D(DSPADDR(display, PIPE_A));
+ MMIO_D(DSPSTRIDE(display, PIPE_A));
+ MMIO_D(DSPPOS(display, PIPE_A));
+ MMIO_D(DSPSIZE(display, PIPE_A));
+ MMIO_D(DSPSURF(display, PIPE_A));
+ MMIO_D(DSPOFFSET(display, PIPE_A));
+ MMIO_D(DSPSURFLIVE(display, PIPE_A));
MMIO_D(REG_50080(PIPE_A, PLANE_PRIMARY));
- MMIO_D(DSPCNTR(dev_priv, PIPE_B));
- MMIO_D(DSPADDR(dev_priv, PIPE_B));
- MMIO_D(DSPSTRIDE(dev_priv, PIPE_B));
- MMIO_D(DSPPOS(dev_priv, PIPE_B));
- MMIO_D(DSPSIZE(dev_priv, PIPE_B));
- MMIO_D(DSPSURF(dev_priv, PIPE_B));
- MMIO_D(DSPOFFSET(dev_priv, PIPE_B));
- MMIO_D(DSPSURFLIVE(dev_priv, PIPE_B));
+ MMIO_D(DSPCNTR(display, PIPE_B));
+ MMIO_D(DSPADDR(display, PIPE_B));
+ MMIO_D(DSPSTRIDE(display, PIPE_B));
+ MMIO_D(DSPPOS(display, PIPE_B));
+ MMIO_D(DSPSIZE(display, PIPE_B));
+ MMIO_D(DSPSURF(display, PIPE_B));
+ MMIO_D(DSPOFFSET(display, PIPE_B));
+ MMIO_D(DSPSURFLIVE(display, PIPE_B));
MMIO_D(REG_50080(PIPE_B, PLANE_PRIMARY));
- MMIO_D(DSPCNTR(dev_priv, PIPE_C));
- MMIO_D(DSPADDR(dev_priv, PIPE_C));
- MMIO_D(DSPSTRIDE(dev_priv, PIPE_C));
- MMIO_D(DSPPOS(dev_priv, PIPE_C));
- MMIO_D(DSPSIZE(dev_priv, PIPE_C));
- MMIO_D(DSPSURF(dev_priv, PIPE_C));
- MMIO_D(DSPOFFSET(dev_priv, PIPE_C));
- MMIO_D(DSPSURFLIVE(dev_priv, PIPE_C));
+ MMIO_D(DSPCNTR(display, PIPE_C));
+ MMIO_D(DSPADDR(display, PIPE_C));
+ MMIO_D(DSPSTRIDE(display, PIPE_C));
+ MMIO_D(DSPPOS(display, PIPE_C));
+ MMIO_D(DSPSIZE(display, PIPE_C));
+ MMIO_D(DSPSURF(display, PIPE_C));
+ MMIO_D(DSPOFFSET(display, PIPE_C));
+ MMIO_D(DSPSURFLIVE(display, PIPE_C));
MMIO_D(REG_50080(PIPE_C, PLANE_PRIMARY));
MMIO_D(SPRCTL(PIPE_A));
MMIO_D(SPRLINOFF(PIPE_A));
@@ -238,73 +239,73 @@ static int iterate_generic_mmio(struct intel_gvt_mmio_table_iter *iter)
MMIO_D(SPRSCALE(PIPE_C));
MMIO_D(SPRSURFLIVE(PIPE_C));
MMIO_D(REG_50080(PIPE_C, PLANE_SPRITE0));
- MMIO_D(TRANS_HTOTAL(dev_priv, TRANSCODER_A));
- MMIO_D(TRANS_HBLANK(dev_priv, TRANSCODER_A));
- MMIO_D(TRANS_HSYNC(dev_priv, TRANSCODER_A));
- MMIO_D(TRANS_VTOTAL(dev_priv, TRANSCODER_A));
- MMIO_D(TRANS_VBLANK(dev_priv, TRANSCODER_A));
- MMIO_D(TRANS_VSYNC(dev_priv, TRANSCODER_A));
- MMIO_D(BCLRPAT(dev_priv, TRANSCODER_A));
- MMIO_D(TRANS_VSYNCSHIFT(dev_priv, TRANSCODER_A));
- MMIO_D(PIPESRC(dev_priv, TRANSCODER_A));
- MMIO_D(TRANS_HTOTAL(dev_priv, TRANSCODER_B));
- MMIO_D(TRANS_HBLANK(dev_priv, TRANSCODER_B));
- MMIO_D(TRANS_HSYNC(dev_priv, TRANSCODER_B));
- MMIO_D(TRANS_VTOTAL(dev_priv, TRANSCODER_B));
- MMIO_D(TRANS_VBLANK(dev_priv, TRANSCODER_B));
- MMIO_D(TRANS_VSYNC(dev_priv, TRANSCODER_B));
- MMIO_D(BCLRPAT(dev_priv, TRANSCODER_B));
- MMIO_D(TRANS_VSYNCSHIFT(dev_priv, TRANSCODER_B));
- MMIO_D(PIPESRC(dev_priv, TRANSCODER_B));
- MMIO_D(TRANS_HTOTAL(dev_priv, TRANSCODER_C));
- MMIO_D(TRANS_HBLANK(dev_priv, TRANSCODER_C));
- MMIO_D(TRANS_HSYNC(dev_priv, TRANSCODER_C));
- MMIO_D(TRANS_VTOTAL(dev_priv, TRANSCODER_C));
- MMIO_D(TRANS_VBLANK(dev_priv, TRANSCODER_C));
- MMIO_D(TRANS_VSYNC(dev_priv, TRANSCODER_C));
- MMIO_D(BCLRPAT(dev_priv, TRANSCODER_C));
- MMIO_D(TRANS_VSYNCSHIFT(dev_priv, TRANSCODER_C));
- MMIO_D(PIPESRC(dev_priv, TRANSCODER_C));
- MMIO_D(TRANS_HTOTAL(dev_priv, TRANSCODER_EDP));
- MMIO_D(TRANS_HBLANK(dev_priv, TRANSCODER_EDP));
- MMIO_D(TRANS_HSYNC(dev_priv, TRANSCODER_EDP));
- MMIO_D(TRANS_VTOTAL(dev_priv, TRANSCODER_EDP));
- MMIO_D(TRANS_VBLANK(dev_priv, TRANSCODER_EDP));
- MMIO_D(TRANS_VSYNC(dev_priv, TRANSCODER_EDP));
- MMIO_D(BCLRPAT(dev_priv, TRANSCODER_EDP));
- MMIO_D(TRANS_VSYNCSHIFT(dev_priv, TRANSCODER_EDP));
- MMIO_D(PIPE_DATA_M1(dev_priv, TRANSCODER_A));
- MMIO_D(PIPE_DATA_N1(dev_priv, TRANSCODER_A));
- MMIO_D(PIPE_DATA_M2(dev_priv, TRANSCODER_A));
- MMIO_D(PIPE_DATA_N2(dev_priv, TRANSCODER_A));
- MMIO_D(PIPE_LINK_M1(dev_priv, TRANSCODER_A));
- MMIO_D(PIPE_LINK_N1(dev_priv, TRANSCODER_A));
- MMIO_D(PIPE_LINK_M2(dev_priv, TRANSCODER_A));
- MMIO_D(PIPE_LINK_N2(dev_priv, TRANSCODER_A));
- MMIO_D(PIPE_DATA_M1(dev_priv, TRANSCODER_B));
- MMIO_D(PIPE_DATA_N1(dev_priv, TRANSCODER_B));
- MMIO_D(PIPE_DATA_M2(dev_priv, TRANSCODER_B));
- MMIO_D(PIPE_DATA_N2(dev_priv, TRANSCODER_B));
- MMIO_D(PIPE_LINK_M1(dev_priv, TRANSCODER_B));
- MMIO_D(PIPE_LINK_N1(dev_priv, TRANSCODER_B));
- MMIO_D(PIPE_LINK_M2(dev_priv, TRANSCODER_B));
- MMIO_D(PIPE_LINK_N2(dev_priv, TRANSCODER_B));
- MMIO_D(PIPE_DATA_M1(dev_priv, TRANSCODER_C));
- MMIO_D(PIPE_DATA_N1(dev_priv, TRANSCODER_C));
- MMIO_D(PIPE_DATA_M2(dev_priv, TRANSCODER_C));
- MMIO_D(PIPE_DATA_N2(dev_priv, TRANSCODER_C));
- MMIO_D(PIPE_LINK_M1(dev_priv, TRANSCODER_C));
- MMIO_D(PIPE_LINK_N1(dev_priv, TRANSCODER_C));
- MMIO_D(PIPE_LINK_M2(dev_priv, TRANSCODER_C));
- MMIO_D(PIPE_LINK_N2(dev_priv, TRANSCODER_C));
- MMIO_D(PIPE_DATA_M1(dev_priv, TRANSCODER_EDP));
- MMIO_D(PIPE_DATA_N1(dev_priv, TRANSCODER_EDP));
- MMIO_D(PIPE_DATA_M2(dev_priv, TRANSCODER_EDP));
- MMIO_D(PIPE_DATA_N2(dev_priv, TRANSCODER_EDP));
- MMIO_D(PIPE_LINK_M1(dev_priv, TRANSCODER_EDP));
- MMIO_D(PIPE_LINK_N1(dev_priv, TRANSCODER_EDP));
- MMIO_D(PIPE_LINK_M2(dev_priv, TRANSCODER_EDP));
- MMIO_D(PIPE_LINK_N2(dev_priv, TRANSCODER_EDP));
+ MMIO_D(TRANS_HTOTAL(display, TRANSCODER_A));
+ MMIO_D(TRANS_HBLANK(display, TRANSCODER_A));
+ MMIO_D(TRANS_HSYNC(display, TRANSCODER_A));
+ MMIO_D(TRANS_VTOTAL(display, TRANSCODER_A));
+ MMIO_D(TRANS_VBLANK(display, TRANSCODER_A));
+ MMIO_D(TRANS_VSYNC(display, TRANSCODER_A));
+ MMIO_D(BCLRPAT(display, TRANSCODER_A));
+ MMIO_D(TRANS_VSYNCSHIFT(display, TRANSCODER_A));
+ MMIO_D(PIPESRC(display, TRANSCODER_A));
+ MMIO_D(TRANS_HTOTAL(display, TRANSCODER_B));
+ MMIO_D(TRANS_HBLANK(display, TRANSCODER_B));
+ MMIO_D(TRANS_HSYNC(display, TRANSCODER_B));
+ MMIO_D(TRANS_VTOTAL(display, TRANSCODER_B));
+ MMIO_D(TRANS_VBLANK(display, TRANSCODER_B));
+ MMIO_D(TRANS_VSYNC(display, TRANSCODER_B));
+ MMIO_D(BCLRPAT(display, TRANSCODER_B));
+ MMIO_D(TRANS_VSYNCSHIFT(display, TRANSCODER_B));
+ MMIO_D(PIPESRC(display, TRANSCODER_B));
+ MMIO_D(TRANS_HTOTAL(display, TRANSCODER_C));
+ MMIO_D(TRANS_HBLANK(display, TRANSCODER_C));
+ MMIO_D(TRANS_HSYNC(display, TRANSCODER_C));
+ MMIO_D(TRANS_VTOTAL(display, TRANSCODER_C));
+ MMIO_D(TRANS_VBLANK(display, TRANSCODER_C));
+ MMIO_D(TRANS_VSYNC(display, TRANSCODER_C));
+ MMIO_D(BCLRPAT(display, TRANSCODER_C));
+ MMIO_D(TRANS_VSYNCSHIFT(display, TRANSCODER_C));
+ MMIO_D(PIPESRC(display, TRANSCODER_C));
+ MMIO_D(TRANS_HTOTAL(display, TRANSCODER_EDP));
+ MMIO_D(TRANS_HBLANK(display, TRANSCODER_EDP));
+ MMIO_D(TRANS_HSYNC(display, TRANSCODER_EDP));
+ MMIO_D(TRANS_VTOTAL(display, TRANSCODER_EDP));
+ MMIO_D(TRANS_VBLANK(display, TRANSCODER_EDP));
+ MMIO_D(TRANS_VSYNC(display, TRANSCODER_EDP));
+ MMIO_D(BCLRPAT(display, TRANSCODER_EDP));
+ MMIO_D(TRANS_VSYNCSHIFT(display, TRANSCODER_EDP));
+ MMIO_D(PIPE_DATA_M1(display, TRANSCODER_A));
+ MMIO_D(PIPE_DATA_N1(display, TRANSCODER_A));
+ MMIO_D(PIPE_DATA_M2(display, TRANSCODER_A));
+ MMIO_D(PIPE_DATA_N2(display, TRANSCODER_A));
+ MMIO_D(PIPE_LINK_M1(display, TRANSCODER_A));
+ MMIO_D(PIPE_LINK_N1(display, TRANSCODER_A));
+ MMIO_D(PIPE_LINK_M2(display, TRANSCODER_A));
+ MMIO_D(PIPE_LINK_N2(display, TRANSCODER_A));
+ MMIO_D(PIPE_DATA_M1(display, TRANSCODER_B));
+ MMIO_D(PIPE_DATA_N1(display, TRANSCODER_B));
+ MMIO_D(PIPE_DATA_M2(display, TRANSCODER_B));
+ MMIO_D(PIPE_DATA_N2(display, TRANSCODER_B));
+ MMIO_D(PIPE_LINK_M1(display, TRANSCODER_B));
+ MMIO_D(PIPE_LINK_N1(display, TRANSCODER_B));
+ MMIO_D(PIPE_LINK_M2(display, TRANSCODER_B));
+ MMIO_D(PIPE_LINK_N2(display, TRANSCODER_B));
+ MMIO_D(PIPE_DATA_M1(display, TRANSCODER_C));
+ MMIO_D(PIPE_DATA_N1(display, TRANSCODER_C));
+ MMIO_D(PIPE_DATA_M2(display, TRANSCODER_C));
+ MMIO_D(PIPE_DATA_N2(display, TRANSCODER_C));
+ MMIO_D(PIPE_LINK_M1(display, TRANSCODER_C));
+ MMIO_D(PIPE_LINK_N1(display, TRANSCODER_C));
+ MMIO_D(PIPE_LINK_M2(display, TRANSCODER_C));
+ MMIO_D(PIPE_LINK_N2(display, TRANSCODER_C));
+ MMIO_D(PIPE_DATA_M1(display, TRANSCODER_EDP));
+ MMIO_D(PIPE_DATA_N1(display, TRANSCODER_EDP));
+ MMIO_D(PIPE_DATA_M2(display, TRANSCODER_EDP));
+ MMIO_D(PIPE_DATA_N2(display, TRANSCODER_EDP));
+ MMIO_D(PIPE_LINK_M1(display, TRANSCODER_EDP));
+ MMIO_D(PIPE_LINK_N1(display, TRANSCODER_EDP));
+ MMIO_D(PIPE_LINK_M2(display, TRANSCODER_EDP));
+ MMIO_D(PIPE_LINK_N2(display, TRANSCODER_EDP));
MMIO_D(PF_CTL(PIPE_A));
MMIO_D(PF_WIN_SZ(PIPE_A));
MMIO_D(PF_WIN_POS(PIPE_A));
@@ -513,12 +514,12 @@ static int iterate_generic_mmio(struct intel_gvt_mmio_table_iter *iter)
MMIO_D(GAMMA_MODE(PIPE_A));
MMIO_D(GAMMA_MODE(PIPE_B));
MMIO_D(GAMMA_MODE(PIPE_C));
- MMIO_D(TRANS_MULT(dev_priv, TRANSCODER_A));
- MMIO_D(TRANS_MULT(dev_priv, TRANSCODER_B));
- MMIO_D(TRANS_MULT(dev_priv, TRANSCODER_C));
- MMIO_D(HSW_TVIDEO_DIP_CTL(dev_priv, TRANSCODER_A));
- MMIO_D(HSW_TVIDEO_DIP_CTL(dev_priv, TRANSCODER_B));
- MMIO_D(HSW_TVIDEO_DIP_CTL(dev_priv, TRANSCODER_C));
+ MMIO_D(TRANS_MULT(display, TRANSCODER_A));
+ MMIO_D(TRANS_MULT(display, TRANSCODER_B));
+ MMIO_D(TRANS_MULT(display, TRANSCODER_C));
+ MMIO_D(HSW_TVIDEO_DIP_CTL(display, TRANSCODER_A));
+ MMIO_D(HSW_TVIDEO_DIP_CTL(display, TRANSCODER_B));
+ MMIO_D(HSW_TVIDEO_DIP_CTL(display, TRANSCODER_C));
MMIO_D(SFUSE_STRAP);
MMIO_D(SBI_ADDR);
MMIO_D(SBI_DATA);
@@ -1111,6 +1112,7 @@ static int iterate_skl_plus_mmio(struct intel_gvt_mmio_table_iter *iter)
static int iterate_bxt_mmio(struct intel_gvt_mmio_table_iter *iter)
{
struct drm_i915_private *dev_priv = iter->i915;
+ struct intel_display *display = dev_priv->display;
MMIO_F(_MMIO(0x80000), 0x3000);
MMIO_D(GEN7_SAMPLER_INSTDONE);
@@ -1242,9 +1244,9 @@ static int iterate_bxt_mmio(struct intel_gvt_mmio_table_iter *iter)
MMIO_D(BXT_DSI_PLL_ENABLE);
MMIO_D(GEN9_CLKGATE_DIS_0);
MMIO_D(GEN9_CLKGATE_DIS_4);
- MMIO_D(HSW_TVIDEO_DIP_GCP(dev_priv, TRANSCODER_A));
- MMIO_D(HSW_TVIDEO_DIP_GCP(dev_priv, TRANSCODER_B));
- MMIO_D(HSW_TVIDEO_DIP_GCP(dev_priv, TRANSCODER_C));
+ MMIO_D(HSW_TVIDEO_DIP_GCP(display, TRANSCODER_A));
+ MMIO_D(HSW_TVIDEO_DIP_GCP(display, TRANSCODER_B));
+ MMIO_D(HSW_TVIDEO_DIP_GCP(display, TRANSCODER_C));
MMIO_D(RC6_CTX_BASE);
MMIO_D(GEN8_PUSHBUS_CONTROL);
MMIO_D(GEN8_PUSHBUS_ENABLE);
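The hunks above are a mechanical conversion of the GVT MMIO tables from dev_priv-based display register macros to the new struct intel_display parameter. A minimal sketch of the resulting pattern, assuming a pipe iterator is available in the display core (illustrative only, not part of this patch):

	static int iterate_pipe_dsl_example(struct intel_gvt_mmio_table_iter *iter)
	{
		struct drm_i915_private *dev_priv = iter->i915;
		struct intel_display *display = dev_priv->display;
		enum pipe pipe;

		/* Display register macros now take the display pointer, not dev_priv */
		for_each_pipe(display, pipe)
			MMIO_D(PIPEDSL(display, pipe));

		return 0;
	}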
diff --git a/drivers/gpu/drm/i915/intel_pcode.c b/drivers/gpu/drm/i915/intel_pcode.c
index 81da75108c60..55ffedad2490 100644
--- a/drivers/gpu/drm/i915/intel_pcode.c
+++ b/drivers/gpu/drm/i915/intel_pcode.c
@@ -5,6 +5,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_wait_util.h"
#include "intel_pcode.h"
static int gen6_check_mailbox_status(u32 mbox)
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index c8e29fd72290..8cb59f8d1f4c 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -21,19 +21,20 @@
* IN THE SOFTWARE.
*/
-#include <drm/drm_managed.h>
#include <linux/pm_runtime.h>
-#include "display/intel_display_core.h"
+#include <drm/drm_managed.h>
-#include "gt/intel_gt.h"
+#include "display/intel_display_core.h"
#include "gt/intel_engine_regs.h"
+#include "gt/intel_gt.h"
#include "gt/intel_gt_regs.h"
#include "i915_drv.h"
#include "i915_iosf_mbi.h"
#include "i915_reg.h"
#include "i915_vgpu.h"
+#include "i915_wait_util.h"
#include "intel_uncore_trace.h"
#define FORCEWAKE_ACK_TIMEOUT_MS 50
@@ -2502,6 +2503,7 @@ static int sanity_check_mmio_access(struct intel_uncore *uncore)
int intel_uncore_init_mmio(struct intel_uncore *uncore)
{
struct drm_i915_private *i915 = uncore->i915;
+ struct intel_display *display = i915->display;
int ret;
ret = sanity_check_mmio_access(uncore);
@@ -2536,7 +2538,7 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore)
GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.read_fw_domains);
GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.write_fw_domains);
- if (HAS_FPGA_DBG_UNCLAIMED(i915))
+ if (HAS_FPGA_DBG_UNCLAIMED(display))
uncore->flags |= UNCORE_HAS_FPGA_DBG_UNCLAIMED;
if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
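The same conversion applied to a feature check, as in the uncore and selftest hunks: display feature macros take the display pointer while GT/platform checks keep i915. A hedged sketch (the helper name is illustrative):

	static bool example_has_unclaimed_mmio_tracking(struct drm_i915_private *i915)
	{
		struct intel_display *display = i915->display;

		return HAS_FPGA_DBG_UNCLAIMED(display) ||
		       IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915);
	}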
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.c b/drivers/gpu/drm/i915/pxp/intel_pxp.c
index f8da693ad3ce..27d545c4e6a5 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp.c
@@ -2,15 +2,15 @@
/*
* Copyright(c) 2020 Intel Corporation.
*/
+
#include <linux/workqueue.h>
#include "gem/i915_gem_context.h"
-
#include "gt/intel_context.h"
#include "gt/intel_gt.h"
#include "i915_drv.h"
-
+#include "i915_wait_util.h"
#include "intel_pxp.h"
#include "intel_pxp_gsccs.h"
#include "intel_pxp_irq.h"
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c b/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c
index e07c5b380789..545f79eb0cc5 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c
@@ -69,17 +69,17 @@ DEFINE_SIMPLE_ATTRIBUTE(pxp_terminate_fops, pxp_terminate_get, pxp_terminate_set
void intel_pxp_debugfs_register(struct intel_pxp *pxp)
{
- struct drm_minor *minor;
+ struct dentry *debugfs_root;
struct dentry *pxproot;
if (!intel_pxp_is_supported(pxp))
return;
- minor = pxp->ctrl_gt->i915->drm.primary;
- if (!minor->debugfs_root)
+ debugfs_root = pxp->ctrl_gt->i915->drm.debugfs_root;
+ if (!debugfs_root)
return;
- pxproot = debugfs_create_dir("pxp", minor->debugfs_root);
+ pxproot = debugfs_create_dir("pxp", debugfs_root);
if (IS_ERR(pxproot))
return;
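A condensed sketch of the debugfs pattern the hunk above switches to: directories now hang off drm_device::debugfs_root instead of the primary minor (names illustrative):

	static void example_debugfs_register(struct drm_device *drm)
	{
		struct dentry *root = drm->debugfs_root;
		struct dentry *dir;

		if (!root)
			return;

		dir = debugfs_create_dir("example", root);
		if (IS_ERR(dir))
			return;

		/* create files under "dir" as before */
	}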
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 2fb7a9e7efec..48cd617247d1 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -22,14 +22,13 @@
*
*/
-#include <linux/prime_numbers.h>
#include <linux/pm_qos.h>
+#include <linux/prime_numbers.h>
#include <linux/sort.h>
#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_pm.h"
#include "gem/selftests/mock_context.h"
-
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_user.h"
@@ -40,11 +39,11 @@
#include "i915_random.h"
#include "i915_selftest.h"
+#include "i915_wait_util.h"
#include "igt_flush_test.h"
#include "igt_live_test.h"
#include "igt_spinner.h"
#include "lib_sw_fence.h"
-
#include "mock_drm.h"
#include "mock_gem_device.h"
diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c
index 889281819c5b..9c276c9d0a75 100644
--- a/drivers/gpu/drm/i915/selftests/i915_selftest.c
+++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c
@@ -31,7 +31,7 @@
#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_selftest.h"
-
+#include "i915_wait_util.h"
#include "igt_flush_test.h"
struct i915_selftest i915_selftest __read_mostly = {
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index 8c3e1f20e5a1..820364171ebe 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -3,12 +3,13 @@
*
* Copyright © 2018 Intel Corporation
*/
-#include "gt/intel_gpu_commands.h"
-#include "gt/intel_gt.h"
#include "gem/i915_gem_internal.h"
#include "gem/selftests/igt_gem_utils.h"
+#include "gt/intel_gpu_commands.h"
+#include "gt/intel_gt.h"
+#include "i915_wait_util.h"
#include "igt_spinner.h"
int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt)
diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c
index 41eaa9b7f67d..58bcbdcef563 100644
--- a/drivers/gpu/drm/i915/selftests/intel_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c
@@ -277,13 +277,15 @@ static int live_forcewake_domains(void *arg)
#define FW_RANGE 0x40000
struct intel_gt *gt = arg;
struct intel_uncore *uncore = gt->uncore;
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_display *display = i915->display;
unsigned long *valid;
u32 offset;
int err;
- if (!HAS_FPGA_DBG_UNCLAIMED(gt->i915) &&
- !IS_VALLEYVIEW(gt->i915) &&
- !IS_CHERRYVIEW(gt->i915))
+ if (!HAS_FPGA_DBG_UNCLAIMED(display) &&
+ !IS_VALLEYVIEW(i915) &&
+ !IS_CHERRYVIEW(i915))
return 0;
/*
diff --git a/drivers/gpu/drm/i915/soc/intel_dram.c b/drivers/gpu/drm/i915/soc/intel_dram.c
index deb159548a09..edffaed8f9a7 100644
--- a/drivers/gpu/drm/i915/soc/intel_dram.c
+++ b/drivers/gpu/drm/i915/soc/intel_dram.c
@@ -11,6 +11,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_dram.h"
#include "intel_mchbar_regs.h"
#include "intel_pcode.h"
@@ -30,10 +31,11 @@ struct dram_channel_info {
#define DRAM_TYPE_STR(type) [INTEL_DRAM_ ## type] = #type
-static const char *intel_dram_type_str(enum intel_dram_type type)
+const char *intel_dram_type_str(enum intel_dram_type type)
{
static const char * const str[] = {
DRAM_TYPE_STR(UNKNOWN),
+ DRAM_TYPE_STR(DDR2),
DRAM_TYPE_STR(DDR3),
DRAM_TYPE_STR(DDR4),
DRAM_TYPE_STR(LPDDR3),
@@ -54,9 +56,10 @@ static const char *intel_dram_type_str(enum intel_dram_type type)
#undef DRAM_TYPE_STR
-static bool pnv_is_ddr3(struct drm_i915_private *i915)
+static enum intel_dram_type pnv_dram_type(struct drm_i915_private *i915)
{
- return intel_uncore_read(&i915->uncore, CSHRDDR3CTL) & CSHRDDR3CTL_DDR3;
+ return intel_uncore_read(&i915->uncore, CSHRDDR3CTL) & CSHRDDR3CTL_DDR3 ?
+ INTEL_DRAM_DDR3 : INTEL_DRAM_DDR2;
}
static unsigned int pnv_mem_freq(struct drm_i915_private *dev_priv)
@@ -135,25 +138,21 @@ static unsigned int vlv_mem_freq(struct drm_i915_private *i915)
return 0;
}
-static void detect_mem_freq(struct drm_i915_private *i915)
+unsigned int intel_mem_freq(struct drm_i915_private *i915)
{
if (IS_PINEVIEW(i915))
- i915->mem_freq = pnv_mem_freq(i915);
+ return pnv_mem_freq(i915);
else if (GRAPHICS_VER(i915) == 5)
- i915->mem_freq = ilk_mem_freq(i915);
+ return ilk_mem_freq(i915);
else if (IS_CHERRYVIEW(i915))
- i915->mem_freq = chv_mem_freq(i915);
+ return chv_mem_freq(i915);
else if (IS_VALLEYVIEW(i915))
- i915->mem_freq = vlv_mem_freq(i915);
-
- if (IS_PINEVIEW(i915))
- i915->is_ddr3 = pnv_is_ddr3(i915);
-
- if (i915->mem_freq)
- drm_dbg(&i915->drm, "DDR speed: %d kHz\n", i915->mem_freq);
+ return vlv_mem_freq(i915);
+ else
+ return 0;
}
-unsigned int i9xx_fsb_freq(struct drm_i915_private *i915)
+static unsigned int i9xx_fsb_freq(struct drm_i915_private *i915)
{
u32 fsb;
@@ -235,15 +234,30 @@ static unsigned int ilk_fsb_freq(struct drm_i915_private *dev_priv)
}
}
-static void detect_fsb_freq(struct drm_i915_private *i915)
+unsigned int intel_fsb_freq(struct drm_i915_private *i915)
{
if (GRAPHICS_VER(i915) == 5)
- i915->fsb_freq = ilk_fsb_freq(i915);
+ return ilk_fsb_freq(i915);
else if (GRAPHICS_VER(i915) == 3 || GRAPHICS_VER(i915) == 4)
- i915->fsb_freq = i9xx_fsb_freq(i915);
+ return i9xx_fsb_freq(i915);
+ else
+ return 0;
+}
- if (i915->fsb_freq)
- drm_dbg(&i915->drm, "FSB frequency: %d kHz\n", i915->fsb_freq);
+static int i915_get_dram_info(struct drm_i915_private *i915, struct dram_info *dram_info)
+{
+ dram_info->fsb_freq = intel_fsb_freq(i915);
+ if (dram_info->fsb_freq)
+ drm_dbg(&i915->drm, "FSB frequency: %d kHz\n", dram_info->fsb_freq);
+
+ dram_info->mem_freq = intel_mem_freq(i915);
+ if (dram_info->mem_freq)
+ drm_dbg(&i915->drm, "DDR speed: %d kHz\n", dram_info->mem_freq);
+
+ if (IS_PINEVIEW(i915))
+ dram_info->type = pnv_dram_type(i915);
+
+ return 0;
}
static int intel_dimm_num_devices(const struct dram_dimm_info *dimm)
@@ -392,6 +406,9 @@ skl_dram_get_channels_info(struct drm_i915_private *i915, struct dram_info *dram
u32 val;
int ret;
+ /* Assume 16Gb DIMMs are present until proven otherwise */
+ dram_info->has_16gb_dimms = true;
+
val = intel_uncore_read(&i915->uncore,
SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN);
ret = skl_dram_get_channel_info(i915, &ch0, 0, val);
@@ -414,13 +431,16 @@ skl_dram_get_channels_info(struct drm_i915_private *i915, struct dram_info *dram
return -EINVAL;
}
- dram_info->wm_lv_0_adjust_needed = ch0.is_16gb_dimm || ch1.is_16gb_dimm;
+ dram_info->has_16gb_dimms = ch0.is_16gb_dimm || ch1.is_16gb_dimm;
dram_info->symmetric_memory = intel_is_dram_symmetric(&ch0, &ch1);
drm_dbg_kms(&i915->drm, "Memory configuration is symmetric? %s\n",
str_yes_no(dram_info->symmetric_memory));
+ drm_dbg_kms(&i915->drm, "16Gb DIMMs: %s\n",
+ str_yes_no(dram_info->has_16gb_dimms));
+
return 0;
}
@@ -649,8 +669,9 @@ static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv,
static int gen11_get_dram_info(struct drm_i915_private *i915, struct dram_info *dram_info)
{
- int ret = skl_get_dram_info(i915, dram_info);
+ int ret;
+ ret = skl_dram_get_channels_info(i915, dram_info);
if (ret)
return ret;
@@ -659,8 +680,6 @@ static int gen11_get_dram_info(struct drm_i915_private *i915, struct dram_info *
static int gen12_get_dram_info(struct drm_i915_private *i915, struct dram_info *dram_info)
{
- dram_info->wm_lv_0_adjust_needed = false;
-
return icl_pcode_read_mem_global_info(i915, dram_info);
}
@@ -709,13 +728,11 @@ static int xelpdp_get_dram_info(struct drm_i915_private *i915, struct dram_info
int intel_dram_detect(struct drm_i915_private *i915)
{
+ struct intel_display *display = i915->display;
struct dram_info *dram_info;
int ret;
- detect_fsb_freq(i915);
- detect_mem_freq(i915);
-
- if (GRAPHICS_VER(i915) < 9 || IS_DG2(i915) || !HAS_DISPLAY(i915))
+ if (IS_DG2(i915) || !intel_display_device_present(display))
return 0;
dram_info = drmm_kzalloc(&i915->drm, sizeof(*dram_info), GFP_KERNEL);
@@ -724,13 +741,7 @@ int intel_dram_detect(struct drm_i915_private *i915)
i915->dram_info = dram_info;
- /*
- * Assume level 0 watermark latency adjustment is needed until proven
- * otherwise, this w/a is not needed by bxt/glk.
- */
- dram_info->wm_lv_0_adjust_needed = !IS_BROXTON(i915) && !IS_GEMINILAKE(i915);
-
- if (DISPLAY_VER(i915) >= 14)
+ if (DISPLAY_VER(display) >= 14)
ret = xelpdp_get_dram_info(i915, dram_info);
else if (GRAPHICS_VER(i915) >= 12)
ret = gen12_get_dram_info(i915, dram_info);
@@ -738,23 +749,23 @@ int intel_dram_detect(struct drm_i915_private *i915)
ret = gen11_get_dram_info(i915, dram_info);
else if (IS_BROXTON(i915) || IS_GEMINILAKE(i915))
ret = bxt_get_dram_info(i915, dram_info);
- else
+ else if (GRAPHICS_VER(i915) >= 9)
ret = skl_get_dram_info(i915, dram_info);
+ else
+ ret = i915_get_dram_info(i915, dram_info);
drm_dbg_kms(&i915->drm, "DRAM type: %s\n",
intel_dram_type_str(dram_info->type));
+ drm_dbg_kms(&i915->drm, "DRAM channels: %u\n", dram_info->num_channels);
+
+ drm_dbg_kms(&i915->drm, "Num QGV points %u\n", dram_info->num_qgv_points);
+ drm_dbg_kms(&i915->drm, "Num PSF GV points %u\n", dram_info->num_psf_gv_points);
+
/* TODO: Do we want to abort probe on dram detection failures? */
if (ret)
return 0;
- drm_dbg_kms(&i915->drm, "Num qgv points %u\n", dram_info->num_qgv_points);
-
- drm_dbg_kms(&i915->drm, "DRAM channels: %u\n", dram_info->num_channels);
-
- drm_dbg_kms(&i915->drm, "Watermark level 0 adjustment needed: %s\n",
- str_yes_no(dram_info->wm_lv_0_adjust_needed));
-
return 0;
}
diff --git a/drivers/gpu/drm/i915/soc/intel_dram.h b/drivers/gpu/drm/i915/soc/intel_dram.h
index 2a696e03aad4..03a973f1c941 100644
--- a/drivers/gpu/drm/i915/soc/intel_dram.h
+++ b/drivers/gpu/drm/i915/soc/intel_dram.h
@@ -12,11 +12,9 @@ struct drm_i915_private;
struct drm_device;
struct dram_info {
- bool wm_lv_0_adjust_needed;
- u8 num_channels;
- bool symmetric_memory;
enum intel_dram_type {
INTEL_DRAM_UNKNOWN,
+ INTEL_DRAM_DDR2,
INTEL_DRAM_DDR3,
INTEL_DRAM_DDR4,
INTEL_DRAM_LPDDR3,
@@ -27,13 +25,20 @@ struct dram_info {
INTEL_DRAM_GDDR_ECC,
__INTEL_DRAM_TYPE_MAX,
} type;
+ unsigned int fsb_freq;
+ unsigned int mem_freq;
+ u8 num_channels;
u8 num_qgv_points;
u8 num_psf_gv_points;
+ bool symmetric_memory;
+ bool has_16gb_dimms;
};
void intel_dram_edram_detect(struct drm_i915_private *i915);
int intel_dram_detect(struct drm_i915_private *i915);
-unsigned int i9xx_fsb_freq(struct drm_i915_private *i915);
+unsigned int intel_fsb_freq(struct drm_i915_private *i915);
+unsigned int intel_mem_freq(struct drm_i915_private *i915);
const struct dram_info *intel_dram_info(struct drm_device *drm);
+const char *intel_dram_type_str(enum intel_dram_type type);
#endif /* __INTEL_DRAM_H__ */
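With the frequencies and the 16Gb-DIMM flag folded into struct dram_info, consumers read everything through intel_dram_info(). A minimal sketch, assuming str_yes_no() from <linux/string_choices.h> (logging only, illustrative):

	static void example_log_dram_info(struct drm_device *drm)
	{
		const struct dram_info *dram = intel_dram_info(drm);

		drm_dbg(drm, "DRAM: type=%s fsb=%u kHz mem=%u kHz 16Gb DIMMs=%s\n",
			intel_dram_type_str(dram->type),
			dram->fsb_freq, dram->mem_freq,
			str_yes_no(dram->has_16gb_dimms));
	}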
diff --git a/drivers/gpu/drm/i915/soc/intel_gmch.c b/drivers/gpu/drm/i915/soc/intel_gmch.c
index 5346b8dda79a..f210c9655b53 100644
--- a/drivers/gpu/drm/i915/soc/intel_gmch.c
+++ b/drivers/gpu/drm/i915/soc/intel_gmch.c
@@ -148,7 +148,8 @@ void intel_gmch_bar_teardown(struct drm_i915_private *i915)
int intel_gmch_vga_set_state(struct drm_i915_private *i915, bool enable_decode)
{
- unsigned int reg = DISPLAY_VER(i915) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
+ struct intel_display *display = i915->display;
+ unsigned int reg = DISPLAY_VER(display) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
u16 gmch_ctrl;
if (pci_read_config_word(i915->gmch.pdev, reg, &gmch_ctrl)) {
diff --git a/drivers/gpu/drm/i915/vlv_suspend.c b/drivers/gpu/drm/i915/vlv_suspend.c
index fc9f311ea1db..221e4c0b2c58 100644
--- a/drivers/gpu/drm/i915/vlv_suspend.c
+++ b/drivers/gpu/drm/i915/vlv_suspend.c
@@ -8,16 +8,17 @@
#include <drm/drm_print.h>
+#include "gt/intel_gt_regs.h"
+
#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_trace.h"
#include "i915_utils.h"
+#include "i915_wait_util.h"
#include "intel_clock_gating.h"
#include "intel_uncore_trace.h"
#include "vlv_suspend.h"
-#include "gt/intel_gt_regs.h"
-
struct vlv_s0ix_state {
/* GAM */
u32 wr_watermark;
diff --git a/drivers/gpu/drm/imagination/Kconfig b/drivers/gpu/drm/imagination/Kconfig
index 3bfa2ac212dc..682dd2633d0c 100644
--- a/drivers/gpu/drm/imagination/Kconfig
+++ b/drivers/gpu/drm/imagination/Kconfig
@@ -3,8 +3,9 @@
config DRM_POWERVR
tristate "Imagination Technologies PowerVR (Series 6 and later) & IMG Graphics"
- depends on ARM64
+ depends on (ARM64 || RISCV && 64BIT)
depends on DRM
+ depends on MMU
depends on PM
select DRM_EXEC
select DRM_GEM_SHMEM_HELPER
diff --git a/drivers/gpu/drm/imagination/pvr_device.c b/drivers/gpu/drm/imagination/pvr_device.c
index 8b9ba4983c4c..294b6019b415 100644
--- a/drivers/gpu/drm/imagination/pvr_device.c
+++ b/drivers/gpu/drm/imagination/pvr_device.c
@@ -23,6 +23,7 @@
#include <linux/firmware.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
@@ -121,21 +122,6 @@ static int pvr_device_clk_init(struct pvr_device *pvr_dev)
return 0;
}
-static int pvr_device_reset_init(struct pvr_device *pvr_dev)
-{
- struct drm_device *drm_dev = from_pvr_device(pvr_dev);
- struct reset_control *reset;
-
- reset = devm_reset_control_get_optional_exclusive(drm_dev->dev, NULL);
- if (IS_ERR(reset))
- return dev_err_probe(drm_dev->dev, PTR_ERR(reset),
- "failed to get gpu reset line\n");
-
- pvr_dev->reset = reset;
-
- return 0;
-}
-
/**
* pvr_device_process_active_queues() - Process all queue related events.
* @pvr_dev: PowerVR device to check
@@ -618,6 +604,9 @@ pvr_device_init(struct pvr_device *pvr_dev)
struct device *dev = drm_dev->dev;
int err;
+ /* Get the platform-specific data based on the compatible string. */
+ pvr_dev->device_data = of_device_get_match_data(dev);
+
/*
* Setup device parameters. We do this first in case other steps
* depend on them.
@@ -631,8 +620,7 @@ pvr_device_init(struct pvr_device *pvr_dev)
if (err)
return err;
- /* Get the reset line for the GPU */
- err = pvr_device_reset_init(pvr_dev);
+ err = pvr_dev->device_data->pwr_ops->init(pvr_dev);
if (err)
return err;
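pvr_device_init() now resolves per-platform data before anything else; the retrieval in isolation looks like this (the NULL check is an illustrative addition, not in the patch):

	static int example_get_platform_data(struct pvr_device *pvr_dev)
	{
		struct device *dev = from_pvr_device(pvr_dev)->dev;

		/* Selected from dt_match[] via the devicetree compatible string */
		pvr_dev->device_data = of_device_get_match_data(dev);
		if (!pvr_dev->device_data)
			return -ENODEV;

		return 0;
	}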
diff --git a/drivers/gpu/drm/imagination/pvr_device.h b/drivers/gpu/drm/imagination/pvr_device.h
index 7cb01c38d2a9..ab8f56ae15df 100644
--- a/drivers/gpu/drm/imagination/pvr_device.h
+++ b/drivers/gpu/drm/imagination/pvr_device.h
@@ -37,6 +37,9 @@ struct clk;
/* Forward declaration from <linux/firmware.h>. */
struct firmware;
+/* Forward declaration from <linux/pwrseq/consumer.h> */
+struct pwrseq_desc;
+
/**
* struct pvr_gpu_id - Hardware GPU ID information for a PowerVR device
* @b: Branch ID.
@@ -58,6 +61,14 @@ struct pvr_fw_version {
};
/**
+ * struct pvr_device_data - Platform specific data associated with a compatible string.
+ * @pwr_ops: Pointer to a structure with platform-specific power functions.
+ */
+struct pvr_device_data {
+ const struct pvr_power_sequence_ops *pwr_ops;
+};
+
+/**
* struct pvr_device - powervr-specific wrapper for &struct drm_device
*/
struct pvr_device {
@@ -98,6 +109,9 @@ struct pvr_device {
/** @fw_version: Firmware version detected at runtime. */
struct pvr_fw_version fw_version;
+ /** @device_data: Pointer to platform-specific data. */
+ const struct pvr_device_data *device_data;
+
/** @regs_resource: Resource representing device control registers. */
struct resource *regs_resource;
@@ -148,6 +162,9 @@ struct pvr_device {
*/
struct reset_control *reset;
+ /** @pwrseq: Pointer to a power sequencer, if one is used. */
+ struct pwrseq_desc *pwrseq;
+
/** @irq: IRQ number. */
int irq;
diff --git a/drivers/gpu/drm/imagination/pvr_drv.c b/drivers/gpu/drm/imagination/pvr_drv.c
index b058ec183bb3..916b40ced7eb 100644
--- a/drivers/gpu/drm/imagination/pvr_drv.c
+++ b/drivers/gpu/drm/imagination/pvr_drv.c
@@ -1480,15 +1480,33 @@ static void pvr_remove(struct platform_device *plat_dev)
pvr_power_domains_fini(pvr_dev);
}
+static const struct pvr_device_data pvr_device_data_manual = {
+ .pwr_ops = &pvr_power_sequence_ops_manual,
+};
+
+static const struct pvr_device_data pvr_device_data_pwrseq = {
+ .pwr_ops = &pvr_power_sequence_ops_pwrseq,
+};
+
static const struct of_device_id dt_match[] = {
- { .compatible = "img,img-rogue", .data = NULL },
+ {
+ .compatible = "thead,th1520-gpu",
+ .data = &pvr_device_data_pwrseq,
+ },
+ {
+ .compatible = "img,img-rogue",
+ .data = &pvr_device_data_manual,
+ },
/*
* This legacy compatible string was introduced early on before the more generic
* "img,img-rogue" was added. Keep it around here for compatibility, but never use
* "img,img-axe" in new devicetrees.
*/
- { .compatible = "img,img-axe", .data = NULL },
+ {
+ .compatible = "img,img-axe",
+ .data = &pvr_device_data_manual,
+ },
{}
};
MODULE_DEVICE_TABLE(of, dt_match);
@@ -1513,4 +1531,5 @@ MODULE_DESCRIPTION(PVR_DRIVER_DESC);
MODULE_LICENSE("Dual MIT/GPL");
MODULE_IMPORT_NS("DMA_BUF");
MODULE_FIRMWARE("powervr/rogue_33.15.11.3_v1.fw");
+MODULE_FIRMWARE("powervr/rogue_36.52.104.182_v1.fw");
MODULE_FIRMWARE("powervr/rogue_36.53.104.796_v1.fw");
diff --git a/drivers/gpu/drm/imagination/pvr_power.c b/drivers/gpu/drm/imagination/pvr_power.c
index 187a07e0bd9a..c6e7ff9e935d 100644
--- a/drivers/gpu/drm/imagination/pvr_power.c
+++ b/drivers/gpu/drm/imagination/pvr_power.c
@@ -18,6 +18,7 @@
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
+#include <linux/pwrseq/consumer.h>
#include <linux/reset.h>
#include <linux/timer.h>
#include <linux/types.h>
@@ -234,6 +235,118 @@ pvr_watchdog_init(struct pvr_device *pvr_dev)
return 0;
}
+static int pvr_power_init_manual(struct pvr_device *pvr_dev)
+{
+ struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+ struct reset_control *reset;
+
+ reset = devm_reset_control_get_optional_exclusive(drm_dev->dev, NULL);
+ if (IS_ERR(reset))
+ return dev_err_probe(drm_dev->dev, PTR_ERR(reset),
+ "failed to get gpu reset line\n");
+
+ pvr_dev->reset = reset;
+
+ return 0;
+}
+
+static int pvr_power_on_sequence_manual(struct pvr_device *pvr_dev)
+{
+ int err;
+
+ err = clk_prepare_enable(pvr_dev->core_clk);
+ if (err)
+ return err;
+
+ err = clk_prepare_enable(pvr_dev->sys_clk);
+ if (err)
+ goto err_core_clk_disable;
+
+ err = clk_prepare_enable(pvr_dev->mem_clk);
+ if (err)
+ goto err_sys_clk_disable;
+
+ /*
+ * According to the hardware manual, a delay of at least 32 clock
+ * cycles is required between de-asserting the clkgen reset and
+ * de-asserting the GPU reset. Assuming a worst-case scenario with
+ * a very high GPU clock frequency, a delay of 1 microsecond is
+ * sufficient to ensure this requirement is met across all
+ * feasible GPU clock speeds.
+ */
+ udelay(1);
+
+ err = reset_control_deassert(pvr_dev->reset);
+ if (err)
+ goto err_mem_clk_disable;
+
+ return 0;
+
+err_mem_clk_disable:
+ clk_disable_unprepare(pvr_dev->mem_clk);
+
+err_sys_clk_disable:
+ clk_disable_unprepare(pvr_dev->sys_clk);
+
+err_core_clk_disable:
+ clk_disable_unprepare(pvr_dev->core_clk);
+
+ return err;
+}
+
+static int pvr_power_off_sequence_manual(struct pvr_device *pvr_dev)
+{
+ int err;
+
+ err = reset_control_assert(pvr_dev->reset);
+
+ clk_disable_unprepare(pvr_dev->mem_clk);
+ clk_disable_unprepare(pvr_dev->sys_clk);
+ clk_disable_unprepare(pvr_dev->core_clk);
+
+ return err;
+}
+
+const struct pvr_power_sequence_ops pvr_power_sequence_ops_manual = {
+ .init = pvr_power_init_manual,
+ .power_on = pvr_power_on_sequence_manual,
+ .power_off = pvr_power_off_sequence_manual,
+};
+
+static int pvr_power_init_pwrseq(struct pvr_device *pvr_dev)
+{
+ struct device *dev = from_pvr_device(pvr_dev)->dev;
+
+ pvr_dev->pwrseq = devm_pwrseq_get(dev, "gpu-power");
+ if (IS_ERR(pvr_dev->pwrseq)) {
+ /*
+ * This platform requires a sequencer. If we can't get it, we
+ * must return the error (including -EPROBE_DEFER to wait for
+ * the provider to appear).
+ */
+ return dev_err_probe(dev, PTR_ERR(pvr_dev->pwrseq),
+ "Failed to get required power sequencer\n");
+ }
+
+ return 0;
+}
+
+static int pvr_power_on_sequence_pwrseq(struct pvr_device *pvr_dev)
+{
+ return pwrseq_power_on(pvr_dev->pwrseq);
+}
+
+static int pvr_power_off_sequence_pwrseq(struct pvr_device *pvr_dev)
+{
+ return pwrseq_power_off(pvr_dev->pwrseq);
+}
+
+const struct pvr_power_sequence_ops pvr_power_sequence_ops_pwrseq = {
+ .init = pvr_power_init_pwrseq,
+ .power_on = pvr_power_on_sequence_pwrseq,
+ .power_off = pvr_power_off_sequence_pwrseq,
+};
+
int
pvr_power_device_suspend(struct device *dev)
{
@@ -252,11 +365,7 @@ pvr_power_device_suspend(struct device *dev)
goto err_drm_dev_exit;
}
- clk_disable_unprepare(pvr_dev->mem_clk);
- clk_disable_unprepare(pvr_dev->sys_clk);
- clk_disable_unprepare(pvr_dev->core_clk);
-
- err = reset_control_assert(pvr_dev->reset);
+ err = pvr_dev->device_data->pwr_ops->power_off(pvr_dev);
err_drm_dev_exit:
drm_dev_exit(idx);
@@ -276,53 +385,22 @@ pvr_power_device_resume(struct device *dev)
if (!drm_dev_enter(drm_dev, &idx))
return -EIO;
- err = clk_prepare_enable(pvr_dev->core_clk);
+ err = pvr_dev->device_data->pwr_ops->power_on(pvr_dev);
if (err)
goto err_drm_dev_exit;
- err = clk_prepare_enable(pvr_dev->sys_clk);
- if (err)
- goto err_core_clk_disable;
-
- err = clk_prepare_enable(pvr_dev->mem_clk);
- if (err)
- goto err_sys_clk_disable;
-
- /*
- * According to the hardware manual, a delay of at least 32 clock
- * cycles is required between de-asserting the clkgen reset and
- * de-asserting the GPU reset. Assuming a worst-case scenario with
- * a very high GPU clock frequency, a delay of 1 microsecond is
- * sufficient to ensure this requirement is met across all
- * feasible GPU clock speeds.
- */
- udelay(1);
-
- err = reset_control_deassert(pvr_dev->reset);
- if (err)
- goto err_mem_clk_disable;
-
if (pvr_dev->fw_dev.booted) {
err = pvr_power_fw_enable(pvr_dev);
if (err)
- goto err_reset_assert;
+ goto err_power_off;
}
drm_dev_exit(idx);
return 0;
-err_reset_assert:
- reset_control_assert(pvr_dev->reset);
-
-err_mem_clk_disable:
- clk_disable_unprepare(pvr_dev->mem_clk);
-
-err_sys_clk_disable:
- clk_disable_unprepare(pvr_dev->sys_clk);
-
-err_core_clk_disable:
- clk_disable_unprepare(pvr_dev->core_clk);
+err_power_off:
+ pvr_dev->device_data->pwr_ops->power_off(pvr_dev);
err_drm_dev_exit:
drm_dev_exit(idx);
diff --git a/drivers/gpu/drm/imagination/pvr_power.h b/drivers/gpu/drm/imagination/pvr_power.h
index ada85674a7ca..b853d092242c 100644
--- a/drivers/gpu/drm/imagination/pvr_power.h
+++ b/drivers/gpu/drm/imagination/pvr_power.h
@@ -41,4 +41,19 @@ pvr_power_put(struct pvr_device *pvr_dev)
int pvr_power_domains_init(struct pvr_device *pvr_dev);
void pvr_power_domains_fini(struct pvr_device *pvr_dev);
+/**
+ * struct pvr_power_sequence_ops - Platform specific power sequence operations.
+ * @init: Pointer to the platform-specific initialization function.
+ * @power_on: Pointer to the platform-specific power on function.
+ * @power_off: Pointer to the platform-specific power off function.
+ */
+struct pvr_power_sequence_ops {
+ int (*init)(struct pvr_device *pvr_dev);
+ int (*power_on)(struct pvr_device *pvr_dev);
+ int (*power_off)(struct pvr_device *pvr_dev);
+};
+
+extern const struct pvr_power_sequence_ops pvr_power_sequence_ops_manual;
+extern const struct pvr_power_sequence_ops pvr_power_sequence_ops_pwrseq;
+
#endif /* PVR_POWER_H */
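Callers no longer touch clocks or resets directly; they dispatch through the per-platform vtable, mirroring the suspend/resume hunks above. A condensed sketch:

	static int example_power_cycle(struct pvr_device *pvr_dev)
	{
		const struct pvr_power_sequence_ops *ops =
			pvr_dev->device_data->pwr_ops;
		int err;

		err = ops->power_on(pvr_dev);
		if (err)
			return err;

		/* ... device is powered; do work ... */

		return ops->power_off(pvr_dev);
	}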
diff --git a/drivers/gpu/drm/imagination/pvr_vm.c b/drivers/gpu/drm/imagination/pvr_vm.c
index 2896fa7501b1..3d97990170bf 100644
--- a/drivers/gpu/drm/imagination/pvr_vm.c
+++ b/drivers/gpu/drm/imagination/pvr_vm.c
@@ -185,12 +185,17 @@ struct pvr_vm_bind_op {
static int pvr_vm_bind_op_exec(struct pvr_vm_bind_op *bind_op)
{
switch (bind_op->type) {
- case PVR_VM_BIND_TYPE_MAP:
+ case PVR_VM_BIND_TYPE_MAP: {
+ const struct drm_gpuvm_map_req map_req = {
+ .map.va.addr = bind_op->device_addr,
+ .map.va.range = bind_op->size,
+ .map.gem.obj = gem_from_pvr_gem(bind_op->pvr_obj),
+ .map.gem.offset = bind_op->offset,
+ };
+
return drm_gpuvm_sm_map(&bind_op->vm_ctx->gpuvm_mgr,
- bind_op, bind_op->device_addr,
- bind_op->size,
- gem_from_pvr_gem(bind_op->pvr_obj),
- bind_op->offset);
+ bind_op, &map_req);
+ }
case PVR_VM_BIND_TYPE_UNMAP:
return drm_gpuvm_sm_unmap(&bind_op->vm_ctx->gpuvm_mgr,
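drm_gpuvm_sm_map() now takes a single request struct instead of positional VA/GEM arguments. A minimal sketch of a caller under the new signature (the priv argument is whatever the GPUVM ops callbacks expect):

	static int example_map(struct drm_gpuvm *gpuvm, void *priv,
			       struct drm_gem_object *obj,
			       u64 va, u64 size, u64 offset)
	{
		const struct drm_gpuvm_map_req map_req = {
			.map.va.addr = va,
			.map.va.range = size,
			.map.gem.obj = obj,
			.map.gem.offset = offset,
		};

		return drm_gpuvm_sm_map(gpuvm, priv, &map_req);
	}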
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
index 00e1afd46b81..44df6410bce1 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
@@ -913,6 +913,11 @@ static const struct adreno_info a6xx_gpus[] = {
{ /* sentinel */ },
},
},
+ .speedbins = ADRENO_SPEEDBINS(
+ { 0, 0 },
+ { 185, 0 },
+ { 127, 1 },
+ ),
}, {
.chip_ids = ADRENO_CHIP_IDS(
0x06030001,
@@ -1024,6 +1029,11 @@ static const struct adreno_info a6xx_gpus[] = {
.gmu_cgc_mode = 0x00020200,
.prim_fifo_threshold = 0x00300200,
},
+ .speedbins = ADRENO_SPEEDBINS(
+ { 0, 0 },
+ { 169, 0 },
+ { 113, 1 },
+ ),
}, {
.chip_ids = ADRENO_CHIP_IDS(0x06030500),
.family = ADRENO_6XX_GEN4,
@@ -1343,6 +1353,69 @@ static const uint32_t a7xx_pwrup_reglist_regs[] = {
DECLARE_ADRENO_REGLIST_LIST(a7xx_pwrup_reglist);
+/* Applicable to X185 and A750 */
+static const u32 a750_ifpc_reglist_regs[] = {
+ REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE(0),
+ REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE(1),
+ REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE(2),
+ REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE(3),
+ REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE(4),
+ REG_A6XX_TPL1_NC_MODE_CNTL,
+ REG_A6XX_SP_NC_MODE_CNTL,
+ REG_A6XX_CP_DBG_ECO_CNTL,
+ REG_A6XX_CP_PROTECT_CNTL,
+ REG_A6XX_CP_PROTECT(0),
+ REG_A6XX_CP_PROTECT(1),
+ REG_A6XX_CP_PROTECT(2),
+ REG_A6XX_CP_PROTECT(3),
+ REG_A6XX_CP_PROTECT(4),
+ REG_A6XX_CP_PROTECT(5),
+ REG_A6XX_CP_PROTECT(6),
+ REG_A6XX_CP_PROTECT(7),
+ REG_A6XX_CP_PROTECT(8),
+ REG_A6XX_CP_PROTECT(9),
+ REG_A6XX_CP_PROTECT(10),
+ REG_A6XX_CP_PROTECT(11),
+ REG_A6XX_CP_PROTECT(12),
+ REG_A6XX_CP_PROTECT(13),
+ REG_A6XX_CP_PROTECT(14),
+ REG_A6XX_CP_PROTECT(15),
+ REG_A6XX_CP_PROTECT(16),
+ REG_A6XX_CP_PROTECT(17),
+ REG_A6XX_CP_PROTECT(18),
+ REG_A6XX_CP_PROTECT(19),
+ REG_A6XX_CP_PROTECT(20),
+ REG_A6XX_CP_PROTECT(21),
+ REG_A6XX_CP_PROTECT(22),
+ REG_A6XX_CP_PROTECT(23),
+ REG_A6XX_CP_PROTECT(24),
+ REG_A6XX_CP_PROTECT(25),
+ REG_A6XX_CP_PROTECT(26),
+ REG_A6XX_CP_PROTECT(27),
+ REG_A6XX_CP_PROTECT(28),
+ REG_A6XX_CP_PROTECT(29),
+ REG_A6XX_CP_PROTECT(30),
+ REG_A6XX_CP_PROTECT(31),
+ REG_A6XX_CP_PROTECT(32),
+ REG_A6XX_CP_PROTECT(33),
+ REG_A6XX_CP_PROTECT(34),
+ REG_A6XX_CP_PROTECT(35),
+ REG_A6XX_CP_PROTECT(36),
+ REG_A6XX_CP_PROTECT(37),
+ REG_A6XX_CP_PROTECT(38),
+ REG_A6XX_CP_PROTECT(39),
+ REG_A6XX_CP_PROTECT(40),
+ REG_A6XX_CP_PROTECT(41),
+ REG_A6XX_CP_PROTECT(42),
+ REG_A6XX_CP_PROTECT(43),
+ REG_A6XX_CP_PROTECT(44),
+ REG_A6XX_CP_PROTECT(45),
+ REG_A6XX_CP_PROTECT(46),
+ REG_A6XX_CP_PROTECT(47),
+};
+
+DECLARE_ADRENO_REGLIST_LIST(a750_ifpc_reglist);
+
static const struct adreno_info a7xx_gpus[] = {
{
.chip_ids = ADRENO_CHIP_IDS(0x07000200),
@@ -1432,14 +1505,27 @@ static const struct adreno_info a7xx_gpus[] = {
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_HAS_HW_APRIV |
- ADRENO_QUIRK_PREEMPTION,
+ ADRENO_QUIRK_PREEMPTION |
+ ADRENO_QUIRK_IFPC,
.init = a6xx_gpu_init,
.a6xx = &(const struct a6xx_info) {
.hwcg = a740_hwcg,
.protect = &a730_protect,
.pwrup_reglist = &a7xx_pwrup_reglist,
+ .ifpc_reglist = &a750_ifpc_reglist,
.gmu_chipid = 0x7050001,
.gmu_cgc_mode = 0x00020202,
+ .bcms = (const struct a6xx_bcm[]) {
+ { .name = "SH0", .buswidth = 16 },
+ { .name = "MC0", .buswidth = 4 },
+ {
+ .name = "ACV",
+ .fixed = true,
+ .perfmode = BIT(3),
+ .perfmode_bw = 16500000,
+ },
+ { /* sentinel */ },
+ },
},
.preempt_record_size = 4192 * SZ_1K,
.speedbins = ADRENO_SPEEDBINS(
@@ -1460,12 +1546,14 @@ static const struct adreno_info a7xx_gpus[] = {
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_HAS_HW_APRIV |
- ADRENO_QUIRK_PREEMPTION,
+ ADRENO_QUIRK_PREEMPTION |
+ ADRENO_QUIRK_IFPC,
.init = a6xx_gpu_init,
.zapfw = "gen70900_zap.mbn",
.a6xx = &(const struct a6xx_info) {
.protect = &a730_protect,
.pwrup_reglist = &a7xx_pwrup_reglist,
+ .ifpc_reglist = &a750_ifpc_reglist,
.gmu_chipid = 0x7090100,
.gmu_cgc_mode = 0x00020202,
.bcms = (const struct a6xx_bcm[]) {
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 28e6705c6da6..fc62fef2fed8 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -93,14 +93,25 @@ bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu)
/* Check to see if the GX rail is still powered */
bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
u32 val;
/* This can be called from gpu state code so make sure GMU is valid */
if (!gmu->initialized)
return false;
+ /* If GMU is absent, then GX power domain is ON as long as GPU is in active state */
+ if (adreno_has_gmu_wrapper(adreno_gpu))
+ return true;
+
val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
+ if (adreno_is_a7xx(adreno_gpu))
+ return !(val &
+ (A7XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF |
+ A7XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF));
+
return !(val &
(A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF |
A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF));
@@ -272,6 +283,8 @@ static int a6xx_gmu_start(struct a6xx_gmu *gmu)
if (ret)
DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n");
+ set_bit(GMU_STATUS_FW_START, &gmu->status);
+
return ret;
}
@@ -403,7 +416,10 @@ int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
int ret;
u32 val;
- if (!gmu->legacy)
+ WARN_ON(!gmu->legacy);
+
+ /* Nothing to do if GMU does the power management */
+ if (gmu->idle_level > GMU_IDLE_STATE_ACTIVE)
return 0;
gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778000);
@@ -518,6 +534,9 @@ static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
int ret;
u32 val;
+ if (!test_and_clear_bit(GMU_STATUS_PDC_SLEEP, &gmu->status))
+ return 0;
+
gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, BIT(1));
ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val,
@@ -545,6 +564,9 @@ static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
int ret;
u32 val;
+ if (test_and_clear_bit(GMU_STATUS_FW_START, &gmu->status))
+ return;
+
gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1);
ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
@@ -553,6 +575,8 @@ static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n");
gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
+
+ set_bit(GMU_STATUS_PDC_SLEEP, &gmu->status);
}
static inline void pdc_write(void __iomem *ptr, u32 offset, u32 value)
@@ -681,8 +705,6 @@ setup_pdc:
/* ensure no writes happen before the uCode is fully written */
wmb();
- a6xx_rpmh_stop(gmu);
-
err:
if (!IS_ERR_OR_NULL(pdcptr))
iounmap(pdcptr);
@@ -842,19 +864,15 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
else
gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);
- if (state == GMU_WARM_BOOT) {
- ret = a6xx_rpmh_start(gmu);
- if (ret)
- return ret;
- } else {
+ ret = a6xx_rpmh_start(gmu);
+ if (ret)
+ return ret;
+
+ if (state == GMU_COLD_BOOT) {
if (WARN(!adreno_gpu->fw[ADRENO_FW_GMU],
"GMU firmware is not loaded\n"))
return -ENOENT;
- ret = a6xx_rpmh_start(gmu);
- if (ret)
- return ret;
-
ret = a6xx_gmu_fw_load(gmu);
if (ret)
return ret;
@@ -925,10 +943,7 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
ret = a6xx_gmu_gfx_rail_on(gmu);
if (ret)
return ret;
- }
- /* Enable SPTP_PC if the CPU is responsible for it */
- if (gmu->idle_level < GMU_IDLE_STATE_SPTP) {
ret = a6xx_sptprac_enable(gmu);
if (ret)
return ret;
@@ -980,6 +995,22 @@ static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
val, (val & 1), 100, 10000);
gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS + seqmem_off,
val, (val & 1), 100, 1000);
+
+ if (!adreno_is_a740_family(adreno_gpu))
+ return;
+
+ gmu_poll_timeout_rscc(gmu, REG_A7XX_RSCC_TCS4_DRV0_STATUS + seqmem_off,
+ val, (val & 1), 100, 10000);
+ gmu_poll_timeout_rscc(gmu, REG_A7XX_RSCC_TCS5_DRV0_STATUS + seqmem_off,
+ val, (val & 1), 100, 10000);
+ gmu_poll_timeout_rscc(gmu, REG_A7XX_RSCC_TCS6_DRV0_STATUS + seqmem_off,
+ val, (val & 1), 100, 10000);
+ gmu_poll_timeout_rscc(gmu, REG_A7XX_RSCC_TCS7_DRV0_STATUS + seqmem_off,
+ val, (val & 1), 100, 1000);
+ gmu_poll_timeout_rscc(gmu, REG_A7XX_RSCC_TCS8_DRV0_STATUS + seqmem_off,
+ val, (val & 1), 100, 10000);
+ gmu_poll_timeout_rscc(gmu, REG_A7XX_RSCC_TCS9_DRV0_STATUS + seqmem_off,
+ val, (val & 1), 100, 1000);
}
/* Force the GMU off in case it isn't responsive */
@@ -1023,6 +1054,8 @@ static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
/* Reset GPU core blocks */
a6xx_gpu_sw_reset(gpu, true);
+
+ a6xx_rpmh_stop(gmu);
}
static void a6xx_gmu_set_initial_freq(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
@@ -1128,6 +1161,11 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
/* Set the GPU to the current freq */
a6xx_gmu_set_initial_freq(gpu, gmu);
+ if (refcount_read(&gpu->sysprof_active) > 1) {
+ ret = a6xx_gmu_set_oob(gmu, GMU_OOB_PERFCOUNTER_SET);
+ if (!ret)
+ set_bit(GMU_STATUS_OOB_PERF_SET, &gmu->status);
+ }
out:
/* On failure, shut down the GMU to leave it in a good state */
if (ret) {
@@ -1175,6 +1213,9 @@ static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
}
+ if (test_and_clear_bit(GMU_STATUS_OOB_PERF_SET, &gmu->status))
+ a6xx_gmu_clear_oob(gmu, GMU_OOB_PERFCOUNTER_SET);
+
ret = a6xx_gmu_wait_for_idle(gmu);
/* If the GMU isn't responding assume it is hung */
@@ -1318,8 +1359,6 @@ static int a6xx_gmu_memory_probe(struct drm_device *drm, struct a6xx_gmu *gmu)
struct msm_mmu *mmu;
mmu = msm_iommu_new(gmu->dev, 0);
- if (!mmu)
- return -ENODEV;
if (IS_ERR(mmu))
return PTR_ERR(mmu);
@@ -1692,6 +1731,7 @@ static int a6xx_gmu_acd_probe(struct a6xx_gmu *gmu)
u32 val;
freq = gmu->gpu_freqs[i];
+ /* This is unlikely to fail because we are passing back a known freq */
opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, freq, true);
np = dev_pm_opp_get_of_node(opp);
@@ -1790,6 +1830,35 @@ static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
return irq;
}
+void a6xx_gmu_sysprof_setup(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ unsigned int sysprof_active;
+
+ /* Nothing to do if GPU is suspended. We will handle this during GMU resume */
+ if (!pm_runtime_get_if_active(&gpu->pdev->dev))
+ return;
+
+ mutex_lock(&gmu->lock);
+
+ sysprof_active = refcount_read(&gpu->sysprof_active);
+
+ /*
+ * 'Perfcounter select' register values are lost during IFPC collapse. To avoid that,
+ * take the currently unused perfcounter OOB vote to block IFPC while sysprof is active.
+ */
+ if ((sysprof_active > 1) && !test_and_set_bit(GMU_STATUS_OOB_PERF_SET, &gmu->status))
+ a6xx_gmu_set_oob(gmu, GMU_OOB_PERFCOUNTER_SET);
+ else if ((sysprof_active == 1) && test_and_clear_bit(GMU_STATUS_OOB_PERF_SET, &gmu->status))
+ a6xx_gmu_clear_oob(gmu, GMU_OOB_PERFCOUNTER_SET);
+
+ mutex_unlock(&gmu->lock);
+
+ pm_runtime_put(&gpu->pdev->dev);
+}
+
void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
{
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
@@ -1932,8 +2001,9 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
if (ret)
return ret;
- /* Fow now, don't do anything fancy until we get our feet under us */
- gmu->idle_level = GMU_IDLE_STATE_ACTIVE;
+ /* Set GMU idle level */
+ gmu->idle_level = (adreno_gpu->info->quirks & ADRENO_QUIRK_IFPC) ?
+ GMU_IDLE_STATE_IFPC : GMU_IDLE_STATE_ACTIVE;
pm_runtime_enable(gmu->dev);
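The new gmu->status bits pair the RPMh sequences: the first rpmh_stop after a firmware start is skipped, and rpmh_start only runs if a previous stop actually put the PDC to sleep. A condensed sketch of that pairing (bodies elided):

	static int example_rpmh_start(struct a6xx_gmu *gmu)
	{
		/* Nothing to wake unless a prior stop parked the PDC */
		if (!test_and_clear_bit(GMU_STATUS_PDC_SLEEP, &gmu->status))
			return 0;

		/* ... wake sequence ... */
		return 0;
	}

	static void example_rpmh_stop(struct a6xx_gmu *gmu)
	{
		/* Right after firmware start the GMU owns the PDC: skip the stop */
		if (test_and_clear_bit(GMU_STATUS_FW_START, &gmu->status))
			return;

		/* ... sleep sequence ... */
		set_bit(GMU_STATUS_PDC_SLEEP, &gmu->status);
	}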
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
index d1ce11131ba6..06cfc294016f 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -50,6 +50,9 @@ struct a6xx_bcm {
/* The GMU does not do any idle state management */
#define GMU_IDLE_STATE_ACTIVE 0
+/* Unknown power state. Not exposed by the firmware. For documentation purposes only */
+#define GMU_IDLE_STATE_RESERVED 1
+
/* The GMU manages SPTP power collapse */
#define GMU_IDLE_STATE_SPTP 2
@@ -117,6 +120,14 @@ struct a6xx_gmu {
struct qmp *qmp;
struct a6xx_hfi_msg_bw_table *bw_table;
+
+/* To check if we can trigger sleep seq at PDC. Cleared in a6xx_rpmh_stop() */
+#define GMU_STATUS_FW_START 0
+/* To track if PDC sleep seq was done */
+#define GMU_STATUS_PDC_SLEEP 1
+/* To track Perfcounter OOB set status */
+#define GMU_STATUS_OOB_PERF_SET 2
+ unsigned long status;
};
static inline u32 gmu_read(struct a6xx_gmu *gmu, u32 offset)
@@ -158,6 +169,9 @@ static inline u64 gmu_read64(struct a6xx_gmu *gmu, u32 lo, u32 hi)
#define gmu_poll_timeout(gmu, addr, val, cond, interval, timeout) \
readl_poll_timeout((gmu)->mmio + ((addr) << 2), val, cond, \
interval, timeout)
+#define gmu_poll_timeout_atomic(gmu, addr, val, cond, interval, timeout) \
+ readl_poll_timeout_atomic((gmu)->mmio + ((addr) << 2), val, cond, \
+ interval, timeout)
static inline u32 gmu_read_rscc(struct a6xx_gmu *gmu, u32 offset)
{
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 45dd5fd1c2bf..b8f8ae940b55 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -16,6 +16,97 @@
#define GPU_PAS_ID 13
+static u64 read_gmu_ao_counter(struct a6xx_gpu *a6xx_gpu)
+{
+ u64 count_hi, count_lo, temp;
+
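+ /*
+ * The always-on counter is two 32-bit halves; re-read the high half
+ * until it is stable so a low-half rollover between the reads cannot
+ * produce a torn 64-bit value.
+ */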
+ do {
+ count_hi = gmu_read(&a6xx_gpu->gmu, REG_A6XX_GMU_ALWAYS_ON_COUNTER_H);
+ count_lo = gmu_read(&a6xx_gpu->gmu, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L);
+ temp = gmu_read(&a6xx_gpu->gmu, REG_A6XX_GMU_ALWAYS_ON_COUNTER_H);
+ } while (unlikely(count_hi != temp));
+
+ return (count_hi << 32) | count_lo;
+}
+
+static bool fence_status_check(struct msm_gpu *gpu, u32 offset, u32 value, u32 status, u32 mask)
+{
+ /* Success if !writedropped0/1 */
+ if (!(status & mask))
+ return true;
+
+ udelay(10);
+
+ /* Try to update fenced register again */
+ gpu_write(gpu, offset, value);
+
+ /*
+ * We can't do a posted write here because the power domain could be
+ * in collapse state. So use the heaviest barrier instead.
+ */
+ mb();
+ return false;
+}
+
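+/*
+ * A fenced write polls the GMU AHB fence status and retries the write
+ * until it is confirmed to have reached the GX domain, which may be in
+ * the middle of power collapse.
+ */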
+static int fenced_write(struct a6xx_gpu *a6xx_gpu, u32 offset, u32 value, u32 mask)
+{
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ u32 status;
+
+ gpu_write(gpu, offset, value);
+
+ /* Nothing else to be done in the case of no-GMU */
+ if (adreno_has_gmu_wrapper(adreno_gpu))
+ return 0;
+
+ /*
+ * We can't do a posted write here because the power domain could be
+ * in collapse state. So use the heaviest barrier instead.
+ */
+ mb();
+
+ if (!gmu_poll_timeout(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS, status,
+ fence_status_check(gpu, offset, value, status, mask), 0, 1000))
+ return 0;
+
+ /* Try again for another 1ms before failing */
+ gpu_write(gpu, offset, value);
+ mb();
+
+ if (!gmu_poll_timeout(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS, status,
+ fence_status_check(gpu, offset, value, status, mask), 0, 1000)) {
+ /*
+ * The 'delay' warning is here because the pause to print this
+ * warning will allow the GPU to enter power collapse, which
+ * defeats the purpose of polling continuously for 2 ms.
+ */
+ dev_err_ratelimited(gmu->dev, "delay in fenced register write (0x%x)\n",
+ offset);
+ return 0;
+ }
+
+ dev_err_ratelimited(gmu->dev, "fenced register write (0x%x) fail\n",
+ offset);
+
+ return -ETIMEDOUT;
+}
+
+int a6xx_fenced_write(struct a6xx_gpu *a6xx_gpu, u32 offset, u64 value, u32 mask, bool is_64b)
+{
+ int ret;
+
+ ret = fenced_write(a6xx_gpu, offset, lower_32_bits(value), mask);
+ if (ret)
+ return ret;
+
+ if (!is_64b)
+ return 0;
+
+ ret = fenced_write(a6xx_gpu, offset + 1, upper_32_bits(value), mask);
+
+ return ret;
+}
+
static inline bool _a6xx_check_idle(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -86,7 +177,7 @@ static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
/* Update HW if this is the current ring and we are not in preempt*/
if (!a6xx_in_preempt(a6xx_gpu)) {
if (a6xx_gpu->cur_ring == ring)
- gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr);
+ a6xx_fenced_write(a6xx_gpu, REG_A6XX_CP_RB_WPTR, wptr, BIT(0), false);
else
ring->restore_wptr = true;
} else {
@@ -173,8 +264,8 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
* Needed for preemption
*/
OUT_PKT7(ring, CP_MEM_WRITE, 5);
- OUT_RING(ring, CP_MEM_WRITE_0_ADDR_LO(lower_32_bits(memptr)));
- OUT_RING(ring, CP_MEM_WRITE_1_ADDR_HI(upper_32_bits(memptr)));
+ OUT_RING(ring, A5XX_CP_MEM_WRITE_ADDR_LO(lower_32_bits(memptr)));
+ OUT_RING(ring, A5XX_CP_MEM_WRITE_ADDR_HI(upper_32_bits(memptr)));
OUT_RING(ring, lower_32_bits(ttbr));
OUT_RING(ring, upper_32_bits(ttbr));
OUT_RING(ring, ctx->seqno);
@@ -204,9 +295,9 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
*/
OUT_PKT7(ring, CP_WAIT_REG_MEM, 6);
OUT_RING(ring, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ));
- OUT_RING(ring, CP_WAIT_REG_MEM_1_POLL_ADDR_LO(
+ OUT_RING(ring, CP_WAIT_REG_MEM_POLL_ADDR_LO(
REG_A6XX_RBBM_PERFCTR_SRAM_INIT_STATUS));
- OUT_RING(ring, CP_WAIT_REG_MEM_2_POLL_ADDR_HI(0));
+ OUT_RING(ring, CP_WAIT_REG_MEM_POLL_ADDR_HI(0));
OUT_RING(ring, CP_WAIT_REG_MEM_3_REF(0x1));
OUT_RING(ring, CP_WAIT_REG_MEM_4_MASK(0x1));
OUT_RING(ring, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(0));
@@ -298,8 +389,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
OUT_RING(ring, submit->seqno);
- trace_msm_gpu_submit_flush(submit,
- gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER));
+ trace_msm_gpu_submit_flush(submit, read_gmu_ao_counter(a6xx_gpu));
a6xx_flush(gpu, ring);
}
@@ -499,8 +589,7 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
}
- trace_msm_gpu_submit_flush(submit,
- gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER));
+ trace_msm_gpu_submit_flush(submit, read_gmu_ao_counter(a6xx_gpu));
a6xx_flush(gpu, ring);
@@ -739,11 +828,10 @@ static void a7xx_patch_pwrup_reglist(struct msm_gpu *gpu)
u32 *dest = (u32 *)&lock->regs[0];
int i;
- reglist = adreno_gpu->info->a6xx->pwrup_reglist;
-
lock->gpu_req = lock->cpu_req = lock->turn = 0;
- lock->ifpc_list_len = 0;
- lock->preemption_list_len = reglist->count;
+
+ reglist = adreno_gpu->info->a6xx->ifpc_reglist;
+ lock->ifpc_list_len = reglist->count;
/*
* For each entry in each of the lists, write the offset and the current
@@ -754,6 +842,14 @@ static void a7xx_patch_pwrup_reglist(struct msm_gpu *gpu)
*dest++ = gpu_read(gpu, reglist->regs[i]);
}
+ reglist = adreno_gpu->info->a6xx->pwrup_reglist;
+ lock->preemption_list_len = reglist->count;
+
+ for (i = 0; i < reglist->count; i++) {
+ *dest++ = reglist->regs[i];
+ *dest++ = gpu_read(gpu, reglist->regs[i]);
+ }
+
/*
* The overall register list is composed of
* 1. Static IFPC-only registers
@@ -1241,14 +1337,14 @@ static int hw_init(struct msm_gpu *gpu)
/* Set weights for bicubic filtering */
if (adreno_is_a650_family(adreno_gpu) || adreno_is_x185(adreno_gpu)) {
- gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_0, 0);
- gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_1,
+ gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE(0), 0);
+ gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE(1),
0x3fe05ff4);
- gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_2,
+ gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE(2),
0x3fa0ebee);
- gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_3,
+ gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE(3),
0x3f5193ed);
- gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_4,
+ gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE(4),
0x3f0243f0);
}
@@ -1448,21 +1544,25 @@ static void a6xx_recover(struct msm_gpu *gpu)
adreno_dump_info(gpu);
- for (i = 0; i < 8; i++)
- DRM_DEV_INFO(&gpu->pdev->dev, "CP_SCRATCH_REG%d: %u\n", i,
- gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(i)));
+ if (a6xx_gmu_gx_is_on(&a6xx_gpu->gmu)) {
+ /* Sometimes crashstate capture is skipped, so SQE should be halted here again */
+ gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 3);
+
+ for (i = 0; i < 8; i++)
+ DRM_DEV_INFO(&gpu->pdev->dev, "CP_SCRATCH_REG%d: %u\n", i,
+ gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(i)));
- if (hang_debug)
- a6xx_dump(gpu);
+ if (hang_debug)
+ a6xx_dump(gpu);
+ }
/*
* To handle recovery specific sequences during the rpm suspend we are
* about to trigger
*/
- a6xx_gpu->hung = true;
- /* Halt SQE first */
- gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 3);
+ a6xx_gpu->hung = true;
pm_runtime_dont_use_autosuspend(&gpu->pdev->dev);
@@ -1693,8 +1793,6 @@ static void a6xx_cp_hw_err_irq(struct msm_gpu *gpu)
static void a6xx_fault_detect_irq(struct msm_gpu *gpu)
{
- struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
/*
@@ -1706,13 +1804,6 @@ static void a6xx_fault_detect_irq(struct msm_gpu *gpu)
if (gpu_read(gpu, REG_A6XX_RBBM_STATUS3) & A6XX_RBBM_STATUS3_SMMU_STALLED_ON_FAULT)
return;
- /*
- * Force the GPU to stay on until after we finish
- * collecting information
- */
- if (!adreno_has_gmu_wrapper(adreno_gpu))
- gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 1);
-
DRM_DEV_ERROR(&gpu->pdev->dev,
"gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
ring ? ring->id : -1, ring ? ring->fctx->last_fence : 0,
@@ -1727,6 +1818,9 @@ static void a6xx_fault_detect_irq(struct msm_gpu *gpu)
/* Turn off the hangcheck timer to keep it from bothering us */
timer_delete(&gpu->hangcheck_timer);
+ /* Turn off interrupts to avoid triggering recovery again */
+ gpu_write(gpu, REG_A6XX_RBBM_INT_0_MASK, 0);
+
kthread_queue_work(gpu->worker, &gpu->recover_work);
}
@@ -1751,9 +1845,49 @@ static void a7xx_sw_fuse_violation_irq(struct msm_gpu *gpu)
}
}
+static void a6xx_gpu_keepalive_vote(struct msm_gpu *gpu, bool on)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ if (adreno_has_gmu_wrapper(adreno_gpu))
+ return;
+
+ gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, on);
+}
+
+static int irq_poll_fence(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ u32 status;
+
+ if (adreno_has_gmu_wrapper(adreno_gpu))
+ return 0;
+
+ if (gmu_poll_timeout_atomic(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, status, !status, 1, 100)) {
+ u32 rbbm_unmasked = gmu_read(gmu, REG_A6XX_GMU_RBBM_INT_UNMASKED_STATUS);
+
+ dev_err_ratelimited(&gpu->pdev->dev,
+ "irq fence poll timeout, fence_ctrl=0x%x, unmasked_status=0x%x\n",
+ status, rbbm_unmasked);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
static irqreturn_t a6xx_irq(struct msm_gpu *gpu)
{
struct msm_drm_private *priv = gpu->dev->dev_private;
+
+	/* Take the keepalive vote so GX can't power-collapse while RBBM_INT_0_STATUS is read and handled */
+ a6xx_gpu_keepalive_vote(gpu, true);
+
+ if (irq_poll_fence(gpu))
+ goto done;
+
u32 status = gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS);
gpu_write(gpu, REG_A6XX_RBBM_INT_CLEAR_CMD, status);
@@ -1790,6 +1924,9 @@ static irqreturn_t a6xx_irq(struct msm_gpu *gpu)
if (status & A6XX_RBBM_INT_0_MASK_CP_SW)
a6xx_preempt_irq(gpu);
+done:
+ a6xx_gpu_keepalive_vote(gpu, false);
+
return IRQ_HANDLED;
}
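
The new IRQ bracket has a strict ordering: the keepalive vote is taken before RBBM_INT_0_STATUS is read and dropped only after every sub-handler has run, with the AHB fence polled in between so GX-side registers are safe to touch. A condensed sketch of the flow, using only the helpers from the hunk above (illustration, not the actual handler):

    static irqreturn_t a6xx_irq_flow_sketch(struct msm_gpu *gpu)
    {
    	/* 1. Vote keepalive so GX cannot power-collapse mid-handler */
    	a6xx_gpu_keepalive_vote(gpu, true);

    	/* 2. Bail out early if the GX AHB fence never opens */
    	if (irq_poll_fence(gpu))
    		goto done;

    	/* 3. ... read/clear RBBM_INT_0_STATUS, dispatch sub-handlers ... */

    done:
    	/* 4. Drop the vote; the GMU may power-collapse GX again */
    	a6xx_gpu_keepalive_vote(gpu, false);
    	return IRQ_HANDLED;
    }
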
@@ -2179,16 +2316,7 @@ static int a6xx_gmu_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
- mutex_lock(&a6xx_gpu->gmu.lock);
-
- /* Force the GPU power on so we can read this register */
- a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
-
- *value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER);
-
- a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
-
- mutex_unlock(&a6xx_gpu->gmu.lock);
+ *value = read_gmu_ao_counter(a6xx_gpu);
return 0;
}
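
read_gmu_ao_counter() itself is introduced earlier in this patch; the point of the change is that the GMU-side always-on counter stays readable even while the GX domain is power-collapsed, so the OOB vote and the GMU lock become unnecessary. A plausible shape for the helper — the register names here are assumptions of this sketch, not taken from the patch:

    /* Sketch only: the real helper is defined elsewhere in the patch */
    static u64 read_gmu_ao_counter_sketch(struct a6xx_gpu *a6xx_gpu)
    {
    	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
    	u32 hi, lo, tmp;

    	/* hi/lo/hi read loop guards against a 32-bit rollover in between */
    	do {
    		hi  = gmu_read(gmu, REG_A6XX_GMU_CX_GMU_ALWAYS_ON_COUNTER_H);
    		lo  = gmu_read(gmu, REG_A6XX_GMU_CX_GMU_ALWAYS_ON_COUNTER_L);
    		tmp = gmu_read(gmu, REG_A6XX_GMU_CX_GMU_ALWAYS_ON_COUNTER_H);
    	} while (hi != tmp);

    	return ((u64)hi << 32) | lo;
    }
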
@@ -2298,18 +2426,36 @@ static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami)
return a6xx_gpu->shadow[ring->id];
+ /*
+	 * This can happen only on an A6XX_GEN1 with a GMU, with IFPC enabled and a very old
+	 * SQE firmware without 'whereami' support
+ */
+ WARN_ONCE((to_adreno_gpu(gpu)->info->quirks & ADRENO_QUIRK_IFPC),
+ "Can't read CP_RB_RPTR register reliably\n");
+
return ring->memptrs->rptr = gpu_read(gpu, REG_A6XX_CP_RB_RPTR);
}
static bool a6xx_progress(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
- struct msm_cp_state cp_state = {
+ struct msm_cp_state cp_state;
+ bool progress;
+
+ /*
+	 * With IFPC, the KMD doesn't know whether the GX power domain is
+	 * collapsed or not, so it can't blindly read the registers below,
+	 * which live in the GX domain. Let's trust the hang detection in HW
+	 * and lie to the caller that there was progress.
+ */
+ if (to_adreno_gpu(gpu)->info->quirks & ADRENO_QUIRK_IFPC)
+ return true;
+
+ cp_state = (struct msm_cp_state) {
.ib1_base = gpu_read64(gpu, REG_A6XX_CP_IB1_BASE),
.ib2_base = gpu_read64(gpu, REG_A6XX_CP_IB2_BASE),
.ib1_rem = gpu_read(gpu, REG_A6XX_CP_IB1_REM_SIZE),
.ib2_rem = gpu_read(gpu, REG_A6XX_CP_IB2_REM_SIZE),
};
- bool progress;
/*
* Adjust the remaining data to account for what has already been
@@ -2408,6 +2554,7 @@ static const struct adreno_gpu_funcs funcs = {
.create_private_vm = a6xx_create_private_vm,
.get_rptr = a6xx_get_rptr,
.progress = a6xx_progress,
+ .sysprof_setup = a6xx_gmu_sysprof_setup,
},
.get_timestamp = a6xx_gmu_get_timestamp,
};
@@ -2468,6 +2615,7 @@ static const struct adreno_gpu_funcs funcs_a7xx = {
.create_private_vm = a6xx_create_private_vm,
.get_rptr = a6xx_get_rptr,
.progress = a6xx_progress,
+ .sysprof_setup = a6xx_gmu_sysprof_setup,
},
.get_timestamp = a6xx_gmu_get_timestamp,
};
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
index 6e71f617fc3d..0b17d36c36a9 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -45,6 +45,7 @@ struct a6xx_info {
const struct adreno_reglist *hwcg;
const struct adreno_protect *protect;
const struct adreno_reglist_list *pwrup_reglist;
+ const struct adreno_reglist_list *ifpc_reglist;
u32 gmu_chipid;
u32 gmu_cgc_mode;
u32 prim_fifo_threshold;
@@ -254,6 +255,7 @@ void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);
int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node);
int a6xx_gmu_wrapper_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node);
void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu);
+void a6xx_gmu_sysprof_setup(struct msm_gpu *gpu);
void a6xx_preempt_init(struct msm_gpu *gpu);
void a6xx_preempt_hw_init(struct msm_gpu *gpu);
@@ -295,5 +297,6 @@ int a6xx_gpu_state_put(struct msm_gpu_state *state);
void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu, bool gx_off);
void a6xx_gpu_sw_reset(struct msm_gpu *gpu, bool assert);
+int a6xx_fenced_write(struct a6xx_gpu *gpu, u32 offset, u64 value, u32 mask, bool is_64b);
#endif /* __A6XX_GPU_H__ */
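
a6xx_fenced_write() is only declared here; its definition lands in a6xx_gpu.c earlier in the patch. Conceptually, a plain register write can be silently dropped by the GX AHB fence while the GPU is entering IFPC, so a fenced write verifies the fence-status bit selected by 'mask' and, in the real helper, retries under a keepalive vote. A rough sketch of the happy path under those assumptions (the status register name and bit polarity are assumed):

    static int fenced_write_sketch(struct a6xx_gpu *a6xx_gpu, u32 offset,
    			       u64 value, u32 mask, bool is_64b)
    {
    	struct msm_gpu *gpu = &a6xx_gpu->base.base;
    	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
    	u32 status;

    	if (is_64b)
    		gpu_write64(gpu, offset, value);
    	else
    		gpu_write(gpu, offset, lower_32_bits(value));

    	/* Wait for the fence-status bit to clear, proving the write landed */
    	if (gmu_poll_timeout_atomic(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS,
    				    status, !(status & mask), 1, 100))
    		return -ETIMEDOUT; /* the real helper retries under keepalive */

    	return 0;
    }
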
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
index d5d1271fce61..4c7f3c642f6a 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
@@ -1586,8 +1586,7 @@ struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu)
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
struct a6xx_gpu_state *a6xx_state = kzalloc(sizeof(*a6xx_state),
GFP_KERNEL);
- bool stalled = !!(gpu_read(gpu, REG_A6XX_RBBM_STATUS3) &
- A6XX_RBBM_STATUS3_SMMU_STALLED_ON_FAULT);
+ bool stalled;
if (!a6xx_state)
return ERR_PTR(-ENOMEM);
@@ -1608,15 +1607,20 @@ struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu)
}
/* If GX isn't on the rest of the data isn't going to be accessible */
- if (!adreno_has_gmu_wrapper(adreno_gpu) && !a6xx_gmu_gx_is_on(&a6xx_gpu->gmu))
+ if (!a6xx_gmu_gx_is_on(&a6xx_gpu->gmu))
return &a6xx_state->base;
+ /* Halt SQE first */
+ gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 3);
+
/* Get the banks of indexed registers */
if (adreno_is_a7xx(adreno_gpu))
a7xx_get_indexed_registers(gpu, a6xx_state);
else
a6xx_get_indexed_registers(gpu, a6xx_state);
+ stalled = !!(gpu_read(gpu, REG_A6XX_RBBM_STATUS3) &
+ A6XX_RBBM_STATUS3_SMMU_STALLED_ON_FAULT);
/*
* Try to initialize the crashdumper, if we are not dumping state
* with the SMMU stalled. The crashdumper needs memory access to
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
index 8e69b1e84657..550de6ad68ef 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
@@ -21,6 +21,7 @@ static const char * const a6xx_hfi_msg_id[] = {
HFI_MSG_ID(HFI_H2F_MSG_PERF_TABLE),
HFI_MSG_ID(HFI_H2F_MSG_TEST),
HFI_MSG_ID(HFI_H2F_MSG_START),
+ HFI_MSG_ID(HFI_H2F_FEATURE_CTRL),
HFI_MSG_ID(HFI_H2F_MSG_CORE_FW_START),
HFI_MSG_ID(HFI_H2F_MSG_GX_BW_PERF_VOTE),
HFI_MSG_ID(HFI_H2F_MSG_PREPARE_SLUMBER),
@@ -765,23 +766,40 @@ send:
NULL, 0);
}
+static int a6xx_hfi_feature_ctrl_msg(struct a6xx_gmu *gmu, u32 feature, u32 enable, u32 data)
+{
+ struct a6xx_hfi_msg_feature_ctrl msg = {
+ .feature = feature,
+ .enable = enable,
+ .data = data,
+ };
+
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_FEATURE_CTRL, &msg, sizeof(msg), NULL, 0);
+}
+
+#define HFI_FEATURE_IFPC 9
+#define IFPC_LONG_HYST 0x1680
+
+static int a6xx_hfi_enable_ifpc(struct a6xx_gmu *gmu)
+{
+ if (gmu->idle_level != GMU_IDLE_STATE_IFPC)
+ return 0;
+
+ return a6xx_hfi_feature_ctrl_msg(gmu, HFI_FEATURE_IFPC, 1, IFPC_LONG_HYST);
+}
+
#define HFI_FEATURE_ACD 12
static int a6xx_hfi_enable_acd(struct a6xx_gmu *gmu)
{
struct a6xx_hfi_acd_table *acd_table = &gmu->acd_table;
- struct a6xx_hfi_msg_feature_ctrl msg = {
- .feature = HFI_FEATURE_ACD,
- .enable = 1,
- .data = 0,
- };
int ret;
if (!acd_table->enable_by_level)
return 0;
/* Enable ACD feature at GMU */
- ret = a6xx_hfi_send_msg(gmu, HFI_H2F_FEATURE_CTRL, &msg, sizeof(msg), NULL, 0);
+ ret = a6xx_hfi_feature_ctrl_msg(gmu, HFI_FEATURE_ACD, 1, 0);
if (ret) {
DRM_DEV_ERROR(gmu->dev, "Unable to enable ACD (%d)\n", ret);
return ret;
@@ -898,6 +916,10 @@ int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state)
if (ret)
return ret;
+ ret = a6xx_hfi_enable_ifpc(gmu);
+ if (ret)
+ return ret;
+
ret = a6xx_hfi_send_core_fw_start(gmu);
if (ret)
return ret;
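
With the feature-control message factored out, enabling any GMU-side feature is a one-liner, and both ACD and IFPC above go through the same helper. A usage sketch for a hypothetical additional feature (the id below is made up purely for illustration):

    #define HFI_FEATURE_FOO 42	/* hypothetical feature id, illustration only */

    static int a6xx_hfi_enable_foo_sketch(struct a6xx_gmu *gmu)
    {
    	/* args: feature id, enable flag, feature-specific payload */
    	return a6xx_hfi_feature_ctrl_msg(gmu, HFI_FEATURE_FOO, 1, 0);
    }
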
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_preempt.c b/drivers/gpu/drm/msm/adreno/a6xx_preempt.c
index 6a12a35dabff..afc5f4aa3b17 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_preempt.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_preempt.c
@@ -41,7 +41,7 @@ static inline void set_preempt_state(struct a6xx_gpu *gpu,
}
/* Write the most recent wptr for the given ring into the hardware */
-static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+static inline void update_wptr(struct a6xx_gpu *a6xx_gpu, struct msm_ringbuffer *ring)
{
unsigned long flags;
uint32_t wptr;
@@ -51,7 +51,7 @@ static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
if (ring->restore_wptr) {
wptr = get_wptr(ring);
- gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr);
+ a6xx_fenced_write(a6xx_gpu, REG_A6XX_CP_RB_WPTR, wptr, BIT(0), false);
ring->restore_wptr = false;
}
@@ -111,9 +111,9 @@ static void preempt_prepare_postamble(struct a6xx_gpu *a6xx_gpu)
postamble[count++] = PKT7(CP_WAIT_REG_MEM, 6);
postamble[count++] = CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ);
- postamble[count++] = CP_WAIT_REG_MEM_1_POLL_ADDR_LO(
+ postamble[count++] = CP_WAIT_REG_MEM_POLL_ADDR_LO(
REG_A6XX_RBBM_PERFCTR_SRAM_INIT_STATUS);
- postamble[count++] = CP_WAIT_REG_MEM_2_POLL_ADDR_HI(0);
+ postamble[count++] = CP_WAIT_REG_MEM_POLL_ADDR_HI(0);
postamble[count++] = CP_WAIT_REG_MEM_3_REF(0x1);
postamble[count++] = CP_WAIT_REG_MEM_4_MASK(0x1);
postamble[count++] = CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(0);
@@ -136,6 +136,21 @@ static void preempt_disable_postamble(struct a6xx_gpu *a6xx_gpu)
a6xx_gpu->postamble_enabled = false;
}
+/*
+ * Set the preemption keepalive vote. Note that this vote is different from the one
+ * used in a6xx_irq()
+ */
+static void a6xx_preempt_keepalive_vote(struct msm_gpu *gpu, bool on)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ if (adreno_has_gmu_wrapper(adreno_gpu))
+ return;
+
+ gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_PWR_COL_PREEMPT_KEEPALIVE, on);
+}
+
void a6xx_preempt_irq(struct msm_gpu *gpu)
{
uint32_t status;
@@ -172,10 +187,12 @@ void a6xx_preempt_irq(struct msm_gpu *gpu)
set_preempt_state(a6xx_gpu, PREEMPT_FINISH);
- update_wptr(gpu, a6xx_gpu->cur_ring);
+ update_wptr(a6xx_gpu, a6xx_gpu->cur_ring);
set_preempt_state(a6xx_gpu, PREEMPT_NONE);
+ a6xx_preempt_keepalive_vote(gpu, false);
+
trace_msm_gpu_preemption_irq(a6xx_gpu->cur_ring->id);
/*
@@ -268,7 +285,7 @@ void a6xx_preempt_trigger(struct msm_gpu *gpu)
*/
if (!ring || (a6xx_gpu->cur_ring == ring)) {
set_preempt_state(a6xx_gpu, PREEMPT_FINISH);
- update_wptr(gpu, a6xx_gpu->cur_ring);
+ update_wptr(a6xx_gpu, a6xx_gpu->cur_ring);
set_preempt_state(a6xx_gpu, PREEMPT_NONE);
spin_unlock_irqrestore(&a6xx_gpu->eval_lock, flags);
return;
@@ -302,13 +319,16 @@ void a6xx_preempt_trigger(struct msm_gpu *gpu)
spin_unlock_irqrestore(&ring->preempt_lock, flags);
- gpu_write64(gpu,
- REG_A6XX_CP_CONTEXT_SWITCH_SMMU_INFO,
- a6xx_gpu->preempt_smmu_iova[ring->id]);
+ /* Set the keepalive bit to keep the GPU ON until preemption is complete */
+ a6xx_preempt_keepalive_vote(gpu, true);
+
+ a6xx_fenced_write(a6xx_gpu,
+ REG_A6XX_CP_CONTEXT_SWITCH_SMMU_INFO, a6xx_gpu->preempt_smmu_iova[ring->id],
+ BIT(1), true);
- gpu_write64(gpu,
+ a6xx_fenced_write(a6xx_gpu,
REG_A6XX_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR,
- a6xx_gpu->preempt_iova[ring->id]);
+ a6xx_gpu->preempt_iova[ring->id], BIT(1), true);
a6xx_gpu->next_ring = ring;
@@ -328,7 +348,7 @@ void a6xx_preempt_trigger(struct msm_gpu *gpu)
set_preempt_state(a6xx_gpu, PREEMPT_TRIGGERED);
/* Trigger the preemption */
- gpu_write(gpu, REG_A6XX_CP_CONTEXT_SWITCH_CNTL, cntl);
+ a6xx_fenced_write(a6xx_gpu, REG_A6XX_CP_CONTEXT_SWITCH_CNTL, cntl, BIT(1), false);
}
static int preempt_init_ring(struct a6xx_gpu *a6xx_gpu,
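
The preemption path now pairs its own keepalive vote with fenced writes: the vote is taken before the three context-switch registers are written and is released from a6xx_preempt_irq() once the CP acknowledges. BIT(1) is assumed to be the fence-status bit covering the CP context-switch registers, while update_wptr() above uses BIT(0) for CP_RB_WPTR. A condensed ordering sketch built from the calls in the hunks above:

    static void preempt_trigger_sketch(struct msm_gpu *gpu,
    				   struct a6xx_gpu *a6xx_gpu,
    				   unsigned int id, u32 cntl)
    {
    	/* Hold GX up for the whole trigger sequence */
    	a6xx_preempt_keepalive_vote(gpu, true);

    	a6xx_fenced_write(a6xx_gpu, REG_A6XX_CP_CONTEXT_SWITCH_SMMU_INFO,
    			  a6xx_gpu->preempt_smmu_iova[id], BIT(1), true);
    	a6xx_fenced_write(a6xx_gpu,
    			  REG_A6XX_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR,
    			  a6xx_gpu->preempt_iova[id], BIT(1), true);
    	a6xx_fenced_write(a6xx_gpu, REG_A6XX_CP_CONTEXT_SWITCH_CNTL, cntl,
    			  BIT(1), false);

    	/* The vote is dropped in a6xx_preempt_irq() once the CP acks */
    }
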
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 50945bfe9b49..28f744f3caf7 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -24,6 +24,10 @@ bool disable_acd;
MODULE_PARM_DESC(disable_acd, "Forcefully disable GPU ACD");
module_param_unsafe(disable_acd, bool, 0400);
+static bool skip_gpu;
+MODULE_PARM_DESC(skip_gpu, "Skip GPU driver registration (0=register GPU driver (default), 1=skip GPU driver registration)");
+module_param(skip_gpu, bool, 0400);
+
extern const struct adreno_gpulist a2xx_gpulist;
extern const struct adreno_gpulist a3xx_gpulist;
extern const struct adreno_gpulist a4xx_gpulist;
@@ -184,6 +188,9 @@ bool adreno_has_gpu(struct device_node *node)
uint32_t chip_id;
int ret;
+ if (skip_gpu)
+ return false;
+
ret = find_chipid(node, &chip_id);
if (ret)
return false;
@@ -404,10 +411,16 @@ static struct platform_driver adreno_driver = {
void __init adreno_register(void)
{
+ if (skip_gpu)
+ return;
+
platform_driver_register(&adreno_driver);
}
void __exit adreno_unregister(void)
{
+ if (skip_gpu)
+ return;
+
platform_driver_unregister(&adreno_driver);
}
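
Since skip_gpu is a module_param() on the msm module, it can be set at module load time (e.g. modprobe msm skip_gpu=1) or, when built in, on the kernel command line (msm.skip_gpu=1); the 0400 permissions make it readable but not writable through sysfs after load.
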
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index f1230465bf0d..afaa3cfefd35 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -10,7 +10,7 @@
#include <linux/interconnect.h>
#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/kernel.h>
-#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/soc/qcom/mdt_loader.h>
@@ -33,7 +33,7 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname,
struct device *dev = &gpu->pdev->dev;
const struct firmware *fw;
const char *signed_fwname = NULL;
- struct device_node *np, *mem_np;
+ struct device_node *np;
struct resource r;
phys_addr_t mem_phys;
ssize_t mem_size;
@@ -51,18 +51,11 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname,
return -ENODEV;
}
- mem_np = of_parse_phandle(np, "memory-region", 0);
- of_node_put(np);
- if (!mem_np) {
+ ret = of_reserved_mem_region_to_resource(np, 0, &r);
+ if (ret) {
zap_available = false;
- return -EINVAL;
- }
-
- ret = of_address_to_resource(mem_np, 0, &r);
- of_node_put(mem_np);
- if (ret)
return ret;
-
+ }
mem_phys = r.start;
/*
@@ -209,9 +202,7 @@ adreno_iommu_create_vm(struct msm_gpu *gpu,
u64 start, size;
mmu = msm_iommu_gpu_new(&pdev->dev, gpu, quirks);
- if (!mmu)
- return ERR_PTR(-ENODEV);
- else if (IS_ERR_OR_NULL(mmu))
+ if (IS_ERR(mmu))
return ERR_CAST(mmu);
geometry = msm_iommu_get_geometry(mmu);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 9dc93c247196..390fa6720d9b 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -59,6 +59,7 @@ enum adreno_family {
#define ADRENO_QUIRK_HAS_CACHED_COHERENT BIT(4)
#define ADRENO_QUIRK_PREEMPTION BIT(5)
#define ADRENO_QUIRK_4GB_VA BIT(6)
+#define ADRENO_QUIRK_IFPC BIT(7)
/* Helper for formating the chip_id in the way that userspace tools like
* crashdec expect.
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
index 0fb5789c60d0..13cc658065c5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
@@ -32,6 +32,26 @@ enum dpu_perf_mode {
};
/**
+ * dpu_core_perf_adjusted_mode_clk - Adjust given mode clock rate according to
+ * the perf clock factor.
+ * @mode_clk_rate: Unadjusted mode clock rate
+ * @perf_cfg: performance configuration
+ */
+u64 dpu_core_perf_adjusted_mode_clk(u64 mode_clk_rate,
+ const struct dpu_perf_cfg *perf_cfg)
+{
+ u32 clk_factor;
+
+ clk_factor = perf_cfg->clk_inefficiency_factor;
+ if (clk_factor) {
+ mode_clk_rate *= clk_factor;
+ do_div(mode_clk_rate, 100);
+ }
+
+ return mode_clk_rate;
+}
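
As a worked example: with clk_inefficiency_factor = 105 (i.e. 105%), a 600000 kHz mode clock is adjusted to 600000 * 105 / 100 = 630000 kHz, while a factor of 0 leaves the rate untouched.
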
+
+/**
* _dpu_core_perf_calc_bw() - to calculate BW per crtc
* @perf_cfg: performance configuration
* @crtc: pointer to a crtc
@@ -75,28 +95,21 @@ static u64 _dpu_core_perf_calc_clk(const struct dpu_perf_cfg *perf_cfg,
struct drm_plane *plane;
struct dpu_plane_state *pstate;
struct drm_display_mode *mode;
- u64 crtc_clk;
- u32 clk_factor;
+ u64 mode_clk;
mode = &state->adjusted_mode;
- crtc_clk = (u64)mode->vtotal * mode->hdisplay * drm_mode_vrefresh(mode);
+ mode_clk = (u64)mode->vtotal * mode->hdisplay * drm_mode_vrefresh(mode);
drm_atomic_crtc_for_each_plane(plane, crtc) {
pstate = to_dpu_plane_state(plane->state);
if (!pstate)
continue;
- crtc_clk = max(pstate->plane_clk, crtc_clk);
- }
-
- clk_factor = perf_cfg->clk_inefficiency_factor;
- if (clk_factor) {
- crtc_clk *= clk_factor;
- do_div(crtc_clk, 100);
+ mode_clk = max(pstate->plane_clk, mode_clk);
}
- return crtc_clk;
+ return dpu_core_perf_adjusted_mode_clk(mode_clk, perf_cfg);
}
static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
index d2f21d34e501..3740bc97422c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
@@ -54,6 +54,9 @@ struct dpu_core_perf {
u32 fix_core_ab_vote;
};
+u64 dpu_core_perf_adjusted_mode_clk(u64 clk_rate,
+ const struct dpu_perf_cfg *perf_cfg);
+
int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
struct drm_crtc_state *state);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index 94912b4708fb..4b970a59deaf 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -377,11 +377,10 @@ static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
{
struct dpu_crtc_state *crtc_state;
- int lm_idx, lm_horiz_position;
+ int lm_idx;
crtc_state = to_dpu_crtc_state(crtc->state);
- lm_horiz_position = 0;
for (lm_idx = 0; lm_idx < crtc_state->num_mixers; lm_idx++) {
const struct drm_rect *lm_roi = &crtc_state->lm_bounds[lm_idx];
struct dpu_hw_mixer *hw_lm = crtc_state->mixers[lm_idx].hw_lm;
@@ -392,7 +391,7 @@ static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
cfg.out_width = drm_rect_width(lm_roi);
cfg.out_height = drm_rect_height(lm_roi);
- cfg.right_mixer = lm_horiz_position++;
+ cfg.right_mixer = lm_idx & 0x1;
cfg.flags = 0;
hw_lm->ops.setup_mixer_out(hw_lm, &cfg);
}
@@ -1534,6 +1533,7 @@ static enum drm_mode_status dpu_crtc_mode_valid(struct drm_crtc *crtc,
const struct drm_display_mode *mode)
{
struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
+ u64 adjusted_mode_clk;
/* if there is no 3d_mux block we cannot merge LMs so we cannot
* split the large layer into 2 LMs, filter out such modes
@@ -1541,6 +1541,17 @@ static enum drm_mode_status dpu_crtc_mode_valid(struct drm_crtc *crtc,
if (!dpu_kms->catalog->caps->has_3d_merge &&
mode->hdisplay > dpu_kms->catalog->caps->max_mixer_width)
return MODE_BAD_HVALUE;
+
+ adjusted_mode_clk = dpu_core_perf_adjusted_mode_clk(mode->clock,
+ dpu_kms->perf.perf_cfg);
+
+ /*
+ * The given mode, adjusted for the perf clock factor, should not exceed
+ * the max core clock rate
+ */
+ if (dpu_kms->perf.max_core_clk_rate < adjusted_mode_clk * 1000)
+ return MODE_CLOCK_HIGH;
+
/*
* max crtc width is equal to the max mixer width * 2 and max height is 4K
*/
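
Note the units in this check: mode->clock is in kHz, so multiplying the adjusted rate by 1000 yields Hz for comparison against max_core_clk_rate, which the DPU perf code keeps in Hz. For a 594000 kHz (4K@60) mode and a 105% inefficiency factor, the mode is rejected unless the core clock can reach roughly 623.7 MHz.
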
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
index 56a5b596554d..46f348972a97 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
@@ -446,7 +446,7 @@ static void _dpu_encoder_phys_wb_handle_wbdone_timeout(
static int dpu_encoder_phys_wb_wait_for_commit_done(
struct dpu_encoder_phys *phys_enc)
{
- unsigned long ret;
+ int ret;
struct dpu_encoder_wait_info wait_info;
struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index e824cd64fd3f..6641455c4ec6 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -338,7 +338,6 @@ static const struct dpu_sspp_sub_blks dpu_dma_sblk = _DMA_SBLK();
*************************************************************/
static const struct dpu_lm_sub_blks msm8998_lm_sblk = {
- .maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.maxblendstages = 7, /* excluding base layer */
.blendstage_base = { /* offsets relative to mixer base */
0x20, 0x50, 0x80, 0xb0, 0x230,
@@ -347,7 +346,6 @@ static const struct dpu_lm_sub_blks msm8998_lm_sblk = {
};
static const struct dpu_lm_sub_blks sdm845_lm_sblk = {
- .maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.maxblendstages = 11, /* excluding base layer */
.blendstage_base = { /* offsets relative to mixer base */
0x20, 0x38, 0x50, 0x68, 0x80, 0x98,
@@ -356,7 +354,6 @@ static const struct dpu_lm_sub_blks sdm845_lm_sblk = {
};
static const struct dpu_lm_sub_blks sc7180_lm_sblk = {
- .maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.maxblendstages = 7, /* excluding base layer */
.blendstage_base = { /* offsets relative to mixer base */
0x20, 0x38, 0x50, 0x68, 0x80, 0x98, 0xb0
@@ -364,7 +361,6 @@ static const struct dpu_lm_sub_blks sc7180_lm_sblk = {
};
static const struct dpu_lm_sub_blks sm8750_lm_sblk = {
- .maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.maxblendstages = 11, /* excluding base layer */
.blendstage_base = { /* offsets relative to mixer base */
/* 0x40 + n*0x30 */
@@ -374,7 +370,6 @@ static const struct dpu_lm_sub_blks sm8750_lm_sblk = {
};
static const struct dpu_lm_sub_blks qcm2290_lm_sblk = {
- .maxwidth = DEFAULT_DPU_LINE_WIDTH,
.maxblendstages = 4, /* excluding base layer */
.blendstage_base = { /* offsets relative to mixer base */
0x20, 0x38, 0x50, 0x68
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
index a78bb2c334e3..f0768f54e9b3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -307,7 +307,6 @@ struct dpu_sspp_sub_blks {
* @blendstage_base: Blend-stage register base offset
*/
struct dpu_lm_sub_blks {
- u32 maxwidth;
u32 maxblendstages;
u32 blendstage_base[MAX_BLOCKS];
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index a306077647c3..4e5a8ecd31f7 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -1110,7 +1110,7 @@ static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
{
struct drm_gpuvm *vm;
- vm = msm_kms_init_vm(dpu_kms->dev);
+ vm = msm_kms_init_vm(dpu_kms->dev, dpu_kms->dev->dev->parent);
if (IS_ERR(vm))
return PTR_ERR(vm);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index 6859e8ef6b05..f54cf0faa1c7 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -922,6 +922,9 @@ static int dpu_plane_is_multirect_capable(struct dpu_hw_sspp *sspp,
if (MSM_FORMAT_IS_YUV(fmt))
return false;
+ if (!sspp)
+ return true;
+
if (!test_bit(DPU_SSPP_SMART_DMA_V1, &sspp->cap->features) &&
!test_bit(DPU_SSPP_SMART_DMA_V2, &sspp->cap->features))
return false;
@@ -1028,6 +1031,7 @@ static int dpu_plane_try_multirect_shared(struct dpu_plane_state *pstate,
prev_pipe->multirect_mode != DPU_SSPP_MULTIRECT_NONE)
return false;
+	/* The current plane's SSPP may not be assigned yet; skip validating it in that case */
if (!dpu_plane_is_multirect_capable(pipe->sspp, pipe_cfg, fmt) ||
!dpu_plane_is_multirect_capable(prev_pipe->sspp, prev_pipe_cfg, prev_fmt))
return false;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index 25382120cb1a..2c77c74fac0f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -865,6 +865,21 @@ void dpu_rm_release_all_sspp(struct dpu_global_state *global_state,
ARRAY_SIZE(global_state->sspp_to_crtc_id), crtc_id);
}
+static char *dpu_hw_blk_type_name[] = {
+ [DPU_HW_BLK_TOP] = "TOP",
+ [DPU_HW_BLK_SSPP] = "SSPP",
+ [DPU_HW_BLK_LM] = "LM",
+ [DPU_HW_BLK_CTL] = "CTL",
+ [DPU_HW_BLK_PINGPONG] = "pingpong",
+ [DPU_HW_BLK_INTF] = "INTF",
+ [DPU_HW_BLK_WB] = "WB",
+ [DPU_HW_BLK_DSPP] = "DSPP",
+ [DPU_HW_BLK_MERGE_3D] = "merge_3d",
+ [DPU_HW_BLK_DSC] = "DSC",
+ [DPU_HW_BLK_CDM] = "CDM",
+ [DPU_HW_BLK_MAX] = "unknown",
+};
+
/**
* dpu_rm_get_assigned_resources - Get hw resources of the given type that are
* assigned to this encoder
@@ -946,13 +961,13 @@ int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
}
if (num_blks == blks_size) {
- DPU_ERROR("More than %d resources assigned to crtc %d\n",
- blks_size, crtc_id);
+ DPU_ERROR("More than %d %s assigned to crtc %d\n",
+ blks_size, dpu_hw_blk_type_name[type], crtc_id);
break;
}
if (!hw_blks[i]) {
- DPU_ERROR("Allocated resource %d unavailable to assign to crtc %d\n",
- type, crtc_id);
+ DPU_ERROR("%s unavailable to assign to crtc %d\n",
+ dpu_hw_blk_type_name[type], crtc_id);
break;
}
blks[num_blks++] = hw_blks[i];
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
index 8ff496082902..cd73468e369a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
@@ -80,7 +80,6 @@ static int dpu_wb_conn_atomic_check(struct drm_connector *connector,
static const struct drm_connector_funcs dpu_wb_conn_funcs = {
.reset = drm_atomic_helper_connector_reset,
.fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = drm_connector_cleanup,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
@@ -131,12 +130,9 @@ int dpu_writeback_init(struct drm_device *dev, struct drm_encoder *enc,
drm_connector_helper_add(&dpu_wb_conn->base.base, &dpu_wb_conn_helper_funcs);
- /* DPU initializes the encoder and sets it up completely for writeback
- * cases and hence should use the new API drm_writeback_connector_init_with_encoder
- * to initialize the writeback connector
- */
- rc = drm_writeback_connector_init_with_encoder(dev, &dpu_wb_conn->base, enc,
- &dpu_wb_conn_funcs, format_list, num_formats);
+ rc = drmm_writeback_connector_init(dev, &dpu_wb_conn->base,
+ &dpu_wb_conn_funcs, enc,
+ format_list, num_formats);
if (!rc)
dpu_wb_conn->wb_enc = enc;
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
index 0952c7f18abd..809ca191e9de 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
@@ -391,11 +391,9 @@ static void read_mdp_hw_revision(struct mdp4_kms *mdp4_kms,
static int mdp4_kms_init(struct drm_device *dev)
{
- struct platform_device *pdev = to_platform_device(dev->dev);
struct msm_drm_private *priv = dev->dev_private;
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(priv->kms));
struct msm_kms *kms = NULL;
- struct msm_mmu *mmu;
struct drm_gpuvm *vm;
int ret;
u32 major, minor;
@@ -458,29 +456,14 @@ static int mdp4_kms_init(struct drm_device *dev)
mdp4_disable(mdp4_kms);
mdelay(16);
- mmu = msm_iommu_new(&pdev->dev, 0);
- if (IS_ERR(mmu)) {
- ret = PTR_ERR(mmu);
+ vm = msm_kms_init_vm(mdp4_kms->dev, NULL);
+ if (IS_ERR(vm)) {
+ ret = PTR_ERR(vm);
goto fail;
- } else if (!mmu) {
- DRM_DEV_INFO(dev->dev, "no iommu, fallback to phys "
- "contig buffers for scanout\n");
- vm = NULL;
- } else {
- vm = msm_gem_vm_create(dev, mmu, "mdp4",
- 0x1000, 0x100000000 - 0x1000,
- true);
-
- if (IS_ERR(vm)) {
- if (!IS_ERR(mmu))
- mmu->funcs->destroy(mmu);
- ret = PTR_ERR(vm);
- goto fail;
- }
-
- kms->vm = vm;
}
+ kms->vm = vm;
+
ret = modeset_init(mdp4_kms);
if (ret) {
DRM_DEV_ERROR(dev->dev, "modeset_init failed: %d\n", ret);
@@ -529,7 +512,7 @@ static int mdp4_probe(struct platform_device *pdev)
mdp4_kms = devm_kzalloc(dev, sizeof(*mdp4_kms), GFP_KERNEL);
if (!mdp4_kms)
- return dev_err_probe(dev, -ENOMEM, "failed to allocate kms\n");
+ return -ENOMEM;
mdp4_kms->mmio = msm_ioremap(pdev, NULL);
if (IS_ERR(mdp4_kms->mmio))
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h
index fb348583dc84..06458d4ee48c 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h
@@ -202,6 +202,6 @@ static inline struct drm_encoder *mdp4_dsi_encoder_init(struct drm_device *dev)
}
#endif
-struct clk *mpd4_get_lcdc_clock(struct drm_device *dev);
+struct clk *mdp4_get_lcdc_clock(struct drm_device *dev);
#endif /* __MDP4_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
index 06a307c1272d..1051873057f6 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
@@ -375,7 +375,7 @@ struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev)
drm_encoder_helper_add(encoder, &mdp4_lcdc_encoder_helper_funcs);
- mdp4_lcdc_encoder->lcdc_clk = mpd4_get_lcdc_clock(dev);
+ mdp4_lcdc_encoder->lcdc_clk = mdp4_get_lcdc_clock(dev);
if (IS_ERR(mdp4_lcdc_encoder->lcdc_clk)) {
DRM_DEV_ERROR(dev->dev, "failed to get lvds_clk\n");
return ERR_CAST(mdp4_lcdc_encoder->lcdc_clk);
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c
index fa2c29470510..04c49bf3d854 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c
@@ -54,7 +54,7 @@ static const struct pll_rate *find_rate(unsigned long rate)
return &freqtbl[i-1];
}
-static int mpd4_lvds_pll_enable(struct clk_hw *hw)
+static int mdp4_lvds_pll_enable(struct clk_hw *hw)
{
struct mdp4_lvds_pll *lvds_pll = to_mdp4_lvds_pll(hw);
struct mdp4_kms *mdp4_kms = get_kms(lvds_pll);
@@ -80,7 +80,7 @@ static int mpd4_lvds_pll_enable(struct clk_hw *hw)
return 0;
}
-static void mpd4_lvds_pll_disable(struct clk_hw *hw)
+static void mdp4_lvds_pll_disable(struct clk_hw *hw)
{
struct mdp4_lvds_pll *lvds_pll = to_mdp4_lvds_pll(hw);
struct mdp4_kms *mdp4_kms = get_kms(lvds_pll);
@@ -91,21 +91,24 @@ static void mpd4_lvds_pll_disable(struct clk_hw *hw)
mdp4_write(mdp4_kms, REG_MDP4_LVDS_PHY_PLL_CTRL_0, 0x0);
}
-static unsigned long mpd4_lvds_pll_recalc_rate(struct clk_hw *hw,
+static unsigned long mdp4_lvds_pll_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct mdp4_lvds_pll *lvds_pll = to_mdp4_lvds_pll(hw);
return lvds_pll->pixclk;
}
-static long mpd4_lvds_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int mdp4_lvds_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- const struct pll_rate *pll_rate = find_rate(rate);
- return pll_rate->rate;
+ const struct pll_rate *pll_rate = find_rate(req->rate);
+
+ req->rate = pll_rate->rate;
+
+ return 0;
}
-static int mpd4_lvds_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+static int mdp4_lvds_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct mdp4_lvds_pll *lvds_pll = to_mdp4_lvds_pll(hw);
@@ -114,26 +117,26 @@ static int mpd4_lvds_pll_set_rate(struct clk_hw *hw, unsigned long rate,
}
-static const struct clk_ops mpd4_lvds_pll_ops = {
- .enable = mpd4_lvds_pll_enable,
- .disable = mpd4_lvds_pll_disable,
- .recalc_rate = mpd4_lvds_pll_recalc_rate,
- .round_rate = mpd4_lvds_pll_round_rate,
- .set_rate = mpd4_lvds_pll_set_rate,
+static const struct clk_ops mdp4_lvds_pll_ops = {
+ .enable = mdp4_lvds_pll_enable,
+ .disable = mdp4_lvds_pll_disable,
+ .recalc_rate = mdp4_lvds_pll_recalc_rate,
+ .determine_rate = mdp4_lvds_pll_determine_rate,
+ .set_rate = mdp4_lvds_pll_set_rate,
};
-static const struct clk_parent_data mpd4_lvds_pll_parents[] = {
+static const struct clk_parent_data mdp4_lvds_pll_parents[] = {
{ .fw_name = "pxo", .name = "pxo", },
};
static struct clk_init_data pll_init = {
- .name = "mpd4_lvds_pll",
- .ops = &mpd4_lvds_pll_ops,
- .parent_data = mpd4_lvds_pll_parents,
- .num_parents = ARRAY_SIZE(mpd4_lvds_pll_parents),
+ .name = "mdp4_lvds_pll",
+ .ops = &mdp4_lvds_pll_ops,
+ .parent_data = mdp4_lvds_pll_parents,
+ .num_parents = ARRAY_SIZE(mdp4_lvds_pll_parents),
};
-static struct clk_hw *mpd4_lvds_pll_init(struct drm_device *dev)
+static struct clk_hw *mdp4_lvds_pll_init(struct drm_device *dev)
{
struct mdp4_lvds_pll *lvds_pll;
int ret;
@@ -156,14 +159,14 @@ static struct clk_hw *mpd4_lvds_pll_init(struct drm_device *dev)
return &lvds_pll->pll_hw;
}
-struct clk *mpd4_get_lcdc_clock(struct drm_device *dev)
+struct clk *mdp4_get_lcdc_clock(struct drm_device *dev)
{
struct clk_hw *hw;
struct clk *clk;
/* TODO: do we need different pll in other cases? */
- hw = mpd4_lvds_pll_init(dev);
+ hw = mdp4_lvds_pll_init(dev);
if (IS_ERR(hw)) {
DRM_DEV_ERROR(dev->dev, "failed to register LVDS PLL\n");
return ERR_CAST(hw);
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index 5b6ca8dd929e..61edf6864092 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -534,7 +534,7 @@ static int mdp5_kms_init(struct drm_device *dev)
}
mdelay(16);
- vm = msm_kms_init_vm(mdp5_kms->dev);
+ vm = msm_kms_init_vm(mdp5_kms->dev, pdev->dev.parent);
if (IS_ERR(vm)) {
ret = PTR_ERR(vm);
goto fail;
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
index 3cbf08231492..e391505fdaf0 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -109,6 +109,7 @@ struct msm_dsi_phy {
struct msm_dsi_dphy_timing timing;
const struct msm_dsi_phy_cfg *cfg;
void *tuning_cfg;
+ void *pll_data;
enum msm_dsi_phy_usecase usecase;
bool regulator_ldo_mode;
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
index af2e30f3f842..ec486ff02c9b 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
@@ -444,21 +444,19 @@ static unsigned long dsi_pll_10nm_vco_recalc_rate(struct clk_hw *hw,
return (unsigned long)vco_rate;
}
-static long dsi_pll_10nm_clk_round_rate(struct clk_hw *hw,
- unsigned long rate, unsigned long *parent_rate)
+static int dsi_pll_10nm_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw);
- if (rate < pll_10nm->phy->cfg->min_pll_rate)
- return pll_10nm->phy->cfg->min_pll_rate;
- else if (rate > pll_10nm->phy->cfg->max_pll_rate)
- return pll_10nm->phy->cfg->max_pll_rate;
- else
- return rate;
+ req->rate = clamp_t(unsigned long, req->rate,
+ pll_10nm->phy->cfg->min_pll_rate, pll_10nm->phy->cfg->max_pll_rate);
+
+ return 0;
}
static const struct clk_ops clk_ops_dsi_pll_10nm_vco = {
- .round_rate = dsi_pll_10nm_clk_round_rate,
+ .determine_rate = dsi_pll_10nm_clk_determine_rate,
.set_rate = dsi_pll_10nm_vco_set_rate,
.recalc_rate = dsi_pll_10nm_vco_recalc_rate,
.prepare = dsi_pll_10nm_vco_prepare,
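
This and the following PHY conversions are all the same mechanical migration from the deprecated .round_rate clk_op to .determine_rate: instead of returning a clamped rate, the callback updates req->rate in place and returns a status. The general shape, with illustrative bounds standing in for each PLL's cfg limits:

    #define PLL_MIN_RATE  600000000UL	/* illustrative; real code uses cfg->min_pll_rate */
    #define PLL_MAX_RATE 1500000000UL	/* illustrative; real code uses cfg->max_pll_rate */

    static int example_determine_rate(struct clk_hw *hw,
    				  struct clk_rate_request *req)
    {
    	req->rate = clamp_t(unsigned long, req->rate,
    			    PLL_MIN_RATE, PLL_MAX_RATE);

    	return 0;	/* 0 = success; a negative errno rejects the request */
    }
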
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
index 3a1c8ece6657..fdefcbd9c284 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
@@ -578,21 +578,19 @@ static void dsi_pll_14nm_vco_unprepare(struct clk_hw *hw)
pll_14nm->phy->pll_on = false;
}
-static long dsi_pll_14nm_clk_round_rate(struct clk_hw *hw,
- unsigned long rate, unsigned long *parent_rate)
+static int dsi_pll_14nm_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct dsi_pll_14nm *pll_14nm = to_pll_14nm(hw);
- if (rate < pll_14nm->phy->cfg->min_pll_rate)
- return pll_14nm->phy->cfg->min_pll_rate;
- else if (rate > pll_14nm->phy->cfg->max_pll_rate)
- return pll_14nm->phy->cfg->max_pll_rate;
- else
- return rate;
+ req->rate = clamp_t(unsigned long, req->rate,
+ pll_14nm->phy->cfg->min_pll_rate, pll_14nm->phy->cfg->max_pll_rate);
+
+ return 0;
}
static const struct clk_ops clk_ops_dsi_pll_14nm_vco = {
- .round_rate = dsi_pll_14nm_clk_round_rate,
+ .determine_rate = dsi_pll_14nm_clk_determine_rate,
.set_rate = dsi_pll_14nm_vco_set_rate,
.recalc_rate = dsi_pll_14nm_vco_recalc_rate,
.prepare = dsi_pll_14nm_vco_prepare,
@@ -622,18 +620,20 @@ static unsigned long dsi_pll_14nm_postdiv_recalc_rate(struct clk_hw *hw,
postdiv->flags, width);
}
-static long dsi_pll_14nm_postdiv_round_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long *prate)
+static int dsi_pll_14nm_postdiv_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct dsi_pll_14nm_postdiv *postdiv = to_pll_14nm_postdiv(hw);
struct dsi_pll_14nm *pll_14nm = postdiv->pll;
- DBG("DSI%d PLL parent rate=%lu", pll_14nm->phy->id, rate);
+ DBG("DSI%d PLL parent rate=%lu", pll_14nm->phy->id, req->rate);
- return divider_round_rate(hw, rate, prate, NULL,
- postdiv->width,
- postdiv->flags);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate,
+ NULL,
+ postdiv->width,
+ postdiv->flags);
+
+ return 0;
}
static int dsi_pll_14nm_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -680,7 +680,7 @@ static int dsi_pll_14nm_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops clk_ops_dsi_pll_14nm_postdiv = {
.recalc_rate = dsi_pll_14nm_postdiv_recalc_rate,
- .round_rate = dsi_pll_14nm_postdiv_round_rate,
+ .determine_rate = dsi_pll_14nm_postdiv_determine_rate,
.set_rate = dsi_pll_14nm_postdiv_set_rate,
};
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
index 90348a2af3e9..d00e415b9a99 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
@@ -533,21 +533,20 @@ static void dsi_pll_28nm_vco_unprepare(struct clk_hw *hw)
pll_28nm->phy->pll_on = false;
}
-static long dsi_pll_28nm_clk_round_rate(struct clk_hw *hw,
- unsigned long rate, unsigned long *parent_rate)
+static int dsi_pll_28nm_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
- if (rate < pll_28nm->phy->cfg->min_pll_rate)
- return pll_28nm->phy->cfg->min_pll_rate;
- else if (rate > pll_28nm->phy->cfg->max_pll_rate)
- return pll_28nm->phy->cfg->max_pll_rate;
- else
- return rate;
+ req->rate = clamp_t(unsigned long, req->rate,
+ pll_28nm->phy->cfg->min_pll_rate,
+ pll_28nm->phy->cfg->max_pll_rate);
+
+ return 0;
}
static const struct clk_ops clk_ops_dsi_pll_28nm_vco_hpm = {
- .round_rate = dsi_pll_28nm_clk_round_rate,
+ .determine_rate = dsi_pll_28nm_clk_determine_rate,
.set_rate = dsi_pll_28nm_clk_set_rate,
.recalc_rate = dsi_pll_28nm_clk_recalc_rate,
.prepare = dsi_pll_28nm_vco_prepare_hpm,
@@ -556,7 +555,7 @@ static const struct clk_ops clk_ops_dsi_pll_28nm_vco_hpm = {
};
static const struct clk_ops clk_ops_dsi_pll_28nm_vco_lp = {
- .round_rate = dsi_pll_28nm_clk_round_rate,
+ .determine_rate = dsi_pll_28nm_clk_determine_rate,
.set_rate = dsi_pll_28nm_clk_set_rate,
.recalc_rate = dsi_pll_28nm_clk_recalc_rate,
.prepare = dsi_pll_28nm_vco_prepare_lp,
@@ -565,7 +564,7 @@ static const struct clk_ops clk_ops_dsi_pll_28nm_vco_lp = {
};
static const struct clk_ops clk_ops_dsi_pll_28nm_vco_8226 = {
- .round_rate = dsi_pll_28nm_clk_round_rate,
+ .determine_rate = dsi_pll_28nm_clk_determine_rate,
.set_rate = dsi_pll_28nm_clk_set_rate,
.recalc_rate = dsi_pll_28nm_clk_recalc_rate,
.prepare = dsi_pll_28nm_vco_prepare_8226,
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
index f3643320ff2f..8dcce9581dc3 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
@@ -231,21 +231,19 @@ static void dsi_pll_28nm_vco_unprepare(struct clk_hw *hw)
pll_28nm->phy->pll_on = false;
}
-static long dsi_pll_28nm_clk_round_rate(struct clk_hw *hw,
- unsigned long rate, unsigned long *parent_rate)
+static int dsi_pll_28nm_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
- if (rate < pll_28nm->phy->cfg->min_pll_rate)
- return pll_28nm->phy->cfg->min_pll_rate;
- else if (rate > pll_28nm->phy->cfg->max_pll_rate)
- return pll_28nm->phy->cfg->max_pll_rate;
- else
- return rate;
+ req->rate = clamp_t(unsigned long, req->rate,
+ pll_28nm->phy->cfg->min_pll_rate, pll_28nm->phy->cfg->max_pll_rate);
+
+ return 0;
}
static const struct clk_ops clk_ops_dsi_pll_28nm_vco = {
- .round_rate = dsi_pll_28nm_clk_round_rate,
+ .determine_rate = dsi_pll_28nm_clk_determine_rate,
.set_rate = dsi_pll_28nm_clk_set_rate,
.recalc_rate = dsi_pll_28nm_clk_recalc_rate,
.prepare = dsi_pll_28nm_vco_prepare,
@@ -296,18 +294,20 @@ static unsigned int get_vco_mul_factor(unsigned long byte_clk_rate)
return 8;
}
-static long clk_bytediv_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_bytediv_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
unsigned long best_parent;
unsigned int factor;
- factor = get_vco_mul_factor(rate);
+ factor = get_vco_mul_factor(req->rate);
+
+ best_parent = req->rate * factor;
+ req->best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
- best_parent = rate * factor;
- *prate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
+ req->rate = req->best_parent_rate / factor;
- return *prate / factor;
+ return 0;
}
static int clk_bytediv_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -328,7 +328,7 @@ static int clk_bytediv_set_rate(struct clk_hw *hw, unsigned long rate,
/* Our special byte clock divider ops */
static const struct clk_ops clk_bytediv_ops = {
- .round_rate = clk_bytediv_round_rate,
+ .determine_rate = clk_bytediv_determine_rate,
.set_rate = clk_bytediv_set_rate,
.recalc_rate = clk_bytediv_recalc_rate,
};
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
index 8c98f91a5930..32f06edd21a9 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
@@ -90,6 +90,13 @@ struct dsi_pll_7nm {
/* protects REG_DSI_7nm_PHY_CMN_CLK_CFG1 register */
spinlock_t pclk_mux_lock;
+ /*
+ * protects REG_DSI_7nm_PHY_CMN_CTRL_0 register and pll_enable_cnt
+ * member
+ */
+ spinlock_t pll_enable_lock;
+ int pll_enable_cnt;
+
struct pll_7nm_cached_state cached_state;
struct dsi_pll_7nm *slave;
@@ -103,6 +110,9 @@ struct dsi_pll_7nm {
*/
static struct dsi_pll_7nm *pll_7nm_list[DSI_MAX];
+static void dsi_pll_enable_pll_bias(struct dsi_pll_7nm *pll);
+static void dsi_pll_disable_pll_bias(struct dsi_pll_7nm *pll);
+
static void dsi_pll_setup_config(struct dsi_pll_config *config)
{
config->ssc_freq = 31500;
@@ -340,6 +350,7 @@ static int dsi_pll_7nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
struct dsi_pll_7nm *pll_7nm = to_pll_7nm(hw);
struct dsi_pll_config config;
+ dsi_pll_enable_pll_bias(pll_7nm);
DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_7nm->phy->id, rate,
parent_rate);
@@ -357,6 +368,7 @@ static int dsi_pll_7nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
dsi_pll_ssc_commit(pll_7nm, &config);
+ dsi_pll_disable_pll_bias(pll_7nm);
/* flush, ensure all register writes are done*/
wmb();
@@ -385,19 +397,47 @@ static int dsi_pll_7nm_lock_status(struct dsi_pll_7nm *pll)
static void dsi_pll_disable_pll_bias(struct dsi_pll_7nm *pll)
{
- u32 data = readl(pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0);
+ unsigned long flags;
+ u32 data;
+ spin_lock_irqsave(&pll->pll_enable_lock, flags);
+ --pll->pll_enable_cnt;
+ if (pll->pll_enable_cnt < 0) {
+ spin_unlock_irqrestore(&pll->pll_enable_lock, flags);
+ DRM_DEV_ERROR_RATELIMITED(&pll->phy->pdev->dev,
+ "bug: imbalance in disabling PLL bias\n");
+ return;
+ } else if (pll->pll_enable_cnt > 0) {
+ spin_unlock_irqrestore(&pll->pll_enable_lock, flags);
+ return;
+ } /* else: == 0 */
+
+ data = readl(pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0);
+ data &= ~DSI_7nm_PHY_CMN_CTRL_0_PLL_SHUTDOWNB;
writel(0, pll->phy->pll_base + REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES);
- writel(data & ~BIT(5), pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0);
+ writel(data, pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0);
+ spin_unlock_irqrestore(&pll->pll_enable_lock, flags);
ndelay(250);
}
static void dsi_pll_enable_pll_bias(struct dsi_pll_7nm *pll)
{
- u32 data = readl(pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0);
+ unsigned long flags;
+ u32 data;
+
+ spin_lock_irqsave(&pll->pll_enable_lock, flags);
+ if (pll->pll_enable_cnt++) {
+ spin_unlock_irqrestore(&pll->pll_enable_lock, flags);
+ WARN_ON(pll->pll_enable_cnt == INT_MAX);
+ return;
+ }
+
+ data = readl(pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0);
+ data |= DSI_7nm_PHY_CMN_CTRL_0_PLL_SHUTDOWNB;
+ writel(data, pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0);
- writel(data | BIT(5), pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0);
writel(0xc0, pll->phy->pll_base + REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES);
+ spin_unlock_irqrestore(&pll->pll_enable_lock, flags);
ndelay(250);
}
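
With the bias enable now refcounted under pll_enable_lock, every path that touches PLL registers (set_rate, recalc_rate, save_state) simply brackets its access; only the 0->1 and 1->0 transitions actually program CMN_CTRL_0 and SYSTEM_MUXES. A usage sketch of the resulting pattern:

    static u32 pll_reg_read_sketch(struct dsi_pll_7nm *pll, u32 reg)
    {
    	u32 val;

    	dsi_pll_enable_pll_bias(pll);	/* first caller powers the bias on */
    	val = readl(pll->phy->pll_base + reg);
    	dsi_pll_disable_pll_bias(pll);	/* last caller powers it back off */

    	return val;
    }
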
@@ -491,6 +531,10 @@ static int dsi_pll_7nm_vco_prepare(struct clk_hw *hw)
if (pll_7nm->slave)
dsi_pll_enable_global_clk(pll_7nm->slave);
+ writel(0x1, pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_RBUF_CTRL);
+ if (pll_7nm->slave)
+ writel(0x1, pll_7nm->slave->phy->base + REG_DSI_7nm_PHY_CMN_RBUF_CTRL);
+
error:
return rc;
}
@@ -534,6 +578,7 @@ static unsigned long dsi_pll_7nm_vco_recalc_rate(struct clk_hw *hw,
u32 dec;
u64 pll_freq, tmp64;
+ dsi_pll_enable_pll_bias(pll_7nm);
dec = readl(base + REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1);
dec &= 0xff;
@@ -558,24 +603,24 @@ static unsigned long dsi_pll_7nm_vco_recalc_rate(struct clk_hw *hw,
DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x",
pll_7nm->phy->id, (unsigned long)vco_rate, dec, frac);
+ dsi_pll_disable_pll_bias(pll_7nm);
+
return (unsigned long)vco_rate;
}
-static long dsi_pll_7nm_clk_round_rate(struct clk_hw *hw,
- unsigned long rate, unsigned long *parent_rate)
+static int dsi_pll_7nm_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct dsi_pll_7nm *pll_7nm = to_pll_7nm(hw);
- if (rate < pll_7nm->phy->cfg->min_pll_rate)
- return pll_7nm->phy->cfg->min_pll_rate;
- else if (rate > pll_7nm->phy->cfg->max_pll_rate)
- return pll_7nm->phy->cfg->max_pll_rate;
- else
- return rate;
+ req->rate = clamp_t(unsigned long, req->rate,
+ pll_7nm->phy->cfg->min_pll_rate, pll_7nm->phy->cfg->max_pll_rate);
+
+ return 0;
}
static const struct clk_ops clk_ops_dsi_pll_7nm_vco = {
- .round_rate = dsi_pll_7nm_clk_round_rate,
+ .determine_rate = dsi_pll_7nm_clk_determine_rate,
.set_rate = dsi_pll_7nm_vco_set_rate,
.recalc_rate = dsi_pll_7nm_vco_recalc_rate,
.prepare = dsi_pll_7nm_vco_prepare,
@@ -593,6 +638,7 @@ static void dsi_7nm_pll_save_state(struct msm_dsi_phy *phy)
void __iomem *phy_base = pll_7nm->phy->base;
u32 cmn_clk_cfg0, cmn_clk_cfg1;
+ dsi_pll_enable_pll_bias(pll_7nm);
cached->pll_out_div = readl(pll_7nm->phy->pll_base +
REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE);
cached->pll_out_div &= 0x3;
@@ -604,6 +650,7 @@ static void dsi_7nm_pll_save_state(struct msm_dsi_phy *phy)
cmn_clk_cfg1 = readl(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
cached->pll_mux = FIELD_GET(DSI_7nm_PHY_CMN_CLK_CFG1_DSICLK_SEL__MASK, cmn_clk_cfg1);
+ dsi_pll_disable_pll_bias(pll_7nm);
DBG("DSI PLL%d outdiv %x bit_clk_div %x pix_clk_div %x pll_mux %x",
pll_7nm->phy->id, cached->pll_out_div, cached->bit_clk_div,
cached->pix_clk_div, cached->pll_mux);
@@ -826,8 +873,10 @@ static int dsi_pll_7nm_init(struct msm_dsi_phy *phy)
spin_lock_init(&pll_7nm->postdiv_lock);
spin_lock_init(&pll_7nm->pclk_mux_lock);
+ spin_lock_init(&pll_7nm->pll_enable_lock);
pll_7nm->phy = phy;
+ phy->pll_data = pll_7nm;
ret = pll_7nm_register(pll_7nm, phy->provided_clocks->hws);
if (ret) {
@@ -839,6 +888,12 @@ static int dsi_pll_7nm_init(struct msm_dsi_phy *phy)
/* TODO: Remove this when we have proper display handover support */
msm_dsi_phy_pll_save_state(phy);
+ /*
+	 * Also store the proper vco_current_rate, because its value will be used
+	 * in dsi_7nm_pll_restore_state().
+ */
+ if (!dsi_pll_7nm_vco_recalc_rate(&pll_7nm->clk_hw, VCO_REF_CLK_RATE))
+ pll_7nm->vco_current_rate = pll_7nm->phy->cfg->min_pll_rate;
return 0;
}
@@ -910,8 +965,10 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
u32 const delay_us = 5;
u32 const timeout_us = 1000;
struct msm_dsi_dphy_timing *timing = &phy->timing;
+ struct dsi_pll_7nm *pll = phy->pll_data;
void __iomem *base = phy->base;
bool less_than_1500_mhz;
+ unsigned long flags;
u32 vreg_ctrl_0, vreg_ctrl_1, lane_ctrl0;
u32 glbl_pemph_ctrl_0;
u32 glbl_str_swi_cal_sel_ctrl, glbl_hstx_str_ctrl_0;
@@ -1033,9 +1090,13 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
glbl_rescode_bot_ctrl = 0x3c;
}
+ spin_lock_irqsave(&pll->pll_enable_lock, flags);
+ pll->pll_enable_cnt = 1;
/* de-assert digital and pll power down */
- data = BIT(6) | BIT(5);
+ data = DSI_7nm_PHY_CMN_CTRL_0_DIGTOP_PWRDN_B |
+ DSI_7nm_PHY_CMN_CTRL_0_PLL_SHUTDOWNB;
writel(data, base + REG_DSI_7nm_PHY_CMN_CTRL_0);
+ spin_unlock_irqrestore(&pll->pll_enable_lock, flags);
/* Assert PLL core reset */
writel(0x00, base + REG_DSI_7nm_PHY_CMN_PLL_CNTRL);
@@ -1148,7 +1209,9 @@ static bool dsi_7nm_set_continuous_clock(struct msm_dsi_phy *phy, bool enable)
static void dsi_7nm_phy_disable(struct msm_dsi_phy *phy)
{
+ struct dsi_pll_7nm *pll = phy->pll_data;
void __iomem *base = phy->base;
+ unsigned long flags;
u32 data;
DBG("");
@@ -1175,8 +1238,12 @@ static void dsi_7nm_phy_disable(struct msm_dsi_phy *phy)
writel(data, base + REG_DSI_7nm_PHY_CMN_CTRL_0);
writel(0, base + REG_DSI_7nm_PHY_CMN_LANE_CTRL0);
+ spin_lock_irqsave(&pll->pll_enable_lock, flags);
+ pll->pll_enable_cnt = 0;
/* Turn off all PHY blocks */
writel(0x00, base + REG_DSI_7nm_PHY_CMN_CTRL_0);
+ spin_unlock_irqrestore(&pll->pll_enable_lock, flags);
+
/* make sure phy is turned off */
wmb();
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
index 8c8d80b59573..36e928b0fd5a 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
@@ -629,16 +629,12 @@ static int hdmi_8996_pll_prepare(struct clk_hw *hw)
return 0;
}
-static long hdmi_8996_pll_round_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long *parent_rate)
+static int hdmi_8996_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- if (rate < HDMI_PCLK_MIN_FREQ)
- return HDMI_PCLK_MIN_FREQ;
- else if (rate > HDMI_PCLK_MAX_FREQ)
- return HDMI_PCLK_MAX_FREQ;
- else
- return rate;
+ req->rate = clamp_t(unsigned long, req->rate, HDMI_PCLK_MIN_FREQ, HDMI_PCLK_MAX_FREQ);
+
+ return 0;
}
static unsigned long hdmi_8996_pll_recalc_rate(struct clk_hw *hw,
@@ -684,7 +680,7 @@ static int hdmi_8996_pll_is_enabled(struct clk_hw *hw)
static const struct clk_ops hdmi_8996_pll_ops = {
.set_rate = hdmi_8996_pll_set_clk_rate,
- .round_rate = hdmi_8996_pll_round_rate,
+ .determine_rate = hdmi_8996_pll_determine_rate,
.recalc_rate = hdmi_8996_pll_recalc_rate,
.prepare = hdmi_8996_pll_prepare,
.unprepare = hdmi_8996_pll_unprepare,
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c
index 33bb48ae58a2..a86ff3706369 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c
@@ -646,16 +646,12 @@ static int hdmi_8998_pll_prepare(struct clk_hw *hw)
return 0;
}
-static long hdmi_8998_pll_round_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long *parent_rate)
+static int hdmi_8998_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- if (rate < HDMI_PCLK_MIN_FREQ)
- return HDMI_PCLK_MIN_FREQ;
- else if (rate > HDMI_PCLK_MAX_FREQ)
- return HDMI_PCLK_MAX_FREQ;
- else
- return rate;
+ req->rate = clamp_t(unsigned long, req->rate, HDMI_PCLK_MIN_FREQ, HDMI_PCLK_MAX_FREQ);
+
+ return 0;
}
static unsigned long hdmi_8998_pll_recalc_rate(struct clk_hw *hw,
@@ -688,7 +684,7 @@ static int hdmi_8998_pll_is_enabled(struct clk_hw *hw)
static const struct clk_ops hdmi_8998_pll_ops = {
.set_rate = hdmi_8998_pll_set_clk_rate,
- .round_rate = hdmi_8998_pll_round_rate,
+ .determine_rate = hdmi_8998_pll_determine_rate,
.recalc_rate = hdmi_8998_pll_recalc_rate,
.prepare = hdmi_8998_pll_prepare,
.unprepare = hdmi_8998_pll_unprepare,
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
index 83c8781fcc3f..6ba6bbdb7e05 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
@@ -373,12 +373,14 @@ static unsigned long hdmi_pll_recalc_rate(struct clk_hw *hw,
return pll->pixclk;
}
-static long hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int hdmi_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- const struct pll_rate *pll_rate = find_rate(rate);
+ const struct pll_rate *pll_rate = find_rate(req->rate);
+
+ req->rate = pll_rate->rate;
- return pll_rate->rate;
+ return 0;
}
static int hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -402,7 +404,7 @@ static const struct clk_ops hdmi_pll_ops = {
.enable = hdmi_pll_enable,
.disable = hdmi_pll_disable,
.recalc_rate = hdmi_pll_recalc_rate,
- .round_rate = hdmi_pll_round_rate,
+ .determine_rate = hdmi_pll_determine_rate,
.set_rate = hdmi_pll_set_rate,
};
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 9dcc7a596a11..7e977fec4100 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -826,6 +826,7 @@ static const struct file_operations fops = {
#define DRIVER_FEATURES_KMS ( \
DRIVER_GEM | \
+ DRIVER_GEM_GPUVA | \
DRIVER_ATOMIC | \
DRIVER_MODESET | \
0 )
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 985db9febd98..6d847d593f1a 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -229,7 +229,7 @@ void msm_crtc_disable_vblank(struct drm_crtc *crtc);
int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu);
-struct drm_gpuvm *msm_kms_init_vm(struct drm_device *dev);
+struct drm_gpuvm *msm_kms_init_vm(struct drm_device *dev, struct device *mdss_dev);
bool msm_use_mmu(struct drm_device *dev);
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index e7631f4ef530..07d8cdd6bb2e 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -191,7 +191,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
if (!msm_obj->pages) {
struct drm_device *dev = obj->dev;
struct page **p;
- int npages = obj->size >> PAGE_SHIFT;
+ size_t npages = obj->size >> PAGE_SHIFT;
p = drm_gem_get_pages(obj);
@@ -1148,7 +1148,7 @@ static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct
/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
- uint32_t size, uint32_t flags, uint32_t *handle,
+ size_t size, uint32_t flags, uint32_t *handle,
char *name)
{
struct drm_gem_object *obj;
@@ -1214,9 +1214,8 @@ static const struct drm_gem_object_funcs msm_gem_object_funcs = {
.vm_ops = &vm_ops,
};
-static int msm_gem_new_impl(struct drm_device *dev,
- uint32_t size, uint32_t flags,
- struct drm_gem_object **obj)
+static int msm_gem_new_impl(struct drm_device *dev, uint32_t flags,
+ struct drm_gem_object **obj)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj;
@@ -1250,7 +1249,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
return 0;
}
-struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags)
+struct drm_gem_object *msm_gem_new(struct drm_device *dev, size_t size, uint32_t flags)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj;
@@ -1265,7 +1264,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32
if (size == 0)
return ERR_PTR(-EINVAL);
- ret = msm_gem_new_impl(dev, size, flags, &obj);
+ ret = msm_gem_new_impl(dev, flags, &obj);
if (ret)
return ERR_PTR(ret);
@@ -1305,12 +1304,12 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj;
struct drm_gem_object *obj;
- uint32_t size;
- int ret, npages;
+ size_t size, npages;
+ int ret;
size = PAGE_ALIGN(dmabuf->size);
- ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
+ ret = msm_gem_new_impl(dev, MSM_BO_WC, &obj);
if (ret)
return ERR_PTR(ret);
@@ -1353,7 +1352,7 @@ fail:
return ERR_PTR(ret);
}
-void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size, uint32_t flags,
+void *msm_gem_kernel_new(struct drm_device *dev, size_t size, uint32_t flags,
struct drm_gpuvm *vm, struct drm_gem_object **bo,
uint64_t *iova)
{
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 751c3b4965bc..a4cf31853c50 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -297,10 +297,10 @@ bool msm_gem_active(struct drm_gem_object *obj);
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
int msm_gem_cpu_fini(struct drm_gem_object *obj);
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
- uint32_t size, uint32_t flags, uint32_t *handle, char *name);
+ size_t size, uint32_t flags, uint32_t *handle, char *name);
struct drm_gem_object *msm_gem_new(struct drm_device *dev,
- uint32_t size, uint32_t flags);
-void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size, uint32_t flags,
+ size_t size, uint32_t flags);
+void *msm_gem_kernel_new(struct drm_device *dev, size_t size, uint32_t flags,
struct drm_gpuvm *vm, struct drm_gem_object **bo,
uint64_t *iova);
void msm_gem_kernel_put(struct drm_gem_object *bo, struct drm_gpuvm *vm);
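The uint32_t to size_t widening in the prototypes above matters once object sizes can reach or exceed 4 GiB: the old 32-bit parameter silently truncated the size before the page count was computed. A standalone sketch of the failure mode (a 64-bit size_t and PAGE_SHIFT == 12 are assumptions of this example, not guarantees of the driver):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		size_t size = 5ULL << 30;		/* hypothetical 5 GiB BO; assumes 64-bit size_t */
		uint32_t size32 = (uint32_t)size;	/* the old prototype: silent truncation */
		int npages32 = size32 >> 12;		/* PAGE_SHIFT == 12 assumed for the example */
		size_t npages = size >> 12;

		printf("uint32_t: %u bytes -> %d pages\n", size32, npages32);
		printf("size_t:   %zu bytes -> %zu pages\n", size, npages);
		return 0;
	}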
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index c0a33ac839cb..036d34c674d9 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -15,7 +15,7 @@
struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- int npages = obj->size >> PAGE_SHIFT;
+ size_t npages = obj->size >> PAGE_SHIFT;
if (msm_obj->flags & MSM_BO_NO_SHARE)
return ERR_PTR(-EINVAL);
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index 00d0f3b7ba32..8316af1723c2 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -396,7 +396,14 @@ msm_gem_vma_new(struct drm_gpuvm *gpuvm, struct drm_gem_object *obj,
if (obj)
GEM_WARN_ON((range_end - range_start) > obj->size);
- drm_gpuva_init(&vma->base, range_start, range_end - range_start, obj, offset);
+ struct drm_gpuva_op_map op_map = {
+ .va.addr = range_start,
+ .va.range = range_end - range_start,
+ .gem.obj = obj,
+ .gem.offset = offset,
+ };
+
+ drm_gpuva_init_from_op(&vma->base, &op_map);
vma->mapped = false;
ret = drm_gpuva_insert(&vm->base, &vma->base);
@@ -1023,6 +1030,7 @@ vm_bind_job_lookup_ops(struct msm_vm_bind_job *job, struct drm_msm_vm_bind *args
struct drm_device *dev = job->vm->drm;
int ret = 0;
int cnt = 0;
+ int i = -1;
if (args->nr_ops == 1) {
/* Single op case, the op is inlined: */
@@ -1056,11 +1064,12 @@ vm_bind_job_lookup_ops(struct msm_vm_bind_job *job, struct drm_msm_vm_bind *args
spin_lock(&file->table_lock);
- for (unsigned i = 0; i < args->nr_ops; i++) {
+ for (i = 0; i < args->nr_ops; i++) {
+ struct msm_vm_bind_op *op = &job->ops[i];
struct drm_gem_object *obj;
- if (!job->ops[i].handle) {
- job->ops[i].obj = NULL;
+ if (!op->handle) {
+ op->obj = NULL;
continue;
}
@@ -1068,16 +1077,22 @@ vm_bind_job_lookup_ops(struct msm_vm_bind_job *job, struct drm_msm_vm_bind *args
* normally use drm_gem_object_lookup(), but for bulk lookup
* all under single table_lock just hit object_idr directly:
*/
- obj = idr_find(&file->object_idr, job->ops[i].handle);
+ obj = idr_find(&file->object_idr, op->handle);
if (!obj) {
- ret = UERR(EINVAL, dev, "invalid handle %u at index %u\n", job->ops[i].handle, i);
+ ret = UERR(EINVAL, dev, "invalid handle %u at index %u\n", op->handle, i);
goto out_unlock;
}
drm_gem_object_get(obj);
- job->ops[i].obj = obj;
+ op->obj = obj;
cnt++;
+
+ if ((op->range + op->obj_offset) > obj->size) {
+ ret = UERR(EINVAL, dev, "invalid range: %016llx + %016llx > %016zx\n",
+ op->range, op->obj_offset, obj->size);
+ goto out_unlock;
+ }
}
*nr_bos = cnt;
@@ -1085,6 +1100,17 @@ vm_bind_job_lookup_ops(struct msm_vm_bind_job *job, struct drm_msm_vm_bind *args
out_unlock:
spin_unlock(&file->table_lock);
+ if (ret) {
+ for (; i >= 0; i--) {
+ struct msm_vm_bind_op *op = &job->ops[i];
+
+ if (!op->obj)
+ continue;
+
+ drm_gem_object_put(op->obj);
+ op->obj = NULL;
+ }
+ }
out:
return ret;
}
@@ -1200,11 +1226,17 @@ vm_bind_job_lock_objects(struct msm_vm_bind_job *job, struct drm_exec *exec)
op->obj_offset);
break;
case MSM_VM_BIND_OP_MAP:
- case MSM_VM_BIND_OP_MAP_NULL:
- ret = drm_gpuvm_sm_map_exec_lock(job->vm, exec, 1,
- op->iova, op->range,
- op->obj, op->obj_offset);
+ case MSM_VM_BIND_OP_MAP_NULL: {
+ struct drm_gpuvm_map_req map_req = {
+ .map.va.addr = op->iova,
+ .map.va.range = op->range,
+ .map.gem.obj = op->obj,
+ .map.gem.offset = op->obj_offset,
+ };
+
+ ret = drm_gpuvm_sm_map_exec_lock(job->vm, exec, 1, &map_req);
break;
+ }
default:
/*
* lookup_op() should have already thrown an error for
@@ -1312,10 +1344,17 @@ vm_bind_job_prepare(struct msm_vm_bind_job *job)
if (op->flags & MSM_VM_BIND_OP_DUMP)
arg.flags |= MSM_VMA_DUMP;
fallthrough;
- case MSM_VM_BIND_OP_MAP_NULL:
- ret = drm_gpuvm_sm_map(job->vm, &arg, op->iova,
- op->range, op->obj, op->obj_offset);
+ case MSM_VM_BIND_OP_MAP_NULL: {
+ struct drm_gpuvm_map_req map_req = {
+ .map.va.addr = op->iova,
+ .map.va.range = op->range,
+ .map.gem.obj = op->obj,
+ .map.gem.offset = op->obj_offset,
+ };
+
+ ret = drm_gpuvm_sm_map(job->vm, &arg, &map_req);
break;
+ }
default:
/*
* lookup_op() should have already thrown an error for
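The vm_bind_job_lookup_ops() hunks above add a cleanup path: i starts at -1 and is advanced by the lookup loop, so on failure the code can walk back over exactly the references it took and drop them. The same idiom, reduced to a runnable sketch with invented get/put helpers:

	#include <stdio.h>

	#define NOBJS 4

	static int held[NOBJS];

	static int get_obj(int idx)
	{
		printf("get %d\n", idx);
		return idx != 2;	/* invented failure at index 2 */
	}

	static void put_obj(int idx)
	{
		printf("put %d\n", idx);
	}

	int main(void)
	{
		int ret = 0;
		int i = -1;	/* -1 so the unwind loop is a no-op if we fail before the loop */

		for (i = 0; i < NOBJS; i++) {
			if (!get_obj(i)) {
				ret = -1;	/* failed mid-way, references 0..i-1 are held */
				break;
			}
			held[i] = 1;
		}

		if (ret) {
			/* walk back over everything acquired so far, dropping in reverse */
			for (; i >= 0; i--) {
				if (!held[i])
					continue;
				put_obj(i);
				held[i] = 0;
			}
		}
		return ret ? 1 : 0;
	}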
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 26c5ce897cbb..17759abc46d7 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -304,7 +304,7 @@ static void crashstate_get_bos(struct msm_gpu_state *state, struct msm_gem_submi
sizeof(struct msm_gpu_state_bo), GFP_KERNEL);
for (int i = 0; state->bos && i < submit->nr_bos; i++) {
- struct drm_gem_object *obj = submit->bos[i].obj;;
+ struct drm_gem_object *obj = submit->bos[i].obj;
bool dump = rd_full || (submit->bos[i].flags & MSM_SUBMIT_BO_DUMP);
msm_gem_lock(obj);
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index b2a96544f92a..a597f2bee30b 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -16,6 +16,7 @@
#include "msm_drv.h"
#include "msm_fence.h"
+#include "msm_gpu_trace.h"
#include "msm_ringbuffer.h"
#include "msm_gem.h"
@@ -91,6 +92,7 @@ struct msm_gpu_funcs {
* for cmdstream that is buffered in this FIFO upstream of the CP fw.
*/
bool (*progress)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
+ void (*sysprof_setup)(struct msm_gpu *gpu);
};
/* Additional state for iommu faults: */
@@ -613,16 +615,19 @@ struct msm_gpu_state {
static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data)
{
+ trace_msm_gpu_regaccess(reg);
writel(data, gpu->mmio + (reg << 2));
}
static inline u32 gpu_read(struct msm_gpu *gpu, u32 reg)
{
+ trace_msm_gpu_regaccess(reg);
return readl(gpu->mmio + (reg << 2));
}
static inline void gpu_rmw(struct msm_gpu *gpu, u32 reg, u32 mask, u32 or)
{
+ trace_msm_gpu_regaccess(reg);
msm_rmw(gpu->mmio + (reg << 2), mask, or);
}
@@ -644,7 +649,9 @@ static inline u64 gpu_read64(struct msm_gpu *gpu, u32 reg)
* when the lo is read, so make sure to read the lo first to trigger
* that
*/
+ trace_msm_gpu_regaccess(reg);
val = (u64) readl(gpu->mmio + (reg << 2));
+ trace_msm_gpu_regaccess(reg+1);
val |= ((u64) readl(gpu->mmio + ((reg + 1) << 2)) << 32);
return val;
@@ -652,8 +659,10 @@ static inline u64 gpu_read64(struct msm_gpu *gpu, u32 reg)
static inline void gpu_write64(struct msm_gpu *gpu, u32 reg, u64 val)
{
+ trace_msm_gpu_regaccess(reg);
/* Why not a writeq here? Read the screed above */
writel(lower_32_bits(val), gpu->mmio + (reg << 2));
+ trace_msm_gpu_regaccess(reg+1);
writel(upper_32_bits(val), gpu->mmio + ((reg + 1) << 2));
}
diff --git a/drivers/gpu/drm/msm/msm_gpu_trace.h b/drivers/gpu/drm/msm/msm_gpu_trace.h
index 781bbe5540bd..5417f8d389a3 100644
--- a/drivers/gpu/drm/msm/msm_gpu_trace.h
+++ b/drivers/gpu/drm/msm/msm_gpu_trace.h
@@ -219,6 +219,18 @@ TRACE_EVENT(msm_mmu_prealloc_cleanup,
TP_printk("count=%u, remaining=%u", __entry->count, __entry->remaining)
);
+TRACE_EVENT(msm_gpu_regaccess,
+ TP_PROTO(u32 offset),
+ TP_ARGS(offset),
+ TP_STRUCT__entry(
+ __field(u32, offset)
+ ),
+ TP_fast_assign(
+ __entry->offset = offset;
+ ),
+ TP_printk("offset=0x%x", __entry->offset)
+);
+
#endif
#undef TRACE_INCLUDE_PATH
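Once the driver is loaded, the new msm_gpu_regaccess event is controlled like any other tracepoint; assuming this header keeps TRACE_SYSTEM set to drm_msm, writing 1 to /sys/kernel/tracing/events/drm_msm/msm_gpu_regaccess/enable logs one offset per register access (the exact path depends on the TRACE_SYSTEM name and where tracefs is mounted). Disabled tracepoints are patched out via static keys, so the reads and writes instrumented above pay essentially nothing when tracing is off.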
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 76cdd5ea06a0..0e18619f96cb 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -721,7 +721,7 @@ struct msm_mmu *msm_iommu_new(struct device *dev, unsigned long quirks)
int ret;
if (!device_iommu_mapped(dev))
- return NULL;
+ return ERR_PTR(-ENODEV);
domain = iommu_paging_domain_alloc(dev);
if (IS_ERR(domain))
@@ -756,7 +756,7 @@ struct msm_mmu *msm_iommu_disp_new(struct device *dev, unsigned long quirks)
struct msm_mmu *mmu;
mmu = msm_iommu_new(dev, quirks);
- if (IS_ERR_OR_NULL(mmu))
+ if (IS_ERR(mmu))
return mmu;
iommu = to_msm_iommu(mmu);
@@ -772,11 +772,11 @@ struct msm_mmu *msm_iommu_gpu_new(struct device *dev, struct msm_gpu *gpu, unsig
struct msm_mmu *mmu;
mmu = msm_iommu_new(dev, quirks);
- if (IS_ERR_OR_NULL(mmu))
+ if (IS_ERR(mmu))
return mmu;
iommu = to_msm_iommu(mmu);
- if (adreno_smmu && adreno_smmu->cookie) {
+ if (adreno_smmu->cookie) {
const struct io_pgtable_cfg *cfg =
adreno_smmu->get_ttbr1_cfg(adreno_smmu->cookie);
size_t tblsz = get_tblsz(cfg);
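The msm_iommu.c hunks above replace the NULL return with ERR_PTR(-ENODEV), so callers can use plain IS_ERR() instead of IS_ERR_OR_NULL(). A self-contained sketch of that convention, with userspace stand-ins for the kernel's <linux/err.h> helpers:

	#include <errno.h>
	#include <stdint.h>
	#include <stdio.h>

	/* userspace stand-ins for the kernel's ERR_PTR helpers */
	#define MAX_ERRNO	4095
	#define ERR_PTR(e)	((void *)(intptr_t)(e))
	#define PTR_ERR(p)	((long)(intptr_t)(p))
	#define IS_ERR(p)	((uintptr_t)(p) >= (uintptr_t)-MAX_ERRNO)

	static void *iommu_new(int have_iommu)
	{
		static int dummy;

		if (!have_iommu)
			return ERR_PTR(-ENODEV);	/* was: return NULL */
		return &dummy;
	}

	int main(int argc, char **argv)
	{
		void *mmu = iommu_new(argc > 1);	/* any argument simulates an IOMMU */

		(void)argv;
		if (IS_ERR(mmu)) {	/* callers no longer need IS_ERR_OR_NULL() */
			printf("no mmu: %ld\n", PTR_ERR(mmu));
			return 1;
		}
		puts("got mmu");
		return 0;
	}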
diff --git a/drivers/gpu/drm/msm/msm_kms.c b/drivers/gpu/drm/msm/msm_kms.c
index 56828d218e88..6e5e94f5c9a7 100644
--- a/drivers/gpu/drm/msm/msm_kms.c
+++ b/drivers/gpu/drm/msm/msm_kms.c
@@ -177,12 +177,11 @@ static int msm_kms_fault_handler(void *arg, unsigned long iova, int flags, void
return -ENOSYS;
}
-struct drm_gpuvm *msm_kms_init_vm(struct drm_device *dev)
+struct drm_gpuvm *msm_kms_init_vm(struct drm_device *dev, struct device *mdss_dev)
{
struct drm_gpuvm *vm;
struct msm_mmu *mmu;
struct device *mdp_dev = dev->dev;
- struct device *mdss_dev = mdp_dev->parent;
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
struct device *iommu_dev;
@@ -193,18 +192,17 @@ struct drm_gpuvm *msm_kms_init_vm(struct drm_device *dev)
*/
if (device_iommu_mapped(mdp_dev))
iommu_dev = mdp_dev;
- else
+ else if (mdss_dev && device_iommu_mapped(mdss_dev))
iommu_dev = mdss_dev;
+ else {
+ drm_info(dev, "no IOMMU, bailing out\n");
+ return ERR_PTR(-ENODEV);
+ }
mmu = msm_iommu_disp_new(iommu_dev, 0);
if (IS_ERR(mmu))
return ERR_CAST(mmu);
- if (!mmu) {
- drm_info(dev, "no IOMMU, fallback to phys contig buffers for scanout\n");
- return NULL;
- }
-
vm = msm_gem_vm_create(dev, mmu, "mdp_kms",
0x1000, 0x100000000 - 0x1000, true);
if (IS_ERR(vm)) {
diff --git a/drivers/gpu/drm/msm/msm_mdss.c b/drivers/gpu/drm/msm/msm_mdss.c
index 39885b333910..2d0e3e784c04 100644
--- a/drivers/gpu/drm/msm/msm_mdss.c
+++ b/drivers/gpu/drm/msm/msm_mdss.c
@@ -154,8 +154,7 @@ static int _msm_mdss_irq_domain_add(struct msm_mdss *msm_mdss)
dev = msm_mdss->dev;
- domain = irq_domain_create_linear(of_fwnode_handle(dev->of_node), 32,
- &msm_mdss_irqdomain_ops, msm_mdss);
+ domain = irq_domain_create_linear(dev_fwnode(dev), 32, &msm_mdss_irqdomain_ops, msm_mdss);
if (!domain) {
dev_err(dev, "failed to add irq_domain\n");
return -EINVAL;
diff --git a/drivers/gpu/drm/msm/msm_submitqueue.c b/drivers/gpu/drm/msm/msm_submitqueue.c
index 8617a82cd6b3..d53dfad16bde 100644
--- a/drivers/gpu/drm/msm/msm_submitqueue.c
+++ b/drivers/gpu/drm/msm/msm_submitqueue.c
@@ -40,6 +40,10 @@ int msm_context_set_sysprof(struct msm_context *ctx, struct msm_gpu *gpu, int sy
break;
}
+ /* Some gpu families require additional setup for sysprof */
+ if (gpu->funcs->sysprof_setup)
+ gpu->funcs->sysprof_setup(gpu);
+
ctx->sysprof = sysprof;
return 0;
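The sysprof hook added above follows the usual optional-callback pattern: a function pointer in the funcs table that stays NULL for families needing no extra setup, guarded at the call site. A runnable sketch (the family names and behavior are illustrative only):

	#include <stdio.h>

	struct gpu_funcs {
		/* optional: NULL for families that need no extra sysprof setup */
		void (*sysprof_setup)(void);
	};

	static void a6xx_sysprof_setup(void)
	{
		puts("a6xx: extra sysprof setup");
	}

	static const struct gpu_funcs a6xx_funcs = { .sysprof_setup = a6xx_sysprof_setup };
	static const struct gpu_funcs a2xx_funcs = { 0 };

	static void set_sysprof(const struct gpu_funcs *funcs)
	{
		/* mirror of the new call site: only dispatch if the family opted in */
		if (funcs->sysprof_setup)
			funcs->sysprof_setup();
	}

	int main(void)
	{
		set_sysprof(&a6xx_funcs);	/* prints the setup message */
		set_sysprof(&a2xx_funcs);	/* silently skipped */
		return 0;
	}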
diff --git a/drivers/gpu/drm/msm/registers/adreno/a6xx.xml b/drivers/gpu/drm/msm/registers/adreno/a6xx.xml
index 86fab2750ba7..9459b6038217 100644
--- a/drivers/gpu/drm/msm/registers/adreno/a6xx.xml
+++ b/drivers/gpu/drm/msm/registers/adreno/a6xx.xml
@@ -814,7 +814,7 @@ by a particular renderpass/blit.
<bitfield name="Y" low="16" high="29" type="uint"/>
</bitset>
- <reg32 offset="0x8000" name="GRAS_CL_CNTL" usage="rp_blit">
+ <bitset name="a6xx_gras_cl_cntl" inline="yes">
<bitfield name="CLIP_DISABLE" pos="0" type="boolean"/>
<bitfield name="ZNEAR_CLIP_DISABLE" pos="1" type="boolean"/>
<bitfield name="ZFAR_CLIP_DISABLE" pos="2" type="boolean"/>
@@ -826,18 +826,20 @@ by a particular renderpass/blit.
<bitfield name="VP_CLIP_CODE_IGNORE" pos="7" type="boolean"/>
<bitfield name="VP_XFORM_DISABLE" pos="8" type="boolean"/>
<bitfield name="PERSP_DIVISION_DISABLE" pos="9" type="boolean"/>
- </reg32>
+ </bitset>
+
+ <reg32 offset="0x8000" name="GRAS_CL_CNTL" type="a6xx_gras_cl_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
<bitset name="a6xx_gras_xs_clip_cull_distance" inline="yes">
<bitfield name="CLIP_MASK" low="0" high="7"/>
<bitfield name="CULL_MASK" low="8" high="15"/>
</bitset>
- <reg32 offset="0x8001" name="GRAS_CL_VS_CLIP_CULL_DISTANCE" type="a6xx_gras_xs_clip_cull_distance" usage="rp_blit"/>
- <reg32 offset="0x8002" name="GRAS_CL_DS_CLIP_CULL_DISTANCE" type="a6xx_gras_xs_clip_cull_distance" usage="rp_blit"/>
- <reg32 offset="0x8003" name="GRAS_CL_GS_CLIP_CULL_DISTANCE" type="a6xx_gras_xs_clip_cull_distance" usage="rp_blit"/>
- <reg32 offset="0x8004" name="GRAS_CL_ARRAY_SIZE" low="0" high="10" type="uint" usage="rp_blit"/>
+ <reg32 offset="0x8001" name="GRAS_CL_VS_CLIP_CULL_DISTANCE" type="a6xx_gras_xs_clip_cull_distance" usage="rp_blit" variants="A6XX-A7XX" />
+ <reg32 offset="0x8002" name="GRAS_CL_DS_CLIP_CULL_DISTANCE" type="a6xx_gras_xs_clip_cull_distance" usage="rp_blit" variants="A6XX-A7XX" />
+ <reg32 offset="0x8003" name="GRAS_CL_GS_CLIP_CULL_DISTANCE" type="a6xx_gras_xs_clip_cull_distance" usage="rp_blit" variants="A6XX-A7XX" />
+ <reg32 offset="0x8004" name="GRAS_CL_ARRAY_SIZE" low="0" high="10" type="uint" usage="rp_blit" variants="A6XX-A7XX" />
- <reg32 offset="0x8005" name="GRAS_CL_INTERP_CNTL" usage="rp_blit">
+ <bitset name="a6xx_gras_cl_interp_cntl" inline="yes">
<!-- see also RB_INTERP_CNTL -->
<bitfield name="IJ_PERSP_PIXEL" pos="0" type="boolean"/>
<bitfield name="IJ_PERSP_CENTROID" pos="1" type="boolean"/>
@@ -848,26 +850,69 @@ by a particular renderpass/blit.
<bitfield name="COORD_MASK" low="6" high="9" type="hex"/>
<bitfield name="UNK10" pos="10" type="boolean" variants="A7XX-"/>
<bitfield name="UNK11" pos="11" type="boolean" variants="A7XX-"/>
- </reg32>
- <reg32 offset="0x8006" name="GRAS_CL_GUARDBAND_CLIP_ADJ" usage="rp_blit">
+ </bitset>
+
+ <reg32 offset="0x8005" name="GRAS_CL_INTERP_CNTL" type="a6xx_gras_cl_interp_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+
+ <bitset name="a6xx_gras_cl_guardband_clip_adj" inline="true">
<bitfield name="HORZ" low="0" high="8" type="uint"/>
<bitfield name="VERT" low="10" high="18" type="uint"/>
- </reg32>
+ </bitset>
+
+ <reg32 offset="0x8006" name="GRAS_CL_GUARDBAND_CLIP_ADJ" type="a6xx_gras_cl_guardband_clip_adj" variants="A6XX-A7XX" usage="rp_blit"/>
<!-- Something connected to depth-stencil attachment size -->
<reg32 offset="0x8007" name="GRAS_UNKNOWN_8007" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0x8008" name="GRAS_UNKNOWN_8008" variants="A7XX-" usage="cmd"/>
+ <!-- the scale/offset is per view, with up to 6 views -->
+ <bitset name="a6xx_gras_bin_foveat" inline="yes">
+ <bitfield name="BINSCALEEN" pos="6" type="boolean"/>
+ <enum name="a7xx_bin_scale">
+ <value value="0" name="NOSCALE"/>
+ <value value="1" name="SCALE2X"/>
+ <value value="2" name="SCALE4X"/>
+ </enum>
+ <bitfield name="XSCALE_0" low="8" high="9" type="a7xx_bin_scale"/>
+ <bitfield name="YSCALE_0" low="10" high="11" type="a7xx_bin_scale"/>
+ <bitfield name="XSCALE_1" low="12" high="13" type="a7xx_bin_scale"/>
+ <bitfield name="YSCALE_1" low="14" high="15" type="a7xx_bin_scale"/>
+ <bitfield name="XSCALE_2" low="16" high="17" type="a7xx_bin_scale"/>
+ <bitfield name="YSCALE_2" low="18" high="19" type="a7xx_bin_scale"/>
+ <bitfield name="XSCALE_3" low="20" high="21" type="a7xx_bin_scale"/>
+ <bitfield name="YSCALE_3" low="22" high="23" type="a7xx_bin_scale"/>
+ <bitfield name="XSCALE_4" low="24" high="25" type="a7xx_bin_scale"/>
+ <bitfield name="YSCALE_4" low="26" high="27" type="a7xx_bin_scale"/>
+ <bitfield name="XSCALE_5" low="28" high="29" type="a7xx_bin_scale"/>
+ <bitfield name="YSCALE_5" low="30" high="31" type="a7xx_bin_scale"/>
+ </bitset>
- <reg32 offset="0x8009" name="GRAS_UNKNOWN_8009" variants="A7XX-" usage="cmd"/>
- <reg32 offset="0x800a" name="GRAS_UNKNOWN_800A" variants="A7XX-" usage="cmd"/>
- <reg32 offset="0x800b" name="GRAS_UNKNOWN_800B" variants="A7XX-" usage="cmd"/>
- <reg32 offset="0x800c" name="GRAS_UNKNOWN_800C" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0x8008" name="GRAS_BIN_FOVEAT" type="a6xx_gras_bin_foveat" variants="A7XX" usage="cmd"/>
+
+ <reg32 offset="0x8009" name="GRAS_BIN_FOVEAT_OFFSET_0" variants="A7XX-" usage="cmd">
+ <bitfield name="XOFFSET_0" low="0" high="9" shr="2" type="uint"/>
+ <bitfield name="XOFFSET_1" low="10" high="19" shr="2" type="uint"/>
+ <bitfield name="XOFFSET_2" low="20" high="29" shr="2" type="uint"/>
+ </reg32>
+ <reg32 offset="0x800a" name="GRAS_BIN_FOVEAT_OFFSET_1" variants="A7XX-" usage="cmd">
+ <bitfield name="XOFFSET_3" low="0" high="9" shr="2" type="uint"/>
+ <bitfield name="XOFFSET_4" low="10" high="19" shr="2" type="uint"/>
+ <bitfield name="XOFFSET_5" low="20" high="29" shr="2" type="uint"/>
+ </reg32>
+ <reg32 offset="0x800b" name="GRAS_BIN_FOVEAT_OFFSET_2" variants="A7XX-" usage="cmd">
+ <bitfield name="YOFFSET_0" low="0" high="9" shr="2" type="uint"/>
+ <bitfield name="YOFFSET_1" low="10" high="19" shr="2" type="uint"/>
+ <bitfield name="YOFFSET_2" low="20" high="29" shr="2" type="uint"/>
+ </reg32>
+ <reg32 offset="0x800c" name="GRAS_BIN_FOVEAT_OFFSET_3" variants="A7XX-" usage="cmd">
+ <bitfield name="YOFFSET_3" low="0" high="9" shr="2" type="uint"/>
+ <bitfield name="YOFFSET_4" low="10" high="19" shr="2" type="uint"/>
+ <bitfield name="YOFFSET_5" low="20" high="29" shr="2" type="uint"/>
+ </reg32>
<!-- <reg32 offset="0x80f0" name="GRAS_UNKNOWN_80F0" type="a6xx_reg_xy"/> -->
<!-- 0x8006-0x800f invalid -->
- <array offset="0x8010" name="GRAS_CL_VIEWPORT" stride="6" length="16" usage="rp_blit">
+ <array offset="0x8010" name="GRAS_CL_VIEWPORT" stride="6" length="16" variants="A6XX-A7XX" usage="rp_blit">
<reg32 offset="0" name="XOFFSET" type="float"/>
<reg32 offset="1" name="XSCALE" type="float"/>
<reg32 offset="2" name="YOFFSET" type="float"/>
@@ -875,12 +920,13 @@ by a particular renderpass/blit.
<reg32 offset="4" name="ZOFFSET" type="float"/>
<reg32 offset="5" name="ZSCALE" type="float"/>
</array>
- <array offset="0x8070" name="GRAS_CL_VIEWPORT_ZCLAMP" stride="2" length="16" usage="rp_blit">
+
+ <array offset="0x8070" name="GRAS_CL_VIEWPORT_ZCLAMP" stride="2" length="16" variants="A6XX-A7XX" usage="rp_blit">
<reg32 offset="0" name="MIN" type="float"/>
<reg32 offset="1" name="MAX" type="float"/>
</array>
- <reg32 offset="0x8090" name="GRAS_SU_CNTL" usage="rp_blit">
+ <bitset name="a6xx_gras_su_cntl" varset="chip">
<bitfield name="CULL_FRONT" pos="0" type="boolean"/>
<bitfield name="CULL_BACK" pos="1" type="boolean"/>
<bitfield name="FRONT_CW" pos="2" type="boolean"/>
@@ -890,39 +936,66 @@ by a particular renderpass/blit.
<bitfield name="LINE_MODE" pos="13" type="a5xx_line_mode"/>
<bitfield name="UNK15" low="15" high="16"/>
<!--
- On gen1 only MULTIVIEW_ENABLE exists. On gen3 we have
- the ability to add the view index to either the RT array
- index or the viewport index, and it seems that
- MULTIVIEW_ENABLE doesn't do anything, instead we need to
- set at least one of RENDERTARGETINDEXINCR or
- VIEWPORTINDEXINCR to enable multiview. The blob still
- sets MULTIVIEW_ENABLE regardless.
- TODO: what about gen2 (a640)?
+ On gen1 only MULTIVIEW_ENABLE exists. On gen3 we have
+ the ability to add the view index to either the RT array
+ index or the viewport index, and it seems that
+ MULTIVIEW_ENABLE doesn't do anything, instead we need to
+ set at least one of RENDERTARGETINDEXINCR or
+ VIEWPORTINDEXINCR to enable multiview. The blob still
+ sets MULTIVIEW_ENABLE regardless.
+ TODO: what about gen2 (a640)?
-->
<bitfield name="MULTIVIEW_ENABLE" pos="17" type="boolean"/>
- <bitfield name="RENDERTARGETINDEXINCR" pos="18" type="boolean"/>
- <bitfield name="VIEWPORTINDEXINCR" pos="19" type="boolean"/>
- <bitfield name="UNK20" low="20" high="22"/>
- </reg32>
- <reg32 offset="0x8091" name="GRAS_SU_POINT_MINMAX" usage="rp_blit">
+ <bitfield name="RENDERTARGETINDEXINCR" pos="18" type="boolean" variants="A6XX-A7XX"/>
+ <bitfield name="VIEWPORTINDEXINCR" pos="19" type="boolean" variants="A6XX-A7XX"/>
+ <bitfield name="UNK20" low="20" high="22" variants="A6XX-A7XX"/>
+ </bitset>
+ <reg32 offset="0x8090" name="GRAS_SU_CNTL" type="a6xx_gras_su_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+
+ <bitset name="a6xx_gras_su_point_minmax" inline="yes">
<bitfield name="MIN" low="0" high="15" type="ufixed" radix="4"/>
<bitfield name="MAX" low="16" high="31" type="ufixed" radix="4"/>
- </reg32>
- <reg32 offset="0x8092" name="GRAS_SU_POINT_SIZE" low="0" high="15" type="fixed" radix="4" usage="rp_blit"/>
+ </bitset>
+
+ <reg32 offset="0x8091" name="GRAS_SU_POINT_MINMAX" type="a6xx_gras_su_point_minmax" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x8092" name="GRAS_SU_POINT_SIZE" low="0" high="15" type="fixed" radix="4" variants="A6XX-A7XX" usage="rp_blit"/>
+
+ <bitset name="a6xx_gras_su_depth_cntl" inline="yes">
+ <bitfield name="Z_TEST_ENABLE" pos="0" type="boolean"/>
+ </bitset>
+
+ <reg32 offset="0x8114" name="GRAS_SU_DEPTH_CNTL" variants="A6XX-A7XX" type="a6xx_gras_su_depth_cntl" usage="rp_blit"/>
+
+ <bitset name="a6xx_gras_su_stencil_cntl" inline="yes">
+ <bitfield name="STENCIL_ENABLE" pos="0" type="boolean"/>
+ </bitset>
+
+ <reg32 offset="0x8115" name="GRAS_SU_STENCIL_CNTL" type="a6xx_gras_su_stencil_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+
+ <bitset name="a6xx_gras_su_render_cntl" inline="yes">
+ <bitfield name="FS_DISABLE" pos="7" type="boolean"/>
+ </bitset>
+
+ <reg32 offset="0x8116" name="GRAS_SU_RENDER_CNTL" type="a6xx_gras_su_render_cntl" variants="A7XX" usage="rp_blit"/>
+
<!-- 0x8093 invalid -->
- <reg32 offset="0x8094" name="GRAS_SU_DEPTH_PLANE_CNTL" usage="rp_blit">
+ <bitset name="a6xx_depth_plane_cntl" inline="yes">
<bitfield name="Z_MODE" low="0" high="1" type="a6xx_ztest_mode"/>
- </reg32>
- <reg32 offset="0x8095" name="GRAS_SU_POLY_OFFSET_SCALE" type="float" usage="rp_blit"/>
- <reg32 offset="0x8096" name="GRAS_SU_POLY_OFFSET_OFFSET" type="float" usage="rp_blit"/>
- <reg32 offset="0x8097" name="GRAS_SU_POLY_OFFSET_OFFSET_CLAMP" type="float" usage="rp_blit"/>
- <!-- duplicates RB_DEPTH_BUFFER_INFO: -->
- <reg32 offset="0x8098" name="GRAS_SU_DEPTH_BUFFER_INFO" usage="rp_blit">
+ </bitset>
+
+ <reg32 offset="0x8094" name="GRAS_SU_DEPTH_PLANE_CNTL" type="a6xx_depth_plane_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x8095" name="GRAS_SU_POLY_OFFSET_SCALE" type="float" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x8096" name="GRAS_SU_POLY_OFFSET_OFFSET" type="float" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x8097" name="GRAS_SU_POLY_OFFSET_OFFSET_CLAMP" type="float" variants="A6XX-A7XX" usage="rp_blit"/>
+ <bitset name="a6xx_depth_buffer_info" inline="yes">
<bitfield name="DEPTH_FORMAT" low="0" high="2" type="a6xx_depth_format"/>
<bitfield name="UNK3" pos="3"/>
- </reg32>
+ </bitset>
- <reg32 offset="0x8099" name="GRAS_SU_CONSERVATIVE_RAS_CNTL" usage="cmd">
+ <!-- duplicates RB_DEPTH_BUFFER_INFO: -->
+ <reg32 offset="0x8098" name="GRAS_SU_DEPTH_BUFFER_INFO" type="a6xx_depth_buffer_info" variants="A6XX-A7XX" usage="rp_blit"/>
+
+ <bitset name="a6xx_gras_su_conservative_ras_cntl" inline="yes">
<bitfield name="CONSERVATIVERASEN" pos="0" type="boolean"/>
<enum name="a6xx_shift_amount">
<value value="0" name="NO_SHIFT"/>
@@ -932,7 +1005,10 @@ by a particular renderpass/blit.
<bitfield name="SHIFTAMOUNT" low="1" high="2" type="a6xx_shift_amount"/>
<bitfield name="INNERCONSERVATIVERASEN" pos="3" type="boolean"/>
<bitfield name="UNK4" low="4" high="5"/>
- </reg32>
+ </bitset>
+
+ <reg32 offset="0x8099" name="GRAS_SU_CONSERVATIVE_RAS_CNTL" type="a6xx_gras_su_conservative_ras_cntl" variants="A6XX-A7XX" usage="cmd"/>
+
<reg32 offset="0x809a" name="GRAS_SU_PATH_RENDERING_CNTL">
<bitfield name="UNK0" pos="0" type="boolean"/>
<bitfield name="LINELENGTHEN" pos="1" type="boolean"/>
@@ -942,10 +1018,13 @@ by a particular renderpass/blit.
<bitfield name="WRITES_LAYER" pos="0" type="boolean"/>
<bitfield name="WRITES_VIEW" pos="1" type="boolean"/>
</bitset>
- <reg32 offset="0x809b" name="GRAS_SU_VS_SIV_CNTL" type="a6xx_gras_us_xs_siv_cntl" usage="rp_blit"/>
- <reg32 offset="0x809c" name="GRAS_SU_GS_SIV_CNTL" type="a6xx_gras_us_xs_siv_cntl" usage="rp_blit"/>
- <reg32 offset="0x809d" name="GRAS_SU_DS_SIV_CNTL" type="a6xx_gras_us_xs_siv_cntl" usage="rp_blit"/>
- <!-- 0x809e/0x809f invalid -->
+ <reg32 offset="0x809b" name="GRAS_SU_VS_SIV_CNTL" type="a6xx_gras_us_xs_siv_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x809c" name="GRAS_SU_GS_SIV_CNTL" type="a6xx_gras_us_xs_siv_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x809d" name="GRAS_SU_DS_SIV_CNTL" type="a6xx_gras_us_xs_siv_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+
+ <bitset name="a6xx_rast_cntl" inline="yes">
+ <bitfield name="MODE" low="0" high="1" type="a6xx_polygon_mode"/>
+ </bitset>
<enum name="a6xx_sequenced_thread_dist">
<value value="0x0" name="DIST_SCREEN_COORD"/>
@@ -993,7 +1072,7 @@ by a particular renderpass/blit.
<value value="0x3" name="RB_BT"/>
</enum>
- <reg32 offset="0x80a0" name="GRAS_SC_CNTL" usage="rp_blit">
+ <bitset name="a6xx_gras_sc_cntl" inline="yes">
<bitfield name="CCUSINGLECACHELINESIZE" low="0" high="2"/>
<bitfield name="SINGLE_PRIM_MODE" low="3" high="4" type="a6xx_single_prim_mode"/>
<bitfield name="RASTER_MODE" pos="5" type="a6xx_raster_mode"/>
@@ -1003,7 +1082,9 @@ by a particular renderpass/blit.
<bitfield name="UNK9" pos="9" type="boolean"/>
<bitfield name="ROTATION" low="10" high="11" type="uint"/>
<bitfield name="EARLYVIZOUTEN" pos="12" type="boolean"/>
- </reg32>
+ </bitset>
+
+ <reg32 offset="0x80a0" name="GRAS_SC_CNTL" type="a6xx_gras_sc_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
<enum name="a6xx_render_mode">
<value value="0x0" name="RENDERING_PASS"/>
@@ -1024,7 +1105,7 @@ by a particular renderpass/blit.
<value value="0x4" name="LRZ_FEEDBACK_LATE_Z"/>
</enum>
- <reg32 offset="0x80a1" name="GRAS_SC_BIN_CNTL" usage="rp_blit">
+ <bitset name="a6xx_bin_cntl" inline="yes">
<bitfield name="BINW" low="0" high="5" shr="5" type="uint"/>
<bitfield name="BINH" low="8" high="14" shr="4" type="uint"/>
<bitfield name="RENDER_MODE" low="18" high="20" type="a6xx_render_mode"/>
@@ -1037,18 +1118,25 @@ by a particular renderpass/blit.
In sysmem mode GRAS_LRZ_CNTL.LRZ_WRITE is not considered.
</doc>
<bitfield name="LRZ_FEEDBACK_ZMODE_MASK" low="24" high="26" type="a6xx_lrz_feedback_mask"/>
- <bitfield name="UNK27" pos="27"/>
- </reg32>
+ <bitfield name="FORCE_LRZ_DIS" pos="27" type="boolean"/>
+ </bitset>
+
+ <reg32 offset="0x80a1" name="GRAS_SC_BIN_CNTL" type="a6xx_bin_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
- <reg32 offset="0x80a2" name="GRAS_SC_RAS_MSAA_CNTL" usage="rp_blit">
+ <bitset name="a6xx_gras_sc_ras_msaa_cntl" inline="yes">
<bitfield name="SAMPLES" low="0" high="1" type="a3xx_msaa_samples"/>
<bitfield name="UNK2" pos="2"/>
<bitfield name="UNK3" pos="3"/>
- </reg32>
- <reg32 offset="0x80a3" name="GRAS_SC_DEST_MSAA_CNTL" usage="rp_blit">
+ </bitset>
+
+ <reg32 offset="0x80a2" name="GRAS_SC_RAS_MSAA_CNTL" type="a6xx_gras_sc_ras_msaa_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+
+ <bitset name="a6xx_gras_sc_dest_msaa_cntl" inline="yes">
<bitfield name="SAMPLES" low="0" high="1" type="a3xx_msaa_samples"/>
<bitfield name="MSAA_DISABLE" pos="2" type="boolean"/>
- </reg32>
+ </bitset>
+
+ <reg32 offset="0x80a3" name="GRAS_SC_DEST_MSAA_CNTL" type="a6xx_gras_sc_dest_msaa_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
<bitset name="a6xx_msaa_sample_pos_cntl" inline="yes">
<bitfield name="UNK0" pos="0"/>
@@ -1066,30 +1154,35 @@ by a particular renderpass/blit.
<bitfield name="SAMPLE_3_Y" low="28" high="31" radix="4" type="fixed"/>
</bitset>
- <reg32 offset="0x80a4" name="GRAS_SC_MSAA_SAMPLE_POS_CNTL" type="a6xx_msaa_sample_pos_cntl" usage="rp_blit"/>
- <reg32 offset="0x80a5" name="GRAS_SC_PROGRAMMABLE_MSAA_POS_0" type="a6xx_programmable_msaa_pos" usage="rp_blit"/>
- <reg32 offset="0x80a6" name="GRAS_SC_PROGRAMMABLE_MSAA_POS_1" type="a6xx_programmable_msaa_pos" usage="rp_blit"/>
+ <reg32 offset="0x80a4" name="GRAS_SC_MSAA_SAMPLE_POS_CNTL" type="a6xx_msaa_sample_pos_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x80a5" name="GRAS_SC_PROGRAMMABLE_MSAA_POS_0" type="a6xx_programmable_msaa_pos" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x80a6" name="GRAS_SC_PROGRAMMABLE_MSAA_POS_1" type="a6xx_programmable_msaa_pos" variants="A6XX-A7XX" usage="rp_blit"/>
- <reg32 offset="0x80a7" name="GRAS_UNKNOWN_80A7" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0x80a7" name="GRAS_ROTATION_CNTL" variants="A7XX" usage="cmd"/>
- <!-- 0x80a7-0x80ae invalid -->
- <reg32 offset="0x80af" name="GRAS_UNKNOWN_80AF" pos="0" usage="cmd"/>
+ <bitset name="a6xx_screen_scissor_cntl" inline="yes">
+ <bitfield name="SCISSOR_DISABLE" pos="0" type="boolean"/>
+ </bitset>
+
+ <reg32 offset="0x80af" name="GRAS_SC_SCREEN_SCISSOR_CNTL" type="a6xx_screen_scissor_cntl" variants="A6XX-A7XX" pos="0" usage="cmd"/>
<bitset name="a6xx_scissor_xy" inline="yes">
<bitfield name="X" low="0" high="15" type="uint"/>
<bitfield name="Y" low="16" high="31" type="uint"/>
</bitset>
- <array offset="0x80b0" name="GRAS_SC_SCREEN_SCISSOR" stride="2" length="16" usage="rp_blit">
+
+ <array offset="0x80b0" name="GRAS_SC_SCREEN_SCISSOR" stride="2" length="16" variants="A6XX-A7XX" usage="rp_blit">
<reg32 offset="0" name="TL" type="a6xx_scissor_xy"/>
<reg32 offset="1" name="BR" type="a6xx_scissor_xy"/>
</array>
- <array offset="0x80d0" name="GRAS_SC_VIEWPORT_SCISSOR" stride="2" length="16" usage="rp_blit">
+
+ <array offset="0x80d0" name="GRAS_SC_VIEWPORT_SCISSOR" stride="2" length="16" variants="A6XX-A7XX" usage="rp_blit">
<reg32 offset="0" name="TL" type="a6xx_scissor_xy"/>
<reg32 offset="1" name="BR" type="a6xx_scissor_xy"/>
</array>
- <reg32 offset="0x80f0" name="GRAS_SC_WINDOW_SCISSOR_TL" type="a6xx_reg_xy" usage="rp_blit"/>
- <reg32 offset="0x80f1" name="GRAS_SC_WINDOW_SCISSOR_BR" type="a6xx_reg_xy" usage="rp_blit"/>
+ <reg32 offset="0x80f0" name="GRAS_SC_WINDOW_SCISSOR_TL" type="a6xx_reg_xy" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x80f1" name="GRAS_SC_WINDOW_SCISSOR_BR" type="a6xx_reg_xy" variants="A6XX-A7XX" usage="rp_blit"/>
<enum name="a6xx_fsr_combiner">
<value value="0" name="FSR_COMBINER_OP_KEEP"/>
@@ -1099,7 +1192,7 @@ by a particular renderpass/blit.
<value value="4" name="FSR_COMBINER_OP_MUL"/>
</enum>
- <reg32 offset="0x80f4" name="GRAS_VRS_CONFIG" variants="A7XX-" usage="rp_blit">
+ <bitset name="a6xx_gras_vrs_config">
<bitfield name="PIPELINE_FSR_ENABLE" pos="0" type="boolean"/>
<bitfield name="FRAG_SIZE_X" low="1" high="2" type="uint"/>
<bitfield name="FRAG_SIZE_Y" low="3" high="4" type="uint"/>
@@ -1107,20 +1200,32 @@ by a particular renderpass/blit.
<bitfield name="COMBINER_OP_2" low="8" high="10" type="a6xx_fsr_combiner"/>
<bitfield name="ATTACHMENT_FSR_ENABLE" pos="13" type="boolean"/>
<bitfield name="PRIMITIVE_FSR_ENABLE" pos="20" type="boolean"/>
- </reg32>
- <reg32 offset="0x80f5" name="GRAS_QUALITY_BUFFER_INFO" variants="A7XX-" usage="rp_blit">
+ </bitset>
+
+ <reg32 offset="0x80f4" name="GRAS_VRS_CONFIG" type="a6xx_gras_vrs_config" variants="A7XX" usage="rp_blit"/>
+
+ <bitset name="a6xx_gras_quality_buffer_info" inline="yes">
<bitfield name="LAYERED" pos="0" type="boolean"/>
<bitfield name="TILE_MODE" low="1" high="2" type="a6xx_tile_mode"/>
- </reg32>
- <reg32 offset="0x80f6" name="GRAS_QUALITY_BUFFER_DIMENSION" variants="A7XX-" usage="rp_blit">
+ </bitset>
+
+ <reg32 offset="0x80f5" name="GRAS_QUALITY_BUFFER_INFO" type="a6xx_gras_quality_buffer_info" variants="A7XX" usage="rp_blit"/>
+
+ <bitset name="a6xx_gras_quality_buffer_dimension" inline="yes">
<bitfield name="WIDTH" low="0" high="15" type="uint"/>
<bitfield name="HEIGHT" low="16" high="31" type="uint"/>
- </reg32>
- <reg64 offset="0x80f8" name="GRAS_QUALITY_BUFFER_BASE" variants="A7XX-" type="waddress" usage="rp_blit"/>
- <reg32 offset="0x80fa" name="GRAS_QUALITY_BUFFER_PITCH" variants="A7XX-" usage="rp_blit">
+ </bitset>
+
+ <reg32 offset="0x80f6" name="GRAS_QUALITY_BUFFER_DIMENSION" type="a6xx_gras_quality_buffer_dimension" variants="A7XX" usage="rp_blit"/>
+
+ <reg64 offset="0x80f8" name="GRAS_QUALITY_BUFFER_BASE" variants="A7XX" type="waddress" usage="rp_blit"/>
+
+ <bitset name="a6xx_gras_quality_buffer_pitch" inline="yes">
<bitfield name="PITCH" shr="6" low="0" high="7" type="uint"/>
<bitfield name="ARRAY_PITCH" shr="6" low="10" high="28" type="uint"/>
- </reg32>
+ </bitset>
+
+ <reg32 offset="0x80fa" name="GRAS_QUALITY_BUFFER_PITCH" type="a6xx_gras_quality_buffer_pitch" variants="A7XX" usage="rp_blit"/>
<enum name="a6xx_lrz_dir_status">
<value value="0x1" name="LRZ_DIR_LE"/>
@@ -1128,7 +1233,7 @@ by a particular renderpass/blit.
<value value="0x3" name="LRZ_DIR_INVALID"/>
</enum>
- <reg32 offset="0x8100" name="GRAS_LRZ_CNTL" usage="rp_blit">
+ <bitset name="a6xx_gras_lrz_cntl" inline="yes">
<bitfield name="ENABLE" pos="0" type="boolean"/>
<doc>LRZ write also disabled for blend/etc.</doc>
<bitfield name="LRZ_WRITE" pos="1" type="boolean"/>
@@ -1155,26 +1260,36 @@ by a particular renderpass/blit.
</doc>
<bitfield name="DISABLE_ON_WRONG_DIR" pos="9" type="boolean" variants="A6XX"/>
<bitfield name="Z_FUNC" low="11" high="13" type="adreno_compare_func" variants="A7XX-"/>
- </reg32>
+ </bitset>
+
+ <reg32 offset="0x8100" name="GRAS_LRZ_CNTL" type="a6xx_gras_lrz_cntl" usage="rp_blit" variants="A6XX-A7XX"/>
<enum name="a6xx_fragcoord_sample_mode">
<value value="0" name="FRAGCOORD_CENTER"/>
<value value="3" name="FRAGCOORD_SAMPLE"/>
</enum>
- <reg32 offset="0x8101" name="GRAS_LRZ_PS_INPUT_CNTL" low="0" high="2" usage="rp_blit">
+ <bitset name="a6xx_gras_lrz_ps_input_cntl" inline="yes">
<bitfield name="SAMPLEID" pos="0" type="boolean"/>
<bitfield name="FRAGCOORDSAMPLEMODE" low="1" high="2" type="a6xx_fragcoord_sample_mode"/>
- </reg32>
+ </bitset>
+
+ <reg32 offset="0x8101" name="GRAS_LRZ_PS_INPUT_CNTL" type="a6xx_gras_lrz_ps_input_cntl" usage="rp_blit" variants="A6XX-A7XX"/>
- <reg32 offset="0x8102" name="GRAS_LRZ_MRT_BUFFER_INFO_0" usage="rp_blit">
+ <bitset name="a6xx_gras_lrz_mrt_buffer_info_0" inline="yes">
<bitfield name="COLOR_FORMAT" low="0" high="7" type="a6xx_format"/>
- </reg32>
- <reg64 offset="0x8103" name="GRAS_LRZ_BUFFER_BASE" align="256" type="waddress" usage="rp_blit"/>
- <reg32 offset="0x8105" name="GRAS_LRZ_BUFFER_PITCH" usage="rp_blit">
+ </bitset>
+
+ <reg32 offset="0x8102" name="GRAS_LRZ_MRT_BUFFER_INFO_0" type="a6xx_gras_lrz_mrt_buffer_info_0" usage="rp_blit" variants="A6XX-A7XX"/>
+
+ <reg64 offset="0x8103" name="GRAS_LRZ_BUFFER_BASE" align="256" type="waddress" usage="rp_blit" variants="A6XX-A7XX"/>
+
+ <bitset name="a6xx_gras_lrz_buffer_pitch" inline="yes">
<bitfield name="PITCH" low="0" high="7" shr="5" type="uint"/>
<bitfield name="ARRAY_PITCH" low="10" high="28" shr="8" type="uint"/>
- </reg32>
+ </bitset>
+
+ <reg32 offset="0x8105" name="GRAS_LRZ_BUFFER_PITCH" type="a6xx_gras_lrz_buffer_pitch" usage="rp_blit" variants="A6XX-A7XX"/>
<!--
The LRZ "fast clear" buffer is initialized to zero's by blob, and
@@ -1207,7 +1322,6 @@ by a particular renderpass/blit.
not.
-->
<reg64 offset="0x8106" name="GRAS_LRZ_FAST_CLEAR_BUFFER_BASE" align="64" type="waddress" usage="rp_blit"/>
- <!-- 0x8108 invalid -->
<reg32 offset="0x8109" name="GRAS_LRZ_PS_SAMPLEFREQ_CNTL" usage="rp_blit">
<bitfield name="PER_SAMP_MODE" pos="0" type="boolean"/>
</reg32>
@@ -1232,19 +1346,20 @@ by a particular renderpass/blit.
<!-- 0x810c-0x810f invalid -->
- <reg32 offset="0x8110" name="GRAS_UNKNOWN_8110" low="0" high="1" usage="cmd"/>
+ <reg32 offset="0x8110" name="GRAS_MODE_CNTL" low="0" high="1" variants="A6XX-A7XX" usage="cmd"/>
<!-- A bit tentative but it's a color and it is followed by LRZ_CLEAR -->
- <reg32 offset="0x8111" name="GRAS_LRZ_DEPTH_CLEAR" type="float" variants="A7XX-"/>
+ <reg32 offset="0x8111" name="GRAS_LRZ_DEPTH_CLEAR" type="float" variants="A7XX"/>
- <reg32 offset="0x8113" name="GRAS_LRZ_DEPTH_BUFFER_INFO" variants="A7XX-" usage="rp_blit">
+ <bitset name="a6xx_gras_lrz_depth_buffer_info" inline="yes">
<bitfield name="DEPTH_FORMAT" low="0" high="2" type="a6xx_depth_format"/>
<bitfield name="UNK3" pos="3"/>
- </reg32>
+ </bitset>
+
+ <reg32 offset="0x8113" name="GRAS_LRZ_DEPTH_BUFFER_INFO" type="a6xx_gras_lrz_depth_buffer_info" variants="A7XX" usage="rp_blit"/>
- <!-- Always written together and always equal 09510840 00000a62 -->
- <reg32 offset="0x8120" name="GRAS_UNKNOWN_8120" variants="A7XX-" usage="cmd"/>
- <reg32 offset="0x8121" name="GRAS_UNKNOWN_8121" variants="A7XX-" usage="cmd"/>
+ <doc>LUT used to convert quality buffer values to HW shading rate values. An array of 4-bit values.</doc>
+ <array offset="0x8120" name="GRAS_LRZ_QUALITY_LOOKUP_TABLE" variants="A7XX-" stride="1" length="2"/>
<!-- 0x8112-0x83ff invalid -->
@@ -1269,28 +1384,29 @@ by a particular renderpass/blit.
<bitfield name="D24S8" pos="19" type="boolean"/>
<!-- some sort of channel mask, disabled channels are set to zero ? -->
<bitfield name="MASK" low="20" high="23"/>
- <bitfield name="IFMT" low="24" high="28" type="a6xx_2d_ifmt"/>
+ <bitfield name="IFMT" low="24" high="26" type="a6xx_2d_ifmt"/>
+ <bitfield name="UNK27" pos="27" type="boolean"/>
+ <bitfield name="UNK28" pos="28" type="boolean"/>
<bitfield name="RASTER_MODE" pos="29" type="a6xx_raster_mode"/>
- <bitfield name="UNK30" pos="30" type="boolean" variants="A7XX-"/>
+ <bitfield name="COPY" pos="30" type="boolean" variants="A7XX-"/>
</bitset>
- <reg32 offset="0x8400" name="GRAS_A2D_BLT_CNTL" type="a6xx_a2d_bit_cntl" usage="rp_blit"/>
+ <reg32 offset="0x8400" name="GRAS_A2D_BLT_CNTL" type="a6xx_a2d_bit_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
<!-- note: the low 8 bits for src coords are valid, probably fixed point
it would be a bit weird though, since we subtract 1 from BR coords
apparently signed, gallium driver uses negative coords and it works?
-->
- <reg32 offset="0x8401" name="GRAS_A2D_SRC_XMIN" low="8" high="24" type="int" usage="rp_blit"/>
- <reg32 offset="0x8402" name="GRAS_A2D_SRC_XMAX" low="8" high="24" type="int" usage="rp_blit"/>
- <reg32 offset="0x8403" name="GRAS_A2D_SRC_YMIN" low="8" high="24" type="int" usage="rp_blit"/>
- <reg32 offset="0x8404" name="GRAS_A2D_SRC_YMAX" low="8" high="24" type="int" usage="rp_blit"/>
- <reg32 offset="0x8405" name="GRAS_A2D_DEST_TL" type="a6xx_reg_xy" usage="rp_blit"/>
- <reg32 offset="0x8406" name="GRAS_A2D_DEST_BR" type="a6xx_reg_xy" usage="rp_blit"/>
+ <reg32 offset="0x8401" name="GRAS_A2D_SRC_XMIN" low="8" high="24" type="int" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x8402" name="GRAS_A2D_SRC_XMAX" low="8" high="24" type="int" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x8403" name="GRAS_A2D_SRC_YMIN" low="8" high="24" type="int" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x8404" name="GRAS_A2D_SRC_YMAX" low="8" high="24" type="int" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x8405" name="GRAS_A2D_DEST_TL" type="a6xx_reg_xy" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x8406" name="GRAS_A2D_DEST_BR" type="a6xx_reg_xy" variants="A6XX-A7XX" usage="rp_blit"/>
<reg32 offset="0x8407" name="GRAS_2D_UNKNOWN_8407" low="0" high="31"/>
<reg32 offset="0x8408" name="GRAS_2D_UNKNOWN_8408" low="0" high="31"/>
<reg32 offset="0x8409" name="GRAS_2D_UNKNOWN_8409" low="0" high="31"/>
- <reg32 offset="0x840a" name="GRAS_A2D_SCISSOR_TL" type="a6xx_reg_xy" usage="rp_blit"/>
- <reg32 offset="0x840b" name="GRAS_A2D_SCISSOR_BR" type="a6xx_reg_xy" usage="rp_blit"/>
- <!-- 0x840c-0x85ff invalid -->
+ <reg32 offset="0x840a" name="GRAS_A2D_SCISSOR_TL" type="a6xx_reg_xy" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x840b" name="GRAS_A2D_SCISSOR_BR" type="a6xx_reg_xy" variants="A6XX-A7XX" usage="rp_blit"/>
<!-- always 0x880 ? (and 0 in a640/a650 traces?) -->
<reg32 offset="0x8600" name="GRAS_DBG_ECO_CNTL" usage="cmd">
@@ -1308,22 +1424,7 @@ by a particular renderpass/blit.
-->
<!-- same as GRAS_BIN_CONTROL, but without bit 27: -->
- <reg32 offset="0x8800" name="RB_CNTL" variants="A6XX" usage="rp_blit">
- <bitfield name="BINW" low="0" high="5" shr="5" type="uint"/>
- <bitfield name="BINH" low="8" high="14" shr="4" type="uint"/>
- <bitfield name="RENDER_MODE" low="18" high="20" type="a6xx_render_mode"/>
- <bitfield name="FORCE_LRZ_WRITE_DIS" pos="21" type="boolean"/>
- <bitfield name="BUFFERS_LOCATION" low="22" high="23" type="a6xx_buffers_location"/>
- <bitfield name="LRZ_FEEDBACK_ZMODE_MASK" low="24" high="26" type="a6xx_lrz_feedback_mask"/>
- </reg32>
-
- <reg32 offset="0x8800" name="RB_CNTL" variants="A7XX-" usage="rp_blit">
- <bitfield name="BINW" low="0" high="5" shr="5" type="uint"/>
- <bitfield name="BINH" low="8" high="14" shr="4" type="uint"/>
- <bitfield name="RENDER_MODE" low="18" high="20" type="a6xx_render_mode"/>
- <bitfield name="FORCE_LRZ_WRITE_DIS" pos="21" type="boolean"/>
- <bitfield name="LRZ_FEEDBACK_ZMODE_MASK" low="24" high="26" type="a6xx_lrz_feedback_mask"/>
- </reg32>
+ <reg32 offset="0x8800" name="RB_CNTL" variants="A6XX-A7XX" type="a6xx_bin_cntl" usage="rp_blit"/>
<reg32 offset="0x8801" name="RB_RENDER_CNTL" variants="A6XX" usage="rp_blit">
<bitfield name="CCUSINGLECACHELINESIZE" low="3" high="5"/>
@@ -1347,9 +1448,6 @@ by a particular renderpass/blit.
<bitfield name="CONSERVATIVERASEN" pos="11" type="boolean"/>
<bitfield name="INNERCONSERVATIVERASEN" pos="12" type="boolean"/>
</reg32>
- <reg32 offset="0x8116" name="GRAS_SU_RENDER_CNTL" variants="A7XX-" usage="rp_blit">
- <bitfield name="FS_DISABLE" pos="7" type="boolean"/>
- </reg32>
<reg32 offset="0x8802" name="RB_RAS_MSAA_CNTL" usage="rp_blit">
<bitfield name="SAMPLES" low="0" high="1" type="a3xx_msaa_samples"/>
@@ -1516,9 +1614,7 @@ by a particular renderpass/blit.
<bitfield name="SAMPLE_MASK" low="16" high="31"/>
</reg32>
<!-- 0x8866-0x886f invalid -->
- <reg32 offset="0x8870" name="RB_DEPTH_PLANE_CNTL" usage="rp_blit">
- <bitfield name="Z_MODE" low="0" high="1" type="a6xx_ztest_mode"/>
- </reg32>
+ <reg32 offset="0x8870" name="RB_DEPTH_PLANE_CNTL" type="a6xx_depth_plane_cntl" usage="rp_blit"/>
<reg32 offset="0x8871" name="RB_DEPTH_CNTL" usage="rp_blit">
<bitfield name="Z_TEST_ENABLE" pos="0" type="boolean"/>
@@ -1532,14 +1628,9 @@ by a particular renderpass/blit.
<bitfield name="Z_READ_ENABLE" pos="6" type="boolean"/>
<bitfield name="Z_BOUNDS_ENABLE" pos="7" type="boolean"/>
</reg32>
- <reg32 offset="0x8114" name="GRAS_SU_DEPTH_CNTL" usage="rp_blit">
- <bitfield name="Z_TEST_ENABLE" pos="0" type="boolean"/>
- </reg32>
+
<!-- duplicates GRAS_SU_DEPTH_BUFFER_INFO: -->
- <reg32 offset="0x8872" name="RB_DEPTH_BUFFER_INFO" variants="A6XX" usage="rp_blit">
- <bitfield name="DEPTH_FORMAT" low="0" high="2" type="a6xx_depth_format"/>
- <bitfield name="UNK3" low="3" high="4"/>
- </reg32>
+ <reg32 offset="0x8872" name="RB_DEPTH_BUFFER_INFO" variants="A6XX" type="a6xx_depth_buffer_info" usage="rp_blit"/>
<!-- first 4 bits duplicates GRAS_SU_DEPTH_BUFFER_INFO -->
<reg32 offset="0x8872" name="RB_DEPTH_BUFFER_INFO" variants="A7XX-" usage="rp_blit">
<bitfield name="DEPTH_FORMAT" low="0" high="2" type="a6xx_depth_format"/>
@@ -1575,9 +1666,7 @@ by a particular renderpass/blit.
<bitfield name="ZPASS_BF" low="26" high="28" type="adreno_stencil_op"/>
<bitfield name="ZFAIL_BF" low="29" high="31" type="adreno_stencil_op"/>
</reg32>
- <reg32 offset="0x8115" name="GRAS_SU_STENCIL_CNTL" usage="rp_blit">
- <bitfield name="STENCIL_ENABLE" pos="0" type="boolean"/>
- </reg32>
+
<reg32 offset="0x8881" name="RB_STENCIL_BUFFER_INFO" variants="A6XX" usage="rp_blit">
<bitfield name="SEPARATE_STENCIL" pos="0" type="boolean"/>
<bitfield name="UNK1" pos="1" type="boolean"/>
@@ -1616,8 +1705,9 @@ by a particular renderpass/blit.
<reg32 offset="0x8899" name="RB_UNKNOWN_8899" variants="A7XX-" usage="cmd"/>
<!-- 0x8899-0x88bf invalid -->
<!-- clamps depth value for depth test/write -->
- <reg32 offset="0x88c0" name="RB_VIEWPORT_ZCLAMP_MIN" type="float" usage="rp_blit"/>
- <reg32 offset="0x88c1" name="RB_VIEWPORT_ZCLAMP_MAX" type="float" usage="rp_blit"/>
+ <reg32 offset="0x88c0" name="RB_VIEWPORT_ZCLAMP_MIN" type="float" usage="rp_blit" variants="A6XX-A7XX"/>
+ <reg32 offset="0x88c1" name="RB_VIEWPORT_ZCLAMP_MAX" type="float" usage="rp_blit" variants="A6XX-A7XX"/>
+
<!-- 0x88c2-0x88cf invalid-->
<reg32 offset="0x88d0" name="RB_RESOLVE_CNTL_0" usage="rp_blit">
<bitfield name="UNK0" low="0" high="12"/>
@@ -1626,7 +1716,7 @@ by a particular renderpass/blit.
<reg32 offset="0x88d1" name="RB_RESOLVE_CNTL_1" type="a6xx_reg_xy" usage="rp_blit"/>
<reg32 offset="0x88d2" name="RB_RESOLVE_CNTL_2" type="a6xx_reg_xy" usage="rp_blit"/>
<!-- weird to duplicate other regs from same block?? -->
- <reg32 offset="0x88d3" name="RB_RESOLVE_CNTL_3" usage="rp_blit">
+ <reg32 offset="0x88d3" name="RB_RESOLVE_CNTL_3" variants="A6XX-A7XX" usage="rp_blit">
<bitfield name="BINW" low="0" high="5" shr="5" type="uint"/>
<bitfield name="BINH" low="8" high="14" shr="4" type="uint"/>
</reg32>
@@ -1650,10 +1740,13 @@ by a particular renderpass/blit.
<!-- array-pitch is size of layer -->
<reg32 offset="0x88db" name="RB_RESOLVE_SYSTEM_BUFFER_ARRAY_PITCH" low="0" high="28" shr="6" type="uint" usage="rp_blit"/>
<reg64 offset="0x88dc" name="RB_RESOLVE_SYSTEM_FLAG_BUFFER_BASE" type="waddress" align="64" usage="rp_blit"/>
- <reg32 offset="0x88de" name="RB_RESOLVE_SYSTEM_FLAG_BUFFER_PITCH" usage="rp_blit">
+
+ <bitset name="a6xx_flag_buffer_pitch" inline="yes">
<bitfield name="PITCH" low="0" high="10" shr="6" type="uint"/>
- <bitfield name="ARRAY_PITCH" low="11" high="27" shr="7" type="uint"/>
- </reg32>
+ <bitfield name="ARRAY_PITCH" low="11" high="28" shr="7" type="uint"/>
+ </bitset>
+
+ <reg32 offset="0x88de" name="RB_RESOLVE_SYSTEM_FLAG_BUFFER_PITCH" type="a6xx_flag_buffer_pitch" usage="rp_blit"/>
<reg32 offset="0x88df" name="RB_RESOLVE_CLEAR_COLOR_DW0" usage="rp_blit"/>
<reg32 offset="0x88e0" name="RB_RESOLVE_CLEAR_COLOR_DW1" usage="rp_blit"/>
@@ -1726,10 +1819,7 @@ by a particular renderpass/blit.
<reg32 offset="0x88f0" name="RB_UNKNOWN_88F0" low="0" high="11" usage="cmd"/>
<!-- could be for separate stencil? (or may not be a flag buffer at all) -->
<reg64 offset="0x88f1" name="RB_UNK_FLAG_BUFFER_BASE" type="waddress" align="64"/>
- <reg32 offset="0x88f3" name="RB_UNK_FLAG_BUFFER_PITCH">
- <bitfield name="PITCH" low="0" high="10" shr="6" type="uint"/>
- <bitfield name="ARRAY_PITCH" low="11" high="23" shr="7" type="uint"/>
- </reg32>
+ <reg32 offset="0x88f3" name="RB_UNK_FLAG_BUFFER_PITCH" type="a6xx_flag_buffer_pitch"/>
<reg32 offset="0x88f4" name="RB_VRS_CONFIG" usage="rp_blit">
<bitfield name="UNK2" pos="2" type="boolean"/>
@@ -1737,8 +1827,9 @@ by a particular renderpass/blit.
<bitfield name="ATTACHMENT_FSR_ENABLE" pos="5" type="boolean"/>
<bitfield name="PRIMITIVE_FSR_ENABLE" pos="18" type="boolean"/>
</reg32>
- <!-- Connected to VK_EXT_fragment_density_map? -->
- <reg32 offset="0x88f5" name="RB_UNKNOWN_88F5" variants="A7XX-"/>
+ <reg32 offset="0x88f5" name="RB_BIN_FOVEAT" variants="A7XX-" usage="cmd">
+ <bitfield name="BINSCALEEN" pos="6" type="boolean"/>
+ </reg32>
<!-- 0x88f6-0x88ff invalid -->
<reg64 offset="0x8900" name="RB_DEPTH_FLAG_BUFFER_BASE" type="waddress" align="64" usage="rp_blit"/>
<reg32 offset="0x8902" name="RB_DEPTH_FLAG_BUFFER_PITCH" usage="rp_blit">
@@ -1747,12 +1838,10 @@ by a particular renderpass/blit.
<bitfield name="UNK8" low="8" high="10"/>
<bitfield name="ARRAY_PITCH" low="11" high="27" shr="7" type="uint"/>
</reg32>
+
<array offset="0x8903" name="RB_COLOR_FLAG_BUFFER" stride="3" length="8" usage="rp_blit">
<reg64 offset="0" name="ADDR" type="waddress" align="64"/>
- <reg32 offset="2" name="PITCH">
- <bitfield name="PITCH" low="0" high="10" shr="6" type="uint"/>
- <bitfield name="ARRAY_PITCH" low="11" high="28" shr="7" type="uint"/>
- </reg32>
+ <reg32 offset="2" name="PITCH" type="a6xx_flag_buffer_pitch"/>
</array>
<!-- 0x891b-0x8926 invalid -->
<doc>
@@ -1815,7 +1904,7 @@ by a particular renderpass/blit.
<reg64 offset="0x8c1e" name="RB_A2D_DEST_BUFFER_BASE_2" type="waddress" align="64" usage="rp_blit"/>
<reg64 offset="0x8c20" name="RB_A2D_DEST_FLAG_BUFFER_BASE" type="waddress" align="64" usage="rp_blit"/>
- <reg32 offset="0x8c22" name="RB_A2D_DEST_FLAG_BUFFER_PITCH" low="0" high="7" shr="6" type="uint" usage="rp_blit"/>
+ <reg32 offset="0x8c22" name="RB_A2D_DEST_FLAG_BUFFER_PITCH" type="a6xx_flag_buffer_pitch" usage="rp_blit"/>
<!-- this is a guess but seems likely (for NV12 with UBWC): -->
<reg64 offset="0x8c23" name="RB_A2D_DEST_FLAG_BUFFER_BASE_1" type="waddress" align="64" usage="rp_blit"/>
<reg32 offset="0x8c25" name="RB_A2D_DEST_FLAG_BUFFER_PITCH_1" low="0" high="7" shr="6" type="uint" usage="rp_blit"/>
@@ -1921,13 +2010,13 @@ by a particular renderpass/blit.
<bitfield name="CLIP_DIST_03_LOC" low="8" high="15" type="uint"/>
<bitfield name="CLIP_DIST_47_LOC" low="16" high="23" type="uint"/>
</bitset>
- <reg32 offset="0x9101" name="VPC_VS_CLIP_CULL_CNTL" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
- <reg32 offset="0x9102" name="VPC_GS_CLIP_CULL_CNTL" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
- <reg32 offset="0x9103" name="VPC_DS_CLIP_CULL_CNTL" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9101" name="VPC_VS_CLIP_CULL_CNTL" type="a6xx_vpc_xs_clip_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9102" name="VPC_GS_CLIP_CULL_CNTL" type="a6xx_vpc_xs_clip_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9103" name="VPC_DS_CLIP_CULL_CNTL" type="a6xx_vpc_xs_clip_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
- <reg32 offset="0x9311" name="VPC_VS_CLIP_CULL_CNTL_V2" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
- <reg32 offset="0x9312" name="VPC_GS_CLIP_CULL_CNTL_V2" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
- <reg32 offset="0x9313" name="VPC_DS_CLIP_CULL_CNTL_V2" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9311" name="VPC_VS_CLIP_CULL_CNTL_V2" type="a6xx_vpc_xs_clip_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9312" name="VPC_GS_CLIP_CULL_CNTL_V2" type="a6xx_vpc_xs_clip_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9313" name="VPC_DS_CLIP_CULL_CNTL_V2" type="a6xx_vpc_xs_clip_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
<bitset name="a6xx_vpc_xs_siv_cntl" inline="yes">
<bitfield name="LAYERLOC" low="0" high="7" type="uint"/>
@@ -1935,23 +2024,33 @@ by a particular renderpass/blit.
<bitfield name="SHADINGRATELOC" low="16" high="23" type="uint" variants="A7XX-"/>
</bitset>
- <reg32 offset="0x9104" name="VPC_VS_SIV_CNTL" type="a6xx_vpc_xs_siv_cntl" usage="rp_blit"/>
- <reg32 offset="0x9105" name="VPC_GS_SIV_CNTL" type="a6xx_vpc_xs_siv_cntl" usage="rp_blit"/>
- <reg32 offset="0x9106" name="VPC_DS_SIV_CNTL" type="a6xx_vpc_xs_siv_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9104" name="VPC_VS_SIV_CNTL" type="a6xx_vpc_xs_siv_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9105" name="VPC_GS_SIV_CNTL" type="a6xx_vpc_xs_siv_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9106" name="VPC_DS_SIV_CNTL" type="a6xx_vpc_xs_siv_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+
- <reg32 offset="0x9314" name="VPC_VS_SIV_CNTL_V2" type="a6xx_vpc_xs_siv_cntl" usage="rp_blit"/>
- <reg32 offset="0x9315" name="VPC_GS_SIV_CNTL_V2" type="a6xx_vpc_xs_siv_cntl" usage="rp_blit"/>
- <reg32 offset="0x9316" name="VPC_DS_SIV_CNTL_V2" type="a6xx_vpc_xs_siv_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9314" name="VPC_VS_SIV_CNTL_V2" type="a6xx_vpc_xs_siv_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9315" name="VPC_GS_SIV_CNTL_V2" type="a6xx_vpc_xs_siv_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9316" name="VPC_DS_SIV_CNTL_V2" type="a6xx_vpc_xs_siv_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+
+ <bitset name="a6xx_vpc_rast_stream_cntl" inline="yes">
+ <!-- which stream to send to GRAS -->
+ <bitfield name="STREAM" low="0" high="1" type="uint"/>
+ <!-- discard primitives before rasterization -->
+ <bitfield name="DISCARD" pos="2" type="boolean"/>
+ </bitset>
+
+ <reg32 offset="0x9980" name="VPC_RAST_STREAM_CNTL" type="a6xx_vpc_rast_stream_cntl" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0x9107" name="VPC_RAST_STREAM_CNTL" type="a6xx_vpc_rast_stream_cntl" variants="A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9317" name="VPC_RAST_STREAM_CNTL_V2" type="a6xx_vpc_rast_stream_cntl" variants="A7XX" usage="rp_blit"/>
<reg32 offset="0x9107" name="VPC_UNKNOWN_9107" variants="A6XX" usage="rp_blit">
<!-- this mirrors VPC_RAST_STREAM_CNTL::DISCARD, although it seems it's unused -->
<bitfield name="RASTER_DISCARD" pos="0" type="boolean"/>
<bitfield name="UNK2" pos="2" type="boolean"/>
</reg32>
- <reg32 offset="0x9108" name="VPC_RAST_CNTL" usage="rp_blit">
- <bitfield name="MODE" low="0" high="1" type="a6xx_polygon_mode"/>
- </reg32>
+ <reg32 offset="0x9108" name="VPC_RAST_CNTL" type="a6xx_rast_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
<bitset name="a6xx_pc_cntl" inline="yes">
<bitfield name="PRIMITIVE_RESTART" pos="0" type="boolean"/>
<bitfield name="PROVOKING_VTX_LAST" pos="1" type="boolean"/>
@@ -1991,10 +2090,10 @@ by a particular renderpass/blit.
<bitfield name="VIEWS" low="2" high="6" type="uint"/>
</bitset>
- <reg32 offset="0x9109" name="VPC_PC_CNTL" type="a6xx_pc_cntl" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0x910a" name="VPC_GS_PARAM_0" type="a6xx_gs_param_0" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0x910b" name="VPC_STEREO_RENDERING_VIEWMASK" type="hex" low="0" high="15" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0x910c" name="VPC_STEREO_RENDERING_CNTL" type="a6xx_stereo_rendering_cntl" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0x9109" name="VPC_PC_CNTL" type="a6xx_pc_cntl" variants="A7XX" usage="rp_blit"/>
+ <reg32 offset="0x910a" name="VPC_GS_PARAM_0" type="a6xx_gs_param_0" variants="A7XX" usage="rp_blit"/>
+ <reg32 offset="0x910b" name="VPC_STEREO_RENDERING_VIEWMASK" type="hex" low="0" high="15" variants="A7XX" usage="rp_blit"/>
+ <reg32 offset="0x910c" name="VPC_STEREO_RENDERING_CNTL" type="a6xx_stereo_rendering_cntl" variants="A7XX" usage="rp_blit"/>
<enum name="a6xx_varying_interp_mode">
<value value="0" name="INTERP_SMOOTH"/>
@@ -2011,11 +2110,11 @@ by a particular renderpass/blit.
</enum>
<!-- 0x9109-0x91ff invalid -->
- <array offset="0x9200" name="VPC_VARYING_INTERP_MODE" stride="1" length="8" usage="rp_blit">
+ <array offset="0x9200" name="VPC_VARYING_INTERP_MODE" stride="1" length="8" variants="A6XX-A7XX" usage="rp_blit">
<doc>Packed array of a6xx_varying_interp_mode</doc>
<reg32 offset="0x0" name="MODE"/>
</array>
- <array offset="0x9208" name="VPC_VARYING_REPLACE_MODE_0" stride="1" length="8" usage="rp_blit">
+ <array offset="0x9208" name="VPC_VARYING_REPLACE_MODE" stride="1" length="8" variants="A6XX-A7XX" usage="rp_blit">
<doc>Packed array of a6xx_varying_ps_repl_mode</doc>
<reg32 offset="0x0" name="MODE"/>
</array>
@@ -2024,12 +2123,12 @@ by a particular renderpass/blit.
<reg32 offset="0x9210" name="VPC_UNKNOWN_9210" low="0" high="31" variants="A6XX" usage="cmd"/>
<reg32 offset="0x9211" name="VPC_UNKNOWN_9211" low="0" high="31" variants="A6XX" usage="cmd"/>
- <array offset="0x9212" name="VPC_VARYING_LM_TRANSFER_CNTL_0" stride="1" length="4" usage="rp_blit">
+ <array offset="0x9212" name="VPC_VARYING_LM_TRANSFER_CNTL" stride="1" length="4" variants="A6XX-A7XX" usage="rp_blit">
<!-- one bit per varying component: -->
<reg32 offset="0" name="DISABLE"/>
</array>
- <reg32 offset="0x9216" name="VPC_SO_MAPPING_WPTR" usage="rp_blit">
+ <bitset name="a6xx_vpc_so_mapping_wptr" inline="yes">
<!--
Choose which DWORD to write to. There is an array of
+ (4 * 64) DWORDs, dumped in the devcoredump at
@@ -2056,20 +2155,25 @@ by a particular renderpass/blit.
<bitfield name="ADDR" low="0" high="7" type="hex"/>
<!-- clear all A_EN and B_EN bits for all DWORDs -->
<bitfield name="RESET" pos="16" type="boolean"/>
- </reg32>
- <!-- special register, write multiple times to load SO program (not readable) -->
- <reg32 offset="0x9217" name="VPC_SO_MAPPING_PORT" usage="rp_blit">
+ </bitset>
+
+ <reg32 offset="0x9216" name="VPC_SO_MAPPING_WPTR" type="a6xx_vpc_so_mapping_wptr" variants="A6XX-A7XX" usage="rp_blit"/>
+
+ <bitset name="a6xx_vpc_so_mapping_port" inline="yes">
<bitfield name="A_BUF" low="0" high="1" type="uint"/>
<bitfield name="A_OFF" low="2" high="10" shr="2" type="uint"/>
<bitfield name="A_EN" pos="11" type="boolean"/>
<bitfield name="B_BUF" low="12" high="13" type="uint"/>
<bitfield name="B_OFF" low="14" high="22" shr="2" type="uint"/>
<bitfield name="B_EN" pos="23" type="boolean"/>
- </reg32>
+ </bitset>
+
+ <!-- special register, write multiple times to load SO program (not readable) -->
+ <reg32 offset="0x9217" name="VPC_SO_MAPPING_PORT" type="a6xx_vpc_so_mapping_port" variants="A6XX-A7XX" usage="rp_blit"/>
- <reg64 offset="0x9218" name="VPC_SO_QUERY_BASE" type="waddress" align="32" usage="cmd"/>
+ <reg64 offset="0x9218" name="VPC_SO_QUERY_BASE" type="waddress" align="32" variants="A6XX-A7XX" usage="cmd"/>
- <array offset="0x921a" name="VPC_SO" stride="7" length="4" usage="cmd">
+ <array offset="0x921a" name="VPC_SO" stride="7" length="4" variants="A6XX-A7XX" usage="cmd">
<reg64 offset="0" name="BUFFER_BASE" type="waddress" align="32"/>
<reg32 offset="2" name="BUFFER_SIZE" low="2" high="31" shr="2"/>
<reg32 offset="3" name="BUFFER_STRIDE" low="0" high="9" shr="2"/>
@@ -2077,12 +2181,13 @@ by a particular renderpass/blit.
<reg64 offset="5" name="FLUSH_BASE" type="waddress" align="32"/>
</array>
- <reg32 offset="0x9236" name="VPC_REPLACE_MODE_CNTL" usage="cmd">
+ <bitset name="a6xx_vpc_replace_mode_cntl" inline="yes">
<bitfield name="INVERT" pos="0" type="boolean"/>
- </reg32>
- <!-- 0x9237-0x92ff invalid -->
- <!-- always 0x0 ? -->
- <reg32 offset="0x9300" name="VPC_UNKNOWN_9300" low="0" high="2" usage="cmd"/>
+ </bitset>
+
+ <reg32 offset="0x9236" name="VPC_REPLACE_MODE_CNTL" type="a6xx_vpc_replace_mode_cntl" variants="A6XX-A7XX" usage="cmd"/>
+
+ <reg32 offset="0x9300" name="VPC_ROTATION_CNTL" low="0" high="2" variants="A6XX-A7XX" usage="cmd"/>
<bitset name="a6xx_vpc_xs_cntl" inline="yes">
<doc>
@@ -2101,11 +2206,12 @@ by a particular renderpass/blit.
</doc>
</bitfield>
</bitset>
- <reg32 offset="0x9301" name="VPC_VS_CNTL" type="a6xx_vpc_xs_cntl" usage="rp_blit"/>
- <reg32 offset="0x9302" name="VPC_GS_CNTL" type="a6xx_vpc_xs_cntl" usage="rp_blit"/>
- <reg32 offset="0x9303" name="VPC_DS_CNTL" type="a6xx_vpc_xs_cntl" usage="rp_blit"/>
- <reg32 offset="0x9304" name="VPC_PS_CNTL" usage="rp_blit">
+ <reg32 offset="0x9301" name="VPC_VS_CNTL" type="a6xx_vpc_xs_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9302" name="VPC_GS_CNTL" type="a6xx_vpc_xs_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9303" name="VPC_DS_CNTL" type="a6xx_vpc_xs_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+
+ <bitset name="a6xx_vpc_ps_cntl" inline="yes">
<bitfield name="NUMNONPOSVAR" low="0" high="7" type="uint"/>
<!-- for fixed-function (i.e. no GS) gl_PrimitiveID in FS -->
<bitfield name="PRIMIDLOC" low="8" high="15" type="uint"/>
@@ -2122,9 +2228,11 @@ by a particular renderpass/blit.
ViewID through the VS.
</doc>
</bitfield>
- </reg32>
+ </bitset>
- <reg32 offset="0x9305" name="VPC_SO_CNTL" usage="rp_blit">
+ <reg32 offset="0x9304" name="VPC_PS_CNTL" type="a6xx_vpc_ps_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+
+ <bitset name="a6xx_vpc_so_cntl" inline="yes">
<!--
It's offset by 1, and 0 means "disabled"
-->
@@ -2133,22 +2241,28 @@ by a particular renderpass/blit.
<bitfield name="BUF2_STREAM" low="6" high="8" type="uint"/>
<bitfield name="BUF3_STREAM" low="9" high="11" type="uint"/>
<bitfield name="STREAM_ENABLE" low="15" high="18" type="hex"/>
- </reg32>
- <reg32 offset="0x9306" name="VPC_SO_OVERRIDE" usage="rp_blit">
+ </bitset>
+
+ <reg32 offset="0x9305" name="VPC_SO_CNTL" type="a6xx_vpc_so_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+
+ <bitset name="a6xx_so_override" inline="yes">
<bitfield name="DISABLE" pos="0" type="boolean"/>
- </reg32>
- <reg32 offset="0x9307" name="VPC_PS_RAST_CNTL" variants="A6XX-" usage="rp_blit"> <!-- A702 + A7xx -->
- <bitfield name="MODE" low="0" high="1" type="a6xx_polygon_mode"/>
- </reg32>
- <reg32 offset="0x9308" name="VPC_ATTR_BUF_GMEM_SIZE" variants="A7XX-" usage="rp_blit">
- <bitfield name="SIZE_GMEM" low="0" high="31"/>
- </reg32>
- <reg32 offset="0x9309" name="VPC_ATTR_BUF_GMEM_BASE" variants="A7XX-" usage="rp_blit">
- <bitfield name="BASE_GMEM" low="0" high="31"/>
- </reg32>
- <reg32 offset="0x9b09" name="PC_ATTR_BUF_GMEM_SIZE" variants="A7XX-" usage="rp_blit">
- <bitfield name="SIZE_GMEM" low="0" high="31"/>
- </reg32>
+ </bitset>
+
+ <reg32 offset="0x9306" name="VPC_SO_OVERRIDE" type="a6xx_so_override" variants="A6XX-A7XX" usage="rp_blit"/>
+
+ <reg32 offset="0x9807" name="PC_DGEN_SO_OVERRIDE" type="a6xx_so_override" variants="A7XX" usage="rp_blit"/>
+
+ <reg32 offset="0x9307" name="VPC_PS_RAST_CNTL" type="a6xx_rast_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+
+ <reg32 offset="0x9308" name="VPC_ATTR_BUF_GMEM_SIZE" variants="A7XX" type="uint" usage="rp_blit"/>
+ <reg32 offset="0x9309" name="VPC_ATTR_BUF_GMEM_BASE" variants="A7XX" type="uint" usage="rp_blit"/>
+
+ <reg32 offset="0x9b09" name="PC_ATTR_BUF_GMEM_SIZE" variants="A7XX" type="uint" usage="rp_blit"/>
+
+ <reg32 offset="0x930a" name="VPC_UNKNOWN_930A" variants="A7XX"/>
+
+ <reg32 offset="0x960a" name="VPC_FLATSHADE_MODE_CNTL" variants="A7XX"/>
<!-- 0x9307-0x95ff invalid -->
@@ -2163,52 +2277,62 @@ by a particular renderpass/blit.
<!-- TODO: regs from 0x9624-0x963a -->
<!-- 0x963b-0x97ff invalid -->
- <reg32 offset="0x9800" name="PC_HS_PARAM_0" low="0" high="5" type="uint" usage="rp_blit"/>
+ <reg32 offset="0x9800" name="PC_HS_PARAM_0" low="0" high="5" type="uint" variants="A6XX-A7XX" usage="rp_blit"/>
- <!-- always 0x0 ? -->
- <reg32 offset="0x9801" name="PC_HS_PARAM_1" usage="rp_blit">
+ <bitset name="a6xx_pc_hs_param_1" inline="yes">
<bitfield name="SIZE" low="0" high="10" type="uint"/>
<bitfield name="UNK13" pos="13"/>
- </reg32>
+ </bitset>
+
+ <reg32 offset="0x9801" name="PC_HS_PARAM_1" type="a6xx_pc_hs_param_1" variants="A6XX-A7XX" usage="rp_blit"/>
- <reg32 offset="0x9802" name="PC_DS_PARAM" usage="rp_blit">
+ <bitset name="a6xx_pc_ds_param" inline="yes">
<bitfield name="SPACING" low="0" high="1" type="a6xx_tess_spacing"/>
<bitfield name="OUTPUT" low="2" high="3" type="a6xx_tess_output"/>
- </reg32>
+ </bitset>
+
+ <reg32 offset="0x9802" name="PC_DS_PARAM" type="a6xx_pc_ds_param" variants="A6XX-A7XX" usage="rp_blit"/>
+
+ <reg32 offset="0x9803" name="PC_RESTART_INDEX" low="0" high="31" type="uint" variants="A6XX-A7XX" usage="rp_blit"/>
- <reg32 offset="0x9803" name="PC_RESTART_INDEX" low="0" high="31" type="uint" usage="rp_blit"/>
- <reg32 offset="0x9804" name="PC_MODE_CNTL" low="0" high="7" usage="rp_blit"/>
+ <reg32 offset="0x9804" name="PC_MODE_CNTL" low="0" high="7" variants="A6XX-A7XX" usage="rp_blit"/>
<reg32 offset="0x9805" name="PC_POWER_CNTL" low="0" high="2" usage="rp_blit"/>
- <reg32 offset="0x9806" name="PC_PS_CNTL" usage="rp_blit">
+ <bitset name="a6xx_pc_ps_cntl" inline="yes">
<bitfield name="PRIMITIVEIDEN" pos="0" type="boolean"/>
- </reg32>
+ </bitset>
- <!-- New in a6xx gen3+ -->
- <reg32 offset="0x9808" name="PC_DGEN_SO_CNTL" usage="rp_blit">
+ <reg32 offset="0x9806" name="PC_PS_CNTL" type="a6xx_pc_ps_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+
+ <bitset name="a6xx_pc_dgen_so_cntl" inline="yes">
<bitfield name="STREAM_ENABLE" low="15" high="18" type="hex"/>
- </reg32>
+ </bitset>
+
+ <!-- New in a6xx gen3+ -->
+ <reg32 offset="0x9808" name="PC_DGEN_SO_CNTL" type="a6xx_pc_dgen_so_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
- <reg32 offset="0x980a" name="PC_DGEN_SU_CONSERVATIVE_RAS_CNTL">
+ <bitset name="a6xx_pc_dgen_su_conservative_ras_cntl" inline="yes">
<bitfield name="CONSERVATIVERASEN" pos="0" type="boolean"/>
- </reg32>
- <!-- 0x980b-0x983f invalid -->
+ </bitset>
+
+ <reg32 offset="0x980a" name="PC_DGEN_SU_CONSERVATIVE_RAS_CNTL" type="a6xx_pc_dgen_su_conservative_ras_cntl" variants="A6XX-A7XX"/>
<!-- 0x9840 - 0x9842 are not readable -->
- <reg32 offset="0x9840" name="PC_DRAW_INITIATOR">
+ <bitset name="a6xx_draw_initiator" inline="yes">
<bitfield name="STATE_ID" low="0" high="7"/>
- </reg32>
+ </bitset>
- <reg32 offset="0x9841" name="PC_KERNEL_INITIATOR">
- <bitfield name="STATE_ID" low="0" high="7"/>
- </reg32>
+ <reg32 offset="0x9840" name="PC_DRAW_INITIATOR" type="a6xx_draw_initiator" variants="A6XX-A7XX"/>
+ <reg32 offset="0x9841" name="PC_KERNEL_INITIATOR" type="a6xx_draw_initiator" variants="A6XX-A7XX"/>
- <reg32 offset="0x9842" name="PC_EVENT_INITIATOR">
+ <bitset name="a6xx_event_initiator" inline="yes">
<!-- I think only the low bit is actually used? -->
<bitfield name="STATE_ID" low="16" high="23"/>
<bitfield name="EVENT" low="0" high="6" type="vgt_event_type"/>
- </reg32>
+ </bitset>
+
+ <reg32 offset="0x9842" name="PC_EVENT_INITIATOR" type="a6xx_event_initiator" variants="A6XX-A7XX"/>
<!--
0x9880 written in a lot of places by SQE, same value gets written
@@ -2219,45 +2343,21 @@ by a particular renderpass/blit.
<!-- 0x9843-0x997f invalid -->
- <reg32 offset="0x9981" name="PC_DGEN_RAST_CNTL" variants="A6XX" usage="rp_blit">
- <bitfield name="MODE" low="0" high="1" type="a6xx_polygon_mode"/>
- </reg32>
- <reg32 offset="0x9809" name="PC_DGEN_RAST_CNTL" variants="A7XX-" usage="rp_blit">
- <bitfield name="MODE" low="0" high="1" type="a6xx_polygon_mode"/>
- </reg32>
-
- <reg32 offset="0x9980" name="VPC_RAST_STREAM_CNTL" variants="A6XX" usage="rp_blit">
- <!-- which stream to send to GRAS -->
- <bitfield name="STREAM" low="0" high="1" type="uint"/>
- <!-- discard primitives before rasterization -->
- <bitfield name="DISCARD" pos="2" type="boolean"/>
- </reg32>
- <!-- VPC_RAST_STREAM_CNTL -->
- <reg32 offset="0x9107" name="VPC_RAST_STREAM_CNTL" variants="A7XX-" usage="rp_blit">
- <!-- which stream to send to GRAS -->
- <bitfield name="STREAM" low="0" high="1" type="uint"/>
- <!-- discard primitives before rasterization -->
- <bitfield name="DISCARD" pos="2" type="boolean"/>
- </reg32>
- <reg32 offset="0x9317" name="VPC_RAST_STREAM_CNTL_V2" variants="A7XX-" usage="rp_blit">
- <!-- which stream to send to GRAS -->
- <bitfield name="STREAM" low="0" high="1" type="uint"/>
- <!-- discard primitives before rasterization -->
- <bitfield name="DISCARD" pos="2" type="boolean"/>
- </reg32>
+ <reg32 offset="0x9981" name="PC_DGEN_RAST_CNTL" type="a6xx_rast_cntl" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0x9809" name="PC_DGEN_RAST_CNTL" type="a6xx_rast_cntl" variants="A7XX" usage="rp_blit"/>
<!-- Both are a750+.
Probably needed to correctly overlap execution of several draws.
-->
- <reg32 offset="0x9885" name="PC_HS_BUFFER_SIZE" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0x9885" name="PC_HS_BUFFER_SIZE" variants="A7XX" usage="cmd"/>
<!-- The blob adds a bit more space ({0x10, 0x20, 0x30, 0x40} bytes), but the
meaning of this additional space is not known.
-->
- <reg32 offset="0x9886" name="PC_TF_BUFFER_SIZE" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0x9886" name="PC_TF_BUFFER_SIZE" variants="A7XX" usage="cmd"/>
<!-- 0x9982-0x9aff invalid -->
- <reg32 offset="0x9b00" name="PC_CNTL" type="a6xx_pc_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9b00" name="PC_CNTL" type="a6xx_pc_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
<bitset name="a6xx_pc_xs_cntl" inline="yes">
<doc>
@@ -2270,18 +2370,18 @@ by a particular renderpass/blit.
<bitfield name="LAYER" pos="9" type="boolean"/>
<bitfield name="VIEW" pos="10" type="boolean"/>
<!-- note: PC_VS_CNTL doesn't have the PRIMITIVE_ID bit -->
+ <!-- since HS can't output anything, only PRIMITIVE_ID is valid -->
<bitfield name="PRIMITIVE_ID" pos="11" type="boolean"/>
<bitfield name="CLIP_MASK" low="16" high="23" type="uint"/>
<bitfield name="SHADINGRATE" pos="24" type="boolean" variants="A7XX-"/>
</bitset>
- <reg32 offset="0x9b01" name="PC_VS_CNTL" type="a6xx_pc_xs_cntl" usage="rp_blit"/>
- <reg32 offset="0x9b02" name="PC_GS_CNTL" type="a6xx_pc_xs_cntl" usage="rp_blit"/>
- <!-- since HS can't output anything, only PRIMITIVE_ID is valid -->
- <reg32 offset="0x9b03" name="PC_HS_CNTL" type="a6xx_pc_xs_cntl" usage="rp_blit"/>
- <reg32 offset="0x9b04" name="PC_DS_CNTL" type="a6xx_pc_xs_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9b01" name="PC_VS_CNTL" type="a6xx_pc_xs_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9b02" name="PC_GS_CNTL" type="a6xx_pc_xs_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9b03" name="PC_HS_CNTL" type="a6xx_pc_xs_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9b04" name="PC_DS_CNTL" type="a6xx_pc_xs_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
- <reg32 offset="0x9b05" name="PC_GS_PARAM_0" type="a6xx_gs_param_0" usage="rp_blit"/>
+ <reg32 offset="0x9b05" name="PC_GS_PARAM_0" type="a6xx_gs_param_0" variants="A6XX-A7XX" usage="rp_blit"/>
<reg32 offset="0x9b06" name="PC_PRIMITIVE_CNTL_6" variants="A6XX" usage="rp_blit">
<doc>
@@ -2290,9 +2390,9 @@ by a particular renderpass/blit.
<bitfield name="STRIDE_IN_VPC" low="0" high="10" type="uint"/>
</reg32>
- <reg32 offset="0x9b07" name="PC_STEREO_RENDERING_CNTL" type="a6xx_stereo_rendering_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9b07" name="PC_STEREO_RENDERING_CNTL" type="a6xx_stereo_rendering_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
<!-- mask of enabled views, doesn't exist on A630 -->
- <reg32 offset="0x9b08" name="PC_STEREO_RENDERING_VIEWMASK" type="hex" low="0" high="15" usage="rp_blit"/>
+ <reg32 offset="0x9b08" name="PC_STEREO_RENDERING_VIEWMASK" type="hex" low="0" high="15" variants="A6XX-A7XX" usage="rp_blit"/>
<!-- 0x9b09-0x9bff invalid -->
<reg32 offset="0x9c00" name="PC_2D_EVENT_CMD">
<!-- special register (but note first 8 bits can be written/read) -->
@@ -2303,34 +2403,39 @@ by a particular renderpass/blit.
<!-- TODO: 0x9e00-0xa000 range incomplete -->
<reg32 offset="0x9e00" name="PC_DBG_ECO_CNTL"/>
<reg32 offset="0x9e01" name="PC_ADDR_MODE_CNTL" type="a5xx_address_mode"/>
- <reg64 offset="0x9e04" name="PC_DMA_BASE"/>
- <reg32 offset="0x9e06" name="PC_DMA_OFFSET" type="uint"/>
- <reg32 offset="0x9e07" name="PC_DMA_SIZE" type="uint"/>
+ <reg64 offset="0x9e04" name="PC_DMA_BASE" type="address" variants="A6XX-A7XX"/>
+ <reg32 offset="0x9e06" name="PC_DMA_OFFSET" type="uint" variants="A6XX-A7XX"/>
+ <reg32 offset="0x9e07" name="PC_DMA_SIZE" type="uint" variants="A6XX-A7XX"/>
+
<reg64 offset="0x9e08" name="PC_TESS_BASE" variants="A6XX" type="waddress" align="32" usage="cmd"/>
- <reg64 offset="0x9810" name="PC_TESS_BASE" variants="A7XX-" type="waddress" align="32" usage="cmd"/>
+ <reg64 offset="0x9810" name="PC_TESS_BASE" variants="A7XX" type="waddress" align="32" usage="cmd"/>
- <reg32 offset="0x9e0b" name="PC_DRAWCALL_CNTL" type="vgt_draw_initiator_a4xx">
+ <reg32 offset="0x9e0b" name="PC_DRAWCALL_CNTL" type="vgt_draw_initiator_a4xx" variants="A6XX-A7XX">
<doc>
Possibly not really "initiating" the draw but the layout is similar
to VGT_DRAW_INITIATOR on older gens
</doc>
</reg32>
- <reg32 offset="0x9e0c" name="PC_DRAWCALL_INSTANCE_NUM" type="uint"/>
- <reg32 offset="0x9e0d" name="PC_DRAWCALL_SIZE" type="uint"/>
+ <reg32 offset="0x9e0c" name="PC_DRAWCALL_INSTANCE_NUM" type="uint" variants="A6XX-A7XX"/>
+ <reg32 offset="0x9e0d" name="PC_DRAWCALL_SIZE" type="uint" variants="A6XX-A7XX"/>
<!-- These match the contents of CP_SET_BIN_DATA (not written directly) -->
- <reg32 offset="0x9e11" name="PC_VIS_STREAM_CNTL">
+ <bitset name="a6xx_pc_vis_stream_cntl" inline="yes">
<bitfield name="UNK0" low="0" high="15"/>
<bitfield name="VSC_SIZE" low="16" high="21" type="uint"/>
<bitfield name="VSC_N" low="22" high="26" type="uint"/>
- </reg32>
- <reg64 offset="0x9e12" name="PC_PVIS_STREAM_BIN_BASE" type="waddress" align="32"/>
- <reg64 offset="0x9e14" name="PC_DVIS_STREAM_BIN_BASE" type="waddress" align="32"/>
+ </bitset>
+
+ <reg32 offset="0x9e11" name="PC_VIS_STREAM_CNTL" type="a6xx_pc_vis_stream_cntl" variants="A6XX-A7XX"/>
+ <reg64 offset="0x9e12" name="PC_PVIS_STREAM_BIN_BASE" type="waddress" align="32" variants="A6XX-A7XX"/>
+ <reg64 offset="0x9e14" name="PC_DVIS_STREAM_BIN_BASE" type="waddress" align="32" variants="A6XX-A7XX"/>
- <reg32 offset="0x9e1c" name="PC_DRAWCALL_CNTL_OVERRIDE">
+ <bitset name="a6xx_pc_drawcall_cntl_override" inline="yes">
<doc>Written by CP_SET_VISIBILITY_OVERRIDE handler</doc>
<bitfield name="OVERRIDE" pos="0" type="boolean"/>
- </reg32>
+ </bitset>
+
+ <reg32 offset="0x9e1c" name="PC_DRAWCALL_CNTL_OVERRIDE" type="a6xx_pc_drawcall_cntl_override" variants="A6XX-A7XX"/>
<reg32 offset="0x9e24" name="PC_UNKNOWN_9E24" variants="A7XX-" usage="cmd"/>
@@ -2936,7 +3041,7 @@ by a particular renderpass/blit.
<reg32 offset="0xa9b3" name="SP_CS_PROGRAM_COUNTER_OFFSET" type="uint" usage="cmd"/>
<reg64 offset="0xa9b4" name="SP_CS_BASE" type="address" align="32" usage="cmd"/>
<reg32 offset="0xa9b6" name="SP_CS_PVT_MEM_PARAM" type="a6xx_sp_xs_pvt_mem_param" usage="cmd"/>
- <reg64 offset="0xa9b7" name="SP_CS_PVT_MEM_BASE" align="32" usage="cmd"/>
+ <reg64 offset="0xa9b7" name="SP_CS_PVT_MEM_BASE" type="waddress" align="32" usage="cmd"/>
<reg32 offset="0xa9b9" name="SP_CS_PVT_MEM_SIZE" type="a6xx_sp_xs_pvt_mem_size" usage="cmd"/>
<reg32 offset="0xa9ba" name="SP_CS_TSIZE" low="0" high="7" type="uint" usage="cmd"/>
<reg32 offset="0xa9bb" name="SP_CS_CONFIG" type="a6xx_sp_xs_config" usage="cmd"/>
@@ -3021,7 +3126,7 @@ by a particular renderpass/blit.
UAV state for compute shader:
-->
<reg64 offset="0xa9f2" name="SP_CS_UAV_BASE" type="address" align="16" variants="A6XX"/>
- <reg64 offset="0xa9f8" name="SP_CS_UAV_BASE" type="address" align="16" variants="A7XX"/>
+ <reg64 offset="0xa9f8" name="SP_CS_UAV_BASE" type="address" align="16" variants="A7XX-"/>
<reg32 offset="0xaa00" name="SP_CS_USIZE" low="0" high="6" type="uint"/>
<!-- Correlated with avgs/uvgs usage in FS -->
@@ -3104,14 +3209,19 @@ by a particular renderpass/blit.
instructions VS/HS/DS/GS/FS. See SP_CS_UAV_BASE_* for compute shaders.
-->
<reg64 offset="0xab1a" name="SP_GFX_UAV_BASE" type="address" align="16" usage="cmd"/>
- <reg32 offset="0xab20" name="SP_GFX_USIZE" low="0" high="6" type="uint" usage="cmd"/>
+ <reg32 offset="0xab20" name="SP_GFX_USIZE" low="0" high="6" type="uint" variants="A6XX-A7XX" usage="cmd"/>
- <reg32 offset="0xab22" name="SP_UNKNOWN_AB22" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0xab22" name="SP_UNKNOWN_AB22" variants="A7XX" usage="cmd"/>
+
+ <enum name="a6xx_sp_a2d_output_ifmt_type">
+ <value name="OUTPUT_IFMT_2D_FLOAT" value="0"/>
+ <value name="OUTPUT_IFMT_2D_SINT" value="1"/>
+ <value name="OUTPUT_IFMT_2D_UINT" value="2"/>
+ </enum>
<bitset name="a6xx_sp_a2d_output_info" inline="yes">
- <bitfield name="NORM" pos="0" type="boolean"/>
- <bitfield name="SINT" pos="1" type="boolean"/>
- <bitfield name="UINT" pos="2" type="boolean"/>
+ <bitfield name="HALF_PRECISION" pos="0" type="boolean"/>
+ <bitfield name="IFMT_TYPE" low="1" high="2" type="a6xx_sp_a2d_output_ifmt_type"/>
<!-- looks like HW only cares about the base type of this format,
which matches the ifmt? -->
<bitfield name="COLOR_FORMAT" low="3" high="10" type="a6xx_format"/>
@@ -3156,7 +3266,7 @@ by a particular renderpass/blit.
<reg32 offset="0xae6b" name="SP_UNKNOWN_AE6B" variants="A7XX-" usage="cmd"/>
<reg32 offset="0xae6c" name="SP_HLSQ_DBG_ECO_CNTL" variants="A7XX-" usage="cmd"/>
<reg32 offset="0xae6d" name="SP_READ_SEL" variants="A7XX-">
- <bitfield name="LOCATION" low="18" high="19" type="a7xx_state_location"/>
+ <bitfield name="LOCATION" low="18" high="20" type="a7xx_state_location"/>
<bitfield name="PIPE" low="16" high="17" type="a7xx_pipe"/>
<bitfield name="STATETYPE" low="8" high="15" type="a7xx_statetype_id"/>
<bitfield name="USPTP" low="4" high="7"/>
@@ -3192,7 +3302,7 @@ by a particular renderpass/blit.
<!-- looks to work in the same way as a5xx: -->
<reg64 offset="0xb302" name="TPL1_GFX_BORDER_COLOR_BASE" type="address" align="128" usage="cmd"/>
- <reg32 offset="0xb304" name="TPL1_MSAA_SAMPLE_POS_CNTL" type="a6xx_msaa_sample_pos_cntl" usage="rp_blit"/>
+ <reg32 offset="0xb304" name="TPL1_MSAA_SAMPLE_POS_CNTL" type="a6xx_msaa_sample_pos_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
<reg32 offset="0xb305" name="TPL1_PROGRAMMABLE_MSAA_POS_0" type="a6xx_programmable_msaa_pos" usage="rp_blit"/>
<reg32 offset="0xb306" name="TPL1_PROGRAMMABLE_MSAA_POS_1" type="a6xx_programmable_msaa_pos" usage="rp_blit"/>
<reg32 offset="0xb307" name="TPL1_WINDOW_OFFSET" type="a6xx_reg_xy" usage="rp_blit"/>
@@ -3232,12 +3342,12 @@ by a particular renderpass/blit.
</reg32>
<reg32 offset="0xb2c0" name="TPL1_A2D_SRC_TEXTURE_INFO" type="a6xx_a2d_src_texture_info" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0xb2c1" name="TPL1_A2D_SRC_TEXTURE_SIZE" variants="A7XX">
+ <reg32 offset="0xb2c1" name="TPL1_A2D_SRC_TEXTURE_SIZE" variants="A7XX-">
<bitfield name="WIDTH" low="0" high="14" type="uint"/>
<bitfield name="HEIGHT" low="15" high="29" type="uint"/>
</reg32>
<reg64 offset="0xb2c2" name="TPL1_A2D_SRC_TEXTURE_BASE" type="address" align="16" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0xb2c4" name="TPL1_A2D_SRC_TEXTURE_PITCH" variants="A7XX">
+ <reg32 offset="0xb2c4" name="TPL1_A2D_SRC_TEXTURE_PITCH" variants="A7XX-">
<!--
Bits from 3..9 must be zero unless 'TPL1_A2D_BLT_CNTL::TYPE'
is A6XX_TEX_IMG_BUFFER, which allows for lower alignment.
@@ -3270,13 +3380,13 @@ by a particular renderpass/blit.
<reg32 offset="0xb2ce" name="SP_PS_UNKNOWN_B4CE" low="0" high="31" variants="A7XX"/>
<reg32 offset="0xb2cf" name="SP_PS_UNKNOWN_B4CF" low="0" high="30" variants="A7XX"/>
<reg32 offset="0xb2d0" name="SP_PS_UNKNOWN_B4D0" low="0" high="29" variants="A7XX"/>
- <reg32 offset="0xb2d1" name="TPL1_A2D_WINDOW_OFFSET" type="a6xx_reg_xy" variants="A7XX"/>
+ <reg32 offset="0xb2d1" name="TPL1_A2D_WINDOW_OFFSET" type="a6xx_reg_xy" variants="A7XX-"/>
<reg32 offset="0xb2d2" name="TPL1_A2D_BLT_CNTL" variants="A7XX-" usage="rp_blit">
<bitfield name="RAW_COPY" pos="0" type="boolean"/>
<bitfield name="START_OFFSET_TEXELS" low="16" high="21"/>
<bitfield name="TYPE" low="29" high="31" type="a6xx_tex_type"/>
</reg32>
- <reg32 offset="0xab21" name="SP_WINDOW_OFFSET" type="a6xx_reg_xy" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xab21" name="SP_WINDOW_OFFSET" type="a6xx_reg_xy" variants="A7XX" usage="rp_blit"/>
<!-- always 0x100000 or 0x1000000? -->
<reg32 offset="0xb600" name="TPL1_DBG_ECO_CNTL" low="0" high="25" usage="cmd"/>
@@ -3296,17 +3406,13 @@ by a particular renderpass/blit.
</reg32>
<reg32 offset="0xb605" name="TPL1_UNKNOWN_B605" low="0" high="7" type="uint" variants="A6XX" usage="cmd"/> <!-- always 0x0 or 0x44 ? -->
- <reg32 offset="0xb608" name="TPL1_BICUBIC_WEIGHTS_TABLE_0" low="0" high="29" variants="A6XX"/>
- <reg32 offset="0xb609" name="TPL1_BICUBIC_WEIGHTS_TABLE_1" low="0" high="29" variants="A6XX"/>
- <reg32 offset="0xb60a" name="TPL1_BICUBIC_WEIGHTS_TABLE_2" low="0" high="29" variants="A6XX"/>
- <reg32 offset="0xb60b" name="TPL1_BICUBIC_WEIGHTS_TABLE_3" low="0" high="29" variants="A6XX"/>
- <reg32 offset="0xb60c" name="TPL1_BICUBIC_WEIGHTS_TABLE_4" low="0" high="29" variants="A6XX"/>
+ <array offset="0xb608" name="TPL1_BICUBIC_WEIGHTS_TABLE" stride="1" length="5" variants="A6XX">
+ <reg32 offset="0" name="REG" low="0" high="29"/>
+ </array>
- <reg32 offset="0xb608" name="TPL1_BICUBIC_WEIGHTS_TABLE_0" low="0" high="29" variants="A7XX" usage="cmd"/>
- <reg32 offset="0xb609" name="TPL1_BICUBIC_WEIGHTS_TABLE_1" low="0" high="29" variants="A7XX" usage="cmd"/>
- <reg32 offset="0xb60a" name="TPL1_BICUBIC_WEIGHTS_TABLE_2" low="0" high="29" variants="A7XX" usage="cmd"/>
- <reg32 offset="0xb60b" name="TPL1_BICUBIC_WEIGHTS_TABLE_3" low="0" high="29" variants="A7XX" usage="cmd"/>
- <reg32 offset="0xb60c" name="TPL1_BICUBIC_WEIGHTS_TABLE_4" low="0" high="29" variants="A7XX" usage="cmd"/>
+ <array offset="0xb608" name="TPL1_BICUBIC_WEIGHTS_TABLE" stride="1" length="5" variants="A7XX">
+ <reg32 offset="0" name="REG" low="0" high="29" usage="cmd"/>
+ </array>
<array offset="0xb610" name="TPL1_PERFCTR_TP_SEL" stride="1" length="12" variants="A6XX"/>
<array offset="0xb610" name="TPL1_PERFCTR_TP_SEL" stride="1" length="18" variants="A7XX"/>
@@ -3638,7 +3744,7 @@ by a particular renderpass/blit.
<reg32 offset="0xbb10" name="SP_PS_CONST_CONFIG" type="a6xx_xs_const_config" variants="A6XX" usage="rp_blit"/>
<reg32 offset="0xab03" name="SP_PS_CONST_CONFIG" type="a6xx_xs_const_config" variants="A7XX-" usage="rp_blit"/>
- <array offset="0xab40" name="SP_SHARED_CONSTANT_GFX_0" stride="1" length="64" variants="A7XX-"/>
+ <array offset="0xab40" name="SP_SHARED_CONSTANT_GFX" stride="1" length="64" variants="A7XX"/>
<reg32 offset="0xbb11" name="HLSQ_SHARED_CONSTS" variants="A6XX" usage="cmd">
<doc>
@@ -3800,7 +3906,7 @@ by a particular renderpass/blit.
<reg32 offset="0x0030" name="CFG_DBGBUS_TRACE_BUF2"/>
</domain>
-<domain name="A7XX_CX_DBGC" width="32">
+<domain name="A7XX_CX_DBGC" width="32" varset="chip">
<!-- Bitfields shifted, but otherwise the same: -->
<reg32 offset="0x0000" name="CFG_DBGBUS_SEL_A" variants="A7XX-">
<bitfield high="7" low="0" name="PING_INDEX"/>
diff --git a/drivers/gpu/drm/msm/registers/adreno/a6xx_descriptors.xml b/drivers/gpu/drm/msm/registers/adreno/a6xx_descriptors.xml
index 307d43dda8a2..56cfaff614a4 100644
--- a/drivers/gpu/drm/msm/registers/adreno/a6xx_descriptors.xml
+++ b/drivers/gpu/drm/msm/registers/adreno/a6xx_descriptors.xml
@@ -9,38 +9,6 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<domain name="A6XX_TEX_SAMP" width="32">
<doc>Texture sampler dwords</doc>
- <enum name="a6xx_tex_filter"> <!-- same as a4xx? -->
- <value name="A6XX_TEX_NEAREST" value="0"/>
- <value name="A6XX_TEX_LINEAR" value="1"/>
- <value name="A6XX_TEX_ANISO" value="2"/>
- <value name="A6XX_TEX_CUBIC" value="3"/> <!-- a650 only -->
- </enum>
- <enum name="a6xx_tex_clamp"> <!-- same as a4xx? -->
- <value name="A6XX_TEX_REPEAT" value="0"/>
- <value name="A6XX_TEX_CLAMP_TO_EDGE" value="1"/>
- <value name="A6XX_TEX_MIRROR_REPEAT" value="2"/>
- <value name="A6XX_TEX_CLAMP_TO_BORDER" value="3"/>
- <value name="A6XX_TEX_MIRROR_CLAMP" value="4"/>
- </enum>
- <enum name="a6xx_tex_aniso"> <!-- same as a4xx? -->
- <value name="A6XX_TEX_ANISO_1" value="0"/>
- <value name="A6XX_TEX_ANISO_2" value="1"/>
- <value name="A6XX_TEX_ANISO_4" value="2"/>
- <value name="A6XX_TEX_ANISO_8" value="3"/>
- <value name="A6XX_TEX_ANISO_16" value="4"/>
- </enum>
- <enum name="a6xx_reduction_mode">
- <value name="A6XX_REDUCTION_MODE_AVERAGE" value="0"/>
- <value name="A6XX_REDUCTION_MODE_MIN" value="1"/>
- <value name="A6XX_REDUCTION_MODE_MAX" value="2"/>
- </enum>
- <enum name="a6xx_fast_border_color">
- <!-- R B G A -->
- <value name="A6XX_BORDER_COLOR_0_0_0_0" value="0"/>
- <value name="A6XX_BORDER_COLOR_0_0_0_1" value="1"/>
- <value name="A6XX_BORDER_COLOR_1_1_1_0" value="2"/>
- <value name="A6XX_BORDER_COLOR_1_1_1_1" value="3"/>
- </enum>
<reg32 offset="0" name="0">
<bitfield name="MIPFILTER_LINEAR_NEAR" pos="0" type="boolean"/>
@@ -79,14 +47,6 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<domain name="A6XX_TEX_CONST" width="32" varset="chip">
<doc>Texture constant dwords</doc>
- <enum name="a6xx_tex_swiz"> <!-- same as a4xx? -->
- <value name="A6XX_TEX_X" value="0"/>
- <value name="A6XX_TEX_Y" value="1"/>
- <value name="A6XX_TEX_Z" value="2"/>
- <value name="A6XX_TEX_W" value="3"/>
- <value name="A6XX_TEX_ZERO" value="4"/>
- <value name="A6XX_TEX_ONE" value="5"/>
- </enum>
<reg32 offset="0" name="0">
<bitfield name="TILE_MODE" low="0" high="1" type="a6xx_tile_mode"/>
<bitfield name="SRGB" pos="2" type="boolean"/>
diff --git a/drivers/gpu/drm/msm/registers/adreno/a6xx_enums.xml b/drivers/gpu/drm/msm/registers/adreno/a6xx_enums.xml
index 665539b098c6..4e42f055b85f 100644
--- a/drivers/gpu/drm/msm/registers/adreno/a6xx_enums.xml
+++ b/drivers/gpu/drm/msm/registers/adreno/a6xx_enums.xml
@@ -320,14 +320,14 @@ to upconvert to 32b float internally?
16b float: 3
-->
<enum name="a6xx_2d_ifmt">
- <value value="0x10" name="R2D_UNORM8"/>
<value value="0x7" name="R2D_INT32"/>
<value value="0x6" name="R2D_INT16"/>
<value value="0x5" name="R2D_INT8"/>
<value value="0x4" name="R2D_FLOAT32"/>
<value value="0x3" name="R2D_FLOAT16"/>
+ <value value="0x2" name="R2D_SNORM8"/>
<value value="0x1" name="R2D_UNORM8_SRGB"/>
- <value value="0x0" name="R2D_RAW"/>
+ <value value="0x0" name="R2D_UNORM8"/>
</enum>
<enum name="a6xx_tex_type">
@@ -380,4 +380,50 @@ to upconvert to 32b float internally?
<value value="0x3" name="TESS_CCW_TRIS"/>
</enum>
+<enum name="a6xx_tex_filter"> <!-- same as a4xx? -->
+ <value name="A6XX_TEX_NEAREST" value="0"/>
+ <value name="A6XX_TEX_LINEAR" value="1"/>
+ <value name="A6XX_TEX_ANISO" value="2"/>
+ <value name="A6XX_TEX_CUBIC" value="3"/> <!-- a650 only -->
+</enum>
+
+<enum name="a6xx_tex_clamp"> <!-- same as a4xx? -->
+ <value name="A6XX_TEX_REPEAT" value="0"/>
+ <value name="A6XX_TEX_CLAMP_TO_EDGE" value="1"/>
+ <value name="A6XX_TEX_MIRROR_REPEAT" value="2"/>
+ <value name="A6XX_TEX_CLAMP_TO_BORDER" value="3"/>
+ <value name="A6XX_TEX_MIRROR_CLAMP" value="4"/>
+</enum>
+
+<enum name="a6xx_tex_aniso"> <!-- same as a4xx? -->
+ <value name="A6XX_TEX_ANISO_1" value="0"/>
+ <value name="A6XX_TEX_ANISO_2" value="1"/>
+ <value name="A6XX_TEX_ANISO_4" value="2"/>
+ <value name="A6XX_TEX_ANISO_8" value="3"/>
+ <value name="A6XX_TEX_ANISO_16" value="4"/>
+</enum>
+
+<enum name="a6xx_reduction_mode">
+ <value name="A6XX_REDUCTION_MODE_AVERAGE" value="0"/>
+ <value name="A6XX_REDUCTION_MODE_MIN" value="1"/>
+ <value name="A6XX_REDUCTION_MODE_MAX" value="2"/>
+</enum>
+
+<enum name="a6xx_fast_border_color">
+ <!-- R B G A -->
+ <value name="A6XX_BORDER_COLOR_0_0_0_0" value="0"/>
+ <value name="A6XX_BORDER_COLOR_0_0_0_1" value="1"/>
+ <value name="A6XX_BORDER_COLOR_1_1_1_0" value="2"/>
+ <value name="A6XX_BORDER_COLOR_1_1_1_1" value="3"/>
+</enum>
+
+<enum name="a6xx_tex_swiz"> <!-- same as a4xx? -->
+ <value name="A6XX_TEX_X" value="0"/>
+ <value name="A6XX_TEX_Y" value="1"/>
+ <value name="A6XX_TEX_Z" value="2"/>
+ <value name="A6XX_TEX_W" value="3"/>
+ <value name="A6XX_TEX_ZERO" value="4"/>
+ <value name="A6XX_TEX_ONE" value="5"/>
+</enum>
+
</database>
diff --git a/drivers/gpu/drm/msm/registers/adreno/a6xx_gmu.xml b/drivers/gpu/drm/msm/registers/adreno/a6xx_gmu.xml
index 3d2cc339b8f1..b15a242d974d 100644
--- a/drivers/gpu/drm/msm/registers/adreno/a6xx_gmu.xml
+++ b/drivers/gpu/drm/msm/registers/adreno/a6xx_gmu.xml
@@ -99,6 +99,10 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<bitfield name="GX_HM_GDSC_POWER_OFF" pos="6" type="boolean"/>
<bitfield name="GX_HM_CLK_OFF" pos="7" type="boolean"/>
</reg32>
+ <reg32 offset="0x50d0" name="GMU_SPTPRAC_PWR_CLK_STATUS" variants="A7XX">
+ <bitfield name="GX_HM_GDSC_POWER_OFF" pos="0" type="boolean"/>
+ <bitfield name="GX_HM_CLK_OFF" pos="1" type="boolean"/>
+ </reg32>
<reg32 offset="0x50e4" name="GMU_GPU_NAP_CTRL">
<bitfield name="HW_NAP_ENABLE" pos="0"/>
<bitfield name="SID" low="4" high="8"/>
@@ -127,6 +131,7 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<reg32 offset="0x5088" name="GMU_ALWAYS_ON_COUNTER_L"/>
<reg32 offset="0x5089" name="GMU_ALWAYS_ON_COUNTER_H"/>
<reg32 offset="0x50c3" name="GMU_GMU_PWR_COL_KEEPALIVE"/>
+ <reg32 offset="0x50c4" name="GMU_PWR_COL_PREEMPT_KEEPALIVE"/>
<reg32 offset="0x5180" name="GMU_HFI_CTRL_STATUS"/>
<reg32 offset="0x5181" name="GMU_HFI_VERSION_INFO"/>
<reg32 offset="0x5182" name="GMU_HFI_SFR_ADDR"/>
@@ -228,6 +233,12 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<reg32 offset="0x03ee" name="RSCC_TCS1_DRV0_STATUS"/>
<reg32 offset="0x0496" name="RSCC_TCS2_DRV0_STATUS"/>
<reg32 offset="0x053e" name="RSCC_TCS3_DRV0_STATUS"/>
+ <reg32 offset="0x05e6" name="RSCC_TCS4_DRV0_STATUS" variants="A7XX"/>
+ <reg32 offset="0x068e" name="RSCC_TCS5_DRV0_STATUS" variants="A7XX"/>
+ <reg32 offset="0x0736" name="RSCC_TCS6_DRV0_STATUS" variants="A7XX"/>
+ <reg32 offset="0x07de" name="RSCC_TCS7_DRV0_STATUS" variants="A7XX"/>
+ <reg32 offset="0x0886" name="RSCC_TCS8_DRV0_STATUS" variants="A7XX"/>
+ <reg32 offset="0x092e" name="RSCC_TCS9_DRV0_STATUS" variants="A7XX"/>
</domain>
</database>
diff --git a/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml b/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml
index 7abc08635495..0e10e1c6d263 100644
--- a/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml
+++ b/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml
@@ -120,12 +120,12 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<value name="LRZ_FLUSH" value="38" variants="A5XX-"/>
<value name="BLIT_OP_FILL_2D" value="39" variants="A5XX-"/>
<value name="BLIT_OP_COPY_2D" value="40" variants="A5XX-A6XX"/>
- <value name="UNK_40" value="40" variants="A7XX"/>
+ <value name="LRZ_CACHE_INVALIDATE" value="40" variants="A7XX"/>
<value name="LRZ_Q_CACHE_INVALIDATE" value="41" variants="A7XX"/>
<value name="BLIT_OP_SCALE_2D" value="42" variants="A5XX-"/>
<value name="CONTEXT_DONE_2D" value="43" variants="A5XX-"/>
- <value name="UNK_2C" value="44" variants="A5XX-"/>
- <value name="UNK_2D" value="45" variants="A5XX-"/>
+ <value name="VSC_BINNING_START" value="44" variants="A5XX-"/>
+ <value name="VSC_BINNING_END" value="45" variants="A5XX-"/>
<!-- a6xx events -->
<doc>
@@ -523,7 +523,7 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<!--
Seems to set the mode flags which control which CP_SET_DRAW_STATE
packets are executed, based on their ENABLE_MASK values
-
+
CP_SET_MODE w/ payload of 0x1 seems to cause CP_SET_DRAW_STATE
packets w/ ENABLE_MASK & 0x6 to execute immediately
-->
@@ -640,8 +640,7 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<value name="CP_BV_BR_COUNT_OPS" value="0x1b" variants="A7XX-"/>
<doc> Clears, adds to local, or adds to global timestamp </doc>
<value name="CP_MODIFY_TIMESTAMP" value="0x1c" variants="A7XX-"/>
- <!-- similar to CP_CONTEXT_REG_BUNCH, but discards first two dwords?? -->
- <value name="CP_CONTEXT_REG_BUNCH2" value="0x5d" variants="A7XX-"/>
+ <value name="CP_NON_CONTEXT_REG_BUNCH" value="0x5d" variants="A7XX-"/>
<doc>
Write to a scratch memory that is read by CP_REG_TEST with
SOURCE_SCRATCH_MEM set. It's not the same scratch as scratch registers.
@@ -918,12 +917,6 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
</reg32>
<stripe varset="chip" variants="A5XX-">
- <reg32 offset="4" name="4">
- <bitfield name="INDX_BASE_LO" low="0" high="31"/>
- </reg32>
- <reg32 offset="5" name="5">
- <bitfield name="INDX_BASE_HI" low="0" high="31"/>
- </reg32>
<reg64 offset="4" name="INDX_BASE" type="address"/>
<reg32 offset="6" name="6">
<!-- max # of elements in index buffer -->
@@ -1099,8 +1092,10 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<bitfield name="BINNING" pos="20" varset="chip" variants="A6XX-" type="boolean"/>
<bitfield name="GMEM" pos="21" varset="chip" variants="A6XX-" type="boolean"/>
<bitfield name="SYSMEM" pos="22" varset="chip" variants="A6XX-" type="boolean"/>
- <bitfield name="GROUP_ID" low="24" high="28" type="uint"/>
+ <!-- high bit is 28 until a750: -->
+ <bitfield name="GROUP_ID" low="24" high="29" type="uint"/>
</reg32>
+ <reg64 offset="1" name="ADDR" type="address"/>
<reg32 offset="1" name="1">
<bitfield name="ADDR_LO" low="0" high="31" type="hex"/>
</reg32>
@@ -1166,26 +1161,11 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
</reg32>
<stripe varset="a7xx_abs_mask_mode" variants="NO_ABS_MASK">
<!-- BIN_DATA_ADDR -> VSC_PIPE[p].DATA_ADDRESS -->
- <reg32 offset="1" name="1">
- <bitfield name="BIN_DATA_ADDR_LO" low="0" high="31" type="hex"/>
- </reg32>
- <reg32 offset="2" name="2">
- <bitfield name="BIN_DATA_ADDR_HI" low="0" high="31" type="hex"/>
- </reg32>
+ <reg64 offset="1" name="BIN_DATA_ADDR" type="address"/>
<!-- BIN_SIZE_ADDRESS -> VSC_SIZE_ADDRESS + (p * 4)-->
- <reg32 offset="3" name="3">
- <bitfield name="BIN_SIZE_ADDRESS_LO" low="0" high="31"/>
- </reg32>
- <reg32 offset="4" name="4">
- <bitfield name="BIN_SIZE_ADDRESS_HI" low="0" high="31"/>
- </reg32>
+ <reg64 offset="3" name="BIN_SIZE_ADDR" type="address"/>
<!-- new on a6xx, where BIN_DATA_ADDR is the DRAW_STRM: -->
- <reg32 offset="5" name="5">
- <bitfield name="BIN_PRIM_STRM_LO" low="0" high="31"/>
- </reg32>
- <reg32 offset="6" name="6">
- <bitfield name="BIN_PRIM_STRM_HI" low="0" high="31"/>
- </reg32>
+ <reg64 offset="5" name="BIN_PRIM_STRM" type="address"/>
<!--
a7xx adds a few more addresses to the end of the pkt
-->
@@ -1195,26 +1175,11 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<stripe varset="a7xx_abs_mask_mode" variants="ABS_MASK">
<reg32 offset="1" name="ABS_MASK"/>
<!-- BIN_DATA_ADDR -> VSC_PIPE[p].DATA_ADDRESS -->
- <reg32 offset="2" name="2">
- <bitfield name="BIN_DATA_ADDR_LO" low="0" high="31" type="hex"/>
- </reg32>
- <reg32 offset="3" name="3">
- <bitfield name="BIN_DATA_ADDR_HI" low="0" high="31" type="hex"/>
- </reg32>
+ <reg64 offset="2" name="BIN_DATA_ADDR" type="address"/>
<!-- BIN_SIZE_ADDRESS -> VSC_SIZE_ADDRESS + (p * 4)-->
- <reg32 offset="4" name="4">
- <bitfield name="BIN_SIZE_ADDRESS_LO" low="0" high="31"/>
- </reg32>
- <reg32 offset="5" name="5">
- <bitfield name="BIN_SIZE_ADDRESS_HI" low="0" high="31"/>
- </reg32>
+ <reg64 offset="4" name="BIN_SIZE_ADDR" type="address"/>
<!-- new on a6xx, where BIN_DATA_ADDR is the DRAW_STRM: -->
- <reg32 offset="6" name="6">
- <bitfield name="BIN_PRIM_STRM_LO" low="0" high="31"/>
- </reg32>
- <reg32 offset="7" name="7">
- <bitfield name="BIN_PRIM_STRM_HI" low="0" high="31"/>
- </reg32>
+ <reg64 offset="6" name="BIN_PRIM_STRM" type="address"/>
<!--
a7xx adds a few more addresses to the end of the pkt
-->
@@ -1300,7 +1265,7 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
</reg32>
</domain>
-<domain name="CP_REG_TO_MEM" width="32">
+<domain name="CP_REG_TO_MEM" width="32" prefix="chip">
<reg32 offset="0" name="0">
<bitfield name="REG" low="0" high="17" type="hex"/>
<!-- number of registers/dwords copied is max(CNT, 1). -->
@@ -1308,12 +1273,12 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<bitfield name="64B" pos="30" type="boolean"/>
<bitfield name="ACCUMULATE" pos="31" type="boolean"/>
</reg32>
- <reg32 offset="1" name="1">
- <bitfield name="DEST" low="0" high="31"/>
- </reg32>
- <reg32 offset="2" name="2" varset="chip" variants="A5XX-">
- <bitfield name="DEST_HI" low="0" high="31"/>
- </reg32>
+ <stripe varset="chip" variants="A2XX-A4XX">
+ <reg32 offset="1" name="DEST" type="address"/>
+ </stripe>
+ <stripe varset="chip" variants="A5XX-">
+ <reg64 offset="1" name="DEST" type="address"/>
+ </stripe>
</domain>
<domain name="CP_REG_TO_MEM_OFFSET_REG" width="32">
@@ -1329,12 +1294,7 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<bitfield name="64B" pos="30" type="boolean"/>
<bitfield name="ACCUMULATE" pos="31" type="boolean"/>
</reg32>
- <reg32 offset="1" name="1">
- <bitfield name="DEST" low="0" high="31"/>
- </reg32>
- <reg32 offset="2" name="2" varset="chip" variants="A5XX-">
- <bitfield name="DEST_HI" low="0" high="31"/>
- </reg32>
+ <reg64 offset="1" name="DEST" type="waddress"/>
<reg32 offset="3" name="3">
<bitfield name="OFFSET0" low="0" high="17" type="hex"/>
<bitfield name="OFFSET0_SCRATCH" pos="19" type="boolean"/>
@@ -1354,18 +1314,8 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<bitfield name="64B" pos="30" type="boolean"/>
<bitfield name="ACCUMULATE" pos="31" type="boolean"/>
</reg32>
- <reg32 offset="1" name="1">
- <bitfield name="DEST" low="0" high="31"/>
- </reg32>
- <reg32 offset="2" name="2" varset="chip" variants="A5XX-">
- <bitfield name="DEST_HI" low="0" high="31"/>
- </reg32>
- <reg32 offset="3" name="3">
- <bitfield name="OFFSET_LO" low="0" high="31" type="hex"/>
- </reg32>
- <reg32 offset="4" name="4">
- <bitfield name="OFFSET_HI" low="0" high="31" type="hex"/>
- </reg32>
+ <reg64 offset="1" name="DEST" type="waddress"/>
+ <reg64 offset="3" name="OFFSET" type="waddress"/>
</domain>
<domain name="CP_MEM_TO_REG" width="32">
@@ -1378,12 +1328,12 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<!-- does the same thing as CP_MEM_TO_MEM::UNK31 -->
<bitfield name="UNK31" pos="31" type="boolean"/>
</reg32>
- <reg32 offset="1" name="1">
- <bitfield name="SRC" low="0" high="31"/>
- </reg32>
- <reg32 offset="2" name="2" varset="chip" variants="A5XX-">
- <bitfield name="SRC_HI" low="0" high="31"/>
- </reg32>
+ <stripe varset="chip" variants="A2XX-A4XX">
+ <reg32 offset="1" name="SRC" type="address"/>
+ </stripe>
+ <stripe varset="chip" variants="A5XX-">
+ <reg64 offset="1" name="SRC" type="address"/>
+ </stripe>
</domain>
<domain name="CP_MEM_TO_MEM" width="32">
@@ -1403,6 +1353,10 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<!-- some other kind of wait -->
<bitfield name="UNK31" pos="31" type="boolean"/>
</reg32>
+ <reg64 offset="1" name="DST" type="waddress"/>
+ <reg64 offset="3" name="SRC_A" type="address"/>
+ <reg64 offset="5" name="SRC_B" type="address"/>
+ <reg64 offset="7" name="SRC_C" type="address"/>
<!--
followed by sequence of addresses.. the first is the
destination and the rest are N src addresses which are
@@ -1461,12 +1415,12 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
</domain>
<domain name="CP_MEM_WRITE" width="32">
- <reg32 offset="0" name="0">
- <bitfield name="ADDR_LO" low="0" high="31"/>
- </reg32>
- <reg32 offset="1" name="1">
- <bitfield name="ADDR_HI" low="0" high="31"/>
- </reg32>
+ <stripe varset="chip" variants="A2XX-A4XX">
+ <reg32 offset="0" name="ADDR" type="address"/>
+ </stripe>
+ <stripe varset="chip" variants="A5XX-">
+ <reg64 offset="0" name="ADDR" type="address"/>
+ </stripe>
<!-- followed by the DWORDs to write -->
</domain>
@@ -1518,24 +1472,14 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<bitfield name="POLL" low="4" high="5" type="poll_memory_type"/>
<bitfield name="WRITE_MEMORY" pos="8" type="boolean"/>
</reg32>
- <reg32 offset="1" name="1">
- <bitfield name="POLL_ADDR_LO" low="0" high="31" type="hex"/>
- </reg32>
- <reg32 offset="2" name="2">
- <bitfield name="POLL_ADDR_HI" low="0" high="31" type="hex"/>
- </reg32>
+ <reg64 offset="1" name="POLL_ADDR" type="address"/>
<reg32 offset="3" name="3">
<bitfield name="REF" low="0" high="31"/>
</reg32>
<reg32 offset="4" name="4">
<bitfield name="MASK" low="0" high="31"/>
</reg32>
- <reg32 offset="5" name="5">
- <bitfield name="WRITE_ADDR_LO" low="0" high="31" type="hex"/>
- </reg32>
- <reg32 offset="6" name="6">
- <bitfield name="WRITE_ADDR_HI" low="0" high="31" type="hex"/>
- </reg32>
+ <reg64 offset="5" name="WRITE_ADDR" type="waddress"/>
<reg32 offset="7" name="7">
<bitfield name="WRITE_DATA" low="0" high="31"/>
</reg32>
@@ -1550,12 +1494,7 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<!-- Reserved for flags, presumably? Unused in FW -->
<bitfield name="RESERVED" low="0" high="31" type="hex"/>
</reg32>
- <reg32 offset="1" name="1">
- <bitfield name="POLL_ADDR_LO" low="0" high="31" type="hex"/>
- </reg32>
- <reg32 offset="2" name="2">
- <bitfield name="POLL_ADDR_HI" low="0" high="31" type="hex"/>
- </reg32>
+ <reg64 offset="1" name="POLL_ADDR" type="address"/>
<reg32 offset="3" name="3">
<bitfield name="REF" low="0" high="31"/>
</reg32>
@@ -1573,12 +1512,7 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<bitfield name="POLL" low="4" high="5" type="poll_memory_type"/>
<bitfield name="WRITE_MEMORY" pos="8" type="boolean"/>
</reg32>
- <reg32 offset="1" name="1">
- <bitfield name="POLL_ADDR_LO" low="0" high="31" type="hex"/>
- </reg32>
- <reg32 offset="2" name="2">
- <bitfield name="POLL_ADDR_HI" low="0" high="31" type="hex"/>
- </reg32>
+ <reg64 offset="1" name="POLL_ADDR" type="address"/>
<reg32 offset="3" name="3">
<bitfield name="REF" low="0" high="31"/>
</reg32>
@@ -1712,12 +1646,7 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
TODO what is gpuaddr for, seems to be all 0's.. maybe needed for
context switch?
-->
- <reg32 offset="1" name="1">
- <bitfield name="ADDR_0_LO" low="0" high="31"/>
- </reg32>
- <reg32 offset="2" name="2">
- <bitfield name="ADDR_0_HI" low="0" high="31"/>
- </reg32>
+ <reg64 offset="1" name="ADDR" type="waddress"/>
<reg32 offset="3" name="3">
<!-- ??? -->
</reg32>
@@ -1832,9 +1761,7 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<reg32 offset="0" name="0">
</reg32>
<stripe varset="chip" variants="A4XX">
- <reg32 offset="1" name="1">
- <bitfield name="ADDR" low="0" high="31"/>
- </reg32>
+ <reg32 offset="1" name="ADDR" type="address"/>
<reg32 offset="2" name="2">
<!-- localsize is value minus one: -->
<bitfield name="LOCALSIZEX" low="2" high="11" type="uint"/>
@@ -1843,12 +1770,7 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
</reg32>
</stripe>
<stripe varset="chip" variants="A5XX-">
- <reg32 offset="1" name="1">
- <bitfield name="ADDR_LO" low="0" high="31"/>
- </reg32>
- <reg32 offset="2" name="2">
- <bitfield name="ADDR_HI" low="0" high="31"/>
- </reg32>
+ <reg64 offset="1" name="ADDR" type="address"/>
<reg32 offset="3" name="3">
<!-- localsize is value minus one: -->
<bitfield name="LOCALSIZEX" low="2" high="11" type="uint"/>
@@ -2161,12 +2083,7 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
</doc>
</value>
</enum>
- <reg32 offset="0" name="0">
- <bitfield name="ADDR_LO" low="0" high="31"/>
- </reg32>
- <reg32 offset="1" name="1">
- <bitfield name="ADDR_HI" low="0" high="31"/>
- </reg32>
+ <reg64 offset="0" name="ADDR" type="address"/>
<reg32 offset="2" name="2">
<bitfield name="DWORDS" low="0" high="19" type="uint"/>
<bitfield name="TYPE" low="20" high="21" type="amble_type"/>
diff --git a/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml b/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml
index 4e5ac0f25dea..f41516dd0567 100644
--- a/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml
+++ b/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml
@@ -22,7 +22,16 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<reg32 offset="0x00018" name="GLBL_CTRL"/>
<reg32 offset="0x0001c" name="RBUF_CTRL"/>
<reg32 offset="0x00020" name="VREG_CTRL_0"/>
- <reg32 offset="0x00024" name="CTRL_0"/>
+ <reg32 offset="0x00024" name="CTRL_0">
+ <bitfield name="CLKSL_SHUTDOWNB" pos="7" type="boolean"/>
+ <bitfield name="DIGTOP_PWRDN_B" pos="6" type="boolean"/>
+ <bitfield name="PLL_SHUTDOWNB" pos="5" type="boolean"/>
+ <bitfield name="DLN3_SHUTDOWNB" pos="4" type="boolean"/>
+ <bitfield name="DLN2_SHUTDOWNB" pos="3" type="boolean"/>
+ <bitfield name="CLK_SHUTDOWNB" pos="2" type="boolean"/>
+ <bitfield name="DLN1_SHUTDOWNB" pos="1" type="boolean"/>
+ <bitfield name="DLN0_SHUTDOWNB" pos="0" type="boolean"/>
+ </reg32>
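+ <!--
+ The "B" suffix suggests these are active-low shutdown controls, i.e.
+ setting a bit releases the corresponding block from shutdown. As an
+ illustration (assumed usage, not taken from this file), writing 0x7f
+ sets bits 0-6 and so releases the four data lanes, the clock lane,
+ the PLL and the digital top, while leaving CLKSL_SHUTDOWNB (bit 7)
+ clear.
+ -->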
<reg32 offset="0x00028" name="CTRL_1"/>
<reg32 offset="0x0002c" name="CTRL_2"/>
<reg32 offset="0x00030" name="CTRL_3"/>
diff --git a/drivers/gpu/drm/msm/registers/gen_header.py b/drivers/gpu/drm/msm/registers/gen_header.py
index a409404627c7..1d603dadfabd 100644
--- a/drivers/gpu/drm/msm/registers/gen_header.py
+++ b/drivers/gpu/drm/msm/registers/gen_header.py
@@ -11,7 +11,6 @@ import collections
import argparse
import time
import datetime
-import re
class Error(Exception):
def __init__(self, message):
@@ -31,7 +30,7 @@ class Enum(object):
def names(self):
return [n for (n, value) in self.values]
- def dump(self):
+ def dump(self, is_deprecated):
use_hex = False
for (name, value) in self.values:
if value > 0x1000:
@@ -45,7 +44,7 @@ class Enum(object):
print("\t%s = %d," % (name, value))
print("};\n")
- def dump_pack_struct(self):
+ def dump_pack_struct(self, is_deprecated):
pass
class Field(object):
@@ -70,11 +69,11 @@ class Field(object):
raise parser.error("booleans should be 1 bit fields")
elif self.type == "float" and not (high - low == 31 or high - low == 15):
raise parser.error("floats should be 16 or 32 bit fields")
- elif not self.type in builtin_types and not self.type in parser.enums:
+ elif self.type not in builtin_types and self.type not in parser.enums:
raise parser.error("unknown type '%s'" % self.type)
def ctype(self, var_name):
- if self.type == None:
+ if self.type is None:
type = "uint32_t"
val = var_name
elif self.type == "boolean":
@@ -124,7 +123,7 @@ def field_name(reg, f):
name = f.name.lower()
else:
# We hit this path when a reg is defined with no bitset fields, i.e.
- # <reg32 offset="0x88db" name="RB_BLIT_DST_ARRAY_PITCH" low="0" high="28" shr="6" type="uint"/>
+ # <reg32 offset="0x88db" name="RB_RESOLVE_SYSTEM_BUFFER_ARRAY_PITCH" low="0" high="28" shr="6" type="uint"/>
name = reg.name.lower()
if (name in [ "double", "float", "int" ]) or not (name[0].isalpha()):
@@ -146,10 +145,23 @@ def indices_strides(indices):
"%s(i%d)" % (offset, idx)
for (idx, (ctype, stride, offset)) in enumerate(indices)])
+def is_number(str):
+ try:
+ int(str)
+ return True
+ except ValueError:
+ return False
+
+def sanitize_variant(variant):
+ if variant and "-" in variant:
+ return variant[:variant.index("-")]
+ return variant
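+
+# Illustrative behavior of sanitize_variant (examples only):
+#   sanitize_variant("A6XX-A7XX") -> "A6XX"
+#   sanitize_variant("A7XX-")     -> "A7XX"
+#   sanitize_variant("A6XX")      -> "A6XX"  (no range, returned as-is)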
+
class Bitset(object):
def __init__(self, name, template):
self.name = name
self.inline = False
+ self.reg = None
if template:
self.fields = template.fields[:]
else:
@@ -175,11 +187,7 @@ class Bitset(object):
print("#endif\n")
print(" return (struct fd_reg_pair) {")
- if reg.array:
- print(" .reg = REG_%s(__i)," % reg.full_name)
- else:
- print(" .reg = REG_%s," % reg.full_name)
-
+ print(" .reg = (uint32_t)%s," % reg.reg_offset())
print(" .value =")
for f in self.fields:
if f.type in [ "address", "waddress" ]:
@@ -204,7 +212,7 @@ class Bitset(object):
print(" };")
- def dump_pack_struct(self, reg=None):
+ def dump_pack_struct(self, is_deprecated, reg=None):
if not reg:
return
@@ -229,12 +237,15 @@ class Bitset(object):
tab_to(" uint32_t", "dword;")
print("};\n")
+ depcrstr = ""
+ if is_deprecated:
+ depcrstr = " FD_DEPRECATED"
if reg.array:
- print("static inline struct fd_reg_pair\npack_%s(uint32_t __i, struct %s fields)\n{" %
- (prefix, prefix))
+ print("static inline%s struct fd_reg_pair\npack_%s(uint32_t __i, struct %s fields)\n{" %
+ (depcrstr, prefix, prefix))
else:
- print("static inline struct fd_reg_pair\npack_%s(struct %s fields)\n{" %
- (prefix, prefix))
+ print("static inline%s struct fd_reg_pair\npack_%s(struct %s fields)\n{" %
+ (depcrstr, prefix, prefix))
self.dump_regpair_builder(reg)
@@ -253,18 +264,23 @@ class Bitset(object):
(prefix, prefix, prefix, skip))
- def dump(self, prefix=None):
- if prefix == None:
+ def dump(self, is_deprecated, prefix=None):
+ if prefix is None:
prefix = self.name
+ if self.reg and self.reg.bit_size == 64:
+ print("static inline uint32_t %s_LO(uint32_t val)\n{" % prefix)
+ print("\treturn val;\n}")
+ print("static inline uint32_t %s_HI(uint32_t val)\n{" % prefix)
+ print("\treturn val;\n}")
for f in self.fields:
if f.name:
name = prefix + "_" + f.name
else:
name = prefix
- if not f.name and f.low == 0 and f.shr == 0 and not f.type in ["float", "fixed", "ufixed"]:
+ if not f.name and f.low == 0 and f.shr == 0 and f.type not in ["float", "fixed", "ufixed"]:
pass
- elif f.type == "boolean" or (f.type == None and f.low == f.high):
+ elif f.type == "boolean" or (f.type is None and f.low == f.high):
tab_to("#define %s" % name, "0x%08x" % (1 << f.low))
else:
tab_to("#define %s__MASK" % name, "0x%08x" % mask(f.low, f.high))
@@ -286,6 +302,7 @@ class Array(object):
self.domain = domain
self.variant = variant
self.parent = parent
+ self.children = []
if self.parent:
self.name = self.parent.name + "_" + self.local_name
else:
@@ -337,12 +354,15 @@ class Array(object):
offset += self.parent.total_offset()
return offset
- def dump(self):
+ def dump(self, is_deprecated):
+ depcrstr = ""
+ if is_deprecated:
+ depcrstr = " FD_DEPRECATED"
proto = indices_varlist(self.indices())
strides = indices_strides(self.indices())
array_offset = self.total_offset()
if self.fixed_offsets:
- print("static inline uint32_t __offset_%s(%s idx)" % (self.local_name, self.index_ctype()))
+ print("static inline%s uint32_t __offset_%s(%s idx)" % (depcrstr, self.local_name, self.index_ctype()))
print("{\n\tswitch (idx) {")
if self.index_type:
for val, offset in zip(self.index_type.names(), self.offsets):
@@ -357,7 +377,7 @@ class Array(object):
else:
tab_to("#define REG_%s_%s(%s)" % (self.domain, self.name, proto), "(0x%08x + %s )\n" % (array_offset, strides))
- def dump_pack_struct(self):
+ def dump_pack_struct(self, is_deprecated):
pass
def dump_regpair_builder(self):
@@ -373,6 +393,7 @@ class Reg(object):
self.bit_size = bit_size
if array:
self.name = array.name + "_" + self.name
+ array.children.append(self)
self.full_name = self.domain + "_" + self.name
if "stride" in attrs:
self.stride = int(attrs["stride"], 0)
@@ -397,25 +418,34 @@ class Reg(object):
else:
return self.offset
- def dump(self):
+ def reg_offset(self):
+ if self.array:
+ offset = self.array.offset + self.offset
+ return "(0x%08x + 0x%x*__i)" % (offset, self.array.stride)
+ return "0x%08x" % self.offset
+
+ def dump(self, is_deprecated):
+ depcrstr = ""
+ if is_deprecated:
+ depcrstr = " FD_DEPRECATED "
proto = indices_prototype(self.indices())
strides = indices_strides(self.indices())
offset = self.total_offset()
if proto == '':
tab_to("#define REG_%s" % self.full_name, "0x%08x" % offset)
else:
- print("static inline uint32_t REG_%s(%s) { return 0x%08x + %s; }" % (self.full_name, proto, offset, strides))
+ print("static inline%s uint32_t REG_%s(%s) { return 0x%08x + %s; }" % (depcrstr, self.full_name, proto, offset, strides))
if self.bitset.inline:
- self.bitset.dump(self.full_name)
+ self.bitset.dump(is_deprecated, self.full_name)
+ print("")
- def dump_pack_struct(self):
+ def dump_pack_struct(self, is_deprecated):
if self.bitset.inline:
- self.bitset.dump_pack_struct(self)
+ self.bitset.dump_pack_struct(is_deprecated, self)
def dump_regpair_builder(self):
- if self.bitset.inline:
- self.bitset.dump_regpair_builder(self)
+ self.bitset.dump_regpair_builder(self)
def dump_py(self):
print("\tREG_%s = 0x%08x" % (self.full_name, self.offset))
@@ -444,9 +474,6 @@ class Parser(object):
self.variants = set()
self.file = []
self.xml_files = []
- self.copyright_year = None
- self.authors = []
- self.license = None
def error(self, message):
parser, filename = self.stack[-1]
@@ -454,7 +481,7 @@ class Parser(object):
def prefix(self, variant=None):
if self.current_prefix_type == "variant" and variant:
- return variant
+ return sanitize_variant(variant)
elif self.current_stripe:
return self.current_stripe + "_" + self.current_domain
elif self.current_prefix:
@@ -500,15 +527,22 @@ class Parser(object):
return varset
def parse_variants(self, attrs):
- if not "variants" in attrs:
+ if "variants" not in attrs:
return None
- variant = attrs["variants"].split(",")[0]
- if "-" in variant:
- variant = variant[:variant.index("-")]
+ variant = attrs["variants"].split(",")[0]
varset = self.parse_varset(attrs)
- assert varset.has_name(variant)
+ if "-" in variant:
+ # if we have a range, validate that both the start and end
+ # of the range are valid enums:
+ start = variant[:variant.index("-")]
+ end = variant[variant.index("-") + 1:]
+ assert varset.has_name(start)
+ if end != "":
+ assert varset.has_name(end)
+ else:
+ assert varset.has_name(variant)
return variant
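+
+ # Worked examples: variants="A6XX-A7XX" validates both "A6XX" and
+ # "A7XX" against the varset; an open-ended variants="A7XX-" yields
+ # end == "" and so validates only "A7XX".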
@@ -572,9 +606,6 @@ class Parser(object):
error_str = str(xmlschema.error_log.filter_from_errors()[0])
raise self.error("Schema validation failed for: " + filename + "\n" + error_str)
except ImportError as e:
- if self.validate:
- raise e
-
print("lxml not found, skipping validation", file=sys.stderr)
def do_parse(self, filename):
@@ -620,6 +651,7 @@ class Parser(object):
self.current_reg = Reg(attrs, self.prefix(variant), self.current_array, bit_size)
self.current_reg.bitset = self.current_bitset
+ self.current_bitset.reg = self.current_reg
if len(self.stack) == 1:
self.file.append(self.current_reg)
@@ -643,7 +675,7 @@ class Parser(object):
elif name == "domain":
self.current_domain = attrs["name"]
if "prefix" in attrs:
- self.current_prefix = self.parse_variants(attrs)
+ self.current_prefix = sanitize_variant(self.parse_variants(attrs))
self.current_prefix_type = attrs["prefix"]
else:
self.current_prefix = None
@@ -651,7 +683,7 @@ class Parser(object):
if "varset" in attrs:
self.current_varset = self.enums[attrs["varset"]]
elif name == "stripe":
- self.current_stripe = self.parse_variants(attrs)
+ self.current_stripe = sanitize_variant(self.parse_variants(attrs))
elif name == "enum":
self.current_enum_value = 0
self.current_enum = Enum(attrs["name"])
@@ -686,10 +718,6 @@ class Parser(object):
self.parse_field(attrs["name"], attrs)
elif name == "database":
self.do_validate(attrs["xsi:schemaLocation"])
- elif name == "copyright":
- self.copyright_year = attrs["year"]
- elif name == "author":
- self.authors.append(attrs["name"] + " <" + attrs["email"] + "> " + attrs["name"])
def end_element(self, name):
if name == "domain":
@@ -703,11 +731,16 @@ class Parser(object):
elif name == "reg32":
self.current_reg = None
elif name == "array":
+ # if the array has no Reg children, push an implicit reg32:
+ if len(self.current_array.children) == 0:
+ attrs = {
+ "name": "REG",
+ "offset": "0",
+ }
+ self.parse_reg(attrs, 32)
self.current_array = self.current_array.parent
elif name == "enum":
self.current_enum = None
- elif name == "license":
- self.license = self.cdata
def character_data(self, data):
self.cdata += data
@@ -720,10 +753,10 @@ class Parser(object):
if variants:
for variant, vreg in variants.items():
if reg == vreg:
- d[(usage, variant)].append(reg)
+ d[(usage, sanitize_variant(variant))].append(reg)
else:
for variant in self.variants:
- d[(usage, variant)].append(reg)
+ d[(usage, sanitize_variant(variant))].append(reg)
print("#ifdef __cplusplus")
@@ -753,6 +786,9 @@ class Parser(object):
print("#endif")
+ def has_variants(self, reg):
+ return reg.name in self.variant_regs and not is_number(reg.name) and not is_number(reg.name[1:])
+
def dump(self):
enums = []
bitsets = []
@@ -766,7 +802,7 @@ class Parser(object):
regs.append(e)
for e in enums + bitsets + regs:
- e.dump()
+ e.dump(self.has_variants(e))
self.dump_reg_usages()
@@ -782,8 +818,7 @@ class Parser(object):
def dump_reg_variants(self, regname, variants):
- # Don't bother for things that only have a single variant:
- if len(variants) == 1:
+ if is_number(regname) or is_number(regname[1:]):
return
print("#ifdef __cplusplus")
print("struct __%s {" % regname)
@@ -834,11 +869,20 @@ class Parser(object):
xtravar = "__i, "
print("__%s(%sstruct __%s fields) {" % (regname, xtra, regname))
for variant in variants.keys():
- print(" if (%s == %s) {" % (varenum.upper(), variant))
+ if "-" in variant:
+ start = variant[:variant.index("-")]
+ end = variant[variant.index("-") + 1:]
+ if end != "":
+ print(" if ((%s >= %s) && (%s <= %s)) {" % (varenum.upper(), start, varenum.upper(), end))
+ else:
+ print(" if (%s >= %s) {" % (varenum.upper(), start))
+ else:
+ print(" if (%s == %s) {" % (varenum.upper(), variant))
reg = variants[variant]
reg.dump_regpair_builder()
print(" } else")
print(" assert(!\"invalid variant\");")
+ print(" return (struct fd_reg_pair){};")
print("}")
if bit_size == 64:
@@ -851,7 +895,7 @@ class Parser(object):
def dump_structs(self):
for e in self.file:
- e.dump_pack_struct()
+ e.dump_pack_struct(self.has_variants(e))
for regname in self.variant_regs:
self.dump_reg_variants(regname, self.variant_regs[regname])
@@ -868,33 +912,7 @@ def dump_c(args, guard, func):
print("#ifndef %s\n#define %s\n" % (guard, guard))
- print("""/* Autogenerated file, DO NOT EDIT manually!
-
-This file was generated by the rules-ng-ng gen_header.py tool in this git repository:
-http://gitlab.freedesktop.org/mesa/mesa/
-git clone https://gitlab.freedesktop.org/mesa/mesa.git
-
-The rules-ng-ng source files this header was generated from are:
-""")
- maxlen = 0
- for filepath in p.xml_files:
- new_filepath = re.sub("^.+drivers","drivers",filepath)
- maxlen = max(maxlen, len(new_filepath))
- for filepath in p.xml_files:
- pad = " " * (maxlen - len(new_filepath))
- filesize = str(os.path.getsize(filepath))
- filesize = " " * (7 - len(filesize)) + filesize
- filetime = time.ctime(os.path.getmtime(filepath))
- print("- " + new_filepath + pad + " (" + filesize + " bytes, from <stripped>)")
- if p.copyright_year:
- current_year = str(datetime.date.today().year)
- print()
- print("Copyright (C) %s-%s by the following authors:" % (p.copyright_year, current_year))
- for author in p.authors:
- print("- " + author)
- if p.license:
- print(p.license)
- print("*/")
+ print("/* Autogenerated file, DO NOT EDIT manually! */")
print()
print("#ifdef __KERNEL__")
@@ -912,9 +930,20 @@ The rules-ng-ng source files this header was generated from are:
print("#endif")
print()
+ print("#ifndef FD_NO_DEPRECATED_PACK")
+ print("#define FD_DEPRECATED __attribute__((deprecated))")
+ print("#else")
+ print("#define FD_DEPRECATED")
+ print("#endif")
+ print()
+
func(p)
- print("\n#endif /* %s */" % guard)
+ print()
+ print("#undef FD_DEPRECATED")
+ print()
+
+ print("#endif /* %s */" % guard)
def dump_c_defines(args):
@@ -931,7 +960,7 @@ def dump_py_defines(args):
p = Parser()
try:
- p.parse(args.rnn, args.xml)
+ p.parse(args.rnn, args.xml, args.validate)
except Error as e:
print(e, file=sys.stderr)
exit(1)
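
The net effect of the FD_DEPRECATED changes is easiest to see in the generated output. Below is a minimal sketch of what a header produced after this patch might contain; the register name and offsets are hypothetical, not taken from a real rules-ng-ng database:

    #ifndef FD_NO_DEPRECATED_PACK
    #define FD_DEPRECATED __attribute__((deprecated))
    #else
    #define FD_DEPRECATED
    #endif

    /* A register that has per-variant definitions gets its single-variant
     * inline helper tagged, so callers still on the old pack path get a
     * compiler warning: */
    static inline FD_DEPRECATED uint32_t REG_FOO_BAR(uint32_t i0)
    {
        return 0x00001000 + 0x4*i0;
    }

    /* ... rest of the generated code ... */

    #undef FD_DEPRECATED

Consumers that have not migrated yet can define FD_NO_DEPRECATED_PACK before including the header to keep building warning-free.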
diff --git a/drivers/gpu/drm/mxsfb/lcdif_kms.c b/drivers/gpu/drm/mxsfb/lcdif_kms.c
index dbd42cc1da87..1c3b33be6c40 100644
--- a/drivers/gpu/drm/mxsfb/lcdif_kms.c
+++ b/drivers/gpu/drm/mxsfb/lcdif_kms.c
@@ -433,7 +433,6 @@ static int lcdif_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_bridge_state *bridge_state;
- struct drm_bridge *bridge;
u32 bus_format, bus_flags;
bool format_set = false, flags_set = false;
int ret, i;
@@ -453,7 +452,8 @@ static int lcdif_crtc_atomic_check(struct drm_crtc *crtc,
encoder = connector_state->best_encoder;
- bridge = drm_bridge_chain_get_first_bridge(encoder);
+ struct drm_bridge *bridge __free(drm_bridge_put) =
+ drm_bridge_chain_get_first_bridge(encoder);
if (!bridge)
continue;
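
The lcdif change adopts the kernel's scope-based cleanup support: annotating the variable with __free(drm_bridge_put) makes drm_bridge_put() run automatically when the pointer goes out of scope, covering the early continue as well as the normal path. A minimal sketch of the general pattern, using a hypothetical my_get()/my_put() pair rather than the DRM bridge API:

    #include <linux/cleanup.h>

    /* DEFINE_FREE() binds a cleanup expression to a name; _T is the
     * variable being cleaned up when it leaves scope. */
    DEFINE_FREE(my_put, struct my_obj *, if (_T) my_put(_T))

    static int use_obj(void)
    {
        struct my_obj *obj __free(my_put) = my_get();

        if (!obj)
            return -ENOENT;

        /* ... use obj; my_put(obj) runs on every return path ... */
        return 0;
    }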
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index d1587639ebb0..c88776d1e784 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -102,14 +102,6 @@ config DRM_NOUVEAU_SVM
Say Y here if you want to enable experimental support for
Shared Virtual Memory (SVM).
-config DRM_NOUVEAU_GSP_DEFAULT
- bool "Use GSP firmware for Turing/Ampere (needs firmware installed)"
- depends on DRM_NOUVEAU
- default n
- help
- Say Y here if you want to use the GSP codepaths by default on
- Turing and Ampere GPUs.
-
config DRM_NOUVEAU_CH7006
tristate "Chrontel ch7006 TV encoder"
depends on DRM_NOUVEAU
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.h b/drivers/gpu/drm/nouveau/nouveau_chan.h
index 561877725aac..bb34b0a6082d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.h
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.h
@@ -31,8 +31,6 @@ struct nouveau_channel {
u64 addr;
} push;
- /* TODO: this will be reworked in the near future */
- bool accel_done;
void *fence;
struct {
int max;
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index 0e27b76d1e1c..c25ef9a54b9f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -90,7 +90,6 @@ FIRE_RING(struct nouveau_channel *chan)
{
if (chan->dma.cur == chan->dma.put)
return;
- chan->accel_done = true;
WRITE_PUT(chan->dma.cur);
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
index a5ce8eb4a3be..8d5853deeee4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -30,10 +30,7 @@ static int nouveau_platform_probe(struct platform_device *pdev)
func = of_device_get_match_data(&pdev->dev);
drm = nouveau_platform_device_create(func, pdev, &device);
- if (IS_ERR(drm))
- return PTR_ERR(drm);
-
- return 0;
+ return PTR_ERR_OR_ZERO(drm);
}
static void nouveau_platform_remove(struct platform_device *pdev)
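
PTR_ERR_OR_ZERO(), used above, is a small helper from <linux/err.h> that collapses the usual "return the error code for an error pointer, otherwise return 0" tail into one expression; it is defined along these lines:

    static inline int __must_check PTR_ERR_OR_ZERO(__force const void *ptr)
    {
        if (IS_ERR(ptr))
            return PTR_ERR(ptr);
        else
            return 0;
    }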
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index cd95446d6851..caab60fc62f6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -108,9 +108,21 @@ struct dma_buf *nouveau_gem_prime_export(struct drm_gem_object *gobj,
int flags)
{
struct nouveau_bo *nvbo = nouveau_gem_object(gobj);
+ struct ttm_operation_ctx ctx = {
+ .interruptible = true,
+ .no_wait_gpu = true,
+		/* We opt to avoid OOM on system page allocations */
+ .gfp_retry_mayfail = true,
+ .allow_res_evict = false,
+ };
+ int ret;
if (nvbo->no_share)
return ERR_PTR(-EPERM);
+ ret = ttm_bo_setup_export(&nvbo->bo, &ctx);
+ if (ret)
+ return ERR_PTR(ret);
+
return drm_gem_prime_export(gobj, flags);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
index 48f105239f42..79eefdfd08a2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
@@ -1276,6 +1276,12 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job,
break;
case OP_MAP: {
struct nouveau_uvma_region *reg;
+ struct drm_gpuvm_map_req map_req = {
+ .map.va.addr = op->va.addr,
+ .map.va.range = op->va.range,
+ .map.gem.obj = op->gem.obj,
+ .map.gem.offset = op->gem.offset,
+ };
reg = nouveau_uvma_region_find_first(uvmm,
op->va.addr,
@@ -1301,10 +1307,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job,
}
op->ops = drm_gpuvm_sm_map_ops_create(&uvmm->base,
- op->va.addr,
- op->va.range,
- op->gem.obj,
- op->gem.offset);
+ &map_req);
if (IS_ERR(op->ops)) {
ret = PTR_ERR(op->ops);
goto unwind_continue;
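
Passing a struct drm_gpuvm_map_req instead of four scalar arguments lets drm_gpuvm_sm_map_ops_create() grow additional request fields later without churning every caller; the values themselves (va.addr, va.range, gem.obj, gem.offset) are carried over unchanged from the old signature.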
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/enum.c b/drivers/gpu/drm/nouveau/nvkm/core/enum.c
index b9581feb24cc..a23b40b27b81 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/enum.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/enum.c
@@ -44,7 +44,7 @@ nvkm_snprintbf(char *data, int size, const struct nvkm_bitfield *bf, u32 value)
bool space = false;
while (size >= 1 && bf->name) {
if (value & bf->mask) {
- int this = snprintf(data, size, "%s%s",
+ int this = scnprintf(data, size, "%s%s",
space ? " " : "", bf->name);
size -= this;
data += this;
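
The snprintf() to scnprintf() switch matters because the two differ in their return value: snprintf() returns the length the output would have had without truncation, which can exceed the space left in the buffer, while scnprintf() returns the number of characters actually written. nvkm_snprintbf() uses the return value to advance through the buffer, so only the latter keeps the pointer arithmetic in bounds. A sketch of the idiom (hypothetical buffer, not the nvkm code):

    char buf[8];
    char *p = buf;
    int size = sizeof(buf);
    int n;

    n = scnprintf(p, size, "%s", "a-rather-long-flag");
    /* n is at most size - 1, so p stays inside buf; with snprintf(),
     * n would be the full untruncated length and p would be advanced
     * past the end of the buffer. */
    size -= n;
    p += n;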
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c
index eb765da0876e..35d1fcef520b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c
@@ -41,8 +41,8 @@ ad102_gsp = {
static struct nvkm_gsp_fwif
ad102_gsps[] = {
- { 1, tu102_gsp_load, &ad102_gsp, &r570_rm_ga102, "570.144", true },
- { 0, tu102_gsp_load, &ad102_gsp, &r535_rm_ga102, "535.113.01", true },
+ { 1, tu102_gsp_load, &ad102_gsp, &r570_rm_ga102, "570.144" },
+ { 0, tu102_gsp_load, &ad102_gsp, &r535_rm_ga102, "535.113.01" },
{}
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
index d23243a83a4c..7ccb41761066 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
@@ -138,8 +138,10 @@ nvkm_gsp_new_(const struct nvkm_gsp_fwif *fwif, struct nvkm_device *device,
nvkm_subdev_ctor(&nvkm_gsp, device, type, inst, &gsp->subdev);
fwif = nvkm_firmware_load(&gsp->subdev, fwif, "Gsp", gsp);
- if (IS_ERR(fwif))
+ if (IS_ERR(fwif)) {
+		nvkm_error(&gsp->subdev, "Failed to load required firmware for device\n");
return PTR_ERR(fwif);
+ }
gsp->func = fwif->func;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb100.c
index 12a3f2c1ed82..1b3b31b95ce4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb100.c
@@ -20,7 +20,7 @@ gb100_gsp = {
static struct nvkm_gsp_fwif
gb100_gsps[] = {
- { 0, gh100_gsp_load, &gb100_gsp, &r570_rm_gb10x, "570.144", true },
+ { 0, gh100_gsp_load, &gb100_gsp, &r570_rm_gb10x, "570.144" },
{}
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb202.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb202.c
index c1d718172ddf..51384c63148c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb202.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb202.c
@@ -20,7 +20,7 @@ gb202_gsp = {
static struct nvkm_gsp_fwif
gb202_gsps[] = {
- { 0, gh100_gsp_load, &gb202_gsp, &r570_rm_gb20x, "570.144", true },
+ { 0, gh100_gsp_load, &gb202_gsp, &r570_rm_gb20x, "570.144" },
{}
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c
index ce31e8248807..b0dd5fce7bad 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c
@@ -344,7 +344,7 @@ done:
static struct nvkm_gsp_fwif
gh100_gsps[] = {
- { 0, gh100_gsp_load, &gh100_gsp, &r570_rm_gh100, "570.144", true },
+ { 0, gh100_gsp_load, &gh100_gsp, &r570_rm_gh100, "570.144" },
{}
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
index 4f14e85fc69e..c3494b7ac572 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
@@ -14,7 +14,6 @@ struct nvkm_gsp_fwif {
const struct nvkm_gsp_func *func;
const struct nvkm_rm_impl *rm;
const char *ver;
- bool enable;
};
int nvkm_gsp_load_fw(struct nvkm_gsp *, const char *name, const char *ver,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c
index 588cb4ab85cb..32e6a065d6d7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c
@@ -582,10 +582,13 @@ struct nv_gsp_registry_entries {
* RMSecBusResetEnable - enables PCI secondary bus reset
* RMForcePcieConfigSave - forces GSP-RM to preserve PCI configuration
* registers on any PCI reset.
+ * RMDevidCheckIgnore - allows GSP-RM to boot even if the PCI dev ID
+ * is not found in the internal product name database.
*/
static const struct nv_gsp_registry_entries r535_registry_entries[] = {
{ "RMSecBusResetEnable", 1 },
{ "RMForcePcieConfigSave", 1 },
+ { "RMDevidCheckIgnore", 1 },
};
#define NV_GSP_REG_NUM_ENTRIES ARRAY_SIZE(r535_registry_entries)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c
index 58e233bc53b1..81e56da0474a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c
@@ -383,13 +383,9 @@ int
tu102_gsp_load_rm(struct nvkm_gsp *gsp, const struct nvkm_gsp_fwif *fwif)
{
struct nvkm_subdev *subdev = &gsp->subdev;
- bool enable_gsp = fwif->enable;
int ret;
-#if IS_ENABLED(CONFIG_DRM_NOUVEAU_GSP_DEFAULT)
- enable_gsp = true;
-#endif
- if (!nvkm_boolopt(subdev->device->cfgopt, "NvGspRm", enable_gsp))
+ if (!nvkm_boolopt(subdev->device->cfgopt, "NvGspRm", true))
return -EINVAL;
ret = nvkm_gsp_load_fw(gsp, "gsp", fwif->ver, &gsp->fws.rm);
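
With the Kconfig knob removed, the nvkm_boolopt() default flips from fwif->enable to true, so GSP-RM firmware is now the default wherever it is available. Users can still opt out at runtime through the existing nouveau config option, e.g. booting with nouveau.config=NvGspRm=0; that parameter predates this patch and is only mentioned here for context.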
diff --git a/drivers/gpu/drm/nova/driver.rs b/drivers/gpu/drm/nova/driver.rs
index b28b2e05cc15..91b7380f83ab 100644
--- a/drivers/gpu/drm/nova/driver.rs
+++ b/drivers/gpu/drm/nova/driver.rs
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
-use kernel::{auxiliary, c_str, device::Core, drm, drm::gem, drm::ioctl, prelude::*, types::ARef};
+use kernel::{
+ auxiliary, c_str, device::Core, drm, drm::gem, drm::ioctl, prelude::*, sync::aref::ARef,
+};
use crate::file::File;
use crate::gem::NovaObject;
diff --git a/drivers/gpu/drm/nova/file.rs b/drivers/gpu/drm/nova/file.rs
index 4fe62cf98a23..90b9d2d0ec4a 100644
--- a/drivers/gpu/drm/nova/file.rs
+++ b/drivers/gpu/drm/nova/file.rs
@@ -2,13 +2,11 @@
use crate::driver::{NovaDevice, NovaDriver};
use crate::gem::NovaObject;
-use crate::uapi::{GemCreate, GemInfo, Getparam};
use kernel::{
alloc::flags::*,
drm::{self, gem::BaseObject},
pci,
prelude::*,
- types::Opaque,
uapi,
};
@@ -26,21 +24,19 @@ impl File {
/// IOCTL: get_param: Query GPU / driver metadata.
pub(crate) fn get_param(
dev: &NovaDevice,
- getparam: &Opaque<uapi::drm_nova_getparam>,
+ getparam: &mut uapi::drm_nova_getparam,
_file: &drm::File<File>,
) -> Result<u32> {
let adev = &dev.adev;
let parent = adev.parent().ok_or(ENOENT)?;
let pdev: &pci::Device = parent.try_into()?;
- let getparam: &Getparam = getparam.into();
- let value = match getparam.param() as u32 {
+ let value = match getparam.param as u32 {
uapi::NOVA_GETPARAM_VRAM_BAR_SIZE => pdev.resource_len(1)?,
_ => return Err(EINVAL),
};
- #[allow(clippy::useless_conversion)]
- getparam.set_value(value.into());
+ getparam.value = Into::<u64>::into(value);
Ok(0)
}
@@ -48,13 +44,12 @@ impl File {
/// IOCTL: gem_create: Create a new DRM GEM object.
pub(crate) fn gem_create(
dev: &NovaDevice,
- req: &Opaque<uapi::drm_nova_gem_create>,
+ req: &mut uapi::drm_nova_gem_create,
file: &drm::File<File>,
) -> Result<u32> {
- let req: &GemCreate = req.into();
- let obj = NovaObject::new(dev, req.size().try_into()?)?;
+ let obj = NovaObject::new(dev, req.size.try_into()?)?;
- req.set_handle(obj.create_handle(file)?);
+ req.handle = obj.create_handle(file)?;
Ok(0)
}
@@ -62,13 +57,12 @@ impl File {
/// IOCTL: gem_info: Query GEM metadata.
pub(crate) fn gem_info(
_dev: &NovaDevice,
- req: &Opaque<uapi::drm_nova_gem_info>,
+ req: &mut uapi::drm_nova_gem_info,
file: &drm::File<File>,
) -> Result<u32> {
- let req: &GemInfo = req.into();
- let bo = NovaObject::lookup_handle(file, req.handle())?;
+ let bo = NovaObject::lookup_handle(file, req.handle)?;
- req.set_size(bo.size().try_into()?);
+ req.size = bo.size().try_into()?;
Ok(0)
}
diff --git a/drivers/gpu/drm/nova/gem.rs b/drivers/gpu/drm/nova/gem.rs
index 33b62d21400c..2760ba4f3450 100644
--- a/drivers/gpu/drm/nova/gem.rs
+++ b/drivers/gpu/drm/nova/gem.rs
@@ -4,7 +4,7 @@ use kernel::{
drm,
drm::{gem, gem::BaseObject},
prelude::*,
- types::ARef,
+ sync::aref::ARef,
};
use crate::{
@@ -16,16 +16,14 @@ use crate::{
#[pin_data]
pub(crate) struct NovaObject {}
-impl gem::BaseDriverObject<gem::Object<NovaObject>> for NovaObject {
+impl gem::DriverObject for NovaObject {
+ type Driver = NovaDriver;
+
fn new(_dev: &NovaDevice, _size: usize) -> impl PinInit<Self, Error> {
try_pin_init!(NovaObject {})
}
}
-impl gem::DriverObject for NovaObject {
- type Driver = NovaDriver;
-}
-
impl NovaObject {
/// Create a new DRM GEM object.
pub(crate) fn new(dev: &NovaDevice, size: usize) -> Result<ARef<gem::Object<Self>>> {
diff --git a/drivers/gpu/drm/nova/nova.rs b/drivers/gpu/drm/nova/nova.rs
index 64fd670e99e1..8893e58ee0db 100644
--- a/drivers/gpu/drm/nova/nova.rs
+++ b/drivers/gpu/drm/nova/nova.rs
@@ -5,7 +5,6 @@
mod driver;
mod file;
mod gem;
-mod uapi;
use crate::driver::NovaDriver;
diff --git a/drivers/gpu/drm/nova/uapi.rs b/drivers/gpu/drm/nova/uapi.rs
deleted file mode 100644
index eb228a58d423..000000000000
--- a/drivers/gpu/drm/nova/uapi.rs
+++ /dev/null
@@ -1,61 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-use kernel::uapi;
-
-// TODO Work out some common infrastructure to avoid boilerplate code for uAPI abstractions.
-
-macro_rules! define_uapi_abstraction {
- ($name:ident <= $inner:ty) => {
- #[repr(transparent)]
- pub struct $name(::kernel::types::Opaque<$inner>);
-
- impl ::core::convert::From<&::kernel::types::Opaque<$inner>> for &$name {
- fn from(value: &::kernel::types::Opaque<$inner>) -> Self {
- // SAFETY: `Self` is a transparent wrapper of `$inner`.
- unsafe { ::core::mem::transmute(value) }
- }
- }
- };
-}
-
-define_uapi_abstraction!(Getparam <= uapi::drm_nova_getparam);
-
-impl Getparam {
- pub fn param(&self) -> u64 {
- // SAFETY: `self.get()` is a valid pointer to a `struct drm_nova_getparam`.
- unsafe { (*self.0.get()).param }
- }
-
- pub fn set_value(&self, v: u64) {
- // SAFETY: `self.get()` is a valid pointer to a `struct drm_nova_getparam`.
- unsafe { (*self.0.get()).value = v };
- }
-}
-
-define_uapi_abstraction!(GemCreate <= uapi::drm_nova_gem_create);
-
-impl GemCreate {
- pub fn size(&self) -> u64 {
- // SAFETY: `self.get()` is a valid pointer to a `struct drm_nova_gem_create`.
- unsafe { (*self.0.get()).size }
- }
-
- pub fn set_handle(&self, handle: u32) {
- // SAFETY: `self.get()` is a valid pointer to a `struct drm_nova_gem_create`.
- unsafe { (*self.0.get()).handle = handle };
- }
-}
-
-define_uapi_abstraction!(GemInfo <= uapi::drm_nova_gem_info);
-
-impl GemInfo {
- pub fn handle(&self) -> u32 {
- // SAFETY: `self.get()` is a valid pointer to a `struct drm_nova_gem_info`.
- unsafe { (*self.0.get()).handle }
- }
-
- pub fn set_size(&self, size: u64) {
- // SAFETY: `self.get()` is a valid pointer to a `struct drm_nova_gem_info`.
- unsafe { (*self.0.get()).size = size };
- }
-}
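
Since the IOCTL dispatch now hands the handlers a plain &mut uapi::... reference, the hand-rolled Opaque wrappers and their unsafe accessors above no longer earn their keep: the handlers in file.rs read and write the fields as ordinary struct members, and uapi.rs can be deleted outright.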
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 054b71dba6a7..794267f0f007 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -378,10 +378,8 @@ static int omap_display_id(struct omap_dss_device *output)
struct device_node *node = NULL;
if (output->bridge) {
- struct drm_bridge *bridge = output->bridge;
-
- while (drm_bridge_get_next_bridge(bridge))
- bridge = drm_bridge_get_next_bridge(bridge);
+ struct drm_bridge *bridge __free(drm_bridge_put) =
+ drm_bridge_chain_get_last_bridge(output->bridge->encoder);
node = bridge->of_node;
}
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 09b9f7ff9340..407c5f6a268b 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -215,6 +215,19 @@ config DRM_PANEL_HIMAX_HX8394
If M is selected the module will be called panel-himax-hx8394.
+config DRM_PANEL_HYDIS_HV101HD1
+ tristate "Hydis HV101HD1 panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+	  Say Y here if you want to enable support for the Hydis HV101HD1
+	  2-lane 1366x768 MIPI DSI panel found in the ASUS VivoTab RT TF600T.
+	  HV101HD1 is a color active matrix TFT LCD module using amorphous
+	  silicon TFTs (Thin Film Transistors) as active switching devices.
+
+	  If M is selected the module will be called panel-hydis-hv101hd1.
+
config DRM_PANEL_ILITEK_IL9322
tristate "Ilitek ILI9322 320x240 QVGA panels"
depends on OF && SPI
@@ -843,6 +856,17 @@ config DRM_PANEL_SAMSUNG_S6E8AA0
select DRM_MIPI_DSI
select VIDEOMODE_HELPERS
+config DRM_PANEL_SAMSUNG_S6E8AA5X01_AMS561RA01
+ tristate "Samsung AMS561RA01 panel with S6E8AA5X01 controller"
+ depends on GPIOLIB && OF && REGULATOR
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+	  Say Y here if you want to enable support for the Samsung AMS561RA01
+	  panel, which uses Samsung's S6E8AA5X01 controller. The panel has a
+ ~5.6 inch AMOLED display, and the controller is driven by the MIPI
+ DSI protocol with 4 lanes.
+
config DRM_PANEL_SAMSUNG_SOFEF00
tristate "Samsung sofef00/s6e3fc2x01 OnePlus 6/6T DSI cmd mode panels"
depends on OF
@@ -971,7 +995,7 @@ config DRM_PANEL_STARTEK_KD070FHFID015
depends on BACKLIGHT_CLASS_DEVICE
help
Say Y here if you want to enable support for STARTEK KD070FHFID015 DSI panel
- based on RENESAS-R69429 controller. The pannel is a 7-inch TFT LCD display
+ based on RENESAS-R69429 controller. The panel is a 7-inch TFT LCD display
with a resolution of 1024 x 600 pixels. It provides a MIPI DSI interface to
the host, a built-in LED backlight and touch controller.
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index 957555b49996..3615a761b44f 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_DRM_PANEL_HIMAX_HX83102) += panel-himax-hx83102.o
obj-$(CONFIG_DRM_PANEL_HIMAX_HX83112A) += panel-himax-hx83112a.o
obj-$(CONFIG_DRM_PANEL_HIMAX_HX83112B) += panel-himax-hx83112b.o
obj-$(CONFIG_DRM_PANEL_HIMAX_HX8394) += panel-himax-hx8394.o
+obj-$(CONFIG_DRM_PANEL_HYDIS_HV101HD1) += panel-hydis-hv101hd1.o
obj-$(CONFIG_DRM_PANEL_ILITEK_IL9322) += panel-ilitek-ili9322.o
obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9341) += panel-ilitek-ili9341.o
obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9805) += panel-ilitek-ili9805.o
@@ -87,6 +88,7 @@ obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63M0_DSI) += panel-samsung-s6e63m0-dsi.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS427AP24) += panel-samsung-s6e88a0-ams427ap24.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01) += panel-samsung-s6e88a0-ams452ef01.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0) += panel-samsung-s6e8aa0.o
+obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E8AA5X01_AMS561RA01) += panel-samsung-s6e8aa5x01-ams561ra01.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_SOFEF00) += panel-samsung-sofef00.o
obj-$(CONFIG_DRM_PANEL_SEIKO_43WVF1G) += panel-seiko-43wvf1g.o
obj-$(CONFIG_DRM_PANEL_SHARP_LQ101R1SX01) += panel-sharp-lq101r1sx01.o
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
index 9a56e208cbdd..62435e3cd9f4 100644
--- a/drivers/gpu/drm/panel/panel-edp.c
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -1736,10 +1736,11 @@ static const struct panel_delay delay_200_500_e50 = {
.enable = 50,
};
-static const struct panel_delay delay_200_500_e50_p2e200 = {
+static const struct panel_delay delay_200_500_e50_d50_p2e200 = {
.hpd_absent = 200,
.unprepare = 500,
.enable = 50,
+ .disable = 50,
.prepare_to_enable = 200,
};
@@ -1795,6 +1796,13 @@ static const struct panel_delay delay_200_500_e200_d10 = {
.disable = 10,
};
+static const struct panel_delay delay_200_500_e200_d50 = {
+ .hpd_absent = 200,
+ .unprepare = 500,
+ .enable = 200,
+ .disable = 50,
+};
+
static const struct panel_delay delay_200_150_e200 = {
.hpd_absent = 200,
.unprepare = 150,
@@ -1828,6 +1836,20 @@ static const struct panel_delay delay_50_500_e200_d200_po2e335 = {
.powered_on_to_enable = 335,
};
+static const struct panel_delay delay_200_500_e50_d100 = {
+ .hpd_absent = 200,
+ .unprepare = 500,
+ .enable = 50,
+ .disable = 100,
+};
+
+static const struct panel_delay delay_80_500_e50_d50 = {
+ .hpd_absent = 80,
+ .unprepare = 500,
+ .enable = 50,
+ .disable = 50,
+};
+
#define EDP_PANEL_ENTRY(vend_chr_0, vend_chr_1, vend_chr_2, product_id, _delay, _name) \
{ \
.ident = { \
@@ -1857,6 +1879,7 @@ static const struct panel_delay delay_50_500_e200_d200_po2e335 = {
* Sort first by vendor, then by product ID.
*/
static const struct edp_panel_entry edp_panels[] = {
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x04a4, &delay_200_500_e50, "B122UAN01.0"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x105c, &delay_200_500_e50, "B116XTN01.0"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x1062, &delay_200_500_e50, "B120XAN01.0"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x125c, &delay_200_500_e50, "Unknown"),
@@ -1875,6 +1898,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY2('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01.0",
&auo_b116xa3_mode),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x435c, &delay_200_500_e50, "Unknown"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x52b0, &delay_200_500_e50, "B116XAK02.0"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x582d, &delay_200_500_e50, "B133UAN01.0"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x615c, &delay_200_500_e50, "B116XAN06.1"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x635c, &delay_200_500_e50, "B116XAN06.3"),
@@ -1882,10 +1906,12 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('A', 'U', 'O', 0x723c, &delay_200_500_e50, "B140XTN07.2"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x73aa, &delay_200_500_e50, "B116XTN02.3"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x8594, &delay_200_500_e50, "B133UAN01.0"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x8bba, &delay_200_500_e50, "B140UAN08.5"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xa199, &delay_200_500_e50, "B116XAN06.1"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xa7b3, &delay_200_500_e50, "B140UAN04.4"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xc4b4, &delay_200_500_e50, "B116XAT04.1"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xc9a8, &delay_200_500_e50, "B140QAN08.H"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0xcdba, &delay_200_500_e50, "B140UAX01.2"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xd497, &delay_200_500_e50, "B120XAN01.0"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xf390, &delay_200_500_e50, "B140XTN07.7"),
@@ -1934,21 +1960,25 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('B', 'O', 'E', 0x09dd, &delay_200_500_e50, "NT116WHM-N21"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a1b, &delay_200_500_e50, "NV133WUM-N63"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a36, &delay_200_500_e200, "Unknown"),
- EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a3e, &delay_200_500_e80, "NV116WHM-N49"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a3e, &delay_200_500_e80_d50, "NV116WHM-N49"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a5d, &delay_200_500_e50, "NV116WHM-N45"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a6a, &delay_200_500_e80, "NV140WUM-N44"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0ac5, &delay_200_500_e50, "NV116WHM-N4C"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0ae8, &delay_200_500_e50_p2e80, "NV140WUM-N41"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b09, &delay_200_500_e50_po2e200, "NV140FHM-NZ"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b1e, &delay_200_500_e80, "NE140QDM-N6A"),
- EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b34, &delay_200_500_e80, "NV122WUM-N41"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b34, &delay_200_500_e80_d50, "NV122WUM-N41"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b43, &delay_200_500_e200, "NV140FHM-T09"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b56, &delay_200_500_e80, "NT140FHM-N47"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b66, &delay_200_500_e80, "NE140WUM-N6G"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0c20, &delay_200_500_e80, "NT140FHM-N47"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0c93, &delay_200_500_e200, "Unknown"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cb6, &delay_200_500_e200, "NT116WHM-N44"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cf6, &delay_200_500_e200, "NV140WUM-N64"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cfa, &delay_200_500_e50, "NV116WHM-A4D"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0d45, &delay_200_500_e80, "NV116WHM-N4B"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0d73, &delay_200_500_e80, "NE140WUM-N6S"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0ddf, &delay_200_500_e80, "NV116WHM-T01"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1130, &delay_200_500_e50, "N116BGE-EB2"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1132, &delay_200_500_e80_d50, "N116BGE-EA2"),
@@ -1966,27 +1996,36 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('C', 'M', 'N', 0x115b, &delay_200_500_e80_d50, "N116BCN-EB1"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x115d, &delay_200_500_e80_d50, "N116BCA-EA2"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x115e, &delay_200_500_e80_d50, "N116BCA-EA1"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x115f, &delay_200_500_e80_d50, "N116BCL-EAK"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1160, &delay_200_500_e80_d50, "N116BCJ-EAK"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1161, &delay_200_500_e80, "N116BCP-EA2"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1163, &delay_200_500_e80_d50, "N116BCJ-EAK"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1247, &delay_200_500_e80_d50, "N120ACA-EA1"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x124c, &delay_200_500_e80_d50, "N122JCA-ENK"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x142b, &delay_200_500_e80_d50, "N140HCA-EAC"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x142e, &delay_200_500_e80_d50, "N140BGA-EA4"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x1441, &delay_200_500_e80_d50, "N140JCA-ELK"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x144f, &delay_200_500_e80_d50, "N140HGA-EA1"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1468, &delay_200_500_e80, "N140HGA-EA1"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x14a8, &delay_200_500_e80, "N140JCA-ELP"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x14d4, &delay_200_500_e80_d50, "N140HCA-EAC"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x14d6, &delay_200_500_e80_d50, "N140BGA-EA4"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x14e5, &delay_200_500_e80_d50, "N140HGA-EA1"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x162b, &delay_200_500_e80_d50, "N160JCE-ELL"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x7402, &delay_200_500_e200_d50, "N116BCA-EAK"),
- EDP_PANEL_ENTRY('C', 'S', 'O', 0x1200, &delay_200_500_e50_p2e200, "MNC207QS1-1"),
- EDP_PANEL_ENTRY('C', 'S', 'O', 0x1413, &delay_200_500_e50_p2e200, "MNE007JA1-2"),
+ EDP_PANEL_ENTRY('C', 'S', 'O', 0x1200, &delay_200_500_e50_d50_p2e200, "MNC207QS1-1"),
+ EDP_PANEL_ENTRY('C', 'S', 'O', 0x1413, &delay_200_500_e50_d50_p2e200, "MNE007JA1-2"),
EDP_PANEL_ENTRY('C', 'S', 'W', 0x1100, &delay_200_500_e80_d50, "MNB601LS1-1"),
EDP_PANEL_ENTRY('C', 'S', 'W', 0x1103, &delay_200_500_e80_d50, "MNB601LS1-3"),
- EDP_PANEL_ENTRY('C', 'S', 'W', 0x1104, &delay_200_500_e50, "MNB601LS1-4"),
+ EDP_PANEL_ENTRY('C', 'S', 'W', 0x1104, &delay_200_500_e50_d100, "MNB601LS1-4"),
+ EDP_PANEL_ENTRY('C', 'S', 'W', 0x143f, &delay_200_500_e50, "MNE007QS3-6"),
EDP_PANEL_ENTRY('C', 'S', 'W', 0x1448, &delay_200_500_e50, "MNE007QS3-7"),
EDP_PANEL_ENTRY('C', 'S', 'W', 0x1457, &delay_80_500_e80_p2e200, "MNE007QS3-8"),
+ EDP_PANEL_ENTRY('C', 'S', 'W', 0x1462, &delay_200_500_e50, "MNE007QS5-2"),
+ EDP_PANEL_ENTRY('C', 'S', 'W', 0x1468, &delay_200_500_e50, "MNE007QB2-2"),
+ EDP_PANEL_ENTRY('C', 'S', 'W', 0x146e, &delay_80_500_e50_d50, "MNE007QB3-1"),
EDP_PANEL_ENTRY('E', 'T', 'C', 0x0000, &delay_50_500_e200_d200_po2e335, "LP079QX1-SP0V"),
@@ -2027,12 +2066,16 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('S', 'H', 'P', 0x1523, &delay_80_500_e50, "LQ140M1JW46"),
EDP_PANEL_ENTRY('S', 'H', 'P', 0x153a, &delay_200_500_e50, "LQ140T1JH01"),
EDP_PANEL_ENTRY('S', 'H', 'P', 0x154c, &delay_200_500_p2e100, "LQ116M1JW10"),
+ EDP_PANEL_ENTRY('S', 'H', 'P', 0x158f, &delay_200_500_p2e100, "LQ134Z1"),
EDP_PANEL_ENTRY('S', 'H', 'P', 0x1593, &delay_200_500_p2e100, "LQ134N1"),
EDP_PANEL_ENTRY('S', 'T', 'A', 0x0004, &delay_200_500_e200, "116KHD024006"),
EDP_PANEL_ENTRY('S', 'T', 'A', 0x0009, &delay_200_500_e250, "116QHD024002"),
EDP_PANEL_ENTRY('S', 'T', 'A', 0x0100, &delay_100_500_e200, "2081116HHD028001-51D"),
+ EDP_PANEL_ENTRY('T', 'M', 'A', 0x0811, &delay_200_500_e80_d50, "TM140VDXP01-04"),
+ EDP_PANEL_ENTRY('T', 'M', 'A', 0x2094, &delay_200_500_e50_d100, "TL140VDMS03-01"),
+
	{ /* sentinel */ }
};
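
For reference when reading the table updates above: the panel_delay variable names encode their field values in milliseconds, following the pattern delay_<hpd_absent>_<unprepare>_e<enable>[_d<disable>][_p2e<prepare_to_enable>][_po2e<powered_on_to_enable>]. For example, the newly added delay_80_500_e50_d50 is hpd_absent = 80, unprepare = 500, enable = 50 and disable = 50.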
diff --git a/drivers/gpu/drm/panel/panel-himax-hx8279.c b/drivers/gpu/drm/panel/panel-himax-hx8279.c
index fb302d1f91b9..9e443c719843 100644
--- a/drivers/gpu/drm/panel/panel-himax-hx8279.c
+++ b/drivers/gpu/drm/panel/panel-himax-hx8279.c
@@ -935,7 +935,7 @@ static int hx8279_check_dig_gamma(struct hx8279 *hx, struct device *dev, const u
j++;
x++;
} while (x < 4);
- };
+ }
return 0;
}
diff --git a/drivers/gpu/drm/panel/panel-hydis-hv101hd1.c b/drivers/gpu/drm/panel/panel-hydis-hv101hd1.c
new file mode 100644
index 000000000000..46426c388932
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-hydis-hv101hd1.c
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/array_size.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+struct hv101hd1 {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ struct regulator_bulk_data *supplies;
+};
+
+static const struct regulator_bulk_data hv101hd1_supplies[] = {
+ { .supply = "vdd" },
+ { .supply = "vio" },
+};
+
+static inline struct hv101hd1 *to_hv101hd1(struct drm_panel *panel)
+{
+ return container_of(panel, struct hv101hd1, panel);
+}
+
+static int hv101hd1_prepare(struct drm_panel *panel)
+{
+ struct hv101hd1 *hv = to_hv101hd1(panel);
+ struct mipi_dsi_multi_context ctx = { .dsi = hv->dsi };
+ struct device *dev = &hv->dsi->dev;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(hv101hd1_supplies), hv->supplies);
+ if (ret) {
+ dev_err(dev, "error enabling regulators (%d)\n", ret);
+ return ret;
+ }
+
+ mipi_dsi_dcs_exit_sleep_mode_multi(&ctx);
+ mipi_dsi_msleep(&ctx, 20);
+
+ mipi_dsi_dcs_set_display_on_multi(&ctx);
+ mipi_dsi_msleep(&ctx, 20);
+
+ return 0;
+}
+
+static int hv101hd1_disable(struct drm_panel *panel)
+{
+ struct hv101hd1 *hv = to_hv101hd1(panel);
+ struct mipi_dsi_multi_context ctx = { .dsi = hv->dsi };
+
+ mipi_dsi_dcs_set_display_off_multi(&ctx);
+ mipi_dsi_msleep(&ctx, 120);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&ctx);
+ mipi_dsi_msleep(&ctx, 20);
+
+ return 0;
+}
+
+static int hv101hd1_unprepare(struct drm_panel *panel)
+{
+ struct hv101hd1 *hv = to_hv101hd1(panel);
+
+ return regulator_bulk_disable(ARRAY_SIZE(hv101hd1_supplies),
+ hv->supplies);
+}
+
+static const struct drm_display_mode hv101hd1_mode = {
+ .clock = (1366 + 74 + 36 + 24) * (768 + 21 + 7 + 4) * 60 / 1000,
+ .hdisplay = 1366,
+ .hsync_start = 1366 + 74,
+ .hsync_end = 1366 + 74 + 36,
+ .htotal = 1366 + 74 + 36 + 24,
+ .vdisplay = 768,
+ .vsync_start = 768 + 21,
+ .vsync_end = 768 + 21 + 7,
+ .vtotal = 768 + 21 + 7 + 4,
+ .width_mm = 140,
+ .height_mm = 220,
+};
+
+static int hv101hd1_get_modes(struct drm_panel *panel, struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &hv101hd1_mode);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs hv101hd1_panel_funcs = {
+ .prepare = hv101hd1_prepare,
+ .disable = hv101hd1_disable,
+ .unprepare = hv101hd1_unprepare,
+ .get_modes = hv101hd1_get_modes,
+};
+
+static int hv101hd1_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct hv101hd1 *hv;
+ int ret;
+
+ hv = devm_drm_panel_alloc(dev, struct hv101hd1, panel,
+ &hv101hd1_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(hv))
+ return PTR_ERR(hv);
+
+ ret = devm_regulator_bulk_get_const(dev, ARRAY_SIZE(hv101hd1_supplies),
+ hv101hd1_supplies, &hv->supplies);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to get regulators\n");
+
+ hv->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, hv);
+
+ dsi->lanes = 2;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_LPM;
+
+ ret = drm_panel_of_backlight(&hv->panel);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get backlight\n");
+
+ drm_panel_add(&hv->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret) {
+ drm_panel_remove(&hv->panel);
+ return dev_err_probe(dev, ret, "Failed to attach to DSI host\n");
+ }
+
+ return 0;
+}
+
+static void hv101hd1_remove(struct mipi_dsi_device *dsi)
+{
+ struct hv101hd1 *hv = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(&dsi->dev,
+ "Failed to detach from DSI host: %d\n", ret);
+
+ drm_panel_remove(&hv->panel);
+}
+
+static const struct of_device_id hv101hd1_of_match[] = {
+ { .compatible = "hydis,hv101hd1" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, hv101hd1_of_match);
+
+static struct mipi_dsi_driver hv101hd1_driver = {
+ .driver = {
+ .name = "panel-hv101hd1",
+ .of_match_table = hv101hd1_of_match,
+ },
+ .probe = hv101hd1_probe,
+ .remove = hv101hd1_remove,
+};
+module_mipi_dsi_driver(hv101hd1_driver);
+
+MODULE_AUTHOR("Svyatoslav Ryhel <clamor95@gmail.com>");
+MODULE_DESCRIPTION("DRM driver for Hydis HV101HD1 panel");
+MODULE_LICENSE("GPL");
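
The new driver is written against the mipi_dsi_multi_context helpers: every *_multi() call becomes a no-op once ctx.accum_err is set, so a command sequence can be written straight-line with a single error check at the end instead of one per call. The pattern in isolation, assuming an existing struct mipi_dsi_device *dsi:

    struct mipi_dsi_multi_context ctx = { .dsi = dsi };

    mipi_dsi_dcs_exit_sleep_mode_multi(&ctx);
    mipi_dsi_msleep(&ctx, 120);           /* skipped if a prior call failed */
    mipi_dsi_dcs_set_display_on_multi(&ctx);

    return ctx.accum_err;                 /* 0 on success, first error otherwise */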
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
index ac433345a179..ad4993b2f92a 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
@@ -1417,6 +1417,200 @@ static const struct ili9881c_instr rpi_7inch_init[] = {
ILI9881C_COMMAND_INSTR(0xD3, 0x39),
};
+static const struct ili9881c_instr bsd1218_a101kl68_init[] = {
+ ILI9881C_SWITCH_PAGE_INSTR(3),
+ ILI9881C_COMMAND_INSTR(0x01, 0x00),
+ ILI9881C_COMMAND_INSTR(0x02, 0x00),
+ ILI9881C_COMMAND_INSTR(0x03, 0x55),
+ ILI9881C_COMMAND_INSTR(0x04, 0x55),
+ ILI9881C_COMMAND_INSTR(0x05, 0x03),
+ ILI9881C_COMMAND_INSTR(0x06, 0x06),
+ ILI9881C_COMMAND_INSTR(0x07, 0x00),
+ ILI9881C_COMMAND_INSTR(0x08, 0x07),
+ ILI9881C_COMMAND_INSTR(0x09, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0f, 0x00),
+ ILI9881C_COMMAND_INSTR(0x10, 0x00),
+ ILI9881C_COMMAND_INSTR(0x11, 0x00),
+ ILI9881C_COMMAND_INSTR(0x12, 0x00),
+ ILI9881C_COMMAND_INSTR(0x13, 0x00),
+ ILI9881C_COMMAND_INSTR(0x14, 0x00),
+ ILI9881C_COMMAND_INSTR(0x15, 0x00),
+ ILI9881C_COMMAND_INSTR(0x16, 0x00),
+ ILI9881C_COMMAND_INSTR(0x17, 0x00),
+ ILI9881C_COMMAND_INSTR(0x18, 0x00),
+ ILI9881C_COMMAND_INSTR(0x19, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1e, 0xc0),
+ ILI9881C_COMMAND_INSTR(0x1f, 0x80),
+ ILI9881C_COMMAND_INSTR(0x20, 0x04),
+ ILI9881C_COMMAND_INSTR(0x21, 0x03),
+ ILI9881C_COMMAND_INSTR(0x22, 0x00),
+ ILI9881C_COMMAND_INSTR(0x23, 0x00),
+ ILI9881C_COMMAND_INSTR(0x24, 0x00),
+ ILI9881C_COMMAND_INSTR(0x25, 0x00),
+ ILI9881C_COMMAND_INSTR(0x26, 0x00),
+ ILI9881C_COMMAND_INSTR(0x27, 0x00),
+ ILI9881C_COMMAND_INSTR(0x28, 0x33),
+ ILI9881C_COMMAND_INSTR(0x29, 0x33),
+ ILI9881C_COMMAND_INSTR(0x2a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2f, 0x00),
+ ILI9881C_COMMAND_INSTR(0x30, 0x00),
+ ILI9881C_COMMAND_INSTR(0x31, 0x00),
+ ILI9881C_COMMAND_INSTR(0x32, 0x00),
+ ILI9881C_COMMAND_INSTR(0x33, 0x00),
+ ILI9881C_COMMAND_INSTR(0x34, 0x04),
+ ILI9881C_COMMAND_INSTR(0x35, 0x00),
+ ILI9881C_COMMAND_INSTR(0x36, 0x00),
+ ILI9881C_COMMAND_INSTR(0x37, 0x00),
+ ILI9881C_COMMAND_INSTR(0x38, 0x3c),
+ ILI9881C_COMMAND_INSTR(0x39, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3f, 0x00),
+ ILI9881C_COMMAND_INSTR(0x40, 0x00),
+ ILI9881C_COMMAND_INSTR(0x41, 0x00),
+ ILI9881C_COMMAND_INSTR(0x42, 0x00),
+ ILI9881C_COMMAND_INSTR(0x43, 0x00),
+ ILI9881C_COMMAND_INSTR(0x44, 0x00),
+ ILI9881C_COMMAND_INSTR(0x50, 0x00),
+ ILI9881C_COMMAND_INSTR(0x51, 0x11),
+ ILI9881C_COMMAND_INSTR(0x52, 0x44),
+ ILI9881C_COMMAND_INSTR(0x53, 0x55),
+ ILI9881C_COMMAND_INSTR(0x54, 0x88),
+ ILI9881C_COMMAND_INSTR(0x55, 0xab),
+ ILI9881C_COMMAND_INSTR(0x56, 0x00),
+ ILI9881C_COMMAND_INSTR(0x57, 0x11),
+ ILI9881C_COMMAND_INSTR(0x58, 0x22),
+ ILI9881C_COMMAND_INSTR(0x59, 0x33),
+ ILI9881C_COMMAND_INSTR(0x5a, 0x44),
+ ILI9881C_COMMAND_INSTR(0x5b, 0x55),
+ ILI9881C_COMMAND_INSTR(0x5c, 0x66),
+ ILI9881C_COMMAND_INSTR(0x5d, 0x77),
+ ILI9881C_COMMAND_INSTR(0x5e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x5f, 0x02),
+ ILI9881C_COMMAND_INSTR(0x60, 0x02),
+ ILI9881C_COMMAND_INSTR(0x61, 0x0a),
+ ILI9881C_COMMAND_INSTR(0x62, 0x09),
+ ILI9881C_COMMAND_INSTR(0x63, 0x08),
+ ILI9881C_COMMAND_INSTR(0x64, 0x13),
+ ILI9881C_COMMAND_INSTR(0x65, 0x12),
+ ILI9881C_COMMAND_INSTR(0x66, 0x11),
+ ILI9881C_COMMAND_INSTR(0x67, 0x10),
+ ILI9881C_COMMAND_INSTR(0x68, 0x0f),
+ ILI9881C_COMMAND_INSTR(0x69, 0x0e),
+ ILI9881C_COMMAND_INSTR(0x6a, 0x0d),
+ ILI9881C_COMMAND_INSTR(0x6b, 0x0c),
+ ILI9881C_COMMAND_INSTR(0x6c, 0x06),
+ ILI9881C_COMMAND_INSTR(0x6d, 0x07),
+ ILI9881C_COMMAND_INSTR(0x6e, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6f, 0x02),
+ ILI9881C_COMMAND_INSTR(0x70, 0x02),
+ ILI9881C_COMMAND_INSTR(0x71, 0x02),
+ ILI9881C_COMMAND_INSTR(0x72, 0x02),
+ ILI9881C_COMMAND_INSTR(0x73, 0x02),
+ ILI9881C_COMMAND_INSTR(0x74, 0x02),
+ ILI9881C_COMMAND_INSTR(0x75, 0x02),
+ ILI9881C_COMMAND_INSTR(0x76, 0x02),
+ ILI9881C_COMMAND_INSTR(0x77, 0x0a),
+ ILI9881C_COMMAND_INSTR(0x78, 0x06),
+ ILI9881C_COMMAND_INSTR(0x79, 0x07),
+ ILI9881C_COMMAND_INSTR(0x7a, 0x10),
+ ILI9881C_COMMAND_INSTR(0x7b, 0x11),
+ ILI9881C_COMMAND_INSTR(0x7c, 0x12),
+ ILI9881C_COMMAND_INSTR(0x7d, 0x13),
+ ILI9881C_COMMAND_INSTR(0x7e, 0x0c),
+ ILI9881C_COMMAND_INSTR(0x7f, 0x0d),
+ ILI9881C_COMMAND_INSTR(0x80, 0x0e),
+ ILI9881C_COMMAND_INSTR(0x81, 0x0f),
+ ILI9881C_COMMAND_INSTR(0x82, 0x09),
+ ILI9881C_COMMAND_INSTR(0x83, 0x08),
+ ILI9881C_COMMAND_INSTR(0x84, 0x02),
+ ILI9881C_COMMAND_INSTR(0x85, 0x02),
+ ILI9881C_COMMAND_INSTR(0x86, 0x02),
+ ILI9881C_COMMAND_INSTR(0x87, 0x02),
+ ILI9881C_COMMAND_INSTR(0x88, 0x02),
+ ILI9881C_COMMAND_INSTR(0x89, 0x02),
+ ILI9881C_COMMAND_INSTR(0x8a, 0x02),
+
+ ILI9881C_SWITCH_PAGE_INSTR(4),
+ ILI9881C_COMMAND_INSTR(0x6e, 0x2a),
+ ILI9881C_COMMAND_INSTR(0x6f, 0x37),
+ ILI9881C_COMMAND_INSTR(0x3a, 0x24),
+ ILI9881C_COMMAND_INSTR(0x8d, 0x19),
+ ILI9881C_COMMAND_INSTR(0x87, 0xba),
+ ILI9881C_COMMAND_INSTR(0xb2, 0xd1),
+ ILI9881C_COMMAND_INSTR(0x88, 0x0b),
+ ILI9881C_COMMAND_INSTR(0x38, 0x01),
+ ILI9881C_COMMAND_INSTR(0x39, 0x00),
+ ILI9881C_COMMAND_INSTR(0xb5, 0x02),
+ ILI9881C_COMMAND_INSTR(0x31, 0x25),
+ ILI9881C_COMMAND_INSTR(0x3b, 0x98),
+
+ ILI9881C_SWITCH_PAGE_INSTR(1),
+ ILI9881C_COMMAND_INSTR(0x22, 0x0a),
+ ILI9881C_COMMAND_INSTR(0x31, 0x0c),
+ ILI9881C_COMMAND_INSTR(0x53, 0x40),
+ ILI9881C_COMMAND_INSTR(0x55, 0x45),
+ ILI9881C_COMMAND_INSTR(0x50, 0xb7),
+ ILI9881C_COMMAND_INSTR(0x51, 0xb2),
+ ILI9881C_COMMAND_INSTR(0x60, 0x07),
+ ILI9881C_COMMAND_INSTR(0xa0, 0x22),
+ ILI9881C_COMMAND_INSTR(0xa1, 0x3f),
+ ILI9881C_COMMAND_INSTR(0xa2, 0x4e),
+ ILI9881C_COMMAND_INSTR(0xa3, 0x17),
+ ILI9881C_COMMAND_INSTR(0xa4, 0x1a),
+ ILI9881C_COMMAND_INSTR(0xa5, 0x2d),
+ ILI9881C_COMMAND_INSTR(0xa6, 0x21),
+ ILI9881C_COMMAND_INSTR(0xa7, 0x22),
+ ILI9881C_COMMAND_INSTR(0xa8, 0xc4),
+ ILI9881C_COMMAND_INSTR(0xa9, 0x1b),
+ ILI9881C_COMMAND_INSTR(0xaa, 0x25),
+ ILI9881C_COMMAND_INSTR(0xab, 0xa7),
+ ILI9881C_COMMAND_INSTR(0xac, 0x1a),
+ ILI9881C_COMMAND_INSTR(0xad, 0x19),
+ ILI9881C_COMMAND_INSTR(0xae, 0x4b),
+ ILI9881C_COMMAND_INSTR(0xaf, 0x1f),
+ ILI9881C_COMMAND_INSTR(0xb0, 0x2a),
+ ILI9881C_COMMAND_INSTR(0xb1, 0x59),
+ ILI9881C_COMMAND_INSTR(0xb2, 0x64),
+ ILI9881C_COMMAND_INSTR(0xb3, 0x3f),
+ ILI9881C_COMMAND_INSTR(0xc0, 0x22),
+ ILI9881C_COMMAND_INSTR(0xc1, 0x48),
+ ILI9881C_COMMAND_INSTR(0xc2, 0x59),
+ ILI9881C_COMMAND_INSTR(0xc3, 0x15),
+ ILI9881C_COMMAND_INSTR(0xc4, 0x15),
+ ILI9881C_COMMAND_INSTR(0xc5, 0x28),
+ ILI9881C_COMMAND_INSTR(0xc6, 0x1c),
+ ILI9881C_COMMAND_INSTR(0xc7, 0x1e),
+ ILI9881C_COMMAND_INSTR(0xc8, 0xc4),
+ ILI9881C_COMMAND_INSTR(0xc9, 0x1c),
+ ILI9881C_COMMAND_INSTR(0xca, 0x2b),
+ ILI9881C_COMMAND_INSTR(0xcb, 0xa3),
+ ILI9881C_COMMAND_INSTR(0xcc, 0x1f),
+ ILI9881C_COMMAND_INSTR(0xcd, 0x1e),
+ ILI9881C_COMMAND_INSTR(0xce, 0x52),
+ ILI9881C_COMMAND_INSTR(0xcf, 0x24),
+ ILI9881C_COMMAND_INSTR(0xd0, 0x2a),
+ ILI9881C_COMMAND_INSTR(0xd1, 0x58),
+ ILI9881C_COMMAND_INSTR(0xd2, 0x68),
+ ILI9881C_COMMAND_INSTR(0xd3, 0x3f),
+};
+
static inline struct ili9881c *panel_to_ili9881c(struct drm_panel *panel)
{
return container_of(panel, struct ili9881c, panel);
@@ -1433,33 +1627,24 @@ static inline struct ili9881c *panel_to_ili9881c(struct drm_panel *panel)
* So before any attempt at sending a command or data, we have to be
* sure if we're in the right page or not.
*/
-static int ili9881c_switch_page(struct ili9881c *ctx, u8 page)
+static void ili9881c_switch_page(struct mipi_dsi_multi_context *mctx, u8 page)
{
u8 buf[4] = { 0xff, 0x98, 0x81, page };
- int ret;
-
- ret = mipi_dsi_dcs_write_buffer(ctx->dsi, buf, sizeof(buf));
- if (ret < 0)
- return ret;
- return 0;
+ mipi_dsi_dcs_write_buffer_multi(mctx, buf, sizeof(buf));
}
-static int ili9881c_send_cmd_data(struct ili9881c *ctx, u8 cmd, u8 data)
+static void ili9881c_send_cmd_data(struct mipi_dsi_multi_context *mctx, u8 cmd, u8 data)
{
u8 buf[2] = { cmd, data };
- int ret;
-
- ret = mipi_dsi_dcs_write_buffer(ctx->dsi, buf, sizeof(buf));
- if (ret < 0)
- return ret;
- return 0;
+ mipi_dsi_dcs_write_buffer_multi(mctx, buf, sizeof(buf));
}
static int ili9881c_prepare(struct drm_panel *panel)
{
struct ili9881c *ctx = panel_to_ili9881c(panel);
+ struct mipi_dsi_multi_context mctx = { .dsi = ctx->dsi };
unsigned int i;
int ret;
@@ -1480,61 +1665,39 @@ static int ili9881c_prepare(struct drm_panel *panel)
const struct ili9881c_instr *instr = &ctx->desc->init[i];
if (instr->op == ILI9881C_SWITCH_PAGE)
- ret = ili9881c_switch_page(ctx, instr->arg.page);
+ ili9881c_switch_page(&mctx, instr->arg.page);
else if (instr->op == ILI9881C_COMMAND)
- ret = ili9881c_send_cmd_data(ctx, instr->arg.cmd.cmd,
- instr->arg.cmd.data);
-
- if (ret)
- return ret;
+ ili9881c_send_cmd_data(&mctx, instr->arg.cmd.cmd,
+ instr->arg.cmd.data);
}
- ret = ili9881c_switch_page(ctx, 0);
- if (ret)
- return ret;
-
- if (ctx->address_mode) {
- ret = mipi_dsi_dcs_write(ctx->dsi, MIPI_DCS_SET_ADDRESS_MODE,
- &ctx->address_mode,
- sizeof(ctx->address_mode));
- if (ret < 0)
- return ret;
- }
-
- ret = mipi_dsi_dcs_set_tear_on(ctx->dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
- if (ret)
- return ret;
-
- ret = mipi_dsi_dcs_exit_sleep_mode(ctx->dsi);
- if (ret)
- return ret;
+ ili9881c_switch_page(&mctx, 0);
- return 0;
-}
+ if (ctx->address_mode)
+ ili9881c_send_cmd_data(&mctx, MIPI_DCS_SET_ADDRESS_MODE,
+ ctx->address_mode);
-static int ili9881c_enable(struct drm_panel *panel)
-{
- struct ili9881c *ctx = panel_to_ili9881c(panel);
-
- msleep(120);
-
- mipi_dsi_dcs_set_display_on(ctx->dsi);
+ mipi_dsi_dcs_set_tear_on_multi(&mctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ mipi_dsi_dcs_exit_sleep_mode_multi(&mctx);
+ mipi_dsi_msleep(&mctx, 120);
+ mipi_dsi_dcs_set_display_on_multi(&mctx);
+ if (mctx.accum_err)
+ goto disable_power;
return 0;
-}
-static int ili9881c_disable(struct drm_panel *panel)
-{
- struct ili9881c *ctx = panel_to_ili9881c(panel);
-
- return mipi_dsi_dcs_set_display_off(ctx->dsi);
+disable_power:
+ regulator_disable(ctx->power);
+ return mctx.accum_err;
}
static int ili9881c_unprepare(struct drm_panel *panel)
{
struct ili9881c *ctx = panel_to_ili9881c(panel);
+ struct mipi_dsi_multi_context mctx = { .dsi = ctx->dsi };
- mipi_dsi_dcs_enter_sleep_mode(ctx->dsi);
+ mipi_dsi_dcs_set_display_off_multi(&mctx);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&mctx);
regulator_disable(ctx->power);
gpiod_set_value_cansleep(ctx->reset, 1);
@@ -1660,6 +1823,23 @@ static const struct drm_display_mode rpi_7inch_default_mode = {
.height_mm = 151,
};
+static const struct drm_display_mode bsd1218_a101kl68_default_mode = {
+ .clock = 70000,
+
+ .hdisplay = 800,
+ .hsync_start = 800 + 40,
+ .hsync_end = 800 + 40 + 20,
+ .htotal = 800 + 40 + 20 + 20,
+
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 20,
+ .vsync_end = 1280 + 20 + 4,
+ .vtotal = 1280 + 20 + 4 + 20,
+
+ .width_mm = 120,
+ .height_mm = 170,
+};
+
static int ili9881c_get_modes(struct drm_panel *panel,
struct drm_connector *connector)
{
@@ -1706,8 +1886,6 @@ static enum drm_panel_orientation ili9881c_get_orientation(struct drm_panel *pan
static const struct drm_panel_funcs ili9881c_funcs = {
.prepare = ili9881c_prepare,
.unprepare = ili9881c_unprepare,
- .enable = ili9881c_enable,
- .disable = ili9881c_disable,
.get_modes = ili9881c_get_modes,
.get_orientation = ili9881c_get_orientation,
};
@@ -1830,8 +2008,18 @@ static const struct ili9881c_desc rpi_7inch_desc = {
.lanes = 2,
};
+static const struct ili9881c_desc bsd1218_a101kl68_desc = {
+ .init = bsd1218_a101kl68_init,
+ .init_length = ARRAY_SIZE(bsd1218_a101kl68_init),
+ .mode = &bsd1218_a101kl68_default_mode,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET,
+ .lanes = 4,
+};
+
static const struct of_device_id ili9881c_of_match[] = {
{ .compatible = "bananapi,lhr050h41", .data = &lhr050h41_desc },
+ { .compatible = "bestar,bsd1218-a101kl68", .data = &bsd1218_a101kl68_desc },
{ .compatible = "feixin,k101-im2byl02", .data = &k101_im2byl02_desc },
{ .compatible = "startek,kd050hdfia020", .data = &kd050hdfia020_desc },
{ .compatible = "tdo,tl050hdv35", .data = &tl050hdv35_desc },
diff --git a/drivers/gpu/drm/panel/panel-jdi-lpm102a188a.c b/drivers/gpu/drm/panel/panel-jdi-lpm102a188a.c
index 5f897e143758..23462065d726 100644
--- a/drivers/gpu/drm/panel/panel-jdi-lpm102a188a.c
+++ b/drivers/gpu/drm/panel/panel-jdi-lpm102a188a.c
@@ -81,25 +81,25 @@ static int jdi_panel_disable(struct drm_panel *panel)
static int jdi_panel_unprepare(struct drm_panel *panel)
{
struct jdi_panel *jdi = to_panel_jdi(panel);
- int ret;
- ret = mipi_dsi_dcs_set_display_off(jdi->link1);
- if (ret < 0)
- dev_err(panel->dev, "failed to set display off: %d\n", ret);
+ /*
+ * One context per panel since we'll continue trying to shut down the
+ * other panel even if one isn't responding.
+ */
+ struct mipi_dsi_multi_context dsi_ctx1 = { .dsi = jdi->link1 };
+ struct mipi_dsi_multi_context dsi_ctx2 = { .dsi = jdi->link2 };
- ret = mipi_dsi_dcs_set_display_off(jdi->link2);
- if (ret < 0)
- dev_err(panel->dev, "failed to set display off: %d\n", ret);
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx1);
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx2);
/* Specified by JDI @ 50ms, subject to change */
msleep(50);
- ret = mipi_dsi_dcs_enter_sleep_mode(jdi->link1);
- if (ret < 0)
- dev_err(panel->dev, "failed to enter sleep mode: %d\n", ret);
- ret = mipi_dsi_dcs_enter_sleep_mode(jdi->link2);
- if (ret < 0)
- dev_err(panel->dev, "failed to enter sleep mode: %d\n", ret);
+ /* Doesn't hurt to try sleep mode even if display off fails */
+ dsi_ctx1.accum_err = 0;
+ dsi_ctx2.accum_err = 0;
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx1);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx2);
/* Specified by JDI @ 150ms, subject to change */
msleep(150);
@@ -123,72 +123,46 @@ static int jdi_panel_unprepare(struct drm_panel *panel)
/* Specified by JDI @ 20ms, subject to change */
msleep(20);
- return ret;
+ return 0;
}
-static int jdi_setup_symmetrical_split(struct mipi_dsi_device *left,
- struct mipi_dsi_device *right,
- const struct drm_display_mode *mode)
+static void jdi_setup_symmetrical_split(struct mipi_dsi_multi_context *dsi_ctx,
+ struct mipi_dsi_device *left,
+ struct mipi_dsi_device *right,
+ const struct drm_display_mode *mode)
{
- int err;
-
- err = mipi_dsi_dcs_set_column_address(left, 0, mode->hdisplay / 2 - 1);
- if (err < 0) {
- dev_err(&left->dev, "failed to set column address: %d\n", err);
- return err;
- }
-
- err = mipi_dsi_dcs_set_column_address(right, 0, mode->hdisplay / 2 - 1);
- if (err < 0) {
- dev_err(&right->dev, "failed to set column address: %d\n", err);
- return err;
- }
-
- err = mipi_dsi_dcs_set_page_address(left, 0, mode->vdisplay - 1);
- if (err < 0) {
- dev_err(&left->dev, "failed to set page address: %d\n", err);
- return err;
- }
-
- err = mipi_dsi_dcs_set_page_address(right, 0, mode->vdisplay - 1);
- if (err < 0) {
- dev_err(&right->dev, "failed to set page address: %d\n", err);
- return err;
- }
-
- return 0;
+ mipi_dsi_dual(mipi_dsi_dcs_set_column_address_multi,
+ dsi_ctx, left, right,
+ 0, mode->hdisplay / 2 - 1);
+ mipi_dsi_dual(mipi_dsi_dcs_set_page_address_multi,
+ dsi_ctx, left, right,
+ 0, mode->vdisplay - 1);
}
-static int jdi_write_dcdc_registers(struct jdi_panel *jdi)
+static void jdi_write_dcdc_registers(struct mipi_dsi_multi_context *dsi_ctx,
+ struct jdi_panel *jdi)
{
/* Clear the manufacturer command access protection */
- mipi_dsi_generic_write_seq(jdi->link1, MCS_CMD_ACS_PROT,
- MCS_CMD_ACS_PROT_OFF);
- mipi_dsi_generic_write_seq(jdi->link2, MCS_CMD_ACS_PROT,
- MCS_CMD_ACS_PROT_OFF);
+ mipi_dsi_dual_generic_write_seq_multi(dsi_ctx, jdi->link1, jdi->link2,
+ MCS_CMD_ACS_PROT,
+ MCS_CMD_ACS_PROT_OFF);
/*
- * Change the VGH/VGL divide rations to move the noise generated by the
+ * Change the VGH/VGL divide ratios to move the noise generated by the
* TCONN. This should hopefully avoid interaction with the backlight
* controller.
*/
- mipi_dsi_generic_write_seq(jdi->link1, MCS_PWR_CTRL_FUNC,
- MCS_PWR_CTRL_PARAM1_VGH_330_DIV |
- MCS_PWR_CTRL_PARAM1_DEFAULT,
- MCS_PWR_CTRL_PARAM2_VGL_410_DIV |
- MCS_PWR_CTRL_PARAM2_DEFAULT);
-
- mipi_dsi_generic_write_seq(jdi->link2, MCS_PWR_CTRL_FUNC,
- MCS_PWR_CTRL_PARAM1_VGH_330_DIV |
- MCS_PWR_CTRL_PARAM1_DEFAULT,
- MCS_PWR_CTRL_PARAM2_VGL_410_DIV |
- MCS_PWR_CTRL_PARAM2_DEFAULT);
-
- return 0;
+ mipi_dsi_dual_generic_write_seq_multi(dsi_ctx, jdi->link1, jdi->link2,
+ MCS_PWR_CTRL_FUNC,
+ MCS_PWR_CTRL_PARAM1_VGH_330_DIV |
+ MCS_PWR_CTRL_PARAM1_DEFAULT,
+ MCS_PWR_CTRL_PARAM2_VGL_410_DIV |
+ MCS_PWR_CTRL_PARAM2_DEFAULT);
}
static int jdi_panel_prepare(struct drm_panel *panel)
{
struct jdi_panel *jdi = to_panel_jdi(panel);
+ struct mipi_dsi_multi_context dsi_ctx = {};
int err;
/* Disable backlight to avoid showing random pixels
@@ -231,86 +205,36 @@ static int jdi_panel_prepare(struct drm_panel *panel)
* put in place to communicate the configuration back to the DSI host
* controller.
*/
- err = jdi_setup_symmetrical_split(jdi->link1, jdi->link2,
- jdi->mode);
- if (err < 0) {
- dev_err(panel->dev, "failed to set up symmetrical split: %d\n",
- err);
- goto poweroff;
- }
+ jdi_setup_symmetrical_split(&dsi_ctx, jdi->link1, jdi->link2,
+ jdi->mode);
- err = mipi_dsi_dcs_set_tear_scanline(jdi->link1,
- jdi->mode->vdisplay - 16);
- if (err < 0) {
- dev_err(panel->dev, "failed to set tear scanline: %d\n", err);
- goto poweroff;
- }
+ mipi_dsi_dual(mipi_dsi_dcs_set_tear_scanline_multi,
+ &dsi_ctx, jdi->link1, jdi->link2,
+ jdi->mode->vdisplay - 16);
- err = mipi_dsi_dcs_set_tear_scanline(jdi->link2,
- jdi->mode->vdisplay - 16);
- if (err < 0) {
- dev_err(panel->dev, "failed to set tear scanline: %d\n", err);
- goto poweroff;
- }
-
- err = mipi_dsi_dcs_set_tear_on(jdi->link1,
- MIPI_DSI_DCS_TEAR_MODE_VBLANK);
- if (err < 0) {
- dev_err(panel->dev, "failed to set tear on: %d\n", err);
- goto poweroff;
- }
-
- err = mipi_dsi_dcs_set_tear_on(jdi->link2,
- MIPI_DSI_DCS_TEAR_MODE_VBLANK);
- if (err < 0) {
- dev_err(panel->dev, "failed to set tear on: %d\n", err);
- goto poweroff;
- }
+ mipi_dsi_dual(mipi_dsi_dcs_set_tear_on_multi,
+ &dsi_ctx, jdi->link1, jdi->link2,
+ MIPI_DSI_DCS_TEAR_MODE_VBLANK);
- err = mipi_dsi_dcs_set_pixel_format(jdi->link1, MIPI_DCS_PIXEL_FMT_24BIT);
- if (err < 0) {
- dev_err(panel->dev, "failed to set pixel format: %d\n", err);
- goto poweroff;
- }
+ mipi_dsi_dual(mipi_dsi_dcs_set_pixel_format_multi,
+ &dsi_ctx, jdi->link1, jdi->link2,
+ MIPI_DCS_PIXEL_FMT_24BIT);
- err = mipi_dsi_dcs_set_pixel_format(jdi->link2, MIPI_DCS_PIXEL_FMT_24BIT);
- if (err < 0) {
- dev_err(panel->dev, "failed to set pixel format: %d\n", err);
- goto poweroff;
- }
+ mipi_dsi_dual(mipi_dsi_dcs_exit_sleep_mode_multi,
+ &dsi_ctx, jdi->link1, jdi->link2);
- err = mipi_dsi_dcs_exit_sleep_mode(jdi->link1);
- if (err < 0) {
- dev_err(panel->dev, "failed to exit sleep mode: %d\n", err);
- goto poweroff;
- }
-
- err = mipi_dsi_dcs_exit_sleep_mode(jdi->link2);
- if (err < 0) {
- dev_err(panel->dev, "failed to exit sleep mode: %d\n", err);
- goto poweroff;
- }
-
- err = jdi_write_dcdc_registers(jdi);
- if (err < 0) {
- dev_err(panel->dev, "failed to write dcdc registers: %d\n", err);
- goto poweroff;
- }
+ jdi_write_dcdc_registers(&dsi_ctx, jdi);
/*
- * We need to wait 150ms between mipi_dsi_dcs_exit_sleep_mode() and
- * mipi_dsi_dcs_set_display_on().
+ * We need to wait 150ms between mipi_dsi_dcs_exit_sleep_mode_multi()
+ * and mipi_dsi_dcs_set_display_on_multi().
*/
- msleep(150);
+ mipi_dsi_msleep(&dsi_ctx, 150);
- err = mipi_dsi_dcs_set_display_on(jdi->link1);
- if (err < 0) {
- dev_err(panel->dev, "failed to set display on: %d\n", err);
- goto poweroff;
- }
+ mipi_dsi_dual(mipi_dsi_dcs_set_display_on_multi,
+ &dsi_ctx, jdi->link1, jdi->link2);
- err = mipi_dsi_dcs_set_display_on(jdi->link2);
- if (err < 0) {
- dev_err(panel->dev, "failed to set display on: %d\n", err);
+ if (dsi_ctx.accum_err < 0) {
+ err = dsi_ctx.accum_err;
goto poweroff;
}
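For reference, mipi_dsi_dual() used throughout the hunk above is the dual-link helper from <drm/drm_mipi_dsi.h>: it replays a single *_multi operation on each of the two DSI links while both share one mipi_dsi_multi_context, so jdi_panel_prepare() needs exactly one accum_err check for the whole sequence. A minimal sketch of how such a macro can be built (illustrative expansion only; the in-tree definition may differ in detail):

    /*
     * Illustrative only: run one *_multi operation on two DSI links
     * that share a single error-accumulating context.
     */
    #define mipi_dsi_dual(_func, _ctx, _dsi1, _dsi2, _args...)    \
        do {                                                       \
            struct mipi_dsi_multi_context *_c = (_ctx);           \
            _c->dsi = (_dsi1);                                     \
            _func(_c, ##_args);                                    \
            _c->dsi = (_dsi2);                                     \
            _func(_c, ##_args);                                    \
        } while (0)

Read back into jdi_setup_symmetrical_split(), each link is programmed with columns 0 through hdisplay / 2 - 1 and pages 0 through vdisplay - 1: each of the two DSI links drives one half of the panel.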
diff --git a/drivers/gpu/drm/panel/panel-lvds.c b/drivers/gpu/drm/panel/panel-lvds.c
index 23fd535d8f47..46b07f38559f 100644
--- a/drivers/gpu/drm/panel/panel-lvds.c
+++ b/drivers/gpu/drm/panel/panel-lvds.c
@@ -28,8 +28,6 @@ struct panel_lvds {
struct device *dev;
const char *label;
- unsigned int width;
- unsigned int height;
struct drm_display_mode dmode;
u32 bus_flags;
unsigned int bus_format;
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35560.c b/drivers/gpu/drm/panel/panel-novatek-nt35560.c
index 98f0782c8411..561e6643dcbb 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt35560.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt35560.c
@@ -148,24 +148,20 @@ static inline struct nt35560 *panel_to_nt35560(struct drm_panel *panel)
static int nt35560_set_brightness(struct backlight_device *bl)
{
struct nt35560 *nt = bl_get_data(bl);
- struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
- int period_ns = 1023;
+ struct mipi_dsi_multi_context dsi_ctx = {
+ .dsi = to_mipi_dsi_device(nt->dev)
+ };
int duty_ns = bl->props.brightness;
+ int period_ns = 1023;
u8 pwm_ratio;
u8 pwm_div;
- u8 par;
- int ret;
if (backlight_is_blank(bl)) {
/* Disable backlight */
- par = 0x00;
- ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY,
- &par, 1);
- if (ret) {
- dev_err(nt->dev, "failed to disable display backlight (%d)\n", ret);
- return ret;
- }
- return 0;
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx,
+ MIPI_DCS_WRITE_CONTROL_DISPLAY,
+ 0x00);
+ return dsi_ctx.accum_err;
}
/* Calculate the PWM duty cycle in n/256's */
@@ -176,12 +172,6 @@ static int nt35560_set_brightness(struct backlight_device *bl)
/* Set up PWM dutycycle ONE byte (differs from the standard) */
dev_dbg(nt->dev, "calculated duty cycle %02x\n", pwm_ratio);
- ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
- &pwm_ratio, 1);
- if (ret < 0) {
- dev_err(nt->dev, "failed to set display PWM ratio (%d)\n", ret);
- return ret;
- }
/*
* Sequence to write PWMDIV:
@@ -192,46 +182,23 @@ static int nt35560_set_brightness(struct backlight_device *bl)
* 0x22 PWMDIV
* 0x7F 0xAA CMD2 page 1 lock
*/
- par = 0xaa;
- ret = mipi_dsi_dcs_write(dsi, 0xf3, &par, 1);
- if (ret < 0) {
- dev_err(nt->dev, "failed to unlock CMD 2 (%d)\n", ret);
- return ret;
- }
- par = 0x01;
- ret = mipi_dsi_dcs_write(dsi, 0x00, &par, 1);
- if (ret < 0) {
- dev_err(nt->dev, "failed to enter page 1 (%d)\n", ret);
- return ret;
- }
- par = 0x01;
- ret = mipi_dsi_dcs_write(dsi, 0x7d, &par, 1);
- if (ret < 0) {
- dev_err(nt->dev, "failed to disable MTP reload (%d)\n", ret);
- return ret;
- }
- ret = mipi_dsi_dcs_write(dsi, 0x22, &pwm_div, 1);
- if (ret < 0) {
- dev_err(nt->dev, "failed to set PWM divisor (%d)\n", ret);
- return ret;
- }
- par = 0xaa;
- ret = mipi_dsi_dcs_write(dsi, 0x7f, &par, 1);
- if (ret < 0) {
- dev_err(nt->dev, "failed to lock CMD 2 (%d)\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_write_var_seq_multi(&dsi_ctx,
+ MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
+ pwm_ratio);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf3, 0xaa);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7d, 0x01);
+
+ mipi_dsi_dcs_write_var_seq_multi(&dsi_ctx, 0x22, pwm_div);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7f, 0xaa);
/* Enable backlight */
- par = 0x24;
- ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY,
- &par, 1);
- if (ret < 0) {
- dev_err(nt->dev, "failed to enable display backlight (%d)\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY,
+ 0x24);
- return 0;
+ return dsi_ctx.accum_err;
}
static const struct backlight_ops nt35560_bl_ops = {
@@ -244,32 +211,23 @@ static const struct backlight_properties nt35560_bl_props = {
.max_brightness = 1023,
};
-static int nt35560_read_id(struct nt35560 *nt)
+static void nt35560_read_id(struct mipi_dsi_multi_context *dsi_ctx)
{
- struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
+ struct device *dev = &dsi_ctx->dsi->dev;
u8 vendor, version, panel;
u16 val;
- int ret;
- ret = mipi_dsi_dcs_read(dsi, NT35560_DCS_READ_ID1, &vendor, 1);
- if (ret < 0) {
- dev_err(nt->dev, "could not vendor ID byte\n");
- return ret;
- }
- ret = mipi_dsi_dcs_read(dsi, NT35560_DCS_READ_ID2, &version, 1);
- if (ret < 0) {
- dev_err(nt->dev, "could not read device version byte\n");
- return ret;
- }
- ret = mipi_dsi_dcs_read(dsi, NT35560_DCS_READ_ID3, &panel, 1);
- if (ret < 0) {
- dev_err(nt->dev, "could not read panel ID byte\n");
- return ret;
- }
+ mipi_dsi_dcs_read_multi(dsi_ctx, NT35560_DCS_READ_ID1, &vendor, 1);
+ mipi_dsi_dcs_read_multi(dsi_ctx, NT35560_DCS_READ_ID2, &version, 1);
+ mipi_dsi_dcs_read_multi(dsi_ctx, NT35560_DCS_READ_ID3, &panel, 1);
+
+ if (dsi_ctx->accum_err < 0)
+ return;
if (vendor == 0x00) {
- dev_err(nt->dev, "device vendor ID is zero\n");
- return -ENODEV;
+ dev_err(dev, "device vendor ID is zero\n");
+ dsi_ctx->accum_err = -ENODEV;
+ return;
}
val = (vendor << 8) | panel;
@@ -278,16 +236,16 @@ static int nt35560_read_id(struct nt35560 *nt)
case DISPLAY_SONY_ACX424AKP_ID2:
case DISPLAY_SONY_ACX424AKP_ID3:
case DISPLAY_SONY_ACX424AKP_ID4:
- dev_info(nt->dev, "MTP vendor: %02x, version: %02x, panel: %02x\n",
+ dev_info(dev,
+ "MTP vendor: %02x, version: %02x, panel: %02x\n",
vendor, version, panel);
break;
default:
- dev_info(nt->dev, "unknown vendor: %02x, version: %02x, panel: %02x\n",
+ dev_info(dev,
+ "unknown vendor: %02x, version: %02x, panel: %02x\n",
vendor, version, panel);
break;
}
-
- return 0;
}
static int nt35560_power_on(struct nt35560 *nt)
@@ -322,92 +280,56 @@ static void nt35560_power_off(struct nt35560 *nt)
static int nt35560_prepare(struct drm_panel *panel)
{
struct nt35560 *nt = panel_to_nt35560(panel);
- struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
- const u8 mddi = 3;
+ struct mipi_dsi_multi_context dsi_ctx = {
+ .dsi = to_mipi_dsi_device(nt->dev)
+ };
int ret;
ret = nt35560_power_on(nt);
if (ret)
return ret;
- ret = nt35560_read_id(nt);
- if (ret) {
- dev_err(nt->dev, "failed to read panel ID (%d)\n", ret);
- goto err_power_off;
- }
+ nt35560_read_id(&dsi_ctx);
- /* Enabe tearing mode: send TE (tearing effect) at VBLANK */
- ret = mipi_dsi_dcs_set_tear_on(dsi,
+ /* Enable tearing mode: send TE (tearing effect) at VBLANK */
+ mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx,
MIPI_DSI_DCS_TEAR_MODE_VBLANK);
- if (ret) {
- dev_err(nt->dev, "failed to enable vblank TE (%d)\n", ret);
- goto err_power_off;
- }
/*
* Set MDDI
*
* This presumably deactivates the Qualcomm MDDI interface and
* selects DSI, similar code is found in other drivers such as the
- * Sharp LS043T1LE01 which makes us suspect that this panel may be
- * using a Novatek NT35565 or similar display driver chip that shares
- * this command. Due to the lack of documentation we cannot know for
- * sure.
+ * Sharp LS043T1LE01.
*/
- ret = mipi_dsi_dcs_write(dsi, NT35560_DCS_SET_MDDI,
- &mddi, sizeof(mddi));
- if (ret < 0) {
- dev_err(nt->dev, "failed to set MDDI (%d)\n", ret);
- goto err_power_off;
- }
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, NT35560_DCS_SET_MDDI, 3);
- /* Exit sleep mode */
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret) {
- dev_err(nt->dev, "failed to exit sleep mode (%d)\n", ret);
- goto err_power_off;
- }
- msleep(140);
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 140);
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret) {
- dev_err(nt->dev, "failed to turn display on (%d)\n", ret);
- goto err_power_off;
- }
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
if (nt->video_mode) {
- /* In video mode turn peripheral on */
- ret = mipi_dsi_turn_on_peripheral(dsi);
- if (ret) {
- dev_err(nt->dev, "failed to turn on peripheral\n");
- goto err_power_off;
- }
+ mipi_dsi_turn_on_peripheral_multi(&dsi_ctx);
}
- return 0;
-
-err_power_off:
- nt35560_power_off(nt);
- return ret;
+ if (dsi_ctx.accum_err < 0)
+ nt35560_power_off(nt);
+ return dsi_ctx.accum_err;
}
static int nt35560_unprepare(struct drm_panel *panel)
{
struct nt35560 *nt = panel_to_nt35560(panel);
- struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = {
+ .dsi = to_mipi_dsi_device(nt->dev)
+ };
- ret = mipi_dsi_dcs_set_display_off(dsi);
- if (ret) {
- dev_err(nt->dev, "failed to turn display off (%d)\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+
+ if (dsi_ctx.accum_err < 0)
+ return dsi_ctx.accum_err;
- /* Enter sleep mode */
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret) {
- dev_err(nt->dev, "failed to enter sleep mode (%d)\n", ret);
- return ret;
- }
msleep(85);
nt35560_power_off(nt);
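The conversion above leans on the mipi_dsi_multi_context contract: every *_multi helper becomes a no-op once ctx->accum_err is non-zero, and mipi_dsi_msleep() likewise skips its delay after a failure, so a long command sequence needs a single error check at the end instead of one per call. Reads are the one place the driver must still check accum_err before consuming the returned bytes, as nt35560_read_id() now does. A hypothetical helper showing the shape every *_multi call follows (names here are illustrative, not the in-tree implementation):

    /* Illustrative only: the no-op-after-error shape of a *_multi helper. */
    static void example_set_display_on_multi(struct mipi_dsi_multi_context *ctx)
    {
        int ret;

        if (ctx->accum_err)
            return;

        ret = mipi_dsi_dcs_set_display_on(ctx->dsi);
        if (ret < 0) {
            dev_err(&ctx->dsi->dev, "failed to set display on: %d\n", ret);
            ctx->accum_err = ret;
        }
    }

This contract is also why nt35560_set_brightness() sends pwm_ratio and pwm_div through mipi_dsi_dcs_write_var_seq_multi(): the plain mipi_dsi_dcs_write_seq_multi() variant builds its payload as a compile-time constant byte array, while the _var_ variant accepts values computed at run time.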
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36523.c b/drivers/gpu/drm/panel/panel-novatek-nt36523.c
index 32cf64c7c18b..226d91daf8c7 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt36523.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt36523.c
@@ -23,14 +23,6 @@
#define DSI_NUM_MIN 1
-#define mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, cmd, seq...) \
- do { \
- dsi_ctx.dsi = dsi0; \
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, cmd, seq); \
- dsi_ctx.dsi = dsi1; \
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, cmd, seq); \
- } while (0)
-
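The driver-local macro deleted above is superseded by a shared mipi_dsi_dual_dcs_write_seq_multi() helper, which is why every caller below now passes &dsi_ctx instead of naming the context variable. Presumably the shared helper composes the existing pieces; a sketch under that assumption (the real definition lives in <drm/drm_mipi_dsi.h> and may differ):

    /* Sketch only: a dual-link DCS write built from mipi_dsi_dual(). */
    #define mipi_dsi_dual_dcs_write_seq_multi(_ctx, _dsi1, _dsi2, _cmd, _seq...) \
        do {                                                                     \
            static const u8 _d[] = { _cmd, ##_seq };                             \
            mipi_dsi_dual(mipi_dsi_dcs_write_buffer_multi,                       \
                          (_ctx), (_dsi1), (_dsi2),                              \
                          _d, ARRAY_SIZE(_d));                                   \
        } while (0)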
struct panel_info {
struct drm_panel panel;
struct mipi_dsi_device *dsi[2];
@@ -71,217 +63,217 @@ static int elish_boe_init_sequence(struct panel_info *pinfo)
struct mipi_dsi_device *dsi1 = pinfo->dsi[1];
struct mipi_dsi_multi_context dsi_ctx = { .dsi = NULL };
/* No datasheet, so write magic init sequence directly */
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x05);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x20);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x18, 0x40);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x02);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x23);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x00, 0x80);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x01, 0x84);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x05, 0x2d);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x06, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x07, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x08, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x09, 0x45);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x11, 0x02);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x12, 0x80);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x15, 0x83);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x16, 0x0c);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x29, 0x0a);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x30, 0xff);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x31, 0xfe);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x32, 0xfd);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x33, 0xfb);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x34, 0xf8);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x35, 0xf5);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x36, 0xf3);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x37, 0xf2);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x38, 0xf2);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x39, 0xf2);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3a, 0xef);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3b, 0xec);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3d, 0xe9);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3f, 0xe5);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x40, 0xe5);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x41, 0xe5);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2a, 0x13);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x45, 0xff);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x46, 0xf4);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x47, 0xe7);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x48, 0xda);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x49, 0xcd);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4a, 0xc0);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4b, 0xb3);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4c, 0xb2);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4d, 0xb2);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4e, 0xb2);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4f, 0x99);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x50, 0x80);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x68);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x52, 0x66);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x53, 0x66);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x54, 0x66);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2b, 0x0e);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x58, 0xff);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x59, 0xfb);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5a, 0xf7);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5b, 0xf3);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5c, 0xef);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5d, 0xe3);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5e, 0xda);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5f, 0xd8);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x60, 0xd8);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x61, 0xd8);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x62, 0xcb);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x63, 0xbf);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x64, 0xb3);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x65, 0xb2);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x66, 0xb2);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x67, 0xb2);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x2a);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x25, 0x47);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x30, 0x47);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x39, 0x47);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x26);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x19, 0x10);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1a, 0xe0);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1b, 0x10);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1c, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2a, 0x10);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2b, 0xe0);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xf0);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x84, 0x08);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x85, 0x0c);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x20);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x91, 0x1f);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x92, 0x0f);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x93, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x94, 0x18);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x95, 0x03);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x96, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb0, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x19, 0x1f);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1b, 0x1b);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x24);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb8, 0x28);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x27);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd0, 0x31);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd1, 0x20);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd2, 0x30);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd4, 0x08);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xde, 0x80);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xdf, 0x02);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x26);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x00, 0x81);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x01, 0xb0);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x22);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9f, 0x50);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x6f, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x70, 0x11);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x73, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x74, 0x49);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x76, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x77, 0x49);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xa0, 0x3f);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xa9, 0x50);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xaa, 0x28);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xab, 0x28);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xad, 0x10);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb8, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x49);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xba, 0x49);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbb, 0x49);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbe, 0x04);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbf, 0x49);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc0, 0x04);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc1, 0x59);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc2, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc5, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc6, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc7, 0x48);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xca, 0x43);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xcb, 0x3c);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xce, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xcf, 0x43);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd0, 0x3c);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd3, 0x43);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd4, 0x3c);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd7, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xdc, 0x43);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xdd, 0x3c);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xe1, 0x43);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xe2, 0x3c);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xf2, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xf3, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xf4, 0x48);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x13, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x14, 0x23);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbc, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbd, 0x23);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x2a);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x97, 0x3c);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x98, 0x02);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x99, 0x95);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9a, 0x03);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9b, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9c, 0x0b);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9d, 0x0a);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9e, 0x90);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x22);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9f, 0x50);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x23);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xa3, 0x50);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xe0);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x14, 0x60);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x16, 0xc0);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4f, 0x02);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xf0);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3a, 0x08);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xd0);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x02, 0xaf);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x09, 0xee);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1c, 0x99);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1d, 0x09);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x0f, 0xff);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x53, 0x2c);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x35, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbb, 0x13);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3b, 0x03, 0xac, 0x1a, 0x04, 0x04);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x11);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xb9, 0x05);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x20);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x18, 0x40);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xb9, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x23);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x00, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x01, 0x84);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x05, 0x2d);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x06, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x07, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x08, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x09, 0x45);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x11, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x12, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x15, 0x83);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x16, 0x0c);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x29, 0x0a);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x30, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x31, 0xfe);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x32, 0xfd);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x33, 0xfb);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x34, 0xf8);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x35, 0xf5);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x36, 0xf3);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x37, 0xf2);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x38, 0xf2);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x39, 0xf2);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x3a, 0xef);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x3b, 0xec);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x3d, 0xe9);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x3f, 0xe5);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x40, 0xe5);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x41, 0xe5);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x2a, 0x13);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x45, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x46, 0xf4);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x47, 0xe7);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x48, 0xda);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x49, 0xcd);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x4a, 0xc0);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x4b, 0xb3);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x4c, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x4d, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x4e, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x4f, 0x99);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x50, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x51, 0x68);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x52, 0x66);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x53, 0x66);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x54, 0x66);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x2b, 0x0e);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x58, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x59, 0xfb);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x5a, 0xf7);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x5b, 0xf3);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x5c, 0xef);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x5d, 0xe3);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x5e, 0xda);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x5f, 0xd8);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x60, 0xd8);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x61, 0xd8);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x62, 0xcb);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x63, 0xbf);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x64, 0xb3);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x65, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x66, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x67, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x2a);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x25, 0x47);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x30, 0x47);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x39, 0x47);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x26);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x19, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x1a, 0xe0);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x1b, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x1c, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x2a, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x2b, 0xe0);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0xf0);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x84, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x85, 0x0c);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x20);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x51, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x25);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x91, 0x1f);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x92, 0x0f);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x93, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x94, 0x18);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x95, 0x03);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x96, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xb0, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x25);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x19, 0x1f);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x1b, 0x1b);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x24);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xb8, 0x28);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x27);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xd0, 0x31);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xd1, 0x20);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xd2, 0x30);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xd4, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xde, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xdf, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x26);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x00, 0x81);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x01, 0xb0);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x22);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x9f, 0x50);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x6f, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x70, 0x11);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x73, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x74, 0x49);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x76, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x77, 0x49);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xa0, 0x3f);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xa9, 0x50);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xaa, 0x28);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xab, 0x28);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xad, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xb8, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xb9, 0x49);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xba, 0x49);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xbb, 0x49);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xbe, 0x04);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xbf, 0x49);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xc0, 0x04);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xc1, 0x59);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xc2, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xc5, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xc6, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xc7, 0x48);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xca, 0x43);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xcb, 0x3c);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xce, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xcf, 0x43);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xd0, 0x3c);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xd3, 0x43);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xd4, 0x3c);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xd7, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xdc, 0x43);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xdd, 0x3c);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xe1, 0x43);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xe2, 0x3c);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xf2, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xf3, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xf4, 0x48);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x25);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x13, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x14, 0x23);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xbc, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xbd, 0x23);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x2a);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x97, 0x3c);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x98, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x99, 0x95);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x9a, 0x03);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x9b, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x9c, 0x0b);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x9d, 0x0a);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x9e, 0x90);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x22);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x9f, 0x50);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x23);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xa3, 0x50);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0xe0);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x14, 0x60);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x16, 0xc0);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x4f, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0xf0);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x3a, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0xd0);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x02, 0xaf);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x09, 0xee);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x1c, 0x99);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x1d, 0x09);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x51, 0x0f, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x53, 0x2c);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x35, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xbb, 0x13);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x3b, 0x03, 0xac, 0x1a, 0x04, 0x04);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x11);
mipi_dsi_msleep(&dsi_ctx, 70);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x29);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x29);
return dsi_ctx.accum_err;
}
@@ -292,195 +284,195 @@ static int elish_csot_init_sequence(struct panel_info *pinfo)
struct mipi_dsi_device *dsi1 = pinfo->dsi[1];
struct mipi_dsi_multi_context dsi_ctx = { .dsi = NULL };
/* No datasheet, so write magic init sequence directly */
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x05);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x20);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x18, 0x40);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x02);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xd0);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x02, 0xaf);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x00, 0x30);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x09, 0xee);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1c, 0x99);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1d, 0x09);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xf0);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3a, 0x08);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xe0);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4f, 0x02);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x20);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x58, 0x40);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x35, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x23);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x00, 0x80);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x01, 0x84);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x05, 0x2d);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x06, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x07, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x08, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x09, 0x45);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x11, 0x02);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x12, 0x80);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x15, 0x83);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x16, 0x0c);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x29, 0x0a);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x30, 0xff);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x31, 0xfe);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x32, 0xfd);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x33, 0xfb);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x34, 0xf8);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x35, 0xf5);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x36, 0xf3);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x37, 0xf2);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x38, 0xf2);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x39, 0xf2);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3a, 0xef);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3b, 0xec);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3d, 0xe9);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3f, 0xe5);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x40, 0xe5);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x41, 0xe5);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2a, 0x13);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x45, 0xff);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x46, 0xf4);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x47, 0xe7);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x48, 0xda);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x49, 0xcd);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4a, 0xc0);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4b, 0xb3);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4c, 0xb2);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4d, 0xb2);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4e, 0xb2);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4f, 0x99);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x50, 0x80);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x68);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x52, 0x66);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x53, 0x66);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x54, 0x66);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2b, 0x0e);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x58, 0xff);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x59, 0xfb);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5a, 0xf7);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5b, 0xf3);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5c, 0xef);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5d, 0xe3);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5e, 0xda);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5f, 0xd8);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x60, 0xd8);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x61, 0xd8);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x62, 0xcb);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x63, 0xbf);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x64, 0xb3);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x65, 0xb2);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x66, 0xb2);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x67, 0xb2);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x0f, 0xff);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x53, 0x2c);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x55, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbb, 0x13);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3b, 0x03, 0xac, 0x1a, 0x04, 0x04);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x2a);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x25, 0x46);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x30, 0x46);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x39, 0x46);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x26);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x01, 0xb0);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x19, 0x10);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1a, 0xe0);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1b, 0x10);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1c, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2a, 0x10);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2b, 0xe0);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xf0);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x84, 0x08);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x85, 0x0c);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x20);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x91, 0x1f);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x92, 0x0f);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x93, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x94, 0x18);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x95, 0x03);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x96, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb0, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x19, 0x1f);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1b, 0x1b);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x24);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb8, 0x28);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x27);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd0, 0x31);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd1, 0x20);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd4, 0x08);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xde, 0x80);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xdf, 0x02);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x26);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x00, 0x81);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x01, 0xb0);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x22);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x6f, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x70, 0x11);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x73, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x74, 0x4d);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xa0, 0x3f);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xa9, 0x50);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xaa, 0x28);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xab, 0x28);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xad, 0x10);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb8, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x4b);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xba, 0x96);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbb, 0x4b);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbe, 0x07);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbf, 0x4b);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc0, 0x07);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc1, 0x5c);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc2, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc5, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc6, 0x3f);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc7, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xca, 0x08);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xcb, 0x40);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xce, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xcf, 0x08);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd0, 0x40);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd3, 0x08);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd4, 0x40);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbc, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbd, 0x1c);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x2a);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9a, 0x03);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x11);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xb9, 0x05);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x20);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x18, 0x40);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xb9, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0xd0);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x02, 0xaf);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x00, 0x30);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x09, 0xee);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x1c, 0x99);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x1d, 0x09);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0xf0);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x3a, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0xe0);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x4f, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x20);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x58, 0x40);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x35, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x23);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x00, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x01, 0x84);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x05, 0x2d);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x06, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x07, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x08, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x09, 0x45);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x11, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x12, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x15, 0x83);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x16, 0x0c);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x29, 0x0a);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x30, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x31, 0xfe);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x32, 0xfd);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x33, 0xfb);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x34, 0xf8);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x35, 0xf5);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x36, 0xf3);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x37, 0xf2);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x38, 0xf2);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x39, 0xf2);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x3a, 0xef);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x3b, 0xec);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x3d, 0xe9);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x3f, 0xe5);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x40, 0xe5);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x41, 0xe5);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x2a, 0x13);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x45, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x46, 0xf4);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x47, 0xe7);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x48, 0xda);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x49, 0xcd);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x4a, 0xc0);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x4b, 0xb3);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x4c, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x4d, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x4e, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x4f, 0x99);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x50, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x51, 0x68);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x52, 0x66);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x53, 0x66);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x54, 0x66);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x2b, 0x0e);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x58, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x59, 0xfb);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x5a, 0xf7);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x5b, 0xf3);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x5c, 0xef);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x5d, 0xe3);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x5e, 0xda);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x5f, 0xd8);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x60, 0xd8);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x61, 0xd8);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x62, 0xcb);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x63, 0xbf);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x64, 0xb3);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x65, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x66, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x67, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x51, 0x0f, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x53, 0x2c);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x55, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xbb, 0x13);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x3b, 0x03, 0xac, 0x1a, 0x04, 0x04);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x2a);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x25, 0x46);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x30, 0x46);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x39, 0x46);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x26);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x01, 0xb0);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x19, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x1a, 0xe0);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x1b, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x1c, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x2a, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x2b, 0xe0);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0xf0);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x84, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x85, 0x0c);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x20);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x51, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x25);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x91, 0x1f);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x92, 0x0f);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x93, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x94, 0x18);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x95, 0x03);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x96, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xb0, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x25);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x19, 0x1f);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x1b, 0x1b);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x24);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xb8, 0x28);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x27);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xd0, 0x31);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xd1, 0x20);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xd4, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xde, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xdf, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x26);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x00, 0x81);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x01, 0xb0);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x22);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x6f, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x70, 0x11);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x73, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x74, 0x4d);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xa0, 0x3f);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xa9, 0x50);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xaa, 0x28);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xab, 0x28);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xad, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xb8, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xb9, 0x4b);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xba, 0x96);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xbb, 0x4b);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xbe, 0x07);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xbf, 0x4b);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xc0, 0x07);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xc1, 0x5c);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xc2, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xc5, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xc6, 0x3f);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xc7, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xca, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xcb, 0x40);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xce, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xcf, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xd0, 0x40);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xd3, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xd4, 0x40);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x25);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xbc, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xbd, 0x1c);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x2a);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x9a, 0x03);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x11);
mipi_dsi_msleep(&dsi_ctx, 70);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x29);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x29);
return dsi_ctx.accum_err;
}
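
Note on the fix above: the *_multi helpers take the shared context by address so they can record the first failure in mipi_dsi_multi_context.accum_err; once it is nonzero, later calls become no-ops, so the long command list needs no per-call error checks. A minimal sketch of the pattern (the panel function name is hypothetical, and the dual-link context setup may differ in detail):

    static int example_panel_on(struct mipi_dsi_device *dsi0,
                                struct mipi_dsi_device *dsi1)
    {
            struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi0 };

            /* Skipped automatically once dsi_ctx.accum_err is nonzero. */
            mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x10);
            mipi_dsi_msleep(&dsi_ctx, 70);
            mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x29);

            /* One error check covers the whole sequence. */
            return dsi_ctx.accum_err;
    }
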
diff --git a/drivers/gpu/drm/panel/panel-orisetech-ota5601a.c b/drivers/gpu/drm/panel/panel-orisetech-ota5601a.c
index 3231e84dc66c..8a608972fc41 100644
--- a/drivers/gpu/drm/panel/panel-orisetech-ota5601a.c
+++ b/drivers/gpu/drm/panel/panel-orisetech-ota5601a.c
@@ -276,11 +276,8 @@ static int ota5601a_probe(struct spi_device *spi)
}
err = drm_panel_of_backlight(&panel->drm_panel);
- if (err) {
- if (err != -EPROBE_DEFER)
- dev_err(dev, "Failed to get backlight handle\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(dev, err, "Failed to get backlight handle\n");
drm_panel_add(&panel->drm_panel);
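
dev_err_probe() folds the -EPROBE_DEFER special case into one call: it logs only for real errors, records a deferral reason otherwise, and returns the error code unchanged. A sketch of the idiom (do_acquire_resource() is a hypothetical stand-in):

    static int example_probe(struct device *dev)
    {
            int err = do_acquire_resource(dev);     /* hypothetical helper */

            /* No log spam on probe deferral; err is passed straight back. */
            if (err)
                    return dev_err_probe(dev, err, "failed to acquire resource\n");

            return 0;
    }
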
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c
index e91f50662997..7e2f4e043d62 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c
@@ -7,7 +7,9 @@
#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
+#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <video/mipi_display.h>
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e8aa5x01-ams561ra01.c b/drivers/gpu/drm/panel/panel-samsung-s6e8aa5x01-ams561ra01.c
new file mode 100644
index 000000000000..56e10c7c3a76
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e8aa5x01-ams561ra01.c
@@ -0,0 +1,981 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Samsung AMS561RA01 panel with S6E8AA5X01 controller.
+ *
+ * Copyright (C) 2025 Kaustabh Chakraborty <kauschluss@disroot.org>
+ */
+
+#include <linux/backlight.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
+
+/* Manufacturer Command Set */
+#define MCS_AIDCTL 0xb2
+#define MCS_ADAPTIVECTL 0xb5
+#define MCS_ELVSS 0xb6
+#define MCS_TEMPERCTL 0xb8
+#define MCS_PENTILE 0xc0
+#define MCS_GAMMACTL 0xca
+#define MCS_LTPSCTL 0xcb
+#define MCS_PCD 0xcc
+#define MCS_ERRFLAG 0xe7
+#define MCS_ACCESSPROT 0xf0
+#define MCS_DISPCTL 0xf2
+#define MCS_GAMMAUPD 0xf7
+
+#define GAMMA_CMD_LEN 34
+#define AID_CMD_LEN 3
+
+static const struct {
+ u8 gamma[GAMMA_CMD_LEN];
+ u8 aid[AID_CMD_LEN];
+} s6e8aa5x01_ams561ra01_cmds[] = {
+ {
+ /* 5 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x94,
+ 0x88, 0x89, 0x8a, 0x87, 0x87, 0x89,
+ 0x8d, 0x8c, 0x8d, 0x89, 0x8c, 0x8e,
+ 0x8e, 0x8f, 0x90, 0xa3, 0xa2, 0x9a,
+ 0xcf, 0xca, 0x9f, 0xe6, 0xff, 0xb4,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x05, 0xa5 },
+ }, {
+ /* 6 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x95,
+ 0x88, 0x89, 0x8b, 0x87, 0x87, 0x89,
+ 0x8c, 0x8a, 0x8c, 0x85, 0x88, 0x8c,
+ 0x8b, 0x8c, 0x8e, 0xa2, 0xa2, 0x9a,
+ 0xd0, 0xcc, 0xa2, 0xed, 0xff, 0xb7,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x05, 0x95 },
+ }, {
+ /* 7 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x95,
+ 0x88, 0x89, 0x8b, 0x87, 0x87, 0x89,
+ 0x8c, 0x8a, 0x8c, 0x85, 0x88, 0x8c,
+ 0x8b, 0x8c, 0x8e, 0xa2, 0xa2, 0x99,
+ 0xc8, 0xc4, 0x9d, 0xed, 0xff, 0xb7,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x05, 0x89 },
+ }, {
+ /* 8 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x96,
+ 0x88, 0x89, 0x8a, 0x87, 0x87, 0x89,
+ 0x8a, 0x88, 0x8b, 0x83, 0x86, 0x8b,
+ 0x8c, 0x8b, 0x8d, 0x9d, 0x9f, 0x97,
+ 0xc7, 0xc3, 0x9c, 0xf5, 0xff, 0xbb,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x05, 0x7e },
+ }, {
+ /* 9 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x96,
+ 0x88, 0x89, 0x8a, 0x87, 0x87, 0x89,
+ 0x89, 0x86, 0x8a, 0x82, 0x84, 0x88,
+ 0x90, 0x8f, 0x91, 0x95, 0x97, 0x94,
+ 0xc6, 0xc2, 0x9d, 0xf5, 0xff, 0xbb,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x05, 0x73 },
+ }, {
+ /* 10 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x96,
+ 0x88, 0x89, 0x8a, 0x87, 0x87, 0x89,
+ 0x89, 0x86, 0x8a, 0x82, 0x84, 0x88,
+ 0x90, 0x8f, 0x91, 0x94, 0x97, 0x93,
+ 0xc6, 0xc2, 0x9e, 0xec, 0xff, 0xb7,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x05, 0x67 },
+ }, {
+ /* 11 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x96,
+ 0x88, 0x89, 0x8a, 0x87, 0x87, 0x89,
+ 0x89, 0x86, 0x8a, 0x82, 0x84, 0x88,
+ 0x8b, 0x8b, 0x8d, 0x90, 0x93, 0x92,
+ 0xc5, 0xc1, 0x9c, 0xf5, 0xff, 0xbb,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x05, 0x56 },
+ }, {
+ /* 12 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x96,
+ 0x88, 0x89, 0x8b, 0x87, 0x87, 0x89,
+ 0x89, 0x86, 0x89, 0x82, 0x84, 0x88,
+ 0x87, 0x86, 0x8a, 0x8c, 0x90, 0x8f,
+ 0xcd, 0xc9, 0xa1, 0xec, 0xff, 0xb7,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x05, 0x4a },
+ }, {
+ /* 13 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x96,
+ 0x88, 0x89, 0x8b, 0x87, 0x87, 0x89,
+ 0x89, 0x86, 0x89, 0x82, 0x84, 0x88,
+ 0x87, 0x86, 0x8a, 0x8c, 0x90, 0x8e,
+ 0xc4, 0xbf, 0x9c, 0xf5, 0xff, 0xbb,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x05, 0x3b },
+ }, {
+ /* 14 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x96,
+ 0x88, 0x89, 0x8b, 0x87, 0x87, 0x89,
+ 0x89, 0x86, 0x89, 0x82, 0x84, 0x88,
+ 0x87, 0x86, 0x89, 0x8c, 0x90, 0x8f,
+ 0xc2, 0xbf, 0x9c, 0xec, 0xff, 0xb7,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x05, 0x35 },
+ }, {
+ /* 15 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x96,
+ 0x88, 0x89, 0x8b, 0x87, 0x87, 0x89,
+ 0x89, 0x86, 0x89, 0x82, 0x84, 0x88,
+ 0x87, 0x86, 0x89, 0x8c, 0x90, 0x8f,
+ 0xb7, 0xb6, 0x96, 0xec, 0xff, 0xb7,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x05, 0x25 },
+ }, {
+ /* 16 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x96,
+ 0x88, 0x89, 0x8b, 0x87, 0x87, 0x89,
+ 0x89, 0x86, 0x89, 0x82, 0x84, 0x88,
+ 0x88, 0x86, 0x89, 0x8c, 0x90, 0x8f,
+ 0xb7, 0xb6, 0x96, 0xec, 0xff, 0xb7,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x05, 0x20 },
+ }, {
+ /* 17 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x96,
+ 0x88, 0x89, 0x8b, 0x87, 0x87, 0x89,
+ 0x89, 0x86, 0x89, 0x7f, 0x80, 0x86,
+ 0x86, 0x85, 0x89, 0x88, 0x8c, 0x8e,
+ 0xbf, 0xbe, 0x9c, 0xec, 0xff, 0xb7,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x05, 0x11 },
+ }, {
+ /* 19 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x96,
+ 0x88, 0x89, 0x8b, 0x87, 0x87, 0x89,
+ 0x89, 0x86, 0x89, 0x7f, 0x80, 0x86,
+ 0x87, 0x85, 0x89, 0x88, 0x8c, 0x8e,
+ 0xb3, 0xb4, 0x97, 0xeb, 0xff, 0xb7,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x04, 0xf2 },
+ }, {
+ /* 20 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x95,
+ 0x88, 0x89, 0x8b, 0x87, 0x87, 0x89,
+ 0x89, 0x86, 0x89, 0x7f, 0x80, 0x86,
+ 0x87, 0x85, 0x89, 0x89, 0x8c, 0x8e,
+ 0xb3, 0xb4, 0x97, 0xeb, 0xff, 0xb7,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x04, 0xe4 },
+ }, {
+ /* 21 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x96,
+ 0x88, 0x89, 0x8b, 0x87, 0x87, 0x89,
+ 0x8a, 0x88, 0x8b, 0x7d, 0x7e, 0x84,
+ 0x8c, 0x8a, 0x8c, 0x8e, 0x90, 0x8f,
+ 0xb6, 0xb6, 0x97, 0xe3, 0xff, 0xb3,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x04, 0xd5 },
+ }, {
+ /* 22 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x97,
+ 0x88, 0x89, 0x8b, 0x87, 0x87, 0x89,
+ 0x8a, 0x88, 0x8b, 0x81, 0x82, 0x86,
+ 0x87, 0x86, 0x88, 0x8e, 0x90, 0x8f,
+ 0xb6, 0xb6, 0x95, 0xe3, 0xff, 0xb3,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x04, 0xc5 },
+ }, {
+ /* 24 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x97,
+ 0x88, 0x89, 0x8b, 0x88, 0x88, 0x8a,
+ 0x8a, 0x87, 0x8a, 0x81, 0x82, 0x86,
+ 0x87, 0x86, 0x88, 0x8e, 0x90, 0x8f,
+ 0xb6, 0xb6, 0x94, 0xe3, 0xff, 0xb3,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x04, 0xa7 },
+ }, {
+ /* 25 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x98,
+ 0x88, 0x89, 0x8b, 0x88, 0x88, 0x8a,
+ 0x8a, 0x87, 0x8a, 0x81, 0x82, 0x86,
+ 0x87, 0x86, 0x87, 0x8e, 0x90, 0x8f,
+ 0xbf, 0xbf, 0x9a, 0xda, 0xfa, 0xaf,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x04, 0x95 },
+ }, {
+ /* 27 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x99,
+ 0x88, 0x89, 0x8b, 0x88, 0x88, 0x8a,
+ 0x8a, 0x87, 0x8a, 0x83, 0x86, 0x8a,
+ 0x88, 0x87, 0x87, 0x88, 0x8b, 0x8c,
+ 0xbf, 0xbf, 0x9a, 0xda, 0xfa, 0xaf,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x04, 0x76 },
+ }, {
+ /* 29 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x99,
+ 0x88, 0x89, 0x8b, 0x88, 0x88, 0x8a,
+ 0x8a, 0x87, 0x8b, 0x83, 0x86, 0x89,
+ 0x88, 0x87, 0x88, 0x88, 0x8b, 0x8b,
+ 0xbf, 0xbf, 0x9a, 0xda, 0xfa, 0xaf,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x04, 0x54 },
+ }, {
+ /* 30 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x9a,
+ 0x88, 0x89, 0x8b, 0x88, 0x88, 0x8a,
+ 0x8a, 0x87, 0x8a, 0x84, 0x86, 0x8a,
+ 0x87, 0x87, 0x87, 0x88, 0x8b, 0x8b,
+ 0xbf, 0xbf, 0x99, 0xda, 0xfa, 0xaf,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x04, 0x44 },
+ }, {
+ /* 32 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x9a,
+ 0x89, 0x89, 0x8c, 0x88, 0x88, 0x8a,
+ 0x89, 0x87, 0x8a, 0x84, 0x86, 0x8a,
+ 0x87, 0x87, 0x87, 0x89, 0x8b, 0x8b,
+ 0xbf, 0xbf, 0x98, 0xd2, 0xf2, 0xac,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x04, 0x1f },
+ }, {
+ /* 34 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x9b,
+ 0x88, 0x89, 0x8b, 0x88, 0x88, 0x8a,
+ 0x8b, 0x87, 0x8b, 0x83, 0x86, 0x89,
+ 0x87, 0x87, 0x88, 0x88, 0x8b, 0x8a,
+ 0xbf, 0xbf, 0x98, 0xd2, 0xf2, 0xac,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x03, 0xff },
+ }, {
+ /* 37 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x9b,
+ 0x89, 0x89, 0x8c, 0x88, 0x88, 0x8a,
+ 0x8a, 0x87, 0x8a, 0x81, 0x82, 0x86,
+ 0x86, 0x86, 0x86, 0x8d, 0x90, 0x8d,
+ 0xc0, 0xbf, 0x9a, 0xd2, 0xf2, 0xac,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x03, 0xd3 },
+ }, {
+ /* 39 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x9b,
+ 0x89, 0x89, 0x8c, 0x88, 0x88, 0x8a,
+ 0x8a, 0x87, 0x8a, 0x81, 0x82, 0x86,
+ 0x87, 0x86, 0x87, 0x8d, 0x90, 0x8d,
+ 0xb6, 0xb6, 0x93, 0xda, 0xf9, 0xaf,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x03, 0xb3 },
+ }, {
+ /* 41 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x9b,
+ 0x89, 0x89, 0x8c, 0x88, 0x88, 0x8a,
+ 0x8a, 0x87, 0x8b, 0x81, 0x82, 0x85,
+ 0x87, 0x86, 0x87, 0x8d, 0x90, 0x8d,
+ 0xb6, 0xb6, 0x94, 0xda, 0xf9, 0xaf,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x03, 0x93 },
+ }, {
+ /* 44 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x9b,
+ 0x89, 0x89, 0x8c, 0x88, 0x88, 0x8a,
+ 0x8a, 0x87, 0x8b, 0x81, 0x82, 0x86,
+ 0x87, 0x86, 0x86, 0x85, 0x87, 0x8a,
+ 0xbe, 0xbe, 0x99, 0xda, 0xf9, 0xaf,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x03, 0x66 },
+ }, {
+ /* 47 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x9b,
+ 0x89, 0x89, 0x8c, 0x88, 0x88, 0x8a,
+ 0x8a, 0x87, 0x8b, 0x81, 0x82, 0x86,
+ 0x88, 0x86, 0x87, 0x84, 0x87, 0x89,
+ 0xb4, 0xb4, 0x94, 0xe2, 0xff, 0xb3,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x03, 0x40 },
+ }, {
+ /* 50 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x9c,
+ 0x89, 0x89, 0x8b, 0x88, 0x88, 0x8a,
+ 0x8a, 0x87, 0x8b, 0x81, 0x82, 0x86,
+ 0x88, 0x86, 0x87, 0x84, 0x87, 0x89,
+ 0xb4, 0xb4, 0x95, 0xe2, 0xff, 0xb3,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x03, 0x0e },
+ }, {
+ /* 53 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x9c,
+ 0x89, 0x89, 0x8b, 0x88, 0x88, 0x8a,
+ 0x8a, 0x87, 0x8b, 0x81, 0x82, 0x86,
+ 0x88, 0x86, 0x87, 0x85, 0x87, 0x8a,
+ 0xb4, 0xb4, 0x96, 0xe2, 0xff, 0xb3,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x02, 0xe2 },
+ }, {
+ /* 56 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x9c,
+ 0x89, 0x89, 0x8b, 0x88, 0x88, 0x8a,
+ 0x8a, 0x87, 0x8b, 0x81, 0x82, 0x86,
+ 0x88, 0x86, 0x87, 0x85, 0x87, 0x8a,
+ 0xab, 0xab, 0x90, 0xdd, 0xf7, 0xaf,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x02, 0xb5 },
+ }, {
+ /* 60 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x9c,
+ 0x89, 0x89, 0x8b, 0x88, 0x88, 0x8a,
+ 0x8a, 0x87, 0x8b, 0x82, 0x82, 0x87,
+ 0x83, 0x81, 0x84, 0x81, 0x84, 0x88,
+ 0xb3, 0xb3, 0x96, 0xcf, 0xe5, 0xa8,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x02, 0x77 },
+ }, {
+ /* 64 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x9c,
+ 0x89, 0x89, 0x8b, 0x88, 0x88, 0x8a,
+ 0x8a, 0x87, 0x8b, 0x82, 0x82, 0x87,
+ 0x83, 0x81, 0x84, 0x82, 0x84, 0x88,
+ 0xb2, 0xb3, 0x97, 0xcf, 0xe5, 0xa8,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x02, 0x36 },
+ }, {
+ /* 68 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x9b, 0x00, 0xa6, 0x00, 0x9d,
+ 0x88, 0x88, 0x89, 0x89, 0x89, 0x8b,
+ 0x8a, 0x88, 0x8b, 0x7f, 0x80, 0x86,
+ 0x88, 0x86, 0x87, 0x7d, 0x7f, 0x85,
+ 0xb2, 0xb3, 0x97, 0xcf, 0xe5, 0xa8,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x02, 0x15 },
+ }, {
+ /* 72 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x9c, 0x00, 0xa9, 0x00, 0xa0,
+ 0x88, 0x88, 0x89, 0x88, 0x88, 0x8a,
+ 0x8c, 0x8a, 0x8d, 0x7f, 0x81, 0x85,
+ 0x84, 0x82, 0x84, 0x85, 0x87, 0x8a,
+ 0xaa, 0xab, 0x93, 0xcf, 0xe5, 0xa8,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x02, 0x15 },
+ }, {
+ /* 77 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xa1, 0x00, 0xad, 0x00, 0xa5,
+ 0x89, 0x89, 0x8a, 0x88, 0x87, 0x89,
+ 0x8c, 0x89, 0x8d, 0x7f, 0x81, 0x85,
+ 0x84, 0x83, 0x84, 0x81, 0x83, 0x86,
+ 0xaa, 0xab, 0x93, 0xc0, 0xd3, 0xa1,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x02, 0x15 },
+ }, {
+ /* 82 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xa5, 0x00, 0xb0, 0x00, 0xa9,
+ 0x88, 0x89, 0x89, 0x85, 0x86, 0x89,
+ 0x8a, 0x88, 0x8b, 0x82, 0x82, 0x87,
+ 0x81, 0x80, 0x82, 0x89, 0x8b, 0x8b,
+ 0xa2, 0xa3, 0x8e, 0xc0, 0xd3, 0xa1,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x02, 0x15 },
+ }, {
+ /* 87 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xab, 0x00, 0xb4, 0x00, 0xad,
+ 0x88, 0x89, 0x8a, 0x84, 0x86, 0x88,
+ 0x8a, 0x88, 0x8b, 0x7f, 0x7f, 0x84,
+ 0x86, 0x84, 0x85, 0x85, 0x86, 0x88,
+ 0xa2, 0xa3, 0x8f, 0xc0, 0xd3, 0xa1,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x02, 0x15 },
+ }, {
+ /* 93 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xaf, 0x00, 0xb9, 0x00, 0xb1,
+ 0x88, 0x89, 0x8a, 0x84, 0x85, 0x87,
+ 0x8a, 0x89, 0x8b, 0x7e, 0x7e, 0x83,
+ 0x87, 0x86, 0x86, 0x88, 0x8a, 0x89,
+ 0x9c, 0x9c, 0x8b, 0xc0, 0xd3, 0xa1,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x02, 0x15 },
+ }, {
+ /* 98 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xb3, 0x00, 0xbc, 0x00, 0xb5,
+ 0x88, 0x88, 0x88, 0x84, 0x84, 0x86,
+ 0x8a, 0x88, 0x8a, 0x7f, 0x7f, 0x84,
+ 0x84, 0x83, 0x84, 0x88, 0x8a, 0x89,
+ 0x9c, 0x9c, 0x8b, 0xc0, 0xd3, 0xa1,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x02, 0x15 },
+ }, {
+ /* 105 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xb7, 0x00, 0xc0, 0x00, 0xba,
+ 0x87, 0x87, 0x88, 0x85, 0x85, 0x87,
+ 0x89, 0x88, 0x89, 0x7f, 0x7f, 0x83,
+ 0x81, 0x80, 0x82, 0x88, 0x8a, 0x89,
+ 0x9c, 0x9c, 0x8c, 0xb2, 0xc2, 0x9a,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x02, 0x15 },
+ }, {
+ /* 111 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xbb, 0x00, 0xc3, 0x00, 0xbe,
+ 0x87, 0x87, 0x88, 0x85, 0x85, 0x88,
+ 0x88, 0x87, 0x89, 0x80, 0x80, 0x84,
+ 0x81, 0x81, 0x82, 0x85, 0x86, 0x87,
+ 0x9c, 0x9c, 0x8b, 0xb2, 0xc2, 0x9a,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x02, 0x15 },
+ }, {
+ /* 119 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xc0, 0x00, 0xc8, 0x00, 0xc4,
+ 0x87, 0x87, 0x88, 0x82, 0x84, 0x86,
+ 0x87, 0x85, 0x87, 0x82, 0x81, 0x84,
+ 0x83, 0x82, 0x83, 0x80, 0x81, 0x84,
+ 0x9c, 0x9c, 0x8c, 0xb2, 0xc2, 0x9a,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x02, 0x14 },
+ }, {
+ /* 126 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xc0, 0x00, 0xc8, 0x00, 0xc4,
+ 0x87, 0x87, 0x88, 0x82, 0x84, 0x86,
+ 0x87, 0x85, 0x87, 0x82, 0x81, 0x84,
+ 0x83, 0x82, 0x83, 0x80, 0x81, 0x84,
+ 0x9c, 0x9c, 0x8d, 0xb2, 0xc2, 0x9a,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x01, 0xde },
+ }, {
+ /* 134 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xc0, 0x00, 0xc8, 0x00, 0xc4,
+ 0x87, 0x87, 0x88, 0x82, 0x84, 0x86,
+ 0x87, 0x85, 0x87, 0x82, 0x81, 0x84,
+ 0x83, 0x82, 0x83, 0x80, 0x81, 0x84,
+ 0x9c, 0x9c, 0x8d, 0xa4, 0xb0, 0x92,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x01, 0x94 },
+ }, {
+ /* 143 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xc0, 0x00, 0xc8, 0x00, 0xc3,
+ 0x87, 0x87, 0x88, 0x82, 0x84, 0x86,
+ 0x87, 0x85, 0x87, 0x82, 0x81, 0x85,
+ 0x83, 0x82, 0x83, 0x80, 0x81, 0x84,
+ 0x92, 0x92, 0x89, 0xab, 0xb6, 0x96,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x01, 0x46 },
+ }, {
+ /* 152 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xc0, 0x00, 0xc8, 0x00, 0xc3,
+ 0x87, 0x87, 0x88, 0x83, 0x84, 0x86,
+ 0x87, 0x85, 0x87, 0x81, 0x81, 0x85,
+ 0x84, 0x82, 0x83, 0x80, 0x81, 0x83,
+ 0x92, 0x92, 0x8b, 0xab, 0xb6, 0x96,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0xfa },
+ }, {
+ /* 162 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xc0, 0x00, 0xc8, 0x00, 0xc3,
+ 0x87, 0x87, 0x88, 0x83, 0x84, 0x86,
+ 0x87, 0x85, 0x87, 0x81, 0x81, 0x84,
+ 0x84, 0x82, 0x84, 0x80, 0x81, 0x83,
+ 0x92, 0x92, 0x8b, 0x9d, 0xa4, 0x8e,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0xac },
+ }, {
+ /* 172 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xc0, 0x00, 0xc8, 0x00, 0xc3,
+ 0x87, 0x87, 0x88, 0x83, 0x84, 0x86,
+ 0x87, 0x85, 0x87, 0x81, 0x81, 0x84,
+ 0x84, 0x82, 0x83, 0x80, 0x81, 0x84,
+ 0x93, 0x92, 0x8c, 0x9d, 0xa4, 0x8e,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x57 },
+ }, {
+ /* 183 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xc2, 0x00, 0xca, 0x00, 0xc5,
+ 0x86, 0x86, 0x87, 0x85, 0x84, 0x87,
+ 0x87, 0x86, 0x88, 0x7e, 0x80, 0x83,
+ 0x84, 0x82, 0x83, 0x80, 0x81, 0x83,
+ 0x93, 0x92, 0x8c, 0x9d, 0xa4, 0x8e,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ }, {
+ /* 195 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xc7, 0x00, 0xce, 0x00, 0xc9,
+ 0x86, 0x87, 0x86, 0x83, 0x83, 0x85,
+ 0x85, 0x84, 0x86, 0x82, 0x82, 0x85,
+ 0x80, 0x80, 0x81, 0x81, 0x81, 0x84,
+ 0x93, 0x92, 0x8c, 0x9d, 0xa4, 0x8e,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ }, {
+ /* 207 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xcc, 0x00, 0xd2, 0x00, 0xce,
+ 0x86, 0x86, 0x87, 0x81, 0x83, 0x84,
+ 0x84, 0x82, 0x84, 0x83, 0x83, 0x85,
+ 0x81, 0x81, 0x82, 0x7c, 0x7d, 0x81,
+ 0x93, 0x92, 0x8c, 0x9d, 0xa4, 0x8e,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ }, {
+ /* 220 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xd1, 0x00, 0xd6, 0x00, 0xd3,
+ 0x86, 0x86, 0x86, 0x81, 0x83, 0x84,
+ 0x84, 0x82, 0x84, 0x80, 0x80, 0x83,
+ 0x81, 0x81, 0x82, 0x7c, 0x7d, 0x81,
+ 0x93, 0x92, 0x8c, 0x9d, 0xa4, 0x8e,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ }, {
+ /* 234 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xd6, 0x00, 0xdb, 0x00, 0xd8,
+ 0x85, 0x85, 0x85, 0x81, 0x83, 0x84,
+ 0x83, 0x82, 0x83, 0x80, 0x80, 0x82,
+ 0x84, 0x82, 0x83, 0x79, 0x79, 0x7e,
+ 0x93, 0x92, 0x8d, 0x9d, 0xa4, 0x8e,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ }, {
+ /* 249 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xdc, 0x00, 0xe0, 0x00, 0xdd,
+ 0x84, 0x84, 0x84, 0x81, 0x82, 0x83,
+ 0x84, 0x82, 0x84, 0x7f, 0x7f, 0x82,
+ 0x81, 0x80, 0x81, 0x80, 0x81, 0x82,
+ 0x8c, 0x8c, 0x86, 0x9d, 0xa4, 0x8e,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ }, {
+ /* 265 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xe2, 0x00, 0xe5, 0x00, 0xe3,
+ 0x83, 0x83, 0x83, 0x81, 0x82, 0x83,
+ 0x82, 0x82, 0x83, 0x82, 0x81, 0x83,
+ 0x7f, 0x7e, 0x80, 0x7c, 0x7d, 0x80,
+ 0x8c, 0x8c, 0x86, 0x8e, 0x92, 0x87,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ }, {
+ /* 282 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xe8, 0x00, 0xea, 0x00, 0xe9,
+ 0x83, 0x83, 0x83, 0x80, 0x82, 0x82,
+ 0x81, 0x82, 0x82, 0x82, 0x81, 0x82,
+ 0x81, 0x80, 0x81, 0x80, 0x80, 0x81,
+ 0x85, 0x85, 0x83, 0x8e, 0x92, 0x87,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ }, {
+ /* 300 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xed, 0x00, 0xef, 0x00, 0xed,
+ 0x81, 0x82, 0x81, 0x81, 0x81, 0x82,
+ 0x82, 0x82, 0x83, 0x80, 0x80, 0x81,
+ 0x81, 0x81, 0x82, 0x83, 0x83, 0x83,
+ 0x80, 0x80, 0x7f, 0x8e, 0x92, 0x87,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ }, {
+ /* 316 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xf3, 0x00, 0xf4, 0x00, 0xf3,
+ 0x80, 0x81, 0x80, 0x81, 0x81, 0x81,
+ 0x82, 0x82, 0x82, 0x81, 0x80, 0x81,
+ 0x82, 0x82, 0x83, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x7f, 0x80, 0x80, 0x80,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ }, {
+ /* 333 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xf8, 0x00, 0xf8, 0x00, 0xf8,
+ 0x80, 0x81, 0x80, 0x81, 0x80, 0x81,
+ 0x81, 0x82, 0x82, 0x81, 0x80, 0x81,
+ 0x83, 0x83, 0x83, 0x7e, 0x7d, 0x7e,
+ 0x80, 0x80, 0x7f, 0x80, 0x80, 0x80,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ }, {
+ /* 360 nits */
+ { MCS_GAMMACTL,
+ 0x01, 0x00, 0x01, 0x00, 0x01, 0x00,
+ 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ }, {
+ /* 378 nits */
+ { MCS_GAMMACTL,
+ 0x01, 0x04, 0x01, 0x03, 0x01, 0x04,
+ 0x7f, 0x7f, 0x80, 0x7f, 0x7f, 0x7f,
+ 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
+ 0x80, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
+ 0x80, 0x80, 0x7f, 0x7f, 0x7f, 0x7f,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ }, {
+ /* 395 nits */
+ { MCS_GAMMACTL,
+ 0x01, 0x09, 0x01, 0x07, 0x01, 0x08,
+ 0x7e, 0x7f, 0x80, 0x7f, 0x7f, 0x7f,
+ 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
+ 0x80, 0x7f, 0x7f, 0x7e, 0x7e, 0x7e,
+ 0x80, 0x80, 0x7f, 0x7e, 0x7e, 0x7f,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ }, {
+ /* 413 nits */
+ { MCS_GAMMACTL,
+ 0x01, 0x0e, 0x01, 0x0b, 0x01, 0x0c,
+ 0x7e, 0x7f, 0x80, 0x7e, 0x7e, 0x7e,
+ 0x7e, 0x7e, 0x7e, 0x7f, 0x7f, 0x7f,
+ 0x80, 0x7f, 0x7f, 0x7d, 0x7d, 0x7d,
+ 0x80, 0x80, 0x7f, 0x7d, 0x7e, 0x7e,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ }, {
+ /* 430 nits */
+ { MCS_GAMMACTL,
+ 0x01, 0x13, 0x01, 0x0f, 0x01, 0x10,
+ 0x7d, 0x7f, 0x80, 0x7e, 0x7e, 0x7e,
+ 0x7e, 0x7e, 0x7e, 0x7f, 0x7f, 0x7f,
+ 0x80, 0x7f, 0x7f, 0x7d, 0x7d, 0x7d,
+ 0x80, 0x80, 0x7f, 0x7c, 0x7d, 0x7e,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ }, {
+ /* 448 nits */
+ { MCS_GAMMACTL,
+ 0x01, 0x18, 0x01, 0x13, 0x01, 0x14,
+ 0x7c, 0x7e, 0x80, 0x7e, 0x7e, 0x7e,
+ 0x7e, 0x7e, 0x7d, 0x7e, 0x7f, 0x7e,
+ 0x80, 0x7f, 0x7f, 0x7c, 0x7c, 0x7c,
+ 0x80, 0x80, 0x7e, 0x7b, 0x7c, 0x7d,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ }, {
+ /* 465 nits */
+ { MCS_GAMMACTL,
+ 0x01, 0x1d, 0x01, 0x17, 0x01, 0x18,
+ 0x7c, 0x7e, 0x80, 0x7d, 0x7d, 0x7d,
+ 0x7d, 0x7d, 0x7d, 0x7e, 0x7f, 0x7e,
+ 0x80, 0x7f, 0x7f, 0x7b, 0x7b, 0x7b,
+ 0x80, 0x80, 0x7e, 0x7a, 0x7c, 0x7d,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ }, {
+ /* 483 nits */
+ { MCS_GAMMACTL,
+ 0x01, 0x22, 0x01, 0x1b, 0x01, 0x1c,
+ 0x7b, 0x7e, 0x80, 0x7d, 0x7d, 0x7d,
+ 0x7d, 0x7d, 0x7c, 0x7e, 0x7f, 0x7e,
+ 0x80, 0x7f, 0x7f, 0x7a, 0x7a, 0x7a,
+ 0x80, 0x80, 0x7e, 0x79, 0x7b, 0x7c,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ }, {
+ /* 500 nits */
+ { MCS_GAMMACTL,
+ 0x01, 0x27, 0x01, 0x1f, 0x01, 0x20,
+ 0x7b, 0x7e, 0x80, 0x7d, 0x7d, 0x7d,
+ 0x7d, 0x7d, 0x7c, 0x7e, 0x7f, 0x7e,
+ 0x80, 0x7f, 0x7f, 0x7a, 0x7a, 0x7a,
+ 0x81, 0x80, 0x7e, 0x79, 0x7b, 0x7c,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ },
+};
+
+struct s6e8aa5x01_ams561ra01_ctx {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ struct backlight_device *bl;
+ struct gpio_desc *reset_gpio;
+ struct regulator_bulk_data *supplies;
+ u32 nr_supplies;
+};
+
+static const struct regulator_bulk_data s6e8aa5x01_ams561ra01_supplies[] = {
+ { .supply = "vdd" },
+ { .supply = "vci" },
+};
+
+static inline struct s6e8aa5x01_ams561ra01_ctx *to_ctx(struct drm_panel *panel)
+{
+ return container_of(panel, struct s6e8aa5x01_ams561ra01_ctx, panel);
+}
+
+static int s6e8aa5x01_ams561ra01_update_status(struct backlight_device *bl)
+{
+ struct s6e8aa5x01_ams561ra01_ctx *ctx = bl_get_data(bl);
+ struct mipi_dsi_multi_context dsi = { .dsi = ctx->dsi };
+ u16 lvl = backlight_get_brightness(bl);
+
+ if (!ctx->panel.enabled)
+ return 0;
+
+ mipi_dsi_dcs_write_seq_multi(&dsi, MCS_ACCESSPROT, 0x5a, 0x5a);
+
+ mipi_dsi_dcs_write_buffer_multi(&dsi,
+ s6e8aa5x01_ams561ra01_cmds[lvl].gamma,
+ GAMMA_CMD_LEN);
+ mipi_dsi_dcs_write_buffer_multi(&dsi,
+ s6e8aa5x01_ams561ra01_cmds[lvl].aid,
+ AID_CMD_LEN);
+ mipi_dsi_dcs_write_seq_multi(&dsi, MCS_GAMMAUPD, 0x03);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi, MCS_ACCESSPROT, 0xa5, 0xa5);
+
+ return dsi.accum_err;
+}
+
+static int s6e8aa5x01_ams561ra01_prepare(struct drm_panel *panel)
+{
+ struct s6e8aa5x01_ams561ra01_ctx *ctx = to_ctx(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ ret = regulator_bulk_enable(ctx->nr_supplies, ctx->supplies);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable regulators: %d\n", ret);
+ return ret;
+ }
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(5000, 6000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(5000, 6000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(10000, 11000);
+
+ return 0;
+}
+
+static int s6e8aa5x01_ams561ra01_unprepare(struct drm_panel *panel)
+{
+ struct s6e8aa5x01_ams561ra01_ctx *ctx = to_ctx(panel);
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(5000, 6000);
+
+ regulator_bulk_disable(ctx->nr_supplies, ctx->supplies);
+
+ return 0;
+}
+
+static int s6e8aa5x01_ams561ra01_enable(struct drm_panel *panel)
+{
+ struct s6e8aa5x01_ams561ra01_ctx *ctx = to_ctx(panel);
+ struct mipi_dsi_multi_context dsi = { .dsi = ctx->dsi };
+
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi);
+ mipi_dsi_msleep(&dsi, 100);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi, MCS_ACCESSPROT, 0x5a, 0x5a);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi, MCS_PENTILE, 0xd8, 0xd8, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi, MCS_PCD, 0x5c);
+ mipi_dsi_dcs_write_seq_multi(&dsi, MCS_ERRFLAG, 0xed, 0xc7, 0x23, 0x67);
+ mipi_dsi_dcs_write_seq_multi(&dsi, MCS_DISPCTL, 0x0c, 0x0c, 0xb9, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi, MCS_LTPSCTL,
+ 0x00, 0x45, 0x10, 0x10, 0x08, 0x32, 0x54, 0x00,
+ 0x00, 0x00, 0x00, 0x07, 0x06, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x48, 0x5e, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x03, 0x00, 0x00, 0x00, 0xad, 0x00, 0x00,
+ 0x08, 0x05, 0x2a, 0x54, 0x03, 0xcc, 0x00, 0xff,
+ 0xfb, 0x03, 0x0d, 0x00, 0x11, 0x0f, 0x02, 0x03,
+ 0x0b, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
+ 0x13, 0x13, 0x13, 0x13, 0x00, 0x02, 0x03, 0x0b,
+ 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
+ 0x13, 0x13);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi, MCS_ACCESSPROT, 0xa5, 0xa5);
+
+ mipi_dsi_dcs_set_display_on_multi(&dsi);
+
+ return dsi.accum_err;
+}
+
+static int s6e8aa5x01_ams561ra01_disable(struct drm_panel *panel)
+{
+ struct s6e8aa5x01_ams561ra01_ctx *ctx = to_ctx(panel);
+ struct mipi_dsi_multi_context dsi = { .dsi = ctx->dsi };
+
+ mipi_dsi_dcs_set_display_off_multi(&dsi);
+ mipi_dsi_msleep(&dsi, 100);
+
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi);
+ mipi_dsi_msleep(&dsi, 150);
+
+ return dsi.accum_err;
+}
+
+static const struct drm_display_mode s6e8aa5x01_ams561ra01_mode = {
+ .clock = (720 + 62 + 2 + 26) * (1480 + 12 + 2 + 10) * 60 / 1000,
+ .hdisplay = 720,
+ .hsync_start = 720 + 62,
+ .hsync_end = 720 + 62 + 2,
+ .htotal = 720 + 62 + 2 + 26,
+ .vdisplay = 1480,
+ .vsync_start = 1480 + 12,
+ .vsync_end = 1480 + 12 + 2,
+ .vtotal = 1480 + 12 + 2 + 10,
+ .width_mm = 62,
+ .height_mm = 128,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static int s6e8aa5x01_ams561ra01_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ return drm_connector_helper_get_modes_fixed(connector,
+ &s6e8aa5x01_ams561ra01_mode);
+}
+
+static const struct backlight_ops s6e8aa5x01_ams561ra01_bl_ops = {
+ .update_status = s6e8aa5x01_ams561ra01_update_status,
+};
+
+static const struct drm_panel_funcs s6e8aa5x01_ams561ra01_panel_funcs = {
+ .prepare = s6e8aa5x01_ams561ra01_prepare,
+ .unprepare = s6e8aa5x01_ams561ra01_unprepare,
+ .enable = s6e8aa5x01_ams561ra01_enable,
+ .disable = s6e8aa5x01_ams561ra01_disable,
+ .get_modes = s6e8aa5x01_ams561ra01_get_modes,
+};
+
+static int s6e8aa5x01_ams561ra01_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct s6e8aa5x01_ams561ra01_ctx *ctx;
+ int ret;
+
+ ctx = devm_drm_panel_alloc(dev, struct s6e8aa5x01_ams561ra01_ctx, panel,
+ &s6e8aa5x01_ams561ra01_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ ctx->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ ctx->nr_supplies = ARRAY_SIZE(s6e8aa5x01_ams561ra01_supplies);
+ ret = devm_regulator_bulk_get_const(dev, ctx->nr_supplies,
+ s6e8aa5x01_ams561ra01_supplies,
+ &ctx->supplies);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to get regulators\n");
+
+ ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_ASIS);
+ if (IS_ERR(ctx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
+ "failed to get reset-gpios\n");
+
+ ctx->bl = devm_backlight_device_register(dev, dev_name(dev), dev, ctx,
+ &s6e8aa5x01_ams561ra01_bl_ops,
+ NULL);
+ if (IS_ERR(ctx->bl))
+ return dev_err_probe(dev, PTR_ERR(ctx->bl),
+ "failed to register backlight device\n");
+
+ ctx->bl->props.type = BACKLIGHT_PLATFORM;
+ ctx->bl->props.brightness = ARRAY_SIZE(s6e8aa5x01_ams561ra01_cmds) - 1;
+ ctx->bl->props.max_brightness = ctx->bl->props.brightness;
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_VIDEO_NO_HFP;
+
+ ctx->panel.prepare_prev_first = true;
+ drm_panel_add(&ctx->panel);
+
+ ret = devm_mipi_dsi_attach(dev, dsi);
+ if (ret < 0) {
+ drm_panel_remove(&ctx->panel);
+ return dev_err_probe(dev, ret, "failed to attach to DSI host\n");
+ }
+
+ return 0;
+}
+
+static void s6e8aa5x01_ams561ra01_remove(struct mipi_dsi_device *dsi)
+{
+ struct s6e8aa5x01_ams561ra01_ctx *ctx = mipi_dsi_get_drvdata(dsi);
+
+ drm_panel_remove(&ctx->panel);
+}
+
+static const struct of_device_id s6e8aa5x01_ams561ra01_of_device_id[] = {
+ { .compatible = "samsung,s6e8aa5x01-ams561ra01" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, s6e8aa5x01_ams561ra01_of_device_id);
+
+static struct mipi_dsi_driver s6e8aa5x01_ams561ra01_dsi_driver = {
+ .probe = s6e8aa5x01_ams561ra01_probe,
+ .remove = s6e8aa5x01_ams561ra01_remove,
+ .driver = {
+ .name = "panel-samsung-s6e8aa5x01-ams561ra01",
+ .of_match_table = s6e8aa5x01_ams561ra01_of_device_id,
+ },
+};
+module_mipi_dsi_driver(s6e8aa5x01_ams561ra01_dsi_driver);
+
+MODULE_AUTHOR("Kaustabh Chakraborty <kauschluss@disroot.org>");
+MODULE_DESCRIPTION("Samsung AMS561RA01 Panel with S6E8AA5X01 Controller");
+MODULE_LICENSE("GPL");
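
Two design notes on the new driver, both visible in the code above. First, the backlight level is used directly as an index into the calibrated table, which is why max_brightness is set to ARRAY_SIZE(s6e8aa5x01_ams561ra01_cmds) - 1; each step selects one gamma/AID pair, spanning 5 to 500 nits per the table comments:

    u16 lvl = backlight_get_brightness(bl);                   /* 0 .. table size - 1 */
    const u8 *gamma = s6e8aa5x01_ams561ra01_cmds[lvl].gamma;  /* 34-byte MCS_GAMMACTL */
    const u8 *aid   = s6e8aa5x01_ams561ra01_cmds[lvl].aid;    /*  3-byte MCS_AIDCTL   */

Second, the mode's pixel clock follows directly from the timings: (720 + 62 + 2 + 26) * (1480 + 12 + 2 + 10) * 60 / 1000 = 810 * 1504 * 60 / 1000 = 73094 kHz for a 60 Hz refresh.
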
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 3333d4a07504..0019de93be1b 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -3716,6 +3716,28 @@ static const struct panel_desc olimex_lcd_olinuxino_43ts = {
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
};
+static const struct drm_display_mode olimex_lcd_olinuxino_5cts_mode = {
+ .clock = 33300,
+ .hdisplay = 800,
+ .hsync_start = 800 + 210,
+ .hsync_end = 800 + 210 + 20,
+ .htotal = 800 + 210 + 20 + 26,
+ .vdisplay = 480,
+ .vsync_start = 480 + 22,
+ .vsync_end = 480 + 22 + 10,
+ .vtotal = 480 + 22 + 10 + 13,
+};
+
+static const struct panel_desc olimex_lcd_olinuxino_5cts = {
+ .modes = &olimex_lcd_olinuxino_5cts_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 154,
+ .height = 86,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+};
+
static const struct display_timing ontat_kd50g21_40nt_a1_timing = {
.pixelclock = { 30000000, 30000000, 50000000 },
.hactive = { 800, 800, 800 },
@@ -5279,6 +5302,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "olimex,lcd-olinuxino-43-ts",
.data = &olimex_lcd_olinuxino_43ts,
}, {
+ .compatible = "olimex,lcd-olinuxino-5-cts",
+ .data = &olimex_lcd_olinuxino_5cts,
+ }, {
.compatible = "ontat,kd50g21-40nt-a1",
.data = &ontat_kd50g21_40nt_a1,
}, {
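
As a sanity check on the new Olimex mode, the declared clock matches the timings at 60 Hz:

    /* htotal = 800 + 210 + 20 + 26 = 1056
     * vtotal = 480 + 22 + 10 + 13 =  525
     * 33,300,000 Hz / (1056 * 525) ~= 60.06 Hz refresh
     */
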
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7703.c b/drivers/gpu/drm/panel/panel-sitronix-st7703.c
index 1a007a244d84..6c348fe28955 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7703.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7703.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Driver for panels based on Sitronix ST7703 controller, souch as:
+ * Driver for panels based on Sitronix ST7703 controller, such as:
*
* - Rocktech jh057n00900 5.5" MIPI-DSI panel
*
diff --git a/drivers/gpu/drm/panel/panel-summit.c b/drivers/gpu/drm/panel/panel-summit.c
index 4854437e2899..6d40b9ddfe02 100644
--- a/drivers/gpu/drm/panel/panel-summit.c
+++ b/drivers/gpu/drm/panel/panel-summit.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/backlight.h>
+#include <linux/mod_devicetable.h>
+#include <linux/property.h>
#include <drm/drm_device.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_mode.h>
diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
index 563f16bae543..0dd62e8b2fa7 100644
--- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
+++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
@@ -203,7 +203,6 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev,
panfrost_mmu_as_put(pfdev, perfcnt->mapping->mmu);
panfrost_gem_mapping_put(perfcnt->mapping);
perfcnt->mapping = NULL;
- pm_runtime_mark_last_busy(pfdev->dev);
pm_runtime_put_autosuspend(pfdev->dev);
return 0;
@@ -279,7 +278,6 @@ void panfrost_perfcnt_close(struct drm_file *file_priv)
if (perfcnt->user == pfile)
panfrost_perfcnt_disable_locked(pfdev, file_priv);
mutex_unlock(&perfcnt->lock);
- pm_runtime_mark_last_busy(pfdev->dev);
pm_runtime_put_autosuspend(pfdev->dev);
}
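
The dropped pm_runtime_mark_last_busy() calls look redundant rather than a behaviour change: assuming this follows the recent runtime-PM rework in which pm_runtime_put_autosuspend() updates the last-busy timestamp itself, the two-call idiom collapses to one:

    /* old idiom */
    pm_runtime_mark_last_busy(pfdev->dev);
    pm_runtime_put_autosuspend(pfdev->dev);

    /* new idiom: the put also marks last busy (assumption: this tree has the
     * reworked pm_runtime_put_autosuspend(); __pm_runtime_put_autosuspend()
     * keeps the old raw behaviour for callers that need it) */
    pm_runtime_put_autosuspend(pfdev->dev);
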
diff --git a/drivers/gpu/drm/panthor/Makefile b/drivers/gpu/drm/panthor/Makefile
index 15294719b09c..02db21748c12 100644
--- a/drivers/gpu/drm/panthor/Makefile
+++ b/drivers/gpu/drm/panthor/Makefile
@@ -8,6 +8,7 @@ panthor-y := \
panthor_gem.o \
panthor_gpu.o \
panthor_heap.o \
+ panthor_hw.o \
panthor_mmu.o \
panthor_sched.o
diff --git a/drivers/gpu/drm/panthor/panthor_device.c b/drivers/gpu/drm/panthor/panthor_device.c
index f0b2da5b2b96..81df49880bd8 100644
--- a/drivers/gpu/drm/panthor/panthor_device.c
+++ b/drivers/gpu/drm/panthor/panthor_device.c
@@ -18,6 +18,7 @@
#include "panthor_device.h"
#include "panthor_fw.h"
#include "panthor_gpu.h"
+#include "panthor_hw.h"
#include "panthor_mmu.h"
#include "panthor_regs.h"
#include "panthor_sched.h"
@@ -244,6 +245,10 @@ int panthor_device_init(struct panthor_device *ptdev)
return ret;
}
+ ret = panthor_hw_init(ptdev);
+ if (ret)
+ goto err_rpm_put;
+
ret = panthor_gpu_init(ptdev);
if (ret)
goto err_rpm_put;
diff --git a/drivers/gpu/drm/panthor/panthor_drv.c b/drivers/gpu/drm/panthor/panthor_drv.c
index 4d8e9b34702a..4c202fc5ce05 100644
--- a/drivers/gpu/drm/panthor/panthor_drv.c
+++ b/drivers/gpu/drm/panthor/panthor_drv.c
@@ -1103,14 +1103,15 @@ static int panthor_ioctl_group_create(struct drm_device *ddev, void *data,
ret = group_priority_permit(file, args->priority);
if (ret)
- return ret;
+ goto out;
ret = panthor_group_create(pfile, args, queue_args);
- if (ret >= 0) {
- args->group_handle = ret;
- ret = 0;
- }
+ if (ret < 0)
+ goto out;
+ args->group_handle = ret;
+ ret = 0;
+out:
kvfree(queue_args);
return ret;
}
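
The restructured ioctl reads as a leak fix as much as a cleanup: the old early 'return ret' after group_priority_permit() skipped kvfree(queue_args). With the goto-based unwind, every exit path funnels through the same cleanup:

    ret = group_priority_permit(file, args->priority);
    if (ret)
            goto out;               /* previously leaked queue_args */
    /* ... */
    out:
            kvfree(queue_args);
            return ret;
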
@@ -1400,14 +1401,9 @@ panthor_open(struct drm_device *ddev, struct drm_file *file)
struct panthor_file *pfile;
int ret;
- if (!try_module_get(THIS_MODULE))
- return -EINVAL;
-
pfile = kzalloc(sizeof(*pfile), GFP_KERNEL);
- if (!pfile) {
- ret = -ENOMEM;
- goto err_put_mod;
- }
+ if (!pfile)
+ return -ENOMEM;
pfile->ptdev = ptdev;
pfile->user_mmio.offset = DRM_PANTHOR_USER_MMIO_OFFSET;
@@ -1439,9 +1435,6 @@ err_destroy_vm_pool:
err_free_file:
kfree(pfile);
-
-err_put_mod:
- module_put(THIS_MODULE);
return ret;
}
@@ -1454,7 +1447,6 @@ panthor_postclose(struct drm_device *ddev, struct drm_file *file)
panthor_vm_pool_destroy(pfile);
kfree(pfile);
- module_put(THIS_MODULE);
}
static const struct drm_ioctl_desc panthor_drm_driver_ioctls[] = {
@@ -1555,6 +1547,7 @@ static void panthor_show_fdinfo(struct drm_printer *p, struct drm_file *file)
}
static const struct file_operations panthor_drm_driver_fops = {
+ .owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
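
Setting .owner in the file_operations lets the VFS take a module reference for each open file and drop it on release, which is exactly what the hand-rolled try_module_get()/module_put() pair in panthor_open()/panthor_postclose() did. Sketch of the replacement:

    static const struct file_operations example_fops = {
            .owner          = THIS_MODULE,  /* VFS pins the module per open file */
            .open           = drm_open,
            .release        = drm_release,
            .unlocked_ioctl = drm_ioctl,
    };
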
diff --git a/drivers/gpu/drm/panthor/panthor_fw.c b/drivers/gpu/drm/panthor/panthor_fw.c
index 36f1034839c2..9bf06e55eaee 100644
--- a/drivers/gpu/drm/panthor/panthor_fw.c
+++ b/drivers/gpu/drm/panthor/panthor_fw.c
@@ -1402,3 +1402,8 @@ err_unplug_fw:
}
MODULE_FIRMWARE("arm/mali/arch10.8/mali_csffw.bin");
+MODULE_FIRMWARE("arm/mali/arch10.10/mali_csffw.bin");
+MODULE_FIRMWARE("arm/mali/arch10.12/mali_csffw.bin");
+MODULE_FIRMWARE("arm/mali/arch11.8/mali_csffw.bin");
+MODULE_FIRMWARE("arm/mali/arch12.8/mali_csffw.bin");
+MODULE_FIRMWARE("arm/mali/arch13.8/mali_csffw.bin");
diff --git a/drivers/gpu/drm/panthor/panthor_gem.c b/drivers/gpu/drm/panthor/panthor_gem.c
index a123bc740ba1..156c7a0b62a2 100644
--- a/drivers/gpu/drm/panthor/panthor_gem.c
+++ b/drivers/gpu/drm/panthor/panthor_gem.c
@@ -74,7 +74,6 @@ static void panthor_gem_free_object(struct drm_gem_object *obj)
mutex_destroy(&bo->label.lock);
drm_gem_free_mmap_offset(&bo->base.base);
- mutex_destroy(&bo->gpuva_list_lock);
drm_gem_shmem_free(&bo->base);
drm_gem_object_put(vm_root_gem);
}
@@ -246,8 +245,6 @@ struct drm_gem_object *panthor_gem_create_object(struct drm_device *ddev, size_t
obj->base.base.funcs = &panthor_gem_funcs;
obj->base.map_wc = !ptdev->coherent;
- mutex_init(&obj->gpuva_list_lock);
- drm_gem_gpuva_set_lock(&obj->base.base, &obj->gpuva_list_lock);
mutex_init(&obj->label.lock);
panthor_gem_debugfs_bo_init(obj);
diff --git a/drivers/gpu/drm/panthor/panthor_gem.h b/drivers/gpu/drm/panthor/panthor_gem.h
index 8fc7215e9b90..80c6e24112d0 100644
--- a/drivers/gpu/drm/panthor/panthor_gem.h
+++ b/drivers/gpu/drm/panthor/panthor_gem.h
@@ -79,18 +79,6 @@ struct panthor_gem_object {
*/
struct drm_gem_object *exclusive_vm_root_gem;
- /**
- * @gpuva_list_lock: Custom GPUVA lock.
- *
- * Used to protect insertion of drm_gpuva elements to the
- * drm_gem_object.gpuva.list list.
- *
- * We can't use the GEM resv for that, because drm_gpuva_link() is
- * called in a dma-signaling path, where we're not allowed to take
- * resv locks.
- */
- struct mutex gpuva_list_lock;
-
/** @flags: Combination of drm_panthor_bo_flags flags. */
u32 flags;
diff --git a/drivers/gpu/drm/panthor/panthor_gpu.c b/drivers/gpu/drm/panthor/panthor_gpu.c
index cb7a335e07d7..db69449a5be0 100644
--- a/drivers/gpu/drm/panthor/panthor_gpu.c
+++ b/drivers/gpu/drm/panthor/panthor_gpu.c
@@ -35,40 +35,9 @@ struct panthor_gpu {
/** @reqs_acked: GPU request wait queue. */
wait_queue_head_t reqs_acked;
-};
-
-/**
- * struct panthor_model - GPU model description
- */
-struct panthor_model {
- /** @name: Model name. */
- const char *name;
-
- /** @arch_major: Major version number of architecture. */
- u8 arch_major;
- /** @product_major: Major version number of product. */
- u8 product_major;
-};
-
-/**
- * GPU_MODEL() - Define a GPU model. A GPU product can be uniquely identified
- * by a combination of the major architecture version and the major product
- * version.
- * @_name: Name for the GPU model.
- * @_arch_major: Architecture major.
- * @_product_major: Product major.
- */
-#define GPU_MODEL(_name, _arch_major, _product_major) \
-{\
- .name = __stringify(_name), \
- .arch_major = _arch_major, \
- .product_major = _product_major, \
-}
-
-static const struct panthor_model gpu_models[] = {
- GPU_MODEL(g610, 10, 7),
- {},
+	/** @cache_flush_lock: Lock to serialize cache flushes. */
+ struct mutex cache_flush_lock;
};
#define GPU_INTERRUPTS_MASK \
@@ -83,66 +52,6 @@ static void panthor_gpu_coherency_set(struct panthor_device *ptdev)
ptdev->coherent ? GPU_COHERENCY_PROT_BIT(ACE_LITE) : GPU_COHERENCY_NONE);
}
-static void panthor_gpu_init_info(struct panthor_device *ptdev)
-{
- const struct panthor_model *model;
- u32 arch_major, product_major;
- u32 major, minor, status;
- unsigned int i;
-
- ptdev->gpu_info.gpu_id = gpu_read(ptdev, GPU_ID);
- ptdev->gpu_info.csf_id = gpu_read(ptdev, GPU_CSF_ID);
- ptdev->gpu_info.gpu_rev = gpu_read(ptdev, GPU_REVID);
- ptdev->gpu_info.core_features = gpu_read(ptdev, GPU_CORE_FEATURES);
- ptdev->gpu_info.l2_features = gpu_read(ptdev, GPU_L2_FEATURES);
- ptdev->gpu_info.tiler_features = gpu_read(ptdev, GPU_TILER_FEATURES);
- ptdev->gpu_info.mem_features = gpu_read(ptdev, GPU_MEM_FEATURES);
- ptdev->gpu_info.mmu_features = gpu_read(ptdev, GPU_MMU_FEATURES);
- ptdev->gpu_info.thread_features = gpu_read(ptdev, GPU_THREAD_FEATURES);
- ptdev->gpu_info.max_threads = gpu_read(ptdev, GPU_THREAD_MAX_THREADS);
- ptdev->gpu_info.thread_max_workgroup_size = gpu_read(ptdev, GPU_THREAD_MAX_WORKGROUP_SIZE);
- ptdev->gpu_info.thread_max_barrier_size = gpu_read(ptdev, GPU_THREAD_MAX_BARRIER_SIZE);
- ptdev->gpu_info.coherency_features = gpu_read(ptdev, GPU_COHERENCY_FEATURES);
- for (i = 0; i < 4; i++)
- ptdev->gpu_info.texture_features[i] = gpu_read(ptdev, GPU_TEXTURE_FEATURES(i));
-
- ptdev->gpu_info.as_present = gpu_read(ptdev, GPU_AS_PRESENT);
-
- ptdev->gpu_info.shader_present = gpu_read64(ptdev, GPU_SHADER_PRESENT);
- ptdev->gpu_info.tiler_present = gpu_read64(ptdev, GPU_TILER_PRESENT);
- ptdev->gpu_info.l2_present = gpu_read64(ptdev, GPU_L2_PRESENT);
-
- arch_major = GPU_ARCH_MAJOR(ptdev->gpu_info.gpu_id);
- product_major = GPU_PROD_MAJOR(ptdev->gpu_info.gpu_id);
- major = GPU_VER_MAJOR(ptdev->gpu_info.gpu_id);
- minor = GPU_VER_MINOR(ptdev->gpu_info.gpu_id);
- status = GPU_VER_STATUS(ptdev->gpu_info.gpu_id);
-
- for (model = gpu_models; model->name; model++) {
- if (model->arch_major == arch_major &&
- model->product_major == product_major)
- break;
- }
-
- drm_info(&ptdev->base,
- "mali-%s id 0x%x major 0x%x minor 0x%x status 0x%x",
- model->name ?: "unknown", ptdev->gpu_info.gpu_id >> 16,
- major, minor, status);
-
- drm_info(&ptdev->base,
- "Features: L2:%#x Tiler:%#x Mem:%#x MMU:%#x AS:%#x",
- ptdev->gpu_info.l2_features,
- ptdev->gpu_info.tiler_features,
- ptdev->gpu_info.mem_features,
- ptdev->gpu_info.mmu_features,
- ptdev->gpu_info.as_present);
-
- drm_info(&ptdev->base,
- "shader_present=0x%0llx l2_present=0x%0llx tiler_present=0x%0llx",
- ptdev->gpu_info.shader_present, ptdev->gpu_info.l2_present,
- ptdev->gpu_info.tiler_present);
-}
-
static void panthor_gpu_irq_handler(struct panthor_device *ptdev, u32 status)
{
gpu_write(ptdev, GPU_INT_CLEAR, status);
@@ -204,8 +113,8 @@ int panthor_gpu_init(struct panthor_device *ptdev)
spin_lock_init(&gpu->reqs_lock);
init_waitqueue_head(&gpu->reqs_acked);
+ mutex_init(&gpu->cache_flush_lock);
ptdev->gpu = gpu;
- panthor_gpu_init_info(ptdev);
dma_set_max_seg_size(ptdev->base.dev, UINT_MAX);
pa_bits = GPU_MMU_FEATURES_PA_BITS(ptdev->gpu_info.mmu_features);
@@ -353,6 +262,9 @@ int panthor_gpu_flush_caches(struct panthor_device *ptdev,
bool timedout = false;
unsigned long flags;
+ /* Serialize cache flush operations. */
+ guard(mutex)(&ptdev->gpu->cache_flush_lock);
+
spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
if (!drm_WARN_ON(&ptdev->base,
ptdev->gpu->pending_reqs & GPU_IRQ_CLEAN_CACHES_COMPLETED)) {
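
guard(mutex) is the scope-based locking helper built on the <linux/cleanup.h> machinery: it takes the mutex immediately and releases it automatically when the enclosing scope ends, on every return path. A minimal sketch:

    static void example_flush(struct panthor_device *ptdev)
    {
            guard(mutex)(&ptdev->gpu->cache_flush_lock);    /* mutex_lock() here */

            /* ... issue the flush and wait for completion ... */
    }                                                       /* auto mutex_unlock() */
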
diff --git a/drivers/gpu/drm/panthor/panthor_hw.c b/drivers/gpu/drm/panthor/panthor_hw.c
new file mode 100644
index 000000000000..4f2858114e5e
--- /dev/null
+++ b/drivers/gpu/drm/panthor/panthor_hw.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0 or MIT
+/* Copyright 2025 ARM Limited. All rights reserved. */
+
+#include "panthor_device.h"
+#include "panthor_hw.h"
+#include "panthor_regs.h"
+
+#define GPU_PROD_ID_MAKE(arch_major, prod_major) \
+ (((arch_major) << 24) | (prod_major))
+
+static char *get_gpu_model_name(struct panthor_device *ptdev)
+{
+ const u32 gpu_id = ptdev->gpu_info.gpu_id;
+ const u32 product_id = GPU_PROD_ID_MAKE(GPU_ARCH_MAJOR(gpu_id),
+ GPU_PROD_MAJOR(gpu_id));
+ const bool ray_intersection = !!(ptdev->gpu_info.gpu_features &
+ GPU_FEATURES_RAY_INTERSECTION);
+ const u8 shader_core_count = hweight64(ptdev->gpu_info.shader_present);
+
+ switch (product_id) {
+ case GPU_PROD_ID_MAKE(10, 2):
+ return "Mali-G710";
+ case GPU_PROD_ID_MAKE(10, 3):
+ return "Mali-G510";
+ case GPU_PROD_ID_MAKE(10, 4):
+ return "Mali-G310";
+ case GPU_PROD_ID_MAKE(10, 7):
+ return "Mali-G610";
+ case GPU_PROD_ID_MAKE(11, 2):
+ if (shader_core_count > 10 && ray_intersection)
+ return "Mali-G715-Immortalis";
+ else if (shader_core_count >= 7)
+ return "Mali-G715";
+
+ fallthrough;
+ case GPU_PROD_ID_MAKE(11, 3):
+ return "Mali-G615";
+ case GPU_PROD_ID_MAKE(12, 0):
+ if (shader_core_count >= 10 && ray_intersection)
+ return "Mali-G720-Immortalis";
+ else if (shader_core_count >= 6)
+ return "Mali-G720";
+
+ fallthrough;
+ case GPU_PROD_ID_MAKE(12, 1):
+ return "Mali-G620";
+ case GPU_PROD_ID_MAKE(13, 0):
+ if (shader_core_count >= 10 && ray_intersection)
+ return "Mali-G925-Immortalis";
+ else if (shader_core_count >= 6)
+ return "Mali-G725";
+
+ fallthrough;
+ case GPU_PROD_ID_MAKE(13, 1):
+ return "Mali-G625";
+ }
+
+ return "(Unknown Mali GPU)";
+}
+
+static void panthor_gpu_info_init(struct panthor_device *ptdev)
+{
+ unsigned int i;
+
+ ptdev->gpu_info.gpu_id = gpu_read(ptdev, GPU_ID);
+ ptdev->gpu_info.csf_id = gpu_read(ptdev, GPU_CSF_ID);
+ ptdev->gpu_info.gpu_rev = gpu_read(ptdev, GPU_REVID);
+ ptdev->gpu_info.core_features = gpu_read(ptdev, GPU_CORE_FEATURES);
+ ptdev->gpu_info.l2_features = gpu_read(ptdev, GPU_L2_FEATURES);
+ ptdev->gpu_info.tiler_features = gpu_read(ptdev, GPU_TILER_FEATURES);
+ ptdev->gpu_info.mem_features = gpu_read(ptdev, GPU_MEM_FEATURES);
+ ptdev->gpu_info.mmu_features = gpu_read(ptdev, GPU_MMU_FEATURES);
+ ptdev->gpu_info.thread_features = gpu_read(ptdev, GPU_THREAD_FEATURES);
+ ptdev->gpu_info.max_threads = gpu_read(ptdev, GPU_THREAD_MAX_THREADS);
+ ptdev->gpu_info.thread_max_workgroup_size = gpu_read(ptdev, GPU_THREAD_MAX_WORKGROUP_SIZE);
+ ptdev->gpu_info.thread_max_barrier_size = gpu_read(ptdev, GPU_THREAD_MAX_BARRIER_SIZE);
+ ptdev->gpu_info.coherency_features = gpu_read(ptdev, GPU_COHERENCY_FEATURES);
+ for (i = 0; i < 4; i++)
+ ptdev->gpu_info.texture_features[i] = gpu_read(ptdev, GPU_TEXTURE_FEATURES(i));
+
+ ptdev->gpu_info.as_present = gpu_read(ptdev, GPU_AS_PRESENT);
+
+ ptdev->gpu_info.shader_present = gpu_read64(ptdev, GPU_SHADER_PRESENT);
+ ptdev->gpu_info.tiler_present = gpu_read64(ptdev, GPU_TILER_PRESENT);
+ ptdev->gpu_info.l2_present = gpu_read64(ptdev, GPU_L2_PRESENT);
+
+ /* Introduced in arch 11.x */
+ ptdev->gpu_info.gpu_features = gpu_read64(ptdev, GPU_FEATURES);
+}
+
+static void panthor_hw_info_init(struct panthor_device *ptdev)
+{
+ u32 major, minor, status;
+
+ panthor_gpu_info_init(ptdev);
+
+ major = GPU_VER_MAJOR(ptdev->gpu_info.gpu_id);
+ minor = GPU_VER_MINOR(ptdev->gpu_info.gpu_id);
+ status = GPU_VER_STATUS(ptdev->gpu_info.gpu_id);
+
+ drm_info(&ptdev->base,
+ "%s id 0x%x major 0x%x minor 0x%x status 0x%x",
+ get_gpu_model_name(ptdev), ptdev->gpu_info.gpu_id >> 16,
+ major, minor, status);
+
+ drm_info(&ptdev->base,
+ "Features: L2:%#x Tiler:%#x Mem:%#x MMU:%#x AS:%#x",
+ ptdev->gpu_info.l2_features,
+ ptdev->gpu_info.tiler_features,
+ ptdev->gpu_info.mem_features,
+ ptdev->gpu_info.mmu_features,
+ ptdev->gpu_info.as_present);
+
+ drm_info(&ptdev->base,
+ "shader_present=0x%0llx l2_present=0x%0llx tiler_present=0x%0llx",
+ ptdev->gpu_info.shader_present, ptdev->gpu_info.l2_present,
+ ptdev->gpu_info.tiler_present);
+}
+
+int panthor_hw_init(struct panthor_device *ptdev)
+{
+ panthor_hw_info_init(ptdev);
+
+ return 0;
+}
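As a worked example of the GPU_PROD_ID_MAKE() lookup in get_gpu_model_name() above: the switch key pairs the architecture major with the product major extracted from GPU_ID. The bit positions below follow the usual Mali CSF GPU_ID layout and are an assumption here; the driver's GPU_ARCH_MAJOR()/GPU_PROD_MAJOR() macros in panthor_regs.h are authoritative. A user-space sketch:

    #include <stdio.h>
    #include <stdint.h>

    /* Assumed GPU_ID layout: arch_major [31:28], product_major [19:16]. */
    #define ARCH_MAJOR(id)	(((id) >> 28) & 0xf)
    #define PROD_MAJOR(id)	(((id) >> 16) & 0xf)
    #define PROD_ID(id)	((ARCH_MAJOR(id) << 24) | PROD_MAJOR(id))

    int main(void)
    {
    	uint32_t gpu_id = 0xa8670000;	/* hypothetical Mali-G610-class ID */

    	/* Prints key 0x0a000007, i.e. GPU_PROD_ID_MAKE(10, 7). */
    	printf("arch %u, product %u, key 0x%08x\n",
    	       ARCH_MAJOR(gpu_id), PROD_MAJOR(gpu_id), PROD_ID(gpu_id));
    	return 0;
    }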
diff --git a/drivers/gpu/drm/panthor/panthor_hw.h b/drivers/gpu/drm/panthor/panthor_hw.h
new file mode 100644
index 000000000000..0af6acc6aa6a
--- /dev/null
+++ b/drivers/gpu/drm/panthor/panthor_hw.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 or MIT */
+/* Copyright 2025 ARM Limited. All rights reserved. */
+
+#ifndef __PANTHOR_HW_H__
+#define __PANTHOR_HW_H__
+
+struct panthor_device;
+
+int panthor_hw_init(struct panthor_device *ptdev);
+
+#endif /* __PANTHOR_HW_H__ */
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
index 4140f697ba5a..6dec4354e378 100644
--- a/drivers/gpu/drm/panthor/panthor_mmu.c
+++ b/drivers/gpu/drm/panthor/panthor_mmu.c
@@ -29,6 +29,7 @@
#include "panthor_device.h"
#include "panthor_gem.h"
+#include "panthor_gpu.h"
#include "panthor_heap.h"
#include "panthor_mmu.h"
#include "panthor_regs.h"
@@ -571,8 +572,24 @@ static void lock_region(struct panthor_device *ptdev, u32 as_nr,
static int mmu_hw_do_operation_locked(struct panthor_device *ptdev, int as_nr,
u64 iova, u64 size, u32 op)
{
+ const u32 l2_flush_op = CACHE_CLEAN | CACHE_INV;
+ u32 lsc_flush_op;
+ int ret;
+
lockdep_assert_held(&ptdev->mmu->as.slots_lock);
+ switch (op) {
+ case AS_COMMAND_FLUSH_MEM:
+ lsc_flush_op = CACHE_CLEAN | CACHE_INV;
+ break;
+ case AS_COMMAND_FLUSH_PT:
+ lsc_flush_op = 0;
+ break;
+ default:
+ drm_WARN(&ptdev->base, 1, "Unexpected AS_COMMAND: %d", op);
+ return -EINVAL;
+ }
+
if (as_nr < 0)
return 0;
@@ -582,13 +599,24 @@ static int mmu_hw_do_operation_locked(struct panthor_device *ptdev, int as_nr,
* power it up
*/
- if (op != AS_COMMAND_UNLOCK)
- lock_region(ptdev, as_nr, iova, size);
+ lock_region(ptdev, as_nr, iova, size);
- /* Run the MMU operation */
- write_cmd(ptdev, as_nr, op);
+ ret = wait_ready(ptdev, as_nr);
+ if (ret)
+ return ret;
- /* Wait for the flush to complete */
+ ret = panthor_gpu_flush_caches(ptdev, l2_flush_op, lsc_flush_op, 0);
+ if (ret)
+ return ret;
+
+ /*
+ * Explicitly unlock the region as the AS is not unlocked automatically
+ * at the end of the GPU_CONTROL cache flush command, unlike
+ * AS_COMMAND_FLUSH_MEM or AS_COMMAND_FLUSH_PT.
+ */
+ write_cmd(ptdev, as_nr, AS_COMMAND_UNLOCK);
+
+ /* Wait for the unlock command to complete */
return wait_ready(ptdev, as_nr);
}
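Summarizing the new flush path in the hunk above as a sketch (all helpers are stand-ins for the driver functions of the same purpose): the GPU_CONTROL cache flush replaces AS_COMMAND_FLUSH_MEM/FLUSH_PT, and because it does not drop the AS lock implicitly, the unlock is issued as a separate command.

    static void lock_region_stub(void) { }			/* stand-in */
    static int wait_ready_stub(void) { return 0; }		/* stand-in */
    static int flush_caches_stub(void) { return 0; }	/* stand-in */
    static void unlock_stub(void) { }			/* stand-in */

    static int flush_sequence_sketch(void)
    {
    	int ret;

    	lock_region_stub();		/* pin the VA range in the AS */

    	ret = wait_ready_stub();	/* AS command interface idle */
    	if (ret)
    		return ret;

    	ret = flush_caches_stub();	/* GPU_CONTROL flush: no implicit unlock */
    	if (ret)
    		return ret;

    	unlock_stub();			/* AS_COMMAND_UNLOCK, explicit */
    	return wait_ready_stub();	/* wait for the unlock to land */
    }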
@@ -1074,9 +1102,9 @@ static void panthor_vm_bo_put(struct drm_gpuvm_bo *vm_bo)
* GEM vm_bo list.
*/
dma_resv_lock(drm_gpuvm_resv(vm), NULL);
- mutex_lock(&bo->gpuva_list_lock);
+ mutex_lock(&bo->base.base.gpuva.lock);
unpin = drm_gpuvm_bo_put(vm_bo);
- mutex_unlock(&bo->gpuva_list_lock);
+ mutex_unlock(&bo->base.base.gpuva.lock);
dma_resv_unlock(drm_gpuvm_resv(vm));
/* If the vm_bo object was destroyed, release the pin reference that
@@ -1194,7 +1222,7 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
(flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK) != DRM_PANTHOR_VM_BIND_OP_TYPE_MAP)
return -EINVAL;
- /* Make sure the VA and size are aligned and in-bounds. */
+ /* Make sure the VA and size are in-bounds. */
if (size > bo->base.base.size || offset > bo->base.base.size - size)
return -EINVAL;
@@ -1249,9 +1277,9 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
* calling this function.
*/
dma_resv_lock(panthor_vm_resv(vm), NULL);
- mutex_lock(&bo->gpuva_list_lock);
+ mutex_lock(&bo->base.base.gpuva.lock);
op_ctx->map.vm_bo = drm_gpuvm_bo_obtain_prealloc(preallocated_vm_bo);
- mutex_unlock(&bo->gpuva_list_lock);
+ mutex_unlock(&bo->base.base.gpuva.lock);
dma_resv_unlock(panthor_vm_resv(vm));
/* If a vm_bo for this <VM,BO> combination exists, it already

@@ -2003,10 +2031,10 @@ static void panthor_vma_link(struct panthor_vm *vm,
{
struct panthor_gem_object *bo = to_panthor_bo(vma->base.gem.obj);
- mutex_lock(&bo->gpuva_list_lock);
+ mutex_lock(&bo->base.base.gpuva.lock);
drm_gpuva_link(&vma->base, vm_bo);
drm_WARN_ON(&vm->ptdev->base, drm_gpuvm_bo_put(vm_bo));
- mutex_unlock(&bo->gpuva_list_lock);
+ mutex_unlock(&bo->base.base.gpuva.lock);
}
static void panthor_vma_unlink(struct panthor_vm *vm,
@@ -2015,9 +2043,9 @@ static void panthor_vma_unlink(struct panthor_vm *vm,
struct panthor_gem_object *bo = to_panthor_bo(vma->base.gem.obj);
struct drm_gpuvm_bo *vm_bo = drm_gpuvm_bo_get(vma->base.vm_bo);
- mutex_lock(&bo->gpuva_list_lock);
+ mutex_lock(&bo->base.base.gpuva.lock);
drm_gpuva_unlink(&vma->base);
- mutex_unlock(&bo->gpuva_list_lock);
+ mutex_unlock(&bo->base.base.gpuva.lock);
/* drm_gpuva_unlink() release the vm_bo, but we manually retained it
* when entering this function, so we can implement deferred VMA
@@ -2169,15 +2197,22 @@ panthor_vm_exec_op(struct panthor_vm *vm, struct panthor_vm_op_ctx *op,
mutex_lock(&vm->op_lock);
vm->op_ctx = op;
switch (op_type) {
- case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP:
+ case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP: {
+ const struct drm_gpuvm_map_req map_req = {
+ .map.va.addr = op->va.addr,
+ .map.va.range = op->va.range,
+ .map.gem.obj = op->map.vm_bo->obj,
+ .map.gem.offset = op->map.bo_offset,
+ };
+
if (vm->unusable) {
ret = -EINVAL;
break;
}
- ret = drm_gpuvm_sm_map(&vm->base, vm, op->va.addr, op->va.range,
- op->map.vm_bo->obj, op->map.bo_offset);
+ ret = drm_gpuvm_sm_map(&vm->base, vm, &map_req);
break;
+ }
case DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP:
ret = drm_gpuvm_sm_unmap(&vm->base, vm, op->va.addr, op->va.range);
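The map parameters now travel in a struct drm_gpuvm_map_req instead of loose arguments, matching the designated initializer in the MAP case above. A hedged fragment (vm, gem_obj, map_addr and map_size are assumed to be in scope; the values are placeholders):

    const struct drm_gpuvm_map_req req = {
    	.map.va.addr    = map_addr,	/* GPU VA to map at (placeholder) */
    	.map.va.range   = map_size,	/* size of the mapping (placeholder) */
    	.map.gem.obj    = gem_obj,	/* backing GEM object (assumed) */
    	.map.gem.offset = 0,		/* offset into the GEM object */
    };

    ret = drm_gpuvm_sm_map(&vm->base, vm, &req);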
@@ -2380,8 +2415,9 @@ panthor_vm_create(struct panthor_device *ptdev, bool for_mcu,
* to be handled the same way user VMAs are.
*/
drm_gpuvm_init(&vm->base, for_mcu ? "panthor-MCU-VM" : "panthor-GPU-VM",
- DRM_GPUVM_RESV_PROTECTED, &ptdev->base, dummy_gem,
- min_va, va_range, 0, 0, &panthor_gpuvm_ops);
+ DRM_GPUVM_RESV_PROTECTED | DRM_GPUVM_IMMEDIATE_MODE,
+ &ptdev->base, dummy_gem, min_va, va_range, 0, 0,
+ &panthor_gpuvm_ops);
drm_gem_object_put(dummy_gem);
return vm;
@@ -2411,7 +2447,7 @@ panthor_vm_bind_prepare_op_ctx(struct drm_file *file,
int ret;
/* Aligned on page size. */
- if (!IS_ALIGNED(op->va | op->size, vm_pgsz))
+ if (!IS_ALIGNED(op->va | op->size | op->bo_offset, vm_pgsz))
return -EINVAL;
switch (op->flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK) {
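The widened alignment check above relies on a common trick: OR-ing several values preserves any low bit that would break alignment in any one of them, so a single IS_ALIGNED() covers va, size and bo_offset at once. A sketch:

    #include <linux/align.h>
    #include <linux/types.h>

    static bool bind_args_aligned(u64 va, u64 size, u64 bo_offset, u64 pgsz)
    {
    	/* A low bit set in any operand survives the OR and fails the test. */
    	return IS_ALIGNED(va | size | bo_offset, pgsz);
    }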
diff --git a/drivers/gpu/drm/panthor/panthor_regs.h b/drivers/gpu/drm/panthor/panthor_regs.h
index 48bbfd40138c..8bee76d01bf8 100644
--- a/drivers/gpu/drm/panthor/panthor_regs.h
+++ b/drivers/gpu/drm/panthor/panthor_regs.h
@@ -70,6 +70,9 @@
#define GPU_PWR_OVERRIDE0 0x54
#define GPU_PWR_OVERRIDE1 0x58
+#define GPU_FEATURES 0x60
+#define GPU_FEATURES_RAY_INTERSECTION BIT(2)
+
#define GPU_TIMESTAMP_OFFSET 0x88
#define GPU_CYCLE_COUNT 0x90
#define GPU_TIMESTAMP 0x98
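The new GPU_FEATURES word is consumed by get_gpu_model_name() above; a trivial sketch of testing the ray-intersection bit, with gpu_features being the raw 64-bit register value read at init:

    static bool has_ray_intersection(u64 gpu_features)
    {
    	return !!(gpu_features & GPU_FEATURES_RAY_INTERSECTION);
    }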
diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
index df76653e649a..3d1f57e3990f 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.c
+++ b/drivers/gpu/drm/panthor/panthor_sched.c
@@ -641,6 +641,15 @@ struct panthor_group {
size_t kbo_sizes;
} fdinfo;
+ /** @task_info: Info on the group_leader of the task that created the group. */
+ struct {
+ /** @task_info.pid: pid of current->group_leader */
+ pid_t pid;
+
+ /** @task_info.comm: comm of current->group_leader */
+ char comm[TASK_COMM_LEN];
+ } task_info;
+
/** @state: Group state. */
enum panthor_group_state state;
@@ -1354,8 +1363,12 @@ cs_slot_process_fatal_event_locked(struct panthor_device *ptdev,
fatal = cs_iface->output->fatal;
info = cs_iface->output->fatal_info;
- if (group)
+ if (group) {
+ drm_warn(&ptdev->base, "CS_FATAL: pid=%d, comm=%s\n",
+ group->task_info.pid, group->task_info.comm);
+
group->fatal_queues |= BIT(cs_id);
+ }
if (CS_EXCEPTION_TYPE(fatal) == DRM_PANTHOR_EXCEPTION_CS_UNRECOVERABLE) {
/* If this exception is unrecoverable, queue a reset, and make
@@ -1415,6 +1428,11 @@ cs_slot_process_fault_event_locked(struct panthor_device *ptdev,
spin_unlock(&queue->fence_ctx.lock);
}
+ if (group) {
+ drm_warn(&ptdev->base, "CS_FAULT: pid=%d, comm=%s\n",
+ group->task_info.pid, group->task_info.comm);
+ }
+
drm_warn(&ptdev->base,
"CSG slot %d CS slot: %d\n"
"CS_FAULT.EXCEPTION_TYPE: 0x%x (%s)\n"
@@ -1631,11 +1649,15 @@ csg_slot_process_progress_timer_event_locked(struct panthor_device *ptdev, u32 c
lockdep_assert_held(&sched->lock);
- drm_warn(&ptdev->base, "CSG slot %d progress timeout\n", csg_id);
-
group = csg_slot->group;
- if (!drm_WARN_ON(&ptdev->base, !group))
+ if (!drm_WARN_ON(&ptdev->base, !group)) {
+ drm_warn(&ptdev->base, "CSG_PROGRESS_TIMER_EVENT: pid=%d, comm=%s\n",
+ group->task_info.pid, group->task_info.comm);
+
group->timedout = true;
+ }
+
+ drm_warn(&ptdev->base, "CSG slot %d progress timeout\n", csg_id);
sched_queue_delayed_work(sched, tick, 0);
}
@@ -3217,7 +3239,8 @@ queue_timedout_job(struct drm_sched_job *sched_job)
struct panthor_scheduler *sched = ptdev->scheduler;
struct panthor_queue *queue = group->queues[job->queue_idx];
- drm_warn(&ptdev->base, "job timeout\n");
+ drm_warn(&ptdev->base, "job timeout: pid=%d, comm=%s, seqno=%llu\n",
+ group->task_info.pid, group->task_info.comm, job->done_fence->seqno);
drm_WARN_ON(&ptdev->base, atomic_read(&sched->reset.in_progress));
@@ -3388,6 +3411,14 @@ err_free_queue:
return ERR_PTR(ret);
}
+static void group_init_task_info(struct panthor_group *group)
+{
+ struct task_struct *task = current->group_leader;
+
+ group->task_info.pid = task->pid;
+ get_task_comm(group->task_info.comm, task);
+}
+
static void add_group_kbo_sizes(struct panthor_device *ptdev,
struct panthor_group *group)
{
@@ -3539,6 +3570,8 @@ int panthor_group_create(struct panthor_file *pfile,
add_group_kbo_sizes(group->ptdev, group);
spin_lock_init(&group->fdinfo.lock);
+ group_init_task_info(group);
+
return gid;
err_put_group:
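The task-info snapshot above records the pid and comm of the thread-group leader once, at group creation, so the later fault and timeout paths can print them without dereferencing current in an unrelated context. The same pattern in isolation (names here are illustrative):

    #include <linux/sched.h>

    struct owner_info {
    	pid_t pid;
    	char comm[TASK_COMM_LEN];
    };

    static void owner_info_init(struct owner_info *info)
    {
    	struct task_struct *task = current->group_leader;

    	info->pid = task->pid;
    	get_task_comm(info->comm, task);
    }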
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index d1c5e471bdca..3d9f47bc807a 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -1832,7 +1832,7 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
return;
}
- radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+ radeon_atombios_encoder_dpms_scratch_regs(encoder, mode == DRM_MODE_DPMS_ON);
}
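The radeon cleanups that follow all apply the same simplification: a comparison already evaluates to a boolean, so the `(x) ? true : false` form is redundant. For example (mode assumed in scope):

    bool before = (mode == DRM_MODE_DPMS_ON) ? true : false;	/* redundant */
    bool after  = mode == DRM_MODE_DPMS_ON;			/* equivalent */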
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 7c3a960f486a..ba8db1d07c07 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -2457,7 +2457,7 @@ static void ci_register_patching_mc_arb(struct radeon_device *rdev,
u32 tmp, tmp2;
tmp = RREG32(MC_SEQ_MISC0);
- patch = ((tmp & 0x0000f00) == 0x300) ? true : false;
+ patch = (tmp & 0x0000f00) == 0x300;
if (patch &&
((rdev->pdev->device == 0x67B0) ||
@@ -3238,7 +3238,8 @@ static int ci_populate_all_graphic_levels(struct radeon_device *rdev)
u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) *
SMU7_MAX_LEVELS_GRAPHICS;
SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel;
- u32 i, ret;
+ int ret;
+ u32 i;
memset(levels, 0, level_array_size);
@@ -3285,7 +3286,8 @@ static int ci_populate_all_memory_levels(struct radeon_device *rdev)
u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) *
SMU7_MAX_LEVELS_MEMORY;
SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel;
- u32 i, ret;
+ int ret;
+ u32 i;
memset(levels, 0, level_array_size);
@@ -3436,7 +3438,7 @@ static int ci_setup_default_dpm_tables(struct radeon_device *rdev)
pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
allowed_sclk_vddc_table->entries[i].clk;
pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled =
- (i == 0) ? true : false;
+ i == 0;
pi->dpm_table.sclk_table.count++;
}
}
@@ -3449,7 +3451,7 @@ static int ci_setup_default_dpm_tables(struct radeon_device *rdev)
pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
allowed_mclk_table->entries[i].clk;
pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled =
- (i == 0) ? true : false;
+ i == 0;
pi->dpm_table.mclk_table.count++;
}
}
@@ -4487,7 +4489,7 @@ static int ci_register_patching_mc_seq(struct radeon_device *rdev,
bool patch;
tmp = RREG32(MC_SEQ_MISC0);
- patch = ((tmp & 0x0000f00) == 0x300) ? true : false;
+ patch = (tmp & 0x0000f00) == 0x300;
if (patch &&
((rdev->pdev->device == 0x67B0) ||
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 266c57733136..1162cb5d75ed 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -951,13 +951,13 @@ static int evergreen_cs_track_check(struct radeon_cs_parser *p)
u64 offset = (u64)track->vgt_strmout_bo_offset[i] +
(u64)track->vgt_strmout_size[i];
if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) {
- DRM_ERROR("streamout %d bo too small: 0x%llx, 0x%lx\n",
- i, offset,
- radeon_bo_size(track->vgt_strmout_bo[i]));
+ dev_warn_once(p->dev, "streamout %d bo too small: 0x%llx, 0x%lx\n",
+ i, offset,
+ radeon_bo_size(track->vgt_strmout_bo[i]));
return -EINVAL;
}
} else {
- dev_warn(p->dev, "No buffer for streamout %d\n", i);
+ dev_warn_once(p->dev, "No buffer for streamout %d\n", i);
return -EINVAL;
}
}
@@ -979,8 +979,8 @@ static int evergreen_cs_track_check(struct radeon_cs_parser *p)
(tmp >> (i * 4)) & 0xF) {
/* at least one component is enabled */
if (track->cb_color_bo[i] == NULL) {
- dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
- __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
+ dev_warn_once(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
+ __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
return -EINVAL;
}
/* check cb */
@@ -1056,8 +1056,8 @@ static int evergreen_packet0_check(struct radeon_cs_parser *p,
case EVERGREEN_VLINE_START_END:
r = evergreen_cs_packet_parse_vline(p);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
return r;
}
break;
@@ -1143,8 +1143,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case SQ_VSTMP_RING_BASE:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
@@ -1155,15 +1155,15 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
break;
case CAYMAN_DB_EQAA:
if (p->rdev->family < CHIP_CAYMAN) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
break;
case CAYMAN_DB_DEPTH_INFO:
if (p->rdev->family < CHIP_CAYMAN) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
break;
@@ -1172,8 +1172,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
ib[idx] &= ~Z_ARRAY_MODE(0xf);
@@ -1214,8 +1214,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case DB_Z_READ_BASE:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
track->db_z_read_offset = radeon_get_ib_value(p, idx);
@@ -1226,8 +1226,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case DB_Z_WRITE_BASE:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
track->db_z_write_offset = radeon_get_ib_value(p, idx);
@@ -1238,8 +1238,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case DB_STENCIL_READ_BASE:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
track->db_s_read_offset = radeon_get_ib_value(p, idx);
@@ -1250,8 +1250,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case DB_STENCIL_WRITE_BASE:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
track->db_s_write_offset = radeon_get_ib_value(p, idx);
@@ -1273,8 +1273,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case VGT_STRMOUT_BUFFER_BASE_3:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16;
@@ -1295,8 +1295,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CP_COHER_BASE:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- dev_warn(p->dev, "missing reloc for CP_COHER_BASE "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "missing reloc for CP_COHER_BASE "
+ "0x%04X\n", reg);
return -EINVAL;
}
ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
@@ -1311,8 +1311,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
break;
case PA_SC_AA_CONFIG:
if (p->rdev->family >= CHIP_CAYMAN) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
tmp = radeon_get_ib_value(p, idx) & MSAA_NUM_SAMPLES_MASK;
@@ -1320,8 +1320,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
break;
case CAYMAN_PA_SC_AA_CONFIG:
if (p->rdev->family < CHIP_CAYMAN) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
tmp = radeon_get_ib_value(p, idx) & CAYMAN_MSAA_NUM_SAMPLES_MASK;
@@ -1360,8 +1360,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
@@ -1378,8 +1378,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
@@ -1439,8 +1439,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR7_ATTRIB:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
@@ -1467,8 +1467,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR11_ATTRIB:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
@@ -1555,8 +1555,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR7_BASE:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
tmp = (reg - CB_COLOR0_BASE) / 0x3c;
@@ -1571,8 +1571,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR11_BASE:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
tmp = ((reg - CB_COLOR8_BASE) / 0x1c) + 8;
@@ -1584,8 +1584,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case DB_HTILE_DATA_BASE:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
track->htile_offset = radeon_get_ib_value(p, idx);
@@ -1702,36 +1702,36 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case SQ_ALU_CONST_CACHE_LS_15:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
break;
case SX_MEMORY_EXPORT_BASE:
if (p->rdev->family >= CHIP_CAYMAN) {
- dev_warn(p->dev, "bad SET_CONFIG_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONFIG_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- dev_warn(p->dev, "bad SET_CONFIG_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONFIG_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
break;
case CAYMAN_SX_SCATTER_EXPORT_BASE:
if (p->rdev->family < CHIP_CAYMAN) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
@@ -1740,7 +1740,7 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0;
break;
default:
- dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+ dev_warn_once(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
return -EINVAL;
}
return 0;
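The switch from DRM_ERROR()/dev_warn() to dev_warn_once() throughout this parser matters because userspace controls the command stream: a broken or hostile client can submit the same bad packet in a tight loop, and per-occurrence logging would flood the kernel log. An approximate sketch of the _once mechanism (the real macro lives in include/linux/dev_printk.h and uses the DO_ONCE_LITE helpers):

    #define my_warn_once(dev, fmt, ...)				\
    do {								\
    	static bool __warned;					\
    								\
    	if (!__warned) {					\
    		__warned = true;				\
    		dev_warn(dev, fmt, ##__VA_ARGS__);		\
    	}							\
    } while (0)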
@@ -1795,7 +1795,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
uint64_t offset;
if (pkt->count != 1) {
- DRM_ERROR("bad SET PREDICATION\n");
+ dev_warn_once(p->dev, "bad SET PREDICATION\n");
return -EINVAL;
}
@@ -1807,13 +1807,13 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
return 0;
if (pred_op > 2) {
- DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op);
+ dev_warn_once(p->dev, "bad SET PREDICATION operation %d\n", pred_op);
return -EINVAL;
}
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad SET PREDICATION\n");
+ dev_warn_once(p->dev, "bad SET PREDICATION\n");
return -EINVAL;
}
@@ -1827,7 +1827,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
break;
case PACKET3_CONTEXT_CONTROL:
if (pkt->count != 1) {
- DRM_ERROR("bad CONTEXT_CONTROL\n");
+ dev_warn_once(p->dev, "bad CONTEXT_CONTROL\n");
return -EINVAL;
}
break;
@@ -1835,17 +1835,17 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
case PACKET3_NUM_INSTANCES:
case PACKET3_CLEAR_STATE:
if (pkt->count) {
- DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
+ dev_warn_once(p->dev, "bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
return -EINVAL;
}
break;
case CAYMAN_PACKET3_DEALLOC_STATE:
if (p->rdev->family < CHIP_CAYMAN) {
- DRM_ERROR("bad PACKET3_DEALLOC_STATE\n");
+ dev_warn_once(p->dev, "bad PACKET3_DEALLOC_STATE\n");
return -EINVAL;
}
if (pkt->count) {
- DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
+ dev_warn_once(p->dev, "bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
return -EINVAL;
}
break;
@@ -1854,12 +1854,12 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
uint64_t offset;
if (pkt->count != 1) {
- DRM_ERROR("bad INDEX_BASE\n");
+ dev_warn_once(p->dev, "bad INDEX_BASE\n");
return -EINVAL;
}
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad INDEX_BASE\n");
+ dev_warn_once(p->dev, "bad INDEX_BASE\n");
return -EINVAL;
}
@@ -1872,7 +1872,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
r = evergreen_cs_track_check(p);
if (r) {
- dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
return r;
}
break;
@@ -1880,7 +1880,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
case PACKET3_INDEX_BUFFER_SIZE:
{
if (pkt->count != 0) {
- DRM_ERROR("bad INDEX_BUFFER_SIZE\n");
+ dev_warn_once(p->dev, "bad INDEX_BUFFER_SIZE\n");
return -EINVAL;
}
break;
@@ -1889,12 +1889,12 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
{
uint64_t offset;
if (pkt->count != 3) {
- DRM_ERROR("bad DRAW_INDEX\n");
+ dev_warn_once(p->dev, "bad DRAW_INDEX\n");
return -EINVAL;
}
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad DRAW_INDEX\n");
+ dev_warn_once(p->dev, "bad DRAW_INDEX\n");
return -EINVAL;
}
@@ -1907,7 +1907,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
r = evergreen_cs_track_check(p);
if (r) {
- dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
return r;
}
break;
@@ -1917,12 +1917,12 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
uint64_t offset;
if (pkt->count != 4) {
- DRM_ERROR("bad DRAW_INDEX_2\n");
+ dev_warn_once(p->dev, "bad DRAW_INDEX_2\n");
return -EINVAL;
}
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad DRAW_INDEX_2\n");
+ dev_warn_once(p->dev, "bad DRAW_INDEX_2\n");
return -EINVAL;
}
@@ -1935,63 +1935,63 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
r = evergreen_cs_track_check(p);
if (r) {
- dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
return r;
}
break;
}
case PACKET3_DRAW_INDEX_AUTO:
if (pkt->count != 1) {
- DRM_ERROR("bad DRAW_INDEX_AUTO\n");
+ dev_warn_once(p->dev, "bad DRAW_INDEX_AUTO\n");
return -EINVAL;
}
r = evergreen_cs_track_check(p);
if (r) {
- dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
+ dev_warn_once(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
return r;
}
break;
case PACKET3_DRAW_INDEX_MULTI_AUTO:
if (pkt->count != 2) {
- DRM_ERROR("bad DRAW_INDEX_MULTI_AUTO\n");
+ dev_warn_once(p->dev, "bad DRAW_INDEX_MULTI_AUTO\n");
return -EINVAL;
}
r = evergreen_cs_track_check(p);
if (r) {
- dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
+ dev_warn_once(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
return r;
}
break;
case PACKET3_DRAW_INDEX_IMMD:
if (pkt->count < 2) {
- DRM_ERROR("bad DRAW_INDEX_IMMD\n");
+ dev_warn_once(p->dev, "bad DRAW_INDEX_IMMD\n");
return -EINVAL;
}
r = evergreen_cs_track_check(p);
if (r) {
- dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
return r;
}
break;
case PACKET3_DRAW_INDEX_OFFSET:
if (pkt->count != 2) {
- DRM_ERROR("bad DRAW_INDEX_OFFSET\n");
+ dev_warn_once(p->dev, "bad DRAW_INDEX_OFFSET\n");
return -EINVAL;
}
r = evergreen_cs_track_check(p);
if (r) {
- dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
return r;
}
break;
case PACKET3_DRAW_INDEX_OFFSET_2:
if (pkt->count != 3) {
- DRM_ERROR("bad DRAW_INDEX_OFFSET_2\n");
+ dev_warn_once(p->dev, "bad DRAW_INDEX_OFFSET_2\n");
return -EINVAL;
}
r = evergreen_cs_track_check(p);
if (r) {
- dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
return r;
}
break;
@@ -2005,19 +2005,19 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
4 ADDRESS_HI Bits [31:8] - Reserved. Bits [7:0] - Upper bits of Address [47:32]
*/
if (pkt->count != 2) {
- DRM_ERROR("bad SET_BASE\n");
+ dev_warn_once(p->dev, "bad SET_BASE\n");
return -EINVAL;
}
/* currently only supporting setting indirect draw buffer base address */
if (idx_value != 1) {
- DRM_ERROR("bad SET_BASE\n");
+ dev_warn_once(p->dev, "bad SET_BASE\n");
return -EINVAL;
}
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad SET_BASE\n");
+ dev_warn_once(p->dev, "bad SET_BASE\n");
return -EINVAL;
}
@@ -2039,54 +2039,54 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
3 DRAW_INITIATOR Draw Initiator Register. Written to the VGT_DRAW_INITIATOR register for the assigned context
*/
if (pkt->count != 1) {
- DRM_ERROR("bad DRAW_INDIRECT\n");
+ dev_warn_once(p->dev, "bad DRAW_INDIRECT\n");
return -EINVAL;
}
if (idx_value + size > track->indirect_draw_buffer_size) {
- dev_warn(p->dev, "DRAW_INDIRECT buffer too small %u + %llu > %lu\n",
- idx_value, size, track->indirect_draw_buffer_size);
+ dev_warn_once(p->dev, "DRAW_INDIRECT buffer too small %u + %llu > %lu\n",
+ idx_value, size, track->indirect_draw_buffer_size);
return -EINVAL;
}
r = evergreen_cs_track_check(p);
if (r) {
- dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
return r;
}
break;
}
case PACKET3_DISPATCH_DIRECT:
if (pkt->count != 3) {
- DRM_ERROR("bad DISPATCH_DIRECT\n");
+ dev_warn_once(p->dev, "bad DISPATCH_DIRECT\n");
return -EINVAL;
}
r = evergreen_cs_track_check(p);
if (r) {
- dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
+ dev_warn_once(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
return r;
}
break;
case PACKET3_DISPATCH_INDIRECT:
if (pkt->count != 1) {
- DRM_ERROR("bad DISPATCH_INDIRECT\n");
+ dev_warn_once(p->dev, "bad DISPATCH_INDIRECT\n");
return -EINVAL;
}
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad DISPATCH_INDIRECT\n");
+ dev_warn_once(p->dev, "bad DISPATCH_INDIRECT\n");
return -EINVAL;
}
ib[idx+0] = idx_value + (u32)(reloc->gpu_offset & 0xffffffff);
r = evergreen_cs_track_check(p);
if (r) {
- dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
return r;
}
break;
case PACKET3_WAIT_REG_MEM:
if (pkt->count != 5) {
- DRM_ERROR("bad WAIT_REG_MEM\n");
+ dev_warn_once(p->dev, "bad WAIT_REG_MEM\n");
return -EINVAL;
}
/* bit 4 is reg (0) or mem (1) */
@@ -2095,7 +2095,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad WAIT_REG_MEM\n");
+ dev_warn_once(p->dev, "bad WAIT_REG_MEM\n");
return -EINVAL;
}
@@ -2106,7 +2106,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffffc);
ib[idx+2] = upper_32_bits(offset) & 0xff;
} else if (idx_value & 0x100) {
- DRM_ERROR("cannot use PFP on REG wait\n");
+ dev_warn_once(p->dev, "cannot use PFP on REG wait\n");
return -EINVAL;
}
break;
@@ -2115,7 +2115,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
u32 command, size, info;
u64 offset, tmp;
if (pkt->count != 4) {
- DRM_ERROR("bad CP DMA\n");
+ dev_warn_once(p->dev, "bad CP DMA\n");
return -EINVAL;
}
command = radeon_get_ib_value(p, idx+4);
@@ -2129,7 +2129,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
(command & PACKET3_CP_DMA_CMD_SAS))) { /* src = register */
/* non mem to mem copies requires dw aligned count */
if (size % 4) {
- DRM_ERROR("CP DMA command requires dw count alignment\n");
+ dev_warn_once(p->dev, "CP DMA command requires dw count alignment\n");
return -EINVAL;
}
}
@@ -2137,19 +2137,19 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
/* src address space is register */
/* GDS is ok */
if (((info & 0x60000000) >> 29) != 1) {
- DRM_ERROR("CP DMA SAS not supported\n");
+ dev_warn_once(p->dev, "CP DMA SAS not supported\n");
return -EINVAL;
}
} else {
if (command & PACKET3_CP_DMA_CMD_SAIC) {
- DRM_ERROR("CP DMA SAIC only supported for registers\n");
+ dev_warn_once(p->dev, "CP DMA SAIC only supported for registers\n");
return -EINVAL;
}
/* src address space is memory */
if (((info & 0x60000000) >> 29) == 0) {
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad CP DMA SRC\n");
+ dev_warn_once(p->dev, "bad CP DMA SRC\n");
return -EINVAL;
}
@@ -2159,15 +2159,15 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
offset = reloc->gpu_offset + tmp;
if ((tmp + size) > radeon_bo_size(reloc->robj)) {
- dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
- tmp + size, radeon_bo_size(reloc->robj));
+ dev_warn_once(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
+ tmp + size, radeon_bo_size(reloc->robj));
return -EINVAL;
}
ib[idx] = offset;
ib[idx+1] = (ib[idx+1] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
} else if (((info & 0x60000000) >> 29) != 2) {
- DRM_ERROR("bad CP DMA SRC_SEL\n");
+ dev_warn_once(p->dev, "bad CP DMA SRC_SEL\n");
return -EINVAL;
}
}
@@ -2175,19 +2175,19 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
/* dst address space is register */
/* GDS is ok */
if (((info & 0x00300000) >> 20) != 1) {
- DRM_ERROR("CP DMA DAS not supported\n");
+ dev_warn_once(p->dev, "CP DMA DAS not supported\n");
return -EINVAL;
}
} else {
/* dst address space is memory */
if (command & PACKET3_CP_DMA_CMD_DAIC) {
- DRM_ERROR("CP DMA DAIC only supported for registers\n");
+ dev_warn_once(p->dev, "CP DMA DAIC only supported for registers\n");
return -EINVAL;
}
if (((info & 0x00300000) >> 20) == 0) {
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad CP DMA DST\n");
+ dev_warn_once(p->dev, "bad CP DMA DST\n");
return -EINVAL;
}
@@ -2197,15 +2197,15 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
offset = reloc->gpu_offset + tmp;
if ((tmp + size) > radeon_bo_size(reloc->robj)) {
- dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
- tmp + size, radeon_bo_size(reloc->robj));
+ dev_warn_once(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
+ tmp + size, radeon_bo_size(reloc->robj));
return -EINVAL;
}
ib[idx+2] = offset;
ib[idx+3] = upper_32_bits(offset) & 0xff;
} else {
- DRM_ERROR("bad CP DMA DST_SEL\n");
+ dev_warn_once(p->dev, "bad CP DMA DST_SEL\n");
return -EINVAL;
}
}
@@ -2213,13 +2213,13 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
}
case PACKET3_PFP_SYNC_ME:
if (pkt->count) {
- DRM_ERROR("bad PFP_SYNC_ME\n");
+ dev_warn_once(p->dev, "bad PFP_SYNC_ME\n");
return -EINVAL;
}
break;
case PACKET3_SURFACE_SYNC:
if (pkt->count != 3) {
- DRM_ERROR("bad SURFACE_SYNC\n");
+ dev_warn_once(p->dev, "bad SURFACE_SYNC\n");
return -EINVAL;
}
/* 0xffffffff/0x0 is flush all cache flag */
@@ -2227,7 +2227,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
radeon_get_ib_value(p, idx + 2) != 0) {
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad SURFACE_SYNC\n");
+ dev_warn_once(p->dev, "bad SURFACE_SYNC\n");
return -EINVAL;
}
ib[idx+2] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
@@ -2235,7 +2235,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
break;
case PACKET3_EVENT_WRITE:
if (pkt->count != 2 && pkt->count != 0) {
- DRM_ERROR("bad EVENT_WRITE\n");
+ dev_warn_once(p->dev, "bad EVENT_WRITE\n");
return -EINVAL;
}
if (pkt->count) {
@@ -2243,7 +2243,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad EVENT_WRITE\n");
+ dev_warn_once(p->dev, "bad EVENT_WRITE\n");
return -EINVAL;
}
offset = reloc->gpu_offset +
@@ -2259,12 +2259,12 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
uint64_t offset;
if (pkt->count != 4) {
- DRM_ERROR("bad EVENT_WRITE_EOP\n");
+ dev_warn_once(p->dev, "bad EVENT_WRITE_EOP\n");
return -EINVAL;
}
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad EVENT_WRITE_EOP\n");
+ dev_warn_once(p->dev, "bad EVENT_WRITE_EOP\n");
return -EINVAL;
}
@@ -2281,12 +2281,12 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
uint64_t offset;
if (pkt->count != 3) {
- DRM_ERROR("bad EVENT_WRITE_EOS\n");
+ dev_warn_once(p->dev, "bad EVENT_WRITE_EOS\n");
return -EINVAL;
}
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad EVENT_WRITE_EOS\n");
+ dev_warn_once(p->dev, "bad EVENT_WRITE_EOS\n");
return -EINVAL;
}
@@ -2304,7 +2304,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
(start_reg >= PACKET3_SET_CONFIG_REG_END) ||
(end_reg >= PACKET3_SET_CONFIG_REG_END)) {
- DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
+ dev_warn_once(p->dev, "bad PACKET3_SET_CONFIG_REG\n");
return -EINVAL;
}
for (reg = start_reg, idx++; reg <= end_reg; reg += 4, idx++) {
@@ -2321,7 +2321,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
if ((start_reg < PACKET3_SET_CONTEXT_REG_START) ||
(start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
(end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
- DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
+ dev_warn_once(p->dev, "bad PACKET3_SET_CONTEXT_REG\n");
return -EINVAL;
}
for (reg = start_reg, idx++; reg <= end_reg; reg += 4, idx++) {
@@ -2334,7 +2334,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
break;
case PACKET3_SET_RESOURCE:
if (pkt->count % 8) {
- DRM_ERROR("bad SET_RESOURCE\n");
+ dev_warn_once(p->dev, "bad SET_RESOURCE\n");
return -EINVAL;
}
start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_START;
@@ -2342,7 +2342,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
if ((start_reg < PACKET3_SET_RESOURCE_START) ||
(start_reg >= PACKET3_SET_RESOURCE_END) ||
(end_reg >= PACKET3_SET_RESOURCE_END)) {
- DRM_ERROR("bad SET_RESOURCE\n");
+ dev_warn_once(p->dev, "bad SET_RESOURCE\n");
return -EINVAL;
}
for (i = 0; i < (pkt->count / 8); i++) {
@@ -2355,7 +2355,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
/* tex base */
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad SET_RESOURCE (tex)\n");
+ dev_warn_once(p->dev, "bad SET_RESOURCE (tex)\n");
return -EINVAL;
}
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
@@ -2392,7 +2392,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
} else {
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad SET_RESOURCE (tex)\n");
+ dev_warn_once(p->dev, "bad SET_RESOURCE (tex)\n");
return -EINVAL;
}
moffset = (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
@@ -2411,14 +2411,15 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
/* vtx base */
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad SET_RESOURCE (vtx)\n");
+ dev_warn_once(p->dev, "bad SET_RESOURCE (vtx)\n");
return -EINVAL;
}
offset = radeon_get_ib_value(p, idx+1+(i*8)+0);
size = radeon_get_ib_value(p, idx+1+(i*8)+1);
if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
/* force size to size of the buffer */
- dev_warn_ratelimited(p->dev, "vbo resource seems too big for the bo\n");
+ dev_warn_once(p->dev, "vbo resource seems too big (%d) for the bo (%ld)\n",
+ size + offset, radeon_bo_size(reloc->robj));
ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj) - offset;
}
@@ -2431,7 +2432,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
case SQ_TEX_VTX_INVALID_TEXTURE:
case SQ_TEX_VTX_INVALID_BUFFER:
default:
- DRM_ERROR("bad SET_RESOURCE\n");
+ dev_warn_once(p->dev, "bad SET_RESOURCE\n");
return -EINVAL;
}
}
@@ -2445,7 +2446,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
if ((start_reg < PACKET3_SET_BOOL_CONST_START) ||
(start_reg >= PACKET3_SET_BOOL_CONST_END) ||
(end_reg >= PACKET3_SET_BOOL_CONST_END)) {
- DRM_ERROR("bad SET_BOOL_CONST\n");
+ dev_warn_once(p->dev, "bad SET_BOOL_CONST\n");
return -EINVAL;
}
break;
@@ -2455,7 +2456,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
if ((start_reg < PACKET3_SET_LOOP_CONST_START) ||
(start_reg >= PACKET3_SET_LOOP_CONST_END) ||
(end_reg >= PACKET3_SET_LOOP_CONST_END)) {
- DRM_ERROR("bad SET_LOOP_CONST\n");
+ dev_warn_once(p->dev, "bad SET_LOOP_CONST\n");
return -EINVAL;
}
break;
@@ -2465,13 +2466,13 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
if ((start_reg < PACKET3_SET_CTL_CONST_START) ||
(start_reg >= PACKET3_SET_CTL_CONST_END) ||
(end_reg >= PACKET3_SET_CTL_CONST_END)) {
- DRM_ERROR("bad SET_CTL_CONST\n");
+ dev_warn_once(p->dev, "bad SET_CTL_CONST\n");
return -EINVAL;
}
break;
case PACKET3_SET_SAMPLER:
if (pkt->count % 3) {
- DRM_ERROR("bad SET_SAMPLER\n");
+ dev_warn_once(p->dev, "bad SET_SAMPLER\n");
return -EINVAL;
}
start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_START;
@@ -2479,13 +2480,13 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
if ((start_reg < PACKET3_SET_SAMPLER_START) ||
(start_reg >= PACKET3_SET_SAMPLER_END) ||
(end_reg >= PACKET3_SET_SAMPLER_END)) {
- DRM_ERROR("bad SET_SAMPLER\n");
+ dev_warn_once(p->dev, "bad SET_SAMPLER\n");
return -EINVAL;
}
break;
case PACKET3_STRMOUT_BUFFER_UPDATE:
if (pkt->count != 4) {
- DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid count)\n");
+ dev_warn_once(p->dev, "bad STRMOUT_BUFFER_UPDATE (invalid count)\n");
return -EINVAL;
}
/* Updating memory at DST_ADDRESS. */
@@ -2493,14 +2494,14 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
u64 offset;
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
+ dev_warn_once(p->dev, "bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
return -EINVAL;
}
offset = radeon_get_ib_value(p, idx+1);
offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
if ((offset + 4) > radeon_bo_size(reloc->robj)) {
- DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n",
- offset + 4, radeon_bo_size(reloc->robj));
+ dev_warn_once(p->dev, "bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n",
+ offset + 4, radeon_bo_size(reloc->robj));
return -EINVAL;
}
offset += reloc->gpu_offset;
@@ -2512,14 +2513,14 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
u64 offset;
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
+ dev_warn_once(p->dev, "bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
return -EINVAL;
}
offset = radeon_get_ib_value(p, idx+3);
offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
if ((offset + 4) > radeon_bo_size(reloc->robj)) {
- DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n",
- offset + 4, radeon_bo_size(reloc->robj));
+ dev_warn_once(p->dev, "bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n",
+ offset + 4, radeon_bo_size(reloc->robj));
return -EINVAL;
}
offset += reloc->gpu_offset;
@@ -2532,23 +2533,23 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
u64 offset;
if (pkt->count != 3) {
- DRM_ERROR("bad MEM_WRITE (invalid count)\n");
+ dev_warn_once(p->dev, "bad MEM_WRITE (invalid count)\n");
return -EINVAL;
}
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
+ dev_warn_once(p->dev, "bad MEM_WRITE (missing reloc)\n");
return -EINVAL;
}
offset = radeon_get_ib_value(p, idx+0);
offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL;
if (offset & 0x7) {
- DRM_ERROR("bad MEM_WRITE (address not qwords aligned)\n");
+ dev_warn_once(p->dev, "bad MEM_WRITE (address not qwords aligned)\n");
return -EINVAL;
}
if ((offset + 8) > radeon_bo_size(reloc->robj)) {
- DRM_ERROR("bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n",
- offset + 8, radeon_bo_size(reloc->robj));
+ dev_warn_once(p->dev, "bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n",
+ offset + 8, radeon_bo_size(reloc->robj));
return -EINVAL;
}
offset += reloc->gpu_offset;
@@ -2558,7 +2559,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
}
case PACKET3_COPY_DW:
if (pkt->count != 4) {
- DRM_ERROR("bad COPY_DW (invalid count)\n");
+ dev_warn_once(p->dev, "bad COPY_DW (invalid count)\n");
return -EINVAL;
}
if (idx_value & 0x1) {
@@ -2566,14 +2567,14 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
/* SRC is memory. */
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad COPY_DW (missing src reloc)\n");
+ dev_warn_once(p->dev, "bad COPY_DW (missing src reloc)\n");
return -EINVAL;
}
offset = radeon_get_ib_value(p, idx+1);
offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
if ((offset + 4) > radeon_bo_size(reloc->robj)) {
- DRM_ERROR("bad COPY_DW src bo too small: 0x%llx, 0x%lx\n",
- offset + 4, radeon_bo_size(reloc->robj));
+ dev_warn_once(p->dev, "bad COPY_DW src bo too small: 0x%llx, 0x%lx\n",
+ offset + 4, radeon_bo_size(reloc->robj));
return -EINVAL;
}
offset += reloc->gpu_offset;
@@ -2583,8 +2584,8 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
/* SRC is a reg. */
reg = radeon_get_ib_value(p, idx+1) << 2;
if (!evergreen_is_safe_reg(p, reg)) {
- dev_warn(p->dev, "forbidden register 0x%08x at %d\n",
- reg, idx + 1);
+ dev_warn_once(p->dev, "forbidden register 0x%08x at %d\n",
+ reg, idx + 1);
return -EINVAL;
}
}
@@ -2593,14 +2594,14 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
/* DST is memory. */
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
+ dev_warn_once(p->dev, "bad COPY_DW (missing dst reloc)\n");
return -EINVAL;
}
offset = radeon_get_ib_value(p, idx+3);
offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
if ((offset + 4) > radeon_bo_size(reloc->robj)) {
- DRM_ERROR("bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n",
- offset + 4, radeon_bo_size(reloc->robj));
+ dev_warn_once(p->dev, "bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n",
+ offset + 4, radeon_bo_size(reloc->robj));
return -EINVAL;
}
offset += reloc->gpu_offset;
@@ -2610,8 +2611,8 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
/* DST is a reg. */
reg = radeon_get_ib_value(p, idx+3) << 2;
if (!evergreen_is_safe_reg(p, reg)) {
- dev_warn(p->dev, "forbidden register 0x%08x at %d\n",
- reg, idx + 3);
+ dev_warn_once(p->dev, "forbidden register 0x%08x at %d\n",
+ reg, idx + 3);
return -EINVAL;
}
}
@@ -2622,7 +2623,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
uint32_t allowed_reg_base;
uint32_t source_sel;
if (pkt->count != 2) {
- DRM_ERROR("bad SET_APPEND_CNT (invalid count)\n");
+ dev_warn_once(p->dev, "bad SET_APPEND_CNT (invalid count)\n");
return -EINVAL;
}
@@ -2632,8 +2633,8 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
areg = idx_value >> 16;
if (areg < allowed_reg_base || areg > (allowed_reg_base + 11)) {
- dev_warn(p->dev, "forbidden register for append cnt 0x%08x at %d\n",
- areg, idx);
+ dev_warn_once(p->dev, "forbidden register for append cnt 0x%08x at %d\n",
+ areg, idx);
return -EINVAL;
}
@@ -2643,7 +2644,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
uint32_t swap;
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad SET_APPEND_CNT (missing reloc)\n");
+ dev_warn_once(p->dev, "bad SET_APPEND_CNT (missing reloc)\n");
return -EINVAL;
}
offset = radeon_get_ib_value(p, idx + 1);
@@ -2656,7 +2657,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
ib[idx+1] = (offset & 0xfffffffc) | swap;
ib[idx+2] = upper_32_bits(offset) & 0xff;
} else {
- DRM_ERROR("bad SET_APPEND_CNT (unsupported operation)\n");
+ dev_warn_once(p->dev, "bad SET_APPEND_CNT (unsupported operation)\n");
return -EINVAL;
}
break;
@@ -2666,23 +2667,23 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
u64 offset;
if (pkt->count != 2) {
- DRM_ERROR("bad COND_EXEC (invalid count)\n");
+ dev_warn_once(p->dev, "bad COND_EXEC (invalid count)\n");
return -EINVAL;
}
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad COND_EXEC (missing reloc)\n");
+ dev_warn_once(p->dev, "bad COND_EXEC (missing reloc)\n");
return -EINVAL;
}
offset = radeon_get_ib_value(p, idx + 0);
offset += ((u64)(radeon_get_ib_value(p, idx + 1) & 0xff)) << 32UL;
if (offset & 0x7) {
- DRM_ERROR("bad COND_EXEC (address not qwords aligned)\n");
+ dev_warn_once(p->dev, "bad COND_EXEC (address not qwords aligned)\n");
return -EINVAL;
}
if ((offset + 8) > radeon_bo_size(reloc->robj)) {
- DRM_ERROR("bad COND_EXEC bo too small: 0x%llx, 0x%lx\n",
- offset + 8, radeon_bo_size(reloc->robj));
+ dev_warn_once(p->dev, "bad COND_EXEC bo too small: 0x%llx, 0x%lx\n",
+ offset + 8, radeon_bo_size(reloc->robj));
return -EINVAL;
}
offset += reloc->gpu_offset;
@@ -2692,7 +2693,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
}
case PACKET3_COND_WRITE:
if (pkt->count != 7) {
- DRM_ERROR("bad COND_WRITE (invalid count)\n");
+ dev_warn_once(p->dev, "bad COND_WRITE (invalid count)\n");
return -EINVAL;
}
if (idx_value & 0x10) {
@@ -2700,14 +2701,14 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
/* POLL is memory. */
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad COND_WRITE (missing src reloc)\n");
+ dev_warn_once(p->dev, "bad COND_WRITE (missing src reloc)\n");
return -EINVAL;
}
offset = radeon_get_ib_value(p, idx + 1);
offset += ((u64)(radeon_get_ib_value(p, idx + 2) & 0xff)) << 32;
if ((offset + 8) > radeon_bo_size(reloc->robj)) {
- DRM_ERROR("bad COND_WRITE src bo too small: 0x%llx, 0x%lx\n",
- offset + 8, radeon_bo_size(reloc->robj));
+ dev_warn_once(p->dev, "bad COND_WRITE src bo too small: 0x%llx, 0x%lx\n",
+ offset + 8, radeon_bo_size(reloc->robj));
return -EINVAL;
}
offset += reloc->gpu_offset;
@@ -2717,8 +2718,8 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
/* POLL is a reg. */
reg = radeon_get_ib_value(p, idx + 1) << 2;
if (!evergreen_is_safe_reg(p, reg)) {
- dev_warn(p->dev, "forbidden register 0x%08x at %d\n",
- reg, idx + 1);
+ dev_warn_once(p->dev, "forbidden register 0x%08x at %d\n",
+ reg, idx + 1);
return -EINVAL;
}
}
@@ -2727,14 +2728,14 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
/* WRITE is memory. */
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad COND_WRITE (missing dst reloc)\n");
+ dev_warn_once(p->dev, "bad COND_WRITE (missing dst reloc)\n");
return -EINVAL;
}
offset = radeon_get_ib_value(p, idx + 5);
offset += ((u64)(radeon_get_ib_value(p, idx + 6) & 0xff)) << 32;
if ((offset + 8) > radeon_bo_size(reloc->robj)) {
- DRM_ERROR("bad COND_WRITE dst bo too small: 0x%llx, 0x%lx\n",
- offset + 8, radeon_bo_size(reloc->robj));
+ dev_warn_once(p->dev, "bad COND_WRITE dst bo too small: 0x%llx, 0x%lx\n",
+ offset + 8, radeon_bo_size(reloc->robj));
return -EINVAL;
}
offset += reloc->gpu_offset;
@@ -2744,8 +2745,8 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
/* WRITE is a reg. */
reg = radeon_get_ib_value(p, idx + 5) << 2;
if (!evergreen_is_safe_reg(p, reg)) {
- dev_warn(p->dev, "forbidden register 0x%08x at %d\n",
- reg, idx + 5);
+ dev_warn_once(p->dev, "forbidden register 0x%08x at %d\n",
+ reg, idx + 5);
return -EINVAL;
}
}
@@ -2753,7 +2754,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
case PACKET3_NOP:
break;
default:
- DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
+ dev_warn_once(p->dev, "Packet3 opcode %x not supported\n", pkt->opcode);
return -EINVAL;
}
return 0;
@@ -2853,7 +2854,7 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
r = evergreen_packet3_check(p, &pkt);
break;
default:
- DRM_ERROR("Unknown packet type %d !\n", pkt.type);
+ dev_warn_once(p->dev, "Unknown packet type %d !\n", pkt.type);
kfree(p->track);
p->track = NULL;
return -EINVAL;
@@ -2896,8 +2897,8 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
do {
if (p->idx >= ib_chunk->length_dw) {
- DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
- p->idx, ib_chunk->length_dw);
+ dev_warn_once(p->dev, "Can not parse packet at %d after CS end %d !\n",
+ p->idx, ib_chunk->length_dw);
return -EINVAL;
}
idx = p->idx;
@@ -2910,7 +2911,7 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
case DMA_PACKET_WRITE:
r = r600_dma_cs_next_reloc(p, &dst_reloc);
if (r) {
- DRM_ERROR("bad DMA_PACKET_WRITE\n");
+ dev_warn_once(p->dev, "bad DMA_PACKET_WRITE\n");
return -EINVAL;
}
switch (sub_cmd) {
@@ -2932,24 +2933,24 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
p->idx += count + 3;
break;
default:
- DRM_ERROR("bad DMA_PACKET_WRITE [%6d] 0x%08x sub cmd is not 0 or 8\n", idx, header);
+ dev_warn_once(p->dev, "bad DMA_PACKET_WRITE [%6d] 0x%08x sub cmd is not 0 or 8\n", idx, header);
return -EINVAL;
}
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n",
- dst_offset, radeon_bo_size(dst_reloc->robj));
+ dev_warn_once(p->dev, "DMA write buffer too small (%llu %lu)\n",
+ dst_offset, radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
break;
case DMA_PACKET_COPY:
r = r600_dma_cs_next_reloc(p, &src_reloc);
if (r) {
- DRM_ERROR("bad DMA_PACKET_COPY\n");
+ dev_warn_once(p->dev, "bad DMA_PACKET_COPY\n");
return -EINVAL;
}
r = r600_dma_cs_next_reloc(p, &dst_reloc);
if (r) {
- DRM_ERROR("bad DMA_PACKET_COPY\n");
+ dev_warn_once(p->dev, "bad DMA_PACKET_COPY\n");
return -EINVAL;
}
switch (sub_cmd) {
@@ -2961,13 +2962,13 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
dst_offset = radeon_get_ib_value(p, idx+1);
dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
return -EINVAL;
}
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2L, dw dst buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2L, dw dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
@@ -3001,13 +3002,13 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
}
if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2T, src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
return -EINVAL;
}
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, dst buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2T, dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
p->idx += 9;
@@ -3020,13 +3021,13 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
dst_offset = radeon_get_ib_value(p, idx+1);
dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n",
- src_offset + count, radeon_bo_size(src_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n",
+ src_offset + count, radeon_bo_size(src_reloc->robj));
return -EINVAL;
}
if ((dst_offset + count) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2L, byte dst buffer too small (%llu %lu)\n",
- dst_offset + count, radeon_bo_size(dst_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2L, byte dst buffer too small (%llu %lu)\n",
+ dst_offset + count, radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xffffffff);
@@ -3039,7 +3040,7 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
case 0x41:
/* L2L, partial */
if (p->family < CHIP_CAYMAN) {
- DRM_ERROR("L2L Partial is cayman only !\n");
+ dev_warn_once(p->dev, "L2L Partial is cayman only !\n");
return -EINVAL;
}
ib[idx+1] += (u32)(src_reloc->gpu_offset & 0xffffffff);
@@ -3054,7 +3055,7 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
/* L2L, dw, broadcast */
r = r600_dma_cs_next_reloc(p, &dst2_reloc);
if (r) {
- DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n");
+ dev_warn_once(p->dev, "bad L2L, dw, broadcast DMA_PACKET_COPY\n");
return -EINVAL;
}
dst_offset = radeon_get_ib_value(p, idx+1);
@@ -3064,18 +3065,18 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
src_offset = radeon_get_ib_value(p, idx+3);
src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
return -EINVAL;
}
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2L, dw, broadcast dst buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2L, dw, broadcast dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
- dev_warn(p->dev, "DMA L2L, dw, broadcast dst2 buffer too small (%llu %lu)\n",
- dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2L, dw, broadcast dst2 buffer too small (%llu %lu)\n",
+ dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
return -EINVAL;
}
ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
@@ -3089,12 +3090,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
/* Copy L2T Frame to Field */
case 0x48:
if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
- DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
+ dev_warn_once(p->dev, "bad L2T, frame to fields DMA_PACKET_COPY\n");
return -EINVAL;
}
r = r600_dma_cs_next_reloc(p, &dst2_reloc);
if (r) {
- DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
+ dev_warn_once(p->dev, "bad L2T, frame to fields DMA_PACKET_COPY\n");
return -EINVAL;
}
dst_offset = radeon_get_ib_value(p, idx+1);
@@ -3104,18 +3105,18 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
src_offset = radeon_get_ib_value(p, idx+8);
src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
return -EINVAL;
}
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
- dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
+ dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
return -EINVAL;
}
ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
@@ -3128,7 +3129,7 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
case 0x49:
/* L2T, T2L partial */
if (p->family < CHIP_CAYMAN) {
- DRM_ERROR("L2T, T2L Partial is cayman only !\n");
+ dev_warn_once(p->dev, "L2T, T2L Partial is cayman only !\n");
return -EINVAL;
}
/* detile bit */
@@ -3151,12 +3152,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
case 0x4b:
/* L2T, broadcast */
if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
- DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+ dev_warn_once(p->dev, "bad L2T, broadcast DMA_PACKET_COPY\n");
return -EINVAL;
}
r = r600_dma_cs_next_reloc(p, &dst2_reloc);
if (r) {
- DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+ dev_warn_once(p->dev, "bad L2T, broadcast DMA_PACKET_COPY\n");
return -EINVAL;
}
dst_offset = radeon_get_ib_value(p, idx+1);
@@ -3166,18 +3167,18 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
src_offset = radeon_get_ib_value(p, idx+8);
src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
return -EINVAL;
}
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
- dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
+ dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
return -EINVAL;
}
ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
@@ -3212,13 +3213,13 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
}
if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2T, T2L src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
return -EINVAL;
}
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, T2L dst buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2T, T2L dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
p->idx += 9;
@@ -3227,7 +3228,7 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
case 0x4d:
/* T2T partial */
if (p->family < CHIP_CAYMAN) {
- DRM_ERROR("L2T, T2L Partial is cayman only !\n");
+ dev_warn_once(p->dev, "L2T, T2L Partial is cayman only !\n");
return -EINVAL;
}
ib[idx+1] += (u32)(src_reloc->gpu_offset >> 8);
@@ -3238,12 +3239,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
case 0x4f:
/* L2T, broadcast */
if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
- DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+ dev_warn_once(p->dev, "bad L2T, broadcast DMA_PACKET_COPY\n");
return -EINVAL;
}
r = r600_dma_cs_next_reloc(p, &dst2_reloc);
if (r) {
- DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+ dev_warn_once(p->dev, "bad L2T, broadcast DMA_PACKET_COPY\n");
return -EINVAL;
}
dst_offset = radeon_get_ib_value(p, idx+1);
@@ -3253,18 +3254,18 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
src_offset = radeon_get_ib_value(p, idx+8);
src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
return -EINVAL;
}
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
- dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
+ dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
return -EINVAL;
}
ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
@@ -3274,21 +3275,21 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
p->idx += 10;
break;
default:
- DRM_ERROR("bad DMA_PACKET_COPY [%6d] 0x%08x invalid sub cmd\n", idx, header);
+ dev_warn_once(p->dev, "bad DMA_PACKET_COPY [%6d] 0x%08x invalid sub cmd\n", idx, header);
return -EINVAL;
}
break;
case DMA_PACKET_CONSTANT_FILL:
r = r600_dma_cs_next_reloc(p, &dst_reloc);
if (r) {
- DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n");
+ dev_warn_once(p->dev, "bad DMA_PACKET_CONSTANT_FILL\n");
return -EINVAL;
}
dst_offset = radeon_get_ib_value(p, idx+1);
dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
- dst_offset, radeon_bo_size(dst_reloc->robj));
+ dev_warn_once(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
+ dst_offset, radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
@@ -3299,7 +3300,7 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
p->idx += 1;
break;
default:
- DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
+ dev_warn_once(p->dev, "Unknown packet type %d at %d !\n", cmd, idx);
return -EINVAL;
}
} while (p->idx < p->chunk_ib->length_dw);
@@ -3430,7 +3431,7 @@ static bool evergreen_vm_reg_valid(u32 reg)
case CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS:
return true;
default:
- DRM_ERROR("Invalid register 0x%x in CS\n", reg);
+ DRM_DEBUG("Invalid register 0x%x in CS\n", reg);
return false;
}
}
@@ -3448,7 +3449,7 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev,
break;
case PACKET3_SET_BASE:
if (idx_value != 1) {
- DRM_ERROR("bad SET_BASE");
+ dev_warn_once(rdev->dev, "bad SET_BASE");
return -EINVAL;
}
break;
@@ -3519,7 +3520,7 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev,
if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
(start_reg >= PACKET3_SET_CONFIG_REG_END) ||
(end_reg >= PACKET3_SET_CONFIG_REG_END)) {
- DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
+ dev_warn_once(rdev->dev, "bad PACKET3_SET_CONFIG_REG\n");
return -EINVAL;
}
for (i = 0; i < pkt->count; i++) {
@@ -3539,7 +3540,7 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev,
(command & PACKET3_CP_DMA_CMD_SAS))) { /* src = register */
/* non mem to mem copies requires dw aligned count */
if ((command & 0x1fffff) % 4) {
- DRM_ERROR("CP DMA command requires dw count alignment\n");
+ dev_warn_once(rdev->dev, "CP DMA command requires dw count alignment\n");
return -EINVAL;
}
}
@@ -3550,14 +3551,14 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev,
if (command & PACKET3_CP_DMA_CMD_SAIC) {
reg = start_reg;
if (!evergreen_vm_reg_valid(reg)) {
- DRM_ERROR("CP DMA Bad SRC register\n");
+ dev_warn_once(rdev->dev, "CP DMA Bad SRC register\n");
return -EINVAL;
}
} else {
for (i = 0; i < (command & 0x1fffff); i++) {
reg = start_reg + (4 * i);
if (!evergreen_vm_reg_valid(reg)) {
- DRM_ERROR("CP DMA Bad SRC register\n");
+ dev_warn_once(rdev->dev, "CP DMA Bad SRC register\n");
return -EINVAL;
}
}
@@ -3571,14 +3572,14 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev,
if (command & PACKET3_CP_DMA_CMD_DAIC) {
reg = start_reg;
if (!evergreen_vm_reg_valid(reg)) {
- DRM_ERROR("CP DMA Bad DST register\n");
+ dev_warn_once(rdev->dev, "CP DMA Bad DST register\n");
return -EINVAL;
}
} else {
for (i = 0; i < (command & 0x1fffff); i++) {
reg = start_reg + (4 * i);
if (!evergreen_vm_reg_valid(reg)) {
- DRM_ERROR("CP DMA Bad DST register\n");
+ dev_warn_once(rdev->dev, "CP DMA Bad DST register\n");
return -EINVAL;
}
}
@@ -3591,7 +3592,7 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev,
uint32_t allowed_reg_base;
if (pkt->count != 2) {
- DRM_ERROR("bad SET_APPEND_CNT (invalid count)\n");
+ dev_warn_once(rdev->dev, "bad SET_APPEND_CNT (invalid count)\n");
return -EINVAL;
}
@@ -3601,8 +3602,8 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev,
areg = idx_value >> 16;
if (areg < allowed_reg_base || areg > (allowed_reg_base + 11)) {
- DRM_ERROR("forbidden register for append cnt 0x%08x at %d\n",
- areg, idx);
+ dev_warn_once(rdev->dev, "forbidden register for append cnt 0x%08x at %d\n",
+ areg, idx);
return -EINVAL;
}
break;
@@ -3681,7 +3682,9 @@ int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
idx += count + 3;
break;
default:
- DRM_ERROR("bad DMA_PACKET_WRITE [%6d] 0x%08x sub cmd is not 0 or 8\n", idx, ib->ptr[idx]);
+ dev_warn_once(rdev->dev,
+ "bad DMA_PACKET_WRITE [%6d] 0x%08x sub cmd is not 0 or 8\n",
+ idx, ib->ptr[idx]);
return -EINVAL;
}
break;
@@ -3732,7 +3735,9 @@ int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
idx += 10;
break;
default:
- DRM_ERROR("bad DMA_PACKET_COPY [%6d] 0x%08x invalid sub cmd\n", idx, ib->ptr[idx]);
+ dev_warn_once(rdev->dev,
+ "bad DMA_PACKET_COPY [%6d] 0x%08x invalid sub cmd\n",
+ idx, ib->ptr[idx]);
return -EINVAL;
}
break;
@@ -3743,7 +3748,7 @@ int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
idx += 1;
break;
default:
- DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
+ dev_warn_once(rdev->dev, "Unknown packet type %d at %d !\n", cmd, idx);
return -EINVAL;
}
} while (idx < ib->length_dw);
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index e08559c44a5c..82edbfb259bf 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -3397,7 +3397,7 @@ static int ni_enable_smc_cac(struct radeon_device *rdev,
if (PPSMC_Result_OK != smc_result)
ret = -EINVAL;
- ni_pi->cac_enabled = (PPSMC_Result_OK == smc_result) ? true : false;
+ ni_pi->cac_enabled = PPSMC_Result_OK == smc_result;
}
} else if (ni_pi->cac_enabled) {
smc_result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_DisableCac);
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 80703417d8a1..07a9c523a17a 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1298,8 +1298,8 @@ int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1313,7 +1313,7 @@ int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
tile_flags |= RADEON_DST_TILE_MACRO;
if (reloc->tiling_flags & RADEON_TILING_MICRO) {
if (reg == RADEON_SRC_PITCH_OFFSET) {
- DRM_ERROR("Cannot src blit from microtiled surface\n");
+ dev_warn_once(p->dev, "Cannot src blit from microtiled surface\n");
radeon_cs_dump_packet(p, pkt);
return -EINVAL;
}
@@ -1342,8 +1342,8 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
track = (struct r100_cs_track *)p->track;
c = radeon_get_ib_value(p, idx++) & 0x1F;
if (c > 16) {
- DRM_ERROR("Only 16 vertex buffers are allowed %d\n",
- pkt->opcode);
+ dev_warn_once(p->dev, "Only 16 vertex buffers are allowed %d\n",
+ pkt->opcode);
radeon_cs_dump_packet(p, pkt);
return -EINVAL;
}
@@ -1351,8 +1351,8 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
for (i = 0; i < (c - 1); i += 2, idx += 3) {
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for packet3 %d\n",
- pkt->opcode);
+ dev_warn_once(p->dev, "No reloc for packet3 %d\n",
+ pkt->opcode);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1364,8 +1364,8 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
track->arrays[i + 0].esize &= 0x7F;
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for packet3 %d\n",
- pkt->opcode);
+ dev_warn_once(p->dev, "No reloc for packet3 %d\n",
+ pkt->opcode);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1377,8 +1377,8 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
if (c & 1) {
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for packet3 %d\n",
- pkt->opcode);
+ dev_warn_once(p->dev, "No reloc for packet3 %d\n",
+ pkt->opcode);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1470,12 +1470,12 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
/* check its a wait until and only 1 count */
if (waitreloc.reg != RADEON_WAIT_UNTIL ||
waitreloc.count != 0) {
- DRM_ERROR("vline wait had illegal wait until segment\n");
+ dev_warn_once(p->dev, "vline wait had illegal wait until segment\n");
return -EINVAL;
}
if (radeon_get_ib_value(p, waitreloc.idx + 1) != RADEON_WAIT_CRTC_VLINE) {
- DRM_ERROR("vline wait had illegal wait until\n");
+ dev_warn_once(p->dev, "vline wait had illegal wait until\n");
return -EINVAL;
}
@@ -1493,7 +1493,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
reg = R100_CP_PACKET0_GET_REG(header);
crtc = drm_crtc_find(rdev_to_drm(p->rdev), p->filp, crtc_id);
if (!crtc) {
- DRM_ERROR("cannot find crtc %d\n", crtc_id);
+ dev_warn_once(p->dev, "cannot find crtc %d\n", crtc_id);
return -ENOENT;
}
radeon_crtc = to_radeon_crtc(crtc);
@@ -1514,7 +1514,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2;
break;
default:
- DRM_ERROR("unknown crtc reloc\n");
+ dev_warn_once(p->dev, "unknown crtc reloc\n");
return -EINVAL;
}
ib[h_idx] = header;
@@ -1599,7 +1599,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
case RADEON_CRTC_GUI_TRIG_VLINE:
r = r100_cs_packet_parse_vline(p);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
@@ -1616,8 +1616,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
case RADEON_RB3D_DEPTHOFFSET:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1629,8 +1629,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
case RADEON_RB3D_COLOROFFSET:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1645,8 +1645,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
i = (reg - RADEON_PP_TXOFFSET_0) / 24;
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1672,8 +1672,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
i = (reg - RADEON_PP_CUBIC_OFFSET_T0_0) / 4;
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1690,8 +1690,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
i = (reg - RADEON_PP_CUBIC_OFFSET_T1_0) / 4;
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1708,8 +1708,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
i = (reg - RADEON_PP_CUBIC_OFFSET_T2_0) / 4;
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1726,8 +1726,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
case RADEON_RB3D_COLORPITCH:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1768,8 +1768,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
track->cb[0].cpp = 4;
break;
default:
- DRM_ERROR("Invalid color buffer format (%d) !\n",
- ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
+ dev_warn_once(p->dev, "Invalid color buffer format (%d) !\n",
+ ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
return -EINVAL;
}
track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
@@ -1797,8 +1797,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
case RADEON_RB3D_ZPASS_ADDR:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1927,10 +1927,10 @@ int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
idx = pkt->idx + 1;
value = radeon_get_ib_value(p, idx + 2);
if ((value + 1) > radeon_bo_size(robj)) {
- DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER "
- "(need %u have %lu) !\n",
- value + 1,
- radeon_bo_size(robj));
+ dev_warn_once(p->dev, "[drm] Buffer too small for PACKET3 INDX_BUFFER "
+ "(need %u have %lu) !\n",
+ value + 1,
+ radeon_bo_size(robj));
return -EINVAL;
}
return 0;
@@ -1957,7 +1957,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
case PACKET3_INDX_BUFFER:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
+ dev_warn_once(p->dev, "No reloc for packet3 %d\n", pkt->opcode);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1971,7 +1971,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
/* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
+ dev_warn_once(p->dev, "No reloc for packet3 %d\n", pkt->opcode);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1992,7 +1992,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
break;
case PACKET3_3D_DRAW_IMMD:
if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
- DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
+ dev_warn_once(p->dev, "PRIM_WALK must be 3 for IMMD draw\n");
return -EINVAL;
}
track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 0));
@@ -2005,7 +2005,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
/* triggers drawing using in-packet vertex data */
case PACKET3_3D_DRAW_IMMD_2:
if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
- DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
+ dev_warn_once(p->dev, "PRIM_WALK must be 3 for IMMD draw\n");
return -EINVAL;
}
track->vap_vf_cntl = radeon_get_ib_value(p, idx);
@@ -2051,7 +2051,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
case PACKET3_NOP:
break;
default:
- DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
+ dev_warn_once(p->dev, "Packet3 opcode %x not supported\n", pkt->opcode);
return -EINVAL;
}
return 0;
@@ -2093,8 +2093,8 @@ int r100_cs_parse(struct radeon_cs_parser *p)
r = r100_packet3_check(p, &pkt);
break;
default:
- DRM_ERROR("Unknown packet type %d !\n",
- pkt.type);
+ dev_warn_once(p->dev, "Unknown packet type %d !\n",
+ pkt.type);
return -EINVAL;
}
if (r)
@@ -2105,19 +2105,19 @@ int r100_cs_parse(struct radeon_cs_parser *p)
static void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
{
- DRM_ERROR("pitch %d\n", t->pitch);
- DRM_ERROR("use_pitch %d\n", t->use_pitch);
- DRM_ERROR("width %d\n", t->width);
- DRM_ERROR("width_11 %d\n", t->width_11);
- DRM_ERROR("height %d\n", t->height);
- DRM_ERROR("height_11 %d\n", t->height_11);
- DRM_ERROR("num levels %d\n", t->num_levels);
- DRM_ERROR("depth %d\n", t->txdepth);
- DRM_ERROR("bpp %d\n", t->cpp);
- DRM_ERROR("coordinate type %d\n", t->tex_coord_type);
- DRM_ERROR("width round to power of 2 %d\n", t->roundup_w);
- DRM_ERROR("height round to power of 2 %d\n", t->roundup_h);
- DRM_ERROR("compress format %d\n", t->compress_format);
+ DRM_DEBUG("pitch %d\n", t->pitch);
+ DRM_DEBUG("use_pitch %d\n", t->use_pitch);
+ DRM_DEBUG("width %d\n", t->width);
+ DRM_DEBUG("width_11 %d\n", t->width_11);
+ DRM_DEBUG("height %d\n", t->height);
+ DRM_DEBUG("height_11 %d\n", t->height_11);
+ DRM_DEBUG("num levels %d\n", t->num_levels);
+ DRM_DEBUG("depth %d\n", t->txdepth);
+ DRM_DEBUG("bpp %d\n", t->cpp);
+ DRM_DEBUG("coordinate type %d\n", t->tex_coord_type);
+ DRM_DEBUG("width round to power of 2 %d\n", t->roundup_w);
+ DRM_DEBUG("height round to power of 2 %d\n", t->roundup_h);
+ DRM_DEBUG("compress format %d\n", t->compress_format);
}
static int r100_track_compress_size(int compress_format, int w, int h)
@@ -2172,8 +2172,9 @@ static int r100_cs_track_cube(struct radeon_device *rdev,
size += track->textures[idx].cube_info[face].offset;
if (size > radeon_bo_size(cube_robj)) {
- DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",
- size, radeon_bo_size(cube_robj));
+ dev_warn_once(rdev->dev,
+ "Cube texture offset greater than object size %lu %lu\n",
+ size, radeon_bo_size(cube_robj));
r100_cs_track_texture_print(&track->textures[idx]);
return -1;
}
@@ -2196,7 +2197,7 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev,
continue;
robj = track->textures[u].robj;
if (robj == NULL) {
- DRM_ERROR("No texture bound to unit %u\n", u);
+ dev_warn_once(rdev->dev, "No texture bound to unit %u\n", u);
return -EINVAL;
}
size = 0;
@@ -2249,13 +2250,13 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev,
size *= 6;
break;
default:
- DRM_ERROR("Invalid texture coordinate type %u for unit "
- "%u\n", track->textures[u].tex_coord_type, u);
+ dev_warn_once(rdev->dev, "Invalid texture coordinate type %u for unit "
+ "%u\n", track->textures[u].tex_coord_type, u);
return -EINVAL;
}
if (size > radeon_bo_size(robj)) {
- DRM_ERROR("Texture of unit %u needs %lu bytes but is "
- "%lu\n", u, size, radeon_bo_size(robj));
+ dev_warn_once(rdev->dev, "Texture of unit %u needs %lu bytes but is "
+ "%lu\n", u, size, radeon_bo_size(robj));
r100_cs_track_texture_print(&track->textures[u]);
return -EINVAL;
}
@@ -2277,18 +2278,18 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
for (i = 0; i < num_cb; i++) {
if (track->cb[i].robj == NULL) {
- DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
+ dev_warn_once(rdev->dev, "[drm] No buffer for color buffer %d !\n", i);
return -EINVAL;
}
size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
size += track->cb[i].offset;
if (size > radeon_bo_size(track->cb[i].robj)) {
- DRM_ERROR("[drm] Buffer too small for color buffer %d "
- "(need %lu have %lu) !\n", i, size,
- radeon_bo_size(track->cb[i].robj));
- DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",
- i, track->cb[i].pitch, track->cb[i].cpp,
- track->cb[i].offset, track->maxy);
+ dev_warn_once(rdev->dev, "[drm] Buffer too small for color buffer %d "
+ "(need %lu have %lu) !\n", i, size,
+ radeon_bo_size(track->cb[i].robj));
+ dev_warn_once(rdev->dev, "[drm] color buffer %d (%u %u %u %u)\n",
+ i, track->cb[i].pitch, track->cb[i].cpp,
+ track->cb[i].offset, track->maxy);
return -EINVAL;
}
}
@@ -2296,18 +2297,18 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
if (track->zb_dirty && track->z_enabled) {
if (track->zb.robj == NULL) {
- DRM_ERROR("[drm] No buffer for z buffer !\n");
+ dev_warn_once(rdev->dev, "[drm] No buffer for z buffer !\n");
return -EINVAL;
}
size = track->zb.pitch * track->zb.cpp * track->maxy;
size += track->zb.offset;
if (size > radeon_bo_size(track->zb.robj)) {
- DRM_ERROR("[drm] Buffer too small for z buffer "
- "(need %lu have %lu) !\n", size,
- radeon_bo_size(track->zb.robj));
- DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n",
- track->zb.pitch, track->zb.cpp,
- track->zb.offset, track->maxy);
+ dev_warn_once(rdev->dev, "[drm] Buffer too small for z buffer "
+ "(need %lu have %lu) !\n", size,
+ radeon_bo_size(track->zb.robj));
+ dev_warn_once(rdev->dev, "[drm] zbuffer (%u %u %u %u)\n",
+ track->zb.pitch, track->zb.cpp,
+ track->zb.offset, track->maxy);
return -EINVAL;
}
}
@@ -2315,19 +2316,19 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
if (track->aa_dirty && track->aaresolve) {
if (track->aa.robj == NULL) {
- DRM_ERROR("[drm] No buffer for AA resolve buffer %d !\n", i);
+ dev_warn_once(rdev->dev, "[drm] No buffer for AA resolve buffer %d !\n", i);
return -EINVAL;
}
/* I believe the format comes from colorbuffer0. */
size = track->aa.pitch * track->cb[0].cpp * track->maxy;
size += track->aa.offset;
if (size > radeon_bo_size(track->aa.robj)) {
- DRM_ERROR("[drm] Buffer too small for AA resolve buffer %d "
- "(need %lu have %lu) !\n", i, size,
- radeon_bo_size(track->aa.robj));
- DRM_ERROR("[drm] AA resolve buffer %d (%u %u %u %u)\n",
- i, track->aa.pitch, track->cb[0].cpp,
- track->aa.offset, track->maxy);
+ dev_warn_once(rdev->dev, "[drm] Buffer too small for AA resolve buffer %d "
+ "(need %lu have %lu) !\n", i, size,
+ radeon_bo_size(track->aa.robj));
+ dev_warn_once(rdev->dev, "[drm] AA resolve buffer %d (%u %u %u %u)\n",
+ i, track->aa.pitch, track->cb[0].cpp,
+ track->aa.offset, track->maxy);
return -EINVAL;
}
}
@@ -2344,17 +2345,17 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
for (i = 0; i < track->num_arrays; i++) {
size = track->arrays[i].esize * track->max_indx * 4UL;
if (track->arrays[i].robj == NULL) {
- DRM_ERROR("(PW %u) Vertex array %u no buffer "
- "bound\n", prim_walk, i);
+ dev_warn_once(rdev->dev, "(PW %u) Vertex array %u no buffer "
+ "bound\n", prim_walk, i);
return -EINVAL;
}
if (size > radeon_bo_size(track->arrays[i].robj)) {
- dev_err(rdev->dev, "(PW %u) Vertex array %u "
- "need %lu dwords have %lu dwords\n",
- prim_walk, i, size >> 2,
- radeon_bo_size(track->arrays[i].robj)
- >> 2);
- DRM_ERROR("Max indices %u\n", track->max_indx);
+ dev_warn_once(rdev->dev, "(PW %u) Vertex array %u "
+ "need %lu dwords have %lu dwords\n",
+ prim_walk, i, size >> 2,
+ radeon_bo_size(track->arrays[i].robj)
+ >> 2);
+ dev_warn_once(rdev->dev, "Max indices %u\n", track->max_indx);
return -EINVAL;
}
}
@@ -2363,16 +2364,16 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
for (i = 0; i < track->num_arrays; i++) {
size = track->arrays[i].esize * (nverts - 1) * 4UL;
if (track->arrays[i].robj == NULL) {
- DRM_ERROR("(PW %u) Vertex array %u no buffer "
- "bound\n", prim_walk, i);
+ dev_warn_once(rdev->dev, "(PW %u) Vertex array %u no buffer "
+ "bound\n", prim_walk, i);
return -EINVAL;
}
if (size > radeon_bo_size(track->arrays[i].robj)) {
- dev_err(rdev->dev, "(PW %u) Vertex array %u "
- "need %lu dwords have %lu dwords\n",
- prim_walk, i, size >> 2,
- radeon_bo_size(track->arrays[i].robj)
- >> 2);
+ dev_warn_once(rdev->dev, "(PW %u) Vertex array %u "
+ "need %lu dwords have %lu dwords\n",
+ prim_walk, i, size >> 2,
+ radeon_bo_size(track->arrays[i].robj)
+ >> 2);
return -EINVAL;
}
}
@@ -2380,16 +2381,16 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
case 3:
size = track->vtx_size * nverts;
if (size != track->immd_dwords) {
- DRM_ERROR("IMMD draw %u dwors but needs %lu dwords\n",
- track->immd_dwords, size);
- DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n",
- nverts, track->vtx_size);
+ dev_warn_once(rdev->dev, "IMMD draw %u dwors but needs %lu dwords\n",
+ track->immd_dwords, size);
+ dev_warn_once(rdev->dev, "VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n",
+ nverts, track->vtx_size);
return -EINVAL;
}
break;
default:
- DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n",
- prim_walk);
+ dev_warn_once(rdev->dev, "[drm] Invalid primitive walk %d for VAP_VF_CNTL\n",
+ prim_walk);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index f5f2ffea5ab2..10a65a71de31 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -163,8 +163,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case RADEON_CRTC_GUI_TRIG_VLINE:
r = r100_cs_packet_parse_vline(p);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -180,8 +180,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case RADEON_RB3D_DEPTHOFFSET:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -193,8 +193,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case RADEON_RB3D_COLOROFFSET:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -212,8 +212,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
i = (reg - R200_PP_TXOFFSET_0) / 24;
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -265,8 +265,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
face = (reg - ((i * 24) + R200_PP_TXOFFSET_0)) / 4;
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -283,8 +283,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case RADEON_RB3D_COLORPITCH:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -326,12 +326,12 @@ int r200_packet0_check(struct radeon_cs_parser *p,
track->cb[0].cpp = 4;
break;
default:
- DRM_ERROR("Invalid color buffer format (%d) !\n",
- ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
+ dev_warn_once(p->dev, "Invalid color buffer format (%d) !\n",
+ ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
return -EINVAL;
}
if (idx_value & RADEON_DEPTHXY_OFFSET_ENABLE) {
- DRM_ERROR("No support for depth xy offset in kms\n");
+ dev_warn_once(p->dev, "No support for depth xy offset in kms\n");
return -EINVAL;
}
@@ -360,8 +360,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case RADEON_RB3D_ZPASS_ADDR:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index d22889fbfa9c..d2ee6deec039 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -645,8 +645,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case RADEON_CRTC_GUI_TRIG_VLINE:
r = r100_cs_packet_parse_vline(p);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -664,8 +664,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
i = (reg - R300_RB3D_COLOROFFSET0) >> 2;
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -677,8 +677,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case R300_ZB_DEPTHOFFSET:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -706,8 +706,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
i = (reg - R300_TX_OFFSET_0) >> 2;
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -762,7 +762,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
/* RB3D_CCTL */
if ((idx_value & (1 << 10)) && /* CMASK_ENABLE */
p->rdev->cmask_filp != p->filp) {
- DRM_ERROR("Invalid RB3D_CCTL: Cannot enable CMASK.\n");
+ dev_warn_once(p->dev, "Invalid RB3D_CCTL: Cannot enable CMASK.\n");
return -EINVAL;
}
track->num_cb = ((idx_value >> 5) & 0x3) + 1;
@@ -779,8 +779,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -812,8 +812,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
break;
case 5:
if (p->rdev->family < CHIP_RV515) {
- DRM_ERROR("Invalid color buffer format (%d)!\n",
- ((idx_value >> 21) & 0xF));
+ dev_warn_once(p->dev, "Invalid color buffer format (%d)!\n",
+ ((idx_value >> 21) & 0xF));
return -EINVAL;
}
fallthrough;
@@ -827,8 +827,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
track->cb[i].cpp = 16;
break;
default:
- DRM_ERROR("Invalid color buffer format (%d) !\n",
- ((idx_value >> 21) & 0xF));
+ dev_warn_once(p->dev, "Invalid color buffer format (%d) !\n",
+ ((idx_value >> 21) & 0xF));
return -EINVAL;
}
track->cb_dirty = true;
@@ -853,8 +853,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
track->zb.cpp = 4;
break;
default:
- DRM_ERROR("Invalid z buffer format (%d) !\n",
- (idx_value & 0xF));
+ dev_warn_once(p->dev, "Invalid z buffer format (%d) !\n",
+ (idx_value & 0xF));
return -EINVAL;
}
track->zb_dirty = true;
@@ -864,8 +864,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -962,8 +962,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
break;
case R300_TX_FORMAT_ATI2N:
if (p->rdev->family < CHIP_R420) {
- DRM_ERROR("Invalid texture format %u\n",
- (idx_value & 0x1F));
+ dev_warn_once(p->dev, "Invalid texture format %u\n",
+ (idx_value & 0x1F));
return -EINVAL;
}
/* The same rules apply as for DXT3/5. */
@@ -974,8 +974,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
break;
default:
- DRM_ERROR("Invalid texture format %u\n",
- (idx_value & 0x1F));
+ dev_warn_once(p->dev, "Invalid texture format %u\n",
+ (idx_value & 0x1F));
return -EINVAL;
}
track->tex_dirty = true;
@@ -1041,7 +1041,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
R100_TRACK_COMP_DXT1;
}
} else if (idx_value & (1 << 14)) {
- DRM_ERROR("Forbidden bit TXFORMAT_MSB\n");
+ dev_warn_once(p->dev, "Forbidden bit TXFORMAT_MSB\n");
return -EINVAL;
}
track->tex_dirty = true;
@@ -1079,8 +1079,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case R300_ZB_ZPASS_ADDR:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1121,8 +1121,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case R300_RB3D_AARESOLVE_OFFSET:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1191,7 +1191,7 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
case PACKET3_INDX_BUFFER:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
+ dev_warn_once(p->dev, "No reloc for packet3 %d\n", pkt->opcode);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1207,7 +1207,7 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
* PRIM_WALK must be equal to 3 vertex data in embedded
* in cmd stream */
if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
- DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
+ dev_warn_once(p->dev, "PRIM_WALK must be 3 for IMMD draw\n");
return -EINVAL;
}
track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
@@ -1222,7 +1222,7 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
* PRIM_WALK must be equal to 3 vertex data in embedded
* in cmd stream */
if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
- DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
+ dev_warn_once(p->dev, "PRIM_WALK must be 3 for IMMD draw\n");
return -EINVAL;
}
track->vap_vf_cntl = radeon_get_ib_value(p, idx);
@@ -1272,7 +1272,7 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
case PACKET3_NOP:
break;
default:
- DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
+ dev_warn_once(p->dev, "Packet3 opcode %x not supported\n", pkt->opcode);
return -EINVAL;
}
return 0;
@@ -1308,7 +1308,7 @@ int r300_cs_parse(struct radeon_cs_parser *p)
r = r300_packet3_check(p, &pkt);
break;
default:
- DRM_ERROR("Unknown packet type %d !\n", pkt.type);
+ dev_warn_once(p->dev, "Unknown packet type %d !\n", pkt.type);
return -EINVAL;
}
if (r) {
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index ac77d1246b94..8eeceeeca362 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -361,9 +361,9 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
format = G_0280A0_FORMAT(track->cb_color_info[i]);
if (!r600_fmt_is_valid_color(format)) {
- dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n",
- __func__, __LINE__, format,
- i, track->cb_color_info[i]);
+ dev_warn_once(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n",
+ __func__, __LINE__, format,
+ i, track->cb_color_info[i]);
return -EINVAL;
}
/* pitch in pixels */
@@ -384,9 +384,9 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
array_check.blocksize = r600_fmt_get_blocksize(format);
if (r600_get_array_mode_alignment(&array_check,
&pitch_align, &height_align, &depth_align, &base_align)) {
- dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
- G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
- track->cb_color_info[i]);
+ dev_warn_once(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
+ G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
+ track->cb_color_info[i]);
return -EINVAL;
}
switch (array_mode) {
@@ -402,25 +402,26 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
case V_0280A0_ARRAY_2D_TILED_THIN1:
break;
default:
- dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
- G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
- track->cb_color_info[i]);
+ dev_warn_once(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
+ G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
+ track->cb_color_info[i]);
return -EINVAL;
}
if (!IS_ALIGNED(pitch, pitch_align)) {
- dev_warn(p->dev, "%s:%d cb pitch (%d, 0x%x, %d) invalid\n",
- __func__, __LINE__, pitch, pitch_align, array_mode);
+ dev_warn_once(p->dev, "%s:%d cb pitch (%d, 0x%x, %d) invalid\n",
+ __func__, __LINE__, pitch, pitch_align, array_mode);
return -EINVAL;
}
if (!IS_ALIGNED(height, height_align)) {
- dev_warn(p->dev, "%s:%d cb height (%d, 0x%x, %d) invalid\n",
- __func__, __LINE__, height, height_align, array_mode);
+ dev_warn_once(p->dev, "%s:%d cb height (%d, 0x%x, %d) invalid\n",
+ __func__, __LINE__, height, height_align, array_mode);
return -EINVAL;
}
if (!IS_ALIGNED(base_offset, base_align)) {
- dev_warn(p->dev, "%s offset[%d] 0x%llx 0x%llx, %d not aligned\n", __func__, i,
- base_offset, base_align, array_mode);
+ dev_warn_once(p->dev,
+ "%s offset[%d] 0x%llx 0x%llx, %d not aligned\n", __func__, i,
+ base_offset, base_align, array_mode);
return -EINVAL;
}
@@ -447,13 +448,14 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
* broken userspace.
*/
} else {
- dev_warn(p->dev, "%s offset[%d] %d %llu %d %lu too big (%d %d) (%d %d %d)\n",
- __func__, i, array_mode,
- track->cb_color_bo_offset[i], tmp,
- radeon_bo_size(track->cb_color_bo[i]),
- pitch, height, r600_fmt_get_nblocksx(format, pitch),
- r600_fmt_get_nblocksy(format, height),
- r600_fmt_get_blocksize(format));
+ dev_warn_once(p->dev,
+ "%s offset[%d] %d %llu %d %lu too big (%d %d) (%d %d %d)\n",
+ __func__, i, array_mode,
+ track->cb_color_bo_offset[i], tmp,
+ radeon_bo_size(track->cb_color_bo[i]),
+ pitch, height, r600_fmt_get_nblocksx(format, pitch),
+ r600_fmt_get_nblocksy(format, height),
+ r600_fmt_get_blocksize(format));
return -EINVAL;
}
}
@@ -478,11 +480,11 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
if (bytes + track->cb_color_frag_offset[i] >
radeon_bo_size(track->cb_color_frag_bo[i])) {
- dev_warn(p->dev, "%s FMASK_TILE_MAX too large "
- "(tile_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n",
- __func__, tile_max, bytes,
- track->cb_color_frag_offset[i],
- radeon_bo_size(track->cb_color_frag_bo[i]));
+ dev_warn_once(p->dev, "%s FMASK_TILE_MAX too large "
+ "(tile_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n",
+ __func__, tile_max, bytes,
+ track->cb_color_frag_offset[i],
+ radeon_bo_size(track->cb_color_frag_bo[i]));
return -EINVAL;
}
}
@@ -496,17 +498,17 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
if (bytes + track->cb_color_tile_offset[i] >
radeon_bo_size(track->cb_color_tile_bo[i])) {
- dev_warn(p->dev, "%s CMASK_BLOCK_MAX too large "
- "(block_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n",
- __func__, block_max, bytes,
- track->cb_color_tile_offset[i],
- radeon_bo_size(track->cb_color_tile_bo[i]));
+ dev_warn_once(p->dev, "%s CMASK_BLOCK_MAX too large "
+ "(block_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n",
+ __func__, block_max, bytes,
+ track->cb_color_tile_offset[i],
+ radeon_bo_size(track->cb_color_tile_bo[i]));
return -EINVAL;
}
break;
}
default:
- dev_warn(p->dev, "%s invalid tile mode\n", __func__);
+ dev_warn_once(p->dev, "%s invalid tile mode\n", __func__);
return -EINVAL;
}
return 0;
@@ -526,7 +528,7 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
if (track->db_bo == NULL) {
- dev_warn(p->dev, "z/stencil with no depth buffer\n");
+ dev_warn_once(p->dev, "z/stencil with no depth buffer\n");
return -EINVAL;
}
switch (G_028010_FORMAT(track->db_depth_info)) {
@@ -544,20 +546,22 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
bpe = 8;
break;
default:
- dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info));
+ dev_warn_once(p->dev,
+ "z/stencil with invalid format %d\n",
+ G_028010_FORMAT(track->db_depth_info));
return -EINVAL;
}
if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
if (!track->db_depth_size_idx) {
- dev_warn(p->dev, "z/stencil buffer size not set\n");
+ dev_warn_once(p->dev, "z/stencil buffer size not set\n");
return -EINVAL;
}
tmp = radeon_bo_size(track->db_bo) - track->db_offset;
tmp = (tmp / bpe) >> 6;
if (!tmp) {
- dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n",
- track->db_depth_size, bpe, track->db_offset,
- radeon_bo_size(track->db_bo));
+ dev_warn_once(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n",
+ track->db_depth_size, bpe, track->db_offset,
+ radeon_bo_size(track->db_bo));
return -EINVAL;
}
ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF);
@@ -579,9 +583,9 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
array_check.blocksize = bpe;
if (r600_get_array_mode_alignment(&array_check,
&pitch_align, &height_align, &depth_align, &base_align)) {
- dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
- G_028010_ARRAY_MODE(track->db_depth_info),
- track->db_depth_info);
+ dev_warn_once(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
+ G_028010_ARRAY_MODE(track->db_depth_info),
+ track->db_depth_info);
return -EINVAL;
}
switch (array_mode) {
@@ -592,24 +596,24 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
case V_028010_ARRAY_2D_TILED_THIN1:
break;
default:
- dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
- G_028010_ARRAY_MODE(track->db_depth_info),
- track->db_depth_info);
+ dev_warn_once(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
+ G_028010_ARRAY_MODE(track->db_depth_info),
+ track->db_depth_info);
return -EINVAL;
}
if (!IS_ALIGNED(pitch, pitch_align)) {
- dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n",
+ dev_warn_once(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n",
__func__, __LINE__, pitch, pitch_align, array_mode);
return -EINVAL;
}
if (!IS_ALIGNED(height, height_align)) {
- dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n",
+ dev_warn_once(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n",
__func__, __LINE__, height, height_align, array_mode);
return -EINVAL;
}
if (!IS_ALIGNED(base_offset, base_align)) {
- dev_warn(p->dev, "%s offset 0x%llx, 0x%llx, %d not aligned\n", __func__,
+ dev_warn_once(p->dev, "%s offset 0x%llx, 0x%llx, %d not aligned\n", __func__,
base_offset, base_align, array_mode);
return -EINVAL;
}
@@ -618,10 +622,11 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
tmp = ntiles * bpe * 64 * nviews * track->nsamples;
if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
- dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n",
- array_mode,
- track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
- radeon_bo_size(track->db_bo));
+ dev_warn_once(p->dev,
+ "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n",
+ array_mode,
+ track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
+ radeon_bo_size(track->db_bo));
return -EINVAL;
}
}
@@ -632,13 +637,13 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
unsigned nbx, nby;
if (track->htile_bo == NULL) {
- dev_warn(p->dev, "%s:%d htile enabled without htile surface 0x%08x\n",
- __func__, __LINE__, track->db_depth_info);
+ dev_warn_once(p->dev, "%s:%d htile enabled without htile surface 0x%08x\n",
+ __func__, __LINE__, track->db_depth_info);
return -EINVAL;
}
if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
- dev_warn(p->dev, "%s:%d htile can't be enabled with bogus db_depth_size 0x%08x\n",
- __func__, __LINE__, track->db_depth_size);
+ dev_warn_once(p->dev, "%s:%d htile can't be enabled with bogus db_depth_size 0x%08x\n",
+ __func__, __LINE__, track->db_depth_size);
return -EINVAL;
}
@@ -676,8 +681,8 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
nby = round_up(nby, 16 * 8);
break;
default:
- dev_warn(p->dev, "%s:%d invalid num pipes %d\n",
- __func__, __LINE__, track->npipes);
+ dev_warn_once(p->dev, "%s:%d invalid num pipes %d\n",
+ __func__, __LINE__, track->npipes);
return -EINVAL;
}
}
@@ -689,9 +694,9 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
size += track->htile_offset;
if (size > radeon_bo_size(track->htile_bo)) {
- dev_warn(p->dev, "%s:%d htile surface too small %ld for %ld (%d %d)\n",
- __func__, __LINE__, radeon_bo_size(track->htile_bo),
- size, nbx, nby);
+ dev_warn_once(p->dev, "%s:%d htile surface too small %ld for %ld (%d %d)\n",
+ __func__, __LINE__, radeon_bo_size(track->htile_bo),
+ size, nbx, nby);
return -EINVAL;
}
}
@@ -718,13 +723,13 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
u64 offset = (u64)track->vgt_strmout_bo_offset[i] +
(u64)track->vgt_strmout_size[i];
if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) {
- DRM_ERROR("streamout %d bo too small: 0x%llx, 0x%lx\n",
- i, offset,
- radeon_bo_size(track->vgt_strmout_bo[i]));
+ dev_warn_once(p->dev, "streamout %d bo too small: 0x%llx, 0x%lx\n",
+ i, offset,
+ radeon_bo_size(track->vgt_strmout_bo[i]));
return -EINVAL;
}
} else {
- dev_warn(p->dev, "No buffer for streamout %d\n", i);
+ dev_warn_once(p->dev, "No buffer for streamout %d\n", i);
return -EINVAL;
}
}
@@ -753,8 +758,8 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
(tmp >> (i * 4)) & 0xF) {
/* at least one component is enabled */
if (track->cb_color_bo[i] == NULL) {
- dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
- __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
+ dev_warn_once(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
+ __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
return -EINVAL;
}
/* perform rewrite of CB_COLOR[0-7]_SIZE */
@@ -841,33 +846,33 @@ int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
/* check its a WAIT_REG_MEM */
if (wait_reg_mem.type != RADEON_PACKET_TYPE3 ||
wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
- DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
+ dev_warn_once(p->dev, "vline wait missing WAIT_REG_MEM segment\n");
return -EINVAL;
}
wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
/* bit 4 is reg (0) or mem (1) */
if (wait_reg_mem_info & 0x10) {
- DRM_ERROR("vline WAIT_REG_MEM waiting on MEM instead of REG\n");
+ dev_warn_once(p->dev, "vline WAIT_REG_MEM waiting on MEM instead of REG\n");
return -EINVAL;
}
/* bit 8 is me (0) or pfp (1) */
if (wait_reg_mem_info & 0x100) {
- DRM_ERROR("vline WAIT_REG_MEM waiting on PFP instead of ME\n");
+ dev_warn_once(p->dev, "vline WAIT_REG_MEM waiting on PFP instead of ME\n");
return -EINVAL;
}
/* waiting for value to be equal */
if ((wait_reg_mem_info & 0x7) != 0x3) {
- DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
+ dev_warn_once(p->dev, "vline WAIT_REG_MEM function not equal\n");
return -EINVAL;
}
if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != vline_status[0]) {
- DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
+ dev_warn_once(p->dev, "vline WAIT_REG_MEM bad reg\n");
return -EINVAL;
}
if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != RADEON_VLINE_STAT) {
- DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
+ dev_warn_once(p->dev, "vline WAIT_REG_MEM bad bit mask\n");
return -EINVAL;
}
@@ -886,7 +891,7 @@ int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
crtc = drm_crtc_find(rdev_to_drm(p->rdev), p->filp, crtc_id);
if (!crtc) {
- DRM_ERROR("cannot find crtc %d\n", crtc_id);
+ dev_warn_once(p->dev, "cannot find crtc %d\n", crtc_id);
return -ENOENT;
}
radeon_crtc = to_radeon_crtc(crtc);
@@ -907,7 +912,7 @@ int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
ib[h_idx] = header;
ib[h_idx + 4] = vline_status[crtc_id] >> 2;
} else {
- DRM_ERROR("unknown crtc reloc\n");
+ dev_warn_once(p->dev, "unknown crtc reloc\n");
return -EINVAL;
}
return 0;
@@ -923,8 +928,8 @@ static int r600_packet0_check(struct radeon_cs_parser *p,
case AVIVO_D1MODE_VLINE_START_END:
r = r600_cs_packet_parse_vline(p);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
return r;
}
break;
@@ -972,7 +977,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
i = (reg >> 7);
if (i >= ARRAY_SIZE(r600_reg_safe_bm)) {
- dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+ dev_warn_once(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
return -EINVAL;
}
m = 1 << ((reg >> 2) & 31);
@@ -1013,8 +1018,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case SQ_VSTMP_RING_BASE:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
@@ -1031,8 +1036,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
radeon_cs_packet_next_is_pkt3_nop(p)) {
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
track->db_depth_info = radeon_get_ib_value(p, idx);
@@ -1073,8 +1078,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case VGT_STRMOUT_BUFFER_BASE_3:
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16;
@@ -1096,8 +1101,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CP_COHER_BASE:
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- dev_warn(p->dev, "missing reloc for CP_COHER_BASE "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "missing reloc for CP_COHER_BASE "
+ "0x%04X\n", reg);
return -EINVAL;
}
ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
@@ -1270,8 +1275,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR7_BASE:
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
tmp = (reg - CB_COLOR0_BASE) / 4;
@@ -1285,8 +1290,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case DB_DEPTH_BASE:
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
track->db_offset = radeon_get_ib_value(p, idx) << 8;
@@ -1298,8 +1303,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case DB_HTILE_DATA_BASE:
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
track->htile_offset = (u64)radeon_get_ib_value(p, idx) << 8;
@@ -1368,8 +1373,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case SQ_ALU_CONST_CACHE_VS_15:
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
@@ -1377,8 +1382,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case SX_MEMORY_EXPORT_BASE:
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- dev_warn(p->dev, "bad SET_CONFIG_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONFIG_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
@@ -1387,7 +1392,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0;
break;
default:
- dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+ dev_warn_once(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
return -EINVAL;
}
return 0;
@@ -1408,7 +1413,7 @@ static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
unsigned block_align, unsigned height_align, unsigned base_align,
unsigned *l0_size, unsigned *mipmap_size)
{
- unsigned offset, i, level;
+ unsigned offset, i;
unsigned width, height, depth, size;
unsigned blocksize;
unsigned nbx, nby;
@@ -1420,7 +1425,7 @@ static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
w0 = r600_mip_minify(w0, 0);
h0 = r600_mip_minify(h0, 0);
d0 = r600_mip_minify(d0, 0);
- for(i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) {
+ for (i = 0, offset = 0; i < nlevels; i++) {
width = r600_mip_minify(w0, i);
nbx = r600_fmt_get_nblocksx(format, width);
@@ -1543,43 +1548,43 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
llevel = 0;
break;
default:
- dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0));
+ dev_warn_once(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0));
return -EINVAL;
}
if (!r600_fmt_is_valid_texture(format, p->family)) {
- dev_warn(p->dev, "%s:%d texture invalid format %d\n",
- __func__, __LINE__, format);
+ dev_warn_once(p->dev, "%s:%d texture invalid format %d\n",
+ __func__, __LINE__, format);
return -EINVAL;
}
if (r600_get_array_mode_alignment(&array_check,
&pitch_align, &height_align, &depth_align, &base_align)) {
- dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n",
- __func__, __LINE__, G_038000_TILE_MODE(word0));
+ dev_warn_once(p->dev, "%s:%d tex array mode (%d) invalid\n",
+ __func__, __LINE__, G_038000_TILE_MODE(word0));
return -EINVAL;
}
/* XXX check height as well... */
if (!IS_ALIGNED(pitch, pitch_align)) {
- dev_warn(p->dev, "%s:%d tex pitch (%d, 0x%x, %d) invalid\n",
- __func__, __LINE__, pitch, pitch_align, G_038000_TILE_MODE(word0));
+ dev_warn_once(p->dev, "%s:%d tex pitch (%d, 0x%x, %d) invalid\n",
+ __func__, __LINE__, pitch, pitch_align, G_038000_TILE_MODE(word0));
return -EINVAL;
}
if (!IS_ALIGNED(base_offset, base_align)) {
- dev_warn(p->dev, "%s:%d tex base offset (0x%llx, 0x%llx, %d) invalid\n",
- __func__, __LINE__, base_offset, base_align, G_038000_TILE_MODE(word0));
+ dev_warn_once(p->dev, "%s:%d tex base offset (0x%llx, 0x%llx, %d) invalid\n",
+ __func__, __LINE__, base_offset, base_align, G_038000_TILE_MODE(word0));
return -EINVAL;
}
if (!IS_ALIGNED(mip_offset, base_align)) {
- dev_warn(p->dev, "%s:%d tex mip offset (0x%llx, 0x%llx, %d) invalid\n",
- __func__, __LINE__, mip_offset, base_align, G_038000_TILE_MODE(word0));
+ dev_warn_once(p->dev, "%s:%d tex mip offset (0x%llx, 0x%llx, %d) invalid\n",
+ __func__, __LINE__, mip_offset, base_align, G_038000_TILE_MODE(word0));
return -EINVAL;
}
if (blevel > llevel) {
- dev_warn(p->dev, "texture blevel %d > llevel %d\n",
- blevel, llevel);
+ dev_warn_once(p->dev, "texture blevel %d > llevel %d\n",
+ blevel, llevel);
}
if (is_array) {
barray = G_038014_BASE_ARRAY(word5);
@@ -1592,16 +1597,16 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
&l0_size, &mipmap_size);
/* using get ib will give us the offset into the texture bo */
if ((l0_size + word2) > radeon_bo_size(texture)) {
- dev_warn(p->dev, "texture bo too small ((%d %d) (%d %d) %d %d %d -> %d have %ld)\n",
- w0, h0, pitch_align, height_align,
- array_check.array_mode, format, word2,
- l0_size, radeon_bo_size(texture));
- dev_warn(p->dev, "alignments %d %d %d %lld\n", pitch, pitch_align, height_align, base_align);
+ dev_warn_once(p->dev, "texture bo too small ((%d %d) (%d %d) %d %d %d -> %d have %ld)\n",
+ w0, h0, pitch_align, height_align,
+ array_check.array_mode, format, word2,
+ l0_size, radeon_bo_size(texture));
+ dev_warn_once(p->dev, "alignments %d %d %d %lld\n", pitch, pitch_align, height_align, base_align);
return -EINVAL;
}
/* using get ib will give us the offset into the mipmap bo */
if ((mipmap_size + word3) > radeon_bo_size(mipmap)) {
- /*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
+ /*dev_warn_once(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
w0, h0, format, blevel, nlevels, word3, mipmap_size, radeon_bo_size(texture));*/
}
return 0;
@@ -1613,13 +1618,13 @@ static bool r600_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
i = (reg >> 7);
if (i >= ARRAY_SIZE(r600_reg_safe_bm)) {
- dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+ dev_warn_once(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
return false;
}
m = 1 << ((reg >> 2) & 31);
if (!(r600_reg_safe_bm[i] & m))
return true;
- dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+ dev_warn_once(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
return false;
}
@@ -1648,7 +1653,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
uint64_t offset;
if (pkt->count != 1) {
- DRM_ERROR("bad SET PREDICATION\n");
+ dev_warn_once(p->dev, "bad SET PREDICATION\n");
return -EINVAL;
}
@@ -1660,13 +1665,13 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
return 0;
if (pred_op > 2) {
- DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op);
+ dev_warn_once(p->dev, "bad SET PREDICATION operation %d\n", pred_op);
return -EINVAL;
}
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- DRM_ERROR("bad SET PREDICATION\n");
+ dev_warn_once(p->dev, "bad SET PREDICATION\n");
return -EINVAL;
}
@@ -1681,20 +1686,20 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
case PACKET3_START_3D_CMDBUF:
if (p->family >= CHIP_RV770 || pkt->count) {
- DRM_ERROR("bad START_3D\n");
+ dev_warn_once(p->dev, "bad START_3D\n");
return -EINVAL;
}
break;
case PACKET3_CONTEXT_CONTROL:
if (pkt->count != 1) {
- DRM_ERROR("bad CONTEXT_CONTROL\n");
+ dev_warn_once(p->dev, "bad CONTEXT_CONTROL\n");
return -EINVAL;
}
break;
case PACKET3_INDEX_TYPE:
case PACKET3_NUM_INSTANCES:
if (pkt->count) {
- DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES\n");
+ dev_warn_once(p->dev, "bad INDEX_TYPE/NUM_INSTANCES\n");
return -EINVAL;
}
break;
@@ -1702,12 +1707,12 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
{
uint64_t offset;
if (pkt->count != 3) {
- DRM_ERROR("bad DRAW_INDEX\n");
+ dev_warn_once(p->dev, "bad DRAW_INDEX\n");
return -EINVAL;
}
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- DRM_ERROR("bad DRAW_INDEX\n");
+ dev_warn_once(p->dev, "bad DRAW_INDEX\n");
return -EINVAL;
}
@@ -1720,37 +1725,37 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
r = r600_cs_track_check(p);
if (r) {
- dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
return r;
}
break;
}
case PACKET3_DRAW_INDEX_AUTO:
if (pkt->count != 1) {
- DRM_ERROR("bad DRAW_INDEX_AUTO\n");
+ dev_warn_once(p->dev, "bad DRAW_INDEX_AUTO\n");
return -EINVAL;
}
r = r600_cs_track_check(p);
if (r) {
- dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
+ dev_warn_once(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
return r;
}
break;
case PACKET3_DRAW_INDEX_IMMD_BE:
case PACKET3_DRAW_INDEX_IMMD:
if (pkt->count < 2) {
- DRM_ERROR("bad DRAW_INDEX_IMMD\n");
+ dev_warn_once(p->dev, "bad DRAW_INDEX_IMMD\n");
return -EINVAL;
}
r = r600_cs_track_check(p);
if (r) {
- dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
return r;
}
break;
case PACKET3_WAIT_REG_MEM:
if (pkt->count != 5) {
- DRM_ERROR("bad WAIT_REG_MEM\n");
+ dev_warn_once(p->dev, "bad WAIT_REG_MEM\n");
return -EINVAL;
}
/* bit 4 is reg (0) or mem (1) */
@@ -1759,7 +1764,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- DRM_ERROR("bad WAIT_REG_MEM\n");
+ dev_warn_once(p->dev, "bad WAIT_REG_MEM\n");
return -EINVAL;
}
@@ -1770,7 +1775,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffff0);
ib[idx+2] = upper_32_bits(offset) & 0xff;
} else if (idx_value & 0x100) {
- DRM_ERROR("cannot use PFP on REG wait\n");
+ dev_warn_once(p->dev, "cannot use PFP on REG wait\n");
return -EINVAL;
}
break;
@@ -1779,24 +1784,24 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
u32 command, size;
u64 offset, tmp;
if (pkt->count != 4) {
- DRM_ERROR("bad CP DMA\n");
+ dev_warn_once(p->dev, "bad CP DMA\n");
return -EINVAL;
}
command = radeon_get_ib_value(p, idx+4);
size = command & 0x1fffff;
if (command & PACKET3_CP_DMA_CMD_SAS) {
/* src address space is register */
- DRM_ERROR("CP DMA SAS not supported\n");
+ dev_warn_once(p->dev, "CP DMA SAS not supported\n");
return -EINVAL;
} else {
if (command & PACKET3_CP_DMA_CMD_SAIC) {
- DRM_ERROR("CP DMA SAIC only supported for registers\n");
+ dev_warn_once(p->dev, "CP DMA SAIC only supported for registers\n");
return -EINVAL;
}
/* src address space is memory */
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- DRM_ERROR("bad CP DMA SRC\n");
+ dev_warn_once(p->dev, "bad CP DMA SRC\n");
return -EINVAL;
}
@@ -1806,8 +1811,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
offset = reloc->gpu_offset + tmp;
if ((tmp + size) > radeon_bo_size(reloc->robj)) {
- dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
- tmp + size, radeon_bo_size(reloc->robj));
+ dev_warn_once(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
+ tmp + size, radeon_bo_size(reloc->robj));
return -EINVAL;
}
@@ -1816,17 +1821,17 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
}
if (command & PACKET3_CP_DMA_CMD_DAS) {
/* dst address space is register */
- DRM_ERROR("CP DMA DAS not supported\n");
+ dev_warn_once(p->dev, "CP DMA DAS not supported\n");
return -EINVAL;
} else {
/* dst address space is memory */
if (command & PACKET3_CP_DMA_CMD_DAIC) {
- DRM_ERROR("CP DMA DAIC only supported for registers\n");
+ dev_warn_once(p->dev, "CP DMA DAIC only supported for registers\n");
return -EINVAL;
}
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- DRM_ERROR("bad CP DMA DST\n");
+ dev_warn_once(p->dev, "bad CP DMA DST\n");
return -EINVAL;
}
@@ -1836,8 +1841,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
offset = reloc->gpu_offset + tmp;
if ((tmp + size) > radeon_bo_size(reloc->robj)) {
- dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
- tmp + size, radeon_bo_size(reloc->robj));
+ dev_warn_once(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
+ tmp + size, radeon_bo_size(reloc->robj));
return -EINVAL;
}
@@ -1848,7 +1853,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
}
case PACKET3_SURFACE_SYNC:
if (pkt->count != 3) {
- DRM_ERROR("bad SURFACE_SYNC\n");
+ dev_warn_once(p->dev, "bad SURFACE_SYNC\n");
return -EINVAL;
}
/* 0xffffffff/0x0 is flush all cache flag */
@@ -1856,7 +1861,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
radeon_get_ib_value(p, idx + 2) != 0) {
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- DRM_ERROR("bad SURFACE_SYNC\n");
+ dev_warn_once(p->dev, "bad SURFACE_SYNC\n");
return -EINVAL;
}
ib[idx+2] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
@@ -1864,7 +1869,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
break;
case PACKET3_EVENT_WRITE:
if (pkt->count != 2 && pkt->count != 0) {
- DRM_ERROR("bad EVENT_WRITE\n");
+ dev_warn_once(p->dev, "bad EVENT_WRITE\n");
return -EINVAL;
}
if (pkt->count) {
@@ -1872,7 +1877,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- DRM_ERROR("bad EVENT_WRITE\n");
+ dev_warn_once(p->dev, "bad EVENT_WRITE\n");
return -EINVAL;
}
offset = reloc->gpu_offset +
@@ -1888,12 +1893,12 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
uint64_t offset;
if (pkt->count != 4) {
- DRM_ERROR("bad EVENT_WRITE_EOP\n");
+ dev_warn_once(p->dev, "bad EVENT_WRITE_EOP\n");
return -EINVAL;
}
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- DRM_ERROR("bad EVENT_WRITE\n");
+ dev_warn_once(p->dev, "bad EVENT_WRITE\n");
return -EINVAL;
}
@@ -1911,7 +1916,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
if ((start_reg < PACKET3_SET_CONFIG_REG_OFFSET) ||
(start_reg >= PACKET3_SET_CONFIG_REG_END) ||
(end_reg >= PACKET3_SET_CONFIG_REG_END)) {
- DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
+ dev_warn_once(p->dev, "bad PACKET3_SET_CONFIG_REG\n");
return -EINVAL;
}
for (i = 0; i < pkt->count; i++) {
@@ -1927,7 +1932,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
if ((start_reg < PACKET3_SET_CONTEXT_REG_OFFSET) ||
(start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
(end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
- DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
+ dev_warn_once(p->dev, "bad PACKET3_SET_CONTEXT_REG\n");
return -EINVAL;
}
for (i = 0; i < pkt->count; i++) {
@@ -1939,7 +1944,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
break;
case PACKET3_SET_RESOURCE:
if (pkt->count % 7) {
- DRM_ERROR("bad SET_RESOURCE\n");
+ dev_warn_once(p->dev, "bad SET_RESOURCE\n");
return -EINVAL;
}
start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_OFFSET;
@@ -1947,7 +1952,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
if ((start_reg < PACKET3_SET_RESOURCE_OFFSET) ||
(start_reg >= PACKET3_SET_RESOURCE_END) ||
(end_reg >= PACKET3_SET_RESOURCE_END)) {
- DRM_ERROR("bad SET_RESOURCE\n");
+ dev_warn_once(p->dev, "bad SET_RESOURCE\n");
return -EINVAL;
}
for (i = 0; i < (pkt->count / 7); i++) {
@@ -1959,7 +1964,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
/* tex base */
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- DRM_ERROR("bad SET_RESOURCE\n");
+ dev_warn_once(p->dev, "bad SET_RESOURCE\n");
return -EINVAL;
}
base_offset = (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
@@ -1973,7 +1978,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
/* tex mip base */
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- DRM_ERROR("bad SET_RESOURCE\n");
+ dev_warn_once(p->dev, "bad SET_RESOURCE\n");
return -EINVAL;
}
mip_offset = (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
@@ -1994,15 +1999,15 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
/* vtx base */
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- DRM_ERROR("bad SET_RESOURCE\n");
+ dev_warn_once(p->dev, "bad SET_RESOURCE\n");
return -EINVAL;
}
offset = radeon_get_ib_value(p, idx+1+(i*7)+0);
size = radeon_get_ib_value(p, idx+1+(i*7)+1) + 1;
if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
/* force size to size of the buffer */
- dev_warn(p->dev, "vbo resource seems too big (%d) for the bo (%ld)\n",
- size + offset, radeon_bo_size(reloc->robj));
+ dev_warn_once(p->dev, "vbo resource seems too big (%d) for the bo (%ld)\n",
+ size + offset, radeon_bo_size(reloc->robj));
ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj) - offset;
}
@@ -2015,7 +2020,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
case SQ_TEX_VTX_INVALID_TEXTURE:
case SQ_TEX_VTX_INVALID_BUFFER:
default:
- DRM_ERROR("bad SET_RESOURCE\n");
+ dev_warn_once(p->dev, "bad SET_RESOURCE\n");
return -EINVAL;
}
}
@@ -2027,7 +2032,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) ||
(start_reg >= PACKET3_SET_ALU_CONST_END) ||
(end_reg >= PACKET3_SET_ALU_CONST_END)) {
- DRM_ERROR("bad SET_ALU_CONST\n");
+ dev_warn_once(p->dev, "bad SET_ALU_CONST\n");
return -EINVAL;
}
}
@@ -2038,7 +2043,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) ||
(start_reg >= PACKET3_SET_BOOL_CONST_END) ||
(end_reg >= PACKET3_SET_BOOL_CONST_END)) {
- DRM_ERROR("bad SET_BOOL_CONST\n");
+ dev_warn_once(p->dev, "bad SET_BOOL_CONST\n");
return -EINVAL;
}
break;
@@ -2048,7 +2053,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) ||
(start_reg >= PACKET3_SET_LOOP_CONST_END) ||
(end_reg >= PACKET3_SET_LOOP_CONST_END)) {
- DRM_ERROR("bad SET_LOOP_CONST\n");
+ dev_warn_once(p->dev, "bad SET_LOOP_CONST\n");
return -EINVAL;
}
break;
@@ -2058,13 +2063,13 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) ||
(start_reg >= PACKET3_SET_CTL_CONST_END) ||
(end_reg >= PACKET3_SET_CTL_CONST_END)) {
- DRM_ERROR("bad SET_CTL_CONST\n");
+ dev_warn_once(p->dev, "bad SET_CTL_CONST\n");
return -EINVAL;
}
break;
case PACKET3_SET_SAMPLER:
if (pkt->count % 3) {
- DRM_ERROR("bad SET_SAMPLER\n");
+ dev_warn_once(p->dev, "bad SET_SAMPLER\n");
return -EINVAL;
}
start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_OFFSET;
@@ -2072,22 +2077,22 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) ||
(start_reg >= PACKET3_SET_SAMPLER_END) ||
(end_reg >= PACKET3_SET_SAMPLER_END)) {
- DRM_ERROR("bad SET_SAMPLER\n");
+ dev_warn_once(p->dev, "bad SET_SAMPLER\n");
return -EINVAL;
}
break;
case PACKET3_STRMOUT_BASE_UPDATE:
/* RS780 and RS880 also need this */
if (p->family < CHIP_RS780) {
- DRM_ERROR("STRMOUT_BASE_UPDATE only supported on 7xx\n");
+ dev_warn_once(p->dev, "STRMOUT_BASE_UPDATE only supported on 7xx\n");
return -EINVAL;
}
if (pkt->count != 1) {
- DRM_ERROR("bad STRMOUT_BASE_UPDATE packet count\n");
+ dev_warn_once(p->dev, "bad STRMOUT_BASE_UPDATE packet count\n");
return -EINVAL;
}
if (idx_value > 3) {
- DRM_ERROR("bad STRMOUT_BASE_UPDATE index\n");
+ dev_warn_once(p->dev, "bad STRMOUT_BASE_UPDATE index\n");
return -EINVAL;
}
{
@@ -2095,25 +2100,27 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- DRM_ERROR("bad STRMOUT_BASE_UPDATE reloc\n");
+ dev_warn_once(p->dev, "bad STRMOUT_BASE_UPDATE reloc\n");
return -EINVAL;
}
if (reloc->robj != track->vgt_strmout_bo[idx_value]) {
- DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo does not match\n");
+ dev_warn_once(p->dev, "bad STRMOUT_BASE_UPDATE, bo does not match\n");
return -EINVAL;
}
offset = (u64)radeon_get_ib_value(p, idx+1) << 8;
if (offset != track->vgt_strmout_bo_offset[idx_value]) {
- DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo offset does not match: 0x%llx, 0x%x\n",
- offset, track->vgt_strmout_bo_offset[idx_value]);
+ dev_warn_once(p->dev,
+ "bad STRMOUT_BASE_UPDATE, bo offset does not match: 0x%llx, 0x%x\n",
+ offset, track->vgt_strmout_bo_offset[idx_value]);
return -EINVAL;
}
if ((offset + 4) > radeon_bo_size(reloc->robj)) {
- DRM_ERROR("bad STRMOUT_BASE_UPDATE bo too small: 0x%llx, 0x%lx\n",
- offset + 4, radeon_bo_size(reloc->robj));
+ dev_warn_once(p->dev,
+ "bad STRMOUT_BASE_UPDATE bo too small: 0x%llx, 0x%lx\n",
+ offset + 4, radeon_bo_size(reloc->robj));
return -EINVAL;
}
ib[idx+1] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
@@ -2121,17 +2128,17 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
break;
case PACKET3_SURFACE_BASE_UPDATE:
if (p->family >= CHIP_RV770 || p->family == CHIP_R600) {
- DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
+ dev_warn_once(p->dev, "bad SURFACE_BASE_UPDATE\n");
return -EINVAL;
}
if (pkt->count) {
- DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
+ dev_warn_once(p->dev, "bad SURFACE_BASE_UPDATE\n");
return -EINVAL;
}
break;
case PACKET3_STRMOUT_BUFFER_UPDATE:
if (pkt->count != 4) {
- DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid count)\n");
+ dev_warn_once(p->dev, "bad STRMOUT_BUFFER_UPDATE (invalid count)\n");
return -EINVAL;
}
/* Updating memory at DST_ADDRESS. */
@@ -2139,14 +2146,15 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
u64 offset;
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
+ dev_warn_once(p->dev, "bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
return -EINVAL;
}
offset = radeon_get_ib_value(p, idx+1);
offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
if ((offset + 4) > radeon_bo_size(reloc->robj)) {
- DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n",
- offset + 4, radeon_bo_size(reloc->robj));
+ dev_warn_once(p->dev,
+ "bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n",
+ offset + 4, radeon_bo_size(reloc->robj));
return -EINVAL;
}
offset += reloc->gpu_offset;
@@ -2158,14 +2166,15 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
u64 offset;
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
+ dev_warn_once(p->dev, "bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
return -EINVAL;
}
offset = radeon_get_ib_value(p, idx+3);
offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
if ((offset + 4) > radeon_bo_size(reloc->robj)) {
- DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n",
- offset + 4, radeon_bo_size(reloc->robj));
+ dev_warn_once(p->dev,
+ "bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n",
+ offset + 4, radeon_bo_size(reloc->robj));
return -EINVAL;
}
offset += reloc->gpu_offset;
@@ -2178,23 +2187,23 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
u64 offset;
if (pkt->count != 3) {
- DRM_ERROR("bad MEM_WRITE (invalid count)\n");
+ dev_warn_once(p->dev, "bad MEM_WRITE (invalid count)\n");
return -EINVAL;
}
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
+ dev_warn_once(p->dev, "bad MEM_WRITE (missing reloc)\n");
return -EINVAL;
}
offset = radeon_get_ib_value(p, idx+0);
offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL;
if (offset & 0x7) {
- DRM_ERROR("bad MEM_WRITE (address not qwords aligned)\n");
+ dev_warn_once(p->dev, "bad MEM_WRITE (address not qwords aligned)\n");
return -EINVAL;
}
if ((offset + 8) > radeon_bo_size(reloc->robj)) {
- DRM_ERROR("bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n",
- offset + 8, radeon_bo_size(reloc->robj));
+ dev_warn_once(p->dev, "bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n",
+ offset + 8, radeon_bo_size(reloc->robj));
return -EINVAL;
}
offset += reloc->gpu_offset;
@@ -2204,7 +2213,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
}
case PACKET3_COPY_DW:
if (pkt->count != 4) {
- DRM_ERROR("bad COPY_DW (invalid count)\n");
+ dev_warn_once(p->dev, "bad COPY_DW (invalid count)\n");
return -EINVAL;
}
if (idx_value & 0x1) {
@@ -2212,14 +2221,14 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
/* SRC is memory. */
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- DRM_ERROR("bad COPY_DW (missing src reloc)\n");
+ dev_warn_once(p->dev, "bad COPY_DW (missing src reloc)\n");
return -EINVAL;
}
offset = radeon_get_ib_value(p, idx+1);
offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
if ((offset + 4) > radeon_bo_size(reloc->robj)) {
- DRM_ERROR("bad COPY_DW src bo too small: 0x%llx, 0x%lx\n",
- offset + 4, radeon_bo_size(reloc->robj));
+ dev_warn_once(p->dev, "bad COPY_DW src bo too small: 0x%llx, 0x%lx\n",
+ offset + 4, radeon_bo_size(reloc->robj));
return -EINVAL;
}
offset += reloc->gpu_offset;
@@ -2236,14 +2245,14 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
/* DST is memory. */
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
+ dev_warn_once(p->dev, "bad COPY_DW (missing dst reloc)\n");
return -EINVAL;
}
offset = radeon_get_ib_value(p, idx+3);
offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
if ((offset + 4) > radeon_bo_size(reloc->robj)) {
- DRM_ERROR("bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n",
- offset + 4, radeon_bo_size(reloc->robj));
+ dev_warn_once(p->dev, "bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n",
+ offset + 4, radeon_bo_size(reloc->robj));
return -EINVAL;
}
offset += reloc->gpu_offset;
@@ -2259,7 +2268,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
case PACKET3_NOP:
break;
default:
- DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
+ dev_warn_once(p->dev, "Packet3 opcode %x not supported\n", pkt->opcode);
return -EINVAL;
}
return 0;
@@ -2306,7 +2315,7 @@ int r600_cs_parse(struct radeon_cs_parser *p)
r = r600_packet3_check(p, &pkt);
break;
default:
- DRM_ERROR("Unknown packet type %d !\n", pkt.type);
+ dev_warn_once(p->dev, "Unknown packet type %d !\n", pkt.type);
kfree(p->track);
p->track = NULL;
return -EINVAL;
@@ -2346,13 +2355,13 @@ int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
*cs_reloc = NULL;
if (p->chunk_relocs == NULL) {
- DRM_ERROR("No relocation chunk !\n");
+ dev_warn_once(p->dev, "No relocation chunk !\n");
return -EINVAL;
}
idx = p->dma_reloc_idx;
if (idx >= p->nrelocs) {
- DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
- idx, p->nrelocs);
+ dev_warn_once(p->dev, "Relocs at %d after relocations chunk end %d !\n",
+ idx, p->nrelocs);
return -EINVAL;
}
*cs_reloc = &p->relocs[idx];
@@ -2385,8 +2394,8 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
do {
if (p->idx >= ib_chunk->length_dw) {
- DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
- p->idx, ib_chunk->length_dw);
+ dev_warn_once(p->dev, "Can not parse packet at %d after CS end %d !\n",
+ p->idx, ib_chunk->length_dw);
return -EINVAL;
}
idx = p->idx;
@@ -2399,7 +2408,7 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
case DMA_PACKET_WRITE:
r = r600_dma_cs_next_reloc(p, &dst_reloc);
if (r) {
- DRM_ERROR("bad DMA_PACKET_WRITE\n");
+ dev_warn_once(p->dev, "bad DMA_PACKET_WRITE\n");
return -EINVAL;
}
if (tiled) {
@@ -2417,20 +2426,20 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
p->idx += count + 3;
}
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ dev_warn_once(p->dev, "DMA write buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
break;
case DMA_PACKET_COPY:
r = r600_dma_cs_next_reloc(p, &src_reloc);
if (r) {
- DRM_ERROR("bad DMA_PACKET_COPY\n");
+ dev_warn_once(p->dev, "bad DMA_PACKET_COPY\n");
return -EINVAL;
}
r = r600_dma_cs_next_reloc(p, &dst_reloc);
if (r) {
- DRM_ERROR("bad DMA_PACKET_COPY\n");
+ dev_warn_once(p->dev, "bad DMA_PACKET_COPY\n");
return -EINVAL;
}
if (tiled) {
@@ -2484,31 +2493,31 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
}
}
if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA copy src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ dev_warn_once(p->dev, "DMA copy src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
return -EINVAL;
}
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA write dst buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ dev_warn_once(p->dev, "DMA write dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
break;
case DMA_PACKET_CONSTANT_FILL:
if (p->family < CHIP_RV770) {
- DRM_ERROR("Constant Fill is 7xx only !\n");
+ dev_warn_once(p->dev, "Constant Fill is 7xx only !\n");
return -EINVAL;
}
r = r600_dma_cs_next_reloc(p, &dst_reloc);
if (r) {
- DRM_ERROR("bad DMA_PACKET_WRITE\n");
+ dev_warn_once(p->dev, "bad DMA_PACKET_WRITE\n");
return -EINVAL;
}
dst_offset = radeon_get_ib_value(p, idx+1);
dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ dev_warn_once(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
@@ -2519,7 +2528,7 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
p->idx += 1;
break;
default:
- DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
+ dev_warn_once(p->dev, "Unknown packet type %d at %d !\n", cmd, idx);
return -EINVAL;
}
} while (p->idx < p->chunk_ib->length_dw);
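
All of the checks above fire on malformed, userspace-supplied command
streams, so converting DRM_ERROR()/dev_warn() to dev_warn_once() caps
the log traffic at one line per call site instead of letting a buggy or
hostile client flood dmesg. A minimal sketch of the once-only pattern
(the in-tree dev_*_once() macros in include/linux/dev_printk.h layer
dynamic-debug handling on top of this):

    /* Simplified model of dev_warn_once(); each call site gets its
     * own static flag, so repeated hits emit a single warning. */
    #define my_dev_warn_once(dev, fmt, ...)                 \
    do {                                                    \
            static bool __print_once;                       \
                                                            \
            if (!__print_once) {                            \
                    __print_once = true;                    \
                    dev_warn(dev, fmt, ##__VA_ARGS__);      \
            }                                               \
    } while (0)
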
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index b8e6202f1d5b..3f9c0011244f 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -834,7 +834,7 @@ void radeon_cs_dump_packet(struct radeon_cs_parser *p,
ib = p->ib.ptr;
idx = pkt->idx;
for (i = 0; i <= (pkt->count + 1); i++, idx++)
- DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
+ dev_dbg(p->dev, "ib[%d]=0x%08X\n", idx, ib[idx]);
}
/**
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 7a3e510327b7..9e35b14e2bf0 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -554,7 +554,7 @@ int radeon_wb_init(struct radeon_device *rdev)
* cover the whole aperture even if VRAM size is inferior to aperture size
* Novell bug 204882 + along with lots of ubuntu ones
*
- * Note 3: when limiting vram it's safe to overwritte real_vram_size because
+ * Note 3: when limiting vram it's safe to overwrite real_vram_size because
* we are not in case where real_vram_size is inferior to mc_vram_size (ie
* not affected by bogus hw of Novell bug 204882 + along with lots of ubuntu
* ones)
@@ -562,7 +562,7 @@ int radeon_wb_init(struct radeon_device *rdev)
* Note 4: IGP TOM addr should be the same as the aperture addr, we don't
* explicitly check for that, though.
*
- * FIXME: when reducing VRAM size align new size on power of 2.
+ * FIXME: when reducing VRAM size, align new size on power of 2.
*/
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
{
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 4dc77c398617..351b9dfcdad8 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -926,10 +926,10 @@ static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
unsigned *fb_div, unsigned *ref_div)
{
/* limit reference * post divider to a maximum */
- ref_div_max = max(min(100 / post_div, ref_div_max), 1u);
+ ref_div_max = clamp(100 / post_div, 1u, ref_div_max);
/* get matching reference and feedback divider */
- *ref_div = min(max(den/post_div, 1u), ref_div_max);
+ *ref_div = clamp(den / post_div, 1u, ref_div_max);
*fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);
/* limit fb divider to its maximum */
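
The clamp() conversions above are behavior-preserving: clamp(val, lo, hi)
is the kernel's spelling of min(max(val, lo), hi), with the bounds reading
left to right and a build-time check in <linux/minmax.h> that rejects
provably inverted bounds. A worked example with illustrative values
(post_div and ref_div_max here are just sample numbers):

    unsigned int post_div = 3, ref_div_max = 128;

    /* 100 / 3 = 33, already inside [1, 128] */
    unsigned int a = clamp(100 / post_div, 1u, ref_div_max);  /* 33 */

    /* 100 / 128 = 0, raised to the lower bound */
    unsigned int b = clamp(100 / 128u, 1u, ref_div_max);      /* 1 */
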
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 4bb242437ff6..acd89a20f272 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -346,14 +346,14 @@ int radeon_gart_init(struct radeon_device *rdev)
DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages);
/* Allocate pages table */
- rdev->gart.pages = vzalloc(array_size(sizeof(void *),
- rdev->gart.num_cpu_pages));
+ rdev->gart.pages = vcalloc(rdev->gart.num_cpu_pages,
+ sizeof(void *));
if (rdev->gart.pages == NULL) {
radeon_gart_fini(rdev);
return -ENOMEM;
}
- rdev->gart.pages_entry = vmalloc(array_size(sizeof(uint64_t),
- rdev->gart.num_gpu_pages));
+ rdev->gart.pages_entry = vmalloc_array(rdev->gart.num_gpu_pages,
+ sizeof(uint64_t));
if (rdev->gart.pages_entry == NULL) {
radeon_gart_fini(rdev);
return -ENOMEM;
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index d6aa1a3012a8..d1e8b9757a65 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -136,9 +136,9 @@ static void radeon_legacy_lvds_update(struct drm_encoder *encoder, int mode)
}
if (rdev->is_atom_bios)
- radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+ radeon_atombios_encoder_dpms_scratch_regs(encoder, mode == DRM_MODE_DPMS_ON);
else
- radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+ radeon_combios_encoder_dpms_scratch_regs(encoder, mode == DRM_MODE_DPMS_ON);
}
@@ -545,9 +545,9 @@ static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode
WREG32(RADEON_DAC_MACRO_CNTL, dac_macro_cntl);
if (rdev->is_atom_bios)
- radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+ radeon_atombios_encoder_dpms_scratch_regs(encoder, mode == DRM_MODE_DPMS_ON);
else
- radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+ radeon_combios_encoder_dpms_scratch_regs(encoder, mode == DRM_MODE_DPMS_ON);
}
@@ -742,9 +742,9 @@ static void radeon_legacy_tmds_int_dpms(struct drm_encoder *encoder, int mode)
WREG32(RADEON_FP_GEN_CNTL, fp_gen_cntl);
if (rdev->is_atom_bios)
- radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+ radeon_atombios_encoder_dpms_scratch_regs(encoder, mode == DRM_MODE_DPMS_ON);
else
- radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+ radeon_combios_encoder_dpms_scratch_regs(encoder, mode == DRM_MODE_DPMS_ON);
}
@@ -908,9 +908,9 @@ static void radeon_legacy_tmds_ext_dpms(struct drm_encoder *encoder, int mode)
WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
if (rdev->is_atom_bios)
- radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+ radeon_atombios_encoder_dpms_scratch_regs(encoder, mode == DRM_MODE_DPMS_ON);
else
- radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+ radeon_combios_encoder_dpms_scratch_regs(encoder, mode == DRM_MODE_DPMS_ON);
}
@@ -1113,9 +1113,9 @@ static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode)
}
if (rdev->is_atom_bios)
- radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+ radeon_atombios_encoder_dpms_scratch_regs(encoder, mode == DRM_MODE_DPMS_ON);
else
- radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+ radeon_combios_encoder_dpms_scratch_regs(encoder, mode == DRM_MODE_DPMS_ON);
}
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index b4fb7e70320b..a855a96dd2ea 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -907,8 +907,7 @@ static void radeon_dpm_thermal_work_handler(struct work_struct *work)
static bool radeon_dpm_single_display(struct radeon_device *rdev)
{
- bool single_display = (rdev->pm.dpm.new_active_crtc_count < 2) ?
- true : false;
+ bool single_display = rdev->pm.dpm.new_active_crtc_count < 2;
/* check if the vblank period is too short to adjust the mclk */
if (single_display && rdev->asic->dpm.vblank_too_short) {
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index c9fef9b61ced..818554e60537 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -455,7 +455,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,
r = radeon_ring_lock(rdev, ringC, 64);
if (r) {
- DRM_ERROR("Failed to lock ring B %p\n", ringC);
+ DRM_ERROR("Failed to lock ring C %p\n", ringC);
goto out_cleanup;
}
radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
@@ -481,7 +481,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,
r = radeon_ring_lock(rdev, ringC, 64);
if (r) {
- DRM_ERROR("Failed to lock ring B %p\n", ringC);
+ DRM_ERROR("Failed to lock ring C %p\n", ringC);
goto out_cleanup;
}
radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index 2355a78e1b69..bdbc1bbe8a9b 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -86,7 +86,7 @@ int radeon_vce_init(struct radeon_device *rdev)
r = request_firmware(&rdev->vce_fw, fw_name, rdev->dev);
if (r) {
- dev_err(rdev->dev, "radeon_vce: Can't load firmware \"%s\"\n",
+ dev_err(rdev->dev, "radeon_vce: can't load firmware \"%s\"\n",
fw_name);
return r;
}
@@ -126,7 +126,7 @@ int radeon_vce_init(struct radeon_device *rdev)
rdev->vce.fw_version = (start << 24) | (mid << 16) | (end << 8);
- /* we can only work with this fw version for now */
+ /* we can only work with these fw versions for now */
if ((rdev->vce.fw_version != ((40 << 24) | (2 << 16) | (2 << 8))) &&
(rdev->vce.fw_version != ((50 << 24) | (0 << 16) | (1 << 8))) &&
(rdev->vce.fw_version != ((50 << 24) | (1 << 16) | (2 << 8))))
@@ -281,7 +281,7 @@ static void radeon_vce_idle_work_handler(struct work_struct *work)
*
* @rdev: radeon_device pointer
*
- * Make sure VCE is powerd up when we want to use it
+ * Make sure VCE is powered up when we want to use it
*/
void radeon_vce_note_usage(struct radeon_device *rdev)
{
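
For reference, the firmware version check in radeon_vce_init() above
packs major.minor.revision into the top three bytes of a u32, so the
three accepted versions are 40.2.2, 50.0.1 and 50.1.2:

    /* major.minor.rev packed into the top three bytes */
    u32 v40_2_2 = (40 << 24) | (2 << 16) | (2 << 8); /* 0x28020200 */
    u32 v50_0_1 = (50 << 24) | (0 << 16) | (1 << 8); /* 0x32000100 */
    u32 v50_1_2 = (50 << 24) | (1 << 16) | (2 << 8); /* 0x32010200 */
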
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c b/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c
index af58b814e588..001b3543924a 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c
@@ -1013,7 +1013,7 @@ err_reset_assert:
}
static const struct dev_pm_ops rcar_lvds_pm_ops = {
- SET_RUNTIME_PM_OPS(rcar_lvds_runtime_suspend, rcar_lvds_runtime_resume, NULL)
+ RUNTIME_PM_OPS(rcar_lvds_runtime_suspend, rcar_lvds_runtime_resume, NULL)
};
static struct platform_driver rcar_lvds_platform_driver = {
@@ -1021,7 +1021,7 @@ static struct platform_driver rcar_lvds_platform_driver = {
.remove = rcar_lvds_remove,
.driver = {
.name = "rcar-lvds",
- .pm = &rcar_lvds_pm_ops,
+ .pm = pm_ptr(&rcar_lvds_pm_ops),
.of_match_table = rcar_lvds_of_table,
},
};
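
RUNTIME_PM_OPS() plus pm_ptr() is the modern replacement for
SET_RUNTIME_PM_OPS(): with CONFIG_PM=n, pm_ptr() evaluates to NULL and
the callbacks become unreferenced, so the compiler can discard them
without __maybe_unused annotations (which is why the rzg2l change below
can drop those annotations). The generic shape of the pattern, with
hypothetical foo_* names:

    static const struct dev_pm_ops foo_pm_ops = {
            RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
    };

    static struct platform_driver foo_driver = {
            .driver = {
                    .name = "foo",
                    /* NULL when CONFIG_PM=n, ops can be discarded */
                    .pm = pm_ptr(&foo_pm_ops),
            },
    };
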
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
index 1af4c73f7a88..5c73a513f678 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
@@ -576,7 +576,10 @@ static int rcar_mipi_dsi_startup(struct rcar_mipi_dsi *dsi,
udelay(10);
rcar_mipi_dsi_clr(dsi, CLOCKSET1, CLOCKSET1_UPDATEPLL);
- ppisetr = PPISETR_DLEN_3 | PPISETR_CLEN;
+ rcar_mipi_dsi_clr(dsi, TXSETR, TXSETR_LANECNT_MASK);
+ rcar_mipi_dsi_set(dsi, TXSETR, dsi->lanes - 1);
+
+ ppisetr = ((BIT(dsi->lanes) - 1) & PPISETR_DLEN_MASK) | PPISETR_CLEN;
rcar_mipi_dsi_write(dsi, PPISETR, ppisetr);
rcar_mipi_dsi_set(dsi, PHYSETUP, PHYSETUP_SHUTDOWNZ);
@@ -934,9 +937,234 @@ static int rcar_mipi_dsi_host_detach(struct mipi_dsi_host *host,
return 0;
}
+static ssize_t rcar_mipi_dsi_host_tx_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg,
+ bool is_rx_xfer)
+{
+ const bool is_tx_long = mipi_dsi_packet_format_is_long(msg->type);
+ struct rcar_mipi_dsi *dsi = host_to_rcar_mipi_dsi(host);
+ struct mipi_dsi_packet packet;
+ u8 payload[16] = { 0 };
+ u32 status;
+ int ret;
+
+ ret = mipi_dsi_create_packet(&packet, msg);
+ if (ret)
+ return ret;
+
+ /* Configure LP or HS command transfer. */
+ rcar_mipi_dsi_write(dsi, TXCMSETR, (msg->flags & MIPI_DSI_MSG_USE_LPM) ?
+ TXCMSETR_SPDTYP : 0);
+
+ /* Register access mode for RX transfer. */
+ if (is_rx_xfer)
+ rcar_mipi_dsi_write(dsi, RXPSETR, 0);
+
+ /* Do not use the IRQ; poll for completion, which is quick. */
+ rcar_mipi_dsi_write(dsi, TXCMIER, 0);
+
+ /*
+ * Send the header:
+ * header[0] = Virtual Channel + Data Type
+ * header[1] = Word Count LSB (LP) or first param (SP)
+ * header[2] = Word Count MSB (LP) or second param (SP)
+ */
+ rcar_mipi_dsi_write(dsi, TXCMPHDR,
+ (is_tx_long ? TXCMPHDR_FMT : 0) |
+ TXCMPHDR_VC(msg->channel) |
+ TXCMPHDR_DT(msg->type) |
+ TXCMPHDR_DATA1(packet.header[2]) |
+ TXCMPHDR_DATA0(packet.header[1]));
+
+ if (is_tx_long) {
+ memcpy(payload, packet.payload,
+ min(msg->tx_len, sizeof(payload)));
+
+ rcar_mipi_dsi_write(dsi, TXCMPPD0R,
+ (payload[3] << 24) | (payload[2] << 16) |
+ (payload[1] << 8) | payload[0]);
+ rcar_mipi_dsi_write(dsi, TXCMPPD1R,
+ (payload[7] << 24) | (payload[6] << 16) |
+ (payload[5] << 8) | payload[4]);
+ rcar_mipi_dsi_write(dsi, TXCMPPD2R,
+ (payload[11] << 24) | (payload[10] << 16) |
+ (payload[9] << 8) | payload[8]);
+ rcar_mipi_dsi_write(dsi, TXCMPPD3R,
+ (payload[15] << 24) | (payload[14] << 16) |
+ (payload[13] << 8) | payload[12]);
+ }
+
+ /* Start the transfer, RX with BTA, TX without BTA. */
+ if (is_rx_xfer) {
+ rcar_mipi_dsi_write(dsi, TXCMCR, TXCMCR_BTAREQ);
+
+ /* Wait until the transmission, BTA, reception completed. */
+ ret = read_poll_timeout(rcar_mipi_dsi_read, status,
+ (status & RXPSR_BTAREQEND),
+ 2000, 50000, false, dsi, RXPSR);
+ } else {
+ rcar_mipi_dsi_write(dsi, TXCMCR, TXCMCR_TXREQ);
+
+ /* Wait until the transmission completed. */
+ ret = read_poll_timeout(rcar_mipi_dsi_read, status,
+ (status & TXCMSR_TXREQEND),
+ 2000, 50000, false, dsi, TXCMSR);
+ }
+
+ if (ret < 0) {
+ dev_err(dsi->dev, "Command transfer timeout (0x%08x)\n",
+ status);
+ return ret;
+ }
+
+ return packet.size;
+}
+
+static ssize_t rcar_mipi_dsi_host_rx_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct rcar_mipi_dsi *dsi = host_to_rcar_mipi_dsi(host);
+ u8 *rx_buf = (u8 *)(msg->rx_buf);
+ u32 reg, data, status, wc;
+ int i, ret;
+
+ /* Validation and parsing of the received RX data starts here. */
+ reg = rcar_mipi_dsi_read(dsi, TOSR);
+ if (reg & TOSR_TATO) { /* Turn-Around TimeOut. */
+ /* Clear TATO Turn-Around TimeOut bit. */
+ rcar_mipi_dsi_write(dsi, TOSR, TOSR_TATO);
+ return -ETIMEDOUT;
+ }
+
+ reg = rcar_mipi_dsi_read(dsi, RXPSR);
+
+ if (msg->flags & MIPI_DSI_MSG_REQ_ACK) {
+ /* Transfer with zero-length RX. */
+ if (!(reg & RXPSR_RCVACK)) {
+ /* No ACK on RX response received. */
+ return -EINVAL;
+ }
+ } else {
+ /* Transfer with non-zero-length RX. */
+ if (!(reg & RXPSR_RCVRESP)) {
+ /* No packet header of RX response received. */
+ return -EINVAL;
+ }
+
+ if (reg & (RXPSR_CRCERR | RXPSR_WCERR | RXPSR_AXIERR | RXPSR_OVRERR)) {
+ /* Incorrect response payload. */
+ return -ENODATA;
+ }
+
+ data = rcar_mipi_dsi_read(dsi, RXPHDR);
+ if (data & RXPHDR_FMT) { /* Long Packet Response. */
+ /* Read Long Packet Response length from packet header. */
+ wc = data & 0xffff;
+ if (wc > msg->rx_len) {
+ dev_warn(dsi->dev,
+ "Long Packet Response longer than RX buffer (%u), limited to %zu bytes\n",
+ wc, msg->rx_len);
+ wc = msg->rx_len;
+ }
+
+ if (wc > 16) {
+ dev_warn(dsi->dev,
+ "Long Packet Response too long (%u), limited to 16 bytes\n",
+ wc);
+ wc = 16;
+ }
+
+ for (i = 0; i < wc; i++) {
+ if (!(i % 4))
+ data = rcar_mipi_dsi_read(dsi, RXPPD0R + i);
+
+ rx_buf[i] = data & 0xff;
+ data >>= 8;
+ }
+ } else { /* Short Packet Response. */
+ if (msg->rx_len >= 1)
+ rx_buf[0] = data & 0xff;
+ if (msg->rx_len >= 2)
+ rx_buf[1] = (data >> 8) & 0xff;
+ if (msg->rx_len >= 3) {
+ dev_warn(dsi->dev,
+ "Expected Short Packet Response too long (%zu), limited to 2 bytes\n",
+ msg->rx_len);
+ }
+ }
+ }
+
+ if (reg & RXPSR_RCVAKE) {
+ /* Acknowledge and Error report received. */
+ return -EFAULT;
+ }
+
+ /* Wait until the bus handover to host processor completed. */
+ ret = read_poll_timeout(rcar_mipi_dsi_read, status,
+ !(status & PPIDL0SR_DIR),
+ 2000, 50000, false, dsi, PPIDL0SR);
+ if (ret < 0) {
+ dev_err(dsi->dev, "Command RX DIR timeout (0x%08x)\n", status);
+ return ret;
+ }
+
+ /* Wait until the data lane is in LP11 stop state. */
+ ret = read_poll_timeout(rcar_mipi_dsi_read, status,
+ status & PPIDL0SR_STPST,
+ 2000, 50000, false, dsi, PPIDL0SR);
+ if (ret < 0) {
+ dev_err(dsi->dev, "Command RX STPST timeout (0x%08x)\n", status);
+ return ret;
+ }
+
+ return 0;
+}
+
+static ssize_t rcar_mipi_dsi_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ const bool is_rx_xfer = (msg->flags & MIPI_DSI_MSG_REQ_ACK) || msg->rx_len;
+ struct rcar_mipi_dsi *dsi = host_to_rcar_mipi_dsi(host);
+ int ret;
+
+ if (msg->tx_len > 16 || msg->rx_len > 16) {
+ /* TODO: Implement Memory on AXI bus command mode. */
+ dev_warn(dsi->dev,
+ "Register-based command mode only supports payloads up to 16 bytes long\n");
+ return -EOPNOTSUPP;
+ }
+
+ ret = rcar_mipi_dsi_host_tx_transfer(host, msg, is_rx_xfer);
+
+ /* If the TX transfer succeeded and this transfer has an RX part. */
+ if (ret >= 0 && is_rx_xfer) {
+ ret = rcar_mipi_dsi_host_rx_transfer(host, msg);
+ if (ret)
+ return ret;
+
+ ret = msg->rx_len;
+ }
+
+ /*
+ * Wait a bit between commands, otherwise panels based on ILI9881C
+ * TCON may fail to correctly receive all commands sent to them.
+ * Until we can actually test with another DSI device, keep the
+ * delay here, but eventually this delay might have to be moved
+ * into the ILI9881C panel driver.
+ */
+ usleep_range(1000, 2000);
+
+ /* Clear the completion interrupt. */
+ if (!msg->rx_len)
+ rcar_mipi_dsi_write(dsi, TXCMSR, TXCMSR_TXREQEND);
+
+ return ret;
+}
+
static const struct mipi_dsi_host_ops rcar_mipi_dsi_host_ops = {
.attach = rcar_mipi_dsi_host_attach,
.detach = rcar_mipi_dsi_host_detach,
+ .transfer = rcar_mipi_dsi_host_transfer,
};
/* -----------------------------------------------------------------------------
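
With the .transfer hook in place, devices attached to this host can use
the stock drm_mipi_dsi helpers. A hypothetical panel-side read such as
the one below reaches rcar_mipi_dsi_host_transfer() as a short TX of the
read request followed by bus turnaround and the RX path:

    /* hypothetical panel driver snippet; dsi is its mipi_dsi_device */
    u8 id[3];
    ssize_t ret;

    /* short DCS read: TX + BTA + 3-byte RX through the code above */
    ret = mipi_dsi_dcs_read(dsi, MIPI_DCS_GET_DISPLAY_ID, id, sizeof(id));
    if (ret < 0)
            return ret;
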
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h
index a6b276f1d6ee..76521276e2af 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h
@@ -12,6 +12,130 @@
#define LINKSR_LPBUSY (1 << 1)
#define LINKSR_HSBUSY (1 << 0)
+#define TXSETR 0x100
+#define TXSETR_LANECNT_MASK (0x3 << 0)
+
+/*
+ * DSI Command Transfer Registers
+ */
+#define TXCMSETR 0x110
+#define TXCMSETR_SPDTYP (1 << 8) /* 0:HS 1:LP */
+#define TXCMSETR_LPPDACC (1 << 0)
+#define TXCMCR 0x120
+#define TXCMCR_BTATYP (1 << 2)
+#define TXCMCR_BTAREQ (1 << 1)
+#define TXCMCR_TXREQ (1 << 0)
+#define TXCMSR 0x130
+#define TXCMSR_CLSNERR (1 << 18)
+#define TXCMSR_AXIERR (1 << 16)
+#define TXCMSR_TXREQEND (1 << 0)
+#define TXCMSCR 0x134
+#define TXCMSCR_CLSNERR (1 << 18)
+#define TXCMSCR_AXIERR (1 << 16)
+#define TXCMSCR_TXREQEND (1 << 0)
+#define TXCMIER 0x138
+#define TXCMIER_CLSNERR (1 << 18)
+#define TXCMIER_AXIERR (1 << 16)
+#define TXCMIER_TXREQEND (1 << 0)
+#define TXCMADDRSET0R 0x140
+#define TXCMPHDR 0x150
+#define TXCMPHDR_FMT (1 << 24) /* 0:SP 1:LP */
+#define TXCMPHDR_VC(n) (((n) & 0x3) << 22)
+#define TXCMPHDR_DT(n) (((n) & 0x3f) << 16)
+#define TXCMPHDR_DATA1(n) (((n) & 0xff) << 8)
+#define TXCMPHDR_DATA0(n) (((n) & 0xff) << 0)
+#define TXCMPPD0R 0x160
+#define TXCMPPD1R 0x164
+#define TXCMPPD2R 0x168
+#define TXCMPPD3R 0x16c
+
+#define RXSETR 0x200
+#define RXSETR_CRCEN(n) (((n) & 0xf) << 24)
+#define RXSETR_ECCEN(n) (((n) & 0xf) << 16)
+#define RXPSETR 0x210
+#define RXPSETR_LPPDACC (1 << 0)
+#define RXPSR 0x220
+#define RXPSR_ECCERR1B (1 << 28)
+#define RXPSR_UEXTRGERR (1 << 25)
+#define RXPSR_RESPTOERR (1 << 24)
+#define RXPSR_OVRERR (1 << 23)
+#define RXPSR_AXIERR (1 << 22)
+#define RXPSR_CRCERR (1 << 21)
+#define RXPSR_WCERR (1 << 20)
+#define RXPSR_UEXDTERR (1 << 19)
+#define RXPSR_UEXPKTERR (1 << 18)
+#define RXPSR_ECCERR (1 << 17)
+#define RXPSR_MLFERR (1 << 16)
+#define RXPSR_RCVACK (1 << 14)
+#define RXPSR_RCVEOT (1 << 10)
+#define RXPSR_RCVAKE (1 << 9)
+#define RXPSR_RCVRESP (1 << 8)
+#define RXPSR_BTAREQEND (1 << 0)
+#define RXPSCR 0x224
+#define RXPSCR_ECCERR1B (1 << 28)
+#define RXPSCR_UEXTRGERR (1 << 25)
+#define RXPSCR_RESPTOERR (1 << 24)
+#define RXPSCR_OVRERR (1 << 23)
+#define RXPSCR_AXIERR (1 << 22)
+#define RXPSCR_CRCERR (1 << 21)
+#define RXPSCR_WCERR (1 << 20)
+#define RXPSCR_UEXDTERR (1 << 19)
+#define RXPSCR_UEXPKTERR (1 << 18)
+#define RXPSCR_ECCERR (1 << 17)
+#define RXPSCR_MLFERR (1 << 16)
+#define RXPSCR_RCVACK (1 << 14)
+#define RXPSCR_RCVEOT (1 << 10)
+#define RXPSCR_RCVAKE (1 << 9)
+#define RXPSCR_RCVRESP (1 << 8)
+#define RXPSCR_BTAREQEND (1 << 0)
+#define RXPIER 0x228
+#define RXPIER_ECCERR1B (1 << 28)
+#define RXPIER_UEXTRGERR (1 << 25)
+#define RXPIER_RESPTOERR (1 << 24)
+#define RXPIER_OVRERR (1 << 23)
+#define RXPIER_AXIERR (1 << 22)
+#define RXPIER_CRCERR (1 << 21)
+#define RXPIER_WCERR (1 << 20)
+#define RXPIER_UEXDTERR (1 << 19)
+#define RXPIER_UEXPKTERR (1 << 18)
+#define RXPIER_ECCERR (1 << 17)
+#define RXPIER_MLFERR (1 << 16)
+#define RXPIER_RCVACK (1 << 14)
+#define RXPIER_RCVEOT (1 << 10)
+#define RXPIER_RCVAKE (1 << 9)
+#define RXPIER_RCVRESP (1 << 8)
+#define RXPIER_BTAREQEND (1 << 0)
+#define RXPADDRSET0R 0x230
+#define RXPSIZESETR 0x238
+#define RXPSIZESETR_SIZE(n) (((n) & 0xf) << 3)
+#define RXPHDR 0x240
+#define RXPHDR_FMT (1 << 24) /* 0:SP 1:LP */
+#define RXPHDR_VC(n) (((n) & 0x3) << 22)
+#define RXPHDR_DT(n) (((n) & 0x3f) << 16)
+#define RXPHDR_DATA1(n) (((n) & 0xff) << 8)
+#define RXPHDR_DATA0(n) (((n) & 0xff) << 0)
+#define RXPPD0R 0x250
+#define RXPPD1R 0x254
+#define RXPPD2R 0x258
+#define RXPPD3R 0x25c
+#define AKEPR 0x300
+#define AKEPR_VC(n) (((n) & 0x3) << 22)
+#define AKEPR_DT(n) (((n) & 0x3f) << 16)
+#define AKEPR_ERRRPT(n) (((n) & 0xffff) << 0)
+#define RXRESPTOSETR 0x400
+#define TACR 0x500
+#define TASR 0x510
+#define TASCR 0x514
+#define TAIER 0x518
+#define TOSR 0x610
+#define TOSR_TATO (1 << 2)
+#define TOSR_LRXHTO (1 << 1)
+#define TOSR_HRXTO (1 << 0)
+#define TOSCR 0x614
+#define TOSCR_TATO (1 << 2)
+#define TOSCR_LRXHTO (1 << 1)
+#define TOSCR_HRXTO (1 << 0)
+
/*
* Video Mode Register
*/
@@ -80,10 +204,7 @@
* PHY-Protocol Interface (PPI) Registers
*/
#define PPISETR 0x700
-#define PPISETR_DLEN_0 (0x1 << 0)
-#define PPISETR_DLEN_1 (0x3 << 0)
-#define PPISETR_DLEN_2 (0x7 << 0)
-#define PPISETR_DLEN_3 (0xf << 0)
+#define PPISETR_DLEN_MASK (0xf << 0)
#define PPISETR_CLEN (1 << 8)
#define PPICLCR 0x710
@@ -100,6 +221,10 @@
#define PPICLSCR_HSTOLP (1 << 27)
#define PPICLSCR_TOHS (1 << 26)
+#define PPIDL0SR 0x740
+#define PPIDL0SR_DIR (1 << 10)
+#define PPIDL0SR_STPST (1 << 6)
+
#define PPIDLSR 0x760
#define PPIDLSR_STPST (0xf << 0)
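
The per-count PPISETR_DLEN_[0-3] constants were simply the one- to
four-lane masks (0x1, 0x3, 0x7, 0xf), so the startup code above now
derives the same value as BIT(lanes) - 1 under the new mask. For a
two-lane link, for example:

    /* BIT(2) - 1 = 0x3: data lanes 0-1 enabled, plus the clock lane */
    ppisetr = ((BIT(2) - 1) & PPISETR_DLEN_MASK) | PPISETR_CLEN; /* 0x103 */
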
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c
index f87337c3cbb5..3b52dfc0ea1e 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c
@@ -913,7 +913,7 @@ static const struct mipi_dsi_host_ops rzg2l_mipi_dsi_host_ops = {
* Power Management
*/
-static int __maybe_unused rzg2l_mipi_pm_runtime_suspend(struct device *dev)
+static int rzg2l_mipi_pm_runtime_suspend(struct device *dev)
{
struct rzg2l_mipi_dsi *dsi = dev_get_drvdata(dev);
@@ -923,7 +923,7 @@ static int __maybe_unused rzg2l_mipi_pm_runtime_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused rzg2l_mipi_pm_runtime_resume(struct device *dev)
+static int rzg2l_mipi_pm_runtime_resume(struct device *dev)
{
struct rzg2l_mipi_dsi *dsi = dev_get_drvdata(dev);
int ret;
@@ -940,7 +940,7 @@ static int __maybe_unused rzg2l_mipi_pm_runtime_resume(struct device *dev)
}
static const struct dev_pm_ops rzg2l_mipi_pm_ops = {
- SET_RUNTIME_PM_OPS(rzg2l_mipi_pm_runtime_suspend, rzg2l_mipi_pm_runtime_resume, NULL)
+ RUNTIME_PM_OPS(rzg2l_mipi_pm_runtime_suspend, rzg2l_mipi_pm_runtime_resume, NULL)
};
/* -----------------------------------------------------------------------------
@@ -1072,7 +1072,7 @@ static struct platform_driver rzg2l_mipi_dsi_platform_driver = {
.remove = rzg2l_mipi_dsi_remove,
.driver = {
.name = "rzg2l-mipi-dsi",
- .pm = &rzg2l_mipi_pm_ops,
+ .pm = pm_ptr(&rzg2l_mipi_pm_ops),
.of_match_table = rzg2l_mipi_dsi_of_table,
},
};
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index faf50d872be3..b7b025814e72 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -10,6 +10,7 @@ config DRM_ROCKCHIP
select VIDEOMODE_HELPERS
select DRM_ANALOGIX_DP if ROCKCHIP_ANALOGIX_DP
select DRM_DISPLAY_DP_AUX_BUS if ROCKCHIP_ANALOGIX_DP
+ select DRM_DW_DP if ROCKCHIP_DW_DP
select DRM_DW_HDMI if ROCKCHIP_DW_HDMI
select DRM_DW_HDMI_QP if ROCKCHIP_DW_HDMI_QP
select DRM_DW_MIPI_DSI if ROCKCHIP_DW_MIPI_DSI
@@ -61,6 +62,14 @@ config ROCKCHIP_CDN_DP
RK3399 based SoC, you should select this
option.
+config ROCKCHIP_DW_DP
+ bool "Rockchip specific extensions for Synopsys DW DP"
+ help
+ This selects support for Rockchip SoC specific extensions
+ to enable the Synopsys DesignWare Cores based DisplayPort
+ transmit controller on Rockchip SoCs. If you want to enable
+ DP on RK3588 based SoC, you should select this option.
+
config ROCKCHIP_DW_HDMI
bool "Rockchip specific extensions for Synopsys DW HDMI"
help
diff --git a/drivers/gpu/drm/rockchip/Makefile b/drivers/gpu/drm/rockchip/Makefile
index 2b867cebbc12..097f062399c7 100644
--- a/drivers/gpu/drm/rockchip/Makefile
+++ b/drivers/gpu/drm/rockchip/Makefile
@@ -14,6 +14,7 @@ rockchipdrm-$(CONFIG_ROCKCHIP_DW_HDMI) += dw_hdmi-rockchip.o
rockchipdrm-$(CONFIG_ROCKCHIP_DW_HDMI_QP) += dw_hdmi_qp-rockchip.o
rockchipdrm-$(CONFIG_ROCKCHIP_DW_MIPI_DSI) += dw-mipi-dsi-rockchip.o
rockchipdrm-$(CONFIG_ROCKCHIP_DW_MIPI_DSI2) += dw-mipi-dsi2-rockchip.o
+rockchipdrm-$(CONFIG_ROCKCHIP_DW_DP) += dw_dp-rockchip.o
rockchipdrm-$(CONFIG_ROCKCHIP_INNO_HDMI) += inno_hdmi.o
rockchipdrm-$(CONFIG_ROCKCHIP_LVDS) += rockchip_lvds.o
rockchipdrm-$(CONFIG_ROCKCHIP_RGB) += rockchip_rgb.o
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
index 3398160ad75e..5523911b990d 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
@@ -7,6 +7,7 @@
*/
#include <linux/clk.h>
+#include <linux/hw_bitfield.h>
#include <linux/iopoll.h>
#include <linux/math64.h>
#include <linux/mfd/syscon.h>
@@ -148,7 +149,7 @@
#define DW_MIPI_NEEDS_GRF_CLK BIT(1)
#define PX30_GRF_PD_VO_CON1 0x0438
-#define PX30_DSI_FORCETXSTOPMODE (0xf << 7)
+#define PX30_DSI_FORCETXSTOPMODE (0xfUL << 7)
#define PX30_DSI_FORCERXMODE BIT(6)
#define PX30_DSI_TURNDISABLE BIT(5)
#define PX30_DSI_LCDC_SEL BIT(0)
@@ -167,16 +168,16 @@
#define RK3399_DSI1_LCDC_SEL BIT(4)
#define RK3399_GRF_SOC_CON22 0x6258
-#define RK3399_DSI0_TURNREQUEST (0xf << 12)
-#define RK3399_DSI0_TURNDISABLE (0xf << 8)
-#define RK3399_DSI0_FORCETXSTOPMODE (0xf << 4)
-#define RK3399_DSI0_FORCERXMODE (0xf << 0)
+#define RK3399_DSI0_TURNREQUEST (0xfUL << 12)
+#define RK3399_DSI0_TURNDISABLE (0xfUL << 8)
+#define RK3399_DSI0_FORCETXSTOPMODE (0xfUL << 4)
+#define RK3399_DSI0_FORCERXMODE (0xfUL << 0)
#define RK3399_GRF_SOC_CON23 0x625c
-#define RK3399_DSI1_TURNDISABLE (0xf << 12)
-#define RK3399_DSI1_FORCETXSTOPMODE (0xf << 8)
-#define RK3399_DSI1_FORCERXMODE (0xf << 4)
-#define RK3399_DSI1_ENABLE (0xf << 0)
+#define RK3399_DSI1_TURNDISABLE (0xfUL << 12)
+#define RK3399_DSI1_FORCETXSTOPMODE (0xfUL << 8)
+#define RK3399_DSI1_FORCERXMODE (0xfUL << 4)
+#define RK3399_DSI1_ENABLE (0xfUL << 0)
#define RK3399_GRF_SOC_CON24 0x6260
#define RK3399_TXRX_MASTERSLAVEZ BIT(7)
@@ -186,8 +187,8 @@
#define RK3399_TXRX_TURNREQUEST GENMASK(3, 0)
#define RK3568_GRF_VO_CON2 0x0368
-#define RK3568_DSI0_SKEWCALHS (0x1f << 11)
-#define RK3568_DSI0_FORCETXSTOPMODE (0xf << 4)
+#define RK3568_DSI0_SKEWCALHS (0x1fUL << 11)
+#define RK3568_DSI0_FORCETXSTOPMODE (0xfUL << 4)
#define RK3568_DSI0_TURNDISABLE BIT(2)
#define RK3568_DSI0_FORCERXMODE BIT(0)
@@ -197,18 +198,16 @@
* come from. Name GRF_VO_CON3 is assumed.
*/
#define RK3568_GRF_VO_CON3 0x36c
-#define RK3568_DSI1_SKEWCALHS (0x1f << 11)
-#define RK3568_DSI1_FORCETXSTOPMODE (0xf << 4)
+#define RK3568_DSI1_SKEWCALHS (0x1fUL << 11)
+#define RK3568_DSI1_FORCETXSTOPMODE (0xfUL << 4)
#define RK3568_DSI1_TURNDISABLE BIT(2)
#define RK3568_DSI1_FORCERXMODE BIT(0)
#define RV1126_GRF_DSIPHY_CON 0x10220
-#define RV1126_DSI_FORCETXSTOPMODE (0xf << 4)
+#define RV1126_DSI_FORCETXSTOPMODE (0xfUL << 4)
#define RV1126_DSI_TURNDISABLE BIT(2)
#define RV1126_DSI_FORCERXMODE BIT(0)
-#define HIWORD_UPDATE(val, mask) (val | (mask) << 16)
-
enum {
DW_DSI_USAGE_IDLE,
DW_DSI_USAGE_DSI,
@@ -1484,14 +1483,13 @@ static const struct rockchip_dw_dsi_chip_data px30_chip_data[] = {
{
.reg = 0xff450000,
.lcdsel_grf_reg = PX30_GRF_PD_VO_CON1,
- .lcdsel_big = HIWORD_UPDATE(0, PX30_DSI_LCDC_SEL),
- .lcdsel_lit = HIWORD_UPDATE(PX30_DSI_LCDC_SEL,
- PX30_DSI_LCDC_SEL),
+ .lcdsel_big = FIELD_PREP_WM16_CONST(PX30_DSI_LCDC_SEL, 0),
+ .lcdsel_lit = FIELD_PREP_WM16_CONST(PX30_DSI_LCDC_SEL, 1),
.lanecfg1_grf_reg = PX30_GRF_PD_VO_CON1,
- .lanecfg1 = HIWORD_UPDATE(0, PX30_DSI_TURNDISABLE |
- PX30_DSI_FORCERXMODE |
- PX30_DSI_FORCETXSTOPMODE),
+ .lanecfg1 = FIELD_PREP_WM16_CONST((PX30_DSI_TURNDISABLE |
+ PX30_DSI_FORCERXMODE |
+ PX30_DSI_FORCETXSTOPMODE), 0),
.max_data_lanes = 4,
},
@@ -1502,9 +1500,9 @@ static const struct rockchip_dw_dsi_chip_data rk3128_chip_data[] = {
{
.reg = 0x10110000,
.lanecfg1_grf_reg = RK3128_GRF_LVDS_CON0,
- .lanecfg1 = HIWORD_UPDATE(0, RK3128_DSI_TURNDISABLE |
- RK3128_DSI_FORCERXMODE |
- RK3128_DSI_FORCETXSTOPMODE),
+ .lanecfg1 = FIELD_PREP_WM16_CONST((RK3128_DSI_TURNDISABLE |
+ RK3128_DSI_FORCERXMODE |
+ RK3128_DSI_FORCETXSTOPMODE), 0),
.max_data_lanes = 4,
},
{ /* sentinel */ }
@@ -1514,16 +1512,16 @@ static const struct rockchip_dw_dsi_chip_data rk3288_chip_data[] = {
{
.reg = 0xff960000,
.lcdsel_grf_reg = RK3288_GRF_SOC_CON6,
- .lcdsel_big = HIWORD_UPDATE(0, RK3288_DSI0_LCDC_SEL),
- .lcdsel_lit = HIWORD_UPDATE(RK3288_DSI0_LCDC_SEL, RK3288_DSI0_LCDC_SEL),
+ .lcdsel_big = FIELD_PREP_WM16_CONST(RK3288_DSI0_LCDC_SEL, 0),
+ .lcdsel_lit = FIELD_PREP_WM16_CONST(RK3288_DSI0_LCDC_SEL, 1),
.max_data_lanes = 4,
},
{
.reg = 0xff964000,
.lcdsel_grf_reg = RK3288_GRF_SOC_CON6,
- .lcdsel_big = HIWORD_UPDATE(0, RK3288_DSI1_LCDC_SEL),
- .lcdsel_lit = HIWORD_UPDATE(RK3288_DSI1_LCDC_SEL, RK3288_DSI1_LCDC_SEL),
+ .lcdsel_big = FIELD_PREP_WM16_CONST(RK3288_DSI1_LCDC_SEL, 0),
+ .lcdsel_lit = FIELD_PREP_WM16_CONST(RK3288_DSI1_LCDC_SEL, 1),
.max_data_lanes = 4,
},
@@ -1539,13 +1537,13 @@ static int rk3399_dphy_tx1rx1_init(struct phy *phy)
* Assume ISP0 is supplied by the RX0 dphy.
*/
regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON24,
- HIWORD_UPDATE(0, RK3399_TXRX_SRC_SEL_ISP0));
+ FIELD_PREP_WM16(RK3399_TXRX_SRC_SEL_ISP0, 0));
regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON24,
- HIWORD_UPDATE(0, RK3399_TXRX_MASTERSLAVEZ));
+ FIELD_PREP_WM16(RK3399_TXRX_MASTERSLAVEZ, 0));
regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON24,
- HIWORD_UPDATE(0, RK3399_TXRX_BASEDIR));
+ FIELD_PREP_WM16(RK3399_TXRX_BASEDIR, 0));
regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON23,
- HIWORD_UPDATE(0, RK3399_DSI1_ENABLE));
+ FIELD_PREP_WM16(RK3399_DSI1_ENABLE, 0));
return 0;
}
@@ -1559,21 +1557,20 @@ static int rk3399_dphy_tx1rx1_power_on(struct phy *phy)
usleep_range(100, 150);
regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON24,
- HIWORD_UPDATE(0, RK3399_TXRX_MASTERSLAVEZ));
+ FIELD_PREP_WM16(RK3399_TXRX_MASTERSLAVEZ, 0));
regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON24,
- HIWORD_UPDATE(RK3399_TXRX_BASEDIR, RK3399_TXRX_BASEDIR));
+ FIELD_PREP_WM16(RK3399_TXRX_BASEDIR, 1));
regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON23,
- HIWORD_UPDATE(0, RK3399_DSI1_FORCERXMODE));
+ FIELD_PREP_WM16(RK3399_DSI1_FORCERXMODE, 0));
regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON23,
- HIWORD_UPDATE(0, RK3399_DSI1_FORCETXSTOPMODE));
+ FIELD_PREP_WM16(RK3399_DSI1_FORCETXSTOPMODE, 0));
/* Disable lane turn around, which is ignored in receive mode */
regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON24,
- HIWORD_UPDATE(0, RK3399_TXRX_TURNREQUEST));
+ FIELD_PREP_WM16(RK3399_TXRX_TURNREQUEST, 0));
regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON23,
- HIWORD_UPDATE(RK3399_DSI1_TURNDISABLE,
- RK3399_DSI1_TURNDISABLE));
+ FIELD_PREP_WM16(RK3399_DSI1_TURNDISABLE, 0xf));
usleep_range(100, 150);
dsi_write(dsi, DSI_PHY_TST_CTRL0, PHY_TESTCLK | PHY_UNTESTCLR);
@@ -1581,8 +1578,8 @@ static int rk3399_dphy_tx1rx1_power_on(struct phy *phy)
/* Enable dphy lanes */
regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON23,
- HIWORD_UPDATE(GENMASK(dsi->dphy_config.lanes - 1, 0),
- RK3399_DSI1_ENABLE));
+ FIELD_PREP_WM16(RK3399_DSI1_ENABLE,
+ GENMASK(dsi->dphy_config.lanes - 1, 0)));
usleep_range(100, 150);
@@ -1594,7 +1591,7 @@ static int rk3399_dphy_tx1rx1_power_off(struct phy *phy)
struct dw_mipi_dsi_rockchip *dsi = phy_get_drvdata(phy);
regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON23,
- HIWORD_UPDATE(0, RK3399_DSI1_ENABLE));
+ FIELD_PREP_WM16(RK3399_DSI1_ENABLE, 0));
return 0;
}
@@ -1603,15 +1600,14 @@ static const struct rockchip_dw_dsi_chip_data rk3399_chip_data[] = {
{
.reg = 0xff960000,
.lcdsel_grf_reg = RK3399_GRF_SOC_CON20,
- .lcdsel_big = HIWORD_UPDATE(0, RK3399_DSI0_LCDC_SEL),
- .lcdsel_lit = HIWORD_UPDATE(RK3399_DSI0_LCDC_SEL,
- RK3399_DSI0_LCDC_SEL),
+ .lcdsel_big = FIELD_PREP_WM16_CONST(RK3399_DSI0_LCDC_SEL, 0),
+ .lcdsel_lit = FIELD_PREP_WM16_CONST(RK3399_DSI0_LCDC_SEL, 1),
.lanecfg1_grf_reg = RK3399_GRF_SOC_CON22,
- .lanecfg1 = HIWORD_UPDATE(0, RK3399_DSI0_TURNREQUEST |
- RK3399_DSI0_TURNDISABLE |
- RK3399_DSI0_FORCETXSTOPMODE |
- RK3399_DSI0_FORCERXMODE),
+ .lanecfg1 = FIELD_PREP_WM16_CONST((RK3399_DSI0_TURNREQUEST |
+ RK3399_DSI0_TURNDISABLE |
+ RK3399_DSI0_FORCETXSTOPMODE |
+ RK3399_DSI0_FORCERXMODE), 0),
.flags = DW_MIPI_NEEDS_PHY_CFG_CLK | DW_MIPI_NEEDS_GRF_CLK,
.max_data_lanes = 4,
@@ -1619,25 +1615,23 @@ static const struct rockchip_dw_dsi_chip_data rk3399_chip_data[] = {
{
.reg = 0xff968000,
.lcdsel_grf_reg = RK3399_GRF_SOC_CON20,
- .lcdsel_big = HIWORD_UPDATE(0, RK3399_DSI1_LCDC_SEL),
- .lcdsel_lit = HIWORD_UPDATE(RK3399_DSI1_LCDC_SEL,
- RK3399_DSI1_LCDC_SEL),
+ .lcdsel_big = FIELD_PREP_WM16_CONST(RK3399_DSI1_LCDC_SEL, 0),
+ .lcdsel_lit = FIELD_PREP_WM16_CONST(RK3399_DSI1_LCDC_SEL, 1),
+
.lanecfg1_grf_reg = RK3399_GRF_SOC_CON23,
- .lanecfg1 = HIWORD_UPDATE(0, RK3399_DSI1_TURNDISABLE |
- RK3399_DSI1_FORCETXSTOPMODE |
- RK3399_DSI1_FORCERXMODE |
- RK3399_DSI1_ENABLE),
+ .lanecfg1 = FIELD_PREP_WM16_CONST((RK3399_DSI1_TURNDISABLE |
+ RK3399_DSI1_FORCETXSTOPMODE |
+ RK3399_DSI1_FORCERXMODE |
+ RK3399_DSI1_ENABLE), 0),
.lanecfg2_grf_reg = RK3399_GRF_SOC_CON24,
- .lanecfg2 = HIWORD_UPDATE(RK3399_TXRX_MASTERSLAVEZ |
- RK3399_TXRX_ENABLECLK,
- RK3399_TXRX_MASTERSLAVEZ |
- RK3399_TXRX_ENABLECLK |
- RK3399_TXRX_BASEDIR),
+ .lanecfg2 = (FIELD_PREP_WM16_CONST(RK3399_TXRX_MASTERSLAVEZ, 1) |
+ FIELD_PREP_WM16_CONST(RK3399_TXRX_ENABLECLK, 1) |
+ FIELD_PREP_WM16_CONST(RK3399_TXRX_BASEDIR, 0)),
.enable_grf_reg = RK3399_GRF_SOC_CON23,
- .enable = HIWORD_UPDATE(RK3399_DSI1_ENABLE, RK3399_DSI1_ENABLE),
+ .enable = FIELD_PREP_WM16_CONST(RK3399_DSI1_ENABLE, 0xf),
.flags = DW_MIPI_NEEDS_PHY_CFG_CLK | DW_MIPI_NEEDS_GRF_CLK,
.max_data_lanes = 4,
@@ -1653,19 +1647,19 @@ static const struct rockchip_dw_dsi_chip_data rk3568_chip_data[] = {
{
.reg = 0xfe060000,
.lanecfg1_grf_reg = RK3568_GRF_VO_CON2,
- .lanecfg1 = HIWORD_UPDATE(0, RK3568_DSI0_SKEWCALHS |
- RK3568_DSI0_FORCETXSTOPMODE |
- RK3568_DSI0_TURNDISABLE |
- RK3568_DSI0_FORCERXMODE),
+ .lanecfg1 = (FIELD_PREP_WM16_CONST(RK3568_DSI0_SKEWCALHS, 0) |
+ FIELD_PREP_WM16_CONST(RK3568_DSI0_FORCETXSTOPMODE, 0) |
+ FIELD_PREP_WM16_CONST(RK3568_DSI0_TURNDISABLE, 0) |
+ FIELD_PREP_WM16_CONST(RK3568_DSI0_FORCERXMODE, 0)),
.max_data_lanes = 4,
},
{
.reg = 0xfe070000,
.lanecfg1_grf_reg = RK3568_GRF_VO_CON3,
- .lanecfg1 = HIWORD_UPDATE(0, RK3568_DSI1_SKEWCALHS |
- RK3568_DSI1_FORCETXSTOPMODE |
- RK3568_DSI1_TURNDISABLE |
- RK3568_DSI1_FORCERXMODE),
+ .lanecfg1 = (FIELD_PREP_WM16_CONST(RK3568_DSI1_SKEWCALHS, 0) |
+ FIELD_PREP_WM16_CONST(RK3568_DSI1_FORCETXSTOPMODE, 0) |
+ FIELD_PREP_WM16_CONST(RK3568_DSI1_TURNDISABLE, 0) |
+ FIELD_PREP_WM16_CONST(RK3568_DSI1_FORCERXMODE, 0)),
.max_data_lanes = 4,
},
{ /* sentinel */ }
@@ -1675,9 +1669,9 @@ static const struct rockchip_dw_dsi_chip_data rv1126_chip_data[] = {
{
.reg = 0xffb30000,
.lanecfg1_grf_reg = RV1126_GRF_DSIPHY_CON,
- .lanecfg1 = HIWORD_UPDATE(0, RV1126_DSI_TURNDISABLE |
- RV1126_DSI_FORCERXMODE |
- RV1126_DSI_FORCETXSTOPMODE),
+ .lanecfg1 = (FIELD_PREP_WM16_CONST(RV1126_DSI_TURNDISABLE, 0) |
+ FIELD_PREP_WM16_CONST(RV1126_DSI_FORCERXMODE, 0) |
+ FIELD_PREP_WM16_CONST(RV1126_DSI_FORCETXSTOPMODE, 0)),
.max_data_lanes = 4,
},
{ /* sentinel */ }
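
Background on the conversions above: Rockchip GRF registers use a write-mask convention where the upper 16 bits of the written word select which of the lower 16 bits actually take effect. The removed local HIWORD_UPDATE(val, mask) spelled this as (val | (mask) << 16) and required a pre-shifted val; FIELD_PREP_WM16() from <linux/hw_bitfield.h> takes the unshifted field value instead, which is why HIWORD_UPDATE(MASK, MASK) becomes FIELD_PREP_WM16(MASK, 1) for single-bit fields. A rough equivalence for a contiguous mask in the low 16 bits, inferred from the conversions in this patch (not the actual header implementation):

/* Conceptual model only. */
#define MY_FIELD_PREP_WM16(mask, val) \
	(FIELD_PREP((mask), (val)) | ((mask) << 16))

/* e.g. with RK3399_DSI1_FORCERXMODE = 0xf << 4:
 *   FIELD_PREP_WM16(RK3399_DSI1_FORCERXMODE, 0)   == 0x00f00000
 * and with RK3399_DSI1_TURNDISABLE = 0xf << 12:
 *   FIELD_PREP_WM16(RK3399_DSI1_TURNDISABLE, 0xf) == 0xf000f000
 */

The 0xf -> 0xfUL widening in the mask definitions exists for the same reason: (mask) << 16 on a plain-int mask that reaches bit 15 would shift into the sign bit, so the masks are made unsigned long before use.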
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi2-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi2-rockchip.c
index cdd490778756..0aea764e29b2 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi2-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi2-rockchip.c
@@ -437,6 +437,15 @@ static void dw_mipi_dsi2_rockchip_remove(struct platform_device *pdev)
dw_mipi_dsi2_remove(dsi2->dmd);
}
+static const struct dsigrf_reg rk3576_dsi_grf_reg_fields[MAX_FIELDS] = {
+ [TXREQCLKHS_EN] = { 0x0028, 1, 1 },
+ [GATING_EN] = { 0x0028, 0, 0 },
+ [IPI_SHUTDN] = { 0x0028, 3, 3 },
+ [IPI_COLORM] = { 0x0028, 2, 2 },
+ [IPI_COLOR_DEPTH] = { 0x0028, 8, 11 },
+ [IPI_FORMAT] = { 0x0028, 4, 7 },
+};
+
static const struct dsigrf_reg rk3588_dsi0_grf_reg_fields[MAX_FIELDS] = {
[TXREQCLKHS_EN] = { 0x0000, 11, 11 },
[GATING_EN] = { 0x0000, 10, 10 },
@@ -455,6 +464,15 @@ static const struct dsigrf_reg rk3588_dsi1_grf_reg_fields[MAX_FIELDS] = {
[IPI_FORMAT] = { 0x0004, 0, 3 },
};
+static const struct rockchip_dw_dsi2_chip_data rk3576_chip_data[] = {
+ {
+ .reg = 0x27d80000,
+ .grf_regs = rk3576_dsi_grf_reg_fields,
+ .max_bit_rate_per_lane = 2500000ULL,
+ },
+ { /* sentinel */ }
+};
+
static const struct rockchip_dw_dsi2_chip_data rk3588_chip_data[] = {
{
.reg = 0xfde20000,
@@ -470,6 +488,9 @@ static const struct rockchip_dw_dsi2_chip_data rk3588_chip_data[] = {
static const struct of_device_id dw_mipi_dsi2_rockchip_dt_ids[] = {
{
+ .compatible = "rockchip,rk3576-mipi-dsi2",
+ .data = &rk3576_chip_data,
+ }, {
.compatible = "rockchip,rk3588-mipi-dsi2",
.data = &rk3588_chip_data,
},
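
In the rk3576 table above each GRF field is described as { offset, lsb, msb } (compare rk3588's [IPI_FORMAT] = { 0x0004, 0, 3 }), with every field packed into one register at 0x0028 instead of the two registers rk3588 uses. A minimal sketch of how such a descriptor could be applied with a hiword-mask write; the struct layout is inferred from the initializers and the helper is hypothetical:

/* Inferred layout: register offset plus inclusive bit range. */
struct dsigrf_reg {
	u16 offset;
	u16 lsb;
	u16 msb;
};

static void grf_field_write(struct regmap *grf,
			    const struct dsigrf_reg *field, u32 val)
{
	u32 mask = GENMASK(field->msb, field->lsb);

	/* hiword write-mask: high half enables the bits being set */
	regmap_write(grf, field->offset,
		     (mask << 16) | ((val << field->lsb) & mask));
}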
diff --git a/drivers/gpu/drm/rockchip/dw_dp-rockchip.c b/drivers/gpu/drm/rockchip/dw_dp-rockchip.c
new file mode 100644
index 000000000000..25ab4e46301e
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/dw_dp-rockchip.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 Rockchip Electronics Co., Ltd.
+ *
+ * Author: Zhang Yubing <yubing.zhang@rock-chips.com>
+ * Author: Andy Yan <andy.yan@rock-chips.com>
+ */
+
+#include <linux/component.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <drm/bridge/dw_dp.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
+#include <drm/drm_of.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+
+#include <linux/media-bus-format.h>
+#include <linux/videodev2.h>
+
+#include "rockchip_drm_drv.h"
+#include "rockchip_drm_vop.h"
+
+struct rockchip_dw_dp {
+ struct dw_dp *base;
+ struct device *dev;
+ struct rockchip_encoder encoder;
+};
+
+static int dw_dp_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
+ struct drm_atomic_state *state = conn_state->state;
+ struct drm_display_info *di = &conn_state->connector->display_info;
+ struct drm_bridge *bridge = drm_bridge_chain_get_first_bridge(encoder);
+ struct drm_bridge_state *bridge_state = drm_atomic_get_new_bridge_state(state, bridge);
+ u32 bus_format = bridge_state->input_bus_cfg.format;
+
+ switch (bus_format) {
+ case MEDIA_BUS_FMT_UYYVYY10_0_5X30:
+ case MEDIA_BUS_FMT_UYYVYY8_0_5X24:
+ s->output_mode = ROCKCHIP_OUT_MODE_YUV420;
+ break;
+ case MEDIA_BUS_FMT_YUYV10_1X20:
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ s->output_mode = ROCKCHIP_OUT_MODE_S888_DUMMY;
+ break;
+ case MEDIA_BUS_FMT_RGB101010_1X30:
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ case MEDIA_BUS_FMT_RGB666_1X24_CPADHI:
+ case MEDIA_BUS_FMT_YUV10_1X30:
+ case MEDIA_BUS_FMT_YUV8_1X24:
+ default:
+ s->output_mode = ROCKCHIP_OUT_MODE_AAAA;
+ break;
+ }
+
+ s->output_type = DRM_MODE_CONNECTOR_DisplayPort;
+ s->bus_format = bus_format;
+ s->bus_flags = di->bus_flags;
+ s->color_space = V4L2_COLORSPACE_DEFAULT;
+
+ return 0;
+}
+
+static const struct drm_encoder_helper_funcs dw_dp_encoder_helper_funcs = {
+ .atomic_check = dw_dp_encoder_atomic_check,
+};
+
+static int dw_dp_rockchip_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dw_dp_plat_data plat_data = {};
+ struct drm_device *drm_dev = data;
+ struct rockchip_dw_dp *dp;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ int ret;
+
+ dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
+ if (!dp)
+ return -ENOMEM;
+
+ dp->dev = dev;
+ platform_set_drvdata(pdev, dp);
+
+ plat_data.max_link_rate = 810000;
+ encoder = &dp->encoder.encoder;
+ encoder->possible_crtcs = drm_of_find_possible_crtcs(drm_dev, dev->of_node);
+ rockchip_drm_encoder_set_crtc_endpoint_id(&dp->encoder, dev->of_node, 0, 0);
+
+ ret = drmm_encoder_init(drm_dev, encoder, NULL, DRM_MODE_ENCODER_TMDS, NULL);
+ if (ret)
+ return ret;
+ drm_encoder_helper_add(encoder, &dw_dp_encoder_helper_funcs);
+
+ dp->base = dw_dp_bind(dev, encoder, &plat_data);
+ if (IS_ERR(dp->base))
+ return PTR_ERR(dp->base);
+
+ connector = drm_bridge_connector_init(drm_dev, encoder);
+ if (IS_ERR(connector)) {
+ ret = PTR_ERR(connector);
+ return dev_err_probe(dev, ret, "Failed to init bridge connector\n");
+ }
+
+ drm_connector_attach_encoder(connector, encoder);
+
+ return 0;
+}
+
+static const struct component_ops dw_dp_rockchip_component_ops = {
+ .bind = dw_dp_rockchip_bind,
+};
+
+static int dw_dp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ return component_add(dev, &dw_dp_rockchip_component_ops);
+}
+
+static void dw_dp_remove(struct platform_device *pdev)
+{
+ struct rockchip_dw_dp *dp = platform_get_drvdata(pdev);
+
+ component_del(dp->dev, &dw_dp_rockchip_component_ops);
+}
+
+static const struct of_device_id dw_dp_of_match[] = {
+ { .compatible = "rockchip,rk3588-dp", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dw_dp_of_match);
+
+struct platform_driver dw_dp_driver = {
+ .probe = dw_dp_probe,
+ .remove = dw_dp_remove,
+ .driver = {
+ .name = "dw-dp",
+ .of_match_table = dw_dp_of_match,
+ },
+};
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index acb59b25d928..7b613997bb50 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -4,6 +4,7 @@
*/
#include <linux/clk.h>
+#include <linux/hw_bitfield.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -54,8 +55,6 @@
#define RK3568_HDMI_SDAIN_MSK BIT(15)
#define RK3568_HDMI_SCLIN_MSK BIT(14)
-#define HIWORD_UPDATE(val, mask) (val | (mask) << 16)
-
/**
* struct rockchip_hdmi_chip_data - splite the grf setting of kind of chips
* @lcdsel_grf_reg: grf register offset of lcdc select
@@ -355,17 +354,14 @@ static void dw_hdmi_rk3228_setup_hpd(struct dw_hdmi *dw_hdmi, void *data)
dw_hdmi_phy_setup_hpd(dw_hdmi, data);
- regmap_write(hdmi->regmap,
- RK3228_GRF_SOC_CON6,
- HIWORD_UPDATE(RK3228_HDMI_HPD_VSEL | RK3228_HDMI_SDA_VSEL |
- RK3228_HDMI_SCL_VSEL,
- RK3228_HDMI_HPD_VSEL | RK3228_HDMI_SDA_VSEL |
- RK3228_HDMI_SCL_VSEL));
-
- regmap_write(hdmi->regmap,
- RK3228_GRF_SOC_CON2,
- HIWORD_UPDATE(RK3228_HDMI_SDAIN_MSK | RK3228_HDMI_SCLIN_MSK,
- RK3228_HDMI_SDAIN_MSK | RK3228_HDMI_SCLIN_MSK));
+ regmap_write(hdmi->regmap, RK3228_GRF_SOC_CON6,
+ FIELD_PREP_WM16(RK3228_HDMI_HPD_VSEL, 1) |
+ FIELD_PREP_WM16(RK3228_HDMI_SDA_VSEL, 1) |
+ FIELD_PREP_WM16(RK3228_HDMI_SCL_VSEL, 1));
+
+ regmap_write(hdmi->regmap, RK3228_GRF_SOC_CON2,
+ FIELD_PREP_WM16(RK3228_HDMI_SDAIN_MSK, 1) |
+ FIELD_PREP_WM16(RK3228_HDMI_SCLIN_MSK, 1));
}
static enum drm_connector_status
@@ -377,15 +373,13 @@ dw_hdmi_rk3328_read_hpd(struct dw_hdmi *dw_hdmi, void *data)
status = dw_hdmi_phy_read_hpd(dw_hdmi, data);
if (status == connector_status_connected)
- regmap_write(hdmi->regmap,
- RK3328_GRF_SOC_CON4,
- HIWORD_UPDATE(RK3328_HDMI_SDA_5V | RK3328_HDMI_SCL_5V,
- RK3328_HDMI_SDA_5V | RK3328_HDMI_SCL_5V));
+ regmap_write(hdmi->regmap, RK3328_GRF_SOC_CON4,
+ FIELD_PREP_WM16(RK3328_HDMI_SDA_5V, 1) |
+ FIELD_PREP_WM16(RK3328_HDMI_SCL_5V, 1));
else
- regmap_write(hdmi->regmap,
- RK3328_GRF_SOC_CON4,
- HIWORD_UPDATE(0, RK3328_HDMI_SDA_5V |
- RK3328_HDMI_SCL_5V));
+ regmap_write(hdmi->regmap, RK3328_GRF_SOC_CON4,
+ FIELD_PREP_WM16(RK3328_HDMI_SDA_5V, 0) |
+ FIELD_PREP_WM16(RK3328_HDMI_SCL_5V, 0));
return status;
}
@@ -396,21 +390,21 @@ static void dw_hdmi_rk3328_setup_hpd(struct dw_hdmi *dw_hdmi, void *data)
dw_hdmi_phy_setup_hpd(dw_hdmi, data);
/* Enable and map pins to 3V grf-controlled io-voltage */
- regmap_write(hdmi->regmap,
- RK3328_GRF_SOC_CON4,
- HIWORD_UPDATE(0, RK3328_HDMI_HPD_SARADC | RK3328_HDMI_CEC_5V |
- RK3328_HDMI_SDA_5V | RK3328_HDMI_SCL_5V |
- RK3328_HDMI_HPD_5V));
- regmap_write(hdmi->regmap,
- RK3328_GRF_SOC_CON3,
- HIWORD_UPDATE(0, RK3328_HDMI_SDA5V_GRF | RK3328_HDMI_SCL5V_GRF |
- RK3328_HDMI_HPD5V_GRF |
- RK3328_HDMI_CEC5V_GRF));
- regmap_write(hdmi->regmap,
- RK3328_GRF_SOC_CON2,
- HIWORD_UPDATE(RK3328_HDMI_SDAIN_MSK | RK3328_HDMI_SCLIN_MSK,
- RK3328_HDMI_SDAIN_MSK | RK3328_HDMI_SCLIN_MSK |
- RK3328_HDMI_HPD_IOE));
+ regmap_write(hdmi->regmap, RK3328_GRF_SOC_CON4,
+ FIELD_PREP_WM16(RK3328_HDMI_HPD_SARADC, 0) |
+ FIELD_PREP_WM16(RK3328_HDMI_CEC_5V, 0) |
+ FIELD_PREP_WM16(RK3328_HDMI_SDA_5V, 0) |
+ FIELD_PREP_WM16(RK3328_HDMI_SCL_5V, 0) |
+ FIELD_PREP_WM16(RK3328_HDMI_HPD_5V, 0));
+ regmap_write(hdmi->regmap, RK3328_GRF_SOC_CON3,
+ FIELD_PREP_WM16(RK3328_HDMI_SDA5V_GRF, 0) |
+ FIELD_PREP_WM16(RK3328_HDMI_SCL5V_GRF, 0) |
+ FIELD_PREP_WM16(RK3328_HDMI_HPD5V_GRF, 0) |
+ FIELD_PREP_WM16(RK3328_HDMI_CEC5V_GRF, 0));
+ regmap_write(hdmi->regmap, RK3328_GRF_SOC_CON2,
+ FIELD_PREP_WM16(RK3328_HDMI_SDAIN_MSK, 1) |
+ FIELD_PREP_WM16(RK3328_HDMI_SCLIN_MSK, 1) |
+ FIELD_PREP_WM16(RK3328_HDMI_HPD_IOE, 0));
dw_hdmi_rk3328_read_hpd(dw_hdmi, data);
}
@@ -438,8 +432,8 @@ static const struct dw_hdmi_plat_data rk3228_hdmi_drv_data = {
static struct rockchip_hdmi_chip_data rk3288_chip_data = {
.lcdsel_grf_reg = RK3288_GRF_SOC_CON6,
- .lcdsel_big = HIWORD_UPDATE(0, RK3288_HDMI_LCDC_SEL),
- .lcdsel_lit = HIWORD_UPDATE(RK3288_HDMI_LCDC_SEL, RK3288_HDMI_LCDC_SEL),
+ .lcdsel_big = FIELD_PREP_WM16_CONST(RK3288_HDMI_LCDC_SEL, 0),
+ .lcdsel_lit = FIELD_PREP_WM16_CONST(RK3288_HDMI_LCDC_SEL, 1),
.max_tmds_clock = 340000,
};
@@ -475,8 +469,8 @@ static const struct dw_hdmi_plat_data rk3328_hdmi_drv_data = {
static struct rockchip_hdmi_chip_data rk3399_chip_data = {
.lcdsel_grf_reg = RK3399_GRF_SOC_CON20,
- .lcdsel_big = HIWORD_UPDATE(0, RK3399_HDMI_LCDC_SEL),
- .lcdsel_lit = HIWORD_UPDATE(RK3399_HDMI_LCDC_SEL, RK3399_HDMI_LCDC_SEL),
+ .lcdsel_big = FIELD_PREP_WM16_CONST(RK3399_HDMI_LCDC_SEL, 0),
+ .lcdsel_lit = FIELD_PREP_WM16_CONST(RK3399_HDMI_LCDC_SEL, 1),
.max_tmds_clock = 594000,
};
@@ -589,10 +583,8 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
if (hdmi->chip_data == &rk3568_chip_data) {
regmap_write(hdmi->regmap, RK3568_GRF_VO_CON1,
- HIWORD_UPDATE(RK3568_HDMI_SDAIN_MSK |
- RK3568_HDMI_SCLIN_MSK,
- RK3568_HDMI_SDAIN_MSK |
- RK3568_HDMI_SCLIN_MSK));
+ FIELD_PREP_WM16(RK3568_HDMI_SDAIN_MSK, 1) |
+ FIELD_PREP_WM16(RK3568_HDMI_SCLIN_MSK, 1));
}
drm_encoder_helper_add(encoder, &dw_hdmi_rockchip_encoder_helper_funcs);
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c
index 7d531b6f4c09..ed6e8f036f4b 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c
@@ -9,6 +9,7 @@
#include <linux/clk.h>
#include <linux/gpio/consumer.h>
+#include <linux/hw_bitfield.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -66,7 +67,8 @@
#define RK3588_HDMI1_HPD_INT_MSK BIT(15)
#define RK3588_HDMI1_HPD_INT_CLR BIT(14)
#define RK3588_GRF_SOC_CON7 0x031c
-#define RK3588_SET_HPD_PATH_MASK GENMASK(13, 12)
+#define RK3588_HPD_HDMI0_IO_EN_MASK BIT(12)
+#define RK3588_HPD_HDMI1_IO_EN_MASK BIT(13)
#define RK3588_GRF_SOC_STATUS1 0x0384
#define RK3588_HDMI0_LEVEL_INT BIT(16)
#define RK3588_HDMI1_LEVEL_INT BIT(24)
@@ -80,7 +82,6 @@
#define RK3588_HDMI0_GRANT_SEL BIT(10)
#define RK3588_HDMI1_GRANT_SEL BIT(12)
-#define HIWORD_UPDATE(val, mask) ((val) | (mask) << 16)
#define HOTPLUG_DEBOUNCE_MS 150
#define MAX_HDMI_PORT_NUM 2
@@ -185,11 +186,11 @@ static void dw_hdmi_qp_rk3588_setup_hpd(struct dw_hdmi_qp *dw_hdmi, void *data)
u32 val;
if (hdmi->port_id)
- val = HIWORD_UPDATE(RK3588_HDMI1_HPD_INT_CLR,
- RK3588_HDMI1_HPD_INT_CLR | RK3588_HDMI1_HPD_INT_MSK);
+ val = (FIELD_PREP_WM16(RK3588_HDMI1_HPD_INT_CLR, 1) |
+ FIELD_PREP_WM16(RK3588_HDMI1_HPD_INT_MSK, 0));
else
- val = HIWORD_UPDATE(RK3588_HDMI0_HPD_INT_CLR,
- RK3588_HDMI0_HPD_INT_CLR | RK3588_HDMI0_HPD_INT_MSK);
+ val = (FIELD_PREP_WM16(RK3588_HDMI0_HPD_INT_CLR, 1) |
+ FIELD_PREP_WM16(RK3588_HDMI0_HPD_INT_MSK, 0));
regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON2, val);
}
@@ -218,8 +219,8 @@ static void dw_hdmi_qp_rk3576_setup_hpd(struct dw_hdmi_qp *dw_hdmi, void *data)
struct rockchip_hdmi_qp *hdmi = (struct rockchip_hdmi_qp *)data;
u32 val;
- val = HIWORD_UPDATE(RK3576_HDMI_HPD_INT_CLR,
- RK3576_HDMI_HPD_INT_CLR | RK3576_HDMI_HPD_INT_MSK);
+ val = (FIELD_PREP_WM16(RK3576_HDMI_HPD_INT_CLR, 1) |
+ FIELD_PREP_WM16(RK3576_HDMI_HPD_INT_MSK, 0));
regmap_write(hdmi->regmap, RK3576_IOC_MISC_CON0, val);
regmap_write(hdmi->regmap, 0xa404, 0xffff0102);
@@ -254,7 +255,7 @@ static irqreturn_t dw_hdmi_qp_rk3576_hardirq(int irq, void *dev_id)
regmap_read(hdmi->regmap, RK3576_IOC_HDMI_HPD_STATUS, &intr_stat);
if (intr_stat) {
- val = HIWORD_UPDATE(RK3576_HDMI_HPD_INT_MSK, RK3576_HDMI_HPD_INT_MSK);
+ val = FIELD_PREP_WM16(RK3576_HDMI_HPD_INT_MSK, 1);
regmap_write(hdmi->regmap, RK3576_IOC_MISC_CON0, val);
return IRQ_WAKE_THREAD;
@@ -273,12 +274,12 @@ static irqreturn_t dw_hdmi_qp_rk3576_irq(int irq, void *dev_id)
if (!intr_stat)
return IRQ_NONE;
- val = HIWORD_UPDATE(RK3576_HDMI_HPD_INT_CLR, RK3576_HDMI_HPD_INT_CLR);
+ val = FIELD_PREP_WM16(RK3576_HDMI_HPD_INT_CLR, 1);
regmap_write(hdmi->regmap, RK3576_IOC_MISC_CON0, val);
mod_delayed_work(system_wq, &hdmi->hpd_work,
msecs_to_jiffies(HOTPLUG_DEBOUNCE_MS));
- val = HIWORD_UPDATE(0, RK3576_HDMI_HPD_INT_MSK);
+ val = FIELD_PREP_WM16(RK3576_HDMI_HPD_INT_MSK, 0);
regmap_write(hdmi->regmap, RK3576_IOC_MISC_CON0, val);
return IRQ_HANDLED;
@@ -293,11 +294,9 @@ static irqreturn_t dw_hdmi_qp_rk3588_hardirq(int irq, void *dev_id)
if (intr_stat) {
if (hdmi->port_id)
- val = HIWORD_UPDATE(RK3588_HDMI1_HPD_INT_MSK,
- RK3588_HDMI1_HPD_INT_MSK);
+ val = FIELD_PREP_WM16(RK3588_HDMI1_HPD_INT_MSK, 1);
else
- val = HIWORD_UPDATE(RK3588_HDMI0_HPD_INT_MSK,
- RK3588_HDMI0_HPD_INT_MSK);
+ val = FIELD_PREP_WM16(RK3588_HDMI0_HPD_INT_MSK, 1);
regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON2, val);
return IRQ_WAKE_THREAD;
}
@@ -315,20 +314,18 @@ static irqreturn_t dw_hdmi_qp_rk3588_irq(int irq, void *dev_id)
return IRQ_NONE;
if (hdmi->port_id)
- val = HIWORD_UPDATE(RK3588_HDMI1_HPD_INT_CLR,
- RK3588_HDMI1_HPD_INT_CLR);
+ val = FIELD_PREP_WM16(RK3588_HDMI1_HPD_INT_CLR, 1);
else
- val = HIWORD_UPDATE(RK3588_HDMI0_HPD_INT_CLR,
- RK3588_HDMI0_HPD_INT_CLR);
+ val = FIELD_PREP_WM16(RK3588_HDMI0_HPD_INT_CLR, 1);
regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON2, val);
mod_delayed_work(system_wq, &hdmi->hpd_work,
msecs_to_jiffies(HOTPLUG_DEBOUNCE_MS));
if (hdmi->port_id)
- val |= HIWORD_UPDATE(0, RK3588_HDMI1_HPD_INT_MSK);
+ val |= FIELD_PREP_WM16(RK3588_HDMI1_HPD_INT_MSK, 0);
else
- val |= HIWORD_UPDATE(0, RK3588_HDMI0_HPD_INT_MSK);
+ val |= FIELD_PREP_WM16(RK3588_HDMI0_HPD_INT_MSK, 0);
regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON2, val);
return IRQ_HANDLED;
@@ -338,14 +335,14 @@ static void dw_hdmi_qp_rk3576_io_init(struct rockchip_hdmi_qp *hdmi)
{
u32 val;
- val = HIWORD_UPDATE(RK3576_SCLIN_MASK, RK3576_SCLIN_MASK) |
- HIWORD_UPDATE(RK3576_SDAIN_MASK, RK3576_SDAIN_MASK) |
- HIWORD_UPDATE(RK3576_HDMI_GRANT_SEL, RK3576_HDMI_GRANT_SEL) |
- HIWORD_UPDATE(RK3576_I2S_SEL_MASK, RK3576_I2S_SEL_MASK);
+ val = FIELD_PREP_WM16(RK3576_SCLIN_MASK, 1) |
+ FIELD_PREP_WM16(RK3576_SDAIN_MASK, 1) |
+ FIELD_PREP_WM16(RK3576_HDMI_GRANT_SEL, 1) |
+ FIELD_PREP_WM16(RK3576_I2S_SEL_MASK, 1);
regmap_write(hdmi->vo_regmap, RK3576_VO0_GRF_SOC_CON14, val);
- val = HIWORD_UPDATE(0, RK3576_HDMI_HPD_INT_MSK);
+ val = FIELD_PREP_WM16(RK3576_HDMI_HPD_INT_MSK, 0);
regmap_write(hdmi->regmap, RK3576_IOC_MISC_CON0, val);
}
@@ -353,27 +350,28 @@ static void dw_hdmi_qp_rk3588_io_init(struct rockchip_hdmi_qp *hdmi)
{
u32 val;
- val = HIWORD_UPDATE(RK3588_SCLIN_MASK, RK3588_SCLIN_MASK) |
- HIWORD_UPDATE(RK3588_SDAIN_MASK, RK3588_SDAIN_MASK) |
- HIWORD_UPDATE(RK3588_MODE_MASK, RK3588_MODE_MASK) |
- HIWORD_UPDATE(RK3588_I2S_SEL_MASK, RK3588_I2S_SEL_MASK);
+ val = FIELD_PREP_WM16(RK3588_SCLIN_MASK, 1) |
+ FIELD_PREP_WM16(RK3588_SDAIN_MASK, 1) |
+ FIELD_PREP_WM16(RK3588_MODE_MASK, 1) |
+ FIELD_PREP_WM16(RK3588_I2S_SEL_MASK, 1);
regmap_write(hdmi->vo_regmap,
hdmi->port_id ? RK3588_GRF_VO1_CON6 : RK3588_GRF_VO1_CON3,
val);
- val = HIWORD_UPDATE(RK3588_SET_HPD_PATH_MASK, RK3588_SET_HPD_PATH_MASK);
+ val = FIELD_PREP_WM16(RK3588_HPD_HDMI0_IO_EN_MASK, 1) |
+ FIELD_PREP_WM16(RK3588_HPD_HDMI1_IO_EN_MASK, 1);
regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON7, val);
if (hdmi->port_id)
- val = HIWORD_UPDATE(RK3588_HDMI1_GRANT_SEL, RK3588_HDMI1_GRANT_SEL);
+ val = FIELD_PREP_WM16(RK3588_HDMI1_GRANT_SEL, 1);
else
- val = HIWORD_UPDATE(RK3588_HDMI0_GRANT_SEL, RK3588_HDMI0_GRANT_SEL);
+ val = FIELD_PREP_WM16(RK3588_HDMI0_GRANT_SEL, 1);
regmap_write(hdmi->vo_regmap, RK3588_GRF_VO1_CON9, val);
if (hdmi->port_id)
- val = HIWORD_UPDATE(RK3588_HDMI1_HPD_INT_MSK, RK3588_HDMI1_HPD_INT_MSK);
+ val = FIELD_PREP_WM16(RK3588_HDMI1_HPD_INT_MSK, 1);
else
- val = HIWORD_UPDATE(RK3588_HDMI0_HPD_INT_MSK, RK3588_HDMI0_HPD_INT_MSK);
+ val = FIELD_PREP_WM16(RK3588_HDMI0_HPD_INT_MSK, 1);
regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON2, val);
}
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
index 1ab3ad4bde9e..f24827dc1421 100644
--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
@@ -10,6 +10,7 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/hdmi.h>
+#include <linux/hw_bitfield.h>
#include <linux/mfd/syscon.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
@@ -382,8 +383,6 @@ enum {
#define HDMI_CEC_BUSFREETIME_H 0xdd
#define HDMI_CEC_LOGICADDR 0xde
-#define HIWORD_UPDATE(val, mask) ((val) | (mask) << 16)
-
#define RK3036_GRF_SOC_CON2 0x148
#define RK3036_HDMI_PHSYNC BIT(4)
#define RK3036_HDMI_PVSYNC BIT(5)
@@ -756,10 +755,10 @@ static int inno_hdmi_config_video_timing(struct inno_hdmi *hdmi,
int value, psync;
if (hdmi->variant->dev_type == RK3036_HDMI) {
- psync = mode->flags & DRM_MODE_FLAG_PHSYNC ? RK3036_HDMI_PHSYNC : 0;
- value = HIWORD_UPDATE(psync, RK3036_HDMI_PHSYNC);
- psync = mode->flags & DRM_MODE_FLAG_PVSYNC ? RK3036_HDMI_PVSYNC : 0;
- value |= HIWORD_UPDATE(psync, RK3036_HDMI_PVSYNC);
+ psync = mode->flags & DRM_MODE_FLAG_PHSYNC ? 1 : 0;
+ value = FIELD_PREP_WM16(RK3036_HDMI_PHSYNC, psync);
+ psync = mode->flags & DRM_MODE_FLAG_PVSYNC ? 1 : 0;
+ value |= FIELD_PREP_WM16(RK3036_HDMI_PVSYNC, psync);
regmap_write(hdmi->grf, RK3036_GRF_SOC_CON2, value);
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 180fad5d49ad..eb77bde9f628 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -529,6 +529,7 @@ static int __init rockchip_drm_init(void)
ADD_ROCKCHIP_SUB_DRIVER(rockchip_dp_driver,
CONFIG_ROCKCHIP_ANALOGIX_DP);
ADD_ROCKCHIP_SUB_DRIVER(cdn_dp_driver, CONFIG_ROCKCHIP_CDN_DP);
+ ADD_ROCKCHIP_SUB_DRIVER(dw_dp_driver, CONFIG_ROCKCHIP_DW_DP);
ADD_ROCKCHIP_SUB_DRIVER(dw_hdmi_rockchip_pltfm_driver,
CONFIG_ROCKCHIP_DW_HDMI);
ADD_ROCKCHIP_SUB_DRIVER(dw_hdmi_qp_rockchip_pltfm_driver,
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
index c183e82a42a5..2e86ad00979c 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -87,6 +87,7 @@ int rockchip_drm_encoder_set_crtc_endpoint_id(struct rockchip_encoder *rencoder,
struct device_node *np, int port, int reg);
int rockchip_drm_endpoint_is_subdriver(struct device_node *ep);
extern struct platform_driver cdn_dp_driver;
+extern struct platform_driver dw_dp_driver;
extern struct platform_driver dw_hdmi_rockchip_pltfm_driver;
extern struct platform_driver dw_hdmi_qp_rockchip_pltfm_driver;
extern struct platform_driver dw_mipi_dsi_rockchip_driver;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
index fa5c56f16047..9124191899ba 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
@@ -33,7 +33,6 @@
#define WIN_FEATURE_AFBDC BIT(0)
#define WIN_FEATURE_CLUSTER BIT(1)
-#define HIWORD_UPDATE(v, h, l) ((GENMASK(h, l) << 16) | ((v) << (l)))
/*
* the delay number of a window in different mode.
*/
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.h b/drivers/gpu/drm/rockchip/rockchip_lvds.h
index ca83d7b6bea7..2d92447d819b 100644
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.h
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.h
@@ -9,6 +9,9 @@
#ifndef _ROCKCHIP_LVDS_
#define _ROCKCHIP_LVDS_
+#include <linux/bits.h>
+#include <linux/hw_bitfield.h>
+
#define RK3288_LVDS_CH0_REG0 0x00
#define RK3288_LVDS_CH0_REG0_LVDS_EN BIT(7)
#define RK3288_LVDS_CH0_REG0_TTL_EN BIT(6)
@@ -106,18 +109,16 @@
#define LVDS_VESA_18 2
#define LVDS_JEIDA_18 3
-#define HIWORD_UPDATE(v, h, l) ((GENMASK(h, l) << 16) | ((v) << (l)))
-
#define PX30_LVDS_GRF_PD_VO_CON0 0x434
-#define PX30_LVDS_TIE_CLKS(val) HIWORD_UPDATE(val, 8, 8)
-#define PX30_LVDS_INVERT_CLKS(val) HIWORD_UPDATE(val, 9, 9)
-#define PX30_LVDS_INVERT_DCLK(val) HIWORD_UPDATE(val, 5, 5)
+#define PX30_LVDS_TIE_CLKS(val) FIELD_PREP_WM16(BIT(8), (val))
+#define PX30_LVDS_INVERT_CLKS(val) FIELD_PREP_WM16(BIT(9), (val))
+#define PX30_LVDS_INVERT_DCLK(val) FIELD_PREP_WM16(BIT(5), (val))
#define PX30_LVDS_GRF_PD_VO_CON1 0x438
-#define PX30_LVDS_FORMAT(val) HIWORD_UPDATE(val, 14, 13)
-#define PX30_LVDS_MODE_EN(val) HIWORD_UPDATE(val, 12, 12)
-#define PX30_LVDS_MSBSEL(val) HIWORD_UPDATE(val, 11, 11)
-#define PX30_LVDS_P2S_EN(val) HIWORD_UPDATE(val, 6, 6)
-#define PX30_LVDS_VOP_SEL(val) HIWORD_UPDATE(val, 1, 1)
+#define PX30_LVDS_FORMAT(val) FIELD_PREP_WM16(GENMASK(14, 13), (val))
+#define PX30_LVDS_MODE_EN(val) FIELD_PREP_WM16(BIT(12), (val))
+#define PX30_LVDS_MSBSEL(val) FIELD_PREP_WM16(BIT(11), (val))
+#define PX30_LVDS_P2S_EN(val) FIELD_PREP_WM16(BIT(6), (val))
+#define PX30_LVDS_VOP_SEL(val) FIELD_PREP_WM16(BIT(1), (val))
#endif /* _ROCKCHIP_LVDS_ */
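
With the FIELD_PREP_WM16(GENMASK()/BIT(), val) form, the PX30 macros above now take the raw field value rather than a pre-shifted one, and the bit positions are visible at the definition site instead of hiding in a helper's (v, h, l) arguments. An illustrative combination of several of them in one write-masked GRF update (not taken from the driver):

/* Illustrative only: VESA-18 format, mode + P2S enabled, VOP big. */
regmap_write(lvds->grf, PX30_LVDS_GRF_PD_VO_CON1,
	     PX30_LVDS_FORMAT(LVDS_VESA_18) |
	     PX30_LVDS_MODE_EN(1) |
	     PX30_LVDS_P2S_EN(1) |
	     PX30_LVDS_VOP_SEL(0));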
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
index 45c5e3987813..38c49030c7ab 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
@@ -7,6 +7,7 @@
#include <linux/bitfield.h>
#include <linux/kernel.h>
#include <linux/component.h>
+#include <linux/hw_bitfield.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/of.h>
@@ -1695,8 +1696,9 @@ static unsigned long rk3588_set_intf_mux(struct vop2_video_port *vp, int id, u32
die |= RK3588_SYS_DSP_INFACE_EN_HDMI0 |
FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_EDP_HDMI0_MUX, vp->id);
val = rk3588_get_hdmi_pol(polflags);
- regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, HIWORD_UPDATE(1, 1, 1));
- regmap_write(vop2->vo1_grf, RK3588_GRF_VO1_CON0, HIWORD_UPDATE(val, 6, 5));
+ regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, FIELD_PREP_WM16(BIT(1), 1));
+ regmap_write(vop2->vo1_grf, RK3588_GRF_VO1_CON0,
+ FIELD_PREP_WM16(GENMASK(6, 5), val));
break;
case ROCKCHIP_VOP2_EP_HDMI1:
div &= ~RK3588_DSP_IF_EDP_HDMI1_DCLK_DIV;
@@ -1707,8 +1709,9 @@ static unsigned long rk3588_set_intf_mux(struct vop2_video_port *vp, int id, u32
die |= RK3588_SYS_DSP_INFACE_EN_HDMI1 |
FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_EDP_HDMI1_MUX, vp->id);
val = rk3588_get_hdmi_pol(polflags);
- regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, HIWORD_UPDATE(1, 4, 4));
- regmap_write(vop2->vo1_grf, RK3588_GRF_VO1_CON0, HIWORD_UPDATE(val, 8, 7));
+ regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, FIELD_PREP_WM16(BIT(4), 1));
+ regmap_write(vop2->vo1_grf, RK3588_GRF_VO1_CON0,
+ FIELD_PREP_WM16(GENMASK(8, 7), val));
break;
case ROCKCHIP_VOP2_EP_EDP0:
div &= ~RK3588_DSP_IF_EDP_HDMI0_DCLK_DIV;
@@ -1718,7 +1721,7 @@ static unsigned long rk3588_set_intf_mux(struct vop2_video_port *vp, int id, u32
die &= ~RK3588_SYS_DSP_INFACE_EN_EDP_HDMI0_MUX;
die |= RK3588_SYS_DSP_INFACE_EN_EDP0 |
FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_EDP_HDMI0_MUX, vp->id);
- regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, HIWORD_UPDATE(1, 0, 0));
+ regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, FIELD_PREP_WM16(BIT(0), 1));
break;
case ROCKCHIP_VOP2_EP_EDP1:
div &= ~RK3588_DSP_IF_EDP_HDMI1_DCLK_DIV;
@@ -1728,7 +1731,7 @@ static unsigned long rk3588_set_intf_mux(struct vop2_video_port *vp, int id, u32
die &= ~RK3588_SYS_DSP_INFACE_EN_EDP_HDMI1_MUX;
die |= RK3588_SYS_DSP_INFACE_EN_EDP1 |
FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_EDP_HDMI1_MUX, vp->id);
- regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, HIWORD_UPDATE(1, 3, 3));
+ regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, FIELD_PREP_WM16(BIT(3), 1));
break;
case ROCKCHIP_VOP2_EP_MIPI0:
div &= ~RK3588_DSP_IF_MIPI0_PCLK_DIV;
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 3d06f72531ba..5a4697f636f2 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -285,9 +285,9 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
return 0;
sched = entity->rq->sched;
- /**
- * The client will not queue more IBs during this fini, consume existing
- * queued IBs or discard them on SIGKILL
+ /*
+ * The client will not queue more jobs during this fini - consume
+ * existing queued ones, or discard them on SIGKILL.
*/
if (current->flags & PF_EXITING) {
if (timeout)
@@ -300,7 +300,7 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
drm_sched_entity_is_idle(entity));
}
- /* For killed process disable any more IBs enqueue right now */
+ /* For a killed process disallow further enqueueing of jobs. */
last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
if ((!last_user || last_user == current->group_leader) &&
(current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
@@ -324,9 +324,9 @@ EXPORT_SYMBOL(drm_sched_entity_flush);
void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
/*
- * If consumption of existing IBs wasn't completed. Forcefully remove
- * them here. Also makes sure that the scheduler won't touch this entity
- * any more.
+ * If consumption of existing jobs wasn't completed, forcefully remove
+ * them. Also makes sure that the scheduler won't touch this entity any
+ * more.
*/
drm_sched_entity_kill(entity);
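
The two reworded comments above describe the two halves of entity teardown: drm_sched_entity_flush() waits (with an optional timeout) for already-queued jobs unless the process is exiting on SIGKILL, while drm_sched_entity_fini() forcefully kills whatever is left. Drivers normally get both in the right order through drm_sched_entity_destroy(); a minimal teardown sketch, with the context struct being a placeholder:

/* Sketch: tear entities down before their scheduler. */
static void my_ctx_fini(struct my_ctx *ctx)
{
	/* flush queued jobs (or kill them on SIGKILL), then fini */
	drm_sched_entity_destroy(&ctx->entity);

	/* only now is it safe to tear down the scheduler */
	drm_sched_fini(&ctx->sched);
}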
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index e2cda28a1af4..46119aacb809 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -349,37 +349,16 @@ static void drm_sched_run_job_queue(struct drm_gpu_scheduler *sched)
}
/**
- * __drm_sched_run_free_queue - enqueue free-job work
+ * drm_sched_run_free_queue - enqueue free-job work
* @sched: scheduler instance
*/
-static void __drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
+static void drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
{
if (!READ_ONCE(sched->pause_submit))
queue_work(sched->submit_wq, &sched->work_free_job);
}
/**
- * drm_sched_run_free_queue - enqueue free-job work if ready
- * @sched: scheduler instance
- */
-static void drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
-{
- struct drm_sched_job *job;
-
- job = list_first_entry_or_null(&sched->pending_list,
- struct drm_sched_job, list);
- if (job && dma_fence_is_signaled(&job->s_fence->finished))
- __drm_sched_run_free_queue(sched);
-}
-
-static void drm_sched_run_free_queue_unlocked(struct drm_gpu_scheduler *sched)
-{
- spin_lock(&sched->job_list_lock);
- drm_sched_run_free_queue(sched);
- spin_unlock(&sched->job_list_lock);
-}
-
-/**
* drm_sched_job_done - complete a job
* @s_job: pointer to the job which is done
*
@@ -398,7 +377,7 @@ static void drm_sched_job_done(struct drm_sched_job *s_job, int result)
dma_fence_get(&s_fence->finished);
drm_sched_fence_finished(s_fence, result);
dma_fence_put(&s_fence->finished);
- __drm_sched_run_free_queue(sched);
+ drm_sched_run_free_queue(sched);
}
/**
@@ -1134,12 +1113,16 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
* drm_sched_get_finished_job - fetch the next finished job to be destroyed
*
* @sched: scheduler instance
+ * @have_more: set when more finished jobs are on the pending list
+ *
+ * Informs the caller through @have_more whether there are more finished jobs
+ * besides the returned one.
*
* Returns the next finished job from the pending list (if there is one)
* ready for it to be destroyed.
*/
static struct drm_sched_job *
-drm_sched_get_finished_job(struct drm_gpu_scheduler *sched)
+drm_sched_get_finished_job(struct drm_gpu_scheduler *sched, bool *have_more)
{
struct drm_sched_job *job, *next;
@@ -1147,22 +1130,25 @@ drm_sched_get_finished_job(struct drm_gpu_scheduler *sched)
job = list_first_entry_or_null(&sched->pending_list,
struct drm_sched_job, list);
-
if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
/* remove job from pending_list */
list_del_init(&job->list);
/* cancel this job's TO timer */
cancel_delayed_work(&sched->work_tdr);
- /* make the scheduled timestamp more accurate */
+
+ *have_more = false;
next = list_first_entry_or_null(&sched->pending_list,
typeof(*next), list);
-
if (next) {
+ /* make the scheduled timestamp more accurate */
if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT,
&next->s_fence->scheduled.flags))
next->s_fence->scheduled.timestamp =
dma_fence_timestamp(&job->s_fence->finished);
+
+ *have_more = dma_fence_is_signaled(&next->s_fence->finished);
+
/* start TO timer for next job */
drm_sched_start_timeout(sched);
}
@@ -1221,12 +1207,15 @@ static void drm_sched_free_job_work(struct work_struct *w)
struct drm_gpu_scheduler *sched =
container_of(w, struct drm_gpu_scheduler, work_free_job);
struct drm_sched_job *job;
+ bool have_more;
- job = drm_sched_get_finished_job(sched);
- if (job)
+ job = drm_sched_get_finished_job(sched, &have_more);
+ if (job) {
sched->ops->free_job(job);
+ if (have_more)
+ drm_sched_run_free_queue(sched);
+ }
- drm_sched_run_free_queue_unlocked(sched);
drm_sched_run_job_queue(sched);
}
@@ -1435,6 +1424,22 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
* Prevents reinsertion and marks job_queue as idle,
* it will be removed from the rq in drm_sched_entity_fini()
* eventually
+ *
+ * FIXME:
+ * This lacks the proper spin_lock(&s_entity->lock) and
+ * is, therefore, a race condition. Most notably, it
+ * can race with drm_sched_entity_push_job(). The lock
+ * cannot be taken here, however, because this would
+ * lead to lock inversion -> deadlock.
+ *
+ * The best solution probably is to enforce the
+ * lifetime rule of all entities having to be torn down
+ * before their scheduler. Then, however, locking could
+ * be dropped altogether from this function.
+ *
+ * For now, this remains a potential race in all
+ * drivers that keep entities alive for longer than
+ * the scheduler.
*/
s_entity->stopped = true;
spin_unlock(&rq->lock);
diff --git a/drivers/gpu/drm/scheduler/tests/mock_scheduler.c b/drivers/gpu/drm/scheduler/tests/mock_scheduler.c
index 65acffc3fea8..8e9ae7d980eb 100644
--- a/drivers/gpu/drm/scheduler/tests/mock_scheduler.c
+++ b/drivers/gpu/drm/scheduler/tests/mock_scheduler.c
@@ -219,7 +219,7 @@ mock_sched_timedout_job(struct drm_sched_job *sched_job)
unsigned long flags;
if (job->flags & DRM_MOCK_SCHED_JOB_DONT_RESET) {
- job->flags &= ~DRM_MOCK_SCHED_JOB_DONT_RESET;
+ job->flags |= DRM_MOCK_SCHED_JOB_RESET_SKIPPED;
return DRM_GPU_SCHED_STAT_NO_HANG;
}
diff --git a/drivers/gpu/drm/scheduler/tests/sched_tests.h b/drivers/gpu/drm/scheduler/tests/sched_tests.h
index 63d4f2ac7074..7f31d35780cc 100644
--- a/drivers/gpu/drm/scheduler/tests/sched_tests.h
+++ b/drivers/gpu/drm/scheduler/tests/sched_tests.h
@@ -11,7 +11,6 @@
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/list.h>
-#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/types.h>
@@ -95,9 +94,10 @@ struct drm_mock_sched_job {
struct completion done;
-#define DRM_MOCK_SCHED_JOB_DONE 0x1
-#define DRM_MOCK_SCHED_JOB_TIMEDOUT 0x2
-#define DRM_MOCK_SCHED_JOB_DONT_RESET 0x4
+#define DRM_MOCK_SCHED_JOB_DONE 0x1
+#define DRM_MOCK_SCHED_JOB_TIMEDOUT 0x2
+#define DRM_MOCK_SCHED_JOB_DONT_RESET 0x4
+#define DRM_MOCK_SCHED_JOB_RESET_SKIPPED 0x8
unsigned long flags;
struct list_head link;
diff --git a/drivers/gpu/drm/scheduler/tests/tests_basic.c b/drivers/gpu/drm/scheduler/tests/tests_basic.c
index 55eb142bd7c5..82a41a456b0a 100644
--- a/drivers/gpu/drm/scheduler/tests/tests_basic.c
+++ b/drivers/gpu/drm/scheduler/tests/tests_basic.c
@@ -317,8 +317,8 @@ static void drm_sched_skip_reset(struct kunit *test)
KUNIT_ASSERT_FALSE(test, done);
KUNIT_ASSERT_EQ(test,
- job->flags & DRM_MOCK_SCHED_JOB_DONT_RESET,
- 0);
+ job->flags & DRM_MOCK_SCHED_JOB_RESET_SKIPPED,
+ DRM_MOCK_SCHED_JOB_RESET_SKIPPED);
i = drm_mock_sched_advance(sched, 1);
KUNIT_ASSERT_EQ(test, i, 1);
diff --git a/drivers/gpu/drm/sitronix/st7571-i2c.c b/drivers/gpu/drm/sitronix/st7571-i2c.c
index 453eb7e045e5..a6c4a6738ded 100644
--- a/drivers/gpu/drm/sitronix/st7571-i2c.c
+++ b/drivers/gpu/drm/sitronix/st7571-i2c.c
@@ -151,6 +151,7 @@ struct st7571_device {
bool ignore_nak;
bool grayscale;
+ bool inverted;
u32 height_mm;
u32 width_mm;
u32 startline;
@@ -218,10 +219,11 @@ static int st7571_send_command_list(struct st7571_device *st7571,
return ret;
}
-static inline u8 st7571_transform_xy(const char *p, int x, int y)
+static inline u8 st7571_transform_xy(const char *p, int x, int y, u8 bpp)
{
int xrest = x % 8;
u8 result = 0;
+ u8 row_len = 16 * bpp;
/*
* Transforms an (x, y) pixel coordinate into a vertical 8-bit
@@ -236,7 +238,7 @@ static inline u8 st7571_transform_xy(const char *p, int x, int y)
for (int i = 0; i < 8; i++) {
int row_idx = y + i;
- u8 byte = p[row_idx * 16 + x];
+ u8 byte = p[row_idx * row_len + x];
u8 bit = (byte >> xrest) & 1;
result |= (bit << i);
@@ -303,11 +305,11 @@ static void st7571_prepare_buffer_grayscale(struct st7571_device *st7571,
struct iosys_map dst;
switch (fb->format->format) {
- case DRM_FORMAT_XRGB8888: /* Only support XRGB8888 in monochrome mode */
- dst_pitch = DIV_ROUND_UP(drm_rect_width(rect), 8);
+ case DRM_FORMAT_XRGB8888:
+ dst_pitch = DIV_ROUND_UP(drm_rect_width(rect), 4);
iosys_map_set_vaddr(&dst, st7571->hwbuf);
- drm_fb_xrgb8888_to_mono(&dst, &dst_pitch, vmap, fb, rect, fmtcnv_state);
+ drm_fb_xrgb8888_to_gray2(&dst, &dst_pitch, vmap, fb, rect, fmtcnv_state);
break;
case DRM_FORMAT_R1:
@@ -333,7 +335,7 @@ static int st7571_fb_update_rect_monochrome(struct drm_framebuffer *fb, struct d
for (int y = rect->y1; y < rect->y2; y += ST7571_PAGE_HEIGHT) {
for (int x = rect->x1; x < rect->x2; x++)
- row[x] = st7571_transform_xy(st7571->hwbuf, x, y);
+ row[x] = st7571_transform_xy(st7571->hwbuf, x, y, 1);
st7571_set_position(st7571, rect->x1, y);
@@ -358,14 +360,13 @@ static int st7571_fb_update_rect_grayscale(struct drm_framebuffer *fb, struct dr
rect->y2 = min_t(unsigned int, round_up(rect->y2, ST7571_PAGE_HEIGHT), st7571->nlines);
switch (format) {
- case DRM_FORMAT_XRGB8888:
- /* Threated as monochrome (R1) */
- fallthrough;
case DRM_FORMAT_R1:
- x1 = rect->x1;
- x2 = rect->x2;
+ x1 = rect->x1 * 1;
+ x2 = rect->x2 * 1;
break;
case DRM_FORMAT_R2:
+ fallthrough;
+ case DRM_FORMAT_XRGB8888:
x1 = rect->x1 * 2;
x2 = rect->x2 * 2;
break;
@@ -373,7 +374,7 @@ static int st7571_fb_update_rect_grayscale(struct drm_framebuffer *fb, struct dr
for (int y = rect->y1; y < rect->y2; y += ST7571_PAGE_HEIGHT) {
for (int x = x1; x < x2; x++)
- row[x] = st7571_transform_xy(st7571->hwbuf, x, y);
+ row[x] = st7571_transform_xy(st7571->hwbuf, x, y, 2);
st7571_set_position(st7571, rect->x1, y);
@@ -386,15 +387,15 @@ static int st7571_fb_update_rect_grayscale(struct drm_framebuffer *fb, struct dr
* even if the format is monochrome.
*
* The bit values maps to the following grayscale:
- * 0 0 = White
- * 0 1 = Light gray
- * 1 0 = Dark gray
- * 1 1 = Black
+ * 0 0 = Black
+ * 0 1 = Dark gray
+ * 1 0 = Light gray
+ * 1 1 = White
*
* For monochrome formats, write the same value twice to get
* either a black or white pixel.
*/
- if (format == DRM_FORMAT_R1 || format == DRM_FORMAT_XRGB8888)
+ if (format == DRM_FORMAT_R1)
regmap_bulk_write(st7571->regmap, ST7571_DATA_MODE, row + x, 1);
}
}
@@ -792,6 +793,7 @@ static int st7567_parse_dt(struct st7571_device *st7567)
of_property_read_u32(np, "width-mm", &st7567->width_mm);
of_property_read_u32(np, "height-mm", &st7567->height_mm);
+ st7567->inverted = of_property_read_bool(np, "sitronix,inverted");
st7567->pformat = &st7571_monochrome;
st7567->bpp = 1;
@@ -819,6 +821,7 @@ static int st7571_parse_dt(struct st7571_device *st7571)
of_property_read_u32(np, "width-mm", &st7571->width_mm);
of_property_read_u32(np, "height-mm", &st7571->height_mm);
st7571->grayscale = of_property_read_bool(np, "sitronix,grayscale");
+ st7571->inverted = of_property_read_bool(np, "sitronix,inverted");
if (st7571->grayscale) {
st7571->pformat = &st7571_grayscale;
@@ -873,7 +876,7 @@ static int st7567_lcd_init(struct st7571_device *st7567)
ST7571_SET_POWER(0x6), /* Power Control, VC: ON, VR: ON, VF: OFF */
ST7571_SET_POWER(0x7), /* Power Control, VC: ON, VR: ON, VF: ON */
- ST7571_SET_REVERSE(0),
+ ST7571_SET_REVERSE(st7567->inverted ? 1 : 0),
ST7571_SET_ENTIRE_DISPLAY_ON(0),
};
@@ -917,7 +920,7 @@ static int st7571_lcd_init(struct st7571_device *st7571)
ST7571_SET_COLOR_MODE(st7571->pformat->mode),
ST7571_COMMAND_SET_NORMAL,
- ST7571_SET_REVERSE(0),
+ ST7571_SET_REVERSE(st7571->inverted ? 1 : 0),
ST7571_SET_ENTIRE_DISPLAY_ON(0),
};
@@ -1024,7 +1027,7 @@ static void st7571_remove(struct i2c_client *client)
drm_dev_unplug(&st7571->dev);
}
-struct st7571_panel_data st7567_config = {
+static const struct st7571_panel_data st7567_config = {
.init = st7567_lcd_init,
.parse_dt = st7567_parse_dt,
.constraints = {
@@ -1036,7 +1039,7 @@ struct st7571_panel_data st7567_config = {
},
};
-struct st7571_panel_data st7571_config = {
+static const struct st7571_panel_data st7571_config = {
.init = st7571_lcd_init,
.parse_dt = st7571_parse_dt,
.constraints = {
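
Since XRGB8888 is now converted with drm_fb_xrgb8888_to_gray2(), each pixel occupies two bits, which is why the destination pitch above becomes DIV_ROUND_UP(width, 4) (four pixels per byte) and why st7571_transform_xy() needs the bpp to size a row: row_len = 16 * bpp gives 16 bytes per row at 1 bpp and 32 at 2 bpp for this 128-pixel-wide panel. A small sketch of the resulting 2-bit packing, assuming LSB-first ordering within each byte to match the driver's mono handling (panel width and bit order are assumptions):

/* Fetch the 2-bit gray level of pixel (x, y) from the gray2 buffer. */
static u8 gray2_pixel(const u8 *buf, unsigned int x, unsigned int y)
{
	unsigned int pitch = DIV_ROUND_UP(128, 4);	/* 32 bytes/row */
	u8 byte = buf[y * pitch + x / 4];

	return (byte >> ((x % 4) * 2)) & 0x3;
}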
diff --git a/drivers/gpu/drm/solomon/ssd130x-spi.c b/drivers/gpu/drm/solomon/ssd130x-spi.c
index 7c935870f7d2..b52f5fd592a1 100644
--- a/drivers/gpu/drm/solomon/ssd130x-spi.c
+++ b/drivers/gpu/drm/solomon/ssd130x-spi.c
@@ -74,8 +74,7 @@ static int ssd130x_spi_probe(struct spi_device *spi)
t = devm_kzalloc(dev, sizeof(*t), GFP_KERNEL);
if (!t)
- return dev_err_probe(dev, -ENOMEM,
- "Failed to allocate SPI transport data\n");
+ return -ENOMEM;
t->spi = spi;
t->dc = dc;
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index 03684062309b..b76606e9a82d 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -744,7 +744,7 @@ static bool sti_hqvdp_check_hw_scaling(struct sti_hqvdp *hqvdp,
inv_zy = DIV_ROUND_UP(src_h, dst_h);
- return (inv_zy <= lfw) ? true : false;
+ return inv_zy <= lfw;
}
/**
diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c
index 8ebcaf953782..ab00d1a6140c 100644
--- a/drivers/gpu/drm/stm/drv.c
+++ b/drivers/gpu/drm/stm/drv.c
@@ -236,8 +236,18 @@ static void stm_drm_platform_shutdown(struct platform_device *pdev)
drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
}
+static struct ltdc_plat_data stm_drm_plat_data = {
+ .pad_max_freq_hz = 90000000,
+};
+
+static struct ltdc_plat_data stm_drm_plat_data_mp25 = {
+ .pad_max_freq_hz = 150000000,
+};
+
static const struct of_device_id drv_dt_ids[] = {
- { .compatible = "st,stm32-ltdc"},
+ { .compatible = "st,stm32-ltdc", .data = &stm_drm_plat_data, },
+ { .compatible = "st,stm32mp251-ltdc", .data = &stm_drm_plat_data_mp25, },
+ { .compatible = "st,stm32mp255-ltdc", .data = &stm_drm_plat_data_mp25, },
{ /* end node */ },
};
MODULE_DEVICE_TABLE(of, drv_dt_ids);
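
The new match data above carries the per-SoC pixel-pad frequency limit (90 MHz for the original stm32 LTDC, 150 MHz for the MP25 variants) that ltdc_crtc_mode_valid() checks against caps.pad_max_freq_hz in the ltdc.c hunks below. A minimal sketch of the probe-side plumbing; the function name and the caps assignment site are assumptions, only ltdc_plat_data and pad_max_freq_hz come from this patch:

#include <linux/property.h>

static int ltdc_get_plat_data(struct device *dev, struct ltdc_device *ldev)
{
	const struct ltdc_plat_data *pdata = device_get_match_data(dev);

	if (!pdata)
		return -ENODEV;

	ldev->caps.pad_max_freq_hz = pdata->pad_max_freq_hz;
	return 0;
}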
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index ba315c66a04d..d1501e86a5b1 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -14,6 +14,7 @@
#include <linux/interrupt.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
@@ -51,6 +52,7 @@
#define HWVER_10300 0x010300
#define HWVER_20101 0x020101
#define HWVER_40100 0x040100
+#define HWVER_40101 0x040101
/*
* The address of some registers depends on the HW version: such registers have
@@ -641,7 +643,7 @@ static inline void ltdc_set_ycbcr_config(struct drm_plane *plane, u32 drm_pix_fm
break;
default:
/* RGB or not a YCbCr supported format */
- DRM_ERROR("Unsupported pixel format: %u\n", drm_pix_fmt);
+ drm_err(plane->dev, "Unsupported pixel format: %u\n", drm_pix_fmt);
return;
}
@@ -664,18 +666,19 @@ static inline void ltdc_set_ycbcr_coeffs(struct drm_plane *plane)
u32 lofs = plane->index * LAY_OFS;
if (enc != DRM_COLOR_YCBCR_BT601 && enc != DRM_COLOR_YCBCR_BT709) {
- DRM_ERROR("color encoding %d not supported, use bt601 by default\n", enc);
+ drm_err(plane->dev, "color encoding %d not supported, use bt601 by default\n", enc);
/* set by default color encoding to DRM_COLOR_YCBCR_BT601 */
enc = DRM_COLOR_YCBCR_BT601;
}
if (ran != DRM_COLOR_YCBCR_LIMITED_RANGE && ran != DRM_COLOR_YCBCR_FULL_RANGE) {
- DRM_ERROR("color range %d not supported, use limited range by default\n", ran);
+ drm_err(plane->dev,
+ "color range %d not supported, use limited range by default\n", ran);
/* set by default color range to DRM_COLOR_YCBCR_LIMITED_RANGE */
ran = DRM_COLOR_YCBCR_LIMITED_RANGE;
}
- DRM_DEBUG_DRIVER("Color encoding=%d, range=%d\n", enc, ran);
+ drm_err(plane->dev, "Color encoding=%d, range=%d\n", enc, ran);
regmap_write(ldev->regmap, LTDC_L1CYR0R + lofs,
ltdc_ycbcr2rgb_coeffs[enc][ran][0]);
regmap_write(ldev->regmap, LTDC_L1CYR1R + lofs,
@@ -774,7 +777,7 @@ static void ltdc_crtc_atomic_enable(struct drm_crtc *crtc,
struct ltdc_device *ldev = crtc_to_ltdc(crtc);
struct drm_device *ddev = crtc->dev;
- DRM_DEBUG_DRIVER("\n");
+ drm_dbg_driver(crtc->dev, "\n");
pm_runtime_get_sync(ddev->dev);
@@ -798,7 +801,7 @@ static void ltdc_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_device *ddev = crtc->dev;
int layer_index = 0;
- DRM_DEBUG_DRIVER("\n");
+ drm_dbg_driver(crtc->dev, "\n");
drm_crtc_vblank_off(crtc);
@@ -835,9 +838,15 @@ ltdc_crtc_mode_valid(struct drm_crtc *crtc,
int target_max = target + CLK_TOLERANCE_HZ;
int result;
+ if (ldev->lvds_clk) {
+ result = clk_round_rate(ldev->lvds_clk, target);
+ drm_dbg_driver(crtc->dev, "lvds pixclk rate target %d, available %d\n",
+ target, result);
+ }
+
result = clk_round_rate(ldev->pixel_clk, target);
- DRM_DEBUG_DRIVER("clk rate target %d, available %d\n", target, result);
+ drm_dbg_driver(crtc->dev, "clk rate target %d, available %d\n", target, result);
/* Filter modes according to the max frequency supported by the pads */
if (result > ldev->caps.pad_max_freq_hz)
@@ -872,14 +881,14 @@ static bool ltdc_crtc_mode_fixup(struct drm_crtc *crtc,
int rate = mode->clock * 1000;
if (clk_set_rate(ldev->pixel_clk, rate) < 0) {
- DRM_ERROR("Cannot set rate (%dHz) for pixel clk\n", rate);
+ drm_err(crtc->dev, "Cannot set rate (%dHz) for pixel clk\n", rate);
return false;
}
adjusted_mode->clock = clk_get_rate(ldev->pixel_clk) / 1000;
- DRM_DEBUG_DRIVER("requested clock %dkHz, adjusted clock %dkHz\n",
- mode->clock, adjusted_mode->clock);
+ drm_dbg_driver(crtc->dev, "requested clock %dkHz, adjusted clock %dkHz\n",
+ mode->clock, adjusted_mode->clock);
return true;
}
@@ -934,20 +943,20 @@ static void ltdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
if (!pm_runtime_active(ddev->dev)) {
ret = pm_runtime_get_sync(ddev->dev);
if (ret) {
- DRM_ERROR("Failed to set mode, cannot get sync\n");
+ drm_err(crtc->dev, "Failed to set mode, cannot get sync\n");
return;
}
}
- DRM_DEBUG_DRIVER("CRTC:%d mode:%s\n", crtc->base.id, mode->name);
- DRM_DEBUG_DRIVER("Video mode: %dx%d", mode->hdisplay, mode->vdisplay);
- DRM_DEBUG_DRIVER(" hfp %d hbp %d hsl %d vfp %d vbp %d vsl %d\n",
- mode->hsync_start - mode->hdisplay,
- mode->htotal - mode->hsync_end,
- mode->hsync_end - mode->hsync_start,
- mode->vsync_start - mode->vdisplay,
- mode->vtotal - mode->vsync_end,
- mode->vsync_end - mode->vsync_start);
+ drm_dbg_driver(crtc->dev, "CRTC:%d mode:%s\n", crtc->base.id, mode->name);
+ drm_dbg_driver(crtc->dev, "Video mode: %dx%d", mode->hdisplay, mode->vdisplay);
+ drm_dbg_driver(crtc->dev, " hfp %d hbp %d hsl %d vfp %d vbp %d vsl %d\n",
+ mode->hsync_start - mode->hdisplay,
+ mode->htotal - mode->hsync_end,
+ mode->hsync_end - mode->hsync_start,
+ mode->vsync_start - mode->vdisplay,
+ mode->vtotal - mode->vsync_end,
+ mode->vsync_end - mode->vsync_start);
/* Convert video timings to ltdc timings */
hsync = mode->hsync_end - mode->hsync_start - 1;
@@ -1033,7 +1042,7 @@ static void ltdc_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_device *ddev = crtc->dev;
struct drm_pending_vblank_event *event = crtc->state->event;
- DRM_DEBUG_ATOMIC("\n");
+ drm_dbg_atomic(crtc->dev, "\n");
ltdc_crtc_update_clut(crtc);
@@ -1121,7 +1130,7 @@ static int ltdc_crtc_enable_vblank(struct drm_crtc *crtc)
struct ltdc_device *ldev = crtc_to_ltdc(crtc);
struct drm_crtc_state *state = crtc->state;
- DRM_DEBUG_DRIVER("\n");
+ drm_dbg_driver(crtc->dev, "\n");
if (state->enable)
regmap_set_bits(ldev->regmap, LTDC_IER, IER_LIE);
@@ -1135,7 +1144,7 @@ static void ltdc_crtc_disable_vblank(struct drm_crtc *crtc)
{
struct ltdc_device *ldev = crtc_to_ltdc(crtc);
- DRM_DEBUG_DRIVER("\n");
+ drm_dbg_driver(crtc->dev, "\n");
regmap_clear_bits(ldev->regmap, LTDC_IER, IER_LIE);
}
@@ -1144,11 +1153,11 @@ static int ltdc_crtc_set_crc_source(struct drm_crtc *crtc, const char *source)
struct ltdc_device *ldev;
int ret;
- DRM_DEBUG_DRIVER("\n");
-
if (!crtc)
return -ENODEV;
+ drm_dbg_driver(crtc->dev, "\n");
+
ldev = crtc_to_ltdc(crtc);
if (source && strcmp(source, "auto") == 0) {
@@ -1168,14 +1177,14 @@ static int ltdc_crtc_set_crc_source(struct drm_crtc *crtc, const char *source)
static int ltdc_crtc_verify_crc_source(struct drm_crtc *crtc,
const char *source, size_t *values_cnt)
{
- DRM_DEBUG_DRIVER("\n");
-
if (!crtc)
return -ENODEV;
+ drm_dbg_driver(crtc->dev, "\n");
+
if (source && strcmp(source, "auto") != 0) {
- DRM_DEBUG_DRIVER("Unknown CRC source %s for %s\n",
- source, crtc->name);
+ drm_dbg_driver(crtc->dev, "Unknown CRC source %s for %s\n",
+ source, crtc->name);
return -EINVAL;
}
@@ -1233,7 +1242,7 @@ static int ltdc_plane_atomic_check(struct drm_plane *plane,
struct drm_framebuffer *fb = new_plane_state->fb;
u32 src_w, src_h;
- DRM_DEBUG_DRIVER("\n");
+ drm_dbg_driver(plane->dev, "\n");
if (!fb)
return 0;
@@ -1244,7 +1253,7 @@ static int ltdc_plane_atomic_check(struct drm_plane *plane,
/* Reject scaling */
if (src_w != new_plane_state->crtc_w || src_h != new_plane_state->crtc_h) {
- DRM_DEBUG_DRIVER("Scaling is not supported");
+ drm_dbg_driver(plane->dev, "Scaling is not supported");
return -EINVAL;
}
@@ -1270,7 +1279,7 @@ static void ltdc_plane_atomic_update(struct drm_plane *plane,
enum ltdc_pix_fmt pf;
if (!newstate->crtc || !fb) {
- DRM_DEBUG_DRIVER("fb or crtc NULL");
+ drm_dbg_driver(plane->dev, "fb or crtc NULL");
return;
}
@@ -1280,11 +1289,11 @@ static void ltdc_plane_atomic_update(struct drm_plane *plane,
src_w = newstate->src_w >> 16;
src_h = newstate->src_h >> 16;
- DRM_DEBUG_DRIVER("plane:%d fb:%d (%dx%d)@(%d,%d) -> (%dx%d)@(%d,%d)\n",
- plane->base.id, fb->base.id,
- src_w, src_h, src_x, src_y,
- newstate->crtc_w, newstate->crtc_h,
- newstate->crtc_x, newstate->crtc_y);
+ drm_dbg_driver(plane->dev, "plane:%d fb:%d (%dx%d)@(%d,%d) -> (%dx%d)@(%d,%d)\n",
+ plane->base.id, fb->base.id,
+ src_w, src_h, src_x, src_y,
+ newstate->crtc_w, newstate->crtc_h,
+ newstate->crtc_x, newstate->crtc_y);
regmap_read(ldev->regmap, LTDC_BPCR, &bpcr);
@@ -1312,8 +1321,8 @@ static void ltdc_plane_atomic_update(struct drm_plane *plane,
val = ltdc_set_flexible_pixel_format(plane, pf);
if (val == NB_PF) {
- DRM_ERROR("Pixel format %.4s not supported\n",
- (char *)&fb->format->format);
+ drm_err(fb->dev, "Pixel format %.4s not supported\n",
+ (char *)&fb->format->format);
val = 0; /* set by default ARGB 32 bits */
}
regmap_write_bits(ldev->regmap, LTDC_L1PFCR + lofs, LXPFCR_PF, val);
@@ -1350,7 +1359,7 @@ static void ltdc_plane_atomic_update(struct drm_plane *plane,
if (newstate->rotation & DRM_MODE_REFLECT_Y)
paddr += (fb->pitches[0] * (y1 - y0));
- DRM_DEBUG_DRIVER("fb: phys 0x%08x", paddr);
+ drm_dbg_driver(fb->dev, "fb: phys 0x%08x", paddr);
regmap_write(ldev->regmap, LTDC_L1CFBAR + lofs, paddr);
/* Configures the color frame buffer pitch in bytes & line length */
@@ -1517,8 +1526,8 @@ static void ltdc_plane_atomic_disable(struct drm_plane *plane,
regmap_write_bits(ldev->regmap, LTDC_L1RCR + lofs,
LXRCR_IMR | LXRCR_VBR | LXRCR_GRMSK, LXRCR_VBR);
- DRM_DEBUG_DRIVER("CRTC:%d plane:%d\n",
- oldstate->crtc->base.id, plane->base.id);
+ drm_dbg_driver(plane->dev, "CRTC:%d plane:%d\n",
+ oldstate->crtc->base.id, plane->base.id);
}
static void ltdc_plane_atomic_print_state(struct drm_printer *p,
@@ -1632,7 +1641,7 @@ static struct drm_plane *ltdc_plane_create(struct drm_device *ddev,
drm_plane_create_alpha_property(plane);
- DRM_DEBUG_DRIVER("plane:%d created\n", plane->base.id);
+ drm_dbg_driver(plane->dev, "plane:%d created\n", plane->base.id);
return plane;
}
@@ -1647,7 +1656,7 @@ static int ltdc_crtc_init(struct drm_device *ddev, struct drm_crtc *crtc)
primary = ltdc_plane_create(ddev, DRM_PLANE_TYPE_PRIMARY, 0);
if (!primary) {
- DRM_ERROR("Can not create primary plane\n");
+ drm_err(ddev, "Can not create primary plane\n");
return -EINVAL;
}
@@ -1668,7 +1677,7 @@ static int ltdc_crtc_init(struct drm_device *ddev, struct drm_crtc *crtc)
ret = drmm_crtc_init_with_planes(ddev, crtc, primary, NULL,
&ltdc_crtc_funcs, NULL);
if (ret) {
- DRM_ERROR("Can not initialize CRTC\n");
+ drm_err(ddev, "Can not initialize CRTC\n");
return ret;
}
@@ -1677,13 +1686,13 @@ static int ltdc_crtc_init(struct drm_device *ddev, struct drm_crtc *crtc)
drm_mode_crtc_set_gamma_size(crtc, CLUT_SIZE);
drm_crtc_enable_color_mgmt(crtc, 0, false, CLUT_SIZE);
- DRM_DEBUG_DRIVER("CRTC:%d created\n", crtc->base.id);
+ drm_dbg_driver(ddev, "CRTC:%d created\n", crtc->base.id);
/* Add planes. Note : the first layer is used by primary plane */
for (i = 1; i < ldev->caps.nb_layers; i++) {
overlay = ltdc_plane_create(ddev, DRM_PLANE_TYPE_OVERLAY, i);
if (!overlay) {
- DRM_ERROR("Can not create overlay plane %d\n", i);
+ drm_err(ddev, "Can not create overlay plane %d\n", i);
return -ENOMEM;
}
if (ldev->caps.dynamic_zorder)
@@ -1704,7 +1713,7 @@ static void ltdc_encoder_disable(struct drm_encoder *encoder)
struct drm_device *ddev = encoder->dev;
struct ltdc_device *ldev = ddev->dev_private;
- DRM_DEBUG_DRIVER("\n");
+ drm_dbg_driver(encoder->dev, "\n");
/* Disable LTDC */
regmap_clear_bits(ldev->regmap, LTDC_GCR, GCR_LTDCEN);
@@ -1718,7 +1727,7 @@ static void ltdc_encoder_enable(struct drm_encoder *encoder)
struct drm_device *ddev = encoder->dev;
struct ltdc_device *ldev = ddev->dev_private;
- DRM_DEBUG_DRIVER("\n");
+ drm_dbg_driver(encoder->dev, "\n");
/* set fifo underrun threshold register */
if (ldev->caps.fifo_threshold)
@@ -1734,7 +1743,7 @@ static void ltdc_encoder_mode_set(struct drm_encoder *encoder,
{
struct drm_device *ddev = encoder->dev;
- DRM_DEBUG_DRIVER("\n");
+ drm_dbg_driver(encoder->dev, "\n");
/*
* Set to default state the pinctrl only with DPI type.
@@ -1770,7 +1779,7 @@ static int ltdc_encoder_init(struct drm_device *ddev, struct drm_bridge *bridge)
if (ret)
return ret;
- DRM_DEBUG_DRIVER("Bridge encoder:%d created\n", encoder->base.id);
+ drm_dbg_driver(encoder->dev, "Bridge encoder:%d created\n", encoder->base.id);
return 0;
}
@@ -1779,6 +1788,7 @@ static int ltdc_get_caps(struct drm_device *ddev)
{
struct ltdc_device *ldev = ddev->dev_private;
u32 bus_width_log2, lcr, gc2r;
+ const struct ltdc_plat_data *pdata = of_device_get_match_data(ddev->dev);
/*
* at least 1 layer must be managed & the number of layers
@@ -1794,6 +1804,8 @@ static int ltdc_get_caps(struct drm_device *ddev)
ldev->caps.bus_width = 8 << bus_width_log2;
regmap_read(ldev->regmap, LTDC_IDR, &ldev->caps.hw_version);
+ ldev->caps.pad_max_freq_hz = pdata->pad_max_freq_hz;
+
switch (ldev->caps.hw_version) {
case HWVER_10200:
case HWVER_10300:
@@ -1811,7 +1823,6 @@ static int ltdc_get_caps(struct drm_device *ddev)
* does not work on 2nd layer.
*/
ldev->caps.non_alpha_only_l1 = true;
- ldev->caps.pad_max_freq_hz = 90000000;
if (ldev->caps.hw_version == HWVER_10200)
ldev->caps.pad_max_freq_hz = 65000000;
ldev->caps.nb_irq = 2;
@@ -1842,6 +1853,7 @@ static int ltdc_get_caps(struct drm_device *ddev)
ldev->caps.fifo_threshold = false;
break;
case HWVER_40100:
+ case HWVER_40101:
ldev->caps.layer_ofs = LAY_OFS_1;
ldev->caps.layer_regs = ltdc_layer_regs_a2;
ldev->caps.pix_fmt_hw = ltdc_pix_fmt_a2;
@@ -1849,7 +1861,6 @@ static int ltdc_get_caps(struct drm_device *ddev)
ldev->caps.pix_fmt_nb = ARRAY_SIZE(ltdc_drm_fmt_a2);
ldev->caps.pix_fmt_flex = true;
ldev->caps.non_alpha_only_l1 = false;
- ldev->caps.pad_max_freq_hz = 90000000;
ldev->caps.nb_irq = 2;
ldev->caps.ycbcr_input = true;
ldev->caps.ycbcr_output = true;
@@ -1870,8 +1881,12 @@ void ltdc_suspend(struct drm_device *ddev)
{
struct ltdc_device *ldev = ddev->dev_private;
- DRM_DEBUG_DRIVER("\n");
+ drm_dbg_driver(ddev, "\n");
clk_disable_unprepare(ldev->pixel_clk);
+ if (ldev->bus_clk)
+ clk_disable_unprepare(ldev->bus_clk);
+ if (ldev->lvds_clk)
+ clk_disable_unprepare(ldev->lvds_clk);
}
int ltdc_resume(struct drm_device *ddev)
@@ -1879,15 +1894,29 @@ int ltdc_resume(struct drm_device *ddev)
struct ltdc_device *ldev = ddev->dev_private;
int ret;
- DRM_DEBUG_DRIVER("\n");
+ drm_dbg_driver(ddev, "\n");
ret = clk_prepare_enable(ldev->pixel_clk);
if (ret) {
- DRM_ERROR("failed to enable pixel clock (%d)\n", ret);
+ drm_err(ddev, "failed to enable pixel clock (%d)\n", ret);
return ret;
}
- return 0;
+ if (ldev->bus_clk) {
+ ret = clk_prepare_enable(ldev->bus_clk);
+ if (ret) {
+ drm_err(ddev, "failed to enable bus clock (%d)\n", ret);
+ return ret;
+ }
+ }
+
+ if (ldev->lvds_clk) {
+ ret = clk_prepare_enable(ldev->lvds_clk);
+ if (ret)
+ drm_err(ddev, "failed to prepare lvds clock\n");
+ }
+
+ return ret;
}
int ltdc_load(struct drm_device *ddev)
@@ -1903,7 +1932,7 @@ int ltdc_load(struct drm_device *ddev)
int irq, i, nb_endpoints;
int ret = -ENODEV;
- DRM_DEBUG_DRIVER("\n");
+ drm_dbg_driver(ddev, "\n");
/* Get number of endpoints */
nb_endpoints = of_graph_get_endpoint_count(np);
@@ -1913,15 +1942,29 @@ int ltdc_load(struct drm_device *ddev)
ldev->pixel_clk = devm_clk_get(dev, "lcd");
if (IS_ERR(ldev->pixel_clk)) {
if (PTR_ERR(ldev->pixel_clk) != -EPROBE_DEFER)
- DRM_ERROR("Unable to get lcd clock\n");
+ drm_err(ddev, "Unable to get lcd clock\n");
return PTR_ERR(ldev->pixel_clk);
}
if (clk_prepare_enable(ldev->pixel_clk)) {
- DRM_ERROR("Unable to prepare pixel clock\n");
+ drm_err(ddev, "Unable to prepare pixel clock\n");
return -ENODEV;
}
+ if (of_device_is_compatible(np, "st,stm32mp251-ltdc") ||
+ of_device_is_compatible(np, "st,stm32mp255-ltdc")) {
+ ldev->bus_clk = devm_clk_get(dev, "bus");
+ if (IS_ERR(ldev->bus_clk))
+ return dev_err_probe(dev, PTR_ERR(ldev->bus_clk),
+ "Unable to get bus clock\n");
+
+ ret = clk_prepare_enable(ldev->bus_clk);
+ if (ret) {
+ drm_err(ddev, "Unable to prepare bus clock\n");
+ return ret;
+ }
+ }
+
/* Get endpoints if any */
for (i = 0; i < nb_endpoints; i++) {
ret = drm_of_find_panel_or_bridge(np, 0, i, &panel, &bridge);
@@ -1939,7 +1982,7 @@ int ltdc_load(struct drm_device *ddev)
if (panel) {
bridge = drmm_panel_bridge_add(ddev, panel);
if (IS_ERR(bridge)) {
- DRM_ERROR("panel-bridge endpoint %d\n", i);
+ drm_err(ddev, "panel-bridge endpoint %d\n", i);
ret = PTR_ERR(bridge);
goto err;
}
@@ -1949,12 +1992,16 @@ int ltdc_load(struct drm_device *ddev)
ret = ltdc_encoder_init(ddev, bridge);
if (ret) {
if (ret != -EPROBE_DEFER)
- DRM_ERROR("init encoder endpoint %d\n", i);
+ drm_err(ddev, "init encoder endpoint %d\n", i);
goto err;
}
}
}
+ ldev->lvds_clk = devm_clk_get(dev, "lvds");
+ if (IS_ERR(ldev->lvds_clk))
+ ldev->lvds_clk = NULL;
+
rstc = devm_reset_control_get_exclusive(dev, NULL);
mutex_init(&ldev->err_lock);
@@ -1967,29 +2014,29 @@ int ltdc_load(struct drm_device *ddev)
ldev->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ldev->regs)) {
- DRM_ERROR("Unable to get ltdc registers\n");
+ drm_err(ddev, "Unable to get ltdc registers\n");
ret = PTR_ERR(ldev->regs);
goto err;
}
ldev->regmap = devm_regmap_init_mmio(&pdev->dev, ldev->regs, &stm32_ltdc_regmap_cfg);
if (IS_ERR(ldev->regmap)) {
- DRM_ERROR("Unable to regmap ltdc registers\n");
+ drm_err(ddev, "Unable to regmap ltdc registers\n");
ret = PTR_ERR(ldev->regmap);
goto err;
}
ret = ltdc_get_caps(ddev);
if (ret) {
- DRM_ERROR("hardware identifier (0x%08x) not supported!\n",
- ldev->caps.hw_version);
+ drm_err(ddev, "hardware identifier (0x%08x) not supported!\n",
+ ldev->caps.hw_version);
goto err;
}
/* Disable all interrupts */
regmap_clear_bits(ldev->regmap, LTDC_IER, IER_MASK);
- DRM_DEBUG_DRIVER("ltdc hw version 0x%08x\n", ldev->caps.hw_version);
+ drm_dbg_driver(ddev, "ltdc hw version 0x%08x\n", ldev->caps.hw_version);
/* initialize default value for fifo underrun threshold & clear interrupt error counters */
ldev->transfer_err = 0;
@@ -2008,32 +2055,35 @@ int ltdc_load(struct drm_device *ddev)
ltdc_irq_thread, IRQF_ONESHOT,
dev_name(dev), ddev);
if (ret) {
- DRM_ERROR("Failed to register LTDC interrupt\n");
+ drm_err(ddev, "Failed to register LTDC interrupt\n");
goto err;
}
}
crtc = drmm_kzalloc(ddev, sizeof(*crtc), GFP_KERNEL);
if (!crtc) {
- DRM_ERROR("Failed to allocate crtc\n");
+ drm_err(ddev, "Failed to allocate crtc\n");
ret = -ENOMEM;
goto err;
}
ret = ltdc_crtc_init(ddev, crtc);
if (ret) {
- DRM_ERROR("Failed to init crtc\n");
+ drm_err(ddev, "Failed to init crtc\n");
goto err;
}
ret = drm_vblank_init(ddev, NB_CRTC);
if (ret) {
- DRM_ERROR("Failed calling drm_vblank_init()\n");
+ drm_err(ddev, "Failed calling drm_vblank_init()\n");
goto err;
}
clk_disable_unprepare(ldev->pixel_clk);
+ if (ldev->bus_clk)
+ clk_disable_unprepare(ldev->bus_clk);
+
pinctrl_pm_select_sleep_state(ddev->dev);
pm_runtime_enable(ddev->dev);
@@ -2042,12 +2092,15 @@ int ltdc_load(struct drm_device *ddev)
err:
clk_disable_unprepare(ldev->pixel_clk);
+ if (ldev->bus_clk)
+ clk_disable_unprepare(ldev->bus_clk);
+
return ret;
}
void ltdc_unload(struct drm_device *ddev)
{
- DRM_DEBUG_DRIVER("\n");
+ drm_dbg_driver(ddev, "\n");
pm_runtime_disable(ddev->dev);
}
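
The ltdc.c hunks above all follow the same DRM logging migration: the device-agnostic DRM_DEBUG_DRIVER()/DRM_ERROR() macros become their device-aware counterparts, so every message carries the drm_device that emitted it. A minimal sketch of the pattern, taken from the conversions above:

        /* before: no device context in the log line */
        DRM_DEBUG_DRIVER("plane:%d created\n", plane->base.id);
        DRM_ERROR("Cannot set rate (%dHz) for pixel clk\n", rate);

        /* after: the owning drm_device is named explicitly and shows
         * up in the log prefix */
        drm_dbg_driver(plane->dev, "plane:%d created\n", plane->base.id);
        drm_err(crtc->dev, "Cannot set rate (%dHz) for pixel clk\n", rate);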
diff --git a/drivers/gpu/drm/stm/ltdc.h b/drivers/gpu/drm/stm/ltdc.h
index 9d488043ffdb..17b51a7ce28e 100644
--- a/drivers/gpu/drm/stm/ltdc.h
+++ b/drivers/gpu/drm/stm/ltdc.h
@@ -40,10 +40,16 @@ struct fps_info {
ktime_t last_timestamp;
};
+struct ltdc_plat_data {
+ int pad_max_freq_hz; /* max frequency supported by pad */
+};
+
struct ltdc_device {
void __iomem *regs;
struct regmap *regmap;
struct clk *pixel_clk; /* lcd pixel clock */
+ struct clk *lvds_clk; /* lvds pixel clock */
+ struct clk *bus_clk; /* bus clock */
struct mutex err_lock; /* protecting error_status */
struct ltdc_caps caps;
u32 irq_status;
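
Both new clocks are treated as optional by the probe code above: the bus clock is only requested on the MP25 compatibles, and an lvds clock lookup failure is silently turned into NULL, which also swallows -EPROBE_DEFER. A sketch of the more conventional shape for a possibly-absent clock, assuming nothing else requires dropping the error:

        ldev->lvds_clk = devm_clk_get_optional(dev, "lvds");
        if (IS_ERR(ldev->lvds_clk))
                return dev_err_probe(dev, PTR_ERR(ldev->lvds_clk),
                                     "Unable to get lvds clock\n");
        /* NULL here just means "no lvds clock described"; the clk_*()
         * calls accept NULL and become no-ops, so the explicit NULL
         * checks in suspend/resume could then be dropped as well. */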
diff --git a/drivers/gpu/drm/sysfb/drm_sysfb_helper.h b/drivers/gpu/drm/sysfb/drm_sysfb_helper.h
index 1424b63dde99..89633e30ca62 100644
--- a/drivers/gpu/drm/sysfb/drm_sysfb_helper.h
+++ b/drivers/gpu/drm/sysfb/drm_sysfb_helper.h
@@ -132,7 +132,7 @@ int drm_sysfb_plane_helper_get_scanout_buffer(struct drm_plane *plane,
struct drm_sysfb_crtc_state {
struct drm_crtc_state base;
- /* Primary-plane format; required for color mgmt. */
+ /* CRTC input color format; required for color mgmt. */
const struct drm_format_info *format;
};
diff --git a/drivers/gpu/drm/sysfb/drm_sysfb_modeset.c b/drivers/gpu/drm/sysfb/drm_sysfb_modeset.c
index 1bcdb5ee8f09..ddb4a7523ee6 100644
--- a/drivers/gpu/drm/sysfb/drm_sysfb_modeset.c
+++ b/drivers/gpu/drm/sysfb/drm_sysfb_modeset.c
@@ -210,7 +210,12 @@ int drm_sysfb_plane_helper_atomic_check(struct drm_plane *plane,
else if (!new_plane_state->visible)
return 0;
- if (new_fb->format != sysfb->fb_format) {
+ new_crtc_state = drm_atomic_get_new_crtc_state(new_state, new_plane_state->crtc);
+
+ new_sysfb_crtc_state = to_drm_sysfb_crtc_state(new_crtc_state);
+ new_sysfb_crtc_state->format = sysfb->fb_format;
+
+ if (new_fb->format != new_sysfb_crtc_state->format) {
void *buf;
/* format conversion necessary; reserve buffer */
@@ -220,11 +225,6 @@ int drm_sysfb_plane_helper_atomic_check(struct drm_plane *plane,
return -ENOMEM;
}
- new_crtc_state = drm_atomic_get_new_crtc_state(new_state, new_plane_state->crtc);
-
- new_sysfb_crtc_state = to_drm_sysfb_crtc_state(new_crtc_state);
- new_sysfb_crtc_state->format = new_fb->format;
-
return 0;
}
EXPORT_SYMBOL(drm_sysfb_plane_helper_atomic_check);
@@ -238,7 +238,9 @@ void drm_sysfb_plane_helper_atomic_update(struct drm_plane *plane, struct drm_at
struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
struct drm_framebuffer *fb = plane_state->fb;
unsigned int dst_pitch = sysfb->fb_pitch;
- const struct drm_format_info *dst_format = sysfb->fb_format;
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, plane_state->crtc);
+ struct drm_sysfb_crtc_state *sysfb_crtc_state = to_drm_sysfb_crtc_state(crtc_state);
+ const struct drm_format_info *dst_format = sysfb_crtc_state->format;
struct drm_atomic_helper_damage_iter iter;
struct drm_rect damage;
int ret, idx;
@@ -370,16 +372,19 @@ EXPORT_SYMBOL(drm_sysfb_crtc_helper_atomic_check);
void drm_sysfb_crtc_reset(struct drm_crtc *crtc)
{
+ struct drm_sysfb_device *sysfb = to_drm_sysfb_device(crtc->dev);
struct drm_sysfb_crtc_state *sysfb_crtc_state;
if (crtc->state)
drm_sysfb_crtc_state_destroy(to_drm_sysfb_crtc_state(crtc->state));
sysfb_crtc_state = kzalloc(sizeof(*sysfb_crtc_state), GFP_KERNEL);
- if (sysfb_crtc_state)
+ if (sysfb_crtc_state) {
+ sysfb_crtc_state->format = sysfb->fb_format;
__drm_atomic_helper_crtc_reset(crtc, &sysfb_crtc_state->base);
- else
+ } else {
__drm_atomic_helper_crtc_reset(crtc, NULL);
+ }
}
EXPORT_SYMBOL(drm_sysfb_crtc_reset);
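
These hunks move the destination format from a per-device constant into the subclassed CRTC state and seed it in the reset hook, so ->format is always valid before the first atomic_check runs. The downcast used throughout is the usual container_of() idiom for subclassed atomic state; a sketch of what to_drm_sysfb_crtc_state() presumably expands to:

        static inline struct drm_sysfb_crtc_state *
        to_drm_sysfb_crtc_state(struct drm_crtc_state *base)
        {
                return container_of(base, struct drm_sysfb_crtc_state, base);
        }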
diff --git a/drivers/gpu/drm/sysfb/drm_sysfb_screen_info.c b/drivers/gpu/drm/sysfb/drm_sysfb_screen_info.c
index 0b3fb874a51f..885864168c54 100644
--- a/drivers/gpu/drm/sysfb/drm_sysfb_screen_info.c
+++ b/drivers/gpu/drm/sysfb/drm_sysfb_screen_info.c
@@ -79,22 +79,19 @@ const struct drm_format_info *drm_sysfb_get_format_si(struct drm_device *dev,
const struct screen_info *si)
{
const struct drm_format_info *format = NULL;
- u32 bits_per_pixel;
+ struct pixel_format pixel;
size_t i;
+ int ret;
- bits_per_pixel = __screen_info_lfb_bits_per_pixel(si);
+ ret = screen_info_pixel_format(si, &pixel);
+ if (ret)
+ return NULL;
for (i = 0; i < nformats; ++i) {
- const struct pixel_format *f = &formats[i].pixel;
-
- if (bits_per_pixel == f->bits_per_pixel &&
- si->red_size == f->red.length &&
- si->red_pos == f->red.offset &&
- si->green_size == f->green.length &&
- si->green_pos == f->green.offset &&
- si->blue_size == f->blue.length &&
- si->blue_pos == f->blue.offset) {
- format = drm_format_info(formats[i].fourcc);
+ const struct drm_sysfb_format *f = &formats[i];
+
+ if (pixel_format_equal(&pixel, &f->pixel)) {
+ format = drm_format_info(f->fourcc);
break;
}
}
diff --git a/drivers/gpu/drm/sysfb/simpledrm.c b/drivers/gpu/drm/sysfb/simpledrm.c
index 8530a3ef8a7a..0358164a623c 100644
--- a/drivers/gpu/drm/sysfb/simpledrm.c
+++ b/drivers/gpu/drm/sysfb/simpledrm.c
@@ -4,7 +4,7 @@
#include <linux/clk.h>
#include <linux/of_clk.h>
#include <linux/minmax.h>
-#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
#include <linux/platform_data/simplefb.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
@@ -179,22 +179,17 @@ simplefb_get_format_of(struct drm_device *dev, struct device_node *of_node)
static struct resource *
simplefb_get_memory_of(struct drm_device *dev, struct device_node *of_node)
{
- struct device_node *np;
- struct resource *res;
+ struct resource r, *res;
int err;
- np = of_parse_phandle(of_node, "memory-region", 0);
- if (!np)
+ err = of_reserved_mem_region_to_resource(of_node, 0, &r);
+ if (err)
return NULL;
- res = devm_kzalloc(dev->dev, sizeof(*res), GFP_KERNEL);
+ res = devm_kmemdup(dev->dev, &r, sizeof(r), GFP_KERNEL);
if (!res)
return ERR_PTR(-ENOMEM);
- err = of_address_to_resource(np, 0, res);
- if (err)
- return ERR_PTR(err);
-
if (of_property_present(of_node, "reg"))
drm_warn(dev, "preferring \"memory-region\" over \"reg\" property\n");
diff --git a/drivers/gpu/drm/sysfb/vesadrm.c b/drivers/gpu/drm/sysfb/vesadrm.c
index 90615e9ac86b..16a4b52d45c6 100644
--- a/drivers/gpu/drm/sysfb/vesadrm.c
+++ b/drivers/gpu/drm/sysfb/vesadrm.c
@@ -46,6 +46,7 @@ static const struct drm_format_info *vesadrm_get_format_si(struct drm_device *de
{ PIXEL_FORMAT_RGB888, DRM_FORMAT_RGB888, },
{ PIXEL_FORMAT_XRGB8888, DRM_FORMAT_XRGB8888, },
{ PIXEL_FORMAT_XBGR8888, DRM_FORMAT_XBGR8888, },
+ { PIXEL_FORMAT_C8, DRM_FORMAT_C8, },
};
return drm_sysfb_get_format_si(dev, formats, ARRAY_SIZE(formats), si);
@@ -82,7 +83,7 @@ static struct vesadrm_device *to_vesadrm_device(struct drm_device *dev)
}
/*
- * Palette
+ * Color LUT
*/
static void vesadrm_vga_cmap_write(struct vesadrm_device *vesa, unsigned int index,
@@ -128,7 +129,7 @@ static void vesadrm_pmi_cmap_write(struct vesadrm_device *vesa, unsigned int ind
}
#endif
-static void vesadrm_set_gamma_lut(struct drm_crtc *crtc, unsigned int index,
+static void vesadrm_set_color_lut(struct drm_crtc *crtc, unsigned int index,
u16 red, u16 green, u16 blue)
{
struct drm_device *dev = crtc->dev;
@@ -149,15 +150,15 @@ static void vesadrm_fill_gamma_lut(struct vesadrm_device *vesa,
switch (format->format) {
case DRM_FORMAT_XRGB1555:
- drm_crtc_fill_gamma_555(crtc, vesadrm_set_gamma_lut);
+ drm_crtc_fill_gamma_555(crtc, vesadrm_set_color_lut);
break;
case DRM_FORMAT_RGB565:
- drm_crtc_fill_gamma_565(crtc, vesadrm_set_gamma_lut);
+ drm_crtc_fill_gamma_565(crtc, vesadrm_set_color_lut);
break;
case DRM_FORMAT_RGB888:
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_BGRX8888:
- drm_crtc_fill_gamma_888(crtc, vesadrm_set_gamma_lut);
+ drm_crtc_fill_gamma_888(crtc, vesadrm_set_color_lut);
break;
default:
drm_warn_once(dev, "Unsupported format %p4cc for gamma correction\n",
@@ -175,15 +176,53 @@ static void vesadrm_load_gamma_lut(struct vesadrm_device *vesa,
switch (format->format) {
case DRM_FORMAT_XRGB1555:
- drm_crtc_load_gamma_555_from_888(crtc, lut, vesadrm_set_gamma_lut);
+ drm_crtc_load_gamma_555_from_888(crtc, lut, vesadrm_set_color_lut);
break;
case DRM_FORMAT_RGB565:
- drm_crtc_load_gamma_565_from_888(crtc, lut, vesadrm_set_gamma_lut);
+ drm_crtc_load_gamma_565_from_888(crtc, lut, vesadrm_set_color_lut);
break;
case DRM_FORMAT_RGB888:
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_BGRX8888:
- drm_crtc_load_gamma_888(crtc, lut, vesadrm_set_gamma_lut);
+ drm_crtc_load_gamma_888(crtc, lut, vesadrm_set_color_lut);
+ break;
+ default:
+ drm_warn_once(dev, "Unsupported format %p4cc for gamma correction\n",
+ &format->format);
+ break;
+ }
+}
+
+static void vesadrm_fill_palette_lut(struct vesadrm_device *vesa,
+ const struct drm_format_info *format)
+{
+ struct drm_device *dev = &vesa->sysfb.dev;
+ struct drm_crtc *crtc = &vesa->crtc;
+
+ switch (format->format) {
+ case DRM_FORMAT_C8:
+ drm_crtc_fill_palette_8(crtc, vesadrm_set_color_lut);
+ break;
+ case DRM_FORMAT_RGB332:
+ drm_crtc_fill_palette_332(crtc, vesadrm_set_color_lut);
+ break;
+ default:
+ drm_warn_once(dev, "Unsupported format %p4cc for palette\n",
+ &format->format);
+ break;
+ }
+}
+
+static void vesadrm_load_palette_lut(struct vesadrm_device *vesa,
+ const struct drm_format_info *format,
+ struct drm_color_lut *lut)
+{
+ struct drm_device *dev = &vesa->sysfb.dev;
+ struct drm_crtc *crtc = &vesa->crtc;
+
+ switch (format->format) {
+ case DRM_FORMAT_C8:
+ drm_crtc_load_palette_8(crtc, lut, vesadrm_set_color_lut);
break;
default:
drm_warn_once(dev, "Unsupported format %p4cc for gamma correction\n",
@@ -200,8 +239,67 @@ static const u64 vesadrm_primary_plane_format_modifiers[] = {
DRM_SYSFB_PLANE_FORMAT_MODIFIERS,
};
+static int vesadrm_primary_plane_helper_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *new_state)
+{
+ struct drm_sysfb_device *sysfb = to_drm_sysfb_device(plane->dev);
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(new_state, plane);
+ struct drm_framebuffer *new_fb = new_plane_state->fb;
+ struct drm_crtc_state *new_crtc_state;
+ struct drm_sysfb_crtc_state *new_sysfb_crtc_state;
+ int ret;
+
+ ret = drm_sysfb_plane_helper_atomic_check(plane, new_state);
+ if (ret)
+ return ret;
+ else if (!new_plane_state->visible)
+ return 0;
+
+ /*
+ * Fix up format conversion for specific cases
+ */
+
+ switch (sysfb->fb_format->format) {
+ case DRM_FORMAT_C8:
+ new_crtc_state = drm_atomic_get_new_crtc_state(new_state, new_plane_state->crtc);
+ new_sysfb_crtc_state = to_drm_sysfb_crtc_state(new_crtc_state);
+
+ switch (new_fb->format->format) {
+ case DRM_FORMAT_XRGB8888:
+ /*
+ * Reduce XRGB8888 to RGB332. Each resulting pixel is an index
+ * into the C8 hardware palette, which stores RGB332 colors.
+ */
+ if (new_sysfb_crtc_state->format->format != DRM_FORMAT_RGB332) {
+ new_sysfb_crtc_state->format =
+ drm_format_info(DRM_FORMAT_RGB332);
+ new_crtc_state->color_mgmt_changed = true;
+ }
+ break;
+ case DRM_FORMAT_C8:
+ /*
+ * Restore original output. Emulation of XRGB8888 set RGB332
+ * output format and hardware palette. This needs to be undone
+ * when we switch back to DRM_FORMAT_C8.
+ */
+ if (new_sysfb_crtc_state->format->format == DRM_FORMAT_RGB332) {
+ new_sysfb_crtc_state->format = sysfb->fb_format;
+ new_crtc_state->color_mgmt_changed = true;
+ }
+ break;
+ }
+ break;
+ }
+
+ return 0;
+}
+
static const struct drm_plane_helper_funcs vesadrm_primary_plane_helper_funcs = {
- DRM_SYSFB_PLANE_HELPER_FUNCS,
+ DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
+ .atomic_check = vesadrm_primary_plane_helper_atomic_check,
+ .atomic_update = drm_sysfb_plane_helper_atomic_update,
+ .atomic_disable = drm_sysfb_plane_helper_atomic_disable,
+ .get_scanout_buffer = drm_sysfb_plane_helper_get_scanout_buffer,
};
static const struct drm_plane_funcs vesadrm_primary_plane_funcs = {
@@ -223,15 +321,36 @@ static void vesadrm_crtc_helper_atomic_flush(struct drm_crtc *crtc,
* plane's color format.
*/
if (crtc_state->enable && crtc_state->color_mgmt_changed) {
- if (sysfb_crtc_state->format == sysfb->fb_format) {
- if (crtc_state->gamma_lut)
- vesadrm_load_gamma_lut(vesa,
- sysfb_crtc_state->format,
- crtc_state->gamma_lut->data);
- else
+ switch (sysfb->fb_format->format) {
+ /*
+ * Index formats
+ */
+ case DRM_FORMAT_C8:
+ if (sysfb_crtc_state->format->format == DRM_FORMAT_RGB332) {
+ vesadrm_fill_palette_lut(vesa, sysfb_crtc_state->format);
+ } else if (crtc->state->gamma_lut) {
+ vesadrm_load_palette_lut(vesa,
+ sysfb_crtc_state->format,
+ crtc_state->gamma_lut->data);
+ } else {
+ vesadrm_fill_palette_lut(vesa, sysfb_crtc_state->format);
+ }
+ break;
+ /*
+ * Component formats
+ */
+ default:
+ if (sysfb_crtc_state->format == sysfb->fb_format) {
+ if (crtc_state->gamma_lut)
+ vesadrm_load_gamma_lut(vesa,
+ sysfb_crtc_state->format,
+ crtc_state->gamma_lut->data);
+ else
+ vesadrm_fill_gamma_lut(vesa, sysfb_crtc_state->format);
+ } else {
vesadrm_fill_gamma_lut(vesa, sysfb_crtc_state->format);
- } else {
- vesadrm_fill_gamma_lut(vesa, sysfb_crtc_state->format);
+ }
+ break;
}
}
}
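
The C8 path works by programming the 256-entry hardware palette with RGB332 colors whenever XRGB8888 emulation is active, so each converted byte doubles as a palette index. A hypothetical helper illustrating the per-pixel reduction the shadow-plane conversion performs (the name is illustrative, not part of the patch):

        static u8 xrgb8888_to_rgb332(u32 pix)
        {
                u8 r = (pix >> 16) & 0xff;
                u8 g = (pix >> 8) & 0xff;
                u8 b = pix & 0xff;

                /* R in bits 7:5, G in bits 4:2, B in bits 1:0 */
                return (r & 0xe0) | ((g & 0xe0) >> 3) | (b >> 6);
        }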
diff --git a/drivers/gpu/drm/tests/drm_exec_test.c b/drivers/gpu/drm/tests/drm_exec_test.c
index d6c4dd1194a0..3a20c788c51f 100644
--- a/drivers/gpu/drm/tests/drm_exec_test.c
+++ b/drivers/gpu/drm/tests/drm_exec_test.c
@@ -150,14 +150,22 @@ static void test_prepare(struct kunit *test)
static void test_prepare_array(struct kunit *test)
{
struct drm_exec_priv *priv = test->priv;
- struct drm_gem_object gobj1 = { };
- struct drm_gem_object gobj2 = { };
- struct drm_gem_object *array[] = { &gobj1, &gobj2 };
+ struct drm_gem_object *gobj1;
+ struct drm_gem_object *gobj2;
+ struct drm_gem_object *array[] = {
+ (gobj1 = kunit_kzalloc(test, sizeof(*gobj1), GFP_KERNEL)),
+ (gobj2 = kunit_kzalloc(test, sizeof(*gobj2), GFP_KERNEL)),
+ };
struct drm_exec exec;
int ret;
- drm_gem_private_object_init(priv->drm, &gobj1, PAGE_SIZE);
- drm_gem_private_object_init(priv->drm, &gobj2, PAGE_SIZE);
+ if (!gobj1 || !gobj2) {
+ KUNIT_FAIL(test, "Failed to allocate GEM objects.\n");
+ return;
+ }
+
+ drm_gem_private_object_init(priv->drm, gobj1, PAGE_SIZE);
+ drm_gem_private_object_init(priv->drm, gobj2, PAGE_SIZE);
drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
drm_exec_until_all_locked(&exec)
@@ -166,8 +174,8 @@ static void test_prepare_array(struct kunit *test)
KUNIT_EXPECT_EQ(test, ret, 0);
drm_exec_fini(&exec);
- drm_gem_private_object_fini(&gobj1);
- drm_gem_private_object_fini(&gobj2);
+ drm_gem_private_object_fini(gobj1);
+ drm_gem_private_object_fini(gobj2);
}
static void test_multiple_loops(struct kunit *test)
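
Moving the GEM objects from the stack to kunit_kzalloc() ties their lifetime to the test case instead of the function frame, and KUnit frees them automatically when the test ends. The general idiom, sketched:

        struct drm_gem_object *gobj;

        gobj = kunit_kzalloc(test, sizeof(*gobj), GFP_KERNEL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gobj);
        /* use gobj; no explicit kfree(), KUnit releases the memory
         * after the test finishes, pass or fail */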
diff --git a/drivers/gpu/drm/tidss/tidss_crtc.c b/drivers/gpu/drm/tidss/tidss_crtc.c
index a2f40a5c7703..da89fd01c337 100644
--- a/drivers/gpu/drm/tidss/tidss_crtc.c
+++ b/drivers/gpu/drm/tidss/tidss_crtc.c
@@ -91,7 +91,7 @@ static int tidss_crtc_atomic_check(struct drm_crtc *crtc,
struct dispc_device *dispc = tidss->dispc;
struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
u32 hw_videoport = tcrtc->hw_videoport;
- const struct drm_display_mode *mode;
+ struct drm_display_mode *mode;
enum drm_mode_status ok;
dev_dbg(ddev->dev, "%s\n", __func__);
@@ -108,6 +108,9 @@ static int tidss_crtc_atomic_check(struct drm_crtc *crtc,
return -EINVAL;
}
+ if (drm_atomic_crtc_needs_modeset(crtc_state))
+ drm_mode_set_crtcinfo(mode, 0);
+
return dispc_vp_bus_check(dispc, hw_videoport, crtc_state);
}
@@ -225,7 +228,7 @@ static void tidss_crtc_atomic_enable(struct drm_crtc *crtc,
tidss_runtime_get(tidss);
r = dispc_vp_set_clk_rate(tidss->dispc, tcrtc->hw_videoport,
- mode->clock * 1000);
+ mode->crtc_clock * 1000);
if (r != 0)
return;
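
Programming from the crtc_* fields requires that drm_mode_set_crtcinfo() has filled them in, which the atomic_check above now guarantees on every modeset; the timing and clock setup later in the pipeline can then rely on the derived values. A sketch of the relationship, assuming no special adjustment flags:

        drm_mode_set_crtcinfo(mode, 0);
        /*
         * For a plain progressive mode, mode->crtc_clock == mode->clock
         * and the crtc_ timing fields mirror the raw ones; interlaced
         * or doublescan modes get adjusted crtc_ values instead.
         */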
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c
index c0277fa36425..7c8c15a5c39b 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc.c
+++ b/drivers/gpu/drm/tidss/tidss_dispc.c
@@ -4,6 +4,7 @@
* Author: Jyri Sarha <jsarha@ti.com>
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
@@ -594,79 +595,53 @@ void tidss_disable_oldi(struct tidss_device *tidss, u32 hw_videoport)
* number. For example 7:0
*/
-static u32 FLD_MASK(u32 start, u32 end)
-{
- return ((1 << (start - end + 1)) - 1) << end;
-}
-
-static u32 FLD_VAL(u32 val, u32 start, u32 end)
-{
- return (val << end) & FLD_MASK(start, end);
-}
-
-static u32 FLD_GET(u32 val, u32 start, u32 end)
-{
- return (val & FLD_MASK(start, end)) >> end;
-}
-
-static u32 FLD_MOD(u32 orig, u32 val, u32 start, u32 end)
-{
- return (orig & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end);
-}
-
-static u32 REG_GET(struct dispc_device *dispc, u32 idx, u32 start, u32 end)
-{
- return FLD_GET(dispc_read(dispc, idx), start, end);
-}
-
-static void REG_FLD_MOD(struct dispc_device *dispc, u32 idx, u32 val,
- u32 start, u32 end)
-{
- dispc_write(dispc, idx, FLD_MOD(dispc_read(dispc, idx), val,
- start, end));
-}
-
-static u32 VID_REG_GET(struct dispc_device *dispc, u32 hw_plane, u32 idx,
- u32 start, u32 end)
-{
- return FLD_GET(dispc_vid_read(dispc, hw_plane, idx), start, end);
-}
-
-static void VID_REG_FLD_MOD(struct dispc_device *dispc, u32 hw_plane, u32 idx,
- u32 val, u32 start, u32 end)
-{
- dispc_vid_write(dispc, hw_plane, idx,
- FLD_MOD(dispc_vid_read(dispc, hw_plane, idx),
- val, start, end));
-}
-
-static u32 VP_REG_GET(struct dispc_device *dispc, u32 vp, u32 idx,
- u32 start, u32 end)
-{
- return FLD_GET(dispc_vp_read(dispc, vp, idx), start, end);
-}
-
-static void VP_REG_FLD_MOD(struct dispc_device *dispc, u32 vp, u32 idx, u32 val,
- u32 start, u32 end)
-{
- dispc_vp_write(dispc, vp, idx, FLD_MOD(dispc_vp_read(dispc, vp, idx),
- val, start, end));
-}
-
-__maybe_unused
-static u32 OVR_REG_GET(struct dispc_device *dispc, u32 ovr, u32 idx,
- u32 start, u32 end)
-{
- return FLD_GET(dispc_ovr_read(dispc, ovr, idx), start, end);
-}
-
-static void OVR_REG_FLD_MOD(struct dispc_device *dispc, u32 ovr, u32 idx,
- u32 val, u32 start, u32 end)
-{
- dispc_ovr_write(dispc, ovr, idx,
- FLD_MOD(dispc_ovr_read(dispc, ovr, idx),
- val, start, end));
-}
+#define REG_GET(dispc, idx, mask) \
+ ((u32)FIELD_GET((mask), dispc_read((dispc), (idx))))
+
+#define REG_FLD_MOD(dispc, idx, val, mask) \
+ ({ \
+ struct dispc_device *_dispc = (dispc); \
+ u32 _idx = (idx); \
+ u32 _reg = dispc_read(_dispc, _idx); \
+ FIELD_MODIFY((mask), &_reg, (val)); \
+ dispc_write(_dispc, _idx, _reg); \
+ })
+
+#define VID_REG_GET(dispc, hw_plane, idx, mask) \
+ ((u32)FIELD_GET((mask), dispc_vid_read((dispc), (hw_plane), (idx))))
+
+#define VID_REG_FLD_MOD(dispc, hw_plane, idx, val, mask) \
+ ({ \
+ struct dispc_device *_dispc = (dispc); \
+ u32 _hw_plane = (hw_plane); \
+ u32 _idx = (idx); \
+ u32 _reg = dispc_vid_read(_dispc, _hw_plane, _idx); \
+ FIELD_MODIFY((mask), &_reg, (val)); \
+ dispc_vid_write(_dispc, _hw_plane, _idx, _reg); \
+ })
+
+#define VP_REG_GET(dispc, vp, idx, mask) \
+ ((u32)FIELD_GET((mask), dispc_vp_read((dispc), (vp), (idx))))
+
+#define VP_REG_FLD_MOD(dispc, vp, idx, val, mask) \
+ ({ \
+ struct dispc_device *_dispc = (dispc); \
+ u32 _vp = (vp); \
+ u32 _idx = (idx); \
+ u32 _reg = dispc_vp_read(_dispc, _vp, _idx); \
+ FIELD_MODIFY((mask), &_reg, (val)); \
+ dispc_vp_write(_dispc, _vp, _idx, _reg); \
+ })
+
+#define OVR_REG_FLD_MOD(dispc, ovr, idx, val, mask) \
+ ({ \
+ struct dispc_device *_dispc = (dispc); \
+ u32 _ovr = (ovr); \
+ u32 _idx = (idx); \
+ u32 _reg = dispc_ovr_read(_dispc, _ovr, _idx); \
+ FIELD_MODIFY((mask), &_reg, (val)); \
+ dispc_ovr_write(_dispc, _ovr, _idx, _reg); \
+ })
static dispc_irq_t dispc_vp_irq_from_raw(u32 stat, u32 hw_videoport)
{
@@ -1139,7 +1114,8 @@ static void dispc_set_num_datalines(struct dispc_device *dispc,
v = 3;
}
- VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, v, 10, 8);
+ VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, v,
+ DISPC_VP_CONTROL_DATALINES_MASK);
}
static void dispc_enable_am65x_oldi(struct dispc_device *dispc, u32 hw_videoport,
@@ -1162,7 +1138,8 @@ static void dispc_enable_am65x_oldi(struct dispc_device *dispc, u32 hw_videoport
oldi_cfg |= BIT(7); /* DEPOL */
- oldi_cfg = FLD_MOD(oldi_cfg, fmt->am65x_oldi_mode_reg_val, 3, 1);
+ FIELD_MODIFY(DISPC_VP_DSS_OLDI_CFG_MAP_MASK, &oldi_cfg,
+ fmt->am65x_oldi_mode_reg_val);
oldi_cfg |= BIT(12); /* SOFTRST */
@@ -1215,23 +1192,23 @@ void dispc_vp_enable(struct dispc_device *dispc, u32 hw_videoport,
dispc_set_num_datalines(dispc, hw_videoport, fmt->data_width);
- hfp = mode->hsync_start - mode->hdisplay;
- hsw = mode->hsync_end - mode->hsync_start;
- hbp = mode->htotal - mode->hsync_end;
+ hfp = mode->crtc_hsync_start - mode->crtc_hdisplay;
+ hsw = mode->crtc_hsync_end - mode->crtc_hsync_start;
+ hbp = mode->crtc_htotal - mode->crtc_hsync_end;
- vfp = mode->vsync_start - mode->vdisplay;
- vsw = mode->vsync_end - mode->vsync_start;
- vbp = mode->vtotal - mode->vsync_end;
+ vfp = mode->crtc_vsync_start - mode->crtc_vdisplay;
+ vsw = mode->crtc_vsync_end - mode->crtc_vsync_start;
+ vbp = mode->crtc_vtotal - mode->crtc_vsync_end;
dispc_vp_write(dispc, hw_videoport, DISPC_VP_TIMING_H,
- FLD_VAL(hsw - 1, 7, 0) |
- FLD_VAL(hfp - 1, 19, 8) |
- FLD_VAL(hbp - 1, 31, 20));
+ FIELD_PREP(DISPC_VP_TIMING_H_SYNC_PULSE_MASK, hsw - 1) |
+ FIELD_PREP(DISPC_VP_TIMING_H_FRONT_PORCH_MASK, hfp - 1) |
+ FIELD_PREP(DISPC_VP_TIMING_H_BACK_PORCH_MASK, hbp - 1));
dispc_vp_write(dispc, hw_videoport, DISPC_VP_TIMING_V,
- FLD_VAL(vsw - 1, 7, 0) |
- FLD_VAL(vfp, 19, 8) |
- FLD_VAL(vbp, 31, 20));
+ FIELD_PREP(DISPC_VP_TIMING_V_SYNC_PULSE_MASK, vsw - 1) |
+ FIELD_PREP(DISPC_VP_TIMING_V_FRONT_PORCH_MASK, vfp) |
+ FIELD_PREP(DISPC_VP_TIMING_V_BACK_PORCH_MASK, vbp));
ivs = !!(mode->flags & DRM_MODE_FLAG_NVSYNC);
@@ -1254,24 +1231,28 @@ void dispc_vp_enable(struct dispc_device *dispc, u32 hw_videoport,
ieo = false;
dispc_vp_write(dispc, hw_videoport, DISPC_VP_POL_FREQ,
- FLD_VAL(align, 18, 18) |
- FLD_VAL(onoff, 17, 17) |
- FLD_VAL(rf, 16, 16) |
- FLD_VAL(ieo, 15, 15) |
- FLD_VAL(ipc, 14, 14) |
- FLD_VAL(ihs, 13, 13) |
- FLD_VAL(ivs, 12, 12));
+ FIELD_PREP(DISPC_VP_POL_FREQ_ALIGN_MASK, align) |
+ FIELD_PREP(DISPC_VP_POL_FREQ_ONOFF_MASK, onoff) |
+ FIELD_PREP(DISPC_VP_POL_FREQ_RF_MASK, rf) |
+ FIELD_PREP(DISPC_VP_POL_FREQ_IEO_MASK, ieo) |
+ FIELD_PREP(DISPC_VP_POL_FREQ_IPC_MASK, ipc) |
+ FIELD_PREP(DISPC_VP_POL_FREQ_IHS_MASK, ihs) |
+ FIELD_PREP(DISPC_VP_POL_FREQ_IVS_MASK, ivs));
dispc_vp_write(dispc, hw_videoport, DISPC_VP_SIZE_SCREEN,
- FLD_VAL(mode->hdisplay - 1, 11, 0) |
- FLD_VAL(mode->vdisplay - 1, 27, 16));
+ FIELD_PREP(DISPC_VP_SIZE_SCREEN_HDISPLAY_MASK,
+ mode->crtc_hdisplay - 1) |
+ FIELD_PREP(DISPC_VP_SIZE_SCREEN_VDISPLAY_MASK,
+ mode->crtc_vdisplay - 1));
- VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, 1, 0, 0);
+ VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, 1,
+ DISPC_VP_CONTROL_ENABLE_MASK);
}
void dispc_vp_disable(struct dispc_device *dispc, u32 hw_videoport)
{
- VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, 0, 0, 0);
+ VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, 0,
+ DISPC_VP_CONTROL_ENABLE_MASK);
}
void dispc_vp_unprepare(struct dispc_device *dispc, u32 hw_videoport)
@@ -1285,13 +1266,16 @@ void dispc_vp_unprepare(struct dispc_device *dispc, u32 hw_videoport)
bool dispc_vp_go_busy(struct dispc_device *dispc, u32 hw_videoport)
{
- return VP_REG_GET(dispc, hw_videoport, DISPC_VP_CONTROL, 5, 5);
+ return VP_REG_GET(dispc, hw_videoport, DISPC_VP_CONTROL,
+ DISPC_VP_CONTROL_GOBIT_MASK);
}
void dispc_vp_go(struct dispc_device *dispc, u32 hw_videoport)
{
- WARN_ON(VP_REG_GET(dispc, hw_videoport, DISPC_VP_CONTROL, 5, 5));
- VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, 1, 5, 5);
+ WARN_ON(VP_REG_GET(dispc, hw_videoport, DISPC_VP_CONTROL,
+ DISPC_VP_CONTROL_GOBIT_MASK));
+ VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, 1,
+ DISPC_VP_CONTROL_GOBIT_MASK);
}
enum c8_to_c12_mode { C8_TO_C12_REPLICATE, C8_TO_C12_MAX, C8_TO_C12_MIN };
@@ -1491,11 +1475,11 @@ static void dispc_am65x_ovr_set_plane(struct dispc_device *dispc,
u32 hw_id = dispc->feat->vid_info[hw_plane].hw_id;
OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer),
- hw_id, 4, 1);
- OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer),
- x, 17, 6);
- OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer),
- y, 30, 19);
+ hw_id, DISPC_OVR_ATTRIBUTES_CHANNELIN_MASK);
+ OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer), x,
+ DISPC_OVR_ATTRIBUTES_POSX_MASK);
+ OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer), y,
+ DISPC_OVR_ATTRIBUTES_POSY_MASK);
}
static void dispc_j721e_ovr_set_plane(struct dispc_device *dispc,
@@ -1505,11 +1489,11 @@ static void dispc_j721e_ovr_set_plane(struct dispc_device *dispc,
u32 hw_id = dispc->feat->vid_info[hw_plane].hw_id;
OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer),
- hw_id, 4, 1);
- OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES2(layer),
- x, 13, 0);
- OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES2(layer),
- y, 29, 16);
+ hw_id, DISPC_OVR_ATTRIBUTES_CHANNELIN_MASK);
+ OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES2(layer), x,
+ DISPC_OVR_ATTRIBUTES2_POSX_MASK);
+ OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES2(layer), y,
+ DISPC_OVR_ATTRIBUTES2_POSY_MASK);
}
void dispc_ovr_set_plane(struct dispc_device *dispc, u32 hw_plane,
@@ -1544,7 +1528,7 @@ void dispc_ovr_enable_layer(struct dispc_device *dispc,
return;
OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer),
- !!enable, 0, 0);
+ !!enable, DISPC_OVR_ATTRIBUTES_ENABLE_MASK);
}
/* CSC */
@@ -1580,14 +1564,14 @@ struct dispc_csc_coef {
static
void dispc_csc_offset_regval(const struct dispc_csc_coef *csc, u32 *regval)
{
-#define OVAL(x, y) (FLD_VAL(x, 15, 3) | FLD_VAL(y, 31, 19))
+#define OVAL(x, y) (FIELD_PREP(GENMASK(15, 3), x) | FIELD_PREP(GENMASK(31, 19), y))
regval[5] = OVAL(csc->preoffset[0], csc->preoffset[1]);
regval[6] = OVAL(csc->preoffset[2], csc->postoffset[0]);
regval[7] = OVAL(csc->postoffset[1], csc->postoffset[2]);
#undef OVAL
}
-#define CVAL(x, y) (FLD_VAL(x, 10, 0) | FLD_VAL(y, 26, 16))
+#define CVAL(x, y) (FIELD_PREP(GENMASK(10, 0), x) | FIELD_PREP(GENMASK(26, 16), y))
static
void dispc_csc_yuv2rgb_regval(const struct dispc_csc_coef *csc, u32 *regval)
{
@@ -1767,7 +1751,8 @@ static void dispc_vid_csc_setup(struct dispc_device *dispc, u32 hw_plane,
static void dispc_vid_csc_enable(struct dispc_device *dispc, u32 hw_plane,
bool enable)
{
- VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, !!enable, 9, 9);
+ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, !!enable,
+ DISPC_VID_ATTRIBUTES_COLORCONVENABLE_MASK);
}
/* SCALER */
@@ -1826,7 +1811,8 @@ static void dispc_vid_write_fir_coefs(struct dispc_device *dispc,
c1 = coefs->c1[phase];
c2 = coefs->c2[phase];
- c12 = FLD_VAL(c1, 19, 10) | FLD_VAL(c2, 29, 20);
+ c12 = FIELD_PREP(GENMASK(19, 10), c1) | FIELD_PREP(GENMASK(29, 20),
+ c2);
dispc_vid_write(dispc, hw_plane, reg, c12);
}
@@ -2023,20 +2009,20 @@ static void dispc_vid_set_scaling(struct dispc_device *dispc,
u32 fourcc)
{
/* HORIZONTAL RESIZE ENABLE */
- VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES,
- sp->scale_x, 7, 7);
+ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, sp->scale_x,
+ DISPC_VID_ATTRIBUTES_HRESIZEENABLE_MASK);
/* VERTICAL RESIZE ENABLE */
- VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES,
- sp->scale_y, 8, 8);
+ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, sp->scale_y,
+ DISPC_VID_ATTRIBUTES_VRESIZEENABLE_MASK);
/* Skip the rest if no scaling is used */
if (!sp->scale_x && !sp->scale_y)
return;
/* VERTICAL 5-TAPS */
- VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES,
- sp->five_taps, 21, 21);
+ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, sp->five_taps,
+ DISPC_VID_ATTRIBUTES_VERTICALTAPS_MASK);
if (dispc_fourcc_is_yuv(fourcc)) {
if (sp->scale_x) {
@@ -2126,7 +2112,7 @@ static void dispc_plane_set_pixel_format(struct dispc_device *dispc,
if (dispc_color_formats[i].fourcc == fourcc) {
VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES,
dispc_color_formats[i].dss_code,
- 6, 1);
+ DISPC_VID_ATTRIBUTES_FORMAT_MASK);
return;
}
}
@@ -2248,7 +2234,8 @@ void dispc_plane_setup(struct dispc_device *dispc, u32 hw_plane,
dispc_vid_write(dispc, hw_plane, DISPC_VID_BA_EXT_1, (u64)dma_addr >> 32);
dispc_vid_write(dispc, hw_plane, DISPC_VID_PICTURE_SIZE,
- (scale.in_w - 1) | ((scale.in_h - 1) << 16));
+ FIELD_PREP(DISPC_VID_PICTURE_SIZE_MEMSIZEY_MASK, scale.in_h - 1) |
+ FIELD_PREP(DISPC_VID_PICTURE_SIZE_MEMSIZEX_MASK, scale.in_w - 1));
/* For YUV422 format we use the macropixel size for pixel inc */
if (fourcc == DRM_FORMAT_YUYV || fourcc == DRM_FORMAT_UYVY)
@@ -2285,8 +2272,10 @@ void dispc_plane_setup(struct dispc_device *dispc, u32 hw_plane,
if (!lite) {
dispc_vid_write(dispc, hw_plane, DISPC_VID_SIZE,
- (state->crtc_w - 1) |
- ((state->crtc_h - 1) << 16));
+ FIELD_PREP(DISPC_VID_SIZE_SIZEY_MASK,
+ state->crtc_h - 1) |
+ FIELD_PREP(DISPC_VID_SIZE_SIZEX_MASK,
+ state->crtc_w - 1));
dispc_vid_set_scaling(dispc, hw_plane, &scale, fourcc);
}
@@ -2300,38 +2289,45 @@ void dispc_plane_setup(struct dispc_device *dispc, u32 hw_plane,
}
dispc_vid_write(dispc, hw_plane, DISPC_VID_GLOBAL_ALPHA,
- 0xFF & (state->alpha >> 8));
+ FIELD_PREP(DISPC_VID_GLOBAL_ALPHA_GLOBALALPHA_MASK,
+ state->alpha >> 8));
if (state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI)
VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, 1,
- 28, 28);
+ DISPC_VID_ATTRIBUTES_PREMULTIPLYALPHA_MASK);
else
VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, 0,
- 28, 28);
+ DISPC_VID_ATTRIBUTES_PREMULTIPLYALPHA_MASK);
}
void dispc_plane_enable(struct dispc_device *dispc, u32 hw_plane, bool enable)
{
- VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, !!enable, 0, 0);
+ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, !!enable,
+ DISPC_VID_ATTRIBUTES_ENABLE_MASK);
}
static u32 dispc_vid_get_fifo_size(struct dispc_device *dispc, u32 hw_plane)
{
- return VID_REG_GET(dispc, hw_plane, DISPC_VID_BUF_SIZE_STATUS, 15, 0);
+ return VID_REG_GET(dispc, hw_plane, DISPC_VID_BUF_SIZE_STATUS,
+ DISPC_VID_BUF_SIZE_STATUS_BUFSIZE_MASK);
}
static void dispc_vid_set_mflag_threshold(struct dispc_device *dispc,
u32 hw_plane, u32 low, u32 high)
{
dispc_vid_write(dispc, hw_plane, DISPC_VID_MFLAG_THRESHOLD,
- FLD_VAL(high, 31, 16) | FLD_VAL(low, 15, 0));
+ FIELD_PREP(DISPC_VID_MFLAG_THRESHOLD_HT_MFLAG_MASK, high) |
+ FIELD_PREP(DISPC_VID_MFLAG_THRESHOLD_LT_MFLAG_MASK, low));
}
static void dispc_vid_set_buf_threshold(struct dispc_device *dispc,
u32 hw_plane, u32 low, u32 high)
{
dispc_vid_write(dispc, hw_plane, DISPC_VID_BUF_THRESHOLD,
- FLD_VAL(high, 31, 16) | FLD_VAL(low, 15, 0));
+ FIELD_PREP(DISPC_VID_BUF_THRESHOLD_BUFHIGHTHRESHOLD_MASK,
+ high) |
+ FIELD_PREP(DISPC_VID_BUF_THRESHOLD_BUFLOWTHRESHOLD_MASK,
+ low));
}
static void dispc_k2g_plane_init(struct dispc_device *dispc)
@@ -2341,9 +2337,11 @@ static void dispc_k2g_plane_init(struct dispc_device *dispc)
dev_dbg(dispc->dev, "%s()\n", __func__);
/* MFLAG_CTRL = ENABLED */
- REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 2, 1, 0);
+ REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 2,
+ DISPC_GLOBAL_MFLAG_ATTRIBUTE_MFLAG_CTRL_MASK);
/* MFLAG_START = MFLAGNORMALSTARTMODE */
- REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 0, 6, 6);
+ REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 0,
+ DISPC_GLOBAL_MFLAG_ATTRIBUTE_MFLAG_START_MASK);
for (hw_plane = 0; hw_plane < dispc->feat->num_vids; hw_plane++) {
u32 size = dispc_vid_get_fifo_size(dispc, hw_plane);
@@ -2380,7 +2378,7 @@ static void dispc_k2g_plane_init(struct dispc_device *dispc)
* register is ignored.
*/
VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, 1,
- 19, 19);
+ DISPC_VID_ATTRIBUTES_BUFPRELOAD_MASK);
}
}
@@ -2392,13 +2390,15 @@ static void dispc_k3_plane_init(struct dispc_device *dispc)
dev_dbg(dispc->dev, "%s()\n", __func__);
- REG_FLD_MOD(dispc, DSS_CBA_CFG, cba_lo_pri, 2, 0);
- REG_FLD_MOD(dispc, DSS_CBA_CFG, cba_hi_pri, 5, 3);
+ REG_FLD_MOD(dispc, DSS_CBA_CFG, cba_lo_pri, DSS_CBA_CFG_PRI_LO_MASK);
+ REG_FLD_MOD(dispc, DSS_CBA_CFG, cba_hi_pri, DSS_CBA_CFG_PRI_HI_MASK);
/* MFLAG_CTRL = ENABLED */
- REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 2, 1, 0);
+ REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 2,
+ DISPC_GLOBAL_MFLAG_ATTRIBUTE_MFLAG_CTRL_MASK);
/* MFLAG_START = MFLAGNORMALSTARTMODE */
- REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 0, 6, 6);
+ REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 0,
+ DISPC_GLOBAL_MFLAG_ATTRIBUTE_MFLAG_START_MASK);
for (hw_plane = 0; hw_plane < dispc->feat->num_vids; hw_plane++) {
u32 size = dispc_vid_get_fifo_size(dispc, hw_plane);
@@ -2431,7 +2431,7 @@ static void dispc_k3_plane_init(struct dispc_device *dispc)
/* Prefetch up to PRELOAD value */
VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, 0,
- 19, 19);
+ DISPC_VID_ATTRIBUTES_BUFPRELOAD_MASK);
}
}
@@ -2461,7 +2461,8 @@ static void dispc_vp_init(struct dispc_device *dispc)
/* Enable the gamma Shadow bit-field for all VPs*/
for (i = 0; i < dispc->feat->num_vps; i++)
- VP_REG_FLD_MOD(dispc, i, DISPC_VP_CONFIG, 1, 2, 2);
+ VP_REG_FLD_MOD(dispc, i, DISPC_VP_CONFIG, 1,
+ DISPC_VP_CONFIG_GAMMAENABLE_MASK);
}
static void dispc_initial_config(struct dispc_device *dispc)
@@ -2472,8 +2473,8 @@ static void dispc_initial_config(struct dispc_device *dispc)
/* Note: Hardcoded DPI routing on J721E for now */
if (dispc->feat->subrev == DISPC_J721E) {
dispc_write(dispc, DISPC_CONNECTIONS,
- FLD_VAL(2, 3, 0) | /* VP1 to DPI0 */
- FLD_VAL(8, 7, 4) /* VP3 to DPI1 */
+ FIELD_PREP(DISPC_CONNECTIONS_DPI_0_CONN_MASK, 2) | /* VP1 to DPI0 */
+ FIELD_PREP(DISPC_CONNECTIONS_DPI_1_CONN_MASK, 8) /* VP3 to DPI1 */
);
}
}
@@ -2651,8 +2652,8 @@ static void dispc_k2g_cpr_from_ctm(const struct drm_color_ctm *ctm,
cpr->m[CSC_BB] = dispc_S31_32_to_s2_8(ctm->matrix[8]);
}
-#define CVAL(xR, xG, xB) (FLD_VAL(xR, 9, 0) | FLD_VAL(xG, 20, 11) | \
- FLD_VAL(xB, 31, 22))
+#define CVAL(xR, xG, xB) (FIELD_PREP(GENMASK(9, 0), xR) | FIELD_PREP(GENMASK(20, 11), xG) | \
+ FIELD_PREP(GENMASK(31, 22), xB))
static void dispc_k2g_vp_csc_cpr_regval(const struct dispc_csc_coef *csc,
u32 *regval)
@@ -2694,8 +2695,8 @@ static void dispc_k2g_vp_set_ctm(struct dispc_device *dispc, u32 hw_videoport,
cprenable = 1;
}
- VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONFIG,
- cprenable, 15, 15);
+ VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONFIG, cprenable,
+ DISPC_VP_CONFIG_CPR_MASK);
}
static s16 dispc_S31_32_to_s3_8(s64 coef)
@@ -2760,8 +2761,8 @@ static void dispc_k3_vp_set_ctm(struct dispc_device *dispc, u32 hw_videoport,
colorconvenable = 1;
}
- VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONFIG,
- colorconvenable, 24, 24);
+ VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONFIG, colorconvenable,
+ DISPC_VP_CONFIG_COLORCONVENABLE_MASK);
}
static void dispc_vp_set_color_mgmt(struct dispc_device *dispc,
@@ -2816,26 +2817,26 @@ int dispc_runtime_resume(struct dispc_device *dispc)
clk_prepare_enable(dispc->fclk);
- if (REG_GET(dispc, DSS_SYSSTATUS, 0, 0) == 0)
+ if (REG_GET(dispc, DSS_SYSSTATUS, DSS_SYSSTATUS_DISPC_FUNC_RESETDONE) == 0)
dev_warn(dispc->dev, "DSS FUNC RESET not done!\n");
dev_dbg(dispc->dev, "OMAP DSS7 rev 0x%x\n",
dispc_read(dispc, DSS_REVISION));
dev_dbg(dispc->dev, "VP RESETDONE %d,%d,%d\n",
- REG_GET(dispc, DSS_SYSSTATUS, 1, 1),
- REG_GET(dispc, DSS_SYSSTATUS, 2, 2),
- REG_GET(dispc, DSS_SYSSTATUS, 3, 3));
+ REG_GET(dispc, DSS_SYSSTATUS, GENMASK(1, 1)),
+ REG_GET(dispc, DSS_SYSSTATUS, GENMASK(2, 2)),
+ REG_GET(dispc, DSS_SYSSTATUS, GENMASK(3, 3)));
if (dispc->feat->subrev == DISPC_AM625 ||
dispc->feat->subrev == DISPC_AM65X)
dev_dbg(dispc->dev, "OLDI RESETDONE %d,%d,%d\n",
- REG_GET(dispc, DSS_SYSSTATUS, 5, 5),
- REG_GET(dispc, DSS_SYSSTATUS, 6, 6),
- REG_GET(dispc, DSS_SYSSTATUS, 7, 7));
+ REG_GET(dispc, DSS_SYSSTATUS, GENMASK(5, 5)),
+ REG_GET(dispc, DSS_SYSSTATUS, GENMASK(6, 6)),
+ REG_GET(dispc, DSS_SYSSTATUS, GENMASK(7, 7)));
dev_dbg(dispc->dev, "DISPC IDLE %d\n",
- REG_GET(dispc, DSS_SYSSTATUS, 9, 9));
+ REG_GET(dispc, DSS_SYSSTATUS, DSS_SYSSTATUS_DISPC_IDLE_STATUS));
dispc_initial_config(dispc);
@@ -2912,7 +2913,8 @@ static void dispc_softreset_k2g(struct dispc_device *dispc)
spin_unlock_irqrestore(&dispc->tidss->irq_lock, flags);
for (unsigned int vp_idx = 0; vp_idx < dispc->feat->num_vps; ++vp_idx)
- VP_REG_FLD_MOD(dispc, vp_idx, DISPC_VP_CONTROL, 0, 0, 0);
+ VP_REG_FLD_MOD(dispc, vp_idx, DISPC_VP_CONTROL, 0,
+ DISPC_VP_CONTROL_ENABLE_MASK);
}
static int dispc_softreset(struct dispc_device *dispc)
@@ -2926,7 +2928,7 @@ static int dispc_softreset(struct dispc_device *dispc)
}
/* Soft reset */
- REG_FLD_MOD(dispc, DSS_SYSCONFIG, 1, 1, 1);
+ REG_FLD_MOD(dispc, DSS_SYSCONFIG, 1, DSS_SYSCONFIG_SOFTRESET_MASK);
/* Wait for reset to complete */
ret = readl_poll_timeout(dispc->base_common + DSS_SYSSTATUS,
val, val & 1, 100, 5000);
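
The open-coded FLD_* helpers are replaced by <linux/bitfield.h>: field positions become GENMASK() constants in the register headers and read-modify-write goes through FIELD_MODIFY(). A sketch of what one VP_REG_FLD_MOD() call expands to under the new macros:

        u32 reg = dispc_vp_read(dispc, hw_videoport, DISPC_VP_CONTROL);

        /* replace bits 10:8 with v, leaving all other bits untouched */
        FIELD_MODIFY(DISPC_VP_CONTROL_DATALINES_MASK, &reg, v);
        dispc_vp_write(dispc, hw_videoport, DISPC_VP_CONTROL, reg);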
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.h b/drivers/gpu/drm/tidss/tidss_dispc.h
index b8614f62186c..60c1b400eb89 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc.h
+++ b/drivers/gpu/drm/tidss/tidss_dispc.h
@@ -7,11 +7,14 @@
#ifndef __TIDSS_DISPC_H__
#define __TIDSS_DISPC_H__
+#include <drm/drm_color_mgmt.h>
+
#include "tidss_drv.h"
struct dispc_device;
struct drm_crtc_state;
+struct drm_plane_state;
enum tidss_gamma_type { TIDSS_GAMMA_8BIT, TIDSS_GAMMA_10BIT };
diff --git a/drivers/gpu/drm/tidss/tidss_dispc_regs.h b/drivers/gpu/drm/tidss/tidss_dispc_regs.h
index 50a3f28250ef..382027dddce8 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc_regs.h
+++ b/drivers/gpu/drm/tidss/tidss_dispc_regs.h
@@ -56,7 +56,12 @@ enum dispc_common_regs {
#define DSS_REVISION REG(DSS_REVISION)
#define DSS_SYSCONFIG REG(DSS_SYSCONFIG)
+#define DSS_SYSCONFIG_SOFTRESET_MASK GENMASK(1, 1)
+
#define DSS_SYSSTATUS REG(DSS_SYSSTATUS)
+#define DSS_SYSSTATUS_DISPC_IDLE_STATUS GENMASK(9, 9)
+#define DSS_SYSSTATUS_DISPC_FUNC_RESETDONE GENMASK(0, 0)
+
#define DISPC_IRQ_EOI REG(DISPC_IRQ_EOI)
#define DISPC_IRQSTATUS_RAW REG(DISPC_IRQSTATUS_RAW)
#define DISPC_IRQSTATUS REG(DISPC_IRQSTATUS)
@@ -70,9 +75,15 @@ enum dispc_common_regs {
#define WB_IRQSTATUS REG(WB_IRQSTATUS)
#define DISPC_GLOBAL_MFLAG_ATTRIBUTE REG(DISPC_GLOBAL_MFLAG_ATTRIBUTE)
+#define DISPC_GLOBAL_MFLAG_ATTRIBUTE_MFLAG_START_MASK GENMASK(6, 6)
+#define DISPC_GLOBAL_MFLAG_ATTRIBUTE_MFLAG_CTRL_MASK GENMASK(1, 0)
+
#define DISPC_GLOBAL_OUTPUT_ENABLE REG(DISPC_GLOBAL_OUTPUT_ENABLE)
#define DISPC_GLOBAL_BUFFER REG(DISPC_GLOBAL_BUFFER)
#define DSS_CBA_CFG REG(DSS_CBA_CFG)
+#define DSS_CBA_CFG_PRI_HI_MASK GENMASK(5, 3)
+#define DSS_CBA_CFG_PRI_LO_MASK GENMASK(2, 0)
+
#define DISPC_DBG_CONTROL REG(DISPC_DBG_CONTROL)
#define DISPC_DBG_STATUS REG(DISPC_DBG_STATUS)
#define DISPC_CLKGATING_DISABLE REG(DISPC_CLKGATING_DISABLE)
@@ -88,6 +99,9 @@ enum dispc_common_regs {
#define FBDC_CONSTANT_COLOR_0 REG(FBDC_CONSTANT_COLOR_0)
#define FBDC_CONSTANT_COLOR_1 REG(FBDC_CONSTANT_COLOR_1)
#define DISPC_CONNECTIONS REG(DISPC_CONNECTIONS)
+#define DISPC_CONNECTIONS_DPI_1_CONN_MASK GENMASK(7, 4)
+#define DISPC_CONNECTIONS_DPI_0_CONN_MASK GENMASK(3, 0)
+
#define DISPC_MSS_VP1 REG(DISPC_MSS_VP1)
#define DISPC_MSS_VP3 REG(DISPC_MSS_VP3)
@@ -102,13 +116,27 @@ enum dispc_common_regs {
#define DISPC_VID_ACCUV2_0 0x18
#define DISPC_VID_ACCUV2_1 0x1c
#define DISPC_VID_ATTRIBUTES 0x20
+#define DISPC_VID_ATTRIBUTES_PREMULTIPLYALPHA_MASK GENMASK(28, 28)
+#define DISPC_VID_ATTRIBUTES_VERTICALTAPS_MASK GENMASK(21, 21)
+#define DISPC_VID_ATTRIBUTES_BUFPRELOAD_MASK GENMASK(19, 19)
+#define DISPC_VID_ATTRIBUTES_COLORCONVENABLE_MASK GENMASK(9, 9)
+#define DISPC_VID_ATTRIBUTES_VRESIZEENABLE_MASK GENMASK(8, 8)
+#define DISPC_VID_ATTRIBUTES_HRESIZEENABLE_MASK GENMASK(7, 7)
+#define DISPC_VID_ATTRIBUTES_FORMAT_MASK GENMASK(6, 1)
+#define DISPC_VID_ATTRIBUTES_ENABLE_MASK GENMASK(0, 0)
+
#define DISPC_VID_ATTRIBUTES2 0x24
#define DISPC_VID_BA_0 0x28
#define DISPC_VID_BA_1 0x2c
#define DISPC_VID_BA_UV_0 0x30
#define DISPC_VID_BA_UV_1 0x34
#define DISPC_VID_BUF_SIZE_STATUS 0x38
+#define DISPC_VID_BUF_SIZE_STATUS_BUFSIZE_MASK GENMASK(15, 0)
+
#define DISPC_VID_BUF_THRESHOLD 0x3c
+#define DISPC_VID_BUF_THRESHOLD_BUFHIGHTHRESHOLD_MASK GENMASK(31, 16)
+#define DISPC_VID_BUF_THRESHOLD_BUFLOWTHRESHOLD_MASK GENMASK(15, 0)
+
#define DISPC_VID_CSC_COEF(n) (0x40 + (n) * 4)
#define DISPC_VID_FIRH 0x5c
@@ -137,15 +165,26 @@ enum dispc_common_regs {
#define DISPC_VID_FIR_COEF_V12_C(phase) (0x1bc + (phase) * 4)
#define DISPC_VID_GLOBAL_ALPHA 0x1fc
+#define DISPC_VID_GLOBAL_ALPHA_GLOBALALPHA_MASK GENMASK(7, 0)
+
#define DISPC_VID_K2G_IRQENABLE 0x200 /* K2G */
#define DISPC_VID_K2G_IRQSTATUS 0x204 /* K2G */
#define DISPC_VID_MFLAG_THRESHOLD 0x208
+#define DISPC_VID_MFLAG_THRESHOLD_HT_MFLAG_MASK GENMASK(31, 16)
+#define DISPC_VID_MFLAG_THRESHOLD_LT_MFLAG_MASK GENMASK(15, 0)
+
#define DISPC_VID_PICTURE_SIZE 0x20c
+#define DISPC_VID_PICTURE_SIZE_MEMSIZEY_MASK GENMASK(27, 16)
+#define DISPC_VID_PICTURE_SIZE_MEMSIZEX_MASK GENMASK(11, 0)
+
#define DISPC_VID_PIXEL_INC 0x210
#define DISPC_VID_K2G_POSITION 0x214 /* K2G */
#define DISPC_VID_PRELOAD 0x218
#define DISPC_VID_ROW_INC 0x21c
#define DISPC_VID_SIZE 0x220
+#define DISPC_VID_SIZE_SIZEY_MASK GENMASK(27, 16)
+#define DISPC_VID_SIZE_SIZEX_MASK GENMASK(11, 0)
+
#define DISPC_VID_BA_EXT_0 0x22c
#define DISPC_VID_BA_EXT_1 0x230
#define DISPC_VID_BA_UV_EXT_0 0x234
@@ -173,11 +212,27 @@ enum dispc_common_regs {
#define DISPC_OVR_TRANS_COLOR_MIN 0x18
#define DISPC_OVR_TRANS_COLOR_MIN2 0x1c
#define DISPC_OVR_ATTRIBUTES(n) (0x20 + (n) * 4)
+#define DISPC_OVR_ATTRIBUTES_POSY_MASK GENMASK(30, 19)
+#define DISPC_OVR_ATTRIBUTES_POSX_MASK GENMASK(17, 6)
+#define DISPC_OVR_ATTRIBUTES_CHANNELIN_MASK GENMASK(4, 1)
+#define DISPC_OVR_ATTRIBUTES_ENABLE_MASK GENMASK(0, 0)
+
#define DISPC_OVR_ATTRIBUTES2(n) (0x34 + (n) * 4) /* J721E */
+#define DISPC_OVR_ATTRIBUTES2_POSY_MASK GENMASK(29, 16)
+#define DISPC_OVR_ATTRIBUTES2_POSX_MASK GENMASK(13, 0)
+
/* VP */
#define DISPC_VP_CONFIG 0x0
+#define DISPC_VP_CONFIG_COLORCONVENABLE_MASK GENMASK(24, 24)
+#define DISPC_VP_CONFIG_CPR_MASK GENMASK(15, 15)
+#define DISPC_VP_CONFIG_GAMMAENABLE_MASK GENMASK(2, 2)
+
#define DISPC_VP_CONTROL 0x4
+#define DISPC_VP_CONTROL_DATALINES_MASK GENMASK(10, 8)
+#define DISPC_VP_CONTROL_GOBIT_MASK GENMASK(5, 5)
+#define DISPC_VP_CONTROL_ENABLE_MASK GENMASK(0, 0)
+
#define DISPC_VP_CSC_COEF0 0x8
#define DISPC_VP_CSC_COEF1 0xc
#define DISPC_VP_CSC_COEF2 0x10
@@ -189,9 +244,28 @@ enum dispc_common_regs {
#define DISPC_VP_DATA_CYCLE_2 0x1c
#define DISPC_VP_LINE_NUMBER 0x44
#define DISPC_VP_POL_FREQ 0x4c
+#define DISPC_VP_POL_FREQ_ALIGN_MASK GENMASK(18, 18)
+#define DISPC_VP_POL_FREQ_ONOFF_MASK GENMASK(17, 17)
+#define DISPC_VP_POL_FREQ_RF_MASK GENMASK(16, 16)
+#define DISPC_VP_POL_FREQ_IEO_MASK GENMASK(15, 15)
+#define DISPC_VP_POL_FREQ_IPC_MASK GENMASK(14, 14)
+#define DISPC_VP_POL_FREQ_IHS_MASK GENMASK(13, 13)
+#define DISPC_VP_POL_FREQ_IVS_MASK GENMASK(12, 12)
+
#define DISPC_VP_SIZE_SCREEN 0x50
+#define DISPC_VP_SIZE_SCREEN_HDISPLAY_MASK GENMASK(11, 0)
+#define DISPC_VP_SIZE_SCREEN_VDISPLAY_MASK GENMASK(27, 16)
+
#define DISPC_VP_TIMING_H 0x54
+#define DISPC_VP_TIMING_H_SYNC_PULSE_MASK GENMASK(7, 0)
+#define DISPC_VP_TIMING_H_FRONT_PORCH_MASK GENMASK(19, 8)
+#define DISPC_VP_TIMING_H_BACK_PORCH_MASK GENMASK(31, 20)
+
#define DISPC_VP_TIMING_V 0x58
+#define DISPC_VP_TIMING_V_SYNC_PULSE_MASK GENMASK(7, 0)
+#define DISPC_VP_TIMING_V_FRONT_PORCH_MASK GENMASK(19, 8)
+#define DISPC_VP_TIMING_V_BACK_PORCH_MASK GENMASK(31, 20)
+
#define DISPC_VP_CSC_COEF3 0x5c
#define DISPC_VP_CSC_COEF4 0x60
#define DISPC_VP_CSC_COEF5 0x64
@@ -220,6 +294,8 @@ enum dispc_common_regs {
#define DISPC_VP_SAFETY_LFSR_SEED 0x110
#define DISPC_VP_GAMMA_TABLE 0x120
#define DISPC_VP_DSS_OLDI_CFG 0x160
+#define DISPC_VP_DSS_OLDI_CFG_MAP_MASK GENMASK(3, 1)
+
#define DISPC_VP_DSS_OLDI_STATUS 0x164
#define DISPC_VP_DSS_OLDI_LB 0x168
#define DISPC_VP_DSS_MERGE_SPLIT 0x16c /* J721E */
diff --git a/drivers/gpu/drm/tidss/tidss_drv.c b/drivers/gpu/drm/tidss/tidss_drv.c
index a1b12e52aca4..27d9a8fd541f 100644
--- a/drivers/gpu/drm/tidss/tidss_drv.c
+++ b/drivers/gpu/drm/tidss/tidss_drv.c
@@ -8,6 +8,7 @@
#include <linux/of.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
+#include <linux/aperture.h>
#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
@@ -192,12 +193,20 @@ static int tidss_probe(struct platform_device *pdev)
goto err_irq_uninstall;
}
+ /* Remove possible early fb before setting up the fbdev */
+ ret = aperture_remove_all_conflicting_devices(tidss_driver.name);
+ if (ret)
+ goto err_drm_dev_unreg;
+
drm_client_setup(ddev, NULL);
dev_dbg(dev, "%s done\n", __func__);
return 0;
+err_drm_dev_unreg:
+ drm_dev_unregister(ddev);
+
err_irq_uninstall:
tidss_irq_uninstall(ddev);
diff --git a/drivers/gpu/drm/tidss/tidss_drv.h b/drivers/gpu/drm/tidss/tidss_drv.h
index d14d5d28f0a3..84454a4855d1 100644
--- a/drivers/gpu/drm/tidss/tidss_drv.h
+++ b/drivers/gpu/drm/tidss/tidss_drv.h
@@ -9,6 +9,8 @@
#include <linux/spinlock.h>
+#include <drm/drm_device.h>
+
#define TIDSS_MAX_PORTS 4
#define TIDSS_MAX_PLANES 4
#define TIDSS_MAX_OLDI_TXES 2
diff --git a/drivers/gpu/drm/tidss/tidss_oldi.c b/drivers/gpu/drm/tidss/tidss_oldi.c
index 8f25159d0666..7688251beba2 100644
--- a/drivers/gpu/drm/tidss/tidss_oldi.c
+++ b/drivers/gpu/drm/tidss/tidss_oldi.c
@@ -464,7 +464,6 @@ int tidss_oldi_init(struct tidss_device *tidss)
* which may still be connected.
* Continue to search for that.
*/
- ret = 0;
continue;
}
goto err_put_node;
diff --git a/drivers/gpu/drm/tidss/tidss_plane.h b/drivers/gpu/drm/tidss/tidss_plane.h
index aecaf2728406..92c560c3a621 100644
--- a/drivers/gpu/drm/tidss/tidss_plane.h
+++ b/drivers/gpu/drm/tidss/tidss_plane.h
@@ -7,6 +7,8 @@
#ifndef __TIDSS_PLANE_H__
#define __TIDSS_PLANE_H__
+#include <drm/drm_plane.h>
+
#define to_tidss_plane(p) container_of((p), struct tidss_plane, plane)
struct tidss_device;
diff --git a/drivers/gpu/drm/tidss/tidss_scale_coefs.h b/drivers/gpu/drm/tidss/tidss_scale_coefs.h
index 9c560d0fdac0..9824d02d9d1f 100644
--- a/drivers/gpu/drm/tidss/tidss_scale_coefs.h
+++ b/drivers/gpu/drm/tidss/tidss_scale_coefs.h
@@ -9,6 +9,8 @@
#include <linux/types.h>
+struct device;
+
struct tidss_scale_coefs {
s16 c2[16];
s16 c1[16];
diff --git a/drivers/gpu/drm/tiny/Kconfig b/drivers/gpu/drm/tiny/Kconfig
index 06e54694a7f2..7d9e85e932d7 100644
--- a/drivers/gpu/drm/tiny/Kconfig
+++ b/drivers/gpu/drm/tiny/Kconfig
@@ -82,6 +82,21 @@ config DRM_PANEL_MIPI_DBI
https://github.com/notro/panel-mipi-dbi/wiki.
To compile this driver as a module, choose M here.
+config DRM_PIXPAPER
+ tristate "DRM support for PIXPAPER display panels"
+ depends on DRM && SPI
+ select DRM_CLIENT_SELECTION
+ select DRM_GEM_SHMEM_HELPER
+ select DRM_KMS_HELPER
+ help
+ DRM driver for the Mayqueen Pixpaper e-ink display panel.
+
+ This driver supports small e-paper displays connected over SPI,
+ with a resolution of 122x250 and XRGB8888 framebuffer format.
+ It is intended for low-power embedded applications.
+
+ If M is selected, the module will be built as pixpaper.ko.
+
config TINYDRM_HX8357D
tristate "DRM support for HX8357D display panels"
depends on DRM && SPI
diff --git a/drivers/gpu/drm/tiny/Makefile b/drivers/gpu/drm/tiny/Makefile
index 4a9ff61ec254..48d30bf6152f 100644
--- a/drivers/gpu/drm/tiny/Makefile
+++ b/drivers/gpu/drm/tiny/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_DRM_BOCHS) += bochs.o
obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus-qemu.o
obj-$(CONFIG_DRM_GM12U320) += gm12u320.o
obj-$(CONFIG_DRM_PANEL_MIPI_DBI) += panel-mipi-dbi.o
+obj-$(CONFIG_DRM_PIXPAPER) += pixpaper.o
obj-$(CONFIG_TINYDRM_HX8357D) += hx8357d.o
obj-$(CONFIG_TINYDRM_ILI9163) += ili9163.o
obj-$(CONFIG_TINYDRM_ILI9225) += ili9225.o
diff --git a/drivers/gpu/drm/tiny/bochs.c b/drivers/gpu/drm/tiny/bochs.c
index 8d3b7c4fa6a4..d2d5e9f1269f 100644
--- a/drivers/gpu/drm/tiny/bochs.c
+++ b/drivers/gpu/drm/tiny/bochs.c
@@ -252,7 +252,7 @@ static int bochs_hw_init(struct bochs_device *bochs)
}
bochs->ioports = 1;
} else {
- dev_err(dev->dev, "I/O ports are not supported\n");
+ drm_err(dev, "I/O ports are not supported\n");
return -EIO;
}
diff --git a/drivers/gpu/drm/tiny/pixpaper.c b/drivers/gpu/drm/tiny/pixpaper.c
new file mode 100644
index 000000000000..32598fb2fee7
--- /dev/null
+++ b/drivers/gpu/drm/tiny/pixpaper.c
@@ -0,0 +1,1165 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DRM driver for PIXPAPER e-ink panel
+ *
+ * Author: LiangCheng Wang <zaq14760@gmail.com>
+ */
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+
+#include <drm/clients/drm_client_setup.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fbdev_shmem.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_probe_helper.h>
+
+/*
+ * Note on Undocumented Commands/Registers:
+ *
+ * Several commands and register parameters defined in this file are not
+ * documented in the datasheet. Their values and usage have been derived
+ * through analysis of existing userspace example programs.
+ *
+ * These 'unknown' definitions are crucial for the proper initialization
+ * and stable operation of the panel. Modifying these values without
+ * thorough understanding may lead to display anomalies, panel damage,
+ * or unexpected behavior.
+ */
+
+/* Command definitions */
+#define PIXPAPER_CMD_PANEL_SETTING 0x00 /* R00H: Panel settings */
+#define PIXPAPER_CMD_POWER_SETTING 0x01 /* R01H: Power settings */
+#define PIXPAPER_CMD_POWER_OFF 0x02 /* R02H: Power off */
+#define PIXPAPER_CMD_POWER_OFF_SEQUENCE 0x03 /* R03H: Power off sequence */
+#define PIXPAPER_CMD_POWER_ON 0x04 /* R04H: Power on */
+#define PIXPAPER_CMD_BOOSTER_SOFT_START 0x06 /* R06H: Booster soft start */
+#define PIXPAPER_CMD_DEEP_SLEEP 0x07 /* R07H: Deep sleep */
+#define PIXPAPER_CMD_DATA_START_TRANSMISSION 0x10
+/* R10H: Data transmission start */
+#define PIXPAPER_CMD_DISPLAY_REFRESH 0x12 /* R12H: Display refresh */
+#define PIXPAPER_CMD_PLL_CONTROL 0x30 /* R30H: PLL control */
+#define PIXPAPER_CMD_TEMP_SENSOR_CALIB 0x41
+/* R41H: Temperature sensor calibration */
+#define PIXPAPER_CMD_UNKNOWN_4D 0x4D /* R4DH: Unknown command */
+#define PIXPAPER_CMD_VCOM_INTERVAL 0x50 /* R50H: VCOM interval */
+#define PIXPAPER_CMD_UNKNOWN_60 0x60 /* R60H: Unknown command */
+#define PIXPAPER_CMD_RESOLUTION_SETTING 0x61 /* R61H: Resolution settings */
+#define PIXPAPER_CMD_GATE_SOURCE_START 0x65 /* R65H: Gate/source start */
+#define PIXPAPER_CMD_UNKNOWN_B4 0xB4 /* RB4H: Unknown command */
+#define PIXPAPER_CMD_UNKNOWN_B5 0xB5 /* RB5H: Unknown command */
+#define PIXPAPER_CMD_UNKNOWN_E0 0xE0 /* RE0H: Unknown command */
+#define PIXPAPER_CMD_POWER_SAVING 0xE3 /* RE3H: Power saving */
+#define PIXPAPER_CMD_UNKNOWN_E7 0xE7 /* RE7H: Unknown command */
+#define PIXPAPER_CMD_UNKNOWN_E9 0xE9 /* RE9H: Unknown command */
+
+/* R00H PSR - First Parameter */
+#define PIXPAPER_PSR_RST_N BIT(0)
+/* Bit 0: RST_N, 1=no effect (default), 0=reset with booster OFF */
+#define PIXPAPER_PSR_SHD_N BIT(1)
+/* Bit 1: SHD_N, 1=booster ON (default), 0=booster OFF */
+#define PIXPAPER_PSR_SHL BIT(2)
+/* Bit 2: SHL, 1=shift right (default), 0=shift left */
+#define PIXPAPER_PSR_UD BIT(3)
+/* Bit 3: UD, 1=scan up (default), 0=scan down */
+#define PIXPAPER_PSR_PST_MODE BIT(5)
+/* Bit 5: PST_MODE, 0=frame scanning (default), 1=external */
+#define PIXPAPER_PSR_RES_MASK (3 << 6)
+/* Bits 7-6: RES[1:0], resolution setting */
+#define PIXPAPER_PSR_RES_176x296 (0x0 << 6) /* 00: 176x296 */
+#define PIXPAPER_PSR_RES_128x296 (0x1 << 6) /* 01: 128x296 */
+#define PIXPAPER_PSR_RES_128x250 (0x2 << 6) /* 10: 128x250 */
+#define PIXPAPER_PSR_RES_112x204 (0x3 << 6) /* 11: 112x204 */
+#define PIXPAPER_PSR_CONFIG \
+ (PIXPAPER_PSR_RST_N | PIXPAPER_PSR_SHD_N | PIXPAPER_PSR_SHL | \
+ PIXPAPER_PSR_UD)
+/* 0x0F: Default settings, resolution set by R61H */
+
+/* R00H PSR - Second Parameter */
+#define PIXPAPER_PSR2_VC_LUTZ \
+ (1 << 0) /* Bit 0: VC_LUTZ, 1=VCOM float after refresh (default), 0=no effect */
+#define PIXPAPER_PSR2_NORG \
+ (1 << 1) /* Bit 1: NORG, 1=VCOM to GND before power off, 0=no effect (default) */
+#define PIXPAPER_PSR2_TIEG \
+ (1 << 2) /* Bit 2: TIEG, 1=VGN to GND on power off, 0=no effect (default) */
+#define PIXPAPER_PSR2_TS_AUTO \
+ (1 << 3) /* Bit 3: TS_AUTO, 1=sensor on RST_N low to high (default), 0=on booster */
+#define PIXPAPER_PSR2_VCMZ \
+ (1 << 4) /* Bit 4: VCMZ, 1=VCOM always floating, 0=no effect (default) */
+#define PIXPAPER_PSR2_FOPT \
+ (1 << 5) /* Bit 5: FOPT, 0=scan 1 frame (default), 1=no scan, HiZ */
+#define PIXPAPER_PSR_CONFIG2 \
+ (PIXPAPER_PSR2_VC_LUTZ | \
+ PIXPAPER_PSR2_TS_AUTO) /* 0x09: Default VCOM and temp sensor settings */
+
+/* R01H PWR - Power Setting Register */
+/* First Parameter */
+#define PIXPAPER_PWR_VDG_EN \
+ (1 << 0) /* Bit 0: VDG_EN, 1=internal DCDC for VGP/VGN (default), 0=external */
+#define PIXPAPER_PWR_VDS_EN \
+ (1 << 1) /* Bit 1: VDS_EN, 1=internal regulator for VSP/VSN (default), 0=external */
+#define PIXPAPER_PWR_VSC_EN \
+ (1 << 2) /* Bit 2: VSC_EN, 1=internal regulator for VSPL (default), 0=external */
+#define PIXPAPER_PWR_V_MODE \
+ (1 << 3) /* Bit 3: V_MODE, 0=Mode0 (default), 1=Mode1 */
+#define PIXPAPER_PWR_CONFIG1 \
+ (PIXPAPER_PWR_VDG_EN | PIXPAPER_PWR_VDS_EN | \
+ PIXPAPER_PWR_VSC_EN) /* 0x07: Internal power for VGP/VGN, VSP/VSN, VSPL */
+
+/* Second Parameter */
+#define PIXPAPER_PWR_VGPN_MASK \
+ (3 << 0) /* Bits 1-0: VGPN, VGP/VGN voltage levels */
+#define PIXPAPER_PWR_VGPN_20V (0x0 << 0) /* 00: VGP=20V, VGN=-20V (default) */
+#define PIXPAPER_PWR_VGPN_17V (0x1 << 0) /* 01: VGP=17V, VGN=-17V */
+#define PIXPAPER_PWR_VGPN_15V (0x2 << 0) /* 10: VGP=15V, VGN=-15V */
+#define PIXPAPER_PWR_VGPN_10V (0x3 << 0) /* 11: VGP=10V, VGN=-10V */
+#define PIXPAPER_PWR_CONFIG2 PIXPAPER_PWR_VGPN_20V /* 0x00: VGP=20V, VGN=-20V */
+
+/* Third, Fourth, Sixth Parameters (VSP_1, VSPL_0, VSPL_1) */
+#define PIXPAPER_PWR_VSP_8_2V 0x22 /* VSP_1/VSPL_1: 8.2V (34 decimal) */
+#define PIXPAPER_PWR_VSPL_15V 0x78 /* VSPL_0: 15V (120 decimal) */
+
+/* Fifth Parameter (VSN_1) */
+#define PIXPAPER_PWR_VSN_4V 0x0A /* VSN_1: -4V (10 decimal) */
+
+/* R03H PFS - Power Off Sequence Setting Register */
+/* First Parameter */
+#define PIXPAPER_PFS_T_VDS_OFF_MASK \
+ (3 << 0) /* Bits 1-0: T_VDS_OFF, VSP/VSN power-off sequence */
+#define PIXPAPER_PFS_T_VDS_OFF_20MS (0x0 << 0) /* 00: 20 ms (default) */
+#define PIXPAPER_PFS_T_VDS_OFF_40MS (0x1 << 0) /* 01: 40 ms */
+#define PIXPAPER_PFS_T_VDS_OFF_60MS (0x2 << 0) /* 10: 60 ms */
+#define PIXPAPER_PFS_T_VDS_OFF_80MS (0x3 << 0) /* 11: 80 ms */
+#define PIXPAPER_PFS_T_VDPG_OFF_MASK \
+ (3 << 4) /* Bits 5-4: T_VDPG_OFF, VGP/VGN power-off sequence */
+#define PIXPAPER_PFS_T_VDPG_OFF_20MS (0x0 << 4) /* 00: 20 ms (default) */
+#define PIXPAPER_PFS_T_VDPG_OFF_40MS (0x1 << 4) /* 01: 40 ms */
+#define PIXPAPER_PFS_T_VDPG_OFF_60MS (0x2 << 4) /* 10: 60 ms */
+#define PIXPAPER_PFS_T_VDPG_OFF_80MS (0x3 << 4) /* 11: 80 ms */
+#define PIXPAPER_PFS_CONFIG1 \
+ (PIXPAPER_PFS_T_VDS_OFF_20MS | \
+ PIXPAPER_PFS_T_VDPG_OFF_20MS) /* 0x10: Default 20 ms for VSP/VSN and VGP/VGN */
+
+/* Second Parameter */
+#define PIXPAPER_PFS_VGP_EXT_MASK \
+ (0xF << 0) /* Bits 3-0: VGP_EXT, VGP extension time */
+#define PIXPAPER_PFS_VGP_EXT_0MS (0x0 << 0) /* 0000: 0 ms */
+#define PIXPAPER_PFS_VGP_EXT_500MS (0x1 << 0) /* 0001: 500 ms */
+#define PIXPAPER_PFS_VGP_EXT_1000MS (0x2 << 0) /* 0010: 1000 ms */
+#define PIXPAPER_PFS_VGP_EXT_1500MS (0x3 << 0) /* 0011: 1500 ms */
+#define PIXPAPER_PFS_VGP_EXT_2000MS (0x4 << 0) /* 0100: 2000 ms (default) */
+#define PIXPAPER_PFS_VGP_EXT_2500MS (0x5 << 0) /* 0101: 2500 ms */
+#define PIXPAPER_PFS_VGP_EXT_3000MS (0x6 << 0) /* 0110: 3000 ms */
+#define PIXPAPER_PFS_VGP_EXT_3500MS (0x7 << 0) /* 0111: 3500 ms */
+#define PIXPAPER_PFS_VGP_EXT_4000MS (0x8 << 0) /* 1000: 4000 ms */
+#define PIXPAPER_PFS_VGP_EXT_4500MS (0x9 << 0) /* 1001: 4500 ms */
+#define PIXPAPER_PFS_VGP_EXT_5000MS (0xA << 0) /* 1010: 5000 ms */
+#define PIXPAPER_PFS_VGP_EXT_5500MS (0xB << 0) /* 1011: 5500 ms */
+#define PIXPAPER_PFS_VGP_EXT_6000MS (0xC << 0) /* 1100: 6000 ms */
+#define PIXPAPER_PFS_VGP_EXT_6500MS (0xD << 0) /* 1101: 6500 ms */
+#define PIXPAPER_PFS_VGP_LEN_MASK \
+ (0xF << 4) /* Bits 7-4: VGP_LEN, VGP at 10V during power-off */
+#define PIXPAPER_PFS_VGP_LEN_0MS (0x0 << 4) /* 0000: 0 ms */
+#define PIXPAPER_PFS_VGP_LEN_500MS (0x1 << 4) /* 0001: 500 ms */
+#define PIXPAPER_PFS_VGP_LEN_1000MS (0x2 << 4) /* 0010: 1000 ms */
+#define PIXPAPER_PFS_VGP_LEN_1500MS (0x3 << 4) /* 0011: 1500 ms */
+#define PIXPAPER_PFS_VGP_LEN_2000MS (0x4 << 4) /* 0100: 2000 ms */
+#define PIXPAPER_PFS_VGP_LEN_2500MS (0x5 << 4) /* 0101: 2500 ms (default) */
+#define PIXPAPER_PFS_VGP_LEN_3000MS (0x6 << 4) /* 0110: 3000 ms */
+#define PIXPAPER_PFS_VGP_LEN_3500MS (0x7 << 4) /* 0111: 3500 ms */
+#define PIXPAPER_PFS_VGP_LEN_4000MS (0x8 << 4) /* 1000: 4000 ms */
+#define PIXPAPER_PFS_VGP_LEN_4500MS (0x9 << 4) /* 1001: 4500 ms */
+#define PIXPAPER_PFS_VGP_LEN_5000MS (0xA << 4) /* 1010: 5000 ms */
+#define PIXPAPER_PFS_VGP_LEN_5500MS (0xB << 4) /* 1011: 5500 ms */
+#define PIXPAPER_PFS_VGP_LEN_6000MS (0xC << 4) /* 1100: 6000 ms */
+#define PIXPAPER_PFS_VGP_LEN_6500MS (0xD << 4) /* 1101: 6500 ms */
+#define PIXPAPER_PFS_CONFIG2 \
+ (PIXPAPER_PFS_VGP_EXT_1000MS | \
+ PIXPAPER_PFS_VGP_LEN_2500MS) /* 0x54: VGP extension 1000 ms, VGP at 10V for 2500 ms */
+
+/* Third Parameter */
+#define PIXPAPER_PFS_XON_LEN_MASK \
+ (0xF << 0) /* Bits 3-0: XON_LEN, XON enable time */
+#define PIXPAPER_PFS_XON_LEN_0MS (0x0 << 0) /* 0000: 0 ms */
+#define PIXPAPER_PFS_XON_LEN_500MS (0x1 << 0) /* 0001: 500 ms */
+#define PIXPAPER_PFS_XON_LEN_1000MS (0x2 << 0) /* 0010: 1000 ms */
+#define PIXPAPER_PFS_XON_LEN_1500MS (0x3 << 0) /* 0011: 1500 ms */
+#define PIXPAPER_PFS_XON_LEN_2000MS (0x4 << 0) /* 0100: 2000 ms (default) */
+#define PIXPAPER_PFS_XON_LEN_2500MS (0x5 << 0) /* 0101: 2500 ms */
+#define PIXPAPER_PFS_XON_LEN_3000MS (0x6 << 0) /* 0110: 3000 ms */
+#define PIXPAPER_PFS_XON_LEN_3500MS (0x7 << 0) /* 0111: 3500 ms */
+#define PIXPAPER_PFS_XON_LEN_4000MS (0x8 << 0) /* 1000: 4000 ms */
+#define PIXPAPER_PFS_XON_LEN_4500MS (0x9 << 0) /* 1001: 4500 ms */
+#define PIXPAPER_PFS_XON_LEN_5000MS (0xA << 0) /* 1010: 5000 ms */
+#define PIXPAPER_PFS_XON_LEN_5500MS (0xB << 0) /* 1011: 5500 ms */
+#define PIXPAPER_PFS_XON_LEN_6000MS (0xC << 0) /* 1100: 6000 ms */
+#define PIXPAPER_PFS_XON_DLY_MASK \
+ (0xF << 4) /* Bits 7-4: XON_DLY, XON delay time */
+#define PIXPAPER_PFS_XON_DLY_0MS (0x0 << 4) /* 0000: 0 ms */
+#define PIXPAPER_PFS_XON_DLY_500MS (0x1 << 4) /* 0001: 500 ms */
+#define PIXPAPER_PFS_XON_DLY_1000MS (0x2 << 4) /* 0010: 1000 ms */
+#define PIXPAPER_PFS_XON_DLY_1500MS (0x3 << 4) /* 0011: 1500 ms */
+#define PIXPAPER_PFS_XON_DLY_2000MS (0x4 << 4) /* 0100: 2000 ms (default) */
+#define PIXPAPER_PFS_XON_DLY_2500MS (0x5 << 4) /* 0101: 2500 ms */
+#define PIXPAPER_PFS_XON_DLY_3000MS (0x6 << 4) /* 0110: 3000 ms */
+#define PIXPAPER_PFS_XON_DLY_3500MS (0x7 << 4) /* 0111: 3500 ms */
+#define PIXPAPER_PFS_XON_DLY_4000MS (0x8 << 4) /* 1000: 4000 ms */
+#define PIXPAPER_PFS_XON_DLY_4500MS (0x9 << 4) /* 1001: 4500 ms */
+#define PIXPAPER_PFS_XON_DLY_5000MS (0xA << 4) /* 1010: 5000 ms */
+#define PIXPAPER_PFS_XON_DLY_5500MS (0xB << 4) /* 1011: 5500 ms */
+#define PIXPAPER_PFS_XON_DLY_6000MS (0xC << 4) /* 1100: 6000 ms */
+#define PIXPAPER_PFS_CONFIG3 \
+ (PIXPAPER_PFS_XON_LEN_2000MS | \
+ PIXPAPER_PFS_XON_DLY_2000MS) /* 0x44: XON enable and delay at 2000 ms */
+
+/* R06H BTST - Booster Soft Start Command */
+/* First Parameter */
+#define PIXPAPER_BTST_PHA_SFT_MASK \
+ (3 << 0) /* Bits 1-0: PHA_SFT, soft start period for phase A */
+#define PIXPAPER_BTST_PHA_SFT_10MS (0x0 << 0) /* 00: 10 ms (default) */
+#define PIXPAPER_BTST_PHA_SFT_20MS (0x1 << 0) /* 01: 20 ms */
+#define PIXPAPER_BTST_PHA_SFT_30MS (0x2 << 0) /* 10: 30 ms */
+#define PIXPAPER_BTST_PHA_SFT_40MS (0x3 << 0) /* 11: 40 ms */
+#define PIXPAPER_BTST_PHB_SFT_MASK \
+ (3 << 2) /* Bits 3-2: PHB_SFT, soft start period for phase B */
+#define PIXPAPER_BTST_PHB_SFT_10MS (0x0 << 2) /* 00: 10 ms (default) */
+#define PIXPAPER_BTST_PHB_SFT_20MS (0x1 << 2) /* 01: 20 ms */
+#define PIXPAPER_BTST_PHB_SFT_30MS (0x2 << 2) /* 10: 30 ms */
+#define PIXPAPER_BTST_PHB_SFT_40MS (0x3 << 2) /* 11: 40 ms */
+#define PIXPAPER_BTST_CONFIG1 \
+ (PIXPAPER_BTST_PHA_SFT_40MS | \
+ PIXPAPER_BTST_PHB_SFT_40MS) /* 0x0F: 40 ms for phase A and B */
+
+/* Second to Seventh Parameters (Driving Strength or Minimum OFF Time) */
+#define PIXPAPER_BTST_CONFIG2 0x0A /* Strength11 */
+#define PIXPAPER_BTST_CONFIG3 0x2F /* Period48 */
+#define PIXPAPER_BTST_CONFIG4 0x25 /* Strength38 */
+#define PIXPAPER_BTST_CONFIG5 0x22 /* Period35 */
+#define PIXPAPER_BTST_CONFIG6 0x2E /* Strength47 */
+#define PIXPAPER_BTST_CONFIG7 0x21 /* Period34 */
+
+/* R12H: DRF (Display Refresh) */
+#define PIXPAPER_DRF_VCOM_AC 0x00 /* AC VCOM: VCOM follows LUTC (default) */
+#define PIXPAPER_DRF_VCOM_DC 0x01 /* DC VCOM: VCOM fixed to VCOMDC */
+
+/* R30H PLL - PLL Control Register */
+/* First Parameter */
+#define PIXPAPER_PLL_FR_MASK (0x7 << 0) /* Bits 2-0: FR, frame rate */
+#define PIXPAPER_PLL_FR_12_5HZ (0x0 << 0) /* 000: 12.5 Hz */
+#define PIXPAPER_PLL_FR_25HZ (0x1 << 0) /* 001: 25 Hz */
+#define PIXPAPER_PLL_FR_50HZ (0x2 << 0) /* 010: 50 Hz (default) */
+#define PIXPAPER_PLL_FR_65HZ (0x3 << 0) /* 011: 65 Hz */
+#define PIXPAPER_PLL_FR_75HZ (0x4 << 0) /* 100: 75 Hz */
+#define PIXPAPER_PLL_FR_85HZ (0x5 << 0) /* 101: 85 Hz */
+#define PIXPAPER_PLL_FR_100HZ (0x6 << 0) /* 110: 100 Hz */
+#define PIXPAPER_PLL_FR_120HZ (0x7 << 0) /* 111: 120 Hz */
+#define PIXPAPER_PLL_DFR \
+ (1 << 3) /* Bit 3: Dynamic frame rate, 0=disabled (default), 1=enabled */
+#define PIXPAPER_PLL_CONFIG \
+ (PIXPAPER_PLL_FR_50HZ) /* 0x02: 50 Hz, dynamic frame rate disabled */
+
+/* R41H TSE - Temperature Sensor Calibration Register */
+/* First Parameter */
+#define PIXPAPER_TSE_TO_MASK \
+ (0xF << 0) /* Bits 3-0: TO[3:0], temperature offset */
+#define PIXPAPER_TSE_TO_POS_0C (0x0 << 0) /* 0000: +0°C (default) */
+#define PIXPAPER_TSE_TO_POS_0_5C (0x1 << 0) /* 0001: +0.5°C */
+#define PIXPAPER_TSE_TO_POS_1C (0x2 << 0) /* 0010: +1°C */
+#define PIXPAPER_TSE_TO_POS_1_5C (0x3 << 0) /* 0011: +1.5°C */
+#define PIXPAPER_TSE_TO_POS_2C (0x4 << 0) /* 0100: +2°C */
+#define PIXPAPER_TSE_TO_POS_2_5C (0x5 << 0) /* 0101: +2.5°C */
+#define PIXPAPER_TSE_TO_POS_3C (0x6 << 0) /* 0110: +3°C */
+#define PIXPAPER_TSE_TO_POS_3_5C (0x7 << 0) /* 0111: +3.5°C */
+#define PIXPAPER_TSE_TO_NEG_4C (0x8 << 0) /* 1000: -4°C */
+#define PIXPAPER_TSE_TO_NEG_3_5C (0x9 << 0) /* 1001: -3.5°C */
+#define PIXPAPER_TSE_TO_NEG_3C (0xA << 0) /* 1010: -3°C */
+#define PIXPAPER_TSE_TO_NEG_2_5C (0xB << 0) /* 1011: -2.5°C */
+#define PIXPAPER_TSE_TO_NEG_2C (0xC << 0) /* 1100: -2°C */
+#define PIXPAPER_TSE_TO_NEG_1_5C (0xD << 0) /* 1101: -1.5°C */
+#define PIXPAPER_TSE_TO_NEG_1C (0xE << 0) /* 1110: -1°C */
+#define PIXPAPER_TSE_TO_NEG_0_5C (0xF << 0) /* 1111: -0.5°C */
+#define PIXPAPER_TSE_TO_FINE_MASK \
+ (0x3 << 4) /* Bits 5-4: TO[5:4], fine adjustment for positive offsets */
+#define PIXPAPER_TSE_TO_FINE_0C (0x0 << 4) /* 00: +0.0°C (default) */
+#define PIXPAPER_TSE_TO_FINE_0_25C (0x1 << 4) /* 01: +0.25°C */
+#define PIXPAPER_TSE_ENABLE \
+ (0 << 7) /* Bit 7: TSE, 0=internal sensor enabled (default), 1=disabled (external) */
+#define PIXPAPER_TSE_DISABLE \
+ (1 << 7) /* Bit 7: TSE, 1=internal sensor disabled, use external */
+#define PIXPAPER_TSE_CONFIG \
+ (PIXPAPER_TSE_TO_POS_0C | PIXPAPER_TSE_TO_FINE_0C | \
+ PIXPAPER_TSE_ENABLE) /* 0x00: Internal sensor enabled, +0°C offset */
+
+/* R4DH */
+#define PIXPAPER_UNKNOWN_4D_CONFIG \
+ 0x78 /* This value is essential for initialization, derived from userspace examples. */
+
+/* R50H CDI - VCOM and DATA Interval Setting Register */
+/* First Parameter */
+#define PIXPAPER_CDI_INTERVAL_MASK \
+ (0xF << 0) /* Bits 3-0: CDI[3:0], VCOM and data interval (hsync) */
+#define PIXPAPER_CDI_17_HSYNC (0x0 << 0) /* 0000: 17 hsync */
+#define PIXPAPER_CDI_16_HSYNC (0x1 << 0) /* 0001: 16 hsync */
+#define PIXPAPER_CDI_15_HSYNC (0x2 << 0) /* 0010: 15 hsync */
+#define PIXPAPER_CDI_14_HSYNC (0x3 << 0) /* 0011: 14 hsync */
+#define PIXPAPER_CDI_13_HSYNC (0x4 << 0) /* 0100: 13 hsync */
+#define PIXPAPER_CDI_12_HSYNC (0x5 << 0) /* 0101: 12 hsync */
+#define PIXPAPER_CDI_11_HSYNC (0x6 << 0) /* 0110: 11 hsync */
+#define PIXPAPER_CDI_10_HSYNC (0x7 << 0) /* 0111: 10 hsync (default) */
+#define PIXPAPER_CDI_9_HSYNC (0x8 << 0) /* 1000: 9 hsync */
+#define PIXPAPER_CDI_8_HSYNC (0x9 << 0) /* 1001: 8 hsync */
+#define PIXPAPER_CDI_7_HSYNC (0xA << 0) /* 1010: 7 hsync */
+#define PIXPAPER_CDI_6_HSYNC (0xB << 0) /* 1011: 6 hsync */
+#define PIXPAPER_CDI_5_HSYNC (0xC << 0) /* 1100: 5 hsync */
+#define PIXPAPER_CDI_4_HSYNC (0xD << 0) /* 1101: 4 hsync */
+#define PIXPAPER_CDI_3_HSYNC (0xE << 0) /* 1110: 3 hsync */
+#define PIXPAPER_CDI_2_HSYNC (0xF << 0) /* 1111: 2 hsync */
+#define PIXPAPER_CDI_DDX \
+ (1 << 4) /* Bit 4: DDX, 0=grayscale mapping 0, 1=grayscale mapping 1 (default) */
+#define PIXPAPER_CDI_VBD_MASK \
+ (0x7 << 5) /* Bits 7-5: VBD[2:0], border data selection */
+#define PIXPAPER_CDI_VBD_FLOAT (0x0 << 5) /* 000: Floating (DDX=0 or 1) */
+#define PIXPAPER_CDI_VBD_GRAY3_DDX0 \
+ (0x1 << 5) /* 001: Gray3 (border_buf=011) when DDX=0 */
+#define PIXPAPER_CDI_VBD_GRAY2_DDX0 \
+ (0x2 << 5) /* 010: Gray2 (border_buf=010) when DDX=0 */
+#define PIXPAPER_CDI_VBD_GRAY1_DDX0 \
+ (0x3 << 5) /* 011: Gray1 (border_buf=001) when DDX=0 */
+#define PIXPAPER_CDI_VBD_GRAY0_DDX0 \
+ (0x4 << 5) /* 100: Gray0 (border_buf=000) when DDX=0 */
+#define PIXPAPER_CDI_VBD_GRAY0_DDX1 \
+ (0x0 << 5) /* 000: Gray0 (border_buf=000) when DDX=1 */
+#define PIXPAPER_CDI_VBD_GRAY1_DDX1 \
+ (0x1 << 5) /* 001: Gray1 (border_buf=001) when DDX=1 */
+#define PIXPAPER_CDI_VBD_GRAY2_DDX1 \
+ (0x2 << 5) /* 010: Gray2 (border_buf=010) when DDX=1 */
+#define PIXPAPER_CDI_VBD_GRAY3_DDX1 \
+ (0x3 << 5) /* 011: Gray3 (border_buf=011) when DDX=1 */
+#define PIXPAPER_CDI_VBD_FLOAT_DDX1 (0x4 << 5) /* 100: Floating when DDX=1 */
+#define PIXPAPER_CDI_CONFIG \
+ (PIXPAPER_CDI_10_HSYNC | PIXPAPER_CDI_DDX | \
+ PIXPAPER_CDI_VBD_GRAY1_DDX1) /* 0x37: 10 hsync, DDX=1, border Gray1 */
+
+/* R60H */
+#define PIXPAPER_UNKNOWN_60_CONFIG1 \
+ 0x02 /* This value is essential for initialization, derived from userspace examples. */
+#define PIXPAPER_UNKNOWN_60_CONFIG2 \
+ 0x02 /* This value is essential for initialization, derived from userspace examples. */
+
+/* R61H TRES - Resolution Setting Register */
+#define PIXPAPER_TRES_HRES_H \
+ ((PIXPAPER_PANEL_BUFFER_WIDTH >> 8) & \
+ 0xFF) /* HRES[9:8]: High byte of horizontal resolution (128) */
+#define PIXPAPER_TRES_HRES_L \
+ (PIXPAPER_PANEL_BUFFER_WIDTH & \
+ 0xFF) /* HRES[7:0]: Low byte of horizontal resolution (128 = 0x80) */
+#define PIXPAPER_TRES_VRES_H \
+ ((PIXPAPER_HEIGHT >> 8) & \
+ 0xFF) /* VRES[9:8]: High byte of vertical resolution (250) */
+#define PIXPAPER_TRES_VRES_L \
+ (PIXPAPER_HEIGHT & \
+ 0xFF) /* VRES[7:0]: Low byte of vertical resolution (250 = 0xFA) */
+
+/* R65H GSST - Gate/Source Start Setting Register */
+#define PIXPAPER_GSST_S_START 0x00 /* S_Start[7:0]: First source line (S0) */
+#define PIXPAPER_GSST_RESERVED 0x00 /* Reserved byte */
+#define PIXPAPER_GSST_G_START_H \
+ 0x00 /* G_Start[8]: High bit of first gate line (G0) */
+#define PIXPAPER_GSST_G_START_L \
+ 0x00 /* G_Start[7:0]: Low byte of first gate line (G0) */
+
+/* RB4H */
+#define PIXPAPER_UNKNOWN_B4_CONFIG \
+ 0xD0 /* This value is essential for initialization, derived from userspace examples. */
+
+/* RB5H */
+#define PIXPAPER_UNKNOWN_B5_CONFIG \
+ 0x03 /* This value is essential for initialization, derived from userspace examples. */
+
+/* RE0H */
+#define PIXPAPER_UNKNOWN_E0_CONFIG \
+ 0x00 /* This value is essential for initialization, derived from userspace examples. */
+
+/* RE3H PWS - Power Saving Register */
+/* First Parameter */
+#define PIXPAPER_PWS_VCOM_W_MASK \
+	(0xF << 4) /* Bits 7-4: VCOM_W[3:0], VCOM power-saving width (line periods) */
+#define PIXPAPER_PWS_VCOM_W_0 (0x0 << 4) /* 0000: 0 line periods */
+#define PIXPAPER_PWS_VCOM_W_1 (0x1 << 4) /* 0001: 1 line period */
+#define PIXPAPER_PWS_VCOM_W_2 (0x2 << 4) /* 0010: 2 line periods */
+#define PIXPAPER_PWS_VCOM_W_3 (0x3 << 4) /* 0011: 3 line periods */
+#define PIXPAPER_PWS_VCOM_W_4 (0x4 << 4) /* 0100: 4 line periods */
+#define PIXPAPER_PWS_VCOM_W_5 (0x5 << 4) /* 0101: 5 line periods */
+#define PIXPAPER_PWS_VCOM_W_6 (0x6 << 4) /* 0110: 6 line periods */
+#define PIXPAPER_PWS_VCOM_W_7 (0x7 << 4) /* 0111: 7 line periods */
+#define PIXPAPER_PWS_VCOM_W_8 (0x8 << 4) /* 1000: 8 line periods */
+#define PIXPAPER_PWS_VCOM_W_9 (0x9 << 4) /* 1001: 9 line periods */
+#define PIXPAPER_PWS_VCOM_W_10 (0xA << 4) /* 1010: 10 line periods */
+#define PIXPAPER_PWS_VCOM_W_11 (0xB << 4) /* 1011: 11 line periods */
+#define PIXPAPER_PWS_VCOM_W_12 (0xC << 4) /* 1100: 12 line periods */
+#define PIXPAPER_PWS_VCOM_W_13 (0xD << 4) /* 1101: 13 line periods */
+#define PIXPAPER_PWS_VCOM_W_14 (0xE << 4) /* 1110: 14 line periods */
+#define PIXPAPER_PWS_VCOM_W_15 (0xF << 4) /* 1111: 15 line periods */
+#define PIXPAPER_PWS_SD_W_MASK \
+ (0xF << 0) /* Bits 3-0: SD_W[3:0], source power-saving width (660 ns units) */
+#define PIXPAPER_PWS_SD_W_0 (0x0 << 0) /* 0000: 0 ns */
+#define PIXPAPER_PWS_SD_W_1 (0x1 << 0) /* 0001: 660 ns */
+#define PIXPAPER_PWS_SD_W_2 (0x2 << 0) /* 0010: 1320 ns */
+#define PIXPAPER_PWS_SD_W_3 (0x3 << 0) /* 0011: 1980 ns */
+#define PIXPAPER_PWS_SD_W_4 (0x4 << 0) /* 0100: 2640 ns */
+#define PIXPAPER_PWS_SD_W_5 (0x5 << 0) /* 0101: 3300 ns */
+#define PIXPAPER_PWS_SD_W_6 (0x6 << 0) /* 0110: 3960 ns */
+#define PIXPAPER_PWS_SD_W_7 (0x7 << 0) /* 0111: 4620 ns */
+#define PIXPAPER_PWS_SD_W_8 (0x8 << 0) /* 1000: 5280 ns */
+#define PIXPAPER_PWS_SD_W_9 (0x9 << 0) /* 1001: 5940 ns */
+#define PIXPAPER_PWS_SD_W_10 (0xA << 0) /* 1010: 6600 ns */
+#define PIXPAPER_PWS_SD_W_11 (0xB << 0) /* 1011: 7260 ns */
+#define PIXPAPER_PWS_SD_W_12 (0xC << 0) /* 1100: 7920 ns */
+#define PIXPAPER_PWS_SD_W_13 (0xD << 0) /* 1101: 8580 ns */
+#define PIXPAPER_PWS_SD_W_14 (0xE << 0) /* 1110: 9240 ns */
+#define PIXPAPER_PWS_SD_W_15 (0xF << 0) /* 1111: 9900 ns */
+#define PIXPAPER_PWS_CONFIG \
+ (PIXPAPER_PWS_VCOM_W_2 | \
+ PIXPAPER_PWS_SD_W_2) /* 0x22: VCOM 2 line periods (160 µs), source 1320 ns */
+
+/* RE7H */
+#define PIXPAPER_UNKNOWN_E7_CONFIG \
+ 0x1C /* This value is essential for initialization, derived from userspace examples. */
+
+/* RE9H */
+#define PIXPAPER_UNKNOWN_E9_CONFIG \
+ 0x01 /* This value is essential for initialization, derived from userspace examples. */
+
+MODULE_IMPORT_NS("DMA_BUF");
+
+/*
+ * The panel has a visible resolution of 122x250.
+ * However, the controller requires the horizontal resolution to be aligned to 128 pixels.
+ * No porch or sync timing values are provided in the datasheet, so we define minimal
+ * placeholder values to satisfy the DRM framework.
+ */
+
+/* Panel visible resolution */
+#define PIXPAPER_WIDTH 122
+#define PIXPAPER_HEIGHT 250
+
+/* Controller requires 128 horizontal pixels total (for memory alignment) */
+#define PIXPAPER_HTOTAL 128
+#define PIXPAPER_HFP 2
+#define PIXPAPER_HSYNC 2
+#define PIXPAPER_HBP (PIXPAPER_HTOTAL - PIXPAPER_WIDTH - PIXPAPER_HFP - PIXPAPER_HSYNC)
+
+/*
+ * According to the datasheet, the total vertical blanking must be 55 lines,
+ * regardless of how the vertical back porch is set.
+ * Here we allocate VFP=2, VSYNC=2, and VBP=51 to sum up to 55 lines.
+ * Total vertical lines = 250 (visible) + 55 (blanking) = 305.
+ */
+#define PIXPAPER_VTOTAL (250 + 55)
+#define PIXPAPER_VFP 2
+#define PIXPAPER_VSYNC 2
+#define PIXPAPER_VBP (55 - PIXPAPER_VFP - PIXPAPER_VSYNC)
+
+/*
+ * Pixel clock calculation:
+ * pixel_clock = htotal * vtotal * refresh_rate
+ * = 128 * 305 * 50
+ * = 1,952,000 Hz = 1952 kHz
+ */
+#define PIXPAPER_PIXEL_CLOCK 1952
+
+#define PIXPAPER_WIDTH_MM 24 /* approximate from 23.7046mm */
+#define PIXPAPER_HEIGHT_MM 49 /* approximate from 48.55mm */
+
+#define PIXPAPER_SPI_BITS_PER_WORD 8
+#define PIXPAPER_SPI_SPEED_DEFAULT 1000000
+
+#define PIXPAPER_PANEL_BUFFER_WIDTH 128
+#define PIXPAPER_PANEL_BUFFER_TWO_BYTES_PER_ROW (PIXPAPER_PANEL_BUFFER_WIDTH / 4) /* 32 bytes per row at 2 bits/pixel */
+
+#define PIXPAPER_COLOR_THRESHOLD_LOW_CHANNEL 60
+#define PIXPAPER_COLOR_THRESHOLD_HIGH_CHANNEL 200
+#define PIXPAPER_COLOR_THRESHOLD_YELLOW_MIN_GREEN 180
+
+struct pixpaper_error_ctx {
+ int errno_code;
+};
+
+struct pixpaper_panel {
+ struct drm_device drm;
+ struct drm_plane plane;
+ struct drm_crtc crtc;
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+
+ struct spi_device *spi;
+ struct gpio_desc *reset;
+ struct gpio_desc *busy;
+ struct gpio_desc *dc;
+};
+
+static inline struct pixpaper_panel *to_pixpaper_panel(struct drm_device *drm)
+{
+ return container_of(drm, struct pixpaper_panel, drm);
+}
+
+static void pixpaper_wait_for_panel(struct pixpaper_panel *panel)
+{
+ unsigned int timeout_ms = 10000;
+ unsigned long timeout_jiffies = jiffies + msecs_to_jiffies(timeout_ms);
+
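+	/* The busy line reads high once the controller is ready again */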
+ usleep_range(1000, 1500);
+ while (gpiod_get_value_cansleep(panel->busy) != 1) {
+ if (time_after(jiffies, timeout_jiffies)) {
+ drm_warn(&panel->drm, "Busy wait timed out\n");
+ return;
+ }
+ usleep_range(100, 200);
+ }
+}
+
+static void pixpaper_spi_sync(struct spi_device *spi, struct spi_message *msg,
+ struct pixpaper_error_ctx *err)
+{
+	int ret;
+
+	if (err->errno_code)
+		return;
+
+	ret = spi_sync(spi, msg);
+	if (ret < 0)
+		err->errno_code = ret;
+}
+
+static void pixpaper_send_cmd(struct pixpaper_panel *panel, u8 cmd,
+ struct pixpaper_error_ctx *err)
+{
+	struct spi_transfer xfer = {
+		.tx_buf = &cmd,
+		.len = 1,
+	};
+	struct spi_message msg;
+
+	if (err->errno_code)
+		return;
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+
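+	/* DC low selects command mode */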
+ gpiod_set_value_cansleep(panel->dc, 0);
+ usleep_range(1, 5);
+ pixpaper_spi_sync(panel->spi, &msg, err);
+}
+
+static void pixpaper_send_data(struct pixpaper_panel *panel, u8 data,
+ struct pixpaper_error_ctx *err)
+{
+	struct spi_transfer xfer = {
+		.tx_buf = &data,
+		.len = 1,
+	};
+	struct spi_message msg;
+
+	if (err->errno_code)
+		return;
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+
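+	/* DC high selects data mode */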
+ gpiod_set_value_cansleep(panel->dc, 1);
+ usleep_range(1, 5);
+ pixpaper_spi_sync(panel->spi, &msg, err);
+}
+
+static int pixpaper_panel_hw_init(struct pixpaper_panel *panel)
+{
+ struct pixpaper_error_ctx err = { .errno_code = 0 };
+
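+	/* Pulse the reset line and give the controller time to come back up */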
+ gpiod_set_value_cansleep(panel->reset, 0);
+ msleep(50);
+ gpiod_set_value_cansleep(panel->reset, 1);
+ msleep(50);
+
+ pixpaper_wait_for_panel(panel);
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_UNKNOWN_4D, &err);
+ pixpaper_send_data(panel, PIXPAPER_UNKNOWN_4D_CONFIG, &err);
+ if (err.errno_code)
+ goto init_fail;
+ pixpaper_wait_for_panel(panel);
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_PANEL_SETTING, &err);
+ pixpaper_send_data(panel, PIXPAPER_PSR_CONFIG, &err);
+ pixpaper_send_data(panel, PIXPAPER_PSR_CONFIG2, &err);
+ if (err.errno_code)
+ goto init_fail;
+ pixpaper_wait_for_panel(panel);
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_POWER_SETTING, &err);
+ pixpaper_send_data(panel, PIXPAPER_PWR_CONFIG1, &err);
+ pixpaper_send_data(panel, PIXPAPER_PWR_CONFIG2, &err);
+ pixpaper_send_data(panel, PIXPAPER_PWR_VSP_8_2V, &err);
+ pixpaper_send_data(panel, PIXPAPER_PWR_VSPL_15V, &err);
+ pixpaper_send_data(panel, PIXPAPER_PWR_VSN_4V, &err);
+ pixpaper_send_data(panel, PIXPAPER_PWR_VSP_8_2V, &err);
+ if (err.errno_code)
+ goto init_fail;
+ pixpaper_wait_for_panel(panel);
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_POWER_OFF_SEQUENCE, &err);
+ pixpaper_send_data(panel, PIXPAPER_PFS_CONFIG1, &err);
+ pixpaper_send_data(panel, PIXPAPER_PFS_CONFIG2, &err);
+ pixpaper_send_data(panel, PIXPAPER_PFS_CONFIG3, &err);
+ if (err.errno_code)
+ goto init_fail;
+ pixpaper_wait_for_panel(panel);
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_BOOSTER_SOFT_START, &err);
+ pixpaper_send_data(panel, PIXPAPER_BTST_CONFIG1, &err);
+ pixpaper_send_data(panel, PIXPAPER_BTST_CONFIG2, &err);
+ pixpaper_send_data(panel, PIXPAPER_BTST_CONFIG3, &err);
+ pixpaper_send_data(panel, PIXPAPER_BTST_CONFIG4, &err);
+ pixpaper_send_data(panel, PIXPAPER_BTST_CONFIG5, &err);
+ pixpaper_send_data(panel, PIXPAPER_BTST_CONFIG6, &err);
+ pixpaper_send_data(panel, PIXPAPER_BTST_CONFIG7, &err);
+ if (err.errno_code)
+ goto init_fail;
+ pixpaper_wait_for_panel(panel);
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_PLL_CONTROL, &err);
+ pixpaper_send_data(panel, PIXPAPER_PLL_CONFIG, &err);
+ if (err.errno_code)
+ goto init_fail;
+ pixpaper_wait_for_panel(panel);
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_TEMP_SENSOR_CALIB, &err);
+ pixpaper_send_data(panel, PIXPAPER_TSE_CONFIG, &err);
+ if (err.errno_code)
+ goto init_fail;
+ pixpaper_wait_for_panel(panel);
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_VCOM_INTERVAL, &err);
+ pixpaper_send_data(panel, PIXPAPER_CDI_CONFIG, &err);
+ if (err.errno_code)
+ goto init_fail;
+ pixpaper_wait_for_panel(panel);
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_UNKNOWN_60, &err);
+ pixpaper_send_data(panel, PIXPAPER_UNKNOWN_60_CONFIG1, &err);
+ pixpaper_send_data(panel, PIXPAPER_UNKNOWN_60_CONFIG2, &err);
+ if (err.errno_code)
+ goto init_fail;
+ pixpaper_wait_for_panel(panel);
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_RESOLUTION_SETTING, &err);
+ pixpaper_send_data(panel, PIXPAPER_TRES_HRES_H, &err);
+ pixpaper_send_data(panel, PIXPAPER_TRES_HRES_L, &err);
+ pixpaper_send_data(panel, PIXPAPER_TRES_VRES_H, &err);
+ pixpaper_send_data(panel, PIXPAPER_TRES_VRES_L, &err);
+ if (err.errno_code)
+ goto init_fail;
+ pixpaper_wait_for_panel(panel);
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_GATE_SOURCE_START, &err);
+ pixpaper_send_data(panel, PIXPAPER_GSST_S_START, &err);
+ pixpaper_send_data(panel, PIXPAPER_GSST_RESERVED, &err);
+ pixpaper_send_data(panel, PIXPAPER_GSST_G_START_H, &err);
+ pixpaper_send_data(panel, PIXPAPER_GSST_G_START_L, &err);
+ if (err.errno_code)
+ goto init_fail;
+ pixpaper_wait_for_panel(panel);
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_UNKNOWN_E7, &err);
+ pixpaper_send_data(panel, PIXPAPER_UNKNOWN_E7_CONFIG, &err);
+ if (err.errno_code)
+ goto init_fail;
+ pixpaper_wait_for_panel(panel);
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_POWER_SAVING, &err);
+ pixpaper_send_data(panel, PIXPAPER_PWS_CONFIG, &err);
+ if (err.errno_code)
+ goto init_fail;
+ pixpaper_wait_for_panel(panel);
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_UNKNOWN_E0, &err);
+ pixpaper_send_data(panel, PIXPAPER_UNKNOWN_E0_CONFIG, &err);
+ if (err.errno_code)
+ goto init_fail;
+ pixpaper_wait_for_panel(panel);
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_UNKNOWN_B4, &err);
+ pixpaper_send_data(panel, PIXPAPER_UNKNOWN_B4_CONFIG, &err);
+ if (err.errno_code)
+ goto init_fail;
+ pixpaper_wait_for_panel(panel);
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_UNKNOWN_B5, &err);
+ pixpaper_send_data(panel, PIXPAPER_UNKNOWN_B5_CONFIG, &err);
+ if (err.errno_code)
+ goto init_fail;
+ pixpaper_wait_for_panel(panel);
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_UNKNOWN_E9, &err);
+ pixpaper_send_data(panel, PIXPAPER_UNKNOWN_E9_CONFIG, &err);
+ if (err.errno_code)
+ goto init_fail;
+ pixpaper_wait_for_panel(panel);
+
+ return 0;
+
+init_fail:
+ drm_err(&panel->drm, "Hardware initialization failed (err=%d)\n",
+ err.errno_code);
+ return err.errno_code;
+}
+
+/*
+ * Convert framebuffer pixels to 2-bit e-paper format:
+ * 00 - Black
+ * 01 - White
+ * 10 - Yellow
+ * 11 - Red
+ */
+static u8 pack_pixels_to_byte(__le32 *src_pixels, int i, int j,
+ struct drm_framebuffer *fb)
+{
+ u8 packed_byte = 0;
+ int k;
+
+ for (k = 0; k < 4; k++) {
+ int current_pixel_x = j * 4 + k;
+ u8 two_bit_val;
+
+ if (current_pixel_x < PIXPAPER_WIDTH) {
+ u32 pixel_offset =
+ (i * (fb->pitches[0] / 4)) + current_pixel_x;
+ u32 pixel = le32_to_cpu(src_pixels[pixel_offset]);
+ u32 r = (pixel >> 16) & 0xFF;
+ u32 g = (pixel >> 8) & 0xFF;
+ u32 b = pixel & 0xFF;
+
+ if (r < PIXPAPER_COLOR_THRESHOLD_LOW_CHANNEL &&
+ g < PIXPAPER_COLOR_THRESHOLD_LOW_CHANNEL &&
+ b < PIXPAPER_COLOR_THRESHOLD_LOW_CHANNEL) {
+ two_bit_val = 0b00;
+ } else if (r > PIXPAPER_COLOR_THRESHOLD_HIGH_CHANNEL &&
+ g > PIXPAPER_COLOR_THRESHOLD_HIGH_CHANNEL &&
+ b > PIXPAPER_COLOR_THRESHOLD_HIGH_CHANNEL) {
+ two_bit_val = 0b01;
+ } else if (r > PIXPAPER_COLOR_THRESHOLD_HIGH_CHANNEL &&
+ g < PIXPAPER_COLOR_THRESHOLD_LOW_CHANNEL &&
+ b < PIXPAPER_COLOR_THRESHOLD_LOW_CHANNEL) {
+ two_bit_val = 0b11;
+ } else if (r > PIXPAPER_COLOR_THRESHOLD_HIGH_CHANNEL &&
+ g > PIXPAPER_COLOR_THRESHOLD_YELLOW_MIN_GREEN &&
+ b < PIXPAPER_COLOR_THRESHOLD_LOW_CHANNEL) {
+ two_bit_val = 0b10;
+ } else {
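+				/* Any other color maps to white */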
+ two_bit_val = 0b01;
+ }
+ } else {
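+			/* Pixels beyond the visible width pad the row as white */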
+ two_bit_val = 0b01;
+ }
+
+ packed_byte |= two_bit_val << ((3 - k) * 2);
+ }
+
+ return packed_byte;
+}
+
+static int pixpaper_plane_helper_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_plane_state =
+ drm_atomic_get_new_plane_state(state, plane);
+ struct drm_crtc *new_crtc = new_plane_state->crtc;
+ struct drm_crtc_state *new_crtc_state = NULL;
+ int ret;
+
+ if (new_crtc)
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc);
+
+ ret = drm_atomic_helper_check_plane_state(new_plane_state,
+ new_crtc_state, DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING, false, false);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int pixpaper_crtc_helper_atomic_check(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *crtc_state =
+ drm_atomic_get_new_crtc_state(state, crtc);
+
+ if (!crtc_state->enable)
+ return 0;
+
+ return drm_atomic_helper_check_crtc_primary_plane(crtc_state);
+}
+
+static void pixpaper_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct pixpaper_panel *panel = to_pixpaper_panel(crtc->dev);
+ struct drm_device *drm = &panel->drm;
+ int idx;
+ struct pixpaper_error_ctx err = { .errno_code = 0 };
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_POWER_ON, &err);
+ if (err.errno_code) {
+ drm_err_once(drm, "Failed to send PON command: %d\n", err.errno_code);
+ goto exit_drm_dev;
+ }
+
+ pixpaper_wait_for_panel(panel);
+
+ drm_dbg(drm, "Panel enabled and powered on\n");
+
+exit_drm_dev:
+ drm_dev_exit(idx);
+}
+
+static void pixpaper_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct pixpaper_panel *panel = to_pixpaper_panel(crtc->dev);
+ struct drm_device *drm = &panel->drm;
+ struct pixpaper_error_ctx err = { .errno_code = 0 };
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_POWER_OFF, &err);
+ if (err.errno_code) {
+ drm_err_once(drm, "Failed to send POF command: %d\n", err.errno_code);
+ goto exit_drm_dev;
+ }
+ pixpaper_wait_for_panel(panel);
+
+ drm_dbg(drm, "Panel disabled\n");
+
+exit_drm_dev:
+ drm_dev_exit(idx);
+}
+
+static void pixpaper_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *plane_state =
+ drm_atomic_get_new_plane_state(state, plane);
+ struct drm_shadow_plane_state *shadow_plane_state =
+ to_drm_shadow_plane_state(plane_state);
+ struct drm_crtc *crtc = plane_state->crtc;
+ struct pixpaper_panel *panel = to_pixpaper_panel(crtc->dev);
+
+ struct drm_device *drm = &panel->drm;
+ struct drm_framebuffer *fb = plane_state->fb;
+ struct iosys_map map = shadow_plane_state->data[0];
+ void *vaddr = map.vaddr;
+ int i, j, idx;
+ __le32 *src_pixels = NULL;
+ struct pixpaper_error_ctx err = { .errno_code = 0 };
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
+
+ drm_dbg(drm, "Starting frame update (phys=%dx%d, buf_w=%d)\n",
+ PIXPAPER_WIDTH, PIXPAPER_HEIGHT, PIXPAPER_PANEL_BUFFER_WIDTH);
+
+ if (!fb || !plane_state->visible) {
+		drm_err_once(drm, "Framebuffer missing or plane not visible, skipping update\n");
+ goto update_cleanup;
+ }
+
+ src_pixels = (__le32 *)vaddr;
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_DATA_START_TRANSMISSION, &err);
+ if (err.errno_code)
+ goto update_cleanup;
+
+ pixpaper_wait_for_panel(panel);
+
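+	/* Stream the packed rows: 32 bytes per row, 4 pixels per byte */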
+ for (i = 0; i < PIXPAPER_HEIGHT; i++) {
+ for (j = 0; j < PIXPAPER_PANEL_BUFFER_TWO_BYTES_PER_ROW; j++) {
+ u8 packed_byte =
+ pack_pixels_to_byte(src_pixels, i, j, fb);
+
+ pixpaper_wait_for_panel(panel);
+ pixpaper_send_data(panel, packed_byte, &err);
+ }
+ }
+ pixpaper_wait_for_panel(panel);
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_POWER_ON, &err);
+ if (err.errno_code) {
+ drm_err_once(drm, "Failed to send PON command: %d\n", err.errno_code);
+ goto update_cleanup;
+ }
+ pixpaper_wait_for_panel(panel);
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_DISPLAY_REFRESH, &err);
+ pixpaper_send_data(panel, PIXPAPER_DRF_VCOM_AC, &err);
+ if (err.errno_code) {
+		drm_err_once(drm, "Failed to send display refresh (DRF): %d\n", err.errno_code);
+ goto update_cleanup;
+ }
+ pixpaper_wait_for_panel(panel);
+
+update_cleanup:
+ if (err.errno_code && err.errno_code != -ETIMEDOUT)
+ drm_err_once(drm, "Frame update function failed with error %d\n", err.errno_code);
+
+ drm_dev_exit(idx);
+}
+
+static const struct drm_display_mode pixpaper_mode = {
+ .clock = PIXPAPER_PIXEL_CLOCK,
+ .hdisplay = PIXPAPER_WIDTH,
+ .hsync_start = PIXPAPER_WIDTH + PIXPAPER_HFP,
+ .hsync_end = PIXPAPER_WIDTH + PIXPAPER_HFP + PIXPAPER_HSYNC,
+ .htotal = PIXPAPER_HTOTAL,
+ .vdisplay = PIXPAPER_HEIGHT,
+ .vsync_start = PIXPAPER_HEIGHT + PIXPAPER_VFP,
+ .vsync_end = PIXPAPER_HEIGHT + PIXPAPER_VFP + PIXPAPER_VSYNC,
+ .vtotal = PIXPAPER_VTOTAL,
+ .width_mm = PIXPAPER_WIDTH_MM,
+ .height_mm = PIXPAPER_HEIGHT_MM,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static int pixpaper_connector_get_modes(struct drm_connector *connector)
+{
+ return drm_connector_helper_get_modes_fixed(connector, &pixpaper_mode);
+}
+
+static const struct drm_plane_funcs pixpaper_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ DRM_GEM_SHADOW_PLANE_FUNCS,
+};
+
+static const struct drm_plane_helper_funcs pixpaper_plane_helper_funcs = {
+ DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
+ .atomic_check = pixpaper_plane_helper_atomic_check,
+ .atomic_update = pixpaper_plane_atomic_update,
+};
+
+static const struct drm_crtc_funcs pixpaper_crtc_funcs = {
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = drm_atomic_helper_crtc_reset,
+ .destroy = drm_crtc_cleanup,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
+static enum drm_mode_status
+pixpaper_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode)
+{
+ if (mode->hdisplay == PIXPAPER_WIDTH &&
+ mode->vdisplay == PIXPAPER_HEIGHT) {
+ return MODE_OK;
+ }
+ return MODE_BAD;
+}
+
+static const struct drm_crtc_helper_funcs pixpaper_crtc_helper_funcs = {
+ .mode_valid = pixpaper_mode_valid,
+ .atomic_check = pixpaper_crtc_helper_atomic_check,
+ .atomic_enable = pixpaper_crtc_atomic_enable,
+ .atomic_disable = pixpaper_crtc_atomic_disable,
+};
+
+static const struct drm_encoder_funcs pixpaper_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static const struct drm_connector_funcs pixpaper_connector_funcs = {
+ .reset = drm_atomic_helper_connector_reset,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static const struct drm_connector_helper_funcs pixpaper_connector_helper_funcs = {
+ .get_modes = pixpaper_connector_get_modes,
+};
+
+DEFINE_DRM_GEM_FOPS(pixpaper_fops);
+
+static const struct drm_driver pixpaper_drm_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
+ .fops = &pixpaper_fops,
+ .name = "pixpaper",
+ .desc = "DRM driver for PIXPAPER e-ink",
+ .major = 1,
+ .minor = 0,
+ DRM_GEM_SHMEM_DRIVER_OPS,
+ DRM_FBDEV_SHMEM_DRIVER_OPS,
+};
+
+static const struct drm_mode_config_funcs pixpaper_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create_with_dirty,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static int pixpaper_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct pixpaper_panel *panel;
+ struct drm_device *drm;
+ int ret;
+
+ panel = devm_drm_dev_alloc(dev, &pixpaper_drm_driver,
+ struct pixpaper_panel, drm);
+ if (IS_ERR(panel))
+ return PTR_ERR(panel);
+
+ drm = &panel->drm;
+ panel->spi = spi;
+ spi_set_drvdata(spi, panel);
+
+ spi->mode = SPI_MODE_0;
+ spi->bits_per_word = PIXPAPER_SPI_BITS_PER_WORD;
+
+ if (!spi->max_speed_hz) {
+ drm_warn(drm,
+ "spi-max-frequency not specified in DT, using default %u Hz\n",
+ PIXPAPER_SPI_SPEED_DEFAULT);
+ spi->max_speed_hz = PIXPAPER_SPI_SPEED_DEFAULT;
+ }
+
+ ret = spi_setup(spi);
+ if (ret < 0) {
+ drm_err(drm, "SPI setup failed: %d\n", ret);
+ return ret;
+ }
+
+ if (!dev->dma_mask)
+ dev->dma_mask = &dev->coherent_dma_mask;
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret) {
+ drm_err(drm, "Failed to set DMA mask: %d\n", ret);
+ return ret;
+ }
+
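+	/* Panel control GPIOs: reset line, busy status input, data/command select */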
+ panel->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(panel->reset))
+ return PTR_ERR(panel->reset);
+
+ panel->busy = devm_gpiod_get(dev, "busy", GPIOD_IN);
+ if (IS_ERR(panel->busy))
+ return PTR_ERR(panel->busy);
+
+ panel->dc = devm_gpiod_get(dev, "dc", GPIOD_OUT_HIGH);
+ if (IS_ERR(panel->dc))
+ return PTR_ERR(panel->dc);
+
+ ret = pixpaper_panel_hw_init(panel);
+ if (ret) {
+ drm_err(drm, "Panel hardware initialization failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = drmm_mode_config_init(drm);
+ if (ret)
+ return ret;
+ drm->mode_config.funcs = &pixpaper_mode_config_funcs;
+ drm->mode_config.min_width = PIXPAPER_WIDTH;
+ drm->mode_config.max_width = PIXPAPER_WIDTH;
+ drm->mode_config.min_height = PIXPAPER_HEIGHT;
+ drm->mode_config.max_height = PIXPAPER_HEIGHT;
+
+ ret = drm_universal_plane_init(drm, &panel->plane, 1,
+ &pixpaper_plane_funcs,
+ (const uint32_t[]){ DRM_FORMAT_XRGB8888 },
+ 1, NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret)
+ return ret;
+ drm_plane_helper_add(&panel->plane, &pixpaper_plane_helper_funcs);
+
+ ret = drm_crtc_init_with_planes(drm, &panel->crtc, &panel->plane, NULL,
+ &pixpaper_crtc_funcs, NULL);
+ if (ret)
+ return ret;
+ drm_crtc_helper_add(&panel->crtc, &pixpaper_crtc_helper_funcs);
+
+ ret = drm_encoder_init(drm, &panel->encoder, &pixpaper_encoder_funcs,
+ DRM_MODE_ENCODER_NONE, NULL);
+ if (ret)
+ return ret;
+ panel->encoder.possible_crtcs = drm_crtc_mask(&panel->crtc);
+
+ ret = drm_connector_init(drm, &panel->connector,
+ &pixpaper_connector_funcs,
+ DRM_MODE_CONNECTOR_SPI);
+ if (ret)
+ return ret;
+
+ drm_connector_helper_add(&panel->connector,
+ &pixpaper_connector_helper_funcs);
+ drm_connector_attach_encoder(&panel->connector, &panel->encoder);
+
+ drm_mode_config_reset(drm);
+
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ return ret;
+
+ drm_client_setup(drm, NULL);
+
+ return 0;
+}
+
+static void pixpaper_remove(struct spi_device *spi)
+{
+ struct pixpaper_panel *panel = spi_get_drvdata(spi);
+
+ if (!panel)
+ return;
+
+ drm_dev_unplug(&panel->drm);
+ drm_atomic_helper_shutdown(&panel->drm);
+}
+
+static const struct spi_device_id pixpaper_ids[] = {
+	{ "pixpaper", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(spi, pixpaper_ids);
+
+static const struct of_device_id pixpaper_dt_ids[] = {
+ { .compatible = "mayqueen,pixpaper" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, pixpaper_dt_ids);
+
+static struct spi_driver pixpaper_spi_driver = {
+ .driver = {
+ .name = "pixpaper",
+ .of_match_table = pixpaper_dt_ids,
+ },
+ .id_table = pixpaper_ids,
+ .probe = pixpaper_probe,
+ .remove = pixpaper_remove,
+};
+
+module_spi_driver(pixpaper_spi_driver);
+
+MODULE_AUTHOR("LiangCheng Wang");
+MODULE_DESCRIPTION("DRM SPI driver for PIXPAPER e-ink panel");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tiny/repaper.c b/drivers/gpu/drm/tiny/repaper.c
index 5c3b51eb0a97..4824f863fdba 100644
--- a/drivers/gpu/drm/tiny/repaper.c
+++ b/drivers/gpu/drm/tiny/repaper.c
@@ -510,13 +510,12 @@ static void repaper_get_temperature(struct repaper_epd *epd)
epd->factored_stage_time = epd->stage_time * factor10x / 10;
}
-static int repaper_fb_dirty(struct drm_framebuffer *fb,
+static int repaper_fb_dirty(struct drm_framebuffer *fb, const struct iosys_map *vmap,
struct drm_format_conv_state *fmtcnv_state)
{
- struct drm_gem_dma_object *dma_obj = drm_fb_dma_get_gem_obj(fb, 0);
struct repaper_epd *epd = drm_to_epd(fb->dev);
unsigned int dst_pitch = 0;
- struct iosys_map dst, vmap;
+ struct iosys_map dst;
struct drm_rect clip;
int idx, ret = 0;
u8 *buf = NULL;
@@ -546,8 +545,7 @@ static int repaper_fb_dirty(struct drm_framebuffer *fb,
goto out_free;
iosys_map_set_vaddr(&dst, buf);
- iosys_map_set_vaddr(&vmap, dma_obj->vaddr);
- drm_fb_xrgb8888_to_mono(&dst, &dst_pitch, &vmap, fb, &clip, fmtcnv_state);
+ drm_fb_xrgb8888_to_mono(&dst, &dst_pitch, vmap, fb, &clip, fmtcnv_state);
drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
@@ -832,16 +830,15 @@ static void repaper_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state)
{
struct drm_plane_state *state = pipe->plane.state;
- struct drm_format_conv_state fmtcnv_state = DRM_FORMAT_CONV_STATE_INIT;
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(state);
struct drm_rect rect;
if (!pipe->crtc.state->active)
return;
if (drm_atomic_helper_damage_merged(old_state, state, &rect))
- repaper_fb_dirty(state->fb, &fmtcnv_state);
-
- drm_format_conv_state_release(&fmtcnv_state);
+ repaper_fb_dirty(state->fb, shadow_plane_state->data,
+ &shadow_plane_state->fmtcnv_state);
}
static const struct drm_simple_display_pipe_funcs repaper_pipe_funcs = {
@@ -849,6 +846,7 @@ static const struct drm_simple_display_pipe_funcs repaper_pipe_funcs = {
.enable = repaper_pipe_enable,
.disable = repaper_pipe_disable,
.update = repaper_pipe_update,
+ DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS,
};
static int repaper_connector_get_modes(struct drm_connector *connector)
diff --git a/drivers/gpu/drm/tiny/sharp-memory.c b/drivers/gpu/drm/tiny/sharp-memory.c
index 03d2850310c4..64272cd0f6e2 100644
--- a/drivers/gpu/drm/tiny/sharp-memory.c
+++ b/drivers/gpu/drm/tiny/sharp-memory.c
@@ -126,28 +126,28 @@ static inline void sharp_memory_set_tx_buffer_addresses(u8 *buffer,
static void sharp_memory_set_tx_buffer_data(u8 *buffer,
struct drm_framebuffer *fb,
+ const struct iosys_map *vmap,
struct drm_rect clip,
u32 pitch,
struct drm_format_conv_state *fmtcnv_state)
{
int ret;
- struct iosys_map dst, vmap;
- struct drm_gem_dma_object *dma_obj = drm_fb_dma_get_gem_obj(fb, 0);
+ struct iosys_map dst;
ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
if (ret)
return;
iosys_map_set_vaddr(&dst, buffer);
- iosys_map_set_vaddr(&vmap, dma_obj->vaddr);
- drm_fb_xrgb8888_to_mono(&dst, &pitch, &vmap, fb, &clip, fmtcnv_state);
+ drm_fb_xrgb8888_to_mono(&dst, &pitch, vmap, fb, &clip, fmtcnv_state);
drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
}
static int sharp_memory_update_display(struct sharp_memory_device *smd,
struct drm_framebuffer *fb,
+ const struct iosys_map *vmap,
struct drm_rect clip,
struct drm_format_conv_state *fmtcnv_state)
{
@@ -163,7 +163,7 @@ static int sharp_memory_update_display(struct sharp_memory_device *smd,
sharp_memory_set_tx_buffer_mode(&tx_buffer[0],
SHARP_MEMORY_DISPLAY_UPDATE_MODE, vcom);
sharp_memory_set_tx_buffer_addresses(&tx_buffer[1], clip, pitch);
- sharp_memory_set_tx_buffer_data(&tx_buffer[2], fb, clip, pitch, fmtcnv_state);
+ sharp_memory_set_tx_buffer_data(&tx_buffer[2], fb, vmap, clip, pitch, fmtcnv_state);
ret = sharp_memory_spi_write(smd->spi, tx_buffer, tx_buffer_size);
@@ -206,7 +206,8 @@ static int sharp_memory_clear_display(struct sharp_memory_device *smd)
return ret;
}
-static void sharp_memory_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect,
+static void sharp_memory_fb_dirty(struct drm_framebuffer *fb, const struct iosys_map *vmap,
+ struct drm_rect *rect,
struct drm_format_conv_state *fmtconv_state)
{
struct drm_rect clip;
@@ -218,7 +219,7 @@ static void sharp_memory_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *r
clip.y1 = rect->y1;
clip.y2 = rect->y2;
- sharp_memory_update_display(smd, fb, clip, fmtconv_state);
+ sharp_memory_update_display(smd, fb, vmap, clip, fmtconv_state);
}
static int sharp_memory_plane_atomic_check(struct drm_plane *plane,
@@ -242,7 +243,7 @@ static void sharp_memory_plane_atomic_update(struct drm_plane *plane,
{
struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane);
struct drm_plane_state *plane_state = plane->state;
- struct drm_format_conv_state fmtcnv_state = DRM_FORMAT_CONV_STATE_INIT;
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
struct sharp_memory_device *smd;
struct drm_rect rect;
@@ -251,15 +252,15 @@ static void sharp_memory_plane_atomic_update(struct drm_plane *plane,
return;
if (drm_atomic_helper_damage_merged(old_state, plane_state, &rect))
- sharp_memory_fb_dirty(plane_state->fb, &rect, &fmtcnv_state);
-
- drm_format_conv_state_release(&fmtcnv_state);
+ sharp_memory_fb_dirty(plane_state->fb, shadow_plane_state->data,
+ &rect, &shadow_plane_state->fmtcnv_state);
}
static const struct drm_plane_helper_funcs sharp_memory_plane_helper_funcs = {
.prepare_fb = drm_gem_plane_helper_prepare_fb,
.atomic_check = sharp_memory_plane_atomic_check,
.atomic_update = sharp_memory_plane_atomic_update,
+ DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
};
static bool sharp_memory_format_mod_supported(struct drm_plane *plane,
@@ -273,9 +274,7 @@ static const struct drm_plane_funcs sharp_memory_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = drm_plane_cleanup,
- .reset = drm_atomic_helper_plane_reset,
- .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+ DRM_GEM_SHADOW_PLANE_FUNCS,
.format_mod_supported = sharp_memory_format_mod_supported,
};
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index f4d9e68b21e7..29423ceeec5c 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1283,3 +1283,18 @@ int ttm_bo_populate(struct ttm_buffer_object *bo,
return 0;
}
EXPORT_SYMBOL(ttm_bo_populate);
+
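+/**
+ * ttm_bo_setup_export - Prepare a buffer object for export.
+ * @bo: The buffer object to prepare.
+ * @ctx: Operation context for populating the backing storage.
+ *
+ * Reserves the buffer object, populates its backing storage and drops
+ * the reservation again, leaving the object ready for export.
+ *
+ * Returns: 0 on success, or a negative errno code on failure.
+ */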
+int ttm_bo_setup_export(struct ttm_buffer_object *bo,
+ struct ttm_operation_ctx *ctx)
+{
+ int ret;
+
+ ret = ttm_bo_reserve(bo, false, false, NULL);
+ if (ret != 0)
+ return ret;
+
+ ret = ttm_bo_populate(bo, ctx);
+ ttm_bo_unreserve(bo);
+ return ret;
+}
+EXPORT_SYMBOL(ttm_bo_setup_export);
diff --git a/drivers/gpu/drm/tyr/Kconfig b/drivers/gpu/drm/tyr/Kconfig
new file mode 100644
index 000000000000..4b55308fd2eb
--- /dev/null
+++ b/drivers/gpu/drm/tyr/Kconfig
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0 or MIT
+
+config DRM_TYR
+ tristate "Tyr (Rust DRM support for ARM Mali CSF-based GPUs)"
+ depends on DRM=y
+ depends on RUST
+ depends on ARM || ARM64 || COMPILE_TEST
+ depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_LPAE
+ default n
+ help
+ Rust DRM driver for ARM Mali CSF-based GPUs.
+
+ This driver is for Mali (or Immortalis) Valhall Gxxx GPUs.
+
+ Note that the Mali-G68 and Mali-G78, while based on the Valhall
+ architecture, will be supported by the panfrost driver as they are
+ not CSF GPUs.
+
+ If M is selected, the module will be called tyr. This driver is a
+ work in progress and may not be functional.
diff --git a/drivers/gpu/drm/tyr/Makefile b/drivers/gpu/drm/tyr/Makefile
new file mode 100644
index 000000000000..ba545f65f2c0
--- /dev/null
+++ b/drivers/gpu/drm/tyr/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0 or MIT
+
+obj-$(CONFIG_DRM_TYR) += tyr.o
diff --git a/drivers/gpu/drm/tyr/driver.rs b/drivers/gpu/drm/tyr/driver.rs
new file mode 100644
index 000000000000..d5625dd1e41c
--- /dev/null
+++ b/drivers/gpu/drm/tyr/driver.rs
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-2.0 or MIT
+
+use kernel::c_str;
+use kernel::clk::Clk;
+use kernel::clk::OptionalClk;
+use kernel::device::Bound;
+use kernel::device::Core;
+use kernel::device::Device;
+use kernel::devres::Devres;
+use kernel::drm;
+use kernel::drm::ioctl;
+use kernel::new_mutex;
+use kernel::of;
+use kernel::platform;
+use kernel::prelude::*;
+use kernel::regulator;
+use kernel::regulator::Regulator;
+use kernel::sizes::SZ_2M;
+use kernel::sync::Arc;
+use kernel::sync::Mutex;
+use kernel::time;
+use kernel::types::ARef;
+
+use crate::file::File;
+use crate::gem::TyrObject;
+use crate::gpu;
+use crate::gpu::GpuInfo;
+use crate::regs;
+
+pub(crate) type IoMem = kernel::io::mem::IoMem<SZ_2M>;
+
+/// Convenience type alias for the DRM device type for this driver.
+pub(crate) type TyrDevice = drm::Device<TyrDriver>;
+
+#[pin_data(PinnedDrop)]
+pub(crate) struct TyrDriver {
+ device: ARef<TyrDevice>,
+}
+
+#[pin_data(PinnedDrop)]
+pub(crate) struct TyrData {
+ pub(crate) pdev: ARef<platform::Device>,
+
+ #[pin]
+ clks: Mutex<Clocks>,
+
+ #[pin]
+ regulators: Mutex<Regulators>,
+
+ /// Some information on the GPU.
+ ///
+ /// This is mainly queried by userspace, i.e., Mesa.
+ pub(crate) gpu_info: GpuInfo,
+}
+
+// Neither `Clk` nor `Regulator` implements `Send` or `Sync`, but they
+// should. There are patches on the mailing list to address this, but they
+// have not landed yet.
+//
+// For now, add this workaround so that this patch compiles, with the promise
+// that it will be removed in a future patch.
+//
+// SAFETY: This will be removed in a future patch.
+unsafe impl Send for TyrData {}
+// SAFETY: This will be removed in a future patch.
+unsafe impl Sync for TyrData {}
+
+fn issue_soft_reset(dev: &Device<Bound>, iomem: &Devres<IoMem>) -> Result {
+ regs::GPU_CMD.write(dev, iomem, regs::GPU_CMD_SOFT_RESET)?;
+
+ // TODO: We cannot poll, as there is no support in Rust currently, so we
+ // sleep. Change this when read_poll_timeout() is implemented in Rust.
+ kernel::time::delay::fsleep(time::Delta::from_millis(100));
+
+ if regs::GPU_IRQ_RAWSTAT.read(dev, iomem)? & regs::GPU_IRQ_RAWSTAT_RESET_COMPLETED == 0 {
+ dev_err!(dev, "GPU reset failed with errno\n");
+ dev_err!(
+ dev,
+ "GPU_INT_RAWSTAT is {}\n",
+ regs::GPU_IRQ_RAWSTAT.read(dev, iomem)?
+ );
+
+ return Err(EIO);
+ }
+
+ Ok(())
+}
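
For reference, the poll that this TODO refers to is a single helper call in C drivers. A sketch of the intended behavior using readl_poll_timeout(), assuming a plain void __iomem * mapping (the Devres<IoMem> indirection and the exact timeout are elided/guessed):

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

/* Offsets and bits transcribed from regs.rs further below; sketch only. */
#define GPU_CMD					0x30
#define GPU_CMD_SOFT_RESET			(1 | (1 << 8))
#define GPU_IRQ_RAWSTAT				0x20
#define GPU_IRQ_RAWSTAT_RESET_COMPLETED		BIT(8)

/* Kick the soft reset, then poll RAWSTAT instead of sleeping blindly. */
static int issue_soft_reset_sketch(void __iomem *mem)
{
	u32 stat;

	writel(GPU_CMD_SOFT_RESET, mem + GPU_CMD);
	return readl_poll_timeout(mem + GPU_IRQ_RAWSTAT, stat,
				  stat & GPU_IRQ_RAWSTAT_RESET_COMPLETED,
				  100, 100000);
}
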
+
+kernel::of_device_table!(
+ OF_TABLE,
+ MODULE_OF_TABLE,
+ <TyrDriver as platform::Driver>::IdInfo,
+ [
+ (of::DeviceId::new(c_str!("rockchip,rk3588-mali")), ()),
+ (of::DeviceId::new(c_str!("arm,mali-valhall-csf")), ())
+ ]
+);
+
+impl platform::Driver for TyrDriver {
+ type IdInfo = ();
+ const OF_ID_TABLE: Option<of::IdTable<Self::IdInfo>> = Some(&OF_TABLE);
+
+ fn probe(
+ pdev: &platform::Device<Core>,
+ _info: Option<&Self::IdInfo>,
+ ) -> Result<Pin<KBox<Self>>> {
+ let core_clk = Clk::get(pdev.as_ref(), Some(c_str!("core")))?;
+ let stacks_clk = OptionalClk::get(pdev.as_ref(), Some(c_str!("stacks")))?;
+ let coregroup_clk = OptionalClk::get(pdev.as_ref(), Some(c_str!("coregroup")))?;
+
+ core_clk.prepare_enable()?;
+ stacks_clk.prepare_enable()?;
+ coregroup_clk.prepare_enable()?;
+
+ let mali_regulator = Regulator::<regulator::Enabled>::get(pdev.as_ref(), c_str!("mali"))?;
+ let sram_regulator = Regulator::<regulator::Enabled>::get(pdev.as_ref(), c_str!("sram"))?;
+
+ let request = pdev.io_request_by_index(0).ok_or(ENODEV)?;
+ let iomem = Arc::pin_init(request.iomap_sized::<SZ_2M>(), GFP_KERNEL)?;
+
+ issue_soft_reset(pdev.as_ref(), &iomem)?;
+ gpu::l2_power_on(pdev.as_ref(), &iomem)?;
+
+ let gpu_info = GpuInfo::new(pdev.as_ref(), &iomem)?;
+ gpu_info.log(pdev);
+
+ let platform: ARef<platform::Device> = pdev.into();
+
+ let data = try_pin_init!(TyrData {
+ pdev: platform.clone(),
+ clks <- new_mutex!(Clocks {
+ core: core_clk,
+ stacks: stacks_clk,
+ coregroup: coregroup_clk,
+ }),
+ regulators <- new_mutex!(Regulators {
+ mali: mali_regulator,
+ sram: sram_regulator,
+ }),
+ gpu_info,
+ });
+
+ let tdev: ARef<TyrDevice> = drm::Device::new(pdev.as_ref(), data)?;
+ drm::driver::Registration::new_foreign_owned(&tdev, pdev.as_ref(), 0)?;
+
+ let driver = KBox::pin_init(try_pin_init!(TyrDriver { device: tdev }), GFP_KERNEL)?;
+
+ // We need this to be dev_info!() because dev_dbg!() does not work at
+ // all in Rust for now, and we need to see whether probe succeeded.
+ dev_info!(pdev.as_ref(), "Tyr initialized correctly.\n");
+ Ok(driver)
+ }
+}
+
+#[pinned_drop]
+impl PinnedDrop for TyrDriver {
+ fn drop(self: Pin<&mut Self>) {}
+}
+
+#[pinned_drop]
+impl PinnedDrop for TyrData {
+ fn drop(self: Pin<&mut Self>) {
+ // TODO: the type-state pattern for Clks will fix this.
+ let clks = self.clks.lock();
+ clks.core.disable_unprepare();
+ clks.stacks.disable_unprepare();
+ clks.coregroup.disable_unprepare();
+ }
+}
+
+// We need to retain the name "panthor" to achieve drop-in compatibility with
+// the C driver in the userspace stack.
+const INFO: drm::DriverInfo = drm::DriverInfo {
+ major: 1,
+ minor: 5,
+ patchlevel: 0,
+ name: c_str!("panthor"),
+ desc: c_str!("ARM Mali Tyr DRM driver"),
+};
+
+#[vtable]
+impl drm::Driver for TyrDriver {
+ type Data = TyrData;
+ type File = File;
+ type Object = drm::gem::Object<TyrObject>;
+
+ const INFO: drm::DriverInfo = INFO;
+
+ kernel::declare_drm_ioctls! {
+ (PANTHOR_DEV_QUERY, drm_panthor_dev_query, ioctl::RENDER_ALLOW, File::dev_query),
+ }
+}
+
+#[pin_data]
+struct Clocks {
+ core: Clk,
+ stacks: OptionalClk,
+ coregroup: OptionalClk,
+}
+
+#[pin_data]
+struct Regulators {
+ mali: Regulator<regulator::Enabled>,
+ sram: Regulator<regulator::Enabled>,
+}
diff --git a/drivers/gpu/drm/tyr/file.rs b/drivers/gpu/drm/tyr/file.rs
new file mode 100644
index 000000000000..0ef432947b73
--- /dev/null
+++ b/drivers/gpu/drm/tyr/file.rs
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0 or MIT
+
+use kernel::drm;
+use kernel::prelude::*;
+use kernel::uaccess::UserSlice;
+use kernel::uapi;
+
+use crate::driver::TyrDevice;
+use crate::TyrDriver;
+
+#[pin_data]
+pub(crate) struct File {}
+
+/// Convenience type alias for our DRM `File` type
+pub(crate) type DrmFile = drm::file::File<File>;
+
+impl drm::file::DriverFile for File {
+ type Driver = TyrDriver;
+
+ fn open(_dev: &drm::Device<Self::Driver>) -> Result<Pin<KBox<Self>>> {
+ KBox::try_pin_init(try_pin_init!(Self {}), GFP_KERNEL)
+ }
+}
+
+impl File {
+ pub(crate) fn dev_query(
+ tdev: &TyrDevice,
+ devquery: &mut uapi::drm_panthor_dev_query,
+ _file: &DrmFile,
+ ) -> Result<u32> {
+ if devquery.pointer == 0 {
+ match devquery.type_ {
+ uapi::drm_panthor_dev_query_type_DRM_PANTHOR_DEV_QUERY_GPU_INFO => {
+ devquery.size = core::mem::size_of_val(&tdev.gpu_info) as u32;
+ Ok(0)
+ }
+ _ => Err(EINVAL),
+ }
+ } else {
+ match devquery.type_ {
+ uapi::drm_panthor_dev_query_type_DRM_PANTHOR_DEV_QUERY_GPU_INFO => {
+ let mut writer = UserSlice::new(
+ UserPtr::from_addr(devquery.pointer as usize),
+ devquery.size as usize,
+ )
+ .writer();
+
+ writer.write(&tdev.gpu_info)?;
+
+ Ok(0)
+ }
+ _ => Err(EINVAL),
+ }
+ }
+ }
+}
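
Seen from userspace, dev_query() implements the usual two-pass panthor query: a first call with pointer == 0 only reports the required size, a second call fills the buffer. A sketch of a caller, relying on the existing panthor uAPI that Tyr is drop-in compatible with (the helper name is made up):

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <drm/panthor_drm.h>

static struct drm_panthor_gpu_info *tyr_query_gpu_info(int fd)
{
	struct drm_panthor_dev_query q = {
		.type = DRM_PANTHOR_DEV_QUERY_GPU_INFO,
		.pointer = 0,			/* pass 1: size only */
	};
	struct drm_panthor_gpu_info *info;

	if (ioctl(fd, DRM_IOCTL_PANTHOR_DEV_QUERY, &q) || !q.size)
		return NULL;

	info = malloc(q.size);
	if (!info)
		return NULL;

	q.pointer = (uintptr_t)info;		/* pass 2: fetch the data */
	if (ioctl(fd, DRM_IOCTL_PANTHOR_DEV_QUERY, &q)) {
		free(info);
		return NULL;
	}

	return info;
}
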
diff --git a/drivers/gpu/drm/tyr/gem.rs b/drivers/gpu/drm/tyr/gem.rs
new file mode 100644
index 000000000000..1273bf89dbd5
--- /dev/null
+++ b/drivers/gpu/drm/tyr/gem.rs
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0 or MIT
+
+use crate::driver::TyrDevice;
+use crate::driver::TyrDriver;
+use kernel::drm::gem;
+use kernel::prelude::*;
+
+/// GEM Object inner driver data
+#[pin_data]
+pub(crate) struct TyrObject {}
+
+impl gem::DriverObject for TyrObject {
+ type Driver = TyrDriver;
+
+ fn new(_dev: &TyrDevice, _size: usize) -> impl PinInit<Self, Error> {
+ try_pin_init!(TyrObject {})
+ }
+}
diff --git a/drivers/gpu/drm/tyr/gpu.rs b/drivers/gpu/drm/tyr/gpu.rs
new file mode 100644
index 000000000000..6c582910dd5d
--- /dev/null
+++ b/drivers/gpu/drm/tyr/gpu.rs
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-2.0 or MIT
+
+use kernel::bits::genmask_u32;
+use kernel::device::Bound;
+use kernel::device::Device;
+use kernel::devres::Devres;
+use kernel::platform;
+use kernel::prelude::*;
+use kernel::time;
+use kernel::transmute::AsBytes;
+
+use crate::driver::IoMem;
+use crate::regs;
+
+/// Struct containing information that can be queried by userspace. This is read from
+/// the GPU's registers.
+///
+/// # Invariants
+///
+/// - The layout of this struct is identical to the C `struct drm_panthor_gpu_info`.
+#[repr(C)]
+pub(crate) struct GpuInfo {
+ pub(crate) gpu_id: u32,
+ pub(crate) gpu_rev: u32,
+ pub(crate) csf_id: u32,
+ pub(crate) l2_features: u32,
+ pub(crate) tiler_features: u32,
+ pub(crate) mem_features: u32,
+ pub(crate) mmu_features: u32,
+ pub(crate) thread_features: u32,
+ pub(crate) max_threads: u32,
+ pub(crate) thread_max_workgroup_size: u32,
+ pub(crate) thread_max_barrier_size: u32,
+ pub(crate) coherency_features: u32,
+ pub(crate) texture_features: [u32; 4],
+ pub(crate) as_present: u32,
+ pub(crate) pad0: u32,
+ pub(crate) shader_present: u64,
+ pub(crate) l2_present: u64,
+ pub(crate) tiler_present: u64,
+ pub(crate) core_features: u32,
+ pub(crate) pad: u32,
+}
+
+impl GpuInfo {
+ pub(crate) fn new(dev: &Device<Bound>, iomem: &Devres<IoMem>) -> Result<Self> {
+ let gpu_id = regs::GPU_ID.read(dev, iomem)?;
+ let csf_id = regs::GPU_CSF_ID.read(dev, iomem)?;
+ let gpu_rev = regs::GPU_REVID.read(dev, iomem)?;
+ let core_features = regs::GPU_CORE_FEATURES.read(dev, iomem)?;
+ let l2_features = regs::GPU_L2_FEATURES.read(dev, iomem)?;
+ let tiler_features = regs::GPU_TILER_FEATURES.read(dev, iomem)?;
+ let mem_features = regs::GPU_MEM_FEATURES.read(dev, iomem)?;
+ let mmu_features = regs::GPU_MMU_FEATURES.read(dev, iomem)?;
+ let thread_features = regs::GPU_THREAD_FEATURES.read(dev, iomem)?;
+ let max_threads = regs::GPU_THREAD_MAX_THREADS.read(dev, iomem)?;
+ let thread_max_workgroup_size = regs::GPU_THREAD_MAX_WORKGROUP_SIZE.read(dev, iomem)?;
+ let thread_max_barrier_size = regs::GPU_THREAD_MAX_BARRIER_SIZE.read(dev, iomem)?;
+ let coherency_features = regs::GPU_COHERENCY_FEATURES.read(dev, iomem)?;
+
+ let texture_features = regs::GPU_TEXTURE_FEATURES0.read(dev, iomem)?;
+
+ let as_present = regs::GPU_AS_PRESENT.read(dev, iomem)?;
+
+ let shader_present = u64::from(regs::GPU_SHADER_PRESENT_LO.read(dev, iomem)?);
+ let shader_present =
+ shader_present | u64::from(regs::GPU_SHADER_PRESENT_HI.read(dev, iomem)?) << 32;
+
+ let tiler_present = u64::from(regs::GPU_TILER_PRESENT_LO.read(dev, iomem)?);
+ let tiler_present =
+ tiler_present | u64::from(regs::GPU_TILER_PRESENT_HI.read(dev, iomem)?) << 32;
+
+ let l2_present = u64::from(regs::GPU_L2_PRESENT_LO.read(dev, iomem)?);
+ let l2_present = l2_present | u64::from(regs::GPU_L2_PRESENT_HI.read(dev, iomem)?) << 32;
+
+ Ok(Self {
+ gpu_id,
+ gpu_rev,
+ csf_id,
+ l2_features,
+ tiler_features,
+ mem_features,
+ mmu_features,
+ thread_features,
+ max_threads,
+ thread_max_workgroup_size,
+ thread_max_barrier_size,
+ coherency_features,
+ // TODO: Add texture_features_{1,2,3}.
+ texture_features: [texture_features, 0, 0, 0],
+ as_present,
+ pad0: 0,
+ shader_present,
+ l2_present,
+ tiler_present,
+ core_features,
+ pad: 0,
+ })
+ }
+
+ pub(crate) fn log(&self, pdev: &platform::Device) {
+ let major = (self.gpu_id >> 16) & 0xff;
+ let minor = (self.gpu_id >> 8) & 0xff;
+ let status = self.gpu_id & 0xff;
+
+ let model_name = if let Some(model) = GPU_MODELS
+ .iter()
+ .find(|&f| f.major == major && f.minor == minor)
+ {
+ model.name
+ } else {
+ "unknown"
+ };
+
+ dev_info!(
+ pdev.as_ref(),
+ "mali-{} id 0x{:x} major 0x{:x} minor 0x{:x} status 0x{:x}",
+ model_name,
+ self.gpu_id >> 16,
+ major,
+ minor,
+ status
+ );
+
+ dev_info!(
+ pdev.as_ref(),
+ "Features: L2:{:#x} Tiler:{:#x} Mem:{:#x} MMU:{:#x} AS:{:#x}",
+ self.l2_features,
+ self.tiler_features,
+ self.mem_features,
+ self.mmu_features,
+ self.as_present
+ );
+
+ dev_info!(
+ pdev.as_ref(),
+ "shader_present=0x{:016x} l2_present=0x{:016x} tiler_present=0x{:016x}",
+ self.shader_present,
+ self.l2_present,
+ self.tiler_present
+ );
+ }
+
+ /// Returns the number of virtual address bits supported by the GPU.
+ #[expect(dead_code)]
+ pub(crate) fn va_bits(&self) -> u32 {
+ self.mmu_features & genmask_u32(0..=7)
+ }
+
+ /// Returns the number of physical address bits supported by the GPU.
+ #[expect(dead_code)]
+ pub(crate) fn pa_bits(&self) -> u32 {
+ (self.mmu_features >> 8) & genmask_u32(0..=7)
+ }
+}
+
+// SAFETY: `GpuInfo`'s invariant guarantees that it is the same type that is
+// already exposed to userspace by the C driver. This implies that it fulfills
+// the requirements for `AsBytes`.
+//
+// This means:
+//
+// - No implicit padding,
+// - No kernel pointers,
+// - No interior mutability.
+unsafe impl AsBytes for GpuInfo {}
+
+struct GpuModels {
+ name: &'static str,
+ major: u32,
+ minor: u32,
+}
+
+const GPU_MODELS: [GpuModels; 1] = [GpuModels {
+ name: "g610",
+ major: 10,
+ minor: 7,
+}];
+
+#[allow(dead_code)]
+pub(crate) struct GpuId {
+ pub(crate) arch_major: u32,
+ pub(crate) arch_minor: u32,
+ pub(crate) arch_rev: u32,
+ pub(crate) prod_major: u32,
+ pub(crate) ver_major: u32,
+ pub(crate) ver_minor: u32,
+ pub(crate) ver_status: u32,
+}
+
+impl From<u32> for GpuId {
+ fn from(value: u32) -> Self {
+ GpuId {
+ arch_major: (value & genmask_u32(28..=31)) >> 28,
+ arch_minor: (value & genmask_u32(24..=27)) >> 24,
+ arch_rev: (value & genmask_u32(20..=23)) >> 20,
+ prod_major: (value & genmask_u32(16..=19)) >> 16,
+ ver_major: (value & genmask_u32(12..=15)) >> 12,
+ ver_minor: (value & genmask_u32(4..=11)) >> 4,
+ ver_status: value & genmask_u32(0..=3),
+ }
+ }
+}
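
For comparison, the same decode in kernel C is conventionally written with GENMASK()/FIELD_GET(); a sketch (the mask names are illustrative, not taken from panthor):

#include <linux/bitfield.h>
#include <linux/bits.h>

#define GPU_ID_ARCH_MAJOR	GENMASK(31, 28)
#define GPU_ID_VER_MINOR	GENMASK(11, 4)
#define GPU_ID_VER_STATUS	GENMASK(3, 0)

/* Decode a raw GPU_ID value the way GpuId::from() does above. */
static inline u32 gpu_id_arch_major(u32 gpu_id)
{
	return FIELD_GET(GPU_ID_ARCH_MAJOR, gpu_id);
}
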
+
+/// Powers on the L2 block.
+pub(crate) fn l2_power_on(dev: &Device<Bound>, iomem: &Devres<IoMem>) -> Result {
+ regs::L2_PWRON_LO.write(dev, iomem, 1)?;
+
+ // TODO: We cannot poll, as there is no support in Rust currently, so we
+ // sleep. Change this when read_poll_timeout() is implemented in Rust.
+ kernel::time::delay::fsleep(time::Delta::from_millis(100));
+
+ if regs::L2_READY_LO.read(dev, iomem)? != 1 {
+ dev_err!(dev, "Failed to power on the GPU\n");
+ return Err(EIO);
+ }
+
+ Ok(())
+}
diff --git a/drivers/gpu/drm/tyr/regs.rs b/drivers/gpu/drm/tyr/regs.rs
new file mode 100644
index 000000000000..f46933aaa221
--- /dev/null
+++ b/drivers/gpu/drm/tyr/regs.rs
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0 or MIT
+
+// We don't expect that all the registers and fields will be used, even in the
+// future.
+//
+// Nevertheless, it is useful to have most of them defined, like the C driver
+// does.
+#![allow(dead_code)]
+
+use kernel::bits::bit_u32;
+use kernel::device::Bound;
+use kernel::device::Device;
+use kernel::devres::Devres;
+use kernel::prelude::*;
+
+use crate::driver::IoMem;
+
+/// Represents a register in the Register Set
+///
+/// TODO: Replace this with the Nova `register!()` macro when it is available.
+/// In particular, this will automatically give us 64-bit register reads and
+/// writes.
+pub(crate) struct Register<const OFFSET: usize>;
+
+impl<const OFFSET: usize> Register<OFFSET> {
+ #[inline]
+ pub(crate) fn read(&self, dev: &Device<Bound>, iomem: &Devres<IoMem>) -> Result<u32> {
+ let value = (*iomem).access(dev)?.read32(OFFSET);
+ Ok(value)
+ }
+
+ #[inline]
+ pub(crate) fn write(&self, dev: &Device<Bound>, iomem: &Devres<IoMem>, value: u32) -> Result {
+ (*iomem).access(dev)?.write32(value, OFFSET);
+ Ok(())
+ }
+}
+
+pub(crate) const GPU_ID: Register<0x0> = Register;
+pub(crate) const GPU_L2_FEATURES: Register<0x4> = Register;
+pub(crate) const GPU_CORE_FEATURES: Register<0x8> = Register;
+pub(crate) const GPU_CSF_ID: Register<0x1c> = Register;
+pub(crate) const GPU_REVID: Register<0x280> = Register;
+pub(crate) const GPU_TILER_FEATURES: Register<0xc> = Register;
+pub(crate) const GPU_MEM_FEATURES: Register<0x10> = Register;
+pub(crate) const GPU_MMU_FEATURES: Register<0x14> = Register;
+pub(crate) const GPU_AS_PRESENT: Register<0x18> = Register;
+pub(crate) const GPU_IRQ_RAWSTAT: Register<0x20> = Register;
+
+pub(crate) const GPU_IRQ_RAWSTAT_FAULT: u32 = bit_u32(0);
+pub(crate) const GPU_IRQ_RAWSTAT_PROTECTED_FAULT: u32 = bit_u32(1);
+pub(crate) const GPU_IRQ_RAWSTAT_RESET_COMPLETED: u32 = bit_u32(8);
+pub(crate) const GPU_IRQ_RAWSTAT_POWER_CHANGED_SINGLE: u32 = bit_u32(9);
+pub(crate) const GPU_IRQ_RAWSTAT_POWER_CHANGED_ALL: u32 = bit_u32(10);
+pub(crate) const GPU_IRQ_RAWSTAT_CLEAN_CACHES_COMPLETED: u32 = bit_u32(17);
+pub(crate) const GPU_IRQ_RAWSTAT_DOORBELL_STATUS: u32 = bit_u32(18);
+pub(crate) const GPU_IRQ_RAWSTAT_MCU_STATUS: u32 = bit_u32(19);
+
+pub(crate) const GPU_IRQ_CLEAR: Register<0x24> = Register;
+pub(crate) const GPU_IRQ_MASK: Register<0x28> = Register;
+pub(crate) const GPU_IRQ_STAT: Register<0x2c> = Register;
+pub(crate) const GPU_CMD: Register<0x30> = Register;
+pub(crate) const GPU_CMD_SOFT_RESET: u32 = 1 | (1 << 8);
+pub(crate) const GPU_CMD_HARD_RESET: u32 = 1 | (2 << 8);
+pub(crate) const GPU_THREAD_FEATURES: Register<0xac> = Register;
+pub(crate) const GPU_THREAD_MAX_THREADS: Register<0xa0> = Register;
+pub(crate) const GPU_THREAD_MAX_WORKGROUP_SIZE: Register<0xa4> = Register;
+pub(crate) const GPU_THREAD_MAX_BARRIER_SIZE: Register<0xa8> = Register;
+pub(crate) const GPU_TEXTURE_FEATURES0: Register<0xb0> = Register;
+pub(crate) const GPU_SHADER_PRESENT_LO: Register<0x100> = Register;
+pub(crate) const GPU_SHADER_PRESENT_HI: Register<0x104> = Register;
+pub(crate) const GPU_TILER_PRESENT_LO: Register<0x110> = Register;
+pub(crate) const GPU_TILER_PRESENT_HI: Register<0x114> = Register;
+pub(crate) const GPU_L2_PRESENT_LO: Register<0x120> = Register;
+pub(crate) const GPU_L2_PRESENT_HI: Register<0x124> = Register;
+pub(crate) const L2_READY_LO: Register<0x160> = Register;
+pub(crate) const L2_READY_HI: Register<0x164> = Register;
+pub(crate) const L2_PWRON_LO: Register<0x1a0> = Register;
+pub(crate) const L2_PWRON_HI: Register<0x1a4> = Register;
+pub(crate) const L2_PWRTRANS_LO: Register<0x220> = Register;
+pub(crate) const L2_PWRTRANS_HI: Register<0x224> = Register;
+pub(crate) const L2_PWRACTIVE_LO: Register<0x260> = Register;
+pub(crate) const L2_PWRACTIVE_HI: Register<0x264> = Register;
+
+pub(crate) const MCU_CONTROL: Register<0x700> = Register;
+pub(crate) const MCU_CONTROL_ENABLE: u32 = 1;
+pub(crate) const MCU_CONTROL_AUTO: u32 = 2;
+pub(crate) const MCU_CONTROL_DISABLE: u32 = 0;
+
+pub(crate) const MCU_STATUS: Register<0x704> = Register;
+pub(crate) const MCU_STATUS_DISABLED: u32 = 0;
+pub(crate) const MCU_STATUS_ENABLED: u32 = 1;
+pub(crate) const MCU_STATUS_HALT: u32 = 2;
+pub(crate) const MCU_STATUS_FATAL: u32 = 3;
+
+pub(crate) const GPU_COHERENCY_FEATURES: Register<0x300> = Register;
+
+pub(crate) const JOB_IRQ_RAWSTAT: Register<0x1000> = Register;
+pub(crate) const JOB_IRQ_CLEAR: Register<0x1004> = Register;
+pub(crate) const JOB_IRQ_MASK: Register<0x1008> = Register;
+pub(crate) const JOB_IRQ_STAT: Register<0x100c> = Register;
+
+pub(crate) const JOB_IRQ_GLOBAL_IF: u32 = bit_u32(31);
+
+pub(crate) const MMU_IRQ_RAWSTAT: Register<0x2000> = Register;
+pub(crate) const MMU_IRQ_CLEAR: Register<0x2004> = Register;
+pub(crate) const MMU_IRQ_MASK: Register<0x2008> = Register;
+pub(crate) const MMU_IRQ_STAT: Register<0x200c> = Register;
diff --git a/drivers/gpu/drm/tyr/tyr.rs b/drivers/gpu/drm/tyr/tyr.rs
new file mode 100644
index 000000000000..861d1db43072
--- /dev/null
+++ b/drivers/gpu/drm/tyr/tyr.rs
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0 or MIT
+
+//! Arm Mali Tyr DRM driver.
+//!
+//! The name "Tyr" is inspired by Norse mythology, reflecting Arm's tradition of
+//! naming their GPUs after Nordic mythological figures and places.
+
+use crate::driver::TyrDriver;
+
+mod driver;
+mod file;
+mod gem;
+mod gpu;
+mod regs;
+
+kernel::module_platform_driver! {
+ type: TyrDriver,
+ name: "tyr",
+ authors: ["The Tyr driver authors"],
+ description: "Arm Mali Tyr DRM driver",
+ license: "Dual MIT/GPL",
+}
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index 5e997ae8bc9c..c5a3bbbc74c5 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -46,6 +46,7 @@ MODULE_PARM_DESC(super_pages, "Enable/Disable Super Pages support.");
static int v3d_get_param_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
struct v3d_dev *v3d = to_v3d_dev(dev);
struct drm_v3d_get_param *args = data;
static const u32 reg_map[] = {
@@ -107,6 +108,16 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data,
case DRM_V3D_PARAM_SUPPORTS_SUPER_PAGES:
args->value = !!v3d->gemfs;
return 0;
+ case DRM_V3D_PARAM_GLOBAL_RESET_COUNTER:
+ mutex_lock(&v3d->reset_lock);
+ args->value = v3d->reset_counter;
+ mutex_unlock(&v3d->reset_lock);
+ return 0;
+ case DRM_V3D_PARAM_CONTEXT_RESET_COUNTER:
+ mutex_lock(&v3d->reset_lock);
+ args->value = v3d_priv->reset_counter;
+ mutex_unlock(&v3d->reset_lock);
+ return 0;
default:
DRM_DEBUG("Unknown parameter %d\n", args->param);
return -EINVAL;
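
The two new parameters follow the existing GET_PARAM contract, so userspace reads them like any other v3d parameter. A sketch for the per-context counter (the global one is analogous; the helper name is made up):

#include <sys/ioctl.h>
#include <drm/v3d_drm.h>

static int v3d_get_context_resets(int fd, __u64 *value)
{
	struct drm_v3d_get_param param = {
		.param = DRM_V3D_PARAM_CONTEXT_RESET_COUNTER,
	};

	if (ioctl(fd, DRM_IOCTL_V3D_GET_PARAM, &param))
		return -1;

	*value = param.value;
	return 0;
}
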
@@ -146,12 +157,24 @@ v3d_open(struct drm_device *dev, struct drm_file *file)
static void
v3d_postclose(struct drm_device *dev, struct drm_file *file)
{
+ struct v3d_dev *v3d = to_v3d_dev(dev);
struct v3d_file_priv *v3d_priv = file->driver_priv;
+ unsigned long irqflags;
enum v3d_queue q;
- for (q = 0; q < V3D_MAX_QUEUES; q++)
+ for (q = 0; q < V3D_MAX_QUEUES; q++) {
+ struct v3d_queue_state *queue = &v3d->queue[q];
+ struct v3d_job *job = queue->active_job;
+
drm_sched_entity_destroy(&v3d_priv->sched_entity[q]);
+ if (job && job->base.entity == &v3d_priv->sched_entity[q]) {
+ spin_lock_irqsave(&queue->queue_lock, irqflags);
+ job->file_priv = NULL;
+ spin_unlock_irqrestore(&queue->queue_lock, irqflags);
+ }
+ }
+
v3d_perfmon_close_file(v3d_priv);
kfree(v3d_priv);
}
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index 411e47702f8a..0317f3d7452a 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -58,6 +58,10 @@ struct v3d_queue_state {
/* Stores the GPU stats for this queue in the global context. */
struct v3d_stats stats;
+
+ /* Currently active job for this queue */
+ struct v3d_job *active_job;
+ spinlock_t queue_lock;
};
/* Performance monitor object. The perfmon lifetime is controlled by userspace
@@ -159,18 +163,8 @@ struct v3d_dev {
struct work_struct overflow_mem_work;
- struct v3d_bin_job *bin_job;
- struct v3d_render_job *render_job;
- struct v3d_tfu_job *tfu_job;
- struct v3d_csd_job *csd_job;
-
struct v3d_queue_state queue[V3D_MAX_QUEUES];
- /* Spinlock used to synchronize the overflow memory
- * management against bin job submission.
- */
- spinlock_t job_lock;
-
/* Used to track the active perfmon if any. */
struct v3d_perfmon *active_perfmon;
@@ -204,6 +198,11 @@ struct v3d_dev {
* all jobs.
*/
struct v3d_perfmon *global_perfmon;
+
+ /* Global reset counter. The counter must be incremented when
+ * a GPU reset happens. It must be protected by @reset_lock.
+ */
+ unsigned int reset_counter;
};
static inline struct v3d_dev *
@@ -233,6 +232,12 @@ struct v3d_file_priv {
/* Stores the GPU stats for a specific queue for this fd. */
struct v3d_stats stats[V3D_MAX_QUEUES];
+
+ /* Per-fd reset counter, must be incremented when a job submitted
+ * by this fd causes a GPU reset. It must be protected by
+ * &struct v3d_dev->reset_lock.
+ */
+ unsigned int reset_counter;
};
struct v3d_bo {
@@ -316,9 +321,9 @@ struct v3d_job {
struct v3d_perfmon *perfmon;
/* File descriptor of the process that submitted the job that could be used
- * for collecting stats by process of GPU usage.
+ * to collect per-process information about the GPU.
*/
- struct drm_file *file;
+ struct v3d_file_priv *file_priv;
/* Callback for the freeing of the job on refcount going to 0. */
void (*free)(struct kref *ref);
@@ -559,7 +564,7 @@ void v3d_get_stats(const struct v3d_stats *stats, u64 timestamp,
/* v3d_fence.c */
extern const struct dma_fence_ops v3d_fence_ops;
-struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue queue);
+struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue q);
/* v3d_gem.c */
int v3d_gem_init(struct drm_device *dev);
@@ -603,7 +608,7 @@ void v3d_timestamp_query_info_free(struct v3d_timestamp_query_info *query_info,
unsigned int count);
void v3d_performance_query_info_free(struct v3d_performance_query_info *query_info,
unsigned int count);
-void v3d_job_update_stats(struct v3d_job *job, enum v3d_queue queue);
+void v3d_job_update_stats(struct v3d_job *job, enum v3d_queue q);
int v3d_sched_init(struct v3d_dev *v3d);
void v3d_sched_fini(struct v3d_dev *v3d);
diff --git a/drivers/gpu/drm/v3d/v3d_fence.c b/drivers/gpu/drm/v3d/v3d_fence.c
index 89840ed212c0..8f8471adae34 100644
--- a/drivers/gpu/drm/v3d/v3d_fence.c
+++ b/drivers/gpu/drm/v3d/v3d_fence.c
@@ -3,8 +3,9 @@
#include "v3d_drv.h"
-struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue queue)
+struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue q)
{
+ struct v3d_queue_state *queue = &v3d->queue[q];
struct v3d_fence *fence;
fence = kzalloc(sizeof(*fence), GFP_KERNEL);
@@ -12,10 +13,10 @@ struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue queue)
return ERR_PTR(-ENOMEM);
fence->dev = &v3d->drm;
- fence->queue = queue;
- fence->seqno = ++v3d->queue[queue].emit_seqno;
- dma_fence_init(&fence->base, &v3d_fence_ops, &v3d->job_lock,
- v3d->queue[queue].fence_context, fence->seqno);
+ fence->queue = q;
+ fence->seqno = ++queue->emit_seqno;
+ dma_fence_init(&fence->base, &v3d_fence_ops, &queue->queue_lock,
+ queue->fence_context, fence->seqno);
return &fence->base;
}
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index 37bf5eecdd2c..c77d90aa9b82 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -271,10 +271,11 @@ v3d_gem_init(struct drm_device *dev)
queue->fence_context = dma_fence_context_alloc(1);
memset(&queue->stats, 0, sizeof(queue->stats));
seqcount_init(&queue->stats.lock);
+
+ spin_lock_init(&queue->queue_lock);
}
spin_lock_init(&v3d->mm_lock);
- spin_lock_init(&v3d->job_lock);
ret = drmm_mutex_init(dev, &v3d->bo_lock);
if (ret)
return ret;
@@ -324,6 +325,7 @@ void
v3d_gem_destroy(struct drm_device *dev)
{
struct v3d_dev *v3d = to_v3d_dev(dev);
+ enum v3d_queue q;
v3d_sched_fini(v3d);
v3d_gemfs_fini(v3d);
@@ -331,10 +333,8 @@ v3d_gem_destroy(struct drm_device *dev)
/* Waiting for jobs to finish would need to be done before
* unregistering V3D.
*/
- WARN_ON(v3d->bin_job);
- WARN_ON(v3d->render_job);
- WARN_ON(v3d->tfu_job);
- WARN_ON(v3d->csd_job);
+ for (q = 0; q < V3D_MAX_QUEUES; q++)
+ WARN_ON(v3d->queue[q].active_job);
drm_mm_takedown(&v3d->mm);
diff --git a/drivers/gpu/drm/v3d/v3d_gemfs.c b/drivers/gpu/drm/v3d/v3d_gemfs.c
index 8ec6ed82b3d9..c1a30166c099 100644
--- a/drivers/gpu/drm/v3d/v3d_gemfs.c
+++ b/drivers/gpu/drm/v3d/v3d_gemfs.c
@@ -7,11 +7,6 @@
#include "v3d_drv.h"
-static int add_param(struct fs_context *fc, const char *key, const char *val)
-{
- return vfs_parse_fs_string(fc, key, val, strlen(val));
-}
-
void v3d_gemfs_init(struct v3d_dev *v3d)
{
struct file_system_type *type;
@@ -38,9 +33,9 @@ void v3d_gemfs_init(struct v3d_dev *v3d)
fc = fs_context_for_mount(type, SB_KERNMOUNT);
if (IS_ERR(fc))
goto err;
- ret = add_param(fc, "source", "tmpfs");
+ ret = vfs_parse_fs_string(fc, "source", "tmpfs");
if (!ret)
- ret = add_param(fc, "huge", "within_size");
+ ret = vfs_parse_fs_string(fc, "huge", "within_size");
if (!ret)
gemfs = fc_mount_longterm(fc);
put_fs_context(fc);
diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c
index a515a301e480..31ecc5b4ba5a 100644
--- a/drivers/gpu/drm/v3d/v3d_irq.c
+++ b/drivers/gpu/drm/v3d/v3d_irq.c
@@ -42,6 +42,8 @@ v3d_overflow_mem_work(struct work_struct *work)
container_of(work, struct v3d_dev, overflow_mem_work);
struct drm_device *dev = &v3d->drm;
struct v3d_bo *bo = v3d_bo_create(dev, NULL /* XXX: GMP */, 256 * 1024);
+ struct v3d_queue_state *queue = &v3d->queue[V3D_BIN];
+ struct v3d_bin_job *bin_job;
struct drm_gem_object *obj;
unsigned long irqflags;
@@ -60,15 +62,17 @@ v3d_overflow_mem_work(struct work_struct *work)
* bin job got scheduled, that's fine. We'll just give them
* some binner pool anyway.
*/
- spin_lock_irqsave(&v3d->job_lock, irqflags);
- if (!v3d->bin_job) {
- spin_unlock_irqrestore(&v3d->job_lock, irqflags);
+ spin_lock_irqsave(&queue->queue_lock, irqflags);
+ bin_job = (struct v3d_bin_job *)queue->active_job;
+
+ if (!bin_job) {
+ spin_unlock_irqrestore(&queue->queue_lock, irqflags);
goto out;
}
drm_gem_object_get(obj);
- list_add_tail(&bo->unref_head, &v3d->bin_job->render->unref_list);
- spin_unlock_irqrestore(&v3d->job_lock, irqflags);
+ list_add_tail(&bo->unref_head, &bin_job->render->unref_list);
+ spin_unlock_irqrestore(&queue->queue_lock, irqflags);
v3d_mmu_flush_all(v3d);
@@ -79,6 +83,20 @@ out:
drm_gem_object_put(obj);
}
+static void
+v3d_irq_signal_fence(struct v3d_dev *v3d, enum v3d_queue q,
+ void (*trace_irq)(struct drm_device *, uint64_t))
+{
+ struct v3d_queue_state *queue = &v3d->queue[q];
+ struct v3d_fence *fence = to_v3d_fence(queue->active_job->irq_fence);
+
+ v3d_job_update_stats(queue->active_job, q);
+ trace_irq(&v3d->drm, fence->seqno);
+
+ queue->active_job = NULL;
+ dma_fence_signal(&fence->base);
+}
+
static irqreturn_t
v3d_irq(int irq, void *arg)
{
@@ -102,41 +120,17 @@ v3d_irq(int irq, void *arg)
}
if (intsts & V3D_INT_FLDONE) {
- struct v3d_fence *fence =
- to_v3d_fence(v3d->bin_job->base.irq_fence);
-
- v3d_job_update_stats(&v3d->bin_job->base, V3D_BIN);
- trace_v3d_bcl_irq(&v3d->drm, fence->seqno);
-
- v3d->bin_job = NULL;
- dma_fence_signal(&fence->base);
-
+ v3d_irq_signal_fence(v3d, V3D_BIN, trace_v3d_bcl_irq);
status = IRQ_HANDLED;
}
if (intsts & V3D_INT_FRDONE) {
- struct v3d_fence *fence =
- to_v3d_fence(v3d->render_job->base.irq_fence);
-
- v3d_job_update_stats(&v3d->render_job->base, V3D_RENDER);
- trace_v3d_rcl_irq(&v3d->drm, fence->seqno);
-
- v3d->render_job = NULL;
- dma_fence_signal(&fence->base);
-
+ v3d_irq_signal_fence(v3d, V3D_RENDER, trace_v3d_rcl_irq);
status = IRQ_HANDLED;
}
if (intsts & V3D_INT_CSDDONE(v3d->ver)) {
- struct v3d_fence *fence =
- to_v3d_fence(v3d->csd_job->base.irq_fence);
-
- v3d_job_update_stats(&v3d->csd_job->base, V3D_CSD);
- trace_v3d_csd_irq(&v3d->drm, fence->seqno);
-
- v3d->csd_job = NULL;
- dma_fence_signal(&fence->base);
-
+ v3d_irq_signal_fence(v3d, V3D_CSD, trace_v3d_csd_irq);
status = IRQ_HANDLED;
}
@@ -168,15 +162,7 @@ v3d_hub_irq(int irq, void *arg)
V3D_WRITE(V3D_HUB_INT_CLR, intsts);
if (intsts & V3D_HUB_INT_TFUC) {
- struct v3d_fence *fence =
- to_v3d_fence(v3d->tfu_job->base.irq_fence);
-
- v3d_job_update_stats(&v3d->tfu_job->base, V3D_TFU);
- trace_v3d_tfu_irq(&v3d->drm, fence->seqno);
-
- v3d->tfu_job = NULL;
- dma_fence_signal(&fence->base);
-
+ v3d_irq_signal_fence(v3d, V3D_TFU, trace_v3d_tfu_irq);
status = IRQ_HANDLED;
}
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index cb9df8822472..0ec06bfbbebb 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -139,7 +139,7 @@ static void
v3d_job_start_stats(struct v3d_job *job, enum v3d_queue queue)
{
struct v3d_dev *v3d = job->v3d;
- struct v3d_file_priv *file = job->file->driver_priv;
+ struct v3d_file_priv *file = job->file_priv;
struct v3d_stats *global_stats = &v3d->queue[queue].stats;
struct v3d_stats *local_stats = &file->stats[queue];
u64 now = local_clock();
@@ -194,11 +194,11 @@ v3d_stats_update(struct v3d_stats *stats, u64 now)
}
void
-v3d_job_update_stats(struct v3d_job *job, enum v3d_queue queue)
+v3d_job_update_stats(struct v3d_job *job, enum v3d_queue q)
{
struct v3d_dev *v3d = job->v3d;
- struct v3d_file_priv *file = job->file->driver_priv;
- struct v3d_stats *global_stats = &v3d->queue[queue].stats;
+ struct v3d_queue_state *queue = &v3d->queue[q];
+ struct v3d_stats *global_stats = &queue->stats;
u64 now = local_clock();
unsigned long flags;
@@ -209,10 +209,10 @@ v3d_job_update_stats(struct v3d_job *job, enum v3d_queue queue)
preempt_disable();
/* Don't update the local stats if the file context has already closed */
- if (file)
- v3d_stats_update(&file->stats[queue], now);
- else
- drm_dbg(&v3d->drm, "The file descriptor was closed before job completion\n");
+ spin_lock(&queue->queue_lock);
+ if (job->file_priv)
+ v3d_stats_update(&job->file_priv->stats[q], now);
+ spin_unlock(&queue->queue_lock);
v3d_stats_update(global_stats, now);
@@ -226,27 +226,28 @@ static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job)
{
struct v3d_bin_job *job = to_bin_job(sched_job);
struct v3d_dev *v3d = job->base.v3d;
+ struct v3d_queue_state *queue = &v3d->queue[V3D_BIN];
struct drm_device *dev = &v3d->drm;
struct dma_fence *fence;
unsigned long irqflags;
if (unlikely(job->base.base.s_fence->finished.error)) {
- spin_lock_irqsave(&v3d->job_lock, irqflags);
- v3d->bin_job = NULL;
- spin_unlock_irqrestore(&v3d->job_lock, irqflags);
+ spin_lock_irqsave(&queue->queue_lock, irqflags);
+ queue->active_job = NULL;
+ spin_unlock_irqrestore(&queue->queue_lock, irqflags);
return NULL;
}
/* Lock required around bin_job update vs
* v3d_overflow_mem_work().
*/
- spin_lock_irqsave(&v3d->job_lock, irqflags);
- v3d->bin_job = job;
+ spin_lock_irqsave(&queue->queue_lock, irqflags);
+ queue->active_job = &job->base;
/* Clear out the overflow allocation, so we don't
* reuse the overflow attached to a previous job.
*/
V3D_CORE_WRITE(0, V3D_PTB_BPOS, 0);
- spin_unlock_irqrestore(&v3d->job_lock, irqflags);
+ spin_unlock_irqrestore(&queue->queue_lock, irqflags);
v3d_invalidate_caches(v3d);
@@ -290,11 +291,11 @@ static struct dma_fence *v3d_render_job_run(struct drm_sched_job *sched_job)
struct dma_fence *fence;
if (unlikely(job->base.base.s_fence->finished.error)) {
- v3d->render_job = NULL;
+ v3d->queue[V3D_RENDER].active_job = NULL;
return NULL;
}
- v3d->render_job = job;
+ v3d->queue[V3D_RENDER].active_job = &job->base;
/* Can we avoid this flush? We need to be careful of
* scheduling, though -- imagine job0 rendering to texture and
@@ -338,11 +339,11 @@ v3d_tfu_job_run(struct drm_sched_job *sched_job)
struct dma_fence *fence;
if (unlikely(job->base.base.s_fence->finished.error)) {
- v3d->tfu_job = NULL;
+ v3d->queue[V3D_TFU].active_job = NULL;
return NULL;
}
- v3d->tfu_job = job;
+ v3d->queue[V3D_TFU].active_job = &job->base;
fence = v3d_fence_create(v3d, V3D_TFU);
if (IS_ERR(fence))
@@ -386,11 +387,11 @@ v3d_csd_job_run(struct drm_sched_job *sched_job)
int i, csd_cfg0_reg;
if (unlikely(job->base.base.s_fence->finished.error)) {
- v3d->csd_job = NULL;
+ v3d->queue[V3D_CSD].active_job = NULL;
return NULL;
}
- v3d->csd_job = job;
+ v3d->queue[V3D_CSD].active_job = &job->base;
v3d_invalidate_caches(v3d);
@@ -574,7 +575,7 @@ static void
v3d_reset_performance_queries(struct v3d_cpu_job *job)
{
struct v3d_performance_query_info *performance_query = &job->performance_query;
- struct v3d_file_priv *v3d_priv = job->base.file->driver_priv;
+ struct v3d_file_priv *v3d_priv = job->base.file_priv;
struct v3d_dev *v3d = job->base.v3d;
struct v3d_perfmon *perfmon;
@@ -604,7 +605,7 @@ v3d_write_performance_query_result(struct v3d_cpu_job *job, void *data,
{
struct v3d_performance_query_info *performance_query =
&job->performance_query;
- struct v3d_file_priv *v3d_priv = job->base.file->driver_priv;
+ struct v3d_file_priv *v3d_priv = job->base.file_priv;
struct v3d_performance_query *perf_query =
&performance_query->queries[query];
struct v3d_dev *v3d = job->base.v3d;
@@ -700,6 +701,7 @@ v3d_cpu_job_run(struct drm_sched_job *sched_job)
trace_v3d_cpu_job_end(&v3d->drm, job->job_type);
v3d_job_update_stats(&job->base, V3D_CPU);
+ /* Synchronous operation, so no fence to wait on. */
return NULL;
}
@@ -715,19 +717,24 @@ v3d_cache_clean_job_run(struct drm_sched_job *sched_job)
v3d_job_update_stats(job, V3D_CACHE_CLEAN);
+ /* Synchronous operation, so no fence to wait on. */
return NULL;
}
static enum drm_gpu_sched_stat
-v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job)
+v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job,
+ enum v3d_queue q)
{
- enum v3d_queue q;
+ struct v3d_job *job = to_v3d_job(sched_job);
+ struct v3d_file_priv *v3d_priv = job->file_priv;
+ unsigned long irqflags;
+ enum v3d_queue i;
mutex_lock(&v3d->reset_lock);
/* block scheduler */
- for (q = 0; q < V3D_MAX_QUEUES; q++)
- drm_sched_stop(&v3d->queue[q].sched, sched_job);
+ for (i = 0; i < V3D_MAX_QUEUES; i++)
+ drm_sched_stop(&v3d->queue[i].sched, sched_job);
if (sched_job)
drm_sched_increase_karma(sched_job);
@@ -735,13 +742,18 @@ v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job)
/* get the GPU back into the init state */
v3d_reset(v3d);
- for (q = 0; q < V3D_MAX_QUEUES; q++)
- drm_sched_resubmit_jobs(&v3d->queue[q].sched);
+ v3d->reset_counter++;
+ spin_lock_irqsave(&v3d->queue[q].queue_lock, irqflags);
+ if (v3d_priv)
+ v3d_priv->reset_counter++;
+ spin_unlock_irqrestore(&v3d->queue[q].queue_lock, irqflags);
+
+ for (i = 0; i < V3D_MAX_QUEUES; i++)
+ drm_sched_resubmit_jobs(&v3d->queue[i].sched);
/* Unblock schedulers and restart their jobs. */
- for (q = 0; q < V3D_MAX_QUEUES; q++) {
- drm_sched_start(&v3d->queue[q].sched, 0);
- }
+ for (i = 0; i < V3D_MAX_QUEUES; i++)
+ drm_sched_start(&v3d->queue[i].sched, 0);
mutex_unlock(&v3d->reset_lock);
@@ -769,7 +781,7 @@ v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q,
return DRM_GPU_SCHED_STAT_NO_HANG;
}
- return v3d_gpu_reset_for_timeout(v3d, sched_job);
+ return v3d_gpu_reset_for_timeout(v3d, sched_job, q);
}
static enum drm_gpu_sched_stat
@@ -791,11 +803,11 @@ v3d_render_job_timedout(struct drm_sched_job *sched_job)
}
static enum drm_gpu_sched_stat
-v3d_generic_job_timedout(struct drm_sched_job *sched_job)
+v3d_tfu_job_timedout(struct drm_sched_job *sched_job)
{
struct v3d_job *job = to_v3d_job(sched_job);
- return v3d_gpu_reset_for_timeout(job->v3d, sched_job);
+ return v3d_gpu_reset_for_timeout(job->v3d, sched_job, V3D_TFU);
}
static enum drm_gpu_sched_stat
@@ -814,7 +826,7 @@ v3d_csd_job_timedout(struct drm_sched_job *sched_job)
return DRM_GPU_SCHED_STAT_NO_HANG;
}
- return v3d_gpu_reset_for_timeout(v3d, sched_job);
+ return v3d_gpu_reset_for_timeout(v3d, sched_job, V3D_CSD);
}
static const struct drm_sched_backend_ops v3d_bin_sched_ops = {
@@ -831,7 +843,7 @@ static const struct drm_sched_backend_ops v3d_render_sched_ops = {
static const struct drm_sched_backend_ops v3d_tfu_sched_ops = {
.run_job = v3d_tfu_job_run,
- .timedout_job = v3d_generic_job_timedout,
+ .timedout_job = v3d_tfu_job_timedout,
.free_job = v3d_sched_job_free,
};
@@ -843,13 +855,11 @@ static const struct drm_sched_backend_ops v3d_csd_sched_ops = {
static const struct drm_sched_backend_ops v3d_cache_clean_sched_ops = {
.run_job = v3d_cache_clean_job_run,
- .timedout_job = v3d_generic_job_timedout,
.free_job = v3d_sched_job_free
};
static const struct drm_sched_backend_ops v3d_cpu_sched_ops = {
.run_job = v3d_cpu_job_run,
- .timedout_job = v3d_generic_job_timedout,
.free_job = v3d_cpu_job_free
};
diff --git a/drivers/gpu/drm/v3d/v3d_submit.c b/drivers/gpu/drm/v3d/v3d_submit.c
index 5171ffe9012d..f3652e90683c 100644
--- a/drivers/gpu/drm/v3d/v3d_submit.c
+++ b/drivers/gpu/drm/v3d/v3d_submit.c
@@ -166,7 +166,7 @@ v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
job->v3d = v3d;
job->free = free;
- job->file = file_priv;
+ job->file_priv = v3d_priv;
ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue],
1, v3d_priv, file_priv->client_id);
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index e5805ca646c7..c3315935d8bc 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -131,9 +131,8 @@ static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc,
* in the plane update callback, and here we just check
* whether we must force the modeset.
*/
- if (drm_atomic_crtc_needs_modeset(crtc_state)) {
+ if (drm_atomic_crtc_needs_modeset(crtc_state))
output->needs_modeset = true;
- }
}
static const struct drm_crtc_helper_funcs virtio_gpu_crtc_helper_funcs = {
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index 7dfb2006c561..1c15cbf326b7 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -162,18 +162,18 @@ int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev)
if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_VIRGL))
vgdev->has_virgl_3d = true;
#endif
- if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_EDID)) {
+ if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_EDID))
vgdev->has_edid = true;
- }
- if (virtio_has_feature(vgdev->vdev, VIRTIO_RING_F_INDIRECT_DESC)) {
+
+ if (virtio_has_feature(vgdev->vdev, VIRTIO_RING_F_INDIRECT_DESC))
vgdev->has_indirect = true;
- }
- if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_UUID)) {
+
+ if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_UUID))
vgdev->has_resource_assign_uuid = true;
- }
- if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_BLOB)) {
+
+ if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_BLOB))
vgdev->has_resource_blob = true;
- }
+
if (virtio_get_shm_region(vgdev->vdev, &vgdev->host_visible_region,
VIRTIO_GPU_SHM_ID_HOST_VISIBLE)) {
if (!devm_request_mem_region(&vgdev->vdev->dev,
@@ -193,9 +193,9 @@ int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev)
(unsigned long)vgdev->host_visible_region.addr,
(unsigned long)vgdev->host_visible_region.len);
}
- if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_CONTEXT_INIT)) {
+
+ if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_CONTEXT_INIT))
vgdev->has_context_init = true;
- }
DRM_INFO("features: %cvirgl %cedid %cresource_blob %chost_visible",
vgdev->has_virgl_3d ? '+' : '-',
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index 5517cff8715c..e6363c887500 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -47,6 +47,7 @@ int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev, uint32_t *resid)
*resid = handle + 1;
} else {
int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);
+
if (handle < 0)
return handle;
*resid = handle + 1;
@@ -56,9 +57,8 @@ int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev, uint32_t *resid)
static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
{
- if (!virtio_gpu_virglrenderer_workaround) {
+ if (!virtio_gpu_virglrenderer_workaround)
ida_free(&vgdev->resource_ida, id - 1);
- }
}
void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index 698ea7adb951..29e4b458ae57 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -120,7 +120,7 @@ static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
crtc_state = drm_atomic_get_crtc_state(state,
new_plane_state->crtc);
if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
+ return PTR_ERR(crtc_state);
ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
DRM_PLANE_NO_SCALING,
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 55a15e247dd1..8181b22b9b46 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -248,6 +248,7 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
if (le32_to_cpu(resp->type) >= VIRTIO_GPU_RESP_ERR_UNSPEC) {
struct virtio_gpu_ctrl_hdr *cmd;
+
cmd = virtio_gpu_vbuf_ctrl_hdr(entry);
DRM_ERROR_RATELIMITED("response 0x%x (command 0x%x)\n",
le32_to_cpu(resp->type),
@@ -468,6 +469,7 @@ static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
if (vbuf->data_size) {
if (is_vmalloc_addr(vbuf->data_buf)) {
int sg_ents;
+
sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
&sg_ents);
if (!sgt) {
diff --git a/drivers/gpu/drm/vkms/tests/vkms_config_test.c b/drivers/gpu/drm/vkms/tests/vkms_config_test.c
index ff4566cf9925..b0d78a81d2df 100644
--- a/drivers/gpu/drm/vkms/tests/vkms_config_test.c
+++ b/drivers/gpu/drm/vkms/tests/vkms_config_test.c
@@ -200,6 +200,7 @@ static void vkms_config_test_get_planes(struct kunit *test)
KUNIT_ASSERT_EQ(test, n_planes, 0);
plane_cfg1 = vkms_config_create_plane(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg1);
vkms_config_for_each_plane(config, plane_cfg) {
n_planes++;
if (plane_cfg != plane_cfg1)
@@ -209,6 +210,7 @@ static void vkms_config_test_get_planes(struct kunit *test)
n_planes = 0;
plane_cfg2 = vkms_config_create_plane(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg2);
vkms_config_for_each_plane(config, plane_cfg) {
n_planes++;
if (plane_cfg != plane_cfg1 && plane_cfg != plane_cfg2)
@@ -242,6 +244,7 @@ static void vkms_config_test_get_crtcs(struct kunit *test)
KUNIT_FAIL(test, "Unexpected CRTC");
crtc_cfg1 = vkms_config_create_crtc(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_cfg1);
KUNIT_ASSERT_EQ(test, vkms_config_get_num_crtcs(config), 1);
vkms_config_for_each_crtc(config, crtc_cfg) {
if (crtc_cfg != crtc_cfg1)
@@ -249,6 +252,7 @@ static void vkms_config_test_get_crtcs(struct kunit *test)
}
crtc_cfg2 = vkms_config_create_crtc(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_cfg2);
KUNIT_ASSERT_EQ(test, vkms_config_get_num_crtcs(config), 2);
vkms_config_for_each_crtc(config, crtc_cfg) {
if (crtc_cfg != crtc_cfg1 && crtc_cfg != crtc_cfg2)
@@ -280,6 +284,7 @@ static void vkms_config_test_get_encoders(struct kunit *test)
KUNIT_ASSERT_EQ(test, n_encoders, 0);
encoder_cfg1 = vkms_config_create_encoder(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder_cfg1);
vkms_config_for_each_encoder(config, encoder_cfg) {
n_encoders++;
if (encoder_cfg != encoder_cfg1)
@@ -289,6 +294,7 @@ static void vkms_config_test_get_encoders(struct kunit *test)
n_encoders = 0;
encoder_cfg2 = vkms_config_create_encoder(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder_cfg2);
vkms_config_for_each_encoder(config, encoder_cfg) {
n_encoders++;
if (encoder_cfg != encoder_cfg1 && encoder_cfg != encoder_cfg2)
@@ -324,6 +330,7 @@ static void vkms_config_test_get_connectors(struct kunit *test)
KUNIT_ASSERT_EQ(test, n_connectors, 0);
connector_cfg1 = vkms_config_create_connector(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, connector_cfg1);
vkms_config_for_each_connector(config, connector_cfg) {
n_connectors++;
if (connector_cfg != connector_cfg1)
@@ -333,6 +340,7 @@ static void vkms_config_test_get_connectors(struct kunit *test)
n_connectors = 0;
connector_cfg2 = vkms_config_create_connector(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, connector_cfg2);
vkms_config_for_each_connector(config, connector_cfg) {
n_connectors++;
if (connector_cfg != connector_cfg1 &&
@@ -370,7 +378,7 @@ static void vkms_config_test_invalid_plane_number(struct kunit *test)
/* Invalid: Too many planes */
for (n = 0; n <= 32; n++)
- vkms_config_create_plane(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vkms_config_create_plane(config));
KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
@@ -395,6 +403,7 @@ static void vkms_config_test_valid_plane_type(struct kunit *test)
/* Invalid: No primary plane */
plane_cfg = vkms_config_create_plane(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg);
vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_OVERLAY);
err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg);
KUNIT_EXPECT_EQ(test, err, 0);
@@ -402,11 +411,13 @@ static void vkms_config_test_valid_plane_type(struct kunit *test)
/* Invalid: Multiple primary planes */
plane_cfg = vkms_config_create_plane(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg);
vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_PRIMARY);
err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg);
KUNIT_EXPECT_EQ(test, err, 0);
plane_cfg = vkms_config_create_plane(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg);
vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_PRIMARY);
err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg);
KUNIT_EXPECT_EQ(test, err, 0);
@@ -419,11 +430,13 @@ static void vkms_config_test_valid_plane_type(struct kunit *test)
/* Invalid: Multiple cursor planes */
plane_cfg = vkms_config_create_plane(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg);
vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_CURSOR);
err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg);
KUNIT_EXPECT_EQ(test, err, 0);
plane_cfg = vkms_config_create_plane(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg);
vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_CURSOR);
err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg);
KUNIT_EXPECT_EQ(test, err, 0);
@@ -437,12 +450,16 @@ static void vkms_config_test_valid_plane_type(struct kunit *test)
/* Invalid: Second CRTC without primary plane */
crtc_cfg = vkms_config_create_crtc(config);
encoder_cfg = vkms_config_create_encoder(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_cfg);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder_cfg);
+
err = vkms_config_encoder_attach_crtc(encoder_cfg, crtc_cfg);
KUNIT_EXPECT_EQ(test, err, 0);
KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
/* Valid: Second CRTC with a primary plane */
plane_cfg = vkms_config_create_plane(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg);
vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_PRIMARY);
err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg);
KUNIT_EXPECT_EQ(test, err, 0);
@@ -486,7 +503,7 @@ static void vkms_config_test_invalid_crtc_number(struct kunit *test)
/* Invalid: Too many CRTCs */
for (n = 0; n <= 32; n++)
- vkms_config_create_crtc(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vkms_config_create_crtc(config));
KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
@@ -509,7 +526,7 @@ static void vkms_config_test_invalid_encoder_number(struct kunit *test)
/* Invalid: Too many encoders */
for (n = 0; n <= 32; n++)
- vkms_config_create_encoder(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vkms_config_create_encoder(config));
KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
@@ -531,12 +548,15 @@ static void vkms_config_test_valid_encoder_possible_crtcs(struct kunit *test)
/* Invalid: Encoder without a possible CRTC */
encoder_cfg = vkms_config_create_encoder(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder_cfg);
KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
/* Valid: Second CRTC with shared encoder */
crtc_cfg2 = vkms_config_create_crtc(config);
-
plane_cfg = vkms_config_create_plane(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_cfg2);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg);
+
vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_PRIMARY);
err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg2);
KUNIT_EXPECT_EQ(test, err, 0);
@@ -577,7 +597,7 @@ static void vkms_config_test_invalid_connector_number(struct kunit *test)
/* Invalid: Too many connectors */
for (n = 0; n <= 32; n++)
- connector_cfg = vkms_config_create_connector(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vkms_config_create_connector(config));
KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
@@ -669,13 +689,19 @@ static void vkms_config_test_plane_attach_crtc(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
overlay_cfg = vkms_config_create_plane(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, overlay_cfg);
vkms_config_plane_set_type(overlay_cfg, DRM_PLANE_TYPE_OVERLAY);
+
primary_cfg = vkms_config_create_plane(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, primary_cfg);
vkms_config_plane_set_type(primary_cfg, DRM_PLANE_TYPE_PRIMARY);
+
cursor_cfg = vkms_config_create_plane(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cursor_cfg);
vkms_config_plane_set_type(cursor_cfg, DRM_PLANE_TYPE_CURSOR);
crtc_cfg = vkms_config_create_crtc(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_cfg);
/* No primary or cursor planes */
KUNIT_EXPECT_NULL(test, vkms_config_crtc_primary_plane(config, crtc_cfg));
@@ -735,6 +761,11 @@ static void vkms_config_test_plane_get_possible_crtcs(struct kunit *test)
crtc_cfg1 = vkms_config_create_crtc(config);
crtc_cfg2 = vkms_config_create_crtc(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg1);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg2);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_cfg1);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_cfg2);
+
/* No possible CRTCs */
vkms_config_plane_for_each_possible_crtc(plane_cfg1, idx, possible_crtc)
KUNIT_FAIL(test, "Unexpected possible CRTC");
@@ -799,6 +830,11 @@ static void vkms_config_test_encoder_get_possible_crtcs(struct kunit *test)
crtc_cfg1 = vkms_config_create_crtc(config);
crtc_cfg2 = vkms_config_create_crtc(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder_cfg1);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder_cfg2);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_cfg1);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_cfg2);
+
/* No possible CRTCs */
vkms_config_encoder_for_each_possible_crtc(encoder_cfg1, idx, possible_crtc)
KUNIT_FAIL(test, "Unexpected possible CRTC");
@@ -863,6 +899,11 @@ static void vkms_config_test_connector_get_possible_encoders(struct kunit *test)
encoder_cfg1 = vkms_config_create_encoder(config);
encoder_cfg2 = vkms_config_create_encoder(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, connector_cfg1);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, connector_cfg2);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder_cfg1);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder_cfg2);
+
/* No possible encoders */
vkms_config_connector_for_each_possible_encoder(connector_cfg1, idx,
possible_encoder)
diff --git a/drivers/gpu/drm/vkms/tests/vkms_format_test.c b/drivers/gpu/drm/vkms/tests/vkms_format_test.c
index 2e1daef94831..a7788fbc45dc 100644
--- a/drivers/gpu/drm/vkms/tests/vkms_format_test.c
+++ b/drivers/gpu/drm/vkms/tests/vkms_format_test.c
@@ -14,20 +14,20 @@
MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
/**
- * struct pixel_yuv_u8 - Internal representation of a pixel color.
- * @y: Luma value, stored in 8 bits, without padding, using
+ * struct pixel_yuv_u16 - Internal representation of a pixel color.
+ * @y: Luma value, stored in 16 bits, without padding, using
* machine endianness
- * @u: Blue difference chroma value, stored in 8 bits, without padding, using
+ * @u: Blue difference chroma value, stored in 16 bits, without padding, using
* machine endianness
- * @v: Red difference chroma value, stored in 8 bits, without padding, using
+ * @v: Red difference chroma value, stored in 16 bits, without padding, using
* machine endianness
*/
-struct pixel_yuv_u8 {
- u8 y, u, v;
+struct pixel_yuv_u16 {
+ u16 y, u, v;
};
/*
- * struct yuv_u8_to_argb_u16_case - Reference values to test the color
+ * struct yuv_u16_to_argb_u16_case - Reference values to test the color
* conversions in VKMS between YUV to ARGB
*
* @encoding: Encoding used to convert RGB to YUV
@@ -39,13 +39,13 @@ struct pixel_yuv_u8 {
* @format_pair.yuv: Same color as @format_pair.rgb, but converted to
* YUV using @encoding and @range.
*/
-struct yuv_u8_to_argb_u16_case {
+struct yuv_u16_to_argb_u16_case {
enum drm_color_encoding encoding;
enum drm_color_range range;
size_t n_colors;
struct format_pair {
char *name;
- struct pixel_yuv_u8 yuv;
+ struct pixel_yuv_u16 yuv;
struct pixel_argb_u16 argb;
} colors[TEST_BUFF_SIZE];
};
@@ -57,14 +57,14 @@ struct yuv_u8_to_argb_u16_case {
* For more information, go to the docs:
* https://colour.readthedocs.io/en/master/generated/colour.RGB_to_YCbCr.html
*/
-static struct yuv_u8_to_argb_u16_case yuv_u8_to_argb_u16_cases[] = {
+static struct yuv_u16_to_argb_u16_case yuv_u16_to_argb_u16_cases[] = {
/*
* colour.RGB_to_YCbCr(<rgb color in 16 bit form>,
* K=colour.WEIGHTS_YCBCR["ITU-R BT.601"],
* in_bits = 16,
* in_legal = False,
* in_int = True,
- * out_bits = 8,
+ * out_bits = 16,
* out_legal = False,
* out_int = True)
*
@@ -76,13 +76,13 @@ static struct yuv_u8_to_argb_u16_case yuv_u8_to_argb_u16_cases[] = {
.range = DRM_COLOR_YCBCR_FULL_RANGE,
.n_colors = 6,
.colors = {
- { "white", { 0xff, 0x80, 0x80 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
- { "gray", { 0x80, 0x80, 0x80 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
- { "black", { 0x00, 0x80, 0x80 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
- { "red", { 0x4c, 0x55, 0xff }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
- { "green", { 0x96, 0x2c, 0x15 }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
- { "blue", { 0x1d, 0xff, 0x6b }, { 0xffff, 0x0000, 0x0000, 0xffff }},
- },
+ { "white", { 0xffff, 0x8000, 0x8000 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
+ { "gray", { 0x8080, 0x8000, 0x8000 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
+ { "black", { 0x0000, 0x8000, 0x8000 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
+ { "red", { 0x4c8b, 0x54ce, 0xffff }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
+ { "green", { 0x9645, 0x2b33, 0x14d1 }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
+ { "blue", { 0x1d2f, 0xffff, 0x6b2f }, { 0xffff, 0x0000, 0x0000, 0xffff }},
+ }
},
/*
* colour.RGB_to_YCbCr(<rgb color in 16 bit form>,
@@ -90,7 +90,7 @@ static struct yuv_u8_to_argb_u16_case yuv_u8_to_argb_u16_cases[] = {
* in_bits = 16,
* in_legal = False,
* in_int = True,
- * out_bits = 8,
+ * out_bits = 16,
* out_legal = True,
* out_int = True)
* Tests cases for color conversion generated by converting RGB
@@ -101,13 +101,13 @@ static struct yuv_u8_to_argb_u16_case yuv_u8_to_argb_u16_cases[] = {
.range = DRM_COLOR_YCBCR_LIMITED_RANGE,
.n_colors = 6,
.colors = {
- { "white", { 0xeb, 0x80, 0x80 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
- { "gray", { 0x7e, 0x80, 0x80 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
- { "black", { 0x10, 0x80, 0x80 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
- { "red", { 0x51, 0x5a, 0xf0 }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
- { "green", { 0x91, 0x36, 0x22 }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
- { "blue", { 0x29, 0xf0, 0x6e }, { 0xffff, 0x0000, 0x0000, 0xffff }},
- },
+ { "white", { 0xeb00, 0x8000, 0x8000 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
+ { "gray", { 0x7dee, 0x8000, 0x8000 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
+ { "black", { 0x1000, 0x8000, 0x8000 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
+ { "red", { 0x517b, 0x5a34, 0xf000 }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
+ { "green", { 0x908e, 0x35cc, 0x2237 }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
+ { "blue", { 0x28f7, 0xf000, 0x6dc9 }, { 0xffff, 0x0000, 0x0000, 0xffff }},
+ }
},
/*
* colour.RGB_to_YCbCr(<rgb color in 16 bit form>,
@@ -115,7 +115,7 @@ static struct yuv_u8_to_argb_u16_case yuv_u8_to_argb_u16_cases[] = {
* in_bits = 16,
* in_legal = False,
* in_int = True,
- * out_bits = 8,
+ * out_bits = 16,
* out_legal = False,
* out_int = True)
* Tests cases for color conversion generated by converting RGB
@@ -126,21 +126,21 @@ static struct yuv_u8_to_argb_u16_case yuv_u8_to_argb_u16_cases[] = {
.range = DRM_COLOR_YCBCR_FULL_RANGE,
.n_colors = 6,
.colors = {
- { "white", { 0xff, 0x80, 0x80 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
- { "gray", { 0x80, 0x80, 0x80 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
- { "black", { 0x00, 0x80, 0x80 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
- { "red", { 0x36, 0x63, 0xff }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
- { "green", { 0xb6, 0x1e, 0x0c }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
- { "blue", { 0x12, 0xff, 0x74 }, { 0xffff, 0x0000, 0x0000, 0xffff }},
- },
+ { "white", { 0xffff, 0x8000, 0x8000 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
+ { "gray", { 0x8080, 0x8000, 0x8000 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
+ { "black", { 0x0000, 0x8000, 0x8000 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
+ { "red", { 0x366d, 0x62ac, 0xffff }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
+ { "green", { 0xb717, 0x1d55, 0x0bbd }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
+ { "blue", { 0x127c, 0xffff, 0x7443 }, { 0xffff, 0x0000, 0x0000, 0xffff }},
+ }
},
/*
* colour.RGB_to_YCbCr(<rgb color in 16 bit form>,
* K=colour.WEIGHTS_YCBCR["ITU-R BT.709"],
* in_bits = 16,
- * int_legal = False,
+ * in_legal = False,
* in_int = True,
- * out_bits = 8,
+ * out_bits = 16,
* out_legal = True,
* out_int = True)
* Tests cases for color conversion generated by converting RGB
@@ -151,13 +151,13 @@ static struct yuv_u8_to_argb_u16_case yuv_u8_to_argb_u16_cases[] = {
.range = DRM_COLOR_YCBCR_LIMITED_RANGE,
.n_colors = 6,
.colors = {
- { "white", { 0xeb, 0x80, 0x80 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
- { "gray", { 0x7e, 0x80, 0x80 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
- { "black", { 0x10, 0x80, 0x80 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
- { "red", { 0x3f, 0x66, 0xf0 }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
- { "green", { 0xad, 0x2a, 0x1a }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
- { "blue", { 0x20, 0xf0, 0x76 }, { 0xffff, 0x0000, 0x0000, 0xffff }},
- },
+ { "white", { 0xeb00, 0x8000, 0x8000 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
+ { "gray", { 0x7dee, 0x8000, 0x8000 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
+ { "black", { 0x1000, 0x8000, 0x8000 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
+ { "red", { 0x3e8f, 0x6656, 0xf000 }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
+ { "green", { 0xaca1, 0x29aa, 0x1a45 }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
+ { "blue", { 0x1fd0, 0xf000, 0x75bb }, { 0xffff, 0x0000, 0x0000, 0xffff }},
+ }
},
/*
* colour.RGB_to_YCbCr(<rgb color in 16 bit form>,
@@ -165,7 +165,7 @@ static struct yuv_u8_to_argb_u16_case yuv_u8_to_argb_u16_cases[] = {
* in_bits = 16,
* in_legal = False,
* in_int = True,
- * out_bits = 8,
+ * out_bits = 16,
* out_legal = False,
* out_int = True)
* Tests cases for color conversion generated by converting RGB
@@ -176,13 +176,13 @@ static struct yuv_u8_to_argb_u16_case yuv_u8_to_argb_u16_cases[] = {
.range = DRM_COLOR_YCBCR_FULL_RANGE,
.n_colors = 6,
.colors = {
- { "white", { 0xff, 0x80, 0x80 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
- { "gray", { 0x80, 0x80, 0x80 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
- { "black", { 0x00, 0x80, 0x80 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
- { "red", { 0x43, 0x5c, 0xff }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
- { "green", { 0xad, 0x24, 0x0b }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
- { "blue", { 0x0f, 0xff, 0x76 }, { 0xffff, 0x0000, 0x0000, 0xffff }},
- },
+ { "white", { 0xffff, 0x8000, 0x8000 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
+ { "gray", { 0x8080, 0x8000, 0x8000 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
+ { "black", { 0x0000, 0x8000, 0x8000 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
+ { "red", { 0x4340, 0x5c41, 0xffff }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
+ { "green", { 0xad91, 0x23bf, 0x0a4c }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
+ { "blue", { 0x0f2e, 0xffff, 0x75b5 }, { 0xffff, 0x0000, 0x0000, 0xffff }},
+ }
},
/*
* colour.RGB_to_YCbCr(<rgb color in 16 bit form>,
@@ -190,7 +190,7 @@ static struct yuv_u8_to_argb_u16_case yuv_u8_to_argb_u16_cases[] = {
* in_bits = 16,
* in_legal = False,
* in_int = True,
- * out_bits = 8,
+ * out_bits = 16,
* out_legal = True,
* out_int = True)
* Tests cases for color conversion generated by converting RGB
@@ -201,32 +201,30 @@ static struct yuv_u8_to_argb_u16_case yuv_u8_to_argb_u16_cases[] = {
.range = DRM_COLOR_YCBCR_LIMITED_RANGE,
.n_colors = 6,
.colors = {
- { "white", { 0xeb, 0x80, 0x80 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
- { "gray", { 0x7e, 0x80, 0x80 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
- { "black", { 0x10, 0x80, 0x80 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
- { "red", { 0x4a, 0x61, 0xf0 }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
- { "green", { 0xa4, 0x2f, 0x19 }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
- { "blue", { 0x1d, 0xf0, 0x77 }, { 0xffff, 0x0000, 0x0000, 0xffff }},
- },
+ { "white", { 0xeb00, 0x8000, 0x8000 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
+ { "gray", { 0x7dee, 0x8000, 0x8000 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
+ { "black", { 0x1000, 0x8000, 0x8000 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
+ { "red", { 0x4988, 0x60b9, 0xf000 }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
+ { "green", { 0xa47b, 0x2f47, 0x1902 }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
+ { "blue", { 0x1cfd, 0xf000, 0x76fe }, { 0xffff, 0x0000, 0x0000, 0xffff }},
+ }
},
};
/*
- * vkms_format_test_yuv_u8_to_argb_u16 - Testing the conversion between YUV
+ * vkms_format_test_yuv_u16_to_argb_u16 - Testing the conversion between YUV
* colors to ARGB colors in VKMS
*
* This test will use the functions get_conversion_matrix_to_argb_u16 and
- * argb_u16_from_yuv888 to convert YUV colors (stored in
- * yuv_u8_to_argb_u16_cases) into ARGB colors.
+ * argb_u16_from_yuv161616 to convert YUV colors (stored in
+ * yuv_u16_to_argb_u16_cases) into ARGB colors.
*
* The conversion between YUV and RGB is not totally reversible, so there may be
* some difference between the expected value and the result.
- * In addition, there may be some rounding error as the input color is 8 bits
- * and output color is 16 bits.
*/
-static void vkms_format_test_yuv_u8_to_argb_u16(struct kunit *test)
+static void vkms_format_test_yuv_u16_to_argb_u16(struct kunit *test)
{
- const struct yuv_u8_to_argb_u16_case *param = test->param_value;
+ const struct yuv_u16_to_argb_u16_case *param = test->param_value;
struct pixel_argb_u16 argb;
for (size_t i = 0; i < param->n_colors; i++) {
@@ -236,7 +234,8 @@ static void vkms_format_test_yuv_u8_to_argb_u16(struct kunit *test)
get_conversion_matrix_to_argb_u16
(DRM_FORMAT_NV12, param->encoding, param->range, &matrix);
- argb = argb_u16_from_yuv888(color->yuv.y, color->yuv.u, color->yuv.v, &matrix);
+ argb = argb_u16_from_yuv161616(&matrix, color->yuv.y, color->yuv.u,
+ color->yuv.v);
KUNIT_EXPECT_LE_MSG(test, abs_diff(argb.a, color->argb.a), 0x1ff,
"On the A channel of the color %s expected 0x%04x, got 0x%04x",
@@ -253,19 +252,19 @@ static void vkms_format_test_yuv_u8_to_argb_u16(struct kunit *test)
}
}
-static void vkms_format_test_yuv_u8_to_argb_u16_case_desc(struct yuv_u8_to_argb_u16_case *t,
- char *desc)
+static void vkms_format_test_yuv_u16_to_argb_u16_case_desc(struct yuv_u16_to_argb_u16_case *t,
+ char *desc)
{
snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s - %s",
drm_get_color_encoding_name(t->encoding), drm_get_color_range_name(t->range));
}
-KUNIT_ARRAY_PARAM(yuv_u8_to_argb_u16, yuv_u8_to_argb_u16_cases,
- vkms_format_test_yuv_u8_to_argb_u16_case_desc
+KUNIT_ARRAY_PARAM(yuv_u16_to_argb_u16, yuv_u16_to_argb_u16_cases,
+ vkms_format_test_yuv_u16_to_argb_u16_case_desc
);
static struct kunit_case vkms_format_test_cases[] = {
- KUNIT_CASE_PARAM(vkms_format_test_yuv_u8_to_argb_u16, yuv_u8_to_argb_u16_gen_params),
+ KUNIT_CASE_PARAM(vkms_format_test_yuv_u16_to_argb_u16, yuv_u16_to_argb_u16_gen_params),
{}
};
diff --git a/drivers/gpu/drm/vkms/vkms_formats.c b/drivers/gpu/drm/vkms/vkms_formats.c
index 6d0227c6635a..dfb8e13cba87 100644
--- a/drivers/gpu/drm/vkms/vkms_formats.c
+++ b/drivers/gpu/drm/vkms/vkms_formats.c
@@ -259,16 +259,27 @@ static struct pixel_argb_u16 argb_u16_from_grayu16(u16 gray)
return argb_u16_from_u16161616(0xFFFF, gray, gray, gray);
}
-VISIBLE_IF_KUNIT struct pixel_argb_u16 argb_u16_from_yuv888(u8 y, u8 channel_1, u8 channel_2,
- const struct conversion_matrix *matrix)
+static struct pixel_argb_u16 argb_u16_from_BGR565(const __le16 *pixel)
+{
+ struct pixel_argb_u16 out_pixel;
+
+ out_pixel = argb_u16_from_RGB565(pixel);
+ swap(out_pixel.r, out_pixel.b);
+
+ return out_pixel;
+}
+
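A note on the helper above: DRM_FORMAT_BGR565 shares the 5:6:5 packing of DRM_FORMAT_RGB565 ([15:0] 5:6:5, little endian) with the red and blue fields swapped, which is why converting through argb_u16_from_RGB565() and swapping the two channels is sufficient. A standalone sketch of the raw field extraction (illustrative only; the kernel helper additionally scales each field up to 16 bits):

	#include <stdint.h>

	/* Split a little-endian RGB565 word into its raw 5/6/5 fields. */
	static void unpack_rgb565(uint16_t v, uint16_t *r, uint16_t *g, uint16_t *b)
	{
		*r = (v >> 11) & 0x1f;	/* bits 15:11 */
		*g = (v >> 5) & 0x3f;	/* bits 10:5  */
		*b = v & 0x1f;		/* bits 4:0   */
	}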
+VISIBLE_IF_KUNIT
+struct pixel_argb_u16 argb_u16_from_yuv161616(const struct conversion_matrix *matrix,
+ u16 y, u16 channel_1, u16 channel_2)
{
u16 r, g, b;
s64 fp_y, fp_channel_1, fp_channel_2;
s64 fp_r, fp_g, fp_b;
- fp_y = drm_int2fixp(((int)y - matrix->y_offset) * 257);
- fp_channel_1 = drm_int2fixp(((int)channel_1 - 128) * 257);
- fp_channel_2 = drm_int2fixp(((int)channel_2 - 128) * 257);
+ fp_y = drm_int2fixp((int)y - matrix->y_offset * 257);
+ fp_channel_1 = drm_int2fixp((int)channel_1 - 128 * 257);
+ fp_channel_2 = drm_int2fixp((int)channel_2 - 128 * 257);
fp_r = drm_fixp_mul(matrix->matrix[0][0], fp_y) +
drm_fixp_mul(matrix->matrix[0][1], fp_channel_1) +
@@ -290,7 +301,65 @@ VISIBLE_IF_KUNIT struct pixel_argb_u16 argb_u16_from_yuv888(u8 y, u8 channel_1,
return argb_u16_from_u16161616(0xffff, r, g, b);
}
-EXPORT_SYMBOL_IF_KUNIT(argb_u16_from_yuv888);
+EXPORT_SYMBOL_IF_KUNIT(argb_u16_from_yuv161616);
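In the updated math above, the 8-bit offsets (matrix->y_offset and the chroma midpoint 128) are promoted to 16-bit scale by multiplying by 257 (0x101), which replicates the byte: 0xff * 257 == 0xffff and 16 * 257 == 0x1010. A minimal standalone check of that identity (illustrative, not kernel code):

	#include <stdint.h>
	#include <stdio.h>

	/* Expand an 8-bit sample to 16-bit scale: 0xab -> 0xabab. */
	static uint16_t scale_8_to_16(uint8_t v)
	{
		return (uint16_t)v * 257;
	}

	int main(void)
	{
		printf("0x%04x\n", scale_8_to_16(0xff)); /* 0xffff */
		printf("0x%04x\n", scale_8_to_16(16));   /* 0x1010 */
		return 0;
	}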
+
+/**
+ * READ_LINE() - Generic generator for a read_line function which can be used for formats with one
+ * plane and a block_h == block_w == 1.
+ *
+ * @function_name: Function name to generate
+ * @pixel_name: Temporary pixel name used in the @__VA_ARGS__ parameters
+ * @pixel_type: Type to cast the pixel pointer to
+ * @callback: Callback to call for each pixel. This function should take @__VA_ARGS__ as parameters
+ * and return a pixel_argb_u16
+ * __VA_ARGS__: Arguments to pass to the callback. You can use @pixel_name to access the current
+ * pixel.
+ */
+#define READ_LINE(function_name, pixel_name, pixel_type, callback, ...) \
+static void function_name(const struct vkms_plane_state *plane, int x_start, \
+ int y_start, enum pixel_read_direction direction, int count, \
+ struct pixel_argb_u16 out_pixel[]) \
+{ \
+ struct pixel_argb_u16 *end = out_pixel + count; \
+ int step = get_block_step_bytes(plane->frame_info->fb, direction, 0); \
+ u8 *src_pixels; \
+ \
+ packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels); \
+ \
+ while (out_pixel < end) { \
+ pixel_type *(pixel_name) = (pixel_type *)src_pixels; \
+ *out_pixel = (callback)(__VA_ARGS__); \
+ out_pixel += 1; \
+ src_pixels += step; \
+ } \
+}
+
+/**
+ * READ_LINE_ARGB8888() - Generic generator for ARGB8888 formats.
+ * The pixel type used is u8, so pixel_name[0]..pixel_name[n] are the n components of the pixel.
+ *
+ * @function_name: Function name to generate
+ * @pixel_name: temporary pixel to use in @a, @r, @g and @b parameters
+ * @a: alpha value
+ * @r: red value
+ * @g: green value
+ * @b: blue value
+ */
+#define READ_LINE_ARGB8888(function_name, pixel_name, a, r, g, b) \
+ READ_LINE(function_name, pixel_name, u8, argb_u16_from_u8888, a, r, g, b)
+/**
+ * READ_LINE_le16161616() - Generic generator for ARGB16161616 formats.
+ * The pixel type used is __le16, so pixel_name[0]..pixel_name[n] are the n components of the pixel.
+ *
+ * @function_name: Function name to generate
+ * @pixel_name: temporary pixel to use in @a, @r, @g and @b parameters
+ * @a: alpha value
+ * @r: red value
+ * @g: green value
+ * @b: blue value
+ */
+#define READ_LINE_le16161616(function_name, pixel_name, a, r, g, b) \
+ READ_LINE(function_name, pixel_name, __le16, argb_u16_from_le16161616, a, r, g, b)
/*
* The following functions are read_line function for each pixel format supported by VKMS.
@@ -378,138 +447,27 @@ static void R4_read_line(const struct vkms_plane_state *plane, int x_start,
Rx_read_line(plane, x_start, y_start, direction, count, out_pixel);
}
-static void R8_read_line(const struct vkms_plane_state *plane, int x_start,
- int y_start, enum pixel_read_direction direction, int count,
- struct pixel_argb_u16 out_pixel[])
-{
- struct pixel_argb_u16 *end = out_pixel + count;
- u8 *src_pixels;
- int step = get_block_step_bytes(plane->frame_info->fb, direction, 0);
-
- packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels);
-
- while (out_pixel < end) {
- *out_pixel = argb_u16_from_gray8(*src_pixels);
- src_pixels += step;
- out_pixel += 1;
- }
-}
-
-static void ARGB8888_read_line(const struct vkms_plane_state *plane, int x_start, int y_start,
- enum pixel_read_direction direction, int count,
- struct pixel_argb_u16 out_pixel[])
-{
- struct pixel_argb_u16 *end = out_pixel + count;
- u8 *src_pixels;
-
- packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels);
-
- int step = get_block_step_bytes(plane->frame_info->fb, direction, 0);
-
- while (out_pixel < end) {
- u8 *px = (u8 *)src_pixels;
- *out_pixel = argb_u16_from_u8888(px[3], px[2], px[1], px[0]);
- out_pixel += 1;
- src_pixels += step;
- }
-}
-
-static void XRGB8888_read_line(const struct vkms_plane_state *plane, int x_start, int y_start,
- enum pixel_read_direction direction, int count,
- struct pixel_argb_u16 out_pixel[])
-{
- struct pixel_argb_u16 *end = out_pixel + count;
- u8 *src_pixels;
-
- packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels);
-
- int step = get_block_step_bytes(plane->frame_info->fb, direction, 0);
-
- while (out_pixel < end) {
- u8 *px = (u8 *)src_pixels;
- *out_pixel = argb_u16_from_u8888(255, px[2], px[1], px[0]);
- out_pixel += 1;
- src_pixels += step;
- }
-}
-
-static void ABGR8888_read_line(const struct vkms_plane_state *plane, int x_start, int y_start,
- enum pixel_read_direction direction, int count,
- struct pixel_argb_u16 out_pixel[])
-{
- struct pixel_argb_u16 *end = out_pixel + count;
- u8 *src_pixels;
-
- packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels);
-
- int step = get_block_step_bytes(plane->frame_info->fb, direction, 0);
-
- while (out_pixel < end) {
- u8 *px = (u8 *)src_pixels;
- /* Switch blue and red pixels. */
- *out_pixel = argb_u16_from_u8888(px[3], px[0], px[1], px[2]);
- out_pixel += 1;
- src_pixels += step;
- }
-}
-
-static void ARGB16161616_read_line(const struct vkms_plane_state *plane, int x_start,
- int y_start, enum pixel_read_direction direction, int count,
- struct pixel_argb_u16 out_pixel[])
-{
- struct pixel_argb_u16 *end = out_pixel + count;
- u8 *src_pixels;
-
- packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels);
-
- int step = get_block_step_bytes(plane->frame_info->fb, direction, 0);
- while (out_pixel < end) {
- u16 *px = (u16 *)src_pixels;
- *out_pixel = argb_u16_from_u16161616(px[3], px[2], px[1], px[0]);
- out_pixel += 1;
- src_pixels += step;
- }
-}
+READ_LINE_ARGB8888(XRGB8888_read_line, px, 0xFF, px[2], px[1], px[0])
+READ_LINE_ARGB8888(XBGR8888_read_line, px, 0xFF, px[0], px[1], px[2])
-static void XRGB16161616_read_line(const struct vkms_plane_state *plane, int x_start,
- int y_start, enum pixel_read_direction direction, int count,
- struct pixel_argb_u16 out_pixel[])
-{
- struct pixel_argb_u16 *end = out_pixel + count;
- u8 *src_pixels;
-
- packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels);
-
- int step = get_block_step_bytes(plane->frame_info->fb, direction, 0);
-
- while (out_pixel < end) {
- __le16 *px = (__le16 *)src_pixels;
- *out_pixel = argb_u16_from_le16161616(cpu_to_le16(0xFFFF), px[2], px[1], px[0]);
- out_pixel += 1;
- src_pixels += step;
- }
-}
-
-static void RGB565_read_line(const struct vkms_plane_state *plane, int x_start,
- int y_start, enum pixel_read_direction direction, int count,
- struct pixel_argb_u16 out_pixel[])
-{
- struct pixel_argb_u16 *end = out_pixel + count;
- u8 *src_pixels;
+READ_LINE_ARGB8888(ARGB8888_read_line, px, px[3], px[2], px[1], px[0])
+READ_LINE_ARGB8888(ABGR8888_read_line, px, px[3], px[0], px[1], px[2])
+READ_LINE_ARGB8888(RGBA8888_read_line, px, px[0], px[3], px[2], px[1])
+READ_LINE_ARGB8888(BGRA8888_read_line, px, px[0], px[1], px[2], px[3])
- packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels);
+READ_LINE_ARGB8888(RGB888_read_line, px, 0xFF, px[2], px[1], px[0])
+READ_LINE_ARGB8888(BGR888_read_line, px, 0xFF, px[0], px[1], px[2])
- int step = get_block_step_bytes(plane->frame_info->fb, direction, 0);
+READ_LINE_le16161616(ARGB16161616_read_line, px, px[3], px[2], px[1], px[0])
+READ_LINE_le16161616(ABGR16161616_read_line, px, px[3], px[0], px[1], px[2])
+READ_LINE_le16161616(XRGB16161616_read_line, px, cpu_to_le16(0xFFFF), px[2], px[1], px[0])
+READ_LINE_le16161616(XBGR16161616_read_line, px, cpu_to_le16(0xFFFF), px[0], px[1], px[2])
- while (out_pixel < end) {
- __le16 *px = (__le16 *)src_pixels;
+READ_LINE(RGB565_read_line, px, __le16, argb_u16_from_RGB565, px)
+READ_LINE(BGR565_read_line, px, __le16, argb_u16_from_BGR565, px)
- *out_pixel = argb_u16_from_RGB565(px);
- out_pixel += 1;
- src_pixels += step;
- }
-}
+READ_LINE(R8_read_line, px, u8, argb_u16_from_gray8, *px)
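For readers new to the generator pattern, READ_LINE_ARGB8888(XRGB8888_read_line, px, 0xFF, px[2], px[1], px[0]) above expands, modulo whitespace, to roughly the following (a sketch of the preprocessor output, not additional source):

	static void XRGB8888_read_line(const struct vkms_plane_state *plane, int x_start,
				       int y_start, enum pixel_read_direction direction, int count,
				       struct pixel_argb_u16 out_pixel[])
	{
		struct pixel_argb_u16 *end = out_pixel + count;
		int step = get_block_step_bytes(plane->frame_info->fb, direction, 0);
		u8 *src_pixels;

		packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels);

		while (out_pixel < end) {
			u8 *px = (u8 *)src_pixels;
			*out_pixel = argb_u16_from_u8888(0xFF, px[2], px[1], px[0]);
			out_pixel += 1;
			src_pixels += step;
		}
	}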
/*
* This callback can be used for YUV formats where U and V values are
@@ -521,35 +479,57 @@ static void RGB565_read_line(const struct vkms_plane_state *plane, int x_start,
* - Convert YUV and YVU with the same function (a column swap is needed when setting up
* plane->conversion_matrix)
*/
-static void semi_planar_yuv_read_line(const struct vkms_plane_state *plane, int x_start,
- int y_start, enum pixel_read_direction direction, int count,
- struct pixel_argb_u16 out_pixel[])
-{
- u8 *y_plane;
- u8 *uv_plane;
-
- packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0,
- &y_plane);
- packed_pixels_addr_1x1(plane->frame_info,
- x_start / plane->frame_info->fb->format->hsub,
- y_start / plane->frame_info->fb->format->vsub, 1,
- &uv_plane);
- int step_y = get_block_step_bytes(plane->frame_info->fb, direction, 0);
- int step_uv = get_block_step_bytes(plane->frame_info->fb, direction, 1);
- int subsampling = get_subsampling(plane->frame_info->fb->format, direction);
- int subsampling_offset = get_subsampling_offset(direction, x_start, y_start);
- const struct conversion_matrix *conversion_matrix = &plane->conversion_matrix;
-
- for (int i = 0; i < count; i++) {
- *out_pixel = argb_u16_from_yuv888(y_plane[0], uv_plane[0], uv_plane[1],
- conversion_matrix);
- out_pixel += 1;
- y_plane += step_y;
- if ((i + subsampling_offset + 1) % subsampling == 0)
- uv_plane += step_uv;
- }
-}
+/**
+ * READ_LINE_YUV_SEMIPLANAR() - Generic generator for a read_line function which can be used for YUV
+ * formats with two planes and block_w == block_h == 1.
+ *
+ * @function_name: Function name to generate
+ * @pixel_1_name: temporary pixel name for the first plane used in the @__VA_ARGS__ parameters
+ * @pixel_2_name: temporary pixel name for the second plane used in the @__VA_ARGS__ parameters
+ * @pixel_1_type: Type to cast the pixel pointer to for plane 1
+ * @pixel_2_type: Type to cast the pixel pointer to for plane 2
+ * @callback: Callback to call for each pixel. This function should take
+ * (struct conversion_matrix*, @__VA_ARGS__) as parameters and return a pixel_argb_u16
+ * __VA_ARGS__: Arguments to pass to the callback. You can use @pixel_1_name and @pixel_2_name
+ * to access the current pixel values
+ */
+#define READ_LINE_YUV_SEMIPLANAR(function_name, pixel_1_name, pixel_2_name, pixel_1_type, \
+ pixel_2_type, callback, ...) \
+static void function_name(const struct vkms_plane_state *plane, int x_start, \
+ int y_start, enum pixel_read_direction direction, int count, \
+ struct pixel_argb_u16 out_pixel[]) \
+{ \
+ u8 *plane_1; \
+ u8 *plane_2; \
+ \
+ packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, \
+ &plane_1); \
+ packed_pixels_addr_1x1(plane->frame_info, \
+ x_start / plane->frame_info->fb->format->hsub, \
+ y_start / plane->frame_info->fb->format->vsub, 1, \
+ &plane_2); \
+ int step_1 = get_block_step_bytes(plane->frame_info->fb, direction, 0); \
+ int step_2 = get_block_step_bytes(plane->frame_info->fb, direction, 1); \
+ int subsampling = get_subsampling(plane->frame_info->fb->format, direction); \
+ int subsampling_offset = get_subsampling_offset(direction, x_start, y_start); \
+ const struct conversion_matrix *conversion_matrix = &plane->conversion_matrix; \
+ \
+ for (int i = 0; i < count; i++) { \
+ pixel_1_type *(pixel_1_name) = (pixel_1_type *)plane_1; \
+ pixel_2_type *(pixel_2_name) = (pixel_2_type *)plane_2; \
+ *out_pixel = (callback)(conversion_matrix, __VA_ARGS__); \
+ out_pixel += 1; \
+ plane_1 += step_1; \
+ if ((i + subsampling_offset + 1) % subsampling == 0) \
+ plane_2 += step_2; \
+ } \
+}
+
+READ_LINE_YUV_SEMIPLANAR(YUV888_semiplanar_read_line, y, uv, u8, u8, argb_u16_from_yuv161616,
+ y[0] * 257, uv[0] * 257, uv[1] * 257)
+READ_LINE_YUV_SEMIPLANAR(YUV161616_semiplanar_read_line, y, uv, u16, u16, argb_u16_from_yuv161616,
+ y[0], uv[0], uv[1])
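The `(i + subsampling_offset + 1) % subsampling == 0` test in the macro body is what keeps the chroma plane in step with the luma plane. A tiny standalone demonstration for NV12-style 2x horizontal subsampling (illustrative only):

	#include <stdio.h>

	int main(void)
	{
		int subsampling = 2, subsampling_offset = 0, uv = 0;

		for (int i = 0; i < 6; i++) {
			/* Luma samples 0,1 share uv 0; 2,3 share uv 1; and so on. */
			printf("luma %d -> uv %d\n", i, uv);
			if ((i + subsampling_offset + 1) % subsampling == 0)
				uv++;
		}
		return 0;
	}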
/*
* This callback can be used for YUV format where each color component is
* stored in a different plane (often called planar formats). It will
@@ -586,8 +566,9 @@ static void planar_yuv_read_line(const struct vkms_plane_state *plane, int x_sta
const struct conversion_matrix *conversion_matrix = &plane->conversion_matrix;
for (int i = 0; i < count; i++) {
- *out_pixel = argb_u16_from_yuv888(*y_plane, *channel_1_plane, *channel_2_plane,
- conversion_matrix);
+ *out_pixel = argb_u16_from_yuv161616(conversion_matrix,
+ *y_plane * 257, *channel_1_plane * 257,
+ *channel_2_plane * 257);
out_pixel += 1;
y_plane += step_y;
if ((i + subsampling_offset + 1) % subsampling == 0) {
@@ -712,23 +693,43 @@ pixel_read_line_t get_pixel_read_line_function(u32 format)
switch (format) {
case DRM_FORMAT_ARGB8888:
return &ARGB8888_read_line;
- case DRM_FORMAT_XRGB8888:
- return &XRGB8888_read_line;
case DRM_FORMAT_ABGR8888:
return &ABGR8888_read_line;
+ case DRM_FORMAT_BGRA8888:
+ return &BGRA8888_read_line;
+ case DRM_FORMAT_RGBA8888:
+ return &RGBA8888_read_line;
+ case DRM_FORMAT_XRGB8888:
+ return &XRGB8888_read_line;
+ case DRM_FORMAT_XBGR8888:
+ return &XBGR8888_read_line;
+ case DRM_FORMAT_RGB888:
+ return &RGB888_read_line;
+ case DRM_FORMAT_BGR888:
+ return &BGR888_read_line;
case DRM_FORMAT_ARGB16161616:
return &ARGB16161616_read_line;
+ case DRM_FORMAT_ABGR16161616:
+ return &ABGR16161616_read_line;
case DRM_FORMAT_XRGB16161616:
return &XRGB16161616_read_line;
+ case DRM_FORMAT_XBGR16161616:
+ return &XBGR16161616_read_line;
case DRM_FORMAT_RGB565:
return &RGB565_read_line;
+ case DRM_FORMAT_BGR565:
+ return &BGR565_read_line;
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV16:
case DRM_FORMAT_NV24:
case DRM_FORMAT_NV21:
case DRM_FORMAT_NV61:
case DRM_FORMAT_NV42:
- return &semi_planar_yuv_read_line;
+ return &YUV888_semiplanar_read_line;
+ case DRM_FORMAT_P010:
+ case DRM_FORMAT_P012:
+ case DRM_FORMAT_P016:
+ return &YUV161616_semiplanar_read_line;
case DRM_FORMAT_YUV420:
case DRM_FORMAT_YUV422:
case DRM_FORMAT_YUV444:
diff --git a/drivers/gpu/drm/vkms/vkms_formats.h b/drivers/gpu/drm/vkms/vkms_formats.h
index b4fe62ab9c65..eeb208cdd6b1 100644
--- a/drivers/gpu/drm/vkms/vkms_formats.h
+++ b/drivers/gpu/drm/vkms/vkms_formats.h
@@ -14,8 +14,8 @@ void get_conversion_matrix_to_argb_u16(u32 format, enum drm_color_encoding encod
struct conversion_matrix *matrix);
#if IS_ENABLED(CONFIG_KUNIT)
-struct pixel_argb_u16 argb_u16_from_yuv888(u8 y, u8 channel_1, u8 channel_2,
- const struct conversion_matrix *matrix);
+struct pixel_argb_u16 argb_u16_from_yuv161616(const struct conversion_matrix *matrix,
+ u16 y, u16 channel_1, u16 channel_2);
#endif
#endif /* _VKMS_FORMATS_H_ */
diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c
index 8d7ca0cdd79f..2ee3749e2b28 100644
--- a/drivers/gpu/drm/vkms/vkms_output.c
+++ b/drivers/gpu/drm/vkms/vkms_output.c
@@ -77,9 +77,22 @@ int vkms_output_init(struct vkms_device *vkmsdev)
return ret;
}
+ encoder_cfg->encoder->possible_clones |=
+ drm_encoder_mask(encoder_cfg->encoder);
+
vkms_config_encoder_for_each_possible_crtc(encoder_cfg, idx, possible_crtc) {
encoder_cfg->encoder->possible_crtcs |=
drm_crtc_mask(&possible_crtc->crtc->crtc);
+
+ if (vkms_config_crtc_get_writeback(possible_crtc)) {
+ struct drm_encoder *wb_encoder =
+ &possible_crtc->crtc->wb_encoder;
+
+ encoder_cfg->encoder->possible_clones |=
+ drm_encoder_mask(wb_encoder);
+ wb_encoder->possible_clones |=
+ drm_encoder_mask(encoder_cfg->encoder);
+ }
}
}
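For context, possible_clones is the bitmask of encoders that may be enabled simultaneously with a given encoder, and the mask must always include the encoder itself, hence the self-mask added first. The writeback pairing above is symmetric by construction; a hypothetical helper making that explicit might look like:

	/* Hypothetical helper, not part of this patch: clone masks must be
	 * symmetric and each encoder must list itself. */
	static void vkms_set_mutual_clones(struct drm_encoder *a, struct drm_encoder *b)
	{
		a->possible_clones |= drm_encoder_mask(a) | drm_encoder_mask(b);
		b->possible_clones |= drm_encoder_mask(b) | drm_encoder_mask(a);
	}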
diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c
index e3fdd161d0f0..e592e47a5736 100644
--- a/drivers/gpu/drm/vkms/vkms_plane.c
+++ b/drivers/gpu/drm/vkms/vkms_plane.c
@@ -14,11 +14,19 @@
static const u32 vkms_formats[] = {
DRM_FORMAT_ARGB8888,
- DRM_FORMAT_XRGB8888,
DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGR888,
DRM_FORMAT_XRGB16161616,
+ DRM_FORMAT_XBGR16161616,
DRM_FORMAT_ARGB16161616,
+ DRM_FORMAT_ABGR16161616,
DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR565,
DRM_FORMAT_NV12,
DRM_FORMAT_NV16,
DRM_FORMAT_NV24,
@@ -31,6 +39,9 @@ static const u32 vkms_formats[] = {
DRM_FORMAT_YVU420,
DRM_FORMAT_YVU422,
DRM_FORMAT_YVU444,
+ DRM_FORMAT_P010,
+ DRM_FORMAT_P012,
+ DRM_FORMAT_P016,
DRM_FORMAT_R1,
DRM_FORMAT_R2,
DRM_FORMAT_R4,
diff --git a/drivers/gpu/drm/vkms/vkms_writeback.c b/drivers/gpu/drm/vkms/vkms_writeback.c
index fe163271d5b5..45d69a3b85f6 100644
--- a/drivers/gpu/drm/vkms/vkms_writeback.c
+++ b/drivers/gpu/drm/vkms/vkms_writeback.c
@@ -174,6 +174,8 @@ int vkms_enable_writeback_connector(struct vkms_device *vkmsdev,
if (ret)
return ret;
vkms_output->wb_encoder.possible_crtcs |= drm_crtc_mask(&vkms_output->crtc);
+ vkms_output->wb_encoder.possible_clones |=
+ drm_encoder_mask(&vkms_output->wb_encoder);
drm_connector_helper_add(&wb->base, &vkms_wb_conn_helper_funcs);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index c2294abbe753..00be92da5509 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -538,7 +538,7 @@ static void vmw_event_fence_action_seq_passed(struct dma_fence *f,
if (likely(eaction->tv_sec != NULL)) {
struct timespec64 ts;
- ktime_to_timespec64(f->timestamp);
+ ts = ktime_to_timespec64(f->timestamp);
/* monotonic time, so no y2038 overflow */
*eaction->tv_sec = ts.tv_sec;
*eaction->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
diff --git a/drivers/gpu/drm/xe/Kconfig b/drivers/gpu/drm/xe/Kconfig
index 714d5702dfd7..7219f6b884b6 100644
--- a/drivers/gpu/drm/xe/Kconfig
+++ b/drivers/gpu/drm/xe/Kconfig
@@ -40,12 +40,12 @@ config DRM_XE
select DRM_TTM
select DRM_TTM_HELPER
select DRM_EXEC
+ select DRM_GPUSVM if !UML && DEVICE_PRIVATE
select DRM_GPUVM
select DRM_SCHED
select MMU_NOTIFIER
select WANT_DEV_COREDUMP
select AUXILIARY_BUS
- select HMM_MIRROR
select REGMAP if I2C
help
Driver for Intel Xe2 series GPUs and later. Experimental support
diff --git a/drivers/gpu/drm/xe/Kconfig.debug b/drivers/gpu/drm/xe/Kconfig.debug
index 01735c6ece8b..87902b4bd6d3 100644
--- a/drivers/gpu/drm/xe/Kconfig.debug
+++ b/drivers/gpu/drm/xe/Kconfig.debug
@@ -104,6 +104,7 @@ config DRM_XE_DEBUG_GUC
config DRM_XE_USERPTR_INVAL_INJECT
bool "Inject userptr invalidation -EINVAL errors"
+ depends on DRM_GPUSVM
default n
help
Choose this option when debugging error paths that
diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
index 07c71a29963d..d9c6cf0f189e 100644
--- a/drivers/gpu/drm/xe/Makefile
+++ b/drivers/gpu/drm/xe/Makefile
@@ -35,6 +35,7 @@ $(obj)/generated/%_device_wa_oob.c $(obj)/generated/%_device_wa_oob.h: $(obj)/xe
xe-y += xe_bb.o \
xe_bo.o \
xe_bo_evict.o \
+ xe_dep_scheduler.o \
xe_devcoredump.o \
xe_device.o \
xe_device_sysfs.o \
@@ -60,7 +61,6 @@ xe-y += xe_bb.o \
xe_gt_pagefault.o \
xe_gt_sysfs.o \
xe_gt_throttle.o \
- xe_gt_tlb_invalidation.o \
xe_gt_topology.o \
xe_guc.o \
xe_guc_ads.o \
@@ -75,16 +75,20 @@ xe-y += xe_bb.o \
xe_guc_log.o \
xe_guc_pc.o \
xe_guc_submit.o \
+ xe_guc_tlb_inval.o \
xe_heci_gsc.o \
xe_huc.o \
xe_hw_engine.o \
xe_hw_engine_class_sysfs.o \
xe_hw_engine_group.o \
+ xe_hw_error.o \
xe_hw_fence.o \
xe_irq.o \
+ xe_late_bind_fw.o \
xe_lrc.o \
xe_migrate.o \
xe_mmio.o \
+ xe_mmio_gem.o \
xe_mocs.o \
xe_module.o \
xe_nvm.o \
@@ -95,6 +99,7 @@ xe-y += xe_bb.o \
xe_pcode.o \
xe_pm.o \
xe_preempt_fence.o \
+ xe_psmi.o \
xe_pt.o \
xe_pt_walk.o \
xe_pxp.o \
@@ -114,6 +119,8 @@ xe-y += xe_bb.o \
xe_sync.o \
xe_tile.o \
xe_tile_sysfs.o \
+ xe_tlb_inval.o \
+ xe_tlb_inval_job.o \
xe_trace.o \
xe_trace_bo.o \
xe_trace_guc.o \
@@ -124,7 +131,9 @@ xe-y += xe_bb.o \
xe_tuning.o \
xe_uc.o \
xe_uc_fw.o \
+ xe_validation.o \
xe_vm.o \
+ xe_vm_madvise.o \
xe_vram.o \
xe_vram_freq.o \
xe_vsec.o \
@@ -133,8 +142,8 @@ xe-y += xe_bb.o \
xe_wopcm.o
xe-$(CONFIG_I2C) += xe_i2c.o
-xe-$(CONFIG_HMM_MIRROR) += xe_hmm.o
xe-$(CONFIG_DRM_XE_GPUSVM) += xe_svm.o
+xe-$(CONFIG_DRM_GPUSVM) += xe_userptr.o
# graphics hardware monitoring (HWMON) support
xe-$(CONFIG_HWMON) += xe_hwmon.o
@@ -149,6 +158,7 @@ xe-y += \
xe_memirq.o \
xe_sriov.o \
xe_sriov_vf.o \
+ xe_sriov_vf_ccs.o \
xe_tile_sriov_vf.o
xe-$(CONFIG_PCI_IOV) += \
@@ -202,6 +212,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
display/xe_dsb_buffer.o \
display/xe_fb_pin.o \
display/xe_hdcp_gsc.o \
+ display/xe_panic.o \
display/xe_plane_initial.o \
display/xe_tdf.o
@@ -317,6 +328,7 @@ ifeq ($(CONFIG_DEBUG_FS),y)
xe_gt_stats.o \
xe_guc_debugfs.o \
xe_huc_debugfs.o \
+ xe_tile_debugfs.o \
xe_uc_debugfs.o
xe-$(CONFIG_PCI_IOV) += xe_gt_sriov_pf_debugfs.o
diff --git a/drivers/gpu/drm/xe/abi/guc_actions_abi.h b/drivers/gpu/drm/xe/abi/guc_actions_abi.h
index b9f67d7a00d8..31090c69dfbe 100644
--- a/drivers/gpu/drm/xe/abi/guc_actions_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_actions_abi.h
@@ -155,6 +155,8 @@ enum xe_guc_action {
XE_GUC_ACTION_NOTIFY_FLUSH_LOG_BUFFER_TO_FILE = 0x8003,
XE_GUC_ACTION_NOTIFY_CRASH_DUMP_POSTED = 0x8004,
XE_GUC_ACTION_NOTIFY_EXCEPTION = 0x8005,
+ XE_GUC_ACTION_TEST_G2G_SEND = 0xF001,
+ XE_GUC_ACTION_TEST_G2G_RECV = 0xF002,
XE_GUC_ACTION_LIMIT
};
@@ -194,6 +196,14 @@ enum xe_guc_register_context_multi_lrc_param_offsets {
XE_GUC_REGISTER_CONTEXT_MULTI_LRC_MSG_MIN_LEN = 11,
};
+enum xe_guc_context_wq_item_offsets {
+ XE_GUC_CONTEXT_WQ_HEADER_DATA_0_TYPE_LEN = 0,
+ XE_GUC_CONTEXT_WQ_EL_INFO_DATA_1_CTX_DESC_LOW,
+ XE_GUC_CONTEXT_WQ_EL_INFO_DATA_2_GUCCTX_RINGTAIL_FREEZEPOCS,
+ XE_GUC_CONTEXT_WQ_EL_INFO_DATA_3_WI_FENCE_ID,
+ XE_GUC_CONTEXT_WQ_EL_CHILD_LIST_DATA_4_RINGTAIL,
+};
+
enum xe_guc_report_status {
XE_GUC_REPORT_STATUS_UNKNOWN = 0x0,
XE_GUC_REPORT_STATUS_ACKED = 0x1,
diff --git a/drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h b/drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h
index b28c8fa061f7..ce5c59517528 100644
--- a/drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h
@@ -210,6 +210,11 @@ struct slpc_shared_data {
u8 reserved_mode_definition[4096];
} __packed;
+enum slpc_power_profile {
+ SLPC_POWER_PROFILE_BASE = 0x0,
+ SLPC_POWER_PROFILE_POWER_SAVING = 0x1
+};
+
/**
* DOC: SLPC H2G MESSAGE FORMAT
*
diff --git a/drivers/gpu/drm/xe/abi/guc_errors_abi.h b/drivers/gpu/drm/xe/abi/guc_errors_abi.h
index ecf748fd87df..ad76b4baf42e 100644
--- a/drivers/gpu/drm/xe/abi/guc_errors_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_errors_abi.h
@@ -63,6 +63,7 @@ enum xe_guc_load_status {
XE_GUC_LOAD_STATUS_HWCONFIG_START = 0x05,
XE_GUC_LOAD_STATUS_HWCONFIG_DONE = 0x06,
XE_GUC_LOAD_STATUS_HWCONFIG_ERROR = 0x07,
+ XE_GUC_LOAD_STATUS_BOOTROM_VERSION_MISMATCH = 0x08,
XE_GUC_LOAD_STATUS_GDT_DONE = 0x10,
XE_GUC_LOAD_STATUS_IDT_DONE = 0x20,
XE_GUC_LOAD_STATUS_LAPIC_DONE = 0x30,
@@ -75,6 +76,8 @@ enum xe_guc_load_status {
XE_GUC_LOAD_STATUS_INVALID_INIT_DATA_RANGE_START,
XE_GUC_LOAD_STATUS_MPU_DATA_INVALID = 0x73,
XE_GUC_LOAD_STATUS_INIT_MMIO_SAVE_RESTORE_INVALID = 0x74,
+ XE_GUC_LOAD_STATUS_KLV_WORKAROUND_INIT_ERROR = 0x75,
+ XE_GUC_LOAD_STATUS_INVALID_FTR_FLAG = 0x76,
XE_GUC_LOAD_STATUS_INVALID_INIT_DATA_RANGE_END,
XE_GUC_LOAD_STATUS_READY = 0xF0,
diff --git a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
index d7719d0e36ca..265a135e7061 100644
--- a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
@@ -415,12 +415,14 @@ enum {
*/
enum xe_guc_klv_ids {
GUC_WORKAROUND_KLV_BLOCK_INTERRUPTS_WHEN_MGSR_BLOCKED = 0x9002,
+ GUC_WORKAROUND_KLV_DISABLE_PSMI_INTERRUPTS_AT_C6_ENTRY_RESTORE_AT_EXIT = 0x9004,
GUC_WORKAROUND_KLV_ID_GAM_PFQ_SHADOW_TAIL_POLLING = 0x9005,
GUC_WORKAROUND_KLV_ID_DISABLE_MTP_DURING_ASYNC_COMPUTE = 0x9007,
GUC_WA_KLV_NP_RD_WRITE_TO_CLEAR_RCSM_AT_CGP_LATE_RESTORE = 0x9008,
GUC_WORKAROUND_KLV_ID_BACK_TO_BACK_RCS_ENGINE_RESET = 0x9009,
GUC_WA_KLV_WAKE_POWER_DOMAINS_FOR_OUTBOUND_MMIO = 0x900a,
GUC_WA_KLV_RESET_BB_STACK_PTR_ON_VF_SWITCH = 0x900b,
+ GUC_WA_KLV_RESTORE_UNSAVED_MEDIA_CONTROL_REG = 0x900c,
};
#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h
index 41d39d67817a..f097fc6d5127 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h
@@ -8,6 +8,7 @@
#include "xe_ttm_stolen_mgr.h"
#include "xe_res_cursor.h"
+#include "xe_validation.h"
struct xe_bo;
@@ -21,7 +22,7 @@ static inline int i915_gem_stolen_insert_node_in_range(struct xe_device *xe,
u32 start, u32 end)
{
struct xe_bo *bo;
- int err;
+ int err = 0;
u32 flags = XE_BO_FLAG_PINNED | XE_BO_FLAG_STOLEN;
if (start < SZ_4K)
@@ -32,21 +33,13 @@ static inline int i915_gem_stolen_insert_node_in_range(struct xe_device *xe,
start = ALIGN(start, align);
}
- bo = xe_bo_create_locked_range(xe, xe_device_get_root_tile(xe),
- NULL, size, start, end,
- ttm_bo_type_kernel, flags, 0);
+ bo = xe_bo_create_pin_range_novm(xe, xe_device_get_root_tile(xe),
+ size, start, end, ttm_bo_type_kernel, flags);
if (IS_ERR(bo)) {
err = PTR_ERR(bo);
bo = NULL;
return err;
}
- err = xe_bo_pin(bo);
- xe_bo_unlock_vm_held(bo);
-
- if (err) {
- xe_bo_put(fb->bo);
- bo = NULL;
- }
fb->bo = bo;
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
index 9b7572e06f34..b8269391bc69 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
@@ -12,7 +12,6 @@
#include <drm/drm_drv.h>
-#include "i915_utils.h"
#include "xe_device.h" /* for xe_device_has_flat_ccs() */
#include "xe_device_types.h"
@@ -26,34 +25,13 @@ static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
#define IS_I915G(dev_priv) (dev_priv && 0)
#define IS_I915GM(dev_priv) (dev_priv && 0)
#define IS_PINEVIEW(dev_priv) (dev_priv && 0)
-#define IS_IVYBRIDGE(dev_priv) (dev_priv && 0)
#define IS_VALLEYVIEW(dev_priv) (dev_priv && 0)
#define IS_CHERRYVIEW(dev_priv) (dev_priv && 0)
#define IS_HASWELL(dev_priv) (dev_priv && 0)
#define IS_BROADWELL(dev_priv) (dev_priv && 0)
-#define IS_SKYLAKE(dev_priv) (dev_priv && 0)
#define IS_BROXTON(dev_priv) (dev_priv && 0)
-#define IS_KABYLAKE(dev_priv) (dev_priv && 0)
#define IS_GEMINILAKE(dev_priv) (dev_priv && 0)
-#define IS_COFFEELAKE(dev_priv) (dev_priv && 0)
-#define IS_COMETLAKE(dev_priv) (dev_priv && 0)
-#define IS_ICELAKE(dev_priv) (dev_priv && 0)
-#define IS_JASPERLAKE(dev_priv) (dev_priv && 0)
-#define IS_ELKHARTLAKE(dev_priv) (dev_priv && 0)
-#define IS_TIGERLAKE(dev_priv) IS_PLATFORM(dev_priv, XE_TIGERLAKE)
-#define IS_ROCKETLAKE(dev_priv) IS_PLATFORM(dev_priv, XE_ROCKETLAKE)
-#define IS_DG1(dev_priv) IS_PLATFORM(dev_priv, XE_DG1)
-#define IS_ALDERLAKE_S(dev_priv) IS_PLATFORM(dev_priv, XE_ALDERLAKE_S)
-#define IS_ALDERLAKE_P(dev_priv) (IS_PLATFORM(dev_priv, XE_ALDERLAKE_P) || \
- IS_PLATFORM(dev_priv, XE_ALDERLAKE_N))
#define IS_DG2(dev_priv) IS_PLATFORM(dev_priv, XE_DG2)
-#define IS_METEORLAKE(dev_priv) IS_PLATFORM(dev_priv, XE_METEORLAKE)
-#define IS_LUNARLAKE(dev_priv) IS_PLATFORM(dev_priv, XE_LUNARLAKE)
-#define IS_BATTLEMAGE(dev_priv) IS_PLATFORM(dev_priv, XE_BATTLEMAGE)
-#define IS_PANTHERLAKE(dev_priv) IS_PLATFORM(dev_priv, XE_PANTHERLAKE)
-
-#define IS_HASWELL_ULT(dev_priv) (dev_priv && 0)
-#define IS_BROADWELL_ULT(dev_priv) (dev_priv && 0)
#define IS_MOBILE(xe) (xe && 0)
diff --git a/drivers/gpu/drm/xe/display/ext/i915_utils.c b/drivers/gpu/drm/xe/display/ext/i915_utils.c
index 43b10a2cc508..1421c2a7b64d 100644
--- a/drivers/gpu/drm/xe/display/ext/i915_utils.c
+++ b/drivers/gpu/drm/xe/display/ext/i915_utils.c
@@ -4,6 +4,7 @@
*/
#include "i915_drv.h"
+#include "i915_utils.h"
bool i915_vtd_active(struct drm_i915_private *i915)
{
diff --git a/drivers/gpu/drm/xe/display/intel_bo.c b/drivers/gpu/drm/xe/display/intel_bo.c
index 910632f57c3d..27437c22bd70 100644
--- a/drivers/gpu/drm/xe/display/intel_bo.c
+++ b/drivers/gpu/drm/xe/display/intel_bo.c
@@ -1,12 +1,7 @@
// SPDX-License-Identifier: MIT
/* Copyright © 2024 Intel Corporation */
-#include <drm/drm_cache.h>
#include <drm/drm_gem.h>
-#include <drm/drm_panic.h>
-
-#include "intel_fb.h"
-#include "intel_display_types.h"
#include "xe_bo.h"
#include "intel_bo.h"
@@ -64,89 +59,3 @@ void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj)
{
/* FIXME */
}
-
-struct xe_panic_data {
- struct page **pages;
- int page;
- void *vaddr;
-};
-
-struct xe_framebuffer {
- struct intel_framebuffer base;
- struct xe_panic_data panic;
-};
-
-static inline struct xe_panic_data *to_xe_panic_data(struct intel_framebuffer *fb)
-{
- return &container_of_const(fb, struct xe_framebuffer, base)->panic;
-}
-
-static void xe_panic_kunmap(struct xe_panic_data *panic)
-{
- if (panic->vaddr) {
- drm_clflush_virt_range(panic->vaddr, PAGE_SIZE);
- kunmap_local(panic->vaddr);
- panic->vaddr = NULL;
- }
-}
-
-/*
- * The scanout buffer pages are not mapped, so for each pixel,
- * use kmap_local_page_try_from_panic() to map the page, and write the pixel.
- * Try to keep the map from the previous pixel, to avoid too much map/unmap.
- */
-static void xe_panic_page_set_pixel(struct drm_scanout_buffer *sb, unsigned int x,
- unsigned int y, u32 color)
-{
- struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private;
- struct xe_panic_data *panic = to_xe_panic_data(fb);
- struct xe_bo *bo = gem_to_xe_bo(intel_fb_bo(&fb->base));
- unsigned int new_page;
- unsigned int offset;
-
- if (fb->panic_tiling)
- offset = fb->panic_tiling(sb->width, x, y);
- else
- offset = y * sb->pitch[0] + x * sb->format->cpp[0];
-
- new_page = offset >> PAGE_SHIFT;
- offset = offset % PAGE_SIZE;
- if (new_page != panic->page) {
- xe_panic_kunmap(panic);
- panic->page = new_page;
- panic->vaddr = ttm_bo_kmap_try_from_panic(&bo->ttm,
- panic->page);
- }
- if (panic->vaddr) {
- u32 *pix = panic->vaddr + offset;
- *pix = color;
- }
-}
-
-struct intel_framebuffer *intel_bo_alloc_framebuffer(void)
-{
- struct xe_framebuffer *xe_fb;
-
- xe_fb = kzalloc(sizeof(*xe_fb), GFP_KERNEL);
- if (xe_fb)
- return &xe_fb->base;
- return NULL;
-}
-
-int intel_bo_panic_setup(struct drm_scanout_buffer *sb)
-{
- struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private;
- struct xe_panic_data *panic = to_xe_panic_data(fb);
-
- panic->page = -1;
- sb->set_pixel = xe_panic_page_set_pixel;
- return 0;
-}
-
-void intel_bo_panic_finish(struct intel_framebuffer *fb)
-{
- struct xe_panic_data *panic = to_xe_panic_data(fb);
-
- xe_panic_kunmap(panic);
- panic->page = -1;
-}
diff --git a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
index fba9617a75a5..8ea9a472113c 100644
--- a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
+++ b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
@@ -41,12 +41,12 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
size = PAGE_ALIGN(size);
obj = ERR_PTR(-ENODEV);
- if (!IS_DGFX(xe) && !XE_WA(xe_root_mmio_gt(xe), 22019338487_display)) {
- obj = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe),
- NULL, size,
- ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
- XE_BO_FLAG_STOLEN |
- XE_BO_FLAG_GGTT);
+ if (!IS_DGFX(xe) && !XE_GT_WA(xe_root_mmio_gt(xe), 22019338487_display)) {
+ obj = xe_bo_create_pin_map_novm(xe, xe_device_get_root_tile(xe),
+ size,
+ ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
+ XE_BO_FLAG_STOLEN |
+ XE_BO_FLAG_GGTT, false);
if (!IS_ERR(obj))
drm_info(&xe->drm, "Allocated fbdev into stolen\n");
else
@@ -54,10 +54,10 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
}
if (IS_ERR(obj)) {
- obj = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe), NULL, size,
- ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
- XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
- XE_BO_FLAG_GGTT);
+ obj = xe_bo_create_pin_map_novm(xe, xe_device_get_root_tile(xe), size,
+ ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
+ XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
+ XE_BO_FLAG_GGTT, false);
}
if (IS_ERR(obj)) {
diff --git a/drivers/gpu/drm/xe/display/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c
index e2e0771cf274..19e691fccf8c 100644
--- a/drivers/gpu/drm/xe/display/xe_display.c
+++ b/drivers/gpu/drm/xe/display/xe_display.c
@@ -20,7 +20,7 @@
#include "intel_audio.h"
#include "intel_bw.h"
#include "intel_display.h"
-#include "intel_display_core.h"
+#include "intel_display_device.h"
#include "intel_display_driver.h"
#include "intel_display_irq.h"
#include "intel_display_types.h"
@@ -37,13 +37,6 @@
/* Xe device functions */
-static bool has_display(struct xe_device *xe)
-{
- struct intel_display *display = xe->display;
-
- return HAS_DISPLAY(display);
-}
-
/**
* xe_display_driver_probe_defer - Detect if we need to wait for other drivers
* early on
@@ -96,6 +89,7 @@ static void xe_display_fini_early(void *arg)
if (!xe->info.probe_display)
return;
+ intel_hpd_cancel_work(display);
intel_display_driver_remove_nogem(display);
intel_display_driver_remove_noirq(display);
intel_opregion_cleanup(display);
@@ -289,7 +283,7 @@ static void xe_display_enable_d3cold(struct xe_device *xe)
intel_dmc_suspend(display);
- if (has_display(xe))
+ if (intel_display_device_present(display))
intel_hpd_poll_enable(display);
}
@@ -302,14 +296,14 @@ static void xe_display_disable_d3cold(struct xe_device *xe)
intel_dmc_resume(display);
- if (has_display(xe))
+ if (intel_display_device_present(display))
drm_mode_config_reset(&xe->drm);
intel_display_driver_init_hw(display);
intel_hpd_init(display);
- if (has_display(xe))
+ if (intel_display_device_present(display))
intel_hpd_poll_disable(display);
intel_opregion_resume(display);
@@ -332,7 +326,7 @@ void xe_display_pm_suspend(struct xe_device *xe)
intel_power_domains_disable(display);
drm_client_dev_suspend(&xe->drm, false);
- if (has_display(xe)) {
+ if (intel_display_device_present(display)) {
drm_kms_helper_poll_disable(&xe->drm);
intel_display_driver_disable_user_access(display);
intel_display_driver_suspend(display);
@@ -340,9 +334,11 @@ void xe_display_pm_suspend(struct xe_device *xe)
xe_display_flush_cleanup_work(xe);
+ intel_encoder_block_all_hpds(display);
+
intel_hpd_cancel_work(display);
- if (has_display(xe)) {
+ if (intel_display_device_present(display)) {
intel_display_driver_suspend_access(display);
intel_encoder_suspend_all(display);
}
@@ -362,7 +358,7 @@ void xe_display_pm_shutdown(struct xe_device *xe)
intel_power_domains_disable(display);
drm_client_dev_suspend(&xe->drm, false);
- if (has_display(xe)) {
+ if (intel_display_device_present(display)) {
drm_kms_helper_poll_disable(&xe->drm);
intel_display_driver_disable_user_access(display);
intel_display_driver_suspend(display);
@@ -370,9 +366,10 @@ void xe_display_pm_shutdown(struct xe_device *xe)
xe_display_flush_cleanup_work(xe);
intel_dp_mst_suspend(display);
+ intel_encoder_block_all_hpds(display);
intel_hpd_cancel_work(display);
- if (has_display(xe))
+ if (intel_display_device_present(display))
intel_display_driver_suspend_access(display);
intel_encoder_suspend_all(display);
@@ -461,23 +458,25 @@ void xe_display_pm_resume(struct xe_device *xe)
intel_dmc_resume(display);
- if (has_display(xe))
+ if (intel_display_device_present(display))
drm_mode_config_reset(&xe->drm);
intel_display_driver_init_hw(display);
- if (has_display(xe))
+ if (intel_display_device_present(display))
intel_display_driver_resume_access(display);
intel_hpd_init(display);
- if (has_display(xe)) {
+ intel_encoder_unblock_all_hpds(display);
+
+ if (intel_display_device_present(display)) {
intel_display_driver_resume(display);
drm_kms_helper_poll_enable(&xe->drm);
intel_display_driver_enable_user_access(display);
}
- if (has_display(xe))
+ if (intel_display_device_present(display))
intel_hpd_poll_disable(display);
intel_opregion_resume(display);
@@ -542,7 +541,7 @@ int xe_display_probe(struct xe_device *xe)
xe->display = display;
- if (has_display(xe))
+ if (intel_display_device_present(display))
return 0;
no_display:
diff --git a/drivers/gpu/drm/xe/display/xe_display_wa.c b/drivers/gpu/drm/xe/display/xe_display_wa.c
index 68d1387d81a0..8ada1cbcb16c 100644
--- a/drivers/gpu/drm/xe/display/xe_display_wa.c
+++ b/drivers/gpu/drm/xe/display/xe_display_wa.c
@@ -14,5 +14,5 @@ bool intel_display_needs_wa_16023588340(struct intel_display *display)
{
struct xe_device *xe = to_xe_device(display->drm);
- return XE_WA(xe_root_mmio_gt(xe), 16023588340);
+ return XE_GT_WA(xe_root_mmio_gt(xe), 16023588340);
}
diff --git a/drivers/gpu/drm/xe/display/xe_dsb_buffer.c b/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
index 9f941fc2e36b..58581d7aaae6 100644
--- a/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
+++ b/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
@@ -43,11 +43,11 @@ bool intel_dsb_buffer_create(struct intel_crtc *crtc, struct intel_dsb_buffer *d
return false;
/* Set scanout flag for WC mapping */
- obj = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe),
- NULL, PAGE_ALIGN(size),
- ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
- XE_BO_FLAG_SCANOUT | XE_BO_FLAG_GGTT);
+ obj = xe_bo_create_pin_map_novm(xe, xe_device_get_root_tile(xe),
+ PAGE_ALIGN(size),
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
+ XE_BO_FLAG_SCANOUT | XE_BO_FLAG_GGTT, false);
if (IS_ERR(obj)) {
kfree(vma);
return false;
diff --git a/drivers/gpu/drm/xe/display/xe_fb_pin.c b/drivers/gpu/drm/xe/display/xe_fb_pin.c
index c38fba18effe..1fd4a815e784 100644
--- a/drivers/gpu/drm/xe/display/xe_fb_pin.c
+++ b/drivers/gpu/drm/xe/display/xe_fb_pin.c
@@ -16,6 +16,7 @@
#include "xe_device.h"
#include "xe_ggtt.h"
#include "xe_pm.h"
+#include "xe_vram_types.h"
static void
write_dpt_rotated(struct xe_bo *bo, struct iosys_map *map, u32 *dpt_ofs, u32 bo_ofs,
@@ -101,29 +102,29 @@ static int __xe_pin_fb_vma_dpt(const struct intel_framebuffer *fb,
XE_PAGE_SIZE);
if (IS_DGFX(xe))
- dpt = xe_bo_create_pin_map_at_aligned(xe, tile0, NULL,
- dpt_size, ~0ull,
- ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM0 |
- XE_BO_FLAG_GGTT |
- XE_BO_FLAG_PAGETABLE,
- alignment);
+ dpt = xe_bo_create_pin_map_at_novm(xe, tile0,
+ dpt_size, ~0ull,
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_VRAM0 |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_PAGETABLE,
+ alignment, false);
else
- dpt = xe_bo_create_pin_map_at_aligned(xe, tile0, NULL,
- dpt_size, ~0ull,
- ttm_bo_type_kernel,
- XE_BO_FLAG_STOLEN |
- XE_BO_FLAG_GGTT |
- XE_BO_FLAG_PAGETABLE,
- alignment);
+ dpt = xe_bo_create_pin_map_at_novm(xe, tile0,
+ dpt_size, ~0ull,
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_STOLEN |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_PAGETABLE,
+ alignment, false);
if (IS_ERR(dpt))
- dpt = xe_bo_create_pin_map_at_aligned(xe, tile0, NULL,
- dpt_size, ~0ull,
- ttm_bo_type_kernel,
- XE_BO_FLAG_SYSTEM |
- XE_BO_FLAG_GGTT |
- XE_BO_FLAG_PAGETABLE,
- alignment);
+ dpt = xe_bo_create_pin_map_at_novm(xe, tile0,
+ dpt_size, ~0ull,
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_PAGETABLE,
+ alignment, false);
if (IS_ERR(dpt))
return PTR_ERR(dpt);
@@ -280,7 +281,9 @@ static struct i915_vma *__xe_pin_fb_vma(const struct intel_framebuffer *fb,
struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
struct drm_gem_object *obj = intel_fb_bo(&fb->base);
struct xe_bo *bo = gem_to_xe_bo(obj);
- int ret;
+ struct xe_validation_ctx ctx;
+ struct drm_exec exec;
+ int ret = 0;
if (!vma)
return ERR_PTR(-ENODEV);
@@ -289,7 +292,7 @@ static struct i915_vma *__xe_pin_fb_vma(const struct intel_framebuffer *fb,
if (IS_DGFX(to_xe_device(bo->ttm.base.dev)) &&
intel_fb_rc_ccs_cc_plane(&fb->base) >= 0 &&
!(bo->flags & XE_BO_FLAG_NEEDS_CPU_ACCESS)) {
- struct xe_tile *tile = xe_device_get_root_tile(xe);
+ struct xe_vram_region *vram = xe_device_get_root_tile(xe)->mem.vram;
/*
* If we need to able to access the clear-color value stored in
@@ -297,7 +300,7 @@ static struct i915_vma *__xe_pin_fb_vma(const struct intel_framebuffer *fb,
* accessible. This is important on small-bar systems where
* only some subset of VRAM is CPU accessible.
*/
- if (tile->mem.vram.io_size < tile->mem.vram.usable_size) {
+ if (xe_vram_region_io_size(vram) < xe_vram_region_usable_size(vram)) {
ret = -EINVAL;
goto err;
}
@@ -307,17 +310,22 @@ static struct i915_vma *__xe_pin_fb_vma(const struct intel_framebuffer *fb,
* Pin the framebuffer, we can't use xe_bo_(un)pin functions as the
* assumptions are incorrect for framebuffers
*/
- ret = ttm_bo_reserve(&bo->ttm, false, false, NULL);
- if (ret)
- goto err;
-
- if (IS_DGFX(xe))
- ret = xe_bo_migrate(bo, XE_PL_VRAM0);
- else
- ret = xe_bo_validate(bo, NULL, true);
- if (!ret)
- ttm_bo_pin(&bo->ttm);
- ttm_bo_unreserve(&bo->ttm);
+ xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.interruptible = true},
+ ret) {
+ ret = drm_exec_lock_obj(&exec, &bo->ttm.base);
+ drm_exec_retry_on_contention(&exec);
+ if (ret)
+ break;
+
+ if (IS_DGFX(xe))
+ ret = xe_bo_migrate(bo, XE_PL_VRAM0, NULL, &exec);
+ else
+ ret = xe_bo_validate(bo, NULL, true, &exec);
+ drm_exec_retry_on_contention(&exec);
+ xe_validation_retry_on_oom(&ctx, &ret);
+ if (!ret)
+ ttm_bo_pin(&bo->ttm);
+ }
if (ret)
goto err;
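The xe_validation_guard()/drm_exec_retry_on_contention() pairing above restarts the guarded block whenever lock contention is detected, replacing the old single ttm_bo_reserve() attempt. A minimal sketch of the underlying drm_exec idiom (the Xe wrapper layers validation bookkeeping on top; the function name below is generic):

	#include <drm/drm_exec.h>

	static int lock_and_pin(struct drm_gem_object *obj)
	{
		struct drm_exec exec;
		int ret = 0;

		drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
		drm_exec_until_all_locked(&exec) {
			ret = drm_exec_lock_obj(&exec, obj);
			drm_exec_retry_on_contention(&exec);
			if (ret)
				break;
			/* ... migrate/validate/pin while the object is locked ... */
		}
		drm_exec_fini(&exec);

		return ret;
	}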
@@ -382,6 +390,7 @@ static bool reuse_vma(struct intel_plane_state *new_plane_state,
const struct intel_plane_state *old_plane_state)
{
struct intel_framebuffer *fb = to_intel_framebuffer(new_plane_state->hw.fb);
+ struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane);
struct xe_device *xe = to_xe_device(fb->base.dev);
struct intel_display *display = xe->display;
struct i915_vma *vma;
@@ -405,6 +414,10 @@ static bool reuse_vma(struct intel_plane_state *new_plane_state,
found:
refcount_inc(&vma->ref);
new_plane_state->ggtt_vma = vma;
+
+ new_plane_state->surf = i915_ggtt_offset(new_plane_state->ggtt_vma) +
+ plane->surf_offset(new_plane_state);
+
return true;
}
@@ -431,6 +444,10 @@ int intel_plane_pin_fb(struct intel_plane_state *new_plane_state,
return PTR_ERR(vma);
new_plane_state->ggtt_vma = vma;
+
+ new_plane_state->surf = i915_ggtt_offset(new_plane_state->ggtt_vma) +
+ plane->surf_offset(new_plane_state);
+
return 0;
}
diff --git a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
index 30f1073141fc..4ae847b628e2 100644
--- a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
+++ b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
@@ -72,10 +72,10 @@ static int intel_hdcp_gsc_initialize_message(struct xe_device *xe,
int ret = 0;
/* allocate object of two page for HDCP command memory and store it */
- bo = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe), NULL, PAGE_SIZE * 2,
- ttm_bo_type_kernel,
- XE_BO_FLAG_SYSTEM |
- XE_BO_FLAG_GGTT);
+ bo = xe_bo_create_pin_map_novm(xe, xe_device_get_root_tile(xe), PAGE_SIZE * 2,
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_GGTT, false);
if (IS_ERR(bo)) {
drm_err(&xe->drm, "Failed to allocate bo for HDCP streaming command!\n");
diff --git a/drivers/gpu/drm/xe/display/xe_panic.c b/drivers/gpu/drm/xe/display/xe_panic.c
new file mode 100644
index 000000000000..f32b23338331
--- /dev/null
+++ b/drivers/gpu/drm/xe/display/xe_panic.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: MIT
+/* Copyright © 2025 Intel Corporation */
+
+#include <drm/drm_cache.h>
+#include <drm/drm_panic.h>
+
+#include "intel_display_types.h"
+#include "intel_fb.h"
+#include "intel_panic.h"
+#include "xe_bo.h"
+
+struct intel_panic {
+ struct page **pages;
+ int page;
+ void *vaddr;
+};
+
+static void xe_panic_kunmap(struct intel_panic *panic)
+{
+ if (panic->vaddr) {
+ drm_clflush_virt_range(panic->vaddr, PAGE_SIZE);
+ kunmap_local(panic->vaddr);
+ panic->vaddr = NULL;
+ }
+}
+
+/*
+ * The scanout buffer pages are not mapped, so for each pixel,
+ * use kmap_local_page_try_from_panic() to map the page, and write the pixel.
+ * Try to keep the map from the previous pixel, to avoid too much map/unmap.
+ */
+static void xe_panic_page_set_pixel(struct drm_scanout_buffer *sb, unsigned int x,
+ unsigned int y, u32 color)
+{
+ struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private;
+ struct intel_panic *panic = fb->panic;
+ struct xe_bo *bo = gem_to_xe_bo(intel_fb_bo(&fb->base));
+ unsigned int new_page;
+ unsigned int offset;
+
+ if (fb->panic_tiling)
+ offset = fb->panic_tiling(sb->width, x, y);
+ else
+ offset = y * sb->pitch[0] + x * sb->format->cpp[0];
+
+ new_page = offset >> PAGE_SHIFT;
+ offset = offset % PAGE_SIZE;
+ if (new_page != panic->page) {
+ xe_panic_kunmap(panic);
+ panic->page = new_page;
+ panic->vaddr = ttm_bo_kmap_try_from_panic(&bo->ttm,
+ panic->page);
+ }
+ if (panic->vaddr) {
+ u32 *pix = panic->vaddr + offset;
+ *pix = color;
+ }
+}
+
+struct intel_panic *intel_panic_alloc(void)
+{
+ struct intel_panic *panic;
+
+ panic = kzalloc(sizeof(*panic), GFP_KERNEL);
+
+ return panic;
+}
+
+int intel_panic_setup(struct intel_panic *panic, struct drm_scanout_buffer *sb)
+{
+ panic->page = -1;
+ sb->set_pixel = xe_panic_page_set_pixel;
+ return 0;
+}
+
+void intel_panic_finish(struct intel_panic *panic)
+{
+ xe_panic_kunmap(panic);
+ panic->page = -1;
+}
diff --git a/drivers/gpu/drm/xe/display/xe_plane_initial.c b/drivers/gpu/drm/xe/display/xe_plane_initial.c
index dcbc4b2d3fd9..94f00def811b 100644
--- a/drivers/gpu/drm/xe/display/xe_plane_initial.c
+++ b/drivers/gpu/drm/xe/display/xe_plane_initial.c
@@ -10,6 +10,7 @@
#include "xe_ggtt.h"
#include "xe_mmio.h"
+#include "i915_vma.h"
#include "intel_crtc.h"
#include "intel_display.h"
#include "intel_display_core.h"
@@ -21,6 +22,7 @@
#include "intel_plane.h"
#include "intel_plane_initial.h"
#include "xe_bo.h"
+#include "xe_vram_types.h"
#include "xe_wa.h"
#include <generated/xe_wa_oob.h>
@@ -103,7 +105,7 @@ initial_plane_bo(struct xe_device *xe,
* We don't currently expect this to ever be placed in the
* stolen portion.
*/
- if (phys_base >= tile0->mem.vram.usable_size) {
+ if (phys_base >= xe_vram_region_usable_size(tile0->mem.vram)) {
drm_err(&xe->drm,
"Initial plane programming using invalid range, phys_base=%pa\n",
&phys_base);
@@ -121,7 +123,7 @@ initial_plane_bo(struct xe_device *xe,
phys_base = base;
flags |= XE_BO_FLAG_STOLEN;
- if (XE_WA(xe_root_mmio_gt(xe), 22019338487_display))
+ if (XE_GT_WA(xe_root_mmio_gt(xe), 22019338487_display))
return NULL;
/*
@@ -138,8 +140,8 @@ initial_plane_bo(struct xe_device *xe,
page_size);
size -= base;
- bo = xe_bo_create_pin_map_at(xe, tile0, NULL, size, phys_base,
- ttm_bo_type_kernel, flags);
+ bo = xe_bo_create_pin_map_at_novm(xe, tile0, size, phys_base,
+ ttm_bo_type_kernel, flags, 0, false);
if (IS_ERR(bo)) {
drm_dbg(&xe->drm,
"Failed to create bo phys_base=%pa size %u with flags %x: %li\n",
@@ -234,6 +236,9 @@ intel_find_initial_plane_obj(struct intel_crtc *crtc,
goto nofb;
plane_state->ggtt_vma = vma;
+
+ plane_state->surf = i915_ggtt_offset(plane_state->ggtt_vma);
+
plane_state->uapi.src_x = 0;
plane_state->uapi.src_y = 0;
plane_state->uapi.src_w = fb->width << 16;
diff --git a/drivers/gpu/drm/xe/instructions/xe_mi_commands.h b/drivers/gpu/drm/xe/instructions/xe_mi_commands.h
index e3f5e8bb3ebc..c47b290e0e9f 100644
--- a/drivers/gpu/drm/xe/instructions/xe_mi_commands.h
+++ b/drivers/gpu/drm/xe/instructions/xe_mi_commands.h
@@ -65,6 +65,7 @@
#define MI_LOAD_REGISTER_MEM (__MI_INSTR(0x29) | XE_INSTR_NUM_DW(4))
#define MI_LRM_USE_GGTT REG_BIT(22)
+#define MI_LRM_ASYNC REG_BIT(21)
#define MI_LOAD_REGISTER_REG (__MI_INSTR(0x2a) | XE_INSTR_NUM_DW(3))
#define MI_LRR_DST_CS_MMIO REG_BIT(19)
diff --git a/drivers/gpu/drm/xe/regs/xe_engine_regs.h b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
index 7ade41e2b7b3..f4c3e1187a00 100644
--- a/drivers/gpu/drm/xe/regs/xe_engine_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
@@ -111,6 +111,9 @@
#define PPHWSP_CSB_AND_TIMESTAMP_REPORT_DIS REG_BIT(14)
#define CS_PRIORITY_MEM_READ REG_BIT(7)
+#define CS_DEBUG_MODE2(base) XE_REG((base) + 0xd8, XE_REG_OPTION_MASKED)
+#define INSTRUCTION_STATE_CACHE_INVALIDATE REG_BIT(6)
+
#define FF_SLICE_CS_CHICKEN1(base) XE_REG((base) + 0xe0, XE_REG_OPTION_MASKED)
#define FFSC_PERCTX_PREEMPT_CTRL REG_BIT(14)
diff --git a/drivers/gpu/drm/xe/regs/xe_gsc_regs.h b/drivers/gpu/drm/xe/regs/xe_gsc_regs.h
index 9b66cc972a63..180be82672ab 100644
--- a/drivers/gpu/drm/xe/regs/xe_gsc_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_gsc_regs.h
@@ -13,6 +13,8 @@
/* Definitions of GSC H/W registers, bits, etc */
+#define BMG_GSC_HECI1_BASE 0x373000
+
#define MTL_GSC_HECI1_BASE 0x00116000
#define MTL_GSC_HECI2_BASE 0x00117000
diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
index 5cd5ab8529c5..06cb6b02ec64 100644
--- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
@@ -42,7 +42,7 @@
#define FORCEWAKE_ACK_GSC XE_REG(0xdf8)
#define FORCEWAKE_ACK_GT_MTL XE_REG(0xdfc)
-#define MCFG_MCR_SELECTOR XE_REG(0xfd0)
+#define STEER_SEMAPHORE XE_REG(0xfd0)
#define MTL_MCR_SELECTOR XE_REG(0xfd4)
#define SF_MCR_SELECTOR XE_REG(0xfd8)
#define MCR_SELECTOR XE_REG(0xfdc)
@@ -522,6 +522,7 @@
#define TDL_CHICKEN XE_REG_MCR(0xe5f4, XE_REG_OPTION_MASKED)
#define QID_WAIT_FOR_THREAD_NOT_RUN_DISABLE REG_BIT(12)
+#define EUSTALL_PERF_SAMPLING_DISABLE REG_BIT(5)
#define LSC_CHICKEN_BIT_0 XE_REG_MCR(0xe7c8)
#define DISABLE_D8_D16_COASLESCE REG_BIT(30)
diff --git a/drivers/gpu/drm/xe/regs/xe_hw_error_regs.h b/drivers/gpu/drm/xe/regs/xe_hw_error_regs.h
new file mode 100644
index 000000000000..c146b9ef44eb
--- /dev/null
+++ b/drivers/gpu/drm/xe/regs/xe_hw_error_regs.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_HW_ERROR_REGS_H_
+#define _XE_HW_ERROR_REGS_H_
+
+#define HEC_UNCORR_ERR_STATUS(base) XE_REG((base) + 0x118)
+#define UNCORR_FW_REPORTED_ERR BIT(6)
+
+#define HEC_UNCORR_FW_ERR_DW0(base) XE_REG((base) + 0x124)
+
+#define DEV_ERR_STAT_NONFATAL 0x100178
+#define DEV_ERR_STAT_CORRECTABLE 0x10017c
+#define DEV_ERR_STAT_REG(x) XE_REG(_PICK_EVEN((x), \
+ DEV_ERR_STAT_CORRECTABLE, \
+ DEV_ERR_STAT_NONFATAL))
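+
+/*
+ * Illustrative expansion, assuming _PICK_EVEN(i, a, b) evaluates to
+ * a + i * (b - a): DEV_ERR_STAT_REG(0) selects the correctable register
+ * at 0x10017c, and DEV_ERR_STAT_REG(1) steps back by 4 to the non-fatal
+ * one at 0x100178.
+ */
+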
+#define XE_CSC_ERROR BIT(17)
+#endif
diff --git a/drivers/gpu/drm/xe/regs/xe_irq_regs.h b/drivers/gpu/drm/xe/regs/xe_irq_regs.h
index 13635e4331d4..7c2a3a140142 100644
--- a/drivers/gpu/drm/xe/regs/xe_irq_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_irq_regs.h
@@ -18,6 +18,7 @@
#define GFX_MSTR_IRQ XE_REG(0x190010, XE_REG_OPTION_VF)
#define MASTER_IRQ REG_BIT(31)
#define GU_MISC_IRQ REG_BIT(29)
+#define ERROR_IRQ(x) REG_BIT(26 + (x))
#define DISPLAY_IRQ REG_BIT(16)
#define I2C_IRQ REG_BIT(12)
#define GT_DW_IRQ(x) REG_BIT(x)
diff --git a/drivers/gpu/drm/xe/regs/xe_lrc_layout.h b/drivers/gpu/drm/xe/regs/xe_lrc_layout.h
index 1b101edb838b..b5eff383902c 100644
--- a/drivers/gpu/drm/xe/regs/xe_lrc_layout.h
+++ b/drivers/gpu/drm/xe/regs/xe_lrc_layout.h
@@ -40,7 +40,4 @@
#define INDIRECT_CTX_RING_START_UDW (0x08 + 1)
#define INDIRECT_CTX_RING_CTL (0x0a + 1)
-#define CTX_INDIRECT_CTX_OFFSET_MASK REG_GENMASK(15, 6)
-#define CTX_INDIRECT_CTX_OFFSET_DEFAULT REG_FIELD_PREP(CTX_INDIRECT_CTX_OFFSET_MASK, 0xd)
-
#endif
diff --git a/drivers/gpu/drm/xe/regs/xe_pmt.h b/drivers/gpu/drm/xe/regs/xe_pmt.h
index 2995d72c3f78..264e9baf949c 100644
--- a/drivers/gpu/drm/xe/regs/xe_pmt.h
+++ b/drivers/gpu/drm/xe/regs/xe_pmt.h
@@ -21,4 +21,14 @@
#define SG_REMAP_INDEX1 XE_REG(SOC_BASE + 0x08)
#define SG_REMAP_BITS REG_GENMASK(31, 24)
+#define BMG_MODS_RESIDENCY_OFFSET (0x4D0)
+#define BMG_G2_RESIDENCY_OFFSET (0x530)
+#define BMG_G6_RESIDENCY_OFFSET (0x538)
+#define BMG_G8_RESIDENCY_OFFSET (0x540)
+#define BMG_G10_RESIDENCY_OFFSET (0x548)
+
+#define BMG_PCIE_LINK_L0_RESIDENCY_OFFSET (0x570)
+#define BMG_PCIE_LINK_L1_RESIDENCY_OFFSET (0x578)
+#define BMG_PCIE_LINK_L1_2_RESIDENCY_OFFSET (0x580)
+
#endif
diff --git a/drivers/gpu/drm/xe/tests/xe_bo.c b/drivers/gpu/drm/xe/tests/xe_bo.c
index 7b40cc8be1c9..2294cf89f3e1 100644
--- a/drivers/gpu/drm/xe/tests/xe_bo.c
+++ b/drivers/gpu/drm/xe/tests/xe_bo.c
@@ -23,7 +23,7 @@
static int ccs_test_migrate(struct xe_tile *tile, struct xe_bo *bo,
bool clear, u64 get_val, u64 assign_val,
- struct kunit *test)
+ struct kunit *test, struct drm_exec *exec)
{
struct dma_fence *fence;
struct ttm_tt *ttm;
@@ -35,7 +35,7 @@ static int ccs_test_migrate(struct xe_tile *tile, struct xe_bo *bo,
u32 offset;
/* Move bo to VRAM if not already there. */
- ret = xe_bo_validate(bo, NULL, false);
+ ret = xe_bo_validate(bo, NULL, false, exec);
if (ret) {
KUNIT_FAIL(test, "Failed to validate bo.\n");
return ret;
@@ -60,7 +60,7 @@ static int ccs_test_migrate(struct xe_tile *tile, struct xe_bo *bo,
}
/* Evict to system. CCS data should be copied. */
- ret = xe_bo_evict(bo);
+ ret = xe_bo_evict(bo, exec);
if (ret) {
KUNIT_FAIL(test, "Failed to evict bo.\n");
return ret;
@@ -132,14 +132,15 @@ static void ccs_test_run_tile(struct xe_device *xe, struct xe_tile *tile,
/* TODO: Sanity check */
unsigned int bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile);
+ struct drm_exec *exec = XE_VALIDATION_OPT_OUT;
if (IS_DGFX(xe))
kunit_info(test, "Testing vram id %u\n", tile->id);
else
kunit_info(test, "Testing system memory\n");
- bo = xe_bo_create_user(xe, NULL, NULL, SZ_1M, DRM_XE_GEM_CPU_CACHING_WC,
- bo_flags);
+ bo = xe_bo_create_user(xe, NULL, SZ_1M, DRM_XE_GEM_CPU_CACHING_WC,
+ bo_flags, exec);
if (IS_ERR(bo)) {
KUNIT_FAIL(test, "Failed to create bo.\n");
return;
@@ -149,18 +150,18 @@ static void ccs_test_run_tile(struct xe_device *xe, struct xe_tile *tile,
kunit_info(test, "Verifying that CCS data is cleared on creation.\n");
ret = ccs_test_migrate(tile, bo, false, 0ULL, 0xdeadbeefdeadbeefULL,
- test);
+ test, exec);
if (ret)
goto out_unlock;
kunit_info(test, "Verifying that CCS data survives migration.\n");
ret = ccs_test_migrate(tile, bo, false, 0xdeadbeefdeadbeefULL,
- 0xdeadbeefdeadbeefULL, test);
+ 0xdeadbeefdeadbeefULL, test, exec);
if (ret)
goto out_unlock;
kunit_info(test, "Verifying that CCS data can be properly cleared.\n");
- ret = ccs_test_migrate(tile, bo, true, 0ULL, 0ULL, test);
+ ret = ccs_test_migrate(tile, bo, true, 0ULL, 0ULL, test, exec);
out_unlock:
xe_bo_unlock(bo);
@@ -210,6 +211,7 @@ static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struc
struct xe_bo *bo, *external;
unsigned int bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile);
struct xe_vm *vm = xe_migrate_get_vm(xe_device_get_root_tile(xe)->migrate);
+ struct drm_exec *exec = XE_VALIDATION_OPT_OUT;
struct xe_gt *__gt;
int err, i, id;
@@ -218,25 +220,25 @@ static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struc
for (i = 0; i < 2; ++i) {
xe_vm_lock(vm, false);
- bo = xe_bo_create_user(xe, NULL, vm, 0x10000,
+ bo = xe_bo_create_user(xe, vm, 0x10000,
DRM_XE_GEM_CPU_CACHING_WC,
- bo_flags);
+ bo_flags, exec);
xe_vm_unlock(vm);
if (IS_ERR(bo)) {
KUNIT_FAIL(test, "bo create err=%pe\n", bo);
break;
}
- external = xe_bo_create_user(xe, NULL, NULL, 0x10000,
+ external = xe_bo_create_user(xe, NULL, 0x10000,
DRM_XE_GEM_CPU_CACHING_WC,
- bo_flags);
+ bo_flags, NULL);
if (IS_ERR(external)) {
KUNIT_FAIL(test, "external bo create err=%pe\n", external);
goto cleanup_bo;
}
xe_bo_lock(external, false);
- err = xe_bo_pin_external(external, false);
+ err = xe_bo_pin_external(external, false, exec);
xe_bo_unlock(external);
if (err) {
KUNIT_FAIL(test, "external bo pin err=%pe\n",
@@ -294,7 +296,7 @@ static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struc
if (i) {
down_read(&vm->lock);
xe_vm_lock(vm, false);
- err = xe_bo_validate(bo, bo->vm, false);
+ err = xe_bo_validate(bo, bo->vm, false, exec);
xe_vm_unlock(vm);
up_read(&vm->lock);
if (err) {
@@ -303,7 +305,7 @@ static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struc
goto cleanup_all;
}
xe_bo_lock(external, false);
- err = xe_bo_validate(external, NULL, false);
+ err = xe_bo_validate(external, NULL, false, exec);
xe_bo_unlock(external);
if (err) {
KUNIT_FAIL(test, "external bo valid err=%pe\n",
@@ -495,9 +497,9 @@ static int shrink_test_run_device(struct xe_device *xe)
INIT_LIST_HEAD(&link->link);
/* We can create bos using WC caching here. But it is slower. */
- bo = xe_bo_create_user(xe, NULL, NULL, XE_BO_SHRINK_SIZE,
+ bo = xe_bo_create_user(xe, NULL, XE_BO_SHRINK_SIZE,
DRM_XE_GEM_CPU_CACHING_WB,
- XE_BO_FLAG_SYSTEM);
+ XE_BO_FLAG_SYSTEM, NULL);
if (IS_ERR(bo)) {
if (bo != ERR_PTR(-ENOMEM) && bo != ERR_PTR(-ENOSPC) &&
bo != ERR_PTR(-EINTR) && bo != ERR_PTR(-ERESTARTSYS))
diff --git a/drivers/gpu/drm/xe/tests/xe_dma_buf.c b/drivers/gpu/drm/xe/tests/xe_dma_buf.c
index 121f17c112ec..a7e548a2bdfb 100644
--- a/drivers/gpu/drm/xe/tests/xe_dma_buf.c
+++ b/drivers/gpu/drm/xe/tests/xe_dma_buf.c
@@ -27,7 +27,8 @@ static bool is_dynamic(struct dma_buf_test_params *params)
}
static void check_residency(struct kunit *test, struct xe_bo *exported,
- struct xe_bo *imported, struct dma_buf *dmabuf)
+ struct xe_bo *imported, struct dma_buf *dmabuf,
+ struct drm_exec *exec)
{
struct dma_buf_test_params *params = to_dma_buf_test_params(test->priv);
u32 mem_type;
@@ -57,16 +58,12 @@ static void check_residency(struct kunit *test, struct xe_bo *exported,
return;
/*
- * Evict exporter. Note that the gem object dma_buf member isn't
- * set from xe_gem_prime_export(), and it's needed for the move_notify()
- * functionality, so hack that up here. Evicting the exported bo will
+ * Evict the exporter. Evicting the exported bo will also evict the
* imported bo through the move_notify() functionality if the importer
* is on a different device. If they're on the same device,
* the exporter and the importer should be the same bo.
*/
- swap(exported->ttm.base.dma_buf, dmabuf);
- ret = xe_bo_evict(exported);
- swap(exported->ttm.base.dma_buf, dmabuf);
+ ret = xe_bo_evict(exported, exec);
if (ret) {
if (ret != -EINTR && ret != -ERESTARTSYS)
KUNIT_FAIL(test, "Evicting exporter failed with err=%d.\n",
@@ -81,7 +78,7 @@ static void check_residency(struct kunit *test, struct xe_bo *exported,
}
/* Re-validate the importer. This should move also exporter in. */
- ret = xe_bo_validate(imported, NULL, false);
+ ret = xe_bo_validate(imported, NULL, false, exec);
if (ret) {
if (ret != -EINTR && ret != -ERESTARTSYS)
KUNIT_FAIL(test, "Validating importer failed with err=%d.\n",
@@ -117,8 +114,8 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
size = SZ_64K;
kunit_info(test, "running %s\n", __func__);
- bo = xe_bo_create_user(xe, NULL, NULL, size, DRM_XE_GEM_CPU_CACHING_WC,
- params->mem_mask);
+ bo = xe_bo_create_user(xe, NULL, size, DRM_XE_GEM_CPU_CACHING_WC,
+ params->mem_mask, NULL);
if (IS_ERR(bo)) {
KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
PTR_ERR(bo));
@@ -131,6 +128,7 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
PTR_ERR(dmabuf));
goto out;
}
+ bo->ttm.base.dma_buf = dmabuf;
import = xe_gem_prime_import(&xe->drm, dmabuf);
if (!IS_ERR(import)) {
@@ -145,11 +143,12 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
KUNIT_FAIL(test,
"xe_gem_prime_import() succeeded when it shouldn't have\n");
} else {
+ struct drm_exec *exec = XE_VALIDATION_OPT_OUT;
int err;
/* Is everything where we expect it to be? */
xe_bo_lock(import_bo, false);
- err = xe_bo_validate(import_bo, NULL, false);
+ err = xe_bo_validate(import_bo, NULL, false, exec);
/* Pinning in VRAM is not allowed. */
if (!is_dynamic(params) &&
@@ -162,7 +161,7 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
err == -ERESTARTSYS);
if (!err)
- check_residency(test, bo, import_bo, dmabuf);
+ check_residency(test, bo, import_bo, dmabuf, exec);
xe_bo_unlock(import_bo);
}
drm_gem_object_put(import);
@@ -178,6 +177,7 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
KUNIT_FAIL(test, "dynamic p2p attachment failed with err=%ld\n",
PTR_ERR(import));
}
+ bo->ttm.base.dma_buf = NULL;
dma_buf_put(dmabuf);
out:
drm_gem_object_put(&bo->ttm.base);
@@ -198,7 +198,7 @@ static const struct dma_buf_attach_ops nop2p_attach_ops = {
static const struct dma_buf_test_params test_params[] = {
{.mem_mask = XE_BO_FLAG_VRAM0,
.attach_ops = &xe_dma_buf_attach_ops},
- {.mem_mask = XE_BO_FLAG_VRAM0,
+ {.mem_mask = XE_BO_FLAG_VRAM0 | XE_BO_FLAG_NEEDS_CPU_ACCESS,
.attach_ops = &xe_dma_buf_attach_ops,
.force_different_devices = true},
@@ -230,7 +230,8 @@ static const struct dma_buf_test_params test_params[] = {
{.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
.attach_ops = &xe_dma_buf_attach_ops},
- {.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
+ {.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0 |
+ XE_BO_FLAG_NEEDS_CPU_ACCESS,
.attach_ops = &xe_dma_buf_attach_ops,
.force_different_devices = true},
diff --git a/drivers/gpu/drm/xe/tests/xe_guc_g2g_test.c b/drivers/gpu/drm/xe/tests/xe_guc_g2g_test.c
new file mode 100644
index 000000000000..3b213fcae916
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_guc_g2g_test.c
@@ -0,0 +1,776 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <linux/delay.h>
+
+#include <kunit/test.h>
+#include <kunit/visibility.h>
+
+#include "tests/xe_kunit_helpers.h"
+#include "tests/xe_pci_test.h"
+#include "tests/xe_test.h"
+
+#include "xe_bo.h"
+#include "xe_device.h"
+#include "xe_pm.h"
+
+/*
+ * There are different ways to allocate the G2G buffers. The plan for this test
+ * is to make sure that all the possible options work. The particular option
+ * chosen by the driver may vary from one platform to another and may also
+ * change over time. So, to ensure consistent testing, the relevant driver
+ * code is replicated here to guarantee it won't change without the test being
+ * updated to keep testing the other options.
+ *
+ * In order to test the actual code being used by the driver, there is also the
+ * 'default' scheme. That will use the official driver routines to test whatever
+ * method the driver is using on the current platform at the current time.
+ */
+enum {
+ /* Driver defined allocation scheme */
+ G2G_CTB_TYPE_DEFAULT,
+ /* Single buffer in host memory */
+ G2G_CTB_TYPE_HOST,
+ /* Single buffer in a specific tile, loops across all tiles */
+ G2G_CTB_TYPE_TILE,
+};
+
+/*
+ * The payload is opaque to the GuC, so the KMD can define any structure or
+ * size it wants.
+ */
+struct g2g_test_payload {
+ u32 tx_dev;
+ u32 tx_tile;
+ u32 rx_dev;
+ u32 rx_tile;
+ u32 seqno;
+};
+
+static void g2g_test_send(struct kunit *test, struct xe_guc *guc,
+ u32 far_tile, u32 far_dev,
+ struct g2g_test_payload *payload)
+{
+ struct xe_device *xe = guc_to_xe(guc);
+ struct xe_gt *gt = guc_to_gt(guc);
+ u32 *action, total;
+ size_t payload_len;
+ int ret;
+
+ static_assert(IS_ALIGNED(sizeof(*payload), sizeof(u32)));
+ payload_len = sizeof(*payload) / sizeof(u32);
+
+ total = 4 + payload_len;
+ action = kunit_kmalloc_array(test, total, sizeof(*action), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, action);
+
+ action[0] = XE_GUC_ACTION_TEST_G2G_SEND;
+ action[1] = far_tile;
+ action[2] = far_dev;
+ action[3] = payload_len;
+ memcpy(action + 4, payload, payload_len * sizeof(u32));
+
+ atomic_inc(&xe->g2g_test_count);
+
+ /*
+ * Should specify the expected response notification here. The problem is
+ * that the response will be coming from a different GuC. By the end, it
+ * should all add up, as long as an equal number of messages are sent from
+ * each GuC and to each GuC. However, transient errors such as negative
+ * reservation space can occur in the middle. Rather than add intrusive
+ * changes to the CT layer, it is simpler to not bother counting it at all.
+ * The system should be
+ * idle when running the selftest, and the selftest's notification total size
+ * is well within the G2H allocation size. So there should be no issues with
+ * needing to block for space, which is all the tracking code is really for.
+ */
+ ret = xe_guc_ct_send(&guc->ct, action, total, 0, 0);
+ kunit_kfree(test, action);
+ KUNIT_ASSERT_EQ_MSG(test, 0, ret, "G2G send failed: %d [%d:%d -> %d:%d]\n", ret,
+ gt_to_tile(gt)->id, G2G_DEV(gt), far_tile, far_dev);
+}
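+
+/*
+ * Sketch of the H2G action built above (derived from the code, not from a
+ * GuC ABI document): dword 0 is the action id, dwords 1-3 route and size
+ * the payload, and the payload itself follows verbatim.
+ *
+ * action[0] XE_GUC_ACTION_TEST_G2G_SEND
+ * action[1] far_tile
+ * action[2] far_dev
+ * action[3] payload_len (in dwords, 5 for struct g2g_test_payload)
+ * action[4..8] tx_dev, tx_tile, rx_dev, rx_tile, seqno
+ */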
+
+/*
+ * NB: Can't use KUNIT_ASSERT and friends in here as this is called asynchronously
+ * from the G2H notification handler. Need that to actually complete rather than
+ * thread-abort in order to keep the rest of the driver alive!
+ */
+int xe_guc_g2g_test_notification(struct xe_guc *guc, u32 *msg, u32 len)
+{
+ struct xe_device *xe = guc_to_xe(guc);
+ struct xe_gt *rx_gt = guc_to_gt(guc), *test_gt, *tx_gt = NULL;
+ u32 tx_tile, tx_dev, rx_tile, rx_dev, idx, got_len;
+ struct g2g_test_payload *payload;
+ size_t payload_len;
+ int ret = 0, i;
+
+ payload_len = sizeof(*payload) / sizeof(u32);
+
+ if (unlikely(len != (G2H_LEN_DW_G2G_NOTIFY_MIN + payload_len))) {
+ xe_gt_err(rx_gt, "G2G test notification invalid length %u\n", len);
+ ret = -EPROTO;
+ goto done;
+ }
+
+ tx_tile = msg[0];
+ tx_dev = msg[1];
+ got_len = msg[2];
+ payload = (struct g2g_test_payload *)(msg + 3);
+
+ rx_tile = gt_to_tile(rx_gt)->id;
+ rx_dev = G2G_DEV(rx_gt);
+
+ if (got_len != payload_len) {
+ xe_gt_err(rx_gt, "G2G: Invalid payload length: %u vs %zu\n", got_len, payload_len);
+ ret = -EPROTO;
+ goto done;
+ }
+
+ if (payload->tx_dev != tx_dev || payload->tx_tile != tx_tile ||
+ payload->rx_dev != rx_dev || payload->rx_tile != rx_tile) {
+ xe_gt_err(rx_gt, "G2G: Invalid payload: %d:%d -> %d:%d vs %d:%d -> %d:%d! [%d]\n",
+ payload->tx_tile, payload->tx_dev, payload->rx_tile, payload->rx_dev,
+ tx_tile, tx_dev, rx_tile, rx_dev, payload->seqno);
+ ret = -EPROTO;
+ goto done;
+ }
+
+ if (!xe->g2g_test_array) {
+ xe_gt_err(rx_gt, "G2G: Missing test array!\n");
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ for_each_gt(test_gt, xe, i) {
+ if (gt_to_tile(test_gt)->id != tx_tile)
+ continue;
+
+ if (G2G_DEV(test_gt) != tx_dev)
+ continue;
+
+ if (tx_gt) {
+ xe_gt_err(rx_gt, "G2G: Got duplicate TX GTs: %d vs %d for %d:%d!\n",
+ tx_gt->info.id, test_gt->info.id, tx_tile, tx_dev);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ tx_gt = test_gt;
+ }
+ if (!tx_gt) {
+ xe_gt_err(rx_gt, "G2G: Failed to find a TX GT for %d:%d!\n", tx_tile, tx_dev);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ idx = (tx_gt->info.id * xe->info.gt_count) + rx_gt->info.id;
+
+ if (xe->g2g_test_array[idx] != payload->seqno - 1) {
+ xe_gt_err(rx_gt, "G2G: Seqno mismatch %d vs %d for %d:%d -> %d:%d!\n",
+ xe->g2g_test_array[idx], payload->seqno - 1,
+ tx_tile, tx_dev, rx_tile, rx_dev);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ xe->g2g_test_array[idx] = payload->seqno;
+
+done:
+ atomic_dec(&xe->g2g_test_count);
+ return ret;
+}
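+
+/*
+ * Illustrative layout of g2g_test_array (assuming two GTs): it is a flat
+ * gt_count x gt_count matrix indexed by tx * gt_count + rx, so slots 1 and
+ * 2 carry the 0->1 and 1->0 seqnos, while the identity slots 0 and 3 must
+ * stay zero, which is exactly what g2g_run_test() verifies below.
+ */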
+
+/*
+ * Send the given seqno from all GuCs to all other GuCs in tile/GT order
+ */
+static void g2g_test_in_order(struct kunit *test, struct xe_device *xe, u32 seqno)
+{
+ struct xe_gt *near_gt, *far_gt;
+ int i, j;
+
+ for_each_gt(near_gt, xe, i) {
+ u32 near_tile = gt_to_tile(near_gt)->id;
+ u32 near_dev = G2G_DEV(near_gt);
+
+ for_each_gt(far_gt, xe, j) {
+ u32 far_tile = gt_to_tile(far_gt)->id;
+ u32 far_dev = G2G_DEV(far_gt);
+ struct g2g_test_payload payload;
+
+ if (far_gt->info.id == near_gt->info.id)
+ continue;
+
+ payload.tx_dev = near_dev;
+ payload.tx_tile = near_tile;
+ payload.rx_dev = far_dev;
+ payload.rx_tile = far_tile;
+ payload.seqno = seqno;
+ g2g_test_send(test, &near_gt->uc.guc, far_tile, far_dev, &payload);
+ }
+ }
+}
+
+#define WAIT_TIME_MS 100
+#define WAIT_COUNT (1000 / WAIT_TIME_MS)
+
+static void g2g_wait_for_complete(void *_xe)
+{
+ struct xe_device *xe = (struct xe_device *)_xe;
+ struct kunit *test = kunit_get_current_test();
+ int wait = 0;
+
+ /* Wait for all G2H messages to be received */
+ while (atomic_read(&xe->g2g_test_count)) {
+ if (++wait > WAIT_COUNT)
+ break;
+
+ msleep(WAIT_TIME_MS);
+ }
+
+ KUNIT_ASSERT_EQ_MSG(test, 0, atomic_read(&xe->g2g_test_count),
+ "Timed out waiting for notifications\n");
+ kunit_info(test, "Got all notifications back\n");
+}
+
+#undef WAIT_TIME_MS
+#undef WAIT_COUNT
+
+static void g2g_clean_array(void *_xe)
+{
+ struct xe_device *xe = (struct xe_device *)_xe;
+
+ xe->g2g_test_array = NULL;
+}
+
+#define NUM_LOOPS 16
+
+static void g2g_run_test(struct kunit *test, struct xe_device *xe)
+{
+ u32 seqno, max_array;
+ int ret, i, j;
+
+ max_array = xe->info.gt_count * xe->info.gt_count;
+ xe->g2g_test_array = kunit_kcalloc(test, max_array, sizeof(u32), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xe->g2g_test_array);
+
+ ret = kunit_add_action_or_reset(test, g2g_clean_array, xe);
+ KUNIT_ASSERT_EQ_MSG(test, 0, ret, "Failed to register clean up action\n");
+
+ /*
+ * Send incrementing seqnos from all GuCs to all other GuCs in tile/GT order.
+ * Tile/GT order doesn't mean anything to the hardware, but it guarantees a
+ * fixed sequence every time.
+ *
+ * Verify that each one comes back having taken the correct route.
+ */
+ ret = kunit_add_action(test, g2g_wait_for_complete, xe);
+ KUNIT_ASSERT_EQ_MSG(test, 0, ret, "Failed to register clean up action\n");
+ for (seqno = 1; seqno < NUM_LOOPS; seqno++)
+ g2g_test_in_order(test, xe, seqno);
+ seqno--;
+
+ kunit_release_action(test, &g2g_wait_for_complete, xe);
+
+ /* Check for the final seqno in each slot */
+ for (i = 0; i < xe->info.gt_count; i++) {
+ for (j = 0; j < xe->info.gt_count; j++) {
+ u32 idx = (j * xe->info.gt_count) + i;
+
+ if (i == j)
+ KUNIT_ASSERT_EQ_MSG(test, 0, xe->g2g_test_array[idx],
+ "identity seqno modified: %d for %dx%d!\n",
+ xe->g2g_test_array[idx], i, j);
+ else
+ KUNIT_ASSERT_EQ_MSG(test, seqno, xe->g2g_test_array[idx],
+ "invalid seqno: %d vs %d for %dx%d!\n",
+ xe->g2g_test_array[idx], seqno, i, j);
+ }
+ }
+
+ kunit_kfree(test, xe->g2g_test_array);
+ kunit_release_action(test, &g2g_clean_array, xe);
+
+ kunit_info(test, "Test passed\n");
+}
+
+#undef NUM_LOOPS
+
+static void g2g_ct_stop(struct xe_guc *guc)
+{
+ struct xe_gt *remote_gt, *gt = guc_to_gt(guc);
+ struct xe_device *xe = gt_to_xe(gt);
+ int i, t;
+
+ for_each_gt(remote_gt, xe, i) {
+ u32 tile, dev;
+
+ if (remote_gt->info.id == gt->info.id)
+ continue;
+
+ tile = gt_to_tile(remote_gt)->id;
+ dev = G2G_DEV(remote_gt);
+
+ for (t = 0; t < XE_G2G_TYPE_LIMIT; t++)
+ guc_g2g_deregister(guc, tile, dev, t);
+ }
+}
+
+/* Size of a single allocation that contains all G2G CTBs across all GTs */
+static u32 g2g_ctb_size(struct kunit *test, struct xe_device *xe)
+{
+ unsigned int count = xe->info.gt_count;
+ u32 num_channels = (count * (count - 1)) / 2;
+
+ kunit_info(test, "Size: (%d * %d / 2) * %d * 0x%08X + 0x%08X => 0x%08X [%d]\n",
+ count, count - 1, XE_G2G_TYPE_LIMIT, G2G_BUFFER_SIZE, G2G_DESC_AREA_SIZE,
+ num_channels * XE_G2G_TYPE_LIMIT * G2G_BUFFER_SIZE + G2G_DESC_AREA_SIZE,
+ num_channels * XE_G2G_TYPE_LIMIT);
+
+ return num_channels * XE_G2G_TYPE_LIMIT * G2G_BUFFER_SIZE + G2G_DESC_AREA_SIZE;
+}
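+
+/*
+ * Worked example of the sizing above (assuming XE_G2G_TYPE_LIMIT is 2):
+ * with gt_count = 4 there are (4 * 3) / 2 = 6 bi-directional GT pairs, so
+ * the blob must hold 6 * 2 = 12 CTBs of G2G_BUFFER_SIZE each, plus
+ * G2G_DESC_AREA_SIZE up front for the descriptors.
+ */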
+
+/*
+ * Use the driver's regular CTB allocation scheme.
+ */
+static void g2g_alloc_default(struct kunit *test, struct xe_device *xe)
+{
+ struct xe_gt *gt;
+ int i;
+
+ kunit_info(test, "Default [tiles = %d, GTs = %d]\n",
+ xe->info.tile_count, xe->info.gt_count);
+
+ for_each_gt(gt, xe, i) {
+ struct xe_guc *guc = &gt->uc.guc;
+ int ret;
+
+ ret = guc_g2g_alloc(guc);
+ KUNIT_ASSERT_EQ_MSG(test, 0, ret, "G2G alloc failed: %pe", ERR_PTR(ret));
+ }
+}
+
+static void g2g_distribute(struct kunit *test, struct xe_device *xe, struct xe_bo *bo)
+{
+ struct xe_gt *root_gt, *gt;
+ int i;
+
+ root_gt = xe_device_get_gt(xe, 0);
+ root_gt->uc.guc.g2g.bo = bo;
+ root_gt->uc.guc.g2g.owned = true;
+ kunit_info(test, "[%d.%d] Assigned 0x%p\n", gt_to_tile(root_gt)->id, root_gt->info.id, bo);
+
+ for_each_gt(gt, xe, i) {
+ if (gt->info.id != 0) {
+ gt->uc.guc.g2g.owned = false;
+ gt->uc.guc.g2g.bo = xe_bo_get(bo);
+ kunit_info(test, "[%d.%d] Pinned 0x%p\n",
+ gt_to_tile(gt)->id, gt->info.id, gt->uc.guc.g2g.bo);
+ }
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gt->uc.guc.g2g.bo);
+ }
+}
+
+/*
+ * Allocate a single blob on the host and split between all G2G CTBs.
+ */
+static void g2g_alloc_host(struct kunit *test, struct xe_device *xe)
+{
+ struct xe_bo *bo;
+ u32 g2g_size;
+
+ kunit_info(test, "Host [tiles = %d, GTs = %d]\n", xe->info.tile_count, xe->info.gt_count);
+
+ g2g_size = g2g_ctb_size(test, xe);
+ bo = xe_managed_bo_create_pin_map(xe, xe_device_get_root_tile(xe), g2g_size,
+ XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_GGTT_ALL |
+ XE_BO_FLAG_GGTT_INVALIDATE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bo);
+ kunit_info(test, "[HST] G2G buffer create: 0x%p\n", bo);
+
+ xe_map_memset(xe, &bo->vmap, 0, 0, g2g_size);
+
+ g2g_distribute(test, xe, bo);
+}
+
+/*
+ * Allocate a single blob on the given tile and split between all G2G CTBs.
+ */
+static void g2g_alloc_tile(struct kunit *test, struct xe_device *xe, struct xe_tile *tile)
+{
+ struct xe_bo *bo;
+ u32 g2g_size;
+
+ KUNIT_ASSERT_TRUE(test, IS_DGFX(xe));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, tile);
+
+ kunit_info(test, "Tile %d [tiles = %d, GTs = %d]\n",
+ tile->id, xe->info.tile_count, xe->info.gt_count);
+
+ g2g_size = g2g_ctb_size(test, xe);
+ bo = xe_managed_bo_create_pin_map(xe, tile, g2g_size,
+ XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_GGTT_ALL |
+ XE_BO_FLAG_GGTT_INVALIDATE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bo);
+ kunit_info(test, "[%d.*] G2G buffer create: 0x%p\n", tile->id, bo);
+
+ xe_map_memset(xe, &bo->vmap, 0, 0, g2g_size);
+
+ g2g_distribute(test, xe, bo);
+}
+
+static void g2g_free(struct kunit *test, struct xe_device *xe)
+{
+ struct xe_gt *gt;
+ struct xe_bo *bo;
+ int i;
+
+ for_each_gt(gt, xe, i) {
+ bo = gt->uc.guc.g2g.bo;
+ if (!bo)
+ continue;
+
+ if (gt->uc.guc.g2g.owned) {
+ xe_managed_bo_unpin_map_no_vm(bo);
+ kunit_info(test, "[%d.%d] Unmapped 0x%p\n",
+ gt_to_tile(gt)->id, gt->info.id, bo);
+ } else {
+ xe_bo_put(bo);
+ kunit_info(test, "[%d.%d] Unpinned 0x%p\n",
+ gt_to_tile(gt)->id, gt->info.id, bo);
+ }
+
+ gt->uc.guc.g2g.bo = NULL;
+ }
+}
+
+static void g2g_stop(struct kunit *test, struct xe_device *xe)
+{
+ struct xe_gt *gt;
+ int i;
+
+ for_each_gt(gt, xe, i) {
+ struct xe_guc *guc = &gt->uc.guc;
+
+ if (!guc->g2g.bo)
+ continue;
+
+ g2g_ct_stop(guc);
+ }
+
+ g2g_free(test, xe);
+}
+
+/*
+ * Generate a unique id for each bi-directional CTB for each pair of
+ * near and far tiles/devices. The id can then be used as an index into
+ * a single allocation that is sub-divided into multiple CTBs.
+ *
+ * For example, with two devices per tile and two tiles, the table should
+ * look like:
+ * Far <tile>.<dev>
+ * 0.0 0.1 1.0 1.1
+ * N 0.0 --/-- 00/01 02/03 04/05
+ * e 0.1 01/00 --/-- 06/07 08/09
+ * a 1.0 03/02 07/06 --/-- 10/11
+ * r 1.1 05/04 09/08 11/10 --/--
+ *
+ * Where each entry is Rx/Tx channel id.
+ *
+ * So GuC #3 (tile 1, dev 1) talking to GuC #2 (tile 1, dev 0) would
+ * be reading from channel #11 and writing to channel #10. Whereas,
+ * GuC #2 talking to GuC #3 would be read on #10 and write to #11.
+ */
+static int g2g_slot_flat(u32 near_tile, u32 near_dev, u32 far_tile, u32 far_dev,
+ u32 type, u32 max_inst, bool have_dev)
+{
+ u32 near = near_tile, far = far_tile;
+ u32 idx = 0, x, y, direction;
+ int i;
+
+ if (have_dev) {
+ near = (near << 1) | near_dev;
+ far = (far << 1) | far_dev;
+ }
+
+ /* No need to send to one's self */
+ if (far == near)
+ return -1;
+
+ if (far > near) {
+ /* Top right table half */
+ x = far;
+ y = near;
+
+ /* T/R is 'forwards' direction */
+ direction = type;
+ } else {
+ /* Bottom left table half */
+ x = near;
+ y = far;
+
+ /* B/L is 'backwards' direction */
+ direction = (1 - type);
+ }
+
+ /* Count the rows prior to the target */
+ for (i = y; i > 0; i--)
+ idx += max_inst - i;
+
+ /* Count this row up to the target */
+ idx += (x - 1 - y);
+
+ /* Slots are in Rx/Tx pairs */
+ idx *= 2;
+
+ /* Pick Rx/Tx direction */
+ idx += direction;
+
+ return idx;
+}
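+
+/*
+ * Worked example of the slot math above, matching the table: GuC #3
+ * (tile 1, dev 1) -> GuC #2 (tile 1, dev 0) with have_dev and max_inst = 4
+ * gives near = 3, far = 2. Since far < near, this is the bottom-left half,
+ * so x = 3, y = 2 and direction = 1 - type. Rows prior contribute
+ * (4 - 2) + (4 - 1) = 5, the row itself adds 3 - 1 - 2 = 0, and the Rx/Tx
+ * doubling gives base slot 10, with direction selecting 10 or 11, i.e. the
+ * 11/10 entry in the table above.
+ */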
+
+static int g2g_register_flat(struct xe_guc *guc, u32 far_tile, u32 far_dev, u32 type, bool have_dev)
+{
+ struct xe_gt *gt = guc_to_gt(guc);
+ struct xe_device *xe = gt_to_xe(gt);
+ u32 near_tile = gt_to_tile(gt)->id;
+ u32 near_dev = G2G_DEV(gt);
+ u32 max = xe->info.gt_count;
+ int idx;
+ u32 base, desc, buf;
+
+ if (!guc->g2g.bo)
+ return -ENODEV;
+
+ idx = g2g_slot_flat(near_tile, near_dev, far_tile, far_dev, type, max, have_dev);
+ xe_assert(xe, idx >= 0);
+
+ base = guc_bo_ggtt_addr(guc, guc->g2g.bo);
+ desc = base + idx * G2G_DESC_SIZE;
+ buf = base + idx * G2G_BUFFER_SIZE + G2G_DESC_AREA_SIZE;
+
+ xe_assert(xe, (desc - base + G2G_DESC_SIZE) <= G2G_DESC_AREA_SIZE);
+ xe_assert(xe, (buf - base + G2G_BUFFER_SIZE) <= xe_bo_size(guc->g2g.bo));
+
+ return guc_action_register_g2g_buffer(guc, type, far_tile, far_dev,
+ desc, buf, G2G_BUFFER_SIZE);
+}
+
+static void g2g_start(struct kunit *test, struct xe_guc *guc)
+{
+ struct xe_gt *remote_gt, *gt = guc_to_gt(guc);
+ struct xe_device *xe = gt_to_xe(gt);
+ unsigned int i;
+ int t, ret;
+ bool have_dev;
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, guc->g2g.bo);
+
+ /* GuC interface will need extending if more GT device types are ever created. */
+ KUNIT_ASSERT_TRUE(test,
+ (gt->info.type == XE_GT_TYPE_MAIN) ||
+ (gt->info.type == XE_GT_TYPE_MEDIA));
+
+ /* Channel numbering depends on whether there are multiple GTs per tile */
+ have_dev = xe->info.gt_count > xe->info.tile_count;
+
+ for_each_gt(remote_gt, xe, i) {
+ u32 tile, dev;
+
+ if (remote_gt->info.id == gt->info.id)
+ continue;
+
+ tile = gt_to_tile(remote_gt)->id;
+ dev = G2G_DEV(remote_gt);
+
+ for (t = 0; t < XE_G2G_TYPE_LIMIT; t++) {
+ ret = g2g_register_flat(guc, tile, dev, t, have_dev);
+ KUNIT_ASSERT_EQ_MSG(test, 0, ret, "G2G register failed: %pe", ERR_PTR(ret));
+ }
+ }
+}
+
+static void g2g_reinit(struct kunit *test, struct xe_device *xe, int ctb_type, struct xe_tile *tile)
+{
+ struct xe_gt *gt;
+ int i, found = 0;
+
+ g2g_stop(test, xe);
+
+ for_each_gt(gt, xe, i) {
+ struct xe_guc *guc = &gt->uc.guc;
+
+ KUNIT_ASSERT_NULL(test, guc->g2g.bo);
+ }
+
+ switch (ctb_type) {
+ case G2G_CTB_TYPE_DEFAULT:
+ g2g_alloc_default(test, xe);
+ break;
+
+ case G2G_CTB_TYPE_HOST:
+ g2g_alloc_host(test, xe);
+ break;
+
+ case G2G_CTB_TYPE_TILE:
+ g2g_alloc_tile(test, xe, tile);
+ break;
+
+ default:
+ KUNIT_ASSERT_TRUE(test, false);
+ }
+
+ for_each_gt(gt, xe, i) {
+ struct xe_guc *guc = &gt->uc.guc;
+
+ if (!guc->g2g.bo)
+ continue;
+
+ if (ctb_type == G2G_CTB_TYPE_DEFAULT)
+ guc_g2g_start(guc);
+ else
+ g2g_start(test, guc);
+ found++;
+ }
+
+ KUNIT_ASSERT_GT_MSG(test, found, 1, "insufficient G2G channels running: %d", found);
+
+ kunit_info(test, "Testing across %d GTs\n", found);
+}
+
+static void g2g_recreate_ctb(void *_xe)
+{
+ struct xe_device *xe = (struct xe_device *)_xe;
+ struct kunit *test = kunit_get_current_test();
+
+ g2g_stop(test, xe);
+
+ if (xe_guc_g2g_wanted(xe))
+ g2g_reinit(test, xe, G2G_CTB_TYPE_DEFAULT, NULL);
+}
+
+static void g2g_pm_runtime_put(void *_xe)
+{
+ struct xe_device *xe = (struct xe_device *)_xe;
+
+ xe_pm_runtime_put(xe);
+}
+
+static void g2g_pm_runtime_get(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+ int ret;
+
+ xe_pm_runtime_get(xe);
+ ret = kunit_add_action_or_reset(test, g2g_pm_runtime_put, xe);
+ KUNIT_ASSERT_EQ_MSG(test, 0, ret, "Failed to register runtime PM action\n");
+}
+
+static void g2g_check_skip(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+ struct xe_gt *gt;
+ int i;
+
+ if (IS_SRIOV_VF(xe))
+ kunit_skip(test, "not supported from a VF");
+
+ if (xe->info.gt_count <= 1)
+ kunit_skip(test, "not enough GTs");
+
+ for_each_gt(gt, xe, i) {
+ struct xe_guc *guc = &gt->uc.guc;
+
+ if (guc->fw.build_type == CSS_UKERNEL_INFO_BUILDTYPE_PROD)
+ kunit_skip(test,
+ "G2G test interface not available in production firmware builds\n");
+ }
+}
+
+/*
+ * Simple test that does not try to recreate the CTBs.
+ * Requires that the platform already enables G2G comms
+ * but has no risk of leaving the system in a broken state
+ * afterwards.
+ */
+static void xe_live_guc_g2g_kunit_default(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+
+ if (!xe_guc_g2g_wanted(xe))
+ kunit_skip(test, "G2G not enabled");
+
+ g2g_check_skip(test);
+
+ g2g_pm_runtime_get(test);
+
+ kunit_info(test, "Testing default CTBs\n");
+ g2g_run_test(test, xe);
+
+ kunit_release_action(test, &g2g_pm_runtime_put, xe);
+}
+
+/*
+ * More complex test that re-creates the CTBs in various locations to
+ * test access to each location from each GuC. Can be run even on
+ * systems that do not enable G2G by default. On the other hand,
+ * because it recreates the CTBs, if something goes wrong it could
+ * leave the system with broken G2G comms.
+ */
+static void xe_live_guc_g2g_kunit_allmem(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+ int ret;
+
+ g2g_check_skip(test);
+
+ g2g_pm_runtime_get(test);
+
+ /* Make sure to leave the system as we found it */
+ ret = kunit_add_action_or_reset(test, g2g_recreate_ctb, xe);
+ KUNIT_ASSERT_EQ_MSG(test, 0, ret, "Failed to register CTB re-creation action\n");
+
+ kunit_info(test, "Testing CTB type 'default'...\n");
+ g2g_reinit(test, xe, G2G_CTB_TYPE_DEFAULT, NULL);
+ g2g_run_test(test, xe);
+
+ kunit_info(test, "Testing CTB type 'host'...\n");
+ g2g_reinit(test, xe, G2G_CTB_TYPE_HOST, NULL);
+ g2g_run_test(test, xe);
+
+ if (IS_DGFX(xe)) {
+ struct xe_tile *tile;
+ int id;
+
+ for_each_tile(tile, xe, id) {
+ kunit_info(test, "Testing CTB type 'tile: #%d'...\n", id);
+
+ g2g_reinit(test, xe, G2G_CTB_TYPE_TILE, tile);
+ g2g_run_test(test, xe);
+ }
+ } else {
+ kunit_info(test, "Skipping local memory on integrated platform\n");
+ }
+
+ kunit_release_action(test, g2g_recreate_ctb, xe);
+ kunit_release_action(test, g2g_pm_runtime_put, xe);
+}
+
+static struct kunit_case xe_guc_g2g_tests[] = {
+ KUNIT_CASE_PARAM(xe_live_guc_g2g_kunit_default, xe_pci_live_device_gen_param),
+ KUNIT_CASE_PARAM(xe_live_guc_g2g_kunit_allmem, xe_pci_live_device_gen_param),
+ {}
+};
+
+VISIBLE_IF_KUNIT
+struct kunit_suite xe_guc_g2g_test_suite = {
+ .name = "xe_guc_g2g",
+ .test_cases = xe_guc_g2g_tests,
+ .init = xe_kunit_helper_xe_device_live_test_init,
+};
+EXPORT_SYMBOL_IF_KUNIT(xe_guc_g2g_test_suite);
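+
+/*
+ * How this suite gets run (a note, not driver code): xe_live_test_mod.c,
+ * updated below, registers it via kunit_test_suite(), so the tests execute
+ * when the xe_live_test module is loaded against a live Xe device.
+ */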
diff --git a/drivers/gpu/drm/xe/tests/xe_live_test_mod.c b/drivers/gpu/drm/xe/tests/xe_live_test_mod.c
index 81277c77016d..c55e46f1ae92 100644
--- a/drivers/gpu/drm/xe/tests/xe_live_test_mod.c
+++ b/drivers/gpu/drm/xe/tests/xe_live_test_mod.c
@@ -10,12 +10,14 @@ extern struct kunit_suite xe_bo_shrink_test_suite;
extern struct kunit_suite xe_dma_buf_test_suite;
extern struct kunit_suite xe_migrate_test_suite;
extern struct kunit_suite xe_mocs_test_suite;
+extern struct kunit_suite xe_guc_g2g_test_suite;
kunit_test_suite(xe_bo_test_suite);
kunit_test_suite(xe_bo_shrink_test_suite);
kunit_test_suite(xe_dma_buf_test_suite);
kunit_test_suite(xe_migrate_test_suite);
kunit_test_suite(xe_mocs_test_suite);
+kunit_test_suite(xe_guc_g2g_test_suite);
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/xe/tests/xe_migrate.c b/drivers/gpu/drm/xe/tests/xe_migrate.c
index edd1e701aa1c..5904d658d1f2 100644
--- a/drivers/gpu/drm/xe/tests/xe_migrate.c
+++ b/drivers/gpu/drm/xe/tests/xe_migrate.c
@@ -70,7 +70,7 @@ static int run_sanity_job(struct xe_migrate *m, struct xe_device *xe,
} } while (0)
static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
- struct kunit *test, u32 region)
+ struct kunit *test, u32 region, struct drm_exec *exec)
{
struct xe_device *xe = tile_to_xe(m->tile);
u64 retval, expected = 0;
@@ -84,14 +84,15 @@ static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
ttm_bo_type_kernel,
region |
XE_BO_FLAG_NEEDS_CPU_ACCESS |
- XE_BO_FLAG_PINNED);
+ XE_BO_FLAG_PINNED,
+ exec);
if (IS_ERR(remote)) {
KUNIT_FAIL(test, "Failed to allocate remote bo for %s: %pe\n",
str, remote);
return;
}
- err = xe_bo_validate(remote, NULL, false);
+ err = xe_bo_validate(remote, NULL, false, exec);
if (err) {
KUNIT_FAIL(test, "Failed to validate system bo for %s: %i\n",
str, err);
@@ -161,13 +162,13 @@ out_unlock:
}
static void test_copy_sysmem(struct xe_migrate *m, struct xe_bo *bo,
- struct kunit *test)
+ struct drm_exec *exec, struct kunit *test)
{
- test_copy(m, bo, test, XE_BO_FLAG_SYSTEM);
+ test_copy(m, bo, test, XE_BO_FLAG_SYSTEM, exec);
}
static void test_copy_vram(struct xe_migrate *m, struct xe_bo *bo,
- struct kunit *test)
+ struct drm_exec *exec, struct kunit *test)
{
u32 region;
@@ -178,10 +179,11 @@ static void test_copy_vram(struct xe_migrate *m, struct xe_bo *bo,
region = XE_BO_FLAG_VRAM1;
else
region = XE_BO_FLAG_VRAM0;
- test_copy(m, bo, test, region);
+ test_copy(m, bo, test, region, exec);
}
-static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
+static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test,
+ struct drm_exec *exec)
{
struct xe_tile *tile = m->tile;
struct xe_device *xe = tile_to_xe(tile);
@@ -202,7 +204,8 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
big = xe_bo_create_pin_map(xe, tile, m->q->vm, SZ_4M,
ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM_IF_DGFX(tile));
+ XE_BO_FLAG_VRAM_IF_DGFX(tile),
+ exec);
if (IS_ERR(big)) {
KUNIT_FAIL(test, "Failed to allocate bo: %li\n", PTR_ERR(big));
goto vunmap;
@@ -210,7 +213,8 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
pt = xe_bo_create_pin_map(xe, tile, m->q->vm, XE_PAGE_SIZE,
ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM_IF_DGFX(tile));
+ XE_BO_FLAG_VRAM_IF_DGFX(tile),
+ exec);
if (IS_ERR(pt)) {
KUNIT_FAIL(test, "Failed to allocate fake pt: %li\n",
PTR_ERR(pt));
@@ -220,7 +224,8 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
tiny = xe_bo_create_pin_map(xe, tile, m->q->vm,
2 * SZ_4K,
ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM_IF_DGFX(tile));
+ XE_BO_FLAG_VRAM_IF_DGFX(tile),
+ exec);
if (IS_ERR(tiny)) {
KUNIT_FAIL(test, "Failed to allocate tiny fake pt: %li\n",
PTR_ERR(tiny));
@@ -290,10 +295,10 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
check(retval, expected, "Command clear small last value", test);
kunit_info(test, "Copying small buffer object to system\n");
- test_copy_sysmem(m, tiny, test);
+ test_copy_sysmem(m, tiny, exec, test);
if (xe->info.tile_count > 1) {
kunit_info(test, "Copying small buffer object to other vram\n");
- test_copy_vram(m, tiny, test);
+ test_copy_vram(m, tiny, exec, test);
}
/* Clear a big bo */
@@ -312,10 +317,10 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
check(retval, expected, "Command clear big last value", test);
kunit_info(test, "Copying big buffer object to system\n");
- test_copy_sysmem(m, big, test);
+ test_copy_sysmem(m, big, exec, test);
if (xe->info.tile_count > 1) {
kunit_info(test, "Copying big buffer object to other vram\n");
- test_copy_vram(m, big, test);
+ test_copy_vram(m, big, exec, test);
}
out:
@@ -343,10 +348,11 @@ static int migrate_test_run_device(struct xe_device *xe)
for_each_tile(tile, xe, id) {
struct xe_migrate *m = tile->migrate;
+ struct drm_exec *exec = XE_VALIDATION_OPT_OUT;
kunit_info(test, "Testing tile id %d.\n", id);
xe_vm_lock(m->q->vm, false);
- xe_migrate_sanity_test(m, test);
+ xe_migrate_sanity_test(m, test, exec);
xe_vm_unlock(m->q->vm);
}
@@ -490,7 +496,7 @@ err_sync:
static void test_migrate(struct xe_device *xe, struct xe_tile *tile,
struct xe_bo *sys_bo, struct xe_bo *vram_bo, struct xe_bo *ccs_bo,
- struct kunit *test)
+ struct drm_exec *exec, struct kunit *test)
{
struct dma_fence *fence;
u64 expected, retval;
@@ -509,7 +515,7 @@ static void test_migrate(struct xe_device *xe, struct xe_tile *tile,
dma_fence_put(fence);
kunit_info(test, "Evict vram buffer object\n");
- ret = xe_bo_evict(vram_bo);
+ ret = xe_bo_evict(vram_bo, exec);
if (ret) {
KUNIT_FAIL(test, "Failed to evict bo.\n");
return;
@@ -538,7 +544,7 @@ static void test_migrate(struct xe_device *xe, struct xe_tile *tile,
dma_fence_put(fence);
kunit_info(test, "Restore vram buffer object\n");
- ret = xe_bo_validate(vram_bo, NULL, false);
+ ret = xe_bo_validate(vram_bo, NULL, false, exec);
if (ret) {
KUNIT_FAIL(test, "Failed to validate vram bo for: %li\n", ret);
return;
@@ -636,13 +642,14 @@ static void validate_ccs_test_run_tile(struct xe_device *xe, struct xe_tile *til
{
struct xe_bo *sys_bo, *vram_bo = NULL, *ccs_bo = NULL;
unsigned int bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile);
+ struct drm_exec *exec;
long ret;
- sys_bo = xe_bo_create_user(xe, NULL, NULL, SZ_4M,
+ sys_bo = xe_bo_create_user(xe, NULL, SZ_4M,
DRM_XE_GEM_CPU_CACHING_WC,
XE_BO_FLAG_SYSTEM |
XE_BO_FLAG_NEEDS_CPU_ACCESS |
- XE_BO_FLAG_PINNED);
+ XE_BO_FLAG_PINNED, NULL);
if (IS_ERR(sys_bo)) {
KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
@@ -650,8 +657,9 @@ static void validate_ccs_test_run_tile(struct xe_device *xe, struct xe_tile *til
return;
}
+ exec = XE_VALIDATION_OPT_OUT;
xe_bo_lock(sys_bo, false);
- ret = xe_bo_validate(sys_bo, NULL, false);
+ ret = xe_bo_validate(sys_bo, NULL, false, exec);
if (ret) {
KUNIT_FAIL(test, "Failed to validate system bo for: %li\n", ret);
goto free_sysbo;
@@ -664,10 +672,10 @@ static void validate_ccs_test_run_tile(struct xe_device *xe, struct xe_tile *til
}
xe_bo_unlock(sys_bo);
- ccs_bo = xe_bo_create_user(xe, NULL, NULL, SZ_4M,
+ ccs_bo = xe_bo_create_user(xe, NULL, SZ_4M,
DRM_XE_GEM_CPU_CACHING_WC,
bo_flags | XE_BO_FLAG_NEEDS_CPU_ACCESS |
- XE_BO_FLAG_PINNED);
+ XE_BO_FLAG_PINNED, NULL);
if (IS_ERR(ccs_bo)) {
KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
@@ -676,7 +684,7 @@ static void validate_ccs_test_run_tile(struct xe_device *xe, struct xe_tile *til
}
xe_bo_lock(ccs_bo, false);
- ret = xe_bo_validate(ccs_bo, NULL, false);
+ ret = xe_bo_validate(ccs_bo, NULL, false, exec);
if (ret) {
KUNIT_FAIL(test, "Failed to validate system bo for: %li\n", ret);
goto free_ccsbo;
@@ -689,10 +697,10 @@ static void validate_ccs_test_run_tile(struct xe_device *xe, struct xe_tile *til
}
xe_bo_unlock(ccs_bo);
- vram_bo = xe_bo_create_user(xe, NULL, NULL, SZ_4M,
+ vram_bo = xe_bo_create_user(xe, NULL, SZ_4M,
DRM_XE_GEM_CPU_CACHING_WC,
bo_flags | XE_BO_FLAG_NEEDS_CPU_ACCESS |
- XE_BO_FLAG_PINNED);
+ XE_BO_FLAG_PINNED, NULL);
if (IS_ERR(vram_bo)) {
KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
PTR_ERR(vram_bo));
@@ -700,7 +708,7 @@ static void validate_ccs_test_run_tile(struct xe_device *xe, struct xe_tile *til
}
xe_bo_lock(vram_bo, false);
- ret = xe_bo_validate(vram_bo, NULL, false);
+ ret = xe_bo_validate(vram_bo, NULL, false, exec);
if (ret) {
KUNIT_FAIL(test, "Failed to validate vram bo for: %li\n", ret);
goto free_vrambo;
@@ -713,7 +721,7 @@ static void validate_ccs_test_run_tile(struct xe_device *xe, struct xe_tile *til
}
test_clear(xe, tile, sys_bo, vram_bo, test);
- test_migrate(xe, tile, sys_bo, vram_bo, ccs_bo, test);
+ test_migrate(xe, tile, sys_bo, vram_bo, ccs_bo, exec, test);
xe_bo_unlock(vram_bo);
xe_bo_lock(vram_bo, false);
diff --git a/drivers/gpu/drm/xe/tests/xe_pci.c b/drivers/gpu/drm/xe/tests/xe_pci.c
index 9c715e59f030..49b37dfd4e58 100644
--- a/drivers/gpu/drm/xe/tests/xe_pci.c
+++ b/drivers/gpu/drm/xe/tests/xe_pci.c
@@ -12,12 +12,219 @@
#include <kunit/test-bug.h>
#include <kunit/visibility.h>
+#define PLATFORM_CASE(platform__, graphics_step__) \
+ { \
+ .platform = XE_ ## platform__, \
+ .subplatform = XE_SUBPLATFORM_NONE, \
+ .step = { .graphics = STEP_ ## graphics_step__ } \
+ }
+
+#define SUBPLATFORM_CASE(platform__, subplatform__, graphics_step__) \
+ { \
+ .platform = XE_ ## platform__, \
+ .subplatform = XE_SUBPLATFORM_ ## platform__ ## _ ## subplatform__, \
+ .step = { .graphics = STEP_ ## graphics_step__ } \
+ }
+
+#define GMDID_CASE(platform__, graphics_verx100__, graphics_step__, \
+ media_verx100__, media_step__) \
+ { \
+ .platform = XE_ ## platform__, \
+ .subplatform = XE_SUBPLATFORM_NONE, \
+ .graphics_verx100 = graphics_verx100__, \
+ .media_verx100 = media_verx100__, \
+ .step = { .graphics = STEP_ ## graphics_step__, \
+ .media = STEP_ ## media_step__ } \
+ }
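+
+/*
+ * For reference, PLATFORM_CASE(TIGERLAKE, B0) expands via token pasting to:
+ *
+ * {
+ * .platform = XE_TIGERLAKE,
+ * .subplatform = XE_SUBPLATFORM_NONE,
+ * .step = { .graphics = STEP_B0 }
+ * }
+ */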
+
+static const struct xe_pci_fake_data cases[] = {
+ PLATFORM_CASE(TIGERLAKE, B0),
+ PLATFORM_CASE(DG1, A0),
+ PLATFORM_CASE(DG1, B0),
+ PLATFORM_CASE(ALDERLAKE_S, A0),
+ PLATFORM_CASE(ALDERLAKE_S, B0),
+ PLATFORM_CASE(ALDERLAKE_S, C0),
+ PLATFORM_CASE(ALDERLAKE_S, D0),
+ PLATFORM_CASE(ALDERLAKE_P, A0),
+ PLATFORM_CASE(ALDERLAKE_P, B0),
+ PLATFORM_CASE(ALDERLAKE_P, C0),
+ SUBPLATFORM_CASE(ALDERLAKE_S, RPLS, D0),
+ SUBPLATFORM_CASE(ALDERLAKE_P, RPLU, E0),
+ SUBPLATFORM_CASE(DG2, G10, C0),
+ SUBPLATFORM_CASE(DG2, G11, B1),
+ SUBPLATFORM_CASE(DG2, G12, A1),
+ GMDID_CASE(METEORLAKE, 1270, A0, 1300, A0),
+ GMDID_CASE(METEORLAKE, 1271, A0, 1300, A0),
+ GMDID_CASE(METEORLAKE, 1274, A0, 1300, A0),
+ GMDID_CASE(LUNARLAKE, 2004, A0, 2000, A0),
+ GMDID_CASE(LUNARLAKE, 2004, B0, 2000, A0),
+ GMDID_CASE(BATTLEMAGE, 2001, A0, 1301, A1),
+ GMDID_CASE(PANTHERLAKE, 3000, A0, 3000, A0),
+};
+
+KUNIT_ARRAY_PARAM(platform, cases, xe_pci_fake_data_desc);
+
+/**
+ * xe_pci_fake_data_gen_params - Generate struct xe_pci_fake_data parameters
+ * @test: the &struct kunit running the parameterized test
+ * @prev: the pointer to the previous parameter to iterate from or NULL
+ * @desc: output buffer with minimum size of KUNIT_PARAM_DESC_SIZE
+ *
+ * This function prepares a struct xe_pci_fake_data parameter.
+ *
+ * To be used only as a parameter generator function in &KUNIT_CASE_PARAM.
+ *
+ * Return: pointer to the next parameter or NULL if no more parameters
+ */
+const void *xe_pci_fake_data_gen_params(struct kunit *test, const void *prev, char *desc)
+{
+ return platform_gen_params(test, prev, desc);
+}
+EXPORT_SYMBOL_IF_KUNIT(xe_pci_fake_data_gen_params);
+
+static const struct xe_device_desc *lookup_desc(enum xe_platform p)
+{
+ const struct xe_device_desc *desc;
+ const struct pci_device_id *ids;
+
+ for (ids = pciidlist; ids->driver_data; ids++) {
+ desc = (const void *)ids->driver_data;
+ if (desc->platform == p)
+ return desc;
+ }
+ return NULL;
+}
+
+static const struct xe_subplatform_desc *lookup_sub_desc(enum xe_platform p, enum xe_subplatform s)
+{
+ const struct xe_device_desc *desc = lookup_desc(p);
+ const struct xe_subplatform_desc *spd;
+
+ if (desc && desc->subplatforms)
+ for (spd = desc->subplatforms; spd->subplatform; spd++)
+ if (spd->subplatform == s)
+ return spd;
+ return NULL;
+}
+
+static const char *lookup_platform_name(enum xe_platform p)
+{
+ const struct xe_device_desc *desc = lookup_desc(p);
+
+ return desc ? desc->platform_name : "INVALID";
+}
+
+static const char *__lookup_subplatform_name(enum xe_platform p, enum xe_subplatform s)
+{
+ const struct xe_subplatform_desc *desc = lookup_sub_desc(p, s);
+
+ return desc ? desc->name : "INVALID";
+}
+
+static const char *lookup_subplatform_name(enum xe_platform p, enum xe_subplatform s)
+{
+ return s == XE_SUBPLATFORM_NONE ? "" : __lookup_subplatform_name(p, s);
+}
+
+static const char *subplatform_prefix(enum xe_subplatform s)
+{
+ return s == XE_SUBPLATFORM_NONE ? "" : " ";
+}
+
+static const char *step_prefix(enum xe_step step)
+{
+ return step == STEP_NONE ? "" : " ";
+}
+
+static const char *step_name(enum xe_step step)
+{
+ return step == STEP_NONE ? "" : xe_step_name(step);
+}
+
+static const char *sriov_prefix(enum xe_sriov_mode mode)
+{
+ return mode <= XE_SRIOV_MODE_NONE ? "" : " ";
+}
+
+static const char *sriov_name(enum xe_sriov_mode mode)
+{
+ return mode <= XE_SRIOV_MODE_NONE ? "" : xe_sriov_mode_to_string(mode);
+}
+
+static const char *lookup_graphics_name(unsigned int verx100)
+{
+ const struct xe_ip *ip = find_graphics_ip(verx100);
+
+ return ip ? ip->name : "";
+}
+
+static const char *lookup_media_name(unsigned int verx100)
+{
+ const struct xe_ip *ip = find_media_ip(verx100);
+
+ return ip ? ip->name : "";
+}
+
+/**
+ * xe_pci_fake_data_desc - Describe struct xe_pci_fake_data parameter
+ * @param: the &struct xe_pci_fake_data parameter to describe
+ * @desc: output buffer with minimum size of KUNIT_PARAM_DESC_SIZE
+ *
+ * This function prepares a description of the struct xe_pci_fake_data
+ * parameter.
+ *
+ * It is tailored for use in parameterized KUnit tests where the parameter
+ * generator is based on struct xe_pci_fake_data arrays.
+ */
+void xe_pci_fake_data_desc(const struct xe_pci_fake_data *param, char *desc)
+{
+ if (param->graphics_verx100 || param->media_verx100)
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s%s%s %u.%02u(%s)%s%s %u.%02u(%s)%s%s%s%s",
+ lookup_platform_name(param->platform),
+ subplatform_prefix(param->subplatform),
+ lookup_subplatform_name(param->platform, param->subplatform),
+ param->graphics_verx100 / 100, param->graphics_verx100 % 100,
+ lookup_graphics_name(param->graphics_verx100),
+ step_prefix(param->step.graphics), step_name(param->step.graphics),
+ param->media_verx100 / 100, param->media_verx100 % 100,
+ lookup_media_name(param->media_verx100),
+ step_prefix(param->step.media), step_name(param->step.media),
+ sriov_prefix(param->sriov_mode), sriov_name(param->sriov_mode));
+ else
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s%s%s%s%s%s%s",
+ lookup_platform_name(param->platform),
+ subplatform_prefix(param->subplatform),
+ lookup_subplatform_name(param->platform, param->subplatform),
+ step_prefix(param->step.graphics), step_name(param->step.graphics),
+ sriov_prefix(param->sriov_mode), sriov_name(param->sriov_mode));
+}
+EXPORT_SYMBOL_IF_KUNIT(xe_pci_fake_data_desc);
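+
+/*
+ * Example outputs of the format above (assuming the descriptor's
+ * platform_name is "TIGERLAKE"): PLATFORM_CASE(TIGERLAKE, B0) yields
+ * "TIGERLAKE B0", while a GMDID case prints the verx100 values as
+ * major.minor, e.g. 1270 becomes "12.70", followed by the IP name and step.
+ */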
+
static void xe_ip_kunit_desc(const struct xe_ip *param, char *desc)
{
snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%u.%02u %s",
param->verx100 / 100, param->verx100 % 100, param->name);
}
+/*
+ * Pre-GMDID Graphics and Media IPs definitions.
+ *
+ * Mimic the way GMDID IPs are declared so the same
+ * param generator can be used for both
+ */
+static const struct xe_ip pre_gmdid_graphics_ips[] = {
+ graphics_ip_xelp,
+ graphics_ip_xelpp,
+ graphics_ip_xehpg,
+ graphics_ip_xehpc,
+};
+
+static const struct xe_ip pre_gmdid_media_ips[] = {
+ media_ip_xem,
+ media_ip_xehpm,
+};
+
+KUNIT_ARRAY_PARAM(pre_gmdid_graphics_ip, pre_gmdid_graphics_ips, xe_ip_kunit_desc);
+KUNIT_ARRAY_PARAM(pre_gmdid_media_ip, pre_gmdid_media_ips, xe_ip_kunit_desc);
+
KUNIT_ARRAY_PARAM(graphics_ip, graphics_ips, xe_ip_kunit_desc);
KUNIT_ARRAY_PARAM(media_ip, media_ips, xe_ip_kunit_desc);
@@ -44,9 +251,16 @@ KUNIT_ARRAY_PARAM(pci_id, pciidlist, xe_pci_id_kunit_desc);
*
* Return: pointer to the next parameter or NULL if no more parameters
*/
-const void *xe_pci_graphics_ip_gen_param(const void *prev, char *desc)
+const void *xe_pci_graphics_ip_gen_param(struct kunit *test, const void *prev, char *desc)
{
- return graphics_ip_gen_params(prev, desc);
+ const void *next = pre_gmdid_graphics_ip_gen_params(test, prev, desc);
+
+ if (next)
+ return next;
+ if (is_insidevar(prev, pre_gmdid_graphics_ips))
+ prev = NULL;
+
+ return graphics_ip_gen_params(test, prev, desc);
}
EXPORT_SYMBOL_IF_KUNIT(xe_pci_graphics_ip_gen_param);
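+
+/*
+ * Iteration sketch for the chained generator above: KUnit first walks
+ * pre_gmdid_graphics_ips[]; once that generator returns NULL,
+ * is_insidevar() detects that @prev still points into the pre-GMDID array
+ * and resets it to NULL, so graphics_ip_gen_params() restarts from the
+ * first GMDID-based entry in graphics_ips[].
+ */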
@@ -61,9 +275,16 @@ EXPORT_SYMBOL_IF_KUNIT(xe_pci_graphics_ip_gen_param);
*
* Return: pointer to the next parameter or NULL if no more parameters
*/
-const void *xe_pci_media_ip_gen_param(const void *prev, char *desc)
+const void *xe_pci_media_ip_gen_param(struct kunit *test, const void *prev, char *desc)
{
- return media_ip_gen_params(prev, desc);
+ const void *next = pre_gmdid_media_ip_gen_params(test, prev, desc);
+
+ if (next)
+ return next;
+ if (is_insidevar(prev, pre_gmdid_media_ips))
+ prev = NULL;
+
+ return media_ip_gen_params(test, prev, desc);
}
EXPORT_SYMBOL_IF_KUNIT(xe_pci_media_ip_gen_param);
@@ -78,9 +299,9 @@ EXPORT_SYMBOL_IF_KUNIT(xe_pci_media_ip_gen_param);
*
* Return: pointer to the next parameter or NULL if no more parameters
*/
-const void *xe_pci_id_gen_param(const void *prev, char *desc)
+const void *xe_pci_id_gen_param(struct kunit *test, const void *prev, char *desc)
{
- const struct pci_device_id *pci = pci_id_gen_params(prev, desc);
+ const struct pci_device_id *pci = pci_id_gen_params(test, prev, desc);
return pci->driver_data ? pci : NULL;
}
@@ -94,13 +315,18 @@ static void fake_read_gmdid(struct xe_device *xe, enum xe_gmdid_type type,
if (type == GMDID_MEDIA) {
*ver = data->media_verx100;
- *revid = xe_step_to_gmdid(data->media_step);
+ *revid = xe_step_to_gmdid(data->step.media);
} else {
*ver = data->graphics_verx100;
- *revid = xe_step_to_gmdid(data->graphics_step);
+ *revid = xe_step_to_gmdid(data->step.graphics);
}
}
+static void fake_xe_info_probe_tile_count(struct xe_device *xe)
+{
+ /* Nothing to do, just use the statically defined value. */
+}
+
int xe_pci_fake_device_init(struct xe_device *xe)
{
struct kunit *test = kunit_get_current_test();
@@ -138,6 +364,8 @@ done:
data->sriov_mode : XE_SRIOV_MODE_NONE;
kunit_activate_static_stub(test, read_gmdid, fake_read_gmdid);
+ kunit_activate_static_stub(test, xe_info_probe_tile_count,
+ fake_xe_info_probe_tile_count);
xe_info_init_early(xe, desc, subplatform_desc);
xe_info_init(xe, desc);
@@ -159,7 +387,7 @@ EXPORT_SYMBOL_IF_KUNIT(xe_pci_fake_device_init);
* Return: pointer to the next &struct xe_device ready to be used as a parameter
* or NULL if there are no more Xe devices on the system.
*/
-const void *xe_pci_live_device_gen_param(const void *prev, char *desc)
+const void *xe_pci_live_device_gen_param(struct kunit *test, const void *prev, char *desc)
{
const struct xe_device *xe = prev;
struct device *dev = xe ? xe->drm.dev : NULL;
diff --git a/drivers/gpu/drm/xe/tests/xe_pci_test.h b/drivers/gpu/drm/xe/tests/xe_pci_test.h
index ce4d2b86b778..30505d1cbefc 100644
--- a/drivers/gpu/drm/xe/tests/xe_pci_test.h
+++ b/drivers/gpu/drm/xe/tests/xe_pci_test.h
@@ -7,9 +7,11 @@
#define _XE_PCI_TEST_H_
#include <linux/types.h>
+#include <kunit/test.h>
#include "xe_platform_types.h"
#include "xe_sriov_types.h"
+#include "xe_step_types.h"
struct xe_device;
@@ -17,17 +19,18 @@ struct xe_pci_fake_data {
enum xe_sriov_mode sriov_mode;
enum xe_platform platform;
enum xe_subplatform subplatform;
+ struct xe_step_info step;
u32 graphics_verx100;
u32 media_verx100;
- u32 graphics_step;
- u32 media_step;
};
int xe_pci_fake_device_init(struct xe_device *xe);
+const void *xe_pci_fake_data_gen_params(struct kunit *test, const void *prev, char *desc);
+void xe_pci_fake_data_desc(const struct xe_pci_fake_data *param, char *desc);
-const void *xe_pci_graphics_ip_gen_param(const void *prev, char *desc);
-const void *xe_pci_media_ip_gen_param(const void *prev, char *desc);
-const void *xe_pci_id_gen_param(const void *prev, char *desc);
-const void *xe_pci_live_device_gen_param(const void *prev, char *desc);
+const void *xe_pci_graphics_ip_gen_param(struct kunit *test, const void *prev, char *desc);
+const void *xe_pci_media_ip_gen_param(struct kunit *test, const void *prev, char *desc);
+const void *xe_pci_id_gen_param(struct kunit *test, const void *prev, char *desc);
+const void *xe_pci_live_device_gen_param(struct kunit *test, const void *prev, char *desc);
#endif
diff --git a/drivers/gpu/drm/xe/tests/xe_wa_test.c b/drivers/gpu/drm/xe/tests/xe_wa_test.c
index c96d1fe34151..49d191043dfa 100644
--- a/drivers/gpu/drm/xe/tests/xe_wa_test.c
+++ b/drivers/gpu/drm/xe/tests/xe_wa_test.c
@@ -15,86 +15,10 @@
#include "xe_tuning.h"
#include "xe_wa.h"
-struct platform_test_case {
- const char *name;
- enum xe_platform platform;
- enum xe_subplatform subplatform;
- u32 graphics_verx100;
- u32 media_verx100;
- struct xe_step_info step;
-};
-
-#define PLATFORM_CASE(platform__, graphics_step__) \
- { \
- .name = #platform__ " (" #graphics_step__ ")", \
- .platform = XE_ ## platform__, \
- .subplatform = XE_SUBPLATFORM_NONE, \
- .step = { .graphics = STEP_ ## graphics_step__ } \
- }
-
-
-#define SUBPLATFORM_CASE(platform__, subplatform__, graphics_step__) \
- { \
- .name = #platform__ "_" #subplatform__ " (" #graphics_step__ ")", \
- .platform = XE_ ## platform__, \
- .subplatform = XE_SUBPLATFORM_ ## platform__ ## _ ## subplatform__, \
- .step = { .graphics = STEP_ ## graphics_step__ } \
- }
-
-#define GMDID_CASE(platform__, graphics_verx100__, graphics_step__, \
- media_verx100__, media_step__) \
- { \
- .name = #platform__ " (g:" #graphics_step__ ", m:" #media_step__ ")",\
- .platform = XE_ ## platform__, \
- .subplatform = XE_SUBPLATFORM_NONE, \
- .graphics_verx100 = graphics_verx100__, \
- .media_verx100 = media_verx100__, \
- .step = { .graphics = STEP_ ## graphics_step__, \
- .media = STEP_ ## media_step__ } \
- }
-
-static const struct platform_test_case cases[] = {
- PLATFORM_CASE(TIGERLAKE, B0),
- PLATFORM_CASE(DG1, A0),
- PLATFORM_CASE(DG1, B0),
- PLATFORM_CASE(ALDERLAKE_S, A0),
- PLATFORM_CASE(ALDERLAKE_S, B0),
- PLATFORM_CASE(ALDERLAKE_S, C0),
- PLATFORM_CASE(ALDERLAKE_S, D0),
- PLATFORM_CASE(ALDERLAKE_P, A0),
- PLATFORM_CASE(ALDERLAKE_P, B0),
- PLATFORM_CASE(ALDERLAKE_P, C0),
- SUBPLATFORM_CASE(ALDERLAKE_S, RPLS, D0),
- SUBPLATFORM_CASE(ALDERLAKE_P, RPLU, E0),
- SUBPLATFORM_CASE(DG2, G10, C0),
- SUBPLATFORM_CASE(DG2, G11, B1),
- SUBPLATFORM_CASE(DG2, G12, A1),
- GMDID_CASE(METEORLAKE, 1270, A0, 1300, A0),
- GMDID_CASE(METEORLAKE, 1271, A0, 1300, A0),
- GMDID_CASE(METEORLAKE, 1274, A0, 1300, A0),
- GMDID_CASE(LUNARLAKE, 2004, A0, 2000, A0),
- GMDID_CASE(LUNARLAKE, 2004, B0, 2000, A0),
- GMDID_CASE(BATTLEMAGE, 2001, A0, 1301, A1),
-};
-
-static void platform_desc(const struct platform_test_case *t, char *desc)
-{
- strscpy(desc, t->name, KUNIT_PARAM_DESC_SIZE);
-}
-
-KUNIT_ARRAY_PARAM(platform, cases, platform_desc);
-
static int xe_wa_test_init(struct kunit *test)
{
- const struct platform_test_case *param = test->param_value;
- struct xe_pci_fake_data data = {
- .platform = param->platform,
- .subplatform = param->subplatform,
- .graphics_verx100 = param->graphics_verx100,
- .media_verx100 = param->media_verx100,
- .graphics_step = param->step.graphics,
- .media_step = param->step.media,
- };
+ const struct xe_pci_fake_data *param = test->param_value;
+ struct xe_pci_fake_data data = *param;
struct xe_device *xe;
struct device *dev;
int ret;
@@ -119,13 +43,6 @@ static int xe_wa_test_init(struct kunit *test)
return 0;
}
-static void xe_wa_test_exit(struct kunit *test)
-{
- struct xe_device *xe = test->priv;
-
- drm_kunit_helper_free_device(test, xe->drm.dev);
-}
-
static void xe_wa_gt(struct kunit *test)
{
struct xe_device *xe = test->priv;
@@ -143,14 +60,13 @@ static void xe_wa_gt(struct kunit *test)
}
static struct kunit_case xe_wa_tests[] = {
- KUNIT_CASE_PARAM(xe_wa_gt, platform_gen_params),
+ KUNIT_CASE_PARAM(xe_wa_gt, xe_pci_fake_data_gen_params),
{}
};
static struct kunit_suite xe_rtp_test_suite = {
.name = "xe_wa",
.init = xe_wa_test_init,
- .exit = xe_wa_test_exit,
.test_cases = xe_wa_tests,
};
diff --git a/drivers/gpu/drm/xe/xe_assert.h b/drivers/gpu/drm/xe/xe_assert.h
index 68fe70ce2be3..a818eaa05b7d 100644
--- a/drivers/gpu/drm/xe/xe_assert.h
+++ b/drivers/gpu/drm/xe/xe_assert.h
@@ -12,6 +12,7 @@
#include "xe_gt_types.h"
#include "xe_step.h"
+#include "xe_vram.h"
/**
* DOC: Xe Asserts
@@ -145,7 +146,8 @@
const struct xe_tile *__tile = (tile); \
char __buf[10] __maybe_unused; \
xe_assert_msg(tile_to_xe(__tile), condition, "tile: %u VRAM %s\n" msg, \
- __tile->id, ({ string_get_size(__tile->mem.vram.actual_physical_size, 1, \
+ __tile->id, ({ string_get_size( \
+ xe_vram_region_actual_physical_size(__tile->mem.vram), 1, \
STRING_UNITS_2, __buf, sizeof(__buf)); __buf; }), ## arg); \
})
diff --git a/drivers/gpu/drm/xe/xe_bb.c b/drivers/gpu/drm/xe/xe_bb.c
index 5ce0e26822f2..6d20229c11de 100644
--- a/drivers/gpu/drm/xe/xe_bb.c
+++ b/drivers/gpu/drm/xe/xe_bb.c
@@ -60,6 +60,41 @@ err:
return ERR_PTR(err);
}
+struct xe_bb *xe_bb_ccs_new(struct xe_gt *gt, u32 dwords,
+ enum xe_sriov_vf_ccs_rw_ctxs ctx_id)
+{
+ struct xe_bb *bb = kmalloc(sizeof(*bb), GFP_KERNEL);
+ struct xe_device *xe = gt_to_xe(gt);
+ struct xe_sa_manager *bb_pool;
+ int err;
+
+ if (!bb)
+ return ERR_PTR(-ENOMEM);
+ /*
+ * We need to allocate space for the requested number of dwords &
+ * one additional MI_BATCH_BUFFER_END dword. Since the whole SA
+ * is submitted to HW, we need to make sure that the last instruction
+ * is not overwritten when the last chunk of the SA is allocated for the BB.
+ * So, this extra DW acts as a guard here.
+ */
+
+ bb_pool = xe->sriov.vf.ccs.contexts[ctx_id].mem.ccs_bb_pool;
+ bb->bo = xe_sa_bo_new(bb_pool, 4 * (dwords + 1));
+
+ if (IS_ERR(bb->bo)) {
+ err = PTR_ERR(bb->bo);
+ goto err;
+ }
+
+ bb->cs = xe_sa_bo_cpu_addr(bb->bo);
+ bb->len = 0;
+
+ return bb;
+err:
+ kfree(bb);
+ return ERR_PTR(err);
+}
+
static struct xe_sched_job *
__xe_bb_create_job(struct xe_exec_queue *q, struct xe_bb *bb, u64 *addr)
{
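A minimal usage sketch for xe_bb_ccs_new() above, assuming a VF context where the CCS BB pool is already initialized; example_emit_ccs_bb() and its MI_NOOP payload are illustrative only, not part of this series:

/* Hypothetical caller of xe_bb_ccs_new(); payload and context are made up. */
static int example_emit_ccs_bb(struct xe_gt *gt, u32 payload_dws)
{
	struct xe_bb *bb;

	/* The helper reserves one extra guard dword beyond payload_dws. */
	bb = xe_bb_ccs_new(gt, payload_dws, XE_SRIOV_VF_CCS_READ_CTX);
	if (IS_ERR(bb))
		return PTR_ERR(bb);

	/* Emit the payload; bb->len tracks the dwords written so far. */
	while (bb->len < payload_dws)
		bb->cs[bb->len++] = MI_NOOP;

	/* Terminate in the guard dword so HW stops at the end of this BB. */
	bb->cs[bb->len++] = MI_BATCH_BUFFER_END;

	return 0;
}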
diff --git a/drivers/gpu/drm/xe/xe_bb.h b/drivers/gpu/drm/xe/xe_bb.h
index b5cc65506696..2a8adc9a6dee 100644
--- a/drivers/gpu/drm/xe/xe_bb.h
+++ b/drivers/gpu/drm/xe/xe_bb.h
@@ -13,8 +13,11 @@ struct dma_fence;
struct xe_gt;
struct xe_exec_queue;
struct xe_sched_job;
+enum xe_sriov_vf_ccs_rw_ctxs;
struct xe_bb *xe_bb_new(struct xe_gt *gt, u32 dwords, bool usm);
+struct xe_bb *xe_bb_ccs_new(struct xe_gt *gt, u32 dwords,
+ enum xe_sriov_vf_ccs_rw_ctxs ctx_id);
struct xe_sched_job *xe_bb_create_job(struct xe_exec_queue *q,
struct xe_bb *bb);
struct xe_sched_job *xe_bb_create_migration_job(struct xe_exec_queue *q,
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index bae7ff2e5927..8422f3cab113 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -33,9 +33,11 @@
#include "xe_pxp.h"
#include "xe_res_cursor.h"
#include "xe_shrinker.h"
+#include "xe_sriov_vf_ccs.h"
#include "xe_trace_bo.h"
#include "xe_ttm_stolen_mgr.h"
#include "xe_vm.h"
+#include "xe_vram_types.h"
const char *const xe_mem_type_to_name[TTM_NUM_MEM_TYPES] = {
[XE_PL_SYSTEM] = "system",
@@ -200,6 +202,8 @@ static bool force_contiguous(u32 bo_flags)
else if (bo_flags & XE_BO_FLAG_PINNED &&
!(bo_flags & XE_BO_FLAG_PINNED_LATE_RESTORE))
return true; /* needs vmap */
+ else if (bo_flags & XE_BO_FLAG_CPU_ADDR_MIRROR)
+ return true;
/*
* For eviction / restore on suspend / resume objects pinned in VRAM
@@ -966,6 +970,20 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
dma_fence_put(fence);
xe_pm_runtime_put(xe);
+ /*
+	 * CCS metadata is migrated from TT -> SMEM, so detach the BBs from the
+	 * BO as they are no longer needed.
+ */
+ if (IS_VF_CCS_READY(xe) && old_mem_type == XE_PL_TT &&
+ new_mem->mem_type == XE_PL_SYSTEM)
+ xe_sriov_vf_ccs_detach_bo(bo);
+
+ if (IS_VF_CCS_READY(xe) &&
+ ((move_lacks_source && new_mem->mem_type == XE_PL_TT) ||
+ (old_mem_type == XE_PL_SYSTEM && new_mem->mem_type == XE_PL_TT)) &&
+ handle_system_ccs)
+ ret = xe_sriov_vf_ccs_attach_bo(bo);
+
out:
if ((!ttm_bo->resource || ttm_bo->resource->mem_type == XE_PL_SYSTEM) &&
ttm_bo->ttm) {
@@ -976,6 +994,9 @@ out:
if (timeout < 0)
ret = timeout;
+ if (IS_VF_CCS_READY(xe))
+ xe_sriov_vf_ccs_detach_bo(bo);
+
xe_tt_unmap_sg(xe, ttm_bo->ttm);
}
@@ -1120,42 +1141,47 @@ out_unref:
int xe_bo_notifier_prepare_pinned(struct xe_bo *bo)
{
struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
+ struct xe_validation_ctx ctx;
+ struct drm_exec exec;
struct xe_bo *backup;
int ret = 0;
- xe_bo_lock(bo, false);
+ xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.exclusive = true}, ret) {
+ ret = drm_exec_lock_obj(&exec, &bo->ttm.base);
+ drm_exec_retry_on_contention(&exec);
+ xe_assert(xe, !ret);
+ xe_assert(xe, !bo->backup_obj);
- xe_assert(xe, !bo->backup_obj);
+ /*
+ * Since this is called from the PM notifier we might have raced with
+		 * someone unpinning this after we dropped the pinned list lock and
+		 * before grabbing the above bo lock.
+ */
+ if (!xe_bo_is_pinned(bo))
+ break;
- /*
- * Since this is called from the PM notifier we might have raced with
- * someone unpinning this after we dropped the pinned list lock and
- * grabbing the above bo lock.
- */
- if (!xe_bo_is_pinned(bo))
- goto out_unlock_bo;
+ if (!xe_bo_is_vram(bo))
+ break;
- if (!xe_bo_is_vram(bo))
- goto out_unlock_bo;
+ if (bo->flags & XE_BO_FLAG_PINNED_NORESTORE)
+ break;
- if (bo->flags & XE_BO_FLAG_PINNED_NORESTORE)
- goto out_unlock_bo;
+ backup = xe_bo_init_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL, xe_bo_size(bo),
+ DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel,
+ XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS |
+ XE_BO_FLAG_PINNED, &exec);
+ if (IS_ERR(backup)) {
+ drm_exec_retry_on_contention(&exec);
+ ret = PTR_ERR(backup);
+ xe_validation_retry_on_oom(&ctx, &ret);
+ break;
+ }
- backup = ___xe_bo_create_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL, xe_bo_size(bo),
- DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel,
- XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS |
- XE_BO_FLAG_PINNED);
- if (IS_ERR(backup)) {
- ret = PTR_ERR(backup);
- goto out_unlock_bo;
+ backup->parent_obj = xe_bo_get(bo); /* Released by bo_destroy */
+ ttm_bo_pin(&backup->ttm);
+ bo->backup_obj = backup;
}
- backup->parent_obj = xe_bo_get(bo); /* Released by bo_destroy */
- ttm_bo_pin(&backup->ttm);
- bo->backup_obj = backup;
-
-out_unlock_bo:
- xe_bo_unlock(bo);
return ret;
}
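The conversion above is one instance of a locking idiom repeated throughout this series. A minimal skeleton of the transaction shape, using only the helpers this patch introduces; the caller and its body are illustrative, not real code from the series:

/* Illustrative skeleton of the xe_validation_guard() transaction pattern. */
static int example_validation_transaction(struct xe_device *xe, struct xe_bo *bo)
{
	struct xe_validation_ctx ctx;
	struct drm_exec exec;
	int ret = 0;

	xe_validation_guard(&ctx, &xe->val, &exec,
			    (struct xe_val_flags) {.interruptible = true}, ret) {
		/* On contention, drop the locks and re-run the guarded body. */
		ret = drm_exec_lock_obj(&exec, &bo->ttm.base);
		drm_exec_retry_on_contention(&exec);
		if (ret)
			break;

		/* On -ENOMEM, retry the whole transaction after eviction. */
		ret = xe_bo_validate(bo, NULL, false, &exec);
		drm_exec_retry_on_contention(&exec);
		xe_validation_retry_on_oom(&ctx, &ret);
	}

	return ret;
}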
@@ -1181,57 +1207,12 @@ int xe_bo_notifier_unprepare_pinned(struct xe_bo *bo)
return 0;
}
-/**
- * xe_bo_evict_pinned() - Evict a pinned VRAM object to system memory
- * @bo: The buffer object to move.
- *
- * On successful completion, the object memory will be moved to system memory.
- *
- * This is needed to for special handling of pinned VRAM object during
- * suspend-resume.
- *
- * Return: 0 on success. Negative error code on failure.
- */
-int xe_bo_evict_pinned(struct xe_bo *bo)
+static int xe_bo_evict_pinned_copy(struct xe_bo *bo, struct xe_bo *backup)
{
- struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
- struct xe_bo *backup = bo->backup_obj;
- bool backup_created = false;
+ struct xe_device *xe = xe_bo_device(bo);
bool unmap = false;
int ret = 0;
- xe_bo_lock(bo, false);
-
- if (WARN_ON(!bo->ttm.resource)) {
- ret = -EINVAL;
- goto out_unlock_bo;
- }
-
- if (WARN_ON(!xe_bo_is_pinned(bo))) {
- ret = -EINVAL;
- goto out_unlock_bo;
- }
-
- if (!xe_bo_is_vram(bo))
- goto out_unlock_bo;
-
- if (bo->flags & XE_BO_FLAG_PINNED_NORESTORE)
- goto out_unlock_bo;
-
- if (!backup) {
- backup = ___xe_bo_create_locked(xe, NULL, NULL, bo->ttm.base.resv,
- NULL, xe_bo_size(bo),
- DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel,
- XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS |
- XE_BO_FLAG_PINNED);
- if (IS_ERR(backup)) {
- ret = PTR_ERR(backup);
- goto out_unlock_bo;
- }
- backup->parent_obj = xe_bo_get(bo); /* Released by bo_destroy */
- backup_created = true;
- }
-
if (xe_bo_is_user(bo) || (bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE)) {
struct xe_migrate *migrate;
struct dma_fence *fence;
@@ -1241,14 +1222,11 @@ int xe_bo_evict_pinned(struct xe_bo *bo)
else
migrate = mem_type_to_migrate(xe, bo->ttm.resource->mem_type);
+ xe_assert(xe, bo->ttm.base.resv == backup->ttm.base.resv);
ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1);
if (ret)
goto out_backup;
- ret = dma_resv_reserve_fences(backup->ttm.base.resv, 1);
- if (ret)
- goto out_backup;
-
fence = xe_migrate_copy(migrate, bo, backup, bo->ttm.resource,
backup->ttm.resource, false);
if (IS_ERR(fence)) {
@@ -1258,8 +1236,6 @@ int xe_bo_evict_pinned(struct xe_bo *bo)
dma_resv_add_fence(bo->ttm.base.resv, fence,
DMA_RESV_USAGE_KERNEL);
- dma_resv_add_fence(backup->ttm.base.resv, fence,
- DMA_RESV_USAGE_KERNEL);
dma_fence_put(fence);
} else {
ret = xe_bo_vmap(backup);
@@ -1269,7 +1245,7 @@ int xe_bo_evict_pinned(struct xe_bo *bo)
if (iosys_map_is_null(&bo->vmap)) {
ret = xe_bo_vmap(bo);
if (ret)
- goto out_backup;
+ goto out_vunmap;
unmap = true;
}
@@ -1279,15 +1255,78 @@ int xe_bo_evict_pinned(struct xe_bo *bo)
if (!bo->backup_obj)
bo->backup_obj = backup;
-
-out_backup:
+out_vunmap:
xe_bo_vunmap(backup);
- if (ret && backup_created)
- xe_bo_put(backup);
-out_unlock_bo:
+out_backup:
if (unmap)
xe_bo_vunmap(bo);
- xe_bo_unlock(bo);
+
+ return ret;
+}
+
+/**
+ * xe_bo_evict_pinned() - Evict a pinned VRAM object to system memory
+ * @bo: The buffer object to move.
+ *
+ * On successful completion, the object memory will be moved to system memory.
+ *
+ * This is needed for special handling of pinned VRAM objects during
+ * suspend-resume.
+ *
+ * Return: 0 on success. Negative error code on failure.
+ */
+int xe_bo_evict_pinned(struct xe_bo *bo)
+{
+ struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
+ struct xe_validation_ctx ctx;
+ struct drm_exec exec;
+ struct xe_bo *backup = bo->backup_obj;
+ bool backup_created = false;
+ int ret = 0;
+
+ xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.exclusive = true}, ret) {
+ ret = drm_exec_lock_obj(&exec, &bo->ttm.base);
+ drm_exec_retry_on_contention(&exec);
+ xe_assert(xe, !ret);
+
+ if (WARN_ON(!bo->ttm.resource)) {
+ ret = -EINVAL;
+ break;
+ }
+
+ if (WARN_ON(!xe_bo_is_pinned(bo))) {
+ ret = -EINVAL;
+ break;
+ }
+
+ if (!xe_bo_is_vram(bo))
+ break;
+
+ if (bo->flags & XE_BO_FLAG_PINNED_NORESTORE)
+ break;
+
+ if (!backup) {
+ backup = xe_bo_init_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL,
+ xe_bo_size(bo),
+ DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel,
+ XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS |
+ XE_BO_FLAG_PINNED, &exec);
+ if (IS_ERR(backup)) {
+ drm_exec_retry_on_contention(&exec);
+ ret = PTR_ERR(backup);
+ xe_validation_retry_on_oom(&ctx, &ret);
+ break;
+ }
+ backup->parent_obj = xe_bo_get(bo); /* Released by bo_destroy */
+ backup_created = true;
+ }
+
+ ret = xe_bo_evict_pinned_copy(bo, backup);
+ }
+
+ if (ret && backup_created)
+ xe_bo_put(backup);
+
return ret;
}
@@ -1337,10 +1376,6 @@ int xe_bo_restore_pinned(struct xe_bo *bo)
if (ret)
goto out_unlock_bo;
- ret = dma_resv_reserve_fences(backup->ttm.base.resv, 1);
- if (ret)
- goto out_unlock_bo;
-
fence = xe_migrate_copy(migrate, backup, bo,
backup->ttm.resource, bo->ttm.resource,
false);
@@ -1351,8 +1386,6 @@ int xe_bo_restore_pinned(struct xe_bo *bo)
dma_resv_add_fence(bo->ttm.base.resv, fence,
DMA_RESV_USAGE_KERNEL);
- dma_resv_add_fence(backup->ttm.base.resv, fence,
- DMA_RESV_USAGE_KERNEL);
dma_fence_put(fence);
} else {
ret = xe_bo_vmap(backup);
@@ -1503,9 +1536,14 @@ static void xe_ttm_bo_release_notify(struct ttm_buffer_object *ttm_bo)
static void xe_ttm_bo_delete_mem_notify(struct ttm_buffer_object *ttm_bo)
{
+ struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
+
if (!xe_bo_is_xe_bo(ttm_bo))
return;
+ if (IS_VF_CCS_READY(ttm_to_xe_device(ttm_bo->bdev)))
+ xe_sriov_vf_ccs_detach_bo(bo);
+
/*
* Object is idle and about to be destroyed. Release the
* dma-buf attachment.
@@ -1687,50 +1725,246 @@ static void xe_gem_object_close(struct drm_gem_object *obj,
}
}
-static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
+static bool should_migrate_to_smem(struct xe_bo *bo)
+{
+ /*
+ * NOTE: The following atomic checks are platform-specific. For example,
+ * if a device supports CXL atomics, these may not be necessary or
+ * may behave differently.
+ */
+
+ return bo->attr.atomic_access == DRM_XE_ATOMIC_GLOBAL ||
+ bo->attr.atomic_access == DRM_XE_ATOMIC_CPU;
+}
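The atomic-access attribute checked here is set elsewhere under the bo's dma-resv lock; a hypothetical setter might look like this (illustrative only, not part of this patch):

/* Hypothetical: request CPU atomics on a bo; faults then migrate it to SMEM. */
static void example_set_cpu_atomic(struct xe_bo *bo)
{
	xe_bo_assert_held(bo);	/* attr is protected by the bo dma-resv lock */
	bo->attr.atomic_access = DRM_XE_ATOMIC_CPU;
}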
+
+/* Populate the bo if swapped out, or migrate if the access mode requires that. */
+static int xe_bo_fault_migrate(struct xe_bo *bo, struct ttm_operation_ctx *ctx,
+ struct drm_exec *exec)
+{
+ struct ttm_buffer_object *tbo = &bo->ttm;
+ int err = 0;
+
+ if (ttm_manager_type(tbo->bdev, tbo->resource->mem_type)->use_tt) {
+ xe_assert(xe_bo_device(bo),
+ dma_resv_test_signaled(tbo->base.resv, DMA_RESV_USAGE_KERNEL) ||
+ (tbo->ttm && ttm_tt_is_populated(tbo->ttm)));
+ err = ttm_bo_populate(&bo->ttm, ctx);
+ } else if (should_migrate_to_smem(bo)) {
+ xe_assert(xe_bo_device(bo), bo->flags & XE_BO_FLAG_SYSTEM);
+ err = xe_bo_migrate(bo, XE_PL_TT, ctx, exec);
+ }
+
+ return err;
+}
+
+/* Call into TTM to populate PTEs, and register bo for PTE removal on runtime suspend. */
+static vm_fault_t __xe_bo_cpu_fault(struct vm_fault *vmf, struct xe_device *xe, struct xe_bo *bo)
+{
+ vm_fault_t ret;
+
+ trace_xe_bo_cpu_fault(bo);
+
+ ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
+ TTM_BO_VM_NUM_PREFAULT);
+ /*
+	 * By the time TTM is actually called to insert PTEs, no blocking conditions
+	 * should remain; otherwise TTM may drop locks and return VM_FAULT_RETRY.
+ */
+ xe_assert(xe, ret != VM_FAULT_RETRY);
+
+ if (ret == VM_FAULT_NOPAGE &&
+ mem_type_is_vram(bo->ttm.resource->mem_type)) {
+ mutex_lock(&xe->mem_access.vram_userfault.lock);
+ if (list_empty(&bo->vram_userfault_link))
+ list_add(&bo->vram_userfault_link,
+ &xe->mem_access.vram_userfault.list);
+ mutex_unlock(&xe->mem_access.vram_userfault.lock);
+ }
+
+ return ret;
+}
+
+static vm_fault_t xe_err_to_fault_t(int err)
+{
+ switch (err) {
+ case 0:
+ case -EINTR:
+ case -ERESTARTSYS:
+ case -EAGAIN:
+ return VM_FAULT_NOPAGE;
+ case -ENOMEM:
+ case -ENOSPC:
+ return VM_FAULT_OOM;
+ default:
+ break;
+ }
+ return VM_FAULT_SIGBUS;
+}
+
+static bool xe_ttm_bo_is_imported(struct ttm_buffer_object *tbo)
+{
+ dma_resv_assert_held(tbo->base.resv);
+
+ return tbo->ttm &&
+ (tbo->ttm->page_flags & (TTM_TT_FLAG_EXTERNAL | TTM_TT_FLAG_EXTERNAL_MAPPABLE)) ==
+ TTM_TT_FLAG_EXTERNAL;
+}
+
+static vm_fault_t xe_bo_cpu_fault_fastpath(struct vm_fault *vmf, struct xe_device *xe,
+ struct xe_bo *bo, bool needs_rpm)
+{
+ struct ttm_buffer_object *tbo = &bo->ttm;
+ vm_fault_t ret = VM_FAULT_RETRY;
+ struct xe_validation_ctx ctx;
+ struct ttm_operation_ctx tctx = {
+ .interruptible = true,
+ .no_wait_gpu = true,
+ .gfp_retry_mayfail = true,
+
+ };
+ int err;
+
+ if (needs_rpm && !xe_pm_runtime_get_if_active(xe))
+ return VM_FAULT_RETRY;
+
+ err = xe_validation_ctx_init(&ctx, &xe->val, NULL,
+ (struct xe_val_flags) {
+ .interruptible = true,
+ .no_block = true
+ });
+ if (err)
+ goto out_pm;
+
+ if (!dma_resv_trylock(tbo->base.resv))
+ goto out_validation;
+
+ if (xe_ttm_bo_is_imported(tbo)) {
+ ret = VM_FAULT_SIGBUS;
+ drm_dbg(&xe->drm, "CPU trying to access an imported buffer object.\n");
+ goto out_unlock;
+ }
+
+ err = xe_bo_fault_migrate(bo, &tctx, NULL);
+ if (err) {
+ /* Return VM_FAULT_RETRY on these errors. */
+ if (err != -ENOMEM && err != -ENOSPC && err != -EBUSY)
+ ret = xe_err_to_fault_t(err);
+ goto out_unlock;
+ }
+
+ if (dma_resv_test_signaled(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL))
+ ret = __xe_bo_cpu_fault(vmf, xe, bo);
+
+out_unlock:
+ dma_resv_unlock(tbo->base.resv);
+out_validation:
+ xe_validation_ctx_fini(&ctx);
+out_pm:
+ if (needs_rpm)
+ xe_pm_runtime_put(xe);
+
+ return ret;
+}
+
+static vm_fault_t xe_bo_cpu_fault(struct vm_fault *vmf)
{
struct ttm_buffer_object *tbo = vmf->vma->vm_private_data;
struct drm_device *ddev = tbo->base.dev;
struct xe_device *xe = to_xe_device(ddev);
struct xe_bo *bo = ttm_to_xe_bo(tbo);
bool needs_rpm = bo->flags & XE_BO_FLAG_VRAM_MASK;
+ bool retry_after_wait = false;
+ struct xe_validation_ctx ctx;
+ struct drm_exec exec;
vm_fault_t ret;
+ int err = 0;
int idx;
- if (needs_rpm)
- xe_pm_runtime_get(xe);
+ if (!drm_dev_enter(&xe->drm, &idx))
+ return ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
- ret = ttm_bo_vm_reserve(tbo, vmf);
- if (ret)
+ ret = xe_bo_cpu_fault_fastpath(vmf, xe, bo, needs_rpm);
+ if (ret != VM_FAULT_RETRY)
goto out;
- if (drm_dev_enter(ddev, &idx)) {
- trace_xe_bo_cpu_fault(bo);
-
- ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
- TTM_BO_VM_NUM_PREFAULT);
- drm_dev_exit(idx);
+ if (fault_flag_allow_retry_first(vmf->flags)) {
+ if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
+ goto out;
+ retry_after_wait = true;
+ xe_bo_get(bo);
+ mmap_read_unlock(vmf->vma->vm_mm);
} else {
- ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
+ ret = VM_FAULT_NOPAGE;
}
- if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
- goto out;
/*
- * ttm_bo_vm_reserve() already has dma_resv_lock.
+ * The fastpath failed and we were not required to return and retry immediately.
+ * We're now running in one of two modes:
+ *
+	 * 1) retry_after_wait == true: The mmap_read_lock() has been dropped, so we
+	 * can only resolve the blocking waits here, not the fault itself. After the
+	 * fault is retried, the aim is for the fastpath to succeed, although it may
+	 * still fail since the bo lock is dropped in between.
+ *
+ * 2) retry_after_wait == false: The fastpath failed, typically even after
+ * a retry. Do whatever's necessary to resolve the fault.
+ *
+ * This construct is recommended to avoid excessive waits under the mmap_lock.
*/
- if (ret == VM_FAULT_NOPAGE && mem_type_is_vram(tbo->resource->mem_type)) {
- mutex_lock(&xe->mem_access.vram_userfault.lock);
- if (list_empty(&bo->vram_userfault_link))
- list_add(&bo->vram_userfault_link, &xe->mem_access.vram_userfault.list);
- mutex_unlock(&xe->mem_access.vram_userfault.lock);
+
+ if (needs_rpm)
+ xe_pm_runtime_get(xe);
+
+ xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.interruptible = true},
+ err) {
+ struct ttm_operation_ctx tctx = {
+ .interruptible = true,
+ .no_wait_gpu = false,
+ .gfp_retry_mayfail = retry_after_wait,
+ };
+ long lerr;
+
+ err = drm_exec_lock_obj(&exec, &tbo->base);
+ drm_exec_retry_on_contention(&exec);
+ if (err)
+ break;
+
+ if (xe_ttm_bo_is_imported(tbo)) {
+ err = -EFAULT;
+ drm_dbg(&xe->drm, "CPU trying to access an imported buffer object.\n");
+ break;
+ }
+
+ err = xe_bo_fault_migrate(bo, &tctx, &exec);
+ if (err) {
+ drm_exec_retry_on_contention(&exec);
+ xe_validation_retry_on_oom(&ctx, &err);
+ break;
+ }
+
+ lerr = dma_resv_wait_timeout(tbo->base.resv,
+ DMA_RESV_USAGE_KERNEL, true,
+ MAX_SCHEDULE_TIMEOUT);
+ if (lerr < 0) {
+ err = lerr;
+ break;
+ }
+
+ if (!retry_after_wait)
+ ret = __xe_bo_cpu_fault(vmf, xe, bo);
}
+ /* if retry_after_wait == true, we *must* return VM_FAULT_RETRY. */
+ if (err && !retry_after_wait)
+ ret = xe_err_to_fault_t(err);
- dma_resv_unlock(tbo->base.resv);
-out:
if (needs_rpm)
xe_pm_runtime_put(xe);
+ if (retry_after_wait)
+ xe_bo_put(bo);
+out:
+ drm_dev_exit(idx);
+
return ret;
}
@@ -1774,7 +2008,7 @@ int xe_bo_read(struct xe_bo *bo, u64 offset, void *dst, int size)
}
static const struct vm_operations_struct xe_gem_vm_ops = {
- .fault = xe_gem_fault,
+ .fault = xe_bo_cpu_fault,
.open = ttm_bo_vm_open,
.close = ttm_bo_vm_close,
.access = xe_bo_vm_access,
@@ -1822,11 +2056,32 @@ void xe_bo_free(struct xe_bo *bo)
kfree(bo);
}
-struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
- struct xe_tile *tile, struct dma_resv *resv,
- struct ttm_lru_bulk_move *bulk, size_t size,
- u16 cpu_caching, enum ttm_bo_type type,
- u32 flags)
+/**
+ * xe_bo_init_locked() - Initialize or create an xe_bo.
+ * @xe: The xe device.
+ * @bo: An already allocated buffer object or NULL
+ * if the function should allocate a new one.
+ * @tile: The tile to select for migration of this bo, and the tile used for
+ * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
+ * @resv: Pointer to a locked shared reservation object to use for this bo,
+ * or NULL for the xe_bo to use its own.
+ * @bulk: The bulk move to use for LRU bumping, or NULL for external bos.
+ * @size: The storage size to use for the bo.
+ * @cpu_caching: The cpu caching used for system memory backing store.
+ * @type: The TTM buffer object type.
+ * @flags: XE_BO_FLAG_ flags.
+ * @exec: The drm_exec transaction to use for exhaustive eviction.
+ *
+ * Initialize or create an xe buffer object. On failure, any allocated buffer
+ * object passed in @bo will have been unreferenced.
+ *
+ * Return: The buffer object on success. Negative error pointer on failure.
+ */
+struct xe_bo *xe_bo_init_locked(struct xe_device *xe, struct xe_bo *bo,
+ struct xe_tile *tile, struct dma_resv *resv,
+ struct ttm_lru_bulk_move *bulk, size_t size,
+ u16 cpu_caching, enum ttm_bo_type type,
+ u32 flags, struct drm_exec *exec)
{
struct ttm_operation_ctx ctx = {
.interruptible = true,
@@ -1895,6 +2150,7 @@ struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
ctx.resv = resv;
}
+ xe_validation_assert_exec(xe, exec, &bo->ttm.base);
if (!(flags & XE_BO_FLAG_FIXED_PLACEMENT)) {
err = __xe_bo_placement_for_flags(xe, bo, bo->flags);
if (WARN_ON(err)) {
@@ -1996,7 +2252,7 @@ __xe_bo_create_locked(struct xe_device *xe,
struct xe_tile *tile, struct xe_vm *vm,
size_t size, u64 start, u64 end,
u16 cpu_caching, enum ttm_bo_type type, u32 flags,
- u64 alignment)
+ u64 alignment, struct drm_exec *exec)
{
struct xe_bo *bo = NULL;
int err;
@@ -2017,11 +2273,11 @@ __xe_bo_create_locked(struct xe_device *xe,
}
}
- bo = ___xe_bo_create_locked(xe, bo, tile, vm ? xe_vm_resv(vm) : NULL,
- vm && !xe_vm_in_fault_mode(vm) &&
- flags & XE_BO_FLAG_USER ?
- &vm->lru_bulk_move : NULL, size,
- cpu_caching, type, flags);
+ bo = xe_bo_init_locked(xe, bo, tile, vm ? xe_vm_resv(vm) : NULL,
+ vm && !xe_vm_in_fault_mode(vm) &&
+ flags & XE_BO_FLAG_USER ?
+ &vm->lru_bulk_move : NULL, size,
+ cpu_caching, type, flags, exec);
if (IS_ERR(bo))
return bo;
@@ -2055,9 +2311,10 @@ __xe_bo_create_locked(struct xe_device *xe,
if (flags & XE_BO_FLAG_FIXED_PLACEMENT) {
err = xe_ggtt_insert_bo_at(t->mem.ggtt, bo,
- start + xe_bo_size(bo), U64_MAX);
+ start + xe_bo_size(bo), U64_MAX,
+ exec);
} else {
- err = xe_ggtt_insert_bo(t->mem.ggtt, bo);
+ err = xe_ggtt_insert_bo(t->mem.ggtt, bo, exec);
}
if (err)
goto err_unlock_put_bo;
@@ -2074,82 +2331,166 @@ err_unlock_put_bo:
return ERR_PTR(err);
}
-struct xe_bo *
-xe_bo_create_locked_range(struct xe_device *xe,
- struct xe_tile *tile, struct xe_vm *vm,
- size_t size, u64 start, u64 end,
- enum ttm_bo_type type, u32 flags, u64 alignment)
-{
- return __xe_bo_create_locked(xe, tile, vm, size, start, end, 0, type,
- flags, alignment);
-}
-
+/**
+ * xe_bo_create_locked() - Create a BO
+ * @xe: The xe device.
+ * @tile: The tile to select for migration of this bo, and the tile used for
+ * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
+ * @vm: The local vm or NULL for external objects.
+ * @size: The storage size to use for the bo.
+ * @type: The TTM buffer object type.
+ * @flags: XE_BO_FLAG_ flags.
+ * @exec: The drm_exec transaction to use for exhaustive eviction.
+ *
+ * Create a locked xe BO with no range- nor alignment restrictions.
+ *
+ * Return: The buffer object on success. Negative error pointer on failure.
+ */
struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile,
struct xe_vm *vm, size_t size,
- enum ttm_bo_type type, u32 flags)
+ enum ttm_bo_type type, u32 flags,
+ struct drm_exec *exec)
{
return __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL, 0, type,
- flags, 0);
+ flags, 0, exec);
}
-struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_tile *tile,
- struct xe_vm *vm, size_t size,
- u16 cpu_caching,
- u32 flags)
+static struct xe_bo *xe_bo_create_novm(struct xe_device *xe, struct xe_tile *tile,
+ size_t size, u16 cpu_caching,
+ enum ttm_bo_type type, u32 flags,
+ u64 alignment, bool intr)
{
- struct xe_bo *bo = __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL,
- cpu_caching, ttm_bo_type_device,
- flags | XE_BO_FLAG_USER, 0);
- if (!IS_ERR(bo))
- xe_bo_unlock_vm_held(bo);
+ struct xe_validation_ctx ctx;
+ struct drm_exec exec;
+ struct xe_bo *bo;
+ int ret = 0;
- return bo;
+ xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.interruptible = intr},
+ ret) {
+ bo = __xe_bo_create_locked(xe, tile, NULL, size, 0, ~0ULL,
+ cpu_caching, type, flags, alignment, &exec);
+ drm_exec_retry_on_contention(&exec);
+ if (IS_ERR(bo)) {
+ ret = PTR_ERR(bo);
+ xe_validation_retry_on_oom(&ctx, &ret);
+ } else {
+ xe_bo_unlock(bo);
+ }
+ }
+
+ return ret ? ERR_PTR(ret) : bo;
}
-struct xe_bo *xe_bo_create(struct xe_device *xe, struct xe_tile *tile,
- struct xe_vm *vm, size_t size,
- enum ttm_bo_type type, u32 flags)
+/**
+ * xe_bo_create_user() - Create a user BO
+ * @xe: The xe device.
+ * @vm: The local vm or NULL for external objects.
+ * @size: The storage size to use for the bo.
+ * @cpu_caching: The caching mode to be used for system backing store.
+ * @flags: XE_BO_FLAG_ flags.
+ * @exec: The drm_exec transaction to use for exhaustive eviction, or NULL
+ * if such a transaction should be initiated by the call.
+ *
+ * Create a bo on behalf of user-space.
+ *
+ * Return: The buffer object on success. Negative error pointer on failure.
+ */
+struct xe_bo *xe_bo_create_user(struct xe_device *xe,
+ struct xe_vm *vm, size_t size,
+ u16 cpu_caching,
+ u32 flags, struct drm_exec *exec)
{
- struct xe_bo *bo = xe_bo_create_locked(xe, tile, vm, size, type, flags);
+ struct xe_bo *bo;
- if (!IS_ERR(bo))
- xe_bo_unlock_vm_held(bo);
+ flags |= XE_BO_FLAG_USER;
+
+ if (vm || exec) {
+ xe_assert(xe, exec);
+ bo = __xe_bo_create_locked(xe, NULL, vm, size, 0, ~0ULL,
+ cpu_caching, ttm_bo_type_device,
+ flags, 0, exec);
+ if (!IS_ERR(bo))
+ xe_bo_unlock_vm_held(bo);
+ } else {
+ bo = xe_bo_create_novm(xe, NULL, size, cpu_caching,
+ ttm_bo_type_device, flags, 0, true);
+ }
return bo;
}
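Two hypothetical callers, sketching the two modes the kernel-doc above describes; the names and flag choices are illustrative:

/* (a) Within an existing transaction: @vm's resv already locked via @exec. */
static struct xe_bo *example_create_vm_bo(struct xe_device *xe, struct xe_vm *vm,
					  struct drm_exec *exec, size_t size)
{
	return xe_bo_create_user(xe, vm, size, DRM_XE_GEM_CPU_CACHING_WB,
				 XE_BO_FLAG_SYSTEM, exec);
}

/* (b) Standalone: NULL vm and NULL exec; the helper opens its own transaction. */
static struct xe_bo *example_create_external_bo(struct xe_device *xe, size_t size)
{
	return xe_bo_create_user(xe, NULL, size, DRM_XE_GEM_CPU_CACHING_WC,
				 XE_BO_FLAG_SYSTEM, NULL);
}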
-struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile,
- struct xe_vm *vm,
- size_t size, u64 offset,
- enum ttm_bo_type type, u32 flags)
+/**
+ * xe_bo_create_pin_range_novm() - Create and pin a BO with range options.
+ * @xe: The xe device.
+ * @tile: The tile to select for migration of this bo, and the tile used for
+ * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
+ * @size: The storage size to use for the bo.
+ * @start: Start of fixed VRAM range or 0.
+ * @end: End of fixed VRAM range or ~0ULL.
+ * @type: The TTM buffer object type.
+ * @flags: XE_BO_FLAG_ flags.
+ *
+ * Create and pin an Xe BO with range options. If @start and @end indicate
+ * a fixed VRAM range, this must be a ttm_bo_type_kernel bo with VRAM placement
+ * only.
+ *
+ * Return: The buffer object on success. Negative error pointer on failure.
+ */
+struct xe_bo *xe_bo_create_pin_range_novm(struct xe_device *xe, struct xe_tile *tile,
+ size_t size, u64 start, u64 end,
+ enum ttm_bo_type type, u32 flags)
{
- return xe_bo_create_pin_map_at_aligned(xe, tile, vm, size, offset,
- type, flags, 0);
+ struct xe_validation_ctx ctx;
+ struct drm_exec exec;
+ struct xe_bo *bo;
+ int err = 0;
+
+ xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {}, err) {
+ bo = __xe_bo_create_locked(xe, tile, NULL, size, start, end,
+ 0, type, flags, 0, &exec);
+ if (IS_ERR(bo)) {
+ drm_exec_retry_on_contention(&exec);
+ err = PTR_ERR(bo);
+ xe_validation_retry_on_oom(&ctx, &err);
+ break;
+ }
+
+ err = xe_bo_pin(bo, &exec);
+ xe_bo_unlock(bo);
+ if (err) {
+ xe_bo_put(bo);
+ drm_exec_retry_on_contention(&exec);
+ xe_validation_retry_on_oom(&ctx, &err);
+ break;
+ }
+ }
+
+ return err ? ERR_PTR(err) : bo;
}
-struct xe_bo *xe_bo_create_pin_map_at_aligned(struct xe_device *xe,
- struct xe_tile *tile,
- struct xe_vm *vm,
- size_t size, u64 offset,
- enum ttm_bo_type type, u32 flags,
- u64 alignment)
+static struct xe_bo *xe_bo_create_pin_map_at_aligned(struct xe_device *xe,
+ struct xe_tile *tile,
+ struct xe_vm *vm,
+ size_t size, u64 offset,
+ enum ttm_bo_type type, u32 flags,
+ u64 alignment, struct drm_exec *exec)
{
struct xe_bo *bo;
int err;
u64 start = offset == ~0ull ? 0 : offset;
- u64 end = offset == ~0ull ? offset : start + size;
+ u64 end = offset == ~0ull ? ~0ull : start + size;
if (flags & XE_BO_FLAG_STOLEN &&
xe_ttm_stolen_cpu_access_needs_ggtt(xe))
flags |= XE_BO_FLAG_GGTT;
- bo = xe_bo_create_locked_range(xe, tile, vm, size, start, end, type,
- flags | XE_BO_FLAG_NEEDS_CPU_ACCESS | XE_BO_FLAG_PINNED,
- alignment);
+ bo = __xe_bo_create_locked(xe, tile, vm, size, start, end, 0, type,
+ flags | XE_BO_FLAG_NEEDS_CPU_ACCESS | XE_BO_FLAG_PINNED,
+ alignment, exec);
if (IS_ERR(bo))
return bo;
- err = xe_bo_pin(bo);
+ err = xe_bo_pin(bo, exec);
if (err)
goto err_put;
@@ -2169,11 +2510,100 @@ err_put:
return ERR_PTR(err);
}
+/**
+ * xe_bo_create_pin_map_at_novm() - Create pinned and mapped bo at optional VRAM offset
+ * @xe: The xe device.
+ * @tile: The tile to select for migration of this bo, and the tile used for
+ * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
+ * @size: The storage size to use for the bo.
+ * @offset: Optional VRAM offset or %~0ull for don't care.
+ * @type: The TTM buffer object type.
+ * @flags: XE_BO_FLAG_ flags.
+ * @alignment: GGTT alignment.
+ * @intr: Whether to execute any waits for backing store interruptibly.
+ *
+ * Create a pinned and optionally mapped bo with VRAM offset and GGTT alignment
+ * options. The bo will be external and not associated with a VM.
+ *
+ * Return: The buffer object on success. Negative error pointer on failure.
+ * In particular, the function may return ERR_PTR(%-EINTR) if @intr was set
+ * to true on entry.
+ */
+struct xe_bo *
+xe_bo_create_pin_map_at_novm(struct xe_device *xe, struct xe_tile *tile,
+ size_t size, u64 offset, enum ttm_bo_type type, u32 flags,
+ u64 alignment, bool intr)
+{
+ struct xe_validation_ctx ctx;
+ struct drm_exec exec;
+ struct xe_bo *bo;
+ int ret = 0;
+
+ xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.interruptible = intr},
+ ret) {
+ bo = xe_bo_create_pin_map_at_aligned(xe, tile, NULL, size, offset,
+ type, flags, alignment, &exec);
+ if (IS_ERR(bo)) {
+ drm_exec_retry_on_contention(&exec);
+ ret = PTR_ERR(bo);
+ xe_validation_retry_on_oom(&ctx, &ret);
+ }
+ }
+
+ return ret ? ERR_PTR(ret) : bo;
+}
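A hypothetical caller of the helper above, pinning a kernel bo at a fixed VRAM offset; the offset, flags and type are illustrative:

/* Illustrative only: pinned, CPU-mapped kernel bo at VRAM offset 0. */
static struct xe_bo *example_fixed_vram_bo(struct xe_device *xe,
					   struct xe_tile *tile, size_t size)
{
	/* ~0ull for @offset would mean "don't care"; 0 requests a fixed offset. */
	return xe_bo_create_pin_map_at_novm(xe, tile, size, 0,
					    ttm_bo_type_kernel,
					    XE_BO_FLAG_VRAM_IF_DGFX(tile),
					    0, true);
}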
+
+/**
+ * xe_bo_create_pin_map() - Create pinned and mapped bo
+ * @xe: The xe device.
+ * @tile: The tile to select for migration of this bo, and the tile used for
+ * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
+ * @vm: The vm to associate the buffer object with. The vm's resv must be locked
+ * with the transaction represented by @exec.
+ * @size: The storage size to use for the bo.
+ * @type: The TTM buffer object type.
+ * @flags: XE_BO_FLAG_ flags.
+ * @exec: The drm_exec transaction to use for exhaustive eviction, and
+ * previously used for locking @vm's resv.
+ *
+ * Create a pinned and mapped bo attached to @vm.
+ *
+ * Return: The buffer object on success. Negative error pointer on failure.
+ * In particular, the function may return ERR_PTR(%-EINTR) if @exec was
+ * configured for interruptible locking.
+ */
struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
struct xe_vm *vm, size_t size,
- enum ttm_bo_type type, u32 flags)
+ enum ttm_bo_type type, u32 flags,
+ struct drm_exec *exec)
{
- return xe_bo_create_pin_map_at(xe, tile, vm, size, ~0ull, type, flags);
+ return xe_bo_create_pin_map_at_aligned(xe, tile, vm, size, ~0ull, type, flags,
+ 0, exec);
+}
+
+/**
+ * xe_bo_create_pin_map_novm() - Create pinned and mapped bo
+ * @xe: The xe device.
+ * @tile: The tile to select for migration of this bo, and the tile used for
+ * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
+ * @size: The storage size to use for the bo.
+ * @type: The TTM buffer object type.
+ * @flags: XE_BO_FLAG_ flags.
+ * @intr: Whether to execute any waits for backing store interruptibly.
+ *
+ * Create a pinned and mapped bo. The bo will be external and not associated
+ * with a VM.
+ *
+ * Return: The buffer object on success. Negative error pointer on failure.
+ * In particular, the function may return ERR_PTR(%-EINTR) if @intr was set
+ * to true on entry.
+ */
+struct xe_bo *xe_bo_create_pin_map_novm(struct xe_device *xe, struct xe_tile *tile,
+ size_t size, enum ttm_bo_type type, u32 flags,
+ bool intr)
+{
+ return xe_bo_create_pin_map_at_novm(xe, tile, size, ~0ull, type, flags, 0, intr);
}
static void __xe_bo_unpin_map_no_vm(void *arg)
@@ -2188,8 +2618,7 @@ struct xe_bo *xe_managed_bo_create_pin_map(struct xe_device *xe, struct xe_tile
int ret;
KUNIT_STATIC_STUB_REDIRECT(xe_managed_bo_create_pin_map, xe, tile, size, flags);
-
- bo = xe_bo_create_pin_map(xe, tile, NULL, size, ttm_bo_type_kernel, flags);
+ bo = xe_bo_create_pin_map_novm(xe, tile, size, ttm_bo_type_kernel, flags, true);
if (IS_ERR(bo))
return bo;
@@ -2200,6 +2629,11 @@ struct xe_bo *xe_managed_bo_create_pin_map(struct xe_device *xe, struct xe_tile
return bo;
}
+void xe_managed_bo_unpin_map_no_vm(struct xe_bo *bo)
+{
+ devm_release_action(xe_bo_device(bo)->drm.dev, __xe_bo_unpin_map_no_vm, bo);
+}
+
struct xe_bo *xe_managed_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile,
const void *data, size_t size, u32 flags)
{
@@ -2272,6 +2706,7 @@ uint64_t vram_region_gpu_offset(struct ttm_resource *res)
* xe_bo_pin_external - pin an external BO
* @bo: buffer object to be pinned
* @in_place: Pin in current placement, don't attempt to migrate.
+ * @exec: The drm_exec transaction to use for exhaustive eviction.
*
* Pin an external (not tied to a VM, can be exported via dma-buf / prime FD)
* BO. Unique call compared to xe_bo_pin as this function has it own set of
@@ -2279,7 +2714,7 @@ uint64_t vram_region_gpu_offset(struct ttm_resource *res)
*
* Returns 0 for success, negative error code otherwise.
*/
-int xe_bo_pin_external(struct xe_bo *bo, bool in_place)
+int xe_bo_pin_external(struct xe_bo *bo, bool in_place, struct drm_exec *exec)
{
struct xe_device *xe = xe_bo_device(bo);
int err;
@@ -2289,7 +2724,7 @@ int xe_bo_pin_external(struct xe_bo *bo, bool in_place)
if (!xe_bo_is_pinned(bo)) {
if (!in_place) {
- err = xe_bo_validate(bo, NULL, false);
+ err = xe_bo_validate(bo, NULL, false, exec);
if (err)
return err;
}
@@ -2312,7 +2747,17 @@ int xe_bo_pin_external(struct xe_bo *bo, bool in_place)
return 0;
}
-int xe_bo_pin(struct xe_bo *bo)
+/**
+ * xe_bo_pin() - Pin a kernel bo after potentially migrating it
+ * @bo: The kernel bo to pin.
+ * @exec: The drm_exec transaction to use for exhaustive eviction.
+ *
+ * Attempts to migrate a bo to @bo->placement. If that succeeds,
+ * pins the bo.
+ *
+ * Return: %0 on success, negative error code on migration failure.
+ */
+int xe_bo_pin(struct xe_bo *bo, struct drm_exec *exec)
{
struct ttm_place *place = &bo->placements[0];
struct xe_device *xe = xe_bo_device(bo);
@@ -2334,7 +2779,7 @@ int xe_bo_pin(struct xe_bo *bo)
/* We only expect at most 1 pin */
xe_assert(xe, !xe_bo_is_pinned(bo));
- err = xe_bo_validate(bo, NULL, false);
+ err = xe_bo_validate(bo, NULL, false, exec);
if (err)
return err;
@@ -2427,6 +2872,7 @@ void xe_bo_unpin(struct xe_bo *bo)
* NULL. Used together with @allow_res_evict.
* @allow_res_evict: Whether it's allowed to evict bos sharing @vm's
* reservation object.
+ * @exec: The drm_exec transaction to use for exhaustive eviction.
*
* Make sure the bo is in allowed placement, migrating it if necessary. If
* needed, other bos will be evicted. If bos selected for eviction shares
@@ -2436,7 +2882,8 @@ void xe_bo_unpin(struct xe_bo *bo)
* Return: 0 on success, negative error code on failure. May return
* -EINTR or -ERESTARTSYS if internal waits are interrupted by a signal.
*/
-int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict)
+int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict,
+ struct drm_exec *exec)
{
struct ttm_operation_ctx ctx = {
.interruptible = true,
@@ -2458,6 +2905,7 @@ int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict)
xe_vm_set_validating(vm, allow_res_evict);
trace_xe_bo_validate(bo);
+ xe_validation_assert_exec(xe_bo_device(bo), exec, &bo->ttm.base);
ret = ttm_bo_validate(&bo->ttm, &bo->placement, &ctx);
xe_vm_clear_validating(vm, allow_res_evict);
@@ -2653,8 +3101,9 @@ int xe_gem_create_ioctl(struct drm_device *dev, void *data,
struct xe_device *xe = to_xe_device(dev);
struct xe_file *xef = to_xe_file(file);
struct drm_xe_gem_create *args = data;
+ struct xe_validation_ctx ctx;
+ struct drm_exec exec;
struct xe_vm *vm = NULL;
- ktime_t end = 0;
struct xe_bo *bo;
unsigned int bo_flags;
u32 handle;
@@ -2728,25 +3177,26 @@ int xe_gem_create_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
}
-retry:
- if (vm) {
- err = xe_vm_lock(vm, true);
- if (err)
- goto out_vm;
+ err = 0;
+ xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.interruptible = true},
+ err) {
+ if (vm) {
+ err = xe_vm_drm_exec_lock(vm, &exec);
+ drm_exec_retry_on_contention(&exec);
+ if (err)
+ break;
+ }
+ bo = xe_bo_create_user(xe, vm, args->size, args->cpu_caching,
+ bo_flags, &exec);
+ drm_exec_retry_on_contention(&exec);
+ if (IS_ERR(bo)) {
+ err = PTR_ERR(bo);
+ xe_validation_retry_on_oom(&ctx, &err);
+ break;
+ }
}
-
- bo = xe_bo_create_user(xe, NULL, vm, args->size, args->cpu_caching,
- bo_flags);
-
- if (vm)
- xe_vm_unlock(vm);
-
- if (IS_ERR(bo)) {
- err = PTR_ERR(bo);
- if (xe_vm_validate_should_retry(NULL, err, &end))
- goto retry;
+ if (err)
goto out_vm;
- }
if (args->extensions) {
err = gem_create_user_extensions(xe, bo, args->extensions, 0);
@@ -2895,6 +3345,9 @@ static void xe_place_from_ttm_type(u32 mem_type, struct ttm_place *place)
* xe_bo_migrate - Migrate an object to the desired region id
* @bo: The buffer object to migrate.
* @mem_type: The TTM region type to migrate to.
+ * @tctx: A pointer to a struct ttm_operation_ctx or NULL if
+ * a default interruptible ctx is to be used.
+ * @exec: The drm_exec transaction to use for exhaustive eviction.
*
* Attempt to migrate the buffer object to the desired memory region. The
* buffer object may not be pinned, and must be locked.
@@ -2906,7 +3359,8 @@ static void xe_place_from_ttm_type(u32 mem_type, struct ttm_place *place)
* Return: 0 on success. Negative error code on failure. In particular may
* return -EINTR or -ERESTARTSYS if signal pending.
*/
-int xe_bo_migrate(struct xe_bo *bo, u32 mem_type)
+int xe_bo_migrate(struct xe_bo *bo, u32 mem_type, struct ttm_operation_ctx *tctx,
+ struct drm_exec *exec)
{
struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
struct ttm_operation_ctx ctx = {
@@ -2918,6 +3372,7 @@ int xe_bo_migrate(struct xe_bo *bo, u32 mem_type)
struct ttm_place requested;
xe_bo_assert_held(bo);
+ tctx = tctx ? tctx : &ctx;
if (bo->ttm.resource->mem_type == mem_type)
return 0;
@@ -2944,19 +3399,22 @@ int xe_bo_migrate(struct xe_bo *bo, u32 mem_type)
add_vram(xe, bo, &requested, bo->flags, mem_type, &c);
}
- return ttm_bo_validate(&bo->ttm, &placement, &ctx);
+ if (!tctx->no_wait_gpu)
+ xe_validation_assert_exec(xe_bo_device(bo), exec, &bo->ttm.base);
+ return ttm_bo_validate(&bo->ttm, &placement, tctx);
}
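For reference, a non-blocking migration attempt along the lines of the CPU-fault fastpath earlier in this patch might look like this (a sketch only; the caller is assumed to hold the bo's resv):

/* Sketch: try to migrate without waiting for the GPU, as the fault fastpath does. */
static int example_try_migrate_nowait(struct xe_bo *bo)
{
	struct ttm_operation_ctx tctx = {
		.interruptible = true,
		.no_wait_gpu = true,		/* fail fast instead of blocking */
		.gfp_retry_mayfail = true,
	};

	/* @exec may be NULL here: with no_wait_gpu, no eviction waits occur. */
	return xe_bo_migrate(bo, XE_PL_TT, &tctx, NULL);
}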
/**
* xe_bo_evict - Evict an object to evict placement
* @bo: The buffer object to migrate.
+ * @exec: The drm_exec transaction to use for exhaustive eviction.
*
* On successful completion, the object memory will be moved to evict
* placement. This function blocks until the object has been fully moved.
*
* Return: 0 on success. Negative error code on failure.
*/
-int xe_bo_evict(struct xe_bo *bo)
+int xe_bo_evict(struct xe_bo *bo, struct drm_exec *exec)
{
struct ttm_operation_ctx ctx = {
.interruptible = false,
@@ -3116,11 +3574,11 @@ int xe_bo_dumb_create(struct drm_file *file_priv,
args->size = ALIGN(mul_u32_u32(args->pitch, args->height),
page_size);
- bo = xe_bo_create_user(xe, NULL, NULL, args->size,
+ bo = xe_bo_create_user(xe, NULL, args->size,
DRM_XE_GEM_CPU_CACHING_WC,
XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
XE_BO_FLAG_SCANOUT |
- XE_BO_FLAG_NEEDS_CPU_ACCESS);
+ XE_BO_FLAG_NEEDS_CPU_ACCESS, NULL);
if (IS_ERR(bo))
return PTR_ERR(bo);
diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h
index 9ce94d252015..a77af42b5f9e 100644
--- a/drivers/gpu/drm/xe/xe_bo.h
+++ b/drivers/gpu/drm/xe/xe_bo.h
@@ -10,8 +10,10 @@
#include "xe_bo_types.h"
#include "xe_macros.h"
+#include "xe_validation.h"
#include "xe_vm_types.h"
#include "xe_vm.h"
+#include "xe_vram_types.h"
#define XE_DEFAULT_GTT_SIZE_MB 3072ULL /* 3GB by default */
@@ -23,8 +25,9 @@
#define XE_BO_FLAG_VRAM_MASK (XE_BO_FLAG_VRAM0 | XE_BO_FLAG_VRAM1)
/* -- */
#define XE_BO_FLAG_STOLEN BIT(4)
+#define XE_BO_FLAG_VRAM(vram) (XE_BO_FLAG_VRAM0 << ((vram)->id))
#define XE_BO_FLAG_VRAM_IF_DGFX(tile) (IS_DGFX(tile_to_xe(tile)) ? \
- XE_BO_FLAG_VRAM0 << (tile)->id : \
+ XE_BO_FLAG_VRAM((tile)->mem.vram) : \
XE_BO_FLAG_SYSTEM)
#define XE_BO_FLAG_GGTT BIT(5)
#define XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE BIT(6)
@@ -86,40 +89,34 @@ struct sg_table;
struct xe_bo *xe_bo_alloc(void);
void xe_bo_free(struct xe_bo *bo);
-struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
- struct xe_tile *tile, struct dma_resv *resv,
- struct ttm_lru_bulk_move *bulk, size_t size,
- u16 cpu_caching, enum ttm_bo_type type,
- u32 flags);
-struct xe_bo *
-xe_bo_create_locked_range(struct xe_device *xe,
- struct xe_tile *tile, struct xe_vm *vm,
- size_t size, u64 start, u64 end,
- enum ttm_bo_type type, u32 flags, u64 alignment);
+struct xe_bo *xe_bo_init_locked(struct xe_device *xe, struct xe_bo *bo,
+ struct xe_tile *tile, struct dma_resv *resv,
+ struct ttm_lru_bulk_move *bulk, size_t size,
+ u16 cpu_caching, enum ttm_bo_type type,
+ u32 flags, struct drm_exec *exec);
struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile,
struct xe_vm *vm, size_t size,
- enum ttm_bo_type type, u32 flags);
-struct xe_bo *xe_bo_create(struct xe_device *xe, struct xe_tile *tile,
- struct xe_vm *vm, size_t size,
- enum ttm_bo_type type, u32 flags);
-struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_tile *tile,
- struct xe_vm *vm, size_t size,
- u16 cpu_caching,
- u32 flags);
+ enum ttm_bo_type type, u32 flags,
+ struct drm_exec *exec);
+struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_vm *vm, size_t size,
+ u16 cpu_caching, u32 flags, struct drm_exec *exec);
struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
struct xe_vm *vm, size_t size,
- enum ttm_bo_type type, u32 flags);
-struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile,
- struct xe_vm *vm, size_t size, u64 offset,
- enum ttm_bo_type type, u32 flags);
-struct xe_bo *xe_bo_create_pin_map_at_aligned(struct xe_device *xe,
- struct xe_tile *tile,
- struct xe_vm *vm,
- size_t size, u64 offset,
- enum ttm_bo_type type, u32 flags,
- u64 alignment);
+ enum ttm_bo_type type, u32 flags,
+ struct drm_exec *exec);
+struct xe_bo *xe_bo_create_pin_map_novm(struct xe_device *xe, struct xe_tile *tile,
+ size_t size, enum ttm_bo_type type, u32 flags,
+ bool intr);
+struct xe_bo *xe_bo_create_pin_range_novm(struct xe_device *xe, struct xe_tile *tile,
+ size_t size, u64 start, u64 end,
+ enum ttm_bo_type type, u32 flags);
+struct xe_bo *
+xe_bo_create_pin_map_at_novm(struct xe_device *xe, struct xe_tile *tile,
+ size_t size, u64 offset, enum ttm_bo_type type,
+ u32 flags, u64 alignment, bool intr);
struct xe_bo *xe_managed_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
size_t size, u32 flags);
+void xe_managed_bo_unpin_map_no_vm(struct xe_bo *bo);
struct xe_bo *xe_managed_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile,
const void *data, size_t size, u32 flags);
int xe_managed_bo_reinit_in_vram(struct xe_device *xe, struct xe_tile *tile, struct xe_bo **src);
@@ -198,11 +195,12 @@ static inline void xe_bo_unlock_vm_held(struct xe_bo *bo)
}
}
-int xe_bo_pin_external(struct xe_bo *bo, bool in_place);
-int xe_bo_pin(struct xe_bo *bo);
+int xe_bo_pin_external(struct xe_bo *bo, bool in_place, struct drm_exec *exec);
+int xe_bo_pin(struct xe_bo *bo, struct drm_exec *exec);
void xe_bo_unpin_external(struct xe_bo *bo);
void xe_bo_unpin(struct xe_bo *bo);
-int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict);
+int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict,
+ struct drm_exec *exec);
static inline bool xe_bo_is_pinned(struct xe_bo *bo)
{
@@ -283,8 +281,9 @@ uint64_t vram_region_gpu_offset(struct ttm_resource *res);
bool xe_bo_can_migrate(struct xe_bo *bo, u32 mem_type);
-int xe_bo_migrate(struct xe_bo *bo, u32 mem_type);
-int xe_bo_evict(struct xe_bo *bo);
+int xe_bo_migrate(struct xe_bo *bo, u32 mem_type, struct ttm_operation_ctx *ctc,
+ struct drm_exec *exec);
+int xe_bo_evict(struct xe_bo *bo, struct drm_exec *exec);
int xe_bo_evict_pinned(struct xe_bo *bo);
int xe_bo_notifier_prepare_pinned(struct xe_bo *bo);
@@ -313,6 +312,21 @@ static inline size_t xe_bo_ccs_pages_start(struct xe_bo *bo)
return PAGE_ALIGN(xe_bo_size(bo));
}
+/**
+ * xe_bo_has_valid_ccs_bb - Check if the CCS BBs were set up for the BO.
+ * @bo: the &xe_bo to check
+ *
+ * The CCS BBs are only set up by the VF driver, but it is safe to call
+ * this function from a non-VF driver as well.
+ *
+ * Return: true iff the CCS BBs are set up, false otherwise.
+ */
+static inline bool xe_bo_has_valid_ccs_bb(struct xe_bo *bo)
+{
+ return bo->bb_ccs[XE_SRIOV_VF_CCS_READ_CTX] &&
+ bo->bb_ccs[XE_SRIOV_VF_CCS_WRITE_CTX];
+}
+
static inline bool xe_bo_has_pages(struct xe_bo *bo)
{
if ((bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm)) ||
diff --git a/drivers/gpu/drm/xe/xe_bo_types.h b/drivers/gpu/drm/xe/xe_bo_types.h
index ff560d82496f..d4fe3c8dca5b 100644
--- a/drivers/gpu/drm/xe/xe_bo_types.h
+++ b/drivers/gpu/drm/xe/xe_bo_types.h
@@ -9,6 +9,7 @@
#include <linux/iosys-map.h>
#include <drm/drm_gpusvm.h>
+#include <drm/drm_pagemap.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_placement.h>
@@ -24,7 +25,9 @@ struct xe_vm;
/* TODO: To be selected with VM_MADVISE */
#define XE_BO_PRIORITY_NORMAL 1
-/** @xe_bo: XE buffer object */
+/**
+ * struct xe_bo - Xe buffer object
+ */
struct xe_bo {
/** @ttm: TTM base buffer object */
struct ttm_buffer_object ttm;
@@ -46,7 +49,7 @@ struct xe_bo {
struct xe_ggtt_node *ggtt_node[XE_MAX_TILES_PER_DEVICE];
/** @vmap: iosys map of this buffer */
struct iosys_map vmap;
- /** @ttm_kmap: TTM bo kmap object for internal use only. Keep off. */
+ /** @kmap: TTM bo kmap object for internal use only. Keep off. */
struct ttm_bo_kmap_obj kmap;
/** @pinned_link: link to present / evicted list of pinned BO */
struct list_head pinned_link;
@@ -60,6 +63,14 @@ struct xe_bo {
*/
struct list_head client_link;
#endif
+ /** @attr: User controlled attributes for bo */
+ struct {
+ /**
+ * @atomic_access: type of atomic access bo needs
+ * protected by bo dma-resv lock
+ */
+ u32 atomic_access;
+ } attr;
/**
* @pxp_key_instance: PXP key instance this BO was created against. A
* 0 in this variable indicates that the BO does not use PXP encryption.
@@ -73,9 +84,12 @@ struct xe_bo {
/** @created: Whether the bo has passed initial creation */
bool created;
- /** @ccs_cleared */
+ /** @ccs_cleared: true means that CCS region of BO is already cleared */
bool ccs_cleared;
+ /** @bb_ccs: BB instructions of CCS read/write. Valid only for VF */
+ struct xe_bb *bb_ccs[XE_SRIOV_VF_CCS_CTX_COUNT];
+
/**
* @cpu_caching: CPU caching mode. Currently only used for userspace
* objects. Exceptions are system memory on DGFX, which is always
@@ -87,9 +101,10 @@ struct xe_bo {
struct drm_pagemap_devmem devmem_allocation;
/** @vram_userfault_link: Link into @mem_access.vram_userfault.list */
- struct list_head vram_userfault_link;
+ struct list_head vram_userfault_link;
- /** @min_align: minimum alignment needed for this BO if different
+ /**
+ * @min_align: minimum alignment needed for this BO if different
* from default
*/
u64 min_align;
diff --git a/drivers/gpu/drm/xe/xe_configfs.c b/drivers/gpu/drm/xe/xe_configfs.c
index 58c1f397c68c..8a9b950e7a6d 100644
--- a/drivers/gpu/drm/xe/xe_configfs.c
+++ b/drivers/gpu/drm/xe/xe_configfs.c
@@ -4,42 +4,67 @@
*/
#include <linux/bitops.h>
+#include <linux/ctype.h>
#include <linux/configfs.h>
+#include <linux/cleanup.h>
#include <linux/find.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/string.h>
+#include "instructions/xe_mi_commands.h"
#include "xe_configfs.h"
-#include "xe_module.h"
-
#include "xe_hw_engine_types.h"
+#include "xe_module.h"
+#include "xe_pci_types.h"
/**
* DOC: Xe Configfs
*
* Overview
- * =========
+ * ========
*
* Configfs is a filesystem-based manager of kernel objects. XE KMD registers a
- * configfs subsystem called ``'xe'`` that creates a directory in the mounted configfs directory
- * The user can create devices under this directory and configure them as necessary
- * See Documentation/filesystems/configfs.rst for more information about how configfs works.
+ * configfs subsystem called ``xe`` that creates a directory in the mounted
+ * configfs directory. The user can create devices under this directory and
+ * configure them as necessary. See Documentation/filesystems/configfs.rst for
+ * more information about how configfs works.
*
* Create devices
- * ===============
+ * ==============
*
- * In order to create a device, the user has to create a directory inside ``'xe'``::
+ * To create a device, the ``xe`` module should already be loaded, but some
+ * attributes can only be set before binding the device. This can be accomplished
+ * by blocking the driver autoprobe::
*
- * mkdir /sys/kernel/config/xe/0000:03:00.0/
+ * # echo 0 > /sys/bus/pci/drivers_autoprobe
+ * # modprobe xe
+ *
+ * In order to create a device, the user has to create a directory inside ``xe``::
+ *
+ * # mkdir /sys/kernel/config/xe/0000:03:00.0/
*
* Every device created is populated by the driver with entries that can be
* used to configure it::
*
* /sys/kernel/config/xe/
- * .. 0000:03:00.0/
- * ... survivability_mode
+ * ├── 0000:00:02.0
+ * │   └── ...
+ * ├── 0000:00:02.1
+ * │   └── ...
+ * :
+ * └── 0000:03:00.0
+ * ├── survivability_mode
+ * ├── engines_allowed
+ * └── enable_psmi
+ *
+ * After configuring the attributes as per the next section, the device can be
+ * probed with::
+ *
+ * # echo 0000:03:00.0 > /sys/bus/pci/drivers/xe/bind
+ * # # or
+ * # echo 0000:03:00.0 > /sys/bus/pci/drivers_probe
*
* Configure Attributes
* ====================
@@ -51,7 +76,8 @@
* effect when probing the device. Example to enable it::
*
* # echo 1 > /sys/kernel/config/xe/0000:03:00.0/survivability_mode
- * # echo 0000:03:00.0 > /sys/bus/pci/drivers/xe/bind (Enters survivability mode if supported)
+ *
+ * This attribute can only be set before binding to the device.
*
* Allowed engines:
* ----------------
@@ -77,27 +103,105 @@
* available for migrations, but it's disabled. This is intended for debugging
* purposes only.
*
+ * This attribute can only be set before binding to the device.
+ *
+ * PSMI
+ * ----
+ *
+ * Enable extra debugging capabilities to trace engine execution. Only useful
+ * during early platform enabling and requires additional hardware connected.
+ * Once it's enabled, additionals WAs are added and runtime configuration is
+ * done via debugfs. Example to enable it::
+ *
+ * # echo 1 > /sys/kernel/config/xe/0000:03:00.0/enable_psmi
+ *
+ * This attribute can only be set before binding to the device.
+ *
+ * Context restore BB
+ * ------------------
+ *
+ * Allow executing a batch buffer during any context switch. When the
+ * GPU is restoring the context, it executes additional commands. It's useful
+ * for testing additional workarounds and validating certain HW behaviors: it's
+ * not intended for normal execution and will taint the kernel with TAINT_TEST
+ * when used.
+ *
+ * Currently this is implemented only for post and mid context restore.
+ * Examples:
+ *
+ * #. Execute a LRI command to write 0xDEADBEEF to register 0x4f10 after the
+ * normal context restore::
+ *
+ * # echo 'rcs cmd 11000001 4F100 DEADBEEF' \
+ * > /sys/kernel/config/xe/0000:03:00.0/ctx_restore_post_bb
+ *
+ * #. Execute a LRI command to write 0xDEADBEEF to register 0x4f10 at the
+ * beginning of the context restore::
+ *
+ * # echo 'rcs cmd 11000001 4F100 DEADBEEF' \
+ * > /sys/kernel/config/xe/0000:03:00.0/ctx_restore_mid_bb
+ *
+ * #. Load certain values into a couple of registers (this can be used as a
+ *    simpler alternative to the ``cmd`` action)::
+ *
+ * # cat > /sys/kernel/config/xe/0000:03:00.0/ctx_restore_post_bb <<EOF
+ * rcs reg 4F100 DEADBEEF
+ * rcs reg 4F104 FFFFFFFF
+ * EOF
+ *
+ * .. note::
+ *
+ * When using multiple lines, make sure to use a command that is
+ * implemented with a single write syscall, like a heredoc.
+ *
+ * These attributes can only be set before binding to the device.
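+ *
+ * As a complete example (a sketch; the BDF and the engine list are
+ * illustrative), allowing only the render engines and the first copy engine
+ * before binding::
+ *
+ *	# echo 0 > /sys/bus/pci/drivers_autoprobe
+ *	# modprobe xe
+ *	# mkdir /sys/kernel/config/xe/0000:03:00.0
+ *	# echo 'rcs*,bcs0' > /sys/kernel/config/xe/0000:03:00.0/engines_allowed
+ *	# echo 0000:03:00.0 > /sys/bus/pci/drivers/xe/bind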
+ *
* Remove devices
* ==============
*
* The created device directories can be removed using ``rmdir``::
*
- * rmdir /sys/kernel/config/xe/0000:03:00.0/
+ * # rmdir /sys/kernel/config/xe/0000:03:00.0/
*/
-struct xe_config_device {
+/* Similar to struct xe_bb, but not tied to HW (yet) */
+struct wa_bb {
+ u32 *cs;
+ u32 len; /* in dwords */
+};
+
+struct xe_config_group_device {
struct config_group group;
- bool survivability_mode;
- u64 engines_allowed;
+ struct xe_config_device {
+ u64 engines_allowed;
+ struct wa_bb ctx_restore_post_bb[XE_ENGINE_CLASS_MAX];
+ struct wa_bb ctx_restore_mid_bb[XE_ENGINE_CLASS_MAX];
+ bool survivability_mode;
+ bool enable_psmi;
+ } config;
/* protects attributes */
struct mutex lock;
+ /* matching descriptor */
+ const struct xe_device_desc *desc;
+};
+
+static const struct xe_config_device device_defaults = {
+ .engines_allowed = U64_MAX,
+ .survivability_mode = false,
+ .enable_psmi = false,
};
+static void set_device_defaults(struct xe_config_device *config)
+{
+ *config = device_defaults;
+}
+
struct engine_info {
const char *cls;
u64 mask;
+ enum xe_engine_class engine_class;
};
/* Some helpful macros to aid on the sizing of buffer allocation when parsing */
@@ -105,17 +209,48 @@ struct engine_info {
#define MAX_ENGINE_INSTANCE_CHARS 2
static const struct engine_info engine_info[] = {
- { .cls = "rcs", .mask = XE_HW_ENGINE_RCS_MASK },
- { .cls = "bcs", .mask = XE_HW_ENGINE_BCS_MASK },
- { .cls = "vcs", .mask = XE_HW_ENGINE_VCS_MASK },
- { .cls = "vecs", .mask = XE_HW_ENGINE_VECS_MASK },
- { .cls = "ccs", .mask = XE_HW_ENGINE_CCS_MASK },
- { .cls = "gsccs", .mask = XE_HW_ENGINE_GSCCS_MASK },
+ { .cls = "rcs", .mask = XE_HW_ENGINE_RCS_MASK, .engine_class = XE_ENGINE_CLASS_RENDER },
+ { .cls = "bcs", .mask = XE_HW_ENGINE_BCS_MASK, .engine_class = XE_ENGINE_CLASS_COPY },
+ { .cls = "vcs", .mask = XE_HW_ENGINE_VCS_MASK, .engine_class = XE_ENGINE_CLASS_VIDEO_DECODE },
+ { .cls = "vecs", .mask = XE_HW_ENGINE_VECS_MASK, .engine_class = XE_ENGINE_CLASS_VIDEO_ENHANCE },
+ { .cls = "ccs", .mask = XE_HW_ENGINE_CCS_MASK, .engine_class = XE_ENGINE_CLASS_COMPUTE },
+ { .cls = "gsccs", .mask = XE_HW_ENGINE_GSCCS_MASK, .engine_class = XE_ENGINE_CLASS_OTHER },
};
+static struct xe_config_group_device *to_xe_config_group_device(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct xe_config_group_device, group);
+}
+
static struct xe_config_device *to_xe_config_device(struct config_item *item)
{
- return container_of(to_config_group(item), struct xe_config_device, group);
+ return &to_xe_config_group_device(item)->config;
+}
+
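+/* Check whether a driver is already bound to the PCI device backing @dev */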
+static bool is_bound(struct xe_config_group_device *dev)
+{
+ unsigned int domain, bus, slot, function;
+ struct pci_dev *pdev;
+ const char *name;
+ bool ret;
+
+ lockdep_assert_held(&dev->lock);
+
+ name = dev->group.cg_item.ci_name;
+ if (sscanf(name, "%x:%x:%x.%x", &domain, &bus, &slot, &function) != 4)
+ return false;
+
+ pdev = pci_get_domain_bus_and_slot(domain, bus, PCI_DEVFN(slot, function));
+ if (!pdev)
+ return false;
+
+ ret = pci_get_drvdata(pdev);
+ if (ret)
+ pci_dbg(pdev, "Already bound to driver\n");
+
+ pci_dev_put(pdev);
+
+ return ret;
}
static ssize_t survivability_mode_show(struct config_item *item, char *page)
@@ -127,7 +262,7 @@ static ssize_t survivability_mode_show(struct config_item *item, char *page)
static ssize_t survivability_mode_store(struct config_item *item, const char *page, size_t len)
{
- struct xe_config_device *dev = to_xe_config_device(item);
+ struct xe_config_group_device *dev = to_xe_config_group_device(item);
bool survivability_mode;
int ret;
@@ -135,9 +270,11 @@ static ssize_t survivability_mode_store(struct config_item *item, const char *pa
if (ret)
return ret;
- mutex_lock(&dev->lock);
- dev->survivability_mode = survivability_mode;
- mutex_unlock(&dev->lock);
+ guard(mutex)(&dev->lock);
+ if (is_bound(dev))
+ return -EBUSY;
+
+ dev->config.survivability_mode = survivability_mode;
return len;
}
@@ -166,7 +303,18 @@ static ssize_t engines_allowed_show(struct config_item *item, char *page)
return p - page;
}
-static bool lookup_engine_mask(const char *pattern, u64 *mask)
+/*
+ * Lookup engine_info. If @mask is not NULL, reduce the mask according to the
+ * instance in @pattern.
+ *
+ * Examples of inputs:
+ * - lookup_engine_info("rcs0", &mask): return "rcs" entry from @engine_info and
+ * mask == BIT_ULL(XE_HW_ENGINE_RCS0)
+ * - lookup_engine_info("rcs*", &mask): return "rcs" entry from @engine_info and
+ * mask == XE_HW_ENGINE_RCS_MASK
+ * - lookup_engine_info("rcs", NULL): return "rcs" entry from @engine_info
+ */
+static const struct engine_info *lookup_engine_info(const char *pattern, u64 *mask)
{
for (size_t i = 0; i < ARRAY_SIZE(engine_info); i++) {
u8 instance;
@@ -176,70 +324,359 @@ static bool lookup_engine_mask(const char *pattern, u64 *mask)
continue;
pattern += strlen(engine_info[i].cls);
+ if (!mask && !*pattern)
+ return &engine_info[i];
if (!strcmp(pattern, "*")) {
*mask = engine_info[i].mask;
- return true;
+ return &engine_info[i];
}
if (kstrtou8(pattern, 10, &instance))
- return false;
+ return NULL;
bit = __ffs64(engine_info[i].mask) + instance;
if (bit >= fls64(engine_info[i].mask))
- return false;
+ return NULL;
*mask = BIT_ULL(bit);
- return true;
+ return &engine_info[i];
}
- return false;
+ return NULL;
+}
+
+static int parse_engine(const char *s, const char *end_chars, u64 *mask,
+ const struct engine_info **pinfo)
+{
+ char buf[MAX_ENGINE_CLASS_CHARS + MAX_ENGINE_INSTANCE_CHARS + 1];
+ const struct engine_info *info;
+ size_t len;
+
+ len = strcspn(s, end_chars);
+ if (len >= sizeof(buf))
+ return -EINVAL;
+
+ memcpy(buf, s, len);
+ buf[len] = '\0';
+
+ info = lookup_engine_info(buf, mask);
+ if (!info)
+ return -ENOENT;
+
+ if (pinfo)
+ *pinfo = info;
+
+ return len;
}
static ssize_t engines_allowed_store(struct config_item *item, const char *page,
size_t len)
{
- struct xe_config_device *dev = to_xe_config_device(item);
- size_t patternlen, p;
+ struct xe_config_group_device *dev = to_xe_config_group_device(item);
+ ssize_t patternlen, p;
u64 mask, val = 0;
for (p = 0; p < len; p += patternlen + 1) {
- char buf[MAX_ENGINE_CLASS_CHARS + MAX_ENGINE_INSTANCE_CHARS + 1];
-
- patternlen = strcspn(page + p, ",\n");
- if (patternlen >= sizeof(buf))
+ patternlen = parse_engine(page + p, ",\n", &mask, NULL);
+ if (patternlen < 0)
return -EINVAL;
- memcpy(buf, page + p, patternlen);
- buf[patternlen] = '\0';
+ val |= mask;
+ }
+
+ guard(mutex)(&dev->lock);
+ if (is_bound(dev))
+ return -EBUSY;
+
+ dev->config.engines_allowed = val;
+
+ return len;
+}
+
+static ssize_t enable_psmi_show(struct config_item *item, char *page)
+{
+ struct xe_config_device *dev = to_xe_config_device(item);
+
+ return sprintf(page, "%d\n", dev->enable_psmi);
+}
+
+static ssize_t enable_psmi_store(struct config_item *item, const char *page, size_t len)
+{
+ struct xe_config_group_device *dev = to_xe_config_group_device(item);
+ bool val;
+ int ret;
+
+ ret = kstrtobool(page, &val);
+ if (ret)
+ return ret;
+
+ guard(mutex)(&dev->lock);
+ if (is_bound(dev))
+ return -EBUSY;
+
+ dev->config.enable_psmi = val;
+
+ return len;
+}
+
+static bool wa_bb_read_advance(bool dereference, char **p,
+ const char *append, size_t len,
+ size_t *max_size)
+{
+ if (dereference) {
+ if (len >= *max_size)
+ return false;
+ *max_size -= len;
+ if (append)
+ memcpy(*p, append, len);
+ }
+
+ *p += len;
+
+ return true;
+}
+
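+/*
+ * Format the wa_bb contents of all engine classes into @data. When called
+ * with @data == NULL (and @sz == 0) nothing is written; the cursor is only
+ * advanced, so the return value is the buffer size a real call would need,
+ * including the trailing '\0'.
+ */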
+static ssize_t wa_bb_show(struct xe_config_group_device *dev,
+ struct wa_bb wa_bb[static XE_ENGINE_CLASS_MAX],
+ char *data, size_t sz)
+{
+ char *p = data;
+
+ guard(mutex)(&dev->lock);
+
+ for (size_t i = 0; i < ARRAY_SIZE(engine_info); i++) {
+ enum xe_engine_class ec = engine_info[i].engine_class;
+ size_t len;
+
+ if (!wa_bb[ec].len)
+ continue;
+
+ len = snprintf(p, sz, "%s:", engine_info[i].cls);
+ if (!wa_bb_read_advance(data, &p, NULL, len, &sz))
+ return -ENOBUFS;
+
+ for (size_t j = 0; j < wa_bb[ec].len; j++) {
+ len = snprintf(p, sz, " %08x", wa_bb[ec].cs[j]);
+ if (!wa_bb_read_advance(data, &p, NULL, len, &sz))
+ return -ENOBUFS;
+ }
+
+ if (!wa_bb_read_advance(data, &p, "\n", 1, &sz))
+ return -ENOBUFS;
+ }
+
+ if (!wa_bb_read_advance(data, &p, "", 1, &sz))
+ return -ENOBUFS;
+
+ /* Reserve one more byte so the sizing pass matches the write pass' '\0' check */
+ if (!data)
+ p++;
+
+ return p - data;
+}
+
+static ssize_t ctx_restore_mid_bb_show(struct config_item *item, char *page)
+{
+ struct xe_config_group_device *dev = to_xe_config_group_device(item);
+
+ return wa_bb_show(dev, dev->config.ctx_restore_mid_bb, page, SZ_4K);
+}
+
+static ssize_t ctx_restore_post_bb_show(struct config_item *item, char *page)
+{
+ struct xe_config_group_device *dev = to_xe_config_group_device(item);
+
+ return wa_bb_show(dev, dev->config.ctx_restore_post_bb, page, SZ_4K);
+}
- if (!lookup_engine_mask(buf, &mask))
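+/* Append @val to @wa_bb, or only count the dword when @wa_bb->cs is NULL */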
+static void wa_bb_append(struct wa_bb *wa_bb, u32 val)
+{
+ if (wa_bb->cs)
+ wa_bb->cs[wa_bb->len] = val;
+
+ wa_bb->len++;
+}
+
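+/*
+ * Parse one whitespace-delimited hex token from @line into @pval. Return the
+ * number of characters consumed, 0 if the line ended, or -EINVAL on a
+ * malformed token.
+ */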
+static ssize_t parse_hex(const char *line, u32 *pval)
+{
+ char numstr[12];
+ const char *p;
+ ssize_t numlen;
+
+ p = line + strspn(line, " \t");
+ if (!*p || *p == '\n')
+ return 0;
+
+ numlen = strcspn(p, " \t\n");
+ if (!numlen || numlen >= sizeof(numstr) - 1)
+ return -EINVAL;
+
+ memcpy(numstr, p, numlen);
+ numstr[numlen] = '\0';
+ p += numlen;
+
+ if (kstrtou32(numstr, 16, pval))
+ return -EINVAL;
+
+ return p - line;
+}
+
+/*
+ * Parse lines with the format
+ *
+ * <engine-class> cmd <u32> <u32...>
+ * <engine-class> reg <u32_addr> <u32_val>
+ *
+ * and optionally save them in @wa_bb if @wa_bb[i].cs is non-NULL.
+ *
+ * Return the number of dwords parsed.
+ */
+static ssize_t parse_wa_bb_lines(const char *lines,
+ struct wa_bb wa_bb[static XE_ENGINE_CLASS_MAX])
+{
+ ssize_t dwords = 0, ret;
+ const char *p;
+
+ for (p = lines; *p; p++) {
+ const struct engine_info *info = NULL;
+ u32 val, val2;
+
+ /* Also allow empty lines */
+ p += strspn(p, " \t\n");
+ if (!*p)
+ break;
+
+ ret = parse_engine(p, " \t\n", NULL, &info);
+ if (ret < 0)
+ return ret;
+
+ p += ret;
+ p += strspn(p, " \t");
+
+ if (str_has_prefix(p, "cmd")) {
+ for (p += strlen("cmd"); *p;) {
+ ret = parse_hex(p, &val);
+ if (ret < 0)
+ return -EINVAL;
+ if (!ret)
+ break;
+
+ p += ret;
+ dwords++;
+ wa_bb_append(&wa_bb[info->engine_class], val);
+ }
+ } else if (str_has_prefix(p, "reg")) {
+ p += strlen("reg");
+ ret = parse_hex(p, &val);
+ if (ret <= 0)
+ return -EINVAL;
+
+ p += ret;
+ ret = parse_hex(p, &val2);
+ if (ret <= 0)
+ return -EINVAL;
+
+ p += ret;
+ dwords += 3;
+ wa_bb_append(&wa_bb[info->engine_class],
+ MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(1));
+ wa_bb_append(&wa_bb[info->engine_class], val);
+ wa_bb_append(&wa_bb[info->engine_class], val2);
+ } else {
return -EINVAL;
+ }
+ }
- val |= mask;
+ return dwords;
+}
+
+static ssize_t wa_bb_store(struct wa_bb wa_bb[static XE_ENGINE_CLASS_MAX],
+ struct xe_config_group_device *dev,
+ const char *page, size_t len)
+{
+ /* tmp_wa_bb must match wa_bb's size */
+ struct wa_bb tmp_wa_bb[XE_ENGINE_CLASS_MAX] = { };
+ ssize_t count, class;
+ u32 *tmp;
+
+ /* 1. Count dwords - wa_bb[i].cs is NULL for all classes */
+ count = parse_wa_bb_lines(page, tmp_wa_bb);
+ if (count < 0)
+ return count;
+
+ guard(mutex)(&dev->lock);
+
+ if (is_bound(dev))
+ return -EBUSY;
+
+ /*
+ * 2. Allocate a u32 array and set the pointers to the right positions
+ * according to the length of each class' wa_bb
+ */
+ tmp = krealloc(wa_bb[0].cs, count * sizeof(u32), GFP_KERNEL);
+ if (count && !tmp)
+ return -ENOMEM;
+
+ if (!count) {
+ memset(wa_bb, 0, sizeof(tmp_wa_bb));
+ return len;
}
- mutex_lock(&dev->lock);
- dev->engines_allowed = val;
- mutex_unlock(&dev->lock);
+ for (class = 0, count = 0; class < XE_ENGINE_CLASS_MAX; ++class) {
+ tmp_wa_bb[class].cs = tmp + count;
+ count += tmp_wa_bb[class].len;
+ tmp_wa_bb[class].len = 0;
+ }
+
+ /* 3. Parse wa_bb lines again, this time saving the values */
+ count = parse_wa_bb_lines(page, tmp_wa_bb);
+ if (count < 0)
+ return count;
+
+ memcpy(wa_bb, tmp_wa_bb, sizeof(tmp_wa_bb));
return len;
}
-CONFIGFS_ATTR(, survivability_mode);
+static ssize_t ctx_restore_mid_bb_store(struct config_item *item,
+ const char *data, size_t sz)
+{
+ struct xe_config_group_device *dev = to_xe_config_group_device(item);
+
+ return wa_bb_store(dev->config.ctx_restore_mid_bb, dev, data, sz);
+}
+
+static ssize_t ctx_restore_post_bb_store(struct config_item *item,
+ const char *data, size_t sz)
+{
+ struct xe_config_group_device *dev = to_xe_config_group_device(item);
+
+ return wa_bb_store(dev->config.ctx_restore_post_bb, dev, data, sz);
+}
+
+CONFIGFS_ATTR(, ctx_restore_mid_bb);
+CONFIGFS_ATTR(, ctx_restore_post_bb);
+CONFIGFS_ATTR(, enable_psmi);
CONFIGFS_ATTR(, engines_allowed);
+CONFIGFS_ATTR(, survivability_mode);
static struct configfs_attribute *xe_config_device_attrs[] = {
- &attr_survivability_mode,
+ &attr_ctx_restore_mid_bb,
+ &attr_ctx_restore_post_bb,
+ &attr_enable_psmi,
&attr_engines_allowed,
+ &attr_survivability_mode,
NULL,
};
static void xe_config_device_release(struct config_item *item)
{
- struct xe_config_device *dev = to_xe_config_device(item);
+ struct xe_config_group_device *dev = to_xe_config_group_device(item);
mutex_destroy(&dev->lock);
+
+ kfree(dev->config.ctx_restore_post_bb[0].cs);
kfree(dev);
}
@@ -247,35 +684,106 @@ static struct configfs_item_operations xe_config_device_ops = {
.release = xe_config_device_release,
};
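+
+/*
+ * Hide attributes that do not apply to the matched device: e.g.
+ * survivability_mode is only exposed on discrete platforms starting with
+ * Battlemage.
+ */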
+static bool xe_config_device_is_visible(struct config_item *item,
+ struct configfs_attribute *attr, int n)
+{
+ struct xe_config_group_device *dev = to_xe_config_group_device(item);
+
+ if (attr == &attr_survivability_mode) {
+ if (!dev->desc->is_dgfx || dev->desc->platform < XE_BATTLEMAGE)
+ return false;
+ }
+
+ return true;
+}
+
+static struct configfs_group_operations xe_config_device_group_ops = {
+ .is_visible = xe_config_device_is_visible,
+};
+
static const struct config_item_type xe_config_device_type = {
.ct_item_ops = &xe_config_device_ops,
+ .ct_group_ops = &xe_config_device_group_ops,
.ct_attrs = xe_config_device_attrs,
.ct_owner = THIS_MODULE,
};
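+
+/* Look up the xe PCI driver's match data (device descriptor) for @pdev */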
+static const struct xe_device_desc *xe_match_desc(struct pci_dev *pdev)
+{
+ struct device_driver *driver = driver_find("xe", &pci_bus_type);
+ struct pci_driver *drv = to_pci_driver(driver);
+ const struct pci_device_id *ids = drv ? drv->id_table : NULL;
+ const struct pci_device_id *found = pci_match_id(ids, pdev);
+
+ return found ? (const void *)found->driver_data : NULL;
+}
+
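+/* Swap the reference: take a reference on the physfn and release the virtfn */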
+static struct pci_dev *get_physfn_instead(struct pci_dev *virtfn)
+{
+ struct pci_dev *physfn = pci_physfn(virtfn);
+
+ pci_dev_get(physfn);
+ pci_dev_put(virtfn);
+ return physfn;
+}
+
static struct config_group *xe_config_make_device_group(struct config_group *group,
const char *name)
{
unsigned int domain, bus, slot, function;
- struct xe_config_device *dev;
+ struct xe_config_group_device *dev;
+ const struct xe_device_desc *match;
struct pci_dev *pdev;
+ char canonical[16];
+ int vfnumber = 0;
int ret;
- ret = sscanf(name, "%04x:%02x:%02x.%x", &domain, &bus, &slot, &function);
+ ret = sscanf(name, "%x:%x:%x.%x", &domain, &bus, &slot, &function);
if (ret != 4)
return ERR_PTR(-EINVAL);
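+
+ /* Reject aliases: only the canonical "dddd:bb:ss.f" form (12 chars) is accepted */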
+ ret = scnprintf(canonical, sizeof(canonical), "%04x:%02x:%02x.%d", domain, bus,
+ PCI_SLOT(PCI_DEVFN(slot, function)),
+ PCI_FUNC(PCI_DEVFN(slot, function)));
+ if (ret != 12 || strcmp(name, canonical))
+ return ERR_PTR(-EINVAL);
+
pdev = pci_get_domain_bus_and_slot(domain, bus, PCI_DEVFN(slot, function));
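+ /*
+ * The exact function may be a VF that is not enumerated yet; fall back
+ * to function 0, then to devfn 00.0, so the (potential) PF can be
+ * validated below.
+ */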
+ if (!pdev && function)
+ pdev = pci_get_domain_bus_and_slot(domain, bus, PCI_DEVFN(slot, 0));
+ if (!pdev && slot)
+ pdev = pci_get_domain_bus_and_slot(domain, bus, PCI_DEVFN(0, 0));
if (!pdev)
return ERR_PTR(-ENODEV);
+
+ if (PCI_DEVFN(slot, function) != pdev->devfn) {
+ pdev = get_physfn_instead(pdev);
+ vfnumber = PCI_DEVFN(slot, function) - pdev->devfn;
+ if (!dev_is_pf(&pdev->dev) || vfnumber > pci_sriov_get_totalvfs(pdev)) {
+ pci_dev_put(pdev);
+ return ERR_PTR(-ENODEV);
+ }
+ }
+
+ match = xe_match_desc(pdev);
+ if (match && vfnumber && !match->has_sriov) {
+ pci_info(pdev, "xe driver does not support VFs on this device\n");
+ match = NULL;
+ } else if (!match) {
+ pci_info(pdev, "xe driver does not support configuration of this device\n");
+ }
+
pci_dev_put(pdev);
+ if (!match)
+ return ERR_PTR(-ENOENT);
+
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return ERR_PTR(-ENOMEM);
- /* Default values */
- dev->engines_allowed = U64_MAX;
+ dev->desc = match;
+ set_device_defaults(&dev->config);
config_group_init_type_name(&dev->group, name, &xe_config_device_type);
@@ -284,12 +792,12 @@ static struct config_group *xe_config_make_device_group(struct config_group *gro
return &dev->group;
}
-static struct configfs_group_operations xe_config_device_group_ops = {
+static struct configfs_group_operations xe_config_group_ops = {
.make_group = xe_config_make_device_group,
};
static const struct config_item_type xe_configfs_type = {
- .ct_group_ops = &xe_config_device_group_ops,
+ .ct_group_ops = &xe_config_group_ops,
.ct_owner = THIS_MODULE,
};
@@ -302,102 +810,180 @@ static struct configfs_subsystem xe_configfs = {
},
};
-static struct xe_config_device *configfs_find_group(struct pci_dev *pdev)
+static struct xe_config_group_device *find_xe_config_group_device(struct pci_dev *pdev)
{
struct config_item *item;
- char name[64];
-
- snprintf(name, sizeof(name), "%04x:%02x:%02x.%x", pci_domain_nr(pdev->bus),
- pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
mutex_lock(&xe_configfs.su_mutex);
- item = config_group_find_item(&xe_configfs.su_group, name);
+ item = config_group_find_item(&xe_configfs.su_group, pci_name(pdev));
mutex_unlock(&xe_configfs.su_mutex);
if (!item)
return NULL;
- return to_xe_config_device(item);
+ return to_xe_config_group_device(item);
+}
+
+static void dump_custom_dev_config(struct pci_dev *pdev,
+ struct xe_config_group_device *dev)
+{
+#define PRI_CUSTOM_ATTR(fmt_, attr_) do { \
+ if (dev->config.attr_ != device_defaults.attr_) \
+ pci_info(pdev, "configfs: " __stringify(attr_) " = " fmt_ "\n", \
+ dev->config.attr_); \
+ } while (0)
+
+ PRI_CUSTOM_ATTR("%llx", engines_allowed);
+ PRI_CUSTOM_ATTR("%d", enable_psmi);
+ PRI_CUSTOM_ATTR("%d", survivability_mode);
+
+#undef PRI_CUSTOM_ATTR
+}
+
+/**
+ * xe_configfs_check_device() - Test if device was configured by configfs
+ * @pdev: the &pci_dev device to test
+ *
+ * Try to find the configfs group that belongs to the specified pci device
+ * and print a diagnostic message for each attribute that differs from its
+ * default value.
+ */
+void xe_configfs_check_device(struct pci_dev *pdev)
+{
+ struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
+
+ if (!dev)
+ return;
+
+ /* memcmp here is safe as both are zero-initialized */
+ if (memcmp(&dev->config, &device_defaults, sizeof(dev->config))) {
+ pci_info(pdev, "Found custom settings in configfs\n");
+ dump_custom_dev_config(pdev, dev);
+ }
+
+ config_group_put(&dev->group);
}
/**
* xe_configfs_get_survivability_mode - get configfs survivability mode attribute
* @pdev: pci device
*
- * find the configfs group that belongs to the pci device and return
- * the survivability mode attribute
- *
- * Return: survivability mode if config group is found, false otherwise
+ * Return: survivability_mode attribute in configfs
*/
bool xe_configfs_get_survivability_mode(struct pci_dev *pdev)
{
- struct xe_config_device *dev = configfs_find_group(pdev);
+ struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
bool mode;
if (!dev)
- return false;
+ return device_defaults.survivability_mode;
- mode = dev->survivability_mode;
- config_item_put(&dev->group.cg_item);
+ mode = dev->config.survivability_mode;
+ config_group_put(&dev->group);
return mode;
}
/**
- * xe_configfs_clear_survivability_mode - clear configfs survivability mode attribute
+ * xe_configfs_get_engines_allowed - get engine allowed mask from configfs
* @pdev: pci device
*
- * find the configfs group that belongs to the pci device and clear survivability
- * mode attribute
+ * Return: engine mask with allowed engines set in configfs
*/
-void xe_configfs_clear_survivability_mode(struct pci_dev *pdev)
+u64 xe_configfs_get_engines_allowed(struct pci_dev *pdev)
{
- struct xe_config_device *dev = configfs_find_group(pdev);
+ struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
+ u64 engines_allowed;
if (!dev)
- return;
+ return device_defaults.engines_allowed;
- mutex_lock(&dev->lock);
- dev->survivability_mode = 0;
- mutex_unlock(&dev->lock);
+ engines_allowed = dev->config.engines_allowed;
+ config_group_put(&dev->group);
- config_item_put(&dev->group.cg_item);
+ return engines_allowed;
}
/**
- * xe_configfs_get_engines_allowed - get engine allowed mask from configfs
+ * xe_configfs_get_psmi_enabled - get configfs enable_psmi setting
* @pdev: pci device
*
- * Find the configfs group that belongs to the pci device and return
- * the mask of engines allowed to be used.
+ * Return: enable_psmi setting in configfs
+ */
+bool xe_configfs_get_psmi_enabled(struct pci_dev *pdev)
+{
+ struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
+ bool ret;
+
+ if (!dev)
+ return false;
+
+ ret = dev->config.enable_psmi;
+ config_group_put(&dev->group);
+
+ return ret;
+}
+
+/**
+ * xe_configfs_get_ctx_restore_mid_bb - get configfs ctx_restore_mid_bb setting
+ * @pdev: pci device
+ * @class: hw engine class
+ * @cs: pointer to the bb to use - only valid during probe
*
- * Return: engine mask with allowed engines
+ * Return: Number of dwords used in the ctx_restore_mid_bb setting in configfs
*/
-u64 xe_configfs_get_engines_allowed(struct pci_dev *pdev)
+u32 xe_configfs_get_ctx_restore_mid_bb(struct pci_dev *pdev,
+ enum xe_engine_class class,
+ const u32 **cs)
{
- struct xe_config_device *dev = configfs_find_group(pdev);
- u64 engines_allowed;
+ struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
+ u32 len;
if (!dev)
- return U64_MAX;
+ return 0;
- engines_allowed = dev->engines_allowed;
- config_item_put(&dev->group.cg_item);
+ if (cs)
+ *cs = dev->config.ctx_restore_mid_bb[class].cs;
- return engines_allowed;
+ len = dev->config.ctx_restore_mid_bb[class].len;
+ config_group_put(&dev->group);
+
+ return len;
+}
+
+/**
+ * xe_configfs_get_ctx_restore_post_bb - get configfs ctx_restore_post_bb setting
+ * @pdev: pci device
+ * @class: hw engine class
+ * @cs: pointer to the bb to use - only valid during probe
+ *
+ * Return: Number of dwords used in the ctx_restore_post_bb setting in configfs
+ */
+u32 xe_configfs_get_ctx_restore_post_bb(struct pci_dev *pdev,
+ enum xe_engine_class class,
+ const u32 **cs)
+{
+ struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
+ u32 len;
+
+ if (!dev)
+ return 0;
+
+ if (cs)
+ *cs = dev->config.ctx_restore_post_bb[class].cs;
+ len = dev->config.ctx_restore_post_bb[class].len;
+ config_group_put(&dev->group);
+
+ return len;
}
int __init xe_configfs_init(void)
{
- struct config_group *root = &xe_configfs.su_group;
int ret;
- config_group_init(root);
+ config_group_init(&xe_configfs.su_group);
mutex_init(&xe_configfs.su_mutex);
ret = configfs_register_subsystem(&xe_configfs);
if (ret) {
- pr_err("Error %d while registering %s subsystem\n",
- ret, root->cg_item.ci_namebuf);
+ mutex_destroy(&xe_configfs.su_mutex);
return ret;
}
@@ -407,5 +993,5 @@ int __init xe_configfs_init(void)
void xe_configfs_exit(void)
{
configfs_unregister_subsystem(&xe_configfs);
+ mutex_destroy(&xe_configfs.su_mutex);
}
-
diff --git a/drivers/gpu/drm/xe/xe_configfs.h b/drivers/gpu/drm/xe/xe_configfs.h
index fb8764008089..c61e0e47ed94 100644
--- a/drivers/gpu/drm/xe/xe_configfs.h
+++ b/drivers/gpu/drm/xe/xe_configfs.h
@@ -8,20 +8,32 @@
#include <linux/limits.h>
#include <linux/types.h>
+#include <xe_hw_engine_types.h>
+
struct pci_dev;
#if IS_ENABLED(CONFIG_CONFIGFS_FS)
int xe_configfs_init(void);
void xe_configfs_exit(void);
+void xe_configfs_check_device(struct pci_dev *pdev);
bool xe_configfs_get_survivability_mode(struct pci_dev *pdev);
-void xe_configfs_clear_survivability_mode(struct pci_dev *pdev);
u64 xe_configfs_get_engines_allowed(struct pci_dev *pdev);
+bool xe_configfs_get_psmi_enabled(struct pci_dev *pdev);
+u32 xe_configfs_get_ctx_restore_mid_bb(struct pci_dev *pdev, enum xe_engine_class,
+ const u32 **cs);
+u32 xe_configfs_get_ctx_restore_post_bb(struct pci_dev *pdev, enum xe_engine_class,
+ const u32 **cs);
#else
static inline int xe_configfs_init(void) { return 0; }
static inline void xe_configfs_exit(void) { }
+static inline void xe_configfs_check_device(struct pci_dev *pdev) { }
static inline bool xe_configfs_get_survivability_mode(struct pci_dev *pdev) { return false; }
-static inline void xe_configfs_clear_survivability_mode(struct pci_dev *pdev) { }
static inline u64 xe_configfs_get_engines_allowed(struct pci_dev *pdev) { return U64_MAX; }
+static inline bool xe_configfs_get_psmi_enabled(struct pci_dev *pdev) { return false; }
+static inline u32 xe_configfs_get_ctx_restore_mid_bb(struct pci_dev *pdev, enum xe_engine_class,
+ const u32 **cs) { return 0; }
+static inline u32 xe_configfs_get_ctx_restore_post_bb(struct pci_dev *pdev, enum xe_engine_class,
+ const u32 **cs) { return 0; }
#endif
#endif
diff --git a/drivers/gpu/drm/xe/xe_debugfs.c b/drivers/gpu/drm/xe/xe_debugfs.c
index 26e9d146ccbf..cd977dbd1ef6 100644
--- a/drivers/gpu/drm/xe/xe_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_debugfs.c
@@ -11,18 +11,24 @@
#include <drm/drm_debugfs.h>
+#include "regs/xe_pmt.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_force_wake.h"
#include "xe_gt_debugfs.h"
#include "xe_gt_printk.h"
#include "xe_guc_ads.h"
+#include "xe_mmio.h"
#include "xe_pm.h"
+#include "xe_psmi.h"
#include "xe_pxp_debugfs.h"
#include "xe_sriov.h"
#include "xe_sriov_pf.h"
+#include "xe_sriov_vf.h"
#include "xe_step.h"
+#include "xe_tile_debugfs.h"
#include "xe_wa.h"
+#include "xe_vsec.h"
#ifdef CONFIG_DRM_XE_DEBUG
#include "xe_bo_evict.h"
@@ -31,6 +37,24 @@
#endif
DECLARE_FAULT_ATTR(gt_reset_failure);
+DECLARE_FAULT_ATTR(inject_csc_hw_error);
+
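+/* Read one PMT telemetry residency counter and print it through @p */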
+static void read_residency_counter(struct xe_device *xe, struct xe_mmio *mmio,
+ u32 offset, const char *name, struct drm_printer *p)
+{
+ u64 residency = 0;
+ int ret;
+
+ ret = xe_pmt_telem_read(to_pci_dev(xe->drm.dev),
+ xe_mmio_read32(mmio, PUNIT_TELEMETRY_GUID),
+ &residency, offset, sizeof(residency));
+ if (ret != sizeof(residency)) {
+ drm_warn(&xe->drm, "%s counter failed to read, ret %d\n", name, ret);
+ return;
+ }
+
+ drm_printf(p, "%s : %llu\n", name, residency);
+}
static struct xe_device *node_to_xe(struct drm_info_node *node)
{
@@ -102,12 +126,72 @@ static int workaround_info(struct seq_file *m, void *data)
return 0;
}
+static int dgfx_pkg_residencies_show(struct seq_file *m, void *data)
+{
+ struct xe_device *xe;
+ struct xe_mmio *mmio;
+ struct drm_printer p;
+
+ xe = node_to_xe(m->private);
+ p = drm_seq_file_printer(m);
+ xe_pm_runtime_get(xe);
+ mmio = xe_root_tile_mmio(xe);
+
+ static const struct {
+ u32 offset;
+ const char *name;
+ } residencies[] = {
+ {BMG_G2_RESIDENCY_OFFSET, "Package G2"},
+ {BMG_G6_RESIDENCY_OFFSET, "Package G6"},
+ {BMG_G8_RESIDENCY_OFFSET, "Package G8"},
+ {BMG_G10_RESIDENCY_OFFSET, "Package G10"},
+ {BMG_MODS_RESIDENCY_OFFSET, "Package ModS"}
+ };
+
+ for (int i = 0; i < ARRAY_SIZE(residencies); i++)
+ read_residency_counter(xe, mmio, residencies[i].offset, residencies[i].name, &p);
+
+ xe_pm_runtime_put(xe);
+ return 0;
+}
+
+static int dgfx_pcie_link_residencies_show(struct seq_file *m, void *data)
+{
+ struct xe_device *xe;
+ struct xe_mmio *mmio;
+ struct drm_printer p;
+
+ xe = node_to_xe(m->private);
+ p = drm_seq_file_printer(m);
+ xe_pm_runtime_get(xe);
+ mmio = xe_root_tile_mmio(xe);
+
+ static const struct {
+ u32 offset;
+ const char *name;
+ } residencies[] = {
+ {BMG_PCIE_LINK_L0_RESIDENCY_OFFSET, "PCIE LINK L0 RESIDENCY"},
+ {BMG_PCIE_LINK_L1_RESIDENCY_OFFSET, "PCIE LINK L1 RESIDENCY"},
+ {BMG_PCIE_LINK_L1_2_RESIDENCY_OFFSET, "PCIE LINK L1.2 RESIDENCY"}
+ };
+
+ for (int i = 0; i < ARRAY_SIZE(residencies); i++)
+ read_residency_counter(xe, mmio, residencies[i].offset, residencies[i].name, &p);
+
+ xe_pm_runtime_put(xe);
+ return 0;
+}
+
static const struct drm_info_list debugfs_list[] = {
{"info", info, 0},
{ .name = "sriov_info", .show = sriov_info, },
{ .name = "workarounds", .show = workaround_info, },
};
+static const struct drm_info_list debugfs_residencies[] = {
+ { .name = "dgfx_pkg_residencies", .show = dgfx_pkg_residencies_show, },
+ { .name = "dgfx_pcie_link_residencies", .show = dgfx_pcie_link_residencies_show, },
+};
+
static int forcewake_open(struct inode *inode, struct file *file)
{
struct xe_device *xe = inode->i_private;
@@ -247,20 +331,68 @@ static const struct file_operations atomic_svm_timeslice_ms_fops = {
.write = atomic_svm_timeslice_ms_set,
};
+static ssize_t disable_late_binding_show(struct file *f, char __user *ubuf,
+ size_t size, loff_t *pos)
+{
+ struct xe_device *xe = file_inode(f)->i_private;
+ struct xe_late_bind *late_bind = &xe->late_bind;
+ char buf[32];
+ int len;
+
+ len = scnprintf(buf, sizeof(buf), "%d\n", late_bind->disable);
+
+ return simple_read_from_buffer(ubuf, size, pos, buf, len);
+}
+
+static ssize_t disable_late_binding_set(struct file *f, const char __user *ubuf,
+ size_t size, loff_t *pos)
+{
+ struct xe_device *xe = file_inode(f)->i_private;
+ struct xe_late_bind *late_bind = &xe->late_bind;
+ u32 uval;
+ ssize_t ret;
+
+ ret = kstrtouint_from_user(ubuf, size, 10, &uval);
+ if (ret)
+ return ret;
+
+ if (uval > 1)
+ return -EINVAL;
+
+ late_bind->disable = !!uval;
+ return size;
+}
+
+static const struct file_operations disable_late_binding_fops = {
+ .owner = THIS_MODULE,
+ .read = disable_late_binding_show,
+ .write = disable_late_binding_set,
+};
+
void xe_debugfs_register(struct xe_device *xe)
{
struct ttm_device *bdev = &xe->ttm;
struct drm_minor *minor = xe->drm.primary;
struct dentry *root = minor->debugfs_root;
struct ttm_resource_manager *man;
+ struct xe_tile *tile;
struct xe_gt *gt;
u32 mem_type;
+ u8 tile_id;
u8 id;
drm_debugfs_create_files(debugfs_list,
ARRAY_SIZE(debugfs_list),
root, minor);
+ if (xe->info.platform == XE_BATTLEMAGE && !IS_SRIOV_VF(xe)) {
+ drm_debugfs_create_files(debugfs_residencies,
+ ARRAY_SIZE(debugfs_residencies),
+ root, minor);
+ fault_create_debugfs_attr("inject_csc_hw_error", root,
+ &inject_csc_hw_error);
+ }
+
debugfs_create_file("forcewake_all", 0400, root, xe,
&forcewake_all_fops);
@@ -270,6 +402,9 @@ void xe_debugfs_register(struct xe_device *xe)
debugfs_create_file("atomic_svm_timeslice_ms", 0600, root, xe,
&atomic_svm_timeslice_ms_fops);
+ debugfs_create_file("disable_late_binding", 0600, root, xe,
+ &disable_late_binding_fops);
+
for (mem_type = XE_PL_VRAM0; mem_type <= XE_PL_VRAM1; ++mem_type) {
man = ttm_manager_type(bdev, mem_type);
@@ -288,13 +423,20 @@ void xe_debugfs_register(struct xe_device *xe)
if (man)
ttm_resource_manager_create_debugfs(man, root, "stolen_mm");
+ for_each_tile(tile, xe, tile_id)
+ xe_tile_debugfs_register(tile);
+
for_each_gt(gt, xe, id)
xe_gt_debugfs_register(gt);
xe_pxp_debugfs_register(xe->pxp);
+ xe_psmi_debugfs_register(xe);
+
fault_create_debugfs_attr("fail_gt_reset", root, &gt_reset_failure);
if (IS_SRIOV_PF(xe))
xe_sriov_pf_debugfs_register(xe, root);
+ else if (IS_SRIOV_VF(xe))
+ xe_sriov_vf_debugfs_register(xe, root);
}
diff --git a/drivers/gpu/drm/xe/xe_dep_job_types.h b/drivers/gpu/drm/xe/xe_dep_job_types.h
new file mode 100644
index 000000000000..c6a484f24c8c
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_dep_job_types.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_DEP_JOB_TYPES_H_
+#define _XE_DEP_JOB_TYPES_H_
+
+#include <drm/gpu_scheduler.h>
+
+struct xe_dep_job;
+
+/** struct xe_dep_job_ops - Generic Xe dependency job operations */
+struct xe_dep_job_ops {
+ /** @run_job: Run generic Xe dependency job */
+ struct dma_fence *(*run_job)(struct xe_dep_job *job);
+ /** @free_job: Free generic Xe dependency job */
+ void (*free_job)(struct xe_dep_job *job);
+};
+
+/** struct xe_dep_job - Generic dependency Xe job */
+struct xe_dep_job {
+ /** @drm: base DRM scheduler job */
+ struct drm_sched_job drm;
+ /** @ops: dependency job operations */
+ const struct xe_dep_job_ops *ops;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_dep_scheduler.c b/drivers/gpu/drm/xe/xe_dep_scheduler.c
new file mode 100644
index 000000000000..9bd3bfd2e526
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_dep_scheduler.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <linux/slab.h>
+
+#include <drm/gpu_scheduler.h>
+
+#include "xe_dep_job_types.h"
+#include "xe_dep_scheduler.h"
+#include "xe_device_types.h"
+
+/**
+ * DOC: Xe Dependency Scheduler
+ *
+ * The Xe dependency scheduler is a simple wrapper built around the DRM
+ * scheduler to execute jobs once their dependencies are resolved (i.e., all
+ * input fences specified as dependencies are signaled). The jobs that are
+ * executed contain virtual functions to run (execute) and free the job,
+ * allowing a single dependency scheduler to handle jobs performing different
+ * operations.
+ *
+ * Example use cases include deferred resource freeing, TLB invalidations after
+ * bind jobs, etc.
+ */
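+
+/*
+ * Minimal usage sketch (illustrative only: my_run_job/my_free_job are
+ * hypothetical callbacks, error handling is elided and signatures are
+ * abbreviated):
+ *
+ *	static const struct xe_dep_job_ops my_ops = {
+ *		.run_job = my_run_job,
+ *		.free_job = my_free_job,
+ *	};
+ *
+ *	dep = xe_dep_scheduler_create(xe, NULL, "deferred-free", 64);
+ *	job->ops = &my_ops;
+ *	drm_sched_job_init(&job->drm, xe_dep_scheduler_entity(dep), 1, NULL);
+ *	drm_sched_job_add_dependency(&job->drm, in_fence);
+ *	drm_sched_job_arm(&job->drm);
+ *	drm_sched_entity_push_job(&job->drm);
+ */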
+
+/** struct xe_dep_scheduler - Generic Xe dependency scheduler */
+struct xe_dep_scheduler {
+ /** @sched: DRM GPU scheduler */
+ struct drm_gpu_scheduler sched;
+ /** @entity: DRM scheduler entity */
+ struct drm_sched_entity entity;
+ /** @rcu: For safe freeing of exported dma fences */
+ struct rcu_head rcu;
+};
+
+static struct dma_fence *xe_dep_scheduler_run_job(struct drm_sched_job *drm_job)
+{
+ struct xe_dep_job *dep_job =
+ container_of(drm_job, typeof(*dep_job), drm);
+
+ return dep_job->ops->run_job(dep_job);
+}
+
+static void xe_dep_scheduler_free_job(struct drm_sched_job *drm_job)
+{
+ struct xe_dep_job *dep_job =
+ container_of(drm_job, typeof(*dep_job), drm);
+
+ dep_job->ops->free_job(dep_job);
+}
+
+static const struct drm_sched_backend_ops sched_ops = {
+ .run_job = xe_dep_scheduler_run_job,
+ .free_job = xe_dep_scheduler_free_job,
+};
+
+/**
+ * xe_dep_scheduler_create() - Generic Xe dependency scheduler create
+ * @xe: Xe device
+ * @submit_wq: Submit workqueue struct (can be NULL)
+ * @name: Name of dependency scheduler
+ * @job_limit: Max dependency jobs that can be scheduled
+ *
+ * Create a generic Xe dependency scheduler and initialize internal DRM
+ * scheduler objects.
+ *
+ * Return: Generic Xe dependency scheduler object on success, ERR_PTR failure
+ */
+struct xe_dep_scheduler *
+xe_dep_scheduler_create(struct xe_device *xe,
+ struct workqueue_struct *submit_wq,
+ const char *name, u32 job_limit)
+{
+ struct xe_dep_scheduler *dep_scheduler;
+ struct drm_gpu_scheduler *sched;
+ const struct drm_sched_init_args args = {
+ .ops = &sched_ops,
+ .submit_wq = submit_wq,
+ .num_rqs = 1,
+ .credit_limit = job_limit,
+ .timeout = MAX_SCHEDULE_TIMEOUT,
+ .name = name,
+ .dev = xe->drm.dev,
+ };
+ int err;
+
+ dep_scheduler = kzalloc(sizeof(*dep_scheduler), GFP_KERNEL);
+ if (!dep_scheduler)
+ return ERR_PTR(-ENOMEM);
+
+ err = drm_sched_init(&dep_scheduler->sched, &args);
+ if (err)
+ goto err_free;
+
+ sched = &dep_scheduler->sched;
+ err = drm_sched_entity_init(&dep_scheduler->entity, 0, &sched, 1, NULL);
+ if (err)
+ goto err_sched;
+
+ init_rcu_head(&dep_scheduler->rcu);
+
+ return dep_scheduler;
+
+err_sched:
+ drm_sched_fini(&dep_scheduler->sched);
+err_free:
+ kfree(dep_scheduler);
+
+ return ERR_PTR(err);
+}
+
+/**
+ * xe_dep_scheduler_fini() - Generic Xe dependency scheduler finalize
+ * @dep_scheduler: Generic Xe dependency scheduler object
+ *
+ * Finalize internal DRM scheduler objects and free generic Xe dependency
+ * scheduler object
+ */
+void xe_dep_scheduler_fini(struct xe_dep_scheduler *dep_scheduler)
+{
+ drm_sched_entity_fini(&dep_scheduler->entity);
+ drm_sched_fini(&dep_scheduler->sched);
+ /*
+	 * RCU free due to the sched being exported via DRM scheduler fences
+ * (timeline name).
+ */
+ kfree_rcu(dep_scheduler, rcu);
+}
+
+/**
+ * xe_dep_scheduler_entity() - Retrieve a generic Xe dependency scheduler
+ * DRM scheduler entity
+ * @dep_scheduler: Generic Xe dependency scheduler object
+ *
+ * Return: The generic Xe dependency scheduler's DRM scheduler entity
+ */
+struct drm_sched_entity *
+xe_dep_scheduler_entity(struct xe_dep_scheduler *dep_scheduler)
+{
+ return &dep_scheduler->entity;
+}
diff --git a/drivers/gpu/drm/xe/xe_dep_scheduler.h b/drivers/gpu/drm/xe/xe_dep_scheduler.h
new file mode 100644
index 000000000000..853961eec64b
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_dep_scheduler.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <linux/types.h>
+
+struct drm_sched_entity;
+struct workqueue_struct;
+struct xe_dep_scheduler;
+struct xe_device;
+
+struct xe_dep_scheduler *
+xe_dep_scheduler_create(struct xe_device *xe,
+ struct workqueue_struct *submit_wq,
+ const char *name, u32 job_limit);
+
+void xe_dep_scheduler_fini(struct xe_dep_scheduler *dep_scheduler);
+
+struct drm_sched_entity *
+xe_dep_scheduler_entity(struct xe_dep_scheduler *dep_scheduler);
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index 6ece4defa9df..fdb7b7498920 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -45,6 +45,7 @@
#include "xe_hwmon.h"
#include "xe_i2c.h"
#include "xe_irq.h"
+#include "xe_late_bind_fw.h"
#include "xe_mmio.h"
#include "xe_module.h"
#include "xe_nvm.h"
@@ -54,6 +55,7 @@
#include "xe_pcode.h"
#include "xe_pm.h"
#include "xe_pmu.h"
+#include "xe_psmi.h"
#include "xe_pxp.h"
#include "xe_query.h"
#include "xe_shrinker.h"
@@ -63,7 +65,9 @@
#include "xe_ttm_stolen_mgr.h"
#include "xe_ttm_sys_mgr.h"
#include "xe_vm.h"
+#include "xe_vm_madvise.h"
#include "xe_vram.h"
+#include "xe_vram_types.h"
#include "xe_vsec.h"
#include "xe_wait_user_fence.h"
#include "xe_wa.h"
@@ -200,6 +204,9 @@ static const struct drm_ioctl_desc xe_ioctls[] = {
DRM_IOCTL_DEF_DRV(XE_WAIT_USER_FENCE, xe_wait_user_fence_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(XE_OBSERVATION, xe_observation_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(XE_MADVISE, xe_vm_madvise_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(XE_VM_QUERY_MEM_RANGE_ATTRS, xe_vm_query_vmas_attrs_ioctl,
+ DRM_RENDER_ALLOW),
};
static long xe_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
@@ -451,6 +458,8 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
if (err)
goto err;
+ xe_validation_device_init(&xe->val);
+
init_waitqueue_head(&xe->ufence_wq);
init_rwsem(&xe->usm.lock);
@@ -524,7 +533,7 @@ static bool xe_driver_flr_disabled(struct xe_device *xe)
* re-init and saving/restoring (or re-populating) the wiped memory. Since we
* perform the FLR as the very last action before releasing access to the HW
* during the driver release flow, we don't attempt recovery at all, because
- * if/when a new instance of i915 is bound to the device it will do a full
+ * if/when a new instance of Xe is bound to the device it will do a full
* re-init anyway.
*/
static void __xe_driver_flr(struct xe_device *xe)
@@ -688,6 +697,21 @@ static void sriov_update_device_info(struct xe_device *xe)
}
}
+static int xe_device_vram_alloc(struct xe_device *xe)
+{
+ struct xe_vram_region *vram;
+
+ if (!IS_DGFX(xe))
+ return 0;
+
+ vram = drmm_kzalloc(&xe->drm, sizeof(*vram), GFP_KERNEL);
+ if (!vram)
+ return -ENOMEM;
+
+ xe->mem.vram = vram;
+ return 0;
+}
+
/**
* xe_device_probe_early: Device early probe
* @xe: xe device instance
@@ -722,7 +746,7 @@ int xe_device_probe_early(struct xe_device *xe)
* possible, but still return the previous error for error
* propagation
*/
- err = xe_survivability_mode_enable(xe);
+ err = xe_survivability_mode_boot_enable(xe);
if (err)
return err;
@@ -735,6 +759,10 @@ int xe_device_probe_early(struct xe_device *xe)
xe->wedged.mode = xe_modparam.wedged_mode;
+ err = xe_device_vram_alloc(xe);
+ if (err)
+ return err;
+
return 0;
}
ALLOW_ERROR_INJECTION(xe_device_probe_early, ERRNO); /* See xe_pci_probe() */
@@ -863,7 +891,7 @@ int xe_device_probe(struct xe_device *xe)
}
if (xe->tiles->media_gt &&
- XE_WA(xe->tiles->media_gt, 15015404425_disable))
+ XE_GT_WA(xe->tiles->media_gt, 15015404425_disable))
XE_DEVICE_WA_DISABLE(xe, 15015404425);
err = xe_devcoredump_init(xe);
@@ -876,6 +904,10 @@ int xe_device_probe(struct xe_device *xe)
if (err)
return err;
+ err = xe_late_bind_init(&xe->late_bind);
+ if (err)
+ return err;
+
err = xe_oa_init(xe);
if (err)
return err;
@@ -888,6 +920,10 @@ int xe_device_probe(struct xe_device *xe)
if (err)
return err;
+ err = xe_psmi_init(xe);
+ if (err)
+ return err;
+
err = drm_dev_register(&xe->drm, 0);
if (err)
return err;
@@ -921,6 +957,10 @@ int xe_device_probe(struct xe_device *xe)
xe_vsec_init(xe);
+ err = xe_sriov_init_late(xe);
+ if (err)
+ goto err_unregister_display;
+
return devm_add_action_or_reset(xe->drm.dev, xe_device_sanitize, xe);
err_unregister_display:
@@ -1019,7 +1059,7 @@ void xe_device_l2_flush(struct xe_device *xe)
gt = xe_root_mmio_gt(xe);
- if (!XE_WA(gt, 16023588340))
+ if (!XE_GT_WA(gt, 16023588340))
return;
fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
@@ -1063,7 +1103,7 @@ void xe_device_td_flush(struct xe_device *xe)
return;
root_gt = xe_root_mmio_gt(xe);
- if (XE_WA(root_gt, 16023588340)) {
+ if (XE_GT_WA(root_gt, 16023588340)) {
/* A transient flush is not sufficient: flush the L2 */
xe_device_l2_flush(xe);
} else {
@@ -1134,11 +1174,63 @@ static void xe_device_wedged_fini(struct drm_device *drm, void *arg)
}
/**
+ * DOC: Xe Device Wedging
+ *
+ * The Xe driver uses the drm device wedged uevent as documented in
+ * Documentation/gpu/drm-uapi.rst. When the device is in a wedged state, every
+ * IOCTL is blocked and the GT cannot be used. Certain critical errors, like a
+ * GT reset failure or firmware failures, can cause the device to be wedged.
+ * The default recovery method for a wedged state is rebind/bus-reset.
+ *
+ * Another recovery method is vendor-specific. Below are the cases that send
+ * ``WEDGED=vendor-specific`` recovery method in drm device wedged uevent.
+ *
+ * Case: Firmware Flash
+ * --------------------
+ *
+ * Identification Hint
+ * +++++++++++++++++++
+ *
+ * ``WEDGED=vendor-specific`` drm device wedged uevent with
+ * :ref:`Runtime Survivability mode <xe-survivability-mode>` is used to notify
+ * admin/userspace consumer about the need for a firmware flash.
+ *
+ * Recovery Procedure
+ * ++++++++++++++++++
+ *
+ * Once the ``WEDGED=vendor-specific`` drm device wedged uevent is received,
+ * follow the steps below:
+ *
+ * - Check the Runtime Survivability mode sysfs attribute. If it is enabled, a
+ *   firmware flash is required to recover the device::
+ *
+ *     /sys/bus/pci/devices/<device>/survivability_mode
+ *
+ * - The admin/userspace consumer can use firmware flashing tools like fwupd
+ *   to flash firmware and restore the device to normal operation.
+ */
+
+/**
+ * xe_device_set_wedged_method - Set wedged recovery method
+ * @xe: xe device instance
+ * @method: recovery method to set
+ *
+ * Set wedged recovery method to be sent in drm wedged uevent.
+ */
+void xe_device_set_wedged_method(struct xe_device *xe, unsigned long method)
+{
+ xe->wedged.method = method;
+}
+
+/**
* xe_device_declare_wedged - Declare device wedged
* @xe: xe device instance
*
- * This is a final state that can only be cleared with a module
- * re-probe (unbind + bind).
+ * This is a final state that can only be cleared with the recovery method
+ * specified in the drm wedged uevent. The method can be set using
+ * xe_device_set_wedged_method before declaring the device as wedged. If no method
+ * is set, reprobe (unbind/re-bind) will be sent by default.
+ *
* In this state every IOCTL will be blocked so the GT cannot be used.
* In general it will be called upon any critical error such as gt reset
* failure or guc loading failure. Userspace will be notified of this state
@@ -1172,13 +1264,18 @@ void xe_device_declare_wedged(struct xe_device *xe)
"IOCTLs and executions are blocked. Only a rebind may clear the failure\n"
"Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/xe/kernel/issues/new\n",
dev_name(xe->drm.dev));
-
- /* Notify userspace of wedged device */
- drm_dev_wedged_event(&xe->drm,
- DRM_WEDGE_RECOVERY_REBIND | DRM_WEDGE_RECOVERY_BUS_RESET,
- NULL);
}
for_each_gt(gt, xe, id)
xe_gt_declare_wedged(gt);
+
+ if (xe_device_wedged(xe)) {
+ /* If no wedge recovery method is set, use default */
+ if (!xe->wedged.method)
+ xe_device_set_wedged_method(xe, DRM_WEDGE_RECOVERY_REBIND |
+ DRM_WEDGE_RECOVERY_BUS_RESET);
+
+ /* Notify userspace of wedged device */
+ drm_dev_wedged_event(&xe->drm, xe->wedged.method, NULL);
+ }
}
diff --git a/drivers/gpu/drm/xe/xe_device.h b/drivers/gpu/drm/xe/xe_device.h
index bc802e066a7d..32cc6323b7f6 100644
--- a/drivers/gpu/drm/xe/xe_device.h
+++ b/drivers/gpu/drm/xe/xe_device.h
@@ -187,6 +187,7 @@ static inline bool xe_device_wedged(struct xe_device *xe)
return atomic_read(&xe->wedged.flag);
}
+void xe_device_set_wedged_method(struct xe_device *xe, unsigned long method);
void xe_device_declare_wedged(struct xe_device *xe);
struct xe_file *xe_file_get(struct xe_file *xef);
diff --git a/drivers/gpu/drm/xe/xe_device_sysfs.c b/drivers/gpu/drm/xe/xe_device_sysfs.c
index 927ee7991696..c5151c86a98a 100644
--- a/drivers/gpu/drm/xe/xe_device_sysfs.c
+++ b/drivers/gpu/drm/xe/xe_device_sysfs.c
@@ -71,12 +71,21 @@ vram_d3cold_threshold_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RW(vram_d3cold_threshold);
+static struct attribute *vram_attrs[] = {
+ &dev_attr_vram_d3cold_threshold.attr,
+ NULL
+};
+
+static const struct attribute_group vram_attr_group = {
+ .attrs = vram_attrs,
+};
+
static ssize_t
lb_fan_control_version_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev));
struct xe_tile *root = xe_device_get_root_tile(xe);
- u32 cap, ver_low = FAN_TABLE, ver_high = FAN_TABLE;
+ u32 cap = 0, ver_low = FAN_TABLE, ver_high = FAN_TABLE;
u16 major = 0, minor = 0, hotfix = 0, build = 0;
int ret;
@@ -115,7 +124,7 @@ lb_voltage_regulator_version_show(struct device *dev, struct device_attribute *a
{
struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev));
struct xe_tile *root = xe_device_get_root_tile(xe);
- u32 cap, ver_low = VR_CONFIG, ver_high = VR_CONFIG;
+ u32 cap = 0, ver_low = VR_CONFIG, ver_high = VR_CONFIG;
u16 major = 0, minor = 0, hotfix = 0, build = 0;
int ret;
@@ -149,62 +158,44 @@ out:
}
static DEVICE_ATTR_ADMIN_RO(lb_voltage_regulator_version);
-static int late_bind_create_files(struct device *dev)
-{
- struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev));
- struct xe_tile *root = xe_device_get_root_tile(xe);
- u32 cap;
- int ret;
-
- xe_pm_runtime_get(xe);
-
- ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_CAPABILITY_STATUS, 0),
- &cap, NULL);
- if (ret) {
- if (ret == -ENXIO) {
- drm_dbg(&xe->drm, "Late binding not supported by firmware\n");
- ret = 0;
- }
- goto out;
- }
-
- if (REG_FIELD_GET(V1_FAN_SUPPORTED, cap)) {
- ret = sysfs_create_file(&dev->kobj, &dev_attr_lb_fan_control_version.attr);
- if (ret)
- goto out;
- }
-
- if (REG_FIELD_GET(VR_PARAMS_SUPPORTED, cap))
- ret = sysfs_create_file(&dev->kobj, &dev_attr_lb_voltage_regulator_version.attr);
-out:
- xe_pm_runtime_put(xe);
-
- return ret;
-}
+static struct attribute *late_bind_attrs[] = {
+ &dev_attr_lb_fan_control_version.attr,
+ &dev_attr_lb_voltage_regulator_version.attr,
+ NULL
+};
-static void late_bind_remove_files(struct device *dev)
+static umode_t late_bind_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev));
struct xe_tile *root = xe_device_get_root_tile(xe);
- u32 cap;
+ u32 cap = 0;
int ret;
xe_pm_runtime_get(xe);
ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_CAPABILITY_STATUS, 0),
&cap, NULL);
+ xe_pm_runtime_put(xe);
if (ret)
- goto out;
+ return 0;
- if (REG_FIELD_GET(V1_FAN_SUPPORTED, cap))
- sysfs_remove_file(&dev->kobj, &dev_attr_lb_fan_control_version.attr);
+ if (attr == &dev_attr_lb_fan_control_version.attr &&
+ REG_FIELD_GET(V1_FAN_SUPPORTED, cap))
+ return attr->mode;
+ if (attr == &dev_attr_lb_voltage_regulator_version.attr &&
+ REG_FIELD_GET(VR_PARAMS_SUPPORTED, cap))
+ return attr->mode;
- if (REG_FIELD_GET(VR_PARAMS_SUPPORTED, cap))
- sysfs_remove_file(&dev->kobj, &dev_attr_lb_voltage_regulator_version.attr);
-out:
- xe_pm_runtime_put(xe);
+ return 0;
}
+static const struct attribute_group late_bind_attr_group = {
+ .attrs = late_bind_attrs,
+ .is_visible = late_bind_attr_is_visible,
+};
+
/**
* DOC: PCIe Gen5 Limitations
*
@@ -278,24 +269,15 @@ auto_link_downgrade_status_show(struct device *dev, struct device_attribute *att
}
static DEVICE_ATTR_ADMIN_RO(auto_link_downgrade_status);
-static const struct attribute *auto_link_downgrade_attrs[] = {
+static struct attribute *auto_link_downgrade_attrs[] = {
&dev_attr_auto_link_downgrade_capable.attr,
&dev_attr_auto_link_downgrade_status.attr,
NULL
};
-static void xe_device_sysfs_fini(void *arg)
-{
- struct xe_device *xe = arg;
-
- if (xe->d3cold.capable)
- sysfs_remove_file(&xe->drm.dev->kobj, &dev_attr_vram_d3cold_threshold.attr);
-
- if (xe->info.platform == XE_BATTLEMAGE) {
- sysfs_remove_files(&xe->drm.dev->kobj, auto_link_downgrade_attrs);
- late_bind_remove_files(xe->drm.dev);
- }
-}
+static const struct attribute_group auto_link_downgrade_attr_group = {
+ .attrs = auto_link_downgrade_attrs,
+};
int xe_device_sysfs_init(struct xe_device *xe)
{
@@ -303,24 +285,20 @@ int xe_device_sysfs_init(struct xe_device *xe)
int ret;
if (xe->d3cold.capable) {
- ret = sysfs_create_file(&dev->kobj, &dev_attr_vram_d3cold_threshold.attr);
+ ret = devm_device_add_group(dev, &vram_attr_group);
if (ret)
return ret;
}
if (xe->info.platform == XE_BATTLEMAGE && !IS_SRIOV_VF(xe)) {
- ret = sysfs_create_files(&dev->kobj, auto_link_downgrade_attrs);
+ ret = devm_device_add_group(dev, &auto_link_downgrade_attr_group);
if (ret)
- goto cleanup;
+ return ret;
- ret = late_bind_create_files(dev);
+ ret = devm_device_add_group(dev, &late_bind_attr_group);
if (ret)
- goto cleanup;
+ return ret;
}
- return devm_add_action_or_reset(dev, xe_device_sysfs_fini, xe);
-
-cleanup:
- xe_device_sysfs_fini(xe);
- return ret;
+ return 0;
}
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index 7ceb0c90f391..74d7af830b85 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -10,11 +10,11 @@
#include <drm/drm_device.h>
#include <drm/drm_file.h>
-#include <drm/drm_pagemap.h>
#include <drm/ttm/ttm_device.h>
#include "xe_devcoredump_types.h"
#include "xe_heci_gsc.h"
+#include "xe_late_bind_fw_types.h"
#include "xe_lmtt_types.h"
#include "xe_memirq_types.h"
#include "xe_oa_types.h"
@@ -24,9 +24,10 @@
#include "xe_sriov_pf_types.h"
#include "xe_sriov_types.h"
#include "xe_sriov_vf_types.h"
+#include "xe_sriov_vf_ccs_types.h"
#include "xe_step_types.h"
#include "xe_survivability_mode_types.h"
-#include "xe_ttm_vram_mgr_types.h"
+#include "xe_validation.h"
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
#define TEST_VM_OPS_ERROR
@@ -39,6 +40,7 @@ struct xe_ggtt;
struct xe_i2c;
struct xe_pat_ops;
struct xe_pxp;
+struct xe_vram_region;
#define XE_BO_INVALID_OFFSET LONG_MAX
@@ -72,61 +74,6 @@ struct xe_pxp;
struct xe_tile * : (tile__)->xe)
/**
- * struct xe_vram_region - memory region structure
- * This is used to describe a memory region in xe
- * device, such as HBM memory or CXL extension memory.
- */
-struct xe_vram_region {
- /** @io_start: IO start address of this VRAM instance */
- resource_size_t io_start;
- /**
- * @io_size: IO size of this VRAM instance
- *
- * This represents how much of this VRAM we can access
- * via the CPU through the VRAM BAR. This can be smaller
- * than @usable_size, in which case only part of VRAM is CPU
- * accessible (typically the first 256M). This
- * configuration is known as small-bar.
- */
- resource_size_t io_size;
- /** @dpa_base: This memory regions's DPA (device physical address) base */
- resource_size_t dpa_base;
- /**
- * @usable_size: usable size of VRAM
- *
- * Usable size of VRAM excluding reserved portions
- * (e.g stolen mem)
- */
- resource_size_t usable_size;
- /**
- * @actual_physical_size: Actual VRAM size
- *
- * Actual VRAM size including reserved portions
- * (e.g stolen mem)
- */
- resource_size_t actual_physical_size;
- /** @mapping: pointer to VRAM mappable space */
- void __iomem *mapping;
- /** @ttm: VRAM TTM manager */
- struct xe_ttm_vram_mgr ttm;
-#if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
- /** @pagemap: Used to remap device memory as ZONE_DEVICE */
- struct dev_pagemap pagemap;
- /**
- * @dpagemap: The struct drm_pagemap of the ZONE_DEVICE memory
- * pages of this tile.
- */
- struct drm_pagemap dpagemap;
- /**
- * @hpa_base: base host physical address
- *
- * This is generated when remap device memory as ZONE_DEVICE
- */
- resource_size_t hpa_base;
-#endif
-};
-
-/**
* struct xe_mmio - register mmio structure
*
* Represents an MMIO region that the CPU may use to access registers. A
@@ -216,7 +163,7 @@ struct xe_tile {
* Although VRAM is associated with a specific tile, it can
* still be accessed by all tiles' GTs.
*/
- struct xe_vram_region vram;
+ struct xe_vram_region *vram;
/** @mem.ggtt: Global graphics translation table */
struct xe_ggtt *ggtt;
@@ -244,6 +191,9 @@ struct xe_tile {
/** @memirq: Memory Based Interrupts. */
struct xe_memirq memirq;
+ /** @csc_hw_error_work: worker to report CSC HW errors */
+ struct work_struct csc_hw_error_work;
+
/** @pcode: tile's PCODE */
struct {
/** @pcode.lock: protecting tile's PCODE mailbox data */
@@ -255,6 +205,9 @@ struct xe_tile {
/** @sysfs: sysfs' kobj used by xe_tile_sysfs */
struct kobject *sysfs;
+
+ /** @debugfs: debugfs directory associated with this tile */
+ struct dentry *debugfs;
};
/**
@@ -328,6 +281,8 @@ struct xe_device {
u8 has_heci_cscfi:1;
/** @info.has_heci_gscfi: device has heci gscfi */
u8 has_heci_gscfi:1;
+ /** @info.has_late_bind: Device has firmware late binding support */
+ u8 has_late_bind:1;
/** @info.has_llc: Device has a shared CPU+GPU last level cache */
u8 has_llc:1;
/** @info.has_mbx_power_limits: Device has support to manage power limits using
@@ -336,8 +291,8 @@ struct xe_device {
u8 has_mbx_power_limits:1;
/** @info.has_pxp: Device has PXP support */
u8 has_pxp:1;
- /** @info.has_range_tlb_invalidation: Has range based TLB invalidations */
- u8 has_range_tlb_invalidation:1;
+ /** @info.has_range_tlb_inval: Has range based TLB invalidations */
+ u8 has_range_tlb_inval:1;
/** @info.has_sriov: Supports SR-IOV */
u8 has_sriov:1;
/** @info.has_usm: Device has unified shared memory support */
@@ -412,7 +367,7 @@ struct xe_device {
/** @mem: memory info for device */
struct {
/** @mem.vram: VRAM info for device */
- struct xe_vram_region vram;
+ struct xe_vram_region *vram;
/** @mem.sys_mgr: system TTM manager */
struct ttm_resource_manager sys_mgr;
/** @mem.sys_mgr: system memory shrinker. */
@@ -476,7 +431,7 @@ struct xe_device {
/** @ordered_wq: used to serialize compute mode resume */
struct workqueue_struct *ordered_wq;
- /** @unordered_wq: used to serialize unordered work, mostly display */
+ /** @unordered_wq: used to serialize unordered work */
struct workqueue_struct *unordered_wq;
/** @destroy_wq: used to serialize user destroy work, like queue */
@@ -581,6 +536,9 @@ struct xe_device {
/** @nvm: discrete graphics non-volatile memory */
struct intel_dg_nvm_dev *nvm;
+ /** @late_bind: xe mei late bind interface */
+ struct xe_late_bind late_bind;
+
/** @oa: oa observation subsystem */
struct xe_oa oa;
@@ -596,6 +554,8 @@ struct xe_device {
atomic_t flag;
/** @wedged.mode: Mode controlled by kernel parameter and debugfs */
int mode;
+ /** @wedged.method: Recovery method to be sent in the drm device wedged uevent */
+ unsigned long method;
} wedged;
/** @bo_device: Struct to control async free of BOs */
@@ -630,6 +590,23 @@ struct xe_device {
*/
atomic64_t global_total_pages;
#endif
+ /** @val: The domain for exhaustive eviction, which is currently per device. */
+ struct xe_validation_device val;
+
+ /** @psmi: GPU debugging via additional validation HW */
+ struct {
+ /** @psmi.capture_obj: PSMI buffer for VRAM */
+ struct xe_bo *capture_obj[XE_MAX_TILES_PER_DEVICE + 1];
+ /** @psmi.region_mask: Mask of valid memory regions */
+ u8 region_mask;
+ } psmi;
+
+#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
+ /** @g2g_test_array: for testing G2G communications */
+ u32 *g2g_test_array;
+ /** @g2g_test_count: for testing G2G communications */
+ atomic_t g2g_test_count;
+#endif
/* private: */
@@ -664,7 +641,6 @@ struct xe_device {
struct {
unsigned int hpll_freq;
unsigned int czclk_freq;
- unsigned int fsb_freq, mem_freq, is_ddr3;
};
#endif
};
diff --git a/drivers/gpu/drm/xe/xe_dma_buf.c b/drivers/gpu/drm/xe/xe_dma_buf.c
index af64baf872ef..a7d67725c3ee 100644
--- a/drivers/gpu/drm/xe/xe_dma_buf.c
+++ b/drivers/gpu/drm/xe/xe_dma_buf.c
@@ -51,6 +51,7 @@ static int xe_dma_buf_pin(struct dma_buf_attachment *attach)
struct drm_gem_object *obj = attach->dmabuf->priv;
struct xe_bo *bo = gem_to_xe_bo(obj);
struct xe_device *xe = xe_bo_device(bo);
+ struct drm_exec *exec = XE_VALIDATION_UNSUPPORTED;
int ret;
/*
@@ -63,7 +64,7 @@ static int xe_dma_buf_pin(struct dma_buf_attachment *attach)
return -EINVAL;
}
- ret = xe_bo_migrate(bo, XE_PL_TT);
+ ret = xe_bo_migrate(bo, XE_PL_TT, NULL, exec);
if (ret) {
if (ret != -EINTR && ret != -ERESTARTSYS)
drm_dbg(&xe->drm,
@@ -72,7 +73,7 @@ static int xe_dma_buf_pin(struct dma_buf_attachment *attach)
return ret;
}
- ret = xe_bo_pin_external(bo, true);
+ ret = xe_bo_pin_external(bo, true, exec);
xe_assert(xe, !ret);
return 0;
@@ -92,6 +93,7 @@ static struct sg_table *xe_dma_buf_map(struct dma_buf_attachment *attach,
struct dma_buf *dma_buf = attach->dmabuf;
struct drm_gem_object *obj = dma_buf->priv;
struct xe_bo *bo = gem_to_xe_bo(obj);
+ struct drm_exec *exec = XE_VALIDATION_UNSUPPORTED;
struct sg_table *sgt;
int r = 0;
@@ -100,9 +102,9 @@ static struct sg_table *xe_dma_buf_map(struct dma_buf_attachment *attach,
if (!xe_bo_is_pinned(bo)) {
if (!attach->peer2peer)
- r = xe_bo_migrate(bo, XE_PL_TT);
+ r = xe_bo_migrate(bo, XE_PL_TT, NULL, exec);
else
- r = xe_bo_validate(bo, NULL, false);
+ r = xe_bo_validate(bo, NULL, false, exec);
if (r)
return ERR_PTR(r);
}
@@ -161,15 +163,26 @@ static int xe_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
struct xe_bo *bo = gem_to_xe_bo(obj);
bool reads = (direction == DMA_BIDIRECTIONAL ||
direction == DMA_FROM_DEVICE);
+ struct xe_validation_ctx ctx;
+ struct drm_exec exec;
+ int ret = 0;
if (!reads)
return 0;
/* Can we do interruptible lock here? */
- xe_bo_lock(bo, false);
- (void)xe_bo_migrate(bo, XE_PL_TT);
- xe_bo_unlock(bo);
+ xe_validation_guard(&ctx, &xe_bo_device(bo)->val, &exec, (struct xe_val_flags) {}, ret) {
+ ret = drm_exec_lock_obj(&exec, &bo->ttm.base);
+ drm_exec_retry_on_contention(&exec);
+ if (ret)
+ break;
+
+ ret = xe_bo_migrate(bo, XE_PL_TT, NULL, &exec);
+ drm_exec_retry_on_contention(&exec);
+ xe_validation_retry_on_oom(&ctx, &ret);
+ }
+ /* If we failed, cpu-access takes place in current placement. */
return 0;
}
@@ -191,10 +204,22 @@ struct dma_buf *xe_gem_prime_export(struct drm_gem_object *obj, int flags)
{
struct xe_bo *bo = gem_to_xe_bo(obj);
struct dma_buf *buf;
+ struct ttm_operation_ctx ctx = {
+ .interruptible = true,
+ .no_wait_gpu = true,
+		/* We opt to avoid OOM on system page allocations */
+ .gfp_retry_mayfail = true,
+ .allow_res_evict = false,
+ };
+ int ret;
if (bo->vm)
return ERR_PTR(-EPERM);
+ ret = ttm_bo_setup_export(&bo->ttm, &ctx);
+ if (ret)
+ return ERR_PTR(ret);
+
buf = drm_gem_prime_export(obj, flags);
if (!IS_ERR(buf))
buf->ops = &xe_dmabuf_ops;
@@ -208,32 +233,45 @@ xe_dma_buf_init_obj(struct drm_device *dev, struct xe_bo *storage,
{
struct dma_resv *resv = dma_buf->resv;
struct xe_device *xe = to_xe_device(dev);
+ struct xe_validation_ctx ctx;
+ struct drm_gem_object *dummy_obj;
+ struct drm_exec exec;
struct xe_bo *bo;
- int ret;
-
- dma_resv_lock(resv, NULL);
- bo = ___xe_bo_create_locked(xe, storage, NULL, resv, NULL, dma_buf->size,
- 0, /* Will require 1way or 2way for vm_bind */
- ttm_bo_type_sg, XE_BO_FLAG_SYSTEM);
- if (IS_ERR(bo)) {
- ret = PTR_ERR(bo);
- goto error;
+ int ret = 0;
+
+ dummy_obj = drm_gpuvm_resv_object_alloc(&xe->drm);
+ if (!dummy_obj)
+ return ERR_PTR(-ENOMEM);
+
+ dummy_obj->resv = resv;
+ xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {}, ret) {
+ ret = drm_exec_lock_obj(&exec, dummy_obj);
+ drm_exec_retry_on_contention(&exec);
+ if (ret)
+ break;
+
+ bo = xe_bo_init_locked(xe, storage, NULL, resv, NULL, dma_buf->size,
+ 0, /* Will require 1way or 2way for vm_bind */
+ ttm_bo_type_sg, XE_BO_FLAG_SYSTEM, &exec);
+ drm_exec_retry_on_contention(&exec);
+ if (IS_ERR(bo)) {
+ ret = PTR_ERR(bo);
+ xe_validation_retry_on_oom(&ctx, &ret);
+ break;
+ }
}
- dma_resv_unlock(resv);
-
- return &bo->ttm.base;
+ drm_gem_object_put(dummy_obj);
-error:
- dma_resv_unlock(resv);
- return ERR_PTR(ret);
+ return ret ? ERR_PTR(ret) : &bo->ttm.base;
}
static void xe_dma_buf_move_notify(struct dma_buf_attachment *attach)
{
struct drm_gem_object *obj = attach->importer_priv;
struct xe_bo *bo = gem_to_xe_bo(obj);
+ struct drm_exec *exec = XE_VALIDATION_UNSUPPORTED;
- XE_WARN_ON(xe_bo_evict(bo));
+ XE_WARN_ON(xe_bo_evict(bo, exec));
}
static const struct dma_buf_attach_ops xe_dma_buf_attach_ops = {
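The begin_cpu_access() conversion above is the canonical shape of the new xe_validation_guard() pattern: lock objects under a drm_exec transaction, restart on ww-mutex contention, and retry after exhaustive eviction on OOM. A minimal sketch of the pattern, with error handling trimmed and the bo/placement purely illustrative:

	struct xe_validation_ctx ctx;
	struct drm_exec exec;
	int ret = 0;

	xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {}, ret) {
		ret = drm_exec_lock_obj(&exec, &bo->ttm.base);
		drm_exec_retry_on_contention(&exec);	/* restart on ww-mutex backoff */
		if (ret)
			break;

		ret = xe_bo_migrate(bo, XE_PL_TT, NULL, &exec);
		drm_exec_retry_on_contention(&exec);
		xe_validation_retry_on_oom(&ctx, &ret);	/* retry after exhaustive eviction */
	}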
diff --git a/drivers/gpu/drm/xe/xe_eu_stall.c b/drivers/gpu/drm/xe/xe_eu_stall.c
index af7916315ac6..f5cfdf29fde3 100644
--- a/drivers/gpu/drm/xe/xe_eu_stall.c
+++ b/drivers/gpu/drm/xe/xe_eu_stall.c
@@ -617,9 +617,8 @@ static int xe_eu_stall_data_buf_alloc(struct xe_eu_stall_data_stream *stream,
size = stream->per_xecore_buf_size * last_xecore;
- bo = xe_bo_create_pin_map_at_aligned(tile->xe, tile, NULL,
- size, ~0ull, ttm_bo_type_kernel,
- XE_BO_FLAG_SYSTEM | XE_BO_FLAG_GGTT, SZ_64);
+ bo = xe_bo_create_pin_map_at_novm(tile->xe, tile, size, ~0ull, ttm_bo_type_kernel,
+ XE_BO_FLAG_SYSTEM | XE_BO_FLAG_GGTT, SZ_64, false);
if (IS_ERR(bo)) {
kfree(stream->xecore_buf);
return PTR_ERR(bo);
@@ -649,7 +648,7 @@ static int xe_eu_stall_stream_enable(struct xe_eu_stall_data_stream *stream)
return -ETIMEDOUT;
}
- if (XE_WA(gt, 22016596838))
+ if (XE_GT_WA(gt, 22016596838))
xe_gt_mcr_multicast_write(gt, ROW_CHICKEN2,
_MASKED_BIT_ENABLE(DISABLE_DOP_GATING));
@@ -805,7 +804,7 @@ static int xe_eu_stall_disable_locked(struct xe_eu_stall_data_stream *stream)
cancel_delayed_work_sync(&stream->buf_poll_work);
- if (XE_WA(gt, 22016596838))
+ if (XE_GT_WA(gt, 22016596838))
xe_gt_mcr_multicast_write(gt, ROW_CHICKEN2,
_MASKED_BIT_DISABLE(DISABLE_DOP_GATING));
diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c
index 374c831e691b..7715e74bb945 100644
--- a/drivers/gpu/drm/xe/xe_exec.c
+++ b/drivers/gpu/drm/xe/xe_exec.c
@@ -19,6 +19,7 @@
#include "xe_ring_ops_types.h"
#include "xe_sched_job.h"
#include "xe_sync.h"
+#include "xe_svm.h"
#include "xe_vm.h"
/**
@@ -97,9 +98,13 @@
static int xe_exec_fn(struct drm_gpuvm_exec *vm_exec)
{
struct xe_vm *vm = container_of(vm_exec->vm, struct xe_vm, gpuvm);
+ int ret;
/* The fence slot added here is intended for the exec sched job. */
- return xe_vm_validate_rebind(vm, &vm_exec->exec, 1);
+ xe_vm_set_validation_exec(vm, &vm_exec->exec);
+ ret = xe_vm_validate_rebind(vm, &vm_exec->exec, 1);
+ xe_vm_set_validation_exec(vm, NULL);
+ return ret;
}
int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
@@ -115,10 +120,10 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
struct drm_gpuvm_exec vm_exec = {.extra.fn = xe_exec_fn};
struct drm_exec *exec = &vm_exec.exec;
u32 i, num_syncs, num_ufence = 0;
+ struct xe_validation_ctx ctx;
struct xe_sched_job *job;
struct xe_vm *vm;
bool write_locked, skip_retry = false;
- ktime_t end = 0;
int err = 0;
struct xe_hw_engine_group *group;
enum xe_hw_engine_group_execution_mode mode, previous_mode;
@@ -246,17 +251,12 @@ retry:
if (err)
goto err_unlock_list;
- vm_exec.vm = &vm->gpuvm;
- vm_exec.flags = DRM_EXEC_INTERRUPTIBLE_WAIT;
- if (xe_vm_in_lr_mode(vm)) {
- drm_exec_init(exec, vm_exec.flags, 0);
- } else {
- err = drm_gpuvm_exec_lock(&vm_exec);
- if (err) {
- if (xe_vm_validate_should_retry(exec, err, &end))
- err = -EAGAIN;
+ if (!xe_vm_in_lr_mode(vm)) {
+ vm_exec.vm = &vm->gpuvm;
+ vm_exec.flags = DRM_EXEC_INTERRUPTIBLE_WAIT;
+ err = xe_validation_exec_lock(&ctx, &vm_exec, &xe->val);
+ if (err)
goto err_unlock_list;
- }
}
if (xe_vm_is_closed_or_banned(q->vm)) {
@@ -303,7 +303,7 @@ retry:
if (err)
goto err_put_job;
- err = down_read_interruptible(&vm->userptr.notifier_lock);
+ err = xe_svm_notifier_lock_interruptible(vm);
if (err)
goto err_put_job;
@@ -345,12 +345,13 @@ retry:
err_repin:
if (!xe_vm_in_lr_mode(vm))
- up_read(&vm->userptr.notifier_lock);
+ xe_svm_notifier_unlock(vm);
err_put_job:
if (err)
xe_sched_job_put(job);
err_exec:
- drm_exec_fini(exec);
+ if (!xe_vm_in_lr_mode(vm))
+ xe_validation_ctx_fini(&ctx);
err_unlock_list:
up_read(&vm->lock);
if (err == -EAGAIN && !skip_retry)
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index c07edcda99c5..37b2b93b73d6 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -12,6 +12,7 @@
#include <drm/drm_file.h>
#include <uapi/drm/xe_drm.h>
+#include "xe_dep_scheduler.h"
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_hw_engine_class_sysfs.h"
@@ -39,6 +40,12 @@ static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue
static void __xe_exec_queue_free(struct xe_exec_queue *q)
{
+ int i;
+
+ for (i = 0; i < XE_EXEC_QUEUE_TLB_INVAL_COUNT; ++i)
+ if (q->tlb_inval[i].dep_scheduler)
+ xe_dep_scheduler_fini(q->tlb_inval[i].dep_scheduler);
+
if (xe_exec_queue_uses_pxp(q))
xe_pxp_exec_queue_remove(gt_to_xe(q->gt)->pxp, q);
if (q->vm)
@@ -50,6 +57,39 @@ static void __xe_exec_queue_free(struct xe_exec_queue *q)
kfree(q);
}
+static int alloc_dep_schedulers(struct xe_device *xe, struct xe_exec_queue *q)
+{
+ struct xe_tile *tile = gt_to_tile(q->gt);
+ int i;
+
+ for (i = 0; i < XE_EXEC_QUEUE_TLB_INVAL_COUNT; ++i) {
+ struct xe_dep_scheduler *dep_scheduler;
+ struct xe_gt *gt;
+ struct workqueue_struct *wq;
+
+ if (i == XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT)
+ gt = tile->primary_gt;
+ else
+ gt = tile->media_gt;
+
+ if (!gt)
+ continue;
+
+ wq = gt->tlb_inval.job_wq;
+
+#define MAX_TLB_INVAL_JOBS 16 /* Picking a reasonable value */
+ dep_scheduler = xe_dep_scheduler_create(xe, wq, q->name,
+ MAX_TLB_INVAL_JOBS);
+ if (IS_ERR(dep_scheduler))
+ return PTR_ERR(dep_scheduler);
+
+ q->tlb_inval[i].dep_scheduler = dep_scheduler;
+ }
+#undef MAX_TLB_INVAL_JOBS
+
+ return 0;
+}
+
static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
struct xe_vm *vm,
u32 logical_mask,
@@ -94,6 +134,14 @@ static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
else
q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_NORMAL;
+ if (q->flags & (EXEC_QUEUE_FLAG_MIGRATE | EXEC_QUEUE_FLAG_VM)) {
+ err = alloc_dep_schedulers(xe, q);
+ if (err) {
+ __xe_exec_queue_free(q);
+ return ERR_PTR(err);
+ }
+ }
+
if (vm)
q->vm = xe_vm_get(vm);
@@ -750,6 +798,21 @@ int xe_exec_queue_get_property_ioctl(struct drm_device *dev, void *data,
}
/**
+ * xe_exec_queue_lrc() - Get the LRC from an exec queue.
+ * @q: The exec_queue.
+ *
+ * Retrieves the primary LRC for the exec queue. Note that this function
+ * returns only the first LRC instance, even when multiple parallel LRCs
+ * are configured.
+ *
+ * Return: Pointer to the primary LRC of the exec queue.
+ */
+struct xe_lrc *xe_exec_queue_lrc(struct xe_exec_queue *q)
+{
+ return q->lrc[0];
+}
+
+/**
* xe_exec_queue_is_lr() - Whether an exec_queue is long-running
* @q: The exec_queue
*
@@ -1036,3 +1099,51 @@ int xe_exec_queue_last_fence_test_dep(struct xe_exec_queue *q, struct xe_vm *vm)
return err;
}
+
+/**
+ * xe_exec_queue_contexts_hwsp_rebase - Re-compute GGTT references
+ * within all LRCs of a queue.
+ * @q: the &xe_exec_queue struct instance containing target LRCs
+ * @scratch: scratch buffer to be used as temporary storage
+ *
+ * Returns: zero on success, negative error code on failure
+ */
+int xe_exec_queue_contexts_hwsp_rebase(struct xe_exec_queue *q, void *scratch)
+{
+ int i;
+ int err = 0;
+
+ for (i = 0; i < q->width; ++i) {
+ xe_lrc_update_memirq_regs_with_address(q->lrc[i], q->hwe, scratch);
+ xe_lrc_update_hwctx_regs_with_address(q->lrc[i]);
+ err = xe_lrc_setup_wa_bb_with_scratch(q->lrc[i], q->hwe, scratch);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+/**
+ * xe_exec_queue_jobs_ring_restore - Re-emit ring commands of requests pending on a given queue.
+ * @q: the &xe_exec_queue struct instance
+ */
+void xe_exec_queue_jobs_ring_restore(struct xe_exec_queue *q)
+{
+ struct xe_gpu_scheduler *sched = &q->guc->sched;
+ struct xe_sched_job *job;
+
+ /*
+ * This routine is used within VF migration recovery. This means
+ * using the lock here introduces a restriction: we cannot wait
+ * for any GFX HW response while the lock is taken.
+ */
+ spin_lock(&sched->base.job_list_lock);
+ list_for_each_entry(job, &sched->base.pending_list, drm.list) {
+ if (xe_sched_job_is_error(job))
+ continue;
+
+ q->ring_ops->emit_job(job);
+ }
+ spin_unlock(&sched->base.job_list_lock);
+}
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.h b/drivers/gpu/drm/xe/xe_exec_queue.h
index 17bc50a7f05a..15ec852e7f7e 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue.h
@@ -90,4 +90,9 @@ int xe_exec_queue_last_fence_test_dep(struct xe_exec_queue *q,
struct xe_vm *vm);
void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q);
+int xe_exec_queue_contexts_hwsp_rebase(struct xe_exec_queue *q, void *scratch);
+
+void xe_exec_queue_jobs_ring_restore(struct xe_exec_queue *q);
+
+struct xe_lrc *xe_exec_queue_lrc(struct xe_exec_queue *q);
#endif
diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
index 1c9d03f2a3e5..27b76cf9da89 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
@@ -87,6 +87,8 @@ struct xe_exec_queue {
#define EXEC_QUEUE_FLAG_HIGH_PRIORITY BIT(4)
/* flag to indicate low latency hint to guc */
#define EXEC_QUEUE_FLAG_LOW_LATENCY BIT(5)
+/* for migration (kernel copy, clear, bind) jobs */
+#define EXEC_QUEUE_FLAG_MIGRATE BIT(6)
/**
* @flags: flags for this exec queue, should statically setup aside from ban
@@ -132,6 +134,19 @@ struct xe_exec_queue {
struct list_head link;
} lr;
+#define XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT 0
+#define XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT 1
+#define XE_EXEC_QUEUE_TLB_INVAL_COUNT (XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT + 1)
+
+ /** @tlb_inval: TLB invalidations exec queue state */
+ struct {
+ /**
+ * @tlb_inval.dep_scheduler: The TLB invalidation
+ * dependency scheduler
+ */
+ struct xe_dep_scheduler *dep_scheduler;
+ } tlb_inval[XE_EXEC_QUEUE_TLB_INVAL_COUNT];
+
/** @pxp: PXP info tracking */
struct {
/** @pxp.type: PXP session type used by this queue */
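With the defines above, tlb_inval[] is indexed by invalidation target. A hypothetical lookup, reusing xe_gt_is_media_type() from elsewhere in this series:

	/* Hypothetical: pick the dep scheduler matching the GT being invalidated */
	struct xe_dep_scheduler *sched = xe_gt_is_media_type(gt) ?
		q->tlb_inval[XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT].dep_scheduler :
		q->tlb_inval[XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT].dep_scheduler;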
diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c
index 29d4d3f51da1..7fdd0a97a628 100644
--- a/drivers/gpu/drm/xe/xe_ggtt.c
+++ b/drivers/gpu/drm/xe/xe_ggtt.c
@@ -23,13 +23,14 @@
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
-#include "xe_gt_tlb_invalidation.h"
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_pm.h"
#include "xe_res_cursor.h"
#include "xe_sriov.h"
+#include "xe_tile_printk.h"
#include "xe_tile_sriov_vf.h"
+#include "xe_tlb_inval.h"
#include "xe_wa.h"
#include "xe_wopcm.h"
@@ -106,10 +107,10 @@ static unsigned int probe_gsm_size(struct pci_dev *pdev)
static void ggtt_update_access_counter(struct xe_ggtt *ggtt)
{
struct xe_tile *tile = ggtt->tile;
- struct xe_gt *affected_gt = XE_WA(tile->primary_gt, 22019338487) ?
+ struct xe_gt *affected_gt = XE_GT_WA(tile->primary_gt, 22019338487) ?
tile->primary_gt : tile->media_gt;
struct xe_mmio *mmio = &affected_gt->mmio;
- u32 max_gtt_writes = XE_WA(ggtt->tile->primary_gt, 22019338487) ? 1100 : 63;
+ u32 max_gtt_writes = XE_GT_WA(ggtt->tile->primary_gt, 22019338487) ? 1100 : 63;
/*
* Wa_22019338487: GMD_ID is a RO register, a dummy write forces gunit
* to wait for completion of prior GTT writes before letting this through.
@@ -269,7 +270,7 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt)
gsm_size = probe_gsm_size(pdev);
if (gsm_size == 0) {
- drm_err(&xe->drm, "Hardware reported no preallocated GSM\n");
+ xe_tile_err(ggtt->tile, "Hardware reported no preallocated GSM\n");
return -ENOMEM;
}
@@ -284,8 +285,8 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt)
if (GRAPHICS_VERx100(xe) >= 1270)
ggtt->pt_ops = (ggtt->tile->media_gt &&
- XE_WA(ggtt->tile->media_gt, 22019338487)) ||
- XE_WA(ggtt->tile->primary_gt, 22019338487) ?
+ XE_GT_WA(ggtt->tile->media_gt, 22019338487)) ||
+ XE_GT_WA(ggtt->tile->primary_gt, 22019338487) ?
&xelpg_pt_wa_ops : &xelpg_pt_ops;
else
ggtt->pt_ops = &xelp_pt_ops;
@@ -438,9 +439,8 @@ static void ggtt_invalidate_gt_tlb(struct xe_gt *gt)
if (!gt)
return;
- err = xe_gt_tlb_invalidation_ggtt(gt);
- if (err)
- drm_warn(&gt_to_xe(gt)->drm, "xe_gt_tlb_invalidation_ggtt error=%d", err);
+ err = xe_tlb_inval_ggtt(&gt->tlb_inval);
+ xe_gt_WARN(gt, err, "Failed to invalidate GGTT (%pe)", ERR_PTR(err));
}
static void xe_ggtt_invalidate(struct xe_ggtt *ggtt)
@@ -467,8 +467,8 @@ static void xe_ggtt_dump_node(struct xe_ggtt *ggtt,
if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
string_get_size(node->size, 1, STRING_UNITS_2, buf, sizeof(buf));
- xe_gt_dbg(ggtt->tile->primary_gt, "GGTT %#llx-%#llx (%s) %s\n",
- node->start, node->start + node->size, buf, description);
+ xe_tile_dbg(ggtt->tile, "GGTT %#llx-%#llx (%s) %s\n",
+ node->start, node->start + node->size, buf, description);
}
}
@@ -500,9 +500,8 @@ int xe_ggtt_node_insert_balloon_locked(struct xe_ggtt_node *node, u64 start, u64
err = drm_mm_reserve_node(&ggtt->mm, &node->base);
- if (xe_gt_WARN(ggtt->tile->primary_gt, err,
- "Failed to balloon GGTT %#llx-%#llx (%pe)\n",
- node->base.start, node->base.start + node->base.size, ERR_PTR(err)))
+ if (xe_tile_WARN(ggtt->tile, err, "Failed to balloon GGTT %#llx-%#llx (%pe)\n",
+ node->base.start, node->base.start + node->base.size, ERR_PTR(err)))
return err;
xe_ggtt_dump_node(ggtt, &node->base, "balloon");
@@ -732,7 +731,7 @@ void xe_ggtt_map_bo_unlocked(struct xe_ggtt *ggtt, struct xe_bo *bo)
}
static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
- u64 start, u64 end)
+ u64 start, u64 end, struct drm_exec *exec)
{
u64 alignment = bo->min_align > 0 ? bo->min_align : XE_PAGE_SIZE;
u8 tile_id = ggtt->tile->id;
@@ -747,7 +746,7 @@ static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
return 0;
}
- err = xe_bo_validate(bo, NULL, false);
+ err = xe_bo_validate(bo, NULL, false, exec);
if (err)
return err;
@@ -789,25 +788,28 @@ out:
* @bo: the &xe_bo to be inserted
* @start: address where it will be inserted
* @end: end of the range where it will be inserted
+ * @exec: The drm_exec transaction to use for exhaustive eviction.
*
* Return: 0 on success or a negative error code on failure.
*/
int xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
- u64 start, u64 end)
+ u64 start, u64 end, struct drm_exec *exec)
{
- return __xe_ggtt_insert_bo_at(ggtt, bo, start, end);
+ return __xe_ggtt_insert_bo_at(ggtt, bo, start, end, exec);
}
/**
* xe_ggtt_insert_bo - Insert BO into GGTT
* @ggtt: the &xe_ggtt where bo will be inserted
* @bo: the &xe_bo to be inserted
+ * @exec: The drm_exec transaction to use for exhaustive eviction.
*
* Return: 0 on success or a negative error code on failure.
*/
-int xe_ggtt_insert_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
+int xe_ggtt_insert_bo(struct xe_ggtt *ggtt, struct xe_bo *bo,
+ struct drm_exec *exec)
{
- return __xe_ggtt_insert_bo_at(ggtt, bo, 0, U64_MAX);
+ return __xe_ggtt_insert_bo_at(ggtt, bo, 0, U64_MAX, exec);
}
/**
diff --git a/drivers/gpu/drm/xe/xe_ggtt.h b/drivers/gpu/drm/xe/xe_ggtt.h
index fbe1e397d05d..75fc7a1efea7 100644
--- a/drivers/gpu/drm/xe/xe_ggtt.h
+++ b/drivers/gpu/drm/xe/xe_ggtt.h
@@ -10,6 +10,7 @@
struct drm_printer;
struct xe_tile;
+struct drm_exec;
struct xe_ggtt *xe_ggtt_alloc(struct xe_tile *tile);
int xe_ggtt_init_early(struct xe_ggtt *ggtt);
@@ -31,9 +32,9 @@ bool xe_ggtt_node_allocated(const struct xe_ggtt_node *node);
void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_ggtt_node *node,
struct xe_bo *bo, u16 pat_index);
void xe_ggtt_map_bo_unlocked(struct xe_ggtt *ggtt, struct xe_bo *bo);
-int xe_ggtt_insert_bo(struct xe_ggtt *ggtt, struct xe_bo *bo);
+int xe_ggtt_insert_bo(struct xe_ggtt *ggtt, struct xe_bo *bo, struct drm_exec *exec);
int xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
- u64 start, u64 end);
+ u64 start, u64 end, struct drm_exec *exec);
void xe_ggtt_remove_bo(struct xe_ggtt *ggtt, struct xe_bo *bo);
u64 xe_ggtt_largest_hole(struct xe_ggtt *ggtt, u64 alignment, u64 *spare);
diff --git a/drivers/gpu/drm/xe/xe_gpu_scheduler.c b/drivers/gpu/drm/xe/xe_gpu_scheduler.c
index 869b43a4151d..455ccaf17314 100644
--- a/drivers/gpu/drm/xe/xe_gpu_scheduler.c
+++ b/drivers/gpu/drm/xe/xe_gpu_scheduler.c
@@ -101,6 +101,19 @@ void xe_sched_submission_stop(struct xe_gpu_scheduler *sched)
cancel_work_sync(&sched->work_process_msg);
}
+/**
+ * xe_sched_submission_stop_async - Stop further runs of submission tasks on a scheduler.
+ * @sched: the &xe_gpu_scheduler struct instance
+ *
+ * This call disables further runs of the scheduling work queue. It does not wait
+ * for any in-progress runs to finish, only makes sure no further runs happen
+ * afterwards.
+ */
+void xe_sched_submission_stop_async(struct xe_gpu_scheduler *sched)
+{
+ drm_sched_wqueue_stop(&sched->base);
+}
+
void xe_sched_submission_resume_tdr(struct xe_gpu_scheduler *sched)
{
drm_sched_resume_timeout(&sched->base, sched->base.timeout);
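Together with xe_exec_queue_jobs_ring_restore() added earlier in this series, stop_async enables a stop/re-emit/resume sequence. A hypothetical recovery flow (the real call sites live in the VF migration recovery code, not in this hunk):

	xe_sched_submission_stop_async(sched);	/* block further runs, don't wait */
	xe_exec_queue_jobs_ring_restore(q);	/* re-emit pending ring commands */
	xe_sched_submission_start(sched);	/* resume scheduling */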
diff --git a/drivers/gpu/drm/xe/xe_gpu_scheduler.h b/drivers/gpu/drm/xe/xe_gpu_scheduler.h
index 308061f0cf37..e548b2aed95a 100644
--- a/drivers/gpu/drm/xe/xe_gpu_scheduler.h
+++ b/drivers/gpu/drm/xe/xe_gpu_scheduler.h
@@ -21,6 +21,7 @@ void xe_sched_fini(struct xe_gpu_scheduler *sched);
void xe_sched_submission_start(struct xe_gpu_scheduler *sched);
void xe_sched_submission_stop(struct xe_gpu_scheduler *sched);
+void xe_sched_submission_stop_async(struct xe_gpu_scheduler *sched);
void xe_sched_submission_resume_tdr(struct xe_gpu_scheduler *sched);
diff --git a/drivers/gpu/drm/xe/xe_gsc.c b/drivers/gpu/drm/xe/xe_gsc.c
index 1d84bf2f2cef..83d61bf8ec62 100644
--- a/drivers/gpu/drm/xe/xe_gsc.c
+++ b/drivers/gpu/drm/xe/xe_gsc.c
@@ -136,10 +136,10 @@ static int query_compatibility_version(struct xe_gsc *gsc)
u64 ggtt_offset;
int err;
- bo = xe_bo_create_pin_map(xe, tile, NULL, GSC_VER_PKT_SZ * 2,
- ttm_bo_type_kernel,
- XE_BO_FLAG_SYSTEM |
- XE_BO_FLAG_GGTT);
+ bo = xe_bo_create_pin_map_novm(xe, tile, GSC_VER_PKT_SZ * 2,
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_GGTT, false);
if (IS_ERR(bo)) {
xe_gt_err(gt, "failed to allocate bo for GSC version query\n");
return PTR_ERR(bo);
@@ -266,7 +266,7 @@ static int gsc_upload_and_init(struct xe_gsc *gsc)
unsigned int fw_ref;
int ret;
- if (XE_WA(tile->primary_gt, 14018094691)) {
+ if (XE_GT_WA(tile->primary_gt, 14018094691)) {
fw_ref = xe_force_wake_get(gt_to_fw(tile->primary_gt), XE_FORCEWAKE_ALL);
/*
@@ -281,7 +281,7 @@ static int gsc_upload_and_init(struct xe_gsc *gsc)
ret = gsc_upload(gsc);
- if (XE_WA(tile->primary_gt, 14018094691))
+ if (XE_GT_WA(tile->primary_gt, 14018094691))
xe_force_wake_put(gt_to_fw(tile->primary_gt), fw_ref);
if (ret)
@@ -593,7 +593,7 @@ void xe_gsc_wa_14015076503(struct xe_gt *gt, bool prep)
u32 gs1_clr = prep ? 0 : HECI_H_GS1_ER_PREP;
/* WA only applies if the GSC is loaded */
- if (!XE_WA(gt, 14015076503) || !gsc_fw_is_loaded(gt))
+ if (!XE_GT_WA(gt, 14015076503) || !gsc_fw_is_loaded(gt))
return;
xe_mmio_rmw32(&gt->mmio, HECI_H_GS1(MTL_GSC_HECI2_BASE), gs1_clr, gs1_set);
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index 17634195cdc2..3e0ad7e5b5df 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -37,7 +37,6 @@
#include "xe_gt_sriov_pf.h"
#include "xe_gt_sriov_vf.h"
#include "xe_gt_sysfs.h"
-#include "xe_gt_tlb_invalidation.h"
#include "xe_gt_topology.h"
#include "xe_guc_exec_queue_types.h"
#include "xe_guc_pc.h"
@@ -58,6 +57,7 @@
#include "xe_sa.h"
#include "xe_sched_job.h"
#include "xe_sriov.h"
+#include "xe_tlb_inval.h"
#include "xe_tuning.h"
#include "xe_uc.h"
#include "xe_uc_fw.h"
@@ -106,7 +106,7 @@ static void xe_gt_enable_host_l2_vram(struct xe_gt *gt)
unsigned int fw_ref;
u32 reg;
- if (!XE_WA(gt, 16023588340))
+ if (!XE_GT_WA(gt, 16023588340))
return;
fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
@@ -128,7 +128,7 @@ static void xe_gt_disable_host_l2_vram(struct xe_gt *gt)
unsigned int fw_ref;
u32 reg;
- if (!XE_WA(gt, 16023588340))
+ if (!XE_GT_WA(gt, 16023588340))
return;
if (xe_gt_is_media_type(gt))
@@ -400,7 +400,7 @@ int xe_gt_init_early(struct xe_gt *gt)
xe_reg_sr_init(&gt->reg_sr, "GT", gt_to_xe(gt));
- err = xe_wa_init(gt);
+ err = xe_wa_gt_init(gt);
if (err)
return err;
@@ -408,12 +408,12 @@ int xe_gt_init_early(struct xe_gt *gt)
if (err)
return err;
- xe_wa_process_oob(gt);
+ xe_wa_process_gt_oob(gt);
xe_force_wake_init_gt(gt, gt_to_fw(gt));
spin_lock_init(&gt->global_invl_lock);
- err = xe_gt_tlb_invalidation_init_early(gt);
+ err = xe_gt_tlb_inval_init_early(gt);
if (err)
return err;
@@ -565,11 +565,9 @@ static int gt_init_with_all_forcewake(struct xe_gt *gt)
if (xe_gt_is_main_type(gt)) {
struct xe_tile *tile = gt_to_tile(gt);
- tile->migrate = xe_migrate_init(tile);
- if (IS_ERR(tile->migrate)) {
- err = PTR_ERR(tile->migrate);
+ err = xe_migrate_init(tile->migrate);
+ if (err)
goto err_force_wake;
- }
}
err = xe_uc_load_hw(&gt->uc);
@@ -805,6 +803,11 @@ static int do_gt_restart(struct xe_gt *gt)
return 0;
}
+static int gt_wait_reset_unblock(struct xe_gt *gt)
+{
+ return xe_guc_wait_reset_unblock(&gt->uc.guc);
+}
+
static int gt_reset(struct xe_gt *gt)
{
unsigned int fw_ref;
@@ -819,6 +822,10 @@ static int gt_reset(struct xe_gt *gt)
xe_gt_info(gt, "reset started\n");
+ err = gt_wait_reset_unblock(gt);
+ if (!err)
+ xe_gt_warn(gt, "reset block failed to get lifted");
+
xe_pm_runtime_get(gt_to_xe(gt));
if (xe_fault_inject_gt_reset()) {
@@ -843,7 +850,7 @@ static int gt_reset(struct xe_gt *gt)
xe_uc_stop(&gt->uc);
- xe_gt_tlb_invalidation_reset(gt);
+ xe_tlb_inval_reset(&gt->tlb_inval);
err = do_gt_reset(gt);
if (err)
@@ -959,7 +966,7 @@ int xe_gt_sanitize_freq(struct xe_gt *gt)
if ((!xe_uc_fw_is_available(&gt->uc.gsc.fw) ||
xe_uc_fw_is_loaded(&gt->uc.gsc.fw) ||
xe_uc_fw_is_in_error_state(&gt->uc.gsc.fw)) &&
- XE_WA(gt, 22019338487))
+ XE_GT_WA(gt, 22019338487))
ret = xe_guc_pc_restore_stashed_freq(&gt->uc.guc.pc);
return ret;
@@ -1057,5 +1064,5 @@ void xe_gt_declare_wedged(struct xe_gt *gt)
xe_gt_assert(gt, gt_to_xe(gt)->wedged.mode);
xe_uc_declare_wedged(&gt->uc);
- xe_gt_tlb_invalidation_reset(gt);
+ xe_tlb_inval_reset(&gt->tlb_inval);
}
diff --git a/drivers/gpu/drm/xe/xe_gt_debugfs.c b/drivers/gpu/drm/xe/xe_gt_debugfs.c
index 848618acdca8..f253e2df4907 100644
--- a/drivers/gpu/drm/xe/xe_gt_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_gt_debugfs.c
@@ -29,7 +29,9 @@
#include "xe_pm.h"
#include "xe_reg_sr.h"
#include "xe_reg_whitelist.h"
+#include "xe_sa.h"
#include "xe_sriov.h"
+#include "xe_sriov_vf_ccs.h"
#include "xe_tuning.h"
#include "xe_uc_debugfs.h"
#include "xe_wa.h"
@@ -122,18 +124,6 @@ static int powergate_info(struct xe_gt *gt, struct drm_printer *p)
return ret;
}
-static int sa_info(struct xe_gt *gt, struct drm_printer *p)
-{
- struct xe_tile *tile = gt_to_tile(gt);
-
- xe_pm_runtime_get(gt_to_xe(gt));
- drm_suballoc_dump_debug_info(&tile->mem.kernel_bb_pool->base, p,
- tile->mem.kernel_bb_pool->gpu_addr);
- xe_pm_runtime_put(gt_to_xe(gt));
-
- return 0;
-}
-
static int topology(struct xe_gt *gt, struct drm_printer *p)
{
xe_pm_runtime_get(gt_to_xe(gt));
@@ -288,7 +278,6 @@ static int hwconfig(struct xe_gt *gt, struct drm_printer *p)
* - without access to the PF specific data
*/
static const struct drm_info_list vf_safe_debugfs_list[] = {
- {"sa_info", .show = xe_gt_debugfs_simple_show, .data = sa_info},
{"topology", .show = xe_gt_debugfs_simple_show, .data = topology},
{"ggtt", .show = xe_gt_debugfs_simple_show, .data = ggtt},
{"register-save-restore", .show = xe_gt_debugfs_simple_show, .data = register_save_restore},
@@ -299,7 +288,6 @@ static const struct drm_info_list vf_safe_debugfs_list[] = {
{"default_lrc_bcs", .show = xe_gt_debugfs_simple_show, .data = bcs_default_lrc},
{"default_lrc_vcs", .show = xe_gt_debugfs_simple_show, .data = vcs_default_lrc},
{"default_lrc_vecs", .show = xe_gt_debugfs_simple_show, .data = vecs_default_lrc},
- {"stats", .show = xe_gt_debugfs_simple_show, .data = xe_gt_stats_print_info},
{"hwconfig", .show = xe_gt_debugfs_simple_show, .data = hwconfig},
};
@@ -328,6 +316,24 @@ static ssize_t write_to_gt_call(const char __user *userbuf, size_t count, loff_t
return count;
}
+static ssize_t stats_write(struct file *file, const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct xe_gt *gt = s->private;
+
+ return write_to_gt_call(userbuf, count, ppos, xe_gt_stats_clear, gt);
+}
+
+static int stats_show(struct seq_file *s, void *unused)
+{
+ struct drm_printer p = drm_seq_file_printer(s);
+ struct xe_gt *gt = s->private;
+
+ return xe_gt_stats_print_info(gt, &p);
+}
+DEFINE_SHOW_STORE_ATTRIBUTE(stats);
+
static void force_reset(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
@@ -388,13 +394,18 @@ void xe_gt_debugfs_register(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
struct drm_minor *minor = gt_to_xe(gt)->drm.primary;
+ struct dentry *parent = gt->tile->debugfs;
struct dentry *root;
+ char symlink[16];
char name[8];
xe_gt_assert(gt, minor->debugfs_root);
+ if (IS_ERR(parent))
+ return;
+
snprintf(name, sizeof(name), "gt%d", gt->info.id);
- root = debugfs_create_dir(name, minor->debugfs_root);
+ root = debugfs_create_dir(name, parent);
if (IS_ERR(root)) {
drm_warn(&xe->drm, "Create GT directory failed");
return;
@@ -408,6 +419,7 @@ void xe_gt_debugfs_register(struct xe_gt *gt)
root->d_inode->i_private = gt;
/* VF safe */
+ debugfs_create_file("stats", 0600, root, gt, &stats_fops);
debugfs_create_file("force_reset", 0600, root, gt, &force_reset_fops);
debugfs_create_file("force_reset_sync", 0600, root, gt, &force_reset_sync_fops);
@@ -426,4 +438,11 @@ void xe_gt_debugfs_register(struct xe_gt *gt)
xe_gt_sriov_pf_debugfs_register(gt, root);
else if (IS_SRIOV_VF(xe))
xe_gt_sriov_vf_debugfs_register(gt, root);
+
+ /*
+	 * Backwards compatibility only: create a link for legacy clients
+	 * that may expect the gt/ directory at the root level, not the tile level.
+ */
+ snprintf(symlink, sizeof(symlink), "tile%u/%s", gt->tile->id, name);
+ debugfs_create_symlink(name, minor->debugfs_root, symlink);
}
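DEFINE_SHOW_STORE_ATTRIBUTE(stats) comes from <linux/seq_file.h> and generates the stats_fops used above, wiring stats_show() to reads and stats_write() to writes. Roughly (simplified) it expands to:

	static int stats_open(struct inode *inode, struct file *file)
	{
		return single_open(file, stats_show, inode->i_private);
	}

	static const struct file_operations stats_fops = {
		.owner   = THIS_MODULE,
		.open    = stats_open,
		.read    = seq_read,
		.write   = stats_write,
		.llseek  = seq_lseek,
		.release = single_release,
	};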
diff --git a/drivers/gpu/drm/xe/xe_gt_freq.c b/drivers/gpu/drm/xe/xe_gt_freq.c
index 60d9354e7dbf..4ff1b6b58d6b 100644
--- a/drivers/gpu/drm/xe/xe_gt_freq.c
+++ b/drivers/gpu/drm/xe/xe_gt_freq.c
@@ -227,6 +227,33 @@ static ssize_t max_freq_store(struct kobject *kobj,
}
static struct kobj_attribute attr_max_freq = __ATTR_RW(max_freq);
+static ssize_t power_profile_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ struct device *dev = kobj_to_dev(kobj);
+
+ xe_guc_pc_get_power_profile(dev_to_pc(dev), buff);
+
+ return strlen(buff);
+}
+
+static ssize_t power_profile_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buff, size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct xe_guc_pc *pc = dev_to_pc(dev);
+ int err;
+
+ xe_pm_runtime_get(dev_to_xe(dev));
+ err = xe_guc_pc_set_power_profile(pc, buff);
+ xe_pm_runtime_put(dev_to_xe(dev));
+
+ return err ?: count;
+}
+static struct kobj_attribute attr_power_profile = __ATTR_RW(power_profile);
+
static const struct attribute *freq_attrs[] = {
&attr_act_freq.attr,
&attr_cur_freq.attr,
@@ -236,6 +263,7 @@ static const struct attribute *freq_attrs[] = {
&attr_rpn_freq.attr,
&attr_min_freq.attr,
&attr_max_freq.attr,
+ &attr_power_profile.attr,
NULL
};
diff --git a/drivers/gpu/drm/xe/xe_gt_idle.c b/drivers/gpu/drm/xe/xe_gt_idle.c
index ffb210216aa9..f8950a52d0a4 100644
--- a/drivers/gpu/drm/xe/xe_gt_idle.c
+++ b/drivers/gpu/drm/xe/xe_gt_idle.c
@@ -322,15 +322,11 @@ static void gt_idle_fini(void *arg)
{
struct kobject *kobj = arg;
struct xe_gt *gt = kobj_to_gt(kobj->parent);
- unsigned int fw_ref;
xe_gt_idle_disable_pg(gt);
- if (gt_to_xe(gt)->info.skip_guc_pc) {
- fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (gt_to_xe(gt)->info.skip_guc_pc)
xe_gt_idle_disable_c6(gt);
- xe_force_wake_put(gt_to_fw(gt), fw_ref);
- }
sysfs_remove_files(kobj, gt_idle_attrs);
kobject_put(kobj);
@@ -390,14 +386,23 @@ void xe_gt_idle_enable_c6(struct xe_gt *gt)
RC_CTL_HW_ENABLE | RC_CTL_TO_MODE | RC_CTL_RC6_ENABLE);
}
-void xe_gt_idle_disable_c6(struct xe_gt *gt)
+int xe_gt_idle_disable_c6(struct xe_gt *gt)
{
+ unsigned int fw_ref;
+
xe_device_assert_mem_access(gt_to_xe(gt));
- xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
if (IS_SRIOV_VF(gt_to_xe(gt)))
- return;
+ return 0;
+
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!fw_ref)
+ return -ETIMEDOUT;
xe_mmio_write32(&gt->mmio, RC_CONTROL, 0);
xe_mmio_write32(&gt->mmio, RC_STATE, 0);
+
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+
+ return 0;
}
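xe_gt_idle_disable_c6() now takes GT forcewake itself and reports failure instead of asserting that the caller holds it. A hypothetical caller reacting to the new return code:

	/* Hypothetical: C6 disable can now fail on a forcewake timeout */
	ret = xe_gt_idle_disable_c6(gt);
	if (ret == -ETIMEDOUT)
		xe_gt_warn(gt, "failed to disable C6, forcewake timeout\n");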
diff --git a/drivers/gpu/drm/xe/xe_gt_idle.h b/drivers/gpu/drm/xe/xe_gt_idle.h
index 591a01e181bc..9c34a155e102 100644
--- a/drivers/gpu/drm/xe/xe_gt_idle.h
+++ b/drivers/gpu/drm/xe/xe_gt_idle.h
@@ -13,7 +13,7 @@ struct xe_gt;
int xe_gt_idle_init(struct xe_gt_idle *gtidle);
void xe_gt_idle_enable_c6(struct xe_gt *gt);
-void xe_gt_idle_disable_c6(struct xe_gt *gt);
+int xe_gt_idle_disable_c6(struct xe_gt *gt);
void xe_gt_idle_enable_pg(struct xe_gt *gt);
void xe_gt_idle_disable_pg(struct xe_gt *gt);
int xe_gt_idle_pg_print(struct xe_gt *gt, struct drm_printer *p);
diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.c b/drivers/gpu/drm/xe/xe_gt_mcr.c
index 64a2f0d6aaf9..8fb1cae91724 100644
--- a/drivers/gpu/drm/xe/xe_gt_mcr.c
+++ b/drivers/gpu/drm/xe/xe_gt_mcr.c
@@ -46,8 +46,6 @@
* MCR registers are not available on Virtual Function (VF).
*/
-#define STEER_SEMAPHORE XE_REG(0xFD0)
-
static inline struct xe_reg to_xe_reg(struct xe_reg_mcr reg_mcr)
{
return reg_mcr.__reg;
@@ -364,7 +362,7 @@ fallback:
* @group: pointer to storage for steering group ID
* @instance: pointer to storage for steering instance ID
*/
-void xe_gt_mcr_get_dss_steering(struct xe_gt *gt, unsigned int dss, u16 *group, u16 *instance)
+void xe_gt_mcr_get_dss_steering(const struct xe_gt *gt, unsigned int dss, u16 *group, u16 *instance)
{
xe_gt_assert(gt, dss < XE_MAX_DSS_FUSE_BITS);
@@ -533,7 +531,7 @@ void xe_gt_mcr_set_implicit_defaults(struct xe_gt *gt)
u32 steer_val = REG_FIELD_PREP(MCR_SLICE_MASK, 0) |
REG_FIELD_PREP(MCR_SUBSLICE_MASK, 2);
- xe_mmio_write32(&gt->mmio, MCFG_MCR_SELECTOR, steer_val);
+ xe_mmio_write32(&gt->mmio, STEER_SEMAPHORE, steer_val);
xe_mmio_write32(&gt->mmio, SF_MCR_SELECTOR, steer_val);
/*
* For GAM registers, all reads should be directed to instance 1
diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.h b/drivers/gpu/drm/xe/xe_gt_mcr.h
index bc06520befab..283a1c9770e2 100644
--- a/drivers/gpu/drm/xe/xe_gt_mcr.h
+++ b/drivers/gpu/drm/xe/xe_gt_mcr.h
@@ -31,7 +31,8 @@ bool xe_gt_mcr_get_nonterminated_steering(struct xe_gt *gt,
u8 *group, u8 *instance);
void xe_gt_mcr_steering_dump(struct xe_gt *gt, struct drm_printer *p);
-void xe_gt_mcr_get_dss_steering(struct xe_gt *gt, unsigned int dss, u16 *group, u16 *instance);
+void xe_gt_mcr_get_dss_steering(const struct xe_gt *gt,
+ unsigned int dss, u16 *group, u16 *instance);
u32 xe_gt_mcr_steering_info_to_dss_id(struct xe_gt *gt, u16 group, u16 instance);
/*
diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
index 5a75d56d8558..a054d6010ae0 100644
--- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
@@ -16,13 +16,13 @@
#include "xe_gt.h"
#include "xe_gt_printk.h"
#include "xe_gt_stats.h"
-#include "xe_gt_tlb_invalidation.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_migrate.h"
#include "xe_svm.h"
#include "xe_trace_bo.h"
#include "xe_vm.h"
+#include "xe_vram_types.h"
struct pagefault {
u64 page_addr;
@@ -74,7 +74,7 @@ static bool vma_is_valid(struct xe_tile *tile, struct xe_vma *vma)
}
static int xe_pf_begin(struct drm_exec *exec, struct xe_vma *vma,
- bool atomic, unsigned int id)
+ bool need_vram_move, struct xe_vram_region *vram)
{
struct xe_bo *bo = xe_vma_bo(vma);
struct xe_vm *vm = xe_vma_vm(vma);
@@ -84,24 +84,11 @@ static int xe_pf_begin(struct drm_exec *exec, struct xe_vma *vma,
if (err)
return err;
- if (atomic && IS_DGFX(vm->xe)) {
- if (xe_vma_is_userptr(vma)) {
- err = -EACCES;
- return err;
- }
-
- /* Migrate to VRAM, move should invalidate the VMA first */
- err = xe_bo_migrate(bo, XE_PL_VRAM0 + id);
- if (err)
- return err;
- } else if (bo) {
- /* Create backing store if needed */
- err = xe_bo_validate(bo, vm, true);
- if (err)
- return err;
- }
+ if (!bo)
+ return 0;
- return 0;
+ return need_vram_move ? xe_bo_migrate(bo, vram->placement, NULL, exec) :
+ xe_bo_validate(bo, vm, true, exec);
}
static int handle_vma_pagefault(struct xe_gt *gt, struct xe_vma *vma,
@@ -109,13 +96,17 @@ static int handle_vma_pagefault(struct xe_gt *gt, struct xe_vma *vma,
{
struct xe_vm *vm = xe_vma_vm(vma);
struct xe_tile *tile = gt_to_tile(gt);
+ struct xe_validation_ctx ctx;
struct drm_exec exec;
struct dma_fence *fence;
- ktime_t end = 0;
- int err;
+ int err, needs_vram;
lockdep_assert_held_write(&vm->lock);
+ needs_vram = xe_vma_need_vram_for_atomic(vm->xe, vma, atomic);
+ if (needs_vram < 0 || (needs_vram && xe_vma_is_userptr(vma)))
+ return needs_vram < 0 ? needs_vram : -EACCES;
+
xe_gt_stats_incr(gt, XE_GT_STATS_ID_VMA_PAGEFAULT_COUNT, 1);
xe_gt_stats_incr(gt, XE_GT_STATS_ID_VMA_PAGEFAULT_KB, xe_vma_size(vma) / 1024);
@@ -136,22 +127,22 @@ retry_userptr:
}
/* Lock VM and BOs dma-resv */
- drm_exec_init(&exec, 0, 0);
+ xe_validation_ctx_init(&ctx, &vm->xe->val, &exec, (struct xe_val_flags) {});
drm_exec_until_all_locked(&exec) {
- err = xe_pf_begin(&exec, vma, atomic, tile->id);
+ err = xe_pf_begin(&exec, vma, needs_vram == 1, tile->mem.vram);
drm_exec_retry_on_contention(&exec);
- if (xe_vm_validate_should_retry(&exec, err, &end))
- err = -EAGAIN;
+ xe_validation_retry_on_oom(&ctx, &err);
if (err)
goto unlock_dma_resv;
/* Bind VMA only to the GT that has faulted */
trace_xe_vma_pf_bind(vma);
+ xe_vm_set_validation_exec(vm, &exec);
fence = xe_vma_rebind(vm, vma, BIT(tile->id));
+ xe_vm_set_validation_exec(vm, NULL);
if (IS_ERR(fence)) {
err = PTR_ERR(fence);
- if (xe_vm_validate_should_retry(&exec, err, &end))
- err = -EAGAIN;
+ xe_validation_retry_on_oom(&ctx, &err);
goto unlock_dma_resv;
}
}
@@ -160,7 +151,7 @@ retry_userptr:
dma_fence_put(fence);
unlock_dma_resv:
- drm_exec_fini(&exec);
+ xe_validation_ctx_fini(&ctx);
if (err == -EAGAIN)
goto retry_userptr;
@@ -542,6 +533,7 @@ static int handle_acc(struct xe_gt *gt, struct acc *acc)
{
struct xe_device *xe = gt_to_xe(gt);
struct xe_tile *tile = gt_to_tile(gt);
+ struct xe_validation_ctx ctx;
struct drm_exec exec;
struct xe_vm *vm;
struct xe_vma *vma;
@@ -571,15 +563,14 @@ static int handle_acc(struct xe_gt *gt, struct acc *acc)
goto unlock_vm;
/* Lock VM and BOs dma-resv */
- drm_exec_init(&exec, 0, 0);
+ xe_validation_ctx_init(&ctx, &vm->xe->val, &exec, (struct xe_val_flags) {});
drm_exec_until_all_locked(&exec) {
- ret = xe_pf_begin(&exec, vma, true, tile->id);
+ ret = xe_pf_begin(&exec, vma, IS_DGFX(vm->xe), tile->mem.vram);
drm_exec_retry_on_contention(&exec);
- if (ret)
- break;
+ xe_validation_retry_on_oom(&ctx, &ret);
}
- drm_exec_fini(&exec);
+ xe_validation_ctx_fini(&ctx);
unlock_vm:
up_read(&vm->lock);
xe_vm_put(vm);
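The fault path now keys off a tri-state needs_vram value instead of the old atomic/IS_DGFX special-casing. Condensed from handle_vma_pagefault() and xe_pf_begin() above (illustrative only):

	needs_vram = xe_vma_need_vram_for_atomic(vm->xe, vma, atomic);
	if (needs_vram < 0)
		return needs_vram;	/* hard error */
	if (needs_vram && xe_vma_is_userptr(vma))
		return -EACCES;		/* userptr cannot be migrated to VRAM */

	/* later, under the drm_exec transaction, in xe_pf_begin(): */
	err = need_vram_move ? xe_bo_migrate(bo, vram->placement, NULL, exec) :
			       xe_bo_validate(bo, vm, true, exec);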
diff --git a/drivers/gpu/drm/xe/xe_gt_printk.h b/drivers/gpu/drm/xe/xe_gt_printk.h
index 11da0228cea7..1313d32862db 100644
--- a/drivers/gpu/drm/xe/xe_gt_printk.h
+++ b/drivers/gpu/drm/xe/xe_gt_printk.h
@@ -6,18 +6,22 @@
#ifndef _XE_GT_PRINTK_H_
#define _XE_GT_PRINTK_H_
-#include <drm/drm_print.h>
-
#include "xe_gt_types.h"
+#include "xe_tile_printk.h"
+
+#define __XE_GT_PRINTK_FMT(_gt, _fmt, _args...) "GT%u: " _fmt, (_gt)->info.id, ##_args
#define xe_gt_printk(_gt, _level, _fmt, ...) \
- drm_##_level(&gt_to_xe(_gt)->drm, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__)
+ xe_tile_printk((_gt)->tile, _level, __XE_GT_PRINTK_FMT((_gt), _fmt, ##__VA_ARGS__))
+
+#define xe_gt_err(_gt, _fmt, ...) \
+ xe_gt_printk((_gt), err, _fmt, ##__VA_ARGS__)
#define xe_gt_err_once(_gt, _fmt, ...) \
xe_gt_printk((_gt), err_once, _fmt, ##__VA_ARGS__)
-#define xe_gt_err(_gt, _fmt, ...) \
- xe_gt_printk((_gt), err, _fmt, ##__VA_ARGS__)
+#define xe_gt_err_ratelimited(_gt, _fmt, ...) \
+ xe_gt_printk((_gt), err_ratelimited, _fmt, ##__VA_ARGS__)
#define xe_gt_warn(_gt, _fmt, ...) \
xe_gt_printk((_gt), warn, _fmt, ##__VA_ARGS__)
@@ -31,20 +35,20 @@
#define xe_gt_dbg(_gt, _fmt, ...) \
xe_gt_printk((_gt), dbg, _fmt, ##__VA_ARGS__)
-#define xe_gt_err_ratelimited(_gt, _fmt, ...) \
- xe_gt_printk((_gt), err_ratelimited, _fmt, ##__VA_ARGS__)
+#define xe_gt_WARN_type(_gt, _type, _condition, _fmt, ...) \
+ xe_tile_WARN##_type((_gt)->tile, _condition, _fmt, ## __VA_ARGS__)
#define xe_gt_WARN(_gt, _condition, _fmt, ...) \
- drm_WARN(&gt_to_xe(_gt)->drm, _condition, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__)
+ xe_gt_WARN_type((_gt),, _condition, __XE_GT_PRINTK_FMT((_gt), _fmt, ##__VA_ARGS__))
#define xe_gt_WARN_ONCE(_gt, _condition, _fmt, ...) \
- drm_WARN_ONCE(&gt_to_xe(_gt)->drm, _condition, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__)
+ xe_gt_WARN_type((_gt), _ONCE, _condition, __XE_GT_PRINTK_FMT((_gt), _fmt, ##__VA_ARGS__))
#define xe_gt_WARN_ON(_gt, _condition) \
- xe_gt_WARN((_gt), _condition, "%s(%s)", "gt_WARN_ON", __stringify(_condition))
+ xe_gt_WARN((_gt), _condition, "%s(%s)", "WARN_ON", __stringify(_condition))
#define xe_gt_WARN_ON_ONCE(_gt, _condition) \
- xe_gt_WARN_ONCE((_gt), _condition, "%s(%s)", "gt_WARN_ON_ONCE", __stringify(_condition))
+ xe_gt_WARN_ONCE((_gt), _condition, "%s(%s)", "WARN_ON_ONCE", __stringify(_condition))
static inline void __xe_gt_printfn_err(struct drm_printer *p, struct va_format *vaf)
{
@@ -67,12 +71,12 @@ static inline void __xe_gt_printfn_dbg(struct drm_printer *p, struct va_format *
/*
* The original xe_gt_dbg() callsite annotations are useless here,
- * redirect to the tweaked drm_dbg_printer() instead.
+ * redirect to the tweaked xe_tile_dbg_printer() instead.
*/
- dbg = drm_dbg_printer(&gt_to_xe(gt)->drm, DRM_UT_DRIVER, NULL);
+ dbg = xe_tile_dbg_printer((gt)->tile);
dbg.origin = p->origin;
- drm_printf(&dbg, "GT%u: %pV", gt->info.id, vaf);
+ drm_printf(&dbg, __XE_GT_PRINTK_FMT(gt, "%pV", vaf));
}
/**
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
index bdbd15f3afe3..c4dda87b47cc 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
@@ -55,7 +55,12 @@ static void pf_init_workers(struct xe_gt *gt)
static void pf_fini_workers(struct xe_gt *gt)
{
xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
- disable_work_sync(&gt->sriov.pf.workers.restart);
+
+ if (disable_work_sync(&gt->sriov.pf.workers.restart)) {
+ xe_gt_sriov_dbg_verbose(gt, "pending restart disabled!\n");
+ /* release an rpm reference taken on the worker's behalf */
+ xe_pm_runtime_put(gt_to_xe(gt));
+ }
}
/**
@@ -207,8 +212,11 @@ static void pf_cancel_restart(struct xe_gt *gt)
{
xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
- if (cancel_work_sync(&gt->sriov.pf.workers.restart))
+ if (cancel_work_sync(&gt->sriov.pf.workers.restart)) {
xe_gt_sriov_dbg_verbose(gt, "pending restart canceled!\n");
+ /* release an rpm reference taken on the worker's behalf */
+ xe_pm_runtime_put(gt_to_xe(gt));
+ }
}
/**
@@ -226,9 +234,12 @@ static void pf_restart(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
- xe_pm_runtime_get(xe);
+ xe_gt_assert(gt, !xe_pm_runtime_suspended(xe));
+
xe_gt_sriov_pf_config_restart(gt);
xe_gt_sriov_pf_control_restart(gt);
+
+ /* release an rpm reference taken on our behalf */
xe_pm_runtime_put(xe);
xe_gt_sriov_dbg(gt, "restart completed\n");
@@ -247,8 +258,13 @@ static void pf_queue_restart(struct xe_gt *gt)
xe_gt_assert(gt, IS_SRIOV_PF(xe));
- if (!queue_work(xe->sriov.wq, &gt->sriov.pf.workers.restart))
+ /* take an rpm reference on behalf of the worker */
+ xe_pm_runtime_get_noresume(xe);
+
+ if (!queue_work(xe->sriov.wq, &gt->sriov.pf.workers.restart)) {
xe_gt_sriov_dbg(gt, "restart already in queue!\n");
+ xe_pm_runtime_put(xe);
+ }
}
/**
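The changes above implement an rpm-reference handoff: the queuing side takes a reference on the worker's behalf with xe_pm_runtime_get_noresume(), and whichever path ends up not running (or finishes running) the work releases it. In condensed form:

	/* queuing side: take a reference on behalf of the worker */
	xe_pm_runtime_get_noresume(xe);
	if (!queue_work(wq, work))
		xe_pm_runtime_put(xe);		/* already queued, give it back */

	/* worker: release the reference once the restart completes */
	xe_pm_runtime_put(xe);

	/* cancel/disable side: release it if the worker never got to run */
	if (cancel_work_sync(work))
		xe_pm_runtime_put(xe);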
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
index d84831a03610..6344b5205c08 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
@@ -33,6 +33,7 @@
#include "xe_migrate.h"
#include "xe_sriov.h"
#include "xe_ttm_vram_mgr.h"
+#include "xe_vram_types.h"
#include "xe_wopcm.h"
#define make_u64_from_u32(hi, lo) ((u64)((u64)(u32)(hi) << 32 | (u32)(lo)))
@@ -1433,7 +1434,8 @@ fail:
return err;
}
-static void pf_release_vf_config_lmem(struct xe_gt *gt, struct xe_gt_sriov_config *config)
+/* Return: %true if LMEM was provisioned, %false otherwise */
+static bool pf_release_vf_config_lmem(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
xe_gt_assert(gt, IS_DGFX(gt_to_xe(gt)));
xe_gt_assert(gt, xe_gt_is_main_type(gt));
@@ -1442,7 +1444,9 @@ static void pf_release_vf_config_lmem(struct xe_gt *gt, struct xe_gt_sriov_confi
if (config->lmem_obj) {
xe_bo_unpin_map_no_vm(config->lmem_obj);
config->lmem_obj = NULL;
+ return true;
}
+ return false;
}
static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
@@ -1474,23 +1478,16 @@ static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
return 0;
xe_gt_assert(gt, pf_get_lmem_alignment(gt) == SZ_2M);
- bo = xe_bo_create_locked(xe, tile, NULL,
- ALIGN(size, PAGE_SIZE),
- ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_NEEDS_2M |
- XE_BO_FLAG_PINNED |
- XE_BO_FLAG_PINNED_LATE_RESTORE);
+ bo = xe_bo_create_pin_range_novm(xe, tile,
+ ALIGN(size, PAGE_SIZE), 0, ~0ull,
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_NEEDS_2M |
+ XE_BO_FLAG_PINNED |
+ XE_BO_FLAG_PINNED_LATE_RESTORE);
if (IS_ERR(bo))
return PTR_ERR(bo);
- err = xe_bo_pin(bo);
- xe_bo_unlock(bo);
- if (unlikely(err)) {
- xe_bo_put(bo);
- return err;
- }
-
config->lmem_obj = bo;
if (xe_device_has_lmtt(xe)) {
@@ -1604,7 +1601,7 @@ static u64 pf_query_free_lmem(struct xe_gt *gt)
{
struct xe_tile *tile = gt->tile;
- return xe_ttm_vram_get_avail(&tile->mem.vram.ttm.manager);
+ return xe_ttm_vram_get_avail(&tile->mem.vram->ttm.manager);
}
static u64 pf_query_max_lmem(struct xe_gt *gt)
@@ -2019,12 +2016,13 @@ static void pf_release_vf_config(struct xe_gt *gt, unsigned int vfid)
{
struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
struct xe_device *xe = gt_to_xe(gt);
+ bool released;
if (xe_gt_is_main_type(gt)) {
pf_release_vf_config_ggtt(gt, config);
if (IS_DGFX(xe)) {
- pf_release_vf_config_lmem(gt, config);
- if (xe_device_has_lmtt(xe))
+ released = pf_release_vf_config_lmem(gt, config);
+ if (released && xe_device_has_lmtt(xe))
pf_update_vf_lmtt(xe, vfid);
}
}
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
index c712111aa30d..44cc612b0a75 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
@@ -55,12 +55,12 @@ static int pf_send_guc_save_vf_state(struct xe_gt *gt, unsigned int vfid,
xe_gt_assert(gt, size % sizeof(u32) == 0);
xe_gt_assert(gt, size == ndwords * sizeof(u32));
- bo = xe_bo_create_pin_map(xe, tile, NULL,
- ALIGN(size, PAGE_SIZE),
- ttm_bo_type_kernel,
- XE_BO_FLAG_SYSTEM |
- XE_BO_FLAG_GGTT |
- XE_BO_FLAG_GGTT_INVALIDATE);
+ bo = xe_bo_create_pin_map_novm(xe, tile,
+ ALIGN(size, PAGE_SIZE),
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_GGTT_INVALIDATE, false);
if (IS_ERR(bo))
return PTR_ERR(bo);
@@ -91,12 +91,12 @@ static int pf_send_guc_restore_vf_state(struct xe_gt *gt, unsigned int vfid,
xe_gt_assert(gt, size % sizeof(u32) == 0);
xe_gt_assert(gt, size == ndwords * sizeof(u32));
- bo = xe_bo_create_pin_map(xe, tile, NULL,
- ALIGN(size, PAGE_SIZE),
- ttm_bo_type_kernel,
- XE_BO_FLAG_SYSTEM |
- XE_BO_FLAG_GGTT |
- XE_BO_FLAG_GGTT_INVALIDATE);
+ bo = xe_bo_create_pin_map_novm(xe, tile,
+ ALIGN(size, PAGE_SIZE),
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_GGTT_INVALIDATE, false);
if (IS_ERR(bo))
return PTR_ERR(bo);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
index b282838d59e6..0461d5513487 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
@@ -25,6 +25,7 @@
#include "xe_guc.h"
#include "xe_guc_hxg_helpers.h"
#include "xe_guc_relay.h"
+#include "xe_lrc.h"
#include "xe_mmio.h"
#include "xe_sriov.h"
#include "xe_sriov_vf.h"
@@ -751,6 +752,19 @@ failed:
}
/**
+ * xe_gt_sriov_vf_default_lrcs_hwsp_rebase - Update GGTT references in HWSP of default LRCs.
+ * @gt: the &xe_gt struct instance
+ */
+void xe_gt_sriov_vf_default_lrcs_hwsp_rebase(struct xe_gt *gt)
+{
+ struct xe_hw_engine *hwe;
+ enum xe_hw_engine_id id;
+
+ for_each_hw_engine(hwe, gt, id)
+ xe_default_lrc_update_memirq_regs_with_address(hwe);
+}
+
+/**
* xe_gt_sriov_vf_migrated_event_handler - Start a VF migration recovery,
* or just mark that a GuC is ready for it.
* @gt: the &xe_gt struct instance linked to target GuC
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
index e0357f341a2d..0af1dc769fe0 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
@@ -21,6 +21,7 @@ void xe_gt_sriov_vf_guc_versions(struct xe_gt *gt,
int xe_gt_sriov_vf_query_config(struct xe_gt *gt);
int xe_gt_sriov_vf_connect(struct xe_gt *gt);
int xe_gt_sriov_vf_query_runtime(struct xe_gt *gt);
+void xe_gt_sriov_vf_default_lrcs_hwsp_rebase(struct xe_gt *gt);
int xe_gt_sriov_vf_notify_resfix_done(struct xe_gt *gt);
void xe_gt_sriov_vf_migrated_event_handler(struct xe_gt *gt);
diff --git a/drivers/gpu/drm/xe/xe_gt_stats.c b/drivers/gpu/drm/xe/xe_gt_stats.c
index 30f942671c2b..5f74706bab81 100644
--- a/drivers/gpu/drm/xe/xe_gt_stats.c
+++ b/drivers/gpu/drm/xe/xe_gt_stats.c
@@ -26,11 +26,46 @@ void xe_gt_stats_incr(struct xe_gt *gt, const enum xe_gt_stats_id id, int incr)
atomic64_add(incr, &gt->stats.counters[id]);
}
+#define DEF_STAT_STR(ID, name) [XE_GT_STATS_ID_##ID] = name
+
static const char *const stat_description[__XE_GT_STATS_NUM_IDS] = {
- "svm_pagefault_count",
- "tlb_inval_count",
- "vma_pagefault_count",
- "vma_pagefault_kb",
+ DEF_STAT_STR(SVM_PAGEFAULT_COUNT, "svm_pagefault_count"),
+ DEF_STAT_STR(TLB_INVAL, "tlb_inval_count"),
+ DEF_STAT_STR(SVM_TLB_INVAL_COUNT, "svm_tlb_inval_count"),
+ DEF_STAT_STR(SVM_TLB_INVAL_US, "svm_tlb_inval_us"),
+ DEF_STAT_STR(VMA_PAGEFAULT_COUNT, "vma_pagefault_count"),
+ DEF_STAT_STR(VMA_PAGEFAULT_KB, "vma_pagefault_kb"),
+ DEF_STAT_STR(SVM_4K_PAGEFAULT_COUNT, "svm_4K_pagefault_count"),
+ DEF_STAT_STR(SVM_64K_PAGEFAULT_COUNT, "svm_64K_pagefault_count"),
+ DEF_STAT_STR(SVM_2M_PAGEFAULT_COUNT, "svm_2M_pagefault_count"),
+ DEF_STAT_STR(SVM_4K_VALID_PAGEFAULT_COUNT, "svm_4K_valid_pagefault_count"),
+ DEF_STAT_STR(SVM_64K_VALID_PAGEFAULT_COUNT, "svm_64K_valid_pagefault_count"),
+ DEF_STAT_STR(SVM_2M_VALID_PAGEFAULT_COUNT, "svm_2M_valid_pagefault_count"),
+ DEF_STAT_STR(SVM_4K_PAGEFAULT_US, "svm_4K_pagefault_us"),
+ DEF_STAT_STR(SVM_64K_PAGEFAULT_US, "svm_64K_pagefault_us"),
+ DEF_STAT_STR(SVM_2M_PAGEFAULT_US, "svm_2M_pagefault_us"),
+ DEF_STAT_STR(SVM_4K_MIGRATE_COUNT, "svm_4K_migrate_count"),
+ DEF_STAT_STR(SVM_64K_MIGRATE_COUNT, "svm_64K_migrate_count"),
+ DEF_STAT_STR(SVM_2M_MIGRATE_COUNT, "svm_2M_migrate_count"),
+ DEF_STAT_STR(SVM_4K_MIGRATE_US, "svm_4K_migrate_us"),
+ DEF_STAT_STR(SVM_64K_MIGRATE_US, "svm_64K_migrate_us"),
+ DEF_STAT_STR(SVM_2M_MIGRATE_US, "svm_2M_migrate_us"),
+ DEF_STAT_STR(SVM_DEVICE_COPY_US, "svm_device_copy_us"),
+ DEF_STAT_STR(SVM_4K_DEVICE_COPY_US, "svm_4K_device_copy_us"),
+ DEF_STAT_STR(SVM_64K_DEVICE_COPY_US, "svm_64K_device_copy_us"),
+ DEF_STAT_STR(SVM_2M_DEVICE_COPY_US, "svm_2M_device_copy_us"),
+ DEF_STAT_STR(SVM_CPU_COPY_US, "svm_cpu_copy_us"),
+ DEF_STAT_STR(SVM_4K_CPU_COPY_US, "svm_4K_cpu_copy_us"),
+ DEF_STAT_STR(SVM_64K_CPU_COPY_US, "svm_64K_cpu_copy_us"),
+ DEF_STAT_STR(SVM_2M_CPU_COPY_US, "svm_2M_cpu_copy_us"),
+ DEF_STAT_STR(SVM_DEVICE_COPY_KB, "svm_device_copy_kb"),
+ DEF_STAT_STR(SVM_CPU_COPY_KB, "svm_cpu_copy_kb"),
+ DEF_STAT_STR(SVM_4K_GET_PAGES_US, "svm_4K_get_pages_us"),
+ DEF_STAT_STR(SVM_64K_GET_PAGES_US, "svm_64K_get_pages_us"),
+ DEF_STAT_STR(SVM_2M_GET_PAGES_US, "svm_2M_get_pages_us"),
+ DEF_STAT_STR(SVM_4K_BIND_US, "svm_4K_bind_us"),
+ DEF_STAT_STR(SVM_64K_BIND_US, "svm_64K_bind_us"),
+ DEF_STAT_STR(SVM_2M_BIND_US, "svm_2M_bind_us"),
};
/**
@@ -50,3 +85,17 @@ int xe_gt_stats_print_info(struct xe_gt *gt, struct drm_printer *p)
return 0;
}
+
+/**
+ * xe_gt_stats_clear - Clear the GT stats
+ * @gt: GT structure
+ *
+ * This clears (zeroes) all the available GT stats.
+ */
+void xe_gt_stats_clear(struct xe_gt *gt)
+{
+ int id;
+
+ for (id = 0; id < ARRAY_SIZE(gt->stats.counters); ++id)
+ atomic64_set(&gt->stats.counters[id], 0);
+}
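DEF_STAT_STR() switches the description table to designated array initializers, so each string stays attached to its enum ID regardless of declaration order. A minimal standalone illustration (all names invented):

enum example_id { EXAMPLE_FOO, EXAMPLE_BAR, __EXAMPLE_NUM_IDS };

#define DEF_EXAMPLE_STR(ID, name) [EXAMPLE_##ID] = name

static const char *const example_desc[__EXAMPLE_NUM_IDS] = {
	DEF_EXAMPLE_STR(BAR, "bar"),	/* listing order no longer matters */
	DEF_EXAMPLE_STR(FOO, "foo"),
};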
diff --git a/drivers/gpu/drm/xe/xe_gt_stats.h b/drivers/gpu/drm/xe/xe_gt_stats.h
index 38325ef53617..e8aea32bc971 100644
--- a/drivers/gpu/drm/xe/xe_gt_stats.h
+++ b/drivers/gpu/drm/xe/xe_gt_stats.h
@@ -13,6 +13,7 @@ struct drm_printer;
#ifdef CONFIG_DEBUG_FS
int xe_gt_stats_print_info(struct xe_gt *gt, struct drm_printer *p);
+void xe_gt_stats_clear(struct xe_gt *gt);
void xe_gt_stats_incr(struct xe_gt *gt, const enum xe_gt_stats_id id, int incr);
#else
static inline void
diff --git a/drivers/gpu/drm/xe/xe_gt_stats_types.h b/drivers/gpu/drm/xe/xe_gt_stats_types.h
index be3244d7133c..d8348a8de2e1 100644
--- a/drivers/gpu/drm/xe/xe_gt_stats_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_stats_types.h
@@ -9,8 +9,41 @@
enum xe_gt_stats_id {
XE_GT_STATS_ID_SVM_PAGEFAULT_COUNT,
XE_GT_STATS_ID_TLB_INVAL,
+ XE_GT_STATS_ID_SVM_TLB_INVAL_COUNT,
+ XE_GT_STATS_ID_SVM_TLB_INVAL_US,
XE_GT_STATS_ID_VMA_PAGEFAULT_COUNT,
XE_GT_STATS_ID_VMA_PAGEFAULT_KB,
+ XE_GT_STATS_ID_SVM_4K_PAGEFAULT_COUNT,
+ XE_GT_STATS_ID_SVM_64K_PAGEFAULT_COUNT,
+ XE_GT_STATS_ID_SVM_2M_PAGEFAULT_COUNT,
+ XE_GT_STATS_ID_SVM_4K_VALID_PAGEFAULT_COUNT,
+ XE_GT_STATS_ID_SVM_64K_VALID_PAGEFAULT_COUNT,
+ XE_GT_STATS_ID_SVM_2M_VALID_PAGEFAULT_COUNT,
+ XE_GT_STATS_ID_SVM_4K_PAGEFAULT_US,
+ XE_GT_STATS_ID_SVM_64K_PAGEFAULT_US,
+ XE_GT_STATS_ID_SVM_2M_PAGEFAULT_US,
+ XE_GT_STATS_ID_SVM_4K_MIGRATE_COUNT,
+ XE_GT_STATS_ID_SVM_64K_MIGRATE_COUNT,
+ XE_GT_STATS_ID_SVM_2M_MIGRATE_COUNT,
+ XE_GT_STATS_ID_SVM_4K_MIGRATE_US,
+ XE_GT_STATS_ID_SVM_64K_MIGRATE_US,
+ XE_GT_STATS_ID_SVM_2M_MIGRATE_US,
+ XE_GT_STATS_ID_SVM_DEVICE_COPY_US,
+ XE_GT_STATS_ID_SVM_4K_DEVICE_COPY_US,
+ XE_GT_STATS_ID_SVM_64K_DEVICE_COPY_US,
+ XE_GT_STATS_ID_SVM_2M_DEVICE_COPY_US,
+ XE_GT_STATS_ID_SVM_CPU_COPY_US,
+ XE_GT_STATS_ID_SVM_4K_CPU_COPY_US,
+ XE_GT_STATS_ID_SVM_64K_CPU_COPY_US,
+ XE_GT_STATS_ID_SVM_2M_CPU_COPY_US,
+ XE_GT_STATS_ID_SVM_DEVICE_COPY_KB,
+ XE_GT_STATS_ID_SVM_CPU_COPY_KB,
+ XE_GT_STATS_ID_SVM_4K_GET_PAGES_US,
+ XE_GT_STATS_ID_SVM_64K_GET_PAGES_US,
+ XE_GT_STATS_ID_SVM_2M_GET_PAGES_US,
+ XE_GT_STATS_ID_SVM_4K_BIND_US,
+ XE_GT_STATS_ID_SVM_64K_BIND_US,
+ XE_GT_STATS_ID_SVM_2M_BIND_US,
/* must be the last entry */
__XE_GT_STATS_NUM_IDS,
};
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
deleted file mode 100644
index 086c12ee3d9d..000000000000
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+++ /dev/null
@@ -1,596 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#include "xe_gt_tlb_invalidation.h"
-
-#include "abi/guc_actions_abi.h"
-#include "xe_device.h"
-#include "xe_force_wake.h"
-#include "xe_gt.h"
-#include "xe_gt_printk.h"
-#include "xe_guc.h"
-#include "xe_guc_ct.h"
-#include "xe_gt_stats.h"
-#include "xe_mmio.h"
-#include "xe_pm.h"
-#include "xe_sriov.h"
-#include "xe_trace.h"
-#include "regs/xe_guc_regs.h"
-
-#define FENCE_STACK_BIT DMA_FENCE_FLAG_USER_BITS
-
-/*
- * TLB inval depends on pending commands in the CT queue and then the real
- * invalidation time. Double up the time to process full CT queue
- * just to be on the safe side.
- */
-static long tlb_timeout_jiffies(struct xe_gt *gt)
-{
- /* this reflects what HW/GuC needs to process TLB inv request */
- const long hw_tlb_timeout = HZ / 4;
-
- /* this estimates actual delay caused by the CTB transport */
- long delay = xe_guc_ct_queue_proc_time_jiffies(&gt->uc.guc.ct);
-
- return hw_tlb_timeout + 2 * delay;
-}
-
-static void xe_gt_tlb_invalidation_fence_fini(struct xe_gt_tlb_invalidation_fence *fence)
-{
- if (WARN_ON_ONCE(!fence->gt))
- return;
-
- xe_pm_runtime_put(gt_to_xe(fence->gt));
- fence->gt = NULL; /* fini() should be called once */
-}
-
-static void
-__invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence)
-{
- bool stack = test_bit(FENCE_STACK_BIT, &fence->base.flags);
-
- trace_xe_gt_tlb_invalidation_fence_signal(xe, fence);
- xe_gt_tlb_invalidation_fence_fini(fence);
- dma_fence_signal(&fence->base);
- if (!stack)
- dma_fence_put(&fence->base);
-}
-
-static void
-invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence)
-{
- list_del(&fence->link);
- __invalidation_fence_signal(xe, fence);
-}
-
-void xe_gt_tlb_invalidation_fence_signal(struct xe_gt_tlb_invalidation_fence *fence)
-{
- if (WARN_ON_ONCE(!fence->gt))
- return;
-
- __invalidation_fence_signal(gt_to_xe(fence->gt), fence);
-}
-
-static void xe_gt_tlb_fence_timeout(struct work_struct *work)
-{
- struct xe_gt *gt = container_of(work, struct xe_gt,
- tlb_invalidation.fence_tdr.work);
- struct xe_device *xe = gt_to_xe(gt);
- struct xe_gt_tlb_invalidation_fence *fence, *next;
-
- LNL_FLUSH_WORK(&gt->uc.guc.ct.g2h_worker);
-
- spin_lock_irq(&gt->tlb_invalidation.pending_lock);
- list_for_each_entry_safe(fence, next,
- &gt->tlb_invalidation.pending_fences, link) {
- s64 since_inval_ms = ktime_ms_delta(ktime_get(),
- fence->invalidation_time);
-
- if (msecs_to_jiffies(since_inval_ms) < tlb_timeout_jiffies(gt))
- break;
-
- trace_xe_gt_tlb_invalidation_fence_timeout(xe, fence);
- xe_gt_err(gt, "TLB invalidation fence timeout, seqno=%d recv=%d",
- fence->seqno, gt->tlb_invalidation.seqno_recv);
-
- fence->base.error = -ETIME;
- invalidation_fence_signal(xe, fence);
- }
- if (!list_empty(&gt->tlb_invalidation.pending_fences))
- queue_delayed_work(system_wq,
- &gt->tlb_invalidation.fence_tdr,
- tlb_timeout_jiffies(gt));
- spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
-}
-
-/**
- * xe_gt_tlb_invalidation_init_early - Initialize GT TLB invalidation state
- * @gt: GT structure
- *
- * Initialize GT TLB invalidation state, purely software initialization, should
- * be called once during driver load.
- *
- * Return: 0 on success, negative error code on error.
- */
-int xe_gt_tlb_invalidation_init_early(struct xe_gt *gt)
-{
- gt->tlb_invalidation.seqno = 1;
- INIT_LIST_HEAD(&gt->tlb_invalidation.pending_fences);
- spin_lock_init(&gt->tlb_invalidation.pending_lock);
- spin_lock_init(&gt->tlb_invalidation.lock);
- INIT_DELAYED_WORK(&gt->tlb_invalidation.fence_tdr,
- xe_gt_tlb_fence_timeout);
-
- return 0;
-}
-
-/**
- * xe_gt_tlb_invalidation_reset - Initialize GT TLB invalidation reset
- * @gt: GT structure
- *
- * Signal any pending invalidation fences, should be called during a GT reset
- */
-void xe_gt_tlb_invalidation_reset(struct xe_gt *gt)
-{
- struct xe_gt_tlb_invalidation_fence *fence, *next;
- int pending_seqno;
-
- /*
- * we can get here before the CTs are even initialized if we're wedging
- * very early, in which case there are not going to be any pending
- * fences so we can bail immediately.
- */
- if (!xe_guc_ct_initialized(&gt->uc.guc.ct))
- return;
-
- /*
- * CT channel is already disabled at this point. No new TLB requests can
- * appear.
- */
-
- mutex_lock(&gt->uc.guc.ct.lock);
- spin_lock_irq(&gt->tlb_invalidation.pending_lock);
- cancel_delayed_work(&gt->tlb_invalidation.fence_tdr);
- /*
- * We might have various kworkers waiting for TLB flushes to complete
- * which are not tracked with an explicit TLB fence, however at this
- * stage that will never happen since the CT is already disabled, so
- * make sure we signal them here under the assumption that we have
- * completed a full GT reset.
- */
- if (gt->tlb_invalidation.seqno == 1)
- pending_seqno = TLB_INVALIDATION_SEQNO_MAX - 1;
- else
- pending_seqno = gt->tlb_invalidation.seqno - 1;
- WRITE_ONCE(gt->tlb_invalidation.seqno_recv, pending_seqno);
-
- list_for_each_entry_safe(fence, next,
- &gt->tlb_invalidation.pending_fences, link)
- invalidation_fence_signal(gt_to_xe(gt), fence);
- spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
- mutex_unlock(&gt->uc.guc.ct.lock);
-}
-
-static bool tlb_invalidation_seqno_past(struct xe_gt *gt, int seqno)
-{
- int seqno_recv = READ_ONCE(gt->tlb_invalidation.seqno_recv);
-
- if (seqno - seqno_recv < -(TLB_INVALIDATION_SEQNO_MAX / 2))
- return false;
-
- if (seqno - seqno_recv > (TLB_INVALIDATION_SEQNO_MAX / 2))
- return true;
-
- return seqno_recv >= seqno;
-}
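
For reference, the modular comparison above behaves as follows with TLB_INVALIDATION_SEQNO_MAX = 0x100000 (a hand-worked example):

	/*
	 * seqno = 2, seqno_recv = 0xFFFFF (seqno wrapped past the maximum):
	 *   2 - 0xFFFFF = -0xFFFFD < -(0x100000 / 2)  ->  false (not yet past)
	 * seqno = 0xFFFFF, seqno_recv = 2 (stale seqno from before the wrap):
	 *   0xFFFFF - 2 = 0xFFFFD > (0x100000 / 2)    ->  true (already past)
	 * Otherwise the plain comparison seqno_recv >= seqno decides.
	 */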
-
-static int send_tlb_invalidation(struct xe_guc *guc,
- struct xe_gt_tlb_invalidation_fence *fence,
- u32 *action, int len)
-{
- struct xe_gt *gt = guc_to_gt(guc);
- struct xe_device *xe = gt_to_xe(gt);
- int seqno;
- int ret;
-
- xe_gt_assert(gt, fence);
-
- /*
- * XXX: The seqno algorithm relies on TLB invalidation being processed
- * in order which they currently are, if that changes the algorithm will
- * need to be updated.
- */
-
- mutex_lock(&guc->ct.lock);
- seqno = gt->tlb_invalidation.seqno;
- fence->seqno = seqno;
- trace_xe_gt_tlb_invalidation_fence_send(xe, fence);
- action[1] = seqno;
- ret = xe_guc_ct_send_locked(&guc->ct, action, len,
- G2H_LEN_DW_TLB_INVALIDATE, 1);
- if (!ret) {
- spin_lock_irq(&gt->tlb_invalidation.pending_lock);
- /*
- * We haven't actually published the TLB fence as per
- * pending_fences, but in theory our seqno could have already
- * been written as we acquired the pending_lock. In such a case
- * we can just go ahead and signal the fence here.
- */
- if (tlb_invalidation_seqno_past(gt, seqno)) {
- __invalidation_fence_signal(xe, fence);
- } else {
- fence->invalidation_time = ktime_get();
- list_add_tail(&fence->link,
- &gt->tlb_invalidation.pending_fences);
-
- if (list_is_singular(&gt->tlb_invalidation.pending_fences))
- queue_delayed_work(system_wq,
- &gt->tlb_invalidation.fence_tdr,
- tlb_timeout_jiffies(gt));
- }
- spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
- } else {
- __invalidation_fence_signal(xe, fence);
- }
- if (!ret) {
- gt->tlb_invalidation.seqno = (gt->tlb_invalidation.seqno + 1) %
- TLB_INVALIDATION_SEQNO_MAX;
- if (!gt->tlb_invalidation.seqno)
- gt->tlb_invalidation.seqno = 1;
- }
- mutex_unlock(&guc->ct.lock);
- xe_gt_stats_incr(gt, XE_GT_STATS_ID_TLB_INVAL, 1);
-
- return ret;
-}
-
-#define MAKE_INVAL_OP(type) ((type << XE_GUC_TLB_INVAL_TYPE_SHIFT) | \
- XE_GUC_TLB_INVAL_MODE_HEAVY << XE_GUC_TLB_INVAL_MODE_SHIFT | \
- XE_GUC_TLB_INVAL_FLUSH_CACHE)
-
-/**
- * xe_gt_tlb_invalidation_guc - Issue a TLB invalidation on this GT for the GuC
- * @gt: GT structure
- * @fence: invalidation fence which will be signal on TLB invalidation
- * completion
- *
- * Issue a TLB invalidation for the GuC. Completion of TLB is asynchronous and
- * caller can use the invalidation fence to wait for completion.
- *
- * Return: 0 on success, negative error code on error
- */
-static int xe_gt_tlb_invalidation_guc(struct xe_gt *gt,
- struct xe_gt_tlb_invalidation_fence *fence)
-{
- u32 action[] = {
- XE_GUC_ACTION_TLB_INVALIDATION,
- 0, /* seqno, replaced in send_tlb_invalidation */
- MAKE_INVAL_OP(XE_GUC_TLB_INVAL_GUC),
- };
- int ret;
-
- ret = send_tlb_invalidation(&gt->uc.guc, fence, action,
- ARRAY_SIZE(action));
- /*
- * -ECANCELED indicates the CT is stopped for a GT reset. TLB caches
- * should be nuked on a GT reset so this error can be ignored.
- */
- if (ret == -ECANCELED)
- return 0;
-
- return ret;
-}
-
-/**
- * xe_gt_tlb_invalidation_ggtt - Issue a TLB invalidation on this GT for the GGTT
- * @gt: GT structure
- *
- * Issue a TLB invalidation for the GGTT. Completion of TLB invalidation is
- * synchronous.
- *
- * Return: 0 on success, negative error code on error
- */
-int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
-{
- struct xe_device *xe = gt_to_xe(gt);
- unsigned int fw_ref;
-
- if (xe_guc_ct_enabled(&gt->uc.guc.ct) &&
- gt->uc.guc.submission_state.enabled) {
- struct xe_gt_tlb_invalidation_fence fence;
- int ret;
-
- xe_gt_tlb_invalidation_fence_init(gt, &fence, true);
- ret = xe_gt_tlb_invalidation_guc(gt, &fence);
- if (ret)
- return ret;
-
- xe_gt_tlb_invalidation_fence_wait(&fence);
- } else if (xe_device_uc_enabled(xe) && !xe_device_wedged(xe)) {
- struct xe_mmio *mmio = &gt->mmio;
-
- if (IS_SRIOV_VF(xe))
- return 0;
-
- fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
- if (xe->info.platform == XE_PVC || GRAPHICS_VER(xe) >= 20) {
- xe_mmio_write32(mmio, PVC_GUC_TLB_INV_DESC1,
- PVC_GUC_TLB_INV_DESC1_INVALIDATE);
- xe_mmio_write32(mmio, PVC_GUC_TLB_INV_DESC0,
- PVC_GUC_TLB_INV_DESC0_VALID);
- } else {
- xe_mmio_write32(mmio, GUC_TLB_INV_CR,
- GUC_TLB_INV_CR_INVALIDATE);
- }
- xe_force_wake_put(gt_to_fw(gt), fw_ref);
- }
-
- return 0;
-}
-
-static int send_tlb_invalidation_all(struct xe_gt *gt,
- struct xe_gt_tlb_invalidation_fence *fence)
-{
- u32 action[] = {
- XE_GUC_ACTION_TLB_INVALIDATION_ALL,
- 0, /* seqno, replaced in send_tlb_invalidation */
- MAKE_INVAL_OP(XE_GUC_TLB_INVAL_FULL),
- };
-
- return send_tlb_invalidation(&gt->uc.guc, fence, action, ARRAY_SIZE(action));
-}
-
-/**
- * xe_gt_tlb_invalidation_all - Invalidate all TLBs across PF and all VFs.
- * @gt: the &xe_gt structure
- * @fence: the &xe_gt_tlb_invalidation_fence to be signaled on completion
- *
- * Send a request to invalidate all TLBs across PF and all VFs.
- *
- * Return: 0 on success, negative error code on error
- */
-int xe_gt_tlb_invalidation_all(struct xe_gt *gt, struct xe_gt_tlb_invalidation_fence *fence)
-{
- int err;
-
- xe_gt_assert(gt, gt == fence->gt);
-
- err = send_tlb_invalidation_all(gt, fence);
- if (err)
- xe_gt_err(gt, "TLB invalidation request failed (%pe)", ERR_PTR(err));
-
- return err;
-}
-
-/*
- * Ensure that roundup_pow_of_two(length) doesn't overflow.
- * Note that roundup_pow_of_two() operates on unsigned long,
- * not on u64.
- */
-#define MAX_RANGE_TLB_INVALIDATION_LENGTH (rounddown_pow_of_two(ULONG_MAX))
-
-/**
- * xe_gt_tlb_invalidation_range - Issue a TLB invalidation on this GT for an
- * address range
- *
- * @gt: GT structure
- * @fence: invalidation fence which will be signal on TLB invalidation
- * completion
- * @start: start address
- * @end: end address
- * @asid: address space id
- *
- * Issue a range based TLB invalidation if supported, if not fallback to a full
- * TLB invalidation. Completion of TLB is asynchronous and caller can use
- * the invalidation fence to wait for completion.
- *
- * Return: Negative error code on error, 0 on success
- */
-int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
- struct xe_gt_tlb_invalidation_fence *fence,
- u64 start, u64 end, u32 asid)
-{
- struct xe_device *xe = gt_to_xe(gt);
-#define MAX_TLB_INVALIDATION_LEN 7
- u32 action[MAX_TLB_INVALIDATION_LEN];
- u64 length = end - start;
- int len = 0;
-
- xe_gt_assert(gt, fence);
-
- /* Execlists not supported */
- if (gt_to_xe(gt)->info.force_execlist) {
- __invalidation_fence_signal(xe, fence);
- return 0;
- }
-
- action[len++] = XE_GUC_ACTION_TLB_INVALIDATION;
- action[len++] = 0; /* seqno, replaced in send_tlb_invalidation */
- if (!xe->info.has_range_tlb_invalidation ||
- length > MAX_RANGE_TLB_INVALIDATION_LENGTH) {
- action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_FULL);
- } else {
- u64 orig_start = start;
- u64 align;
-
- if (length < SZ_4K)
- length = SZ_4K;
-
- /*
- * We need to invalidate a higher granularity if start address
- * is not aligned to length. When start is not aligned with
- * length we need to find the length large enough to create an
- * address mask covering the required range.
- */
- align = roundup_pow_of_two(length);
- start = ALIGN_DOWN(start, align);
- end = ALIGN(end, align);
- length = align;
- while (start + length < end) {
- length <<= 1;
- start = ALIGN_DOWN(orig_start, length);
- }
-
- /*
- * Minimum invalidation size for a 2MB page that the hardware
- * expects is 16MB
- */
- if (length >= SZ_2M) {
- length = max_t(u64, SZ_16M, length);
- start = ALIGN_DOWN(orig_start, length);
- }
-
- xe_gt_assert(gt, length >= SZ_4K);
- xe_gt_assert(gt, is_power_of_2(length));
- xe_gt_assert(gt, !(length & GENMASK(ilog2(SZ_16M) - 1,
- ilog2(SZ_2M) + 1)));
- xe_gt_assert(gt, IS_ALIGNED(start, length));
-
- action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_PAGE_SELECTIVE);
- action[len++] = asid;
- action[len++] = lower_32_bits(start);
- action[len++] = upper_32_bits(start);
- action[len++] = ilog2(length) - ilog2(SZ_4K);
- }
-
- xe_gt_assert(gt, len <= MAX_TLB_INVALIDATION_LEN);
-
- return send_tlb_invalidation(&gt->uc.guc, fence, action, len);
-}
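
To see the alignment logic at work, take start = 0x11000 and end = 0x15000 (a hand-worked example):

	/*
	 * length = 0x4000; align = roundup_pow_of_two(0x4000) = 0x4000
	 * start  = ALIGN_DOWN(0x11000, 0x4000) = 0x10000
	 * end    = ALIGN(0x15000, 0x4000)      = 0x18000
	 * 0x10000 + 0x4000 < 0x18000, so double: length = 0x8000 and
	 * start = ALIGN_DOWN(0x11000, 0x8000)  = 0x10000
	 * Result: invalidate [0x10000, 0x18000); the encoded size dword is
	 * ilog2(0x8000) - ilog2(SZ_4K) = 15 - 12 = 3.
	 */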
-
-/**
- * xe_gt_tlb_invalidation_vm - Issue a TLB invalidation on this GT for a VM
- * @gt: graphics tile
- * @vm: VM to invalidate
- *
- * Invalidate entire VM's address space
- */
-void xe_gt_tlb_invalidation_vm(struct xe_gt *gt, struct xe_vm *vm)
-{
- struct xe_gt_tlb_invalidation_fence fence;
- u64 range = 1ull << vm->xe->info.va_bits;
- int ret;
-
- xe_gt_tlb_invalidation_fence_init(gt, &fence, true);
-
- ret = xe_gt_tlb_invalidation_range(gt, &fence, 0, range, vm->usm.asid);
- if (ret < 0)
- return;
-
- xe_gt_tlb_invalidation_fence_wait(&fence);
-}
-
-/**
- * xe_guc_tlb_invalidation_done_handler - TLB invalidation done handler
- * @guc: guc
- * @msg: message indicating TLB invalidation done
- * @len: length of message
- *
- * Parse seqno of TLB invalidation, wake any waiters for seqno, and signal any
- * invalidation fences for seqno. Algorithm for this depends on seqno being
- * received in-order and asserts this assumption.
- *
- * Return: 0 on success, -EPROTO for malformed messages.
- */
-int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
-{
- struct xe_gt *gt = guc_to_gt(guc);
- struct xe_device *xe = gt_to_xe(gt);
- struct xe_gt_tlb_invalidation_fence *fence, *next;
- unsigned long flags;
-
- if (unlikely(len != 1))
- return -EPROTO;
-
- /*
- * This can also be run both directly from the IRQ handler and also in
- * process_g2h_msg(). Only one may process any individual CT message,
- * however the order they are processed here could result in skipping a
- * seqno. To handle that we just process all the seqnos from the last
- * seqno_recv up to and including the one in msg[0]. The delta should be
- * very small so there shouldn't be much of pending_fences we actually
- * need to iterate over here.
- *
- * From GuC POV we expect the seqnos to always appear in-order, so if we
- * see something later in the timeline we can be sure that anything
- * appearing earlier has already signalled, just that we have yet to
- * officially process the CT message like if racing against
- * process_g2h_msg().
- */
- spin_lock_irqsave(&gt->tlb_invalidation.pending_lock, flags);
- if (tlb_invalidation_seqno_past(gt, msg[0])) {
- spin_unlock_irqrestore(&gt->tlb_invalidation.pending_lock, flags);
- return 0;
- }
-
- WRITE_ONCE(gt->tlb_invalidation.seqno_recv, msg[0]);
-
- list_for_each_entry_safe(fence, next,
- &gt->tlb_invalidation.pending_fences, link) {
- trace_xe_gt_tlb_invalidation_fence_recv(xe, fence);
-
- if (!tlb_invalidation_seqno_past(gt, fence->seqno))
- break;
-
- invalidation_fence_signal(xe, fence);
- }
-
- if (!list_empty(&gt->tlb_invalidation.pending_fences))
- mod_delayed_work(system_wq,
- &gt->tlb_invalidation.fence_tdr,
- tlb_timeout_jiffies(gt));
- else
- cancel_delayed_work(&gt->tlb_invalidation.fence_tdr);
-
- spin_unlock_irqrestore(&gt->tlb_invalidation.pending_lock, flags);
-
- return 0;
-}
-
-static const char *
-invalidation_fence_get_driver_name(struct dma_fence *dma_fence)
-{
- return "xe";
-}
-
-static const char *
-invalidation_fence_get_timeline_name(struct dma_fence *dma_fence)
-{
- return "invalidation_fence";
-}
-
-static const struct dma_fence_ops invalidation_fence_ops = {
- .get_driver_name = invalidation_fence_get_driver_name,
- .get_timeline_name = invalidation_fence_get_timeline_name,
-};
-
-/**
- * xe_gt_tlb_invalidation_fence_init - Initialize TLB invalidation fence
- * @gt: GT
- * @fence: TLB invalidation fence to initialize
- * @stack: fence is stack variable
- *
- * Initialize TLB invalidation fence for use. xe_gt_tlb_invalidation_fence_fini
- * will be automatically called when fence is signalled (all fences must signal),
- * even on error.
- */
-void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt,
- struct xe_gt_tlb_invalidation_fence *fence,
- bool stack)
-{
- xe_pm_runtime_get_noresume(gt_to_xe(gt));
-
- spin_lock_irq(&gt->tlb_invalidation.lock);
- dma_fence_init(&fence->base, &invalidation_fence_ops,
- &gt->tlb_invalidation.lock,
- dma_fence_context_alloc(1), 1);
- spin_unlock_irq(&gt->tlb_invalidation.lock);
- INIT_LIST_HEAD(&fence->link);
- if (stack)
- set_bit(FENCE_STACK_BIT, &fence->base.flags);
- else
- dma_fence_get(&fence->base);
- fence->gt = gt;
-}
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
deleted file mode 100644
index f7f0f2eaf4b5..000000000000
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#ifndef _XE_GT_TLB_INVALIDATION_H_
-#define _XE_GT_TLB_INVALIDATION_H_
-
-#include <linux/types.h>
-
-#include "xe_gt_tlb_invalidation_types.h"
-
-struct xe_gt;
-struct xe_guc;
-struct xe_vm;
-struct xe_vma;
-
-int xe_gt_tlb_invalidation_init_early(struct xe_gt *gt);
-
-void xe_gt_tlb_invalidation_reset(struct xe_gt *gt);
-int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt);
-void xe_gt_tlb_invalidation_vm(struct xe_gt *gt, struct xe_vm *vm);
-int xe_gt_tlb_invalidation_all(struct xe_gt *gt, struct xe_gt_tlb_invalidation_fence *fence);
-int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
- struct xe_gt_tlb_invalidation_fence *fence,
- u64 start, u64 end, u32 asid);
-int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
-
-void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt,
- struct xe_gt_tlb_invalidation_fence *fence,
- bool stack);
-void xe_gt_tlb_invalidation_fence_signal(struct xe_gt_tlb_invalidation_fence *fence);
-
-static inline void
-xe_gt_tlb_invalidation_fence_wait(struct xe_gt_tlb_invalidation_fence *fence)
-{
- dma_fence_wait(&fence->base, false);
-}
-
-#endif /* _XE_GT_TLB_INVALIDATION_ */
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation_types.h b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation_types.h
deleted file mode 100644
index de6e825e0851..000000000000
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation_types.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#ifndef _XE_GT_TLB_INVALIDATION_TYPES_H_
-#define _XE_GT_TLB_INVALIDATION_TYPES_H_
-
-#include <linux/dma-fence.h>
-
-struct xe_gt;
-
-/**
- * struct xe_gt_tlb_invalidation_fence - XE GT TLB invalidation fence
- *
- * Optionally passed to xe_gt_tlb_invalidation and will be signaled upon TLB
- * invalidation completion.
- */
-struct xe_gt_tlb_invalidation_fence {
- /** @base: dma fence base */
- struct dma_fence base;
- /** @gt: GT which fence belong to */
- struct xe_gt *gt;
- /** @link: link into list of pending tlb fences */
- struct list_head link;
- /** @seqno: seqno of TLB invalidation to signal fence one */
- int seqno;
- /** @invalidation_time: time of TLB invalidation */
- ktime_t invalidation_time;
-};
-
-#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_topology.c b/drivers/gpu/drm/xe/xe_gt_topology.c
index 8c63e3263643..4e61c5e39bcb 100644
--- a/drivers/gpu/drm/xe/xe_gt_topology.c
+++ b/drivers/gpu/drm/xe/xe_gt_topology.c
@@ -12,6 +12,7 @@
#include "regs/xe_gt_regs.h"
#include "xe_assert.h"
#include "xe_gt.h"
+#include "xe_gt_mcr.h"
#include "xe_gt_printk.h"
#include "xe_mmio.h"
#include "xe_wa.h"
@@ -122,6 +123,21 @@ gen_l3_mask_from_pattern(struct xe_device *xe, xe_l3_bank_mask_t dst,
}
}
+bool xe_gt_topology_report_l3(struct xe_gt *gt)
+{
+ /*
+ * No known userspace needs/uses the L3 bank mask reported by
+ * the media GT, and the hardware itself is known to report bogus
+ * values on several platforms. Only report L3 bank mask as part
+ * of the media GT's topology on pre-Xe3 platforms since that's
+ * already part of our ABI.
+ */
+ if (xe_gt_is_media_type(gt) && MEDIA_VER(gt_to_xe(gt)) >= 30)
+ return false;
+
+ return true;
+}
+
static void
load_l3_bank_mask(struct xe_gt *gt, xe_l3_bank_mask_t l3_bank_mask)
{
@@ -129,16 +145,7 @@ load_l3_bank_mask(struct xe_gt *gt, xe_l3_bank_mask_t l3_bank_mask)
struct xe_mmio *mmio = &gt->mmio;
u32 fuse3 = xe_mmio_read32(mmio, MIRROR_FUSE3);
- /*
- * PTL platforms with media version 30.00 do not provide proper values
- * for the media GT's L3 bank registers. Skip the readout since we
- * don't have any way to obtain real values.
- *
- * This may get re-described as an official workaround in the future,
- * but there's no tracking number assigned yet so we use a custom
- * OOB workaround descriptor.
- */
- if (XE_WA(gt, no_media_l3))
+ if (!xe_gt_topology_report_l3(gt))
return;
if (GRAPHICS_VER(xe) >= 30) {
@@ -275,8 +282,9 @@ xe_gt_topology_dump(struct xe_gt *gt, struct drm_printer *p)
drm_printf(p, "EU type: %s\n",
eu_type_to_str(gt->fuse_topo.eu_type));
- drm_printf(p, "L3 bank mask: %*pb\n", XE_MAX_L3_BANK_MASK_BITS,
- gt->fuse_topo.l3_bank_mask);
+ if (xe_gt_topology_report_l3(gt))
+ drm_printf(p, "L3 bank mask: %*pb\n", XE_MAX_L3_BANK_MASK_BITS,
+ gt->fuse_topo.l3_bank_mask);
}
/*
@@ -328,3 +336,19 @@ bool xe_gt_has_compute_dss(struct xe_gt *gt, unsigned int dss)
{
return test_bit(dss, gt->fuse_topo.c_dss_mask);
}
+
+bool xe_gt_has_discontiguous_dss_groups(const struct xe_gt *gt)
+{
+ unsigned int xecore;
+ int last_group = -1;
+ u16 group, instance;
+
+ for_each_dss_steering(xecore, gt, group, instance) {
+ if (last_group != group) {
+ if (group - last_group > 1)
+ return true;
+ last_group = group;
+ }
+ }
+ return false;
+}
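
Intuitively the helper reports true when the enabled DSS group IDs skip a value; the same walk over a plain array (a standalone illustration, not driver code):

	/* groups[] holds the group ID of each enabled DSS, in steering order. */
	static bool groups_discontiguous(const u16 *groups, int n)
	{
		int last = -1, i;

		for (i = 0; i < n; i++) {
			if (groups[i] != last) {
				if (groups[i] - last > 1)
					return true;	/* e.g. {0, 0, 1, 3}: 3 - 1 > 1 */
				last = groups[i];
			}
		}
		return false;
	}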
diff --git a/drivers/gpu/drm/xe/xe_gt_topology.h b/drivers/gpu/drm/xe/xe_gt_topology.h
index c8140704ad4c..5e62f5949b7b 100644
--- a/drivers/gpu/drm/xe/xe_gt_topology.h
+++ b/drivers/gpu/drm/xe/xe_gt_topology.h
@@ -47,4 +47,8 @@ xe_gt_topology_has_dss_in_quadrant(struct xe_gt *gt, int quad);
bool xe_gt_has_geometry_dss(struct xe_gt *gt, unsigned int dss);
bool xe_gt_has_compute_dss(struct xe_gt *gt, unsigned int dss);
+bool xe_gt_has_discontiguous_dss_groups(const struct xe_gt *gt);
+
+bool xe_gt_topology_report_l3(struct xe_gt *gt);
+
#endif /* _XE_GT_TOPOLOGY_H_ */
diff --git a/drivers/gpu/drm/xe/xe_gt_types.h b/drivers/gpu/drm/xe/xe_gt_types.h
index 96344c604726..66158105aca5 100644
--- a/drivers/gpu/drm/xe/xe_gt_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_types.h
@@ -17,6 +17,7 @@
#include "xe_oa_types.h"
#include "xe_reg_sr_types.h"
#include "xe_sa_types.h"
+#include "xe_tlb_inval_types.h"
#include "xe_uc_types.h"
struct xe_exec_queue_ops;
@@ -185,34 +186,8 @@ struct xe_gt {
struct work_struct worker;
} reset;
- /** @tlb_invalidation: TLB invalidation state */
- struct {
- /** @tlb_invalidation.seqno: TLB invalidation seqno, protected by CT lock */
-#define TLB_INVALIDATION_SEQNO_MAX 0x100000
- int seqno;
- /**
- * @tlb_invalidation.seqno_recv: last received TLB invalidation seqno,
- * protected by CT lock
- */
- int seqno_recv;
- /**
- * @tlb_invalidation.pending_fences: list of pending fences waiting TLB
- * invaliations, protected by CT lock
- */
- struct list_head pending_fences;
- /**
- * @tlb_invalidation.pending_lock: protects @tlb_invalidation.pending_fences
- * and updating @tlb_invalidation.seqno_recv.
- */
- spinlock_t pending_lock;
- /**
- * @tlb_invalidation.fence_tdr: schedules a delayed call to
- * xe_gt_tlb_fence_timeout after the timeut interval is over.
- */
- struct delayed_work fence_tdr;
- /** @tlb_invalidation.lock: protects TLB invalidation fences */
- spinlock_t lock;
- } tlb_invalidation;
+ /** @tlb_inval: TLB invalidation state */
+ struct xe_tlb_inval tlb_inval;
/**
* @ccs_mode: Number of compute engines enabled.
@@ -411,7 +386,7 @@ struct xe_gt {
unsigned long *oob;
/**
* @wa_active.oob_initialized: mark oob as initialized to help
- * detecting misuse of XE_WA() - it can only be called on
+ * detecting misuse of XE_GT_WA() - it can only be called on
* initialization after OOB WAs have being processed
*/
bool oob_initialized;
diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c
index 270fc3792493..00789844ea4d 100644
--- a/drivers/gpu/drm/xe/xe_guc.c
+++ b/drivers/gpu/drm/xe/xe_guc.c
@@ -16,6 +16,7 @@
#include "regs/xe_guc_regs.h"
#include "regs/xe_irq_regs.h"
#include "xe_bo.h"
+#include "xe_configfs.h"
#include "xe_device.h"
#include "xe_force_wake.h"
#include "xe_gt.h"
@@ -73,19 +74,22 @@ static u32 guc_ctl_debug_flags(struct xe_guc *guc)
if (!GUC_LOG_LEVEL_IS_VERBOSE(level))
flags |= GUC_LOG_DISABLED;
else
- flags |= GUC_LOG_LEVEL_TO_VERBOSITY(level) <<
- GUC_LOG_VERBOSITY_SHIFT;
+ flags |= FIELD_PREP(GUC_LOG_VERBOSITY, GUC_LOG_LEVEL_TO_VERBOSITY(level));
return flags;
}
static u32 guc_ctl_feature_flags(struct xe_guc *guc)
{
+ struct xe_device *xe = guc_to_xe(guc);
u32 flags = GUC_CTL_ENABLE_LITE_RESTORE;
- if (!guc_to_xe(guc)->info.skip_guc_pc)
+ if (!xe->info.skip_guc_pc)
flags |= GUC_CTL_ENABLE_SLPC;
+ if (xe_configfs_get_psmi_enabled(to_pci_dev(xe->drm.dev)))
+ flags |= GUC_CTL_ENABLE_PSMI_LOGGING;
+
return flags;
}
@@ -117,22 +121,14 @@ static u32 guc_ctl_log_params_flags(struct xe_guc *guc)
BUILD_BUG_ON(!CAPTURE_BUFFER_SIZE);
BUILD_BUG_ON(!IS_ALIGNED(CAPTURE_BUFFER_SIZE, CAPTURE_UNIT));
- BUILD_BUG_ON((CRASH_BUFFER_SIZE / LOG_UNIT - 1) >
- (GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT));
- BUILD_BUG_ON((DEBUG_BUFFER_SIZE / LOG_UNIT - 1) >
- (GUC_LOG_DEBUG_MASK >> GUC_LOG_DEBUG_SHIFT));
- BUILD_BUG_ON((CAPTURE_BUFFER_SIZE / CAPTURE_UNIT - 1) >
- (GUC_LOG_CAPTURE_MASK >> GUC_LOG_CAPTURE_SHIFT));
-
flags = GUC_LOG_VALID |
GUC_LOG_NOTIFY_ON_HALF_FULL |
CAPTURE_FLAG |
LOG_FLAG |
- ((CRASH_BUFFER_SIZE / LOG_UNIT - 1) << GUC_LOG_CRASH_SHIFT) |
- ((DEBUG_BUFFER_SIZE / LOG_UNIT - 1) << GUC_LOG_DEBUG_SHIFT) |
- ((CAPTURE_BUFFER_SIZE / CAPTURE_UNIT - 1) <<
- GUC_LOG_CAPTURE_SHIFT) |
- (offset << GUC_LOG_BUF_ADDR_SHIFT);
+ FIELD_PREP(GUC_LOG_CRASH, CRASH_BUFFER_SIZE / LOG_UNIT - 1) |
+ FIELD_PREP(GUC_LOG_DEBUG, DEBUG_BUFFER_SIZE / LOG_UNIT - 1) |
+ FIELD_PREP(GUC_LOG_CAPTURE, CAPTURE_BUFFER_SIZE / CAPTURE_UNIT - 1) |
+ FIELD_PREP(GUC_LOG_BUF_ADDR, offset);
#undef LOG_UNIT
#undef LOG_FLAG
@@ -145,7 +141,7 @@ static u32 guc_ctl_log_params_flags(struct xe_guc *guc)
static u32 guc_ctl_ads_flags(struct xe_guc *guc)
{
u32 ads = guc_bo_ggtt_addr(guc, guc->ads.bo) >> PAGE_SHIFT;
- u32 flags = ads << GUC_ADS_ADDR_SHIFT;
+ u32 flags = FIELD_PREP(GUC_ADS_ADDR, ads);
return flags;
}
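
The shift-to-FIELD_PREP() conversions in this file are mechanical; for GUC_ADS_ADDR = REG_GENMASK(21, 1) the two encodings are bit-identical, and decoding is symmetric (a sketch):

	u32 val_old = ads << 1;				/* GUC_ADS_ADDR_SHIFT was 1 */
	u32 val_new = FIELD_PREP(GUC_ADS_ADDR, ads);	/* same bit pattern */
	u32 decoded = FIELD_GET(GUC_ADS_ADDR, val_new);	/* == ads, masked to 21 bits */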
@@ -157,7 +153,7 @@ static bool needs_wa_dual_queue(struct xe_gt *gt)
* on RCS and CCSes with different address spaces, which on DG2 is
* required as a WA for an HW bug.
*/
- if (XE_WA(gt, 22011391025))
+ if (XE_GT_WA(gt, 22011391025))
return true;
/*
@@ -184,10 +180,10 @@ static u32 guc_ctl_wa_flags(struct xe_guc *guc)
struct xe_gt *gt = guc_to_gt(guc);
u32 flags = 0;
- if (XE_WA(gt, 22012773006))
+ if (XE_GT_WA(gt, 22012773006))
flags |= GUC_WA_POLLCS;
- if (XE_WA(gt, 14014475959))
+ if (XE_GT_WA(gt, 14014475959))
flags |= GUC_WA_HOLD_CCS_SWITCHOUT;
if (needs_wa_dual_queue(gt))
@@ -201,19 +197,22 @@ static u32 guc_ctl_wa_flags(struct xe_guc *guc)
if (GRAPHICS_VERx100(xe) < 1270)
flags |= GUC_WA_PRE_PARSER;
- if (XE_WA(gt, 22012727170) || XE_WA(gt, 22012727685))
+ if (XE_GT_WA(gt, 22012727170) || XE_GT_WA(gt, 22012727685))
flags |= GUC_WA_CONTEXT_ISOLATION;
- if (XE_WA(gt, 18020744125) &&
+ if (XE_GT_WA(gt, 18020744125) &&
!xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_RENDER))
flags |= GUC_WA_RCS_REGS_IN_CCS_REGS_LIST;
- if (XE_WA(gt, 1509372804))
+ if (XE_GT_WA(gt, 1509372804))
flags |= GUC_WA_RENDER_RST_RC6_EXIT;
- if (XE_WA(gt, 14018913170))
+ if (XE_GT_WA(gt, 14018913170))
flags |= GUC_WA_ENABLE_TSC_CHECK_ON_RC6;
+ if (XE_GT_WA(gt, 16023683509))
+ flags |= GUC_WA_SAVE_RESTORE_MCFG_REG_AT_MC6;
+
return flags;
}
@@ -701,10 +700,6 @@ static int xe_guc_realloc_post_hwconfig(struct xe_guc *guc)
if (ret)
return ret;
- ret = xe_managed_bo_reinit_in_vram(xe, tile, &guc->ct.bo);
- if (ret)
- return ret;
-
return 0;
}
@@ -839,6 +834,10 @@ int xe_guc_init_post_hwconfig(struct xe_guc *guc)
if (ret)
return ret;
+ ret = xe_guc_ct_init_post_hwconfig(&guc->ct);
+ if (ret)
+ return ret;
+
guc_init_params_post_hwconfig(guc);
ret = xe_guc_submit_init(guc, ~0);
@@ -990,11 +989,14 @@ static int guc_load_done(u32 status)
case XE_GUC_LOAD_STATUS_GUC_PREPROD_BUILD_MISMATCH:
case XE_GUC_LOAD_STATUS_ERROR_DEVID_INVALID_GUCTYPE:
case XE_GUC_LOAD_STATUS_HWCONFIG_ERROR:
+ case XE_GUC_LOAD_STATUS_BOOTROM_VERSION_MISMATCH:
case XE_GUC_LOAD_STATUS_DPC_ERROR:
case XE_GUC_LOAD_STATUS_EXCEPTION:
case XE_GUC_LOAD_STATUS_INIT_DATA_INVALID:
case XE_GUC_LOAD_STATUS_MPU_DATA_INVALID:
case XE_GUC_LOAD_STATUS_INIT_MMIO_SAVE_RESTORE_INVALID:
+ case XE_GUC_LOAD_STATUS_KLV_WORKAROUND_INIT_ERROR:
+ case XE_GUC_LOAD_STATUS_INVALID_FTR_FLAG:
return -1;
}
@@ -1053,7 +1055,7 @@ static s32 guc_pc_get_cur_freq(struct xe_guc_pc *guc_pc)
#endif
#define GUC_LOAD_TIME_WARN_MS 200
-static void guc_wait_ucode(struct xe_guc *guc)
+static int guc_wait_ucode(struct xe_guc *guc)
{
struct xe_gt *gt = guc_to_gt(guc);
struct xe_mmio *mmio = &gt->mmio;
@@ -1134,21 +1136,33 @@ static void guc_wait_ucode(struct xe_guc *guc)
}
switch (ukernel) {
+ case XE_GUC_LOAD_STATUS_HWCONFIG_START:
+ xe_gt_err(gt, "still extracting hwconfig table.\n");
+ break;
+
case XE_GUC_LOAD_STATUS_EXCEPTION:
xe_gt_err(gt, "firmware exception. EIP: %#x\n",
xe_mmio_read32(mmio, SOFT_SCRATCH(13)));
break;
+ case XE_GUC_LOAD_STATUS_INIT_DATA_INVALID:
+ xe_gt_err(gt, "illegal init/ADS data\n");
+ break;
+
case XE_GUC_LOAD_STATUS_INIT_MMIO_SAVE_RESTORE_INVALID:
xe_gt_err(gt, "illegal register in save/restore workaround list\n");
break;
- case XE_GUC_LOAD_STATUS_HWCONFIG_START:
- xe_gt_err(gt, "still extracting hwconfig table.\n");
+ case XE_GUC_LOAD_STATUS_KLV_WORKAROUND_INIT_ERROR:
+ xe_gt_err(gt, "illegal workaround KLV data\n");
+ break;
+
+ case XE_GUC_LOAD_STATUS_INVALID_FTR_FLAG:
+ xe_gt_err(gt, "illegal feature flag specified\n");
break;
}
- xe_device_declare_wedged(gt_to_xe(gt));
+ return -EPROTO;
} else if (delta_ms > GUC_LOAD_TIME_WARN_MS) {
xe_gt_warn(gt, "excessive init time: %lldms! [status = 0x%08X, timeouts = %d]\n",
delta_ms, status, count);
@@ -1160,7 +1174,10 @@ static void guc_wait_ucode(struct xe_guc *guc)
delta_ms, xe_guc_pc_get_act_freq(guc_pc), guc_pc_get_cur_freq(guc_pc),
before_freq, status, count);
}
+
+ return 0;
}
+ALLOW_ERROR_INJECTION(guc_wait_ucode, ERRNO);
static int __xe_guc_upload(struct xe_guc *guc)
{
@@ -1192,14 +1209,16 @@ static int __xe_guc_upload(struct xe_guc *guc)
goto out;
/* Wait for authentication */
- guc_wait_ucode(guc);
+ ret = guc_wait_ucode(guc);
+ if (ret)
+ goto out;
xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_RUNNING);
return 0;
out:
xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_LOAD_FAIL);
- return 0 /* FIXME: ret, don't want to stop load currently */;
+ return ret;
}
static int vf_guc_min_load_for_hwconfig(struct xe_guc *guc)
@@ -1670,3 +1689,7 @@ void xe_guc_declare_wedged(struct xe_guc *guc)
xe_guc_ct_stop(&guc->ct);
xe_guc_submit_wedge(guc);
}
+
+#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
+#include "tests/xe_guc_g2g_test.c"
+#endif
diff --git a/drivers/gpu/drm/xe/xe_guc.h b/drivers/gpu/drm/xe/xe_guc.h
index 22cf019a11bf..1cca05967e62 100644
--- a/drivers/gpu/drm/xe/xe_guc.h
+++ b/drivers/gpu/drm/xe/xe_guc.h
@@ -53,6 +53,10 @@ void xe_guc_stop(struct xe_guc *guc);
int xe_guc_start(struct xe_guc *guc);
void xe_guc_declare_wedged(struct xe_guc *guc);
+#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
+int xe_guc_g2g_test_notification(struct xe_guc *guc, u32 *payload, u32 len);
+#endif
+
static inline u16 xe_engine_class_to_guc_class(enum xe_engine_class class)
{
switch (class) {
diff --git a/drivers/gpu/drm/xe/xe_guc_ads.c b/drivers/gpu/drm/xe/xe_guc_ads.c
index 131cfc56be00..58e0b0294a5b 100644
--- a/drivers/gpu/drm/xe/xe_guc_ads.c
+++ b/drivers/gpu/drm/xe/xe_guc_ads.c
@@ -247,7 +247,7 @@ static size_t calculate_regset_size(struct xe_gt *gt)
count += ADS_REGSET_EXTRA_MAX * XE_NUM_HW_ENGINES;
- if (XE_WA(gt, 1607983814))
+ if (XE_GT_WA(gt, 1607983814))
count += LNCFCMOCS_REG_COUNT;
return count * sizeof(struct guc_mmio_reg);
@@ -284,52 +284,26 @@ static size_t calculate_golden_lrc_size(struct xe_guc_ads *ads)
return total_size;
}
-static void guc_waklv_enable_one_word(struct xe_guc_ads *ads,
- enum xe_guc_klv_ids klv_id,
- u32 value,
- u32 *offset, u32 *remain)
+static void guc_waklv_enable(struct xe_guc_ads *ads,
+ u32 data[], u32 data_len_dw,
+ u32 *offset, u32 *remain,
+ enum xe_guc_klv_ids klv_id)
{
- u32 size;
- u32 klv_entry[] = {
- /* 16:16 key/length */
- FIELD_PREP(GUC_KLV_0_KEY, klv_id) |
- FIELD_PREP(GUC_KLV_0_LEN, 1),
- value,
- /* 1 dword data */
- };
-
- size = sizeof(klv_entry);
+ size_t size = sizeof(u32) * (1 + data_len_dw);
if (*remain < size) {
drm_warn(&ads_to_xe(ads)->drm,
- "w/a klv buffer too small to add klv id %d\n", klv_id);
- } else {
- xe_map_memcpy_to(ads_to_xe(ads), ads_to_map(ads), *offset,
- klv_entry, size);
- *offset += size;
- *remain -= size;
+ "w/a klv buffer too small to add klv id 0x%04X\n", klv_id);
+ return;
}
-}
-static void guc_waklv_enable_simple(struct xe_guc_ads *ads,
- enum xe_guc_klv_ids klv_id, u32 *offset, u32 *remain)
-{
- u32 klv_entry[] = {
- /* 16:16 key/length */
- FIELD_PREP(GUC_KLV_0_KEY, klv_id) |
- FIELD_PREP(GUC_KLV_0_LEN, 0),
- /* 0 dwords data */
- };
- u32 size;
+ /* 16:16 key/length */
+ xe_map_wr(ads_to_xe(ads), ads_to_map(ads), *offset, u32,
+ FIELD_PREP(GUC_KLV_0_KEY, klv_id) | FIELD_PREP(GUC_KLV_0_LEN, data_len_dw));
+ /* data_len_dw dwords of data */
+ xe_map_memcpy_to(ads_to_xe(ads), ads_to_map(ads),
+ *offset + sizeof(u32), data, data_len_dw * sizeof(u32));
- size = sizeof(klv_entry);
-
- if (xe_gt_WARN(ads_to_gt(ads), *remain < size,
- "w/a klv buffer too small to add klv id %d\n", klv_id))
- return;
-
- xe_map_memcpy_to(ads_to_xe(ads), ads_to_map(ads), *offset,
- klv_entry, size);
*offset += size;
*remain -= size;
}
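
Concretely, each KLV is one 16:16 key/length header dword followed by data_len_dw payload dwords; for the 13011645652 workaround below the emitted words are (a worked example):

	/*
	 * dword 0: FIELD_PREP(GUC_KLV_0_KEY, klv_id) | FIELD_PREP(GUC_KLV_0_LEN, 1)
	 * dword 1: 0xC40   (register default GuC writes on RC6 exit)
	 * A zero-length KLV (data == NULL, data_len_dw == 0) is just the header.
	 */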
@@ -343,44 +317,51 @@ static void guc_waklv_init(struct xe_guc_ads *ads)
offset = guc_ads_waklv_offset(ads);
remain = guc_ads_waklv_size(ads);
- if (XE_WA(gt, 14019882105) || XE_WA(gt, 16021333562))
- guc_waklv_enable_simple(ads,
- GUC_WORKAROUND_KLV_BLOCK_INTERRUPTS_WHEN_MGSR_BLOCKED,
- &offset, &remain);
- if (XE_WA(gt, 18024947630))
- guc_waklv_enable_simple(ads,
- GUC_WORKAROUND_KLV_ID_GAM_PFQ_SHADOW_TAIL_POLLING,
- &offset, &remain);
- if (XE_WA(gt, 16022287689))
- guc_waklv_enable_simple(ads,
- GUC_WORKAROUND_KLV_ID_DISABLE_MTP_DURING_ASYNC_COMPUTE,
- &offset, &remain);
-
- if (XE_WA(gt, 14022866841))
- guc_waklv_enable_simple(ads,
- GUC_WA_KLV_WAKE_POWER_DOMAINS_FOR_OUTBOUND_MMIO,
- &offset, &remain);
+ if (XE_GT_WA(gt, 14019882105) || XE_GT_WA(gt, 16021333562))
+ guc_waklv_enable(ads, NULL, 0, &offset, &remain,
+ GUC_WORKAROUND_KLV_BLOCK_INTERRUPTS_WHEN_MGSR_BLOCKED);
+ if (XE_GT_WA(gt, 18024947630))
+ guc_waklv_enable(ads, NULL, 0, &offset, &remain,
+ GUC_WORKAROUND_KLV_ID_GAM_PFQ_SHADOW_TAIL_POLLING);
+ if (XE_GT_WA(gt, 16022287689))
+ guc_waklv_enable(ads, NULL, 0, &offset, &remain,
+ GUC_WORKAROUND_KLV_ID_DISABLE_MTP_DURING_ASYNC_COMPUTE);
+
+ if (XE_GT_WA(gt, 14022866841))
+ guc_waklv_enable(ads, NULL, 0, &offset, &remain,
+ GUC_WA_KLV_WAKE_POWER_DOMAINS_FOR_OUTBOUND_MMIO);
/*
* On RC6 exit, GuC will write register 0xB04 with the default value provided. As of now,
* the default value for this register is determined to be 0xC40. This could change in the
* future, so GuC depends on KMD to send it the correct value.
*/
- if (XE_WA(gt, 13011645652))
- guc_waklv_enable_one_word(ads,
- GUC_WA_KLV_NP_RD_WRITE_TO_CLEAR_RCSM_AT_CGP_LATE_RESTORE,
- 0xC40,
- &offset, &remain);
-
- if (XE_WA(gt, 14022293748) || XE_WA(gt, 22019794406))
- guc_waklv_enable_simple(ads,
- GUC_WORKAROUND_KLV_ID_BACK_TO_BACK_RCS_ENGINE_RESET,
- &offset, &remain);
-
- if (GUC_FIRMWARE_VER(&gt->uc.guc) >= MAKE_GUC_VER(70, 44, 0) && XE_WA(gt, 16026508708))
- guc_waklv_enable_simple(ads,
- GUC_WA_KLV_RESET_BB_STACK_PTR_ON_VF_SWITCH,
- &offset, &remain);
+ if (XE_GT_WA(gt, 13011645652)) {
+ u32 data = 0xC40;
+
+ guc_waklv_enable(ads, &data, 1, &offset, &remain,
+ GUC_WA_KLV_NP_RD_WRITE_TO_CLEAR_RCSM_AT_CGP_LATE_RESTORE);
+ }
+
+ if (XE_GT_WA(gt, 14022293748) || XE_GT_WA(gt, 22019794406))
+ guc_waklv_enable(ads, NULL, 0, &offset, &remain,
+ GUC_WORKAROUND_KLV_ID_BACK_TO_BACK_RCS_ENGINE_RESET);
+
+ if (GUC_FIRMWARE_VER(&gt->uc.guc) >= MAKE_GUC_VER(70, 44, 0) && XE_GT_WA(gt, 16026508708))
+ guc_waklv_enable(ads, NULL, 0, &offset, &remain,
+ GUC_WA_KLV_RESET_BB_STACK_PTR_ON_VF_SWITCH);
+ if (GUC_FIRMWARE_VER(&gt->uc.guc) >= MAKE_GUC_VER(70, 47, 0) && XE_GT_WA(gt, 16026007364)) {
+ u32 data[] = {
+ 0x0,
+ 0xF,
+ };
+ guc_waklv_enable(ads, data, ARRAY_SIZE(data), &offset, &remain,
+ GUC_WA_KLV_RESTORE_UNSAVED_MEDIA_CONTROL_REG);
+ }
+
+ if (XE_GT_WA(gt, 14020001231))
+ guc_waklv_enable(ads, NULL, 0, &offset, &remain,
+ GUC_WORKAROUND_KLV_DISABLE_PSMI_INTERRUPTS_AT_C6_ENTRY_RESTORE_AT_EXIT);
size = guc_ads_waklv_size(ads) - remain;
if (!size)
@@ -784,7 +765,7 @@ static unsigned int guc_mmio_regset_write(struct xe_guc_ads *ads,
guc_mmio_regset_write_one(ads, regset_map, e->reg, count++);
}
- if (XE_WA(hwe->gt, 1607983814) && hwe->class == XE_ENGINE_CLASS_RENDER) {
+ if (XE_GT_WA(hwe->gt, 1607983814) && hwe->class == XE_ENGINE_CLASS_RENDER) {
for (i = 0; i < LNCFCMOCS_REG_COUNT; i++) {
guc_mmio_regset_write_one(ads, regset_map,
XELP_LNCFCMOCS(i), count++);
diff --git a/drivers/gpu/drm/xe/xe_guc_buf.c b/drivers/gpu/drm/xe/xe_guc_buf.c
index 14a07dca48e7..502ca3a4ee60 100644
--- a/drivers/gpu/drm/xe/xe_guc_buf.c
+++ b/drivers/gpu/drm/xe/xe_guc_buf.c
@@ -164,7 +164,7 @@ u64 xe_guc_cache_gpu_addr_from_ptr(struct xe_guc_buf_cache *cache, const void *p
if (offset < 0 || offset + size > cache->sam->base.size)
return 0;
- return cache->sam->gpu_addr + offset;
+ return xe_sa_manager_gpu_addr(cache->sam) + offset;
}
#if IS_BUILTIN(CONFIG_DRM_XE_KUNIT_TEST)
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
index 3f4e6a46ff16..18f6327bf552 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.c
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -26,11 +26,11 @@
#include "xe_gt_sriov_pf_control.h"
#include "xe_gt_sriov_pf_monitor.h"
#include "xe_gt_sriov_printk.h"
-#include "xe_gt_tlb_invalidation.h"
#include "xe_guc.h"
#include "xe_guc_log.h"
#include "xe_guc_relay.h"
#include "xe_guc_submit.h"
+#include "xe_guc_tlb_inval.h"
#include "xe_map.h"
#include "xe_pm.h"
#include "xe_trace_guc.h"
@@ -39,6 +39,8 @@ static void receive_g2h(struct xe_guc_ct *ct);
static void g2h_worker_func(struct work_struct *w);
static void safe_mode_worker_func(struct work_struct *w);
static void ct_exit_safe_mode(struct xe_guc_ct *ct);
+static void guc_ct_change_state(struct xe_guc_ct *ct,
+ enum xe_guc_ct_state state);
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
enum {
@@ -252,6 +254,13 @@ int xe_guc_ct_init_noalloc(struct xe_guc_ct *ct)
}
ALLOW_ERROR_INJECTION(xe_guc_ct_init_noalloc, ERRNO); /* See xe_pci_probe() */
+static void guc_action_disable_ct(void *arg)
+{
+ struct xe_guc_ct *ct = arg;
+
+ guc_ct_change_state(ct, XE_GUC_CT_STATE_DISABLED);
+}
+
int xe_guc_ct_init(struct xe_guc_ct *ct)
{
struct xe_device *xe = ct_to_xe(ct);
@@ -268,10 +277,39 @@ int xe_guc_ct_init(struct xe_guc_ct *ct)
return PTR_ERR(bo);
ct->bo = bo;
- return 0;
+
+ return devm_add_action_or_reset(xe->drm.dev, guc_action_disable_ct, ct);
}
ALLOW_ERROR_INJECTION(xe_guc_ct_init, ERRNO); /* See xe_pci_probe() */
+/**
+ * xe_guc_ct_init_post_hwconfig - Reinitialize the GuC CTB in VRAM
+ * @ct: the &xe_guc_ct
+ *
+ * Allocate a new BO in VRAM and free the previous BO that was allocated
+ * in system memory (SMEM). Applicable only for DGFX products.
+ *
+ * Return: 0 on success, or a negative errno on failure.
+ */
+int xe_guc_ct_init_post_hwconfig(struct xe_guc_ct *ct)
+{
+ struct xe_device *xe = ct_to_xe(ct);
+ struct xe_gt *gt = ct_to_gt(ct);
+ struct xe_tile *tile = gt_to_tile(gt);
+ int ret;
+
+ xe_assert(xe, !xe_guc_ct_enabled(ct));
+
+ if (IS_DGFX(xe)) {
+ ret = xe_managed_bo_reinit_in_vram(xe, tile, &ct->bo);
+ if (ret)
+ return ret;
+ }
+
+ devm_remove_action(xe->drm.dev, guc_action_disable_ct, ct);
+ return devm_add_action_or_reset(xe->drm.dev, guc_action_disable_ct, ct);
+}
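
The remove/re-add at the end relies on devm actions being released LIFO: re-registering the disable action after the (possibly reallocated) BO ensures the CT is shut down before the BO cleanup runs. The generic pattern (a sketch):

	/* devm actions run LIFO on teardown; re-adding moves ours to the top. */
	devm_remove_action(dev, my_action, data);
	return devm_add_action_or_reset(dev, my_action, data);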
+
#define desc_read(xe_, guc_ctb__, field_) \
xe_map_rd_field(xe_, &guc_ctb__->desc, 0, \
struct guc_ct_buffer_desc, field_)
@@ -1040,11 +1078,15 @@ static bool retry_failure(struct xe_guc_ct *ct, int ret)
return true;
}
+#define GUC_SEND_RETRY_LIMIT 50
+#define GUC_SEND_RETRY_MSLEEP 5
+
static int guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
u32 *response_buffer, bool no_fail)
{
struct xe_gt *gt = ct_to_gt(ct);
struct g2h_fence g2h_fence;
+ unsigned int retries = 0;
int ret = 0;
/*
@@ -1109,6 +1151,12 @@ retry_same_fence:
xe_gt_dbg(gt, "H2G action %#x retrying: reason %#x\n",
action[0], g2h_fence.reason);
mutex_unlock(&ct->lock);
+ if (++retries > GUC_SEND_RETRY_LIMIT) {
+ xe_gt_err(gt, "H2G action %#x reached retry limit=%u, aborting\n",
+ action[0], GUC_SEND_RETRY_LIMIT);
+ return -ELOOP;
+ }
+ msleep(GUC_SEND_RETRY_MSLEEP * retries);
goto retry;
}
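
With these limits the linear backoff bounds the total sleep across a full retry sequence (hand arithmetic):

	/* sum of 5 ms * n for n = 1..50  ->  5 * 50 * 51 / 2 = 6375 ms (~6.4 s) */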
if (g2h_fence.fail) {
@@ -1416,8 +1464,7 @@ static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
ret = xe_guc_pagefault_handler(guc, payload, adj_len);
break;
case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
- ret = xe_guc_tlb_invalidation_done_handler(guc, payload,
- adj_len);
+ ret = xe_guc_tlb_inval_done_handler(guc, payload, adj_len);
break;
case XE_GUC_ACTION_ACCESS_COUNTER_NOTIFY:
ret = xe_guc_access_counter_notify_handler(guc, payload,
@@ -1439,6 +1486,11 @@ static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
case XE_GUC_ACTION_NOTIFY_EXCEPTION:
ret = guc_crash_process_msg(ct, action);
break;
+#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
+ case XE_GUC_ACTION_TEST_G2G_RECV:
+ ret = xe_guc_g2g_test_notification(guc, payload, adj_len);
+ break;
+#endif
default:
xe_gt_err(gt, "unexpected G2H action 0x%04x\n", action);
}
@@ -1618,8 +1670,7 @@ static void g2h_fast_path(struct xe_guc_ct *ct, u32 *msg, u32 len)
break;
case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
__g2h_release_space(ct, len);
- ret = xe_guc_tlb_invalidation_done_handler(guc, payload,
- adj_len);
+ ret = xe_guc_tlb_inval_done_handler(guc, payload, adj_len);
break;
default:
xe_gt_warn(gt, "NOT_POSSIBLE");
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.h b/drivers/gpu/drm/xe/xe_guc_ct.h
index 18d4225e6502..cf41210ab30a 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.h
+++ b/drivers/gpu/drm/xe/xe_guc_ct.h
@@ -13,6 +13,7 @@ struct xe_device;
int xe_guc_ct_init_noalloc(struct xe_guc_ct *ct);
int xe_guc_ct_init(struct xe_guc_ct *ct);
+int xe_guc_ct_init_post_hwconfig(struct xe_guc_ct *ct);
int xe_guc_ct_enable(struct xe_guc_ct *ct);
void xe_guc_ct_disable(struct xe_guc_ct *ct);
void xe_guc_ct_stop(struct xe_guc_ct *ct);
diff --git a/drivers/gpu/drm/xe/xe_guc_engine_activity.c b/drivers/gpu/drm/xe/xe_guc_engine_activity.c
index 92e1f9f41b8c..2b99c1ebdd58 100644
--- a/drivers/gpu/drm/xe/xe_guc_engine_activity.c
+++ b/drivers/gpu/drm/xe/xe_guc_engine_activity.c
@@ -94,16 +94,17 @@ static int allocate_engine_activity_buffers(struct xe_guc *guc,
struct xe_tile *tile = gt_to_tile(gt);
struct xe_bo *bo, *metadata_bo;
- metadata_bo = xe_bo_create_pin_map(gt_to_xe(gt), tile, NULL, PAGE_ALIGN(metadata_size),
- ttm_bo_type_kernel, XE_BO_FLAG_SYSTEM |
- XE_BO_FLAG_GGTT | XE_BO_FLAG_GGTT_INVALIDATE);
+ metadata_bo = xe_bo_create_pin_map_novm(gt_to_xe(gt), tile, PAGE_ALIGN(metadata_size),
+ ttm_bo_type_kernel, XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_GGTT | XE_BO_FLAG_GGTT_INVALIDATE,
+ false);
if (IS_ERR(metadata_bo))
return PTR_ERR(metadata_bo);
- bo = xe_bo_create_pin_map(gt_to_xe(gt), tile, NULL, PAGE_ALIGN(size),
- ttm_bo_type_kernel, XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_GGTT | XE_BO_FLAG_GGTT_INVALIDATE);
+ bo = xe_bo_create_pin_map_novm(gt_to_xe(gt), tile, PAGE_ALIGN(size),
+ ttm_bo_type_kernel, XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_GGTT | XE_BO_FLAG_GGTT_INVALIDATE, false);
if (IS_ERR(bo)) {
xe_bo_unpin_map_no_vm(metadata_bo);
diff --git a/drivers/gpu/drm/xe/xe_guc_fwif.h b/drivers/gpu/drm/xe/xe_guc_fwif.h
index 6f57578b07cb..50c4c2406132 100644
--- a/drivers/gpu/drm/xe/xe_guc_fwif.h
+++ b/drivers/gpu/drm/xe/xe_guc_fwif.h
@@ -15,6 +15,7 @@
#define G2H_LEN_DW_SCHED_CONTEXT_MODE_SET 4
#define G2H_LEN_DW_DEREGISTER_CONTEXT 3
#define G2H_LEN_DW_TLB_INVALIDATE 3
+#define G2H_LEN_DW_G2G_NOTIFY_MIN 3
#define GUC_ID_MAX 65535
#define GUC_ID_UNKNOWN 0xffffffff
@@ -45,6 +46,11 @@
#define GUC_MAX_ENGINE_CLASSES 16
#define GUC_MAX_INSTANCES_PER_CLASS 32
+#define GUC_CONTEXT_NORMAL 0
+#define GUC_CONTEXT_COMPRESSION_SAVE 1
+#define GUC_CONTEXT_COMPRESSION_RESTORE 2
+#define GUC_CONTEXT_COUNT (GUC_CONTEXT_COMPRESSION_RESTORE + 1)
+
/* Helper for context registration H2G */
struct guc_ctxt_registration_info {
u32 flags;
@@ -60,6 +66,7 @@ struct guc_ctxt_registration_info {
u32 hwlrca_hi;
};
#define CONTEXT_REGISTRATION_FLAG_KMD BIT(0)
+#define CONTEXT_REGISTRATION_FLAG_TYPE GENMASK(2, 1)
/* 32-bit KLV structure as used by policy updates and others */
struct guc_klv_generic_dw_t {
@@ -84,13 +91,10 @@ struct guc_update_exec_queue_policy {
#define GUC_LOG_NOTIFY_ON_HALF_FULL BIT(1)
#define GUC_LOG_CAPTURE_ALLOC_UNITS BIT(2)
#define GUC_LOG_LOG_ALLOC_UNITS BIT(3)
-#define GUC_LOG_CRASH_SHIFT 4
-#define GUC_LOG_CRASH_MASK (0x3 << GUC_LOG_CRASH_SHIFT)
-#define GUC_LOG_DEBUG_SHIFT 6
-#define GUC_LOG_DEBUG_MASK (0xF << GUC_LOG_DEBUG_SHIFT)
-#define GUC_LOG_CAPTURE_SHIFT 10
-#define GUC_LOG_CAPTURE_MASK (0x3 << GUC_LOG_CAPTURE_SHIFT)
-#define GUC_LOG_BUF_ADDR_SHIFT 12
+#define GUC_LOG_CRASH REG_GENMASK(5, 4)
+#define GUC_LOG_DEBUG REG_GENMASK(9, 6)
+#define GUC_LOG_CAPTURE REG_GENMASK(11, 10)
+#define GUC_LOG_BUF_ADDR REG_GENMASK(31, 12)
#define GUC_CTL_WA 1
#define GUC_WA_GAM_CREDITS BIT(10)
@@ -103,28 +107,23 @@ struct guc_update_exec_queue_policy {
#define GUC_WA_RENDER_RST_RC6_EXIT BIT(19)
#define GUC_WA_RCS_REGS_IN_CCS_REGS_LIST BIT(21)
#define GUC_WA_ENABLE_TSC_CHECK_ON_RC6 BIT(22)
+#define GUC_WA_SAVE_RESTORE_MCFG_REG_AT_MC6 BIT(25)
#define GUC_CTL_FEATURE 2
#define GUC_CTL_ENABLE_SLPC BIT(2)
#define GUC_CTL_ENABLE_LITE_RESTORE BIT(4)
+#define GUC_CTL_ENABLE_PSMI_LOGGING BIT(7)
#define GUC_CTL_DISABLE_SCHEDULER BIT(14)
#define GUC_CTL_DEBUG 3
-#define GUC_LOG_VERBOSITY_SHIFT 0
-#define GUC_LOG_VERBOSITY_LOW (0 << GUC_LOG_VERBOSITY_SHIFT)
-#define GUC_LOG_VERBOSITY_MED (1 << GUC_LOG_VERBOSITY_SHIFT)
-#define GUC_LOG_VERBOSITY_HIGH (2 << GUC_LOG_VERBOSITY_SHIFT)
-#define GUC_LOG_VERBOSITY_ULTRA (3 << GUC_LOG_VERBOSITY_SHIFT)
-#define GUC_LOG_VERBOSITY_MIN 0
+#define GUC_LOG_VERBOSITY REG_GENMASK(1, 0)
#define GUC_LOG_VERBOSITY_MAX 3
-#define GUC_LOG_VERBOSITY_MASK 0x0000000f
-#define GUC_LOG_DESTINATION_MASK (3 << 4)
-#define GUC_LOG_DISABLED (1 << 6)
-#define GUC_PROFILE_ENABLED (1 << 7)
+#define GUC_LOG_DESTINATION REG_GENMASK(5, 4)
+#define GUC_LOG_DISABLED BIT(6)
+#define GUC_PROFILE_ENABLED BIT(7)
#define GUC_CTL_ADS 4
-#define GUC_ADS_ADDR_SHIFT 1
-#define GUC_ADS_ADDR_MASK (0xFFFFF << GUC_ADS_ADDR_SHIFT)
+#define GUC_ADS_ADDR REG_GENMASK(21, 1)
#define GUC_CTL_DEVID 5
diff --git a/drivers/gpu/drm/xe/xe_guc_log.h b/drivers/gpu/drm/xe/xe_guc_log.h
index f1e2b0be90a9..98a47ac42b08 100644
--- a/drivers/gpu/drm/xe/xe_guc_log.h
+++ b/drivers/gpu/drm/xe/xe_guc_log.h
@@ -17,7 +17,7 @@ struct xe_device;
#define DEBUG_BUFFER_SIZE SZ_8M
#define CAPTURE_BUFFER_SIZE SZ_2M
#else
-#define CRASH_BUFFER_SIZE SZ_8K
+#define CRASH_BUFFER_SIZE SZ_16K
#define DEBUG_BUFFER_SIZE SZ_64K
#define CAPTURE_BUFFER_SIZE SZ_1M
#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c
index 68b192fe3b32..53fdf59524c4 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc.c
+++ b/drivers/gpu/drm/xe/xe_guc_pc.c
@@ -79,6 +79,11 @@
* Xe driver enables SLPC with all of its defaults features and frequency
* selection, which varies per platform.
*
+ * Power profiles add another level of control to SLPC. When the power-saving
+ * profile is chosen, SLPC uses conservative thresholds to ramp frequency,
+ * thus saving power. The base profile is the default and ensures balanced
+ * performance for any workload.
+ *
* Render-C States:
* ================
*
@@ -722,7 +727,7 @@ static int xe_guc_pc_set_max_freq_locked(struct xe_guc_pc *pc, u32 freq)
*/
int xe_guc_pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
{
- if (XE_WA(pc_to_gt(pc), 22019338487)) {
+ if (XE_GT_WA(pc_to_gt(pc), 22019338487)) {
if (wait_for_flush_complete(pc) != 0)
return -EAGAIN;
}
@@ -835,7 +840,7 @@ static u32 pc_max_freq_cap(struct xe_guc_pc *pc)
{
struct xe_gt *gt = pc_to_gt(pc);
- if (XE_WA(gt, 22019338487)) {
+ if (XE_GT_WA(gt, 22019338487)) {
if (xe_gt_is_media_type(gt))
return min(LNL_MERT_FREQ_CAP, pc->rp0_freq);
else
@@ -899,7 +904,7 @@ static int pc_adjust_freq_bounds(struct xe_guc_pc *pc)
if (pc_get_min_freq(pc) > pc->rp0_freq)
ret = pc_set_min_freq(pc, pc->rp0_freq);
- if (XE_WA(tile->primary_gt, 14022085890))
+ if (XE_GT_WA(tile->primary_gt, 14022085890))
ret = pc_set_min_freq(pc, max(BMG_MIN_FREQ, pc_get_min_freq(pc)));
out:
@@ -931,7 +936,7 @@ static bool needs_flush_freq_limit(struct xe_guc_pc *pc)
{
struct xe_gt *gt = pc_to_gt(pc);
- return XE_WA(gt, 22019338487) &&
+ return XE_GT_WA(gt, 22019338487) &&
pc->rp0_freq > BMG_MERT_FLUSH_FREQ_CAP;
}
@@ -1017,7 +1022,7 @@ static int pc_set_mert_freq_cap(struct xe_guc_pc *pc)
{
int ret;
- if (!XE_WA(pc_to_gt(pc), 22019338487))
+ if (!XE_GT_WA(pc_to_gt(pc), 22019338487))
return 0;
guard(mutex)(&pc->freq_lock);
@@ -1076,7 +1081,6 @@ int xe_guc_pc_gucrc_disable(struct xe_guc_pc *pc)
{
struct xe_device *xe = pc_to_xe(pc);
struct xe_gt *gt = pc_to_gt(pc);
- unsigned int fw_ref;
int ret = 0;
if (xe->info.skip_guc_pc)
@@ -1086,17 +1090,7 @@ int xe_guc_pc_gucrc_disable(struct xe_guc_pc *pc)
if (ret)
return ret;
- fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
- if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
- xe_force_wake_put(gt_to_fw(gt), fw_ref);
- return -ETIMEDOUT;
- }
-
- xe_gt_idle_disable_c6(gt);
-
- xe_force_wake_put(gt_to_fw(gt), fw_ref);
-
- return 0;
+ return xe_gt_idle_disable_c6(gt);
}
/**
@@ -1182,6 +1176,61 @@ static int pc_action_set_strategy(struct xe_guc_pc *pc, u32 val)
return ret;
}
+static const char *power_profile_to_string(struct xe_guc_pc *pc)
+{
+ switch (pc->power_profile) {
+ case SLPC_POWER_PROFILE_BASE:
+ return "base";
+ case SLPC_POWER_PROFILE_POWER_SAVING:
+ return "power_saving";
+ default:
+ return "invalid";
+ }
+}
+
+void xe_guc_pc_get_power_profile(struct xe_guc_pc *pc, char *profile)
+{
+ switch (pc->power_profile) {
+ case SLPC_POWER_PROFILE_BASE:
+ sprintf(profile, "[%s] %s\n", "base", "power_saving");
+ break;
+ case SLPC_POWER_PROFILE_POWER_SAVING:
+ sprintf(profile, "%s [%s]\n", "base", "power_saving");
+ break;
+ default:
+ sprintf(profile, "invalid");
+ }
+}
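
The bracket marks the currently active profile in the returned listing (expected output under the two states):

	char buf[32];

	xe_guc_pc_get_power_profile(pc, buf);
	/* base active:         "[base] power_saving\n" */
	/* power_saving active: "base [power_saving]\n" */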
+
+int xe_guc_pc_set_power_profile(struct xe_guc_pc *pc, const char *buf)
+{
+ int ret = 0;
+ u32 val;
+
+ if (strncmp("base", buf, strlen("base")) == 0)
+ val = SLPC_POWER_PROFILE_BASE;
+ else if (strncmp("power_saving", buf, strlen("power_saving")) == 0)
+ val = SLPC_POWER_PROFILE_POWER_SAVING;
+ else
+ return -EINVAL;
+
+ guard(mutex)(&pc->freq_lock);
+ xe_pm_runtime_get_noresume(pc_to_xe(pc));
+
+ ret = pc_action_set_param(pc,
+ SLPC_PARAM_POWER_PROFILE,
+ val);
+ if (ret)
+ xe_gt_err_once(pc_to_gt(pc), "Failed to set power profile to %d: %pe\n",
+ val, ERR_PTR(ret));
+ else
+ pc->power_profile = val;
+
+ xe_pm_runtime_put(pc_to_xe(pc));
+
+ return ret;
+}
+
/**
* xe_guc_pc_start - Start GuC's Power Conservation component
* @pc: Xe_GuC_PC instance
@@ -1260,6 +1309,11 @@ int xe_guc_pc_start(struct xe_guc_pc *pc)
/* Enable SLPC Optimized Strategy for compute */
ret = pc_action_set_strategy(pc, SLPC_OPTIMIZED_STRATEGY_COMPUTE);
+ /* Set cached value of power_profile */
+ ret = xe_guc_pc_set_power_profile(pc, power_profile_to_string(pc));
+ if (unlikely(ret))
+ xe_gt_err(gt, "Failed to set SLPC power profile: %pe\n", ERR_PTR(ret));
+
out:
xe_force_wake_put(gt_to_fw(gt), fw_ref);
return ret;
@@ -1338,6 +1392,8 @@ int xe_guc_pc_init(struct xe_guc_pc *pc)
pc->bo = bo;
+ pc->power_profile = SLPC_POWER_PROFILE_BASE;
+
return devm_add_action_or_reset(xe->drm.dev, xe_guc_pc_fini_hw, pc);
}
diff --git a/drivers/gpu/drm/xe/xe_guc_pc.h b/drivers/gpu/drm/xe/xe_guc_pc.h
index 52ecdd5ddbff..0e31396f103c 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc.h
+++ b/drivers/gpu/drm/xe/xe_guc_pc.h
@@ -31,6 +31,8 @@ int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq);
int xe_guc_pc_set_min_freq(struct xe_guc_pc *pc, u32 freq);
int xe_guc_pc_get_max_freq(struct xe_guc_pc *pc, u32 *freq);
int xe_guc_pc_set_max_freq(struct xe_guc_pc *pc, u32 freq);
+int xe_guc_pc_set_power_profile(struct xe_guc_pc *pc, const char *buf);
+void xe_guc_pc_get_power_profile(struct xe_guc_pc *pc, char *profile);
enum xe_gt_idle_state xe_guc_pc_c_status(struct xe_guc_pc *pc);
u64 xe_guc_pc_rc6_residency(struct xe_guc_pc *pc);
diff --git a/drivers/gpu/drm/xe/xe_guc_pc_types.h b/drivers/gpu/drm/xe/xe_guc_pc_types.h
index c02053948a57..5e4ea53fbee6 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_pc_types.h
@@ -37,6 +37,8 @@ struct xe_guc_pc {
struct mutex freq_lock;
/** @freq_ready: Only handle freq changes, if they are really ready */
bool freq_ready;
+ /** @power_profile: Base or power_saving profile */
+ u32 power_profile;
};
#endif /* _XE_GUC_PC_TYPES_H_ */
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index 0104afbc941c..53024eb5670b 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -355,7 +355,7 @@ static int guc_init_global_schedule_policy(struct xe_guc *guc)
ret = xe_guc_ct_send_block(&guc->ct, data, count);
if (ret < 0) {
xe_gt_err(guc_to_gt(guc),
- "failed to enable GuC sheduling policies: %pe\n",
+ "failed to enable GuC scheduling policies: %pe\n",
ERR_PTR(ret));
return ret;
}
@@ -608,7 +608,7 @@ static void __register_exec_queue(struct xe_guc *guc,
xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action), 0, 0);
}
-static void register_exec_queue(struct xe_exec_queue *q)
+static void register_exec_queue(struct xe_exec_queue *q, int ctx_type)
{
struct xe_guc *guc = exec_queue_to_guc(q);
struct xe_device *xe = guc_to_xe(guc);
@@ -616,6 +616,7 @@ static void register_exec_queue(struct xe_exec_queue *q)
struct guc_ctxt_registration_info info;
xe_gt_assert(guc_to_gt(guc), !exec_queue_registered(q));
+ xe_gt_assert(guc_to_gt(guc), ctx_type < GUC_CONTEXT_COUNT);
memset(&info, 0, sizeof(info));
info.context_idx = q->guc->id;
@@ -623,7 +624,8 @@ static void register_exec_queue(struct xe_exec_queue *q)
info.engine_submit_mask = q->logical_mask;
info.hwlrca_lo = lower_32_bits(xe_lrc_descriptor(lrc));
info.hwlrca_hi = upper_32_bits(xe_lrc_descriptor(lrc));
- info.flags = CONTEXT_REGISTRATION_FLAG_KMD;
+ info.flags = CONTEXT_REGISTRATION_FLAG_KMD |
+ FIELD_PREP(CONTEXT_REGISTRATION_FLAG_TYPE, ctx_type);
if (xe_exec_queue_is_parallel(q)) {
u64 ggtt_addr = xe_lrc_parallel_ggtt_addr(lrc);
@@ -733,12 +735,18 @@ static void wq_item_append(struct xe_exec_queue *q)
if (wq_wait_for_space(q, wqi_size))
return;
+ xe_gt_assert(guc_to_gt(guc), i == XE_GUC_CONTEXT_WQ_HEADER_DATA_0_TYPE_LEN);
wqi[i++] = FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_MULTI_LRC) |
FIELD_PREP(WQ_LEN_MASK, len_dw);
+ xe_gt_assert(guc_to_gt(guc), i == XE_GUC_CONTEXT_WQ_EL_INFO_DATA_1_CTX_DESC_LOW);
wqi[i++] = xe_lrc_descriptor(q->lrc[0]);
+ xe_gt_assert(guc_to_gt(guc), i ==
+ XE_GUC_CONTEXT_WQ_EL_INFO_DATA_2_GUCCTX_RINGTAIL_FREEZEPOCS);
wqi[i++] = FIELD_PREP(WQ_GUC_ID_MASK, q->guc->id) |
FIELD_PREP(WQ_RING_TAIL_MASK, q->lrc[0]->ring.tail / sizeof(u64));
+ xe_gt_assert(guc_to_gt(guc), i == XE_GUC_CONTEXT_WQ_EL_INFO_DATA_3_WI_FENCE_ID);
wqi[i++] = 0;
+ xe_gt_assert(guc_to_gt(guc), i == XE_GUC_CONTEXT_WQ_EL_CHILD_LIST_DATA_4_RINGTAIL);
for (j = 1; j < q->width; ++j) {
struct xe_lrc *lrc = q->lrc[j];
@@ -759,6 +767,50 @@ static void wq_item_append(struct xe_exec_queue *q)
parallel_write(xe, map, wq_desc.tail, q->guc->wqi_tail);
}
+static int wq_items_rebase(struct xe_exec_queue *q)
+{
+ struct xe_guc *guc = exec_queue_to_guc(q);
+ struct xe_device *xe = guc_to_xe(guc);
+ struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]);
+ int i = q->guc->wqi_head;
+
+ /* the ring starts after a header struct */
+ iosys_map_incr(&map, offsetof(struct guc_submit_parallel_scratch, wq[0]));
+
+ while ((i % WQ_SIZE) != (q->guc->wqi_tail % WQ_SIZE)) {
+ u32 len_dw, type, val;
+
+ if (drm_WARN_ON_ONCE(&xe->drm, i < 0 || i > 2 * WQ_SIZE))
+ break;
+
+ val = xe_map_rd_ring_u32(xe, &map, i / sizeof(u32) +
+ XE_GUC_CONTEXT_WQ_HEADER_DATA_0_TYPE_LEN,
+ WQ_SIZE / sizeof(u32));
+ len_dw = FIELD_GET(WQ_LEN_MASK, val);
+ type = FIELD_GET(WQ_TYPE_MASK, val);
+
+ if (drm_WARN_ON_ONCE(&xe->drm, len_dw >= WQ_SIZE / sizeof(u32)))
+ break;
+
+ if (type == WQ_TYPE_MULTI_LRC) {
+ val = xe_lrc_descriptor(q->lrc[0]);
+ xe_map_wr_ring_u32(xe, &map, i / sizeof(u32) +
+ XE_GUC_CONTEXT_WQ_EL_INFO_DATA_1_CTX_DESC_LOW,
+ WQ_SIZE / sizeof(u32), val);
+ } else if (drm_WARN_ON_ONCE(&xe->drm, type != WQ_TYPE_NOOP)) {
+ break;
+ }
+
+ i += (len_dw + 1) * sizeof(u32);
+ }
+
+ if ((i % WQ_SIZE) != (q->guc->wqi_tail % WQ_SIZE)) {
+ xe_gt_err(q->gt, "Exec queue fixups incomplete - wqi parse failed\n");
+ return -EBADMSG;
+ }
+ return 0;
+}
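+
+/*
+ * A minimal sketch of the circular walk performed by wq_items_rebase()
+ * above, on a plain local buffer rather than an iosys_map. WQ_SIZE and
+ * WQ_LEN_MASK are the same ABI values used above; wq_walk_example() and
+ * its parameters are illustrative only, not driver code.
+ */
+static int wq_walk_example(const u32 *wq, u32 head, u32 tail)
+{
+	u32 i = head;
+
+	while ((i % WQ_SIZE) != (tail % WQ_SIZE)) {
+		u32 val = wq[(i % WQ_SIZE) / sizeof(u32)];
+		u32 len_dw = FIELD_GET(WQ_LEN_MASK, val);
+
+		/* a corrupt length would make the walk diverge; stop early */
+		if (len_dw >= WQ_SIZE / sizeof(u32))
+			return -EBADMSG;
+
+		/* each item is a one-dword header plus len_dw payload dwords */
+		i += (len_dw + 1) * sizeof(u32);
+	}
+
+	return 0;
+}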
+
#define RESUME_PENDING ~0x0ull
static void submit_exec_queue(struct xe_exec_queue *q)
{
@@ -827,7 +879,7 @@ guc_exec_queue_run_job(struct drm_sched_job *drm_job)
if (!exec_queue_killed_or_banned_or_wedged(q) && !xe_sched_job_is_error(job)) {
if (!exec_queue_registered(q))
- register_exec_queue(q);
+ register_exec_queue(q, GUC_CONTEXT_NORMAL);
if (!lr) /* LR jobs are emitted in the exec IOCTL */
q->ring_ops->emit_job(job);
submit_exec_queue(q);
@@ -843,6 +895,30 @@ guc_exec_queue_run_job(struct drm_sched_job *drm_job)
return fence;
}
+/**
+ * xe_guc_jobs_ring_rebase - Re-emit ring commands of requests pending
+ * on all queues under a GuC.
+ * @guc: the &xe_guc struct instance
+ */
+void xe_guc_jobs_ring_rebase(struct xe_guc *guc)
+{
+ struct xe_exec_queue *q;
+ unsigned long index;
+
+ /*
+ * This routine is used within VF migration recovery. This means
+ * using the lock here introduces a restriction: we cannot wait
+ * for any GFX HW response while the lock is taken.
+ */
+ mutex_lock(&guc->submission_state.lock);
+ xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
+ if (exec_queue_killed_or_banned_or_wedged(q))
+ continue;
+ xe_exec_queue_jobs_ring_restore(q);
+ }
+ mutex_unlock(&guc->submission_state.lock);
+}
+
static void guc_exec_queue_free_job(struct drm_sched_job *drm_job)
{
struct xe_sched_job *job = to_xe_sched_job(drm_job);
@@ -1849,6 +1925,43 @@ static void guc_exec_queue_stop(struct xe_guc *guc, struct xe_exec_queue *q)
}
}
+/**
+ * xe_guc_submit_reset_block - Disallow reset calls on given GuC.
+ * @guc: the &xe_guc struct instance
+ *
+ * Return: %0 if resets were not blocked before this call, non-zero otherwise.
+ */
+int xe_guc_submit_reset_block(struct xe_guc *guc)
+{
+ return atomic_fetch_or(1, &guc->submission_state.reset_blocked);
+}
+
+/**
+ * xe_guc_submit_reset_unblock - Allow reset calls on given GuC again.
+ * @guc: the &xe_guc struct instance
+ */
+void xe_guc_submit_reset_unblock(struct xe_guc *guc)
+{
+ atomic_set_release(&guc->submission_state.reset_blocked, 0);
+ wake_up_all(&guc->ct.wq);
+}
+
+static int guc_submit_reset_is_blocked(struct xe_guc *guc)
+{
+ return atomic_read_acquire(&guc->submission_state.reset_blocked);
+}
+
+/* Maximum duration for which resets may stay blocked */
+#define RESET_BLOCK_PERIOD_MAX (HZ * 5)
+
+/**
+ * xe_guc_wait_reset_unblock - Wait until reset blocking flag is lifted, or timeout.
+ * @guc: the &xe_guc struct instance
+ *
+ * Return: %0 if the timeout elapsed with resets still blocked, otherwise the
+ * number of remaining jiffies (at least %1).
+ */
+int xe_guc_wait_reset_unblock(struct xe_guc *guc)
+{
+ return wait_event_timeout(guc->ct.wq,
+ !guc_submit_reset_is_blocked(guc), RESET_BLOCK_PERIOD_MAX);
+}
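+
+/*
+ * A minimal usage sketch for the reset-block API, assuming a caller that
+ * must keep GuC state stable across a short reset-sensitive operation;
+ * do_reset_sensitive_op() is a hypothetical helper, not driver code. The
+ * reset path itself would call xe_guc_wait_reset_unblock() before
+ * proceeding.
+ */
+static int do_reset_sensitive_op(struct xe_guc *guc)
+{
+	/* non-zero means someone else already holds the block */
+	if (xe_guc_submit_reset_block(guc))
+		return -EBUSY;
+
+	/* ... perform the reset-sensitive operation here ... */
+
+	xe_guc_submit_reset_unblock(guc);
+	return 0;
+}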
+
int xe_guc_submit_reset_prepare(struct xe_guc *guc)
{
int ret;
@@ -1902,6 +2015,19 @@ void xe_guc_submit_stop(struct xe_guc *guc)
}
+/**
+ * xe_guc_submit_pause - Stop further runs of submission tasks on given GuC.
+ * @guc: the &xe_guc struct instance whose scheduler is to be disabled
+ */
+void xe_guc_submit_pause(struct xe_guc *guc)
+{
+ struct xe_exec_queue *q;
+ unsigned long index;
+
+ xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
+ xe_sched_submission_stop_async(&q->guc->sched);
+}
+
static void guc_exec_queue_start(struct xe_exec_queue *q)
{
struct xe_gpu_scheduler *sched = &q->guc->sched;
@@ -1942,6 +2068,28 @@ int xe_guc_submit_start(struct xe_guc *guc)
return 0;
}
+static void guc_exec_queue_unpause(struct xe_exec_queue *q)
+{
+ struct xe_gpu_scheduler *sched = &q->guc->sched;
+
+ xe_sched_submission_start(sched);
+}
+
+/**
+ * xe_guc_submit_unpause - Allow further runs of submission tasks on given GuC.
+ * @guc: the &xe_guc struct instance whose scheduler is to be enabled
+ */
+void xe_guc_submit_unpause(struct xe_guc *guc)
+{
+ struct xe_exec_queue *q;
+ unsigned long index;
+
+ xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
+ guc_exec_queue_unpause(q);
+
+ wake_up_all(&guc->ct.wq);
+}
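+
+/*
+ * A hypothetical ordering sketch combining the pause/fixup/unpause helpers
+ * introduced here. The real VF migration recovery sequence lives elsewhere;
+ * this only illustrates the intended pairing of the calls, assuming a caller
+ * that has already allocated a scratch buffer.
+ */
+static int recovery_fixups_example(struct xe_guc *guc, void *scratch)
+{
+	int err;
+
+	xe_guc_submit_pause(guc);
+
+	err = xe_guc_contexts_hwsp_rebase(guc, scratch);
+	if (!err)
+		xe_guc_jobs_ring_rebase(guc);
+
+	xe_guc_submit_unpause(guc);
+
+	return err;
+}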
+
static struct xe_exec_queue *
g2h_exec_queue_lookup(struct xe_guc *guc, u32 guc_id)
{
@@ -1955,7 +2103,7 @@ g2h_exec_queue_lookup(struct xe_guc *guc, u32 guc_id)
q = xa_load(&guc->submission_state.exec_queue_lookup, guc_id);
if (unlikely(!q)) {
- xe_gt_err(gt, "Not engine present for guc_id %u\n", guc_id);
+ xe_gt_err(gt, "No exec queue found for guc_id %u\n", guc_id);
return NULL;
}
@@ -2454,6 +2602,34 @@ static void guc_exec_queue_print(struct xe_exec_queue *q, struct drm_printer *p)
}
/**
+ * xe_guc_register_vf_exec_queue - Register exec queue for a given context type.
+ * @q: Execution queue
+ * @ctx_type: Type of the context
+ *
+ * This function registers the execution queue with the GuC. Special context
+ * types such as GUC_CONTEXT_COMPRESSION_SAVE and GUC_CONTEXT_COMPRESSION_RESTORE
+ * are only applicable to integrated GPUs and only when running as an SR-IOV VF.
+ * After registration, the execution queue is submitted to the GuC by enabling
+ * scheduling on it.
+ *
+ * Return: none.
+ */
+void xe_guc_register_vf_exec_queue(struct xe_exec_queue *q, int ctx_type)
+{
+ struct xe_guc *guc = exec_queue_to_guc(q);
+ struct xe_device *xe = guc_to_xe(guc);
+ struct xe_gt *gt = guc_to_gt(guc);
+
+ xe_gt_assert(gt, IS_SRIOV_VF(xe));
+ xe_gt_assert(gt, !IS_DGFX(xe));
+ xe_gt_assert(gt, ctx_type == GUC_CONTEXT_COMPRESSION_SAVE ||
+ ctx_type == GUC_CONTEXT_COMPRESSION_RESTORE);
+ xe_gt_assert(gt, GUC_SUBMIT_VER(guc) >= MAKE_GUC_VER(1, 23, 0));
+
+ register_exec_queue(q, ctx_type);
+ enable_scheduling(q);
+}
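+
+/*
+ * Usage sketch, assuming a VF-only compression save/restore helper that owns
+ * dedicated exec queues; vf_ccs_register_example() is illustrative only.
+ */
+static void vf_ccs_register_example(struct xe_exec_queue *q, bool save)
+{
+	xe_guc_register_vf_exec_queue(q, save ? GUC_CONTEXT_COMPRESSION_SAVE :
+				      GUC_CONTEXT_COMPRESSION_RESTORE);
+}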
+
+/**
* xe_guc_submit_print - GuC Submit Print.
* @guc: GuC.
* @p: drm_printer where it will be printed out.
@@ -2473,3 +2649,32 @@ void xe_guc_submit_print(struct xe_guc *guc, struct drm_printer *p)
guc_exec_queue_print(q, p);
mutex_unlock(&guc->submission_state.lock);
}
+
+/**
+ * xe_guc_contexts_hwsp_rebase - Re-compute GGTT references within all
+ * exec queues registered to given GuC.
+ * @guc: the &xe_guc struct instance
+ * @scratch: scratch buffer to be used as temporary storage
+ *
+ * Returns: zero on success, negative error code on failure.
+ */
+int xe_guc_contexts_hwsp_rebase(struct xe_guc *guc, void *scratch)
+{
+ struct xe_exec_queue *q;
+ unsigned long index;
+ int err = 0;
+
+ mutex_lock(&guc->submission_state.lock);
+ xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
+ err = xe_exec_queue_contexts_hwsp_rebase(q, scratch);
+ if (err)
+ break;
+ if (xe_exec_queue_is_parallel(q))
+ err = wq_items_rebase(q);
+ if (err)
+ break;
+ }
+ mutex_unlock(&guc->submission_state.lock);
+
+ return err;
+}
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.h b/drivers/gpu/drm/xe/xe_guc_submit.h
index 0d126b807c10..78c3f07e31a0 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.h
+++ b/drivers/gpu/drm/xe/xe_guc_submit.h
@@ -20,6 +20,11 @@ int xe_guc_submit_reset_prepare(struct xe_guc *guc);
void xe_guc_submit_reset_wait(struct xe_guc *guc);
void xe_guc_submit_stop(struct xe_guc *guc);
int xe_guc_submit_start(struct xe_guc *guc);
+void xe_guc_submit_pause(struct xe_guc *guc);
+void xe_guc_submit_unpause(struct xe_guc *guc);
+int xe_guc_submit_reset_block(struct xe_guc *guc);
+void xe_guc_submit_reset_unblock(struct xe_guc *guc);
+int xe_guc_wait_reset_unblock(struct xe_guc *guc);
void xe_guc_submit_wedge(struct xe_guc *guc);
int xe_guc_read_stopped(struct xe_guc *guc);
@@ -31,6 +36,8 @@ int xe_guc_exec_queue_memory_cat_error_handler(struct xe_guc *guc, u32 *msg,
int xe_guc_exec_queue_reset_failure_handler(struct xe_guc *guc, u32 *msg, u32 len);
int xe_guc_error_capture_handler(struct xe_guc *guc, u32 *msg, u32 len);
+void xe_guc_jobs_ring_rebase(struct xe_guc *guc);
+
struct xe_guc_submit_exec_queue_snapshot *
xe_guc_exec_queue_snapshot_capture(struct xe_exec_queue *q);
void
@@ -41,5 +48,8 @@ xe_guc_exec_queue_snapshot_print(struct xe_guc_submit_exec_queue_snapshot *snaps
void
xe_guc_exec_queue_snapshot_free(struct xe_guc_submit_exec_queue_snapshot *snapshot);
void xe_guc_submit_print(struct xe_guc *guc, struct drm_printer *p);
+void xe_guc_register_vf_exec_queue(struct xe_exec_queue *q, int ctx_type);
+
+int xe_guc_contexts_hwsp_rebase(struct xe_guc *guc, void *scratch);
#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_tlb_inval.c b/drivers/gpu/drm/xe/xe_guc_tlb_inval.c
new file mode 100644
index 000000000000..6bf2103602f8
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_tlb_inval.c
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include "abi/guc_actions_abi.h"
+
+#include "xe_device.h"
+#include "xe_gt_stats.h"
+#include "xe_gt_types.h"
+#include "xe_guc.h"
+#include "xe_guc_ct.h"
+#include "xe_guc_tlb_inval.h"
+#include "xe_force_wake.h"
+#include "xe_mmio.h"
+#include "xe_tlb_inval.h"
+
+#include "regs/xe_guc_regs.h"
+
+/*
+ * XXX: The seqno algorithm relies on TLB invalidations being processed by the
+ * GuC in the order they are issued, which is currently the case. If that
+ * changes, the algorithm will need to be updated.
+ */
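+
+/*
+ * A minimal sketch of why ordered delivery matters: with in-order completions
+ * a single watermark suffices, and every fence with a seqno at or below it can
+ * be signalled on one G2H message. seqno_done_example() is illustrative; the
+ * real bookkeeping lives in xe_tlb_inval.
+ */
+static void seqno_done_example(u32 *last_done, u32 done_seqno)
+{
+	/* wrap-safe: done_seqno can only move the watermark forward */
+	if ((s32)(done_seqno - *last_done) > 0)
+		*last_done = done_seqno;
+}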
+
+static int send_tlb_inval(struct xe_guc *guc, const u32 *action, int len)
+{
+ struct xe_gt *gt = guc_to_gt(guc);
+
+ xe_gt_assert(gt, action[1]); /* Seqno */
+
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_TLB_INVAL, 1);
+ return xe_guc_ct_send(&guc->ct, action, len,
+ G2H_LEN_DW_TLB_INVALIDATE, 1);
+}
+
+#define MAKE_INVAL_OP(type) ((type << XE_GUC_TLB_INVAL_TYPE_SHIFT) | \
+ XE_GUC_TLB_INVAL_MODE_HEAVY << XE_GUC_TLB_INVAL_MODE_SHIFT | \
+ XE_GUC_TLB_INVAL_FLUSH_CACHE)
+
+static int send_tlb_inval_all(struct xe_tlb_inval *tlb_inval, u32 seqno)
+{
+ struct xe_guc *guc = tlb_inval->private;
+ u32 action[] = {
+ XE_GUC_ACTION_TLB_INVALIDATION_ALL,
+ seqno,
+ MAKE_INVAL_OP(XE_GUC_TLB_INVAL_FULL),
+ };
+
+ return send_tlb_inval(guc, action, ARRAY_SIZE(action));
+}
+
+static int send_tlb_inval_ggtt(struct xe_tlb_inval *tlb_inval, u32 seqno)
+{
+ struct xe_guc *guc = tlb_inval->private;
+ struct xe_gt *gt = guc_to_gt(guc);
+ struct xe_device *xe = guc_to_xe(guc);
+
+ /*
+	 * A return of -ECANCELED from this function is squashed by the caller,
+	 * which then signals the waiters.
+ */
+
+ if (xe_guc_ct_enabled(&guc->ct) && guc->submission_state.enabled) {
+ u32 action[] = {
+ XE_GUC_ACTION_TLB_INVALIDATION,
+ seqno,
+ MAKE_INVAL_OP(XE_GUC_TLB_INVAL_GUC),
+ };
+
+ return send_tlb_inval(guc, action, ARRAY_SIZE(action));
+ } else if (xe_device_uc_enabled(xe) && !xe_device_wedged(xe)) {
+ struct xe_mmio *mmio = &gt->mmio;
+ unsigned int fw_ref;
+
+ if (IS_SRIOV_VF(xe))
+ return -ECANCELED;
+
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (xe->info.platform == XE_PVC || GRAPHICS_VER(xe) >= 20) {
+ xe_mmio_write32(mmio, PVC_GUC_TLB_INV_DESC1,
+ PVC_GUC_TLB_INV_DESC1_INVALIDATE);
+ xe_mmio_write32(mmio, PVC_GUC_TLB_INV_DESC0,
+ PVC_GUC_TLB_INV_DESC0_VALID);
+ } else {
+ xe_mmio_write32(mmio, GUC_TLB_INV_CR,
+ GUC_TLB_INV_CR_INVALIDATE);
+ }
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ }
+
+ return -ECANCELED;
+}
+
+/*
+ * Ensure that roundup_pow_of_two(length) doesn't overflow.
+ * Note that roundup_pow_of_two() operates on unsigned long,
+ * not on u64.
+ */
+#define MAX_RANGE_TLB_INVALIDATION_LENGTH (rounddown_pow_of_two(ULONG_MAX))
+
+static int send_tlb_inval_ppgtt(struct xe_tlb_inval *tlb_inval, u32 seqno,
+ u64 start, u64 end, u32 asid)
+{
+#define MAX_TLB_INVALIDATION_LEN 7
+ struct xe_guc *guc = tlb_inval->private;
+ struct xe_gt *gt = guc_to_gt(guc);
+ u32 action[MAX_TLB_INVALIDATION_LEN];
+ u64 length = end - start;
+ int len = 0;
+
+ if (guc_to_xe(guc)->info.force_execlist)
+ return -ECANCELED;
+
+ action[len++] = XE_GUC_ACTION_TLB_INVALIDATION;
+ action[len++] = seqno;
+ if (!gt_to_xe(gt)->info.has_range_tlb_inval ||
+ length > MAX_RANGE_TLB_INVALIDATION_LENGTH) {
+ action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_FULL);
+ } else {
+ u64 orig_start = start;
+ u64 align;
+
+ if (length < SZ_4K)
+ length = SZ_4K;
+
+ /*
+		 * We must invalidate at a coarser granularity when the start
+		 * address is not aligned to the length: grow the length until
+		 * the resulting address mask covers the whole requested range.
+ */
+ align = roundup_pow_of_two(length);
+ start = ALIGN_DOWN(start, align);
+ end = ALIGN(end, align);
+ length = align;
+ while (start + length < end) {
+ length <<= 1;
+ start = ALIGN_DOWN(orig_start, length);
+ }
+
+ /*
+		 * The minimum invalidation size the hardware expects for a
+		 * 2M page is 16M.
+ */
+ if (length >= SZ_2M) {
+ length = max_t(u64, SZ_16M, length);
+ start = ALIGN_DOWN(orig_start, length);
+ }
+
+ xe_gt_assert(gt, length >= SZ_4K);
+ xe_gt_assert(gt, is_power_of_2(length));
+ xe_gt_assert(gt, !(length & GENMASK(ilog2(SZ_16M) - 1,
+ ilog2(SZ_2M) + 1)));
+ xe_gt_assert(gt, IS_ALIGNED(start, length));
+
+ action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_PAGE_SELECTIVE);
+ action[len++] = asid;
+ action[len++] = lower_32_bits(start);
+ action[len++] = upper_32_bits(start);
+ action[len++] = ilog2(length) - ilog2(SZ_4K);
+ }
+
+ xe_gt_assert(gt, len <= MAX_TLB_INVALIDATION_LEN);
+
+ return send_tlb_inval(guc, action, len);
+}
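+
+/*
+ * A worked example of the widening above, with illustrative numbers:
+ * start = 0x11000, end = 0x14000, so length = 0x3000. Rounding up gives
+ * align = 0x4000, start becomes ALIGN_DOWN(0x11000, 0x4000) = 0x10000 and
+ * end becomes ALIGN(0x14000, 0x4000) = 0x14000; 0x10000 + 0x4000 already
+ * reaches end, so a single 16K invalidation at 0x10000 is sent, with
+ * ilog2(0x4000) - ilog2(SZ_4K) = 2 in the encoded size field.
+ */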
+
+static bool tlb_inval_initialized(struct xe_tlb_inval *tlb_inval)
+{
+ struct xe_guc *guc = tlb_inval->private;
+
+ return xe_guc_ct_initialized(&guc->ct);
+}
+
+static void tlb_inval_flush(struct xe_tlb_inval *tlb_inval)
+{
+ struct xe_guc *guc = tlb_inval->private;
+
+ LNL_FLUSH_WORK(&guc->ct.g2h_worker);
+}
+
+static long tlb_inval_timeout_delay(struct xe_tlb_inval *tlb_inval)
+{
+ struct xe_guc *guc = tlb_inval->private;
+
+	/* this reflects what HW/GuC needs to process a TLB invalidation request */
+ const long hw_tlb_timeout = HZ / 4;
+
+	/* this estimates the actual delay caused by the CTB transport */
+ long delay = xe_guc_ct_queue_proc_time_jiffies(&guc->ct);
+
+ return hw_tlb_timeout + 2 * delay;
+}
+
+static const struct xe_tlb_inval_ops guc_tlb_inval_ops = {
+ .all = send_tlb_inval_all,
+ .ggtt = send_tlb_inval_ggtt,
+ .ppgtt = send_tlb_inval_ppgtt,
+ .initialized = tlb_inval_initialized,
+ .flush = tlb_inval_flush,
+ .timeout_delay = tlb_inval_timeout_delay,
+};
+
+/**
+ * xe_guc_tlb_inval_init_early() - Init GuC TLB invalidation early
+ * @guc: GuC object
+ * @tlb_inval: TLB invalidation client
+ *
+ * Initialize GuC TLB invalidation by setting the back-pointer in the TLB
+ * invalidation client to the GuC and installing the GuC backend ops.
+ */
+void xe_guc_tlb_inval_init_early(struct xe_guc *guc,
+ struct xe_tlb_inval *tlb_inval)
+{
+ tlb_inval->private = guc;
+ tlb_inval->ops = &guc_tlb_inval_ops;
+}
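+
+/*
+ * A dispatch sketch: once init_early has run, the TLB invalidation client
+ * calls through the backend ops without knowing about the GuC. The helper
+ * below is illustrative and assumes the client layer has already allocated
+ * a seqno.
+ */
+static int tlb_inval_all_example(struct xe_tlb_inval *tlb_inval, u32 seqno)
+{
+	return tlb_inval->ops->all(tlb_inval, seqno);
+}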
+
+/**
+ * xe_guc_tlb_inval_done_handler() - TLB invalidation done handler
+ * @guc: guc
+ * @msg: message indicating TLB invalidation done
+ * @len: length of message
+ *
+ * Parse seqno of TLB invalidation, wake any waiters for seqno, and signal any
+ * invalidation fences for seqno. Algorithm for this depends on seqno being
+ * received in-order and asserts this assumption.
+ *
+ * Return: 0 on success, -EPROTO for malformed messages.
+ */
+int xe_guc_tlb_inval_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
+{
+ struct xe_gt *gt = guc_to_gt(guc);
+
+ if (unlikely(len != 1))
+ return -EPROTO;
+
+ xe_tlb_inval_done_handler(&gt->tlb_inval, msg[0]);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/xe/xe_guc_tlb_inval.h b/drivers/gpu/drm/xe/xe_guc_tlb_inval.h
new file mode 100644
index 000000000000..07d668b02e3d
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_tlb_inval.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_GUC_TLB_INVAL_H_
+#define _XE_GUC_TLB_INVAL_H_
+
+#include <linux/types.h>
+
+struct xe_guc;
+struct xe_tlb_inval;
+
+void xe_guc_tlb_inval_init_early(struct xe_guc *guc,
+ struct xe_tlb_inval *tlb_inval);
+
+int xe_guc_tlb_inval_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_types.h b/drivers/gpu/drm/xe/xe_guc_types.h
index 1fde7614fcc5..c7b9642b41ba 100644
--- a/drivers/gpu/drm/xe/xe_guc_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_types.h
@@ -85,6 +85,12 @@ struct xe_guc {
struct xarray exec_queue_lookup;
/** @submission_state.stopped: submissions are stopped */
atomic_t stopped;
+ /**
+ * @submission_state.reset_blocked: reset attempts are blocked;
+	 * blocking resets in order to delay them may be required while
+	 * running an operation that is sensitive to resets.
+ */
+ atomic_t reset_blocked;
/** @submission_state.lock: protects submission state */
struct mutex lock;
/** @submission_state.enabled: submission is enabled */
diff --git a/drivers/gpu/drm/xe/xe_heci_gsc.c b/drivers/gpu/drm/xe/xe_heci_gsc.c
index 6d7b62724126..a415ca488791 100644
--- a/drivers/gpu/drm/xe/xe_heci_gsc.c
+++ b/drivers/gpu/drm/xe/xe_heci_gsc.c
@@ -197,7 +197,7 @@ int xe_heci_gsc_init(struct xe_device *xe)
if (ret)
return ret;
- if (!def->use_polling && !xe_survivability_mode_is_enabled(xe)) {
+ if (!def->use_polling && !xe_survivability_mode_is_boot_enabled(xe)) {
ret = heci_gsc_irq_setup(xe);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/xe/xe_hmm.c b/drivers/gpu/drm/xe/xe_hmm.c
deleted file mode 100644
index 57b71956ddf4..000000000000
--- a/drivers/gpu/drm/xe/xe_hmm.c
+++ /dev/null
@@ -1,325 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2024 Intel Corporation
- */
-
-#include <linux/scatterlist.h>
-#include <linux/mmu_notifier.h>
-#include <linux/dma-mapping.h>
-#include <linux/memremap.h>
-#include <linux/swap.h>
-#include <linux/hmm.h>
-#include <linux/mm.h>
-#include "xe_hmm.h"
-#include "xe_vm.h"
-#include "xe_bo.h"
-
-static u64 xe_npages_in_range(unsigned long start, unsigned long end)
-{
- return (end - start) >> PAGE_SHIFT;
-}
-
-static int xe_alloc_sg(struct xe_device *xe, struct sg_table *st,
- struct hmm_range *range, struct rw_semaphore *notifier_sem)
-{
- unsigned long i, npages, hmm_pfn;
- unsigned long num_chunks = 0;
- int ret;
-
- /* HMM docs says this is needed. */
- ret = down_read_interruptible(notifier_sem);
- if (ret)
- return ret;
-
- if (mmu_interval_read_retry(range->notifier, range->notifier_seq)) {
- up_read(notifier_sem);
- return -EAGAIN;
- }
-
- npages = xe_npages_in_range(range->start, range->end);
- for (i = 0; i < npages;) {
- unsigned long len;
-
- hmm_pfn = range->hmm_pfns[i];
- xe_assert(xe, hmm_pfn & HMM_PFN_VALID);
-
- len = 1UL << hmm_pfn_to_map_order(hmm_pfn);
-
- /* If order > 0 the page may extend beyond range->start */
- len -= (hmm_pfn & ~HMM_PFN_FLAGS) & (len - 1);
- i += len;
- num_chunks++;
- }
- up_read(notifier_sem);
-
- return sg_alloc_table(st, num_chunks, GFP_KERNEL);
-}
-
-/**
- * xe_build_sg() - build a scatter gather table for all the physical pages/pfn
- * in a hmm_range. dma-map pages if necessary. dma-address is save in sg table
- * and will be used to program GPU page table later.
- * @xe: the xe device who will access the dma-address in sg table
- * @range: the hmm range that we build the sg table from. range->hmm_pfns[]
- * has the pfn numbers of pages that back up this hmm address range.
- * @st: pointer to the sg table.
- * @notifier_sem: The xe notifier lock.
- * @write: whether we write to this range. This decides dma map direction
- * for system pages. If write we map it bi-diretional; otherwise
- * DMA_TO_DEVICE
- *
- * All the contiguous pfns will be collapsed into one entry in
- * the scatter gather table. This is for the purpose of efficiently
- * programming GPU page table.
- *
- * The dma_address in the sg table will later be used by GPU to
- * access memory. So if the memory is system memory, we need to
- * do a dma-mapping so it can be accessed by GPU/DMA.
- *
- * FIXME: This function currently only support pages in system
- * memory. If the memory is GPU local memory (of the GPU who
- * is going to access memory), we need gpu dpa (device physical
- * address), and there is no need of dma-mapping. This is TBD.
- *
- * FIXME: dma-mapping for peer gpu device to access remote gpu's
- * memory. Add this when you support p2p
- *
- * This function allocates the storage of the sg table. It is
- * caller's responsibility to free it calling sg_free_table.
- *
- * Returns 0 if successful; -ENOMEM if fails to allocate memory
- */
-static int xe_build_sg(struct xe_device *xe, struct hmm_range *range,
- struct sg_table *st,
- struct rw_semaphore *notifier_sem,
- bool write)
-{
- unsigned long npages = xe_npages_in_range(range->start, range->end);
- struct device *dev = xe->drm.dev;
- struct scatterlist *sgl;
- struct page *page;
- unsigned long i, j;
-
- lockdep_assert_held(notifier_sem);
-
- i = 0;
- for_each_sg(st->sgl, sgl, st->nents, j) {
- unsigned long hmm_pfn, size;
-
- hmm_pfn = range->hmm_pfns[i];
- page = hmm_pfn_to_page(hmm_pfn);
- xe_assert(xe, !is_device_private_page(page));
-
- size = 1UL << hmm_pfn_to_map_order(hmm_pfn);
- size -= page_to_pfn(page) & (size - 1);
- i += size;
-
- if (unlikely(j == st->nents - 1)) {
- xe_assert(xe, i >= npages);
- if (i > npages)
- size -= (i - npages);
-
- sg_mark_end(sgl);
- } else {
- xe_assert(xe, i < npages);
- }
-
- sg_set_page(sgl, page, size << PAGE_SHIFT, 0);
- }
-
- return dma_map_sgtable(dev, st, write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_NO_KERNEL_MAPPING);
-}
-
-static void xe_hmm_userptr_set_mapped(struct xe_userptr_vma *uvma)
-{
- struct xe_userptr *userptr = &uvma->userptr;
- struct xe_vm *vm = xe_vma_vm(&uvma->vma);
-
- lockdep_assert_held_write(&vm->lock);
- lockdep_assert_held(&vm->userptr.notifier_lock);
-
- mutex_lock(&userptr->unmap_mutex);
- xe_assert(vm->xe, !userptr->mapped);
- userptr->mapped = true;
- mutex_unlock(&userptr->unmap_mutex);
-}
-
-void xe_hmm_userptr_unmap(struct xe_userptr_vma *uvma)
-{
- struct xe_userptr *userptr = &uvma->userptr;
- struct xe_vma *vma = &uvma->vma;
- bool write = !xe_vma_read_only(vma);
- struct xe_vm *vm = xe_vma_vm(vma);
- struct xe_device *xe = vm->xe;
-
- if (!lockdep_is_held_type(&vm->userptr.notifier_lock, 0) &&
- !lockdep_is_held_type(&vm->lock, 0) &&
- !(vma->gpuva.flags & XE_VMA_DESTROYED)) {
- /* Don't unmap in exec critical section. */
- xe_vm_assert_held(vm);
- /* Don't unmap while mapping the sg. */
- lockdep_assert_held(&vm->lock);
- }
-
- mutex_lock(&userptr->unmap_mutex);
- if (userptr->sg && userptr->mapped)
- dma_unmap_sgtable(xe->drm.dev, userptr->sg,
- write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE, 0);
- userptr->mapped = false;
- mutex_unlock(&userptr->unmap_mutex);
-}
-
-/**
- * xe_hmm_userptr_free_sg() - Free the scatter gather table of userptr
- * @uvma: the userptr vma which hold the scatter gather table
- *
- * With function xe_userptr_populate_range, we allocate storage of
- * the userptr sg table. This is a helper function to free this
- * sg table, and dma unmap the address in the table.
- */
-void xe_hmm_userptr_free_sg(struct xe_userptr_vma *uvma)
-{
- struct xe_userptr *userptr = &uvma->userptr;
-
- xe_assert(xe_vma_vm(&uvma->vma)->xe, userptr->sg);
- xe_hmm_userptr_unmap(uvma);
- sg_free_table(userptr->sg);
- userptr->sg = NULL;
-}
-
-/**
- * xe_hmm_userptr_populate_range() - Populate physical pages of a virtual
- * address range
- *
- * @uvma: userptr vma which has information of the range to populate.
- * @is_mm_mmap_locked: True if mmap_read_lock is already acquired by caller.
- *
- * This function populate the physical pages of a virtual
- * address range. The populated physical pages is saved in
- * userptr's sg table. It is similar to get_user_pages but call
- * hmm_range_fault.
- *
- * This function also read mmu notifier sequence # (
- * mmu_interval_read_begin), for the purpose of later
- * comparison (through mmu_interval_read_retry).
- *
- * This must be called with mmap read or write lock held.
- *
- * This function allocates the storage of the userptr sg table.
- * It is caller's responsibility to free it calling sg_free_table.
- *
- * returns: 0 for success; negative error no on failure
- */
-int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma,
- bool is_mm_mmap_locked)
-{
- unsigned long timeout =
- jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
- unsigned long *pfns;
- struct xe_userptr *userptr;
- struct xe_vma *vma = &uvma->vma;
- u64 userptr_start = xe_vma_userptr(vma);
- u64 userptr_end = userptr_start + xe_vma_size(vma);
- struct xe_vm *vm = xe_vma_vm(vma);
- struct hmm_range hmm_range = {
- .pfn_flags_mask = 0, /* ignore pfns */
- .default_flags = HMM_PFN_REQ_FAULT,
- .start = userptr_start,
- .end = userptr_end,
- .notifier = &uvma->userptr.notifier,
- .dev_private_owner = vm->xe,
- };
- bool write = !xe_vma_read_only(vma);
- unsigned long notifier_seq;
- u64 npages;
- int ret;
-
- userptr = &uvma->userptr;
-
- if (is_mm_mmap_locked)
- mmap_assert_locked(userptr->notifier.mm);
-
- if (vma->gpuva.flags & XE_VMA_DESTROYED)
- return 0;
-
- notifier_seq = mmu_interval_read_begin(&userptr->notifier);
- if (notifier_seq == userptr->notifier_seq)
- return 0;
-
- if (userptr->sg)
- xe_hmm_userptr_free_sg(uvma);
-
- npages = xe_npages_in_range(userptr_start, userptr_end);
- pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);
- if (unlikely(!pfns))
- return -ENOMEM;
-
- if (write)
- hmm_range.default_flags |= HMM_PFN_REQ_WRITE;
-
- if (!mmget_not_zero(userptr->notifier.mm)) {
- ret = -EFAULT;
- goto free_pfns;
- }
-
- hmm_range.hmm_pfns = pfns;
-
- while (true) {
- hmm_range.notifier_seq = mmu_interval_read_begin(&userptr->notifier);
-
- if (!is_mm_mmap_locked)
- mmap_read_lock(userptr->notifier.mm);
-
- ret = hmm_range_fault(&hmm_range);
-
- if (!is_mm_mmap_locked)
- mmap_read_unlock(userptr->notifier.mm);
-
- if (ret == -EBUSY) {
- if (time_after(jiffies, timeout))
- break;
-
- continue;
- }
- break;
- }
-
- mmput(userptr->notifier.mm);
-
- if (ret)
- goto free_pfns;
-
- ret = xe_alloc_sg(vm->xe, &userptr->sgt, &hmm_range, &vm->userptr.notifier_lock);
- if (ret)
- goto free_pfns;
-
- ret = down_read_interruptible(&vm->userptr.notifier_lock);
- if (ret)
- goto free_st;
-
- if (mmu_interval_read_retry(hmm_range.notifier, hmm_range.notifier_seq)) {
- ret = -EAGAIN;
- goto out_unlock;
- }
-
- ret = xe_build_sg(vm->xe, &hmm_range, &userptr->sgt,
- &vm->userptr.notifier_lock, write);
- if (ret)
- goto out_unlock;
-
- userptr->sg = &userptr->sgt;
- xe_hmm_userptr_set_mapped(uvma);
- userptr->notifier_seq = hmm_range.notifier_seq;
- up_read(&vm->userptr.notifier_lock);
- kvfree(pfns);
- return 0;
-
-out_unlock:
- up_read(&vm->userptr.notifier_lock);
-free_st:
- sg_free_table(&userptr->sgt);
-free_pfns:
- kvfree(pfns);
- return ret;
-}
diff --git a/drivers/gpu/drm/xe/xe_hmm.h b/drivers/gpu/drm/xe/xe_hmm.h
deleted file mode 100644
index 0ea98d8e7bbc..000000000000
--- a/drivers/gpu/drm/xe/xe_hmm.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: MIT
- *
- * Copyright © 2024 Intel Corporation
- */
-
-#ifndef _XE_HMM_H_
-#define _XE_HMM_H_
-
-#include <linux/types.h>
-
-struct xe_userptr_vma;
-
-int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma, bool is_mm_mmap_locked);
-
-void xe_hmm_userptr_free_sg(struct xe_userptr_vma *uvma);
-
-void xe_hmm_userptr_unmap(struct xe_userptr_vma *uvma);
-#endif
diff --git a/drivers/gpu/drm/xe/xe_hw_engine.c b/drivers/gpu/drm/xe/xe_hw_engine.c
index 796ba8c34a16..1cf623b4a5bc 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine.c
@@ -576,7 +576,7 @@ static void adjust_idledly(struct xe_hw_engine *hwe)
u32 maxcnt_units_ns = 640;
bool inhibit_switch = 0;
- if (!IS_SRIOV_VF(gt_to_xe(hwe->gt)) && XE_WA(gt, 16023105232)) {
+ if (!IS_SRIOV_VF(gt_to_xe(hwe->gt)) && XE_GT_WA(gt, 16023105232)) {
idledly = xe_mmio_read32(&gt->mmio, RING_IDLEDLY(hwe->mmio_base));
maxcnt = xe_mmio_read32(&gt->mmio, RING_PWRCTX_MAXCNT(hwe->mmio_base));
diff --git a/drivers/gpu/drm/xe/xe_hw_engine_group.c b/drivers/gpu/drm/xe/xe_hw_engine_group.c
index c926f840c87b..58bee3ffe881 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine_group.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine_group.c
@@ -103,8 +103,8 @@ int xe_hw_engine_setup_groups(struct xe_gt *gt)
break;
case XE_ENGINE_CLASS_OTHER:
break;
- default:
- drm_warn(&xe->drm, "NOT POSSIBLE");
+ case XE_ENGINE_CLASS_MAX:
+ xe_gt_assert(gt, false);
}
}
diff --git a/drivers/gpu/drm/xe/xe_hw_error.c b/drivers/gpu/drm/xe/xe_hw_error.c
new file mode 100644
index 000000000000..8c65291f36fc
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_hw_error.c
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <linux/fault-inject.h>
+
+#include "regs/xe_gsc_regs.h"
+#include "regs/xe_hw_error_regs.h"
+#include "regs/xe_irq_regs.h"
+
+#include "xe_device.h"
+#include "xe_hw_error.h"
+#include "xe_mmio.h"
+#include "xe_survivability_mode.h"
+
+#define HEC_UNCORR_FW_ERR_BITS 4
+extern struct fault_attr inject_csc_hw_error;
+
+/* Error categories reported by hardware */
+enum hardware_error {
+ HARDWARE_ERROR_CORRECTABLE = 0,
+ HARDWARE_ERROR_NONFATAL = 1,
+ HARDWARE_ERROR_FATAL = 2,
+ HARDWARE_ERROR_MAX,
+};
+
+static const char * const hec_uncorrected_fw_errors[] = {
+ "Fatal",
+ "CSE Disabled",
+ "FD Corruption",
+ "Data Corruption"
+};
+
+static const char *hw_error_to_str(const enum hardware_error hw_err)
+{
+ switch (hw_err) {
+ case HARDWARE_ERROR_CORRECTABLE:
+ return "CORRECTABLE";
+ case HARDWARE_ERROR_NONFATAL:
+ return "NONFATAL";
+ case HARDWARE_ERROR_FATAL:
+ return "FATAL";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static bool fault_inject_csc_hw_error(void)
+{
+ return IS_ENABLED(CONFIG_DEBUG_FS) && should_fail(&inject_csc_hw_error, 1);
+}
+
+static void csc_hw_error_work(struct work_struct *work)
+{
+ struct xe_tile *tile = container_of(work, typeof(*tile), csc_hw_error_work);
+ struct xe_device *xe = tile_to_xe(tile);
+ int ret;
+
+ ret = xe_survivability_mode_runtime_enable(xe);
+ if (ret)
+ drm_err(&xe->drm, "Failed to enable runtime survivability mode\n");
+}
+
+static void csc_hw_error_handler(struct xe_tile *tile, const enum hardware_error hw_err)
+{
+ const char *hw_err_str = hw_error_to_str(hw_err);
+ struct xe_device *xe = tile_to_xe(tile);
+ struct xe_mmio *mmio = &tile->mmio;
+ u32 base, err_bit, err_src;
+ unsigned long fw_err;
+
+ if (xe->info.platform != XE_BATTLEMAGE)
+ return;
+
+ base = BMG_GSC_HECI1_BASE;
+ lockdep_assert_held(&xe->irq.lock);
+ err_src = xe_mmio_read32(mmio, HEC_UNCORR_ERR_STATUS(base));
+ if (!err_src) {
+ drm_err_ratelimited(&xe->drm, HW_ERR "Tile%d reported HEC_ERR_STATUS_%s blank\n",
+ tile->id, hw_err_str);
+ return;
+ }
+
+ if (err_src & UNCORR_FW_REPORTED_ERR) {
+ fw_err = xe_mmio_read32(mmio, HEC_UNCORR_FW_ERR_DW0(base));
+ for_each_set_bit(err_bit, &fw_err, HEC_UNCORR_FW_ERR_BITS) {
+ drm_err_ratelimited(&xe->drm, HW_ERR
+ "%s: HEC Uncorrected FW %s error reported, bit[%d] is set\n",
+ hw_err_str, hec_uncorrected_fw_errors[err_bit],
+ err_bit);
+
+ schedule_work(&tile->csc_hw_error_work);
+ }
+ }
+
+ xe_mmio_write32(mmio, HEC_UNCORR_ERR_STATUS(base), err_src);
+}
+
+static void hw_error_source_handler(struct xe_tile *tile, const enum hardware_error hw_err)
+{
+ const char *hw_err_str = hw_error_to_str(hw_err);
+ struct xe_device *xe = tile_to_xe(tile);
+ unsigned long flags;
+ u32 err_src;
+
+ if (xe->info.platform != XE_BATTLEMAGE)
+ return;
+
+ spin_lock_irqsave(&xe->irq.lock, flags);
+ err_src = xe_mmio_read32(&tile->mmio, DEV_ERR_STAT_REG(hw_err));
+ if (!err_src) {
+ drm_err_ratelimited(&xe->drm, HW_ERR "Tile%d reported DEV_ERR_STAT_%s blank!\n",
+ tile->id, hw_err_str);
+ goto unlock;
+ }
+
+ if (err_src & XE_CSC_ERROR)
+ csc_hw_error_handler(tile, hw_err);
+
+ xe_mmio_write32(&tile->mmio, DEV_ERR_STAT_REG(hw_err), err_src);
+
+unlock:
+ spin_unlock_irqrestore(&xe->irq.lock, flags);
+}
+
+/**
+ * xe_hw_error_irq_handler - irq handling for hw errors
+ * @tile: tile instance
+ * @master_ctl: value read from master interrupt register
+ *
+ * Xe platforms add three error bits to the master interrupt register to support error handling.
+ * These three bits convey the class of error: FATAL, NONFATAL, or CORRECTABLE.
+ * To process the interrupt, determine the source of error by reading the Device Error Source
+ * Register that corresponds to the class of error being serviced.
+ */
+void xe_hw_error_irq_handler(struct xe_tile *tile, const u32 master_ctl)
+{
+ enum hardware_error hw_err;
+
+ if (fault_inject_csc_hw_error())
+ schedule_work(&tile->csc_hw_error_work);
+
+ for (hw_err = 0; hw_err < HARDWARE_ERROR_MAX; hw_err++)
+ if (master_ctl & ERROR_IRQ(hw_err))
+ hw_error_source_handler(tile, hw_err);
+}
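+
+/*
+ * For example, a master_ctl with both the FATAL and CORRECTABLE bits set
+ * results in two hw_error_source_handler() calls, one per class, each
+ * reading its own DEV_ERR_STAT register (assuming ERROR_IRQ() selects a
+ * per-class bit, as defined in the irq register headers).
+ */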
+
+/*
+ * Process hardware errors during boot
+ */
+static void process_hw_errors(struct xe_device *xe)
+{
+ struct xe_tile *tile;
+ u32 master_ctl;
+ u8 id;
+
+ for_each_tile(tile, xe, id) {
+ master_ctl = xe_mmio_read32(&tile->mmio, GFX_MSTR_IRQ);
+ xe_hw_error_irq_handler(tile, master_ctl);
+ xe_mmio_write32(&tile->mmio, GFX_MSTR_IRQ, master_ctl);
+ }
+}
+
+/**
+ * xe_hw_error_init - Initialize hw errors
+ * @xe: xe device instance
+ *
+ * Initialize and check for errors that occurred during boot
+ * prior to driver load.
+ */
+void xe_hw_error_init(struct xe_device *xe)
+{
+ struct xe_tile *tile = xe_device_get_root_tile(xe);
+
+ if (!IS_DGFX(xe) || IS_SRIOV_VF(xe))
+ return;
+
+ INIT_WORK(&tile->csc_hw_error_work, csc_hw_error_work);
+
+ process_hw_errors(xe);
+}
diff --git a/drivers/gpu/drm/xe/xe_hw_error.h b/drivers/gpu/drm/xe/xe_hw_error.h
new file mode 100644
index 000000000000..d86e28c5180c
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_hw_error.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+#ifndef XE_HW_ERROR_H_
+#define XE_HW_ERROR_H_
+
+#include <linux/types.h>
+
+struct xe_tile;
+struct xe_device;
+
+void xe_hw_error_irq_handler(struct xe_tile *tile, const u32 master_ctl);
+void xe_hw_error_init(struct xe_device *xe);
+#endif
diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c
index c5b63e10bb91..b6790589e623 100644
--- a/drivers/gpu/drm/xe/xe_hwmon.c
+++ b/drivers/gpu/drm/xe/xe_hwmon.c
@@ -179,7 +179,7 @@ static int xe_hwmon_pcode_rmw_power_limit(const struct xe_hwmon *hwmon, u32 attr
u32 clr, u32 set)
{
struct xe_tile *root_tile = xe_device_get_root_tile(hwmon->xe);
- u32 val0, val1;
+ u32 val0 = 0, val1 = 0;
int ret = 0;
ret = xe_pcode_read(root_tile, PCODE_MBOX(PCODE_POWER_SETUP,
@@ -737,7 +737,7 @@ static int xe_hwmon_power_curr_crit_read(struct xe_hwmon *hwmon, int channel,
long *value, u32 scale_factor)
{
int ret;
- u32 uval;
+ u32 uval = 0;
mutex_lock(&hwmon->hwmon_lock);
@@ -921,7 +921,7 @@ xe_hwmon_power_write(struct xe_hwmon *hwmon, u32 attr, int channel, long val)
static umode_t
xe_hwmon_curr_is_visible(const struct xe_hwmon *hwmon, u32 attr, int channel)
{
- u32 uval;
+ u32 uval = 0;
/* hwmon sysfs attribute of current available only for package */
if (channel != CHANNEL_PKG)
@@ -1023,7 +1023,7 @@ xe_hwmon_energy_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
static umode_t
xe_hwmon_fan_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
{
- u32 uval;
+ u32 uval = 0;
if (!hwmon->xe->info.has_fan_control)
return 0;
@@ -1297,13 +1297,6 @@ xe_hwmon_get_preregistration_info(struct xe_hwmon *hwmon)
xe_hwmon_fan_input_read(hwmon, channel, &fan_speed);
}
-static void xe_hwmon_mutex_destroy(void *arg)
-{
- struct xe_hwmon *hwmon = arg;
-
- mutex_destroy(&hwmon->hwmon_lock);
-}
-
int xe_hwmon_register(struct xe_device *xe)
{
struct device *dev = xe->drm.dev;
@@ -1322,8 +1315,7 @@ int xe_hwmon_register(struct xe_device *xe)
if (!hwmon)
return -ENOMEM;
- mutex_init(&hwmon->hwmon_lock);
- ret = devm_add_action_or_reset(dev, xe_hwmon_mutex_destroy, hwmon);
+ ret = devm_mutex_init(dev, &hwmon->hwmon_lock);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/xe/xe_i2c.c b/drivers/gpu/drm/xe/xe_i2c.c
index bc7dc2099470..48dfcb41fa08 100644
--- a/drivers/gpu/drm/xe/xe_i2c.c
+++ b/drivers/gpu/drm/xe/xe_i2c.c
@@ -147,6 +147,20 @@ static void xe_i2c_unregister_adapter(struct xe_i2c *i2c)
}
/**
+ * xe_i2c_present - I2C controller is present and functional
+ * @xe: xe device instance
+ *
+ * Check whether the I2C controller is present and functioning, with a valid
+ * endpoint cookie.
+ *
+ * Return: %true if present, %false otherwise.
+ */
+bool xe_i2c_present(struct xe_device *xe)
+{
+ return xe->i2c && xe->i2c->ep.cookie == XE_I2C_EP_COOKIE_DEVICE;
+}
+
+/**
* xe_i2c_irq_handler: Handler for I2C interrupts
* @xe: xe device instance
* @master_ctl: interrupt register
@@ -230,7 +244,7 @@ void xe_i2c_pm_suspend(struct xe_device *xe)
{
struct xe_mmio *mmio = xe_root_tile_mmio(xe);
- if (!xe->i2c || xe->i2c->ep.cookie != XE_I2C_EP_COOKIE_DEVICE)
+ if (!xe_i2c_present(xe))
return;
xe_mmio_rmw32(mmio, I2C_CONFIG_PMCSR, PCI_PM_CTRL_STATE_MASK, (__force u32)PCI_D3hot);
@@ -241,11 +255,11 @@ void xe_i2c_pm_resume(struct xe_device *xe, bool d3cold)
{
struct xe_mmio *mmio = xe_root_tile_mmio(xe);
- if (!xe->i2c || xe->i2c->ep.cookie != XE_I2C_EP_COOKIE_DEVICE)
+ if (!xe_i2c_present(xe))
return;
if (d3cold)
- xe_mmio_rmw32(mmio, I2C_CONFIG_CMD, 0, PCI_COMMAND_MEMORY);
+ xe_mmio_rmw32(mmio, I2C_CONFIG_CMD, 0, PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
xe_mmio_rmw32(mmio, I2C_CONFIG_PMCSR, PCI_PM_CTRL_STATE_MASK, (__force u32)PCI_D0);
drm_dbg(&xe->drm, "pmcsr: 0x%08x\n", xe_mmio_read32(mmio, I2C_CONFIG_PMCSR));
diff --git a/drivers/gpu/drm/xe/xe_i2c.h b/drivers/gpu/drm/xe/xe_i2c.h
index b767ed8ce52b..ecd5f10358e2 100644
--- a/drivers/gpu/drm/xe/xe_i2c.h
+++ b/drivers/gpu/drm/xe/xe_i2c.h
@@ -49,11 +49,13 @@ struct xe_i2c {
#if IS_ENABLED(CONFIG_I2C)
int xe_i2c_probe(struct xe_device *xe);
+bool xe_i2c_present(struct xe_device *xe);
void xe_i2c_irq_handler(struct xe_device *xe, u32 master_ctl);
void xe_i2c_pm_suspend(struct xe_device *xe);
void xe_i2c_pm_resume(struct xe_device *xe, bool d3cold);
#else
static inline int xe_i2c_probe(struct xe_device *xe) { return 0; }
+static inline bool xe_i2c_present(struct xe_device *xe) { return false; }
static inline void xe_i2c_irq_handler(struct xe_device *xe, u32 master_ctl) { }
static inline void xe_i2c_pm_suspend(struct xe_device *xe) { }
static inline void xe_i2c_pm_resume(struct xe_device *xe, bool d3cold) { }
diff --git a/drivers/gpu/drm/xe/xe_irq.c b/drivers/gpu/drm/xe/xe_irq.c
index 5df5b8c2a3e4..870edaf69388 100644
--- a/drivers/gpu/drm/xe/xe_irq.c
+++ b/drivers/gpu/drm/xe/xe_irq.c
@@ -18,6 +18,7 @@
#include "xe_gt.h"
#include "xe_guc.h"
#include "xe_hw_engine.h"
+#include "xe_hw_error.h"
#include "xe_i2c.h"
#include "xe_memirq.h"
#include "xe_mmio.h"
@@ -468,6 +469,7 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg)
xe_mmio_write32(mmio, GFX_MSTR_IRQ, master_ctl);
gt_irq_handler(tile, master_ctl, intr_dw, identity);
+ xe_hw_error_irq_handler(tile, master_ctl);
/*
* Display interrupts (including display backlight operations
@@ -756,6 +758,8 @@ int xe_irq_install(struct xe_device *xe)
int nvec = 1;
int err;
+ xe_hw_error_init(xe);
+
xe_irq_reset(xe);
if (xe_device_has_msix(xe)) {
diff --git a/drivers/gpu/drm/xe/xe_late_bind_fw.c b/drivers/gpu/drm/xe/xe_late_bind_fw.c
new file mode 100644
index 000000000000..38f3feb2aecd
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_late_bind_fw.c
@@ -0,0 +1,464 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <linux/component.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+
+#include <drm/drm_managed.h>
+#include <drm/intel/i915_component.h>
+#include <drm/intel/intel_lb_mei_interface.h>
+#include <drm/drm_print.h>
+
+#include "xe_device.h"
+#include "xe_late_bind_fw.h"
+#include "xe_pcode.h"
+#include "xe_pcode_api.h"
+#include "xe_pm.h"
+
+/*
+ * The component usually loads quickly, but it can take longer in some cases.
+ * Use a generous timeout to cover the worst-case scenario.
+ */
+#define LB_INIT_TIMEOUT_MS 20000
+
+/*
+ * Retry for up to 6 seconds in total, in steps of 200 ms, to allow time for
+ * other OS components to release the MEI CL handle.
+ */
+#define LB_FW_LOAD_RETRY_MAXCOUNT 30
+#define LB_FW_LOAD_RETRY_PAUSE_MS 200
+
+static const u32 fw_id_to_type[] = {
+ [XE_LB_FW_FAN_CONTROL] = INTEL_LB_TYPE_FAN_CONTROL,
+ };
+
+static const char * const fw_id_to_name[] = {
+ [XE_LB_FW_FAN_CONTROL] = "fan_control",
+ };
+
+static struct xe_device *
+late_bind_to_xe(struct xe_late_bind *late_bind)
+{
+ return container_of(late_bind, struct xe_device, late_bind);
+}
+
+static struct xe_device *
+late_bind_fw_to_xe(struct xe_late_bind_fw *lb_fw)
+{
+ return container_of(lb_fw, struct xe_device, late_bind.late_bind_fw[lb_fw->id]);
+}
+
+/* Refer to the "Late Bind based Firmware Layout" documentation entry for details */
+static int parse_cpd_header(struct xe_late_bind_fw *lb_fw,
+ const void *data, size_t size, const char *manifest_entry)
+{
+ struct xe_device *xe = late_bind_fw_to_xe(lb_fw);
+ const struct gsc_cpd_header_v2 *header = data;
+ const struct gsc_manifest_header *manifest;
+ const struct gsc_cpd_entry *entry;
+ size_t min_size = sizeof(*header);
+	u32 offset = 0;	/* stays 0 if the manifest entry is not found */
+ int i;
+
+ /* manifest_entry is mandatory */
+ xe_assert(xe, manifest_entry);
+
+ if (size < min_size || header->header_marker != GSC_CPD_HEADER_MARKER)
+ return -ENOENT;
+
+ if (header->header_length < sizeof(struct gsc_cpd_header_v2)) {
+ drm_err(&xe->drm, "%s late binding fw: Invalid CPD header length %u!\n",
+ fw_id_to_name[lb_fw->id], header->header_length);
+ return -EINVAL;
+ }
+
+ min_size = header->header_length + sizeof(struct gsc_cpd_entry) * header->num_of_entries;
+ if (size < min_size) {
+ drm_err(&xe->drm, "%s late binding fw: too small! %zu < %zu\n",
+ fw_id_to_name[lb_fw->id], size, min_size);
+ return -ENODATA;
+ }
+
+ /* Look for the manifest first */
+ entry = (void *)header + header->header_length;
+ for (i = 0; i < header->num_of_entries; i++, entry++)
+ if (strcmp(entry->name, manifest_entry) == 0)
+ offset = entry->offset & GSC_CPD_ENTRY_OFFSET_MASK;
+
+ if (!offset) {
+ drm_err(&xe->drm, "%s late binding fw: Failed to find manifest_entry\n",
+ fw_id_to_name[lb_fw->id]);
+ return -ENODATA;
+ }
+
+ min_size = offset + sizeof(struct gsc_manifest_header);
+ if (size < min_size) {
+ drm_err(&xe->drm, "%s late binding fw: too small! %zu < %zu\n",
+ fw_id_to_name[lb_fw->id], size, min_size);
+ return -ENODATA;
+ }
+
+ manifest = data + offset;
+
+ lb_fw->version = manifest->fw_version;
+
+ return 0;
+}
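+
+/*
+ * An assumed sketch of the layout parse_cpd_header() walks (see the
+ * "Late Bind based Firmware Layout" documentation entry for the
+ * authoritative description):
+ *
+ *   +-------------------------+ 0
+ *   | gsc_cpd_header_v2       |
+ *   +-------------------------+ header_length
+ *   | gsc_cpd_entry[0..n-1]   | one entry names the manifest
+ *   +-------------------------+
+ *   | ...                     |
+ *   +-------------------------+ entry->offset (masked)
+ *   | gsc_manifest_header     | carries fw_version
+ *   +-------------------------+
+ */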
+
+/* Refer to the "Late Bind based Firmware Layout" documentation entry for details */
+static int parse_lb_layout(struct xe_late_bind_fw *lb_fw,
+ const void *data, size_t size, const char *fpt_entry)
+{
+ struct xe_device *xe = late_bind_fw_to_xe(lb_fw);
+ const struct csc_fpt_header *header = data;
+ const struct csc_fpt_entry *entry;
+ size_t min_size = sizeof(*header);
+	u32 offset = 0;	/* stays 0 if the fpt entry is not found */
+ int i;
+
+ /* fpt_entry is mandatory */
+ xe_assert(xe, fpt_entry);
+
+ if (size < min_size || header->header_marker != CSC_FPT_HEADER_MARKER)
+ return -ENOENT;
+
+ if (header->header_length < sizeof(struct csc_fpt_header)) {
+ drm_err(&xe->drm, "%s late binding fw: Invalid FPT header length %u!\n",
+ fw_id_to_name[lb_fw->id], header->header_length);
+ return -EINVAL;
+ }
+
+ min_size = header->header_length + sizeof(struct csc_fpt_entry) * header->num_of_entries;
+ if (size < min_size) {
+ drm_err(&xe->drm, "%s late binding fw: too small! %zu < %zu\n",
+ fw_id_to_name[lb_fw->id], size, min_size);
+ return -ENODATA;
+ }
+
+ /* Look for the cpd header first */
+ entry = (void *)header + header->header_length;
+ for (i = 0; i < header->num_of_entries; i++, entry++)
+ if (strcmp(entry->name, fpt_entry) == 0)
+ offset = entry->offset;
+
+ if (!offset) {
+ drm_err(&xe->drm, "%s late binding fw: Failed to find fpt_entry\n",
+ fw_id_to_name[lb_fw->id]);
+ return -ENODATA;
+ }
+
+ min_size = offset + sizeof(struct gsc_cpd_header_v2);
+ if (size < min_size) {
+ drm_err(&xe->drm, "%s late binding fw: too small! %zu < %zu\n",
+ fw_id_to_name[lb_fw->id], size, min_size);
+ return -ENODATA;
+ }
+
+ return parse_cpd_header(lb_fw, data + offset, size - offset, "LTES.man");
+}
+
+static const char *xe_late_bind_parse_status(u32 status)
+{
+ switch (status) {
+ case INTEL_LB_STATUS_SUCCESS:
+ return "success";
+ case INTEL_LB_STATUS_4ID_MISMATCH:
+ return "4Id Mismatch";
+ case INTEL_LB_STATUS_ARB_FAILURE:
+ return "ARB Failure";
+ case INTEL_LB_STATUS_GENERAL_ERROR:
+ return "General Error";
+ case INTEL_LB_STATUS_INVALID_PARAMS:
+ return "Invalid Params";
+ case INTEL_LB_STATUS_INVALID_SIGNATURE:
+ return "Invalid Signature";
+ case INTEL_LB_STATUS_INVALID_PAYLOAD:
+ return "Invalid Payload";
+ case INTEL_LB_STATUS_TIMEOUT:
+ return "Timeout";
+ default:
+ return "Unknown error";
+ }
+}
+
+static int xe_late_bind_fw_num_fans(struct xe_late_bind *late_bind)
+{
+ struct xe_device *xe = late_bind_to_xe(late_bind);
+ struct xe_tile *root_tile = xe_device_get_root_tile(xe);
+ u32 uval;
+
+	if (xe_pcode_read(root_tile,
+			  PCODE_MBOX(FAN_SPEED_CONTROL, FSC_READ_NUM_FANS, 0), &uval, NULL))
+		return 0;
+
+	return uval;
+}
+
+void xe_late_bind_wait_for_worker_completion(struct xe_late_bind *late_bind)
+{
+ struct xe_device *xe = late_bind_to_xe(late_bind);
+ struct xe_late_bind_fw *lbfw;
+ int fw_id;
+
+ for (fw_id = 0; fw_id < XE_LB_FW_MAX_ID; fw_id++) {
+ lbfw = &late_bind->late_bind_fw[fw_id];
+ if (lbfw->payload && late_bind->wq) {
+ drm_dbg(&xe->drm, "Flush work: load %s firmware\n",
+ fw_id_to_name[lbfw->id]);
+ flush_work(&lbfw->work);
+ }
+ }
+}
+
+static void xe_late_bind_work(struct work_struct *work)
+{
+ struct xe_late_bind_fw *lbfw = container_of(work, struct xe_late_bind_fw, work);
+ struct xe_late_bind *late_bind = container_of(lbfw, struct xe_late_bind,
+ late_bind_fw[lbfw->id]);
+ struct xe_device *xe = late_bind_to_xe(late_bind);
+ int retry = LB_FW_LOAD_RETRY_MAXCOUNT;
+ int ret;
+ int slept;
+
+ xe_device_assert_mem_access(xe);
+
+ /* we can queue this before the component is bound */
+ for (slept = 0; slept < LB_INIT_TIMEOUT_MS; slept += 100) {
+ if (late_bind->component.ops)
+ break;
+ msleep(100);
+ }
+
+ if (!late_bind->component.ops) {
+ drm_err(&xe->drm, "Late bind component not bound\n");
+ /* Do not re-attempt fw load */
+ drmm_kfree(&xe->drm, (void *)lbfw->payload);
+ lbfw->payload = NULL;
+ goto out;
+ }
+
+ drm_dbg(&xe->drm, "Load %s firmware\n", fw_id_to_name[lbfw->id]);
+
+ do {
+ ret = late_bind->component.ops->push_payload(late_bind->component.mei_dev,
+ lbfw->type,
+ lbfw->flags,
+ lbfw->payload,
+ lbfw->payload_size);
+ if (!ret)
+ break;
+ msleep(LB_FW_LOAD_RETRY_PAUSE_MS);
+ } while (--retry && ret == -EBUSY);
+
+ if (!ret) {
+ drm_dbg(&xe->drm, "Load %s firmware successful\n",
+ fw_id_to_name[lbfw->id]);
+ goto out;
+ }
+
+ if (ret > 0)
+ drm_err(&xe->drm, "Load %s firmware failed with err %d, %s\n",
+ fw_id_to_name[lbfw->id], ret, xe_late_bind_parse_status(ret));
+ else
+		drm_err(&xe->drm, "Load %s firmware failed with err %d\n",
+ fw_id_to_name[lbfw->id], ret);
+ /* Do not re-attempt fw load */
+ drmm_kfree(&xe->drm, (void *)lbfw->payload);
+ lbfw->payload = NULL;
+
+out:
+ xe_pm_runtime_put(xe);
+}
+
+int xe_late_bind_fw_load(struct xe_late_bind *late_bind)
+{
+ struct xe_device *xe = late_bind_to_xe(late_bind);
+ struct xe_late_bind_fw *lbfw;
+ int fw_id;
+
+ if (!late_bind->component_added)
+ return -ENODEV;
+
+ if (late_bind->disable)
+ return 0;
+
+ for (fw_id = 0; fw_id < XE_LB_FW_MAX_ID; fw_id++) {
+ lbfw = &late_bind->late_bind_fw[fw_id];
+ if (lbfw->payload) {
+ xe_pm_runtime_get_noresume(xe);
+ queue_work(late_bind->wq, &lbfw->work);
+ }
+ }
+ return 0;
+}
+
+static int __xe_late_bind_fw_init(struct xe_late_bind *late_bind, u32 fw_id)
+{
+ struct xe_device *xe = late_bind_to_xe(late_bind);
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ struct xe_late_bind_fw *lb_fw;
+ const struct firmware *fw;
+ u32 num_fans;
+ int ret;
+
+ if (fw_id >= XE_LB_FW_MAX_ID)
+ return -EINVAL;
+
+ lb_fw = &late_bind->late_bind_fw[fw_id];
+
+ lb_fw->id = fw_id;
+ lb_fw->type = fw_id_to_type[lb_fw->id];
+ lb_fw->flags &= ~INTEL_LB_FLAG_IS_PERSISTENT;
+
+ if (lb_fw->type == INTEL_LB_TYPE_FAN_CONTROL) {
+ num_fans = xe_late_bind_fw_num_fans(late_bind);
+ drm_dbg(&xe->drm, "Number of Fans: %d\n", num_fans);
+ if (!num_fans)
+ return 0;
+ }
+
+ snprintf(lb_fw->blob_path, sizeof(lb_fw->blob_path), "xe/%s_8086_%04x_%04x_%04x.bin",
+ fw_id_to_name[lb_fw->id], pdev->device,
+ pdev->subsystem_vendor, pdev->subsystem_device);
+
+ drm_dbg(&xe->drm, "Request late binding firmware %s\n", lb_fw->blob_path);
+ ret = firmware_request_nowarn(&fw, lb_fw->blob_path, xe->drm.dev);
+ if (ret) {
+		drm_dbg(&xe->drm, "%s late binding fw not available for current device\n",
+ fw_id_to_name[lb_fw->id]);
+ return 0;
+ }
+
+ if (fw->size > XE_LB_MAX_PAYLOAD_SIZE) {
+		drm_err(&xe->drm, "Firmware %s size %zu is larger than max payload size %u\n",
+ lb_fw->blob_path, fw->size, XE_LB_MAX_PAYLOAD_SIZE);
+ release_firmware(fw);
+ return -ENODATA;
+ }
+
+ ret = parse_lb_layout(lb_fw, fw->data, fw->size, "LTES");
+	if (ret) {
+		release_firmware(fw);
+		return ret;
+	}
+
+ lb_fw->payload_size = fw->size;
+ lb_fw->payload = drmm_kzalloc(&xe->drm, lb_fw->payload_size, GFP_KERNEL);
+ if (!lb_fw->payload) {
+ release_firmware(fw);
+ return -ENOMEM;
+ }
+
+ drm_info(&xe->drm, "Using %s firmware from %s version %u.%u.%u.%u\n",
+ fw_id_to_name[lb_fw->id], lb_fw->blob_path,
+ lb_fw->version.major, lb_fw->version.minor,
+ lb_fw->version.hotfix, lb_fw->version.build);
+
+ memcpy((void *)lb_fw->payload, fw->data, lb_fw->payload_size);
+ release_firmware(fw);
+ INIT_WORK(&lb_fw->work, xe_late_bind_work);
+
+ return 0;
+}
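+
+/*
+ * Example of the resulting path, with hypothetical IDs: a fan control blob
+ * for device 0xe20b with subsystem vendor/device 0x8086/0x1234 would be
+ * requested as "xe/fan_control_8086_e20b_8086_1234.bin".
+ */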
+
+static int xe_late_bind_fw_init(struct xe_late_bind *late_bind)
+{
+ int ret;
+ int fw_id;
+
+ late_bind->wq = alloc_ordered_workqueue("late-bind-ordered-wq", 0);
+ if (!late_bind->wq)
+ return -ENOMEM;
+
+ for (fw_id = 0; fw_id < XE_LB_FW_MAX_ID; fw_id++) {
+ ret = __xe_late_bind_fw_init(late_bind, fw_id);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int xe_late_bind_component_bind(struct device *xe_kdev,
+ struct device *mei_kdev, void *data)
+{
+ struct xe_device *xe = kdev_to_xe_device(xe_kdev);
+ struct xe_late_bind *late_bind = &xe->late_bind;
+
+ late_bind->component.ops = data;
+ late_bind->component.mei_dev = mei_kdev;
+
+ return 0;
+}
+
+static void xe_late_bind_component_unbind(struct device *xe_kdev,
+ struct device *mei_kdev, void *data)
+{
+ struct xe_device *xe = kdev_to_xe_device(xe_kdev);
+ struct xe_late_bind *late_bind = &xe->late_bind;
+
+ xe_late_bind_wait_for_worker_completion(late_bind);
+
+ late_bind->component.ops = NULL;
+}
+
+static const struct component_ops xe_late_bind_component_ops = {
+ .bind = xe_late_bind_component_bind,
+ .unbind = xe_late_bind_component_unbind,
+};
+
+static void xe_late_bind_remove(void *arg)
+{
+ struct xe_late_bind *late_bind = arg;
+ struct xe_device *xe = late_bind_to_xe(late_bind);
+
+ xe_late_bind_wait_for_worker_completion(late_bind);
+
+ late_bind->component_added = false;
+
+ component_del(xe->drm.dev, &xe_late_bind_component_ops);
+ if (late_bind->wq) {
+ destroy_workqueue(late_bind->wq);
+ late_bind->wq = NULL;
+ }
+}
+
+/**
+ * xe_late_bind_init() - add xe mei late binding component
+ * @late_bind: pointer to late bind structure.
+ *
+ * Return: 0 if the initialization was successful, a negative errno otherwise.
+ */
+int xe_late_bind_init(struct xe_late_bind *late_bind)
+{
+ struct xe_device *xe = late_bind_to_xe(late_bind);
+ int err;
+
+ if (!xe->info.has_late_bind)
+ return 0;
+
+ if (!IS_ENABLED(CONFIG_INTEL_MEI_LB) || !IS_ENABLED(CONFIG_INTEL_MEI_GSC)) {
+		drm_info(&xe->drm, "Can't init xe mei late bind: missing mei component\n");
+ return 0;
+ }
+
+ err = component_add_typed(xe->drm.dev, &xe_late_bind_component_ops,
+ INTEL_COMPONENT_LB);
+ if (err < 0) {
+ drm_err(&xe->drm, "Failed to add mei late bind component (%pe)\n", ERR_PTR(err));
+ return err;
+ }
+
+ late_bind->component_added = true;
+
+ err = devm_add_action_or_reset(xe->drm.dev, xe_late_bind_remove, late_bind);
+ if (err)
+ return err;
+
+ err = xe_late_bind_fw_init(late_bind);
+ if (err)
+ return err;
+
+ return xe_late_bind_fw_load(late_bind);
+}
diff --git a/drivers/gpu/drm/xe/xe_late_bind_fw.h b/drivers/gpu/drm/xe/xe_late_bind_fw.h
new file mode 100644
index 000000000000..07e437390539
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_late_bind_fw.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_LATE_BIND_FW_H_
+#define _XE_LATE_BIND_FW_H_
+
+#include <linux/types.h>
+
+struct xe_late_bind;
+
+int xe_late_bind_init(struct xe_late_bind *late_bind);
+int xe_late_bind_fw_load(struct xe_late_bind *late_bind);
+void xe_late_bind_wait_for_worker_completion(struct xe_late_bind *late_bind);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_late_bind_fw_types.h b/drivers/gpu/drm/xe/xe_late_bind_fw_types.h
new file mode 100644
index 000000000000..0f5da89ce98b
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_late_bind_fw_types.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_LATE_BIND_TYPES_H_
+#define _XE_LATE_BIND_TYPES_H_
+
+#include <linux/iosys-map.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include "xe_uc_fw_abi.h"
+
+#define XE_LB_MAX_PAYLOAD_SIZE SZ_4K
+
+/**
+ * enum xe_late_bind_fw_id - late binding firmware index
+ * @XE_LB_FW_FAN_CONTROL: fan control firmware
+ * @XE_LB_FW_MAX_ID: number of late binding firmware types
+ */
+enum xe_late_bind_fw_id {
+ XE_LB_FW_FAN_CONTROL = 0,
+ XE_LB_FW_MAX_ID
+};
+
+/**
+ * struct xe_late_bind_fw - late binding firmware descriptor
+ */
+struct xe_late_bind_fw {
+ /** @id: firmware index */
+ u32 id;
+ /** @blob_path: firmware binary path */
+ char blob_path[PATH_MAX];
+ /** @type: firmware type */
+ u32 type;
+ /** @flags: firmware flags */
+ u32 flags;
+ /** @payload: to store the late binding blob */
+ const u8 *payload;
+ /** @payload_size: late binding blob payload_size */
+ size_t payload_size;
+ /** @work: worker to upload latebind blob */
+ struct work_struct work;
+ /** @version: late binding blob manifest version */
+ struct gsc_version version;
+};
+
+/**
+ * struct xe_late_bind_component - Late Binding services component
+ * @mei_dev: device that provide Late Binding service.
+ * @ops: Ops implemented by Late Binding driver, used by Xe driver.
+ *
+ * Communication between Xe and MEI drivers for Late Binding services
+ */
+struct xe_late_bind_component {
+ struct device *mei_dev;
+ const struct intel_lb_component_ops *ops;
+};
+
+/**
+ * struct xe_late_bind - late binding firmware state
+ */
+struct xe_late_bind {
+ /** @component: struct for communication with mei component */
+ struct xe_late_bind_component component;
+ /** @late_bind_fw: late binding firmware array */
+ struct xe_late_bind_fw late_bind_fw[XE_LB_FW_MAX_ID];
+ /** @wq: workqueue to submit request to download late bind blob */
+ struct workqueue_struct *wq;
+ /** @component_added: whether the component has been added */
+ bool component_added;
+	/** @disable: to block late binding reload during pm resume flow */
+ bool disable;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_lmtt.c b/drivers/gpu/drm/xe/xe_lmtt.c
index a2000307d5bf..62fc5a1a332d 100644
--- a/drivers/gpu/drm/xe/xe_lmtt.c
+++ b/drivers/gpu/drm/xe/xe_lmtt.c
@@ -11,7 +11,7 @@
#include "xe_assert.h"
#include "xe_bo.h"
-#include "xe_gt_tlb_invalidation.h"
+#include "xe_tlb_inval.h"
#include "xe_lmtt.h"
#include "xe_map.h"
#include "xe_mmio.h"
@@ -67,12 +67,12 @@ static struct xe_lmtt_pt *lmtt_pt_alloc(struct xe_lmtt *lmtt, unsigned int level
goto out;
}
- bo = xe_bo_create_pin_map(lmtt_to_xe(lmtt), lmtt_to_tile(lmtt), NULL,
- PAGE_ALIGN(lmtt->ops->lmtt_pte_size(level) *
- lmtt->ops->lmtt_pte_num(level)),
- ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM_IF_DGFX(lmtt_to_tile(lmtt)) |
- XE_BO_FLAG_NEEDS_64K);
+ bo = xe_bo_create_pin_map_novm(lmtt_to_xe(lmtt), lmtt_to_tile(lmtt),
+ PAGE_ALIGN(lmtt->ops->lmtt_pte_size(level) *
+ lmtt->ops->lmtt_pte_num(level)),
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_VRAM_IF_DGFX(lmtt_to_tile(lmtt)) |
+ XE_BO_FLAG_NEEDS_64K, false);
if (IS_ERR(bo)) {
err = PTR_ERR(bo);
goto out_free_pt;
@@ -195,14 +195,17 @@ static void lmtt_setup_dir_ptr(struct xe_lmtt *lmtt)
struct xe_tile *tile = lmtt_to_tile(lmtt);
struct xe_device *xe = tile_to_xe(tile);
dma_addr_t offset = xe_bo_main_addr(lmtt->pd->bo, XE_PAGE_SIZE);
+ struct xe_gt *gt;
+ u8 id;
lmtt_debug(lmtt, "DIR offset %pad\n", &offset);
lmtt_assert(lmtt, xe_bo_is_vram(lmtt->pd->bo));
lmtt_assert(lmtt, IS_ALIGNED(offset, SZ_64K));
- xe_mmio_write32(&tile->mmio,
- GRAPHICS_VER(xe) >= 20 ? XE2_LMEM_CFG : LMEM_CFG,
- LMEM_EN | REG_FIELD_PREP(LMTT_DIR_PTR, offset / SZ_64K));
+ for_each_gt_on_tile(gt, tile, id)
+ xe_mmio_write32(&gt->mmio,
+ GRAPHICS_VER(xe) >= 20 ? XE2_LMEM_CFG : LMEM_CFG,
+ LMEM_EN | REG_FIELD_PREP(LMTT_DIR_PTR, offset / SZ_64K));
}
/**
@@ -225,8 +228,8 @@ void xe_lmtt_init_hw(struct xe_lmtt *lmtt)
static int lmtt_invalidate_hw(struct xe_lmtt *lmtt)
{
- struct xe_gt_tlb_invalidation_fence fences[XE_MAX_GT_PER_TILE];
- struct xe_gt_tlb_invalidation_fence *fence = fences;
+ struct xe_tlb_inval_fence fences[XE_MAX_GT_PER_TILE];
+ struct xe_tlb_inval_fence *fence = fences;
struct xe_tile *tile = lmtt_to_tile(lmtt);
struct xe_gt *gt;
int result = 0;
@@ -234,8 +237,8 @@ static int lmtt_invalidate_hw(struct xe_lmtt *lmtt)
u8 id;
for_each_gt_on_tile(gt, tile, id) {
- xe_gt_tlb_invalidation_fence_init(gt, fence, true);
- err = xe_gt_tlb_invalidation_all(gt, fence);
+ xe_tlb_inval_fence_init(&gt->tlb_inval, fence, true);
+ err = xe_tlb_inval_all(&gt->tlb_inval, fence);
result = result ?: err;
fence++;
}
@@ -249,7 +252,7 @@ static int lmtt_invalidate_hw(struct xe_lmtt *lmtt)
*/
fence = fences;
for_each_gt_on_tile(gt, tile, id)
- xe_gt_tlb_invalidation_fence_wait(fence++);
+ xe_tlb_inval_fence_wait(fence++);
return result;
}
diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c
index 6d38411bdeba..47e9df775072 100644
--- a/drivers/gpu/drm/xe/xe_lrc.c
+++ b/drivers/gpu/drm/xe/xe_lrc.c
@@ -8,6 +8,7 @@
#include <generated/xe_wa_oob.h>
#include <linux/ascii85.h>
+#include <linux/panic.h>
#include "instructions/xe_mi_commands.h"
#include "instructions/xe_gfxpipe_commands.h"
@@ -16,6 +17,7 @@
#include "regs/xe_lrc_layout.h"
#include "xe_bb.h"
#include "xe_bo.h"
+#include "xe_configfs.h"
#include "xe_device.h"
#include "xe_drm_client.h"
#include "xe_exec_queue_types.h"
@@ -41,7 +43,6 @@
#define LRC_PPHWSP_SIZE SZ_4K
#define LRC_INDIRECT_CTX_BO_SIZE SZ_4K
#define LRC_INDIRECT_RING_STATE_SIZE SZ_4K
-#define LRC_WA_BB_SIZE SZ_4K
/*
* Layout of the LRC and associated data allocated as
@@ -76,6 +77,17 @@ lrc_to_xe(struct xe_lrc *lrc)
static bool
gt_engine_needs_indirect_ctx(struct xe_gt *gt, enum xe_engine_class class)
{
+ struct xe_device *xe = gt_to_xe(gt);
+
+ if (XE_GT_WA(gt, 16010904313) &&
+ (class == XE_ENGINE_CLASS_RENDER ||
+ class == XE_ENGINE_CLASS_COMPUTE))
+ return true;
+
+ if (xe_configfs_get_ctx_restore_mid_bb(to_pci_dev(xe->drm.dev),
+ class, NULL))
+ return true;
+
return false;
}
@@ -692,7 +704,13 @@ u32 xe_lrc_regs_offset(struct xe_lrc *lrc)
return xe_lrc_pphwsp_offset(lrc) + LRC_PPHWSP_SIZE;
}
-static size_t lrc_reg_size(struct xe_device *xe)
+/**
+ * xe_lrc_reg_size() - Get size of the LRC registers area within queues
+ * @xe: the &xe_device struct instance
+ *
+ * Returns: Size of the LRC registers area for the current platform
+ */
+size_t xe_lrc_reg_size(struct xe_device *xe)
{
if (GRAPHICS_VERx100(xe) >= 1250)
return 96 * sizeof(u32);
@@ -702,7 +720,7 @@ static size_t lrc_reg_size(struct xe_device *xe)
size_t xe_lrc_skip_size(struct xe_device *xe)
{
- return LRC_PPHWSP_SIZE + lrc_reg_size(xe);
+ return LRC_PPHWSP_SIZE + xe_lrc_reg_size(xe);
}
static inline u32 __xe_lrc_seqno_offset(struct xe_lrc *lrc)
@@ -943,6 +961,47 @@ static void *empty_lrc_data(struct xe_hw_engine *hwe)
return data;
}
+/**
+ * xe_default_lrc_update_memirq_regs_with_address - Re-compute GGTT references in default LRC
+ * of given engine.
+ * @hwe: the &xe_hw_engine struct instance
+ */
+void xe_default_lrc_update_memirq_regs_with_address(struct xe_hw_engine *hwe)
+{
+ struct xe_gt *gt = hwe->gt;
+ u32 *regs;
+
+ if (!gt->default_lrc[hwe->class])
+ return;
+
+ regs = gt->default_lrc[hwe->class] + LRC_PPHWSP_SIZE;
+ set_memory_based_intr(regs, hwe);
+}
+
+/**
+ * xe_lrc_update_memirq_regs_with_address - Re-compute GGTT references in mem interrupt data
+ * for given LRC.
+ * @lrc: the &xe_lrc struct instance
+ * @hwe: the &xe_hw_engine struct instance
+ * @regs: scratch buffer to be used as temporary storage
+ */
+void xe_lrc_update_memirq_regs_with_address(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
+ u32 *regs)
+{
+ struct xe_gt *gt = hwe->gt;
+ struct iosys_map map;
+ size_t regs_len;
+
+ if (!xe_device_uses_memirq(gt_to_xe(gt)))
+ return;
+
+ map = __xe_lrc_regs_map(lrc);
+ regs_len = xe_lrc_reg_size(gt_to_xe(gt));
+ xe_map_memcpy_from(gt_to_xe(gt), regs, &map, 0, regs_len);
+ set_memory_based_intr(regs, hwe);
+ xe_map_memcpy_to(gt_to_xe(gt), &map, 0, regs, regs_len);
+}
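+
+/*
+ * A sketch of the expected call pattern with caller-provided scratch
+ * (the allocation below is illustrative, not the actual caller):
+ *
+ *	u32 *regs = kmalloc(xe_lrc_reg_size(gt_to_xe(gt)), GFP_KERNEL);
+ *
+ *	if (regs)
+ *		xe_lrc_update_memirq_regs_with_address(lrc, hwe, regs);
+ *	kfree(regs);
+ */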
+
static void xe_lrc_set_ppgtt(struct xe_lrc *lrc, struct xe_vm *vm)
{
u64 desc = xe_vm_pdp4_descriptor(vm, gt_to_tile(lrc->gt));
@@ -1014,6 +1073,121 @@ static ssize_t setup_utilization_wa(struct xe_lrc *lrc,
return cmd - batch;
}
+static ssize_t setup_timestamp_wa(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
+ u32 *batch, size_t max_len)
+{
+ const u32 ts_addr = __xe_lrc_ctx_timestamp_ggtt_addr(lrc);
+ u32 *cmd = batch;
+
+ if (!XE_GT_WA(lrc->gt, 16010904313) ||
+ !(hwe->class == XE_ENGINE_CLASS_RENDER ||
+ hwe->class == XE_ENGINE_CLASS_COMPUTE ||
+ hwe->class == XE_ENGINE_CLASS_COPY ||
+ hwe->class == XE_ENGINE_CLASS_VIDEO_DECODE ||
+ hwe->class == XE_ENGINE_CLASS_VIDEO_ENHANCE))
+ return 0;
+
+ if (xe_gt_WARN_ON(lrc->gt, max_len < 12))
+ return -ENOSPC;
+
+ *cmd++ = MI_LOAD_REGISTER_MEM | MI_LRM_USE_GGTT | MI_LRI_LRM_CS_MMIO |
+ MI_LRM_ASYNC;
+ *cmd++ = RING_CTX_TIMESTAMP(0).addr;
+ *cmd++ = ts_addr;
+ *cmd++ = 0;
+
+ *cmd++ = MI_LOAD_REGISTER_MEM | MI_LRM_USE_GGTT | MI_LRI_LRM_CS_MMIO |
+ MI_LRM_ASYNC;
+ *cmd++ = RING_CTX_TIMESTAMP(0).addr;
+ *cmd++ = ts_addr;
+ *cmd++ = 0;
+
+ *cmd++ = MI_LOAD_REGISTER_MEM | MI_LRM_USE_GGTT | MI_LRI_LRM_CS_MMIO;
+ *cmd++ = RING_CTX_TIMESTAMP(0).addr;
+ *cmd++ = ts_addr;
+ *cmd++ = 0;
+
+ return cmd - batch;
+}
+
+static ssize_t setup_configfs_post_ctx_restore_bb(struct xe_lrc *lrc,
+ struct xe_hw_engine *hwe,
+ u32 *batch, size_t max_len)
+{
+ struct xe_device *xe = gt_to_xe(lrc->gt);
+ const u32 *user_batch;
+ u32 *cmd = batch;
+ u32 count;
+
+ count = xe_configfs_get_ctx_restore_post_bb(to_pci_dev(xe->drm.dev),
+ hwe->class, &user_batch);
+ if (!count)
+ return 0;
+
+ if (count > max_len)
+ return -ENOSPC;
+
+ /*
+ * This should be used only for tests and validation. Taint the kernel
+	 * as anything could be submitted directly in context switches.
+ */
+ add_taint(TAINT_TEST, LOCKDEP_STILL_OK);
+
+ memcpy(cmd, user_batch, count * sizeof(u32));
+ cmd += count;
+
+ return cmd - batch;
+}
+
+static ssize_t setup_configfs_mid_ctx_restore_bb(struct xe_lrc *lrc,
+ struct xe_hw_engine *hwe,
+ u32 *batch, size_t max_len)
+{
+ struct xe_device *xe = gt_to_xe(lrc->gt);
+ const u32 *user_batch;
+ u32 *cmd = batch;
+ u32 count;
+
+ count = xe_configfs_get_ctx_restore_mid_bb(to_pci_dev(xe->drm.dev),
+ hwe->class, &user_batch);
+ if (!count)
+ return 0;
+
+ if (count > max_len)
+ return -ENOSPC;
+
+ /*
+ * This should be used only for tests and validation. Taint the kernel
+	 * as anything could be submitted directly in context switches.
+ */
+ add_taint(TAINT_TEST, LOCKDEP_STILL_OK);
+
+ memcpy(cmd, user_batch, count * sizeof(u32));
+ cmd += count;
+
+ return cmd - batch;
+}
+
+static ssize_t setup_invalidate_state_cache_wa(struct xe_lrc *lrc,
+ struct xe_hw_engine *hwe,
+ u32 *batch, size_t max_len)
+{
+ u32 *cmd = batch;
+
+ if (!XE_GT_WA(lrc->gt, 18022495364) ||
+ hwe->class != XE_ENGINE_CLASS_RENDER)
+ return 0;
+
+ if (xe_gt_WARN_ON(lrc->gt, max_len < 3))
+ return -ENOSPC;
+
+ *cmd++ = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(1);
+ *cmd++ = CS_DEBUG_MODE1(0).addr;
+ *cmd++ = _MASKED_BIT_ENABLE(INSTRUCTION_STATE_CACHE_INVALIDATE);
+
+ return cmd - batch;
+}
+
struct bo_setup {
ssize_t (*setup)(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
u32 *batch, size_t max_size);
@@ -1040,13 +1214,11 @@ static int setup_bo(struct bo_setup_state *state)
ssize_t remain;
if (state->lrc->bo->vmap.is_iomem) {
- state->buffer = kmalloc(state->max_size, GFP_KERNEL);
if (!state->buffer)
return -ENOMEM;
state->ptr = state->buffer;
} else {
state->ptr = state->lrc->bo->vmap.vaddr + state->offset;
- state->buffer = NULL;
}
remain = state->max_size / sizeof(u32);
@@ -1071,7 +1243,6 @@ static int setup_bo(struct bo_setup_state *state)
return 0;
fail:
- kfree(state->buffer);
return -ENOSPC;
}
@@ -1083,18 +1254,28 @@ static void finish_bo(struct bo_setup_state *state)
xe_map_memcpy_to(gt_to_xe(state->lrc->gt), &state->lrc->bo->vmap,
state->offset, state->buffer,
state->written * sizeof(u32));
- kfree(state->buffer);
}
-static int setup_wa_bb(struct xe_lrc *lrc, struct xe_hw_engine *hwe)
+/**
+ * xe_lrc_setup_wa_bb_with_scratch - Execute all wa bb setup callbacks.
+ * @lrc: the &xe_lrc struct instance
+ * @hwe: the &xe_hw_engine struct instance
+ * @scratch: preallocated scratch buffer for temporary storage
+ * Return: 0 on success, negative error code on failure
+ */
+int xe_lrc_setup_wa_bb_with_scratch(struct xe_lrc *lrc, struct xe_hw_engine *hwe, u32 *scratch)
{
static const struct bo_setup funcs[] = {
+ { .setup = setup_timestamp_wa },
+ { .setup = setup_invalidate_state_cache_wa },
{ .setup = setup_utilization_wa },
+ { .setup = setup_configfs_post_ctx_restore_bb },
};
struct bo_setup_state state = {
.lrc = lrc,
.hwe = hwe,
.max_size = LRC_WA_BB_SIZE,
+ .buffer = scratch,
.reserve_dw = 1,
.offset = __xe_lrc_wa_bb_offset(lrc),
.funcs = funcs,
@@ -1117,15 +1298,36 @@ static int setup_wa_bb(struct xe_lrc *lrc, struct xe_hw_engine *hwe)
return 0;
}
+static int setup_wa_bb(struct xe_lrc *lrc, struct xe_hw_engine *hwe)
+{
+ u32 *buf = NULL;
+ int ret;
+
+ if (lrc->bo->vmap.is_iomem)
+ buf = kmalloc(LRC_WA_BB_SIZE, GFP_KERNEL);
+
+ ret = xe_lrc_setup_wa_bb_with_scratch(lrc, hwe, buf);
+
+ kfree(buf);
+
+ return ret;
+}
+
static int
setup_indirect_ctx(struct xe_lrc *lrc, struct xe_hw_engine *hwe)
{
- static struct bo_setup rcs_funcs[] = {
+ static const struct bo_setup rcs_funcs[] = {
+ { .setup = setup_timestamp_wa },
+ { .setup = setup_configfs_mid_ctx_restore_bb },
+ };
+ static const struct bo_setup xcs_funcs[] = {
+ { .setup = setup_configfs_mid_ctx_restore_bb },
};
struct bo_setup_state state = {
.lrc = lrc,
.hwe = hwe,
.max_size = (63 * 64) /* max 63 cachelines */,
+ .buffer = NULL,
.offset = __xe_lrc_indirect_ctx_offset(lrc),
};
int ret;
@@ -1137,14 +1339,22 @@ setup_indirect_ctx(struct xe_lrc *lrc, struct xe_hw_engine *hwe)
hwe->class == XE_ENGINE_CLASS_COMPUTE) {
state.funcs = rcs_funcs;
state.num_funcs = ARRAY_SIZE(rcs_funcs);
+ } else {
+ state.funcs = xcs_funcs;
+ state.num_funcs = ARRAY_SIZE(xcs_funcs);
}
if (xe_gt_WARN_ON(lrc->gt, !state.funcs))
return 0;
+ if (lrc->bo->vmap.is_iomem)
+ state.buffer = kmalloc(state.max_size, GFP_KERNEL);
+
ret = setup_bo(&state);
- if (ret)
+ if (ret) {
+ kfree(state.buffer);
return ret;
+ }
/*
* Align to 64B cacheline so there's no garbage at the end for CS to
@@ -1156,15 +1366,17 @@ setup_indirect_ctx(struct xe_lrc *lrc, struct xe_hw_engine *hwe)
}
finish_bo(&state);
+ kfree(state.buffer);
+ /*
+ * Enable INDIRECT_CTX leaving INDIRECT_CTX_OFFSET at its default: it
+	 * varies per engine class, but the default is good enough.
+ */
xe_lrc_write_ctx_reg(lrc,
CTX_CS_INDIRECT_CTX,
(xe_bo_ggtt_addr(lrc->bo) + state.offset) |
/* Size in CLs. */
(state.written * sizeof(u32) / 64));
- xe_lrc_write_ctx_reg(lrc,
- CTX_CS_INDIRECT_CTX_OFFSET,
- CTX_INDIRECT_CTX_OFFSET_DEFAULT);
return 0;
}
@@ -1203,9 +1415,10 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
if (vm && vm->xef) /* userspace */
bo_flags |= XE_BO_FLAG_PINNED_LATE_RESTORE;
- lrc->bo = xe_bo_create_pin_map(xe, tile, NULL, bo_size,
- ttm_bo_type_kernel,
- bo_flags);
+ lrc->bo = xe_bo_create_pin_map_novm(xe, tile,
+ bo_size,
+ ttm_bo_type_kernel,
+ bo_flags, false);
if (IS_ERR(lrc->bo))
return PTR_ERR(lrc->bo);
@@ -1374,6 +1587,23 @@ void xe_lrc_destroy(struct kref *ref)
kfree(lrc);
}
+/**
+ * xe_lrc_update_hwctx_regs_with_address - Re-compute GGTT references within given LRC.
+ * @lrc: the &xe_lrc struct instance
+ */
+void xe_lrc_update_hwctx_regs_with_address(struct xe_lrc *lrc)
+{
+ if (xe_lrc_has_indirect_ring_state(lrc)) {
+ xe_lrc_write_ctx_reg(lrc, CTX_INDIRECT_RING_STATE,
+ __xe_lrc_indirect_ring_ggtt_addr(lrc));
+
+ xe_lrc_write_indirect_ctx_reg(lrc, INDIRECT_CTX_RING_START,
+ __xe_lrc_ring_ggtt_addr(lrc));
+ } else {
+ xe_lrc_write_ctx_reg(lrc, CTX_RING_START, __xe_lrc_ring_ggtt_addr(lrc));
+ }
+}
+
void xe_lrc_set_ring_tail(struct xe_lrc *lrc, u32 tail)
{
if (xe_lrc_has_indirect_ring_state(lrc))
@@ -1939,7 +2169,7 @@ u32 *xe_lrc_emit_hwe_state_instructions(struct xe_exec_queue *q, u32 *cs)
* continue to emit all of the SVG state since it's best not to leak
* any of the state between contexts, even if that leakage is harmless.
*/
- if (XE_WA(gt, 14019789679) && q->hwe->class == XE_ENGINE_CLASS_RENDER) {
+ if (XE_GT_WA(gt, 14019789679) && q->hwe->class == XE_ENGINE_CLASS_RENDER) {
state_table = xe_hpg_svg_state;
state_table_size = ARRAY_SIZE(xe_hpg_svg_state);
}
diff --git a/drivers/gpu/drm/xe/xe_lrc.h b/drivers/gpu/drm/xe/xe_lrc.h
index b6c8053c581b..188565465779 100644
--- a/drivers/gpu/drm/xe/xe_lrc.h
+++ b/drivers/gpu/drm/xe/xe_lrc.h
@@ -42,6 +42,8 @@ struct xe_lrc_snapshot {
#define LRC_PPHWSP_FLUSH_INVAL_SCRATCH_ADDR (0x34 * 4)
#define LRC_PPHWSP_PXP_INVAL_SCRATCH_ADDR (0x40 * 4)
+#define LRC_WA_BB_SIZE SZ_4K
+
#define XE_LRC_CREATE_RUNALONE 0x1
#define XE_LRC_CREATE_PXP 0x2
struct xe_lrc *xe_lrc_create(struct xe_hw_engine *hwe, struct xe_vm *vm,
@@ -88,6 +90,10 @@ bool xe_lrc_ring_is_idle(struct xe_lrc *lrc);
u32 xe_lrc_indirect_ring_ggtt_addr(struct xe_lrc *lrc);
u32 xe_lrc_ggtt_addr(struct xe_lrc *lrc);
u32 *xe_lrc_regs(struct xe_lrc *lrc);
+void xe_lrc_update_hwctx_regs_with_address(struct xe_lrc *lrc);
+void xe_default_lrc_update_memirq_regs_with_address(struct xe_hw_engine *hwe);
+void xe_lrc_update_memirq_regs_with_address(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
+ u32 *regs);
u32 xe_lrc_read_ctx_reg(struct xe_lrc *lrc, int reg_nr);
void xe_lrc_write_ctx_reg(struct xe_lrc *lrc, int reg_nr, u32 val);
@@ -106,6 +112,7 @@ s32 xe_lrc_start_seqno(struct xe_lrc *lrc);
u32 xe_lrc_parallel_ggtt_addr(struct xe_lrc *lrc);
struct iosys_map xe_lrc_parallel_map(struct xe_lrc *lrc);
+size_t xe_lrc_reg_size(struct xe_device *xe);
size_t xe_lrc_skip_size(struct xe_device *xe);
void xe_lrc_dump_default(struct drm_printer *p,
@@ -124,6 +131,8 @@ u32 xe_lrc_ctx_timestamp_udw_ggtt_addr(struct xe_lrc *lrc);
u64 xe_lrc_ctx_timestamp(struct xe_lrc *lrc);
u32 xe_lrc_ctx_job_timestamp_ggtt_addr(struct xe_lrc *lrc);
u32 xe_lrc_ctx_job_timestamp(struct xe_lrc *lrc);
+int xe_lrc_setup_wa_bb_with_scratch(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
+ u32 *scratch);
/**
* xe_lrc_update_timestamp - readout LRC timestamp and update cached value
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index 84f412fd3c5d..1d667fa36cf3 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -9,6 +9,7 @@
#include <linux/sizes.h>
#include <drm/drm_managed.h>
+#include <drm/drm_pagemap.h>
#include <drm/ttm/ttm_tt.h>
#include <uapi/drm/xe_drm.h>
@@ -30,10 +31,13 @@
#include "xe_mocs.h"
#include "xe_pt.h"
#include "xe_res_cursor.h"
+#include "xe_sa.h"
#include "xe_sched_job.h"
#include "xe_sync.h"
#include "xe_trace_bo.h"
+#include "xe_validation.h"
#include "xe_vm.h"
+#include "xe_vram.h"
/**
* struct xe_migrate - migrate context.
@@ -84,19 +88,6 @@ struct xe_migrate {
*/
#define MAX_PTE_PER_SDI 0x1FEU
-/**
- * xe_tile_migrate_exec_queue() - Get this tile's migrate exec queue.
- * @tile: The tile.
- *
- * Returns the default migrate exec queue of this tile.
- *
- * Return: The default migrate exec queue
- */
-struct xe_exec_queue *xe_tile_migrate_exec_queue(struct xe_tile *tile)
-{
- return tile->migrate->q;
-}
-
static void xe_migrate_fini(void *arg)
{
struct xe_migrate *m = arg;
@@ -130,38 +121,39 @@ static u64 xe_migrate_vram_ofs(struct xe_device *xe, u64 addr, bool is_comp_pte)
u64 identity_offset = IDENTITY_OFFSET;
if (GRAPHICS_VER(xe) >= 20 && is_comp_pte)
- identity_offset += DIV_ROUND_UP_ULL(xe->mem.vram.actual_physical_size, SZ_1G);
+		identity_offset +=
+			DIV_ROUND_UP_ULL(xe_vram_region_actual_physical_size(xe->mem.vram),
+					 SZ_1G);
- addr -= xe->mem.vram.dpa_base;
+ addr -= xe_vram_region_dpa_base(xe->mem.vram);
return addr + (identity_offset << xe_pt_shift(2));
}
static void xe_migrate_program_identity(struct xe_device *xe, struct xe_vm *vm, struct xe_bo *bo,
u64 map_ofs, u64 vram_offset, u16 pat_index, u64 pt_2m_ofs)
{
+ struct xe_vram_region *vram = xe->mem.vram;
+ resource_size_t dpa_base = xe_vram_region_dpa_base(vram);
u64 pos, ofs, flags;
u64 entry;
/* XXX: Unclear if this should be usable_size? */
- u64 vram_limit = xe->mem.vram.actual_physical_size +
- xe->mem.vram.dpa_base;
+ u64 vram_limit = xe_vram_region_actual_physical_size(vram) + dpa_base;
u32 level = 2;
ofs = map_ofs + XE_PAGE_SIZE * level + vram_offset * 8;
flags = vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level,
true, 0);
- xe_assert(xe, IS_ALIGNED(xe->mem.vram.usable_size, SZ_2M));
+ xe_assert(xe, IS_ALIGNED(xe_vram_region_usable_size(vram), SZ_2M));
/*
* Use 1GB pages when possible, last chunk always use 2M
* pages as mixing reserved memory (stolen, WOCPM) with a single
* mapping is not allowed on certain platforms.
*/
- for (pos = xe->mem.vram.dpa_base; pos < vram_limit;
+ for (pos = dpa_base; pos < vram_limit;
pos += SZ_1G, ofs += 8) {
if (pos + SZ_1G >= vram_limit) {
- entry = vm->pt_ops->pde_encode_bo(bo, pt_2m_ofs,
- pat_index);
+ entry = vm->pt_ops->pde_encode_bo(bo, pt_2m_ofs);
xe_map_wr(xe, &bo->vmap, ofs, u64, entry);
flags = vm->pt_ops->pte_encode_addr(xe, 0,
@@ -182,7 +174,7 @@ static void xe_migrate_program_identity(struct xe_device *xe, struct xe_vm *vm,
}
static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
- struct xe_vm *vm)
+ struct xe_vm *vm, struct drm_exec *exec)
{
struct xe_device *xe = tile_to_xe(tile);
u16 pat_index = xe->pat.idx[XE_CACHE_WB];
@@ -209,13 +201,13 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
num_entries * XE_PAGE_SIZE,
ttm_bo_type_kernel,
XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_PAGETABLE);
+ XE_BO_FLAG_PAGETABLE, exec);
if (IS_ERR(bo))
return PTR_ERR(bo);
/* PT30 & PT31 reserved for 2M identity map */
pt29_ofs = xe_bo_size(bo) - 3 * XE_PAGE_SIZE;
- entry = vm->pt_ops->pde_encode_bo(bo, pt29_ofs, pat_index);
+ entry = vm->pt_ops->pde_encode_bo(bo, pt29_ofs);
xe_pt_write(xe, &vm->pt_root[id]->bo->vmap, 0, entry);
map_ofs = (num_entries - num_setup) * XE_PAGE_SIZE;
@@ -283,15 +275,14 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
flags = XE_PDE_64K;
entry = vm->pt_ops->pde_encode_bo(bo, map_ofs + (u64)(level - 1) *
- XE_PAGE_SIZE, pat_index);
+ XE_PAGE_SIZE);
xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level, u64,
entry | flags);
}
/* Write PDE's that point to our BO. */
- for (i = 0; i < map_ofs / PAGE_SIZE; i++) {
- entry = vm->pt_ops->pde_encode_bo(bo, (u64)i * XE_PAGE_SIZE,
- pat_index);
+ for (i = 0; i < map_ofs / XE_PAGE_SIZE; i++) {
+ entry = vm->pt_ops->pde_encode_bo(bo, (u64)i * XE_PAGE_SIZE);
xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE +
(i + 1) * 8, u64, entry);
@@ -307,11 +298,11 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
/* Identity map the entire vram at 256GiB offset */
if (IS_DGFX(xe)) {
u64 pt30_ofs = xe_bo_size(bo) - 2 * XE_PAGE_SIZE;
+ resource_size_t actual_phy_size = xe_vram_region_actual_physical_size(xe->mem.vram);
xe_migrate_program_identity(xe, vm, bo, map_ofs, IDENTITY_OFFSET,
pat_index, pt30_ofs);
- xe_assert(xe, xe->mem.vram.actual_physical_size <=
- (MAX_NUM_PTE - IDENTITY_OFFSET) * SZ_1G);
+ xe_assert(xe, actual_phy_size <= (MAX_NUM_PTE - IDENTITY_OFFSET) * SZ_1G);
/*
* Identity map the entire vram for compressed pat_index for xe2+
@@ -320,11 +311,11 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
if (GRAPHICS_VER(xe) >= 20 && xe_device_has_flat_ccs(xe)) {
u16 comp_pat_index = xe->pat.idx[XE_CACHE_NONE_COMPRESSION];
u64 vram_offset = IDENTITY_OFFSET +
- DIV_ROUND_UP_ULL(xe->mem.vram.actual_physical_size, SZ_1G);
+ DIV_ROUND_UP_ULL(actual_phy_size, SZ_1G);
u64 pt31_ofs = xe_bo_size(bo) - XE_PAGE_SIZE;
- xe_assert(xe, xe->mem.vram.actual_physical_size <= (MAX_NUM_PTE -
- IDENTITY_OFFSET - IDENTITY_OFFSET / 2) * SZ_1G);
+ xe_assert(xe, actual_phy_size <= (MAX_NUM_PTE - IDENTITY_OFFSET -
+ IDENTITY_OFFSET / 2) * SZ_1G);
xe_migrate_program_identity(xe, vm, bo, map_ofs, vram_offset,
comp_pat_index, pt31_ofs);
}
@@ -387,38 +378,63 @@ static bool xe_migrate_needs_ccs_emit(struct xe_device *xe)
}
/**
- * xe_migrate_init() - Initialize a migrate context
- * @tile: Back-pointer to the tile we're initializing for.
+ * xe_migrate_alloc - Allocate a migrate struct for a given &xe_tile
+ * @tile: &xe_tile
*
- * Return: Pointer to a migrate context on success. Error pointer on error.
+ * Allocates a &xe_migrate for a given tile.
+ *
+ * Return: &xe_migrate on success, or NULL when out of memory.
*/
-struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
+struct xe_migrate *xe_migrate_alloc(struct xe_tile *tile)
+{
+ struct xe_migrate *m = drmm_kzalloc(&tile_to_xe(tile)->drm, sizeof(*m), GFP_KERNEL);
+
+ if (m)
+ m->tile = tile;
+ return m;
+}
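+
+/*
+ * A minimal sketch of the intended two-phase setup (the tile-init call
+ * site and error handling are assumptions, not the actual caller):
+ *
+ *	tile->migrate = xe_migrate_alloc(tile);
+ *	if (!tile->migrate)
+ *		return -ENOMEM;
+ *	...
+ *	err = xe_migrate_init(tile->migrate);
+ */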
+
+static int xe_migrate_lock_prepare_vm(struct xe_tile *tile, struct xe_migrate *m, struct xe_vm *vm)
{
struct xe_device *xe = tile_to_xe(tile);
+ struct xe_validation_ctx ctx;
+ struct drm_exec exec;
+ int err = 0;
+
+ xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {}, err) {
+ err = xe_vm_drm_exec_lock(vm, &exec);
+ drm_exec_retry_on_contention(&exec);
+ err = xe_migrate_prepare_vm(tile, m, vm, &exec);
+ drm_exec_retry_on_contention(&exec);
+ xe_validation_retry_on_oom(&ctx, &err);
+ }
+
+ return err;
+}
+
+/**
+ * xe_migrate_init() - Initialize a migrate context
+ * @m: The migration context
+ *
+ * Return: 0 if successful, negative error code on failure
+ */
+int xe_migrate_init(struct xe_migrate *m)
+{
+ struct xe_tile *tile = m->tile;
struct xe_gt *primary_gt = tile->primary_gt;
- struct xe_migrate *m;
+ struct xe_device *xe = tile_to_xe(tile);
struct xe_vm *vm;
int err;
- m = devm_kzalloc(xe->drm.dev, sizeof(*m), GFP_KERNEL);
- if (!m)
- return ERR_PTR(-ENOMEM);
-
- m->tile = tile;
-
/* Special layout, prepared below.. */
vm = xe_vm_create(xe, XE_VM_FLAG_MIGRATION |
XE_VM_FLAG_SET_TILE_ID(tile), NULL);
if (IS_ERR(vm))
- return ERR_CAST(vm);
+ return PTR_ERR(vm);
- xe_vm_lock(vm, false);
- err = xe_migrate_prepare_vm(tile, m, vm);
- xe_vm_unlock(vm);
- if (err) {
- xe_vm_close_and_put(vm);
- return ERR_PTR(err);
- }
+ err = xe_migrate_lock_prepare_vm(tile, m, vm);
+ if (err)
+ return err;
if (xe->info.has_usm) {
struct xe_hw_engine *hwe = xe_gt_hw_engine(primary_gt,
@@ -427,8 +443,10 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
false);
u32 logical_mask = xe_migrate_usm_logical_mask(primary_gt);
- if (!hwe || !logical_mask)
- return ERR_PTR(-EINVAL);
+ if (!hwe || !logical_mask) {
+ err = -EINVAL;
+ goto err_out;
+ }
/*
* XXX: Currently only reserving 1 (likely slow) BCS instance on
@@ -437,16 +455,18 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
m->q = xe_exec_queue_create(xe, vm, logical_mask, 1, hwe,
EXEC_QUEUE_FLAG_KERNEL |
EXEC_QUEUE_FLAG_PERMANENT |
- EXEC_QUEUE_FLAG_HIGH_PRIORITY, 0);
+ EXEC_QUEUE_FLAG_HIGH_PRIORITY |
+ EXEC_QUEUE_FLAG_MIGRATE, 0);
} else {
m->q = xe_exec_queue_create_class(xe, primary_gt, vm,
XE_ENGINE_CLASS_COPY,
EXEC_QUEUE_FLAG_KERNEL |
- EXEC_QUEUE_FLAG_PERMANENT, 0);
+ EXEC_QUEUE_FLAG_PERMANENT |
+ EXEC_QUEUE_FLAG_MIGRATE, 0);
}
if (IS_ERR(m->q)) {
- xe_vm_close_and_put(vm);
- return ERR_CAST(m->q);
+ err = PTR_ERR(m->q);
+ goto err_out;
}
mutex_init(&m->job_mutex);
@@ -456,7 +476,7 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
err = devm_add_action_or_reset(xe->drm.dev, xe_migrate_fini, m);
if (err)
- return ERR_PTR(err);
+ return err;
if (IS_DGFX(xe)) {
if (xe_migrate_needs_ccs_emit(xe))
@@ -471,7 +491,12 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
(unsigned long long)m->min_chunk_size);
}
- return m;
+ return err;
+
+err_out:
+ xe_vm_close_and_put(vm);
+ return err;
}
static u64 max_mem_transfer_per_pass(struct xe_device *xe)
@@ -834,11 +859,15 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
batch_size += pte_update_size(m, pte_flags, src, &src_it, &src_L0,
&src_L0_ofs, &src_L0_pt, 0, 0,
avail_pts);
-
- pte_flags = dst_is_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0;
- batch_size += pte_update_size(m, pte_flags, dst, &dst_it, &src_L0,
- &dst_L0_ofs, &dst_L0_pt, 0,
- avail_pts, avail_pts);
+ if (copy_only_ccs) {
+ dst_L0_ofs = src_L0_ofs;
+ } else {
+ pte_flags = dst_is_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0;
+ batch_size += pte_update_size(m, pte_flags, dst,
+ &dst_it, &src_L0,
+ &dst_L0_ofs, &dst_L0_pt,
+ 0, avail_pts, avail_pts);
+ }
if (copy_system_ccs) {
xe_assert(xe, type_device);
@@ -868,7 +897,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
if (dst_is_vram && xe_migrate_allow_identity(src_L0, &dst_it))
xe_res_next(&dst_it, src_L0);
- else
+ else if (!copy_only_ccs)
emit_pte(m, bb, dst_L0_pt, dst_is_vram, copy_system_ccs,
&dst_it, src_L0, dst);
@@ -896,11 +925,11 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
goto err;
}
- xe_sched_job_add_migrate_flush(job, flush_flags);
+ xe_sched_job_add_migrate_flush(job, flush_flags | MI_INVALIDATE_TLB);
if (!fence) {
err = xe_sched_job_add_deps(job, src_bo->ttm.base.resv,
DMA_RESV_USAGE_BOOKKEEP);
- if (!err && src_bo != dst_bo)
+ if (!err && src_bo->ttm.base.resv != dst_bo->ttm.base.resv)
err = xe_sched_job_add_deps(job, dst_bo->ttm.base.resv,
DMA_RESV_USAGE_BOOKKEEP);
if (err)
@@ -940,6 +969,167 @@ err_sync:
return fence;
}
+/**
+ * xe_migrate_lrc() - Get the LRC from migrate context.
+ * @migrate: Migrate context.
+ *
+ * Return: Pointer to the migrate context's LRC.
+ */
+struct xe_lrc *xe_migrate_lrc(struct xe_migrate *migrate)
+{
+ return migrate->q->lrc[0];
+}
+
+static int emit_flush_invalidate(struct xe_exec_queue *q, u32 *dw, int i,
+ u32 flags)
+{
+ struct xe_lrc *lrc = xe_exec_queue_lrc(q);
+ dw[i++] = MI_FLUSH_DW | MI_INVALIDATE_TLB | MI_FLUSH_DW_OP_STOREDW |
+ MI_FLUSH_IMM_DW | flags;
+ dw[i++] = lower_32_bits(xe_lrc_start_seqno_ggtt_addr(lrc)) |
+ MI_FLUSH_DW_USE_GTT;
+ dw[i++] = upper_32_bits(xe_lrc_start_seqno_ggtt_addr(lrc));
+ dw[i++] = MI_NOOP;
+ dw[i++] = MI_NOOP;
+
+ return i;
+}
+
+/**
+ * xe_migrate_ccs_rw_copy() - Copy content of TTM resources.
+ * @tile: Tile whose migration context is to be used.
+ * @q: Exec queue to be used along with the migration context.
+ * @src_bo: The buffer object whose CCS metadata is copied.
+ * @read_write: Direction of the copy, selecting CCS read or write BB commands.
+ *
+ * Creates batch buffer instructions to copy CCS metadata from CCS pool to
+ * memory and vice versa.
+ *
+ * This function should only be called for an iGPU.
+ *
+ * Return: 0 if successful, negative error code on failure.
+ */
+int xe_migrate_ccs_rw_copy(struct xe_tile *tile, struct xe_exec_queue *q,
+ struct xe_bo *src_bo,
+ enum xe_sriov_vf_ccs_rw_ctxs read_write)
+{
+ bool src_is_pltt = read_write == XE_SRIOV_VF_CCS_READ_CTX;
+ bool dst_is_pltt = read_write == XE_SRIOV_VF_CCS_WRITE_CTX;
+ struct ttm_resource *src = src_bo->ttm.resource;
+ struct xe_migrate *m = tile->migrate;
+ struct xe_gt *gt = tile->primary_gt;
+ u32 batch_size, batch_size_allocated;
+ struct xe_device *xe = gt_to_xe(gt);
+ struct xe_res_cursor src_it, ccs_it;
+ u64 size = xe_bo_size(src_bo);
+ struct xe_bb *bb = NULL;
+ u64 src_L0, src_L0_ofs;
+ u32 src_L0_pt;
+ int err;
+
+ xe_res_first_sg(xe_bo_sg(src_bo), 0, size, &src_it);
+
+ xe_res_first_sg(xe_bo_sg(src_bo), xe_bo_ccs_pages_start(src_bo),
+ PAGE_ALIGN(xe_device_ccs_bytes(xe, size)),
+ &ccs_it);
+
+	/* Calculate batch buffer size */
+	batch_size = 0;
+	while (size) {
+		u64 ccs_ofs, ccs_size;
+		u32 ccs_pt;
+		u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
+
+		batch_size += 10; /* Flush + ggtt addr + 2 NOP */
+
+ src_L0 = min_t(u64, max_mem_transfer_per_pass(xe), size);
+
+ batch_size += pte_update_size(m, false, src, &src_it, &src_L0,
+ &src_L0_ofs, &src_L0_pt, 0, 0,
+ avail_pts);
+
+ ccs_size = xe_device_ccs_bytes(xe, src_L0);
+ batch_size += pte_update_size(m, 0, NULL, &ccs_it, &ccs_size, &ccs_ofs,
+ &ccs_pt, 0, avail_pts, avail_pts);
+ xe_assert(xe, IS_ALIGNED(ccs_it.start, PAGE_SIZE));
+
+ /* Add copy commands size here */
+ batch_size += EMIT_COPY_CCS_DW;
+
+ size -= src_L0;
+ }
+
+ bb = xe_bb_ccs_new(gt, batch_size, read_write);
+ if (IS_ERR(bb)) {
+ drm_err(&xe->drm, "BB allocation failed.\n");
+ err = PTR_ERR(bb);
+ goto err_ret;
+ }
+
+ batch_size_allocated = batch_size;
+ size = xe_bo_size(src_bo);
+ batch_size = 0;
+
+ /*
+ * Emit PTE and copy commands here.
+	 * The CCS copy command only supports a limited size. If the size to be
+	 * copied exceeds that limit, the copy is divided into chunks, so the
+	 * sizes are calculated here again before each copy command is emitted.
+ */
+ while (size) {
+		u32 flush_flags = 0;
+		u64 ccs_ofs, ccs_size;
+		u32 ccs_pt;
+		u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
+
+		batch_size += 10; /* Flush + ggtt addr + 2 NOP */
+
+ src_L0 = xe_migrate_res_sizes(m, &src_it);
+
+ batch_size += pte_update_size(m, false, src, &src_it, &src_L0,
+ &src_L0_ofs, &src_L0_pt, 0, 0,
+ avail_pts);
+
+ ccs_size = xe_device_ccs_bytes(xe, src_L0);
+ batch_size += pte_update_size(m, 0, NULL, &ccs_it, &ccs_size, &ccs_ofs,
+ &ccs_pt, 0, avail_pts, avail_pts);
+ xe_assert(xe, IS_ALIGNED(ccs_it.start, PAGE_SIZE));
+ batch_size += EMIT_COPY_CCS_DW;
+
+ emit_pte(m, bb, src_L0_pt, false, true, &src_it, src_L0, src);
+
+ emit_pte(m, bb, ccs_pt, false, false, &ccs_it, ccs_size, src);
+
+ bb->len = emit_flush_invalidate(q, bb->cs, bb->len, flush_flags);
+ flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs, src_is_pltt,
+ src_L0_ofs, dst_is_pltt,
+ src_L0, ccs_ofs, true);
+ bb->len = emit_flush_invalidate(q, bb->cs, bb->len, flush_flags);
+
+ size -= src_L0;
+ }
+
+	xe_assert(xe, batch_size_allocated == bb->len);
+ src_bo->bb_ccs[read_write] = bb;
+
+ return 0;
+
+err_ret:
+ return err;
+}
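+
+/*
+ * Illustrative call site (a sketch assuming the VF CCS save/restore code;
+ * not the actual caller):
+ *
+ *	err = xe_migrate_ccs_rw_copy(tile, q, bo, XE_SRIOV_VF_CCS_READ_CTX);
+ *	if (err)
+ *		return err;
+ *
+ * On success, bo->bb_ccs[XE_SRIOV_VF_CCS_READ_CTX] holds the prepared BB.
+ */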
+
+/**
+ * xe_migrate_exec_queue() - Get the execution queue from migrate context.
+ * @migrate: Migrate context.
+ *
+ * Return: Pointer to the migrate context's execution queue.
+ */
+struct xe_exec_queue *xe_migrate_exec_queue(struct xe_migrate *migrate)
+{
+ return migrate->q;
+}
+
static void emit_clear_link_copy(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
u32 size, u32 pitch)
{
@@ -1119,11 +1309,13 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
size -= clear_L0;
/* Preemption is enabled again by the ring ops. */
- if (clear_vram && xe_migrate_allow_identity(clear_L0, &src_it))
+ if (clear_vram && xe_migrate_allow_identity(clear_L0, &src_it)) {
xe_res_next(&src_it, clear_L0);
- else
- emit_pte(m, bb, clear_L0_pt, clear_vram, clear_only_system_ccs,
- &src_it, clear_L0, dst);
+ } else {
+ emit_pte(m, bb, clear_L0_pt, clear_vram,
+ clear_only_system_ccs, &src_it, clear_L0, dst);
+ flush_flags |= MI_INVALIDATE_TLB;
+ }
bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
update_idx = bb->len;
@@ -1134,7 +1326,7 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
if (xe_migrate_needs_ccs_emit(xe)) {
emit_copy_ccs(gt, bb, clear_L0_ofs, true,
m->cleared_mem_ofs, false, clear_L0);
- flush_flags = MI_FLUSH_DW_CCS;
+ flush_flags |= MI_FLUSH_DW_CCS;
}
job = xe_bb_create_migration_job(m->q, bb,
@@ -1469,6 +1661,8 @@ next_cmd:
goto err_sa;
}
+ xe_sched_job_add_migrate_flush(job, MI_INVALIDATE_TLB);
+
if (ops->pre_commit) {
pt_update->job = job;
err = ops->pre_commit(pt_update);
@@ -1571,7 +1765,8 @@ static u32 pte_update_cmd_size(u64 size)
static void build_pt_update_batch_sram(struct xe_migrate *m,
struct xe_bb *bb, u32 pt_offset,
- dma_addr_t *sram_addr, u32 size)
+ struct drm_pagemap_addr *sram_addr,
+ u32 size)
{
u16 pat_index = tile_to_xe(m->tile)->pat.idx[XE_CACHE_WB];
u32 ptes;
@@ -1589,14 +1784,18 @@ static void build_pt_update_batch_sram(struct xe_migrate *m,
ptes -= chunk;
while (chunk--) {
- u64 addr = sram_addr[i++] & PAGE_MASK;
+ u64 addr = sram_addr[i].addr & PAGE_MASK;
+ xe_tile_assert(m->tile, sram_addr[i].proto ==
+ DRM_INTERCONNECT_SYSTEM);
xe_tile_assert(m->tile, addr);
addr = m->q->vm->pt_ops->pte_encode_addr(m->tile->xe,
addr, pat_index,
0, false, 0);
bb->cs[bb->len++] = lower_32_bits(addr);
bb->cs[bb->len++] = upper_32_bits(addr);
+
+ i++;
}
}
}
@@ -1612,7 +1811,8 @@ enum xe_migrate_copy_dir {
static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
unsigned long len,
unsigned long sram_offset,
- dma_addr_t *sram_addr, u64 vram_addr,
+ struct drm_pagemap_addr *sram_addr,
+ u64 vram_addr,
const enum xe_migrate_copy_dir dir)
{
struct xe_gt *gt = m->tile->primary_gt;
@@ -1628,6 +1828,7 @@ static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
unsigned int pitch = len >= PAGE_SIZE && !(len & ~PAGE_MASK) ?
PAGE_SIZE : 4;
int err;
+ unsigned long i, j;
if (drm_WARN_ON(&xe->drm, (len & XE_CACHELINE_MASK) ||
(sram_offset | vram_addr) & XE_CACHELINE_MASK))
@@ -1644,6 +1845,24 @@ static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
return ERR_PTR(err);
}
+ /*
+	 * If the order of a struct drm_pagemap_addr entry is greater than 0,
+	 * the entry is populated by the GPU pagemap, but subsequent entries
+	 * within the range of that order are not populated.
+ * build_pt_update_batch_sram() expects a fully populated array of
+ * struct drm_pagemap_addr. Ensure this is the case even with higher
+ * orders.
+ */
+ for (i = 0; i < npages;) {
+ unsigned int order = sram_addr[i].order;
+
+ for (j = 1; j < NR_PAGES(order) && i + j < npages; j++)
+ if (!sram_addr[i + j].addr)
+ sram_addr[i + j].addr = sram_addr[i].addr + j * PAGE_SIZE;
+
+ i += NR_PAGES(order);
+ }
+
build_pt_update_batch_sram(m, bb, pt_slot * XE_PAGE_SIZE,
sram_addr, len + sram_offset);
@@ -1669,7 +1888,7 @@ static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
goto err;
}
- xe_sched_job_add_migrate_flush(job, 0);
+ xe_sched_job_add_migrate_flush(job, MI_INVALIDATE_TLB);
mutex_lock(&m->job_mutex);
xe_sched_job_arm(job);
@@ -1694,7 +1913,7 @@ err:
* xe_migrate_to_vram() - Migrate to VRAM
* @m: The migration context.
* @npages: Number of pages to migrate.
- * @src_addr: Array of dma addresses (source of migrate)
+ * @src_addr: Array of DMA information (source of migrate)
* @dst_addr: Device physical address of VRAM (destination of migrate)
*
 * Copy from an array of DMA addresses to a VRAM device physical address
@@ -1704,7 +1923,7 @@ err:
*/
struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
unsigned long npages,
- dma_addr_t *src_addr,
+ struct drm_pagemap_addr *src_addr,
u64 dst_addr)
{
return xe_migrate_vram(m, npages * PAGE_SIZE, 0, src_addr, dst_addr,
@@ -1716,7 +1935,7 @@ struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
* @m: The migration context.
* @npages: Number of pages to migrate.
* @src_addr: Device physical address of VRAM (source of migrate)
- * @dst_addr: Array of dma addresses (destination of migrate)
+ * @dst_addr: Array of DMA information (destination of migrate)
*
 * Copy from a VRAM device physical address to an array of DMA addresses
*
@@ -1726,61 +1945,65 @@ struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
struct dma_fence *xe_migrate_from_vram(struct xe_migrate *m,
unsigned long npages,
u64 src_addr,
- dma_addr_t *dst_addr)
+ struct drm_pagemap_addr *dst_addr)
{
return xe_migrate_vram(m, npages * PAGE_SIZE, 0, dst_addr, src_addr,
XE_MIGRATE_COPY_TO_SRAM);
}
-static void xe_migrate_dma_unmap(struct xe_device *xe, dma_addr_t *dma_addr,
+static void xe_migrate_dma_unmap(struct xe_device *xe,
+ struct drm_pagemap_addr *pagemap_addr,
int len, int write)
{
unsigned long i, npages = DIV_ROUND_UP(len, PAGE_SIZE);
for (i = 0; i < npages; ++i) {
- if (!dma_addr[i])
+ if (!pagemap_addr[i].addr)
break;
- dma_unmap_page(xe->drm.dev, dma_addr[i], PAGE_SIZE,
+ dma_unmap_page(xe->drm.dev, pagemap_addr[i].addr, PAGE_SIZE,
write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
}
- kfree(dma_addr);
+ kfree(pagemap_addr);
}
-static dma_addr_t *xe_migrate_dma_map(struct xe_device *xe,
- void *buf, int len, int write)
+static struct drm_pagemap_addr *xe_migrate_dma_map(struct xe_device *xe,
+ void *buf, int len,
+ int write)
{
- dma_addr_t *dma_addr;
+ struct drm_pagemap_addr *pagemap_addr;
unsigned long i, npages = DIV_ROUND_UP(len, PAGE_SIZE);
- dma_addr = kcalloc(npages, sizeof(*dma_addr), GFP_KERNEL);
- if (!dma_addr)
+ pagemap_addr = kcalloc(npages, sizeof(*pagemap_addr), GFP_KERNEL);
+ if (!pagemap_addr)
return ERR_PTR(-ENOMEM);
for (i = 0; i < npages; ++i) {
dma_addr_t addr;
struct page *page;
+ enum dma_data_direction dir = write ? DMA_TO_DEVICE :
+ DMA_FROM_DEVICE;
if (is_vmalloc_addr(buf))
page = vmalloc_to_page(buf);
else
page = virt_to_page(buf);
- addr = dma_map_page(xe->drm.dev,
- page, 0, PAGE_SIZE,
- write ? DMA_TO_DEVICE :
- DMA_FROM_DEVICE);
+ addr = dma_map_page(xe->drm.dev, page, 0, PAGE_SIZE, dir);
if (dma_mapping_error(xe->drm.dev, addr))
goto err_fault;
- dma_addr[i] = addr;
+ pagemap_addr[i] =
+ drm_pagemap_addr_encode(addr,
+ DRM_INTERCONNECT_SYSTEM,
+ 0, dir);
buf += PAGE_SIZE;
}
- return dma_addr;
+ return pagemap_addr;
err_fault:
- xe_migrate_dma_unmap(xe, dma_addr, len, write);
+ xe_migrate_dma_unmap(xe, pagemap_addr, len, write);
return ERR_PTR(-EFAULT);
}
@@ -1809,7 +2032,7 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
struct xe_device *xe = tile_to_xe(tile);
struct xe_res_cursor cursor;
struct dma_fence *fence = NULL;
- dma_addr_t *dma_addr;
+ struct drm_pagemap_addr *pagemap_addr;
unsigned long page_offset = (unsigned long)buf & ~PAGE_MASK;
int bytes_left = len, current_page = 0;
void *orig_buf = buf;
@@ -1869,9 +2092,9 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
return err;
}
- dma_addr = xe_migrate_dma_map(xe, buf, len + page_offset, write);
- if (IS_ERR(dma_addr))
- return PTR_ERR(dma_addr);
+ pagemap_addr = xe_migrate_dma_map(xe, buf, len + page_offset, write);
+ if (IS_ERR(pagemap_addr))
+ return PTR_ERR(pagemap_addr);
xe_res_first(bo->ttm.resource, offset, xe_bo_size(bo) - offset, &cursor);
@@ -1895,7 +2118,7 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
__fence = xe_migrate_vram(m, current_bytes,
(unsigned long)buf & ~PAGE_MASK,
- dma_addr + current_page,
+ &pagemap_addr[current_page],
vram_addr, write ?
XE_MIGRATE_COPY_TO_VRAM :
XE_MIGRATE_COPY_TO_SRAM);
@@ -1923,10 +2146,46 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
dma_fence_put(fence);
out_err:
- xe_migrate_dma_unmap(xe, dma_addr, len + page_offset, write);
+ xe_migrate_dma_unmap(xe, pagemap_addr, len + page_offset, write);
return IS_ERR(fence) ? PTR_ERR(fence) : 0;
}
+/**
+ * xe_migrate_job_lock() - Lock migrate job lock
+ * @m: The migration context.
+ * @q: Queue associated with the operation which requires a lock
+ *
+ * Lock the migrate job lock if the queue is a migration queue, otherwise
+ * assert the VM's dma-resv is held (user queues have their own locking).
+ */
+void xe_migrate_job_lock(struct xe_migrate *m, struct xe_exec_queue *q)
+{
+ bool is_migrate = q == m->q;
+
+ if (is_migrate)
+ mutex_lock(&m->job_mutex);
+ else
+		xe_vm_assert_held(q->vm); /* User queue VMs should be locked */
+}
+
+/**
+ * xe_migrate_job_unlock() - Unlock migrate job lock
+ * @m: The migration context.
+ * @q: Queue associated with the operation which requires a lock
+ *
+ * Unlock the migrate job lock if the queue is a migration queue, otherwise
+ * assert the VM's dma-resv is held (user queues have their own locking).
+ */
+void xe_migrate_job_unlock(struct xe_migrate *m, struct xe_exec_queue *q)
+{
+ bool is_migrate = q == m->q;
+
+ if (is_migrate)
+ mutex_unlock(&m->job_mutex);
+ else
+		xe_vm_assert_held(q->vm); /* User queue VMs should be locked */
+}
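+
+/*
+ * Sketch of the expected pairing around job submission (mirrors the
+ * pattern used internally; the job-handling calls are illustrative):
+ *
+ *	xe_migrate_job_lock(m, q);
+ *	xe_sched_job_arm(job);
+ *	xe_sched_job_push(job);
+ *	xe_migrate_job_unlock(m, q);
+ */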
+
#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
#include "tests/xe_migrate.c"
#endif
diff --git a/drivers/gpu/drm/xe/xe_migrate.h b/drivers/gpu/drm/xe/xe_migrate.h
index fb9839c1bae0..4fad324b6253 100644
--- a/drivers/gpu/drm/xe/xe_migrate.h
+++ b/drivers/gpu/drm/xe/xe_migrate.h
@@ -9,11 +9,13 @@
#include <linux/types.h>
struct dma_fence;
+struct drm_pagemap_addr;
struct iosys_map;
struct ttm_resource;
struct xe_bo;
struct xe_gt;
+struct xe_tlb_inval_job;
struct xe_exec_queue;
struct xe_migrate;
struct xe_migrate_pt_update;
@@ -24,6 +26,8 @@ struct xe_vm;
struct xe_vm_pgtable_update;
struct xe_vma;
+enum xe_sriov_vf_ccs_rw_ctxs;
+
/**
* struct xe_migrate_pt_update_ops - Callbacks for the
* xe_migrate_update_pgtables() function.
@@ -89,21 +93,30 @@ struct xe_migrate_pt_update {
struct xe_vma_ops *vops;
/** @job: The job if a GPU page-table update. NULL otherwise */
struct xe_sched_job *job;
+ /**
+ * @ijob: The TLB invalidation job for primary GT. NULL otherwise
+ */
+ struct xe_tlb_inval_job *ijob;
+ /**
+ * @mjob: The TLB invalidation job for media GT. NULL otherwise
+ */
+ struct xe_tlb_inval_job *mjob;
/** @tile_id: Tile ID of the update */
u8 tile_id;
};
-struct xe_migrate *xe_migrate_init(struct xe_tile *tile);
+struct xe_migrate *xe_migrate_alloc(struct xe_tile *tile);
+int xe_migrate_init(struct xe_migrate *m);
struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
unsigned long npages,
- dma_addr_t *src_addr,
+ struct drm_pagemap_addr *src_addr,
u64 dst_addr);
struct dma_fence *xe_migrate_from_vram(struct xe_migrate *m,
unsigned long npages,
u64 src_addr,
- dma_addr_t *dst_addr);
+ struct drm_pagemap_addr *dst_addr);
struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
struct xe_bo *src_bo,
@@ -112,6 +125,12 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
struct ttm_resource *dst,
bool copy_only_ccs);
+int xe_migrate_ccs_rw_copy(struct xe_tile *tile, struct xe_exec_queue *q,
+ struct xe_bo *src_bo,
+ enum xe_sriov_vf_ccs_rw_ctxs read_write);
+
+struct xe_lrc *xe_migrate_lrc(struct xe_migrate *migrate);
+struct xe_exec_queue *xe_migrate_exec_queue(struct xe_migrate *migrate);
int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
unsigned long offset, void *buf, int len,
int write);
@@ -133,5 +152,7 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
void xe_migrate_wait(struct xe_migrate *m);
-struct xe_exec_queue *xe_tile_migrate_exec_queue(struct xe_tile *tile);
+void xe_migrate_job_lock(struct xe_migrate *m, struct xe_exec_queue *q);
+void xe_migrate_job_unlock(struct xe_migrate *m, struct xe_exec_queue *q);
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c
index e4db8d58ea2d..ef6f3ea573a2 100644
--- a/drivers/gpu/drm/xe/xe_mmio.c
+++ b/drivers/gpu/drm/xe/xe_mmio.c
@@ -58,7 +58,6 @@ static void tiles_fini(void *arg)
static void mmio_multi_tile_setup(struct xe_device *xe, size_t tile_mmio_size)
{
struct xe_tile *tile;
- struct xe_gt *gt;
u8 id;
/*
@@ -68,38 +67,6 @@ static void mmio_multi_tile_setup(struct xe_device *xe, size_t tile_mmio_size)
if (xe->info.tile_count == 1)
return;
- /* Possibly override number of tile based on configuration register */
- if (!xe->info.skip_mtcfg) {
- struct xe_mmio *mmio = xe_root_tile_mmio(xe);
- u8 tile_count, gt_count;
- u32 mtcfg;
-
- /*
- * Although the per-tile mmio regs are not yet initialized, this
- * is fine as it's going to the root tile's mmio, that's
- * guaranteed to be initialized earlier in xe_mmio_probe_early()
- */
- mtcfg = xe_mmio_read32(mmio, XEHP_MTCFG_ADDR);
- tile_count = REG_FIELD_GET(TILE_COUNT, mtcfg) + 1;
-
- if (tile_count < xe->info.tile_count) {
- drm_info(&xe->drm, "tile_count: %d, reduced_tile_count %d\n",
- xe->info.tile_count, tile_count);
- xe->info.tile_count = tile_count;
-
- /*
- * We've already setup gt_count according to the full
- * tile count. Re-calculate it to only include the GTs
- * that belong to the remaining tile(s).
- */
- gt_count = 0;
- for_each_gt(gt, xe, id)
- if (gt->info.id < tile_count * xe->info.max_gt_per_tile)
- gt_count++;
- xe->info.gt_count = gt_count;
- }
- }
-
for_each_remote_tile(tile, xe, id)
xe_mmio_init(&tile->mmio, tile, xe->mmio.regs + id * tile_mmio_size, SZ_4M);
}
diff --git a/drivers/gpu/drm/xe/xe_mmio_gem.c b/drivers/gpu/drm/xe/xe_mmio_gem.c
new file mode 100644
index 000000000000..9a97c4387e4f
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_mmio_gem.c
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include "xe_mmio_gem.h"
+
+#include <drm/drm_drv.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_managed.h>
+
+#include "xe_device_types.h"
+
+/**
+ * DOC: Exposing MMIO regions to userspace
+ *
+ * In certain cases, the driver may allow userspace to mmap a portion of the hardware registers.
+ *
+ * This can be done as follows:
+ * 1. Call xe_mmio_gem_create() to create a GEM object with an mmap-able fake offset.
+ * 2. Use xe_mmio_gem_mmap_offset() on the created GEM object to retrieve the fake offset.
+ * 3. Provide the fake offset to userspace.
+ * 4. Userspace can call mmap with the fake offset. The length provided to mmap
+ * must match the size of the GEM object.
+ * 5. When the region is no longer needed, call xe_mmio_gem_destroy() to release the GEM object.
+ *
+ * NOTE: The exposed MMIO region must be page-aligned with regard to its BAR offset and size.
+ *
+ * WARNING: Exposing MMIO regions to userspace can have security and stability implications.
+ * Make sure not to expose any sensitive registers.
+ */
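+
+/*
+ * A minimal usage sketch of the flow above (the ioctl plumbing and the
+ * args->mmap_offset field are assumptions for illustration only):
+ *
+ *	struct xe_mmio_gem *gem;
+ *
+ *	gem = xe_mmio_gem_create(xe, file, phys_addr, size);
+ *	if (IS_ERR(gem))
+ *		return PTR_ERR(gem);
+ *	args->mmap_offset = xe_mmio_gem_mmap_offset(gem);
+ *	...
+ *	xe_mmio_gem_destroy(gem);
+ */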
+
+static void xe_mmio_gem_free(struct drm_gem_object *);
+static int xe_mmio_gem_mmap(struct drm_gem_object *, struct vm_area_struct *);
+static vm_fault_t xe_mmio_gem_vm_fault(struct vm_fault *);
+
+struct xe_mmio_gem {
+ struct drm_gem_object base;
+ phys_addr_t phys_addr;
+};
+
+static const struct vm_operations_struct vm_ops = {
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+ .fault = xe_mmio_gem_vm_fault,
+};
+
+static const struct drm_gem_object_funcs xe_mmio_gem_funcs = {
+ .free = xe_mmio_gem_free,
+ .mmap = xe_mmio_gem_mmap,
+ .vm_ops = &vm_ops,
+};
+
+static inline struct xe_mmio_gem *to_xe_mmio_gem(struct drm_gem_object *obj)
+{
+ return container_of(obj, struct xe_mmio_gem, base);
+}
+
+/**
+ * xe_mmio_gem_create - Expose an MMIO region to userspace
+ * @xe: The xe device
+ * @file: DRM file descriptor
+ * @phys_addr: Start of the exposed MMIO region
+ * @size: The size of the exposed MMIO region
+ *
+ * This function creates a GEM object that exposes an MMIO region with an mmap-able
+ * fake offset.
+ *
+ * See: "Exposing MMIO regions to userspace"
+ *
+ * Return: Pointer to a &struct xe_mmio_gem on success, ERR_PTR() on failure.
+ */
+struct xe_mmio_gem *xe_mmio_gem_create(struct xe_device *xe, struct drm_file *file,
+ phys_addr_t phys_addr, size_t size)
+{
+ struct xe_mmio_gem *obj;
+ struct drm_gem_object *base;
+ int err;
+
+ if ((phys_addr % PAGE_SIZE != 0) || (size % PAGE_SIZE != 0))
+ return ERR_PTR(-EINVAL);
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+
+ base = &obj->base;
+ base->funcs = &xe_mmio_gem_funcs;
+ obj->phys_addr = phys_addr;
+
+ drm_gem_private_object_init(&xe->drm, base, size);
+
+ err = drm_gem_create_mmap_offset(base);
+ if (err)
+ goto free_gem;
+
+ err = drm_vma_node_allow(&base->vma_node, file);
+ if (err)
+ goto free_gem;
+
+ return obj;
+
+free_gem:
+ xe_mmio_gem_free(base);
+ return ERR_PTR(err);
+}
+
+/**
+ * xe_mmio_gem_mmap_offset - Return the mmap-able fake offset
+ * @gem: the GEM object created with xe_mmio_gem_create()
+ *
+ * This function returns the mmap-able fake offset allocated during
+ * xe_mmio_gem_create().
+ *
+ * See: "Exposing MMIO regions to userspace"
+ */
+u64 xe_mmio_gem_mmap_offset(struct xe_mmio_gem *gem)
+{
+ return drm_vma_node_offset_addr(&gem->base.vma_node);
+}
+
+static void xe_mmio_gem_free(struct drm_gem_object *base)
+{
+ struct xe_mmio_gem *obj = to_xe_mmio_gem(base);
+
+ drm_gem_object_release(base);
+ kfree(obj);
+}
+
+/**
+ * xe_mmio_gem_destroy - Destroy the GEM object that exposes an MMIO region
+ * @gem: the GEM object to destroy
+ *
+ * This function releases resources associated with the GEM object created by
+ * xe_mmio_gem_create().
+ *
+ * See: "Exposing MMIO regions to userspace"
+ */
+void xe_mmio_gem_destroy(struct xe_mmio_gem *gem)
+{
+ xe_mmio_gem_free(&gem->base);
+}
+
+static int xe_mmio_gem_mmap(struct drm_gem_object *base, struct vm_area_struct *vma)
+{
+ if (vma->vm_end - vma->vm_start != base->size)
+ return -EINVAL;
+
+ if ((vma->vm_flags & VM_SHARED) == 0)
+ return -EINVAL;
+
+ /* Set vm_pgoff (used as a fake buffer offset by DRM) to 0 */
+ vma->vm_pgoff = 0;
+ vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
+ vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
+ VM_DONTCOPY | VM_NORESERVE);
+
+ /* Defer actual mapping to the fault handler. */
+ return 0;
+}
+
+static void xe_mmio_gem_release_dummy_page(struct drm_device *dev, void *res)
+{
+ __free_page((struct page *)res);
+}
+
+static vm_fault_t xe_mmio_gem_vm_fault_dummy_page(struct vm_area_struct *vma)
+{
+ struct drm_gem_object *base = vma->vm_private_data;
+ struct drm_device *dev = base->dev;
+ vm_fault_t ret = VM_FAULT_NOPAGE;
+ struct page *page;
+ unsigned long pfn;
+ unsigned long i;
+
+ page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!page)
+ return VM_FAULT_OOM;
+
+ if (drmm_add_action_or_reset(dev, xe_mmio_gem_release_dummy_page, page))
+ return VM_FAULT_OOM;
+
+ pfn = page_to_pfn(page);
+
+ /* Map the entire VMA to the same dummy page */
+ for (i = 0; i < base->size; i += PAGE_SIZE) {
+ unsigned long addr = vma->vm_start + i;
+
+ ret = vmf_insert_pfn(vma, addr, pfn);
+ if (ret & VM_FAULT_ERROR)
+ break;
+ }
+
+ return ret;
+}
+
+static vm_fault_t xe_mmio_gem_vm_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct drm_gem_object *base = vma->vm_private_data;
+ struct xe_mmio_gem *obj = to_xe_mmio_gem(base);
+ struct drm_device *dev = base->dev;
+ vm_fault_t ret = VM_FAULT_NOPAGE;
+ unsigned long i;
+ int idx;
+
+ if (!drm_dev_enter(dev, &idx)) {
+ /*
+ * Provide a dummy page to avoid SIGBUS for events such as hot-unplug.
+		 * This gives userspace the option to recover instead of crashing.
+		 * It is assumed userspace will receive the notification via some
+ * other channel (e.g. drm uevent).
+ */
+ return xe_mmio_gem_vm_fault_dummy_page(vma);
+ }
+
+ for (i = 0; i < base->size; i += PAGE_SIZE) {
+ unsigned long addr = vma->vm_start + i;
+ unsigned long phys_addr = obj->phys_addr + i;
+
+ ret = vmf_insert_pfn(vma, addr, PHYS_PFN(phys_addr));
+ if (ret & VM_FAULT_ERROR)
+ break;
+ }
+
+ drm_dev_exit(idx);
+ return ret;
+}
diff --git a/drivers/gpu/drm/xe/xe_mmio_gem.h b/drivers/gpu/drm/xe/xe_mmio_gem.h
new file mode 100644
index 000000000000..4b76d5586ebb
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_mmio_gem.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_MMIO_GEM_H_
+#define _XE_MMIO_GEM_H_
+
+#include <linux/types.h>
+
+struct drm_file;
+struct xe_device;
+struct xe_mmio_gem;
+
+struct xe_mmio_gem *xe_mmio_gem_create(struct xe_device *xe, struct drm_file *file,
+ phys_addr_t phys_addr, size_t size);
+u64 xe_mmio_gem_mmap_offset(struct xe_mmio_gem *gem);
+void xe_mmio_gem_destroy(struct xe_mmio_gem *gem);
+
+#endif /* _XE_MMIO_GEM_H_ */
diff --git a/drivers/gpu/drm/xe/xe_module.c b/drivers/gpu/drm/xe/xe_module.c
index d9391bd08194..d08338fc3bc1 100644
--- a/drivers/gpu/drm/xe/xe_module.c
+++ b/drivers/gpu/drm/xe/xe_module.c
@@ -135,24 +135,17 @@ static const struct init_funcs init_funcs[] = {
},
};
-static int __init xe_call_init_func(unsigned int i)
+static int __init xe_call_init_func(const struct init_funcs *func)
{
- if (WARN_ON(i >= ARRAY_SIZE(init_funcs)))
- return 0;
- if (!init_funcs[i].init)
- return 0;
-
- return init_funcs[i].init();
+ if (func->init)
+ return func->init();
+ return 0;
}
-static void xe_call_exit_func(unsigned int i)
+static void xe_call_exit_func(const struct init_funcs *func)
{
- if (WARN_ON(i >= ARRAY_SIZE(init_funcs)))
- return;
- if (!init_funcs[i].exit)
- return;
-
- init_funcs[i].exit();
+ if (func->exit)
+ func->exit();
}
static int __init xe_init(void)
@@ -160,10 +153,12 @@ static int __init xe_init(void)
int err, i;
for (i = 0; i < ARRAY_SIZE(init_funcs); i++) {
- err = xe_call_init_func(i);
+ err = xe_call_init_func(init_funcs + i);
if (err) {
+ pr_info("%s: module_init aborted at %ps %pe\n",
+ DRIVER_NAME, init_funcs[i].init, ERR_PTR(err));
while (i--)
- xe_call_exit_func(i);
+ xe_call_exit_func(init_funcs + i);
return err;
}
}
@@ -176,7 +171,7 @@ static void __exit xe_exit(void)
int i;
for (i = ARRAY_SIZE(init_funcs) - 1; i >= 0; i--)
- xe_call_exit_func(i);
+ xe_call_exit_func(init_funcs + i);
}
module_init(xe_init);
diff --git a/drivers/gpu/drm/xe/xe_nvm.c b/drivers/gpu/drm/xe/xe_nvm.c
index 2cfe9eb67391..33f4ac82fc80 100644
--- a/drivers/gpu/drm/xe/xe_nvm.c
+++ b/drivers/gpu/drm/xe/xe_nvm.c
@@ -43,17 +43,17 @@ static void xe_nvm_release_dev(struct device *dev)
static bool xe_nvm_non_posted_erase(struct xe_device *xe)
{
- struct xe_gt *gt = xe_root_mmio_gt(xe);
+ struct xe_mmio *mmio = xe_root_tile_mmio(xe);
if (xe->info.platform != XE_BATTLEMAGE)
return false;
- return !(xe_mmio_read32(&gt->mmio, XE_REG(GEN12_CNTL_PROTECTED_NVM_REG)) &
+ return !(xe_mmio_read32(mmio, XE_REG(GEN12_CNTL_PROTECTED_NVM_REG)) &
NVM_NON_POSTED_ERASE_CHICKEN_BIT);
}
static bool xe_nvm_writable_override(struct xe_device *xe)
{
- struct xe_gt *gt = xe_root_mmio_gt(xe);
+ struct xe_mmio *mmio = xe_root_tile_mmio(xe);
bool writable_override;
resource_size_t base;
@@ -76,7 +76,7 @@ static bool xe_nvm_writable_override(struct xe_device *xe)
}
writable_override =
- !(xe_mmio_read32(&gt->mmio, HECI_FWSTS2(base)) &
+ !(xe_mmio_read32(mmio, HECI_FWSTS2(base)) &
HECI_FW_STATUS_2_NVM_ACCESS_MODE);
if (writable_override)
drm_info(&xe->drm, "NVM access overridden by jumper\n");
diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
index 5729e7d3e335..a4894eb0d7f3 100644
--- a/drivers/gpu/drm/xe/xe_oa.c
+++ b/drivers/gpu/drm/xe/xe_oa.c
@@ -822,7 +822,7 @@ static void xe_oa_disable_metric_set(struct xe_oa_stream *stream)
u32 sqcnt1;
/* Enable thread stall DOP gating and EU DOP gating. */
- if (XE_WA(stream->gt, 1508761755)) {
+ if (XE_GT_WA(stream->gt, 1508761755)) {
xe_gt_mcr_multicast_write(stream->gt, ROW_CHICKEN,
_MASKED_BIT_DISABLE(STALL_DOP_GATING_DISABLE));
xe_gt_mcr_multicast_write(stream->gt, ROW_CHICKEN2,
@@ -883,9 +883,9 @@ static int xe_oa_alloc_oa_buffer(struct xe_oa_stream *stream, size_t size)
{
struct xe_bo *bo;
- bo = xe_bo_create_pin_map(stream->oa->xe, stream->gt->tile, NULL,
- size, ttm_bo_type_kernel,
- XE_BO_FLAG_SYSTEM | XE_BO_FLAG_GGTT);
+ bo = xe_bo_create_pin_map_novm(stream->oa->xe, stream->gt->tile,
+ size, ttm_bo_type_kernel,
+ XE_BO_FLAG_SYSTEM | XE_BO_FLAG_GGTT, false);
if (IS_ERR(bo))
return PTR_ERR(bo);
@@ -1079,7 +1079,7 @@ static int xe_oa_enable_metric_set(struct xe_oa_stream *stream)
* EU NOA signals behave incorrectly if EU clock gating is enabled.
* Disable thread stall DOP gating and EU DOP gating.
*/
- if (XE_WA(stream->gt, 1508761755)) {
+ if (XE_GT_WA(stream->gt, 1508761755)) {
xe_gt_mcr_multicast_write(stream->gt, ROW_CHICKEN,
_MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
xe_gt_mcr_multicast_write(stream->gt, ROW_CHICKEN2,
@@ -1754,7 +1754,7 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream,
* GuC reset of engines causes OA to lose configuration
* state. Prevent this by overriding GUCRC mode.
*/
- if (XE_WA(stream->gt, 1509372804)) {
+ if (XE_GT_WA(stream->gt, 1509372804)) {
ret = xe_guc_pc_override_gucrc_mode(&gt->uc.guc.pc,
SLPC_GUCRC_MODE_GUCRC_NO_RC6);
if (ret)
@@ -1886,7 +1886,7 @@ u32 xe_oa_timestamp_frequency(struct xe_gt *gt)
{
u32 reg, shift;
- if (XE_WA(gt, 18013179988) || XE_WA(gt, 14015568240)) {
+ if (XE_GT_WA(gt, 18013179988) || XE_GT_WA(gt, 14015568240)) {
xe_pm_runtime_get(gt_to_xe(gt));
reg = xe_mmio_read32(&gt->mmio, RPM_CONFIG0);
xe_pm_runtime_put(gt_to_xe(gt));
diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c
index 3c40ef426f0c..be91343829dd 100644
--- a/drivers/gpu/drm/xe/xe_pci.c
+++ b/drivers/gpu/drm/xe/xe_pci.c
@@ -17,6 +17,8 @@
#include "display/xe_display.h"
#include "regs/xe_gt_regs.h"
+#include "regs/xe_regs.h"
+#include "xe_configfs.h"
#include "xe_device.h"
#include "xe_drv.h"
#include "xe_gt.h"
@@ -55,7 +57,7 @@ static const struct xe_graphics_desc graphics_xelp = {
};
#define XE_HP_FEATURES \
- .has_range_tlb_invalidation = true, \
+ .has_range_tlb_inval = true, \
.va_bits = 48, \
.vm_max_level = 3
@@ -103,7 +105,7 @@ static const struct xe_graphics_desc graphics_xelpg = {
.has_asid = 1, \
.has_atomic_enable_pte_bit = 1, \
.has_flat_ccs = 1, \
- .has_range_tlb_invalidation = 1, \
+ .has_range_tlb_inval = 1, \
.has_usm = 1, \
.has_64bit_timestamp = 1, \
.va_bits = 48, \
@@ -169,6 +171,7 @@ static const struct xe_device_desc tgl_desc = {
.dma_mask_size = 39,
.has_display = true,
.has_llc = true,
+ .has_sriov = true,
.max_gt_per_tile = 1,
.require_force_probe = true,
};
@@ -193,6 +196,7 @@ static const struct xe_device_desc adl_s_desc = {
.dma_mask_size = 39,
.has_display = true,
.has_llc = true,
+ .has_sriov = true,
.max_gt_per_tile = 1,
.require_force_probe = true,
.subplatforms = (const struct xe_subplatform_desc[]) {
@@ -210,6 +214,7 @@ static const struct xe_device_desc adl_p_desc = {
.dma_mask_size = 39,
.has_display = true,
.has_llc = true,
+ .has_sriov = true,
.max_gt_per_tile = 1,
.require_force_probe = true,
.subplatforms = (const struct xe_subplatform_desc[]) {
@@ -225,6 +230,7 @@ static const struct xe_device_desc adl_n_desc = {
.dma_mask_size = 39,
.has_display = true,
.has_llc = true,
+ .has_sriov = true,
.max_gt_per_tile = 1,
.require_force_probe = true,
};
@@ -270,6 +276,7 @@ static const struct xe_device_desc ats_m_desc = {
DG2_FEATURES,
.has_display = false,
+ .has_sriov = true,
};
static const struct xe_device_desc dg2_desc = {
@@ -327,6 +334,7 @@ static const struct xe_device_desc bmg_desc = {
.has_mbx_power_limits = true,
.has_gsc_nvm = 1,
.has_heci_cscfi = 1,
+ .has_late_bind = true,
.has_sriov = true,
.max_gt_per_tile = 2,
.needs_scratch = true,
@@ -503,6 +511,26 @@ static void read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver,
*revid = REG_FIELD_GET(GMD_ID_REVID, val);
}
+static const struct xe_ip *find_graphics_ip(unsigned int verx100)
+{
+ KUNIT_STATIC_STUB_REDIRECT(find_graphics_ip, verx100);
+
+ for (int i = 0; i < ARRAY_SIZE(graphics_ips); i++)
+ if (graphics_ips[i].verx100 == verx100)
+ return &graphics_ips[i];
+ return NULL;
+}
+
+static const struct xe_ip *find_media_ip(unsigned int verx100)
+{
+ KUNIT_STATIC_STUB_REDIRECT(find_media_ip, verx100);
+
+ for (int i = 0; i < ARRAY_SIZE(media_ips); i++)
+ if (media_ips[i].verx100 == verx100)
+ return &media_ips[i];
+ return NULL;
+}
+
/*
* Read IP version from hardware and select graphics/media IP descriptors
* based on the result.
@@ -520,14 +548,7 @@ static void handle_gmdid(struct xe_device *xe,
read_gmdid(xe, GMDID_GRAPHICS, &ver, graphics_revid);
- for (int i = 0; i < ARRAY_SIZE(graphics_ips); i++) {
- if (ver == graphics_ips[i].verx100) {
- *graphics_ip = &graphics_ips[i];
-
- break;
- }
- }
-
+ *graphics_ip = find_graphics_ip(ver);
if (!*graphics_ip) {
drm_err(&xe->drm, "Hardware reports unknown graphics version %u.%02u\n",
ver / 100, ver % 100);
@@ -538,14 +559,7 @@ static void handle_gmdid(struct xe_device *xe,
if (ver == 0)
return;
- for (int i = 0; i < ARRAY_SIZE(media_ips); i++) {
- if (ver == media_ips[i].verx100) {
- *media_ip = &media_ips[i];
-
- break;
- }
- }
-
+ *media_ip = find_media_ip(ver);
if (!*media_ip) {
drm_err(&xe->drm, "Hardware reports unknown media version %u.%02u\n",
ver / 100, ver % 100);
@@ -574,6 +588,7 @@ static int xe_info_init_early(struct xe_device *xe,
xe->info.has_gsc_nvm = desc->has_gsc_nvm;
xe->info.has_heci_gscfi = desc->has_heci_gscfi;
xe->info.has_heci_cscfi = desc->has_heci_cscfi;
+ xe->info.has_late_bind = desc->has_late_bind;
xe->info.has_llc = desc->has_llc;
xe->info.has_pxp = desc->has_pxp;
xe->info.has_sriov = desc->has_sriov;
@@ -599,6 +614,44 @@ static int xe_info_init_early(struct xe_device *xe,
}
/*
+ * Possibly override the number of tiles based on the configuration register.
+ */
+static void xe_info_probe_tile_count(struct xe_device *xe)
+{
+ struct xe_mmio *mmio;
+ u8 tile_count;
+ u32 mtcfg;
+
+ KUNIT_STATIC_STUB_REDIRECT(xe_info_probe_tile_count, xe);
+
+ /*
+ * Probe for tile count only for platforms that support multiple
+ * tiles.
+ */
+ if (xe->info.tile_count == 1)
+ return;
+
+ if (xe->info.skip_mtcfg)
+ return;
+
+ mmio = xe_root_tile_mmio(xe);
+
+ /*
+ * Although the per-tile mmio regs are not yet initialized, this
+ * is fine as it goes through the root tile's mmio, which is
+ * guaranteed to have been initialized earlier in xe_mmio_probe_early().
+ */
+ mtcfg = xe_mmio_read32(mmio, XEHP_MTCFG_ADDR);
+ tile_count = REG_FIELD_GET(TILE_COUNT, mtcfg) + 1;
+
+ if (tile_count < xe->info.tile_count) {
+ drm_info(&xe->drm, "tile_count: %d, reduced_tile_count: %d\n",
+ xe->info.tile_count, tile_count);
+ xe->info.tile_count = tile_count;
+ }
+}
+
+/*
* Initialize device info content that does require knowledge about
* graphics / media IP version.
* Make sure that GT / tile structures allocated by the driver match the data
@@ -668,10 +721,12 @@ static int xe_info_init(struct xe_device *xe,
/* Runtime detection may change this later */
xe->info.has_flat_ccs = graphics_desc->has_flat_ccs;
- xe->info.has_range_tlb_invalidation = graphics_desc->has_range_tlb_invalidation;
+ xe->info.has_range_tlb_inval = graphics_desc->has_range_tlb_inval;
xe->info.has_usm = graphics_desc->has_usm;
xe->info.has_64bit_timestamp = graphics_desc->has_64bit_timestamp;
+ xe_info_probe_tile_count(xe);
+
for_each_remote_tile(tile, xe, id) {
int err;
@@ -687,12 +742,17 @@ static int xe_info_init(struct xe_device *xe,
* All of these together determine the overall GT count.
*/
for_each_tile(tile, xe, id) {
+ int err;
+
gt = tile->primary_gt;
gt->info.type = XE_GT_TYPE_MAIN;
gt->info.id = tile->id * xe->info.max_gt_per_tile;
gt->info.has_indirect_ring_state = graphics_desc->has_indirect_ring_state;
gt->info.engine_mask = graphics_desc->hw_engine_mask;
- xe->info.gt_count++;
+
+ err = xe_tile_alloc_vram(tile);
+ if (err)
+ return err;
if (MEDIA_VER(xe) < 13 && media_desc)
gt->info.engine_mask |= media_desc->hw_engine_mask;
@@ -713,9 +773,15 @@ static int xe_info_init(struct xe_device *xe,
gt->info.id = tile->id * xe->info.max_gt_per_tile + 1;
gt->info.has_indirect_ring_state = media_desc->has_indirect_ring_state;
gt->info.engine_mask = media_desc->hw_engine_mask;
- xe->info.gt_count++;
}
+ /*
+ * Now that we have tiles and GTs defined, loop over the valid GTs to
+ * compute gt_count.
+ */
+ for_each_gt(gt, xe, id)
+ xe->info.gt_count++;
+
return 0;
}
@@ -726,7 +792,7 @@ static void xe_pci_remove(struct pci_dev *pdev)
if (IS_SRIOV_PF(xe))
xe_pci_sriov_configure(pdev, 0);
- if (xe_survivability_mode_is_enabled(xe))
+ if (xe_survivability_mode_is_boot_enabled(xe))
return;
xe_device_remove(xe);
@@ -759,6 +825,8 @@ static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct xe_device *xe;
int err;
+ xe_configfs_check_device(pdev);
+
if (desc->require_force_probe && !id_forced(pdev->device)) {
dev_info(&pdev->dev,
"Your graphics device %04x is not officially supported\n"
@@ -806,7 +874,7 @@ static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* flashed through mei. Return success, if survivability mode
* is enabled due to pcode failure or configfs being set
*/
- if (xe_survivability_mode_is_enabled(xe))
+ if (xe_survivability_mode_is_boot_enabled(xe))
return 0;
if (err)
@@ -900,7 +968,7 @@ static int xe_pci_suspend(struct device *dev)
struct xe_device *xe = pdev_to_xe_device(pdev);
int err;
- if (xe_survivability_mode_is_enabled(xe))
+ if (xe_survivability_mode_is_boot_enabled(xe))
return -EBUSY;
err = xe_pm_suspend(xe);
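
The find_graphics_ip()/find_media_ip() helpers introduced above are wrapped with KUNIT_STATIC_STUB_REDIRECT(), so a KUnit test can swap them out at run time. A minimal sketch of how a test might use that hook, assuming the stock KUnit static-stub API; the fake entry and test body are illustrative, not part of this series:

#include <kunit/static_stub.h>
#include <kunit/test.h>

static const struct xe_ip fake_ip = { .verx100 = 1270 };

/* Replacement that every redirected find_graphics_ip() call will hit */
static const struct xe_ip *fake_find_graphics_ip(unsigned int verx100)
{
	return &fake_ip;
}

static void find_graphics_ip_stub_example(struct kunit *test)
{
	kunit_activate_static_stub(test, find_graphics_ip,
				   fake_find_graphics_ip);
	KUNIT_EXPECT_PTR_EQ(test, find_graphics_ip(1270), &fake_ip);
	kunit_deactivate_static_stub(test, find_graphics_ip);
}
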
diff --git a/drivers/gpu/drm/xe/xe_pci_types.h b/drivers/gpu/drm/xe/xe_pci_types.h
index 4de6f69ed975..9b9766a3baa3 100644
--- a/drivers/gpu/drm/xe/xe_pci_types.h
+++ b/drivers/gpu/drm/xe/xe_pci_types.h
@@ -39,6 +39,7 @@ struct xe_device_desc {
u8 has_gsc_nvm:1;
u8 has_heci_gscfi:1;
u8 has_heci_cscfi:1;
+ u8 has_late_bind:1;
u8 has_llc:1;
u8 has_mbx_power_limits:1;
u8 has_pxp:1;
@@ -60,7 +61,7 @@ struct xe_graphics_desc {
u8 has_atomic_enable_pte_bit:1;
u8 has_flat_ccs:1;
u8 has_indirect_ring_state:1;
- u8 has_range_tlb_invalidation:1;
+ u8 has_range_tlb_inval:1;
u8 has_usm:1;
u8 has_64bit_timestamp:1;
};
diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c
index bb9b6ecad2af..d6625c71115b 100644
--- a/drivers/gpu/drm/xe/xe_pm.c
+++ b/drivers/gpu/drm/xe/xe_pm.c
@@ -18,11 +18,13 @@
#include "xe_device.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
-#include "xe_guc.h"
+#include "xe_gt_idle.h"
#include "xe_i2c.h"
#include "xe_irq.h"
+#include "xe_late_bind_fw.h"
#include "xe_pcode.h"
#include "xe_pxp.h"
+#include "xe_sriov_vf_ccs.h"
#include "xe_trace.h"
#include "xe_vm.h"
#include "xe_wa.h"
@@ -128,6 +130,8 @@ int xe_pm_suspend(struct xe_device *xe)
if (err)
goto err;
+ xe_late_bind_wait_for_worker_completion(&xe->late_bind);
+
for_each_gt(gt, xe, id)
xe_gt_suspend_prepare(gt);
@@ -177,6 +181,9 @@ int xe_pm_resume(struct xe_device *xe)
drm_dbg(&xe->drm, "Resuming device\n");
trace_xe_pm_resume(xe, __builtin_return_address(0));
+ for_each_gt(gt, xe, id)
+ xe_gt_idle_disable_c6(gt);
+
for_each_tile(tile, xe, id)
xe_wa_apply_tile_workarounds(tile);
@@ -209,6 +216,11 @@ int xe_pm_resume(struct xe_device *xe)
xe_pxp_pm_resume(xe->pxp);
+ if (IS_VF_CCS_READY(xe))
+ xe_sriov_vf_ccs_register_context(xe);
+
+ xe_late_bind_fw_load(&xe->late_bind);
+
drm_dbg(&xe->drm, "Device resumed\n");
return 0;
err:
@@ -244,6 +256,10 @@ static void xe_pm_runtime_init(struct xe_device *xe)
{
struct device *dev = xe->drm.dev;
+ /* Our current VFs do not support RPM, so disable it */
+ if (IS_SRIOV_VF(xe))
+ return;
+
/*
* Disable the system suspend direct complete optimization.
* We need to ensure that the regular device suspend/resume functions
@@ -389,6 +405,10 @@ static void xe_pm_runtime_fini(struct xe_device *xe)
{
struct device *dev = xe->drm.dev;
+ /* Our current VFs do not support RPM, so disable it */
+ if (IS_SRIOV_VF(xe))
+ return;
+
pm_runtime_get_sync(dev);
pm_runtime_forbid(dev);
}
@@ -547,6 +567,9 @@ int xe_pm_runtime_resume(struct xe_device *xe)
xe_rpm_lockmap_acquire(xe);
+ for_each_gt(gt, xe, id)
+ xe_gt_idle_disable_c6(gt);
+
if (xe->d3cold.allowed) {
err = xe_pcode_ready(xe, true);
if (err)
@@ -580,6 +603,12 @@ int xe_pm_runtime_resume(struct xe_device *xe)
xe_pxp_pm_resume(xe->pxp);
+ if (IS_VF_CCS_READY(xe))
+ xe_sriov_vf_ccs_register_context(xe);
+
+ if (xe->d3cold.allowed)
+ xe_late_bind_fw_load(&xe->late_bind);
+
out:
xe_rpm_lockmap_release(xe);
xe_pm_write_callback_task(xe, NULL);
diff --git a/drivers/gpu/drm/xe/xe_printk.h b/drivers/gpu/drm/xe/xe_printk.h
new file mode 100644
index 000000000000..c5be2385aa95
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_printk.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_PRINTK_H_
+#define _XE_PRINTK_H_
+
+#include <drm/drm_print.h>
+
+#include "xe_device_types.h"
+
+#define __XE_PRINTK_FMT(_xe, _fmt, _args...) _fmt, ##_args
+
+#define xe_printk(_xe, _level, _fmt, ...) \
+ drm_##_level(&(_xe)->drm, __XE_PRINTK_FMT((_xe), _fmt, ## __VA_ARGS__))
+
+#define xe_err(_xe, _fmt, ...) \
+ xe_printk((_xe), err, _fmt, ##__VA_ARGS__)
+
+#define xe_err_once(_xe, _fmt, ...) \
+ xe_printk((_xe), err_once, _fmt, ##__VA_ARGS__)
+
+#define xe_err_ratelimited(_xe, _fmt, ...) \
+ xe_printk((_xe), err_ratelimited, _fmt, ##__VA_ARGS__)
+
+#define xe_warn(_xe, _fmt, ...) \
+ xe_printk((_xe), warn, _fmt, ##__VA_ARGS__)
+
+#define xe_notice(_xe, _fmt, ...) \
+ xe_printk((_xe), notice, _fmt, ##__VA_ARGS__)
+
+#define xe_info(_xe, _fmt, ...) \
+ xe_printk((_xe), info, _fmt, ##__VA_ARGS__)
+
+#define xe_dbg(_xe, _fmt, ...) \
+ xe_printk((_xe), dbg, _fmt, ##__VA_ARGS__)
+
+#define xe_WARN_type(_xe, _type, _condition, _fmt, ...) \
+ drm_WARN##_type(&(_xe)->drm, _condition, _fmt, ## __VA_ARGS__)
+
+#define xe_WARN(_xe, _condition, _fmt, ...) \
+ xe_WARN_type((_xe),, _condition, __XE_PRINTK_FMT((_xe), _fmt, ## __VA_ARGS__))
+
+#define xe_WARN_ONCE(_xe, _condition, _fmt, ...) \
+ xe_WARN_type((_xe), _ONCE, _condition, __XE_PRINTK_FMT((_xe), _fmt, ## __VA_ARGS__))
+
+#define xe_WARN_ON(_xe, _condition) \
+ xe_WARN((_xe), _condition, "%s(%s)", "WARN_ON", __stringify(_condition))
+
+#define xe_WARN_ON_ONCE(_xe, _condition) \
+ xe_WARN_ONCE((_xe), _condition, "%s(%s)", "WARN_ON_ONCE", __stringify(_condition))
+
+static inline void __xe_printfn_err(struct drm_printer *p, struct va_format *vaf)
+{
+ struct xe_device *xe = p->arg;
+
+ xe_err(xe, "%pV", vaf);
+}
+
+static inline void __xe_printfn_info(struct drm_printer *p, struct va_format *vaf)
+{
+ struct xe_device *xe = p->arg;
+
+ xe_info(xe, "%pV", vaf);
+}
+
+static inline void __xe_printfn_dbg(struct drm_printer *p, struct va_format *vaf)
+{
+ struct xe_device *xe = p->arg;
+ struct drm_printer ddp;
+
+ /*
+ * The original xe_dbg() callsite annotations are useless here;
+ * redirect to the tweaked drm_dbg_printer() instead.
+ */
+ ddp = drm_dbg_printer(&xe->drm, DRM_UT_DRIVER, NULL);
+ ddp.origin = p->origin;
+
+ drm_printf(&ddp, __XE_PRINTK_FMT(xe, "%pV", vaf));
+}
+
+/**
+ * xe_err_printer - Construct a &drm_printer that outputs to xe_err()
+ * @xe: the &xe_device pointer to use in xe_err()
+ *
+ * Return: The &drm_printer object.
+ */
+static inline struct drm_printer xe_err_printer(struct xe_device *xe)
+{
+ struct drm_printer p = {
+ .printfn = __xe_printfn_err,
+ .arg = xe,
+ };
+ return p;
+}
+
+/**
+ * xe_info_printer - Construct a &drm_printer that outputs to xe_info()
+ * @xe: the &xe_device pointer to use in xe_info()
+ *
+ * Return: The &drm_printer object.
+ */
+static inline struct drm_printer xe_info_printer(struct xe_device *xe)
+{
+ struct drm_printer p = {
+ .printfn = __xe_printfn_info,
+ .arg = xe,
+ };
+ return p;
+}
+
+/**
+ * xe_dbg_printer - Construct a &drm_printer that outputs like xe_dbg()
+ * @xe: the &xe_device pointer to use in xe_dbg()
+ *
+ * Return: The &drm_printer object.
+ */
+static inline struct drm_printer xe_dbg_printer(struct xe_device *xe)
+{
+ struct drm_printer p = {
+ .printfn = __xe_printfn_dbg,
+ .arg = xe,
+ .origin = (const void *)_THIS_IP_,
+ };
+ return p;
+}
+
+#endif
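
A short usage sketch for the printer constructors above; the surrounding function and message are illustrative only. Any code that already consumes a &drm_printer can be pointed at the device-scoped xe_err()/xe_info()/xe_dbg() output this way:

static void report_init_failure(struct xe_device *xe, int err)
{
	struct drm_printer p = xe_err_printer(xe);

	/* Routed through xe_err(), so it carries the device prefix */
	drm_printf(&p, "initialization failed: %d\n", err);
}
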
diff --git a/drivers/gpu/drm/xe/xe_psmi.c b/drivers/gpu/drm/xe/xe_psmi.c
new file mode 100644
index 000000000000..45d142191d60
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_psmi.c
@@ -0,0 +1,294 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <linux/debugfs.h>
+
+#include "xe_bo.h"
+#include "xe_device.h"
+#include "xe_configfs.h"
+#include "xe_psmi.h"
+
+/*
+ * PSMI capture support
+ *
+ * PSMI capture requires a physically contiguous buffer. The PSMI tool
+ * performs all necessary configuration itself (MMIO register writes are
+ * done from user-space). However, the KMD needs to provide the PSMI tool
+ * with the physical address of the base of the PSMI buffer when it is
+ * backed by VRAM.
+ *
+ * VRAM backed PSMI buffer:
+ * The buffer is allocated as a GEM object with the XE_BO_CREATE_PINNED_BIT
+ * flag, which creates a contiguous allocation. The physical address is
+ * returned from psmi_debugfs_capture_addr_show(). The PSMI tool can mmap
+ * the buffer via the PCI BAR through sysfs.
+ *
+ * SYSTEM memory backed PSMI buffer:
+ * This interface does not support allocating from the SYSTEM memory region.
+ * The PSMI tool needs to allocate the memory itself using hugetlbfs. To get
+ * the physical address, user-space can query /proc/[pid]/pagemap. As an
+ * alternative, CMA debugfs could also be used to allocate reserved CMA memory.
+ */
+
+static bool psmi_enabled(struct xe_device *xe)
+{
+ return xe_configfs_get_psmi_enabled(to_pci_dev(xe->drm.dev));
+}
+
+static void psmi_free_object(struct xe_bo *bo)
+{
+ xe_bo_lock(bo, NULL);
+ xe_bo_unpin(bo);
+ xe_bo_unlock(bo);
+ xe_bo_put(bo);
+}
+
+/*
+ * Free PSMI capture buffer objects.
+ */
+static void psmi_cleanup(struct xe_device *xe)
+{
+ unsigned long id, region_mask = xe->psmi.region_mask;
+ struct xe_bo *bo;
+
+ for_each_set_bit(id, &region_mask,
+ ARRAY_SIZE(xe->psmi.capture_obj)) {
+ /* smem should never be set */
+ xe_assert(xe, id);
+
+ bo = xe->psmi.capture_obj[id];
+ if (bo) {
+ psmi_free_object(bo);
+ xe->psmi.capture_obj[id] = NULL;
+ }
+ }
+}
+
+static struct xe_bo *psmi_alloc_object(struct xe_device *xe,
+ unsigned int id, size_t bo_size)
+{
+ struct xe_tile *tile;
+
+ if (!id || !bo_size)
+ return NULL;
+
+ tile = &xe->tiles[id - 1];
+
+ /* VRAM: Allocate GEM object for the capture buffer */
+ return xe_bo_create_pin_range_novm(xe, tile, bo_size, 0, ~0ull,
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_PINNED |
+ XE_BO_FLAG_PINNED_LATE_RESTORE |
+ XE_BO_FLAG_NEEDS_CPU_ACCESS);
+}
+
+/*
+ * Allocate PSMI capture buffer objects (via the debugfs set function), based
+ * on which regions the user has selected in region_mask. @size: size in bytes
+ * (should be a power of 2)
+ *
+ * Always release/free the current buffer objects before attempting to allocate
+ * new ones. Size == 0 will free all current buffers.
+ *
+ * Note that we don't write any registers, as the capture tool already
+ * configures all PSMI registers itself via MMIO space.
+ */
+static int psmi_resize_object(struct xe_device *xe, size_t size)
+{
+ unsigned long id, region_mask = xe->psmi.region_mask;
+ struct xe_bo *bo = NULL;
+ int err = 0;
+
+ /* if resizing, free currently allocated buffers first */
+ psmi_cleanup(xe);
+
+ /* size may be 0, in which case we are now done */
+ if (!size)
+ return 0;
+
+ for_each_set_bit(id, &region_mask,
+ ARRAY_SIZE(xe->psmi.capture_obj)) {
+ /* smem should never be set */
+ xe_assert(xe, id);
+
+ bo = psmi_alloc_object(xe, id, size);
+ if (IS_ERR(bo)) {
+ err = PTR_ERR(bo);
+ break;
+ }
+ xe->psmi.capture_obj[id] = bo;
+
+ drm_info(&xe->drm,
+ "PSMI capture size requested: %zu bytes, allocated: %lu:%zu\n",
+ size, id, bo ? xe_bo_size(bo) : 0);
+ }
+
+ /* on error, reverse what was allocated */
+ if (err)
+ psmi_cleanup(xe);
+
+ return err;
+}
+
+/*
+ * Returns an address for the capture tool to use to find the start of the
+ * capture buffer. The capture tool requires a buffer allocated per tile
+ * (VRAM region), thus we return an address for each region.
+ */
+static int psmi_debugfs_capture_addr_show(struct seq_file *m, void *data)
+{
+ struct xe_device *xe = m->private;
+ unsigned long id, region_mask;
+ struct xe_bo *bo;
+ u64 val;
+
+ region_mask = xe->psmi.region_mask;
+ for_each_set_bit(id, &region_mask,
+ ARRAY_SIZE(xe->psmi.capture_obj)) {
+ /* smem should never be set */
+ xe_assert(xe, id);
+
+ /* VRAM region */
+ bo = xe->psmi.capture_obj[id];
+ if (!bo)
+ continue;
+
+ /* pinned, so don't need bo_lock */
+ val = __xe_bo_addr(bo, 0, PAGE_SIZE);
+ seq_printf(m, "%ld: 0x%llx\n", id, val);
+ }
+
+ return 0;
+}
+
+/*
+ * Return the capture buffer size, using the size of the first allocated
+ * object found. This works because all objects must be the same size.
+ */
+static int psmi_debugfs_capture_size_get(void *data, u64 *val)
+{
+ unsigned long id, region_mask;
+ struct xe_device *xe = data;
+ struct xe_bo *bo;
+
+ region_mask = xe->psmi.region_mask;
+ for_each_set_bit(id, &region_mask,
+ ARRAY_SIZE(xe->psmi.capture_obj)) {
+ /* smem should never be set */
+ xe_assert(xe, id);
+
+ bo = xe->psmi.capture_obj[id];
+ if (bo) {
+ *val = xe_bo_size(bo);
+ return 0;
+ }
+ }
+
+ /* no capture objects are allocated */
+ *val = 0;
+
+ return 0;
+}
+
+/*
+ * Set the size of the PSMI capture buffer. This triggers the allocation of a
+ * capture buffer in each memory region specified by a prior write to
+ * psmi_capture_region_mask.
+ */
+static int psmi_debugfs_capture_size_set(void *data, u64 val)
+{
+ struct xe_device *xe = data;
+
+ /* user must have specified at least one region */
+ if (!xe->psmi.region_mask)
+ return -EINVAL;
+
+ return psmi_resize_object(xe, val);
+}
+
+static int psmi_debugfs_capture_region_mask_get(void *data, u64 *val)
+{
+ struct xe_device *xe = data;
+
+ *val = xe->psmi.region_mask;
+
+ return 0;
+}
+
+/*
+ * Select VRAM regions for multi-tile devices; only allowed when no buffer is
+ * currently allocated.
+ */
+static int psmi_debugfs_capture_region_mask_set(void *data, u64 region_mask)
+{
+ struct xe_device *xe = data;
+ u64 size = 0;
+
+ /* SMEM is not supported (see comments at top of file) */
+ if (region_mask & 0x1)
+ return -EOPNOTSUPP;
+
+ /* input bitmask should contain only valid TTM regions */
+ if (!region_mask || region_mask & ~xe->info.mem_region_mask)
+ return -EINVAL;
+
+ /* only allow setting mask if buffer is not yet allocated */
+ psmi_debugfs_capture_size_get(xe, &size);
+ if (size)
+ return -EBUSY;
+
+ xe->psmi.region_mask = region_mask;
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(psmi_debugfs_capture_addr);
+
+DEFINE_DEBUGFS_ATTRIBUTE(psmi_debugfs_capture_region_mask_fops,
+ psmi_debugfs_capture_region_mask_get,
+ psmi_debugfs_capture_region_mask_set,
+ "0x%llx\n");
+
+DEFINE_DEBUGFS_ATTRIBUTE(psmi_debugfs_capture_size_fops,
+ psmi_debugfs_capture_size_get,
+ psmi_debugfs_capture_size_set,
+ "%lld\n");
+
+void xe_psmi_debugfs_register(struct xe_device *xe)
+{
+ struct drm_minor *minor;
+
+ if (!psmi_enabled(xe))
+ return;
+
+ minor = xe->drm.primary;
+ if (!minor->debugfs_root)
+ return;
+
+ debugfs_create_file("psmi_capture_addr",
+ 0400, minor->debugfs_root, xe,
+ &psmi_debugfs_capture_addr_fops);
+
+ debugfs_create_file("psmi_capture_region_mask",
+ 0600, minor->debugfs_root, xe,
+ &psmi_debugfs_capture_region_mask_fops);
+
+ debugfs_create_file("psmi_capture_size",
+ 0600, minor->debugfs_root, xe,
+ &psmi_debugfs_capture_size_fops);
+}
+
+static void psmi_fini(void *arg)
+{
+ psmi_cleanup(arg);
+}
+
+int xe_psmi_init(struct xe_device *xe)
+{
+ if (!psmi_enabled(xe))
+ return 0;
+
+ return devm_add_action(xe->drm.dev, psmi_fini, xe);
+}
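
From user-space the capture buffer is driven entirely through the three debugfs files registered above: write the region mask first, then the size. A hedged sketch of the sequence; the DRM minor path, region bit, and size are example values only:

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd;

	/* Tile-0 VRAM is region bit 1 (bit 0 = SMEM, not supported) */
	fd = open("/sys/kernel/debug/dri/0/psmi_capture_region_mask", O_WRONLY);
	write(fd, "0x2", 3);
	close(fd);

	/* Triggers allocation of a 256 MiB buffer in each selected region */
	fd = open("/sys/kernel/debug/dri/0/psmi_capture_size", O_WRONLY);
	write(fd, "268435456", 9);
	close(fd);

	return 0;
}
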
diff --git a/drivers/gpu/drm/xe/xe_psmi.h b/drivers/gpu/drm/xe/xe_psmi.h
new file mode 100644
index 000000000000..b1dfba80d893
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_psmi.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_PSMI_H_
+#define _XE_PSMI_H_
+
+struct xe_device;
+
+int xe_psmi_init(struct xe_device *xe);
+void xe_psmi_debugfs_register(struct xe_device *xe);
+
+#endif
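
For the SYSTEM-memory case that the comment at the top of xe_psmi.c defers to user-space, the physical address of a hugetlbfs-backed page can be recovered from /proc/self/pagemap. A sketch under the documented pagemap layout (bits 0-54 hold the PFN, bit 63 the present flag); this is illustrative user-space code, not part of this series, and reading unmasked PFNs needs CAP_SYS_ADMIN:

#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

/* Translate a mapped virtual address to a physical address via pagemap */
static uint64_t virt_to_phys(const void *vaddr)
{
	long psize = sysconf(_SC_PAGESIZE);
	uint64_t entry = 0;
	int fd;

	fd = open("/proc/self/pagemap", O_RDONLY);
	if (fd < 0)
		return 0;
	pread(fd, &entry, sizeof(entry),
	      ((uintptr_t)vaddr / psize) * sizeof(entry));
	close(fd);

	if (!(entry & (1ULL << 63)))	/* bit 63: page present */
		return 0;

	/* bits 0-54: page frame number */
	return (entry & ((1ULL << 55) - 1)) * psize +
	       (uintptr_t)vaddr % psize;
}
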
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index c8e63bd23300..a1c88f9a6c76 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -13,16 +13,17 @@
#include "xe_drm_client.h"
#include "xe_exec_queue.h"
#include "xe_gt.h"
-#include "xe_gt_tlb_invalidation.h"
#include "xe_migrate.h"
#include "xe_pt_types.h"
#include "xe_pt_walk.h"
#include "xe_res_cursor.h"
#include "xe_sched_job.h"
-#include "xe_sync.h"
#include "xe_svm.h"
+#include "xe_sync.h"
+#include "xe_tlb_inval_job.h"
#include "xe_trace.h"
#include "xe_ttm_stolen_mgr.h"
+#include "xe_userptr.h"
#include "xe_vm.h"
struct xe_pt_dir {
@@ -69,7 +70,7 @@ static u64 __xe_pt_empty_pte(struct xe_tile *tile, struct xe_vm *vm,
if (level > MAX_HUGEPTE_LEVEL)
return vm->pt_ops->pde_encode_bo(vm->scratch_pt[id][level - 1]->bo,
- 0, pat_index);
+ 0);
return vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level, IS_DGFX(xe), 0) |
XE_PTE_NULL;
@@ -88,6 +89,7 @@ static void xe_pt_free(struct xe_pt *pt)
* @vm: The vm to create for.
* @tile: The tile to create for.
* @level: The page-table level.
+ * @exec: The drm_exec object used to lock the vm.
*
* Allocate and initialize a single struct xe_pt metadata structure. Also
* create the corresponding page-table bo, but don't initialize it. If the
@@ -99,7 +101,7 @@ static void xe_pt_free(struct xe_pt *pt)
* error.
*/
struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
- unsigned int level)
+ unsigned int level, struct drm_exec *exec)
{
struct xe_pt *pt;
struct xe_bo *bo;
@@ -123,9 +125,11 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
bo_flags |= XE_BO_FLAG_PINNED_LATE_RESTORE;
pt->level = level;
+
+ drm_WARN_ON(&vm->xe->drm, IS_ERR_OR_NULL(exec));
bo = xe_bo_create_pin_map(vm->xe, tile, vm, SZ_4K,
ttm_bo_type_kernel,
- bo_flags);
+ bo_flags, exec);
if (IS_ERR(bo)) {
err = PTR_ERR(bo);
goto err_kfree;
@@ -518,7 +522,7 @@ xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
{
struct xe_pt_stage_bind_walk *xe_walk =
container_of(walk, typeof(*xe_walk), base);
- u16 pat_index = xe_walk->vma->pat_index;
+ u16 pat_index = xe_walk->vma->attr.pat_index;
struct xe_pt *xe_parent = container_of(parent, typeof(*xe_parent), base);
struct xe_vm *vm = xe_walk->vm;
struct xe_pt *xe_child;
@@ -589,7 +593,8 @@ xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
if (covers || !*child) {
u64 flags = 0;
- xe_child = xe_pt_create(xe_walk->vm, xe_walk->tile, level - 1);
+ xe_child = xe_pt_create(xe_walk->vm, xe_walk->tile, level - 1,
+ xe_vm_validation_exec(vm));
if (IS_ERR(xe_child))
return PTR_ERR(xe_child);
@@ -616,7 +621,7 @@ xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
xe_child->is_compact = true;
}
- pte = vm->pt_ops->pde_encode_bo(xe_child->bo, 0, pat_index) | flags;
+ pte = vm->pt_ops->pde_encode_bo(xe_child->bo, 0) | flags;
ret = xe_pt_insert_entry(xe_walk, xe_parent, offset, xe_child,
pte);
}
@@ -640,28 +645,31 @@ static const struct xe_pt_walk_ops xe_pt_stage_bind_ops = {
* - In all other cases device atomics will be disabled with AE=0 until an application
* requests differently using an ioctl like madvise.
*/
-static bool xe_atomic_for_vram(struct xe_vm *vm)
+static bool xe_atomic_for_vram(struct xe_vm *vm, struct xe_vma *vma)
{
+ if (vma->attr.atomic_access == DRM_XE_ATOMIC_CPU)
+ return false;
+
return true;
}
-static bool xe_atomic_for_system(struct xe_vm *vm, struct xe_bo *bo)
+static bool xe_atomic_for_system(struct xe_vm *vm, struct xe_vma *vma)
{
struct xe_device *xe = vm->xe;
+ struct xe_bo *bo = xe_vma_bo(vma);
- if (!xe->info.has_device_atomics_on_smem)
+ if (!xe->info.has_device_atomics_on_smem ||
+ vma->attr.atomic_access == DRM_XE_ATOMIC_CPU)
return false;
+ if (vma->attr.atomic_access == DRM_XE_ATOMIC_DEVICE)
+ return true;
+
/*
* If a SMEM+LMEM allocation is backed by SMEM, a device
* atomic will cause a GPU page fault and the allocation then
* gets migrated to LMEM; bind such allocations with
* device atomics enabled.
- *
- * TODO: Revisit this. Perhaps add something like a
- * fault_on_atomics_in_system UAPI flag.
- * Note that this also prohibits GPU atomics in LR mode for
- * userptr and system memory on DGFX.
*/
return (!IS_DGFX(xe) || (!xe_vm_in_lr_mode(vm) ||
(bo && xe_bo_has_single_placement(bo))));
@@ -725,7 +733,7 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
return -EAGAIN;
}
if (xe_svm_range_has_dma_mapping(range)) {
- xe_res_first_dma(range->base.dma_addr, 0,
+ xe_res_first_dma(range->base.pages.dma_addr, 0,
range->base.itree.last + 1 - range->base.itree.start,
&curs);
xe_svm_range_debug(range, "BIND PREPARE - MIXED");
@@ -744,8 +752,8 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
goto walk_pt;
if (vma->gpuva.flags & XE_VMA_ATOMIC_PTE_BIT) {
- xe_walk.default_vram_pte = xe_atomic_for_vram(vm) ? XE_USM_PPGTT_PTE_AE : 0;
- xe_walk.default_system_pte = xe_atomic_for_system(vm, bo) ?
+ xe_walk.default_vram_pte = xe_atomic_for_vram(vm, vma) ? XE_USM_PPGTT_PTE_AE : 0;
+ xe_walk.default_system_pte = xe_atomic_for_system(vm, vma) ?
XE_USM_PPGTT_PTE_AE : 0;
}
@@ -756,8 +764,8 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
if (!xe_vma_is_null(vma) && !range) {
if (xe_vma_is_userptr(vma))
- xe_res_first_sg(to_userptr_vma(vma)->userptr.sg, 0,
- xe_vma_size(vma), &curs);
+ xe_res_first_dma(to_userptr_vma(vma)->userptr.pages.dma_addr, 0,
+ xe_vma_size(vma), &curs);
else if (xe_bo_is_vram(bo) || xe_bo_is_stolen(bo))
xe_res_first(bo->ttm.resource, xe_vma_bo_offset(vma),
xe_vma_size(vma), &curs);
@@ -910,7 +918,7 @@ bool xe_pt_zap_ptes(struct xe_tile *tile, struct xe_vma *vma)
if (xe_vma_bo(vma))
xe_bo_assert_held(xe_vma_bo(vma));
else if (xe_vma_is_userptr(vma))
- lockdep_assert_held(&xe_vma_vm(vma)->userptr.notifier_lock);
+ lockdep_assert_held(&xe_vma_vm(vma)->svm.gpusvm.notifier_lock);
if (!(pt_mask & BIT(tile->id)))
return false;
@@ -950,7 +958,19 @@ bool xe_pt_zap_ptes_range(struct xe_tile *tile, struct xe_vm *vm,
struct xe_pt *pt = vm->pt_root[tile->id];
u8 pt_mask = (range->tile_present & ~range->tile_invalidated);
- xe_svm_assert_in_notifier(vm);
+ /*
+ * Locking rules:
+ *
+ * - notifier_lock (write): full protection against page table changes
+ * and MMU notifier invalidations.
+ *
+ * - notifier_lock (read) + vm_lock (write): combined protection against
+ * invalidations and concurrent page table modifications (e.g., madvise).
+ *
+ */
+ lockdep_assert(lockdep_is_held_type(&vm->svm.gpusvm.notifier_lock, 0) ||
+ (lockdep_is_held_type(&vm->svm.gpusvm.notifier_lock, 1) &&
+ lockdep_is_held_type(&vm->lock, 0)));
if (!(pt_mask & BIT(tile->id)))
return false;
@@ -1033,7 +1053,7 @@ static void xe_pt_commit_locks_assert(struct xe_vma *vma)
xe_pt_commit_prepare_locks_assert(vma);
if (xe_vma_is_userptr(vma))
- lockdep_assert_held_read(&vm->userptr.notifier_lock);
+ xe_svm_assert_held_read(vm);
}
static void xe_pt_commit(struct xe_vma *vma,
@@ -1261,6 +1281,8 @@ static int op_add_deps(struct xe_vm *vm, struct xe_vma_op *op,
}
static int xe_pt_vm_dependencies(struct xe_sched_job *job,
+ struct xe_tlb_inval_job *ijob,
+ struct xe_tlb_inval_job *mjob,
struct xe_vm *vm,
struct xe_vma_ops *vops,
struct xe_vm_pgtable_update_ops *pt_update_ops,
@@ -1328,6 +1350,20 @@ static int xe_pt_vm_dependencies(struct xe_sched_job *job,
for (i = 0; job && !err && i < vops->num_syncs; i++)
err = xe_sync_entry_add_deps(&vops->syncs[i], job);
+ if (job) {
+ if (ijob) {
+ err = xe_tlb_inval_job_alloc_dep(ijob);
+ if (err)
+ return err;
+ }
+
+ if (mjob) {
+ err = xe_tlb_inval_job_alloc_dep(mjob);
+ if (err)
+ return err;
+ }
+ }
+
return err;
}
@@ -1339,10 +1375,12 @@ static int xe_pt_pre_commit(struct xe_migrate_pt_update *pt_update)
struct xe_vm_pgtable_update_ops *pt_update_ops =
&vops->pt_update_ops[pt_update->tile_id];
- return xe_pt_vm_dependencies(pt_update->job, vm, pt_update->vops,
+ return xe_pt_vm_dependencies(pt_update->job, pt_update->ijob,
+ pt_update->mjob, vm, pt_update->vops,
pt_update_ops, rftree);
}
+#if IS_ENABLED(CONFIG_DRM_GPUSVM)
#ifdef CONFIG_DRM_XE_USERPTR_INVAL_INJECT
static bool xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma)
@@ -1373,7 +1411,7 @@ static int vma_check_userptr(struct xe_vm *vm, struct xe_vma *vma,
struct xe_userptr_vma *uvma;
unsigned long notifier_seq;
- lockdep_assert_held_read(&vm->userptr.notifier_lock);
+ xe_svm_assert_held_read(vm);
if (!xe_vma_is_userptr(vma))
return 0;
@@ -1382,7 +1420,7 @@ static int vma_check_userptr(struct xe_vm *vm, struct xe_vma *vma,
if (xe_pt_userptr_inject_eagain(uvma))
xe_vma_userptr_force_invalidate(uvma);
- notifier_seq = uvma->userptr.notifier_seq;
+ notifier_seq = uvma->userptr.pages.notifier_seq;
if (!mmu_interval_read_retry(&uvma->userptr.notifier,
notifier_seq))
@@ -1398,12 +1436,12 @@ static int vma_check_userptr(struct xe_vm *vm, struct xe_vma *vma,
return 0;
}
-static int op_check_userptr(struct xe_vm *vm, struct xe_vma_op *op,
- struct xe_vm_pgtable_update_ops *pt_update)
+static int op_check_svm_userptr(struct xe_vm *vm, struct xe_vma_op *op,
+ struct xe_vm_pgtable_update_ops *pt_update)
{
int err = 0;
- lockdep_assert_held_read(&vm->userptr.notifier_lock);
+ xe_svm_assert_held_read(vm);
switch (op->base.op) {
case DRM_GPUVA_OP_MAP:
@@ -1421,64 +1459,10 @@ static int op_check_userptr(struct xe_vm *vm, struct xe_vma_op *op,
case DRM_GPUVA_OP_UNMAP:
break;
case DRM_GPUVA_OP_PREFETCH:
- err = vma_check_userptr(vm, gpuva_to_vma(op->base.prefetch.va),
- pt_update);
- break;
- default:
- drm_warn(&vm->xe->drm, "NOT POSSIBLE");
- }
-
- return err;
-}
-
-static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
-{
- struct xe_vm *vm = pt_update->vops->vm;
- struct xe_vma_ops *vops = pt_update->vops;
- struct xe_vm_pgtable_update_ops *pt_update_ops =
- &vops->pt_update_ops[pt_update->tile_id];
- struct xe_vma_op *op;
- int err;
-
- err = xe_pt_pre_commit(pt_update);
- if (err)
- return err;
-
- down_read(&vm->userptr.notifier_lock);
-
- list_for_each_entry(op, &vops->list, link) {
- err = op_check_userptr(vm, op, pt_update_ops);
- if (err) {
- up_read(&vm->userptr.notifier_lock);
- break;
- }
- }
-
- return err;
-}
-
-#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)
-static int xe_pt_svm_pre_commit(struct xe_migrate_pt_update *pt_update)
-{
- struct xe_vm *vm = pt_update->vops->vm;
- struct xe_vma_ops *vops = pt_update->vops;
- struct xe_vma_op *op;
- unsigned long i;
- int err;
-
- err = xe_pt_pre_commit(pt_update);
- if (err)
- return err;
-
- xe_svm_notifier_lock(vm);
-
- list_for_each_entry(op, &vops->list, link) {
- struct xe_svm_range *range = NULL;
-
- if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE)
- continue;
+ if (xe_vma_is_cpu_addr_mirror(gpuva_to_vma(op->base.prefetch.va))) {
+ struct xe_svm_range *range = op->map_range.range;
+ unsigned long i;
- if (op->base.op == DRM_GPUVA_OP_PREFETCH) {
xe_assert(vm->xe,
xe_vma_is_cpu_addr_mirror(gpuva_to_vma(op->base.prefetch.va)));
xa_for_each(&op->prefetch_range.range, i, range) {
@@ -1486,97 +1470,62 @@ static int xe_pt_svm_pre_commit(struct xe_migrate_pt_update *pt_update)
if (!xe_svm_range_pages_valid(range)) {
xe_svm_range_debug(range, "PRE-COMMIT - RETRY");
- xe_svm_notifier_unlock(vm);
return -ENODATA;
}
}
} else {
+ err = vma_check_userptr(vm, gpuva_to_vma(op->base.prefetch.va), pt_update);
+ }
+ break;
+#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)
+ case DRM_GPUVA_OP_DRIVER:
+ if (op->subop == XE_VMA_SUBOP_MAP_RANGE) {
+ struct xe_svm_range *range = op->map_range.range;
+
xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(op->map_range.vma));
- xe_assert(vm->xe, op->subop == XE_VMA_SUBOP_MAP_RANGE);
- range = op->map_range.range;
xe_svm_range_debug(range, "PRE-COMMIT");
if (!xe_svm_range_pages_valid(range)) {
xe_svm_range_debug(range, "PRE-COMMIT - RETRY");
- xe_svm_notifier_unlock(vm);
return -EAGAIN;
}
}
- }
-
- return 0;
-}
+ break;
#endif
-
-struct invalidation_fence {
- struct xe_gt_tlb_invalidation_fence base;
- struct xe_gt *gt;
- struct dma_fence *fence;
- struct dma_fence_cb cb;
- struct work_struct work;
- u64 start;
- u64 end;
- u32 asid;
-};
-
-static void invalidation_fence_cb(struct dma_fence *fence,
- struct dma_fence_cb *cb)
-{
- struct invalidation_fence *ifence =
- container_of(cb, struct invalidation_fence, cb);
- struct xe_device *xe = gt_to_xe(ifence->gt);
-
- trace_xe_gt_tlb_invalidation_fence_cb(xe, &ifence->base);
- if (!ifence->fence->error) {
- queue_work(system_wq, &ifence->work);
- } else {
- ifence->base.base.error = ifence->fence->error;
- xe_gt_tlb_invalidation_fence_signal(&ifence->base);
+ default:
+ drm_warn(&vm->xe->drm, "NOT POSSIBLE");
}
- dma_fence_put(ifence->fence);
-}
-static void invalidation_fence_work_func(struct work_struct *w)
-{
- struct invalidation_fence *ifence =
- container_of(w, struct invalidation_fence, work);
- struct xe_device *xe = gt_to_xe(ifence->gt);
-
- trace_xe_gt_tlb_invalidation_fence_work_func(xe, &ifence->base);
- xe_gt_tlb_invalidation_range(ifence->gt, &ifence->base, ifence->start,
- ifence->end, ifence->asid);
+ return err;
}
-static void invalidation_fence_init(struct xe_gt *gt,
- struct invalidation_fence *ifence,
- struct dma_fence *fence,
- u64 start, u64 end, u32 asid)
+static int xe_pt_svm_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
{
- int ret;
-
- trace_xe_gt_tlb_invalidation_fence_create(gt_to_xe(gt), &ifence->base);
+ struct xe_vm *vm = pt_update->vops->vm;
+ struct xe_vma_ops *vops = pt_update->vops;
+ struct xe_vm_pgtable_update_ops *pt_update_ops =
+ &vops->pt_update_ops[pt_update->tile_id];
+ struct xe_vma_op *op;
+ int err;
- xe_gt_tlb_invalidation_fence_init(gt, &ifence->base, false);
+ err = xe_pt_pre_commit(pt_update);
+ if (err)
+ return err;
- ifence->fence = fence;
- ifence->gt = gt;
- ifence->start = start;
- ifence->end = end;
- ifence->asid = asid;
+ xe_svm_notifier_lock(vm);
- INIT_WORK(&ifence->work, invalidation_fence_work_func);
- ret = dma_fence_add_callback(fence, &ifence->cb, invalidation_fence_cb);
- if (ret == -ENOENT) {
- dma_fence_put(ifence->fence); /* Usually dropped in CB */
- invalidation_fence_work_func(&ifence->work);
- } else if (ret) {
- dma_fence_put(&ifence->base.base); /* Caller ref */
- dma_fence_put(&ifence->base.base); /* Creation ref */
+ list_for_each_entry(op, &vops->list, link) {
+ err = op_check_svm_userptr(vm, op, pt_update_ops);
+ if (err) {
+ xe_svm_notifier_unlock(vm);
+ break;
+ }
}
- xe_gt_assert(gt, !ret || ret == -ENOENT);
+ return err;
}
+#endif
struct xe_pt_stage_unbind_walk {
/** @base: The pagewalk base-class. */
@@ -1879,7 +1828,7 @@ static int bind_op_prepare(struct xe_vm *vm, struct xe_tile *tile,
xe_vma_start(vma),
xe_vma_end(vma));
++pt_update_ops->current_op;
- pt_update_ops->needs_userptr_lock |= xe_vma_is_userptr(vma);
+ pt_update_ops->needs_svm_lock |= xe_vma_is_userptr(vma);
/*
* If rebind, we have to invalidate TLB on !LR vms to invalidate
@@ -1987,7 +1936,7 @@ static int unbind_op_prepare(struct xe_tile *tile,
xe_pt_update_ops_rfence_interval(pt_update_ops, xe_vma_start(vma),
xe_vma_end(vma));
++pt_update_ops->current_op;
- pt_update_ops->needs_userptr_lock |= xe_vma_is_userptr(vma);
+ pt_update_ops->needs_svm_lock |= xe_vma_is_userptr(vma);
pt_update_ops->needs_invalidation = true;
xe_pt_commit_prepare_unbind(vma, pt_op->entries, pt_op->num_entries);
@@ -2235,7 +2184,7 @@ static void bind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
vma->tile_invalidated & ~BIT(tile->id));
vma->tile_staged &= ~BIT(tile->id);
if (xe_vma_is_userptr(vma)) {
- lockdep_assert_held_read(&vm->userptr.notifier_lock);
+ xe_svm_assert_held_read(vm);
to_userptr_vma(vma)->userptr.initial_bind = true;
}
@@ -2271,7 +2220,7 @@ static void unbind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
if (!vma->tile_present) {
list_del_init(&vma->combined_links.rebind);
if (xe_vma_is_userptr(vma)) {
- lockdep_assert_held_read(&vm->userptr.notifier_lock);
+ xe_svm_assert_held_read(vm);
spin_lock(&vm->userptr.invalidated_lock);
list_del_init(&to_userptr_vma(vma)->userptr.invalidate_link);
@@ -2374,22 +2323,25 @@ static const struct xe_migrate_pt_update_ops migrate_ops = {
.pre_commit = xe_pt_pre_commit,
};
-static const struct xe_migrate_pt_update_ops userptr_migrate_ops = {
+#if IS_ENABLED(CONFIG_DRM_GPUSVM)
+static const struct xe_migrate_pt_update_ops svm_userptr_migrate_ops = {
.populate = xe_vm_populate_pgtable,
.clear = xe_migrate_clear_pgtable_callback,
- .pre_commit = xe_pt_userptr_pre_commit,
-};
-
-#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)
-static const struct xe_migrate_pt_update_ops svm_migrate_ops = {
- .populate = xe_vm_populate_pgtable,
- .clear = xe_migrate_clear_pgtable_callback,
- .pre_commit = xe_pt_svm_pre_commit,
+ .pre_commit = xe_pt_svm_userptr_pre_commit,
};
#else
-static const struct xe_migrate_pt_update_ops svm_migrate_ops;
+static const struct xe_migrate_pt_update_ops svm_userptr_migrate_ops;
#endif
+static struct xe_dep_scheduler *to_dep_scheduler(struct xe_exec_queue *q,
+ struct xe_gt *gt)
+{
+ if (xe_gt_is_media_type(gt))
+ return q->tlb_inval[XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT].dep_scheduler;
+
+ return q->tlb_inval[XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT].dep_scheduler;
+}
+
/**
* xe_pt_update_ops_run() - Run PT update operations
* @tile: Tile of PT update operations
@@ -2407,8 +2359,8 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
struct xe_vm *vm = vops->vm;
struct xe_vm_pgtable_update_ops *pt_update_ops =
&vops->pt_update_ops[tile->id];
- struct dma_fence *fence;
- struct invalidation_fence *ifence = NULL, *mfence = NULL;
+ struct dma_fence *fence, *ifence, *mfence;
+ struct xe_tlb_inval_job *ijob = NULL, *mjob = NULL;
struct dma_fence **fences = NULL;
struct dma_fence_array *cf = NULL;
struct xe_range_fence *rfence;
@@ -2416,9 +2368,7 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
int err = 0, i;
struct xe_migrate_pt_update update = {
.ops = pt_update_ops->needs_svm_lock ?
- &svm_migrate_ops :
- pt_update_ops->needs_userptr_lock ?
- &userptr_migrate_ops :
+ &svm_userptr_migrate_ops :
&migrate_ops,
.vops = vops,
.tile_id = tile->id,
@@ -2440,26 +2390,45 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
#endif
if (pt_update_ops->needs_invalidation) {
- ifence = kzalloc(sizeof(*ifence), GFP_KERNEL);
- if (!ifence) {
- err = -ENOMEM;
+ struct xe_exec_queue *q = pt_update_ops->q;
+ struct xe_dep_scheduler *dep_scheduler =
+ to_dep_scheduler(q, tile->primary_gt);
+
+ ijob = xe_tlb_inval_job_create(q, &tile->primary_gt->tlb_inval,
+ dep_scheduler,
+ pt_update_ops->start,
+ pt_update_ops->last,
+ vm->usm.asid);
+ if (IS_ERR(ijob)) {
+ err = PTR_ERR(ijob);
goto kill_vm_tile1;
}
+ update.ijob = ijob;
+
if (tile->media_gt) {
- mfence = kzalloc(sizeof(*ifence), GFP_KERNEL);
- if (!mfence) {
- err = -ENOMEM;
- goto free_ifence;
+ dep_scheduler = to_dep_scheduler(q, tile->media_gt);
+
+ mjob = xe_tlb_inval_job_create(q,
+ &tile->media_gt->tlb_inval,
+ dep_scheduler,
+ pt_update_ops->start,
+ pt_update_ops->last,
+ vm->usm.asid);
+ if (IS_ERR(mjob)) {
+ err = PTR_ERR(mjob);
+ goto free_ijob;
}
+ update.mjob = mjob;
+
fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
if (!fences) {
err = -ENOMEM;
- goto free_ifence;
+ goto free_ijob;
}
cf = dma_fence_array_alloc(2);
if (!cf) {
err = -ENOMEM;
- goto free_ifence;
+ goto free_ijob;
}
}
}
@@ -2467,7 +2436,7 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
rfence = kzalloc(sizeof(*rfence), GFP_KERNEL);
if (!rfence) {
err = -ENOMEM;
- goto free_ifence;
+ goto free_ijob;
}
fence = xe_migrate_update_pgtables(tile->migrate, &update);
@@ -2491,30 +2460,31 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
pt_update_ops->last, fence))
dma_fence_wait(fence, false);
- /* tlb invalidation must be done before signaling rebind */
- if (ifence) {
- if (mfence)
- dma_fence_get(fence);
- invalidation_fence_init(tile->primary_gt, ifence, fence,
- pt_update_ops->start,
- pt_update_ops->last, vm->usm.asid);
- if (mfence) {
- invalidation_fence_init(tile->media_gt, mfence, fence,
- pt_update_ops->start,
- pt_update_ops->last, vm->usm.asid);
- fences[0] = &ifence->base.base;
- fences[1] = &mfence->base.base;
+ /* tlb invalidation must be done before signaling unbind/rebind */
+ if (ijob) {
+ struct dma_fence *__fence;
+
+ ifence = xe_tlb_inval_job_push(ijob, tile->migrate, fence);
+ __fence = ifence;
+
+ if (mjob) {
+ fences[0] = ifence;
+ mfence = xe_tlb_inval_job_push(mjob, tile->migrate,
+ fence);
+ fences[1] = mfence;
+
dma_fence_array_init(cf, 2, fences,
vm->composite_fence_ctx,
vm->composite_fence_seqno++,
false);
- fence = &cf->base;
- } else {
- fence = &ifence->base.base;
+ __fence = &cf->base;
}
+
+ dma_fence_put(fence);
+ fence = __fence;
}
- if (!mfence) {
+ if (!mjob) {
dma_resv_add_fence(xe_vm_resv(vm), fence,
pt_update_ops->wait_vm_bookkeep ?
DMA_RESV_USAGE_KERNEL :
@@ -2523,35 +2493,36 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
list_for_each_entry(op, &vops->list, link)
op_commit(vops->vm, tile, pt_update_ops, op, fence, NULL);
} else {
- dma_resv_add_fence(xe_vm_resv(vm), &ifence->base.base,
+ dma_resv_add_fence(xe_vm_resv(vm), ifence,
pt_update_ops->wait_vm_bookkeep ?
DMA_RESV_USAGE_KERNEL :
DMA_RESV_USAGE_BOOKKEEP);
- dma_resv_add_fence(xe_vm_resv(vm), &mfence->base.base,
+ dma_resv_add_fence(xe_vm_resv(vm), mfence,
pt_update_ops->wait_vm_bookkeep ?
DMA_RESV_USAGE_KERNEL :
DMA_RESV_USAGE_BOOKKEEP);
list_for_each_entry(op, &vops->list, link)
- op_commit(vops->vm, tile, pt_update_ops, op,
- &ifence->base.base, &mfence->base.base);
+ op_commit(vops->vm, tile, pt_update_ops, op, ifence,
+ mfence);
}
if (pt_update_ops->needs_svm_lock)
xe_svm_notifier_unlock(vm);
- if (pt_update_ops->needs_userptr_lock)
- up_read(&vm->userptr.notifier_lock);
+
+ xe_tlb_inval_job_put(mjob);
+ xe_tlb_inval_job_put(ijob);
return fence;
free_rfence:
kfree(rfence);
-free_ifence:
+free_ijob:
kfree(cf);
kfree(fences);
- kfree(mfence);
- kfree(ifence);
+ xe_tlb_inval_job_put(mjob);
+ xe_tlb_inval_job_put(ijob);
kill_vm_tile1:
if (err != -EAGAIN && err != -ENODATA && tile->id)
xe_vm_kill(vops->vm, false);
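
Condensed, the job lifecycle that replaces the open-coded invalidation fences in xe_pt_update_ops_run() is the three calls below (signatures as they appear in this diff; not a standalone snippet):

/* Create one job per GT that needs a TLB invalidation ... */
ijob = xe_tlb_inval_job_create(q, &tile->primary_gt->tlb_inval,
			       dep_scheduler, pt_update_ops->start,
			       pt_update_ops->last, vm->usm.asid);

/* ... push it behind the PT-update fence; returns the job's fence ... */
ifence = xe_tlb_inval_job_push(ijob, tile->migrate, fence);

/* ... and drop the creation reference once the fence is consumed */
xe_tlb_inval_job_put(ijob);
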
diff --git a/drivers/gpu/drm/xe/xe_pt.h b/drivers/gpu/drm/xe/xe_pt.h
index 5ecf003d513c..4daeebaab5a1 100644
--- a/drivers/gpu/drm/xe/xe_pt.h
+++ b/drivers/gpu/drm/xe/xe_pt.h
@@ -10,6 +10,7 @@
#include "xe_pt_types.h"
struct dma_fence;
+struct drm_exec;
struct xe_bo;
struct xe_device;
struct xe_exec_queue;
@@ -29,7 +30,7 @@ struct xe_vma_ops;
unsigned int xe_pt_shift(unsigned int level);
struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
- unsigned int level);
+ unsigned int level, struct drm_exec *exec);
void xe_pt_populate_empty(struct xe_tile *tile, struct xe_vm *vm,
struct xe_pt *pt);
diff --git a/drivers/gpu/drm/xe/xe_pt_types.h b/drivers/gpu/drm/xe/xe_pt_types.h
index 69eab6f37cfe..881f01e14db8 100644
--- a/drivers/gpu/drm/xe/xe_pt_types.h
+++ b/drivers/gpu/drm/xe/xe_pt_types.h
@@ -45,8 +45,7 @@ struct xe_pt_ops {
u64 (*pte_encode_addr)(struct xe_device *xe, u64 addr,
u16 pat_index,
u32 pt_level, bool devmem, u64 flags);
- u64 (*pde_encode_bo)(struct xe_bo *bo, u64 bo_offset,
- u16 pat_index);
+ u64 (*pde_encode_bo)(struct xe_bo *bo, u64 bo_offset);
};
struct xe_pt_entry {
@@ -106,8 +105,6 @@ struct xe_vm_pgtable_update_ops {
u32 current_op;
/** @needs_svm_lock: Needs SVM lock */
bool needs_svm_lock;
- /** @needs_userptr_lock: Needs userptr lock */
- bool needs_userptr_lock;
/** @needs_invalidation: Needs invalidation */
bool needs_invalidation;
/**
diff --git a/drivers/gpu/drm/xe/xe_pxp.c b/drivers/gpu/drm/xe/xe_pxp.c
index 3d62008c99f1..bdbdbbf6a678 100644
--- a/drivers/gpu/drm/xe/xe_pxp.c
+++ b/drivers/gpu/drm/xe/xe_pxp.c
@@ -688,6 +688,7 @@ start:
return ret;
}
+ALLOW_ERROR_INJECTION(xe_pxp_exec_queue_add, ERRNO);
static void __pxp_exec_queue_remove(struct xe_pxp *pxp, struct xe_exec_queue *q, bool lock)
{
diff --git a/drivers/gpu/drm/xe/xe_pxp_submit.c b/drivers/gpu/drm/xe/xe_pxp_submit.c
index ca95f2a4d4ef..e60526e30030 100644
--- a/drivers/gpu/drm/xe/xe_pxp_submit.c
+++ b/drivers/gpu/drm/xe/xe_pxp_submit.c
@@ -54,8 +54,9 @@ static int allocate_vcs_execution_resources(struct xe_pxp *pxp)
* Each termination is 16 DWORDS, so 4K is enough to contain a
* termination for each session.
*/
- bo = xe_bo_create_pin_map(xe, tile, NULL, SZ_4K, ttm_bo_type_kernel,
- XE_BO_FLAG_SYSTEM | XE_BO_FLAG_PINNED | XE_BO_FLAG_GGTT);
+ bo = xe_bo_create_pin_map_novm(xe, tile, SZ_4K, ttm_bo_type_kernel,
+ XE_BO_FLAG_SYSTEM | XE_BO_FLAG_PINNED | XE_BO_FLAG_GGTT,
+ false);
if (IS_ERR(bo)) {
err = PTR_ERR(bo);
goto out_queue;
@@ -87,7 +88,9 @@ static int allocate_gsc_client_resources(struct xe_gt *gt,
{
struct xe_tile *tile = gt_to_tile(gt);
struct xe_device *xe = tile_to_xe(tile);
+ struct xe_validation_ctx ctx;
struct xe_hw_engine *hwe;
+ struct drm_exec exec;
struct xe_vm *vm;
struct xe_bo *bo;
struct xe_exec_queue *q;
@@ -106,15 +109,26 @@ static int allocate_gsc_client_resources(struct xe_gt *gt,
return PTR_ERR(vm);
/* We allocate a single object for the batch and the in/out memory */
- xe_vm_lock(vm, false);
- bo = xe_bo_create_pin_map(xe, tile, vm, PXP_BB_SIZE + inout_size * 2,
- ttm_bo_type_kernel,
- XE_BO_FLAG_SYSTEM | XE_BO_FLAG_PINNED | XE_BO_FLAG_NEEDS_UC);
- xe_vm_unlock(vm);
- if (IS_ERR(bo)) {
- err = PTR_ERR(bo);
- goto vm_out;
+
+ xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags){}, err) {
+ err = xe_vm_drm_exec_lock(vm, &exec);
+ drm_exec_retry_on_contention(&exec);
+ if (err)
+ break;
+
+ bo = xe_bo_create_pin_map(xe, tile, vm, PXP_BB_SIZE + inout_size * 2,
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_SYSTEM | XE_BO_FLAG_PINNED |
+ XE_BO_FLAG_NEEDS_UC, &exec);
+ drm_exec_retry_on_contention(&exec);
+ if (IS_ERR(bo)) {
+ err = PTR_ERR(bo);
+ xe_validation_retry_on_oom(&ctx, &err);
+ break;
+ }
}
+ if (err)
+ goto vm_out;
fence = xe_vm_bind_kernel_bo(vm, bo, NULL, 0, XE_CACHE_WB);
if (IS_ERR(fence)) {
diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c
index d517ec9ddcbf..e1b603aba61b 100644
--- a/drivers/gpu/drm/xe/xe_query.c
+++ b/drivers/gpu/drm/xe/xe_query.c
@@ -21,12 +21,14 @@
#include "xe_force_wake.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
+#include "xe_gt_topology.h"
#include "xe_guc_hwconfig.h"
#include "xe_macros.h"
#include "xe_mmio.h"
#include "xe_oa.h"
#include "xe_pxp.h"
#include "xe_ttm_vram_mgr.h"
+#include "xe_vram_types.h"
#include "xe_wa.h"
static const u16 xe_to_user_engine_class[] = {
@@ -337,7 +339,7 @@ static int query_config(struct xe_device *xe, struct drm_xe_device_query *query)
config->num_params = num_params;
config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID] =
xe->info.devid | (xe->info.revid << 16);
- if (xe_device_get_root_tile(xe)->mem.vram.usable_size)
+ if (xe->mem.vram)
config->info[DRM_XE_QUERY_CONFIG_FLAGS] |=
DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM;
if (xe->info.has_usm && IS_ENABLED(CONFIG_DRM_XE_GPUSVM))
@@ -410,7 +412,7 @@ static int query_gt_list(struct xe_device *xe, struct drm_xe_device_query *query
gt_list->gt_list[iter].near_mem_regions = 0x1;
else
gt_list->gt_list[iter].near_mem_regions =
- BIT(gt_to_tile(gt)->id) << 1;
+ BIT(gt_to_tile(gt)->mem.vram->id) << 1;
gt_list->gt_list[iter].far_mem_regions = xe->info.mem_region_mask ^
gt_list->gt_list[iter].near_mem_regions;
@@ -476,7 +478,7 @@ static size_t calc_topo_query_size(struct xe_device *xe)
sizeof_field(struct xe_gt, fuse_topo.eu_mask_per_dss);
/* L3bank mask may not be available for some GTs */
- if (!XE_WA(gt, no_media_l3))
+ if (xe_gt_topology_report_l3(gt))
query_size += sizeof(struct drm_xe_query_topology_mask) +
sizeof_field(struct xe_gt, fuse_topo.l3_bank_mask);
}
@@ -539,7 +541,7 @@ static int query_gt_topology(struct xe_device *xe,
* mask, then it's better to omit L3 from the query rather than
* reporting bogus or zeroed information to userspace.
*/
- if (!XE_WA(gt, no_media_l3)) {
+ if (xe_gt_topology_report_l3(gt)) {
topo.type = DRM_XE_TOPO_L3_BANK;
err = copy_mask(&query_ptr, &topo, gt->fuse_topo.l3_bank_mask,
sizeof(gt->fuse_topo.l3_bank_mask));
@@ -748,10 +750,8 @@ static int query_eu_stall(struct xe_device *xe,
u32 num_rates;
int ret;
- if (!xe_eu_stall_supported_on_platform(xe)) {
- drm_dbg(&xe->drm, "EU stall monitoring is not supported on this platform\n");
+ if (!xe_eu_stall_supported_on_platform(xe))
return -ENODEV;
- }
array_size = xe_eu_stall_get_sampling_rates(&num_rates, &rates);
size = sizeof(struct drm_xe_query_eu_stall) + array_size;
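
Earlier in this file, query_gt_list() encodes memory regions with SMEM as bit 0 and VRAM regions starting at bit 1, which is why the near-region mask for a discrete GT is now derived from vram->id. A worked example with hypothetical values:

u64 mem_region_mask = 0x3;		/* SMEM + one VRAM region */
u64 near_mem_regions = BIT(0) << 1;	/* VRAM id 0 lands on bit 1: 0x2 */
u64 far_mem_regions = mem_region_mask ^ near_mem_regions;	/* 0x1 */
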
diff --git a/drivers/gpu/drm/xe/xe_res_cursor.h b/drivers/gpu/drm/xe/xe_res_cursor.h
index d1a403cfb628..4e00008b7081 100644
--- a/drivers/gpu/drm/xe/xe_res_cursor.h
+++ b/drivers/gpu/drm/xe/xe_res_cursor.h
@@ -55,8 +55,8 @@ struct xe_res_cursor {
u32 mem_type;
/** @sgl: Scatterlist for cursor */
struct scatterlist *sgl;
- /** @dma_addr: Current element in a struct drm_pagemap_device_addr array */
- const struct drm_pagemap_device_addr *dma_addr;
+ /** @dma_addr: Current element in a struct drm_pagemap_addr array */
+ const struct drm_pagemap_addr *dma_addr;
/** @mm: Buddy allocator for VRAM cursor */
struct drm_buddy *mm;
/**
@@ -170,7 +170,7 @@ static inline void __xe_res_sg_next(struct xe_res_cursor *cur)
*/
static inline void __xe_res_dma_next(struct xe_res_cursor *cur)
{
- const struct drm_pagemap_device_addr *addr = cur->dma_addr;
+ const struct drm_pagemap_addr *addr = cur->dma_addr;
u64 start = cur->start;
while (start >= cur->dma_seg_size) {
@@ -222,14 +222,14 @@ static inline void xe_res_first_sg(const struct sg_table *sg,
/**
* xe_res_first_dma - initialize a xe_res_cursor with dma_addr array
*
- * @dma_addr: struct drm_pagemap_device_addr array to walk
+ * @dma_addr: struct drm_pagemap_addr array to walk
* @start: Start of the range
* @size: Size of the range
* @cur: cursor object to initialize
*
* Start walking over the range of allocations between @start and @size.
*/
-static inline void xe_res_first_dma(const struct drm_pagemap_device_addr *dma_addr,
+static inline void xe_res_first_dma(const struct drm_pagemap_addr *dma_addr,
u64 start, u64 size,
struct xe_res_cursor *cur)
{
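
A cursor initialized with xe_res_first_dma() is then walked with the existing xe_res_next() helper until nothing remains. A minimal iteration sketch; program_pte() is a hypothetical consumer standing in for the real PTE-encoding loop:

struct xe_res_cursor cur;

for (xe_res_first_dma(dma_addr, 0, size, &cur); cur.remaining;
     xe_res_next(&cur, cur.size))
	program_pte(cur.start, cur.size);	/* hypothetical consumer */
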
diff --git a/drivers/gpu/drm/xe/xe_ring_ops.c b/drivers/gpu/drm/xe/xe_ring_ops.c
index 7b50c7c1ee21..d71837773d6c 100644
--- a/drivers/gpu/drm/xe/xe_ring_ops.c
+++ b/drivers/gpu/drm/xe/xe_ring_ops.c
@@ -110,10 +110,10 @@ static int emit_bb_start(u64 batch_addr, u32 ppgtt_flag, u32 *dw, int i)
return i;
}
-static int emit_flush_invalidate(u32 addr, u32 val, u32 *dw, int i)
+static int emit_flush_invalidate(u32 addr, u32 val, u32 flush_flags, u32 *dw, int i)
{
- dw[i++] = MI_FLUSH_DW | MI_INVALIDATE_TLB | MI_FLUSH_DW_OP_STOREDW |
- MI_FLUSH_IMM_DW;
+ dw[i++] = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW |
+ MI_FLUSH_IMM_DW | (flush_flags & MI_INVALIDATE_TLB);
dw[i++] = addr | MI_FLUSH_DW_USE_GTT;
dw[i++] = 0;
@@ -179,7 +179,7 @@ static int emit_render_cache_flush(struct xe_sched_job *job, u32 *dw, int i)
bool lacks_render = !(gt->info.engine_mask & XE_HW_ENGINE_RCS_MASK);
u32 flags;
- if (XE_WA(gt, 14016712196))
+ if (XE_GT_WA(gt, 14016712196))
i = emit_pipe_control(dw, i, 0, PIPE_CONTROL_DEPTH_CACHE_FLUSH,
LRC_PPHWSP_FLUSH_INVAL_SCRATCH_ADDR, 0);
@@ -190,7 +190,7 @@ static int emit_render_cache_flush(struct xe_sched_job *job, u32 *dw, int i)
PIPE_CONTROL_DC_FLUSH_ENABLE |
PIPE_CONTROL_FLUSH_ENABLE);
- if (XE_WA(gt, 1409600907))
+ if (XE_GT_WA(gt, 1409600907))
flags |= PIPE_CONTROL_DEPTH_STALL;
if (lacks_render)
@@ -206,7 +206,7 @@ static int emit_pipe_control_to_ring_end(struct xe_hw_engine *hwe, u32 *dw, int
if (hwe->class != XE_ENGINE_CLASS_RENDER)
return i;
- if (XE_WA(hwe->gt, 16020292621))
+ if (XE_GT_WA(hwe->gt, 16020292621))
i = emit_pipe_control(dw, i, 0, PIPE_CONTROL_LRI_POST_SYNC,
RING_NOPID(hwe->mmio_base).addr, 0);
@@ -410,16 +410,14 @@ static void emit_migration_job_gen12(struct xe_sched_job *job,
i = emit_bb_start(job->ptrs[0].batch_addr, BIT(8), dw, i);
dw[i++] = preparser_disable(true);
- i = emit_flush_invalidate(saddr, seqno, dw, i);
+ i = emit_flush_invalidate(saddr, seqno, job->migrate_flush_flags, dw, i);
dw[i++] = preparser_disable(false);
i = emit_bb_start(job->ptrs[1].batch_addr, BIT(8), dw, i);
- dw[i++] = MI_FLUSH_DW | MI_INVALIDATE_TLB | job->migrate_flush_flags |
- MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_IMM_DW;
- dw[i++] = xe_lrc_seqno_ggtt_addr(lrc) | MI_FLUSH_DW_USE_GTT;
- dw[i++] = 0;
- dw[i++] = seqno; /* value */
+ i = emit_flush_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno,
+ job->migrate_flush_flags,
+ dw, i);
i = emit_user_interrupt(dw, i);
diff --git a/drivers/gpu/drm/xe/xe_rtp.c b/drivers/gpu/drm/xe/xe_rtp.c
index 95571b87aa73..b5f430d59f80 100644
--- a/drivers/gpu/drm/xe/xe_rtp.c
+++ b/drivers/gpu/drm/xe/xe_rtp.c
@@ -9,6 +9,7 @@
#include <uapi/drm/xe_drm.h>
+#include "xe_configfs.h"
#include "xe_gt.h"
#include "xe_gt_topology.h"
#include "xe_macros.h"
@@ -363,3 +364,15 @@ bool xe_rtp_match_not_sriov_vf(const struct xe_gt *gt,
{
return !IS_SRIOV_VF(gt_to_xe(gt));
}
+
+bool xe_rtp_match_psmi_enabled(const struct xe_gt *gt,
+ const struct xe_hw_engine *hwe)
+{
+ return xe_configfs_get_psmi_enabled(to_pci_dev(gt_to_xe(gt)->drm.dev));
+}
+
+bool xe_rtp_match_gt_has_discontiguous_dss_groups(const struct xe_gt *gt,
+ const struct xe_hw_engine *hwe)
+{
+ return xe_gt_has_discontiguous_dss_groups(gt);
+}
diff --git a/drivers/gpu/drm/xe/xe_rtp.h b/drivers/gpu/drm/xe/xe_rtp.h
index 5ed6c14b9ae3..ac12ddf6cde6 100644
--- a/drivers/gpu/drm/xe/xe_rtp.h
+++ b/drivers/gpu/drm/xe/xe_rtp.h
@@ -477,4 +477,10 @@ bool xe_rtp_match_first_render_or_compute(const struct xe_gt *gt,
bool xe_rtp_match_not_sriov_vf(const struct xe_gt *gt,
const struct xe_hw_engine *hwe);
+bool xe_rtp_match_psmi_enabled(const struct xe_gt *gt,
+ const struct xe_hw_engine *hwe);
+
+bool xe_rtp_match_gt_has_discontiguous_dss_groups(const struct xe_gt *gt,
+ const struct xe_hw_engine *hwe);
+
#endif
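
The new match helpers plug into RTP tables through the FUNC() combinator, like the existing xe_rtp_match_not_sriov_vf(). A hedged sketch of a table entry; the rule name, graphics version, and action are placeholders:

{ XE_RTP_NAME("example-psmi-tuning"),
  XE_RTP_RULES(GRAPHICS_VERSION(2004), FUNC(xe_rtp_match_psmi_enabled)),
  XE_RTP_ACTIONS(SET(EXAMPLE_REG, EXAMPLE_BIT))	/* placeholder action */
},
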
diff --git a/drivers/gpu/drm/xe/xe_sa.c b/drivers/gpu/drm/xe/xe_sa.c
index 1d43e183ca21..fedd017d6dd3 100644
--- a/drivers/gpu/drm/xe/xe_sa.c
+++ b/drivers/gpu/drm/xe/xe_sa.c
@@ -69,7 +69,6 @@ struct xe_sa_manager *__xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u3
}
sa_manager->bo = bo;
sa_manager->is_iomem = bo->vmap.is_iomem;
- sa_manager->gpu_addr = xe_bo_ggtt_addr(bo);
if (bo->vmap.is_iomem) {
sa_manager->cpu_ptr = kvzalloc(managed_size, GFP_KERNEL);
diff --git a/drivers/gpu/drm/xe/xe_sa.h b/drivers/gpu/drm/xe/xe_sa.h
index 1170ee5a81a8..99dbf0eea540 100644
--- a/drivers/gpu/drm/xe/xe_sa.h
+++ b/drivers/gpu/drm/xe/xe_sa.h
@@ -7,6 +7,8 @@
#include <linux/sizes.h>
#include <linux/types.h>
+
+#include "xe_bo.h"
#include "xe_sa_types.h"
struct dma_fence;
@@ -43,9 +45,20 @@ to_xe_sa_manager(struct drm_suballoc_manager *mng)
return container_of(mng, struct xe_sa_manager, base);
}
+/**
+ * xe_sa_manager_gpu_addr - Retrieve the GPU address of the suballocator's
+ * backing storage BO
+ * @sa_manager: the &xe_sa_manager struct instance
+ *
+ * Return: GGTT address of the backing storage BO.
+ */
+static inline u64 xe_sa_manager_gpu_addr(struct xe_sa_manager *sa_manager)
+{
+ return xe_bo_ggtt_addr(sa_manager->bo);
+}
+
static inline u64 xe_sa_bo_gpu_addr(struct drm_suballoc *sa)
{
- return to_xe_sa_manager(sa->manager)->gpu_addr +
+ return xe_sa_manager_gpu_addr(to_xe_sa_manager(sa->manager)) +
drm_suballoc_soffset(sa);
}
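
A minimal usage sketch (hypothetical caller, not part of this patch) showing how a
suballocation's GPU address decomposes after this change; ``sa`` is assumed to be a
``struct drm_suballoc *`` obtained from the pool::

	struct xe_sa_manager *mgr = to_xe_sa_manager(sa->manager);
	u64 base = xe_sa_manager_gpu_addr(mgr);      /* GGTT address of the backing BO */
	u64 addr = base + drm_suballoc_soffset(sa);  /* same as xe_sa_bo_gpu_addr(sa) */
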
diff --git a/drivers/gpu/drm/xe/xe_sa_types.h b/drivers/gpu/drm/xe/xe_sa_types.h
index 2b070ff1292e..cb7238799dcb 100644
--- a/drivers/gpu/drm/xe/xe_sa_types.h
+++ b/drivers/gpu/drm/xe/xe_sa_types.h
@@ -12,7 +12,6 @@ struct xe_bo;
struct xe_sa_manager {
struct drm_suballoc_manager base;
struct xe_bo *bo;
- u64 gpu_addr;
void *cpu_ptr;
bool is_iomem;
};
diff --git a/drivers/gpu/drm/xe/xe_sriov.c b/drivers/gpu/drm/xe/xe_sriov.c
index a0eab44c0e76..7d2d6de2aabf 100644
--- a/drivers/gpu/drm/xe/xe_sriov.c
+++ b/drivers/gpu/drm/xe/xe_sriov.c
@@ -15,6 +15,7 @@
#include "xe_sriov.h"
#include "xe_sriov_pf.h"
#include "xe_sriov_vf.h"
+#include "xe_sriov_vf_ccs.h"
/**
* xe_sriov_mode_to_string - Convert enum value to string.
@@ -157,3 +158,17 @@ const char *xe_sriov_function_name(unsigned int n, char *buf, size_t size)
strscpy(buf, "PF", size);
return buf;
}
+
+/**
+ * xe_sriov_init_late() - Late SR-IOV initialization.
+ * @xe: the &xe_device to initialize
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_init_late(struct xe_device *xe)
+{
+ if (IS_SRIOV_VF(xe))
+ return xe_sriov_vf_init_late(xe);
+
+ return 0;
+}
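
A hypothetical call site for this helper (placement in the probe flow is an
assumption; only the helper itself is part of this patch)::

	err = xe_sriov_init_late(xe);
	if (err)
		return err;
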
diff --git a/drivers/gpu/drm/xe/xe_sriov.h b/drivers/gpu/drm/xe/xe_sriov.h
index 688fbabf08f1..6db45df55615 100644
--- a/drivers/gpu/drm/xe/xe_sriov.h
+++ b/drivers/gpu/drm/xe/xe_sriov.h
@@ -18,6 +18,7 @@ const char *xe_sriov_function_name(unsigned int n, char *buf, size_t len);
void xe_sriov_probe_early(struct xe_device *xe);
void xe_sriov_print_info(struct xe_device *xe, struct drm_printer *p);
int xe_sriov_init(struct xe_device *xe);
+int xe_sriov_init_late(struct xe_device *xe);
static inline enum xe_sriov_mode xe_device_sriov_mode(const struct xe_device *xe)
{
diff --git a/drivers/gpu/drm/xe/xe_sriov_vf.c b/drivers/gpu/drm/xe/xe_sriov_vf.c
index 26e243c28994..cdd9f8e78b2a 100644
--- a/drivers/gpu/drm/xe/xe_sriov_vf.c
+++ b/drivers/gpu/drm/xe/xe_sriov_vf.c
@@ -3,6 +3,7 @@
* Copyright © 2023-2024 Intel Corporation
*/
+#include <drm/drm_debugfs.h>
#include <drm/drm_managed.h>
#include "xe_assert.h"
@@ -10,11 +11,16 @@
#include "xe_gt.h"
#include "xe_gt_sriov_printk.h"
#include "xe_gt_sriov_vf.h"
+#include "xe_guc.h"
#include "xe_guc_ct.h"
+#include "xe_guc_submit.h"
+#include "xe_irq.h"
+#include "xe_lrc.h"
#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_sriov_printk.h"
#include "xe_sriov_vf.h"
+#include "xe_sriov_vf_ccs.h"
#include "xe_tile_sriov_vf.h"
/**
@@ -124,16 +130,66 @@
* | | |
*/
-static bool vf_migration_supported(struct xe_device *xe)
+/**
+ * xe_sriov_vf_migration_supported - Report whether SR-IOV VF migration is
+ * supported or not.
+ * @xe: the &xe_device to check
+ *
+ * Returns: true if VF migration is supported, false otherwise.
+ */
+bool xe_sriov_vf_migration_supported(struct xe_device *xe)
+{
+ xe_assert(xe, IS_SRIOV_VF(xe));
+ return xe->sriov.vf.migration.enabled;
+}
+
+static void vf_disable_migration(struct xe_device *xe, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list va_args;
+
+ xe_assert(xe, IS_SRIOV_VF(xe));
+
+ va_start(va_args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &va_args;
+ xe_sriov_notice(xe, "migration disabled: %pV\n", &vaf);
+ va_end(va_args);
+
+ xe->sriov.vf.migration.enabled = false;
+}
+
+static void migration_worker_func(struct work_struct *w);
+
+static void vf_migration_init_early(struct xe_device *xe)
{
/*
* TODO: Add conditions to allow specific platforms, when they're
* supported at production quality.
*/
- return IS_ENABLED(CONFIG_DRM_XE_DEBUG);
-}
+ if (!IS_ENABLED(CONFIG_DRM_XE_DEBUG))
+ return vf_disable_migration(xe,
+ "experimental feature not available on production builds");
+
+ if (GRAPHICS_VER(xe) < 20)
+ return vf_disable_migration(xe, "requires gfx version >= 20, but only %u found",
+ GRAPHICS_VER(xe));
+
+ if (!IS_DGFX(xe)) {
+ struct xe_uc_fw_version guc_version;
+
+ xe_gt_sriov_vf_guc_versions(xe_device_get_gt(xe, 0), NULL, &guc_version);
+ if (MAKE_GUC_VER_STRUCT(guc_version) < MAKE_GUC_VER(1, 23, 0))
+ return vf_disable_migration(xe,
+ "CCS migration requires GuC ABI >= 1.23 but only %u.%u found",
+ guc_version.major, guc_version.minor);
+ }
-static void migration_worker_func(struct work_struct *w);
+ INIT_WORK(&xe->sriov.vf.migration.worker, migration_worker_func);
+
+ xe->sriov.vf.migration.enabled = true;
+ xe_sriov_dbg(xe, "migration support enabled\n");
+}
/**
* xe_sriov_vf_init_early - Initialize SR-IOV VF specific data.
@@ -141,10 +197,57 @@ static void migration_worker_func(struct work_struct *w);
*/
void xe_sriov_vf_init_early(struct xe_device *xe)
{
- INIT_WORK(&xe->sriov.vf.migration.worker, migration_worker_func);
+ vf_migration_init_early(xe);
+}
+
+/**
+ * vf_post_migration_shutdown - Stop the driver activities after VF migration.
+ * @xe: the &xe_device struct instance
+ *
+ * After this VM is migrated and assigned to a new VF, it is running on new
+ * hardware, and therefore many hardware-dependent states and related
+ * structures require fixups. Without those fixups the hardware cannot do any
+ * work, and therefore all GPU pipelines are stalled.
+ * Stop some of the kernel activities to make the fixup process faster.
+ */
+static void vf_post_migration_shutdown(struct xe_device *xe)
+{
+ struct xe_gt *gt;
+ unsigned int id;
+ int ret = 0;
+
+ for_each_gt(gt, xe, id) {
+ xe_guc_submit_pause(&gt->uc.guc);
+ ret |= xe_guc_submit_reset_block(&gt->uc.guc);
+ }
+
+ if (ret)
+ drm_info(&xe->drm, "migration recovery encountered ongoing reset\n");
+}
+
+/**
+ * vf_post_migration_kickstart - Restart the driver activities on the new hardware.
+ * @xe: the &xe_device struct instance
+ *
+ * After we have finished with all post-migration fixups, restart the driver
+ * activities to continue feeding the GPU with workloads.
+ */
+static void vf_post_migration_kickstart(struct xe_device *xe)
+{
+ struct xe_gt *gt;
+ unsigned int id;
- if (!vf_migration_supported(xe))
- xe_sriov_info(xe, "migration not supported by this module version\n");
+ /*
+	 * Make sure interrupts on the new HW are properly set. The GuC IRQ
+	 * must already be working at this point, since the recovery has
+	 * started, but the rest were not enabled using the procedure from
+	 * the spec.
+ */
+ xe_irq_resume(xe);
+
+ for_each_gt(gt, xe, id) {
+ xe_guc_submit_reset_unblock(&gt->uc.guc);
+ xe_guc_submit_unpause(&gt->uc.guc);
+ }
}
static bool gt_vf_post_migration_needed(struct xe_gt *gt)
@@ -192,6 +295,11 @@ static int vf_get_next_migrated_gt_id(struct xe_device *xe)
return -1;
}
+static size_t post_migration_scratch_size(struct xe_device *xe)
+{
+ return max(xe_lrc_reg_size(xe), LRC_WA_BB_SIZE);
+}
+
/**
* Perform post-migration fixups on a single GT.
*
@@ -208,19 +316,31 @@ static int vf_get_next_migrated_gt_id(struct xe_device *xe)
static int gt_vf_post_migration_fixups(struct xe_gt *gt)
{
s64 shift;
+ void *buf;
int err;
+ buf = kmalloc(post_migration_scratch_size(gt_to_xe(gt)), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
err = xe_gt_sriov_vf_query_config(gt);
if (err)
- return err;
+ goto out;
shift = xe_gt_sriov_vf_ggtt_shift(gt);
if (shift) {
xe_tile_sriov_vf_fixup_ggtt_nodes(gt_to_tile(gt), shift);
- /* FIXME: add the recovery steps */
+ xe_gt_sriov_vf_default_lrcs_hwsp_rebase(gt);
+ err = xe_guc_contexts_hwsp_rebase(&gt->uc.guc, buf);
+ if (err)
+ goto out;
+ xe_guc_jobs_ring_rebase(&gt->uc.guc);
xe_guc_ct_fixup_messages_with_ggtt(&gt->uc.guc.ct, shift);
}
- return 0;
+
+out:
+ kfree(buf);
+ return err;
}
static void vf_post_migration_recovery(struct xe_device *xe)
@@ -230,9 +350,10 @@ static void vf_post_migration_recovery(struct xe_device *xe)
drm_dbg(&xe->drm, "migration recovery in progress\n");
xe_pm_runtime_get(xe);
+ vf_post_migration_shutdown(xe);
- if (!vf_migration_supported(xe)) {
- xe_sriov_err(xe, "migration not supported by this module version\n");
+ if (!xe_sriov_vf_migration_supported(xe)) {
+ xe_sriov_err(xe, "migration is not supported\n");
err = -ENOTRECOVERABLE;
goto fail;
}
@@ -247,6 +368,7 @@ static void vf_post_migration_recovery(struct xe_device *xe)
set_bit(id, &fixed_gts);
}
+ vf_post_migration_kickstart(xe);
err = vf_post_migration_notify_resfix_done(xe, fixed_gts);
if (err)
goto fail;
@@ -306,3 +428,48 @@ void xe_sriov_vf_start_migration_recovery(struct xe_device *xe)
drm_info(&xe->drm, "VF migration recovery %s\n", started ?
"scheduled" : "already in progress");
}
+
+/**
+ * xe_sriov_vf_init_late() - Late SR-IOV VF initialization.
+ * @xe: the &xe_device to initialize
+ *
+ * This function initializes support for CCS migration.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_vf_init_late(struct xe_device *xe)
+{
+ int err = 0;
+
+ if (xe_sriov_vf_migration_supported(xe))
+ err = xe_sriov_vf_ccs_init(xe);
+
+ return err;
+}
+
+static int sa_info_vf_ccs(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = m->private;
+ struct xe_device *xe = to_xe_device(node->minor->dev);
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ xe_sriov_vf_ccs_print(xe, &p);
+ return 0;
+}
+
+static const struct drm_info_list debugfs_list[] = {
+ { .name = "sa_info_vf_ccs", .show = sa_info_vf_ccs },
+};
+
+/**
+ * xe_sriov_vf_debugfs_register - Register VF debugfs attributes.
+ * @xe: the &xe_device
+ * @root: the root &dentry
+ *
+ * Prepare debugfs attributes exposed by the VF.
+ */
+void xe_sriov_vf_debugfs_register(struct xe_device *xe, struct dentry *root)
+{
+ drm_debugfs_create_files(debugfs_list, ARRAY_SIZE(debugfs_list),
+ root, xe->drm.primary);
+}
diff --git a/drivers/gpu/drm/xe/xe_sriov_vf.h b/drivers/gpu/drm/xe/xe_sriov_vf.h
index 7b8622cff2b7..9e752105ec2a 100644
--- a/drivers/gpu/drm/xe/xe_sriov_vf.h
+++ b/drivers/gpu/drm/xe/xe_sriov_vf.h
@@ -6,9 +6,15 @@
#ifndef _XE_SRIOV_VF_H_
#define _XE_SRIOV_VF_H_
+#include <linux/types.h>
+
+struct dentry;
struct xe_device;
void xe_sriov_vf_init_early(struct xe_device *xe);
+int xe_sriov_vf_init_late(struct xe_device *xe);
void xe_sriov_vf_start_migration_recovery(struct xe_device *xe);
+bool xe_sriov_vf_migration_supported(struct xe_device *xe);
+void xe_sriov_vf_debugfs_register(struct xe_device *xe, struct dentry *root);
#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov_vf_ccs.c b/drivers/gpu/drm/xe/xe_sriov_vf_ccs.c
new file mode 100644
index 000000000000..8dec616c37c9
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_vf_ccs.c
@@ -0,0 +1,410 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include "instructions/xe_mi_commands.h"
+#include "instructions/xe_gpu_commands.h"
+#include "xe_bb.h"
+#include "xe_bo.h"
+#include "xe_device.h"
+#include "xe_exec_queue.h"
+#include "xe_exec_queue_types.h"
+#include "xe_guc_submit.h"
+#include "xe_lrc.h"
+#include "xe_migrate.h"
+#include "xe_pm.h"
+#include "xe_sa.h"
+#include "xe_sriov_printk.h"
+#include "xe_sriov_vf.h"
+#include "xe_sriov_vf_ccs.h"
+#include "xe_sriov_vf_ccs_types.h"
+
+/**
+ * DOC: VF save/restore of compression Meta Data
+ *
+ * VF KMD registers two special contexts/LRCAs.
+ *
+ * Save Context/LRCA: contains the necessary commands + page tables to trigger
+ * a save of the metadata / compression control surface (aka CCS) to regular
+ * system memory in the VM.
+ *
+ * Restore Context/LRCA: contains the necessary commands + page tables to
+ * trigger a restore of the metadata / compression control surface (aka CCS)
+ * from regular system memory in the VM back to the corresponding CCS pool.
+ *
+ * The diagram below explains the steps needed for VF save/restore of
+ * compression metadata::
+ *
+ * CCS Save CCS Restore VF KMD Guc BCS
+ * LRCA LRCA
+ * | | | | |
+ * | | | | |
+ * | Create Save LRCA | | |
+ * [ ]<----------------------------- [ ] | |
+ * | | | | |
+ * | | | | |
+ * | | | Register save LRCA | |
+ * | | | with Guc | |
+ * | | [ ]--------------------------->[ ] |
+ * | | | | |
+ * | | Create restore LRCA | | |
+ * | [ ]<------------------[ ] | |
+ * | | | | |
+ * | | | Register restore LRCA | |
+ * | | | with Guc | |
+ * | | [ ]--------------------------->[ ] |
+ * | | | | |
+ * | | | | |
+ * | | [ ]------------------------- | |
+ * | | [ ] Allocate main memory. | | |
+ * | | [ ] Allocate CCS memory. | | |
+ * | | [ ] Update Main memory & | | |
+ * [ ]<------------------------------[ ] CCS pages PPGTT + BB | | |
+ * | [ ]<------------------[ ] cmds to save & restore.| | |
+ * | | [ ]<------------------------ | |
+ * | | | | |
+ * | | | | |
+ * | | | | |
+ * : : : : :
+ * ---------------------------- VF Paused -------------------------------------
+ * | | | | |
+ * | | | | |
+ * | | | |Schedule |
+ * | | | |CCS Save |
+ * | | | | LRCA |
+ * | | | [ ]------>[ ]
+ * | | | | |
+ * | | | | |
+ * | | | |CCS save |
+ * | | | |completed|
+ * | | | [ ]<------[ ]
+ * | | | | |
+ * : : : : :
+ * ---------------------------- VM Migrated -----------------------------------
+ * | | | | |
+ * | | | | |
+ * : : : : :
+ * ---------------------------- VF Resumed ------------------------------------
+ * | | | | |
+ * | | | | |
+ * | | [ ]-------------- | |
+ * | | [ ] Fix up GGTT | | |
+ * | | [ ]<------------- | |
+ * | | | | |
+ * | | | | |
+ * | | | Notify VF_RESFIX_DONE | |
+ * | | [ ]--------------------------->[ ] |
+ * | | | | |
+ * | | | |Schedule |
+ * | | | |CCS |
+ * | | | |Restore |
+ * | | | |LRCA |
+ * | | | [ ]------>[ ]
+ * | | | | |
+ * | | | | |
+ * | | | |CCS |
+ * | | | |restore |
+ * | | | |completed|
+ * | | | [ ]<------[ ]
+ * | | | | |
+ * | | | | |
+ * | | | VF_RESFIX_DONE complete | |
+ * | | | notification | |
+ * | | [ ]<---------------------------[ ] |
+ * | | | | |
+ * | | | | |
+ * : : : : :
+ * ------------------------- Continue VM restore ------------------------------
+ */
+
+static u64 get_ccs_bb_pool_size(struct xe_device *xe)
+{
+ u64 sys_mem_size, ccs_mem_size, ptes, bb_pool_size;
+ struct sysinfo si;
+
+ si_meminfo(&si);
+ sys_mem_size = si.totalram * si.mem_unit;
+ ccs_mem_size = div64_u64(sys_mem_size, NUM_BYTES_PER_CCS_BYTE(xe));
+ ptes = DIV_ROUND_UP_ULL(sys_mem_size + ccs_mem_size, XE_PAGE_SIZE);
+
+	/*
+	 * We need the BB size below to hold the PTE mappings and some DWs for
+	 * the copy command. In reality we need space for many copy commands,
+	 * so allocate double the calculated size, which is enough to hold the
+	 * GPU instructions for the whole region.
+	 */
+ bb_pool_size = ptes * sizeof(u32);
+
+ return round_up(bb_pool_size * 2, SZ_1M);
+}
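
For scale, a worked sizing example (assuming NUM_BYTES_PER_CCS_BYTE(xe) == 256, a
common flat-CCS ratio, and 64 GiB of system RAM; both values are assumptions)::

	sys_mem_size = 64 GiB
	ccs_mem_size = 64 GiB / 256                     = 256 MiB
	ptes         = (64 GiB + 256 MiB) / 4 KiB       = 16842752
	bb_pool_size = 16842752 * sizeof(u32)           ~= 64.25 MiB
	return value = round_up(2 * bb_pool_size, 1 MiB) = 129 MiB
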
+
+static int alloc_bb_pool(struct xe_tile *tile, struct xe_sriov_vf_ccs_ctx *ctx)
+{
+ struct xe_device *xe = tile_to_xe(tile);
+ struct xe_sa_manager *sa_manager;
+ u64 bb_pool_size;
+ int offset, err;
+
+ bb_pool_size = get_ccs_bb_pool_size(xe);
+ xe_sriov_info(xe, "Allocating %s CCS BB pool size = %lldMB\n",
+ ctx->ctx_id ? "Restore" : "Save", bb_pool_size / SZ_1M);
+
+ sa_manager = xe_sa_bo_manager_init(tile, bb_pool_size, SZ_16);
+
+ if (IS_ERR(sa_manager)) {
+ xe_sriov_err(xe, "Suballocator init failed with error: %pe\n",
+ sa_manager);
+ err = PTR_ERR(sa_manager);
+ return err;
+ }
+
+ offset = 0;
+ xe_map_memset(xe, &sa_manager->bo->vmap, offset, MI_NOOP,
+ bb_pool_size);
+
+ offset = bb_pool_size - sizeof(u32);
+ xe_map_wr(xe, &sa_manager->bo->vmap, offset, u32, MI_BATCH_BUFFER_END);
+
+ ctx->mem.ccs_bb_pool = sa_manager;
+
+ return 0;
+}
+
+static void ccs_rw_update_ring(struct xe_sriov_vf_ccs_ctx *ctx)
+{
+ u64 addr = xe_sa_manager_gpu_addr(ctx->mem.ccs_bb_pool);
+ struct xe_lrc *lrc = xe_exec_queue_lrc(ctx->mig_q);
+ u32 dw[10], i = 0;
+
+ dw[i++] = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ dw[i++] = MI_BATCH_BUFFER_START | XE_INSTR_NUM_DW(3);
+ dw[i++] = lower_32_bits(addr);
+ dw[i++] = upper_32_bits(addr);
+ dw[i++] = MI_NOOP;
+ dw[i++] = MI_NOOP;
+
+ xe_lrc_write_ring(lrc, dw, i * sizeof(u32));
+ xe_lrc_set_ring_tail(lrc, lrc->ring.tail);
+}
+
+static int register_save_restore_context(struct xe_sriov_vf_ccs_ctx *ctx)
+{
+ int ctx_type;
+
+ switch (ctx->ctx_id) {
+ case XE_SRIOV_VF_CCS_READ_CTX:
+ ctx_type = GUC_CONTEXT_COMPRESSION_SAVE;
+ break;
+ case XE_SRIOV_VF_CCS_WRITE_CTX:
+ ctx_type = GUC_CONTEXT_COMPRESSION_RESTORE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ xe_guc_register_vf_exec_queue(ctx->mig_q, ctx_type);
+ return 0;
+}
+
+/**
+ * xe_sriov_vf_ccs_register_context - Register read/write contexts with the GuC.
+ * @xe: the &xe_device to register contexts on.
+ *
+ * This function registers the read and write contexts with the GuC.
+ * Re-registration is needed whenever resuming from runtime PM suspend.
+ *
+ * Return: 0 on success. Negative error code on failure.
+ */
+int xe_sriov_vf_ccs_register_context(struct xe_device *xe)
+{
+ enum xe_sriov_vf_ccs_rw_ctxs ctx_id;
+ struct xe_sriov_vf_ccs_ctx *ctx;
+ int err;
+
+ xe_assert(xe, IS_VF_CCS_READY(xe));
+
+ for_each_ccs_rw_ctx(ctx_id) {
+ ctx = &xe->sriov.vf.ccs.contexts[ctx_id];
+ err = register_save_restore_context(ctx);
+ if (err)
+ return err;
+ }
+
+ return err;
+}
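
A hedged sketch of a runtime-resume call site (placement assumed; only the helper
itself and the IS_VF_CCS_READY() check are part of this patch)::

	if (IS_VF_CCS_READY(xe)) {
		err = xe_sriov_vf_ccs_register_context(xe);
		if (err)
			return err;
	}
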
+
+static void xe_sriov_vf_ccs_fini(void *arg)
+{
+ struct xe_sriov_vf_ccs_ctx *ctx = arg;
+ struct xe_lrc *lrc = xe_exec_queue_lrc(ctx->mig_q);
+
+ /*
+	 * Make TAIL = HEAD in the ring so that no issues are seen if the GuC
+	 * submits this context to HW on VF pause after the device is unbound.
+ */
+ xe_lrc_set_ring_tail(lrc, xe_lrc_ring_head(lrc));
+ xe_exec_queue_put(ctx->mig_q);
+}
+
+/**
+ * xe_sriov_vf_ccs_init - Set up the LRCAs for CCS save & restore.
+ * @xe: the &xe_device to initialize
+ *
+ * This function shall be called only by the VF. It initializes the
+ * LRCAs and the suballocators needed for CCS save & restore.
+ *
+ * Return: 0 on success. Negative error code on failure.
+ */
+int xe_sriov_vf_ccs_init(struct xe_device *xe)
+{
+ struct xe_tile *tile = xe_device_get_root_tile(xe);
+ enum xe_sriov_vf_ccs_rw_ctxs ctx_id;
+ struct xe_sriov_vf_ccs_ctx *ctx;
+ struct xe_exec_queue *q;
+ u32 flags;
+ int err;
+
+ xe_assert(xe, IS_SRIOV_VF(xe));
+ xe_assert(xe, xe_sriov_vf_migration_supported(xe));
+
+ if (IS_DGFX(xe) || !xe_device_has_flat_ccs(xe))
+ return 0;
+
+ for_each_ccs_rw_ctx(ctx_id) {
+ ctx = &xe->sriov.vf.ccs.contexts[ctx_id];
+ ctx->ctx_id = ctx_id;
+
+ flags = EXEC_QUEUE_FLAG_KERNEL |
+ EXEC_QUEUE_FLAG_PERMANENT |
+ EXEC_QUEUE_FLAG_MIGRATE;
+ q = xe_exec_queue_create_bind(xe, tile, flags, 0);
+ if (IS_ERR(q)) {
+ err = PTR_ERR(q);
+ goto err_ret;
+ }
+ ctx->mig_q = q;
+
+ err = alloc_bb_pool(tile, ctx);
+ if (err)
+ goto err_free_queue;
+
+ ccs_rw_update_ring(ctx);
+
+ err = register_save_restore_context(ctx);
+ if (err)
+ goto err_free_queue;
+
+ err = devm_add_action_or_reset(xe->drm.dev,
+ xe_sriov_vf_ccs_fini,
+ ctx);
+ if (err)
+ goto err_ret;
+ }
+
+ xe->sriov.vf.ccs.initialized = 1;
+
+ return 0;
+
+err_free_queue:
+ xe_exec_queue_put(q);
+
+err_ret:
+ return err;
+}
+
+/**
+ * xe_sriov_vf_ccs_attach_bo - Insert CCS read/write commands in the BO.
+ * @bo: the &xe_bo to which batch buffer commands will be added.
+ *
+ * This function shall be called only by the VF. It inserts the PTEs and
+ * copy command instructions in the BO by calling the
+ * xe_migrate_ccs_rw_copy() function.
+ *
+ * Returns: 0 if successful, negative error code on failure.
+ */
+int xe_sriov_vf_ccs_attach_bo(struct xe_bo *bo)
+{
+ struct xe_device *xe = xe_bo_device(bo);
+ enum xe_sriov_vf_ccs_rw_ctxs ctx_id;
+ struct xe_sriov_vf_ccs_ctx *ctx;
+ struct xe_tile *tile;
+ struct xe_bb *bb;
+ int err = 0;
+
+ xe_assert(xe, IS_VF_CCS_READY(xe));
+
+ tile = xe_device_get_root_tile(xe);
+
+ for_each_ccs_rw_ctx(ctx_id) {
+ bb = bo->bb_ccs[ctx_id];
+ /* bb should be NULL here. Assert if not NULL */
+ xe_assert(xe, !bb);
+
+ ctx = &xe->sriov.vf.ccs.contexts[ctx_id];
+ err = xe_migrate_ccs_rw_copy(tile, ctx->mig_q, bo, ctx_id);
+ }
+ return err;
+}
+
+/**
+ * xe_sriov_vf_ccs_detach_bo - Remove CCS read/write commands from the BO.
+ * @bo: the &xe_bo from which batch buffer commands will be removed.
+ *
+ * This function shall be called only by the VF. It removes the PTEs and copy
+ * command instructions from the BO; the BB is overwritten with MI_NOOP before
+ * being freed.
+ *
+ * Returns: 0 if successful.
+ */
+int xe_sriov_vf_ccs_detach_bo(struct xe_bo *bo)
+{
+ struct xe_device *xe = xe_bo_device(bo);
+ enum xe_sriov_vf_ccs_rw_ctxs ctx_id;
+ struct xe_bb *bb;
+
+ xe_assert(xe, IS_VF_CCS_READY(xe));
+
+ if (!xe_bo_has_valid_ccs_bb(bo))
+ return 0;
+
+ for_each_ccs_rw_ctx(ctx_id) {
+ bb = bo->bb_ccs[ctx_id];
+ if (!bb)
+ continue;
+
+ memset(bb->cs, MI_NOOP, bb->len * sizeof(u32));
+ xe_bb_free(bb, NULL);
+ bo->bb_ccs[ctx_id] = NULL;
+ }
+ return 0;
+}
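
A minimal sketch of the intended pairing over a BO's lifetime (the surrounding
caller is assumed, not shown in this patch)::

	err = xe_sriov_vf_ccs_attach_bo(bo);	/* after BO creation: emit save/restore BBs */
	if (err)
		goto err_cleanup;
	/* ... BO in use ... */
	xe_sriov_vf_ccs_detach_bo(bo);		/* before BO free: NOOP and free the BBs */
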
+
+/**
+ * xe_sriov_vf_ccs_print - Print VF CCS details.
+ * @xe: the &xe_device
+ * @p: the &drm_printer
+ *
+ * This function is for VF use only.
+ */
+void xe_sriov_vf_ccs_print(struct xe_device *xe, struct drm_printer *p)
+{
+ struct xe_sa_manager *bb_pool;
+ enum xe_sriov_vf_ccs_rw_ctxs ctx_id;
+
+ if (!IS_VF_CCS_READY(xe))
+ return;
+
+ xe_pm_runtime_get(xe);
+
+ for_each_ccs_rw_ctx(ctx_id) {
+ bb_pool = xe->sriov.vf.ccs.contexts[ctx_id].mem.ccs_bb_pool;
+ if (!bb_pool)
+ break;
+
+ drm_printf(p, "ccs %s bb suballoc info\n", ctx_id ? "write" : "read");
+ drm_printf(p, "-------------------------\n");
+ drm_suballoc_dump_debug_info(&bb_pool->base, p, xe_sa_manager_gpu_addr(bb_pool));
+ drm_puts(p, "\n");
+ }
+
+ xe_pm_runtime_put(xe);
+}
diff --git a/drivers/gpu/drm/xe/xe_sriov_vf_ccs.h b/drivers/gpu/drm/xe/xe_sriov_vf_ccs.h
new file mode 100644
index 000000000000..0745c0ff0228
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_vf_ccs.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_SRIOV_VF_CCS_H_
+#define _XE_SRIOV_VF_CCS_H_
+
+#include "xe_device_types.h"
+#include "xe_sriov.h"
+#include "xe_sriov_vf_ccs_types.h"
+
+struct drm_printer;
+struct xe_device;
+struct xe_bo;
+
+int xe_sriov_vf_ccs_init(struct xe_device *xe);
+int xe_sriov_vf_ccs_attach_bo(struct xe_bo *bo);
+int xe_sriov_vf_ccs_detach_bo(struct xe_bo *bo);
+int xe_sriov_vf_ccs_register_context(struct xe_device *xe);
+void xe_sriov_vf_ccs_print(struct xe_device *xe, struct drm_printer *p);
+
+static inline bool xe_sriov_vf_ccs_ready(struct xe_device *xe)
+{
+ xe_assert(xe, IS_SRIOV_VF(xe));
+ return xe->sriov.vf.ccs.initialized;
+}
+
+#define IS_VF_CCS_READY(xe) ({ \
+ struct xe_device *xe__ = (xe); \
+ IS_SRIOV_VF(xe__) && xe_sriov_vf_ccs_ready(xe__); \
+ })
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov_vf_ccs_types.h b/drivers/gpu/drm/xe/xe_sriov_vf_ccs_types.h
new file mode 100644
index 000000000000..22c499943d2a
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_vf_ccs_types.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_SRIOV_VF_CCS_TYPES_H_
+#define _XE_SRIOV_VF_CCS_TYPES_H_
+
+#include <linux/types.h>
+
+#define for_each_ccs_rw_ctx(id__) \
+ for ((id__) = 0; (id__) < XE_SRIOV_VF_CCS_CTX_COUNT; (id__)++)
+
+enum xe_sriov_vf_ccs_rw_ctxs {
+ XE_SRIOV_VF_CCS_READ_CTX,
+ XE_SRIOV_VF_CCS_WRITE_CTX,
+ XE_SRIOV_VF_CCS_CTX_COUNT
+};
+
+struct xe_migrate;
+struct xe_sa_manager;
+
+/**
+ * struct xe_sriov_vf_ccs_ctx - VF CCS migration context data.
+ */
+struct xe_sriov_vf_ccs_ctx {
+	/** @ctx_id: id of the context */
+ enum xe_sriov_vf_ccs_rw_ctxs ctx_id;
+
+	/** @mig_q: exec queue used for migration */
+ struct xe_exec_queue *mig_q;
+
+ /** @mem: memory data */
+ struct {
+ /** @mem.ccs_bb_pool: Pool from which batch buffers are allocated. */
+ struct xe_sa_manager *ccs_bb_pool;
+ } mem;
+};
+
+/**
+ * struct xe_sriov_vf_ccs - The VF CCS migration support data.
+ */
+struct xe_sriov_vf_ccs {
+ /** @contexts: CCS read and write contexts for VF. */
+ struct xe_sriov_vf_ccs_ctx contexts[XE_SRIOV_VF_CCS_CTX_COUNT];
+
+	/** @initialized: whether VF CCS initialization has completed */
+ bool initialized;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov_vf_types.h b/drivers/gpu/drm/xe/xe_sriov_vf_types.h
index 8300416a6226..426cc5841958 100644
--- a/drivers/gpu/drm/xe/xe_sriov_vf_types.h
+++ b/drivers/gpu/drm/xe/xe_sriov_vf_types.h
@@ -9,6 +9,8 @@
#include <linux/types.h>
#include <linux/workqueue_types.h>
+#include "xe_sriov_vf_ccs_types.h"
+
/**
* struct xe_sriov_vf_relay_version - PF ABI version details.
*/
@@ -35,7 +37,15 @@ struct xe_device_vf {
struct work_struct worker;
/** @migration.gt_flags: Per-GT request flags for VF migration recovery */
unsigned long gt_flags;
+ /**
+	 * @migration.enabled: flag indicating whether migration support
+	 * is enabled, or was disabled due to missing prerequisites
+ */
+ bool enabled;
} migration;
+
+ /** @ccs: VF CCS state data */
+ struct xe_sriov_vf_ccs ccs;
};
#endif
diff --git a/drivers/gpu/drm/xe/xe_survivability_mode.c b/drivers/gpu/drm/xe/xe_survivability_mode.c
index 8f7b0add2364..1662bfddd4bc 100644
--- a/drivers/gpu/drm/xe/xe_survivability_mode.c
+++ b/drivers/gpu/drm/xe/xe_survivability_mode.c
@@ -22,15 +22,18 @@
#define MAX_SCRATCH_MMIO 8
/**
- * DOC: Xe Boot Survivability
+ * DOC: Survivability Mode
*
- * Boot Survivability is a software based workflow for recovering a system in a failed boot state
+ * Survivability Mode is a software-based workflow for recovering a system in a failed boot state.
* Here system recoverability is concerned with recovering the firmware responsible for boot.
*
- * This is implemented by loading the driver with bare minimum (no drm card) to allow the firmware
- * to be flashed through mei and collect telemetry. The driver's probe flow is modified
- * such that it enters survivability mode when pcode initialization is incomplete and boot status
- * denotes a failure.
+ * Boot Survivability
+ * ===================
+ *
+ * Boot Survivability is implemented by loading the driver with the bare minimum (no drm card) to
+ * allow the firmware to be flashed through the mei driver and to collect telemetry. The driver's
+ * probe flow is modified such that it enters survivability mode when pcode initialization is
+ * incomplete and the boot status denotes a failure.
*
* Survivability mode can also be entered manually using the survivability mode attribute available
* through configfs which is beneficial in several usecases. It can be used to address scenarios
@@ -48,7 +51,7 @@
* Survivability mode is indicated by the below admin-only readable sysfs which provides additional
* debug information::
*
- * /sys/bus/pci/devices/<device>/surivability_mode
+ * /sys/bus/pci/devices/<device>/survivability_mode
*
* Capability Information:
* Provides boot status
@@ -58,6 +61,22 @@
* Provides history of previous failures
* Auxiliary Information
* Certain failures may have information in addition to postcode information
+ *
+ * Runtime Survivability
+ * =====================
+ *
+ * Certain runtime firmware errors can cause the device to enter a wedged state
+ * (:ref:`xe-device-wedging`) requiring a firmware flash to restore normal operation.
+ * Runtime Survivability Mode indicates that a firmware flash is necessary to recover the device and
+ * is indicated by the presence of survivability mode sysfs::
+ *
+ * /sys/bus/pci/devices/<device>/survivability_mode
+ *
+ * Survivability mode sysfs provides information about the type of survivability mode.
+ *
+ * When such errors occur, userspace is notified with the drm device wedged uevent and runtime
+ * survivability mode. The user can then initiate a firmware flash using userspace tools like
+ * fwupd to restore the device to normal operation.
*/
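
For illustration, a minimal userspace sketch that dumps the attribute described
above (the PCI address is a placeholder; substitute the real device)::

	#include <stdio.h>

	int main(void)
	{
		char buf[512];
		FILE *f = fopen("/sys/bus/pci/devices/0000:03:00.0/survivability_mode", "r");

		if (!f)
			return 1;
		while (fgets(buf, sizeof(buf), f))
			fputs(buf, stdout);
		fclose(f);
		return 0;
	}
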
static u32 aux_history_offset(u32 reg_value)
@@ -123,6 +142,14 @@ static void log_survivability_info(struct pci_dev *pdev)
}
}
+static int check_boot_failure(struct xe_device *xe)
+{
+ struct xe_survivability *survivability = &xe->survivability;
+
+ return survivability->boot_status == NON_CRITICAL_FAILURE ||
+ survivability->boot_status == CRITICAL_FAILURE;
+}
+
static ssize_t survivability_mode_show(struct device *dev,
struct device_attribute *attr, char *buff)
{
@@ -132,6 +159,12 @@ static ssize_t survivability_mode_show(struct device *dev,
struct xe_survivability_info *info = survivability->info;
int index = 0, count = 0;
+ count += sysfs_emit_at(buff, count, "Survivability mode type: %s\n",
+ survivability->type ? "Runtime" : "Boot");
+
+ if (!check_boot_failure(xe))
+ return count;
+
for (index = 0; index < MAX_SCRATCH_MMIO; index++) {
if (info[index].reg)
count += sysfs_emit_at(buff, count, "%s: 0x%x - 0x%x\n", info[index].name,
@@ -152,12 +185,11 @@ static void xe_survivability_mode_fini(void *arg)
sysfs_remove_file(&dev->kobj, &dev_attr_survivability_mode.attr);
}
-static int enable_survivability_mode(struct pci_dev *pdev)
+static int create_survivability_sysfs(struct pci_dev *pdev)
{
struct device *dev = &pdev->dev;
struct xe_device *xe = pdev_to_xe_device(pdev);
- struct xe_survivability *survivability = &xe->survivability;
- int ret = 0;
+ int ret;
/* create survivability mode sysfs */
ret = sysfs_create_file(&dev->kobj, &dev_attr_survivability_mode.attr);
@@ -171,6 +203,20 @@ static int enable_survivability_mode(struct pci_dev *pdev)
if (ret)
return ret;
+ return 0;
+}
+
+static int enable_boot_survivability_mode(struct pci_dev *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct xe_device *xe = pdev_to_xe_device(pdev);
+ struct xe_survivability *survivability = &xe->survivability;
+ int ret = 0;
+
+ ret = create_survivability_sysfs(pdev);
+ if (ret)
+ return ret;
+
/* Make sure xe_heci_gsc_init() knows about survivability mode */
survivability->mode = true;
@@ -193,15 +239,36 @@ err:
return ret;
}
+static int init_survivability_mode(struct xe_device *xe)
+{
+ struct xe_survivability *survivability = &xe->survivability;
+ struct xe_survivability_info *info;
+
+ survivability->size = MAX_SCRATCH_MMIO;
+
+ info = devm_kcalloc(xe->drm.dev, survivability->size, sizeof(*info),
+ GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ survivability->info = info;
+
+ populate_survivability_info(xe);
+
+ return 0;
+}
+
/**
- * xe_survivability_mode_is_enabled - check if survivability mode is enabled
+ * xe_survivability_mode_is_boot_enabled - check if boot survivability mode is enabled
* @xe: xe device instance
*
- * Returns true if in survivability mode, false otherwise
+ * Returns true if in boot survivability mode, false otherwise
*/
-bool xe_survivability_mode_is_enabled(struct xe_device *xe)
+bool xe_survivability_mode_is_boot_enabled(struct xe_device *xe)
{
- return xe->survivability.mode;
+ struct xe_survivability *survivability = &xe->survivability;
+
+ return survivability->mode && survivability->type == XE_SURVIVABILITY_TYPE_BOOT;
}
/**
@@ -222,19 +289,10 @@ bool xe_survivability_mode_is_requested(struct xe_device *xe)
u32 data;
bool survivability_mode;
- if (!IS_DGFX(xe) || IS_SRIOV_VF(xe))
+ if (!IS_DGFX(xe) || IS_SRIOV_VF(xe) || xe->info.platform < XE_BATTLEMAGE)
return false;
survivability_mode = xe_configfs_get_survivability_mode(pdev);
-
- if (xe->info.platform < XE_BATTLEMAGE) {
- if (survivability_mode) {
- dev_err(&pdev->dev, "Survivability Mode is not supported on this card\n");
- xe_configfs_clear_survivability_mode(pdev);
- }
- return false;
- }
-
/* Enable survivability mode if set via configfs */
if (survivability_mode)
return true;
@@ -242,44 +300,78 @@ bool xe_survivability_mode_is_requested(struct xe_device *xe)
data = xe_mmio_read32(mmio, PCODE_SCRATCH(0));
survivability->boot_status = REG_FIELD_GET(BOOT_STATUS, data);
- return survivability->boot_status == NON_CRITICAL_FAILURE ||
- survivability->boot_status == CRITICAL_FAILURE;
+ return check_boot_failure(xe);
}
/**
- * xe_survivability_mode_enable - Initialize and enable the survivability mode
+ * xe_survivability_mode_runtime_enable - Initialize and enable runtime survivability mode
* @xe: xe device instance
*
- * Initialize survivability information and enable survivability mode
+ * Initialize survivability information and enable runtime survivability mode.
+ * Runtime survivability mode is enabled when certain errors cause the device to be
+ * in a non-recoverable state. The device is declared wedged with the appropriate
+ * recovery method, and the survivability mode sysfs is exposed to userspace.
*
- * Return: 0 if survivability mode is enabled or not requested; negative error
- * code otherwise.
+ * Return: 0 if runtime survivability mode is enabled, negative error code otherwise.
*/
-int xe_survivability_mode_enable(struct xe_device *xe)
+int xe_survivability_mode_runtime_enable(struct xe_device *xe)
{
struct xe_survivability *survivability = &xe->survivability;
- struct xe_survivability_info *info;
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ int ret;
- if (!xe_survivability_mode_is_requested(xe))
- return 0;
+ if (!IS_DGFX(xe) || IS_SRIOV_VF(xe) || xe->info.platform < XE_BATTLEMAGE) {
+ dev_err(&pdev->dev, "Runtime Survivability Mode not supported\n");
+ return -EINVAL;
+ }
- survivability->size = MAX_SCRATCH_MMIO;
+ ret = init_survivability_mode(xe);
+ if (ret)
+ return ret;
- info = devm_kcalloc(xe->drm.dev, survivability->size, sizeof(*info),
- GFP_KERNEL);
- if (!info)
- return -ENOMEM;
+ ret = create_survivability_sysfs(pdev);
+ if (ret)
+ dev_err(&pdev->dev, "Failed to create survivability mode sysfs\n");
- survivability->info = info;
+ survivability->type = XE_SURVIVABILITY_TYPE_RUNTIME;
+ dev_err(&pdev->dev, "Runtime Survivability mode enabled\n");
- populate_survivability_info(xe);
+ xe_device_set_wedged_method(xe, DRM_WEDGE_RECOVERY_VENDOR);
+ xe_device_declare_wedged(xe);
+	dev_err(&pdev->dev, "Firmware flash required, please refer to the userspace documentation for more details!\n");
+
+ return 0;
+}
+
+/**
+ * xe_survivability_mode_boot_enable - Initialize and enable boot survivability mode
+ * @xe: xe device instance
+ *
+ * Initialize survivability information and enable boot survivability mode
+ *
+ * Return: 0 if boot survivability mode is enabled or not requested, negative error
+ * code otherwise.
+ */
+int xe_survivability_mode_boot_enable(struct xe_device *xe)
+{
+ struct xe_survivability *survivability = &xe->survivability;
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ int ret;
- /* Only log debug information and exit if it is a critical failure */
+ if (!xe_survivability_mode_is_requested(xe))
+ return 0;
+
+ ret = init_survivability_mode(xe);
+ if (ret)
+ return ret;
+
+	/* Log breadcrumbs but do not enter survivability mode for critical boot errors */
if (survivability->boot_status == CRITICAL_FAILURE) {
log_survivability_info(pdev);
return -ENXIO;
}
- return enable_survivability_mode(pdev);
+ survivability->type = XE_SURVIVABILITY_TYPE_BOOT;
+
+ return enable_boot_survivability_mode(pdev);
}
diff --git a/drivers/gpu/drm/xe/xe_survivability_mode.h b/drivers/gpu/drm/xe/xe_survivability_mode.h
index 02231c2bf008..1cc94226aa82 100644
--- a/drivers/gpu/drm/xe/xe_survivability_mode.h
+++ b/drivers/gpu/drm/xe/xe_survivability_mode.h
@@ -10,8 +10,9 @@
struct xe_device;
-int xe_survivability_mode_enable(struct xe_device *xe);
-bool xe_survivability_mode_is_enabled(struct xe_device *xe);
+int xe_survivability_mode_boot_enable(struct xe_device *xe);
+int xe_survivability_mode_runtime_enable(struct xe_device *xe);
+bool xe_survivability_mode_is_boot_enabled(struct xe_device *xe);
bool xe_survivability_mode_is_requested(struct xe_device *xe);
#endif /* _XE_SURVIVABILITY_MODE_H_ */
diff --git a/drivers/gpu/drm/xe/xe_survivability_mode_types.h b/drivers/gpu/drm/xe/xe_survivability_mode_types.h
index 19d433e253df..cd65a5d167c9 100644
--- a/drivers/gpu/drm/xe/xe_survivability_mode_types.h
+++ b/drivers/gpu/drm/xe/xe_survivability_mode_types.h
@@ -9,6 +9,11 @@
#include <linux/limits.h>
#include <linux/types.h>
+enum xe_survivability_type {
+ XE_SURVIVABILITY_TYPE_BOOT,
+ XE_SURVIVABILITY_TYPE_RUNTIME,
+};
+
struct xe_survivability_info {
char name[NAME_MAX];
u32 reg;
@@ -30,6 +35,9 @@ struct xe_survivability {
/** @mode: boolean to indicate survivability mode */
bool mode;
+
+ /** @type: survivability type */
+ enum xe_survivability_type type;
};
#endif /* _XE_SURVIVABILITY_MODE_TYPES_H_ */
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index a7ff5975873f..7f2f1f041f1d 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -6,8 +6,8 @@
#include <drm/drm_drv.h>
#include "xe_bo.h"
+#include "xe_exec_queue_types.h"
#include "xe_gt_stats.h"
-#include "xe_gt_tlb_invalidation.h"
#include "xe_migrate.h"
#include "xe_module.h"
#include "xe_pm.h"
@@ -17,6 +17,7 @@
#include "xe_ttm_vram_mgr.h"
#include "xe_vm.h"
#include "xe_vm_types.h"
+#include "xe_vram_types.h"
static bool xe_svm_range_in_vram(struct xe_svm_range *range)
{
@@ -25,9 +26,9 @@ static bool xe_svm_range_in_vram(struct xe_svm_range *range)
* memory.
*/
- struct drm_gpusvm_range_flags flags = {
+ struct drm_gpusvm_pages_flags flags = {
/* Pairs with WRITE_ONCE in drm_gpusvm.c */
- .__flags = READ_ONCE(range->base.flags.__flags),
+ .__flags = READ_ONCE(range->base.pages.flags.__flags),
};
return flags.has_devmem_pages;
@@ -49,15 +50,15 @@ static struct xe_vm *range_to_vm(struct drm_gpusvm_range *r)
return gpusvm_to_vm(r->gpusvm);
}
-#define range_debug(r__, operaton__) \
+#define range_debug(r__, operation__) \
vm_dbg(&range_to_vm(&(r__)->base)->xe->drm, \
"%s: asid=%u, gpusvm=%p, vram=%d,%d, seqno=%lu, " \
"start=0x%014lx, end=0x%014lx, size=%lu", \
- (operaton__), range_to_vm(&(r__)->base)->usm.asid, \
+ (operation__), range_to_vm(&(r__)->base)->usm.asid, \
(r__)->base.gpusvm, \
xe_svm_range_in_vram((r__)) ? 1 : 0, \
xe_svm_range_has_vram_binding((r__)) ? 1 : 0, \
- (r__)->base.notifier_seq, \
+ (r__)->base.pages.notifier_seq, \
xe_svm_range_start((r__)), xe_svm_range_end((r__)), \
xe_svm_range_size((r__)))
@@ -112,6 +113,11 @@ xe_svm_garbage_collector_add_range(struct xe_vm *vm, struct xe_svm_range *range,
&vm->svm.garbage_collector.work);
}
+static void xe_svm_tlb_inval_count_stats_incr(struct xe_gt *gt)
+{
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_TLB_INVAL_COUNT, 1);
+}
+
static u8
xe_svm_range_notifier_event_begin(struct xe_vm *vm, struct drm_gpusvm_range *r,
const struct mmu_notifier_range *mmu_range,
@@ -128,7 +134,7 @@ xe_svm_range_notifier_event_begin(struct xe_vm *vm, struct drm_gpusvm_range *r,
range_debug(range, "NOTIFIER");
/* Skip if already unmapped or if no binding exist */
- if (range->base.flags.unmapped || !range->tile_present)
+ if (range->base.pages.flags.unmapped || !range->tile_present)
return 0;
range_debug(range, "NOTIFIER - EXECUTE");
@@ -144,13 +150,19 @@ xe_svm_range_notifier_event_begin(struct xe_vm *vm, struct drm_gpusvm_range *r,
*/
for_each_tile(tile, xe, id)
if (xe_pt_zap_ptes_range(tile, vm, range)) {
- tile_mask |= BIT(id);
/*
* WRITE_ONCE pairs with READ_ONCE in
* xe_vm_has_valid_gpu_mapping()
*/
WRITE_ONCE(range->tile_invalidated,
range->tile_invalidated | BIT(id));
+
+ if (!(tile_mask & BIT(id))) {
+ xe_svm_tlb_inval_count_stats_incr(tile->primary_gt);
+ if (tile->media_gt)
+ xe_svm_tlb_inval_count_stats_incr(tile->media_gt);
+ tile_mask |= BIT(id);
+ }
}
return tile_mask;
@@ -170,6 +182,24 @@ xe_svm_range_notifier_event_end(struct xe_vm *vm, struct drm_gpusvm_range *r,
mmu_range);
}
+static s64 xe_svm_stats_ktime_us_delta(ktime_t start)
+{
+ return IS_ENABLED(CONFIG_DEBUG_FS) ?
+ ktime_us_delta(ktime_get(), start) : 0;
+}
+
+static void xe_svm_tlb_inval_us_stats_incr(struct xe_gt *gt, ktime_t start)
+{
+ s64 us_delta = xe_svm_stats_ktime_us_delta(start);
+
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_TLB_INVAL_US, us_delta);
+}
+
+static ktime_t xe_svm_stats_ktime_get(void)
+{
+ return IS_ENABLED(CONFIG_DEBUG_FS) ? ktime_get() : 0;
+}
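
A minimal sketch of the CONFIG_DEBUG_FS-gated timing pattern these two helpers
enable (the surrounding work is a placeholder)::

	ktime_t start = xe_svm_stats_ktime_get();	/* 0 when !CONFIG_DEBUG_FS */
	/* ... timed work ... */
	xe_svm_tlb_inval_us_stats_incr(gt, start);	/* delta is likewise 0 then */
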
+
static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
struct drm_gpusvm_notifier *notifier,
const struct mmu_notifier_range *mmu_range)
@@ -177,8 +207,10 @@ static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
struct xe_vm *vm = gpusvm_to_vm(gpusvm);
struct xe_device *xe = vm->xe;
struct drm_gpusvm_range *r, *first;
+ struct xe_tile *tile;
+ ktime_t start = xe_svm_stats_ktime_get();
u64 adj_start = mmu_range->start, adj_end = mmu_range->end;
- u8 tile_mask = 0;
+ u8 tile_mask = 0, id;
long err;
xe_svm_assert_in_notifier(vm);
@@ -224,13 +256,20 @@ static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
xe_device_wmb(xe);
- err = xe_vm_range_tilemask_tlb_invalidation(vm, adj_start, adj_end, tile_mask);
+ err = xe_vm_range_tilemask_tlb_inval(vm, adj_start, adj_end, tile_mask);
WARN_ON_ONCE(err);
range_notifier_event_end:
r = first;
drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end)
xe_svm_range_notifier_event_end(vm, r, mmu_range);
+ for_each_tile(tile, xe, id) {
+ if (tile_mask & BIT(id)) {
+ xe_svm_tlb_inval_us_stats_incr(tile->primary_gt, start);
+ if (tile->media_gt)
+ xe_svm_tlb_inval_us_stats_incr(tile->media_gt, start);
+ }
+ }
}
static int __xe_svm_garbage_collector(struct xe_vm *vm,
@@ -252,24 +291,73 @@ static int __xe_svm_garbage_collector(struct xe_vm *vm,
return 0;
}
+static int xe_svm_range_set_default_attr(struct xe_vm *vm, u64 range_start, u64 range_end)
+{
+ struct xe_vma *vma;
+ struct xe_vma_mem_attr default_attr = {
+ .preferred_loc = {
+ .devmem_fd = DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE,
+ .migration_policy = DRM_XE_MIGRATE_ALL_PAGES,
+ },
+ .atomic_access = DRM_XE_ATOMIC_UNDEFINED,
+ };
+ int err = 0;
+
+ vma = xe_vm_find_vma_by_addr(vm, range_start);
+ if (!vma)
+ return -EINVAL;
+
+ if (xe_vma_has_default_mem_attrs(vma))
+ return 0;
+
+ vm_dbg(&vm->xe->drm, "Existing VMA start=0x%016llx, vma_end=0x%016llx",
+ xe_vma_start(vma), xe_vma_end(vma));
+
+ if (xe_vma_start(vma) == range_start && xe_vma_end(vma) == range_end) {
+ default_attr.pat_index = vma->attr.default_pat_index;
+ default_attr.default_pat_index = vma->attr.default_pat_index;
+ vma->attr = default_attr;
+ } else {
+ vm_dbg(&vm->xe->drm, "Split VMA start=0x%016llx, vma_end=0x%016llx",
+ range_start, range_end);
+ err = xe_vm_alloc_cpu_addr_mirror_vma(vm, range_start, range_end - range_start);
+ if (err) {
+ drm_warn(&vm->xe->drm, "VMA SPLIT failed: %pe\n", ERR_PTR(err));
+ xe_vm_kill(vm, true);
+ return err;
+ }
+ }
+
+ /*
+	 * When called from xe_svm_handle_pagefault() the original VMA might
+	 * have changed; signal this so the caller looks up the VMA again.
+ */
+ return -EAGAIN;
+}
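
A hedged caller sketch of the -EAGAIN contract described in the comment above
(variable names follow the garbage-collector loop below)::

	err = xe_svm_range_set_default_attr(vm, range_start, range_end);
	if (err == -EAGAIN) {
		/* VMA layout changed: look the VMA up again and retry */
	} else if (err) {
		return err;
	}
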
+
static int xe_svm_garbage_collector(struct xe_vm *vm)
{
struct xe_svm_range *range;
- int err;
+ u64 range_start;
+ u64 range_end;
+ int err, ret = 0;
lockdep_assert_held_write(&vm->lock);
if (xe_vm_is_closed_or_banned(vm))
return -ENOENT;
- spin_lock(&vm->svm.garbage_collector.lock);
for (;;) {
+ spin_lock(&vm->svm.garbage_collector.lock);
range = list_first_entry_or_null(&vm->svm.garbage_collector.range_list,
typeof(*range),
garbage_collector_link);
if (!range)
break;
+ range_start = xe_svm_range_start(range);
+ range_end = xe_svm_range_end(range);
+
list_del(&range->garbage_collector_link);
spin_unlock(&vm->svm.garbage_collector.lock);
@@ -282,11 +370,17 @@ static int xe_svm_garbage_collector(struct xe_vm *vm)
return err;
}
- spin_lock(&vm->svm.garbage_collector.lock);
+ err = xe_svm_range_set_default_attr(vm, range_start, range_end);
+ if (err) {
+ if (err == -EAGAIN)
+ ret = -EAGAIN;
+ else
+ return err;
+ }
}
spin_unlock(&vm->svm.garbage_collector.lock);
- return 0;
+ return ret;
}
static void xe_svm_garbage_collector_work_func(struct work_struct *w)
@@ -306,21 +400,15 @@ static struct xe_vram_region *page_to_vr(struct page *page)
return container_of(page_pgmap(page), struct xe_vram_region, pagemap);
}
-static struct xe_tile *vr_to_tile(struct xe_vram_region *vr)
-{
- return container_of(vr, struct xe_tile, mem.vram);
-}
-
static u64 xe_vram_region_page_to_dpa(struct xe_vram_region *vr,
struct page *page)
{
u64 dpa;
- struct xe_tile *tile = vr_to_tile(vr);
u64 pfn = page_to_pfn(page);
u64 offset;
- xe_tile_assert(tile, is_device_private_page(page));
- xe_tile_assert(tile, (pfn << PAGE_SHIFT) >= vr->hpa_base);
+ xe_assert(vr->xe, is_device_private_page(page));
+ xe_assert(vr->xe, (pfn << PAGE_SHIFT) >= vr->hpa_base);
offset = (pfn << PAGE_SHIFT) - vr->hpa_base;
dpa = vr->dpa_base + offset;
@@ -333,17 +421,74 @@ enum xe_svm_copy_dir {
XE_SVM_COPY_TO_SRAM,
};
-static int xe_svm_copy(struct page **pages, dma_addr_t *dma_addr,
+static void xe_svm_copy_kb_stats_incr(struct xe_gt *gt,
+ const enum xe_svm_copy_dir dir,
+ int kb)
+{
+ if (dir == XE_SVM_COPY_TO_VRAM)
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_DEVICE_COPY_KB, kb);
+ else
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_CPU_COPY_KB, kb);
+}
+
+static void xe_svm_copy_us_stats_incr(struct xe_gt *gt,
+ const enum xe_svm_copy_dir dir,
+ unsigned long npages,
+ ktime_t start)
+{
+ s64 us_delta = xe_svm_stats_ktime_us_delta(start);
+
+ if (dir == XE_SVM_COPY_TO_VRAM) {
+ switch (npages) {
+ case 1:
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_4K_DEVICE_COPY_US,
+ us_delta);
+ break;
+ case 16:
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_64K_DEVICE_COPY_US,
+ us_delta);
+ break;
+ case 512:
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_2M_DEVICE_COPY_US,
+ us_delta);
+ break;
+ }
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_DEVICE_COPY_US,
+ us_delta);
+ } else {
+ switch (npages) {
+ case 1:
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_4K_CPU_COPY_US,
+ us_delta);
+ break;
+ case 16:
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_64K_CPU_COPY_US,
+ us_delta);
+ break;
+ case 512:
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_2M_CPU_COPY_US,
+ us_delta);
+ break;
+ }
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_CPU_COPY_US,
+ us_delta);
+ }
+}
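
Assuming the driver's 4 KiB base page, the npages buckets above (1, 16 and 512)
correspond to 4K, 64K and 2M SVM ranges, matching the SZ_4K/SZ_64K/SZ_2M size
buckets used by the DECL_SVM_RANGE_* stats helpers later in this file.
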
+
+static int xe_svm_copy(struct page **pages,
+ struct drm_pagemap_addr *pagemap_addr,
unsigned long npages, const enum xe_svm_copy_dir dir)
{
struct xe_vram_region *vr = NULL;
- struct xe_tile *tile;
+ struct xe_gt *gt = NULL;
+ struct xe_device *xe;
struct dma_fence *fence = NULL;
unsigned long i;
#define XE_VRAM_ADDR_INVALID ~0x0ull
u64 vram_addr = XE_VRAM_ADDR_INVALID;
int err = 0, pos = 0;
bool sram = dir == XE_SVM_COPY_TO_SRAM;
+ ktime_t start = xe_svm_stats_ktime_get();
/*
* This flow is complex: it locates physically contiguous device pages,
@@ -365,12 +510,13 @@ static int xe_svm_copy(struct page **pages, dma_addr_t *dma_addr,
last = (i + 1) == npages;
/* No CPU page and no device pages queue'd to copy */
- if (!dma_addr[i] && vram_addr == XE_VRAM_ADDR_INVALID)
+ if (!pagemap_addr[i].addr && vram_addr == XE_VRAM_ADDR_INVALID)
continue;
if (!vr && spage) {
vr = page_to_vr(spage);
- tile = vr_to_tile(vr);
+ gt = xe_migrate_exec_queue(vr->migrate)->gt;
+ xe = vr->xe;
}
XE_WARN_ON(spage && page_to_vr(spage) != vr);
@@ -379,7 +525,7 @@ static int xe_svm_copy(struct page **pages, dma_addr_t *dma_addr,
* first device page, check if physical contiguous on subsequent
* device pages.
*/
- if (dma_addr[i] && spage) {
+ if (pagemap_addr[i].addr && spage) {
__vram_addr = xe_vram_region_page_to_dpa(vr, spage);
if (vram_addr == XE_VRAM_ADDR_INVALID) {
vram_addr = __vram_addr;
@@ -387,6 +533,14 @@ static int xe_svm_copy(struct page **pages, dma_addr_t *dma_addr,
}
match = vram_addr + PAGE_SIZE * (i - pos) == __vram_addr;
+ /* Expected with contiguous memory */
+ xe_assert(vr->xe, match);
+
+ if (pagemap_addr[i].order) {
+ i += NR_PAGES(pagemap_addr[i].order) - 1;
+ chunk = (i - pos) == (XE_MIGRATE_CHUNK_SIZE / PAGE_SIZE);
+ last = (i + 1) == npages;
+ }
}
/*
@@ -401,21 +555,26 @@ static int xe_svm_copy(struct page **pages, dma_addr_t *dma_addr,
int incr = (match && last) ? 1 : 0;
if (vram_addr != XE_VRAM_ADDR_INVALID) {
+ xe_svm_copy_kb_stats_incr(gt, dir,
+ (i - pos + incr) *
+ (PAGE_SIZE / SZ_1K));
if (sram) {
- vm_dbg(&tile->xe->drm,
+ vm_dbg(&xe->drm,
"COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
- vram_addr, (u64)dma_addr[pos], i - pos + incr);
- __fence = xe_migrate_from_vram(tile->migrate,
+ vram_addr,
+ (u64)pagemap_addr[pos].addr, i - pos + incr);
+ __fence = xe_migrate_from_vram(vr->migrate,
i - pos + incr,
vram_addr,
- dma_addr + pos);
+ &pagemap_addr[pos]);
} else {
- vm_dbg(&tile->xe->drm,
+ vm_dbg(&xe->drm,
"COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
- (u64)dma_addr[pos], vram_addr, i - pos + incr);
- __fence = xe_migrate_to_vram(tile->migrate,
+ (u64)pagemap_addr[pos].addr, vram_addr,
+ i - pos + incr);
+ __fence = xe_migrate_to_vram(vr->migrate,
i - pos + incr,
- dma_addr + pos,
+ &pagemap_addr[pos],
vram_addr);
}
if (IS_ERR(__fence)) {
@@ -428,7 +587,7 @@ static int xe_svm_copy(struct page **pages, dma_addr_t *dma_addr,
}
/* Setup physical address of next device page */
- if (dma_addr[i] && spage) {
+ if (pagemap_addr[i].addr && spage) {
vram_addr = __vram_addr;
pos = i;
} else {
@@ -437,19 +596,21 @@ static int xe_svm_copy(struct page **pages, dma_addr_t *dma_addr,
/* Extra mismatched device page, copy it */
if (!match && last && vram_addr != XE_VRAM_ADDR_INVALID) {
+ xe_svm_copy_kb_stats_incr(gt, dir,
+ (PAGE_SIZE / SZ_1K));
if (sram) {
- vm_dbg(&tile->xe->drm,
+ vm_dbg(&xe->drm,
"COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
- vram_addr, (u64)dma_addr[pos], 1);
- __fence = xe_migrate_from_vram(tile->migrate, 1,
+ vram_addr, (u64)pagemap_addr[pos].addr, 1);
+ __fence = xe_migrate_from_vram(vr->migrate, 1,
vram_addr,
- dma_addr + pos);
+ &pagemap_addr[pos]);
} else {
- vm_dbg(&tile->xe->drm,
+ vm_dbg(&xe->drm,
"COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
- (u64)dma_addr[pos], vram_addr, 1);
- __fence = xe_migrate_to_vram(tile->migrate, 1,
- dma_addr + pos,
+ (u64)pagemap_addr[pos].addr, vram_addr, 1);
+ __fence = xe_migrate_to_vram(vr->migrate, 1,
+ &pagemap_addr[pos],
vram_addr);
}
if (IS_ERR(__fence)) {
@@ -470,21 +631,31 @@ err_out:
dma_fence_put(fence);
}
+ /*
+	 * XXX: We can't derive the GT here (or anywhere in this function), but
+	 * compute always uses the primary GT, so accumulate stats on the
+	 * likely GT of the fault.
+ */
+ if (gt)
+ xe_svm_copy_us_stats_incr(gt, dir, npages, start);
+
return err;
#undef XE_MIGRATE_CHUNK_SIZE
#undef XE_VRAM_ADDR_INVALID
}
-static int xe_svm_copy_to_devmem(struct page **pages, dma_addr_t *dma_addr,
+static int xe_svm_copy_to_devmem(struct page **pages,
+ struct drm_pagemap_addr *pagemap_addr,
unsigned long npages)
{
- return xe_svm_copy(pages, dma_addr, npages, XE_SVM_COPY_TO_VRAM);
+ return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_VRAM);
}
-static int xe_svm_copy_to_ram(struct page **pages, dma_addr_t *dma_addr,
+static int xe_svm_copy_to_ram(struct page **pages,
+ struct drm_pagemap_addr *pagemap_addr,
unsigned long npages)
{
- return xe_svm_copy(pages, dma_addr, npages, XE_SVM_COPY_TO_SRAM);
+ return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_SRAM);
}
static struct xe_bo *to_xe_bo(struct drm_pagemap_devmem *devmem_allocation)
@@ -506,9 +677,9 @@ static u64 block_offset_to_pfn(struct xe_vram_region *vr, u64 offset)
return PHYS_PFN(offset + vr->hpa_base);
}
-static struct drm_buddy *tile_to_buddy(struct xe_tile *tile)
+static struct drm_buddy *vram_to_buddy(struct xe_vram_region *vram)
{
- return &tile->mem.vram.ttm.mm;
+ return &vram->ttm.mm;
}
static int xe_svm_populate_devmem_pfn(struct drm_pagemap_devmem *devmem_allocation,
@@ -522,8 +693,7 @@ static int xe_svm_populate_devmem_pfn(struct drm_pagemap_devmem *devmem_allocati
list_for_each_entry(block, blocks, link) {
struct xe_vram_region *vr = block->private;
- struct xe_tile *tile = vr_to_tile(vr);
- struct drm_buddy *buddy = tile_to_buddy(tile);
+ struct drm_buddy *buddy = vram_to_buddy(vr);
u64 block_pfn = block_offset_to_pfn(vr, drm_buddy_block_offset(block));
int i;
@@ -567,22 +737,26 @@ int xe_svm_init(struct xe_vm *vm)
{
int err;
- spin_lock_init(&vm->svm.garbage_collector.lock);
- INIT_LIST_HEAD(&vm->svm.garbage_collector.range_list);
- INIT_WORK(&vm->svm.garbage_collector.work,
- xe_svm_garbage_collector_work_func);
-
- err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM", &vm->xe->drm,
- current->mm, xe_svm_devm_owner(vm->xe), 0,
- vm->size, xe_modparam.svm_notifier_size * SZ_1M,
- &gpusvm_ops, fault_chunk_sizes,
- ARRAY_SIZE(fault_chunk_sizes));
- if (err)
- return err;
-
- drm_gpusvm_driver_set_lock(&vm->svm.gpusvm, &vm->lock);
+ if (vm->flags & XE_VM_FLAG_FAULT_MODE) {
+ spin_lock_init(&vm->svm.garbage_collector.lock);
+ INIT_LIST_HEAD(&vm->svm.garbage_collector.range_list);
+ INIT_WORK(&vm->svm.garbage_collector.work,
+ xe_svm_garbage_collector_work_func);
+
+ err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM", &vm->xe->drm,
+ current->mm, xe_svm_devm_owner(vm->xe), 0,
+ vm->size,
+ xe_modparam.svm_notifier_size * SZ_1M,
+ &gpusvm_ops, fault_chunk_sizes,
+ ARRAY_SIZE(fault_chunk_sizes));
+ drm_gpusvm_driver_set_lock(&vm->svm.gpusvm, &vm->lock);
+ } else {
+ err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM (simple)",
+ &vm->xe->drm, NULL, NULL, 0, 0, 0, NULL,
+ NULL, 0);
+ }
- return 0;
+ return err;
}
/**
@@ -653,7 +827,7 @@ bool xe_svm_range_validate(struct xe_vm *vm,
xe_svm_notifier_lock(vm);
ret = (range->tile_present & ~range->tile_invalidated & tile_mask) == tile_mask &&
- (devmem_preferred == range->base.flags.has_devmem_pages);
+ (devmem_preferred == range->base.pages.flags.has_devmem_pages);
xe_svm_notifier_unlock(vm);
@@ -683,66 +857,57 @@ u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 start, u64 end, struct xe_vma *v
}
#if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
-static struct xe_vram_region *tile_to_vr(struct xe_tile *tile)
-{
- return &tile->mem.vram;
-}
-
static int xe_drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
unsigned long start, unsigned long end,
struct mm_struct *mm,
unsigned long timeslice_ms)
{
- struct xe_tile *tile = container_of(dpagemap, typeof(*tile), mem.vram.dpagemap);
- struct xe_device *xe = tile_to_xe(tile);
+ struct xe_vram_region *vr = container_of(dpagemap, typeof(*vr), dpagemap);
+ struct xe_device *xe = vr->xe;
struct device *dev = xe->drm.dev;
- struct xe_vram_region *vr = tile_to_vr(tile);
struct drm_buddy_block *block;
+ struct xe_validation_ctx vctx;
struct list_head *blocks;
+ struct drm_exec exec;
struct xe_bo *bo;
- ktime_t time_end = 0;
- int err, idx;
+ int err = 0, idx;
if (!drm_dev_enter(&xe->drm, &idx))
return -ENODEV;
xe_pm_runtime_get(xe);
- retry:
- bo = xe_bo_create_locked(tile_to_xe(tile), NULL, NULL, end - start,
- ttm_bo_type_device,
- XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_CPU_ADDR_MIRROR);
- if (IS_ERR(bo)) {
- err = PTR_ERR(bo);
- if (xe_vm_validate_should_retry(NULL, err, &time_end))
- goto retry;
- goto out_pm_put;
- }
-
- drm_pagemap_devmem_init(&bo->devmem_allocation, dev, mm,
- &dpagemap_devmem_ops,
- &tile->mem.vram.dpagemap,
- end - start);
-
- blocks = &to_xe_ttm_vram_mgr_resource(bo->ttm.resource)->blocks;
- list_for_each_entry(block, blocks, link)
- block->private = vr;
+ xe_validation_guard(&vctx, &xe->val, &exec, (struct xe_val_flags) {}, err) {
+ bo = xe_bo_create_locked(xe, NULL, NULL, end - start,
+ ttm_bo_type_device,
+ (IS_DGFX(xe) ? XE_BO_FLAG_VRAM(vr) : XE_BO_FLAG_SYSTEM) |
+ XE_BO_FLAG_CPU_ADDR_MIRROR, &exec);
+ drm_exec_retry_on_contention(&exec);
+ if (IS_ERR(bo)) {
+ err = PTR_ERR(bo);
+ xe_validation_retry_on_oom(&vctx, &err);
+ break;
+ }
- xe_bo_get(bo);
+ drm_pagemap_devmem_init(&bo->devmem_allocation, dev, mm,
+ &dpagemap_devmem_ops, dpagemap, end - start);
- /* Ensure the device has a pm ref while there are device pages active. */
- xe_pm_runtime_get_noresume(xe);
- err = drm_pagemap_migrate_to_devmem(&bo->devmem_allocation, mm,
- start, end, timeslice_ms,
- xe_svm_devm_owner(xe));
- if (err)
- xe_svm_devmem_release(&bo->devmem_allocation);
+ blocks = &to_xe_ttm_vram_mgr_resource(bo->ttm.resource)->blocks;
+ list_for_each_entry(block, blocks, link)
+ block->private = vr;
- xe_bo_unlock(bo);
- xe_bo_put(bo);
+ xe_bo_get(bo);
-out_pm_put:
+ /* Ensure the device has a pm ref while there are device pages active. */
+ xe_pm_runtime_get_noresume(xe);
+ err = drm_pagemap_migrate_to_devmem(&bo->devmem_allocation, mm,
+ start, end, timeslice_ms,
+ xe_svm_devm_owner(xe));
+ if (err)
+ xe_svm_devmem_release(&bo->devmem_allocation);
+ xe_bo_unlock(bo);
+ xe_bo_put(bo);
+ }
xe_pm_runtime_put(xe);
drm_dev_exit(idx);
@@ -772,17 +937,17 @@ bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vm
struct xe_vm *vm = range_to_vm(&range->base);
u64 range_size = xe_svm_range_size(range);
- if (!range->base.flags.migrate_devmem || !preferred_region_is_vram)
+ if (!range->base.pages.flags.migrate_devmem || !preferred_region_is_vram)
return false;
xe_assert(vm->xe, IS_DGFX(vm->xe));
- if (preferred_region_is_vram && xe_svm_range_in_vram(range)) {
+ if (xe_svm_range_in_vram(range)) {
drm_info(&vm->xe->drm, "Range is already in VRAM\n");
return false;
}
- if (preferred_region_is_vram && range_size < SZ_64K && !supports_4K_migration(vm->xe)) {
+ if (range_size < SZ_64K && !supports_4K_migration(vm->xe)) {
drm_dbg(&vm->xe->drm, "Platform doesn't support SZ_4K range migration\n");
return false;
}
@@ -790,40 +955,77 @@ bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vm
return true;
}
-/**
- * xe_svm_handle_pagefault() - SVM handle page fault
- * @vm: The VM.
- * @vma: The CPU address mirror VMA.
- * @gt: The gt upon the fault occurred.
- * @fault_addr: The GPU fault address.
- * @atomic: The fault atomic access bit.
- *
- * Create GPU bindings for a SVM page fault. Optionally migrate to device
- * memory.
- *
- * Return: 0 on success, negative error code on error.
- */
-int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
- struct xe_gt *gt, u64 fault_addr,
- bool atomic)
+#define DECL_SVM_RANGE_COUNT_STATS(elem, stat) \
+static void xe_svm_range_##elem##_count_stats_incr(struct xe_gt *gt, \
+ struct xe_svm_range *range) \
+{ \
+ switch (xe_svm_range_size(range)) { \
+ case SZ_4K: \
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_4K_##stat##_COUNT, 1); \
+ break; \
+ case SZ_64K: \
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_64K_##stat##_COUNT, 1); \
+ break; \
+ case SZ_2M: \
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_2M_##stat##_COUNT, 1); \
+ break; \
+ } \
+} \
+
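+/* Generates xe_svm_range_{fault,valid_fault,migrate}_count_stats_incr(). */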
+DECL_SVM_RANGE_COUNT_STATS(fault, PAGEFAULT)
+DECL_SVM_RANGE_COUNT_STATS(valid_fault, VALID_PAGEFAULT)
+DECL_SVM_RANGE_COUNT_STATS(migrate, MIGRATE)
+
+#define DECL_SVM_RANGE_US_STATS(elem, stat) \
+static void xe_svm_range_##elem##_us_stats_incr(struct xe_gt *gt, \
+ struct xe_svm_range *range, \
+ ktime_t start) \
+{ \
+ s64 us_delta = xe_svm_stats_ktime_us_delta(start); \
+\
+ switch (xe_svm_range_size(range)) { \
+ case SZ_4K: \
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_4K_##stat##_US, \
+ us_delta); \
+ break; \
+ case SZ_64K: \
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_64K_##stat##_US, \
+ us_delta); \
+ break; \
+ case SZ_2M: \
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_2M_##stat##_US, \
+ us_delta); \
+ break; \
+ } \
+} \
+
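+/* Generates xe_svm_range_{migrate,get_pages,bind,fault}_us_stats_incr(). */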
+DECL_SVM_RANGE_US_STATS(migrate, MIGRATE)
+DECL_SVM_RANGE_US_STATS(get_pages, GET_PAGES)
+DECL_SVM_RANGE_US_STATS(bind, BIND)
+DECL_SVM_RANGE_US_STATS(fault, PAGEFAULT)
+
+static int __xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
+ struct xe_gt *gt, u64 fault_addr,
+ bool need_vram)
{
+ int devmem_possible = IS_DGFX(vm->xe) &&
+ IS_ENABLED(CONFIG_DRM_XE_PAGEMAP);
struct drm_gpusvm_ctx ctx = {
.read_only = xe_vma_read_only(vma),
- .devmem_possible = IS_DGFX(vm->xe) &&
- IS_ENABLED(CONFIG_DRM_XE_PAGEMAP),
- .check_pages_threshold = IS_DGFX(vm->xe) &&
- IS_ENABLED(CONFIG_DRM_XE_PAGEMAP) ? SZ_64K : 0,
- .devmem_only = atomic && IS_DGFX(vm->xe) &&
- IS_ENABLED(CONFIG_DRM_XE_PAGEMAP),
- .timeslice_ms = atomic && IS_DGFX(vm->xe) &&
- IS_ENABLED(CONFIG_DRM_XE_PAGEMAP) ?
+ .devmem_possible = devmem_possible,
+ .check_pages_threshold = devmem_possible ? SZ_64K : 0,
+ .devmem_only = need_vram && devmem_possible,
+ .timeslice_ms = need_vram && devmem_possible ?
vm->xe->atomic_svm_timeslice_ms : 0,
};
+ struct xe_validation_ctx vctx;
+ struct drm_exec exec;
struct xe_svm_range *range;
struct dma_fence *fence;
+ struct drm_pagemap *dpagemap;
struct xe_tile *tile = gt_to_tile(gt);
int migrate_try_count = ctx.devmem_only ? 3 : 1;
- ktime_t end = 0;
+ ktime_t start = xe_svm_stats_ktime_get(), bind_start, get_pages_start;
int err;
lockdep_assert_held_write(&vm->lock);
@@ -842,17 +1044,34 @@ retry:
if (IS_ERR(range))
return PTR_ERR(range);
- if (ctx.devmem_only && !range->base.flags.migrate_devmem)
- return -EACCES;
+ xe_svm_range_fault_count_stats_incr(gt, range);
- if (xe_svm_range_is_valid(range, tile, ctx.devmem_only))
- return 0;
+ if (ctx.devmem_only && !range->base.pages.flags.migrate_devmem) {
+ err = -EACCES;
+ goto out;
+ }
+
+ if (xe_svm_range_is_valid(range, tile, ctx.devmem_only)) {
+ xe_svm_range_valid_fault_count_stats_incr(gt, range);
+ range_debug(range, "PAGE FAULT - VALID");
+ goto out;
+ }
range_debug(range, "PAGE FAULT");
+ dpagemap = xe_vma_resolve_pagemap(vma, tile);
if (--migrate_try_count >= 0 &&
- xe_svm_range_needs_migrate_to_vram(range, vma, IS_DGFX(vm->xe))) {
+ xe_svm_range_needs_migrate_to_vram(range, vma, !!dpagemap || ctx.devmem_only)) {
+ ktime_t migrate_start = xe_svm_stats_ktime_get();
+
+ /* TODO: For multi-device, dpagemap will be used to find the
+ * remote tile and remote device. xe_svm_alloc_vram will need to
+ * be modified to use dpagemap for future multi-device support.
+ */
+ xe_svm_range_migrate_count_stats_incr(gt, range);
err = xe_svm_alloc_vram(tile, range, &ctx);
+ xe_svm_range_migrate_us_stats_incr(gt, range, migrate_start);
ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */
if (err) {
if (migrate_try_count || !ctx.devmem_only) {
@@ -869,6 +1088,8 @@ retry:
}
}
+ get_pages_start = xe_svm_stats_ktime_get();
+
range_debug(range, "GET PAGES");
err = xe_svm_range_get_pages(vm, range, &ctx);
/* Corner where CPU mappings have changed */
@@ -888,37 +1109,89 @@ retry:
}
if (err) {
range_debug(range, "PAGE FAULT - FAIL PAGE COLLECT");
- goto err_out;
+ goto out;
}
+ xe_svm_range_get_pages_us_stats_incr(gt, range, get_pages_start);
range_debug(range, "PAGE FAULT - BIND");
-retry_bind:
- xe_vm_lock(vm, false);
- fence = xe_vm_range_rebind(vm, vma, range, BIT(tile->id));
- if (IS_ERR(fence)) {
- xe_vm_unlock(vm);
- err = PTR_ERR(fence);
- if (err == -EAGAIN) {
- ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */
- range_debug(range, "PAGE FAULT - RETRY BIND");
- goto retry;
+ bind_start = xe_svm_stats_ktime_get();
+ xe_validation_guard(&vctx, &vm->xe->val, &exec, (struct xe_val_flags) {}, err) {
+ err = xe_vm_drm_exec_lock(vm, &exec);
+ drm_exec_retry_on_contention(&exec);
+
+ xe_vm_set_validation_exec(vm, &exec);
+ fence = xe_vm_range_rebind(vm, vma, range, BIT(tile->id));
+ xe_vm_set_validation_exec(vm, NULL);
+ if (IS_ERR(fence)) {
+ drm_exec_retry_on_contention(&exec);
+ err = PTR_ERR(fence);
+ xe_validation_retry_on_oom(&vctx, &err);
+ xe_svm_range_bind_us_stats_incr(gt, range, bind_start);
+ break;
}
- if (xe_vm_validate_should_retry(NULL, err, &end))
- goto retry_bind;
- goto err_out;
}
- xe_vm_unlock(vm);
+ if (err)
+ goto err_out;
dma_fence_wait(fence, false);
dma_fence_put(fence);
+ xe_svm_range_bind_us_stats_incr(gt, range, bind_start);
+
+out:
+ xe_svm_range_fault_us_stats_incr(gt, range, start);
+ return err;
err_out:
+ if (err == -EAGAIN) {
+ ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */
+ range_debug(range, "PAGE FAULT - RETRY BIND");
+ goto retry;
+ }
return err;
}
/**
+ * xe_svm_handle_pagefault() - SVM handle page fault
+ * @vm: The VM.
+ * @vma: The CPU address mirror VMA.
+ * @gt: The gt upon which the fault occurred.
+ * @fault_addr: The GPU fault address.
+ * @atomic: The fault atomic access bit.
+ *
+ * Create GPU bindings for a SVM page fault. Optionally migrate to device
+ * memory.
+ *
+ * Return: 0 on success, negative error code on error.
+ */
+int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
+ struct xe_gt *gt, u64 fault_addr,
+ bool atomic)
+{
+ int need_vram, ret;
+retry:
+ need_vram = xe_vma_need_vram_for_atomic(vm->xe, vma, atomic);
+ if (need_vram < 0)
+ return need_vram;
+
+ ret = __xe_svm_handle_pagefault(vm, vma, gt, fault_addr,
+ need_vram ? true : false);
+ if (ret == -EAGAIN) {
+ /*
+ * Retry once on -EAGAIN to re-lookup the VMA, as the original VMA
+ * may have been split by xe_svm_range_set_default_attr.
+ */
+ vma = xe_vm_find_vma_by_addr(vm, fault_addr);
+ if (!vma)
+ return -EINVAL;
+
+ goto retry;
+ }
+ return ret;
+}
+
+/**
* xe_svm_has_mapping() - SVM has mappings
* @vm: The VM.
* @start: Start address.
@@ -934,6 +1207,41 @@ bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
}
/**
+ * xe_svm_unmap_address_range - Unmap SVM mappings and ranges
+ * @vm: The VM
+ * @start: start address
+ * @end: end address
+ *
+ * This function unmaps SVM ranges if the start or end address falls inside them.
+ */
+void xe_svm_unmap_address_range(struct xe_vm *vm, u64 start, u64 end)
+{
+ struct drm_gpusvm_notifier *notifier, *next;
+
+ lockdep_assert_held_write(&vm->lock);
+
+ drm_gpusvm_for_each_notifier_safe(notifier, next, &vm->svm.gpusvm, start, end) {
+ struct drm_gpusvm_range *range, *__next;
+
+ drm_gpusvm_for_each_range_safe(range, __next, notifier, start, end) {
+ if (start > drm_gpusvm_range_start(range) ||
+ end < drm_gpusvm_range_end(range)) {
+ if (IS_DGFX(vm->xe) && xe_svm_range_in_vram(to_xe_range(range)))
+ drm_gpusvm_range_evict(&vm->svm.gpusvm, range);
+ drm_gpusvm_range_get(range);
+ __xe_svm_garbage_collector(vm, to_xe_range(range));
+ if (!list_empty(&to_xe_range(range)->garbage_collector_link)) {
+ spin_lock(&vm->svm.garbage_collector.lock);
+ list_del(&to_xe_range(range)->garbage_collector_link);
+ spin_unlock(&vm->svm.garbage_collector.lock);
+ }
+ drm_gpusvm_range_put(range);
+ }
+ }
+ }
+}
+
+/**
* xe_svm_bo_evict() - SVM evict BO to system memory
* @bo: BO to evict
*
@@ -967,7 +1275,7 @@ struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
r = drm_gpusvm_range_find_or_insert(&vm->svm.gpusvm, max(addr, xe_vma_start(vma)),
xe_vma_start(vma), xe_vma_end(vma), ctx);
if (IS_ERR(r))
- return ERR_PTR(PTR_ERR(r));
+ return ERR_CAST(r);
return to_xe_range(r);
}
@@ -997,8 +1305,94 @@ int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
return err;
}
+/**
+ * xe_svm_ranges_zap_ptes_in_range - Clear PTEs of SVM ranges in an input range
+ * @vm: Pointer to the xe_vm structure
+ * @start: Start of the input range
+ * @end: End of the input range
+ *
+ * This function removes the page table entries (PTEs) associated
+ * with the SVM ranges within the given input start and end addresses.
+ *
+ * Return: tile_mask of tiles whose GTs need to be TLB invalidated.
+ */
+u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end)
+{
+ struct drm_gpusvm_notifier *notifier;
+ struct xe_svm_range *range;
+ u64 adj_start, adj_end;
+ struct xe_tile *tile;
+ u8 tile_mask = 0;
+ u8 id;
+
+ lockdep_assert(lockdep_is_held_type(&vm->svm.gpusvm.notifier_lock, 1) &&
+ lockdep_is_held_type(&vm->lock, 0));
+
+ drm_gpusvm_for_each_notifier(notifier, &vm->svm.gpusvm, start, end) {
+ struct drm_gpusvm_range *r = NULL;
+
+ adj_start = max(start, drm_gpusvm_notifier_start(notifier));
+ adj_end = min(end, drm_gpusvm_notifier_end(notifier));
+ drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end) {
+ range = to_xe_range(r);
+ for_each_tile(tile, vm->xe, id) {
+ if (xe_pt_zap_ptes_range(tile, vm, range)) {
+ tile_mask |= BIT(id);
+ /*
+ * WRITE_ONCE pairs with READ_ONCE in
+ * xe_vm_has_valid_gpu_mapping().
+ * Must not fail after setting
+ * tile_invalidated and before
+ * TLB invalidation.
+ */
+ WRITE_ONCE(range->tile_invalidated,
+ range->tile_invalidated | BIT(id));
+ }
+ }
+ }
+ }
+
+ return tile_mask;
+}
+
#if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
+static struct drm_pagemap *tile_local_pagemap(struct xe_tile *tile)
+{
+ return &tile->mem.vram->dpagemap;
+}
+
+/**
+ * xe_vma_resolve_pagemap - Resolve the appropriate DRM pagemap for a VMA
+ * @vma: Pointer to the xe_vma structure containing memory attributes
+ * @tile: Pointer to the xe_tile structure used as fallback for VRAM mapping
+ *
+ * This function determines the correct DRM pagemap to use for a given VMA.
+ * It first checks the devmem_fd provided in the VMA's preferred location.
+ * If the devmem_fd requests the default system location, it returns NULL,
+ * indicating that no pagemap is available and smem is to be used as the
+ * preferred location. If the devmem_fd requests the default device, it
+ * returns the VRAM pagemap associated with the faulting tile.
+ *
+ * Future support for multi-device configurations may use drm_pagemap_from_fd()
+ * to resolve pagemaps from arbitrary file descriptors.
+ *
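+ * A simplified sketch of the usage in the page-fault path (see
+ * __xe_svm_handle_pagefault(); retry counting elided)::
+ *
+ *	dpagemap = xe_vma_resolve_pagemap(vma, tile);
+ *	if (xe_svm_range_needs_migrate_to_vram(range, vma, !!dpagemap))
+ *		err = xe_svm_alloc_vram(tile, range, &ctx);
+ *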
+ * Return: A pointer to the resolved drm_pagemap, or NULL if none is applicable.
+ */
+struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile)
+{
+ s32 fd = (s32)vma->attr.preferred_loc.devmem_fd;
+
+ if (fd == DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM)
+ return NULL;
+
+ if (fd == DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE)
+ return IS_DGFX(tile_to_xe(tile)) ? tile_local_pagemap(tile) : NULL;
+
+ /* TODO: Support multi-device with drm_pagemap_from_fd(fd) */
+ return NULL;
+}
+
/**
* xe_svm_alloc_vram()- Allocate device memory pages for range,
* migrating existing data.
@@ -1013,17 +1407,17 @@ int xe_svm_alloc_vram(struct xe_tile *tile, struct xe_svm_range *range,
{
struct drm_pagemap *dpagemap;
- xe_assert(tile_to_xe(tile), range->base.flags.migrate_devmem);
+ xe_assert(tile_to_xe(tile), range->base.pages.flags.migrate_devmem);
range_debug(range, "ALLOCATE VRAM");
- dpagemap = xe_tile_local_pagemap(tile);
+ dpagemap = tile_local_pagemap(tile);
return drm_pagemap_populate_mm(dpagemap, xe_svm_range_start(range),
xe_svm_range_end(range),
range->base.gpusvm->mm,
ctx->timeslice_ms);
}
-static struct drm_pagemap_device_addr
+static struct drm_pagemap_addr
xe_drm_pagemap_device_map(struct drm_pagemap *dpagemap,
struct device *dev,
struct page *page,
@@ -1042,7 +1436,7 @@ xe_drm_pagemap_device_map(struct drm_pagemap *dpagemap,
prot = 0;
}
- return drm_pagemap_device_addr_encode(addr, prot, order, dir);
+ return drm_pagemap_addr_encode(addr, prot, order, dir);
}
static const struct drm_pagemap_ops xe_drm_pagemap_ops = {
@@ -1111,6 +1505,11 @@ int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
{
return 0;
}
+
+struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile)
+{
+ return NULL;
+}
#endif
/**
diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
index da9a69ea0bb1..cef6ee7d6fe3 100644
--- a/drivers/gpu/drm/xe/xe_svm.h
+++ b/drivers/gpu/drm/xe/xe_svm.h
@@ -90,6 +90,12 @@ bool xe_svm_range_validate(struct xe_vm *vm,
u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *vma);
+void xe_svm_unmap_address_range(struct xe_vm *vm, u64 start, u64 end);
+
+u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end);
+
+struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile);
+
/**
* xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
* @range: SVM range
@@ -99,7 +105,7 @@ u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *v
static inline bool xe_svm_range_has_dma_mapping(struct xe_svm_range *range)
{
lockdep_assert_held(&range->base.gpusvm->notifier_lock);
- return range->base.flags.has_dma_mapping;
+ return range->base.pages.flags.has_dma_mapping;
}
/**
@@ -149,21 +155,13 @@ static inline unsigned long xe_svm_range_size(struct xe_svm_range *range)
return drm_gpusvm_range_size(&range->base);
}
-#define xe_svm_assert_in_notifier(vm__) \
- lockdep_assert_held_write(&(vm__)->svm.gpusvm.notifier_lock)
-
-#define xe_svm_notifier_lock(vm__) \
- drm_gpusvm_notifier_lock(&(vm__)->svm.gpusvm)
-
-#define xe_svm_notifier_unlock(vm__) \
- drm_gpusvm_notifier_unlock(&(vm__)->svm.gpusvm)
-
void xe_svm_flush(struct xe_vm *vm);
#else
#include <linux/interval_tree.h>
+#include "xe_vm.h"
-struct drm_pagemap_device_addr;
+struct drm_pagemap_addr;
struct drm_gpusvm_ctx;
struct drm_gpusvm_range;
struct xe_bo;
@@ -178,7 +176,9 @@ struct xe_vram_region;
struct xe_svm_range {
struct {
struct interval_tree_node itree;
- const struct drm_pagemap_device_addr *dma_addr;
+ struct {
+ const struct drm_pagemap_addr *dma_addr;
+ } pages;
} base;
u32 tile_present;
u32 tile_invalidated;
@@ -198,12 +198,21 @@ int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
static inline
int xe_svm_init(struct xe_vm *vm)
{
+#if IS_ENABLED(CONFIG_DRM_GPUSVM)
+ return drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM (simple)", &vm->xe->drm,
+ NULL, NULL, 0, 0, 0, NULL, NULL, 0);
+#else
return 0;
+#endif
}
static inline
void xe_svm_fini(struct xe_vm *vm)
{
+#if IS_ENABLED(CONFIG_DRM_GPUSVM)
+ xe_assert(vm->xe, xe_vm_is_closed(vm));
+ drm_gpusvm_fini(&vm->svm.gpusvm);
+#endif
}
static inline
@@ -303,19 +312,64 @@ u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *vm
return ULONG_MAX;
}
-#define xe_svm_assert_in_notifier(...) do {} while (0)
+static inline
+void xe_svm_unmap_address_range(struct xe_vm *vm, u64 start, u64 end)
+{
+}
+
+static inline
+u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end)
+{
+ return 0;
+}
+
+static inline
+struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile)
+{
+ return NULL;
+}
+
+static inline void xe_svm_flush(struct xe_vm *vm)
+{
+}
#define xe_svm_range_has_dma_mapping(...) false
+#endif /* CONFIG_DRM_XE_GPUSVM */
+
+#if IS_ENABLED(CONFIG_DRM_GPUSVM) /* Need to support userptr without XE_GPUSVM */
+#define xe_svm_assert_in_notifier(vm__) \
+ lockdep_assert_held_write(&(vm__)->svm.gpusvm.notifier_lock)
+
+#define xe_svm_assert_held_read(vm__) \
+ lockdep_assert_held_read(&(vm__)->svm.gpusvm.notifier_lock)
+
+#define xe_svm_notifier_lock(vm__) \
+ drm_gpusvm_notifier_lock(&(vm__)->svm.gpusvm)
+
+#define xe_svm_notifier_lock_interruptible(vm__) \
+ down_read_interruptible(&(vm__)->svm.gpusvm.notifier_lock)
+
+#define xe_svm_notifier_unlock(vm__) \
+ drm_gpusvm_notifier_unlock(&(vm__)->svm.gpusvm)
+
+#else
+#define xe_svm_assert_in_notifier(...) do {} while (0)
+
+static inline void xe_svm_assert_held_read(struct xe_vm *vm)
+{
+}
static inline void xe_svm_notifier_lock(struct xe_vm *vm)
{
}
-static inline void xe_svm_notifier_unlock(struct xe_vm *vm)
+static inline int xe_svm_notifier_lock_interruptible(struct xe_vm *vm)
{
+ return 0;
}
-static inline void xe_svm_flush(struct xe_vm *vm)
+static inline void xe_svm_notifier_unlock(struct xe_vm *vm)
{
}
-#endif
+#endif /* CONFIG_DRM_GPUSVM */
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_tile.c b/drivers/gpu/drm/xe/xe_tile.c
index 86e9811e60ba..d49ba3401963 100644
--- a/drivers/gpu/drm/xe/xe_tile.c
+++ b/drivers/gpu/drm/xe/xe_tile.c
@@ -7,6 +7,7 @@
#include <drm/drm_managed.h>
+#include "xe_bo.h"
#include "xe_device.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
@@ -19,6 +20,8 @@
#include "xe_tile_sysfs.h"
#include "xe_ttm_vram_mgr.h"
#include "xe_wa.h"
+#include "xe_vram.h"
+#include "xe_vram_types.h"
/**
* DOC: Multi-tile Design
@@ -92,6 +95,35 @@ static int xe_tile_alloc(struct xe_tile *tile)
if (!tile->mem.ggtt)
return -ENOMEM;
+ tile->migrate = xe_migrate_alloc(tile);
+ if (!tile->migrate)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * xe_tile_alloc_vram - Perform per-tile VRAM structs allocation
+ * @tile: Tile to perform allocations for
+ *
+ * Allocates VRAM per-tile data structures using DRM-managed allocations.
+ * Does not touch the hardware.
+ *
+ * Returns -ENOMEM if allocations fail, otherwise 0.
+ */
+int xe_tile_alloc_vram(struct xe_tile *tile)
+{
+ struct xe_device *xe = tile_to_xe(tile);
+ struct xe_vram_region *vram;
+
+ if (!IS_DGFX(xe))
+ return 0;
+
+ vram = xe_vram_region_alloc(xe, tile->id, XE_PL_VRAM0 + tile->id);
+ if (!vram)
+ return -ENOMEM;
+ tile->mem.vram = vram;
+
return 0;
}
@@ -127,21 +159,6 @@ int xe_tile_init_early(struct xe_tile *tile, struct xe_device *xe, u8 id)
}
ALLOW_ERROR_INJECTION(xe_tile_init_early, ERRNO); /* See xe_pci_probe() */
-static int tile_ttm_mgr_init(struct xe_tile *tile)
-{
- struct xe_device *xe = tile_to_xe(tile);
- int err;
-
- if (tile->mem.vram.usable_size) {
- err = xe_ttm_vram_mgr_init(tile, &tile->mem.vram.ttm);
- if (err)
- return err;
- xe->info.mem_region_mask |= BIT(tile->id) << 1;
- }
-
- return 0;
-}
-
/**
* xe_tile_init_noalloc - Init tile up to the point where allocations can happen.
* @tile: The tile to initialize.
@@ -159,16 +176,19 @@ static int tile_ttm_mgr_init(struct xe_tile *tile)
int xe_tile_init_noalloc(struct xe_tile *tile)
{
struct xe_device *xe = tile_to_xe(tile);
- int err;
-
- err = tile_ttm_mgr_init(tile);
- if (err)
- return err;
xe_wa_apply_tile_workarounds(tile);
if (xe->info.has_usm && IS_DGFX(xe))
- xe_devm_add(tile, &tile->mem.vram);
+ xe_devm_add(tile, tile->mem.vram);
+
+ if (IS_DGFX(xe) && !ttm_resource_manager_used(&tile->mem.vram->ttm.manager)) {
+ int err = xe_ttm_vram_mgr_init(xe, tile->mem.vram);
+
+ if (err)
+ return err;
+ xe->info.mem_region_mask |= BIT(tile->mem.vram->id) << 1;
+ }
return xe_tile_sysfs_init(tile);
}
diff --git a/drivers/gpu/drm/xe/xe_tile.h b/drivers/gpu/drm/xe/xe_tile.h
index cc33e8733983..dceb6297aa01 100644
--- a/drivers/gpu/drm/xe/xe_tile.h
+++ b/drivers/gpu/drm/xe/xe_tile.h
@@ -14,19 +14,9 @@ int xe_tile_init_early(struct xe_tile *tile, struct xe_device *xe, u8 id);
int xe_tile_init_noalloc(struct xe_tile *tile);
int xe_tile_init(struct xe_tile *tile);
-void xe_tile_migrate_wait(struct xe_tile *tile);
+int xe_tile_alloc_vram(struct xe_tile *tile);
-#if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
-static inline struct drm_pagemap *xe_tile_local_pagemap(struct xe_tile *tile)
-{
- return &tile->mem.vram.dpagemap;
-}
-#else
-static inline struct drm_pagemap *xe_tile_local_pagemap(struct xe_tile *tile)
-{
- return NULL;
-}
-#endif
+void xe_tile_migrate_wait(struct xe_tile *tile);
static inline bool xe_tile_is_root(struct xe_tile *tile)
{
diff --git a/drivers/gpu/drm/xe/xe_tile_debugfs.c b/drivers/gpu/drm/xe/xe_tile_debugfs.c
new file mode 100644
index 000000000000..5523874cba7b
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_tile_debugfs.c
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <linux/debugfs.h>
+#include <drm/drm_debugfs.h>
+
+#include "xe_pm.h"
+#include "xe_sa.h"
+#include "xe_tile_debugfs.h"
+
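+/* Relies on the debugfs layout described at tile_debugfs_simple_show() below. */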
+static struct xe_tile *node_to_tile(struct drm_info_node *node)
+{
+ return node->dent->d_parent->d_inode->i_private;
+}
+
+/**
+ * tile_debugfs_simple_show - A show callback for struct drm_info_list
+ * @m: the &seq_file
+ * @data: data used by the drm debugfs helpers
+ *
+ * This callback can be used in struct drm_info_list to describe debugfs
+ * files that are &xe_tile specific.
+ *
+ * It is assumed that those debugfs files will be created on directory entry
+ * which struct dentry d_inode->i_private points to &xe_tile.
+ *
+ * /sys/kernel/debug/dri/0/
+ * ├── tile0/ # tile = dentry->d_inode->i_private
+ * │ │ ├── id # tile = dentry->d_parent->d_inode->i_private
+ *
+ * This function assumes that &m->private will be set to the &struct
+ * drm_info_node corresponding to the instance of the info on a given &struct
+ * drm_minor (see struct drm_info_list.show for details).
+ *
+ * This function also assumes that struct drm_info_list.data will point to the
+ * function code that will actually print a file content::
+ *
+ * int (*print)(struct xe_tile *, struct drm_printer *)
+ *
+ * Example::
+ *
+ * int tile_id(struct xe_tile *tile, struct drm_printer *p)
+ * {
+ * drm_printf(p, "%u\n", tile->id);
+ * return 0;
+ * }
+ *
+ * static const struct drm_info_list info[] = {
+ * { .name = "id", .show = tile_debugfs_simple_show, .data = tile_id },
+ * };
+ *
+ * dir = debugfs_create_dir("tile0", parent);
+ * dir->d_inode->i_private = tile;
+ * drm_debugfs_create_files(info, ARRAY_SIZE(info), dir, minor);
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int tile_debugfs_simple_show(struct seq_file *m, void *data)
+{
+ struct drm_printer p = drm_seq_file_printer(m);
+ struct drm_info_node *node = m->private;
+ struct xe_tile *tile = node_to_tile(node);
+ int (*print)(struct xe_tile *, struct drm_printer *) = node->info_ent->data;
+
+ return print(tile, &p);
+}
+
+/**
+ * tile_debugfs_show_with_rpm - A show callback for struct drm_info_list
+ * @m: the &seq_file
+ * @data: data used by the drm debugfs helpers
+ *
+ * Similar to tile_debugfs_simple_show() but implicitly takes a RPM ref.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int tile_debugfs_show_with_rpm(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = m->private;
+ struct xe_tile *tile = node_to_tile(node);
+ struct xe_device *xe = tile_to_xe(tile);
+ int ret;
+
+ xe_pm_runtime_get(xe);
+ ret = tile_debugfs_simple_show(m, data);
+ xe_pm_runtime_put(xe);
+
+ return ret;
+}
+
+static int sa_info(struct xe_tile *tile, struct drm_printer *p)
+{
+ drm_suballoc_dump_debug_info(&tile->mem.kernel_bb_pool->base, p,
+ xe_sa_manager_gpu_addr(tile->mem.kernel_bb_pool));
+
+ return 0;
+}
+
+/* only for debugfs files which can be safely used on the VF */
+static const struct drm_info_list vf_safe_debugfs_list[] = {
+ { "sa_info", .show = tile_debugfs_show_with_rpm, .data = sa_info },
+};
+
+/**
+ * xe_tile_debugfs_register - Register tile's debugfs attributes
+ * @tile: the &xe_tile to register
+ *
+ * Create debugfs sub-directory with a name that includes a tile ID and
+ * then creates set of debugfs files (attributes) specific to this tile.
+ */
+void xe_tile_debugfs_register(struct xe_tile *tile)
+{
+ struct xe_device *xe = tile_to_xe(tile);
+ struct drm_minor *minor = xe->drm.primary;
+ struct dentry *root = minor->debugfs_root;
+ char name[8];
+
+ snprintf(name, sizeof(name), "tile%u", tile->id);
+ tile->debugfs = debugfs_create_dir(name, root);
+ if (IS_ERR(tile->debugfs))
+ return;
+
+ /*
+ * Store the xe_tile pointer as private data of the tile/ directory
+ * node so other tile specific attributes under that directory may
+ * refer to it by looking at its parent node private data.
+ */
+ tile->debugfs->d_inode->i_private = tile;
+
+ drm_debugfs_create_files(vf_safe_debugfs_list,
+ ARRAY_SIZE(vf_safe_debugfs_list),
+ tile->debugfs, minor);
+}
diff --git a/drivers/gpu/drm/xe/xe_tile_debugfs.h b/drivers/gpu/drm/xe/xe_tile_debugfs.h
new file mode 100644
index 000000000000..0e5f724de37f
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_tile_debugfs.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_TILE_DEBUGFS_H_
+#define _XE_TILE_DEBUGFS_H_
+
+struct xe_tile;
+
+void xe_tile_debugfs_register(struct xe_tile *tile);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_tile_printk.h b/drivers/gpu/drm/xe/xe_tile_printk.h
new file mode 100644
index 000000000000..63640a42685d
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_tile_printk.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_TILE_PRINTK_H_
+#define _XE_TILE_PRINTK_H_
+
+#include "xe_printk.h"
+
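+/* Prefixes every message with the tile ID, e.g. "Tile0: <message>". */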
+#define __XE_TILE_PRINTK_FMT(_tile, _fmt, _args...) "Tile%u: " _fmt, (_tile)->id, ##_args
+
+#define xe_tile_printk(_tile, _level, _fmt, ...) \
+ xe_printk((_tile)->xe, _level, __XE_TILE_PRINTK_FMT((_tile), _fmt, ##__VA_ARGS__))
+
+#define xe_tile_err(_tile, _fmt, ...) \
+ xe_tile_printk((_tile), err, _fmt, ##__VA_ARGS__)
+
+#define xe_tile_err_once(_tile, _fmt, ...) \
+ xe_tile_printk((_tile), err_once, _fmt, ##__VA_ARGS__)
+
+#define xe_tile_err_ratelimited(_tile, _fmt, ...) \
+ xe_tile_printk((_tile), err_ratelimited, _fmt, ##__VA_ARGS__)
+
+#define xe_tile_warn(_tile, _fmt, ...) \
+ xe_tile_printk((_tile), warn, _fmt, ##__VA_ARGS__)
+
+#define xe_tile_notice(_tile, _fmt, ...) \
+ xe_tile_printk((_tile), notice, _fmt, ##__VA_ARGS__)
+
+#define xe_tile_info(_tile, _fmt, ...) \
+ xe_tile_printk((_tile), info, _fmt, ##__VA_ARGS__)
+
+#define xe_tile_dbg(_tile, _fmt, ...) \
+ xe_tile_printk((_tile), dbg, _fmt, ##__VA_ARGS__)
+
+#define xe_tile_WARN_type(_tile, _type, _condition, _fmt, ...) \
+ xe_WARN##_type((_tile)->xe, _condition, _fmt, ## __VA_ARGS__)
+
+#define xe_tile_WARN(_tile, _condition, _fmt, ...) \
+ xe_tile_WARN_type((_tile),, _condition, __XE_TILE_PRINTK_FMT((_tile), _fmt, ##__VA_ARGS__))
+
+#define xe_tile_WARN_ONCE(_tile, _condition, _fmt, ...) \
+ xe_tile_WARN_type((_tile), _ONCE, _condition, __XE_TILE_PRINTK_FMT((_tile), _fmt, ##__VA_ARGS__))
+
+#define xe_tile_WARN_ON(_tile, _condition) \
+ xe_tile_WARN((_tile), _condition, "%s(%s)", "WARN_ON", __stringify(_condition))
+
+#define xe_tile_WARN_ON_ONCE(_tile, _condition) \
+ xe_tile_WARN_ONCE((_tile), _condition, "%s(%s)", "WARN_ON_ONCE", __stringify(_condition))
+
+static inline void __xe_tile_printfn_err(struct drm_printer *p, struct va_format *vaf)
+{
+ struct xe_tile *tile = p->arg;
+
+ xe_tile_err(tile, "%pV", vaf);
+}
+
+static inline void __xe_tile_printfn_info(struct drm_printer *p, struct va_format *vaf)
+{
+ struct xe_tile *tile = p->arg;
+
+ xe_tile_info(tile, "%pV", vaf);
+}
+
+static inline void __xe_tile_printfn_dbg(struct drm_printer *p, struct va_format *vaf)
+{
+ struct xe_tile *tile = p->arg;
+ struct drm_printer dbg;
+
+ /*
+ * The original xe_tile_dbg() callsite annotations are useless here,
+ * redirect to the tweaked xe_dbg_printer() instead.
+ */
+ dbg = xe_dbg_printer(tile->xe);
+ dbg.origin = p->origin;
+
+ drm_printf(&dbg, __XE_TILE_PRINTK_FMT(tile, "%pV", vaf));
+}
+
+/**
+ * xe_tile_err_printer - Construct a &drm_printer that outputs to xe_tile_err()
+ * @tile: the &xe_tile pointer to use in xe_tile_err()
+ *
+ * Return: The &drm_printer object.
+ */
+static inline struct drm_printer xe_tile_err_printer(struct xe_tile *tile)
+{
+ struct drm_printer p = {
+ .printfn = __xe_tile_printfn_err,
+ .arg = tile,
+ };
+ return p;
+}
+
+/**
+ * xe_tile_info_printer - Construct a &drm_printer that outputs to xe_tile_info()
+ * @tile: the &xe_tile pointer to use in xe_tile_info()
+ *
+ * Return: The &drm_printer object.
+ */
+static inline struct drm_printer xe_tile_info_printer(struct xe_tile *tile)
+{
+ struct drm_printer p = {
+ .printfn = __xe_tile_printfn_info,
+ .arg = tile,
+ };
+ return p;
+}
+
+/**
+ * xe_tile_dbg_printer - Construct a &drm_printer that outputs like xe_tile_dbg()
+ * @tile: the &xe_tile pointer to use in xe_tile_dbg()
+ *
+ * Return: The &drm_printer object.
+ */
+static inline struct drm_printer xe_tile_dbg_printer(struct xe_tile *tile)
+{
+ struct drm_printer p = {
+ .printfn = __xe_tile_printfn_dbg,
+ .arg = tile,
+ .origin = (const void *)_THIS_IP_,
+ };
+ return p;
+}
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_tlb_inval.c b/drivers/gpu/drm/xe/xe_tlb_inval.c
new file mode 100644
index 000000000000..918a59e686ea
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_tlb_inval.c
@@ -0,0 +1,433 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <drm/drm_managed.h>
+
+#include "abi/guc_actions_abi.h"
+#include "xe_device.h"
+#include "xe_force_wake.h"
+#include "xe_gt.h"
+#include "xe_gt_printk.h"
+#include "xe_gt_stats.h"
+#include "xe_guc.h"
+#include "xe_guc_ct.h"
+#include "xe_guc_tlb_inval.h"
+#include "xe_mmio.h"
+#include "xe_pm.h"
+#include "xe_tlb_inval.h"
+#include "xe_trace.h"
+
+/**
+ * DOC: Xe TLB invalidation
+ *
+ * Xe TLB invalidation is implemented in two layers. The first is the frontend
+ * API, which provides an interface for TLB invalidations to the driver code.
+ * The frontend handles seqno assignment, synchronization (fences), and the
+ * timeout mechanism. The frontend is implemented via an embedded structure
+ * xe_tlb_inval that includes a set of ops hooking into the backend. The backend
+ * interacts with the hardware (or firmware) to perform the actual invalidation.
+ */
+
+#define FENCE_STACK_BIT DMA_FENCE_FLAG_USER_BITS
+
+static void xe_tlb_inval_fence_fini(struct xe_tlb_inval_fence *fence)
+{
+ if (WARN_ON_ONCE(!fence->tlb_inval))
+ return;
+
+ xe_pm_runtime_put(fence->tlb_inval->xe);
+ fence->tlb_inval = NULL; /* fini() should be called once */
+}
+
+static void
+xe_tlb_inval_fence_signal(struct xe_tlb_inval_fence *fence)
+{
+ bool stack = test_bit(FENCE_STACK_BIT, &fence->base.flags);
+
+ lockdep_assert_held(&fence->tlb_inval->pending_lock);
+
+ list_del(&fence->link);
+ trace_xe_tlb_inval_fence_signal(fence->tlb_inval->xe, fence);
+ xe_tlb_inval_fence_fini(fence);
+ dma_fence_signal(&fence->base);
+ if (!stack)
+ dma_fence_put(&fence->base);
+}
+
+static void
+xe_tlb_inval_fence_signal_unlocked(struct xe_tlb_inval_fence *fence)
+{
+ struct xe_tlb_inval *tlb_inval = fence->tlb_inval;
+
+ spin_lock_irq(&tlb_inval->pending_lock);
+ xe_tlb_inval_fence_signal(fence);
+ spin_unlock_irq(&tlb_inval->pending_lock);
+}
+
+static void xe_tlb_inval_fence_timeout(struct work_struct *work)
+{
+ struct xe_tlb_inval *tlb_inval = container_of(work, struct xe_tlb_inval,
+ fence_tdr.work);
+ struct xe_device *xe = tlb_inval->xe;
+ struct xe_tlb_inval_fence *fence, *next;
+ long timeout_delay = tlb_inval->ops->timeout_delay(tlb_inval);
+
+ tlb_inval->ops->flush(tlb_inval);
+
+ spin_lock_irq(&tlb_inval->pending_lock);
+ list_for_each_entry_safe(fence, next,
+ &tlb_inval->pending_fences, link) {
+ s64 since_inval_ms = ktime_ms_delta(ktime_get(),
+ fence->inval_time);
+
+ if (msecs_to_jiffies(since_inval_ms) < timeout_delay)
+ break;
+
+ trace_xe_tlb_inval_fence_timeout(xe, fence);
+ drm_err(&xe->drm,
+ "TLB invalidation fence timeout, seqno=%d recv=%d",
+ fence->seqno, tlb_inval->seqno_recv);
+
+ fence->base.error = -ETIME;
+ xe_tlb_inval_fence_signal(fence);
+ }
+ if (!list_empty(&tlb_inval->pending_fences))
+ queue_delayed_work(system_wq, &tlb_inval->fence_tdr,
+ timeout_delay);
+ spin_unlock_irq(&tlb_inval->pending_lock);
+}
+
+/**
+ * tlb_inval_fini - Clean up TLB invalidation state
+ * @drm: the &drm_device
+ * @arg: pointer to the &struct xe_tlb_inval
+ *
+ * Cancel pending fence workers and clean up any additional
+ * TLB invalidation state.
+ */
+static void tlb_inval_fini(struct drm_device *drm, void *arg)
+{
+ struct xe_tlb_inval *tlb_inval = arg;
+
+ xe_tlb_inval_reset(tlb_inval);
+}
+
+/**
+ * xe_gt_tlb_inval_init_early - Initialize TLB invalidation state
+ * @gt: GT structure
+ *
+ * Initialize TLB invalidation state. This is purely software initialization
+ * and should be called once during driver load.
+ *
+ * Return: 0 on success, negative error code on error.
+ */
+int xe_gt_tlb_inval_init_early(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ struct xe_tlb_inval *tlb_inval = &gt->tlb_inval;
+ int err;
+
+ tlb_inval->xe = xe;
+ tlb_inval->seqno = 1;
+ INIT_LIST_HEAD(&tlb_inval->pending_fences);
+ spin_lock_init(&tlb_inval->pending_lock);
+ spin_lock_init(&tlb_inval->lock);
+ INIT_DELAYED_WORK(&tlb_inval->fence_tdr, xe_tlb_inval_fence_timeout);
+
+ err = drmm_mutex_init(&xe->drm, &tlb_inval->seqno_lock);
+ if (err)
+ return err;
+
+ tlb_inval->job_wq = drmm_alloc_ordered_workqueue(&xe->drm,
+ "gt-tbl-inval-job-wq",
+ WQ_MEM_RECLAIM);
+ if (IS_ERR(tlb_inval->job_wq))
+ return PTR_ERR(tlb_inval->job_wq);
+
+ /* XXX: Blindly setting up backend to GuC */
+ xe_guc_tlb_inval_init_early(&gt->uc.guc, tlb_inval);
+
+ return drmm_add_action_or_reset(&xe->drm, tlb_inval_fini, tlb_inval);
+}
+
+/**
+ * xe_tlb_inval_reset() - TLB invalidation reset
+ * @tlb_inval: TLB invalidation client
+ *
+ * Signal any pending invalidation fences, should be called during a GT reset
+ */
+void xe_tlb_inval_reset(struct xe_tlb_inval *tlb_inval)
+{
+ struct xe_tlb_inval_fence *fence, *next;
+ int pending_seqno;
+
+ /*
+ * We can get here before the backends are even initialized if we're
+ * wedging very early, in which case there are not going to be any
+ * pending fences, so we can bail immediately.
+ */
+ if (!tlb_inval->ops->initialized(tlb_inval))
+ return;
+
+ /*
+ * Backend is already disabled at this point. No new TLB requests can
+ * appear.
+ */
+
+ mutex_lock(&tlb_inval->seqno_lock);
+ spin_lock_irq(&tlb_inval->pending_lock);
+ cancel_delayed_work(&tlb_inval->fence_tdr);
+ /*
+ * We might have various kworkers waiting for TLB flushes to complete
+ * which are not tracked with an explicit TLB fence, however at this
+ * stage that will never happen since the backend is already disabled,
+ * so make sure we signal them here under the assumption that we have
+ * completed a full GT reset.
+ */
+ if (tlb_inval->seqno == 1)
+ pending_seqno = TLB_INVALIDATION_SEQNO_MAX - 1;
+ else
+ pending_seqno = tlb_inval->seqno - 1;
+ WRITE_ONCE(tlb_inval->seqno_recv, pending_seqno);
+
+ list_for_each_entry_safe(fence, next,
+ &tlb_inval->pending_fences, link)
+ xe_tlb_inval_fence_signal(fence);
+ spin_unlock_irq(&tlb_inval->pending_lock);
+ mutex_unlock(&tlb_inval->seqno_lock);
+}
+
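+/*
+ * Seqnos wrap at TLB_INVALIDATION_SEQNO_MAX, so compare within a half-range
+ * window to decide whether @seqno has already been received.
+ */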
+static bool xe_tlb_inval_seqno_past(struct xe_tlb_inval *tlb_inval, int seqno)
+{
+ int seqno_recv = READ_ONCE(tlb_inval->seqno_recv);
+
+ lockdep_assert_held(&tlb_inval->pending_lock);
+
+ if (seqno - seqno_recv < -(TLB_INVALIDATION_SEQNO_MAX / 2))
+ return false;
+
+ if (seqno - seqno_recv > (TLB_INVALIDATION_SEQNO_MAX / 2))
+ return true;
+
+ return seqno_recv >= seqno;
+}
+
+static void xe_tlb_inval_fence_prep(struct xe_tlb_inval_fence *fence)
+{
+ struct xe_tlb_inval *tlb_inval = fence->tlb_inval;
+
+ fence->seqno = tlb_inval->seqno;
+ trace_xe_tlb_inval_fence_send(tlb_inval->xe, fence);
+
+ spin_lock_irq(&tlb_inval->pending_lock);
+ fence->inval_time = ktime_get();
+ list_add_tail(&fence->link, &tlb_inval->pending_fences);
+
+ if (list_is_singular(&tlb_inval->pending_fences))
+ queue_delayed_work(system_wq, &tlb_inval->fence_tdr,
+ tlb_inval->ops->timeout_delay(tlb_inval));
+ spin_unlock_irq(&tlb_inval->pending_lock);
+
+ tlb_inval->seqno = (tlb_inval->seqno + 1) %
+ TLB_INVALIDATION_SEQNO_MAX;
+ if (!tlb_inval->seqno)
+ tlb_inval->seqno = 1;
+}
+
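+/*
+ * Prep the fence and invoke a backend op under the seqno lock, signalling
+ * the fence immediately on failure. -ECANCELED (backend mid-reset) is
+ * reported as success to the caller.
+ */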
+#define xe_tlb_inval_issue(__tlb_inval, __fence, op, args...) \
+({ \
+ int __ret; \
+ \
+ xe_assert((__tlb_inval)->xe, (__tlb_inval)->ops); \
+ xe_assert((__tlb_inval)->xe, (__fence)); \
+ \
+ mutex_lock(&(__tlb_inval)->seqno_lock); \
+ xe_tlb_inval_fence_prep((__fence)); \
+ __ret = op((__tlb_inval), (__fence)->seqno, ##args); \
+ if (__ret < 0) \
+ xe_tlb_inval_fence_signal_unlocked((__fence)); \
+ mutex_unlock(&(__tlb_inval)->seqno_lock); \
+ \
+ __ret == -ECANCELED ? 0 : __ret; \
+})
+
+/**
+ * xe_tlb_inval_all() - Issue a TLB invalidation for all TLBs
+ * @tlb_inval: TLB invalidation client
+ * @fence: invalidation fence which will be signaled on TLB invalidation
+ * completion
+ *
+ * Issue a TLB invalidation for all TLBs. Completion is asynchronous and the
+ * caller can use the invalidation fence to wait for completion.
+ *
+ * Return: 0 on success, negative error code on error
+ */
+int xe_tlb_inval_all(struct xe_tlb_inval *tlb_inval,
+ struct xe_tlb_inval_fence *fence)
+{
+ return xe_tlb_inval_issue(tlb_inval, fence, tlb_inval->ops->all);
+}
+
+/**
+ * xe_tlb_inval_ggtt() - Issue a TLB invalidation for the GGTT
+ * @tlb_inval: TLB invalidation client
+ *
+ * Issue a TLB invalidation for the GGTT and wait for the invalidation to
+ * complete before returning.
+ *
+ * Return: 0 on success, negative error code on error
+ */
+int xe_tlb_inval_ggtt(struct xe_tlb_inval *tlb_inval)
+{
+ struct xe_tlb_inval_fence fence, *fence_ptr = &fence;
+ int ret;
+
+ xe_tlb_inval_fence_init(tlb_inval, fence_ptr, true);
+ ret = xe_tlb_inval_issue(tlb_inval, fence_ptr, tlb_inval->ops->ggtt);
+ xe_tlb_inval_fence_wait(fence_ptr);
+
+ return ret;
+}
+
+/**
+ * xe_tlb_inval_range() - Issue a TLB invalidation for an address range
+ * @tlb_inval: TLB invalidation client
+ * @fence: invalidation fence which will be signaled on TLB invalidation
+ * completion
+ * @start: start address
+ * @end: end address
+ * @asid: address space id
+ *
+ * Issue a range-based TLB invalidation if supported; if not, fall back to a
+ * full TLB invalidation. Completion is asynchronous and the caller can use
+ * the invalidation fence to wait for completion.
+ *
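+ * A minimal usage sketch, mirroring xe_tlb_inval_vm()::
+ *
+ *	struct xe_tlb_inval_fence fence;
+ *
+ *	xe_tlb_inval_fence_init(tlb_inval, &fence, true);
+ *	xe_tlb_inval_range(tlb_inval, &fence, start, end, asid);
+ *	xe_tlb_inval_fence_wait(&fence);
+ *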
+ * Return: Negative error code on error, 0 on success
+ */
+int xe_tlb_inval_range(struct xe_tlb_inval *tlb_inval,
+ struct xe_tlb_inval_fence *fence, u64 start, u64 end,
+ u32 asid)
+{
+ return xe_tlb_inval_issue(tlb_inval, fence, tlb_inval->ops->ppgtt,
+ start, end, asid);
+}
+
+/**
+ * xe_tlb_inval_vm() - Issue a TLB invalidation for a VM
+ * @tlb_inval: TLB invalidation client
+ * @vm: VM to invalidate
+ *
+ * Invalidate entire VM's address space
+ */
+void xe_tlb_inval_vm(struct xe_tlb_inval *tlb_inval, struct xe_vm *vm)
+{
+ struct xe_tlb_inval_fence fence;
+ u64 range = 1ull << vm->xe->info.va_bits;
+
+ xe_tlb_inval_fence_init(tlb_inval, &fence, true);
+ xe_tlb_inval_range(tlb_inval, &fence, 0, range, vm->usm.asid);
+ xe_tlb_inval_fence_wait(&fence);
+}
+
+/**
+ * xe_tlb_inval_done_handler() - TLB invalidation done handler
+ * @tlb_inval: TLB invalidation client
+ * @seqno: seqno of invalidation that is done
+ *
+ * Update recv seqno, signal any TLB invalidation fences, and restart TDR
+ */
+void xe_tlb_inval_done_handler(struct xe_tlb_inval *tlb_inval, int seqno)
+{
+ struct xe_device *xe = tlb_inval->xe;
+ struct xe_tlb_inval_fence *fence, *next;
+ unsigned long flags;
+
+ /*
+ * This can be run both directly from the IRQ handler and from
+ * process_g2h_msg(). Only one may process any individual CT message,
+ * however the order they are processed here could result in skipping a
+ * seqno. To handle that we just process all the seqnos from the last
+ * seqno_recv up to and including @seqno. The delta should be very
+ * small, so there shouldn't be many pending fences we actually need
+ * to iterate over here.
+ *
+ * From the GuC POV we expect the seqnos to always appear in-order, so
+ * if we see something later in the timeline we can be sure that
+ * anything appearing earlier has already signalled; we just have yet
+ * to officially process the CT message, as when racing against
+ * process_g2h_msg().
+ */
+ spin_lock_irqsave(&tlb_inval->pending_lock, flags);
+ if (xe_tlb_inval_seqno_past(tlb_inval, seqno)) {
+ spin_unlock_irqrestore(&tlb_inval->pending_lock, flags);
+ return;
+ }
+
+ WRITE_ONCE(tlb_inval->seqno_recv, seqno);
+
+ list_for_each_entry_safe(fence, next,
+ &tlb_inval->pending_fences, link) {
+ trace_xe_tlb_inval_fence_recv(xe, fence);
+
+ if (!xe_tlb_inval_seqno_past(tlb_inval, fence->seqno))
+ break;
+
+ xe_tlb_inval_fence_signal(fence);
+ }
+
+ if (!list_empty(&tlb_inval->pending_fences))
+ mod_delayed_work(system_wq,
+ &tlb_inval->fence_tdr,
+ tlb_inval->ops->timeout_delay(tlb_inval));
+ else
+ cancel_delayed_work(&tlb_inval->fence_tdr);
+
+ spin_unlock_irqrestore(&tlb_inval->pending_lock, flags);
+}
+
+static const char *
+xe_inval_fence_get_driver_name(struct dma_fence *dma_fence)
+{
+ return "xe";
+}
+
+static const char *
+xe_inval_fence_get_timeline_name(struct dma_fence *dma_fence)
+{
+ return "tlb_inval_fence";
+}
+
+static const struct dma_fence_ops inval_fence_ops = {
+ .get_driver_name = xe_inval_fence_get_driver_name,
+ .get_timeline_name = xe_inval_fence_get_timeline_name,
+};
+
+/**
+ * xe_tlb_inval_fence_init() - Initialize TLB invalidation fence
+ * @tlb_inval: TLB invalidation client
+ * @fence: TLB invalidation fence to initialize
+ * @stack: fence is stack variable
+ *
+ * Initialize a TLB invalidation fence for use. xe_tlb_inval_fence_fini()
+ * will be called automatically when the fence is signalled (all fences must
+ * signal), even on error.
+ */
+void xe_tlb_inval_fence_init(struct xe_tlb_inval *tlb_inval,
+ struct xe_tlb_inval_fence *fence,
+ bool stack)
+{
+ xe_pm_runtime_get_noresume(tlb_inval->xe);
+
+ spin_lock_irq(&tlb_inval->lock);
+ dma_fence_init(&fence->base, &inval_fence_ops, &tlb_inval->lock,
+ dma_fence_context_alloc(1), 1);
+ spin_unlock_irq(&tlb_inval->lock);
+ INIT_LIST_HEAD(&fence->link);
+ if (stack)
+ set_bit(FENCE_STACK_BIT, &fence->base.flags);
+ else
+ dma_fence_get(&fence->base);
+ fence->tlb_inval = tlb_inval;
+}
diff --git a/drivers/gpu/drm/xe/xe_tlb_inval.h b/drivers/gpu/drm/xe/xe_tlb_inval.h
new file mode 100644
index 000000000000..554634dfd4e2
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_tlb_inval.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_TLB_INVAL_H_
+#define _XE_TLB_INVAL_H_
+
+#include <linux/types.h>
+
+#include "xe_tlb_inval_types.h"
+
+struct xe_gt;
+struct xe_guc;
+struct xe_vm;
+
+int xe_gt_tlb_inval_init_early(struct xe_gt *gt);
+
+void xe_tlb_inval_reset(struct xe_tlb_inval *tlb_inval);
+int xe_tlb_inval_all(struct xe_tlb_inval *tlb_inval,
+ struct xe_tlb_inval_fence *fence);
+int xe_tlb_inval_ggtt(struct xe_tlb_inval *tlb_inval);
+void xe_tlb_inval_vm(struct xe_tlb_inval *tlb_inval, struct xe_vm *vm);
+int xe_tlb_inval_range(struct xe_tlb_inval *tlb_inval,
+ struct xe_tlb_inval_fence *fence,
+ u64 start, u64 end, u32 asid);
+
+void xe_tlb_inval_fence_init(struct xe_tlb_inval *tlb_inval,
+ struct xe_tlb_inval_fence *fence,
+ bool stack);
+
+/**
+ * xe_tlb_inval_fence_wait() - TLB invalidation fence wait
+ * @fence: TLB invalidation fence to wait on
+ *
+ * Wait on a TLB invalidation fence until it signals; non-interruptible.
+ */
+static inline void
+xe_tlb_inval_fence_wait(struct xe_tlb_inval_fence *fence)
+{
+ dma_fence_wait(&fence->base, false);
+}
+
+void xe_tlb_inval_done_handler(struct xe_tlb_inval *tlb_inval, int seqno);
+
+#endif /* _XE_TLB_INVAL_H_ */
diff --git a/drivers/gpu/drm/xe/xe_tlb_inval_job.c b/drivers/gpu/drm/xe/xe_tlb_inval_job.c
new file mode 100644
index 000000000000..492def04a559
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_tlb_inval_job.c
@@ -0,0 +1,268 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include "xe_assert.h"
+#include "xe_dep_job_types.h"
+#include "xe_dep_scheduler.h"
+#include "xe_exec_queue.h"
+#include "xe_gt_types.h"
+#include "xe_tlb_inval.h"
+#include "xe_tlb_inval_job.h"
+#include "xe_migrate.h"
+#include "xe_pm.h"
+
+/** struct xe_tlb_inval_job - TLB invalidation job */
+struct xe_tlb_inval_job {
+ /** @dep: base generic dependency Xe job */
+ struct xe_dep_job dep;
+ /** @tlb_inval: TLB invalidation client */
+ struct xe_tlb_inval *tlb_inval;
+ /** @q: exec queue issuing the invalidate */
+ struct xe_exec_queue *q;
+ /** @refcount: ref count of this job */
+ struct kref refcount;
+ /**
+ * @fence: dma fence to indicate completion. 1 way relationship - job
+ * can safely reference fence, fence cannot safely reference job.
+ */
+ struct dma_fence *fence;
+ /** @start: Start address to invalidate */
+ u64 start;
+ /** @end: End address to invalidate */
+ u64 end;
+ /** @asid: Address space ID to invalidate */
+ u32 asid;
+ /** @fence_armed: Fence has been armed */
+ bool fence_armed;
+};
+
+static struct dma_fence *xe_tlb_inval_job_run(struct xe_dep_job *dep_job)
+{
+ struct xe_tlb_inval_job *job =
+ container_of(dep_job, typeof(*job), dep);
+ struct xe_tlb_inval_fence *ifence =
+ container_of(job->fence, typeof(*ifence), base);
+
+ xe_tlb_inval_range(job->tlb_inval, ifence, job->start,
+ job->end, job->asid);
+
+ return job->fence;
+}
+
+static void xe_tlb_inval_job_free(struct xe_dep_job *dep_job)
+{
+ struct xe_tlb_inval_job *job =
+ container_of(dep_job, typeof(*job), dep);
+
+ /* Pairs with get in xe_tlb_inval_job_push */
+ xe_tlb_inval_job_put(job);
+}
+
+static const struct xe_dep_job_ops dep_job_ops = {
+ .run_job = xe_tlb_inval_job_run,
+ .free_job = xe_tlb_inval_job_free,
+};
+
+/**
+ * xe_tlb_inval_job_create() - TLB invalidation job create
+ * @q: exec queue issuing the invalidate
+ * @tlb_inval: TLB invalidation client
+ * @dep_scheduler: Dependency scheduler for job
+ * @start: Start address to invalidate
+ * @end: End address to invalidate
+ * @asid: Address space ID to invalidate
+ *
+ * Create a TLB invalidation job and initialize internal fields. The caller is
+ * responsible for releasing the creation reference.
+ *
+ * Return: TLB invalidation job object on success, ERR_PTR failure
+ */
+struct xe_tlb_inval_job *
+xe_tlb_inval_job_create(struct xe_exec_queue *q, struct xe_tlb_inval *tlb_inval,
+ struct xe_dep_scheduler *dep_scheduler, u64 start,
+ u64 end, u32 asid)
+{
+ struct xe_tlb_inval_job *job;
+ struct drm_sched_entity *entity =
+ xe_dep_scheduler_entity(dep_scheduler);
+ struct xe_tlb_inval_fence *ifence;
+ int err;
+
+ job = kmalloc(sizeof(*job), GFP_KERNEL);
+ if (!job)
+ return ERR_PTR(-ENOMEM);
+
+ job->q = q;
+ job->tlb_inval = tlb_inval;
+ job->start = start;
+ job->end = end;
+ job->asid = asid;
+ job->fence_armed = false;
+ job->dep.ops = &dep_job_ops;
+ kref_init(&job->refcount);
+ xe_exec_queue_get(q); /* Pairs with put in xe_tlb_inval_job_destroy */
+
+ ifence = kmalloc(sizeof(*ifence), GFP_KERNEL);
+ if (!ifence) {
+ err = -ENOMEM;
+ goto err_job;
+ }
+ job->fence = &ifence->base;
+
+ err = drm_sched_job_init(&job->dep.drm, entity, 1, NULL,
+ q->xef ? q->xef->drm->client_id : 0);
+ if (err)
+ goto err_fence;
+
+ /* Pairs with put in xe_tlb_inval_job_destroy */
+ xe_pm_runtime_get_noresume(gt_to_xe(q->gt));
+
+ return job;
+
+err_fence:
+ kfree(ifence);
+err_job:
+ xe_exec_queue_put(q);
+ kfree(job);
+
+ return ERR_PTR(err);
+}
+
+static void xe_tlb_inval_job_destroy(struct kref *ref)
+{
+ struct xe_tlb_inval_job *job = container_of(ref, typeof(*job),
+ refcount);
+ struct xe_tlb_inval_fence *ifence =
+ container_of(job->fence, typeof(*ifence), base);
+ struct xe_exec_queue *q = job->q;
+ struct xe_device *xe = gt_to_xe(q->gt);
+
+ if (!job->fence_armed)
+ kfree(ifence);
+ else
+ /* Ref from xe_tlb_inval_fence_init */
+ dma_fence_put(job->fence);
+
+ drm_sched_job_cleanup(&job->dep.drm);
+ kfree(job);
+ xe_exec_queue_put(q); /* Pairs with get from xe_tlb_inval_job_create */
+ xe_pm_runtime_put(xe); /* Pairs with get from xe_tlb_inval_job_create */
+}
+
+/**
+ * xe_tlb_inval_job_alloc_dep() - TLB invalidation job alloc dependency
+ * @job: TLB invalidation job to alloc dependency for
+ *
+ * Allocate storage for a dependency of the TLB invalidation job. This
+ * function should be called at most once per job and must be paired with
+ * xe_tlb_inval_job_push() being called with a real fence.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int xe_tlb_inval_job_alloc_dep(struct xe_tlb_inval_job *job)
+{
+ xe_assert(gt_to_xe(job->q->gt), !xa_load(&job->dep.drm.dependencies, 0));
+ might_alloc(GFP_KERNEL);
+
+ return drm_sched_job_add_dependency(&job->dep.drm,
+ dma_fence_get_stub());
+}
+
+/**
+ * xe_tlb_inval_job_push() - TLB invalidation job push
+ * @job: TLB invalidation job to push
+ * @m: The migration object being used
+ * @fence: Dependency for TLB invalidation job
+ *
+ * Pushes a TLB invalidation job for execution, using @fence as a dependency.
+ * Storage for @fence must be preallocated with xe_tlb_inval_job_alloc_dep
+ * prior to this call if @fence is not signaled. Takes a reference to the job's
+ * finished fence, which the caller is responsible for releasing, and returns
+ * it to the caller. This function is safe to call in the path of reclaim.
+ *
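+ * An illustrative flow (error handling elided, @dep_fence stands in for the
+ * caller's dependency)::
+ *
+ *	job = xe_tlb_inval_job_create(q, tlb_inval, dep_scheduler,
+ *				      start, end, asid);
+ *	err = xe_tlb_inval_job_alloc_dep(job);
+ *	fence = xe_tlb_inval_job_push(job, m, dep_fence);
+ *	xe_tlb_inval_job_put(job);	// drop the creation reference
+ *	...
+ *	dma_fence_put(fence);		// drop the returned finished-fence ref
+ *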
+ * Return: Job's finished fence on success, cannot fail
+ */
+struct dma_fence *xe_tlb_inval_job_push(struct xe_tlb_inval_job *job,
+ struct xe_migrate *m,
+ struct dma_fence *fence)
+{
+ struct xe_tlb_inval_fence *ifence =
+ container_of(job->fence, typeof(*ifence), base);
+
+ if (!dma_fence_is_signaled(fence)) {
+ void *ptr;
+
+ /*
+ * Can be in path of reclaim, hence the preallocation of fence
+ * storage in xe_tlb_inval_job_alloc_dep. Verify caller did
+ * this correctly.
+ */
+ xe_assert(gt_to_xe(job->q->gt),
+ xa_load(&job->dep.drm.dependencies, 0) ==
+ dma_fence_get_stub());
+
+ dma_fence_get(fence); /* ref released once dependency processed by scheduler */
+ ptr = xa_store(&job->dep.drm.dependencies, 0, fence,
+ GFP_ATOMIC);
+ xe_assert(gt_to_xe(job->q->gt), !xa_is_err(ptr));
+ }
+
+ xe_tlb_inval_job_get(job); /* Pairs with put in free_job */
+ job->fence_armed = true;
+
+ /*
+ * We need the migration lock to protect the job's seqno and the SPSC
+ * queue; it is only taken on the migration queue, while user queues
+ * are protected by the VM's dma-resv lock.
+ */
+ xe_migrate_job_lock(m, job->q);
+
+ /* Creation ref pairs with put in xe_tlb_inval_job_destroy */
+ xe_tlb_inval_fence_init(job->tlb_inval, ifence, false);
+ dma_fence_get(job->fence); /* Pairs with put in DRM scheduler */
+
+ drm_sched_job_arm(&job->dep.drm);
+ /*
+ * Caller ref; the get must be done before the job push as the job
+ * could otherwise immediately signal and be freed.
+ */
+ dma_fence_get(&job->dep.drm.s_fence->finished);
+ drm_sched_entity_push_job(&job->dep.drm);
+
+ xe_migrate_job_unlock(m, job->q);
+
+ /*
+ * Not using job->fence, as it has its own dma-fence context, which
+ * does not allow TLB invalidation fences on the same (queue, GT)
+ * tuple to be squashed in dma-resv/DRM scheduler. Instead, we use
+ * the DRM scheduler context and the job's finished fence, which
+ * enables squashing.
+ */
+ return &job->dep.drm.s_fence->finished;
+}
+
+/**
+ * xe_tlb_inval_job_get() - Get a reference to TLB invalidation job
+ * @job: TLB invalidation job object
+ *
+ * Increment the TLB invalidation job's reference count
+ */
+void xe_tlb_inval_job_get(struct xe_tlb_inval_job *job)
+{
+ kref_get(&job->refcount);
+}
+
+/**
+ * xe_tlb_inval_job_put() - Put a reference to TLB invalidation job
+ * @job: TLB invalidation job object
+ *
+ * Decrement the TLB invalidation job's reference count, call
+ * xe_tlb_inval_job_destroy when reference count == 0. Skips decrement if
+ * input @job is NULL or IS_ERR.
+ */
+void xe_tlb_inval_job_put(struct xe_tlb_inval_job *job)
+{
+ if (!IS_ERR_OR_NULL(job))
+ kref_put(&job->refcount, xe_tlb_inval_job_destroy);
+}
diff --git a/drivers/gpu/drm/xe/xe_tlb_inval_job.h b/drivers/gpu/drm/xe/xe_tlb_inval_job.h
new file mode 100644
index 000000000000..e63edcb26b50
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_tlb_inval_job.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_TLB_INVAL_JOB_H_
+#define _XE_TLB_INVAL_JOB_H_
+
+#include <linux/types.h>
+
+struct dma_fence;
+struct xe_dep_scheduler;
+struct xe_exec_queue;
+struct xe_tlb_inval;
+struct xe_tlb_inval_job;
+struct xe_migrate;
+
+struct xe_tlb_inval_job *
+xe_tlb_inval_job_create(struct xe_exec_queue *q, struct xe_tlb_inval *tlb_inval,
+ struct xe_dep_scheduler *dep_scheduler,
+ u64 start, u64 end, u32 asid);
+
+int xe_tlb_inval_job_alloc_dep(struct xe_tlb_inval_job *job);
+
+struct dma_fence *xe_tlb_inval_job_push(struct xe_tlb_inval_job *job,
+ struct xe_migrate *m,
+ struct dma_fence *fence);
+
+void xe_tlb_inval_job_get(struct xe_tlb_inval_job *job);
+
+void xe_tlb_inval_job_put(struct xe_tlb_inval_job *job);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_tlb_inval_types.h b/drivers/gpu/drm/xe/xe_tlb_inval_types.h
new file mode 100644
index 000000000000..8f8b060e9005
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_tlb_inval_types.h
@@ -0,0 +1,130 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_TLB_INVAL_TYPES_H_
+#define _XE_TLB_INVAL_TYPES_H_
+
+#include <linux/workqueue.h>
+#include <linux/dma-fence.h>
+
+struct xe_tlb_inval;
+
+/** struct xe_tlb_inval_ops - TLB invalidation ops (backend) */
+struct xe_tlb_inval_ops {
+ /**
+ * @all: Invalidate all TLBs
+ * @tlb_inval: TLB invalidation client
+ * @seqno: Seqno of TLB invalidation
+ *
+ * Return 0 on success, -ECANCELED if backend is mid-reset, error on
+ * failure
+ */
+ int (*all)(struct xe_tlb_inval *tlb_inval, u32 seqno);
+
+ /**
+ * @ggtt: Invalidate global translation TLBs
+ * @tlb_inval: TLB invalidation client
+ * @seqno: Seqno of TLB invalidation
+ *
+ * Return 0 on success, -ECANCELED if backend is mid-reset, error on
+ * failure
+ */
+ int (*ggtt)(struct xe_tlb_inval *tlb_inval, u32 seqno);
+
+ /**
+ * @ppgtt: Invalidate per-process translation TLBs
+ * @tlb_inval: TLB invalidation client
+ * @seqno: Seqno of TLB invalidation
+ * @start: Start address
+ * @end: End address
+ * @asid: Address space ID
+ *
+ * Return 0 on success, -ECANCELED if backend is mid-reset, error on
+ * failure
+ */
+ int (*ppgtt)(struct xe_tlb_inval *tlb_inval, u32 seqno, u64 start,
+ u64 end, u32 asid);
+
+ /**
+ * @initialized: Backend is initialized
+ * @tlb_inval: TLB invalidation client
+ *
+ * Return: True if the backend is initialized, False otherwise
+ */
+ bool (*initialized)(struct xe_tlb_inval *tlb_inval);
+
+ /**
+ * @flush: Flush pending TLB invalidations
+ * @tlb_inval: TLB invalidation client
+ */
+ void (*flush)(struct xe_tlb_inval *tlb_inval);
+
+ /**
+ * @timeout_delay: Timeout delay for TLB invalidation
+ * @tlb_inval: TLB invalidation client
+ *
+ * Return: Timeout delay for TLB invalidation in jiffies
+ */
+ long (*timeout_delay)(struct xe_tlb_inval *tlb_inval);
+};
+
+/** struct xe_tlb_inval - TLB invalidation client (frontend) */
+struct xe_tlb_inval {
+ /** @private: Backend private pointer */
+ void *private;
+ /** @xe: Pointer to Xe device */
+ struct xe_device *xe;
+ /** @ops: TLB invalidation ops */
+ const struct xe_tlb_inval_ops *ops;
+ /** @seqno: TLB invalidation seqno, protected by @seqno_lock */
+#define TLB_INVALIDATION_SEQNO_MAX 0x100000
+ int seqno;
+ /** @seqno_lock: protects @seqno */
+ struct mutex seqno_lock;
+ /**
+ * @seqno_recv: last received TLB invalidation seqno, protected by
+ * @pending_lock
+ */
+ int seqno_recv;
+ /**
+ * @pending_fences: list of pending fences waiting on TLB invalidations,
+ * protected by @pending_lock
+ */
+ struct list_head pending_fences;
+ /**
+ * @pending_lock: protects @pending_fences and updating @seqno_recv.
+ */
+ spinlock_t pending_lock;
+ /**
+ * @fence_tdr: schedules a delayed call to xe_tlb_inval_fence_timeout() after
+ * the timeout interval is over.
+ */
+ struct delayed_work fence_tdr;
+ /** @job_wq: schedules TLB invalidation jobs */
+ struct workqueue_struct *job_wq;
+	/** @lock: protects TLB invalidation fences */
+ spinlock_t lock;
+};
+
+/**
+ * struct xe_tlb_inval_fence - TLB invalidation fence
+ *
+ * Optionally passed to xe_tlb_inval* functions and will be signaled upon TLB
+ * invalidation completion.
+ */
+struct xe_tlb_inval_fence {
+ /** @base: dma fence base */
+ struct dma_fence base;
+	/** @tlb_inval: TLB invalidation client to which the fence belongs */
+ struct xe_tlb_inval *tlb_inval;
+ /** @link: link into list of pending tlb fences */
+ struct list_head link;
+	/** @seqno: seqno of the TLB invalidation that signals this fence */
+ int seqno;
+ /** @inval_time: time of TLB invalidation */
+ ktime_t inval_time;
+};
+
+#endif
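
For illustration, a skeletal backend wiring into the ops table above might look as follows; the guc_* helpers are hypothetical placeholders, not code from this series:

	static int guc_tlb_inval_all(struct xe_tlb_inval *tlb_inval, u32 seqno)
	{
		/* Issue an invalidate-all request tagged with @seqno (placeholder) */
		return 0;
	}

	static bool guc_tlb_inval_initialized(struct xe_tlb_inval *tlb_inval)
	{
		/* Report whether the backend channel is up (placeholder) */
		return true;
	}

	static long guc_tlb_inval_timeout_delay(struct xe_tlb_inval *tlb_inval)
	{
		return msecs_to_jiffies(250);	/* illustrative value */
	}

	static const struct xe_tlb_inval_ops example_tlb_inval_ops = {
		.all		= guc_tlb_inval_all,
		.initialized	= guc_tlb_inval_initialized,
		.timeout_delay	= guc_tlb_inval_timeout_delay,
		/* .ggtt, .ppgtt and .flush would be wired up the same way */
	};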
diff --git a/drivers/gpu/drm/xe/xe_trace.h b/drivers/gpu/drm/xe/xe_trace.h
index b4a3577df70c..314f42fcbcbd 100644
--- a/drivers/gpu/drm/xe/xe_trace.h
+++ b/drivers/gpu/drm/xe/xe_trace.h
@@ -14,10 +14,10 @@
#include "xe_exec_queue_types.h"
#include "xe_gpu_scheduler_types.h"
-#include "xe_gt_tlb_invalidation_types.h"
#include "xe_gt_types.h"
#include "xe_guc_exec_queue_types.h"
#include "xe_sched_job.h"
+#include "xe_tlb_inval_types.h"
#include "xe_vm.h"
#define __dev_name_xe(xe) dev_name((xe)->drm.dev)
@@ -25,13 +25,13 @@
#define __dev_name_gt(gt) __dev_name_xe(gt_to_xe((gt)))
#define __dev_name_eq(q) __dev_name_gt((q)->gt)
-DECLARE_EVENT_CLASS(xe_gt_tlb_invalidation_fence,
- TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
+DECLARE_EVENT_CLASS(xe_tlb_inval_fence,
+ TP_PROTO(struct xe_device *xe, struct xe_tlb_inval_fence *fence),
TP_ARGS(xe, fence),
TP_STRUCT__entry(
__string(dev, __dev_name_xe(xe))
- __field(struct xe_gt_tlb_invalidation_fence *, fence)
+ __field(struct xe_tlb_inval_fence *, fence)
__field(int, seqno)
),
@@ -45,39 +45,23 @@ DECLARE_EVENT_CLASS(xe_gt_tlb_invalidation_fence,
__get_str(dev), __entry->fence, __entry->seqno)
);
-DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_create,
- TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
+DEFINE_EVENT(xe_tlb_inval_fence, xe_tlb_inval_fence_send,
+ TP_PROTO(struct xe_device *xe, struct xe_tlb_inval_fence *fence),
TP_ARGS(xe, fence)
);
-DEFINE_EVENT(xe_gt_tlb_invalidation_fence,
- xe_gt_tlb_invalidation_fence_work_func,
- TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
+DEFINE_EVENT(xe_tlb_inval_fence, xe_tlb_inval_fence_recv,
+ TP_PROTO(struct xe_device *xe, struct xe_tlb_inval_fence *fence),
TP_ARGS(xe, fence)
);
-DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_cb,
- TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
+DEFINE_EVENT(xe_tlb_inval_fence, xe_tlb_inval_fence_signal,
+ TP_PROTO(struct xe_device *xe, struct xe_tlb_inval_fence *fence),
TP_ARGS(xe, fence)
);
-DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_send,
- TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
- TP_ARGS(xe, fence)
-);
-
-DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_recv,
- TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
- TP_ARGS(xe, fence)
-);
-
-DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_signal,
- TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
- TP_ARGS(xe, fence)
-);
-
-DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_timeout,
- TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
+DEFINE_EVENT(xe_tlb_inval_fence, xe_tlb_inval_fence_timeout,
+ TP_PROTO(struct xe_device *xe, struct xe_tlb_inval_fence *fence),
TP_ARGS(xe, fence)
);
diff --git a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
index d9c9d2547aad..dc588255674d 100644
--- a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
+++ b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
@@ -25,6 +25,7 @@
#include "xe_ttm_stolen_mgr.h"
#include "xe_ttm_vram_mgr.h"
#include "xe_wa.h"
+#include "xe_vram.h"
struct xe_ttm_stolen_mgr {
struct xe_ttm_vram_mgr base;
@@ -82,15 +83,16 @@ static u32 get_wopcm_size(struct xe_device *xe)
static s64 detect_bar2_dgfx(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr)
{
- struct xe_tile *tile = xe_device_get_root_tile(xe);
+ struct xe_vram_region *tile_vram = xe_device_get_root_tile(xe)->mem.vram;
+ resource_size_t tile_io_start = xe_vram_region_io_start(tile_vram);
struct xe_mmio *mmio = xe_root_tile_mmio(xe);
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
u64 stolen_size, wopcm_size;
u64 tile_offset;
u64 tile_size;
- tile_offset = tile->mem.vram.io_start - xe->mem.vram.io_start;
- tile_size = tile->mem.vram.actual_physical_size;
+ tile_offset = tile_io_start - xe_vram_region_io_start(xe->mem.vram);
+ tile_size = xe_vram_region_actual_physical_size(tile_vram);
/* Use DSM base address instead for stolen memory */
mgr->stolen_base = (xe_mmio_read64_2x32(mmio, DSMBASE) & BDSM_MASK) - tile_offset;
@@ -107,7 +109,7 @@ static s64 detect_bar2_dgfx(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr)
/* Verify usage fits in the actual resource available */
if (mgr->stolen_base + stolen_size <= pci_resource_len(pdev, LMEM_BAR))
- mgr->io_base = tile->mem.vram.io_start + mgr->stolen_base;
+ mgr->io_base = tile_io_start + mgr->stolen_base;
/*
* There may be few KB of platform dependent reserved memory at the end
@@ -164,7 +166,7 @@ static u32 detect_bar2_integrated(struct xe_device *xe, struct xe_ttm_stolen_mgr
stolen_size -= wopcm_size;
- if (media_gt && XE_WA(media_gt, 14019821291)) {
+ if (media_gt && XE_GT_WA(media_gt, 14019821291)) {
u64 gscpsmi_base = xe_mmio_read64_2x32(&media_gt->mmio, GSCPSMI_BASE)
& ~GENMASK_ULL(5, 0);
diff --git a/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c b/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c
index 9e375a40aee9..9175b4a2214b 100644
--- a/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c
+++ b/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c
@@ -15,6 +15,7 @@
#include "xe_gt.h"
#include "xe_res_cursor.h"
#include "xe_ttm_vram_mgr.h"
+#include "xe_vram_types.h"
static inline struct drm_buddy_block *
xe_ttm_vram_mgr_first_block(struct list_head *list)
@@ -337,13 +338,20 @@ int __xe_ttm_vram_mgr_init(struct xe_device *xe, struct xe_ttm_vram_mgr *mgr,
return drmm_add_action_or_reset(&xe->drm, ttm_vram_mgr_fini, mgr);
}
-int xe_ttm_vram_mgr_init(struct xe_tile *tile, struct xe_ttm_vram_mgr *mgr)
+/**
+ * xe_ttm_vram_mgr_init - initialize TTM VRAM region
+ * @xe: pointer to Xe device
+ * @vram: pointer to xe_vram_region that contains the memory region attributes
+ *
+ * Initialize the Xe TTM VRAM manager for the given @vram region.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int xe_ttm_vram_mgr_init(struct xe_device *xe, struct xe_vram_region *vram)
{
- struct xe_device *xe = tile_to_xe(tile);
- struct xe_vram_region *vram = &tile->mem.vram;
-
- return __xe_ttm_vram_mgr_init(xe, mgr, XE_PL_VRAM0 + tile->id,
- vram->usable_size, vram->io_size,
+ return __xe_ttm_vram_mgr_init(xe, &vram->ttm, vram->placement,
+ xe_vram_region_usable_size(vram),
+ xe_vram_region_io_size(vram),
PAGE_SIZE);
}
@@ -392,7 +400,7 @@ int xe_ttm_vram_mgr_alloc_sgt(struct xe_device *xe,
*/
xe_res_first(res, offset, length, &cursor);
for_each_sgtable_sg((*sgt), sg, i) {
- phys_addr_t phys = cursor.start + tile->mem.vram.io_start;
+ phys_addr_t phys = cursor.start + xe_vram_region_io_start(tile->mem.vram);
size_t size = min_t(u64, cursor.size, SZ_2G);
dma_addr_t addr;
diff --git a/drivers/gpu/drm/xe/xe_ttm_vram_mgr.h b/drivers/gpu/drm/xe/xe_ttm_vram_mgr.h
index cc76050e376d..87b7fae5edba 100644
--- a/drivers/gpu/drm/xe/xe_ttm_vram_mgr.h
+++ b/drivers/gpu/drm/xe/xe_ttm_vram_mgr.h
@@ -11,11 +11,12 @@
enum dma_data_direction;
struct xe_device;
struct xe_tile;
+struct xe_vram_region;
int __xe_ttm_vram_mgr_init(struct xe_device *xe, struct xe_ttm_vram_mgr *mgr,
u32 mem_type, u64 size, u64 io_size,
u64 default_page_size);
-int xe_ttm_vram_mgr_init(struct xe_tile *tile, struct xe_ttm_vram_mgr *mgr);
+int xe_ttm_vram_mgr_init(struct xe_device *xe, struct xe_vram_region *vram);
int xe_ttm_vram_mgr_alloc_sgt(struct xe_device *xe,
struct ttm_resource *res,
u64 offset, u64 length,
diff --git a/drivers/gpu/drm/xe/xe_tuning.c b/drivers/gpu/drm/xe/xe_tuning.c
index 828b45b24c23..a524170a04d0 100644
--- a/drivers/gpu/drm/xe/xe_tuning.c
+++ b/drivers/gpu/drm/xe/xe_tuning.c
@@ -99,7 +99,7 @@ static const struct xe_rtp_entry_sr engine_tunings[] = {
XE_RTP_ACTIONS(SET(SAMPLER_MODE, INDIRECT_STATE_BASE_ADDR_OVERRIDE))
},
{ XE_RTP_NAME("Tuning: Disable NULL query for Anyhit Shader"),
- XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, XE_RTP_END_VERSION_UNDEFINED),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2000, XE_RTP_END_VERSION_UNDEFINED),
FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(RT_CTRL, DIS_NULL_QUERY))
},
diff --git a/drivers/gpu/drm/xe/xe_uc_fw.c b/drivers/gpu/drm/xe/xe_uc_fw.c
index 9bbdde604923..622b76078567 100644
--- a/drivers/gpu/drm/xe/xe_uc_fw.c
+++ b/drivers/gpu/drm/xe/xe_uc_fw.c
@@ -115,8 +115,8 @@ struct fw_blobs_by_type {
#define XE_GT_TYPE_ANY XE_GT_TYPE_UNINITIALIZED
#define XE_GUC_FIRMWARE_DEFS(fw_def, mmp_ver, major_ver) \
- fw_def(PANTHERLAKE, GT_TYPE_ANY, major_ver(xe, guc, ptl, 70, 47, 0)) \
- fw_def(BATTLEMAGE, GT_TYPE_ANY, major_ver(xe, guc, bmg, 70, 45, 2)) \
+ fw_def(PANTHERLAKE, GT_TYPE_ANY, major_ver(xe, guc, ptl, 70, 49, 4)) \
+ fw_def(BATTLEMAGE, GT_TYPE_ANY, major_ver(xe, guc, bmg, 70, 49, 4)) \
fw_def(LUNARLAKE, GT_TYPE_ANY, major_ver(xe, guc, lnl, 70, 45, 2)) \
fw_def(METEORLAKE, GT_TYPE_ANY, major_ver(i915, guc, mtl, 70, 44, 1)) \
fw_def(DG2, GT_TYPE_ANY, major_ver(i915, guc, dg2, 70, 45, 2)) \
@@ -328,7 +328,7 @@ static void uc_fw_fini(struct drm_device *drm, void *arg)
xe_uc_fw_change_status(uc_fw, XE_UC_FIRMWARE_SELECTED);
}
-static int guc_read_css_info(struct xe_uc_fw *uc_fw, struct uc_css_header *css)
+static int guc_read_css_info(struct xe_uc_fw *uc_fw, struct uc_css_guc_info *guc_info)
{
struct xe_gt *gt = uc_fw_to_gt(uc_fw);
struct xe_uc_fw_version *release = &uc_fw->versions.found[XE_UC_FW_VER_RELEASE];
@@ -343,11 +343,12 @@ static int guc_read_css_info(struct xe_uc_fw *uc_fw, struct uc_css_header *css)
return -EINVAL;
}
- compatibility->major = FIELD_GET(CSS_SW_VERSION_UC_MAJOR, css->submission_version);
- compatibility->minor = FIELD_GET(CSS_SW_VERSION_UC_MINOR, css->submission_version);
- compatibility->patch = FIELD_GET(CSS_SW_VERSION_UC_PATCH, css->submission_version);
+ compatibility->major = FIELD_GET(CSS_SW_VERSION_UC_MAJOR, guc_info->submission_version);
+ compatibility->minor = FIELD_GET(CSS_SW_VERSION_UC_MINOR, guc_info->submission_version);
+ compatibility->patch = FIELD_GET(CSS_SW_VERSION_UC_PATCH, guc_info->submission_version);
- uc_fw->private_data_size = css->private_data_size;
+ uc_fw->build_type = FIELD_GET(CSS_UKERNEL_INFO_BUILDTYPE, guc_info->ukernel_info);
+ uc_fw->private_data_size = guc_info->private_data_size;
return 0;
}
@@ -416,8 +417,8 @@ static int parse_css_header(struct xe_uc_fw *uc_fw, const void *fw_data, size_t
css = (struct uc_css_header *)fw_data;
/* Check integrity of size values inside CSS header */
- size = (css->header_size_dw - css->key_size_dw - css->modulus_size_dw -
- css->exponent_size_dw) * sizeof(u32);
+ size = (css->header_size_dw - css->rsa_info.key_size_dw - css->rsa_info.modulus_size_dw -
+ css->rsa_info.exponent_size_dw) * sizeof(u32);
if (unlikely(size != sizeof(struct uc_css_header))) {
drm_warn(&xe->drm,
"%s firmware %s: unexpected header size: %zu != %zu\n",
@@ -430,7 +431,7 @@ static int parse_css_header(struct xe_uc_fw *uc_fw, const void *fw_data, size_t
uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
/* now RSA */
- uc_fw->rsa_size = css->key_size_dw * sizeof(u32);
+ uc_fw->rsa_size = css->rsa_info.key_size_dw * sizeof(u32);
/* At least, it should have header, uCode and RSA. Size of all three. */
size = sizeof(struct uc_css_header) + uc_fw->ucode_size +
@@ -443,12 +444,12 @@ static int parse_css_header(struct xe_uc_fw *uc_fw, const void *fw_data, size_t
}
/* Get version numbers from the CSS header */
- release->major = FIELD_GET(CSS_SW_VERSION_UC_MAJOR, css->sw_version);
- release->minor = FIELD_GET(CSS_SW_VERSION_UC_MINOR, css->sw_version);
- release->patch = FIELD_GET(CSS_SW_VERSION_UC_PATCH, css->sw_version);
+ release->major = FIELD_GET(CSS_SW_VERSION_UC_MAJOR, css->guc_info.sw_version);
+ release->minor = FIELD_GET(CSS_SW_VERSION_UC_MINOR, css->guc_info.sw_version);
+ release->patch = FIELD_GET(CSS_SW_VERSION_UC_PATCH, css->guc_info.sw_version);
if (uc_fw->type == XE_UC_FW_TYPE_GUC)
- return guc_read_css_info(uc_fw, css);
+ return guc_read_css_info(uc_fw, &css->guc_info);
return 0;
}
diff --git a/drivers/gpu/drm/xe/xe_uc_fw_abi.h b/drivers/gpu/drm/xe/xe_uc_fw_abi.h
index 87ade41209d0..3c9a63d13032 100644
--- a/drivers/gpu/drm/xe/xe_uc_fw_abi.h
+++ b/drivers/gpu/drm/xe/xe_uc_fw_abi.h
@@ -44,6 +44,39 @@
* in fw. So driver will load a truncated firmware in this case.
*/
+struct uc_css_rsa_info {
+ u32 key_size_dw;
+ u32 modulus_size_dw;
+ u32 exponent_size_dw;
+} __packed;
+
+struct uc_css_guc_info {
+ u32 time;
+#define CSS_TIME_HOUR (0xFF << 0)
+#define CSS_TIME_MIN (0xFF << 8)
+#define CSS_TIME_SEC (0xFFFF << 16)
+ u32 reserved0[5];
+ u32 sw_version;
+#define CSS_SW_VERSION_UC_MAJOR (0xFF << 16)
+#define CSS_SW_VERSION_UC_MINOR (0xFF << 8)
+#define CSS_SW_VERSION_UC_PATCH (0xFF << 0)
+ u32 submission_version;
+ u32 reserved1[11];
+ u32 header_info;
+#define CSS_HEADER_INFO_SVN (0xFF)
+#define CSS_HEADER_INFO_COPY_VALID (0x1 << 31)
+ u32 private_data_size;
+ u32 ukernel_info;
+#define CSS_UKERNEL_INFO_DEVICEID (0xFFFF << 16)
+#define CSS_UKERNEL_INFO_PRODKEY (0xFF << 8)
+#define CSS_UKERNEL_INFO_BUILDTYPE (0x3 << 2)
+#define CSS_UKERNEL_INFO_BUILDTYPE_PROD 0
+#define CSS_UKERNEL_INFO_BUILDTYPE_PREPROD 1
+#define CSS_UKERNEL_INFO_BUILDTYPE_DEBUG 2
+#define CSS_UKERNEL_INFO_ENCSTATUS (0x1 << 1)
+#define CSS_UKERNEL_INFO_COPY_VALID (0x1 << 0)
+} __packed;
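
The bitfield masks above are meant to be consumed with FIELD_GET(), as guc_read_css_info() does for the version words elsewhere in this series. A purely illustrative helper decoding the @time word:

	#include <linux/bitfield.h>

	/* Hypothetical: split the CSS build time into hour/minute/second */
	static void css_time_decode(u32 time, u32 *hh, u32 *mm, u32 *ss)
	{
		*hh = FIELD_GET(CSS_TIME_HOUR, time);
		*mm = FIELD_GET(CSS_TIME_MIN, time);
		*ss = FIELD_GET(CSS_TIME_SEC, time);
	}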
+
struct uc_css_header {
u32 module_type;
/*
@@ -52,36 +85,21 @@ struct uc_css_header {
*/
u32 header_size_dw;
u32 header_version;
- u32 module_id;
+ u32 reserved0;
u32 module_vendor;
u32 date;
-#define CSS_DATE_DAY (0xFF << 0)
-#define CSS_DATE_MONTH (0xFF << 8)
-#define CSS_DATE_YEAR (0xFFFF << 16)
+#define CSS_DATE_DAY (0xFF << 0)
+#define CSS_DATE_MONTH (0xFF << 8)
+#define CSS_DATE_YEAR (0xFFFF << 16)
u32 size_dw; /* uCode plus header_size_dw */
- u32 key_size_dw;
- u32 modulus_size_dw;
- u32 exponent_size_dw;
- u32 time;
-#define CSS_TIME_HOUR (0xFF << 0)
-#define CSS_DATE_MIN (0xFF << 8)
-#define CSS_DATE_SEC (0xFFFF << 16)
- char username[8];
- char buildnumber[12];
- u32 sw_version;
-#define CSS_SW_VERSION_UC_MAJOR (0xFF << 16)
-#define CSS_SW_VERSION_UC_MINOR (0xFF << 8)
-#define CSS_SW_VERSION_UC_PATCH (0xFF << 0)
union {
- u32 submission_version; /* only applies to GuC */
- u32 reserved2;
+ u32 reserved1[3];
+ struct uc_css_rsa_info rsa_info;
};
- u32 reserved0[12];
union {
- u32 private_data_size; /* only applies to GuC */
- u32 reserved1;
+ u32 reserved2[22];
+ struct uc_css_guc_info guc_info;
};
- u32 header_info;
} __packed;
static_assert(sizeof(struct uc_css_header) == 128);
@@ -318,4 +336,70 @@ struct gsc_manifest_header {
u32 exponent_size; /* in dwords */
} __packed;
+/**
+ * DOC: Late binding Firmware Layout
+ *
+ * The late binding binary starts with an FPT header, which contains the
+ * locations of the various partitions of the binary. Here we're interested
+ * in finding the manifest version. To get to it, we first locate the CPD
+ * header; one of the entries in the CPD header points to the manifest
+ * header, which contains the version.
+ *
+ * +================================================+
+ * | FPT Header |
+ * +================================================+
+ * | FPT entries[] |
+ * | entry1 |
+ * | ... |
+ * | entryX |
+ * | "LTES" |
+ * | ... |
+ * | offset >-----------------------------|------o
+ * +================================================+ |
+ * |
+ * +================================================+ |
+ * | CPD Header |<-----o
+ * +================================================+
+ * | CPD entries[] |
+ * | entry1 |
+ * | ... |
+ * | entryX |
+ * | "LTES.man" |
+ * | ... |
+ * | offset >----------------------------|------o
+ * +================================================+ |
+ * |
+ * +================================================+ |
+ * | Manifest Header |<-----o
+ * | ... |
+ * | FW version |
+ * | ... |
+ * +================================================+
+ */
+
+/* FPT Headers */
+struct csc_fpt_header {
+ u32 header_marker;
+#define CSC_FPT_HEADER_MARKER 0x54504624
+ u32 num_of_entries;
+ u8 header_version;
+ u8 entry_version;
+ u8 header_length; /* in bytes */
+ u8 flags;
+ u16 ticks_to_add;
+ u16 tokens_to_add;
+ u32 uma_size;
+ u32 crc32;
+ struct gsc_version fitc_version;
+} __packed;
+
+struct csc_fpt_entry {
+ u8 name[4]; /* partition name */
+ u32 reserved1;
+ u32 offset; /* offset from beginning of CSE region */
+ u32 length; /* partition length in bytes */
+ u32 reserved2[3];
+ u32 partition_flags;
+} __packed;
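
Given the layout in the DOC block above, locating the "LTES" partition is a linear scan of the FPT entry table. A minimal sketch, assuming the entries immediately follow the header (an assumption; the real parser may differ):

	/* Hypothetical lookup: find an FPT entry by its 4-byte partition name */
	static const struct csc_fpt_entry *
	csc_fpt_lookup_entry(const struct csc_fpt_header *hdr, const char name[4])
	{
		const struct csc_fpt_entry *entry;
		u32 i;

		if (hdr->header_marker != CSC_FPT_HEADER_MARKER)
			return NULL;	/* not an FPT header */

		/* Assumed: the entry table starts right after the header */
		entry = (const void *)((const u8 *)hdr + hdr->header_length);
		for (i = 0; i < hdr->num_of_entries; i++, entry++)
			if (!memcmp(entry->name, name, sizeof(entry->name)))
				return entry;

		return NULL;
	}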
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_uc_fw_types.h b/drivers/gpu/drm/xe/xe_uc_fw_types.h
index 914026015019..77a1dcf8b4ed 100644
--- a/drivers/gpu/drm/xe/xe_uc_fw_types.h
+++ b/drivers/gpu/drm/xe/xe_uc_fw_types.h
@@ -147,6 +147,9 @@ struct xe_uc_fw {
/** @private_data_size: size of private data found in uC css header */
u32 private_data_size;
+
+ /** @build_type: Firmware build type (see CSS_UKERNEL_INFO_BUILDTYPE for definitions) */
+ u32 build_type;
};
#endif
diff --git a/drivers/gpu/drm/xe/xe_userptr.c b/drivers/gpu/drm/xe/xe_userptr.c
new file mode 100644
index 000000000000..91d09af71ced
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_userptr.c
@@ -0,0 +1,319 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include "xe_userptr.h"
+
+#include <linux/mm.h>
+
+#include "xe_trace_bo.h"
+
+/**
+ * xe_vma_userptr_check_repin() - Advisory check for repin needed
+ * @uvma: The userptr vma
+ *
+ * Check if the userptr vma has been invalidated since last successful
+ * repin. The check is advisory only and the function can be called
+ * without the vm->svm.gpusvm.notifier_lock held. There is no guarantee that the
+ * vma userptr will remain valid after a lockless check, so typically
+ * the call needs to be followed by a proper check under the notifier_lock.
+ *
+ * Return: 0 if userptr vma is valid, -EAGAIN otherwise; repin recommended.
+ */
+int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma)
+{
+ return mmu_interval_check_retry(&uvma->userptr.notifier,
+ uvma->userptr.pages.notifier_seq) ?
+ -EAGAIN : 0;
+}
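
Since the check is advisory, callers typically use it as a cheap early-out and then re-check under the notifier lock; a sketch of the pattern (variable names illustrative):

	/* Lockless early-out; may race with the notifier */
	if (xe_vma_userptr_check_repin(uvma))
		return -EAGAIN;

	/* ... prepare state ... */

	/* Authoritative re-check under the gpusvm notifier lock */
	down_read(&vm->svm.gpusvm.notifier_lock);
	err = __xe_vm_userptr_needs_repin(vm);
	up_read(&vm->svm.gpusvm.notifier_lock);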
+
+/**
+ * __xe_vm_userptr_needs_repin() - Check whether the VM does have userptrs
+ * that need repinning.
+ * @vm: The VM.
+ *
+ * This function checks whether the VM has userptrs that need repinning,
+ * and provides a release-type barrier on the svm.gpusvm.notifier_lock after
+ * checking.
+ *
+ * Return: 0 if there are no userptrs needing repinning, -EAGAIN if there are.
+ */
+int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
+{
+ lockdep_assert_held_read(&vm->svm.gpusvm.notifier_lock);
+
+ return (list_empty(&vm->userptr.repin_list) &&
+ list_empty(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
+}
+
+int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma)
+{
+ struct xe_vma *vma = &uvma->vma;
+ struct xe_vm *vm = xe_vma_vm(vma);
+ struct xe_device *xe = vm->xe;
+ struct drm_gpusvm_ctx ctx = {
+ .read_only = xe_vma_read_only(vma),
+ };
+
+ lockdep_assert_held(&vm->lock);
+ xe_assert(xe, xe_vma_is_userptr(vma));
+
+ if (vma->gpuva.flags & XE_VMA_DESTROYED)
+ return 0;
+
+ return drm_gpusvm_get_pages(&vm->svm.gpusvm, &uvma->userptr.pages,
+ uvma->userptr.notifier.mm,
+ &uvma->userptr.notifier,
+ xe_vma_userptr(vma),
+ xe_vma_userptr(vma) + xe_vma_size(vma),
+ &ctx);
+}
+
+static void __vma_userptr_invalidate(struct xe_vm *vm, struct xe_userptr_vma *uvma)
+{
+ struct xe_userptr *userptr = &uvma->userptr;
+ struct xe_vma *vma = &uvma->vma;
+ struct dma_resv_iter cursor;
+ struct dma_fence *fence;
+ struct drm_gpusvm_ctx ctx = {
+ .in_notifier = true,
+ .read_only = xe_vma_read_only(vma),
+ };
+ long err;
+
+ /*
+ * Tell exec and rebind worker they need to repin and rebind this
+ * userptr.
+ */
+ if (!xe_vm_in_fault_mode(vm) &&
+ !(vma->gpuva.flags & XE_VMA_DESTROYED)) {
+ spin_lock(&vm->userptr.invalidated_lock);
+ list_move_tail(&userptr->invalidate_link,
+ &vm->userptr.invalidated);
+ spin_unlock(&vm->userptr.invalidated_lock);
+ }
+
+ /*
+ * Preempt fences turn into schedule disables, pipeline these.
+ * Note that even in fault mode, we need to wait for binds and
+ * unbinds to complete, and those are attached as BOOKMARK fences
+ * to the vm.
+ */
+ dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
+ DMA_RESV_USAGE_BOOKKEEP);
+ dma_resv_for_each_fence_unlocked(&cursor, fence)
+ dma_fence_enable_sw_signaling(fence);
+ dma_resv_iter_end(&cursor);
+
+ err = dma_resv_wait_timeout(xe_vm_resv(vm),
+ DMA_RESV_USAGE_BOOKKEEP,
+ false, MAX_SCHEDULE_TIMEOUT);
+ XE_WARN_ON(err <= 0);
+
+ if (xe_vm_in_fault_mode(vm) && userptr->initial_bind) {
+ err = xe_vm_invalidate_vma(vma);
+ XE_WARN_ON(err);
+ }
+
+ drm_gpusvm_unmap_pages(&vm->svm.gpusvm, &uvma->userptr.pages,
+ xe_vma_size(vma) >> PAGE_SHIFT, &ctx);
+}
+
+static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq)
+{
+ struct xe_userptr_vma *uvma = container_of(mni, typeof(*uvma), userptr.notifier);
+ struct xe_vma *vma = &uvma->vma;
+ struct xe_vm *vm = xe_vma_vm(vma);
+
+ xe_assert(vm->xe, xe_vma_is_userptr(vma));
+ trace_xe_vma_userptr_invalidate(vma);
+
+ if (!mmu_notifier_range_blockable(range))
+ return false;
+
+ vm_dbg(&xe_vma_vm(vma)->xe->drm,
+ "NOTIFIER: addr=0x%016llx, range=0x%016llx",
+ xe_vma_start(vma), xe_vma_size(vma));
+
+ down_write(&vm->svm.gpusvm.notifier_lock);
+ mmu_interval_set_seq(mni, cur_seq);
+
+ __vma_userptr_invalidate(vm, uvma);
+ up_write(&vm->svm.gpusvm.notifier_lock);
+ trace_xe_vma_userptr_invalidate_complete(vma);
+
+ return true;
+}
+
+static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
+ .invalidate = vma_userptr_invalidate,
+};
+
+#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
+/**
+ * xe_vma_userptr_force_invalidate() - force invalidate a userptr
+ * @uvma: The userptr vma to invalidate
+ *
+ * Perform a forced userptr invalidation for testing purposes.
+ */
+void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma)
+{
+ struct xe_vm *vm = xe_vma_vm(&uvma->vma);
+
+ /* Protect against concurrent userptr pinning */
+ lockdep_assert_held(&vm->lock);
+ /* Protect against concurrent notifiers */
+ lockdep_assert_held(&vm->svm.gpusvm.notifier_lock);
+ /*
+ * Protect against concurrent instances of this function and
+ * the critical exec sections
+ */
+ xe_vm_assert_held(vm);
+
+ if (!mmu_interval_read_retry(&uvma->userptr.notifier,
+ uvma->userptr.pages.notifier_seq))
+ uvma->userptr.pages.notifier_seq -= 2;
+ __vma_userptr_invalidate(vm, uvma);
+}
+#endif
+
+int xe_vm_userptr_pin(struct xe_vm *vm)
+{
+ struct xe_userptr_vma *uvma, *next;
+ int err = 0;
+
+ xe_assert(vm->xe, !xe_vm_in_fault_mode(vm));
+ lockdep_assert_held_write(&vm->lock);
+
+ /* Collect invalidated userptrs */
+ spin_lock(&vm->userptr.invalidated_lock);
+ xe_assert(vm->xe, list_empty(&vm->userptr.repin_list));
+ list_for_each_entry_safe(uvma, next, &vm->userptr.invalidated,
+ userptr.invalidate_link) {
+ list_del_init(&uvma->userptr.invalidate_link);
+ list_add_tail(&uvma->userptr.repin_link,
+ &vm->userptr.repin_list);
+ }
+ spin_unlock(&vm->userptr.invalidated_lock);
+
+ /* Pin and move to bind list */
+ list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
+ userptr.repin_link) {
+ err = xe_vma_userptr_pin_pages(uvma);
+ if (err == -EFAULT) {
+ list_del_init(&uvma->userptr.repin_link);
+ /*
+ * We might have already done the pin once already, but
+ * then had to retry before the re-bind happened, due
+			 * then had to retry before the re-bind happened, due to
+ * meantime the userptr got dinged by the notifier such
+ * that we need to revalidate here, but this time we hit
+ * the EFAULT. In such a case make sure we remove
+ * ourselves from the rebind list to avoid going down in
+ * flames.
+ */
+ if (!list_empty(&uvma->vma.combined_links.rebind))
+ list_del_init(&uvma->vma.combined_links.rebind);
+
+ /* Wait for pending binds */
+ xe_vm_lock(vm, false);
+ dma_resv_wait_timeout(xe_vm_resv(vm),
+ DMA_RESV_USAGE_BOOKKEEP,
+ false, MAX_SCHEDULE_TIMEOUT);
+
+ down_read(&vm->svm.gpusvm.notifier_lock);
+ err = xe_vm_invalidate_vma(&uvma->vma);
+ up_read(&vm->svm.gpusvm.notifier_lock);
+ xe_vm_unlock(vm);
+ if (err)
+ break;
+ } else {
+ if (err)
+ break;
+
+ list_del_init(&uvma->userptr.repin_link);
+ list_move_tail(&uvma->vma.combined_links.rebind,
+ &vm->rebind_list);
+ }
+ }
+
+ if (err) {
+ down_write(&vm->svm.gpusvm.notifier_lock);
+ spin_lock(&vm->userptr.invalidated_lock);
+ list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
+ userptr.repin_link) {
+ list_del_init(&uvma->userptr.repin_link);
+ list_move_tail(&uvma->userptr.invalidate_link,
+ &vm->userptr.invalidated);
+ }
+ spin_unlock(&vm->userptr.invalidated_lock);
+ up_write(&vm->svm.gpusvm.notifier_lock);
+ }
+ return err;
+}
+
+/**
+ * xe_vm_userptr_check_repin() - Check whether the VM might have userptrs
+ * that need repinning.
+ * @vm: The VM.
+ *
+ * This function does an advisory check for whether the VM has userptrs that
+ * need repinning.
+ *
+ * Return: 0 if there are no indications of userptrs needing repinning,
+ * -EAGAIN if there are.
+ */
+int xe_vm_userptr_check_repin(struct xe_vm *vm)
+{
+ return (list_empty_careful(&vm->userptr.repin_list) &&
+ list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
+}
+
+int xe_userptr_setup(struct xe_userptr_vma *uvma, unsigned long start,
+ unsigned long range)
+{
+ struct xe_userptr *userptr = &uvma->userptr;
+ int err;
+
+ INIT_LIST_HEAD(&userptr->invalidate_link);
+ INIT_LIST_HEAD(&userptr->repin_link);
+
+ err = mmu_interval_notifier_insert(&userptr->notifier, current->mm,
+ start, range,
+ &vma_userptr_notifier_ops);
+ if (err)
+ return err;
+
+ userptr->pages.notifier_seq = LONG_MAX;
+
+ return 0;
+}
+
+void xe_userptr_remove(struct xe_userptr_vma *uvma)
+{
+ struct xe_vm *vm = xe_vma_vm(&uvma->vma);
+ struct xe_userptr *userptr = &uvma->userptr;
+
+ drm_gpusvm_free_pages(&vm->svm.gpusvm, &uvma->userptr.pages,
+ xe_vma_size(&uvma->vma) >> PAGE_SHIFT);
+
+ /*
+ * Since userptr pages are not pinned, we can't remove
+ * the notifier until we're sure the GPU is not accessing
+ * them anymore
+ */
+ mmu_interval_notifier_remove(&userptr->notifier);
+}
+
+void xe_userptr_destroy(struct xe_userptr_vma *uvma)
+{
+ struct xe_vm *vm = xe_vma_vm(&uvma->vma);
+
+ spin_lock(&vm->userptr.invalidated_lock);
+ xe_assert(vm->xe, list_empty(&uvma->userptr.repin_link));
+ list_del(&uvma->userptr.invalidate_link);
+ spin_unlock(&vm->userptr.invalidated_lock);
+}
diff --git a/drivers/gpu/drm/xe/xe_userptr.h b/drivers/gpu/drm/xe/xe_userptr.h
new file mode 100644
index 000000000000..ef801234991e
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_userptr.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_USERPTR_H_
+#define _XE_USERPTR_H_
+
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/scatterlist.h>
+#include <linux/spinlock.h>
+
+#include <drm/drm_gpusvm.h>
+
+struct xe_vm;
+struct xe_vma;
+struct xe_userptr_vma;
+
+/** struct xe_userptr_vm - User pointer VM level state */
+struct xe_userptr_vm {
+ /**
+ * @userptr.repin_list: list of VMAs which are user pointers,
+	 * and need repinning. Protected by @lock.
+ */
+ struct list_head repin_list;
+ /**
+ * @userptr.invalidated_lock: Protects the
+ * @userptr.invalidated list.
+ */
+ spinlock_t invalidated_lock;
+ /**
+	 * @userptr.invalidated: List of invalidated userptrs, not yet
+	 * picked up for revalidation. Protected by the
+	 * @invalidated_lock. Removing items from the list
+ * additionally requires @lock in write mode, and adding
+ * items to the list requires either the @svm.gpusvm.notifier_lock in
+ * write mode, OR @lock in write mode.
+ */
+ struct list_head invalidated;
+};
+
+/** struct xe_userptr - User pointer */
+struct xe_userptr {
+ /** @invalidate_link: Link for the vm::userptr.invalidated list */
+ struct list_head invalidate_link;
+	/** @repin_link: link into VM repin list if userptr. */
+ struct list_head repin_link;
+ /**
+ * @pages: gpusvm pages for this user pointer.
+ */
+ struct drm_gpusvm_pages pages;
+ /**
+ * @notifier: MMU notifier for user pointer (invalidation call back)
+ */
+ struct mmu_interval_notifier notifier;
+
+ /**
+ * @initial_bind: user pointer has been bound at least once.
+ * write: vm->svm.gpusvm.notifier_lock in read mode and vm->resv held.
+ * read: vm->svm.gpusvm.notifier_lock in write mode or vm->resv held.
+ */
+ bool initial_bind;
+#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
+ u32 divisor;
+#endif
+};
+
+#if IS_ENABLED(CONFIG_DRM_GPUSVM)
+void xe_userptr_remove(struct xe_userptr_vma *uvma);
+int xe_userptr_setup(struct xe_userptr_vma *uvma, unsigned long start,
+ unsigned long range);
+void xe_userptr_destroy(struct xe_userptr_vma *uvma);
+
+int xe_vm_userptr_pin(struct xe_vm *vm);
+int __xe_vm_userptr_needs_repin(struct xe_vm *vm);
+int xe_vm_userptr_check_repin(struct xe_vm *vm);
+int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma);
+int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma);
+#else
+static inline void xe_userptr_remove(struct xe_userptr_vma *uvma) {}
+
+static inline int xe_userptr_setup(struct xe_userptr_vma *uvma,
+ unsigned long start, unsigned long range)
+{
+ return -ENODEV;
+}
+
+static inline void xe_userptr_destroy(struct xe_userptr_vma *uvma) {}
+
+static inline int xe_vm_userptr_pin(struct xe_vm *vm) { return 0; }
+static inline int __xe_vm_userptr_needs_repin(struct xe_vm *vm) { return 0; }
+static inline int xe_vm_userptr_check_repin(struct xe_vm *vm) { return 0; }
+static inline int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma) { return -ENODEV; }
+static inline int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma) { return -ENODEV; }
+#endif
+
+#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
+void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma);
+#else
+static inline void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma)
+{
+}
+#endif
+#endif
diff --git a/drivers/gpu/drm/xe/xe_validation.c b/drivers/gpu/drm/xe/xe_validation.c
new file mode 100644
index 000000000000..826cd09966ef
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_validation.c
@@ -0,0 +1,278 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+#include "xe_bo.h"
+#include <drm/drm_exec.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gpuvm.h>
+
+#include "xe_assert.h"
+#include "xe_validation.h"
+
+#ifdef CONFIG_DRM_XE_DEBUG
+/**
+ * xe_validation_assert_exec() - Assert that the drm_exec pointer is suitable
+ * for validation.
+ * @xe: Pointer to the xe device.
+ * @exec: The drm_exec pointer to check.
+ * @obj: Pointer to the object subject to validation.
+ *
+ * NULL exec pointers are not allowed.
+ * For XE_VALIDATION_UNIMPLEMENTED, no checking.
+ * For XE_VALIDATION_OPT_OUT, check that the caller is a kunit test.
+ * For XE_VALIDATION_UNSUPPORTED, check that the object subject to
+ * validation is a dma-buf, for which support for ww locking is
+ * not in place in the dma-buf layer.
+ */
+void xe_validation_assert_exec(const struct xe_device *xe,
+ const struct drm_exec *exec,
+ const struct drm_gem_object *obj)
+{
+ xe_assert(xe, exec);
+ if (IS_ERR(exec)) {
+ switch (PTR_ERR(exec)) {
+ case __XE_VAL_UNIMPLEMENTED:
+ break;
+ case __XE_VAL_UNSUPPORTED:
+ xe_assert(xe, !!obj->dma_buf);
+ break;
+#if IS_ENABLED(CONFIG_KUNIT)
+ case __XE_VAL_OPT_OUT:
+ xe_assert(xe, current->kunit_test);
+ break;
+#endif
+ default:
+ xe_assert(xe, false);
+ }
+ }
+}
+#endif
+
+static int xe_validation_lock(struct xe_validation_ctx *ctx)
+{
+ struct xe_validation_device *val = ctx->val;
+ int ret = 0;
+
+ if (ctx->val_flags.interruptible) {
+ if (ctx->request_exclusive)
+ ret = down_write_killable(&val->lock);
+ else
+ ret = down_read_interruptible(&val->lock);
+ } else {
+ if (ctx->request_exclusive)
+ down_write(&val->lock);
+ else
+ down_read(&val->lock);
+ }
+
+ if (!ret) {
+ ctx->lock_held = true;
+ ctx->lock_held_exclusive = ctx->request_exclusive;
+ }
+
+ return ret;
+}
+
+static int xe_validation_trylock(struct xe_validation_ctx *ctx)
+{
+ struct xe_validation_device *val = ctx->val;
+ bool locked;
+
+ if (ctx->request_exclusive)
+ locked = down_write_trylock(&val->lock);
+ else
+ locked = down_read_trylock(&val->lock);
+
+ if (locked) {
+ ctx->lock_held = true;
+ ctx->lock_held_exclusive = ctx->request_exclusive;
+ }
+
+ return locked ? 0 : -EWOULDBLOCK;
+}
+
+static void xe_validation_unlock(struct xe_validation_ctx *ctx)
+{
+ if (!ctx->lock_held)
+ return;
+
+ if (ctx->lock_held_exclusive)
+ up_write(&ctx->val->lock);
+ else
+ up_read(&ctx->val->lock);
+
+ ctx->lock_held = false;
+}
+
+/**
+ * xe_validation_ctx_init() - Initialize an xe_validation_ctx
+ * @ctx: The xe_validation_ctx to initialize.
+ * @val: The xe_validation_device representing the validation domain.
+ * @exec: The struct drm_exec to use for the transaction. May be NULL.
+ * @flags: The flags to use for initialization.
+ *
+ * Initialize and lock an xe_validation transaction using the validation domain
+ * represented by @val. Also initialize the drm_exec object forwarding parts of
+ * @flags to the drm_exec initialization. The @flags.exclusive flag should
+ * typically be set to false to avoid locking out other validators from the
+ * domain until an OOM is hit. For testing or final-attempt purposes it can,
+ * however, be set to true.
+ *
+ * Return: %0 on success, %-EINTR if interruptible initial locking failed with a
+ * signal pending. If @flags.no_block is set to true, a failed trylock
+ * returns %-EWOULDBLOCK.
+ */
+int xe_validation_ctx_init(struct xe_validation_ctx *ctx, struct xe_validation_device *val,
+ struct drm_exec *exec, const struct xe_val_flags flags)
+{
+ int ret;
+
+ ctx->exec = exec;
+ ctx->val = val;
+ ctx->lock_held = false;
+ ctx->lock_held_exclusive = false;
+ ctx->request_exclusive = flags.exclusive;
+ ctx->val_flags = flags;
+ ctx->exec_flags = 0;
+ ctx->nr = 0;
+
+ if (flags.no_block)
+ ret = xe_validation_trylock(ctx);
+ else
+ ret = xe_validation_lock(ctx);
+ if (ret)
+ return ret;
+
+ if (exec) {
+ if (flags.interruptible)
+ ctx->exec_flags |= DRM_EXEC_INTERRUPTIBLE_WAIT;
+ if (flags.exec_ignore_duplicates)
+ ctx->exec_flags |= DRM_EXEC_IGNORE_DUPLICATES;
+ drm_exec_init(exec, ctx->exec_flags, ctx->nr);
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
+/*
+ * This abuses both drm_exec and ww_mutex internals and should be
+ * replaced by checking for -EDEADLK when we can make TTM
+ * stop converting -EDEADLK to -ENOMEM.
+ * An alternative is to not have exhaustive eviction with
+ * CONFIG_DEBUG_WW_MUTEX_SLOWPATH until that happens.
+ */
+static bool xe_validation_contention_injected(struct drm_exec *exec)
+{
+ return !!exec->ticket.contending_lock;
+}
+
+#else
+
+static bool xe_validation_contention_injected(struct drm_exec *exec)
+{
+ return false;
+}
+
+#endif
+
+static bool __xe_validation_should_retry(struct xe_validation_ctx *ctx, int ret)
+{
+ if (ret == -ENOMEM &&
+ ((ctx->request_exclusive &&
+ xe_validation_contention_injected(ctx->exec)) ||
+ !ctx->request_exclusive)) {
+ ctx->request_exclusive = true;
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * xe_validation_exec_lock() - Perform drm_gpuvm_exec_lock within a validation
+ * transaction.
+ * @ctx: An uninitialized xe_validation_ctx.
+ * @vm_exec: An initialized struct vm_exec.
+ * @val: The validation domain.
+ *
+ * The drm_gpuvm_exec_lock() function internally initializes its drm_exec
+ * transaction and therefore doesn't lend itself very well to be using
+ * xe_validation_ctx_init(). Provide a helper that takes an uninitialized
+ * xe_validation_ctx and calls drm_gpuvm_exec_lock() with OOM retry.
+ *
+ * Return: %0 on success, negative error code on failure.
+ */
+int xe_validation_exec_lock(struct xe_validation_ctx *ctx,
+ struct drm_gpuvm_exec *vm_exec,
+ struct xe_validation_device *val)
+{
+ int ret;
+
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->exec = &vm_exec->exec;
+ ctx->exec_flags = vm_exec->flags;
+ ctx->val = val;
+ if (ctx->exec_flags & DRM_EXEC_INTERRUPTIBLE_WAIT)
+ ctx->val_flags.interruptible = 1;
+ if (ctx->exec_flags & DRM_EXEC_IGNORE_DUPLICATES)
+ ctx->val_flags.exec_ignore_duplicates = 1;
+retry:
+ ret = xe_validation_lock(ctx);
+ if (ret)
+ return ret;
+
+ ret = drm_gpuvm_exec_lock(vm_exec);
+ if (ret) {
+ xe_validation_unlock(ctx);
+ if (__xe_validation_should_retry(ctx, ret))
+ goto retry;
+ }
+
+ return ret;
+}
+
+/**
+ * xe_validation_ctx_fini() - Finalize a validation transaction
+ * @ctx: The Validation transaction to finalize.
+ *
+ * Finalize a validation transaction and its related drm_exec transaction.
+ */
+void xe_validation_ctx_fini(struct xe_validation_ctx *ctx)
+{
+ if (ctx->exec)
+ drm_exec_fini(ctx->exec);
+ xe_validation_unlock(ctx);
+}
+
+/**
+ * xe_validation_should_retry() - Determine if a validation transaction should retry
+ * @ctx: The validation transaction.
+ * @ret: Pointer to a return value variable.
+ *
+ * Determines whether a validation transaction should retry based on the
+ * internal transaction state and the return value pointed to by @ret.
+ * If a validation should be retried, the transaction is prepared for that,
+ * and the validation lock might be re-taken in exclusive mode, and *@ret
+ * is set to %0. If the re-locking fails, typically due to an interruptible
+ * wait with a signal pending, *@ret is instead set to -EINTR and the
+ * function returns %false.
+ *
+ * Return: %true if validation should be retried, %false otherwise.
+ */
+bool xe_validation_should_retry(struct xe_validation_ctx *ctx, int *ret)
+{
+ if (__xe_validation_should_retry(ctx, *ret)) {
+ drm_exec_fini(ctx->exec);
+ *ret = 0;
+ if (ctx->request_exclusive != ctx->lock_held_exclusive) {
+ xe_validation_unlock(ctx);
+ *ret = xe_validation_lock(ctx);
+ }
+ drm_exec_init(ctx->exec, ctx->exec_flags, ctx->nr);
+ return !*ret;
+ }
+
+ return false;
+}
diff --git a/drivers/gpu/drm/xe/xe_validation.h b/drivers/gpu/drm/xe/xe_validation.h
new file mode 100644
index 000000000000..fec331d791e7
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_validation.h
@@ -0,0 +1,192 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+#ifndef _XE_VALIDATION_H_
+#define _XE_VALIDATION_H_
+
+#include <linux/dma-resv.h>
+#include <linux/types.h>
+#include <linux/rwsem.h>
+
+struct drm_exec;
+struct drm_gem_object;
+struct drm_gpuvm_exec;
+struct xe_device;
+
+#ifdef CONFIG_PROVE_LOCKING
+/**
+ * xe_validation_lockdep() - Assert that a drm_exec locking transaction can
+ * be initialized at this point.
+ */
+static inline void xe_validation_lockdep(void)
+{
+ struct ww_acquire_ctx ticket;
+
+ ww_acquire_init(&ticket, &reservation_ww_class);
+ ww_acquire_fini(&ticket);
+}
+#else
+static inline void xe_validation_lockdep(void)
+{
+}
+#endif
+
+/*
+ * Various values of the drm_exec pointer where we've not (yet)
+ * implemented full ww locking.
+ *
+ * XE_VALIDATION_UNIMPLEMENTED means implementation is pending.
+ * A lockdep check is made to assure that a drm_exec locking
+ * transaction can actually take place where the macro is
+ * used. If this asserts, the exec pointer needs to be assigned
+ * higher up in the callchain and passed down.
+ *
+ * XE_VALIDATION_UNSUPPORTED is for dma-buf code only where
+ * the dma-buf layer doesn't support WW locking.
+ *
+ * XE_VALIDATION_OPT_OUT is for simplification of kunit tests where
+ * exhaustive eviction isn't necessary.
+ */
+#define __XE_VAL_UNIMPLEMENTED -EINVAL
+#define XE_VALIDATION_UNIMPLEMENTED (xe_validation_lockdep(), \
+ (struct drm_exec *)ERR_PTR(__XE_VAL_UNIMPLEMENTED))
+
+#define __XE_VAL_UNSUPPORTED -EOPNOTSUPP
+#define XE_VALIDATION_UNSUPPORTED ((struct drm_exec *)ERR_PTR(__XE_VAL_UNSUPPORTED))
+
+#define __XE_VAL_OPT_OUT -ENOMEM
+#define XE_VALIDATION_OPT_OUT (xe_validation_lockdep(), \
+ (struct drm_exec *)ERR_PTR(__XE_VAL_OPT_OUT))
+#ifdef CONFIG_DRM_XE_DEBUG
+void xe_validation_assert_exec(const struct xe_device *xe, const struct drm_exec *exec,
+ const struct drm_gem_object *obj);
+#else
+#define xe_validation_assert_exec(_xe, _exec, _obj) \
+ do { \
+ (void)_xe; (void)_exec; (void)_obj; \
+ } while (0)
+#endif
+
+/**
+ * struct xe_validation_device - The domain for exhaustive eviction
+ * @lock: The lock used to exclude other processes from allocating graphics memory
+ *
+ * The struct xe_validation_device represents the domain for which we want to use
+ * exhaustive eviction. The @lock is typically grabbed in read mode for allocations
+ * but when graphics memory allocation fails, it is retried with the write mode held.
+ */
+struct xe_validation_device {
+ struct rw_semaphore lock;
+};
+
+/**
+ * struct xe_val_flags - Flags for xe_validation_ctx_init().
+ * @exclusive: Start the validation transaction by locking out all other validators.
+ * @no_block: Don't block on initialization.
+ * @interruptible: Block interruptibly if blocking. Implies initializing the drm_exec
+ * context with the DRM_EXEC_INTERRUPTIBLE_WAIT flag.
+ * @exec_ignore_duplicates: Initialize the drm_exec context with the
+ * DRM_EXEC_IGNORE_DUPLICATES flag.
+ */
+struct xe_val_flags {
+ u32 exclusive :1;
+ u32 no_block :1;
+ u32 interruptible :1;
+ u32 exec_ignore_duplicates :1;
+};
+
+/**
+ * struct xe_validation_ctx - A struct drm_exec subclass with support for
+ * exhaustive eviction
+ * @exec: The drm_exec object base class. Note that we use a pointer instead of
+ * embedding to avoid diamond inheritance.
+ * @val: The exhaustive eviction domain.
+ * @val_flags: Copy of the struct xe_val_flags passed to xe_validation_ctx_init.
+ * @lock_held: Whether the domain lock is currently held.
+ * @lock_held_exclusive: Whether the domain lock is held in exclusive mode.
+ * @request_exclusive: Whether to lock exclusively (write mode) the next time
+ * the domain lock is locked.
+ * @exec_flags: The drm_exec flags used for drm_exec (re-)initialization.
+ * @nr: The drm_exec nr parameter used for drm_exec (re-)initialization.
+ */
+struct xe_validation_ctx {
+ struct drm_exec *exec;
+ struct xe_validation_device *val;
+ struct xe_val_flags val_flags;
+ bool lock_held;
+ bool lock_held_exclusive;
+ bool request_exclusive;
+ u32 exec_flags;
+ unsigned int nr;
+};
+
+int xe_validation_ctx_init(struct xe_validation_ctx *ctx, struct xe_validation_device *val,
+ struct drm_exec *exec, const struct xe_val_flags flags);
+
+int xe_validation_exec_lock(struct xe_validation_ctx *ctx, struct drm_gpuvm_exec *vm_exec,
+ struct xe_validation_device *val);
+
+void xe_validation_ctx_fini(struct xe_validation_ctx *ctx);
+
+bool xe_validation_should_retry(struct xe_validation_ctx *ctx, int *ret);
+
+/**
+ * xe_validation_retry_on_oom() - Retry on oom in an xe_validaton transaction
+ * @_ctx: Pointer to the xe_validation_ctx
+ * @_ret: The current error value possibly holding -ENOMEM
+ *
+ * Use this in way similar to drm_exec_retry_on_contention().
+ * If @_ret contains -ENOMEM the transaction is restarted once in a way that
+ * blocks other transactions and allows exhaustive eviction. If the transaction
+ * was already restarted once, the -ENOMEM is simply returned. May also set
+ * @_ret to -EINTR if not retrying and waits are interruptible.
+ * May only be used within a drm_exec_until_all_locked() loop.
+ */
+#define xe_validation_retry_on_oom(_ctx, _ret) \
+ do { \
+ if (xe_validation_should_retry(_ctx, _ret)) \
+ goto *__drm_exec_retry_ptr; \
+ } while (0)
+
+/**
+ * xe_validation_device_init - Initialize a struct xe_validation_device
+ * @val: The xe_validation_device to init.
+ */
+static inline void
+xe_validation_device_init(struct xe_validation_device *val)
+{
+ init_rwsem(&val->lock);
+}
+
+/*
+ * Make guard() and scoped_guard() work with xe_validation_ctx
+ * so that we can exit transactions without caring about the
+ * cleanup.
+ */
+DEFINE_CLASS(xe_validation, struct xe_validation_ctx *,
+ if (_T) xe_validation_ctx_fini(_T);,
+ ({_ret = xe_validation_ctx_init(_ctx, _val, _exec, _flags);
+ _ret ? NULL : _ctx; }),
+ struct xe_validation_ctx *_ctx, struct xe_validation_device *_val,
+ struct drm_exec *_exec, const struct xe_val_flags _flags, int _ret);
+static inline void *class_xe_validation_lock_ptr(class_xe_validation_t *_T)
+{return *_T; }
+#define class_xe_validation_is_conditional true
+
+/**
+ * xe_validation_guard() - An auto-cleanup xe_validation_ctx transaction
+ * @_ctx: The xe_validation_ctx.
+ * @_val: The xe_validation_device.
+ * @_exec: The struct drm_exec object
+ * @_flags: Flags for the xe_validation_ctx initialization.
+ * @_ret: Return in/out parameter. May be set by this macro. Typically 0 when called.
+ *
+ * This macro will initiate a drm_exec transaction with additional support for
+ * exhaustive eviction.
+ */
+#define xe_validation_guard(_ctx, _val, _exec, _flags, _ret) \
+ scoped_guard(xe_validation, _ctx, _val, _exec, _flags, _ret) \
+ drm_exec_until_all_locked(_exec)
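
A hedged usage sketch of the guard; the bo and device variables are illustrative, but the calls follow the pattern established in this series (including the exec argument now taken by xe_bo_validate()):

	struct xe_validation_ctx ctx;
	struct drm_exec exec;
	int ret = 0;

	xe_validation_guard(&ctx, &xe->val, &exec,
			    ((struct xe_val_flags) {.interruptible = true}), ret) {
		ret = drm_exec_lock_obj(&exec, &bo->ttm.base);
		drm_exec_retry_on_contention(&exec);
		if (ret)
			break;

		ret = xe_bo_validate(bo, NULL, false, &exec);
		drm_exec_retry_on_contention(&exec);
		xe_validation_retry_on_oom(&ctx, &ret);
	}
	/* the validation ctx and drm_exec are finalized on scope exit */
	return ret;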
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 5146999d27fa..0cacab20ff85 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -28,7 +28,6 @@
#include "xe_drm_client.h"
#include "xe_exec_queue.h"
#include "xe_gt_pagefault.h"
-#include "xe_gt_tlb_invalidation.h"
#include "xe_migrate.h"
#include "xe_pat.h"
#include "xe_pm.h"
@@ -38,9 +37,10 @@
#include "xe_res_cursor.h"
#include "xe_svm.h"
#include "xe_sync.h"
+#include "xe_tile.h"
+#include "xe_tlb_inval.h"
#include "xe_trace_bo.h"
#include "xe_wa.h"
-#include "xe_hmm.h"
static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
{
@@ -48,34 +48,17 @@ static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
}
/**
- * xe_vma_userptr_check_repin() - Advisory check for repin needed
- * @uvma: The userptr vma
+ * xe_vm_drm_exec_lock() - Lock the vm's resv with a drm_exec transaction
+ * @vm: The vm whose resv is to be locked.
+ * @exec: The drm_exec transaction.
*
- * Check if the userptr vma has been invalidated since last successful
- * repin. The check is advisory only and can the function can be called
- * without the vm->userptr.notifier_lock held. There is no guarantee that the
- * vma userptr will remain valid after a lockless check, so typically
- * the call needs to be followed by a proper check under the notifier_lock.
+ * Helper to lock the vm's resv as part of a drm_exec transaction.
*
- * Return: 0 if userptr vma is valid, -EAGAIN otherwise; repin recommended.
+ * Return: %0 on success. See drm_exec_lock_obj() for error codes.
*/
-int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma)
+int xe_vm_drm_exec_lock(struct xe_vm *vm, struct drm_exec *exec)
{
- return mmu_interval_check_retry(&uvma->userptr.notifier,
- uvma->userptr.notifier_seq) ?
- -EAGAIN : 0;
-}
-
-int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma)
-{
- struct xe_vma *vma = &uvma->vma;
- struct xe_vm *vm = xe_vma_vm(vma);
- struct xe_device *xe = vm->xe;
-
- lockdep_assert_held(&vm->lock);
- xe_assert(xe, xe_vma_is_userptr(vma));
-
- return xe_hmm_userptr_populate_range(uvma, false);
+ return drm_exec_lock_obj(exec, xe_vm_obj(vm));
}
static bool preempt_fences_waiting(struct xe_vm *vm)
@@ -227,6 +210,7 @@ int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
.num_fences = 1,
};
struct drm_exec *exec = &vm_exec.exec;
+ struct xe_validation_ctx ctx;
struct dma_fence *pfence;
int err;
bool wait;
@@ -234,7 +218,7 @@ int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
down_write(&vm->lock);
- err = drm_gpuvm_exec_lock(&vm_exec);
+ err = xe_validation_exec_lock(&ctx, &vm_exec, &vm->xe->val);
if (err)
goto out_up_write;
@@ -249,7 +233,7 @@ int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
++vm->preempt.num_exec_queues;
q->lr.pfence = pfence;
- down_read(&vm->userptr.notifier_lock);
+ xe_svm_notifier_lock(vm);
drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, pfence,
DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
@@ -263,10 +247,10 @@ int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
if (wait)
dma_fence_enable_sw_signaling(pfence);
- up_read(&vm->userptr.notifier_lock);
+ xe_svm_notifier_unlock(vm);
out_fini:
- drm_exec_fini(exec);
+ xe_validation_ctx_fini(&ctx);
out_up_write:
up_write(&vm->lock);
@@ -299,25 +283,6 @@ void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
up_write(&vm->lock);
}
-/**
- * __xe_vm_userptr_needs_repin() - Check whether the VM does have userptrs
- * that need repinning.
- * @vm: The VM.
- *
- * This function checks for whether the VM has userptrs that need repinning,
- * and provides a release-type barrier on the userptr.notifier_lock after
- * checking.
- *
- * Return: 0 if there are no userptrs needing repinning, -EAGAIN if there are.
- */
-int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
-{
- lockdep_assert_held_read(&vm->userptr.notifier_lock);
-
- return (list_empty(&vm->userptr.repin_list) &&
- list_empty(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
-}
-
#define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000
/**
@@ -349,39 +314,6 @@ void xe_vm_kill(struct xe_vm *vm, bool unlocked)
/* TODO: Inform user the VM is banned */
}
-/**
- * xe_vm_validate_should_retry() - Whether to retry after a validate error.
- * @exec: The drm_exec object used for locking before validation.
- * @err: The error returned from ttm_bo_validate().
- * @end: A ktime_t cookie that should be set to 0 before first use and
- * that should be reused on subsequent calls.
- *
- * With multiple active VMs, under memory pressure, it is possible that
- * ttm_bo_validate() run into -EDEADLK and in such case returns -ENOMEM.
- * Until ttm properly handles locking in such scenarios, best thing the
- * driver can do is retry with a timeout. Check if that is necessary, and
- * if so unlock the drm_exec's objects while keeping the ticket to prepare
- * for a rerun.
- *
- * Return: true if a retry after drm_exec_init() is recommended;
- * false otherwise.
- */
-bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end)
-{
- ktime_t cur;
-
- if (err != -ENOMEM)
- return false;
-
- cur = ktime_get();
- *end = *end ? : ktime_add_ms(cur, XE_VM_REBIND_RETRY_TIMEOUT_MS);
- if (!ktime_before(cur, *end))
- return false;
-
- msleep(20);
- return true;
-}
-
static int xe_gpuvm_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec)
{
struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm);
@@ -396,7 +328,7 @@ static int xe_gpuvm_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec)
if (!try_wait_for_completion(&vm->xe->pm_block))
return -EAGAIN;
- ret = xe_bo_validate(gem_to_xe_bo(vm_bo->obj), vm, false);
+ ret = xe_bo_validate(gem_to_xe_bo(vm_bo->obj), vm, false, exec);
if (ret)
return ret;
@@ -512,10 +444,10 @@ void xe_vm_resume_rebind_worker(struct xe_vm *vm)
static void preempt_rebind_work_func(struct work_struct *w)
{
struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work);
+ struct xe_validation_ctx ctx;
struct drm_exec exec;
unsigned int fence_count = 0;
LIST_HEAD(preempt_fences);
- ktime_t end = 0;
int err = 0;
long wait;
int __maybe_unused tries = 0;
@@ -543,18 +475,19 @@ retry:
goto out_unlock_outer;
}
- drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
+ err = xe_validation_ctx_init(&ctx, &vm->xe->val, &exec,
+ (struct xe_val_flags) {.interruptible = true});
+ if (err)
+ goto out_unlock_outer;
drm_exec_until_all_locked(&exec) {
bool done = false;
err = xe_preempt_work_begin(&exec, vm, &done);
drm_exec_retry_on_contention(&exec);
+ xe_validation_retry_on_oom(&ctx, &err);
if (err || done) {
- drm_exec_fini(&exec);
- if (err && xe_vm_validate_should_retry(&exec, err, &end))
- err = -EAGAIN;
-
+ xe_validation_ctx_fini(&ctx);
goto out_unlock_outer;
}
}
@@ -563,7 +496,9 @@ retry:
if (err)
goto out_unlock;
+ xe_vm_set_validation_exec(vm, &exec);
err = xe_vm_rebind(vm, true);
+ xe_vm_set_validation_exec(vm, NULL);
if (err)
goto out_unlock;
@@ -581,9 +516,9 @@ retry:
(!(__tries)++ || __xe_vm_userptr_needs_repin(__vm)) : \
__xe_vm_userptr_needs_repin(__vm))
- down_read(&vm->userptr.notifier_lock);
+ xe_svm_notifier_lock(vm);
if (retry_required(tries, vm)) {
- up_read(&vm->userptr.notifier_lock);
+ xe_svm_notifier_unlock(vm);
err = -EAGAIN;
goto out_unlock;
}
@@ -597,10 +532,10 @@ retry:
/* Point of no return. */
arm_preempt_fences(vm, &preempt_fences);
resume_and_reinstall_preempt_fences(vm, &exec);
- up_read(&vm->userptr.notifier_lock);
+ xe_svm_notifier_unlock(vm);
out_unlock:
- drm_exec_fini(&exec);
+ xe_validation_ctx_fini(&ctx);
out_unlock_outer:
if (err == -EAGAIN) {
trace_xe_vm_rebind_worker_retry(vm);
@@ -618,203 +553,6 @@ out_unlock_outer:
trace_xe_vm_rebind_worker_exit(vm);
}
-static void __vma_userptr_invalidate(struct xe_vm *vm, struct xe_userptr_vma *uvma)
-{
- struct xe_userptr *userptr = &uvma->userptr;
- struct xe_vma *vma = &uvma->vma;
- struct dma_resv_iter cursor;
- struct dma_fence *fence;
- long err;
-
- /*
- * Tell exec and rebind worker they need to repin and rebind this
- * userptr.
- */
- if (!xe_vm_in_fault_mode(vm) &&
- !(vma->gpuva.flags & XE_VMA_DESTROYED)) {
- spin_lock(&vm->userptr.invalidated_lock);
- list_move_tail(&userptr->invalidate_link,
- &vm->userptr.invalidated);
- spin_unlock(&vm->userptr.invalidated_lock);
- }
-
- /*
- * Preempt fences turn into schedule disables, pipeline these.
- * Note that even in fault mode, we need to wait for binds and
- * unbinds to complete, and those are attached as BOOKMARK fences
- * to the vm.
- */
- dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
- DMA_RESV_USAGE_BOOKKEEP);
- dma_resv_for_each_fence_unlocked(&cursor, fence)
- dma_fence_enable_sw_signaling(fence);
- dma_resv_iter_end(&cursor);
-
- err = dma_resv_wait_timeout(xe_vm_resv(vm),
- DMA_RESV_USAGE_BOOKKEEP,
- false, MAX_SCHEDULE_TIMEOUT);
- XE_WARN_ON(err <= 0);
-
- if (xe_vm_in_fault_mode(vm) && userptr->initial_bind) {
- err = xe_vm_invalidate_vma(vma);
- XE_WARN_ON(err);
- }
-
- xe_hmm_userptr_unmap(uvma);
-}
-
-static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
- const struct mmu_notifier_range *range,
- unsigned long cur_seq)
-{
- struct xe_userptr_vma *uvma = container_of(mni, typeof(*uvma), userptr.notifier);
- struct xe_vma *vma = &uvma->vma;
- struct xe_vm *vm = xe_vma_vm(vma);
-
- xe_assert(vm->xe, xe_vma_is_userptr(vma));
- trace_xe_vma_userptr_invalidate(vma);
-
- if (!mmu_notifier_range_blockable(range))
- return false;
-
- vm_dbg(&xe_vma_vm(vma)->xe->drm,
- "NOTIFIER: addr=0x%016llx, range=0x%016llx",
- xe_vma_start(vma), xe_vma_size(vma));
-
- down_write(&vm->userptr.notifier_lock);
- mmu_interval_set_seq(mni, cur_seq);
-
- __vma_userptr_invalidate(vm, uvma);
- up_write(&vm->userptr.notifier_lock);
- trace_xe_vma_userptr_invalidate_complete(vma);
-
- return true;
-}
-
-static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
- .invalidate = vma_userptr_invalidate,
-};
-
-#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
-/**
- * xe_vma_userptr_force_invalidate() - force invalidate a userptr
- * @uvma: The userptr vma to invalidate
- *
- * Perform a forced userptr invalidation for testing purposes.
- */
-void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma)
-{
- struct xe_vm *vm = xe_vma_vm(&uvma->vma);
-
- /* Protect against concurrent userptr pinning */
- lockdep_assert_held(&vm->lock);
- /* Protect against concurrent notifiers */
- lockdep_assert_held(&vm->userptr.notifier_lock);
- /*
- * Protect against concurrent instances of this function and
- * the critical exec sections
- */
- xe_vm_assert_held(vm);
-
- if (!mmu_interval_read_retry(&uvma->userptr.notifier,
- uvma->userptr.notifier_seq))
- uvma->userptr.notifier_seq -= 2;
- __vma_userptr_invalidate(vm, uvma);
-}
-#endif
-
-int xe_vm_userptr_pin(struct xe_vm *vm)
-{
- struct xe_userptr_vma *uvma, *next;
- int err = 0;
-
- xe_assert(vm->xe, !xe_vm_in_fault_mode(vm));
- lockdep_assert_held_write(&vm->lock);
-
- /* Collect invalidated userptrs */
- spin_lock(&vm->userptr.invalidated_lock);
- xe_assert(vm->xe, list_empty(&vm->userptr.repin_list));
- list_for_each_entry_safe(uvma, next, &vm->userptr.invalidated,
- userptr.invalidate_link) {
- list_del_init(&uvma->userptr.invalidate_link);
- list_add_tail(&uvma->userptr.repin_link,
- &vm->userptr.repin_list);
- }
- spin_unlock(&vm->userptr.invalidated_lock);
-
- /* Pin and move to bind list */
- list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
- userptr.repin_link) {
- err = xe_vma_userptr_pin_pages(uvma);
- if (err == -EFAULT) {
- list_del_init(&uvma->userptr.repin_link);
- /*
- * We might have already done the pin once, but then
- * had to retry before the re-bind happened, due to
- * some other condition in the caller, but in the
- * meantime the userptr got dinged by the notifier such
- * that we need to revalidate here, but this time we hit
- * the EFAULT. In such a case make sure we remove
- * ourselves from the rebind list to avoid going down in
- * flames.
- */
- if (!list_empty(&uvma->vma.combined_links.rebind))
- list_del_init(&uvma->vma.combined_links.rebind);
-
- /* Wait for pending binds */
- xe_vm_lock(vm, false);
- dma_resv_wait_timeout(xe_vm_resv(vm),
- DMA_RESV_USAGE_BOOKKEEP,
- false, MAX_SCHEDULE_TIMEOUT);
-
- down_read(&vm->userptr.notifier_lock);
- err = xe_vm_invalidate_vma(&uvma->vma);
- up_read(&vm->userptr.notifier_lock);
- xe_vm_unlock(vm);
- if (err)
- break;
- } else {
- if (err)
- break;
-
- list_del_init(&uvma->userptr.repin_link);
- list_move_tail(&uvma->vma.combined_links.rebind,
- &vm->rebind_list);
- }
- }
-
- if (err) {
- down_write(&vm->userptr.notifier_lock);
- spin_lock(&vm->userptr.invalidated_lock);
- list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
- userptr.repin_link) {
- list_del_init(&uvma->userptr.repin_link);
- list_move_tail(&uvma->userptr.invalidate_link,
- &vm->userptr.invalidated);
- }
- spin_unlock(&vm->userptr.invalidated_lock);
- up_write(&vm->userptr.notifier_lock);
- }
- return err;
-}
-
-/**
- * xe_vm_userptr_check_repin() - Check whether the VM might have userptrs
- * that need repinning.
- * @vm: The VM.
- *
- * This function does an advisory check for whether the VM has userptrs that
- * need repinning.
- *
- * Return: 0 if there are no indications of userptrs needing repinning,
- * -EAGAIN if there are.
- */
-int xe_vm_userptr_check_repin(struct xe_vm *vm)
-{
- return (list_empty_careful(&vm->userptr.repin_list) &&
- list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
-}
-
static int xe_vma_ops_alloc(struct xe_vma_ops *vops, bool array_of_binds)
{
int i;
@@ -988,7 +726,7 @@ struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma, u8 tile_ma
for_each_tile(tile, vm->xe, id) {
vops.pt_update_ops[id].wait_vm_bookkeep = true;
vops.pt_update_ops[tile->id].q =
- xe_tile_migrate_exec_queue(tile);
+ xe_migrate_exec_queue(tile->migrate);
}
err = xe_vm_ops_add_rebind(&vops, vma, tile_mask);
@@ -1078,7 +816,7 @@ struct dma_fence *xe_vm_range_rebind(struct xe_vm *vm,
for_each_tile(tile, vm->xe, id) {
vops.pt_update_ops[id].wait_vm_bookkeep = true;
vops.pt_update_ops[tile->id].q =
- xe_tile_migrate_exec_queue(tile);
+ xe_migrate_exec_queue(tile->migrate);
}
err = xe_vm_ops_add_range_rebind(&vops, vma, range, tile_mask);
@@ -1161,7 +899,7 @@ struct dma_fence *xe_vm_range_unbind(struct xe_vm *vm,
for_each_tile(tile, vm->xe, id) {
vops.pt_update_ops[id].wait_vm_bookkeep = true;
vops.pt_update_ops[tile->id].q =
- xe_tile_migrate_exec_queue(tile);
+ xe_migrate_exec_queue(tile->migrate);
}
err = xe_vm_ops_add_range_unbind(&vops, range);
@@ -1203,7 +941,8 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
struct xe_bo *bo,
u64 bo_offset_or_userptr,
u64 start, u64 end,
- u16 pat_index, unsigned int flags)
+ struct xe_vma_mem_attr *attr,
+ unsigned int flags)
{
struct xe_vma *vma;
struct xe_tile *tile;
@@ -1258,7 +997,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
if (vm->xe->info.has_atomic_enable_pte_bit)
vma->gpuva.flags |= XE_VMA_ATOMIC_PTE_BIT;
- vma->pat_index = pat_index;
+ vma->attr = *attr;
if (bo) {
struct drm_gpuvm_bo *vm_bo;
@@ -1278,25 +1017,17 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
drm_gpuvm_bo_put(vm_bo);
} else /* userptr or null */ {
if (!is_null && !is_cpu_addr_mirror) {
- struct xe_userptr *userptr = &to_userptr_vma(vma)->userptr;
+ struct xe_userptr_vma *uvma = to_userptr_vma(vma);
u64 size = end - start + 1;
int err;
- INIT_LIST_HEAD(&userptr->invalidate_link);
- INIT_LIST_HEAD(&userptr->repin_link);
vma->gpuva.gem.offset = bo_offset_or_userptr;
- mutex_init(&userptr->unmap_mutex);
- err = mmu_interval_notifier_insert(&userptr->notifier,
- current->mm,
- xe_vma_userptr(vma), size,
- &vma_userptr_notifier_ops);
+ err = xe_userptr_setup(uvma, xe_vma_userptr(vma), size);
if (err) {
xe_vma_free(vma);
return ERR_PTR(err);
}
-
- userptr->notifier_seq = LONG_MAX;
}
xe_vm_get(vm);
@@ -1316,18 +1047,8 @@ static void xe_vma_destroy_late(struct xe_vma *vma)
if (xe_vma_is_userptr(vma)) {
struct xe_userptr_vma *uvma = to_userptr_vma(vma);
- struct xe_userptr *userptr = &uvma->userptr;
- if (userptr->sg)
- xe_hmm_userptr_free_sg(uvma);
-
- /*
- * Since userptr pages are not pinned, we can't remove
- * the notifier until we're sure the GPU is not accessing
- * them anymore
- */
- mmu_interval_notifier_remove(&userptr->notifier);
- mutex_destroy(&userptr->unmap_mutex);
+ xe_userptr_remove(uvma);
xe_vm_put(vm);
} else if (xe_vma_is_null(vma) || xe_vma_is_cpu_addr_mirror(vma)) {
xe_vm_put(vm);
@@ -1364,11 +1085,7 @@ static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
if (xe_vma_is_userptr(vma)) {
xe_assert(vm->xe, vma->gpuva.flags & XE_VMA_DESTROYED);
-
- spin_lock(&vm->userptr.invalidated_lock);
- xe_assert(vm->xe, list_empty(&to_userptr_vma(vma)->userptr.repin_link));
- list_del(&to_userptr_vma(vma)->userptr.invalidate_link);
- spin_unlock(&vm->userptr.invalidated_lock);
+ xe_userptr_destroy(to_userptr_vma(vma));
} else if (!xe_vma_is_null(vma) && !xe_vma_is_cpu_addr_mirror(vma)) {
xe_bo_assert_held(xe_vma_bo(vma));
@@ -1416,20 +1133,19 @@ int xe_vm_lock_vma(struct drm_exec *exec, struct xe_vma *vma)
static void xe_vma_destroy_unlocked(struct xe_vma *vma)
{
+ struct xe_device *xe = xe_vma_vm(vma)->xe;
+ struct xe_validation_ctx ctx;
struct drm_exec exec;
- int err;
+ int err = 0;
- drm_exec_init(&exec, 0, 0);
- drm_exec_until_all_locked(&exec) {
+ xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {}, err) {
err = xe_vm_lock_vma(&exec, vma);
drm_exec_retry_on_contention(&exec);
if (XE_WARN_ON(err))
break;
+ xe_vma_destroy(vma, NULL);
}
-
- xe_vma_destroy(vma, NULL);
-
- drm_exec_fini(&exec);
+ xe_assert(xe, !err);
}
struct xe_vma *
@@ -1547,14 +1263,39 @@ static u64 pte_encode_ps(u32 pt_level)
return 0;
}
-static u64 xelp_pde_encode_bo(struct xe_bo *bo, u64 bo_offset,
- const u16 pat_index)
+static u16 pde_pat_index(struct xe_bo *bo)
+{
+ struct xe_device *xe = xe_bo_device(bo);
+ u16 pat_index;
+
+ /*
+ * We only have two bits to encode the PAT index in non-leaf nodes, but
+ * these only point to other paging structures so we only need a minimal
+ * selection of options. The user PAT index is only for encoding leaf
+	 * nodes, where we have more bits available for the encoding. The
+	 * non-leaf nodes are instead under driver control so the chosen index
+	 * here should be distinct from the user PAT index. Also the
+ * corresponding coherency of the PAT index should be tied to the
+ * allocation type of the page table (or at least we should pick
+ * something which is always safe).
+ */
+ if (!xe_bo_is_vram(bo) && bo->ttm.ttm->caching == ttm_cached)
+ pat_index = xe->pat.idx[XE_CACHE_WB];
+ else
+ pat_index = xe->pat.idx[XE_CACHE_NONE];
+
+ xe_assert(xe, pat_index <= 3);
+
+ return pat_index;
+}
+
+static u64 xelp_pde_encode_bo(struct xe_bo *bo, u64 bo_offset)
{
u64 pde;
pde = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
pde |= XE_PAGE_PRESENT | XE_PAGE_RW;
- pde |= pde_encode_pat_index(pat_index);
+ pde |= pde_encode_pat_index(pde_pat_index(bo));
return pde;
}
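Editor's note: pde_pat_index() above works because non-leaf entries carry only
two PAT bits, hence the xe_assert() that pat_index <= 3. A minimal sketch of
the kind of two-bit encoder pde_encode_pat_index() is assumed to be; the bit
positions are illustrative assumptions, not taken from this patch:

	/* Illustrative fragment; PDE PAT bit positions are hypothetical. */
	static u64 example_pde_encode_pat_index(u16 pat_index)
	{
		u64 pde = 0;

		if (pat_index & BIT(0))
			pde |= BIT_ULL(3);	/* stand-in for the low PDE PAT bit */
		if (pat_index & BIT(1))
			pde |= BIT_ULL(4);	/* stand-in for the high PDE PAT bit */

		return pde;
	}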
@@ -1629,6 +1370,7 @@ static void vm_destroy_work_func(struct work_struct *w);
* @xe: xe device.
* @tile: tile to set up for.
* @vm: vm to set up for.
+ * @exec: The struct drm_exec object used to lock the vm resv.
*
* Sets up a pagetable tree with one page-table per level and a single
* leaf PTE. All pagetable entries point to the single page-table or,
@@ -1638,20 +1380,19 @@ static void vm_destroy_work_func(struct work_struct *w);
* Return: 0 on success, negative error code on error.
*/
static int xe_vm_create_scratch(struct xe_device *xe, struct xe_tile *tile,
- struct xe_vm *vm)
+ struct xe_vm *vm, struct drm_exec *exec)
{
u8 id = tile->id;
int i;
for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; i++) {
- vm->scratch_pt[id][i] = xe_pt_create(vm, tile, i);
+ vm->scratch_pt[id][i] = xe_pt_create(vm, tile, i, exec);
if (IS_ERR(vm->scratch_pt[id][i])) {
int err = PTR_ERR(vm->scratch_pt[id][i]);
vm->scratch_pt[id][i] = NULL;
return err;
}
-
xe_pt_populate_empty(tile, vm, vm->scratch_pt[id][i]);
}
@@ -1679,9 +1420,26 @@ static void xe_vm_free_scratch(struct xe_vm *vm)
}
}
+static void xe_vm_pt_destroy(struct xe_vm *vm)
+{
+ struct xe_tile *tile;
+ u8 id;
+
+ xe_vm_assert_held(vm);
+
+ for_each_tile(tile, vm->xe, id) {
+ if (vm->pt_root[id]) {
+ xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
+ vm->pt_root[id] = NULL;
+ }
+ }
+}
+
struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags, struct xe_file *xef)
{
struct drm_gem_object *vm_resv_obj;
+ struct xe_validation_ctx ctx;
+ struct drm_exec exec;
struct xe_vm *vm;
int err, number_tiles = 0;
struct xe_tile *tile;
@@ -1725,7 +1483,6 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags, struct xe_file *xef)
INIT_LIST_HEAD(&vm->userptr.repin_list);
INIT_LIST_HEAD(&vm->userptr.invalidated);
- init_rwsem(&vm->userptr.notifier_lock);
spin_lock_init(&vm->userptr.invalidated_lock);
ttm_lru_bulk_move_init(&vm->lru_bulk_move);
@@ -1752,11 +1509,9 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags, struct xe_file *xef)
INIT_LIST_HEAD(&vm->preempt.pm_activate_link);
}
- if (flags & XE_VM_FLAG_FAULT_MODE) {
- err = xe_svm_init(vm);
- if (err)
- goto err_no_resv;
- }
+ err = xe_svm_init(vm);
+ if (err)
+ goto err_no_resv;
vm_resv_obj = drm_gpuvm_resv_object_alloc(&xe->drm);
if (!vm_resv_obj) {
@@ -1769,49 +1524,68 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags, struct xe_file *xef)
drm_gem_object_put(vm_resv_obj);
- err = xe_vm_lock(vm, true);
- if (err)
- goto err_close;
-
- if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
- vm->flags |= XE_VM_FLAG_64K;
+ err = 0;
+ xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.interruptible = true},
+ err) {
+ err = xe_vm_drm_exec_lock(vm, &exec);
+ drm_exec_retry_on_contention(&exec);
- for_each_tile(tile, xe, id) {
- if (flags & XE_VM_FLAG_MIGRATION &&
- tile->id != XE_VM_FLAG_TILE_ID(flags))
- continue;
+ if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
+ vm->flags |= XE_VM_FLAG_64K;
- vm->pt_root[id] = xe_pt_create(vm, tile, xe->info.vm_max_level);
- if (IS_ERR(vm->pt_root[id])) {
- err = PTR_ERR(vm->pt_root[id]);
- vm->pt_root[id] = NULL;
- goto err_unlock_close;
- }
- }
-
- if (xe_vm_has_scratch(vm)) {
for_each_tile(tile, xe, id) {
- if (!vm->pt_root[id])
+ if (flags & XE_VM_FLAG_MIGRATION &&
+ tile->id != XE_VM_FLAG_TILE_ID(flags))
continue;
- err = xe_vm_create_scratch(xe, tile, vm);
+ vm->pt_root[id] = xe_pt_create(vm, tile, xe->info.vm_max_level,
+ &exec);
+ if (IS_ERR(vm->pt_root[id])) {
+ err = PTR_ERR(vm->pt_root[id]);
+ vm->pt_root[id] = NULL;
+ xe_vm_pt_destroy(vm);
+ drm_exec_retry_on_contention(&exec);
+ xe_validation_retry_on_oom(&ctx, &err);
+ break;
+ }
+ }
+ if (err)
+ break;
+
+ if (xe_vm_has_scratch(vm)) {
+ for_each_tile(tile, xe, id) {
+ if (!vm->pt_root[id])
+ continue;
+
+ err = xe_vm_create_scratch(xe, tile, vm, &exec);
+ if (err) {
+ xe_vm_free_scratch(vm);
+ xe_vm_pt_destroy(vm);
+ drm_exec_retry_on_contention(&exec);
+ xe_validation_retry_on_oom(&ctx, &err);
+ break;
+ }
+ }
if (err)
- goto err_unlock_close;
+ break;
+ vm->batch_invalidate_tlb = true;
}
- vm->batch_invalidate_tlb = true;
- }
- if (vm->flags & XE_VM_FLAG_LR_MODE)
- vm->batch_invalidate_tlb = false;
+ if (vm->flags & XE_VM_FLAG_LR_MODE) {
+ INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
+ vm->batch_invalidate_tlb = false;
+ }
- /* Fill pt_root after allocating scratch tables */
- for_each_tile(tile, xe, id) {
- if (!vm->pt_root[id])
- continue;
+ /* Fill pt_root after allocating scratch tables */
+ for_each_tile(tile, xe, id) {
+ if (!vm->pt_root[id])
+ continue;
- xe_pt_populate_empty(tile, vm, vm->pt_root[id]);
+ xe_pt_populate_empty(tile, vm, vm->pt_root[id]);
+ }
}
- xe_vm_unlock(vm);
+ if (err)
+ goto err_close;
/* Kernel migration VM shouldn't have a circular loop.. */
if (!(flags & XE_VM_FLAG_MIGRATION)) {
@@ -1844,7 +1618,7 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags, struct xe_file *xef)
&xe->usm.next_asid, GFP_KERNEL);
up_write(&xe->usm.lock);
if (err < 0)
- goto err_unlock_close;
+ goto err_close;
vm->usm.asid = asid;
}
@@ -1853,8 +1627,6 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags, struct xe_file *xef)
return vm;
-err_unlock_close:
- xe_vm_unlock(vm);
err_close:
xe_vm_close_and_put(vm);
return ERR_PTR(err);
@@ -1907,7 +1679,7 @@ static void xe_vm_close(struct xe_vm *vm)
xe_pt_clear(xe, vm->pt_root[id]);
for_each_gt(gt, xe, id)
- xe_gt_tlb_invalidation_vm(gt, vm);
+ xe_tlb_inval_vm(&gt->tlb_inval, vm);
}
}
@@ -1961,9 +1733,9 @@ void xe_vm_close_and_put(struct xe_vm *vm)
vma = gpuva_to_vma(gpuva);
if (xe_vma_has_no_bo(vma)) {
- down_read(&vm->userptr.notifier_lock);
+ xe_svm_notifier_lock(vm);
vma->gpuva.flags |= XE_VMA_DESTROYED;
- up_read(&vm->userptr.notifier_lock);
+ xe_svm_notifier_unlock(vm);
}
xe_vm_remove_vma(vm, vma);
@@ -1987,13 +1759,7 @@ void xe_vm_close_and_put(struct xe_vm *vm)
* destroy the pagetables immediately.
*/
xe_vm_free_scratch(vm);
-
- for_each_tile(tile, xe, id) {
- if (vm->pt_root[id]) {
- xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
- vm->pt_root[id] = NULL;
- }
- }
+ xe_vm_pt_destroy(vm);
xe_vm_unlock(vm);
/*
@@ -2007,8 +1773,7 @@ void xe_vm_close_and_put(struct xe_vm *vm)
xe_vma_destroy_unlocked(vma);
}
- if (xe_vm_in_fault_mode(vm))
- xe_svm_fini(vm);
+ xe_svm_fini(vm);
up_write(&vm->lock);
@@ -2085,8 +1850,7 @@ struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)
u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile)
{
- return vm->pt_ops->pde_encode_bo(vm->pt_root[tile->id]->bo, 0,
- tile_to_xe(tile)->pat.idx[XE_CACHE_WB]);
+ return vm->pt_ops->pde_encode_bo(vm->pt_root[tile->id]->bo, 0);
}
static struct xe_exec_queue *
@@ -2128,7 +1892,7 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
if (XE_IOCTL_DBG(xe, args->extensions))
return -EINVAL;
- if (XE_WA(xe_root_mmio_gt(xe), 14016763929))
+ if (XE_GT_WA(xe_root_mmio_gt(xe), 14016763929))
args->flags |= DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE;
if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
@@ -2210,6 +1974,110 @@ int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
return err;
}
+static int xe_vm_query_vmas(struct xe_vm *vm, u64 start, u64 end)
+{
+ struct drm_gpuva *gpuva;
+ u32 num_vmas = 0;
+
+ lockdep_assert_held(&vm->lock);
+ drm_gpuvm_for_each_va_range(gpuva, &vm->gpuvm, start, end)
+ num_vmas++;
+
+ return num_vmas;
+}
+
+static int get_mem_attrs(struct xe_vm *vm, u32 *num_vmas, u64 start,
+ u64 end, struct drm_xe_mem_range_attr *attrs)
+{
+ struct drm_gpuva *gpuva;
+ int i = 0;
+
+ lockdep_assert_held(&vm->lock);
+
+ drm_gpuvm_for_each_va_range(gpuva, &vm->gpuvm, start, end) {
+ struct xe_vma *vma = gpuva_to_vma(gpuva);
+
+ if (i == *num_vmas)
+ return -ENOSPC;
+
+ attrs[i].start = xe_vma_start(vma);
+ attrs[i].end = xe_vma_end(vma);
+ attrs[i].atomic.val = vma->attr.atomic_access;
+ attrs[i].pat_index.val = vma->attr.pat_index;
+ attrs[i].preferred_mem_loc.devmem_fd = vma->attr.preferred_loc.devmem_fd;
+ attrs[i].preferred_mem_loc.migration_policy =
+ vma->attr.preferred_loc.migration_policy;
+
+ i++;
+ }
+
+ *num_vmas = i;
+ return 0;
+}
+
+int xe_vm_query_vmas_attrs_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct xe_device *xe = to_xe_device(dev);
+ struct xe_file *xef = to_xe_file(file);
+ struct drm_xe_mem_range_attr *mem_attrs;
+ struct drm_xe_vm_query_mem_range_attr *args = data;
+ u64 __user *attrs_user = u64_to_user_ptr(args->vector_of_mem_attr);
+ struct xe_vm *vm;
+ int err = 0;
+
+ if (XE_IOCTL_DBG(xe,
+ ((args->num_mem_ranges == 0 &&
+ (attrs_user || args->sizeof_mem_range_attr != 0)) ||
+ (args->num_mem_ranges > 0 &&
+ (!attrs_user ||
+ args->sizeof_mem_range_attr !=
+ sizeof(struct drm_xe_mem_range_attr))))))
+ return -EINVAL;
+
+ vm = xe_vm_lookup(xef, args->vm_id);
+ if (XE_IOCTL_DBG(xe, !vm))
+ return -EINVAL;
+
+ err = down_read_interruptible(&vm->lock);
+ if (err)
+ goto put_vm;
+
+ attrs_user = u64_to_user_ptr(args->vector_of_mem_attr);
+
+ if (args->num_mem_ranges == 0 && !attrs_user) {
+ args->num_mem_ranges = xe_vm_query_vmas(vm, args->start, args->start + args->range);
+ args->sizeof_mem_range_attr = sizeof(struct drm_xe_mem_range_attr);
+ goto unlock_vm;
+ }
+
+ mem_attrs = kvmalloc_array(args->num_mem_ranges, args->sizeof_mem_range_attr,
+ GFP_KERNEL | __GFP_ACCOUNT |
+ __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+ if (!mem_attrs) {
+ err = args->num_mem_ranges > 1 ? -ENOBUFS : -ENOMEM;
+ goto unlock_vm;
+ }
+
+ memset(mem_attrs, 0, args->num_mem_ranges * args->sizeof_mem_range_attr);
+ err = get_mem_attrs(vm, &args->num_mem_ranges, args->start,
+ args->start + args->range, mem_attrs);
+ if (err)
+ goto free_mem_attrs;
+
+ err = copy_to_user(attrs_user, mem_attrs,
+ args->sizeof_mem_range_attr * args->num_mem_ranges);
+ if (err)
+ err = -EFAULT;
+
+free_mem_attrs:
+ kvfree(mem_attrs);
+unlock_vm:
+ up_read(&vm->lock);
+put_vm:
+ xe_vm_put(vm);
+ return err;
+}
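Editor's note: xe_vm_query_vmas_attrs_ioctl() implements a two-pass query: a
first call with num_mem_ranges == 0 and no vector makes the kernel report the
count and per-entry size, and a second call fetches the attributes. A hedged
userspace sketch follows; the DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS request
name is an assumption, while the struct fields follow the uapi used above
(needs <sys/ioctl.h>, <stdlib.h>, <errno.h>, <drm/xe_drm.h>):

	struct drm_xe_vm_query_mem_range_attr q = {
		.vm_id = vm_id,
		.start = start,
		.range = range,
	};

	/* Pass 1: kernel fills num_mem_ranges and sizeof_mem_range_attr. */
	if (ioctl(fd, DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS, &q))
		return -errno;

	/* Pass 2: allocate the vector and fetch the per-range attributes. */
	q.vector_of_mem_attr = (uintptr_t)calloc(q.num_mem_ranges,
						 q.sizeof_mem_range_attr);
	if (!q.vector_of_mem_attr)
		return -ENOMEM;
	if (ioctl(fd, DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS, &q))
		return -errno;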
+
static bool vma_matches(struct xe_vma *vma, u64 page_addr)
{
if (page_addr > xe_vma_end(vma) - 1 ||
@@ -2248,9 +2116,9 @@ static const u32 region_to_mem_type[] = {
static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma,
bool post_commit)
{
- down_read(&vm->userptr.notifier_lock);
+ xe_svm_notifier_lock(vm);
vma->gpuva.flags |= XE_VMA_DESTROYED;
- up_read(&vm->userptr.notifier_lock);
+ xe_svm_notifier_unlock(vm);
if (post_commit)
xe_vm_remove_vma(vm, vma);
}
@@ -2357,10 +2225,17 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,
switch (operation) {
case DRM_XE_VM_BIND_OP_MAP:
- case DRM_XE_VM_BIND_OP_MAP_USERPTR:
- ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range,
- obj, bo_offset_or_userptr);
+ case DRM_XE_VM_BIND_OP_MAP_USERPTR: {
+ struct drm_gpuvm_map_req map_req = {
+ .map.va.addr = addr,
+ .map.va.range = range,
+ .map.gem.obj = obj,
+ .map.gem.offset = bo_offset_or_userptr,
+ };
+
+ ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, &map_req);
break;
+ }
case DRM_XE_VM_BIND_OP_UNMAP:
ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range);
break;
@@ -2408,9 +2283,10 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,
__xe_vm_needs_clear_scratch_pages(vm, flags);
} else if (__op->op == DRM_GPUVA_OP_PREFETCH) {
struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
+ struct xe_tile *tile;
struct xe_svm_range *svm_range;
struct drm_gpusvm_ctx ctx = {};
- struct xe_tile *tile;
+ struct drm_pagemap *dpagemap;
u8 id, tile_mask = 0;
u32 i;
@@ -2427,8 +2303,24 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,
tile_mask |= 0x1 << id;
xa_init_flags(&op->prefetch_range.range, XA_FLAGS_ALLOC);
- op->prefetch_range.region = prefetch_region;
op->prefetch_range.ranges_count = 0;
+ tile = NULL;
+
+ if (prefetch_region == DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC) {
+ dpagemap = xe_vma_resolve_pagemap(vma,
+ xe_device_get_root_tile(vm->xe));
+ /*
+			 * TODO: Once multigpu support is enabled, we will need
+			 * a way to derive the tile from the dpagemap.
+ */
+ if (dpagemap)
+ tile = xe_device_get_root_tile(vm->xe);
+ } else if (prefetch_region) {
+ tile = &vm->xe->tiles[region_to_mem_type[prefetch_region] -
+ XE_PL_VRAM0];
+ }
+
+ op->prefetch_range.tile = tile;
alloc_next_range:
svm_range = xe_svm_range_find_or_insert(vm, addr, vma, &ctx);
@@ -2447,7 +2339,7 @@ alloc_next_range:
goto unwind_prefetch_ops;
}
- if (xe_svm_range_validate(vm, svm_range, tile_mask, !!prefetch_region)) {
+ if (xe_svm_range_validate(vm, svm_range, tile_mask, !!tile)) {
xe_svm_range_debug(svm_range, "PREFETCH - RANGE IS VALID");
goto check_next_range;
}
@@ -2484,9 +2376,10 @@ unwind_prefetch_ops:
ALLOW_ERROR_INJECTION(vm_bind_ioctl_ops_create, ERRNO);
static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
- u16 pat_index, unsigned int flags)
+ struct xe_vma_mem_attr *attr, unsigned int flags)
{
struct xe_bo *bo = op->gem.obj ? gem_to_xe_bo(op->gem.obj) : NULL;
+ struct xe_validation_ctx ctx;
struct drm_exec exec;
struct xe_vma *vma;
int err = 0;
@@ -2494,9 +2387,9 @@ static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
lockdep_assert_held_write(&vm->lock);
if (bo) {
- drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
- drm_exec_until_all_locked(&exec) {
- err = 0;
+ err = 0;
+ xe_validation_guard(&ctx, &vm->xe->val, &exec,
+ (struct xe_val_flags) {.interruptible = true}, err) {
if (!bo->vm) {
err = drm_exec_lock_obj(&exec, xe_vm_obj(vm));
drm_exec_retry_on_contention(&exec);
@@ -2505,27 +2398,35 @@ static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
err = drm_exec_lock_obj(&exec, &bo->ttm.base);
drm_exec_retry_on_contention(&exec);
}
- if (err) {
- drm_exec_fini(&exec);
+ if (err)
return ERR_PTR(err);
- }
- }
- }
- vma = xe_vma_create(vm, bo, op->gem.offset,
- op->va.addr, op->va.addr +
- op->va.range - 1, pat_index, flags);
- if (IS_ERR(vma))
- goto err_unlock;
- if (xe_vma_is_userptr(vma))
- err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
- else if (!xe_vma_has_no_bo(vma) && !bo->vm)
- err = add_preempt_fences(vm, bo);
+ vma = xe_vma_create(vm, bo, op->gem.offset,
+ op->va.addr, op->va.addr +
+ op->va.range - 1, attr, flags);
+ if (IS_ERR(vma))
+ return vma;
-err_unlock:
- if (bo)
- drm_exec_fini(&exec);
+ if (!bo->vm) {
+ err = add_preempt_fences(vm, bo);
+ if (err) {
+ prep_vma_destroy(vm, vma, false);
+ xe_vma_destroy(vma, NULL);
+ }
+ }
+ }
+ if (err)
+ return ERR_PTR(err);
+ } else {
+ vma = xe_vma_create(vm, NULL, op->gem.offset,
+ op->va.addr, op->va.addr +
+ op->va.range - 1, attr, flags);
+ if (IS_ERR(vma))
+ return vma;
+ if (xe_vma_is_userptr(vma))
+ err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
+ }
if (err) {
prep_vma_destroy(vm, vma, false);
xe_vma_destroy_unlocked(vma);
@@ -2630,6 +2531,29 @@ static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
return err;
}
+/**
+ * xe_vma_has_default_mem_attrs - Check if a VMA has default memory attributes
+ * @vma: Pointer to the xe_vma structure to check
+ *
+ * This function determines whether the given VMA (Virtual Memory Area)
+ * has its memory attributes set to their default values. Specifically,
+ * it checks the following conditions:
+ *
+ * - `atomic_access` is `DRM_XE_ATOMIC_UNDEFINED`
+ * - `pat_index` is equal to `default_pat_index`
+ * - `preferred_loc.devmem_fd` is `DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE`
+ * - `preferred_loc.migration_policy` is `DRM_XE_MIGRATE_ALL_PAGES`
+ *
+ * Return: true if all attributes are at their default values, false otherwise.
+ */
+bool xe_vma_has_default_mem_attrs(struct xe_vma *vma)
+{
+ return (vma->attr.atomic_access == DRM_XE_ATOMIC_UNDEFINED &&
+ vma->attr.pat_index == vma->attr.default_pat_index &&
+ vma->attr.preferred_loc.devmem_fd == DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE &&
+ vma->attr.preferred_loc.migration_policy == DRM_XE_MIGRATE_ALL_PAGES);
+}
+
static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
struct xe_vma_ops *vops)
{
@@ -2656,6 +2580,16 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
switch (op->base.op) {
case DRM_GPUVA_OP_MAP:
{
+ struct xe_vma_mem_attr default_attr = {
+ .preferred_loc = {
+ .devmem_fd = DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE,
+ .migration_policy = DRM_XE_MIGRATE_ALL_PAGES,
+ },
+ .atomic_access = DRM_XE_ATOMIC_UNDEFINED,
+ .default_pat_index = op->map.pat_index,
+ .pat_index = op->map.pat_index,
+ };
+
flags |= op->map.read_only ?
VMA_CREATE_FLAG_READ_ONLY : 0;
flags |= op->map.is_null ?
@@ -2665,7 +2599,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
flags |= op->map.is_cpu_addr_mirror ?
VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR : 0;
- vma = new_vma(vm, &op->base.map, op->map.pat_index,
+ vma = new_vma(vm, &op->base.map, &default_attr,
flags);
if (IS_ERR(vma))
return PTR_ERR(vma);
@@ -2693,8 +2627,12 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
end = op->base.remap.next->va.addr;
if (xe_vma_is_cpu_addr_mirror(old) &&
- xe_svm_has_mapping(vm, start, end))
- return -EBUSY;
+ xe_svm_has_mapping(vm, start, end)) {
+ if (vops->flags & XE_VMA_OPS_FLAG_MADVISE)
+ xe_svm_unmap_address_range(vm, start, end);
+ else
+ return -EBUSY;
+ }
op->remap.start = xe_vma_start(old);
op->remap.range = xe_vma_size(old);
@@ -2713,7 +2651,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
if (op->base.remap.prev) {
vma = new_vma(vm, op->base.remap.prev,
- old->pat_index, flags);
+ &old->attr, flags);
if (IS_ERR(vma))
return PTR_ERR(vma);
@@ -2743,7 +2681,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
if (op->base.remap.next) {
vma = new_vma(vm, op->base.remap.next,
- old->pat_index, flags);
+ &old->attr, flags);
if (IS_ERR(vma))
return PTR_ERR(vma);
@@ -2832,9 +2770,9 @@ static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op,
struct xe_vma *vma = gpuva_to_vma(op->base.unmap.va);
if (vma) {
- down_read(&vm->userptr.notifier_lock);
+ xe_svm_notifier_lock(vm);
vma->gpuva.flags &= ~XE_VMA_DESTROYED;
- up_read(&vm->userptr.notifier_lock);
+ xe_svm_notifier_unlock(vm);
if (post_commit)
xe_vm_insert_vma(vm, vma);
}
@@ -2853,9 +2791,9 @@ static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op,
xe_vma_destroy_unlocked(op->remap.next);
}
if (vma) {
- down_read(&vm->userptr.notifier_lock);
+ xe_svm_notifier_lock(vm);
vma->gpuva.flags &= ~XE_VMA_DESTROYED;
- up_read(&vm->userptr.notifier_lock);
+ xe_svm_notifier_unlock(vm);
if (post_commit)
xe_vm_insert_vma(vm, vma);
}
@@ -2905,7 +2843,7 @@ static int vma_lock_and_validate(struct drm_exec *exec, struct xe_vma *vma,
err = drm_exec_lock_obj(exec, &bo->ttm.base);
if (!err && validate)
err = xe_bo_validate(bo, vm,
- !xe_vm_in_preempt_fence_mode(vm));
+ !xe_vm_in_preempt_fence_mode(vm), exec);
}
return err;
@@ -2930,30 +2868,26 @@ static int prefetch_ranges(struct xe_vm *vm, struct xe_vma_op *op)
{
bool devmem_possible = IS_DGFX(vm->xe) && IS_ENABLED(CONFIG_DRM_XE_PAGEMAP);
struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
+ struct xe_tile *tile = op->prefetch_range.tile;
int err = 0;
struct xe_svm_range *svm_range;
struct drm_gpusvm_ctx ctx = {};
- struct xe_tile *tile;
unsigned long i;
- u32 region;
if (!xe_vma_is_cpu_addr_mirror(vma))
return 0;
- region = op->prefetch_range.region;
-
ctx.read_only = xe_vma_read_only(vma);
ctx.devmem_possible = devmem_possible;
ctx.check_pages_threshold = devmem_possible ? SZ_64K : 0;
/* TODO: Threading the migration */
xa_for_each(&op->prefetch_range.range, i, svm_range) {
- if (!region)
+ if (!tile)
xe_svm_range_migrate_to_smem(vm, svm_range);
- if (xe_svm_range_needs_migrate_to_vram(svm_range, vma, region)) {
- tile = &vm->xe->tiles[region_to_mem_type[region] - XE_PL_VRAM0];
+ if (xe_svm_range_needs_migrate_to_vram(svm_range, vma, !!tile)) {
err = xe_svm_alloc_vram(tile, svm_range, &ctx);
if (err) {
drm_dbg(&vm->xe->drm, "VRAM allocation failed, retry from userspace, asid=%u, gpusvm=%p, errno=%pe\n",
@@ -3016,19 +2950,20 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
u32 region;
- if (xe_vma_is_cpu_addr_mirror(vma))
- region = op->prefetch_range.region;
- else
+ if (!xe_vma_is_cpu_addr_mirror(vma)) {
region = op->prefetch.region;
-
- xe_assert(vm->xe, region <= ARRAY_SIZE(region_to_mem_type));
+ xe_assert(vm->xe, region == DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC ||
+ region <= ARRAY_SIZE(region_to_mem_type));
+ }
err = vma_lock_and_validate(exec,
gpuva_to_vma(op->base.prefetch.va),
false);
if (!err && !xe_vma_has_no_bo(vma))
err = xe_bo_migrate(xe_vma_bo(vma),
- region_to_mem_type[region]);
+ region_to_mem_type[region],
+ NULL,
+ exec);
break;
}
default:
@@ -3291,35 +3226,37 @@ static void vm_bind_ioctl_ops_fini(struct xe_vm *vm, struct xe_vma_ops *vops,
static struct dma_fence *vm_bind_ioctl_ops_execute(struct xe_vm *vm,
struct xe_vma_ops *vops)
{
+ struct xe_validation_ctx ctx;
struct drm_exec exec;
struct dma_fence *fence;
- int err;
+ int err = 0;
lockdep_assert_held_write(&vm->lock);
- drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
- DRM_EXEC_IGNORE_DUPLICATES, 0);
- drm_exec_until_all_locked(&exec) {
+ xe_validation_guard(&ctx, &vm->xe->val, &exec,
+ ((struct xe_val_flags) {
+ .interruptible = true,
+ .exec_ignore_duplicates = true,
+ }), err) {
err = vm_bind_ioctl_ops_lock_and_prep(&exec, vm, vops);
drm_exec_retry_on_contention(&exec);
- if (err) {
- fence = ERR_PTR(err);
- goto unlock;
- }
+ xe_validation_retry_on_oom(&ctx, &err);
+ if (err)
+ return ERR_PTR(err);
+ xe_vm_set_validation_exec(vm, &exec);
fence = ops_execute(vm, vops);
+ xe_vm_set_validation_exec(vm, NULL);
if (IS_ERR(fence)) {
if (PTR_ERR(fence) == -ENODATA)
vm_bind_ioctl_ops_fini(vm, vops, NULL);
- goto unlock;
+ return fence;
}
vm_bind_ioctl_ops_fini(vm, vops, fence);
}
-unlock:
- drm_exec_fini(&exec);
- return fence;
+ return err ? ERR_PTR(err) : fence;
}
ALLOW_ERROR_INJECTION(vm_bind_ioctl_ops_execute, ERRNO);
@@ -3435,12 +3372,14 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm,
op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE &&
op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
+ XE_IOCTL_DBG(xe, op == DRM_XE_VM_BIND_OP_MAP_USERPTR &&
+ !IS_ENABLED(CONFIG_DRM_GPUSVM)) ||
XE_IOCTL_DBG(xe, obj &&
op == DRM_XE_VM_BIND_OP_PREFETCH) ||
XE_IOCTL_DBG(xe, prefetch_region &&
op != DRM_XE_VM_BIND_OP_PREFETCH) ||
- XE_IOCTL_DBG(xe, !(BIT(prefetch_region) &
- xe->info.mem_region_mask)) ||
+ XE_IOCTL_DBG(xe, (prefetch_region != DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC &&
+ !(BIT(prefetch_region) & xe->info.mem_region_mask))) ||
XE_IOCTL_DBG(xe, obj &&
op == DRM_XE_VM_BIND_OP_UNMAP)) {
err = -EINVAL;
@@ -3587,7 +3526,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
q = xe_exec_queue_lookup(xef, args->exec_queue_id);
if (XE_IOCTL_DBG(xe, !q)) {
err = -ENOENT;
- goto put_vm;
+ goto free_bind_ops;
}
if (XE_IOCTL_DBG(xe, !(q->flags & EXEC_QUEUE_FLAG_VM))) {
@@ -3633,7 +3572,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
__GFP_RETRY_MAYFAIL | __GFP_NOWARN);
if (!ops) {
err = -ENOMEM;
- goto release_vm_lock;
+ goto free_bos;
}
}
@@ -3767,17 +3706,20 @@ free_syncs:
put_obj:
for (i = 0; i < args->num_binds; ++i)
xe_bo_put(bos[i]);
+
+ kvfree(ops);
+free_bos:
+ kvfree(bos);
release_vm_lock:
up_write(&vm->lock);
put_exec_queue:
if (q)
xe_exec_queue_put(q);
-put_vm:
- xe_vm_put(vm);
- kvfree(bos);
- kvfree(ops);
+free_bind_ops:
if (args->num_binds > 1)
kvfree(bind_ops);
+put_vm:
+ xe_vm_put(vm);
return err;
}
@@ -3867,10 +3809,14 @@ release_vm_lock:
*/
int xe_vm_lock(struct xe_vm *vm, bool intr)
{
+ int ret;
+
if (intr)
- return dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
+ ret = dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
+ else
+ ret = dma_resv_lock(xe_vm_resv(vm), NULL);
- return dma_resv_lock(xe_vm_resv(vm), NULL);
+ return ret;
}
/**
@@ -3885,7 +3831,7 @@ void xe_vm_unlock(struct xe_vm *vm)
}
/**
- * xe_vm_range_tilemask_tlb_invalidation - Issue a TLB invalidation on this tilemask for an
+ * xe_vm_range_tilemask_tlb_inval - Issue a TLB invalidation on this tilemask for an
* address range
* @vm: The VM
* @start: start address
@@ -3896,10 +3842,11 @@ void xe_vm_unlock(struct xe_vm *vm)
*
* Returns 0 for success, negative error code otherwise.
*/
-int xe_vm_range_tilemask_tlb_invalidation(struct xe_vm *vm, u64 start,
- u64 end, u8 tile_mask)
+int xe_vm_range_tilemask_tlb_inval(struct xe_vm *vm, u64 start,
+ u64 end, u8 tile_mask)
{
- struct xe_gt_tlb_invalidation_fence fence[XE_MAX_TILES_PER_DEVICE * XE_MAX_GT_PER_TILE];
+ struct xe_tlb_inval_fence
+ fence[XE_MAX_TILES_PER_DEVICE * XE_MAX_GT_PER_TILE];
struct xe_tile *tile;
u32 fence_id = 0;
u8 id;
@@ -3909,39 +3856,36 @@ int xe_vm_range_tilemask_tlb_invalidation(struct xe_vm *vm, u64 start,
return 0;
for_each_tile(tile, vm->xe, id) {
- if (tile_mask & BIT(id)) {
- xe_gt_tlb_invalidation_fence_init(tile->primary_gt,
- &fence[fence_id], true);
-
- err = xe_gt_tlb_invalidation_range(tile->primary_gt,
- &fence[fence_id],
- start,
- end,
- vm->usm.asid);
- if (err)
- goto wait;
- ++fence_id;
+ if (!(tile_mask & BIT(id)))
+ continue;
- if (!tile->media_gt)
- continue;
+ xe_tlb_inval_fence_init(&tile->primary_gt->tlb_inval,
+ &fence[fence_id], true);
- xe_gt_tlb_invalidation_fence_init(tile->media_gt,
- &fence[fence_id], true);
+ err = xe_tlb_inval_range(&tile->primary_gt->tlb_inval,
+ &fence[fence_id], start, end,
+ vm->usm.asid);
+ if (err)
+ goto wait;
+ ++fence_id;
- err = xe_gt_tlb_invalidation_range(tile->media_gt,
- &fence[fence_id],
- start,
- end,
- vm->usm.asid);
- if (err)
- goto wait;
- ++fence_id;
- }
+ if (!tile->media_gt)
+ continue;
+
+ xe_tlb_inval_fence_init(&tile->media_gt->tlb_inval,
+ &fence[fence_id], true);
+
+ err = xe_tlb_inval_range(&tile->media_gt->tlb_inval,
+ &fence[fence_id], start, end,
+ vm->usm.asid);
+ if (err)
+ goto wait;
+ ++fence_id;
}
wait:
for (id = 0; id < fence_id; ++id)
- xe_gt_tlb_invalidation_fence_wait(&fence[id]);
+ xe_tlb_inval_fence_wait(&fence[id]);
return err;
}
@@ -3979,13 +3923,13 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
*/
if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
if (xe_vma_is_userptr(vma)) {
- lockdep_assert(lockdep_is_held_type(&vm->userptr.notifier_lock, 0) ||
- (lockdep_is_held_type(&vm->userptr.notifier_lock, 1) &&
+ lockdep_assert(lockdep_is_held_type(&vm->svm.gpusvm.notifier_lock, 0) ||
+ (lockdep_is_held_type(&vm->svm.gpusvm.notifier_lock, 1) &&
lockdep_is_held(&xe_vm_resv(vm)->lock.base)));
WARN_ON_ONCE(!mmu_interval_check_retry
(&to_userptr_vma(vma)->userptr.notifier,
- to_userptr_vma(vma)->userptr.notifier_seq));
+ to_userptr_vma(vma)->userptr.pages.notifier_seq));
WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(vm),
DMA_RESV_USAGE_BOOKKEEP));
@@ -4000,8 +3944,8 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
xe_device_wmb(xe);
- ret = xe_vm_range_tilemask_tlb_invalidation(xe_vma_vm(vma), xe_vma_start(vma),
- xe_vma_end(vma), tile_mask);
+ ret = xe_vm_range_tilemask_tlb_inval(xe_vma_vm(vma), xe_vma_start(vma),
+ xe_vma_end(vma), tile_mask);
/* WRITE_ONCE pairs with READ_ONCE in xe_vm_has_valid_gpu_mapping() */
WRITE_ONCE(vma->tile_invalidated, vma->tile_mask);
@@ -4203,3 +4147,223 @@ void xe_vm_snapshot_free(struct xe_vm_snapshot *snap)
}
kvfree(snap);
}
+
+/**
+ * xe_vma_need_vram_for_atomic - Check if VMA needs VRAM migration for atomic operations
+ * @xe: Pointer to the XE device structure
+ * @vma: Pointer to the virtual memory area (VMA) structure
+ * @is_atomic: In pagefault path and atomic operation
+ *
+ * This function determines whether the given VMA needs to be migrated to
+ * VRAM in order to do atomic GPU operation.
+ *
+ * Return:
+ * 1 - Migration to VRAM is required
+ * 0 - Migration is not required
+ * -EACCES - Invalid access for atomic memory attr
+ *
+ */
+int xe_vma_need_vram_for_atomic(struct xe_device *xe, struct xe_vma *vma, bool is_atomic)
+{
+ u32 atomic_access = xe_vma_bo(vma) ? xe_vma_bo(vma)->attr.atomic_access :
+ vma->attr.atomic_access;
+
+ if (!IS_DGFX(xe) || !is_atomic)
+		return 0;
+
+ /*
+ * NOTE: The checks implemented here are platform-specific. For
+ * instance, on a device supporting CXL atomics, these would ideally
+ * work universally without additional handling.
+ */
+ switch (atomic_access) {
+ case DRM_XE_ATOMIC_DEVICE:
+ return !xe->info.has_device_atomics_on_smem;
+
+ case DRM_XE_ATOMIC_CPU:
+ return -EACCES;
+
+ case DRM_XE_ATOMIC_UNDEFINED:
+ case DRM_XE_ATOMIC_GLOBAL:
+ default:
+ return 1;
+ }
+}
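Editor's note: the return value of xe_vma_need_vram_for_atomic() is a
tri-state (1 / 0 / -EACCES), so callers must check for the error before
treating it as a boolean. A minimal consumer sketch; the two migrate/validate
helpers are hypothetical stand-ins for the real fault-path code:

	int example_fault_check(struct xe_device *xe, struct xe_vma *vma,
				bool is_atomic)
	{
		int need_vram = xe_vma_need_vram_for_atomic(xe, vma, is_atomic);

		if (need_vram < 0)
			return need_vram;	/* -EACCES: range is CPU-atomic only */

		return need_vram ? example_migrate_to_vram(vma) :
				   example_validate_in_place(vma);
	}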
+
+static int xe_vm_alloc_vma(struct xe_vm *vm,
+ struct drm_gpuvm_map_req *map_req,
+ bool is_madvise)
+{
+ struct xe_vma_ops vops;
+ struct drm_gpuva_ops *ops = NULL;
+ struct drm_gpuva_op *__op;
+ bool is_cpu_addr_mirror = false;
+ bool remap_op = false;
+ struct xe_vma_mem_attr tmp_attr;
+ u16 default_pat;
+ int err;
+
+ lockdep_assert_held_write(&vm->lock);
+
+ if (is_madvise)
+ ops = drm_gpuvm_madvise_ops_create(&vm->gpuvm, map_req);
+ else
+ ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, map_req);
+
+ if (IS_ERR(ops))
+ return PTR_ERR(ops);
+
+ if (list_empty(&ops->list)) {
+ err = 0;
+ goto free_ops;
+ }
+
+ drm_gpuva_for_each_op(__op, ops) {
+ struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
+ struct xe_vma *vma = NULL;
+
+ if (!is_madvise) {
+ if (__op->op == DRM_GPUVA_OP_UNMAP) {
+ vma = gpuva_to_vma(op->base.unmap.va);
+ XE_WARN_ON(!xe_vma_has_default_mem_attrs(vma));
+ default_pat = vma->attr.default_pat_index;
+ }
+
+ if (__op->op == DRM_GPUVA_OP_REMAP) {
+ vma = gpuva_to_vma(op->base.remap.unmap->va);
+ default_pat = vma->attr.default_pat_index;
+ }
+
+ if (__op->op == DRM_GPUVA_OP_MAP) {
+ op->map.is_cpu_addr_mirror = true;
+ op->map.pat_index = default_pat;
+ }
+ } else {
+ if (__op->op == DRM_GPUVA_OP_REMAP) {
+ vma = gpuva_to_vma(op->base.remap.unmap->va);
+ xe_assert(vm->xe, !remap_op);
+ xe_assert(vm->xe, xe_vma_has_no_bo(vma));
+ remap_op = true;
+
+ if (xe_vma_is_cpu_addr_mirror(vma))
+ is_cpu_addr_mirror = true;
+ else
+ is_cpu_addr_mirror = false;
+ }
+
+ if (__op->op == DRM_GPUVA_OP_MAP) {
+ xe_assert(vm->xe, remap_op);
+ remap_op = false;
+ /*
+				 * For madvise ops, DRM_GPUVA_OP_MAP always comes
+				 * after DRM_GPUVA_OP_REMAP, so make sure we set
+				 * op->map.is_cpu_addr_mirror to true when the
+				 * REMAP is for a xe_vma_is_cpu_addr_mirror() vma.
+ */
+ op->map.is_cpu_addr_mirror = is_cpu_addr_mirror;
+ }
+ }
+ print_op(vm->xe, __op);
+ }
+
+ xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
+
+ if (is_madvise)
+ vops.flags |= XE_VMA_OPS_FLAG_MADVISE;
+
+ err = vm_bind_ioctl_ops_parse(vm, ops, &vops);
+ if (err)
+ goto unwind_ops;
+
+ xe_vm_lock(vm, false);
+
+ drm_gpuva_for_each_op(__op, ops) {
+ struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
+ struct xe_vma *vma;
+
+ if (__op->op == DRM_GPUVA_OP_UNMAP) {
+ vma = gpuva_to_vma(op->base.unmap.va);
+ /* There should be no unmap for madvise */
+ if (is_madvise)
+ XE_WARN_ON("UNEXPECTED UNMAP");
+
+ xe_vma_destroy(vma, NULL);
+ } else if (__op->op == DRM_GPUVA_OP_REMAP) {
+ vma = gpuva_to_vma(op->base.remap.unmap->va);
+			/* For madvise ops, store the attributes of the VMA being
+			 * unmapped by REMAP so they can be assigned to the newly
+			 * created MAP vma.
+ */
+ if (is_madvise)
+ tmp_attr = vma->attr;
+
+ xe_vma_destroy(gpuva_to_vma(op->base.remap.unmap->va), NULL);
+ } else if (__op->op == DRM_GPUVA_OP_MAP) {
+ vma = op->map.vma;
+			/* For a madvise call, MAP always follows REMAP.
+			 * Therefore tmp_attr always holds sane values, making it safe to
+			 * copy them to the new vma.
+ */
+ if (is_madvise)
+ vma->attr = tmp_attr;
+ }
+ }
+
+ xe_vm_unlock(vm);
+ drm_gpuva_ops_free(&vm->gpuvm, ops);
+ return 0;
+
+unwind_ops:
+ vm_bind_ioctl_ops_unwind(vm, &ops, 1);
+free_ops:
+ drm_gpuva_ops_free(&vm->gpuvm, ops);
+ return err;
+}
+
+/**
+ * xe_vm_alloc_madvise_vma - Allocate VMA's with madvise ops
+ * @vm: Pointer to the xe_vm structure
+ * @start: Starting input address
+ * @range: Size of the input range
+ *
+ * This function splits existing VMAs to create new VMAs covering the
+ * user-provided input range.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int xe_vm_alloc_madvise_vma(struct xe_vm *vm, uint64_t start, uint64_t range)
+{
+ struct drm_gpuvm_map_req map_req = {
+ .map.va.addr = start,
+ .map.va.range = range,
+ };
+
+ lockdep_assert_held_write(&vm->lock);
+
+ vm_dbg(&vm->xe->drm, "MADVISE_OPS_CREATE: addr=0x%016llx, size=0x%016llx", start, range);
+
+ return xe_vm_alloc_vma(vm, &map_req, true);
+}
+
+/**
+ * xe_vm_alloc_cpu_addr_mirror_vma - Allocate CPU addr mirror vma
+ * @vm: Pointer to the xe_vm structure
+ * @start: Starting input address
+ * @range: Size of the input range
+ *
+ * This function splits/merges existing VMAs to create new VMAs covering the
+ * user-provided input range.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int xe_vm_alloc_cpu_addr_mirror_vma(struct xe_vm *vm, uint64_t start, uint64_t range)
+{
+ struct drm_gpuvm_map_req map_req = {
+ .map.va.addr = start,
+ .map.va.range = range,
+ };
+
+ lockdep_assert_held_write(&vm->lock);
+
+ vm_dbg(&vm->xe->drm, "CPU_ADDR_MIRROR_VMA_OPS_CREATE: addr=0x%016llx, size=0x%016llx",
+ start, range);
+
+ return xe_vm_alloc_vma(vm, &map_req, false);
+}
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index 82b112795807..ef8a5019574e 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -66,6 +66,8 @@ static inline bool xe_vm_is_closed_or_banned(struct xe_vm *vm)
struct xe_vma *
xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range);
+bool xe_vma_has_default_mem_attrs(struct xe_vma *vma);
+
/**
* xe_vm_has_scratch() - Whether the vm is configured for scratch PTEs
* @vm: The vm
@@ -171,6 +173,12 @@ static inline bool xe_vma_is_userptr(struct xe_vma *vma)
struct xe_vma *xe_vm_find_vma_by_addr(struct xe_vm *vm, u64 page_addr);
+int xe_vma_need_vram_for_atomic(struct xe_device *xe, struct xe_vma *vma, bool is_atomic);
+
+int xe_vm_alloc_madvise_vma(struct xe_vm *vm, uint64_t addr, uint64_t size);
+
+int xe_vm_alloc_cpu_addr_mirror_vma(struct xe_vm *vm, uint64_t addr, uint64_t size);
+
/**
* to_userptr_vma() - Return a pointer to an embedding userptr vma
* @vma: Pointer to the embedded struct xe_vma
@@ -191,7 +199,7 @@ int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int xe_vm_bind_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
-
+int xe_vm_query_vmas_attrs_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
void xe_vm_close_and_put(struct xe_vm *vm);
static inline bool xe_vm_in_fault_mode(struct xe_vm *vm)
@@ -212,12 +220,6 @@ static inline bool xe_vm_in_preempt_fence_mode(struct xe_vm *vm)
int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);
void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);
-int xe_vm_userptr_pin(struct xe_vm *vm);
-
-int __xe_vm_userptr_needs_repin(struct xe_vm *vm);
-
-int xe_vm_userptr_check_repin(struct xe_vm *vm);
-
int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker);
struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma,
u8 tile_mask);
@@ -228,8 +230,8 @@ struct dma_fence *xe_vm_range_rebind(struct xe_vm *vm,
struct dma_fence *xe_vm_range_unbind(struct xe_vm *vm,
struct xe_svm_range *range);
-int xe_vm_range_tilemask_tlb_invalidation(struct xe_vm *vm, u64 start,
- u64 end, u8 tile_mask);
+int xe_vm_range_tilemask_tlb_inval(struct xe_vm *vm, u64 start,
+ u64 end, u8 tile_mask);
int xe_vm_invalidate_vma(struct xe_vma *vma);
@@ -258,12 +260,6 @@ static inline void xe_vm_reactivate_rebind(struct xe_vm *vm)
}
}
-int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma);
-
-int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma);
-
-bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end);
-
int xe_vm_lock_vma(struct drm_exec *exec, struct xe_vma *vma);
int xe_vm_validate_rebind(struct xe_vm *vm, struct drm_exec *exec,
@@ -294,6 +290,8 @@ void xe_vm_kill(struct xe_vm *vm, bool unlocked);
*/
#define xe_vm_assert_held(vm) dma_resv_assert_held(xe_vm_resv(vm))
+int xe_vm_drm_exec_lock(struct xe_vm *vm, struct drm_exec *exec);
+
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
#define vm_dbg drm_dbg
#else
@@ -323,7 +321,7 @@ static inline void xe_vm_set_validating(struct xe_vm *vm, bool allow_res_evict)
if (vm && !allow_res_evict) {
xe_vm_assert_held(vm);
/* Pairs with READ_ONCE in xe_vm_is_validating() */
- WRITE_ONCE(vm->validating, current);
+ WRITE_ONCE(vm->validation.validating, current);
}
}
@@ -341,7 +339,7 @@ static inline void xe_vm_clear_validating(struct xe_vm *vm, bool allow_res_evict
{
if (vm && !allow_res_evict) {
/* Pairs with READ_ONCE in xe_vm_is_validating() */
- WRITE_ONCE(vm->validating, NULL);
+ WRITE_ONCE(vm->validation.validating, NULL);
}
}
@@ -359,7 +357,7 @@ static inline void xe_vm_clear_validating(struct xe_vm *vm, bool allow_res_evict
static inline bool xe_vm_is_validating(struct xe_vm *vm)
{
	/* Pairs with WRITE_ONCE in xe_vm_set_validating() */
- if (READ_ONCE(vm->validating) == current) {
+ if (READ_ONCE(vm->validation.validating) == current) {
xe_vm_assert_held(vm);
return true;
}
@@ -367,6 +365,34 @@ static inline bool xe_vm_is_validating(struct xe_vm *vm)
}
/**
+ * xe_vm_set_validation_exec() - Accessor to set the drm_exec object
+ * @vm: The vm we want to register a drm_exec object with.
+ * @exec: The exec object we want to register.
+ *
+ * Set the drm_exec object used to lock the vm's resv.
+ */
+static inline void xe_vm_set_validation_exec(struct xe_vm *vm, struct drm_exec *exec)
+{
+ xe_vm_assert_held(vm);
+ xe_assert(vm->xe, !!exec ^ !!vm->validation._exec);
+ vm->validation._exec = exec;
+}
+
+/**
+ * xe_vm_validation_exec() - Accessor to read the drm_exec object
+ * @vm: The vm whose drm_exec object we want to read.
+ *
+ * Return: The drm_exec object used to lock the vm's resv. The value
+ * is a valid pointer, %NULL, or one of the special values defined in
+ * xe_validation.h.
+ */
+static inline struct drm_exec *xe_vm_validation_exec(struct xe_vm *vm)
+{
+ xe_vm_assert_held(vm);
+ return vm->validation._exec;
+}
+
+/**
* xe_vm_has_valid_gpu_mapping() - Advisory helper to check if VMA or SVM range has
* a valid GPU mapping
* @tile: The tile which the GPU mapping belongs to
@@ -385,11 +411,4 @@ static inline bool xe_vm_is_validating(struct xe_vm *vm)
#define xe_vm_has_valid_gpu_mapping(tile, tile_present, tile_invalidated) \
((READ_ONCE(tile_present) & ~READ_ONCE(tile_invalidated)) & BIT((tile)->id))
-#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
-void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma);
-#else
-static inline void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma)
-{
-}
-#endif
#endif
diff --git a/drivers/gpu/drm/xe/xe_vm_madvise.c b/drivers/gpu/drm/xe/xe_vm_madvise.c
new file mode 100644
index 000000000000..cad3cf627c3f
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_vm_madvise.c
@@ -0,0 +1,431 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include "xe_vm_madvise.h"
+
+#include <linux/nospec.h>
+#include <drm/xe_drm.h>
+
+#include "xe_bo.h"
+#include "xe_pat.h"
+#include "xe_pt.h"
+#include "xe_svm.h"
+
+struct xe_vmas_in_madvise_range {
+ u64 addr;
+ u64 range;
+ struct xe_vma **vmas;
+ int num_vmas;
+ bool has_bo_vmas;
+ bool has_svm_userptr_vmas;
+};
+
+static int get_vmas(struct xe_vm *vm, struct xe_vmas_in_madvise_range *madvise_range)
+{
+ u64 addr = madvise_range->addr;
+ u64 range = madvise_range->range;
+
+ struct xe_vma **__vmas;
+ struct drm_gpuva *gpuva;
+ int max_vmas = 8;
+
+ lockdep_assert_held(&vm->lock);
+
+ madvise_range->num_vmas = 0;
+ madvise_range->vmas = kmalloc_array(max_vmas, sizeof(*madvise_range->vmas), GFP_KERNEL);
+ if (!madvise_range->vmas)
+ return -ENOMEM;
+
+ vm_dbg(&vm->xe->drm, "VMA's in range: start=0x%016llx, end=0x%016llx", addr, addr + range);
+
+ drm_gpuvm_for_each_va_range(gpuva, &vm->gpuvm, addr, addr + range) {
+ struct xe_vma *vma = gpuva_to_vma(gpuva);
+
+ if (xe_vma_bo(vma))
+ madvise_range->has_bo_vmas = true;
+ else if (xe_vma_is_cpu_addr_mirror(vma) || xe_vma_is_userptr(vma))
+ madvise_range->has_svm_userptr_vmas = true;
+
+ if (madvise_range->num_vmas == max_vmas) {
+ max_vmas <<= 1;
+ __vmas = krealloc(madvise_range->vmas,
+ max_vmas * sizeof(*madvise_range->vmas),
+ GFP_KERNEL);
+ if (!__vmas) {
+ kfree(madvise_range->vmas);
+ return -ENOMEM;
+ }
+ madvise_range->vmas = __vmas;
+ }
+
+ madvise_range->vmas[madvise_range->num_vmas] = vma;
+ (madvise_range->num_vmas)++;
+ }
+
+ if (!madvise_range->num_vmas)
+ kfree(madvise_range->vmas);
+
+ vm_dbg(&vm->xe->drm, "madvise_range-num_vmas = %d\n", madvise_range->num_vmas);
+
+ return 0;
+}
+
+static void madvise_preferred_mem_loc(struct xe_device *xe, struct xe_vm *vm,
+ struct xe_vma **vmas, int num_vmas,
+ struct drm_xe_madvise *op)
+{
+ int i;
+
+ xe_assert(vm->xe, op->type == DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC);
+
+ for (i = 0; i < num_vmas; i++) {
+ /*TODO: Extend attributes to bo based vmas */
+ if ((vmas[i]->attr.preferred_loc.devmem_fd == op->preferred_mem_loc.devmem_fd &&
+ vmas[i]->attr.preferred_loc.migration_policy ==
+ op->preferred_mem_loc.migration_policy) ||
+ !xe_vma_is_cpu_addr_mirror(vmas[i])) {
+ vmas[i]->skip_invalidation = true;
+ } else {
+ vmas[i]->skip_invalidation = false;
+ vmas[i]->attr.preferred_loc.devmem_fd = op->preferred_mem_loc.devmem_fd;
+			/* Until multi-device support is added, migration_policy
+			 * is of no use and can be ignored.
+ */
+ vmas[i]->attr.preferred_loc.migration_policy =
+ op->preferred_mem_loc.migration_policy;
+ }
+ }
+}
+
+static void madvise_atomic(struct xe_device *xe, struct xe_vm *vm,
+ struct xe_vma **vmas, int num_vmas,
+ struct drm_xe_madvise *op)
+{
+ struct xe_bo *bo;
+ int i;
+
+ xe_assert(vm->xe, op->type == DRM_XE_MEM_RANGE_ATTR_ATOMIC);
+ xe_assert(vm->xe, op->atomic.val <= DRM_XE_ATOMIC_CPU);
+
+ for (i = 0; i < num_vmas; i++) {
+ if (xe_vma_is_userptr(vmas[i]) &&
+ !(op->atomic.val == DRM_XE_ATOMIC_DEVICE &&
+ xe->info.has_device_atomics_on_smem)) {
+ vmas[i]->skip_invalidation = true;
+ continue;
+ }
+
+ if (vmas[i]->attr.atomic_access == op->atomic.val) {
+ vmas[i]->skip_invalidation = true;
+ } else {
+ vmas[i]->skip_invalidation = false;
+ vmas[i]->attr.atomic_access = op->atomic.val;
+ }
+
+ bo = xe_vma_bo(vmas[i]);
+ if (!bo || bo->attr.atomic_access == op->atomic.val)
+ continue;
+
+ vmas[i]->skip_invalidation = false;
+ xe_bo_assert_held(bo);
+ bo->attr.atomic_access = op->atomic.val;
+
+ /* Invalidate cpu page table, so bo can migrate to smem in next access */
+ if (xe_bo_is_vram(bo) &&
+ (bo->attr.atomic_access == DRM_XE_ATOMIC_CPU ||
+ bo->attr.atomic_access == DRM_XE_ATOMIC_GLOBAL))
+ ttm_bo_unmap_virtual(&bo->ttm);
+ }
+}
+
+static void madvise_pat_index(struct xe_device *xe, struct xe_vm *vm,
+ struct xe_vma **vmas, int num_vmas,
+ struct drm_xe_madvise *op)
+{
+ int i;
+
+ xe_assert(vm->xe, op->type == DRM_XE_MEM_RANGE_ATTR_PAT);
+
+ for (i = 0; i < num_vmas; i++) {
+ if (vmas[i]->attr.pat_index == op->pat_index.val) {
+ vmas[i]->skip_invalidation = true;
+ } else {
+ vmas[i]->skip_invalidation = false;
+ vmas[i]->attr.pat_index = op->pat_index.val;
+ }
+ }
+}
+
+typedef void (*madvise_func)(struct xe_device *xe, struct xe_vm *vm,
+ struct xe_vma **vmas, int num_vmas,
+ struct drm_xe_madvise *op);
+
+static const madvise_func madvise_funcs[] = {
+ [DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC] = madvise_preferred_mem_loc,
+ [DRM_XE_MEM_RANGE_ATTR_ATOMIC] = madvise_atomic,
+ [DRM_XE_MEM_RANGE_ATTR_PAT] = madvise_pat_index,
+};
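Editor's note: madvise_funcs[] is indexed by the userspace-supplied
args->type, so xe_vm_madvise_ioctl() below first bounds-checks the type in
madvise_args_are_sane() and then clamps speculation with
array_index_nospec() before the table lookup. The pattern, reduced to its
essentials:

	u32 type = array_index_nospec(args->type, ARRAY_SIZE(madvise_funcs));

	madvise_funcs[type](xe, vm, vmas, num_vmas, args);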
+
+static u8 xe_zap_ptes_in_madvise_range(struct xe_vm *vm, u64 start, u64 end)
+{
+ struct drm_gpuva *gpuva;
+ struct xe_tile *tile;
+ u8 id, tile_mask = 0;
+
+ lockdep_assert_held_write(&vm->lock);
+
+ /* Wait for pending binds */
+ if (dma_resv_wait_timeout(xe_vm_resv(vm), DMA_RESV_USAGE_BOOKKEEP,
+ false, MAX_SCHEDULE_TIMEOUT) <= 0)
+ XE_WARN_ON(1);
+
+ drm_gpuvm_for_each_va_range(gpuva, &vm->gpuvm, start, end) {
+ struct xe_vma *vma = gpuva_to_vma(gpuva);
+
+ if (vma->skip_invalidation || xe_vma_is_null(vma))
+ continue;
+
+ if (xe_vma_is_cpu_addr_mirror(vma)) {
+ tile_mask |= xe_svm_ranges_zap_ptes_in_range(vm,
+ xe_vma_start(vma),
+ xe_vma_end(vma));
+ } else {
+ for_each_tile(tile, vm->xe, id) {
+ if (xe_pt_zap_ptes(tile, vma)) {
+ tile_mask |= BIT(id);
+
+ /*
+ * WRITE_ONCE pairs with READ_ONCE
+ * in xe_vm_has_valid_gpu_mapping()
+ */
+ WRITE_ONCE(vma->tile_invalidated,
+ vma->tile_invalidated | BIT(id));
+ }
+ }
+ }
+ }
+
+ return tile_mask;
+}
+
+static int xe_vm_invalidate_madvise_range(struct xe_vm *vm, u64 start, u64 end)
+{
+ u8 tile_mask = xe_zap_ptes_in_madvise_range(vm, start, end);
+
+ if (!tile_mask)
+ return 0;
+
+ xe_device_wmb(vm->xe);
+
+ return xe_vm_range_tilemask_tlb_inval(vm, start, end, tile_mask);
+}
+
+static bool madvise_args_are_sane(struct xe_device *xe, const struct drm_xe_madvise *args)
+{
+ if (XE_IOCTL_DBG(xe, !args))
+ return false;
+
+ if (XE_IOCTL_DBG(xe, !IS_ALIGNED(args->start, SZ_4K)))
+ return false;
+
+ if (XE_IOCTL_DBG(xe, !IS_ALIGNED(args->range, SZ_4K)))
+ return false;
+
+ if (XE_IOCTL_DBG(xe, args->range < SZ_4K))
+ return false;
+
+ switch (args->type) {
+ case DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC:
+ {
+ s32 fd = (s32)args->preferred_mem_loc.devmem_fd;
+
+ if (XE_IOCTL_DBG(xe, fd < DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM))
+ return false;
+
+ if (XE_IOCTL_DBG(xe, args->preferred_mem_loc.migration_policy >
+ DRM_XE_MIGRATE_ONLY_SYSTEM_PAGES))
+ return false;
+
+ if (XE_IOCTL_DBG(xe, args->preferred_mem_loc.pad))
+ return false;
+
+ if (XE_IOCTL_DBG(xe, args->preferred_mem_loc.reserved))
+ return false;
+ break;
+ }
+ case DRM_XE_MEM_RANGE_ATTR_ATOMIC:
+ if (XE_IOCTL_DBG(xe, args->atomic.val > DRM_XE_ATOMIC_CPU))
+ return false;
+
+ if (XE_IOCTL_DBG(xe, args->atomic.pad))
+ return false;
+
+ if (XE_IOCTL_DBG(xe, args->atomic.reserved))
+ return false;
+
+ break;
+ case DRM_XE_MEM_RANGE_ATTR_PAT:
+ {
+ u16 coh_mode = xe_pat_index_get_coh_mode(xe, args->pat_index.val);
+
+ if (XE_IOCTL_DBG(xe, !coh_mode))
+ return false;
+
+ if (XE_WARN_ON(coh_mode > XE_COH_AT_LEAST_1WAY))
+ return false;
+
+ if (XE_IOCTL_DBG(xe, args->pat_index.pad))
+ return false;
+
+ if (XE_IOCTL_DBG(xe, args->pat_index.reserved))
+ return false;
+ break;
+ }
+ default:
+ if (XE_IOCTL_DBG(xe, 1))
+ return false;
+ }
+
+ if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
+ return false;
+
+ return true;
+}
+
+static bool check_bo_args_are_sane(struct xe_vm *vm, struct xe_vma **vmas,
+ int num_vmas, u32 atomic_val)
+{
+ struct xe_device *xe = vm->xe;
+ struct xe_bo *bo;
+ int i;
+
+ for (i = 0; i < num_vmas; i++) {
+ bo = xe_vma_bo(vmas[i]);
+ if (!bo)
+ continue;
+ /*
+ * NOTE: The following atomic checks are platform-specific. For example,
+ * if a device supports CXL atomics, these may not be necessary or
+ * may behave differently.
+ */
+ if (XE_IOCTL_DBG(xe, atomic_val == DRM_XE_ATOMIC_CPU &&
+ !(bo->flags & XE_BO_FLAG_SYSTEM)))
+ return false;
+
+ if (XE_IOCTL_DBG(xe, atomic_val == DRM_XE_ATOMIC_DEVICE &&
+ !(bo->flags & XE_BO_FLAG_VRAM0) &&
+ !(bo->flags & XE_BO_FLAG_VRAM1) &&
+ !(bo->flags & XE_BO_FLAG_SYSTEM &&
+ xe->info.has_device_atomics_on_smem)))
+ return false;
+
+ if (XE_IOCTL_DBG(xe, atomic_val == DRM_XE_ATOMIC_GLOBAL &&
+ (!(bo->flags & XE_BO_FLAG_SYSTEM) ||
+ (!(bo->flags & XE_BO_FLAG_VRAM0) &&
+ !(bo->flags & XE_BO_FLAG_VRAM1)))))
+ return false;
+ }
+ return true;
+}
+
+/**
+ * xe_vm_madvise_ioctl - Handle the MADVISE ioctl for a VM
+ * @dev: DRM device pointer
+ * @data: Pointer to ioctl data (drm_xe_madvise*)
+ * @file: DRM file pointer
+ *
+ * Handles the MADVISE ioctl to provide memory advice for VMAs within
+ * the input range.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_vm_madvise_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct xe_device *xe = to_xe_device(dev);
+ struct xe_file *xef = to_xe_file(file);
+ struct drm_xe_madvise *args = data;
+ struct xe_vmas_in_madvise_range madvise_range = {.addr = args->start,
+ .range = args->range, };
+ struct xe_vm *vm;
+ struct drm_exec exec;
+ int err, attr_type;
+
+ vm = xe_vm_lookup(xef, args->vm_id);
+ if (XE_IOCTL_DBG(xe, !vm))
+ return -EINVAL;
+
+ if (!madvise_args_are_sane(vm->xe, args)) {
+ err = -EINVAL;
+ goto put_vm;
+ }
+
+ xe_svm_flush(vm);
+
+ err = down_write_killable(&vm->lock);
+ if (err)
+ goto put_vm;
+
+ if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
+ err = -ENOENT;
+ goto unlock_vm;
+ }
+
+ err = xe_vm_alloc_madvise_vma(vm, args->start, args->range);
+ if (err)
+ goto unlock_vm;
+
+ err = get_vmas(vm, &madvise_range);
+ if (err || !madvise_range.num_vmas)
+ goto unlock_vm;
+
+ if (madvise_range.has_bo_vmas) {
+ if (args->type == DRM_XE_MEM_RANGE_ATTR_ATOMIC) {
+ if (!check_bo_args_are_sane(vm, madvise_range.vmas,
+ madvise_range.num_vmas,
+ args->atomic.val)) {
+ err = -EINVAL;
+ goto unlock_vm;
+ }
+ }
+
+ drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES | DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
+ drm_exec_until_all_locked(&exec) {
+ for (int i = 0; i < madvise_range.num_vmas; i++) {
+ struct xe_bo *bo = xe_vma_bo(madvise_range.vmas[i]);
+
+ if (!bo)
+ continue;
+ err = drm_exec_lock_obj(&exec, &bo->ttm.base);
+ drm_exec_retry_on_contention(&exec);
+ if (err)
+ goto err_fini;
+ }
+ }
+ }
+
+ if (madvise_range.has_svm_userptr_vmas) {
+ err = xe_svm_notifier_lock_interruptible(vm);
+ if (err)
+ goto err_fini;
+ }
+
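+ /*
+ * args->type was validated in madvise_args_are_sane();
+ * array_index_nospec() additionally clamps the index under speculation
+ * so that madvise_funcs cannot be read out of bounds (Spectre-v1
+ * hardening).
+ */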
+ attr_type = array_index_nospec(args->type, ARRAY_SIZE(madvise_funcs));
+ madvise_funcs[attr_type](xe, vm, madvise_range.vmas, madvise_range.num_vmas, args);
+
+ err = xe_vm_invalidate_madvise_range(vm, args->start, args->start + args->range);
+
+ if (madvise_range.has_svm_userptr_vmas)
+ xe_svm_notifier_unlock(vm);
+
+err_fini:
+ if (madvise_range.has_bo_vmas)
+ drm_exec_fini(&exec);
+ kfree(madvise_range.vmas);
+ madvise_range.vmas = NULL;
+unlock_vm:
+ up_write(&vm->lock);
+put_vm:
+ xe_vm_put(vm);
+ return err;
+}
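+
+/*
+ * Hypothetical userspace usage sketch (not part of the driver). The field
+ * names follow the drm_xe_madvise layout validated above; the ioctl number
+ * is assumed to be the one generated for this handler in the xe uapi:
+ *
+ *	struct drm_xe_madvise args = {
+ *		.vm_id = vm_id,
+ *		.start = addr,
+ *		.range = size,
+ *		.type = DRM_XE_MEM_RANGE_ATTR_ATOMIC,
+ *		.atomic = { .val = DRM_XE_ATOMIC_DEVICE },
+ *	};
+ *
+ *	err = ioctl(fd, DRM_IOCTL_XE_MADVISE, &args);
+ */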
diff --git a/drivers/gpu/drm/xe/xe_vm_madvise.h b/drivers/gpu/drm/xe/xe_vm_madvise.h
new file mode 100644
index 000000000000..b0e1fc445f23
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_vm_madvise.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_VM_MADVISE_H_
+#define _XE_VM_MADVISE_H_
+
+struct drm_device;
+struct drm_file;
+
+int xe_vm_madvise_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index 6058cf739388..da39940501d8 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -17,6 +17,7 @@
#include "xe_device_types.h"
#include "xe_pt_types.h"
#include "xe_range_fence.h"
+#include "xe_userptr.h"
struct xe_bo;
struct xe_svm_range;
@@ -46,35 +47,42 @@ struct xe_vm_pgtable_update_op;
#define XE_VMA_DUMPABLE (DRM_GPUVA_USERBITS << 8)
#define XE_VMA_SYSTEM_ALLOCATOR (DRM_GPUVA_USERBITS << 9)
-/** struct xe_userptr - User pointer */
-struct xe_userptr {
- /** @invalidate_link: Link for the vm::userptr.invalidated list */
- struct list_head invalidate_link;
- /** @userptr: link into VM repin list if userptr. */
- struct list_head repin_link;
+/**
+ * struct xe_vma_mem_attr - memory attributes associated with vma
+ */
+struct xe_vma_mem_attr {
+ /** @preferred_loc: preferred memory location */
+ struct {
+ /** @preferred_loc.migration_policy: Page migration policy */
+ u32 migration_policy;
+
+ /**
+ * @preferred_loc.devmem_fd: used for determining the pagemap_fd
+ * requested by the user. DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM and
+ * DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE mean system memory or the
+ * closest device memory, respectively.
+ */
+ u32 devmem_fd;
+ } preferred_loc;
+
/**
- * @notifier: MMU notifier for user pointer (invalidation call back)
+ * @atomic_access: The atomic access type for the vma
+ * See %DRM_XE_VMA_ATOMIC_UNDEFINED, %DRM_XE_VMA_ATOMIC_DEVICE,
+ * %DRM_XE_VMA_ATOMIC_GLOBAL, and %DRM_XE_VMA_ATOMIC_CPU for possible
+ * values. These are defined in uapi/drm/xe_drm.h.
*/
- struct mmu_interval_notifier notifier;
- /** @sgt: storage for a scatter gather table */
- struct sg_table sgt;
- /** @sg: allocated scatter gather table */
- struct sg_table *sg;
- /** @notifier_seq: notifier sequence number */
- unsigned long notifier_seq;
- /** @unmap_mutex: Mutex protecting dma-unmapping */
- struct mutex unmap_mutex;
+ u32 atomic_access;
+
/**
- * @initial_bind: user pointer has been bound at least once.
- * write: vm->userptr.notifier_lock in read mode and vm->resv held.
- * read: vm->userptr.notifier_lock in write mode or vm->resv held.
+ * @default_pat_index: The pat index for the VMA, set by the user
+ * during the first bind.
*/
- bool initial_bind;
- /** @mapped: Whether the @sgt sg-table is dma-mapped. Protected by @unmap_mutex. */
- bool mapped;
-#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
- u32 divisor;
-#endif
+ u16 default_pat_index;
+
+ /**
+ * @pat_index: The pat index to use when encoding the PTEs for this vma.
+ * Same as @default_pat_index unless overwritten by madvise.
+ */
+ u16 pat_index;
};
struct xe_vma {
@@ -102,10 +110,10 @@ struct xe_vma {
/**
 * @tile_invalidated: Tile mask of bindings that are invalidated for this VMA.
- * protected by BO's resv and for userptrs, vm->userptr.notifier_lock in
- * write mode for writing or vm->userptr.notifier_lock in read mode and
+ * protected by BO's resv and for userptrs, vm->svm.gpusvm.notifier_lock in
+ * write mode for writing or vm->svm.gpusvm.notifier_lock in read mode and
* the vm->resv. For stable reading, BO's resv or userptr
- * vm->userptr.notifier_lock in read mode is required. Can be
+ * vm->svm.gpusvm.notifier_lock in read mode is required. Can be
* opportunistically read with READ_ONCE outside of locks.
*/
u8 tile_invalidated;
@@ -116,7 +124,7 @@ struct xe_vma {
/**
 * @tile_present: Tile mask of bindings that are present for this VMA.
* protected by vm->lock, vm->resv and for userptrs,
- * vm->userptr.notifier_lock for writing. Needs either for reading,
+ * vm->svm.gpusvm.notifier_lock for writing. Needs either for reading,
* but if reading is done under the vm->lock only, it needs to be held
* in write mode.
*/
@@ -126,15 +134,22 @@ struct xe_vma {
u8 tile_staged;
/**
- * @pat_index: The pat index to use when encoding the PTEs for this vma.
+ * @skip_invalidation: Used in madvise to avoid invalidation
+ * if the memory attributes don't change
*/
- u16 pat_index;
+ bool skip_invalidation;
/**
* @ufence: The user fence that was provided with MAP.
* Needs to be signalled before UNMAP can be processed.
*/
struct xe_user_fence *ufence;
+
+ /**
+ * @attr: The attributes of the vma, which determine the migration policy
+ * and the encoding of the PTEs for this vma.
+ */
+ struct xe_vma_mem_attr attr;
};
/**
@@ -244,33 +259,7 @@ struct xe_vm {
const struct xe_pt_ops *pt_ops;
/** @userptr: user pointer state */
- struct {
- /**
- * @userptr.repin_list: list of VMAs which are user pointers,
- * and needs repinning. Protected by @lock.
- */
- struct list_head repin_list;
- /**
- * @notifier_lock: protects notifier in write mode and
- * submission in read mode.
- */
- struct rw_semaphore notifier_lock;
- /**
- * @userptr.invalidated_lock: Protects the
- * @userptr.invalidated list.
- */
- spinlock_t invalidated_lock;
- /**
- * @userptr.invalidated: List of invalidated userptrs, not yet
- * picked
- * up for revalidation. Protected from access with the
- * @invalidated_lock. Removing items from the list
- * additionally requires @lock in write mode, and adding
- * items to the list requires either the @userptr.notifier_lock in
- * write mode, OR @lock in write mode.
- */
- struct list_head invalidated;
- } userptr;
+ struct xe_userptr_vm userptr;
/** @preempt: preempt state */
struct {
@@ -318,18 +307,34 @@ struct xe_vm {
} error_capture;
/**
+ * @validation: Validation data only valid with the vm resv held.
+ * Note: This is really task state of the task holding the vm resv,
+ * and moving forward we should come up with a better way of
+ * passing this down the call chain.
+ */
+ struct {
+ /**
+ * @validation.validating: The task that is currently making bos
+ * resident for this vm.
+ * Protected by the VM's resv for writing. Opportunistic reading can be done
+ * using READ_ONCE. Note: This is a workaround for the
+ * TTM eviction_valuable() callback not being passed a struct
+ * ttm_operation_context(). Future work might want to address this.
+ */
+ struct task_struct *validating;
+ /**
+ * @validation._exec: The drm_exec context used when locking the vm resv.
+ * Protected by the vm's resv.
+ */
+ struct drm_exec *_exec;
+ } validation;
+
+ /**
* @tlb_flush_seqno: Required TLB flush seqno for the next exec.
* protected by the vm resv.
*/
u64 tlb_flush_seqno;
- /**
- * @validating: The task that is currently making bos resident for this vm.
- * Protected by the VM's resv for writing. Opportunistic reading can be done
- * using READ_ONCE. Note: This is a workaround for the
- * TTM eviction_valuable() callback not being passed a struct
- * ttm_operation_context(). Future work might want to address this.
- */
- struct task_struct *validating;
/** @batch_invalidate_tlb: Always invalidate TLB before batch start */
bool batch_invalidate_tlb;
/** @xef: XE file handle for tracking this VM's drm client */
@@ -400,8 +405,11 @@ struct xe_vma_op_prefetch_range {
struct xarray range;
/** @ranges_count: number of svm ranges to map */
u32 ranges_count;
- /** @region: memory region to prefetch to */
- u32 region;
+ /**
+ * @tile: Pointer to the tile structure containing memory to prefetch.
+ * NULL if the requested prefetch region is smem.
+ */
+ struct xe_tile *tile;
};
/** enum xe_vma_op_flags - flags for VMA operation */
@@ -467,6 +475,7 @@ struct xe_vma_ops {
struct xe_vm_pgtable_update_ops pt_update_ops[XE_MAX_TILES_PER_DEVICE];
/** @flag: signify the properties within xe_vma_ops*/
#define XE_VMA_OPS_FLAG_HAS_SVM_PREFETCH BIT(0)
+#define XE_VMA_OPS_FLAG_MADVISE BIT(1)
u32 flags;
#ifdef TEST_VM_OPS_ERROR
/** @inject_error: inject error to test error handling */
diff --git a/drivers/gpu/drm/xe/xe_vram.c b/drivers/gpu/drm/xe/xe_vram.c
index e421a74fb87c..b44ebf50fedb 100644
--- a/drivers/gpu/drm/xe/xe_vram.c
+++ b/drivers/gpu/drm/xe/xe_vram.c
@@ -3,6 +3,7 @@
* Copyright © 2021-2024 Intel Corporation
*/
+#include <kunit/visibility.h>
#include <linux/pci.h>
#include <drm/drm_managed.h>
@@ -19,7 +20,9 @@
#include "xe_mmio.h"
#include "xe_module.h"
#include "xe_sriov.h"
+#include "xe_ttm_vram_mgr.h"
#include "xe_vram.h"
+#include "xe_vram_types.h"
#define BAR_SIZE_SHIFT 20
@@ -136,7 +139,7 @@ static bool resource_is_valid(struct pci_dev *pdev, int bar)
return true;
}
-static int determine_lmem_bar_size(struct xe_device *xe)
+static int determine_lmem_bar_size(struct xe_device *xe, struct xe_vram_region *lmem_bar)
{
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
@@ -147,16 +150,16 @@ static int determine_lmem_bar_size(struct xe_device *xe)
resize_vram_bar(xe);
- xe->mem.vram.io_start = pci_resource_start(pdev, LMEM_BAR);
- xe->mem.vram.io_size = pci_resource_len(pdev, LMEM_BAR);
- if (!xe->mem.vram.io_size)
+ lmem_bar->io_start = pci_resource_start(pdev, LMEM_BAR);
+ lmem_bar->io_size = pci_resource_len(pdev, LMEM_BAR);
+ if (!lmem_bar->io_size)
return -EIO;
/* XXX: Need to change when xe link code is ready */
- xe->mem.vram.dpa_base = 0;
+ lmem_bar->dpa_base = 0;
/* set up a map to the total memory area. */
- xe->mem.vram.mapping = ioremap_wc(xe->mem.vram.io_start, xe->mem.vram.io_size);
+ lmem_bar->mapping = devm_ioremap_wc(&pdev->dev, lmem_bar->io_start, lmem_bar->io_size);
return 0;
}
@@ -278,13 +281,71 @@ static void vram_fini(void *arg)
struct xe_tile *tile;
int id;
- if (xe->mem.vram.mapping)
- iounmap(xe->mem.vram.mapping);
-
- xe->mem.vram.mapping = NULL;
+ xe->mem.vram->mapping = NULL;
for_each_tile(tile, xe, id)
- tile->mem.vram.mapping = NULL;
+ tile->mem.vram->mapping = NULL;
+}
+
+struct xe_vram_region *xe_vram_region_alloc(struct xe_device *xe, u8 id, u32 placement)
+{
+ struct xe_vram_region *vram;
+ struct drm_device *drm = &xe->drm;
+
+ xe_assert(xe, id < xe->info.tile_count);
+
+ vram = drmm_kzalloc(drm, sizeof(*vram), GFP_KERNEL);
+ if (!vram)
+ return NULL;
+
+ vram->xe = xe;
+ vram->id = id;
+ vram->placement = placement;
+#if defined(CONFIG_DRM_XE_PAGEMAP)
+ vram->migrate = xe->tiles[id].migrate;
+#endif
+ return vram;
+}
+
+static void print_vram_region_info(struct xe_device *xe, struct xe_vram_region *vram)
+{
+ struct drm_device *drm = &xe->drm;
+
+ if (vram->io_size < vram->usable_size)
+ drm_info(drm, "Small BAR device\n");
+
+ drm_info(drm,
+ "VRAM[%u]: Actual physical size %pa, usable size excluding stolen %pa, CPU accessible size %pa\n",
+ vram->id, &vram->actual_physical_size, &vram->usable_size, &vram->io_size);
+ drm_info(drm, "VRAM[%u]: DPA range: [%pa-%llx], io range: [%pa-%llx]\n",
+ vram->id, &vram->dpa_base, vram->dpa_base + (u64)vram->actual_physical_size,
+ &vram->io_start, vram->io_start + (u64)vram->io_size);
+}
+
+static int vram_region_init(struct xe_device *xe, struct xe_vram_region *vram,
+ struct xe_vram_region *lmem_bar, u64 offset, u64 usable_size,
+ u64 region_size, resource_size_t remain_io_size)
+{
+ /* Check if VRAM region is already initialized */
+ if (vram->mapping)
+ return 0;
+
+ vram->actual_physical_size = region_size;
+ vram->io_start = lmem_bar->io_start + offset;
+ vram->io_size = min_t(u64, usable_size, remain_io_size);
+
+ if (!vram->io_size) {
+ drm_err(&xe->drm, "Tile without any CPU visible VRAM. Aborting.\n");
+ return -ENODEV;
+ }
+
+ vram->dpa_base = lmem_bar->dpa_base + offset;
+ vram->mapping = lmem_bar->mapping + offset;
+ vram->usable_size = usable_size;
+
+ print_vram_region_info(xe, vram);
+
+ return 0;
}
/**
@@ -298,78 +359,108 @@ static void vram_fini(void *arg)
int xe_vram_probe(struct xe_device *xe)
{
struct xe_tile *tile;
- resource_size_t io_size;
+ struct xe_vram_region lmem_bar;
+ resource_size_t remain_io_size;
u64 available_size = 0;
u64 total_size = 0;
- u64 tile_offset;
- u64 tile_size;
- u64 vram_size;
int err;
u8 id;
if (!IS_DGFX(xe))
return 0;
- /* Get the size of the root tile's vram for later accessibility comparison */
- tile = xe_device_get_root_tile(xe);
- err = tile_vram_size(tile, &vram_size, &tile_size, &tile_offset);
+ err = determine_lmem_bar_size(xe, &lmem_bar);
if (err)
return err;
+ drm_info(&xe->drm, "VISIBLE VRAM: %pa, %pa\n", &lmem_bar.io_start, &lmem_bar.io_size);
- err = determine_lmem_bar_size(xe);
- if (err)
- return err;
-
- drm_info(&xe->drm, "VISIBLE VRAM: %pa, %pa\n", &xe->mem.vram.io_start,
- &xe->mem.vram.io_size);
-
- io_size = xe->mem.vram.io_size;
+ remain_io_size = lmem_bar.io_size;
- /* tile specific ranges */
for_each_tile(tile, xe, id) {
- err = tile_vram_size(tile, &vram_size, &tile_size, &tile_offset);
+ u64 region_size;
+ u64 usable_size;
+ u64 tile_offset;
+
+ err = tile_vram_size(tile, &usable_size, &region_size, &tile_offset);
if (err)
return err;
- tile->mem.vram.actual_physical_size = tile_size;
- tile->mem.vram.io_start = xe->mem.vram.io_start + tile_offset;
- tile->mem.vram.io_size = min_t(u64, vram_size, io_size);
+ total_size += region_size;
+ available_size += usable_size;
+
+ err = vram_region_init(xe, tile->mem.vram, &lmem_bar, tile_offset, usable_size,
+ region_size, remain_io_size);
+ if (err)
+ return err;
- if (!tile->mem.vram.io_size) {
- drm_err(&xe->drm, "Tile without any CPU visible VRAM. Aborting.\n");
- return -ENODEV;
+ if (total_size > lmem_bar.io_size) {
+ drm_info(&xe->drm, "VRAM: %pa is larger than resource %pa\n",
+ &total_size, &lmem_bar.io_size);
}
- tile->mem.vram.dpa_base = xe->mem.vram.dpa_base + tile_offset;
- tile->mem.vram.usable_size = vram_size;
- tile->mem.vram.mapping = xe->mem.vram.mapping + tile_offset;
+ remain_io_size -= min_t(u64, tile->mem.vram->actual_physical_size, remain_io_size);
+ }
- if (tile->mem.vram.io_size < tile->mem.vram.usable_size)
- drm_info(&xe->drm, "Small BAR device\n");
- drm_info(&xe->drm, "VRAM[%u, %u]: Actual physical size %pa, usable size exclude stolen %pa, CPU accessible size %pa\n", id,
- tile->id, &tile->mem.vram.actual_physical_size, &tile->mem.vram.usable_size, &tile->mem.vram.io_size);
- drm_info(&xe->drm, "VRAM[%u, %u]: DPA range: [%pa-%llx], io range: [%pa-%llx]\n", id, tile->id,
- &tile->mem.vram.dpa_base, tile->mem.vram.dpa_base + (u64)tile->mem.vram.actual_physical_size,
- &tile->mem.vram.io_start, tile->mem.vram.io_start + (u64)tile->mem.vram.io_size);
+ err = vram_region_init(xe, xe->mem.vram, &lmem_bar, 0, available_size, total_size,
+ lmem_bar.io_size);
+ if (err)
+ return err;
- /* calculate total size using tile size to get the correct HW sizing */
- total_size += tile_size;
- available_size += vram_size;
+ return devm_add_action_or_reset(xe->drm.dev, vram_fini, xe);
+}
- if (total_size > xe->mem.vram.io_size) {
- drm_info(&xe->drm, "VRAM: %pa is larger than resource %pa\n",
- &total_size, &xe->mem.vram.io_size);
- }
+/**
+ * xe_vram_region_io_start - Get the IO start of a VRAM region
+ * @vram: the VRAM region
+ *
+ * Return: the IO start of the VRAM region, or 0 if not valid
+ */
+resource_size_t xe_vram_region_io_start(const struct xe_vram_region *vram)
+{
+ return vram ? vram->io_start : 0;
+}
- io_size -= min_t(u64, tile_size, io_size);
- }
+/**
+ * xe_vram_region_io_size - Get the IO size of a VRAM region
+ * @vram: the VRAM region
+ *
+ * Return: the IO size of the VRAM region, or 0 if not valid
+ */
+resource_size_t xe_vram_region_io_size(const struct xe_vram_region *vram)
+{
+ return vram ? vram->io_size : 0;
+}
- xe->mem.vram.actual_physical_size = total_size;
+/**
+ * xe_vram_region_dpa_base - Get the DPA base of a VRAM region
+ * @vram: the VRAM region
+ *
+ * Return: the DPA base of the VRAM region, or 0 if not valid
+ */
+resource_size_t xe_vram_region_dpa_base(const struct xe_vram_region *vram)
+{
+ return vram ? vram->dpa_base : 0;
+}
- drm_info(&xe->drm, "Total VRAM: %pa, %pa\n", &xe->mem.vram.io_start,
- &xe->mem.vram.actual_physical_size);
- drm_info(&xe->drm, "Available VRAM: %pa, %pa\n", &xe->mem.vram.io_start,
- &available_size);
+/**
+ * xe_vram_region_usable_size - Get the usable size of a VRAM region
+ * @vram: the VRAM region
+ *
+ * Return: the usable size of the VRAM region, or 0 if not valid
+ */
+resource_size_t xe_vram_region_usable_size(const struct xe_vram_region *vram)
+{
+ return vram ? vram->usable_size : 0;
+}
- return devm_add_action_or_reset(xe->drm.dev, vram_fini, xe);
+/**
+ * xe_vram_region_actual_physical_size - Get the actual physical size of a VRAM region
+ * @vram: the VRAM region
+ *
+ * Return: the actual physical size of the VRAM region, or 0 if not valid
+ */
+resource_size_t xe_vram_region_actual_physical_size(const struct xe_vram_region *vram)
+{
+ return vram ? vram->actual_physical_size : 0;
}
+EXPORT_SYMBOL_IF_KUNIT(xe_vram_region_actual_physical_size);
diff --git a/drivers/gpu/drm/xe/xe_vram.h b/drivers/gpu/drm/xe/xe_vram.h
index e31cc04ec0db..72860f714fc6 100644
--- a/drivers/gpu/drm/xe/xe_vram.h
+++ b/drivers/gpu/drm/xe/xe_vram.h
@@ -6,8 +6,19 @@
#ifndef _XE_VRAM_H_
#define _XE_VRAM_H_
+#include <linux/types.h>
+
struct xe_device;
+struct xe_vram_region;
int xe_vram_probe(struct xe_device *xe);
+struct xe_vram_region *xe_vram_region_alloc(struct xe_device *xe, u8 id, u32 placement);
+
+resource_size_t xe_vram_region_io_start(const struct xe_vram_region *vram);
+resource_size_t xe_vram_region_io_size(const struct xe_vram_region *vram);
+resource_size_t xe_vram_region_dpa_base(const struct xe_vram_region *vram);
+resource_size_t xe_vram_region_usable_size(const struct xe_vram_region *vram);
+resource_size_t xe_vram_region_actual_physical_size(const struct xe_vram_region *vram);
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_vram_freq.c b/drivers/gpu/drm/xe/xe_vram_freq.c
index b26e26d73dae..17bc84da4cdc 100644
--- a/drivers/gpu/drm/xe/xe_vram_freq.c
+++ b/drivers/gpu/drm/xe/xe_vram_freq.c
@@ -34,7 +34,7 @@ static ssize_t max_freq_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct xe_tile *tile = dev_to_tile(dev);
- u32 val, mbox;
+ u32 val = 0, mbox;
int err;
mbox = REG_FIELD_PREP(PCODE_MB_COMMAND, PCODE_FREQUENCY_CONFIG)
@@ -56,7 +56,7 @@ static ssize_t min_freq_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct xe_tile *tile = dev_to_tile(dev);
- u32 val, mbox;
+ u32 val = 0, mbox;
int err;
mbox = REG_FIELD_PREP(PCODE_MB_COMMAND, PCODE_FREQUENCY_CONFIG)
diff --git a/drivers/gpu/drm/xe/xe_vram_types.h b/drivers/gpu/drm/xe/xe_vram_types.h
new file mode 100644
index 000000000000..83772dcbf1af
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_vram_types.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_VRAM_TYPES_H_
+#define _XE_VRAM_TYPES_H_
+
+#if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
+#include <drm/drm_pagemap.h>
+#endif
+
+#include "xe_ttm_vram_mgr_types.h"
+
+struct xe_device;
+struct xe_migrate;
+
+/**
+ * struct xe_vram_region - memory region structure
+ * This is used to describe a memory region in an xe
+ * device, such as HBM memory or CXL extension memory.
+ */
+struct xe_vram_region {
+ /** @xe: Back pointer to xe device */
+ struct xe_device *xe;
+ /**
+ * @id: VRAM region instance id
+ *
+ * The value should be unique for each VRAM region.
+ */
+ u8 id;
+ /** @io_start: IO start address of this VRAM instance */
+ resource_size_t io_start;
+ /**
+ * @io_size: IO size of this VRAM instance
+ *
+ * This represents how much of this VRAM we can access
+ * via the CPU through the VRAM BAR. This can be smaller
+ * than @usable_size, in which case only part of VRAM is CPU
+ * accessible (typically the first 256M). This
+ * configuration is known as small-bar.
+ */
+ resource_size_t io_size;
+ /** @dpa_base: This memory region's DPA (device physical address) base */
+ resource_size_t dpa_base;
+ /**
+ * @usable_size: usable size of VRAM
+ *
+ * Usable size of VRAM excluding reserved portions
+ * (e.g. stolen memory)
+ */
+ resource_size_t usable_size;
+ /**
+ * @actual_physical_size: Actual VRAM size
+ *
+ * Actual VRAM size including reserved portions
+ * (e.g. stolen memory)
+ */
+ resource_size_t actual_physical_size;
+ /** @mapping: pointer to VRAM mappable space */
+ void __iomem *mapping;
+ /** @ttm: VRAM TTM manager */
+ struct xe_ttm_vram_mgr ttm;
+ /** @placement: TTM placement dedicated for this region */
+ u32 placement;
+#if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
+ /** @migrate: Back pointer to migrate */
+ struct xe_migrate *migrate;
+ /** @pagemap: Used to remap device memory as ZONE_DEVICE */
+ struct dev_pagemap pagemap;
+ /**
+ * @dpagemap: The struct drm_pagemap of the ZONE_DEVICE memory
+ * pages of this tile.
+ */
+ struct drm_pagemap dpagemap;
+ /**
+ * @hpa_base: base host physical address
+ *
+ * This is generated when remapping device memory as ZONE_DEVICE.
+ */
+ resource_size_t hpa_base;
+#endif
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c
index 22a98600fd8f..cd03891654a1 100644
--- a/drivers/gpu/drm/xe/xe_wa.c
+++ b/drivers/gpu/drm/xe/xe_wa.c
@@ -39,7 +39,8 @@
* Register Immediate commands) once when initializing the device and saved in
* the default context. That default context is then used on every context
* creation to have a "primed golden context", i.e. a context image that
- * already contains the changes needed to all the registers.
+ * already contains the changes needed to all the registers. See
+ * drivers/gpu/drm/xe/xe_lrc.c for default context handling.
*
* - Engine workarounds: the list of these WAs is applied whenever the specific
* engine is reset. It's also possible that a set of engine classes share a
@@ -48,10 +49,10 @@
 * them need to keep the workaround programming: the approach taken in the
* driver is to tie those workarounds to the first compute/render engine that
* is registered. When executing with GuC submission, engine resets are
- * outside of kernel driver control, hence the list of registers involved in
+ * outside of kernel driver control, hence the list of registers involved is
* written once, on engine initialization, and then passed to GuC, that
* saves/restores their values before/after the reset takes place. See
- * ``drivers/gpu/drm/xe/xe_guc_ads.c`` for reference.
+ * drivers/gpu/drm/xe/xe_guc_ads.c for reference.
*
* - GT workarounds: the list of these WAs is applied whenever these registers
* revert to their default values: on GPU reset, suspend/resume [1]_, etc.
@@ -66,21 +67,39 @@
* hardware on every HW context restore. These buffers are created and
* programmed in the default context so the hardware always go through those
* programming sequences when switching contexts. The support for workaround
- * batchbuffers is enabled these hardware mechanisms:
+ * batchbuffers is enabled via these hardware mechanisms:
*
- * #. INDIRECT_CTX: A batchbuffer and an offset are provided in the default
- * context, pointing the hardware to jump to that location when that offset
- * is reached in the context restore. Workaround batchbuffer in the driver
- * currently uses this mechanism for all platforms.
+ * #. INDIRECT_CTX (also known as **mid context restore bb**): A batchbuffer
+ * and an offset are provided in the default context, pointing the hardware
+ * to jump to that location when that offset is reached in the context
+ * restore. When a context is being restored, this is executed after the
+ * ring context, in the middle (or beginning) of the engine context image.
*
- * #. BB_PER_CTX_PTR: A batchbuffer is provided in the default context,
- * pointing the hardware to a buffer to continue executing after the
- * engine registers are restored in a context restore sequence. This is
- * currently not used in the driver.
+ * #. BB_PER_CTX_PTR (also known as **post context restore bb**): A
+ * batchbuffer is provided in the default context, pointing the hardware to
+ * a buffer to continue executing after the engine registers are restored
+ * in a context restore sequence.
+ *
+ * Below is the timeline for a context restore sequence:
+ *
+ * .. code::
+ *
+ * INDIRECT_CTX_OFFSET
+ * |----------->|
+ * .------------.------------.-------------.------------.--------------.-----------.
+ * |Ring | Engine | Mid-context | Engine | Post-context | Ring |
+ * |Restore | Restore (1)| BB Restore | Restore (2)| BB Restore | Execution |
+ * `------------'------------'-------------'------------'--------------'-----------'
*
* - Other/OOB: There are WAs that, due to their nature, cannot be applied from
* a central place. Those are peppered around the rest of the code, as needed.
- * Workarounds related to the display IP are the main example.
+ * There's a central place to control which workarounds are enabled:
+ * drivers/gpu/drm/xe/xe_wa_oob.rules for GT workarounds and
+ * drivers/gpu/drm/xe/xe_device_wa_oob.rules for device/SoC workarounds.
+ * These files only record which workarounds are enabled: during early device
+ * initialization those rules are evaluated and recorded by the driver. Then
+ * later the driver checks with ``XE_GT_WA()`` and ``XE_DEVICE_WA()`` to
+ * implement them.
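+ *
+ * For example, an OOB GT workaround is then checked at its point of
+ * implementation (this mirrors xe_wa_apply_tile_workarounds() in this
+ * file):
+ *
+ * .. code::
+ *
+ *	if (XE_GT_WA(tile->primary_gt, 22010954014))
+ *		xe_mmio_rmw32(mmio, XEHP_CLOCK_GATE_DIS, 0, SGSI_SIDECLK_DIS);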
*
* .. [1] Technically, some registers are powercontext saved & restored, so they
* survive a suspend/resume. In practice, writing them again is not too
@@ -538,6 +557,11 @@ static const struct xe_rtp_entry_sr engine_was[] = {
XE_RTP_RULES(GRAPHICS_VERSION(2004), ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(SET(HALF_SLICE_CHICKEN7, CLEAR_OPTIMIZATION_DISABLE))
},
+ { XE_RTP_NAME("13012615864"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2004),
+ FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(TDL_TSL_CHICKEN, RES_CHK_SPR_DIS))
+ },
/* Xe2_HPG */
@@ -602,6 +626,18 @@ static const struct xe_rtp_entry_sr engine_was[] = {
FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(TDL_TSL_CHICKEN, STK_ID_RESTRICT))
},
+ { XE_RTP_NAME("13012615864"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002),
+ FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(TDL_TSL_CHICKEN, RES_CHK_SPR_DIS))
+ },
+ { XE_RTP_NAME("18041344222"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002),
+ FUNC(xe_rtp_match_first_render_or_compute),
+ FUNC(xe_rtp_match_not_sriov_vf),
+ FUNC(xe_rtp_match_gt_has_discontiguous_dss_groups)),
+ XE_RTP_ACTIONS(SET(TDL_CHICKEN, EUSTALL_PERF_SAMPLING_DISABLE))
+ },
/* Xe2_LPM */
@@ -647,7 +683,8 @@ static const struct xe_rtp_entry_sr engine_was[] = {
XE_RTP_ACTIONS(SET(TDL_CHICKEN, QID_WAIT_FOR_THREAD_NOT_RUN_DISABLE))
},
{ XE_RTP_NAME("13012615864"),
- XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, 3001),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, 3001), OR,
+ GRAPHICS_VERSION(3003),
FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(TDL_TSL_CHICKEN, RES_CHK_SPR_DIS))
},
@@ -661,6 +698,13 @@ static const struct xe_rtp_entry_sr engine_was[] = {
XE_RTP_RULES(GRAPHICS_VERSION(3003), FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(HALF_SLICE_CHICKEN7, CLEAR_OPTIMIZATION_DISABLE))
},
+ { XE_RTP_NAME("18041344222"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, 3001),
+ FUNC(xe_rtp_match_first_render_or_compute),
+ FUNC(xe_rtp_match_not_sriov_vf),
+ FUNC(xe_rtp_match_gt_has_discontiguous_dss_groups)),
+ XE_RTP_ACTIONS(SET(TDL_CHICKEN, EUSTALL_PERF_SAMPLING_DISABLE))
+ },
};
static const struct xe_rtp_entry_sr lrc_was[] = {
@@ -868,6 +912,10 @@ static const struct xe_rtp_entry_sr lrc_was[] = {
DIS_PARTIAL_AUTOSTRIP |
DIS_AUTOSTRIP))
},
+ { XE_RTP_NAME("22021007897"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, 3003), ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(COMMON_SLICE_CHICKEN4, SBE_PUSH_CONSTANT_BEHIND_FIX_ENABLE))
+ },
};
static __maybe_unused const struct xe_rtp_entry oob_was[] = {
@@ -905,13 +953,13 @@ void xe_wa_process_device_oob(struct xe_device *xe)
}
/**
- * xe_wa_process_oob - process OOB workaround table
+ * xe_wa_process_gt_oob - process GT OOB workaround table
* @gt: GT instance to process workarounds for
*
* Process OOB workaround table for this platform, marking in @gt the
* workarounds that are active.
*/
-void xe_wa_process_oob(struct xe_gt *gt)
+void xe_wa_process_gt_oob(struct xe_gt *gt)
{
struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(gt);
@@ -995,12 +1043,12 @@ int xe_wa_device_init(struct xe_device *xe)
}
/**
- * xe_wa_init - initialize gt with workaround bookkeeping
+ * xe_wa_gt_init - initialize gt with workaround bookkeeping
* @gt: GT instance to initialize
*
* Returns 0 for success, negative error code otherwise.
*/
-int xe_wa_init(struct xe_gt *gt)
+int xe_wa_gt_init(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
size_t n_oob, n_lrc, n_engine, n_gt, total;
@@ -1026,7 +1074,7 @@ int xe_wa_init(struct xe_gt *gt)
return 0;
}
-ALLOW_ERROR_INJECTION(xe_wa_init, ERRNO); /* See xe_pci_probe() */
+ALLOW_ERROR_INJECTION(xe_wa_gt_init, ERRNO); /* See xe_pci_probe() */
void xe_wa_device_dump(struct xe_device *xe, struct drm_printer *p)
{
@@ -1079,6 +1127,6 @@ void xe_wa_apply_tile_workarounds(struct xe_tile *tile)
if (IS_SRIOV_VF(tile->xe))
return;
- if (XE_WA(tile->primary_gt, 22010954014))
+ if (XE_GT_WA(tile->primary_gt, 22010954014))
xe_mmio_rmw32(mmio, XEHP_CLOCK_GATE_DIS, 0, SGSI_SIDECLK_DIS);
}
diff --git a/drivers/gpu/drm/xe/xe_wa.h b/drivers/gpu/drm/xe/xe_wa.h
index f3880c65cb8d..6a869b2de643 100644
--- a/drivers/gpu/drm/xe/xe_wa.h
+++ b/drivers/gpu/drm/xe/xe_wa.h
@@ -14,9 +14,9 @@ struct xe_hw_engine;
struct xe_tile;
int xe_wa_device_init(struct xe_device *xe);
-int xe_wa_init(struct xe_gt *gt);
+int xe_wa_gt_init(struct xe_gt *gt);
void xe_wa_process_device_oob(struct xe_device *xe);
-void xe_wa_process_oob(struct xe_gt *gt);
+void xe_wa_process_gt_oob(struct xe_gt *gt);
void xe_wa_process_gt(struct xe_gt *gt);
void xe_wa_process_engine(struct xe_hw_engine *hwe);
void xe_wa_process_lrc(struct xe_hw_engine *hwe);
@@ -25,11 +25,11 @@ void xe_wa_device_dump(struct xe_device *xe, struct drm_printer *p);
void xe_wa_dump(struct xe_gt *gt, struct drm_printer *p);
/**
- * XE_WA - Out-of-band workarounds, to be queried and called as needed.
+ * XE_GT_WA - Out-of-band GT workarounds, to be queried and called as needed.
* @gt__: gt instance
* @id__: XE_OOB_<id__>, as generated by build system in generated/xe_wa_oob.h
*/
-#define XE_WA(gt__, id__) ({ \
+#define XE_GT_WA(gt__, id__) ({ \
xe_gt_assert(gt__, (gt__)->wa_active.oob_initialized); \
test_bit(XE_WA_OOB_ ## id__, (gt__)->wa_active.oob); \
})
diff --git a/drivers/gpu/drm/xe/xe_wa_oob.rules b/drivers/gpu/drm/xe/xe_wa_oob.rules
index 710f4423726c..f3a6d5d239ce 100644
--- a/drivers/gpu/drm/xe/xe_wa_oob.rules
+++ b/drivers/gpu/drm/xe/xe_wa_oob.rules
@@ -1,4 +1,6 @@
1607983814 GRAPHICS_VERSION_RANGE(1200, 1210)
+16010904313 GRAPHICS_VERSION_RANGE(1200, 1210)
+18022495364 GRAPHICS_VERSION_RANGE(1200, 1210)
22012773006 GRAPHICS_VERSION_RANGE(1200, 1250)
14014475959 GRAPHICS_VERSION_RANGE(1270, 1271), GRAPHICS_STEP(A0, B0)
PLATFORM(DG2)
@@ -47,7 +49,6 @@
16023588340 GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_not_sriov_vf)
14019789679 GRAPHICS_VERSION(1255)
GRAPHICS_VERSION_RANGE(1270, 2004)
-no_media_l3 MEDIA_VERSION(3000)
14022866841 GRAPHICS_VERSION(3000), GRAPHICS_STEP(A0, B0)
MEDIA_VERSION(3000), MEDIA_STEP(A0, B0)
16021333562 GRAPHICS_VERSION_RANGE(1200, 1274)
@@ -67,9 +68,16 @@ no_media_l3 MEDIA_VERSION(3000)
MEDIA_VERSION_RANGE(1300, 3000)
MEDIA_VERSION(3002)
GRAPHICS_VERSION(3003)
+14020001231	GRAPHICS_VERSION_RANGE(2001, 2004), FUNC(xe_rtp_match_psmi_enabled)
+ MEDIA_VERSION(2000), FUNC(xe_rtp_match_psmi_enabled)
+ MEDIA_VERSION(3000), FUNC(xe_rtp_match_psmi_enabled)
+ MEDIA_VERSION(3002), FUNC(xe_rtp_match_psmi_enabled)
+16023683509 MEDIA_VERSION(2000), FUNC(xe_rtp_match_psmi_enabled)
+ MEDIA_VERSION(3000), MEDIA_STEP(A0, B0), FUNC(xe_rtp_match_psmi_enabled)
# SoC workaround - currently applies to all platforms with the following
# primary GT GMDID
14022085890 GRAPHICS_VERSION(2001)
15015404425_disable PLATFORM(PANTHERLAKE), MEDIA_STEP(B0, FOREVER)
+16026007364 MEDIA_VERSION(3000)
diff --git a/drivers/gpu/nova-core/driver.rs b/drivers/gpu/nova-core/driver.rs
index 5d23a91f51dd..edc72052e27a 100644
--- a/drivers/gpu/nova-core/driver.rs
+++ b/drivers/gpu/nova-core/driver.rs
@@ -57,14 +57,19 @@ impl pci::Driver for NovaCore {
pdev.enable_device_mem()?;
pdev.set_master();
- let bar = Arc::pin_init(
+ let devres_bar = Arc::pin_init(
pdev.iomap_region_sized::<BAR0_SIZE>(0, c_str!("nova-core/bar0")),
GFP_KERNEL,
)?;
+ // Used to provide a `&Bar0` to `Gpu::new` without tying it to the lifetime of
+ // `devres_bar`.
+ let bar_clone = Arc::clone(&devres_bar);
+ let bar = bar_clone.access(pdev.as_ref())?;
+
let this = KBox::pin_init(
try_pin_init!(Self {
- gpu <- Gpu::new(pdev, bar)?,
+ gpu <- Gpu::new(pdev, devres_bar, bar),
_reg: auxiliary::Registration::new(
pdev.as_ref(),
c_str!("nova-drm"),
@@ -77,4 +82,8 @@ impl pci::Driver for NovaCore {
Ok(this)
}
+
+ fn unbind(pdev: &pci::Device<Core>, this: Pin<&Self>) {
+ this.gpu.unbind(pdev.as_ref());
+ }
}
diff --git a/drivers/gpu/nova-core/falcon.rs b/drivers/gpu/nova-core/falcon.rs
index 50437c67c14a..37e6298195e4 100644
--- a/drivers/gpu/nova-core/falcon.rs
+++ b/drivers/gpu/nova-core/falcon.rs
@@ -4,16 +4,17 @@
use core::ops::Deref;
use hal::FalconHal;
-use kernel::bindings;
use kernel::device;
+use kernel::dma::DmaAddress;
use kernel::prelude::*;
+use kernel::sync::aref::ARef;
use kernel::time::Delta;
-use kernel::types::ARef;
use crate::dma::DmaObject;
use crate::driver::Bar0;
use crate::gpu::Chipset;
use crate::regs;
+use crate::regs::macros::RegisterBase;
use crate::util;
pub(crate) mod gsp;
@@ -274,14 +275,25 @@ impl From<bool> for FalconFbifMemType {
}
}
-/// Trait defining the parameters of a given Falcon instance.
-pub(crate) trait FalconEngine: Sync {
- /// Base I/O address for the falcon, relative from which its registers are accessed.
- const BASE: usize;
+/// Type used to represent the `PFALCON` registers address base for a given falcon engine.
+pub(crate) struct PFalconBase(());
+
+/// Type used to represent the `PFALCON2` registers address base for a given falcon engine.
+pub(crate) struct PFalcon2Base(());
+
+/// Trait defining the parameters of a given Falcon engine.
+///
+/// Each engine provides one base for `PFALCON` and `PFALCON2` registers. The `ID` constant is used
+/// to identify a given Falcon instance with register I/O methods.
+pub(crate) trait FalconEngine:
+ Send + Sync + RegisterBase<PFalconBase> + RegisterBase<PFalcon2Base> + Sized
+{
+ /// Singleton of the engine, used to identify it with register I/O methods.
+ const ID: Self;
}
/// Represents a portion of the firmware to be loaded into a particular memory (e.g. IMEM or DMEM).
-#[derive(Debug)]
+#[derive(Debug, Clone)]
pub(crate) struct FalconLoadTarget {
/// Offset from the start of the source object to copy from.
pub(crate) src_start: u32,
@@ -292,7 +304,7 @@ pub(crate) struct FalconLoadTarget {
}
/// Parameters for the falcon boot ROM.
-#[derive(Debug)]
+#[derive(Debug, Clone)]
pub(crate) struct FalconBromParams {
 /// Offset in `DMEM` of the firmware's signature.
pub(crate) pkc_data_offset: u32,
@@ -343,13 +355,13 @@ impl<E: FalconEngine + 'static> Falcon<E> {
bar: &Bar0,
need_riscv: bool,
) -> Result<Self> {
- let hwcfg1 = regs::NV_PFALCON_FALCON_HWCFG1::read(bar, E::BASE);
+ let hwcfg1 = regs::NV_PFALCON_FALCON_HWCFG1::read(bar, &E::ID);
// Check that the revision and security model contain valid values.
let _ = hwcfg1.core_rev()?;
let _ = hwcfg1.security_model()?;
if need_riscv {
- let hwcfg2 = regs::NV_PFALCON_FALCON_HWCFG2::read(bar, E::BASE);
+ let hwcfg2 = regs::NV_PFALCON_FALCON_HWCFG2::read(bar, &E::ID);
if !hwcfg2.riscv() {
dev_err!(
dev,
@@ -369,7 +381,7 @@ impl<E: FalconEngine + 'static> Falcon<E> {
fn reset_wait_mem_scrubbing(&self, bar: &Bar0) -> Result {
// TIMEOUT: memory scrubbing should complete in less than 20ms.
util::wait_on(Delta::from_millis(20), || {
- if regs::NV_PFALCON_FALCON_HWCFG2::read(bar, E::BASE).mem_scrubbing_done() {
+ if regs::NV_PFALCON_FALCON_HWCFG2::read(bar, &E::ID).mem_scrubbing_done() {
Some(())
} else {
None
@@ -379,12 +391,12 @@ impl<E: FalconEngine + 'static> Falcon<E> {
/// Reset the falcon engine.
fn reset_eng(&self, bar: &Bar0) -> Result {
- let _ = regs::NV_PFALCON_FALCON_HWCFG2::read(bar, E::BASE);
+ let _ = regs::NV_PFALCON_FALCON_HWCFG2::read(bar, &E::ID);
// According to OpenRM's `kflcnPreResetWait_GA102` documentation, HW sometimes does not set
// RESET_READY so a non-failing timeout is used.
let _ = util::wait_on(Delta::from_micros(150), || {
- let r = regs::NV_PFALCON_FALCON_HWCFG2::read(bar, E::BASE);
+ let r = regs::NV_PFALCON_FALCON_HWCFG2::read(bar, &E::ID);
if r.reset_ready() {
Some(())
} else {
@@ -392,13 +404,13 @@ impl<E: FalconEngine + 'static> Falcon<E> {
}
});
- regs::NV_PFALCON_FALCON_ENGINE::alter(bar, E::BASE, |v| v.set_reset(true));
+ regs::NV_PFALCON_FALCON_ENGINE::alter(bar, &E::ID, |v| v.set_reset(true));
// TODO[DLAY]: replace with udelay() or equivalent once available.
// TIMEOUT: falcon engine should not take more than 10us to reset.
let _: Result = util::wait_on(Delta::from_micros(10), || None);
- regs::NV_PFALCON_FALCON_ENGINE::alter(bar, E::BASE, |v| v.set_reset(false));
+ regs::NV_PFALCON_FALCON_ENGINE::alter(bar, &E::ID, |v| v.set_reset(false));
self.reset_wait_mem_scrubbing(bar)?;
@@ -413,7 +425,7 @@ impl<E: FalconEngine + 'static> Falcon<E> {
regs::NV_PFALCON_FALCON_RM::default()
.set_value(regs::NV_PMC_BOOT_0::read(bar).into())
- .write(bar, E::BASE);
+ .write(bar, &E::ID);
Ok(())
}
@@ -443,7 +455,7 @@ impl<E: FalconEngine + 'static> Falcon<E> {
fw.dma_handle_with_offset(load_offsets.src_start as usize)?,
),
};
- if dma_start % bindings::dma_addr_t::from(DMA_LEN) > 0 {
+ if dma_start % DmaAddress::from(DMA_LEN) > 0 {
dev_err!(
self.dev,
"DMA transfer start addresses must be a multiple of {}",
@@ -451,44 +463,57 @@ impl<E: FalconEngine + 'static> Falcon<E> {
);
return Err(EINVAL);
}
- if load_offsets.len % DMA_LEN > 0 {
- dev_err!(
- self.dev,
- "DMA transfer length must be a multiple of {}",
- DMA_LEN
- );
- return Err(EINVAL);
- }
+
+ // DMA transfers can only be done in units of 256 bytes. Compute how many such transfers we
+ // need to perform.
+ let num_transfers = load_offsets.len.div_ceil(DMA_LEN);
+
+ // Check that the area we are about to transfer is within the bounds of the DMA object.
+ // Upper limit of transfer is `(num_transfers * DMA_LEN) + load_offsets.src_start`.
+ match num_transfers
+ .checked_mul(DMA_LEN)
+ .and_then(|size| size.checked_add(load_offsets.src_start))
+ {
+ None => {
+ dev_err!(self.dev, "DMA transfer length overflow");
+ return Err(EOVERFLOW);
+ }
+ Some(upper_bound) if upper_bound as usize > fw.size() => {
+ dev_err!(self.dev, "DMA transfer goes beyond range of DMA object");
+ return Err(EINVAL);
+ }
+ Some(_) => (),
+ };
// Set up the base source DMA address.
regs::NV_PFALCON_FALCON_DMATRFBASE::default()
.set_base((dma_start >> 8) as u32)
- .write(bar, E::BASE);
+ .write(bar, &E::ID);
regs::NV_PFALCON_FALCON_DMATRFBASE1::default()
.set_base((dma_start >> 40) as u16)
- .write(bar, E::BASE);
+ .write(bar, &E::ID);
let cmd = regs::NV_PFALCON_FALCON_DMATRFCMD::default()
.set_size(DmaTrfCmdSize::Size256B)
.set_imem(target_mem == FalconMem::Imem)
.set_sec(if sec { 1 } else { 0 });
- for pos in (0..load_offsets.len).step_by(DMA_LEN as usize) {
+ for pos in (0..num_transfers).map(|i| i * DMA_LEN) {
// Perform a transfer of size `DMA_LEN`.
regs::NV_PFALCON_FALCON_DMATRFMOFFS::default()
.set_offs(load_offsets.dst_start + pos)
- .write(bar, E::BASE);
+ .write(bar, &E::ID);
regs::NV_PFALCON_FALCON_DMATRFFBOFFS::default()
.set_offs(src_start + pos)
- .write(bar, E::BASE);
- cmd.write(bar, E::BASE);
+ .write(bar, &E::ID);
+ cmd.write(bar, &E::ID);
// Wait for the transfer to complete.
// TIMEOUT: arbitrarily large value, no DMA transfer to the falcon's small memories
// should ever take that long.
util::wait_on(Delta::from_secs(2), || {
- let r = regs::NV_PFALCON_FALCON_DMATRFCMD::read(bar, E::BASE);
+ let r = regs::NV_PFALCON_FALCON_DMATRFCMD::read(bar, &E::ID);
if r.idle() {
Some(())
} else {
@@ -502,9 +527,9 @@ impl<E: FalconEngine + 'static> Falcon<E> {
/// Perform a DMA load into `IMEM` and `DMEM` of `fw`, and prepare the falcon to run it.
pub(crate) fn dma_load<F: FalconFirmware<Target = E>>(&self, bar: &Bar0, fw: &F) -> Result {
- regs::NV_PFALCON_FBIF_CTL::alter(bar, E::BASE, |v| v.set_allow_phys_no_ctx(true));
- regs::NV_PFALCON_FALCON_DMACTL::default().write(bar, E::BASE);
- regs::NV_PFALCON_FBIF_TRANSCFG::alter(bar, E::BASE, |v| {
+ regs::NV_PFALCON_FBIF_CTL::alter(bar, &E::ID, |v| v.set_allow_phys_no_ctx(true));
+ regs::NV_PFALCON_FALCON_DMACTL::default().write(bar, &E::ID);
+ regs::NV_PFALCON_FBIF_TRANSCFG::alter(bar, &E::ID, 0, |v| {
v.set_target(FalconFbifTarget::CoherentSysmem)
.set_mem_type(FalconFbifMemType::Physical)
});
@@ -517,7 +542,7 @@ impl<E: FalconEngine + 'static> Falcon<E> {
// Set `BootVec` to start of non-secure code.
regs::NV_PFALCON_FALCON_BOOTVEC::default()
.set_value(fw.boot_addr())
- .write(bar, E::BASE);
+ .write(bar, &E::ID);
Ok(())
}
@@ -538,27 +563,27 @@ impl<E: FalconEngine + 'static> Falcon<E> {
if let Some(mbox0) = mbox0 {
regs::NV_PFALCON_FALCON_MAILBOX0::default()
.set_value(mbox0)
- .write(bar, E::BASE);
+ .write(bar, &E::ID);
}
if let Some(mbox1) = mbox1 {
regs::NV_PFALCON_FALCON_MAILBOX1::default()
.set_value(mbox1)
- .write(bar, E::BASE);
+ .write(bar, &E::ID);
}
- match regs::NV_PFALCON_FALCON_CPUCTL::read(bar, E::BASE).alias_en() {
+ match regs::NV_PFALCON_FALCON_CPUCTL::read(bar, &E::ID).alias_en() {
true => regs::NV_PFALCON_FALCON_CPUCTL_ALIAS::default()
.set_startcpu(true)
- .write(bar, E::BASE),
+ .write(bar, &E::ID),
false => regs::NV_PFALCON_FALCON_CPUCTL::default()
.set_startcpu(true)
- .write(bar, E::BASE),
+ .write(bar, &E::ID),
}
// TIMEOUT: arbitrarily large value, firmwares should complete in less than 2 seconds.
util::wait_on(Delta::from_secs(2), || {
- let r = regs::NV_PFALCON_FALCON_CPUCTL::read(bar, E::BASE);
+ let r = regs::NV_PFALCON_FALCON_CPUCTL::read(bar, &E::ID);
if r.halted() {
Some(())
} else {
@@ -567,8 +592,8 @@ impl<E: FalconEngine + 'static> Falcon<E> {
})?;
let (mbox0, mbox1) = (
- regs::NV_PFALCON_FALCON_MAILBOX0::read(bar, E::BASE).value(),
- regs::NV_PFALCON_FALCON_MAILBOX1::read(bar, E::BASE).value(),
+ regs::NV_PFALCON_FALCON_MAILBOX0::read(bar, &E::ID).value(),
+ regs::NV_PFALCON_FALCON_MAILBOX1::read(bar, &E::ID).value(),
);
Ok((mbox0, mbox1))
diff --git a/drivers/gpu/nova-core/falcon/gsp.rs b/drivers/gpu/nova-core/falcon/gsp.rs
index d622e9a64470..f17599cb49fa 100644
--- a/drivers/gpu/nova-core/falcon/gsp.rs
+++ b/drivers/gpu/nova-core/falcon/gsp.rs
@@ -2,23 +2,31 @@
use crate::{
driver::Bar0,
- falcon::{Falcon, FalconEngine},
- regs,
+ falcon::{Falcon, FalconEngine, PFalcon2Base, PFalconBase},
+ regs::{self, macros::RegisterBase},
};
/// Type specifying the `Gsp` falcon engine. Cannot be instantiated.
pub(crate) struct Gsp(());
-impl FalconEngine for Gsp {
+impl RegisterBase<PFalconBase> for Gsp {
const BASE: usize = 0x00110000;
}
+impl RegisterBase<PFalcon2Base> for Gsp {
+ const BASE: usize = 0x00111000;
+}
+
+impl FalconEngine for Gsp {
+ const ID: Self = Gsp(());
+}
+
impl Falcon<Gsp> {
/// Clears the SWGEN0 bit in the Falcon's IRQ status clear register to
/// allow GSP to signal CPU for processing new messages in message queue.
pub(crate) fn clear_swgen0_intr(&self, bar: &Bar0) {
regs::NV_PFALCON_FALCON_IRQSCLR::default()
.set_swgen0(true)
- .write(bar, Gsp::BASE);
+ .write(bar, &Gsp::ID);
}
}
diff --git a/drivers/gpu/nova-core/falcon/hal.rs b/drivers/gpu/nova-core/falcon/hal.rs
index b233bc365882..bba288455617 100644
--- a/drivers/gpu/nova-core/falcon/hal.rs
+++ b/drivers/gpu/nova-core/falcon/hal.rs
@@ -13,7 +13,7 @@ mod ga102;
/// Implements chipset-specific low-level operations. The trait is generic against [`FalconEngine`]
/// so its `BASE` parameter can be used in order to avoid runtime bound checks when accessing
/// registers.
-pub(crate) trait FalconHal<E: FalconEngine>: Sync {
+pub(crate) trait FalconHal<E: FalconEngine>: Send + Sync {
 /// Activates the Falcon core if the engine is a riscv/falcon dual engine.
fn select_core(&self, _falcon: &Falcon<E>, _bar: &Bar0) -> Result {
Ok(())
diff --git a/drivers/gpu/nova-core/falcon/hal/ga102.rs b/drivers/gpu/nova-core/falcon/hal/ga102.rs
index 52c33d3f22a8..0b1cbe7853b3 100644
--- a/drivers/gpu/nova-core/falcon/hal/ga102.rs
+++ b/drivers/gpu/nova-core/falcon/hal/ga102.rs
@@ -16,15 +16,15 @@ use crate::util;
use super::FalconHal;
fn select_core_ga102<E: FalconEngine>(bar: &Bar0) -> Result {
- let bcr_ctrl = regs::NV_PRISCV_RISCV_BCR_CTRL::read(bar, E::BASE);
+ let bcr_ctrl = regs::NV_PRISCV_RISCV_BCR_CTRL::read(bar, &E::ID);
if bcr_ctrl.core_select() != PeregrineCoreSelect::Falcon {
regs::NV_PRISCV_RISCV_BCR_CTRL::default()
.set_core_select(PeregrineCoreSelect::Falcon)
- .write(bar, E::BASE);
+ .write(bar, &E::ID);
// TIMEOUT: falcon core should take less than 10ms to report being enabled.
util::wait_on(Delta::from_millis(10), || {
- let r = regs::NV_PRISCV_RISCV_BCR_CTRL::read(bar, E::BASE);
+ let r = regs::NV_PRISCV_RISCV_BCR_CTRL::read(bar, &E::ID);
if r.valid() {
Some(())
} else {
@@ -42,50 +42,47 @@ fn signature_reg_fuse_version_ga102(
engine_id_mask: u16,
ucode_id: u8,
) -> Result<u32> {
- // TODO[REGA]: The ucode fuse versions are contained in the
- // FUSE_OPT_FPF_<ENGINE>_UCODE<X>_VERSION registers, which are an array. Our register
- // definition macros do not allow us to manage them properly, so we need to hardcode their
- // addresses for now. Clean this up once we support register arrays.
+ const NV_FUSE_OPT_FPF_SIZE: u8 = regs::NV_FUSE_OPT_FPF_SIZE as u8;
// Each engine has 16 ucode version registers numbered from 1 to 16.
- if ucode_id == 0 || ucode_id > 16 {
- dev_err!(dev, "invalid ucode id {:#x}", ucode_id);
- return Err(EINVAL);
- }
+ let ucode_idx = match ucode_id {
+ 1..=NV_FUSE_OPT_FPF_SIZE => (ucode_id - 1) as usize,
+ _ => {
+ dev_err!(dev, "invalid ucode id {:#x}", ucode_id);
+ return Err(EINVAL);
+ }
+ };
- // Base address of the FUSE registers array corresponding to the engine.
- let reg_fuse_base = if engine_id_mask & 0x0001 != 0 {
- regs::NV_FUSE_OPT_FPF_SEC2_UCODE1_VERSION::OFFSET
+ // `ucode_idx` is guaranteed to be in the range [0..15], making the `read` calls provably
+ // valid at build time.
+ let reg_fuse_version = if engine_id_mask & 0x0001 != 0 {
+ regs::NV_FUSE_OPT_FPF_SEC2_UCODE1_VERSION::read(bar, ucode_idx).data()
} else if engine_id_mask & 0x0004 != 0 {
- regs::NV_FUSE_OPT_FPF_NVDEC_UCODE1_VERSION::OFFSET
+ regs::NV_FUSE_OPT_FPF_NVDEC_UCODE1_VERSION::read(bar, ucode_idx).data()
} else if engine_id_mask & 0x0400 != 0 {
- regs::NV_FUSE_OPT_FPF_GSP_UCODE1_VERSION::OFFSET
+ regs::NV_FUSE_OPT_FPF_GSP_UCODE1_VERSION::read(bar, ucode_idx).data()
} else {
dev_err!(dev, "unexpected engine_id_mask {:#x}", engine_id_mask);
return Err(EINVAL);
};
- // Read `reg_fuse_base[ucode_id - 1]`.
- let reg_fuse_version =
- bar.read32(reg_fuse_base + ((ucode_id - 1) as usize * core::mem::size_of::<u32>()));
-
// TODO[NUMM]: replace with `last_set_bit` once it lands.
- Ok(u32::BITS - reg_fuse_version.leading_zeros())
+ Ok(u16::BITS - reg_fuse_version.leading_zeros())
}
fn program_brom_ga102<E: FalconEngine>(bar: &Bar0, params: &FalconBromParams) -> Result {
regs::NV_PFALCON2_FALCON_BROM_PARAADDR::default()
.set_value(params.pkc_data_offset)
- .write(bar, E::BASE);
+ .write(bar, &E::ID, 0);
regs::NV_PFALCON2_FALCON_BROM_ENGIDMASK::default()
.set_value(u32::from(params.engine_id_mask))
- .write(bar, E::BASE);
+ .write(bar, &E::ID);
regs::NV_PFALCON2_FALCON_BROM_CURR_UCODE_ID::default()
.set_ucode_id(params.ucode_id)
- .write(bar, E::BASE);
+ .write(bar, &E::ID);
regs::NV_PFALCON2_FALCON_MOD_SEL::default()
.set_algo(FalconModSelAlgo::Rsa3k)
- .write(bar, E::BASE);
+ .write(bar, &E::ID);
Ok(())
}
diff --git a/drivers/gpu/nova-core/falcon/sec2.rs b/drivers/gpu/nova-core/falcon/sec2.rs
index 5147d9e2a7fe..815786c8480d 100644
--- a/drivers/gpu/nova-core/falcon/sec2.rs
+++ b/drivers/gpu/nova-core/falcon/sec2.rs
@@ -1,10 +1,19 @@
// SPDX-License-Identifier: GPL-2.0
-use crate::falcon::FalconEngine;
+use crate::falcon::{FalconEngine, PFalcon2Base, PFalconBase};
+use crate::regs::macros::RegisterBase;
/// Type specifying the `Sec2` falcon engine. Cannot be instantiated.
pub(crate) struct Sec2(());
-impl FalconEngine for Sec2 {
+impl RegisterBase<PFalconBase> for Sec2 {
const BASE: usize = 0x00840000;
}
+
+impl RegisterBase<PFalcon2Base> for Sec2 {
+ const BASE: usize = 0x00841000;
+}
+
+impl FalconEngine for Sec2 {
+ const ID: Self = Sec2(());
+}
diff --git a/drivers/gpu/nova-core/fb.rs b/drivers/gpu/nova-core/fb.rs
index e4dc74f2f90a..27d9edab8347 100644
--- a/drivers/gpu/nova-core/fb.rs
+++ b/drivers/gpu/nova-core/fb.rs
@@ -5,7 +5,7 @@ use core::ops::Range;
use kernel::prelude::*;
use kernel::ptr::{Alignable, Alignment};
use kernel::sizes::*;
-use kernel::types::ARef;
+use kernel::sync::aref::ARef;
use kernel::{dev_warn, device};
use crate::dma::DmaObject;
diff --git a/drivers/gpu/nova-core/firmware.rs b/drivers/gpu/nova-core/firmware.rs
index 2931912ddba0..4179a74a2342 100644
--- a/drivers/gpu/nova-core/firmware.rs
+++ b/drivers/gpu/nova-core/firmware.rs
@@ -4,48 +4,36 @@
//! to be loaded into a given execution unit.
use core::marker::PhantomData;
+use core::mem::size_of;
use kernel::device;
use kernel::firmware;
use kernel::prelude::*;
use kernel::str::CString;
+use kernel::transmute::FromBytes;
use crate::dma::DmaObject;
use crate::falcon::FalconFirmware;
use crate::gpu;
-use crate::gpu::Chipset;
+pub(crate) mod booter;
pub(crate) mod fwsec;
-
-pub(crate) const FIRMWARE_VERSION: &str = "535.113.01";
-
-/// Structure encapsulating the firmware blobs required for the GPU to operate.
-#[expect(dead_code)]
-pub(crate) struct Firmware {
- booter_load: firmware::Firmware,
- booter_unload: firmware::Firmware,
- bootloader: firmware::Firmware,
- gsp: firmware::Firmware,
-}
-
-impl Firmware {
- pub(crate) fn new(dev: &device::Device, chipset: Chipset, ver: &str) -> Result<Firmware> {
- let mut chip_name = CString::try_from_fmt(fmt!("{chipset}"))?;
- chip_name.make_ascii_lowercase();
- let chip_name = &*chip_name;
-
- let request = |name_| {
- CString::try_from_fmt(fmt!("nvidia/{chip_name}/gsp/{name_}-{ver}.bin"))
- .and_then(|path| firmware::Firmware::request(&path, dev))
- };
-
- Ok(Firmware {
- booter_load: request("booter_load")?,
- booter_unload: request("booter_unload")?,
- bootloader: request("bootloader")?,
- gsp: request("gsp")?,
- })
- }
+pub(crate) mod gsp;
+pub(crate) mod riscv;
+
+pub(crate) const FIRMWARE_VERSION: &str = "570.144";
+
+/// Requests the GPU firmware `name` suitable for `chipset`, with version `ver`.
+fn request_firmware(
+ dev: &device::Device,
+ chipset: gpu::Chipset,
+ name: &str,
+ ver: &str,
+) -> Result<firmware::Firmware> {
+ let chip_name = chipset.name();
+
+ CString::try_from_fmt(fmt!("nvidia/{chip_name}/gsp/{name}-{ver}.bin"))
+ .and_then(|path| firmware::Firmware::request(&path, dev))
}
/// Structure used to describe some firmwares, notably FWSEC-FRTS.
@@ -150,6 +138,65 @@ impl<F: FalconFirmware> FirmwareDmaObject<F, Unsigned> {
}
}
+/// Header common to most firmware files.
+#[repr(C)]
+#[derive(Debug, Clone)]
+struct BinHdr {
+ /// Magic number, must be `0x10de`.
+ bin_magic: u32,
+ /// Version of the header.
+ bin_ver: u32,
+ /// Size in bytes of the binary (to be ignored).
+ bin_size: u32,
+ /// Offset of the start of the application-specific header.
+ header_offset: u32,
+ /// Offset of the start of the data payload.
+ data_offset: u32,
+ /// Size in bytes of the data payload.
+ data_size: u32,
+}
+
+// SAFETY: all bit patterns are valid for this type, and it doesn't use interior mutability.
+unsafe impl FromBytes for BinHdr {}
+
+// A firmware blob starting with a `BinHdr`.
+struct BinFirmware<'a> {
+ hdr: BinHdr,
+ fw: &'a [u8],
+}
+
+impl<'a> BinFirmware<'a> {
+ /// Interpret `fw` as a firmware image starting with a [`BinHdr`], and returns the
+ /// corresponding [`BinFirmware`] that can be used to extract its payload.
+ fn new(fw: &'a firmware::Firmware) -> Result<Self> {
+ const BIN_MAGIC: u32 = 0x10de;
+ let fw = fw.data();
+
+ fw.get(0..size_of::<BinHdr>())
+ // Extract header.
+ .and_then(BinHdr::from_bytes_copy)
+ // Validate header.
+ .and_then(|hdr| {
+ if hdr.bin_magic == BIN_MAGIC {
+ Some(hdr)
+ } else {
+ None
+ }
+ })
+ .map(|hdr| Self { hdr, fw })
+ .ok_or(EINVAL)
+ }
+
+ /// Returns the data payload of the firmware, or `None` if the data range is out of bounds of
+ /// the firmware image.
+ fn data(&self) -> Option<&[u8]> {
+ let fw_start = self.hdr.data_offset as usize;
+ let fw_size = self.hdr.data_size as usize;
+
+ self.fw.get(fw_start..fw_start + fw_size)
+ }
+}
+
pub(crate) struct ModInfoBuilder<const N: usize>(firmware::ModInfoBuilder<N>);
impl<const N: usize> ModInfoBuilder<N> {
@@ -180,8 +227,8 @@ impl<const N: usize> ModInfoBuilder<N> {
let mut this = Self(firmware::ModInfoBuilder::new(module_name));
let mut i = 0;
- while i < gpu::Chipset::NAMES.len() {
- this = this.make_entry_chipset(gpu::Chipset::NAMES[i]);
+ while i < gpu::Chipset::ALL.len() {
+ this = this.make_entry_chipset(gpu::Chipset::ALL[i].name());
i += 1;
}
diff --git a/drivers/gpu/nova-core/firmware/booter.rs b/drivers/gpu/nova-core/firmware/booter.rs
new file mode 100644
index 000000000000..b4ff1b17e4a0
--- /dev/null
+++ b/drivers/gpu/nova-core/firmware/booter.rs
@@ -0,0 +1,375 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Support for loading and patching the `Booter` firmware. `Booter` is a Heavy Secured firmware
+//! running on [`Sec2`] that is used on Turing/Ampere to load the GSP firmware into the GSP falcon
+//! (and optionally unload it through a separate firmware image).
+
+use core::marker::PhantomData;
+use core::mem::size_of;
+use core::ops::Deref;
+
+use kernel::device;
+use kernel::prelude::*;
+use kernel::transmute::FromBytes;
+
+use crate::dma::DmaObject;
+use crate::driver::Bar0;
+use crate::falcon::sec2::Sec2;
+use crate::falcon::{Falcon, FalconBromParams, FalconFirmware, FalconLoadParams, FalconLoadTarget};
+use crate::firmware::{BinFirmware, FirmwareDmaObject, FirmwareSignature, Signed, Unsigned};
+use crate::gpu::Chipset;
+
+/// Local convenience function to return a copy of `S` by reinterpreting the bytes starting at
+/// `offset` in `slice`.
+fn frombytes_at<S: FromBytes + Sized>(slice: &[u8], offset: usize) -> Result<S> {
+ slice
+ .get(offset..offset + size_of::<S>())
+ .and_then(S::from_bytes_copy)
+ .ok_or(EINVAL)
+}
+
+/// Heavy-Secured firmware header.
+///
+/// Such firmwares have an application-specific payload that needs to be patched with a given
+/// signature.
+#[repr(C)]
+#[derive(Debug, Clone)]
+struct HsHeaderV2 {
+ /// Offset to the start of the signatures.
+ sig_prod_offset: u32,
+ /// Size in bytes of the signatures.
+ sig_prod_size: u32,
+ /// Offset to a `u32` containing the location at which to patch the signature in the microcode
+ /// image.
+ patch_loc_offset: u32,
+ /// Offset to a `u32` containing the index of the signature to patch.
+ patch_sig_offset: u32,
+ /// Start offset to the signature metadata.
+ meta_data_offset: u32,
+ /// Size in bytes of the signature metadata.
+ meta_data_size: u32,
+ /// Offset to a `u32` containing the number of signatures in the signatures section.
+ num_sig_offset: u32,
+ /// Offset of the application-specific header.
+ header_offset: u32,
+ /// Size in bytes of the application-specific header.
+ header_size: u32,
+}
+
+// SAFETY: all bit patterns are valid for this type, and it doesn't use interior mutability.
+unsafe impl FromBytes for HsHeaderV2 {}
+
+/// Heavy-Secured Firmware image container.
+///
+/// This provides convenient access to the fields of [`HsHeaderV2`] that are actually indices to
+/// read from in the firmware data.
+struct HsFirmwareV2<'a> {
+ hdr: HsHeaderV2,
+ fw: &'a [u8],
+}
+
+impl<'a> HsFirmwareV2<'a> {
+ /// Interprets the header of `bin_fw` as a [`HsHeaderV2`] and returns an instance of
+ /// `HsFirmwareV2` for further parsing.
+ ///
+ /// Fails if the header pointed at by `bin_fw` is not within the bounds of the firmware image.
+ fn new(bin_fw: &BinFirmware<'a>) -> Result<Self> {
+ frombytes_at::<HsHeaderV2>(bin_fw.fw, bin_fw.hdr.header_offset as usize)
+ .map(|hdr| Self { hdr, fw: bin_fw.fw })
+ }
+
+ /// Returns the location at which the signatures should be patched in the microcode image.
+ ///
+ /// Fails if the offset of the patch location is outside the bounds of the firmware
+ /// image.
+ fn patch_location(&self) -> Result<u32> {
+ frombytes_at::<u32>(self.fw, self.hdr.patch_loc_offset as usize)
+ }
+
+    /// Returns an iterator over the signatures of the firmware. The iterator can be empty if the
+    /// firmware is unsigned.
+    ///
+    /// Fails if the signatures pointed to by the header are outside the bounds of the firmware
+    /// image.
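+    ///
+    /// For example (illustrative numbers): with `sig_prod_size == 384` and `num_sig == 3`, each
+    /// signature is `384 / 3 == 128` bytes long, and the iterator yields three 128-byte chunks
+    /// starting at offset `sig_prod_offset + patch_sig` of the firmware image.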
+ fn signatures_iter(&'a self) -> Result<impl Iterator<Item = BooterSignature<'a>>> {
+ let num_sig = frombytes_at::<u32>(self.fw, self.hdr.num_sig_offset as usize)?;
+ let iter = match self.hdr.sig_prod_size.checked_div(num_sig) {
+ // If there are no signatures, return an iterator that will yield zero elements.
+ None => (&[] as &[u8]).chunks_exact(1),
+ Some(sig_size) => {
+ let patch_sig = frombytes_at::<u32>(self.fw, self.hdr.patch_sig_offset as usize)?;
+ let signatures_start = (self.hdr.sig_prod_offset + patch_sig) as usize;
+
+ self.fw
+ // Get signatures range.
+ .get(signatures_start..signatures_start + self.hdr.sig_prod_size as usize)
+ .ok_or(EINVAL)?
+ .chunks_exact(sig_size as usize)
+ }
+ };
+
+ // Map the byte slices into signatures.
+ Ok(iter.map(BooterSignature))
+ }
+}
+
+/// Signature parameters, as defined in the firmware.
+#[repr(C)]
+struct HsSignatureParams {
+ /// Fuse version to use.
+ fuse_ver: u32,
+ /// Mask of engine IDs this firmware applies to.
+ engine_id_mask: u32,
+ /// ID of the microcode.
+ ucode_id: u32,
+}
+
+// SAFETY: all bit patterns are valid for this type, and it doesn't use interior mutability.
+unsafe impl FromBytes for HsSignatureParams {}
+
+impl HsSignatureParams {
+ /// Returns the signature parameters contained in `hs_fw`.
+ ///
+    /// Fails if the metadata region of `hs_fw` is outside the bounds of the firmware image, or if
+    /// its size doesn't match that of [`HsSignatureParams`].
+ fn new(hs_fw: &HsFirmwareV2<'_>) -> Result<Self> {
+ let start = hs_fw.hdr.meta_data_offset as usize;
+ let end = start
+ .checked_add(hs_fw.hdr.meta_data_size as usize)
+ .ok_or(EINVAL)?;
+
+ hs_fw
+ .fw
+ .get(start..end)
+ .and_then(Self::from_bytes_copy)
+ .ok_or(EINVAL)
+ }
+}
+
+/// Header for code and data load offsets.
+#[repr(C)]
+#[derive(Debug, Clone)]
+struct HsLoadHeaderV2 {
+    /// Offset at which the code starts.
+    os_code_offset: u32,
+    /// Total size of the code, for all apps.
+    os_code_size: u32,
+    /// Offset at which the data starts.
+    os_data_offset: u32,
+    /// Size of the data.
+    os_data_size: u32,
+    /// Number of apps following this header. Each app is described by a [`HsLoadHeaderV2App`].
+    num_apps: u32,
+}
+
+// SAFETY: all bit patterns are valid for this type, and it doesn't use interior mutability.
+unsafe impl FromBytes for HsLoadHeaderV2 {}
+
+impl HsLoadHeaderV2 {
+ /// Returns the load header contained in `hs_fw`.
+ ///
+ /// Fails if the header pointed at by `hs_fw` is not within the bounds of the firmware image.
+ fn new(hs_fw: &HsFirmwareV2<'_>) -> Result<Self> {
+ frombytes_at::<Self>(hs_fw.fw, hs_fw.hdr.header_offset as usize)
+ }
+}
+
+/// Header for app code loader.
+#[repr(C)]
+#[derive(Debug, Clone)]
+struct HsLoadHeaderV2App {
+ /// Offset at which to load the app code.
+ offset: u32,
+ /// Length in bytes of the app code.
+ len: u32,
+}
+
+// SAFETY: all bit patterns are valid for this type, and it doesn't use interior mutability.
+unsafe impl FromBytes for HsLoadHeaderV2App {}
+
+impl HsLoadHeaderV2App {
+ /// Returns the [`HsLoadHeaderV2App`] for app `idx` of `hs_fw`.
+ ///
+ /// Fails if `idx` is larger than the number of apps declared in `hs_fw`, or if the header is
+ /// not within the bounds of the firmware image.
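+    ///
+    /// App headers form an array immediately following the load header, so app `idx` is read from
+    /// offset `header_offset + size_of::<HsLoadHeaderV2>() + idx * size_of::<HsLoadHeaderV2App>()`.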
+ fn new(hs_fw: &HsFirmwareV2<'_>, idx: u32) -> Result<Self> {
+ let load_hdr = HsLoadHeaderV2::new(hs_fw)?;
+ if idx >= load_hdr.num_apps {
+ Err(EINVAL)
+ } else {
+ frombytes_at::<Self>(
+ hs_fw.fw,
+ (hs_fw.hdr.header_offset as usize)
+ // Skip the load header...
+ .checked_add(size_of::<HsLoadHeaderV2>())
+ // ... and jump to app header `idx`.
+ .and_then(|offset| {
+ offset.checked_add((idx as usize).checked_mul(size_of::<Self>())?)
+ })
+ .ok_or(EINVAL)?,
+ )
+ }
+ }
+}
+
+/// Signature for the Booter firmware. Its size is encoded into the header and not known at
+/// compile time, so we just wrap a byte slice on which we can implement [`FirmwareSignature`].
+struct BooterSignature<'a>(&'a [u8]);
+
+impl<'a> AsRef<[u8]> for BooterSignature<'a> {
+ fn as_ref(&self) -> &[u8] {
+ self.0
+ }
+}
+
+impl<'a> FirmwareSignature<BooterFirmware> for BooterSignature<'a> {}
+
+/// The `Booter` loader firmware, responsible for loading the GSP.
+pub(crate) struct BooterFirmware {
+ // Load parameters for `IMEM` falcon memory.
+ imem_load_target: FalconLoadTarget,
+ // Load parameters for `DMEM` falcon memory.
+ dmem_load_target: FalconLoadTarget,
+ // BROM falcon parameters.
+ brom_params: FalconBromParams,
+ // Device-mapped firmware image.
+ ucode: FirmwareDmaObject<Self, Signed>,
+}
+
+impl FirmwareDmaObject<BooterFirmware, Unsigned> {
+ fn new_booter(dev: &device::Device<device::Bound>, data: &[u8]) -> Result<Self> {
+ DmaObject::from_data(dev, data).map(|ucode| Self(ucode, PhantomData))
+ }
+}
+
+#[derive(Copy, Clone, Debug, PartialEq)]
+pub(crate) enum BooterKind {
+ Loader,
+ #[expect(unused)]
+ Unloader,
+}
+
+impl BooterFirmware {
+    /// Requests the Booter firmware of `kind` for `chipset`, parses it, and patches the correct
+    /// signature so it is ready to be loaded and run on `falcon`.
+ pub(crate) fn new(
+ dev: &device::Device<device::Bound>,
+ kind: BooterKind,
+ chipset: Chipset,
+ ver: &str,
+ falcon: &Falcon<<Self as FalconFirmware>::Target>,
+ bar: &Bar0,
+ ) -> Result<Self> {
+ let fw_name = match kind {
+ BooterKind::Loader => "booter_load",
+ BooterKind::Unloader => "booter_unload",
+ };
+ let fw = super::request_firmware(dev, chipset, fw_name, ver)?;
+ let bin_fw = BinFirmware::new(&fw)?;
+
+ // The binary firmware embeds a Heavy-Secured firmware.
+ let hs_fw = HsFirmwareV2::new(&bin_fw)?;
+
+ // The Heavy-Secured firmware embeds a firmware load descriptor.
+ let load_hdr = HsLoadHeaderV2::new(&hs_fw)?;
+
+        // Offset in `ucode` at which to patch the signature.
+ let patch_loc = hs_fw.patch_location()?;
+
+ let sig_params = HsSignatureParams::new(&hs_fw)?;
+ let brom_params = FalconBromParams {
+            // `patch_loc` is an absolute offset into the image, while `pkc_data_offset` must be
+            // relative to the start of the data section (`os_data_offset`).
+ pkc_data_offset: patch_loc
+ .checked_sub(load_hdr.os_data_offset)
+ .ok_or(EINVAL)?,
+ engine_id_mask: u16::try_from(sig_params.engine_id_mask).map_err(|_| EINVAL)?,
+ ucode_id: u8::try_from(sig_params.ucode_id).map_err(|_| EINVAL)?,
+ };
+ let app0 = HsLoadHeaderV2App::new(&hs_fw, 0)?;
+
+ // Object containing the firmware microcode to be signature-patched.
+ let ucode = bin_fw
+ .data()
+ .ok_or(EINVAL)
+ .and_then(|data| FirmwareDmaObject::<Self, _>::new_booter(dev, data))?;
+
+ let ucode_signed = {
+ let mut signatures = hs_fw.signatures_iter()?.peekable();
+
+ if signatures.peek().is_none() {
+ // If there are no signatures, then the firmware is unsigned.
+ ucode.no_patch_signature()
+ } else {
+ // Obtain the version from the fuse register, and extract the corresponding
+ // signature.
+ let reg_fuse_version = falcon.signature_reg_fuse_version(
+ bar,
+ brom_params.engine_id_mask,
+ brom_params.ucode_id,
+ )?;
+
+ // `0` means the last signature should be used.
+ const FUSE_VERSION_USE_LAST_SIG: u32 = 0;
+ let signature = match reg_fuse_version {
+ FUSE_VERSION_USE_LAST_SIG => signatures.last(),
+                    // Otherwise, the fuse register value is subtracted from the firmware's fuse
+                    // version to obtain the signature index.
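+                    // For example (illustrative values): with `fuse_ver == 3` and a fuse
+                    // register reading of `1`, signature index `2` is selected.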
+ reg_fuse_version => {
+ let Some(idx) = sig_params.fuse_ver.checked_sub(reg_fuse_version) else {
+ dev_err!(dev, "invalid fuse version for Booter firmware\n");
+ return Err(EINVAL);
+ };
+ signatures.nth(idx as usize)
+ }
+ }
+ .ok_or(EINVAL)?;
+
+ ucode.patch_signature(&signature, patch_loc as usize)?
+ }
+ };
+
+ Ok(Self {
+ imem_load_target: FalconLoadTarget {
+ src_start: app0.offset,
+ dst_start: 0,
+ len: app0.len,
+ },
+ dmem_load_target: FalconLoadTarget {
+ src_start: load_hdr.os_data_offset,
+ dst_start: 0,
+ len: load_hdr.os_data_size,
+ },
+ brom_params,
+ ucode: ucode_signed,
+ })
+ }
+}
+
+impl FalconLoadParams for BooterFirmware {
+ fn imem_load_params(&self) -> FalconLoadTarget {
+ self.imem_load_target.clone()
+ }
+
+ fn dmem_load_params(&self) -> FalconLoadTarget {
+ self.dmem_load_target.clone()
+ }
+
+ fn brom_params(&self) -> FalconBromParams {
+ self.brom_params.clone()
+ }
+
+ fn boot_addr(&self) -> u32 {
+ self.imem_load_target.src_start
+ }
+}
+
+impl Deref for BooterFirmware {
+ type Target = DmaObject;
+
+ fn deref(&self) -> &Self::Target {
+ &self.ucode.0
+ }
+}
+
+impl FalconFirmware for BooterFirmware {
+ type Target = Sec2;
+}
diff --git a/drivers/gpu/nova-core/firmware/fwsec.rs b/drivers/gpu/nova-core/firmware/fwsec.rs
index 0dff3cfa90af..8edbb5c0572c 100644
--- a/drivers/gpu/nova-core/firmware/fwsec.rs
+++ b/drivers/gpu/nova-core/firmware/fwsec.rs
@@ -202,9 +202,6 @@ pub(crate) struct FwsecFirmware {
ucode: FirmwareDmaObject<Self, Signed>,
}
-// We need to load full DMEM pages.
-const DMEM_LOAD_SIZE_ALIGN: u32 = 256;
-
impl FalconLoadParams for FwsecFirmware {
fn imem_load_params(&self) -> FalconLoadTarget {
FalconLoadTarget {
@@ -218,11 +215,7 @@ impl FalconLoadParams for FwsecFirmware {
FalconLoadTarget {
src_start: self.desc.imem_load_size,
dst_start: self.desc.dmem_phys_base,
- // TODO[NUMM]: replace with `align_up` once it lands.
- len: self
- .desc
- .dmem_load_size
- .next_multiple_of(DMEM_LOAD_SIZE_ALIGN),
+ len: self.desc.dmem_load_size,
}
}
@@ -253,8 +246,8 @@ impl FalconFirmware for FwsecFirmware {
impl FirmwareDmaObject<FwsecFirmware, Unsigned> {
fn new_fwsec(dev: &Device<device::Bound>, bios: &Vbios, cmd: FwsecCommand) -> Result<Self> {
- let desc = bios.fwsec_image().header(dev)?;
- let ucode = bios.fwsec_image().ucode(dev, desc)?;
+ let desc = bios.fwsec_image().header()?;
+ let ucode = bios.fwsec_image().ucode(desc)?;
let mut dma_object = DmaObject::from_data(dev, ucode)?;
let hdr_offset = (desc.imem_load_size + desc.interface_offset) as usize;
@@ -343,7 +336,7 @@ impl FwsecFirmware {
let ucode_dma = FirmwareDmaObject::<Self, _>::new_fwsec(dev, bios, cmd)?;
// Patch signature if needed.
- let desc = bios.fwsec_image().header(dev)?;
+ let desc = bios.fwsec_image().header()?;
let ucode_signed = if desc.signature_count != 0 {
let sig_base_img = (desc.imem_load_size + desc.pkc_data_offset) as usize;
let desc_sig_versions = u32::from(desc.signature_versions);
@@ -382,7 +375,7 @@ impl FwsecFirmware {
dev_dbg!(dev, "patching signature with index {}\n", signature_idx);
let signature = bios
.fwsec_image()
- .sigs(dev, desc)
+ .sigs(desc)
.and_then(|sigs| sigs.get(signature_idx).ok_or(EINVAL))?;
ucode_dma.patch_signature(signature, sig_base_img)?
diff --git a/drivers/gpu/nova-core/firmware/gsp.rs b/drivers/gpu/nova-core/firmware/gsp.rs
new file mode 100644
index 000000000000..9b70095434c6
--- /dev/null
+++ b/drivers/gpu/nova-core/firmware/gsp.rs
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use core::mem::size_of_val;
+
+use kernel::device;
+use kernel::dma::{DataDirection, DmaAddress};
+use kernel::kvec;
+use kernel::prelude::*;
+use kernel::scatterlist::{Owned, SGTable};
+
+use crate::dma::DmaObject;
+use crate::firmware::riscv::RiscvFirmware;
+use crate::gpu::{Architecture, Chipset};
+use crate::gsp::GSP_PAGE_SIZE;
+
+/// Ad-hoc and temporary module to extract sections from ELF images.
+///
+/// Some firmware images are currently packaged as ELF files, where section names are used as keys
+/// to look up specific and related bits of data. Future firmware versions are scheduled to move
+/// away from that scheme before nova-core becomes stable, which means this module will eventually
+/// be removed.
+mod elf {
+ use core::mem::size_of;
+
+ use kernel::bindings;
+ use kernel::str::CStr;
+ use kernel::transmute::FromBytes;
+
+ /// Newtype to provide a [`FromBytes`] implementation.
+ #[repr(transparent)]
+ struct Elf64Hdr(bindings::elf64_hdr);
+ // SAFETY: all bit patterns are valid for this type, and it doesn't use interior mutability.
+ unsafe impl FromBytes for Elf64Hdr {}
+
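+    /// Newtype to provide a [`FromBytes`] implementation.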
+ #[repr(transparent)]
+ struct Elf64SHdr(bindings::elf64_shdr);
+ // SAFETY: all bit patterns are valid for this type, and it doesn't use interior mutability.
+ unsafe impl FromBytes for Elf64SHdr {}
+
+    /// Tries to extract the section named `name` from the ELF64 image `elf`, and returns it.
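+    ///
+    /// A usage sketch, mirroring how the GSP firmware image section is looked up:
+    ///
+    /// ```ignore
+    /// let fw_section = elf64_section(fw.data(), ".fwimage").ok_or(EINVAL)?;
+    /// ```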
+ pub(super) fn elf64_section<'a, 'b>(elf: &'a [u8], name: &'b str) -> Option<&'a [u8]> {
+ let hdr = &elf
+ .get(0..size_of::<bindings::elf64_hdr>())
+ .and_then(Elf64Hdr::from_bytes)?
+ .0;
+
+ // Get all the section headers.
+ let mut shdr = {
+ let shdr_num = usize::from(hdr.e_shnum);
+ let shdr_start = usize::try_from(hdr.e_shoff).ok()?;
+ let shdr_end = shdr_num
+ .checked_mul(size_of::<Elf64SHdr>())
+ .and_then(|v| v.checked_add(shdr_start))?;
+
+ elf.get(shdr_start..shdr_end)
+ .map(|slice| slice.chunks_exact(size_of::<Elf64SHdr>()))?
+ };
+
+ // Get the strings table.
+ let strhdr = shdr
+ .clone()
+ .nth(usize::from(hdr.e_shstrndx))
+ .and_then(Elf64SHdr::from_bytes)?;
+
+ // Find the section which name matches `name` and return it.
+ shdr.find(|&sh| {
+ let Some(hdr) = Elf64SHdr::from_bytes(sh) else {
+ return false;
+ };
+
+ let Some(name_idx) = strhdr
+ .0
+ .sh_offset
+ .checked_add(u64::from(hdr.0.sh_name))
+ .and_then(|idx| usize::try_from(idx).ok())
+ else {
+ return false;
+ };
+
+ // Get the start of the name.
+ elf.get(name_idx..)
+ // Stop at the first `0`.
+ .and_then(|nstr| nstr.get(0..=nstr.iter().position(|b| *b == 0)?))
+ // Convert into CStr. This should never fail because of the line above.
+ .and_then(|nstr| CStr::from_bytes_with_nul(nstr).ok())
+ // Convert into str.
+ .and_then(|c_str| c_str.to_str().ok())
+ // Check that the name matches.
+ .map(|str| str == name)
+ .unwrap_or(false)
+ })
+ // Return the slice containing the section.
+ .and_then(|sh| {
+ let hdr = Elf64SHdr::from_bytes(sh)?;
+ let start = usize::try_from(hdr.0.sh_offset).ok()?;
+ let end = usize::try_from(hdr.0.sh_size)
+ .ok()
+ .and_then(|sh_size| start.checked_add(sh_size))?;
+
+ elf.get(start..end)
+ })
+ }
+}
+
+/// GSP firmware with 3-level radix page tables for the GSP bootloader.
+///
+/// The bootloader expects firmware to be mapped starting at address 0 in GSP's virtual address
+/// space:
+///
+/// ```text
+/// Level 0: 1 page, 1 entry -> points to first level 1 page
+/// Level 1: Multiple pages/entries -> each entry points to a level 2 page
+/// Level 2: Multiple pages/entries -> each entry points to a firmware page
+/// ```
+///
+/// Each page is 4KB, each entry is 8 bytes (64-bit DMA address).
+/// Also known as "Radix3" firmware.
+#[pin_data]
+pub(crate) struct GspFirmware {
+ /// The GSP firmware inside a [`VVec`], device-mapped via a SG table.
+ #[pin]
+ fw: SGTable<Owned<VVec<u8>>>,
+ /// Level 2 page table whose entries contain DMA addresses of firmware pages.
+ #[pin]
+ level2: SGTable<Owned<VVec<u8>>>,
+ /// Level 1 page table whose entries contain DMA addresses of level 2 pages.
+ #[pin]
+ level1: SGTable<Owned<VVec<u8>>>,
+ /// Level 0 page table (single 4KB page) with one entry: DMA address of first level 1 page.
+ level0: DmaObject,
+ /// Size in bytes of the firmware contained in [`Self::fw`].
+ size: usize,
+ /// Device-mapped GSP signatures matching the GPU's [`Chipset`].
+ signatures: DmaObject,
+ /// GSP bootloader, verifies the GSP firmware before loading and running it.
+ bootloader: RiscvFirmware,
+}
+
+impl GspFirmware {
+    /// Loads the GSP firmware binaries, maps them into `dev`'s address space, and creates the
+    /// page tables expected by the GSP bootloader to load it.
+ pub(crate) fn new<'a, 'b>(
+ dev: &'a device::Device<device::Bound>,
+ chipset: Chipset,
+ ver: &'b str,
+ ) -> Result<impl PinInit<Self, Error> + 'a> {
+ let fw = super::request_firmware(dev, chipset, "gsp", ver)?;
+
+ let fw_section = elf::elf64_section(fw.data(), ".fwimage").ok_or(EINVAL)?;
+
+ let sigs_section = match chipset.arch() {
+ Architecture::Ampere => ".fwsignature_ga10x",
+ _ => return Err(ENOTSUPP),
+ };
+ let signatures = elf::elf64_section(fw.data(), sigs_section)
+ .ok_or(EINVAL)
+ .and_then(|data| DmaObject::from_data(dev, data))?;
+
+ let size = fw_section.len();
+
+ // Move the firmware into a vmalloc'd vector and map it into the device address
+ // space.
+ let fw_vvec = VVec::with_capacity(fw_section.len(), GFP_KERNEL)
+ .and_then(|mut v| {
+ v.extend_from_slice(fw_section, GFP_KERNEL)?;
+ Ok(v)
+ })
+ .map_err(|_| ENOMEM)?;
+
+ let bl = super::request_firmware(dev, chipset, "bootloader", ver)?;
+ let bootloader = RiscvFirmware::new(dev, &bl)?;
+
+ Ok(try_pin_init!(Self {
+ fw <- SGTable::new(dev, fw_vvec, DataDirection::ToDevice, GFP_KERNEL),
+ level2 <- {
+ // Allocate the level 2 page table, map the firmware onto it, and map it into the
+ // device address space.
+ VVec::<u8>::with_capacity(
+ fw.iter().count() * core::mem::size_of::<u64>(),
+ GFP_KERNEL,
+ )
+ .map_err(|_| ENOMEM)
+ .and_then(|level2| map_into_lvl(&fw, level2))
+ .map(|level2| SGTable::new(dev, level2, DataDirection::ToDevice, GFP_KERNEL))?
+ },
+ level1 <- {
+ // Allocate the level 1 page table, map the level 2 page table onto it, and map it
+ // into the device address space.
+ VVec::<u8>::with_capacity(
+ level2.iter().count() * core::mem::size_of::<u64>(),
+ GFP_KERNEL,
+ )
+ .map_err(|_| ENOMEM)
+ .and_then(|level1| map_into_lvl(&level2, level1))
+ .map(|level1| SGTable::new(dev, level1, DataDirection::ToDevice, GFP_KERNEL))?
+ },
+ level0: {
+ // Allocate the level 0 page table as a device-visible DMA object, and map the
+ // level 1 page table onto it.
+
+ // Level 0 page table data.
+ let mut level0_data = kvec![0u8; GSP_PAGE_SIZE]?;
+
+            // Fill the single level 0 entry with the DMA address of the first level 1 page.
+ #[allow(clippy::useless_conversion)]
+ let level1_entry = u64::from(level1.iter().next().unwrap().dma_address());
+ let dst = &mut level0_data[..size_of_val(&level1_entry)];
+ dst.copy_from_slice(&level1_entry.to_le_bytes());
+
+ // Turn the level0 page table into a [`DmaObject`].
+ DmaObject::from_data(dev, &level0_data)?
+ },
+ size,
+ signatures,
+ bootloader,
+ }))
+ }
+
+ #[expect(unused)]
+ /// Returns the DMA handle of the radix3 level 0 page table.
+ pub(crate) fn radix3_dma_handle(&self) -> DmaAddress {
+ self.level0.dma_handle()
+ }
+}
+
+/// Builds a page table from a scatter-gather list.
+///
+/// Takes each DMA-mapped region from `sg_table` and writes page table entries
+/// for all 4KB pages within that region. For example, a 16KB SG entry becomes
+/// 4 consecutive page table entries.
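+///
+/// For instance (illustrative addresses), an SG entry with DMA address `0x1000_0000` and length
+/// `16384` produces the four little-endian entries `0x1000_0000`, `0x1000_1000`, `0x1000_2000`
+/// and `0x1000_3000`.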
+fn map_into_lvl(sg_table: &SGTable<Owned<VVec<u8>>>, mut dst: VVec<u8>) -> Result<VVec<u8>> {
+ for sg_entry in sg_table.iter() {
+ // Number of pages we need to map.
+ let num_pages = (sg_entry.dma_len() as usize).div_ceil(GSP_PAGE_SIZE);
+
+ for i in 0..num_pages {
+ let entry = sg_entry.dma_address() + (i as u64 * GSP_PAGE_SIZE as u64);
+ dst.extend_from_slice(&entry.to_le_bytes(), GFP_KERNEL)?;
+ }
+ }
+
+ Ok(dst)
+}
diff --git a/drivers/gpu/nova-core/firmware/riscv.rs b/drivers/gpu/nova-core/firmware/riscv.rs
new file mode 100644
index 000000000000..afb08f5bc4ba
--- /dev/null
+++ b/drivers/gpu/nova-core/firmware/riscv.rs
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Support for firmware binaries designed to run on a RISC-V core. Such firmware files have a
+//! dedicated header.
+
+use core::mem::size_of;
+
+use kernel::device;
+use kernel::firmware::Firmware;
+use kernel::prelude::*;
+use kernel::transmute::FromBytes;
+
+use crate::dma::DmaObject;
+use crate::firmware::BinFirmware;
+
+/// Descriptor for microcode running on a RISC-V core.
+#[repr(C)]
+#[derive(Debug)]
+struct RmRiscvUCodeDesc {
+ version: u32,
+ bootloader_offset: u32,
+ bootloader_size: u32,
+ bootloader_param_offset: u32,
+ bootloader_param_size: u32,
+ riscv_elf_offset: u32,
+ riscv_elf_size: u32,
+ app_version: u32,
+ manifest_offset: u32,
+ manifest_size: u32,
+ monitor_data_offset: u32,
+ monitor_data_size: u32,
+ monitor_code_offset: u32,
+ monitor_code_size: u32,
+}
+
+// SAFETY: all bit patterns are valid for this type, and it doesn't use interior mutability.
+unsafe impl FromBytes for RmRiscvUCodeDesc {}
+
+impl RmRiscvUCodeDesc {
+ /// Interprets the header of `bin_fw` as a [`RmRiscvUCodeDesc`] and returns it.
+ ///
+ /// Fails if the header pointed at by `bin_fw` is not within the bounds of the firmware image.
+ fn new(bin_fw: &BinFirmware<'_>) -> Result<Self> {
+ let offset = bin_fw.hdr.header_offset as usize;
+
+ bin_fw
+ .fw
+ .get(offset..offset + size_of::<Self>())
+ .and_then(Self::from_bytes_copy)
+ .ok_or(EINVAL)
+ }
+}
+
+/// A parsed firmware for a RISC-V core, ready to be loaded and run.
+#[expect(unused)]
+pub(crate) struct RiscvFirmware {
+ /// Offset at which the code starts in the firmware image.
+ code_offset: u32,
+ /// Offset at which the data starts in the firmware image.
+ data_offset: u32,
+ /// Offset at which the manifest starts in the firmware image.
+ manifest_offset: u32,
+ /// Application version.
+ app_version: u32,
+ /// Device-mapped firmware image.
+ ucode: DmaObject,
+}
+
+impl RiscvFirmware {
+ /// Parses the RISC-V firmware image contained in `fw`.
+ pub(crate) fn new(dev: &device::Device<device::Bound>, fw: &Firmware) -> Result<Self> {
+ let bin_fw = BinFirmware::new(fw)?;
+
+ let riscv_desc = RmRiscvUCodeDesc::new(&bin_fw)?;
+
+ let ucode = {
+ let start = bin_fw.hdr.data_offset as usize;
+ let len = bin_fw.hdr.data_size as usize;
+
+ DmaObject::from_data(dev, fw.data().get(start..start + len).ok_or(EINVAL)?)?
+ };
+
+ Ok(Self {
+ ucode,
+ code_offset: riscv_desc.monitor_code_offset,
+ data_offset: riscv_desc.monitor_data_offset,
+ manifest_offset: riscv_desc.manifest_offset,
+ app_version: riscv_desc.app_version,
+ })
+ }
+}
diff --git a/drivers/gpu/nova-core/gpu.rs b/drivers/gpu/nova-core/gpu.rs
index 600cc90b5fab..af20e2daea24 100644
--- a/drivers/gpu/nova-core/gpu.rs
+++ b/drivers/gpu/nova-core/gpu.rs
@@ -3,15 +3,11 @@
use kernel::{device, devres::Devres, error::code::*, fmt, pci, prelude::*, sync::Arc};
use crate::driver::Bar0;
-use crate::falcon::{gsp::Gsp, sec2::Sec2, Falcon};
-use crate::fb::FbLayout;
+use crate::falcon::{gsp::Gsp as GspFalcon, sec2::Sec2 as Sec2Falcon, Falcon};
use crate::fb::SysmemFlush;
-use crate::firmware::fwsec::{FwsecCommand, FwsecFirmware};
-use crate::firmware::{Firmware, FIRMWARE_VERSION};
use crate::gfw;
+use crate::gsp::Gsp;
use crate::regs;
-use crate::util;
-use crate::vbios::Vbios;
macro_rules! define_chipset {
({ $($variant:ident = $value:expr),* $(,)* }) =>
@@ -27,13 +23,23 @@ macro_rules! define_chipset {
$( Chipset::$variant, )*
];
- pub(crate) const NAMES: [&'static str; Self::ALL.len()] = [
- $( util::const_bytes_to_str(
- util::to_lowercase_bytes::<{ stringify!($variant).len() }>(
- stringify!($variant)
- ).as_slice()
- ), )*
- ];
+ ::kernel::macros::paste!(
+ /// Returns the name of this chipset, in lowercase.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let chipset = Chipset::GA102;
+ /// assert_eq!(chipset.name(), "ga102");
+ /// ```
+ pub(crate) const fn name(&self) -> &'static str {
+ match *self {
+ $(
+ Chipset::$variant => stringify!([<$variant:lower>]),
+ )*
+ }
+ }
+ );
}
// TODO[FPRI]: replace with something like derive(FromPrimitive)
@@ -162,150 +168,74 @@ impl Spec {
}
/// Structure holding the resources required to operate the GPU.
-#[pin_data(PinnedDrop)]
+#[pin_data]
pub(crate) struct Gpu {
spec: Spec,
/// MMIO mapping of PCI BAR 0
bar: Arc<Devres<Bar0>>,
- fw: Firmware,
/// System memory page required for flushing all pending GPU-side memory writes done through
/// PCIE into system memory, via sysmembar (A GPU-initiated HW memory-barrier operation).
sysmem_flush: SysmemFlush,
-}
-
-#[pinned_drop]
-impl PinnedDrop for Gpu {
- fn drop(self: Pin<&mut Self>) {
- // Unregister the sysmem flush page before we release it.
- self.bar
- .try_access_with(|b| self.sysmem_flush.unregister(b));
- }
+ /// GSP falcon instance, used for GSP boot up and cleanup.
+ gsp_falcon: Falcon<GspFalcon>,
+ /// SEC2 falcon instance, used for GSP boot up and cleanup.
+ sec2_falcon: Falcon<Sec2Falcon>,
+ /// GSP runtime data. Temporarily an empty placeholder.
+ #[pin]
+ gsp: Gsp,
}
impl Gpu {
- /// Helper function to load and run the FWSEC-FRTS firmware and confirm that it has properly
- /// created the WPR2 region.
- ///
- /// TODO: this needs to be moved into a larger type responsible for booting the whole GSP
- /// (`GspBooter`?).
- fn run_fwsec_frts(
- dev: &device::Device<device::Bound>,
- falcon: &Falcon<Gsp>,
- bar: &Bar0,
- bios: &Vbios,
- fb_layout: &FbLayout,
- ) -> Result<()> {
- // Check that the WPR2 region does not already exists - if it does, we cannot run
- // FWSEC-FRTS until the GPU is reset.
- if regs::NV_PFB_PRI_MMU_WPR2_ADDR_HI::read(bar).higher_bound() != 0 {
- dev_err!(
- dev,
- "WPR2 region already exists - GPU needs to be reset to proceed\n"
- );
- return Err(EBUSY);
- }
+ pub(crate) fn new<'a>(
+ pdev: &'a pci::Device<device::Bound>,
+ devres_bar: Arc<Devres<Bar0>>,
+ bar: &'a Bar0,
+ ) -> impl PinInit<Self, Error> + 'a {
+ try_pin_init!(Self {
+ spec: Spec::new(bar).inspect(|spec| {
+ dev_info!(
+ pdev.as_ref(),
+ "NVIDIA (Chipset: {}, Architecture: {:?}, Revision: {})\n",
+ spec.chipset,
+ spec.chipset.arch(),
+ spec.revision
+ );
+ })?,
- let fwsec_frts = FwsecFirmware::new(
- dev,
- falcon,
- bar,
- bios,
- FwsecCommand::Frts {
- frts_addr: fb_layout.frts.start,
- frts_size: fb_layout.frts.end - fb_layout.frts.start,
+ // We must wait for GFW_BOOT completion before doing any significant setup on the GPU.
+ _: {
+ gfw::wait_gfw_boot_completion(bar)
+ .inspect_err(|_| dev_err!(pdev.as_ref(), "GFW boot did not complete"))?;
},
- )?;
- // Run FWSEC-FRTS to create the WPR2 region.
- fwsec_frts.run(dev, falcon, bar)?;
+ sysmem_flush: SysmemFlush::register(pdev.as_ref(), bar, spec.chipset)?,
- // SCRATCH_E contains the error code for FWSEC-FRTS.
- let frts_status = regs::NV_PBUS_SW_SCRATCH_0E::read(bar).frts_err_code();
- if frts_status != 0 {
- dev_err!(
- dev,
- "FWSEC-FRTS returned with error code {:#x}",
- frts_status
- );
+ gsp_falcon: Falcon::new(
+ pdev.as_ref(),
+ spec.chipset,
+ bar,
+ spec.chipset > Chipset::GA100,
+ )
+ .inspect(|falcon| falcon.clear_swgen0_intr(bar))?,
- return Err(EIO);
- }
+ sec2_falcon: Falcon::new(pdev.as_ref(), spec.chipset, bar, true)?,
- // Check that the WPR2 region has been created as we requested.
- let (wpr2_lo, wpr2_hi) = (
- regs::NV_PFB_PRI_MMU_WPR2_ADDR_LO::read(bar).lower_bound(),
- regs::NV_PFB_PRI_MMU_WPR2_ADDR_HI::read(bar).higher_bound(),
- );
+ gsp <- Gsp::new(),
- match (wpr2_lo, wpr2_hi) {
- (_, 0) => {
- dev_err!(dev, "WPR2 region not created after running FWSEC-FRTS\n");
+ _: { gsp.boot(pdev, bar, spec.chipset, gsp_falcon, sec2_falcon)? },
- Err(EIO)
- }
- (wpr2_lo, _) if wpr2_lo != fb_layout.frts.start => {
- dev_err!(
- dev,
- "WPR2 region created at unexpected address {:#x}; expected {:#x}\n",
- wpr2_lo,
- fb_layout.frts.start,
- );
-
- Err(EIO)
- }
- (wpr2_lo, wpr2_hi) => {
- dev_dbg!(dev, "WPR2: {:#x}-{:#x}\n", wpr2_lo, wpr2_hi);
- dev_dbg!(dev, "GPU instance built\n");
-
- Ok(())
- }
- }
+ bar: devres_bar,
+ })
}
- pub(crate) fn new(
- pdev: &pci::Device<device::Bound>,
- devres_bar: Arc<Devres<Bar0>>,
- ) -> Result<impl PinInit<Self>> {
- let bar = devres_bar.access(pdev.as_ref())?;
- let spec = Spec::new(bar)?;
- let fw = Firmware::new(pdev.as_ref(), spec.chipset, FIRMWARE_VERSION)?;
-
- dev_info!(
- pdev.as_ref(),
- "NVIDIA (Chipset: {}, Architecture: {:?}, Revision: {})\n",
- spec.chipset,
- spec.chipset.arch(),
- spec.revision
- );
-
- // We must wait for GFW_BOOT completion before doing any significant setup on the GPU.
- gfw::wait_gfw_boot_completion(bar)
- .inspect_err(|_| dev_err!(pdev.as_ref(), "GFW boot did not complete"))?;
-
- let sysmem_flush = SysmemFlush::register(pdev.as_ref(), bar, spec.chipset)?;
-
- let gsp_falcon = Falcon::<Gsp>::new(
- pdev.as_ref(),
- spec.chipset,
- bar,
- spec.chipset > Chipset::GA100,
- )?;
- gsp_falcon.clear_swgen0_intr(bar);
-
- let _sec2_falcon = Falcon::<Sec2>::new(pdev.as_ref(), spec.chipset, bar, true)?;
-
- let fb_layout = FbLayout::new(spec.chipset, bar)?;
- dev_dbg!(pdev.as_ref(), "{:#x?}\n", fb_layout);
-
- let bios = Vbios::new(pdev, bar)?;
-
- Self::run_fwsec_frts(pdev.as_ref(), &gsp_falcon, bar, &bios, &fb_layout)?;
-
- Ok(pin_init!(Self {
- spec,
- bar: devres_bar,
- fw,
- sysmem_flush,
- }))
+ /// Called when the corresponding [`Device`](device::Device) is unbound.
+ ///
+ /// Note: This method must only be called from `Driver::unbind`.
+ pub(crate) fn unbind(&self, dev: &device::Device<device::Core>) {
+ kernel::warn_on!(self
+ .bar
+ .access(dev)
+ .inspect(|bar| self.sysmem_flush.unregister(bar))
+ .is_err());
}
}
diff --git a/drivers/gpu/nova-core/gsp.rs b/drivers/gpu/nova-core/gsp.rs
new file mode 100644
index 000000000000..64e472e7a9d3
--- /dev/null
+++ b/drivers/gpu/nova-core/gsp.rs
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0
+
+mod boot;
+
+use kernel::prelude::*;
+
+mod fw;
+
+pub(crate) const GSP_PAGE_SHIFT: usize = 12;
+pub(crate) const GSP_PAGE_SIZE: usize = 1 << GSP_PAGE_SHIFT;
+
+/// GSP runtime data.
+///
+/// This is an empty pinned placeholder for now.
+#[pin_data]
+pub(crate) struct Gsp {}
+
+impl Gsp {
+ pub(crate) fn new() -> impl PinInit<Self> {
+ pin_init!(Self {})
+ }
+}
diff --git a/drivers/gpu/nova-core/gsp/boot.rs b/drivers/gpu/nova-core/gsp/boot.rs
new file mode 100644
index 000000000000..2800f3aee37d
--- /dev/null
+++ b/drivers/gpu/nova-core/gsp/boot.rs
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use kernel::device;
+use kernel::pci;
+use kernel::prelude::*;
+
+use crate::driver::Bar0;
+use crate::falcon::{gsp::Gsp, sec2::Sec2, Falcon};
+use crate::fb::FbLayout;
+use crate::firmware::{
+ booter::{BooterFirmware, BooterKind},
+ fwsec::{FwsecCommand, FwsecFirmware},
+ gsp::GspFirmware,
+ FIRMWARE_VERSION,
+};
+use crate::gpu::Chipset;
+use crate::regs;
+use crate::vbios::Vbios;
+
+impl super::Gsp {
+ /// Helper function to load and run the FWSEC-FRTS firmware and confirm that it has properly
+ /// created the WPR2 region.
+ fn run_fwsec_frts(
+ dev: &device::Device<device::Bound>,
+ falcon: &Falcon<Gsp>,
+ bar: &Bar0,
+ bios: &Vbios,
+ fb_layout: &FbLayout,
+ ) -> Result<()> {
+        // Check that the WPR2 region does not already exist - if it does, we cannot run
+ // FWSEC-FRTS until the GPU is reset.
+ if regs::NV_PFB_PRI_MMU_WPR2_ADDR_HI::read(bar).higher_bound() != 0 {
+ dev_err!(
+ dev,
+ "WPR2 region already exists - GPU needs to be reset to proceed\n"
+ );
+ return Err(EBUSY);
+ }
+
+ let fwsec_frts = FwsecFirmware::new(
+ dev,
+ falcon,
+ bar,
+ bios,
+ FwsecCommand::Frts {
+ frts_addr: fb_layout.frts.start,
+ frts_size: fb_layout.frts.end - fb_layout.frts.start,
+ },
+ )?;
+
+ // Run FWSEC-FRTS to create the WPR2 region.
+ fwsec_frts.run(dev, falcon, bar)?;
+
+ // SCRATCH_E contains the error code for FWSEC-FRTS.
+ let frts_status = regs::NV_PBUS_SW_SCRATCH_0E_FRTS_ERR::read(bar).frts_err_code();
+ if frts_status != 0 {
+ dev_err!(
+ dev,
+ "FWSEC-FRTS returned with error code {:#x}",
+ frts_status
+ );
+
+ return Err(EIO);
+ }
+
+ // Check that the WPR2 region has been created as we requested.
+ let (wpr2_lo, wpr2_hi) = (
+ regs::NV_PFB_PRI_MMU_WPR2_ADDR_LO::read(bar).lower_bound(),
+ regs::NV_PFB_PRI_MMU_WPR2_ADDR_HI::read(bar).higher_bound(),
+ );
+
+ match (wpr2_lo, wpr2_hi) {
+ (_, 0) => {
+ dev_err!(dev, "WPR2 region not created after running FWSEC-FRTS\n");
+
+ Err(EIO)
+ }
+ (wpr2_lo, _) if wpr2_lo != fb_layout.frts.start => {
+ dev_err!(
+ dev,
+ "WPR2 region created at unexpected address {:#x}; expected {:#x}\n",
+ wpr2_lo,
+ fb_layout.frts.start,
+ );
+
+ Err(EIO)
+ }
+ (wpr2_lo, wpr2_hi) => {
+ dev_dbg!(dev, "WPR2: {:#x}-{:#x}\n", wpr2_lo, wpr2_hi);
+ dev_dbg!(dev, "GPU instance built\n");
+
+ Ok(())
+ }
+ }
+ }
+
+    /// Attempts to boot the GSP.
+    ///
+    /// This is a complex, GPU-dependent procedure that involves loading firmware files from
+    /// user-space, patching them with signatures, and building the intricate firmware-specific
+    /// data structures that the GSP will use at runtime.
+    ///
+    /// Upon successful return, the GSP is up and running.
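+    ///
+    /// The current (partial) sequence is: parse the VBIOS, load the GSP firmware and bootloader,
+    /// compute the framebuffer layout, run FWSEC-FRTS to carve out the WPR2 region, and load the
+    /// `Booter` loader firmware for the SEC2 falcon.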
+ pub(crate) fn boot(
+ self: Pin<&mut Self>,
+ pdev: &pci::Device<device::Bound>,
+ bar: &Bar0,
+ chipset: Chipset,
+ gsp_falcon: &Falcon<Gsp>,
+ sec2_falcon: &Falcon<Sec2>,
+ ) -> Result {
+ let dev = pdev.as_ref();
+
+ let bios = Vbios::new(dev, bar)?;
+
+ let _gsp_fw = KBox::pin_init(
+ GspFirmware::new(dev, chipset, FIRMWARE_VERSION)?,
+ GFP_KERNEL,
+ )?;
+
+ let fb_layout = FbLayout::new(chipset, bar)?;
+ dev_dbg!(dev, "{:#x?}\n", fb_layout);
+
+ Self::run_fwsec_frts(dev, gsp_falcon, bar, &bios, &fb_layout)?;
+
+ let _booter_loader = BooterFirmware::new(
+ dev,
+ BooterKind::Loader,
+ chipset,
+ FIRMWARE_VERSION,
+ sec2_falcon,
+ bar,
+ )?;
+
+ Ok(())
+ }
+}
diff --git a/drivers/gpu/nova-core/gsp/fw.rs b/drivers/gpu/nova-core/gsp/fw.rs
new file mode 100644
index 000000000000..34226dd00982
--- /dev/null
+++ b/drivers/gpu/nova-core/gsp/fw.rs
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
+
+mod r570_144;
+
+// Alias to avoid repeating the version number with every use.
+#[expect(unused)]
+use r570_144 as bindings;
diff --git a/drivers/gpu/nova-core/gsp/fw/r570_144.rs b/drivers/gpu/nova-core/gsp/fw/r570_144.rs
new file mode 100644
index 000000000000..35cb0370a7c9
--- /dev/null
+++ b/drivers/gpu/nova-core/gsp/fw/r570_144.rs
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Firmware bindings.
+//!
+//! Imports the generated bindings by `bindgen`.
+//!
+//! This module may not be directly used. Please abstract or re-export the needed symbols in the
+//! parent module instead.
+
+#![cfg_attr(test, allow(deref_nullptr))]
+#![cfg_attr(test, allow(unaligned_references))]
+#![cfg_attr(test, allow(unsafe_op_in_unsafe_fn))]
+#![allow(
+ dead_code,
+ unused_imports,
+ clippy::all,
+ clippy::undocumented_unsafe_blocks,
+ clippy::ptr_as_ptr,
+ clippy::ref_as_ptr,
+ missing_docs,
+ non_camel_case_types,
+ non_upper_case_globals,
+ non_snake_case,
+ improper_ctypes,
+ unreachable_pub,
+ unsafe_op_in_unsafe_fn
+)]
+use kernel::ffi;
+include!("r570_144/bindings.rs");
diff --git a/drivers/gpu/nova-core/gsp/fw/r570_144/bindings.rs b/drivers/gpu/nova-core/gsp/fw/r570_144/bindings.rs
new file mode 100644
index 000000000000..cec594032515
--- /dev/null
+++ b/drivers/gpu/nova-core/gsp/fw/r570_144/bindings.rs
@@ -0,0 +1 @@
+// SPDX-License-Identifier: GPL-2.0
diff --git a/drivers/gpu/nova-core/nova_core.rs b/drivers/gpu/nova-core/nova_core.rs
index cb2bbb30cba1..fffcaee2249f 100644
--- a/drivers/gpu/nova-core/nova_core.rs
+++ b/drivers/gpu/nova-core/nova_core.rs
@@ -9,6 +9,7 @@ mod fb;
mod firmware;
mod gfw;
mod gpu;
+mod gsp;
mod regs;
mod util;
mod vbios;
diff --git a/drivers/gpu/nova-core/regs.rs b/drivers/gpu/nova-core/regs.rs
index d49fddf6a3c6..206dab2e1335 100644
--- a/drivers/gpu/nova-core/regs.rs
+++ b/drivers/gpu/nova-core/regs.rs
@@ -5,11 +5,11 @@
#![allow(non_camel_case_types)]
#[macro_use]
-mod macros;
+pub(crate) mod macros;
use crate::falcon::{
DmaTrfCmdSize, FalconCoreRev, FalconCoreRevSubversion, FalconFbifMemType, FalconFbifTarget,
- FalconModSelAlgo, FalconSecurityModel, PeregrineCoreSelect,
+ FalconModSelAlgo, FalconSecurityModel, PFalcon2Base, PFalconBase, PeregrineCoreSelect,
};
use crate::gpu::{Architecture, Chipset};
use kernel::prelude::*;
@@ -28,7 +28,7 @@ impl NV_PMC_BOOT_0 {
/// Combines `architecture_0` and `architecture_1` to obtain the architecture of the chip.
pub(crate) fn architecture(self) -> Result<Architecture> {
Architecture::try_from(
- self.architecture_0() | (self.architecture_1() << Self::ARCHITECTURE_0.len()),
+ self.architecture_0() | (self.architecture_1() << Self::ARCHITECTURE_0_RANGE.len()),
)
}
@@ -36,7 +36,8 @@ impl NV_PMC_BOOT_0 {
pub(crate) fn chipset(self) -> Result<Chipset> {
self.architecture()
.map(|arch| {
- ((arch as u32) << Self::IMPLEMENTATION.len()) | u32::from(self.implementation())
+ ((arch as u32) << Self::IMPLEMENTATION_RANGE.len())
+ | u32::from(self.implementation())
})
.and_then(Chipset::try_from)
}
@@ -44,8 +45,10 @@ impl NV_PMC_BOOT_0 {
// PBUS
-// TODO[REGA]: this is an array of registers.
-register!(NV_PBUS_SW_SCRATCH_0E@0x00001438 {
+register!(NV_PBUS_SW_SCRATCH @ 0x00001400[64] {});
+
+register!(NV_PBUS_SW_SCRATCH_0E_FRTS_ERR => NV_PBUS_SW_SCRATCH[0xe],
+ "scratch register 0xe used as FRTS firmware error code" {
31:16 frts_err_code as u16;
});
@@ -123,13 +126,12 @@ register!(NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_PRIV_LEVEL_MASK @ 0x00118128,
0:0 read_protection_level0 as bool, "Set after FWSEC lowers its protection level";
});
-// TODO[REGA]: This is an array of registers.
-register!(NV_PGC6_AON_SECURE_SCRATCH_GROUP_05 @ 0x00118234 {
- 31:0 value as u32;
-});
+// OpenRM defines this as a register array, but doesn't specify its size and only uses its first
+// element. Be conservative until we know the actual size or need to use more registers.
+register!(NV_PGC6_AON_SECURE_SCRATCH_GROUP_05 @ 0x00118234[1] {});
register!(
- NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_0_GFW_BOOT => NV_PGC6_AON_SECURE_SCRATCH_GROUP_05,
+ NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_0_GFW_BOOT => NV_PGC6_AON_SECURE_SCRATCH_GROUP_05[0],
"Scratch group 05 register 0 used as GFW boot progress indicator" {
7:0 progress as u8, "Progress of GFW boot (0xff means completed)";
}
@@ -180,38 +182,40 @@ impl NV_PDISP_VGA_WORKSPACE_BASE {
// FUSE
-register!(NV_FUSE_OPT_FPF_NVDEC_UCODE1_VERSION @ 0x00824100 {
+pub(crate) const NV_FUSE_OPT_FPF_SIZE: usize = 16;
+
+register!(NV_FUSE_OPT_FPF_NVDEC_UCODE1_VERSION @ 0x00824100[NV_FUSE_OPT_FPF_SIZE] {
15:0 data as u16;
});
-register!(NV_FUSE_OPT_FPF_SEC2_UCODE1_VERSION @ 0x00824140 {
+register!(NV_FUSE_OPT_FPF_SEC2_UCODE1_VERSION @ 0x00824140[NV_FUSE_OPT_FPF_SIZE] {
15:0 data as u16;
});
-register!(NV_FUSE_OPT_FPF_GSP_UCODE1_VERSION @ 0x008241c0 {
+register!(NV_FUSE_OPT_FPF_GSP_UCODE1_VERSION @ 0x008241c0[NV_FUSE_OPT_FPF_SIZE] {
15:0 data as u16;
});
// PFALCON
-register!(NV_PFALCON_FALCON_IRQSCLR @ +0x00000004 {
+register!(NV_PFALCON_FALCON_IRQSCLR @ PFalconBase[0x00000004] {
4:4 halt as bool;
6:6 swgen0 as bool;
});
-register!(NV_PFALCON_FALCON_MAILBOX0 @ +0x00000040 {
+register!(NV_PFALCON_FALCON_MAILBOX0 @ PFalconBase[0x00000040] {
31:0 value as u32;
});
-register!(NV_PFALCON_FALCON_MAILBOX1 @ +0x00000044 {
+register!(NV_PFALCON_FALCON_MAILBOX1 @ PFalconBase[0x00000044] {
31:0 value as u32;
});
-register!(NV_PFALCON_FALCON_RM @ +0x00000084 {
+register!(NV_PFALCON_FALCON_RM @ PFalconBase[0x00000084] {
31:0 value as u32;
});
-register!(NV_PFALCON_FALCON_HWCFG2 @ +0x000000f4 {
+register!(NV_PFALCON_FALCON_HWCFG2 @ PFalconBase[0x000000f4] {
10:10 riscv as bool;
12:12 mem_scrubbing as bool, "Set to 0 after memory scrubbing is completed";
31:31 reset_ready as bool, "Signal indicating that reset is completed (GA102+)";
@@ -224,17 +228,17 @@ impl NV_PFALCON_FALCON_HWCFG2 {
}
}
-register!(NV_PFALCON_FALCON_CPUCTL @ +0x00000100 {
+register!(NV_PFALCON_FALCON_CPUCTL @ PFalconBase[0x00000100] {
1:1 startcpu as bool;
4:4 halted as bool;
6:6 alias_en as bool;
});
-register!(NV_PFALCON_FALCON_BOOTVEC @ +0x00000104 {
+register!(NV_PFALCON_FALCON_BOOTVEC @ PFalconBase[0x00000104] {
31:0 value as u32;
});
-register!(NV_PFALCON_FALCON_DMACTL @ +0x0000010c {
+register!(NV_PFALCON_FALCON_DMACTL @ PFalconBase[0x0000010c] {
0:0 require_ctx as bool;
1:1 dmem_scrubbing as bool;
2:2 imem_scrubbing as bool;
@@ -242,15 +246,15 @@ register!(NV_PFALCON_FALCON_DMACTL @ +0x0000010c {
7:7 secure_stat as bool;
});
-register!(NV_PFALCON_FALCON_DMATRFBASE @ +0x00000110 {
+register!(NV_PFALCON_FALCON_DMATRFBASE @ PFalconBase[0x00000110] {
31:0 base as u32;
});
-register!(NV_PFALCON_FALCON_DMATRFMOFFS @ +0x00000114 {
+register!(NV_PFALCON_FALCON_DMATRFMOFFS @ PFalconBase[0x00000114] {
23:0 offs as u32;
});
-register!(NV_PFALCON_FALCON_DMATRFCMD @ +0x00000118 {
+register!(NV_PFALCON_FALCON_DMATRFCMD @ PFalconBase[0x00000118] {
0:0 full as bool;
1:1 idle as bool;
3:2 sec as u8;
@@ -261,60 +265,62 @@ register!(NV_PFALCON_FALCON_DMATRFCMD @ +0x00000118 {
16:16 set_dmtag as u8;
});
-register!(NV_PFALCON_FALCON_DMATRFFBOFFS @ +0x0000011c {
+register!(NV_PFALCON_FALCON_DMATRFFBOFFS @ PFalconBase[0x0000011c] {
31:0 offs as u32;
});
-register!(NV_PFALCON_FALCON_DMATRFBASE1 @ +0x00000128 {
+register!(NV_PFALCON_FALCON_DMATRFBASE1 @ PFalconBase[0x00000128] {
8:0 base as u16;
});
-register!(NV_PFALCON_FALCON_HWCFG1 @ +0x0000012c {
+register!(NV_PFALCON_FALCON_HWCFG1 @ PFalconBase[0x0000012c] {
3:0 core_rev as u8 ?=> FalconCoreRev, "Core revision";
5:4 security_model as u8 ?=> FalconSecurityModel, "Security model";
7:6 core_rev_subversion as u8 ?=> FalconCoreRevSubversion, "Core revision subversion";
});
-register!(NV_PFALCON_FALCON_CPUCTL_ALIAS @ +0x00000130 {
+register!(NV_PFALCON_FALCON_CPUCTL_ALIAS @ PFalconBase[0x00000130] {
1:1 startcpu as bool;
});
// Actually known as `NV_PSEC_FALCON_ENGINE` and `NV_PGSP_FALCON_ENGINE` depending on the falcon
// instance.
-register!(NV_PFALCON_FALCON_ENGINE @ +0x000003c0 {
+register!(NV_PFALCON_FALCON_ENGINE @ PFalconBase[0x000003c0] {
0:0 reset as bool;
});
-// TODO[REGA]: this is an array of registers.
-register!(NV_PFALCON_FBIF_TRANSCFG @ +0x00000600 {
+register!(NV_PFALCON_FBIF_TRANSCFG @ PFalconBase[0x00000600[8]] {
1:0 target as u8 ?=> FalconFbifTarget;
2:2 mem_type as bool => FalconFbifMemType;
});
-register!(NV_PFALCON_FBIF_CTL @ +0x00000624 {
+register!(NV_PFALCON_FBIF_CTL @ PFalconBase[0x00000624] {
7:7 allow_phys_no_ctx as bool;
});
-register!(NV_PFALCON2_FALCON_MOD_SEL @ +0x00001180 {
+/* PFALCON2 */
+
+register!(NV_PFALCON2_FALCON_MOD_SEL @ PFalcon2Base[0x00000180] {
7:0 algo as u8 ?=> FalconModSelAlgo;
});
-register!(NV_PFALCON2_FALCON_BROM_CURR_UCODE_ID @ +0x00001198 {
+register!(NV_PFALCON2_FALCON_BROM_CURR_UCODE_ID @ PFalcon2Base[0x00000198] {
7:0 ucode_id as u8;
});
-register!(NV_PFALCON2_FALCON_BROM_ENGIDMASK @ +0x0000119c {
+register!(NV_PFALCON2_FALCON_BROM_ENGIDMASK @ PFalcon2Base[0x0000019c] {
31:0 value as u32;
});
-// TODO[REGA]: this is an array of registers.
-register!(NV_PFALCON2_FALCON_BROM_PARAADDR @ +0x00001210 {
+// OpenRM defines this as a register array, but doesn't specify its size and only uses its first
+// element. Be conservative until we know the actual size or need to use more registers.
+register!(NV_PFALCON2_FALCON_BROM_PARAADDR @ PFalcon2Base[0x00000210[1]] {
31:0 value as u32;
});
// PRISCV
-register!(NV_PRISCV_RISCV_BCR_CTRL @ +0x00001668 {
+register!(NV_PRISCV_RISCV_BCR_CTRL @ PFalconBase[0x00001668] {
0:0 valid as bool;
4:4 core_select as bool => PeregrineCoreSelect;
8:8 br_fetch as bool;
diff --git a/drivers/gpu/nova-core/regs/macros.rs b/drivers/gpu/nova-core/regs/macros.rs
index 6b9df4205f46..8058e1696df9 100644
--- a/drivers/gpu/nova-core/regs/macros.rs
+++ b/drivers/gpu/nova-core/regs/macros.rs
@@ -1,17 +1,27 @@
// SPDX-License-Identifier: GPL-2.0
-//! Macro to define register layout and accessors.
+//! `register!` macro to define register layout and accessors.
//!
//! A single register typically includes several fields, which are accessed through a combination
//! of bit-shift and mask operations that introduce a class of potential mistakes, notably because
//! not all possible field values are necessarily valid.
//!
-//! The macro in this module allow to define, using an intruitive and readable syntax, a dedicated
-//! type for each register with its own field accessors that can return an error is a field's value
-//! is invalid.
+//! The `register!` macro in this module provides an intuitive and readable syntax for defining a
+//! dedicated type for each register. Each such type comes with its own field accessors that can
+//! return an error if a field's value is invalid.
-/// Defines a dedicated type for a register with an absolute offset, alongside with getter and
-/// setter methods for its fields and methods to read and write it from an `Io` region.
+/// Trait providing a base address to be added to the offset of a relative register to obtain
+/// its actual offset.
+///
+/// The `T` generic argument is used to distinguish which base to use, in case a type provides
+/// several bases. It is given to the `register!` macro to restrict the use of the register to
+/// implementors of this particular variant.
+pub(crate) trait RegisterBase<T> {
+ const BASE: usize;
+}
+
+/// Defines a dedicated type for a register with an absolute offset, including getter and setter
+/// methods for its fields and methods to read and write it from an `Io` region.
///
/// Example:
///
@@ -24,7 +34,7 @@
/// ```
///
/// This defines a `BOOT_0` type which can be read or written from offset `0x100` of an `Io`
-/// region. It is composed of 3 fields, for instance `minor_revision` is made of the 4 less
+/// region. It is composed of 3 fields, for instance `minor_revision` is made of the 4 least
/// significant bits of the register. Each field can be accessed and modified using accessor
/// methods:
///
@@ -33,130 +43,344 @@
/// let boot0 = BOOT_0::read(&bar);
/// pr_info!("chip revision: {}.{}", boot0.major_revision(), boot0.minor_revision());
///
-/// // `Chipset::try_from` will be called with the value of the field and returns an error if the
-/// // value is invalid.
+/// // `Chipset::try_from` is called with the value of the `chipset` field and returns an
+/// // error if it is invalid.
/// let chipset = boot0.chipset()?;
///
/// // Update some fields and write the value back.
/// boot0.set_major_revision(3).set_minor_revision(10).write(&bar);
///
-/// // Or just read and update the register in a single step:
+/// // Or, just read and update the register in a single step:
/// BOOT_0::alter(&bar, |r| r.set_major_revision(3).set_minor_revision(10));
/// ```
///
-/// Fields can be defined as follows:
+/// Fields are defined as follows:
///
-/// - `as <type>` simply returns the field value casted as the requested integer type, typically
-/// `u32`, `u16`, `u8` or `bool`. Note that `bool` fields must have a range of 1 bit.
+/// - `as <type>` simply returns the field value cast to `<type>`, typically `u32`, `u16`, `u8` or
+/// `bool`. Note that `bool` fields must have a range of 1 bit.
/// - `as <type> => <into_type>` calls `<into_type>`'s `From::<<type>>` implementation and returns
/// the result.
/// - `as <type> ?=> <try_into_type>` calls `<try_into_type>`'s `TryFrom::<<type>>` implementation
-/// and returns the result. This is useful on fields for which not all values are value.
+/// and returns the result. This is useful with fields for which not all values are valid.
///
/// The documentation strings are optional. If present, they will be added to the type's
/// definition, or the field getter and setter methods they are attached to.
///
-/// Putting a `+` before the address of the register makes it relative to a base: the `read` and
-/// `write` methods take a `base` argument that is added to the specified address before access,
-/// and `try_read` and `try_write` methods are also created, allowing access with offsets unknown
-/// at compile-time:
+/// It is also possible to create an alias register by using the `=> ALIAS` syntax. This is useful
+/// for cases where a register's interpretation depends on the context:
///
/// ```no_run
-/// register!(CPU_CTL @ +0x0000010, "CPU core control" {
-/// 0:0 start as bool, "Start the CPU core";
+/// register!(SCRATCH @ 0x00000200, "Scratch register" {
+/// 31:0 value as u32, "Raw value";
/// });
///
-/// // Flip the `start` switch for the CPU core which base address is at `CPU_BASE`.
-/// let cpuctl = CPU_CTL::read(&bar, CPU_BASE);
-/// pr_info!("CPU CTL: {:#x}", cpuctl);
-/// cpuctl.set_start(true).write(&bar, CPU_BASE);
+/// register!(SCRATCH_BOOT_STATUS => SCRATCH, "Boot status of the firmware" {
+/// 0:0 completed as bool, "Whether the firmware has completed booting";
+/// });
/// ```
///
-/// It is also possible to create a alias register by using the `=> ALIAS` syntax. This is useful
-/// for cases where a register's interpretation depends on the context:
+/// In this example, `SCRATCH_BOOT_STATUS` uses the same I/O address as `SCRATCH`, while also
+/// providing its own `completed` field.
+///
+/// ## Relative registers
+///
+/// A register can be defined as being accessible from a fixed offset of a provided base. For
+/// instance, imagine the following I/O space:
+///
+/// ```text
+/// +-----------------------------+
+/// | ... |
+/// | |
+/// 0x100--->+------------CPU0-------------+
+/// | |
+/// 0x110--->+-----------------------------+
+/// | CPU_CTL |
+/// +-----------------------------+
+/// | ... |
+/// | |
+/// | |
+/// 0x200--->+------------CPU1-------------+
+/// | |
+/// 0x210--->+-----------------------------+
+/// | CPU_CTL |
+/// +-----------------------------+
+/// | ... |
+/// +-----------------------------+
+/// ```
+///
+/// `CPU0` and `CPU1` both have a `CPU_CTL` register that starts at offset `0x10` of their I/O
+/// space segment. Since both instances of `CPU_CTL` share the same layout, we don't want to define
+/// them twice and would prefer a way to select which one to use from a single definition.
+///
+/// This can be done using the `Base[Offset]` syntax when specifying the register's address.
+///
+/// `Base` is an arbitrary type (typically a ZST) to be used as a generic parameter of the
+/// [`RegisterBase`] trait to provide the base as a constant, i.e. each type providing a base for
+/// this register needs to implement `RegisterBase<Base>`. Here is the above example translated
+/// into code:
///
/// ```no_run
-/// register!(SCRATCH_0 @ 0x0000100, "Scratch register 0" {
-/// 31:0 value as u32, "Raw value";
+/// // Type used to identify the base.
+/// pub(crate) struct CpuCtlBase;
///
-/// register!(SCRATCH_0_BOOT_STATUS => SCRATCH_0, "Boot status of the firmware" {
-/// 0:0 completed as bool, "Whether the firmware has completed booting";
+/// // ZST describing `CPU0`.
+/// struct Cpu0;
+/// impl RegisterBase<CpuCtlBase> for Cpu0 {
+/// const BASE: usize = 0x100;
+/// }
+/// // Singleton of `CPU0` used to identify it.
+/// const CPU0: Cpu0 = Cpu0;
+///
+/// // ZST describing `CPU1`.
+/// struct Cpu1;
+/// impl RegisterBase<CpuCtlBase> for Cpu1 {
+/// const BASE: usize = 0x200;
+/// }
+/// // Singleton of `CPU1` used to identify it.
+/// const CPU1: Cpu1 = Cpu1;
+///
+/// // This makes `CPU_CTL` accessible from all implementors of `RegisterBase<CpuCtlBase>`.
+/// register!(CPU_CTL @ CpuCtlBase[0x10], "CPU core control" {
+/// 0:0 start as bool, "Start the CPU core";
+/// });
+///
+/// // The `read`, `write` and `alter` methods of relative registers take an extra `base` argument
+/// // that is used to resolve its final address by adding its `BASE` to the offset of the
+/// // register.
+///
+/// // Start `CPU0`.
+/// CPU_CTL::alter(bar, &CPU0, |r| r.set_start(true));
+///
+/// // Start `CPU1`.
+/// CPU_CTL::alter(bar, &CPU1, |r| r.set_start(true));
+///
+/// // Aliases can also be defined for relative registers.
+/// register!(CPU_CTL_ALIAS => CpuCtlBase[CPU_CTL], "Alias to CPU core control" {
+/// 1:1 alias_start as bool, "Start the aliased CPU core";
+/// });
+///
+/// // Start the aliased `CPU0`.
+/// CPU_CTL_ALIAS::alter(bar, &CPU0, |r| r.set_alias_start(true));
+/// ```
+///
+/// ## Arrays of registers
+///
+/// Some I/O areas contain consecutive values that can be interpreted in the same way. These areas
+/// can be defined as an array of identical registers, allowing them to be accessed by index with
+/// compile-time or runtime bound checking. Simply define their address as `Address[Size]`, and add
+/// an `idx` parameter to their `read`, `write` and `alter` methods:
+///
+/// ```no_run
+/// # fn no_run() -> Result<(), Error> {
+/// # fn get_scratch_idx() -> usize {
+/// # 0x15
+/// # }
+/// // Array of 64 consecutive registers with the same layout starting at offset `0x80`.
+/// register!(SCRATCH @ 0x00000080[64], "Scratch registers" {
+/// 31:0 value as u32;
+/// });
+///
+/// // Read scratch register 0, i.e. I/O address `0x80`.
+/// let scratch_0 = SCRATCH::read(bar, 0).value();
+/// // Read scratch register 15, i.e. I/O address `0x80 + (15 * 4)`.
+/// let scratch_15 = SCRATCH::read(bar, 15).value();
+///
+/// // This is out of bounds and won't build.
+/// // let scratch_128 = SCRATCH::read(bar, 128).value();
+///
+/// // Runtime-obtained array index.
+/// let scratch_idx = get_scratch_idx();
+/// // Access on a runtime index returns an error if it is out-of-bounds.
+/// let some_scratch = SCRATCH::try_read(bar, scratch_idx)?.value();
+///
+/// // Alias to a particular register in an array.
+/// // Here `SCRATCH[8]` is used to convey the firmware exit code.
+/// register!(FIRMWARE_STATUS => SCRATCH[8], "Firmware exit status code" {
+/// 7:0 status as u8;
+/// });
+///
+/// let status = FIRMWARE_STATUS::read(bar).status();
+///
+/// // Non-contiguous register arrays can be defined by adding a stride parameter.
+/// // Here, each of the 16 registers of the array is separated by 8 bytes, meaning that the
+/// // registers of the two declarations below are interleaved.
+/// register!(SCRATCH_INTERLEAVED_0 @ 0x000000c0[16 ; 8], "Scratch registers bank 0" {
+/// 31:0 value as u32;
+/// });
+/// register!(SCRATCH_INTERLEAVED_1 @ 0x000000c4[16 ; 8], "Scratch registers bank 1" {
+/// 31:0 value as u32;
+/// });
+/// # Ok(())
+/// # }
/// ```
///
-/// In this example, `SCRATCH_0_BOOT_STATUS` uses the same I/O address as `SCRATCH_0`, while also
-/// providing its own `completed` method.
+/// ## Relative arrays of registers
+///
+/// Combining the two features described in the sections above, arrays of registers accessible from
+/// a base can also be defined:
+///
+/// ```no_run
+/// # fn no_run() -> Result<(), Error> {
+/// # fn get_scratch_idx() -> usize {
+/// # 0x15
+/// # }
+/// // Type used as parameter of `RegisterBase` to specify the base.
+/// pub(crate) struct CpuCtlBase;
+///
+/// // ZST describing `CPU0`.
+/// struct Cpu0;
+/// impl RegisterBase<CpuCtlBase> for Cpu0 {
+/// const BASE: usize = 0x100;
+/// }
+/// // Singleton of `CPU0` used to identify it.
+/// const CPU0: Cpu0 = Cpu0;
+///
+/// // ZST describing `CPU1`.
+/// struct Cpu1;
+/// impl RegisterBase<CpuCtlBase> for Cpu1 {
+/// const BASE: usize = 0x200;
+/// }
+/// // Singleton of `CPU1` used to identify it.
+/// const CPU1: Cpu1 = Cpu1;
+///
+/// // 64 per-cpu scratch registers, arranged as a contiguous array.
+/// register!(CPU_SCRATCH @ CpuCtlBase[0x00000080[64]], "Per-CPU scratch registers" {
+/// 31:0 value as u32;
+/// });
+///
+/// let cpu0_scratch_0 = CPU_SCRATCH::read(bar, &Cpu0, 0).value();
+/// let cpu1_scratch_15 = CPU_SCRATCH::read(bar, &Cpu1, 15).value();
+///
+/// // This won't build.
+/// // let cpu0_scratch_128 = CPU_SCRATCH::read(bar, &Cpu0, 128).value();
+///
+/// // Runtime-obtained array index.
+/// let scratch_idx = get_scratch_idx();
+/// // Access on a runtime value returns an error if it is out-of-bounds.
+/// let cpu0_some_scratch = CPU_SCRATCH::try_read(bar, &Cpu0, scratch_idx)?.value();
+///
+/// // Here `CPU_SCRATCH[8]` is used to convey the firmware exit code.
+/// register!(CPU_FIRMWARE_STATUS => CpuCtlBase[CPU_SCRATCH[8]],
+/// "Per-CPU firmware exit status code" {
+/// 7:0 status as u8;
+/// });
+///
+/// let cpu0_status = CPU_FIRMWARE_STATUS::read(bar, &Cpu0).status();
+///
+/// // Non-contiguous register arrays can be defined by adding a stride parameter.
+/// // Here, each of the 16 registers of the array is separated by 8 bytes, meaning that the
+/// // registers of the two declarations below are interleaved.
+/// register!(CPU_SCRATCH_INTERLEAVED_0 @ CpuCtlBase[0x00000d00[16 ; 8]],
+/// "Scratch registers bank 0" {
+/// 31:0 value as u32;
+/// });
+/// register!(CPU_SCRATCH_INTERLEAVED_1 @ CpuCtlBase[0x00000d04[16 ; 8]],
+/// "Scratch registers bank 1" {
+/// 31:0 value as u32;
+/// });
+/// # Ok(())
+/// # }
+/// ```
macro_rules! register {
// Creates a register at a fixed offset of the MMIO space.
+ ($name:ident @ $offset:literal $(, $comment:literal)? { $($fields:tt)* } ) => {
+ register!(@core $name $(, $comment)? { $($fields)* } );
+ register!(@io_fixed $name @ $offset);
+ };
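+
+ // For instance (the name and offset below are illustrative, in the spirit of the
+ // `SCRATCH` examples from the doc comment above):
+ //
+ //   register!(SCRATCH_0 @ 0x00000100, "Scratch register 0" { 31:0 value as u32; });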
+
+ // Creates an alias register of fixed offset register `alias` with its own fields.
+ ($name:ident => $alias:ident $(, $comment:literal)? { $($fields:tt)* } ) => {
+ register!(@core $name $(, $comment)? { $($fields)* } );
+ register!(@io_fixed $name @ $alias::OFFSET);
+ };
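+
+ // For instance, an alias sharing the I/O address of the illustrative `SCRATCH_0`
+ // above while providing its own `completed` field:
+ //
+ //   register!(SCRATCH_0_BOOT_STATUS => SCRATCH_0, "Boot status" { 0:0 completed as bool; });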
+
+ // Creates a register at a relative offset from a base address provider.
+ ($name:ident @ $base:ty [ $offset:literal ] $(, $comment:literal)? { $($fields:tt)* } ) => {
+ register!(@core $name $(, $comment)? { $($fields)* } );
+ register!(@io_relative $name @ $base [ $offset ]);
+ };
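+
+ // For instance (name and offset illustrative, using the `CpuCtlBase` base from the
+ // doc comment above):
+ //
+ //   register!(CPU_STATUS @ CpuCtlBase[0x00000010], "Per-CPU status" { 31:0 value as u32; });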
+
+ // Creates an alias register of relative offset register `alias` with its own fields.
+ ($name:ident => $base:ty [ $alias:ident ] $(, $comment:literal)? { $($fields:tt)* }) => {
+ register!(@core $name $(, $comment)? { $($fields)* } );
+ register!(@io_relative $name @ $base [ $alias::OFFSET ]);
+ };
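+
+ // For instance, building on the illustrative `CPU_STATUS` above:
+ //
+ //   register!(CPU_BOOT_STATUS => CpuCtlBase[CPU_STATUS], "Per-CPU boot status" {
+ //       0:0 completed as bool;
+ //   });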
+
+ // Creates an array of registers at a fixed offset of the MMIO space.
(
- $name:ident @ $offset:literal $(, $comment:literal)? {
+ $name:ident @ $offset:literal [ $size:expr ; $stride:expr ] $(, $comment:literal)? {
$($fields:tt)*
}
) => {
- register!(@common $name @ $offset $(, $comment)?);
- register!(@field_accessors $name { $($fields)* });
- register!(@io $name @ $offset);
+ static_assert!(::core::mem::size_of::<u32>() <= $stride);
+ register!(@core $name $(, $comment)? { $($fields)* } );
+ register!(@io_array $name @ $offset [ $size ; $stride ]);
};
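+
+ // For instance, the interleaved bank from the doc comment above:
+ //
+ //   register!(SCRATCH_INTERLEAVED_0 @ 0x000000c0[16 ; 8], "Scratch registers bank 0" {
+ //       31:0 value as u32;
+ //   });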
- // Creates a alias register of fixed offset register `alias` with its own fields.
+ // Shortcut for contiguous array of registers (stride == size of element).
(
- $name:ident => $alias:ident $(, $comment:literal)? {
+ $name:ident @ $offset:literal [ $size:expr ] $(, $comment:literal)? {
$($fields:tt)*
}
) => {
- register!(@common $name @ $alias::OFFSET $(, $comment)?);
- register!(@field_accessors $name { $($fields)* });
- register!(@io $name @ $alias::OFFSET);
+ register!($name @ $offset [ $size ; ::core::mem::size_of::<u32>() ] $(, $comment)? {
+ $($fields)*
+ } );
+ };
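+
+ // For instance, a 64-entry contiguous array (offset illustrative):
+ //
+ //   register!(SCRATCH @ 0x00000100[64], "Scratch registers" { 31:0 value as u32; });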
+
+ // Creates an array of registers at a relative offset from a base address provider.
+ (
+ $name:ident @ $base:ty [ $offset:literal [ $size:expr ; $stride:expr ] ]
+ $(, $comment:literal)? { $($fields:tt)* }
+ ) => {
+ static_assert!(::core::mem::size_of::<u32>() <= $stride);
+ register!(@core $name $(, $comment)? { $($fields)* } );
+ register!(@io_relative_array $name @ $base [ $offset [ $size ; $stride ] ]);
};
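+
+ // For instance, the per-CPU interleaved bank from the doc comment above:
+ //
+ //   register!(CPU_SCRATCH_INTERLEAVED_0 @ CpuCtlBase[0x00000d00[16 ; 8]],
+ //       "Scratch registers bank 0" { 31:0 value as u32; });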
- // Creates a register at a relative offset from a base address.
+ // Shortcut for contiguous array of relative registers (stride == size of element).
(
- $name:ident @ + $offset:literal $(, $comment:literal)? {
+ $name:ident @ $base:ty [ $offset:literal [ $size:expr ] ] $(, $comment:literal)? {
$($fields:tt)*
}
) => {
- register!(@common $name @ $offset $(, $comment)?);
- register!(@field_accessors $name { $($fields)* });
- register!(@io$name @ + $offset);
+ register!($name @ $base [ $offset [ $size ; ::core::mem::size_of::<u32>() ] ]
+ $(, $comment)? { $($fields)* } );
};
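+
+ // For instance, the per-CPU scratch array from the doc comment above:
+ //
+ //   register!(CPU_SCRATCH @ CpuCtlBase[0x00000080[64]], "Per-CPU scratch registers" {
+ //       31:0 value as u32;
+ //   });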
- // Creates a alias register of relative offset register `alias` with its own fields.
+ // Creates an alias of register `idx` of relative array of registers `alias` with its own
+ // fields.
(
- $name:ident => + $alias:ident $(, $comment:literal)? {
+ $name:ident => $base:ty [ $alias:ident [ $idx:expr ] ] $(, $comment:literal)? {
$($fields:tt)*
}
) => {
- register!(@common $name @ $alias::OFFSET $(, $comment)?);
- register!(@field_accessors $name { $($fields)* });
- register!(@io $name @ + $alias::OFFSET);
+ static_assert!($idx < $alias::SIZE);
+ register!(@core $name $(, $comment)? { $($fields)* } );
+ register!(@io_relative $name @ $base [ $alias::OFFSET + $idx * $alias::STRIDE ] );
+ };
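+
+ // For instance, the per-CPU firmware status alias from the doc comment above:
+ //
+ //   register!(CPU_FIRMWARE_STATUS => CpuCtlBase[CPU_SCRATCH[8]],
+ //       "Per-CPU firmware exit status code" { 7:0 status as u8; });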
+
+ // Creates an alias of register `idx` of array of registers `alias` with its own fields.
+ // This rule belongs to the (non-relative) register array rules, but must come last
+ // so that it is not matched in place of the relative register array alias rule.
+ ($name:ident => $alias:ident [ $idx:expr ] $(, $comment:literal)? { $($fields:tt)* }) => {
+ static_assert!($idx < $alias::SIZE);
+ register!(@core $name $(, $comment)? { $($fields)* } );
+ register!(@io_fixed $name @ $alias::OFFSET + $idx * $alias::STRIDE );
};
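+
+ // For instance, the firmware status alias from the doc comment above:
+ //
+ //   register!(FIRMWARE_STATUS => SCRATCH[8], "Firmware exit status code" { 7:0 status as u8; });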
// All rules below are helpers.
- // Defines the wrapper `$name` type, as well as its relevant implementations (`Debug`, `BitOr`,
- // and conversion to regular `u32`).
- (@common $name:ident @ $offset:expr $(, $comment:literal)?) => {
+ // Defines the wrapper `$name` type, as well as its relevant implementations (`Debug`,
+ // `Default`, `BitOr`, and conversion to the value type) and field accessor methods.
+ (@core $name:ident $(, $comment:literal)? { $($fields:tt)* }) => {
$(
#[doc=$comment]
)?
#[repr(transparent)]
- #[derive(Clone, Copy, Default)]
+ #[derive(Clone, Copy)]
pub(crate) struct $name(u32);
- #[allow(dead_code)]
- impl $name {
- pub(crate) const OFFSET: usize = $offset;
- }
-
- // TODO[REGA]: display the raw hex value, then the value of all the fields. This requires
- // matching the fields, which will complexify the syntax considerably...
- impl ::kernel::fmt::Debug for $name {
- fn fmt(&self, f: &mut ::kernel::fmt::Formatter<'_>) -> ::kernel::fmt::Result {
- f.debug_tuple(stringify!($name))
- .field(&::kernel::prelude::fmt!("0x{0:x}", &self.0))
- .finish()
- }
- }
-
impl ::core::ops::BitOr for $name {
type Output = Self;
@@ -170,6 +394,34 @@ macro_rules! register {
reg.0
}
}
+
+ register!(@fields_dispatcher $name { $($fields)* });
+ };
+
+ // Captures the fields and passes them to all the implementers that require field information.
+ //
+ // Used to simplify the matching rules for implementers, so they don't need to match the entire
+ // complex fields rule even though they only make use of part of it.
+ (@fields_dispatcher $name:ident {
+ $($hi:tt:$lo:tt $field:ident as $type:tt
+ $(?=> $try_into_type:ty)?
+ $(=> $into_type:ty)?
+ $(, $comment:literal)?
+ ;
+ )*
+ }
+ ) => {
+ register!(@field_accessors $name {
+ $(
+ $hi:$lo $field as $type
+ $(?=> $try_into_type)?
+ $(=> $into_type)?
+ $(, $comment)?
+ ;
+ )*
+ });
+ register!(@debug $name { $($field;)* });
+ register!(@default $name { $($field;)* });
};
// Defines all the field getter/setter methods for `$name`.
@@ -228,7 +480,7 @@ macro_rules! register {
$(, $comment:literal)?;
) => {
register!(
- @leaf_accessor $name $hi:$lo $field as bool
+ @leaf_accessor $name $hi:$lo $field
{ |f| <$into_type>::from(if f != 0 { true } else { false }) }
$into_type => $into_type $(, $comment)?;
);
@@ -246,7 +498,7 @@ macro_rules! register {
@field_accessor $name:ident $hi:tt:$lo:tt $field:ident as $type:tt ?=> $try_into_type:ty
$(, $comment:literal)?;
) => {
- register!(@leaf_accessor $name $hi:$lo $field as $type
+ register!(@leaf_accessor $name $hi:$lo $field
{ |f| <$try_into_type>::try_from(f as $type) } $try_into_type =>
::core::result::Result<
$try_into_type,
@@ -260,11 +512,11 @@ macro_rules! register {
@field_accessor $name:ident $hi:tt:$lo:tt $field:ident as $type:tt => $into_type:ty
$(, $comment:literal)?;
) => {
- register!(@leaf_accessor $name $hi:$lo $field as $type
+ register!(@leaf_accessor $name $hi:$lo $field
{ |f| <$into_type>::from(f as $type) } $into_type => $into_type $(, $comment)?;);
};
- // Shortcut for fields defined as non-`bool` without the `=>` or `?=>` syntax.
+ // Shortcut for non-boolean fields defined without the `=>` or `?=>` syntax.
(
@field_accessor $name:ident $hi:tt:$lo:tt $field:ident as $type:tt
$(, $comment:literal)?;
@@ -274,11 +526,11 @@ macro_rules! register {
// Generates the accessor methods for a single field.
(
- @leaf_accessor $name:ident $hi:tt:$lo:tt $field:ident as $type:ty
+ @leaf_accessor $name:ident $hi:tt:$lo:tt $field:ident
{ $process:expr } $to_type:ty => $res_type:ty $(, $comment:literal)?;
) => {
::kernel::macros::paste!(
- const [<$field:upper>]: ::core::ops::RangeInclusive<u8> = $lo..=$hi;
+ const [<$field:upper _RANGE>]: ::core::ops::RangeInclusive<u8> = $lo..=$hi;
const [<$field:upper _MASK>]: u32 = ((((1 << $hi) - 1) << 1) + 1) - ((1 << $lo) - 1);
const [<$field:upper _SHIFT>]: u32 = Self::[<$field:upper _MASK>].trailing_zeros();
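+ // Worked example: a field declared `7:4` yields
+ // MASK = ((((1 << 7) - 1) << 1) + 1) - ((1 << 4) - 1) = 0xff - 0x0f = 0xf0
+ // and SHIFT = MASK.trailing_zeros() = 4.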
);
@@ -287,7 +539,7 @@ macro_rules! register {
#[doc="Returns the value of this field:"]
#[doc=$comment]
)?
- #[inline]
+ #[inline(always)]
pub(crate) fn $field(self) -> $res_type {
::kernel::macros::paste!(
const MASK: u32 = $name::[<$field:upper _MASK>];
@@ -303,7 +555,7 @@ macro_rules! register {
#[doc="Sets the value of this field:"]
#[doc=$comment]
)?
- #[inline]
+ #[inline(always)]
pub(crate) fn [<set_ $field>](mut self, value: $to_type) -> Self {
const MASK: u32 = $name::[<$field:upper _MASK>];
const SHIFT: u32 = $name::[<$field:upper _SHIFT>];
@@ -315,25 +567,64 @@ macro_rules! register {
);
};
- // Creates the IO accessors for a fixed offset register.
- (@io $name:ident @ $offset:expr) => {
+ // Generates the `Debug` implementation for `$name`.
+ (@debug $name:ident { $($field:ident;)* }) => {
+ impl ::kernel::fmt::Debug for $name {
+ fn fmt(&self, f: &mut ::kernel::fmt::Formatter<'_>) -> ::kernel::fmt::Result {
+ f.debug_struct(stringify!($name))
+ .field("<raw>", &::kernel::prelude::fmt!("{:#x}", &self.0))
+ $(
+ .field(stringify!($field), &self.$field())
+ )*
+ .finish()
+ }
+ }
+ };
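+
+ // A register thus prints its raw value followed by each decoded field, e.g.
+ // (illustrative): `SCRATCH_0 { <raw>: 0x2a, value: 42 }`.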
+
+ // Generates the `Default` implementation for `$name`.
+ (@default $name:ident { $($field:ident;)* }) => {
+ /// Returns a value for the register where all fields are set to their default value.
+ impl ::core::default::Default for $name {
+ fn default() -> Self {
+ #[allow(unused_mut)]
+ let mut value = Self(Default::default());
+
+ ::kernel::macros::paste!(
+ $(
+ value.[<set_ $field>](Default::default());
+ )*
+ );
+
+ value
+ }
+ }
+ };
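+
+ // Note that `default()` is not necessarily an all-zero value: a field whose target
+ // type encodes its `Default` variant as a non-zero bit pattern is set accordingly.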
+
+ // Generates the IO accessors for a fixed offset register.
+ (@io_fixed $name:ident @ $offset:expr) => {
#[allow(dead_code)]
impl $name {
- #[inline]
+ pub(crate) const OFFSET: usize = $offset;
+
+ /// Read the register from its address in `io`.
+ #[inline(always)]
pub(crate) fn read<const SIZE: usize, T>(io: &T) -> Self where
T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
{
Self(io.read32($offset))
}
- #[inline]
+ /// Write the value contained in `self` to the register address in `io`.
+ #[inline(always)]
pub(crate) fn write<const SIZE: usize, T>(self, io: &T) where
T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
{
io.write32(self.0, $offset)
}
- #[inline]
+ /// Read the register from its address in `io` and run `f` on its value to obtain a new
+ /// value to write back.
+ #[inline(always)]
pub(crate) fn alter<const SIZE: usize, T, F>(
io: &T,
f: F,
@@ -347,76 +638,322 @@ macro_rules! register {
}
};
- // Create the IO accessors for a relative offset register.
- (@io $name:ident @ + $offset:literal) => {
+ // Generates the IO accessors for a relative offset register.
+ (@io_relative $name:ident @ $base:ty [ $offset:expr ]) => {
#[allow(dead_code)]
impl $name {
- #[inline]
+ pub(crate) const OFFSET: usize = $offset;
+
+ /// Read the register from `io`, using the base address provided by `base` and adding
+ /// the register's offset to it.
+ #[inline(always)]
+ pub(crate) fn read<const SIZE: usize, T, B>(
+ io: &T,
+ #[allow(unused_variables)]
+ base: &B,
+ ) -> Self where
+ T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ B: crate::regs::macros::RegisterBase<$base>,
+ {
+ const OFFSET: usize = $name::OFFSET;
+
+ let value = io.read32(
+ <B as crate::regs::macros::RegisterBase<$base>>::BASE + OFFSET
+ );
+
+ Self(value)
+ }
+
+ /// Write the value contained in `self` to `io`, using the base address provided by
+ /// `base` and adding the register's offset to it.
+ #[inline(always)]
+ pub(crate) fn write<const SIZE: usize, T, B>(
+ self,
+ io: &T,
+ #[allow(unused_variables)]
+ base: &B,
+ ) where
+ T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ B: crate::regs::macros::RegisterBase<$base>,
+ {
+ const OFFSET: usize = $name::OFFSET;
+
+ io.write32(
+ self.0,
+ <B as crate::regs::macros::RegisterBase<$base>>::BASE + OFFSET
+ );
+ }
+
+ /// Read the register from `io`, using the base address provided by `base` and adding
+ /// the register's offset to it, then run `f` on its value to obtain a new value to
+ /// write back.
+ #[inline(always)]
+ pub(crate) fn alter<const SIZE: usize, T, B, F>(
+ io: &T,
+ base: &B,
+ f: F,
+ ) where
+ T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ B: crate::regs::macros::RegisterBase<$base>,
+ F: ::core::ops::FnOnce(Self) -> Self,
+ {
+ let reg = f(Self::read(io, base));
+ reg.write(io, base);
+ }
+ }
+ };
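+
+ // Callers pass the base singleton explicitly, as in the doc comment above:
+ //
+ //   let cpu0_status = CPU_FIRMWARE_STATUS::read(bar, &Cpu0).status();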
+
+ // Generates the IO accessors for an array of registers.
+ (@io_array $name:ident @ $offset:literal [ $size:expr ; $stride:expr ]) => {
+ #[allow(dead_code)]
+ impl $name {
+ pub(crate) const OFFSET: usize = $offset;
+ pub(crate) const SIZE: usize = $size;
+ pub(crate) const STRIDE: usize = $stride;
+
+ /// Read the array register at index `idx` from its address in `io`.
+ #[inline(always)]
pub(crate) fn read<const SIZE: usize, T>(
io: &T,
- base: usize,
+ idx: usize,
) -> Self where
T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
{
- Self(io.read32(base + $offset))
+ build_assert!(idx < Self::SIZE);
+
+ let offset = Self::OFFSET + (idx * Self::STRIDE);
+ let value = io.read32(offset);
+
+ Self(value)
}
- #[inline]
+ /// Write the value contained in `self` to the array register with index `idx` in `io`.
+ #[inline(always)]
pub(crate) fn write<const SIZE: usize, T>(
self,
io: &T,
- base: usize,
+ idx: usize
) where
T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
{
- io.write32(self.0, base + $offset)
+ build_assert!(idx < Self::SIZE);
+
+ let offset = Self::OFFSET + (idx * Self::STRIDE);
+
+ io.write32(self.0, offset);
}
- #[inline]
+ /// Read the array register at index `idx` in `io` and run `f` on its value to obtain a
+ /// new value to write back.
+ #[inline(always)]
pub(crate) fn alter<const SIZE: usize, T, F>(
io: &T,
- base: usize,
+ idx: usize,
f: F,
) where
T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
F: ::core::ops::FnOnce(Self) -> Self,
{
- let reg = f(Self::read(io, base));
- reg.write(io, base);
+ let reg = f(Self::read(io, idx));
+ reg.write(io, idx);
}
- #[inline]
+ /// Read the array register at index `idx` from its address in `io`.
+ ///
+ /// The validity of `idx` is checked at run-time, and `EINVAL` is returned if the
+ /// access is out-of-bounds.
+ #[inline(always)]
pub(crate) fn try_read<const SIZE: usize, T>(
io: &T,
- base: usize,
+ idx: usize,
) -> ::kernel::error::Result<Self> where
T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
{
- io.try_read32(base + $offset).map(Self)
+ if idx < Self::SIZE {
+ Ok(Self::read(io, idx))
+ } else {
+ Err(EINVAL)
+ }
}
- #[inline]
+ /// Write the value contained in `self` to the array register with index `idx` in `io`.
+ ///
+ /// The validity of `idx` is checked at run-time, and `EINVAL` is returned if the
+ /// access is out-of-bounds.
+ #[inline(always)]
pub(crate) fn try_write<const SIZE: usize, T>(
self,
io: &T,
- base: usize,
- ) -> ::kernel::error::Result<()> where
+ idx: usize,
+ ) -> ::kernel::error::Result where
T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
{
- io.try_write32(self.0, base + $offset)
+ if idx < Self::SIZE {
+ Ok(self.write(io, idx))
+ } else {
+ Err(EINVAL)
+ }
}
- #[inline]
+ /// Read the array register at index `idx` in `io` and run `f` on its value to obtain a
+ /// new value to write back.
+ ///
+ /// The validity of `idx` is checked at run-time, and `EINVAL` is returned if the
+ /// access is out-of-bounds.
+ #[inline(always)]
pub(crate) fn try_alter<const SIZE: usize, T, F>(
io: &T,
- base: usize,
+ idx: usize,
+ f: F,
+ ) -> ::kernel::error::Result where
+ T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ F: ::core::ops::FnOnce(Self) -> Self,
+ {
+ if idx < Self::SIZE {
+ Ok(Self::alter(io, idx, f))
+ } else {
+ Err(EINVAL)
+ }
+ }
+ }
+ };
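+
+ // Usage mirrors the doc comment above, e.g.:
+ //
+ //   let some_scratch = SCRATCH::try_read(bar, scratch_idx)?.value();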
+
+ // Generates the IO accessors for an array of relative registers.
+ (
+ @io_relative_array $name:ident @ $base:ty
+ [ $offset:literal [ $size:expr ; $stride:expr ] ]
+ ) => {
+ #[allow(dead_code)]
+ impl $name {
+ pub(crate) const OFFSET: usize = $offset;
+ pub(crate) const SIZE: usize = $size;
+ pub(crate) const STRIDE: usize = $stride;
+
+ /// Read the array register at index `idx` from `io`, using the base address provided
+ /// by `base` and adding the register's offset to it.
+ #[inline(always)]
+ pub(crate) fn read<const SIZE: usize, T, B>(
+ io: &T,
+ #[allow(unused_variables)]
+ base: &B,
+ idx: usize,
+ ) -> Self where
+ T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ B: crate::regs::macros::RegisterBase<$base>,
+ {
+ build_assert!(idx < Self::SIZE);
+
+ let offset = <B as crate::regs::macros::RegisterBase<$base>>::BASE +
+ Self::OFFSET + (idx * Self::STRIDE);
+ let value = io.read32(offset);
+
+ Self(value)
+ }
+
+ /// Write the value contained in `self` to `io`, using the base address provided by
+ /// `base` and adding the offset of array register `idx` to it.
+ #[inline(always)]
+ pub(crate) fn write<const SIZE: usize, T, B>(
+ self,
+ io: &T,
+ #[allow(unused_variables)]
+ base: &B,
+ idx: usize
+ ) where
+ T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ B: crate::regs::macros::RegisterBase<$base>,
+ {
+ build_assert!(idx < Self::SIZE);
+
+ let offset = <B as crate::regs::macros::RegisterBase<$base>>::BASE +
+ Self::OFFSET + (idx * Self::STRIDE);
+
+ io.write32(self.0, offset);
+ }
+
+ /// Read the array register at index `idx` from `io`, using the base address provided
+ /// by `base` and adding the register's offset to it, then run `f` on its value to
+ /// obtain a new value to write back.
+ #[inline(always)]
+ pub(crate) fn alter<const SIZE: usize, T, B, F>(
+ io: &T,
+ base: &B,
+ idx: usize,
+ f: F,
+ ) where
+ T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ B: crate::regs::macros::RegisterBase<$base>,
+ F: ::core::ops::FnOnce(Self) -> Self,
+ {
+ let reg = f(Self::read(io, base, idx));
+ reg.write(io, base, idx);
+ }
+
+ /// Read the array register at index `idx` from `io`, using the base address provided
+ /// by `base` and adding the register's offset to it.
+ ///
+ /// The validity of `idx` is checked at run-time, and `EINVAL` is returned if the
+ /// access is out-of-bounds.
+ #[inline(always)]
+ pub(crate) fn try_read<const SIZE: usize, T, B>(
+ io: &T,
+ base: &B,
+ idx: usize,
+ ) -> ::kernel::error::Result<Self> where
+ T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ B: crate::regs::macros::RegisterBase<$base>,
+ {
+ if idx < Self::SIZE {
+ Ok(Self::read(io, base, idx))
+ } else {
+ Err(EINVAL)
+ }
+ }
+
+ /// Write the value contained in `self` to `io`, using the base address provided by
+ /// `base` and adding the offset of array register `idx` to it.
+ ///
+ /// The validity of `idx` is checked at run-time, and `EINVAL` is returned if the
+ /// access is out-of-bounds.
+ #[inline(always)]
+ pub(crate) fn try_write<const SIZE: usize, T, B>(
+ self,
+ io: &T,
+ base: &B,
+ idx: usize,
+ ) -> ::kernel::error::Result where
+ T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ B: crate::regs::macros::RegisterBase<$base>,
+ {
+ if idx < Self::SIZE {
+ Ok(self.write(io, base, idx))
+ } else {
+ Err(EINVAL)
+ }
+ }
+
+ /// Read the array register at index `idx` from `io`, using the base address provided
+ /// by `base` and adding the register's offset to it, then run `f` on its value to
+ /// obtain a new value to write back.
+ ///
+ /// The validity of `idx` is checked at run-time, and `EINVAL` is returned if the
+ /// access is out-of-bounds.
+ #[inline(always)]
+ pub(crate) fn try_alter<const SIZE: usize, T, B, F>(
+ io: &T,
+ base: &B,
+ idx: usize,
f: F,
- ) -> ::kernel::error::Result<()> where
+ ) -> ::kernel::error::Result where
T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ B: crate::regs::macros::RegisterBase<$base>,
F: ::core::ops::FnOnce(Self) -> Self,
{
- let reg = f(Self::try_read(io, base)?);
- reg.try_write(io, base)
+ if idx < Self::SIZE {
+ Ok(Self::alter(io, base, idx, f))
+ } else {
+ Err(EINVAL)
+ }
}
}
};
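+
+ // Usage mirrors the doc comment above, e.g.:
+ //
+ //   let cpu0_scratch_0 = CPU_SCRATCH::read(bar, &Cpu0, 0).value();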
diff --git a/drivers/gpu/nova-core/util.rs b/drivers/gpu/nova-core/util.rs
index 76cedf3710d7..bf35f00cb732 100644
--- a/drivers/gpu/nova-core/util.rs
+++ b/drivers/gpu/nova-core/util.rs
@@ -3,26 +3,6 @@
use kernel::prelude::*;
use kernel::time::{Delta, Instant, Monotonic};
-pub(crate) const fn to_lowercase_bytes<const N: usize>(s: &str) -> [u8; N] {
- let src = s.as_bytes();
- let mut dst = [0; N];
- let mut i = 0;
-
- while i < src.len() && i < N {
- dst[i] = (src[i] as char).to_ascii_lowercase() as u8;
- i += 1;
- }
-
- dst
-}
-
-pub(crate) const fn const_bytes_to_str(bytes: &[u8]) -> &str {
- match core::str::from_utf8(bytes) {
- Ok(string) => string,
- Err(_) => kernel::build_error!("Bytes are not valid UTF-8."),
- }
-}
-
/// Wait until `cond` is true or `timeout` elapsed.
///
/// When `cond` evaluates to `Some`, its return value is returned.
diff --git a/drivers/gpu/nova-core/vbios.rs b/drivers/gpu/nova-core/vbios.rs
index 091642d6a5a1..71fbe71b84db 100644
--- a/drivers/gpu/nova-core/vbios.rs
+++ b/drivers/gpu/nova-core/vbios.rs
@@ -8,9 +8,9 @@ use crate::firmware::FalconUCodeDescV3;
use core::convert::TryFrom;
use kernel::device;
use kernel::error::Result;
-use kernel::pci;
use kernel::prelude::*;
use kernel::ptr::{Alignable, Alignment};
+use kernel::types::ARef;
/// The offset of the VBIOS ROM in the BAR0 space.
const ROM_OFFSET: usize = 0x300000;
@@ -32,7 +32,7 @@ const FALCON_UCODE_ENTRY_APPID_FWSEC_PROD: u8 = 0x85;
/// Vbios Reader for constructing the VBIOS data.
struct VbiosIterator<'a> {
- pdev: &'a pci::Device,
+ dev: &'a device::Device,
bar0: &'a Bar0,
/// VBIOS data vector: As BIOS images are scanned, they are added to this vector for reference
/// or copying into other data structures. It is the entire scanned contents of the VBIOS which
@@ -47,9 +47,9 @@ struct VbiosIterator<'a> {
}
impl<'a> VbiosIterator<'a> {
- fn new(pdev: &'a pci::Device, bar0: &'a Bar0) -> Result<Self> {
+ fn new(dev: &'a device::Device, bar0: &'a Bar0) -> Result<Self> {
Ok(Self {
- pdev,
+ dev,
bar0,
data: KVec::new(),
current_offset: 0,
@@ -65,7 +65,7 @@ impl<'a> VbiosIterator<'a> {
// Ensure length is a multiple of 4 for 32-bit reads
if len % core::mem::size_of::<u32>() != 0 {
dev_err!(
- self.pdev.as_ref(),
+ self.dev,
"VBIOS read length {} is not a multiple of 4\n",
len
);
@@ -90,7 +90,7 @@ impl<'a> VbiosIterator<'a> {
/// Read bytes at a specific offset, filling any gap.
fn read_more_at_offset(&mut self, offset: usize, len: usize) -> Result {
if offset > BIOS_MAX_SCAN_LEN {
- dev_err!(self.pdev.as_ref(), "Error: exceeded BIOS scan limit.\n");
+ dev_err!(self.dev, "Error: exceeded BIOS scan limit.\n");
return Err(EINVAL);
}
@@ -116,7 +116,7 @@ impl<'a> VbiosIterator<'a> {
if offset + len > data_len {
self.read_more_at_offset(offset, len).inspect_err(|e| {
dev_err!(
- self.pdev.as_ref(),
+ self.dev,
"Failed to read more at offset {:#x}: {:?}\n",
offset,
e
@@ -124,9 +124,9 @@ impl<'a> VbiosIterator<'a> {
})?;
}
- BiosImage::new(self.pdev, &self.data[offset..offset + len]).inspect_err(|err| {
+ BiosImage::new(self.dev, &self.data[offset..offset + len]).inspect_err(|err| {
dev_err!(
- self.pdev.as_ref(),
+ self.dev,
"Failed to {} at offset {:#x}: {:?}\n",
context,
offset,
@@ -147,10 +147,7 @@ impl<'a> Iterator for VbiosIterator<'a> {
}
if self.current_offset > BIOS_MAX_SCAN_LEN {
- dev_err!(
- self.pdev.as_ref(),
- "Error: exceeded BIOS scan limit, stopping scan\n"
- );
+ dev_err!(self.dev, "Error: exceeded BIOS scan limit, stopping scan\n");
return None;
}
@@ -192,18 +189,18 @@ impl Vbios {
/// Probe for VBIOS extraction.
///
/// Once the VBIOS object is built, `bar0` is not read for [`Vbios`] purposes anymore.
- pub(crate) fn new(pdev: &pci::Device, bar0: &Bar0) -> Result<Vbios> {
+ pub(crate) fn new(dev: &device::Device, bar0: &Bar0) -> Result<Vbios> {
// Images to extract from iteration
let mut pci_at_image: Option<PciAtBiosImage> = None;
let mut first_fwsec_image: Option<FwSecBiosBuilder> = None;
let mut second_fwsec_image: Option<FwSecBiosBuilder> = None;
// Parse all VBIOS images in the ROM
- for image_result in VbiosIterator::new(pdev, bar0)? {
+ for image_result in VbiosIterator::new(dev, bar0)? {
let full_image = image_result?;
dev_dbg!(
- pdev.as_ref(),
+ dev,
"Found BIOS image: size: {:#x}, type: {}, last: {}\n",
full_image.image_size_bytes(),
full_image.image_type_str(),
@@ -234,14 +231,14 @@ impl Vbios {
(second_fwsec_image, first_fwsec_image, pci_at_image)
{
second
- .setup_falcon_data(pdev, &pci_at, &first)
- .inspect_err(|e| dev_err!(pdev.as_ref(), "Falcon data setup failed: {:?}\n", e))?;
+ .setup_falcon_data(&pci_at, &first)
+ .inspect_err(|e| dev_err!(dev, "Falcon data setup failed: {:?}\n", e))?;
Ok(Vbios {
- fwsec_image: second.build(pdev)?,
+ fwsec_image: second.build()?,
})
} else {
dev_err!(
- pdev.as_ref(),
+ dev,
"Missing required images for falcon data setup, skipping\n"
);
Err(EINVAL)
@@ -284,9 +281,9 @@ struct PcirStruct {
}
impl PcirStruct {
- fn new(pdev: &pci::Device, data: &[u8]) -> Result<Self> {
+ fn new(dev: &device::Device, data: &[u8]) -> Result<Self> {
if data.len() < core::mem::size_of::<PcirStruct>() {
- dev_err!(pdev.as_ref(), "Not enough data for PcirStruct\n");
+ dev_err!(dev, "Not enough data for PcirStruct\n");
return Err(EINVAL);
}
@@ -295,11 +292,7 @@ impl PcirStruct {
// Signature should be "PCIR" (0x52494350) or "NPDS" (0x5344504e).
if &signature != b"PCIR" && &signature != b"NPDS" {
- dev_err!(
- pdev.as_ref(),
- "Invalid signature for PcirStruct: {:?}\n",
- signature
- );
+ dev_err!(dev, "Invalid signature for PcirStruct: {:?}\n", signature);
return Err(EINVAL);
}
@@ -308,7 +301,7 @@ impl PcirStruct {
let image_len = u16::from_le_bytes([data[16], data[17]]);
if image_len == 0 {
- dev_err!(pdev.as_ref(), "Invalid image length: 0\n");
+ dev_err!(dev, "Invalid image length: 0\n");
return Err(EINVAL);
}
@@ -345,7 +338,7 @@ impl PcirStruct {
/// its header) is in the [`PciAtBiosImage`] and the falcon data it is pointing to is in the
/// [`FwSecBiosImage`].
#[derive(Debug, Clone, Copy)]
-#[expect(dead_code)]
+#[repr(C)]
struct BitHeader {
/// 0h: BIT Header Identifier (BMP=0x7FFF/BIT=0xB8FF)
id: u16,
@@ -365,7 +358,7 @@ struct BitHeader {
impl BitHeader {
fn new(data: &[u8]) -> Result<Self> {
- if data.len() < 12 {
+ if data.len() < core::mem::size_of::<Self>() {
return Err(EINVAL);
}
@@ -467,7 +460,7 @@ struct PciRomHeader {
}
impl PciRomHeader {
- fn new(pdev: &pci::Device, data: &[u8]) -> Result<Self> {
+ fn new(dev: &device::Device, data: &[u8]) -> Result<Self> {
if data.len() < 26 {
// Need at least 26 bytes to read pciDataStrucPtr and sizeOfBlock.
return Err(EINVAL);
@@ -479,7 +472,7 @@ impl PciRomHeader {
match signature {
0xAA55 | 0xBB77 | 0x4E56 => {}
_ => {
- dev_err!(pdev.as_ref(), "ROM signature unknown {:#x}\n", signature);
+ dev_err!(dev, "ROM signature unknown {:#x}\n", signature);
return Err(EINVAL);
}
}
@@ -538,9 +531,9 @@ struct NpdeStruct {
}
impl NpdeStruct {
- fn new(pdev: &pci::Device, data: &[u8]) -> Option<Self> {
+ fn new(dev: &device::Device, data: &[u8]) -> Option<Self> {
if data.len() < core::mem::size_of::<Self>() {
- dev_dbg!(pdev.as_ref(), "Not enough data for NpdeStruct\n");
+ dev_dbg!(dev, "Not enough data for NpdeStruct\n");
return None;
}
@@ -549,17 +542,13 @@ impl NpdeStruct {
// Signature should be "NPDE" (0x4544504E).
if &signature != b"NPDE" {
- dev_dbg!(
- pdev.as_ref(),
- "Invalid signature for NpdeStruct: {:?}\n",
- signature
- );
+ dev_dbg!(dev, "Invalid signature for NpdeStruct: {:?}\n", signature);
return None;
}
let subimage_len = u16::from_le_bytes([data[8], data[9]]);
if subimage_len == 0 {
- dev_dbg!(pdev.as_ref(), "Invalid subimage length: 0\n");
+ dev_dbg!(dev, "Invalid subimage length: 0\n");
return None;
}
@@ -584,7 +573,7 @@ impl NpdeStruct {
/// Try to find NPDE in the data, the NPDE is right after the PCIR.
fn find_in_data(
- pdev: &pci::Device,
+ dev: &device::Device,
data: &[u8],
rom_header: &PciRomHeader,
pcir: &PcirStruct,
@@ -596,12 +585,12 @@ impl NpdeStruct {
// Check if we have enough data
if npde_start + core::mem::size_of::<Self>() > data.len() {
- dev_dbg!(pdev.as_ref(), "Not enough data for NPDE\n");
+ dev_dbg!(dev, "Not enough data for NPDE\n");
return None;
}
// Try to create NPDE from the data
- NpdeStruct::new(pdev, &data[npde_start..])
+ NpdeStruct::new(dev, &data[npde_start..])
}
}
@@ -669,10 +658,10 @@ impl BiosImage {
/// Create a [`BiosImageBase`] from a byte slice and convert it to a [`BiosImage`] which
/// triggers the constructor of the specific BiosImage enum variant.
- fn new(pdev: &pci::Device, data: &[u8]) -> Result<Self> {
- let base = BiosImageBase::new(pdev, data)?;
+ fn new(dev: &device::Device, data: &[u8]) -> Result<Self> {
+ let base = BiosImageBase::new(dev, data)?;
let image = base.into_image().inspect_err(|e| {
- dev_err!(pdev.as_ref(), "Failed to create BiosImage: {:?}\n", e);
+ dev_err!(dev, "Failed to create BiosImage: {:?}\n", e);
})?;
Ok(image)
@@ -754,9 +743,10 @@ impl TryFrom<BiosImageBase> for BiosImage {
///
/// Each BiosImage type has a BiosImageBase type along with other image-specific fields. Note that
/// Rust favors composition of types over inheritance.
-#[derive(Debug)]
#[expect(dead_code)]
struct BiosImageBase {
+ /// Used for logging.
+ dev: ARef<device::Device>,
/// PCI ROM Expansion Header
rom_header: PciRomHeader,
/// PCI Data Structure
@@ -773,16 +763,16 @@ impl BiosImageBase {
}
/// Creates a new BiosImageBase from raw byte data.
- fn new(pdev: &pci::Device, data: &[u8]) -> Result<Self> {
+ fn new(dev: &device::Device, data: &[u8]) -> Result<Self> {
// Ensure we have enough data for the ROM header.
if data.len() < 26 {
- dev_err!(pdev.as_ref(), "Not enough data for ROM header\n");
+ dev_err!(dev, "Not enough data for ROM header\n");
return Err(EINVAL);
}
// Parse the ROM header.
- let rom_header = PciRomHeader::new(pdev, &data[0..26])
- .inspect_err(|e| dev_err!(pdev.as_ref(), "Failed to create PciRomHeader: {:?}\n", e))?;
+ let rom_header = PciRomHeader::new(dev, &data[0..26])
+ .inspect_err(|e| dev_err!(dev, "Failed to create PciRomHeader: {:?}\n", e))?;
// Get the PCI Data Structure using the pointer from the ROM header.
let pcir_offset = rom_header.pci_data_struct_offset as usize;
@@ -791,28 +781,29 @@ impl BiosImageBase {
.ok_or(EINVAL)
.inspect_err(|_| {
dev_err!(
- pdev.as_ref(),
+ dev,
"PCIR offset {:#x} out of bounds (data length: {})\n",
pcir_offset,
data.len()
);
dev_err!(
- pdev.as_ref(),
+ dev,
"Consider reading more data for construction of BiosImage\n"
);
})?;
- let pcir = PcirStruct::new(pdev, pcir_data)
- .inspect_err(|e| dev_err!(pdev.as_ref(), "Failed to create PcirStruct: {:?}\n", e))?;
+ let pcir = PcirStruct::new(dev, pcir_data)
+ .inspect_err(|e| dev_err!(dev, "Failed to create PcirStruct: {:?}\n", e))?;
// Look for NPDE structure if this is not an NBSI image (type != 0x70).
- let npde = NpdeStruct::find_in_data(pdev, data, &rom_header, &pcir);
+ let npde = NpdeStruct::find_in_data(dev, data, &rom_header, &pcir);
// Create a copy of the data.
let mut data_copy = KVec::new();
data_copy.extend_from_slice(data, GFP_KERNEL)?;
Ok(BiosImageBase {
+ dev: dev.into(),
rom_header,
pcir,
npde,
@@ -848,7 +839,7 @@ impl PciAtBiosImage {
///
/// This is just a 4 byte structure that contains a pointer to the Falcon data in the FWSEC
/// image.
- fn falcon_data_ptr(&self, pdev: &pci::Device) -> Result<u32> {
+ fn falcon_data_ptr(&self) -> Result<u32> {
let token = self.get_bit_token(BIT_TOKEN_ID_FALCON_DATA)?;
// Make sure we don't go out of bounds
@@ -859,14 +850,14 @@ impl PciAtBiosImage {
// read the 4 bytes at the offset specified in the token
let offset = token.data_offset as usize;
let bytes: [u8; 4] = self.base.data[offset..offset + 4].try_into().map_err(|_| {
- dev_err!(pdev.as_ref(), "Failed to convert data slice to array");
+ dev_err!(self.base.dev, "Failed to convert data slice to array");
EINVAL
})?;
let data_ptr = u32::from_le_bytes(bytes);
if (data_ptr as usize) < self.base.data.len() {
- dev_err!(pdev.as_ref(), "Falcon data pointer out of bounds\n");
+ dev_err!(self.base.dev, "Falcon data pointer out of bounds\n");
return Err(EINVAL);
}
@@ -892,7 +883,7 @@ impl TryFrom<BiosImageBase> for PciAtBiosImage {
/// The [`PmuLookupTableEntry`] structure is a single entry in the [`PmuLookupTable`].
///
/// See the [`PmuLookupTable`] description for more information.
-#[expect(dead_code)]
+#[repr(C, packed)]
struct PmuLookupTableEntry {
application_id: u8,
target_id: u8,
@@ -901,7 +892,7 @@ struct PmuLookupTableEntry {
impl PmuLookupTableEntry {
fn new(data: &[u8]) -> Result<Self> {
- if data.len() < 6 {
+ if data.len() < core::mem::size_of::<Self>() {
return Err(EINVAL);
}
@@ -928,7 +919,7 @@ struct PmuLookupTable {
}
impl PmuLookupTable {
- fn new(pdev: &pci::Device, data: &[u8]) -> Result<Self> {
+ fn new(dev: &device::Device, data: &[u8]) -> Result<Self> {
if data.len() < 4 {
return Err(EINVAL);
}
@@ -940,10 +931,7 @@ impl PmuLookupTable {
let required_bytes = header_len + (entry_count * entry_len);
if data.len() < required_bytes {
- dev_err!(
- pdev.as_ref(),
- "PmuLookupTable data length less than required\n"
- );
+ dev_err!(dev, "PmuLookupTable data length less than required\n");
return Err(EINVAL);
}
@@ -956,11 +944,7 @@ impl PmuLookupTable {
// Debug logging of entries (dumps the table data to dmesg)
for i in (header_len..required_bytes).step_by(entry_len) {
- dev_dbg!(
- pdev.as_ref(),
- "PMU entry: {:02x?}\n",
- &data[i..][..entry_len]
- );
+ dev_dbg!(dev, "PMU entry: {:02x?}\n", &data[i..][..entry_len]);
}
Ok(PmuLookupTable {
@@ -997,11 +981,10 @@ impl PmuLookupTable {
impl FwSecBiosBuilder {
fn setup_falcon_data(
&mut self,
- pdev: &pci::Device,
pci_at_image: &PciAtBiosImage,
first_fwsec: &FwSecBiosBuilder,
) -> Result {
- let mut offset = pci_at_image.falcon_data_ptr(pdev)? as usize;
+ let mut offset = pci_at_image.falcon_data_ptr()? as usize;
let mut pmu_in_first_fwsec = false;
// The falcon data pointer assumes that the PciAt and FWSEC images
@@ -1024,10 +1007,15 @@ impl FwSecBiosBuilder {
self.falcon_data_offset = Some(offset);
if pmu_in_first_fwsec {
- self.pmu_lookup_table =
- Some(PmuLookupTable::new(pdev, &first_fwsec.base.data[offset..])?);
+ self.pmu_lookup_table = Some(PmuLookupTable::new(
+ &self.base.dev,
+ &first_fwsec.base.data[offset..],
+ )?);
} else {
- self.pmu_lookup_table = Some(PmuLookupTable::new(pdev, &self.base.data[offset..])?);
+ self.pmu_lookup_table = Some(PmuLookupTable::new(
+ &self.base.dev,
+ &self.base.data[offset..],
+ )?);
}
match self
@@ -1040,7 +1028,7 @@ impl FwSecBiosBuilder {
let mut ucode_offset = entry.data as usize;
ucode_offset -= pci_at_image.base.data.len();
if ucode_offset < first_fwsec.base.data.len() {
- dev_err!(pdev.as_ref(), "Falcon Ucode offset not in second Fwsec.\n");
+ dev_err!(self.base.dev, "Falcon Ucode offset not in second Fwsec.\n");
return Err(EINVAL);
}
ucode_offset -= first_fwsec.base.data.len();
@@ -1048,7 +1036,7 @@ impl FwSecBiosBuilder {
}
Err(e) => {
dev_err!(
- pdev.as_ref(),
+ self.base.dev,
"PmuLookupTableEntry not found, error: {:?}\n",
e
);
@@ -1059,7 +1047,7 @@ impl FwSecBiosBuilder {
}
/// Build the final FwSecBiosImage from this builder
- fn build(self, pdev: &pci::Device) -> Result<FwSecBiosImage> {
+ fn build(self) -> Result<FwSecBiosImage> {
let ret = FwSecBiosImage {
base: self.base,
falcon_ucode_offset: self.falcon_ucode_offset.ok_or(EINVAL)?,
@@ -1067,8 +1055,8 @@ impl FwSecBiosBuilder {
if cfg!(debug_assertions) {
// Print the desc header for debugging
- let desc = ret.header(pdev.as_ref())?;
- dev_dbg!(pdev.as_ref(), "PmuLookupTableEntry desc: {:#?}\n", desc);
+ let desc = ret.header()?;
+ dev_dbg!(ret.base.dev, "PmuLookupTableEntry desc: {:#?}\n", desc);
}
Ok(ret)
@@ -1077,13 +1065,16 @@ impl FwSecBiosBuilder {
impl FwSecBiosImage {
/// Get the FwSec header ([`FalconUCodeDescV3`]).
- pub(crate) fn header(&self, dev: &device::Device) -> Result<&FalconUCodeDescV3> {
+ pub(crate) fn header(&self) -> Result<&FalconUCodeDescV3> {
// Get the falcon ucode offset that was found in setup_falcon_data.
let falcon_ucode_offset = self.falcon_ucode_offset;
// Make sure the offset is within the data bounds.
if falcon_ucode_offset + core::mem::size_of::<FalconUCodeDescV3>() > self.base.data.len() {
- dev_err!(dev, "fwsec-frts header not contained within BIOS bounds\n");
+ dev_err!(
+ self.base.dev,
+ "fwsec-frts header not contained within BIOS bounds\n"
+ );
return Err(ERANGE);
}
@@ -1095,7 +1086,7 @@ impl FwSecBiosImage {
let ver = (hdr & 0xff00) >> 8;
if ver != 3 {
- dev_err!(dev, "invalid fwsec firmware version: {:?}\n", ver);
+ dev_err!(self.base.dev, "invalid fwsec firmware version: {:?}\n", ver);
return Err(EINVAL);
}
@@ -1115,7 +1106,7 @@ impl FwSecBiosImage {
}
/// Get the ucode data as a byte slice
- pub(crate) fn ucode(&self, dev: &device::Device, desc: &FalconUCodeDescV3) -> Result<&[u8]> {
+ pub(crate) fn ucode(&self, desc: &FalconUCodeDescV3) -> Result<&[u8]> {
let falcon_ucode_offset = self.falcon_ucode_offset;
// The ucode data follows the descriptor.
@@ -1127,15 +1118,16 @@ impl FwSecBiosImage {
.data
.get(ucode_data_offset..ucode_data_offset + size)
.ok_or(ERANGE)
- .inspect_err(|_| dev_err!(dev, "fwsec ucode data not contained within BIOS bounds\n"))
+ .inspect_err(|_| {
+ dev_err!(
+ self.base.dev,
+ "fwsec ucode data not contained within BIOS bounds\n"
+ )
+ })
}
/// Get the signatures as a byte slice
- pub(crate) fn sigs(
- &self,
- dev: &device::Device,
- desc: &FalconUCodeDescV3,
- ) -> Result<&[Bcrt30Rsa3kSignature]> {
+ pub(crate) fn sigs(&self, desc: &FalconUCodeDescV3) -> Result<&[Bcrt30Rsa3kSignature]> {
// The signatures data follows the descriptor.
let sigs_data_offset = self.falcon_ucode_offset + core::mem::size_of::<FalconUCodeDescV3>();
let sigs_size =
@@ -1144,7 +1136,7 @@ impl FwSecBiosImage {
// Make sure the data is within bounds.
if sigs_data_offset + sigs_size > self.base.data.len() {
dev_err!(
- dev,
+ self.base.dev,
"fwsec signatures data not contained within BIOS bounds\n"
);
return Err(ERANGE);
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index d3912e3f2f13..30ebde1273be 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -112,9 +112,9 @@ struct i2c_hid {
struct i2chid_ops *ops;
struct drm_panel_follower panel_follower;
- struct work_struct panel_follower_prepare_work;
+ struct work_struct panel_follower_work;
bool is_panel_follower;
- bool prepare_work_finished;
+ bool panel_follower_work_finished;
};
static const struct i2c_hid_quirks {
@@ -1110,10 +1110,10 @@ err_power_down:
return ret;
}
-static void ihid_core_panel_prepare_work(struct work_struct *work)
+static void ihid_core_panel_follower_work(struct work_struct *work)
{
struct i2c_hid *ihid = container_of(work, struct i2c_hid,
- panel_follower_prepare_work);
+ panel_follower_work);
struct hid_device *hid = ihid->hid;
int ret;
@@ -1130,7 +1130,7 @@ static void ihid_core_panel_prepare_work(struct work_struct *work)
if (ret)
dev_warn(&ihid->client->dev, "Power on failed: %d\n", ret);
else
- WRITE_ONCE(ihid->prepare_work_finished, true);
+ WRITE_ONCE(ihid->panel_follower_work_finished, true);
/*
* The work APIs provide a number of memory ordering guarantees
@@ -1139,12 +1139,12 @@ static void ihid_core_panel_prepare_work(struct work_struct *work)
* guarantee that a write that happened in the work is visible after
* cancel_work_sync(). We'll add a write memory barrier here to match
- * with i2c_hid_core_panel_unpreparing() to ensure that our write to
+ * with i2c_hid_core_panel_follower_suspend() to ensure that our write to
- * prepare_work_finished is visible there.
+ * panel_follower_work_finished is visible there.
*/
smp_wmb();
}
-static int i2c_hid_core_panel_prepared(struct drm_panel_follower *follower)
+static int i2c_hid_core_panel_follower_resume(struct drm_panel_follower *follower)
{
struct i2c_hid *ihid = container_of(follower, struct i2c_hid, panel_follower);
@@ -1152,29 +1152,36 @@ static int i2c_hid_core_panel_prepared(struct drm_panel_follower *follower)
* Powering on a touchscreen can be a slow process. Queue the work to
* the system workqueue so we don't block the panel's power up.
*/
- WRITE_ONCE(ihid->prepare_work_finished, false);
- schedule_work(&ihid->panel_follower_prepare_work);
+ WRITE_ONCE(ihid->panel_follower_work_finished, false);
+ schedule_work(&ihid->panel_follower_work);
return 0;
}
-static int i2c_hid_core_panel_unpreparing(struct drm_panel_follower *follower)
+static int i2c_hid_core_panel_follower_suspend(struct drm_panel_follower *follower)
{
struct i2c_hid *ihid = container_of(follower, struct i2c_hid, panel_follower);
- cancel_work_sync(&ihid->panel_follower_prepare_work);
+ cancel_work_sync(&ihid->panel_follower_work);
- /* Match with ihid_core_panel_prepare_work() */
+ /* Match with ihid_core_panel_follower_work() */
smp_rmb();
- if (!READ_ONCE(ihid->prepare_work_finished))
+ if (!READ_ONCE(ihid->panel_follower_work_finished))
return 0;
return i2c_hid_core_suspend(ihid, true);
}
-static const struct drm_panel_follower_funcs i2c_hid_core_panel_follower_funcs = {
- .panel_prepared = i2c_hid_core_panel_prepared,
- .panel_unpreparing = i2c_hid_core_panel_unpreparing,
+static const struct drm_panel_follower_funcs
+ i2c_hid_core_panel_follower_prepare_funcs = {
+ .panel_prepared = i2c_hid_core_panel_follower_resume,
+ .panel_unpreparing = i2c_hid_core_panel_follower_suspend,
+};
+
+static const struct drm_panel_follower_funcs
+ i2c_hid_core_panel_follower_enable_funcs = {
+ .panel_enabled = i2c_hid_core_panel_follower_resume,
+ .panel_disabling = i2c_hid_core_panel_follower_suspend,
};
static int i2c_hid_core_register_panel_follower(struct i2c_hid *ihid)
@@ -1182,7 +1189,10 @@ static int i2c_hid_core_register_panel_follower(struct i2c_hid *ihid)
struct device *dev = &ihid->client->dev;
int ret;
- ihid->panel_follower.funcs = &i2c_hid_core_panel_follower_funcs;
+ if (ihid->hid->initial_quirks & HID_QUIRK_POWER_ON_AFTER_BACKLIGHT)
+ ihid->panel_follower.funcs = &i2c_hid_core_panel_follower_enable_funcs;
+ else
+ ihid->panel_follower.funcs = &i2c_hid_core_panel_follower_prepare_funcs;
/*
* If we're not in control of our own power up/power down then we can't
@@ -1237,7 +1247,7 @@ int i2c_hid_core_probe(struct i2c_client *client, struct i2chid_ops *ops,
init_waitqueue_head(&ihid->wait);
mutex_init(&ihid->cmd_lock);
mutex_init(&ihid->reset_lock);
- INIT_WORK(&ihid->panel_follower_prepare_work, ihid_core_panel_prepare_work);
+ INIT_WORK(&ihid->panel_follower_work, ihid_core_panel_follower_work);
/* we need to allocate the command buffer without knowing the maximum
* size of the reports. Let's use HID_MIN_BUFFER_SIZE, then we do the
diff --git a/drivers/hid/i2c-hid/i2c-hid-of-elan.c b/drivers/hid/i2c-hid/i2c-hid-of-elan.c
index 3fcff6daa0d3..0215f217f6d8 100644
--- a/drivers/hid/i2c-hid/i2c-hid-of-elan.c
+++ b/drivers/hid/i2c-hid/i2c-hid-of-elan.c
@@ -8,6 +8,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/gpio/consumer.h>
+#include <linux/hid.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -23,6 +24,7 @@ struct elan_i2c_hid_chip_data {
unsigned int post_power_delay_ms;
u16 hid_descriptor_address;
const char *main_supply_name;
+ bool power_after_backlight;
};
struct i2c_hid_of_elan {
@@ -97,6 +99,7 @@ static int i2c_hid_of_elan_probe(struct i2c_client *client)
{
struct i2c_hid_of_elan *ihid_elan;
int ret;
+ u32 quirks = 0;
ihid_elan = devm_kzalloc(&client->dev, sizeof(*ihid_elan), GFP_KERNEL);
if (!ihid_elan)
@@ -131,8 +134,12 @@ static int i2c_hid_of_elan_probe(struct i2c_client *client)
}
}
+ if (ihid_elan->chip_data->power_after_backlight)
+ quirks = HID_QUIRK_POWER_ON_AFTER_BACKLIGHT;
+
ret = i2c_hid_core_probe(client, &ihid_elan->ops,
- ihid_elan->chip_data->hid_descriptor_address, 0);
+ ihid_elan->chip_data->hid_descriptor_address,
+ quirks);
if (ret)
goto err_deassert_reset;
@@ -150,6 +157,7 @@ static const struct elan_i2c_hid_chip_data elan_ekth6915_chip_data = {
.post_gpio_reset_on_delay_ms = 300,
.hid_descriptor_address = 0x0001,
.main_supply_name = "vcc33",
+ .power_after_backlight = true,
};
static const struct elan_i2c_hid_chip_data elan_ekth6a12nay_chip_data = {
@@ -157,6 +165,7 @@ static const struct elan_i2c_hid_chip_data elan_ekth6a12nay_chip_data = {
.post_gpio_reset_on_delay_ms = 300,
.hid_descriptor_address = 0x0001,
.main_supply_name = "vcc33",
+ .power_after_backlight = true,
};
static const struct elan_i2c_hid_chip_data ilitek_ili9882t_chip_data = {
diff --git a/drivers/hsi/controllers/omap_ssi_port.c b/drivers/hsi/controllers/omap_ssi_port.c
index aeb92b803a17..50dde968febe 100644
--- a/drivers/hsi/controllers/omap_ssi_port.c
+++ b/drivers/hsi/controllers/omap_ssi_port.c
@@ -362,7 +362,6 @@ static int ssi_async_break(struct hsi_msg *msg)
spin_unlock_bh(&omap_port->lock);
}
out:
- pm_runtime_mark_last_busy(omap_port->pdev);
pm_runtime_put_autosuspend(omap_port->pdev);
return err;
@@ -401,7 +400,6 @@ static int ssi_async(struct hsi_msg *msg)
msg->status = HSI_STATUS_ERROR;
}
spin_unlock_bh(&omap_port->lock);
- pm_runtime_mark_last_busy(omap_port->pdev);
pm_runtime_put_autosuspend(omap_port->pdev);
dev_dbg(&port->device, "msg status %d ttype %d ch %d\n",
msg->status, msg->ttype, msg->channel);
@@ -504,7 +502,6 @@ static int ssi_setup(struct hsi_client *cl)
omap_port->ssr.mode = cl->rx_cfg.mode;
out:
spin_unlock_bh(&omap_port->lock);
- pm_runtime_mark_last_busy(omap_port->pdev);
pm_runtime_put_autosuspend(omap_port->pdev);
return err;
@@ -570,7 +567,6 @@ static int ssi_flush(struct hsi_client *cl)
pinctrl_pm_select_default_state(omap_port->pdev);
spin_unlock_bh(&omap_port->lock);
- pm_runtime_mark_last_busy(omap_port->pdev);
pm_runtime_put_autosuspend(omap_port->pdev);
return 0;
@@ -625,7 +621,6 @@ static int ssi_stop_tx(struct hsi_client *cl)
writel(SSI_WAKE(0), omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num));
spin_unlock_bh(&omap_port->wk_lock);
- pm_runtime_mark_last_busy(omap_port->pdev);
pm_runtime_put_autosuspend(omap_port->pdev); /* Release clocks */
@@ -653,7 +648,6 @@ static void ssi_transfer(struct omap_ssi_port *omap_port,
}
}
spin_unlock_bh(&omap_port->lock);
- pm_runtime_mark_last_busy(omap_port->pdev);
pm_runtime_put_autosuspend(omap_port->pdev);
}
@@ -683,7 +677,6 @@ static void ssi_cleanup_queues(struct hsi_client *cl)
txbufstate |= (1 << i);
status |= SSI_DATAACCEPT(i);
/* Release the clocks writes, also GDD ones */
- pm_runtime_mark_last_busy(omap_port->pdev);
pm_runtime_put_autosuspend(omap_port->pdev);
}
ssi_flush_queue(&omap_port->txqueue[i], cl);
@@ -739,7 +732,6 @@ static void ssi_cleanup_gdd(struct hsi_controller *ssi, struct hsi_client *cl)
* ssi_cleanup_queues
*/
if (msg->ttype == HSI_MSG_READ) {
- pm_runtime_mark_last_busy(omap_port->pdev);
pm_runtime_put_autosuspend(omap_port->pdev);
}
omap_ssi->gdd_trn[i].msg = NULL;
@@ -936,7 +928,6 @@ static void ssi_pio_complete(struct hsi_port *port, struct list_head *queue)
reg = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
if (msg->ttype == HSI_MSG_WRITE) {
/* Release clocks for write transfer */
- pm_runtime_mark_last_busy(omap_port->pdev);
pm_runtime_put_autosuspend(omap_port->pdev);
}
reg &= ~val;
@@ -981,7 +972,6 @@ static irqreturn_t ssi_pio_thread(int irq, void *ssi_port)
/* TODO: sleep if we retry? */
} while (status_reg);
- pm_runtime_mark_last_busy(omap_port->pdev);
pm_runtime_put_autosuspend(omap_port->pdev);
return IRQ_HANDLED;
@@ -1018,7 +1008,6 @@ static irqreturn_t ssi_wake_thread(int irq __maybe_unused, void *ssi_port)
}
hsi_event(port, HSI_EVENT_STOP_RX);
if (test_and_clear_bit(SSI_WAKE_EN, &omap_port->flags)) {
- pm_runtime_mark_last_busy(omap_port->pdev);
pm_runtime_put_autosuspend(omap_port->pdev);
}
}
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 63a2b5a9abc3..ae8fcc864060 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -165,6 +165,7 @@ config I2C_I801
Birch Stream (SOC)
Arrow Lake (SOC)
Panther Lake (SOC)
+ Wildcat Lake (SOC)
This driver can also be built as a module. If so, the module
will be called i2c-i801.
@@ -414,7 +415,7 @@ config I2C_ASPEED
config I2C_AT91
tristate "Atmel AT91 I2C Two-Wire interface (TWI)"
- depends on ARCH_AT91 || COMPILE_TEST
+ depends on ARCH_MICROCHIP || COMPILE_TEST
help
This supports the use of the I2C interface on Atmel AT91
processors.
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
index cbd88ffa5610..c7a72c28786c 100644
--- a/drivers/i2c/busses/i2c-designware-master.c
+++ b/drivers/i2c/busses/i2c-designware-master.c
@@ -1068,11 +1068,10 @@ int i2c_dw_probe_master(struct dw_i2c_dev *dev)
if (!(dev->flags & ACCESS_POLLING)) {
ret = devm_request_irq(dev->dev, dev->irq, i2c_dw_isr,
irq_flags, dev_name(dev->dev), dev);
- if (ret) {
- dev_err(dev->dev, "failure requesting irq %i: %d\n",
- dev->irq, ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev->dev, ret,
+ "failure requesting irq %i: %d\n",
+ dev->irq, ret);
}
ret = i2c_dw_init_recovery_info(dev);
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index a35e4c64a1d4..34d881572351 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -238,7 +238,7 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
dev->rst = devm_reset_control_get_optional_exclusive(device, NULL);
if (IS_ERR(dev->rst))
- return PTR_ERR(dev->rst);
+ return dev_err_probe(device, PTR_ERR(dev->rst), "failed to acquire reset\n");
reset_control_deassert(dev->rst);
@@ -247,21 +247,23 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
goto exit_reset;
ret = i2c_dw_probe_lock_support(dev);
- if (ret)
+ if (ret) {
+ ret = dev_err_probe(device, ret, "failed to probe lock support\n");
goto exit_reset;
+ }
i2c_dw_configure(dev);
/* Optional interface clock */
dev->pclk = devm_clk_get_optional(device, "pclk");
if (IS_ERR(dev->pclk)) {
- ret = PTR_ERR(dev->pclk);
+ ret = dev_err_probe(device, PTR_ERR(dev->pclk), "failed to acquire pclk\n");
goto exit_reset;
}
dev->clk = devm_clk_get_optional(device, NULL);
if (IS_ERR(dev->clk)) {
- ret = PTR_ERR(dev->clk);
+ ret = dev_err_probe(device, PTR_ERR(dev->clk), "failed to acquire clock\n");
goto exit_reset;
}
@@ -314,6 +316,7 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
exit_probe:
dw_i2c_plat_pm_cleanup(dev);
+ i2c_dw_prepare_clk(dev, false);
exit_reset:
reset_control_assert(dev->rst);
return ret;
@@ -331,9 +334,11 @@ static void dw_i2c_plat_remove(struct platform_device *pdev)
i2c_dw_disable(dev);
pm_runtime_dont_use_autosuspend(device);
- pm_runtime_put_sync(device);
+ pm_runtime_put_noidle(device);
dw_i2c_plat_pm_cleanup(dev);
+ i2c_dw_prepare_clk(dev, false);
+
i2c_dw_remove_lock_support(dev);
reset_control_assert(dev->rst);
diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c
index b936a240db0a..6eb16b7d75a6 100644
--- a/drivers/i2c/busses/i2c-designware-slave.c
+++ b/drivers/i2c/busses/i2c-designware-slave.c
@@ -266,11 +266,10 @@ int i2c_dw_probe_slave(struct dw_i2c_dev *dev)
ret = devm_request_irq(dev->dev, dev->irq, i2c_dw_isr_slave,
IRQF_SHARED, dev_name(dev->dev), dev);
- if (ret) {
- dev_err(dev->dev, "failure requesting IRQ %i: %d\n",
- dev->irq, ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev->dev, ret,
+ "failure requesting IRQ %i: %d\n",
+ dev->irq, ret);
ret = i2c_add_numbered_adapter(adap);
if (ret)
diff --git a/drivers/i2c/busses/i2c-hix5hd2.c b/drivers/i2c/busses/i2c-hix5hd2.c
index 370f32974763..5358f5ddf924 100644
--- a/drivers/i2c/busses/i2c-hix5hd2.c
+++ b/drivers/i2c/busses/i2c-hix5hd2.c
@@ -339,7 +339,7 @@ static int hix5hd2_i2c_xfer_msg(struct hix5hd2_i2c_priv *priv,
ret = priv->state;
/*
- * If this is the last message to be transfered (stop == 1)
+ * If this is the last message to be transferred (stop == 1)
* Then check if the bus can be brought back to idle.
*/
if (priv->state == HIX5I2C_STAT_RW_SUCCESS && stop)
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index e94ac746a741..cba992fa6557 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -83,6 +83,7 @@
* Arrow Lake-H (SOC) 0x7722 32 hard yes yes yes
* Panther Lake-H (SOC) 0xe322 32 hard yes yes yes
* Panther Lake-P (SOC) 0xe422 32 hard yes yes yes
+ * Wildcat Lake-U (SOC) 0x4d22 32 hard yes yes yes
*
* Features supported by this driver:
* Software PEC no
@@ -236,6 +237,7 @@
#define PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS 0x3b30
#define PCI_DEVICE_ID_INTEL_TIGERLAKE_H_SMBUS 0x43a3
#define PCI_DEVICE_ID_INTEL_ELKHART_LAKE_SMBUS 0x4b23
+#define PCI_DEVICE_ID_INTEL_WILDCAT_LAKE_U_SMBUS 0x4d22
#define PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS 0x4da3
#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_P_SMBUS 0x51a3
#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_M_SMBUS 0x54a3
@@ -1056,6 +1058,7 @@ static const struct pci_device_id i801_ids[] = {
{ PCI_DEVICE_DATA(INTEL, ARROW_LAKE_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
{ PCI_DEVICE_DATA(INTEL, PANTHER_LAKE_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
{ PCI_DEVICE_DATA(INTEL, PANTHER_LAKE_P_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, WILDCAT_LAKE_U_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-k1.c b/drivers/i2c/busses/i2c-k1.c
index b68a21fff0b5..6b918770e612 100644
--- a/drivers/i2c/busses/i2c-k1.c
+++ b/drivers/i2c/busses/i2c-k1.c
@@ -3,6 +3,7 @@
* Copyright (C) 2024-2025 Troy Mitchell <troymitchell988@gmail.com>
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/i2c.h>
#include <linux/iopoll.h>
@@ -14,6 +15,7 @@
#define SPACEMIT_ICR 0x0 /* Control register */
#define SPACEMIT_ISR 0x4 /* Status register */
#define SPACEMIT_IDBR 0xc /* Data buffer register */
+#define SPACEMIT_IRCR 0x18 /* Reset cycle counter */
#define SPACEMIT_IBMR 0x1c /* Bus monitor register */
/* SPACEMIT_ICR register fields */
@@ -25,7 +27,8 @@
#define SPACEMIT_CR_MODE_FAST BIT(8) /* bus mode (master operation) */
/* Bit 9 is reserved */
#define SPACEMIT_CR_UR BIT(10) /* unit reset */
-/* Bits 11-12 are reserved */
+#define SPACEMIT_CR_RSTREQ BIT(11) /* i2c bus reset request */
+/* Bit 12 is reserved */
#define SPACEMIT_CR_SCLE BIT(13) /* master clock enable */
#define SPACEMIT_CR_IUE BIT(14) /* unit enable */
/* Bits 15-17 are reserved */
@@ -76,6 +79,10 @@
SPACEMIT_SR_GCAD | SPACEMIT_SR_IRF | SPACEMIT_SR_ITE | \
SPACEMIT_SR_ALD)
+#define SPACEMIT_RCR_SDA_GLITCH_NOFIX BIT(7) /* bypass the SDA glitch fix */
+/* the cycles of SCL during bus reset */
+#define SPACEMIT_RCR_FIELD_RST_CYC GENMASK(3, 0)
+
/* SPACEMIT_IBMR register fields */
#define SPACEMIT_BMR_SDA BIT(0) /* SDA line level */
#define SPACEMIT_BMR_SCL BIT(1) /* SCL line level */
@@ -88,6 +95,8 @@
#define SPACEMIT_SR_ERR (SPACEMIT_SR_BED | SPACEMIT_SR_RXOV | SPACEMIT_SR_ALD)
+#define SPACEMIT_BUS_RESET_CLK_CNT_MAX 9
+
enum spacemit_i2c_state {
SPACEMIT_STATE_IDLE,
SPACEMIT_STATE_START,
@@ -160,6 +169,7 @@ static int spacemit_i2c_handle_err(struct spacemit_i2c_dev *i2c)
static void spacemit_i2c_conditionally_reset_bus(struct spacemit_i2c_dev *i2c)
{
u32 status;
+ u8 clk_cnt;
/* if bus is locked, reset unit. 0: locked */
status = readl(i2c->base + SPACEMIT_IBMR);
@@ -169,9 +179,21 @@ static void spacemit_i2c_conditionally_reset_bus(struct spacemit_i2c_dev *i2c)
spacemit_i2c_reset(i2c);
usleep_range(10, 20);
- /* check scl status again */
+ for (clk_cnt = 0; clk_cnt < SPACEMIT_BUS_RESET_CLK_CNT_MAX; clk_cnt++) {
+ status = readl(i2c->base + SPACEMIT_IBMR);
+ if (status & SPACEMIT_BMR_SDA)
+ return;
+
+ /* SDA is still stuck low, so a bus reset cycle cannot make things worse */
+ writel(FIELD_PREP(SPACEMIT_RCR_FIELD_RST_CYC, 1),
+ i2c->base + SPACEMIT_IRCR);
+ writel(SPACEMIT_CR_RSTREQ, i2c->base + SPACEMIT_ICR);
+ usleep_range(20, 30);
+ }
+
+ /* check sda again here */
status = readl(i2c->base + SPACEMIT_IBMR);
- if (!(status & SPACEMIT_BMR_SCL))
+ if (!(status & SPACEMIT_BMR_SDA))
dev_warn_ratelimited(i2c->dev, "unit reset failed\n");
}
@@ -237,6 +259,14 @@ static void spacemit_i2c_init(struct spacemit_i2c_dev *i2c)
val |= SPACEMIT_CR_MSDE | SPACEMIT_CR_MSDIE;
writel(val, i2c->base + SPACEMIT_ICR);
+
+ /*
+ * The glitch fix in the K1 I2C controller introduces a delay
+ * on restart signals, so we disable the fix here.
+ */
+ val = readl(i2c->base + SPACEMIT_IRCR);
+ val |= SPACEMIT_RCR_SDA_GLITCH_NOFIX;
+ writel(val, i2c->base + SPACEMIT_IRCR);
}
static inline void
@@ -267,19 +297,6 @@ static void spacemit_i2c_start(struct spacemit_i2c_dev *i2c)
writel(val, i2c->base + SPACEMIT_ICR);
}
-static void spacemit_i2c_stop(struct spacemit_i2c_dev *i2c)
-{
- u32 val;
-
- val = readl(i2c->base + SPACEMIT_ICR);
- val |= SPACEMIT_CR_STOP | SPACEMIT_CR_ALDIE | SPACEMIT_CR_TB;
-
- if (i2c->read)
- val |= SPACEMIT_CR_ACKNAK;
-
- writel(val, i2c->base + SPACEMIT_ICR);
-}
-
static int spacemit_i2c_xfer_msg(struct spacemit_i2c_dev *i2c)
{
unsigned long time_left;
@@ -412,7 +429,6 @@ static irqreturn_t spacemit_i2c_irq_handler(int irq, void *devid)
val = readl(i2c->base + SPACEMIT_ICR);
val &= ~(SPACEMIT_CR_TB | SPACEMIT_CR_ACKNAK | SPACEMIT_CR_STOP | SPACEMIT_CR_START);
- writel(val, i2c->base + SPACEMIT_ICR);
switch (i2c->state) {
case SPACEMIT_STATE_START:
@@ -429,14 +445,16 @@ static irqreturn_t spacemit_i2c_irq_handler(int irq, void *devid)
}
if (i2c->state != SPACEMIT_STATE_IDLE) {
+ val |= SPACEMIT_CR_TB | SPACEMIT_CR_ALDIE;
+
if (spacemit_i2c_is_last_msg(i2c)) {
/* trigger next byte with stop */
- spacemit_i2c_stop(i2c);
- } else {
- /* trigger next byte */
- val |= SPACEMIT_CR_ALDIE | SPACEMIT_CR_TB;
- writel(val, i2c->base + SPACEMIT_ICR);
+ val |= SPACEMIT_CR_STOP;
+
+ if (i2c->read)
+ val |= SPACEMIT_CR_ACKNAK;
}
+ writel(val, i2c->base + SPACEMIT_ICR);
}
err_out:
@@ -476,12 +494,13 @@ static int spacemit_i2c_xfer(struct i2c_adapter *adapt, struct i2c_msg *msgs, in
spacemit_i2c_enable(i2c);
ret = spacemit_i2c_wait_bus_idle(i2c);
- if (!ret)
+ if (!ret) {
ret = spacemit_i2c_xfer_msg(i2c);
- else if (ret < 0)
- dev_dbg(i2c->dev, "i2c transfer error: %d\n", ret);
- else
+ if (ret < 0)
+ dev_dbg(i2c->dev, "i2c transfer error: %d\n", ret);
+ } else {
spacemit_i2c_check_bus_release(i2c);
+ }
spacemit_i2c_disable(i2c);
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index ab456c3717db..dee40704825c 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -1243,6 +1243,7 @@ static int mtk_i2c_transfer(struct i2c_adapter *adap,
{
int ret;
int left_num = num;
+ bool write_then_read_en = false;
struct mtk_i2c *i2c = i2c_get_adapdata(adap);
ret = clk_bulk_enable(I2C_MT65XX_CLK_MAX, i2c->clocks);
@@ -1256,6 +1257,7 @@ static int mtk_i2c_transfer(struct i2c_adapter *adap,
if (!(msgs[0].flags & I2C_M_RD) && (msgs[1].flags & I2C_M_RD) &&
msgs[0].addr == msgs[1].addr) {
i2c->auto_restart = 0;
+ write_then_read_en = true;
}
}
@@ -1280,12 +1282,10 @@ static int mtk_i2c_transfer(struct i2c_adapter *adap,
else
i2c->op = I2C_MASTER_WR;
- if (!i2c->auto_restart) {
- if (num > 1) {
- /* combined two messages into one transaction */
- i2c->op = I2C_MASTER_WRRD;
- left_num--;
- }
+ if (write_then_read_en) {
+ /* combined two messages into one transaction */
+ i2c->op = I2C_MASTER_WRRD;
+ left_num--;
}
/* always use DMA mode. */
@@ -1293,7 +1293,10 @@ static int mtk_i2c_transfer(struct i2c_adapter *adap,
if (ret < 0)
goto err_exit;
- msgs++;
+ if (i2c->op == I2C_MASTER_WRRD)
+ msgs += 2;
+ else
+ msgs++;
}
/* the return value is number of executed messages */
ret = num;
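
The mt65xx change above replaces the indirect `!auto_restart && num > 1` test with an explicit flag computed once at the top of the transfer. A hedged sketch of the predicate it encodes (not the driver's exact code; the two-message guard is assumed from context):

#include <linux/i2c.h>

/* A write followed by a read to the same address can be combined
 * into a single WRRD transaction; anything else is transferred
 * message by message. */
static bool example_is_write_then_read(const struct i2c_msg *msgs, int num)
{
	return num == 2 &&
	       !(msgs[0].flags & I2C_M_RD) &&
	       (msgs[1].flags & I2C_M_RD) &&
	       msgs[0].addr == msgs[1].addr;
}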
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index ff2289b52c84..95a577764d5c 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -870,7 +870,13 @@ static int geni_i2c_probe(struct platform_device *pdev)
goto err_clk;
}
proto = geni_se_read_proto(&gi2c->se);
- if (proto != GENI_SE_I2C) {
+ if (proto == GENI_SE_INVALID_PROTO) {
+ ret = geni_load_se_firmware(&gi2c->se, GENI_SE_I2C);
+ if (ret) {
+ dev_err_probe(dev, ret, "i2c firmware load failed ret: %d\n", ret);
+ goto err_resources;
+ }
+ } else if (proto != GENI_SE_I2C) {
ret = dev_err_probe(dev, -ENXIO, "Invalid proto %d\n", proto);
goto err_resources;
}
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index f4fa4703acbd..8138f5ef40f0 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -138,7 +138,6 @@ static void i2c_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat)
#ifdef CONFIG_OF
static const struct of_device_id s3c24xx_i2c_match[] = {
- { .compatible = "samsung,s3c2410-i2c", .data = (void *)0 },
{ .compatible = "samsung,s3c2440-i2c", .data = (void *)QUIRK_S3C2440 },
{ .compatible = "samsung,s3c2440-hdmiphy-i2c",
.data = (void *)(QUIRK_S3C2440 | QUIRK_HDMIPHY | QUIRK_NO_GPIO) },
diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c
index 56b2e5c5fb49..26ec34b19ad5 100644
--- a/drivers/i2c/busses/i2c-sprd.c
+++ b/drivers/i2c/busses/i2c-sprd.c
@@ -425,7 +425,7 @@ static irqreturn_t sprd_i2c_isr(int irq, void *dev_id)
* If we did not get one ACK from target when writing data, then we
* should finish this transmission since we got some errors.
*
- * When writing data, if i2c_tran == 0 which means we have writen
+ * When writing data, if i2c_tran == 0 which means we have written
* done all data, then we can finish this transmission.
*
* When reading data, if count < rx fifo full threshold, which
diff --git a/drivers/i2c/busses/i2c-st.c b/drivers/i2c/busses/i2c-st.c
index bf28f8e3ee6b..97d70e667227 100644
--- a/drivers/i2c/busses/i2c-st.c
+++ b/drivers/i2c/busses/i2c-st.c
@@ -152,7 +152,7 @@ struct st_i2c_timings {
/**
* struct st_i2c_client - client specific data
* @addr: 8-bit target addr, including r/w bit
- * @count: number of bytes to be transfered
+ * @count: number of bytes to be transferred
* @xfered: number of bytes already transferred
* @buf: data buffer
* @result: result of the transfer
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 4eb31b913c1a..e533460bccc3 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -1649,7 +1649,33 @@ static const struct tegra_i2c_hw_feature tegra194_i2c_hw = {
.has_interface_timing_reg = true,
};
+static const struct tegra_i2c_hw_feature tegra256_i2c_hw = {
+ .has_continue_xfer_support = true,
+ .has_per_pkt_xfer_complete_irq = true,
+ .clk_divisor_hs_mode = 7,
+ .clk_divisor_std_mode = 0x7a,
+ .clk_divisor_fast_mode = 0x40,
+ .clk_divisor_fast_plus_mode = 0x19,
+ .has_config_load_reg = true,
+ .has_multi_master_mode = true,
+ .has_slcg_override_reg = true,
+ .has_mst_fifo = true,
+ .has_mst_reset = true,
+ .quirks = &tegra194_i2c_quirks,
+ .supports_bus_clear = true,
+ .has_apb_dma = false,
+ .tlow_std_mode = 0x8,
+ .thigh_std_mode = 0x7,
+ .tlow_fast_fastplus_mode = 0x3,
+ .thigh_fast_fastplus_mode = 0x3,
+ .setup_hold_time_std_mode = 0x08080808,
+ .setup_hold_time_fast_fast_plus_mode = 0x02020202,
+ .setup_hold_time_hs_mode = 0x090909,
+ .has_interface_timing_reg = true,
+};
+
static const struct of_device_id tegra_i2c_of_match[] = {
+ { .compatible = "nvidia,tegra256-i2c", .data = &tegra256_i2c_hw, },
{ .compatible = "nvidia,tegra194-i2c", .data = &tegra194_i2c_hw, },
{ .compatible = "nvidia,tegra186-i2c", .data = &tegra186_i2c_hw, },
#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
diff --git a/drivers/i2c/busses/i2c-viperboard.c b/drivers/i2c/busses/i2c-viperboard.c
index 1bd602852e35..f596efcc291c 100644
--- a/drivers/i2c/busses/i2c-viperboard.c
+++ b/drivers/i2c/busses/i2c-viperboard.c
@@ -204,7 +204,7 @@ static int vprbrd_i2c_read(struct vprbrd *vb, struct i2c_msg *msg)
/* copy the received data */
memcpy(msg->buf + start, rmsg, len1);
- /* second read transfer if neccessary */
+ /* second read transfer if necessary */
if (len2 > 0) {
ret = vprbrd_i2c_receive(vb->usb_dev, rmsg, len2);
if (ret < 0)
diff --git a/drivers/i2c/i2c-boardinfo.c b/drivers/i2c/i2c-boardinfo.c
index 4df8ad092df3..338800321f8b 100644
--- a/drivers/i2c/i2c-boardinfo.c
+++ b/drivers/i2c/i2c-boardinfo.c
@@ -22,7 +22,7 @@ EXPORT_SYMBOL_GPL(__i2c_board_lock);
LIST_HEAD(__i2c_board_list);
EXPORT_SYMBOL_GPL(__i2c_board_list);
-int __i2c_first_dynamic_bus_num;
+int __i2c_first_dynamic_bus_num __ro_after_init;
EXPORT_SYMBOL_GPL(__i2c_first_dynamic_bus_num);
@@ -48,7 +48,7 @@ EXPORT_SYMBOL_GPL(__i2c_first_dynamic_bus_num);
* The board info passed can safely be __initdata, but be careful of embedded
* pointers (for platform_data, functions, etc) since that won't be copied.
*/
-int i2c_register_board_info(int busnum, struct i2c_board_info const *info, unsigned len)
+int __init i2c_register_board_info(int busnum, struct i2c_board_info const *info, unsigned len)
{
int status;
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index ecca8c006b02..ae7e9c8b65a6 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -573,7 +573,8 @@ static int i2c_device_probe(struct device *dev)
goto err_clear_wakeup_irq;
do_power_on = !i2c_acpi_waive_d0_probe(dev);
- status = dev_pm_domain_attach(&client->dev, do_power_on ? PD_FLAG_ATTACH_POWER_ON : 0);
+ status = dev_pm_domain_attach(&client->dev, PD_FLAG_DETACH_POWER_OFF |
+ (do_power_on ? PD_FLAG_ATTACH_POWER_ON : 0));
if (status)
goto err_clear_wakeup_irq;
@@ -581,7 +582,7 @@ static int i2c_device_probe(struct device *dev)
GFP_KERNEL);
if (!client->devres_group_id) {
status = -ENOMEM;
- goto err_detach_pm_domain;
+ goto err_clear_wakeup_irq;
}
client->debugfs = debugfs_create_dir(dev_name(&client->dev),
@@ -608,8 +609,6 @@ static int i2c_device_probe(struct device *dev)
err_release_driver_resources:
debugfs_remove_recursive(client->debugfs);
devres_release_group(&client->dev, client->devres_group_id);
-err_detach_pm_domain:
- dev_pm_domain_detach(&client->dev, do_power_on);
err_clear_wakeup_irq:
dev_pm_clear_wake_irq(&client->dev);
device_init_wakeup(&client->dev, false);
@@ -636,8 +635,6 @@ static void i2c_device_remove(struct device *dev)
devres_release_group(&client->dev, client->devres_group_id);
- dev_pm_domain_detach(&client->dev, true);
-
dev_pm_clear_wake_irq(&client->dev);
device_init_wakeup(&client->dev, false);
diff --git a/drivers/i2c/i2c-core-slave.c b/drivers/i2c/i2c-core-slave.c
index 7ee6b992b835..02ca55c2246b 100644
--- a/drivers/i2c/i2c-core-slave.c
+++ b/drivers/i2c/i2c-core-slave.c
@@ -112,10 +112,9 @@ bool i2c_detect_slave_mode(struct device *dev)
struct fwnode_handle *fwnode = dev_fwnode(dev);
if (is_of_node(fwnode)) {
- struct fwnode_handle *child __free(fwnode_handle) = NULL;
u32 reg;
- fwnode_for_each_child_node(fwnode, child) {
+ fwnode_for_each_child_node_scoped(fwnode, child) {
fwnode_property_read_u32(child, "reg", &reg);
if (reg & I2C_OWN_SLAVE_ADDRESS)
return true;
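
The conversion above relies on the scoped fwnode iterator from <linux/property.h>, which puts the child reference automatically whenever the loop body exits, including on an early return. A minimal sketch of the pattern:

#include <linux/i2c.h>
#include <linux/property.h>

/* The scoped iterator owns the child reference; no explicit
 * fwnode_handle_put() is needed on any exit path. */
static bool example_has_slave_address(struct device *dev)
{
	u32 reg;

	fwnode_for_each_child_node_scoped(dev_fwnode(dev), child) {
		if (!fwnode_property_read_u32(child, "reg", &reg) &&
		    (reg & I2C_OWN_SLAVE_ADDRESS))
			return true;	/* reference dropped automatically */
	}

	return false;
}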
diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
index 4d8690981a55..d59644e50f14 100644
--- a/drivers/i2c/i2c-mux.c
+++ b/drivers/i2c/i2c-mux.c
@@ -241,12 +241,9 @@ struct i2c_mux_core *i2c_mux_alloc(struct i2c_adapter *parent,
muxc->parent = parent;
muxc->dev = dev;
- if (flags & I2C_MUX_LOCKED)
- muxc->mux_locked = true;
- if (flags & I2C_MUX_ARBITRATOR)
- muxc->arbitrator = true;
- if (flags & I2C_MUX_GATE)
- muxc->gate = true;
+ muxc->mux_locked = !!(flags & I2C_MUX_LOCKED);
+ muxc->arbitrator = !!(flags & I2C_MUX_ARBITRATOR);
+ muxc->gate = !!(flags & I2C_MUX_GATE);
muxc->select = select;
muxc->deselect = deselect;
muxc->max_adapters = max_adapters;
diff --git a/drivers/i2c/muxes/i2c-mux-pca9541.c b/drivers/i2c/muxes/i2c-mux-pca9541.c
index 8663c8a7c269..3d8002caf703 100644
--- a/drivers/i2c/muxes/i2c-mux-pca9541.c
+++ b/drivers/i2c/muxes/i2c-mux-pca9541.c
@@ -63,10 +63,6 @@
#define mybus(x) (!((x) & MYBUS) || ((x) & MYBUS) == MYBUS)
#define busoff(x) (!((x) & BUSON) || ((x) & BUSON) == BUSON)
-/* arbitration timeouts, in jiffies */
-#define ARB_TIMEOUT (HZ / 8) /* 125 ms until forcing bus ownership */
-#define ARB2_TIMEOUT (HZ / 4) /* 250 ms until acquisition failure */
-
/* arbitration retry delays, in us */
#define SELECT_DELAY_SHORT 50
#define SELECT_DELAY_LONG 1000
@@ -229,6 +225,9 @@ static int pca9541_arbitrate(struct i2c_client *client)
*/
data->select_timeout = SELECT_DELAY_LONG;
if (time_is_before_eq_jiffies(data->arb_timeout)) {
+ dev_warn(&client->dev,
+ "Arbitration timeout on I2C bus, forcing bus ownership\n");
+
/* Time is up, take the bus and reset it. */
pca9541_reg_write(client,
PCA9541_CONTROL,
@@ -251,10 +250,10 @@ static int pca9541_select_chan(struct i2c_mux_core *muxc, u32 chan)
struct pca9541 *data = i2c_mux_priv(muxc);
struct i2c_client *client = data->client;
int ret;
- unsigned long timeout = jiffies + ARB2_TIMEOUT;
+ unsigned long timeout = jiffies + (2 * client->adapter->timeout);
/* give up after this time */
- data->arb_timeout = jiffies + ARB_TIMEOUT;
+ data->arb_timeout = jiffies + client->adapter->timeout;
/* force bus ownership after this time */
do {
@@ -267,6 +266,7 @@ static int pca9541_select_chan(struct i2c_mux_core *muxc, u32 chan)
else
msleep(data->select_timeout / 1000);
} while (time_is_after_eq_jiffies(timeout));
+ dev_warn(&client->dev, "Failed to acquire I2C bus, timed out\n");
return -ETIMEDOUT;
}
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index b9f370c9f018..75c8d08fa24e 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -118,7 +118,6 @@ struct pca954x {
raw_spinlock_t lock;
struct regulator *supply;
- struct gpio_desc *reset_gpio;
struct reset_control *reset_cont;
};
@@ -316,6 +315,25 @@ static u8 pca954x_regval(struct pca954x *data, u8 chan)
return 1 << chan;
}
+static void pca954x_reset_assert(struct pca954x *data)
+{
+ if (data->reset_cont)
+ reset_control_assert(data->reset_cont);
+}
+
+static void pca954x_reset_deassert(struct pca954x *data)
+{
+ if (data->reset_cont)
+ reset_control_deassert(data->reset_cont);
+}
+
+static void pca954x_reset_mux(struct pca954x *data)
+{
+ pca954x_reset_assert(data);
+ udelay(1);
+ pca954x_reset_deassert(data);
+}
+
static int pca954x_select_chan(struct i2c_mux_core *muxc, u32 chan)
{
struct pca954x *data = i2c_mux_priv(muxc);
@@ -329,6 +347,8 @@ static int pca954x_select_chan(struct i2c_mux_core *muxc, u32 chan)
ret = pca954x_reg_write(muxc->parent, client, regval);
data->last_chan = ret < 0 ? 0 : regval;
}
+ if (ret == -ETIMEDOUT && data->reset_cont)
+ pca954x_reset_mux(data);
return ret;
}
@@ -338,6 +358,7 @@ static int pca954x_deselect_mux(struct i2c_mux_core *muxc, u32 chan)
struct pca954x *data = i2c_mux_priv(muxc);
struct i2c_client *client = data->client;
s32 idle_state;
+ int ret = 0;
idle_state = READ_ONCE(data->idle_state);
if (idle_state >= 0)
@@ -347,8 +368,10 @@ static int pca954x_deselect_mux(struct i2c_mux_core *muxc, u32 chan)
if (idle_state == MUX_IDLE_DISCONNECT) {
/* Deselect active channel */
data->last_chan = 0;
- return pca954x_reg_write(muxc->parent, client,
- data->last_chan);
+ ret = pca954x_reg_write(muxc->parent, client,
+ data->last_chan);
+ if (ret == -ETIMEDOUT && data->reset_cont)
+ pca954x_reset_mux(data);
}
/* otherwise leave as-is */
@@ -527,29 +550,10 @@ static int pca954x_get_reset(struct device *dev, struct pca954x *data)
if (IS_ERR(data->reset_cont))
return dev_err_probe(dev, PTR_ERR(data->reset_cont),
"Failed to get reset\n");
- else if (data->reset_cont)
- return 0;
-
- /*
- * fallback to legacy reset-gpios
- */
- data->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(data->reset_gpio)) {
- return dev_err_probe(dev, PTR_ERR(data->reset_gpio),
- "Failed to get reset gpio");
- }
return 0;
}
-static void pca954x_reset_deassert(struct pca954x *data)
-{
- if (data->reset_cont)
- reset_control_deassert(data->reset_cont);
- else
- gpiod_set_value_cansleep(data->reset_gpio, 0);
-}
-
/*
* I2C init/probing/exit functions
*/
@@ -589,7 +593,7 @@ static int pca954x_probe(struct i2c_client *client)
if (ret)
goto fail_cleanup;
- if (data->reset_cont || data->reset_gpio) {
+ if (data->reset_cont) {
udelay(1);
pca954x_reset_deassert(data);
/* Give the chip some time to recover. */
diff --git a/drivers/i3c/internals.h b/drivers/i3c/internals.h
index 0d857cc68cc5..79ceaa5f5afd 100644
--- a/drivers/i3c/internals.h
+++ b/drivers/i3c/internals.h
@@ -38,7 +38,11 @@ static inline void i3c_writel_fifo(void __iomem *addr, const void *buf,
u32 tmp = 0;
memcpy(&tmp, buf + (nbytes & ~3), nbytes & 3);
- writel(tmp, addr);
+ /*
+ * writesl() instead of writel() to keep FIFO
+ * byteorder on big-endian targets
+ */
+ writesl(addr, &tmp, 1);
}
}
@@ -55,7 +59,11 @@ static inline void i3c_readl_fifo(const void __iomem *addr, void *buf,
if (nbytes & 3) {
u32 tmp;
- tmp = readl(addr);
+ /*
+ * readsl() instead of readl() to keep FIFO
+ * byteorder on big-endian targets
+ */
+ readsl(addr, &tmp, 1);
memcpy(buf + (nbytes & ~3), &tmp, nbytes & 3);
}
}
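
The substitution above matters only for the partial trailing word: writel() converts its value to little-endian before storing, so on a big-endian CPU the tail bytes would reach the FIFO reversed, while writesl()/readsl() perform raw native-order 32-bit accesses that preserve memory byte order. A short illustration (fifo is a hypothetical register address):

u8 buf[3] = { 0xaa, 0xbb, 0xcc };
u32 tmp = 0;

memcpy(&tmp, buf, 3);
writel(tmp, fifo);	/* byteswapped on big-endian targets */
writesl(fifo, &tmp, 1);	/* memory byte order kept on all targets */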
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
index 2ef898a8fd80..d946db75df70 100644
--- a/drivers/i3c/master.c
+++ b/drivers/i3c/master.c
@@ -8,6 +8,7 @@
#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/device.h>
+#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/kernel.h>
@@ -1728,6 +1729,79 @@ int i3c_master_do_daa(struct i3c_master_controller *master)
EXPORT_SYMBOL_GPL(i3c_master_do_daa);
/**
+ * i3c_master_dma_map_single() - Map buffer for single DMA transfer
+ * @dev: device object of a device doing DMA
+ * @buf: destination/source buffer for DMA
+ * @len: length of transfer
+ * @force_bounce: if true, always use a bounce buffer; if false, a bounce
+ * buffer is used only when one is required
+ * @dir: DMA direction
+ *
+ * Map buffer for a DMA transfer and allocate a bounce buffer if required.
+ *
+ * Return: I3C DMA transfer descriptor or NULL in case of error.
+ */
+struct i3c_dma *i3c_master_dma_map_single(struct device *dev, void *buf,
+ size_t len, bool force_bounce, enum dma_data_direction dir)
+{
+ struct i3c_dma *dma_xfer __free(kfree) = NULL;
+ void *bounce __free(kfree) = NULL;
+ void *dma_buf = buf;
+
+ dma_xfer = kzalloc(sizeof(*dma_xfer), GFP_KERNEL);
+ if (!dma_xfer)
+ return NULL;
+
+ dma_xfer->dev = dev;
+ dma_xfer->buf = buf;
+ dma_xfer->dir = dir;
+ dma_xfer->len = len;
+ dma_xfer->map_len = len;
+
+ if (is_vmalloc_addr(buf))
+ force_bounce = true;
+
+ if (force_bounce) {
+ dma_xfer->map_len = ALIGN(len, cache_line_size());
+ if (dir == DMA_FROM_DEVICE)
+ bounce = kzalloc(dma_xfer->map_len, GFP_KERNEL);
+ else
+ bounce = kmemdup(buf, dma_xfer->map_len, GFP_KERNEL);
+ if (!bounce)
+ return NULL;
+ dma_buf = bounce;
+ }
+
+ dma_xfer->addr = dma_map_single(dev, dma_buf, dma_xfer->map_len, dir);
+ if (dma_mapping_error(dev, dma_xfer->addr))
+ return NULL;
+
+ dma_xfer->bounce_buf = no_free_ptr(bounce);
+ return no_free_ptr(dma_xfer);
+}
+EXPORT_SYMBOL_GPL(i3c_master_dma_map_single);
+
+/**
+ * i3c_master_dma_unmap_single() - Unmap buffer after DMA
+ * @dma_xfer: DMA transfer and mapping descriptor
+ *
+ * Unmap buffer and cleanup DMA transfer descriptor.
+ */
+void i3c_master_dma_unmap_single(struct i3c_dma *dma_xfer)
+{
+ dma_unmap_single(dma_xfer->dev, dma_xfer->addr,
+ dma_xfer->map_len, dma_xfer->dir);
+ if (dma_xfer->bounce_buf) {
+ if (dma_xfer->dir == DMA_FROM_DEVICE)
+ memcpy(dma_xfer->buf, dma_xfer->bounce_buf,
+ dma_xfer->len);
+ kfree(dma_xfer->bounce_buf);
+ }
+ kfree(dma_xfer);
+}
+EXPORT_SYMBOL_GPL(i3c_master_dma_unmap_single);
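
A hedged sketch of how a controller driver might consume the two helpers above (program_hw_dma() and the xfer layout are hypothetical; only the i3c_master_dma_* calls come from this patch):

struct i3c_dma *dma;

dma = i3c_master_dma_map_single(dev, xfer->buf, xfer->len, false,
				xfer->rnw ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
if (!dma)
	return -ENOMEM;

program_hw_dma(hw, dma->addr, dma->map_len);	/* hypothetical HW setup */
wait_for_completion(&xfer->done);

/* Copies bounce data back (DMA_FROM_DEVICE) and frees the descriptor. */
i3c_master_dma_unmap_single(dma);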
+
+/**
* i3c_master_set_info() - set master device information
* @master: master used to send frames on the bus
* @info: I3C device information
@@ -2490,9 +2564,7 @@ static int i3c_master_i2c_adapter_init(struct i3c_master_controller *master)
adap->owner = master->dev.parent->driver->owner;
adap->algo = &i3c_master_i2c_algo;
strscpy(adap->name, dev_name(master->dev.parent), sizeof(adap->name));
-
- /* FIXME: Should we allow i3c masters to override these values? */
- adap->timeout = 1000;
+ adap->timeout = HZ;
adap->retries = 3;
id = of_alias_get_id(master->dev.of_node, "i2c");
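
Worth noting for the timeout change above: i2c_adapter.timeout is expressed in jiffies, so the old literal 1000 meant 1000 ticks, anywhere from one to ten seconds depending on CONFIG_HZ, whereas HZ is exactly one second on every configuration. The two forms below state that intent equivalently:

adap->timeout = HZ;			 /* one second, HZ-independent */
adap->timeout = msecs_to_jiffies(1000);	 /* equivalent, more explicit */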
diff --git a/drivers/i3c/master/Kconfig b/drivers/i3c/master/Kconfig
index 13df2944f2ec..82cf330778d5 100644
--- a/drivers/i3c/master/Kconfig
+++ b/drivers/i3c/master/Kconfig
@@ -1,4 +1,15 @@
# SPDX-License-Identifier: GPL-2.0-only
+config ADI_I3C_MASTER
+ tristate "Analog Devices I3C master driver"
+ depends on HAS_IOMEM
+ help
+ Support for the Analog Devices I3C Controller IP, an AXI-interfaced
+ IP core that supports I3C and I2C devices, multiple speed grades,
+ and I3C IBIs.
+
+ This driver can also be built as a module. If so, the module will be
+ called adi-i3c-master.
+
config CDNS_I3C_MASTER
tristate "Cadence I3C master driver"
depends on HAS_IOMEM
diff --git a/drivers/i3c/master/Makefile b/drivers/i3c/master/Makefile
index aac74f3e3851..816a227b6f7a 100644
--- a/drivers/i3c/master/Makefile
+++ b/drivers/i3c/master/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_ADI_I3C_MASTER) += adi-i3c-master.o
obj-$(CONFIG_CDNS_I3C_MASTER) += i3c-master-cdns.o
obj-$(CONFIG_DW_I3C_MASTER) += dw-i3c-master.o
obj-$(CONFIG_AST2600_I3C_MASTER) += ast2600-i3c-master.o
diff --git a/drivers/i3c/master/adi-i3c-master.c b/drivers/i3c/master/adi-i3c-master.c
new file mode 100644
index 000000000000..82ac0b3d057a
--- /dev/null
+++ b/drivers/i3c/master/adi-i3c-master.c
@@ -0,0 +1,1019 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * I3C Controller driver
+ * Copyright 2025 Analog Devices Inc.
+ * Author: Jorge Marques <jorge.marques@analog.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/adi-axi-common.h>
+#include <linux/i3c/master.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "../internals.h"
+
+#define ADI_MAX_DEVS 16
+#define ADI_HAS_MDB_FROM_BCR(x) (FIELD_GET(BIT(2), (x)))
+
+#define REG_ENABLE 0x040
+
+#define REG_PID_L 0x054
+#define REG_PID_H 0x058
+#define REG_DCR_BCR_DA 0x05c
+#define REG_DCR_BCR_DA_GET_DA(x) FIELD_GET(GENMASK(22, 16), (x))
+#define REG_DCR_BCR_DA_GET_BCR(x) FIELD_GET(GENMASK(15, 8), (x))
+#define REG_DCR_BCR_DA_GET_DCR(x) FIELD_GET(GENMASK(7, 0), (x))
+
+#define REG_IRQ_MASK 0x080
+#define REG_IRQ_PENDING 0x084
+#define REG_IRQ_PENDING_DAA BIT(7)
+#define REG_IRQ_PENDING_IBI BIT(6)
+#define REG_IRQ_PENDING_CMDR BIT(5)
+
+#define REG_CMD_FIFO 0x0d4
+#define REG_CMD_FIFO_0_IS_CCC BIT(22)
+#define REG_CMD_FIFO_0_BCAST BIT(21)
+#define REG_CMD_FIFO_0_SR BIT(20)
+#define REG_CMD_FIFO_0_LEN(l) FIELD_PREP(GENMASK(19, 8), (l))
+#define REG_CMD_FIFO_0_DEV_ADDR(a) FIELD_PREP(GENMASK(7, 1), (a))
+#define REG_CMD_FIFO_0_RNW BIT(0)
+#define REG_CMD_FIFO_1_CCC(id) FIELD_PREP(GENMASK(7, 0), (id))
+
+#define REG_CMD_FIFO_ROOM 0x0c0
+#define REG_CMDR_FIFO 0x0d8
+#define REG_CMDR_FIFO_UDA_ERROR 8
+#define REG_CMDR_FIFO_NACK_RESP 6
+#define REG_CMDR_FIFO_CE2_ERROR 4
+#define REG_CMDR_FIFO_CE0_ERROR 1
+#define REG_CMDR_FIFO_NO_ERROR 0
+#define REG_CMDR_FIFO_ERROR(x) FIELD_GET(GENMASK(23, 20), (x))
+#define REG_CMDR_FIFO_XFER_BYTES(x) FIELD_GET(GENMASK(19, 8), (x))
+
+#define REG_SDO_FIFO 0x0dc
+#define REG_SDO_FIFO_ROOM 0x0c8
+#define REG_SDI_FIFO 0x0e0
+#define REG_IBI_FIFO 0x0e4
+#define REG_FIFO_STATUS 0x0e8
+#define REG_FIFO_STATUS_CMDR_EMPTY BIT(0)
+#define REG_FIFO_STATUS_IBI_EMPTY BIT(1)
+
+#define REG_OPS 0x100
+#define REG_OPS_PP_SG_MASK GENMASK(6, 5)
+#define REG_OPS_SET_SG(x) FIELD_PREP(REG_OPS_PP_SG_MASK, (x))
+
+#define REG_IBI_CONFIG 0x140
+#define REG_IBI_CONFIG_ENABLE BIT(0)
+#define REG_IBI_CONFIG_LISTEN BIT(1)
+
+#define REG_DEV_CHAR 0x180
+#define REG_DEV_CHAR_IS_I2C BIT(0)
+#define REG_DEV_CHAR_IS_ATTACHED BIT(1)
+#define REG_DEV_CHAR_BCR_IBI(x) FIELD_PREP(GENMASK(3, 2), (x))
+#define REG_DEV_CHAR_WEN BIT(8)
+#define REG_DEV_CHAR_ADDR(x) FIELD_PREP(GENMASK(15, 9), (x))
+
+enum speed_grade {PP_SG_UNSET, PP_SG_1MHZ, PP_SG_3MHZ, PP_SG_6MHZ, PP_SG_12MHZ};
+
+struct adi_i3c_cmd {
+ u32 cmd0;
+ u32 cmd1;
+ u32 tx_len;
+ const void *tx_buf;
+ u32 rx_len;
+ void *rx_buf;
+ u32 error;
+};
+
+struct adi_i3c_xfer {
+ struct list_head node;
+ struct completion comp;
+ int ret;
+ unsigned int ncmds;
+ unsigned int ncmds_comp;
+ struct adi_i3c_cmd cmds[] __counted_by(ncmds);
+};
+
+struct adi_i3c_master {
+ struct i3c_master_controller base;
+ u32 free_rr_slots;
+ struct {
+ unsigned int num_slots;
+ struct i3c_dev_desc **slots;
+ spinlock_t lock; /* Protect IBI slot access */
+ } ibi;
+ struct {
+ struct list_head list;
+ struct adi_i3c_xfer *cur;
+ spinlock_t lock; /* Protect transfer */
+ } xferqueue;
+ void __iomem *regs;
+ struct clk *clk;
+ unsigned long i3c_scl_lim;
+ struct {
+ u8 addrs[ADI_MAX_DEVS];
+ u8 index;
+ } daa;
+};
+
+static inline struct adi_i3c_master *to_adi_i3c_master(struct i3c_master_controller *master)
+{
+ return container_of(master, struct adi_i3c_master, base);
+}
+
+static void adi_i3c_master_wr_to_tx_fifo(struct adi_i3c_master *master,
+ const u8 *buf, unsigned int nbytes)
+{
+ unsigned int n, m;
+
+ n = readl(master->regs + REG_SDO_FIFO_ROOM);
+ m = min(n, nbytes);
+ i3c_writel_fifo(master->regs + REG_SDO_FIFO, buf, m);
+}
+
+static void adi_i3c_master_rd_from_rx_fifo(struct adi_i3c_master *master,
+ u8 *buf, unsigned int nbytes)
+{
+ i3c_readl_fifo(master->regs + REG_SDI_FIFO, buf, nbytes);
+}
+
+static bool adi_i3c_master_supports_ccc_cmd(struct i3c_master_controller *m,
+ const struct i3c_ccc_cmd *cmd)
+{
+ if (cmd->ndests > 1)
+ return false;
+
+ switch (cmd->id) {
+ case I3C_CCC_ENEC(true):
+ case I3C_CCC_ENEC(false):
+ case I3C_CCC_DISEC(true):
+ case I3C_CCC_DISEC(false):
+ case I3C_CCC_RSTDAA(true):
+ case I3C_CCC_RSTDAA(false):
+ case I3C_CCC_ENTDAA:
+ case I3C_CCC_SETDASA:
+ case I3C_CCC_SETNEWDA:
+ case I3C_CCC_GETMWL:
+ case I3C_CCC_GETMRL:
+ case I3C_CCC_GETPID:
+ case I3C_CCC_GETBCR:
+ case I3C_CCC_GETDCR:
+ case I3C_CCC_GETSTATUS:
+ case I3C_CCC_GETHDRCAP:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+static int adi_i3c_master_disable(struct adi_i3c_master *master)
+{
+ writel(0, master->regs + REG_IBI_CONFIG);
+
+ return 0;
+}
+
+static struct adi_i3c_xfer *adi_i3c_master_alloc_xfer(struct adi_i3c_master *master,
+ unsigned int ncmds)
+{
+ struct adi_i3c_xfer *xfer;
+
+ xfer = kzalloc(struct_size(xfer, cmds, ncmds), GFP_KERNEL);
+ if (!xfer)
+ return NULL;
+
+ INIT_LIST_HEAD(&xfer->node);
+ xfer->ncmds = ncmds;
+ xfer->ret = -ETIMEDOUT;
+
+ return xfer;
+}
+
+static void adi_i3c_master_start_xfer_locked(struct adi_i3c_master *master)
+{
+ struct adi_i3c_xfer *xfer = master->xferqueue.cur;
+ unsigned int i, n, m;
+
+ if (!xfer)
+ return;
+
+ for (i = 0; i < xfer->ncmds; i++) {
+ struct adi_i3c_cmd *cmd = &xfer->cmds[i];
+
+ if (!(cmd->cmd0 & REG_CMD_FIFO_0_RNW))
+ adi_i3c_master_wr_to_tx_fifo(master, cmd->tx_buf, cmd->tx_len);
+ }
+
+ n = readl(master->regs + REG_CMD_FIFO_ROOM);
+ for (i = 0; i < xfer->ncmds; i++) {
+ struct adi_i3c_cmd *cmd = &xfer->cmds[i];
+
+ m = cmd->cmd0 & REG_CMD_FIFO_0_IS_CCC ? 2 : 1;
+ if (m > n)
+ break;
+ writel(cmd->cmd0, master->regs + REG_CMD_FIFO);
+ if (cmd->cmd0 & REG_CMD_FIFO_0_IS_CCC)
+ writel(cmd->cmd1, master->regs + REG_CMD_FIFO);
+ n -= m;
+ }
+}
+
+static void adi_i3c_master_end_xfer_locked(struct adi_i3c_master *master,
+ u32 pending)
+{
+ struct adi_i3c_xfer *xfer = master->xferqueue.cur;
+ int i, ret = 0;
+
+ if (!xfer)
+ return;
+
+ while (!(readl(master->regs + REG_FIFO_STATUS) & REG_FIFO_STATUS_CMDR_EMPTY)) {
+ struct adi_i3c_cmd *cmd;
+ u32 cmdr, rx_len;
+
+ cmdr = readl(master->regs + REG_CMDR_FIFO);
+
+ cmd = &xfer->cmds[xfer->ncmds_comp++];
+ if (cmd->cmd0 & REG_CMD_FIFO_0_RNW) {
+ rx_len = min_t(u32, REG_CMDR_FIFO_XFER_BYTES(cmdr), cmd->rx_len);
+ adi_i3c_master_rd_from_rx_fifo(master, cmd->rx_buf, rx_len);
+ }
+ cmd->error = REG_CMDR_FIFO_ERROR(cmdr);
+ }
+
+ for (i = 0; i < xfer->ncmds_comp; i++) {
+ switch (xfer->cmds[i].error) {
+ case REG_CMDR_FIFO_NO_ERROR:
+ break;
+
+ case REG_CMDR_FIFO_CE0_ERROR:
+ case REG_CMDR_FIFO_CE2_ERROR:
+ case REG_CMDR_FIFO_NACK_RESP:
+ case REG_CMDR_FIFO_UDA_ERROR:
+ ret = -EIO;
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ }
+
+ xfer->ret = ret;
+
+ if (xfer->ncmds_comp != xfer->ncmds)
+ return;
+
+ complete(&xfer->comp);
+
+ xfer = list_first_entry_or_null(&master->xferqueue.list,
+ struct adi_i3c_xfer, node);
+ if (xfer)
+ list_del_init(&xfer->node);
+
+ master->xferqueue.cur = xfer;
+ adi_i3c_master_start_xfer_locked(master);
+}
+
+static void adi_i3c_master_queue_xfer(struct adi_i3c_master *master,
+ struct adi_i3c_xfer *xfer)
+{
+ init_completion(&xfer->comp);
+ guard(spinlock_irqsave)(&master->xferqueue.lock);
+ if (master->xferqueue.cur) {
+ list_add_tail(&xfer->node, &master->xferqueue.list);
+ } else {
+ master->xferqueue.cur = xfer;
+ adi_i3c_master_start_xfer_locked(master);
+ }
+}
+
+static void adi_i3c_master_unqueue_xfer(struct adi_i3c_master *master,
+ struct adi_i3c_xfer *xfer)
+{
+ guard(spinlock_irqsave)(&master->xferqueue.lock);
+ if (master->xferqueue.cur == xfer)
+ master->xferqueue.cur = NULL;
+ else
+ list_del_init(&xfer->node);
+
+ writel(0x01, master->regs + REG_ENABLE);
+ writel(0x00, master->regs + REG_ENABLE);
+ writel(REG_IRQ_PENDING_CMDR, master->regs + REG_IRQ_MASK);
+}
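
The queue/unqueue paths above use scope-based locking from <linux/cleanup.h>: guard() acquires the lock and releases it when the enclosing scope ends, so every return path is covered without explicit unlock calls. A minimal sketch of the idiom (example_* names are hypothetical):

#include <linux/cleanup.h>
#include <linux/spinlock.h>

static void example_queue(struct example_ctrl *ec, struct example_xfer *xfer)
{
	guard(spinlock_irqsave)(&ec->lock);

	if (ec->cur) {
		list_add_tail(&xfer->node, &ec->list);
		return;			/* lock dropped automatically */
	}

	ec->cur = xfer;
	example_start_hw(ec);		/* lock dropped at end of scope */
}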
+
+static enum i3c_error_code adi_i3c_cmd_get_err(struct adi_i3c_cmd *cmd)
+{
+ switch (cmd->error) {
+ case REG_CMDR_FIFO_CE0_ERROR:
+ return I3C_ERROR_M0;
+
+ case REG_CMDR_FIFO_CE2_ERROR:
+ case REG_CMDR_FIFO_NACK_RESP:
+ return I3C_ERROR_M2;
+
+ default:
+ break;
+ }
+
+ return I3C_ERROR_UNKNOWN;
+}
+
+static int adi_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
+ struct i3c_ccc_cmd *cmd)
+{
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+ struct adi_i3c_xfer *xfer __free(kfree) = NULL;
+ struct adi_i3c_cmd *ccmd;
+
+ xfer = adi_i3c_master_alloc_xfer(master, 1);
+ if (!xfer)
+ return -ENOMEM;
+
+ ccmd = xfer->cmds;
+ ccmd->cmd1 = REG_CMD_FIFO_1_CCC(cmd->id);
+ ccmd->cmd0 = REG_CMD_FIFO_0_IS_CCC |
+ REG_CMD_FIFO_0_LEN(cmd->dests[0].payload.len);
+
+ if (cmd->id & I3C_CCC_DIRECT)
+ ccmd->cmd0 |= REG_CMD_FIFO_0_DEV_ADDR(cmd->dests[0].addr);
+
+ if (cmd->rnw) {
+ ccmd->cmd0 |= REG_CMD_FIFO_0_RNW;
+ ccmd->rx_buf = cmd->dests[0].payload.data;
+ ccmd->rx_len = cmd->dests[0].payload.len;
+ } else {
+ ccmd->tx_buf = cmd->dests[0].payload.data;
+ ccmd->tx_len = cmd->dests[0].payload.len;
+ }
+
+ adi_i3c_master_queue_xfer(master, xfer);
+ if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
+ adi_i3c_master_unqueue_xfer(master, xfer);
+
+ cmd->err = adi_i3c_cmd_get_err(&xfer->cmds[0]);
+
+ return 0;
+}
+
+static int adi_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
+ struct i3c_priv_xfer *xfers,
+ int nxfers)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+ struct adi_i3c_xfer *xfer __free(kfree) = NULL;
+ int i, ret;
+
+ if (!nxfers)
+ return 0;
+
+ xfer = adi_i3c_master_alloc_xfer(master, nxfers);
+ if (!xfer)
+ return -ENOMEM;
+
+ for (i = 0; i < nxfers; i++) {
+ struct adi_i3c_cmd *ccmd = &xfer->cmds[i];
+
+ ccmd->cmd0 = REG_CMD_FIFO_0_DEV_ADDR(dev->info.dyn_addr);
+
+ if (xfers[i].rnw) {
+ ccmd->cmd0 |= REG_CMD_FIFO_0_RNW;
+ ccmd->rx_buf = xfers[i].data.in;
+ ccmd->rx_len = xfers[i].len;
+ } else {
+ ccmd->tx_buf = xfers[i].data.out;
+ ccmd->tx_len = xfers[i].len;
+ }
+
+ ccmd->cmd0 |= REG_CMD_FIFO_0_LEN(xfers[i].len);
+
+ if (i < nxfers - 1)
+ ccmd->cmd0 |= REG_CMD_FIFO_0_SR;
+
+ if (!i)
+ ccmd->cmd0 |= REG_CMD_FIFO_0_BCAST;
+ }
+
+ adi_i3c_master_queue_xfer(master, xfer);
+ if (!wait_for_completion_timeout(&xfer->comp,
+ msecs_to_jiffies(1000)))
+ adi_i3c_master_unqueue_xfer(master, xfer);
+
+ ret = xfer->ret;
+
+ for (i = 0; i < nxfers; i++)
+ xfers[i].err = adi_i3c_cmd_get_err(&xfer->cmds[i]);
+
+ return ret;
+}
+
+struct adi_i3c_i2c_dev_data {
+ struct i3c_generic_ibi_pool *ibi_pool;
+ u16 id;
+ s16 ibi;
+};
+
+static int adi_i3c_master_get_rr_slot(struct adi_i3c_master *master,
+ u8 dyn_addr)
+{
+ if (!master->free_rr_slots)
+ return -ENOSPC;
+
+ return ffs(master->free_rr_slots) - 1;
+}
+
+static int adi_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev, u8 dyn_addr)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+ u8 addr;
+
+ addr = dev->info.dyn_addr ? dev->info.dyn_addr : dev->info.static_addr;
+
+ writel(REG_DEV_CHAR_ADDR(dyn_addr), master->regs + REG_DEV_CHAR);
+ writel((readl(master->regs + REG_DEV_CHAR) &
+ ~REG_DEV_CHAR_IS_ATTACHED) | REG_DEV_CHAR_WEN,
+ master->regs + REG_DEV_CHAR);
+
+ writel(REG_DEV_CHAR_ADDR(addr), master->regs + REG_DEV_CHAR);
+ writel(readl(master->regs + REG_DEV_CHAR) |
+ REG_DEV_CHAR_IS_ATTACHED | REG_DEV_CHAR_WEN,
+ master->regs + REG_DEV_CHAR);
+
+ return 0;
+}
+
+static int adi_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+ struct adi_i3c_i2c_dev_data *data;
+ int slot;
+ u8 addr;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ slot = adi_i3c_master_get_rr_slot(master, dev->info.dyn_addr);
+ if (slot < 0) {
+ kfree(data);
+ return slot;
+ }
+
+ data->id = slot;
+ i3c_dev_set_master_data(dev, data);
+ master->free_rr_slots &= ~BIT(slot);
+
+ addr = dev->info.dyn_addr ? dev->info.dyn_addr : dev->info.static_addr;
+
+ writel(REG_DEV_CHAR_ADDR(addr), master->regs + REG_DEV_CHAR);
+ writel(readl(master->regs + REG_DEV_CHAR) |
+ REG_DEV_CHAR_IS_ATTACHED | REG_DEV_CHAR_WEN,
+ master->regs + REG_DEV_CHAR);
+
+ return 0;
+}
+
+static void adi_i3c_master_sync_dev_char(struct i3c_master_controller *m)
+{
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+ struct i3c_dev_desc *i3cdev;
+ u32 bcr_ibi;
+ u8 addr;
+
+ i3c_bus_for_each_i3cdev(&m->bus, i3cdev) {
+ addr = i3cdev->info.dyn_addr ?
+ i3cdev->info.dyn_addr : i3cdev->info.static_addr;
+ writel(REG_DEV_CHAR_ADDR(addr), master->regs + REG_DEV_CHAR);
+ bcr_ibi = FIELD_GET(I3C_BCR_IBI_PAYLOAD | I3C_BCR_IBI_REQ_CAP, (i3cdev->info.bcr));
+ writel(readl(master->regs + REG_DEV_CHAR) |
+ REG_DEV_CHAR_BCR_IBI(bcr_ibi) | REG_DEV_CHAR_WEN,
+ master->regs + REG_DEV_CHAR);
+ }
+}
+
+static void adi_i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+ struct adi_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+ u8 addr;
+
+ addr = dev->info.dyn_addr ? dev->info.dyn_addr : dev->info.static_addr;
+
+ writel(REG_DEV_CHAR_ADDR(addr), master->regs + REG_DEV_CHAR);
+ writel((readl(master->regs + REG_DEV_CHAR) &
+ ~REG_DEV_CHAR_IS_ATTACHED) | REG_DEV_CHAR_WEN,
+ master->regs + REG_DEV_CHAR);
+
+ i3c_dev_set_master_data(dev, NULL);
+ master->free_rr_slots |= BIT(data->id);
+ kfree(data);
+}
+
+static int adi_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+ struct adi_i3c_i2c_dev_data *data;
+ int slot;
+
+ slot = adi_i3c_master_get_rr_slot(master, 0);
+ if (slot < 0)
+ return slot;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->id = slot;
+ master->free_rr_slots &= ~BIT(slot);
+ i2c_dev_set_master_data(dev, data);
+
+ writel(REG_DEV_CHAR_ADDR(dev->addr) |
+ REG_DEV_CHAR_IS_I2C | REG_DEV_CHAR_IS_ATTACHED | REG_DEV_CHAR_WEN,
+ master->regs + REG_DEV_CHAR);
+
+ return 0;
+}
+
+static void adi_i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+ struct adi_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
+
+ writel(REG_DEV_CHAR_ADDR(dev->addr) |
+ REG_DEV_CHAR_IS_I2C | REG_DEV_CHAR_WEN,
+ master->regs + REG_DEV_CHAR);
+
+ i2c_dev_set_master_data(dev, NULL);
+ master->free_rr_slots |= BIT(data->id);
+ kfree(data);
+}
+
+static void adi_i3c_master_bus_cleanup(struct i3c_master_controller *m)
+{
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+
+ adi_i3c_master_disable(master);
+}
+
+static void adi_i3c_master_upd_i3c_scl_lim(struct adi_i3c_master *master)
+{
+ struct i3c_master_controller *m = &master->base;
+ struct i3c_bus *bus = i3c_master_get_bus(m);
+ u8 i3c_scl_lim = 0;
+ struct i3c_dev_desc *dev;
+ u8 pp_sg;
+
+ i3c_bus_for_each_i3cdev(bus, dev) {
+ u8 max_fscl;
+
+ max_fscl = max(I3C_CCC_MAX_SDR_FSCL(dev->info.max_read_ds),
+ I3C_CCC_MAX_SDR_FSCL(dev->info.max_write_ds));
+
+ switch (max_fscl) {
+ case I3C_SDR1_FSCL_8MHZ:
+ max_fscl = PP_SG_6MHZ;
+ break;
+ case I3C_SDR2_FSCL_6MHZ:
+ max_fscl = PP_SG_3MHZ;
+ break;
+ case I3C_SDR3_FSCL_4MHZ:
+ max_fscl = PP_SG_3MHZ;
+ break;
+ case I3C_SDR4_FSCL_2MHZ:
+ max_fscl = PP_SG_1MHZ;
+ break;
+ case I3C_SDR0_FSCL_MAX:
+ default:
+ max_fscl = PP_SG_12MHZ;
+ break;
+ }
+
+ if (max_fscl &&
+ (i3c_scl_lim > max_fscl || !i3c_scl_lim))
+ i3c_scl_lim = max_fscl;
+ }
+
+ if (!i3c_scl_lim)
+ return;
+
+ master->i3c_scl_lim = i3c_scl_lim - 1;
+
+ pp_sg = readl(master->regs + REG_OPS) & ~REG_OPS_PP_SG_MASK;
+ pp_sg |= REG_OPS_SET_SG(master->i3c_scl_lim);
+
+ writel(pp_sg, master->regs + REG_OPS);
+}
+
+static void adi_i3c_master_get_features(struct adi_i3c_master *master,
+ unsigned int slot,
+ struct i3c_device_info *info)
+{
+ u32 buf;
+
+ /* Dynamic address and PID are for identification only */
+ memset(info, 0, sizeof(*info));
+ buf = readl(master->regs + REG_DCR_BCR_DA);
+ info->dyn_addr = REG_DCR_BCR_DA_GET_DA(buf);
+ info->dcr = REG_DCR_BCR_DA_GET_DCR(buf);
+ info->bcr = REG_DCR_BCR_DA_GET_BCR(buf);
+ info->pid = readl(master->regs + REG_PID_L);
+ info->pid |= (u64)readl(master->regs + REG_PID_H) << 32;
+}
+
+static int adi_i3c_master_do_daa(struct i3c_master_controller *m)
+{
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+ int ret, addr = 0;
+ u32 irq_mask;
+
+ for (u8 i = 0; i < ADI_MAX_DEVS; i++) {
+ addr = i3c_master_get_free_addr(m, addr);
+ if (addr < 0)
+ return addr;
+ master->daa.addrs[i] = addr;
+ }
+
+ irq_mask = readl(master->regs + REG_IRQ_MASK);
+ writel(irq_mask | REG_IRQ_PENDING_DAA,
+ master->regs + REG_IRQ_MASK);
+
+ master->daa.index = 0;
+ ret = i3c_master_entdaa_locked(&master->base);
+
+ writel(irq_mask, master->regs + REG_IRQ_MASK);
+
+ /* DAA always finishes with CE2_ERROR or NACK_RESP */
+ if (ret && ret != I3C_ERROR_M2)
+ return ret;
+
+ /* Add I3C devices discovered */
+ for (u8 i = 0; i < master->daa.index; i++)
+ i3c_master_add_i3c_dev_locked(m, master->daa.addrs[i]);
+ /* Sync retrieved devs info with the IP */
+ adi_i3c_master_sync_dev_char(m);
+
+ i3c_master_defslvs_locked(&master->base);
+
+ adi_i3c_master_upd_i3c_scl_lim(master);
+
+ return 0;
+}
+
+static int adi_i3c_master_bus_init(struct i3c_master_controller *m)
+{
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+ struct i3c_device_info info = { };
+ int ret;
+
+ ret = i3c_master_get_free_addr(m, 0);
+ if (ret < 0)
+ return ret;
+
+ adi_i3c_master_get_features(master, 0, &info);
+ ret = i3c_master_set_info(&master->base, &info);
+ if (ret)
+ return ret;
+
+ writel(REG_IBI_CONFIG_LISTEN,
+ master->regs + REG_IBI_CONFIG);
+
+ return 0;
+}
+
+static void adi_i3c_master_handle_ibi(struct adi_i3c_master *master,
+ u32 raw)
+{
+ struct adi_i3c_i2c_dev_data *data;
+ struct i3c_ibi_slot *slot;
+ struct i3c_dev_desc *dev;
+ u8 da, id, mdb, len;
+ u8 *buf;
+
+ da = FIELD_GET(GENMASK(23, 17), raw);
+ mdb = FIELD_GET(GENMASK(15, 8), raw);
+ for (id = 0; id < master->ibi.num_slots; id++) {
+ if (master->ibi.slots[id] &&
+ master->ibi.slots[id]->info.dyn_addr == da)
+ break;
+ }
+
+ if (id == master->ibi.num_slots)
+ return;
+
+ dev = master->ibi.slots[id];
+ len = ADI_HAS_MDB_FROM_BCR(dev->info.bcr);
+ data = i3c_dev_get_master_data(dev);
+
+ guard(spinlock)(&master->ibi.lock);
+ slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
+ if (!slot)
+ return;
+
+ slot->len = len;
+ buf = slot->data;
+ buf[0] = mdb;
+ i3c_master_queue_ibi(dev, slot);
+}
+
+static void adi_i3c_master_demux_ibis(struct adi_i3c_master *master)
+{
+ while (!(readl(master->regs + REG_FIFO_STATUS) & REG_FIFO_STATUS_IBI_EMPTY)) {
+ u32 raw = readl(master->regs + REG_IBI_FIFO);
+
+ adi_i3c_master_handle_ibi(master, raw);
+ }
+}
+
+static void adi_i3c_master_handle_da_req(struct adi_i3c_master *master)
+{
+ u8 payload0[8];
+ u32 addr;
+
+ adi_i3c_master_rd_from_rx_fifo(master, payload0, 6);
+ addr = master->daa.addrs[master->daa.index++];
+ addr = (addr << 1) | (parity8(addr) ? 0 : 1);
+
+ writel(addr, master->regs + REG_SDO_FIFO);
+}
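
For context on the address byte built above: during ENTDAA the controller assigns a 7-bit dynamic address placed in bits 7:1, with bit 0 carrying an odd-parity (PAR) bit over the address, which is what the parity8() expression computes. A small sketch of just that encoding:

#include <linux/bitops.h>

/* I3C ENTDAA address byte: 7-bit address in bits 7:1, odd-parity
 * bit in bit 0 (PAR makes the count of set bits across 7:0 odd). */
static u8 example_daa_addr_byte(u8 addr7)
{
	return (addr7 << 1) | (parity8(addr7) ? 0 : 1);
}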
+
+static irqreturn_t adi_i3c_master_irq(int irq, void *data)
+{
+ struct adi_i3c_master *master = data;
+ u32 pending;
+
+ pending = readl(master->regs + REG_IRQ_PENDING);
+ writel(pending, master->regs + REG_IRQ_PENDING);
+ if (pending & REG_IRQ_PENDING_CMDR) {
+ scoped_guard(spinlock_irqsave, &master->xferqueue.lock) {
+ adi_i3c_master_end_xfer_locked(master, pending);
+ }
+ }
+ if (pending & REG_IRQ_PENDING_IBI)
+ adi_i3c_master_demux_ibis(master);
+ if (pending & REG_IRQ_PENDING_DAA)
+ adi_i3c_master_handle_da_req(master);
+
+ return IRQ_HANDLED;
+}
+
+static int adi_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
+ struct i2c_msg *xfers,
+ int nxfers)
+{
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+ struct adi_i3c_xfer *xfer __free(kfree) = NULL;
+ int i;
+
+ if (!nxfers)
+ return 0;
+ for (i = 0; i < nxfers; i++) {
+ if (xfers[i].flags & I2C_M_TEN)
+ return -EOPNOTSUPP;
+ }
+ xfer = adi_i3c_master_alloc_xfer(master, nxfers);
+ if (!xfer)
+ return -ENOMEM;
+
+ for (i = 0; i < nxfers; i++) {
+ struct adi_i3c_cmd *ccmd = &xfer->cmds[i];
+
+ ccmd->cmd0 = REG_CMD_FIFO_0_DEV_ADDR(xfers[i].addr);
+
+ if (xfers[i].flags & I2C_M_RD) {
+ ccmd->cmd0 |= REG_CMD_FIFO_0_RNW;
+ ccmd->rx_buf = xfers[i].buf;
+ ccmd->rx_len = xfers[i].len;
+ } else {
+ ccmd->tx_buf = xfers[i].buf;
+ ccmd->tx_len = xfers[i].len;
+ }
+
+ ccmd->cmd0 |= REG_CMD_FIFO_0_LEN(xfers[i].len);
+ }
+
+ adi_i3c_master_queue_xfer(master, xfer);
+ if (!wait_for_completion_timeout(&xfer->comp,
+ m->i2c.timeout))
+ adi_i3c_master_unqueue_xfer(master, xfer);
+
+ return xfer->ret;
+}
+
+static int adi_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+ struct i3c_dev_desc *i3cdev;
+ u32 enabled = 0;
+ int ret;
+
+ ret = i3c_master_disec_locked(m, dev->info.dyn_addr,
+ I3C_CCC_EVENT_SIR);
+
+ i3c_bus_for_each_i3cdev(&m->bus, i3cdev) {
+ if (dev != i3cdev && i3cdev->ibi)
+ enabled |= i3cdev->ibi->enabled;
+ }
+ if (!enabled) {
+ writel(REG_IBI_CONFIG_LISTEN,
+ master->regs + REG_IBI_CONFIG);
+ writel(readl(master->regs + REG_IRQ_MASK) & ~REG_IRQ_PENDING_IBI,
+ master->regs + REG_IRQ_MASK);
+ }
+
+ return ret;
+}
+
+static int adi_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+
+ writel(REG_IBI_CONFIG_LISTEN | REG_IBI_CONFIG_ENABLE,
+ master->regs + REG_IBI_CONFIG);
+
+ writel(readl(master->regs + REG_IRQ_MASK) | REG_IRQ_PENDING_IBI,
+ master->regs + REG_IRQ_MASK);
+
+ return i3c_master_enec_locked(m, dev->info.dyn_addr,
+ I3C_CCC_EVENT_SIR);
+}
+
+static int adi_i3c_master_request_ibi(struct i3c_dev_desc *dev,
+ const struct i3c_ibi_setup *req)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+ struct adi_i3c_i2c_dev_data *data;
+ unsigned int i;
+
+ data = i3c_dev_get_master_data(dev);
+ data->ibi_pool = i3c_generic_ibi_alloc_pool(dev, req);
+ if (IS_ERR(data->ibi_pool))
+ return PTR_ERR(data->ibi_pool);
+
+ scoped_guard(spinlock_irqsave, &master->ibi.lock) {
+ for (i = 0; i < master->ibi.num_slots; i++) {
+ if (!master->ibi.slots[i]) {
+ data->ibi = i;
+ master->ibi.slots[i] = dev;
+ break;
+ }
+ }
+ }
+
+ if (i < master->ibi.num_slots)
+ return 0;
+
+ i3c_generic_ibi_free_pool(data->ibi_pool);
+ data->ibi_pool = NULL;
+
+ return -ENOSPC;
+}
+
+static void adi_i3c_master_free_ibi(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+ struct adi_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+
+ scoped_guard(spinlock_irqsave, &master->ibi.lock) {
+ master->ibi.slots[data->ibi] = NULL;
+ }
+
+ i3c_generic_ibi_free_pool(data->ibi_pool);
+}
+
+static void adi_i3c_master_recycle_ibi_slot(struct i3c_dev_desc *dev,
+ struct i3c_ibi_slot *slot)
+{
+ struct adi_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+
+ i3c_generic_ibi_recycle_slot(data->ibi_pool, slot);
+}
+
+static const struct i3c_master_controller_ops adi_i3c_master_ops = {
+ .bus_init = adi_i3c_master_bus_init,
+ .bus_cleanup = adi_i3c_master_bus_cleanup,
+ .attach_i3c_dev = adi_i3c_master_attach_i3c_dev,
+ .reattach_i3c_dev = adi_i3c_master_reattach_i3c_dev,
+ .detach_i3c_dev = adi_i3c_master_detach_i3c_dev,
+ .attach_i2c_dev = adi_i3c_master_attach_i2c_dev,
+ .detach_i2c_dev = adi_i3c_master_detach_i2c_dev,
+ .do_daa = adi_i3c_master_do_daa,
+ .supports_ccc_cmd = adi_i3c_master_supports_ccc_cmd,
+ .send_ccc_cmd = adi_i3c_master_send_ccc_cmd,
+ .priv_xfers = adi_i3c_master_priv_xfers,
+ .i2c_xfers = adi_i3c_master_i2c_xfers,
+ .request_ibi = adi_i3c_master_request_ibi,
+ .enable_ibi = adi_i3c_master_enable_ibi,
+ .disable_ibi = adi_i3c_master_disable_ibi,
+ .free_ibi = adi_i3c_master_free_ibi,
+ .recycle_ibi_slot = adi_i3c_master_recycle_ibi_slot,
+};
+
+static const struct of_device_id adi_i3c_master_of_match[] = {
+ { .compatible = "adi,i3c-master-v1" },
+ {}
+};
+
+static int adi_i3c_master_probe(struct platform_device *pdev)
+{
+ struct adi_i3c_master *master;
+ struct clk_bulk_data *clk;
+ unsigned int version;
+ int ret, irq;
+
+ master = devm_kzalloc(&pdev->dev, sizeof(*master), GFP_KERNEL);
+ if (!master)
+ return -ENOMEM;
+
+ master->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(master->regs))
+ return PTR_ERR(master->regs);
+
+ ret = devm_clk_bulk_get_all_enabled(&pdev->dev, &clk);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to get clocks\n");
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ version = readl(master->regs + ADI_AXI_REG_VERSION);
+ if (ADI_AXI_PCORE_VER_MAJOR(version) != 1)
+ return dev_err_probe(&pdev->dev, -ENODEV, "Unsupported peripheral version %u.%u.%u\n",
+ ADI_AXI_PCORE_VER_MAJOR(version),
+ ADI_AXI_PCORE_VER_MINOR(version),
+ ADI_AXI_PCORE_VER_PATCH(version));
+
+ writel(0x00, master->regs + REG_ENABLE);
+ writel(0x00, master->regs + REG_IRQ_MASK);
+
+ ret = devm_request_irq(&pdev->dev, irq, adi_i3c_master_irq, 0,
+ dev_name(&pdev->dev), master);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, master);
+
+ master->free_rr_slots = GENMASK(ADI_MAX_DEVS, 1);
+
+ writel(REG_IRQ_PENDING_CMDR, master->regs + REG_IRQ_MASK);
+
+ spin_lock_init(&master->ibi.lock);
+ master->ibi.num_slots = 15;
+ master->ibi.slots = devm_kcalloc(&pdev->dev, master->ibi.num_slots,
+ sizeof(*master->ibi.slots),
+ GFP_KERNEL);
+ if (!master->ibi.slots)
+ return -ENOMEM;
+
+ spin_lock_init(&master->xferqueue.lock);
+ INIT_LIST_HEAD(&master->xferqueue.list);
+
+ return i3c_master_register(&master->base, &pdev->dev,
+ &adi_i3c_master_ops, false);
+}
+
+static void adi_i3c_master_remove(struct platform_device *pdev)
+{
+ struct adi_i3c_master *master = platform_get_drvdata(pdev);
+
+ writel(0xff, master->regs + REG_IRQ_PENDING);
+ writel(0x00, master->regs + REG_IRQ_MASK);
+ writel(0x01, master->regs + REG_ENABLE);
+
+ i3c_master_unregister(&master->base);
+}
+
+static struct platform_driver adi_i3c_master = {
+ .probe = adi_i3c_master_probe,
+ .remove = adi_i3c_master_remove,
+ .driver = {
+ .name = "adi-i3c-master",
+ .of_match_table = adi_i3c_master_of_match,
+ },
+};
+module_platform_driver(adi_i3c_master);
+
+MODULE_AUTHOR("Jorge Marques <jorge.marques@analog.com>");
+MODULE_DESCRIPTION("Analog Devices I3C master driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c
index 974122b2d20e..9ceedf09c3b6 100644
--- a/drivers/i3c/master/dw-i3c-master.c
+++ b/drivers/i3c/master/dw-i3c-master.c
@@ -1737,6 +1737,28 @@ static const struct dev_pm_ops dw_i3c_pm_ops = {
SET_RUNTIME_PM_OPS(dw_i3c_master_runtime_suspend, dw_i3c_master_runtime_resume, NULL)
};
+static void dw_i3c_shutdown(struct platform_device *pdev)
+{
+ struct dw_i3c_master *master = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(master->dev);
+ if (ret < 0) {
+ dev_err(master->dev,
+ "<%s> cannot resume i3c bus master, err: %d\n",
+ __func__, ret);
+ return;
+ }
+
+ cancel_work_sync(&master->hj_work);
+
+ /* Disable interrupts */
+ writel((u32)~INTR_ALL, master->regs + INTR_STATUS_EN);
+ writel((u32)~INTR_ALL, master->regs + INTR_SIGNAL_EN);
+
+ pm_runtime_put_autosuspend(master->dev);
+}
+
static const struct of_device_id dw_i3c_master_of_match[] = {
{ .compatible = "snps,dw-i3c-master-1.00a", },
{},
@@ -1752,6 +1774,7 @@ MODULE_DEVICE_TABLE(acpi, amd_i3c_device_match);
static struct platform_driver dw_i3c_driver = {
.probe = dw_i3c_probe,
.remove = dw_i3c_remove,
+ .shutdown = dw_i3c_shutdown,
.driver = {
.name = "dw-i3c-master",
.of_match_table = dw_i3c_master_of_match,
diff --git a/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c b/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c
index dd636094b07f..eb8a3ae2990d 100644
--- a/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c
+++ b/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c
@@ -317,7 +317,9 @@ static int hci_cmd_v1_daa(struct i3c_hci *hci)
break;
next_addr = ret;
- DBG("next_addr = 0x%02x, DAA using DAT %d", next_addr, dat_idx);
+ dev_dbg(&hci->master.dev,
+ "next_addr = 0x%02x, DAA using DAT %d",
+ next_addr, dat_idx);
mipi_i3c_hci_dat_v1.set_dynamic_addr(hci, dat_idx, next_addr);
mipi_i3c_hci_dct_index_reset(hci);
@@ -349,8 +351,9 @@ static int hci_cmd_v1_daa(struct i3c_hci *hci)
}
i3c_hci_dct_get_val(hci, 0, &pid, &dcr, &bcr);
- DBG("assigned address %#x to device PID=0x%llx DCR=%#x BCR=%#x",
- next_addr, pid, dcr, bcr);
+ dev_dbg(&hci->master.dev,
+ "assigned address %#x to device PID=0x%llx DCR=%#x BCR=%#x",
+ next_addr, pid, dcr, bcr);
mipi_i3c_hci_dat_v1.free_entry(hci, dat_idx);
dat_idx = -1;
diff --git a/drivers/i3c/master/mipi-i3c-hci/cmd_v2.c b/drivers/i3c/master/mipi-i3c-hci/cmd_v2.c
index 4493b2b067cb..efb4326a25b7 100644
--- a/drivers/i3c/master/mipi-i3c-hci/cmd_v2.c
+++ b/drivers/i3c/master/mipi-i3c-hci/cmd_v2.c
@@ -261,7 +261,7 @@ static int hci_cmd_v2_daa(struct i3c_hci *hci)
if (ret < 0)
break;
next_addr = ret;
- DBG("next_addr = 0x%02x", next_addr);
+ dev_dbg(&hci->master.dev, "next_addr = 0x%02x", next_addr);
xfer[0].cmd_tid = hci_get_tid();
xfer[0].cmd_desc[0] =
CMD_0_ATTR_A |
@@ -293,8 +293,9 @@ static int hci_cmd_v2_daa(struct i3c_hci *hci)
pid = (pid << 32) | device_id[0];
bcr = FIELD_GET(W1_MASK(55, 48), device_id[1]);
dcr = FIELD_GET(W1_MASK(63, 56), device_id[1]);
- DBG("assigned address %#x to device PID=0x%llx DCR=%#x BCR=%#x",
- next_addr, pid, dcr, bcr);
+ dev_dbg(&hci->master.dev,
+ "assigned address %#x to device PID=0x%llx DCR=%#x BCR=%#x",
+ next_addr, pid, dcr, bcr);
/*
* TODO: Extend the subsystem layer to allow for registering
* new device and provide BCR/DCR/PID at the same time.
diff --git a/drivers/i3c/master/mipi-i3c-hci/core.c b/drivers/i3c/master/mipi-i3c-hci/core.c
index 60f1175f1f37..47e42cb4dbe7 100644
--- a/drivers/i3c/master/mipi-i3c-hci/core.c
+++ b/drivers/i3c/master/mipi-i3c-hci/core.c
@@ -121,8 +121,6 @@ static int i3c_hci_bus_init(struct i3c_master_controller *m)
struct i3c_device_info info;
int ret;
- DBG("");
-
if (hci->cmd == &mipi_i3c_hci_cmd_v1) {
ret = mipi_i3c_hci_dat_v1.init(hci);
if (ret)
@@ -149,7 +147,7 @@ static int i3c_hci_bus_init(struct i3c_master_controller *m)
amd_set_resp_buf_thld(hci);
reg_set(HC_CONTROL, HC_CONTROL_BUS_ENABLE);
- DBG("HC_CONTROL = %#x", reg_read(HC_CONTROL));
+ dev_dbg(&hci->master.dev, "HC_CONTROL = %#x", reg_read(HC_CONTROL));
return 0;
}
@@ -159,8 +157,6 @@ static void i3c_hci_bus_cleanup(struct i3c_master_controller *m)
struct i3c_hci *hci = to_i3c_hci(m);
struct platform_device *pdev = to_platform_device(m->dev.parent);
- DBG("");
-
reg_clear(HC_CONTROL, HC_CONTROL_BUS_ENABLE);
synchronize_irq(platform_get_irq(pdev, 0));
hci->io->cleanup(hci);
@@ -196,8 +192,8 @@ static int i3c_hci_send_ccc_cmd(struct i3c_master_controller *m,
DECLARE_COMPLETION_ONSTACK(done);
int i, last, ret = 0;
- DBG("cmd=%#x rnw=%d ndests=%d data[0].len=%d",
- ccc->id, ccc->rnw, ccc->ndests, ccc->dests[0].payload.len);
+ dev_dbg(&hci->master.dev, "cmd=%#x rnw=%d ndests=%d data[0].len=%d",
+ ccc->id, ccc->rnw, ccc->ndests, ccc->dests[0].payload.len);
xfer = hci_alloc_xfer(nxfers);
if (!xfer)
@@ -255,8 +251,8 @@ static int i3c_hci_send_ccc_cmd(struct i3c_master_controller *m,
}
if (ccc->rnw)
- DBG("got: %*ph",
- ccc->dests[0].payload.len, ccc->dests[0].payload.data);
+ dev_dbg(&hci->master.dev, "got: %*ph",
+ ccc->dests[0].payload.len, ccc->dests[0].payload.data);
out:
hci_free_xfer(xfer, nxfers);
@@ -267,39 +263,9 @@ static int i3c_hci_daa(struct i3c_master_controller *m)
{
struct i3c_hci *hci = to_i3c_hci(m);
- DBG("");
-
return hci->cmd->perform_daa(hci);
}
-static int i3c_hci_alloc_safe_xfer_buf(struct i3c_hci *hci,
- struct hci_xfer *xfer)
-{
- if (hci->io != &mipi_i3c_hci_dma ||
- xfer->data == NULL || !is_vmalloc_addr(xfer->data))
- return 0;
-
- if (xfer->rnw)
- xfer->bounce_buf = kzalloc(xfer->data_len, GFP_KERNEL);
- else
- xfer->bounce_buf = kmemdup(xfer->data,
- xfer->data_len, GFP_KERNEL);
-
- return xfer->bounce_buf == NULL ? -ENOMEM : 0;
-}
-
-static void i3c_hci_free_safe_xfer_buf(struct i3c_hci *hci,
- struct hci_xfer *xfer)
-{
- if (hci->io != &mipi_i3c_hci_dma || xfer->bounce_buf == NULL)
- return;
-
- if (xfer->rnw)
- memcpy(xfer->data, xfer->bounce_buf, xfer->data_len);
-
- kfree(xfer->bounce_buf);
-}
-
static int i3c_hci_priv_xfers(struct i3c_dev_desc *dev,
struct i3c_priv_xfer *i3c_xfers,
int nxfers)
@@ -311,7 +277,7 @@ static int i3c_hci_priv_xfers(struct i3c_dev_desc *dev,
unsigned int size_limit;
int i, last, ret = 0;
- DBG("nxfers = %d", nxfers);
+ dev_dbg(&hci->master.dev, "nxfers = %d", nxfers);
xfer = hci_alloc_xfer(nxfers);
if (!xfer)
@@ -333,9 +299,6 @@ static int i3c_hci_priv_xfers(struct i3c_dev_desc *dev,
}
hci->cmd->prep_i3c_xfer(hci, dev, &xfer[i]);
xfer[i].cmd_desc[0] |= CMD_0_ROC;
- ret = i3c_hci_alloc_safe_xfer_buf(hci, &xfer[i]);
- if (ret)
- goto out;
}
last = i - 1;
xfer[last].cmd_desc[0] |= CMD_0_TOC;
@@ -359,9 +322,6 @@ static int i3c_hci_priv_xfers(struct i3c_dev_desc *dev,
}
out:
- for (i = 0; i < nxfers; i++)
- i3c_hci_free_safe_xfer_buf(hci, &xfer[i]);
-
hci_free_xfer(xfer, nxfers);
return ret;
}
@@ -375,14 +335,14 @@ static int i3c_hci_i2c_xfers(struct i2c_dev_desc *dev,
DECLARE_COMPLETION_ONSTACK(done);
int i, last, ret = 0;
- DBG("nxfers = %d", nxfers);
+ dev_dbg(&hci->master.dev, "nxfers = %d", nxfers);
xfer = hci_alloc_xfer(nxfers);
if (!xfer)
return -ENOMEM;
for (i = 0; i < nxfers; i++) {
- xfer[i].data = i2c_get_dma_safe_msg_buf(&i2c_xfers[i], 1);
+ xfer[i].data = i2c_xfers[i].buf;
xfer[i].data_len = i2c_xfers[i].len;
xfer[i].rnw = i2c_xfers[i].flags & I2C_M_RD;
hci->cmd->prep_i2c_xfer(hci, dev, &xfer[i]);
@@ -408,10 +368,6 @@ static int i3c_hci_i2c_xfers(struct i2c_dev_desc *dev,
}
out:
- for (i = 0; i < nxfers; i++)
- i2c_put_dma_safe_msg_buf(xfer[i].data, &i2c_xfers[i],
- ret ? false : true);
-
hci_free_xfer(xfer, nxfers);
return ret;
}
@@ -423,8 +379,6 @@ static int i3c_hci_attach_i3c_dev(struct i3c_dev_desc *dev)
struct i3c_hci_dev_data *dev_data;
int ret;
- DBG("");
-
dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
if (!dev_data)
return -ENOMEM;
@@ -448,8 +402,6 @@ static int i3c_hci_reattach_i3c_dev(struct i3c_dev_desc *dev, u8 old_dyn_addr)
struct i3c_hci *hci = to_i3c_hci(m);
struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
- DBG("");
-
if (hci->cmd == &mipi_i3c_hci_cmd_v1)
mipi_i3c_hci_dat_v1.set_dynamic_addr(hci, dev_data->dat_idx,
dev->info.dyn_addr);
@@ -462,8 +414,6 @@ static void i3c_hci_detach_i3c_dev(struct i3c_dev_desc *dev)
struct i3c_hci *hci = to_i3c_hci(m);
struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
- DBG("");
-
i3c_dev_set_master_data(dev, NULL);
if (hci->cmd == &mipi_i3c_hci_cmd_v1)
mipi_i3c_hci_dat_v1.free_entry(hci, dev_data->dat_idx);
@@ -477,8 +427,6 @@ static int i3c_hci_attach_i2c_dev(struct i2c_dev_desc *dev)
struct i3c_hci_dev_data *dev_data;
int ret;
- DBG("");
-
if (hci->cmd != &mipi_i3c_hci_cmd_v1)
return 0;
dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
@@ -502,8 +450,6 @@ static void i3c_hci_detach_i2c_dev(struct i2c_dev_desc *dev)
struct i3c_hci *hci = to_i3c_hci(m);
struct i3c_hci_dev_data *dev_data = i2c_dev_get_master_data(dev);
- DBG("");
-
if (dev_data) {
i2c_dev_set_master_data(dev, NULL);
if (hci->cmd == &mipi_i3c_hci_cmd_v1)
@@ -591,7 +537,7 @@ static irqreturn_t i3c_hci_irq_handler(int irq, void *dev_id)
val = reg_read(INTR_STATUS);
reg_write(INTR_STATUS, val);
- DBG("INTR_STATUS = %#x", val);
+ dev_dbg(&hci->master.dev, "INTR_STATUS %#x", val);
if (val)
result = IRQ_HANDLED;
@@ -641,7 +587,7 @@ static int i3c_hci_init(struct i3c_hci *hci)
}
hci->caps = reg_read(HC_CAPABILITIES);
- DBG("caps = %#x", hci->caps);
+ dev_dbg(&hci->master.dev, "caps = %#x", hci->caps);
size_in_dwords = hci->version_major < 1 ||
(hci->version_major == 1 && hci->version_minor < 1);
diff --git a/drivers/i3c/master/mipi-i3c-hci/dma.c b/drivers/i3c/master/mipi-i3c-hci/dma.c
index 491dfe70b660..c401a9425cdc 100644
--- a/drivers/i3c/master/mipi-i3c-hci/dma.c
+++ b/drivers/i3c/master/mipi-i3c-hci/dma.c
@@ -14,6 +14,7 @@
#include <linux/errno.h>
#include <linux/i3c/master.h>
#include <linux/io.h>
+#include <linux/pci.h>
#include "hci.h"
#include "cmd.h"
@@ -76,7 +77,6 @@
#define INTR_TRANSFER_COMPLETION BIT(11)
#define INTR_RING_OP BIT(10)
#define INTR_TRANSFER_ERR BIT(9)
-#define INTR_WARN_INS_STOP_MODE BIT(7)
#define INTR_IBI_RING_FULL BIT(6)
#define INTR_TRANSFER_ABORT BIT(5)
@@ -138,6 +138,7 @@ struct hci_rh_data {
};
struct hci_rings_data {
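+ /* Physical device used with the DMA API; set up in hci_dma_init() */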
+ struct device *sysdev;
unsigned int total;
struct hci_rh_data headers[] __counted_by(total);
};
@@ -165,20 +166,20 @@ static void hci_dma_cleanup(struct i3c_hci *hci)
rh_reg_write(IBI_SETUP, 0);
if (rh->xfer)
- dma_free_coherent(&hci->master.dev,
+ dma_free_coherent(rings->sysdev,
rh->xfer_struct_sz * rh->xfer_entries,
rh->xfer, rh->xfer_dma);
if (rh->resp)
- dma_free_coherent(&hci->master.dev,
+ dma_free_coherent(rings->sysdev,
rh->resp_struct_sz * rh->xfer_entries,
rh->resp, rh->resp_dma);
kfree(rh->src_xfers);
if (rh->ibi_status)
- dma_free_coherent(&hci->master.dev,
+ dma_free_coherent(rings->sysdev,
rh->ibi_status_sz * rh->ibi_status_entries,
rh->ibi_status, rh->ibi_status_dma);
if (rh->ibi_data_dma)
- dma_unmap_single(&hci->master.dev, rh->ibi_data_dma,
+ dma_unmap_single(rings->sysdev, rh->ibi_data_dma,
rh->ibi_chunk_sz * rh->ibi_chunks_total,
DMA_FROM_DEVICE);
kfree(rh->ibi_data);
@@ -194,11 +195,23 @@ static int hci_dma_init(struct i3c_hci *hci)
{
struct hci_rings_data *rings;
struct hci_rh_data *rh;
+ struct device *sysdev;
u32 regval;
unsigned int i, nr_rings, xfers_sz, resps_sz;
unsigned int ibi_status_ring_sz, ibi_data_ring_sz;
int ret;
+ /*
+ * Point to the physical device that actually performs DMA and, when
+ * an IOMMU is enabled, has IOMMU setup done for it, and use that
+ * device with the DMA API. Here such a device is either the parent
+ * of the "mipi-i3c-hci" platform device (OF/ACPI enumeration) or its
+ * grandparent (PCI enumeration).
+ */
+ sysdev = hci->master.dev.parent;
+ if (sysdev->parent && dev_is_pci(sysdev->parent))
+ sysdev = sysdev->parent;
+
regval = rhs_reg_read(CONTROL);
nr_rings = FIELD_GET(MAX_HEADER_COUNT_CAP, regval);
dev_info(&hci->master.dev, "%d DMA rings available\n", nr_rings);
@@ -213,6 +226,7 @@ static int hci_dma_init(struct i3c_hci *hci)
return -ENOMEM;
hci->io_data = rings;
rings->total = nr_rings;
+ rings->sysdev = sysdev;
regval = FIELD_PREP(MAX_HEADER_COUNT, rings->total);
rhs_reg_write(CONTROL, regval);
@@ -234,14 +248,15 @@ static int hci_dma_init(struct i3c_hci *hci)
regval = rh_reg_read(CR_SETUP);
rh->xfer_struct_sz = FIELD_GET(CR_XFER_STRUCT_SIZE, regval);
rh->resp_struct_sz = FIELD_GET(CR_RESP_STRUCT_SIZE, regval);
- DBG("xfer_struct_sz = %d, resp_struct_sz = %d",
- rh->xfer_struct_sz, rh->resp_struct_sz);
+ dev_dbg(&hci->master.dev,
+ "xfer_struct_sz = %d, resp_struct_sz = %d",
+ rh->xfer_struct_sz, rh->resp_struct_sz);
xfers_sz = rh->xfer_struct_sz * rh->xfer_entries;
resps_sz = rh->resp_struct_sz * rh->xfer_entries;
- rh->xfer = dma_alloc_coherent(&hci->master.dev, xfers_sz,
+ rh->xfer = dma_alloc_coherent(rings->sysdev, xfers_sz,
&rh->xfer_dma, GFP_KERNEL);
- rh->resp = dma_alloc_coherent(&hci->master.dev, resps_sz,
+ rh->resp = dma_alloc_coherent(rings->sysdev, resps_sz,
&rh->resp_dma, GFP_KERNEL);
rh->src_xfers =
kmalloc_array(rh->xfer_entries, sizeof(*rh->src_xfers),
@@ -263,7 +278,6 @@ static int hci_dma_init(struct i3c_hci *hci)
INTR_TRANSFER_COMPLETION |
INTR_RING_OP |
INTR_TRANSFER_ERR |
- INTR_WARN_INS_STOP_MODE |
INTR_IBI_RING_FULL |
INTR_TRANSFER_ABORT);
@@ -295,16 +309,16 @@ static int hci_dma_init(struct i3c_hci *hci)
ibi_data_ring_sz = rh->ibi_chunk_sz * rh->ibi_chunks_total;
rh->ibi_status =
- dma_alloc_coherent(&hci->master.dev, ibi_status_ring_sz,
+ dma_alloc_coherent(rings->sysdev, ibi_status_ring_sz,
&rh->ibi_status_dma, GFP_KERNEL);
rh->ibi_data = kmalloc(ibi_data_ring_sz, GFP_KERNEL);
ret = -ENOMEM;
if (!rh->ibi_status || !rh->ibi_data)
goto err_out;
rh->ibi_data_dma =
- dma_map_single(&hci->master.dev, rh->ibi_data,
+ dma_map_single(rings->sysdev, rh->ibi_data,
ibi_data_ring_sz, DMA_FROM_DEVICE);
- if (dma_mapping_error(&hci->master.dev, rh->ibi_data_dma)) {
+ if (dma_mapping_error(rings->sysdev, rh->ibi_data_dma)) {
rh->ibi_data_dma = 0;
ret = -ENOMEM;
goto err_out;
@@ -349,9 +363,7 @@ static void hci_dma_unmap_xfer(struct i3c_hci *hci,
xfer = xfer_list + i;
if (!xfer->data)
continue;
- dma_unmap_single(&hci->master.dev,
- xfer->data_dma, xfer->data_len,
- xfer->rnw ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ i3c_master_dma_unmap_single(xfer->dma);
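+ /* The helper is assumed to copy any bounce buffer back for reads. */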
}
}
@@ -362,7 +374,6 @@ static int hci_dma_queue_xfer(struct i3c_hci *hci,
struct hci_rh_data *rh;
unsigned int i, ring, enqueue_ptr;
u32 op1_val, op2_val;
- void *buf;
/* For now we only use ring 0 */
ring = 0;
@@ -373,6 +384,9 @@ static int hci_dma_queue_xfer(struct i3c_hci *hci,
for (i = 0; i < n; i++) {
struct hci_xfer *xfer = xfer_list + i;
u32 *ring_data = rh->xfer + rh->xfer_struct_sz * enqueue_ptr;
+ enum dma_data_direction dir = xfer->rnw ? DMA_FROM_DEVICE :
+ DMA_TO_DEVICE;
+ bool need_bounce;
/* store cmd descriptor */
*ring_data++ = xfer->cmd_desc[0];
@@ -391,21 +405,20 @@ static int hci_dma_queue_xfer(struct i3c_hci *hci,
/* 2nd and 3rd words of Data Buffer Descriptor Structure */
if (xfer->data) {
- buf = xfer->bounce_buf ? xfer->bounce_buf : xfer->data;
- xfer->data_dma =
- dma_map_single(&hci->master.dev,
- buf,
- xfer->data_len,
- xfer->rnw ?
- DMA_FROM_DEVICE :
- DMA_TO_DEVICE);
- if (dma_mapping_error(&hci->master.dev,
- xfer->data_dma)) {
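+ /*
+ * Bounce IOMMU-mapped reads whose length is not DWORD-aligned:
+ * the controller is assumed to write RX data in full DWORDs,
+ * which could otherwise overrun the exact IOMMU mapping.
+ */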
+ need_bounce = device_iommu_mapped(rings->sysdev) &&
+ xfer->rnw &&
+ xfer->data_len != ALIGN(xfer->data_len, 4);
+ xfer->dma = i3c_master_dma_map_single(rings->sysdev,
+ xfer->data,
+ xfer->data_len,
+ need_bounce,
+ dir);
+ if (!xfer->dma) {
hci_dma_unmap_xfer(hci, xfer_list, i);
return -ENOMEM;
}
- *ring_data++ = lower_32_bits(xfer->data_dma);
- *ring_data++ = upper_32_bits(xfer->data_dma);
+ *ring_data++ = lower_32_bits(xfer->dma->addr);
+ *ring_data++ = upper_32_bits(xfer->dma->addr);
} else {
*ring_data++ = 0;
*ring_data++ = 0;
@@ -511,11 +524,11 @@ static void hci_dma_xfer_done(struct i3c_hci *hci, struct hci_rh_data *rh)
ring_resp = rh->resp + rh->resp_struct_sz * done_ptr;
resp = *ring_resp;
tid = RESP_TID(resp);
- DBG("resp = 0x%08x", resp);
+ dev_dbg(&hci->master.dev, "resp = 0x%08x", resp);
xfer = rh->src_xfers[done_ptr];
if (!xfer) {
- DBG("orphaned ring entry");
+ dev_dbg(&hci->master.dev, "orphaned ring entry");
} else {
hci_dma_unmap_xfer(hci, xfer, 1);
xfer->ring_entry = -1;
@@ -586,6 +599,7 @@ static void hci_dma_recycle_ibi_slot(struct i3c_hci *hci,
static void hci_dma_process_ibi(struct i3c_hci *hci, struct hci_rh_data *rh)
{
+ struct hci_rings_data *rings = hci->io_data;
struct i3c_dev_desc *dev;
struct i3c_hci_dev_data *dev_data;
struct hci_dma_dev_ibi_data *dev_ibi;
@@ -617,7 +631,7 @@ static void hci_dma_process_ibi(struct i3c_hci *hci, struct hci_rh_data *rh)
ring_ibi_status = rh->ibi_status + rh->ibi_status_sz * ptr;
ibi_status = *ring_ibi_status;
- DBG("status = %#x", ibi_status);
+ dev_dbg(&hci->master.dev, "status = %#x", ibi_status);
if (ibi_status_error) {
/* we no longer care */
@@ -645,7 +659,9 @@ static void hci_dma_process_ibi(struct i3c_hci *hci, struct hci_rh_data *rh)
if (last_ptr == -1) {
/* this IBI sequence is not yet complete */
- DBG("no LAST_STATUS available (e=%d d=%d)", enq_ptr, deq_ptr);
+ dev_dbg(&hci->master.dev,
+ "no LAST_STATUS available (e=%d d=%d)",
+ enq_ptr, deq_ptr);
return;
}
deq_ptr = last_ptr + 1;
@@ -696,7 +712,7 @@ static void hci_dma_process_ibi(struct i3c_hci *hci, struct hci_rh_data *rh)
* rh->ibi_chunk_sz;
if (first_part > ibi_size)
first_part = ibi_size;
- dma_sync_single_for_cpu(&hci->master.dev, ring_ibi_data_dma,
+ dma_sync_single_for_cpu(rings->sysdev, ring_ibi_data_dma,
first_part, DMA_FROM_DEVICE);
memcpy(slot->data, ring_ibi_data, first_part);
@@ -705,7 +721,7 @@ static void hci_dma_process_ibi(struct i3c_hci *hci, struct hci_rh_data *rh)
/* we wrap back to the start and copy remaining data */
ring_ibi_data = rh->ibi_data;
ring_ibi_data_dma = rh->ibi_data_dma;
- dma_sync_single_for_cpu(&hci->master.dev, ring_ibi_data_dma,
+ dma_sync_single_for_cpu(rings->sysdev, ring_ibi_data_dma,
ibi_size - first_part, DMA_FROM_DEVICE);
memcpy(slot->data + first_part, ring_ibi_data,
ibi_size - first_part);
@@ -745,7 +761,8 @@ static bool hci_dma_irq_handler(struct i3c_hci *hci)
rh = &rings->headers[i];
status = rh_reg_read(INTR_STATUS);
- DBG("rh%d status: %#x", i, status);
+ dev_dbg(&hci->master.dev, "Ring %d: RH_INTR_STATUS %#x",
+ i, status);
if (!status)
continue;
rh_reg_write(INTR_STATUS, status);
@@ -761,7 +778,7 @@ static bool hci_dma_irq_handler(struct i3c_hci *hci)
u32 ring_status;
dev_notice_ratelimited(&hci->master.dev,
- "ring %d: Transfer Aborted\n", i);
+ "Ring %d: Transfer Aborted\n", i);
mipi_i3c_hci_resume(hci);
ring_status = rh_reg_read(RING_STATUS);
if (!(ring_status & RING_STATUS_RUNNING) &&
@@ -779,12 +796,9 @@ static bool hci_dma_irq_handler(struct i3c_hci *hci)
RING_CTRL_RUN_STOP);
}
}
- if (status & INTR_WARN_INS_STOP_MODE)
- dev_warn_ratelimited(&hci->master.dev,
- "ring %d: Inserted Stop on Mode Change\n", i);
if (status & INTR_IBI_RING_FULL)
dev_err_ratelimited(&hci->master.dev,
- "ring %d: IBI Ring Full Condition\n", i);
+ "Ring %d: IBI Ring Full Condition\n", i);
handled = true;
}
diff --git a/drivers/i3c/master/mipi-i3c-hci/ext_caps.c b/drivers/i3c/master/mipi-i3c-hci/ext_caps.c
index 2e9b23efdc45..7714f00ea9cc 100644
--- a/drivers/i3c/master/mipi-i3c-hci/ext_caps.c
+++ b/drivers/i3c/master/mipi-i3c-hci/ext_caps.c
@@ -35,7 +35,7 @@ static int hci_extcap_hardware_id(struct i3c_hci *hci, void __iomem *base)
switch (hci->vendor_mipi_id) {
case MIPI_VENDOR_NXP:
hci->quirks |= HCI_QUIRK_RAW_CCC;
- DBG("raw CCC quirks set");
+ dev_dbg(&hci->master.dev, "raw CCC quirks set");
break;
}
@@ -77,7 +77,8 @@ static int hci_extcap_xfer_modes(struct i3c_hci *hci, void __iomem *base)
for (index = 0; index < entries; index++) {
u32 mode_entry = readl(base);
- DBG("mode %d: 0x%08x", index, mode_entry);
+ dev_dbg(&hci->master.dev, "mode %d: 0x%08x",
+ index, mode_entry);
/* TODO: will be needed when I3C core does more than SDR */
base += 4;
}
@@ -97,7 +98,8 @@ static int hci_extcap_xfer_rates(struct i3c_hci *hci, void __iomem *base)
dev_info(&hci->master.dev, "available data rates:\n");
for (index = 0; index < entries; index++) {
rate_entry = readl(base);
- DBG("entry %d: 0x%08x", index, rate_entry);
+ dev_dbg(&hci->master.dev, "entry %d: 0x%08x",
+ index, rate_entry);
rate = FIELD_GET(XFERRATE_ACTUAL_RATE_KHZ, rate_entry);
rate_id = FIELD_GET(XFERRATE_RATE_ID, rate_entry);
mode_id = FIELD_GET(XFERRATE_MODE_ID, rate_entry);
@@ -268,7 +270,8 @@ int i3c_hci_parse_ext_caps(struct i3c_hci *hci)
cap_header = readl(curr_cap);
cap_id = FIELD_GET(CAP_HEADER_ID, cap_header);
cap_length = FIELD_GET(CAP_HEADER_LENGTH, cap_header);
- DBG("id=0x%02x length=%d", cap_id, cap_length);
+ dev_dbg(&hci->master.dev, "id=0x%02x length=%d",
+ cap_id, cap_length);
if (!cap_length)
break;
if (curr_cap + cap_length * 4 >= end) {
diff --git a/drivers/i3c/master/mipi-i3c-hci/hci.h b/drivers/i3c/master/mipi-i3c-hci/hci.h
index 69ea1d10414b..249ccb13c909 100644
--- a/drivers/i3c/master/mipi-i3c-hci/hci.h
+++ b/drivers/i3c/master/mipi-i3c-hci/hci.h
@@ -12,9 +12,6 @@
#include <linux/io.h>
-/* Handy logging macro to save on line length */
-#define DBG(x, ...) pr_devel("%s: " x "\n", __func__, ##__VA_ARGS__)
-
/* 32-bit word aware bit and mask macros */
#define W0_MASK(h, l) GENMASK((h) - 0, (l) - 0)
#define W1_MASK(h, l) GENMASK((h) - 32, (l) - 32)
@@ -94,8 +91,7 @@ struct hci_xfer {
};
struct {
/* DMA specific */
- dma_addr_t data_dma;
- void *bounce_buf;
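+ /* Mapping handle from i3c_master_dma_map_single() */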
+ struct i3c_dma *dma;
int ring_number;
int ring_entry;
};
diff --git a/drivers/i3c/master/mipi-i3c-hci/mipi-i3c-hci-pci.c b/drivers/i3c/master/mipi-i3c-hci/mipi-i3c-hci-pci.c
index c6c3a3ec11ea..08e6cbdf89ce 100644
--- a/drivers/i3c/master/mipi-i3c-hci/mipi-i3c-hci-pci.c
+++ b/drivers/i3c/master/mipi-i3c-hci/mipi-i3c-hci-pci.c
@@ -124,6 +124,9 @@ static void mipi_i3c_hci_pci_remove(struct pci_dev *pci)
}
static const struct pci_device_id mipi_i3c_hci_pci_devices[] = {
+ /* Wildcat Lake-U */
+ { PCI_VDEVICE(INTEL, 0x4d7c), (kernel_ulong_t)&intel_info},
+ { PCI_VDEVICE(INTEL, 0x4d6f), (kernel_ulong_t)&intel_info},
/* Panther Lake-H */
{ PCI_VDEVICE(INTEL, 0xe37c), (kernel_ulong_t)&intel_info},
{ PCI_VDEVICE(INTEL, 0xe36f), (kernel_ulong_t)&intel_info},
diff --git a/drivers/i3c/master/mipi-i3c-hci/pio.c b/drivers/i3c/master/mipi-i3c-hci/pio.c
index 2fc71e696911..710faa46a00f 100644
--- a/drivers/i3c/master/mipi-i3c-hci/pio.c
+++ b/drivers/i3c/master/mipi-i3c-hci/pio.c
@@ -213,8 +213,8 @@ static void hci_pio_cleanup(struct i3c_hci *hci)
pio_reg_write(INTR_SIGNAL_ENABLE, 0x0);
if (pio) {
- DBG("status = %#x/%#x",
- pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
+ dev_dbg(&hci->master.dev, "status = %#x/%#x",
+ pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
BUG_ON(pio->curr_xfer);
BUG_ON(pio->curr_rx);
BUG_ON(pio->curr_tx);
@@ -226,13 +226,17 @@ static void hci_pio_cleanup(struct i3c_hci *hci)
static void hci_pio_write_cmd(struct i3c_hci *hci, struct hci_xfer *xfer)
{
- DBG("cmd_desc[%d] = 0x%08x", 0, xfer->cmd_desc[0]);
- DBG("cmd_desc[%d] = 0x%08x", 1, xfer->cmd_desc[1]);
+ dev_dbg(&hci->master.dev, "cmd_desc[%d] = 0x%08x",
+ 0, xfer->cmd_desc[0]);
+ dev_dbg(&hci->master.dev, "cmd_desc[%d] = 0x%08x",
+ 1, xfer->cmd_desc[1]);
pio_reg_write(COMMAND_QUEUE_PORT, xfer->cmd_desc[0]);
pio_reg_write(COMMAND_QUEUE_PORT, xfer->cmd_desc[1]);
if (hci->cmd == &mipi_i3c_hci_cmd_v2) {
- DBG("cmd_desc[%d] = 0x%08x", 2, xfer->cmd_desc[2]);
- DBG("cmd_desc[%d] = 0x%08x", 3, xfer->cmd_desc[3]);
+ dev_dbg(&hci->master.dev, "cmd_desc[%d] = 0x%08x",
+ 2, xfer->cmd_desc[2]);
+ dev_dbg(&hci->master.dev, "cmd_desc[%d] = 0x%08x",
+ 3, xfer->cmd_desc[3]);
pio_reg_write(COMMAND_QUEUE_PORT, xfer->cmd_desc[2]);
pio_reg_write(COMMAND_QUEUE_PORT, xfer->cmd_desc[3]);
}
@@ -254,7 +258,8 @@ static bool hci_pio_do_rx(struct i3c_hci *hci, struct hci_pio_data *pio)
nr_words = min(xfer->data_left / 4, pio->rx_thresh_size);
/* extract data from FIFO */
xfer->data_left -= nr_words * 4;
- DBG("now %d left %d", nr_words * 4, xfer->data_left);
+ dev_dbg(&hci->master.dev, "now %d left %d",
+ nr_words * 4, xfer->data_left);
while (nr_words--)
*p++ = pio_reg_read(XFER_DATA_PORT);
}
@@ -269,7 +274,7 @@ static void hci_pio_do_trailing_rx(struct i3c_hci *hci,
struct hci_xfer *xfer = pio->curr_rx;
u32 *p;
- DBG("%d remaining", count);
+ dev_dbg(&hci->master.dev, "%d remaining", count);
p = xfer->data;
p += (xfer->data_len - xfer->data_left) / 4;
@@ -278,7 +283,8 @@ static void hci_pio_do_trailing_rx(struct i3c_hci *hci,
unsigned int nr_words = count / 4;
/* extract data from FIFO */
xfer->data_left -= nr_words * 4;
- DBG("now %d left %d", nr_words * 4, xfer->data_left);
+ dev_dbg(&hci->master.dev, "now %d left %d",
+ nr_words * 4, xfer->data_left);
while (nr_words--)
*p++ = pio_reg_read(XFER_DATA_PORT);
}
@@ -321,7 +327,8 @@ static bool hci_pio_do_tx(struct i3c_hci *hci, struct hci_pio_data *pio)
nr_words = min(xfer->data_left / 4, pio->tx_thresh_size);
/* push data into the FIFO */
xfer->data_left -= nr_words * 4;
- DBG("now %d left %d", nr_words * 4, xfer->data_left);
+ dev_dbg(&hci->master.dev, "now %d left %d",
+ nr_words * 4, xfer->data_left);
while (nr_words--)
pio_reg_write(XFER_DATA_PORT, *p++);
}
@@ -336,7 +343,7 @@ static bool hci_pio_do_tx(struct i3c_hci *hci, struct hci_pio_data *pio)
*/
if (!(pio_reg_read(INTR_STATUS) & STAT_TX_THLD))
return false;
- DBG("trailing %d", xfer->data_left);
+ dev_dbg(&hci->master.dev, "trailing %d", xfer->data_left);
pio_reg_write(XFER_DATA_PORT, *p);
xfer->data_left = 0;
}
@@ -481,7 +488,7 @@ static bool hci_pio_process_resp(struct i3c_hci *hci, struct hci_pio_data *pio)
u32 resp = pio_reg_read(RESPONSE_QUEUE_PORT);
unsigned int tid = RESP_TID(resp);
- DBG("resp = 0x%08x", resp);
+ dev_dbg(&hci->master.dev, "resp = 0x%08x", resp);
if (tid != xfer->cmd_tid) {
dev_err(&hci->master.dev,
"response tid=%d when expecting %d\n",
@@ -522,14 +529,15 @@ static bool hci_pio_process_resp(struct i3c_hci *hci, struct hci_pio_data *pio)
* still exists.
*/
if (pio->curr_rx == xfer) {
- DBG("short RX ?");
+ dev_dbg(&hci->master.dev, "short RX ?");
pio->curr_rx = pio->curr_rx->next_data;
} else if (pio->curr_tx == xfer) {
- DBG("short TX ?");
+ dev_dbg(&hci->master.dev, "short TX ?");
pio->curr_tx = pio->curr_tx->next_data;
} else if (xfer->data_left) {
- DBG("PIO xfer count = %d after response",
- xfer->data_left);
+ dev_dbg(&hci->master.dev,
+ "PIO xfer count = %d after response",
+ xfer->data_left);
}
pio->curr_resp = xfer->next_resp;
@@ -591,7 +599,7 @@ static int hci_pio_queue_xfer(struct i3c_hci *hci, struct hci_xfer *xfer, int n)
struct hci_xfer *prev_queue_tail;
int i;
- DBG("n = %d", n);
+ dev_dbg(&hci->master.dev, "n = %d", n);
/* link xfer instances together and initialize data count */
for (i = 0; i < n; i++) {
@@ -611,8 +619,9 @@ static int hci_pio_queue_xfer(struct i3c_hci *hci, struct hci_xfer *xfer, int n)
if (!hci_pio_process_cmd(hci, pio))
pio->enabled_irqs |= STAT_CMD_QUEUE_READY;
pio_reg_write(INTR_SIGNAL_ENABLE, pio->enabled_irqs);
- DBG("status = %#x/%#x",
- pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
+ dev_dbg(&hci->master.dev, "status = %#x/%#x",
+ pio_reg_read(INTR_STATUS),
+ pio_reg_read(INTR_SIGNAL_ENABLE));
}
spin_unlock_irq(&pio->lock);
return 0;
@@ -686,10 +695,10 @@ static bool hci_pio_dequeue_xfer(struct i3c_hci *hci, struct hci_xfer *xfer, int
int ret;
spin_lock_irq(&pio->lock);
- DBG("n=%d status=%#x/%#x", n,
- pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
- DBG("main_status = %#x/%#x",
- readl(hci->base_regs + 0x20), readl(hci->base_regs + 0x28));
+ dev_dbg(&hci->master.dev, "n=%d status=%#x/%#x", n,
+ pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
+ dev_dbg(&hci->master.dev, "main_status = %#x/%#x",
+ readl(hci->base_regs + 0x20), readl(hci->base_regs + 0x28));
ret = hci_pio_dequeue_xfer_common(hci, pio, xfer, n);
spin_unlock_irq(&pio->lock);
@@ -733,8 +742,8 @@ static void hci_pio_err(struct i3c_hci *hci, struct hci_pio_data *pio,
mipi_i3c_hci_pio_reset(hci);
mipi_i3c_hci_resume(hci);
- DBG("status=%#x/%#x",
- pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
+ dev_dbg(&hci->master.dev, "status=%#x/%#x",
+ pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
}
static void hci_pio_set_ibi_thresh(struct i3c_hci *hci,
@@ -749,7 +758,7 @@ static void hci_pio_set_ibi_thresh(struct i3c_hci *hci,
if (regval != pio->reg_queue_thresh) {
pio_reg_write(QUEUE_THLD_CTRL, regval);
pio->reg_queue_thresh = regval;
- DBG("%d", thresh_val);
+ dev_dbg(&hci->master.dev, "%d", thresh_val);
}
}
@@ -773,7 +782,8 @@ static bool hci_pio_get_ibi_segment(struct i3c_hci *hci,
/* extract the data from the IBI port */
nr_words = thresh_val;
ibi->seg_cnt -= nr_words * 4;
- DBG("now %d left %d", nr_words * 4, ibi->seg_cnt);
+ dev_dbg(&hci->master.dev, "now %d left %d",
+ nr_words * 4, ibi->seg_cnt);
while (nr_words--)
*p++ = pio_reg_read(IBI_PORT);
}
@@ -791,7 +801,7 @@ static bool hci_pio_get_ibi_segment(struct i3c_hci *hci,
hci_pio_set_ibi_thresh(hci, pio, 1);
if (!(pio_reg_read(INTR_STATUS) & STAT_IBI_STATUS_THLD))
return false;
- DBG("trailing %d", ibi->seg_cnt);
+ dev_dbg(&hci->master.dev, "trailing %d", ibi->seg_cnt);
data = pio_reg_read(IBI_PORT);
data = (__force u32) cpu_to_le32(data);
while (ibi->seg_cnt--) {
@@ -820,7 +830,7 @@ static bool hci_pio_prep_new_ibi(struct i3c_hci *hci, struct hci_pio_data *pio)
*/
ibi_status = pio_reg_read(IBI_PORT);
- DBG("status = %#x", ibi_status);
+ dev_dbg(&hci->master.dev, "status = %#x", ibi_status);
ibi->addr = FIELD_GET(IBI_TARGET_ADDR, ibi_status);
if (ibi_status & IBI_ERROR) {
dev_err(&hci->master.dev, "IBI error from %#x\n", ibi->addr);
@@ -986,7 +996,8 @@ static bool hci_pio_irq_handler(struct i3c_hci *hci)
spin_lock(&pio->lock);
status = pio_reg_read(INTR_STATUS);
- DBG("(in) status: %#x/%#x", status, pio->enabled_irqs);
+ dev_dbg(&hci->master.dev, "PIO_INTR_STATUS %#x/%#x",
+ status, pio->enabled_irqs);
status &= pio->enabled_irqs | STAT_LATENCY_WARNINGS;
if (!status) {
spin_unlock(&pio->lock);
@@ -1023,8 +1034,8 @@ static bool hci_pio_irq_handler(struct i3c_hci *hci)
pio->enabled_irqs &= ~STAT_CMD_QUEUE_READY;
pio_reg_write(INTR_SIGNAL_ENABLE, pio->enabled_irqs);
- DBG("(out) status: %#x/%#x",
- pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
+ dev_dbg(&hci->master.dev, "PIO_INTR_STATUS %#x/%#x",
+ pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
spin_unlock(&pio->lock);
return true;
}
diff --git a/drivers/i3c/master/renesas-i3c.c b/drivers/i3c/master/renesas-i3c.c
index 174d3dc5d276..275f7b924288 100644
--- a/drivers/i3c/master/renesas-i3c.c
+++ b/drivers/i3c/master/renesas-i3c.c
@@ -679,7 +679,7 @@ static int renesas_i3c_daa(struct i3c_master_controller *m)
i3c_master_add_i3c_dev_locked(m, i3c->addrs[pos]);
}
- return ret < 0 ? ret : 0;
+ return 0;
}
static bool renesas_i3c_supports_ccc_cmd(struct i3c_master_controller *m,
diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c
index 701ae165b25b..9641e66a4e5f 100644
--- a/drivers/i3c/master/svc-i3c-master.c
+++ b/drivers/i3c/master/svc-i3c-master.c
@@ -417,6 +417,7 @@ static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master,
SVC_I3C_MSTATUS_COMPLETE(val), 0, 1000);
if (ret) {
dev_err(master->dev, "Timeout when polling for COMPLETE\n");
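+ /* Hand the IBI slot back to the pool so the timeout path does not leak it. */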
+ i3c_generic_ibi_recycle_slot(data->ibi_pool, slot);
return ret;
}
@@ -517,9 +518,24 @@ static void svc_i3c_master_ibi_isr(struct svc_i3c_master *master)
*/
writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);
- /* Acknowledge the incoming interrupt with the AUTOIBI mechanism */
- writel(SVC_I3C_MCTRL_REQUEST_AUTO_IBI |
- SVC_I3C_MCTRL_IBIRESP_AUTO,
+ /*
+ * Write a REQUEST_START_ADDR request to emit the broadcast address for
+ * arbitration, instead of using AUTO_IBI.
+ *
+ * Using an AutoIBI request may leave the controller stuck in the AutoIBI
+ * state when there is a glitch on the SDA line (high->low->high):
+ * 1. SDA high->low, raising an interrupt that runs the IBI isr.
+ * 2. SDA low->high.
+ * 3. The IBI isr writes an AutoIBI request.
+ * 4. The controller does not start the AutoIBI process because SDA is not low.
+ * 5. IBIWON polling times out.
+ * 6. The controller remains in the AutoIBI state and does not accept an
+ *    EmitStop request.
+ */
+ writel(SVC_I3C_MCTRL_REQUEST_START_ADDR |
+ SVC_I3C_MCTRL_TYPE_I3C |
+ SVC_I3C_MCTRL_IBIRESP_MANUAL |
+ SVC_I3C_MCTRL_DIR(SVC_I3C_MCTRL_DIR_WRITE) |
+ SVC_I3C_MCTRL_ADDR(I3C_BROADCAST_ADDR),
master->regs + SVC_I3C_MCTRL);
/* Wait for IBIWON, should take approximately 100us */
@@ -539,10 +555,15 @@ static void svc_i3c_master_ibi_isr(struct svc_i3c_master *master)
switch (ibitype) {
case SVC_I3C_MSTATUS_IBITYPE_IBI:
dev = svc_i3c_master_dev_from_addr(master, ibiaddr);
- if (!dev || !is_events_enabled(master, SVC_I3C_EVENT_IBI))
+ if (!dev || !is_events_enabled(master, SVC_I3C_EVENT_IBI)) {
svc_i3c_master_nack_ibi(master);
- else
+ } else {
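+ /*
+ * Manual IBI response: ACK, requesting the mandatory data
+ * byte only when the target's BCR advertises an IBI payload.
+ */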
+ if (dev->info.bcr & I3C_BCR_IBI_PAYLOAD)
+ svc_i3c_master_ack_ibi(master, true);
+ else
+ svc_i3c_master_ack_ibi(master, false);
svc_i3c_master_handle_ibi(master, dev);
+ }
break;
case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
if (is_events_enabled(master, SVC_I3C_EVENT_HOTJOIN))
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 91a7b7e7c0c8..9ba83954c255 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -259,7 +259,7 @@ static struct cpuidle_state nehalem_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 3,
.target_residency = 6,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -267,7 +267,7 @@ static struct cpuidle_state nehalem_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C3",
@@ -275,7 +275,7 @@ static struct cpuidle_state nehalem_cstates[] __initdata = {
.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 20,
.target_residency = 80,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -283,7 +283,7 @@ static struct cpuidle_state nehalem_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 200,
.target_residency = 800,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -296,7 +296,7 @@ static struct cpuidle_state snb_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 2,
.target_residency = 2,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -304,7 +304,7 @@ static struct cpuidle_state snb_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C3",
@@ -312,7 +312,7 @@ static struct cpuidle_state snb_cstates[] __initdata = {
.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 80,
.target_residency = 211,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -320,7 +320,7 @@ static struct cpuidle_state snb_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 104,
.target_residency = 345,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C7",
@@ -328,7 +328,7 @@ static struct cpuidle_state snb_cstates[] __initdata = {
.flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 109,
.target_residency = 345,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -341,7 +341,7 @@ static struct cpuidle_state byt_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6N",
@@ -349,7 +349,7 @@ static struct cpuidle_state byt_cstates[] __initdata = {
.flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 300,
.target_residency = 275,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6S",
@@ -357,7 +357,7 @@ static struct cpuidle_state byt_cstates[] __initdata = {
.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 500,
.target_residency = 560,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C7",
@@ -365,7 +365,7 @@ static struct cpuidle_state byt_cstates[] __initdata = {
.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 1200,
.target_residency = 4000,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C7S",
@@ -373,7 +373,7 @@ static struct cpuidle_state byt_cstates[] __initdata = {
.flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 10000,
.target_residency = 20000,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -386,7 +386,7 @@ static struct cpuidle_state cht_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6N",
@@ -394,7 +394,7 @@ static struct cpuidle_state cht_cstates[] __initdata = {
.flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 80,
.target_residency = 275,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6S",
@@ -402,7 +402,7 @@ static struct cpuidle_state cht_cstates[] __initdata = {
.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 200,
.target_residency = 560,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C7",
@@ -410,7 +410,7 @@ static struct cpuidle_state cht_cstates[] __initdata = {
.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 1200,
.target_residency = 4000,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C7S",
@@ -418,7 +418,7 @@ static struct cpuidle_state cht_cstates[] __initdata = {
.flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 10000,
.target_residency = 20000,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -431,7 +431,7 @@ static struct cpuidle_state ivb_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -439,7 +439,7 @@ static struct cpuidle_state ivb_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C3",
@@ -447,7 +447,7 @@ static struct cpuidle_state ivb_cstates[] __initdata = {
.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 59,
.target_residency = 156,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -455,7 +455,7 @@ static struct cpuidle_state ivb_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 80,
.target_residency = 300,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C7",
@@ -463,7 +463,7 @@ static struct cpuidle_state ivb_cstates[] __initdata = {
.flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 87,
.target_residency = 300,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -476,7 +476,7 @@ static struct cpuidle_state ivt_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -484,7 +484,7 @@ static struct cpuidle_state ivt_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 80,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C3",
@@ -492,7 +492,7 @@ static struct cpuidle_state ivt_cstates[] __initdata = {
.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 59,
.target_residency = 156,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -500,7 +500,7 @@ static struct cpuidle_state ivt_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 82,
.target_residency = 300,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -513,7 +513,7 @@ static struct cpuidle_state ivt_cstates_4s[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -521,7 +521,7 @@ static struct cpuidle_state ivt_cstates_4s[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 250,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C3",
@@ -529,7 +529,7 @@ static struct cpuidle_state ivt_cstates_4s[] __initdata = {
.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 59,
.target_residency = 300,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -537,7 +537,7 @@ static struct cpuidle_state ivt_cstates_4s[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 84,
.target_residency = 400,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -550,7 +550,7 @@ static struct cpuidle_state ivt_cstates_8s[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -558,7 +558,7 @@ static struct cpuidle_state ivt_cstates_8s[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 500,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C3",
@@ -566,7 +566,7 @@ static struct cpuidle_state ivt_cstates_8s[] __initdata = {
.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 59,
.target_residency = 600,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -574,7 +574,7 @@ static struct cpuidle_state ivt_cstates_8s[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 88,
.target_residency = 700,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -587,7 +587,7 @@ static struct cpuidle_state hsw_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 2,
.target_residency = 2,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -595,7 +595,7 @@ static struct cpuidle_state hsw_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C3",
@@ -603,7 +603,7 @@ static struct cpuidle_state hsw_cstates[] __initdata = {
.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 33,
.target_residency = 100,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -611,7 +611,7 @@ static struct cpuidle_state hsw_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 133,
.target_residency = 400,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C7s",
@@ -619,7 +619,7 @@ static struct cpuidle_state hsw_cstates[] __initdata = {
.flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 166,
.target_residency = 500,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C8",
@@ -627,7 +627,7 @@ static struct cpuidle_state hsw_cstates[] __initdata = {
.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 300,
.target_residency = 900,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C9",
@@ -635,7 +635,7 @@ static struct cpuidle_state hsw_cstates[] __initdata = {
.flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 600,
.target_residency = 1800,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C10",
@@ -643,7 +643,7 @@ static struct cpuidle_state hsw_cstates[] __initdata = {
.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 2600,
.target_residency = 7700,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -655,7 +655,7 @@ static struct cpuidle_state bdw_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 2,
.target_residency = 2,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -663,7 +663,7 @@ static struct cpuidle_state bdw_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C3",
@@ -671,7 +671,7 @@ static struct cpuidle_state bdw_cstates[] __initdata = {
.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 40,
.target_residency = 100,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -679,7 +679,7 @@ static struct cpuidle_state bdw_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 133,
.target_residency = 400,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C7s",
@@ -687,7 +687,7 @@ static struct cpuidle_state bdw_cstates[] __initdata = {
.flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 166,
.target_residency = 500,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C8",
@@ -695,7 +695,7 @@ static struct cpuidle_state bdw_cstates[] __initdata = {
.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 300,
.target_residency = 900,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C9",
@@ -703,7 +703,7 @@ static struct cpuidle_state bdw_cstates[] __initdata = {
.flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 600,
.target_residency = 1800,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C10",
@@ -711,7 +711,7 @@ static struct cpuidle_state bdw_cstates[] __initdata = {
.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 2600,
.target_residency = 7700,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -724,7 +724,7 @@ static struct cpuidle_state skl_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 2,
.target_residency = 2,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -732,7 +732,7 @@ static struct cpuidle_state skl_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C3",
@@ -740,7 +740,7 @@ static struct cpuidle_state skl_cstates[] __initdata = {
.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 70,
.target_residency = 100,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -748,7 +748,7 @@ static struct cpuidle_state skl_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
.exit_latency = 85,
.target_residency = 200,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C7s",
@@ -756,7 +756,7 @@ static struct cpuidle_state skl_cstates[] __initdata = {
.flags = MWAIT2flg(0x33) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
.exit_latency = 124,
.target_residency = 800,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C8",
@@ -764,7 +764,7 @@ static struct cpuidle_state skl_cstates[] __initdata = {
.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
.exit_latency = 200,
.target_residency = 800,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C9",
@@ -772,7 +772,7 @@ static struct cpuidle_state skl_cstates[] __initdata = {
.flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
.exit_latency = 480,
.target_residency = 5000,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C10",
@@ -780,7 +780,7 @@ static struct cpuidle_state skl_cstates[] __initdata = {
.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
.exit_latency = 890,
.target_residency = 5000,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -793,7 +793,7 @@ static struct cpuidle_state skx_cstates[] __initdata = {
.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_IRQ_ENABLE,
.exit_latency = 2,
.target_residency = 2,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -801,7 +801,7 @@ static struct cpuidle_state skx_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -809,7 +809,7 @@ static struct cpuidle_state skx_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
.exit_latency = 133,
.target_residency = 600,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -822,7 +822,7 @@ static struct cpuidle_state icx_cstates[] __initdata = {
.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_IRQ_ENABLE,
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -830,7 +830,7 @@ static struct cpuidle_state icx_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 4,
.target_residency = 4,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -838,7 +838,7 @@ static struct cpuidle_state icx_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 170,
.target_residency = 600,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -861,7 +861,7 @@ static struct cpuidle_state adl_cstates[] __initdata = {
.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_UNUSABLE,
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -869,7 +869,7 @@ static struct cpuidle_state adl_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 2,
.target_residency = 4,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -877,7 +877,7 @@ static struct cpuidle_state adl_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 220,
.target_residency = 600,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C8",
@@ -885,7 +885,7 @@ static struct cpuidle_state adl_cstates[] __initdata = {
.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 280,
.target_residency = 800,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C10",
@@ -893,7 +893,7 @@ static struct cpuidle_state adl_cstates[] __initdata = {
.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 680,
.target_residency = 2000,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -906,7 +906,7 @@ static struct cpuidle_state adl_l_cstates[] __initdata = {
.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_UNUSABLE,
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -914,7 +914,7 @@ static struct cpuidle_state adl_l_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 2,
.target_residency = 4,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -922,7 +922,7 @@ static struct cpuidle_state adl_l_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 170,
.target_residency = 500,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C8",
@@ -930,7 +930,7 @@ static struct cpuidle_state adl_l_cstates[] __initdata = {
.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 200,
.target_residency = 600,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C10",
@@ -938,7 +938,7 @@ static struct cpuidle_state adl_l_cstates[] __initdata = {
.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 230,
.target_residency = 700,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -951,7 +951,7 @@ static struct cpuidle_state mtl_l_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -959,7 +959,7 @@ static struct cpuidle_state mtl_l_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 140,
.target_residency = 420,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C10",
@@ -967,7 +967,7 @@ static struct cpuidle_state mtl_l_cstates[] __initdata = {
.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 310,
.target_residency = 930,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -980,7 +980,7 @@ static struct cpuidle_state gmt_cstates[] __initdata = {
.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_UNUSABLE,
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -988,7 +988,7 @@ static struct cpuidle_state gmt_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 2,
.target_residency = 4,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -996,7 +996,7 @@ static struct cpuidle_state gmt_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 195,
.target_residency = 585,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C8",
@@ -1004,7 +1004,7 @@ static struct cpuidle_state gmt_cstates[] __initdata = {
.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 260,
.target_residency = 1040,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C10",
@@ -1012,7 +1012,7 @@ static struct cpuidle_state gmt_cstates[] __initdata = {
.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 660,
.target_residency = 1980,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -1025,7 +1025,7 @@ static struct cpuidle_state spr_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -1033,7 +1033,7 @@ static struct cpuidle_state spr_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 2,
.target_residency = 4,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -1042,7 +1042,7 @@ static struct cpuidle_state spr_cstates[] __initdata = {
CPUIDLE_FLAG_INIT_XSTATE,
.exit_latency = 290,
.target_residency = 800,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -1055,7 +1055,7 @@ static struct cpuidle_state gnr_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -1063,7 +1063,7 @@ static struct cpuidle_state gnr_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 4,
.target_residency = 4,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -1073,7 +1073,7 @@ static struct cpuidle_state gnr_cstates[] __initdata = {
CPUIDLE_FLAG_PARTIAL_HINT_MATCH,
.exit_latency = 170,
.target_residency = 650,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6P",
@@ -1083,7 +1083,7 @@ static struct cpuidle_state gnr_cstates[] __initdata = {
CPUIDLE_FLAG_PARTIAL_HINT_MATCH,
.exit_latency = 210,
.target_residency = 1000,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -1096,7 +1096,7 @@ static struct cpuidle_state gnrd_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -1104,7 +1104,7 @@ static struct cpuidle_state gnrd_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 4,
.target_residency = 4,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -1114,7 +1114,7 @@ static struct cpuidle_state gnrd_cstates[] __initdata = {
CPUIDLE_FLAG_PARTIAL_HINT_MATCH,
.exit_latency = 220,
.target_residency = 650,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6P",
@@ -1124,7 +1124,7 @@ static struct cpuidle_state gnrd_cstates[] __initdata = {
CPUIDLE_FLAG_PARTIAL_HINT_MATCH,
.exit_latency = 240,
.target_residency = 750,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -1137,7 +1137,7 @@ static struct cpuidle_state atom_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 10,
.target_residency = 20,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C2",
@@ -1145,7 +1145,7 @@ static struct cpuidle_state atom_cstates[] __initdata = {
.flags = MWAIT2flg(0x10),
.exit_latency = 20,
.target_residency = 80,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C4",
@@ -1153,7 +1153,7 @@ static struct cpuidle_state atom_cstates[] __initdata = {
.flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 100,
.target_residency = 400,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -1161,7 +1161,7 @@ static struct cpuidle_state atom_cstates[] __initdata = {
.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 140,
.target_residency = 560,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -1173,7 +1173,7 @@ static struct cpuidle_state tangier_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 1,
.target_residency = 4,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C4",
@@ -1181,7 +1181,7 @@ static struct cpuidle_state tangier_cstates[] __initdata = {
.flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 100,
.target_residency = 400,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -1189,7 +1189,7 @@ static struct cpuidle_state tangier_cstates[] __initdata = {
.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 140,
.target_residency = 560,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C7",
@@ -1197,7 +1197,7 @@ static struct cpuidle_state tangier_cstates[] __initdata = {
.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 1200,
.target_residency = 4000,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C9",
@@ -1205,7 +1205,7 @@ static struct cpuidle_state tangier_cstates[] __initdata = {
.flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 10000,
.target_residency = 20000,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -1217,7 +1217,7 @@ static struct cpuidle_state avn_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 2,
.target_residency = 2,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -1225,7 +1225,7 @@ static struct cpuidle_state avn_cstates[] __initdata = {
.flags = MWAIT2flg(0x51) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 15,
.target_residency = 45,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -1237,7 +1237,7 @@ static struct cpuidle_state knl_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 1,
.target_residency = 2,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle },
{
.name = "C6",
@@ -1245,7 +1245,7 @@ static struct cpuidle_state knl_cstates[] __initdata = {
.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 120,
.target_residency = 500,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle },
{
.enter = NULL }
@@ -1258,7 +1258,7 @@ static struct cpuidle_state bxt_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 2,
.target_residency = 2,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -1266,7 +1266,7 @@ static struct cpuidle_state bxt_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -1274,7 +1274,7 @@ static struct cpuidle_state bxt_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 133,
.target_residency = 133,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C7s",
@@ -1282,7 +1282,7 @@ static struct cpuidle_state bxt_cstates[] __initdata = {
.flags = MWAIT2flg(0x31) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 155,
.target_residency = 155,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C8",
@@ -1290,7 +1290,7 @@ static struct cpuidle_state bxt_cstates[] __initdata = {
.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 1000,
.target_residency = 1000,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C9",
@@ -1298,7 +1298,7 @@ static struct cpuidle_state bxt_cstates[] __initdata = {
.flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 2000,
.target_residency = 2000,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C10",
@@ -1306,7 +1306,7 @@ static struct cpuidle_state bxt_cstates[] __initdata = {
.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 10000,
.target_residency = 10000,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -1319,7 +1319,7 @@ static struct cpuidle_state dnv_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 2,
.target_residency = 2,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -1327,7 +1327,7 @@ static struct cpuidle_state dnv_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -1335,7 +1335,7 @@ static struct cpuidle_state dnv_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 50,
.target_residency = 500,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -1352,7 +1352,7 @@ static struct cpuidle_state snr_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 2,
.target_residency = 2,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -1360,7 +1360,7 @@ static struct cpuidle_state snr_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 15,
.target_residency = 25,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -1368,7 +1368,7 @@ static struct cpuidle_state snr_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 130,
.target_residency = 500,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -1381,7 +1381,7 @@ static struct cpuidle_state grr_cstates[] __initdata = {
.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -1389,7 +1389,7 @@ static struct cpuidle_state grr_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 2,
.target_residency = 10,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6S",
@@ -1397,7 +1397,7 @@ static struct cpuidle_state grr_cstates[] __initdata = {
.flags = MWAIT2flg(0x22) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 140,
.target_residency = 500,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -1410,7 +1410,7 @@ static struct cpuidle_state srf_cstates[] __initdata = {
.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -1418,7 +1418,7 @@ static struct cpuidle_state srf_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 2,
.target_residency = 10,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6S",
@@ -1427,7 +1427,7 @@ static struct cpuidle_state srf_cstates[] __initdata = {
CPUIDLE_FLAG_PARTIAL_HINT_MATCH,
.exit_latency = 270,
.target_residency = 700,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6SP",
@@ -1436,7 +1436,7 @@ static struct cpuidle_state srf_cstates[] __initdata = {
CPUIDLE_FLAG_PARTIAL_HINT_MATCH,
.exit_latency = 310,
.target_residency = 900,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
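
Editor's note: the repeated ".enter = &intel_idle" to ".enter = intel_idle" hunks above are a purely stylistic cleanup. In C a function designator decays to a pointer to the function, so the explicit "&" is redundant and both initializers store the same value. A minimal user-space sketch (hypothetical names) showing the equivalence:

  #include <assert.h>
  #include <stdio.h>

  /* Stand-in for the cpuidle ->enter() callback. */
  static int demo_enter(int index)
  {
  	return index;
  }

  int main(void)
  {
  	int (*a)(int) = demo_enter;	/* implicit function-to-pointer decay */
  	int (*b)(int) = &demo_enter;	/* explicit address-of */

  	assert(a == b);			/* identical pointer values */
  	printf("%d\n", a(6));		/* invokes demo_enter() */
  	return 0;
  }
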
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 3a394cd772f6..f0323f1d6f01 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -85,6 +85,7 @@ source "drivers/infiniband/hw/efa/Kconfig"
source "drivers/infiniband/hw/erdma/Kconfig"
source "drivers/infiniband/hw/hfi1/Kconfig"
source "drivers/infiniband/hw/hns/Kconfig"
+source "drivers/infiniband/hw/ionic/Kconfig"
source "drivers/infiniband/hw/irdma/Kconfig"
source "drivers/infiniband/hw/mana/Kconfig"
source "drivers/infiniband/hw/mlx4/Kconfig"
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index be0743dac3ff..61596cda2b65 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -446,63 +446,41 @@ static int addr6_resolve(struct sockaddr *src_sock,
}
#endif
+static bool is_dst_local(const struct dst_entry *dst)
+{
+ if (dst->ops->family == AF_INET)
+ return !!(dst_rtable(dst)->rt_type & RTN_LOCAL);
+ else if (dst->ops->family == AF_INET6)
+ return !!(dst_rt6_info(dst)->rt6i_flags & RTF_LOCAL);
+ else
+ return false;
+}
+
static int addr_resolve_neigh(const struct dst_entry *dst,
const struct sockaddr *dst_in,
struct rdma_dev_addr *addr,
- unsigned int ndev_flags,
u32 seq)
{
- int ret = 0;
-
- if (ndev_flags & IFF_LOOPBACK) {
+ if (is_dst_local(dst)) {
+ /* When the destination is a local entry, the source and
+ * destination addresses are the same. Skip the neighbour
+ * lookup.
+ */
memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN);
- } else {
- if (!(ndev_flags & IFF_NOARP)) {
- /* If the device doesn't do ARP internally */
- ret = fetch_ha(dst, addr, dst_in, seq);
- }
+ return 0;
}
- return ret;
-}
-
-static int copy_src_l2_addr(struct rdma_dev_addr *dev_addr,
- const struct sockaddr *dst_in,
- const struct dst_entry *dst,
- const struct net_device *ndev)
-{
- int ret = 0;
-
- if (dst->dev->flags & IFF_LOOPBACK)
- ret = rdma_translate_ip(dst_in, dev_addr);
- else
- rdma_copy_src_l2_addr(dev_addr, dst->dev);
-
- /*
- * If there's a gateway and type of device not ARPHRD_INFINIBAND,
- * we're definitely in RoCE v2 (as RoCE v1 isn't routable) set the
- * network type accordingly.
- */
- if (has_gateway(dst, dst_in->sa_family) &&
- ndev->type != ARPHRD_INFINIBAND)
- dev_addr->network = dst_in->sa_family == AF_INET ?
- RDMA_NETWORK_IPV4 :
- RDMA_NETWORK_IPV6;
- else
- dev_addr->network = RDMA_NETWORK_IB;
- return ret;
+ return fetch_ha(dst, addr, dst_in, seq);
}
static int rdma_set_src_addr_rcu(struct rdma_dev_addr *dev_addr,
- unsigned int *ndev_flags,
const struct sockaddr *dst_in,
const struct dst_entry *dst)
{
struct net_device *ndev = READ_ONCE(dst->dev);
- *ndev_flags = ndev->flags;
/* A physical device must be the RDMA device to use */
- if (ndev->flags & IFF_LOOPBACK) {
+ if (is_dst_local(dst)) {
+ int ret;
/*
* RDMA (IB/RoCE, iWarp) doesn't run on lo interface or
* loopback IP address. So if route is resolved to loopback
@@ -512,9 +490,27 @@ static int rdma_set_src_addr_rcu(struct rdma_dev_addr *dev_addr,
ndev = rdma_find_ndev_for_src_ip_rcu(dev_net(ndev), dst_in);
if (IS_ERR(ndev))
return -ENODEV;
+ ret = rdma_translate_ip(dst_in, dev_addr);
+ if (ret)
+ return ret;
+ } else {
+ rdma_copy_src_l2_addr(dev_addr, dst->dev);
}
- return copy_src_l2_addr(dev_addr, dst_in, dst, ndev);
+ /*
+ * If there's a gateway and the device type is not ARPHRD_INFINIBAND,
+ * we're definitely in RoCE v2 (as RoCE v1 isn't routable), so set
+ * the network type accordingly.
+ */
+ if (has_gateway(dst, dst_in->sa_family) &&
+ ndev->type != ARPHRD_INFINIBAND)
+ dev_addr->network = dst_in->sa_family == AF_INET ?
+ RDMA_NETWORK_IPV4 :
+ RDMA_NETWORK_IPV6;
+ else
+ dev_addr->network = RDMA_NETWORK_IB;
+
+ return 0;
}
static int set_addr_netns_by_gid_rcu(struct rdma_dev_addr *addr)
@@ -551,7 +547,6 @@ static int addr_resolve(struct sockaddr *src_in,
u32 seq)
{
struct dst_entry *dst = NULL;
- unsigned int ndev_flags = 0;
struct rtable *rt = NULL;
int ret;
@@ -588,7 +583,7 @@ static int addr_resolve(struct sockaddr *src_in,
rcu_read_unlock();
goto done;
}
- ret = rdma_set_src_addr_rcu(addr, &ndev_flags, dst_in, dst);
+ ret = rdma_set_src_addr_rcu(addr, dst_in, dst);
rcu_read_unlock();
/*
@@ -596,7 +591,7 @@ static int addr_resolve(struct sockaddr *src_in,
* only if src addr translation didn't fail.
*/
if (!ret && resolve_neigh)
- ret = addr_resolve_neigh(dst, dst_in, addr, ndev_flags, seq);
+ ret = addr_resolve_neigh(dst, dst_in, addr, seq);
if (src_in->sa_family == AF_INET)
ip_rt_put(rt);
diff --git a/drivers/infiniband/core/agent.c b/drivers/infiniband/core/agent.c
index 3bb46696731e..25a060a28301 100644
--- a/drivers/infiniband/core/agent.c
+++ b/drivers/infiniband/core/agent.c
@@ -110,8 +110,7 @@ void agent_send_response(const struct ib_mad_hdr *mad_hdr, const struct ib_grh *
agent = port_priv->agent[qpn];
ah = ib_create_ah_from_wc(agent->qp->pd, wc, grh, port_num);
if (IS_ERR(ah)) {
- dev_err(&device->dev, "ib_create_ah_from_wc error %ld\n",
- PTR_ERR(ah));
+ dev_err(&device->dev, "ib_create_ah_from_wc error %pe\n", ah);
return;
}
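
Editor's note: the agent.c hunk above swaps a raw PTR_ERR() long for the %pe printk format, which vsnprintf() renders as a symbolic name (e.g. "-ENOMEM") when handed an ERR_PTR-encoded pointer. A kernel-style sketch of the idiom, not a complete function:

  /* Log an error pointer symbolically; "%pe" decodes ERR_PTR values,
   * printing "-EINVAL" instead of "-22" or a raw address.
   */
  ah = ib_create_ah_from_wc(agent->qp->pd, wc, grh, port_num);
  if (IS_ERR(ah)) {
  	dev_err(&device->dev, "ib_create_ah_from_wc error %pe\n", ah);
  	return;
  }
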
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 92678e438ff4..01bede8ba105 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -1049,8 +1049,8 @@ static noinline void cm_destroy_id_wait_timeout(struct ib_cm_id *cm_id,
struct cm_id_private *cm_id_priv;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
- pr_err("%s: cm_id=%p timed out. state %d -> %d, refcnt=%d\n", __func__,
- cm_id, old_state, cm_id->state, refcount_read(&cm_id_priv->refcount));
+ pr_err_ratelimited("%s: cm_id=%p timed out. state %d -> %d, refcnt=%d\n", __func__,
+ cm_id, old_state, cm_id->state, refcount_read(&cm_id_priv->refcount));
}
static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 9b471548e7ae..5b2d3ae3f9fc 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -2076,6 +2076,7 @@ static void _destroy_id(struct rdma_id_private *id_priv,
kfree(id_priv->id.route.path_rec);
kfree(id_priv->id.route.path_rec_inbound);
kfree(id_priv->id.route.path_rec_outbound);
+ kfree(id_priv->id.route.service_recs);
put_net(id_priv->id.route.addr.dev_addr.net);
kfree(id_priv);
@@ -3382,13 +3383,18 @@ err1:
int rdma_resolve_route(struct rdma_cm_id *id, unsigned long timeout_ms)
{
struct rdma_id_private *id_priv;
+ enum rdma_cm_state state;
int ret;
if (!timeout_ms)
return -EINVAL;
id_priv = container_of(id, struct rdma_id_private, id);
- if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY))
+ state = id_priv->state;
+ if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
+ RDMA_CM_ROUTE_QUERY) &&
+ !cma_comp_exch(id_priv, RDMA_CM_ADDRINFO_RESOLVED,
+ RDMA_CM_ROUTE_QUERY))
return -EINVAL;
cma_id_get(id_priv);
@@ -3409,7 +3415,7 @@ int rdma_resolve_route(struct rdma_cm_id *id, unsigned long timeout_ms)
return 0;
err:
- cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED);
+ cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, state);
cma_id_put(id_priv);
return ret;
}
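
Editor's note: rdma_resolve_route() now accepts entry from either RDMA_CM_ADDR_RESOLVED or the new RDMA_CM_ADDRINFO_RESOLVED state, and the error path rolls back to the state captured before the transition rather than hard-coding ADDR_RESOLVED. A small user-space sketch of the compare-and-exchange pattern (the real cma_comp_exch() does the same under a spinlock):

  #include <stdatomic.h>
  #include <stdbool.h>
  #include <stdio.h>

  enum state { ADDR_BOUND, ADDR_RESOLVED, ADDRINFO_RESOLVED, ROUTE_QUERY };

  /* Transition only from an allowed source state; report whether the
   * exchange happened.
   */
  static bool comp_exch(_Atomic int *st, int from, int to)
  {
  	return atomic_compare_exchange_strong(st, &from, to);
  }

  int main(void)
  {
  	_Atomic int st = ADDRINFO_RESOLVED;
  	int old = st;	/* remember the origin for rollback */

  	if (!comp_exch(&st, ADDR_RESOLVED, ROUTE_QUERY) &&
  	    !comp_exch(&st, ADDRINFO_RESOLVED, ROUTE_QUERY)) {
  		fprintf(stderr, "invalid state\n");
  		return 1;
  	}
  	/* on error, roll back to the saved state, not a fixed one */
  	comp_exch(&st, ROUTE_QUERY, old);
  	return 0;
  }
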
@@ -5506,3 +5512,129 @@ static void __exit cma_cleanup(void)
module_init(cma_init);
module_exit(cma_cleanup);
+
+static void cma_query_ib_service_handler(int status,
+ struct sa_service_rec *recs,
+ unsigned int num_recs, void *context)
+{
+ struct cma_work *work = context;
+ struct rdma_id_private *id_priv = work->id;
+ struct sockaddr_ib *addr;
+
+ if (status)
+ goto fail;
+
+ if (!num_recs) {
+ status = -ENOENT;
+ goto fail;
+ }
+
+ if (id_priv->id.route.service_recs) {
+ status = -EALREADY;
+ goto fail;
+ }
+
+ id_priv->id.route.service_recs =
+ kmalloc_array(num_recs, sizeof(*recs), GFP_KERNEL);
+ if (!id_priv->id.route.service_recs) {
+ status = -ENOMEM;
+ goto fail;
+ }
+
+ id_priv->id.route.num_service_recs = num_recs;
+ memcpy(id_priv->id.route.service_recs, recs, sizeof(*recs) * num_recs);
+
+ addr = (struct sockaddr_ib *)&id_priv->id.route.addr.dst_addr;
+ addr->sib_family = AF_IB;
+ addr->sib_addr = *(struct ib_addr *)&recs->gid;
+ addr->sib_pkey = recs->pkey;
+ addr->sib_sid = recs->id;
+ rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr,
+ (union ib_gid *)&addr->sib_addr);
+ ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr,
+ ntohs(addr->sib_pkey));
+
+ queue_work(cma_wq, &work->work);
+ return;
+
+fail:
+ work->old_state = RDMA_CM_ADDRINFO_QUERY;
+ work->new_state = RDMA_CM_ADDR_BOUND;
+ work->event.event = RDMA_CM_EVENT_ADDRINFO_ERROR;
+ work->event.status = status;
+ pr_debug_ratelimited(
+ "RDMA CM: SERVICE_ERROR: failed to query service record. status %d\n",
+ status);
+ queue_work(cma_wq, &work->work);
+}
+
+static int cma_resolve_ib_service(struct rdma_id_private *id_priv,
+ struct rdma_ucm_ib_service *ibs)
+{
+ struct sa_service_rec sr = {};
+ ib_sa_comp_mask mask = 0;
+ struct cma_work *work;
+
+ work = kzalloc(sizeof(*work), GFP_KERNEL);
+ if (!work)
+ return -ENOMEM;
+
+ cma_id_get(id_priv);
+
+ work->id = id_priv;
+ INIT_WORK(&work->work, cma_work_handler);
+ work->old_state = RDMA_CM_ADDRINFO_QUERY;
+ work->new_state = RDMA_CM_ADDRINFO_RESOLVED;
+ work->event.event = RDMA_CM_EVENT_ADDRINFO_RESOLVED;
+
+ if (ibs->flags & RDMA_USER_CM_IB_SERVICE_FLAG_ID) {
+ sr.id = cpu_to_be64(ibs->service_id);
+ mask |= IB_SA_SERVICE_REC_SERVICE_ID;
+ }
+ if (ibs->flags & RDMA_USER_CM_IB_SERVICE_FLAG_NAME) {
+ strscpy(sr.name, ibs->service_name, sizeof(sr.name));
+ mask |= IB_SA_SERVICE_REC_SERVICE_NAME;
+ }
+
+ id_priv->query_id = ib_sa_service_rec_get(&sa_client,
+ id_priv->id.device,
+ id_priv->id.port_num,
+ &sr, mask,
+ 2000, GFP_KERNEL,
+ cma_query_ib_service_handler,
+ work, &id_priv->query);
+
+ if (id_priv->query_id < 0) {
+ cma_id_put(id_priv);
+ kfree(work);
+ return id_priv->query_id;
+ }
+
+ return 0;
+}
+
+int rdma_resolve_ib_service(struct rdma_cm_id *id,
+ struct rdma_ucm_ib_service *ibs)
+{
+ struct rdma_id_private *id_priv;
+ int ret;
+
+ id_priv = container_of(id, struct rdma_id_private, id);
+ if (!id_priv->cma_dev ||
+ !cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDRINFO_QUERY))
+ return -EINVAL;
+
+ if (rdma_cap_ib_sa(id->device, id->port_num))
+ ret = cma_resolve_ib_service(id_priv, ibs);
+ else
+ ret = -EOPNOTSUPP;
+
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ cma_comp_exch(id_priv, RDMA_CM_ADDRINFO_QUERY, RDMA_CM_ADDR_BOUND);
+ return ret;
+}
+EXPORT_SYMBOL(rdma_resolve_ib_service);
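
Editor's note: rdma_resolve_ib_service() lets a consumer resolve an AF_IB destination from an SA service record looked up by ID or name. A hedged sketch of a caller (hypothetical consumer code, and the service ID value is made up; error handling trimmed):

  /* Sketch: resolve a destination via an SA ServiceRecord lookup.
   * The cm_id must already be bound to a device/port (ADDR_BOUND).
   */
  static int demo_lookup(struct rdma_cm_id *id)
  {
  	struct rdma_ucm_ib_service ibs = {
  		.service_id = 0x100002C900000000ULL,	/* hypothetical SID */
  		.flags = RDMA_USER_CM_IB_SERVICE_FLAG_ID,
  	};

  	/* Completion arrives later as RDMA_CM_EVENT_ADDRINFO_RESOLVED
  	 * (or RDMA_CM_EVENT_ADDRINFO_ERROR) on the cm_id's handler.
  	 */
  	return rdma_resolve_ib_service(id, &ibs);
  }
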
diff --git a/drivers/infiniband/core/cma_priv.h b/drivers/infiniband/core/cma_priv.h
index b7354c94cf1b..c604b601f4d9 100644
--- a/drivers/infiniband/core/cma_priv.h
+++ b/drivers/infiniband/core/cma_priv.h
@@ -47,7 +47,9 @@ enum rdma_cm_state {
RDMA_CM_ADDR_BOUND,
RDMA_CM_LISTEN,
RDMA_CM_DEVICE_REMOVAL,
- RDMA_CM_DESTROYING
+ RDMA_CM_DESTROYING,
+ RDMA_CM_ADDRINFO_QUERY,
+ RDMA_CM_ADDRINFO_RESOLVED
};
struct rdma_id_private {
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 3145cb34a1d2..b4f3c835844a 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -1543,7 +1543,7 @@ static void __ib_unregister_device(struct ib_device *ib_dev)
/*
* We have a registration lock so that all the calls to unregister are
- * fully fenced, once any unregister returns the device is truely
+ * fully fenced, once any unregister returns the device is truly
* unregistered even if multiple callers are unregistering it at the
* same time. This also interacts with the registration flow and
* provides sane semantics if register and unregister are racing.
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 53571e6b3162..c23e9c847314 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -107,6 +107,8 @@ struct ib_sa_device {
struct ib_sa_query {
void (*callback)(struct ib_sa_query *sa_query, int status,
struct ib_sa_mad *mad);
+ void (*rmpp_callback)(struct ib_sa_query *sa_query, int status,
+ struct ib_mad_recv_wc *mad);
void (*release)(struct ib_sa_query *);
struct ib_sa_client *client;
struct ib_sa_port *port;
@@ -150,6 +152,13 @@ struct ib_sa_mcmember_query {
struct ib_sa_query sa_query;
};
+struct ib_sa_service_query {
+ void (*callback)(int status, struct sa_service_rec *rec,
+ unsigned int num_services, void *context);
+ void *context;
+ struct ib_sa_query sa_query;
+};
+
static LIST_HEAD(ib_nl_request_list);
static DEFINE_SPINLOCK(ib_nl_request_lock);
static atomic_t ib_nl_sa_request_seq;
@@ -684,6 +693,58 @@ static const struct ib_field guidinfo_rec_table[] = {
.size_bits = 512 },
};
+#define SERVICE_REC_FIELD(field) \
+ .struct_offset_bytes = offsetof(struct sa_service_rec, field), \
+ .struct_size_bytes = sizeof_field(struct sa_service_rec, field), \
+ .field_name = "sa_service_rec:" #field
+
+static const struct ib_field service_rec_table[] = {
+ { SERVICE_REC_FIELD(id),
+ .offset_words = 0,
+ .offset_bits = 0,
+ .size_bits = 64 },
+ { SERVICE_REC_FIELD(gid),
+ .offset_words = 2,
+ .offset_bits = 0,
+ .size_bits = 128 },
+ { SERVICE_REC_FIELD(pkey),
+ .offset_words = 6,
+ .offset_bits = 0,
+ .size_bits = 16 },
+ { RESERVED,
+ .offset_words = 6,
+ .offset_bits = 16,
+ .size_bits = 16 },
+ { SERVICE_REC_FIELD(lease),
+ .offset_words = 7,
+ .offset_bits = 0,
+ .size_bits = 32 },
+ { SERVICE_REC_FIELD(key),
+ .offset_words = 8,
+ .offset_bits = 0,
+ .size_bits = 128 },
+ { SERVICE_REC_FIELD(name),
+ .offset_words = 12,
+ .offset_bits = 0,
+ .size_bits = 512 },
+ { SERVICE_REC_FIELD(data_8),
+ .offset_words = 28,
+ .offset_bits = 0,
+ .size_bits = 128 },
+ { SERVICE_REC_FIELD(data_16),
+ .offset_words = 32,
+ .offset_bits = 0,
+ .size_bits = 128 },
+ { SERVICE_REC_FIELD(data_32),
+ .offset_words = 36,
+ .offset_bits = 0,
+ .size_bits = 128 },
+ { SERVICE_REC_FIELD(data_64),
+ .offset_words = 40,
+ .offset_bits = 0,
+ .size_bits = 128 },
+};
+
#define RDMA_PRIMARY_PATH_MAX_REC_NUM 3
static inline void ib_sa_disable_local_svc(struct ib_sa_query *query)
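
Editor's note: in these ib_field tables, offset_words counts 32-bit words from the start of the packed attribute, offset_bits gives the position inside that word, and size_bits the field width; ib_pack()/ib_unpack() walk the table to convert between the host struct and the big-endian wire image. A quick consistency check under those definitions (data_64 is the last field):

  #include <assert.h>

  int main(void)
  {
  	/* data_64: offset_words = 40, size_bits = 128 (four words) */
  	unsigned int end_words = 40 + 128 / 32;

  	/* 44 words * 4 bytes = 176 bytes, matching the
  	 * IB_SERVICE_REC_SZ constant used by the RMPP reassembly
  	 * further down in this patch.
  	 */
  	assert(end_words * 4 == 176);
  	return 0;
  }
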
@@ -1013,6 +1074,8 @@ int ib_nl_handle_set_timeout(struct sk_buff *skb,
if (timeout > IB_SA_LOCAL_SVC_TIMEOUT_MAX)
timeout = IB_SA_LOCAL_SVC_TIMEOUT_MAX;
+ spin_lock_irqsave(&ib_nl_request_lock, flags);
+
delta = timeout - sa_local_svc_timeout_ms;
if (delta < 0)
abs_delta = -delta;
@@ -1020,7 +1083,6 @@ int ib_nl_handle_set_timeout(struct sk_buff *skb,
abs_delta = delta;
if (delta != 0) {
- spin_lock_irqsave(&ib_nl_request_lock, flags);
sa_local_svc_timeout_ms = timeout;
list_for_each_entry(query, &ib_nl_request_list, list) {
if (delta < 0 && abs_delta > query->timeout)
@@ -1038,9 +1100,10 @@ int ib_nl_handle_set_timeout(struct sk_buff *skb,
if (delay)
mod_delayed_work(ib_nl_wq, &ib_nl_timed_work,
(unsigned long)delay);
- spin_unlock_irqrestore(&ib_nl_request_lock, flags);
}
+ spin_unlock_irqrestore(&ib_nl_request_lock, flags);
+
settimeout_out:
return 0;
}
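
Editor's note: the hunk above widens the ib_nl_request_lock critical section so that reading sa_local_svc_timeout_ms, computing the delta, and writing the new value happen atomically with respect to concurrent netlink requests; previously the read-and-compute step ran unlocked. The resulting shape, sketched:

  spin_lock_irqsave(&ib_nl_request_lock, flags);

  delta = timeout - sa_local_svc_timeout_ms;	/* now computed under the lock */
  if (delta != 0) {
  	sa_local_svc_timeout_ms = timeout;
  	/* ... rescale the timeouts of queued requests ... */
  }

  spin_unlock_irqrestore(&ib_nl_request_lock, flags);
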
@@ -1390,6 +1453,20 @@ void ib_sa_pack_path(struct sa_path_rec *rec, void *attribute)
}
EXPORT_SYMBOL(ib_sa_pack_path);
+void ib_sa_pack_service(struct sa_service_rec *rec, void *attribute)
+{
+ ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table), rec,
+ attribute);
+}
+EXPORT_SYMBOL(ib_sa_pack_service);
+
+void ib_sa_unpack_service(void *attribute, struct sa_service_rec *rec)
+{
+ ib_unpack(service_rec_table, ARRAY_SIZE(service_rec_table), attribute,
+ rec);
+}
+EXPORT_SYMBOL(ib_sa_unpack_service);
+
static bool ib_sa_opa_pathrecord_support(struct ib_sa_client *client,
struct ib_sa_device *sa_dev,
u32 port_num)
@@ -1479,6 +1556,68 @@ static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
}
}
+#define IB_SA_DATA_OFFS 56
+#define IB_SERVICE_REC_SZ 176
+
+static void ib_unpack_service_rmpp(struct sa_service_rec *rec,
+ struct ib_mad_recv_wc *mad_wc,
+ int num_services)
+{
+ unsigned int cp_sz, data_i, data_size, rec_i = 0, buf_i = 0;
+ struct ib_mad_recv_buf *mad_buf;
+ u8 buf[IB_SERVICE_REC_SZ];
+ u8 *data;
+
+ data_size = sizeof(((struct ib_sa_mad *) mad_buf->mad)->data);
+
+ list_for_each_entry(mad_buf, &mad_wc->rmpp_list, list) {
+ data = ((struct ib_sa_mad *) mad_buf->mad)->data;
+ data_i = 0;
+ while (data_i < data_size && rec_i < num_services) {
+ cp_sz = min(IB_SERVICE_REC_SZ - buf_i,
+ data_size - data_i);
+ memcpy(buf + buf_i, data + data_i, cp_sz);
+ data_i += cp_sz;
+ buf_i += cp_sz;
+ if (buf_i == IB_SERVICE_REC_SZ) {
+ ib_sa_unpack_service(buf, rec + rec_i);
+ buf_i = 0;
+ rec_i++;
+ }
+ }
+ }
+}
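
Editor's note: ib_unpack_service_rmpp() streams the RMPP segments through a 176-byte staging buffer because a ServiceRecord can straddle a segment boundary: each SA MAD carries a 200-byte data payload (256-byte MAD minus the 56-byte IB_SA_DATA_OFFS header), so records rarely end where a segment does. A self-contained walk-through of the same copy loop, with the sizes hard-coded to the constants above:

  #include <stdio.h>

  #define REC_SZ 176	/* IB_SERVICE_REC_SZ */
  #define SEG_SZ 200	/* SA MAD data payload per segment */

  int main(void)
  {
  	unsigned int buf_i = 0, rec = 0, seg, data_i, cp = 0;

  	for (seg = 0; seg < 3; seg++) {
  		for (data_i = 0; data_i < SEG_SZ; data_i += cp) {
  			cp = REC_SZ - buf_i;		/* room left in record */
  			if (cp > SEG_SZ - data_i)	/* bytes left in segment */
  				cp = SEG_SZ - data_i;
  			buf_i += cp;
  			if (buf_i == REC_SZ) {
  				printf("record %u done in segment %u\n",
  				       rec++, seg);
  				buf_i = 0;
  			}
  		}
  	}
  	/* record 0 completes in segment 0, record 1 in segment 1, ... */
  	return 0;
  }
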
+
+static void ib_sa_service_rec_callback(struct ib_sa_query *sa_query, int status,
+ struct ib_mad_recv_wc *mad_wc)
+{
+ struct ib_sa_service_query *query =
+ container_of(sa_query, struct ib_sa_service_query, sa_query);
+ struct sa_service_rec *rec;
+ int num_services;
+
+ if (!mad_wc || !mad_wc->recv_buf.mad) {
+ query->callback(status, NULL, 0, query->context);
+ return;
+ }
+
+ num_services = (mad_wc->mad_len - IB_SA_DATA_OFFS) / IB_SERVICE_REC_SZ;
+ if (!num_services) {
+ query->callback(-ENODATA, NULL, 0, query->context);
+ return;
+ }
+
+ rec = kmalloc_array(num_services, sizeof(*rec), GFP_KERNEL);
+ if (!rec) {
+ query->callback(-ENOMEM, NULL, 0, query->context);
+ return;
+ }
+
+ ib_unpack_service_rmpp(rec, mad_wc, num_services);
+ query->callback(status, rec, num_services, query->context);
+ kfree(rec);
+}
+
static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
{
struct ib_sa_path_query *query =
@@ -1488,6 +1627,14 @@ static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
kfree(query);
}
+static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
+{
+ struct ib_sa_service_query *query =
+ container_of(sa_query, struct ib_sa_service_query, sa_query);
+
+ kfree(query);
+}
+
/**
* ib_sa_path_rec_get - Start a Path get query
* @client:SA client
@@ -1618,6 +1765,101 @@ err1:
}
EXPORT_SYMBOL(ib_sa_path_rec_get);
+/**
+ * ib_sa_service_rec_get - Start a Service get query
+ * @client: SA client
+ * @device: device to send query on
+ * @port_num: port number to send query on
+ * @rec: Service Record to send in query
+ * @comp_mask: component mask to send in query
+ * @timeout_ms: time to wait for response
+ * @gfp_mask: GFP mask to use for internal allocations
+ * @callback: function called when query completes, times out or is
+ * canceled
+ * @context: opaque user context passed to callback
+ * @sa_query: query context, used to cancel query
+ *
+ * Send a Service Record Get query to the SA to look up service
+ * records. The callback function will be called when the query
+ * completes (or fails); status is 0 for a successful response, -EINTR
+ * if the query is canceled, -ETIMEDOUT if the query timed out, or
+ * -EIO if an error occurred sending the query. The resp parameter of
+ * the callback is only valid if status is 0.
+ *
+ * If the return value of ib_sa_service_rec_get() is negative, it is an
+ * error code. Otherwise it is a query ID that can be used to cancel
+ * the query.
+ */
+int ib_sa_service_rec_get(struct ib_sa_client *client,
+ struct ib_device *device, u32 port_num,
+ struct sa_service_rec *rec,
+ ib_sa_comp_mask comp_mask,
+ unsigned long timeout_ms, gfp_t gfp_mask,
+ void (*callback)(int status,
+ struct sa_service_rec *resp,
+ unsigned int num_services,
+ void *context),
+ void *context, struct ib_sa_query **sa_query)
+{
+ struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
+ struct ib_sa_service_query *query;
+ struct ib_mad_agent *agent;
+ struct ib_sa_port *port;
+ struct ib_sa_mad *mad;
+ int ret;
+
+ if (!sa_dev)
+ return -ENODEV;
+
+ port = &sa_dev->port[port_num - sa_dev->start_port];
+ agent = port->agent;
+
+ query = kzalloc(sizeof(*query), gfp_mask);
+ if (!query)
+ return -ENOMEM;
+
+ query->sa_query.port = port;
+
+ ret = alloc_mad(&query->sa_query, gfp_mask);
+ if (ret)
+ goto err1;
+
+ ib_sa_client_get(client);
+ query->sa_query.client = client;
+ query->callback = callback;
+ query->context = context;
+
+ mad = query->sa_query.mad_buf->mad;
+ init_mad(&query->sa_query, agent);
+
+ query->sa_query.rmpp_callback = callback ? ib_sa_service_rec_callback :
+ NULL;
+ query->sa_query.release = ib_sa_service_rec_release;
+ mad->mad_hdr.method = IB_MGMT_METHOD_GET_TABLE;
+ mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_SERVICE_REC);
+ mad->sa_hdr.comp_mask = comp_mask;
+
+ ib_sa_pack_service(rec, mad->data);
+
+ *sa_query = &query->sa_query;
+ query->sa_query.mad_buf->context[1] = rec;
+
+ ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
+ if (ret < 0)
+ goto err2;
+
+ return ret;
+
+err2:
+ *sa_query = NULL;
+ ib_sa_client_put(query->sa_query.client);
+ free_mad(&query->sa_query);
+err1:
+ kfree(query);
+ return ret;
+}
+EXPORT_SYMBOL(ib_sa_service_rec_get);
+
static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
int status, struct ib_sa_mad *mad)
{
@@ -1987,23 +2229,29 @@ static void send_handler(struct ib_mad_agent *agent,
{
struct ib_sa_query *query = mad_send_wc->send_buf->context[0];
unsigned long flags;
+ int status = 0;
- if (query->callback)
+ if (query->callback || query->rmpp_callback) {
switch (mad_send_wc->status) {
case IB_WC_SUCCESS:
/* No callback -- already got recv */
break;
case IB_WC_RESP_TIMEOUT_ERR:
- query->callback(query, -ETIMEDOUT, NULL);
+ status = -ETIMEDOUT;
break;
case IB_WC_WR_FLUSH_ERR:
- query->callback(query, -EINTR, NULL);
+ status = -EINTR;
break;
default:
- query->callback(query, -EIO, NULL);
+ status = -EIO;
break;
}
+ if (status)
+ query->callback ? query->callback(query, status, NULL) :
+ query->rmpp_callback(query, status, NULL);
+ }
+
xa_lock_irqsave(&queries, flags);
__xa_erase(&queries, query->id);
xa_unlock_irqrestore(&queries, flags);
@@ -2019,17 +2267,25 @@ static void recv_handler(struct ib_mad_agent *mad_agent,
struct ib_mad_recv_wc *mad_recv_wc)
{
struct ib_sa_query *query;
+ struct ib_mad *mad;
+
if (!send_buf)
return;
query = send_buf->context[0];
- if (query->callback) {
+ mad = mad_recv_wc->recv_buf.mad;
+
+ if (query->rmpp_callback) {
+ if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
+ query->rmpp_callback(query, mad->mad_hdr.status ?
+ -EINVAL : 0, mad_recv_wc);
+ else
+ query->rmpp_callback(query, -EIO, NULL);
+ } else if (query->callback) {
if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
- query->callback(query,
- mad_recv_wc->recv_buf.mad->mad_hdr.status ?
- -EINVAL : 0,
- (struct ib_sa_mad *) mad_recv_wc->recv_buf.mad);
+ query->callback(query, mad->mad_hdr.status ?
+ -EINVAL : 0, (struct ib_sa_mad *)mad);
else
query->callback(query, -EIO, NULL);
}
@@ -2181,8 +2437,9 @@ static int ib_sa_add_one(struct ib_device *device)
sa_dev->port[i].agent =
ib_register_mad_agent(device, i + s, IB_QPT_GSI,
- NULL, 0, send_handler,
- recv_handler, sa_dev, 0);
+ NULL, IB_MGMT_RMPP_VERSION,
+ send_handler, recv_handler,
+ sa_dev, 0);
if (IS_ERR(sa_dev->port[i].agent)) {
ret = PTR_ERR(sa_dev->port[i].agent);
goto err;
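
Editor's note: enabling RMPP on the SA MAD agent is what allows GetTableResponse replies larger than a single 256-byte MAD; the registration simply passes IB_MGMT_RMPP_VERSION where 0 previously disabled RMPP. Sketch of the call as used above (surrounding variables assumed from context):

  /* Register a GSI MAD agent with RMPP enabled, so multi-segment SA
   * GetTable responses arrive as one ib_mad_recv_wc whose rmpp_list
   * chains the segments. A NULL mad_reg_req means the agent only
   * receives responses to its own sends.
   */
  agent = ib_register_mad_agent(device, port, IB_QPT_GSI,
  			      NULL, IB_MGMT_RMPP_VERSION,
  			      send_handler, recv_handler,
  			      sa_dev, 0);
  if (IS_ERR(agent))
  	return PTR_ERR(agent);
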
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 6e700b974033..f86ece701db6 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -282,6 +282,10 @@ static struct ucma_event *ucma_create_uevent(struct ucma_context *ctx,
}
uevent->resp.event = event->event;
uevent->resp.status = event->status;
+
+ if (event->event == RDMA_CM_EVENT_ADDRINFO_RESOLVED)
+ goto out;
+
if (ctx->cm_id->qp_type == IB_QPT_UD)
ucma_copy_ud_event(ctx->cm_id->device, &uevent->resp.param.ud,
&event->param.ud);
@@ -289,6 +293,7 @@ static struct ucma_event *ucma_create_uevent(struct ucma_context *ctx,
ucma_copy_conn_event(&uevent->resp.param.conn,
&event->param.conn);
+out:
uevent->resp.ece.vendor_id = event->ece.vendor_id;
uevent->resp.ece.attr_mod = event->ece.attr_mod;
return uevent;
@@ -728,6 +733,28 @@ static ssize_t ucma_resolve_addr(struct ucma_file *file,
return ret;
}
+static ssize_t ucma_resolve_ib_service(struct ucma_file *file,
+ const char __user *inbuf, int in_len,
+ int out_len)
+{
+ struct rdma_ucm_resolve_ib_service cmd;
+ struct ucma_context *ctx;
+ int ret;
+
+ if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+ return -EFAULT;
+
+ ctx = ucma_get_ctx(file, cmd.id);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ mutex_lock(&ctx->mutex);
+ ret = rdma_resolve_ib_service(ctx->cm_id, &cmd.ibs);
+ mutex_unlock(&ctx->mutex);
+ ucma_put_ctx(ctx);
+ return ret;
+}
+
static ssize_t ucma_resolve_route(struct ucma_file *file,
const char __user *inbuf,
int in_len, int out_len)
@@ -994,6 +1021,43 @@ static ssize_t ucma_query_gid(struct ucma_context *ctx,
return ret;
}
+static ssize_t ucma_query_ib_service(struct ucma_context *ctx,
+ void __user *response, int out_len)
+{
+ struct rdma_ucm_query_ib_service_resp *resp;
+ int n, ret = 0;
+
+ if (out_len < sizeof(struct rdma_ucm_query_ib_service_resp))
+ return -ENOSPC;
+
+ if (!ctx->cm_id->route.service_recs)
+ return -ENODATA;
+
+ resp = kzalloc(out_len, GFP_KERNEL);
+ if (!resp)
+ return -ENOMEM;
+
+ resp->num_service_recs = ctx->cm_id->route.num_service_recs;
+
+ n = (out_len - sizeof(struct rdma_ucm_query_ib_service_resp)) /
+ sizeof(struct ib_user_service_rec);
+
+ if (!n)
+ goto out;
+
+ if (n > ctx->cm_id->route.num_service_recs)
+ n = ctx->cm_id->route.num_service_recs;
+
+ memcpy(resp->recs, ctx->cm_id->route.service_recs,
+ sizeof(*resp->recs) * n);
+ if (copy_to_user(response, resp, struct_size(resp, recs, n)))
+ ret = -EFAULT;
+
+out:
+ kfree(resp);
+ return ret;
+}
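
Editor's note: the copy back to user space is bounded with struct_size(resp, recs, n), i.e. the fixed header plus n trailing flexible-array elements, with integer-overflow checking folded in. A user-space shaped sketch of the same layout (hypothetical types):

  #include <stdint.h>
  #include <stdio.h>
  #include <stdlib.h>

  struct rec { uint64_t id; };

  struct resp {
  	uint32_t num;
  	struct rec recs[];	/* flexible array member */
  };

  int main(void)
  {
  	unsigned int n = 3;
  	/* what the kernel's struct_size(resp, recs, n) computes,
  	 * minus its overflow saturation
  	 */
  	size_t sz = sizeof(struct resp) + n * sizeof(struct rec);
  	struct resp *r = calloc(1, sz);

  	if (!r)
  		return 1;
  	r->num = n;
  	printf("%zu bytes for %u records\n", sz, n);
  	free(r);
  	return 0;
  }
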
+
static ssize_t ucma_query(struct ucma_file *file,
const char __user *inbuf,
int in_len, int out_len)
@@ -1022,6 +1086,9 @@ static ssize_t ucma_query(struct ucma_file *file,
case RDMA_USER_CM_QUERY_GID:
ret = ucma_query_gid(ctx, response, out_len);
break;
+ case RDMA_USER_CM_QUERY_IB_SERVICE:
+ ret = ucma_query_ib_service(ctx, response, out_len);
+ break;
default:
ret = -ENOSYS;
break;
@@ -1678,6 +1745,55 @@ err_unlock:
return ret;
}
+static ssize_t ucma_write_cm_event(struct ucma_file *file,
+ const char __user *inbuf, int in_len,
+ int out_len)
+{
+ struct rdma_ucm_write_cm_event cmd;
+ struct rdma_cm_event event = {};
+ struct ucma_event *uevent;
+ struct ucma_context *ctx;
+ int ret = 0;
+
+ if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+ return -EFAULT;
+
+ if ((cmd.event != RDMA_CM_EVENT_USER) &&
+ (cmd.event != RDMA_CM_EVENT_INTERNAL))
+ return -EINVAL;
+
+ ctx = ucma_get_ctx(file, cmd.id);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ event.event = cmd.event;
+ event.status = cmd.status;
+ event.param.arg = cmd.param.arg;
+
+ uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
+ if (!uevent) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ uevent->ctx = ctx;
+ uevent->resp.uid = ctx->uid;
+ uevent->resp.id = ctx->id;
+ uevent->resp.event = event.event;
+ uevent->resp.status = event.status;
+ memcpy(uevent->resp.param.arg32, &event.param.arg,
+ sizeof(event.param.arg));
+
+ mutex_lock(&ctx->file->mut);
+ list_add_tail(&uevent->list, &ctx->file->event_list);
+ mutex_unlock(&ctx->file->mut);
+ wake_up_interruptible(&ctx->file->poll_wait);
+
+out:
+ ucma_put_ctx(ctx);
+ return ret;
+}
+
static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
const char __user *inbuf,
int in_len, int out_len) = {
@@ -1703,7 +1819,9 @@ static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
[RDMA_USER_CM_CMD_QUERY] = ucma_query,
[RDMA_USER_CM_CMD_BIND] = ucma_bind,
[RDMA_USER_CM_CMD_RESOLVE_ADDR] = ucma_resolve_addr,
- [RDMA_USER_CM_CMD_JOIN_MCAST] = ucma_join_multicast
+ [RDMA_USER_CM_CMD_JOIN_MCAST] = ucma_join_multicast,
+ [RDMA_USER_CM_CMD_RESOLVE_IB_SERVICE] = ucma_resolve_ib_service,
+ [RDMA_USER_CM_CMD_WRITE_CM_EVENT] = ucma_write_cm_event,
};
static ssize_t ucma_write(struct file *filp, const char __user *buf,
diff --git a/drivers/infiniband/hw/Makefile b/drivers/infiniband/hw/Makefile
index df61b2299ec0..b706dc0d0263 100644
--- a/drivers/infiniband/hw/Makefile
+++ b/drivers/infiniband/hw/Makefile
@@ -14,3 +14,4 @@ obj-$(CONFIG_INFINIBAND_HNS_HIP08) += hns/
obj-$(CONFIG_INFINIBAND_QEDR) += qedr/
obj-$(CONFIG_INFINIBAND_BNXT_RE) += bnxt_re/
obj-$(CONFIG_INFINIBAND_ERDMA) += erdma/
+obj-$(CONFIG_INFINIBAND_IONIC) += ionic/
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index 6df5a2738c95..3485e495ac6a 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -172,9 +172,9 @@ struct bnxt_re_dev {
struct list_head list;
unsigned long flags;
#define BNXT_RE_FLAG_NETDEV_REGISTERED 0
+#define BNXT_RE_FLAG_STATS_CTX3_ALLOC 1
#define BNXT_RE_FLAG_HAVE_L2_REF 3
#define BNXT_RE_FLAG_RCFW_CHANNEL_EN 4
-#define BNXT_RE_FLAG_QOS_WORK_REG 5
#define BNXT_RE_FLAG_RESOURCES_ALLOCATED 7
#define BNXT_RE_FLAG_RESOURCES_INITIALIZED 8
#define BNXT_RE_FLAG_ERR_DEVICE_DETACHED 17
@@ -187,9 +187,6 @@ struct bnxt_re_dev {
int id;
- struct delayed_work worker;
- u8 cur_prio_map;
-
/* RCFW Channel */
struct bnxt_qplib_rcfw rcfw;
@@ -227,6 +224,13 @@ struct bnxt_re_dev {
struct workqueue_struct *dcb_wq;
struct dentry *cc_config;
struct bnxt_re_dbg_cc_config_params *cc_config_params;
+#define BNXT_VPD_FLD_LEN 32
+ char board_partno[BNXT_VPD_FLD_LEN];
+ /* RoCE mirror */
+ u16 mirror_vnic_id;
+ union ib_gid ugid;
+ u32 ugid_index;
+ u8 sniffer_flow_created : 1;
};
#define to_bnxt_re_dev(ptr, member) \
@@ -243,6 +247,10 @@ int bnxt_re_assign_pma_port_counters(struct bnxt_re_dev *rdev, struct ib_mad *ou
int bnxt_re_assign_pma_port_ext_counters(struct bnxt_re_dev *rdev,
struct ib_mad *out_mad);
+void bnxt_re_hwrm_free_vnic(struct bnxt_re_dev *rdev);
+int bnxt_re_hwrm_alloc_vnic(struct bnxt_re_dev *rdev);
+int bnxt_re_hwrm_cfg_vnic(struct bnxt_re_dev *rdev, u32 qp_id);
+
static inline struct device *rdev_to_dev(struct bnxt_re_dev *rdev)
{
if (rdev)
@@ -276,4 +284,7 @@ static inline int bnxt_re_read_context_allowed(struct bnxt_re_dev *rdev)
#define BNXT_RE_CONTEXT_TYPE_MRW_SIZE_P7 192
#define BNXT_RE_CONTEXT_TYPE_SRQ_SIZE_P7 192
+#define BNXT_RE_HWRM_CMD_TIMEOUT(rdev) \
+ ((rdev)->chip_ctx->hwrm_cmd_max_timeout * 1000)
+
#endif
diff --git a/drivers/infiniband/hw/bnxt_re/debugfs.c b/drivers/infiniband/hw/bnxt_re/debugfs.c
index e632f1661b92..be5e9b5ca2f0 100644
--- a/drivers/infiniband/hw/bnxt_re/debugfs.c
+++ b/drivers/infiniband/hw/bnxt_re/debugfs.c
@@ -8,6 +8,7 @@
#include <linux/debugfs.h>
#include <linux/pci.h>
+#include <linux/seq_file.h>
#include <rdma/ib_addr.h>
#include "bnxt_ulp.h"
@@ -314,6 +315,40 @@ static const struct file_operations bnxt_re_cc_config_ops = {
.write = bnxt_re_cc_config_set,
};
+static int info_show(struct seq_file *m, void *unused)
+{
+ struct bnxt_re_dev *rdev = m->private;
+ struct bnxt_re_res_cntrs *res_s = &rdev->stats.res;
+
+ seq_puts(m, "Info:\n");
+ seq_printf(m, "Device Name\t\t: %s\n", dev_name(&rdev->ibdev.dev));
+ seq_printf(m, "PD Watermark\t\t: %llu\n", res_s->pd_watermark);
+ seq_printf(m, "AH Watermark\t\t: %llu\n", res_s->ah_watermark);
+ seq_printf(m, "QP Watermark\t\t: %llu\n", res_s->qp_watermark);
+ seq_printf(m, "RC QP Watermark\t\t: %llu\n", res_s->rc_qp_watermark);
+ seq_printf(m, "UD QP Watermark\t\t: %llu\n", res_s->ud_qp_watermark);
+ seq_printf(m, "SRQ Watermark\t\t: %llu\n", res_s->srq_watermark);
+ seq_printf(m, "CQ Watermark\t\t: %llu\n", res_s->cq_watermark);
+ seq_printf(m, "MR Watermark\t\t: %llu\n", res_s->mr_watermark);
+ seq_printf(m, "MW Watermark\t\t: %llu\n", res_s->mw_watermark);
+ seq_printf(m, "CQ Resize Count\t\t: %d\n", atomic_read(&res_s->resize_count));
+ if (rdev->pacing.dbr_pacing) {
+ seq_printf(m, "DB Pacing Reschedule\t: %llu\n", rdev->stats.pacing.resched);
+ seq_printf(m, "DB Pacing Complete\t: %llu\n", rdev->stats.pacing.complete);
+ seq_printf(m, "DB Pacing Alerts\t: %llu\n", rdev->stats.pacing.alerts);
+ seq_printf(m, "DB FIFO Register\t: 0x%x\n",
+ readl(rdev->en_dev->bar0 + rdev->pacing.dbr_db_fifo_reg_off));
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(info);
+
+static void bnxt_re_debugfs_add_info(struct bnxt_re_dev *rdev)
+{
+ debugfs_create_file("info", 0400, rdev->dbg_root, rdev, &info_fops);
+}
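
Editor's note: DEFINE_SHOW_ATTRIBUTE(info) generates the info_fops used below by wrapping info_show() in single_open(), with inode->i_private carried through as m->private; that is why debugfs_create_file() can hand in rdev directly. Roughly what the macro expands to (paraphrased from include/linux/seq_file.h):

  /* Approximate expansion of DEFINE_SHOW_ATTRIBUTE(info): */
  static int info_open(struct inode *inode, struct file *file)
  {
  	return single_open(file, info_show, inode->i_private);
  }

  static const struct file_operations info_fops = {
  	.owner		= THIS_MODULE,
  	.open		= info_open,
  	.read		= seq_read,
  	.llseek		= seq_lseek,
  	.release	= single_release,
  };
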
+
void bnxt_re_debugfs_add_pdev(struct bnxt_re_dev *rdev)
{
struct pci_dev *pdev = rdev->en_dev->pdev;
@@ -325,6 +360,8 @@ void bnxt_re_debugfs_add_pdev(struct bnxt_re_dev *rdev)
rdev->qp_debugfs = debugfs_create_dir("QPs", rdev->dbg_root);
rdev->cc_config = debugfs_create_dir("cc_config", rdev->dbg_root);
+ bnxt_re_debugfs_add_info(rdev);
+
rdev->cc_config_params = kzalloc(sizeof(*cc_params), GFP_KERNEL);
for (i = 0; i < BNXT_RE_CC_PARAM_GEN0; i++) {
diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.c b/drivers/infiniband/hw/bnxt_re/hw_counters.c
index 44bb082e0a60..651cf9d0e0c7 100644
--- a/drivers/infiniband/hw/bnxt_re/hw_counters.c
+++ b/drivers/infiniband/hw/bnxt_re/hw_counters.c
@@ -51,25 +51,6 @@
#include "hw_counters.h"
static const struct rdma_stat_desc bnxt_re_stat_descs[] = {
- [BNXT_RE_ACTIVE_PD].name = "active_pds",
- [BNXT_RE_ACTIVE_AH].name = "active_ahs",
- [BNXT_RE_ACTIVE_QP].name = "active_qps",
- [BNXT_RE_ACTIVE_RC_QP].name = "active_rc_qps",
- [BNXT_RE_ACTIVE_UD_QP].name = "active_ud_qps",
- [BNXT_RE_ACTIVE_SRQ].name = "active_srqs",
- [BNXT_RE_ACTIVE_CQ].name = "active_cqs",
- [BNXT_RE_ACTIVE_MR].name = "active_mrs",
- [BNXT_RE_ACTIVE_MW].name = "active_mws",
- [BNXT_RE_WATERMARK_PD].name = "watermark_pds",
- [BNXT_RE_WATERMARK_AH].name = "watermark_ahs",
- [BNXT_RE_WATERMARK_QP].name = "watermark_qps",
- [BNXT_RE_WATERMARK_RC_QP].name = "watermark_rc_qps",
- [BNXT_RE_WATERMARK_UD_QP].name = "watermark_ud_qps",
- [BNXT_RE_WATERMARK_SRQ].name = "watermark_srqs",
- [BNXT_RE_WATERMARK_CQ].name = "watermark_cqs",
- [BNXT_RE_WATERMARK_MR].name = "watermark_mrs",
- [BNXT_RE_WATERMARK_MW].name = "watermark_mws",
- [BNXT_RE_RESIZE_CQ_CNT].name = "resize_cq_cnt",
[BNXT_RE_RX_PKTS].name = "rx_pkts",
[BNXT_RE_RX_BYTES].name = "rx_bytes",
[BNXT_RE_TX_PKTS].name = "tx_pkts",
@@ -79,22 +60,22 @@ static const struct rdma_stat_desc bnxt_re_stat_descs[] = {
[BNXT_RE_TX_DISCARDS].name = "tx_roce_discards",
[BNXT_RE_RX_ERRORS].name = "rx_roce_errors",
[BNXT_RE_RX_DISCARDS].name = "rx_roce_discards",
- [BNXT_RE_TO_RETRANSMITS].name = "to_retransmits",
- [BNXT_RE_SEQ_ERR_NAKS_RCVD].name = "seq_err_naks_rcvd",
- [BNXT_RE_MAX_RETRY_EXCEEDED].name = "max_retry_exceeded",
- [BNXT_RE_RNR_NAKS_RCVD].name = "rnr_naks_rcvd",
- [BNXT_RE_MISSING_RESP].name = "missing_resp",
+ [BNXT_RE_TO_RETRANSMITS].name = "local_ack_timeout_err",
+ [BNXT_RE_SEQ_ERR_NAKS_RCVD].name = "packet_seq_err",
+ [BNXT_RE_MAX_RETRY_EXCEEDED].name = "max_retry_exceeded",
+ [BNXT_RE_RNR_NAKS_RCVD].name = "rnr_nak_retry_err",
+ [BNXT_RE_MISSING_RESP].name = "implied_nak_seq_err",
[BNXT_RE_UNRECOVERABLE_ERR].name = "unrecoverable_err",
[BNXT_RE_BAD_RESP_ERR].name = "bad_resp_err",
[BNXT_RE_LOCAL_QP_OP_ERR].name = "local_qp_op_err",
[BNXT_RE_LOCAL_PROTECTION_ERR].name = "local_protection_err",
[BNXT_RE_MEM_MGMT_OP_ERR].name = "mem_mgmt_op_err",
- [BNXT_RE_REMOTE_INVALID_REQ_ERR].name = "remote_invalid_req_err",
- [BNXT_RE_REMOTE_ACCESS_ERR].name = "remote_access_err",
+ [BNXT_RE_REMOTE_INVALID_REQ_ERR].name = "req_remote_invalid_request",
+ [BNXT_RE_REMOTE_ACCESS_ERR].name = "req_remote_access_errors",
[BNXT_RE_REMOTE_OP_ERR].name = "remote_op_err",
- [BNXT_RE_DUP_REQ].name = "dup_req",
+ [BNXT_RE_DUP_REQ].name = "duplicate_request",
[BNXT_RE_RES_EXCEED_MAX].name = "res_exceed_max",
- [BNXT_RE_RES_LENGTH_MISMATCH].name = "res_length_mismatch",
+ [BNXT_RE_RES_LENGTH_MISMATCH].name = "resp_local_length_error",
[BNXT_RE_RES_EXCEEDS_WQE].name = "res_exceeds_wqe",
[BNXT_RE_RES_OPCODE_ERR].name = "res_opcode_err",
[BNXT_RE_RES_RX_INVALID_RKEY].name = "res_rx_invalid_rkey",
@@ -118,7 +99,7 @@ static const struct rdma_stat_desc bnxt_re_stat_descs[] = {
[BNXT_RE_RES_SRQ_LOAD_ERR].name = "res_srq_load_err",
[BNXT_RE_RES_TX_PCI_ERR].name = "res_tx_pci_err",
[BNXT_RE_RES_RX_PCI_ERR].name = "res_rx_pci_err",
- [BNXT_RE_OUT_OF_SEQ_ERR].name = "oos_drop_count",
+ [BNXT_RE_OUT_OF_SEQ_ERR].name = "out_of_sequence",
[BNXT_RE_TX_ATOMIC_REQ].name = "tx_atomic_req",
[BNXT_RE_TX_READ_REQ].name = "tx_read_req",
[BNXT_RE_TX_READ_RES].name = "tx_read_resp",
@@ -126,23 +107,22 @@ static const struct rdma_stat_desc bnxt_re_stat_descs[] = {
[BNXT_RE_TX_SEND_REQ].name = "tx_send_req",
[BNXT_RE_TX_ROCE_PKTS].name = "tx_roce_only_pkts",
[BNXT_RE_TX_ROCE_BYTES].name = "tx_roce_only_bytes",
- [BNXT_RE_RX_ATOMIC_REQ].name = "rx_atomic_req",
- [BNXT_RE_RX_READ_REQ].name = "rx_read_req",
+ [BNXT_RE_RX_ATOMIC_REQ].name = "rx_atomic_requests",
+ [BNXT_RE_RX_READ_REQ].name = "rx_read_requests",
[BNXT_RE_RX_READ_RESP].name = "rx_read_resp",
- [BNXT_RE_RX_WRITE_REQ].name = "rx_write_req",
+ [BNXT_RE_RX_WRITE_REQ].name = "rx_write_requests",
[BNXT_RE_RX_SEND_REQ].name = "rx_send_req",
[BNXT_RE_RX_ROCE_PKTS].name = "rx_roce_only_pkts",
[BNXT_RE_RX_ROCE_BYTES].name = "rx_roce_only_bytes",
[BNXT_RE_RX_ROCE_GOOD_PKTS].name = "rx_roce_good_pkts",
[BNXT_RE_RX_ROCE_GOOD_BYTES].name = "rx_roce_good_bytes",
- [BNXT_RE_OOB].name = "rx_out_of_buffer",
- [BNXT_RE_TX_CNP].name = "tx_cnp_pkts",
- [BNXT_RE_RX_CNP].name = "rx_cnp_pkts",
- [BNXT_RE_RX_ECN].name = "rx_ecn_marked_pkts",
- [BNXT_RE_PACING_RESCHED].name = "pacing_reschedule",
- [BNXT_RE_PACING_CMPL].name = "pacing_complete",
- [BNXT_RE_PACING_ALERT].name = "pacing_alerts",
- [BNXT_RE_DB_FIFO_REG].name = "db_fifo_register",
+ [BNXT_RE_OOB].name = "out_of_buffer",
+ [BNXT_RE_TX_CNP].name = "np_cnp_pkts",
+ [BNXT_RE_RX_CNP].name = "rp_cnp_handled",
+ [BNXT_RE_RX_ECN].name = "np_ecn_marked_roce_packets",
+ [BNXT_RE_REQ_CQE_ERROR].name = "req_cqe_error",
+ [BNXT_RE_RESP_CQE_ERROR].name = "resp_cqe_error",
+ [BNXT_RE_RESP_REMOTE_ACCESS_ERRS].name = "resp_remote_access_errors",
};
static void bnxt_re_copy_ext_stats(struct bnxt_re_dev *rdev,
@@ -273,18 +253,20 @@ static void bnxt_re_copy_err_stats(struct bnxt_re_dev *rdev,
err_s->res_rx_pci_err;
stats->value[BNXT_RE_OUT_OF_SEQ_ERR] =
err_s->res_oos_drop_count;
-}
-
-static void bnxt_re_copy_db_pacing_stats(struct bnxt_re_dev *rdev,
- struct rdma_hw_stats *stats)
-{
- struct bnxt_re_db_pacing_stats *pacing_s = &rdev->stats.pacing;
-
- stats->value[BNXT_RE_PACING_RESCHED] = pacing_s->resched;
- stats->value[BNXT_RE_PACING_CMPL] = pacing_s->complete;
- stats->value[BNXT_RE_PACING_ALERT] = pacing_s->alerts;
- stats->value[BNXT_RE_DB_FIFO_REG] =
- readl(rdev->en_dev->bar0 + rdev->pacing.dbr_db_fifo_reg_off);
+ stats->value[BNXT_RE_REQ_CQE_ERROR] =
+ err_s->bad_resp_err +
+ err_s->local_qp_op_err +
+ err_s->local_protection_err +
+ err_s->mem_mgmt_op_err +
+ err_s->remote_invalid_req_err +
+ err_s->remote_access_err +
+ err_s->remote_op_err;
+ stats->value[BNXT_RE_RESP_CQE_ERROR] =
+ err_s->res_cmp_err +
+ err_s->res_cq_load_err;
+ stats->value[BNXT_RE_RESP_REMOTE_ACCESS_ERRS] =
+ err_s->res_rx_no_perm +
+ err_s->res_tx_no_perm;
}
int bnxt_re_assign_pma_port_ext_counters(struct bnxt_re_dev *rdev, struct ib_mad *out_mad)
@@ -382,7 +364,6 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
u32 port, int index)
{
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
- struct bnxt_re_res_cntrs *res_s = &rdev->stats.res;
struct bnxt_qplib_roce_stats *err_s = NULL;
struct ctx_hw_stats *hw_stats = NULL;
int rc = 0;
@@ -391,26 +372,6 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
if (!port || !stats)
return -EINVAL;
- stats->value[BNXT_RE_ACTIVE_QP] = atomic_read(&res_s->qp_count);
- stats->value[BNXT_RE_ACTIVE_RC_QP] = atomic_read(&res_s->rc_qp_count);
- stats->value[BNXT_RE_ACTIVE_UD_QP] = atomic_read(&res_s->ud_qp_count);
- stats->value[BNXT_RE_ACTIVE_SRQ] = atomic_read(&res_s->srq_count);
- stats->value[BNXT_RE_ACTIVE_CQ] = atomic_read(&res_s->cq_count);
- stats->value[BNXT_RE_ACTIVE_MR] = atomic_read(&res_s->mr_count);
- stats->value[BNXT_RE_ACTIVE_MW] = atomic_read(&res_s->mw_count);
- stats->value[BNXT_RE_ACTIVE_PD] = atomic_read(&res_s->pd_count);
- stats->value[BNXT_RE_ACTIVE_AH] = atomic_read(&res_s->ah_count);
- stats->value[BNXT_RE_WATERMARK_QP] = res_s->qp_watermark;
- stats->value[BNXT_RE_WATERMARK_RC_QP] = res_s->rc_qp_watermark;
- stats->value[BNXT_RE_WATERMARK_UD_QP] = res_s->ud_qp_watermark;
- stats->value[BNXT_RE_WATERMARK_SRQ] = res_s->srq_watermark;
- stats->value[BNXT_RE_WATERMARK_CQ] = res_s->cq_watermark;
- stats->value[BNXT_RE_WATERMARK_MR] = res_s->mr_watermark;
- stats->value[BNXT_RE_WATERMARK_MW] = res_s->mw_watermark;
- stats->value[BNXT_RE_WATERMARK_PD] = res_s->pd_watermark;
- stats->value[BNXT_RE_WATERMARK_AH] = res_s->ah_watermark;
- stats->value[BNXT_RE_RESIZE_CQ_CNT] = atomic_read(&res_s->resize_count);
-
if (hw_stats) {
stats->value[BNXT_RE_RECOVERABLE_ERRORS] =
le64_to_cpu(hw_stats->tx_bcast_pkts);
@@ -449,8 +410,6 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
goto done;
}
}
- if (rdev->pacing.dbr_pacing && bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
- bnxt_re_copy_db_pacing_stats(rdev, stats);
}
done:
diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.h b/drivers/infiniband/hw/bnxt_re/hw_counters.h
index e541b6f8ca9f..09d371d442aa 100644
--- a/drivers/infiniband/hw/bnxt_re/hw_counters.h
+++ b/drivers/infiniband/hw/bnxt_re/hw_counters.h
@@ -41,25 +41,6 @@
#define __BNXT_RE_HW_STATS_H__
enum bnxt_re_hw_stats {
- BNXT_RE_ACTIVE_PD,
- BNXT_RE_ACTIVE_AH,
- BNXT_RE_ACTIVE_QP,
- BNXT_RE_ACTIVE_RC_QP,
- BNXT_RE_ACTIVE_UD_QP,
- BNXT_RE_ACTIVE_SRQ,
- BNXT_RE_ACTIVE_CQ,
- BNXT_RE_ACTIVE_MR,
- BNXT_RE_ACTIVE_MW,
- BNXT_RE_WATERMARK_PD,
- BNXT_RE_WATERMARK_AH,
- BNXT_RE_WATERMARK_QP,
- BNXT_RE_WATERMARK_RC_QP,
- BNXT_RE_WATERMARK_UD_QP,
- BNXT_RE_WATERMARK_SRQ,
- BNXT_RE_WATERMARK_CQ,
- BNXT_RE_WATERMARK_MR,
- BNXT_RE_WATERMARK_MW,
- BNXT_RE_RESIZE_CQ_CNT,
BNXT_RE_RX_PKTS,
BNXT_RE_RX_BYTES,
BNXT_RE_TX_PKTS,
@@ -129,10 +110,9 @@ enum bnxt_re_hw_stats {
BNXT_RE_TX_CNP,
BNXT_RE_RX_CNP,
BNXT_RE_RX_ECN,
- BNXT_RE_PACING_RESCHED,
- BNXT_RE_PACING_CMPL,
- BNXT_RE_PACING_ALERT,
- BNXT_RE_DB_FIFO_REG,
+ BNXT_RE_REQ_CQE_ERROR,
+ BNXT_RE_RESP_CQE_ERROR,
+ BNXT_RE_RESP_REMOTE_ACCESS_ERRS,
BNXT_RE_NUM_EXT_COUNTERS
};
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 260dc67b8b87..4dab5ca7362b 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -288,7 +288,9 @@ int bnxt_re_query_port(struct ib_device *ibdev, u32 port_num,
}
port_attr->max_mtu = IB_MTU_4096;
port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
- port_attr->gid_tbl_len = dev_attr->max_sgid;
+ /* One GID is reserved for RawEth QP. Report one less */
+ port_attr->gid_tbl_len = (rdev->rcfw.roce_mirror ? (dev_attr->max_sgid - 1) :
+ dev_attr->max_sgid);
port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
IB_PORT_DEVICE_MGMT_SUP |
IB_PORT_VENDOR_CLASS_SUP;
@@ -375,7 +377,7 @@ int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
if (!ctx)
return -EINVAL;
- if (sgid_tbl && sgid_tbl->active) {
+ if (sgid_tbl->active) {
if (ctx->idx >= sgid_tbl->max)
return -EINVAL;
gid_to_del = &sgid_tbl->tbl[ctx->idx].gid;
@@ -429,7 +431,7 @@ int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context)
rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)&attr->gid,
rdev->qplib_res.netdev->dev_addr,
- vlan_id, true, &tbl_idx);
+ vlan_id, true, &tbl_idx, false, 0);
if (rc == -EALREADY) {
ctx_tbl = sgid_tbl->ctx;
ctx_tbl[tbl_idx]->refcnt++;
@@ -955,6 +957,20 @@ fail:
return rc;
}
+static void bnxt_re_del_unique_gid(struct bnxt_re_dev *rdev)
+{
+ int rc;
+
+ if (!rdev->rcfw.roce_mirror)
+ return;
+
+ rc = bnxt_qplib_del_sgid(&rdev->qplib_res.sgid_tbl,
+ (struct bnxt_qplib_gid *)&rdev->ugid,
+ 0xFFFF, true);
+ if (rc)
+ dev_err(rdev_to_dev(rdev), "Failed to delete unique GID, rc: %d\n", rc);
+}
+
/* Queue Pairs */
int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
{
@@ -994,6 +1010,9 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
else if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD)
atomic_dec(&rdev->stats.res.ud_qp_count);
+ if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE)
+ bnxt_re_del_unique_gid(rdev);
+
ib_umem_release(qp->rumem);
ib_umem_release(qp->sumem);
@@ -1018,6 +1037,8 @@ static u8 __from_ib_qp_type(enum ib_qp_type type)
return CMDQ_CREATE_QP_TYPE_RC;
case IB_QPT_UD:
return CMDQ_CREATE_QP_TYPE_UD;
+ case IB_QPT_RAW_PACKET:
+ return CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE;
default:
return IB_QPT_MAX;
}
@@ -1595,6 +1616,29 @@ static bool bnxt_re_test_qp_limits(struct bnxt_re_dev *rdev,
return rc;
}
+static int bnxt_re_add_unique_gid(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_ctx *hctx = &rdev->qplib_ctx;
+ struct bnxt_qplib_res *res = &rdev->qplib_res;
+ int rc;
+
+ if (!rdev->rcfw.roce_mirror)
+ return 0;
+
+ rdev->ugid.global.subnet_prefix = cpu_to_be64(0xfe8000000000abcdLL);
+ addrconf_ifid_eui48(&rdev->ugid.raw[8], rdev->netdev);
+
+ rc = bnxt_qplib_add_sgid(&res->sgid_tbl,
+ (struct bnxt_qplib_gid *)&rdev->ugid,
+ rdev->qplib_res.netdev->dev_addr,
+ 0xFFFF, true, &rdev->ugid_index, true,
+ hctx->stats3.fw_id);
+ if (rc)
+ dev_err(rdev_to_dev(rdev), "Failed to add unique GID. rc = %d\n", rc);
+
+ return rc;
+}
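
Editor's note: the unique GID combines a fixed fe80-style subnet prefix (0xfe8000000000abcd above) with an interface ID that addrconf_ifid_eui48() derives from the netdev MAC: insert ff:fe in the middle and flip the universal/local bit. A standalone sketch of that expansion:

  #include <stdio.h>
  #include <stdint.h>

  /* Sketch of addrconf_ifid_eui48(): 48-bit MAC -> modified EUI-64. */
  static void eui48_to_eui64(uint8_t eui[8], const uint8_t mac[6])
  {
  	eui[0] = mac[0] ^ 0x02;	/* flip the universal/local bit */
  	eui[1] = mac[1];
  	eui[2] = mac[2];
  	eui[3] = 0xff;		/* inserted ff:fe marker */
  	eui[4] = 0xfe;
  	eui[5] = mac[3];
  	eui[6] = mac[4];
  	eui[7] = mac[5];
  }

  int main(void)
  {
  	const uint8_t mac[6] = { 0x02, 0x15, 0x5d, 0xaa, 0xbb, 0xcc };
  	uint8_t eui[8];
  	int i;

  	eui48_to_eui64(eui, mac);
  	for (i = 0; i < 8; i++)
  		printf("%02x%s", eui[i], i < 7 ? ":" : "\n");
  	return 0;
  }
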
+
int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr,
struct ib_udata *udata)
{
@@ -1656,6 +1700,17 @@ int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr,
}
}
+ /* Support for RawEth QP is added to capture TCP packet dumps, so a
+ * unique SGID is used to avoid incorrect statistics on the
+ * per-function stats_ctx.
+ */
+ if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE) {
+ rc = bnxt_re_add_unique_gid(rdev);
+ if (rc)
+ goto qp_destroy;
+ qp->qplib_qp.ugid_index = rdev->ugid_index;
+ }
+
qp->ib_qp.qp_num = qp->qplib_qp.id;
if (qp_init_attr->qp_type == IB_QPT_GSI)
rdev->gsi_ctx.gsi_qp = qp;
@@ -2301,7 +2356,7 @@ int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
qp_attr->pkey_index = qplib_qp->pkey_index;
qp_attr->qkey = qplib_qp->qkey;
qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
- rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->ah.flow_label,
+ rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->udp_sport,
qplib_qp->ah.host_sgid_index,
qplib_qp->ah.hop_limit,
qplib_qp->ah.traffic_class);
@@ -3248,9 +3303,9 @@ int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(cq->resize_umem)) {
rc = PTR_ERR(cq->resize_umem);
+ ibdev_err(&rdev->ibdev, "%s: ib_umem_get failed! rc = %pe\n",
+ __func__, cq->resize_umem);
cq->resize_umem = NULL;
- ibdev_err(&rdev->ibdev, "%s: ib_umem_get failed! rc = %d\n",
- __func__, rc);
goto fail;
}
cq->resize_cqe = entries;
@@ -4392,6 +4447,93 @@ void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
}
}
+static int bnxt_re_setup_vnic(struct bnxt_re_dev *rdev, struct bnxt_re_qp *qp)
+{
+ int rc;
+
+ rc = bnxt_re_hwrm_alloc_vnic(rdev);
+ if (rc)
+ return rc;
+
+ rc = bnxt_re_hwrm_cfg_vnic(rdev, qp->qplib_qp.id);
+ if (rc)
+ goto out_free_vnic;
+
+ return 0;
+out_free_vnic:
+ bnxt_re_hwrm_free_vnic(rdev);
+ return rc;
+}
+
+struct ib_flow *bnxt_re_create_flow(struct ib_qp *ib_qp,
+ struct ib_flow_attr *attr,
+ struct ib_udata *udata)
+{
+ struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
+ struct bnxt_re_dev *rdev = qp->rdev;
+ struct bnxt_re_flow *flow;
+ int rc;
+
+ if (attr->type != IB_FLOW_ATTR_SNIFFER ||
+ !rdev->rcfw.roce_mirror)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ mutex_lock(&rdev->qp_lock);
+ if (rdev->sniffer_flow_created) {
+ ibdev_err(&rdev->ibdev, "RoCE Mirroring is already Configured\n");
+ mutex_unlock(&rdev->qp_lock);
+ return ERR_PTR(-EBUSY);
+ }
+
+ flow = kzalloc(sizeof(*flow), GFP_KERNEL);
+ if (!flow) {
+ mutex_unlock(&rdev->qp_lock);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ flow->rdev = rdev;
+
+ rc = bnxt_re_setup_vnic(rdev, qp);
+ if (rc)
+ goto out_free_flow;
+
+ rc = bnxt_qplib_create_flow(&rdev->qplib_res);
+ if (rc)
+ goto out_free_vnic;
+
+ rdev->sniffer_flow_created = 1;
+ mutex_unlock(&rdev->qp_lock);
+
+ return &flow->ib_flow;
+
+out_free_vnic:
+ bnxt_re_hwrm_free_vnic(rdev);
+out_free_flow:
+ mutex_unlock(&rdev->qp_lock);
+ kfree(flow);
+ return ERR_PTR(rc);
+}
+
+int bnxt_re_destroy_flow(struct ib_flow *flow_id)
+{
+ struct bnxt_re_flow *flow =
+ container_of(flow_id, struct bnxt_re_flow, ib_flow);
+ struct bnxt_re_dev *rdev = flow->rdev;
+ int rc;
+
+ mutex_lock(&rdev->qp_lock);
+ rc = bnxt_qplib_destroy_flow(&rdev->qplib_res);
+ if (rc)
+ ibdev_dbg(&rdev->ibdev, "failed to destroy_flow rc = %d\n", rc);
+ rdev->sniffer_flow_created = 0;
+
+ bnxt_re_hwrm_free_vnic(rdev);
+ mutex_unlock(&rdev->qp_lock);
+ kfree(flow);
+
+ return rc;
+}
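
Editor's note: from user space, this path would be exercised through the verbs flow-steering API by attaching a sniffer flow to a raw packet QP; whether a given libibverbs stack routes IBV_FLOW_ATTR_SNIFFER to this uverbs hook is an assumption here. A hedged sketch (device/PD/CQ/QP setup omitted):

  #include <stdio.h>
  #include <infiniband/verbs.h>

  /* Sketch: request traffic mirroring by attaching a sniffer flow to
   * an already-created raw packet QP.
   */
  static struct ibv_flow *attach_sniffer(struct ibv_qp *qp)
  {
  	struct ibv_flow_attr attr = {
  		.type = IBV_FLOW_ATTR_SNIFFER,
  		.size = sizeof(attr),
  		.port = 1,
  	};
  	struct ibv_flow *flow = ibv_create_flow(qp, &attr);

  	if (!flow)
  		perror("ibv_create_flow");	/* EBUSY once configured */
  	return flow;
  }
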
+
static struct bnxt_re_cq *bnxt_re_search_for_cq(struct bnxt_re_dev *rdev, u32 cq_id)
{
struct bnxt_re_cq *cq = NULL, *tmp_cq;
@@ -4604,7 +4746,7 @@ static int UVERBS_HANDLER(BNXT_RE_METHOD_ALLOC_PAGE)(struct uverbs_attr_bundle *
return err;
err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_DPI,
- &dpi, sizeof(length));
+ &dpi, sizeof(dpi));
if (err)
return err;
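
Editor's note: the hunk above fixes a classic sizeof-of-the-wrong-variable slip: the DPI value was copied out with sizeof(length) rather than sizeof(dpi), so the copy size tracked an unrelated local and was only harmless while the two types happened to agree. The bug class, distilled (types assumed for illustration):

  u32 dpi;	/* value actually being returned */
  u64 length;	/* unrelated local */

  /* wrong: size follows 'length' even if 'dpi' is narrower */
  uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_DPI, &dpi, sizeof(length));

  /* right: size is tied to the object being copied */
  uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_DPI, &dpi, sizeof(dpi));
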
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index fe00ab691a51..76ba9ab04d5c 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -164,6 +164,11 @@ struct bnxt_re_user_mmap_entry {
u8 mmap_flag;
};
+struct bnxt_re_flow {
+ struct ib_flow ib_flow;
+ struct bnxt_re_dev *rdev;
+};
+
static inline u16 bnxt_re_get_swqe_size(int nsge)
{
return sizeof(struct sq_send_hdr) + nsge * sizeof(struct sq_sge);
@@ -267,6 +272,11 @@ struct ib_mr *bnxt_re_reg_user_mr_dmabuf(struct ib_pd *ib_pd, u64 start,
struct uverbs_attr_bundle *attrs);
int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata);
void bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
+struct ib_flow *bnxt_re_create_flow(struct ib_qp *ib_qp,
+ struct ib_flow_attr *attr,
+ struct ib_udata *udata);
+int bnxt_re_destroy_flow(struct ib_flow *flow_id);
+
int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
void bnxt_re_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index df7cf8d68e27..b13810572c2e 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -80,6 +80,7 @@ MODULE_LICENSE("Dual BSD/GPL");
static DEFINE_MUTEX(bnxt_re_mutex);
static int bnxt_re_hwrm_qcaps(struct bnxt_re_dev *rdev);
+static int bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev);
static int bnxt_re_hwrm_qcfg(struct bnxt_re_dev *rdev, u32 *db_len,
u32 *offset);
@@ -188,6 +189,10 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev)
rdev->qplib_res.is_vf = BNXT_EN_VF(en_dev);
rdev->qplib_res.en_dev = en_dev;
+ rc = bnxt_re_query_hwrm_intf_version(rdev);
+ if (rc)
+ goto free_dev_attr;
+
bnxt_re_set_drv_mode(rdev);
bnxt_re_set_db_offset(rdev);
@@ -540,6 +545,72 @@ static void bnxt_re_fill_fw_msg(struct bnxt_fw_msg *fw_msg, void *msg,
fw_msg->timeout = timeout;
}
+void bnxt_re_hwrm_free_vnic(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_en_dev *en_dev = rdev->en_dev;
+ struct hwrm_vnic_free_input req = {};
+ struct bnxt_fw_msg fw_msg = {};
+ int rc;
+
+ bnxt_re_init_hwrm_hdr((void *)&req, HWRM_VNIC_FREE);
+
+ req.vnic_id = cpu_to_le32(rdev->mirror_vnic_id);
+ bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), NULL,
+ 0, BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
+ rc = bnxt_send_msg(en_dev, &fw_msg);
+ if (rc)
+ ibdev_dbg(&rdev->ibdev,
+ "Failed to free vnic, rc = %d\n", rc);
+}
+
+int bnxt_re_hwrm_alloc_vnic(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_en_dev *en_dev = rdev->en_dev;
+ struct hwrm_vnic_alloc_output resp = {};
+ struct hwrm_vnic_alloc_input req = {};
+ struct bnxt_fw_msg fw_msg = {};
+ int rc;
+
+ bnxt_re_init_hwrm_hdr((void *)&req, HWRM_VNIC_ALLOC);
+
+ req.vnic_id = cpu_to_le16(rdev->mirror_vnic_id);
+ req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_VNIC_ID_VALID);
+ bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
+ sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
+ rc = bnxt_send_msg(en_dev, &fw_msg);
+ if (rc)
+ ibdev_dbg(&rdev->ibdev,
+ "Failed to alloc vnic, rc = %d\n", rc);
+
+ return rc;
+}
+
+int bnxt_re_hwrm_cfg_vnic(struct bnxt_re_dev *rdev, u32 qp_id)
+{
+ struct bnxt_en_dev *en_dev = rdev->en_dev;
+ struct hwrm_vnic_cfg_input req = {};
+ struct bnxt_fw_msg fw_msg = {};
+ int rc;
+
+ bnxt_re_init_hwrm_hdr((void *)&req, HWRM_VNIC_CFG);
+
+ req.flags = cpu_to_le32(VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE);
+ req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_RAW_QP_ID |
+ VNIC_CFG_REQ_ENABLES_MRU);
+ req.vnic_id = cpu_to_le16(rdev->mirror_vnic_id);
+ req.raw_qp_id = cpu_to_le32(qp_id);
+ req.mru = cpu_to_le16(rdev->netdev->mtu + VLAN_ETH_HLEN);
+
+ bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), NULL,
+ 0, BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
+ rc = bnxt_send_msg(en_dev, &fw_msg);
+ if (rc)
+ ibdev_dbg(&rdev->ibdev,
+ "Failed to cfg vnic, rc = %d\n", rc);
+
+ return rc;
+}
+
/* Query device config using common hwrm */
static int bnxt_re_hwrm_qcfg(struct bnxt_re_dev *rdev, u32 *db_len,
u32 *offset)
@@ -553,11 +624,12 @@ static int bnxt_re_hwrm_qcfg(struct bnxt_re_dev *rdev, u32 *db_len,
bnxt_re_init_hwrm_hdr((void *)&req, HWRM_FUNC_QCFG);
req.fid = cpu_to_le16(0xffff);
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
- sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
+ sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
rc = bnxt_send_msg(en_dev, &fw_msg);
if (!rc) {
*db_len = PAGE_ALIGN(le16_to_cpu(resp.l2_doorbell_bar_size_kb) * 1024);
*offset = PAGE_ALIGN(le16_to_cpu(resp.legacy_l2_db_size_kb) * 1024);
+ rdev->mirror_vnic_id = le16_to_cpu(resp.mirror_vnic_id);
}
return rc;
}
@@ -577,7 +649,7 @@ int bnxt_re_hwrm_qcaps(struct bnxt_re_dev *rdev)
bnxt_re_init_hwrm_hdr((void *)&req, HWRM_FUNC_QCAPS);
req.fid = cpu_to_le16(0xffff);
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
- sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
+ sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
rc = bnxt_send_msg(en_dev, &fw_msg);
if (rc)
@@ -587,6 +659,8 @@ int bnxt_re_hwrm_qcaps(struct bnxt_re_dev *rdev)
flags_ext2 = le32_to_cpu(resp.flags_ext2);
cctx->modes.dbr_pacing = flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_EXT_SUPPORTED ||
flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_V0_SUPPORTED;
+ cctx->modes.roce_mirror = !!(le32_to_cpu(resp.flags_ext3) &
+ FUNC_QCAPS_RESP_FLAGS_EXT3_MIRROR_ON_ROCE_SUPPORTED);
return 0;
}
@@ -603,7 +677,7 @@ static int bnxt_re_hwrm_dbr_pacing_qcfg(struct bnxt_re_dev *rdev)
cctx = rdev->chip_ctx;
bnxt_re_init_hwrm_hdr((void *)&req, HWRM_FUNC_DBR_PACING_QCFG);
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
- sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
+ sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
rc = bnxt_send_msg(en_dev, &fw_msg);
if (rc)
return rc;
@@ -842,20 +916,12 @@ static void bnxt_re_deinitialize_dbr_pacing(struct bnxt_re_dev *rdev)
static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev,
u16 fw_ring_id, int type)
{
- struct bnxt_en_dev *en_dev;
+ struct bnxt_en_dev *en_dev = rdev->en_dev;
struct hwrm_ring_free_input req = {};
struct hwrm_ring_free_output resp;
struct bnxt_fw_msg fw_msg = {};
int rc = -EINVAL;
- if (!rdev)
- return rc;
-
- en_dev = rdev->en_dev;
-
- if (!en_dev)
- return rc;
-
if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
return 0;
@@ -863,7 +929,7 @@ static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev,
req.ring_type = type;
req.ring_id = cpu_to_le16(fw_ring_id);
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
- sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
+ sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
rc = bnxt_send_msg(en_dev, &fw_msg);
if (rc)
ibdev_err(&rdev->ibdev, "Failed to free HW ring:%d :%#x",
@@ -881,9 +947,6 @@ static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev,
struct bnxt_fw_msg fw_msg = {};
int rc = -EINVAL;
- if (!en_dev)
- return rc;
-
bnxt_re_init_hwrm_hdr((void *)&req, HWRM_RING_ALLOC);
req.enables = 0;
req.page_tbl_addr = cpu_to_le64(ring_attr->dma_arr[0]);
@@ -899,7 +962,7 @@ static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev,
req.ring_type = ring_attr->type;
req.int_mode = ring_attr->mode;
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
- sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
+ sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
rc = bnxt_send_msg(en_dev, &fw_msg);
if (!rc)
*fw_ring_id = le16_to_cpu(resp.ring_id);
@@ -916,16 +979,13 @@ static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
struct bnxt_fw_msg fw_msg = {};
int rc = -EINVAL;
- if (!en_dev)
- return rc;
-
if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
return 0;
bnxt_re_init_hwrm_hdr((void *)&req, HWRM_STAT_CTX_FREE);
req.stat_ctx_id = cpu_to_le32(fw_stats_ctx_id);
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
- sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
+ sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
rc = bnxt_send_msg(en_dev, &fw_msg);
if (rc)
ibdev_err(&rdev->ibdev, "Failed to free HW stats context %#x",
@@ -935,8 +995,7 @@ static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
}
static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
- dma_addr_t dma_map,
- u32 *fw_stats_ctx_id)
+ struct bnxt_qplib_stats *stats)
{
struct bnxt_qplib_chip_ctx *chip_ctx = rdev->chip_ctx;
struct hwrm_stat_ctx_alloc_output resp = {};
@@ -945,21 +1004,18 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
struct bnxt_fw_msg fw_msg = {};
int rc = -EINVAL;
- *fw_stats_ctx_id = INVALID_STATS_CTX_ID;
-
- if (!en_dev)
- return rc;
+ stats->fw_id = INVALID_STATS_CTX_ID;
bnxt_re_init_hwrm_hdr((void *)&req, HWRM_STAT_CTX_ALLOC);
req.update_period_ms = cpu_to_le32(1000);
- req.stats_dma_addr = cpu_to_le64(dma_map);
+ req.stats_dma_addr = cpu_to_le64(stats->dma_map);
req.stats_dma_length = cpu_to_le16(chip_ctx->hw_stats_size);
req.stat_ctx_flags = STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE;
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
- sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
+ sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
rc = bnxt_send_msg(en_dev, &fw_msg);
if (!rc)
- *fw_stats_ctx_id = le32_to_cpu(resp.stat_ctx_id);
+ stats->fw_id = le32_to_cpu(resp.stat_ctx_id);
return rc;
}
@@ -975,7 +1031,7 @@ static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
struct bnxt_re_dev *rdev =
rdma_device_to_drv_device(device, struct bnxt_re_dev, ibdev);
- return sysfs_emit(buf, "0x%x\n", rdev->en_dev->pdev->vendor);
+ return sysfs_emit(buf, "0x%x\n", rdev->en_dev->pdev->revision);
}
static DEVICE_ATTR_RO(hw_rev);
@@ -985,13 +1041,31 @@ static ssize_t hca_type_show(struct device *device,
struct bnxt_re_dev *rdev =
rdma_device_to_drv_device(device, struct bnxt_re_dev, ibdev);
- return sysfs_emit(buf, "%s\n", rdev->ibdev.node_desc);
+ return sysfs_emit(buf, "0x%x\n", rdev->en_dev->pdev->device);
}
static DEVICE_ATTR_RO(hca_type);
+static ssize_t board_id_show(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ struct bnxt_re_dev *rdev = rdma_device_to_drv_device(device,
+ struct bnxt_re_dev, ibdev);
+ char buffer[BNXT_VPD_FLD_LEN] = {};
+
+ if (!rdev->is_virtfn)
+ memcpy(buffer, rdev->board_partno, BNXT_VPD_FLD_LEN - 1);
+ else
+ scnprintf(buffer, BNXT_VPD_FLD_LEN, "0x%x-VF",
+ rdev->en_dev->pdev->device);
+
+ return sysfs_emit(buf, "%s\n", buffer);
+}
+static DEVICE_ATTR_RO(board_id);
+
static struct attribute *bnxt_re_attributes[] = {
&dev_attr_hw_rev.attr,
&dev_attr_hca_type.attr,
+ &dev_attr_board_id.attr,
NULL
};
@@ -1207,6 +1281,8 @@ static int bnxt_re_fill_res_srq_entry(struct sk_buff *msg, struct ib_srq *ib_srq
goto err;
if (rdma_nl_put_driver_u32_hex(msg, "max_sge", srq->qplib_srq.max_sge))
goto err;
+ if (rdma_nl_put_driver_u32_hex(msg, "srq_limit", srq->qplib_srq.threshold))
+ goto err;
nla_nest_end(msg, table_attr);
return 0;
@@ -1297,6 +1373,8 @@ static const struct ib_device_ops bnxt_re_dev_ops = {
.reg_user_mr_dmabuf = bnxt_re_reg_user_mr_dmabuf,
.req_notify_cq = bnxt_re_req_notify_cq,
.resize_cq = bnxt_re_resize_cq,
+ .create_flow = bnxt_re_create_flow,
+ .destroy_flow = bnxt_re_destroy_flow,
INIT_RDMA_OBJ_SIZE(ib_ah, bnxt_re_ah, ib_ah),
INIT_RDMA_OBJ_SIZE(ib_cq, bnxt_re_cq, ib_cq),
INIT_RDMA_OBJ_SIZE(ib_pd, bnxt_re_pd, ib_pd),
@@ -1323,8 +1401,7 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
/* ib device init */
ibdev->node_type = RDMA_NODE_IB_CA;
- strscpy(ibdev->node_desc, BNXT_RE_DESC " HCA",
- strlen(BNXT_RE_DESC) + 5);
+ strscpy(ibdev->node_desc, BNXT_RE_DESC " HCA");
ibdev->phys_port_cnt = 1;
addrconf_addr_eui48((u8 *)&ibdev->node_guid, rdev->netdev->dev_addr);
@@ -1850,81 +1927,6 @@ static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev)
mutex_unlock(&rdev->qp_lock);
}
-static int bnxt_re_update_gid(struct bnxt_re_dev *rdev)
-{
- struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
- struct bnxt_qplib_gid gid;
- u16 gid_idx, index;
- int rc = 0;
-
- if (!ib_device_try_get(&rdev->ibdev))
- return 0;
-
- for (index = 0; index < sgid_tbl->active; index++) {
- gid_idx = sgid_tbl->hw_id[index];
-
- if (!memcmp(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero,
- sizeof(bnxt_qplib_gid_zero)))
- continue;
- /* need to modify the VLAN enable setting of non VLAN GID only
- * as setting is done for VLAN GID while adding GID
- */
- if (sgid_tbl->vlan[index])
- continue;
-
- memcpy(&gid, &sgid_tbl->tbl[index], sizeof(gid));
-
- rc = bnxt_qplib_update_sgid(sgid_tbl, &gid, gid_idx,
- rdev->qplib_res.netdev->dev_addr);
- }
-
- ib_device_put(&rdev->ibdev);
- return rc;
-}
-
-static u32 bnxt_re_get_priority_mask(struct bnxt_re_dev *rdev)
-{
- u32 prio_map = 0, tmp_map = 0;
- struct net_device *netdev;
- struct dcb_app app = {};
-
- netdev = rdev->netdev;
-
- app.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE;
- app.protocol = ETH_P_IBOE;
- tmp_map = dcb_ieee_getapp_mask(netdev, &app);
- prio_map = tmp_map;
-
- app.selector = IEEE_8021QAZ_APP_SEL_DGRAM;
- app.protocol = ROCE_V2_UDP_DPORT;
- tmp_map = dcb_ieee_getapp_mask(netdev, &app);
- prio_map |= tmp_map;
-
- return prio_map;
-}
-
-static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
-{
- u8 prio_map = 0;
-
- /* Get priority for roce */
- prio_map = bnxt_re_get_priority_mask(rdev);
-
- if (prio_map == rdev->cur_prio_map)
- return 0;
- rdev->cur_prio_map = prio_map;
- /* Actual priorities are not programmed as they are already
- * done by L2 driver; just enable or disable priority vlan tagging
- */
- if ((prio_map == 0 && rdev->qplib_res.prio) ||
- (prio_map != 0 && !rdev->qplib_res.prio)) {
- rdev->qplib_res.prio = prio_map;
- bnxt_re_update_gid(rdev);
- }
-
- return 0;
-}
-
static void bnxt_re_net_unregister_async_event(struct bnxt_re_dev *rdev)
{
if (rdev->is_virtfn)
@@ -1945,7 +1947,31 @@ static void bnxt_re_net_register_async_event(struct bnxt_re_dev *rdev)
ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE);
}
-static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
+static void bnxt_re_read_vpd_info(struct bnxt_re_dev *rdev)
+{
+ struct pci_dev *pdev = rdev->en_dev->pdev;
+ unsigned int vpd_size, kw_len;
+ int pos, size;
+ u8 *vpd_data;
+
+ vpd_data = pci_vpd_alloc(pdev, &vpd_size);
+ if (IS_ERR(vpd_data)) {
+ pci_warn(pdev, "Unable to read VPD, err=%pe\n", vpd_data);
+ return;
+ }
+
+ pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
+ PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
+ if (pos < 0)
+ goto free;
+
+ size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
+ memcpy(rdev->board_partno, &vpd_data[pos], size);
+free:
+ kfree(vpd_data);
+}
+
+static int bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
{
struct bnxt_en_dev *en_dev = rdev->en_dev;
struct hwrm_ver_get_output resp = {};
@@ -1964,7 +1990,7 @@ static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
if (rc) {
ibdev_err(&rdev->ibdev, "Failed to query HW version, rc = 0x%x",
rc);
- return;
+ return rc;
}
cctx = rdev->chip_ctx;
@@ -1978,6 +2004,8 @@ static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
if (!cctx->hwrm_cmd_max_timeout)
cctx->hwrm_cmd_max_timeout = RCFW_FW_STALL_MAX_TIMEOUT;
+
+ return 0;
}
static int bnxt_re_ib_init(struct bnxt_re_dev *rdev)
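
Because bnxt_re_query_hwrm_intf_version() now returns an error code instead of failing silently, its caller (the new call site is outside this hunk) can abort device init when HWRM_VER_GET fails; a minimal sketch, assuming a fail label in the caller:

	rc = bnxt_re_query_hwrm_intf_version(rdev);
	if (rc)
		goto fail;	/* later commands would otherwise run with default timeouts */
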
@@ -2039,6 +2067,72 @@ static void bnxt_re_free_gid_ctx(struct bnxt_re_dev *rdev)
}
}
+static int bnxt_re_get_stats_ctx(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_ctx *hctx = &rdev->qplib_ctx;
+ struct bnxt_qplib_res *res = &rdev->qplib_res;
+ int rc;
+
+ rc = bnxt_qplib_alloc_stats_ctx(res->pdev, res->cctx, &hctx->stats);
+ if (rc)
+ return rc;
+
+ rc = bnxt_re_net_stats_ctx_alloc(rdev, &hctx->stats);
+ if (rc)
+ goto free_stat_mem;
+
+ return 0;
+free_stat_mem:
+ bnxt_qplib_free_stats_ctx(res->pdev, &hctx->stats);
+
+ return rc;
+}
+
+static int bnxt_re_get_stats3_ctx(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_ctx *hctx = &rdev->qplib_ctx;
+ struct bnxt_qplib_res *res = &rdev->qplib_res;
+ int rc;
+
+ if (!rdev->rcfw.roce_mirror)
+ return 0;
+
+ rc = bnxt_qplib_alloc_stats_ctx(res->pdev, res->cctx, &hctx->stats3);
+ if (rc)
+ return rc;
+
+ rc = bnxt_re_net_stats_ctx_alloc(rdev, &hctx->stats3);
+ if (rc)
+ goto free_stat_mem;
+
+ return 0;
+free_stat_mem:
+ bnxt_qplib_free_stats_ctx(res->pdev, &hctx->stats3);
+
+ return rc;
+}
+
+static void bnxt_re_put_stats3_ctx(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_ctx *hctx = &rdev->qplib_ctx;
+ struct bnxt_qplib_res *res = &rdev->qplib_res;
+
+ if (!rdev->rcfw.roce_mirror)
+ return;
+
+ bnxt_re_net_stats_ctx_free(rdev, hctx->stats3.fw_id);
+ bnxt_qplib_free_stats_ctx(res->pdev, &hctx->stats3);
+}
+
+static void bnxt_re_put_stats_ctx(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_ctx *hctx = &rdev->qplib_ctx;
+ struct bnxt_qplib_res *res = &rdev->qplib_res;
+
+ bnxt_re_net_stats_ctx_free(rdev, hctx->stats.fw_id);
+ bnxt_qplib_free_stats_ctx(res->pdev, &hctx->stats);
+}
+
static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type)
{
u8 type;
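
The stats context is now acquired and released through dedicated helpers rather than inside the hw-context path; a sketch only of how the get/put pairs compose on the init side (in the actual patch the stats3 call happens later in init, and the uninit hunks below release both in reverse order):

/* Sketch: composing the new helpers defined above */
static int bnxt_re_stats_init_sketch(struct bnxt_re_dev *rdev)
{
	int rc;

	rc = bnxt_re_get_stats_ctx(rdev);	/* DMA alloc + HWRM STAT_CTX_ALLOC */
	if (rc)
		return rc;

	rc = bnxt_re_get_stats3_ctx(rdev);	/* no-op unless rcfw.roce_mirror */
	if (rc)
		bnxt_re_put_stats_ctx(rdev);

	return rc;
}
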
@@ -2049,8 +2143,7 @@ static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type)
bnxt_re_net_unregister_async_event(rdev);
bnxt_re_uninit_dcb_wq(rdev);
- if (test_and_clear_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags))
- cancel_delayed_work_sync(&rdev->worker);
+ bnxt_re_put_stats3_ctx(rdev);
bnxt_re_free_gid_ctx(rdev);
if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED,
@@ -2064,8 +2157,8 @@ static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type)
if (rc)
ibdev_warn(&rdev->ibdev,
"Failed to deinitialize RCFW: %#x", rc);
- bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
- bnxt_qplib_free_ctx(&rdev->qplib_res, &rdev->qplib_ctx);
+ bnxt_re_put_stats_ctx(rdev);
+ bnxt_qplib_free_hwctx(&rdev->qplib_res, &rdev->qplib_ctx);
bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
bnxt_re_net_ring_free(rdev, rdev->rcfw.creq.ring_id, type);
@@ -2085,16 +2178,6 @@ static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type)
}
}
-/* worker thread for polling periodic events. Now used for QoS programming*/
-static void bnxt_re_worker(struct work_struct *work)
-{
- struct bnxt_re_dev *rdev = container_of(work, struct bnxt_re_dev,
- worker.work);
-
- bnxt_re_setup_qos(rdev);
- schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
-}
-
static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type)
{
struct bnxt_re_ring_attr rattr = {};
@@ -2109,8 +2192,9 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type)
rc = bnxt_re_register_netdev(rdev);
if (rc) {
ibdev_err(&rdev->ibdev,
- "Failed to register with netedev: %#x\n", rc);
- return -EINVAL;
+ "Failed to register with Ethernet driver, rc %d\n",
+ rc);
+ return rc;
}
}
set_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
@@ -2148,8 +2232,6 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type)
/* Check whether VF or PF */
bnxt_re_get_sriov_func_type(rdev);
- bnxt_re_query_hwrm_intf_version(rdev);
-
/* Establish RCFW Communication Channel to initialize the context
* memory for the function and all child VFs
*/
@@ -2199,18 +2281,20 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type)
if (rc)
goto disable_rcfw;
+ bnxt_qplib_query_version(&rdev->rcfw);
bnxt_re_set_resource_limits(rdev);
- rc = bnxt_qplib_alloc_ctx(&rdev->qplib_res, &rdev->qplib_ctx, 0,
- bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx));
- if (rc) {
- ibdev_err(&rdev->ibdev,
- "Failed to allocate QPLIB context: %#x\n", rc);
- goto disable_rcfw;
+ if (!rdev->is_virtfn &&
+ !bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) {
+ rc = bnxt_qplib_alloc_hwctx(&rdev->qplib_res, &rdev->qplib_ctx);
+ if (rc) {
+ ibdev_err(&rdev->ibdev,
+ "Failed to allocate hw context: %#x\n", rc);
+ goto disable_rcfw;
+ }
}
- rc = bnxt_re_net_stats_ctx_alloc(rdev,
- rdev->qplib_ctx.stats.dma_map,
- &rdev->qplib_ctx.stats.fw_id);
+
+ rc = bnxt_re_get_stats_ctx(rdev);
if (rc) {
ibdev_err(&rdev->ibdev,
"Failed to allocate stats context: %#x\n", rc);
@@ -2249,15 +2333,6 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type)
if (rc)
ibdev_warn(&rdev->ibdev, "Failed to query CC defaults\n");
- rc = bnxt_re_setup_qos(rdev);
- if (rc)
- ibdev_info(&rdev->ibdev,
- "RoCE priority not yet configured\n");
-
- INIT_DELAYED_WORK(&rdev->worker, bnxt_re_worker);
- set_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags);
- schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
-
if (!(rdev->qplib_res.en_dev->flags & BNXT_EN_FLAG_ROCE_VF_RES_MGMT))
bnxt_re_vf_res_config(rdev);
}
@@ -2270,11 +2345,18 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type)
bnxt_re_init_dcb_wq(rdev);
bnxt_re_net_register_async_event(rdev);
+ if (!rdev->is_virtfn)
+ bnxt_re_read_vpd_info(rdev);
+
+ rc = bnxt_re_get_stats3_ctx(rdev);
+ if (rc)
+ goto fail;
+
return 0;
free_sctx:
bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
free_ctx:
- bnxt_qplib_free_ctx(&rdev->qplib_res, &rdev->qplib_ctx);
+ bnxt_qplib_free_hwctx(&rdev->qplib_res, &rdev->qplib_ctx);
disable_rcfw:
bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
free_ring:
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index ee36b3d82cc0..ce90d3d834d4 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -1307,6 +1307,7 @@ static bool is_optimized_state_transition(struct bnxt_qplib_qp *qp)
int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
+ struct bnxt_qplib_sgid_tbl *sgid_tbl = &res->sgid_tbl;
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct creq_modify_qp_resp resp = {};
struct bnxt_qplib_cmdqmsg msg = {};
@@ -1358,9 +1359,14 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL)
req.flow_label = cpu_to_le32(qp->ah.flow_label);
- if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)
- req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id
- [qp->ah.sgid_index]);
+ if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX) {
+ if (qp->type == CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE)
+ req.sgid_index =
+ cpu_to_le16(sgid_tbl->hw_id[qp->ugid_index]);
+ else
+ req.sgid_index =
+ cpu_to_le16(sgid_tbl->hw_id[qp->ah.sgid_index]);
+ }
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT)
req.hop_limit = qp->ah.hop_limit;
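
For raw-ethertype QPs the SGID index now comes from the new ugid_index field instead of the AH; a hedged sketch of a caller priming it before bnxt_qplib_modify_qp(), where ugid_idx is a hypothetical value resolved elsewhere:

	/* Sketch: raw QPs consume qp->ugid_index in the MODIFY_QP hunk above */
	if (qp->type == CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE)
		qp->ugid_index = ugid_idx;
	qp->modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX;
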
@@ -1464,6 +1470,7 @@ int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
qp->access = sb->access;
qp->pkey_index = le16_to_cpu(sb->pkey);
qp->qkey = le32_to_cpu(sb->qkey);
+ qp->udp_sport = le16_to_cpu(sb->udp_src_port);
temp32[0] = le32_to_cpu(sb->dgid[0]);
temp32[1] = le32_to_cpu(sb->dgid[1]);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index 4921a214c34c..b990d0c0ce1a 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -299,6 +299,7 @@ struct bnxt_qplib_qp {
u8 smac[6];
u16 vlan_id;
u16 port_id;
+ u16 udp_sport;
u8 nw_type;
struct bnxt_qplib_ah ah;
@@ -344,6 +345,7 @@ struct bnxt_qplib_qp {
u32 msn_tbl_sz;
bool is_host_msn_tbl;
u8 tos_dscp;
+ u32 ugid_index;
};
#define BNXT_RE_MAX_MSG_SIZE 0x80000000
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 804bc773b4ef..295a9610f3e6 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -186,7 +186,7 @@ static int __wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
* wait for command completion. Maximum holding interval is 8 second.
*
* Returns:
- * -ETIMEOUT if command is not completed in specific time interval.
+ * -ETIMEDOUT if command is not completed in specific time interval.
* 0 if command is completed by firmware.
*/
static int __block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
@@ -366,6 +366,7 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw,
wmb();
writel(cmdq_prod, cmdq->cmdq_mbox.prod);
writel(RCFW_CMDQ_TRIG_VAL, cmdq->cmdq_mbox.db);
+ print_hex_dump_bytes("req: ", DUMP_PREFIX_OFFSET, msg->req, msg->req_sz);
spin_unlock_bh(&hwq->lock);
/* Return the CREQ response pointer */
return 0;
@@ -381,7 +382,7 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw,
* This function can not be called from non-sleepable context.
*
* Returns:
- * -ETIMEOUT if command is not completed in specific time interval.
+ * -ETIMEDOUT if command is not completed in specific time interval.
* 0 if command is completed by firmware.
*/
static int __poll_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
@@ -631,6 +632,7 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
int rc = 0;
pdev = rcfw->pdev;
+ print_hex_dump_bytes("event: ", DUMP_PREFIX_OFFSET, qp_event, sizeof(*qp_event));
switch (qp_event->event) {
case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION:
err_event = (struct creq_qp_error_notification *)qp_event;
@@ -903,6 +905,10 @@ skip_ctx_setup:
flags |= CMDQ_INITIALIZE_FW_FLAGS_OPTIMIZE_MODIFY_QP_SUPPORTED;
if (rcfw->res->en_dev->flags & BNXT_EN_FLAG_ROCE_VF_RES_MGMT)
flags |= CMDQ_INITIALIZE_FW_FLAGS_L2_VF_RESOURCE_MGMT;
+ if (bnxt_qplib_roce_mirror_supported(rcfw->res->cctx)) {
+ flags |= CMDQ_INITIALIZE_FW_FLAGS_MIRROR_ON_ROCE_SUPPORTED;
+ rcfw->roce_mirror = true;
+ }
req.flags |= cpu_to_le16(flags);
req.stat_ctx_id = cpu_to_le32(ctx->stats.fw_id);
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index ff873c5f1b25..988c89b4232e 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -236,6 +236,7 @@ struct bnxt_qplib_rcfw {
atomic_t timeout_send;
/* cached from chip cctx for quick reference in slow path */
u16 max_timeout;
+ bool roce_mirror;
};
struct bnxt_qplib_cmdqmsg {
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index cc5c82d96839..875d7b52c06a 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -53,12 +53,6 @@
#include "qplib_sp.h"
#include "qplib_rcfw.h"
-static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
- struct bnxt_qplib_stats *stats);
-static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
- struct bnxt_qplib_chip_ctx *cctx,
- struct bnxt_qplib_stats *stats);
-
/* PBL */
static void __free_pbl(struct bnxt_qplib_res *res, struct bnxt_qplib_pbl *pbl,
bool is_umem)
@@ -352,8 +346,8 @@ fail:
}
/* Context Tables */
-void bnxt_qplib_free_ctx(struct bnxt_qplib_res *res,
- struct bnxt_qplib_ctx *ctx)
+void bnxt_qplib_free_hwctx(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_ctx *ctx)
{
int i;
@@ -367,7 +361,6 @@ void bnxt_qplib_free_ctx(struct bnxt_qplib_res *res,
/* restore original pde level before destroy */
ctx->tqm_ctx.pde.level = ctx->tqm_ctx.pde_level;
bnxt_qplib_free_hwq(res, &ctx->tqm_ctx.pde);
- bnxt_qplib_free_stats_ctx(res->pdev, &ctx->stats);
}
static int bnxt_qplib_alloc_tqm_rings(struct bnxt_qplib_res *res,
@@ -466,7 +459,7 @@ fail:
}
/*
- * Routine: bnxt_qplib_alloc_ctx
+ * Routine: bnxt_qplib_alloc_hwctx
* Description:
 * Context tables are memory regions used by the chip firmware.
* The 6 tables defined are:
@@ -486,17 +479,13 @@ fail:
* Returns:
* 0 if success, else -ERRORS
*/
-int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res,
- struct bnxt_qplib_ctx *ctx,
- bool virt_fn, bool is_p5)
+int bnxt_qplib_alloc_hwctx(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_ctx *ctx)
{
struct bnxt_qplib_hwq_attr hwq_attr = {};
struct bnxt_qplib_sg_info sginfo = {};
int rc;
- if (virt_fn || is_p5)
- goto stats_alloc;
-
/* QPC Tables */
sginfo.pgsize = PAGE_SIZE;
sginfo.pgshft = PAGE_SHIFT;
@@ -542,16 +531,11 @@ int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res,
rc = bnxt_qplib_alloc_init_hwq(&ctx->tim_tbl, &hwq_attr);
if (rc)
goto fail;
-stats_alloc:
- /* Stats */
- rc = bnxt_qplib_alloc_stats_ctx(res->pdev, res->cctx, &ctx->stats);
- if (rc)
- goto fail;
return 0;
fail:
- bnxt_qplib_free_ctx(res, ctx);
+ bnxt_qplib_free_hwctx(res, ctx);
return rc;
}
@@ -832,8 +816,8 @@ static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res *res,
}
/* Stats */
-static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
- struct bnxt_qplib_stats *stats)
+void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
+ struct bnxt_qplib_stats *stats)
{
if (stats->dma) {
dma_free_coherent(&pdev->dev, stats->size,
@@ -843,9 +827,9 @@ static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
stats->fw_id = -1;
}
-static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
- struct bnxt_qplib_chip_ctx *cctx,
- struct bnxt_qplib_stats *stats)
+int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
+ struct bnxt_qplib_chip_ctx *cctx,
+ struct bnxt_qplib_stats *stats)
{
memset(stats, 0, sizeof(*stats));
stats->fw_id = -1;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index 6a13927674b4..2ea3b7f232a3 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -65,6 +65,7 @@ struct bnxt_qplib_drv_modes {
bool db_push;
bool dbr_pacing;
u32 toggle_bits;
+ u8 roce_mirror;
};
enum bnxt_re_toggle_modes {
@@ -303,6 +304,7 @@ struct bnxt_qplib_ctx {
struct bnxt_qplib_hwq tim_tbl;
struct bnxt_qplib_tqm_ctx tqm_ctx;
struct bnxt_qplib_stats stats;
+ struct bnxt_qplib_stats stats3;
struct bnxt_qplib_vf_res vf_res;
};
@@ -432,15 +434,19 @@ void bnxt_qplib_cleanup_res(struct bnxt_qplib_res *res);
int bnxt_qplib_init_res(struct bnxt_qplib_res *res);
void bnxt_qplib_free_res(struct bnxt_qplib_res *res);
int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct net_device *netdev);
-void bnxt_qplib_free_ctx(struct bnxt_qplib_res *res,
- struct bnxt_qplib_ctx *ctx);
-int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res,
- struct bnxt_qplib_ctx *ctx,
- bool virt_fn, bool is_p5);
+void bnxt_qplib_free_hwctx(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_ctx *ctx);
+int bnxt_qplib_alloc_hwctx(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_ctx *ctx);
int bnxt_qplib_map_db_bar(struct bnxt_qplib_res *res);
void bnxt_qplib_unmap_db_bar(struct bnxt_qplib_res *res);
int bnxt_qplib_determine_atomics(struct pci_dev *dev);
+int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
+ struct bnxt_qplib_chip_ctx *cctx,
+ struct bnxt_qplib_stats *stats);
+void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
+ struct bnxt_qplib_stats *stats);
static inline void bnxt_qplib_hwq_incr_prod(struct bnxt_qplib_db_info *dbinfo,
struct bnxt_qplib_hwq *hwq, u32 cnt)
@@ -582,6 +588,11 @@ static inline u8 bnxt_qplib_dbr_pacing_en(struct bnxt_qplib_chip_ctx *cctx)
return cctx->modes.dbr_pacing;
}
+static inline u8 bnxt_qplib_roce_mirror_supported(struct bnxt_qplib_chip_ctx *cctx)
+{
+ return cctx->modes.roce_mirror;
+}
+
static inline bool _is_alloc_mr_unified(u16 dev_cap_flags)
{
return dev_cap_flags & CREQ_QUERY_FUNC_RESP_SB_MR_REGISTER_ALLOC;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 68981399598d..9ef581ed785c 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -66,14 +66,15 @@ static bool bnxt_qplib_is_atomic_cap(struct bnxt_qplib_rcfw *rcfw)
return (pcie_ctl2 & PCI_EXP_DEVCTL2_ATOMIC_REQ);
}
-static void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw,
- char *fw_ver)
+void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw)
{
struct creq_query_version_resp resp = {};
struct bnxt_qplib_cmdqmsg msg = {};
struct cmdq_query_version req = {};
+ struct bnxt_qplib_dev_attr *attr;
int rc;
+ attr = rcfw->res->dattr;
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
CMDQ_BASE_OPCODE_QUERY_VERSION,
sizeof(req));
@@ -82,10 +83,10 @@ static void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw,
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
if (rc)
return;
- fw_ver[0] = resp.fw_maj;
- fw_ver[1] = resp.fw_minor;
- fw_ver[2] = resp.fw_bld;
- fw_ver[3] = resp.fw_rsvd;
+ attr->fw_ver[0] = resp.fw_maj;
+ attr->fw_ver[1] = resp.fw_minor;
+ attr->fw_ver[2] = resp.fw_bld;
+ attr->fw_ver[3] = resp.fw_rsvd;
}
int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw)
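
With the firmware version cached in the device attributes rather than copied into a caller-supplied buffer, consumers can format it directly; a sketch, assuming attr->fw_ver keeps its four-byte major/minor/build/reserved layout:

	/* Sketch: report the version cached by bnxt_qplib_query_version() */
	struct bnxt_qplib_dev_attr *attr = rcfw->res->dattr;
	char ver[32];

	snprintf(ver, sizeof(ver), "%d.%d.%d.%d", attr->fw_ver[0],
		 attr->fw_ver[1], attr->fw_ver[2], attr->fw_ver[3]);
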
@@ -179,8 +180,6 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw)
if (_is_max_srq_ext_supported(attr->dev_cap_flags2))
attr->max_srq += le16_to_cpu(sb->max_srq_ext);
- bnxt_qplib_query_version(rcfw, attr->fw_ver);
-
for (i = 0; i < MAX_TQM_ALLOC_REQ / 4; i++) {
temp = le32_to_cpu(sb->tqm_alloc_reqs[i]);
tqm_alloc = (u8 *)&temp;
@@ -309,7 +308,8 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
struct bnxt_qplib_gid *gid, const u8 *smac,
- u16 vlan_id, bool update, u32 *index)
+ u16 vlan_id, bool update, u32 *index,
+ bool is_ugid, u32 stats_ctx_id)
{
struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
struct bnxt_qplib_res,
@@ -374,6 +374,9 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
req.src_mac[1] = cpu_to_be16(((u16 *)smac)[1]);
req.src_mac[2] = cpu_to_be16(((u16 *)smac)[2]);
+ req.stats_ctx = cpu_to_le16(CMDQ_ADD_GID_STATS_CTX_STATS_CTX_VALID |
+ (u16)stats_ctx_id);
+
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
sizeof(resp), 0);
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
@@ -397,46 +400,6 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
return 0;
}
-int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
- struct bnxt_qplib_gid *gid, u16 gid_idx,
- const u8 *smac)
-{
- struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
- struct bnxt_qplib_res,
- sgid_tbl);
- struct bnxt_qplib_rcfw *rcfw = res->rcfw;
- struct creq_modify_gid_resp resp = {};
- struct bnxt_qplib_cmdqmsg msg = {};
- struct cmdq_modify_gid req = {};
- int rc;
-
- bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
- CMDQ_BASE_OPCODE_MODIFY_GID,
- sizeof(req));
-
- req.gid[0] = cpu_to_be32(((u32 *)gid->data)[3]);
- req.gid[1] = cpu_to_be32(((u32 *)gid->data)[2]);
- req.gid[2] = cpu_to_be32(((u32 *)gid->data)[1]);
- req.gid[3] = cpu_to_be32(((u32 *)gid->data)[0]);
- if (res->prio) {
- req.vlan |= cpu_to_le16
- (CMDQ_ADD_GID_VLAN_TPID_TPID_8100 |
- CMDQ_ADD_GID_VLAN_VLAN_EN);
- }
-
- /* MAC in network format */
- req.src_mac[0] = cpu_to_be16(((u16 *)smac)[0]);
- req.src_mac[1] = cpu_to_be16(((u16 *)smac)[1]);
- req.src_mac[2] = cpu_to_be16(((u16 *)smac)[2]);
-
- req.gid_index = cpu_to_le16(gid_idx);
-
- bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
- sizeof(resp), 0);
- rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
- return rc;
-}
-
/* AH */
int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
bool block)
@@ -1143,3 +1106,40 @@ out:
dma_free_coherent(&rcfw->pdev->dev, sbuf.size, sbuf.sb, sbuf.dma_addr);
return rc;
}
+
+int bnxt_qplib_create_flow(struct bnxt_qplib_res *res)
+{
+ struct creq_roce_mirror_cfg_resp resp = {};
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct cmdq_roce_mirror_cfg req = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_ROCE_MIRROR_CFG,
+ sizeof(req));
+
+ req.mirror_flags = (u8)CMDQ_ROCE_MIRROR_CFG_MIRROR_ENABLE;
+
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+ sizeof(resp), 0);
+ return bnxt_qplib_rcfw_send_message(rcfw, &msg);
+}
+
+int bnxt_qplib_destroy_flow(struct bnxt_qplib_res *res)
+{
+ struct creq_roce_mirror_cfg_resp resp = {};
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct cmdq_roce_mirror_cfg req = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_ROCE_MIRROR_CFG,
+ sizeof(req));
+
+ req.mirror_flags &= ~((u8)CMDQ_ROCE_MIRROR_CFG_MIRROR_ENABLE);
+
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+ sizeof(resp), 0);
+
+ return bnxt_qplib_rcfw_send_message(rcfw, &msg);
+}
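
These qplib helpers back the .create_flow/.destroy_flow ib_device_ops added in the main.c hunk earlier; the verbs-level bnxt_re_create_flow() body is outside this diff, so the shim below is a hypothetical illustration only:

/* Hypothetical shim; the real bnxt_re_create_flow() is not in this diff */
struct ib_flow *bnxt_re_create_flow_sketch(struct ib_qp *ib_qp,
					   struct ib_flow_attr *attr,
					   struct ib_udata *udata)
{
	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
	struct bnxt_re_dev *rdev = qp->rdev;
	struct ib_flow *flow;
	int rc;

	rc = bnxt_qplib_create_flow(&rdev->qplib_res);	/* ROCE_MIRROR_CFG enable */
	if (rc)
		return ERR_PTR(rc);

	flow = kzalloc(sizeof(*flow), GFP_KERNEL);
	if (!flow) {
		bnxt_qplib_destroy_flow(&rdev->qplib_res);
		return ERR_PTR(-ENOMEM);
	}

	return flow;
}
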
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index 09faf4a1e849..147b5d9c0313 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -323,7 +323,8 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
struct bnxt_qplib_gid *gid, u16 vlan_id, bool update);
int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
struct bnxt_qplib_gid *gid, const u8 *mac, u16 vlan_id,
- bool update, u32 *index);
+ bool update, u32 *index,
+ bool is_ugid, u32 stats_ctx_id);
int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
struct bnxt_qplib_gid *gid, u16 gid_idx,
const u8 *smac);
@@ -358,6 +359,9 @@ int bnxt_qplib_read_context(struct bnxt_qplib_rcfw *rcfw, u8 type, u32 xid,
u32 resp_size, void *resp_va);
int bnxt_qplib_query_cc_param(struct bnxt_qplib_res *res,
struct bnxt_qplib_cc_param *cc_param);
+void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw);
+int bnxt_qplib_create_flow(struct bnxt_qplib_res *res);
+int bnxt_qplib_destroy_flow(struct bnxt_qplib_res *res);
#define BNXT_VAR_MAX_WQE 4352
#define BNXT_VAR_MAX_SLOT_ALIGN 256
diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
index 024845f945ff..99ecd72e72e2 100644
--- a/drivers/infiniband/hw/bnxt_re/roce_hsi.h
+++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
@@ -144,7 +144,8 @@ struct cmdq_base {
#define CMDQ_BASE_OPCODE_MODIFY_CQ 0x90UL
#define CMDQ_BASE_OPCODE_QUERY_QP_EXTEND 0x91UL
#define CMDQ_BASE_OPCODE_QUERY_ROCE_STATS_EXT 0x92UL
- #define CMDQ_BASE_OPCODE_LAST CMDQ_BASE_OPCODE_QUERY_ROCE_STATS_EXT
+ #define CMDQ_BASE_OPCODE_ROCE_MIRROR_CFG 0x99UL
+ #define CMDQ_BASE_OPCODE_LAST CMDQ_BASE_OPCODE_ROCE_MIRROR_CFG
u8 cmd_size;
__le16 flags;
__le16 cookie;
@@ -218,6 +219,7 @@ struct cmdq_initialize_fw {
#define CMDQ_INITIALIZE_FW_FLAGS_HW_REQUESTER_RETX_SUPPORTED 0x2UL
#define CMDQ_INITIALIZE_FW_FLAGS_OPTIMIZE_MODIFY_QP_SUPPORTED 0x8UL
#define CMDQ_INITIALIZE_FW_FLAGS_L2_VF_RESOURCE_MGMT 0x10UL
+ #define CMDQ_INITIALIZE_FW_FLAGS_MIRROR_ON_ROCE_SUPPORTED 0x80UL
__le16 cookie;
u8 resp_size;
u8 reserved8;
@@ -788,7 +790,8 @@ struct creq_query_qp_resp_sb {
#define CREQ_QUERY_QP_RESP_SB_ACCESS_REMOTE_ATOMIC 0x8UL
__le16 pkey;
__le32 qkey;
- __le32 reserved32;
+ __le16 udp_src_port;
+ __le16 reserved16;
__le32 dgid[4];
__le32 flow_label;
__le16 sgid_index;
@@ -2108,6 +2111,43 @@ struct creq_query_roce_stats_ext_resp_sb {
__le64 dup_req;
};
+/* cmdq_roce_mirror_cfg (size:192b/24B) */
+struct cmdq_roce_mirror_cfg {
+ u8 opcode;
+ #define CMDQ_ROCE_MIRROR_CFG_OPCODE_ROCE_MIRROR_CFG 0x99UL
+ #define CMDQ_ROCE_MIRROR_CFG_OPCODE_LAST \
+ CMDQ_ROCE_MIRROR_CFG_OPCODE_ROCE_MIRROR_CFG
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ u8 mirror_flags;
+ #define CMDQ_ROCE_MIRROR_CFG_MIRROR_ENABLE 0x1UL
+ u8 rsvd[7];
+};
+
+/* creq_roce_mirror_cfg_resp (size:128b/16B) */
+struct creq_roce_mirror_cfg_resp {
+ u8 type;
+ #define CREQ_ROCE_MIRROR_CFG_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_ROCE_MIRROR_CFG_RESP_TYPE_SFT 0
+ #define CREQ_ROCE_MIRROR_CFG_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_ROCE_MIRROR_CFG_RESP_TYPE_LAST \
+ CREQ_ROCE_MIRROR_CFG_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 reserved32;
+ u8 v;
+ #define CREQ_ROCE_MIRROR_CFG_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_ROCE_MIRROR_CFG_RESP_EVENT_ROCE_MIRROR_CFG 0x99UL
+ #define CREQ_ROCE_MIRROR_CFG_RESP_EVENT_LAST \
+ CREQ_ROCE_MIRROR_CFG_RESP_EVENT_ROCE_MIRROR_CFG
+ u8 reserved48[6];
+};
+
/* cmdq_query_func (size:128b/16B) */
struct cmdq_query_func {
u8 opcode;
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index b67747ae6a68..d892f55febe2 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -1228,9 +1228,8 @@ static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
if (!ctx->dev) {
ctx->dev = c4iw_alloc(&ctx->lldi);
if (IS_ERR(ctx->dev)) {
- pr_err("%s: initialization failed: %ld\n",
- pci_name(ctx->lldi.pdev),
- PTR_ERR(ctx->dev));
+ pr_err("%s: initialization failed: %pe\n",
+ pci_name(ctx->lldi.pdev), ctx->dev);
ctx->dev = NULL;
break;
}
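
This and several later hunks (efa, hfi1, hns) switch error logging from PTR_ERR()/%ld to the %pe specifier, which prints the symbolic errno name embedded in an ERR_PTR; a minimal standalone sketch:

	/* %pe renders an ERR_PTR as e.g. "-ENOMEM" (with CONFIG_SYMBOLIC_ERRNAME) */
	void *p = ERR_PTR(-ENOMEM);

	if (IS_ERR(p))
		pr_err("allocation failed: %pe\n", p);
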
diff --git a/drivers/infiniband/hw/efa/efa_com.c b/drivers/infiniband/hw/efa/efa_com.c
index bafd210dd43e..0e979ca10d24 100644
--- a/drivers/infiniband/hw/efa/efa_com.c
+++ b/drivers/infiniband/hw/efa/efa_com.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
- * Copyright 2018-2024 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2025 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#include "efa_com.h"
@@ -30,6 +30,7 @@ struct efa_comp_ctx {
struct efa_admin_acq_entry *user_cqe;
u32 comp_size;
enum efa_cmd_status status;
+ u16 cmd_id;
u8 cmd_opcode;
u8 occupied;
};
@@ -333,6 +334,7 @@ static struct efa_comp_ctx *__efa_com_submit_admin_cmd(struct efa_com_admin_queu
comp_ctx->comp_size = comp_size_in_bytes;
comp_ctx->user_cqe = comp;
comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;
+ comp_ctx->cmd_id = cmd_id;
reinit_completion(&comp_ctx->wait_event);
@@ -557,17 +559,19 @@ static int efa_com_wait_and_process_admin_cq_interrupts(struct efa_comp_ctx *com
if (comp_ctx->status == EFA_CMD_COMPLETED)
ibdev_err_ratelimited(
aq->efa_dev,
- "The device sent a completion but the driver didn't receive any MSI-X interrupt for admin cmd %s(%d) status %d (ctx: 0x%p, sq producer: %d, sq consumer: %d, cq consumer: %d)\n",
+ "The device sent a completion but the driver didn't receive any MSI-X interrupt for admin cmd %s(%d) status %d (id: %d, sq producer: %d, sq consumer: %d, cq consumer: %d)\n",
efa_com_cmd_str(comp_ctx->cmd_opcode),
comp_ctx->cmd_opcode, comp_ctx->status,
- comp_ctx, aq->sq.pc, aq->sq.cc, aq->cq.cc);
+ comp_ctx->cmd_id, aq->sq.pc, aq->sq.cc,
+ aq->cq.cc);
else
ibdev_err_ratelimited(
aq->efa_dev,
- "The device didn't send any completion for admin cmd %s(%d) status %d (ctx 0x%p, sq producer: %d, sq consumer: %d, cq consumer: %d)\n",
+ "The device didn't send any completion for admin cmd %s(%d) status %d (id: %d, sq producer: %d, sq consumer: %d, cq consumer: %d)\n",
efa_com_cmd_str(comp_ctx->cmd_opcode),
comp_ctx->cmd_opcode, comp_ctx->status,
- comp_ctx, aq->sq.pc, aq->sq.cc, aq->cq.cc);
+ comp_ctx->cmd_id, aq->sq.pc, aq->sq.cc,
+ aq->cq.cc);
clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);
err = -ETIME;
@@ -631,9 +635,9 @@ int efa_com_cmd_exec(struct efa_com_admin_queue *aq,
if (IS_ERR(comp_ctx)) {
ibdev_err_ratelimited(
aq->efa_dev,
- "Failed to submit command %s (opcode %u) err %ld\n",
+ "Failed to submit command %s (opcode %u) err %pe\n",
efa_com_cmd_str(cmd->aq_common_descriptor.opcode),
- cmd->aq_common_descriptor.opcode, PTR_ERR(comp_ctx));
+ cmd->aq_common_descriptor.opcode, comp_ctx);
up(&aq->avail_cmds);
atomic64_inc(&aq->stats.cmd_err);
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index 886923d5fe50..d9a12681f843 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -1788,7 +1788,8 @@ struct ib_mr *efa_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start,
access_flags);
if (IS_ERR(umem_dmabuf)) {
err = PTR_ERR(umem_dmabuf);
- ibdev_dbg(&dev->ibdev, "Failed to get dmabuf umem[%d]\n", err);
+ ibdev_dbg(&dev->ibdev, "Failed to get dmabuf umem[%pe]\n",
+ umem_dmabuf);
goto err_free;
}
@@ -1832,7 +1833,8 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
if (IS_ERR(mr->umem)) {
err = PTR_ERR(mr->umem);
ibdev_dbg(&dev->ibdev,
- "Failed to pin and map user space memory[%d]\n", err);
+ "Failed to pin and map user space memory[%pe]\n",
+ mr->umem);
goto err_free;
}
diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.c b/drivers/infiniband/hw/erdma/erdma_verbs.c
index fdeec33c71da..109a3f3de911 100644
--- a/drivers/infiniband/hw/erdma/erdma_verbs.c
+++ b/drivers/infiniband/hw/erdma/erdma_verbs.c
@@ -149,7 +149,7 @@ static int regmr_cmd(struct erdma_dev *dev, struct erdma_mr *mr)
req.phy_addr[0] = mr->mem.mtt->buf_dma;
mtt_level = ERDMA_MR_MTT_1LEVEL;
} else {
- req.phy_addr[0] = sg_dma_address(mr->mem.mtt->sglist);
+ req.phy_addr[0] = mr->mem.mtt->dma_addrs[0];
mtt_level = mr->mem.mtt->level;
}
} else if (mr->type != ERDMA_MR_TYPE_DMA) {
@@ -626,18 +626,27 @@ err_free_mtt:
return ERR_PTR(-ENOMEM);
}
-static void erdma_destroy_mtt_buf_sg(struct erdma_dev *dev,
- struct erdma_mtt *mtt)
+static void erdma_unmap_page_list(struct erdma_dev *dev, dma_addr_t *pg_dma,
+ u32 npages)
{
- dma_unmap_sg(&dev->pdev->dev, mtt->sglist,
- DIV_ROUND_UP(mtt->size, PAGE_SIZE), DMA_TO_DEVICE);
- vfree(mtt->sglist);
+ u32 i;
+
+ for (i = 0; i < npages; i++)
+ dma_unmap_page(&dev->pdev->dev, pg_dma[i], PAGE_SIZE,
+ DMA_TO_DEVICE);
+}
+
+static void erdma_destroy_mtt_buf_dma_addrs(struct erdma_dev *dev,
+ struct erdma_mtt *mtt)
+{
+ erdma_unmap_page_list(dev, mtt->dma_addrs, mtt->npages);
+ vfree(mtt->dma_addrs);
}
static void erdma_destroy_scatter_mtt(struct erdma_dev *dev,
struct erdma_mtt *mtt)
{
- erdma_destroy_mtt_buf_sg(dev, mtt);
+ erdma_destroy_mtt_buf_dma_addrs(dev, mtt);
vfree(mtt->buf);
kfree(mtt);
}
@@ -645,50 +654,69 @@ static void erdma_destroy_scatter_mtt(struct erdma_dev *dev,
static void erdma_init_middle_mtt(struct erdma_mtt *mtt,
struct erdma_mtt *low_mtt)
{
- struct scatterlist *sg;
- u32 idx = 0, i;
+ dma_addr_t *pg_addr = mtt->buf;
+ u32 i;
- for_each_sg(low_mtt->sglist, sg, low_mtt->nsg, i)
- mtt->buf[idx++] = sg_dma_address(sg);
+ for (i = 0; i < low_mtt->npages; i++)
+ pg_addr[i] = low_mtt->dma_addrs[i];
}
-static int erdma_create_mtt_buf_sg(struct erdma_dev *dev, struct erdma_mtt *mtt)
+static u32 vmalloc_to_dma_addrs(struct erdma_dev *dev, dma_addr_t **dma_addrs,
+ void *buf, u64 len)
{
- struct scatterlist *sglist;
- void *buf = mtt->buf;
- u32 npages, i, nsg;
+ dma_addr_t *pg_dma;
struct page *pg;
+ u32 npages, i;
+ void *addr;
- /* Failed if buf is not page aligned */
- if ((uintptr_t)buf & ~PAGE_MASK)
- return -EINVAL;
-
- npages = DIV_ROUND_UP(mtt->size, PAGE_SIZE);
- sglist = vzalloc(npages * sizeof(*sglist));
- if (!sglist)
- return -ENOMEM;
+ npages = (PAGE_ALIGN((u64)buf + len) - PAGE_ALIGN_DOWN((u64)buf)) >>
+ PAGE_SHIFT;
+ pg_dma = vcalloc(npages, sizeof(*pg_dma));
+ if (!pg_dma)
+ return 0;
- sg_init_table(sglist, npages);
+ addr = buf;
for (i = 0; i < npages; i++) {
- pg = vmalloc_to_page(buf);
+ pg = vmalloc_to_page(addr);
if (!pg)
goto err;
- sg_set_page(&sglist[i], pg, PAGE_SIZE, 0);
- buf += PAGE_SIZE;
+
+ pg_dma[i] = dma_map_page(&dev->pdev->dev, pg, 0, PAGE_SIZE,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&dev->pdev->dev, pg_dma[i]))
+ goto err;
+
+ addr += PAGE_SIZE;
}
- nsg = dma_map_sg(&dev->pdev->dev, sglist, npages, DMA_TO_DEVICE);
- if (!nsg)
- goto err;
+ *dma_addrs = pg_dma;
- mtt->sglist = sglist;
- mtt->nsg = nsg;
+ return npages;
+err:
+ erdma_unmap_page_list(dev, pg_dma, i);
+ vfree(pg_dma);
return 0;
-err:
- vfree(sglist);
+}
- return -ENOMEM;
+static int erdma_create_mtt_buf_dma_addrs(struct erdma_dev *dev,
+ struct erdma_mtt *mtt)
+{
+ dma_addr_t *addrs;
+ u32 npages;
+
+	/* Fail if buf is not page aligned */
+ if ((uintptr_t)mtt->buf & ~PAGE_MASK)
+ return -EINVAL;
+
+ npages = vmalloc_to_dma_addrs(dev, &addrs, mtt->buf, mtt->size);
+ if (!npages)
+ return -ENOMEM;
+
+ mtt->dma_addrs = addrs;
+ mtt->npages = npages;
+
+ return 0;
}
static struct erdma_mtt *erdma_create_scatter_mtt(struct erdma_dev *dev,
@@ -707,12 +735,12 @@ static struct erdma_mtt *erdma_create_scatter_mtt(struct erdma_dev *dev,
if (!mtt->buf)
goto err_free_mtt;
- ret = erdma_create_mtt_buf_sg(dev, mtt);
+ ret = erdma_create_mtt_buf_dma_addrs(dev, mtt);
if (ret)
goto err_free_mtt_buf;
- ibdev_dbg(&dev->ibdev, "create scatter mtt, size:%lu, nsg:%u\n",
- mtt->size, mtt->nsg);
+ ibdev_dbg(&dev->ibdev, "create scatter mtt, size:%lu, npages:%u\n",
+ mtt->size, mtt->npages);
return mtt;
@@ -746,8 +774,8 @@ static struct erdma_mtt *erdma_create_mtt(struct erdma_dev *dev, size_t size,
level = 1;
	/* converge the mtt table. */
- while (mtt->nsg != 1 && level <= 3) {
- tmp_mtt = erdma_create_scatter_mtt(dev, MTT_SIZE(mtt->nsg));
+ while (mtt->npages != 1 && level <= 3) {
+ tmp_mtt = erdma_create_scatter_mtt(dev, MTT_SIZE(mtt->npages));
if (IS_ERR(tmp_mtt)) {
ret = PTR_ERR(tmp_mtt);
goto err_free_mtt;
@@ -765,7 +793,7 @@ static struct erdma_mtt *erdma_create_mtt(struct erdma_dev *dev, size_t size,
mtt->level = level;
ibdev_dbg(&dev->ibdev, "top mtt: level:%d, dma_addr 0x%llx\n",
- mtt->level, mtt->sglist[0].dma_address);
+ mtt->level, mtt->dma_addrs[0]);
return mtt;
err_free_mtt:
diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.h b/drivers/infiniband/hw/erdma/erdma_verbs.h
index ef411b81fbd7..7d8d3fe501d5 100644
--- a/drivers/infiniband/hw/erdma/erdma_verbs.h
+++ b/drivers/infiniband/hw/erdma/erdma_verbs.h
@@ -99,8 +99,8 @@ struct erdma_mtt {
union {
dma_addr_t buf_dma;
struct {
- struct scatterlist *sglist;
- u32 nsg;
+ dma_addr_t *dma_addrs;
+ u32 npages;
u32 level;
};
};
diff --git a/drivers/infiniband/hw/hfi1/device.c b/drivers/infiniband/hw/hfi1/device.c
index 4250d077b06f..a98a4175e53b 100644
--- a/drivers/infiniband/hw/hfi1/device.c
+++ b/drivers/infiniband/hw/hfi1/device.c
@@ -64,9 +64,9 @@ int hfi1_cdev_init(int minor, const char *name,
if (IS_ERR(device)) {
ret = PTR_ERR(device);
+ pr_err("Could not create device for minor %d, %s (err %pe)\n",
+ minor, name, device);
device = NULL;
- pr_err("Could not create device for minor %d, %s (err %d)\n",
- minor, name, -ret);
cdev_del(cdev);
}
done:
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 719b7c34e238..5cfa4f8fbf3d 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -990,7 +990,7 @@ ssize_t sdma_set_cpu_to_sde_map(struct sdma_engine *sde, const char *buf,
}
/* Clean up old mappings */
- for_each_cpu(cpu, cpu_online_mask) {
+ for_each_online_cpu(cpu) {
struct sdma_rht_node *rht_node;
/* Don't cleanup sdes that are set in the new mask */
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index b72625283fcf..9b1aece1b080 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -498,8 +498,8 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
ntids, sizeof(*req->tids));
if (IS_ERR(tmp)) {
ret = PTR_ERR(tmp);
- SDMA_DBG(req, "Failed to copy %d TIDs (%d)",
- ntids, ret);
+ SDMA_DBG(req, "Failed to copy %d TIDs (%pe)", ntids,
+ tmp);
goto free_req;
}
req->tids = tmp;
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 0f037e545520..31cb8699e198 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -594,8 +594,8 @@ static int mtr_alloc_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
mtr->umem = ib_umem_get(ibdev, user_addr, total_size,
buf_attr->user_access);
if (IS_ERR(mtr->umem)) {
- ibdev_err(ibdev, "failed to get umem, ret = %ld.\n",
- PTR_ERR(mtr->umem));
+ ibdev_err(ibdev, "failed to get umem, ret = %pe.\n",
+ mtr->umem);
return -ENOMEM;
}
} else {
@@ -605,8 +605,8 @@ static int mtr_alloc_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
!mtr_has_mtt(buf_attr) ?
HNS_ROCE_BUF_DIRECT : 0);
if (IS_ERR(mtr->kmem)) {
- ibdev_err(ibdev, "failed to alloc kmem, ret = %ld.\n",
- PTR_ERR(mtr->kmem));
+ ibdev_err(ibdev, "failed to alloc kmem, ret = %pe.\n",
+ mtr->kmem);
return PTR_ERR(mtr->kmem);
}
}
diff --git a/drivers/infiniband/hw/ionic/Kconfig b/drivers/infiniband/hw/ionic/Kconfig
new file mode 100644
index 000000000000..de6f10e9b6e9
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/Kconfig
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (C) 2018-2025, Advanced Micro Devices, Inc.
+
+config INFINIBAND_IONIC
+ tristate "AMD Pensando DSC RDMA/RoCE Support"
+ depends on NETDEVICES && ETHERNET && PCI && INET && IONIC
+ help
+ This enables RDMA/RoCE support for the AMD Pensando family of
+ Distributed Services Cards (DSCs).
+
+ To learn more, visit our website at
+ <https://www.amd.com/en/products/accelerators/pensando.html>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called ionic_rdma.
diff --git a/drivers/infiniband/hw/ionic/Makefile b/drivers/infiniband/hw/ionic/Makefile
new file mode 100644
index 000000000000..957973742820
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y := -I $(srctree)/drivers/net/ethernet/pensando/ionic
+
+obj-$(CONFIG_INFINIBAND_IONIC) += ionic_rdma.o
+
+ionic_rdma-y := \
+ ionic_ibdev.o ionic_lif_cfg.o ionic_queue.o ionic_pgtbl.o ionic_admin.o \
+ ionic_controlpath.o ionic_datapath.o ionic_hw_stats.o
diff --git a/drivers/infiniband/hw/ionic/ionic_admin.c b/drivers/infiniband/hw/ionic/ionic_admin.c
new file mode 100644
index 000000000000..2537aa55d12d
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/ionic_admin.c
@@ -0,0 +1,1229 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+
+#include "ionic_fw.h"
+#include "ionic_ibdev.h"
+
+#define IONIC_EQ_COUNT_MIN 4
+#define IONIC_AQ_COUNT_MIN 1
+
+/* neither a valid queue position nor a negative error status */
+#define IONIC_ADMIN_POSTED 0x10000
+
+/* the CPU can be held with IRQs disabled for COUNT * MS (for create/destroy_ah) */
+#define IONIC_ADMIN_BUSY_RETRY_COUNT 2000
+#define IONIC_ADMIN_BUSY_RETRY_MS 1
+
+/* admin queue will be considered failed if a command takes longer */
+#define IONIC_ADMIN_TIMEOUT (HZ * 2)
+#define IONIC_ADMIN_WARN (HZ / 8)
+
+/* poll the admin cq periodically to tolerate and report a missed event */
+#define IONIC_ADMIN_DELAY (HZ / 8)
+
+/* work queue for polling the event queue and admin cq */
+struct workqueue_struct *ionic_evt_workq;
+
+static void ionic_admin_timedout(struct ionic_aq *aq)
+{
+ struct ionic_ibdev *dev = aq->dev;
+ unsigned long irqflags;
+ u16 pos;
+
+ spin_lock_irqsave(&aq->lock, irqflags);
+ if (ionic_queue_empty(&aq->q))
+ goto out;
+
+ /* Reset ALL adminq if any one times out */
+ if (atomic_read(&aq->admin_state) < IONIC_ADMIN_KILLED)
+ queue_work(ionic_evt_workq, &dev->reset_work);
+
+ ibdev_err(&dev->ibdev, "admin command timed out, aq %d after: %ums\n",
+ aq->aqid, (u32)jiffies_to_msecs(jiffies - aq->stamp));
+
+ pos = (aq->q.prod - 1) & aq->q.mask;
+ if (pos == aq->q.cons)
+ goto out;
+
+ ibdev_warn(&dev->ibdev, "admin pos %u (last posted)\n", pos);
+ print_hex_dump(KERN_WARNING, "cmd ", DUMP_PREFIX_OFFSET, 16, 1,
+ ionic_queue_at(&aq->q, pos),
+ BIT(aq->q.stride_log2), true);
+
+out:
+ spin_unlock_irqrestore(&aq->lock, irqflags);
+}
+
+static void ionic_admin_reset_dwork(struct ionic_ibdev *dev)
+{
+ if (atomic_read(&dev->admin_state) == IONIC_ADMIN_KILLED)
+ return;
+
+ queue_delayed_work(ionic_evt_workq, &dev->admin_dwork,
+ IONIC_ADMIN_DELAY);
+}
+
+static void ionic_admin_reset_wdog(struct ionic_aq *aq)
+{
+ if (atomic_read(&aq->admin_state) == IONIC_ADMIN_KILLED)
+ return;
+
+ aq->stamp = jiffies;
+ ionic_admin_reset_dwork(aq->dev);
+}
+
+static bool ionic_admin_next_cqe(struct ionic_ibdev *dev, struct ionic_cq *cq,
+ struct ionic_v1_cqe **cqe)
+{
+ struct ionic_v1_cqe *qcqe = ionic_queue_at_prod(&cq->q);
+
+ if (unlikely(cq->color != ionic_v1_cqe_color(qcqe)))
+ return false;
+
+ /* Prevent out-of-order reads of the CQE */
+ dma_rmb();
+ *cqe = qcqe;
+
+ return true;
+}
+
+static void ionic_admin_poll_locked(struct ionic_aq *aq)
+{
+ struct ionic_cq *cq = &aq->vcq->cq[0];
+ struct ionic_admin_wr *wr, *wr_next;
+ struct ionic_ibdev *dev = aq->dev;
+ u32 wr_strides, avlbl_strides;
+ struct ionic_v1_cqe *cqe;
+ u32 qtf, qid;
+ u16 old_prod;
+ u8 type;
+
+ lockdep_assert_held(&aq->lock);
+
+ if (atomic_read(&aq->admin_state) == IONIC_ADMIN_KILLED) {
+ list_for_each_entry_safe(wr, wr_next, &aq->wr_prod, aq_ent) {
+ INIT_LIST_HEAD(&wr->aq_ent);
+ aq->q_wr[wr->status].wr = NULL;
+ wr->status = atomic_read(&aq->admin_state);
+ complete_all(&wr->work);
+ }
+ INIT_LIST_HEAD(&aq->wr_prod);
+
+ list_for_each_entry_safe(wr, wr_next, &aq->wr_post, aq_ent) {
+ INIT_LIST_HEAD(&wr->aq_ent);
+ wr->status = atomic_read(&aq->admin_state);
+ complete_all(&wr->work);
+ }
+ INIT_LIST_HEAD(&aq->wr_post);
+
+ return;
+ }
+
+ old_prod = cq->q.prod;
+
+ while (ionic_admin_next_cqe(dev, cq, &cqe)) {
+ qtf = ionic_v1_cqe_qtf(cqe);
+ qid = ionic_v1_cqe_qtf_qid(qtf);
+ type = ionic_v1_cqe_qtf_type(qtf);
+
+ if (unlikely(type != IONIC_V1_CQE_TYPE_ADMIN)) {
+ ibdev_warn_ratelimited(&dev->ibdev,
+ "bad cqe type %u\n", type);
+ goto cq_next;
+ }
+
+ if (unlikely(qid != aq->aqid)) {
+ ibdev_warn_ratelimited(&dev->ibdev,
+ "bad cqe qid %u\n", qid);
+ goto cq_next;
+ }
+
+ if (unlikely(be16_to_cpu(cqe->admin.cmd_idx) != aq->q.cons)) {
+ ibdev_warn_ratelimited(&dev->ibdev,
+ "bad idx %u cons %u qid %u\n",
+ be16_to_cpu(cqe->admin.cmd_idx),
+ aq->q.cons, qid);
+ goto cq_next;
+ }
+
+ if (unlikely(ionic_queue_empty(&aq->q))) {
+ ibdev_warn_ratelimited(&dev->ibdev,
+ "bad cqe for empty adminq\n");
+ goto cq_next;
+ }
+
+ wr = aq->q_wr[aq->q.cons].wr;
+ if (wr) {
+ aq->q_wr[aq->q.cons].wr = NULL;
+ list_del_init(&wr->aq_ent);
+
+ wr->cqe = *cqe;
+ wr->status = atomic_read(&aq->admin_state);
+ complete_all(&wr->work);
+ }
+
+ ionic_queue_consume_entries(&aq->q,
+ aq->q_wr[aq->q.cons].wqe_strides);
+
+cq_next:
+ ionic_queue_produce(&cq->q);
+ cq->color = ionic_color_wrap(cq->q.prod, cq->color);
+ }
+
+ if (old_prod != cq->q.prod) {
+ ionic_admin_reset_wdog(aq);
+ cq->q.cons = cq->q.prod;
+ ionic_dbell_ring(dev->lif_cfg.dbpage, dev->lif_cfg.cq_qtype,
+ ionic_queue_dbell_val(&cq->q));
+ queue_work(ionic_evt_workq, &aq->work);
+ } else if (!aq->armed) {
+ aq->armed = true;
+ cq->arm_any_prod = ionic_queue_next(&cq->q, cq->arm_any_prod);
+ ionic_dbell_ring(dev->lif_cfg.dbpage, dev->lif_cfg.cq_qtype,
+ cq->q.dbell | IONIC_CQ_RING_ARM |
+ cq->arm_any_prod);
+ queue_work(ionic_evt_workq, &aq->work);
+ }
+
+ if (atomic_read(&aq->admin_state) != IONIC_ADMIN_ACTIVE)
+ return;
+
+ old_prod = aq->q.prod;
+
+ if (ionic_queue_empty(&aq->q) && !list_empty(&aq->wr_post))
+ ionic_admin_reset_wdog(aq);
+
+ if (list_empty(&aq->wr_post))
+ return;
+
+ do {
+ u8 *src;
+ int i, src_len;
+ size_t stride_len;
+
+ wr = list_first_entry(&aq->wr_post, struct ionic_admin_wr,
+ aq_ent);
+ wr_strides = (le16_to_cpu(wr->wqe.len) + ADMIN_WQE_HDR_LEN +
+ (ADMIN_WQE_STRIDE - 1)) >> aq->q.stride_log2;
+ avlbl_strides = ionic_queue_length_remaining(&aq->q);
+
+ if (wr_strides > avlbl_strides)
+ break;
+
+ list_move(&wr->aq_ent, &aq->wr_prod);
+ wr->status = aq->q.prod;
+ aq->q_wr[aq->q.prod].wr = wr;
+ aq->q_wr[aq->q.prod].wqe_strides = wr_strides;
+
+ src_len = le16_to_cpu(wr->wqe.len);
+ src = (uint8_t *)&wr->wqe.cmd;
+
+ /* First stride */
+ memcpy(ionic_queue_at_prod(&aq->q), &wr->wqe,
+ ADMIN_WQE_HDR_LEN);
+ stride_len = ADMIN_WQE_STRIDE - ADMIN_WQE_HDR_LEN;
+ if (stride_len > src_len)
+ stride_len = src_len;
+ memcpy(ionic_queue_at_prod(&aq->q) + ADMIN_WQE_HDR_LEN,
+ src, stride_len);
+ ibdev_dbg(&dev->ibdev, "post admin prod %u (%u strides)\n",
+ aq->q.prod, wr_strides);
+ print_hex_dump_debug("wqe ", DUMP_PREFIX_OFFSET, 16, 1,
+ ionic_queue_at_prod(&aq->q),
+ BIT(aq->q.stride_log2), true);
+ ionic_queue_produce(&aq->q);
+
+ /* Remaining strides */
+ for (i = stride_len; i < src_len; i += stride_len) {
+ stride_len = ADMIN_WQE_STRIDE;
+
+ if (i + stride_len > src_len)
+ stride_len = src_len - i;
+
+ memcpy(ionic_queue_at_prod(&aq->q), src + i,
+ stride_len);
+ print_hex_dump_debug("wqe ", DUMP_PREFIX_OFFSET, 16, 1,
+ ionic_queue_at_prod(&aq->q),
+ BIT(aq->q.stride_log2), true);
+ ionic_queue_produce(&aq->q);
+ }
+ } while (!list_empty(&aq->wr_post));
+
+ if (old_prod != aq->q.prod)
+ ionic_dbell_ring(dev->lif_cfg.dbpage, dev->lif_cfg.aq_qtype,
+ ionic_queue_dbell_val(&aq->q));
+}
+
+static void ionic_admin_dwork(struct work_struct *ws)
+{
+ struct ionic_ibdev *dev =
+ container_of(ws, struct ionic_ibdev, admin_dwork.work);
+ struct ionic_aq *aq, *bad_aq = NULL;
+ bool do_reschedule = false;
+ unsigned long irqflags;
+ bool do_reset = false;
+ u16 pos;
+ int i;
+
+ for (i = 0; i < dev->lif_cfg.aq_count; i++) {
+ aq = dev->aq_vec[i];
+
+ spin_lock_irqsave(&aq->lock, irqflags);
+
+ if (ionic_queue_empty(&aq->q))
+ goto next_aq;
+
+ /* Reschedule if any queue has outstanding work */
+ do_reschedule = true;
+
+ if (time_is_after_eq_jiffies(aq->stamp + IONIC_ADMIN_WARN))
+ /* Warning threshold not met, nothing to do */
+ goto next_aq;
+
+ /* See if polling now makes some progress */
+ pos = aq->q.cons;
+ ionic_admin_poll_locked(aq);
+ if (pos != aq->q.cons) {
+ ibdev_dbg(&dev->ibdev,
+ "missed event for acq %d\n", aq->cqid);
+ goto next_aq;
+ }
+
+ if (time_is_after_eq_jiffies(aq->stamp +
+ IONIC_ADMIN_TIMEOUT)) {
+ /* Timeout threshold not met */
+ ibdev_dbg(&dev->ibdev, "no progress after %ums\n",
+ (u32)jiffies_to_msecs(jiffies - aq->stamp));
+ goto next_aq;
+ }
+
+ /* Queue timed out */
+ bad_aq = aq;
+ do_reset = true;
+next_aq:
+ spin_unlock_irqrestore(&aq->lock, irqflags);
+ }
+
+ if (do_reset)
+ /* Reset RDMA lif on a timeout */
+ ionic_admin_timedout(bad_aq);
+ else if (do_reschedule)
+ /* Try to poll again later */
+ ionic_admin_reset_dwork(dev);
+}
+
+static void ionic_admin_work(struct work_struct *ws)
+{
+ struct ionic_aq *aq = container_of(ws, struct ionic_aq, work);
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&aq->lock, irqflags);
+ ionic_admin_poll_locked(aq);
+ spin_unlock_irqrestore(&aq->lock, irqflags);
+}
+
+static void ionic_admin_post_aq(struct ionic_aq *aq, struct ionic_admin_wr *wr)
+{
+ unsigned long irqflags;
+ bool poll;
+
+ wr->status = IONIC_ADMIN_POSTED;
+ wr->aq = aq;
+
+ spin_lock_irqsave(&aq->lock, irqflags);
+ poll = list_empty(&aq->wr_post);
+ list_add(&wr->aq_ent, &aq->wr_post);
+ if (poll)
+ ionic_admin_poll_locked(aq);
+ spin_unlock_irqrestore(&aq->lock, irqflags);
+}
+
+void ionic_admin_post(struct ionic_ibdev *dev, struct ionic_admin_wr *wr)
+{
+ int aq_idx;
+
+ /* Use cpu id for the adminq selection */
+ aq_idx = raw_smp_processor_id() % dev->lif_cfg.aq_count;
+ ionic_admin_post_aq(dev->aq_vec[aq_idx], wr);
+}
+
+static void ionic_admin_cancel(struct ionic_admin_wr *wr)
+{
+ struct ionic_aq *aq = wr->aq;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&aq->lock, irqflags);
+
+ if (!list_empty(&wr->aq_ent)) {
+ list_del(&wr->aq_ent);
+ if (wr->status != IONIC_ADMIN_POSTED)
+ aq->q_wr[wr->status].wr = NULL;
+ }
+
+ spin_unlock_irqrestore(&aq->lock, irqflags);
+}
+
+static int ionic_admin_busy_wait(struct ionic_admin_wr *wr)
+{
+ struct ionic_aq *aq = wr->aq;
+ unsigned long irqflags;
+ int try_i;
+
+ for (try_i = 0; try_i < IONIC_ADMIN_BUSY_RETRY_COUNT; ++try_i) {
+ if (completion_done(&wr->work))
+ return 0;
+
+ mdelay(IONIC_ADMIN_BUSY_RETRY_MS);
+
+ spin_lock_irqsave(&aq->lock, irqflags);
+ ionic_admin_poll_locked(aq);
+ spin_unlock_irqrestore(&aq->lock, irqflags);
+ }
+
+ /*
+ * We timed out. Initiate an RDMA LIF reset and indicate the
+ * error to the caller.
+ */
+ ionic_admin_timedout(aq);
+ return -ETIMEDOUT;
+}
+
+int ionic_admin_wait(struct ionic_ibdev *dev, struct ionic_admin_wr *wr,
+ enum ionic_admin_flags flags)
+{
+ int rc, timo;
+
+ if (flags & IONIC_ADMIN_F_BUSYWAIT) {
+ /* Spin */
+ rc = ionic_admin_busy_wait(wr);
+ } else if (flags & IONIC_ADMIN_F_INTERRUPT) {
+ /*
+ * Interruptible sleep, 1s timeout
+ * This is used for commands which are safe for the caller
+ * to clean up without killing and resetting the adminq.
+ */
+ timo = wait_for_completion_interruptible_timeout(&wr->work,
+ HZ);
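+ /* timo > 0: completed; timo == 0: timed out; timo < 0: -ERESTARTSYS */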
+ if (timo > 0)
+ rc = 0;
+ else if (timo == 0)
+ rc = -ETIMEDOUT;
+ else
+ rc = timo;
+ } else {
+ /*
+ * Uninterruptible sleep
+ * This is used for commands which are NOT safe for the
+ * caller to clean up. Cleanup must be handled by the
+ * adminq kill and reset process so that host memory is
+ * not corrupted by the device.
+ */
+ wait_for_completion(&wr->work);
+ rc = 0;
+ }
+
+ if (rc) {
+ ibdev_warn(&dev->ibdev, "wait status %d\n", rc);
+ ionic_admin_cancel(wr);
+ } else if (wr->status == IONIC_ADMIN_KILLED) {
+ ibdev_dbg(&dev->ibdev, "admin killed\n");
+
+ /* No error if admin already killed during teardown */
+ rc = (flags & IONIC_ADMIN_F_TEARDOWN) ? 0 : -ENODEV;
+ } else if (ionic_v1_cqe_error(&wr->cqe)) {
+ ibdev_warn(&dev->ibdev, "opcode %u error %u\n",
+ wr->wqe.op,
+ be32_to_cpu(wr->cqe.status_length));
+ rc = -EINVAL;
+ }
+ return rc;
+}
+
+static int ionic_rdma_devcmd(struct ionic_ibdev *dev,
+ struct ionic_admin_ctx *admin)
+{
+ int rc;
+
+ rc = ionic_adminq_post_wait(dev->lif_cfg.lif, admin);
+ if (rc)
+ return rc;
+
+ return ionic_error_to_errno(admin->comp.comp.status);
+}
+
+int ionic_rdma_reset_devcmd(struct ionic_ibdev *dev)
+{
+ struct ionic_admin_ctx admin = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(admin.work),
+ .cmd.rdma_reset = {
+ .opcode = IONIC_CMD_RDMA_RESET_LIF,
+ .lif_index = cpu_to_le16(dev->lif_cfg.lif_index),
+ },
+ };
+
+ return ionic_rdma_devcmd(dev, &admin);
+}
+
+static int ionic_rdma_queue_devcmd(struct ionic_ibdev *dev,
+ struct ionic_queue *q,
+ u32 qid, u32 cid, u16 opcode)
+{
+ struct ionic_admin_ctx admin = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(admin.work),
+ .cmd.rdma_queue = {
+ .opcode = opcode,
+ .lif_index = cpu_to_le16(dev->lif_cfg.lif_index),
+ .qid_ver = cpu_to_le32(qid),
+ .cid = cpu_to_le32(cid),
+ .dbid = cpu_to_le16(dev->lif_cfg.dbid),
+ .depth_log2 = q->depth_log2,
+ .stride_log2 = q->stride_log2,
+ .dma_addr = cpu_to_le64(q->dma),
+ },
+ };
+
+ return ionic_rdma_devcmd(dev, &admin);
+}
+
+static void ionic_rdma_admincq_comp(struct ib_cq *ibcq, void *cq_context)
+{
+ struct ionic_aq *aq = cq_context;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&aq->lock, irqflags);
+ aq->armed = false;
+ if (atomic_read(&aq->admin_state) < IONIC_ADMIN_KILLED)
+ queue_work(ionic_evt_workq, &aq->work);
+ spin_unlock_irqrestore(&aq->lock, irqflags);
+}
+
+static void ionic_rdma_admincq_event(struct ib_event *event, void *cq_context)
+{
+ struct ionic_aq *aq = cq_context;
+
+ ibdev_err(&aq->dev->ibdev, "admincq event %d\n", event->event);
+}
+
+static struct ionic_vcq *ionic_create_rdma_admincq(struct ionic_ibdev *dev,
+ int comp_vector)
+{
+ struct ib_cq_init_attr attr = {
+ .cqe = IONIC_AQ_DEPTH,
+ .comp_vector = comp_vector,
+ };
+ struct ionic_tbl_buf buf = {};
+ struct ionic_vcq *vcq;
+ struct ionic_cq *cq;
+ int rc;
+
+ vcq = kzalloc(sizeof(*vcq), GFP_KERNEL);
+ if (!vcq)
+ return ERR_PTR(-ENOMEM);
+
+ vcq->ibcq.device = &dev->ibdev;
+ vcq->ibcq.comp_handler = ionic_rdma_admincq_comp;
+ vcq->ibcq.event_handler = ionic_rdma_admincq_event;
+ atomic_set(&vcq->ibcq.usecnt, 0);
+
+ vcq->udma_mask = 1;
+ cq = &vcq->cq[0];
+
+ rc = ionic_create_cq_common(vcq, &buf, &attr, NULL, NULL,
+ NULL, NULL, 0);
+ if (rc)
+ goto err_init;
+
+ rc = ionic_rdma_queue_devcmd(dev, &cq->q, cq->cqid, cq->eqid,
+ IONIC_CMD_RDMA_CREATE_CQ);
+ if (rc)
+ goto err_cmd;
+
+ return vcq;
+
+err_cmd:
+ ionic_destroy_cq_common(dev, cq);
+err_init:
+ kfree(vcq);
+
+ return ERR_PTR(rc);
+}
+
+static struct ionic_aq *__ionic_create_rdma_adminq(struct ionic_ibdev *dev,
+ u32 aqid, u32 cqid)
+{
+ struct ionic_aq *aq;
+ int rc;
+
+ aq = kzalloc(sizeof(*aq), GFP_KERNEL);
+ if (!aq)
+ return ERR_PTR(-ENOMEM);
+
+ atomic_set(&aq->admin_state, IONIC_ADMIN_KILLED);
+ aq->dev = dev;
+ aq->aqid = aqid;
+ aq->cqid = cqid;
+ spin_lock_init(&aq->lock);
+
+ rc = ionic_queue_init(&aq->q, dev->lif_cfg.hwdev, IONIC_EQ_DEPTH,
+ ADMIN_WQE_STRIDE);
+ if (rc)
+ goto err_q;
+
+ ionic_queue_dbell_init(&aq->q, aq->aqid);
+
+ aq->q_wr = kcalloc((u32)aq->q.mask + 1, sizeof(*aq->q_wr), GFP_KERNEL);
+ if (!aq->q_wr) {
+ rc = -ENOMEM;
+ goto err_wr;
+ }
+
+ INIT_LIST_HEAD(&aq->wr_prod);
+ INIT_LIST_HEAD(&aq->wr_post);
+
+ INIT_WORK(&aq->work, ionic_admin_work);
+ aq->armed = false;
+
+ return aq;
+
+err_wr:
+ ionic_queue_destroy(&aq->q, dev->lif_cfg.hwdev);
+err_q:
+ kfree(aq);
+
+ return ERR_PTR(rc);
+}
+
+static void __ionic_destroy_rdma_adminq(struct ionic_ibdev *dev,
+ struct ionic_aq *aq)
+{
+ kfree(aq->q_wr);
+ ionic_queue_destroy(&aq->q, dev->lif_cfg.hwdev);
+ kfree(aq);
+}
+
+static struct ionic_aq *ionic_create_rdma_adminq(struct ionic_ibdev *dev,
+ u32 aqid, u32 cqid)
+{
+ struct ionic_aq *aq;
+ int rc;
+
+ aq = __ionic_create_rdma_adminq(dev, aqid, cqid);
+ if (IS_ERR(aq))
+ return aq;
+
+ rc = ionic_rdma_queue_devcmd(dev, &aq->q, aq->aqid, aq->cqid,
+ IONIC_CMD_RDMA_CREATE_ADMINQ);
+ if (rc)
+ goto err_cmd;
+
+ return aq;
+
+err_cmd:
+ __ionic_destroy_rdma_adminq(dev, aq);
+
+ return ERR_PTR(rc);
+}
+
+static void ionic_flush_qs(struct ionic_ibdev *dev)
+{
+ struct ionic_qp *qp, *qp_tmp;
+ struct ionic_cq *cq, *cq_tmp;
+ LIST_HEAD(flush_list);
+ unsigned long index;
+
+ WARN_ON(!irqs_disabled());
+
+ /* Flush qp send and recv */
+ xa_lock(&dev->qp_tbl);
+ xa_for_each(&dev->qp_tbl, index, qp) {
+ kref_get(&qp->qp_kref);
+ list_add_tail(&qp->ibkill_flush_ent, &flush_list);
+ }
+ xa_unlock(&dev->qp_tbl);
+
+ list_for_each_entry_safe(qp, qp_tmp, &flush_list, ibkill_flush_ent) {
+ ionic_flush_qp(dev, qp);
+ kref_put(&qp->qp_kref, ionic_qp_complete);
+ list_del(&qp->ibkill_flush_ent);
+ }
+
+ /* Notify completions */
+ xa_lock(&dev->cq_tbl);
+ xa_for_each(&dev->cq_tbl, index, cq) {
+ kref_get(&cq->cq_kref);
+ list_add_tail(&cq->ibkill_flush_ent, &flush_list);
+ }
+ xa_unlock(&dev->cq_tbl);
+
+ list_for_each_entry_safe(cq, cq_tmp, &flush_list, ibkill_flush_ent) {
+ ionic_notify_flush_cq(cq);
+ kref_put(&cq->cq_kref, ionic_cq_complete);
+ list_del(&cq->ibkill_flush_ent);
+ }
+}
+
+static void ionic_kill_ibdev(struct ionic_ibdev *dev, bool fatal_path)
+{
+ unsigned long irqflags;
+ bool do_flush = false;
+ int i;
+
+ /* Mark AQs for drain and flush the QPs while irqs are disabled */
+ local_irq_save(irqflags);
+
+ /* Mark each admin queue killed, flushing at most once */
+ for (i = 0; i < dev->lif_cfg.aq_count; i++) {
+ struct ionic_aq *aq = dev->aq_vec[i];
+
+ spin_lock(&aq->lock);
+ if (atomic_read(&aq->admin_state) != IONIC_ADMIN_KILLED) {
+ atomic_set(&aq->admin_state, IONIC_ADMIN_KILLED);
+ /* Flush incomplete admin commands */
+ ionic_admin_poll_locked(aq);
+ do_flush = true;
+ }
+ spin_unlock(&aq->lock);
+ }
+
+ if (do_flush)
+ ionic_flush_qs(dev);
+
+ local_irq_restore(irqflags);
+
+ /* Post a fatal event if requested */
+ if (fatal_path) {
+ struct ib_event ev;
+
+ ev.device = &dev->ibdev;
+ ev.element.port_num = 1;
+ ev.event = IB_EVENT_DEVICE_FATAL;
+
+ ib_dispatch_event(&ev);
+ }
+
+ atomic_set(&dev->admin_state, IONIC_ADMIN_KILLED);
+}
+
+void ionic_kill_rdma_admin(struct ionic_ibdev *dev, bool fatal_path)
+{
+ enum ionic_admin_state old_state;
+ unsigned long irqflags = 0;
+ int i, rc;
+
+ if (!dev->aq_vec)
+ return;
+
+ /*
+ * Admin queues are transitioned from active to paused to killed state.
+ * When in paused state, no new commands are issued to the device,
+ * nor are any completed locally. After resetting the lif, it will be
+ * safe to resume the rdma admin queues in the killed state. Commands
+ * will not be issued to the device, but will complete locally with status
+ * IONIC_ADMIN_KILLED. Handling completion will ensure that creating or
+ * modifying resources fails, but destroying resources succeeds.
+ * If there was a failure resetting the lif using this strategy,
+ * then the state of the device is unknown.
+ */
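+ /*
+ * In short: ACTIVE -> PAUSED (quiesce, reset lif) -> KILLED
+ * (commands complete locally with status IONIC_ADMIN_KILLED).
+ */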
+ old_state = atomic_cmpxchg(&dev->admin_state, IONIC_ADMIN_ACTIVE,
+ IONIC_ADMIN_PAUSED);
+ if (old_state != IONIC_ADMIN_ACTIVE)
+ return;
+
+ /* Pause all the AQs */
+ local_irq_save(irqflags);
+ for (i = 0; i < dev->lif_cfg.aq_count; i++) {
+ struct ionic_aq *aq = dev->aq_vec[i];
+
+ spin_lock(&aq->lock);
+ /* pause rdma admin queues to reset lif */
+ if (atomic_read(&aq->admin_state) == IONIC_ADMIN_ACTIVE)
+ atomic_set(&aq->admin_state, IONIC_ADMIN_PAUSED);
+ spin_unlock(&aq->lock);
+ }
+ local_irq_restore(irqflags);
+
+ rc = ionic_rdma_reset_devcmd(dev);
+ if (unlikely(rc)) {
+ ibdev_err(&dev->ibdev, "failed to reset rdma %d\n", rc);
+ ionic_request_rdma_reset(dev->lif_cfg.lif);
+ }
+
+ ionic_kill_ibdev(dev, fatal_path);
+}
+
+static void ionic_reset_work(struct work_struct *ws)
+{
+ struct ionic_ibdev *dev =
+ container_of(ws, struct ionic_ibdev, reset_work);
+
+ ionic_kill_rdma_admin(dev, true);
+}
+
+static bool ionic_next_eqe(struct ionic_eq *eq, struct ionic_v1_eqe *eqe)
+{
+ struct ionic_v1_eqe *qeqe;
+ bool color;
+
+ qeqe = ionic_queue_at_prod(&eq->q);
+ color = ionic_v1_eqe_color(qeqe);
+
+ /* cons is color for eq: no consumer index is shared with the
+ * hardware, so q.cons holds the expected color (phase) bit,
+ * which flips each time the producer index wraps.
+ */
+ if (eq->q.cons != color)
+ return false;
+
+ /* Prevent out-of-order reads of the EQE */
+ dma_rmb();
+
+ ibdev_dbg(&eq->dev->ibdev, "poll eq prod %u\n", eq->q.prod);
+ print_hex_dump_debug("eqe ", DUMP_PREFIX_OFFSET, 16, 1,
+ qeqe, BIT(eq->q.stride_log2), true);
+ *eqe = *qeqe;
+
+ return true;
+}
+
+static void ionic_cq_event(struct ionic_ibdev *dev, u32 cqid, u8 code)
+{
+ unsigned long irqflags;
+ struct ib_event ibev;
+ struct ionic_cq *cq;
+
+ xa_lock_irqsave(&dev->cq_tbl, irqflags);
+ cq = xa_load(&dev->cq_tbl, cqid);
+ if (cq)
+ kref_get(&cq->cq_kref);
+ xa_unlock_irqrestore(&dev->cq_tbl, irqflags);
+
+ if (!cq) {
+ ibdev_dbg(&dev->ibdev,
+ "missing cqid %#x code %u\n", cqid, code);
+ return;
+ }
+
+ switch (code) {
+ case IONIC_V1_EQE_CQ_NOTIFY:
+ if (cq->vcq->ibcq.comp_handler)
+ cq->vcq->ibcq.comp_handler(&cq->vcq->ibcq,
+ cq->vcq->ibcq.cq_context);
+ break;
+
+ case IONIC_V1_EQE_CQ_ERR:
+ if (cq->vcq->ibcq.event_handler) {
+ ibev.event = IB_EVENT_CQ_ERR;
+ ibev.device = &dev->ibdev;
+ ibev.element.cq = &cq->vcq->ibcq;
+
+ cq->vcq->ibcq.event_handler(&ibev,
+ cq->vcq->ibcq.cq_context);
+ }
+ break;
+
+ default:
+ ibdev_dbg(&dev->ibdev,
+ "unrecognized cqid %#x code %u\n", cqid, code);
+ break;
+ }
+
+ kref_put(&cq->cq_kref, ionic_cq_complete);
+}
+
+static void ionic_qp_event(struct ionic_ibdev *dev, u32 qpid, u8 code)
+{
+ unsigned long irqflags;
+ struct ib_event ibev;
+ struct ionic_qp *qp;
+
+ xa_lock_irqsave(&dev->qp_tbl, irqflags);
+ qp = xa_load(&dev->qp_tbl, qpid);
+ if (qp)
+ kref_get(&qp->qp_kref);
+ xa_unlock_irqrestore(&dev->qp_tbl, irqflags);
+
+ if (!qp) {
+ ibdev_dbg(&dev->ibdev,
+ "missing qpid %#x code %u\n", qpid, code);
+ return;
+ }
+
+ ibev.device = &dev->ibdev;
+ ibev.element.qp = &qp->ibqp;
+
+ switch (code) {
+ case IONIC_V1_EQE_SQ_DRAIN:
+ ibev.event = IB_EVENT_SQ_DRAINED;
+ break;
+
+ case IONIC_V1_EQE_QP_COMM_EST:
+ ibev.event = IB_EVENT_COMM_EST;
+ break;
+
+ case IONIC_V1_EQE_QP_LAST_WQE:
+ ibev.event = IB_EVENT_QP_LAST_WQE_REACHED;
+ break;
+
+ case IONIC_V1_EQE_QP_ERR:
+ ibev.event = IB_EVENT_QP_FATAL;
+ break;
+
+ case IONIC_V1_EQE_QP_ERR_REQUEST:
+ ibev.event = IB_EVENT_QP_REQ_ERR;
+ break;
+
+ case IONIC_V1_EQE_QP_ERR_ACCESS:
+ ibev.event = IB_EVENT_QP_ACCESS_ERR;
+ break;
+
+ default:
+ ibdev_dbg(&dev->ibdev,
+ "unrecognized qpid %#x code %u\n", qpid, code);
+ goto out;
+ }
+
+ if (qp->ibqp.event_handler)
+ qp->ibqp.event_handler(&ibev, qp->ibqp.qp_context);
+
+out:
+ kref_put(&qp->qp_kref, ionic_qp_complete);
+}
+
+static u16 ionic_poll_eq(struct ionic_eq *eq, u16 budget)
+{
+ struct ionic_ibdev *dev = eq->dev;
+ struct ionic_v1_eqe eqe;
+ u16 npolled = 0;
+ u8 type, code;
+ u32 evt, qid;
+
+ while (npolled < budget) {
+ if (!ionic_next_eqe(eq, &eqe))
+ break;
+
+ ionic_queue_produce(&eq->q);
+
+ /* cons is color for eq */
+ eq->q.cons = ionic_color_wrap(eq->q.prod, eq->q.cons);
+
+ ++npolled;
+
+ evt = ionic_v1_eqe_evt(&eqe);
+ type = ionic_v1_eqe_evt_type(evt);
+ code = ionic_v1_eqe_evt_code(evt);
+ qid = ionic_v1_eqe_evt_qid(evt);
+
+ switch (type) {
+ case IONIC_V1_EQE_TYPE_CQ:
+ ionic_cq_event(dev, qid, code);
+ break;
+
+ case IONIC_V1_EQE_TYPE_QP:
+ ionic_qp_event(dev, qid, code);
+ break;
+
+ default:
+ ibdev_dbg(&dev->ibdev,
+ "unknown event %#x type %u\n", evt, type);
+ }
+ }
+
+ return npolled;
+}
+
+static void ionic_poll_eq_work(struct work_struct *work)
+{
+ struct ionic_eq *eq = container_of(work, struct ionic_eq, work);
+ u32 npolled;
+
+ if (unlikely(!eq->enable) || WARN_ON(eq->armed))
+ return;
+
+ npolled = ionic_poll_eq(eq, IONIC_EQ_WORK_BUDGET);
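+ /* budget exhausted: return credits and keep polling from the work
+ * item; otherwise re-arm the interrupt
+ */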
+ if (npolled == IONIC_EQ_WORK_BUDGET) {
+ ionic_intr_credits(eq->dev->lif_cfg.intr_ctrl, eq->intr,
+ npolled, 0);
+ queue_work(ionic_evt_workq, &eq->work);
+ } else {
+ xchg(&eq->armed, 1);
+ ionic_intr_credits(eq->dev->lif_cfg.intr_ctrl, eq->intr,
+ 0, IONIC_INTR_CRED_UNMASK);
+ }
+}
+
+static irqreturn_t ionic_poll_eq_isr(int irq, void *eqptr)
+{
+ struct ionic_eq *eq = eqptr;
+ int was_armed;
+ u32 npolled;
+
+ was_armed = xchg(&eq->armed, 0);
+
+ if (unlikely(!eq->enable) || !was_armed)
+ return IRQ_HANDLED;
+
+ npolled = ionic_poll_eq(eq, IONIC_EQ_ISR_BUDGET);
+ if (npolled == IONIC_EQ_ISR_BUDGET) {
+ ionic_intr_credits(eq->dev->lif_cfg.intr_ctrl, eq->intr,
+ npolled, 0);
+ queue_work(ionic_evt_workq, &eq->work);
+ } else {
+ xchg(&eq->armed, 1);
+ ionic_intr_credits(eq->dev->lif_cfg.intr_ctrl, eq->intr,
+ 0, IONIC_INTR_CRED_UNMASK);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static struct ionic_eq *ionic_create_eq(struct ionic_ibdev *dev, int eqid)
+{
+ struct ionic_intr_info intr_obj = { };
+ struct ionic_eq *eq;
+ int rc;
+
+ eq = kzalloc(sizeof(*eq), GFP_KERNEL);
+ if (!eq)
+ return ERR_PTR(-ENOMEM);
+
+ eq->dev = dev;
+
+ rc = ionic_queue_init(&eq->q, dev->lif_cfg.hwdev, IONIC_EQ_DEPTH,
+ sizeof(struct ionic_v1_eqe));
+ if (rc)
+ goto err_q;
+
+ eq->eqid = eqid;
+
+ eq->armed = true;
+ eq->enable = false;
+ INIT_WORK(&eq->work, ionic_poll_eq_work);
+
+ rc = ionic_intr_alloc(dev->lif_cfg.lif, &intr_obj);
+ if (rc < 0)
+ goto err_intr;
+
+ eq->irq = intr_obj.vector;
+ eq->intr = intr_obj.index;
+
+ ionic_queue_dbell_init(&eq->q, eq->eqid);
+
+ /* cons is color for eq */
+ eq->q.cons = true;
+
+ snprintf(eq->name, sizeof(eq->name), "%s-%d-%d-eq",
+ "ionr", dev->lif_cfg.lif_index, eq->eqid);
+
+ ionic_intr_mask(dev->lif_cfg.intr_ctrl, eq->intr, IONIC_INTR_MASK_SET);
+ ionic_intr_mask_assert(dev->lif_cfg.intr_ctrl, eq->intr, IONIC_INTR_MASK_SET);
+ ionic_intr_coal_init(dev->lif_cfg.intr_ctrl, eq->intr, 0);
+ ionic_intr_clean(dev->lif_cfg.intr_ctrl, eq->intr);
+
+ eq->enable = true;
+
+ rc = request_irq(eq->irq, ionic_poll_eq_isr, 0, eq->name, eq);
+ if (rc)
+ goto err_irq;
+
+ rc = ionic_rdma_queue_devcmd(dev, &eq->q, eq->eqid, eq->intr,
+ IONIC_CMD_RDMA_CREATE_EQ);
+ if (rc)
+ goto err_cmd;
+
+ ionic_intr_mask(dev->lif_cfg.intr_ctrl, eq->intr, IONIC_INTR_MASK_CLEAR);
+
+ return eq;
+
+err_cmd:
+ eq->enable = false;
+ free_irq(eq->irq, eq);
+ flush_work(&eq->work);
+err_irq:
+ ionic_intr_free(dev->lif_cfg.lif, eq->intr);
+err_intr:
+ ionic_queue_destroy(&eq->q, dev->lif_cfg.hwdev);
+err_q:
+ kfree(eq);
+
+ return ERR_PTR(rc);
+}
+
+static void ionic_destroy_eq(struct ionic_eq *eq)
+{
+ struct ionic_ibdev *dev = eq->dev;
+
+ eq->enable = false;
+ free_irq(eq->irq, eq);
+ flush_work(&eq->work);
+
+ ionic_intr_free(dev->lif_cfg.lif, eq->intr);
+ ionic_queue_destroy(&eq->q, dev->lif_cfg.hwdev);
+ kfree(eq);
+}
+
+int ionic_create_rdma_admin(struct ionic_ibdev *dev)
+{
+ int eq_i = 0, aq_i = 0, rc = 0;
+ struct ionic_vcq *vcq;
+ struct ionic_aq *aq;
+ struct ionic_eq *eq;
+
+ dev->eq_vec = NULL;
+ dev->aq_vec = NULL;
+
+ INIT_WORK(&dev->reset_work, ionic_reset_work);
+ INIT_DELAYED_WORK(&dev->admin_dwork, ionic_admin_dwork);
+ atomic_set(&dev->admin_state, IONIC_ADMIN_KILLED);
+
+ if (dev->lif_cfg.aq_count > IONIC_AQ_COUNT) {
+ ibdev_dbg(&dev->ibdev, "limiting adminq count to %d\n",
+ IONIC_AQ_COUNT);
+ dev->lif_cfg.aq_count = IONIC_AQ_COUNT;
+ }
+
+ if (dev->lif_cfg.eq_count > IONIC_EQ_COUNT) {
+ ibdev_dbg(&dev->ibdev, "limiting eventq count to %d\n",
+ IONIC_EQ_COUNT);
+ dev->lif_cfg.eq_count = IONIC_EQ_COUNT;
+ }
+
+ /* need at least two eq and one aq */
+ if (dev->lif_cfg.eq_count < IONIC_EQ_COUNT_MIN ||
+ dev->lif_cfg.aq_count < IONIC_AQ_COUNT_MIN) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ dev->eq_vec = kmalloc_array(dev->lif_cfg.eq_count, sizeof(*dev->eq_vec),
+ GFP_KERNEL);
+ if (!dev->eq_vec) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ for (eq_i = 0; eq_i < dev->lif_cfg.eq_count; ++eq_i) {
+ eq = ionic_create_eq(dev, eq_i + dev->lif_cfg.eq_base);
+ if (IS_ERR(eq)) {
+ rc = PTR_ERR(eq);
+
+ if (eq_i < IONIC_EQ_COUNT_MIN) {
+ ibdev_err(&dev->ibdev,
+ "fail create eq %pe\n", eq);
+ goto out;
+ }
+
+ /* ok, just fewer eq than device supports */
+ ibdev_dbg(&dev->ibdev, "eq count %d want %d rc %pe\n",
+ eq_i, dev->lif_cfg.eq_count, eq);
+
+ rc = 0;
+ break;
+ }
+
+ dev->eq_vec[eq_i] = eq;
+ }
+
+ dev->lif_cfg.eq_count = eq_i;
+
+ dev->aq_vec = kmalloc_array(dev->lif_cfg.aq_count, sizeof(*dev->aq_vec),
+ GFP_KERNEL);
+ if (!dev->aq_vec) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Create one CQ per AQ */
+ for (aq_i = 0; aq_i < dev->lif_cfg.aq_count; ++aq_i) {
+ vcq = ionic_create_rdma_admincq(dev, aq_i % eq_i);
+ if (IS_ERR(vcq)) {
+ rc = PTR_ERR(vcq);
+
+ if (!aq_i) {
+ ibdev_err(&dev->ibdev,
+ "failed to create acq %pe\n", vcq);
+ goto out;
+ }
+
+ /* ok, just fewer adminq than device supports */
+ ibdev_dbg(&dev->ibdev, "acq count %d want %d rc %pe\n",
+ aq_i, dev->lif_cfg.aq_count, vcq);
+ break;
+ }
+
+ aq = ionic_create_rdma_adminq(dev, aq_i + dev->lif_cfg.aq_base,
+ vcq->cq[0].cqid);
+ if (IS_ERR(aq)) {
+ /* Clean up the dangling CQ */
+ ionic_destroy_cq_common(dev, &vcq->cq[0]);
+ kfree(vcq);
+
+ rc = PTR_ERR(aq);
+
+ if (!aq_i) {
+ ibdev_err(&dev->ibdev,
+ "failed to create aq %pe\n", aq);
+ goto out;
+ }
+
+ /* ok, just fewer adminq than device supports */
+ ibdev_dbg(&dev->ibdev, "aq count %d want %d rc %pe\n",
+ aq_i, dev->lif_cfg.aq_count, aq);
+ break;
+ }
+
+ vcq->ibcq.cq_context = aq;
+ aq->vcq = vcq;
+
+ atomic_set(&aq->admin_state, IONIC_ADMIN_ACTIVE);
+ dev->aq_vec[aq_i] = aq;
+ }
+
+ atomic_set(&dev->admin_state, IONIC_ADMIN_ACTIVE);
+out:
+ dev->lif_cfg.eq_count = eq_i;
+ dev->lif_cfg.aq_count = aq_i;
+
+ return rc;
+}
+
+void ionic_destroy_rdma_admin(struct ionic_ibdev *dev)
+{
+ struct ionic_vcq *vcq;
+ struct ionic_aq *aq;
+ struct ionic_eq *eq;
+
+ /*
+ * Killing the admin before destroy ensures all admin commands and
+ * completions are flushed. admin_state = IONIC_ADMIN_KILLED
+ * prevents any further work from being queued.
+ */
+ cancel_delayed_work_sync(&dev->admin_dwork);
+ cancel_work_sync(&dev->reset_work);
+
+ if (dev->aq_vec) {
+ while (dev->lif_cfg.aq_count > 0) {
+ aq = dev->aq_vec[--dev->lif_cfg.aq_count];
+ vcq = aq->vcq;
+
+ cancel_work_sync(&aq->work);
+
+ __ionic_destroy_rdma_adminq(dev, aq);
+ if (vcq) {
+ ionic_destroy_cq_common(dev, &vcq->cq[0]);
+ kfree(vcq);
+ }
+ }
+
+ kfree(dev->aq_vec);
+ }
+
+ if (dev->eq_vec) {
+ while (dev->lif_cfg.eq_count > 0) {
+ eq = dev->eq_vec[--dev->lif_cfg.eq_count];
+ ionic_destroy_eq(eq);
+ }
+
+ kfree(dev->eq_vec);
+ }
+}
diff --git a/drivers/infiniband/hw/ionic/ionic_controlpath.c b/drivers/infiniband/hw/ionic/ionic_controlpath.c
new file mode 100644
index 000000000000..ea12d9b8e125
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/ionic_controlpath.c
@@ -0,0 +1,2679 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_cache.h>
+#include <rdma/ib_user_verbs.h>
+#include <ionic_api.h>
+
+#include "ionic_fw.h"
+#include "ionic_ibdev.h"
+
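+/* The ECN field is the low two bits of tos: set ECT(0) (binary 10) or clear it */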
+#define ionic_set_ecn(tos) (((tos) | 2u) & ~1u)
+#define ionic_clear_ecn(tos) ((tos) & ~3u)
+
+static int ionic_validate_qdesc(struct ionic_qdesc *q)
+{
+ if (!q->addr || !q->size || !q->mask ||
+ !q->depth_log2 || !q->stride_log2)
+ return -EINVAL;
+
+ if (q->addr & (PAGE_SIZE - 1))
+ return -EINVAL;
+
+ if (q->mask != BIT(q->depth_log2) - 1)
+ return -EINVAL;
+
+ if (q->size < BIT_ULL(q->depth_log2 + q->stride_log2))
+ return -EINVAL;
+
+ return 0;
+}
+
+static u32 ionic_get_eqid(struct ionic_ibdev *dev, u32 comp_vector, u8 udma_idx)
+{
+ /* One EQ per vector per udma; the first EQs are reserved for async
+ * events. The remaining vectors can be requested for completions.
+ */
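+ /*
+ * Illustrative example: with eq_count = 8 and udma_count = 2,
+ * comp_vec_count = 3; EQs 0 and 1 are reserved, and comp_vector
+ * 0..2 on udma 1 maps to EQs 3, 5, 7.
+ */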
+ u32 comp_vec_count = dev->lif_cfg.eq_count / dev->lif_cfg.udma_count - 1;
+
+ return (comp_vector % comp_vec_count + 1) * dev->lif_cfg.udma_count + udma_idx;
+}
+
+static int ionic_get_cqid(struct ionic_ibdev *dev, u32 *cqid, u8 udma_idx)
+{
+ unsigned int size, base, bound;
+ int rc;
+
+ size = dev->lif_cfg.cq_count / dev->lif_cfg.udma_count;
+ base = size * udma_idx;
+ bound = base + size;
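+ /* each udma owns a contiguous [base, bound) slice of the cq id space */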
+
+ rc = ionic_resid_get_shared(&dev->inuse_cqid, base, bound);
+ if (rc >= 0) {
+ /* cq_base is zero or a multiple of two queue groups */
+ *cqid = dev->lif_cfg.cq_base +
+ ionic_bitid_to_qid(rc, dev->lif_cfg.udma_qgrp_shift,
+ dev->half_cqid_udma_shift);
+
+ rc = 0;
+ }
+
+ return rc;
+}
+
+static void ionic_put_cqid(struct ionic_ibdev *dev, u32 cqid)
+{
+ u32 bitid = ionic_qid_to_bitid(cqid - dev->lif_cfg.cq_base,
+ dev->lif_cfg.udma_qgrp_shift,
+ dev->half_cqid_udma_shift);
+
+ ionic_resid_put(&dev->inuse_cqid, bitid);
+}
+
+int ionic_create_cq_common(struct ionic_vcq *vcq,
+ struct ionic_tbl_buf *buf,
+ const struct ib_cq_init_attr *attr,
+ struct ionic_ctx *ctx,
+ struct ib_udata *udata,
+ struct ionic_qdesc *req_cq,
+ __u32 *resp_cqid,
+ int udma_idx)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(vcq->ibcq.device);
+ struct ionic_cq *cq = &vcq->cq[udma_idx];
+ void *entry;
+ int rc;
+
+ cq->vcq = vcq;
+
+ if (attr->cqe < 1 || attr->cqe + IONIC_CQ_GRACE > 0xffff) {
+ rc = -EINVAL;
+ goto err_args;
+ }
+
+ rc = ionic_get_cqid(dev, &cq->cqid, udma_idx);
+ if (rc)
+ goto err_args;
+
+ cq->eqid = ionic_get_eqid(dev, attr->comp_vector, udma_idx);
+
+ spin_lock_init(&cq->lock);
+ INIT_LIST_HEAD(&cq->poll_sq);
+ INIT_LIST_HEAD(&cq->flush_sq);
+ INIT_LIST_HEAD(&cq->flush_rq);
+
+ if (udata) {
+ rc = ionic_validate_qdesc(req_cq);
+ if (rc)
+ goto err_qdesc;
+
+ cq->umem = ib_umem_get(&dev->ibdev, req_cq->addr, req_cq->size,
+ IB_ACCESS_LOCAL_WRITE);
+ if (IS_ERR(cq->umem)) {
+ rc = PTR_ERR(cq->umem);
+ goto err_qdesc;
+ }
+
+ cq->q.ptr = NULL;
+ cq->q.size = req_cq->size;
+ cq->q.mask = req_cq->mask;
+ cq->q.depth_log2 = req_cq->depth_log2;
+ cq->q.stride_log2 = req_cq->stride_log2;
+
+ *resp_cqid = cq->cqid;
+ } else {
+ rc = ionic_queue_init(&cq->q, dev->lif_cfg.hwdev,
+ attr->cqe + IONIC_CQ_GRACE,
+ sizeof(struct ionic_v1_cqe));
+ if (rc)
+ goto err_q_init;
+
+ ionic_queue_dbell_init(&cq->q, cq->cqid);
+ cq->color = true;
+ cq->credit = cq->q.mask;
+ }
+
+ rc = ionic_pgtbl_init(dev, buf, cq->umem, cq->q.dma, 1, PAGE_SIZE);
+ if (rc)
+ goto err_pgtbl_init;
+
+ init_completion(&cq->cq_rel_comp);
+ kref_init(&cq->cq_kref);
+
+ entry = xa_store_irq(&dev->cq_tbl, cq->cqid, cq, GFP_KERNEL);
+ if (entry) {
+ if (!xa_is_err(entry))
+ rc = -EINVAL;
+ else
+ rc = xa_err(entry);
+
+ goto err_xa;
+ }
+
+ return 0;
+
+err_xa:
+ ionic_pgtbl_unbuf(dev, buf);
+err_pgtbl_init:
+ if (!udata)
+ ionic_queue_destroy(&cq->q, dev->lif_cfg.hwdev);
+err_q_init:
+ if (cq->umem)
+ ib_umem_release(cq->umem);
+err_qdesc:
+ ionic_put_cqid(dev, cq->cqid);
+err_args:
+ cq->vcq = NULL;
+
+ return rc;
+}
+
+void ionic_destroy_cq_common(struct ionic_ibdev *dev, struct ionic_cq *cq)
+{
+ if (!cq->vcq)
+ return;
+
+ xa_erase_irq(&dev->cq_tbl, cq->cqid);
+
+ kref_put(&cq->cq_kref, ionic_cq_complete);
+ wait_for_completion(&cq->cq_rel_comp);
+
+ if (cq->umem)
+ ib_umem_release(cq->umem);
+ else
+ ionic_queue_destroy(&cq->q, dev->lif_cfg.hwdev);
+
+ ionic_put_cqid(dev, cq->cqid);
+
+ cq->vcq = NULL;
+}
+
+static int ionic_validate_qdesc_zero(struct ionic_qdesc *q)
+{
+ if (q->addr || q->size || q->mask || q->depth_log2 || q->stride_log2)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ionic_get_pdid(struct ionic_ibdev *dev, u32 *pdid)
+{
+ int rc;
+
+ rc = ionic_resid_get(&dev->inuse_pdid);
+ if (rc < 0)
+ return rc;
+
+ *pdid = rc;
+ return 0;
+}
+
+static int ionic_get_ahid(struct ionic_ibdev *dev, u32 *ahid)
+{
+ int rc;
+
+ rc = ionic_resid_get(&dev->inuse_ahid);
+ if (rc < 0)
+ return rc;
+
+ *ahid = rc;
+ return 0;
+}
+
+static int ionic_get_mrid(struct ionic_ibdev *dev, u32 *mrid)
+{
+ int rc;
+
+ /* wrap to 1, skip reserved lkey */
+ rc = ionic_resid_get_shared(&dev->inuse_mrid, 1,
+ dev->inuse_mrid.inuse_size);
+ if (rc < 0)
+ return rc;
+
+ *mrid = ionic_mrid(rc, dev->next_mrkey++);
+ return 0;
+}
+
+static int ionic_get_gsi_qpid(struct ionic_ibdev *dev, u32 *qpid)
+{
+ int rc = 0;
+
+ rc = ionic_resid_get_shared(&dev->inuse_qpid, IB_QPT_GSI, IB_QPT_GSI + 1);
+ if (rc < 0)
+ return rc;
+
+ *qpid = IB_QPT_GSI;
+ return 0;
+}
+
+static int ionic_get_qpid(struct ionic_ibdev *dev, u32 *qpid,
+ u8 *udma_idx, u8 udma_mask)
+{
+ unsigned int size, base, bound;
+ int udma_i, udma_x, udma_ix;
+ int rc = -EINVAL;
+
+ udma_x = dev->next_qpid_udma_idx;
+
+ dev->next_qpid_udma_idx ^= dev->lif_cfg.udma_count - 1;
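+ /*
+ * With udma_count = 2 this alternates the preferred udma between
+ * calls, and udma_i ^ udma_x visits the preferred udma first.
+ */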
+
+ for (udma_i = 0; udma_i < dev->lif_cfg.udma_count; ++udma_i) {
+ udma_ix = udma_i ^ udma_x;
+
+ if (!(udma_mask & BIT(udma_ix)))
+ continue;
+
+ size = dev->lif_cfg.qp_count / dev->lif_cfg.udma_count;
+ base = size * udma_ix;
+ bound = base + size;
+
+ /* skip reserved SMI and GSI qpids in group zero */
+ if (!base)
+ base = 2;
+
+ rc = ionic_resid_get_shared(&dev->inuse_qpid, base, bound);
+ if (rc >= 0) {
+ *qpid = ionic_bitid_to_qid(rc,
+ dev->lif_cfg.udma_qgrp_shift,
+ dev->half_qpid_udma_shift);
+ *udma_idx = udma_ix;
+
+ rc = 0;
+ break;
+ }
+ }
+
+ return rc;
+}
+
+static int ionic_get_dbid(struct ionic_ibdev *dev, u32 *dbid, phys_addr_t *addr)
+{
+ int rc, dbpage_num;
+
+ /* wrap to 1, skip kernel reserved */
+ rc = ionic_resid_get_shared(&dev->inuse_dbid, 1,
+ dev->inuse_dbid.inuse_size);
+ if (rc < 0)
+ return rc;
+
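+ /* each dbid selects one doorbell page within this lif's region */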
+ dbpage_num = (dev->lif_cfg.lif_hw_index * dev->lif_cfg.dbid_count) + rc;
+ *addr = dev->lif_cfg.db_phys + ((phys_addr_t)dbpage_num << PAGE_SHIFT);
+
+ *dbid = rc;
+
+ return 0;
+}
+
+static void ionic_put_pdid(struct ionic_ibdev *dev, u32 pdid)
+{
+ ionic_resid_put(&dev->inuse_pdid, pdid);
+}
+
+static void ionic_put_ahid(struct ionic_ibdev *dev, u32 ahid)
+{
+ ionic_resid_put(&dev->inuse_ahid, ahid);
+}
+
+static void ionic_put_mrid(struct ionic_ibdev *dev, u32 mrid)
+{
+ ionic_resid_put(&dev->inuse_mrid, ionic_mrid_index(mrid));
+}
+
+static void ionic_put_qpid(struct ionic_ibdev *dev, u32 qpid)
+{
+ u32 bitid = ionic_qid_to_bitid(qpid,
+ dev->lif_cfg.udma_qgrp_shift,
+ dev->half_qpid_udma_shift);
+
+ ionic_resid_put(&dev->inuse_qpid, bitid);
+}
+
+static void ionic_put_dbid(struct ionic_ibdev *dev, u32 dbid)
+{
+ ionic_resid_put(&dev->inuse_dbid, dbid);
+}
+
+static struct rdma_user_mmap_entry *
+ionic_mmap_entry_insert(struct ionic_ctx *ctx, unsigned long size,
+ unsigned long pfn, u8 mmap_flags, u64 *offset)
+{
+ struct ionic_mmap_entry *entry;
+ int rc;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return NULL;
+
+ entry->size = size;
+ entry->pfn = pfn;
+ entry->mmap_flags = mmap_flags;
+
+ rc = rdma_user_mmap_entry_insert(&ctx->ibctx, &entry->rdma_entry,
+ entry->size);
+ if (rc) {
+ kfree(entry);
+ return NULL;
+ }
+
+ if (offset)
+ *offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
+
+ return &entry->rdma_entry;
+}
+
+int ionic_alloc_ucontext(struct ib_ucontext *ibctx, struct ib_udata *udata)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibctx->device);
+ struct ionic_ctx *ctx = to_ionic_ctx(ibctx);
+ struct ionic_ctx_resp resp = {};
+ struct ionic_ctx_req req;
+ phys_addr_t db_phys = 0;
+ int rc;
+
+ rc = ib_copy_from_udata(&req, udata, sizeof(req));
+ if (rc)
+ return rc;
+
+ /* try to allocate dbid for user ctx */
+ rc = ionic_get_dbid(dev, &ctx->dbid, &db_phys);
+ if (rc < 0)
+ return rc;
+
+ ibdev_dbg(&dev->ibdev, "user space dbid %u\n", ctx->dbid);
+
+ ctx->mmap_dbell = ionic_mmap_entry_insert(ctx, PAGE_SIZE,
+ PHYS_PFN(db_phys), 0, NULL);
+ if (!ctx->mmap_dbell) {
+ rc = -ENOMEM;
+ goto err_mmap_dbell;
+ }
+
+ resp.page_shift = PAGE_SHIFT;
+
+ resp.dbell_offset = db_phys & ~PAGE_MASK;
+
+ resp.version = dev->lif_cfg.rdma_version;
+ resp.qp_opcodes = dev->lif_cfg.qp_opcodes;
+ resp.admin_opcodes = dev->lif_cfg.admin_opcodes;
+
+ resp.sq_qtype = dev->lif_cfg.sq_qtype;
+ resp.rq_qtype = dev->lif_cfg.rq_qtype;
+ resp.cq_qtype = dev->lif_cfg.cq_qtype;
+ resp.admin_qtype = dev->lif_cfg.aq_qtype;
+ resp.max_stride = dev->lif_cfg.max_stride;
+ resp.max_spec = IONIC_SPEC_HIGH;
+
+ resp.udma_count = dev->lif_cfg.udma_count;
+ resp.expdb_mask = dev->lif_cfg.expdb_mask;
+
+ if (dev->lif_cfg.sq_expdb)
+ resp.expdb_qtypes |= IONIC_EXPDB_SQ;
+ if (dev->lif_cfg.rq_expdb)
+ resp.expdb_qtypes |= IONIC_EXPDB_RQ;
+
+ rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
+ if (rc)
+ goto err_resp;
+
+ return 0;
+
+err_resp:
+ rdma_user_mmap_entry_remove(ctx->mmap_dbell);
+err_mmap_dbell:
+ ionic_put_dbid(dev, ctx->dbid);
+
+ return rc;
+}
+
+void ionic_dealloc_ucontext(struct ib_ucontext *ibctx)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibctx->device);
+ struct ionic_ctx *ctx = to_ionic_ctx(ibctx);
+
+ rdma_user_mmap_entry_remove(ctx->mmap_dbell);
+ ionic_put_dbid(dev, ctx->dbid);
+}
+
+int ionic_mmap(struct ib_ucontext *ibctx, struct vm_area_struct *vma)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibctx->device);
+ struct ionic_ctx *ctx = to_ionic_ctx(ibctx);
+ struct rdma_user_mmap_entry *rdma_entry;
+ struct ionic_mmap_entry *ionic_entry;
+ int rc = 0;
+
+ rdma_entry = rdma_user_mmap_entry_get(&ctx->ibctx, vma);
+ if (!rdma_entry) {
+ ibdev_dbg(&dev->ibdev, "not found %#lx\n",
+ vma->vm_pgoff << PAGE_SHIFT);
+ return -EINVAL;
+ }
+
+ ionic_entry = container_of(rdma_entry, struct ionic_mmap_entry,
+ rdma_entry);
+
+ ibdev_dbg(&dev->ibdev, "writecombine? %d\n",
+ ionic_entry->mmap_flags & IONIC_MMAP_WC);
+ if (ionic_entry->mmap_flags & IONIC_MMAP_WC)
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ else
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ ibdev_dbg(&dev->ibdev, "remap st %#lx pf %#lx sz %#lx\n",
+ vma->vm_start, ionic_entry->pfn, ionic_entry->size);
+ rc = rdma_user_mmap_io(&ctx->ibctx, vma, ionic_entry->pfn,
+ ionic_entry->size, vma->vm_page_prot,
+ rdma_entry);
+ if (rc)
+ ibdev_dbg(&dev->ibdev, "remap failed %d\n", rc);
+
+ rdma_user_mmap_entry_put(rdma_entry);
+ return rc;
+}
+
+void ionic_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
+{
+ struct ionic_mmap_entry *ionic_entry;
+
+ ionic_entry = container_of(rdma_entry, struct ionic_mmap_entry,
+ rdma_entry);
+ kfree(ionic_entry);
+}
+
+int ionic_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibpd->device);
+ struct ionic_pd *pd = to_ionic_pd(ibpd);
+
+ return ionic_get_pdid(dev, &pd->pdid);
+}
+
+int ionic_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibpd->device);
+ struct ionic_pd *pd = to_ionic_pd(ibpd);
+
+ ionic_put_pdid(dev, pd->pdid);
+
+ return 0;
+}
+
+static int ionic_build_hdr(struct ionic_ibdev *dev,
+ struct ib_ud_header *hdr,
+ const struct rdma_ah_attr *attr,
+ u16 sport, bool want_ecn)
+{
+ const struct ib_global_route *grh;
+ enum rdma_network_type net;
+ u16 vlan;
+ int rc;
+
+ if (attr->ah_flags != IB_AH_GRH)
+ return -EINVAL;
+ if (attr->type != RDMA_AH_ATTR_TYPE_ROCE)
+ return -EINVAL;
+
+ grh = rdma_ah_read_grh(attr);
+
+ rc = rdma_read_gid_l2_fields(grh->sgid_attr, &vlan, &hdr->eth.smac_h[0]);
+ if (rc)
+ return rc;
+
+ net = rdma_gid_attr_network_type(grh->sgid_attr);
+
+ rc = ib_ud_header_init(0, /* no payload */
+ 0, /* no lrh */
+ 1, /* yes eth */
+ vlan != 0xffff,
+ 0, /* no grh */
+ net == RDMA_NETWORK_IPV4 ? 4 : 6,
+ 1, /* yes udp */
+ 0, /* no imm */
+ hdr);
+ if (rc)
+ return rc;
+
+ ether_addr_copy(hdr->eth.dmac_h, attr->roce.dmac);
+
+ if (net == RDMA_NETWORK_IPV4) {
+ hdr->eth.type = cpu_to_be16(ETH_P_IP);
+ hdr->ip4.frag_off = cpu_to_be16(0x4000); /* don't fragment */
+ hdr->ip4.ttl = grh->hop_limit;
+ hdr->ip4.tot_len = cpu_to_be16(0xffff);
+ hdr->ip4.saddr =
+ *(const __be32 *)(grh->sgid_attr->gid.raw + 12);
+ hdr->ip4.daddr = *(const __be32 *)(grh->dgid.raw + 12);
+
+ if (want_ecn)
+ hdr->ip4.tos = ionic_set_ecn(grh->traffic_class);
+ else
+ hdr->ip4.tos = ionic_clear_ecn(grh->traffic_class);
+ } else {
+ hdr->eth.type = cpu_to_be16(ETH_P_IPV6);
+ hdr->grh.flow_label = cpu_to_be32(grh->flow_label);
+ hdr->grh.hop_limit = grh->hop_limit;
+ hdr->grh.source_gid = grh->sgid_attr->gid;
+ hdr->grh.destination_gid = grh->dgid;
+
+ if (want_ecn)
+ hdr->grh.traffic_class =
+ ionic_set_ecn(grh->traffic_class);
+ else
+ hdr->grh.traffic_class =
+ ionic_clear_ecn(grh->traffic_class);
+ }
+
+ if (vlan != 0xffff) {
+ vlan |= rdma_ah_get_sl(attr) << VLAN_PRIO_SHIFT;
+ hdr->vlan.tag = cpu_to_be16(vlan);
+ hdr->vlan.type = hdr->eth.type;
+ hdr->eth.type = cpu_to_be16(ETH_P_8021Q);
+ }
+
+ hdr->udp.sport = cpu_to_be16(sport);
+ hdr->udp.dport = cpu_to_be16(ROCE_V2_UDP_DPORT);
+
+ return 0;
+}
+
+static void ionic_set_ah_attr(struct ionic_ibdev *dev,
+ struct rdma_ah_attr *ah_attr,
+ struct ib_ud_header *hdr,
+ int sgid_index)
+{
+ u32 flow_label;
+ u16 vlan = 0;
+ u8 tos, ttl;
+
+ if (hdr->vlan_present)
+ vlan = be16_to_cpu(hdr->vlan.tag);
+
+ if (hdr->ipv4_present) {
+ flow_label = 0;
+ ttl = hdr->ip4.ttl;
+ tos = hdr->ip4.tos;
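+ /* synthesize an IPv4-mapped GID (::ffff:a.b.c.d) from the ip4 header */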
+ *(__be16 *)(hdr->grh.destination_gid.raw + 10) = cpu_to_be16(0xffff);
+ *(__be32 *)(hdr->grh.destination_gid.raw + 12) = hdr->ip4.daddr;
+ } else {
+ flow_label = be32_to_cpu(hdr->grh.flow_label);
+ ttl = hdr->grh.hop_limit;
+ tos = hdr->grh.traffic_class;
+ }
+
+ memset(ah_attr, 0, sizeof(*ah_attr));
+ ah_attr->type = RDMA_AH_ATTR_TYPE_ROCE;
+ if (hdr->eth_present)
+ ether_addr_copy(ah_attr->roce.dmac, hdr->eth.dmac_h);
+ rdma_ah_set_sl(ah_attr, vlan >> VLAN_PRIO_SHIFT);
+ rdma_ah_set_port_num(ah_attr, 1);
+ rdma_ah_set_grh(ah_attr, NULL, flow_label, sgid_index, ttl, tos);
+ rdma_ah_set_dgid_raw(ah_attr, &hdr->grh.destination_gid);
+}
+
+static int ionic_create_ah_cmd(struct ionic_ibdev *dev,
+ struct ionic_ah *ah,
+ struct ionic_pd *pd,
+ struct rdma_ah_attr *attr,
+ u32 flags)
+{
+ struct ionic_admin_wr wr = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
+ .wqe = {
+ .op = IONIC_V1_ADMIN_CREATE_AH,
+ .len = cpu_to_le16(IONIC_ADMIN_CREATE_AH_IN_V1_LEN),
+ .cmd.create_ah = {
+ .pd_id = cpu_to_le32(pd->pdid),
+ .dbid_flags = cpu_to_le16(dev->lif_cfg.dbid),
+ .id_ver = cpu_to_le32(ah->ahid),
+ }
+ }
+ };
+ enum ionic_admin_flags admin_flags = 0;
+ dma_addr_t hdr_dma = 0;
+ void *hdr_buf;
+ gfp_t gfp = GFP_ATOMIC;
+ int rc, hdr_len = 0;
+
+ if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_CREATE_AH)
+ return -EBADRQC;
+
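+ /* when the caller cannot sleep, busy-wait on the admin completion */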
+ if (flags & RDMA_CREATE_AH_SLEEPABLE)
+ gfp = GFP_KERNEL;
+ else
+ admin_flags |= IONIC_ADMIN_F_BUSYWAIT;
+
+ rc = ionic_build_hdr(dev, &ah->hdr, attr, IONIC_ROCE_UDP_SPORT, false);
+ if (rc)
+ return rc;
+
+ if (ah->hdr.eth.type == cpu_to_be16(ETH_P_8021Q)) {
+ if (ah->hdr.vlan.type == cpu_to_be16(ETH_P_IP))
+ wr.wqe.cmd.create_ah.csum_profile =
+ IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV4_UDP;
+ else
+ wr.wqe.cmd.create_ah.csum_profile =
+ IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV6_UDP;
+ } else {
+ if (ah->hdr.eth.type == cpu_to_be16(ETH_P_IP))
+ wr.wqe.cmd.create_ah.csum_profile =
+ IONIC_TFP_CSUM_PROF_ETH_IPV4_UDP;
+ else
+ wr.wqe.cmd.create_ah.csum_profile =
+ IONIC_TFP_CSUM_PROF_ETH_IPV6_UDP;
+ }
+
+ ah->sgid_index = rdma_ah_read_grh(attr)->sgid_index;
+
+ hdr_buf = kmalloc(PAGE_SIZE, gfp);
+ if (!hdr_buf)
+ return -ENOMEM;
+
+ hdr_len = ib_ud_header_pack(&ah->hdr, hdr_buf);
+ hdr_len -= IB_BTH_BYTES;
+ hdr_len -= IB_DETH_BYTES;
+ ibdev_dbg(&dev->ibdev, "roce packet header template\n");
+ print_hex_dump_debug("hdr ", DUMP_PREFIX_OFFSET, 16, 1,
+ hdr_buf, hdr_len, true);
+
+ hdr_dma = dma_map_single(dev->lif_cfg.hwdev, hdr_buf, hdr_len,
+ DMA_TO_DEVICE);
+
+ rc = dma_mapping_error(dev->lif_cfg.hwdev, hdr_dma);
+ if (rc)
+ goto err_dma;
+
+ wr.wqe.cmd.create_ah.dma_addr = cpu_to_le64(hdr_dma);
+ wr.wqe.cmd.create_ah.length = cpu_to_le32(hdr_len);
+
+ ionic_admin_post(dev, &wr);
+ rc = ionic_admin_wait(dev, &wr, admin_flags);
+
+ dma_unmap_single(dev->lif_cfg.hwdev, hdr_dma, hdr_len,
+ DMA_TO_DEVICE);
+err_dma:
+ kfree(hdr_buf);
+
+ return rc;
+}
+
+static int ionic_destroy_ah_cmd(struct ionic_ibdev *dev, u32 ahid, u32 flags)
+{
+ struct ionic_admin_wr wr = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
+ .wqe = {
+ .op = IONIC_V1_ADMIN_DESTROY_AH,
+ .len = cpu_to_le16(IONIC_ADMIN_DESTROY_AH_IN_V1_LEN),
+ .cmd.destroy_ah = {
+ .ah_id = cpu_to_le32(ahid),
+ },
+ }
+ };
+ enum ionic_admin_flags admin_flags = IONIC_ADMIN_F_TEARDOWN;
+
+ if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_DESTROY_AH)
+ return -EBADRQC;
+
+ if (!(flags & RDMA_CREATE_AH_SLEEPABLE))
+ admin_flags |= IONIC_ADMIN_F_BUSYWAIT;
+
+ ionic_admin_post(dev, &wr);
+ ionic_admin_wait(dev, &wr, admin_flags);
+
+ /* No host-memory resource is associated with the ah, so it is OK
+ * to "succeed" and complete this destroy ah on the host, even if
+ * the device command fails.
+ */
+ return 0;
+}
+
+int ionic_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibah->device);
+ struct rdma_ah_attr *attr = init_attr->ah_attr;
+ struct ionic_pd *pd = to_ionic_pd(ibah->pd);
+ struct ionic_ah *ah = to_ionic_ah(ibah);
+ struct ionic_ah_resp resp = {};
+ u32 flags = init_attr->flags;
+ int rc;
+
+ rc = ionic_get_ahid(dev, &ah->ahid);
+ if (rc)
+ return rc;
+
+ rc = ionic_create_ah_cmd(dev, ah, pd, attr, flags);
+ if (rc)
+ goto err_cmd;
+
+ if (udata) {
+ resp.ahid = ah->ahid;
+
+ rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
+ if (rc)
+ goto err_resp;
+ }
+
+ return 0;
+
+err_resp:
+ ionic_destroy_ah_cmd(dev, ah->ahid, flags);
+err_cmd:
+ ionic_put_ahid(dev, ah->ahid);
+ return rc;
+}
+
+int ionic_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibah->device);
+ struct ionic_ah *ah = to_ionic_ah(ibah);
+
+ ionic_set_ah_attr(dev, ah_attr, &ah->hdr, ah->sgid_index);
+
+ return 0;
+}
+
+int ionic_destroy_ah(struct ib_ah *ibah, u32 flags)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibah->device);
+ struct ionic_ah *ah = to_ionic_ah(ibah);
+ int rc;
+
+ rc = ionic_destroy_ah_cmd(dev, ah->ahid, flags);
+ if (rc)
+ return rc;
+
+ ionic_put_ahid(dev, ah->ahid);
+
+ return 0;
+}
+
+static int ionic_create_mr_cmd(struct ionic_ibdev *dev,
+ struct ionic_pd *pd,
+ struct ionic_mr *mr,
+ u64 addr,
+ u64 length)
+{
+ struct ionic_admin_wr wr = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
+ .wqe = {
+ .op = IONIC_V1_ADMIN_CREATE_MR,
+ .len = cpu_to_le16(IONIC_ADMIN_CREATE_MR_IN_V1_LEN),
+ .cmd.create_mr = {
+ .va = cpu_to_le64(addr),
+ .length = cpu_to_le64(length),
+ .pd_id = cpu_to_le32(pd->pdid),
+ .page_size_log2 = mr->buf.page_size_log2,
+ .tbl_index = cpu_to_le32(~0),
+ .map_count = cpu_to_le32(mr->buf.tbl_pages),
+ .dma_addr = ionic_pgtbl_dma(&mr->buf, addr),
+ .dbid_flags = cpu_to_le16(mr->flags),
+ .id_ver = cpu_to_le32(mr->mrid),
+ }
+ }
+ };
+ int rc;
+
+ if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_CREATE_MR)
+ return -EBADRQC;
+
+ ionic_admin_post(dev, &wr);
+ rc = ionic_admin_wait(dev, &wr, 0);
+ if (!rc)
+ mr->created = true;
+
+ return rc;
+}
+
+static int ionic_destroy_mr_cmd(struct ionic_ibdev *dev, u32 mrid)
+{
+ struct ionic_admin_wr wr = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
+ .wqe = {
+ .op = IONIC_V1_ADMIN_DESTROY_MR,
+ .len = cpu_to_le16(IONIC_ADMIN_DESTROY_MR_IN_V1_LEN),
+ .cmd.destroy_mr = {
+ .mr_id = cpu_to_le32(mrid),
+ },
+ }
+ };
+
+ if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_DESTROY_MR)
+ return -EBADRQC;
+
+ ionic_admin_post(dev, &wr);
+
+ return ionic_admin_wait(dev, &wr, IONIC_ADMIN_F_TEARDOWN);
+}
+
+struct ib_mr *ionic_get_dma_mr(struct ib_pd *ibpd, int access)
+{
+ struct ionic_pd *pd = to_ionic_pd(ibpd);
+ struct ionic_mr *mr;
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ mr->ibmr.lkey = IONIC_DMA_LKEY;
+ mr->ibmr.rkey = IONIC_DMA_RKEY;
+
+ if (pd)
+ pd->flags |= IONIC_QPF_PRIVILEGED;
+
+ return &mr->ibmr;
+}
+
+struct ib_mr *ionic_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
+ u64 addr, int access, struct ib_dmah *dmah,
+ struct ib_udata *udata)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibpd->device);
+ struct ionic_pd *pd = to_ionic_pd(ibpd);
+ struct ionic_mr *mr;
+ unsigned long pg_sz;
+ int rc;
+
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ rc = ionic_get_mrid(dev, &mr->mrid);
+ if (rc)
+ goto err_mrid;
+
+ mr->ibmr.lkey = mr->mrid;
+ mr->ibmr.rkey = mr->mrid;
+ mr->ibmr.iova = addr;
+ mr->ibmr.length = length;
+
+ mr->flags = IONIC_MRF_USER_MR | to_ionic_mr_flags(access);
+
+ mr->umem = ib_umem_get(&dev->ibdev, start, length, access);
+ if (IS_ERR(mr->umem)) {
+ rc = PTR_ERR(mr->umem);
+ goto err_umem;
+ }
+
+ pg_sz = ib_umem_find_best_pgsz(mr->umem,
+ dev->lif_cfg.page_size_supported,
+ addr);
+ if (!pg_sz) {
+ rc = -EINVAL;
+ goto err_pgtbl;
+ }
+
+ rc = ionic_pgtbl_init(dev, &mr->buf, mr->umem, 0, 1, pg_sz);
+ if (rc)
+ goto err_pgtbl;
+
+ rc = ionic_create_mr_cmd(dev, pd, mr, addr, length);
+ if (rc)
+ goto err_cmd;
+
+ ionic_pgtbl_unbuf(dev, &mr->buf);
+
+ return &mr->ibmr;
+
+err_cmd:
+ ionic_pgtbl_unbuf(dev, &mr->buf);
+err_pgtbl:
+ ib_umem_release(mr->umem);
+err_umem:
+ ionic_put_mrid(dev, mr->mrid);
+err_mrid:
+ kfree(mr);
+ return ERR_PTR(rc);
+}
+
+struct ib_mr *ionic_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 offset,
+ u64 length, u64 addr, int fd, int access,
+ struct ib_dmah *dmah,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibpd->device);
+ struct ionic_pd *pd = to_ionic_pd(ibpd);
+ struct ib_umem_dmabuf *umem_dmabuf;
+ struct ionic_mr *mr;
+ u64 pg_sz;
+ int rc;
+
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ rc = ionic_get_mrid(dev, &mr->mrid);
+ if (rc)
+ goto err_mrid;
+
+ mr->ibmr.lkey = mr->mrid;
+ mr->ibmr.rkey = mr->mrid;
+ mr->ibmr.iova = addr;
+ mr->ibmr.length = length;
+
+ mr->flags = IONIC_MRF_USER_MR | to_ionic_mr_flags(access);
+
+ umem_dmabuf = ib_umem_dmabuf_get_pinned(&dev->ibdev, offset, length,
+ fd, access);
+ if (IS_ERR(umem_dmabuf)) {
+ rc = PTR_ERR(umem_dmabuf);
+ goto err_umem;
+ }
+
+ mr->umem = &umem_dmabuf->umem;
+
+ pg_sz = ib_umem_find_best_pgsz(mr->umem,
+ dev->lif_cfg.page_size_supported,
+ addr);
+ if (!pg_sz) {
+ rc = -EINVAL;
+ goto err_pgtbl;
+ }
+
+ rc = ionic_pgtbl_init(dev, &mr->buf, mr->umem, 0, 1, pg_sz);
+ if (rc)
+ goto err_pgtbl;
+
+ rc = ionic_create_mr_cmd(dev, pd, mr, addr, length);
+ if (rc)
+ goto err_cmd;
+
+ ionic_pgtbl_unbuf(dev, &mr->buf);
+
+ return &mr->ibmr;
+
+err_cmd:
+ ionic_pgtbl_unbuf(dev, &mr->buf);
+err_pgtbl:
+ ib_umem_release(mr->umem);
+err_umem:
+ ionic_put_mrid(dev, mr->mrid);
+err_mrid:
+ kfree(mr);
+ return ERR_PTR(rc);
+}
+
+int ionic_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibmr->device);
+ struct ionic_mr *mr = to_ionic_mr(ibmr);
+ int rc;
+
+ if (!mr->ibmr.lkey)
+ goto out;
+
+ if (mr->created) {
+ rc = ionic_destroy_mr_cmd(dev, mr->mrid);
+ if (rc)
+ return rc;
+ }
+
+ ionic_pgtbl_unbuf(dev, &mr->buf);
+
+ if (mr->umem)
+ ib_umem_release(mr->umem);
+
+ ionic_put_mrid(dev, mr->mrid);
+
+out:
+ kfree(mr);
+
+ return 0;
+}
+
+struct ib_mr *ionic_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type type,
+ u32 max_sg)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibpd->device);
+ struct ionic_pd *pd = to_ionic_pd(ibpd);
+ struct ionic_mr *mr;
+ int rc;
+
+ if (type != IB_MR_TYPE_MEM_REG)
+ return ERR_PTR(-EINVAL);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ rc = ionic_get_mrid(dev, &mr->mrid);
+ if (rc)
+ goto err_mrid;
+
+ mr->ibmr.lkey = mr->mrid;
+ mr->ibmr.rkey = mr->mrid;
+
+ mr->flags = IONIC_MRF_PHYS_MR;
+
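+ /* reserve a page table for up to max_sg pages; ionic_map_mr_sg() fills it later */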
+ rc = ionic_pgtbl_init(dev, &mr->buf, mr->umem, 0, max_sg, PAGE_SIZE);
+ if (rc)
+ goto err_pgtbl;
+
+ mr->buf.tbl_pages = 0;
+
+ rc = ionic_create_mr_cmd(dev, pd, mr, 0, 0);
+ if (rc)
+ goto err_cmd;
+
+ return &mr->ibmr;
+
+err_cmd:
+ ionic_pgtbl_unbuf(dev, &mr->buf);
+err_pgtbl:
+ ionic_put_mrid(dev, mr->mrid);
+err_mrid:
+ kfree(mr);
+ return ERR_PTR(rc);
+}
+
+static int ionic_map_mr_page(struct ib_mr *ibmr, u64 dma)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibmr->device);
+ struct ionic_mr *mr = to_ionic_mr(ibmr);
+
+ ibdev_dbg(&dev->ibdev, "dma %p\n", (void *)dma);
+ return ionic_pgtbl_page(&mr->buf, dma);
+}
+
+int ionic_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibmr->device);
+ struct ionic_mr *mr = to_ionic_mr(ibmr);
+ int rc;
+
+ /* mr must be allocated using ib_alloc_mr() */
+ if (unlikely(!mr->buf.tbl_limit))
+ return -EINVAL;
+
+ mr->buf.tbl_pages = 0;
+
+ if (mr->buf.tbl_buf)
+ dma_sync_single_for_cpu(dev->lif_cfg.hwdev, mr->buf.tbl_dma,
+ mr->buf.tbl_size, DMA_TO_DEVICE);
+
+ ibdev_dbg(&dev->ibdev, "sg %p nent %d\n", sg, sg_nents);
+ rc = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, ionic_map_mr_page);
+
+ mr->buf.page_size_log2 = order_base_2(ibmr->page_size);
+
+ if (mr->buf.tbl_buf)
+ dma_sync_single_for_device(dev->lif_cfg.hwdev, mr->buf.tbl_dma,
+ mr->buf.tbl_size, DMA_TO_DEVICE);
+
+ return rc;
+}
+
+int ionic_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibmw->device);
+ struct ionic_pd *pd = to_ionic_pd(ibmw->pd);
+ struct ionic_mr *mr = to_ionic_mw(ibmw);
+ int rc;
+
+ rc = ionic_get_mrid(dev, &mr->mrid);
+ if (rc)
+ return rc;
+
+ mr->ibmw.rkey = mr->mrid;
+
+ if (mr->ibmw.type == IB_MW_TYPE_1)
+ mr->flags = IONIC_MRF_MW_1;
+ else
+ mr->flags = IONIC_MRF_MW_2;
+
+ rc = ionic_create_mr_cmd(dev, pd, mr, 0, 0);
+ if (rc)
+ goto err_cmd;
+
+ return 0;
+
+err_cmd:
+ ionic_put_mrid(dev, mr->mrid);
+ return rc;
+}
+
+int ionic_dealloc_mw(struct ib_mw *ibmw)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibmw->device);
+ struct ionic_mr *mr = to_ionic_mw(ibmw);
+ int rc;
+
+ rc = ionic_destroy_mr_cmd(dev, mr->mrid);
+ if (rc)
+ return rc;
+
+ ionic_put_mrid(dev, mr->mrid);
+
+ return 0;
+}
+
+static int ionic_create_cq_cmd(struct ionic_ibdev *dev,
+ struct ionic_ctx *ctx,
+ struct ionic_cq *cq,
+ struct ionic_tbl_buf *buf)
+{
+ const u16 dbid = ionic_ctx_dbid(dev, ctx);
+ struct ionic_admin_wr wr = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
+ .wqe = {
+ .op = IONIC_V1_ADMIN_CREATE_CQ,
+ .len = cpu_to_le16(IONIC_ADMIN_CREATE_CQ_IN_V1_LEN),
+ .cmd.create_cq = {
+ .eq_id = cpu_to_le32(cq->eqid),
+ .depth_log2 = cq->q.depth_log2,
+ .stride_log2 = cq->q.stride_log2,
+ .page_size_log2 = buf->page_size_log2,
+ .tbl_index = cpu_to_le32(~0),
+ .map_count = cpu_to_le32(buf->tbl_pages),
+ .dma_addr = ionic_pgtbl_dma(buf, 0),
+ .dbid_flags = cpu_to_le16(dbid),
+ .id_ver = cpu_to_le32(cq->cqid),
+ }
+ }
+ };
+
+ if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_CREATE_CQ)
+ return -EBADRQC;
+
+ ionic_admin_post(dev, &wr);
+
+ return ionic_admin_wait(dev, &wr, 0);
+}
+
+static int ionic_destroy_cq_cmd(struct ionic_ibdev *dev, u32 cqid)
+{
+ struct ionic_admin_wr wr = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
+ .wqe = {
+ .op = IONIC_V1_ADMIN_DESTROY_CQ,
+ .len = cpu_to_le16(IONIC_ADMIN_DESTROY_CQ_IN_V1_LEN),
+ .cmd.destroy_cq = {
+ .cq_id = cpu_to_le32(cqid),
+ },
+ }
+ };
+
+ if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_DESTROY_CQ)
+ return -EBADRQC;
+
+ ionic_admin_post(dev, &wr);
+
+ return ionic_admin_wait(dev, &wr, IONIC_ADMIN_F_TEARDOWN);
+}
+
+int ionic_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibcq->device);
+ struct ib_udata *udata = &attrs->driver_udata;
+ struct ionic_ctx *ctx =
+ rdma_udata_to_drv_context(udata, struct ionic_ctx, ibctx);
+ struct ionic_vcq *vcq = to_ionic_vcq(ibcq);
+ struct ionic_tbl_buf buf = {};
+ struct ionic_cq_resp resp;
+ struct ionic_cq_req req;
+ int udma_idx = 0, rc;
+
+ if (udata) {
+ rc = ib_copy_from_udata(&req, udata, sizeof(req));
+ if (rc)
+ return rc;
+ }
+
+ vcq->udma_mask = BIT(dev->lif_cfg.udma_count) - 1;
+
+ if (udata)
+ vcq->udma_mask &= req.udma_mask;
+
+ if (!vcq->udma_mask) {
+ rc = -EINVAL;
+ goto err_init;
+ }
+
+ for (; udma_idx < dev->lif_cfg.udma_count; ++udma_idx) {
+ if (!(vcq->udma_mask & BIT(udma_idx)))
+ continue;
+
+ rc = ionic_create_cq_common(vcq, &buf, attr, ctx, udata,
+ &req.cq[udma_idx],
+ &resp.cqid[udma_idx],
+ udma_idx);
+ if (rc)
+ goto err_init;
+
+ rc = ionic_create_cq_cmd(dev, ctx, &vcq->cq[udma_idx], &buf);
+ if (rc)
+ goto err_cmd;
+
+ ionic_pgtbl_unbuf(dev, &buf);
+ }
+
+ vcq->ibcq.cqe = attr->cqe;
+
+ if (udata) {
+ resp.udma_mask = vcq->udma_mask;
+
+ rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
+ if (rc)
+ goto err_resp;
+ }
+
+ return 0;
+
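+ /* unwind in reverse: the error labels sit inside the loop, so each
+ * pass cleans up one udma's cq from the appropriate stage
+ */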
+err_resp:
+ while (udma_idx) {
+ --udma_idx;
+ if (!(vcq->udma_mask & BIT(udma_idx)))
+ continue;
+ ionic_destroy_cq_cmd(dev, vcq->cq[udma_idx].cqid);
+err_cmd:
+ ionic_pgtbl_unbuf(dev, &buf);
+ ionic_destroy_cq_common(dev, &vcq->cq[udma_idx]);
+err_init:
+ ;
+ }
+
+ return rc;
+}
+
+int ionic_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibcq->device);
+ struct ionic_vcq *vcq = to_ionic_vcq(ibcq);
+ int udma_idx, rc_tmp, rc = 0;
+
+ for (udma_idx = dev->lif_cfg.udma_count; udma_idx; ) {
+ --udma_idx;
+
+ if (!(vcq->udma_mask & BIT(udma_idx)))
+ continue;
+
+ rc_tmp = ionic_destroy_cq_cmd(dev, vcq->cq[udma_idx].cqid);
+ if (rc_tmp) {
+ if (!rc)
+ rc = rc_tmp;
+
+ continue;
+ }
+
+ ionic_destroy_cq_common(dev, &vcq->cq[udma_idx]);
+ }
+
+ return rc;
+}
+
+static bool pd_remote_privileged(struct ib_pd *pd)
+{
+ return pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY;
+}
+
+static int ionic_create_qp_cmd(struct ionic_ibdev *dev,
+ struct ionic_pd *pd,
+ struct ionic_cq *send_cq,
+ struct ionic_cq *recv_cq,
+ struct ionic_qp *qp,
+ struct ionic_tbl_buf *sq_buf,
+ struct ionic_tbl_buf *rq_buf,
+ struct ib_qp_init_attr *attr)
+{
+ const u16 dbid = ionic_obj_dbid(dev, pd->ibpd.uobject);
+ const u32 flags = to_ionic_qp_flags(0, 0,
+ qp->sq_cmb & IONIC_CMB_ENABLE,
+ qp->rq_cmb & IONIC_CMB_ENABLE,
+ qp->sq_spec, qp->rq_spec,
+ pd->flags & IONIC_QPF_PRIVILEGED,
+ pd_remote_privileged(&pd->ibpd));
+ struct ionic_admin_wr wr = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
+ .wqe = {
+ .op = IONIC_V1_ADMIN_CREATE_QP,
+ .len = cpu_to_le16(IONIC_ADMIN_CREATE_QP_IN_V1_LEN),
+ .cmd.create_qp = {
+ .pd_id = cpu_to_le32(pd->pdid),
+ .priv_flags = cpu_to_be32(flags),
+ .type_state = to_ionic_qp_type(attr->qp_type),
+ .dbid_flags = cpu_to_le16(dbid),
+ .id_ver = cpu_to_le32(qp->qpid),
+ }
+ }
+ };
+
+ if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_CREATE_QP)
+ return -EBADRQC;
+
+ if (qp->has_sq) {
+ wr.wqe.cmd.create_qp.sq_cq_id = cpu_to_le32(send_cq->cqid);
+ wr.wqe.cmd.create_qp.sq_depth_log2 = qp->sq.depth_log2;
+ wr.wqe.cmd.create_qp.sq_stride_log2 = qp->sq.stride_log2;
+ wr.wqe.cmd.create_qp.sq_page_size_log2 = sq_buf->page_size_log2;
+ wr.wqe.cmd.create_qp.sq_tbl_index_xrcd_id = cpu_to_le32(~0);
+ wr.wqe.cmd.create_qp.sq_map_count =
+ cpu_to_le32(sq_buf->tbl_pages);
+ wr.wqe.cmd.create_qp.sq_dma_addr = ionic_pgtbl_dma(sq_buf, 0);
+ }
+
+ if (qp->has_rq) {
+ wr.wqe.cmd.create_qp.rq_cq_id = cpu_to_le32(recv_cq->cqid);
+ wr.wqe.cmd.create_qp.rq_depth_log2 = qp->rq.depth_log2;
+ wr.wqe.cmd.create_qp.rq_stride_log2 = qp->rq.stride_log2;
+ wr.wqe.cmd.create_qp.rq_page_size_log2 = rq_buf->page_size_log2;
+ wr.wqe.cmd.create_qp.rq_tbl_index_srq_id = cpu_to_le32(~0);
+ wr.wqe.cmd.create_qp.rq_map_count =
+ cpu_to_le32(rq_buf->tbl_pages);
+ wr.wqe.cmd.create_qp.rq_dma_addr = ionic_pgtbl_dma(rq_buf, 0);
+ }
+
+ ionic_admin_post(dev, &wr);
+
+ return ionic_admin_wait(dev, &wr, 0);
+}
+
+static int ionic_modify_qp_cmd(struct ionic_ibdev *dev,
+ struct ionic_pd *pd,
+ struct ionic_qp *qp,
+ struct ib_qp_attr *attr,
+ int mask)
+{
+ const u32 flags = to_ionic_qp_flags(attr->qp_access_flags,
+ attr->en_sqd_async_notify,
+ qp->sq_cmb & IONIC_CMB_ENABLE,
+ qp->rq_cmb & IONIC_CMB_ENABLE,
+ qp->sq_spec, qp->rq_spec,
+ pd->flags & IONIC_QPF_PRIVILEGED,
+ pd_remote_privileged(qp->ibqp.pd));
+ const u8 state = to_ionic_qp_modify_state(attr->qp_state,
+ attr->cur_qp_state);
+ struct ionic_admin_wr wr = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
+ .wqe = {
+ .op = IONIC_V1_ADMIN_MODIFY_QP,
+ .len = cpu_to_le16(IONIC_ADMIN_MODIFY_QP_IN_V1_LEN),
+ .cmd.mod_qp = {
+ .attr_mask = cpu_to_be32(mask),
+ .access_flags = cpu_to_be16(flags),
+ .rq_psn = cpu_to_le32(attr->rq_psn),
+ .sq_psn = cpu_to_le32(attr->sq_psn),
+ .rate_limit_kbps =
+ cpu_to_le32(attr->rate_limit),
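+ /* enum ib_mtu is log2(mtu) - 7, so +7 yields log2 of the path mtu */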
+ .pmtu = (attr->path_mtu + 7),
+ .retry = (attr->retry_cnt |
+ (attr->rnr_retry << 4)),
+ .rnr_timer = attr->min_rnr_timer,
+ .retry_timeout = attr->timeout,
+ .type_state = state,
+ .id_ver = cpu_to_le32(qp->qpid),
+ }
+ }
+ };
+ const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
+ void *hdr_buf = NULL;
+ dma_addr_t hdr_dma = 0;
+ int rc, hdr_len = 0;
+ u16 sport;
+
+ if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_MODIFY_QP)
+ return -EBADRQC;
+
+ if ((mask & IB_QP_MAX_DEST_RD_ATOMIC) && attr->max_dest_rd_atomic) {
+ /* Note: rounding up/down was already done when allocating
+ * resources on the device, but that allocation is in units of
+ * cache lines. We can't use the resource allocation order to
+ * determine the wqe order here, because for queue lengths of
+ * one cache line or less it is not distinct.
+ *
+ * Therefore, the wqe order is computed again here.
+ *
+ * Account for the hole and round up to the next order.
+ */
+ wr.wqe.cmd.mod_qp.rsq_depth =
+ order_base_2(attr->max_dest_rd_atomic + 1);
+ wr.wqe.cmd.mod_qp.rsq_index = cpu_to_le32(~0);
+ }
+
+ if ((mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) {
+ /* Account for hole and round down to the next order */
+ wr.wqe.cmd.mod_qp.rrq_depth =
+ order_base_2(attr->max_rd_atomic + 2) - 1;
+ wr.wqe.cmd.mod_qp.rrq_index = cpu_to_le32(~0);
+ }
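+
+ /*
+ * Worked example (illustrative): max_dest_rd_atomic = 8 gives
+ * rsq_depth = order_base_2(9) = 4 (16 slots, rounded up), while
+ * max_rd_atomic = 8 gives rrq_depth = order_base_2(10) - 1 = 3
+ * (8 slots, rounded down).
+ */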
+
+ if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC)
+ wr.wqe.cmd.mod_qp.qkey_dest_qpn =
+ cpu_to_le32(attr->dest_qp_num);
+ else
+ wr.wqe.cmd.mod_qp.qkey_dest_qpn = cpu_to_le32(attr->qkey);
+
+ if (mask & IB_QP_AV) {
+ if (!qp->hdr)
+ return -ENOMEM;
+
+ sport = rdma_get_udp_sport(grh->flow_label,
+ qp->qpid,
+ attr->dest_qp_num);
+
+ rc = ionic_build_hdr(dev, qp->hdr, &attr->ah_attr, sport, true);
+ if (rc)
+ return rc;
+
+ qp->sgid_index = grh->sgid_index;
+
+ hdr_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!hdr_buf)
+ return -ENOMEM;
+
+ hdr_len = ib_ud_header_pack(qp->hdr, hdr_buf);
+ hdr_len -= IB_BTH_BYTES;
+ hdr_len -= IB_DETH_BYTES;
+ ibdev_dbg(&dev->ibdev, "roce packet header template\n");
+ print_hex_dump_debug("hdr ", DUMP_PREFIX_OFFSET, 16, 1,
+ hdr_buf, hdr_len, true);
+
+ hdr_dma = dma_map_single(dev->lif_cfg.hwdev, hdr_buf, hdr_len,
+ DMA_TO_DEVICE);
+
+ rc = dma_mapping_error(dev->lif_cfg.hwdev, hdr_dma);
+ if (rc)
+ goto err_dma;
+
+ if (qp->hdr->ipv4_present) {
+ wr.wqe.cmd.mod_qp.tfp_csum_profile =
+ qp->hdr->vlan_present ?
+ IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV4_UDP :
+ IONIC_TFP_CSUM_PROF_ETH_IPV4_UDP;
+ } else {
+ wr.wqe.cmd.mod_qp.tfp_csum_profile =
+ qp->hdr->vlan_present ?
+ IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV6_UDP :
+ IONIC_TFP_CSUM_PROF_ETH_IPV6_UDP;
+ }
+
+ wr.wqe.cmd.mod_qp.ah_id_len =
+ cpu_to_le32(qp->ahid | (hdr_len << 24));
+ wr.wqe.cmd.mod_qp.dma_addr = cpu_to_le64(hdr_dma);
+
+ wr.wqe.cmd.mod_qp.en_pcp = attr->ah_attr.sl;
+ wr.wqe.cmd.mod_qp.ip_dscp = grh->traffic_class >> 2;
+ }
+
+ ionic_admin_post(dev, &wr);
+
+ rc = ionic_admin_wait(dev, &wr, 0);
+
+ if (mask & IB_QP_AV)
+ dma_unmap_single(dev->lif_cfg.hwdev, hdr_dma, hdr_len,
+ DMA_TO_DEVICE);
+err_dma:
+ if (mask & IB_QP_AV)
+ kfree(hdr_buf);
+
+ return rc;
+}
+
+static int ionic_query_qp_cmd(struct ionic_ibdev *dev,
+ struct ionic_qp *qp,
+ struct ib_qp_attr *attr,
+ int mask)
+{
+ struct ionic_admin_wr wr = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
+ .wqe = {
+ .op = IONIC_V1_ADMIN_QUERY_QP,
+ .len = cpu_to_le16(IONIC_ADMIN_QUERY_QP_IN_V1_LEN),
+ .cmd.query_qp = {
+ .id_ver = cpu_to_le32(qp->qpid),
+ },
+ }
+ };
+ struct ionic_v1_admin_query_qp_sq *query_sqbuf;
+ struct ionic_v1_admin_query_qp_rq *query_rqbuf;
+ dma_addr_t query_sqdma;
+ dma_addr_t query_rqdma;
+ dma_addr_t hdr_dma = 0;
+ void *hdr_buf = NULL;
+ int flags, rc;
+
+ if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_QUERY_QP)
+ return -EBADRQC;
+
+ if (qp->has_sq) {
+ bool expdb = !!(qp->sq_cmb & IONIC_CMB_EXPDB);
+
+ attr->cap.max_send_sge =
+ ionic_v1_send_wqe_max_sge(qp->sq.stride_log2,
+ qp->sq_spec,
+ expdb);
+ attr->cap.max_inline_data =
+ ionic_v1_send_wqe_max_data(qp->sq.stride_log2, expdb);
+ }
+
+ if (qp->has_rq) {
+ attr->cap.max_recv_sge =
+ ionic_v1_recv_wqe_max_sge(qp->rq.stride_log2,
+ qp->rq_spec,
+ qp->rq_cmb & IONIC_CMB_EXPDB);
+ }
+
+ query_sqbuf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!query_sqbuf)
+ return -ENOMEM;
+
+ query_rqbuf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!query_rqbuf) {
+ rc = -ENOMEM;
+ goto err_rqbuf;
+ }
+
+ query_sqdma = dma_map_single(dev->lif_cfg.hwdev, query_sqbuf, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ rc = dma_mapping_error(dev->lif_cfg.hwdev, query_sqdma);
+ if (rc)
+ goto err_sqdma;
+
+ query_rqdma = dma_map_single(dev->lif_cfg.hwdev, query_rqbuf, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ rc = dma_mapping_error(dev->lif_cfg.hwdev, query_rqdma);
+ if (rc)
+ goto err_rqdma;
+
+ if (mask & IB_QP_AV) {
+ hdr_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!hdr_buf) {
+ rc = -ENOMEM;
+ goto err_hdrbuf;
+ }
+
+ hdr_dma = dma_map_single(dev->lif_cfg.hwdev, hdr_buf,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ rc = dma_mapping_error(dev->lif_cfg.hwdev, hdr_dma);
+ if (rc) {
+ kfree(hdr_buf);
+ goto err_hdrbuf;
+ }
+ }
+
+ wr.wqe.cmd.query_qp.sq_dma_addr = cpu_to_le64(query_sqdma);
+ wr.wqe.cmd.query_qp.rq_dma_addr = cpu_to_le64(query_rqdma);
+ wr.wqe.cmd.query_qp.hdr_dma_addr = cpu_to_le64(hdr_dma);
+ wr.wqe.cmd.query_qp.ah_id = cpu_to_le32(qp->ahid);
+
+ ionic_admin_post(dev, &wr);
+
+ rc = ionic_admin_wait(dev, &wr, 0);
+
+ if (rc)
+ goto err_hdrdma;
+
+ flags = be16_to_cpu(query_sqbuf->access_perms_flags) |
+ be16_to_cpu(query_rqbuf->access_perms_flags);
+
+ print_hex_dump_debug("sqbuf ", DUMP_PREFIX_OFFSET, 16, 1,
+ query_sqbuf, sizeof(*query_sqbuf), true);
+ print_hex_dump_debug("rqbuf ", DUMP_PREFIX_OFFSET, 16, 1,
+ query_rqbuf, sizeof(*query_rqbuf), true);
+ ibdev_dbg(&dev->ibdev, "query qp %u state_pmtu %#x flags %#x\n",
+ qp->qpid, query_rqbuf->state_pmtu, flags);
+
+ attr->qp_state = from_ionic_qp_state(query_rqbuf->state_pmtu >> 4);
+ attr->cur_qp_state = attr->qp_state;
+ attr->path_mtu = (query_rqbuf->state_pmtu & 0xf) - 7;
+ attr->path_mig_state = IB_MIG_MIGRATED;
+ attr->qkey = be32_to_cpu(query_sqbuf->qkey_dest_qpn);
+ attr->rq_psn = be32_to_cpu(query_sqbuf->rq_psn);
+ attr->sq_psn = be32_to_cpu(query_rqbuf->sq_psn);
+ attr->dest_qp_num = attr->qkey;
+ attr->qp_access_flags = from_ionic_qp_flags(flags);
+ attr->pkey_index = 0;
+ attr->alt_pkey_index = 0;
+ attr->en_sqd_async_notify = !!(flags & IONIC_QPF_SQD_NOTIFY);
+ attr->sq_draining = !!(flags & IONIC_QPF_SQ_DRAINING);
+ attr->max_rd_atomic = BIT(query_rqbuf->rrq_depth) - 1;
+ attr->max_dest_rd_atomic = BIT(query_rqbuf->rsq_depth) - 1;
+ attr->min_rnr_timer = query_sqbuf->rnr_timer;
+ attr->port_num = 0;
+ attr->timeout = query_sqbuf->retry_timeout;
+ attr->retry_cnt = query_rqbuf->retry_rnrtry & 0xf;
+ attr->rnr_retry = query_rqbuf->retry_rnrtry >> 4;
+ attr->alt_port_num = 0;
+ attr->alt_timeout = 0;
+ attr->rate_limit = be32_to_cpu(query_sqbuf->rate_limit_kbps);
+
+ if (mask & IB_QP_AV)
+ ionic_set_ah_attr(dev, &attr->ah_attr,
+ qp->hdr, qp->sgid_index);
+
+err_hdrdma:
+ if (mask & IB_QP_AV) {
+ dma_unmap_single(dev->lif_cfg.hwdev, hdr_dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ kfree(hdr_buf);
+ }
+err_hdrbuf:
+ dma_unmap_single(dev->lif_cfg.hwdev, query_rqdma, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+err_rqdma:
+ dma_unmap_single(dev->lif_cfg.hwdev, query_sqdma, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+err_sqdma:
+ kfree(query_rqbuf);
+err_rqbuf:
+ kfree(query_sqbuf);
+
+ return rc;
+}
+
+static int ionic_destroy_qp_cmd(struct ionic_ibdev *dev, u32 qpid)
+{
+ struct ionic_admin_wr wr = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
+ .wqe = {
+ .op = IONIC_V1_ADMIN_DESTROY_QP,
+ .len = cpu_to_le16(IONIC_ADMIN_DESTROY_QP_IN_V1_LEN),
+ .cmd.destroy_qp = {
+ .qp_id = cpu_to_le32(qpid),
+ },
+ }
+ };
+
+ if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_DESTROY_QP)
+ return -EBADRQC;
+
+ ionic_admin_post(dev, &wr);
+
+ return ionic_admin_wait(dev, &wr, IONIC_ADMIN_F_TEARDOWN);
+}
+
+static bool ionic_expdb_wqe_size_supported(struct ionic_ibdev *dev,
+ uint32_t wqe_size)
+{
+ switch (wqe_size) {
+ case 64: return dev->lif_cfg.expdb_mask & IONIC_EXPDB_64;
+ case 128: return dev->lif_cfg.expdb_mask & IONIC_EXPDB_128;
+ case 256: return dev->lif_cfg.expdb_mask & IONIC_EXPDB_256;
+ case 512: return dev->lif_cfg.expdb_mask & IONIC_EXPDB_512;
+ }
+
+ return false;
+}
+
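+/*
+ * Try to place the send queue in controller memory (CMB). Unless the
+ * user marked CMB placement as required, each unsupported or unavailable
+ * capability is silently dropped and setup falls back to host memory;
+ * with IONIC_CMB_REQUIRE set, any shortfall disables CMB placement
+ * entirely.
+ */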
+static void ionic_qp_sq_init_cmb(struct ionic_ibdev *dev,
+ struct ionic_qp *qp,
+ struct ib_udata *udata,
+ int max_data)
+{
+ u8 expdb_stride_log2 = 0;
+ bool expdb;
+ int rc;
+
+ if (!(qp->sq_cmb & IONIC_CMB_ENABLE))
+ goto not_in_cmb;
+
+ if (qp->sq_cmb & ~IONIC_CMB_SUPPORTED) {
+ if (qp->sq_cmb & IONIC_CMB_REQUIRE)
+ goto not_in_cmb;
+
+ qp->sq_cmb &= IONIC_CMB_SUPPORTED;
+ }
+
+ if ((qp->sq_cmb & IONIC_CMB_EXPDB) && !dev->lif_cfg.sq_expdb) {
+ if (qp->sq_cmb & IONIC_CMB_REQUIRE)
+ goto not_in_cmb;
+
+ qp->sq_cmb &= ~IONIC_CMB_EXPDB;
+ }
+
+ qp->sq_cmb_order = order_base_2(qp->sq.size / PAGE_SIZE);
+
+ if (qp->sq_cmb_order >= IONIC_SQCMB_ORDER)
+ goto not_in_cmb;
+
+ if (qp->sq_cmb & IONIC_CMB_EXPDB)
+ expdb_stride_log2 = qp->sq.stride_log2;
+
+ rc = ionic_get_cmb(dev->lif_cfg.lif, &qp->sq_cmb_pgid,
+ &qp->sq_cmb_addr, qp->sq_cmb_order,
+ expdb_stride_log2, &expdb);
+ if (rc)
+ goto not_in_cmb;
+
+ if ((qp->sq_cmb & IONIC_CMB_EXPDB) && !expdb) {
+ if (qp->sq_cmb & IONIC_CMB_REQUIRE)
+ goto err_map;
+
+ qp->sq_cmb &= ~IONIC_CMB_EXPDB;
+ }
+
+ return;
+
+err_map:
+ ionic_put_cmb(dev->lif_cfg.lif, qp->sq_cmb_pgid, qp->sq_cmb_order);
+not_in_cmb:
+ if (qp->sq_cmb & IONIC_CMB_REQUIRE)
+ ibdev_dbg(&dev->ibdev, "could not place sq in cmb as required\n");
+
+ qp->sq_cmb = 0;
+ qp->sq_cmb_order = IONIC_RES_INVALID;
+ qp->sq_cmb_pgid = 0;
+ qp->sq_cmb_addr = 0;
+}
+
+static void ionic_qp_sq_destroy_cmb(struct ionic_ibdev *dev,
+ struct ionic_ctx *ctx,
+ struct ionic_qp *qp)
+{
+ if (!(qp->sq_cmb & IONIC_CMB_ENABLE))
+ return;
+
+ if (ctx)
+ rdma_user_mmap_entry_remove(qp->mmap_sq_cmb);
+
+ ionic_put_cmb(dev->lif_cfg.lif, qp->sq_cmb_pgid, qp->sq_cmb_order);
+}
+
+static int ionic_qp_sq_init(struct ionic_ibdev *dev, struct ionic_ctx *ctx,
+ struct ionic_qp *qp, struct ionic_qdesc *sq,
+ struct ionic_tbl_buf *buf, int max_wr, int max_sge,
+ int max_data, int sq_spec, struct ib_udata *udata)
+{
+ u32 wqe_size;
+ int rc = 0;
+
+ qp->sq_msn_prod = 0;
+ qp->sq_msn_cons = 0;
+
+ if (!qp->has_sq) {
+ if (buf) {
+ buf->tbl_buf = NULL;
+ buf->tbl_limit = 0;
+ buf->tbl_pages = 0;
+ }
+ if (udata)
+ rc = ionic_validate_qdesc_zero(sq);
+
+ return rc;
+ }
+
+ rc = -EINVAL;
+
+ if (max_wr < 0 || max_wr > 0xffff)
+ return rc;
+
+ if (max_sge < 1)
+ return rc;
+
+ if (max_sge > min(ionic_v1_send_wqe_max_sge(dev->lif_cfg.max_stride, 0,
+ qp->sq_cmb &
+ IONIC_CMB_EXPDB),
+ IONIC_SPEC_HIGH))
+ return rc;
+
+ if (max_data < 0)
+ return rc;
+
+ if (max_data > ionic_v1_send_wqe_max_data(dev->lif_cfg.max_stride,
+ qp->sq_cmb & IONIC_CMB_EXPDB))
+ return rc;
+
+ if (udata) {
+ rc = ionic_validate_qdesc(sq);
+ if (rc)
+ return rc;
+
+ qp->sq_spec = sq_spec;
+
+ qp->sq.ptr = NULL;
+ qp->sq.size = sq->size;
+ qp->sq.mask = sq->mask;
+ qp->sq.depth_log2 = sq->depth_log2;
+ qp->sq.stride_log2 = sq->stride_log2;
+
+ qp->sq_meta = NULL;
+ qp->sq_msn_idx = NULL;
+
+ qp->sq_umem = ib_umem_get(&dev->ibdev, sq->addr, sq->size, 0);
+ if (IS_ERR(qp->sq_umem))
+ return PTR_ERR(qp->sq_umem);
+ } else {
+ qp->sq_umem = NULL;
+
+ qp->sq_spec = ionic_v1_use_spec_sge(max_sge, sq_spec);
+ if (sq_spec && !qp->sq_spec)
+ ibdev_dbg(&dev->ibdev,
+ "init sq: max_sge %u disables spec\n",
+ max_sge);
+
+ if (qp->sq_cmb & IONIC_CMB_EXPDB) {
+ wqe_size = ionic_v1_send_wqe_min_size(max_sge, max_data,
+ qp->sq_spec,
+ true);
+
+ if (!ionic_expdb_wqe_size_supported(dev, wqe_size))
+ qp->sq_cmb &= ~IONIC_CMB_EXPDB;
+ }
+
+ if (!(qp->sq_cmb & IONIC_CMB_EXPDB))
+ wqe_size = ionic_v1_send_wqe_min_size(max_sge, max_data,
+ qp->sq_spec,
+ false);
+
+ rc = ionic_queue_init(&qp->sq, dev->lif_cfg.hwdev,
+ max_wr, wqe_size);
+ if (rc)
+ return rc;
+
+ ionic_queue_dbell_init(&qp->sq, qp->qpid);
+
+ qp->sq_meta = kmalloc_array((u32)qp->sq.mask + 1,
+ sizeof(*qp->sq_meta),
+ GFP_KERNEL);
+ if (!qp->sq_meta) {
+ rc = -ENOMEM;
+ goto err_sq_meta;
+ }
+
+ qp->sq_msn_idx = kmalloc_array((u32)qp->sq.mask + 1,
+ sizeof(*qp->sq_msn_idx),
+ GFP_KERNEL);
+ if (!qp->sq_msn_idx) {
+ rc = -ENOMEM;
+ goto err_sq_msn;
+ }
+ }
+
+ ionic_qp_sq_init_cmb(dev, qp, udata, max_data);
+
+ if (qp->sq_cmb & IONIC_CMB_ENABLE)
+ rc = ionic_pgtbl_init(dev, buf, NULL,
+ (u64)qp->sq_cmb_pgid << PAGE_SHIFT,
+ 1, PAGE_SIZE);
+ else
+ rc = ionic_pgtbl_init(dev, buf,
+ qp->sq_umem, qp->sq.dma, 1, PAGE_SIZE);
+ if (rc)
+ goto err_sq_tbl;
+
+ return 0;
+
+err_sq_tbl:
+ ionic_qp_sq_destroy_cmb(dev, ctx, qp);
+ kfree(qp->sq_msn_idx);
+err_sq_msn:
+ kfree(qp->sq_meta);
+err_sq_meta:
+ if (qp->sq_umem)
+ ib_umem_release(qp->sq_umem);
+ else
+ ionic_queue_destroy(&qp->sq, dev->lif_cfg.hwdev);
+ return rc;
+}
+
+static void ionic_qp_sq_destroy(struct ionic_ibdev *dev,
+ struct ionic_ctx *ctx,
+ struct ionic_qp *qp)
+{
+ if (!qp->has_sq)
+ return;
+
+ ionic_qp_sq_destroy_cmb(dev, ctx, qp);
+
+ kfree(qp->sq_msn_idx);
+ kfree(qp->sq_meta);
+
+ if (qp->sq_umem)
+ ib_umem_release(qp->sq_umem);
+ else
+ ionic_queue_destroy(&qp->sq, dev->lif_cfg.hwdev);
+}
+
+static void ionic_qp_rq_init_cmb(struct ionic_ibdev *dev,
+ struct ionic_qp *qp,
+ struct ib_udata *udata)
+{
+ u8 expdb_stride_log2 = 0;
+ bool expdb;
+ int rc;
+
+ if (!(qp->rq_cmb & IONIC_CMB_ENABLE))
+ goto not_in_cmb;
+
+ if (qp->rq_cmb & ~IONIC_CMB_SUPPORTED) {
+ if (qp->rq_cmb & IONIC_CMB_REQUIRE)
+ goto not_in_cmb;
+
+ qp->rq_cmb &= IONIC_CMB_SUPPORTED;
+ }
+
+ if ((qp->rq_cmb & IONIC_CMB_EXPDB) && !dev->lif_cfg.rq_expdb) {
+ if (qp->rq_cmb & IONIC_CMB_REQUIRE)
+ goto not_in_cmb;
+
+ qp->rq_cmb &= ~IONIC_CMB_EXPDB;
+ }
+
+ qp->rq_cmb_order = order_base_2(qp->rq.size / PAGE_SIZE);
+
+ if (qp->rq_cmb_order >= IONIC_RQCMB_ORDER)
+ goto not_in_cmb;
+
+ if (qp->rq_cmb & IONIC_CMB_EXPDB)
+ expdb_stride_log2 = qp->rq.stride_log2;
+
+ rc = ionic_get_cmb(dev->lif_cfg.lif, &qp->rq_cmb_pgid,
+ &qp->rq_cmb_addr, qp->rq_cmb_order,
+ expdb_stride_log2, &expdb);
+ if (rc)
+ goto not_in_cmb;
+
+ if ((qp->rq_cmb & IONIC_CMB_EXPDB) && !expdb) {
+ if (qp->rq_cmb & IONIC_CMB_REQUIRE)
+ goto err_map;
+
+ qp->rq_cmb &= ~IONIC_CMB_EXPDB;
+ }
+
+ return;
+
+err_map:
+ ionic_put_cmb(dev->lif_cfg.lif, qp->rq_cmb_pgid, qp->rq_cmb_order);
+not_in_cmb:
+ if (qp->rq_cmb & IONIC_CMB_REQUIRE)
+ ibdev_dbg(&dev->ibdev, "could not place rq in cmb as required\n");
+
+ qp->rq_cmb = 0;
+ qp->rq_cmb_order = IONIC_RES_INVALID;
+ qp->rq_cmb_pgid = 0;
+ qp->rq_cmb_addr = 0;
+}
+
+static void ionic_qp_rq_destroy_cmb(struct ionic_ibdev *dev,
+ struct ionic_ctx *ctx,
+ struct ionic_qp *qp)
+{
+ if (!(qp->rq_cmb & IONIC_CMB_ENABLE))
+ return;
+
+ if (ctx)
+ rdma_user_mmap_entry_remove(qp->mmap_rq_cmb);
+
+ ionic_put_cmb(dev->lif_cfg.lif, qp->rq_cmb_pgid, qp->rq_cmb_order);
+}
+
+static int ionic_qp_rq_init(struct ionic_ibdev *dev, struct ionic_ctx *ctx,
+ struct ionic_qp *qp, struct ionic_qdesc *rq,
+ struct ionic_tbl_buf *buf, int max_wr, int max_sge,
+ int rq_spec, struct ib_udata *udata)
+{
+ int rc = 0, i;
+ u32 wqe_size;
+
+ if (!qp->has_rq) {
+ if (buf) {
+ buf->tbl_buf = NULL;
+ buf->tbl_limit = 0;
+ buf->tbl_pages = 0;
+ }
+ if (udata)
+ rc = ionic_validate_qdesc_zero(rq);
+
+ return rc;
+ }
+
+ rc = -EINVAL;
+
+ if (max_wr < 0 || max_wr > 0xffff)
+ return rc;
+
+ if (max_sge < 1)
+ return rc;
+
+ if (max_sge > min(ionic_v1_recv_wqe_max_sge(dev->lif_cfg.max_stride, 0, false),
+ IONIC_SPEC_HIGH))
+ return rc;
+
+ if (udata) {
+ rc = ionic_validate_qdesc(rq);
+ if (rc)
+ return rc;
+
+ qp->rq_spec = rq_spec;
+
+ qp->rq.ptr = NULL;
+ qp->rq.size = rq->size;
+ qp->rq.mask = rq->mask;
+ qp->rq.depth_log2 = rq->depth_log2;
+ qp->rq.stride_log2 = rq->stride_log2;
+
+ qp->rq_meta = NULL;
+
+ qp->rq_umem = ib_umem_get(&dev->ibdev, rq->addr, rq->size, 0);
+ if (IS_ERR(qp->rq_umem))
+ return PTR_ERR(qp->rq_umem);
+ } else {
+ qp->rq_umem = NULL;
+
+ qp->rq_spec = ionic_v1_use_spec_sge(max_sge, rq_spec);
+ if (rq_spec && !qp->rq_spec)
+ ibdev_dbg(&dev->ibdev,
+ "init rq: max_sge %u disables spec\n",
+ max_sge);
+
+ if (qp->rq_cmb & IONIC_CMB_EXPDB) {
+ wqe_size = ionic_v1_recv_wqe_min_size(max_sge,
+ qp->rq_spec,
+ true);
+
+ if (!ionic_expdb_wqe_size_supported(dev, wqe_size))
+ qp->rq_cmb &= ~IONIC_CMB_EXPDB;
+ }
+
+ if (!(qp->rq_cmb & IONIC_CMB_EXPDB))
+ wqe_size = ionic_v1_recv_wqe_min_size(max_sge,
+ qp->rq_spec,
+ false);
+
+ rc = ionic_queue_init(&qp->rq, dev->lif_cfg.hwdev,
+ max_wr, wqe_size);
+ if (rc)
+ return rc;
+
+ ionic_queue_dbell_init(&qp->rq, qp->qpid);
+
+ qp->rq_meta = kmalloc_array((u32)qp->rq.mask + 1,
+ sizeof(*qp->rq_meta),
+ GFP_KERNEL);
+ if (!qp->rq_meta) {
+ rc = -ENOMEM;
+ goto err_rq_meta;
+ }
+
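+ /* Thread rq_meta into a singly linked free list; posted
+ * receives are taken from the head and returned to it on
+ * completion.
+ */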
+ for (i = 0; i < qp->rq.mask; ++i)
+ qp->rq_meta[i].next = &qp->rq_meta[i + 1];
+ qp->rq_meta[i].next = IONIC_META_LAST;
+ qp->rq_meta_head = &qp->rq_meta[0];
+ }
+
+ ionic_qp_rq_init_cmb(dev, qp, udata);
+
+ if (qp->rq_cmb & IONIC_CMB_ENABLE)
+ rc = ionic_pgtbl_init(dev, buf, NULL,
+ (u64)qp->rq_cmb_pgid << PAGE_SHIFT,
+ 1, PAGE_SIZE);
+ else
+ rc = ionic_pgtbl_init(dev, buf,
+ qp->rq_umem, qp->rq.dma, 1, PAGE_SIZE);
+ if (rc)
+ goto err_rq_tbl;
+
+ return 0;
+
+err_rq_tbl:
+ ionic_qp_rq_destroy_cmb(dev, ctx, qp);
+ kfree(qp->rq_meta);
+err_rq_meta:
+ if (qp->rq_umem)
+ ib_umem_release(qp->rq_umem);
+ else
+ ionic_queue_destroy(&qp->rq, dev->lif_cfg.hwdev);
+ return rc;
+}
+
+static void ionic_qp_rq_destroy(struct ionic_ibdev *dev,
+ struct ionic_ctx *ctx,
+ struct ionic_qp *qp)
+{
+ if (!qp->has_rq)
+ return;
+
+ ionic_qp_rq_destroy_cmb(dev, ctx, qp);
+
+ kfree(qp->rq_meta);
+
+ if (qp->rq_umem)
+ ib_umem_release(qp->rq_umem);
+ else
+ ionic_queue_destroy(&qp->rq, dev->lif_cfg.hwdev);
+}
+
+int ionic_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
+ struct ib_udata *udata)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibqp->device);
+ struct ionic_tbl_buf sq_buf = {}, rq_buf = {};
+ struct ionic_pd *pd = to_ionic_pd(ibqp->pd);
+ struct ionic_qp *qp = to_ionic_qp(ibqp);
+ struct ionic_ctx *ctx =
+ rdma_udata_to_drv_context(udata, struct ionic_ctx, ibctx);
+ struct ionic_qp_resp resp = {};
+ struct ionic_qp_req req = {};
+ struct ionic_cq *cq;
+ u8 udma_mask;
+ void *entry;
+ int rc;
+
+ if (udata) {
+ rc = ib_copy_from_udata(&req, udata, sizeof(req));
+ if (rc)
+ return rc;
+ } else {
+ req.sq_spec = IONIC_SPEC_HIGH;
+ req.rq_spec = IONIC_SPEC_HIGH;
+ }
+
+ if (attr->qp_type == IB_QPT_SMI || attr->qp_type > IB_QPT_UD)
+ return -EOPNOTSUPP;
+
+ qp->state = IB_QPS_RESET;
+
+ INIT_LIST_HEAD(&qp->cq_poll_sq);
+ INIT_LIST_HEAD(&qp->cq_flush_sq);
+ INIT_LIST_HEAD(&qp->cq_flush_rq);
+
+ spin_lock_init(&qp->sq_lock);
+ spin_lock_init(&qp->rq_lock);
+
+ qp->has_sq = 1;
+ qp->has_rq = 1;
+
+ if (attr->qp_type == IB_QPT_GSI) {
+ rc = ionic_get_gsi_qpid(dev, &qp->qpid);
+ } else {
+ udma_mask = BIT(dev->lif_cfg.udma_count) - 1;
+
+ if (qp->has_sq)
+ udma_mask &= to_ionic_vcq(attr->send_cq)->udma_mask;
+
+ if (qp->has_rq)
+ udma_mask &= to_ionic_vcq(attr->recv_cq)->udma_mask;
+
+ if (udata && req.udma_mask)
+ udma_mask &= req.udma_mask;
+
+ if (!udma_mask)
+ return -EINVAL;
+
+ rc = ionic_get_qpid(dev, &qp->qpid, &qp->udma_idx, udma_mask);
+ }
+ if (rc)
+ return rc;
+
+ qp->sig_all = attr->sq_sig_type == IB_SIGNAL_ALL_WR;
+ qp->has_ah = attr->qp_type == IB_QPT_RC;
+
+ if (qp->has_ah) {
+ qp->hdr = kzalloc(sizeof(*qp->hdr), GFP_KERNEL);
+ if (!qp->hdr) {
+ rc = -ENOMEM;
+ goto err_ah_alloc;
+ }
+
+ rc = ionic_get_ahid(dev, &qp->ahid);
+ if (rc)
+ goto err_ahid;
+ }
+
+ if (udata) {
+ if (req.rq_cmb & IONIC_CMB_ENABLE)
+ qp->rq_cmb = req.rq_cmb;
+
+ if (req.sq_cmb & IONIC_CMB_ENABLE)
+ qp->sq_cmb = req.sq_cmb;
+ }
+
+ rc = ionic_qp_sq_init(dev, ctx, qp, &req.sq, &sq_buf,
+ attr->cap.max_send_wr, attr->cap.max_send_sge,
+ attr->cap.max_inline_data, req.sq_spec, udata);
+ if (rc)
+ goto err_sq;
+
+ rc = ionic_qp_rq_init(dev, ctx, qp, &req.rq, &rq_buf,
+ attr->cap.max_recv_wr, attr->cap.max_recv_sge,
+ req.rq_spec, udata);
+ if (rc)
+ goto err_rq;
+
+ rc = ionic_create_qp_cmd(dev, pd,
+ to_ionic_vcq_cq(attr->send_cq, qp->udma_idx),
+ to_ionic_vcq_cq(attr->recv_cq, qp->udma_idx),
+ qp, &sq_buf, &rq_buf, attr);
+ if (rc)
+ goto err_cmd;
+
+ if (udata) {
+ resp.qpid = qp->qpid;
+ resp.udma_idx = qp->udma_idx;
+
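+ /* Expose each CMB region to userspace through an mmap entry,
+ * and report back in resp which WC/UC caching mode the
+ * mapping will use.
+ */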
+ if (qp->sq_cmb & IONIC_CMB_ENABLE) {
+ bool wc;
+
+ if ((qp->sq_cmb & (IONIC_CMB_WC | IONIC_CMB_UC)) ==
+ (IONIC_CMB_WC | IONIC_CMB_UC)) {
+ ibdev_dbg(&dev->ibdev,
+ "Both sq_cmb flags IONIC_CMB_WC and IONIC_CMB_UC are set, using default driver mapping\n");
+ qp->sq_cmb &= ~(IONIC_CMB_WC | IONIC_CMB_UC);
+ }
+
+ wc = (qp->sq_cmb & (IONIC_CMB_WC | IONIC_CMB_UC))
+ != IONIC_CMB_UC;
+
+ /* let userspace know the mapping */
+ if (wc)
+ qp->sq_cmb |= IONIC_CMB_WC;
+ else
+ qp->sq_cmb |= IONIC_CMB_UC;
+
+ qp->mmap_sq_cmb =
+ ionic_mmap_entry_insert(ctx,
+ qp->sq.size,
+ PHYS_PFN(qp->sq_cmb_addr),
+ wc ? IONIC_MMAP_WC : 0,
+ &resp.sq_cmb_offset);
+ if (!qp->mmap_sq_cmb) {
+ rc = -ENOMEM;
+ goto err_mmap_sq;
+ }
+
+ resp.sq_cmb = qp->sq_cmb;
+ }
+
+ if (qp->rq_cmb & IONIC_CMB_ENABLE) {
+ bool wc;
+
+ if ((qp->rq_cmb & (IONIC_CMB_WC | IONIC_CMB_UC)) ==
+ (IONIC_CMB_WC | IONIC_CMB_UC)) {
+ ibdev_dbg(&dev->ibdev,
+ "Both rq_cmb flags IONIC_CMB_WC and IONIC_CMB_UC are set, using default driver mapping\n");
+ qp->rq_cmb &= ~(IONIC_CMB_WC | IONIC_CMB_UC);
+ }
+
+ if (qp->rq_cmb & IONIC_CMB_EXPDB)
+ wc = (qp->rq_cmb & (IONIC_CMB_WC | IONIC_CMB_UC))
+ == IONIC_CMB_WC;
+ else
+ wc = (qp->rq_cmb & (IONIC_CMB_WC | IONIC_CMB_UC))
+ != IONIC_CMB_UC;
+
+ /* let userspace know the mapping */
+ if (wc)
+ qp->rq_cmb |= IONIC_CMB_WC;
+ else
+ qp->rq_cmb |= IONIC_CMB_UC;
+
+ qp->mmap_rq_cmb =
+ ionic_mmap_entry_insert(ctx,
+ qp->rq.size,
+ PHYS_PFN(qp->rq_cmb_addr),
+ wc ? IONIC_MMAP_WC : 0,
+ &resp.rq_cmb_offset);
+ if (!qp->mmap_rq_cmb) {
+ rc = -ENOMEM;
+ goto err_mmap_rq;
+ }
+
+ resp.rq_cmb = qp->rq_cmb;
+ }
+
+ rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
+ if (rc)
+ goto err_resp;
+ }
+
+ ionic_pgtbl_unbuf(dev, &rq_buf);
+ ionic_pgtbl_unbuf(dev, &sq_buf);
+
+ qp->ibqp.qp_num = qp->qpid;
+
+ init_completion(&qp->qp_rel_comp);
+ kref_init(&qp->qp_kref);
+
+ entry = xa_store_irq(&dev->qp_tbl, qp->qpid, qp, GFP_KERNEL);
+ if (entry) {
+ if (!xa_is_err(entry))
+ rc = -EINVAL;
+ else
+ rc = xa_err(entry);
+
+ goto err_resp;
+ }
+
+ if (qp->has_sq) {
+ cq = to_ionic_vcq_cq(attr->send_cq, qp->udma_idx);
+
+ attr->cap.max_send_wr = qp->sq.mask;
+ attr->cap.max_send_sge =
+ ionic_v1_send_wqe_max_sge(qp->sq.stride_log2,
+ qp->sq_spec,
+ qp->sq_cmb & IONIC_CMB_EXPDB);
+ attr->cap.max_inline_data =
+ ionic_v1_send_wqe_max_data(qp->sq.stride_log2,
+ qp->sq_cmb &
+ IONIC_CMB_EXPDB);
+ qp->sq_cqid = cq->cqid;
+ }
+
+ if (qp->has_rq) {
+ cq = to_ionic_vcq_cq(attr->recv_cq, qp->udma_idx);
+
+ attr->cap.max_recv_wr = qp->rq.mask;
+ attr->cap.max_recv_sge =
+ ionic_v1_recv_wqe_max_sge(qp->rq.stride_log2,
+ qp->rq_spec,
+ qp->rq_cmb & IONIC_CMB_EXPDB);
+ qp->rq_cqid = cq->cqid;
+ }
+
+ return 0;
+
+err_resp:
+ if (udata && (qp->rq_cmb & IONIC_CMB_ENABLE))
+ rdma_user_mmap_entry_remove(qp->mmap_rq_cmb);
+err_mmap_rq:
+ if (udata && (qp->sq_cmb & IONIC_CMB_ENABLE))
+ rdma_user_mmap_entry_remove(qp->mmap_sq_cmb);
+err_mmap_sq:
+ ionic_destroy_qp_cmd(dev, qp->qpid);
+err_cmd:
+ ionic_pgtbl_unbuf(dev, &rq_buf);
+ ionic_qp_rq_destroy(dev, ctx, qp);
+err_rq:
+ ionic_pgtbl_unbuf(dev, &sq_buf);
+ ionic_qp_sq_destroy(dev, ctx, qp);
+err_sq:
+ if (qp->has_ah)
+ ionic_put_ahid(dev, qp->ahid);
+err_ahid:
+ kfree(qp->hdr);
+err_ah_alloc:
+ ionic_put_qpid(dev, qp->qpid);
+ return rc;
+}
+
+void ionic_notify_flush_cq(struct ionic_cq *cq)
+{
+ if (cq->flush && cq->vcq->ibcq.comp_handler)
+ cq->vcq->ibcq.comp_handler(&cq->vcq->ibcq,
+ cq->vcq->ibcq.cq_context);
+}
+
+static void ionic_notify_qp_cqs(struct ionic_ibdev *dev, struct ionic_qp *qp)
+{
+ if (qp->ibqp.send_cq)
+ ionic_notify_flush_cq(to_ionic_vcq_cq(qp->ibqp.send_cq,
+ qp->udma_idx));
+ if (qp->ibqp.recv_cq && qp->ibqp.recv_cq != qp->ibqp.send_cq)
+ ionic_notify_flush_cq(to_ionic_vcq_cq(qp->ibqp.recv_cq,
+ qp->udma_idx));
+}
+
+void ionic_flush_qp(struct ionic_ibdev *dev, struct ionic_qp *qp)
+{
+ unsigned long irqflags;
+ struct ionic_cq *cq;
+
+ if (qp->ibqp.send_cq) {
+ cq = to_ionic_vcq_cq(qp->ibqp.send_cq, qp->udma_idx);
+
+ /* Hold the CQ lock and QP sq_lock to set up flush */
+ spin_lock_irqsave(&cq->lock, irqflags);
+ spin_lock(&qp->sq_lock);
+ qp->sq_flush = true;
+ if (!ionic_queue_empty(&qp->sq)) {
+ cq->flush = true;
+ list_move_tail(&qp->cq_flush_sq, &cq->flush_sq);
+ }
+ spin_unlock(&qp->sq_lock);
+ spin_unlock_irqrestore(&cq->lock, irqflags);
+ }
+
+ if (qp->ibqp.recv_cq) {
+ cq = to_ionic_vcq_cq(qp->ibqp.recv_cq, qp->udma_idx);
+
+ /* Hold the CQ lock and QP rq_lock to set up flush */
+ spin_lock_irqsave(&cq->lock, irqflags);
+ spin_lock(&qp->rq_lock);
+ qp->rq_flush = true;
+ if (!ionic_queue_empty(&qp->rq)) {
+ cq->flush = true;
+ list_move_tail(&qp->cq_flush_rq, &cq->flush_rq);
+ }
+ spin_unlock(&qp->rq_lock);
+ spin_unlock_irqrestore(&cq->lock, irqflags);
+ }
+}
+
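+/*
+ * Invalidate completions in the cq that belong to the given qp, walking
+ * forward from the producer until the color bit flips (entries the device
+ * has not yet produced). Admin completions are left untouched.
+ */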
+static void ionic_clean_cq(struct ionic_cq *cq, u32 qpid)
+{
+ struct ionic_v1_cqe *qcqe;
+ int prod, qtf, qid, type;
+ bool color;
+
+ if (!cq->q.ptr)
+ return;
+
+ color = cq->color;
+ prod = cq->q.prod;
+ qcqe = ionic_queue_at(&cq->q, prod);
+
+ while (color == ionic_v1_cqe_color(qcqe)) {
+ qtf = ionic_v1_cqe_qtf(qcqe);
+ qid = ionic_v1_cqe_qtf_qid(qtf);
+ type = ionic_v1_cqe_qtf_type(qtf);
+
+ if (qid == qpid && type != IONIC_V1_CQE_TYPE_ADMIN)
+ ionic_v1_cqe_clean(qcqe);
+
+ prod = ionic_queue_next(&cq->q, prod);
+ qcqe = ionic_queue_at(&cq->q, prod);
+ color = ionic_color_wrap(prod, color);
+ }
+}
+
+static void ionic_reset_qp(struct ionic_ibdev *dev, struct ionic_qp *qp)
+{
+ unsigned long irqflags;
+ struct ionic_cq *cq;
+ int i;
+
+ local_irq_save(irqflags);
+
+ if (qp->ibqp.send_cq) {
+ cq = to_ionic_vcq_cq(qp->ibqp.send_cq, qp->udma_idx);
+ spin_lock(&cq->lock);
+ ionic_clean_cq(cq, qp->qpid);
+ spin_unlock(&cq->lock);
+ }
+
+ if (qp->ibqp.recv_cq) {
+ cq = to_ionic_vcq_cq(qp->ibqp.recv_cq, qp->udma_idx);
+ spin_lock(&cq->lock);
+ ionic_clean_cq(cq, qp->qpid);
+ spin_unlock(&cq->lock);
+ }
+
+ if (qp->has_sq) {
+ spin_lock(&qp->sq_lock);
+ qp->sq_flush = false;
+ qp->sq_flush_rcvd = false;
+ qp->sq_msn_prod = 0;
+ qp->sq_msn_cons = 0;
+ qp->sq.prod = 0;
+ qp->sq.cons = 0;
+ spin_unlock(&qp->sq_lock);
+ }
+
+ if (qp->has_rq) {
+ spin_lock(&qp->rq_lock);
+ qp->rq_flush = false;
+ qp->rq.prod = 0;
+ qp->rq.cons = 0;
+ if (qp->rq_meta) {
+ for (i = 0; i < qp->rq.mask; ++i)
+ qp->rq_meta[i].next = &qp->rq_meta[i + 1];
+ qp->rq_meta[i].next = IONIC_META_LAST;
+ qp->rq_meta_head = &qp->rq_meta[0];
+ }
+ spin_unlock(&qp->rq_lock);
+ }
+
+ local_irq_restore(irqflags);
+}
+
+static bool ionic_qp_cur_state_is_ok(enum ib_qp_state q_state,
+ enum ib_qp_state attr_state)
+{
+ if (q_state == attr_state)
+ return true;
+
+ if (attr_state == IB_QPS_ERR)
+ return true;
+
+ if (attr_state == IB_QPS_SQE)
+ return q_state == IB_QPS_RTS || q_state == IB_QPS_SQD;
+
+ return false;
+}
+
+static int ionic_check_modify_qp(struct ionic_qp *qp, struct ib_qp_attr *attr,
+ int mask)
+{
+ enum ib_qp_state cur_state = (mask & IB_QP_CUR_STATE) ?
+ attr->cur_qp_state : qp->state;
+ enum ib_qp_state next_state = (mask & IB_QP_STATE) ?
+ attr->qp_state : cur_state;
+
+ if ((mask & IB_QP_CUR_STATE) &&
+ !ionic_qp_cur_state_is_ok(qp->state, attr->cur_qp_state))
+ return -EINVAL;
+
+ if (!ib_modify_qp_is_ok(cur_state, next_state, qp->ibqp.qp_type, mask))
+ return -EINVAL;
+
+ /* unprivileged qp not allowed privileged qkey */
+ if ((mask & IB_QP_QKEY) && (attr->qkey & 0x80000000) &&
+ qp->ibqp.uobject)
+ return -EPERM;
+
+ return 0;
+}
+
+int ionic_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int mask,
+ struct ib_udata *udata)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibqp->device);
+ struct ionic_pd *pd = to_ionic_pd(ibqp->pd);
+ struct ionic_qp *qp = to_ionic_qp(ibqp);
+ int rc;
+
+ rc = ionic_check_modify_qp(qp, attr, mask);
+ if (rc)
+ return rc;
+
+ if (mask & IB_QP_CAP)
+ return -EINVAL;
+
+ rc = ionic_modify_qp_cmd(dev, pd, qp, attr, mask);
+ if (rc)
+ return rc;
+
+ if (mask & IB_QP_STATE) {
+ qp->state = attr->qp_state;
+
+ if (attr->qp_state == IB_QPS_ERR) {
+ ionic_flush_qp(dev, qp);
+ ionic_notify_qp_cqs(dev, qp);
+ } else if (attr->qp_state == IB_QPS_RESET) {
+ ionic_reset_qp(dev, qp);
+ }
+ }
+
+ return 0;
+}
+
+int ionic_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int mask, struct ib_qp_init_attr *init_attr)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibqp->device);
+ struct ionic_qp *qp = to_ionic_qp(ibqp);
+ int rc;
+
+ memset(attr, 0, sizeof(*attr));
+ memset(init_attr, 0, sizeof(*init_attr));
+
+ rc = ionic_query_qp_cmd(dev, qp, attr, mask);
+ if (rc)
+ return rc;
+
+ if (qp->has_sq)
+ attr->cap.max_send_wr = qp->sq.mask;
+
+ if (qp->has_rq)
+ attr->cap.max_recv_wr = qp->rq.mask;
+
+ init_attr->event_handler = ibqp->event_handler;
+ init_attr->qp_context = ibqp->qp_context;
+ init_attr->send_cq = ibqp->send_cq;
+ init_attr->recv_cq = ibqp->recv_cq;
+ init_attr->srq = ibqp->srq;
+ init_attr->xrcd = ibqp->xrcd;
+ init_attr->cap = attr->cap;
+ init_attr->sq_sig_type = qp->sig_all ?
+ IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
+ init_attr->qp_type = ibqp->qp_type;
+ init_attr->create_flags = 0;
+ init_attr->port_num = 0;
+ init_attr->rwq_ind_tbl = ibqp->rwq_ind_tbl;
+ init_attr->source_qpn = 0;
+
+ return rc;
+}
+
+int ionic_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
+{
+ struct ionic_ctx *ctx =
+ rdma_udata_to_drv_context(udata, struct ionic_ctx, ibctx);
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibqp->device);
+ struct ionic_qp *qp = to_ionic_qp(ibqp);
+ unsigned long irqflags;
+ struct ionic_cq *cq;
+ int rc;
+
+ rc = ionic_destroy_qp_cmd(dev, qp->qpid);
+ if (rc)
+ return rc;
+
+ xa_erase_irq(&dev->qp_tbl, qp->qpid);
+
+ kref_put(&qp->qp_kref, ionic_qp_complete);
+ wait_for_completion(&qp->qp_rel_comp);
+
+ if (qp->ibqp.send_cq) {
+ cq = to_ionic_vcq_cq(qp->ibqp.send_cq, qp->udma_idx);
+ spin_lock_irqsave(&cq->lock, irqflags);
+ ionic_clean_cq(cq, qp->qpid);
+ list_del(&qp->cq_poll_sq);
+ list_del(&qp->cq_flush_sq);
+ spin_unlock_irqrestore(&cq->lock, irqflags);
+ }
+
+ if (qp->ibqp.recv_cq) {
+ cq = to_ionic_vcq_cq(qp->ibqp.recv_cq, qp->udma_idx);
+ spin_lock_irqsave(&cq->lock, irqflags);
+ ionic_clean_cq(cq, qp->qpid);
+ list_del(&qp->cq_flush_rq);
+ spin_unlock_irqrestore(&cq->lock, irqflags);
+ }
+
+ ionic_qp_rq_destroy(dev, ctx, qp);
+ ionic_qp_sq_destroy(dev, ctx, qp);
+ if (qp->has_ah) {
+ ionic_put_ahid(dev, qp->ahid);
+ kfree(qp->hdr);
+ }
+ ionic_put_qpid(dev, qp->qpid);
+
+ return 0;
+}
diff --git a/drivers/infiniband/hw/ionic/ionic_datapath.c b/drivers/infiniband/hw/ionic/ionic_datapath.c
new file mode 100644
index 000000000000..aa2944887f23
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/ionic_datapath.c
@@ -0,0 +1,1399 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_user_verbs.h>
+
+#include "ionic_fw.h"
+#include "ionic_ibdev.h"
+
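+/* Pick the v1 or v2 opcode encoding matching the device's rdma version. */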
+#define IONIC_OP(version, opname) \
+ ((version) < 2 ? IONIC_V1_OP_##opname : IONIC_V2_OP_##opname)
+
+static bool ionic_next_cqe(struct ionic_ibdev *dev, struct ionic_cq *cq,
+ struct ionic_v1_cqe **cqe)
+{
+ struct ionic_v1_cqe *qcqe = ionic_queue_at_prod(&cq->q);
+
+ if (unlikely(cq->color != ionic_v1_cqe_color(qcqe)))
+ return false;
+
+ /* Prevent out-of-order reads of the CQE */
+ dma_rmb();
+
+ *cqe = qcqe;
+
+ return true;
+}
+
+static int ionic_flush_recv(struct ionic_qp *qp, struct ib_wc *wc)
+{
+ struct ionic_rq_meta *meta;
+ struct ionic_v1_wqe *wqe;
+
+ if (!qp->rq_flush)
+ return 0;
+
+ if (ionic_queue_empty(&qp->rq))
+ return 0;
+
+ wqe = ionic_queue_at_cons(&qp->rq);
+
+ /* wqe_id must be a valid queue index */
+ if (unlikely(wqe->base.wqe_id >> qp->rq.depth_log2)) {
+ ibdev_warn(qp->ibqp.device,
+ "flush qp %u recv index %llu invalid\n",
+ qp->qpid, (unsigned long long)wqe->base.wqe_id);
+ return -EIO;
+ }
+
+ /* wqe_id must indicate a request that is outstanding */
+ meta = &qp->rq_meta[wqe->base.wqe_id];
+ if (unlikely(meta->next != IONIC_META_POSTED)) {
+ ibdev_warn(qp->ibqp.device,
+ "flush qp %u recv index %llu not posted\n",
+ qp->qpid, (unsigned long long)wqe->base.wqe_id);
+ return -EIO;
+ }
+
+ ionic_queue_consume(&qp->rq);
+
+ memset(wc, 0, sizeof(*wc));
+
+ wc->status = IB_WC_WR_FLUSH_ERR;
+ wc->wr_id = meta->wrid;
+ wc->qp = &qp->ibqp;
+
+ meta->next = qp->rq_meta_head;
+ qp->rq_meta_head = meta;
+
+ return 1;
+}
+
+static int ionic_flush_recv_many(struct ionic_qp *qp,
+ struct ib_wc *wc, int nwc)
+{
+ int rc = 0, npolled = 0;
+
+ while (npolled < nwc) {
+ rc = ionic_flush_recv(qp, wc + npolled);
+ if (rc <= 0)
+ break;
+
+ npolled += rc;
+ }
+
+ return npolled ?: rc;
+}
+
+static int ionic_flush_send(struct ionic_qp *qp, struct ib_wc *wc)
+{
+ struct ionic_sq_meta *meta;
+
+ if (!qp->sq_flush)
+ return 0;
+
+ if (ionic_queue_empty(&qp->sq))
+ return 0;
+
+ meta = &qp->sq_meta[qp->sq.cons];
+
+ ionic_queue_consume(&qp->sq);
+
+ memset(wc, 0, sizeof(*wc));
+
+ wc->status = IB_WC_WR_FLUSH_ERR;
+ wc->wr_id = meta->wrid;
+ wc->qp = &qp->ibqp;
+
+ return 1;
+}
+
+static int ionic_flush_send_many(struct ionic_qp *qp,
+ struct ib_wc *wc, int nwc)
+{
+ int rc = 0, npolled = 0;
+
+ while (npolled < nwc) {
+ rc = ionic_flush_send(qp, wc + npolled);
+ if (rc <= 0)
+ break;
+
+ npolled += rc;
+ }
+
+ return npolled ?: rc;
+}
+
+static int ionic_poll_recv(struct ionic_ibdev *dev, struct ionic_cq *cq,
+ struct ionic_qp *cqe_qp, struct ionic_v1_cqe *cqe,
+ struct ib_wc *wc)
+{
+ struct ionic_qp *qp = NULL;
+ struct ionic_rq_meta *meta;
+ u32 src_qpn, st_len;
+ u16 vlan_tag;
+ u8 op;
+
+ if (cqe_qp->rq_flush)
+ return 0;
+
+ qp = cqe_qp;
+
+ st_len = be32_to_cpu(cqe->status_length);
+
+ /* ignore wqe_id in case of flush error */
+ if (ionic_v1_cqe_error(cqe) && st_len == IONIC_STS_WQE_FLUSHED_ERR) {
+ cqe_qp->rq_flush = true;
+ cq->flush = true;
+ list_move_tail(&qp->cq_flush_rq, &cq->flush_rq);
+
+ /* posted recvs (if any) flushed by ionic_flush_recv */
+ return 0;
+ }
+
+ /* there had better be something in the recv queue to complete */
+ if (ionic_queue_empty(&qp->rq)) {
+ ibdev_warn(&dev->ibdev, "qp %u is empty\n", qp->qpid);
+ return -EIO;
+ }
+
+ /* wqe_id must be a valid queue index */
+ if (unlikely(cqe->recv.wqe_id >> qp->rq.depth_log2)) {
+ ibdev_warn(&dev->ibdev,
+ "qp %u recv index %llu invalid\n",
+ qp->qpid, (unsigned long long)cqe->recv.wqe_id);
+ return -EIO;
+ }
+
+ /* wqe_id must indicate a request that is outstanding */
+ meta = &qp->rq_meta[cqe->recv.wqe_id];
+ if (unlikely(meta->next != IONIC_META_POSTED)) {
+ ibdev_warn(&dev->ibdev,
+ "qp %u recv index %llu not posted\n",
+ qp->qpid, (unsigned long long)cqe->recv.wqe_id);
+ return -EIO;
+ }
+
+ meta->next = qp->rq_meta_head;
+ qp->rq_meta_head = meta;
+
+ memset(wc, 0, sizeof(*wc));
+
+ wc->wr_id = meta->wrid;
+
+ wc->qp = &cqe_qp->ibqp;
+
+ if (ionic_v1_cqe_error(cqe)) {
+ wc->vendor_err = st_len;
+ wc->status = ionic_to_ib_status(st_len);
+
+ cqe_qp->rq_flush = true;
+ cq->flush = true;
+ list_move_tail(&qp->cq_flush_rq, &cq->flush_rq);
+
+ ibdev_warn(&dev->ibdev,
+ "qp %u recv cqe with error\n", qp->qpid);
+ print_hex_dump(KERN_WARNING, "cqe ", DUMP_PREFIX_OFFSET, 16, 1,
+ cqe, BIT(cq->q.stride_log2), true);
+ goto out;
+ }
+
+ wc->vendor_err = 0;
+ wc->status = IB_WC_SUCCESS;
+
+ src_qpn = be32_to_cpu(cqe->recv.src_qpn_op);
+ op = src_qpn >> IONIC_V1_CQE_RECV_OP_SHIFT;
+
+ src_qpn &= IONIC_V1_CQE_RECV_QPN_MASK;
+ op &= IONIC_V1_CQE_RECV_OP_MASK;
+
+ wc->opcode = IB_WC_RECV;
+ switch (op) {
+ case IONIC_V1_CQE_RECV_OP_RDMA_IMM:
+ wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
+ wc->wc_flags |= IB_WC_WITH_IMM;
+ wc->ex.imm_data = cqe->recv.imm_data_rkey; /* be32 in wc */
+ break;
+ case IONIC_V1_CQE_RECV_OP_SEND_IMM:
+ wc->wc_flags |= IB_WC_WITH_IMM;
+ wc->ex.imm_data = cqe->recv.imm_data_rkey; /* be32 in wc */
+ break;
+ case IONIC_V1_CQE_RECV_OP_SEND_INV:
+ wc->wc_flags |= IB_WC_WITH_INVALIDATE;
+ wc->ex.invalidate_rkey = be32_to_cpu(cqe->recv.imm_data_rkey);
+ break;
+ }
+
+ wc->byte_len = st_len;
+ wc->src_qp = src_qpn;
+
+ if (qp->ibqp.qp_type == IB_QPT_UD ||
+ qp->ibqp.qp_type == IB_QPT_GSI) {
+ wc->wc_flags |= IB_WC_GRH | IB_WC_WITH_SMAC;
+ ether_addr_copy(wc->smac, cqe->recv.src_mac);
+
+ wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
+ if (ionic_v1_cqe_recv_is_ipv4(cqe))
+ wc->network_hdr_type = RDMA_NETWORK_IPV4;
+ else
+ wc->network_hdr_type = RDMA_NETWORK_IPV6;
+
+ if (ionic_v1_cqe_recv_is_vlan(cqe))
+ wc->wc_flags |= IB_WC_WITH_VLAN;
+
+ /* vlan_tag in cqe will be valid from dpath even if no vlan */
+ vlan_tag = be16_to_cpu(cqe->recv.vlan_tag);
+ wc->vlan_id = vlan_tag & 0xfff; /* 802.1q VID */
+ wc->sl = vlan_tag >> VLAN_PRIO_SHIFT; /* 802.1q PCP */
+ }
+
+ wc->pkey_index = 0;
+ wc->port_num = 1;
+
+out:
+ ionic_queue_consume(&qp->rq);
+
+ return 1;
+}
+
+static bool ionic_peek_send(struct ionic_qp *qp)
+{
+ struct ionic_sq_meta *meta;
+
+ if (qp->sq_flush)
+ return false;
+
+ /* completed all send queue requests */
+ if (ionic_queue_empty(&qp->sq))
+ return false;
+
+ meta = &qp->sq_meta[qp->sq.cons];
+
+ /* waiting for remote completion */
+ if (meta->remote && meta->seq == qp->sq_msn_cons)
+ return false;
+
+ /* waiting for local completion */
+ if (!meta->remote && !meta->local_comp)
+ return false;
+
+ return true;
+}
+
+static int ionic_poll_send(struct ionic_ibdev *dev, struct ionic_cq *cq,
+ struct ionic_qp *qp, struct ib_wc *wc)
+{
+ struct ionic_sq_meta *meta;
+
+ if (qp->sq_flush)
+ return 0;
+
+ do {
+ /* completed all send queue requests */
+ if (ionic_queue_empty(&qp->sq))
+ goto out_empty;
+
+ meta = &qp->sq_meta[qp->sq.cons];
+
+ /* waiting for remote completion */
+ if (meta->remote && meta->seq == qp->sq_msn_cons)
+ goto out_empty;
+
+ /* waiting for local completion */
+ if (!meta->remote && !meta->local_comp)
+ goto out_empty;
+
+ ionic_queue_consume(&qp->sq);
+
+ /* produce wc only if signaled or error status */
+ } while (!meta->signal && meta->ibsts == IB_WC_SUCCESS);
+
+ memset(wc, 0, sizeof(*wc));
+
+ wc->status = meta->ibsts;
+ wc->wr_id = meta->wrid;
+ wc->qp = &qp->ibqp;
+
+ if (meta->ibsts == IB_WC_SUCCESS) {
+ wc->byte_len = meta->len;
+ wc->opcode = meta->ibop;
+ } else {
+ wc->vendor_err = meta->len;
+
+ qp->sq_flush = true;
+ cq->flush = true;
+ list_move_tail(&qp->cq_flush_sq, &cq->flush_sq);
+ }
+
+ return 1;
+
+out_empty:
+ if (qp->sq_flush_rcvd) {
+ qp->sq_flush = true;
+ cq->flush = true;
+ list_move_tail(&qp->cq_flush_sq, &cq->flush_sq);
+ }
+ return 0;
+}
+
+static int ionic_poll_send_many(struct ionic_ibdev *dev, struct ionic_cq *cq,
+ struct ionic_qp *qp, struct ib_wc *wc, int nwc)
+{
+ int rc = 0, npolled = 0;
+
+ while (npolled < nwc) {
+ rc = ionic_poll_send(dev, cq, qp, wc + npolled);
+ if (rc <= 0)
+ break;
+
+ npolled += rc;
+ }
+
+ return npolled ?: rc;
+}
+
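+/*
+ * A completion index is valid only if it lies between the consumer and
+ * the producer in ring order. For example, with prod = 2, cons = 200 and
+ * mask = 255: comp = 1 is accepted ((2 - 200) & 255 = 58 is greater than
+ * (1 - 200) & 255 = 57), while comp = 2 would claim the unproduced entry
+ * at prod and is rejected.
+ */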
+static int ionic_validate_cons(u16 prod, u16 cons,
+ u16 comp, u16 mask)
+{
+ if (((prod - cons) & mask) <= ((comp - cons) & mask))
+ return -EIO;
+
+ return 0;
+}
+
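+/*
+ * Completion by message sequence number (msn): a single cqe retires every
+ * remote request whose sequence number precedes the reported msn, so
+ * individual requests need not each receive their own cqe.
+ */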
+static int ionic_comp_msn(struct ionic_qp *qp, struct ionic_v1_cqe *cqe)
+{
+ struct ionic_sq_meta *meta;
+ u16 cqe_seq, cqe_idx;
+ int rc;
+
+ if (qp->sq_flush)
+ return 0;
+
+ cqe_seq = be32_to_cpu(cqe->send.msg_msn) & qp->sq.mask;
+
+ rc = ionic_validate_cons(qp->sq_msn_prod,
+ qp->sq_msn_cons,
+ cqe_seq - 1,
+ qp->sq.mask);
+ if (rc) {
+ ibdev_warn(qp->ibqp.device,
+ "qp %u bad msn %#x seq %u for prod %u cons %u\n",
+ qp->qpid, be32_to_cpu(cqe->send.msg_msn),
+ cqe_seq, qp->sq_msn_prod, qp->sq_msn_cons);
+ return rc;
+ }
+
+ qp->sq_msn_cons = cqe_seq;
+
+ if (ionic_v1_cqe_error(cqe)) {
+ cqe_idx = qp->sq_msn_idx[(cqe_seq - 1) & qp->sq.mask];
+
+ meta = &qp->sq_meta[cqe_idx];
+ meta->len = be32_to_cpu(cqe->status_length);
+ meta->ibsts = ionic_to_ib_status(meta->len);
+
+ ibdev_warn(qp->ibqp.device,
+ "qp %u msn cqe with error\n", qp->qpid);
+ print_hex_dump(KERN_WARNING, "cqe ", DUMP_PREFIX_OFFSET, 16, 1,
+ cqe, sizeof(*cqe), true);
+ }
+
+ return 0;
+}
+
+static int ionic_comp_npg(struct ionic_qp *qp, struct ionic_v1_cqe *cqe)
+{
+ struct ionic_sq_meta *meta;
+ u16 cqe_idx;
+ u32 st_len;
+
+ if (qp->sq_flush)
+ return 0;
+
+ st_len = be32_to_cpu(cqe->status_length);
+
+ if (ionic_v1_cqe_error(cqe) && st_len == IONIC_STS_WQE_FLUSHED_ERR) {
+ /*
+ * Flush cqe does not consume a wqe on the device, and maybe
+ * no such work request is posted.
+ *
+ * The driver should begin flushing after the last indicated
+ * normal or error completion. Here, only set a hint that the
+ * flush request was indicated. In poll_send, if nothing more
+ * can be polled normally, then begin flushing.
+ */
+ qp->sq_flush_rcvd = true;
+ return 0;
+ }
+
+ cqe_idx = cqe->send.npg_wqe_id & qp->sq.mask;
+ meta = &qp->sq_meta[cqe_idx];
+ meta->local_comp = true;
+
+ if (ionic_v1_cqe_error(cqe)) {
+ meta->len = st_len;
+ meta->ibsts = ionic_to_ib_status(st_len);
+ meta->remote = false;
+ ibdev_warn(qp->ibqp.device,
+ "qp %u npg cqe with error\n", qp->qpid);
+ print_hex_dump(KERN_WARNING, "cqe ", DUMP_PREFIX_OFFSET, 16, 1,
+ cqe, sizeof(*cqe), true);
+ }
+
+ return 0;
+}
+
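+/*
+ * CQ credit accounting: each posted work request spends one credit, and
+ * when credit is depleted the consumed entries are returned to the
+ * device by syncing the consumer index and ringing the cq doorbell.
+ */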
+static void ionic_reserve_sync_cq(struct ionic_ibdev *dev, struct ionic_cq *cq)
+{
+ if (!ionic_queue_empty(&cq->q)) {
+ cq->credit += ionic_queue_length(&cq->q);
+ cq->q.cons = cq->q.prod;
+
+ ionic_dbell_ring(dev->lif_cfg.dbpage, dev->lif_cfg.cq_qtype,
+ ionic_queue_dbell_val(&cq->q));
+ }
+}
+
+static void ionic_reserve_cq(struct ionic_ibdev *dev, struct ionic_cq *cq,
+ int spend)
+{
+ cq->credit -= spend;
+
+ if (cq->credit <= 0)
+ ionic_reserve_sync_cq(dev, cq);
+}
+
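+/*
+ * Poll one cq in three passes: first drain qps already marked pollable
+ * for send completions, then consume new cqes from the queue, and
+ * finally generate flush completions for queues in the error state.
+ */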
+static int ionic_poll_vcq_cq(struct ionic_ibdev *dev,
+ struct ionic_cq *cq,
+ int nwc, struct ib_wc *wc)
+{
+ struct ionic_qp *qp, *qp_next;
+ struct ionic_v1_cqe *cqe;
+ int rc = 0, npolled = 0;
+ unsigned long irqflags;
+ u32 qtf, qid;
+ bool peek;
+ u8 type;
+
+ if (nwc < 1)
+ return 0;
+
+ spin_lock_irqsave(&cq->lock, irqflags);
+
+ /* poll already indicated work completions for send queue */
+ list_for_each_entry_safe(qp, qp_next, &cq->poll_sq, cq_poll_sq) {
+ if (npolled == nwc)
+ goto out;
+
+ spin_lock(&qp->sq_lock);
+ rc = ionic_poll_send_many(dev, cq, qp, wc + npolled,
+ nwc - npolled);
+ spin_unlock(&qp->sq_lock);
+
+ if (rc > 0)
+ npolled += rc;
+
+ if (npolled < nwc)
+ list_del_init(&qp->cq_poll_sq);
+ }
+
+ /* poll for more work completions */
+ while (likely(ionic_next_cqe(dev, cq, &cqe))) {
+ if (npolled == nwc)
+ goto out;
+
+ qtf = ionic_v1_cqe_qtf(cqe);
+ qid = ionic_v1_cqe_qtf_qid(qtf);
+ type = ionic_v1_cqe_qtf_type(qtf);
+
+ /*
+ * Safe to access the QP without an additional reference here
+ * because:
+ * 1. We hold cq->lock throughout.
+ * 2. ionic_destroy_qp() acquires the same cq->lock before cleanup.
+ * 3. The QP is removed from qp_tbl before any cleanup begins.
+ * This ensures no concurrent access between polling and destruction.
+ */
+ qp = xa_load(&dev->qp_tbl, qid);
+ if (unlikely(!qp)) {
+ ibdev_dbg(&dev->ibdev, "missing qp for qid %u\n", qid);
+ goto cq_next;
+ }
+
+ switch (type) {
+ case IONIC_V1_CQE_TYPE_RECV:
+ spin_lock(&qp->rq_lock);
+ rc = ionic_poll_recv(dev, cq, qp, cqe, wc + npolled);
+ spin_unlock(&qp->rq_lock);
+
+ if (rc < 0)
+ goto out;
+
+ npolled += rc;
+
+ break;
+
+ case IONIC_V1_CQE_TYPE_SEND_MSN:
+ spin_lock(&qp->sq_lock);
+ rc = ionic_comp_msn(qp, cqe);
+ if (!rc) {
+ rc = ionic_poll_send_many(dev, cq, qp,
+ wc + npolled,
+ nwc - npolled);
+ peek = ionic_peek_send(qp);
+ }
+ spin_unlock(&qp->sq_lock);
+
+ if (rc < 0)
+ goto out;
+
+ npolled += rc;
+
+ if (peek)
+ list_move_tail(&qp->cq_poll_sq, &cq->poll_sq);
+ break;
+
+ case IONIC_V1_CQE_TYPE_SEND_NPG:
+ spin_lock(&qp->sq_lock);
+ rc = ionic_comp_npg(qp, cqe);
+ if (!rc) {
+ rc = ionic_poll_send_many(dev, cq, qp,
+ wc + npolled,
+ nwc - npolled);
+ peek = ionic_peek_send(qp);
+ }
+ spin_unlock(&qp->sq_lock);
+
+ if (rc < 0)
+ goto out;
+
+ npolled += rc;
+
+ if (peek)
+ list_move_tail(&qp->cq_poll_sq, &cq->poll_sq);
+ break;
+
+ default:
+ ibdev_warn(&dev->ibdev,
+ "unexpected cqe type %u\n", type);
+ rc = -EIO;
+ goto out;
+ }
+
+cq_next:
+ ionic_queue_produce(&cq->q);
+ cq->color = ionic_color_wrap(cq->q.prod, cq->color);
+ }
+
+ /* lastly, flush send and recv queues */
+ if (likely(!cq->flush))
+ goto out;
+
+ cq->flush = false;
+
+ list_for_each_entry_safe(qp, qp_next, &cq->flush_sq, cq_flush_sq) {
+ if (npolled == nwc)
+ goto out;
+
+ spin_lock(&qp->sq_lock);
+ rc = ionic_flush_send_many(qp, wc + npolled, nwc - npolled);
+ spin_unlock(&qp->sq_lock);
+
+ if (rc > 0)
+ npolled += rc;
+
+ if (npolled < nwc)
+ list_del_init(&qp->cq_flush_sq);
+ else
+ cq->flush = true;
+ }
+
+ list_for_each_entry_safe(qp, qp_next, &cq->flush_rq, cq_flush_rq) {
+ if (npolled == nwc)
+ goto out;
+
+ spin_lock(&qp->rq_lock);
+ rc = ionic_flush_recv_many(qp, wc + npolled, nwc - npolled);
+ spin_unlock(&qp->rq_lock);
+
+ if (rc > 0)
+ npolled += rc;
+
+ if (npolled < nwc)
+ list_del_init(&qp->cq_flush_rq);
+ else
+ cq->flush = true;
+ }
+
+out:
+ /* in case credit was depleted (more work posted than cq depth) */
+ if (cq->credit <= 0)
+ ionic_reserve_sync_cq(dev, cq);
+
+ spin_unlock_irqrestore(&cq->lock, irqflags);
+
+ return npolled ?: rc;
+}
+
+int ionic_poll_cq(struct ib_cq *ibcq, int nwc, struct ib_wc *wc)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibcq->device);
+ struct ionic_vcq *vcq = to_ionic_vcq(ibcq);
+ int rc_tmp, rc = 0, npolled = 0;
+ int cq_i, cq_x, cq_ix;
+
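+ /* Alternate the starting cq between calls so both udma pipelines
+ * are polled fairly: with udma_count = 2 the xor flips poll_idx
+ * between 0 and 1, and with udma_count = 1 it is a no-op.
+ */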
+ cq_x = vcq->poll_idx;
+ vcq->poll_idx ^= dev->lif_cfg.udma_count - 1;
+
+ for (cq_i = 0; npolled < nwc && cq_i < dev->lif_cfg.udma_count; ++cq_i) {
+ cq_ix = cq_i ^ cq_x;
+
+ if (!(vcq->udma_mask & BIT(cq_ix)))
+ continue;
+
+ rc_tmp = ionic_poll_vcq_cq(dev, &vcq->cq[cq_ix],
+ nwc - npolled,
+ wc + npolled);
+
+ if (rc_tmp >= 0)
+ npolled += rc_tmp;
+ else if (!rc)
+ rc = rc_tmp;
+ }
+
+ return npolled ?: rc;
+}
+
+static int ionic_req_notify_vcq_cq(struct ionic_ibdev *dev, struct ionic_cq *cq,
+ enum ib_cq_notify_flags flags)
+{
+ u64 dbell_val = cq->q.dbell;
+
+ if (flags & IB_CQ_SOLICITED) {
+ cq->arm_sol_prod = ionic_queue_next(&cq->q, cq->arm_sol_prod);
+ dbell_val |= cq->arm_sol_prod | IONIC_CQ_RING_SOL;
+ } else {
+ cq->arm_any_prod = ionic_queue_next(&cq->q, cq->arm_any_prod);
+ dbell_val |= cq->arm_any_prod | IONIC_CQ_RING_ARM;
+ }
+
+ ionic_reserve_sync_cq(dev, cq);
+
+ ionic_dbell_ring(dev->lif_cfg.dbpage, dev->lif_cfg.cq_qtype, dbell_val);
+
+ /*
+ * IB_CQ_REPORT_MISSED_EVENTS:
+ *
+ * The queue index in ring zero guarantees no missed events.
+ *
+ * Here, we check if the color bit in the next cqe is flipped. If it
+ * is flipped, then progress can be made by immediately polling the cq.
+ * Still, the cq will be armed, and an event will be generated. The cq
+ * may be empty when polled after the event, because the next poll
+ * after arming the cq can empty it.
+ */
+ return (flags & IB_CQ_REPORT_MISSED_EVENTS) &&
+ cq->color == ionic_v1_cqe_color(ionic_queue_at_prod(&cq->q));
+}
+
+int ionic_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibcq->device);
+ struct ionic_vcq *vcq = to_ionic_vcq(ibcq);
+ int rc = 0, cq_i;
+
+ for (cq_i = 0; cq_i < dev->lif_cfg.udma_count; ++cq_i) {
+ if (!(vcq->udma_mask & BIT(cq_i)))
+ continue;
+
+ if (ionic_req_notify_vcq_cq(dev, &vcq->cq[cq_i], flags))
+ rc = 1;
+ }
+
+ return rc;
+}
+
+static s64 ionic_prep_inline(void *data, u32 max_data,
+ const struct ib_sge *ib_sgl, int num_sge)
+{
+ static const s64 bit_31 = 1u << 31;
+ s64 len = 0, sg_len;
+ int sg_i;
+
+ for (sg_i = 0; sg_i < num_sge; ++sg_i) {
+ sg_len = ib_sgl[sg_i].length;
+
+ /* sge length zero means 2GB */
+ if (unlikely(sg_len == 0))
+ sg_len = bit_31;
+
+ /* greater than max inline data is invalid */
+ if (unlikely(len + sg_len > max_data))
+ return -EINVAL;
+
+ memcpy(data + len, (void *)ib_sgl[sg_i].addr, sg_len);
+
+ len += sg_len;
+ }
+
+ return len;
+}
+
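+/*
+ * Build the wqe scatter/gather list. When spec (speculation) is enabled
+ * and the sge count exceeds IONIC_V1_SPEC_FIRST_SGE, a parallel array of
+ * sge lengths is also written (32-bit entries, or packed 16-bit entries
+ * when there are more than eight sges), presumably so the device can
+ * size its sge fetch up front.
+ */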
+static s64 ionic_prep_pld(struct ionic_v1_wqe *wqe,
+ union ionic_v1_pld *pld,
+ int spec, u32 max_sge,
+ const struct ib_sge *ib_sgl,
+ int num_sge)
+{
+ static const s64 bit_31 = 1u << 31;
+ struct ionic_sge *sgl;
+ __be32 *spec32 = NULL;
+ __be16 *spec16 = NULL;
+ s64 len = 0, sg_len;
+ int sg_i = 0;
+
+ if (unlikely(num_sge < 0 || (u32)num_sge > max_sge))
+ return -EINVAL;
+
+ if (spec && num_sge > IONIC_V1_SPEC_FIRST_SGE) {
+ sg_i = IONIC_V1_SPEC_FIRST_SGE;
+
+ if (num_sge > 8) {
+ wqe->base.flags |= cpu_to_be16(IONIC_V1_FLAG_SPEC16);
+ spec16 = pld->spec16;
+ } else {
+ wqe->base.flags |= cpu_to_be16(IONIC_V1_FLAG_SPEC32);
+ spec32 = pld->spec32;
+ }
+ }
+
+ sgl = &pld->sgl[sg_i];
+
+ for (sg_i = 0; sg_i < num_sge; ++sg_i) {
+ sg_len = ib_sgl[sg_i].length;
+
+ /* sge length zero means 2GB */
+ if (unlikely(sg_len == 0))
+ sg_len = bit_31;
+
+ /* greater than 2GB data is invalid */
+ if (unlikely(len + sg_len > bit_31))
+ return -EINVAL;
+
+ sgl[sg_i].va = cpu_to_be64(ib_sgl[sg_i].addr);
+ sgl[sg_i].len = cpu_to_be32(sg_len);
+ sgl[sg_i].lkey = cpu_to_be32(ib_sgl[sg_i].lkey);
+
+ if (spec32) {
+ spec32[sg_i] = sgl[sg_i].len;
+ } else if (spec16) {
+ if (unlikely(sg_len > U16_MAX))
+ return -EINVAL;
+ spec16[sg_i] = cpu_to_be16(sg_len);
+ }
+
+ len += sg_len;
+ }
+
+ return len;
+}
+
+static void ionic_prep_base(struct ionic_qp *qp,
+ const struct ib_send_wr *wr,
+ struct ionic_sq_meta *meta,
+ struct ionic_v1_wqe *wqe)
+{
+ meta->wrid = wr->wr_id;
+ meta->ibsts = IB_WC_SUCCESS;
+ meta->signal = false;
+ meta->local_comp = false;
+
+ wqe->base.wqe_id = qp->sq.prod;
+
+ if (wr->send_flags & IB_SEND_FENCE)
+ wqe->base.flags |= cpu_to_be16(IONIC_V1_FLAG_FENCE);
+
+ if (wr->send_flags & IB_SEND_SOLICITED)
+ wqe->base.flags |= cpu_to_be16(IONIC_V1_FLAG_SOL);
+
+ if (qp->sig_all || wr->send_flags & IB_SEND_SIGNALED) {
+ wqe->base.flags |= cpu_to_be16(IONIC_V1_FLAG_SIG);
+ meta->signal = true;
+ }
+
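+ /* Only requests that complete on the remote peer participate in
+ * the msn sequence; local work (and all UD/GSI work) is instead
+ * completed individually, via the npg completion path.
+ */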
+ meta->seq = qp->sq_msn_prod;
+ meta->remote =
+ qp->ibqp.qp_type != IB_QPT_UD &&
+ qp->ibqp.qp_type != IB_QPT_GSI &&
+ !ionic_ibop_is_local(wr->opcode);
+
+ if (meta->remote) {
+ qp->sq_msn_idx[meta->seq] = qp->sq.prod;
+ qp->sq_msn_prod = ionic_queue_next(&qp->sq, qp->sq_msn_prod);
+ }
+
+ ionic_queue_produce(&qp->sq);
+}
+
+static int ionic_prep_common(struct ionic_qp *qp,
+ const struct ib_send_wr *wr,
+ struct ionic_sq_meta *meta,
+ struct ionic_v1_wqe *wqe)
+{
+ s64 signed_len;
+ u32 mval;
+
+ if (wr->send_flags & IB_SEND_INLINE) {
+ wqe->base.num_sge_key = 0;
+ wqe->base.flags |= cpu_to_be16(IONIC_V1_FLAG_INL);
+ mval = ionic_v1_send_wqe_max_data(qp->sq.stride_log2, false);
+ signed_len = ionic_prep_inline(wqe->common.pld.data, mval,
+ wr->sg_list, wr->num_sge);
+ } else {
+ wqe->base.num_sge_key = wr->num_sge;
+ mval = ionic_v1_send_wqe_max_sge(qp->sq.stride_log2,
+ qp->sq_spec,
+ false);
+ signed_len = ionic_prep_pld(wqe, &wqe->common.pld,
+ qp->sq_spec, mval,
+ wr->sg_list, wr->num_sge);
+ }
+
+ if (unlikely(signed_len < 0))
+ return signed_len;
+
+ meta->len = signed_len;
+ wqe->common.length = cpu_to_be32(signed_len);
+
+ ionic_prep_base(qp, wr, meta, wqe);
+
+ return 0;
+}
+
+static void ionic_prep_sq_wqe(struct ionic_qp *qp, void *wqe)
+{
+ memset(wqe, 0, 1u << qp->sq.stride_log2);
+}
+
+static void ionic_prep_rq_wqe(struct ionic_qp *qp, void *wqe)
+{
+ memset(wqe, 0, 1u << qp->rq.stride_log2);
+}
+
+static int ionic_prep_send(struct ionic_qp *qp,
+ const struct ib_send_wr *wr)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(qp->ibqp.device);
+ struct ionic_sq_meta *meta;
+ struct ionic_v1_wqe *wqe;
+
+ meta = &qp->sq_meta[qp->sq.prod];
+ wqe = ionic_queue_at_prod(&qp->sq);
+
+ ionic_prep_sq_wqe(qp, wqe);
+
+ meta->ibop = IB_WC_SEND;
+
+ switch (wr->opcode) {
+ case IB_WR_SEND:
+ wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, SEND);
+ break;
+ case IB_WR_SEND_WITH_IMM:
+ wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, SEND_IMM);
+ wqe->base.imm_data_key = wr->ex.imm_data;
+ break;
+ case IB_WR_SEND_WITH_INV:
+ wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, SEND_INV);
+ wqe->base.imm_data_key =
+ cpu_to_be32(wr->ex.invalidate_rkey);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ionic_prep_common(qp, wr, meta, wqe);
+}
+
+static int ionic_prep_send_ud(struct ionic_qp *qp,
+ const struct ib_ud_wr *wr)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(qp->ibqp.device);
+ struct ionic_sq_meta *meta;
+ struct ionic_v1_wqe *wqe;
+ struct ionic_ah *ah;
+
+ if (unlikely(!wr->ah))
+ return -EINVAL;
+
+ ah = to_ionic_ah(wr->ah);
+
+ meta = &qp->sq_meta[qp->sq.prod];
+ wqe = ionic_queue_at_prod(&qp->sq);
+
+ ionic_prep_sq_wqe(qp, wqe);
+
+ wqe->common.send.ah_id = cpu_to_be32(ah->ahid);
+ wqe->common.send.dest_qpn = cpu_to_be32(wr->remote_qpn);
+ wqe->common.send.dest_qkey = cpu_to_be32(wr->remote_qkey);
+
+ meta->ibop = IB_WC_SEND;
+
+ switch (wr->wr.opcode) {
+ case IB_WR_SEND:
+ wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, SEND);
+ break;
+ case IB_WR_SEND_WITH_IMM:
+ wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, SEND_IMM);
+ wqe->base.imm_data_key = wr->wr.ex.imm_data;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ionic_prep_common(qp, &wr->wr, meta, wqe);
+}
+
+static int ionic_prep_rdma(struct ionic_qp *qp,
+ const struct ib_rdma_wr *wr)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(qp->ibqp.device);
+ struct ionic_sq_meta *meta;
+ struct ionic_v1_wqe *wqe;
+
+ meta = &qp->sq_meta[qp->sq.prod];
+ wqe = ionic_queue_at_prod(&qp->sq);
+
+ ionic_prep_sq_wqe(qp, wqe);
+
+ meta->ibop = IB_WC_RDMA_WRITE;
+
+ switch (wr->wr.opcode) {
+ case IB_WR_RDMA_READ:
+ if (wr->wr.send_flags & (IB_SEND_SOLICITED | IB_SEND_INLINE))
+ return -EINVAL;
+ meta->ibop = IB_WC_RDMA_READ;
+ wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, RDMA_READ);
+ break;
+ case IB_WR_RDMA_WRITE:
+ if (wr->wr.send_flags & IB_SEND_SOLICITED)
+ return -EINVAL;
+ wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, RDMA_WRITE);
+ break;
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, RDMA_WRITE_IMM);
+ wqe->base.imm_data_key = wr->wr.ex.imm_data;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ wqe->common.rdma.remote_va_high = cpu_to_be32(wr->remote_addr >> 32);
+ wqe->common.rdma.remote_va_low = cpu_to_be32(wr->remote_addr);
+ wqe->common.rdma.remote_rkey = cpu_to_be32(wr->rkey);
+
+ return ionic_prep_common(qp, &wr->wr, meta, wqe);
+}
+
+static int ionic_prep_atomic(struct ionic_qp *qp,
+ const struct ib_atomic_wr *wr)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(qp->ibqp.device);
+ struct ionic_sq_meta *meta;
+ struct ionic_v1_wqe *wqe;
+
+ if (wr->wr.num_sge != 1 || wr->wr.sg_list[0].length != 8)
+ return -EINVAL;
+
+ if (wr->wr.send_flags & (IB_SEND_SOLICITED | IB_SEND_INLINE))
+ return -EINVAL;
+
+ meta = &qp->sq_meta[qp->sq.prod];
+ wqe = ionic_queue_at_prod(&qp->sq);
+
+ ionic_prep_sq_wqe(qp, wqe);
+
+ meta->ibop = IB_WC_RDMA_WRITE;
+
+ switch (wr->wr.opcode) {
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ meta->ibop = IB_WC_COMP_SWAP;
+ wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, ATOMIC_CS);
+ wqe->atomic.swap_add_high = cpu_to_be32(wr->swap >> 32);
+ wqe->atomic.swap_add_low = cpu_to_be32(wr->swap);
+ wqe->atomic.compare_high = cpu_to_be32(wr->compare_add >> 32);
+ wqe->atomic.compare_low = cpu_to_be32(wr->compare_add);
+ break;
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ meta->ibop = IB_WC_FETCH_ADD;
+ wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, ATOMIC_FA);
+ wqe->atomic.swap_add_high = cpu_to_be32(wr->compare_add >> 32);
+ wqe->atomic.swap_add_low = cpu_to_be32(wr->compare_add);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ wqe->atomic.remote_va_high = cpu_to_be32(wr->remote_addr >> 32);
+ wqe->atomic.remote_va_low = cpu_to_be32(wr->remote_addr);
+ wqe->atomic.remote_rkey = cpu_to_be32(wr->rkey);
+
+ wqe->base.num_sge_key = 1;
+ wqe->atomic.sge.va = cpu_to_be64(wr->wr.sg_list[0].addr);
+ wqe->atomic.sge.len = cpu_to_be32(8);
+ wqe->atomic.sge.lkey = cpu_to_be32(wr->wr.sg_list[0].lkey);
+
+ return ionic_prep_common(qp, &wr->wr, meta, wqe);
+}
+
+static int ionic_prep_inv(struct ionic_qp *qp,
+ const struct ib_send_wr *wr)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(qp->ibqp.device);
+ struct ionic_sq_meta *meta;
+ struct ionic_v1_wqe *wqe;
+
+ if (wr->send_flags & (IB_SEND_SOLICITED | IB_SEND_INLINE))
+ return -EINVAL;
+
+ meta = &qp->sq_meta[qp->sq.prod];
+ wqe = ionic_queue_at_prod(&qp->sq);
+
+ ionic_prep_sq_wqe(qp, wqe);
+
+ wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, LOCAL_INV);
+ wqe->base.imm_data_key = cpu_to_be32(wr->ex.invalidate_rkey);
+
+ meta->len = 0;
+ meta->ibop = IB_WC_LOCAL_INV;
+
+ ionic_prep_base(qp, wr, meta, wqe);
+
+ return 0;
+}
+
+static int ionic_prep_reg(struct ionic_qp *qp,
+ const struct ib_reg_wr *wr)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(qp->ibqp.device);
+ struct ionic_mr *mr = to_ionic_mr(wr->mr);
+ struct ionic_sq_meta *meta;
+ struct ionic_v1_wqe *wqe;
+ __le64 dma_addr;
+ int flags;
+
+ if (wr->wr.send_flags & (IB_SEND_SOLICITED | IB_SEND_INLINE))
+ return -EINVAL;
+
+ /* must call ib_map_mr_sg before posting reg wr */
+ if (!mr->buf.tbl_pages)
+ return -EINVAL;
+
+ meta = &qp->sq_meta[qp->sq.prod];
+ wqe = ionic_queue_at_prod(&qp->sq);
+
+ ionic_prep_sq_wqe(qp, wqe);
+
+ flags = to_ionic_mr_flags(wr->access);
+
+ wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, REG_MR);
+ wqe->base.num_sge_key = wr->key;
+ wqe->base.imm_data_key = cpu_to_be32(mr->ibmr.lkey);
+ wqe->reg_mr.va = cpu_to_be64(mr->ibmr.iova);
+ wqe->reg_mr.length = cpu_to_be64(mr->ibmr.length);
+ wqe->reg_mr.offset = ionic_pgtbl_off(&mr->buf, mr->ibmr.iova);
+ dma_addr = ionic_pgtbl_dma(&mr->buf, mr->ibmr.iova);
+ wqe->reg_mr.dma_addr = cpu_to_be64(le64_to_cpu(dma_addr));
+
+ wqe->reg_mr.map_count = cpu_to_be32(mr->buf.tbl_pages);
+ wqe->reg_mr.flags = cpu_to_be16(flags);
+ wqe->reg_mr.dir_size_log2 = 0;
+ wqe->reg_mr.page_size_log2 = order_base_2(mr->ibmr.page_size);
+
+ meta->len = 0;
+ meta->ibop = IB_WC_REG_MR;
+
+ ionic_prep_base(qp, &wr->wr, meta, wqe);
+
+ return 0;
+}
+
+static int ionic_prep_one_rc(struct ionic_qp *qp,
+ const struct ib_send_wr *wr)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(qp->ibqp.device);
+ int rc = 0;
+
+ switch (wr->opcode) {
+ case IB_WR_SEND:
+ case IB_WR_SEND_WITH_IMM:
+ case IB_WR_SEND_WITH_INV:
+ rc = ionic_prep_send(qp, wr);
+ break;
+ case IB_WR_RDMA_READ:
+ case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ rc = ionic_prep_rdma(qp, rdma_wr(wr));
+ break;
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ rc = ionic_prep_atomic(qp, atomic_wr(wr));
+ break;
+ case IB_WR_LOCAL_INV:
+ rc = ionic_prep_inv(qp, wr);
+ break;
+ case IB_WR_REG_MR:
+ rc = ionic_prep_reg(qp, reg_wr(wr));
+ break;
+ default:
+ ibdev_dbg(&dev->ibdev, "invalid opcode %d\n", wr->opcode);
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+static int ionic_prep_one_ud(struct ionic_qp *qp,
+ const struct ib_send_wr *wr)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(qp->ibqp.device);
+ int rc = 0;
+
+ switch (wr->opcode) {
+ case IB_WR_SEND:
+ case IB_WR_SEND_WITH_IMM:
+ rc = ionic_prep_send_ud(qp, ud_wr(wr));
+ break;
+ default:
+ ibdev_dbg(&dev->ibdev, "invalid opcode %d\n", wr->opcode);
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+static int ionic_prep_recv(struct ionic_qp *qp,
+ const struct ib_recv_wr *wr)
+{
+ struct ionic_rq_meta *meta;
+ struct ionic_v1_wqe *wqe;
+ s64 signed_len;
+ u32 mval;
+
+ wqe = ionic_queue_at_prod(&qp->rq);
+
+ /* if wqe is owned by device, caller can try posting again soon */
+ if (wqe->base.flags & cpu_to_be16(IONIC_V1_FLAG_FENCE))
+ return -EAGAIN;
+
+ meta = qp->rq_meta_head;
+ if (unlikely(meta == IONIC_META_LAST) ||
+ unlikely(meta == IONIC_META_POSTED))
+ return -EIO;
+
+ ionic_prep_rq_wqe(qp, wqe);
+
+ mval = ionic_v1_recv_wqe_max_sge(qp->rq.stride_log2, qp->rq_spec,
+ false);
+ signed_len = ionic_prep_pld(wqe, &wqe->recv.pld,
+ qp->rq_spec, mval,
+ wr->sg_list, wr->num_sge);
+ if (signed_len < 0)
+ return signed_len;
+
+ meta->wrid = wr->wr_id;
+
+ wqe->base.wqe_id = meta - qp->rq_meta;
+ wqe->base.num_sge_key = wr->num_sge;
+
+ /* total length for recv goes in base imm_data_key */
+ wqe->base.imm_data_key = cpu_to_be32(signed_len);
+
+ ionic_queue_produce(&qp->rq);
+
+ qp->rq_meta_head = meta->next;
+ meta->next = IONIC_META_POSTED;
+
+ return 0;
+}
+
+static int ionic_post_send_common(struct ionic_ibdev *dev,
+ struct ionic_vcq *vcq,
+ struct ionic_cq *cq,
+ struct ionic_qp *qp,
+ const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad)
+{
+ unsigned long irqflags;
+ bool notify = false;
+ int spend, rc = 0;
+
+ if (!bad)
+ return -EINVAL;
+
+ if (!qp->has_sq) {
+ *bad = wr;
+ return -EINVAL;
+ }
+
+ if (qp->state < IB_QPS_RTS) {
+ *bad = wr;
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&qp->sq_lock, irqflags);
+
+ while (wr) {
+ if (ionic_queue_full(&qp->sq)) {
+			ibdev_dbg(&dev->ibdev, "queue full\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ if (qp->ibqp.qp_type == IB_QPT_UD ||
+ qp->ibqp.qp_type == IB_QPT_GSI)
+ rc = ionic_prep_one_ud(qp, wr);
+ else
+ rc = ionic_prep_one_rc(qp, wr);
+ if (rc)
+ goto out;
+
+ wr = wr->next;
+ }
+
+out:
+ spin_unlock_irqrestore(&qp->sq_lock, irqflags);
+
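+	/* retake the locks in cq-then-sq order for the doorbell and flush updates */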
+ spin_lock_irqsave(&cq->lock, irqflags);
+ spin_lock(&qp->sq_lock);
+
+ if (likely(qp->sq.prod != qp->sq_old_prod)) {
+ /* ring cq doorbell just in time */
+ spend = (qp->sq.prod - qp->sq_old_prod) & qp->sq.mask;
+ ionic_reserve_cq(dev, cq, spend);
+
+ qp->sq_old_prod = qp->sq.prod;
+
+ ionic_dbell_ring(dev->lif_cfg.dbpage, dev->lif_cfg.sq_qtype,
+ ionic_queue_dbell_val(&qp->sq));
+ }
+
+ if (qp->sq_flush) {
+ notify = true;
+ cq->flush = true;
+ list_move_tail(&qp->cq_flush_sq, &cq->flush_sq);
+ }
+
+ spin_unlock(&qp->sq_lock);
+ spin_unlock_irqrestore(&cq->lock, irqflags);
+
+ if (notify && vcq->ibcq.comp_handler)
+ vcq->ibcq.comp_handler(&vcq->ibcq, vcq->ibcq.cq_context);
+
+ *bad = wr;
+ return rc;
+}
+
+static int ionic_post_recv_common(struct ionic_ibdev *dev,
+ struct ionic_vcq *vcq,
+ struct ionic_cq *cq,
+ struct ionic_qp *qp,
+ const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad)
+{
+ unsigned long irqflags;
+ bool notify = false;
+ int spend, rc = 0;
+
+ if (!bad)
+ return -EINVAL;
+
+ if (!qp->has_rq) {
+ *bad = wr;
+ return -EINVAL;
+ }
+
+ if (qp->state < IB_QPS_INIT) {
+ *bad = wr;
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&qp->rq_lock, irqflags);
+
+ while (wr) {
+ if (ionic_queue_full(&qp->rq)) {
+			ibdev_dbg(&dev->ibdev, "queue full\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ rc = ionic_prep_recv(qp, wr);
+ if (rc)
+ goto out;
+
+ wr = wr->next;
+ }
+
+out:
+ if (!cq) {
+ spin_unlock_irqrestore(&qp->rq_lock, irqflags);
+ goto out_unlocked;
+ }
+ spin_unlock_irqrestore(&qp->rq_lock, irqflags);
+
+ spin_lock_irqsave(&cq->lock, irqflags);
+ spin_lock(&qp->rq_lock);
+
+ if (likely(qp->rq.prod != qp->rq_old_prod)) {
+ /* ring cq doorbell just in time */
+ spend = (qp->rq.prod - qp->rq_old_prod) & qp->rq.mask;
+ ionic_reserve_cq(dev, cq, spend);
+
+ qp->rq_old_prod = qp->rq.prod;
+
+ ionic_dbell_ring(dev->lif_cfg.dbpage, dev->lif_cfg.rq_qtype,
+ ionic_queue_dbell_val(&qp->rq));
+ }
+
+ if (qp->rq_flush) {
+ notify = true;
+ cq->flush = true;
+ list_move_tail(&qp->cq_flush_rq, &cq->flush_rq);
+ }
+
+ spin_unlock(&qp->rq_lock);
+ spin_unlock_irqrestore(&cq->lock, irqflags);
+
+ if (notify && vcq->ibcq.comp_handler)
+ vcq->ibcq.comp_handler(&vcq->ibcq, vcq->ibcq.cq_context);
+
+out_unlocked:
+ *bad = wr;
+ return rc;
+}
+
+int ionic_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibqp->device);
+ struct ionic_vcq *vcq = to_ionic_vcq(ibqp->send_cq);
+ struct ionic_qp *qp = to_ionic_qp(ibqp);
+ struct ionic_cq *cq =
+ to_ionic_vcq_cq(ibqp->send_cq, qp->udma_idx);
+
+ return ionic_post_send_common(dev, vcq, cq, qp, wr, bad);
+}
+
+int ionic_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibqp->device);
+ struct ionic_vcq *vcq = to_ionic_vcq(ibqp->recv_cq);
+ struct ionic_qp *qp = to_ionic_qp(ibqp);
+ struct ionic_cq *cq =
+ to_ionic_vcq_cq(ibqp->recv_cq, qp->udma_idx);
+
+ return ionic_post_recv_common(dev, vcq, cq, qp, wr, bad);
+}
diff --git a/drivers/infiniband/hw/ionic/ionic_fw.h b/drivers/infiniband/hw/ionic/ionic_fw.h
new file mode 100644
index 000000000000..adfbb89d856c
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/ionic_fw.h
@@ -0,0 +1,1029 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#ifndef _IONIC_FW_H_
+#define _IONIC_FW_H_
+
+#include <linux/kernel.h>
+#include <rdma/ib_verbs.h>
+
+/* common for ib spec */
+
+#define IONIC_EXP_DBELL_SZ 8
+
+enum ionic_mrid_bits {
+ IONIC_MRID_INDEX_SHIFT = 8,
+};
+
+static inline u32 ionic_mrid(u32 index, u8 key)
+{
+ return (index << IONIC_MRID_INDEX_SHIFT) | key;
+}
+
+static inline u32 ionic_mrid_index(u32 lrkey)
+{
+ return lrkey >> IONIC_MRID_INDEX_SHIFT;
+}
+
+/* common to all versions */
+
+/* wqe scatter gather element */
+struct ionic_sge {
+ __be64 va;
+ __be32 len;
+ __be32 lkey;
+};
+
+/* admin queue mr type */
+enum ionic_mr_flags {
+ /* bits that determine mr access */
+ IONIC_MRF_LOCAL_WRITE = BIT(0),
+ IONIC_MRF_REMOTE_WRITE = BIT(1),
+ IONIC_MRF_REMOTE_READ = BIT(2),
+ IONIC_MRF_REMOTE_ATOMIC = BIT(3),
+ IONIC_MRF_MW_BIND = BIT(4),
+ IONIC_MRF_ZERO_BASED = BIT(5),
+ IONIC_MRF_ON_DEMAND = BIT(6),
+ IONIC_MRF_PB = BIT(7),
+ IONIC_MRF_ACCESS_MASK = BIT(12) - 1,
+
+ /* bits that determine mr type */
+ IONIC_MRF_UKEY_EN = BIT(13),
+ IONIC_MRF_IS_MW = BIT(14),
+ IONIC_MRF_INV_EN = BIT(15),
+
+ /* base flags combinations for mr types */
+ IONIC_MRF_USER_MR = 0,
+ IONIC_MRF_PHYS_MR = (IONIC_MRF_UKEY_EN |
+ IONIC_MRF_INV_EN),
+ IONIC_MRF_MW_1 = (IONIC_MRF_UKEY_EN |
+ IONIC_MRF_IS_MW),
+ IONIC_MRF_MW_2 = (IONIC_MRF_UKEY_EN |
+ IONIC_MRF_IS_MW |
+ IONIC_MRF_INV_EN),
+};
+
+static inline int to_ionic_mr_flags(int access)
+{
+ int flags = 0;
+
+ if (access & IB_ACCESS_LOCAL_WRITE)
+ flags |= IONIC_MRF_LOCAL_WRITE;
+
+ if (access & IB_ACCESS_REMOTE_READ)
+ flags |= IONIC_MRF_REMOTE_READ;
+
+ if (access & IB_ACCESS_REMOTE_WRITE)
+ flags |= IONIC_MRF_REMOTE_WRITE;
+
+ if (access & IB_ACCESS_REMOTE_ATOMIC)
+ flags |= IONIC_MRF_REMOTE_ATOMIC;
+
+ if (access & IB_ACCESS_MW_BIND)
+ flags |= IONIC_MRF_MW_BIND;
+
+ if (access & IB_ZERO_BASED)
+ flags |= IONIC_MRF_ZERO_BASED;
+
+ return flags;
+}
+
+enum ionic_qp_flags {
+ /* bits that determine qp access */
+ IONIC_QPF_REMOTE_WRITE = BIT(0),
+ IONIC_QPF_REMOTE_READ = BIT(1),
+ IONIC_QPF_REMOTE_ATOMIC = BIT(2),
+
+ /* bits that determine other qp behavior */
+ IONIC_QPF_SQ_PB = BIT(6),
+ IONIC_QPF_RQ_PB = BIT(7),
+ IONIC_QPF_SQ_SPEC = BIT(8),
+ IONIC_QPF_RQ_SPEC = BIT(9),
+ IONIC_QPF_REMOTE_PRIVILEGED = BIT(10),
+ IONIC_QPF_SQ_DRAINING = BIT(11),
+ IONIC_QPF_SQD_NOTIFY = BIT(12),
+ IONIC_QPF_SQ_CMB = BIT(13),
+ IONIC_QPF_RQ_CMB = BIT(14),
+ IONIC_QPF_PRIVILEGED = BIT(15),
+};
+
+static inline int from_ionic_qp_flags(int flags)
+{
+ int access_flags = 0;
+
+ if (flags & IONIC_QPF_REMOTE_WRITE)
+ access_flags |= IB_ACCESS_REMOTE_WRITE;
+
+ if (flags & IONIC_QPF_REMOTE_READ)
+ access_flags |= IB_ACCESS_REMOTE_READ;
+
+ if (flags & IONIC_QPF_REMOTE_ATOMIC)
+ access_flags |= IB_ACCESS_REMOTE_ATOMIC;
+
+ return access_flags;
+}
+
+static inline int to_ionic_qp_flags(int access, bool sqd_notify,
+ bool sq_is_cmb, bool rq_is_cmb,
+ bool sq_spec, bool rq_spec,
+ bool privileged, bool remote_privileged)
+{
+ int flags = 0;
+
+ if (access & IB_ACCESS_REMOTE_WRITE)
+ flags |= IONIC_QPF_REMOTE_WRITE;
+
+ if (access & IB_ACCESS_REMOTE_READ)
+ flags |= IONIC_QPF_REMOTE_READ;
+
+ if (access & IB_ACCESS_REMOTE_ATOMIC)
+ flags |= IONIC_QPF_REMOTE_ATOMIC;
+
+ if (sqd_notify)
+ flags |= IONIC_QPF_SQD_NOTIFY;
+
+ if (sq_is_cmb)
+ flags |= IONIC_QPF_SQ_CMB;
+
+ if (rq_is_cmb)
+ flags |= IONIC_QPF_RQ_CMB;
+
+ if (sq_spec)
+ flags |= IONIC_QPF_SQ_SPEC;
+
+ if (rq_spec)
+ flags |= IONIC_QPF_RQ_SPEC;
+
+ if (privileged)
+ flags |= IONIC_QPF_PRIVILEGED;
+
+ if (remote_privileged)
+ flags |= IONIC_QPF_REMOTE_PRIVILEGED;
+
+ return flags;
+}
+
+/* cqe non-admin status indicated in status_length field when err bit is set */
+enum ionic_status {
+ IONIC_STS_OK,
+ IONIC_STS_LOCAL_LEN_ERR,
+ IONIC_STS_LOCAL_QP_OPER_ERR,
+ IONIC_STS_LOCAL_PROT_ERR,
+ IONIC_STS_WQE_FLUSHED_ERR,
+ IONIC_STS_MEM_MGMT_OPER_ERR,
+ IONIC_STS_BAD_RESP_ERR,
+ IONIC_STS_LOCAL_ACC_ERR,
+ IONIC_STS_REMOTE_INV_REQ_ERR,
+ IONIC_STS_REMOTE_ACC_ERR,
+ IONIC_STS_REMOTE_OPER_ERR,
+ IONIC_STS_RETRY_EXCEEDED,
+ IONIC_STS_RNR_RETRY_EXCEEDED,
+ IONIC_STS_XRC_VIO_ERR,
+ IONIC_STS_LOCAL_SGL_INV_ERR,
+};
+
+static inline int ionic_to_ib_status(int sts)
+{
+ switch (sts) {
+ case IONIC_STS_OK:
+ return IB_WC_SUCCESS;
+ case IONIC_STS_LOCAL_LEN_ERR:
+ return IB_WC_LOC_LEN_ERR;
+ case IONIC_STS_LOCAL_QP_OPER_ERR:
+ case IONIC_STS_LOCAL_SGL_INV_ERR:
+ return IB_WC_LOC_QP_OP_ERR;
+ case IONIC_STS_LOCAL_PROT_ERR:
+ return IB_WC_LOC_PROT_ERR;
+ case IONIC_STS_WQE_FLUSHED_ERR:
+ return IB_WC_WR_FLUSH_ERR;
+ case IONIC_STS_MEM_MGMT_OPER_ERR:
+ return IB_WC_MW_BIND_ERR;
+ case IONIC_STS_BAD_RESP_ERR:
+ return IB_WC_BAD_RESP_ERR;
+ case IONIC_STS_LOCAL_ACC_ERR:
+ return IB_WC_LOC_ACCESS_ERR;
+ case IONIC_STS_REMOTE_INV_REQ_ERR:
+ return IB_WC_REM_INV_REQ_ERR;
+ case IONIC_STS_REMOTE_ACC_ERR:
+ return IB_WC_REM_ACCESS_ERR;
+ case IONIC_STS_REMOTE_OPER_ERR:
+ return IB_WC_REM_OP_ERR;
+ case IONIC_STS_RETRY_EXCEEDED:
+ return IB_WC_RETRY_EXC_ERR;
+ case IONIC_STS_RNR_RETRY_EXCEEDED:
+ return IB_WC_RNR_RETRY_EXC_ERR;
+ case IONIC_STS_XRC_VIO_ERR:
+ default:
+ return IB_WC_GENERAL_ERR;
+ }
+}
+
+/* admin queue qp type */
+enum ionic_qp_type {
+ IONIC_QPT_RC,
+ IONIC_QPT_UC,
+ IONIC_QPT_RD,
+ IONIC_QPT_UD,
+ IONIC_QPT_SRQ,
+ IONIC_QPT_XRC_INI,
+ IONIC_QPT_XRC_TGT,
+ IONIC_QPT_XRC_SRQ,
+};
+
+static inline int to_ionic_qp_type(enum ib_qp_type type)
+{
+ switch (type) {
+ case IB_QPT_GSI:
+ case IB_QPT_UD:
+ return IONIC_QPT_UD;
+ case IB_QPT_RC:
+ return IONIC_QPT_RC;
+ case IB_QPT_UC:
+ return IONIC_QPT_UC;
+ case IB_QPT_XRC_INI:
+ return IONIC_QPT_XRC_INI;
+ case IB_QPT_XRC_TGT:
+ return IONIC_QPT_XRC_TGT;
+ default:
+ return -EINVAL;
+ }
+}
+
+/* admin queue qp state */
+enum ionic_qp_state {
+ IONIC_QPS_RESET,
+ IONIC_QPS_INIT,
+ IONIC_QPS_RTR,
+ IONIC_QPS_RTS,
+ IONIC_QPS_SQD,
+ IONIC_QPS_SQE,
+ IONIC_QPS_ERR,
+};
+
+static inline int from_ionic_qp_state(enum ionic_qp_state state)
+{
+ switch (state) {
+ case IONIC_QPS_RESET:
+ return IB_QPS_RESET;
+ case IONIC_QPS_INIT:
+ return IB_QPS_INIT;
+ case IONIC_QPS_RTR:
+ return IB_QPS_RTR;
+ case IONIC_QPS_RTS:
+ return IB_QPS_RTS;
+ case IONIC_QPS_SQD:
+ return IB_QPS_SQD;
+ case IONIC_QPS_SQE:
+ return IB_QPS_SQE;
+ case IONIC_QPS_ERR:
+ return IB_QPS_ERR;
+ default:
+ return -EINVAL;
+ }
+}
+
+static inline int to_ionic_qp_state(enum ib_qp_state state)
+{
+ switch (state) {
+ case IB_QPS_RESET:
+ return IONIC_QPS_RESET;
+ case IB_QPS_INIT:
+ return IONIC_QPS_INIT;
+ case IB_QPS_RTR:
+ return IONIC_QPS_RTR;
+ case IB_QPS_RTS:
+ return IONIC_QPS_RTS;
+ case IB_QPS_SQD:
+ return IONIC_QPS_SQD;
+ case IB_QPS_SQE:
+ return IONIC_QPS_SQE;
+ case IB_QPS_ERR:
+ return IONIC_QPS_ERR;
+ default:
+ return 0;
+ }
+}
+
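+/*
+ * The modify command packs both states into one byte: the new state in
+ * the low nibble and the current state in the high nibble.
+ */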
+static inline int to_ionic_qp_modify_state(enum ib_qp_state to_state,
+ enum ib_qp_state from_state)
+{
+ return to_ionic_qp_state(to_state) |
+ (to_ionic_qp_state(from_state) << 4);
+}
+
+/* fw abi v1 */
+
+/* data payload part of v1 wqe */
+union ionic_v1_pld {
+ struct ionic_sge sgl[2];
+ __be32 spec32[8];
+ __be16 spec16[16];
+ __u8 data[32];
+};
+
+/* completion queue v1 cqe */
+struct ionic_v1_cqe {
+ union {
+ struct {
+ __be16 cmd_idx;
+ __u8 cmd_op;
+ __u8 rsvd[17];
+ __le16 old_sq_cindex;
+ __le16 old_rq_cq_cindex;
+ } admin;
+ struct {
+ __u64 wqe_id;
+ __be32 src_qpn_op;
+ __u8 src_mac[6];
+ __be16 vlan_tag;
+ __be32 imm_data_rkey;
+ } recv;
+ struct {
+ __u8 rsvd[4];
+ __be32 msg_msn;
+ __u8 rsvd2[8];
+ __u64 npg_wqe_id;
+ } send;
+ };
+ __be32 status_length;
+ __be32 qid_type_flags;
+};
+
+/* bits for cqe recv */
+enum ionic_v1_cqe_src_qpn_bits {
+ IONIC_V1_CQE_RECV_QPN_MASK = 0xffffff,
+ IONIC_V1_CQE_RECV_OP_SHIFT = 24,
+
+ /* MASK could be 0x3, but need 0x1f for makeshift values:
+ * OP_TYPE_RDMA_OPER_WITH_IMM, OP_TYPE_SEND_RCVD
+ */
+ IONIC_V1_CQE_RECV_OP_MASK = 0x1f,
+ IONIC_V1_CQE_RECV_OP_SEND = 0,
+ IONIC_V1_CQE_RECV_OP_SEND_INV = 1,
+ IONIC_V1_CQE_RECV_OP_SEND_IMM = 2,
+ IONIC_V1_CQE_RECV_OP_RDMA_IMM = 3,
+
+ IONIC_V1_CQE_RECV_IS_IPV4 = BIT(7 + IONIC_V1_CQE_RECV_OP_SHIFT),
+ IONIC_V1_CQE_RECV_IS_VLAN = BIT(6 + IONIC_V1_CQE_RECV_OP_SHIFT),
+};
+
+/* bits for cqe qid_type_flags */
+enum ionic_v1_cqe_qtf_bits {
+ IONIC_V1_CQE_COLOR = BIT(0),
+ IONIC_V1_CQE_ERROR = BIT(1),
+ IONIC_V1_CQE_TYPE_SHIFT = 5,
+ IONIC_V1_CQE_TYPE_MASK = 0x7,
+ IONIC_V1_CQE_QID_SHIFT = 8,
+
+ IONIC_V1_CQE_TYPE_ADMIN = 0,
+ IONIC_V1_CQE_TYPE_RECV = 1,
+ IONIC_V1_CQE_TYPE_SEND_MSN = 2,
+ IONIC_V1_CQE_TYPE_SEND_NPG = 3,
+};
+
+static inline bool ionic_v1_cqe_color(struct ionic_v1_cqe *cqe)
+{
+ return cqe->qid_type_flags & cpu_to_be32(IONIC_V1_CQE_COLOR);
+}
+
+static inline bool ionic_v1_cqe_error(struct ionic_v1_cqe *cqe)
+{
+ return cqe->qid_type_flags & cpu_to_be32(IONIC_V1_CQE_ERROR);
+}
+
+static inline bool ionic_v1_cqe_recv_is_ipv4(struct ionic_v1_cqe *cqe)
+{
+ return cqe->recv.src_qpn_op & cpu_to_be32(IONIC_V1_CQE_RECV_IS_IPV4);
+}
+
+static inline bool ionic_v1_cqe_recv_is_vlan(struct ionic_v1_cqe *cqe)
+{
+ return cqe->recv.src_qpn_op & cpu_to_be32(IONIC_V1_CQE_RECV_IS_VLAN);
+}
+
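+/* overwrite the qid bits so a consumed cqe cannot match a live queue */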
+static inline void ionic_v1_cqe_clean(struct ionic_v1_cqe *cqe)
+{
+ cqe->qid_type_flags |= cpu_to_be32(~0u << IONIC_V1_CQE_QID_SHIFT);
+}
+
+static inline u32 ionic_v1_cqe_qtf(struct ionic_v1_cqe *cqe)
+{
+ return be32_to_cpu(cqe->qid_type_flags);
+}
+
+static inline u8 ionic_v1_cqe_qtf_type(u32 qtf)
+{
+ return (qtf >> IONIC_V1_CQE_TYPE_SHIFT) & IONIC_V1_CQE_TYPE_MASK;
+}
+
+static inline u32 ionic_v1_cqe_qtf_qid(u32 qtf)
+{
+ return qtf >> IONIC_V1_CQE_QID_SHIFT;
+}
+
+/* v1 base wqe header */
+struct ionic_v1_base_hdr {
+ __u64 wqe_id;
+ __u8 op;
+ __u8 num_sge_key;
+ __be16 flags;
+ __be32 imm_data_key;
+};
+
+/* v1 receive wqe body */
+struct ionic_v1_recv_bdy {
+ __u8 rsvd[16];
+ union ionic_v1_pld pld;
+};
+
+/* v1 send/rdma wqe body (common, has sgl) */
+struct ionic_v1_common_bdy {
+ union {
+ struct {
+ __be32 ah_id;
+ __be32 dest_qpn;
+ __be32 dest_qkey;
+ } send;
+ struct {
+ __be32 remote_va_high;
+ __be32 remote_va_low;
+ __be32 remote_rkey;
+ } rdma;
+ };
+ __be32 length;
+ union ionic_v1_pld pld;
+};
+
+/* v1 atomic wqe body */
+struct ionic_v1_atomic_bdy {
+ __be32 remote_va_high;
+ __be32 remote_va_low;
+ __be32 remote_rkey;
+ __be32 swap_add_high;
+ __be32 swap_add_low;
+ __be32 compare_high;
+ __be32 compare_low;
+ __u8 rsvd[4];
+ struct ionic_sge sge;
+};
+
+/* v1 reg mr wqe body */
+struct ionic_v1_reg_mr_bdy {
+ __be64 va;
+ __be64 length;
+ __be64 offset;
+ __be64 dma_addr;
+ __be32 map_count;
+ __be16 flags;
+ __u8 dir_size_log2;
+ __u8 page_size_log2;
+ __u8 rsvd[8];
+};
+
+/* v1 bind mw wqe body */
+struct ionic_v1_bind_mw_bdy {
+ __be64 va;
+ __be64 length;
+ __be32 lkey;
+ __be16 flags;
+ __u8 rsvd[26];
+};
+
+/* v1 send/recv wqe */
+struct ionic_v1_wqe {
+ struct ionic_v1_base_hdr base;
+ union {
+ struct ionic_v1_recv_bdy recv;
+ struct ionic_v1_common_bdy common;
+ struct ionic_v1_atomic_bdy atomic;
+ struct ionic_v1_reg_mr_bdy reg_mr;
+ struct ionic_v1_bind_mw_bdy bind_mw;
+ };
+};
+
+/* queue pair v1 send opcodes */
+enum ionic_v1_op {
+ IONIC_V1_OP_SEND,
+ IONIC_V1_OP_SEND_INV,
+ IONIC_V1_OP_SEND_IMM,
+ IONIC_V1_OP_RDMA_READ,
+ IONIC_V1_OP_RDMA_WRITE,
+ IONIC_V1_OP_RDMA_WRITE_IMM,
+ IONIC_V1_OP_ATOMIC_CS,
+ IONIC_V1_OP_ATOMIC_FA,
+ IONIC_V1_OP_REG_MR,
+ IONIC_V1_OP_LOCAL_INV,
+ IONIC_V1_OP_BIND_MW,
+
+ /* flags */
+ IONIC_V1_FLAG_FENCE = BIT(0),
+ IONIC_V1_FLAG_SOL = BIT(1),
+ IONIC_V1_FLAG_INL = BIT(2),
+ IONIC_V1_FLAG_SIG = BIT(3),
+
+	/* the last four flag bits select the sgl spec format */
+ IONIC_V1_FLAG_SPEC32 = (1u << 12),
+ IONIC_V1_FLAG_SPEC16 = (2u << 12),
+ IONIC_V1_SPEC_FIRST_SGE = 2,
+};
+
+/* queue pair v2 send opcodes */
+enum ionic_v2_op {
+ IONIC_V2_OPSL_OUT = 0x20,
+ IONIC_V2_OPSL_IMM = 0x40,
+ IONIC_V2_OPSL_INV = 0x80,
+
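+	/* outbound ops compose a base opcode with the OPSL modifier bits above */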
+ IONIC_V2_OP_SEND = 0x0 | IONIC_V2_OPSL_OUT,
+ IONIC_V2_OP_SEND_IMM = IONIC_V2_OP_SEND | IONIC_V2_OPSL_IMM,
+ IONIC_V2_OP_SEND_INV = IONIC_V2_OP_SEND | IONIC_V2_OPSL_INV,
+
+ IONIC_V2_OP_RDMA_WRITE = 0x1 | IONIC_V2_OPSL_OUT,
+ IONIC_V2_OP_RDMA_WRITE_IMM = IONIC_V2_OP_RDMA_WRITE | IONIC_V2_OPSL_IMM,
+
+ IONIC_V2_OP_RDMA_READ = 0x2,
+
+ IONIC_V2_OP_ATOMIC_CS = 0x4,
+ IONIC_V2_OP_ATOMIC_FA = 0x5,
+ IONIC_V2_OP_REG_MR = 0x6,
+ IONIC_V2_OP_LOCAL_INV = 0x7,
+ IONIC_V2_OP_BIND_MW = 0x8,
+};
+
+static inline size_t ionic_v1_send_wqe_min_size(int min_sge, int min_data,
+ int spec, bool expdb)
+{
+ size_t sz_wqe, sz_sgl, sz_data;
+
+ if (spec > IONIC_V1_SPEC_FIRST_SGE)
+ min_sge += IONIC_V1_SPEC_FIRST_SGE;
+
+ if (expdb) {
+ min_sge += 1;
+ min_data += IONIC_EXP_DBELL_SZ;
+ }
+
+ sz_wqe = sizeof(struct ionic_v1_wqe);
+ sz_sgl = offsetof(struct ionic_v1_wqe, common.pld.sgl[min_sge]);
+ sz_data = offsetof(struct ionic_v1_wqe, common.pld.data[min_data]);
+
+ if (sz_sgl > sz_wqe)
+ sz_wqe = sz_sgl;
+
+ if (sz_data > sz_wqe)
+ sz_wqe = sz_data;
+
+ return sz_wqe;
+}
+
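+/*
+ * Compute the number of sges that fit in one wqe stride: a NULL wqe is
+ * the base and 2^stride_log2 the end address, so the pointer subtraction
+ * below counts the sgl slots in between. The max_data and recv variants
+ * below use the same idiom.
+ */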
+static inline int ionic_v1_send_wqe_max_sge(u8 stride_log2, int spec,
+ bool expdb)
+{
+ struct ionic_sge *sge = (void *)(1ull << stride_log2);
+ struct ionic_v1_wqe *wqe = (void *)0;
+ int num_sge = 0;
+
+ if (expdb)
+ sge -= 1;
+
+ if (spec > IONIC_V1_SPEC_FIRST_SGE)
+ num_sge = IONIC_V1_SPEC_FIRST_SGE;
+
+ num_sge = sge - &wqe->common.pld.sgl[num_sge];
+
+ if (spec && num_sge > spec)
+ num_sge = spec;
+
+ return num_sge;
+}
+
+static inline int ionic_v1_send_wqe_max_data(u8 stride_log2, bool expdb)
+{
+ struct ionic_v1_wqe *wqe = (void *)0;
+ __u8 *data = (void *)(1ull << stride_log2);
+
+ if (expdb)
+ data -= IONIC_EXP_DBELL_SZ;
+
+ return data - wqe->common.pld.data;
+}
+
+static inline size_t ionic_v1_recv_wqe_min_size(int min_sge, int spec,
+ bool expdb)
+{
+ size_t sz_wqe, sz_sgl;
+
+ if (spec > IONIC_V1_SPEC_FIRST_SGE)
+ min_sge += IONIC_V1_SPEC_FIRST_SGE;
+
+ if (expdb)
+ min_sge += 1;
+
+ sz_wqe = sizeof(struct ionic_v1_wqe);
+ sz_sgl = offsetof(struct ionic_v1_wqe, recv.pld.sgl[min_sge]);
+
+ if (sz_sgl > sz_wqe)
+ sz_wqe = sz_sgl;
+
+ return sz_wqe;
+}
+
+static inline int ionic_v1_recv_wqe_max_sge(u8 stride_log2, int spec,
+ bool expdb)
+{
+ struct ionic_sge *sge = (void *)(1ull << stride_log2);
+ struct ionic_v1_wqe *wqe = (void *)0;
+ int num_sge = 0;
+
+ if (expdb)
+ sge -= 1;
+
+ if (spec > IONIC_V1_SPEC_FIRST_SGE)
+ num_sge = IONIC_V1_SPEC_FIRST_SGE;
+
+ num_sge = sge - &wqe->recv.pld.sgl[num_sge];
+
+ if (spec && num_sge > spec)
+ num_sge = spec;
+
+ return num_sge;
+}
+
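+/*
+ * Choose the spec sge count for a queue: zero when spec is disabled or
+ * too small, the compact first-sge format when it suffices, otherwise
+ * the full spec limit.
+ */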
+static inline int ionic_v1_use_spec_sge(int min_sge, int spec)
+{
+ if (!spec || min_sge > spec)
+ return 0;
+
+ if (min_sge <= IONIC_V1_SPEC_FIRST_SGE)
+ return IONIC_V1_SPEC_FIRST_SGE;
+
+ return spec;
+}
+
+struct ionic_admin_stats_hdr {
+ __le64 dma_addr;
+ __le32 length;
+ __le32 id_ver;
+ __u8 type_state;
+} __packed;
+
+#define IONIC_ADMIN_STATS_HDRS_IN_V1_LEN 17
+static_assert(sizeof(struct ionic_admin_stats_hdr) ==
+ IONIC_ADMIN_STATS_HDRS_IN_V1_LEN);
+
+struct ionic_admin_create_ah {
+ __le64 dma_addr;
+ __le32 length;
+ __le32 pd_id;
+ __le32 id_ver;
+ __le16 dbid_flags;
+ __u8 csum_profile;
+ __u8 crypto;
+} __packed;
+
+#define IONIC_ADMIN_CREATE_AH_IN_V1_LEN 24
+static_assert(sizeof(struct ionic_admin_create_ah) ==
+ IONIC_ADMIN_CREATE_AH_IN_V1_LEN);
+
+struct ionic_admin_destroy_ah {
+ __le32 ah_id;
+} __packed;
+
+#define IONIC_ADMIN_DESTROY_AH_IN_V1_LEN 4
+static_assert(sizeof(struct ionic_admin_destroy_ah) ==
+ IONIC_ADMIN_DESTROY_AH_IN_V1_LEN);
+
+struct ionic_admin_query_ah {
+ __le64 dma_addr;
+} __packed;
+
+#define IONIC_ADMIN_QUERY_AH_IN_V1_LEN 8
+static_assert(sizeof(struct ionic_admin_query_ah) ==
+ IONIC_ADMIN_QUERY_AH_IN_V1_LEN);
+
+struct ionic_admin_create_mr {
+ __le64 va;
+ __le64 length;
+ __le32 pd_id;
+ __le32 id_ver;
+ __le32 tbl_index;
+ __le32 map_count;
+ __le64 dma_addr;
+ __le16 dbid_flags;
+ __u8 pt_type;
+ __u8 dir_size_log2;
+ __u8 page_size_log2;
+} __packed;
+
+#define IONIC_ADMIN_CREATE_MR_IN_V1_LEN 45
+static_assert(sizeof(struct ionic_admin_create_mr) ==
+ IONIC_ADMIN_CREATE_MR_IN_V1_LEN);
+
+struct ionic_admin_destroy_mr {
+ __le32 mr_id;
+} __packed;
+
+#define IONIC_ADMIN_DESTROY_MR_IN_V1_LEN 4
+static_assert(sizeof(struct ionic_admin_destroy_mr) ==
+ IONIC_ADMIN_DESTROY_MR_IN_V1_LEN);
+
+struct ionic_admin_create_cq {
+ __le32 eq_id;
+ __u8 depth_log2;
+ __u8 stride_log2;
+ __u8 dir_size_log2_rsvd;
+ __u8 page_size_log2;
+ __le32 cq_flags;
+ __le32 id_ver;
+ __le32 tbl_index;
+ __le32 map_count;
+ __le64 dma_addr;
+ __le16 dbid_flags;
+} __packed;
+
+#define IONIC_ADMIN_CREATE_CQ_IN_V1_LEN 34
+static_assert(sizeof(struct ionic_admin_create_cq) ==
+ IONIC_ADMIN_CREATE_CQ_IN_V1_LEN);
+
+struct ionic_admin_destroy_cq {
+ __le32 cq_id;
+} __packed;
+
+#define IONIC_ADMIN_DESTROY_CQ_IN_V1_LEN 4
+static_assert(sizeof(struct ionic_admin_destroy_cq) ==
+ IONIC_ADMIN_DESTROY_CQ_IN_V1_LEN);
+
+struct ionic_admin_create_qp {
+ __le32 pd_id;
+ __be32 priv_flags;
+ __le32 sq_cq_id;
+ __u8 sq_depth_log2;
+ __u8 sq_stride_log2;
+ __u8 sq_dir_size_log2_rsvd;
+ __u8 sq_page_size_log2;
+ __le32 sq_tbl_index_xrcd_id;
+ __le32 sq_map_count;
+ __le64 sq_dma_addr;
+ __le32 rq_cq_id;
+ __u8 rq_depth_log2;
+ __u8 rq_stride_log2;
+ __u8 rq_dir_size_log2_rsvd;
+ __u8 rq_page_size_log2;
+ __le32 rq_tbl_index_srq_id;
+ __le32 rq_map_count;
+ __le64 rq_dma_addr;
+ __le32 id_ver;
+ __le16 dbid_flags;
+ __u8 type_state;
+ __u8 rsvd;
+} __packed;
+
+#define IONIC_ADMIN_CREATE_QP_IN_V1_LEN 64
+static_assert(sizeof(struct ionic_admin_create_qp) ==
+ IONIC_ADMIN_CREATE_QP_IN_V1_LEN);
+
+struct ionic_admin_destroy_qp {
+ __le32 qp_id;
+} __packed;
+
+#define IONIC_ADMIN_DESTROY_QP_IN_V1_LEN 4
+static_assert(sizeof(struct ionic_admin_destroy_qp) ==
+ IONIC_ADMIN_DESTROY_QP_IN_V1_LEN);
+
+struct ionic_admin_mod_qp {
+ __be32 attr_mask;
+ __u8 dcqcn_profile;
+ __u8 tfp_csum_profile;
+ __be16 access_flags;
+ __le32 rq_psn;
+ __le32 sq_psn;
+ __le32 qkey_dest_qpn;
+ __le32 rate_limit_kbps;
+ __u8 pmtu;
+ __u8 retry;
+ __u8 rnr_timer;
+ __u8 retry_timeout;
+ __u8 rsq_depth;
+ __u8 rrq_depth;
+ __le16 pkey_id;
+ __le32 ah_id_len;
+ __u8 en_pcp;
+ __u8 ip_dscp;
+ __u8 rsvd2;
+ __u8 type_state;
+ union {
+ struct {
+ __le16 rsvd1;
+ };
+ __le32 rrq_index;
+ };
+ __le32 rsq_index;
+ __le64 dma_addr;
+ __le32 id_ver;
+} __packed;
+
+#define IONIC_ADMIN_MODIFY_QP_IN_V1_LEN 60
+static_assert(sizeof(struct ionic_admin_mod_qp) ==
+ IONIC_ADMIN_MODIFY_QP_IN_V1_LEN);
+
+struct ionic_admin_query_qp {
+ __le64 hdr_dma_addr;
+ __le64 sq_dma_addr;
+ __le64 rq_dma_addr;
+ __le32 ah_id;
+ __le32 id_ver;
+ __le16 dbid_flags;
+} __packed;
+
+#define IONIC_ADMIN_QUERY_QP_IN_V1_LEN 34
+static_assert(sizeof(struct ionic_admin_query_qp) ==
+ IONIC_ADMIN_QUERY_QP_IN_V1_LEN);
+
+#define ADMIN_WQE_STRIDE 64
+#define ADMIN_WQE_HDR_LEN 4
+
+/* admin queue v1 wqe */
+struct ionic_v1_admin_wqe {
+ __u8 op;
+ __u8 rsvd;
+ __le16 len;
+
+ union {
+ struct ionic_admin_stats_hdr stats;
+ struct ionic_admin_create_ah create_ah;
+ struct ionic_admin_destroy_ah destroy_ah;
+ struct ionic_admin_query_ah query_ah;
+ struct ionic_admin_create_mr create_mr;
+ struct ionic_admin_destroy_mr destroy_mr;
+ struct ionic_admin_create_cq create_cq;
+ struct ionic_admin_destroy_cq destroy_cq;
+ struct ionic_admin_create_qp create_qp;
+ struct ionic_admin_destroy_qp destroy_qp;
+ struct ionic_admin_mod_qp mod_qp;
+ struct ionic_admin_query_qp query_qp;
+ } cmd;
+};
+
+/* side data for query qp */
+struct ionic_v1_admin_query_qp_sq {
+ __u8 rnr_timer;
+ __u8 retry_timeout;
+ __be16 access_perms_flags;
+ __be16 rsvd;
+ __be16 pkey_id;
+ __be32 qkey_dest_qpn;
+ __be32 rate_limit_kbps;
+ __be32 rq_psn;
+};
+
+struct ionic_v1_admin_query_qp_rq {
+ __u8 state_pmtu;
+ __u8 retry_rnrtry;
+ __u8 rrq_depth;
+ __u8 rsq_depth;
+ __be32 sq_psn;
+ __be16 access_perms_flags;
+ __be16 rsvd;
+};
+
+/* admin queue v1 opcodes */
+enum ionic_v1_admin_op {
+ IONIC_V1_ADMIN_NOOP,
+ IONIC_V1_ADMIN_CREATE_CQ,
+ IONIC_V1_ADMIN_CREATE_QP,
+ IONIC_V1_ADMIN_CREATE_MR,
+ IONIC_V1_ADMIN_STATS_HDRS,
+ IONIC_V1_ADMIN_STATS_VALS,
+ IONIC_V1_ADMIN_DESTROY_MR,
+ IONIC_V1_ADMIN_RSVD_7, /* RESIZE_CQ */
+ IONIC_V1_ADMIN_DESTROY_CQ,
+ IONIC_V1_ADMIN_MODIFY_QP,
+ IONIC_V1_ADMIN_QUERY_QP,
+ IONIC_V1_ADMIN_DESTROY_QP,
+ IONIC_V1_ADMIN_DEBUG,
+ IONIC_V1_ADMIN_CREATE_AH,
+ IONIC_V1_ADMIN_QUERY_AH,
+ IONIC_V1_ADMIN_MODIFY_DCQCN,
+ IONIC_V1_ADMIN_DESTROY_AH,
+ IONIC_V1_ADMIN_QP_STATS_HDRS,
+ IONIC_V1_ADMIN_QP_STATS_VALS,
+ IONIC_V1_ADMIN_OPCODES_MAX,
+};
+
+/* admin queue v1 cqe status */
+enum ionic_v1_admin_status {
+ IONIC_V1_ASTS_OK,
+ IONIC_V1_ASTS_BAD_CMD,
+ IONIC_V1_ASTS_BAD_INDEX,
+ IONIC_V1_ASTS_BAD_STATE,
+ IONIC_V1_ASTS_BAD_TYPE,
+ IONIC_V1_ASTS_BAD_ATTR,
+ IONIC_V1_ASTS_MSG_TOO_BIG,
+};
+
+/* event queue v1 eqe */
+struct ionic_v1_eqe {
+ __be32 evt;
+};
+
+/* bits for eqe evt */
+enum ionic_v1_eqe_evt_bits {
+ IONIC_V1_EQE_COLOR = BIT(0),
+ IONIC_V1_EQE_TYPE_SHIFT = 1,
+ IONIC_V1_EQE_TYPE_MASK = 0x7,
+ IONIC_V1_EQE_CODE_SHIFT = 4,
+ IONIC_V1_EQE_CODE_MASK = 0xf,
+ IONIC_V1_EQE_QID_SHIFT = 8,
+
+ /* cq events */
+ IONIC_V1_EQE_TYPE_CQ = 0,
+ /* cq normal events */
+ IONIC_V1_EQE_CQ_NOTIFY = 0,
+ /* cq error events */
+ IONIC_V1_EQE_CQ_ERR = 8,
+
+ /* qp and srq events */
+ IONIC_V1_EQE_TYPE_QP = 1,
+ /* qp normal events */
+ IONIC_V1_EQE_SRQ_LEVEL = 0,
+ IONIC_V1_EQE_SQ_DRAIN = 1,
+ IONIC_V1_EQE_QP_COMM_EST = 2,
+ IONIC_V1_EQE_QP_LAST_WQE = 3,
+ /* qp error events */
+ IONIC_V1_EQE_QP_ERR = 8,
+ IONIC_V1_EQE_QP_ERR_REQUEST = 9,
+ IONIC_V1_EQE_QP_ERR_ACCESS = 10,
+};
+
+enum ionic_tfp_csum_profiles {
+ IONIC_TFP_CSUM_PROF_ETH_IPV4_UDP = 0,
+ IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV4_UDP = 1,
+ IONIC_TFP_CSUM_PROF_ETH_IPV6_UDP = 2,
+ IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV6_UDP = 3,
+ IONIC_TFP_CSUM_PROF_IPV4_UDP_VXLAN_ETH_QTAG_IPV4_UDP = 4,
+ IONIC_TFP_CSUM_PROF_IPV4_UDP_VXLAN_ETH_QTAG_IPV6_UDP = 5,
+ IONIC_TFP_CSUM_PROF_QTAG_IPV4_UDP_VXLAN_ETH_QTAG_IPV4_UDP = 6,
+ IONIC_TFP_CSUM_PROF_QTAG_IPV4_UDP_VXLAN_ETH_QTAG_IPV6_UDP = 7,
+ IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV4_UDP_ESP_IPV4_UDP = 8,
+ IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV4_ESP_UDP = 9,
+ IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV4_UDP_ESP_UDP = 10,
+ IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV6_ESP_UDP = 11,
+ IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV4_UDP_CSUM = 12,
+};
+
+static inline bool ionic_v1_eqe_color(struct ionic_v1_eqe *eqe)
+{
+ return eqe->evt & cpu_to_be32(IONIC_V1_EQE_COLOR);
+}
+
+static inline u32 ionic_v1_eqe_evt(struct ionic_v1_eqe *eqe)
+{
+ return be32_to_cpu(eqe->evt);
+}
+
+static inline u8 ionic_v1_eqe_evt_type(u32 evt)
+{
+ return (evt >> IONIC_V1_EQE_TYPE_SHIFT) & IONIC_V1_EQE_TYPE_MASK;
+}
+
+static inline u8 ionic_v1_eqe_evt_code(u32 evt)
+{
+ return (evt >> IONIC_V1_EQE_CODE_SHIFT) & IONIC_V1_EQE_CODE_MASK;
+}
+
+static inline u32 ionic_v1_eqe_evt_qid(u32 evt)
+{
+ return evt >> IONIC_V1_EQE_QID_SHIFT;
+}
+
+enum ionic_v1_stat_bits {
+ IONIC_V1_STAT_TYPE_SHIFT = 28,
+ IONIC_V1_STAT_TYPE_NONE = 0,
+ IONIC_V1_STAT_TYPE_8 = 1,
+ IONIC_V1_STAT_TYPE_LE16 = 2,
+ IONIC_V1_STAT_TYPE_LE32 = 3,
+ IONIC_V1_STAT_TYPE_LE64 = 4,
+ IONIC_V1_STAT_TYPE_BE16 = 5,
+ IONIC_V1_STAT_TYPE_BE32 = 6,
+ IONIC_V1_STAT_TYPE_BE64 = 7,
+ IONIC_V1_STAT_OFF_MASK = BIT(IONIC_V1_STAT_TYPE_SHIFT) - 1,
+};
+
+struct ionic_v1_stat {
+ union {
+ __be32 be_type_off;
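+/* mr lkey/rkey layout: table index in the high 24 bits, 8-bit key in the low byte */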
+ u32 type_off;
+ };
+ char name[28];
+};
+
+static inline int ionic_v1_stat_type(struct ionic_v1_stat *hdr)
+{
+ return hdr->type_off >> IONIC_V1_STAT_TYPE_SHIFT;
+}
+
+static inline unsigned int ionic_v1_stat_off(struct ionic_v1_stat *hdr)
+{
+ return hdr->type_off & IONIC_V1_STAT_OFF_MASK;
+}
+
+#endif /* _IONIC_FW_H_ */
diff --git a/drivers/infiniband/hw/ionic/ionic_hw_stats.c b/drivers/infiniband/hw/ionic/ionic_hw_stats.c
new file mode 100644
index 000000000000..244a80dde08f
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/ionic_hw_stats.c
@@ -0,0 +1,484 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#include <linux/dma-mapping.h>
+
+#include "ionic_fw.h"
+#include "ionic_ibdev.h"
+
+static int ionic_v1_stat_normalize(struct ionic_v1_stat *hw_stats,
+ int hw_stats_count)
+{
+ int hw_stat_i;
+
+ for (hw_stat_i = 0; hw_stat_i < hw_stats_count; ++hw_stat_i) {
+ struct ionic_v1_stat *stat = &hw_stats[hw_stat_i];
+
+ stat->type_off = be32_to_cpu(stat->be_type_off);
+ stat->name[sizeof(stat->name) - 1] = 0;
+ if (ionic_v1_stat_type(stat) == IONIC_V1_STAT_TYPE_NONE)
+ break;
+ }
+
+ return hw_stat_i;
+}
+
+static void ionic_fill_stats_desc(struct rdma_stat_desc *hw_stats_hdrs,
+ struct ionic_v1_stat *hw_stats,
+ int hw_stats_count)
+{
+ int hw_stat_i;
+
+ for (hw_stat_i = 0; hw_stat_i < hw_stats_count; ++hw_stat_i) {
+ struct ionic_v1_stat *stat = &hw_stats[hw_stat_i];
+
+ hw_stats_hdrs[hw_stat_i].name = stat->name;
+ }
+}
+
+static u64 ionic_v1_stat_val(struct ionic_v1_stat *stat,
+ void *vals_buf, size_t vals_len)
+{
+ unsigned int off = ionic_v1_stat_off(stat);
+ int type = ionic_v1_stat_type(stat);
+
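+/* the value must lie within the buffer and be naturally aligned */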
+#define __ionic_v1_stat_validate(__type) \
+ ((off + sizeof(__type) <= vals_len) && \
+ (IS_ALIGNED(off, sizeof(__type))))
+
+ switch (type) {
+ case IONIC_V1_STAT_TYPE_8:
+ if (__ionic_v1_stat_validate(u8))
+ return *(u8 *)(vals_buf + off);
+ break;
+ case IONIC_V1_STAT_TYPE_LE16:
+ if (__ionic_v1_stat_validate(__le16))
+ return le16_to_cpu(*(__le16 *)(vals_buf + off));
+ break;
+ case IONIC_V1_STAT_TYPE_LE32:
+ if (__ionic_v1_stat_validate(__le32))
+ return le32_to_cpu(*(__le32 *)(vals_buf + off));
+ break;
+ case IONIC_V1_STAT_TYPE_LE64:
+ if (__ionic_v1_stat_validate(__le64))
+ return le64_to_cpu(*(__le64 *)(vals_buf + off));
+ break;
+ case IONIC_V1_STAT_TYPE_BE16:
+ if (__ionic_v1_stat_validate(__be16))
+ return be16_to_cpu(*(__be16 *)(vals_buf + off));
+ break;
+ case IONIC_V1_STAT_TYPE_BE32:
+ if (__ionic_v1_stat_validate(__be32))
+ return be32_to_cpu(*(__be32 *)(vals_buf + off));
+ break;
+ case IONIC_V1_STAT_TYPE_BE64:
+ if (__ionic_v1_stat_validate(__be64))
+ return be64_to_cpu(*(__be64 *)(vals_buf + off));
+ break;
+ }
+
+ return ~0ull;
+#undef __ionic_v1_stat_validate
+}
+
+static int ionic_hw_stats_cmd(struct ionic_ibdev *dev,
+ dma_addr_t dma, size_t len, int qid, int op)
+{
+ struct ionic_admin_wr wr = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
+ .wqe = {
+ .op = op,
+ .len = cpu_to_le16(IONIC_ADMIN_STATS_HDRS_IN_V1_LEN),
+ .cmd.stats = {
+ .dma_addr = cpu_to_le64(dma),
+ .length = cpu_to_le32(len),
+ .id_ver = cpu_to_le32(qid),
+ },
+ }
+ };
+
+ if (dev->lif_cfg.admin_opcodes <= op)
+ return -EBADRQC;
+
+ ionic_admin_post(dev, &wr);
+
+ return ionic_admin_wait(dev, &wr, IONIC_ADMIN_F_INTERRUPT);
+}
+
+static int ionic_init_hw_stats(struct ionic_ibdev *dev)
+{
+ dma_addr_t hw_stats_dma;
+ int rc, hw_stats_count;
+
+ if (dev->hw_stats_hdrs)
+ return 0;
+
+ dev->hw_stats_count = 0;
+
+ /* buffer for current values from the device */
+ dev->hw_stats_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!dev->hw_stats_buf) {
+ rc = -ENOMEM;
+ goto err_buf;
+ }
+
+ /* buffer for names, sizes, offsets of values */
+ dev->hw_stats = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!dev->hw_stats) {
+ rc = -ENOMEM;
+ goto err_hw_stats;
+ }
+
+ /* request the names, sizes, offsets */
+ hw_stats_dma = dma_map_single(dev->lif_cfg.hwdev, dev->hw_stats,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ rc = dma_mapping_error(dev->lif_cfg.hwdev, hw_stats_dma);
+ if (rc)
+ goto err_dma;
+
+ rc = ionic_hw_stats_cmd(dev, hw_stats_dma, PAGE_SIZE, 0,
+ IONIC_V1_ADMIN_STATS_HDRS);
+ if (rc)
+ goto err_cmd;
+
+ dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+
+ /* normalize and count the number of hw_stats */
+ hw_stats_count =
+ ionic_v1_stat_normalize(dev->hw_stats,
+ PAGE_SIZE / sizeof(*dev->hw_stats));
+ if (!hw_stats_count) {
+ rc = -ENODATA;
+ goto err_dma;
+ }
+
+ dev->hw_stats_count = hw_stats_count;
+
+ /* alloc and init array of names, for alloc_hw_stats */
+ dev->hw_stats_hdrs = kcalloc(hw_stats_count,
+ sizeof(*dev->hw_stats_hdrs),
+ GFP_KERNEL);
+ if (!dev->hw_stats_hdrs) {
+ rc = -ENOMEM;
+ goto err_dma;
+ }
+
+ ionic_fill_stats_desc(dev->hw_stats_hdrs, dev->hw_stats,
+ hw_stats_count);
+
+ return 0;
+
+err_cmd:
+ dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+err_dma:
+ kfree(dev->hw_stats);
+err_hw_stats:
+ kfree(dev->hw_stats_buf);
+err_buf:
+ dev->hw_stats_count = 0;
+ dev->hw_stats = NULL;
+ dev->hw_stats_buf = NULL;
+ dev->hw_stats_hdrs = NULL;
+ return rc;
+}
+
+static struct rdma_hw_stats *ionic_alloc_hw_stats(struct ib_device *ibdev,
+ u32 port)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibdev);
+
+ if (port != 1)
+ return NULL;
+
+ return rdma_alloc_hw_stats_struct(dev->hw_stats_hdrs,
+ dev->hw_stats_count,
+ RDMA_HW_STATS_DEFAULT_LIFESPAN);
+}
+
+static int ionic_get_hw_stats(struct ib_device *ibdev,
+ struct rdma_hw_stats *hw_stats,
+ u32 port, int index)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibdev);
+ dma_addr_t hw_stats_dma;
+ int rc, hw_stat_i;
+
+ if (port != 1)
+ return -EINVAL;
+
+ hw_stats_dma = dma_map_single(dev->lif_cfg.hwdev, dev->hw_stats_buf,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ rc = dma_mapping_error(dev->lif_cfg.hwdev, hw_stats_dma);
+ if (rc)
+ goto err_dma;
+
+ rc = ionic_hw_stats_cmd(dev, hw_stats_dma, PAGE_SIZE,
+ 0, IONIC_V1_ADMIN_STATS_VALS);
+ if (rc)
+ goto err_cmd;
+
+ dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+
+ for (hw_stat_i = 0; hw_stat_i < dev->hw_stats_count; ++hw_stat_i)
+ hw_stats->value[hw_stat_i] =
+ ionic_v1_stat_val(&dev->hw_stats[hw_stat_i],
+ dev->hw_stats_buf, PAGE_SIZE);
+
+ return hw_stat_i;
+
+err_cmd:
+ dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+err_dma:
+ return rc;
+}
+
+static struct rdma_hw_stats *
+ionic_counter_alloc_stats(struct rdma_counter *counter)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(counter->device);
+ struct ionic_counter *cntr;
+ int err;
+
+ cntr = kzalloc(sizeof(*cntr), GFP_KERNEL);
+ if (!cntr)
+ return NULL;
+
+ /* buffer for current values from the device */
+ cntr->vals = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!cntr->vals)
+ goto err_vals;
+
+ err = xa_alloc(&dev->counter_stats->xa_counters, &counter->id,
+ cntr,
+ XA_LIMIT(0, IONIC_MAX_QPID),
+ GFP_KERNEL);
+ if (err)
+ goto err_xa;
+
+ INIT_LIST_HEAD(&cntr->qp_list);
+
+ return rdma_alloc_hw_stats_struct(dev->counter_stats->stats_hdrs,
+ dev->counter_stats->queue_stats_count,
+ RDMA_HW_STATS_DEFAULT_LIFESPAN);
+err_xa:
+ kfree(cntr->vals);
+err_vals:
+ kfree(cntr);
+
+ return NULL;
+}
+
+static int ionic_counter_dealloc(struct rdma_counter *counter)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(counter->device);
+ struct ionic_counter *cntr;
+
+ cntr = xa_erase(&dev->counter_stats->xa_counters, counter->id);
+ if (!cntr)
+ return -EINVAL;
+
+ kfree(cntr->vals);
+ kfree(cntr);
+
+ return 0;
+}
+
+static int ionic_counter_bind_qp(struct rdma_counter *counter,
+ struct ib_qp *ibqp,
+ u32 port)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(counter->device);
+ struct ionic_qp *qp = to_ionic_qp(ibqp);
+ struct ionic_counter *cntr;
+
+ cntr = xa_load(&dev->counter_stats->xa_counters, counter->id);
+ if (!cntr)
+ return -EINVAL;
+
+ list_add_tail(&qp->qp_list_counter, &cntr->qp_list);
+ ibqp->counter = counter;
+
+ return 0;
+}
+
+static int ionic_counter_unbind_qp(struct ib_qp *ibqp, u32 port)
+{
+ struct ionic_qp *qp = to_ionic_qp(ibqp);
+
+ if (ibqp->counter) {
+ list_del(&qp->qp_list_counter);
+ ibqp->counter = NULL;
+ }
+
+ return 0;
+}
+
+static int ionic_get_qp_stats(struct ib_device *ibdev,
+ struct rdma_hw_stats *hw_stats,
+ u32 counter_id)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibdev);
+ struct ionic_counter_stats *cs;
+ struct ionic_counter *cntr;
+ dma_addr_t hw_stats_dma;
+ struct ionic_qp *qp;
+ int rc, stat_i = 0;
+
+ cs = dev->counter_stats;
+ cntr = xa_load(&cs->xa_counters, counter_id);
+ if (!cntr)
+ return -EINVAL;
+
+ hw_stats_dma = dma_map_single(dev->lif_cfg.hwdev, cntr->vals,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ rc = dma_mapping_error(dev->lif_cfg.hwdev, hw_stats_dma);
+ if (rc)
+ return rc;
+
+ memset(hw_stats->value, 0, sizeof(u64) * hw_stats->num_counters);
+
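+	/* sum the per-qp values across every qp bound to this counter */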
+ list_for_each_entry(qp, &cntr->qp_list, qp_list_counter) {
+ rc = ionic_hw_stats_cmd(dev, hw_stats_dma, PAGE_SIZE,
+ qp->qpid,
+ IONIC_V1_ADMIN_QP_STATS_VALS);
+ if (rc)
+ goto err_cmd;
+
+ for (stat_i = 0; stat_i < cs->queue_stats_count; ++stat_i)
+ hw_stats->value[stat_i] +=
+ ionic_v1_stat_val(&cs->hdr[stat_i],
+ cntr->vals,
+ PAGE_SIZE);
+ }
+
+ dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+ return stat_i;
+
+err_cmd:
+ dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+
+ return rc;
+}
+
+static int ionic_counter_update_stats(struct rdma_counter *counter)
+{
+ return ionic_get_qp_stats(counter->device, counter->stats, counter->id);
+}
+
+static int ionic_alloc_counters(struct ionic_ibdev *dev)
+{
+ struct ionic_counter_stats *cs = dev->counter_stats;
+ int rc, hw_stats_count;
+ dma_addr_t hdr_dma;
+
+ /* buffer for names, sizes, offsets of values */
+ cs->hdr = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!cs->hdr)
+ return -ENOMEM;
+
+ hdr_dma = dma_map_single(dev->lif_cfg.hwdev, cs->hdr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ rc = dma_mapping_error(dev->lif_cfg.hwdev, hdr_dma);
+ if (rc)
+ goto err_dma;
+
+ rc = ionic_hw_stats_cmd(dev, hdr_dma, PAGE_SIZE, 0,
+ IONIC_V1_ADMIN_QP_STATS_HDRS);
+ if (rc)
+ goto err_cmd;
+
+ dma_unmap_single(dev->lif_cfg.hwdev, hdr_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+
+ /* normalize and count the number of hw_stats */
+ hw_stats_count = ionic_v1_stat_normalize(cs->hdr,
+ PAGE_SIZE / sizeof(*cs->hdr));
+ if (!hw_stats_count) {
+ rc = -ENODATA;
+ goto err_dma;
+ }
+
+ cs->queue_stats_count = hw_stats_count;
+
+ /* alloc and init array of names */
+ cs->stats_hdrs = kcalloc(hw_stats_count, sizeof(*cs->stats_hdrs),
+ GFP_KERNEL);
+ if (!cs->stats_hdrs) {
+ rc = -ENOMEM;
+ goto err_dma;
+ }
+
+ ionic_fill_stats_desc(cs->stats_hdrs, cs->hdr, hw_stats_count);
+
+ return 0;
+
+err_cmd:
+ dma_unmap_single(dev->lif_cfg.hwdev, hdr_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+err_dma:
+ kfree(cs->hdr);
+
+ return rc;
+}
+
+static const struct ib_device_ops ionic_hw_stats_ops = {
+ .driver_id = RDMA_DRIVER_IONIC,
+ .alloc_hw_port_stats = ionic_alloc_hw_stats,
+ .get_hw_stats = ionic_get_hw_stats,
+};
+
+static const struct ib_device_ops ionic_counter_stats_ops = {
+ .counter_alloc_stats = ionic_counter_alloc_stats,
+ .counter_dealloc = ionic_counter_dealloc,
+ .counter_bind_qp = ionic_counter_bind_qp,
+ .counter_unbind_qp = ionic_counter_unbind_qp,
+ .counter_update_stats = ionic_counter_update_stats,
+};
+
+void ionic_stats_init(struct ionic_ibdev *dev)
+{
+ u16 stats_type = dev->lif_cfg.stats_type;
+ int rc;
+
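+	/* global port stats and per-qp counters are advertised independently */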
+ if (stats_type & IONIC_LIF_RDMA_STAT_GLOBAL) {
+ rc = ionic_init_hw_stats(dev);
+ if (rc)
+ ibdev_dbg(&dev->ibdev, "Failed to init hw stats\n");
+ else
+ ib_set_device_ops(&dev->ibdev, &ionic_hw_stats_ops);
+ }
+
+ if (stats_type & IONIC_LIF_RDMA_STAT_QP) {
+ dev->counter_stats = kzalloc(sizeof(*dev->counter_stats),
+ GFP_KERNEL);
+ if (!dev->counter_stats)
+ return;
+
+ rc = ionic_alloc_counters(dev);
+ if (rc) {
+ ibdev_dbg(&dev->ibdev, "Failed to init counter stats\n");
+ kfree(dev->counter_stats);
+ dev->counter_stats = NULL;
+ return;
+ }
+
+ xa_init_flags(&dev->counter_stats->xa_counters, XA_FLAGS_ALLOC);
+
+ ib_set_device_ops(&dev->ibdev, &ionic_counter_stats_ops);
+ }
+}
+
+void ionic_stats_cleanup(struct ionic_ibdev *dev)
+{
+ if (dev->counter_stats) {
+ xa_destroy(&dev->counter_stats->xa_counters);
+ kfree(dev->counter_stats->hdr);
+ kfree(dev->counter_stats->stats_hdrs);
+ kfree(dev->counter_stats);
+ dev->counter_stats = NULL;
+ }
+
+ kfree(dev->hw_stats);
+ kfree(dev->hw_stats_buf);
+ kfree(dev->hw_stats_hdrs);
+}
diff --git a/drivers/infiniband/hw/ionic/ionic_ibdev.c b/drivers/infiniband/hw/ionic/ionic_ibdev.c
new file mode 100644
index 000000000000..164046d00e5d
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/ionic_ibdev.c
@@ -0,0 +1,440 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/pci.h>
+#include <linux/irq.h>
+#include <net/addrconf.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_mad.h>
+
+#include "ionic_ibdev.h"
+
+#define DRIVER_DESCRIPTION "AMD Pensando RoCE HCA driver"
+#define DEVICE_DESCRIPTION "AMD Pensando RoCE HCA"
+
+MODULE_AUTHOR("Allen Hubbe <allen.hubbe@amd.com>");
+MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("NET_IONIC");
+
+static int ionic_query_device(struct ib_device *ibdev,
+ struct ib_device_attr *attr,
+ struct ib_udata *udata)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibdev);
+ struct net_device *ndev;
+
+ ndev = ib_device_get_netdev(ibdev, 1);
+ addrconf_ifid_eui48((u8 *)&attr->sys_image_guid, ndev);
+ dev_put(ndev);
+ attr->max_mr_size = dev->lif_cfg.npts_per_lif * PAGE_SIZE / 2;
+ attr->page_size_cap = dev->lif_cfg.page_size_supported;
+
+ attr->vendor_id = to_pci_dev(dev->lif_cfg.hwdev)->vendor;
+ attr->vendor_part_id = to_pci_dev(dev->lif_cfg.hwdev)->device;
+
+ attr->hw_ver = ionic_lif_asic_rev(dev->lif_cfg.lif);
+ attr->fw_ver = 0;
+ attr->max_qp = dev->lif_cfg.qp_count;
+ attr->max_qp_wr = IONIC_MAX_DEPTH;
+ attr->device_cap_flags =
+ IB_DEVICE_MEM_WINDOW |
+ IB_DEVICE_MEM_MGT_EXTENSIONS |
+ IB_DEVICE_MEM_WINDOW_TYPE_2B |
+ 0;
+ attr->max_send_sge =
+ min(ionic_v1_send_wqe_max_sge(dev->lif_cfg.max_stride, 0, false),
+ IONIC_SPEC_HIGH);
+ attr->max_recv_sge =
+ min(ionic_v1_recv_wqe_max_sge(dev->lif_cfg.max_stride, 0, false),
+ IONIC_SPEC_HIGH);
+ attr->max_sge_rd = attr->max_send_sge;
+ attr->max_cq = dev->lif_cfg.cq_count / dev->lif_cfg.udma_count;
+ attr->max_cqe = IONIC_MAX_CQ_DEPTH - IONIC_CQ_GRACE;
+ attr->max_mr = dev->lif_cfg.nmrs_per_lif;
+ attr->max_pd = IONIC_MAX_PD;
+ attr->max_qp_rd_atom = IONIC_MAX_RD_ATOM;
+ attr->max_ee_rd_atom = 0;
+ attr->max_res_rd_atom = IONIC_MAX_RD_ATOM;
+ attr->max_qp_init_rd_atom = IONIC_MAX_RD_ATOM;
+ attr->max_ee_init_rd_atom = 0;
+ attr->atomic_cap = IB_ATOMIC_GLOB;
+ attr->masked_atomic_cap = IB_ATOMIC_GLOB;
+ attr->max_mw = dev->lif_cfg.nmrs_per_lif;
+ attr->max_mcast_grp = 0;
+ attr->max_mcast_qp_attach = 0;
+ attr->max_ah = dev->lif_cfg.nahs_per_lif;
+ attr->max_fast_reg_page_list_len = dev->lif_cfg.npts_per_lif / 2;
+ attr->max_pkeys = IONIC_PKEY_TBL_LEN;
+
+ return 0;
+}
+
+static int ionic_query_port(struct ib_device *ibdev, u32 port,
+ struct ib_port_attr *attr)
+{
+ struct net_device *ndev;
+
+ if (port != 1)
+ return -EINVAL;
+
+ ndev = ib_device_get_netdev(ibdev, port);
+
+ if (netif_running(ndev) && netif_carrier_ok(ndev)) {
+ attr->state = IB_PORT_ACTIVE;
+ attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
+ } else if (netif_running(ndev)) {
+ attr->state = IB_PORT_DOWN;
+ attr->phys_state = IB_PORT_PHYS_STATE_POLLING;
+ } else {
+ attr->state = IB_PORT_DOWN;
+ attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
+ }
+
+ attr->max_mtu = iboe_get_mtu(ndev->max_mtu);
+ attr->active_mtu = min(attr->max_mtu, iboe_get_mtu(ndev->mtu));
+ attr->gid_tbl_len = IONIC_GID_TBL_LEN;
+ attr->ip_gids = true;
+ attr->port_cap_flags = 0;
+ attr->max_msg_sz = 0x80000000;
+ attr->pkey_tbl_len = IONIC_PKEY_TBL_LEN;
+ attr->max_vl_num = 1;
+ attr->subnet_prefix = 0xfe80000000000000ull;
+
+ dev_put(ndev);
+
+ return ib_get_eth_speed(ibdev, port,
+ &attr->active_speed,
+ &attr->active_width);
+}
+
+static enum rdma_link_layer ionic_get_link_layer(struct ib_device *ibdev,
+ u32 port)
+{
+ return IB_LINK_LAYER_ETHERNET;
+}
+
+static int ionic_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
+ u16 *pkey)
+{
+ if (port != 1)
+ return -EINVAL;
+
+ if (index != 0)
+ return -EINVAL;
+
+ *pkey = IB_DEFAULT_PKEY_FULL;
+
+ return 0;
+}
+
+static int ionic_modify_device(struct ib_device *ibdev, int mask,
+ struct ib_device_modify *attr)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibdev);
+
+ if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
+ return -EOPNOTSUPP;
+
+ if (mask & IB_DEVICE_MODIFY_NODE_DESC)
+ memcpy(dev->ibdev.node_desc, attr->node_desc,
+ IB_DEVICE_NODE_DESC_MAX);
+
+ return 0;
+}
+
+static int ionic_get_port_immutable(struct ib_device *ibdev, u32 port,
+ struct ib_port_immutable *attr)
+{
+ if (port != 1)
+ return -EINVAL;
+
+ attr->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
+
+ attr->pkey_tbl_len = IONIC_PKEY_TBL_LEN;
+ attr->gid_tbl_len = IONIC_GID_TBL_LEN;
+ attr->max_mad_size = IB_MGMT_MAD_SIZE;
+
+ return 0;
+}
+
+static void ionic_get_dev_fw_str(struct ib_device *ibdev, char *str)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibdev);
+
+ ionic_lif_fw_version(dev->lif_cfg.lif, str, IB_FW_VERSION_NAME_MAX);
+}
+
+static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ struct ionic_ibdev *dev =
+ rdma_device_to_drv_device(device, struct ionic_ibdev, ibdev);
+
+ return sysfs_emit(buf, "0x%x\n", ionic_lif_asic_rev(dev->lif_cfg.lif));
+}
+static DEVICE_ATTR_RO(hw_rev);
+
+static ssize_t hca_type_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct ionic_ibdev *dev =
+ rdma_device_to_drv_device(device, struct ionic_ibdev, ibdev);
+
+ return sysfs_emit(buf, "%s\n", dev->ibdev.node_desc);
+}
+static DEVICE_ATTR_RO(hca_type);
+
+static struct attribute *ionic_rdma_attributes[] = {
+ &dev_attr_hw_rev.attr,
+ &dev_attr_hca_type.attr,
+ NULL
+};
+
+static const struct attribute_group ionic_rdma_attr_group = {
+ .attrs = ionic_rdma_attributes,
+};
+
+static void ionic_disassociate_ucontext(struct ib_ucontext *ibcontext)
+{
+	/*
+	 * Dummy disassociate_ucontext so that the core does not wait for
+	 * the user context to exit before cleaning up hw resources.
+	 */
+}
+
+static const struct ib_device_ops ionic_dev_ops = {
+ .owner = THIS_MODULE,
+ .driver_id = RDMA_DRIVER_IONIC,
+ .uverbs_abi_ver = IONIC_ABI_VERSION,
+
+ .alloc_ucontext = ionic_alloc_ucontext,
+ .dealloc_ucontext = ionic_dealloc_ucontext,
+ .mmap = ionic_mmap,
+ .mmap_free = ionic_mmap_free,
+ .alloc_pd = ionic_alloc_pd,
+ .dealloc_pd = ionic_dealloc_pd,
+ .create_ah = ionic_create_ah,
+ .query_ah = ionic_query_ah,
+ .destroy_ah = ionic_destroy_ah,
+ .create_user_ah = ionic_create_ah,
+ .get_dma_mr = ionic_get_dma_mr,
+ .reg_user_mr = ionic_reg_user_mr,
+ .reg_user_mr_dmabuf = ionic_reg_user_mr_dmabuf,
+ .dereg_mr = ionic_dereg_mr,
+ .alloc_mr = ionic_alloc_mr,
+ .map_mr_sg = ionic_map_mr_sg,
+ .alloc_mw = ionic_alloc_mw,
+ .dealloc_mw = ionic_dealloc_mw,
+ .create_cq = ionic_create_cq,
+ .destroy_cq = ionic_destroy_cq,
+ .create_qp = ionic_create_qp,
+ .modify_qp = ionic_modify_qp,
+ .query_qp = ionic_query_qp,
+ .destroy_qp = ionic_destroy_qp,
+
+ .post_send = ionic_post_send,
+ .post_recv = ionic_post_recv,
+ .poll_cq = ionic_poll_cq,
+ .req_notify_cq = ionic_req_notify_cq,
+
+ .query_device = ionic_query_device,
+ .query_port = ionic_query_port,
+ .get_link_layer = ionic_get_link_layer,
+ .query_pkey = ionic_query_pkey,
+ .modify_device = ionic_modify_device,
+ .get_port_immutable = ionic_get_port_immutable,
+ .get_dev_fw_str = ionic_get_dev_fw_str,
+ .device_group = &ionic_rdma_attr_group,
+ .disassociate_ucontext = ionic_disassociate_ucontext,
+
+ INIT_RDMA_OBJ_SIZE(ib_ucontext, ionic_ctx, ibctx),
+ INIT_RDMA_OBJ_SIZE(ib_pd, ionic_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_ah, ionic_ah, ibah),
+ INIT_RDMA_OBJ_SIZE(ib_cq, ionic_vcq, ibcq),
+ INIT_RDMA_OBJ_SIZE(ib_qp, ionic_qp, ibqp),
+ INIT_RDMA_OBJ_SIZE(ib_mw, ionic_mr, ibmw),
+};
+
+static void ionic_init_resids(struct ionic_ibdev *dev)
+{
+ ionic_resid_init(&dev->inuse_cqid, dev->lif_cfg.cq_count);
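+	/* cqid and qpid spaces are split across the udma pipes; the shift picks a per-pipe half */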
+ dev->half_cqid_udma_shift =
+ order_base_2(dev->lif_cfg.cq_count / dev->lif_cfg.udma_count);
+ ionic_resid_init(&dev->inuse_pdid, IONIC_MAX_PD);
+ ionic_resid_init(&dev->inuse_ahid, dev->lif_cfg.nahs_per_lif);
+ ionic_resid_init(&dev->inuse_mrid, dev->lif_cfg.nmrs_per_lif);
+ /* skip reserved lkey */
+ dev->next_mrkey = 1;
+ ionic_resid_init(&dev->inuse_qpid, dev->lif_cfg.qp_count);
+ /* skip reserved SMI and GSI qpids */
+ dev->half_qpid_udma_shift =
+ order_base_2(dev->lif_cfg.qp_count / dev->lif_cfg.udma_count);
+ ionic_resid_init(&dev->inuse_dbid, dev->lif_cfg.dbid_count);
+}
+
+static void ionic_destroy_resids(struct ionic_ibdev *dev)
+{
+ ionic_resid_destroy(&dev->inuse_cqid);
+ ionic_resid_destroy(&dev->inuse_pdid);
+ ionic_resid_destroy(&dev->inuse_ahid);
+ ionic_resid_destroy(&dev->inuse_mrid);
+ ionic_resid_destroy(&dev->inuse_qpid);
+ ionic_resid_destroy(&dev->inuse_dbid);
+}
+
+static void ionic_destroy_ibdev(struct ionic_ibdev *dev)
+{
+ ionic_kill_rdma_admin(dev, false);
+ ib_unregister_device(&dev->ibdev);
+ ionic_stats_cleanup(dev);
+ ionic_destroy_rdma_admin(dev);
+ ionic_destroy_resids(dev);
+ WARN_ON(!xa_empty(&dev->qp_tbl));
+ xa_destroy(&dev->qp_tbl);
+ WARN_ON(!xa_empty(&dev->cq_tbl));
+ xa_destroy(&dev->cq_tbl);
+ ib_dealloc_device(&dev->ibdev);
+}
+
+static struct ionic_ibdev *ionic_create_ibdev(struct ionic_aux_dev *ionic_adev)
+{
+ struct ib_device *ibdev;
+ struct ionic_ibdev *dev;
+ struct net_device *ndev;
+ int rc;
+
+ dev = ib_alloc_device(ionic_ibdev, ibdev);
+ if (!dev)
+		return ERR_PTR(-ENOMEM);
+
+ ionic_fill_lif_cfg(ionic_adev->lif, &dev->lif_cfg);
+
+ xa_init_flags(&dev->qp_tbl, GFP_ATOMIC);
+ xa_init_flags(&dev->cq_tbl, GFP_ATOMIC);
+
+ ionic_init_resids(dev);
+
+ rc = ionic_rdma_reset_devcmd(dev);
+ if (rc)
+ goto err_reset;
+
+ rc = ionic_create_rdma_admin(dev);
+ if (rc)
+ goto err_admin;
+
+ ibdev = &dev->ibdev;
+ ibdev->dev.parent = dev->lif_cfg.hwdev;
+
+ strscpy(ibdev->name, "ionic_%d", IB_DEVICE_NAME_MAX);
+ strscpy(ibdev->node_desc, DEVICE_DESCRIPTION, IB_DEVICE_NODE_DESC_MAX);
+
+ ibdev->node_type = RDMA_NODE_IB_CA;
+ ibdev->phys_port_cnt = 1;
+
+	/* the first two eqs are reserved for async events */
+ ibdev->num_comp_vectors = dev->lif_cfg.eq_count - 2;
+
+ ndev = ionic_lif_netdev(ionic_adev->lif);
+ addrconf_ifid_eui48((u8 *)&ibdev->node_guid, ndev);
+ rc = ib_device_set_netdev(ibdev, ndev, 1);
+ /* ionic_lif_netdev() returns ndev with refcount held */
+ dev_put(ndev);
+ if (rc)
+ goto err_admin;
+
+ ib_set_device_ops(&dev->ibdev, &ionic_dev_ops);
+
+ ionic_stats_init(dev);
+
+ rc = ib_register_device(ibdev, "ionic_%d", ibdev->dev.parent);
+ if (rc)
+ goto err_register;
+
+ return dev;
+
+err_register:
+ ionic_stats_cleanup(dev);
+err_admin:
+ ionic_kill_rdma_admin(dev, false);
+ ionic_destroy_rdma_admin(dev);
+err_reset:
+ ionic_destroy_resids(dev);
+ xa_destroy(&dev->qp_tbl);
+ xa_destroy(&dev->cq_tbl);
+ ib_dealloc_device(&dev->ibdev);
+
+ return ERR_PTR(rc);
+}
+
+static int ionic_aux_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct ionic_aux_dev *ionic_adev;
+ struct ionic_ibdev *dev;
+
+ ionic_adev = container_of(adev, struct ionic_aux_dev, adev);
+ dev = ionic_create_ibdev(ionic_adev);
+ if (IS_ERR(dev))
+ return dev_err_probe(&adev->dev, PTR_ERR(dev),
+ "Failed to register ibdev\n");
+
+ auxiliary_set_drvdata(adev, dev);
+ ibdev_dbg(&dev->ibdev, "registered\n");
+
+ return 0;
+}
+
+static void ionic_aux_remove(struct auxiliary_device *adev)
+{
+ struct ionic_ibdev *dev = auxiliary_get_drvdata(adev);
+
+ dev_dbg(&adev->dev, "unregister ibdev\n");
+ ionic_destroy_ibdev(dev);
+ dev_dbg(&adev->dev, "unregistered\n");
+}
+
+static const struct auxiliary_device_id ionic_aux_id_table[] = {
+ { .name = "ionic.rdma", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(auxiliary, ionic_aux_id_table);
+
+static struct auxiliary_driver ionic_aux_r_driver = {
+ .name = "rdma",
+ .probe = ionic_aux_probe,
+ .remove = ionic_aux_remove,
+ .id_table = ionic_aux_id_table,
+};
+
+static int __init ionic_mod_init(void)
+{
+ int rc;
+
+ ionic_evt_workq = create_workqueue(KBUILD_MODNAME "-evt");
+ if (!ionic_evt_workq)
+ return -ENOMEM;
+
+ rc = auxiliary_driver_register(&ionic_aux_r_driver);
+ if (rc)
+ goto err_aux;
+
+ return 0;
+
+err_aux:
+ destroy_workqueue(ionic_evt_workq);
+
+ return rc;
+}
+
+static void __exit ionic_mod_exit(void)
+{
+ auxiliary_driver_unregister(&ionic_aux_r_driver);
+ destroy_workqueue(ionic_evt_workq);
+}
+
+module_init(ionic_mod_init);
+module_exit(ionic_mod_exit);
diff --git a/drivers/infiniband/hw/ionic/ionic_ibdev.h b/drivers/infiniband/hw/ionic/ionic_ibdev.h
new file mode 100644
index 000000000000..82fda1e3cdb6
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/ionic_ibdev.h
@@ -0,0 +1,517 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#ifndef _IONIC_IBDEV_H_
+#define _IONIC_IBDEV_H_
+
+#include <rdma/ib_umem.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_pack.h>
+#include <rdma/uverbs_ioctl.h>
+
+#include <rdma/ionic-abi.h>
+#include <ionic_api.h>
+#include <ionic_regs.h>
+
+#include "ionic_fw.h"
+#include "ionic_queue.h"
+#include "ionic_res.h"
+
+#include "ionic_lif_cfg.h"
+
+/* Config knobs */
+#define IONIC_EQ_DEPTH 511
+#define IONIC_EQ_COUNT 32
+#define IONIC_AQ_DEPTH 63
+#define IONIC_AQ_COUNT 4
+#define IONIC_EQ_ISR_BUDGET 10
+#define IONIC_EQ_WORK_BUDGET 1000
+#define IONIC_MAX_RD_ATOM 16
+#define IONIC_PKEY_TBL_LEN 1
+#define IONIC_GID_TBL_LEN 256
+
+#define IONIC_MAX_QPID 0xffffff
+#define IONIC_SPEC_HIGH 8
+#define IONIC_MAX_PD 1024
+#define IONIC_SQCMB_ORDER 5
+#define IONIC_RQCMB_ORDER 0
+
+#define IONIC_META_LAST ((void *)1ul)
+#define IONIC_META_POSTED ((void *)2ul)
+
+#define IONIC_CQ_GRACE 100
+
+#define IONIC_ROCE_UDP_SPORT 28272
+#define IONIC_DMA_LKEY 0
+#define IONIC_DMA_RKEY IONIC_DMA_LKEY
+
+#define IONIC_CMB_SUPPORTED \
+ (IONIC_CMB_ENABLE | IONIC_CMB_REQUIRE | IONIC_CMB_EXPDB | \
+ IONIC_CMB_WC | IONIC_CMB_UC)
+
+/* resource is not reserved on the device, indicated in tbl_order */
+#define IONIC_RES_INVALID -1
+
+struct ionic_aq;
+struct ionic_cq;
+struct ionic_eq;
+struct ionic_vcq;
+
+enum ionic_admin_state {
+ IONIC_ADMIN_ACTIVE, /* submitting admin commands to queue */
+ IONIC_ADMIN_PAUSED, /* not submitting, but may complete normally */
+ IONIC_ADMIN_KILLED, /* not submitting, locally completed */
+};
+
+enum ionic_admin_flags {
+ IONIC_ADMIN_F_BUSYWAIT = BIT(0), /* Don't sleep */
+ IONIC_ADMIN_F_TEARDOWN = BIT(1), /* In destroy path */
+ IONIC_ADMIN_F_INTERRUPT = BIT(2), /* Interruptible w/timeout */
+};
+
+enum ionic_mmap_flag {
+ IONIC_MMAP_WC = BIT(0),
+};
+
+struct ionic_mmap_entry {
+ struct rdma_user_mmap_entry rdma_entry;
+ unsigned long size;
+ unsigned long pfn;
+ u8 mmap_flags;
+};
+
+struct ionic_ibdev {
+ struct ib_device ibdev;
+
+ struct ionic_lif_cfg lif_cfg;
+
+ struct xarray qp_tbl;
+ struct xarray cq_tbl;
+
+ struct ionic_resid_bits inuse_dbid;
+ struct ionic_resid_bits inuse_pdid;
+ struct ionic_resid_bits inuse_ahid;
+ struct ionic_resid_bits inuse_mrid;
+ struct ionic_resid_bits inuse_qpid;
+ struct ionic_resid_bits inuse_cqid;
+
+ u8 half_cqid_udma_shift;
+ u8 half_qpid_udma_shift;
+ u8 next_qpid_udma_idx;
+ u8 next_mrkey;
+
+ struct work_struct reset_work;
+ bool reset_posted;
+ u32 reset_cnt;
+
+ struct delayed_work admin_dwork;
+ struct ionic_aq **aq_vec;
+ atomic_t admin_state;
+
+ struct ionic_eq **eq_vec;
+
+ struct ionic_v1_stat *hw_stats;
+ void *hw_stats_buf;
+ struct rdma_stat_desc *hw_stats_hdrs;
+ struct ionic_counter_stats *counter_stats;
+ int hw_stats_count;
+};
+
+struct ionic_eq {
+ struct ionic_ibdev *dev;
+
+ u32 eqid;
+ u32 intr;
+
+ struct ionic_queue q;
+
+ int armed;
+ bool enable;
+
+ struct work_struct work;
+
+ int irq;
+ char name[32];
+};
+
+struct ionic_admin_wr {
+ struct completion work;
+ struct list_head aq_ent;
+ struct ionic_v1_admin_wqe wqe;
+ struct ionic_v1_cqe cqe;
+ struct ionic_aq *aq;
+ int status;
+};
+
+struct ionic_admin_wr_q {
+ struct ionic_admin_wr *wr;
+ int wqe_strides;
+};
+
+struct ionic_aq {
+ struct ionic_ibdev *dev;
+ struct ionic_vcq *vcq;
+
+ struct work_struct work;
+
+ atomic_t admin_state;
+ unsigned long stamp;
+ bool armed;
+
+ u32 aqid;
+ u32 cqid;
+
+ spinlock_t lock; /* for posting */
+ struct ionic_queue q;
+ struct ionic_admin_wr_q *q_wr;
+ struct list_head wr_prod;
+ struct list_head wr_post;
+};
+
+struct ionic_ctx {
+ struct ib_ucontext ibctx;
+ u32 dbid;
+ struct rdma_user_mmap_entry *mmap_dbell;
+};
+
+struct ionic_tbl_buf {
+ u32 tbl_limit;
+ u32 tbl_pages;
+ size_t tbl_size;
+ __le64 *tbl_buf;
+ dma_addr_t tbl_dma;
+ u8 page_size_log2;
+};
+
+struct ionic_pd {
+ struct ib_pd ibpd;
+
+ u32 pdid;
+ u32 flags;
+};
+
+struct ionic_cq {
+ struct ionic_vcq *vcq;
+
+ u32 cqid;
+ u32 eqid;
+
+ spinlock_t lock; /* for polling */
+ struct list_head poll_sq;
+ bool flush;
+ struct list_head flush_sq;
+ struct list_head flush_rq;
+ struct list_head ibkill_flush_ent;
+
+ struct ionic_queue q;
+ bool color;
+ int credit;
+ u16 arm_any_prod;
+ u16 arm_sol_prod;
+
+ struct kref cq_kref;
+ struct completion cq_rel_comp;
+
+ /* infrequently accessed, keep at end */
+ struct ib_umem *umem;
+};
+
+struct ionic_vcq {
+ struct ib_cq ibcq;
+ struct ionic_cq cq[2];
+ u8 udma_mask;
+ u8 poll_idx;
+};
+
+struct ionic_sq_meta {
+ u64 wrid;
+ u32 len;
+ u16 seq;
+ u8 ibop;
+ u8 ibsts;
+ u8 remote:1;
+ u8 signal:1;
+ u8 local_comp:1;
+};
+
+struct ionic_rq_meta {
+ struct ionic_rq_meta *next;
+ u64 wrid;
+};
+
+struct ionic_qp {
+ struct ib_qp ibqp;
+ enum ib_qp_state state;
+
+ u32 qpid;
+ u32 ahid;
+ u32 sq_cqid;
+ u32 rq_cqid;
+ u8 udma_idx;
+ u8 has_ah:1;
+ u8 has_sq:1;
+ u8 has_rq:1;
+ u8 sig_all:1;
+
+ struct list_head qp_list_counter;
+
+ struct list_head cq_poll_sq;
+ struct list_head cq_flush_sq;
+ struct list_head cq_flush_rq;
+ struct list_head ibkill_flush_ent;
+
+ spinlock_t sq_lock; /* for posting and polling */
+ struct ionic_queue sq;
+ struct ionic_sq_meta *sq_meta;
+ u16 *sq_msn_idx;
+ int sq_spec;
+ u16 sq_old_prod;
+ u16 sq_msn_prod;
+ u16 sq_msn_cons;
+ u8 sq_cmb;
+ bool sq_flush;
+ bool sq_flush_rcvd;
+
+ spinlock_t rq_lock; /* for posting and polling */
+ struct ionic_queue rq;
+ struct ionic_rq_meta *rq_meta;
+ struct ionic_rq_meta *rq_meta_head;
+ int rq_spec;
+ u16 rq_old_prod;
+ u8 rq_cmb;
+ bool rq_flush;
+
+ struct kref qp_kref;
+ struct completion qp_rel_comp;
+
+ /* infrequently accessed, keep at end */
+ int sgid_index;
+ int sq_cmb_order;
+ u32 sq_cmb_pgid;
+ phys_addr_t sq_cmb_addr;
+ struct rdma_user_mmap_entry *mmap_sq_cmb;
+
+ struct ib_umem *sq_umem;
+
+ int rq_cmb_order;
+ u32 rq_cmb_pgid;
+ phys_addr_t rq_cmb_addr;
+ struct rdma_user_mmap_entry *mmap_rq_cmb;
+
+ struct ib_umem *rq_umem;
+
+ int dcqcn_profile;
+
+ struct ib_ud_header *hdr;
+};
+
+struct ionic_ah {
+ struct ib_ah ibah;
+ u32 ahid;
+ int sgid_index;
+ struct ib_ud_header hdr;
+};
+
+struct ionic_mr {
+ union {
+ struct ib_mr ibmr;
+ struct ib_mw ibmw;
+ };
+
+ u32 mrid;
+ int flags;
+
+ struct ib_umem *umem;
+ struct ionic_tbl_buf buf;
+ bool created;
+};
+
+struct ionic_counter_stats {
+ int queue_stats_count;
+ struct ionic_v1_stat *hdr;
+ struct rdma_stat_desc *stats_hdrs;
+ struct xarray xa_counters;
+};
+
+struct ionic_counter {
+ void *vals;
+ struct list_head qp_list;
+};
+
+static inline struct ionic_ibdev *to_ionic_ibdev(struct ib_device *ibdev)
+{
+ return container_of(ibdev, struct ionic_ibdev, ibdev);
+}
+
+static inline struct ionic_ctx *to_ionic_ctx(struct ib_ucontext *ibctx)
+{
+ return container_of(ibctx, struct ionic_ctx, ibctx);
+}
+
+static inline struct ionic_ctx *to_ionic_ctx_uobj(struct ib_uobject *uobj)
+{
+ if (!uobj)
+ return NULL;
+
+ if (!uobj->context)
+ return NULL;
+
+ return to_ionic_ctx(uobj->context);
+}
+
+static inline struct ionic_pd *to_ionic_pd(struct ib_pd *ibpd)
+{
+ return container_of(ibpd, struct ionic_pd, ibpd);
+}
+
+static inline struct ionic_mr *to_ionic_mr(struct ib_mr *ibmr)
+{
+ return container_of(ibmr, struct ionic_mr, ibmr);
+}
+
+static inline struct ionic_mr *to_ionic_mw(struct ib_mw *ibmw)
+{
+ return container_of(ibmw, struct ionic_mr, ibmw);
+}
+
+static inline struct ionic_vcq *to_ionic_vcq(struct ib_cq *ibcq)
+{
+ return container_of(ibcq, struct ionic_vcq, ibcq);
+}
+
+static inline struct ionic_cq *to_ionic_vcq_cq(struct ib_cq *ibcq,
+ uint8_t udma_idx)
+{
+ return &to_ionic_vcq(ibcq)->cq[udma_idx];
+}
+
+static inline struct ionic_qp *to_ionic_qp(struct ib_qp *ibqp)
+{
+ return container_of(ibqp, struct ionic_qp, ibqp);
+}
+
+static inline struct ionic_ah *to_ionic_ah(struct ib_ah *ibah)
+{
+ return container_of(ibah, struct ionic_ah, ibah);
+}
+
+static inline u32 ionic_ctx_dbid(struct ionic_ibdev *dev,
+ struct ionic_ctx *ctx)
+{
+ if (!ctx)
+ return dev->lif_cfg.dbid;
+
+ return ctx->dbid;
+}
+
+static inline u32 ionic_obj_dbid(struct ionic_ibdev *dev,
+ struct ib_uobject *uobj)
+{
+ return ionic_ctx_dbid(dev, to_ionic_ctx_uobj(uobj));
+}
+
+static inline bool ionic_ibop_is_local(enum ib_wr_opcode op)
+{
+ return op == IB_WR_LOCAL_INV || op == IB_WR_REG_MR;
+}
+
+static inline void ionic_qp_complete(struct kref *kref)
+{
+ struct ionic_qp *qp = container_of(kref, struct ionic_qp, qp_kref);
+
+ complete(&qp->qp_rel_comp);
+}
+
+static inline void ionic_cq_complete(struct kref *kref)
+{
+ struct ionic_cq *cq = container_of(kref, struct ionic_cq, cq_kref);
+
+ complete(&cq->cq_rel_comp);
+}
+
+/* ionic_admin.c */
+extern struct workqueue_struct *ionic_evt_workq;
+void ionic_admin_post(struct ionic_ibdev *dev, struct ionic_admin_wr *wr);
+int ionic_admin_wait(struct ionic_ibdev *dev, struct ionic_admin_wr *wr,
+ enum ionic_admin_flags);
+
+int ionic_rdma_reset_devcmd(struct ionic_ibdev *dev);
+
+int ionic_create_rdma_admin(struct ionic_ibdev *dev);
+void ionic_destroy_rdma_admin(struct ionic_ibdev *dev);
+void ionic_kill_rdma_admin(struct ionic_ibdev *dev, bool fatal_path);
+
+/* ionic_controlpath.c */
+int ionic_create_cq_common(struct ionic_vcq *vcq,
+ struct ionic_tbl_buf *buf,
+ const struct ib_cq_init_attr *attr,
+ struct ionic_ctx *ctx,
+ struct ib_udata *udata,
+ struct ionic_qdesc *req_cq,
+ __u32 *resp_cqid,
+ int udma_idx);
+void ionic_destroy_cq_common(struct ionic_ibdev *dev, struct ionic_cq *cq);
+void ionic_flush_qp(struct ionic_ibdev *dev, struct ionic_qp *qp);
+void ionic_notify_flush_cq(struct ionic_cq *cq);
+
+int ionic_alloc_ucontext(struct ib_ucontext *ibctx, struct ib_udata *udata);
+void ionic_dealloc_ucontext(struct ib_ucontext *ibctx);
+int ionic_mmap(struct ib_ucontext *ibctx, struct vm_area_struct *vma);
+void ionic_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
+int ionic_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
+int ionic_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
+int ionic_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata);
+int ionic_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
+int ionic_destroy_ah(struct ib_ah *ibah, u32 flags);
+struct ib_mr *ionic_get_dma_mr(struct ib_pd *ibpd, int access);
+struct ib_mr *ionic_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
+ u64 addr, int access, struct ib_dmah *dmah,
+ struct ib_udata *udata);
+struct ib_mr *ionic_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 offset,
+ u64 length, u64 addr, int fd, int access,
+ struct ib_dmah *dmah,
+ struct uverbs_attr_bundle *attrs);
+int ionic_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
+struct ib_mr *ionic_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type type,
+ u32 max_sg);
+int ionic_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset);
+int ionic_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata);
+int ionic_dealloc_mw(struct ib_mw *ibmw);
+int ionic_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ struct uverbs_attr_bundle *attrs);
+int ionic_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
+int ionic_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
+ struct ib_udata *udata);
+int ionic_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int mask,
+ struct ib_udata *udata);
+int ionic_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int mask,
+ struct ib_qp_init_attr *init_attr);
+int ionic_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
+
+/* ionic_datapath.c */
+int ionic_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad);
+int ionic_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad);
+int ionic_poll_cq(struct ib_cq *ibcq, int nwc, struct ib_wc *wc);
+int ionic_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
+
+/* ionic_hw_stats.c */
+void ionic_stats_init(struct ionic_ibdev *dev);
+void ionic_stats_cleanup(struct ionic_ibdev *dev);
+
+/* ionic_pgtbl.c */
+__le64 ionic_pgtbl_dma(struct ionic_tbl_buf *buf, u64 va);
+__be64 ionic_pgtbl_off(struct ionic_tbl_buf *buf, u64 va);
+int ionic_pgtbl_page(struct ionic_tbl_buf *buf, u64 dma);
+int ionic_pgtbl_init(struct ionic_ibdev *dev,
+ struct ionic_tbl_buf *buf,
+ struct ib_umem *umem,
+ dma_addr_t dma,
+ int limit,
+ u64 page_size);
+void ionic_pgtbl_unbuf(struct ionic_ibdev *dev, struct ionic_tbl_buf *buf);
+#endif /* _IONIC_IBDEV_H_ */
diff --git a/drivers/infiniband/hw/ionic/ionic_lif_cfg.c b/drivers/infiniband/hw/ionic/ionic_lif_cfg.c
new file mode 100644
index 000000000000..f3cd281c3a2f
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/ionic_lif_cfg.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#include <linux/kernel.h>
+
+#include <ionic.h>
+#include <ionic_lif.h>
+
+#include "ionic_lif_cfg.h"
+
+#define IONIC_MIN_RDMA_VERSION 0
+#define IONIC_MAX_RDMA_VERSION 2
+
+static u8 ionic_get_expdb(struct ionic_lif *lif)
+{
+ u8 expdb_support = 0;
+
+ if (lif->ionic->idev.phy_cmb_expdb64_pages)
+ expdb_support |= IONIC_EXPDB_64B_WQE;
+ if (lif->ionic->idev.phy_cmb_expdb128_pages)
+ expdb_support |= IONIC_EXPDB_128B_WQE;
+ if (lif->ionic->idev.phy_cmb_expdb256_pages)
+ expdb_support |= IONIC_EXPDB_256B_WQE;
+ if (lif->ionic->idev.phy_cmb_expdb512_pages)
+ expdb_support |= IONIC_EXPDB_512B_WQE;
+
+ return expdb_support;
+}
+
+void ionic_fill_lif_cfg(struct ionic_lif *lif, struct ionic_lif_cfg *cfg)
+{
+ union ionic_lif_identity *ident = &lif->ionic->ident.lif;
+
+ cfg->lif = lif;
+ cfg->hwdev = &lif->ionic->pdev->dev;
+ cfg->lif_index = lif->index;
+ cfg->lif_hw_index = lif->hw_index;
+
+ cfg->dbid = lif->kern_pid;
+ cfg->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif);
+ cfg->dbpage = lif->kern_dbpage;
+ cfg->intr_ctrl = lif->ionic->idev.intr_ctrl;
+
+ cfg->db_phys = lif->ionic->bars[IONIC_PCI_BAR_DBELL].bus_addr;
+
+ if (IONIC_VERSION(ident->rdma.version, ident->rdma.minor_version) >=
+ IONIC_VERSION(2, 1))
+ cfg->page_size_supported =
+ le64_to_cpu(ident->rdma.page_size_cap);
+ else
+ cfg->page_size_supported = IONIC_PAGE_SIZE_SUPPORTED;
+
+ cfg->rdma_version = ident->rdma.version;
+ cfg->qp_opcodes = ident->rdma.qp_opcodes;
+ cfg->admin_opcodes = ident->rdma.admin_opcodes;
+
+ cfg->stats_type = le16_to_cpu(ident->rdma.stats_type);
+ cfg->npts_per_lif = le32_to_cpu(ident->rdma.npts_per_lif);
+ cfg->nmrs_per_lif = le32_to_cpu(ident->rdma.nmrs_per_lif);
+ cfg->nahs_per_lif = le32_to_cpu(ident->rdma.nahs_per_lif);
+
+ cfg->aq_base = le32_to_cpu(ident->rdma.aq_qtype.qid_base);
+ cfg->cq_base = le32_to_cpu(ident->rdma.cq_qtype.qid_base);
+ cfg->eq_base = le32_to_cpu(ident->rdma.eq_qtype.qid_base);
+
+ /*
+ * ionic_create_rdma_admin() may reduce aq_count or eq_count if
+ * it is unable to allocate all that were requested.
+ * aq_count is tunable; see ionic_aq_count
+ * eq_count is tunable; see ionic_eq_count
+ */
+ cfg->aq_count = le32_to_cpu(ident->rdma.aq_qtype.qid_count);
+ cfg->eq_count = le32_to_cpu(ident->rdma.eq_qtype.qid_count);
+ cfg->cq_count = le32_to_cpu(ident->rdma.cq_qtype.qid_count);
+ cfg->qp_count = le32_to_cpu(ident->rdma.sq_qtype.qid_count);
+
+ cfg->aq_qtype = ident->rdma.aq_qtype.qtype;
+ cfg->sq_qtype = ident->rdma.sq_qtype.qtype;
+ cfg->rq_qtype = ident->rdma.rq_qtype.qtype;
+ cfg->cq_qtype = ident->rdma.cq_qtype.qtype;
+ cfg->eq_qtype = ident->rdma.eq_qtype.qtype;
+ cfg->udma_qgrp_shift = ident->rdma.udma_shift;
+ cfg->udma_count = 2;
+
+ cfg->max_stride = ident->rdma.max_stride;
+ cfg->expdb_mask = ionic_get_expdb(lif);
+
+ cfg->sq_expdb =
+ !!(lif->qtype_info[IONIC_QTYPE_TXQ].features & IONIC_QIDENT_F_EXPDB);
+ cfg->rq_expdb =
+ !!(lif->qtype_info[IONIC_QTYPE_RXQ].features & IONIC_QIDENT_F_EXPDB);
+}
+
+struct net_device *ionic_lif_netdev(struct ionic_lif *lif)
+{
+ struct net_device *netdev = lif->netdev;
+
+ dev_hold(netdev);
+ return netdev;
+}
+
+void ionic_lif_fw_version(struct ionic_lif *lif, char *str, size_t len)
+{
+ strscpy(str, lif->ionic->idev.dev_info.fw_version, len);
+}
+
+u8 ionic_lif_asic_rev(struct ionic_lif *lif)
+{
+ return lif->ionic->idev.dev_info.asic_rev;
+}
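
For reference, the version gate in ionic_fill_lif_cfg() works because IONIC_VERSION() (defined in ionic_lif_cfg.h below) packs major and minor into one comparable integer. A worked instance of the arithmetic:

	/* IONIC_VERSION(2, 1) = (2 << 16) + (1 << 8) = 0x020100
	 * firmware at version 2, minor 3: 0x020300 >= 0x020100 -> reported
	 *   page_size_cap is used
	 * firmware at version 1, minor 9: 0x010900 <  0x020100 -> fixed
	 *   IONIC_PAGE_SIZE_SUPPORTED mask is used
	 */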
diff --git a/drivers/infiniband/hw/ionic/ionic_lif_cfg.h b/drivers/infiniband/hw/ionic/ionic_lif_cfg.h
new file mode 100644
index 000000000000..20853429f623
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/ionic_lif_cfg.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#ifndef _IONIC_LIF_CFG_H_
+#define _IONIC_LIF_CFG_H_
+
+#define IONIC_VERSION(a, b) (((a) << 16) + ((b) << 8))
+#define IONIC_PAGE_SIZE_SUPPORTED 0x40201000 /* 4KB, 2MB, 1GB */
+
+#define IONIC_EXPDB_64B_WQE BIT(0)
+#define IONIC_EXPDB_128B_WQE BIT(1)
+#define IONIC_EXPDB_256B_WQE BIT(2)
+#define IONIC_EXPDB_512B_WQE BIT(3)
+
+struct ionic_lif_cfg {
+ struct device *hwdev;
+ struct ionic_lif *lif;
+
+ int lif_index;
+ int lif_hw_index;
+
+ u32 dbid;
+ int dbid_count;
+ u64 __iomem *dbpage;
+ struct ionic_intr __iomem *intr_ctrl;
+ phys_addr_t db_phys;
+
+ u64 page_size_supported;
+ u32 npts_per_lif;
+ u32 nmrs_per_lif;
+ u32 nahs_per_lif;
+
+ u32 aq_base;
+ u32 cq_base;
+ u32 eq_base;
+
+ int aq_count;
+ int eq_count;
+ int cq_count;
+ int qp_count;
+
+ u16 stats_type;
+ u8 aq_qtype;
+ u8 sq_qtype;
+ u8 rq_qtype;
+ u8 cq_qtype;
+ u8 eq_qtype;
+
+ u8 udma_count;
+ u8 udma_qgrp_shift;
+
+ u8 rdma_version;
+ u8 qp_opcodes;
+ u8 admin_opcodes;
+
+ u8 max_stride;
+ bool sq_expdb;
+ bool rq_expdb;
+ u8 expdb_mask;
+};
+
+void ionic_fill_lif_cfg(struct ionic_lif *lif, struct ionic_lif_cfg *cfg);
+struct net_device *ionic_lif_netdev(struct ionic_lif *lif);
+void ionic_lif_fw_version(struct ionic_lif *lif, char *str, size_t len);
+u8 ionic_lif_asic_rev(struct ionic_lif *lif);
+
+#endif /* _IONIC_LIF_CFG_H_ */
diff --git a/drivers/infiniband/hw/ionic/ionic_pgtbl.c b/drivers/infiniband/hw/ionic/ionic_pgtbl.c
new file mode 100644
index 000000000000..e74db73c9246
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/ionic_pgtbl.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#include <linux/mman.h>
+#include <linux/dma-mapping.h>
+
+#include "ionic_fw.h"
+#include "ionic_ibdev.h"
+
+__le64 ionic_pgtbl_dma(struct ionic_tbl_buf *buf, u64 va)
+{
+ u64 pg_mask = BIT_ULL(buf->page_size_log2) - 1;
+ u64 dma;
+
+ if (!buf->tbl_pages)
+ return cpu_to_le64(0);
+
+ if (buf->tbl_pages > 1)
+ return cpu_to_le64(buf->tbl_dma);
+
+ if (buf->tbl_buf)
+ dma = le64_to_cpu(buf->tbl_buf[0]);
+ else
+ dma = buf->tbl_dma;
+
+ return cpu_to_le64(dma + (va & pg_mask));
+}
+
+__be64 ionic_pgtbl_off(struct ionic_tbl_buf *buf, u64 va)
+{
+ if (buf->tbl_pages > 1) {
+ u64 pg_mask = BIT_ULL(buf->page_size_log2) - 1;
+
+ return cpu_to_be64(va & pg_mask);
+ }
+
+ return 0;
+}
+
+int ionic_pgtbl_page(struct ionic_tbl_buf *buf, u64 dma)
+{
+ if (unlikely(buf->tbl_pages == buf->tbl_limit))
+ return -ENOMEM;
+
+ if (buf->tbl_buf)
+ buf->tbl_buf[buf->tbl_pages] = cpu_to_le64(dma);
+ else
+ buf->tbl_dma = dma;
+
+ ++buf->tbl_pages;
+
+ return 0;
+}
+
+static int ionic_tbl_buf_alloc(struct ionic_ibdev *dev,
+ struct ionic_tbl_buf *buf)
+{
+ int rc;
+
+ buf->tbl_size = buf->tbl_limit * sizeof(*buf->tbl_buf);
+ buf->tbl_buf = kmalloc(buf->tbl_size, GFP_KERNEL);
+ if (!buf->tbl_buf)
+ return -ENOMEM;
+
+ buf->tbl_dma = dma_map_single(dev->lif_cfg.hwdev, buf->tbl_buf,
+ buf->tbl_size, DMA_TO_DEVICE);
+ rc = dma_mapping_error(dev->lif_cfg.hwdev, buf->tbl_dma);
+ if (rc) {
+ kfree(buf->tbl_buf);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int ionic_pgtbl_umem(struct ionic_tbl_buf *buf, struct ib_umem *umem)
+{
+ struct ib_block_iter biter;
+ u64 page_dma;
+ int rc;
+
+ rdma_umem_for_each_dma_block(umem, &biter, BIT_ULL(buf->page_size_log2)) {
+ page_dma = rdma_block_iter_dma_address(&biter);
+ rc = ionic_pgtbl_page(buf, page_dma);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+void ionic_pgtbl_unbuf(struct ionic_ibdev *dev, struct ionic_tbl_buf *buf)
+{
+ if (buf->tbl_buf)
+ dma_unmap_single(dev->lif_cfg.hwdev, buf->tbl_dma,
+ buf->tbl_size, DMA_TO_DEVICE);
+
+ kfree(buf->tbl_buf);
+ memset(buf, 0, sizeof(*buf));
+}
+
+int ionic_pgtbl_init(struct ionic_ibdev *dev,
+ struct ionic_tbl_buf *buf,
+ struct ib_umem *umem,
+ dma_addr_t dma,
+ int limit,
+ u64 page_size)
+{
+ int rc;
+
+ memset(buf, 0, sizeof(*buf));
+
+ if (umem) {
+ limit = ib_umem_num_dma_blocks(umem, page_size);
+ buf->page_size_log2 = order_base_2(page_size);
+ }
+
+ if (limit < 1)
+ return -EINVAL;
+
+ buf->tbl_limit = limit;
+
+ /* skip pgtbl if contiguous / direct translation */
+ if (limit > 1) {
+ rc = ionic_tbl_buf_alloc(dev, buf);
+ if (rc)
+ return rc;
+ }
+
+ if (umem)
+ rc = ionic_pgtbl_umem(buf, umem);
+ else
+ rc = ionic_pgtbl_page(buf, dma);
+
+ if (rc)
+ goto err_unbuf;
+
+ return 0;
+
+err_unbuf:
+ ionic_pgtbl_unbuf(dev, buf);
+ return rc;
+}
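
Taken together, the helpers above give registration paths a simple build/translate/teardown pattern. A hedged sketch of a caller (the admin-command step is a placeholder, not the driver's real MR path):

	static int example_map_umem(struct ionic_ibdev *dev, struct ib_umem *umem,
				    u64 va, u64 page_size)
	{
		struct ionic_tbl_buf buf;
		__le64 base;
		int rc;

		/* dma/limit are recomputed when a umem is given; a table is
		 * only allocated when the umem spans more than one block
		 */
		rc = ionic_pgtbl_init(dev, &buf, umem, 0, 0, page_size);
		if (rc)
			return rc;

		/* single block: block DMA address plus va's offset within it;
		 * multiple blocks: DMA address of the table itself
		 */
		base = ionic_pgtbl_dma(&buf, va);

		/* ... hand base to the device in an admin command (elided) ... */

		ionic_pgtbl_unbuf(dev, &buf);
		return 0;
	}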
diff --git a/drivers/infiniband/hw/ionic/ionic_queue.c b/drivers/infiniband/hw/ionic/ionic_queue.c
new file mode 100644
index 000000000000..aa897ed2a412
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/ionic_queue.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#include <linux/dma-mapping.h>
+
+#include "ionic_queue.h"
+
+int ionic_queue_init(struct ionic_queue *q, struct device *dma_dev,
+ int depth, size_t stride)
+{
+ if (depth < 0 || depth > 0xffff)
+ return -EINVAL;
+
+ if (stride == 0 || stride > 0x10000)
+ return -EINVAL;
+
+ if (depth == 0)
+ depth = 1;
+
+ q->depth_log2 = order_base_2(depth + 1);
+ q->stride_log2 = order_base_2(stride);
+
+ if (q->depth_log2 + q->stride_log2 < PAGE_SHIFT)
+ q->depth_log2 = PAGE_SHIFT - q->stride_log2;
+
+ if (q->depth_log2 > 16 || q->stride_log2 > 16)
+ return -EINVAL;
+
+ q->size = BIT_ULL(q->depth_log2 + q->stride_log2);
+ q->mask = BIT(q->depth_log2) - 1;
+
+ q->ptr = dma_alloc_coherent(dma_dev, q->size, &q->dma, GFP_KERNEL);
+ if (!q->ptr)
+ return -ENOMEM;
+
+ /* it will always be page aligned, but just to be sure... */
+ if (!PAGE_ALIGNED(q->ptr)) {
+ dma_free_coherent(dma_dev, q->size, q->ptr, q->dma);
+ return -ENOMEM;
+ }
+
+ q->prod = 0;
+ q->cons = 0;
+ q->dbell = 0;
+
+ return 0;
+}
+
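The sizing rules above round the ring to a power of two that holds depth entries plus one hole, then pad small rings up to a full page. Worked numbers (depth taken from IONIC_EQ_DEPTH; the stride values are illustrative):

	/* depth = 511, stride = 16:
	 *   depth_log2  = order_base_2(512) = 9
	 *   stride_log2 = order_base_2(16)  = 4
	 *   9 + 4 = 13 >= PAGE_SHIFT (12 with 4K pages), so no padding
	 *   size = 2^13 = 8 KB, mask = 0x1ff (511 usable entries)
	 *
	 * depth = 63, stride = 16: 6 + 4 = 10 < 12, so depth_log2 is
	 * raised to 8 and the ring still occupies one full 4K page.
	 */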
+void ionic_queue_destroy(struct ionic_queue *q, struct device *dma_dev)
+{
+ dma_free_coherent(dma_dev, q->size, q->ptr, q->dma);
+}
diff --git a/drivers/infiniband/hw/ionic/ionic_queue.h b/drivers/infiniband/hw/ionic/ionic_queue.h
new file mode 100644
index 000000000000..d18020d4cad5
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/ionic_queue.h
@@ -0,0 +1,234 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#ifndef _IONIC_QUEUE_H_
+#define _IONIC_QUEUE_H_
+
+#include <linux/io.h>
+#include <ionic_regs.h>
+
+#define IONIC_MAX_DEPTH 0xffff
+#define IONIC_MAX_CQ_DEPTH 0xffff
+#define IONIC_CQ_RING_ARM IONIC_DBELL_RING_1
+#define IONIC_CQ_RING_SOL IONIC_DBELL_RING_2
+
+/**
+ * struct ionic_queue - Ring buffer used between device and driver
+ * @size: Size of the buffer, in bytes
+ * @dma: Dma address of the buffer
+ * @ptr: Buffer virtual address
+ * @prod: Driver position in the queue
+ * @cons: Device position in the queue
+ * @mask: Capacity of the queue, subtracting the hole
+ * This value is equal to ((1 << depth_log2) - 1)
+ * @depth_log2: Log base two size depth of the queue
+ * @stride_log2: Log base two size of an element in the queue
+ * @dbell: Doorbell identifying bits
+ */
+struct ionic_queue {
+ size_t size;
+ dma_addr_t dma;
+ void *ptr;
+ u16 prod;
+ u16 cons;
+ u16 mask;
+ u8 depth_log2;
+ u8 stride_log2;
+ u64 dbell;
+};
+
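Since mask is (1 << depth_log2) - 1, the length and fullness helpers below reduce to wrap-safe subtraction, and the one-slot hole keeps prod == cons unambiguous as "empty". A worked case, assuming depth_log2 = 4:

	/* mask = 0xf: 16 slots, 15 usable (one hole)
	 * prod = 0x2, cons = 0xe (prod has wrapped):
	 *   length    = (0x2 - 0xe) & 0xf = 4
	 *   remaining = 0xf - 4 = 11
	 *   full iff length == mask (not the case here)
	 */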
+/**
+ * ionic_queue_init() - Initialize user space queue
+ * @q: Uninitialized queue structure
+ * @dma_dev: DMA device for mapping
+ * @depth: Depth of the queue
+ * @stride: Size of each element of the queue
+ *
+ * Return: status code
+ */
+int ionic_queue_init(struct ionic_queue *q, struct device *dma_dev,
+ int depth, size_t stride);
+
+/**
+ * ionic_queue_destroy() - Destroy user space queue
+ * @q: Queue structure
+ * @dma_dev: DMA device for mapping
+ */
+void ionic_queue_destroy(struct ionic_queue *q, struct device *dma_dev);
+
+/**
+ * ionic_queue_empty() - Test if queue is empty
+ * @q: Queue structure
+ *
+ * This is only valid for to-device queues.
+ *
+ * Return: is empty
+ */
+static inline bool ionic_queue_empty(struct ionic_queue *q)
+{
+ return q->prod == q->cons;
+}
+
+/**
+ * ionic_queue_length() - Get the current length of the queue
+ * @q: Queue structure
+ *
+ * This is only valid for to-device queues.
+ *
+ * Return: length
+ */
+static inline u16 ionic_queue_length(struct ionic_queue *q)
+{
+ return (q->prod - q->cons) & q->mask;
+}
+
+/**
+ * ionic_queue_length_remaining() - Get the remaining length of the queue
+ * @q: Queue structure
+ *
+ * This is only valid for to-device queues.
+ *
+ * Return: length remaining
+ */
+static inline u16 ionic_queue_length_remaining(struct ionic_queue *q)
+{
+ return q->mask - ionic_queue_length(q);
+}
+
+/**
+ * ionic_queue_full() - Test if queue is full
+ * @q: Queue structure
+ *
+ * This is only valid for to-device queues.
+ *
+ * Return: is full
+ */
+static inline bool ionic_queue_full(struct ionic_queue *q)
+{
+ return q->mask == ionic_queue_length(q);
+}
+
+/**
+ * ionic_color_wrap() - Flip the color if prod is wrapped
+ * @prod: Queue index just after advancing
+ * @color: Queue color just prior to advancing the index
+ *
+ * Return: color after advancing the index
+ */
+static inline bool ionic_color_wrap(u16 prod, bool color)
+{
+ /* logical xor color with (prod == 0) */
+ return color != (prod == 0);
+}
+
+/**
+ * ionic_queue_at() - Get the element at the given index
+ * @q: Queue structure
+ * @idx: Index in the queue
+ *
+ * The index must be within the bounds of the queue. It is not checked here.
+ *
+ * Return: pointer to element at index
+ */
+static inline void *ionic_queue_at(struct ionic_queue *q, u16 idx)
+{
+ return q->ptr + ((unsigned long)idx << q->stride_log2);
+}
+
+/**
+ * ionic_queue_at_prod() - Get the element at the producer index
+ * @q: Queue structure
+ *
+ * Return: pointer to element at producer index
+ */
+static inline void *ionic_queue_at_prod(struct ionic_queue *q)
+{
+ return ionic_queue_at(q, q->prod);
+}
+
+/**
+ * ionic_queue_at_cons() - Get the element at the consumer index
+ * @q: Queue structure
+ *
+ * Return: pointer to element at consumer index
+ */
+static inline void *ionic_queue_at_cons(struct ionic_queue *q)
+{
+ return ionic_queue_at(q, q->cons);
+}
+
+/**
+ * ionic_queue_next() - Compute the next index
+ * @q: Queue structure
+ * @idx: Index
+ *
+ * Return: next index after idx
+ */
+static inline u16 ionic_queue_next(struct ionic_queue *q, u16 idx)
+{
+ return (idx + 1) & q->mask;
+}
+
+/**
+ * ionic_queue_produce() - Increase the producer index
+ * @q: Queue structure
+ *
+ * Caller must ensure that the queue is not full. It is not checked here.
+ */
+static inline void ionic_queue_produce(struct ionic_queue *q)
+{
+ q->prod = ionic_queue_next(q, q->prod);
+}
+
+/**
+ * ionic_queue_consume() - Increase the consumer index
+ * @q: Queue structure
+ *
+ * Caller must ensure that the queue is not empty. It is not checked here.
+ *
+ * This is only valid for to-device queues.
+ */
+static inline void ionic_queue_consume(struct ionic_queue *q)
+{
+ q->cons = ionic_queue_next(q, q->cons);
+}
+
+/**
+ * ionic_queue_consume_entries() - Increase the consumer index by entries
+ * @q: Queue structure
+ * @entries: Number of entries to increment
+ *
+ * Caller must ensure that the queue is not empty. It is not checked here.
+ *
+ * This is only valid for to-device queues.
+ */
+static inline void ionic_queue_consume_entries(struct ionic_queue *q,
+ u16 entries)
+{
+ q->cons = (q->cons + entries) & q->mask;
+}
+
+/**
+ * ionic_queue_dbell_init() - Initialize doorbell bits for queue id
+ * @q: Queue structure
+ * @qid: Queue identifying number
+ */
+static inline void ionic_queue_dbell_init(struct ionic_queue *q, u32 qid)
+{
+ q->dbell = IONIC_DBELL_QID(qid);
+}
+
+/**
+ * ionic_queue_dbell_val() - Get current doorbell update value
+ * @q: Queue structure
+ *
+ * Return: current doorbell update value
+ */
+static inline u64 ionic_queue_dbell_val(struct ionic_queue *q)
+{
+ return q->dbell | q->prod;
+}
+
+#endif /* _IONIC_QUEUE_H_ */
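
One way the inline helpers compose on a post path, sketched under the assumption that the caller already holds the queue lock; the ionic_dbell_ring() helper and its signature are assumed from the shared ionic register headers, not defined in this patch:

	static void example_post_one(struct ionic_queue *q, const void *wqe,
				     u64 __iomem *dbpage, int qtype)
	{
		if (ionic_queue_full(q))
			return;		/* caller's flow control (elided) */

		memcpy(ionic_queue_at_prod(q), wqe, BIT(q->stride_log2));
		ionic_queue_produce(q);

		/* doorbell value = qid bits | new producer index */
		ionic_dbell_ring(dbpage, qtype, ionic_queue_dbell_val(q));
	}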
diff --git a/drivers/infiniband/hw/ionic/ionic_res.h b/drivers/infiniband/hw/ionic/ionic_res.h
new file mode 100644
index 000000000000..46c8c584bd9a
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/ionic_res.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#ifndef _IONIC_RES_H_
+#define _IONIC_RES_H_
+
+#include <linux/kernel.h>
+#include <linux/idr.h>
+
+/**
+ * struct ionic_resid_bits - Number allocator based on IDA
+ *
+ * @inuse: IDA handle
+ * @inuse_size: Highest ID limit for IDA
+ */
+struct ionic_resid_bits {
+ struct ida inuse;
+ unsigned int inuse_size;
+};
+
+/**
+ * ionic_resid_init() - Initialize a resid allocator
+ * @resid: Uninitialized resid allocator
+ * @size: Capacity of the allocator
+ */
+static inline void ionic_resid_init(struct ionic_resid_bits *resid,
+ unsigned int size)
+{
+ resid->inuse_size = size;
+ ida_init(&resid->inuse);
+}
+
+/**
+ * ionic_resid_destroy() - Destroy a resid allocator
+ * @resid: Resid allocator
+ */
+static inline void ionic_resid_destroy(struct ionic_resid_bits *resid)
+{
+ ida_destroy(&resid->inuse);
+}
+
+/**
+ * ionic_resid_get_shared() - Allocate an available shared resource id
+ * @resid: Resid allocator
+ * @min: Smallest valid resource id
+ * @size: One after largest valid resource id
+ *
+ * Return: Resource id, or negative error number
+ */
+static inline int ionic_resid_get_shared(struct ionic_resid_bits *resid,
+ unsigned int min,
+ unsigned int size)
+{
+ return ida_alloc_range(&resid->inuse, min, size - 1, GFP_KERNEL);
+}
+
+/**
+ * ionic_resid_get() - Allocate an available resource id
+ * @resid: Resid allocator
+ *
+ * Return: Resource id, or negative error number
+ */
+static inline int ionic_resid_get(struct ionic_resid_bits *resid)
+{
+ return ionic_resid_get_shared(resid, 0, resid->inuse_size);
+}
+
+/**
+ * ionic_resid_put() - Free a resource id
+ * @resid: Resid allocator
+ * @id: Resource id
+ */
+static inline void ionic_resid_put(struct ionic_resid_bits *resid, int id)
+{
+ ida_free(&resid->inuse, id);
+}
+
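Typical lifecycle of one of the inuse_* allocators embedded in struct ionic_ibdev, sketched with the PD limit from ionic_ibdev.h:

	struct ionic_resid_bits inuse_pdid;
	int pdid;

	ionic_resid_init(&inuse_pdid, IONIC_MAX_PD);	/* ids 0..1023 */

	pdid = ionic_resid_get(&inuse_pdid);
	if (pdid < 0)
		return pdid;		/* -ENOSPC when all ids are in use */

	/* ... pdid in use ... */

	ionic_resid_put(&inuse_pdid, pdid);
	ionic_resid_destroy(&inuse_pdid);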
+/**
+ * ionic_bitid_to_qid() - Transform a resource bit index into a queue id
+ * @bitid: Bit index
+ * @qgrp_shift: Log2 number of queues per queue group
+ * @half_qid_shift: Log2 of half the total number of queues
+ *
+ * Return: Queue id
+ *
+ * Udma-constrained queues (QPs and CQs) are associated with their udma by
+ * queue group. Even queue groups are associated with udma0, and odd queue
+ * groups with udma1.
+ *
+ * For allocating queue ids, we want to arrange the bits into two halves,
+ * with the even queue groups of udma0 in the lower half of the bitset,
+ * and the odd queue groups of udma1 in the upper half of the bitset.
+ * Then, one or two calls of find_next_zero_bit can examine all the bits
+ * for queues of an entire udma.
+ *
+ * For example, assuming eight queue groups with qgrp qids per group:
+ *
+ * bitid 0*qgrp..1*qgrp-1 : qid 0*qgrp..1*qgrp-1
+ * bitid 1*qgrp..2*qgrp-1 : qid 2*qgrp..3*qgrp-1
+ * bitid 2*qgrp..3*qgrp-1 : qid 4*qgrp..5*qgrp-1
+ * bitid 3*qgrp..4*qgrp-1 : qid 6*qgrp..7*qgrp-1
+ * bitid 4*qgrp..5*qgrp-1 : qid 1*qgrp..2*qgrp-1
+ * bitid 5*qgrp..6*qgrp-1 : qid 3*qgrp..4*qgrp-1
+ * bitid 6*qgrp..7*qgrp-1 : qid 5*qgrp..6*qgrp-1
+ * bitid 7*qgrp..8*qgrp-1 : qid 7*qgrp..8*qgrp-1
+ *
+ * There are three important ranges of bits in the qid. There is the udma
+ * bit "U" at qgrp_shift, which is the least significant bit of the group
+ * index, and determines which udma a queue is associated with.
+ * The bits of lesser significance we can call the idx bits "I", which are
+ * the index of the queue within the group. The bits of greater significance
+ * we can call the grp bits "G", which are other bits of the group index that
+ * do not determine the udma. Those bits are just rearranged in the bit index
+ * in the bitset. A bitid has the udma bit in the most significant place,
+ * then the grp bits, then the idx bits.
+ *
+ * bitid: 00000000000000 U GGG IIIIII
+ * qid: 00000000000000 GGG U IIIIII
+ *
+ * Transforming from bit index to qid, or from qid to bit index, can be
+ * accomplished by rearranging the bits by masking and shifting.
+ */
+static inline u32 ionic_bitid_to_qid(u32 bitid, u8 qgrp_shift,
+ u8 half_qid_shift)
+{
+ u32 udma_bit =
+ (bitid & BIT(half_qid_shift)) >> (half_qid_shift - qgrp_shift);
+ u32 grp_bits = (bitid & GENMASK(half_qid_shift - 1, qgrp_shift)) << 1;
+ u32 idx_bits = bitid & (BIT(qgrp_shift) - 1);
+
+ return grp_bits | udma_bit | idx_bits;
+}
+
+/**
+ * ionic_qid_to_bitid() - Transform a queue id into a resource bit index
+ * @qid: queue index
+ * @qgrp_shift: Log2 number of queues per queue group
+ * @half_qid_shift: Log2 of half the total number of queues
+ *
+ * Return: Resource bit index
+ *
+ * This is the inverse of ionic_bitid_to_qid().
+ */
+static inline u32 ionic_qid_to_bitid(u32 qid, u8 qgrp_shift, u8 half_qid_shift)
+{
+ u32 udma_bit = (qid & BIT(qgrp_shift)) << (half_qid_shift - qgrp_shift);
+ u32 grp_bits = (qid & GENMASK(half_qid_shift, qgrp_shift + 1)) >> 1;
+ u32 idx_bits = qid & (BIT(qgrp_shift) - 1);
+
+ return udma_bit | grp_bits | idx_bits;
+}
+#endif /* _IONIC_RES_H_ */
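
A worked round trip for the mapping above, using qgrp_shift = 6 and half_qid_shift = 8 (eight groups of 64 queues, matching the comment's example):

	/* bitid 0x147 = U:1  GG:01  IIIIII:000111
	 *   udma_bit = (0x147 & BIT(8)) >> 2 = 0x40
	 *   grp_bits = (0x147 & GENMASK(7, 6)) << 1 = 0x80
	 *   idx_bits = 0x07
	 *   qid = 0xc7, i.e. group 3 (odd -> udma1), index 7
	 *
	 * ionic_qid_to_bitid(0xc7, 6, 8) restores 0x147.
	 */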
diff --git a/drivers/infiniband/hw/irdma/Kconfig b/drivers/infiniband/hw/irdma/Kconfig
index 5f49a58590ed..0bd7e3fca1fb 100644
--- a/drivers/infiniband/hw/irdma/Kconfig
+++ b/drivers/infiniband/hw/irdma/Kconfig
@@ -4,10 +4,11 @@ config INFINIBAND_IRDMA
depends on INET
depends on IPV6 || !IPV6
depends on PCI
- depends on ICE && I40E
+ depends on IDPF && ICE && I40E
select GENERIC_ALLOCATOR
select AUXILIARY_BUS
select CRC32
help
- This is an Intel(R) Ethernet Protocol Driver for RDMA driver
- that support E810 (iWARP/RoCE) and X722 (iWARP) network devices.
+ This is an Intel(R) Ethernet Protocol Driver for RDMA that
+ supports IPU E2000 (RoCEv2), E810 (iWARP/RoCEv2) and X722 (iWARP)
+ network devices.
diff --git a/drivers/infiniband/hw/irdma/Makefile b/drivers/infiniband/hw/irdma/Makefile
index 48c3854235a0..03ceb9e5475f 100644
--- a/drivers/infiniband/hw/irdma/Makefile
+++ b/drivers/infiniband/hw/irdma/Makefile
@@ -13,7 +13,10 @@ irdma-objs := cm.o \
hw.o \
i40iw_hw.o \
i40iw_if.o \
+ ig3rdma_if.o\
+ icrdma_if.o \
icrdma_hw.o \
+ ig3rdma_hw.o\
main.o \
pble.o \
puda.o \
@@ -22,6 +25,7 @@ irdma-objs := cm.o \
uk.o \
utils.o \
verbs.o \
+ virtchnl.o \
ws.o \
CFLAGS_trace.o = -I$(src)
diff --git a/drivers/infiniband/hw/irdma/ctrl.c b/drivers/infiniband/hw/irdma/ctrl.c
index 99a7f1a6c0b5..4ef1c29032f7 100644
--- a/drivers/infiniband/hw/irdma/ctrl.c
+++ b/drivers/infiniband/hw/irdma/ctrl.c
@@ -74,6 +74,14 @@ static void irdma_set_qos_info(struct irdma_sc_vsi *vsi,
{
u8 i;
+ if (vsi->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3) {
+ for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
+ vsi->qos[i].qs_handle = vsi->dev->qos[i].qs_handle;
+ vsi->qos[i].valid = true;
+ }
+
+ return;
+ }
vsi->qos_rel_bw = l2p->vsi_rel_bw;
vsi->qos_prio_type = l2p->vsi_prio_type;
vsi->dscp_mode = l2p->dscp_mode;
@@ -404,7 +412,8 @@ int irdma_sc_qp_init(struct irdma_sc_qp *qp, struct irdma_qp_init_info *info)
pble_obj_cnt = info->pd->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
if ((info->virtual_map && info->sq_pa >= pble_obj_cnt) ||
- (info->virtual_map && info->rq_pa >= pble_obj_cnt))
+ (!info->qp_uk_init_info.srq_uk &&
+ info->virtual_map && info->rq_pa >= pble_obj_cnt))
return -EINVAL;
qp->llp_stream_handle = (void *)(-1);
@@ -439,6 +448,208 @@ int irdma_sc_qp_init(struct irdma_sc_qp *qp, struct irdma_qp_init_info *info)
}
/**
+ * irdma_sc_srq_init - init sc_srq structure
+ * @srq: srq sc struct
+ * @info: parameters for srq init
+ */
+int irdma_sc_srq_init(struct irdma_sc_srq *srq,
+ struct irdma_srq_init_info *info)
+{
+ u32 srq_size_quanta;
+ int ret_code;
+
+ ret_code = irdma_uk_srq_init(&srq->srq_uk, &info->srq_uk_init_info);
+ if (ret_code)
+ return ret_code;
+
+ srq->dev = info->pd->dev;
+ srq->pd = info->pd;
+ srq->vsi = info->vsi;
+ srq->srq_pa = info->srq_pa;
+ srq->first_pm_pbl_idx = info->first_pm_pbl_idx;
+ srq->pasid = info->pasid;
+ srq->pasid_valid = info->pasid_valid;
+ srq->srq_limit = info->srq_limit;
+ srq->leaf_pbl_size = info->leaf_pbl_size;
+ srq->virtual_map = info->virtual_map;
+ srq->tph_en = info->tph_en;
+ srq->arm_limit_event = info->arm_limit_event;
+ srq->tph_val = info->tph_value;
+ srq->shadow_area_pa = info->shadow_area_pa;
+
+ /* Smallest SRQ size is 256B i.e. 8 quanta */
+ srq_size_quanta = max((u32)IRDMA_SRQ_MIN_QUANTA,
+ srq->srq_uk.srq_size *
+ srq->srq_uk.wqe_size_multiplier);
+ srq->hw_srq_size = irdma_get_encoded_wqe_size(srq_size_quanta,
+ IRDMA_QUEUE_TYPE_SRQ);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_srq_create - send srq create CQP WQE
+ * @srq: srq sc struct
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static int irdma_sc_srq_create(struct irdma_sc_srq *srq, u64 scratch,
+ bool post_sq)
+{
+ struct irdma_sc_cqp *cqp;
+ __le64 *wqe;
+ u64 hdr;
+
+ cqp = srq->pd->dev->cqp;
+ if (srq->srq_uk.srq_id < cqp->dev->hw_attrs.min_hw_srq_id ||
+ srq->srq_uk.srq_id >
+ (cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_SRQ].max_cnt - 1))
+ return -EINVAL;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 0,
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_SRQ_LIMIT, srq->srq_limit) |
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_RQSIZE, srq->hw_srq_size) |
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_RQ_WQE_SIZE, srq->srq_uk.wqe_size));
+ set_64bit_val(wqe, 8, (uintptr_t)srq);
+ set_64bit_val(wqe, 16,
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_PD_ID, srq->pd->pd_id));
+ set_64bit_val(wqe, 32,
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_PHYSICAL_BUFFER_ADDR,
+ srq->srq_pa >>
+ IRDMA_CQPSQ_SRQ_PHYSICAL_BUFFER_ADDR_S));
+ set_64bit_val(wqe, 40,
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_DB_SHADOW_ADDR,
+ srq->shadow_area_pa >>
+ IRDMA_CQPSQ_SRQ_DB_SHADOW_ADDR_S));
+ set_64bit_val(wqe, 48,
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_FIRST_PM_PBL_IDX,
+ srq->first_pm_pbl_idx));
+
+ hdr = srq->srq_uk.srq_id |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_SRQ) |
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_LEAF_PBL_SIZE, srq->leaf_pbl_size) |
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_VIRTMAP, srq->virtual_map) |
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_ARM_LIMIT_EVENT,
+ srq->arm_limit_event) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: SRQ_CREATE WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_srq_modify - send modify_srq CQP WQE
+ * @srq: srq sc struct
+ * @info: parameters for srq modification
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static int irdma_sc_srq_modify(struct irdma_sc_srq *srq,
+ struct irdma_modify_srq_info *info, u64 scratch,
+ bool post_sq)
+{
+ struct irdma_sc_cqp *cqp;
+ __le64 *wqe;
+ u64 hdr;
+
+ cqp = srq->dev->cqp;
+ if (srq->srq_uk.srq_id < cqp->dev->hw_attrs.min_hw_srq_id ||
+ srq->srq_uk.srq_id >
+ (cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_SRQ].max_cnt - 1))
+ return -EINVAL;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 0,
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_SRQ_LIMIT, info->srq_limit) |
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_RQSIZE, srq->hw_srq_size) |
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_RQ_WQE_SIZE, srq->srq_uk.wqe_size));
+ set_64bit_val(wqe, 8,
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_SRQCTX, srq->srq_uk.srq_id));
+ set_64bit_val(wqe, 16,
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_PD_ID, srq->pd->pd_id));
+ set_64bit_val(wqe, 32,
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_PHYSICAL_BUFFER_ADDR,
+ srq->srq_pa >>
+ IRDMA_CQPSQ_SRQ_PHYSICAL_BUFFER_ADDR_S));
+ set_64bit_val(wqe, 40,
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_DB_SHADOW_ADDR,
+ srq->shadow_area_pa >>
+ IRDMA_CQPSQ_SRQ_DB_SHADOW_ADDR_S));
+ set_64bit_val(wqe, 48,
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_FIRST_PM_PBL_IDX,
+ srq->first_pm_pbl_idx));
+
+ hdr = srq->srq_uk.srq_id |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MODIFY_SRQ) |
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_LEAF_PBL_SIZE, srq->leaf_pbl_size) |
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_VIRTMAP, srq->virtual_map) |
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_ARM_LIMIT_EVENT,
+ info->arm_limit_event) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: SRQ_MODIFY WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_srq_destroy - send srq_destroy CQP WQE
+ * @srq: srq sc struct
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static int irdma_sc_srq_destroy(struct irdma_sc_srq *srq, u64 scratch,
+ bool post_sq)
+{
+ struct irdma_sc_cqp *cqp;
+ __le64 *wqe;
+ u64 hdr;
+
+ cqp = srq->dev->cqp;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 8, (uintptr_t)srq);
+
+ hdr = srq->srq_uk.srq_id |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_SRQ) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: SRQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16,
+ 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
* irdma_sc_qp_create - create qp
* @qp: sc qp
* @info: qp create info
@@ -629,13 +840,14 @@ static u8 irdma_sc_get_encoded_ird_size(u16 ird_size)
}
/**
- * irdma_sc_qp_setctx_roce - set qp's context
+ * irdma_sc_qp_setctx_roce_gen_2 - set qp's context
* @qp: sc qp
* @qp_ctx: context ptr
* @info: ctx info
*/
-void irdma_sc_qp_setctx_roce(struct irdma_sc_qp *qp, __le64 *qp_ctx,
- struct irdma_qp_host_ctx_info *info)
+static void irdma_sc_qp_setctx_roce_gen_2(struct irdma_sc_qp *qp,
+ __le64 *qp_ctx,
+ struct irdma_qp_host_ctx_info *info)
{
struct irdma_roce_offload_info *roce_info;
struct irdma_udp_offload_info *udp;
@@ -753,6 +965,189 @@ void irdma_sc_qp_setctx_roce(struct irdma_sc_qp *qp, __le64 *qp_ctx,
8, qp_ctx, IRDMA_QP_CTX_SIZE, false);
}
+/**
+ * irdma_sc_get_encoded_ird_size_gen_3 - get encoded IRD size for GEN 3
+ * @ird_size: IRD size
+ * The ird from the connection is rounded to a supported HW setting and then encoded
+ * for ird_size field of qp_ctx. Consumers are expected to provide valid ird size based
+ * on hardware attributes. IRD size defaults to a value of 4 in case of invalid input.
+ */
+static u8 irdma_sc_get_encoded_ird_size_gen_3(u16 ird_size)
+{
+ switch (ird_size ?
+ roundup_pow_of_two(2 * ird_size) : 4) {
+ case 4096:
+ return IRDMA_IRD_HW_SIZE_4096_GEN3;
+ case 2048:
+ return IRDMA_IRD_HW_SIZE_2048_GEN3;
+ case 1024:
+ return IRDMA_IRD_HW_SIZE_1024_GEN3;
+ case 512:
+ return IRDMA_IRD_HW_SIZE_512_GEN3;
+ case 256:
+ return IRDMA_IRD_HW_SIZE_256_GEN3;
+ case 128:
+ return IRDMA_IRD_HW_SIZE_128_GEN3;
+ case 64:
+ return IRDMA_IRD_HW_SIZE_64_GEN3;
+ case 32:
+ return IRDMA_IRD_HW_SIZE_32_GEN3;
+ case 16:
+ return IRDMA_IRD_HW_SIZE_16_GEN3;
+ case 8:
+ return IRDMA_IRD_HW_SIZE_8_GEN3;
+ case 4:
+ default:
+ break;
+ }
+
+ return IRDMA_IRD_HW_SIZE_4_GEN3;
+}
+
+/**
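The switch expression first doubles the requested IRD and rounds up to a power of two, so intermediate values map to the next supported size. A few worked cases:

	/* ird_size = 10: roundup_pow_of_two(20)  = 32  -> IRDMA_IRD_HW_SIZE_32_GEN3
	 * ird_size = 64: roundup_pow_of_two(128) = 128 -> IRDMA_IRD_HW_SIZE_128_GEN3
	 * ird_size = 0:  the ternary substitutes 4     -> IRDMA_IRD_HW_SIZE_4_GEN3
	 */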
+ * irdma_sc_qp_setctx_roce_gen_3 - set qp's context
+ * @qp: sc qp
+ * @qp_ctx: context ptr
+ * @info: ctx info
+ */
+static void irdma_sc_qp_setctx_roce_gen_3(struct irdma_sc_qp *qp,
+ __le64 *qp_ctx,
+ struct irdma_qp_host_ctx_info *info)
+{
+ struct irdma_roce_offload_info *roce_info = info->roce_info;
+ struct irdma_udp_offload_info *udp = info->udp_info;
+ u64 qw0, qw3, qw7 = 0, qw8 = 0;
+ u8 push_mode_en;
+ u32 push_idx;
+
+ qp->user_pri = info->user_pri;
+ if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX) {
+ push_mode_en = 0;
+ push_idx = 0;
+ } else {
+ push_mode_en = 1;
+ push_idx = qp->push_idx;
+ }
+
+ qw0 = FIELD_PREP(IRDMAQPC_RQWQESIZE, qp->qp_uk.rq_wqe_size) |
+ FIELD_PREP(IRDMAQPC_RCVTPHEN, qp->rcv_tph_en) |
+ FIELD_PREP(IRDMAQPC_XMITTPHEN, qp->xmit_tph_en) |
+ FIELD_PREP(IRDMAQPC_RQTPHEN, qp->rq_tph_en) |
+ FIELD_PREP(IRDMAQPC_SQTPHEN, qp->sq_tph_en) |
+ FIELD_PREP(IRDMAQPC_PPIDX, push_idx) |
+ FIELD_PREP(IRDMAQPC_PMENA, push_mode_en) |
+ FIELD_PREP(IRDMAQPC_DC_TCP_EN, roce_info->dctcp_en) |
+ FIELD_PREP(IRDMAQPC_ISQP1, roce_info->is_qp1) |
+ FIELD_PREP(IRDMAQPC_ROCE_TVER, roce_info->roce_tver) |
+ FIELD_PREP(IRDMAQPC_IPV4, udp->ipv4) |
+ FIELD_PREP(IRDMAQPC_USE_SRQ, !qp->qp_uk.srq_uk ? 0 : 1) |
+ FIELD_PREP(IRDMAQPC_INSERTVLANTAG, udp->insert_vlan_tag);
+ set_64bit_val(qp_ctx, 0, qw0);
+ set_64bit_val(qp_ctx, 8, qp->sq_pa);
+ set_64bit_val(qp_ctx, 16, qp->rq_pa);
+ qw3 = FIELD_PREP(IRDMAQPC_RQSIZE, qp->hw_rq_size) |
+ FIELD_PREP(IRDMAQPC_SQSIZE, qp->hw_sq_size) |
+ FIELD_PREP(IRDMAQPC_TTL, udp->ttl) |
+ FIELD_PREP(IRDMAQPC_TOS, udp->tos) |
+ FIELD_PREP(IRDMAQPC_SRCPORTNUM, udp->src_port) |
+ FIELD_PREP(IRDMAQPC_DESTPORTNUM, udp->dst_port);
+ set_64bit_val(qp_ctx, 24, qw3);
+ set_64bit_val(qp_ctx, 32,
+ FIELD_PREP(IRDMAQPC_DESTIPADDR2, udp->dest_ip_addr[2]) |
+ FIELD_PREP(IRDMAQPC_DESTIPADDR3, udp->dest_ip_addr[3]));
+ set_64bit_val(qp_ctx, 40,
+ FIELD_PREP(IRDMAQPC_DESTIPADDR0, udp->dest_ip_addr[0]) |
+ FIELD_PREP(IRDMAQPC_DESTIPADDR1, udp->dest_ip_addr[1]));
+ set_64bit_val(qp_ctx, 48,
+ FIELD_PREP(IRDMAQPC_SNDMSS, udp->snd_mss) |
+ FIELD_PREP(IRDMAQPC_VLANTAG, udp->vlan_tag) |
+ FIELD_PREP(IRDMAQPC_ARPIDX, udp->arp_idx));
+ qw7 = FIELD_PREP(IRDMAQPC_PKEY, roce_info->p_key) |
+ FIELD_PREP(IRDMAQPC_ACKCREDITS, roce_info->ack_credits) |
+ FIELD_PREP(IRDMAQPC_FLOWLABEL, udp->flow_label);
+ set_64bit_val(qp_ctx, 56, qw7);
+ qw8 = FIELD_PREP(IRDMAQPC_QKEY, roce_info->qkey) |
+ FIELD_PREP(IRDMAQPC_DESTQP, roce_info->dest_qp);
+ set_64bit_val(qp_ctx, 64, qw8);
+ set_64bit_val(qp_ctx, 80,
+ FIELD_PREP(IRDMAQPC_PSNNXT, udp->psn_nxt) |
+ FIELD_PREP(IRDMAQPC_LSN, udp->lsn));
+ set_64bit_val(qp_ctx, 88,
+ FIELD_PREP(IRDMAQPC_EPSN, udp->epsn));
+ set_64bit_val(qp_ctx, 96,
+ FIELD_PREP(IRDMAQPC_PSNMAX, udp->psn_max) |
+ FIELD_PREP(IRDMAQPC_PSNUNA, udp->psn_una));
+ set_64bit_val(qp_ctx, 112,
+ FIELD_PREP(IRDMAQPC_CWNDROCE, udp->cwnd));
+ set_64bit_val(qp_ctx, 128,
+ FIELD_PREP(IRDMAQPC_MINRNR_TIMER, udp->min_rnr_timer) |
+ FIELD_PREP(IRDMAQPC_RNRNAK_THRESH, udp->rnr_nak_thresh) |
+ FIELD_PREP(IRDMAQPC_REXMIT_THRESH, udp->rexmit_thresh) |
+ FIELD_PREP(IRDMAQPC_RNRNAK_TMR, udp->rnr_nak_tmr) |
+ FIELD_PREP(IRDMAQPC_RTOMIN, roce_info->rtomin));
+ set_64bit_val(qp_ctx, 136,
+ FIELD_PREP(IRDMAQPC_TXCQNUM, info->send_cq_num) |
+ FIELD_PREP(IRDMAQPC_RXCQNUM, info->rcv_cq_num));
+ set_64bit_val(qp_ctx, 152,
+ FIELD_PREP(IRDMAQPC_MACADDRESS,
+ ether_addr_to_u64(roce_info->mac_addr)) |
+ FIELD_PREP(IRDMAQPC_LOCALACKTIMEOUT,
+ roce_info->local_ack_timeout));
+ set_64bit_val(qp_ctx, 160,
+ FIELD_PREP(IRDMAQPC_ORDSIZE_GEN3, roce_info->ord_size) |
+ FIELD_PREP(IRDMAQPC_IRDSIZE_GEN3,
+ irdma_sc_get_encoded_ird_size_gen_3(roce_info->ird_size)) |
+ FIELD_PREP(IRDMAQPC_WRRDRSPOK, roce_info->wr_rdresp_en) |
+ FIELD_PREP(IRDMAQPC_RDOK, roce_info->rd_en) |
+ FIELD_PREP(IRDMAQPC_USESTATSINSTANCE,
+ info->stats_idx_valid) |
+ FIELD_PREP(IRDMAQPC_BINDEN, roce_info->bind_en) |
+ FIELD_PREP(IRDMAQPC_FASTREGEN, roce_info->fast_reg_en) |
+ FIELD_PREP(IRDMAQPC_DCQCNENABLE, roce_info->dcqcn_en) |
+ FIELD_PREP(IRDMAQPC_RCVNOICRC, roce_info->rcv_no_icrc) |
+ FIELD_PREP(IRDMAQPC_FW_CC_ENABLE,
+ roce_info->fw_cc_enable) |
+ FIELD_PREP(IRDMAQPC_UDPRIVCQENABLE,
+ roce_info->udprivcq_en) |
+ FIELD_PREP(IRDMAQPC_PRIVEN, roce_info->priv_mode_en) |
+ FIELD_PREP(IRDMAQPC_REMOTE_ATOMIC_EN,
+ info->remote_atomics_en) |
+ FIELD_PREP(IRDMAQPC_TIMELYENABLE, roce_info->timely_en));
+ set_64bit_val(qp_ctx, 168,
+ FIELD_PREP(IRDMAQPC_QPCOMPCTX, info->qp_compl_ctx));
+ set_64bit_val(qp_ctx, 176,
+ FIELD_PREP(IRDMAQPC_SQTPHVAL, qp->sq_tph_val) |
+ FIELD_PREP(IRDMAQPC_RQTPHVAL, qp->rq_tph_val) |
+ FIELD_PREP(IRDMAQPC_QSHANDLE, qp->qs_handle));
+ set_64bit_val(qp_ctx, 184,
+ FIELD_PREP(IRDMAQPC_LOCAL_IPADDR3, udp->local_ipaddr[3]) |
+ FIELD_PREP(IRDMAQPC_LOCAL_IPADDR2, udp->local_ipaddr[2]));
+ set_64bit_val(qp_ctx, 192,
+ FIELD_PREP(IRDMAQPC_LOCAL_IPADDR1, udp->local_ipaddr[1]) |
+ FIELD_PREP(IRDMAQPC_LOCAL_IPADDR0, udp->local_ipaddr[0]));
+ set_64bit_val(qp_ctx, 200,
+ FIELD_PREP(IRDMAQPC_THIGH, roce_info->t_high) |
+ FIELD_PREP(IRDMAQPC_SRQ_ID,
+ !qp->qp_uk.srq_uk ?
+ 0 : qp->qp_uk.srq_uk->srq_id) |
+ FIELD_PREP(IRDMAQPC_TLOW, roce_info->t_low));
+ set_64bit_val(qp_ctx, 208, roce_info->pd_id |
+ FIELD_PREP(IRDMAQPC_STAT_INDEX_GEN3, info->stats_idx) |
+ FIELD_PREP(IRDMAQPC_PKT_LIMIT, qp->pkt_limit));
+
+ print_hex_dump_debug("WQE: QP_HOST ROCE CTX WQE", DUMP_PREFIX_OFFSET,
+ 16, 8, qp_ctx, IRDMA_QP_CTX_SIZE, false);
+}
+
+void irdma_sc_qp_setctx_roce(struct irdma_sc_qp *qp, __le64 *qp_ctx,
+ struct irdma_qp_host_ctx_info *info)
+{
+ if (qp->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_2)
+ irdma_sc_qp_setctx_roce_gen_2(qp, qp_ctx, info);
+ else
+ irdma_sc_qp_setctx_roce_gen_3(qp, qp_ctx, info);
+}
+
/* irdma_sc_alloc_local_mac_entry - allocate a mac entry
* @cqp: struct for cqp hw
* @scratch: u64 saved to be used during cqp completion
@@ -1080,7 +1475,8 @@ static int irdma_sc_alloc_stag(struct irdma_sc_dev *dev,
FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID) |
FIELD_PREP(IRDMA_CQPSQ_STAG_STAGLEN, info->total_len));
set_64bit_val(wqe, 16,
- FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx));
+ FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_PDID_HI, info->pd_id >> 18));
set_64bit_val(wqe, 40,
FIELD_PREP(IRDMA_CQPSQ_STAG_HMCFNIDX, info->hmc_fcn_index));
@@ -1096,6 +1492,8 @@ static int irdma_sc_alloc_stag(struct irdma_sc_dev *dev,
FIELD_PREP(IRDMA_CQPSQ_STAG_REMACCENABLED, info->remote_access) |
FIELD_PREP(IRDMA_CQPSQ_STAG_USEHMCFNIDX, info->use_hmc_fcn_index) |
FIELD_PREP(IRDMA_CQPSQ_STAG_USEPFRID, info->use_pf_rid) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_REMOTE_ATOMIC_EN,
+ info->remote_atomics_en) |
FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
dma_wmb(); /* make sure WQE is written before valid bit is set */
@@ -1165,6 +1563,7 @@ static int irdma_sc_mr_reg_non_shared(struct irdma_sc_dev *dev,
FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
set_64bit_val(wqe, 16,
FIELD_PREP(IRDMA_CQPSQ_STAG_KEY, info->stag_key) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_PDID_HI, info->pd_id >> 18) |
FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx));
if (!info->chunk_size) {
set_64bit_val(wqe, 32, info->reg_addr_pa);
@@ -1187,6 +1586,8 @@ static int irdma_sc_mr_reg_non_shared(struct irdma_sc_dev *dev,
FIELD_PREP(IRDMA_CQPSQ_STAG_VABASEDTO, addr_type) |
FIELD_PREP(IRDMA_CQPSQ_STAG_USEHMCFNIDX, info->use_hmc_fcn_index) |
FIELD_PREP(IRDMA_CQPSQ_STAG_USEPFRID, info->use_pf_rid) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_REMOTE_ATOMIC_EN,
+ info->remote_atomics_en) |
FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
dma_wmb(); /* make sure WQE is written before valid bit is set */
@@ -1223,7 +1624,8 @@ static int irdma_sc_dealloc_stag(struct irdma_sc_dev *dev,
set_64bit_val(wqe, 8,
FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
set_64bit_val(wqe, 16,
- FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx));
+ FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_PDID_HI, info->pd_id >> 18));
hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DEALLOC_STAG) |
FIELD_PREP(IRDMA_CQPSQ_STAG_MR, info->mr) |
@@ -1263,7 +1665,8 @@ static int irdma_sc_mw_alloc(struct irdma_sc_dev *dev,
set_64bit_val(wqe, 8,
FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
set_64bit_val(wqe, 16,
- FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->mw_stag_index));
+ FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->mw_stag_index) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_PDID_HI, info->pd_id >> 18));
hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_ALLOC_STAG) |
FIELD_PREP(IRDMA_CQPSQ_STAG_MWTYPE, info->mw_wide) |
@@ -1343,6 +1746,7 @@ int irdma_sc_mr_fast_register(struct irdma_sc_qp *qp,
FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
+ FIELD_PREP(IRDMAQPSQ_REMOTE_ATOMICS_EN, info->remote_atomics_en) |
FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
dma_wmb(); /* make sure WQE is written before valid bit is set */
@@ -1873,7 +2277,7 @@ void irdma_sc_vsi_init(struct irdma_sc_vsi *vsi,
mutex_init(&vsi->qos[i].qos_mutex);
INIT_LIST_HEAD(&vsi->qos[i].qplist);
}
- if (vsi->register_qset) {
+ if (vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_2) {
vsi->dev->ws_add = irdma_ws_add;
vsi->dev->ws_remove = irdma_ws_remove;
vsi->dev->ws_reset = irdma_ws_reset;
@@ -1888,7 +2292,7 @@ void irdma_sc_vsi_init(struct irdma_sc_vsi *vsi,
* irdma_get_stats_idx - Return stats index
* @vsi: pointer to the vsi
*/
-static u8 irdma_get_stats_idx(struct irdma_sc_vsi *vsi)
+static u16 irdma_get_stats_idx(struct irdma_sc_vsi *vsi)
{
struct irdma_stats_inst_info stats_info = {};
struct irdma_sc_dev *dev = vsi->dev;
@@ -1964,12 +2368,13 @@ int irdma_vsi_stats_init(struct irdma_sc_vsi *vsi,
(void *)((uintptr_t)stats_buff_mem->va +
IRDMA_GATHER_STATS_BUF_SIZE);
- irdma_hw_stats_start_timer(vsi);
+ if (vsi->dev->hw_attrs.uk_attrs.hw_rev < IRDMA_GEN_3)
+ irdma_hw_stats_start_timer(vsi);
/* when stat allocation is not required default to fcn_id. */
vsi->stats_idx = info->fcn_id;
if (info->alloc_stats_inst) {
- u8 stats_idx = irdma_get_stats_idx(vsi);
+ u16 stats_idx = irdma_get_stats_idx(vsi);
if (stats_idx != IRDMA_INVALID_STATS_IDX) {
vsi->stats_inst_alloc = true;
@@ -1993,7 +2398,7 @@ void irdma_vsi_stats_free(struct irdma_sc_vsi *vsi)
{
struct irdma_stats_inst_info stats_info = {};
struct irdma_sc_dev *dev = vsi->dev;
- u8 stats_idx = vsi->stats_idx;
+ u16 stats_idx = vsi->stats_idx;
if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
if (vsi->stats_inst_alloc) {
@@ -2009,7 +2414,9 @@ void irdma_vsi_stats_free(struct irdma_sc_vsi *vsi)
if (!vsi->pestat)
return;
- irdma_hw_stats_stop_timer(vsi);
+
+ if (dev->hw_attrs.uk_attrs.hw_rev < IRDMA_GEN_3)
+ irdma_hw_stats_stop_timer(vsi);
dma_free_coherent(vsi->pestat->hw->device,
vsi->pestat->gather_info.stats_buff_mem.size,
vsi->pestat->gather_info.stats_buff_mem.va,
@@ -2026,6 +2433,14 @@ u8 irdma_get_encoded_wqe_size(u32 wqsize, enum irdma_queue_type queue_type)
{
u8 encoded_size = 0;
+ if (queue_type == IRDMA_QUEUE_TYPE_SRQ) {
+ /* Smallest SRQ size is 256B (8 quanta), which gets
+ * encoded to 0.
+ */
+ encoded_size = ilog2(wqsize) - 3;
+
+ return encoded_size;
+ }
/* cqp sq's hw coded value starts from 1 for size of 4
* while it starts from 0 for qp' wq's.
*/
@@ -2259,6 +2674,12 @@ int irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp,
info->ae_code | FIELD_PREP(IRDMA_CQPSQ_FWQE_AESOURCE,
info->ae_src) : 0;
set_64bit_val(wqe, 8, temp);
+ if (cqp->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3) {
+ set_64bit_val(wqe, 40,
+ FIELD_PREP(IRDMA_CQPSQ_FWQE_ERR_SQ_IDX, info->err_sq_idx));
+ set_64bit_val(wqe, 48,
+ FIELD_PREP(IRDMA_CQPSQ_FWQE_ERR_RQ_IDX, info->err_rq_idx));
+ }
hdr = qp->qp_uk.qp_id |
FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_FLUSH_WQES) |
@@ -2267,6 +2688,9 @@ int irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp,
FIELD_PREP(IRDMA_CQPSQ_FWQE_FLUSHSQ, flush_sq) |
FIELD_PREP(IRDMA_CQPSQ_FWQE_FLUSHRQ, flush_rq) |
FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ if (cqp->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3)
+ hdr |= FIELD_PREP(IRDMA_CQPSQ_FWQE_ERR_SQ_IDX_VALID, info->err_sq_idx_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_FWQE_ERR_RQ_IDX_VALID, info->err_rq_idx_valid);
dma_wmb(); /* make sure WQE is written before valid bit is set */
set_64bit_val(wqe, 24, hdr);
@@ -2562,6 +2986,9 @@ static int irdma_sc_cq_create(struct irdma_sc_cq *cq, u64 scratch,
FIELD_PREP(IRDMA_CQPSQ_CQ_LPBLSIZE, cq->pbl_chunk_size) |
FIELD_PREP(IRDMA_CQPSQ_CQ_CHKOVERFLOW, check_overflow) |
FIELD_PREP(IRDMA_CQPSQ_CQ_VIRTMAP, cq->virtual_map) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_CQID_HIGH, cq->cq_uk.cq_id >> 22) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_CEQID_HIGH,
+ (cq->ceq_id_valid ? cq->ceq_id : 0) >> 10) |
FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, cq->ceqe_mask) |
FIELD_PREP(IRDMA_CQPSQ_CQ_CEQIDVALID, cq->ceq_id_valid) |
FIELD_PREP(IRDMA_CQPSQ_TPHEN, cq->tph_en) |
@@ -2706,6 +3133,41 @@ static int irdma_sc_cq_modify(struct irdma_sc_cq *cq,
}
/**
+ * irdma_sc_get_decoded_ird_size_gen_3 - get decoded IRD size for GEN 3
+ * @ird_enc: IRD encoding
+ *
+ * IRD size defaults to a value of 4 in case of invalid input.
+ */
+static u16 irdma_sc_get_decoded_ird_size_gen_3(u8 ird_enc)
+{
+ switch (ird_enc) {
+ case IRDMA_IRD_HW_SIZE_4096_GEN3:
+ return 4096;
+ case IRDMA_IRD_HW_SIZE_2048_GEN3:
+ return 2048;
+ case IRDMA_IRD_HW_SIZE_1024_GEN3:
+ return 1024;
+ case IRDMA_IRD_HW_SIZE_512_GEN3:
+ return 512;
+ case IRDMA_IRD_HW_SIZE_256_GEN3:
+ return 256;
+ case IRDMA_IRD_HW_SIZE_128_GEN3:
+ return 128;
+ case IRDMA_IRD_HW_SIZE_64_GEN3:
+ return 64;
+ case IRDMA_IRD_HW_SIZE_32_GEN3:
+ return 32;
+ case IRDMA_IRD_HW_SIZE_16_GEN3:
+ return 16;
+ case IRDMA_IRD_HW_SIZE_8_GEN3:
+ return 8;
+ case IRDMA_IRD_HW_SIZE_4_GEN3:
+ return 4;
+ default:
+ return 4;
+ }
+}
+
+/**
* irdma_check_cqp_progress - check cqp processing progress
* @timeout: timeout info struct
* @dev: sc device struct
@@ -2738,6 +3200,89 @@ static inline void irdma_get_cqp_reg_info(struct irdma_sc_cqp *cqp, u32 *val,
}
/**
+ * irdma_sc_cqp_def_cmpl_ae_handler - remove completed requests from pending list
+ * @dev: sc device struct
+ * @info: AE entry info
+ * @first: true if this is the first call to this handler for given AEQE
+ * @scratch: (out) scratch entry pointer
+ * @sw_def_info: (in/out) SW ticket value for this AE
+ *
+ * In case of AE_DEF_CMPL event, this function should be called in a loop
+ * until it returns a zero value via scratch.
+ * For each call, it looks for a matching CQP request on pending list,
+ * removes it from the list and returns the pointer to the associated scratch
+ * entry.
+ * If this is the first call to this function for given AEQE, sw_def_info
+ * value is not used to find matching requests. Instead, it is populated
+ * with the value from the first matching cqp_request on the list.
+ * For subsequent calls, ooo_op->sw_def_info needs to match the value passed
+ * by a caller.
+ *
+ * The scratch value identifying the cqp_request to be released is passed
+ * back via @scratch; zero indicates that no matching request was found.
+ */
+void irdma_sc_cqp_def_cmpl_ae_handler(struct irdma_sc_dev *dev,
+ struct irdma_aeqe_info *info,
+ bool first, u64 *scratch,
+ u32 *sw_def_info)
+{
+ struct irdma_ooo_cqp_op *ooo_op;
+ unsigned long flags;
+
+ *scratch = 0;
+
+ spin_lock_irqsave(&dev->cqp->ooo_list_lock, flags);
+ list_for_each_entry(ooo_op, &dev->cqp->ooo_pnd, list_entry) {
+ if (ooo_op->deferred &&
+ ((first && ooo_op->def_info == info->def_info) ||
+ (!first && ooo_op->sw_def_info == *sw_def_info))) {
+ *sw_def_info = ooo_op->sw_def_info;
+ *scratch = ooo_op->scratch;
+
+ list_move(&ooo_op->list_entry, &dev->cqp->ooo_avail);
+ atomic64_inc(&dev->cqp->completed_ops);
+
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&dev->cqp->ooo_list_lock, flags);
+
+ if (first && !*scratch)
+ ibdev_dbg(to_ibdev(dev),
+ "AEQ: deferred completion with unknown ticket: def_info 0x%x\n",
+ info->def_info);
+}
+
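The loop prescribed by the kerneldoc above might look like the following in the AE path; the release helper named here is hypothetical, standing in for however the caller completes the recovered cqp_request:

	u32 sw_def_info = 0;
	bool first = true;
	u64 scratch;

	do {
		irdma_sc_cqp_def_cmpl_ae_handler(dev, info, first,
						 &scratch, &sw_def_info);
		if (!scratch)
			break;

		/* hypothetical helper: complete and free the cqp_request
		 * whose scratch entry was just pulled off the pending list
		 */
		irdma_complete_deferred_cqp_op(dev, scratch);
		first = false;
	} while (true);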
+/**
+ * irdma_sc_cqp_cleanup_handler - remove requests from pending list
+ * @dev: sc device struct
+ *
+ * This function should be called in a loop from irdma_cleanup_pending_cqp_op.
+ * For each call, it returns first CQP request on pending list, removes it
+ * from the list and returns the pointer to the associated scratch entry.
+ *
+ * Return: scratch value for the cqp_request to be released, or zero
+ * if the pending list is empty.
+ */
+u64 irdma_sc_cqp_cleanup_handler(struct irdma_sc_dev *dev)
+{
+ struct irdma_ooo_cqp_op *ooo_op;
+ u64 scratch = 0;
+
+ list_for_each_entry(ooo_op, &dev->cqp->ooo_pnd, list_entry) {
+ scratch = ooo_op->scratch;
+
+ list_del(&ooo_op->list_entry);
+ list_add(&ooo_op->list_entry, &dev->cqp->ooo_avail);
+ atomic64_inc(&dev->cqp->completed_ops);
+
+ break;
+ }
+
+ return scratch;
+}
+
+/**
* irdma_cqp_poll_registers - poll cqp registers
* @cqp: struct for cqp hw
* @tail: wqtail register value
@@ -2794,7 +3339,10 @@ static u64 irdma_sc_decode_fpm_commit(struct irdma_sc_dev *dev, __le64 *buf,
obj_info[rsrc_idx].cnt = (u32)FLD_RS_64(dev, temp, IRDMA_COMMIT_FPM_CQCNT);
break;
case IRDMA_HMC_IW_APBVT_ENTRY:
- obj_info[rsrc_idx].cnt = 1;
+ if (dev->hw_attrs.uk_attrs.hw_rev <= IRDMA_GEN_2)
+ obj_info[rsrc_idx].cnt = 1;
+ else
+ obj_info[rsrc_idx].cnt = 0;
break;
default:
obj_info[rsrc_idx].cnt = (u32)temp;
@@ -2829,7 +3377,8 @@ irdma_sc_parse_fpm_commit_buf(struct irdma_sc_dev *dev, __le64 *buf,
IRDMA_HMC_IW_QP);
irdma_sc_decode_fpm_commit(dev, buf, 8, info,
IRDMA_HMC_IW_CQ);
- /* skiping RSRVD */
+ irdma_sc_decode_fpm_commit(dev, buf, 16, info,
+ IRDMA_HMC_IW_SRQ);
irdma_sc_decode_fpm_commit(dev, buf, 24, info,
IRDMA_HMC_IW_HTE);
irdma_sc_decode_fpm_commit(dev, buf, 32, info,
@@ -2864,15 +3413,17 @@ irdma_sc_parse_fpm_commit_buf(struct irdma_sc_dev *dev, __le64 *buf,
IRDMA_HMC_IW_HDR);
irdma_sc_decode_fpm_commit(dev, buf, 152, info,
IRDMA_HMC_IW_MD);
- irdma_sc_decode_fpm_commit(dev, buf, 160, info,
- IRDMA_HMC_IW_OOISC);
- irdma_sc_decode_fpm_commit(dev, buf, 168, info,
- IRDMA_HMC_IW_OOISCFFL);
+ if (dev->cqp->protocol_used == IRDMA_IWARP_PROTOCOL_ONLY) {
+ irdma_sc_decode_fpm_commit(dev, buf, 160, info,
+ IRDMA_HMC_IW_OOISC);
+ irdma_sc_decode_fpm_commit(dev, buf, 168, info,
+ IRDMA_HMC_IW_OOISCFFL);
+ }
}
/* searching for the last object in HMC to find the size of the HMC area. */
for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++) {
- if (info[i].base > max_base) {
+ if (info[i].base > max_base && info[i].cnt) {
max_base = info[i].base;
last_hmc_obj = i;
}
@@ -2927,6 +3478,7 @@ static int irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 *buf,
struct irdma_hmc_fpm_misc *hmc_fpm_misc)
{
struct irdma_hmc_obj_info *obj_info;
+ u8 ird_encoding;
u64 temp;
u32 size;
u16 max_pe_sds;
@@ -2935,7 +3487,19 @@ static int irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 *buf,
get_64bit_val(buf, 0, &temp);
hmc_info->first_sd_index = (u16)FIELD_GET(IRDMA_QUERY_FPM_FIRST_PE_SD_INDEX, temp);
- max_pe_sds = (u16)FIELD_GET(IRDMA_QUERY_FPM_MAX_PE_SDS, temp);
+
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3)
+ max_pe_sds = (u16)FIELD_GET(IRDMA_QUERY_FPM_MAX_PE_SDS_GEN3, temp);
+ else
+ max_pe_sds = (u16)FIELD_GET(IRDMA_QUERY_FPM_MAX_PE_SDS, temp);
+
+ /* Reduce SD count for unprivileged functions by 1 to account for PBLE
+ * backing page rounding
+ */
+ if (dev->hw_attrs.uk_attrs.hw_rev <= IRDMA_GEN_2 &&
+ (hmc_info->hmc_fn_id >= dev->hw_attrs.first_hw_vf_fpm_id ||
+ !dev->privileged))
+ max_pe_sds--;
hmc_fpm_misc->max_sds = max_pe_sds;
hmc_info->sd_table.sd_cnt = max_pe_sds + hmc_info->first_sd_index;
@@ -2949,11 +3513,17 @@ static int irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 *buf,
size = (u32)(temp >> 32);
obj_info[IRDMA_HMC_IW_CQ].size = BIT_ULL(size);
+ irdma_sc_decode_fpm_query(buf, 24, obj_info, IRDMA_HMC_IW_SRQ);
irdma_sc_decode_fpm_query(buf, 32, obj_info, IRDMA_HMC_IW_HTE);
irdma_sc_decode_fpm_query(buf, 40, obj_info, IRDMA_HMC_IW_ARP);
- obj_info[IRDMA_HMC_IW_APBVT_ENTRY].size = 8192;
- obj_info[IRDMA_HMC_IW_APBVT_ENTRY].max_cnt = 1;
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3) {
+ obj_info[IRDMA_HMC_IW_APBVT_ENTRY].size = 0;
+ obj_info[IRDMA_HMC_IW_APBVT_ENTRY].max_cnt = 0;
+ } else {
+ obj_info[IRDMA_HMC_IW_APBVT_ENTRY].size = 8192;
+ obj_info[IRDMA_HMC_IW_APBVT_ENTRY].max_cnt = 1;
+ }
irdma_sc_decode_fpm_query(buf, 48, obj_info, IRDMA_HMC_IW_MR);
irdma_sc_decode_fpm_query(buf, 56, obj_info, IRDMA_HMC_IW_XF);
@@ -2962,7 +3532,7 @@ static int irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 *buf,
obj_info[IRDMA_HMC_IW_XFFL].max_cnt = (u32)temp;
obj_info[IRDMA_HMC_IW_XFFL].size = 4;
hmc_fpm_misc->xf_block_size = FIELD_GET(IRDMA_QUERY_FPM_XFBLOCKSIZE, temp);
- if (!hmc_fpm_misc->xf_block_size)
+ if (obj_info[IRDMA_HMC_IW_XF].max_cnt && !hmc_fpm_misc->xf_block_size)
return -EINVAL;
irdma_sc_decode_fpm_query(buf, 72, obj_info, IRDMA_HMC_IW_Q1);
@@ -2984,6 +3554,14 @@ static int irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 *buf,
hmc_fpm_misc->max_ceqs = FIELD_GET(IRDMA_QUERY_FPM_MAX_CEQS, temp);
hmc_fpm_misc->ht_multiplier = FIELD_GET(IRDMA_QUERY_FPM_HTMULTIPLIER, temp);
hmc_fpm_misc->timer_bucket = FIELD_GET(IRDMA_QUERY_FPM_TIMERBUCKET, temp);
+ if (FIELD_GET(IRDMA_MANAGE_RSRC_VER2,
+ dev->feature_info[IRDMA_FTN_FLAGS])) {
+ ird_encoding = (u8)FIELD_GET(IRDMA_QUERY_FPM_MAX_IRD, temp);
+ hmc_fpm_misc->ird =
+ irdma_sc_get_decoded_ird_size_gen_3(ird_encoding) / 2;
+ dev->hw_attrs.max_hw_ird = hmc_fpm_misc->ird;
+ dev->hw_attrs.max_hw_ord = hmc_fpm_misc->ird;
+ }
if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
return 0;
irdma_sc_decode_fpm_query(buf, 96, obj_info, IRDMA_HMC_IW_FSIMC);
@@ -3000,15 +3578,25 @@ static int irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 *buf,
irdma_sc_decode_fpm_query(buf, 144, obj_info, IRDMA_HMC_IW_HDR);
irdma_sc_decode_fpm_query(buf, 152, obj_info, IRDMA_HMC_IW_MD);
- irdma_sc_decode_fpm_query(buf, 160, obj_info, IRDMA_HMC_IW_OOISC);
-
- get_64bit_val(buf, 168, &temp);
- obj_info[IRDMA_HMC_IW_OOISCFFL].max_cnt = (u32)temp;
- obj_info[IRDMA_HMC_IW_OOISCFFL].size = 4;
- hmc_fpm_misc->ooiscf_block_size = FIELD_GET(IRDMA_QUERY_FPM_OOISCFBLOCKSIZE, temp);
- if (!hmc_fpm_misc->ooiscf_block_size &&
- obj_info[IRDMA_HMC_IW_OOISCFFL].max_cnt)
- return -EINVAL;
+
+ if (dev->cqp->protocol_used == IRDMA_IWARP_PROTOCOL_ONLY) {
+ irdma_sc_decode_fpm_query(buf, 160, obj_info, IRDMA_HMC_IW_OOISC);
+
+ get_64bit_val(buf, 168, &temp);
+ obj_info[IRDMA_HMC_IW_OOISCFFL].max_cnt = (u32)temp;
+ obj_info[IRDMA_HMC_IW_OOISCFFL].size = 4;
+ hmc_fpm_misc->ooiscf_block_size = FIELD_GET(IRDMA_QUERY_FPM_OOISCFBLOCKSIZE, temp);
+ if (!hmc_fpm_misc->ooiscf_block_size &&
+ obj_info[IRDMA_HMC_IW_OOISCFFL].max_cnt)
+ return -EINVAL;
+ }
+
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3) {
+ get_64bit_val(buf, 176, &temp);
+ hmc_fpm_misc->loc_mem_pages = (u32)FIELD_GET(IRDMA_QUERY_FPM_LOC_MEM_PAGES, temp);
+ if (!hmc_fpm_misc->loc_mem_pages)
+ return -EINVAL;
+ }
return 0;
}
@@ -3088,6 +3676,8 @@ exit:
int irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
struct irdma_cqp_init_info *info)
{
+ struct irdma_ooo_cqp_op *ooo_op;
+ u32 num_ooo_ops;
u8 hw_sq_size;
if (info->sq_size > IRDMA_CQP_SW_SQSIZE_2048 ||
@@ -3118,17 +3708,43 @@ int irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
cqp->rocev2_rto_policy = info->rocev2_rto_policy;
cqp->protocol_used = info->protocol_used;
memcpy(&cqp->dcqcn_params, &info->dcqcn_params, sizeof(cqp->dcqcn_params));
+ if (cqp->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3) {
+ cqp->ooisc_blksize = info->ooisc_blksize;
+ cqp->rrsp_blksize = info->rrsp_blksize;
+ cqp->q1_blksize = info->q1_blksize;
+ cqp->xmit_blksize = info->xmit_blksize;
+ cqp->blksizes_valid = info->blksizes_valid;
+ cqp->ts_shift = info->ts_shift;
+ cqp->ts_override = info->ts_override;
+ cqp->en_fine_grained_timers = info->en_fine_grained_timers;
+ cqp->pe_en_vf_cnt = info->pe_en_vf_cnt;
+ cqp->ooo_op_array = info->ooo_op_array;
+ /* initialize the OOO lists */
+ INIT_LIST_HEAD(&cqp->ooo_avail);
+ INIT_LIST_HEAD(&cqp->ooo_pnd);
+ if (cqp->ooo_op_array) {
+ /* Populate avail list entries */
+ for (num_ooo_ops = 0, ooo_op = info->ooo_op_array;
+ num_ooo_ops < cqp->sq_size;
+ num_ooo_ops++, ooo_op++)
+ list_add(&ooo_op->list_entry, &cqp->ooo_avail);
+ }
+ }
info->dev->cqp = cqp;
IRDMA_RING_INIT(cqp->sq_ring, cqp->sq_size);
+ cqp->last_def_cmpl_ticket = 0;
+ cqp->sw_def_cmpl_ticket = 0;
cqp->requested_ops = 0;
atomic64_set(&cqp->completed_ops, 0);
/* for the cqp commands backlog. */
INIT_LIST_HEAD(&cqp->dev->cqp_cmd_head);
writel(0, cqp->dev->hw_regs[IRDMA_CQPTAIL]);
- writel(0, cqp->dev->hw_regs[IRDMA_CQPDB]);
- writel(0, cqp->dev->hw_regs[IRDMA_CCQPSTATUS]);
+ if (cqp->dev->hw_attrs.uk_attrs.hw_rev <= IRDMA_GEN_2) {
+ writel(0, cqp->dev->hw_regs[IRDMA_CQPDB]);
+ writel(0, cqp->dev->hw_regs[IRDMA_CCQPSTATUS]);
+ }
ibdev_dbg(to_ibdev(cqp->dev),
"WQE: sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%p] cqp[%p] polarity[x%04x]\n",
@@ -3160,6 +3776,7 @@ int irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err, u16 *min_err)
return -ENOMEM;
spin_lock_init(&cqp->dev->cqp_lock);
+ spin_lock_init(&cqp->ooo_list_lock);
temp = FIELD_PREP(IRDMA_CQPHC_SQSIZE, cqp->hw_sq_size) |
FIELD_PREP(IRDMA_CQPHC_SVER, cqp->struct_ver) |
@@ -3171,12 +3788,29 @@ int irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err, u16 *min_err)
FIELD_PREP(IRDMA_CQPHC_PROTOCOL_USED,
cqp->protocol_used);
}
+ if (hw_rev >= IRDMA_GEN_3)
+ temp |= FIELD_PREP(IRDMA_CQPHC_EN_FINE_GRAINED_TIMERS,
+ cqp->en_fine_grained_timers);
set_64bit_val(cqp->host_ctx, 0, temp);
set_64bit_val(cqp->host_ctx, 8, cqp->sq_pa);
temp = FIELD_PREP(IRDMA_CQPHC_ENABLED_VFS, cqp->ena_vf_count) |
FIELD_PREP(IRDMA_CQPHC_HMC_PROFILE, cqp->hmc_profile);
+
+ if (hw_rev >= IRDMA_GEN_3)
+ temp |= FIELD_PREP(IRDMA_CQPHC_OOISC_BLKSIZE,
+ cqp->ooisc_blksize) |
+ FIELD_PREP(IRDMA_CQPHC_RRSP_BLKSIZE,
+ cqp->rrsp_blksize) |
+ FIELD_PREP(IRDMA_CQPHC_Q1_BLKSIZE, cqp->q1_blksize) |
+ FIELD_PREP(IRDMA_CQPHC_XMIT_BLKSIZE,
+ cqp->xmit_blksize) |
+ FIELD_PREP(IRDMA_CQPHC_BLKSIZES_VALID,
+ cqp->blksizes_valid) |
+ FIELD_PREP(IRDMA_CQPHC_TIMESTAMP_OVERRIDE,
+ cqp->ts_override) |
+ FIELD_PREP(IRDMA_CQPHC_TS_SHIFT, cqp->ts_shift);
set_64bit_val(cqp->host_ctx, 16, temp);
set_64bit_val(cqp->host_ctx, 24, (uintptr_t)cqp);
temp = FIELD_PREP(IRDMA_CQPHC_HW_MAJVER, cqp->hw_maj_ver) |
@@ -3338,6 +3972,87 @@ void irdma_sc_ccq_arm(struct irdma_sc_cq *ccq)
}
/**
+ * irdma_sc_process_def_cmpl - process deferred or pending completion
+ * @cqp: CQP sc struct
+ * @info: CQP CQE info
+ * @wqe_idx: CQP WQE descriptor index
+ * @def_info: deferred op ticket value or out-of-order completion id
+ * @def_cmpl: true for deferred completion, false for pending (RCA)
+ */
+static void irdma_sc_process_def_cmpl(struct irdma_sc_cqp *cqp,
+ struct irdma_ccq_cqe_info *info,
+ u32 wqe_idx, u32 def_info, bool def_cmpl)
+{
+ struct irdma_ooo_cqp_op *ooo_op;
+ unsigned long flags;
+
+ /* Deferred and out-of-order completions share the same list of pending
+ * completions. Since the list can also be accessed from the AE handler,
+ * it must be protected by a lock.
+ */
+ spin_lock_irqsave(&cqp->ooo_list_lock, flags);
+
+ /* For deferred completions bump up SW completion ticket value. */
+ if (def_cmpl) {
+ cqp->last_def_cmpl_ticket = def_info;
+ cqp->sw_def_cmpl_ticket++;
+ }
+ if (!list_empty(&cqp->ooo_avail)) {
+ ooo_op = (struct irdma_ooo_cqp_op *)
+ list_entry(cqp->ooo_avail.next,
+ struct irdma_ooo_cqp_op, list_entry);
+
+ list_del(&ooo_op->list_entry);
+ ooo_op->scratch = info->scratch;
+ ooo_op->def_info = def_info;
+ ooo_op->sw_def_info = cqp->sw_def_cmpl_ticket;
+ ooo_op->deferred = def_cmpl;
+ ooo_op->wqe_idx = wqe_idx;
+ /* Pending completions must be chronologically ordered,
+ * so they are added at the tail of the list.
+ */
+ list_add_tail(&ooo_op->list_entry, &cqp->ooo_pnd);
+ }
+ spin_unlock_irqrestore(&cqp->ooo_list_lock, flags);
+
+ info->pending = true;
+}
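
The two tickets interact as follows (an illustrative reading of the code above):

/*
 * A deferred completion (def_cmpl == true) latches the firmware ticket
 * def_info in last_def_cmpl_ticket and stamps the queued entry with the
 * driver's monotonically increasing sw_def_cmpl_ticket. The later
 * IRDMA_AE_CQP_DEFERRED_COMPLETE AE drains matching entries through
 * irdma_sc_cqp_def_cmpl_ae_handler(): the first lookup matches on
 * def_info, subsequent lookups on sw_def_info.
 */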
+
+/**
+ * irdma_sc_process_ooo_cmpl - process out-of-order (final) completion
+ * @cqp: CQP sc struct
+ * @info: CQP CQE info
+ * @def_info: out-of-order completion id
+ */
+static void irdma_sc_process_ooo_cmpl(struct irdma_sc_cqp *cqp,
+ struct irdma_ccq_cqe_info *info,
+ u32 def_info)
+{
+ struct irdma_ooo_cqp_op *ooo_op_tmp;
+ struct irdma_ooo_cqp_op *ooo_op;
+ unsigned long flags;
+
+ info->scratch = 0;
+
+ spin_lock_irqsave(&cqp->ooo_list_lock, flags);
+ list_for_each_entry_safe(ooo_op, ooo_op_tmp, &cqp->ooo_pnd,
+ list_entry) {
+ if (!ooo_op->deferred && ooo_op->def_info == def_info) {
+ list_del(&ooo_op->list_entry);
+ info->scratch = ooo_op->scratch;
+ list_add(&ooo_op->list_entry, &cqp->ooo_avail);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&cqp->ooo_list_lock, flags);
+
+ if (!info->scratch)
+ ibdev_dbg(to_ibdev(cqp->dev),
+ "CQP: DEBUG_FW_OOO out-of-order completion with unknown def_info = 0x%x\n",
+ def_info);
+}
+
+/**
* irdma_sc_ccq_get_cqe_info - get ccq's cq entry
* @ccq: ccq sc struct
* @info: completion q entry to return
@@ -3345,6 +4060,10 @@ void irdma_sc_ccq_arm(struct irdma_sc_cq *ccq)
int irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
struct irdma_ccq_cqe_info *info)
{
+ u32 def_info;
+ bool def_cmpl = false;
+ bool pend_cmpl = false;
+ bool ooo_final_cmpl = false;
u64 qp_ctx, temp, temp1;
__le64 *cqe;
struct irdma_sc_cqp *cqp;
@@ -3352,6 +4071,7 @@ int irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
u32 error;
u8 polarity;
int ret_code = 0;
+ unsigned long flags;
if (ccq->cq_uk.avoid_mem_cflct)
cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(&ccq->cq_uk);
@@ -3383,6 +4103,25 @@ int irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
get_64bit_val(cqe, 16, &temp1);
info->op_ret_val = (u32)FIELD_GET(IRDMA_CCQ_OPRETVAL, temp1);
+ if (cqp->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3) {
+ def_cmpl = info->maj_err_code == IRDMA_CQPSQ_MAJ_NO_ERROR &&
+ info->min_err_code == IRDMA_CQPSQ_MIN_DEF_CMPL;
+ def_info = (u32)FIELD_GET(IRDMA_CCQ_DEFINFO, temp1);
+
+ pend_cmpl = info->maj_err_code == IRDMA_CQPSQ_MAJ_NO_ERROR &&
+ info->min_err_code == IRDMA_CQPSQ_MIN_OOO_CMPL;
+
+ ooo_final_cmpl = (bool)FIELD_GET(IRDMA_OOO_CMPL, temp);
+
+ if (def_cmpl || pend_cmpl || ooo_final_cmpl) {
+ if (ooo_final_cmpl)
+ irdma_sc_process_ooo_cmpl(cqp, info, def_info);
+ else
+ irdma_sc_process_def_cmpl(cqp, info, wqe_idx,
+ def_info, def_cmpl);
+ }
+ }
+
get_64bit_val(cqp->sq_base[wqe_idx].elem, 24, &temp1);
info->op_code = (u8)FIELD_GET(IRDMA_CQPSQ_OPCODE, temp1);
info->cqp = cqp;
@@ -3399,7 +4138,16 @@ int irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
dma_wmb(); /* make sure shadow area is updated before moving tail */
- IRDMA_RING_MOVE_TAIL(cqp->sq_ring);
+ spin_lock_irqsave(&cqp->dev->cqp_lock, flags);
+ if (!ooo_final_cmpl)
+ IRDMA_RING_MOVE_TAIL(cqp->sq_ring);
+ spin_unlock_irqrestore(&cqp->dev->cqp_lock, flags);
+
+ /* Do not increment completed_ops counter on pending or deferred
+ * completions.
+ */
+ if (pend_cmpl || def_cmpl)
+ return ret_code;
atomic64_inc(&cqp->completed_ops);
return ret_code;
@@ -3647,7 +4395,7 @@ int irdma_sc_ceq_init(struct irdma_sc_ceq *ceq,
ceq->pbl_list = (ceq->virtual_map ? info->pbl_list : NULL);
ceq->tph_en = info->tph_en;
ceq->tph_val = info->tph_val;
- ceq->vsi = info->vsi;
+ ceq->vsi_idx = info->vsi_idx;
ceq->polarity = 1;
IRDMA_RING_INIT(ceq->ceq_ring, ceq->elem_cnt);
ceq->dev->ceq[info->ceq_id] = ceq;
@@ -3680,13 +4428,16 @@ static int irdma_sc_ceq_create(struct irdma_sc_ceq *ceq, u64 scratch,
(ceq->virtual_map ? ceq->first_pm_pbl_idx : 0));
set_64bit_val(wqe, 56,
FIELD_PREP(IRDMA_CQPSQ_TPHVAL, ceq->tph_val) |
- FIELD_PREP(IRDMA_CQPSQ_VSIIDX, ceq->vsi->vsi_idx));
+ FIELD_PREP(IRDMA_CQPSQ_PASID, ceq->pasid) |
+ FIELD_PREP(IRDMA_CQPSQ_VSIIDX, ceq->vsi_idx));
hdr = FIELD_PREP(IRDMA_CQPSQ_CEQ_CEQID, ceq->ceq_id) |
+ FIELD_PREP(IRDMA_CQPSQ_CEQ_CEQID_HIGH, ceq->ceq_id >> 10) |
FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_CEQ) |
FIELD_PREP(IRDMA_CQPSQ_CEQ_LPBLSIZE, ceq->pbl_chunk_size) |
FIELD_PREP(IRDMA_CQPSQ_CEQ_VMAP, ceq->virtual_map) |
FIELD_PREP(IRDMA_CQPSQ_CEQ_ITRNOEXPIRE, ceq->itr_no_expire) |
FIELD_PREP(IRDMA_CQPSQ_TPHEN, ceq->tph_en) |
+ FIELD_PREP(IRDMA_CQPSQ_PASID_VALID, ceq->pasid_valid) |
FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
dma_wmb(); /* make sure WQE is written before valid bit is set */
@@ -3741,7 +4492,7 @@ int irdma_sc_cceq_create(struct irdma_sc_ceq *ceq, u64 scratch)
int ret_code;
struct irdma_sc_dev *dev = ceq->dev;
- dev->ccq->vsi = ceq->vsi;
+ dev->ccq->vsi_idx = ceq->vsi_idx;
if (ceq->reg_cq) {
ret_code = irdma_sc_add_cq_ctx(ceq, ceq->dev->ccq);
if (ret_code)
@@ -3774,11 +4525,14 @@ int irdma_sc_ceq_destroy(struct irdma_sc_ceq *ceq, u64 scratch, bool post_sq)
set_64bit_val(wqe, 16, ceq->elem_cnt);
set_64bit_val(wqe, 48, ceq->first_pm_pbl_idx);
+ set_64bit_val(wqe, 56,
+ FIELD_PREP(IRDMA_CQPSQ_PASID, ceq->pasid));
hdr = ceq->ceq_id |
FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_CEQ) |
FIELD_PREP(IRDMA_CQPSQ_CEQ_LPBLSIZE, ceq->pbl_chunk_size) |
FIELD_PREP(IRDMA_CQPSQ_CEQ_VMAP, ceq->virtual_map) |
FIELD_PREP(IRDMA_CQPSQ_TPHEN, ceq->tph_en) |
+ FIELD_PREP(IRDMA_CQPSQ_PASID_VALID, ceq->pasid_valid) |
FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
dma_wmb(); /* make sure WQE is written before valid bit is set */
@@ -3942,10 +4696,13 @@ static int irdma_sc_aeq_create(struct irdma_sc_aeq *aeq, u64 scratch,
(aeq->virtual_map ? 0 : aeq->aeq_elem_pa));
set_64bit_val(wqe, 48,
(aeq->virtual_map ? aeq->first_pm_pbl_idx : 0));
+ set_64bit_val(wqe, 56,
+ FIELD_PREP(IRDMA_CQPSQ_PASID, aeq->pasid));
hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_AEQ) |
FIELD_PREP(IRDMA_CQPSQ_AEQ_LPBLSIZE, aeq->pbl_chunk_size) |
FIELD_PREP(IRDMA_CQPSQ_AEQ_VMAP, aeq->virtual_map) |
+ FIELD_PREP(IRDMA_CQPSQ_PASID_VALID, aeq->pasid_valid) |
FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
dma_wmb(); /* make sure WQE is written before valid bit is set */
@@ -3974,7 +4731,8 @@ static int irdma_sc_aeq_destroy(struct irdma_sc_aeq *aeq, u64 scratch,
u64 hdr;
dev = aeq->dev;
- writel(0, dev->hw_regs[IRDMA_PFINT_AEQCTL]);
+ if (dev->privileged)
+ writel(0, dev->hw_regs[IRDMA_PFINT_AEQCTL]);
cqp = dev->cqp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
@@ -3982,9 +4740,12 @@ static int irdma_sc_aeq_destroy(struct irdma_sc_aeq *aeq, u64 scratch,
return -ENOMEM;
set_64bit_val(wqe, 16, aeq->elem_cnt);
set_64bit_val(wqe, 48, aeq->first_pm_pbl_idx);
+ set_64bit_val(wqe, 56,
+ FIELD_PREP(IRDMA_CQPSQ_PASID, aeq->pasid));
hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_AEQ) |
FIELD_PREP(IRDMA_CQPSQ_AEQ_LPBLSIZE, aeq->pbl_chunk_size) |
FIELD_PREP(IRDMA_CQPSQ_AEQ_VMAP, aeq->virtual_map) |
+ FIELD_PREP(IRDMA_CQPSQ_PASID_VALID, aeq->pasid_valid) |
FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
dma_wmb(); /* make sure WQE is written before valid bit is set */
@@ -4025,18 +4786,39 @@ int irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
print_hex_dump_debug("WQE: AEQ_ENTRY WQE", DUMP_PREFIX_OFFSET, 16, 8,
aeqe, 16, false);
- ae_src = (u8)FIELD_GET(IRDMA_AEQE_AESRC, temp);
- info->wqe_idx = (u16)FIELD_GET(IRDMA_AEQE_WQDESCIDX, temp);
- info->qp_cq_id = (u32)FIELD_GET(IRDMA_AEQE_QPCQID_LOW, temp) |
+ if (aeq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3) {
+ ae_src = (u8)FIELD_GET(IRDMA_AEQE_AESRC_GEN_3, temp);
+ info->wqe_idx = (u16)FIELD_GET(IRDMA_AEQE_WQDESCIDX_GEN_3,
+ temp);
+ info->qp_cq_id = (u32)FIELD_GET(IRDMA_AEQE_QPCQID_GEN_3, temp);
+ info->ae_id = (u16)FIELD_GET(IRDMA_AEQE_AECODE_GEN_3, temp);
+ info->tcp_state = (u8)FIELD_GET(IRDMA_AEQE_TCPSTATE_GEN_3, compl_ctx);
+ info->iwarp_state = (u8)FIELD_GET(IRDMA_AEQE_IWSTATE_GEN_3, temp);
+ info->q2_data_written = (u8)FIELD_GET(IRDMA_AEQE_Q2DATA_GEN_3, compl_ctx);
+ info->aeqe_overflow = (bool)FIELD_GET(IRDMA_AEQE_OVERFLOW_GEN_3, temp);
+ info->compl_ctx = FIELD_GET(IRDMA_AEQE_CMPL_CTXT, compl_ctx);
+ compl_ctx = FIELD_GET(IRDMA_AEQE_CMPL_CTXT, compl_ctx) << IRDMA_AEQE_CMPL_CTXT_S;
+ } else {
+ ae_src = (u8)FIELD_GET(IRDMA_AEQE_AESRC, temp);
+ info->wqe_idx = (u16)FIELD_GET(IRDMA_AEQE_WQDESCIDX, temp);
+ info->qp_cq_id = (u32)FIELD_GET(IRDMA_AEQE_QPCQID_LOW, temp) |
((u32)FIELD_GET(IRDMA_AEQE_QPCQID_HI, temp) << 18);
- info->ae_id = (u16)FIELD_GET(IRDMA_AEQE_AECODE, temp);
- info->tcp_state = (u8)FIELD_GET(IRDMA_AEQE_TCPSTATE, temp);
- info->iwarp_state = (u8)FIELD_GET(IRDMA_AEQE_IWSTATE, temp);
- info->q2_data_written = (u8)FIELD_GET(IRDMA_AEQE_Q2DATA, temp);
- info->aeqe_overflow = (bool)FIELD_GET(IRDMA_AEQE_OVERFLOW, temp);
+ info->ae_id = (u16)FIELD_GET(IRDMA_AEQE_AECODE, temp);
+ info->tcp_state = (u8)FIELD_GET(IRDMA_AEQE_TCPSTATE, temp);
+ info->iwarp_state = (u8)FIELD_GET(IRDMA_AEQE_IWSTATE, temp);
+ info->q2_data_written = (u8)FIELD_GET(IRDMA_AEQE_Q2DATA, temp);
+ info->aeqe_overflow = (bool)FIELD_GET(IRDMA_AEQE_OVERFLOW,
+ temp);
+ }
info->ae_src = ae_src;
switch (info->ae_id) {
+ case IRDMA_AE_SRQ_LIMIT:
+ info->srq = true;
+ /* [63:6] from CMPL_CTXT, [5:0] from WQDESCIDX. */
+ info->compl_ctx = compl_ctx;
+ ae_src = IRDMA_AE_SOURCE_RSVD;
+ break;
case IRDMA_AE_PRIV_OPERATION_DENIED:
case IRDMA_AE_AMP_INVALIDATE_TYPE1_MW:
case IRDMA_AE_AMP_MWBIND_ZERO_BASED_TYPE1_MW:
@@ -4069,6 +4851,10 @@ int irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
case IRDMA_AE_LLP_TOO_MANY_RETRIES:
+ case IRDMA_AE_LLP_TOO_MANY_RNRS:
+ case IRDMA_AE_REMOTE_QP_CATASTROPHIC:
+ case IRDMA_AE_LOCAL_QP_CATASTROPHIC:
+ case IRDMA_AE_RCE_QP_CATASTROPHIC:
case IRDMA_AE_LLP_DOUBT_REACHABILITY:
case IRDMA_AE_LLP_CONNECTION_ESTABLISHED:
case IRDMA_AE_RESET_SENT:
@@ -4085,6 +4871,10 @@ int irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
info->compl_ctx = compl_ctx << 1;
ae_src = IRDMA_AE_SOURCE_RSVD;
break;
+ case IRDMA_AE_CQP_DEFERRED_COMPLETE:
+ info->def_info = info->wqe_idx;
+ ae_src = IRDMA_AE_SOURCE_RSVD;
+ break;
case IRDMA_AE_ROCE_EMPTY_MCG:
case IRDMA_AE_ROCE_BAD_MC_IP_ADDR:
case IRDMA_AE_ROCE_BAD_MC_QPID:
@@ -4110,6 +4900,7 @@ int irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
info->qp = true;
info->rq = true;
info->compl_ctx = compl_ctx;
+ info->err_rq_idx_valid = true;
break;
case IRDMA_AE_SOURCE_CQ:
case IRDMA_AE_SOURCE_CQ_0110:
@@ -4125,8 +4916,18 @@ int irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
info->compl_ctx = compl_ctx;
break;
case IRDMA_AE_SOURCE_IN_RR_WR:
+ info->qp = true;
+ if (aeq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3)
+ info->err_rq_idx_valid = true;
+ info->compl_ctx = compl_ctx;
+ info->in_rdrsp_wr = true;
+ break;
case IRDMA_AE_SOURCE_IN_RR_WR_1011:
info->qp = true;
+ if (aeq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3) {
+ info->sq = true;
+ info->err_rq_idx_valid = true;
+ }
info->compl_ctx = compl_ctx;
info->in_rdrsp_wr = true;
break;
@@ -4336,6 +5137,26 @@ int irdma_sc_init_iw_hmc(struct irdma_sc_dev *dev, u8 hmc_fn_id)
}
/**
+ * irdma_set_loc_mem() - set the local memory enable bit
+ * @buf: ptr to the commit FPM buffer in which local memory gets enabled
+ */
+static void irdma_set_loc_mem(__le64 *buf)
+{
+ u64 loc_mem_en = BIT_ULL(ENABLE_LOC_MEM);
+ u32 offset;
+ u64 temp;
+
+ for (offset = 0; offset < IRDMA_COMMIT_FPM_BUF_SIZE;
+ offset += sizeof(__le64)) {
+ if (offset == IRDMA_PBLE_COMMIT_OFFSET)
+ continue;
+ get_64bit_val(buf, offset, &temp);
+ if (temp)
+ set_64bit_val(buf, offset, temp | loc_mem_en);
+ }
+}
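
A worked example with a hypothetical count value, using ENABLE_LOC_MEM (bit 63) and IRDMA_PBLE_COMMIT_OFFSET from defs.h below:

/*
 *   before: 0x0000000000001000
 *   after:  0x8000000000001000   (bit 63 = ENABLE_LOC_MEM set)
 *
 * Zero-valued fields and the PBLE field at IRDMA_PBLE_COMMIT_OFFSET are
 * skipped, so PBLEs remain in host memory.
 */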
+
+/**
* irdma_sc_cfg_iw_fpm() - commits hmc obj cnt values using cqp
* command and populates fpm base address in hmc_info
* @dev : ptr to irdma_dev struct
@@ -4356,7 +5177,7 @@ static int irdma_sc_cfg_iw_fpm(struct irdma_sc_dev *dev, u8 hmc_fn_id)
set_64bit_val(buf, 0, (u64)obj_info[IRDMA_HMC_IW_QP].cnt);
set_64bit_val(buf, 8, (u64)obj_info[IRDMA_HMC_IW_CQ].cnt);
- set_64bit_val(buf, 16, (u64)0); /* RSRVD */
+ set_64bit_val(buf, 16, (u64)obj_info[IRDMA_HMC_IW_SRQ].cnt);
set_64bit_val(buf, 24, (u64)obj_info[IRDMA_HMC_IW_HTE].cnt);
set_64bit_val(buf, 32, (u64)obj_info[IRDMA_HMC_IW_ARP].cnt);
set_64bit_val(buf, 40, (u64)0); /* RSVD */
@@ -4383,7 +5204,9 @@ static int irdma_sc_cfg_iw_fpm(struct irdma_sc_dev *dev, u8 hmc_fn_id)
(u64)obj_info[IRDMA_HMC_IW_OOISC].cnt);
set_64bit_val(buf, 168,
(u64)obj_info[IRDMA_HMC_IW_OOISCFFL].cnt);
-
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3 &&
+ dev->hmc_fpm_misc.loc_mem_pages)
+ irdma_set_loc_mem(buf);
commit_fpm_mem.pa = dev->fpm_commit_buf_pa;
commit_fpm_mem.va = dev->fpm_commit_buf;
@@ -4592,6 +5415,7 @@ static bool irdma_cqp_ring_full(struct irdma_sc_cqp *cqp)
static u32 irdma_est_sd(struct irdma_sc_dev *dev,
struct irdma_hmc_info *hmc_info)
{
+ struct irdma_hmc_obj_info *pble_info;
int i;
u64 size = 0;
u64 sd;
@@ -4600,12 +5424,22 @@ static u32 irdma_est_sd(struct irdma_sc_dev *dev,
if (i != IRDMA_HMC_IW_PBLE)
size += round_up(hmc_info->hmc_obj[i].cnt *
hmc_info->hmc_obj[i].size, 512);
- size += round_up(hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt *
- hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].size, 512);
+
+ pble_info = &hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE];
+ if (dev->privileged)
+ size += round_up(pble_info->cnt * pble_info->size, 512);
if (size & 0x1FFFFF)
sd = (size >> 21) + 1; /* add 1 for remainder */
else
sd = size >> 21;
+ if (!dev->privileged && !dev->hmc_fpm_misc.loc_mem_pages) {
+ /* 2MB alignment for VF PBLE HMC */
+ size = pble_info->cnt * pble_info->size;
+ if (size & 0x1FFFFF)
+ sd += (size >> 21) + 1; /* add 1 for remainder */
+ else
+ sd += size >> 21;
+ }
if (sd > 0xFFFFFFFF) {
ibdev_dbg(to_ibdev(dev), "HMC: sd overflow[%lld]\n", sd);
sd = 0xFFFFFFFF - 1;
@@ -4615,17 +5449,6 @@ static u32 irdma_est_sd(struct irdma_sc_dev *dev,
}
/**
- * irdma_sc_query_rdma_features_done - poll cqp for query features done
- * @cqp: struct for cqp hw
- */
-static int irdma_sc_query_rdma_features_done(struct irdma_sc_cqp *cqp)
-{
- return irdma_sc_poll_for_cqp_op_done(cqp,
- IRDMA_CQP_OP_QUERY_RDMA_FEATURES,
- NULL);
-}
-
-/**
* irdma_sc_query_rdma_features - query RDMA features and FW ver
* @cqp: struct for cqp hw
* @buf: buffer to hold query info
@@ -4634,7 +5457,9 @@ static int irdma_sc_query_rdma_features_done(struct irdma_sc_cqp *cqp)
static int irdma_sc_query_rdma_features(struct irdma_sc_cqp *cqp,
struct irdma_dma_mem *buf, u64 scratch)
{
+ u32 tail, val, error;
__le64 *wqe;
+ int status;
u64 temp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
@@ -4654,9 +5479,15 @@ static int irdma_sc_query_rdma_features(struct irdma_sc_cqp *cqp,
print_hex_dump_debug("WQE: QUERY RDMA FEATURES", DUMP_PREFIX_OFFSET,
16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
+
irdma_sc_cqp_post_sq(cqp);
+ status = irdma_cqp_poll_registers(cqp, tail,
+ cqp->dev->hw_attrs.max_done_count);
+ if (error || status)
+ status = -EINVAL;
- return 0;
+ return status;
}
/**
@@ -4678,8 +5509,6 @@ int irdma_get_rdma_features(struct irdma_sc_dev *dev)
return -ENOMEM;
ret_code = irdma_sc_query_rdma_features(dev->cqp, &feat_buf, 0);
- if (!ret_code)
- ret_code = irdma_sc_query_rdma_features_done(dev->cqp);
if (ret_code)
goto exit;
@@ -4703,8 +5532,6 @@ int irdma_get_rdma_features(struct irdma_sc_dev *dev)
return -ENOMEM;
ret_code = irdma_sc_query_rdma_features(dev->cqp, &feat_buf, 0);
- if (!ret_code)
- ret_code = irdma_sc_query_rdma_features_done(dev->cqp);
if (ret_code)
goto exit;
@@ -4731,6 +5558,10 @@ int irdma_get_rdma_features(struct irdma_sc_dev *dev)
}
dev->feature_info[feat_type] = temp;
}
+
+ if (dev->feature_info[IRDMA_FTN_FLAGS] & IRDMA_ATOMICS_ALLOWED_BIT)
+ dev->hw_attrs.uk_attrs.feature_flags |= IRDMA_FEATURE_ATOMIC_OPS;
+
exit:
dma_free_coherent(dev->hw->device, feat_buf.size, feat_buf.va,
feat_buf.pa);
@@ -4786,22 +5617,354 @@ static void cfg_fpm_value_gen_2(struct irdma_sc_dev *dev,
}
/**
+ * irdma_get_rsrc_mem_config - configure resource memory location (local or host)
+ * @dev: sc device struct
+ * @is_mrte_loc_mem: if true, MRs are placed in local memory because all SDs
+ * are local memory pages
+ *
+ * Only the MR object can be configured as host or local memory when QPs are
+ * in local memory. If QPs are in local memory, all resource objects will be
+ * in local memory as well, except MRs, which may be in either host or local
+ * memory. The one exception is PBLEs, which are always in host memory.
+ */
+static void irdma_get_rsrc_mem_config(struct irdma_sc_dev *dev, bool is_mrte_loc_mem)
+{
+ struct irdma_hmc_info *hmc_info = dev->hmc_info;
+ int i;
+
+ for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++)
+ hmc_info->hmc_obj[i].mem_loc = IRDMA_LOC_MEM;
+
+ if (dev->feature_info[IRDMA_OBJ_1] && !is_mrte_loc_mem) {
+ u8 mem_type;
+
+ mem_type = (u8)FIELD_GET(IRDMA_MR_MEM_LOC, dev->feature_info[IRDMA_OBJ_1]);
+
+ hmc_info->hmc_obj[IRDMA_HMC_IW_MR].mem_loc =
+ (mem_type & IRDMA_OBJ_LOC_MEM_BIT) ?
+ IRDMA_LOC_MEM : IRDMA_HOST_MEM;
+ } else {
+ hmc_info->hmc_obj[IRDMA_HMC_IW_MR].mem_loc = IRDMA_LOC_MEM;
+ }
+
+ hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].mem_loc = IRDMA_HOST_MEM;
+
+ ibdev_dbg(to_ibdev(dev), "HMC: INFO: mrte_mem_loc = %d pble = %d\n",
+ hmc_info->hmc_obj[IRDMA_HMC_IW_MR].mem_loc,
+ hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].mem_loc);
+}
+
+/**
+ * irdma_cfg_sd_mem - allocate sd memory
+ * @dev: sc device struct
+ * @hmc_info: ptr to irdma_hmc_info struct
+ */
+static int irdma_cfg_sd_mem(struct irdma_sc_dev *dev,
+ struct irdma_hmc_info *hmc_info)
+{
+ struct irdma_virt_mem virt_mem;
+ u32 mem_size;
+
+ mem_size = sizeof(struct irdma_hmc_sd_entry) * hmc_info->sd_table.sd_cnt;
+ virt_mem.size = mem_size;
+ virt_mem.va = kzalloc(virt_mem.size, GFP_KERNEL);
+ if (!virt_mem.va)
+ return -ENOMEM;
+ hmc_info->sd_table.sd_entry = virt_mem.va;
+
+ return 0;
+}
+
+/**
+ * irdma_get_objs_pages - get number of 2M pages needed
+ * @dev: sc device struct
+ * @hmc_info: pointer to the HMC configuration information struct
+ * @mem_loc: pages for local or host memory
+ */
+static u32 irdma_get_objs_pages(struct irdma_sc_dev *dev,
+ struct irdma_hmc_info *hmc_info,
+ enum irdma_hmc_obj_mem mem_loc)
+{
+ u64 size = 0;
+ int i;
+
+ for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++) {
+ if (hmc_info->hmc_obj[i].mem_loc == mem_loc) {
+ size += round_up(hmc_info->hmc_obj[i].cnt *
+ hmc_info->hmc_obj[i].size, 512);
+ }
+ }
+
+ return DIV_ROUND_UP(size, IRDMA_HMC_PAGE_SIZE);
+}
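
A worked example of the estimate, with hypothetical object counts and the 2M page size named in the comment above:

/*
 * An object with cnt = 65536 and size = 256 bytes contributes
 * round_up(65536 * 256, 512) = 16 MiB, so it alone accounts for
 * DIV_ROUND_UP(16 MiB, 2 MiB) = 8 pages of its memory class.
 */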
+
+/**
+ * irdma_set_host_hmc_rsrc_gen_3 - calculate host hmc resources for gen 3
+ * @dev: sc device struct
+ */
+static void irdma_set_host_hmc_rsrc_gen_3(struct irdma_sc_dev *dev)
+{
+ struct irdma_hmc_fpm_misc *hmc_fpm_misc;
+ struct irdma_hmc_info *hmc_info;
+ enum irdma_hmc_obj_mem mrte_loc;
+ u32 mrwanted, pblewanted;
+ u32 avail_sds, mr_sds;
+
+ hmc_info = dev->hmc_info;
+ hmc_fpm_misc = &dev->hmc_fpm_misc;
+ avail_sds = hmc_fpm_misc->max_sds;
+ mrte_loc = hmc_info->hmc_obj[IRDMA_HMC_IW_MR].mem_loc;
+ mrwanted = hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt;
+ pblewanted = hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].max_cnt;
+
+ if (mrte_loc == IRDMA_HOST_MEM && avail_sds > IRDMA_MIN_PBLE_PAGES) {
+ mr_sds = avail_sds - IRDMA_MIN_PBLE_PAGES;
+ mrwanted = min(mrwanted, mr_sds * MAX_MR_PER_SD);
+ hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt = mrwanted;
+ avail_sds -= DIV_ROUND_UP(mrwanted, MAX_MR_PER_SD);
+ }
+
+ if (FIELD_GET(IRDMA_MANAGE_RSRC_VER2, dev->feature_info[IRDMA_FTN_FLAGS]) &&
+ pblewanted > avail_sds * MAX_PBLE_PER_SD)
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: Warn: Resource version 2: pble wanted = 0x%x available = 0x%x\n",
+ pblewanted, avail_sds * MAX_PBLE_PER_SD);
+
+ pblewanted = min(pblewanted, avail_sds * MAX_PBLE_PER_SD);
+ hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt = pblewanted;
+}
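
With MAX_MR_PER_SD = 0x8000 from defs.h below, the MR carve-out works out as in this hypothetical example (the IRDMA_MIN_PBLE_PAGES value is assumed):

/*
 * avail_sds = 20 and IRDMA_MIN_PBLE_PAGES = 8 (assumed) leave
 * mr_sds = 12, capping mrwanted at 12 * 0x8000 = 393216 MRs;
 * avail_sds then shrinks by DIV_ROUND_UP(mrwanted, 0x8000) and the
 * remaining SDs are handed to PBLEs, up to MAX_PBLE_PER_SD each.
 */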
+
+/**
+ * irdma_verify_commit_fpm_gen_3 - verify commit fpm values fit in local memory
+ * @dev: sc device struct
+ * @max_pages: max local memory available
+ * @qpwanted: number of qp's wanted
+ */
+static int irdma_verify_commit_fpm_gen_3(struct irdma_sc_dev *dev,
+ u32 max_pages,
+ u32 qpwanted)
+{
+ struct irdma_hmc_fpm_misc *hmc_fpm_misc;
+ u32 rrf_cnt, xf_cnt, timer_cnt, pages_needed;
+ struct irdma_hmc_info *hmc_info;
+ u32 rrffl_cnt = 0;
+ u32 xffl_cnt = 0;
+ u32 q1fl_cnt;
+
+ hmc_info = dev->hmc_info;
+ hmc_fpm_misc = &dev->hmc_fpm_misc;
+
+ rrf_cnt = roundup_pow_of_two(IRDMA_RRF_MULTIPLIER * qpwanted);
+
+ if (hmc_info->hmc_obj[IRDMA_HMC_IW_RRFFL].max_cnt)
+ rrffl_cnt =
+ hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].cnt /
+ hmc_fpm_misc->rrf_block_size;
+
+ xf_cnt = roundup_pow_of_two(IRDMA_XF_MULTIPLIER * qpwanted);
+
+ if (xf_cnt)
+ xffl_cnt = xf_cnt / hmc_fpm_misc->xf_block_size;
+
+ timer_cnt = (round_up(qpwanted, 512) / 512 + 1) *
+ hmc_fpm_misc->timer_bucket;
+
+ q1fl_cnt = hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].cnt / hmc_fpm_misc->q1_block_size;
+
+ pages_needed = irdma_get_objs_pages(dev, hmc_info, IRDMA_LOC_MEM);
+ if (pages_needed > max_pages) {
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: FAIL: SW counts rrf_cnt = %u rrffl_cnt = %u timer_cnt = %u",
+ rrf_cnt, rrffl_cnt, timer_cnt);
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: FAIL: SW counts xf_cnt = %u xffl_cnt = %u q1fl_cnt = %u",
+ xf_cnt, xffl_cnt, q1fl_cnt);
+
+ return -EINVAL;
+ }
+
+ hmc_fpm_misc->max_sds -= pages_needed;
+ hmc_fpm_misc->loc_mem_pages -= pages_needed;
+
+ return 0;
+}
+
+/**
+ * irdma_set_loc_hmc_rsrc_gen_3 - calculate hmc resources for gen 3
+ * @dev: sc device struct
+ * @max_pages: max local memory available
+ * @qpwanted: number of qp's wanted
+ */
+static int irdma_set_loc_hmc_rsrc_gen_3(struct irdma_sc_dev *dev,
+ u32 max_pages,
+ u32 qpwanted)
+{
+ struct irdma_hmc_fpm_misc *hmc_fpm_misc;
+ u32 rrf_cnt, xf_cnt, timer_cnt, pages_needed;
+ struct irdma_hmc_info *hmc_info;
+ u32 ird, ord;
+
+ if (FIELD_GET(IRDMA_MANAGE_RSRC_VER2, dev->feature_info[IRDMA_FTN_FLAGS]))
+ return irdma_verify_commit_fpm_gen_3(dev, max_pages, qpwanted);
+
+ hmc_info = dev->hmc_info;
+ hmc_fpm_misc = &dev->hmc_fpm_misc;
+ ird = dev->hw_attrs.max_hw_ird;
+ ord = dev->hw_attrs.max_hw_ord;
+
+ hmc_info->hmc_obj[IRDMA_HMC_IW_HDR].cnt = qpwanted;
+ hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt = qpwanted;
+
+ hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt =
+ min(hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt, qpwanted * 2);
+
+ hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt =
+ min(qpwanted * 8, hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].max_cnt);
+
+ rrf_cnt = roundup_pow_of_two(IRDMA_RRF_MULTIPLIER * qpwanted);
+ hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].cnt =
+ min(hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].max_cnt, rrf_cnt);
+
+ if (hmc_info->hmc_obj[IRDMA_HMC_IW_RRFFL].max_cnt)
+ hmc_info->hmc_obj[IRDMA_HMC_IW_RRFFL].cnt =
+ hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].cnt /
+ hmc_fpm_misc->rrf_block_size;
+
+ xf_cnt = roundup_pow_of_two(IRDMA_XF_MULTIPLIER * qpwanted);
+ hmc_info->hmc_obj[IRDMA_HMC_IW_XF].cnt =
+ min(hmc_info->hmc_obj[IRDMA_HMC_IW_XF].max_cnt, xf_cnt);
+ hmc_info->hmc_obj[IRDMA_HMC_IW_XFFL].cnt =
+ xf_cnt / hmc_fpm_misc->xf_block_size;
+
+ timer_cnt = (round_up(qpwanted, 512) / 512 + 1) *
+ hmc_fpm_misc->timer_bucket;
+ hmc_info->hmc_obj[IRDMA_HMC_IW_TIMER].cnt =
+ min(timer_cnt, hmc_info->hmc_obj[IRDMA_HMC_IW_TIMER].cnt);
+
+ do {
+ hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].cnt = roundup_pow_of_two(ird * 2 * qpwanted);
+ hmc_info->hmc_obj[IRDMA_HMC_IW_Q1FL].cnt =
+ hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].cnt / hmc_fpm_misc->q1_block_size;
+
+ pages_needed = irdma_get_objs_pages(dev, hmc_info, IRDMA_LOC_MEM);
+ if (pages_needed <= max_pages)
+ break;
+
+ ird /= 2;
+ ord /= 2;
+ } while (ird >= IRDMA_MIN_IRD);
+
+ if (ird < IRDMA_MIN_IRD) {
+ ibdev_dbg(to_ibdev(dev), "HMC: FAIL: IRD=%u Q1 CNT = %u\n",
+ ird, hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].cnt);
+ return -EINVAL;
+ }
+
+ dev->hw_attrs.max_hw_ird = ird;
+ dev->hw_attrs.max_hw_ord = ord;
+ hmc_fpm_misc->max_sds -= pages_needed;
+
+ return 0;
+}
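
A hypothetical walk-through of the back-off loop above:

/*
 * With qpwanted = 1024 and ird = 64, Q1 starts at
 * roundup_pow_of_two(64 * 2 * 1024) = 131072 entries. Each pass that
 * still exceeds max_pages halves ird and ord, shrinking Q1 to 65536,
 * 32768, ... until the local-memory objects fit, or ird drops below
 * IRDMA_MIN_IRD and the configuration fails with -EINVAL.
 */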
+
+/**
+ * cfg_fpm_value_gen_3 - configure fpm for gen 3
+ * @dev: sc device struct
+ * @hmc_info: ptr to irdma_hmc_info struct
+ * @hmc_fpm_misc: ptr to fpm data
+ */
+static int cfg_fpm_value_gen_3(struct irdma_sc_dev *dev,
+ struct irdma_hmc_info *hmc_info,
+ struct irdma_hmc_fpm_misc *hmc_fpm_misc)
+{
+ enum irdma_hmc_obj_mem mrte_loc;
+ u32 mrwanted, qpwanted;
+ int i, ret_code = 0;
+ u32 loc_mem_pages;
+ bool is_mrte_loc_mem;
+
+ loc_mem_pages = hmc_fpm_misc->loc_mem_pages;
+ is_mrte_loc_mem = hmc_fpm_misc->loc_mem_pages == hmc_fpm_misc->max_sds;
+
+ irdma_get_rsrc_mem_config(dev, is_mrte_loc_mem);
+ mrte_loc = hmc_info->hmc_obj[IRDMA_HMC_IW_MR].mem_loc;
+
+ if (is_mrte_loc_mem)
+ loc_mem_pages -= IRDMA_MIN_PBLE_PAGES;
+
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: mrte_loc %d loc_mem %u fpm max sds %u host_obj %d\n",
+ hmc_info->hmc_obj[IRDMA_HMC_IW_MR].mem_loc,
+ hmc_fpm_misc->loc_mem_pages, hmc_fpm_misc->max_sds,
+ is_mrte_loc_mem);
+
+ mrwanted = hmc_info->hmc_obj[IRDMA_HMC_IW_MR].max_cnt;
+ qpwanted = hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt;
+ hmc_info->hmc_obj[IRDMA_HMC_IW_HDR].cnt = qpwanted;
+
+ hmc_info->hmc_obj[IRDMA_HMC_IW_OOISC].max_cnt = 0;
+ hmc_info->hmc_obj[IRDMA_HMC_IW_OOISCFFL].max_cnt = 0;
+ hmc_info->hmc_obj[IRDMA_HMC_IW_HTE].max_cnt = 0;
+ hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].max_cnt = 0;
+
+ if (!FIELD_GET(IRDMA_MANAGE_RSRC_VER2, dev->feature_info[IRDMA_FTN_FLAGS]))
+ hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].max_cnt =
+ min(hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].max_cnt,
+ (u32)IRDMA_FSIAV_CNT_MAX);
+
+ for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++)
+ hmc_info->hmc_obj[i].cnt = hmc_info->hmc_obj[i].max_cnt;
+
+ while (qpwanted >= IRDMA_MIN_QP_CNT) {
+ if (!irdma_set_loc_hmc_rsrc_gen_3(dev, loc_mem_pages, qpwanted))
+ break;
+
+ if (FIELD_GET(IRDMA_MANAGE_RSRC_VER2, dev->feature_info[IRDMA_FTN_FLAGS]))
+ return -EINVAL;
+
+ qpwanted /= 2;
+ if (mrte_loc == IRDMA_LOC_MEM) {
+ mrwanted = qpwanted * IRDMA_MIN_MR_PER_QP;
+ hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt =
+ min(hmc_info->hmc_obj[IRDMA_HMC_IW_MR].max_cnt, mrwanted);
+ }
+ }
+
+ if (qpwanted < IRDMA_MIN_QP_CNT) {
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: ERROR: could not allocate fpm resources\n");
+ return -EINVAL;
+ }
+
+ irdma_set_host_hmc_rsrc_gen_3(dev);
+ ret_code = irdma_sc_cfg_iw_fpm(dev, dev->hmc_fn_id);
+ if (ret_code) {
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: cfg_iw_fpm returned error_code[x%08X]\n",
+ readl(dev->hw_regs[IRDMA_CQPERRCODES]));
+
+ return ret_code;
+ }
+
+ return irdma_cfg_sd_mem(dev, hmc_info);
+}
+
+/**
* irdma_cfg_fpm_val - configure HMC objects
* @dev: sc device struct
* @qp_count: desired qp count
*/
int irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
{
- struct irdma_virt_mem virt_mem;
- u32 i, mem_size;
u32 qpwanted, mrwanted, pblewanted;
- u32 powerof2, hte;
+ u32 powerof2, hte, i;
u32 sd_needed;
u32 sd_diff;
u32 loop_count = 0;
struct irdma_hmc_info *hmc_info;
struct irdma_hmc_fpm_misc *hmc_fpm_misc;
int ret_code = 0;
+ u32 max_sds;
hmc_info = dev->hmc_info;
hmc_fpm_misc = &dev->hmc_fpm_misc;
@@ -4814,14 +5977,16 @@ int irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
return ret_code;
}
+ max_sds = hmc_fpm_misc->max_sds;
+
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3)
+ return cfg_fpm_value_gen_3(dev, hmc_info, hmc_fpm_misc);
+
for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++)
hmc_info->hmc_obj[i].cnt = hmc_info->hmc_obj[i].max_cnt;
sd_needed = irdma_est_sd(dev, hmc_info);
- ibdev_dbg(to_ibdev(dev),
- "HMC: FW max resources sd_needed[%08d] first_sd_index[%04d]\n",
- sd_needed, hmc_info->first_sd_index);
- ibdev_dbg(to_ibdev(dev), "HMC: sd count %d where max sd is %d\n",
- hmc_info->sd_table.sd_cnt, hmc_fpm_misc->max_sds);
+ ibdev_dbg(to_ibdev(dev), "HMC: sd count %u where max sd is %u\n",
+ hmc_info->sd_table.sd_cnt, max_sds);
qpwanted = min(qp_count, hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt);
@@ -4835,21 +6000,21 @@ int irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
pblewanted = hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].max_cnt;
ibdev_dbg(to_ibdev(dev),
- "HMC: req_qp=%d max_sd=%d, max_qp = %d, max_cq=%d, max_mr=%d, max_pble=%d, mc=%d, av=%d\n",
- qp_count, hmc_fpm_misc->max_sds,
+ "HMC: req_qp=%d max_sd=%u, max_qp = %u, max_cq=%u, max_mr=%u, max_pble=%u, mc=%d, av=%u\n",
+ qp_count, max_sds,
hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt,
hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].max_cnt,
hmc_info->hmc_obj[IRDMA_HMC_IW_MR].max_cnt,
hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].max_cnt,
hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].max_cnt,
hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].max_cnt);
+
hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt =
hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].max_cnt;
hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt =
hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].max_cnt;
hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].cnt =
hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].max_cnt;
-
hmc_info->hmc_obj[IRDMA_HMC_IW_APBVT_ENTRY].cnt = 1;
while (irdma_q1_cnt(dev, hmc_info, qpwanted) > hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].max_cnt)
@@ -4860,7 +6025,7 @@ int irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt = qpwanted;
hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt =
min(2 * qpwanted, hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt);
- hmc_info->hmc_obj[IRDMA_HMC_IW_RESERVED].cnt = 0; /* Reserved */
+ hmc_info->hmc_obj[IRDMA_HMC_IW_SRQ].cnt = 0; /* SRQs not supported before GEN_3 */
hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt = mrwanted;
hte = round_up(qpwanted + hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt, 512);
@@ -4898,11 +6063,12 @@ int irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
if (!(loop_count % 2) && qpwanted > 128) {
qpwanted /= 2;
} else {
- mrwanted /= 2;
pblewanted /= 2;
+ mrwanted /= 2;
}
continue;
}
+
if (dev->cqp->hmc_profile != IRDMA_HMC_PROFILE_FAVOR_VF &&
pblewanted > (512 * FPM_MULTIPLIER * sd_diff)) {
pblewanted -= 256 * FPM_MULTIPLIER * sd_diff;
@@ -4928,14 +6094,13 @@ int irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
if (sd_needed > hmc_fpm_misc->max_sds) {
ibdev_dbg(to_ibdev(dev),
- "HMC: cfg_fpm failed loop_cnt=%d, sd_needed=%d, max sd count %d\n",
+ "HMC: cfg_fpm failed loop_cnt=%u, sd_needed=%u, max sd count %u\n",
loop_count, sd_needed, hmc_info->sd_table.sd_cnt);
return -EINVAL;
}
- if (loop_count > 1 && sd_needed < hmc_fpm_misc->max_sds) {
- pblewanted += (hmc_fpm_misc->max_sds - sd_needed) * 256 *
- FPM_MULTIPLIER;
+ if (loop_count > 1 && sd_needed < max_sds) {
+ pblewanted += (max_sds - sd_needed) * 256 * FPM_MULTIPLIER;
hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt = pblewanted;
sd_needed = irdma_est_sd(dev, hmc_info);
}
@@ -4959,18 +6124,7 @@ int irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
return ret_code;
}
- mem_size = sizeof(struct irdma_hmc_sd_entry) *
- (hmc_info->sd_table.sd_cnt + hmc_info->first_sd_index + 1);
- virt_mem.size = mem_size;
- virt_mem.va = kzalloc(virt_mem.size, GFP_KERNEL);
- if (!virt_mem.va) {
- ibdev_dbg(to_ibdev(dev),
- "HMC: failed to allocate memory for sd_entry buffer\n");
- return -ENOMEM;
- }
- hmc_info->sd_table.sd_entry = virt_mem.va;
-
- return ret_code;
+ return irdma_cfg_sd_mem(dev, hmc_info);
}
/**
@@ -5242,6 +6396,22 @@ static int irdma_exec_cqp_cmd(struct irdma_sc_dev *dev,
&pcmdinfo->in.u.mc_modify.info,
pcmdinfo->in.u.mc_modify.scratch);
break;
+ case IRDMA_OP_SRQ_CREATE:
+ status = irdma_sc_srq_create(pcmdinfo->in.u.srq_create.srq,
+ pcmdinfo->in.u.srq_create.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_SRQ_MODIFY:
+ status = irdma_sc_srq_modify(pcmdinfo->in.u.srq_modify.srq,
+ &pcmdinfo->in.u.srq_modify.info,
+ pcmdinfo->in.u.srq_modify.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_SRQ_DESTROY:
+ status = irdma_sc_srq_destroy(pcmdinfo->in.u.srq_destroy.srq,
+ pcmdinfo->in.u.srq_destroy.scratch,
+ pcmdinfo->post_sq);
+ break;
default:
status = -EOPNOTSUPP;
break;
@@ -5314,14 +6484,26 @@ void irdma_cfg_aeq(struct irdma_sc_dev *dev, u32 idx, bool enable)
*/
void sc_vsi_update_stats(struct irdma_sc_vsi *vsi)
{
- struct irdma_gather_stats *gather_stats;
- struct irdma_gather_stats *last_gather_stats;
+ struct irdma_dev_hw_stats *hw_stats = &vsi->pestat->hw_stats;
+ struct irdma_gather_stats *gather_stats =
+ vsi->pestat->gather_info.gather_stats_va;
+ struct irdma_gather_stats *last_gather_stats =
+ vsi->pestat->gather_info.last_gather_stats_va;
+ const struct irdma_hw_stat_map *map = vsi->dev->hw_stats_map;
+ u16 max_stat_idx = vsi->dev->hw_attrs.max_stat_idx;
+ u16 i;
+
+ if (vsi->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3) {
+ for (i = 0; i < max_stat_idx; i++) {
+ u16 idx = map[i].byteoff / sizeof(u64);
+
+ hw_stats->stats_val[i] = gather_stats->val[idx];
+ }
+ return;
+ }
- gather_stats = vsi->pestat->gather_info.gather_stats_va;
- last_gather_stats = vsi->pestat->gather_info.last_gather_stats_va;
- irdma_update_stats(&vsi->pestat->hw_stats, gather_stats,
- last_gather_stats, vsi->dev->hw_stats_map,
- vsi->dev->hw_attrs.max_stat_idx);
+ irdma_update_stats(hw_stats, gather_stats, last_gather_stats,
+ map, max_stat_idx);
}
/**
@@ -5356,6 +6538,9 @@ static inline void irdma_sc_init_hw(struct irdma_sc_dev *dev)
case IRDMA_GEN_2:
icrdma_init_hw(dev);
break;
+ case IRDMA_GEN_3:
+ ig3rdma_init_hw(dev);
+ break;
}
}
@@ -5381,10 +6566,15 @@ int irdma_sc_dev_init(enum irdma_vers ver, struct irdma_sc_dev *dev,
dev->fpm_commit_buf = info->fpm_commit_buf;
dev->hw = info->hw;
dev->hw->hw_addr = info->bar0;
+ dev->protocol_used = info->protocol_used;
/* Setup the hardware limits, hmc may limit further */
dev->hw_attrs.min_hw_qp_id = IRDMA_MIN_IW_QP_ID;
+ dev->hw_attrs.min_hw_srq_id = IRDMA_MIN_IW_SRQ_ID;
dev->hw_attrs.min_hw_aeq_size = IRDMA_MIN_AEQ_ENTRIES;
- dev->hw_attrs.max_hw_aeq_size = IRDMA_MAX_AEQ_ENTRIES;
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3)
+ dev->hw_attrs.max_hw_aeq_size = IRDMA_MAX_AEQ_ENTRIES_GEN_3;
+ else
+ dev->hw_attrs.max_hw_aeq_size = IRDMA_MAX_AEQ_ENTRIES;
dev->hw_attrs.min_hw_ceq_size = IRDMA_MIN_CEQ_ENTRIES;
dev->hw_attrs.max_hw_ceq_size = IRDMA_MAX_CEQ_ENTRIES;
dev->hw_attrs.uk_attrs.min_hw_cq_size = IRDMA_MIN_CQ_SIZE;
@@ -5409,21 +6599,39 @@ int irdma_sc_dev_init(enum irdma_vers ver, struct irdma_sc_dev *dev,
dev->hw_attrs.max_sleep_count = IRDMA_SLEEP_COUNT;
dev->hw_attrs.max_cqp_compl_wait_time_ms = CQP_COMPL_WAIT_TIME_MS;
- dev->hw_attrs.uk_attrs.hw_rev = ver;
+ if (!dev->privileged) {
+ ret_code = irdma_vchnl_req_get_hmc_fcn(dev);
+ if (ret_code) {
+ ibdev_dbg(to_ibdev(dev),
+ "DEV: Get HMC function ret = %d\n",
+ ret_code);
+
+ return ret_code;
+ }
+ }
+
irdma_sc_init_hw(dev);
- if (irdma_wait_pe_ready(dev))
- return -ETIMEDOUT;
+ if (dev->privileged) {
+ if (irdma_wait_pe_ready(dev))
+ return -ETIMEDOUT;
- val = readl(dev->hw_regs[IRDMA_GLPCI_LBARCTRL]);
- db_size = (u8)FIELD_GET(IRDMA_GLPCI_LBARCTRL_PE_DB_SIZE, val);
- if (db_size != IRDMA_PE_DB_SIZE_4M && db_size != IRDMA_PE_DB_SIZE_8M) {
- ibdev_dbg(to_ibdev(dev),
- "DEV: RDMA PE doorbell is not enabled in CSR val 0x%x db_size=%d\n",
- val, db_size);
- return -ENODEV;
+ val = readl(dev->hw_regs[IRDMA_GLPCI_LBARCTRL]);
+ db_size = (u8)FIELD_GET(IRDMA_GLPCI_LBARCTRL_PE_DB_SIZE, val);
+ if (db_size != IRDMA_PE_DB_SIZE_4M &&
+ db_size != IRDMA_PE_DB_SIZE_8M) {
+ ibdev_dbg(to_ibdev(dev),
+ "DEV: RDMA PE doorbell is not enabled in CSR val 0x%x db_size=%d\n",
+ val, db_size);
+ return -ENODEV;
+ }
+ } else {
+ ret_code = irdma_vchnl_req_get_reg_layout(dev);
+ if (ret_code)
+ ibdev_dbg(to_ibdev(dev),
+ "DEV: Get Register layout failed ret = %d\n",
+ ret_code);
}
- dev->db_addr = dev->hw->hw_addr + (uintptr_t)dev->hw_regs[IRDMA_DB_ADDR_OFFSET];
return ret_code;
}
diff --git a/drivers/infiniband/hw/irdma/defs.h b/drivers/infiniband/hw/irdma/defs.h
index 2cb4b96db721..983b22d7ae23 100644
--- a/drivers/infiniband/hw/irdma/defs.h
+++ b/drivers/infiniband/hw/irdma/defs.h
@@ -14,6 +14,18 @@
#define IRDMA_PE_DB_SIZE_4M 1
#define IRDMA_PE_DB_SIZE_8M 2
+#define IRDMA_IRD_HW_SIZE_4_GEN3 0
+#define IRDMA_IRD_HW_SIZE_8_GEN3 1
+#define IRDMA_IRD_HW_SIZE_16_GEN3 2
+#define IRDMA_IRD_HW_SIZE_32_GEN3 3
+#define IRDMA_IRD_HW_SIZE_64_GEN3 4
+#define IRDMA_IRD_HW_SIZE_128_GEN3 5
+#define IRDMA_IRD_HW_SIZE_256_GEN3 6
+#define IRDMA_IRD_HW_SIZE_512_GEN3 7
+#define IRDMA_IRD_HW_SIZE_1024_GEN3 8
+#define IRDMA_IRD_HW_SIZE_2048_GEN3 9
+#define IRDMA_IRD_HW_SIZE_4096_GEN3 10
+
#define IRDMA_IRD_HW_SIZE_4 0
#define IRDMA_IRD_HW_SIZE_16 1
#define IRDMA_IRD_HW_SIZE_64 2
@@ -114,6 +126,13 @@ enum irdma_protocol_used {
#define IRDMA_UPDATE_SD_BUFF_SIZE 128
#define IRDMA_FEATURE_BUF_SIZE (8 * IRDMA_MAX_FEATURES)
+#define ENABLE_LOC_MEM 63
+#define IRDMA_ATOMICS_ALLOWED_BIT 1
+#define MAX_PBLE_PER_SD 0x40000
+#define MAX_PBLE_SD_PER_FCN 0x400
+#define MAX_MR_PER_SD 0x8000
+#define MAX_MR_SD_PER_FCN 0x80
+#define IRDMA_PBLE_COMMIT_OFFSET 112
#define IRDMA_MAX_QUANTA_PER_WR 8
#define IRDMA_QP_SW_MAX_WQ_QUANTA 32768
@@ -121,6 +140,10 @@ enum irdma_protocol_used {
#define IRDMA_QP_SW_MAX_RQ_QUANTA 32768
#define IRDMA_MAX_QP_WRS(max_quanta_per_wr) \
((IRDMA_QP_SW_MAX_WQ_QUANTA - IRDMA_SQ_RSVD) / (max_quanta_per_wr))
+#define IRDMA_SRQ_MIN_QUANTA 8
+#define IRDMA_SRQ_MAX_QUANTA 262144
+#define IRDMA_MAX_SRQ_WRS \
+ ((IRDMA_SRQ_MAX_QUANTA - IRDMA_RQ_RSVD) / IRDMA_MAX_QUANTA_PER_WR)
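
Plugging in the constants defined in this header (IRDMA_RQ_RSVD = 1, IRDMA_MAX_QUANTA_PER_WR = 8), the cap evaluates to:

/* IRDMA_MAX_SRQ_WRS = (262144 - 1) / 8 = 32767 work requests */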
#define IRDMAQP_TERM_SEND_TERM_AND_FIN 0
#define IRDMAQP_TERM_SEND_TERM_ONLY 1
@@ -147,8 +170,13 @@ enum irdma_protocol_used {
#define IRDMA_SQ_RSVD 258
#define IRDMA_RQ_RSVD 1
-#define IRDMA_FEATURE_RTS_AE 1ULL
-#define IRDMA_FEATURE_CQ_RESIZE 2ULL
+#define IRDMA_FEATURE_RTS_AE BIT_ULL(0)
+#define IRDMA_FEATURE_CQ_RESIZE BIT_ULL(1)
+#define IRDMA_FEATURE_64_BYTE_CQE BIT_ULL(5)
+#define IRDMA_FEATURE_ATOMIC_OPS BIT_ULL(6)
+#define IRDMA_FEATURE_SRQ BIT_ULL(7)
+#define IRDMA_FEATURE_CQE_TIMESTAMPING BIT_ULL(8)
+
#define IRDMAQP_OP_RDMA_WRITE 0x00
#define IRDMAQP_OP_RDMA_READ 0x01
#define IRDMAQP_OP_RDMA_SEND 0x03
@@ -161,6 +189,8 @@ enum irdma_protocol_used {
#define IRDMAQP_OP_RDMA_READ_LOC_INV 0x0b
#define IRDMAQP_OP_NOP 0x0c
#define IRDMAQP_OP_RDMA_WRITE_SOL 0x0d
+#define IRDMAQP_OP_ATOMIC_FETCH_ADD 0x0f
+#define IRDMAQP_OP_ATOMIC_COMPARE_SWAP_ADD 0x11
#define IRDMAQP_OP_GEN_RTS_AE 0x30
enum irdma_cqp_op_type {
@@ -212,9 +242,12 @@ enum irdma_cqp_op_type {
IRDMA_OP_ADD_LOCAL_MAC_ENTRY = 46,
IRDMA_OP_DELETE_LOCAL_MAC_ENTRY = 47,
IRDMA_OP_CQ_MODIFY = 48,
+ IRDMA_OP_SRQ_CREATE = 49,
+ IRDMA_OP_SRQ_MODIFY = 50,
+ IRDMA_OP_SRQ_DESTROY = 51,
/* Must be last entry*/
- IRDMA_MAX_CQP_OPS = 49,
+ IRDMA_MAX_CQP_OPS = 52,
};
/* CQP SQ WQES */
@@ -224,6 +257,9 @@ enum irdma_cqp_op_type {
#define IRDMA_CQP_OP_CREATE_CQ 0x03
#define IRDMA_CQP_OP_MODIFY_CQ 0x04
#define IRDMA_CQP_OP_DESTROY_CQ 0x05
+#define IRDMA_CQP_OP_CREATE_SRQ 0x06
+#define IRDMA_CQP_OP_MODIFY_SRQ 0x07
+#define IRDMA_CQP_OP_DESTROY_SRQ 0x08
#define IRDMA_CQP_OP_ALLOC_STAG 0x09
#define IRDMA_CQP_OP_REG_MR 0x0a
#define IRDMA_CQP_OP_QUERY_STAG 0x0b
@@ -265,97 +301,6 @@ enum irdma_cqp_op_type {
#define IRDMA_CQP_OP_GATHER_STATS 0x2e
#define IRDMA_CQP_OP_UP_MAP 0x2f
-/* Async Events codes */
-#define IRDMA_AE_AMP_UNALLOCATED_STAG 0x0102
-#define IRDMA_AE_AMP_INVALID_STAG 0x0103
-#define IRDMA_AE_AMP_BAD_QP 0x0104
-#define IRDMA_AE_AMP_BAD_PD 0x0105
-#define IRDMA_AE_AMP_BAD_STAG_KEY 0x0106
-#define IRDMA_AE_AMP_BAD_STAG_INDEX 0x0107
-#define IRDMA_AE_AMP_BOUNDS_VIOLATION 0x0108
-#define IRDMA_AE_AMP_RIGHTS_VIOLATION 0x0109
-#define IRDMA_AE_AMP_TO_WRAP 0x010a
-#define IRDMA_AE_AMP_FASTREG_VALID_STAG 0x010c
-#define IRDMA_AE_AMP_FASTREG_MW_STAG 0x010d
-#define IRDMA_AE_AMP_FASTREG_INVALID_RIGHTS 0x010e
-#define IRDMA_AE_AMP_FASTREG_INVALID_LENGTH 0x0110
-#define IRDMA_AE_AMP_INVALIDATE_SHARED 0x0111
-#define IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS 0x0112
-#define IRDMA_AE_AMP_INVALIDATE_MR_WITH_BOUND_WINDOWS 0x0113
-#define IRDMA_AE_AMP_MWBIND_VALID_STAG 0x0114
-#define IRDMA_AE_AMP_MWBIND_OF_MR_STAG 0x0115
-#define IRDMA_AE_AMP_MWBIND_TO_ZERO_BASED_STAG 0x0116
-#define IRDMA_AE_AMP_MWBIND_TO_MW_STAG 0x0117
-#define IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS 0x0118
-#define IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS 0x0119
-#define IRDMA_AE_AMP_MWBIND_TO_INVALID_PARENT 0x011a
-#define IRDMA_AE_AMP_MWBIND_BIND_DISABLED 0x011b
-#define IRDMA_AE_PRIV_OPERATION_DENIED 0x011c
-#define IRDMA_AE_AMP_INVALIDATE_TYPE1_MW 0x011d
-#define IRDMA_AE_AMP_MWBIND_ZERO_BASED_TYPE1_MW 0x011e
-#define IRDMA_AE_AMP_FASTREG_INVALID_PBL_HPS_CFG 0x011f
-#define IRDMA_AE_AMP_MWBIND_WRONG_TYPE 0x0120
-#define IRDMA_AE_AMP_FASTREG_PBLE_MISMATCH 0x0121
-#define IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG 0x0132
-#define IRDMA_AE_UDA_XMIT_BAD_PD 0x0133
-#define IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT 0x0134
-#define IRDMA_AE_UDA_L4LEN_INVALID 0x0135
-#define IRDMA_AE_BAD_CLOSE 0x0201
-#define IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE 0x0202
-#define IRDMA_AE_CQ_OPERATION_ERROR 0x0203
-#define IRDMA_AE_RDMA_READ_WHILE_ORD_ZERO 0x0205
-#define IRDMA_AE_STAG_ZERO_INVALID 0x0206
-#define IRDMA_AE_IB_RREQ_AND_Q1_FULL 0x0207
-#define IRDMA_AE_IB_INVALID_REQUEST 0x0208
-#define IRDMA_AE_WQE_UNEXPECTED_OPCODE 0x020a
-#define IRDMA_AE_WQE_INVALID_PARAMETER 0x020b
-#define IRDMA_AE_WQE_INVALID_FRAG_DATA 0x020c
-#define IRDMA_AE_IB_REMOTE_ACCESS_ERROR 0x020d
-#define IRDMA_AE_IB_REMOTE_OP_ERROR 0x020e
-#define IRDMA_AE_WQE_LSMM_TOO_LONG 0x0220
-#define IRDMA_AE_INVALID_REQUEST 0x0223
-#define IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN 0x0301
-#define IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER 0x0303
-#define IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION 0x0304
-#define IRDMA_AE_DDP_UBE_INVALID_MO 0x0305
-#define IRDMA_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE 0x0306
-#define IRDMA_AE_DDP_UBE_INVALID_QN 0x0307
-#define IRDMA_AE_DDP_NO_L_BIT 0x0308
-#define IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION 0x0311
-#define IRDMA_AE_RDMAP_ROE_UNEXPECTED_OPCODE 0x0312
-#define IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST 0x0313
-#define IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP 0x0314
-#define IRDMA_AE_ROCE_RSP_LENGTH_ERROR 0x0316
-#define IRDMA_AE_ROCE_EMPTY_MCG 0x0380
-#define IRDMA_AE_ROCE_BAD_MC_IP_ADDR 0x0381
-#define IRDMA_AE_ROCE_BAD_MC_QPID 0x0382
-#define IRDMA_AE_MCG_QP_PROTOCOL_MISMATCH 0x0383
-#define IRDMA_AE_INVALID_ARP_ENTRY 0x0401
-#define IRDMA_AE_INVALID_TCP_OPTION_RCVD 0x0402
-#define IRDMA_AE_STALE_ARP_ENTRY 0x0403
-#define IRDMA_AE_INVALID_AH_ENTRY 0x0406
-#define IRDMA_AE_LLP_CLOSE_COMPLETE 0x0501
-#define IRDMA_AE_LLP_CONNECTION_RESET 0x0502
-#define IRDMA_AE_LLP_FIN_RECEIVED 0x0503
-#define IRDMA_AE_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH 0x0504
-#define IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR 0x0505
-#define IRDMA_AE_LLP_SEGMENT_TOO_SMALL 0x0507
-#define IRDMA_AE_LLP_SYN_RECEIVED 0x0508
-#define IRDMA_AE_LLP_TERMINATE_RECEIVED 0x0509
-#define IRDMA_AE_LLP_TOO_MANY_RETRIES 0x050a
-#define IRDMA_AE_LLP_TOO_MANY_KEEPALIVE_RETRIES 0x050b
-#define IRDMA_AE_LLP_DOUBT_REACHABILITY 0x050c
-#define IRDMA_AE_LLP_CONNECTION_ESTABLISHED 0x050e
-#define IRDMA_AE_LLP_TOO_MANY_RNRS 0x050f
-#define IRDMA_AE_RESOURCE_EXHAUSTION 0x0520
-#define IRDMA_AE_RESET_SENT 0x0601
-#define IRDMA_AE_TERMINATE_SENT 0x0602
-#define IRDMA_AE_RESET_NOT_SENT 0x0603
-#define IRDMA_AE_LCE_QP_CATASTROPHIC 0x0700
-#define IRDMA_AE_LCE_FUNCTION_CATASTROPHIC 0x0701
-#define IRDMA_AE_LCE_CQ_CATASTROPHIC 0x0702
-#define IRDMA_AE_QP_SUSPEND_COMPLETE 0x0900
-
#define FLD_LS_64(dev, val, field) \
(((u64)(val) << (dev)->hw_shifts[field ## _S]) & (dev)->hw_masks[field ## _M])
#define FLD_RS_64(dev, val, field) \
@@ -393,9 +338,13 @@ enum irdma_cqp_op_type {
#define IRDMA_CQPSQ_STATS_USE_INST BIT_ULL(61)
#define IRDMA_CQPSQ_STATS_OP GENMASK_ULL(37, 32)
#define IRDMA_CQPSQ_STATS_INST_INDEX GENMASK_ULL(6, 0)
-#define IRDMA_CQPSQ_STATS_HMC_FCN_INDEX GENMASK_ULL(5, 0)
+#define IRDMA_CQPSQ_STATS_HMC_FCN_INDEX GENMASK_ULL(15, 0)
#define IRDMA_CQPSQ_WS_WQEVALID BIT_ULL(63)
-#define IRDMA_CQPSQ_WS_NODEOP GENMASK_ULL(53, 52)
+#define IRDMA_CQPSQ_WS_NODEOP GENMASK_ULL(55, 52)
+#define IRDMA_SD_MAX GENMASK_ULL(15, 0)
+#define IRDMA_MEM_MAX GENMASK_ULL(15, 0)
+#define IRDMA_QP_MEM_LOC GENMASK_ULL(47, 44)
+#define IRDMA_MR_MEM_LOC GENMASK_ULL(27, 24)
#define IRDMA_CQPSQ_WS_ENABLENODE BIT_ULL(62)
#define IRDMA_CQPSQ_WS_NODETYPE BIT_ULL(61)
@@ -404,16 +353,16 @@ enum irdma_cqp_op_type {
#define IRDMA_CQPSQ_WS_VMVFTYPE GENMASK_ULL(55, 54)
#define IRDMA_CQPSQ_WS_VMVFNUM GENMASK_ULL(51, 42)
#define IRDMA_CQPSQ_WS_OP GENMASK_ULL(37, 32)
-#define IRDMA_CQPSQ_WS_PARENTID GENMASK_ULL(25, 16)
-#define IRDMA_CQPSQ_WS_NODEID GENMASK_ULL(9, 0)
-#define IRDMA_CQPSQ_WS_VSI GENMASK_ULL(57, 48)
+#define IRDMA_CQPSQ_WS_PARENTID GENMASK_ULL(29, 16)
+#define IRDMA_CQPSQ_WS_NODEID GENMASK_ULL(13, 0)
+#define IRDMA_CQPSQ_WS_VSI GENMASK_ULL(63, 48)
#define IRDMA_CQPSQ_WS_WEIGHT GENMASK_ULL(38, 32)
#define IRDMA_CQPSQ_UP_WQEVALID BIT_ULL(63)
#define IRDMA_CQPSQ_UP_USEVLAN BIT_ULL(62)
#define IRDMA_CQPSQ_UP_USEOVERRIDE BIT_ULL(61)
#define IRDMA_CQPSQ_UP_OP GENMASK_ULL(37, 32)
-#define IRDMA_CQPSQ_UP_HMCFCNIDX GENMASK_ULL(5, 0)
+#define IRDMA_CQPSQ_UP_HMCFCNIDX GENMASK_ULL(15, 0)
#define IRDMA_CQPSQ_UP_CNPOVERRIDE GENMASK_ULL(37, 32)
#define IRDMA_CQPSQ_QUERY_RDMA_FEATURES_WQEVALID BIT_ULL(63)
#define IRDMA_CQPSQ_QUERY_RDMA_FEATURES_BUF_LEN GENMASK_ULL(31, 0)
@@ -448,6 +397,16 @@ enum irdma_cqp_op_type {
#define IRDMA_CQPHC_SVER GENMASK_ULL(31, 24)
#define IRDMA_CQPHC_SQBASE GENMASK_ULL(63, 9)
+#define IRDMA_CQPHC_TIMESTAMP_OVERRIDE BIT_ULL(5)
+#define IRDMA_CQPHC_TS_SHIFT GENMASK_ULL(12, 8)
+#define IRDMA_CQPHC_EN_FINE_GRAINED_TIMERS BIT_ULL(0)
+
+#define IRDMA_CQPHC_OOISC_BLKSIZE GENMASK_ULL(63, 60)
+#define IRDMA_CQPHC_RRSP_BLKSIZE GENMASK_ULL(59, 56)
+#define IRDMA_CQPHC_Q1_BLKSIZE GENMASK_ULL(55, 52)
+#define IRDMA_CQPHC_XMIT_BLKSIZE GENMASK_ULL(51, 48)
+#define IRDMA_CQPHC_BLKSIZES_VALID BIT_ULL(4)
+
#define IRDMA_CQPHC_QPCTX GENMASK_ULL(63, 0)
#define IRDMA_QP_DBSA_HW_SQ_TAIL GENMASK_ULL(14, 0)
#define IRDMA_CQ_DBSA_CQEIDX GENMASK_ULL(19, 0)
@@ -461,6 +420,8 @@ enum irdma_cqp_op_type {
#define IRDMA_CCQ_OPRETVAL GENMASK_ULL(31, 0)
+#define IRDMA_CCQ_DEFINFO GENMASK_ULL(63, 32)
+
#define IRDMA_CQ_MINERR GENMASK_ULL(15, 0)
#define IRDMA_CQ_MAJERR GENMASK_ULL(31, 16)
#define IRDMA_CQ_WQEIDX GENMASK_ULL(46, 32)
@@ -469,6 +430,7 @@ enum irdma_cqp_op_type {
#define IRDMA_CQ_ERROR BIT_ULL(55)
#define IRDMA_CQ_SQ BIT_ULL(62)
+#define IRDMA_CQ_SRQ BIT_ULL(52)
#define IRDMA_CQ_VALID BIT_ULL(63)
#define IRDMA_CQ_IMMVALID BIT_ULL(62)
#define IRDMA_CQ_UDSMACVALID BIT_ULL(61)
@@ -476,8 +438,6 @@ enum irdma_cqp_op_type {
#define IRDMA_CQ_UDSMAC GENMASK_ULL(47, 0)
#define IRDMA_CQ_UDVLAN GENMASK_ULL(63, 48)
-#define IRDMA_CQ_IMMDATA_S 0
-#define IRDMA_CQ_IMMDATA_M (0xffffffffffffffffULL << IRDMA_CQ_IMMVALID_S)
#define IRDMA_CQ_IMMDATALOW32 GENMASK_ULL(31, 0)
#define IRDMA_CQ_IMMDATAUP32 GENMASK_ULL(63, 32)
#define IRDMACQ_PAYLDLEN GENMASK_ULL(31, 0)
@@ -508,6 +468,17 @@ enum irdma_cqp_op_type {
#define IRDMA_AEQE_Q2DATA GENMASK_ULL(62, 61)
#define IRDMA_AEQE_VALID BIT_ULL(63)
+#define IRDMA_AEQE_Q2DATA_GEN_3 GENMASK_ULL(5, 4)
+#define IRDMA_AEQE_TCPSTATE_GEN_3 GENMASK_ULL(3, 0)
+#define IRDMA_AEQE_QPCQID_GEN_3 GENMASK_ULL(24, 0)
+#define IRDMA_AEQE_AECODE_GEN_3 GENMASK_ULL(61, 50)
+#define IRDMA_AEQE_OVERFLOW_GEN_3 BIT_ULL(62)
+#define IRDMA_AEQE_WQDESCIDX_GEN_3 GENMASK_ULL(49, 32)
+#define IRDMA_AEQE_IWSTATE_GEN_3 GENMASK_ULL(31, 29)
+#define IRDMA_AEQE_AESRC_GEN_3 GENMASK_ULL(28, 25)
+#define IRDMA_AEQE_CMPL_CTXT_S 6
+#define IRDMA_AEQE_CMPL_CTXT GENMASK_ULL(63, 6)
+
#define IRDMA_UDA_QPSQ_NEXT_HDR GENMASK_ULL(23, 16)
#define IRDMA_UDA_QPSQ_OPCODE GENMASK_ULL(37, 32)
#define IRDMA_UDA_QPSQ_L4LEN GENMASK_ULL(45, 42)
@@ -530,11 +501,14 @@ enum irdma_cqp_op_type {
#define IRDMA_CQPSQ_WQEVALID BIT_ULL(63)
#define IRDMA_CQPSQ_TPHVAL GENMASK_ULL(7, 0)
-#define IRDMA_CQPSQ_VSIIDX GENMASK_ULL(17, 8)
+#define IRDMA_CQPSQ_VSIIDX GENMASK_ULL(23, 8)
#define IRDMA_CQPSQ_TPHEN BIT_ULL(60)
#define IRDMA_CQPSQ_PBUFADDR IRDMA_CQPHC_QPCTX
+#define IRDMA_CQPSQ_PASID GENMASK_ULL(51, 32)
+#define IRDMA_CQPSQ_PASID_VALID BIT_ULL(62)
+
/* Create/Modify/Destroy QP */
#define IRDMA_CQPSQ_QP_NEWMSS GENMASK_ULL(45, 32)
@@ -566,10 +540,30 @@ enum irdma_cqp_op_type {
#define IRDMA_CQPSQ_QP_DBSHADOWADDR IRDMA_CQPHC_QPCTX
+#define IRDMA_CQPSQ_SRQ_RQSIZE GENMASK_ULL(3, 0)
+#define IRDMA_CQPSQ_SRQ_RQ_WQE_SIZE GENMASK_ULL(5, 4)
+#define IRDMA_CQPSQ_SRQ_SRQ_LIMIT GENMASK_ULL(43, 32)
+#define IRDMA_CQPSQ_SRQ_SRQCTX GENMASK_ULL(63, 6)
+#define IRDMA_CQPSQ_SRQ_PD_ID GENMASK_ULL(39, 16)
+#define IRDMA_CQPSQ_SRQ_SRQ_ID GENMASK_ULL(15, 0)
+#define IRDMA_CQPSQ_SRQ_OP GENMASK_ULL(37, 32)
+#define IRDMA_CQPSQ_SRQ_LEAF_PBL_SIZE GENMASK_ULL(45, 44)
+#define IRDMA_CQPSQ_SRQ_VIRTMAP BIT_ULL(47)
+#define IRDMA_CQPSQ_SRQ_TPH_EN BIT_ULL(60)
+#define IRDMA_CQPSQ_SRQ_ARM_LIMIT_EVENT BIT_ULL(61)
+#define IRDMA_CQPSQ_SRQ_FIRST_PM_PBL_IDX GENMASK_ULL(27, 0)
+#define IRDMA_CQPSQ_SRQ_TPH_VALUE GENMASK_ULL(7, 0)
+#define IRDMA_CQPSQ_SRQ_PHYSICAL_BUFFER_ADDR_S 8
+#define IRDMA_CQPSQ_SRQ_PHYSICAL_BUFFER_ADDR GENMASK_ULL(63, 8)
+#define IRDMA_CQPSQ_SRQ_DB_SHADOW_ADDR_S 6
+#define IRDMA_CQPSQ_SRQ_DB_SHADOW_ADDR GENMASK_ULL(63, 6)
+
#define IRDMA_CQPSQ_CQ_CQSIZE GENMASK_ULL(20, 0)
#define IRDMA_CQPSQ_CQ_CQCTX GENMASK_ULL(62, 0)
#define IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD GENMASK(17, 0)
+#define IRDMA_CQPSQ_CQ_CQID_HIGH GENMASK_ULL(52, 50)
+#define IRDMA_CQPSQ_CQ_CEQID_HIGH GENMASK_ULL(59, 54)
#define IRDMA_CQPSQ_CQ_OP GENMASK_ULL(37, 32)
#define IRDMA_CQPSQ_CQ_CQRESIZE BIT_ULL(43)
#define IRDMA_CQPSQ_CQ_LPBLSIZE GENMASK_ULL(45, 44)
@@ -590,6 +584,7 @@ enum irdma_cqp_op_type {
#define IRDMA_CQPSQ_STAG_MR BIT_ULL(43)
#define IRDMA_CQPSQ_STAG_MWTYPE BIT_ULL(42)
#define IRDMA_CQPSQ_STAG_MW1_BIND_DONT_VLDT_KEY BIT_ULL(58)
+#define IRDMA_CQPSQ_STAG_PDID_HI GENMASK_ULL(59, 54)
#define IRDMA_CQPSQ_STAG_LPBLSIZE IRDMA_CQPSQ_CQ_LPBLSIZE
#define IRDMA_CQPSQ_STAG_HPAGESIZE GENMASK_ULL(47, 46)
@@ -600,7 +595,8 @@ enum irdma_cqp_op_type {
#define IRDMA_CQPSQ_STAG_USEPFRID BIT_ULL(61)
#define IRDMA_CQPSQ_STAG_PBA IRDMA_CQPHC_QPCTX
-#define IRDMA_CQPSQ_STAG_HMCFNIDX GENMASK_ULL(5, 0)
+#define IRDMA_CQPSQ_STAG_HMCFNIDX GENMASK_ULL(15, 0)
+#define IRDMA_CQPSQ_STAG_REMOTE_ATOMIC_EN BIT_ULL(61)
#define IRDMA_CQPSQ_STAG_FIRSTPMPBLIDX GENMASK_ULL(27, 0)
#define IRDMA_CQPSQ_QUERYSTAG_IDX IRDMA_CQPSQ_STAG_IDX
@@ -628,11 +624,8 @@ enum irdma_cqp_op_type {
/* Manage Push Page - MPP */
#define IRDMA_INVALID_PUSH_PAGE_INDEX_GEN_1 0xffff
#define IRDMA_INVALID_PUSH_PAGE_INDEX 0xffffffff
-
-#define IRDMA_CQPSQ_MPP_QS_HANDLE GENMASK_ULL(9, 0)
-#define IRDMA_CQPSQ_MPP_PPIDX GENMASK_ULL(9, 0)
+#define IRDMA_CQPSQ_MPP_PPIDX GENMASK_ULL(31, 0)
#define IRDMA_CQPSQ_MPP_PPTYPE GENMASK_ULL(61, 60)
-
#define IRDMA_CQPSQ_MPP_FREE_PAGE BIT_ULL(62)
/* Upload Context - UCTX */
@@ -651,6 +644,8 @@ enum irdma_cqp_op_type {
#define IRDMA_CQPSQ_CEQ_CEQSIZE GENMASK_ULL(21, 0)
#define IRDMA_CQPSQ_CEQ_CEQID GENMASK_ULL(9, 0)
+#define IRDMA_CQPSQ_CEQ_CEQID_HIGH GENMASK_ULL(15, 10)
+
#define IRDMA_CQPSQ_CEQ_LPBLSIZE IRDMA_CQPSQ_CQ_LPBLSIZE
#define IRDMA_CQPSQ_CEQ_VMAP BIT_ULL(47)
#define IRDMA_CQPSQ_CEQ_ITRNOEXPIRE BIT_ULL(46)
@@ -660,10 +655,10 @@ enum irdma_cqp_op_type {
#define IRDMA_CQPSQ_AEQ_VMAP BIT_ULL(47)
#define IRDMA_CQPSQ_AEQ_FIRSTPMPBLIDX GENMASK_ULL(27, 0)
-#define IRDMA_COMMIT_FPM_QPCNT GENMASK_ULL(18, 0)
-
+#define IRDMA_COMMIT_FPM_QPCNT GENMASK_ULL(20, 0)
#define IRDMA_COMMIT_FPM_BASE_S 32
-#define IRDMA_CQPSQ_CFPM_HMCFNID GENMASK_ULL(5, 0)
+#define IRDMA_CQPSQ_CFPM_HMCFNID GENMASK_ULL(15, 0)
+
#define IRDMA_CQPSQ_FWQE_AECODE GENMASK_ULL(15, 0)
#define IRDMA_CQPSQ_FWQE_AESOURCE GENMASK_ULL(19, 16)
#define IRDMA_CQPSQ_FWQE_RQMNERR GENMASK_ULL(15, 0)
@@ -675,6 +670,10 @@ enum irdma_cqp_op_type {
#define IRDMA_CQPSQ_FWQE_USERFLCODE BIT_ULL(60)
#define IRDMA_CQPSQ_FWQE_FLUSHSQ BIT_ULL(61)
#define IRDMA_CQPSQ_FWQE_FLUSHRQ BIT_ULL(62)
+#define IRDMA_CQPSQ_FWQE_ERR_SQ_IDX_VALID BIT_ULL(42)
+#define IRDMA_CQPSQ_FWQE_ERR_SQ_IDX GENMASK_ULL(49, 32)
+#define IRDMA_CQPSQ_FWQE_ERR_RQ_IDX_VALID BIT_ULL(43)
+#define IRDMA_CQPSQ_FWQE_ERR_RQ_IDX GENMASK_ULL(46, 32)
#define IRDMA_CQPSQ_MAPT_PORT GENMASK_ULL(15, 0)
#define IRDMA_CQPSQ_MAPT_ADDPORT BIT_ULL(62)
#define IRDMA_CQPSQ_UPESD_SDCMD GENMASK_ULL(31, 0)
@@ -693,9 +692,12 @@ enum irdma_cqp_op_type {
#define IRDMA_CQPSQ_SUSPENDQP_QPID GENMASK_ULL(23, 0)
#define IRDMA_CQPSQ_RESUMEQP_QSHANDLE GENMASK_ULL(31, 0)
#define IRDMA_CQPSQ_RESUMEQP_QPID GENMASK(23, 0)
+#define IRDMA_MANAGE_RSRC_VER2 BIT_ULL(2)
#define IRDMA_CQPSQ_MIN_STAG_INVALID 0x0001
#define IRDMA_CQPSQ_MIN_SUSPEND_PND 0x0005
+#define IRDMA_CQPSQ_MIN_DEF_CMPL 0x0006
+#define IRDMA_CQPSQ_MIN_OOO_CMPL 0x0007
#define IRDMA_CQPSQ_MAJ_NO_ERROR 0x0000
#define IRDMA_CQPSQ_MAJ_OBJCACHE_ERROR 0xF000
@@ -712,6 +714,11 @@ enum irdma_cqp_op_type {
#define IRDMAQPC_INSERTL2TAG2 BIT_ULL(11)
#define IRDMAQPC_LIMIT GENMASK_ULL(13, 12)
+#define IRDMAQPC_USE_SRQ BIT_ULL(10)
+#define IRDMAQPC_SRQ_ID GENMASK_ULL(15, 0)
+#define IRDMAQPC_PASID GENMASK_ULL(19, 0)
+#define IRDMAQPC_PASID_VALID BIT_ULL(11)
+
#define IRDMAQPC_ECN_EN BIT_ULL(14)
#define IRDMAQPC_DROPOOOSEG BIT_ULL(15)
#define IRDMAQPC_DUPACK_THRESH GENMASK_ULL(18, 16)
@@ -782,21 +789,31 @@ enum irdma_cqp_op_type {
#define IRDMAQPC_CWNDROCE GENMASK_ULL(55, 32)
#define IRDMAQPC_SNDWL1 GENMASK_ULL(31, 0)
#define IRDMAQPC_SNDWL2 GENMASK_ULL(63, 32)
-#define IRDMAQPC_ERR_RQ_IDX GENMASK_ULL(45, 32)
+#define IRDMAQPC_MINRNR_TIMER GENMASK_ULL(4, 0)
+#define IRDMAQPC_ERR_RQ_IDX GENMASK_ULL(46, 32)
#define IRDMAQPC_RTOMIN GENMASK_ULL(63, 57)
#define IRDMAQPC_MAXSNDWND GENMASK_ULL(31, 0)
#define IRDMAQPC_REXMIT_THRESH GENMASK_ULL(53, 48)
#define IRDMAQPC_RNRNAK_THRESH GENMASK_ULL(56, 54)
-#define IRDMAQPC_TXCQNUM GENMASK_ULL(18, 0)
-#define IRDMAQPC_RXCQNUM GENMASK_ULL(50, 32)
+#define IRDMAQPC_TXCQNUM GENMASK_ULL(24, 0)
+#define IRDMAQPC_RXCQNUM GENMASK_ULL(56, 32)
#define IRDMAQPC_STAT_INDEX GENMASK_ULL(6, 0)
#define IRDMAQPC_Q2ADDR GENMASK_ULL(63, 8)
#define IRDMAQPC_LASTBYTESENT GENMASK_ULL(7, 0)
#define IRDMAQPC_MACADDRESS GENMASK_ULL(63, 16)
#define IRDMAQPC_ORDSIZE GENMASK_ULL(7, 0)
+#define IRDMAQPC_LOCALACKTIMEOUT GENMASK_ULL(12, 8)
+#define IRDMAQPC_RNRNAK_TMR GENMASK_ULL(4, 0)
+#define IRDMAQPC_ORDSIZE_GEN3 GENMASK_ULL(10, 0)
+#define IRDMAQPC_REMOTE_ATOMIC_EN BIT_ULL(18)
+#define IRDMAQPC_STAT_INDEX_GEN3 GENMASK_ULL(47, 32)
+#define IRDMAQPC_PKT_LIMIT GENMASK_ULL(55, 48)
+
#define IRDMAQPC_IRDSIZE GENMASK_ULL(18, 16)
+#define IRDMAQPC_IRDSIZE_GEN3 GENMASK_ULL(17, 14)
+
#define IRDMAQPC_UDPRIVCQENABLE BIT_ULL(19)
#define IRDMAQPC_WRRDRSPOK BIT_ULL(20)
#define IRDMAQPC_RDOK BIT_ULL(21)
@@ -833,6 +850,7 @@ enum irdma_cqp_op_type {
#define IRDMA_FEATURE_INFO GENMASK_ULL(47, 0)
#define IRDMA_FEATURE_CNT GENMASK_ULL(47, 32)
#define IRDMA_FEATURE_TYPE GENMASK_ULL(63, 48)
+#define IRDMA_FEATURE_RSRC_MAX GENMASK_ULL(31, 0)
#define IRDMAQPSQ_OPCODE GENMASK_ULL(37, 32)
#define IRDMAQPSQ_COPY_HOST_PBL BIT_ULL(43)
@@ -856,7 +874,7 @@ enum irdma_cqp_op_type {
#define IRDMAQPSQ_REMSTAGINV GENMASK_ULL(31, 0)
#define IRDMAQPSQ_DESTQKEY GENMASK_ULL(31, 0)
#define IRDMAQPSQ_DESTQPN GENMASK_ULL(55, 32)
-#define IRDMAQPSQ_AHID GENMASK_ULL(16, 0)
+#define IRDMAQPSQ_AHID GENMASK_ULL(24, 0)
#define IRDMAQPSQ_INLINEDATAFLAG BIT_ULL(57)
#define IRDMA_INLINE_VALID_S 7
@@ -869,6 +887,9 @@ enum irdma_cqp_op_type {
#define IRDMAQPSQ_REMTO IRDMA_CQPHC_QPCTX
+#define IRDMAQPSQ_STAG GENMASK_ULL(31, 0)
+#define IRDMAQPSQ_REMOTE_STAG GENMASK_ULL(31, 0)
+
#define IRDMAQPSQ_STAGRIGHTS GENMASK_ULL(52, 48)
#define IRDMAQPSQ_VABASEDTO BIT_ULL(53)
#define IRDMAQPSQ_MEMWINDOWTYPE BIT_ULL(54)
@@ -879,6 +900,8 @@ enum irdma_cqp_op_type {
#define IRDMAQPSQ_BASEVA_TO_FBO IRDMA_CQPHC_QPCTX
+#define IRDMAQPSQ_REMOTE_ATOMICS_EN BIT_ULL(55)
+
#define IRDMAQPSQ_LOCSTAG GENMASK_ULL(31, 0)
#define IRDMAQPSQ_STAGKEY GENMASK_ULL(7, 0)
@@ -903,11 +926,14 @@ enum irdma_cqp_op_type {
#define IRDMAPFINT_OICR_PE_PUSH_M BIT(27)
#define IRDMAPFINT_OICR_PE_CRITERR_M BIT(28)
-#define IRDMA_QUERY_FPM_MAX_QPS GENMASK_ULL(18, 0)
-#define IRDMA_QUERY_FPM_MAX_CQS GENMASK_ULL(19, 0)
+#define IRDMA_QUERY_FPM_LOC_MEM_PAGES GENMASK_ULL(63, 32)
+#define IRDMA_QUERY_FPM_MAX_QPS GENMASK_ULL(31, 0)
+#define IRDMA_QUERY_FPM_MAX_CQS GENMASK_ULL(31, 0)
#define IRDMA_QUERY_FPM_FIRST_PE_SD_INDEX GENMASK_ULL(13, 0)
-#define IRDMA_QUERY_FPM_MAX_PE_SDS GENMASK_ULL(45, 32)
+#define IRDMA_QUERY_FPM_MAX_PE_SDS GENMASK_ULL(44, 32)
+#define IRDMA_QUERY_FPM_MAX_PE_SDS_GEN3 GENMASK_ULL(47, 32)
#define IRDMA_QUERY_FPM_MAX_CEQS GENMASK_ULL(9, 0)
+#define IRDMA_QUERY_FPM_MAX_IRD GENMASK_ULL(53, 50)
#define IRDMA_QUERY_FPM_XFBLOCKSIZE GENMASK_ULL(63, 32)
#define IRDMA_QUERY_FPM_Q1BLOCKSIZE GENMASK_ULL(63, 32)
#define IRDMA_QUERY_FPM_HTMULTIPLIER GENMASK_ULL(19, 16)
@@ -1103,7 +1129,7 @@ enum irdma_alignment {
IRDMA_CEQ_ALIGNMENT = 0x100,
IRDMA_CQ0_ALIGNMENT = 0x100,
IRDMA_SD_BUF_ALIGNMENT = 0x80,
- IRDMA_FEATURE_BUF_ALIGNMENT = 0x8,
+ IRDMA_FEATURE_BUF_ALIGNMENT = 0x10,
};
enum icrdma_protocol_used {
diff --git a/drivers/infiniband/hw/irdma/hmc.c b/drivers/infiniband/hw/irdma/hmc.c
index ac58088a8e41..da18add141da 100644
--- a/drivers/infiniband/hw/irdma/hmc.c
+++ b/drivers/infiniband/hw/irdma/hmc.c
@@ -5,6 +5,7 @@
#include "defs.h"
#include "type.h"
#include "protos.h"
+#include "virtchnl.h"
/**
* irdma_find_sd_index_limit - finds segment descriptor index limit
@@ -228,6 +229,10 @@ int irdma_sc_create_hmc_obj(struct irdma_sc_dev *dev,
bool pd_error = false;
int ret_code = 0;
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3 &&
+ dev->hmc_info->hmc_obj[info->rsrc_type].mem_loc == IRDMA_LOC_MEM)
+ return 0;
+
if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt)
return -EINVAL;
@@ -330,7 +335,7 @@ static int irdma_finish_del_sd_reg(struct irdma_sc_dev *dev,
u32 i, sd_idx;
struct irdma_dma_mem *mem;
- if (!reset)
+ if (dev->privileged && !reset)
ret_code = irdma_hmc_sd_grp(dev, info->hmc_info,
info->hmc_info->sd_indexes[0],
info->del_sd_cnt, false);
@@ -376,6 +381,9 @@ int irdma_sc_del_hmc_obj(struct irdma_sc_dev *dev,
u32 i, j;
int ret_code = 0;
+ if (dev->hmc_info->hmc_obj[info->rsrc_type].mem_loc == IRDMA_LOC_MEM)
+ return 0;
+
if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
ibdev_dbg(to_ibdev(dev),
"HMC: error start_idx[%04d] >= [type %04d].cnt[%04d]\n",
@@ -589,7 +597,10 @@ int irdma_add_pd_table_entry(struct irdma_sc_dev *dev,
pd_entry->sd_index = sd_idx;
pd_entry->valid = true;
pd_table->use_cnt++;
- irdma_invalidate_pf_hmc_pd(dev, sd_idx, rel_pd_idx);
+
+ if (hmc_info->hmc_fn_id < dev->hw_attrs.first_hw_vf_fpm_id &&
+ dev->privileged)
+ irdma_invalidate_pf_hmc_pd(dev, sd_idx, rel_pd_idx);
}
pd_entry->bp.use_cnt++;
@@ -640,7 +651,8 @@ int irdma_remove_pd_bp(struct irdma_sc_dev *dev,
pd_addr = pd_table->pd_page_addr.va;
pd_addr += rel_pd_idx;
memset(pd_addr, 0, sizeof(u64));
- irdma_invalidate_pf_hmc_pd(dev, sd_idx, idx);
+ if (dev->privileged && dev->hmc_fn_id == hmc_info->hmc_fn_id)
+ irdma_invalidate_pf_hmc_pd(dev, sd_idx, idx);
if (!pd_entry->rsrc_pg) {
mem = &pd_entry->bp.addr;
diff --git a/drivers/infiniband/hw/irdma/hmc.h b/drivers/infiniband/hw/irdma/hmc.h
index 415f9e23bbf6..257a5d22aa96 100644
--- a/drivers/infiniband/hw/irdma/hmc.h
+++ b/drivers/infiniband/hw/irdma/hmc.h
@@ -16,11 +16,21 @@
#define IRDMA_HMC_PD_BP_BUF_ALIGNMENT 4096
#define IRDMA_FIRST_VF_FPM_ID 8
#define FPM_MULTIPLIER 1024
+#define IRDMA_OBJ_LOC_MEM_BIT 0x4
+#define IRDMA_XF_MULTIPLIER 16
+#define IRDMA_RRF_MULTIPLIER 8
+#define IRDMA_MIN_PBLE_PAGES 3
+#define IRDMA_HMC_PAGE_SIZE 2097152
+#define IRDMA_MIN_MR_PER_QP 4
+#define IRDMA_MIN_QP_CNT 64
+#define IRDMA_FSIAV_CNT_MAX 1048576
+#define IRDMA_MIN_IRD 8
+#define IRDMA_HMC_MIN_RRF 16
enum irdma_hmc_rsrc_type {
IRDMA_HMC_IW_QP = 0,
IRDMA_HMC_IW_CQ = 1,
- IRDMA_HMC_IW_RESERVED = 2,
+ IRDMA_HMC_IW_SRQ = 2,
IRDMA_HMC_IW_HTE = 3,
IRDMA_HMC_IW_ARP = 4,
IRDMA_HMC_IW_APBVT_ENTRY = 5,
@@ -48,11 +58,17 @@ enum irdma_sd_entry_type {
IRDMA_SD_TYPE_DIRECT = 2,
};
+enum irdma_hmc_obj_mem {
+ IRDMA_HOST_MEM = 0,
+ IRDMA_LOC_MEM = 1,
+};
+
struct irdma_hmc_obj_info {
u64 base;
u32 max_cnt;
u32 cnt;
u64 size;
+ enum irdma_hmc_obj_mem mem_loc;
};
struct irdma_hmc_bp {
@@ -117,6 +133,7 @@ struct irdma_update_sds_info {
struct irdma_ccq_cqe_info;
struct irdma_hmc_fcn_info {
u32 vf_id;
+ u8 protocol_used;
u8 free_fcn;
};
diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c
index 69ce1862eabe..7bad0e38786a 100644
--- a/drivers/infiniband/hw/irdma/hw.c
+++ b/drivers/infiniband/hw/irdma/hw.c
@@ -33,6 +33,7 @@ static struct irdma_rsrc_limits rsrc_limits_table[] = {
static enum irdma_hmc_rsrc_type iw_hmc_obj_types[] = {
IRDMA_HMC_IW_QP,
IRDMA_HMC_IW_CQ,
+ IRDMA_HMC_IW_SRQ,
IRDMA_HMC_IW_HTE,
IRDMA_HMC_IW_ARP,
IRDMA_HMC_IW_APBVT_ENTRY,
@@ -134,75 +135,68 @@ static void irdma_process_ceq(struct irdma_pci_f *rf, struct irdma_ceq *ceq)
static void irdma_set_flush_fields(struct irdma_sc_qp *qp,
struct irdma_aeqe_info *info)
{
+ struct qp_err_code qp_err;
+
qp->sq_flush_code = info->sq;
qp->rq_flush_code = info->rq;
- qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
-
- switch (info->ae_id) {
- case IRDMA_AE_AMP_BOUNDS_VIOLATION:
- case IRDMA_AE_AMP_INVALID_STAG:
- case IRDMA_AE_AMP_RIGHTS_VIOLATION:
- case IRDMA_AE_AMP_UNALLOCATED_STAG:
- case IRDMA_AE_AMP_BAD_PD:
- case IRDMA_AE_AMP_BAD_QP:
- case IRDMA_AE_AMP_BAD_STAG_KEY:
- case IRDMA_AE_AMP_BAD_STAG_INDEX:
- case IRDMA_AE_AMP_TO_WRAP:
- case IRDMA_AE_PRIV_OPERATION_DENIED:
- qp->flush_code = FLUSH_PROT_ERR;
- qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
- break;
- case IRDMA_AE_UDA_XMIT_BAD_PD:
- case IRDMA_AE_WQE_UNEXPECTED_OPCODE:
- qp->flush_code = FLUSH_LOC_QP_OP_ERR;
- qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
- break;
- case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
- case IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT:
- case IRDMA_AE_UDA_L4LEN_INVALID:
- case IRDMA_AE_DDP_UBE_INVALID_MO:
- case IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
- qp->flush_code = FLUSH_LOC_LEN_ERR;
- qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
- break;
- case IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
- case IRDMA_AE_IB_REMOTE_ACCESS_ERROR:
- qp->flush_code = FLUSH_REM_ACCESS_ERR;
- qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
- break;
- case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
- case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
- case IRDMA_AE_ROCE_RSP_LENGTH_ERROR:
- case IRDMA_AE_IB_REMOTE_OP_ERROR:
- qp->flush_code = FLUSH_REM_OP_ERR;
- qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
- break;
- case IRDMA_AE_LCE_QP_CATASTROPHIC:
- qp->flush_code = FLUSH_FATAL_ERR;
- qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
- break;
- case IRDMA_AE_IB_RREQ_AND_Q1_FULL:
- qp->flush_code = FLUSH_GENERAL_ERR;
- break;
- case IRDMA_AE_LLP_TOO_MANY_RETRIES:
- qp->flush_code = FLUSH_RETRY_EXC_ERR;
- qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
- break;
- case IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS:
- case IRDMA_AE_AMP_MWBIND_BIND_DISABLED:
- case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS:
- case IRDMA_AE_AMP_MWBIND_VALID_STAG:
- qp->flush_code = FLUSH_MW_BIND_ERR;
- qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
- break;
- case IRDMA_AE_IB_INVALID_REQUEST:
- qp->flush_code = FLUSH_REM_INV_REQ_ERR;
- qp->event_type = IRDMA_QP_EVENT_REQ_ERR;
- break;
- default:
- qp->flush_code = FLUSH_GENERAL_ERR;
- qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
- break;
+ if (qp->qp_uk.uk_attrs->hw_rev >= IRDMA_GEN_3) {
+ if (info->sq) {
+ qp->err_sq_idx_valid = true;
+ qp->err_sq_idx = info->wqe_idx;
+ }
+ if (info->rq) {
+ qp->err_rq_idx_valid = true;
+ qp->err_rq_idx = info->wqe_idx;
+ }
+ }
+
+ qp_err = irdma_ae_to_qp_err_code(info->ae_id);
+ qp->flush_code = qp_err.flush_code;
+ qp->event_type = qp_err.event_type;
+}
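The deleted switch is folded into irdma_ae_to_qp_err_code(), which returns a flush-code/event-type pair. A hedged sketch of that helper's shape (the real mapping lives in a shared header outside this hunk, so the struct layout and cases here are assumptions mirroring the removed code):

/* Hedged sketch: shape of irdma_ae_to_qp_err_code(); mirrors the driver's
 * struct qp_err_code and the default arm of the removed switch.
 */
struct qp_err_code {
	enum irdma_flush_opcode flush_code;
	enum irdma_qp_event_type event_type;
};

static inline struct qp_err_code example_ae_to_qp_err_code(u16 ae_id)
{
	struct qp_err_code qp_err = {
		.flush_code = FLUSH_GENERAL_ERR,
		.event_type = IRDMA_QP_EVENT_CATASTROPHIC,
	};

	switch (ae_id) {
	case IRDMA_AE_AMP_BOUNDS_VIOLATION:
	case IRDMA_AE_AMP_INVALID_STAG:
		qp_err.flush_code = FLUSH_PROT_ERR;
		qp_err.event_type = IRDMA_QP_EVENT_ACCESS_ERR;
		break;
	/* ... remaining AE ids follow the removed switch above ... */
	}

	return qp_err;
}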
+
+/**
+ * irdma_complete_cqp_request - perform post-completion cleanup
+ * @cqp: device CQP
+ * @cqp_request: CQP request
+ *
+ * Mark the CQP request as done, wake any waiting thread or
+ * invoke its callback, then release the CQP request.
+ */
+static void irdma_complete_cqp_request(struct irdma_cqp *cqp,
+ struct irdma_cqp_request *cqp_request)
+{
+ if (cqp_request->waiting) {
+ WRITE_ONCE(cqp_request->request_done, true);
+ wake_up(&cqp_request->waitq);
+ } else if (cqp_request->callback_fcn) {
+ cqp_request->callback_fcn(cqp_request);
+ }
+ irdma_put_cqp_request(cqp, cqp_request);
+}
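The waiting side of this handshake sleeps on the waitq until request_done flips; a simplified sketch (the driver's real wait helper also handles timeouts and device reset):

/* Hedged sketch: the waiter paired with the wake_up() above. */
static int example_wait_cqp_request(struct irdma_cqp_request *cqp_request)
{
	wait_event(cqp_request->waitq,
		   READ_ONCE(cqp_request->request_done));

	return cqp_request->compl_info.error ? -EIO : 0;
}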
+
+/**
+ * irdma_process_ae_def_cmpl - handle IRDMA_AE_CQP_DEFERRED_COMPLETE event
+ * @rf: RDMA PCI function
+ * @info: AEQ entry info
+ */
+static void irdma_process_ae_def_cmpl(struct irdma_pci_f *rf,
+ struct irdma_aeqe_info *info)
+{
+ u32 sw_def_info;
+ u64 scratch;
+
+ irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq);
+
+ irdma_sc_cqp_def_cmpl_ae_handler(&rf->sc_dev, info, true,
+ &scratch, &sw_def_info);
+ while (scratch) {
+ struct irdma_cqp_request *cqp_request =
+ (struct irdma_cqp_request *)(uintptr_t)scratch;
+
+ irdma_complete_cqp_request(&rf->cqp, cqp_request);
+ irdma_sc_cqp_def_cmpl_ae_handler(&rf->sc_dev, info, false,
+ &scratch, &sw_def_info);
}
}
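The drain loop above recovers each in-flight request from the 64-bit scratch cookie that the submit path stored; a hedged sketch of that submit-side convention (standard across the driver's CQP ops, though not shown in this hunk):

/* Hedged sketch: CQP scratch carries the request pointer, round-tripped
 * through u64 so the AE drain loop above can recover it.
 */
static inline u64 example_cqp_scratch(struct irdma_cqp_request *cqp_request)
{
	return (u64)(uintptr_t)cqp_request;
}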
@@ -223,6 +217,7 @@ static void irdma_process_aeq(struct irdma_pci_f *rf)
struct irdma_sc_qp *qp = NULL;
struct irdma_qp_host_ctx_info *ctx_info = NULL;
struct irdma_device *iwdev = rf->iwdev;
+ struct irdma_sc_srq *srq;
unsigned long flags;
u32 aeqcnt = 0;
@@ -236,6 +231,13 @@ static void irdma_process_aeq(struct irdma_pci_f *rf)
if (ret)
break;
+ if (info->aeqe_overflow) {
+ ibdev_err(&iwdev->ibdev, "AEQ has overflowed\n");
+ rf->reset = true;
+ rf->gen_ops.request_reset(rf);
+ return;
+ }
+
aeqcnt++;
ibdev_dbg(&iwdev->ibdev,
"AEQ: ae_id = 0x%x bool qp=%d qp_id = %d tcp_state=%d iwarp_state=%d ae_src=%d\n",
@@ -266,9 +268,12 @@ static void irdma_process_aeq(struct irdma_pci_f *rf)
if (info->ae_id != IRDMA_AE_QP_SUSPEND_COMPLETE)
iwqp->last_aeq = info->ae_id;
spin_unlock_irqrestore(&iwqp->lock, flags);
- ctx_info = &iwqp->ctx_info;
+ } else if (info->srq) {
+ if (info->ae_id != IRDMA_AE_SRQ_LIMIT)
+ continue;
} else {
- if (info->ae_id != IRDMA_AE_CQ_OPERATION_ERROR)
+ if (info->ae_id != IRDMA_AE_CQ_OPERATION_ERROR &&
+ info->ae_id != IRDMA_AE_CQP_DEFERRED_COMPLETE)
continue;
}
@@ -363,6 +368,18 @@ static void irdma_process_aeq(struct irdma_pci_f *rf)
}
irdma_cq_rem_ref(&iwcq->ibcq);
break;
+ case IRDMA_AE_SRQ_LIMIT:
+ srq = (struct irdma_sc_srq *)(uintptr_t)info->compl_ctx;
+ irdma_srq_event(srq);
+ break;
+ case IRDMA_AE_SRQ_CATASTROPHIC_ERROR:
+ break;
+ case IRDMA_AE_CQP_DEFERRED_COMPLETE:
+ /* Remove completed CQP requests from the pending list
+ * and notify waiters of their completion.
+ */
+ irdma_process_ae_def_cmpl(rf, info);
+ break;
case IRDMA_AE_RESET_NOT_SENT:
case IRDMA_AE_LLP_DOUBT_REACHABILITY:
case IRDMA_AE_RESOURCE_EXHAUSTION:
@@ -389,13 +406,18 @@ static void irdma_process_aeq(struct irdma_pci_f *rf)
case IRDMA_AE_LCE_FUNCTION_CATASTROPHIC:
case IRDMA_AE_LLP_TOO_MANY_RNRS:
case IRDMA_AE_LCE_CQ_CATASTROPHIC:
+ case IRDMA_AE_REMOTE_QP_CATASTROPHIC:
+ case IRDMA_AE_LOCAL_QP_CATASTROPHIC:
+ case IRDMA_AE_RCE_QP_CATASTROPHIC:
case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
default:
ibdev_err(&iwdev->ibdev, "abnormal ae_id = 0x%x bool qp=%d qp_id = %d, ae_src=%d\n",
info->ae_id, info->qp, info->qp_cq_id, info->ae_src);
- if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
- ctx_info->roce_info->err_rq_idx_valid = info->rq;
- if (info->rq) {
+ ctx_info = &iwqp->ctx_info;
+ if (rdma_protocol_roce(&iwqp->iwdev->ibdev, 1)) {
+ ctx_info->roce_info->err_rq_idx_valid =
+ ctx_info->srq_valid ? false : info->err_rq_idx_valid;
+ if (ctx_info->roce_info->err_rq_idx_valid) {
ctx_info->roce_info->err_rq_idx = info->wqe_idx;
irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va,
ctx_info);
@@ -599,6 +621,8 @@ static void irdma_destroy_cqp(struct irdma_pci_f *rf)
dma_free_coherent(dev->hw->device, cqp->sq.size, cqp->sq.va,
cqp->sq.pa);
cqp->sq.va = NULL;
+ kfree(cqp->oop_op_array);
+ cqp->oop_op_array = NULL;
kfree(cqp->scratch_array);
cqp->scratch_array = NULL;
kfree(cqp->cqp_requests);
@@ -631,7 +655,9 @@ static void irdma_destroy_aeq(struct irdma_pci_f *rf)
int status = -EBUSY;
if (!rf->msix_shared) {
- rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, rf->iw_msixtbl->idx, false);
+ if (rf->sc_dev.privileged)
+ rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev,
+ rf->iw_msixtbl->idx, false);
irdma_destroy_irq(rf, rf->iw_msixtbl, rf);
}
if (rf->reset)
@@ -697,9 +723,10 @@ static void irdma_del_ceq_0(struct irdma_pci_f *rf)
if (rf->msix_shared) {
msix_vec = &rf->iw_msixtbl[0];
- rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev,
- msix_vec->ceq_id,
- msix_vec->idx, false);
+ if (rf->sc_dev.privileged)
+ rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev,
+ msix_vec->ceq_id,
+ msix_vec->idx, false);
irdma_destroy_irq(rf, msix_vec, rf);
} else {
msix_vec = &rf->iw_msixtbl[1];
@@ -730,8 +757,10 @@ static void irdma_del_ceqs(struct irdma_pci_f *rf)
msix_vec = &rf->iw_msixtbl[2];
for (i = 1; i < rf->ceqs_count; i++, msix_vec++, iwceq++) {
- rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, msix_vec->ceq_id,
- msix_vec->idx, false);
+ if (rf->sc_dev.privileged)
+ rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev,
+ msix_vec->ceq_id,
+ msix_vec->idx, false);
irdma_destroy_irq(rf, msix_vec, iwceq);
irdma_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq,
IRDMA_OP_CEQ_DESTROY);
@@ -942,6 +971,13 @@ static int irdma_create_cqp(struct irdma_pci_f *rf)
goto err_scratch;
}
+ cqp->oop_op_array = kcalloc(sqsize, sizeof(*cqp->oop_op_array),
+ GFP_KERNEL);
+ if (!cqp->oop_op_array) {
+ status = -ENOMEM;
+ goto err_oop;
+ }
+ cqp_init_info.ooo_op_array = cqp->oop_op_array;
dev->cqp = &cqp->sc_cqp;
dev->cqp->dev = dev;
cqp->sq.size = ALIGN(sizeof(struct irdma_cqp_sq_wqe) * sqsize,
@@ -978,6 +1014,10 @@ static int irdma_create_cqp(struct irdma_pci_f *rf)
case IRDMA_GEN_2:
cqp_init_info.hw_maj_ver = IRDMA_CQPHC_HW_MAJVER_GEN_2;
break;
+ case IRDMA_GEN_3:
+ cqp_init_info.hw_maj_ver = IRDMA_CQPHC_HW_MAJVER_GEN_3;
+ cqp_init_info.ts_override = 1;
+ break;
}
status = irdma_sc_cqp_init(dev->cqp, &cqp_init_info);
if (status) {
@@ -1012,6 +1052,9 @@ err_ctx:
cqp->sq.va, cqp->sq.pa);
cqp->sq.va = NULL;
err_sq:
+ kfree(cqp->oop_op_array);
+ cqp->oop_op_array = NULL;
+err_oop:
kfree(cqp->scratch_array);
cqp->scratch_array = NULL;
err_scratch:
@@ -1033,13 +1076,15 @@ static int irdma_create_ccq(struct irdma_pci_f *rf)
struct irdma_sc_dev *dev = &rf->sc_dev;
struct irdma_ccq_init_info info = {};
struct irdma_ccq *ccq = &rf->ccq;
+ int ccq_size;
int status;
dev->ccq = &ccq->sc_cq;
dev->ccq->dev = dev;
info.dev = dev;
+ ccq_size = (rf->rdma_ver >= IRDMA_GEN_3) ? IW_GEN_3_CCQ_SIZE : IW_CCQ_SIZE;
ccq->shadow_area.size = sizeof(struct irdma_cq_shadow_area);
- ccq->mem_cq.size = ALIGN(sizeof(struct irdma_cqe) * IW_CCQ_SIZE,
+ ccq->mem_cq.size = ALIGN(sizeof(struct irdma_cqe) * ccq_size,
IRDMA_CQ0_ALIGNMENT);
ccq->mem_cq.va = dma_alloc_coherent(dev->hw->device, ccq->mem_cq.size,
&ccq->mem_cq.pa, GFP_KERNEL);
@@ -1056,7 +1101,7 @@ static int irdma_create_ccq(struct irdma_pci_f *rf)
/* populate the ccq init info */
info.cq_base = ccq->mem_cq.va;
info.cq_pa = ccq->mem_cq.pa;
- info.num_elem = IW_CCQ_SIZE;
+ info.num_elem = ccq_size;
info.shadow_area = ccq->shadow_area.va;
info.shadow_area_pa = ccq->shadow_area.pa;
info.ceqe_mask = false;
@@ -1140,9 +1185,13 @@ static int irdma_cfg_ceq_vector(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
}
msix_vec->ceq_id = ceq_id;
- rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, ceq_id, msix_vec->idx, true);
-
- return 0;
+ if (rf->sc_dev.privileged)
+ rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, ceq_id,
+ msix_vec->idx, true);
+ else
+ status = irdma_vchnl_req_ceq_vec_map(&rf->sc_dev, ceq_id,
+ msix_vec->idx);
+ return status;
}
/**
@@ -1155,7 +1204,7 @@ static int irdma_cfg_ceq_vector(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
static int irdma_cfg_aeq_vector(struct irdma_pci_f *rf)
{
struct irdma_msix_vector *msix_vec = rf->iw_msixtbl;
- u32 ret = 0;
+ int ret = 0;
if (!rf->msix_shared) {
snprintf(msix_vec->name, sizeof(msix_vec->name) - 1,
@@ -1166,12 +1215,16 @@ static int irdma_cfg_aeq_vector(struct irdma_pci_f *rf)
}
if (ret) {
ibdev_dbg(&rf->iwdev->ibdev, "ERR: aeq irq config fail\n");
- return -EINVAL;
+ return ret;
}
- rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, msix_vec->idx, true);
+ if (rf->sc_dev.privileged)
+ rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, msix_vec->idx,
+ true);
+ else
+ ret = irdma_vchnl_req_aeq_vec_map(&rf->sc_dev, msix_vec->idx);
- return 0;
+ return ret;
}
/**
@@ -1179,13 +1232,13 @@ static int irdma_cfg_aeq_vector(struct irdma_pci_f *rf)
* @rf: RDMA PCI function
* @iwceq: pointer to the ceq resources to be created
* @ceq_id: the id number of the iwceq
- * @vsi: SC vsi struct
+ * @vsi_idx: VSI index
*
* Return 0, if the ceq and the resources associated with it
* are successfully created, otherwise return error
*/
static int irdma_create_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
- u32 ceq_id, struct irdma_sc_vsi *vsi)
+ u32 ceq_id, u16 vsi_idx)
{
int status;
struct irdma_ceq_init_info info = {};
@@ -1209,7 +1262,7 @@ static int irdma_create_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
info.elem_cnt = ceq_size;
iwceq->sc_ceq.ceq_id = ceq_id;
info.dev = dev;
- info.vsi = vsi;
+ info.vsi_idx = vsi_idx;
status = irdma_sc_ceq_init(&iwceq->sc_ceq, &info);
if (!status) {
if (dev->ceq_valid)
@@ -1252,7 +1305,7 @@ static int irdma_setup_ceq_0(struct irdma_pci_f *rf)
}
iwceq = &rf->ceqlist[0];
- status = irdma_create_ceq(rf, iwceq, 0, &rf->default_vsi);
+ status = irdma_create_ceq(rf, iwceq, 0, rf->default_vsi.vsi_idx);
if (status) {
ibdev_dbg(&rf->iwdev->ibdev, "ERR: create ceq status = %d\n",
status);
@@ -1287,13 +1340,13 @@ exit:
/**
* irdma_setup_ceqs - manage the device ceq's and their interrupt resources
* @rf: RDMA PCI function
- * @vsi: VSI structure for this CEQ
+ * @vsi_idx: VSI index for this CEQ
*
* Allocate a list for all device completion event queues
* Create the ceq's and configure their msix interrupt vectors
* Return 0, if ceqs are successfully set up, otherwise return error
*/
-static int irdma_setup_ceqs(struct irdma_pci_f *rf, struct irdma_sc_vsi *vsi)
+static int irdma_setup_ceqs(struct irdma_pci_f *rf, u16 vsi_idx)
{
u32 i;
u32 ceq_id;
@@ -1306,7 +1359,7 @@ static int irdma_setup_ceqs(struct irdma_pci_f *rf, struct irdma_sc_vsi *vsi)
i = (rf->msix_shared) ? 1 : 2;
for (ceq_id = 1; i < num_ceqs; i++, ceq_id++) {
iwceq = &rf->ceqlist[ceq_id];
- status = irdma_create_ceq(rf, iwceq, ceq_id, vsi);
+ status = irdma_create_ceq(rf, iwceq, ceq_id, vsi_idx);
if (status) {
ibdev_dbg(&rf->iwdev->ibdev,
"ERR: create ceq status = %d\n", status);
@@ -1387,7 +1440,10 @@ static int irdma_create_aeq(struct irdma_pci_f *rf)
aeq_size = multiplier * hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt +
hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt;
aeq_size = min(aeq_size, dev->hw_attrs.max_hw_aeq_size);
-
+ /* GEN_3 does not support virtual AEQ. Cap at max kernel alloc size */
+ if (rf->rdma_ver == IRDMA_GEN_3)
+ aeq_size = min(aeq_size, (u32)((PAGE_SIZE << MAX_PAGE_ORDER) /
+ sizeof(struct irdma_sc_aeqe)));
aeq->mem.size = ALIGN(sizeof(struct irdma_sc_aeqe) * aeq_size,
IRDMA_AEQ_ALIGNMENT);
aeq->mem.va = dma_alloc_coherent(dev->hw->device, aeq->mem.size,
@@ -1395,6 +1451,8 @@ static int irdma_create_aeq(struct irdma_pci_f *rf)
GFP_KERNEL | __GFP_NOWARN);
if (aeq->mem.va)
goto skip_virt_aeq;
+ else if (rf->rdma_ver == IRDMA_GEN_3)
+ return -ENOMEM;
/* physically mapped aeq failed. setup virtual aeq */
status = irdma_create_virt_aeq(rf, aeq_size);
@@ -1569,6 +1627,8 @@ static void irdma_del_init_mem(struct irdma_pci_f *rf)
{
struct irdma_sc_dev *dev = &rf->sc_dev;
+ if (!rf->sc_dev.privileged)
+ irdma_vchnl_req_put_hmc_fcn(&rf->sc_dev);
kfree(dev->hmc_info->sd_table.sd_entry);
dev->hmc_info->sd_table.sd_entry = NULL;
vfree(rf->mem_rsrc);
@@ -1635,6 +1695,7 @@ static int irdma_initialize_dev(struct irdma_pci_f *rf)
info.bar0 = rf->hw.hw_addr;
info.hmc_fn_id = rf->pf_id;
+ info.protocol_used = rf->protocol_used;
info.hw = &rf->hw;
status = irdma_sc_dev_init(rf->rdma_ver, &rf->sc_dev, &info);
if (status)
@@ -1665,9 +1726,6 @@ void irdma_rt_deinit_hw(struct irdma_device *iwdev)
irdma_del_local_mac_entry(iwdev->rf,
(u8)iwdev->mac_ip_table_idx);
fallthrough;
- case AEQ_CREATED:
- case PBLE_CHUNK_MEM:
- case CEQS_CREATED:
case IEQ_CREATED:
if (!iwdev->roce_mode)
irdma_puda_dele_rsrc(&iwdev->vsi, IRDMA_PUDA_RSRC_TYPE_IEQ,
@@ -1740,7 +1798,9 @@ static void irdma_get_used_rsrc(struct irdma_device *iwdev)
iwdev->rf->used_qps = find_first_zero_bit(iwdev->rf->allocated_qps,
iwdev->rf->max_qp);
iwdev->rf->used_cqs = find_first_zero_bit(iwdev->rf->allocated_cqs,
- iwdev->rf->max_cq);
+ iwdev->rf->max_cq);
+ iwdev->rf->used_srqs = find_first_zero_bit(iwdev->rf->allocated_srqs,
+ iwdev->rf->max_srq);
iwdev->rf->used_mrs = find_first_zero_bit(iwdev->rf->allocated_mrs,
iwdev->rf->max_mr);
}
@@ -1750,13 +1810,17 @@ void irdma_ctrl_deinit_hw(struct irdma_pci_f *rf)
enum init_completion_state state = rf->init_state;
rf->init_state = INVALID_STATE;
- if (rf->rsrc_created) {
+
+ switch (state) {
+ case AEQ_CREATED:
irdma_destroy_aeq(rf);
+ fallthrough;
+ case PBLE_CHUNK_MEM:
irdma_destroy_pble_prm(rf->pble_rsrc);
+ fallthrough;
+ case CEQS_CREATED:
irdma_del_ceqs(rf);
- rf->rsrc_created = false;
- }
- switch (state) {
+ fallthrough;
case CEQ0_CREATED:
irdma_del_ceq_0(rf);
fallthrough;
@@ -1835,32 +1899,6 @@ int irdma_rt_init_hw(struct irdma_device *iwdev,
break;
iwdev->init_state = IEQ_CREATED;
}
- if (!rf->rsrc_created) {
- status = irdma_setup_ceqs(rf, &iwdev->vsi);
- if (status)
- break;
-
- iwdev->init_state = CEQS_CREATED;
-
- status = irdma_hmc_init_pble(&rf->sc_dev,
- rf->pble_rsrc);
- if (status) {
- irdma_del_ceqs(rf);
- break;
- }
-
- iwdev->init_state = PBLE_CHUNK_MEM;
-
- status = irdma_setup_aeq(rf);
- if (status) {
- irdma_destroy_pble_prm(rf->pble_rsrc);
- irdma_del_ceqs(rf);
- break;
- }
- iwdev->init_state = AEQ_CREATED;
- rf->rsrc_created = true;
- }
-
if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
irdma_alloc_set_mac(iwdev);
irdma_add_ip(iwdev);
@@ -1907,6 +1945,13 @@ int irdma_ctrl_init_hw(struct irdma_pci_f *rf)
break;
rf->init_state = CQP_CREATED;
+ dev->feature_info[IRDMA_FEATURE_FW_INFO] = IRDMA_FW_VER_DEFAULT;
+ if (rf->rdma_ver != IRDMA_GEN_1) {
+ status = irdma_get_rdma_features(dev);
+ if (status)
+ break;
+ }
+
status = irdma_hmc_setup(rf);
if (status)
break;
@@ -1922,13 +1967,6 @@ int irdma_ctrl_init_hw(struct irdma_pci_f *rf)
break;
rf->init_state = CCQ_CREATED;
- dev->feature_info[IRDMA_FEATURE_FW_INFO] = IRDMA_FW_VER_DEFAULT;
- if (rf->rdma_ver != IRDMA_GEN_1) {
- status = irdma_get_rdma_features(dev);
- if (status)
- break;
- }
-
status = irdma_setup_ceq_0(rf);
if (status)
break;
@@ -1942,6 +1980,25 @@ int irdma_ctrl_init_hw(struct irdma_pci_f *rf)
}
INIT_WORK(&rf->cqp_cmpl_work, cqp_compl_worker);
irdma_sc_ccq_arm(dev->ccq);
+
+ status = irdma_setup_ceqs(rf, rf->iwdev ? rf->iwdev->vsi_num : 0);
+ if (status)
+ break;
+
+ rf->init_state = CEQS_CREATED;
+
+ status = irdma_hmc_init_pble(&rf->sc_dev,
+ rf->pble_rsrc);
+ if (status)
+ break;
+
+ rf->init_state = PBLE_CHUNK_MEM;
+
+ status = irdma_setup_aeq(rf);
+ if (status)
+ break;
+ rf->init_state = AEQ_CREATED;
+
return 0;
} while (0);
@@ -1960,7 +2017,8 @@ static void irdma_set_hw_rsrc(struct irdma_pci_f *rf)
rf->allocated_qps = (void *)(rf->mem_rsrc +
(sizeof(struct irdma_arp_entry) * rf->arp_table_size));
rf->allocated_cqs = &rf->allocated_qps[BITS_TO_LONGS(rf->max_qp)];
- rf->allocated_mrs = &rf->allocated_cqs[BITS_TO_LONGS(rf->max_cq)];
+ rf->allocated_srqs = &rf->allocated_cqs[BITS_TO_LONGS(rf->max_cq)];
+ rf->allocated_mrs = &rf->allocated_srqs[BITS_TO_LONGS(rf->max_srq)];
rf->allocated_pds = &rf->allocated_mrs[BITS_TO_LONGS(rf->max_mr)];
rf->allocated_ahs = &rf->allocated_pds[BITS_TO_LONGS(rf->max_pd)];
rf->allocated_mcgs = &rf->allocated_ahs[BITS_TO_LONGS(rf->max_ah)];
@@ -1988,12 +2046,14 @@ static u32 irdma_calc_mem_rsrc_size(struct irdma_pci_f *rf)
rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_qp);
rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mr);
rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_cq);
+ rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_srq);
rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_pd);
rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->arp_table_size);
rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_ah);
rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mcg);
rsrc_size += sizeof(struct irdma_qp **) * rf->max_qp;
rsrc_size += sizeof(struct irdma_cq **) * rf->max_cq;
+ rsrc_size += sizeof(struct irdma_srq **) * rf->max_srq;
return rsrc_size;
}
@@ -2021,6 +2081,7 @@ u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf)
rf->max_qp = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt;
rf->max_mr = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt;
rf->max_cq = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt;
+ rf->max_srq = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_SRQ].cnt;
rf->max_pd = rf->sc_dev.hw_attrs.max_hw_pds;
rf->arp_table_size = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].cnt;
rf->max_ah = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt;
@@ -2040,6 +2101,7 @@ u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf)
set_bit(0, rf->allocated_mrs);
set_bit(0, rf->allocated_qps);
set_bit(0, rf->allocated_cqs);
+ set_bit(0, rf->allocated_srqs);
set_bit(0, rf->allocated_pds);
set_bit(0, rf->allocated_arps);
set_bit(0, rf->allocated_ahs);
@@ -2100,15 +2162,16 @@ void irdma_cqp_ce_handler(struct irdma_pci_f *rf, struct irdma_sc_cq *cq)
cqp_request->compl_info.op_ret_val = info.op_ret_val;
cqp_request->compl_info.error = info.error;
- if (cqp_request->waiting) {
- WRITE_ONCE(cqp_request->request_done, true);
- wake_up(&cqp_request->waitq);
- irdma_put_cqp_request(&rf->cqp, cqp_request);
- } else {
- if (cqp_request->callback_fcn)
- cqp_request->callback_fcn(cqp_request);
- irdma_put_cqp_request(&rf->cqp, cqp_request);
- }
+ /*
+ * If this is a deferred or pending completion, mark the
+ * CQP request as pending so it does not block the CQ, but
+ * do not release it: it is still on the OOO list.
+ */
+ if (info.pending)
+ cqp_request->pending = true;
+ else
+ irdma_complete_cqp_request(&rf->cqp,
+ cqp_request);
}
cqe_count++;
@@ -2718,7 +2781,9 @@ void irdma_flush_wqes(struct irdma_qp *iwqp, u32 flush_mask)
struct irdma_pci_f *rf = iwqp->iwdev->rf;
u8 flush_code = iwqp->sc_qp.flush_code;
- if (!(flush_mask & IRDMA_FLUSH_SQ) && !(flush_mask & IRDMA_FLUSH_RQ))
+ if ((!(flush_mask & IRDMA_FLUSH_SQ) &&
+ !(flush_mask & IRDMA_FLUSH_RQ)) ||
+ ((flush_mask & IRDMA_REFLUSH) && rf->rdma_ver >= IRDMA_GEN_3))
return;
/* Set flush info fields*/
@@ -2731,6 +2796,10 @@ void irdma_flush_wqes(struct irdma_qp *iwqp, u32 flush_mask)
info.rq_major_code = IRDMA_FLUSH_MAJOR_ERR;
info.rq_minor_code = FLUSH_GENERAL_ERR;
info.userflushcode = true;
+ info.err_sq_idx_valid = iwqp->sc_qp.err_sq_idx_valid;
+ info.err_sq_idx = iwqp->sc_qp.err_sq_idx;
+ info.err_rq_idx_valid = iwqp->sc_qp.err_rq_idx_valid;
+ info.err_rq_idx = iwqp->sc_qp.err_rq_idx;
if (flush_mask & IRDMA_REFLUSH) {
if (info.sq)
diff --git a/drivers/infiniband/hw/irdma/i40iw_hw.c b/drivers/infiniband/hw/irdma/i40iw_hw.c
index ce61a27cb1f6..60c1f2b1811d 100644
--- a/drivers/infiniband/hw/irdma/i40iw_hw.c
+++ b/drivers/infiniband/hw/irdma/i40iw_hw.c
@@ -85,6 +85,7 @@ static u64 i40iw_masks[IRDMA_MAX_MASKS] = {
I40E_CQPSQ_CQ_CEQID,
I40E_CQPSQ_CQ_CQID,
I40E_COMMIT_FPM_CQCNT,
+ I40E_CQPSQ_UPESD_HMCFNID,
};
static u64 i40iw_shifts[IRDMA_MAX_SHIFTS] = {
@@ -94,6 +95,7 @@ static u64 i40iw_shifts[IRDMA_MAX_SHIFTS] = {
I40E_CQPSQ_CQ_CEQID_S,
I40E_CQPSQ_CQ_CQID_S,
I40E_COMMIT_FPM_CQCNT_S,
+ I40E_CQPSQ_UPESD_HMCFNID_S,
};
/**
diff --git a/drivers/infiniband/hw/irdma/i40iw_hw.h b/drivers/infiniband/hw/irdma/i40iw_hw.h
index e1db84d8a62c..0095b327afcc 100644
--- a/drivers/infiniband/hw/irdma/i40iw_hw.h
+++ b/drivers/infiniband/hw/irdma/i40iw_hw.h
@@ -123,6 +123,8 @@
#define I40E_CQPSQ_CQ_CQID GENMASK_ULL(15, 0)
#define I40E_COMMIT_FPM_CQCNT_S 0
#define I40E_COMMIT_FPM_CQCNT GENMASK_ULL(17, 0)
+#define I40E_CQPSQ_UPESD_HMCFNID_S 0
+#define I40E_CQPSQ_UPESD_HMCFNID GENMASK_ULL(5, 0)
#define I40E_VSIQF_CTL(_VSI) (0x0020D800 + ((_VSI) * 4))
diff --git a/drivers/infiniband/hw/irdma/i40iw_if.c b/drivers/infiniband/hw/irdma/i40iw_if.c
index cc50a7070371..15e036ddaffb 100644
--- a/drivers/infiniband/hw/irdma/i40iw_if.c
+++ b/drivers/infiniband/hw/irdma/i40iw_if.c
@@ -75,6 +75,9 @@ static void i40iw_fill_device_info(struct irdma_device *iwdev, struct i40e_info
struct irdma_pci_f *rf = iwdev->rf;
rf->rdma_ver = IRDMA_GEN_1;
+ rf->sc_dev.hw = &rf->hw;
+ rf->sc_dev.hw_attrs.uk_attrs.hw_rev = IRDMA_GEN_1;
+ rf->sc_dev.privileged = true;
rf->gen_ops.request_reset = i40iw_request_reset;
rf->pcidev = cdev_info->pcidev;
rf->pf_id = cdev_info->fid;
diff --git a/drivers/infiniband/hw/irdma/icrdma_hw.c b/drivers/infiniband/hw/irdma/icrdma_hw.c
index 941d3edffadb..32f26284a788 100644
--- a/drivers/infiniband/hw/irdma/icrdma_hw.c
+++ b/drivers/infiniband/hw/irdma/icrdma_hw.c
@@ -38,6 +38,7 @@ static u64 icrdma_masks[IRDMA_MAX_MASKS] = {
ICRDMA_CQPSQ_CQ_CEQID,
ICRDMA_CQPSQ_CQ_CQID,
ICRDMA_COMMIT_FPM_CQCNT,
+ ICRDMA_CQPSQ_UPESD_HMCFNID,
};
static u64 icrdma_shifts[IRDMA_MAX_SHIFTS] = {
@@ -47,6 +48,7 @@ static u64 icrdma_shifts[IRDMA_MAX_SHIFTS] = {
ICRDMA_CQPSQ_CQ_CEQID_S,
ICRDMA_CQPSQ_CQ_CQID_S,
ICRDMA_COMMIT_FPM_CQCNT_S,
+ ICRDMA_CQPSQ_UPESD_HMCFNID_S,
};
/**
@@ -194,6 +196,7 @@ void icrdma_init_hw(struct irdma_sc_dev *dev)
dev->hw_attrs.max_hw_ord = ICRDMA_MAX_ORD_SIZE;
dev->hw_attrs.max_stat_inst = ICRDMA_MAX_STATS_COUNT;
dev->hw_attrs.max_stat_idx = IRDMA_HW_STAT_INDEX_MAX_GEN_2;
+ dev->hw_attrs.max_hw_device_pages = ICRDMA_MAX_PUSH_PAGE_COUNT;
dev->hw_attrs.uk_attrs.min_hw_wq_size = ICRDMA_MIN_WQ_SIZE;
dev->hw_attrs.uk_attrs.max_hw_sq_chunk = IRDMA_MAX_QUANTA_PER_WR;
diff --git a/drivers/infiniband/hw/irdma/icrdma_hw.h b/drivers/infiniband/hw/irdma/icrdma_hw.h
index 697b9572b5c6..d97944ab45da 100644
--- a/drivers/infiniband/hw/irdma/icrdma_hw.h
+++ b/drivers/infiniband/hw/irdma/icrdma_hw.h
@@ -58,14 +58,15 @@
#define ICRDMA_CQPSQ_CQ_CQID GENMASK_ULL(18, 0)
#define ICRDMA_COMMIT_FPM_CQCNT_S 0
#define ICRDMA_COMMIT_FPM_CQCNT GENMASK_ULL(19, 0)
-
+#define ICRDMA_CQPSQ_UPESD_HMCFNID_S 0
+#define ICRDMA_CQPSQ_UPESD_HMCFNID GENMASK_ULL(5, 0)
enum icrdma_device_caps_const {
ICRDMA_MAX_STATS_COUNT = 128,
ICRDMA_MAX_IRD_SIZE = 127,
ICRDMA_MAX_ORD_SIZE = 255,
ICRDMA_MIN_WQ_SIZE = 8 /* WQEs */,
-
+ ICRDMA_MAX_PUSH_PAGE_COUNT = 256,
};
void icrdma_init_hw(struct irdma_sc_dev *dev);
diff --git a/drivers/infiniband/hw/irdma/icrdma_if.c b/drivers/infiniband/hw/irdma/icrdma_if.c
new file mode 100644
index 000000000000..27b191f61caf
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/icrdma_if.c
@@ -0,0 +1,343 @@
+// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+/* Copyright (c) 2015 - 2024 Intel Corporation */
+
+#include "main.h"
+#include <linux/net/intel/iidc_rdma_ice.h>
+
+static void icrdma_prep_tc_change(struct irdma_device *iwdev)
+{
+ iwdev->vsi.tc_change_pending = true;
+ irdma_sc_suspend_resume_qps(&iwdev->vsi, IRDMA_OP_SUSPEND);
+
+ /* Wait for all qp's to suspend */
+ wait_event_timeout(iwdev->suspend_wq,
+ !atomic_read(&iwdev->vsi.qp_suspend_reqs),
+ msecs_to_jiffies(IRDMA_EVENT_TIMEOUT_MS));
+ irdma_ws_reset(&iwdev->vsi);
+}
+
+static void icrdma_fill_qos_info(struct irdma_l2params *l2params,
+ struct iidc_rdma_qos_params *qos_info)
+{
+ int i;
+
+ l2params->num_tc = qos_info->num_tc;
+ l2params->vsi_prio_type = qos_info->vport_priority_type;
+ l2params->vsi_rel_bw = qos_info->vport_relative_bw;
+ for (i = 0; i < l2params->num_tc; i++) {
+ l2params->tc_info[i].egress_virt_up =
+ qos_info->tc_info[i].egress_virt_up;
+ l2params->tc_info[i].ingress_virt_up =
+ qos_info->tc_info[i].ingress_virt_up;
+ l2params->tc_info[i].prio_type = qos_info->tc_info[i].prio_type;
+ l2params->tc_info[i].rel_bw = qos_info->tc_info[i].rel_bw;
+ l2params->tc_info[i].tc_ctx = qos_info->tc_info[i].tc_ctx;
+ }
+ for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++)
+ l2params->up2tc[i] = qos_info->up2tc[i];
+ if (qos_info->pfc_mode == IIDC_DSCP_PFC_MODE) {
+ l2params->dscp_mode = true;
+ memcpy(l2params->dscp_map, qos_info->dscp_map, sizeof(l2params->dscp_map));
+ }
+}
+
+static void icrdma_iidc_event_handler(struct iidc_rdma_core_dev_info *cdev_info,
+ struct iidc_rdma_event *event)
+{
+ struct irdma_device *iwdev = dev_get_drvdata(&cdev_info->adev->dev);
+ struct irdma_l2params l2params = {};
+
+ if (*event->type & BIT(IIDC_RDMA_EVENT_AFTER_MTU_CHANGE)) {
+ ibdev_dbg(&iwdev->ibdev, "CLNT: new MTU = %d\n", iwdev->netdev->mtu);
+ if (iwdev->vsi.mtu != iwdev->netdev->mtu) {
+ l2params.mtu = iwdev->netdev->mtu;
+ l2params.mtu_changed = true;
+ irdma_log_invalid_mtu(l2params.mtu, &iwdev->rf->sc_dev);
+ irdma_change_l2params(&iwdev->vsi, &l2params);
+ }
+ } else if (*event->type & BIT(IIDC_RDMA_EVENT_BEFORE_TC_CHANGE)) {
+ if (iwdev->vsi.tc_change_pending)
+ return;
+
+ icrdma_prep_tc_change(iwdev);
+ } else if (*event->type & BIT(IIDC_RDMA_EVENT_AFTER_TC_CHANGE)) {
+ struct iidc_rdma_priv_dev_info *idc_priv = cdev_info->iidc_priv;
+
+ if (!iwdev->vsi.tc_change_pending)
+ return;
+
+ l2params.tc_changed = true;
+ ibdev_dbg(&iwdev->ibdev, "CLNT: TC Change\n");
+
+ icrdma_fill_qos_info(&l2params, &idc_priv->qos_info);
+ if (iwdev->rf->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
+ iwdev->dcb_vlan_mode =
+ l2params.num_tc > 1 && !l2params.dscp_mode;
+ irdma_change_l2params(&iwdev->vsi, &l2params);
+ } else if (*event->type & BIT(IIDC_RDMA_EVENT_CRIT_ERR)) {
+ ibdev_warn(&iwdev->ibdev, "ICE OICR event notification: oicr = 0x%08x\n",
+ event->reg);
+ if (event->reg & IRDMAPFINT_OICR_PE_CRITERR_M) {
+ u32 pe_criterr;
+
+ pe_criterr = readl(iwdev->rf->sc_dev.hw_regs[IRDMA_GLPE_CRITERR]);
+#define IRDMA_Q1_RESOURCE_ERR 0x0001024d
+ if (pe_criterr != IRDMA_Q1_RESOURCE_ERR) {
+ ibdev_err(&iwdev->ibdev, "critical PE Error, GLPE_CRITERR=0x%08x\n",
+ pe_criterr);
+ iwdev->rf->reset = true;
+ } else {
+ ibdev_warn(&iwdev->ibdev, "Q1 Resource Check\n");
+ }
+ }
+ if (event->reg & IRDMAPFINT_OICR_HMC_ERR_M) {
+ ibdev_err(&iwdev->ibdev, "HMC Error\n");
+ iwdev->rf->reset = true;
+ }
+ if (event->reg & IRDMAPFINT_OICR_PE_PUSH_M) {
+ ibdev_err(&iwdev->ibdev, "PE Push Error\n");
+ iwdev->rf->reset = true;
+ }
+ if (iwdev->rf->reset)
+ iwdev->rf->gen_ops.request_reset(iwdev->rf);
+ }
+}
+
+/**
+ * icrdma_lan_register_qset - Register qset with LAN driver
+ * @vsi: vsi structure
+ * @tc_node: Traffic class node
+ */
+static int icrdma_lan_register_qset(struct irdma_sc_vsi *vsi,
+ struct irdma_ws_node *tc_node)
+{
+ struct irdma_device *iwdev = vsi->back_vsi;
+ struct iidc_rdma_core_dev_info *cdev_info = iwdev->rf->cdev;
+ struct iidc_rdma_qset_params qset = {};
+ int ret;
+
+ qset.qs_handle = tc_node->qs_handle;
+ qset.tc = tc_node->traffic_class;
+ qset.vport_id = vsi->vsi_idx;
+ ret = ice_add_rdma_qset(cdev_info, &qset);
+ if (ret) {
+ ibdev_dbg(&iwdev->ibdev, "WS: LAN alloc_res for rdma qset failed.\n");
+ return ret;
+ }
+
+ tc_node->l2_sched_node_id = qset.teid;
+ vsi->qos[tc_node->user_pri].l2_sched_node_id = qset.teid;
+
+ return 0;
+}
+
+/**
+ * icrdma_lan_unregister_qset - Unregister qset with LAN driver
+ * @vsi: vsi structure
+ * @tc_node: Traffic class node
+ */
+static void icrdma_lan_unregister_qset(struct irdma_sc_vsi *vsi,
+ struct irdma_ws_node *tc_node)
+{
+ struct irdma_device *iwdev = vsi->back_vsi;
+ struct iidc_rdma_core_dev_info *cdev_info = iwdev->rf->cdev;
+ struct iidc_rdma_qset_params qset = {};
+
+ qset.qs_handle = tc_node->qs_handle;
+ qset.tc = tc_node->traffic_class;
+ qset.vport_id = vsi->vsi_idx;
+ qset.teid = tc_node->l2_sched_node_id;
+
+ if (ice_del_rdma_qset(cdev_info, &qset))
+ ibdev_dbg(&iwdev->ibdev, "WS: LAN free_res for rdma qset failed.\n");
+}
+
+/**
+ * icrdma_request_reset - Request a reset
+ * @rf: RDMA PCI function
+ */
+static void icrdma_request_reset(struct irdma_pci_f *rf)
+{
+ ibdev_warn(&rf->iwdev->ibdev, "Requesting a reset\n");
+ ice_rdma_request_reset(rf->cdev, IIDC_FUNC_RESET);
+}
+
+static int icrdma_init_interrupts(struct irdma_pci_f *rf, struct iidc_rdma_core_dev_info *cdev)
+{
+ int i;
+
+ rf->msix_count = num_online_cpus() + IRDMA_NUM_AEQ_MSIX;
+ rf->msix_entries = kcalloc(rf->msix_count, sizeof(*rf->msix_entries),
+ GFP_KERNEL);
+ if (!rf->msix_entries)
+ return -ENOMEM;
+
+ for (i = 0; i < rf->msix_count; i++)
+ if (ice_alloc_rdma_qvector(cdev, &rf->msix_entries[i]))
+ break;
+
+ if (i < IRDMA_MIN_MSIX) {
+ while (--i >= 0)
+ ice_free_rdma_qvector(cdev, &rf->msix_entries[i]);
+
+ kfree(rf->msix_entries);
+ return -ENOMEM;
+ }
+
+ rf->msix_count = i;
+
+ return 0;
+}
+
+static void icrdma_deinit_interrupts(struct irdma_pci_f *rf, struct iidc_rdma_core_dev_info *cdev)
+{
+ int i;
+
+ for (i = 0; i < rf->msix_count; i++)
+ ice_free_rdma_qvector(cdev, &rf->msix_entries[i]);
+
+ kfree(rf->msix_entries);
+}
+
+static void icrdma_fill_device_info(struct irdma_device *iwdev,
+ struct iidc_rdma_core_dev_info *cdev_info)
+{
+ struct iidc_rdma_priv_dev_info *idc_priv = cdev_info->iidc_priv;
+ struct irdma_pci_f *rf = iwdev->rf;
+
+ rf->sc_dev.hw = &rf->hw;
+ rf->iwdev = iwdev;
+ rf->cdev = cdev_info;
+ rf->hw.hw_addr = idc_priv->hw_addr;
+ rf->pcidev = cdev_info->pdev;
+ rf->hw.device = &rf->pcidev->dev;
+ rf->pf_id = idc_priv->pf_id;
+ rf->rdma_ver = IRDMA_GEN_2;
+ rf->sc_dev.hw_attrs.uk_attrs.hw_rev = IRDMA_GEN_2;
+ rf->sc_dev.is_pf = true;
+ rf->sc_dev.privileged = true;
+
+ rf->gen_ops.register_qset = icrdma_lan_register_qset;
+ rf->gen_ops.unregister_qset = icrdma_lan_unregister_qset;
+
+ rf->default_vsi.vsi_idx = idc_priv->vport_id;
+ rf->protocol_used =
+ cdev_info->rdma_protocol == IIDC_RDMA_PROTOCOL_ROCEV2 ?
+ IRDMA_ROCE_PROTOCOL_ONLY : IRDMA_IWARP_PROTOCOL_ONLY;
+ rf->rsrc_profile = IRDMA_HMC_PROFILE_DEFAULT;
+ rf->rst_to = IRDMA_RST_TIMEOUT_HZ;
+ rf->gen_ops.request_reset = icrdma_request_reset;
+ rf->limits_sel = 7;
+ mutex_init(&rf->ah_tbl_lock);
+
+ iwdev->netdev = idc_priv->netdev;
+ iwdev->vsi_num = idc_priv->vport_id;
+ iwdev->init_state = INITIAL_STATE;
+ iwdev->roce_cwnd = IRDMA_ROCE_CWND_DEFAULT;
+ iwdev->roce_ackcreds = IRDMA_ROCE_ACKCREDS_DEFAULT;
+ iwdev->rcv_wnd = IRDMA_CM_DEFAULT_RCV_WND_SCALED;
+ iwdev->rcv_wscale = IRDMA_CM_DEFAULT_RCV_WND_SCALE;
+ if (iwdev->rf->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
+ iwdev->roce_mode = true;
+}
+
+static int icrdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_device_id *id)
+{
+ struct iidc_rdma_core_auxiliary_dev *iidc_adev;
+ struct iidc_rdma_core_dev_info *cdev_info;
+ struct iidc_rdma_priv_dev_info *idc_priv;
+ struct irdma_l2params l2params = {};
+ struct irdma_device *iwdev;
+ struct irdma_pci_f *rf;
+ int err;
+
+ iidc_adev = container_of(aux_dev, struct iidc_rdma_core_auxiliary_dev, adev);
+ cdev_info = iidc_adev->cdev_info;
+ idc_priv = cdev_info->iidc_priv;
+
+ iwdev = ib_alloc_device(irdma_device, ibdev);
+ if (!iwdev)
+ return -ENOMEM;
+ iwdev->rf = kzalloc(sizeof(*rf), GFP_KERNEL);
+ if (!iwdev->rf) {
+ ib_dealloc_device(&iwdev->ibdev);
+ return -ENOMEM;
+ }
+
+ icrdma_fill_device_info(iwdev, cdev_info);
+ rf = iwdev->rf;
+
+ err = icrdma_init_interrupts(rf, cdev_info);
+ if (err)
+ goto err_init_interrupts;
+
+ err = irdma_ctrl_init_hw(rf);
+ if (err)
+ goto err_ctrl_init;
+
+ l2params.mtu = iwdev->netdev->mtu;
+ icrdma_fill_qos_info(&l2params, &idc_priv->qos_info);
+ if (iwdev->rf->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
+ iwdev->dcb_vlan_mode = l2params.num_tc > 1 && !l2params.dscp_mode;
+
+ err = irdma_rt_init_hw(iwdev, &l2params);
+ if (err)
+ goto err_rt_init;
+
+ err = irdma_ib_register_device(iwdev);
+ if (err)
+ goto err_ibreg;
+
+ ice_rdma_update_vsi_filter(cdev_info, iwdev->vsi_num, true);
+
+ ibdev_dbg(&iwdev->ibdev, "INIT: Gen2 PF[%d] device probe success\n", PCI_FUNC(rf->pcidev->devfn));
+ auxiliary_set_drvdata(aux_dev, iwdev);
+
+ return 0;
+
+err_ibreg:
+ irdma_rt_deinit_hw(iwdev);
+err_rt_init:
+ irdma_ctrl_deinit_hw(rf);
+err_ctrl_init:
+ icrdma_deinit_interrupts(rf, cdev_info);
+err_init_interrupts:
+ kfree(iwdev->rf);
+ ib_dealloc_device(&iwdev->ibdev);
+
+ return err;
+}
+
+static void icrdma_remove(struct auxiliary_device *aux_dev)
+{
+ struct iidc_rdma_core_auxiliary_dev *idc_adev =
+ container_of(aux_dev, struct iidc_rdma_core_auxiliary_dev, adev);
+ struct iidc_rdma_core_dev_info *cdev_info = idc_adev->cdev_info;
+ struct irdma_device *iwdev = auxiliary_get_drvdata(aux_dev);
+ u8 rdma_ver = iwdev->rf->rdma_ver;
+
+ ice_rdma_update_vsi_filter(cdev_info, iwdev->vsi_num, false);
+ irdma_ib_unregister_device(iwdev);
+ icrdma_deinit_interrupts(iwdev->rf, cdev_info);
+
+ pr_debug("INIT: Gen[%d] func[%d] device remove success\n",
+ rdma_ver, PCI_FUNC(cdev_info->pdev->devfn));
+}
+
+static const struct auxiliary_device_id icrdma_auxiliary_id_table[] = {
+ {.name = "ice.iwarp", },
+ {.name = "ice.roce", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(auxiliary, icrdma_auxiliary_id_table);
+
+struct iidc_rdma_core_auxiliary_drv icrdma_core_auxiliary_drv = {
+ .adrv = {
+ .name = "gen_2",
+ .id_table = icrdma_auxiliary_id_table,
+ .probe = icrdma_probe,
+ .remove = icrdma_remove,
+ },
+ .event_handler = icrdma_iidc_event_handler,
+};
diff --git a/drivers/infiniband/hw/irdma/ig3rdma_hw.c b/drivers/infiniband/hw/irdma/ig3rdma_hw.c
new file mode 100644
index 000000000000..2e8bb475e22a
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/ig3rdma_hw.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+/* Copyright (c) 2018 - 2024 Intel Corporation */
+#include "osdep.h"
+#include "type.h"
+#include "protos.h"
+#include "ig3rdma_hw.h"
+
+/**
+ * ig3rdma_ena_irq - Enable interrupt
+ * @dev: pointer to the device structure
+ * @idx: vector index
+ */
+static void ig3rdma_ena_irq(struct irdma_sc_dev *dev, u32 idx)
+{
+ u32 val;
+ u32 int_stride = 1; /* one u32 per register */
+
+ if (dev->is_pf)
+ int_stride = 0x400;
+ else
+ idx--; /* VFs use DYN_CTL_N */
+
+ val = FIELD_PREP(IRDMA_GLINT_DYN_CTL_INTENA, 1) |
+ FIELD_PREP(IRDMA_GLINT_DYN_CTL_CLEARPBA, 1);
+
+ writel(val, dev->hw_regs[IRDMA_GLINT_DYN_CTL] + (idx * int_stride));
+}
+
+/**
+ * ig3rdma_disable_irq - Disable interrupt
+ * @dev: pointer to the device structure
+ * @idx: vector index
+ */
+static void ig3rdma_disable_irq(struct irdma_sc_dev *dev, u32 idx)
+{
+ u32 int_stride = 1; /* one u32 per register */
+
+ if (dev->is_pf)
+ int_stride = 0x400;
+ else
+ idx--; /* VFs use DYN_CTL_N */
+
+ writel(0, dev->hw_regs[IRDMA_GLINT_DYN_CTL] + (idx * int_stride));
+}
+
+static const struct irdma_irq_ops ig3rdma_irq_ops = {
+ .irdma_dis_irq = ig3rdma_disable_irq,
+ .irdma_en_irq = ig3rdma_ena_irq,
+};
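The only PF/VF difference above is addressing: PF interrupt-control registers sit 0x400 u32s apart, while VFs index a contiguous DYN_CTL_N array (hence the idx-- on the VF path). A condensed sketch of the offset math, mirrored from the two functions above:

/* Hedged sketch: DYN_CTL register addressing for PF vs. VF. */
static u32 __iomem *example_dyn_ctl_addr(struct irdma_sc_dev *dev, u32 idx)
{
	u32 int_stride = 1;	/* one u32 per register */

	if (dev->is_pf)
		int_stride = 0x400;
	else
		idx--;		/* VFs use DYN_CTL_N */

	return dev->hw_regs[IRDMA_GLINT_DYN_CTL] + idx * int_stride;
}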
+
+static const struct irdma_hw_stat_map ig3rdma_hw_stat_map[] = {
+ [IRDMA_HW_STAT_INDEX_RXVLANERR] = { 0, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP4RXOCTS] = { 8, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP4RXPKTS] = { 16, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP4RXDISCARD] = { 24, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP4RXTRUNC] = { 32, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP4RXFRAGS] = { 40, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP4RXMCOCTS] = { 48, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP4RXMCPKTS] = { 56, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP6RXOCTS] = { 64, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP6RXPKTS] = { 72, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP6RXDISCARD] = { 80, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP6RXTRUNC] = { 88, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP6RXFRAGS] = { 96, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP6RXMCOCTS] = { 104, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP6RXMCPKTS] = { 112, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP4TXOCTS] = { 120, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP4TXPKTS] = { 128, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP4TXFRAGS] = { 136, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP4TXMCOCTS] = { 144, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP4TXMCPKTS] = { 152, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP6TXOCTS] = { 160, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP6TXPKTS] = { 168, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP6TXFRAGS] = { 176, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP6TXMCOCTS] = { 184, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP6TXMCPKTS] = { 192, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP4TXNOROUTE] = { 200, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP6TXNOROUTE] = { 208, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_TCPRTXSEG] = { 216, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_TCPRXOPTERR] = { 224, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_TCPRXPROTOERR] = { 232, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_TCPTXSEG] = { 240, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_TCPRXSEGS] = { 248, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_UDPRXPKTS] = { 256, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_UDPTXPKTS] = { 264, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RDMARXWRS] = { 272, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RDMARXRDS] = { 280, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RDMARXSNDS] = { 288, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RDMATXWRS] = { 296, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RDMATXRDS] = { 304, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RDMATXSNDS] = { 312, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RDMAVBND] = { 320, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RDMAVINV] = { 328, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS] = { 336, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED] = { 344, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED] = { 352, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_TXNPCNPSENT] = { 360, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RNR_SENT] = { 368, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RNR_RCVD] = { 376, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RDMAORDLMTCNT] = { 384, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RDMAIRDLMTCNT] = { 392, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RDMARXATS] = { 408, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RDMATXATS] = { 416, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_NAKSEQERR] = { 424, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_NAKSEQERR_IMPLIED] = { 432, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RTO] = { 440, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RXOOOPKTS] = { 448, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_ICRCERR] = { 456, 0, 0 },
+};
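Each initializer above is a byte offset into the GEN_3 statistics buffer; the two trailing zeros are the bit-offset and mask fields that earlier generations use for sub-field counters. A hedged sketch of consuming the map (the field name byte_off is assumed from the initializer order):

/* Hedged sketch: GEN_3 counters are plain 64-bit values at byte_off;
 * no sub-field masking is needed, unlike GEN_1/GEN_2.
 */
static u64 example_read_hw_stat(const struct irdma_hw_stat_map *map,
				const u8 *stats_buf, int stat_idx)
{
	return *(const u64 *)(stats_buf + map[stat_idx].byte_off);
}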
+
+void ig3rdma_init_hw(struct irdma_sc_dev *dev)
+{
+ dev->irq_ops = &ig3rdma_irq_ops;
+ dev->hw_stats_map = ig3rdma_hw_stat_map;
+
+ dev->hw_attrs.uk_attrs.hw_rev = IRDMA_GEN_3;
+ dev->hw_attrs.uk_attrs.max_hw_wq_frags = IG3RDMA_MAX_WQ_FRAGMENT_COUNT;
+ dev->hw_attrs.uk_attrs.max_hw_read_sges = IG3RDMA_MAX_SGE_RD;
+ dev->hw_attrs.uk_attrs.max_hw_sq_chunk = IRDMA_MAX_QUANTA_PER_WR;
+ dev->hw_attrs.first_hw_vf_fpm_id = 0;
+ dev->hw_attrs.max_hw_vf_fpm_id = IG3_MAX_APFS + IG3_MAX_AVFS;
+ dev->hw_attrs.uk_attrs.feature_flags |= IRDMA_FEATURE_64_BYTE_CQE;
+ dev->hw_attrs.uk_attrs.feature_flags |= IRDMA_FEATURE_CQE_TIMESTAMPING;
+
+ dev->hw_attrs.uk_attrs.feature_flags |= IRDMA_FEATURE_SRQ;
+ dev->hw_attrs.uk_attrs.feature_flags |= IRDMA_FEATURE_RTS_AE |
+ IRDMA_FEATURE_CQ_RESIZE;
+ dev->hw_attrs.page_size_cap = SZ_4K | SZ_2M | SZ_1G;
+ dev->hw_attrs.max_hw_ird = IG3RDMA_MAX_IRD_SIZE;
+ dev->hw_attrs.max_hw_ord = IG3RDMA_MAX_ORD_SIZE;
+ dev->hw_attrs.max_stat_inst = IG3RDMA_MAX_STATS_COUNT;
+ dev->hw_attrs.max_stat_idx = IRDMA_HW_STAT_INDEX_MAX_GEN_3;
+ dev->hw_attrs.uk_attrs.min_hw_wq_size = IG3RDMA_MIN_WQ_SIZE;
+ dev->hw_attrs.uk_attrs.max_hw_srq_quanta = IRDMA_SRQ_MAX_QUANTA;
+ dev->hw_attrs.uk_attrs.max_hw_inline = IG3RDMA_MAX_INLINE_DATA_SIZE;
+ dev->hw_attrs.max_hw_device_pages =
+ dev->is_pf ? IG3RDMA_MAX_PF_PUSH_PAGE_COUNT : IG3RDMA_MAX_VF_PUSH_PAGE_COUNT;
+}
+
+static void __iomem *__ig3rdma_get_reg_addr(struct irdma_mmio_region *region, u64 reg_offset)
+{
+ if (reg_offset >= region->offset &&
+ reg_offset < (region->offset + region->len)) {
+ reg_offset -= region->offset;
+
+ return region->addr + reg_offset;
+ }
+
+ return NULL;
+}
+
+void __iomem *ig3rdma_get_reg_addr(struct irdma_hw *hw, u64 reg_offset)
+{
+ u8 __iomem *reg_addr;
+ int i;
+
+ reg_addr = __ig3rdma_get_reg_addr(&hw->rdma_reg, reg_offset);
+ if (reg_addr)
+ return reg_addr;
+
+ for (i = 0; i < hw->num_io_regions; i++) {
+ reg_addr = __ig3rdma_get_reg_addr(&hw->io_regs[i], reg_offset);
+ if (reg_addr)
+ return reg_addr;
+ }
+
+ WARN_ON_ONCE(1);
+
+ return NULL;
+}
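The lookup checks the dedicated RDMA window first and then each auxiliary MMIO region, converting an absolute register offset into the corresponding ioremapped address, and warns once if nothing matches. A hedged caller sketch — 0x1000 is an arbitrary example offset, not a documented register:

	void __iomem *reg = ig3rdma_get_reg_addr(&rf->hw, 0x1000);

	if (reg)
		writel(readl(reg) | BIT(0), reg);	/* read-modify-write */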
diff --git a/drivers/infiniband/hw/irdma/ig3rdma_hw.h b/drivers/infiniband/hw/irdma/ig3rdma_hw.h
new file mode 100644
index 000000000000..03d5f1188789
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/ig3rdma_hw.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
+/* Copyright (c) 2021 - 2024 Intel Corporation */
+#ifndef IG3RDMA_HW_H
+#define IG3RDMA_HW_H
+
+#define IG3_MAX_APFS 1
+#define IG3_MAX_AVFS 0
+
+#define IG3_PF_RDMA_REGION_OFFSET 0xBC00000
+#define IG3_PF_RDMA_REGION_LEN 0x401000
+#define IG3_VF_RDMA_REGION_OFFSET 0x8C00
+#define IG3_VF_RDMA_REGION_LEN 0x8400
+
+enum ig3rdma_device_caps_const {
+ IG3RDMA_MAX_WQ_FRAGMENT_COUNT = 14,
+ IG3RDMA_MAX_SGE_RD = 14,
+
+ IG3RDMA_MAX_STATS_COUNT = 128,
+
+ IG3RDMA_MAX_IRD_SIZE = 64,
+ IG3RDMA_MAX_ORD_SIZE = 64,
+ IG3RDMA_MIN_WQ_SIZE = 16 /* WQEs */,
+ IG3RDMA_MAX_INLINE_DATA_SIZE = 216,
+ IG3RDMA_MAX_PF_PUSH_PAGE_COUNT = 8192,
+ IG3RDMA_MAX_VF_PUSH_PAGE_COUNT = 16,
+};
+
+void __iomem *ig3rdma_get_reg_addr(struct irdma_hw *hw, u64 reg_offset);
+int ig3rdma_vchnl_send_sync(struct irdma_sc_dev *dev, u8 *msg, u16 len,
+ u8 *recv_msg, u16 *recv_len);
+
+#endif /* IG3RDMA_HW_H */
diff --git a/drivers/infiniband/hw/irdma/ig3rdma_if.c b/drivers/infiniband/hw/irdma/ig3rdma_if.c
new file mode 100644
index 000000000000..1bb42eb298ba
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/ig3rdma_if.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+/* Copyright (c) 2023 - 2024 Intel Corporation */
+
+#include "main.h"
+#include <linux/net/intel/iidc_rdma_idpf.h>
+#include "ig3rdma_hw.h"
+
+static void ig3rdma_idc_core_event_handler(struct iidc_rdma_core_dev_info *cdev_info,
+ struct iidc_rdma_event *event)
+{
+ struct irdma_pci_f *rf = auxiliary_get_drvdata(cdev_info->adev);
+
+ if (*event->type & BIT(IIDC_RDMA_EVENT_WARN_RESET)) {
+ rf->reset = true;
+ rf->sc_dev.vchnl_up = false;
+ }
+}
+
+int ig3rdma_vchnl_send_sync(struct irdma_sc_dev *dev, u8 *msg, u16 len,
+ u8 *recv_msg, u16 *recv_len)
+{
+ struct iidc_rdma_core_dev_info *cdev_info = dev_to_rf(dev)->cdev;
+ int ret;
+
+ ret = idpf_idc_rdma_vc_send_sync(cdev_info, msg, len, recv_msg,
+ recv_len);
+ if (ret == -ETIMEDOUT) {
+ ibdev_err(&(dev_to_rf(dev)->iwdev->ibdev),
+ "Virtual channel Req <-> Resp completion timeout\n");
+ dev->vchnl_up = false;
+ }
+
+ return ret;
+}
+
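ig3rdma_vchnl_send_sync is a thin synchronous wrapper over the IDPF mailbox: the caller provides request and response buffers, and a timeout is treated as loss of the channel (vchnl_up is cleared) so subsequent commands fail fast instead of blocking. A hedged usage sketch; buffer sizes and contents are placeholders:

	u8 req[16] = {}, resp[64];
	u16 resp_len = sizeof(resp);
	int err;

	err = ig3rdma_vchnl_send_sync(&rf->sc_dev, req, sizeof(req),
				      resp, &resp_len);
	if (!err)
		pr_debug("vchnl reply: %u bytes\n", resp_len);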
+static int ig3rdma_vchnl_init(struct irdma_pci_f *rf,
+ struct iidc_rdma_core_dev_info *cdev_info,
+ u8 *rdma_ver)
+{
+ struct iidc_rdma_priv_dev_info *idc_priv = cdev_info->iidc_priv;
+ struct irdma_vchnl_init_info virt_info;
+ u8 gen = rf->rdma_ver;
+ int ret;
+
+ rf->vchnl_wq = alloc_ordered_workqueue("irdma-virtchnl-wq", 0);
+ if (!rf->vchnl_wq)
+ return -ENOMEM;
+
+ mutex_init(&rf->sc_dev.vchnl_mutex);
+
+ virt_info.is_pf = !idc_priv->ftype;
+ virt_info.hw_rev = gen;
+ virt_info.privileged = gen == IRDMA_GEN_2;
+ virt_info.vchnl_wq = rf->vchnl_wq;
+ ret = irdma_sc_vchnl_init(&rf->sc_dev, &virt_info);
+ if (ret) {
+ destroy_workqueue(rf->vchnl_wq);
+ return ret;
+ }
+
+ *rdma_ver = rf->sc_dev.hw_attrs.uk_attrs.hw_rev;
+
+ return 0;
+}
+
+/**
+ * ig3rdma_request_reset - Request a reset
+ * @rf: RDMA PCI function
+ */
+static void ig3rdma_request_reset(struct irdma_pci_f *rf)
+{
+ ibdev_warn(&rf->iwdev->ibdev, "Requesting a reset\n");
+ idpf_idc_request_reset(rf->cdev, IIDC_FUNC_RESET);
+}
+
+static int ig3rdma_cfg_regions(struct irdma_hw *hw,
+ struct iidc_rdma_core_dev_info *cdev_info)
+{
+ struct iidc_rdma_priv_dev_info *idc_priv = cdev_info->iidc_priv;
+ struct pci_dev *pdev = cdev_info->pdev;
+ int i;
+
+ switch (idc_priv->ftype) {
+ case IIDC_FUNCTION_TYPE_PF:
+ hw->rdma_reg.len = IG3_PF_RDMA_REGION_LEN;
+ hw->rdma_reg.offset = IG3_PF_RDMA_REGION_OFFSET;
+ break;
+ case IIDC_FUNCTION_TYPE_VF:
+ hw->rdma_reg.len = IG3_VF_RDMA_REGION_LEN;
+ hw->rdma_reg.offset = IG3_VF_RDMA_REGION_OFFSET;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ hw->rdma_reg.addr = ioremap(pci_resource_start(pdev, 0) + hw->rdma_reg.offset,
+ hw->rdma_reg.len);
+
+ if (!hw->rdma_reg.addr)
+ return -ENOMEM;
+
+ hw->num_io_regions = le16_to_cpu(idc_priv->num_memory_regions);
+ hw->io_regs = kcalloc(hw->num_io_regions,
+ sizeof(struct irdma_mmio_region), GFP_KERNEL);
+
+ if (!hw->io_regs) {
+ iounmap(hw->rdma_reg.addr);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < hw->num_io_regions; i++) {
+ hw->io_regs[i].addr =
+ idc_priv->mapped_mem_regions[i].region_addr;
+ hw->io_regs[i].len =
+ le64_to_cpu(idc_priv->mapped_mem_regions[i].size);
+ hw->io_regs[i].offset =
+ le64_to_cpu(idc_priv->mapped_mem_regions[i].start_offset);
+ }
+
+ return 0;
+}
+
+static void ig3rdma_decfg_rf(struct irdma_pci_f *rf)
+{
+ struct irdma_hw *hw = &rf->hw;
+
+ destroy_workqueue(rf->vchnl_wq);
+ kfree(hw->io_regs);
+ iounmap(hw->rdma_reg.addr);
+}
+
+static int ig3rdma_cfg_rf(struct irdma_pci_f *rf,
+ struct iidc_rdma_core_dev_info *cdev_info)
+{
+ struct iidc_rdma_priv_dev_info *idc_priv = cdev_info->iidc_priv;
+ int err;
+
+ rf->sc_dev.hw = &rf->hw;
+ rf->cdev = cdev_info;
+ rf->pcidev = cdev_info->pdev;
+ rf->hw.device = &rf->pcidev->dev;
+ rf->msix_count = idc_priv->msix_count;
+ rf->msix_entries = idc_priv->msix_entries;
+
+ err = ig3rdma_vchnl_init(rf, cdev_info, &rf->rdma_ver);
+ if (err)
+ return err;
+
+ err = ig3rdma_cfg_regions(&rf->hw, cdev_info);
+ if (err) {
+ destroy_workqueue(rf->vchnl_wq);
+ return err;
+ }
+
+ rf->protocol_used = IRDMA_ROCE_PROTOCOL_ONLY;
+ rf->rsrc_profile = IRDMA_HMC_PROFILE_DEFAULT;
+ rf->rst_to = IRDMA_RST_TIMEOUT_HZ;
+ rf->gen_ops.request_reset = ig3rdma_request_reset;
+ rf->limits_sel = 7;
+ mutex_init(&rf->ah_tbl_lock);
+
+ return 0;
+}
+
+static int ig3rdma_core_probe(struct auxiliary_device *aux_dev,
+ const struct auxiliary_device_id *id)
+{
+ struct iidc_rdma_core_auxiliary_dev *idc_adev =
+ container_of(aux_dev, struct iidc_rdma_core_auxiliary_dev, adev);
+ struct iidc_rdma_core_dev_info *cdev_info = idc_adev->cdev_info;
+ struct irdma_pci_f *rf;
+ int err;
+
+ rf = kzalloc(sizeof(*rf), GFP_KERNEL);
+ if (!rf)
+ return -ENOMEM;
+
+ err = ig3rdma_cfg_rf(rf, cdev_info);
+ if (err)
+ goto err_cfg_rf;
+
+ err = irdma_ctrl_init_hw(rf);
+ if (err)
+ goto err_ctrl_init;
+
+ auxiliary_set_drvdata(aux_dev, rf);
+
+ err = idpf_idc_vport_dev_ctrl(cdev_info, true);
+ if (err)
+ goto err_vport_ctrl;
+
+ return 0;
+
+err_vport_ctrl:
+ irdma_ctrl_deinit_hw(rf);
+err_ctrl_init:
+ ig3rdma_decfg_rf(rf);
+err_cfg_rf:
+ kfree(rf);
+
+ return err;
+}
+
+static void ig3rdma_core_remove(struct auxiliary_device *aux_dev)
+{
+ struct iidc_rdma_core_auxiliary_dev *idc_adev =
+ container_of(aux_dev, struct iidc_rdma_core_auxiliary_dev, adev);
+ struct iidc_rdma_core_dev_info *cdev_info = idc_adev->cdev_info;
+ struct irdma_pci_f *rf = auxiliary_get_drvdata(aux_dev);
+
+ idpf_idc_vport_dev_ctrl(cdev_info, false);
+ irdma_ctrl_deinit_hw(rf);
+ ig3rdma_decfg_rf(rf);
+ kfree(rf);
+}
+
+static const struct auxiliary_device_id ig3rdma_core_auxiliary_id_table[] = {
+ {.name = "idpf.8086.rdma.core", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(auxiliary, ig3rdma_core_auxiliary_id_table);
+
+struct iidc_rdma_core_auxiliary_drv ig3rdma_core_auxiliary_drv = {
+ .adrv = {
+ .name = "core",
+ .id_table = ig3rdma_core_auxiliary_id_table,
+ .probe = ig3rdma_core_probe,
+ .remove = ig3rdma_core_remove,
+ },
+ .event_handler = ig3rdma_idc_core_event_handler,
+};
diff --git a/drivers/infiniband/hw/irdma/irdma.h b/drivers/infiniband/hw/irdma/irdma.h
index 20d2e7393e3d..ff938a01d70c 100644
--- a/drivers/infiniband/hw/irdma/irdma.h
+++ b/drivers/infiniband/hw/irdma/irdma.h
@@ -32,7 +32,16 @@
#define IRDMA_PFHMC_SDDATALOW_PMSDDATALOW GENMASK(31, 12)
#define IRDMA_PFHMC_SDCMD_PMSDWR BIT(31)
-#define IRDMA_INVALID_CQ_IDX 0xffffffff
+#define IRDMA_INVALID_CQ_IDX 0xffffffff
+#define IRDMA_Q_INVALID_IDX 0xffff
+
+enum irdma_dyn_idx_t {
+ IRDMA_IDX_ITR0 = 0,
+ IRDMA_IDX_ITR1 = 1,
+ IRDMA_IDX_ITR2 = 2,
+ IRDMA_IDX_NOITR = 3,
+};
+
enum irdma_registers {
IRDMA_CQPTAIL,
IRDMA_CQPDB,
@@ -67,6 +76,7 @@ enum irdma_shifts {
IRDMA_CQPSQ_CQ_CEQID_S,
IRDMA_CQPSQ_CQ_CQID_S,
IRDMA_COMMIT_FPM_CQCNT_S,
+ IRDMA_CQPSQ_UPESD_HMCFNID_S,
IRDMA_MAX_SHIFTS,
};
@@ -77,6 +87,7 @@ enum irdma_masks {
IRDMA_CQPSQ_CQ_CEQID_M,
IRDMA_CQPSQ_CQ_CQID_M,
IRDMA_COMMIT_FPM_CQCNT_M,
+ IRDMA_CQPSQ_UPESD_HMCFNID_M,
IRDMA_MAX_MASKS, /* Must be last entry */
};
@@ -92,7 +103,7 @@ struct irdma_mcast_grp_ctx_entry_info {
struct irdma_mcast_grp_info {
u8 dest_mac_addr[ETH_ALEN];
u16 vlan_id;
- u8 hmc_fcn_id;
+ u16 hmc_fcn_id;
bool ipv4_valid:1;
bool vlan_valid:1;
u16 mg_id;
@@ -107,6 +118,9 @@ enum irdma_vers {
IRDMA_GEN_RSVD,
IRDMA_GEN_1,
IRDMA_GEN_2,
+ IRDMA_GEN_3,
+ IRDMA_GEN_NEXT,
+ IRDMA_GEN_MAX = IRDMA_GEN_NEXT - 1,
};
struct irdma_uk_attrs {
@@ -118,6 +132,7 @@ struct irdma_uk_attrs {
u32 max_hw_wq_quanta;
u32 min_hw_cq_size;
u32 max_hw_cq_size;
+ u32 max_hw_srq_quanta;
u16 max_hw_sq_chunk;
u16 min_hw_wq_size;
u8 hw_rev;
@@ -147,10 +162,13 @@ struct irdma_hw_attrs {
u32 max_done_count;
u32 max_sleep_count;
u32 max_cqp_compl_wait_time_ms;
+ u32 min_hw_srq_id;
u16 max_stat_inst;
u16 max_stat_idx;
};
void i40iw_init_hw(struct irdma_sc_dev *dev);
void icrdma_init_hw(struct irdma_sc_dev *dev);
+void ig3rdma_init_hw(struct irdma_sc_dev *dev);
+void __iomem *ig3rdma_get_reg_addr(struct irdma_hw *hw, u64 reg_offset);
#endif /* IRDMA_H*/
diff --git a/drivers/infiniband/hw/irdma/main.c b/drivers/infiniband/hw/irdma/main.c
index 1e840bbd619d..95957d52883d 100644
--- a/drivers/infiniband/hw/irdma/main.c
+++ b/drivers/infiniband/hw/irdma/main.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2015 - 2021 Intel Corporation */
#include "main.h"
+#include <linux/net/intel/iidc_rdma_idpf.h>
MODULE_ALIAS("i40iw");
MODULE_DESCRIPTION("Intel(R) Ethernet Protocol Driver for RDMA");
@@ -38,19 +39,7 @@ static void irdma_unregister_notifiers(void)
unregister_netdevice_notifier(&irdma_netdevice_notifier);
}
-static void irdma_prep_tc_change(struct irdma_device *iwdev)
-{
- iwdev->vsi.tc_change_pending = true;
- irdma_sc_suspend_resume_qps(&iwdev->vsi, IRDMA_OP_SUSPEND);
-
- /* Wait for all qp's to suspend */
- wait_event_timeout(iwdev->suspend_wq,
- !atomic_read(&iwdev->vsi.qp_suspend_reqs),
- msecs_to_jiffies(IRDMA_EVENT_TIMEOUT_MS));
- irdma_ws_reset(&iwdev->vsi);
-}
-
-static void irdma_log_invalid_mtu(u16 mtu, struct irdma_sc_dev *dev)
+void irdma_log_invalid_mtu(u16 mtu, struct irdma_sc_dev *dev)
{
if (mtu < IRDMA_MIN_MTU_IPV4)
ibdev_warn(to_ibdev(dev), "MTU setting [%d] too low for RDMA traffic. Minimum MTU is 576 for IPv4\n", mtu);
@@ -58,35 +47,10 @@ static void irdma_log_invalid_mtu(u16 mtu, struct irdma_sc_dev *dev)
 ibdev_warn(to_ibdev(dev), "MTU setting [%d] too low for RDMA traffic. Minimum MTU is 1280 for IPv6\n", mtu);
}
-static void irdma_fill_qos_info(struct irdma_l2params *l2params,
- struct iidc_rdma_qos_params *qos_info)
+static void ig3rdma_idc_vport_event_handler(struct iidc_rdma_vport_dev_info *cdev_info,
+ struct iidc_rdma_event *event)
{
- int i;
-
- l2params->num_tc = qos_info->num_tc;
- l2params->vsi_prio_type = qos_info->vport_priority_type;
- l2params->vsi_rel_bw = qos_info->vport_relative_bw;
- for (i = 0; i < l2params->num_tc; i++) {
- l2params->tc_info[i].egress_virt_up =
- qos_info->tc_info[i].egress_virt_up;
- l2params->tc_info[i].ingress_virt_up =
- qos_info->tc_info[i].ingress_virt_up;
- l2params->tc_info[i].prio_type = qos_info->tc_info[i].prio_type;
- l2params->tc_info[i].rel_bw = qos_info->tc_info[i].rel_bw;
- l2params->tc_info[i].tc_ctx = qos_info->tc_info[i].tc_ctx;
- }
- for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++)
- l2params->up2tc[i] = qos_info->up2tc[i];
- if (qos_info->pfc_mode == IIDC_DSCP_PFC_MODE) {
- l2params->dscp_mode = true;
- memcpy(l2params->dscp_map, qos_info->dscp_map, sizeof(l2params->dscp_map));
- }
-}
-
-static void irdma_iidc_event_handler(struct iidc_rdma_core_dev_info *cdev_info,
- struct iidc_rdma_event *event)
-{
- struct irdma_device *iwdev = dev_get_drvdata(&cdev_info->adev->dev);
+ struct irdma_device *iwdev = auxiliary_get_drvdata(cdev_info->adev);
struct irdma_l2params l2params = {};
if (*event->type & BIT(IIDC_RDMA_EVENT_AFTER_MTU_CHANGE)) {
@@ -97,248 +61,39 @@ static void irdma_iidc_event_handler(struct iidc_rdma_core_dev_info *cdev_info,
irdma_log_invalid_mtu(l2params.mtu, &iwdev->rf->sc_dev);
irdma_change_l2params(&iwdev->vsi, &l2params);
}
- } else if (*event->type & BIT(IIDC_RDMA_EVENT_BEFORE_TC_CHANGE)) {
- if (iwdev->vsi.tc_change_pending)
- return;
-
- irdma_prep_tc_change(iwdev);
- } else if (*event->type & BIT(IIDC_RDMA_EVENT_AFTER_TC_CHANGE)) {
- struct iidc_rdma_priv_dev_info *iidc_priv = cdev_info->iidc_priv;
-
- if (!iwdev->vsi.tc_change_pending)
- return;
-
- l2params.tc_changed = true;
- ibdev_dbg(&iwdev->ibdev, "CLNT: TC Change\n");
-
- irdma_fill_qos_info(&l2params, &iidc_priv->qos_info);
- if (iwdev->rf->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
- iwdev->dcb_vlan_mode =
- l2params.num_tc > 1 && !l2params.dscp_mode;
- irdma_change_l2params(&iwdev->vsi, &l2params);
- } else if (*event->type & BIT(IIDC_RDMA_EVENT_CRIT_ERR)) {
- ibdev_warn(&iwdev->ibdev, "ICE OICR event notification: oicr = 0x%08x\n",
- event->reg);
- if (event->reg & IRDMAPFINT_OICR_PE_CRITERR_M) {
- u32 pe_criterr;
-
- pe_criterr = readl(iwdev->rf->sc_dev.hw_regs[IRDMA_GLPE_CRITERR]);
-#define IRDMA_Q1_RESOURCE_ERR 0x0001024d
- if (pe_criterr != IRDMA_Q1_RESOURCE_ERR) {
- ibdev_err(&iwdev->ibdev, "critical PE Error, GLPE_CRITERR=0x%08x\n",
- pe_criterr);
- iwdev->rf->reset = true;
- } else {
- ibdev_warn(&iwdev->ibdev, "Q1 Resource Check\n");
- }
- }
- if (event->reg & IRDMAPFINT_OICR_HMC_ERR_M) {
- ibdev_err(&iwdev->ibdev, "HMC Error\n");
- iwdev->rf->reset = true;
- }
- if (event->reg & IRDMAPFINT_OICR_PE_PUSH_M) {
- ibdev_err(&iwdev->ibdev, "PE Push Error\n");
- iwdev->rf->reset = true;
- }
- if (iwdev->rf->reset)
- iwdev->rf->gen_ops.request_reset(iwdev->rf);
}
}
-/**
- * irdma_request_reset - Request a reset
- * @rf: RDMA PCI function
- */
-static void irdma_request_reset(struct irdma_pci_f *rf)
-{
- ibdev_warn(&rf->iwdev->ibdev, "Requesting a reset\n");
- ice_rdma_request_reset(rf->cdev, IIDC_FUNC_RESET);
-}
-
-/**
- * irdma_lan_register_qset - Register qset with LAN driver
- * @vsi: vsi structure
- * @tc_node: Traffic class node
- */
-static int irdma_lan_register_qset(struct irdma_sc_vsi *vsi,
- struct irdma_ws_node *tc_node)
-{
- struct irdma_device *iwdev = vsi->back_vsi;
- struct iidc_rdma_core_dev_info *cdev_info;
- struct iidc_rdma_qset_params qset = {};
- int ret;
-
- cdev_info = iwdev->rf->cdev;
- qset.qs_handle = tc_node->qs_handle;
- qset.tc = tc_node->traffic_class;
- qset.vport_id = vsi->vsi_idx;
- ret = ice_add_rdma_qset(cdev_info, &qset);
- if (ret) {
- ibdev_dbg(&iwdev->ibdev, "WS: LAN alloc_res for rdma qset failed.\n");
- return ret;
- }
-
- tc_node->l2_sched_node_id = qset.teid;
- vsi->qos[tc_node->user_pri].l2_sched_node_id = qset.teid;
-
- return 0;
-}
-
-/**
- * irdma_lan_unregister_qset - Unregister qset with LAN driver
- * @vsi: vsi structure
- * @tc_node: Traffic class node
- */
-static void irdma_lan_unregister_qset(struct irdma_sc_vsi *vsi,
- struct irdma_ws_node *tc_node)
+static int ig3rdma_vport_probe(struct auxiliary_device *aux_dev,
+ const struct auxiliary_device_id *id)
{
- struct irdma_device *iwdev = vsi->back_vsi;
- struct iidc_rdma_core_dev_info *cdev_info;
- struct iidc_rdma_qset_params qset = {};
-
- cdev_info = iwdev->rf->cdev;
- qset.qs_handle = tc_node->qs_handle;
- qset.tc = tc_node->traffic_class;
- qset.vport_id = vsi->vsi_idx;
- qset.teid = tc_node->l2_sched_node_id;
-
- if (ice_del_rdma_qset(cdev_info, &qset))
- ibdev_dbg(&iwdev->ibdev, "WS: LAN free_res for rdma qset failed.\n");
-}
-
-static int irdma_init_interrupts(struct irdma_pci_f *rf, struct iidc_rdma_core_dev_info *cdev)
-{
- int i;
-
- rf->msix_count = num_online_cpus() + IRDMA_NUM_AEQ_MSIX;
- rf->msix_entries = kcalloc(rf->msix_count, sizeof(*rf->msix_entries),
- GFP_KERNEL);
- if (!rf->msix_entries)
- return -ENOMEM;
-
- for (i = 0; i < rf->msix_count; i++)
- if (ice_alloc_rdma_qvector(cdev, &rf->msix_entries[i]))
- break;
-
- if (i < IRDMA_MIN_MSIX) {
- while (--i >= 0)
- ice_free_rdma_qvector(cdev, &rf->msix_entries[i]);
+ struct iidc_rdma_vport_auxiliary_dev *idc_adev =
+ container_of(aux_dev, struct iidc_rdma_vport_auxiliary_dev, adev);
+ struct auxiliary_device *aux_core_dev = idc_adev->vdev_info->core_adev;
+ struct irdma_pci_f *rf = auxiliary_get_drvdata(aux_core_dev);
+ struct irdma_l2params l2params = {};
+ struct irdma_device *iwdev;
+ int err;
- kfree(rf->msix_entries);
+ if (!rf) {
+ WARN_ON_ONCE(1);
return -ENOMEM;
}
-
- rf->msix_count = i;
-
- return 0;
-}
-
-static void irdma_deinit_interrupts(struct irdma_pci_f *rf, struct iidc_rdma_core_dev_info *cdev)
-{
- int i;
-
- for (i = 0; i < rf->msix_count; i++)
- ice_free_rdma_qvector(cdev, &rf->msix_entries[i]);
-
- kfree(rf->msix_entries);
-}
-
-static void irdma_remove(struct auxiliary_device *aux_dev)
-{
- struct irdma_device *iwdev = auxiliary_get_drvdata(aux_dev);
- struct iidc_rdma_core_auxiliary_dev *iidc_adev;
- struct iidc_rdma_core_dev_info *cdev_info;
-
- iidc_adev = container_of(aux_dev, struct iidc_rdma_core_auxiliary_dev, adev);
- cdev_info = iidc_adev->cdev_info;
-
- ice_rdma_update_vsi_filter(cdev_info, iwdev->vsi_num, false);
- irdma_ib_unregister_device(iwdev);
- irdma_deinit_interrupts(iwdev->rf, cdev_info);
-
- kfree(iwdev->rf);
-
- pr_debug("INIT: Gen2 PF[%d] device remove success\n", PCI_FUNC(cdev_info->pdev->devfn));
-}
-
-static void irdma_fill_device_info(struct irdma_device *iwdev,
- struct iidc_rdma_core_dev_info *cdev_info)
-{
- struct iidc_rdma_priv_dev_info *iidc_priv = cdev_info->iidc_priv;
- struct irdma_pci_f *rf = iwdev->rf;
-
- rf->sc_dev.hw = &rf->hw;
- rf->iwdev = iwdev;
- rf->cdev = cdev_info;
- rf->hw.hw_addr = iidc_priv->hw_addr;
- rf->pcidev = cdev_info->pdev;
- rf->hw.device = &rf->pcidev->dev;
- rf->pf_id = iidc_priv->pf_id;
- rf->gen_ops.register_qset = irdma_lan_register_qset;
- rf->gen_ops.unregister_qset = irdma_lan_unregister_qset;
-
- rf->default_vsi.vsi_idx = iidc_priv->vport_id;
- rf->protocol_used =
- cdev_info->rdma_protocol == IIDC_RDMA_PROTOCOL_ROCEV2 ?
- IRDMA_ROCE_PROTOCOL_ONLY : IRDMA_IWARP_PROTOCOL_ONLY;
- rf->rdma_ver = IRDMA_GEN_2;
- rf->rsrc_profile = IRDMA_HMC_PROFILE_DEFAULT;
- rf->rst_to = IRDMA_RST_TIMEOUT_HZ;
- rf->gen_ops.request_reset = irdma_request_reset;
- rf->limits_sel = 7;
- rf->iwdev = iwdev;
-
- mutex_init(&iwdev->ah_tbl_lock);
-
- iwdev->netdev = iidc_priv->netdev;
- iwdev->vsi_num = iidc_priv->vport_id;
+ iwdev = ib_alloc_device(irdma_device, ibdev);
+ if (!iwdev)
+ return -ENOMEM;
+
+ /* Fill iwdev info */
+ iwdev->is_vport = true;
+ iwdev->rf = rf;
+ iwdev->vport_id = idc_adev->vdev_info->vport_id;
+ iwdev->netdev = idc_adev->vdev_info->netdev;
iwdev->init_state = INITIAL_STATE;
iwdev->roce_cwnd = IRDMA_ROCE_CWND_DEFAULT;
iwdev->roce_ackcreds = IRDMA_ROCE_ACKCREDS_DEFAULT;
iwdev->rcv_wnd = IRDMA_CM_DEFAULT_RCV_WND_SCALED;
iwdev->rcv_wscale = IRDMA_CM_DEFAULT_RCV_WND_SCALE;
- if (rf->protocol_used == IRDMA_ROCE_PROTOCOL_ONLY)
- iwdev->roce_mode = true;
-}
-
-static int irdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_device_id *id)
-{
- struct iidc_rdma_core_auxiliary_dev *iidc_adev;
- struct iidc_rdma_core_dev_info *cdev_info;
- struct iidc_rdma_priv_dev_info *iidc_priv;
- struct irdma_l2params l2params = {};
- struct irdma_device *iwdev;
- struct irdma_pci_f *rf;
- int err;
-
- iidc_adev = container_of(aux_dev, struct iidc_rdma_core_auxiliary_dev, adev);
- cdev_info = iidc_adev->cdev_info;
- iidc_priv = cdev_info->iidc_priv;
-
- iwdev = ib_alloc_device(irdma_device, ibdev);
- if (!iwdev)
- return -ENOMEM;
- iwdev->rf = kzalloc(sizeof(*rf), GFP_KERNEL);
- if (!iwdev->rf) {
- ib_dealloc_device(&iwdev->ibdev);
- return -ENOMEM;
- }
-
- irdma_fill_device_info(iwdev, cdev_info);
- rf = iwdev->rf;
-
- err = irdma_init_interrupts(rf, cdev_info);
- if (err)
- goto err_init_interrupts;
-
- err = irdma_ctrl_init_hw(rf);
- if (err)
- goto err_ctrl_init;
+ iwdev->roce_mode = true;
+ iwdev->push_mode = false;
l2params.mtu = iwdev->netdev->mtu;
- irdma_fill_qos_info(&l2params, &iidc_priv->qos_info);
- if (iwdev->rf->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
- iwdev->dcb_vlan_mode = l2params.num_tc > 1 && !l2params.dscp_mode;
err = irdma_rt_init_hw(iwdev, &l2params);
if (err)
@@ -348,43 +103,57 @@ static int irdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_
if (err)
goto err_ibreg;
- ice_rdma_update_vsi_filter(cdev_info, iwdev->vsi_num, true);
-
- ibdev_dbg(&iwdev->ibdev, "INIT: Gen2 PF[%d] device probe success\n", PCI_FUNC(rf->pcidev->devfn));
auxiliary_set_drvdata(aux_dev, iwdev);
- return 0;
+ ibdev_dbg(&iwdev->ibdev,
+ "INIT: Gen[%d] vport[%d] probe success. dev_name = %s, core_dev_name = %s, netdev=%s\n",
+ rf->rdma_ver, idc_adev->vdev_info->vport_id,
+ dev_name(&aux_dev->dev),
+ dev_name(&idc_adev->vdev_info->core_adev->dev),
+ netdev_name(idc_adev->vdev_info->netdev));
+ return 0;
err_ibreg:
irdma_rt_deinit_hw(iwdev);
err_rt_init:
- irdma_ctrl_deinit_hw(rf);
-err_ctrl_init:
- irdma_deinit_interrupts(rf, cdev_info);
-err_init_interrupts:
- kfree(iwdev->rf);
ib_dealloc_device(&iwdev->ibdev);
return err;
}
-static const struct auxiliary_device_id irdma_auxiliary_id_table[] = {
- {.name = "ice.iwarp", },
- {.name = "ice.roce", },
+static void ig3rdma_vport_remove(struct auxiliary_device *aux_dev)
+{
+ struct iidc_rdma_vport_auxiliary_dev *idc_adev =
+ container_of(aux_dev, struct iidc_rdma_vport_auxiliary_dev, adev);
+ struct irdma_device *iwdev = auxiliary_get_drvdata(aux_dev);
+
+ ibdev_dbg(&iwdev->ibdev,
+ "INIT: Gen[%d] dev_name = %s, core_dev_name = %s, netdev=%s\n",
+ iwdev->rf->rdma_ver, dev_name(&aux_dev->dev),
+ dev_name(&idc_adev->vdev_info->core_adev->dev),
+ netdev_name(idc_adev->vdev_info->netdev));
+
+ irdma_ib_unregister_device(iwdev);
+}
+
+static const struct auxiliary_device_id ig3rdma_vport_auxiliary_id_table[] = {
+ {.name = "idpf.8086.rdma.vdev", },
{},
};
-MODULE_DEVICE_TABLE(auxiliary, irdma_auxiliary_id_table);
+MODULE_DEVICE_TABLE(auxiliary, ig3rdma_vport_auxiliary_id_table);
-static struct iidc_rdma_core_auxiliary_drv irdma_auxiliary_drv = {
+static struct iidc_rdma_vport_auxiliary_drv ig3rdma_vport_auxiliary_drv = {
.adrv = {
- .id_table = irdma_auxiliary_id_table,
- .probe = irdma_probe,
- .remove = irdma_remove,
+ .name = "vdev",
+ .id_table = ig3rdma_vport_auxiliary_id_table,
+ .probe = ig3rdma_vport_probe,
+ .remove = ig3rdma_vport_remove,
},
- .event_handler = irdma_iidc_event_handler,
+ .event_handler = ig3rdma_idc_vport_event_handler,
};
+
static int __init irdma_init_module(void)
{
int ret;
@@ -396,14 +165,34 @@ static int __init irdma_init_module(void)
return ret;
}
- ret = auxiliary_driver_register(&irdma_auxiliary_drv.adrv);
+ ret = auxiliary_driver_register(&icrdma_core_auxiliary_drv.adrv);
+ if (ret) {
+ auxiliary_driver_unregister(&i40iw_auxiliary_drv);
+ pr_err("Failed icrdma(gen_2) auxiliary_driver_register() ret=%d\n",
+ ret);
+ return ret;
+ }
+
+ ret = auxiliary_driver_register(&ig3rdma_core_auxiliary_drv.adrv);
if (ret) {
+ auxiliary_driver_unregister(&icrdma_core_auxiliary_drv.adrv);
auxiliary_driver_unregister(&i40iw_auxiliary_drv);
- pr_err("Failed irdma auxiliary_driver_register() ret=%d\n",
+ pr_err("Failed ig3rdma(gen_3) core auxiliary_driver_register() ret=%d\n",
ret);
+
return ret;
}
+ ret = auxiliary_driver_register(&ig3rdma_vport_auxiliary_drv.adrv);
+ if (ret) {
+ auxiliary_driver_unregister(&ig3rdma_core_auxiliary_drv.adrv);
+ auxiliary_driver_unregister(&icrdma_core_auxiliary_drv.adrv);
+ auxiliary_driver_unregister(&i40iw_auxiliary_drv);
+ pr_err("Failed ig3rdma vport auxiliary_driver_register() ret=%d\n",
+ ret);
+
+ return ret;
+ }
irdma_register_notifiers();
return 0;
@@ -412,8 +201,10 @@ static int __init irdma_init_module(void)
static void __exit irdma_exit_module(void)
{
irdma_unregister_notifiers();
- auxiliary_driver_unregister(&irdma_auxiliary_drv.adrv);
+ auxiliary_driver_unregister(&icrdma_core_auxiliary_drv.adrv);
auxiliary_driver_unregister(&i40iw_auxiliary_drv);
+ auxiliary_driver_unregister(&ig3rdma_vport_auxiliary_drv.adrv);
+ auxiliary_driver_unregister(&ig3rdma_core_auxiliary_drv.adrv);
}
module_init(irdma_init_module);
diff --git a/drivers/infiniband/hw/irdma/main.h b/drivers/infiniband/hw/irdma/main.h
index 674acc952168..886b30da188a 100644
--- a/drivers/infiniband/hw/irdma/main.h
+++ b/drivers/infiniband/hw/irdma/main.h
@@ -30,7 +30,6 @@
#endif
#include <linux/auxiliary_bus.h>
#include <linux/net/intel/iidc_rdma.h>
-#include <linux/net/intel/iidc_rdma_ice.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_pack.h>
@@ -54,6 +53,8 @@
#include "puda.h"
extern struct auxiliary_driver i40iw_auxiliary_drv;
+extern struct iidc_rdma_core_auxiliary_drv icrdma_core_auxiliary_drv;
+extern struct iidc_rdma_core_auxiliary_drv ig3rdma_core_auxiliary_drv;
#define IRDMA_FW_VER_DEFAULT 2
#define IRDMA_HW_VER 2
@@ -65,7 +66,8 @@ extern struct auxiliary_driver i40iw_auxiliary_drv;
#define IRDMA_MACIP_ADD 1
#define IRDMA_MACIP_DELETE 2
-#define IW_CCQ_SIZE (IRDMA_CQP_SW_SQSIZE_2048 + 1)
+#define IW_GEN_3_CCQ_SIZE (2 * IRDMA_CQP_SW_SQSIZE_2048 + 2)
+#define IW_CCQ_SIZE (IRDMA_CQP_SW_SQSIZE_2048 + 2)
#define IW_CEQ_SIZE 2048
#define IW_AEQ_SIZE 2048
@@ -127,12 +129,12 @@ enum init_completion_state {
HMC_OBJS_CREATED,
HW_RSRC_INITIALIZED,
CCQ_CREATED,
- CEQ0_CREATED, /* Last state of probe */
- ILQ_CREATED,
- IEQ_CREATED,
+ CEQ0_CREATED,
CEQS_CREATED,
PBLE_CHUNK_MEM,
AEQ_CREATED,
+ ILQ_CREATED,
+ IEQ_CREATED, /* Last state of probe */
IP_ADDR_REGISTERED, /* Last state of open */
};
@@ -167,6 +169,7 @@ struct irdma_cqp_request {
bool request_done; /* READ/WRITE_ONCE macros operate on it */
bool waiting:1;
bool dynamic:1;
+ bool pending:1;
};
struct irdma_cqp {
@@ -179,6 +182,7 @@ struct irdma_cqp {
struct irdma_dma_mem host_ctx;
u64 *scratch_array;
struct irdma_cqp_request *cqp_requests;
+ struct irdma_ooo_cqp_op *ooo_op_array;
struct list_head cqp_avail_reqs;
struct list_head cqp_pending_reqs;
};
@@ -257,6 +261,7 @@ struct irdma_pci_f {
bool reset:1;
bool rsrc_created:1;
bool msix_shared:1;
+ bool hwqp1_rsvd:1;
u8 rsrc_profile;
u8 *hmc_info_mem;
u8 *mem_rsrc;
@@ -269,6 +274,8 @@ struct irdma_pci_f {
u32 max_mr;
u32 max_qp;
u32 max_cq;
+ u32 max_srq;
+ u32 next_srq;
u32 max_ah;
u32 next_ah;
u32 max_mcg;
@@ -282,6 +289,7 @@ struct irdma_pci_f {
u32 mr_stagmask;
u32 used_pds;
u32 used_cqs;
+ u32 used_srqs;
u32 used_mrs;
u32 used_qps;
u32 arp_table_size;
@@ -293,6 +301,7 @@ struct irdma_pci_f {
unsigned long *allocated_ws_nodes;
unsigned long *allocated_qps;
unsigned long *allocated_cqs;
+ unsigned long *allocated_srqs;
unsigned long *allocated_mrs;
unsigned long *allocated_pds;
unsigned long *allocated_mcgs;
@@ -327,10 +336,13 @@ struct irdma_pci_f {
wait_queue_head_t vchnl_waitq;
struct workqueue_struct *cqp_cmpl_wq;
struct work_struct cqp_cmpl_work;
+ struct workqueue_struct *vchnl_wq;
struct irdma_sc_vsi default_vsi;
void *back_fcn;
struct irdma_gen_ops gen_ops;
struct irdma_device *iwdev;
+ DECLARE_HASHTABLE(ah_hash_tbl, 8);
+ struct mutex ah_tbl_lock; /* protect AH hash table access */
};
struct irdma_device {
@@ -340,8 +352,6 @@ struct irdma_device {
struct workqueue_struct *cleanup_wq;
struct irdma_sc_vsi vsi;
struct irdma_cm_core cm_core;
- DECLARE_HASHTABLE(ah_hash_tbl, 8);
- struct mutex ah_tbl_lock; /* protect AH hash table access */
u32 roce_cwnd;
u32 roce_ackcreds;
u32 vendor_id;
@@ -350,12 +360,14 @@ struct irdma_device {
u32 rcv_wnd;
u16 mac_ip_table_idx;
u16 vsi_num;
+ u16 vport_id;
u8 rcv_wscale;
u8 iw_status;
bool roce_mode:1;
bool roce_dcqcn_en:1;
bool dcb_vlan_mode:1;
bool iw_ooo:1;
+ bool is_vport:1;
enum init_completion_state init_state;
wait_queue_head_t suspend_wq;
@@ -413,6 +425,11 @@ static inline struct irdma_pci_f *dev_to_rf(struct irdma_sc_dev *dev)
return container_of(dev, struct irdma_pci_f, sc_dev);
}
+static inline struct irdma_srq *to_iwsrq(struct ib_srq *ibsrq)
+{
+ return container_of(ibsrq, struct irdma_srq, ibsrq);
+}
+
/**
* irdma_alloc_resource - allocate a resource
* @iwdev: device pointer
@@ -508,7 +525,8 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
void irdma_cq_add_ref(struct ib_cq *ibcq);
void irdma_cq_rem_ref(struct ib_cq *ibcq);
void irdma_cq_wq_destroy(struct irdma_pci_f *rf, struct irdma_sc_cq *cq);
-
+void irdma_srq_event(struct irdma_sc_srq *srq);
+void irdma_srq_wq_destroy(struct irdma_pci_f *rf, struct irdma_sc_srq *srq);
void irdma_cleanup_pending_cqp_op(struct irdma_pci_f *rf);
int irdma_hw_modify_qp(struct irdma_device *iwdev, struct irdma_qp *iwqp,
struct irdma_modify_qp_info *info, bool wait);
@@ -557,4 +575,5 @@ int irdma_netdevice_event(struct notifier_block *notifier, unsigned long event,
void *ptr);
void irdma_add_ip(struct irdma_device *iwdev);
void cqp_compl_worker(struct work_struct *work);
+void irdma_log_invalid_mtu(u16 mtu, struct irdma_sc_dev *dev);
#endif /* IRDMA_MAIN_H */
diff --git a/drivers/infiniband/hw/irdma/pble.c b/drivers/infiniband/hw/irdma/pble.c
index 37ce35cb10e7..3091f9345f12 100644
--- a/drivers/infiniband/hw/irdma/pble.c
+++ b/drivers/infiniband/hw/irdma/pble.c
@@ -193,8 +193,15 @@ static enum irdma_sd_entry_type irdma_get_type(struct irdma_sc_dev *dev,
{
enum irdma_sd_entry_type sd_entry_type;
- sd_entry_type = !idx->rel_pd_idx && pages == IRDMA_HMC_PD_CNT_IN_SD ?
- IRDMA_SD_TYPE_DIRECT : IRDMA_SD_TYPE_PAGED;
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3)
+ sd_entry_type = (!idx->rel_pd_idx &&
+ pages == IRDMA_HMC_PD_CNT_IN_SD) ?
+ IRDMA_SD_TYPE_DIRECT : IRDMA_SD_TYPE_PAGED;
+ else
+ sd_entry_type = (!idx->rel_pd_idx &&
+ pages == IRDMA_HMC_PD_CNT_IN_SD &&
+ dev->privileged) ?
+ IRDMA_SD_TYPE_DIRECT : IRDMA_SD_TYPE_PAGED;
return sd_entry_type;
}
@@ -279,10 +286,11 @@ static int add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
sd_reg_val = (sd_entry_type == IRDMA_SD_TYPE_PAGED) ?
sd_entry->u.pd_table.pd_page_addr.pa :
sd_entry->u.bp.addr.pa;
-
- if (!sd_entry->valid) {
- ret_code = irdma_hmc_sd_one(dev, hmc_info->hmc_fn_id, sd_reg_val,
- idx->sd_idx, sd_entry->entry_type, true);
+ if ((dev->privileged && !sd_entry->valid) ||
+ dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3) {
+ ret_code = irdma_hmc_sd_one(dev, hmc_info->hmc_fn_id,
+ sd_reg_val, idx->sd_idx,
+ sd_entry->entry_type, true);
if (ret_code)
goto error;
}
diff --git a/drivers/infiniband/hw/irdma/protos.h b/drivers/infiniband/hw/irdma/protos.h
index c0c9441885d3..324cfbf21764 100644
--- a/drivers/infiniband/hw/irdma/protos.h
+++ b/drivers/infiniband/hw/irdma/protos.h
@@ -10,6 +10,7 @@
#define ALL_TC2PFC 0xff
#define CQP_COMPL_WAIT_TIME_MS 10
#define CQP_TIMEOUT_THRESHOLD 500
+#define CQP_DEF_CMPL_TIMEOUT_THRESHOLD 2500
/* init operations */
int irdma_sc_dev_init(enum irdma_vers ver, struct irdma_sc_dev *dev,
diff --git a/drivers/infiniband/hw/irdma/puda.h b/drivers/infiniband/hw/irdma/puda.h
index 2fc638f2b143..d65041bee667 100644
--- a/drivers/infiniband/hw/irdma/puda.h
+++ b/drivers/infiniband/hw/irdma/puda.h
@@ -91,7 +91,7 @@ struct irdma_puda_rsrc_info {
u32 rq_size;
u32 tx_buf_cnt; /* total bufs allocated will be rq_size + tx_buf_cnt */
u16 buf_size;
- u8 stats_idx;
+ u16 stats_idx;
bool stats_idx_valid:1;
int abi_ver;
};
@@ -140,7 +140,7 @@ struct irdma_puda_rsrc {
u64 crc_err;
u64 pmode_count;
u64 partials_handled;
- u8 stats_idx;
+ u16 stats_idx;
bool check_crc:1;
bool stats_idx_valid:1;
};
diff --git a/drivers/infiniband/hw/irdma/type.h b/drivers/infiniband/hw/irdma/type.h
index 527c6da2c1ac..4ae77cdde9dc 100644
--- a/drivers/infiniband/hw/irdma/type.h
+++ b/drivers/infiniband/hw/irdma/type.h
@@ -8,6 +8,8 @@
#include "hmc.h"
#include "uda.h"
#include "ws.h"
+#include "virtchnl.h"
+
#define IRDMA_DEBUG_ERR "ERR"
#define IRDMA_DEBUG_INIT "INIT"
#define IRDMA_DEBUG_DEV "DEV"
@@ -95,12 +97,6 @@ enum irdma_term_mpa_errors {
MPA_REQ_RSP = 0x04,
};
-enum irdma_qp_event_type {
- IRDMA_QP_EVENT_CATASTROPHIC,
- IRDMA_QP_EVENT_ACCESS_ERR,
- IRDMA_QP_EVENT_REQ_ERR,
-};
-
enum irdma_hw_stats_index {
/* gen1 - 32-bit */
IRDMA_HW_STAT_INDEX_IP4RXDISCARD = 0,
@@ -154,12 +150,46 @@ enum irdma_hw_stats_index {
IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED = 44,
IRDMA_HW_STAT_INDEX_TXNPCNPSENT = 45,
IRDMA_HW_STAT_INDEX_MAX_GEN_2 = 46,
+
+ /* gen3 */
+ IRDMA_HW_STAT_INDEX_RNR_SENT = 46,
+ IRDMA_HW_STAT_INDEX_RNR_RCVD = 47,
+ IRDMA_HW_STAT_INDEX_RDMAORDLMTCNT = 48,
+ IRDMA_HW_STAT_INDEX_RDMAIRDLMTCNT = 49,
+ IRDMA_HW_STAT_INDEX_RDMARXATS = 50,
+ IRDMA_HW_STAT_INDEX_RDMATXATS = 51,
+ IRDMA_HW_STAT_INDEX_NAKSEQERR = 52,
+ IRDMA_HW_STAT_INDEX_NAKSEQERR_IMPLIED = 53,
+ IRDMA_HW_STAT_INDEX_RTO = 54,
+ IRDMA_HW_STAT_INDEX_RXOOOPKTS = 55,
+ IRDMA_HW_STAT_INDEX_ICRCERR = 56,
+
+ IRDMA_HW_STAT_INDEX_MAX_GEN_3 = 57,
};
enum irdma_feature_type {
IRDMA_FEATURE_FW_INFO = 0,
IRDMA_HW_VERSION_INFO = 1,
+ IRDMA_QP_MAX_INCR = 2,
+ IRDMA_CQ_MAX_INCR = 3,
+ IRDMA_CEQ_MAX_INCR = 4,
+ IRDMA_SD_MAX_INCR = 5,
+ IRDMA_MR_MAX_INCR = 6,
+ IRDMA_Q1_MAX_INCR = 7,
+ IRDMA_AH_MAX_INCR = 8,
+ IRDMA_SRQ_MAX_INCR = 9,
+ IRDMA_TIMER_MAX_INCR = 10,
+ IRDMA_XF_MAX_INCR = 11,
+ IRDMA_RRF_MAX_INCR = 12,
+ IRDMA_PBLE_MAX_INCR = 13,
+ IRDMA_OBJ_1 = 22,
+ IRDMA_OBJ_2 = 23,
+ IRDMA_ENDPT_TRK = 24,
+ IRDMA_FTN_INLINE_MAX = 25,
IRDMA_QSETS_MAX = 26,
+ IRDMA_ASO = 27,
+ IRDMA_FTN_FLAGS = 32,
+ IRDMA_FTN_NOP = 33,
IRDMA_MAX_FEATURES, /* Must be last entry */
};
@@ -206,6 +236,7 @@ enum irdma_syn_rst_handling {
enum irdma_queue_type {
IRDMA_QUEUE_TYPE_SQ_RQ = 0,
IRDMA_QUEUE_TYPE_CQP,
+ IRDMA_QUEUE_TYPE_SRQ,
};
struct irdma_sc_dev;
@@ -233,12 +264,22 @@ struct irdma_cqp_init_info {
__le64 *host_ctx;
u64 *scratch_array;
u32 sq_size;
+ struct irdma_ooo_cqp_op *ooo_op_array;
+ u32 pe_en_vf_cnt;
u16 hw_maj_ver;
u16 hw_min_ver;
u8 struct_ver;
u8 hmc_profile;
u8 ena_vf_count;
u8 ceqs_per_vf;
+ u8 ooisc_blksize;
+ u8 rrsp_blksize;
+ u8 q1_blksize;
+ u8 xmit_blksize;
+ u8 ts_override;
+ u8 ts_shift;
+ u8 en_fine_grained_timers;
+ u8 blksizes_valid;
bool en_datacenter_tcp:1;
bool disable_packed:1;
bool rocev2_rto_policy:1;
@@ -310,9 +351,21 @@ struct irdma_vsi_pestat {
spinlock_t lock; /* rdma stats lock */
};
+struct irdma_mmio_region {
+ u8 __iomem *addr;
+ resource_size_t len;
+ resource_size_t offset;
+};
+
struct irdma_hw {
- u8 __iomem *hw_addr;
- u8 __iomem *priv_hw_addr;
+ union {
+ u8 __iomem *hw_addr;
+ struct {
+ struct irdma_mmio_region rdma_reg; /* RDMA region */
+ struct irdma_mmio_region *io_regs; /* Non-RDMA MMIO regions */
+ u16 num_io_regions; /* Number of Non-RDMA MMIO regions */
+ };
+ };
struct device *device;
struct irdma_hmc_info hmc;
};
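The anonymous union lets pre-GEN3 code keep treating the device as one flat hw_addr mapping while GEN3 reinterprets the same storage as a discrete region table; which view is valid is keyed off the hardware revision. A hedged dispatch sketch, assuming irdma_sc_dev still carries its struct irdma_hw pointer:

static void __iomem *hw_reg(struct irdma_sc_dev *dev, u64 off)
{
	if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3)
		return ig3rdma_get_reg_addr(dev->hw, off);	/* region walk */

	return dev->hw->hw_addr + off;	/* flat pre-GEN3 mapping */
}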
@@ -351,7 +404,21 @@ struct irdma_cqp_quanta {
__le64 elem[IRDMA_CQP_WQE_SIZE];
};
+struct irdma_ooo_cqp_op {
+ struct list_head list_entry;
+ u64 scratch;
+ u32 def_info;
+ u32 sw_def_info;
+ u32 wqe_idx;
+ bool deferred:1;
+};
+
struct irdma_sc_cqp {
+ spinlock_t ooo_list_lock; /* protects list of pending completions */
+ struct list_head ooo_avail;
+ struct list_head ooo_pnd;
+ u32 last_def_cmpl_ticket;
+ u32 sw_def_cmpl_ticket;
u32 size;
u64 sq_pa;
u64 host_ctx_pa;
@@ -367,8 +434,10 @@ struct irdma_sc_cqp {
u64 *scratch_array;
u64 requested_ops;
atomic64_t completed_ops;
+ struct irdma_ooo_cqp_op *ooo_op_array;
u32 cqp_id;
u32 sq_size;
+ u32 pe_en_vf_cnt;
u32 hw_sq_size;
u16 hw_maj_ver;
u16 hw_min_ver;
@@ -378,6 +447,14 @@ struct irdma_sc_cqp {
u8 ena_vf_count;
u8 timeout_count;
u8 ceqs_per_vf;
+ u8 ooisc_blksize;
+ u8 rrsp_blksize;
+ u8 q1_blksize;
+ u8 xmit_blksize;
+ u8 ts_override;
+ u8 ts_shift;
+ u8 en_fine_grained_timers;
+ u8 blksizes_valid;
bool en_datacenter_tcp:1;
bool disable_packed:1;
bool rocev2_rto_policy:1;
@@ -397,6 +474,8 @@ struct irdma_sc_aeq {
u32 msix_idx;
u8 polarity;
bool virtual_map:1;
+ bool pasid_valid:1;
+ u32 pasid;
};
struct irdma_sc_ceq {
@@ -412,13 +491,15 @@ struct irdma_sc_ceq {
u8 tph_val;
u32 first_pm_pbl_idx;
u8 polarity;
- struct irdma_sc_vsi *vsi;
+ u16 vsi_idx;
struct irdma_sc_cq **reg_cq;
u32 reg_cq_size;
spinlock_t req_cq_lock; /* protect access to reg_cq array */
bool virtual_map:1;
bool tph_en:1;
bool itr_no_expire:1;
+ bool pasid_valid:1;
+ u32 pasid;
};
struct irdma_sc_cq {
@@ -426,6 +507,7 @@ struct irdma_sc_cq {
u64 cq_pa;
u64 shadow_area_pa;
struct irdma_sc_dev *dev;
+ u16 vsi_idx;
struct irdma_sc_vsi *vsi;
void *pbl_list;
void *back_cq;
@@ -477,8 +559,13 @@ struct irdma_sc_qp {
bool virtual_map:1;
bool flush_sq:1;
bool flush_rq:1;
+ bool err_sq_idx_valid:1;
+ bool err_rq_idx_valid:1;
+ u32 err_sq_idx;
+ u32 err_rq_idx;
bool sq_flush_code:1;
bool rq_flush_code:1;
+ u32 pkt_limit;
enum irdma_flush_opcode flush_code;
enum irdma_qp_event_type event_type;
u8 term_flags;
@@ -489,13 +576,13 @@ struct irdma_sc_qp {
struct irdma_stats_inst_info {
bool use_hmc_fcn_index;
u8 hmc_fn_id;
- u8 stats_idx;
+ u16 stats_idx;
};
struct irdma_up_info {
u8 map[8];
u8 cnp_up_override;
- u8 hmc_fcn_idx;
+ u16 hmc_fcn_idx;
bool use_vlan:1;
bool use_cnp_up_override:1;
};
@@ -518,6 +605,8 @@ struct irdma_ws_node_info {
struct irdma_hmc_fpm_misc {
u32 max_ceqs;
u32 max_sds;
+ u32 loc_mem_pages;
+ u8 ird;
u32 xf_block_size;
u32 q1_block_size;
u32 ht_multiplier;
@@ -526,6 +615,7 @@ struct irdma_hmc_fpm_misc {
u32 ooiscf_block_size;
};
+#define IRDMA_VCHNL_MAX_MSG_SIZE 512
#define IRDMA_LEAF_DEFAULT_REL_BW 64
#define IRDMA_PARENT_DEFAULT_REL_BW 1
@@ -601,19 +691,28 @@ struct irdma_sc_dev {
u64 cqp_cmd_stats[IRDMA_MAX_CQP_OPS];
struct irdma_hw_attrs hw_attrs;
struct irdma_hmc_info *hmc_info;
+ struct irdma_vchnl_rdma_caps vc_caps;
+ u8 vc_recv_buf[IRDMA_VCHNL_MAX_MSG_SIZE];
+ u16 vc_recv_len;
struct irdma_sc_cqp *cqp;
struct irdma_sc_aeq *aeq;
struct irdma_sc_ceq *ceq[IRDMA_CEQ_MAX_COUNT];
struct irdma_sc_cq *ccq;
const struct irdma_irq_ops *irq_ops;
+ struct irdma_qos qos[IRDMA_MAX_USER_PRIORITY];
struct irdma_hmc_fpm_misc hmc_fpm_misc;
struct irdma_ws_node *ws_tree_root;
struct mutex ws_mutex; /* ws tree mutex */
+ u32 vchnl_ver;
u16 num_vfs;
- u8 hmc_fn_id;
+ u16 hmc_fn_id;
u8 vf_id;
+ bool privileged:1;
bool vchnl_up:1;
bool ceq_valid:1;
+ bool is_pf:1;
+ u8 protocol_used;
+ struct mutex vchnl_mutex; /* mutex to synchronize RDMA virtual channel messages */
u8 pci_rev;
int (*ws_add)(struct irdma_sc_vsi *vsi, u8 user_pri);
void (*ws_remove)(struct irdma_sc_vsi *vsi, u8 user_pri);
@@ -632,6 +731,51 @@ struct irdma_modify_cq_info {
bool cq_resize:1;
};
+struct irdma_srq_init_info {
+ struct irdma_sc_pd *pd;
+ struct irdma_sc_vsi *vsi;
+ u64 srq_pa;
+ u64 shadow_area_pa;
+ u32 first_pm_pbl_idx;
+ u32 pasid;
+ u32 srq_size;
+ u16 srq_limit;
+ u8 pasid_valid;
+ u8 wqe_size;
+ u8 leaf_pbl_size;
+ u8 virtual_map;
+ u8 tph_en;
+ u8 arm_limit_event;
+ u8 tph_value;
+ u8 pbl_chunk_size;
+ struct irdma_srq_uk_init_info srq_uk_init_info;
+};
+
+struct irdma_sc_srq {
+ struct irdma_sc_dev *dev;
+ struct irdma_sc_vsi *vsi;
+ struct irdma_sc_pd *pd;
+ struct irdma_srq_uk srq_uk;
+ void *back_srq;
+ u64 srq_pa;
+ u64 shadow_area_pa;
+ u32 first_pm_pbl_idx;
+ u32 pasid;
+ u32 hw_srq_size;
+ u16 srq_limit;
+ u8 pasid_valid;
+ u8 leaf_pbl_size;
+ u8 virtual_map;
+ u8 tph_en;
+ u8 arm_limit_event;
+ u8 tph_val;
+};
+
+struct irdma_modify_srq_info {
+ u16 srq_limit;
+ u8 arm_limit_event;
+};
+
struct irdma_create_qp_info {
bool ord_valid:1;
bool tcp_ctx_valid:1;
@@ -671,7 +815,8 @@ struct irdma_ccq_cqe_info {
u16 maj_err_code;
u16 min_err_code;
u8 op_code;
- bool error;
+ bool error:1;
+ bool pending:1;
};
struct irdma_dcb_app_info {
@@ -720,7 +865,7 @@ struct irdma_vsi_init_info {
struct irdma_vsi_stats_info {
struct irdma_vsi_pestat *pestat;
- u8 fcn_id;
+ u16 fcn_id;
bool alloc_stats_inst;
};
@@ -731,7 +876,8 @@ struct irdma_device_init_info {
__le64 *fpm_commit_buf;
struct irdma_hw *hw;
void __iomem *bar0;
- u8 hmc_fn_id;
+ enum irdma_protocol_used protocol_used;
+ u16 hmc_fn_id;
};
struct irdma_ceq_init_info {
@@ -746,8 +892,8 @@ struct irdma_ceq_init_info {
bool itr_no_expire:1;
u8 pbl_chunk_size;
u8 tph_val;
+ u16 vsi_idx;
u32 first_pm_pbl_idx;
- struct irdma_sc_vsi *vsi;
struct irdma_sc_cq **reg_cq;
u32 reg_cq_idx;
};
@@ -807,6 +953,8 @@ struct irdma_udp_offload_info {
u32 cwnd;
u8 rexmit_thresh;
u8 rnr_nak_thresh;
+ u8 rnr_nak_tmr;
+ u8 min_rnr_timer;
};
struct irdma_roce_offload_info {
@@ -833,6 +981,7 @@ struct irdma_roce_offload_info {
bool dctcp_en:1;
bool fw_cc_enable:1;
bool use_stats_inst:1;
+ u8 local_ack_timeout;
u16 t_high;
u16 t_low;
u8 last_byte_sent;
@@ -933,8 +1082,10 @@ struct irdma_qp_host_ctx_info {
};
u32 send_cq_num;
u32 rcv_cq_num;
+ u32 srq_id;
u32 rem_endpoint_idx;
- u8 stats_idx;
+ u16 stats_idx;
+ bool remote_atomics_en:1;
bool srq_valid:1;
bool tcp_info_valid:1;
bool iwarp_info_valid:1;
@@ -945,6 +1096,7 @@ struct irdma_qp_host_ctx_info {
struct irdma_aeqe_info {
u64 compl_ctx;
u32 qp_cq_id;
+ u32 def_info; /* only valid for DEF_CMPL */
u16 ae_id;
u16 wqe_idx;
u8 tcp_state;
@@ -953,9 +1105,11 @@ struct irdma_aeqe_info {
bool cq:1;
bool sq:1;
bool rq:1;
+ bool srq:1;
bool in_rdrsp_wr:1;
bool out_rdrsp:1;
bool aeqe_overflow:1;
+ bool err_rq_idx_valid:1;
u8 q2_data_written;
u8 ae_src;
};
@@ -972,7 +1126,8 @@ struct irdma_allocate_stag_info {
bool use_hmc_fcn_index:1;
bool use_pf_rid:1;
bool all_memory:1;
- u8 hmc_fcn_index;
+ bool remote_atomics_en:1;
+ u16 hmc_fcn_index;
};
struct irdma_mw_alloc_info {
@@ -1000,6 +1155,7 @@ struct irdma_reg_ns_stag_info {
u8 hmc_fcn_index;
bool use_pf_rid:1;
bool all_memory:1;
+ bool remote_atomics_en:1;
};
struct irdma_fast_reg_stag_info {
@@ -1023,6 +1179,7 @@ struct irdma_fast_reg_stag_info {
u8 hmc_fcn_index;
bool use_pf_rid:1;
bool defer_flag:1;
+ bool remote_atomics_en:1;
};
struct irdma_dealloc_stag_info {
@@ -1130,6 +1287,8 @@ struct irdma_cqp_manage_push_page_info {
};
struct irdma_qp_flush_info {
+ u32 err_sq_idx;
+ u32 err_rq_idx;
u16 sq_minor_code;
u16 sq_major_code;
u16 rq_minor_code;
@@ -1140,6 +1299,8 @@ struct irdma_qp_flush_info {
bool rq:1;
bool userflushcode:1;
bool generate_ae:1;
+ bool err_sq_idx_valid:1;
+ bool err_rq_idx_valid:1;
};
struct irdma_gen_ae_info {
@@ -1189,6 +1350,11 @@ void irdma_sc_pd_init(struct irdma_sc_dev *dev, struct irdma_sc_pd *pd, u32 pd_i
void irdma_cfg_aeq(struct irdma_sc_dev *dev, u32 idx, bool enable);
void irdma_check_cqp_progress(struct irdma_cqp_timeout *cqp_timeout,
struct irdma_sc_dev *dev);
+void irdma_sc_cqp_def_cmpl_ae_handler(struct irdma_sc_dev *dev,
+ struct irdma_aeqe_info *info,
+ bool first, u64 *scratch,
+ u32 *sw_def_info);
+u64 irdma_sc_cqp_cleanup_handler(struct irdma_sc_dev *dev);
int irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err, u16 *min_err);
int irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp);
int irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
@@ -1224,6 +1390,8 @@ void irdma_sc_cq_resize(struct irdma_sc_cq *cq, struct irdma_modify_cq_info *inf
int irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch,
u8 hmc_fn_id, bool post_sq,
bool poll_registers);
+int irdma_sc_srq_init(struct irdma_sc_srq *srq,
+ struct irdma_srq_init_info *info);
void sc_vsi_update_stats(struct irdma_sc_vsi *vsi);
struct cqp_info {
@@ -1467,6 +1635,23 @@ struct cqp_info {
struct irdma_dma_mem query_buff_mem;
u64 scratch;
} query_rdma;
+
+ struct {
+ struct irdma_sc_srq *srq;
+ u64 scratch;
+ } srq_create;
+
+ struct {
+ struct irdma_sc_srq *srq;
+ struct irdma_modify_srq_info info;
+ u64 scratch;
+ } srq_modify;
+
+ struct {
+ struct irdma_sc_srq *srq;
+ u64 scratch;
+ } srq_destroy;
+
} u;
};
diff --git a/drivers/infiniband/hw/irdma/uda_d.h b/drivers/infiniband/hw/irdma/uda_d.h
index 5a9e6eabf032..4fb4daa20722 100644
--- a/drivers/infiniband/hw/irdma/uda_d.h
+++ b/drivers/infiniband/hw/irdma/uda_d.h
@@ -78,8 +78,7 @@
#define IRDMA_UDAQPC_IPID GENMASK_ULL(47, 32)
#define IRDMA_UDAQPC_SNDMSS GENMASK_ULL(29, 16)
#define IRDMA_UDAQPC_VLANTAG GENMASK_ULL(15, 0)
-
-#define IRDMA_UDA_CQPSQ_MAV_PDINDEXHI GENMASK_ULL(21, 20)
+#define IRDMA_UDA_CQPSQ_MAV_PDINDEXHI GENMASK_ULL(27, 20)
#define IRDMA_UDA_CQPSQ_MAV_PDINDEXLO GENMASK_ULL(63, 48)
#define IRDMA_UDA_CQPSQ_MAV_SRCMACADDRINDEX GENMASK_ULL(29, 24)
#define IRDMA_UDA_CQPSQ_MAV_ARPINDEX GENMASK_ULL(63, 48)
@@ -94,7 +93,7 @@
#define IRDMA_UDA_CQPSQ_MAV_OPCODE GENMASK_ULL(37, 32)
#define IRDMA_UDA_CQPSQ_MAV_DOLOOPBACKK BIT_ULL(62)
#define IRDMA_UDA_CQPSQ_MAV_IPV4VALID BIT_ULL(59)
-#define IRDMA_UDA_CQPSQ_MAV_AVIDX GENMASK_ULL(16, 0)
+#define IRDMA_UDA_CQPSQ_MAV_AVIDX GENMASK_ULL(23, 0)
#define IRDMA_UDA_CQPSQ_MAV_INSERTVLANTAG BIT_ULL(60)
#define IRDMA_UDA_MGCTX_VFFLAG BIT_ULL(29)
#define IRDMA_UDA_MGCTX_DESTPORT GENMASK_ULL(47, 32)
diff --git a/drivers/infiniband/hw/irdma/uk.c b/drivers/infiniband/hw/irdma/uk.c
index 38c54e59cc2e..ce1ae10c30fc 100644
--- a/drivers/infiniband/hw/irdma/uk.c
+++ b/drivers/infiniband/hw/irdma/uk.c
@@ -198,6 +198,26 @@ __le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
return wqe;
}
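+
+/**
+ * irdma_srq_get_next_recv_wqe - get next srq's rcv wqe
+ * @srq: hw srq ptr
+ * @wqe_idx: return wqe index
+ */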
+__le64 *irdma_srq_get_next_recv_wqe(struct irdma_srq_uk *srq, u32 *wqe_idx)
+{
+ int ret_code;
+ __le64 *wqe;
+
+ if (IRDMA_RING_FULL_ERR(srq->srq_ring))
+ return NULL;
+
+ IRDMA_ATOMIC_RING_MOVE_HEAD(srq->srq_ring, *wqe_idx, ret_code);
+ if (ret_code)
+ return NULL;
+
+ if (!*wqe_idx)
+ srq->srwqe_polarity = !srq->srwqe_polarity;
+ /* wqe_size_multiplier is the number of 32-byte quanta in one srq wqe */
+ wqe = srq->srq_base[*wqe_idx * (srq->wqe_size_multiplier)].elem;
+
+ return wqe;
+}
+
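On every wrap to index 0 the producer flips srwqe_polarity, so a slot counts as posted only when its valid bit matches the current polarity; stale entries from the previous pass around the ring are automatically invalid without any explicit clearing. A toy model of the scheme, entirely outside the driver's structures:

/* Toy model, assumptions only: a slot is 'posted' when its valid bit
 * equals the producer's current polarity.
 */
struct toy_ring { u8 polarity; u8 valid[8]; u32 head; };

static void toy_post(struct toy_ring *r)
{
	r->valid[r->head] = r->polarity;	/* publish the slot */
	r->head = (r->head + 1) % 8;
	if (!r->head)
		r->polarity = !r->polarity;	/* wrapped: flip */
}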
/**
* irdma_qp_get_next_recv_wqe - get next qp's rcv wqe
* @qp: hw qp ptr
@@ -318,6 +338,160 @@ int irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
}
/**
+ * irdma_uk_atomic_fetch_add - atomic fetch and add operation
+ * @qp: hw qp ptr
+ * @info: post sq information
+ * @post_sq: flag to post sq
+ */
+int irdma_uk_atomic_fetch_add(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info, bool post_sq)
+{
+ struct irdma_atomic_fetch_add *op_info;
+ u32 total_size = 0;
+ u16 quanta = 2;
+ u32 wqe_idx;
+ __le64 *wqe;
+ u64 hdr;
+
+ op_info = &info->op.atomic_fetch_add;
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
+ info);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 0, op_info->tagged_offset);
+ set_64bit_val(wqe, 8,
+ FIELD_PREP(IRDMAQPSQ_STAG, op_info->stag));
+ set_64bit_val(wqe, 16, op_info->remote_tagged_offset);
+
+ hdr = FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, 1) |
+ FIELD_PREP(IRDMAQPSQ_REMOTE_STAG, op_info->remote_stag) |
+ FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_ATOMIC_FETCH_ADD) |
+ FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
+ FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
+ FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
+
+ set_64bit_val(wqe, 32, op_info->fetch_add_data_bytes);
+ set_64bit_val(wqe, 40, 0);
+ set_64bit_val(wqe, 48, 0);
+ set_64bit_val(wqe, 56,
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity));
+
+ dma_wmb(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
+
+ return 0;
+}
+
+/**
+ * irdma_uk_atomic_compare_swap - atomic compare and swap operation
+ * @qp: hw qp ptr
+ * @info: post sq information
+ * @post_sq: flag to post sq
+ */
+int irdma_uk_atomic_compare_swap(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info, bool post_sq)
+{
+ struct irdma_atomic_compare_swap *op_info;
+ u32 total_size = 0;
+ u16 quanta = 2;
+ u32 wqe_idx;
+ __le64 *wqe;
+ u64 hdr;
+
+ op_info = &info->op.atomic_compare_swap;
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
+ info);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 0, op_info->tagged_offset);
+ set_64bit_val(wqe, 8,
+ FIELD_PREP(IRDMAQPSQ_STAG, op_info->stag));
+ set_64bit_val(wqe, 16, op_info->remote_tagged_offset);
+
+ hdr = FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, 1) |
+ FIELD_PREP(IRDMAQPSQ_REMOTE_STAG, op_info->remote_stag) |
+ FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_ATOMIC_COMPARE_SWAP_ADD) |
+ FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
+ FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
+ FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
+
+ set_64bit_val(wqe, 32, op_info->swap_data_bytes);
+ set_64bit_val(wqe, 40, op_info->compare_data_bytes);
+ set_64bit_val(wqe, 48, 0);
+ set_64bit_val(wqe, 56,
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity));
+
+ dma_wmb(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
+
+ return 0;
+}
+
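Both atomic verbs occupy two quanta: the first carries the local tagged offset, local STag, and remote tagged offset, and the second the operand words (the add value, or the swap/compare pair). A hedged caller sketch for fetch-and-add; qp_uk stands for an already-initialized struct irdma_qp_uk and every value is a placeholder:

	struct irdma_post_sq_info info = {};
	u64 local_va = 0, remote_va = 0;	/* tagged offsets */
	u32 local_stag = 0, remote_stag = 0;	/* memory keys */
	int err;

	info.op_type = IRDMA_OP_TYPE_ATOMIC_FETCH_AND_ADD;
	info.signaled = true;
	info.op.atomic_fetch_add.tagged_offset = local_va;
	info.op.atomic_fetch_add.stag = local_stag;
	info.op.atomic_fetch_add.remote_tagged_offset = remote_va;
	info.op.atomic_fetch_add.remote_stag = remote_stag;
	info.op.atomic_fetch_add.fetch_add_data_bytes = 1;	/* operand */

	err = irdma_uk_atomic_fetch_add(&qp_uk, &info, true);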
+/**
+ * irdma_uk_srq_post_receive - post a receive wqe to a shared rq
+ * @srq: shared rq ptr
+ * @info: post rq information
+ */
+int irdma_uk_srq_post_receive(struct irdma_srq_uk *srq,
+ struct irdma_post_rq_info *info)
+{
+ u32 wqe_idx, i, byte_off;
+ u32 addl_frag_cnt;
+ __le64 *wqe;
+ u64 hdr;
+
+ if (srq->max_srq_frag_cnt < info->num_sges)
+ return -EINVAL;
+
+ wqe = irdma_srq_get_next_recv_wqe(srq, &wqe_idx);
+ if (!wqe)
+ return -ENOMEM;
+
+ addl_frag_cnt = info->num_sges > 1 ? info->num_sges - 1 : 0;
+ srq->wqe_ops.iw_set_fragment(wqe, 0, info->sg_list,
+ srq->srwqe_polarity);
+
+ for (i = 1, byte_off = 32; i < info->num_sges; i++) {
+ srq->wqe_ops.iw_set_fragment(wqe, byte_off, &info->sg_list[i],
+ srq->srwqe_polarity);
+ byte_off += 16;
+ }
+
+ /* if not an odd number set valid bit in next fragment */
+ if (srq->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(info->num_sges & 0x01) &&
+ info->num_sges) {
+ srq->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
+ srq->srwqe_polarity);
+ if (srq->uk_attrs->hw_rev == IRDMA_GEN_2)
+ ++addl_frag_cnt;
+ }
+
+ set_64bit_val(wqe, 16, (u64)info->wr_id);
+ hdr = FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
+ FIELD_PREP(IRDMAQPSQ_VALID, srq->srwqe_polarity);
+
+ dma_wmb(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ set_64bit_val(srq->shadow_area, 0, (wqe_idx + 1) % srq->srq_ring.size);
+
+ return 0;
+}
+
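The receive path validates the SGE count, claims the next slot, writes the fragments, then publishes the header (with the valid bit) only after dma_wmb(), and finally mirrors the new producer index into the shadow area for hardware. A hedged usage sketch, assuming sg_list takes the kernel's struct ib_sge as elsewhere in current irdma; all values are placeholders:

	struct ib_sge sge = {
		.addr = 0,	/* DMA address of the receive buffer */
		.length = 0,	/* buffer length in bytes */
		.lkey = 0,	/* local memory key */
	};
	struct irdma_post_rq_info rq_info = {
		.wr_id = 0x1234,	/* opaque cookie echoed in the CQE */
		.sg_list = &sge,
		.num_sges = 1,
	};
	int err;

	err = irdma_uk_srq_post_receive(&srq_uk, &rq_info);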
+/**
* irdma_uk_rdma_read - rdma read command
* @qp: hw qp ptr
* @info: post sq information
@@ -973,6 +1147,9 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
u64 comp_ctx, qword0, qword2, qword3;
__le64 *cqe;
struct irdma_qp_uk *qp;
+ struct irdma_srq_uk *srq;
+ struct qp_err_code qp_err;
+ u8 is_srq;
struct irdma_ring *pring = NULL;
u32 wqe_idx;
int ret_code;
@@ -1046,21 +1223,46 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
}
info->q_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);
+ is_srq = (u8)FIELD_GET(IRDMA_CQ_SRQ, qword3);
info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, qword3);
info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword3);
+ get_64bit_val(cqe, 8, &comp_ctx);
+ if (is_srq)
+ get_64bit_val(cqe, 40, (u64 *)&qp);
+ else
+ qp = (struct irdma_qp_uk *)(unsigned long)comp_ctx;
if (info->error) {
info->major_err = FIELD_GET(IRDMA_CQ_MAJERR, qword3);
info->minor_err = FIELD_GET(IRDMA_CQ_MINERR, qword3);
- if (info->major_err == IRDMA_FLUSH_MAJOR_ERR) {
- info->comp_status = IRDMA_COMPL_STATUS_FLUSHED;
+ switch (info->major_err) {
+ case IRDMA_SRQFLUSH_RSVD_MAJOR_ERR:
+ qp_err = irdma_ae_to_qp_err_code(info->minor_err);
+ info->minor_err = qp_err.flush_code;
+ fallthrough;
+ case IRDMA_FLUSH_MAJOR_ERR:
/* Set the min error to standard flush error code for remaining cqes */
if (info->minor_err != FLUSH_GENERAL_ERR) {
qword3 &= ~IRDMA_CQ_MINERR;
qword3 |= FIELD_PREP(IRDMA_CQ_MINERR, FLUSH_GENERAL_ERR);
set_64bit_val(cqe, 24, qword3);
}
- } else {
- info->comp_status = IRDMA_COMPL_STATUS_UNKNOWN;
+ info->comp_status = IRDMA_COMPL_STATUS_FLUSHED;
+ break;
+ default:
+#define IRDMA_CIE_SIGNATURE 0xE
+#define IRDMA_CQMAJERR_HIGH_NIBBLE GENMASK(15, 12)
+ if (info->q_type == IRDMA_CQE_QTYPE_SQ &&
+ qp->qp_type == IRDMA_QP_TYPE_ROCE_UD &&
+ FIELD_GET(IRDMA_CQMAJERR_HIGH_NIBBLE, info->major_err)
+ == IRDMA_CIE_SIGNATURE) {
+ info->error = 0;
+ info->major_err = 0;
+ info->minor_err = 0;
+ info->comp_status = IRDMA_COMPL_STATUS_SUCCESS;
+ } else {
+ info->comp_status = IRDMA_COMPL_STATUS_UNKNOWN;
+ }
+ break;
}
} else {
info->comp_status = IRDMA_COMPL_STATUS_SUCCESS;
@@ -1069,7 +1271,6 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
get_64bit_val(cqe, 0, &qword0);
get_64bit_val(cqe, 16, &qword2);
- info->tcp_seq_num_rtt = (u32)FIELD_GET(IRDMACQ_TCPSEQNUMRTT, qword0);
info->qp_id = (u32)FIELD_GET(IRDMACQ_QPID, qword2);
info->ud_src_qpn = (u32)FIELD_GET(IRDMACQ_UDSRCQPN, qword2);
@@ -1085,7 +1286,22 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
info->qp_handle = (irdma_qp_handle)(unsigned long)qp;
info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3);
- if (info->q_type == IRDMA_CQE_QTYPE_RQ) {
+ if (info->q_type == IRDMA_CQE_QTYPE_RQ && is_srq) {
+ srq = qp->srq_uk;
+
+ get_64bit_val(cqe, 8, &info->wr_id);
+ info->bytes_xfered = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword0);
+
+ if (qword3 & IRDMACQ_STAG) {
+ info->stag_invalid_set = true;
+ info->inv_stag = (u32)FIELD_GET(IRDMACQ_INVSTAG,
+ qword2);
+ } else {
+ info->stag_invalid_set = false;
+ }
+ IRDMA_RING_MOVE_TAIL(srq->srq_ring);
+ pring = &srq->srq_ring;
+ } else if (info->q_type == IRDMA_CQE_QTYPE_RQ && !is_srq) {
u32 array_idx;
array_idx = wqe_idx / qp->rq_wqe_size_multiplier;
@@ -1180,9 +1396,15 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
ret_code = 0;
exit:
- if (!ret_code && info->comp_status == IRDMA_COMPL_STATUS_FLUSHED)
+ if (!ret_code && info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) {
if (pring && IRDMA_RING_MORE_WORK(*pring))
- move_cq_head = false;
+ /* Park the CQ head during a flush so SW can generate CQEs
+ * for all unprocessed WQEs. For GEN3 and beyond, FW
+ * generates/flushes these CQEs, so move to the next CQE.
+ */
+ move_cq_head = qp->uk_attrs->hw_rev > IRDMA_GEN_2;
+ }
if (move_cq_head) {
IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
@@ -1210,10 +1432,10 @@ exit:
}
/**
- * irdma_qp_round_up - return round up qp wq depth
+ * irdma_round_up_wq - return round up qp wq depth
* @wqdepth: wq depth in quanta to round up
*/
-static int irdma_qp_round_up(u32 wqdepth)
+static int irdma_round_up_wq(u32 wqdepth)
{
int scount = 1;
@@ -1268,7 +1490,7 @@ int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift,
{
u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift;
- *sqdepth = irdma_qp_round_up((sq_size << shift) + IRDMA_SQ_RSVD);
+ *sqdepth = irdma_round_up_wq((sq_size << shift) + IRDMA_SQ_RSVD);
if (*sqdepth < min_size)
*sqdepth = min_size;
@@ -1290,7 +1512,7 @@ int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift,
{
u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift;
- *rqdepth = irdma_qp_round_up((rq_size << shift) + IRDMA_RQ_RSVD);
+ *rqdepth = irdma_round_up_wq((rq_size << shift) + IRDMA_RQ_RSVD);
if (*rqdepth < min_size)
*rqdepth = min_size;
@@ -1300,6 +1522,26 @@ int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift,
return 0;
}
+/**
+ * irdma_get_srqdepth - get SRQ depth (quanta)
+ * @uk_attrs: qp HW attributes
+ * @srq_size: SRQ size
+ * @shift: shift which determines size of WQE
+ * @srqdepth: depth of SRQ
+ */
+int irdma_get_srqdepth(struct irdma_uk_attrs *uk_attrs, u32 srq_size, u8 shift,
+ u32 *srqdepth)
+{
+ *srqdepth = irdma_round_up_wq((srq_size << shift) + IRDMA_RQ_RSVD);
+
+ if (*srqdepth < ((u32)uk_attrs->min_hw_wq_size << shift))
+ *srqdepth = uk_attrs->min_hw_wq_size << shift;
+ else if (*srqdepth > uk_attrs->max_hw_srq_quanta)
+ return -EINVAL;
+
+ return 0;
+}
+
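The depth computation scales the requested size into quanta, adds the reserved slots, rounds up to a power of two, then clamps against the device limits. A worked example, assuming IRDMA_RQ_RSVD is 1 as in current headers:

/* srq_size = 100, shift = 1:
 *   (100 << 1) + 1 = 201
 *   irdma_round_up_wq(201) -> 256 (next power of two)
 *   then clamped to [min_hw_wq_size << 1, max_hw_srq_quanta]
 */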
static const struct irdma_wqe_uk_ops iw_wqe_uk_ops = {
.iw_copy_inline_data = irdma_copy_inline_data,
.iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta,
@@ -1336,6 +1578,42 @@ static void irdma_setup_connection_wqes(struct irdma_qp_uk *qp,
}
/**
+ * irdma_uk_srq_init - initialize shared receive queue (srq)
+ * @srq: hw srq (user and kernel)
+ * @info: srq initialization info
+ *
+ * Initializes the vars used in both user and kernel mode.
+ * The size of the wqe depends on the max number of fragments
+ * allowed. The size of the wqe times the number of wqes should
+ * give the amount of memory allocated for the srq.
+ */
+int irdma_uk_srq_init(struct irdma_srq_uk *srq,
+ struct irdma_srq_uk_init_info *info)
+{
+ u8 rqshift;
+
+ srq->uk_attrs = info->uk_attrs;
+ if (info->max_srq_frag_cnt > srq->uk_attrs->max_hw_wq_frags)
+ return -EINVAL;
+
+ irdma_get_wqe_shift(srq->uk_attrs, info->max_srq_frag_cnt, 0, &rqshift);
+ srq->srq_caps = info->srq_caps;
+ srq->srq_base = info->srq;
+ srq->shadow_area = info->shadow_area;
+ srq->srq_id = info->srq_id;
+ srq->srwqe_polarity = 0;
+ srq->srq_size = info->srq_size;
+ srq->wqe_size = rqshift;
+ srq->max_srq_frag_cnt = min(srq->uk_attrs->max_hw_wq_frags,
+ ((u32)2 << rqshift) - 1);
+ IRDMA_RING_INIT(srq->srq_ring, srq->srq_size);
+ srq->wqe_size_multiplier = 1 << rqshift;
+ srq->wqe_ops = iw_wqe_uk_ops;
+
+ return 0;
+}
+
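Initialization derives the WQE shift from the maximum fragment count and seeds the ring, polarity, and shared op table. A hedged setup sketch; the memory pointers are placeholders the caller must allocate (the srq quanta array plus a shadow area):

	struct irdma_srq_uk_init_info init = {
		.uk_attrs = &dev->hw_attrs.uk_attrs,
		.srq = srq_mem,			/* placeholder quanta array */
		.shadow_area = shadow_mem,	/* placeholder */
		.srq_id = 1,
		.srq_size = depth,		/* from irdma_get_srqdepth() */
		.max_srq_frag_cnt = 2,
	};
	int err;

	err = irdma_uk_srq_init(&srq_uk, &init);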
+/**
* irdma_uk_calc_shift_wq - calculate WQE shift for both SQ and RQ
* @ukinfo: qp initialization info
* @sq_shift: Returns shift of SQ
@@ -1461,6 +1739,7 @@ int irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info)
qp->wqe_ops = iw_wqe_uk_ops_gen_1;
else
qp->wqe_ops = iw_wqe_uk_ops;
+ qp->srq_uk = info->srq_uk;
return ret_code;
}
diff --git a/drivers/infiniband/hw/irdma/user.h b/drivers/infiniband/hw/irdma/user.h
index 380e4a47aede..ab57f689827a 100644
--- a/drivers/infiniband/hw/irdma/user.h
+++ b/drivers/infiniband/hw/irdma/user.h
@@ -41,10 +41,114 @@
#define IRDMA_OP_TYPE_INV_STAG 0x0a
#define IRDMA_OP_TYPE_RDMA_READ_INV_STAG 0x0b
#define IRDMA_OP_TYPE_NOP 0x0c
+#define IRDMA_OP_TYPE_ATOMIC_FETCH_AND_ADD 0x0f
+#define IRDMA_OP_TYPE_ATOMIC_COMPARE_AND_SWAP 0x11
#define IRDMA_OP_TYPE_REC 0x3e
#define IRDMA_OP_TYPE_REC_IMM 0x3f
-#define IRDMA_FLUSH_MAJOR_ERR 1
+#define IRDMA_FLUSH_MAJOR_ERR 1
+#define IRDMA_SRQFLUSH_RSVD_MAJOR_ERR 0xfffe
+
+/* Async Events codes */
+#define IRDMA_AE_AMP_UNALLOCATED_STAG 0x0102
+#define IRDMA_AE_AMP_INVALID_STAG 0x0103
+#define IRDMA_AE_AMP_BAD_QP 0x0104
+#define IRDMA_AE_AMP_BAD_PD 0x0105
+#define IRDMA_AE_AMP_BAD_STAG_KEY 0x0106
+#define IRDMA_AE_AMP_BAD_STAG_INDEX 0x0107
+#define IRDMA_AE_AMP_BOUNDS_VIOLATION 0x0108
+#define IRDMA_AE_AMP_RIGHTS_VIOLATION 0x0109
+#define IRDMA_AE_AMP_TO_WRAP 0x010a
+#define IRDMA_AE_AMP_FASTREG_VALID_STAG 0x010c
+#define IRDMA_AE_AMP_FASTREG_MW_STAG 0x010d
+#define IRDMA_AE_AMP_FASTREG_INVALID_RIGHTS 0x010e
+#define IRDMA_AE_AMP_FASTREG_INVALID_LENGTH 0x0110
+#define IRDMA_AE_AMP_INVALIDATE_SHARED 0x0111
+#define IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS 0x0112
+#define IRDMA_AE_AMP_INVALIDATE_MR_WITH_BOUND_WINDOWS 0x0113
+#define IRDMA_AE_AMP_MWBIND_VALID_STAG 0x0114
+#define IRDMA_AE_AMP_MWBIND_OF_MR_STAG 0x0115
+#define IRDMA_AE_AMP_MWBIND_TO_ZERO_BASED_STAG 0x0116
+#define IRDMA_AE_AMP_MWBIND_TO_MW_STAG 0x0117
+#define IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS 0x0118
+#define IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS 0x0119
+#define IRDMA_AE_AMP_MWBIND_TO_INVALID_PARENT 0x011a
+#define IRDMA_AE_AMP_MWBIND_BIND_DISABLED 0x011b
+#define IRDMA_AE_PRIV_OPERATION_DENIED 0x011c
+#define IRDMA_AE_AMP_INVALIDATE_TYPE1_MW 0x011d
+#define IRDMA_AE_AMP_MWBIND_ZERO_BASED_TYPE1_MW 0x011e
+#define IRDMA_AE_AMP_FASTREG_INVALID_PBL_HPS_CFG 0x011f
+#define IRDMA_AE_AMP_MWBIND_WRONG_TYPE 0x0120
+#define IRDMA_AE_AMP_FASTREG_PBLE_MISMATCH 0x0121
+#define IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG 0x0132
+#define IRDMA_AE_UDA_XMIT_BAD_PD 0x0133
+#define IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT 0x0134
+#define IRDMA_AE_UDA_L4LEN_INVALID 0x0135
+#define IRDMA_AE_BAD_CLOSE 0x0201
+#define IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE 0x0202
+#define IRDMA_AE_CQ_OPERATION_ERROR 0x0203
+#define IRDMA_AE_RDMA_READ_WHILE_ORD_ZERO 0x0205
+#define IRDMA_AE_STAG_ZERO_INVALID 0x0206
+#define IRDMA_AE_IB_RREQ_AND_Q1_FULL 0x0207
+#define IRDMA_AE_IB_INVALID_REQUEST 0x0208
+#define IRDMA_AE_SRQ_LIMIT 0x0209
+#define IRDMA_AE_WQE_UNEXPECTED_OPCODE 0x020a
+#define IRDMA_AE_WQE_INVALID_PARAMETER 0x020b
+#define IRDMA_AE_WQE_INVALID_FRAG_DATA 0x020c
+#define IRDMA_AE_IB_REMOTE_ACCESS_ERROR 0x020d
+#define IRDMA_AE_IB_REMOTE_OP_ERROR 0x020e
+#define IRDMA_AE_SRQ_CATASTROPHIC_ERROR 0x020f
+#define IRDMA_AE_WQE_LSMM_TOO_LONG 0x0220
+#define IRDMA_AE_ATOMIC_ALIGNMENT 0x0221
+#define IRDMA_AE_ATOMIC_MASK 0x0222
+#define IRDMA_AE_INVALID_REQUEST 0x0223
+#define IRDMA_AE_PCIE_ATOMIC_DISABLE 0x0224
+#define IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN 0x0301
+#define IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER 0x0303
+#define IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION 0x0304
+#define IRDMA_AE_DDP_UBE_INVALID_MO 0x0305
+#define IRDMA_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE 0x0306
+#define IRDMA_AE_DDP_UBE_INVALID_QN 0x0307
+#define IRDMA_AE_DDP_NO_L_BIT 0x0308
+#define IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION 0x0311
+#define IRDMA_AE_RDMAP_ROE_UNEXPECTED_OPCODE 0x0312
+#define IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST 0x0313
+#define IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP 0x0314
+#define IRDMA_AE_ROCE_RSP_LENGTH_ERROR 0x0316
+#define IRDMA_AE_ROCE_EMPTY_MCG 0x0380
+#define IRDMA_AE_ROCE_BAD_MC_IP_ADDR 0x0381
+#define IRDMA_AE_ROCE_BAD_MC_QPID 0x0382
+#define IRDMA_AE_MCG_QP_PROTOCOL_MISMATCH 0x0383
+#define IRDMA_AE_INVALID_ARP_ENTRY 0x0401
+#define IRDMA_AE_INVALID_TCP_OPTION_RCVD 0x0402
+#define IRDMA_AE_STALE_ARP_ENTRY 0x0403
+#define IRDMA_AE_INVALID_AH_ENTRY 0x0406
+#define IRDMA_AE_LLP_CLOSE_COMPLETE 0x0501
+#define IRDMA_AE_LLP_CONNECTION_RESET 0x0502
+#define IRDMA_AE_LLP_FIN_RECEIVED 0x0503
+#define IRDMA_AE_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH 0x0504
+#define IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR 0x0505
+#define IRDMA_AE_LLP_SEGMENT_TOO_SMALL 0x0507
+#define IRDMA_AE_LLP_SYN_RECEIVED 0x0508
+#define IRDMA_AE_LLP_TERMINATE_RECEIVED 0x0509
+#define IRDMA_AE_LLP_TOO_MANY_RETRIES 0x050a
+#define IRDMA_AE_LLP_TOO_MANY_KEEPALIVE_RETRIES 0x050b
+#define IRDMA_AE_LLP_DOUBT_REACHABILITY 0x050c
+#define IRDMA_AE_LLP_CONNECTION_ESTABLISHED 0x050e
+#define IRDMA_AE_LLP_TOO_MANY_RNRS 0x050f
+#define IRDMA_AE_RESOURCE_EXHAUSTION 0x0520
+#define IRDMA_AE_RESET_SENT 0x0601
+#define IRDMA_AE_TERMINATE_SENT 0x0602
+#define IRDMA_AE_RESET_NOT_SENT 0x0603
+#define IRDMA_AE_LCE_QP_CATASTROPHIC 0x0700
+#define IRDMA_AE_LCE_FUNCTION_CATASTROPHIC 0x0701
+#define IRDMA_AE_LCE_CQ_CATASTROPHIC 0x0702
+#define IRDMA_AE_REMOTE_QP_CATASTROPHIC 0x0703
+#define IRDMA_AE_LOCAL_QP_CATASTROPHIC 0x0704
+#define IRDMA_AE_RCE_QP_CATASTROPHIC 0x0705
+#define IRDMA_AE_QP_SUSPEND_COMPLETE 0x0900
+#define IRDMA_AE_CQP_DEFERRED_COMPLETE 0x0901
+#define IRDMA_AE_ADAPTER_CATASTROPHIC				0x0b0b
enum irdma_device_caps_const {
IRDMA_WQE_SIZE = 4,
@@ -55,11 +159,12 @@ enum irdma_device_caps_const {
IRDMA_CEQE_SIZE = 1,
IRDMA_CQP_CTX_SIZE = 8,
IRDMA_SHADOW_AREA_SIZE = 8,
- IRDMA_QUERY_FPM_BUF_SIZE = 176,
- IRDMA_COMMIT_FPM_BUF_SIZE = 176,
+ IRDMA_QUERY_FPM_BUF_SIZE = 192,
+ IRDMA_COMMIT_FPM_BUF_SIZE = 192,
IRDMA_GATHER_STATS_BUF_SIZE = 1024,
IRDMA_MIN_IW_QP_ID = 0,
IRDMA_MAX_IW_QP_ID = 262143,
+ IRDMA_MIN_IW_SRQ_ID = 0,
IRDMA_MIN_CEQID = 0,
IRDMA_MAX_CEQID = 1023,
IRDMA_CEQ_MAX_COUNT = IRDMA_MAX_CEQID + 1,
@@ -67,6 +172,7 @@ enum irdma_device_caps_const {
IRDMA_MAX_CQID = 524287,
IRDMA_MIN_AEQ_ENTRIES = 1,
IRDMA_MAX_AEQ_ENTRIES = 524287,
+ IRDMA_MAX_AEQ_ENTRIES_GEN_3 = 262144,
IRDMA_MIN_CEQ_ENTRIES = 1,
IRDMA_MAX_CEQ_ENTRIES = 262143,
IRDMA_MIN_CQ_SIZE = 1,
@@ -105,6 +211,13 @@ enum irdma_flush_opcode {
FLUSH_RETRY_EXC_ERR,
FLUSH_MW_BIND_ERR,
FLUSH_REM_INV_REQ_ERR,
+ FLUSH_RNR_RETRY_EXC_ERR,
+};
+
+enum irdma_qp_event_type {
+ IRDMA_QP_EVENT_CATASTROPHIC,
+ IRDMA_QP_EVENT_ACCESS_ERR,
+ IRDMA_QP_EVENT_REQ_ERR,
};
enum irdma_cmpl_status {
@@ -147,6 +260,8 @@ enum irdma_qp_caps {
IRDMA_PUSH_MODE = 8,
};
+struct irdma_srq_uk;
+struct irdma_srq_uk_init_info;
struct irdma_qp_uk;
struct irdma_cq_uk;
struct irdma_qp_uk_init_info;
@@ -201,6 +316,24 @@ struct irdma_bind_window {
bool ena_writes:1;
irdma_stag mw_stag;
bool mem_window_type_1:1;
+ bool remote_atomics_en:1;
+};
+
+struct irdma_atomic_fetch_add {
+ u64 tagged_offset;
+ u64 remote_tagged_offset;
+ u64 fetch_add_data_bytes;
+ u32 stag;
+ u32 remote_stag;
+};
+
+struct irdma_atomic_compare_swap {
+ u64 tagged_offset;
+ u64 remote_tagged_offset;
+ u64 swap_data_bytes;
+ u64 compare_data_bytes;
+ u32 stag;
+ u32 remote_stag;
};
struct irdma_inv_local_stag {
@@ -219,6 +352,7 @@ struct irdma_post_sq_info {
bool report_rtt:1;
bool udp_hdr:1;
bool defer_flag:1;
+ bool remote_atomic_en:1;
u32 imm_data;
u32 stag_to_inv;
union {
@@ -227,6 +361,8 @@ struct irdma_post_sq_info {
struct irdma_rdma_read rdma_read;
struct irdma_bind_window bind_window;
struct irdma_inv_local_stag inv_local_stag;
+ struct irdma_atomic_fetch_add atomic_fetch_add;
+ struct irdma_atomic_compare_swap atomic_compare_swap;
} op;
};
@@ -255,6 +391,15 @@ struct irdma_cq_poll_info {
bool imm_valid:1;
};
+struct qp_err_code {
+ enum irdma_flush_opcode flush_code;
+ enum irdma_qp_event_type event_type;
+};
+
+int irdma_uk_atomic_compare_swap(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info, bool post_sq);
+int irdma_uk_atomic_fetch_add(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info, bool post_sq);
int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
struct irdma_post_sq_info *info, bool post_sq);
int irdma_uk_inline_send(struct irdma_qp_uk *qp,
@@ -300,6 +445,39 @@ int irdma_uk_calc_depth_shift_sq(struct irdma_qp_uk_init_info *ukinfo,
u32 *sq_depth, u8 *sq_shift);
int irdma_uk_calc_depth_shift_rq(struct irdma_qp_uk_init_info *ukinfo,
u32 *rq_depth, u8 *rq_shift);
+int irdma_uk_srq_init(struct irdma_srq_uk *srq,
+ struct irdma_srq_uk_init_info *info);
+int irdma_uk_srq_post_receive(struct irdma_srq_uk *srq,
+ struct irdma_post_rq_info *info);
+
+struct irdma_srq_uk {
+ u32 srq_caps;
+ struct irdma_qp_quanta *srq_base;
+ struct irdma_uk_attrs *uk_attrs;
+ __le64 *shadow_area;
+ struct irdma_ring srq_ring;
+ struct irdma_ring initial_ring;
+ u32 srq_id;
+ u32 srq_size;
+ u32 max_srq_frag_cnt;
+ struct irdma_wqe_uk_ops wqe_ops;
+ u8 srwqe_polarity;
+ u8 wqe_size;
+ u8 wqe_size_multiplier;
+ u8 deferred_flag;
+};
+
+struct irdma_srq_uk_init_info {
+ struct irdma_qp_quanta *srq;
+ struct irdma_uk_attrs *uk_attrs;
+ __le64 *shadow_area;
+ u64 *srq_wrid_array;
+ u32 srq_id;
+ u32 srq_caps;
+ u32 srq_size;
+ u32 max_srq_frag_cnt;
+};
+
struct irdma_sq_uk_wr_trk_info {
u64 wrid;
u32 wr_len;
@@ -344,6 +522,7 @@ struct irdma_qp_uk {
bool destroy_pending:1; /* Indicates the QP is being destroyed */
void *back_qp;
u8 dbg_rq_flushed;
+ struct irdma_srq_uk *srq_uk;
u8 sq_flush_seen;
u8 rq_flush_seen;
};
@@ -383,6 +562,7 @@ struct irdma_qp_uk_init_info {
u8 rq_shift;
int abi_ver;
bool legacy_mode;
+ struct irdma_srq_uk *srq_uk;
};
struct irdma_cq_uk_init_info {
@@ -398,6 +578,7 @@ struct irdma_cq_uk_init_info {
__le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
u16 quanta, u32 total_size,
struct irdma_post_sq_info *info);
+__le64 *irdma_srq_get_next_recv_wqe(struct irdma_srq_uk *srq, u32 *wqe_idx);
__le64 *irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx);
void irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq);
int irdma_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled, bool post_sq);
@@ -409,5 +590,85 @@ int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift,
u32 *wqdepth);
int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift,
u32 *wqdepth);
+int irdma_get_srqdepth(struct irdma_uk_attrs *uk_attrs, u32 srq_size, u8 shift,
+ u32 *srqdepth);
void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx);
+
+static inline struct qp_err_code irdma_ae_to_qp_err_code(u16 ae_id)
+{
+ struct qp_err_code qp_err = {};
+
+ switch (ae_id) {
+ case IRDMA_AE_AMP_BOUNDS_VIOLATION:
+ case IRDMA_AE_AMP_INVALID_STAG:
+ case IRDMA_AE_AMP_RIGHTS_VIOLATION:
+ case IRDMA_AE_AMP_UNALLOCATED_STAG:
+ case IRDMA_AE_AMP_BAD_PD:
+ case IRDMA_AE_AMP_BAD_QP:
+ case IRDMA_AE_AMP_BAD_STAG_KEY:
+ case IRDMA_AE_AMP_BAD_STAG_INDEX:
+ case IRDMA_AE_AMP_TO_WRAP:
+ case IRDMA_AE_PRIV_OPERATION_DENIED:
+ qp_err.flush_code = FLUSH_PROT_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_ACCESS_ERR;
+ break;
+ case IRDMA_AE_UDA_XMIT_BAD_PD:
+ case IRDMA_AE_WQE_UNEXPECTED_OPCODE:
+ qp_err.flush_code = FLUSH_LOC_QP_OP_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+ break;
+ case IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT:
+ case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
+ case IRDMA_AE_UDA_L4LEN_INVALID:
+ case IRDMA_AE_DDP_UBE_INVALID_MO:
+ case IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
+ qp_err.flush_code = FLUSH_LOC_LEN_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+ break;
+ case IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
+ case IRDMA_AE_IB_REMOTE_ACCESS_ERROR:
+ qp_err.flush_code = FLUSH_REM_ACCESS_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_ACCESS_ERR;
+ break;
+ case IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS:
+ case IRDMA_AE_AMP_MWBIND_BIND_DISABLED:
+ case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS:
+ case IRDMA_AE_AMP_MWBIND_VALID_STAG:
+ qp_err.flush_code = FLUSH_MW_BIND_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_ACCESS_ERR;
+ break;
+ case IRDMA_AE_LLP_TOO_MANY_RETRIES:
+ qp_err.flush_code = FLUSH_RETRY_EXC_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+ break;
+ case IRDMA_AE_IB_INVALID_REQUEST:
+ qp_err.flush_code = FLUSH_REM_INV_REQ_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_REQ_ERR;
+ break;
+ case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
+ case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
+ case IRDMA_AE_ROCE_RSP_LENGTH_ERROR:
+ case IRDMA_AE_IB_REMOTE_OP_ERROR:
+ qp_err.flush_code = FLUSH_REM_OP_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+ break;
+ case IRDMA_AE_LLP_TOO_MANY_RNRS:
+ qp_err.flush_code = FLUSH_RNR_RETRY_EXC_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+ break;
+ case IRDMA_AE_LCE_QP_CATASTROPHIC:
+ case IRDMA_AE_REMOTE_QP_CATASTROPHIC:
+ case IRDMA_AE_LOCAL_QP_CATASTROPHIC:
+ case IRDMA_AE_RCE_QP_CATASTROPHIC:
+ qp_err.flush_code = FLUSH_FATAL_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+ break;
+ default:
+ qp_err.flush_code = FLUSH_GENERAL_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+ break;
+ }
+
+ return qp_err;
+}
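
For instance, feeding a retry-exhaustion AE through the helper above yields the pair dictated by the switch:

    struct qp_err_code err = irdma_ae_to_qp_err_code(IRDMA_AE_LLP_TOO_MANY_RETRIES);

    /* err.flush_code == FLUSH_RETRY_EXC_ERR and
     * err.event_type == IRDMA_QP_EVENT_CATASTROPHIC
     */
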
#endif /* IRDMA_USER_H */
diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
index b510ef747399..8b94d87b0192 100644
--- a/drivers/infiniband/hw/irdma/utils.c
+++ b/drivers/infiniband/hw/irdma/utils.c
@@ -481,6 +481,7 @@ void irdma_free_cqp_request(struct irdma_cqp *cqp,
WRITE_ONCE(cqp_request->request_done, false);
cqp_request->callback_fcn = NULL;
cqp_request->waiting = false;
+ cqp_request->pending = false;
spin_lock_irqsave(&cqp->req_lock, flags);
list_add_tail(&cqp_request->list, &cqp->cqp_avail_reqs);
@@ -521,6 +522,22 @@ irdma_free_pending_cqp_request(struct irdma_cqp *cqp,
}
/**
+ * irdma_cleanup_deferred_cqp_ops - clean up CQP ops with deferred completions
+ * @dev: sc_dev
+ * @cqp: cqp
+ */
+static void irdma_cleanup_deferred_cqp_ops(struct irdma_sc_dev *dev,
+ struct irdma_cqp *cqp)
+{
+ u64 scratch;
+
+ /* process all CQP requests with deferred/pending completions */
+ while ((scratch = irdma_sc_cqp_cleanup_handler(dev)))
+ irdma_free_pending_cqp_request(cqp, (struct irdma_cqp_request *)
+ (uintptr_t)scratch);
+}
+
+/**
* irdma_cleanup_pending_cqp_op - clean-up cqp with no
* completions
* @rf: RDMA PCI function
@@ -533,6 +550,8 @@ void irdma_cleanup_pending_cqp_op(struct irdma_pci_f *rf)
struct cqp_cmds_info *pcmdinfo = NULL;
u32 i, pending_work, wqe_idx;
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3)
+ irdma_cleanup_deferred_cqp_ops(dev, cqp);
pending_work = IRDMA_RING_USED_QUANTA(cqp->sc_cqp.sq_ring);
wqe_idx = IRDMA_RING_CURRENT_TAIL(cqp->sc_cqp.sq_ring);
for (i = 0; i < pending_work; i++) {
@@ -552,6 +571,26 @@ void irdma_cleanup_pending_cqp_op(struct irdma_pci_f *rf)
}
}
+static int irdma_get_timeout_threshold(struct irdma_sc_dev *dev)
+{
+ u16 time_s = dev->vc_caps.cqp_timeout_s;
+
+ if (!time_s)
+ return CQP_TIMEOUT_THRESHOLD;
+
+ return time_s * 1000 / dev->hw_attrs.max_cqp_compl_wait_time_ms;
+}
+
+static int irdma_get_def_timeout_threshold(struct irdma_sc_dev *dev)
+{
+ u16 time_s = dev->vc_caps.cqp_def_timeout_s;
+
+ if (!time_s)
+ return CQP_DEF_CMPL_TIMEOUT_THRESHOLD;
+
+ return time_s * 1000 / dev->hw_attrs.max_cqp_compl_wait_time_ms;
+}
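
A worked example with hypothetical numbers: if the function reports cqp_timeout_s = 8 and each wait iteration lasts max_cqp_compl_wait_time_ms = 10, the threshold becomes 8 * 1000 / 10 = 800 polling rounds before the command is treated as stuck; when vc_caps leaves the field at 0, the static CQP_TIMEOUT_THRESHOLD (or CQP_DEF_CMPL_TIMEOUT_THRESHOLD) default applies instead.
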
+
/**
* irdma_wait_event - wait for completion
* @rf: RDMA PCI function
@@ -561,6 +600,7 @@ static int irdma_wait_event(struct irdma_pci_f *rf,
struct irdma_cqp_request *cqp_request)
{
struct irdma_cqp_timeout cqp_timeout = {};
+ int timeout_threshold = irdma_get_timeout_threshold(&rf->sc_dev);
bool cqp_error = false;
int err_code = 0;
@@ -572,9 +612,17 @@ static int irdma_wait_event(struct irdma_pci_f *rf,
msecs_to_jiffies(CQP_COMPL_WAIT_TIME_MS)))
break;
+ if (cqp_request->pending)
+ /* There was a deferred or pending completion
+ * received for this CQP request, so we need
+ * to wait longer than usual.
+ */
+ timeout_threshold =
+ irdma_get_def_timeout_threshold(&rf->sc_dev);
+
irdma_check_cqp_progress(&cqp_timeout, &rf->sc_dev);
- if (cqp_timeout.count < CQP_TIMEOUT_THRESHOLD)
+ if (cqp_timeout.count < timeout_threshold)
continue;
if (!rf->reset) {
@@ -649,6 +697,9 @@ static const char *const irdma_cqp_cmd_names[IRDMA_MAX_CQP_OPS] = {
[IRDMA_OP_ADD_LOCAL_MAC_ENTRY] = "Add Local MAC Entry Cmd",
[IRDMA_OP_DELETE_LOCAL_MAC_ENTRY] = "Delete Local MAC Entry Cmd",
[IRDMA_OP_CQ_MODIFY] = "CQ Modify Cmd",
+ [IRDMA_OP_SRQ_CREATE] = "Create SRQ Cmd",
+ [IRDMA_OP_SRQ_MODIFY] = "Modify SRQ Cmd",
+ [IRDMA_OP_SRQ_DESTROY] = "Destroy SRQ Cmd",
};
static const struct irdma_cqp_err_info irdma_noncrit_err_list[] = {
@@ -1065,6 +1116,26 @@ static void irdma_dealloc_push_page(struct irdma_pci_f *rf,
irdma_put_cqp_request(&rf->cqp, cqp_request);
}
+static void irdma_free_gsi_qp_rsrc(struct irdma_qp *iwqp, u32 qp_num)
+{
+ struct irdma_device *iwdev = iwqp->iwdev;
+ struct irdma_pci_f *rf = iwdev->rf;
+ unsigned long flags;
+
+ if (rf->sc_dev.hw_attrs.uk_attrs.hw_rev < IRDMA_GEN_3)
+ return;
+
+ irdma_vchnl_req_del_vport(&rf->sc_dev, iwdev->vport_id, qp_num);
+
+ if (qp_num == 1) {
+ spin_lock_irqsave(&rf->rsrc_lock, flags);
+ rf->hwqp1_rsvd = false;
+ spin_unlock_irqrestore(&rf->rsrc_lock, flags);
+ } else if (qp_num > 2) {
+ irdma_free_rsrc(rf, rf->allocated_qps, qp_num);
+ }
+}
+
/**
* irdma_free_qp_rsrc - free up memory resources for qp
* @iwqp: qp ptr (user or kernel)
@@ -1073,7 +1144,7 @@ void irdma_free_qp_rsrc(struct irdma_qp *iwqp)
{
struct irdma_device *iwdev = iwqp->iwdev;
struct irdma_pci_f *rf = iwdev->rf;
- u32 qp_num = iwqp->ibqp.qp_num;
+ u32 qp_num = iwqp->sc_qp.qp_uk.qp_id;
irdma_ieq_cleanup_qp(iwdev->vsi.ieq, &iwqp->sc_qp);
irdma_dealloc_push_page(rf, &iwqp->sc_qp);
@@ -1083,8 +1154,12 @@ void irdma_free_qp_rsrc(struct irdma_qp *iwqp)
iwqp->sc_qp.user_pri);
}
- if (qp_num > 2)
- irdma_free_rsrc(rf, rf->allocated_qps, qp_num);
+ if (iwqp->ibqp.qp_type == IB_QPT_GSI) {
+ irdma_free_gsi_qp_rsrc(iwqp, qp_num);
+ } else {
+ if (qp_num > 2)
+ irdma_free_rsrc(rf, rf->allocated_qps, qp_num);
+ }
dma_free_coherent(rf->sc_dev.hw->device, iwqp->q2_ctx_mem.size,
iwqp->q2_ctx_mem.va, iwqp->q2_ctx_mem.pa);
iwqp->q2_ctx_mem.va = NULL;
@@ -1096,6 +1171,30 @@ void irdma_free_qp_rsrc(struct irdma_qp *iwqp)
}
/**
+ * irdma_srq_wq_destroy - send srq destroy cqp
+ * @rf: RDMA PCI function
+ * @srq: hardware control srq
+ */
+void irdma_srq_wq_destroy(struct irdma_pci_f *rf, struct irdma_sc_srq *srq)
+{
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
+ if (!cqp_request)
+ return;
+
+ cqp_info = &cqp_request->info;
+ cqp_info->cqp_cmd = IRDMA_OP_SRQ_DESTROY;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.srq_destroy.srq = srq;
+ cqp_info->in.u.srq_destroy.scratch = (uintptr_t)cqp_request;
+
+ irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+}
+
+/**
* irdma_cq_wq_destroy - send cq destroy cqp
* @rf: RDMA PCI function
* @cq: hardware control cq
@@ -2266,7 +2365,10 @@ bool irdma_cq_empty(struct irdma_cq *iwcq)
u8 polarity;
ukcq = &iwcq->sc_cq.cq_uk;
- cqe = IRDMA_GET_CURRENT_CQ_ELEM(ukcq);
+ if (ukcq->avoid_mem_cflct)
+ cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(ukcq);
+ else
+ cqe = IRDMA_GET_CURRENT_CQ_ELEM(ukcq);
get_64bit_val(cqe, 24, &qword3);
polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index da5a41b275d8..76ce6137f2ba 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -41,7 +41,8 @@ static int irdma_query_device(struct ib_device *ibdev,
props->max_cq = rf->max_cq - rf->used_cqs;
props->max_cqe = rf->max_cqe - 1;
props->max_mr = rf->max_mr - rf->used_mrs;
- props->max_mw = props->max_mr;
+ if (hw_attrs->uk_attrs.hw_rev >= IRDMA_GEN_3)
+ props->max_mw = props->max_mr;
props->max_pd = rf->max_pd - rf->used_pds;
props->max_sge_rd = hw_attrs->uk_attrs.max_hw_read_sges;
props->max_qp_rd_atom = hw_attrs->max_hw_ird;
@@ -56,9 +57,21 @@ static int irdma_query_device(struct ib_device *ibdev,
props->max_mcast_qp_attach = IRDMA_MAX_MGS_PER_CTX;
props->max_total_mcast_qp_attach = rf->max_qp * IRDMA_MAX_MGS_PER_CTX;
props->max_fast_reg_page_list_len = IRDMA_MAX_PAGES_PER_FMR;
-#define HCA_CLOCK_TIMESTAMP_MASK 0x1ffff
- if (hw_attrs->uk_attrs.hw_rev >= IRDMA_GEN_2)
- props->timestamp_mask = HCA_CLOCK_TIMESTAMP_MASK;
+ props->max_srq = rf->max_srq - rf->used_srqs;
+ props->max_srq_wr = IRDMA_MAX_SRQ_WRS;
+ props->max_srq_sge = hw_attrs->uk_attrs.max_hw_wq_frags;
+ if (hw_attrs->uk_attrs.feature_flags & IRDMA_FEATURE_ATOMIC_OPS)
+ props->atomic_cap = IB_ATOMIC_HCA;
+ else
+ props->atomic_cap = IB_ATOMIC_NONE;
+ props->masked_atomic_cap = props->atomic_cap;
+	if (hw_attrs->uk_attrs.hw_rev >= IRDMA_GEN_3) {
+#define HCA_CORE_CLOCK_KHZ 1000000UL
+		props->timestamp_mask = GENMASK(31, 0);
+		props->hca_core_clock = HCA_CORE_CLOCK_KHZ;
+		props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
+	}
return 0;
}
@@ -292,6 +305,10 @@ static int irdma_alloc_ucontext(struct ib_ucontext *uctx,
ucontext->iwdev = iwdev;
ucontext->abi_ver = req.userspace_ver;
+ if (!(req.comp_mask & IRDMA_SUPPORT_WQE_FORMAT_V2) &&
+ uk_attrs->hw_rev >= IRDMA_GEN_3)
+ return -EOPNOTSUPP;
+
if (req.comp_mask & IRDMA_ALLOC_UCTX_USE_RAW_ATTR)
ucontext->use_raw_attrs = true;
@@ -332,6 +349,8 @@ static int irdma_alloc_ucontext(struct ib_ucontext *uctx,
uresp.comp_mask |= IRDMA_ALLOC_UCTX_USE_RAW_ATTR;
uresp.min_hw_wq_size = uk_attrs->min_hw_wq_size;
uresp.comp_mask |= IRDMA_ALLOC_UCTX_MIN_HW_WQ_SIZE;
+ uresp.max_hw_srq_quanta = uk_attrs->max_hw_srq_quanta;
+ uresp.comp_mask |= IRDMA_ALLOC_UCTX_MAX_HW_SRQ_QUANTA;
if (ib_copy_to_udata(udata, &uresp,
min(sizeof(uresp), udata->outlen))) {
rdma_user_mmap_entry_remove(ucontext->db_mmap_entry);
@@ -343,6 +362,8 @@ static int irdma_alloc_ucontext(struct ib_ucontext *uctx,
spin_lock_init(&ucontext->cq_reg_mem_list_lock);
INIT_LIST_HEAD(&ucontext->qp_reg_mem_list);
spin_lock_init(&ucontext->qp_reg_mem_list_lock);
+ INIT_LIST_HEAD(&ucontext->srq_reg_mem_list);
+ spin_lock_init(&ucontext->srq_reg_mem_list_lock);
return 0;
@@ -521,7 +542,7 @@ static int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
iwqp->sc_qp.qp_uk.destroy_pending = true;
- if (iwqp->iwarp_state == IRDMA_QP_STATE_RTS)
+ if (iwqp->iwarp_state >= IRDMA_QP_STATE_IDLE)
irdma_modify_qp_to_err(&iwqp->sc_qp);
if (!iwqp->user_mode)
@@ -541,6 +562,9 @@ static int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp);
irdma_remove_push_mmap_entries(iwqp);
+
+ if (iwqp->sc_qp.qp_uk.qp_id == 1)
+ iwdev->rf->hwqp1_rsvd = false;
irdma_free_qp_rsrc(iwqp);
return 0;
@@ -564,7 +588,11 @@ static void irdma_setup_virt_qp(struct irdma_device *iwdev,
if (iwpbl->pbl_allocated) {
init_info->virtual_map = true;
init_info->sq_pa = qpmr->sq_pbl.idx;
- init_info->rq_pa = qpmr->rq_pbl.idx;
+ /* The RQ of a QP must use a contiguous buffer
+ * when the QP is associated with an SRQ.
+ */
+ init_info->rq_pa = init_info->qp_uk_init_info.srq_uk ?
+ qpmr->rq_pa : qpmr->rq_pbl.idx;
} else {
init_info->sq_pa = qpmr->sq_pbl.addr;
init_info->rq_pa = qpmr->rq_pbl.addr;
@@ -719,6 +747,7 @@ static int irdma_setup_kmode_qp(struct irdma_device *iwdev,
info->rq_pa + (ukinfo->rq_depth * IRDMA_QP_WQE_MIN_SIZE);
ukinfo->sq_size = ukinfo->sq_depth >> ukinfo->sq_shift;
ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift;
+ ukinfo->qp_id = info->qp_uk_init_info.qp_id;
iwqp->max_send_wr = (ukinfo->sq_depth - IRDMA_SQ_RSVD) >> ukinfo->sq_shift;
iwqp->max_recv_wr = (ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift;
@@ -775,9 +804,12 @@ static void irdma_roce_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
roce_info = &iwqp->roce_info;
ether_addr_copy(roce_info->mac_addr, iwdev->netdev->dev_addr);
+ if (iwqp->ibqp.qp_type == IB_QPT_GSI && iwqp->ibqp.qp_num != 1)
+ roce_info->is_qp1 = true;
roce_info->rd_en = true;
roce_info->wr_rdresp_en = true;
- roce_info->bind_en = true;
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3)
+ roce_info->bind_en = true;
roce_info->dcqcn_en = false;
roce_info->rtomin = 5;
@@ -808,7 +840,6 @@ static void irdma_iw_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
ether_addr_copy(iwarp_info->mac_addr, iwdev->netdev->dev_addr);
iwarp_info->rd_en = true;
iwarp_info->wr_rdresp_en = true;
- iwarp_info->bind_en = true;
iwarp_info->ecn_en = true;
iwarp_info->rtomin = 5;
@@ -864,6 +895,47 @@ static void irdma_flush_worker(struct work_struct *work)
irdma_generate_flush_completions(iwqp);
}
+static int irdma_setup_gsi_qp_rsrc(struct irdma_qp *iwqp, u32 *qp_num)
+{
+ struct irdma_device *iwdev = iwqp->iwdev;
+ struct irdma_pci_f *rf = iwdev->rf;
+ unsigned long flags;
+ int ret;
+
+ if (rf->rdma_ver <= IRDMA_GEN_2) {
+ *qp_num = 1;
+ return 0;
+ }
+
+ spin_lock_irqsave(&rf->rsrc_lock, flags);
+ if (!rf->hwqp1_rsvd) {
+ *qp_num = 1;
+ rf->hwqp1_rsvd = true;
+ spin_unlock_irqrestore(&rf->rsrc_lock, flags);
+ } else {
+ spin_unlock_irqrestore(&rf->rsrc_lock, flags);
+ ret = irdma_alloc_rsrc(rf, rf->allocated_qps, rf->max_qp,
+ qp_num, &rf->next_qp);
+ if (ret)
+ return ret;
+ }
+
+ ret = irdma_vchnl_req_add_vport(&rf->sc_dev, iwdev->vport_id, *qp_num,
+					iwdev->vsi.qos);
+ if (ret) {
+ if (*qp_num != 1) {
+ irdma_free_rsrc(rf, rf->allocated_qps, *qp_num);
+ } else {
+ spin_lock_irqsave(&rf->rsrc_lock, flags);
+ rf->hwqp1_rsvd = false;
+ spin_unlock_irqrestore(&rf->rsrc_lock, flags);
+ }
+ return ret;
+ }
+
+ return 0;
+}
+
/**
* irdma_create_qp - create qp
* @ibqp: ptr of qp
@@ -889,6 +961,18 @@ static int irdma_create_qp(struct ib_qp *ibqp,
struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs;
struct irdma_qp_init_info init_info = {};
struct irdma_qp_host_ctx_info *ctx_info;
+ struct irdma_srq *iwsrq;
+ bool srq_valid = false;
+ u32 srq_id = 0;
+
+ if (init_attr->srq) {
+ iwsrq = to_iwsrq(init_attr->srq);
+ srq_valid = true;
+ srq_id = iwsrq->srq_num;
+ init_attr->cap.max_recv_sge = uk_attrs->max_hw_wq_frags;
+ init_attr->cap.max_recv_wr = 4;
+ init_info.qp_uk_init_info.srq_uk = &iwsrq->sc_srq.srq_uk;
+ }
err_code = irdma_validate_qp_attrs(init_attr, iwdev);
if (err_code)
@@ -925,16 +1009,20 @@ static int irdma_create_qp(struct ib_qp *ibqp,
init_info.host_ctx = (__le64 *)(init_info.q2 + IRDMA_Q2_BUF_SIZE);
init_info.host_ctx_pa = init_info.q2_pa + IRDMA_Q2_BUF_SIZE;
- if (init_attr->qp_type == IB_QPT_GSI)
- qp_num = 1;
- else
+ if (init_attr->qp_type == IB_QPT_GSI) {
+ err_code = irdma_setup_gsi_qp_rsrc(iwqp, &qp_num);
+ if (err_code)
+ goto error;
+ iwqp->ibqp.qp_num = 1;
+ } else {
err_code = irdma_alloc_rsrc(rf, rf->allocated_qps, rf->max_qp,
&qp_num, &rf->next_qp);
- if (err_code)
- goto error;
+ if (err_code)
+ goto error;
+ iwqp->ibqp.qp_num = qp_num;
+ }
iwqp->iwpd = iwpd;
- iwqp->ibqp.qp_num = qp_num;
qp = &iwqp->sc_qp;
iwqp->iwscq = to_iwcq(init_attr->send_cq);
iwqp->iwrcq = to_iwcq(init_attr->recv_cq);
@@ -991,13 +1079,22 @@ static int irdma_create_qp(struct ib_qp *ibqp,
}
ctx_info = &iwqp->ctx_info;
+ ctx_info->srq_valid = srq_valid;
+ ctx_info->srq_id = srq_id;
ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
- if (rdma_protocol_roce(&iwdev->ibdev, 1))
+ if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
+ if (dev->ws_add(&iwdev->vsi, 0)) {
+ irdma_cqp_qp_destroy_cmd(&rf->sc_dev, &iwqp->sc_qp);
+ err_code = -EINVAL;
+ goto error;
+ }
+ irdma_qp_add_qos(&iwqp->sc_qp);
irdma_roce_fill_and_set_qpctx_info(iwqp, ctx_info);
- else
+ } else {
irdma_iw_fill_and_set_qpctx_info(iwqp, ctx_info);
+ }
err_code = irdma_cqp_create_qp_cmd(iwqp);
if (err_code)
@@ -1009,16 +1106,6 @@ static int irdma_create_qp(struct ib_qp *ibqp,
iwqp->sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
rf->qp_table[qp_num] = iwqp;
- if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
- if (dev->ws_add(&iwdev->vsi, 0)) {
- irdma_cqp_qp_destroy_cmd(&rf->sc_dev, &iwqp->sc_qp);
- err_code = -EINVAL;
- goto error;
- }
-
- irdma_qp_add_qos(&iwqp->sc_qp);
- }
-
if (udata) {
/* GEN_1 legacy support with libi40iw does not have expanded uresp struct */
if (udata->outlen < sizeof(uresp)) {
@@ -1063,6 +1150,8 @@ static int irdma_get_ib_acc_flags(struct irdma_qp *iwqp)
acc_flags |= IB_ACCESS_REMOTE_READ;
if (iwqp->roce_info.bind_en)
acc_flags |= IB_ACCESS_MW_BIND;
+ if (iwqp->ctx_info.remote_atomics_en)
+ acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
} else {
if (iwqp->iwarp_info.wr_rdresp_en) {
acc_flags |= IB_ACCESS_LOCAL_WRITE;
@@ -1070,8 +1159,8 @@ static int irdma_get_ib_acc_flags(struct irdma_qp *iwqp)
}
if (iwqp->iwarp_info.rd_en)
acc_flags |= IB_ACCESS_REMOTE_READ;
- if (iwqp->iwarp_info.bind_en)
- acc_flags |= IB_ACCESS_MW_BIND;
+ if (iwqp->ctx_info.remote_atomics_en)
+ acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
}
return acc_flags;
}
@@ -1110,6 +1199,7 @@ static int irdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
attr->pkey_index = iwqp->roce_info.p_key;
attr->retry_cnt = iwqp->udp_info.rexmit_thresh;
attr->rnr_retry = iwqp->udp_info.rnr_nak_thresh;
+ attr->min_rnr_timer = iwqp->udp_info.min_rnr_timer;
attr->max_rd_atomic = iwqp->roce_info.ord_size;
attr->max_dest_rd_atomic = iwqp->roce_info.ird_size;
}
@@ -1118,6 +1208,7 @@ static int irdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
init_attr->qp_context = iwqp->ibqp.qp_context;
init_attr->send_cq = iwqp->ibqp.send_cq;
init_attr->recv_cq = iwqp->ibqp.recv_cq;
+ init_attr->srq = iwqp->ibqp.srq;
init_attr->cap = attr->cap;
return 0;
@@ -1242,6 +1333,10 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (attr_mask & IB_QP_RNR_RETRY)
udp_info->rnr_nak_thresh = attr->rnr_retry;
+ if (attr_mask & IB_QP_MIN_RNR_TIMER &&
+ dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3)
+ udp_info->min_rnr_timer = attr->min_rnr_timer;
+
if (attr_mask & IB_QP_RETRY_CNT)
udp_info->rexmit_thresh = attr->retry_cnt;
@@ -1362,6 +1457,9 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
roce_info->wr_rdresp_en = true;
if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
roce_info->rd_en = true;
+ if (dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_ATOMIC_OPS)
+ if (attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)
+ ctx_info->remote_atomics_en = true;
}
wait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend));
@@ -1777,6 +1875,24 @@ exit:
}
/**
+ * irdma_srq_free_rsrc - free up resources for srq
+ * @rf: RDMA PCI function
+ * @iwsrq: srq ptr
+ */
+static void irdma_srq_free_rsrc(struct irdma_pci_f *rf, struct irdma_srq *iwsrq)
+{
+ struct irdma_sc_srq *srq = &iwsrq->sc_srq;
+
+ if (!iwsrq->user_mode) {
+ dma_free_coherent(rf->sc_dev.hw->device, iwsrq->kmem.size,
+ iwsrq->kmem.va, iwsrq->kmem.pa);
+ iwsrq->kmem.va = NULL;
+ }
+
+ irdma_free_rsrc(rf, rf->allocated_srqs, srq->srq_uk.srq_id);
+}
+
+/**
* irdma_cq_free_rsrc - free up resources for cq
* @rf: RDMA PCI function
* @iwcq: cq ptr
@@ -1840,6 +1956,22 @@ static int irdma_process_resize_list(struct irdma_cq *iwcq,
}
/**
+ * irdma_destroy_srq - destroy srq
+ * @ibsrq: srq pointer
+ * @udata: user data
+ */
+static int irdma_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
+{
+ struct irdma_device *iwdev = to_iwdev(ibsrq->device);
+ struct irdma_srq *iwsrq = to_iwsrq(ibsrq);
+ struct irdma_sc_srq *srq = &iwsrq->sc_srq;
+
+ irdma_srq_wq_destroy(iwdev->rf, srq);
+ irdma_srq_free_rsrc(iwdev->rf, iwsrq);
+ return 0;
+}
+
+/**
* irdma_destroy_cq - destroy cq
* @ib_cq: cq pointer
* @udata: user data
@@ -1914,8 +2046,13 @@ static int irdma_resize_cq(struct ib_cq *ibcq, int entries,
if (!iwcq->user_mode) {
entries++;
- if (rf->sc_dev.hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
+
+ if (!iwcq->sc_cq.cq_uk.avoid_mem_cflct &&
+ dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
entries *= 2;
+
+ if (entries & 1)
+ entries += 1; /* cq size must be an even number */
}
info.cq_size = max(entries, 4);
@@ -2022,10 +2159,297 @@ error:
return ret;
}
+/**
+ * irdma_srq_event - event notification for srq limit
+ * @srq: shared srq struct
+ */
+void irdma_srq_event(struct irdma_sc_srq *srq)
+{
+ struct irdma_srq *iwsrq = container_of(srq, struct irdma_srq, sc_srq);
+ struct ib_srq *ibsrq = &iwsrq->ibsrq;
+ struct ib_event event;
+
+ srq->srq_limit = 0;
+
+ if (!ibsrq->event_handler)
+ return;
+
+ event.device = ibsrq->device;
+ event.element.port_num = 1;
+ event.element.srq = ibsrq;
+ event.event = IB_EVENT_SRQ_LIMIT_REACHED;
+ ibsrq->event_handler(&event, ibsrq->srq_context);
+}
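
On the consumer side, the handler invoked above is whatever was registered via ib_srq_init_attr's event_handler/srq_context at SRQ creation; a minimal sketch (the refill work item and ring struct are hypothetical):

    static void my_srq_event_handler(struct ib_event *event, void *srq_context)
    {
            struct my_rx_ring *ring = srq_context;  /* hypothetical state */

            if (event->event == IB_EVENT_SRQ_LIMIT_REACHED)
                    schedule_work(&ring->refill_work); /* repost buffers */
    }
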
+
+/**
+ * irdma_modify_srq - modify srq request
+ * @ibsrq: srq's pointer for modify
+ * @attr: access attributes
+ * @attr_mask: state mask
+ * @udata: user data
+ */
+static int irdma_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
+ enum ib_srq_attr_mask attr_mask,
+ struct ib_udata *udata)
+{
+ struct irdma_device *iwdev = to_iwdev(ibsrq->device);
+ struct irdma_srq *iwsrq = to_iwsrq(ibsrq);
+ struct irdma_cqp_request *cqp_request;
+ struct irdma_pci_f *rf = iwdev->rf;
+ struct irdma_modify_srq_info *info;
+ struct cqp_cmds_info *cqp_info;
+ int status;
+
+ if (attr_mask & IB_SRQ_MAX_WR)
+ return -EINVAL;
+
+ if (!(attr_mask & IB_SRQ_LIMIT))
+ return 0;
+
+ if (attr->srq_limit > iwsrq->sc_srq.srq_uk.srq_size)
+ return -EINVAL;
+
+ /* Execute this cqp op synchronously, so we can update srq_limit
+ * upon successful completion.
+ */
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ info = &cqp_info->in.u.srq_modify.info;
+ info->srq_limit = attr->srq_limit;
+ if (info->srq_limit > 0xFFF)
+ info->srq_limit = 0xFFF;
+ info->arm_limit_event = 1;
+
+ cqp_info->cqp_cmd = IRDMA_OP_SRQ_MODIFY;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.srq_modify.srq = &iwsrq->sc_srq;
+ cqp_info->in.u.srq_modify.scratch = (uintptr_t)cqp_request;
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+ if (status)
+ return status;
+
+ iwsrq->sc_srq.srq_limit = info->srq_limit;
+
+ return 0;
+}
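
From a kernel ULP, arming the limit event is a standard ib_modify_srq call; a sketch with a hypothetical watermark of 16 outstanding WQEs:

    struct ib_srq_attr attr = { .srq_limit = 16 };

    /* IB_SRQ_MAX_WR is rejected above; only IB_SRQ_LIMIT is honored */
    ret = ib_modify_srq(srq, &attr, IB_SRQ_LIMIT);
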
+
+static int irdma_setup_umode_srq(struct irdma_device *iwdev,
+ struct irdma_srq *iwsrq,
+ struct irdma_srq_init_info *info,
+ struct ib_udata *udata)
+{
+#define IRDMA_CREATE_SRQ_MIN_REQ_LEN \
+ offsetofend(struct irdma_create_srq_req, user_shadow_area)
+ struct irdma_create_srq_req req = {};
+ struct irdma_ucontext *ucontext;
+ struct irdma_srq_mr *srqmr;
+ struct irdma_pbl *iwpbl;
+ unsigned long flags;
+
+ iwsrq->user_mode = true;
+ ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
+ ibucontext);
+
+ if (udata->inlen < IRDMA_CREATE_SRQ_MIN_REQ_LEN)
+ return -EINVAL;
+
+ if (ib_copy_from_udata(&req, udata,
+ min(sizeof(req), udata->inlen)))
+ return -EFAULT;
+
+ spin_lock_irqsave(&ucontext->srq_reg_mem_list_lock, flags);
+ iwpbl = irdma_get_pbl((unsigned long)req.user_srq_buf,
+ &ucontext->srq_reg_mem_list);
+ spin_unlock_irqrestore(&ucontext->srq_reg_mem_list_lock, flags);
+ if (!iwpbl)
+ return -EPROTO;
+
+ iwsrq->iwpbl = iwpbl;
+ srqmr = &iwpbl->srq_mr;
+
+ if (iwpbl->pbl_allocated) {
+ info->virtual_map = true;
+ info->pbl_chunk_size = 1;
+ info->first_pm_pbl_idx = srqmr->srq_pbl.idx;
+ info->leaf_pbl_size = 1;
+ } else {
+ info->srq_pa = srqmr->srq_pbl.addr;
+ }
+ info->shadow_area_pa = srqmr->shadow;
+
+ return 0;
+}
+
+static int irdma_setup_kmode_srq(struct irdma_device *iwdev,
+ struct irdma_srq *iwsrq,
+ struct irdma_srq_init_info *info, u32 depth,
+ u8 shift)
+{
+ struct irdma_srq_uk_init_info *ukinfo = &info->srq_uk_init_info;
+ struct irdma_dma_mem *mem = &iwsrq->kmem;
+ u32 size, ring_size;
+
+ ring_size = depth * IRDMA_QP_WQE_MIN_SIZE;
+ size = ring_size + (IRDMA_SHADOW_AREA_SIZE << 3);
+
+ mem->size = ALIGN(size, 256);
+ mem->va = dma_alloc_coherent(iwdev->rf->hw.device, mem->size,
+ &mem->pa, GFP_KERNEL);
+ if (!mem->va)
+ return -ENOMEM;
+
+ ukinfo->srq = mem->va;
+ ukinfo->srq_size = depth >> shift;
+ ukinfo->shadow_area = mem->va + ring_size;
+
+	info->srq_pa = mem->pa;
+	info->shadow_area_pa = info->srq_pa + ring_size;
+
+ return 0;
+}
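
To make the layout concrete (hypothetical depth; IRDMA_SHADOW_AREA_SIZE is 8 per the enum earlier in this patch, and IRDMA_QP_WQE_MIN_SIZE is assumed to be 32 bytes):

    /* depth = 256 quanta, shift = 1:
     *   ring_size = 256 * 32                        = 8192 bytes of WQEs
     *   shadow    = IRDMA_SHADOW_AREA_SIZE(8) << 3  = 64 bytes
     *   mem->size = ALIGN(8192 + 64, 256)           = 8448 bytes
     * WQEs start at mem->va, the shadow area at mem->va + 8192.
     */
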
+
+/**
+ * irdma_create_srq - create srq
+ * @ibsrq: ib's srq pointer
+ * @initattrs: attributes for srq
+ * @udata: user data for create srq
+ */
+static int irdma_create_srq(struct ib_srq *ibsrq,
+ struct ib_srq_init_attr *initattrs,
+ struct ib_udata *udata)
+{
+ struct irdma_device *iwdev = to_iwdev(ibsrq->device);
+ struct ib_srq_attr *attr = &initattrs->attr;
+ struct irdma_pd *iwpd = to_iwpd(ibsrq->pd);
+ struct irdma_srq *iwsrq = to_iwsrq(ibsrq);
+ struct irdma_srq_uk_init_info *ukinfo;
+ struct irdma_cqp_request *cqp_request;
+ struct irdma_srq_init_info info = {};
+ struct irdma_pci_f *rf = iwdev->rf;
+ struct irdma_uk_attrs *uk_attrs;
+ struct cqp_cmds_info *cqp_info;
+ int err_code = 0;
+ u32 depth;
+ u8 shift;
+
+ uk_attrs = &rf->sc_dev.hw_attrs.uk_attrs;
+ ukinfo = &info.srq_uk_init_info;
+
+ if (initattrs->srq_type != IB_SRQT_BASIC)
+ return -EOPNOTSUPP;
+
+ if (!(uk_attrs->feature_flags & IRDMA_FEATURE_SRQ) ||
+ attr->max_sge > uk_attrs->max_hw_wq_frags)
+ return -EINVAL;
+
+ refcount_set(&iwsrq->refcnt, 1);
+ spin_lock_init(&iwsrq->lock);
+ err_code = irdma_alloc_rsrc(rf, rf->allocated_srqs, rf->max_srq,
+ &iwsrq->srq_num, &rf->next_srq);
+ if (err_code)
+ return err_code;
+
+ ukinfo->max_srq_frag_cnt = attr->max_sge;
+ ukinfo->uk_attrs = uk_attrs;
+ ukinfo->srq_id = iwsrq->srq_num;
+
+ irdma_get_wqe_shift(ukinfo->uk_attrs, ukinfo->max_srq_frag_cnt, 0,
+ &shift);
+
+ err_code = irdma_get_srqdepth(ukinfo->uk_attrs, attr->max_wr,
+ shift, &depth);
+ if (err_code)
+ return err_code;
+
+ /* Actual SRQ size in WRs for ring and HW */
+ ukinfo->srq_size = depth >> shift;
+
+ /* Max postable WRs to SRQ */
+ iwsrq->max_wr = (depth - IRDMA_RQ_RSVD) >> shift;
+ attr->max_wr = iwsrq->max_wr;
+
+ if (udata)
+ err_code = irdma_setup_umode_srq(iwdev, iwsrq, &info, udata);
+ else
+ err_code = irdma_setup_kmode_srq(iwdev, iwsrq, &info, depth,
+ shift);
+
+ if (err_code)
+ goto free_rsrc;
+
+ info.vsi = &iwdev->vsi;
+ info.pd = &iwpd->sc_pd;
+
+ err_code = irdma_sc_srq_init(&iwsrq->sc_srq, &info);
+ if (err_code)
+ goto free_dmem;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
+ if (!cqp_request) {
+ err_code = -ENOMEM;
+ goto free_dmem;
+ }
+
+ cqp_info = &cqp_request->info;
+ cqp_info->cqp_cmd = IRDMA_OP_SRQ_CREATE;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.srq_create.srq = &iwsrq->sc_srq;
+ cqp_info->in.u.srq_create.scratch = (uintptr_t)cqp_request;
+ err_code = irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+ if (err_code)
+ goto free_dmem;
+
+ if (udata) {
+ struct irdma_create_srq_resp resp = {};
+
+ resp.srq_id = iwsrq->srq_num;
+ resp.srq_size = ukinfo->srq_size;
+ if (ib_copy_to_udata(udata, &resp,
+ min(sizeof(resp), udata->outlen))) {
+ err_code = -EPROTO;
+ goto srq_destroy;
+ }
+ }
+
+ return 0;
+
+srq_destroy:
+ irdma_srq_wq_destroy(rf, &iwsrq->sc_srq);
+
+free_dmem:
+ if (!iwsrq->user_mode)
+ dma_free_coherent(rf->hw.device, iwsrq->kmem.size,
+ iwsrq->kmem.va, iwsrq->kmem.pa);
+free_rsrc:
+ irdma_free_rsrc(rf, rf->allocated_srqs, iwsrq->srq_num);
+ return err_code;
+}
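
A hedged consumer-side sketch of exercising this create path (sizes are placeholders):

    struct ib_srq_init_attr init_attr = {
            .srq_type = IB_SRQT_BASIC,      /* the only type accepted above */
            .attr = {
                    .max_wr  = 256,
                    .max_sge = 2,
            },
    };
    struct ib_srq *srq;

    srq = ib_create_srq(pd, &init_attr);
    if (IS_ERR(srq))
            return PTR_ERR(srq);
    /* init_attr.attr.max_wr is updated to the actual postable depth */
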
+
+/**
+ * irdma_query_srq - get SRQ attributes
+ * @ibsrq: the SRQ to query
+ * @attr: the attributes of the SRQ
+ */
+static int irdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
+{
+ struct irdma_srq *iwsrq = to_iwsrq(ibsrq);
+
+ attr->max_wr = iwsrq->max_wr;
+ attr->max_sge = iwsrq->sc_srq.srq_uk.max_srq_frag_cnt;
+ attr->srq_limit = iwsrq->sc_srq.srq_limit;
+
+ return 0;
+}
+
static inline int cq_validate_flags(u32 flags, u8 hw_rev)
{
- /* GEN1 does not support CQ create flags */
- if (hw_rev == IRDMA_GEN_1)
+	/* GEN1/2 do not support CQ create flags */
+ if (hw_rev <= IRDMA_GEN_2)
return flags ? -EOPNOTSUPP : 0;
return flags & ~IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION ? -EOPNOTSUPP : 0;
@@ -2058,6 +2482,7 @@ static int irdma_create_cq(struct ib_cq *ibcq,
unsigned long flags;
int err_code;
int entries = attr->cqe;
+ bool cqe_64byte_ena;
err_code = cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev);
if (err_code)
@@ -2081,6 +2506,9 @@ static int irdma_create_cq(struct ib_cq *ibcq,
info.dev = dev;
ukinfo->cq_size = max(entries, 4);
ukinfo->cq_id = cq_num;
+	cqe_64byte_ena = dev->hw_attrs.uk_attrs.feature_flags &
+			 IRDMA_FEATURE_64_BYTE_CQE;
+ ukinfo->avoid_mem_cflct = cqe_64byte_ena;
iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;
if (attr->comp_vector < rf->ceqs_count)
info.ceq_id = attr->comp_vector;
@@ -2116,8 +2544,6 @@ static int irdma_create_cq(struct ib_cq *ibcq,
goto cq_free_rsrc;
}
- iwcq->iwpbl = iwpbl;
- iwcq->cq_mem_size = 0;
cqmr = &iwpbl->cq_mr;
if (rf->sc_dev.hw_attrs.uk_attrs.feature_flags &
@@ -2132,7 +2558,6 @@ static int irdma_create_cq(struct ib_cq *ibcq,
err_code = -EPROTO;
goto cq_free_rsrc;
}
- iwcq->iwpbl_shadow = iwpbl_shadow;
cqmr_shadow = &iwpbl_shadow->cq_mr;
info.shadow_area_pa = cqmr_shadow->cq_pbl.addr;
cqmr->split = true;
@@ -2156,11 +2581,18 @@ static int irdma_create_cq(struct ib_cq *ibcq,
}
entries++;
- if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
+ if (!cqe_64byte_ena && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
entries *= 2;
+
+ if (entries & 1)
+ entries += 1; /* cq size must be an even number */
+
ukinfo->cq_size = entries;
- rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_cqe);
+ if (cqe_64byte_ena)
+ rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_extended_cqe);
+ else
+ rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_cqe);
iwcq->kmem.size = ALIGN(round_up(rsize, 256), 256);
iwcq->kmem.va = dma_alloc_coherent(dev->hw->device,
iwcq->kmem.size,
@@ -2240,8 +2672,9 @@ cq_free_rsrc:
/**
* irdma_get_mr_access - get hw MR access permissions from IB access flags
* @access: IB access flags
+ * @hw_rev: Hardware version
*/
-static inline u16 irdma_get_mr_access(int access)
+static inline u16 irdma_get_mr_access(int access, u8 hw_rev)
{
u16 hw_access = 0;
@@ -2251,8 +2684,10 @@ static inline u16 irdma_get_mr_access(int access)
IRDMA_ACCESS_FLAGS_REMOTEWRITE : 0;
hw_access |= (access & IB_ACCESS_REMOTE_READ) ?
IRDMA_ACCESS_FLAGS_REMOTEREAD : 0;
- hw_access |= (access & IB_ACCESS_MW_BIND) ?
- IRDMA_ACCESS_FLAGS_BIND_WINDOW : 0;
+ if (hw_rev >= IRDMA_GEN_3) {
+ hw_access |= (access & IB_ACCESS_MW_BIND) ?
+ IRDMA_ACCESS_FLAGS_BIND_WINDOW : 0;
+ }
hw_access |= (access & IB_ZERO_BASED) ?
IRDMA_ACCESS_FLAGS_ZERO_BASED : 0;
hw_access |= IRDMA_ACCESS_FLAGS_LOCALREAD;
@@ -2463,6 +2898,7 @@ static int irdma_handle_q_mem(struct irdma_device *iwdev,
struct irdma_mr *iwmr = iwpbl->iwmr;
struct irdma_qp_mr *qpmr = &iwpbl->qp_mr;
struct irdma_cq_mr *cqmr = &iwpbl->cq_mr;
+ struct irdma_srq_mr *srqmr = &iwpbl->srq_mr;
struct irdma_hmc_pble *hmc_p;
u64 *arr = iwmr->pgaddrmem;
u32 pg_size, total;
@@ -2482,7 +2918,10 @@ static int irdma_handle_q_mem(struct irdma_device *iwdev,
total = req->sq_pages + req->rq_pages;
hmc_p = &qpmr->sq_pbl;
qpmr->shadow = (dma_addr_t)arr[total];
-
+ /* Save the RQ's physical address; it is needed
+ * when the QP is associated with an SRQ.
+ */
+ qpmr->rq_pa = (dma_addr_t)arr[req->sq_pages];
if (lvl) {
ret = irdma_check_mem_contiguous(arr, req->sq_pages,
pg_size);
@@ -2502,6 +2941,18 @@ static int irdma_handle_q_mem(struct irdma_device *iwdev,
hmc_p->addr = arr[req->sq_pages];
}
break;
+ case IRDMA_MEMREG_TYPE_SRQ:
+ hmc_p = &srqmr->srq_pbl;
+ srqmr->shadow = (dma_addr_t)arr[req->rq_pages];
+ if (lvl)
+ ret = irdma_check_mem_contiguous(arr, req->rq_pages,
+ pg_size);
+
+ if (!ret)
+ hmc_p->idx = palloc->level1.idx;
+ else
+ hmc_p->addr = arr[0];
+ break;
case IRDMA_MEMREG_TYPE_CQ:
hmc_p = &cqmr->cq_pbl;
@@ -2806,7 +3257,10 @@ static int irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
stag_info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
stag_info->stag_key = (u8)iwmr->stag;
stag_info->total_len = iwmr->len;
- stag_info->access_rights = irdma_get_mr_access(access);
+ stag_info->access_rights = irdma_get_mr_access(access,
+ iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev);
+ if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_ATOMIC_OPS)
+ stag_info->remote_atomics_en = (access & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
stag_info->pd_id = iwpd->sc_pd.pd_id;
stag_info->all_memory = pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY;
if (stag_info->access_rights & IRDMA_ACCESS_FLAGS_ZERO_BASED)
@@ -2972,6 +3426,37 @@ static int irdma_reg_user_mr_type_qp(struct irdma_mem_reg_req req,
return 0;
}
+static int irdma_reg_user_mr_type_srq(struct irdma_mem_reg_req req,
+ struct ib_udata *udata,
+ struct irdma_mr *iwmr)
+{
+ struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
+ struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+ struct irdma_ucontext *ucontext;
+ unsigned long flags;
+ u32 total;
+ int err;
+ u8 lvl;
+
+ total = req.rq_pages + IRDMA_SHADOW_PGCNT;
+ if (total > iwmr->page_cnt)
+ return -EINVAL;
+
+ lvl = req.rq_pages > 1 ? PBLE_LEVEL_1 : PBLE_LEVEL_0;
+ err = irdma_handle_q_mem(iwdev, &req, iwpbl, lvl);
+ if (err)
+ return err;
+
+ ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
+ ibucontext);
+ spin_lock_irqsave(&ucontext->srq_reg_mem_list_lock, flags);
+ list_add_tail(&iwpbl->list, &ucontext->srq_reg_mem_list);
+ iwpbl->on_list = true;
+ spin_unlock_irqrestore(&ucontext->srq_reg_mem_list_lock, flags);
+
+ return 0;
+}
+
static int irdma_reg_user_mr_type_cq(struct irdma_mem_reg_req req,
struct ib_udata *udata,
struct irdma_mr *iwmr)
@@ -3063,6 +3548,12 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
goto error;
break;
+ case IRDMA_MEMREG_TYPE_SRQ:
+ err = irdma_reg_user_mr_type_srq(req, udata, iwmr);
+ if (err)
+ goto error;
+
+ break;
case IRDMA_MEMREG_TYPE_CQ:
err = irdma_reg_user_mr_type_cq(req, udata, iwmr);
if (err)
@@ -3106,9 +3597,9 @@ static struct ib_mr *irdma_reg_user_mr_dmabuf(struct ib_pd *pd, u64 start,
umem_dmabuf = ib_umem_dmabuf_get_pinned(pd->device, start, len, fd, access);
if (IS_ERR(umem_dmabuf)) {
- err = PTR_ERR(umem_dmabuf);
- ibdev_dbg(&iwdev->ibdev, "Failed to get dmabuf umem[%d]\n", err);
- return ERR_PTR(err);
+ ibdev_dbg(&iwdev->ibdev, "Failed to get dmabuf umem[%pe]\n",
+ umem_dmabuf);
+ return ERR_CAST(umem_dmabuf);
}
iwmr = irdma_alloc_iwmr(&umem_dmabuf->umem, pd, virt, IRDMA_MEMREG_TYPE_MEM);
@@ -3382,6 +3873,14 @@ static void irdma_del_memlist(struct irdma_mr *iwmr,
}
spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
break;
+ case IRDMA_MEMREG_TYPE_SRQ:
+ spin_lock_irqsave(&ucontext->srq_reg_mem_list_lock, flags);
+ if (iwpbl->on_list) {
+ iwpbl->on_list = false;
+ list_del(&iwpbl->list);
+ }
+ spin_unlock_irqrestore(&ucontext->srq_reg_mem_list_lock, flags);
+ break;
default:
break;
}
@@ -3461,6 +3960,40 @@ static int irdma_post_send(struct ib_qp *ibqp,
if (ib_wr->send_flags & IB_SEND_FENCE)
info.read_fence = true;
switch (ib_wr->opcode) {
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ if (unlikely(!(dev->hw_attrs.uk_attrs.feature_flags &
+ IRDMA_FEATURE_ATOMIC_OPS))) {
+ err = -EINVAL;
+ break;
+ }
+ info.op_type = IRDMA_OP_TYPE_ATOMIC_COMPARE_AND_SWAP;
+ info.op.atomic_compare_swap.tagged_offset = ib_wr->sg_list[0].addr;
+ info.op.atomic_compare_swap.remote_tagged_offset =
+ atomic_wr(ib_wr)->remote_addr;
+ info.op.atomic_compare_swap.swap_data_bytes = atomic_wr(ib_wr)->swap;
+ info.op.atomic_compare_swap.compare_data_bytes =
+ atomic_wr(ib_wr)->compare_add;
+ info.op.atomic_compare_swap.stag = ib_wr->sg_list[0].lkey;
+ info.op.atomic_compare_swap.remote_stag = atomic_wr(ib_wr)->rkey;
+ err = irdma_uk_atomic_compare_swap(ukqp, &info, false);
+ break;
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ if (unlikely(!(dev->hw_attrs.uk_attrs.feature_flags &
+ IRDMA_FEATURE_ATOMIC_OPS))) {
+ err = -EINVAL;
+ break;
+ }
+ info.op_type = IRDMA_OP_TYPE_ATOMIC_FETCH_AND_ADD;
+ info.op.atomic_fetch_add.tagged_offset = ib_wr->sg_list[0].addr;
+ info.op.atomic_fetch_add.remote_tagged_offset =
+ atomic_wr(ib_wr)->remote_addr;
+ info.op.atomic_fetch_add.fetch_add_data_bytes =
+ atomic_wr(ib_wr)->compare_add;
+ info.op.atomic_fetch_add.stag = ib_wr->sg_list[0].lkey;
+ info.op.atomic_fetch_add.remote_stag =
+ atomic_wr(ib_wr)->rkey;
+ err = irdma_uk_atomic_fetch_add(ukqp, &info, false);
+ break;
case IB_WR_SEND_WITH_IMM:
if (ukqp->qp_caps & IRDMA_SEND_WITH_IMM) {
info.imm_data_valid = true;
@@ -3555,7 +4088,9 @@ static int irdma_post_send(struct ib_qp *ibqp,
stag_info.signaled = info.signaled;
stag_info.read_fence = info.read_fence;
- stag_info.access_rights = irdma_get_mr_access(reg_wr(ib_wr)->access);
+ stag_info.access_rights =
+ irdma_get_mr_access(reg_wr(ib_wr)->access,
+ dev->hw_attrs.uk_attrs.hw_rev);
stag_info.stag_key = reg_wr(ib_wr)->key & 0xff;
stag_info.stag_idx = reg_wr(ib_wr)->key >> 8;
stag_info.page_size = reg_wr(ib_wr)->mr->page_size;
@@ -3594,6 +4129,48 @@ static int irdma_post_send(struct ib_qp *ibqp,
mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
}
+
+ if (err)
+ *bad_wr = ib_wr;
+
+ return err;
+}
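
A consumer-side sketch of driving the new atomic opcodes through the standard verbs (the local buffer, rkey and remote address are placeholders; the local SGE must reference 8 bytes and the remote address must be 8-byte aligned):

    struct ib_sge sge = {
            .addr = dma_addr, .length = 8, .lkey = lkey, /* placeholders */
    };
    struct ib_atomic_wr awr = {
            .wr = {
                    .opcode     = IB_WR_ATOMIC_FETCH_AND_ADD,
                    .send_flags = IB_SEND_SIGNALED,
                    .sg_list    = &sge,
                    .num_sge    = 1,
            },
            .remote_addr = remote_va,
            .compare_add = 1,       /* value to add */
            .rkey        = rkey,
    };
    const struct ib_send_wr *bad_wr;

    ret = ib_post_send(qp, &awr.wr, &bad_wr);
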
+
+/**
+ * irdma_post_srq_recv - post receive wr for kernel application
+ * @ibsrq: ib srq pointer
+ * @ib_wr: work request for receive
+ * @bad_wr: bad wr caused an error
+ */
+static int irdma_post_srq_recv(struct ib_srq *ibsrq,
+ const struct ib_recv_wr *ib_wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ struct irdma_srq *iwsrq = to_iwsrq(ibsrq);
+ struct irdma_srq_uk *uksrq = &iwsrq->sc_srq.srq_uk;
+ struct irdma_post_rq_info post_recv = {};
+ unsigned long flags;
+ int err = 0;
+
+ spin_lock_irqsave(&iwsrq->lock, flags);
+ while (ib_wr) {
+ if (ib_wr->num_sge > uksrq->max_srq_frag_cnt) {
+ err = -EINVAL;
+ goto out;
+ }
+ post_recv.num_sges = ib_wr->num_sge;
+ post_recv.wr_id = ib_wr->wr_id;
+ post_recv.sg_list = ib_wr->sg_list;
+ err = irdma_uk_srq_post_receive(uksrq, &post_recv);
+ if (err)
+ goto out;
+
+ ib_wr = ib_wr->next;
+ }
+
+out:
+ spin_unlock_irqrestore(&iwsrq->lock, flags);
+
if (err)
*bad_wr = ib_wr;
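
And the matching consumer-side post, assuming a pre-mapped receive buffer (all identifiers are placeholders):

    struct ib_sge sge = {
            .addr = rx_dma, .length = rx_len, .lkey = lkey,
    };
    struct ib_recv_wr wr = {
            .wr_id   = (uintptr_t)rx_ctx,
            .sg_list = &sge,
            .num_sge = 1,
    };
    const struct ib_recv_wr *bad_wr;

    ret = ib_post_srq_recv(srq, &wr, &bad_wr);
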
@@ -3619,6 +4196,11 @@ static int irdma_post_recv(struct ib_qp *ibqp,
iwqp = to_iwqp(ibqp);
ukqp = &iwqp->sc_qp.qp_uk;
+ if (ukqp->srq_uk) {
+ *bad_wr = ib_wr;
+ return -EINVAL;
+ }
+
spin_lock_irqsave(&iwqp->lock, flags);
while (ib_wr) {
post_recv.num_sges = ib_wr->num_sge;
@@ -3671,6 +4253,8 @@ static enum ib_wc_status irdma_flush_err_to_ib_wc_status(enum irdma_flush_opcode
return IB_WC_MW_BIND_ERR;
case FLUSH_REM_INV_REQ_ERR:
return IB_WC_REM_INV_REQ_ERR;
+ case FLUSH_RNR_RETRY_EXC_ERR:
+ return IB_WC_RNR_RETRY_EXC_ERR;
case FLUSH_FATAL_ERR:
default:
return IB_WC_FATAL_ERR;
@@ -3727,8 +4311,12 @@ static void irdma_process_cqe(struct ib_wc *entry,
if (cq_poll_info->q_type == IRDMA_CQE_QTYPE_SQ) {
set_ib_wc_op_sq(cq_poll_info, entry);
} else {
- set_ib_wc_op_rq(cq_poll_info, entry,
- qp->qp_uk.qp_caps & IRDMA_SEND_WITH_IMM);
+		if (qp->dev->hw_attrs.uk_attrs.hw_rev <= IRDMA_GEN_2)
+			set_ib_wc_op_rq(cq_poll_info, entry,
+					qp->qp_uk.qp_caps & IRDMA_SEND_WITH_IMM);
+ else
+ set_ib_wc_op_rq_gen_3(cq_poll_info, entry);
if (qp->qp_uk.qp_type != IRDMA_QP_TYPE_ROCE_UD &&
cq_poll_info->stag_invalid_set) {
entry->ex.invalidate_rkey = cq_poll_info->inv_stag;
@@ -3923,40 +4511,7 @@ static int irdma_req_notify_cq(struct ib_cq *ibcq,
return ret;
}
-static int irdma_roce_port_immutable(struct ib_device *ibdev, u32 port_num,
- struct ib_port_immutable *immutable)
-{
- struct ib_port_attr attr;
- int err;
-
- immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
- err = ib_query_port(ibdev, port_num, &attr);
- if (err)
- return err;
-
- immutable->max_mad_size = IB_MGMT_MAD_SIZE;
- immutable->pkey_tbl_len = attr.pkey_tbl_len;
- immutable->gid_tbl_len = attr.gid_tbl_len;
-
- return 0;
-}
-
-static int irdma_iw_port_immutable(struct ib_device *ibdev, u32 port_num,
- struct ib_port_immutable *immutable)
-{
- struct ib_port_attr attr;
- int err;
-
- immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
- err = ib_query_port(ibdev, port_num, &attr);
- if (err)
- return err;
- immutable->gid_tbl_len = attr.gid_tbl_len;
-
- return 0;
-}
-
-static const struct rdma_stat_desc irdma_hw_stat_names[] = {
+static const struct rdma_stat_desc irdma_hw_stat_descs[] = {
/* gen1 - 32-bit */
[IRDMA_HW_STAT_INDEX_IP4RXDISCARD].name = "ip4InDiscards",
[IRDMA_HW_STAT_INDEX_IP4RXTRUNC].name = "ip4InTruncatedPkts",
@@ -3964,9 +4519,6 @@ static const struct rdma_stat_desc irdma_hw_stat_names[] = {
[IRDMA_HW_STAT_INDEX_IP6RXDISCARD].name = "ip6InDiscards",
[IRDMA_HW_STAT_INDEX_IP6RXTRUNC].name = "ip6InTruncatedPkts",
[IRDMA_HW_STAT_INDEX_IP6TXNOROUTE].name = "ip6OutNoRoutes",
- [IRDMA_HW_STAT_INDEX_TCPRTXSEG].name = "tcpRetransSegs",
- [IRDMA_HW_STAT_INDEX_TCPRXOPTERR].name = "tcpInOptErrors",
- [IRDMA_HW_STAT_INDEX_TCPRXPROTOERR].name = "tcpInProtoErrors",
[IRDMA_HW_STAT_INDEX_RXVLANERR].name = "rxVlanErrors",
/* gen1 - 64-bit */
[IRDMA_HW_STAT_INDEX_IP4RXOCTS].name = "ip4InOctets",
@@ -3985,16 +4537,14 @@ static const struct rdma_stat_desc irdma_hw_stat_names[] = {
[IRDMA_HW_STAT_INDEX_IP6TXPKTS].name = "ip6OutPkts",
[IRDMA_HW_STAT_INDEX_IP6TXFRAGS].name = "ip6OutSegRqd",
[IRDMA_HW_STAT_INDEX_IP6TXMCPKTS].name = "ip6OutMcastPkts",
- [IRDMA_HW_STAT_INDEX_TCPRXSEGS].name = "tcpInSegs",
- [IRDMA_HW_STAT_INDEX_TCPTXSEG].name = "tcpOutSegs",
- [IRDMA_HW_STAT_INDEX_RDMARXRDS].name = "iwInRdmaReads",
- [IRDMA_HW_STAT_INDEX_RDMARXSNDS].name = "iwInRdmaSends",
- [IRDMA_HW_STAT_INDEX_RDMARXWRS].name = "iwInRdmaWrites",
- [IRDMA_HW_STAT_INDEX_RDMATXRDS].name = "iwOutRdmaReads",
- [IRDMA_HW_STAT_INDEX_RDMATXSNDS].name = "iwOutRdmaSends",
- [IRDMA_HW_STAT_INDEX_RDMATXWRS].name = "iwOutRdmaWrites",
- [IRDMA_HW_STAT_INDEX_RDMAVBND].name = "iwRdmaBnd",
- [IRDMA_HW_STAT_INDEX_RDMAVINV].name = "iwRdmaInv",
+ [IRDMA_HW_STAT_INDEX_RDMARXRDS].name = "InRdmaReads",
+ [IRDMA_HW_STAT_INDEX_RDMARXSNDS].name = "InRdmaSends",
+ [IRDMA_HW_STAT_INDEX_RDMARXWRS].name = "InRdmaWrites",
+ [IRDMA_HW_STAT_INDEX_RDMATXRDS].name = "OutRdmaReads",
+ [IRDMA_HW_STAT_INDEX_RDMATXSNDS].name = "OutRdmaSends",
+ [IRDMA_HW_STAT_INDEX_RDMATXWRS].name = "OutRdmaWrites",
+ [IRDMA_HW_STAT_INDEX_RDMAVBND].name = "RdmaBnd",
+ [IRDMA_HW_STAT_INDEX_RDMAVINV].name = "RdmaInv",
/* gen2 - 32-bit */
[IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED].name = "cnpHandled",
@@ -4008,9 +4558,59 @@ static const struct rdma_stat_desc irdma_hw_stat_names[] = {
[IRDMA_HW_STAT_INDEX_UDPRXPKTS].name = "RxUDP",
[IRDMA_HW_STAT_INDEX_UDPTXPKTS].name = "TxUDP",
[IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS].name = "RxECNMrkd",
-
+ [IRDMA_HW_STAT_INDEX_TCPRTXSEG].name = "RetransSegs",
+ [IRDMA_HW_STAT_INDEX_TCPRXOPTERR].name = "InOptErrors",
+ [IRDMA_HW_STAT_INDEX_TCPRXPROTOERR].name = "InProtoErrors",
+ [IRDMA_HW_STAT_INDEX_TCPRXSEGS].name = "InSegs",
+ [IRDMA_HW_STAT_INDEX_TCPTXSEG].name = "OutSegs",
+
+ /* gen3 */
+ [IRDMA_HW_STAT_INDEX_RNR_SENT].name = "RNR sent",
+ [IRDMA_HW_STAT_INDEX_RNR_RCVD].name = "RNR received",
+ [IRDMA_HW_STAT_INDEX_RDMAORDLMTCNT].name = "ord limit count",
+ [IRDMA_HW_STAT_INDEX_RDMAIRDLMTCNT].name = "ird limit count",
+ [IRDMA_HW_STAT_INDEX_RDMARXATS].name = "Rx atomics",
+ [IRDMA_HW_STAT_INDEX_RDMATXATS].name = "Tx atomics",
+ [IRDMA_HW_STAT_INDEX_NAKSEQERR].name = "Nak Sequence Error",
+ [IRDMA_HW_STAT_INDEX_NAKSEQERR_IMPLIED].name = "Nak Sequence Error Implied",
+ [IRDMA_HW_STAT_INDEX_RTO].name = "RTO",
+ [IRDMA_HW_STAT_INDEX_RXOOOPKTS].name = "Rcvd Out of order packets",
+ [IRDMA_HW_STAT_INDEX_ICRCERR].name = "CRC errors",
};
+static int irdma_roce_port_immutable(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_immutable *immutable)
+{
+ struct ib_port_attr attr;
+ int err;
+
+ immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
+ err = ib_query_port(ibdev, port_num, &attr);
+ if (err)
+ return err;
+
+ immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+ immutable->pkey_tbl_len = attr.pkey_tbl_len;
+ immutable->gid_tbl_len = attr.gid_tbl_len;
+
+ return 0;
+}
+
+static int irdma_iw_port_immutable(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_immutable *immutable)
+{
+ struct ib_port_attr attr;
+ int err;
+
+ immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
+ err = ib_query_port(ibdev, port_num, &attr);
+ if (err)
+ return err;
+ immutable->gid_tbl_len = attr.gid_tbl_len;
+
+ return 0;
+}
+
static void irdma_get_dev_fw_str(struct ib_device *dev, char *str)
{
struct irdma_device *iwdev = to_iwdev(dev);
@@ -4034,7 +4634,7 @@ static struct rdma_hw_stats *irdma_alloc_hw_port_stats(struct ib_device *ibdev,
int num_counters = dev->hw_attrs.max_stat_idx;
unsigned long lifespan = RDMA_HW_STATS_DEFAULT_LIFESPAN;
- return rdma_alloc_hw_stats_struct(irdma_hw_stat_names, num_counters,
+ return rdma_alloc_hw_stats_struct(irdma_hw_stat_descs, num_counters,
lifespan);
}
@@ -4539,7 +5139,7 @@ static bool irdma_ah_exists(struct irdma_device *iwdev,
new_ah->sc_ah.ah_info.dest_ip_addr[2] ^
new_ah->sc_ah.ah_info.dest_ip_addr[3];
- hash_for_each_possible(iwdev->ah_hash_tbl, ah, list, key) {
+ hash_for_each_possible(iwdev->rf->ah_hash_tbl, ah, list, key) {
		/* Set ah_valid and ah_idx the same so memcmp can work */
new_ah->sc_ah.ah_info.ah_idx = ah->sc_ah.ah_info.ah_idx;
new_ah->sc_ah.ah_info.ah_valid = ah->sc_ah.ah_info.ah_valid;
@@ -4565,14 +5165,14 @@ static int irdma_destroy_ah(struct ib_ah *ibah, u32 ah_flags)
struct irdma_ah *ah = to_iwah(ibah);
if ((ah_flags & RDMA_DESTROY_AH_SLEEPABLE) && ah->parent_ah) {
- mutex_lock(&iwdev->ah_tbl_lock);
+ mutex_lock(&iwdev->rf->ah_tbl_lock);
if (!refcount_dec_and_test(&ah->parent_ah->refcnt)) {
- mutex_unlock(&iwdev->ah_tbl_lock);
+ mutex_unlock(&iwdev->rf->ah_tbl_lock);
return 0;
}
hash_del(&ah->parent_ah->list);
kfree(ah->parent_ah);
- mutex_unlock(&iwdev->ah_tbl_lock);
+ mutex_unlock(&iwdev->rf->ah_tbl_lock);
}
irdma_ah_cqp_op(iwdev->rf, &ah->sc_ah, IRDMA_OP_AH_DESTROY,
@@ -4609,11 +5209,11 @@ static int irdma_create_user_ah(struct ib_ah *ibah,
err = irdma_setup_ah(ibah, attr);
if (err)
return err;
- mutex_lock(&iwdev->ah_tbl_lock);
+ mutex_lock(&iwdev->rf->ah_tbl_lock);
if (!irdma_ah_exists(iwdev, ah)) {
err = irdma_create_hw_ah(iwdev, ah, true);
if (err) {
- mutex_unlock(&iwdev->ah_tbl_lock);
+ mutex_unlock(&iwdev->rf->ah_tbl_lock);
return err;
}
/* Add new AH to list */
@@ -4625,11 +5225,11 @@ static int irdma_create_user_ah(struct ib_ah *ibah,
parent_ah->sc_ah.ah_info.dest_ip_addr[3];
ah->parent_ah = parent_ah;
- hash_add(iwdev->ah_hash_tbl, &parent_ah->list, key);
+ hash_add(iwdev->rf->ah_hash_tbl, &parent_ah->list, key);
refcount_set(&parent_ah->refcnt, 1);
}
}
- mutex_unlock(&iwdev->ah_tbl_lock);
+ mutex_unlock(&iwdev->rf->ah_tbl_lock);
uresp.ah_id = ah->sc_ah.ah_info.ah_idx;
err = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp), udata->outlen));
@@ -4691,6 +5291,20 @@ static enum rdma_link_layer irdma_get_link_layer(struct ib_device *ibdev,
return IB_LINK_LAYER_ETHERNET;
}
+static const struct ib_device_ops irdma_gen1_dev_ops = {
+ .dealloc_driver = irdma_ib_dealloc_device,
+};
+
+static const struct ib_device_ops irdma_gen3_dev_ops = {
+ .alloc_mw = irdma_alloc_mw,
+ .create_srq = irdma_create_srq,
+ .dealloc_mw = irdma_dealloc_mw,
+ .destroy_srq = irdma_destroy_srq,
+ .modify_srq = irdma_modify_srq,
+ .post_srq_recv = irdma_post_srq_recv,
+ .query_srq = irdma_query_srq,
+};
+
static const struct ib_device_ops irdma_roce_dev_ops = {
.attach_mcast = irdma_attach_mcast,
.create_ah = irdma_create_ah,
@@ -4725,7 +5339,6 @@ static const struct ib_device_ops irdma_dev_ops = {
.alloc_hw_port_stats = irdma_alloc_hw_port_stats,
.alloc_mr = irdma_alloc_mr,
- .alloc_mw = irdma_alloc_mw,
.alloc_pd = irdma_alloc_pd,
.alloc_ucontext = irdma_alloc_ucontext,
.create_cq = irdma_create_cq,
@@ -4761,6 +5374,7 @@ static const struct ib_device_ops irdma_dev_ops = {
INIT_RDMA_OBJ_SIZE(ib_cq, irdma_cq, ibcq),
INIT_RDMA_OBJ_SIZE(ib_mw, irdma_mr, ibmw),
INIT_RDMA_OBJ_SIZE(ib_qp, irdma_qp, ibqp),
+ INIT_RDMA_OBJ_SIZE(ib_srq, irdma_srq, ibsrq),
};
/**
@@ -4808,6 +5422,10 @@ static void irdma_init_rdma_device(struct irdma_device *iwdev)
iwdev->ibdev.num_comp_vectors = iwdev->rf->ceqs_count;
iwdev->ibdev.dev.parent = &pcidev->dev;
ib_set_device_ops(&iwdev->ibdev, &irdma_dev_ops);
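+	/* Per-generation op tables are applied on top of the common set:
+	 * gen1 supplies its own dealloc_driver, gen3 adds SRQ and MW ops.
+	 */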
+ if (iwdev->rf->rdma_ver == IRDMA_GEN_1)
+ ib_set_device_ops(&iwdev->ibdev, &irdma_gen1_dev_ops);
+ if (iwdev->rf->rdma_ver >= IRDMA_GEN_3)
+ ib_set_device_ops(&iwdev->ibdev, &irdma_gen3_dev_ops);
}
/**
@@ -4879,5 +5497,9 @@ void irdma_ib_dealloc_device(struct ib_device *ibdev)
struct irdma_device *iwdev = to_iwdev(ibdev);
irdma_rt_deinit_hw(iwdev);
- irdma_ctrl_deinit_hw(iwdev->rf);
+ if (!iwdev->is_vport) {
+ irdma_ctrl_deinit_hw(iwdev->rf);
+ if (iwdev->rf->vchnl_wq)
+ destroy_workqueue(iwdev->rf->vchnl_wq);
+ }
}
diff --git a/drivers/infiniband/hw/irdma/verbs.h b/drivers/infiniband/hw/irdma/verbs.h
index cfa140b36395..ed21c1b56e8e 100644
--- a/drivers/infiniband/hw/irdma/verbs.h
+++ b/drivers/infiniband/hw/irdma/verbs.h
@@ -8,6 +8,7 @@
#define IRDMA_PKEY_TBL_SZ 1
#define IRDMA_DEFAULT_PKEY 0xFFFF
+#define IRDMA_SHADOW_PGCNT 1
struct irdma_ucontext {
struct ib_ucontext ibucontext;
@@ -17,6 +18,8 @@ struct irdma_ucontext {
spinlock_t cq_reg_mem_list_lock; /* protect CQ memory list */
struct list_head qp_reg_mem_list;
spinlock_t qp_reg_mem_list_lock; /* protect QP memory list */
+ struct list_head srq_reg_mem_list;
+ spinlock_t srq_reg_mem_list_lock; /* protect SRQ memory list */
int abi_ver;
u8 legacy_mode : 1;
u8 use_raw_attrs : 1;
@@ -65,10 +68,16 @@ struct irdma_cq_mr {
bool split;
};
+struct irdma_srq_mr {
+ struct irdma_hmc_pble srq_pbl;
+ dma_addr_t shadow;
+};
+
struct irdma_qp_mr {
struct irdma_hmc_pble sq_pbl;
struct irdma_hmc_pble rq_pbl;
dma_addr_t shadow;
+ dma_addr_t rq_pa;
struct page *sq_page;
};
@@ -85,6 +94,7 @@ struct irdma_pbl {
union {
struct irdma_qp_mr qp_mr;
struct irdma_cq_mr cq_mr;
+ struct irdma_srq_mr srq_mr;
};
bool pbl_allocated:1;
@@ -112,24 +122,33 @@ struct irdma_mr {
struct irdma_pbl iwpbl;
};
+struct irdma_srq {
+ struct ib_srq ibsrq;
+ struct irdma_sc_srq sc_srq __aligned(64);
+ struct irdma_dma_mem kmem;
+ u64 *srq_wrid_mem;
+ refcount_t refcnt;
+ spinlock_t lock; /* for poll srq */
+ struct irdma_pbl *iwpbl;
+ struct irdma_sge *sg_list;
+ u16 srq_head;
+ u32 srq_num;
+ u32 max_wr;
+ bool user_mode:1;
+};
+
struct irdma_cq {
struct ib_cq ibcq;
struct irdma_sc_cq sc_cq;
- u16 cq_head;
- u16 cq_size;
u16 cq_num;
bool user_mode;
atomic_t armed;
enum irdma_cmpl_notify last_notify;
- u32 polled_cmpls;
- u32 cq_mem_size;
struct irdma_dma_mem kmem;
struct irdma_dma_mem kmem_shadow;
struct completion free_cq;
refcount_t refcnt;
spinlock_t lock; /* for poll cq */
- struct irdma_pbl *iwpbl;
- struct irdma_pbl *iwpbl_shadow;
struct list_head resize_list;
struct irdma_cq_poll_info cur_cqe;
struct list_head cmpl_generated;
@@ -259,6 +278,12 @@ static inline void set_ib_wc_op_sq(struct irdma_cq_poll_info *cq_poll_info,
case IRDMA_OP_TYPE_FAST_REG_NSMR:
entry->opcode = IB_WC_REG_MR;
break;
+ case IRDMA_OP_TYPE_ATOMIC_COMPARE_AND_SWAP:
+ entry->opcode = IB_WC_COMP_SWAP;
+ break;
+ case IRDMA_OP_TYPE_ATOMIC_FETCH_AND_ADD:
+ entry->opcode = IB_WC_FETCH_ADD;
+ break;
case IRDMA_OP_TYPE_INV_STAG:
entry->opcode = IB_WC_LOCAL_INV;
break;
@@ -267,6 +292,19 @@ static inline void set_ib_wc_op_sq(struct irdma_cq_poll_info *cq_poll_info,
}
}
+static inline void set_ib_wc_op_rq_gen_3(struct irdma_cq_poll_info *info,
+ struct ib_wc *entry)
+{
+ switch (info->op_type) {
+ case IRDMA_OP_TYPE_RDMA_WRITE:
+ case IRDMA_OP_TYPE_RDMA_WRITE_SOL:
+ entry->opcode = IB_WC_RECV_RDMA_WITH_IMM;
+ break;
+ default:
+ entry->opcode = IB_WC_RECV;
+ }
+}
+
static inline void set_ib_wc_op_rq(struct irdma_cq_poll_info *cq_poll_info,
struct ib_wc *entry, bool send_imm_support)
{
diff --git a/drivers/infiniband/hw/irdma/virtchnl.c b/drivers/infiniband/hw/irdma/virtchnl.c
new file mode 100644
index 000000000000..16ad27247527
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/virtchnl.c
@@ -0,0 +1,618 @@
+// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+/* Copyright (c) 2015 - 2024 Intel Corporation */
+
+#include "osdep.h"
+#include "hmc.h"
+#include "defs.h"
+#include "type.h"
+#include "protos.h"
+#include "virtchnl.h"
+#include "ws.h"
+#include "i40iw_hw.h"
+#include "ig3rdma_hw.h"
+
+struct vchnl_reg_map_elem {
+ u16 reg_id;
+ u16 reg_idx;
+ bool pg_rel;
+};
+
+struct vchnl_regfld_map_elem {
+ u16 regfld_id;
+ u16 regfld_idx;
+};
+
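+/* Map virtual-channel register IDs to the driver's local hw_regs indices;
+ * both tables are terminated by an invalid-ID sentinel entry.
+ */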
+static struct vchnl_reg_map_elem vchnl_reg_map[] = {
+ {IRDMA_VCHNL_REG_ID_CQPTAIL, IRDMA_CQPTAIL, false},
+ {IRDMA_VCHNL_REG_ID_CQPDB, IRDMA_CQPDB, false},
+ {IRDMA_VCHNL_REG_ID_CCQPSTATUS, IRDMA_CCQPSTATUS, false},
+ {IRDMA_VCHNL_REG_ID_CCQPHIGH, IRDMA_CCQPHIGH, false},
+ {IRDMA_VCHNL_REG_ID_CCQPLOW, IRDMA_CCQPLOW, false},
+ {IRDMA_VCHNL_REG_ID_CQARM, IRDMA_CQARM, false},
+ {IRDMA_VCHNL_REG_ID_CQACK, IRDMA_CQACK, false},
+ {IRDMA_VCHNL_REG_ID_AEQALLOC, IRDMA_AEQALLOC, false},
+ {IRDMA_VCHNL_REG_ID_CQPERRCODES, IRDMA_CQPERRCODES, false},
+ {IRDMA_VCHNL_REG_ID_WQEALLOC, IRDMA_WQEALLOC, false},
+	{IRDMA_VCHNL_REG_ID_DB_ADDR_OFFSET, IRDMA_DB_ADDR_OFFSET, false},
+	{IRDMA_VCHNL_REG_ID_DYN_CTL, IRDMA_GLINT_DYN_CTL, false},
+	{IRDMA_VCHNL_REG_INV_ID, IRDMA_VCHNL_REG_INV_ID, false}
+};
+
+static struct vchnl_regfld_map_elem vchnl_regfld_map[] = {
+ {IRDMA_VCHNL_REGFLD_ID_CCQPSTATUS_CQP_OP_ERR, IRDMA_CCQPSTATUS_CCQP_ERR_M},
+ {IRDMA_VCHNL_REGFLD_ID_CCQPSTATUS_CCQP_DONE, IRDMA_CCQPSTATUS_CCQP_DONE_M},
+ {IRDMA_VCHNL_REGFLD_ID_CQPSQ_STAG_PDID, IRDMA_CQPSQ_STAG_PDID_M},
+ {IRDMA_VCHNL_REGFLD_ID_CQPSQ_CQ_CEQID, IRDMA_CQPSQ_CQ_CEQID_M},
+ {IRDMA_VCHNL_REGFLD_ID_CQPSQ_CQ_CQID, IRDMA_CQPSQ_CQ_CQID_M},
+ {IRDMA_VCHNL_REGFLD_ID_COMMIT_FPM_CQCNT, IRDMA_COMMIT_FPM_CQCNT_M},
+ {IRDMA_VCHNL_REGFLD_ID_UPESD_HMCN_ID, IRDMA_CQPSQ_UPESD_HMCFNID_M},
+ {IRDMA_VCHNL_REGFLD_INV_ID, IRDMA_VCHNL_REGFLD_INV_ID}
+};
+
+#define IRDMA_VCHNL_REG_COUNT ARRAY_SIZE(vchnl_reg_map)
+#define IRDMA_VCHNL_REGFLD_COUNT ARRAY_SIZE(vchnl_regfld_map)
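+/* Worst case: a GET_REG_LAYOUT response may carry every mapped register
+ * entry plus every mapped register field entry.
+ */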
+#define IRDMA_VCHNL_REGFLD_BUF_SIZE \
+ (IRDMA_VCHNL_REG_COUNT * sizeof(struct irdma_vchnl_reg_info) + \
+ IRDMA_VCHNL_REGFLD_COUNT * sizeof(struct irdma_vchnl_reg_field_info))
+#define IRDMA_REGMAP_RESP_BUF_SIZE (IRDMA_VCHNL_RESP_MIN_SIZE + IRDMA_VCHNL_REGFLD_BUF_SIZE)
+
+/**
+ * irdma_sc_vchnl_init - Initialize dev virtchannel and get hw_rev
+ * @dev: dev structure to update
+ * @info: virtchannel info parameters to fill into the dev structure
+ */
+int irdma_sc_vchnl_init(struct irdma_sc_dev *dev,
+ struct irdma_vchnl_init_info *info)
+{
+ dev->vchnl_up = true;
+ dev->privileged = info->privileged;
+ dev->is_pf = info->is_pf;
+ dev->hw_attrs.uk_attrs.hw_rev = info->hw_rev;
+
+ if (!dev->privileged) {
+ int ret = irdma_vchnl_req_get_ver(dev, IRDMA_VCHNL_CHNL_VER_MAX,
+ &dev->vchnl_ver);
+
+ ibdev_dbg(to_ibdev(dev),
+ "DEV: Get Channel version ret = %d, version is %u\n",
+ ret, dev->vchnl_ver);
+
+ if (ret)
+ return ret;
+
+ ret = irdma_vchnl_req_get_caps(dev);
+ if (ret)
+ return ret;
+
+ dev->hw_attrs.uk_attrs.hw_rev = dev->vc_caps.hw_rev;
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_vchnl_req_verify_resp - Verify requested response size
+ * @vchnl_req: vchnl message requested
+ * @resp_len: response length sent from vchnl peer
+ */
+static int irdma_vchnl_req_verify_resp(struct irdma_vchnl_req *vchnl_req,
+ u16 resp_len)
+{
+ switch (vchnl_req->vchnl_msg->op_code) {
+ case IRDMA_VCHNL_OP_GET_VER:
+ case IRDMA_VCHNL_OP_GET_HMC_FCN:
+ case IRDMA_VCHNL_OP_PUT_HMC_FCN:
+ if (resp_len != vchnl_req->parm_len)
+ return -EBADMSG;
+ break;
+ case IRDMA_VCHNL_OP_GET_RDMA_CAPS:
+ if (resp_len < IRDMA_VCHNL_OP_GET_RDMA_CAPS_MIN_SIZE)
+ return -EBADMSG;
+ break;
+ case IRDMA_VCHNL_OP_GET_REG_LAYOUT:
+ case IRDMA_VCHNL_OP_QUEUE_VECTOR_MAP:
+ case IRDMA_VCHNL_OP_QUEUE_VECTOR_UNMAP:
+ case IRDMA_VCHNL_OP_ADD_VPORT:
+ case IRDMA_VCHNL_OP_DEL_VPORT:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static void irdma_free_vchnl_req_msg(struct irdma_vchnl_req *vchnl_req)
+{
+ kfree(vchnl_req->vchnl_msg);
+}
+
+static int irdma_alloc_vchnl_req_msg(struct irdma_vchnl_req *vchnl_req,
+ struct irdma_vchnl_req_init_info *info)
+{
+ struct irdma_vchnl_op_buf *vchnl_msg;
+
+ vchnl_msg = kzalloc(IRDMA_VCHNL_MAX_MSG_SIZE, GFP_KERNEL);
+
+ if (!vchnl_msg)
+ return -ENOMEM;
+
+ vchnl_msg->op_ctx = (uintptr_t)vchnl_req;
+ vchnl_msg->buf_len = sizeof(*vchnl_msg) + info->req_parm_len;
+ if (info->req_parm_len)
+ memcpy(vchnl_msg->buf, info->req_parm, info->req_parm_len);
+ vchnl_msg->op_code = info->op_code;
+ vchnl_msg->op_ver = info->op_ver;
+
+ vchnl_req->vchnl_msg = vchnl_msg;
+ vchnl_req->parm = info->resp_parm;
+ vchnl_req->parm_len = info->resp_parm_len;
+
+ return 0;
+}
+
+static int irdma_vchnl_req_send_sync(struct irdma_sc_dev *dev,
+ struct irdma_vchnl_req_init_info *info)
+{
+ u16 resp_len = sizeof(dev->vc_recv_buf);
+ struct irdma_vchnl_req vchnl_req = {};
+ u16 msg_len;
+ u8 *msg;
+ int ret;
+
+ ret = irdma_alloc_vchnl_req_msg(&vchnl_req, info);
+ if (ret)
+ return ret;
+
+ msg_len = vchnl_req.vchnl_msg->buf_len;
+ msg = (u8 *)vchnl_req.vchnl_msg;
+
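+	/* The channel mutex serializes requests: the shared receive buffer
+	 * (dev->vc_recv_buf) holds the peer's response until it is consumed
+	 * by irdma_vchnl_req_get_resp().
+	 */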
+ mutex_lock(&dev->vchnl_mutex);
+ ret = ig3rdma_vchnl_send_sync(dev, msg, msg_len, dev->vc_recv_buf,
+ &resp_len);
+ dev->vc_recv_len = resp_len;
+ if (ret)
+ goto exit;
+
+ ret = irdma_vchnl_req_get_resp(dev, &vchnl_req);
+exit:
+ mutex_unlock(&dev->vchnl_mutex);
+ ibdev_dbg(to_ibdev(dev),
+ "VIRT: virtual channel send %s caller: %pS ret=%d op=%u op_ver=%u req_len=%u parm_len=%u resp_len=%u\n",
+ !ret ? "SUCCEEDS" : "FAILS", __builtin_return_address(0),
+ ret, vchnl_req.vchnl_msg->op_code,
+ vchnl_req.vchnl_msg->op_ver, vchnl_req.vchnl_msg->buf_len,
+ vchnl_req.parm_len, vchnl_req.resp_len);
+ irdma_free_vchnl_req_msg(&vchnl_req);
+
+ return ret;
+}
+
+/**
+ * irdma_vchnl_req_get_reg_layout - Get Register Layout
+ * @dev: RDMA device pointer
+ */
+int irdma_vchnl_req_get_reg_layout(struct irdma_sc_dev *dev)
+{
+ u16 reg_idx, reg_id, tmp_reg_id, regfld_idx, regfld_id, tmp_regfld_id;
+ struct irdma_vchnl_reg_field_info *regfld_array = NULL;
+ u8 resp_buffer[IRDMA_REGMAP_RESP_BUF_SIZE] = {};
+ struct vchnl_regfld_map_elem *regfld_map_array;
+ struct irdma_vchnl_req_init_info info = {};
+ struct vchnl_reg_map_elem *reg_map_array;
+ struct irdma_vchnl_reg_info *reg_array;
+ u8 num_bits, shift_cnt;
+ u16 buf_len = 0;
+ u64 bitmask;
+ u32 rindex;
+ int ret;
+
+ if (!dev->vchnl_up)
+ return -EBUSY;
+
+ info.op_code = IRDMA_VCHNL_OP_GET_REG_LAYOUT;
+ info.op_ver = IRDMA_VCHNL_OP_GET_REG_LAYOUT_V0;
+ info.resp_parm = resp_buffer;
+ info.resp_parm_len = sizeof(resp_buffer);
+
+ ret = irdma_vchnl_req_send_sync(dev, &info);
+
+ if (ret)
+ return ret;
+
+	/*
+	 * Parse the response buffer and update the register info: walk the
+	 * register entries until the invalid-ID terminator, then walk the
+	 * register field entries until their terminator.
+	 */
+ reg_array = (struct irdma_vchnl_reg_info *)resp_buffer;
+ for (rindex = 0; rindex < IRDMA_VCHNL_REG_COUNT; rindex++) {
+ buf_len += sizeof(struct irdma_vchnl_reg_info);
+ if (buf_len >= sizeof(resp_buffer))
+ return -ENOMEM;
+
+ regfld_array =
+ (struct irdma_vchnl_reg_field_info *)&reg_array[rindex + 1];
+ reg_id = reg_array[rindex].reg_id;
+ if (reg_id == IRDMA_VCHNL_REG_INV_ID)
+ break;
+
+ reg_id &= ~IRDMA_VCHNL_REG_PAGE_REL;
+ if (reg_id >= IRDMA_VCHNL_REG_COUNT)
+ return -EINVAL;
+
+ /* search regmap for register index in hw_regs.*/
+ reg_map_array = vchnl_reg_map;
+ do {
+ tmp_reg_id = reg_map_array->reg_id;
+ if (tmp_reg_id == reg_id)
+ break;
+
+ reg_map_array++;
+ } while (tmp_reg_id != IRDMA_VCHNL_REG_INV_ID);
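+		/* Register not mapped by this driver; skip it. */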
+ if (tmp_reg_id != reg_id)
+ continue;
+
+ reg_idx = reg_map_array->reg_idx;
+
+		/* Page-relative registers and the DB offset do not need the BAR offset */
+ if (reg_idx == IRDMA_DB_ADDR_OFFSET ||
+ (reg_array[rindex].reg_id & IRDMA_VCHNL_REG_PAGE_REL)) {
+ dev->hw_regs[reg_idx] =
+ (u32 __iomem *)(uintptr_t)reg_array[rindex].reg_offset;
+ continue;
+ }
+
+ /* Update the local HW struct */
+ dev->hw_regs[reg_idx] = ig3rdma_get_reg_addr(dev->hw,
+ reg_array[rindex].reg_offset);
+ if (!dev->hw_regs[reg_idx])
+ return -EINVAL;
+ }
+
+ if (!regfld_array)
+ return -ENOMEM;
+
+ /* set up doorbell variables using mapped DB page */
+ dev->wqe_alloc_db = dev->hw_regs[IRDMA_WQEALLOC];
+ dev->cq_arm_db = dev->hw_regs[IRDMA_CQARM];
+ dev->aeq_alloc_db = dev->hw_regs[IRDMA_AEQALLOC];
+ dev->cqp_db = dev->hw_regs[IRDMA_CQPDB];
+ dev->cq_ack_db = dev->hw_regs[IRDMA_CQACK];
+
+ for (rindex = 0; rindex < IRDMA_VCHNL_REGFLD_COUNT; rindex++) {
+ buf_len += sizeof(struct irdma_vchnl_reg_field_info);
+ if ((buf_len - 1) > sizeof(resp_buffer))
+ break;
+
+ if (regfld_array[rindex].fld_id == IRDMA_VCHNL_REGFLD_INV_ID)
+ break;
+
+ regfld_id = regfld_array[rindex].fld_id;
+ regfld_map_array = vchnl_regfld_map;
+ do {
+ tmp_regfld_id = regfld_map_array->regfld_id;
+ if (tmp_regfld_id == regfld_id)
+ break;
+
+ regfld_map_array++;
+ } while (tmp_regfld_id != IRDMA_VCHNL_REGFLD_INV_ID);
+
+ if (tmp_regfld_id != regfld_id)
+ continue;
+
+ regfld_idx = regfld_map_array->regfld_idx;
+
+ num_bits = regfld_array[rindex].fld_bits;
+ shift_cnt = regfld_array[rindex].fld_shift;
+ if ((num_bits + shift_cnt > 64) || !num_bits) {
+ ibdev_dbg(to_ibdev(dev),
+ "ERR: Invalid field mask id %d bits %d shift %d",
+ regfld_id, num_bits, shift_cnt);
+
+ continue;
+ }
+
+ bitmask = (1ULL << num_bits) - 1;
+ dev->hw_masks[regfld_idx] = bitmask << shift_cnt;
+ dev->hw_shifts[regfld_idx] = shift_cnt;
+ }
+
+ return 0;
+}
+
+int irdma_vchnl_req_add_vport(struct irdma_sc_dev *dev, u16 vport_id,
+ u32 qp1_id, struct irdma_qos *qos)
+{
+ struct irdma_vchnl_resp_vport_info resp_vport = { 0 };
+ struct irdma_vchnl_req_vport_info req_vport = { 0 };
+ struct irdma_vchnl_req_init_info info = { 0 };
+ int ret, i;
+
+ if (!dev->vchnl_up)
+ return -EBUSY;
+
+ info.op_code = IRDMA_VCHNL_OP_ADD_VPORT;
+ info.op_ver = IRDMA_VCHNL_OP_ADD_VPORT_V0;
+ req_vport.vport_id = vport_id;
+ req_vport.qp1_id = qp1_id;
+ info.req_parm_len = sizeof(req_vport);
+ info.req_parm = &req_vport;
+ info.resp_parm = &resp_vport;
+ info.resp_parm_len = sizeof(resp_vport);
+
+ ret = irdma_vchnl_req_send_sync(dev, &info);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
+ qos[i].qs_handle = resp_vport.qs_handle[i];
+ qos[i].valid = true;
+ }
+
+ return 0;
+}
+
+int irdma_vchnl_req_del_vport(struct irdma_sc_dev *dev, u16 vport_id, u32 qp1_id)
+{
+ struct irdma_vchnl_req_init_info info = { 0 };
+ struct irdma_vchnl_req_vport_info req_vport = { 0 };
+
+ if (!dev->vchnl_up)
+ return -EBUSY;
+
+ info.op_code = IRDMA_VCHNL_OP_DEL_VPORT;
+ info.op_ver = IRDMA_VCHNL_OP_DEL_VPORT_V0;
+ req_vport.vport_id = vport_id;
+ req_vport.qp1_id = qp1_id;
+ info.req_parm_len = sizeof(req_vport);
+ info.req_parm = &req_vport;
+
+ return irdma_vchnl_req_send_sync(dev, &info);
+}
+
+/**
+ * irdma_vchnl_req_aeq_vec_map - Map AEQ to vector on this function
+ * @dev: RDMA device pointer
+ * @v_idx: vector index
+ */
+int irdma_vchnl_req_aeq_vec_map(struct irdma_sc_dev *dev, u32 v_idx)
+{
+ struct irdma_vchnl_req_init_info info = {};
+ struct irdma_vchnl_qvlist_info *qvl;
+ struct irdma_vchnl_qv_info *qv;
+ u16 qvl_size, num_vectors = 1;
+ int ret;
+
+ if (!dev->vchnl_up)
+ return -EBUSY;
+
+ qvl_size = struct_size(qvl, qv_info, num_vectors);
+
+ qvl = kzalloc(qvl_size, GFP_KERNEL);
+ if (!qvl)
+ return -ENOMEM;
+
+ qvl->num_vectors = 1;
+ qv = qvl->qv_info;
+
+ qv->ceq_idx = IRDMA_Q_INVALID_IDX;
+ qv->v_idx = v_idx;
+ qv->itr_idx = IRDMA_IDX_ITR0;
+
+ info.op_code = IRDMA_VCHNL_OP_QUEUE_VECTOR_MAP;
+ info.op_ver = IRDMA_VCHNL_OP_QUEUE_VECTOR_MAP_V0;
+ info.req_parm = qvl;
+ info.req_parm_len = qvl_size;
+
+ ret = irdma_vchnl_req_send_sync(dev, &info);
+ kfree(qvl);
+
+ return ret;
+}
+
+/**
+ * irdma_vchnl_req_ceq_vec_map - Map CEQ to vector on this function
+ * @dev: RDMA device pointer
+ * @ceq_id: CEQ index
+ * @v_idx: vector index
+ */
+int irdma_vchnl_req_ceq_vec_map(struct irdma_sc_dev *dev, u16 ceq_id, u32 v_idx)
+{
+ struct irdma_vchnl_req_init_info info = {};
+ struct irdma_vchnl_qvlist_info *qvl;
+ struct irdma_vchnl_qv_info *qv;
+ u16 qvl_size, num_vectors = 1;
+ int ret;
+
+ if (!dev->vchnl_up)
+ return -EBUSY;
+
+ qvl_size = struct_size(qvl, qv_info, num_vectors);
+
+ qvl = kzalloc(qvl_size, GFP_KERNEL);
+ if (!qvl)
+ return -ENOMEM;
+
+ qvl->num_vectors = num_vectors;
+ qv = qvl->qv_info;
+
+ qv->aeq_idx = IRDMA_Q_INVALID_IDX;
+ qv->ceq_idx = ceq_id;
+ qv->v_idx = v_idx;
+ qv->itr_idx = IRDMA_IDX_ITR0;
+
+ info.op_code = IRDMA_VCHNL_OP_QUEUE_VECTOR_MAP;
+ info.op_ver = IRDMA_VCHNL_OP_QUEUE_VECTOR_MAP_V0;
+ info.req_parm = qvl;
+ info.req_parm_len = qvl_size;
+
+ ret = irdma_vchnl_req_send_sync(dev, &info);
+ kfree(qvl);
+
+ return ret;
+}
+
+/**
+ * irdma_vchnl_req_get_ver - Request Channel version
+ * @dev: RDMA device pointer
+ * @ver_req: Virtual channel version requested
+ * @ver_res: Virtual channel version response
+ */
+int irdma_vchnl_req_get_ver(struct irdma_sc_dev *dev, u16 ver_req, u32 *ver_res)
+{
+ struct irdma_vchnl_req_init_info info = {};
+ int ret;
+
+ if (!dev->vchnl_up)
+ return -EBUSY;
+
+ info.op_code = IRDMA_VCHNL_OP_GET_VER;
+ info.op_ver = ver_req;
+ info.resp_parm = ver_res;
+ info.resp_parm_len = sizeof(*ver_res);
+
+ ret = irdma_vchnl_req_send_sync(dev, &info);
+ if (ret)
+ return ret;
+
+ if (*ver_res < IRDMA_VCHNL_CHNL_VER_MIN) {
+ ibdev_dbg(to_ibdev(dev),
+ "VIRT: %s unsupported vchnl version 0x%0x\n",
+ __func__, *ver_res);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_vchnl_req_get_hmc_fcn - Request VF HMC Function
+ * @dev: RDMA device pointer
+ */
+int irdma_vchnl_req_get_hmc_fcn(struct irdma_sc_dev *dev)
+{
+ struct irdma_vchnl_req_hmc_info req_hmc = {};
+ struct irdma_vchnl_resp_hmc_info resp_hmc = {};
+ struct irdma_vchnl_req_init_info info = {};
+ int ret;
+
+ if (!dev->vchnl_up)
+ return -EBUSY;
+
+ info.op_code = IRDMA_VCHNL_OP_GET_HMC_FCN;
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3) {
+ info.op_ver = IRDMA_VCHNL_OP_GET_HMC_FCN_V2;
+ req_hmc.protocol_used = dev->protocol_used;
+ info.req_parm_len = sizeof(req_hmc);
+ info.req_parm = &req_hmc;
+ info.resp_parm = &resp_hmc;
+ info.resp_parm_len = sizeof(resp_hmc);
+ }
+
+ ret = irdma_vchnl_req_send_sync(dev, &info);
+
+ if (ret)
+ return ret;
+
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3) {
+ int i;
+
+ dev->hmc_fn_id = resp_hmc.hmc_func;
+
+ for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
+ dev->qos[i].qs_handle = resp_hmc.qs_handle[i];
+ dev->qos[i].valid = true;
+ }
+ }
+ return 0;
+}
+
+/**
+ * irdma_vchnl_req_put_hmc_fcn - Free VF HMC Function
+ * @dev: RDMA device pointer
+ */
+int irdma_vchnl_req_put_hmc_fcn(struct irdma_sc_dev *dev)
+{
+ struct irdma_vchnl_req_init_info info = {};
+
+ if (!dev->vchnl_up)
+ return -EBUSY;
+
+ info.op_code = IRDMA_VCHNL_OP_PUT_HMC_FCN;
+ info.op_ver = IRDMA_VCHNL_OP_PUT_HMC_FCN_V0;
+
+ return irdma_vchnl_req_send_sync(dev, &info);
+}
+
+/**
+ * irdma_vchnl_req_get_caps - Request RDMA capabilities
+ * @dev: RDMA device pointer
+ */
+int irdma_vchnl_req_get_caps(struct irdma_sc_dev *dev)
+{
+ struct irdma_vchnl_req_init_info info = {};
+ int ret;
+
+ if (!dev->vchnl_up)
+ return -EBUSY;
+
+ info.op_code = IRDMA_VCHNL_OP_GET_RDMA_CAPS;
+ info.op_ver = IRDMA_VCHNL_OP_GET_RDMA_CAPS_V0;
+ info.resp_parm = &dev->vc_caps;
+ info.resp_parm_len = sizeof(dev->vc_caps);
+
+ ret = irdma_vchnl_req_send_sync(dev, &info);
+
+ if (ret)
+ return ret;
+
+ if (dev->vc_caps.hw_rev > IRDMA_GEN_MAX ||
+ dev->vc_caps.hw_rev < IRDMA_GEN_2) {
+ ibdev_dbg(to_ibdev(dev),
+ "ERR: %s unsupported hw_rev version 0x%0x\n",
+ __func__, dev->vc_caps.hw_rev);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_vchnl_req_get_resp - Receive the inbound vchnl response.
+ * @dev: Dev pointer
+ * @vchnl_req: Vchannel request
+ */
+int irdma_vchnl_req_get_resp(struct irdma_sc_dev *dev,
+ struct irdma_vchnl_req *vchnl_req)
+{
+ struct irdma_vchnl_resp_buf *vchnl_msg_resp =
+ (struct irdma_vchnl_resp_buf *)dev->vc_recv_buf;
+ u16 resp_len;
+ int ret;
+
+ if ((uintptr_t)vchnl_req != (uintptr_t)vchnl_msg_resp->op_ctx) {
+ ibdev_dbg(to_ibdev(dev),
+ "VIRT: error vchnl context value does not match\n");
+ return -EBADMSG;
+ }
+
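+	/* Clamp the payload to the caller's buffer; any extra bytes from
+	 * the peer are dropped.
+	 */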
+ resp_len = dev->vc_recv_len - sizeof(*vchnl_msg_resp);
+ resp_len = min(resp_len, vchnl_req->parm_len);
+
+ ret = irdma_vchnl_req_verify_resp(vchnl_req, resp_len);
+ if (ret)
+ return ret;
+
+ ret = (int)vchnl_msg_resp->op_ret;
+ if (ret)
+ return ret;
+
+ vchnl_req->resp_len = 0;
+ if (vchnl_req->parm_len && vchnl_req->parm && resp_len) {
+ memcpy(vchnl_req->parm, vchnl_msg_resp->buf, resp_len);
+ vchnl_req->resp_len = resp_len;
+ ibdev_dbg(to_ibdev(dev), "VIRT: Got response, data size %u\n",
+ resp_len);
+ }
+
+ return 0;
+}
diff --git a/drivers/infiniband/hw/irdma/virtchnl.h b/drivers/infiniband/hw/irdma/virtchnl.h
new file mode 100644
index 000000000000..aa955a9125bd
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/virtchnl.h
@@ -0,0 +1,176 @@
+/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
+/* Copyright (c) 2015 - 2024 Intel Corporation */
+#ifndef IRDMA_VIRTCHNL_H
+#define IRDMA_VIRTCHNL_H
+
+#include "hmc.h"
+#include "irdma.h"
+
+/* IRDMA_VCHNL_CHNL_VER_V0 is for legacy hw, no longer supported. */
+#define IRDMA_VCHNL_CHNL_VER_V2 2
+#define IRDMA_VCHNL_CHNL_VER_MIN IRDMA_VCHNL_CHNL_VER_V2
+#define IRDMA_VCHNL_CHNL_VER_MAX IRDMA_VCHNL_CHNL_VER_V2
+#define IRDMA_VCHNL_OP_GET_HMC_FCN_V0 0
+#define IRDMA_VCHNL_OP_GET_HMC_FCN_V1 1
+#define IRDMA_VCHNL_OP_GET_HMC_FCN_V2 2
+#define IRDMA_VCHNL_OP_PUT_HMC_FCN_V0 0
+#define IRDMA_VCHNL_OP_GET_REG_LAYOUT_V0 0
+#define IRDMA_VCHNL_OP_QUEUE_VECTOR_MAP_V0 0
+#define IRDMA_VCHNL_OP_QUEUE_VECTOR_UNMAP_V0 0
+#define IRDMA_VCHNL_OP_ADD_VPORT_V0 0
+#define IRDMA_VCHNL_OP_DEL_VPORT_V0 0
+#define IRDMA_VCHNL_OP_GET_RDMA_CAPS_V0 0
+#define IRDMA_VCHNL_OP_GET_RDMA_CAPS_MIN_SIZE 1
+
+#define IRDMA_VCHNL_REG_ID_CQPTAIL 0
+#define IRDMA_VCHNL_REG_ID_CQPDB 1
+#define IRDMA_VCHNL_REG_ID_CCQPSTATUS 2
+#define IRDMA_VCHNL_REG_ID_CCQPHIGH 3
+#define IRDMA_VCHNL_REG_ID_CCQPLOW 4
+#define IRDMA_VCHNL_REG_ID_CQARM 5
+#define IRDMA_VCHNL_REG_ID_CQACK 6
+#define IRDMA_VCHNL_REG_ID_AEQALLOC 7
+#define IRDMA_VCHNL_REG_ID_CQPERRCODES 8
+#define IRDMA_VCHNL_REG_ID_WQEALLOC 9
+#define IRDMA_VCHNL_REG_ID_IPCONFIG0 10
+#define IRDMA_VCHNL_REG_ID_DB_ADDR_OFFSET 11
+#define IRDMA_VCHNL_REG_ID_DYN_CTL 12
+#define IRDMA_VCHNL_REG_ID_AEQITRMASK 13
+#define IRDMA_VCHNL_REG_ID_CEQITRMASK 14
+#define IRDMA_VCHNL_REG_INV_ID 0xFFFF
+#define IRDMA_VCHNL_REG_PAGE_REL 0x8000
+
+#define IRDMA_VCHNL_REGFLD_ID_CCQPSTATUS_CQP_OP_ERR 2
+#define IRDMA_VCHNL_REGFLD_ID_CCQPSTATUS_CCQP_DONE 5
+#define IRDMA_VCHNL_REGFLD_ID_CQPSQ_STAG_PDID 6
+#define IRDMA_VCHNL_REGFLD_ID_CQPSQ_CQ_CEQID 7
+#define IRDMA_VCHNL_REGFLD_ID_CQPSQ_CQ_CQID 8
+#define IRDMA_VCHNL_REGFLD_ID_COMMIT_FPM_CQCNT 9
+#define IRDMA_VCHNL_REGFLD_ID_UPESD_HMCN_ID 10
+#define IRDMA_VCHNL_REGFLD_INV_ID 0xFFFF
+
+#define IRDMA_VCHNL_RESP_MIN_SIZE (sizeof(struct irdma_vchnl_resp_buf))
+
+enum irdma_vchnl_ops {
+ IRDMA_VCHNL_OP_GET_VER = 0,
+ IRDMA_VCHNL_OP_GET_HMC_FCN = 1,
+ IRDMA_VCHNL_OP_PUT_HMC_FCN = 2,
+ IRDMA_VCHNL_OP_GET_REG_LAYOUT = 11,
+ IRDMA_VCHNL_OP_GET_RDMA_CAPS = 13,
+ IRDMA_VCHNL_OP_QUEUE_VECTOR_MAP = 14,
+ IRDMA_VCHNL_OP_QUEUE_VECTOR_UNMAP = 15,
+ IRDMA_VCHNL_OP_ADD_VPORT = 16,
+ IRDMA_VCHNL_OP_DEL_VPORT = 17,
+};
+
+struct irdma_vchnl_req_hmc_info {
+ u8 protocol_used;
+ u8 disable_qos;
+} __packed;
+
+struct irdma_vchnl_resp_hmc_info {
+ u16 hmc_func;
+ u16 qs_handle[IRDMA_MAX_USER_PRIORITY];
+} __packed;
+
+struct irdma_vchnl_qv_info {
+ u32 v_idx;
+ u16 ceq_idx;
+ u16 aeq_idx;
+ u8 itr_idx;
+};
+
+struct irdma_vchnl_qvlist_info {
+ u32 num_vectors;
+ struct irdma_vchnl_qv_info qv_info[];
+};
+
+struct irdma_vchnl_req_vport_info {
+ u16 vport_id;
+ u32 qp1_id;
+};
+
+struct irdma_vchnl_resp_vport_info {
+ u16 qs_handle[IRDMA_MAX_USER_PRIORITY];
+};
+
+struct irdma_vchnl_op_buf {
+ u16 op_code;
+ u16 op_ver;
+ u16 buf_len;
+ u16 rsvd;
+ u64 op_ctx;
+ u8 buf[];
+} __packed;
+
+struct irdma_vchnl_resp_buf {
+ u64 op_ctx;
+ u16 buf_len;
+ s16 op_ret;
+ u16 rsvd[2];
+ u8 buf[];
+} __packed;
+
+struct irdma_vchnl_rdma_caps {
+ u8 hw_rev;
+ u16 cqp_timeout_s;
+ u16 cqp_def_timeout_s;
+ u16 max_hw_push_len;
+} __packed;
+
+struct irdma_vchnl_init_info {
+ struct workqueue_struct *vchnl_wq;
+ enum irdma_vers hw_rev;
+ bool privileged;
+ bool is_pf;
+};
+
+struct irdma_vchnl_reg_info {
+ u32 reg_offset;
+ u16 field_cnt;
+ u16 reg_id; /* High bit of reg_id: bar or page relative */
+};
+
+struct irdma_vchnl_reg_field_info {
+ u8 fld_shift;
+ u8 fld_bits;
+ u16 fld_id;
+};
+
+struct irdma_vchnl_req {
+ struct irdma_vchnl_op_buf *vchnl_msg;
+ void *parm;
+ u32 vf_id;
+ u16 parm_len;
+ u16 resp_len;
+};
+
+struct irdma_vchnl_req_init_info {
+ void *req_parm;
+ void *resp_parm;
+ u16 req_parm_len;
+ u16 resp_parm_len;
+ u16 op_code;
+ u16 op_ver;
+} __packed;
+
+struct irdma_qos;
+
+int irdma_sc_vchnl_init(struct irdma_sc_dev *dev,
+ struct irdma_vchnl_init_info *info);
+int irdma_vchnl_req_get_ver(struct irdma_sc_dev *dev, u16 ver_req,
+ u32 *ver_res);
+int irdma_vchnl_req_get_hmc_fcn(struct irdma_sc_dev *dev);
+int irdma_vchnl_req_put_hmc_fcn(struct irdma_sc_dev *dev);
+int irdma_vchnl_req_get_caps(struct irdma_sc_dev *dev);
+int irdma_vchnl_req_get_resp(struct irdma_sc_dev *dev,
+ struct irdma_vchnl_req *vc_req);
+int irdma_vchnl_req_get_reg_layout(struct irdma_sc_dev *dev);
+int irdma_vchnl_req_aeq_vec_map(struct irdma_sc_dev *dev, u32 v_idx);
+int irdma_vchnl_req_ceq_vec_map(struct irdma_sc_dev *dev, u16 ceq_id,
+ u32 v_idx);
+int irdma_vchnl_req_add_vport(struct irdma_sc_dev *dev, u16 vport_id,
+ u32 qp1_id, struct irdma_qos *qos);
+int irdma_vchnl_req_del_vport(struct irdma_sc_dev *dev, u16 vport_id,
+ u32 qp1_id);
+#endif /* IRDMA_VIRTCHNL_H */
diff --git a/drivers/infiniband/hw/mana/cq.c b/drivers/infiniband/hw/mana/cq.c
index 28e154bbb50f..1becc8779123 100644
--- a/drivers/infiniband/hw/mana/cq.c
+++ b/drivers/infiniband/hw/mana/cq.c
@@ -291,6 +291,32 @@ out:
return wc_index;
}
+void mana_drain_gsi_sqs(struct mana_ib_dev *mdev)
+{
+ struct mana_ib_qp *qp = mana_get_qp_ref(mdev, MANA_GSI_QPN, false);
+ struct ud_sq_shadow_wqe *shadow_wqe;
+ struct mana_ib_cq *cq;
+ unsigned long flags;
+
+ if (!qp)
+ return;
+
+ cq = container_of(qp->ibqp.send_cq, struct mana_ib_cq, ibcq);
+
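+	/* Complete any outstanding GSI send WQEs with an error so their
+	 * consumers are not left waiting across device removal.
+	 */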
+ spin_lock_irqsave(&cq->cq_lock, flags);
+ while ((shadow_wqe = shadow_queue_get_next_to_complete(&qp->shadow_sq))
+ != NULL) {
+ shadow_wqe->header.error_code = IB_WC_GENERAL_ERR;
+ shadow_queue_advance_next_to_complete(&qp->shadow_sq);
+ }
+ spin_unlock_irqrestore(&cq->cq_lock, flags);
+
+ if (cq->ibcq.comp_handler)
+ cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
+
+ mana_put_qp_ref(qp);
+}
+
int mana_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
diff --git a/drivers/infiniband/hw/mana/device.c b/drivers/infiniband/hw/mana/device.c
index fa60872f169f..bdeddb642b87 100644
--- a/drivers/infiniband/hw/mana/device.c
+++ b/drivers/infiniband/hw/mana/device.c
@@ -230,6 +230,9 @@ static void mana_ib_remove(struct auxiliary_device *adev)
{
struct mana_ib_dev *dev = dev_get_drvdata(&adev->dev);
+ if (mana_ib_is_rnic(dev))
+ mana_drain_gsi_sqs(dev);
+
ib_unregister_device(&dev->ib_dev);
dma_pool_destroy(dev->av_pool);
if (mana_ib_is_rnic(dev)) {
diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
index 6a2471f2e804..fac159f7128d 100644
--- a/drivers/infiniband/hw/mana/main.c
+++ b/drivers/infiniband/hw/mana/main.c
@@ -273,9 +273,8 @@ int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
umem = ib_umem_get(&mdev->ib_dev, addr, size, IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(umem)) {
- err = PTR_ERR(umem);
- ibdev_dbg(&mdev->ib_dev, "Failed to get umem, %d\n", err);
- return err;
+ ibdev_dbg(&mdev->ib_dev, "Failed to get umem, %pe\n", umem);
+ return PTR_ERR(umem);
}
err = mana_ib_create_zero_offset_dma_region(mdev, umem, &queue->gdma_region);
diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h
index 5d31034ac7fb..9d36232ed880 100644
--- a/drivers/infiniband/hw/mana/mana_ib.h
+++ b/drivers/infiniband/hw/mana/mana_ib.h
@@ -43,6 +43,8 @@
*/
#define MANA_AV_BUFFER_SIZE 64
+#define MANA_GSI_QPN (1)
+
struct mana_ib_adapter_caps {
u32 max_sq_id;
u32 max_rq_id;
@@ -410,7 +412,7 @@ struct mana_ib_ah_attr {
u8 traffic_class;
u16 src_port;
u16 dest_port;
- u32 reserved;
+ u32 flow_label;
};
struct mana_rnic_set_qp_state_req {
@@ -427,8 +429,15 @@ struct mana_rnic_set_qp_state_req {
u32 retry_cnt;
u32 rnr_retry;
u32 min_rnr_timer;
- u32 reserved;
+ u32 rate_limit;
struct mana_ib_ah_attr ah_attr;
+ u64 reserved1;
+ u32 qkey;
+ u32 qp_access_flags;
+ u8 local_ack_timeout;
+ u8 max_rd_atomic;
+ u16 reserved2;
+ u32 reserved3;
}; /* HW Data */
struct mana_rnic_set_qp_state_resp {
@@ -718,6 +727,7 @@ int mana_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
int mana_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
const struct ib_send_wr **bad_wr);
+void mana_drain_gsi_sqs(struct mana_ib_dev *mdev);
int mana_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mana_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
diff --git a/drivers/infiniband/hw/mana/mr.c b/drivers/infiniband/hw/mana/mr.c
index 55701046ffba..3d0245a4c1ed 100644
--- a/drivers/infiniband/hw/mana/mr.c
+++ b/drivers/infiniband/hw/mana/mr.c
@@ -138,7 +138,8 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
if (IS_ERR(mr->umem)) {
err = PTR_ERR(mr->umem);
ibdev_dbg(ibdev,
- "Failed to get umem for register user-mr, %d\n", err);
+ "Failed to get umem for register user-mr, %pe\n",
+ mr->umem);
goto err_free;
}
@@ -220,7 +221,8 @@ struct ib_mr *mana_ib_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start, u64 leng
umem_dmabuf = ib_umem_dmabuf_get_pinned(ibdev, start, length, fd, access_flags);
if (IS_ERR(umem_dmabuf)) {
err = PTR_ERR(umem_dmabuf);
- ibdev_dbg(ibdev, "Failed to get dmabuf umem, %d\n", err);
+ ibdev_dbg(ibdev, "Failed to get dmabuf umem, %pe\n",
+ umem_dmabuf);
goto err_free;
}
diff --git a/drivers/infiniband/hw/mana/qp.c b/drivers/infiniband/hw/mana/qp.c
index a6bf4d539e67..48c1f4977f21 100644
--- a/drivers/infiniband/hw/mana/qp.c
+++ b/drivers/infiniband/hw/mana/qp.c
@@ -735,6 +735,8 @@ static int mana_ib_gd_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int err;
mana_gd_init_req_hdr(&req.hdr, MANA_IB_SET_QP_STATE, sizeof(req), sizeof(resp));
+
+ req.hdr.req.msg_version = GDMA_MESSAGE_V3;
req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.qp_handle = qp->qp_handle;
@@ -748,6 +750,12 @@ static int mana_ib_gd_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
req.retry_cnt = attr->retry_cnt;
req.rnr_retry = attr->rnr_retry;
req.min_rnr_timer = attr->min_rnr_timer;
+ req.rate_limit = attr->rate_limit;
+ req.qkey = attr->qkey;
+ req.local_ack_timeout = attr->timeout;
+ req.qp_access_flags = attr->qp_access_flags;
+ req.max_rd_atomic = attr->max_rd_atomic;
+
if (attr_mask & IB_QP_AV) {
ndev = mana_ib_get_netdev(&mdev->ib_dev, ibqp->port);
if (!ndev) {
@@ -774,6 +782,7 @@ static int mana_ib_gd_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
ibqp->qp_num, attr->dest_qp_num);
req.ah_attr.traffic_class = attr->ah_attr.grh.traffic_class >> 2;
req.ah_attr.hop_limit = attr->ah_attr.grh.hop_limit;
+ req.ah_attr.flow_label = attr->ah_attr.grh.flow_label;
}
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index e6e132f10625..91c714f72099 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -1836,9 +1836,9 @@ static int create_pv_sqp(struct mlx4_ib_demux_pv_ctx *ctx,
tun_qp->qp = ib_create_qp(ctx->pd, &qp_init_attr.init_attr);
if (IS_ERR(tun_qp->qp)) {
ret = PTR_ERR(tun_qp->qp);
+ pr_err("Couldn't create %s QP (%pe)\n",
+ create_tun ? "tunnel" : "special", tun_qp->qp);
tun_qp->qp = NULL;
- pr_err("Couldn't create %s QP (%d)\n",
- create_tun ? "tunnel" : "special", ret);
return ret;
}
@@ -2017,14 +2017,14 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
NULL, ctx, &cq_attr);
if (IS_ERR(ctx->cq)) {
ret = PTR_ERR(ctx->cq);
- pr_err("Couldn't create tunnel CQ (%d)\n", ret);
+ pr_err("Couldn't create tunnel CQ (%pe)\n", ctx->cq);
goto err_buf;
}
ctx->pd = ib_alloc_pd(ctx->ib_dev, 0);
if (IS_ERR(ctx->pd)) {
ret = PTR_ERR(ctx->pd);
- pr_err("Couldn't create tunnel PD (%d)\n", ret);
+ pr_err("Couldn't create tunnel PD (%pe)\n", ctx->pd);
goto err_cq;
}
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 50fd407103c7..f2887ae6390e 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1652,7 +1652,8 @@ int mlx4_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
sqp->roce_v2_gsi = ib_create_qp(pd, init_attr);
if (IS_ERR(sqp->roce_v2_gsi)) {
- pr_err("Failed to create GSI QP for RoCEv2 (%ld)\n", PTR_ERR(sqp->roce_v2_gsi));
+ pr_err("Failed to create GSI QP for RoCEv2 (%pe)\n",
+ sqp->roce_v2_gsi);
sqp->roce_v2_gsi = NULL;
} else {
to_mqp(sqp->roce_v2_gsi)->flags |=
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 9c8003a78334..a23b364e24ff 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -648,7 +648,7 @@ int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
struct mlx5_core_dev *mdev = to_mdev(ibcq->device)->mdev;
struct mlx5_ib_cq *cq = to_mcq(ibcq);
- void __iomem *uar_page = mdev->priv.uar->map;
+ void __iomem *uar_page = mdev->priv.bfreg.up->map;
unsigned long irq_flags;
int ret = 0;
@@ -923,7 +923,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
cq->buf.frag_buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
- *index = dev->mdev->priv.uar->index;
+ *index = dev->mdev->priv.bfreg.up->index;
return 0;
diff --git a/drivers/infiniband/hw/mlx5/data_direct.c b/drivers/infiniband/hw/mlx5/data_direct.c
index b9ba84afaae2..b81ac5709b56 100644
--- a/drivers/infiniband/hw/mlx5/data_direct.c
+++ b/drivers/infiniband/hw/mlx5/data_direct.c
@@ -35,7 +35,7 @@ static int mlx5_data_direct_vpd_get_vuid(struct mlx5_data_direct_dev *dev)
vpd_data = pci_vpd_alloc(pdev, &vpd_size);
if (IS_ERR(vpd_data)) {
- pci_err(pdev, "Unable to read VPD, err=%ld\n", PTR_ERR(vpd_data));
+ pci_err(pdev, "Unable to read VPD, err=%pe\n", vpd_data);
return PTR_ERR(vpd_data);
}
diff --git a/drivers/infiniband/hw/mlx5/gsi.c b/drivers/infiniband/hw/mlx5/gsi.c
index b804f2dd5628..d5487834ed25 100644
--- a/drivers/infiniband/hw/mlx5/gsi.c
+++ b/drivers/infiniband/hw/mlx5/gsi.c
@@ -131,8 +131,9 @@ int mlx5_ib_create_gsi(struct ib_pd *pd, struct mlx5_ib_qp *mqp,
gsi->cq = ib_alloc_cq(pd->device, gsi, attr->cap.max_send_wr, 0,
IB_POLL_SOFTIRQ);
if (IS_ERR(gsi->cq)) {
- mlx5_ib_warn(dev, "unable to create send CQ for GSI QP. error %ld\n",
- PTR_ERR(gsi->cq));
+ mlx5_ib_warn(dev,
+ "unable to create send CQ for GSI QP. error %pe\n",
+ gsi->cq);
ret = PTR_ERR(gsi->cq);
goto err_free_wrs;
}
@@ -147,8 +148,9 @@ int mlx5_ib_create_gsi(struct ib_pd *pd, struct mlx5_ib_qp *mqp,
gsi->rx_qp = ib_create_qp(pd, &hw_init_attr);
if (IS_ERR(gsi->rx_qp)) {
- mlx5_ib_warn(dev, "unable to create hardware GSI QP. error %ld\n",
- PTR_ERR(gsi->rx_qp));
+ mlx5_ib_warn(dev,
+ "unable to create hardware GSI QP. error %pe\n",
+ gsi->rx_qp);
ret = PTR_ERR(gsi->rx_qp);
goto err_destroy_cq;
}
@@ -294,8 +296,9 @@ static void setup_qp(struct mlx5_ib_gsi_qp *gsi, u16 qp_index)
qp = create_gsi_ud_qp(gsi);
if (IS_ERR(qp)) {
- mlx5_ib_warn(dev, "unable to create hardware UD QP for GSI: %ld\n",
- PTR_ERR(qp));
+ mlx5_ib_warn(dev,
+ "unable to create hardware UD QP for GSI: %pe\n",
+ qp);
return;
}
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index d456e4fde3e1..fc1e86f6c409 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -13,6 +13,7 @@
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
+#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
@@ -883,6 +884,51 @@ static void fill_esw_mgr_reg_c0(struct mlx5_core_dev *mdev,
resp->reg_c0.mask = mlx5_eswitch_get_vport_metadata_mask();
}
+/*
+ * Calculate maximum SQ overhead across all QP types.
+ * Other QP types (REG_UMR, UC, RC, UD/SMI/GSI, XRC_TGT)
+ * have smaller overhead than the types calculated below,
+ * so they are implicitly included.
+ */
+static u32 mlx5_ib_calc_max_sq_overhead(void)
+{
+ u32 max_overhead_xrc, overhead_ud_lso, a, b;
+
+ /* XRC_INI */
+ max_overhead_xrc = sizeof(struct mlx5_wqe_xrc_seg);
+ max_overhead_xrc += sizeof(struct mlx5_wqe_ctrl_seg);
+ a = sizeof(struct mlx5_wqe_atomic_seg) +
+ sizeof(struct mlx5_wqe_raddr_seg);
+ b = sizeof(struct mlx5_wqe_umr_ctrl_seg) +
+ sizeof(struct mlx5_mkey_seg) +
+ MLX5_IB_SQ_UMR_INLINE_THRESHOLD / MLX5_IB_UMR_OCTOWORD;
+ max_overhead_xrc += max(a, b);
+
+ /* UD with LSO */
+ overhead_ud_lso = sizeof(struct mlx5_wqe_ctrl_seg);
+ overhead_ud_lso += sizeof(struct mlx5_wqe_eth_pad);
+ overhead_ud_lso += sizeof(struct mlx5_wqe_eth_seg);
+ overhead_ud_lso += sizeof(struct mlx5_wqe_datagram_seg);
+
+ return max(max_overhead_xrc, overhead_ud_lso);
+}
+
+static u32 mlx5_ib_calc_max_qp_wr(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_core_dev *mdev = dev->mdev;
+ u32 max_wqe_bb_units = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
+	u32 max_wqe_size;
+
+	/* max QP overhead + 1 SGE, no inline, no special features */
+ max_wqe_size = mlx5_ib_calc_max_sq_overhead() +
+ sizeof(struct mlx5_wqe_data_seg);
+
+ max_wqe_size = roundup_pow_of_two(max_wqe_size);
+
+ max_wqe_size = ALIGN(max_wqe_size, MLX5_SEND_WQE_BB);
+
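+	/* Number of worst-case WQEs that fit in the device's SQ size cap. */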
+ return (max_wqe_bb_units * MLX5_SEND_WQE_BB) / max_wqe_size;
+}
+
static int mlx5_ib_query_device(struct ib_device *ibdev,
struct ib_device_attr *props,
struct ib_udata *uhw)
@@ -1041,7 +1087,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->max_mr_size = ~0ull;
props->page_size_cap = ~(min_page_size - 1);
props->max_qp = 1 << MLX5_CAP_GEN(mdev, log_max_qp);
- props->max_qp_wr = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
+ props->max_qp_wr = mlx5_ib_calc_max_qp_wr(dev);
max_rq_sg = MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
sizeof(struct mlx5_wqe_data_seg);
max_sq_desc = min_t(int, MLX5_CAP_GEN(mdev, max_wqe_sz_sq), 512);
@@ -1793,7 +1839,8 @@ static void deallocate_uars(struct mlx5_ib_dev *dev,
}
static int mlx5_ib_enable_lb_mp(struct mlx5_core_dev *master,
- struct mlx5_core_dev *slave)
+ struct mlx5_core_dev *slave,
+ struct mlx5_ib_lb_state *lb_state)
{
int err;
@@ -1805,6 +1852,7 @@ static int mlx5_ib_enable_lb_mp(struct mlx5_core_dev *master,
if (err)
goto out;
+ lb_state->force_enable = true;
return 0;
out:
@@ -1813,16 +1861,22 @@ out:
}
static void mlx5_ib_disable_lb_mp(struct mlx5_core_dev *master,
- struct mlx5_core_dev *slave)
+ struct mlx5_core_dev *slave,
+ struct mlx5_ib_lb_state *lb_state)
{
mlx5_nic_vport_update_local_lb(slave, false);
mlx5_nic_vport_update_local_lb(master, false);
+
+ lb_state->force_enable = false;
}
int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
{
int err = 0;
+ if (dev->lb.force_enable)
+ return 0;
+
mutex_lock(&dev->lb.mutex);
if (td)
dev->lb.user_td++;
@@ -1844,6 +1898,9 @@ int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
{
+ if (dev->lb.force_enable)
+ return;
+
mutex_lock(&dev->lb.mutex);
if (td)
dev->lb.user_td--;
@@ -2994,14 +3051,16 @@ int mlx5_ib_dev_res_cq_init(struct mlx5_ib_dev *dev)
pd = ib_alloc_pd(ibdev, 0);
if (IS_ERR(pd)) {
ret = PTR_ERR(pd);
- mlx5_ib_err(dev, "Couldn't allocate PD for res init, err=%d\n", ret);
+ mlx5_ib_err(dev, "Couldn't allocate PD for res init, err=%pe\n",
+ pd);
goto unlock;
}
cq = ib_create_cq(ibdev, NULL, NULL, NULL, &cq_attr);
if (IS_ERR(cq)) {
ret = PTR_ERR(cq);
- mlx5_ib_err(dev, "Couldn't create CQ for res init, err=%d\n", ret);
+ mlx5_ib_err(dev, "Couldn't create CQ for res init, err=%pe\n",
+ cq);
ib_dealloc_pd(pd);
goto unlock;
}
@@ -3045,7 +3104,9 @@ int mlx5_ib_dev_res_srq_init(struct mlx5_ib_dev *dev)
s0 = ib_create_srq(devr->p0, &attr);
if (IS_ERR(s0)) {
ret = PTR_ERR(s0);
- mlx5_ib_err(dev, "Couldn't create SRQ 0 for res init, err=%d\n", ret);
+ mlx5_ib_err(dev,
+ "Couldn't create SRQ 0 for res init, err=%pe\n",
+ s0);
goto unlock;
}
@@ -3057,7 +3118,9 @@ int mlx5_ib_dev_res_srq_init(struct mlx5_ib_dev *dev)
s1 = ib_create_srq(devr->p0, &attr);
if (IS_ERR(s1)) {
ret = PTR_ERR(s1);
- mlx5_ib_err(dev, "Couldn't create SRQ 1 for res init, err=%d\n", ret);
+ mlx5_ib_err(dev,
+ "Couldn't create SRQ 1 for res init, err=%pe\n",
+ s1);
ib_destroy_srq(s0);
}
@@ -3118,6 +3181,7 @@ mlx5_ib_create_data_direct_resources(struct mlx5_ib_dev *dev)
{
int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
struct mlx5_core_dev *mdev = dev->mdev;
+ bool ro_supp = false;
void *mkc;
u32 mkey;
u32 pdn;
@@ -3146,14 +3210,37 @@ mlx5_ib_create_data_direct_resources(struct mlx5_ib_dev *dev)
MLX5_SET(mkc, mkc, length64, 1);
MLX5_SET(mkc, mkc, qpn, 0xffffff);
err = mlx5_core_create_mkey(mdev, &mkey, in, inlen);
- kvfree(in);
if (err)
- goto err;
+ goto err_mkey;
dev->ddr.mkey = mkey;
dev->ddr.pdn = pdn;
+
+ /* create another mkey with RO support */
+ if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write)) {
+ MLX5_SET(mkc, mkc, relaxed_ordering_write, 1);
+ ro_supp = true;
+ }
+
+ if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read)) {
+ MLX5_SET(mkc, mkc, relaxed_ordering_read, 1);
+ ro_supp = true;
+ }
+
+ if (ro_supp) {
+ err = mlx5_core_create_mkey(mdev, &mkey, in, inlen);
+ /* RO is defined as best effort */
+ if (!err) {
+ dev->ddr.mkey_ro = mkey;
+ dev->ddr.mkey_ro_valid = true;
+ }
+ }
+
+ kvfree(in);
return 0;
+err_mkey:
+ kvfree(in);
err:
mlx5_core_dealloc_pd(mdev, pdn);
return err;
@@ -3162,6 +3249,10 @@ err:
static void
mlx5_ib_free_data_direct_resources(struct mlx5_ib_dev *dev)
{
+ if (dev->ddr.mkey_ro_valid)
+ mlx5_core_destroy_mkey(dev->mdev, dev->ddr.mkey_ro);
+
mlx5_core_destroy_mkey(dev->mdev, dev->ddr.mkey);
mlx5_core_dealloc_pd(dev->mdev, dev->ddr.pdn);
}
@@ -3523,7 +3614,7 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
lockdep_assert_held(&mlx5_ib_multiport_mutex);
- mlx5_ib_disable_lb_mp(ibdev->mdev, mpi->mdev);
+ mlx5_ib_disable_lb_mp(ibdev->mdev, mpi->mdev, &ibdev->lb);
mlx5_core_mp_event_replay(ibdev->mdev,
MLX5_DRIVER_EVENT_AFFILIATION_REMOVED,
@@ -3620,7 +3711,7 @@ static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
MLX5_DRIVER_EVENT_AFFILIATION_DONE,
&key);
- err = mlx5_ib_enable_lb_mp(ibdev->mdev, mpi->mdev);
+ err = mlx5_ib_enable_lb_mp(ibdev->mdev, mpi->mdev, &ibdev->lb);
if (err)
goto unbind;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 7ffc7ee92cf0..09d82d5f95e3 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -854,6 +854,8 @@ struct mlx5_ib_port_resources {
struct mlx5_data_direct_resources {
u32 pdn;
u32 mkey;
+ u32 mkey_ro;
+ u8 mkey_ro_valid :1;
};
struct mlx5_ib_resources {
@@ -1109,6 +1111,7 @@ struct mlx5_ib_lb_state {
u32 user_td;
int qps;
bool enabled;
+ bool force_enable;
};
struct mlx5_ib_pf_eq {
@@ -1802,6 +1805,10 @@ mlx5_umem_mkc_find_best_pgsz(struct mlx5_ib_dev *dev, struct ib_umem *umem,
bitmap = GENMASK_ULL(max_log_entity_size_cap, min_log_entity_size_cap);
+ /* In KSM mode HW requires IOVA and mkey's page size to be aligned */
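+	/* i.e. keep only page sizes no larger than the IOVA's lowest set bit */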
+ if (access_mode == MLX5_MKC_ACCESS_MODE_KSM && iova)
+ bitmap &= GENMASK_ULL(__ffs64(iova), 0);
+
return ib_umem_find_best_pgsz(umem, bitmap, iova);
}
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 1317f2cb38a4..325fa04cbe8a 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1652,8 +1652,7 @@ reg_user_mr_dmabuf(struct ib_pd *pd, struct device *dma_device,
fd, access_flags);
if (IS_ERR(umem_dmabuf)) {
- mlx5_ib_dbg(dev, "umem_dmabuf get failed (%ld)\n",
- PTR_ERR(umem_dmabuf));
+ mlx5_ib_dbg(dev, "umem_dmabuf get failed (%pe)\n", umem_dmabuf);
return ERR_CAST(umem_dmabuf);
}
@@ -1717,11 +1716,11 @@ reg_user_mr_dmabuf_by_data_direct(struct ib_pd *pd, u64 offset,
goto end;
}
- /* The device's 'data direct mkey' was created without RO flags to
- * simplify things and allow for a single mkey per device.
- * Since RO is not a must, mask it out accordingly.
+	/* If the device has no 'data direct mkey' with RO flags, mask
+	 * relaxed ordering out of the access flags.
+	 */
- access_flags &= ~IB_ACCESS_RELAXED_ORDERING;
+ if (!dev->ddr.mkey_ro_valid)
+ access_flags &= ~IB_ACCESS_RELAXED_ORDERING;
crossed_mr = reg_user_mr_dmabuf(pd, &data_direct_dev->pdev->dev,
offset, length, virt_addr, fd,
access_flags, MLX5_MKC_ACCESS_MODE_KSM,
diff --git a/drivers/infiniband/hw/mlx5/std_types.c b/drivers/infiniband/hw/mlx5/std_types.c
index bdb568411091..2fcf553044e1 100644
--- a/drivers/infiniband/hw/mlx5/std_types.c
+++ b/drivers/infiniband/hw/mlx5/std_types.c
@@ -83,33 +83,14 @@ static int fill_vport_icm_addr(struct mlx5_core_dev *mdev, u16 vport,
static int fill_vport_vhca_id(struct mlx5_core_dev *mdev, u16 vport,
struct mlx5_ib_uapi_query_port *info)
{
- size_t out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
- u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
- void *out;
- int err;
-
- out = kzalloc(out_sz, GFP_KERNEL);
- if (!out)
- return -ENOMEM;
+ int err = mlx5_vport_get_vhca_id(mdev, vport, &info->vport_vhca_id);
- MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
- MLX5_SET(query_hca_cap_in, in, other_function, true);
- MLX5_SET(query_hca_cap_in, in, function_id, vport);
- MLX5_SET(query_hca_cap_in, in, op_mod,
- MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE |
- HCA_CAP_OPMOD_GET_CUR);
-
- err = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_sz);
if (err)
- goto out;
-
- info->vport_vhca_id = MLX5_GET(query_hca_cap_out, out,
- capability.cmd_hca_cap.vhca_id);
+ return err;
info->flags |= MLX5_IB_UAPI_QUERY_PORT_VPORT_VHCA_ID;
-out:
- kfree(out);
- return err;
+
+ return 0;
}
static int fill_multiport_info(struct mlx5_ib_dev *dev, u32 port_num,
diff --git a/drivers/infiniband/hw/mlx5/umr.c b/drivers/infiniband/hw/mlx5/umr.c
index 7ef35cddce81..4e562e0dd9e1 100644
--- a/drivers/infiniband/hw/mlx5/umr.c
+++ b/drivers/infiniband/hw/mlx5/umr.c
@@ -761,7 +761,11 @@ _mlx5r_umr_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags, bool dd,
if (dd) {
cur_ksm->va = cpu_to_be64(rdma_block_iter_dma_address(&biter));
- cur_ksm->key = cpu_to_be32(dev->ddr.mkey);
+ if (mr->access_flags & IB_ACCESS_RELAXED_ORDERING &&
+ dev->ddr.mkey_ro_valid)
+ cur_ksm->key = cpu_to_be32(dev->ddr.mkey_ro);
+ else
+ cur_ksm->key = cpu_to_be32(dev->ddr.mkey);
if (mr->umem->is_dmabuf &&
(flags & MLX5_IB_UPD_XLT_ZAP)) {
cur_ksm->va = 0;
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index e825e2ef7966..134a79eecfcb 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -492,7 +492,7 @@ static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
{
u32 i, offset, max_scan, qpn;
struct rvt_qpn_map *map;
- u32 ret;
+ int ret;
u32 max_qpn = exclude_prefix == RVT_AIP_QP_PREFIX ?
RVT_AIP_QPN_MAX : RVT_QPN_MAX;
@@ -510,7 +510,8 @@ static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
else
qpt->flags |= n;
spin_unlock(&qpt->lock);
- goto bail;
+
+ return ret;
}
qpn = qpt->last + qpt->incr;
@@ -530,7 +531,8 @@ static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
if (!test_and_set_bit(offset, map->page)) {
qpt->last = qpn;
ret = qpn;
- goto bail;
+
+ return ret;
}
offset += qpt->incr;
/*
@@ -565,10 +567,7 @@ static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
qpn = mk_qpn(qpt, map, offset);
}
- ret = -ENOMEM;
-
-bail:
- return ret;
+ return -ENOMEM;
}
/**
diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c
index 6f8f353e9583..f522820b950c 100644
--- a/drivers/infiniband/sw/rxe/rxe_task.c
+++ b/drivers/infiniband/sw/rxe/rxe_task.c
@@ -132,8 +132,12 @@ static void do_task(struct rxe_task *task)
* yield the cpu and reschedule the task
*/
if (!ret) {
- task->state = TASK_STATE_IDLE;
- resched = 1;
+ if (task->state != TASK_STATE_DRAINING) {
+ task->state = TASK_STATE_IDLE;
+ resched = 1;
+ } else {
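+			/* Task is draining: keep iterating instead of
+			 * parking in the idle state.
+			 */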
+ cont = 1;
+ }
goto exit;
}
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index 35c3bde0d00a..efa2f097b582 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -769,7 +769,7 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
struct siw_wqe *wqe = tx_wqe(qp);
unsigned long flags;
- int rv = 0;
+ int rv = 0, imm_err = 0;
if (wr && !rdma_is_kernel_res(&qp->base_qp.res)) {
siw_dbg_qp(qp, "wr must be empty for user mapped sq\n");
@@ -955,9 +955,17 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
* Send directly if SQ processing is not in progress.
* Eventual immediate errors (rv < 0) do not affect the involved
* RI resources (Verbs, 8.3.1) and thus do not prevent from SQ
- * processing, if new work is already pending. But rv must be passed
- * to caller.
+	 * processing, if new work is already pending. But rv and a pointer
+	 * to the failed work request must be passed to the caller.
*/
+ if (unlikely(rv < 0)) {
+ /*
+ * Immediate error
+ */
+ siw_dbg_qp(qp, "Immediate error %d\n", rv);
+ imm_err = rv;
+ *bad_wr = wr;
+ }
if (wqe->wr_status != SIW_WR_IDLE) {
spin_unlock_irqrestore(&qp->sq_lock, flags);
goto skip_direct_sending;
@@ -982,15 +990,10 @@ skip_direct_sending:
up_read(&qp->state_lock);
- if (rv >= 0)
- return 0;
- /*
- * Immediate error
- */
- siw_dbg_qp(qp, "error %d\n", rv);
+ if (unlikely(imm_err))
+ return imm_err;
- *bad_wr = wr;
- return rv;
+ return (rv >= 0) ? 0 : rv;
}
/*
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 7acafc5c0e09..5b4d76e97437 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -351,26 +351,27 @@ static bool ipoib_is_dev_match_addr_rcu(const struct sockaddr *addr,
}
/*
- * Find the master net_device on top of the given net_device.
+ * Find the L2 master net_device on top of the given net_device.
* @dev: base IPoIB net_device
*
- * Returns the master net_device with a reference held, or the same net_device
- * if no master exists.
+ * Returns the L2 master net_device with a reference held if an L2 master
+ * exists (such as a bond netdevice), or returns the same netdev with a
+ * reference held when no master exists or when the master is an L3 master
+ * (such as a VRF netdev).
*/
static struct net_device *ipoib_get_master_net_dev(struct net_device *dev)
{
struct net_device *master;
rcu_read_lock();
+
master = netdev_master_upper_dev_get_rcu(dev);
+ if (!master || netif_is_l3_master(master))
+ master = dev;
+
dev_hold(master);
rcu_read_unlock();
- if (master)
- return master;
-
- dev_hold(dev);
- return dev;
+ return master;
}
struct ipoib_walk_data {
@@ -522,7 +523,7 @@ static struct net_device *ipoib_get_net_dev_by_params(
if (ret)
return NULL;
- /* See if we can find a unique device matching the L2 parameters */
+ /* See if we can find a unique device matching the pkey and GID */
matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index,
gid, NULL, &net_dev);
@@ -535,7 +536,7 @@ static struct net_device *ipoib_get_net_dev_by_params(
dev_put(net_dev);
- /* Couldn't find a unique device with L2 parameters only. Use L3
+ /* Couldn't find a unique device with pkey and GID only. Use L3
* address to uniquely match the net device */
matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index,
gid, addr, &net_dev);
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 5dfb4644446b..71269446353d 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -667,9 +667,9 @@ static int srpt_refresh_port(struct srpt_port *sport)
srpt_mad_recv_handler,
sport, 0);
if (IS_ERR(mad_agent)) {
- pr_err("%s-%d: MAD agent registration failed (%ld). Note: this is expected if SR-IOV is enabled.\n",
+ pr_err("%s-%d: MAD agent registration failed (%pe). Note: this is expected if SR-IOV is enabled.\n",
dev_name(&sport->sdev->device->dev), sport->port,
- PTR_ERR(mad_agent));
+ mad_agent);
sport->mad_agent = NULL;
memset(&port_modify, 0, sizeof(port_modify));
port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
@@ -1865,8 +1865,8 @@ retry:
IB_POLL_WORKQUEUE);
if (IS_ERR(ch->cq)) {
ret = PTR_ERR(ch->cq);
- pr_err("failed to create CQ cqe= %d ret= %d\n",
- ch->rq_size + sq_size, ret);
+ pr_err("failed to create CQ cqe= %d ret= %pe\n",
+ ch->rq_size + sq_size, ch->cq);
goto out;
}
ch->cq_size = ch->rq_size + sq_size;
@@ -3132,7 +3132,7 @@ static int srpt_alloc_srq(struct srpt_device *sdev)
WARN_ON_ONCE(sdev->srq);
srq = ib_create_srq(sdev->pd, &srq_attr);
if (IS_ERR(srq)) {
- pr_debug("ib_create_srq() failed: %ld\n", PTR_ERR(srq));
+ pr_debug("ib_create_srq() failed: %pe\n", srq);
return PTR_ERR(srq);
}
@@ -3236,8 +3236,7 @@ static int srpt_add_one(struct ib_device *device)
if (rdma_port_get_link_layer(device, 1) == IB_LINK_LAYER_INFINIBAND)
sdev->cm_id = ib_create_cm_id(device, srpt_cm_handler, sdev);
if (IS_ERR(sdev->cm_id)) {
- pr_info("ib_create_cm_id() failed: %ld\n",
- PTR_ERR(sdev->cm_id));
+ pr_info("ib_create_cm_id() failed: %pe\n", sdev->cm_id);
ret = PTR_ERR(sdev->cm_id);
sdev->cm_id = NULL;
if (!rdma_cm_id)
@@ -3687,8 +3686,7 @@ static struct rdma_cm_id *srpt_create_rdma_id(struct sockaddr *listen_addr)
rdma_cm_id = rdma_create_id(&init_net, srpt_rdma_cm_handler,
NULL, RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(rdma_cm_id)) {
- pr_err("RDMA/CM ID creation failed: %ld\n",
- PTR_ERR(rdma_cm_id));
+ pr_err("RDMA/CM ID creation failed: %pe\n", rdma_cm_id);
goto out;
}
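The srpt hunks above switch error prints from `%ld` plus PTR_ERR() to `%pe`, which decodes an ERR_PTR-encoded pointer into a symbolic errno name. A minimal userspace sketch of the encoding trick, with simplified stand-ins for the kernel's ERR_PTR/PTR_ERR/IS_ERR from <linux/err.h>; strerror() only approximates what %pe would print:

#include <stdio.h>
#include <string.h>
#include <errno.h>

/* Simplified userspace stand-ins for the kernel helpers. */
#define MAX_ERRNO 4095
static inline void *err_ptr(long err) { return (void *)err; }
static inline long ptr_err(const void *p) { return (long)p; }
static inline int is_err(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *cq = err_ptr(-ENOMEM);	/* what a failed allocator returns */

	if (is_err(cq))
		/* %pe in the kernel prints "-ENOMEM" by name; here we
		 * approximate it with strerror(). */
		printf("failed to create CQ: %ld (%s)\n",
		       ptr_err(cq), strerror((int)-ptr_err(cq)));
	return 0;
}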
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 95f63c5f6159..a698a2e7ce2a 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -792,6 +792,11 @@ struct amd_iommu {
u32 flags;
volatile u64 *cmd_sem;
atomic64_t cmd_sem_val;
+ /*
+ * Track the physical address so build_completion_wait() can use it
+ * directly, avoiding special checks and handling for kdump.
+ */
+ u64 cmd_sem_paddr;
#ifdef CONFIG_AMD_IOMMU_DEBUGFS
/* DebugFS Info */
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index ba9e582a8bbe..f2991c11867c 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -406,6 +406,9 @@ static void iommu_set_device_table(struct amd_iommu *iommu)
BUG_ON(iommu->mmio_base == NULL);
+ if (is_kdump_kernel())
+ return;
+
entry = iommu_virt_to_phys(dev_table);
entry |= (dev_table_size >> 12) - 1;
memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
@@ -646,7 +649,10 @@ static inline int __init alloc_dev_table(struct amd_iommu_pci_seg *pci_seg)
static inline void free_dev_table(struct amd_iommu_pci_seg *pci_seg)
{
- iommu_free_pages(pci_seg->dev_table);
+ if (is_kdump_kernel())
+ memunmap((void *)pci_seg->dev_table);
+ else
+ iommu_free_pages(pci_seg->dev_table);
pci_seg->dev_table = NULL;
}
@@ -710,6 +716,26 @@ static void __init free_alias_table(struct amd_iommu_pci_seg *pci_seg)
pci_seg->alias_table = NULL;
}
+static inline void *iommu_memremap(unsigned long paddr, size_t size)
+{
+ phys_addr_t phys;
+
+ if (!paddr)
+ return NULL;
+
+ /*
+ * Obtain the true physical address in the kdump kernel when SME is
+ * enabled. A previous kernel with SME enabled combined with a kdump
+ * kernel without SME support is currently not supported.
+ */
+ phys = __sme_clr(paddr);
+
+ if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
+ return (__force void *)ioremap_encrypted(phys, size);
+ else
+ return memremap(phys, size, MEMREMAP_WB);
+}
+
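iommu_memremap() strips the SME encryption attribute with __sme_clr() before remapping, because addresses saved by an SME-enabled previous kernel carry the C-bit. A hedged userspace sketch of that masking; the bit position below is invented for illustration, while the real mask is discovered at boot and held in sme_me_mask:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical C-bit position, for illustration only. */
#define FAKE_SME_BIT	47ULL
#define FAKE_SME_MASK	(1ULL << FAKE_SME_BIT)

static uint64_t sme_clr(uint64_t paddr)
{
	/* Drop the encryption attribute to get the true physical address. */
	return paddr & ~FAKE_SME_MASK;
}

int main(void)
{
	uint64_t reg = 0x800012345000ULL | FAKE_SME_MASK;

	printf("raw: %#llx true phys: %#llx\n",
	       (unsigned long long)reg,
	       (unsigned long long)sme_clr(reg));
	return 0;
}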
/*
* Allocates the command buffer. This buffer is per AMD IOMMU. We can
* write commands to that buffer later and the IOMMU will execute them
@@ -795,11 +821,16 @@ static void iommu_enable_command_buffer(struct amd_iommu *iommu)
BUG_ON(iommu->cmd_buf == NULL);
- entry = iommu_virt_to_phys(iommu->cmd_buf);
- entry |= MMIO_CMD_SIZE_512;
-
- memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
- &entry, sizeof(entry));
+ if (!is_kdump_kernel()) {
+ /*
+ * The command buffer is re-used in the kdump kernel, so setting
+ * the MMIO register is not required.
+ */
+ entry = iommu_virt_to_phys(iommu->cmd_buf);
+ entry |= MMIO_CMD_SIZE_512;
+ memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
+ &entry, sizeof(entry));
+ }
amd_iommu_reset_cmd_buffer(iommu);
}
@@ -850,10 +881,15 @@ static void iommu_enable_event_buffer(struct amd_iommu *iommu)
BUG_ON(iommu->evt_buf == NULL);
- entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;
-
- memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
- &entry, sizeof(entry));
+ if (!is_kdump_kernel()) {
+ /*
+ * The event buffer is re-used in the kdump kernel, so setting
+ * the MMIO register is not required.
+ */
+ entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;
+ memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
+ &entry, sizeof(entry));
+ }
/* set head and tail to zero manually */
writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
@@ -942,8 +978,91 @@ err_out:
static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
{
iommu->cmd_sem = iommu_alloc_4k_pages(iommu, GFP_KERNEL, 1);
+ if (!iommu->cmd_sem)
+ return -ENOMEM;
+ iommu->cmd_sem_paddr = iommu_virt_to_phys((void *)iommu->cmd_sem);
+ return 0;
+}
+
+static int __init remap_event_buffer(struct amd_iommu *iommu)
+{
+ u64 paddr;
+
+ pr_info_once("Re-using event buffer from the previous kernel\n");
+ paddr = readq(iommu->mmio_base + MMIO_EVT_BUF_OFFSET) & PM_ADDR_MASK;
+ iommu->evt_buf = iommu_memremap(paddr, EVT_BUFFER_SIZE);
+
+ return iommu->evt_buf ? 0 : -ENOMEM;
+}
+
+static int __init remap_command_buffer(struct amd_iommu *iommu)
+{
+ u64 paddr;
+
+ pr_info_once("Re-using command buffer from the previous kernel\n");
+ paddr = readq(iommu->mmio_base + MMIO_CMD_BUF_OFFSET) & PM_ADDR_MASK;
+ iommu->cmd_buf = iommu_memremap(paddr, CMD_BUFFER_SIZE);
+
+ return iommu->cmd_buf ? 0 : -ENOMEM;
+}
+
+static int __init remap_or_alloc_cwwb_sem(struct amd_iommu *iommu)
+{
+ u64 paddr;
+
+ if (check_feature(FEATURE_SNP)) {
+ /*
+ * When SNP is enabled, the exclusion base register is used for the
+ * completion wait buffer (CWB) address. Read and re-use it.
+ */
+ pr_info_once("Re-using CWB buffers from the previous kernel\n");
+ paddr = readq(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET) & PM_ADDR_MASK;
+ iommu->cmd_sem = iommu_memremap(paddr, PAGE_SIZE);
+ if (!iommu->cmd_sem)
+ return -ENOMEM;
+ iommu->cmd_sem_paddr = paddr;
+ } else {
+ return alloc_cwwb_sem(iommu);
+ }
+
+ return 0;
+}
+
+static int __init alloc_iommu_buffers(struct amd_iommu *iommu)
+{
+ int ret;
+
+ /*
+ * For kdump boot, remap and reuse the completion-wait, command,
+ * and event buffers allocated by the previous kernel.
+ */
+ if (is_kdump_kernel()) {
+ ret = remap_or_alloc_cwwb_sem(iommu);
+ if (ret)
+ return ret;
+
+ ret = remap_command_buffer(iommu);
+ if (ret)
+ return ret;
+
+ ret = remap_event_buffer(iommu);
+ if (ret)
+ return ret;
+ } else {
+ ret = alloc_cwwb_sem(iommu);
+ if (ret)
+ return ret;
- return iommu->cmd_sem ? 0 : -ENOMEM;
+ ret = alloc_command_buffer(iommu);
+ if (ret)
+ return ret;
+
+ ret = alloc_event_buffer(iommu);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
static void __init free_cwwb_sem(struct amd_iommu *iommu)
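alloc_iommu_buffers() gives both boot modes a single entry point: remap the previous kernel's buffers under kdump, allocate fresh ones otherwise, failing fast on the first error. A hedged userspace skeleton of that dispatch shape; every helper below is a stub, not a driver symbol:

#include <stdio.h>
#include <stdbool.h>

/* Stubs standing in for the real remap/alloc helpers. */
static int remap_cwwb(void) { puts("remap cwwb"); return 0; }
static int remap_cmd(void)  { puts("remap cmd");  return 0; }
static int remap_evt(void)  { puts("remap evt");  return 0; }
static int alloc_cwwb(void) { puts("alloc cwwb"); return 0; }
static int alloc_cmd(void)  { puts("alloc cmd");  return 0; }
static int alloc_evt(void)  { puts("alloc evt");  return 0; }

static int setup_buffers(bool is_kdump)
{
	int (*kdump_steps[])(void)  = { remap_cwwb, remap_cmd, remap_evt, NULL };
	int (*normal_steps[])(void) = { alloc_cwwb, alloc_cmd, alloc_evt, NULL };
	int (**step)(void) = is_kdump ? kdump_steps : normal_steps;
	int ret;

	/* Same shape as alloc_iommu_buffers(): three steps, first failure wins. */
	for (; *step; step++) {
		ret = (*step)();
		if (ret)
			return ret;
	}
	return 0;
}

int main(void)
{
	setup_buffers(true);	/* kdump boot */
	setup_buffers(false);	/* normal boot */
	return 0;
}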
@@ -951,6 +1070,38 @@ static void __init free_cwwb_sem(struct amd_iommu *iommu)
if (iommu->cmd_sem)
iommu_free_pages((void *)iommu->cmd_sem);
}
+static void __init unmap_cwwb_sem(struct amd_iommu *iommu)
+{
+ if (iommu->cmd_sem) {
+ if (check_feature(FEATURE_SNP))
+ memunmap((void *)iommu->cmd_sem);
+ else
+ iommu_free_pages((void *)iommu->cmd_sem);
+ }
+}
+
+static void __init unmap_command_buffer(struct amd_iommu *iommu)
+{
+ memunmap((void *)iommu->cmd_buf);
+}
+
+static void __init unmap_event_buffer(struct amd_iommu *iommu)
+{
+ memunmap(iommu->evt_buf);
+}
+
+static void __init free_iommu_buffers(struct amd_iommu *iommu)
+{
+ if (is_kdump_kernel()) {
+ unmap_cwwb_sem(iommu);
+ unmap_command_buffer(iommu);
+ unmap_event_buffer(iommu);
+ } else {
+ free_cwwb_sem(iommu);
+ free_command_buffer(iommu);
+ free_event_buffer(iommu);
+ }
+}
static void iommu_enable_xt(struct amd_iommu *iommu)
{
@@ -982,15 +1133,12 @@ static void set_dte_bit(struct dev_table_entry *dte, u8 bit)
dte->data[i] |= (1UL << _bit);
}
-static bool __copy_device_table(struct amd_iommu *iommu)
+static bool __reuse_device_table(struct amd_iommu *iommu)
{
- u64 int_ctl, int_tab_len, entry = 0;
struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
- struct dev_table_entry *old_devtb = NULL;
- u32 lo, hi, devid, old_devtb_size;
+ u32 lo, hi, old_devtb_size;
phys_addr_t old_devtb_phys;
- u16 dom_id, dte_v, irq_v;
- u64 tmp;
+ u64 entry;
/* Each IOMMU uses a separate device table with the same size */
lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET);
@@ -1015,66 +1163,20 @@ static bool __copy_device_table(struct amd_iommu *iommu)
pr_err("The address of old device table is above 4G, not trustworthy!\n");
return false;
}
- old_devtb = (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT) && is_kdump_kernel())
- ? (__force void *)ioremap_encrypted(old_devtb_phys,
- pci_seg->dev_table_size)
- : memremap(old_devtb_phys, pci_seg->dev_table_size, MEMREMAP_WB);
-
- if (!old_devtb)
- return false;
- pci_seg->old_dev_tbl_cpy = iommu_alloc_pages_sz(
- GFP_KERNEL | GFP_DMA32, pci_seg->dev_table_size);
+ /*
+ * Re-use the previous kernel's device table for kdump.
+ */
+ pci_seg->old_dev_tbl_cpy = iommu_memremap(old_devtb_phys, pci_seg->dev_table_size);
if (pci_seg->old_dev_tbl_cpy == NULL) {
- pr_err("Failed to allocate memory for copying old device table!\n");
- memunmap(old_devtb);
+ pr_err("Failed to remap memory for reusing old device table!\n");
return false;
}
- for (devid = 0; devid <= pci_seg->last_bdf; ++devid) {
- pci_seg->old_dev_tbl_cpy[devid] = old_devtb[devid];
- dom_id = old_devtb[devid].data[1] & DEV_DOMID_MASK;
- dte_v = old_devtb[devid].data[0] & DTE_FLAG_V;
-
- if (dte_v && dom_id) {
- pci_seg->old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0];
- pci_seg->old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1];
- /* Reserve the Domain IDs used by previous kernel */
- if (ida_alloc_range(&pdom_ids, dom_id, dom_id, GFP_ATOMIC) != dom_id) {
- pr_err("Failed to reserve domain ID 0x%x\n", dom_id);
- memunmap(old_devtb);
- return false;
- }
- /* If gcr3 table existed, mask it out */
- if (old_devtb[devid].data[0] & DTE_FLAG_GV) {
- tmp = (DTE_GCR3_30_15 | DTE_GCR3_51_31);
- pci_seg->old_dev_tbl_cpy[devid].data[1] &= ~tmp;
- tmp = (DTE_GCR3_14_12 | DTE_FLAG_GV);
- pci_seg->old_dev_tbl_cpy[devid].data[0] &= ~tmp;
- }
- }
-
- irq_v = old_devtb[devid].data[2] & DTE_IRQ_REMAP_ENABLE;
- int_ctl = old_devtb[devid].data[2] & DTE_IRQ_REMAP_INTCTL_MASK;
- int_tab_len = old_devtb[devid].data[2] & DTE_INTTABLEN_MASK;
- if (irq_v && (int_ctl || int_tab_len)) {
- if ((int_ctl != DTE_IRQ_REMAP_INTCTL) ||
- (int_tab_len != DTE_INTTABLEN_512 &&
- int_tab_len != DTE_INTTABLEN_2K)) {
- pr_err("Wrong old irq remapping flag: %#x\n", devid);
- memunmap(old_devtb);
- return false;
- }
-
- pci_seg->old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2];
- }
- }
- memunmap(old_devtb);
-
return true;
}
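The device table base register packs a page-aligned base address together with a size field of ((size >> 12) - 1): iommu_set_device_table() writes it, and __reuse_device_table() reads it back through the lo/hi MMIO words. A round-trip sketch of that packing; the mask widths below are simplified for illustration, not the spec-exact field layout:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define SIZE_MASK	0x1ffULL	/* illustrative field width */
#define BASE_MASK	(~SIZE_MASK)	/* page-aligned base, simplified */

static uint64_t pack(uint64_t base, uint64_t size)
{
	/* Base must be page aligned; size is encoded as (size >> 12) - 1. */
	return (base & BASE_MASK) | ((size >> PAGE_SHIFT) - 1);
}

int main(void)
{
	uint64_t entry = pack(0x7f000000ULL, 2 * 1024 * 1024);
	uint64_t base  = entry & BASE_MASK;
	uint64_t size  = ((entry & SIZE_MASK) + 1) << PAGE_SHIFT;

	printf("entry=%#llx base=%#llx size=%llu\n",
	       (unsigned long long)entry, (unsigned long long)base,
	       (unsigned long long)size);
	return 0;
}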
-static bool copy_device_table(void)
+static bool reuse_device_table(void)
{
struct amd_iommu *iommu;
struct amd_iommu_pci_seg *pci_seg;
@@ -1082,17 +1184,17 @@ static bool copy_device_table(void)
if (!amd_iommu_pre_enabled)
return false;
- pr_warn("Translation is already enabled - trying to copy translation structures\n");
+ pr_warn("Translation is already enabled - trying to reuse translation structures\n");
/*
 * All IOMMUs within a PCI segment share a common device table.
- * Hence copy device table only once per PCI segment.
+ * Hence reuse the device table only once per PCI segment.
*/
for_each_pci_segment(pci_seg) {
for_each_iommu(iommu) {
if (pci_seg->id != iommu->pci_seg->id)
continue;
- if (!__copy_device_table(iommu))
+ if (!__reuse_device_table(iommu))
return false;
break;
}
@@ -1655,9 +1757,7 @@ static void __init free_sysfs(struct amd_iommu *iommu)
static void __init free_iommu_one(struct amd_iommu *iommu)
{
free_sysfs(iommu);
- free_cwwb_sem(iommu);
- free_command_buffer(iommu);
- free_event_buffer(iommu);
+ free_iommu_buffers(iommu);
amd_iommu_free_ppr_log(iommu);
free_ga_log(iommu);
iommu_unmap_mmio_space(iommu);
@@ -1821,14 +1921,9 @@ static int __init init_iommu_one_late(struct amd_iommu *iommu)
{
int ret;
- if (alloc_cwwb_sem(iommu))
- return -ENOMEM;
-
- if (alloc_command_buffer(iommu))
- return -ENOMEM;
-
- if (alloc_event_buffer(iommu))
- return -ENOMEM;
+ ret = alloc_iommu_buffers(iommu);
+ if (ret)
+ return ret;
iommu->int_enabled = false;
@@ -2778,8 +2873,8 @@ static void early_enable_iommu(struct amd_iommu *iommu)
* This function finally enables all IOMMUs found in the system after
* they have been initialized.
*
- * Or if in kdump kernel and IOMMUs are all pre-enabled, try to copy
- * the old content of device table entries. Not this case or copy failed,
+ * Or, if in a kdump kernel with all IOMMUs pre-enabled, try to reuse
+ * the old content of the device table entries. If that is not the case,
 * or reuse failed, just continue as a normal kernel does.
*/
static void early_enable_iommus(void)
@@ -2787,18 +2882,25 @@ static void early_enable_iommus(void)
struct amd_iommu *iommu;
struct amd_iommu_pci_seg *pci_seg;
- if (!copy_device_table()) {
+ if (!reuse_device_table()) {
/*
- * If come here because of failure in copying device table from old
+ * If we get here because reusing the device table from the old
 * kernel with all IOMMUs enabled failed, print an error message
 * and try to free the allocated old_dev_tbl_cpy.
*/
- if (amd_iommu_pre_enabled)
- pr_err("Failed to copy DEV table from previous kernel.\n");
+ if (amd_iommu_pre_enabled) {
+ pr_err("Failed to reuse DEV table from previous kernel.\n");
+ /*
+ * Bail out early if the DEV table from the previous kernel cannot
+ * be remapped/reused while SNP is enabled, as IOMMU commands will
+ * time out without a DEV table and cause a kdump boot panic.
+ */
+ BUG_ON(check_feature(FEATURE_SNP));
+ }
for_each_pci_segment(pci_seg) {
if (pci_seg->old_dev_tbl_cpy != NULL) {
- iommu_free_pages(pci_seg->old_dev_tbl_cpy);
+ memunmap((void *)pci_seg->old_dev_tbl_cpy);
pci_seg->old_dev_tbl_cpy = NULL;
}
}
@@ -2808,7 +2910,7 @@ static void early_enable_iommus(void)
early_enable_iommu(iommu);
}
} else {
- pr_info("Copied DEV table from previous kernel.\n");
+ pr_info("Reused DEV table from previous kernel.\n");
for_each_pci_segment(pci_seg) {
iommu_free_pages(pci_seg->dev_table);
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index eb348c63a8d0..2e1865daa1ce 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -14,6 +14,7 @@
#include <linux/pci-ats.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/debugfs.h>
#include <linux/scatterlist.h>
#include <linux/dma-map-ops.h>
@@ -265,7 +266,7 @@ static inline int get_acpihid_device_id(struct device *dev,
return -EINVAL;
if (fw_bug)
dev_err_once(dev, FW_BUG "No ACPI device matched UID, but %d device%s matched HID.\n",
- hid_count, hid_count > 1 ? "s" : "");
+ hid_count, str_plural(hid_count));
if (hid_count > 1)
return -EINVAL;
if (entry)
@@ -1195,7 +1196,7 @@ static void build_completion_wait(struct iommu_cmd *cmd,
struct amd_iommu *iommu,
u64 data)
{
- u64 paddr = iommu_virt_to_phys((void *)iommu->cmd_sem);
+ u64 paddr = iommu->cmd_sem_paddr;
memset(cmd, 0, sizeof(*cmd));
cmd->data[0] = lower_32_bits(paddr) | CMD_COMPL_WAIT_STORE_MASK;
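build_completion_wait() splits the cached cmd_sem_paddr across two 32-bit command words, ORing a store flag into the low word. A small sketch of that split; lower/upper_32_bits are re-created locally, and STORE_FLAG is an invented placeholder for CMD_COMPL_WAIT_STORE_MASK:

#include <stdio.h>
#include <stdint.h>

#define lower_32_bits(x)	((uint32_t)(x))
#define upper_32_bits(x)	((uint32_t)((x) >> 32))
#define STORE_FLAG		0x1u	/* illustrative flag bit */

int main(void)
{
	uint64_t paddr = 0x123456789000ULL;	/* cached semaphore address */
	uint32_t cmd[2];

	cmd[0] = lower_32_bits(paddr) | STORE_FLAG;
	cmd[1] = upper_32_bits(paddr);

	printf("cmd[0]=%#x cmd[1]=%#x\n", cmd[0], cmd[1]);
	return 0;
}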
diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c
index 190f28d76615..95a4e62b8f63 100644
--- a/drivers/iommu/apple-dart.c
+++ b/drivers/iommu/apple-dart.c
@@ -122,6 +122,8 @@
#define DART_T8110_ERROR_ADDR_LO 0x170
#define DART_T8110_ERROR_ADDR_HI 0x174
+#define DART_T8110_ERROR_STREAMS 0x1c0
+
#define DART_T8110_PROTECT 0x200
#define DART_T8110_UNPROTECT 0x204
#define DART_T8110_PROTECT_LOCK 0x208
@@ -133,6 +135,7 @@
#define DART_T8110_TCR 0x1000
#define DART_T8110_TCR_REMAP GENMASK(11, 8)
#define DART_T8110_TCR_REMAP_EN BIT(7)
+#define DART_T8110_TCR_FOUR_LEVEL BIT(3)
#define DART_T8110_TCR_BYPASS_DAPF BIT(2)
#define DART_T8110_TCR_BYPASS_DART BIT(1)
#define DART_T8110_TCR_TRANSLATE_ENABLE BIT(0)
@@ -166,22 +169,23 @@ struct apple_dart_hw {
int max_sid_count;
- u64 lock;
- u64 lock_bit;
+ u32 lock;
+ u32 lock_bit;
- u64 error;
+ u32 error;
- u64 enable_streams;
+ u32 enable_streams;
- u64 tcr;
- u64 tcr_enabled;
- u64 tcr_disabled;
- u64 tcr_bypass;
+ u32 tcr;
+ u32 tcr_enabled;
+ u32 tcr_disabled;
+ u32 tcr_bypass;
+ u32 tcr_4level;
- u64 ttbr;
- u64 ttbr_valid;
- u64 ttbr_addr_field_shift;
- u64 ttbr_shift;
+ u32 ttbr;
+ u32 ttbr_valid;
+ u32 ttbr_addr_field_shift;
+ u32 ttbr_shift;
int ttbr_count;
};
@@ -217,6 +221,7 @@ struct apple_dart {
u32 pgsize;
u32 num_streams;
u32 supports_bypass : 1;
+ u32 four_level : 1;
struct iommu_group *sid2group[DART_MAX_STREAMS];
struct iommu_device iommu;
@@ -305,13 +310,19 @@ static struct apple_dart_domain *to_dart_domain(struct iommu_domain *dom)
}
static void
-apple_dart_hw_enable_translation(struct apple_dart_stream_map *stream_map)
+apple_dart_hw_enable_translation(struct apple_dart_stream_map *stream_map, int levels)
{
struct apple_dart *dart = stream_map->dart;
+ u32 tcr = dart->hw->tcr_enabled;
int sid;
+ if (levels == 4)
+ tcr |= dart->hw->tcr_4level;
+
+ WARN_ON(levels != 3 && levels != 4);
+ WARN_ON(levels == 4 && !dart->four_level);
for_each_set_bit(sid, stream_map->sidmap, dart->num_streams)
- writel(dart->hw->tcr_enabled, dart->regs + DART_TCR(dart, sid));
+ writel(tcr, dart->regs + DART_TCR(dart, sid));
}
static void apple_dart_hw_disable_dma(struct apple_dart_stream_map *stream_map)
@@ -569,7 +580,8 @@ apple_dart_setup_translation(struct apple_dart_domain *domain,
for (; i < stream_map->dart->hw->ttbr_count; ++i)
apple_dart_hw_clear_ttbr(stream_map, i);
- apple_dart_hw_enable_translation(stream_map);
+ apple_dart_hw_enable_translation(stream_map,
+ pgtbl_cfg->apple_dart_cfg.n_levels);
stream_map->dart->hw->invalidate_tlb(stream_map);
}
@@ -614,7 +626,7 @@ static int apple_dart_finalize_domain(struct apple_dart_domain *dart_domain,
dart_domain->domain.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
dart_domain->domain.geometry.aperture_start = 0;
dart_domain->domain.geometry.aperture_end =
- (dma_addr_t)DMA_BIT_MASK(dart->ias);
+ (dma_addr_t)DMA_BIT_MASK(pgtbl_cfg.ias);
dart_domain->domain.geometry.force_aperture = true;
dart_domain->finalized = true;
@@ -807,6 +819,8 @@ static int apple_dart_of_xlate(struct device *dev,
if (cfg_dart) {
if (cfg_dart->pgsize != dart->pgsize)
return -EINVAL;
+ if (cfg_dart->ias != dart->ias)
+ return -EINVAL;
}
cfg->supports_bypass &= dart->supports_bypass;
@@ -1077,6 +1091,9 @@ static irqreturn_t apple_dart_t8110_irq(int irq, void *dev)
error, stream_idx, error_code, fault_name, addr);
writel(error, dart->regs + DART_T8110_ERROR);
+ for (int i = 0; i < BITS_TO_U32(dart->num_streams); i++)
+ writel(U32_MAX, dart->regs + DART_T8110_ERROR_STREAMS + 4 * i);
+
return IRQ_HANDLED;
}
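The T8110 IRQ handler now also clears one 32-bit DART_T8110_ERROR_STREAMS word per 32 streams. The BITS_TO_U32() arithmetic behind that loop, as a standalone sketch:

#include <stdio.h>

#define BITS_TO_U32(n)	(((n) + 31) / 32)

int main(void)
{
	int num_streams = 256;	/* e.g. a larger DART instance */
	int i;

	/* One all-ones register write per 32 error-stream bits. */
	for (i = 0; i < BITS_TO_U32(num_streams); i++)
		printf("writel(U32_MAX, ERROR_STREAMS + %d)\n", 4 * i);
	return 0;
}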
@@ -1137,6 +1154,7 @@ static int apple_dart_probe(struct platform_device *pdev)
dart->ias = FIELD_GET(DART_T8110_PARAMS3_VA_WIDTH, dart_params[2]);
dart->oas = FIELD_GET(DART_T8110_PARAMS3_PA_WIDTH, dart_params[2]);
dart->num_streams = FIELD_GET(DART_T8110_PARAMS4_NUM_SIDS, dart_params[3]);
+ dart->four_level = dart->ias > 36;
break;
}
@@ -1169,9 +1187,9 @@ static int apple_dart_probe(struct platform_device *pdev)
dev_info(
&pdev->dev,
- "DART [pagesize %x, %d streams, bypass support: %d, bypass forced: %d] initialized\n",
+ "DART [pagesize %x, %d streams, bypass support: %d, bypass forced: %d, AS %d -> %d] initialized\n",
dart->pgsize, dart->num_streams, dart->supports_bypass,
- dart->pgsize > PAGE_SIZE);
+ dart->pgsize > PAGE_SIZE, dart->ias, dart->oas);
return 0;
err_sysfs_remove:
@@ -1292,6 +1310,7 @@ static const struct apple_dart_hw apple_dart_hw_t8110 = {
.tcr_enabled = DART_T8110_TCR_TRANSLATE_ENABLE,
.tcr_disabled = 0,
.tcr_bypass = DART_T8110_TCR_BYPASS_DAPF | DART_T8110_TCR_BYPASS_DART,
+ .tcr_4level = DART_T8110_TCR_FOUR_LEVEL,
.ttbr = DART_T8110_TTBR,
.ttbr_valid = DART_T8110_TTBR_VALID,
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index ea2ef53bd4fe..7944a3af4545 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -724,7 +724,12 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, struct device *dev
static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
unsigned long attrs)
{
- int prot = coherent ? IOMMU_CACHE : 0;
+ int prot;
+
+ if (attrs & DMA_ATTR_MMIO)
+ prot = IOMMU_MMIO;
+ else
+ prot = coherent ? IOMMU_CACHE : 0;
if (attrs & DMA_ATTR_PRIVILEGED)
prot |= IOMMU_PRIV;
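With this change, dma_info_to_prot() lets DMA_ATTR_MMIO override the coherency attribute, since an MMIO mapping must never be cacheable. A compact sketch of the selection logic; all flag values below are invented, only the precedence matters:

#include <stdio.h>
#include <stdbool.h>

#define ATTR_MMIO	(1u << 0)	/* illustrative values */
#define ATTR_PRIV	(1u << 1)
#define PROT_CACHE	(1u << 0)
#define PROT_MMIO	(1u << 1)
#define PROT_PRIV	(1u << 2)

static unsigned int info_to_prot(bool coherent, unsigned int attrs)
{
	/* MMIO mappings are never cacheable, so MMIO wins over coherency. */
	unsigned int prot = (attrs & ATTR_MMIO) ? PROT_MMIO
						: (coherent ? PROT_CACHE : 0);

	if (attrs & ATTR_PRIV)
		prot |= PROT_PRIV;
	return prot;
}

int main(void)
{
	printf("coherent, normal : %#x\n", info_to_prot(true, 0));
	printf("coherent, MMIO   : %#x\n", info_to_prot(true, ATTR_MMIO));
	printf("noncoh.,  priv   : %#x\n", info_to_prot(false, ATTR_PRIV));
	return 0;
}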
@@ -1190,11 +1195,9 @@ static inline size_t iova_unaligned(struct iova_domain *iovad, phys_addr_t phys,
return iova_offset(iovad, phys | size);
}
-dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction dir,
- unsigned long attrs)
+dma_addr_t iommu_dma_map_phys(struct device *dev, phys_addr_t phys, size_t size,
+ enum dma_data_direction dir, unsigned long attrs)
{
- phys_addr_t phys = page_to_phys(page) + offset;
bool coherent = dev_is_dma_coherent(dev);
int prot = dma_info_to_prot(dir, coherent, attrs);
struct iommu_domain *domain = iommu_get_dma_domain(dev);
@@ -1208,27 +1211,34 @@ dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
*/
if (dev_use_swiotlb(dev, size, dir) &&
iova_unaligned(iovad, phys, size)) {
+ if (attrs & DMA_ATTR_MMIO)
+ return DMA_MAPPING_ERROR;
+
phys = iommu_dma_map_swiotlb(dev, phys, size, dir, attrs);
if (phys == (phys_addr_t)DMA_MAPPING_ERROR)
return DMA_MAPPING_ERROR;
}
- if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ if (!coherent && !(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO)))
arch_sync_dma_for_device(phys, size, dir);
iova = __iommu_dma_map(dev, phys, size, prot, dma_mask);
- if (iova == DMA_MAPPING_ERROR)
+ if (iova == DMA_MAPPING_ERROR && !(attrs & DMA_ATTR_MMIO))
swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
return iova;
}
-void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
+void iommu_dma_unmap_phys(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
- struct iommu_domain *domain = iommu_get_dma_domain(dev);
phys_addr_t phys;
- phys = iommu_iova_to_phys(domain, dma_handle);
+ if (attrs & DMA_ATTR_MMIO) {
+ __iommu_dma_unmap(dev, dma_handle, size);
+ return;
+ }
+
+ phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
if (WARN_ON(!phys))
return;
@@ -1341,7 +1351,7 @@ static void iommu_dma_unmap_sg_swiotlb(struct device *dev, struct scatterlist *s
int i;
for_each_sg(sg, s, nents, i)
- iommu_dma_unmap_page(dev, sg_dma_address(s),
+ iommu_dma_unmap_phys(dev, sg_dma_address(s),
sg_dma_len(s), dir, attrs);
}
@@ -1354,8 +1364,8 @@ static int iommu_dma_map_sg_swiotlb(struct device *dev, struct scatterlist *sg,
sg_dma_mark_swiotlb(sg);
for_each_sg(sg, s, nents, i) {
- sg_dma_address(s) = iommu_dma_map_page(dev, sg_page(s),
- s->offset, s->length, dir, attrs);
+ sg_dma_address(s) = iommu_dma_map_phys(dev, sg_phys(s),
+ s->length, dir, attrs);
if (sg_dma_address(s) == DMA_MAPPING_ERROR)
goto out_unmap;
sg_dma_len(s) = s->length;
@@ -1546,20 +1556,6 @@ void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
__iommu_dma_unmap(dev, start, end - start);
}
-dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
- size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
- return __iommu_dma_map(dev, phys, size,
- dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
- dma_get_mask(dev));
-}
-
-void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
- __iommu_dma_unmap(dev, handle, size);
-}
-
static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
{
size_t alloc_size = PAGE_ALIGN(size);
@@ -1838,12 +1834,13 @@ static int __dma_iova_link(struct device *dev, dma_addr_t addr,
unsigned long attrs)
{
bool coherent = dev_is_dma_coherent(dev);
+ int prot = dma_info_to_prot(dir, coherent, attrs);
- if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ if (!coherent && !(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO)))
arch_sync_dma_for_device(phys, size, dir);
return iommu_map_nosync(iommu_get_dma_domain(dev), addr, phys, size,
- dma_info_to_prot(dir, coherent, attrs), GFP_ATOMIC);
+ prot, GFP_ATOMIC);
}
static int iommu_dma_iova_bounce_and_link(struct device *dev, dma_addr_t addr,
@@ -1949,9 +1946,13 @@ int dma_iova_link(struct device *dev, struct dma_iova_state *state,
return -EIO;
if (dev_use_swiotlb(dev, size, dir) &&
- iova_unaligned(iovad, phys, size))
+ iova_unaligned(iovad, phys, size)) {
+ if (attrs & DMA_ATTR_MMIO)
+ return -EPERM;
+
return iommu_dma_iova_link_swiotlb(dev, state, phys, offset,
size, dir, attrs);
+ }
return __dma_iova_link(dev, state->addr + offset - iova_start_pad,
phys - iova_start_pad,
diff --git a/drivers/iommu/intel/debugfs.c b/drivers/iommu/intel/debugfs.c
index affbf4a1558d..617fd81a80f0 100644
--- a/drivers/iommu/intel/debugfs.c
+++ b/drivers/iommu/intel/debugfs.c
@@ -62,8 +62,6 @@ static const struct iommu_regset iommu_regs_64[] = {
IOMMU_REGSET_ENTRY(CAP),
IOMMU_REGSET_ENTRY(ECAP),
IOMMU_REGSET_ENTRY(RTADDR),
- IOMMU_REGSET_ENTRY(CCMD),
- IOMMU_REGSET_ENTRY(AFLOG),
IOMMU_REGSET_ENTRY(PHMBASE),
IOMMU_REGSET_ENTRY(PHMLIMIT),
IOMMU_REGSET_ENTRY(IQH),
@@ -435,8 +433,21 @@ static int domain_translation_struct_show(struct seq_file *m,
}
pgd &= VTD_PAGE_MASK;
} else { /* legacy mode */
- pgd = context->lo & VTD_PAGE_MASK;
- agaw = context->hi & 7;
+ u8 tt = (u8)(context->lo & GENMASK_ULL(3, 2)) >> 2;
+
+ /*
+ * According to the Translation Type (TT), get the
+ * second-stage page table pointer (SSPTPTR).
+ */
+ switch (tt) {
+ case CONTEXT_TT_MULTI_LEVEL:
+ case CONTEXT_TT_DEV_IOTLB:
+ pgd = context->lo & VTD_PAGE_MASK;
+ agaw = context->hi & 7;
+ break;
+ default:
+ goto iommu_unlock;
+ }
}
seq_printf(m, "Device %04x:%02x:%02x.%x ",
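The debugfs fix above extracts the Translation Type from bits 3:2 of the low context-entry qword and only trusts the page-table pointer for the multi-level and device-IOTLB types. A sketch of that field extraction, with GENMASK_ULL() written out by hand; the TT values mirror the VT-d legacy-mode definitions as best I recall them:

#include <stdio.h>
#include <stdint.h>

#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

enum { TT_MULTI_LEVEL = 0, TT_DEV_IOTLB = 1, TT_PASS_THROUGH = 2 };

int main(void)
{
	uint64_t ctx_lo = 0x1000 | (TT_PASS_THROUGH << 2);	/* sample entry */
	uint8_t tt = (ctx_lo & GENMASK_ULL(3, 2)) >> 2;

	/* Only multi-level/dev-IOTLB entries carry a page-table pointer. */
	if (tt == TT_MULTI_LEVEL || tt == TT_DEV_IOTLB)
		printf("pgd = %#llx\n",
		       (unsigned long long)(ctx_lo & ~0xfffULL));
	else
		printf("TT=%u: no second-stage table to walk\n", tt);
	return 0;
}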
@@ -648,17 +659,11 @@ DEFINE_SHOW_ATTRIBUTE(ir_translation_struct);
static void latency_show_one(struct seq_file *m, struct intel_iommu *iommu,
struct dmar_drhd_unit *drhd)
{
- int ret;
-
seq_printf(m, "IOMMU: %s Register Base Address: %llx\n",
iommu->name, drhd->reg_base_addr);
- ret = dmar_latency_snapshot(iommu, debug_buf, DEBUG_BUFFER_SIZE);
- if (ret < 0)
- seq_puts(m, "Failed to get latency snapshot");
- else
- seq_puts(m, debug_buf);
- seq_puts(m, "\n");
+ dmar_latency_snapshot(iommu, debug_buf, DEBUG_BUFFER_SIZE);
+ seq_printf(m, "%s\n", debug_buf);
}
static int latency_show(struct seq_file *m, void *v)
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index dff2d895b8ab..e236c7ec221f 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -3817,7 +3817,7 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
}
if (info->ats_supported && ecap_prs(iommu->ecap) &&
- pci_pri_supported(pdev))
+ ecap_pds(iommu->ecap) && pci_pri_supported(pdev))
info->pri_supported = 1;
}
}
diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
index d09b92871659..3056583d7f56 100644
--- a/drivers/iommu/intel/iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -77,7 +77,6 @@
#define DMAR_FEDATA_REG 0x3c /* Fault event interrupt data register */
#define DMAR_FEADDR_REG 0x40 /* Fault event interrupt addr register */
#define DMAR_FEUADDR_REG 0x44 /* Upper address register */
-#define DMAR_AFLOG_REG 0x58 /* Advanced Fault control */
#define DMAR_PMEN_REG 0x64 /* Enable Protected Memory Region */
#define DMAR_PLMBASE_REG 0x68 /* PMRR Low addr */
#define DMAR_PLMLIMIT_REG 0x6c /* PMRR low limit */
@@ -173,8 +172,6 @@
#define cap_pgsel_inv(c) (((c) >> 39) & 1)
#define cap_super_page_val(c) (((c) >> 34) & 0xf)
-#define cap_super_offset(c) (((find_first_bit(&cap_super_page_val(c), 4)) \
- * OFFSET_STRIDE) + 21)
#define cap_fault_reg_offset(c) ((((c) >> 24) & 0x3ff) * 16)
#define cap_max_fault_reg_offset(c) \
@@ -462,7 +459,6 @@ enum {
#define QI_PGRP_PASID(pasid) (((u64)(pasid)) << 32)
/* Page group response descriptor QW1 */
-#define QI_PGRP_LPIG(x) (((u64)(x)) << 2)
#define QI_PGRP_IDX(idx) (((u64)(idx)) << 3)
@@ -541,7 +537,8 @@ enum {
#define pasid_supported(iommu) (sm_supported(iommu) && \
ecap_pasid((iommu)->ecap))
#define ssads_supported(iommu) (sm_supported(iommu) && \
- ecap_slads((iommu)->ecap))
+ ecap_slads((iommu)->ecap) && \
+ ecap_smpwc(iommu->ecap))
#define nested_supported(iommu) (sm_supported(iommu) && \
ecap_nest((iommu)->ecap))
diff --git a/drivers/iommu/intel/perf.c b/drivers/iommu/intel/perf.c
index adc4de6bbd88..dceeadc3ee7c 100644
--- a/drivers/iommu/intel/perf.c
+++ b/drivers/iommu/intel/perf.c
@@ -113,7 +113,7 @@ static char *latency_type_names[] = {
" svm_prq"
};
-int dmar_latency_snapshot(struct intel_iommu *iommu, char *str, size_t size)
+void dmar_latency_snapshot(struct intel_iommu *iommu, char *str, size_t size)
{
struct latency_statistic *lstat = iommu->perf_statistic;
unsigned long flags;
@@ -122,7 +122,7 @@ int dmar_latency_snapshot(struct intel_iommu *iommu, char *str, size_t size)
memset(str, 0, size);
for (i = 0; i < COUNTS_NUM; i++)
- bytes += snprintf(str + bytes, size - bytes,
+ bytes += scnprintf(str + bytes, size - bytes,
"%s", latency_counter_names[i]);
spin_lock_irqsave(&latency_lock, flags);
@@ -130,7 +130,7 @@ int dmar_latency_snapshot(struct intel_iommu *iommu, char *str, size_t size)
if (!dmar_latency_enabled(iommu, i))
continue;
- bytes += snprintf(str + bytes, size - bytes,
+ bytes += scnprintf(str + bytes, size - bytes,
"\n%s", latency_type_names[i]);
for (j = 0; j < COUNTS_NUM; j++) {
@@ -156,11 +156,9 @@ int dmar_latency_snapshot(struct intel_iommu *iommu, char *str, size_t size)
break;
}
- bytes += snprintf(str + bytes, size - bytes,
+ bytes += scnprintf(str + bytes, size - bytes,
"%12lld", val);
}
}
spin_unlock_irqrestore(&latency_lock, flags);
-
- return bytes;
}
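The perf.c conversion matters because snprintf() returns the length that would have been written, so accumulating its return value can push the write offset past the end of the buffer, while scnprintf() returns only what was actually stored. A userspace re-creation of that clamp; scnprintf() is kernel-only, so a stand-in is defined here:

#include <stdio.h>
#include <stdarg.h>

/* Userspace stand-in for the kernel's scnprintf(). */
static int scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list ap;
	int n;

	if (!size)
		return 0;
	va_start(ap, fmt);
	n = vsnprintf(buf, size, fmt, ap);
	va_end(ap);
	/* Clamp to what actually landed in the buffer. */
	return n >= (int)size ? (int)size - 1 : n;
}

int main(void)
{
	char buf[8];
	int a = snprintf(buf, sizeof(buf), "%s", "0123456789");
	int b = scnprintf(buf, sizeof(buf), "%s", "0123456789");

	printf("snprintf=%d scnprintf=%d stored=\"%s\"\n", a, b, buf);
	return 0;
}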
diff --git a/drivers/iommu/intel/perf.h b/drivers/iommu/intel/perf.h
index df9a36942d64..1d4baad7e852 100644
--- a/drivers/iommu/intel/perf.h
+++ b/drivers/iommu/intel/perf.h
@@ -40,7 +40,7 @@ void dmar_latency_disable(struct intel_iommu *iommu, enum latency_type type);
bool dmar_latency_enabled(struct intel_iommu *iommu, enum latency_type type);
void dmar_latency_update(struct intel_iommu *iommu, enum latency_type type,
u64 latency);
-int dmar_latency_snapshot(struct intel_iommu *iommu, char *str, size_t size);
+void dmar_latency_snapshot(struct intel_iommu *iommu, char *str, size_t size);
#else
static inline int
dmar_latency_enable(struct intel_iommu *iommu, enum latency_type type)
@@ -64,9 +64,8 @@ dmar_latency_update(struct intel_iommu *iommu, enum latency_type type, u64 laten
{
}
-static inline int
+static inline void
dmar_latency_snapshot(struct intel_iommu *iommu, char *str, size_t size)
{
- return 0;
}
#endif /* CONFIG_DMAR_PERF */
diff --git a/drivers/iommu/intel/prq.c b/drivers/iommu/intel/prq.c
index 52570e42a14c..ff63c228e6e1 100644
--- a/drivers/iommu/intel/prq.c
+++ b/drivers/iommu/intel/prq.c
@@ -151,8 +151,7 @@ static void handle_bad_prq_event(struct intel_iommu *iommu,
QI_PGRP_PASID_P(req->pasid_present) |
QI_PGRP_RESP_CODE(result) |
QI_PGRP_RESP_TYPE;
- desc.qw1 = QI_PGRP_IDX(req->prg_index) |
- QI_PGRP_LPIG(req->lpig);
+ desc.qw1 = QI_PGRP_IDX(req->prg_index);
qi_submit_sync(iommu, &desc, 1, 0);
}
@@ -379,19 +378,17 @@ void intel_iommu_page_response(struct device *dev, struct iopf_fault *evt,
struct iommu_fault_page_request *prm;
struct qi_desc desc;
bool pasid_present;
- bool last_page;
u16 sid;
prm = &evt->fault.prm;
sid = PCI_DEVID(bus, devfn);
pasid_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
- last_page = prm->flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
desc.qw0 = QI_PGRP_PASID(prm->pasid) | QI_PGRP_DID(sid) |
QI_PGRP_PASID_P(pasid_present) |
QI_PGRP_RESP_CODE(msg->code) |
QI_PGRP_RESP_TYPE;
- desc.qw1 = QI_PGRP_IDX(prm->grpid) | QI_PGRP_LPIG(last_page);
+ desc.qw1 = QI_PGRP_IDX(prm->grpid);
desc.qw2 = 0;
desc.qw3 = 0;
diff --git a/drivers/iommu/io-pgtable-dart.c b/drivers/iommu/io-pgtable-dart.c
index 679bda104797..54d287cc0dd1 100644
--- a/drivers/iommu/io-pgtable-dart.c
+++ b/drivers/iommu/io-pgtable-dart.c
@@ -27,8 +27,9 @@
#define DART1_MAX_ADDR_BITS 36
-#define DART_MAX_TABLES 4
-#define DART_LEVELS 2
+#define DART_MAX_TABLE_BITS 2
+#define DART_MAX_TABLES BIT(DART_MAX_TABLE_BITS)
+#define DART_MAX_LEVELS 4 /* Includes TTBR level */
/* Struct accessors */
#define io_pgtable_to_data(x) \
@@ -68,6 +69,7 @@
struct dart_io_pgtable {
struct io_pgtable iop;
+ int levels;
int tbl_bits;
int bits_per_level;
@@ -156,44 +158,45 @@ static dart_iopte dart_install_table(dart_iopte *table,
return old;
}
-static int dart_get_table(struct dart_io_pgtable *data, unsigned long iova)
+static int dart_get_index(struct dart_io_pgtable *data, unsigned long iova, int level)
{
- return (iova >> (3 * data->bits_per_level + ilog2(sizeof(dart_iopte)))) &
- ((1 << data->tbl_bits) - 1);
+ return (iova >> (level * data->bits_per_level + ilog2(sizeof(dart_iopte)))) &
+ ((1 << data->bits_per_level) - 1);
}
-static int dart_get_l1_index(struct dart_io_pgtable *data, unsigned long iova)
-{
-
- return (iova >> (2 * data->bits_per_level + ilog2(sizeof(dart_iopte)))) &
- ((1 << data->bits_per_level) - 1);
-}
-
-static int dart_get_l2_index(struct dart_io_pgtable *data, unsigned long iova)
+static int dart_get_last_index(struct dart_io_pgtable *data, unsigned long iova)
{
return (iova >> (data->bits_per_level + ilog2(sizeof(dart_iopte)))) &
((1 << data->bits_per_level) - 1);
}
-static dart_iopte *dart_get_l2(struct dart_io_pgtable *data, unsigned long iova)
+static dart_iopte *dart_get_last(struct dart_io_pgtable *data, unsigned long iova)
{
dart_iopte pte, *ptep;
- int tbl = dart_get_table(data, iova);
+ int level = data->levels;
+ int tbl = dart_get_index(data, iova, level);
+
+ if (tbl >= (1 << data->tbl_bits))
+ return NULL;
ptep = data->pgd[tbl];
if (!ptep)
return NULL;
- ptep += dart_get_l1_index(data, iova);
- pte = READ_ONCE(*ptep);
+ while (--level > 1) {
+ ptep += dart_get_index(data, iova, level);
+ pte = READ_ONCE(*ptep);
- /* Valid entry? */
- if (!pte)
- return NULL;
+ /* Valid entry? */
+ if (!pte)
+ return NULL;
- /* Deref to get level 2 table */
- return iopte_deref(pte, data);
+ /* Deref to get next level table */
+ ptep = iopte_deref(pte, data);
+ }
+
+ return ptep;
}
static dart_iopte dart_prot_to_pte(struct dart_io_pgtable *data,
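dart_get_index() peels one level's worth of index bits out of the IOVA; since bits_per_level = pg_shift - ilog2(sizeof(pte)), the level-1 shift lands exactly on the page-offset boundary. A numeric walk of that arithmetic for a 16K-page DART (11 bits per level), with the same shift expression re-created standalone:

#include <stdio.h>
#include <stdint.h>

#define PTE_SHIFT	3	/* ilog2(sizeof(pte)) for 8-byte PTEs */

static int get_index(uint64_t iova, int level, int bits_per_level)
{
	/* bits_per_level + PTE_SHIFT equals the page shift, so level 1
	 * indexes the bits just above the page offset, level 2 the next
	 * group up, and so on. */
	return (iova >> (level * bits_per_level + PTE_SHIFT)) &
	       ((1 << bits_per_level) - 1);
}

int main(void)
{
	uint64_t iova = 0x123456789000ULL;
	int bits_per_level = 11;	/* 16K pages: 14 - 3 */
	int level;

	for (level = 3; level >= 1; level--)
		printf("level %d index = %d\n",
		       level, get_index(iova, level, bits_per_level));
	return 0;
}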
@@ -230,6 +233,7 @@ static int dart_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
int ret = 0, tbl, num_entries, max_entries, map_idx_start;
dart_iopte pte, *cptep, *ptep;
dart_iopte prot;
+ int level = data->levels;
if (WARN_ON(pgsize != cfg->pgsize_bitmap))
return -EINVAL;
@@ -240,31 +244,36 @@ static int dart_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
return -EINVAL;
- tbl = dart_get_table(data, iova);
+ tbl = dart_get_index(data, iova, level);
+
+ if (tbl >= (1 << data->tbl_bits))
+ return -ENOMEM;
ptep = data->pgd[tbl];
- ptep += dart_get_l1_index(data, iova);
- pte = READ_ONCE(*ptep);
+ while (--level > 1) {
+ ptep += dart_get_index(data, iova, level);
+ pte = READ_ONCE(*ptep);
- /* no L2 table present */
- if (!pte) {
- cptep = iommu_alloc_pages_sz(gfp, tblsz);
- if (!cptep)
- return -ENOMEM;
+ /* no table present */
+ if (!pte) {
+ cptep = iommu_alloc_pages_sz(gfp, tblsz);
+ if (!cptep)
+ return -ENOMEM;
- pte = dart_install_table(cptep, ptep, 0, data);
- if (pte)
- iommu_free_pages(cptep);
+ pte = dart_install_table(cptep, ptep, 0, data);
+ if (pte)
+ iommu_free_pages(cptep);
- /* L2 table is present (now) */
- pte = READ_ONCE(*ptep);
- }
+ /* next-level table is present (now) */
+ pte = READ_ONCE(*ptep);
+ }
- ptep = iopte_deref(pte, data);
+ ptep = iopte_deref(pte, data);
+ }
/* install leaf entries into the last-level table */
prot = dart_prot_to_pte(data, iommu_prot);
- map_idx_start = dart_get_l2_index(data, iova);
+ map_idx_start = dart_get_last_index(data, iova);
max_entries = DART_PTES_PER_TABLE(data) - map_idx_start;
num_entries = min_t(int, pgcount, max_entries);
ptep += map_idx_start;
@@ -293,13 +302,13 @@ static size_t dart_unmap_pages(struct io_pgtable_ops *ops, unsigned long iova,
if (WARN_ON(pgsize != cfg->pgsize_bitmap || !pgcount))
return 0;
- ptep = dart_get_l2(data, iova);
+ ptep = dart_get_last(data, iova);
/* Valid L2 IOPTE pointer? */
if (WARN_ON(!ptep))
return 0;
- unmap_idx_start = dart_get_l2_index(data, iova);
+ unmap_idx_start = dart_get_last_index(data, iova);
ptep += unmap_idx_start;
max_entries = DART_PTES_PER_TABLE(data) - unmap_idx_start;
@@ -330,13 +339,13 @@ static phys_addr_t dart_iova_to_phys(struct io_pgtable_ops *ops,
struct dart_io_pgtable *data = io_pgtable_ops_to_data(ops);
dart_iopte pte, *ptep;
- ptep = dart_get_l2(data, iova);
+ ptep = dart_get_last(data, iova);
/* Valid L2 IOPTE pointer? */
if (!ptep)
return 0;
- ptep += dart_get_l2_index(data, iova);
+ ptep += dart_get_last_index(data, iova);
pte = READ_ONCE(*ptep);
/* Found translation */
@@ -353,21 +362,37 @@ static struct dart_io_pgtable *
dart_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
struct dart_io_pgtable *data;
- int tbl_bits, bits_per_level, va_bits, pg_shift;
+ int levels, max_tbl_bits, tbl_bits, bits_per_level, va_bits, pg_shift;
+
+ /*
+ * Old 4K page DARTs can use up to 4 top-level tables.
+ * Newer ones only ever use a maximum of 1.
+ */
+ if (cfg->pgsize_bitmap == SZ_4K)
+ max_tbl_bits = DART_MAX_TABLE_BITS;
+ else
+ max_tbl_bits = 0;
pg_shift = __ffs(cfg->pgsize_bitmap);
bits_per_level = pg_shift - ilog2(sizeof(dart_iopte));
va_bits = cfg->ias - pg_shift;
- tbl_bits = max_t(int, 0, va_bits - (bits_per_level * DART_LEVELS));
- if ((1 << tbl_bits) > DART_MAX_TABLES)
+ levels = max_t(int, 2, (va_bits - max_tbl_bits + bits_per_level - 1) / bits_per_level);
+
+ if (levels > (DART_MAX_LEVELS - 1))
+ return NULL;
+
+ tbl_bits = max_t(int, 0, va_bits - (bits_per_level * levels));
+
+ if (tbl_bits > max_tbl_bits)
return NULL;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return NULL;
+ data->levels = levels + 1; /* Table level counts as one level */
data->tbl_bits = tbl_bits;
data->bits_per_level = bits_per_level;
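dart_alloc_pgtable() now derives the translation-level count as a ceiling division of the post-TTBR VA bits by the bits resolved per level, clamped to at least two. A quick numeric check of that formula for two sample geometries; the 42-bit case is an assumed example, not a quoted device:

#include <stdio.h>

static int levels_for(int ias, int pg_shift, int max_tbl_bits)
{
	int bits_per_level = pg_shift - 3;	/* 8-byte PTEs */
	int va_bits = ias - pg_shift;
	int levels = (va_bits - max_tbl_bits + bits_per_level - 1)
		     / bits_per_level;

	/* The driver later adds one more level for the TTBR itself. */
	return levels < 2 ? 2 : levels;
}

int main(void)
{
	/* 36-bit IAS, 16K pages: the classic two translation levels. */
	printf("ias=36 -> %d levels\n", levels_for(36, 14, 0));
	/* A wider IAS needs one more level. */
	printf("ias=42 -> %d levels\n", levels_for(42, 14, 0));
	return 0;
}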
@@ -403,6 +428,7 @@ apple_dart_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
return NULL;
cfg->apple_dart_cfg.n_ttbrs = 1 << data->tbl_bits;
+ cfg->apple_dart_cfg.n_levels = data->levels;
for (i = 0; i < cfg->apple_dart_cfg.n_ttbrs; ++i) {
data->pgd[i] =
@@ -422,24 +448,31 @@ out_free_data:
return NULL;
}
-static void apple_dart_free_pgtable(struct io_pgtable *iop)
+static void apple_dart_free_pgtables(struct dart_io_pgtable *data, dart_iopte *ptep, int level)
{
- struct dart_io_pgtable *data = io_pgtable_to_data(iop);
- dart_iopte *ptep, *end;
- int i;
+ dart_iopte *end;
+ dart_iopte *start = ptep;
- for (i = 0; i < (1 << data->tbl_bits) && data->pgd[i]; ++i) {
- ptep = data->pgd[i];
+ if (level > 1) {
end = (void *)ptep + DART_GRANULE(data);
while (ptep != end) {
dart_iopte pte = *ptep++;
if (pte)
- iommu_free_pages(iopte_deref(pte, data));
+ apple_dart_free_pgtables(data, iopte_deref(pte, data), level - 1);
}
- iommu_free_pages(data->pgd[i]);
}
+ iommu_free_pages(start);
+}
+
+static void apple_dart_free_pgtable(struct io_pgtable *iop)
+{
+ struct dart_io_pgtable *data = io_pgtable_to_data(iop);
+ int i;
+
+ for (i = 0; i < (1 << data->tbl_bits) && data->pgd[i]; ++i)
+ apple_dart_free_pgtables(data, data->pgd[i], data->levels - 1);
kfree(data);
}
diff --git a/drivers/iommu/iommu-priv.h b/drivers/iommu/iommu-priv.h
index e236b932e766..c95394cd03a7 100644
--- a/drivers/iommu/iommu-priv.h
+++ b/drivers/iommu/iommu-priv.h
@@ -37,6 +37,8 @@ void iommu_device_unregister_bus(struct iommu_device *iommu,
const struct bus_type *bus,
struct notifier_block *nb);
+int iommu_mock_device_add(struct device *dev, struct iommu_device *iommu);
+
struct iommu_attach_handle *iommu_attach_handle_get(struct iommu_group *group,
ioasid_t pasid,
unsigned int type);
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 060ebe330ee1..59244c744eab 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -304,6 +304,7 @@ void iommu_device_unregister_bus(struct iommu_device *iommu,
struct notifier_block *nb)
{
bus_unregister_notifier(bus, nb);
+ fwnode_remove_software_node(iommu->fwnode);
iommu_device_unregister(iommu);
}
EXPORT_SYMBOL_GPL(iommu_device_unregister_bus);
@@ -326,6 +327,12 @@ int iommu_device_register_bus(struct iommu_device *iommu,
if (err)
return err;
+ iommu->fwnode = fwnode_create_software_node(NULL, NULL);
+ if (IS_ERR(iommu->fwnode)) {
+ bus_unregister_notifier(bus, nb);
+ return PTR_ERR(iommu->fwnode);
+ }
+
spin_lock(&iommu_device_lock);
list_add_tail(&iommu->list, &iommu_device_list);
spin_unlock(&iommu_device_lock);
@@ -335,9 +342,28 @@ int iommu_device_register_bus(struct iommu_device *iommu,
iommu_device_unregister_bus(iommu, bus, nb);
return err;
}
+ WRITE_ONCE(iommu->ready, true);
return 0;
}
EXPORT_SYMBOL_GPL(iommu_device_register_bus);
+
+int iommu_mock_device_add(struct device *dev, struct iommu_device *iommu)
+{
+ int rc;
+
+ mutex_lock(&iommu_probe_device_lock);
+ rc = iommu_fwspec_init(dev, iommu->fwnode);
+ mutex_unlock(&iommu_probe_device_lock);
+
+ if (rc)
+ return rc;
+
+ rc = device_add(dev);
+ if (rc)
+ iommu_fwspec_free(dev);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(iommu_mock_device_add);
#endif
static struct dev_iommu *dev_iommu_get(struct device *dev)
diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c
index 61686603c769..de178827a078 100644
--- a/drivers/iommu/iommufd/selftest.c
+++ b/drivers/iommu/iommufd/selftest.c
@@ -1126,7 +1126,7 @@ static struct mock_dev *mock_dev_create(unsigned long dev_flags)
goto err_put;
}
- rc = device_add(&mdev->dev);
+ rc = iommu_mock_device_add(&mdev->dev, &mock_iommu.iommu_dev);
if (rc)
goto err_put;
return mdev;
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 6fb93927bdb9..5c6f5943f44b 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -1303,8 +1303,8 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
struct omap_iommu_device *iommu;
struct omap_iommu *oiommu;
struct iotlb_entry e;
+ int ret = -EINVAL;
int omap_pgsz;
- u32 ret = -EINVAL;
int i;
omap_pgsz = bytes_to_iopgsz(bytes);
diff --git a/drivers/iommu/riscv/iommu-platform.c b/drivers/iommu/riscv/iommu-platform.c
index 725e919b97ef..83a28c83f991 100644
--- a/drivers/iommu/riscv/iommu-platform.c
+++ b/drivers/iommu/riscv/iommu-platform.c
@@ -10,6 +10,8 @@
* Tomasz Jeznach <tjeznach@rivosinc.com>
*/
+#include <linux/acpi.h>
+#include <linux/irqchip/riscv-imsic.h>
#include <linux/kernel.h>
#include <linux/msi.h>
#include <linux/of_irq.h>
@@ -46,6 +48,7 @@ static int riscv_iommu_platform_probe(struct platform_device *pdev)
enum riscv_iommu_igs_settings igs;
struct device *dev = &pdev->dev;
struct riscv_iommu_device *iommu = NULL;
+ struct irq_domain *msi_domain;
struct resource *res = NULL;
int vec, ret;
@@ -76,8 +79,13 @@ static int riscv_iommu_platform_probe(struct platform_device *pdev)
switch (igs) {
case RISCV_IOMMU_CAPABILITIES_IGS_BOTH:
case RISCV_IOMMU_CAPABILITIES_IGS_MSI:
- if (is_of_node(dev->fwnode))
+ if (is_of_node(dev_fwnode(dev))) {
of_msi_configure(dev, to_of_node(dev->fwnode));
+ } else {
+ msi_domain = irq_find_matching_fwnode(imsic_acpi_get_fwnode(dev),
+ DOMAIN_BUS_PLATFORM_MSI);
+ dev_set_msi_domain(dev, msi_domain);
+ }
if (!dev_get_msi_domain(dev)) {
dev_warn(dev, "failed to find an MSI domain\n");
@@ -150,6 +158,12 @@ static const struct of_device_id riscv_iommu_of_match[] = {
{},
};
+static const struct acpi_device_id riscv_iommu_acpi_match[] = {
+ { "RSCV0004", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, riscv_iommu_acpi_match);
+
static struct platform_driver riscv_iommu_platform_driver = {
.probe = riscv_iommu_platform_probe,
.remove = riscv_iommu_platform_remove,
@@ -158,6 +172,7 @@ static struct platform_driver riscv_iommu_platform_driver = {
.name = "riscv,iommu",
.of_match_table = riscv_iommu_of_match,
.suppress_bind_attrs = true,
+ .acpi_match_table = riscv_iommu_acpi_match,
},
};
diff --git a/drivers/iommu/riscv/iommu.c b/drivers/iommu/riscv/iommu.c
index 0eae2f4bdc5e..ebb22979075d 100644
--- a/drivers/iommu/riscv/iommu.c
+++ b/drivers/iommu/riscv/iommu.c
@@ -12,6 +12,8 @@
#define pr_fmt(fmt) "riscv-iommu: " fmt
+#include <linux/acpi.h>
+#include <linux/acpi_rimt.h>
#include <linux/compiler.h>
#include <linux/crash_dump.h>
#include <linux/init.h>
@@ -1650,6 +1652,14 @@ int riscv_iommu_init(struct riscv_iommu_device *iommu)
goto err_iodir_off;
}
+ if (!acpi_disabled) {
+ rc = rimt_iommu_register(iommu->dev);
+ if (rc) {
+ dev_err_probe(iommu->dev, rc, "cannot register iommu with RIMT\n");
+ goto err_remove_sysfs;
+ }
+ }
+
rc = iommu_device_register(&iommu->iommu, &riscv_iommu_ops, iommu->dev);
if (rc) {
dev_err_probe(iommu->dev, rc, "cannot register iommu interface\n");
diff --git a/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c b/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c
index 11549d85f23b..b5785472765a 100644
--- a/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c
+++ b/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c
@@ -30,7 +30,7 @@ static u32 fsl_mc_msi_domain_get_msi_id(struct irq_domain *domain,
u32 out_id;
of_node = irq_domain_get_of_node(domain);
- out_id = of_node ? of_msi_map_id(&mc_dev->dev, of_node, mc_dev->icid) :
+ out_id = of_node ? of_msi_xlate(&mc_dev->dev, &of_node, mc_dev->icid) :
iort_msi_map_id(&mc_dev->dev, mc_dev->icid);
return out_id;
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index ddb37f6670de..104aa5355090 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -37,6 +37,32 @@ config BLK_DEV_MD
If unsure, say N.
+config MD_BITMAP
+ bool "MD RAID bitmap support"
+ default y
+ depends on BLK_DEV_MD
+ help
+ If you say Y here, support for the write-intent bitmap will be
+ enabled. The bitmap can be used to speed up resync after a power
+ failure or after re-adding a disk, by limiting resync to the dirty
+ sectors recorded in the bitmap.
+
+ This feature can be added to an existing MD array, or an MD array
+ can be created with a bitmap, via mdadm(8).
+
+ If unsure, say Y.
+
+config MD_LLBITMAP
+ bool "MD RAID lockless bitmap support"
+ depends on BLK_DEV_MD
+ help
+ If you say Y here, support for the lockless write-intent bitmap
+ will be enabled.
+
+ Note that this is an experimental feature.
+
+ If unsure, say N.
+
config MD_AUTODETECT
bool "Autodetect RAID arrays during kernel boot"
depends on BLK_DEV_MD=y
@@ -54,6 +80,7 @@ config MD_AUTODETECT
config MD_BITMAP_FILE
bool "MD bitmap file support (deprecated)"
default y
+ depends on MD_BITMAP
help
If you say Y here, support for write intent bitmaps in files on an
external file system is enabled. This is an alternative to the internal
@@ -174,6 +201,7 @@ config MD_RAID456
config MD_CLUSTER
tristate "Cluster Support for MD"
+ select MD_BITMAP
depends on BLK_DEV_MD
depends on DLM
default n
@@ -393,6 +421,7 @@ config DM_RAID
select MD_RAID1
select MD_RAID10
select MD_RAID456
+ select MD_BITMAP
select BLK_DEV_MD
help
A dm target that supports RAID1, RAID10, RAID4, RAID5 and RAID6 mappings
@@ -659,4 +688,6 @@ config DM_AUDIT
source "drivers/md/dm-vdo/Kconfig"
+source "drivers/md/dm-pcache/Kconfig"
+
endif # MD
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 87bdfc9fe14c..c338cc6fbe2e 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -27,7 +27,9 @@ dm-clone-y += dm-clone-target.o dm-clone-metadata.o
dm-verity-y += dm-verity-target.o
dm-zoned-y += dm-zoned-target.o dm-zoned-metadata.o dm-zoned-reclaim.o
-md-mod-y += md.o md-bitmap.o
+md-mod-y += md.o
+md-mod-$(CONFIG_MD_BITMAP) += md-bitmap.o
+md-mod-$(CONFIG_MD_LLBITMAP) += md-llbitmap.o
raid456-y += raid5.o raid5-cache.o raid5-ppl.o
linear-y += md-linear.o
@@ -71,6 +73,7 @@ obj-$(CONFIG_DM_RAID) += dm-raid.o
obj-$(CONFIG_DM_THIN_PROVISIONING) += dm-thin-pool.o
obj-$(CONFIG_DM_VERITY) += dm-verity.o
obj-$(CONFIG_DM_VDO) += dm-vdo/
+obj-$(CONFIG_DM_PCACHE) += dm-pcache/
obj-$(CONFIG_DM_CACHE) += dm-cache.o
obj-$(CONFIG_DM_CACHE_SMQ) += dm-cache-smq.o
obj-$(CONFIG_DM_EBS) += dm-ebs.o
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index 7510d1c983a5..f327456fc4e0 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -115,8 +115,7 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
check = bio_kmalloc(nr_segs, GFP_NOIO);
if (!check)
return;
- bio_init(check, bio->bi_bdev, check->bi_inline_vecs, nr_segs,
- REQ_OP_READ);
+ bio_init_inline(check, bio->bi_bdev, nr_segs, REQ_OP_READ);
check->bi_iter.bi_sector = bio->bi_iter.bi_sector;
check->bi_iter.bi_size = bio->bi_iter.bi_size;
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index 020712c5203f..2386d08bf4e4 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -26,8 +26,7 @@ struct bio *bch_bbio_alloc(struct cache_set *c)
struct bbio *b = mempool_alloc(&c->bio_meta, GFP_NOIO);
struct bio *bio = &b->bio;
- bio_init(bio, NULL, bio->bi_inline_vecs,
- meta_bucket_pages(&c->cache->sb), 0);
+ bio_init_inline(bio, NULL, meta_bucket_pages(&c->cache->sb), 0);
return bio;
}
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 7ff14bd2feb8..d50eb82ccb4f 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -615,7 +615,7 @@ static void do_journal_discard(struct cache *ca)
atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);
- bio_init(bio, ca->bdev, bio->bi_inline_vecs, 1, REQ_OP_DISCARD);
+ bio_init_inline(bio, ca->bdev, 1, REQ_OP_DISCARD);
bio->bi_iter.bi_sector = bucket_to_sector(ca->set,
ca->sb.d[ja->discard_idx]);
bio->bi_iter.bi_size = bucket_bytes(ca);
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index 26a6a535ec32..73918e55bf04 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -79,7 +79,7 @@ static void moving_init(struct moving_io *io)
{
struct bio *bio = &io->bio.bio;
- bio_init(bio, NULL, bio->bi_inline_vecs,
+ bio_init_inline(bio, NULL,
DIV_ROUND_UP(KEY_SIZE(&io->w->key), PAGE_SECTORS), 0);
bio_get(bio);
bio->bi_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0);
@@ -145,9 +145,9 @@ static void read_moving(struct cache_set *c)
continue;
}
- io = kzalloc(struct_size(io, bio.bio.bi_inline_vecs,
- DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS)),
- GFP_KERNEL);
+ io = kzalloc(sizeof(*io) + sizeof(struct bio_vec) *
+ DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
+ GFP_KERNEL);
if (!io)
goto err;
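With bi_inline_vecs gone from struct bio, these bcache hunks size the trailing bio_vec array by hand instead of via struct_size(). Both idioms compute the same byte count when a flexible array member is available, as this sketch shows; the struct and macro are re-created here for userspace:

#include <stdio.h>
#include <stdlib.h>

struct vec { void *page; unsigned int len, off; };

struct io {
	int refcount;
	struct vec inline_vecs[];	/* flexible array member */
};

/* Userspace re-creation of the kernel's struct_size() idea. */
#define struct_size(p, member, n) \
	(sizeof(*(p)) + sizeof((p)->member[0]) * (n))

int main(void)
{
	struct io *io = NULL;
	size_t n = 4;

	/* sizeof operands are not evaluated, so the NULL pointer is fine. */
	printf("manual      = %zu\n", sizeof(*io) + sizeof(struct vec) * n);
	printf("struct_size = %zu\n", struct_size(io, inline_vecs, n));

	io = calloc(1, sizeof(*io) + sizeof(struct vec) * n);
	free(io);
	return 0;
}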
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 1492c8552255..6d250e366412 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -2236,7 +2236,7 @@ static int cache_alloc(struct cache *ca)
__module_get(THIS_MODULE);
kobject_init(&ca->kobj, &bch_cache_ktype);
- bio_init(&ca->journal.bio, NULL, ca->journal.bio.bi_inline_vecs, 8, 0);
+ bio_init_inline(&ca->journal.bio, NULL, 8, 0);
/*
* When the cache disk is first registered, ca->sb.njournal_buckets
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 302e75f1fc4b..6ba73dc1a3df 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -331,7 +331,7 @@ static void dirty_init(struct keybuf_key *w)
struct dirty_io *io = w->private;
struct bio *bio = &io->bio;
- bio_init(bio, NULL, bio->bi_inline_vecs,
+ bio_init_inline(bio, NULL,
DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS), 0);
if (!io->dc->writeback_percent)
bio->bi_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0);
@@ -536,9 +536,9 @@ static void read_dirty(struct cached_dev *dc)
for (i = 0; i < nk; i++) {
w = keys[i];
- io = kzalloc(struct_size(io, bio.bi_inline_vecs,
- DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS)),
- GFP_KERNEL);
+ io = kzalloc(sizeof(*io) + sizeof(struct bio_vec) *
+ DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
+ GFP_KERNEL);
if (!io)
goto err;
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index ff7595caf440..e6d28be11c5c 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1337,12 +1337,12 @@ static void use_bio(struct dm_buffer *b, enum req_op op, sector_t sector,
char *ptr;
unsigned int len;
- bio = bio_kmalloc(1, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN);
+ bio = bio_kmalloc(1, GFP_NOWAIT);
if (!bio) {
use_dmio(b, op, sector, n_sectors, offset, ioprio);
return;
}
- bio_init(bio, b->c->bdev, bio->bi_inline_vecs, 1, op);
+ bio_init_inline(bio, b->c->bdev, 1, op);
bio->bi_iter.bi_sector = sector;
bio->bi_end_io = bio_complete;
bio->bi_private = b;
@@ -1601,18 +1601,18 @@ static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client
* dm-bufio is resistant to allocation failures (it just keeps
* one buffer reserved in cases all the allocations fail).
* So set flags to not try too hard:
- * GFP_NOWAIT: don't wait; if we need to sleep we'll release our
- * mutex and wait ourselves.
+ * GFP_NOWAIT: don't wait and don't print a warning in case of
+ * failure; if we need to sleep we'll release our mutex
+ * and wait ourselves.
* __GFP_NORETRY: don't retry and rather return failure
* __GFP_NOMEMALLOC: don't use emergency reserves
- * __GFP_NOWARN: don't print a warning in case of failure
*
* For debugging, if we set the cache size to 1, no new buffers will
* be allocated.
*/
while (1) {
if (dm_bufio_cache_size_latch != 1) {
- b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
+ b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC);
if (b)
return b;
}
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index 2ed894155cab..7e1e8cc0e33a 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -590,7 +590,7 @@ static int h_init(struct smq_hash_table *ht, struct entry_space *es, unsigned in
nr_buckets = roundup_pow_of_two(max(nr_entries / 4u, 16u));
ht->hash_bits = __ffs(nr_buckets);
- ht->buckets = vmalloc(array_size(nr_buckets, sizeof(*ht->buckets)));
+ ht->buckets = vmalloc_array(nr_buckets, sizeof(*ht->buckets));
if (!ht->buckets)
return -ENOMEM;
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index c889332e533b..a3c9f74fe2dc 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -162,6 +162,7 @@ struct mapped_device {
#define DMF_SUSPENDED_INTERNALLY 7
#define DMF_POST_SUSPENDING 8
#define DMF_EMULATE_ZONE_APPEND 9
+#define DMF_QUEUE_STOPPED 10
static inline sector_t dm_get_size(struct mapped_device *md)
{
@@ -291,6 +292,7 @@ struct dm_io {
struct dm_io *next;
struct dm_stats_aux stats_aux;
blk_status_t status;
+ bool requeue_flush_with_data;
atomic_t io_count;
struct mapped_device *md;
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index cf17fd46e255..08925aca838c 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -441,7 +441,7 @@ static struct bio *clone_bio(struct dm_target *ti, struct flakey_c *fc, struct b
if (!clone)
return NULL;
- bio_init(clone, fc->dev->bdev, clone->bi_inline_vecs, nr_iovecs, bio->bi_opf);
+ bio_init_inline(clone, fc->dev->bdev, nr_iovecs, bio->bi_opf);
clone->bi_iter.bi_sector = flakey_map_sector(ti, bio->bi_iter.bi_sector);
clone->bi_private = bio;
diff --git a/drivers/md/dm-ima.c b/drivers/md/dm-ima.c
index 8b50c908c6f4..efb3cd4f9cd4 100644
--- a/drivers/md/dm-ima.c
+++ b/drivers/md/dm-ima.c
@@ -45,7 +45,7 @@ static void fix_separator_chars(char **buf)
/*
* Internal function to allocate memory for IMA measurements.
*/
-static void *dm_ima_alloc(size_t len, gfp_t flags, bool noio)
+static void *dm_ima_alloc(size_t len, bool noio)
{
unsigned int noio_flag;
void *ptr;
@@ -53,7 +53,7 @@ static void *dm_ima_alloc(size_t len, gfp_t flags, bool noio)
if (noio)
noio_flag = memalloc_noio_save();
- ptr = kzalloc(len, flags);
+ ptr = kzalloc(len, GFP_KERNEL);
if (noio)
memalloc_noio_restore(noio_flag);
@@ -68,13 +68,13 @@ static int dm_ima_alloc_and_copy_name_uuid(struct mapped_device *md, char **dev_
char **dev_uuid, bool noio)
{
int r;
- *dev_name = dm_ima_alloc(DM_NAME_LEN*2, GFP_KERNEL, noio);
+ *dev_name = dm_ima_alloc(DM_NAME_LEN*2, noio);
if (!(*dev_name)) {
r = -ENOMEM;
goto error;
}
- *dev_uuid = dm_ima_alloc(DM_UUID_LEN*2, GFP_KERNEL, noio);
+ *dev_uuid = dm_ima_alloc(DM_UUID_LEN*2, noio);
if (!(*dev_uuid)) {
r = -ENOMEM;
goto error;
@@ -109,7 +109,7 @@ static int dm_ima_alloc_and_copy_device_data(struct mapped_device *md, char **de
if (r)
return r;
- *device_data = dm_ima_alloc(DM_IMA_DEVICE_BUF_LEN, GFP_KERNEL, noio);
+ *device_data = dm_ima_alloc(DM_IMA_DEVICE_BUF_LEN, noio);
if (!(*device_data)) {
r = -ENOMEM;
goto error;
@@ -153,14 +153,12 @@ static int dm_ima_alloc_and_copy_capacity_str(struct mapped_device *md, char **c
capacity = get_capacity(md->disk);
- *capacity_str = dm_ima_alloc(DM_IMA_DEVICE_CAPACITY_BUF_LEN, GFP_KERNEL, noio);
+ *capacity_str = dm_ima_alloc(DM_IMA_DEVICE_CAPACITY_BUF_LEN, noio);
if (!(*capacity_str))
return -ENOMEM;
- scnprintf(*capacity_str, DM_IMA_DEVICE_BUF_LEN, "current_device_capacity=%llu;",
- capacity);
-
- return 0;
+ return scnprintf(*capacity_str, DM_IMA_DEVICE_BUF_LEN, "current_device_capacity=%llu;",
+ capacity);
}
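dm_ima_alloc_and_copy_capacity_str() now returns the scnprintf() length, letting callers memcpy() the capacity string without a later strlen(). A sketch of that convention; the format string matches the one above, everything else is scaffolding:

#include <stdio.h>
#include <string.h>

/* Format into buf and return the stored length (scnprintf-style),
 * so the caller can memcpy without re-measuring the string. */
static int format_capacity(char *buf, size_t size, unsigned long long cap)
{
	int n = snprintf(buf, size, "current_device_capacity=%llu;", cap);

	return n >= (int)size ? (int)size - 1 : n;
}

int main(void)
{
	char cap[64], out[128];
	int len = format_capacity(cap, sizeof(cap), 1048576);

	memcpy(out, cap, len);	/* no strlen() needed */
	out[len] = '\0';
	printf("%d bytes: %s\n", len, out);
	return 0;
}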
/*
@@ -195,15 +193,15 @@ void dm_ima_measure_on_table_load(struct dm_table *table, unsigned int status_fl
const size_t hash_alg_prefix_len = strlen(DM_IMA_TABLE_HASH_ALG) + 1;
char table_load_event_name[] = "dm_table_load";
- ima_buf = dm_ima_alloc(DM_IMA_MEASUREMENT_BUF_LEN, GFP_KERNEL, noio);
+ ima_buf = dm_ima_alloc(DM_IMA_MEASUREMENT_BUF_LEN, noio);
if (!ima_buf)
return;
- target_metadata_buf = dm_ima_alloc(DM_IMA_TARGET_METADATA_BUF_LEN, GFP_KERNEL, noio);
+ target_metadata_buf = dm_ima_alloc(DM_IMA_TARGET_METADATA_BUF_LEN, noio);
if (!target_metadata_buf)
goto error;
- target_data_buf = dm_ima_alloc(DM_IMA_TARGET_DATA_BUF_LEN, GFP_KERNEL, noio);
+ target_data_buf = dm_ima_alloc(DM_IMA_TARGET_DATA_BUF_LEN, noio);
if (!target_data_buf)
goto error;
@@ -218,7 +216,7 @@ void dm_ima_measure_on_table_load(struct dm_table *table, unsigned int status_fl
shash->tfm = tfm;
digest_size = crypto_shash_digestsize(tfm);
- digest = dm_ima_alloc(digest_size, GFP_KERNEL, noio);
+ digest = dm_ima_alloc(digest_size, noio);
if (!digest)
goto error;
@@ -327,7 +325,7 @@ void dm_ima_measure_on_table_load(struct dm_table *table, unsigned int status_fl
if (r < 0)
goto error;
- digest_buf = dm_ima_alloc((digest_size*2) + hash_alg_prefix_len + 1, GFP_KERNEL, noio);
+ digest_buf = dm_ima_alloc((digest_size*2) + hash_alg_prefix_len + 1, noio);
if (!digest_buf)
goto error;
@@ -371,18 +369,18 @@ void dm_ima_measure_on_device_resume(struct mapped_device *md, bool swap)
{
char *device_table_data, *dev_name = NULL, *dev_uuid = NULL, *capacity_str = NULL;
char active[] = "active_table_hash=";
- unsigned int active_len = strlen(active), capacity_len = 0;
+ unsigned int active_len = strlen(active);
unsigned int l = 0;
bool noio = true;
bool nodata = true;
- int r;
+ int capacity_len;
- device_table_data = dm_ima_alloc(DM_IMA_DEVICE_BUF_LEN, GFP_KERNEL, noio);
+ device_table_data = dm_ima_alloc(DM_IMA_DEVICE_BUF_LEN, noio);
if (!device_table_data)
return;
- r = dm_ima_alloc_and_copy_capacity_str(md, &capacity_str, noio);
- if (r)
+ capacity_len = dm_ima_alloc_and_copy_capacity_str(md, &capacity_str, noio);
+ if (capacity_len < 0)
goto error;
memcpy(device_table_data + l, DM_IMA_VERSION_STR, md->ima.dm_version_str_len);
@@ -445,8 +443,7 @@ void dm_ima_measure_on_device_resume(struct mapped_device *md, bool swap)
}
if (nodata) {
- r = dm_ima_alloc_and_copy_name_uuid(md, &dev_name, &dev_uuid, noio);
- if (r)
+ if (dm_ima_alloc_and_copy_name_uuid(md, &dev_name, &dev_uuid, noio))
goto error;
l = scnprintf(device_table_data, DM_IMA_DEVICE_BUF_LEN,
@@ -454,7 +451,6 @@ void dm_ima_measure_on_device_resume(struct mapped_device *md, bool swap)
DM_IMA_VERSION_STR, dev_name, dev_uuid);
}
- capacity_len = strlen(capacity_str);
memcpy(device_table_data + l, capacity_str, capacity_len);
l += capacity_len;
@@ -483,18 +479,17 @@ void dm_ima_measure_on_device_remove(struct mapped_device *md, bool remove_all)
unsigned int device_active_len = strlen(device_active_str);
unsigned int device_inactive_len = strlen(device_inactive_str);
unsigned int remove_all_len = strlen(remove_all_str);
- unsigned int capacity_len = 0;
unsigned int l = 0;
bool noio = true;
bool nodata = true;
- int r;
+ int capacity_len;
- device_table_data = dm_ima_alloc(DM_IMA_DEVICE_BUF_LEN*2, GFP_KERNEL, noio);
+ device_table_data = dm_ima_alloc(DM_IMA_DEVICE_BUF_LEN*2, noio);
if (!device_table_data)
goto exit;
- r = dm_ima_alloc_and_copy_capacity_str(md, &capacity_str, noio);
- if (r) {
+ capacity_len = dm_ima_alloc_and_copy_capacity_str(md, &capacity_str, noio);
+ if (capacity_len < 0) {
kfree(device_table_data);
goto exit;
}
@@ -570,7 +565,6 @@ void dm_ima_measure_on_device_remove(struct mapped_device *md, bool remove_all)
memcpy(device_table_data + l, remove_all ? "y;" : "n;", 2);
l += 2;
- capacity_len = strlen(capacity_str);
memcpy(device_table_data + l, capacity_str, capacity_len);
l += capacity_len;
@@ -602,20 +596,20 @@ exit:
*/
void dm_ima_measure_on_table_clear(struct mapped_device *md, bool new_map)
{
- unsigned int l = 0, capacity_len = 0;
+ unsigned int l = 0;
char *device_table_data = NULL, *dev_name = NULL, *dev_uuid = NULL, *capacity_str = NULL;
char inactive_str[] = "inactive_table_hash=";
unsigned int inactive_len = strlen(inactive_str);
bool noio = true;
bool nodata = true;
- int r;
+ int capacity_len;
- device_table_data = dm_ima_alloc(DM_IMA_DEVICE_BUF_LEN, GFP_KERNEL, noio);
+ device_table_data = dm_ima_alloc(DM_IMA_DEVICE_BUF_LEN, noio);
if (!device_table_data)
return;
- r = dm_ima_alloc_and_copy_capacity_str(md, &capacity_str, noio);
- if (r)
+ capacity_len = dm_ima_alloc_and_copy_capacity_str(md, &capacity_str, noio);
+ if (capacity_len < 0)
goto error1;
memcpy(device_table_data + l, DM_IMA_VERSION_STR, md->ima.dm_version_str_len);
@@ -650,7 +644,6 @@ void dm_ima_measure_on_table_clear(struct mapped_device *md, bool new_map)
DM_IMA_VERSION_STR, dev_name, dev_uuid);
}
- capacity_len = strlen(capacity_str);
memcpy(device_table_data + l, capacity_str, capacity_len);
l += capacity_len;
@@ -703,7 +696,7 @@ void dm_ima_measure_on_device_rename(struct mapped_device *md)
char *old_device_data = NULL, *new_device_data = NULL, *combined_device_data = NULL;
char *new_dev_name = NULL, *new_dev_uuid = NULL, *capacity_str = NULL;
bool noio = true;
- int r, len;
+ int len;
if (dm_ima_alloc_and_copy_device_data(md, &new_device_data,
md->ima.active_table.num_targets, noio))
@@ -712,12 +705,11 @@ void dm_ima_measure_on_device_rename(struct mapped_device *md)
if (dm_ima_alloc_and_copy_name_uuid(md, &new_dev_name, &new_dev_uuid, noio))
goto error;
- combined_device_data = dm_ima_alloc(DM_IMA_DEVICE_BUF_LEN * 2, GFP_KERNEL, noio);
+ combined_device_data = dm_ima_alloc(DM_IMA_DEVICE_BUF_LEN * 2, noio);
if (!combined_device_data)
goto error;
- r = dm_ima_alloc_and_copy_capacity_str(md, &capacity_str, noio);
- if (r)
+ if (dm_ima_alloc_and_copy_capacity_str(md, &capacity_str, noio) < 0)
goto error;
old_device_data = md->ima.active_table.device_metadata;
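Taken together, the dm-ima changes above make dm_ima_alloc() unconditionally use GFP_KERNEL (every call site passed GFP_KERNEL anyway) and turn dm_ima_alloc_and_copy_capacity_str() into a function that returns the scnprintf() length, letting callers drop the later strlen(capacity_str). A condensed sketch of the resulting calling convention, grounded in the hunks above:

	int capacity_len;
	char *capacity_str;

	/* returns bytes written on success, or a negative errno */
	capacity_len = dm_ima_alloc_and_copy_capacity_str(md, &capacity_str, noio);
	if (capacity_len < 0)
		goto error;
	memcpy(device_table_data + l, capacity_str, capacity_len);
	l += capacity_len;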
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index ab96b692e5a3..170bf67a2edd 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -219,10 +219,13 @@ struct dm_integrity_c {
__u8 log2_blocks_per_bitmap_bit;
unsigned char mode;
+ bool internal_hash;
int failed;
- struct crypto_shash *internal_hash;
+ struct crypto_shash *internal_shash;
+ struct crypto_ahash *internal_ahash;
+ unsigned int internal_hash_digestsize;
struct dm_target *ti;
@@ -277,6 +280,9 @@ struct dm_integrity_c {
bool fix_hmac;
bool legacy_recalculate;
+ mempool_t ahash_req_pool;
+ struct ahash_request *journal_ahash_req;
+
struct alg_spec internal_hash_alg;
struct alg_spec journal_crypt_alg;
struct alg_spec journal_mac_alg;
@@ -326,6 +332,8 @@ struct dm_integrity_io {
unsigned payload_len;
bool integrity_payload_from_mempool;
bool integrity_range_locked;
+
+ struct ahash_request *ahash_req;
};
struct journal_completion {
@@ -352,6 +360,7 @@ struct bitmap_block_status {
static struct kmem_cache *journal_io_cache;
#define JOURNAL_IO_MEMPOOL 32
+#define AHASH_MEMPOOL 32
#ifdef DEBUG_PRINT
#define DEBUG_print(x, ...) printk(KERN_DEBUG x, ##__VA_ARGS__)
@@ -1634,15 +1643,15 @@ static void integrity_end_io(struct bio *bio)
dec_in_flight(dio);
}
-static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector,
- const char *data, char *result)
+static void integrity_sector_checksum_shash(struct dm_integrity_c *ic, sector_t sector,
+ const char *data, unsigned offset, char *result)
{
__le64 sector_le = cpu_to_le64(sector);
- SHASH_DESC_ON_STACK(req, ic->internal_hash);
+ SHASH_DESC_ON_STACK(req, ic->internal_shash);
int r;
unsigned int digest_size;
- req->tfm = ic->internal_hash;
+ req->tfm = ic->internal_shash;
r = crypto_shash_init(req);
if (unlikely(r < 0)) {
@@ -1664,7 +1673,7 @@ static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector
goto failed;
}
- r = crypto_shash_update(req, data, ic->sectors_per_block << SECTOR_SHIFT);
+ r = crypto_shash_update(req, data + offset, ic->sectors_per_block << SECTOR_SHIFT);
if (unlikely(r < 0)) {
dm_integrity_io_error(ic, "crypto_shash_update", r);
goto failed;
@@ -1676,7 +1685,7 @@ static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector
goto failed;
}
- digest_size = crypto_shash_digestsize(ic->internal_hash);
+ digest_size = ic->internal_hash_digestsize;
if (unlikely(digest_size < ic->tag_size))
memset(result + digest_size, 0, ic->tag_size - digest_size);
@@ -1687,6 +1696,104 @@ failed:
get_random_bytes(result, ic->tag_size);
}
+static void integrity_sector_checksum_ahash(struct dm_integrity_c *ic, struct ahash_request **ahash_req,
+ sector_t sector, struct page *page, unsigned offset, char *result)
+{
+ __le64 sector_le = cpu_to_le64(sector);
+ struct ahash_request *req;
+ DECLARE_CRYPTO_WAIT(wait);
+ struct scatterlist sg[3], *s = sg;
+ int r;
+ unsigned int digest_size;
+ unsigned int nbytes = 0;
+
+ might_sleep();
+
+ req = *ahash_req;
+ if (unlikely(!req)) {
+ req = mempool_alloc(&ic->ahash_req_pool, GFP_NOIO);
+ *ahash_req = req;
+ }
+
+ ahash_request_set_tfm(req, ic->internal_ahash);
+ ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, crypto_req_done, &wait);
+
+ if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) {
+ sg_init_table(sg, 3);
+ sg_set_buf(s, (const __u8 *)&ic->sb->salt, SALT_SIZE);
+ nbytes += SALT_SIZE;
+ s++;
+ } else {
+ sg_init_table(sg, 2);
+ }
+
+ if (likely(!is_vmalloc_addr(&sector_le))) {
+ sg_set_buf(s, &sector_le, sizeof(sector_le));
+ } else {
+ struct page *sec_page = vmalloc_to_page(&sector_le);
+ unsigned int sec_off = offset_in_page(&sector_le);
+ sg_set_page(s, sec_page, sizeof(sector_le), sec_off);
+ }
+ nbytes += sizeof(sector_le);
+ s++;
+
+ sg_set_page(s, page, ic->sectors_per_block << SECTOR_SHIFT, offset);
+ nbytes += ic->sectors_per_block << SECTOR_SHIFT;
+
+ ahash_request_set_crypt(req, sg, result, nbytes);
+
+ r = crypto_wait_req(crypto_ahash_digest(req), &wait);
+ if (unlikely(r)) {
+ dm_integrity_io_error(ic, "crypto_ahash_digest", r);
+ goto failed;
+ }
+
+ digest_size = ic->internal_hash_digestsize;
+ if (unlikely(digest_size < ic->tag_size))
+ memset(result + digest_size, 0, ic->tag_size - digest_size);
+
+ return;
+
+failed:
+ /* this shouldn't happen anyway, the hash functions have no reason to fail */
+ get_random_bytes(result, ic->tag_size);
+}
+
+static void integrity_sector_checksum(struct dm_integrity_c *ic, struct ahash_request **ahash_req,
+ sector_t sector, const char *data, unsigned offset, char *result)
+{
+ if (likely(ic->internal_shash != NULL))
+ integrity_sector_checksum_shash(ic, sector, data, offset, result);
+ else
+ integrity_sector_checksum_ahash(ic, ahash_req, sector, (struct page *)data, offset, result);
+}
+
+static void *integrity_kmap(struct dm_integrity_c *ic, struct page *p)
+{
+ if (likely(ic->internal_shash != NULL))
+ return kmap_local_page(p);
+ else
+ return p;
+}
+
+static void integrity_kunmap(struct dm_integrity_c *ic, const void *ptr)
+{
+ if (likely(ic->internal_shash != NULL))
+ kunmap_local(ptr);
+}
+
+static void *integrity_identity(struct dm_integrity_c *ic, void *data)
+{
+#ifdef CONFIG_DEBUG_SG
+ BUG_ON(offset_in_page(data));
+ BUG_ON(!virt_addr_valid(data));
+#endif
+ if (likely(ic->internal_shash != NULL))
+ return data;
+ else
+ return virt_to_page(data);
+}
+
static noinline void integrity_recheck(struct dm_integrity_io *dio, char *checksum)
{
struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
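Note the contract established by the three helpers above: integrity_sector_checksum() takes the same (data, offset) pair for both back ends, but the meaning of "data" differs. On the shash path it is a kmapped virtual address and the offset is simply added to it; on the ahash path it is really a struct page pointer and the offset is the offset within that page, consumed through a scatterlist. integrity_kmap()/integrity_kunmap()/integrity_identity() hide that difference, so a caller under this contract looks like:

	char *mem = integrity_kmap(ic, bv.bv_page);	/* virt addr or page */
	integrity_sector_checksum(ic, &dio->ahash_req, sector,
				  mem, bv.bv_offset, checksum);
	integrity_kunmap(ic, mem);			/* no-op on the ahash path */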
@@ -1711,6 +1818,7 @@ static noinline void integrity_recheck(struct dm_integrity_io *dio, char *checks
sector_t alignment;
char *mem;
char *buffer = page_to_virt(page);
+ unsigned int buffer_offset;
int r;
struct dm_io_request io_req;
struct dm_io_region io_loc;
@@ -1728,7 +1836,7 @@ static noinline void integrity_recheck(struct dm_integrity_io *dio, char *checks
alignment &= -alignment;
io_loc.sector = round_down(io_loc.sector, alignment);
io_loc.count += sector - io_loc.sector;
- buffer += (sector - io_loc.sector) << SECTOR_SHIFT;
+ buffer_offset = (sector - io_loc.sector) << SECTOR_SHIFT;
io_loc.count = round_up(io_loc.count, alignment);
r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
@@ -1737,7 +1845,7 @@ static noinline void integrity_recheck(struct dm_integrity_io *dio, char *checks
goto free_ret;
}
- integrity_sector_checksum(ic, logical_sector, buffer, checksum);
+ integrity_sector_checksum(ic, &dio->ahash_req, logical_sector, integrity_identity(ic, buffer), buffer_offset, checksum);
r = dm_integrity_rw_tag(ic, checksum, &dio->metadata_block,
&dio->metadata_offset, ic->tag_size, TAG_CMP);
if (r) {
@@ -1754,7 +1862,7 @@ static noinline void integrity_recheck(struct dm_integrity_io *dio, char *checks
}
mem = bvec_kmap_local(&bv);
- memcpy(mem + pos, buffer, ic->sectors_per_block << SECTOR_SHIFT);
+ memcpy(mem + pos, buffer + buffer_offset, ic->sectors_per_block << SECTOR_SHIFT);
kunmap_local(mem);
pos += ic->sectors_per_block << SECTOR_SHIFT;
@@ -1776,7 +1884,7 @@ static void integrity_metadata(struct work_struct *w)
if (ic->internal_hash) {
struct bvec_iter iter;
struct bio_vec bv;
- unsigned int digest_size = crypto_shash_digestsize(ic->internal_hash);
+ unsigned int digest_size = ic->internal_hash_digestsize;
struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
char *checksums;
unsigned int extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
@@ -1837,17 +1945,17 @@ static void integrity_metadata(struct work_struct *w)
char *mem, *checksums_ptr;
again:
- mem = bvec_kmap_local(&bv_copy);
+ mem = integrity_kmap(ic, bv_copy.bv_page);
pos = 0;
checksums_ptr = checksums;
do {
- integrity_sector_checksum(ic, sector, mem + pos, checksums_ptr);
+ integrity_sector_checksum(ic, &dio->ahash_req, sector, mem, bv_copy.bv_offset + pos, checksums_ptr);
checksums_ptr += ic->tag_size;
sectors_to_process -= ic->sectors_per_block;
pos += ic->sectors_per_block << SECTOR_SHIFT;
sector += ic->sectors_per_block;
} while (pos < bv_copy.bv_len && sectors_to_process && checksums != checksums_onstack);
- kunmap_local(mem);
+ integrity_kunmap(ic, mem);
r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
checksums_ptr - checksums, dio->op == REQ_OP_READ ? TAG_CMP : TAG_WRITE);
@@ -1949,6 +2057,7 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
dio->ic = ic;
dio->bi_status = 0;
dio->op = bio_op(bio);
+ dio->ahash_req = NULL;
if (ic->mode == 'I') {
bio->bi_iter.bi_sector = dm_target_offset(ic->ti, bio->bi_iter.bi_sector);
@@ -2071,19 +2180,6 @@ retry_kmap:
js++;
mem_ptr += 1 << SECTOR_SHIFT;
} while (++s < ic->sectors_per_block);
-#ifdef INTERNAL_VERIFY
- if (ic->internal_hash) {
- char checksums_onstack[MAX_T(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
-
- integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
- if (unlikely(crypto_memneq(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
- DMERR_LIMIT("Checksum failed when reading from journal, at sector 0x%llx",
- logical_sector);
- dm_audit_log_bio(DM_MSG_PREFIX, "journal-checksum",
- bio, logical_sector, 0);
- }
- }
-#endif
}
if (!ic->internal_hash) {
@@ -2124,15 +2220,17 @@ retry_kmap:
} while (++s < ic->sectors_per_block);
if (ic->internal_hash) {
- unsigned int digest_size = crypto_shash_digestsize(ic->internal_hash);
+ unsigned int digest_size = ic->internal_hash_digestsize;
+ void *js_page = integrity_identity(ic, (char *)js - offset_in_page(js));
+ unsigned js_offset = offset_in_page(js);
if (unlikely(digest_size > ic->tag_size)) {
char checksums_onstack[HASH_MAX_DIGESTSIZE];
- integrity_sector_checksum(ic, logical_sector, (char *)js, checksums_onstack);
+ integrity_sector_checksum(ic, &dio->ahash_req, logical_sector, js_page, js_offset, checksums_onstack);
memcpy(journal_entry_tag(ic, je), checksums_onstack, ic->tag_size);
} else
- integrity_sector_checksum(ic, logical_sector, (char *)js, journal_entry_tag(ic, je));
+ integrity_sector_checksum(ic, &dio->ahash_req, logical_sector, js_page, js_offset, journal_entry_tag(ic, je));
}
journal_entry_set_sector(je, logical_sector);
@@ -2428,7 +2526,7 @@ retry:
if (!dio->integrity_payload) {
unsigned digest_size, extra_size;
dio->payload_len = ic->tuple_size * (bio_sectors(bio) >> ic->sb->log2_sectors_per_block);
- digest_size = crypto_shash_digestsize(ic->internal_hash);
+ digest_size = ic->internal_hash_digestsize;
extra_size = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
dio->payload_len += extra_size;
dio->integrity_payload = kmalloc(dio->payload_len, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
@@ -2505,11 +2603,11 @@ skip_spinlock:
unsigned pos = 0;
while (dio->bio_details.bi_iter.bi_size) {
struct bio_vec bv = bio_iter_iovec(bio, dio->bio_details.bi_iter);
- const char *mem = bvec_kmap_local(&bv);
+ const char *mem = integrity_kmap(ic, bv.bv_page);
if (ic->tag_size < ic->tuple_size)
memset(dio->integrity_payload + pos + ic->tag_size, 0, ic->tuple_size - ic->tag_size);
- integrity_sector_checksum(ic, dio->bio_details.bi_iter.bi_sector, mem, dio->integrity_payload + pos);
- kunmap_local(mem);
+ integrity_sector_checksum(ic, &dio->ahash_req, dio->bio_details.bi_iter.bi_sector, mem, bv.bv_offset, dio->integrity_payload + pos);
+ integrity_kunmap(ic, mem);
pos += ic->tuple_size;
bio_advance_iter_single(bio, &dio->bio_details.bi_iter, ic->sectors_per_block << SECTOR_SHIFT);
}
@@ -2588,8 +2686,8 @@ static void dm_integrity_inline_recheck(struct work_struct *w)
}
bio_put(outgoing_bio);
- integrity_sector_checksum(ic, dio->bio_details.bi_iter.bi_sector, outgoing_data, digest);
- if (unlikely(crypto_memneq(digest, dio->integrity_payload, min(crypto_shash_digestsize(ic->internal_hash), ic->tag_size)))) {
+ integrity_sector_checksum(ic, &dio->ahash_req, dio->bio_details.bi_iter.bi_sector, integrity_identity(ic, outgoing_data), 0, digest);
+ if (unlikely(crypto_memneq(digest, dio->integrity_payload, min(ic->internal_hash_digestsize, ic->tag_size)))) {
DMERR_LIMIT("%pg: Checksum failed at sector 0x%llx",
ic->dev->bdev, dio->bio_details.bi_iter.bi_sector);
atomic64_inc(&ic->number_of_mismatches);
@@ -2612,33 +2710,58 @@ static void dm_integrity_inline_recheck(struct work_struct *w)
bio_endio(bio);
}
+static inline bool dm_integrity_check(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
+{
+ struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
+ unsigned pos = 0;
+
+ while (dio->bio_details.bi_iter.bi_size) {
+ char digest[HASH_MAX_DIGESTSIZE];
+ struct bio_vec bv = bio_iter_iovec(bio, dio->bio_details.bi_iter);
+ char *mem = integrity_kmap(ic, bv.bv_page);
+ integrity_sector_checksum(ic, &dio->ahash_req, dio->bio_details.bi_iter.bi_sector, mem, bv.bv_offset, digest);
+ if (unlikely(crypto_memneq(digest, dio->integrity_payload + pos,
+ min(ic->internal_hash_digestsize, ic->tag_size)))) {
+ integrity_kunmap(ic, mem);
+ dm_integrity_free_payload(dio);
+ INIT_WORK(&dio->work, dm_integrity_inline_recheck);
+ queue_work(ic->offload_wq, &dio->work);
+ return false;
+ }
+ integrity_kunmap(ic, mem);
+ pos += ic->tuple_size;
+ bio_advance_iter_single(bio, &dio->bio_details.bi_iter, ic->sectors_per_block << SECTOR_SHIFT);
+ }
+
+ return true;
+}
+
+static void dm_integrity_inline_async_check(struct work_struct *w)
+{
+ struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
+ struct dm_integrity_c *ic = dio->ic;
+ struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
+
+ if (likely(dm_integrity_check(ic, dio)))
+ bio_endio(bio);
+}
+
static int dm_integrity_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *status)
{
struct dm_integrity_c *ic = ti->private;
+ struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
if (ic->mode == 'I') {
- struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
- if (dio->op == REQ_OP_READ && likely(*status == BLK_STS_OK)) {
- unsigned pos = 0;
+ if (dio->op == REQ_OP_READ && likely(*status == BLK_STS_OK) && likely(dio->bio_details.bi_iter.bi_size != 0)) {
if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
unlikely(dio->integrity_range_locked))
- goto skip_check;
- while (dio->bio_details.bi_iter.bi_size) {
- char digest[HASH_MAX_DIGESTSIZE];
- struct bio_vec bv = bio_iter_iovec(bio, dio->bio_details.bi_iter);
- char *mem = bvec_kmap_local(&bv);
- //memset(mem, 0xff, ic->sectors_per_block << SECTOR_SHIFT);
- integrity_sector_checksum(ic, dio->bio_details.bi_iter.bi_sector, mem, digest);
- if (unlikely(crypto_memneq(digest, dio->integrity_payload + pos,
- min(crypto_shash_digestsize(ic->internal_hash), ic->tag_size)))) {
- kunmap_local(mem);
- dm_integrity_free_payload(dio);
- INIT_WORK(&dio->work, dm_integrity_inline_recheck);
- queue_work(ic->offload_wq, &dio->work);
+ goto skip_check;
+ if (likely(ic->internal_shash != NULL)) {
+ if (unlikely(!dm_integrity_check(ic, dio)))
return DM_ENDIO_INCOMPLETE;
- }
- kunmap_local(mem);
- pos += ic->tuple_size;
- bio_advance_iter_single(bio, &dio->bio_details.bi_iter, ic->sectors_per_block << SECTOR_SHIFT);
+ } else {
+ INIT_WORK(&dio->work, dm_integrity_inline_async_check);
+ queue_work(ic->offload_wq, &dio->work);
+ return DM_ENDIO_INCOMPLETE;
}
}
skip_check:
@@ -2646,6 +2769,8 @@ skip_check:
if (unlikely(dio->integrity_range_locked))
remove_range(ic, &dio->range);
}
+ if (unlikely(dio->ahash_req))
+ mempool_free(dio->ahash_req, &ic->ahash_req_pool);
return DM_ENDIO_DONE;
}
@@ -2902,9 +3027,12 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned int write_start
#endif
ic->internal_hash) {
char test_tag[MAX_T(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
+ struct journal_sector *js = access_journal_data(ic, i, l);
+ void *js_page = integrity_identity(ic, (char *)js - offset_in_page(js));
+ unsigned js_offset = offset_in_page(js);
- integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block),
- (char *)access_journal_data(ic, i, l), test_tag);
+ integrity_sector_checksum(ic, &ic->journal_ahash_req, sec + ((l - j) << ic->sb->log2_sectors_per_block),
+ js_page, js_offset, test_tag);
if (unlikely(crypto_memneq(test_tag, journal_entry_tag(ic, je2), ic->tag_size))) {
dm_integrity_io_error(ic, "tag mismatch when replaying journal", -EILSEQ);
dm_audit_log_target(DM_MSG_PREFIX, "integrity-replay-journal", ic->ti, 0);
@@ -2987,6 +3115,7 @@ static void integrity_recalc(struct work_struct *w)
size_t recalc_tags_size;
u8 *recalc_buffer = NULL;
u8 *recalc_tags = NULL;
+ struct ahash_request *ahash_req = NULL;
struct dm_integrity_range range;
struct dm_io_request io_req;
struct dm_io_region io_loc;
@@ -3001,7 +3130,7 @@ static void integrity_recalc(struct work_struct *w)
unsigned recalc_sectors = RECALC_SECTORS;
retry:
- recalc_buffer = __vmalloc(recalc_sectors << SECTOR_SHIFT, GFP_NOIO);
+ recalc_buffer = kmalloc(recalc_sectors << SECTOR_SHIFT, GFP_NOIO | __GFP_NOWARN);
if (!recalc_buffer) {
oom:
recalc_sectors >>= 1;
@@ -3011,11 +3140,11 @@ oom:
goto free_ret;
}
recalc_tags_size = (recalc_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size;
- if (crypto_shash_digestsize(ic->internal_hash) > ic->tag_size)
- recalc_tags_size += crypto_shash_digestsize(ic->internal_hash) - ic->tag_size;
+ if (ic->internal_hash_digestsize > ic->tag_size)
+ recalc_tags_size += ic->internal_hash_digestsize - ic->tag_size;
recalc_tags = kvmalloc(recalc_tags_size, GFP_NOIO);
if (!recalc_tags) {
- vfree(recalc_buffer);
+ kfree(recalc_buffer);
recalc_buffer = NULL;
goto oom;
}
@@ -3081,7 +3210,7 @@ next_chunk:
goto err;
io_req.bi_opf = REQ_OP_READ;
- io_req.mem.type = DM_IO_VMA;
+ io_req.mem.type = DM_IO_KMEM;
io_req.mem.ptr.addr = recalc_buffer;
io_req.notify.fn = NULL;
io_req.client = ic->io;
@@ -3097,7 +3226,10 @@ next_chunk:
t = recalc_tags;
for (i = 0; i < n_sectors; i += ic->sectors_per_block) {
- integrity_sector_checksum(ic, logical_sector + i, recalc_buffer + (i << SECTOR_SHIFT), t);
+ void *ptr = recalc_buffer + (i << SECTOR_SHIFT);
+ void *ptr_page = integrity_identity(ic, (char *)ptr - offset_in_page(ptr));
+ unsigned ptr_offset = offset_in_page(ptr);
+ integrity_sector_checksum(ic, &ahash_req, logical_sector + i, ptr_page, ptr_offset, t);
t += ic->tag_size;
}
@@ -3139,8 +3271,9 @@ unlock_ret:
recalc_write_super(ic);
free_ret:
- vfree(recalc_buffer);
+ kfree(recalc_buffer);
kvfree(recalc_tags);
+ mempool_free(ahash_req, &ic->ahash_req_pool);
}
static void integrity_recalc_inline(struct work_struct *w)
@@ -3149,6 +3282,7 @@ static void integrity_recalc_inline(struct work_struct *w)
size_t recalc_tags_size;
u8 *recalc_buffer = NULL;
u8 *recalc_tags = NULL;
+ struct ahash_request *ahash_req = NULL;
struct dm_integrity_range range;
struct bio *bio;
struct bio_integrity_payload *bip;
@@ -3171,8 +3305,8 @@ oom:
}
recalc_tags_size = (recalc_sectors >> ic->sb->log2_sectors_per_block) * ic->tuple_size;
- if (crypto_shash_digestsize(ic->internal_hash) > ic->tuple_size)
- recalc_tags_size += crypto_shash_digestsize(ic->internal_hash) - ic->tuple_size;
+ if (ic->internal_hash_digestsize > ic->tuple_size)
+ recalc_tags_size += ic->internal_hash_digestsize - ic->tuple_size;
recalc_tags = kmalloc(recalc_tags_size, GFP_NOIO | __GFP_NOWARN);
if (!recalc_tags) {
kfree(recalc_buffer);
@@ -3217,8 +3351,11 @@ next_chunk:
t = recalc_tags;
for (i = 0; i < range.n_sectors; i += ic->sectors_per_block) {
+ void *ptr = recalc_buffer + (i << SECTOR_SHIFT);
+ void *ptr_page = integrity_identity(ic, (char *)ptr - offset_in_page(ptr));
+ unsigned ptr_offset = offset_in_page(ptr);
memset(t, 0, ic->tuple_size);
- integrity_sector_checksum(ic, range.logical_sector + i, recalc_buffer + (i << SECTOR_SHIFT), t);
+ integrity_sector_checksum(ic, &ahash_req, range.logical_sector + i, ptr_page, ptr_offset, t);
t += ic->tuple_size;
}
@@ -3270,6 +3407,7 @@ unlock_ret:
free_ret:
kfree(recalc_buffer);
kfree(recalc_tags);
+ mempool_free(ahash_req, &ic->ahash_req_pool);
}
static void bitmap_block_work(struct work_struct *w)
@@ -4210,30 +4348,53 @@ nomem:
return -ENOMEM;
}
-static int get_mac(struct crypto_shash **hash, struct alg_spec *a, char **error,
- char *error_alg, char *error_key)
+static int get_mac(struct crypto_shash **shash, struct crypto_ahash **ahash,
+ struct alg_spec *a, char **error, char *error_alg, char *error_key)
{
int r;
if (a->alg_string) {
- *hash = crypto_alloc_shash(a->alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
- if (IS_ERR(*hash)) {
- *error = error_alg;
- r = PTR_ERR(*hash);
- *hash = NULL;
- return r;
- }
-
- if (a->key) {
- r = crypto_shash_setkey(*hash, a->key, a->key_size);
- if (r) {
+ if (shash) {
+ *shash = crypto_alloc_shash(a->alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
+ if (IS_ERR(*shash)) {
+ *shash = NULL;
+ goto try_ahash;
+ }
+ if (a->key) {
+ r = crypto_shash_setkey(*shash, a->key, a->key_size);
+ if (r) {
+ *error = error_key;
+ return r;
+ }
+ } else if (crypto_shash_get_flags(*shash) & CRYPTO_TFM_NEED_KEY) {
*error = error_key;
+ return -ENOKEY;
+ }
+ return 0;
+ }
+try_ahash:
+ if (ahash) {
+ *ahash = crypto_alloc_ahash(a->alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
+ if (IS_ERR(*ahash)) {
+ *error = error_alg;
+ r = PTR_ERR(*ahash);
+ *ahash = NULL;
return r;
}
- } else if (crypto_shash_get_flags(*hash) & CRYPTO_TFM_NEED_KEY) {
- *error = error_key;
- return -ENOKEY;
+ if (a->key) {
+ r = crypto_ahash_setkey(*ahash, a->key, a->key_size);
+ if (r) {
+ *error = error_key;
+ return r;
+ }
+ } else if (crypto_ahash_get_flags(*ahash) & CRYPTO_TFM_NEED_KEY) {
+ *error = error_key;
+ return -ENOKEY;
+ }
+ return 0;
}
+ *error = error_alg;
+ return -ENOENT;
}
return 0;
@@ -4690,12 +4851,26 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv
buffer_sectors = 1;
ic->log2_buffer_sectors = min((int)__fls(buffer_sectors), 31 - SECTOR_SHIFT);
- r = get_mac(&ic->internal_hash, &ic->internal_hash_alg, &ti->error,
+ r = get_mac(&ic->internal_shash, &ic->internal_ahash, &ic->internal_hash_alg, &ti->error,
"Invalid internal hash", "Error setting internal hash key");
if (r)
goto bad;
+ if (ic->internal_shash) {
+ ic->internal_hash = true;
+ ic->internal_hash_digestsize = crypto_shash_digestsize(ic->internal_shash);
+ }
+ if (ic->internal_ahash) {
+ ic->internal_hash = true;
+ ic->internal_hash_digestsize = crypto_ahash_digestsize(ic->internal_ahash);
+ r = mempool_init_kmalloc_pool(&ic->ahash_req_pool, AHASH_MEMPOOL,
+ sizeof(struct ahash_request) + crypto_ahash_reqsize(ic->internal_ahash));
+ if (r) {
+ ti->error = "Cannot allocate mempool";
+ goto bad;
+ }
+ }
- r = get_mac(&ic->journal_mac, &ic->journal_mac_alg, &ti->error,
+ r = get_mac(&ic->journal_mac, NULL, &ic->journal_mac_alg, &ti->error,
"Invalid journal mac", "Error setting journal mac key");
if (r)
goto bad;
@@ -4706,7 +4881,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv
r = -EINVAL;
goto bad;
}
- ic->tag_size = crypto_shash_digestsize(ic->internal_hash);
+ ic->tag_size = ic->internal_hash_digestsize;
}
if (ic->tag_size > MAX_TAG_SIZE) {
ti->error = "Too big tag size";
@@ -5178,6 +5353,8 @@ static void dm_integrity_dtr(struct dm_target *ti)
kvfree(ic->bbs);
if (ic->bufio)
dm_bufio_client_destroy(ic->bufio);
+ mempool_free(ic->journal_ahash_req, &ic->ahash_req_pool);
+ mempool_exit(&ic->ahash_req_pool);
bioset_exit(&ic->recalc_bios);
bioset_exit(&ic->recheck_bios);
mempool_exit(&ic->recheck_pool);
@@ -5215,8 +5392,10 @@ static void dm_integrity_dtr(struct dm_target *ti)
if (ic->sb)
free_pages_exact(ic->sb, SB_SECTORS << SECTOR_SHIFT);
- if (ic->internal_hash)
- crypto_free_shash(ic->internal_hash);
+ if (ic->internal_shash)
+ crypto_free_shash(ic->internal_shash);
+ if (ic->internal_ahash)
+ crypto_free_ahash(ic->internal_ahash);
free_alg(&ic->internal_hash_alg);
if (ic->journal_crypt)
@@ -5233,7 +5412,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
static struct target_type integrity_target = {
.name = "integrity",
- .version = {1, 13, 0},
+ .version = {1, 14, 0},
.module = THIS_MODULE,
.features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
.ctr = dm_integrity_ctr,
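With the get_mac() rework above, the constructor resolves the internal hash in two steps: it first tries a synchronous shash and, only if that allocation fails, falls back to an asynchronous ahash (for example, a driver that only offers an async hardware implementation); the journal MAC path passes a NULL ahash pointer and therefore still requires a shash. A condensed view of the outcome, as consumed by dm_integrity_ctr():

	r = get_mac(&ic->internal_shash, &ic->internal_ahash, &ic->internal_hash_alg,
		    &ti->error, "Invalid internal hash", "Error setting internal hash key");
	/*
	 * On success at most one of internal_shash/internal_ahash is set
	 * (neither if no internal_hash argument was supplied); the ahash
	 * case additionally sizes ahash_req_pool by crypto_ahash_reqsize().
	 */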
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index 679b07dee229..7bb7174f8f4f 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -414,7 +414,7 @@ static int log_super(struct log_writes_c *lc)
}
/*
- * Super sector should be writen in-order, otherwise the
+ * Super sector should be written in-order, otherwise the
* nr_entries could be rewritten incorrectly by an old bio.
*/
wait_for_completion_io(&lc->super_done);
diff --git a/drivers/md/dm-pcache/Kconfig b/drivers/md/dm-pcache/Kconfig
new file mode 100644
index 000000000000..0e251eca892e
--- /dev/null
+++ b/drivers/md/dm-pcache/Kconfig
@@ -0,0 +1,17 @@
+config DM_PCACHE
+ tristate "Persistent cache for Block Device (Experimental)"
+ depends on BLK_DEV_DM
+ depends on DEV_DAX
+ help
+ PCACHE provides a mechanism to use persistent memory (e.g., CXL persistent memory,
+ DAX-enabled devices) as a high-performance cache layer in front of
+ traditional block devices such as SSDs or HDDs.
+
+ PCACHE is implemented as a kernel module that integrates with the block
+ layer and supports direct access (DAX) to persistent memory for low-latency,
+ byte-addressable caching.
+
+ Note: This feature is experimental and should be tested thoroughly
+ before use in production environments.
+
+ If unsure, say 'N'.
diff --git a/drivers/md/dm-pcache/Makefile b/drivers/md/dm-pcache/Makefile
new file mode 100644
index 000000000000..86776e4acad2
--- /dev/null
+++ b/drivers/md/dm-pcache/Makefile
@@ -0,0 +1,3 @@
+dm-pcache-y := dm_pcache.o cache_dev.o segment.o backing_dev.o cache.o cache_gc.o cache_writeback.o cache_segment.o cache_key.o cache_req.o
+
+obj-$(CONFIG_DM_PCACHE) += dm-pcache.o
diff --git a/drivers/md/dm-pcache/backing_dev.c b/drivers/md/dm-pcache/backing_dev.c
new file mode 100644
index 000000000000..7165fc0364bb
--- /dev/null
+++ b/drivers/md/dm-pcache/backing_dev.c
@@ -0,0 +1,374 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/blkdev.h>
+
+#include "../dm-core.h"
+#include "pcache_internal.h"
+#include "cache_dev.h"
+#include "backing_dev.h"
+#include "cache.h"
+#include "dm_pcache.h"
+
+static struct kmem_cache *backing_req_cache;
+static struct kmem_cache *backing_bvec_cache;
+
+static void backing_dev_exit(struct pcache_backing_dev *backing_dev)
+{
+ mempool_exit(&backing_dev->req_pool);
+ mempool_exit(&backing_dev->bvec_pool);
+}
+
+static void req_submit_fn(struct work_struct *work);
+static void req_complete_fn(struct work_struct *work);
+static int backing_dev_init(struct dm_pcache *pcache)
+{
+ struct pcache_backing_dev *backing_dev = &pcache->backing_dev;
+ int ret;
+
+ ret = mempool_init_slab_pool(&backing_dev->req_pool, 128, backing_req_cache);
+ if (ret)
+ goto err;
+
+ ret = mempool_init_slab_pool(&backing_dev->bvec_pool, 128, backing_bvec_cache);
+ if (ret)
+ goto req_pool_exit;
+
+ INIT_LIST_HEAD(&backing_dev->submit_list);
+ INIT_LIST_HEAD(&backing_dev->complete_list);
+ spin_lock_init(&backing_dev->submit_lock);
+ spin_lock_init(&backing_dev->complete_lock);
+ INIT_WORK(&backing_dev->req_submit_work, req_submit_fn);
+ INIT_WORK(&backing_dev->req_complete_work, req_complete_fn);
+ atomic_set(&backing_dev->inflight_reqs, 0);
+ init_waitqueue_head(&backing_dev->inflight_wq);
+
+ return 0;
+
+req_pool_exit:
+ mempool_exit(&backing_dev->req_pool);
+err:
+ return ret;
+}
+
+int backing_dev_start(struct dm_pcache *pcache)
+{
+ struct pcache_backing_dev *backing_dev = &pcache->backing_dev;
+ int ret;
+
+ ret = backing_dev_init(pcache);
+ if (ret)
+ return ret;
+
+ backing_dev->dev_size = bdev_nr_sectors(backing_dev->dm_dev->bdev);
+
+ return 0;
+}
+
+void backing_dev_stop(struct dm_pcache *pcache)
+{
+ struct pcache_backing_dev *backing_dev = &pcache->backing_dev;
+
+ /*
+	 * There should not be any new requests coming in; just wait for
+	 * inflight requests to complete.
+ */
+ wait_event(backing_dev->inflight_wq,
+ atomic_read(&backing_dev->inflight_reqs) == 0);
+
+ flush_work(&backing_dev->req_submit_work);
+ flush_work(&backing_dev->req_complete_work);
+
+ backing_dev_exit(backing_dev);
+}
+
+/* pcache_backing_dev_req functions */
+void backing_dev_req_end(struct pcache_backing_dev_req *backing_req)
+{
+ struct pcache_backing_dev *backing_dev = backing_req->backing_dev;
+
+ if (backing_req->end_req)
+ backing_req->end_req(backing_req, backing_req->ret);
+
+ switch (backing_req->type) {
+ case BACKING_DEV_REQ_TYPE_REQ:
+ if (backing_req->req.upper_req)
+ pcache_req_put(backing_req->req.upper_req, backing_req->ret);
+ break;
+ case BACKING_DEV_REQ_TYPE_KMEM:
+ if (backing_req->kmem.bvecs != backing_req->kmem.inline_bvecs)
+ mempool_free(backing_req->kmem.bvecs, &backing_dev->bvec_pool);
+ break;
+ default:
+ BUG();
+ }
+
+ mempool_free(backing_req, &backing_dev->req_pool);
+
+ if (atomic_dec_and_test(&backing_dev->inflight_reqs))
+ wake_up(&backing_dev->inflight_wq);
+}
+
+static void req_complete_fn(struct work_struct *work)
+{
+ struct pcache_backing_dev *backing_dev = container_of(work, struct pcache_backing_dev, req_complete_work);
+ struct pcache_backing_dev_req *backing_req;
+ LIST_HEAD(tmp_list);
+
+ spin_lock_irq(&backing_dev->complete_lock);
+ list_splice_init(&backing_dev->complete_list, &tmp_list);
+ spin_unlock_irq(&backing_dev->complete_lock);
+
+ while (!list_empty(&tmp_list)) {
+ backing_req = list_first_entry(&tmp_list,
+ struct pcache_backing_dev_req, node);
+ list_del_init(&backing_req->node);
+ backing_dev_req_end(backing_req);
+ }
+}
+
+static void backing_dev_bio_end(struct bio *bio)
+{
+ struct pcache_backing_dev_req *backing_req = bio->bi_private;
+ struct pcache_backing_dev *backing_dev = backing_req->backing_dev;
+ unsigned long flags;
+
+ backing_req->ret = blk_status_to_errno(bio->bi_status);
+
+ spin_lock_irqsave(&backing_dev->complete_lock, flags);
+ list_move_tail(&backing_req->node, &backing_dev->complete_list);
+ queue_work(BACKING_DEV_TO_PCACHE(backing_dev)->task_wq, &backing_dev->req_complete_work);
+ spin_unlock_irqrestore(&backing_dev->complete_lock, flags);
+}
+
+static void req_submit_fn(struct work_struct *work)
+{
+ struct pcache_backing_dev *backing_dev = container_of(work, struct pcache_backing_dev, req_submit_work);
+ struct pcache_backing_dev_req *backing_req;
+ LIST_HEAD(tmp_list);
+
+ spin_lock(&backing_dev->submit_lock);
+ list_splice_init(&backing_dev->submit_list, &tmp_list);
+ spin_unlock(&backing_dev->submit_lock);
+
+ while (!list_empty(&tmp_list)) {
+ backing_req = list_first_entry(&tmp_list,
+ struct pcache_backing_dev_req, node);
+ list_del_init(&backing_req->node);
+ submit_bio_noacct(&backing_req->bio);
+ }
+}
+
+void backing_dev_req_submit(struct pcache_backing_dev_req *backing_req, bool direct)
+{
+ struct pcache_backing_dev *backing_dev = backing_req->backing_dev;
+
+ if (direct) {
+ submit_bio_noacct(&backing_req->bio);
+ return;
+ }
+
+ spin_lock(&backing_dev->submit_lock);
+ list_add_tail(&backing_req->node, &backing_dev->submit_list);
+ queue_work(BACKING_DEV_TO_PCACHE(backing_dev)->task_wq, &backing_dev->req_submit_work);
+ spin_unlock(&backing_dev->submit_lock);
+}
+
+static void bio_map(struct bio *bio, void *base, size_t size)
+{
+ struct page *page;
+ unsigned int offset;
+ unsigned int len;
+
+ if (!is_vmalloc_addr(base)) {
+ page = virt_to_page(base);
+ offset = offset_in_page(base);
+
+ BUG_ON(!bio_add_page(bio, page, size, offset));
+ return;
+ }
+
+ flush_kernel_vmap_range(base, size);
+ while (size) {
+ page = vmalloc_to_page(base);
+ offset = offset_in_page(base);
+ len = min_t(size_t, PAGE_SIZE - offset, size);
+
+ BUG_ON(!bio_add_page(bio, page, len, offset));
+ size -= len;
+ base += len;
+ }
+}
+
+static struct pcache_backing_dev_req *req_type_req_alloc(struct pcache_backing_dev *backing_dev,
+ struct pcache_backing_dev_req_opts *opts)
+{
+ struct pcache_request *pcache_req = opts->req.upper_req;
+ struct pcache_backing_dev_req *backing_req;
+ struct bio *orig = pcache_req->bio;
+
+ backing_req = mempool_alloc(&backing_dev->req_pool, opts->gfp_mask);
+ if (!backing_req)
+ return NULL;
+
+ memset(backing_req, 0, sizeof(struct pcache_backing_dev_req));
+
+ bio_init_clone(backing_dev->dm_dev->bdev, &backing_req->bio, orig, opts->gfp_mask);
+
+ backing_req->type = BACKING_DEV_REQ_TYPE_REQ;
+ backing_req->backing_dev = backing_dev;
+ atomic_inc(&backing_dev->inflight_reqs);
+
+ return backing_req;
+}
+
+static struct pcache_backing_dev_req *kmem_type_req_alloc(struct pcache_backing_dev *backing_dev,
+ struct pcache_backing_dev_req_opts *opts)
+{
+ struct pcache_backing_dev_req *backing_req;
+ u32 n_vecs = bio_add_max_vecs(opts->kmem.data, opts->kmem.len);
+
+ backing_req = mempool_alloc(&backing_dev->req_pool, opts->gfp_mask);
+ if (!backing_req)
+ return NULL;
+
+ memset(backing_req, 0, sizeof(struct pcache_backing_dev_req));
+
+ if (n_vecs > BACKING_DEV_REQ_INLINE_BVECS) {
+ backing_req->kmem.bvecs = mempool_alloc(&backing_dev->bvec_pool, opts->gfp_mask);
+ if (!backing_req->kmem.bvecs)
+ goto free_backing_req;
+ } else {
+ backing_req->kmem.bvecs = backing_req->kmem.inline_bvecs;
+ }
+
+ backing_req->kmem.n_vecs = n_vecs;
+ backing_req->type = BACKING_DEV_REQ_TYPE_KMEM;
+ backing_req->backing_dev = backing_dev;
+ atomic_inc(&backing_dev->inflight_reqs);
+
+ return backing_req;
+
+free_backing_req:
+ mempool_free(backing_req, &backing_dev->req_pool);
+ return NULL;
+}
+
+struct pcache_backing_dev_req *backing_dev_req_alloc(struct pcache_backing_dev *backing_dev,
+ struct pcache_backing_dev_req_opts *opts)
+{
+ if (opts->type == BACKING_DEV_REQ_TYPE_REQ)
+ return req_type_req_alloc(backing_dev, opts);
+
+ if (opts->type == BACKING_DEV_REQ_TYPE_KMEM)
+ return kmem_type_req_alloc(backing_dev, opts);
+
+ BUG();
+}
+
+static void req_type_req_init(struct pcache_backing_dev_req *backing_req,
+ struct pcache_backing_dev_req_opts *opts)
+{
+ struct pcache_request *pcache_req = opts->req.upper_req;
+ struct bio *clone;
+ u32 off = opts->req.req_off;
+ u32 len = opts->req.len;
+
+ clone = &backing_req->bio;
+ BUG_ON(off & SECTOR_MASK);
+ BUG_ON(len & SECTOR_MASK);
+ bio_trim(clone, off >> SECTOR_SHIFT, len >> SECTOR_SHIFT);
+
+ clone->bi_iter.bi_sector = (pcache_req->off + off) >> SECTOR_SHIFT;
+ clone->bi_private = backing_req;
+ clone->bi_end_io = backing_dev_bio_end;
+
+ INIT_LIST_HEAD(&backing_req->node);
+ backing_req->end_req = opts->end_fn;
+
+ pcache_req_get(pcache_req);
+ backing_req->req.upper_req = pcache_req;
+ backing_req->req.bio_off = off;
+}
+
+static void kmem_type_req_init(struct pcache_backing_dev_req *backing_req,
+ struct pcache_backing_dev_req_opts *opts)
+{
+ struct pcache_backing_dev *backing_dev = backing_req->backing_dev;
+ struct bio *backing_bio;
+
+ bio_init(&backing_req->bio, backing_dev->dm_dev->bdev, backing_req->kmem.bvecs,
+ backing_req->kmem.n_vecs, opts->kmem.opf);
+
+ backing_bio = &backing_req->bio;
+ bio_map(backing_bio, opts->kmem.data, opts->kmem.len);
+
+ backing_bio->bi_iter.bi_sector = (opts->kmem.backing_off) >> SECTOR_SHIFT;
+ backing_bio->bi_private = backing_req;
+ backing_bio->bi_end_io = backing_dev_bio_end;
+
+ INIT_LIST_HEAD(&backing_req->node);
+ backing_req->end_req = opts->end_fn;
+ backing_req->priv_data = opts->priv_data;
+}
+
+void backing_dev_req_init(struct pcache_backing_dev_req *backing_req,
+ struct pcache_backing_dev_req_opts *opts)
+{
+ if (opts->type == BACKING_DEV_REQ_TYPE_REQ)
+ return req_type_req_init(backing_req, opts);
+
+ if (opts->type == BACKING_DEV_REQ_TYPE_KMEM)
+ return kmem_type_req_init(backing_req, opts);
+
+ BUG();
+}
+
+struct pcache_backing_dev_req *backing_dev_req_create(struct pcache_backing_dev *backing_dev,
+ struct pcache_backing_dev_req_opts *opts)
+{
+ struct pcache_backing_dev_req *backing_req;
+
+ backing_req = backing_dev_req_alloc(backing_dev, opts);
+ if (!backing_req)
+ return NULL;
+
+ backing_dev_req_init(backing_req, opts);
+
+ return backing_req;
+}
+
+void backing_dev_flush(struct pcache_backing_dev *backing_dev)
+{
+ blkdev_issue_flush(backing_dev->dm_dev->bdev);
+}
+
+int pcache_backing_init(void)
+{
+ u32 max_bvecs = (PCACHE_CACHE_SUBTREE_SIZE >> PAGE_SHIFT) + 1;
+ int ret;
+
+ backing_req_cache = KMEM_CACHE(pcache_backing_dev_req, 0);
+ if (!backing_req_cache) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ backing_bvec_cache = kmem_cache_create("pcache-bvec-slab",
+ max_bvecs * sizeof(struct bio_vec),
+ 0, 0, NULL);
+ if (!backing_bvec_cache) {
+ ret = -ENOMEM;
+ goto destroy_req_cache;
+ }
+
+ return 0;
+destroy_req_cache:
+ kmem_cache_destroy(backing_req_cache);
+err:
+ return ret;
+}
+
+void pcache_backing_exit(void)
+{
+ kmem_cache_destroy(backing_bvec_cache);
+ kmem_cache_destroy(backing_req_cache);
+}
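As a usage illustration of the request API above, here is a hedged sketch of writing a kernel buffer through a KMEM-type backing request; my_end_fn, buf, len and off are hypothetical caller-side names:

	struct pcache_backing_dev_req_opts opts = {
		.type		= BACKING_DEV_REQ_TYPE_KMEM,
		.gfp_mask	= GFP_NOIO,
		.end_fn		= my_end_fn,	/* hypothetical completion callback */
		.kmem = {
			.data		= buf,	/* hypothetical kernel buffer */
			.len		= len,
			.opf		= REQ_OP_WRITE,
			.backing_off	= off,	/* byte offset on the backing device */
		},
	};
	struct pcache_backing_dev_req *req;

	req = backing_dev_req_create(backing_dev, &opts);
	if (req)
		backing_dev_req_submit(req, false);	/* queued via req_submit_work */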
diff --git a/drivers/md/dm-pcache/backing_dev.h b/drivers/md/dm-pcache/backing_dev.h
new file mode 100644
index 000000000000..b371cba483b9
--- /dev/null
+++ b/drivers/md/dm-pcache/backing_dev.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _BACKING_DEV_H
+#define _BACKING_DEV_H
+
+#include <linux/device-mapper.h>
+
+#include "pcache_internal.h"
+
+struct pcache_backing_dev_req;
+typedef void (*backing_req_end_fn_t)(struct pcache_backing_dev_req *backing_req, int ret);
+
+#define BACKING_DEV_REQ_TYPE_REQ 1
+#define BACKING_DEV_REQ_TYPE_KMEM 2
+
+#define BACKING_DEV_REQ_INLINE_BVECS 4
+
+struct pcache_request;
+struct pcache_backing_dev_req {
+ u8 type;
+ struct bio bio;
+ struct pcache_backing_dev *backing_dev;
+
+ void *priv_data;
+ backing_req_end_fn_t end_req;
+
+ struct list_head node;
+ int ret;
+
+ union {
+ struct {
+ struct pcache_request *upper_req;
+ u32 bio_off;
+ } req;
+ struct {
+ struct bio_vec inline_bvecs[BACKING_DEV_REQ_INLINE_BVECS];
+ struct bio_vec *bvecs;
+ u32 n_vecs;
+ } kmem;
+ };
+};
+
+struct pcache_backing_dev {
+ struct pcache_cache *cache;
+
+ struct dm_dev *dm_dev;
+ mempool_t req_pool;
+ mempool_t bvec_pool;
+
+ struct list_head submit_list;
+ spinlock_t submit_lock;
+ struct work_struct req_submit_work;
+
+ struct list_head complete_list;
+ spinlock_t complete_lock;
+ struct work_struct req_complete_work;
+
+ atomic_t inflight_reqs;
+ wait_queue_head_t inflight_wq;
+
+ u64 dev_size;
+};
+
+struct dm_pcache;
+int backing_dev_start(struct dm_pcache *pcache);
+void backing_dev_stop(struct dm_pcache *pcache);
+
+struct pcache_backing_dev_req_opts {
+ u32 type;
+ union {
+ struct {
+ struct pcache_request *upper_req;
+ u32 req_off;
+ u32 len;
+ } req;
+ struct {
+ void *data;
+ blk_opf_t opf;
+ u32 len;
+ u64 backing_off;
+ } kmem;
+ };
+
+ gfp_t gfp_mask;
+ backing_req_end_fn_t end_fn;
+ void *priv_data;
+};
+
+static inline u32 backing_dev_req_coalesced_max_len(const void *data, u32 len)
+{
+ const void *p = data;
+ u32 done = 0, in_page, to_advance;
+ struct page *first_page, *next_page;
+
+ if (!is_vmalloc_addr(data))
+ return len;
+
+ first_page = vmalloc_to_page(p);
+advance:
+ in_page = PAGE_SIZE - offset_in_page(p);
+ to_advance = min_t(u32, in_page, len - done);
+
+ done += to_advance;
+ p += to_advance;
+
+ if (done == len)
+ return done;
+
+ next_page = vmalloc_to_page(p);
+ if (zone_device_pages_have_same_pgmap(first_page, next_page))
+ goto advance;
+
+ return done;
+}
+
+void backing_dev_req_submit(struct pcache_backing_dev_req *backing_req, bool direct);
+void backing_dev_req_end(struct pcache_backing_dev_req *backing_req);
+struct pcache_backing_dev_req *backing_dev_req_create(struct pcache_backing_dev *backing_dev,
+ struct pcache_backing_dev_req_opts *opts);
+struct pcache_backing_dev_req *backing_dev_req_alloc(struct pcache_backing_dev *backing_dev,
+ struct pcache_backing_dev_req_opts *opts);
+void backing_dev_req_init(struct pcache_backing_dev_req *backing_req,
+ struct pcache_backing_dev_req_opts *opts);
+void backing_dev_flush(struct pcache_backing_dev *backing_dev);
+
+int pcache_backing_init(void);
+void pcache_backing_exit(void);
+#endif /* _BACKING_DEV_H */
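backing_dev_req_coalesced_max_len() above bounds how much of a (possibly vmalloc'ed) buffer a single request may cover: it walks page by page and stops as soon as a page belongs to a different ZONE_DEVICE pgmap than the first. A hedged sketch of how a caller might split a large buffer accordingly (issue_one_kmem_req is a hypothetical helper):

	while (len) {
		u32 chunk = backing_dev_req_coalesced_max_len(data, len);

		issue_one_kmem_req(data, chunk);	/* hypothetical */
		data += chunk;
		len -= chunk;
	}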
diff --git a/drivers/md/dm-pcache/cache.c b/drivers/md/dm-pcache/cache.c
new file mode 100644
index 000000000000..d8e92367d947
--- /dev/null
+++ b/drivers/md/dm-pcache/cache.c
@@ -0,0 +1,445 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/blk_types.h>
+
+#include "cache.h"
+#include "cache_dev.h"
+#include "backing_dev.h"
+#include "dm_pcache.h"
+
+struct kmem_cache *key_cache;
+
+static inline struct pcache_cache_info *get_cache_info_addr(struct pcache_cache *cache)
+{
+ return cache->cache_info_addr + cache->info_index;
+}
+
+static void cache_info_write(struct pcache_cache *cache)
+{
+ struct pcache_cache_info *cache_info = &cache->cache_info;
+
+ cache_info->header.seq++;
+ cache_info->header.crc = pcache_meta_crc(&cache_info->header,
+ sizeof(struct pcache_cache_info));
+
+ memcpy_flushcache(get_cache_info_addr(cache), cache_info,
+ sizeof(struct pcache_cache_info));
+
+ cache->info_index = (cache->info_index + 1) % PCACHE_META_INDEX_MAX;
+}
+
+static void cache_info_init_default(struct pcache_cache *cache);
+static int cache_info_init(struct pcache_cache *cache, struct pcache_cache_options *opts)
+{
+ struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
+ struct pcache_cache_info *cache_info_addr;
+
+ cache_info_addr = pcache_meta_find_latest(&cache->cache_info_addr->header,
+ sizeof(struct pcache_cache_info),
+ PCACHE_CACHE_INFO_SIZE,
+ &cache->cache_info);
+ if (IS_ERR(cache_info_addr))
+ return PTR_ERR(cache_info_addr);
+
+ if (cache_info_addr) {
+ if (opts->data_crc !=
+ (cache->cache_info.flags & PCACHE_CACHE_FLAGS_DATA_CRC)) {
+ pcache_dev_err(pcache, "invalid option for data_crc: %s, expected: %s",
+ opts->data_crc ? "true" : "false",
+ cache->cache_info.flags & PCACHE_CACHE_FLAGS_DATA_CRC ? "true" : "false");
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+
+ /* init cache_info for new cache */
+ cache_info_init_default(cache);
+ cache_mode_set(cache, opts->cache_mode);
+ if (opts->data_crc)
+ cache->cache_info.flags |= PCACHE_CACHE_FLAGS_DATA_CRC;
+
+ return 0;
+}
+
+static void cache_info_set_gc_percent(struct pcache_cache_info *cache_info, u8 percent)
+{
+ cache_info->flags &= ~PCACHE_CACHE_FLAGS_GC_PERCENT_MASK;
+ cache_info->flags |= FIELD_PREP(PCACHE_CACHE_FLAGS_GC_PERCENT_MASK, percent);
+}
+
+int pcache_cache_set_gc_percent(struct pcache_cache *cache, u8 percent)
+{
+ if (percent > PCACHE_CACHE_GC_PERCENT_MAX || percent < PCACHE_CACHE_GC_PERCENT_MIN)
+ return -EINVAL;
+
+ mutex_lock(&cache->cache_info_lock);
+ cache_info_set_gc_percent(&cache->cache_info, percent);
+
+ cache_info_write(cache);
+ mutex_unlock(&cache->cache_info_lock);
+
+ return 0;
+}
+
+void cache_pos_encode(struct pcache_cache *cache,
+ struct pcache_cache_pos_onmedia *pos_onmedia_base,
+ struct pcache_cache_pos *pos, u64 seq, u32 *index)
+{
+ struct pcache_cache_pos_onmedia pos_onmedia;
+ struct pcache_cache_pos_onmedia *pos_onmedia_addr = pos_onmedia_base + *index;
+
+ pos_onmedia.cache_seg_id = pos->cache_seg->cache_seg_id;
+ pos_onmedia.seg_off = pos->seg_off;
+ pos_onmedia.header.seq = seq;
+ pos_onmedia.header.crc = cache_pos_onmedia_crc(&pos_onmedia);
+
+ memcpy_flushcache(pos_onmedia_addr, &pos_onmedia, sizeof(struct pcache_cache_pos_onmedia));
+ pmem_wmb();
+
+ *index = (*index + 1) % PCACHE_META_INDEX_MAX;
+}
+
+int cache_pos_decode(struct pcache_cache *cache,
+ struct pcache_cache_pos_onmedia *pos_onmedia,
+ struct pcache_cache_pos *pos, u64 *seq, u32 *index)
+{
+ struct pcache_cache_pos_onmedia latest, *latest_addr;
+
+ latest_addr = pcache_meta_find_latest(&pos_onmedia->header,
+ sizeof(struct pcache_cache_pos_onmedia),
+ sizeof(struct pcache_cache_pos_onmedia),
+ &latest);
+ if (IS_ERR(latest_addr))
+ return PTR_ERR(latest_addr);
+
+ if (!latest_addr)
+ return -EIO;
+
+ pos->cache_seg = &cache->segments[latest.cache_seg_id];
+ pos->seg_off = latest.seg_off;
+ *seq = latest.header.seq;
+ *index = (latest_addr - pos_onmedia);
+
+ return 0;
+}
+
+static inline void cache_info_set_seg_id(struct pcache_cache *cache, u32 seg_id)
+{
+ cache->cache_info.seg_id = seg_id;
+}
+
+static int cache_init(struct dm_pcache *pcache)
+{
+ struct pcache_cache *cache = &pcache->cache;
+ struct pcache_backing_dev *backing_dev = &pcache->backing_dev;
+ struct pcache_cache_dev *cache_dev = &pcache->cache_dev;
+ int ret;
+
+ cache->segments = kvcalloc(cache_dev->seg_num, sizeof(struct pcache_cache_segment), GFP_KERNEL);
+ if (!cache->segments) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ cache->seg_map = kvcalloc(BITS_TO_LONGS(cache_dev->seg_num), sizeof(unsigned long), GFP_KERNEL);
+ if (!cache->seg_map) {
+ ret = -ENOMEM;
+ goto free_segments;
+ }
+
+ cache->backing_dev = backing_dev;
+ cache->cache_dev = &pcache->cache_dev;
+ cache->n_segs = cache_dev->seg_num;
+ atomic_set(&cache->gc_errors, 0);
+ spin_lock_init(&cache->seg_map_lock);
+ spin_lock_init(&cache->key_head_lock);
+
+ mutex_init(&cache->cache_info_lock);
+ mutex_init(&cache->key_tail_lock);
+ mutex_init(&cache->dirty_tail_lock);
+ mutex_init(&cache->writeback_lock);
+
+ INIT_DELAYED_WORK(&cache->writeback_work, cache_writeback_fn);
+ INIT_DELAYED_WORK(&cache->gc_work, pcache_cache_gc_fn);
+ INIT_WORK(&cache->clean_work, clean_fn);
+
+ return 0;
+
+free_segments:
+ kvfree(cache->segments);
+err:
+ return ret;
+}
+
+static void cache_exit(struct pcache_cache *cache)
+{
+ kvfree(cache->seg_map);
+ kvfree(cache->segments);
+}
+
+static void cache_info_init_default(struct pcache_cache *cache)
+{
+ struct pcache_cache_info *cache_info = &cache->cache_info;
+
+ cache_info->header.seq = 0;
+ cache_info->n_segs = cache->cache_dev->seg_num;
+ cache_info_set_gc_percent(cache_info, PCACHE_CACHE_GC_PERCENT_DEFAULT);
+}
+
+static int cache_tail_init(struct pcache_cache *cache)
+{
+ struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
+ bool new_cache = !(cache->cache_info.flags & PCACHE_CACHE_FLAGS_INIT_DONE);
+
+ if (new_cache) {
+ __set_bit(0, cache->seg_map);
+
+ cache->key_head.cache_seg = &cache->segments[0];
+ cache->key_head.seg_off = 0;
+ cache_pos_copy(&cache->key_tail, &cache->key_head);
+ cache_pos_copy(&cache->dirty_tail, &cache->key_head);
+
+ cache_encode_dirty_tail(cache);
+ cache_encode_key_tail(cache);
+ } else {
+ if (cache_decode_key_tail(cache) || cache_decode_dirty_tail(cache)) {
+ pcache_dev_err(pcache, "Corrupted key tail or dirty tail.\n");
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+static int get_seg_id(struct pcache_cache *cache,
+ struct pcache_cache_segment *prev_cache_seg,
+ bool new_cache, u32 *seg_id)
+{
+ struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
+ struct pcache_cache_dev *cache_dev = cache->cache_dev;
+ int ret;
+
+ if (new_cache) {
+ ret = cache_dev_get_empty_segment_id(cache_dev, seg_id);
+ if (ret) {
+ pcache_dev_err(pcache, "no available segment\n");
+ goto err;
+ }
+
+ if (prev_cache_seg)
+ cache_seg_set_next_seg(prev_cache_seg, *seg_id);
+ else
+ cache_info_set_seg_id(cache, *seg_id);
+ } else {
+ if (prev_cache_seg) {
+ struct pcache_segment_info *prev_seg_info;
+
+ prev_seg_info = &prev_cache_seg->cache_seg_info;
+ if (!segment_info_has_next(prev_seg_info)) {
+ ret = -EFAULT;
+ goto err;
+ }
+ *seg_id = prev_cache_seg->cache_seg_info.next_seg;
+ } else {
+ *seg_id = cache->cache_info.seg_id;
+ }
+ }
+ return 0;
+err:
+ return ret;
+}
+
+static int cache_segs_init(struct pcache_cache *cache)
+{
+ struct pcache_cache_segment *prev_cache_seg = NULL;
+ struct pcache_cache_info *cache_info = &cache->cache_info;
+ bool new_cache = !(cache->cache_info.flags & PCACHE_CACHE_FLAGS_INIT_DONE);
+ u32 seg_id;
+ int ret;
+ u32 i;
+
+ for (i = 0; i < cache_info->n_segs; i++) {
+ ret = get_seg_id(cache, prev_cache_seg, new_cache, &seg_id);
+ if (ret)
+ goto err;
+
+ ret = cache_seg_init(cache, seg_id, i, new_cache);
+ if (ret)
+ goto err;
+
+ prev_cache_seg = &cache->segments[i];
+ }
+ return 0;
+err:
+ return ret;
+}
+
+static int cache_init_req_keys(struct pcache_cache *cache, u32 n_paral)
+{
+ struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
+ u32 n_subtrees;
+ int ret;
+ u32 i, cpu;
+
+	/* Calculate the number of cache subtrees based on the device size */
+ n_subtrees = DIV_ROUND_UP(cache->dev_size << SECTOR_SHIFT, PCACHE_CACHE_SUBTREE_SIZE);
+ ret = cache_tree_init(cache, &cache->req_key_tree, n_subtrees);
+ if (ret)
+ goto err;
+
+ cache->n_ksets = n_paral;
+ cache->ksets = kvcalloc(cache->n_ksets, PCACHE_KSET_SIZE, GFP_KERNEL);
+ if (!cache->ksets) {
+ ret = -ENOMEM;
+ goto req_tree_exit;
+ }
+
+ /*
+ * Initialize each kset with a spinlock and delayed work for flushing.
+ * Each kset is associated with one queue to ensure independent handling
+ * of cache keys across multiple queues, maximizing multiqueue concurrency.
+ */
+ for (i = 0; i < cache->n_ksets; i++) {
+ struct pcache_cache_kset *kset = get_kset(cache, i);
+
+ kset->cache = cache;
+ spin_lock_init(&kset->kset_lock);
+ INIT_DELAYED_WORK(&kset->flush_work, kset_flush_fn);
+ }
+
+ cache->data_heads = alloc_percpu(struct pcache_cache_data_head);
+ if (!cache->data_heads) {
+ ret = -ENOMEM;
+ goto free_kset;
+ }
+
+ for_each_possible_cpu(cpu) {
+ struct pcache_cache_data_head *h =
+ per_cpu_ptr(cache->data_heads, cpu);
+ h->head_pos.cache_seg = NULL;
+ }
+
+ /*
+ * Replay persisted cache keys using cache_replay.
+ * This function loads and replays cache keys from previously stored
+ * ksets, allowing the cache to restore its state after a restart.
+ */
+ ret = cache_replay(cache);
+ if (ret) {
+ pcache_dev_err(pcache, "failed to replay keys\n");
+ goto free_heads;
+ }
+
+ return 0;
+
+free_heads:
+ free_percpu(cache->data_heads);
+free_kset:
+ kvfree(cache->ksets);
+req_tree_exit:
+ cache_tree_exit(&cache->req_key_tree);
+err:
+ return ret;
+}
+
+static void cache_destroy_req_keys(struct pcache_cache *cache)
+{
+ u32 i;
+
+ for (i = 0; i < cache->n_ksets; i++) {
+ struct pcache_cache_kset *kset = get_kset(cache, i);
+
+ cancel_delayed_work_sync(&kset->flush_work);
+ }
+
+ free_percpu(cache->data_heads);
+ kvfree(cache->ksets);
+ cache_tree_exit(&cache->req_key_tree);
+}
+
+int pcache_cache_start(struct dm_pcache *pcache)
+{
+ struct pcache_backing_dev *backing_dev = &pcache->backing_dev;
+ struct pcache_cache *cache = &pcache->cache;
+ struct pcache_cache_options *opts = &pcache->opts;
+ int ret;
+
+ ret = cache_init(pcache);
+ if (ret)
+ return ret;
+
+ cache->cache_info_addr = CACHE_DEV_CACHE_INFO(cache->cache_dev);
+ cache->cache_ctrl = CACHE_DEV_CACHE_CTRL(cache->cache_dev);
+ backing_dev->cache = cache;
+ cache->dev_size = backing_dev->dev_size;
+
+ ret = cache_info_init(cache, opts);
+ if (ret)
+ goto cache_exit;
+
+ ret = cache_segs_init(cache);
+ if (ret)
+ goto cache_exit;
+
+ ret = cache_tail_init(cache);
+ if (ret)
+ goto cache_exit;
+
+ ret = cache_init_req_keys(cache, num_online_cpus());
+ if (ret)
+ goto cache_exit;
+
+ ret = cache_writeback_init(cache);
+ if (ret)
+ goto destroy_keys;
+
+ cache->cache_info.flags |= PCACHE_CACHE_FLAGS_INIT_DONE;
+ cache_info_write(cache);
+ queue_delayed_work(cache_get_wq(cache), &cache->gc_work, 0);
+
+ return 0;
+
+destroy_keys:
+ cache_destroy_req_keys(cache);
+cache_exit:
+ cache_exit(cache);
+
+ return ret;
+}
+
+void pcache_cache_stop(struct dm_pcache *pcache)
+{
+ struct pcache_cache *cache = &pcache->cache;
+
+ cache_flush(cache);
+
+ cancel_delayed_work_sync(&cache->gc_work);
+ flush_work(&cache->clean_work);
+ cache_writeback_exit(cache);
+
+ if (cache->req_key_tree.n_subtrees)
+ cache_destroy_req_keys(cache);
+
+ cache_exit(cache);
+}
+
+struct workqueue_struct *cache_get_wq(struct pcache_cache *cache)
+{
+ struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
+
+ return pcache->task_wq;
+}
+
+int pcache_cache_init(void)
+{
+ key_cache = KMEM_CACHE(pcache_cache_key, 0);
+ if (!key_cache)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void pcache_cache_exit(void)
+{
+ kmem_cache_destroy(key_cache);
+}
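The metadata writers above (cache_info_write(), cache_pos_encode()) share one crash-safety pattern: an update never overwrites the copy it supersedes. Each write goes to the next of PCACHE_META_INDEX_MAX on-media slots with an incremented sequence number and a CRC, and pcache_meta_find_latest() later selects the newest slot whose CRC validates. Condensed to its core (pmem_wmb() is shown as in cache_pos_encode(); cache_info_write() omits it):

	meta.header.seq++;
	meta.header.crc = pcache_meta_crc(&meta.header, sizeof(meta));
	memcpy_flushcache(base + index, &meta, sizeof(meta));	/* write the *next* slot */
	pmem_wmb();
	index = (index + 1) % PCACHE_META_INDEX_MAX;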
diff --git a/drivers/md/dm-pcache/cache.h b/drivers/md/dm-pcache/cache.h
new file mode 100644
index 000000000000..1136d86958c8
--- /dev/null
+++ b/drivers/md/dm-pcache/cache.h
@@ -0,0 +1,635 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _PCACHE_CACHE_H
+#define _PCACHE_CACHE_H
+
+#include "segment.h"
+
+/* Garbage collection thresholds */
+#define PCACHE_CACHE_GC_PERCENT_MIN 0 /* Minimum GC percentage */
+#define PCACHE_CACHE_GC_PERCENT_MAX 90 /* Maximum GC percentage */
+#define PCACHE_CACHE_GC_PERCENT_DEFAULT 70 /* Default GC percentage */
+
+#define PCACHE_CACHE_SUBTREE_SIZE (4 * PCACHE_MB) /* 4MB total tree size */
+#define PCACHE_CACHE_SUBTREE_SIZE_MASK 0x3FFFFF /* Mask for tree size */
+#define PCACHE_CACHE_SUBTREE_SIZE_SHIFT 22 /* Bit shift for tree size */
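+
+/*
+ * Illustrative example: with PCACHE_CACHE_SUBTREE_SIZE_SHIFT == 22, a
+ * logical offset of 10MB falls into subtree 10MB >> 22 == 2, i.e. the
+ * third 4MB subtree (see get_subtree()).
+ */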
+
+/* Maximum number of keys per key set */
+#define PCACHE_KSET_KEYS_MAX 128
+#define PCACHE_CACHE_SEGS_MAX (1024 * 1024) /* 1M segments of 16MB each: max 16TB cache per device */
+#define PCACHE_KSET_ONMEDIA_SIZE_MAX struct_size_t(struct pcache_cache_kset_onmedia, data, PCACHE_KSET_KEYS_MAX)
+#define PCACHE_KSET_SIZE (sizeof(struct pcache_cache_kset) + sizeof(struct pcache_cache_key_onmedia) * PCACHE_KSET_KEYS_MAX)
+
+/* Maximum number of keys to clean in one round of clean_work */
+#define PCACHE_CLEAN_KEYS_MAX 10
+
+/* Writeback and garbage collection intervals in jiffies */
+#define PCACHE_CACHE_WRITEBACK_INTERVAL (5 * HZ)
+#define PCACHE_CACHE_GC_INTERVAL (5 * HZ)
+
+/* Macro to get the cache key structure from an rb_node pointer */
+#define CACHE_KEY(node) (container_of(node, struct pcache_cache_key, rb_node))
+
+struct pcache_cache_pos_onmedia {
+ struct pcache_meta_header header;
+ __u32 cache_seg_id;
+ __u32 seg_off;
+};
+
+/* Offset and size definitions for cache segment control */
+#define PCACHE_CACHE_SEG_CTRL_OFF (PCACHE_SEG_INFO_SIZE * PCACHE_META_INDEX_MAX)
+#define PCACHE_CACHE_SEG_CTRL_SIZE (4 * PCACHE_KB)
+
+struct pcache_cache_seg_gen {
+ struct pcache_meta_header header;
+ __u64 gen;
+};
+
+/* Control structure for cache segments */
+struct pcache_cache_seg_ctrl {
+ struct pcache_cache_seg_gen gen[PCACHE_META_INDEX_MAX];
+ __u64 res[64];
+};
+
+#define PCACHE_CACHE_FLAGS_DATA_CRC BIT(0)
+#define PCACHE_CACHE_FLAGS_INIT_DONE BIT(1)
+
+#define PCACHE_CACHE_FLAGS_CACHE_MODE_MASK GENMASK(5, 2)
+#define PCACHE_CACHE_MODE_WRITEBACK 0
+#define PCACHE_CACHE_MODE_WRITETHROUGH 1
+#define PCACHE_CACHE_MODE_WRITEAROUND 2
+#define PCACHE_CACHE_MODE_WRITEONLY 3
+
+#define PCACHE_CACHE_FLAGS_GC_PERCENT_MASK GENMASK(12, 6)
+
+struct pcache_cache_info {
+ struct pcache_meta_header header;
+ __u32 seg_id;
+ __u32 n_segs;
+ __u32 flags;
+ __u32 reserved;
+};
+
+struct pcache_cache_pos {
+ struct pcache_cache_segment *cache_seg;
+ u32 seg_off;
+};
+
+struct pcache_cache_segment {
+ struct pcache_cache *cache;
+ u32 cache_seg_id; /* Index in cache->segments */
+ struct pcache_segment segment;
+ atomic_t refs;
+
+ struct pcache_segment_info cache_seg_info;
+ struct mutex info_lock;
+ u32 info_index;
+
+ spinlock_t gen_lock;
+ u64 gen;
+ u64 gen_seq;
+ u32 gen_index;
+
+ struct pcache_cache_seg_ctrl *cache_seg_ctrl;
+};
+
+/* rbtree for cache entries */
+struct pcache_cache_subtree {
+ struct rb_root root;
+ spinlock_t tree_lock;
+};
+
+struct pcache_cache_tree {
+ struct pcache_cache *cache;
+ u32 n_subtrees;
+ mempool_t key_pool;
+ struct pcache_cache_subtree *subtrees;
+};
+
+extern struct kmem_cache *key_cache;
+
+struct pcache_cache_key {
+ struct pcache_cache_tree *cache_tree;
+ struct pcache_cache_subtree *cache_subtree;
+ struct kref ref;
+ struct rb_node rb_node;
+ struct list_head list_node;
+ u64 off;
+ u32 len;
+ u32 flags;
+ struct pcache_cache_pos cache_pos;
+ u64 seg_gen;
+};
+
+#define PCACHE_CACHE_KEY_FLAGS_EMPTY BIT(0)
+#define PCACHE_CACHE_KEY_FLAGS_CLEAN BIT(1)
+
+struct pcache_cache_key_onmedia {
+ __u64 off;
+ __u32 len;
+ __u32 flags;
+ __u32 cache_seg_id;
+ __u32 cache_seg_off;
+ __u64 seg_gen;
+ __u32 data_crc;
+ __u32 reserved;
+};
+
+struct pcache_cache_kset_onmedia {
+ __u32 crc;
+ union {
+ __u32 key_num;
+ __u32 next_cache_seg_id;
+ };
+ __u64 magic;
+ __u64 flags;
+ struct pcache_cache_key_onmedia data[];
+};
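+
+/*
+ * A kset_onmedia normally carries key_num keys in data[]. When
+ * PCACHE_KSET_FLAGS_LAST is set it carries no keys: the union member
+ * next_cache_seg_id instead records the cache segment holding the next
+ * kset (see append_last_kset() and last_kset_gc()).
+ */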
+
+struct pcache_cache {
+ struct pcache_backing_dev *backing_dev;
+ struct pcache_cache_dev *cache_dev;
+ struct pcache_cache_ctrl *cache_ctrl;
+ u64 dev_size;
+
+ struct pcache_cache_data_head __percpu *data_heads;
+
+ spinlock_t key_head_lock;
+ struct pcache_cache_pos key_head;
+ u32 n_ksets;
+ struct pcache_cache_kset *ksets;
+
+ struct mutex key_tail_lock;
+ struct pcache_cache_pos key_tail;
+ u64 key_tail_seq;
+ u32 key_tail_index;
+
+ struct mutex dirty_tail_lock;
+ struct pcache_cache_pos dirty_tail;
+ u64 dirty_tail_seq;
+ u32 dirty_tail_index;
+
+ struct pcache_cache_tree req_key_tree;
+ struct work_struct clean_work;
+
+ struct mutex writeback_lock;
+ char wb_kset_onmedia_buf[PCACHE_KSET_ONMEDIA_SIZE_MAX];
+ struct pcache_cache_tree writeback_key_tree;
+ struct delayed_work writeback_work;
+ struct {
+ atomic_t pending;
+ u32 advance;
+ int ret;
+ } writeback_ctx;
+
+ char gc_kset_onmedia_buf[PCACHE_KSET_ONMEDIA_SIZE_MAX];
+ struct delayed_work gc_work;
+ atomic_t gc_errors;
+
+ struct mutex cache_info_lock;
+ struct pcache_cache_info cache_info;
+ struct pcache_cache_info *cache_info_addr;
+ u32 info_index;
+
+ u32 n_segs;
+ unsigned long *seg_map;
+ u32 last_cache_seg;
+ bool cache_full;
+ spinlock_t seg_map_lock;
+ struct pcache_cache_segment *segments;
+};
+
+struct workqueue_struct *cache_get_wq(struct pcache_cache *cache);
+
+struct dm_pcache;
+struct pcache_cache_options {
+ u32 cache_mode:4;
+ u32 data_crc:1;
+};
+int pcache_cache_start(struct dm_pcache *pcache);
+void pcache_cache_stop(struct dm_pcache *pcache);
+
+struct pcache_cache_ctrl {
+ /* Updated by gc_thread */
+ struct pcache_cache_pos_onmedia key_tail_pos[PCACHE_META_INDEX_MAX];
+
+ /* Updated by writeback_thread */
+ struct pcache_cache_pos_onmedia dirty_tail_pos[PCACHE_META_INDEX_MAX];
+};
+
+struct pcache_cache_data_head {
+ struct pcache_cache_pos head_pos;
+};
+
+static inline u16 pcache_cache_get_gc_percent(struct pcache_cache *cache)
+{
+ return FIELD_GET(PCACHE_CACHE_FLAGS_GC_PERCENT_MASK, cache->cache_info.flags);
+}
+
+int pcache_cache_set_gc_percent(struct pcache_cache *cache, u8 percent);
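+
+/*
+ * Illustrative example: with the default gc percent of 70, bits 6..12 of
+ * cache_info.flags hold the value 70, and gc proceeds once the number of
+ * used segments reaches n_segs * 70 / 100 (see need_gc()).
+ */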
+
+/* cache key */
+struct pcache_cache_key *cache_key_alloc(struct pcache_cache_tree *cache_tree, gfp_t gfp_mask);
+void cache_key_init(struct pcache_cache_tree *cache_tree, struct pcache_cache_key *key);
+void cache_key_get(struct pcache_cache_key *key);
+void cache_key_put(struct pcache_cache_key *key);
+int cache_key_append(struct pcache_cache *cache, struct pcache_cache_key *key, bool force_close);
+void cache_key_insert(struct pcache_cache_tree *cache_tree, struct pcache_cache_key *key, bool fixup);
+int cache_key_decode(struct pcache_cache *cache,
+ struct pcache_cache_key_onmedia *key_onmedia,
+ struct pcache_cache_key *key);
+void cache_pos_advance(struct pcache_cache_pos *pos, u32 len);
+
+#define PCACHE_KSET_FLAGS_LAST BIT(0)
+#define PCACHE_KSET_MAGIC 0x676894a64e164f1aULL
+
+struct pcache_cache_kset {
+ struct pcache_cache *cache;
+ spinlock_t kset_lock;
+ struct delayed_work flush_work;
+ struct pcache_cache_kset_onmedia kset_onmedia;
+};
+
+extern struct pcache_cache_kset_onmedia pcache_empty_kset;
+
+#define SUBTREE_WALK_RET_OK 0
+#define SUBTREE_WALK_RET_ERR 1
+#define SUBTREE_WALK_RET_NEED_KEY 2
+#define SUBTREE_WALK_RET_NEED_REQ 3
+#define SUBTREE_WALK_RET_RESEARCH 4
+
+struct pcache_cache_subtree_walk_ctx {
+ struct pcache_cache_tree *cache_tree;
+ struct rb_node *start_node;
+ struct pcache_request *pcache_req;
+ struct pcache_cache_key *key;
+ u32 req_done;
+ int ret;
+
+ /* pre-allocated key and backing_dev_req */
+ struct pcache_cache_key *pre_alloc_key;
+ struct pcache_backing_dev_req *pre_alloc_req;
+
+ struct list_head *delete_key_list;
+ struct list_head *submit_req_list;
+
+ /*
+ *            |--------|     key_tmp
+ * |====|                    key
+ */
+ int (*before)(struct pcache_cache_key *key, struct pcache_cache_key *key_tmp,
+ struct pcache_cache_subtree_walk_ctx *ctx);
+
+ /*
+ * |----------|              key_tmp
+ *               |=====|     key
+ */
+ int (*after)(struct pcache_cache_key *key, struct pcache_cache_key *key_tmp,
+ struct pcache_cache_subtree_walk_ctx *ctx);
+
+ /*
+ *     |----------------|    key_tmp
+ * |===========|             key
+ */
+ int (*overlap_tail)(struct pcache_cache_key *key, struct pcache_cache_key *key_tmp,
+ struct pcache_cache_subtree_walk_ctx *ctx);
+
+ /*
+ * |--------|                key_tmp
+ *      |==========|         key
+ */
+ int (*overlap_head)(struct pcache_cache_key *key, struct pcache_cache_key *key_tmp,
+ struct pcache_cache_subtree_walk_ctx *ctx);
+
+ /*
+ *    |----|                 key_tmp
+ * |==========|              key
+ */
+ int (*overlap_contain)(struct pcache_cache_key *key, struct pcache_cache_key *key_tmp,
+ struct pcache_cache_subtree_walk_ctx *ctx);
+
+ /*
+ * |-----------|             key_tmp
+ *   |====|                  key
+ */
+ int (*overlap_contained)(struct pcache_cache_key *key, struct pcache_cache_key *key_tmp,
+ struct pcache_cache_subtree_walk_ctx *ctx);
+
+ int (*walk_finally)(struct pcache_cache_subtree_walk_ctx *ctx, int ret);
+ bool (*walk_done)(struct pcache_cache_subtree_walk_ctx *ctx);
+};
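+
+/*
+ * Usage sketch: a walker fills in only the callbacks it needs; callbacks
+ * left NULL are skipped by cache_subtree_walk(). For example,
+ * cache_key_insert() sets just the four overlap_* fixup callbacks.
+ */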
+
+int cache_subtree_walk(struct pcache_cache_subtree_walk_ctx *ctx);
+struct rb_node *cache_subtree_search(struct pcache_cache_subtree *cache_subtree, struct pcache_cache_key *key,
+ struct rb_node **parentp, struct rb_node ***newp,
+ struct list_head *delete_key_list);
+int cache_kset_close(struct pcache_cache *cache, struct pcache_cache_kset *kset);
+void clean_fn(struct work_struct *work);
+void kset_flush_fn(struct work_struct *work);
+int cache_replay(struct pcache_cache *cache);
+int cache_tree_init(struct pcache_cache *cache, struct pcache_cache_tree *cache_tree, u32 n_subtrees);
+void cache_tree_clear(struct pcache_cache_tree *cache_tree);
+void cache_tree_exit(struct pcache_cache_tree *cache_tree);
+
+/* cache segments */
+struct pcache_cache_segment *get_cache_segment(struct pcache_cache *cache);
+int cache_seg_init(struct pcache_cache *cache, u32 seg_id, u32 cache_seg_id,
+ bool new_cache);
+void cache_seg_get(struct pcache_cache_segment *cache_seg);
+void cache_seg_put(struct pcache_cache_segment *cache_seg);
+void cache_seg_set_next_seg(struct pcache_cache_segment *cache_seg, u32 seg_id);
+
+/* cache request */
+int cache_flush(struct pcache_cache *cache);
+void miss_read_end_work_fn(struct work_struct *work);
+int pcache_cache_handle_req(struct pcache_cache *cache, struct pcache_request *pcache_req);
+
+/* gc */
+void pcache_cache_gc_fn(struct work_struct *work);
+
+/* writeback */
+void cache_writeback_exit(struct pcache_cache *cache);
+int cache_writeback_init(struct pcache_cache *cache);
+void cache_writeback_fn(struct work_struct *work);
+
+/* inline functions */
+static inline struct pcache_cache_subtree *get_subtree(struct pcache_cache_tree *cache_tree, u64 off)
+{
+ if (cache_tree->n_subtrees == 1)
+ return &cache_tree->subtrees[0];
+
+ return &cache_tree->subtrees[off >> PCACHE_CACHE_SUBTREE_SIZE_SHIFT];
+}
+
+static inline void *cache_pos_addr(struct pcache_cache_pos *pos)
+{
+ return (pos->cache_seg->segment.data + pos->seg_off);
+}
+
+static inline void *get_key_head_addr(struct pcache_cache *cache)
+{
+ return cache_pos_addr(&cache->key_head);
+}
+
+static inline u32 get_kset_id(struct pcache_cache *cache, u64 off)
+{
+ u32 kset_id;
+
+ div_u64_rem(off >> PCACHE_CACHE_SUBTREE_SIZE_SHIFT, cache->n_ksets, &kset_id);
+
+ return kset_id;
+}
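+
+/*
+ * Illustrative example: offsets are hashed to ksets in 4MB units, so with
+ * n_ksets == 4, offsets 0..4MB-1 map to kset 0, 4MB..8MB-1 to kset 1, and
+ * 16MB..20MB-1 wrap back around to kset 0.
+ */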
+
+static inline struct pcache_cache_kset *get_kset(struct pcache_cache *cache, u32 kset_id)
+{
+ return (void *)cache->ksets + PCACHE_KSET_SIZE * kset_id;
+}
+
+static inline struct pcache_cache_data_head *get_data_head(struct pcache_cache *cache)
+{
+ return this_cpu_ptr(cache->data_heads);
+}
+
+static inline bool cache_key_empty(struct pcache_cache_key *key)
+{
+ return key->flags & PCACHE_CACHE_KEY_FLAGS_EMPTY;
+}
+
+static inline bool cache_key_clean(struct pcache_cache_key *key)
+{
+ return key->flags & PCACHE_CACHE_KEY_FLAGS_CLEAN;
+}
+
+static inline void cache_pos_copy(struct pcache_cache_pos *dst, struct pcache_cache_pos *src)
+{
+ memcpy(dst, src, sizeof(struct pcache_cache_pos));
+}
+
+/**
+ * cache_seg_is_ctrl_seg - Checks if a cache segment is a cache ctrl segment.
+ * @cache_seg_id: ID of the cache segment.
+ *
+ * Returns true if the cache segment ID corresponds to a cache ctrl segment.
+ *
+ * Note: We extend the segment control of the first cache segment
+ * (cache segment ID 0) to serve as the cache control (pcache_cache_ctrl)
+ * for the entire PCACHE cache. This function determines whether the given
+ * cache segment is the one storing the pcache_cache_ctrl information.
+ */
+static inline bool cache_seg_is_ctrl_seg(u32 cache_seg_id)
+{
+ return (cache_seg_id == 0);
+}
+
+/**
+ * cache_key_cutfront - Cuts a specified length from the front of a cache key.
+ * @key: Pointer to pcache_cache_key structure.
+ * @cut_len: Length to cut from the front.
+ *
+ * Advances the cache key position by cut_len and adjusts offset and length accordingly.
+ */
+static inline void cache_key_cutfront(struct pcache_cache_key *key, u32 cut_len)
+{
+ if (key->cache_pos.cache_seg)
+ cache_pos_advance(&key->cache_pos, cut_len);
+
+ key->off += cut_len;
+ key->len -= cut_len;
+}
+
+/**
+ * cache_key_cutback - Cuts a specified length from the back of a cache key.
+ * @key: Pointer to pcache_cache_key structure.
+ * @cut_len: Length to cut from the back.
+ *
+ * Reduces the length of the cache key by cut_len.
+ */
+static inline void cache_key_cutback(struct pcache_cache_key *key, u32 cut_len)
+{
+ key->len -= cut_len;
+}
+
+static inline void cache_key_delete(struct pcache_cache_key *key)
+{
+ struct pcache_cache_subtree *cache_subtree;
+
+ cache_subtree = key->cache_subtree;
+ BUG_ON(!cache_subtree);
+
+ rb_erase(&key->rb_node, &cache_subtree->root);
+ key->flags = 0;
+ cache_key_put(key);
+}
+
+static inline bool cache_data_crc_on(struct pcache_cache *cache)
+{
+ return (cache->cache_info.flags & PCACHE_CACHE_FLAGS_DATA_CRC);
+}
+
+static inline u32 cache_mode_get(struct pcache_cache *cache)
+{
+ return FIELD_GET(PCACHE_CACHE_FLAGS_CACHE_MODE_MASK, cache->cache_info.flags);
+}
+
+static inline void cache_mode_set(struct pcache_cache *cache, u32 cache_mode)
+{
+ cache->cache_info.flags &= ~PCACHE_CACHE_FLAGS_CACHE_MODE_MASK;
+ cache->cache_info.flags |= FIELD_PREP(PCACHE_CACHE_FLAGS_CACHE_MODE_MASK, cache_mode);
+}
+
+/**
+ * cache_key_data_crc - Calculates CRC for data in a cache key.
+ * @key: Pointer to the pcache_cache_key structure.
+ *
+ * Returns the CRC-32 checksum of the data within the cache key's position.
+ */
+static inline u32 cache_key_data_crc(struct pcache_cache_key *key)
+{
+ void *data;
+
+ data = cache_pos_addr(&key->cache_pos);
+
+ return crc32c(PCACHE_CRC_SEED, data, key->len);
+}
+
+static inline u32 cache_kset_crc(struct pcache_cache_kset_onmedia *kset_onmedia)
+{
+ u32 crc_size;
+
+ if (kset_onmedia->flags & PCACHE_KSET_FLAGS_LAST)
+ crc_size = sizeof(struct pcache_cache_kset_onmedia) - 4;
+ else
+ crc_size = struct_size(kset_onmedia, data, kset_onmedia->key_num) - 4;
+
+ return crc32c(PCACHE_CRC_SEED, (void *)kset_onmedia + 4, crc_size);
+}
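+
+/*
+ * Note: the kset CRC always excludes the leading 4-byte crc field itself;
+ * for a LAST kset only the fixed header is covered, since data[] carries
+ * no keys in that case.
+ */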
+
+static inline u32 get_kset_onmedia_size(struct pcache_cache_kset_onmedia *kset_onmedia)
+{
+ return struct_size_t(struct pcache_cache_kset_onmedia, data, kset_onmedia->key_num);
+}
+
+/**
+ * cache_seg_remain - Computes remaining space in a cache segment.
+ * @pos: Pointer to pcache_cache_pos structure.
+ *
+ * Returns the amount of remaining space in the segment data starting from
+ * the current position offset.
+ */
+static inline u32 cache_seg_remain(struct pcache_cache_pos *pos)
+{
+ struct pcache_cache_segment *cache_seg;
+ struct pcache_segment *segment;
+ u32 seg_remain;
+
+ cache_seg = pos->cache_seg;
+ segment = &cache_seg->segment;
+ seg_remain = segment->data_size - pos->seg_off;
+
+ return seg_remain;
+}
+
+/**
+ * cache_key_invalid - Checks if a cache key is invalid.
+ * @key: Pointer to pcache_cache_key structure.
+ *
+ * Returns true if the cache key is invalid due to its generation being
+ * less than the generation of its segment; otherwise returns false.
+ *
+ * When the GC (garbage collection) thread identifies a segment
+ * as reclaimable, it increments the segment's generation (gen). However,
+ * it does not immediately remove all related cache keys. When accessing
+ * such a cache key, this function can be used to determine if the cache
+ * key has already become invalid.
+ */
+static inline bool cache_key_invalid(struct pcache_cache_key *key)
+{
+ if (cache_key_empty(key))
+ return false;
+
+ return (key->seg_gen < key->cache_pos.cache_seg->gen);
+}
+
+/**
+ * cache_key_lstart - Retrieves the logical start offset of a cache key.
+ * @key: Pointer to pcache_cache_key structure.
+ *
+ * Returns the logical start offset for the cache key.
+ */
+static inline u64 cache_key_lstart(struct pcache_cache_key *key)
+{
+ return key->off;
+}
+
+/**
+ * cache_key_lend - Retrieves the logical end offset of a cache key.
+ * @key: Pointer to pcache_cache_key structure.
+ *
+ * Returns the logical end offset for the cache key.
+ */
+static inline u64 cache_key_lend(struct pcache_cache_key *key)
+{
+ return key->off + key->len;
+}
+
+static inline void cache_key_copy(struct pcache_cache_key *key_dst, struct pcache_cache_key *key_src)
+{
+ key_dst->off = key_src->off;
+ key_dst->len = key_src->len;
+ key_dst->seg_gen = key_src->seg_gen;
+ key_dst->cache_tree = key_src->cache_tree;
+ key_dst->cache_subtree = key_src->cache_subtree;
+ key_dst->flags = key_src->flags;
+
+ cache_pos_copy(&key_dst->cache_pos, &key_src->cache_pos);
+}
+
+/**
+ * cache_pos_onmedia_crc - Calculates the CRC for an on-media cache position.
+ * @pos_om: Pointer to pcache_cache_pos_onmedia structure.
+ *
+ * Calculates the CRC-32 checksum of the position, excluding the first 4 bytes.
+ * Returns the computed CRC value.
+ */
+static inline u32 cache_pos_onmedia_crc(struct pcache_cache_pos_onmedia *pos_om)
+{
+ return pcache_meta_crc(&pos_om->header, sizeof(struct pcache_cache_pos_onmedia));
+}
+
+void cache_pos_encode(struct pcache_cache *cache,
+ struct pcache_cache_pos_onmedia *pos_onmedia,
+ struct pcache_cache_pos *pos, u64 seq, u32 *index);
+int cache_pos_decode(struct pcache_cache *cache,
+ struct pcache_cache_pos_onmedia *pos_onmedia,
+ struct pcache_cache_pos *pos, u64 *seq, u32 *index);
+
+static inline void cache_encode_key_tail(struct pcache_cache *cache)
+{
+ cache_pos_encode(cache, cache->cache_ctrl->key_tail_pos,
+ &cache->key_tail, ++cache->key_tail_seq,
+ &cache->key_tail_index);
+}
+
+static inline int cache_decode_key_tail(struct pcache_cache *cache)
+{
+ return cache_pos_decode(cache, cache->cache_ctrl->key_tail_pos,
+ &cache->key_tail, &cache->key_tail_seq,
+ &cache->key_tail_index);
+}
+
+static inline void cache_encode_dirty_tail(struct pcache_cache *cache)
+{
+ cache_pos_encode(cache, cache->cache_ctrl->dirty_tail_pos,
+ &cache->dirty_tail, ++cache->dirty_tail_seq,
+ &cache->dirty_tail_index);
+}
+
+static inline int cache_decode_dirty_tail(struct pcache_cache *cache)
+{
+ return cache_pos_decode(cache, cache->cache_ctrl->dirty_tail_pos,
+ &cache->dirty_tail, &cache->dirty_tail_seq,
+ &cache->dirty_tail_index);
+}
+
+int pcache_cache_init(void);
+void pcache_cache_exit(void);
+#endif /* _PCACHE_CACHE_H */
diff --git a/drivers/md/dm-pcache/cache_dev.c b/drivers/md/dm-pcache/cache_dev.c
new file mode 100644
index 000000000000..ece689e6ce59
--- /dev/null
+++ b/drivers/md/dm-pcache/cache_dev.c
@@ -0,0 +1,303 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/blkdev.h>
+#include <linux/dax.h>
+#include <linux/vmalloc.h>
+#include <linux/parser.h>
+
+#include "cache_dev.h"
+#include "backing_dev.h"
+#include "cache.h"
+#include "dm_pcache.h"
+
+static void cache_dev_dax_exit(struct pcache_cache_dev *cache_dev)
+{
+ if (cache_dev->use_vmap)
+ vunmap(cache_dev->mapping);
+}
+
+static int build_vmap(struct dax_device *dax_dev, long total_pages, void **vaddr)
+{
+ struct page **pages;
+ long i = 0, chunk;
+ unsigned long pfn;
+ int ret;
+
+ pages = vmalloc_array(total_pages, sizeof(struct page *));
+ if (!pages)
+ return -ENOMEM;
+
+ do {
+ chunk = dax_direct_access(dax_dev, i, total_pages - i,
+ DAX_ACCESS, NULL, &pfn);
+ if (chunk <= 0) {
+ ret = chunk ? chunk : -EINVAL;
+ goto out_free;
+ }
+
+ if (!pfn_valid(pfn)) {
+ ret = -EOPNOTSUPP;
+ goto out_free;
+ }
+
+ while (chunk-- && i < total_pages) {
+ pages[i++] = pfn_to_page(pfn);
+ pfn++;
+ if (!(i & 15))
+ cond_resched();
+ }
+ } while (i < total_pages);
+
+ *vaddr = vmap(pages, total_pages, VM_MAP, PAGE_KERNEL);
+ if (!*vaddr) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ ret = 0;
+
+out_free:
+ vfree(pages);
+ return ret;
+}
+
+static int cache_dev_dax_init(struct pcache_cache_dev *cache_dev)
+{
+ struct dm_pcache *pcache = CACHE_DEV_TO_PCACHE(cache_dev);
+ struct dax_device *dax_dev;
+ long total_pages, mapped_pages;
+ u64 bdev_size;
+ void *vaddr;
+ int ret;
+ int id;
+ unsigned long pfn;
+
+ dax_dev = cache_dev->dm_dev->dax_dev;
+ /* total size check */
+ bdev_size = bdev_nr_bytes(cache_dev->dm_dev->bdev);
+ if (bdev_size < PCACHE_CACHE_DEV_SIZE_MIN) {
+ pcache_dev_err(pcache, "dax device is too small, required at least %llu",
+ PCACHE_CACHE_DEV_SIZE_MIN);
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ total_pages = bdev_size >> PAGE_SHIFT;
+ /* attempt: direct-map the whole range */
+ id = dax_read_lock();
+ mapped_pages = dax_direct_access(dax_dev, 0, total_pages,
+ DAX_ACCESS, &vaddr, &pfn);
+ if (mapped_pages < 0) {
+ pcache_dev_err(pcache, "dax_direct_access failed: %ld\n", mapped_pages);
+ ret = mapped_pages;
+ goto unlock;
+ }
+
+ if (!pfn_valid(pfn)) {
+ ret = -EOPNOTSUPP;
+ goto unlock;
+ }
+
+ if (mapped_pages == total_pages) {
+ /* success: contiguous direct mapping */
+ cache_dev->mapping = vaddr;
+ } else {
+ /* need vmap fallback */
+ ret = build_vmap(dax_dev, total_pages, &vaddr);
+ if (ret) {
+ pcache_dev_err(pcache, "vmap fallback failed: %d\n", ret);
+ goto unlock;
+ }
+
+ cache_dev->mapping = vaddr;
+ cache_dev->use_vmap = true;
+ }
+ dax_read_unlock(id);
+
+ return 0;
+unlock:
+ dax_read_unlock(id);
+out:
+ return ret;
+}
+
+void cache_dev_zero_range(struct pcache_cache_dev *cache_dev, void *pos, u32 size)
+{
+ memset(pos, 0, size);
+ dax_flush(cache_dev->dm_dev->dax_dev, pos, size);
+}
+
+static int sb_read(struct pcache_cache_dev *cache_dev, struct pcache_sb *sb)
+{
+ struct pcache_sb *sb_addr = CACHE_DEV_SB(cache_dev);
+
+ if (copy_mc_to_kernel(sb, sb_addr, sizeof(struct pcache_sb)))
+ return -EIO;
+
+ return 0;
+}
+
+static void sb_write(struct pcache_cache_dev *cache_dev, struct pcache_sb *sb)
+{
+ struct pcache_sb *sb_addr = CACHE_DEV_SB(cache_dev);
+
+ memcpy_flushcache(sb_addr, sb, sizeof(struct pcache_sb));
+ pmem_wmb();
+}
+
+static int sb_init(struct pcache_cache_dev *cache_dev, struct pcache_sb *sb)
+{
+ struct dm_pcache *pcache = CACHE_DEV_TO_PCACHE(cache_dev);
+ u64 nr_segs;
+ u64 cache_dev_size;
+ u64 magic;
+ u32 flags = 0;
+
+ magic = le64_to_cpu(sb->magic);
+ if (magic)
+ return -EEXIST;
+
+ cache_dev_size = bdev_nr_bytes(file_bdev(cache_dev->dm_dev->bdev_file));
+ if (cache_dev_size < PCACHE_CACHE_DEV_SIZE_MIN) {
+ pcache_dev_err(pcache, "dax device is too small, required at least %llu",
+ PCACHE_CACHE_DEV_SIZE_MIN);
+ return -ENOSPC;
+ }
+
+ nr_segs = (cache_dev_size - PCACHE_SEGMENTS_OFF) / PCACHE_SEG_SIZE;
+
+#if defined(__BYTE_ORDER) ? (__BIG_ENDIAN == __BYTE_ORDER) : defined(__BIG_ENDIAN)
+ flags |= PCACHE_SB_F_BIGENDIAN;
+#endif
+ sb->flags = cpu_to_le32(flags);
+ sb->magic = cpu_to_le64(PCACHE_MAGIC);
+ sb->seg_num = cpu_to_le32(nr_segs);
+ sb->crc = cpu_to_le32(crc32c(PCACHE_CRC_SEED, (void *)(sb) + 4, sizeof(struct pcache_sb) - 4));
+
+ cache_dev_zero_range(cache_dev, CACHE_DEV_CACHE_INFO(cache_dev),
+ PCACHE_CACHE_INFO_SIZE * PCACHE_META_INDEX_MAX +
+ PCACHE_CACHE_CTRL_SIZE);
+
+ return 0;
+}
+
+static int sb_validate(struct pcache_cache_dev *cache_dev, struct pcache_sb *sb)
+{
+ struct dm_pcache *pcache = CACHE_DEV_TO_PCACHE(cache_dev);
+ u32 flags;
+ u32 crc;
+
+ if (le64_to_cpu(sb->magic) != PCACHE_MAGIC) {
+ pcache_dev_err(pcache, "unexpected magic: %llx\n",
+ le64_to_cpu(sb->magic));
+ return -EINVAL;
+ }
+
+ crc = crc32c(PCACHE_CRC_SEED, (void *)(sb) + 4, sizeof(struct pcache_sb) - 4);
+ if (crc != le32_to_cpu(sb->crc)) {
+ pcache_dev_err(pcache, "corrupted sb: %u, expected: %u\n", crc, le32_to_cpu(sb->crc));
+ return -EINVAL;
+ }
+
+ flags = le32_to_cpu(sb->flags);
+#if defined(__BYTE_ORDER) ? (__BIG_ENDIAN == __BYTE_ORDER) : defined(__BIG_ENDIAN)
+ if (!(flags & PCACHE_SB_F_BIGENDIAN)) {
+ pcache_dev_err(pcache, "cache_dev is not big endian\n");
+ return -EINVAL;
+ }
+#else
+ if (flags & PCACHE_SB_F_BIGENDIAN) {
+ pcache_dev_err(pcache, "cache_dev is big endian\n");
+ return -EINVAL;
+ }
+#endif
+ return 0;
+}
+
+static int cache_dev_init(struct pcache_cache_dev *cache_dev, u32 seg_num)
+{
+ cache_dev->seg_num = seg_num;
+ cache_dev->seg_bitmap = kvcalloc(BITS_TO_LONGS(cache_dev->seg_num), sizeof(unsigned long), GFP_KERNEL);
+ if (!cache_dev->seg_bitmap)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void cache_dev_exit(struct pcache_cache_dev *cache_dev)
+{
+ kvfree(cache_dev->seg_bitmap);
+}
+
+void cache_dev_stop(struct dm_pcache *pcache)
+{
+ struct pcache_cache_dev *cache_dev = &pcache->cache_dev;
+
+ cache_dev_exit(cache_dev);
+ cache_dev_dax_exit(cache_dev);
+}
+
+int cache_dev_start(struct dm_pcache *pcache)
+{
+ struct pcache_cache_dev *cache_dev = &pcache->cache_dev;
+ struct pcache_sb sb;
+ bool format = false;
+ int ret;
+
+ mutex_init(&cache_dev->seg_lock);
+
+ ret = cache_dev_dax_init(cache_dev);
+ if (ret) {
+ pcache_dev_err(pcache, "failed to init cache_dev %s via dax way: %d.",
+ cache_dev->dm_dev->name, ret);
+ goto err;
+ }
+
+ ret = sb_read(cache_dev, &sb);
+ if (ret)
+ goto dax_release;
+
+ if (le64_to_cpu(sb.magic) == 0) {
+ format = true;
+ ret = sb_init(cache_dev, &sb);
+ if (ret < 0)
+ goto dax_release;
+ }
+
+ ret = sb_validate(cache_dev, &sb);
+ if (ret)
+ goto dax_release;
+
+ cache_dev->sb_flags = le32_to_cpu(sb.flags);
+ ret = cache_dev_init(cache_dev, le32_to_cpu(sb.seg_num));
+ if (ret)
+ goto dax_release;
+
+ if (format)
+ sb_write(cache_dev, &sb);
+
+ return 0;
+
+dax_release:
+ cache_dev_dax_exit(cache_dev);
+err:
+ return ret;
+}
+
+int cache_dev_get_empty_segment_id(struct pcache_cache_dev *cache_dev, u32 *seg_id)
+{
+ int ret;
+
+ mutex_lock(&cache_dev->seg_lock);
+ *seg_id = find_next_zero_bit(cache_dev->seg_bitmap, cache_dev->seg_num, 0);
+ if (*seg_id == cache_dev->seg_num) {
+ ret = -ENOSPC;
+ goto unlock;
+ }
+
+ __set_bit(*seg_id, cache_dev->seg_bitmap);
+ ret = 0;
+unlock:
+ mutex_unlock(&cache_dev->seg_lock);
+ return ret;
+}
diff --git a/drivers/md/dm-pcache/cache_dev.h b/drivers/md/dm-pcache/cache_dev.h
new file mode 100644
index 000000000000..6251eb4ebe96
--- /dev/null
+++ b/drivers/md/dm-pcache/cache_dev.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _PCACHE_CACHE_DEV_H
+#define _PCACHE_CACHE_DEV_H
+
+#include <linux/device.h>
+#include <linux/device-mapper.h>
+
+#include "pcache_internal.h"
+
+#define PCACHE_MAGIC 0x65B05EFA96C596EFULL
+
+#define PCACHE_SB_OFF (4 * PCACHE_KB)
+#define PCACHE_SB_SIZE (4 * PCACHE_KB)
+
+#define PCACHE_CACHE_INFO_OFF (PCACHE_SB_OFF + PCACHE_SB_SIZE)
+#define PCACHE_CACHE_INFO_SIZE (4 * PCACHE_KB)
+
+#define PCACHE_CACHE_CTRL_OFF (PCACHE_CACHE_INFO_OFF + (PCACHE_CACHE_INFO_SIZE * PCACHE_META_INDEX_MAX))
+#define PCACHE_CACHE_CTRL_SIZE (4 * PCACHE_KB)
+
+#define PCACHE_SEGMENTS_OFF (PCACHE_CACHE_CTRL_OFF + PCACHE_CACHE_CTRL_SIZE)
+#define PCACHE_SEG_INFO_SIZE (4 * PCACHE_KB)
+
+#define PCACHE_CACHE_DEV_SIZE_MIN (512 * PCACHE_MB) /* 512 MB */
+#define PCACHE_SEG_SIZE (16 * PCACHE_MB) /* Size of each PCACHE segment (16 MB) */
+
+#define CACHE_DEV_SB(cache_dev) ((struct pcache_sb *)(cache_dev->mapping + PCACHE_SB_OFF))
+#define CACHE_DEV_CACHE_INFO(cache_dev) ((void *)cache_dev->mapping + PCACHE_CACHE_INFO_OFF)
+#define CACHE_DEV_CACHE_CTRL(cache_dev) ((void *)cache_dev->mapping + PCACHE_CACHE_CTRL_OFF)
+#define CACHE_DEV_SEGMENTS(cache_dev) ((void *)cache_dev->mapping + PCACHE_SEGMENTS_OFF)
+#define CACHE_DEV_SEGMENT(cache_dev, id) ((void *)CACHE_DEV_SEGMENTS(cache_dev) + (u64)id * PCACHE_SEG_SIZE)
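+
+/*
+ * On-media layout sketch, per the offsets above (the exact start of the
+ * cache_ctrl and segment areas depends on PCACHE_META_INDEX_MAX):
+ *
+ *   0     4KB    8KB
+ *   | ... |  sb  | cache_info copies | cache_ctrl | segments (16MB each)
+ */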
+
+/*
+ * PCACHE SB flags configured during formatting
+ *
+ * The PCACHE_SB_F_xxx flags define registration requirements based on cache_dev
+ * formatting. For a machine to register a cache_dev:
+ * - PCACHE_SB_F_BIGENDIAN: Requires a big-endian machine.
+ */
+#define PCACHE_SB_F_BIGENDIAN BIT(0)
+
+struct pcache_sb {
+ __le32 crc;
+ __le32 flags;
+ __le64 magic;
+
+ __le32 seg_num;
+};
+
+struct pcache_cache_dev {
+ u32 sb_flags;
+ u32 seg_num;
+ void *mapping;
+ bool use_vmap;
+
+ struct dm_dev *dm_dev;
+
+ struct mutex seg_lock;
+ unsigned long *seg_bitmap;
+};
+
+struct dm_pcache;
+int cache_dev_start(struct dm_pcache *pcache);
+void cache_dev_stop(struct dm_pcache *pcache);
+
+void cache_dev_zero_range(struct pcache_cache_dev *cache_dev, void *pos, u32 size);
+
+int cache_dev_get_empty_segment_id(struct pcache_cache_dev *cache_dev, u32 *seg_id);
+
+#endif /* _PCACHE_CACHE_DEV_H */
diff --git a/drivers/md/dm-pcache/cache_gc.c b/drivers/md/dm-pcache/cache_gc.c
new file mode 100644
index 000000000000..94f8b276a021
--- /dev/null
+++ b/drivers/md/dm-pcache/cache_gc.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include "cache.h"
+#include "backing_dev.h"
+#include "cache_dev.h"
+#include "dm_pcache.h"
+
+/**
+ * cache_key_gc - Releases the reference of a cache key segment.
+ * @cache: Pointer to the pcache_cache structure.
+ * @key: Pointer to the cache key to be garbage collected.
+ *
+ * This function decrements the reference count of the cache segment
+ * associated with the given key. If the reference count drops to zero,
+ * the segment may be invalidated and reused.
+ */
+static void cache_key_gc(struct pcache_cache *cache, struct pcache_cache_key *key)
+{
+ cache_seg_put(key->cache_pos.cache_seg);
+}
+
+static bool need_gc(struct pcache_cache *cache, struct pcache_cache_pos *dirty_tail, struct pcache_cache_pos *key_tail)
+{
+ struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
+ struct pcache_cache_kset_onmedia *kset_onmedia;
+ void *dirty_addr, *key_addr;
+ u32 segs_used, segs_gc_threshold, to_copy;
+ int ret;
+
+ dirty_addr = cache_pos_addr(dirty_tail);
+ key_addr = cache_pos_addr(key_tail);
+ if (dirty_addr == key_addr) {
+ pcache_dev_debug(pcache, "key tail is equal to dirty tail: %u:%u\n",
+ dirty_tail->cache_seg->cache_seg_id,
+ dirty_tail->seg_off);
+ return false;
+ }
+
+ kset_onmedia = (struct pcache_cache_kset_onmedia *)cache->gc_kset_onmedia_buf;
+
+ to_copy = min(PCACHE_KSET_ONMEDIA_SIZE_MAX, PCACHE_SEG_SIZE - key_tail->seg_off);
+ ret = copy_mc_to_kernel(kset_onmedia, key_addr, to_copy);
+ if (ret) {
+ pcache_dev_err(pcache, "error to read kset: %d", ret);
+ return false;
+ }
+
+ /* Check if kset_onmedia is corrupted */
+ if (kset_onmedia->magic != PCACHE_KSET_MAGIC) {
+ pcache_dev_debug(pcache, "gc error: magic is not as expected. key_tail: %u:%u magic: %llx, expected: %llx\n",
+ key_tail->cache_seg->cache_seg_id, key_tail->seg_off,
+ kset_onmedia->magic, PCACHE_KSET_MAGIC);
+ return false;
+ }
+
+ /* Verify the CRC of the kset_onmedia */
+ if (kset_onmedia->crc != cache_kset_crc(kset_onmedia)) {
+ pcache_dev_debug(pcache, "gc error: crc is not as expected. crc: %x, expected: %x\n",
+ cache_kset_crc(kset_onmedia), kset_onmedia->crc);
+ return false;
+ }
+
+ segs_used = bitmap_weight(cache->seg_map, cache->n_segs);
+ segs_gc_threshold = cache->n_segs * pcache_cache_get_gc_percent(cache) / 100;
+ if (segs_used < segs_gc_threshold) {
+ pcache_dev_debug(pcache, "segs_used: %u, segs_gc_threshold: %u\n", segs_used, segs_gc_threshold);
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * last_kset_gc - Advances the garbage collection for the last kset.
+ * @cache: Pointer to the pcache_cache structure.
+ * @kset_onmedia: Pointer to the kset_onmedia structure for the last kset.
+ */
+static void last_kset_gc(struct pcache_cache *cache, struct pcache_cache_kset_onmedia *kset_onmedia)
+{
+ struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
+ struct pcache_cache_segment *cur_seg, *next_seg;
+
+ cur_seg = cache->key_tail.cache_seg;
+
+ next_seg = &cache->segments[kset_onmedia->next_cache_seg_id];
+
+ mutex_lock(&cache->key_tail_lock);
+ cache->key_tail.cache_seg = next_seg;
+ cache->key_tail.seg_off = 0;
+ cache_encode_key_tail(cache);
+ mutex_unlock(&cache->key_tail_lock);
+
+ pcache_dev_debug(pcache, "gc advance kset seg: %u\n", cur_seg->cache_seg_id);
+
+ spin_lock(&cache->seg_map_lock);
+ __clear_bit(cur_seg->cache_seg_id, cache->seg_map);
+ spin_unlock(&cache->seg_map_lock);
+}
+
+void pcache_cache_gc_fn(struct work_struct *work)
+{
+ struct pcache_cache *cache = container_of(work, struct pcache_cache, gc_work.work);
+ struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
+ struct pcache_cache_pos dirty_tail, key_tail;
+ struct pcache_cache_kset_onmedia *kset_onmedia;
+ struct pcache_cache_key_onmedia *key_onmedia;
+ struct pcache_cache_key *key;
+ int ret;
+ int i;
+
+ kset_onmedia = (struct pcache_cache_kset_onmedia *)cache->gc_kset_onmedia_buf;
+
+ while (true) {
+ if (pcache_is_stopping(pcache) || atomic_read(&cache->gc_errors))
+ return;
+
+ /* Get new tail positions */
+ mutex_lock(&cache->dirty_tail_lock);
+ cache_pos_copy(&dirty_tail, &cache->dirty_tail);
+ mutex_unlock(&cache->dirty_tail_lock);
+
+ mutex_lock(&cache->key_tail_lock);
+ cache_pos_copy(&key_tail, &cache->key_tail);
+ mutex_unlock(&cache->key_tail_lock);
+
+ if (!need_gc(cache, &dirty_tail, &key_tail))
+ break;
+
+ if (kset_onmedia->flags & PCACHE_KSET_FLAGS_LAST) {
+ /* Don't move to the next segment if dirty_tail has not moved */
+ if (dirty_tail.cache_seg == key_tail.cache_seg)
+ break;
+
+ last_kset_gc(cache, kset_onmedia);
+ continue;
+ }
+
+ for (i = 0; i < kset_onmedia->key_num; i++) {
+ struct pcache_cache_key key_tmp = { 0 };
+
+ key_onmedia = &kset_onmedia->data[i];
+
+ key = &key_tmp;
+ cache_key_init(&cache->req_key_tree, key);
+
+ ret = cache_key_decode(cache, key_onmedia, key);
+ if (ret) {
+ /*
+ * Return without re-arming the gc work, and prevent future
+ * gc: a partially gc-ed kset cannot be retried.
+ */
+ atomic_inc(&cache->gc_errors);
+ pcache_dev_err(pcache, "failed to decode cache key in gc\n");
+ return;
+ }
+
+ cache_key_gc(cache, key);
+ }
+
+ pcache_dev_debug(pcache, "gc advance: %u:%u %u\n",
+ key_tail.cache_seg->cache_seg_id,
+ key_tail.seg_off,
+ get_kset_onmedia_size(kset_onmedia));
+
+ mutex_lock(&cache->key_tail_lock);
+ cache_pos_advance(&cache->key_tail, get_kset_onmedia_size(kset_onmedia));
+ cache_encode_key_tail(cache);
+ mutex_unlock(&cache->key_tail_lock);
+ }
+
+ queue_delayed_work(cache_get_wq(cache), &cache->gc_work, PCACHE_CACHE_GC_INTERVAL);
+}
diff --git a/drivers/md/dm-pcache/cache_key.c b/drivers/md/dm-pcache/cache_key.c
new file mode 100644
index 000000000000..2b77e121f89b
--- /dev/null
+++ b/drivers/md/dm-pcache/cache_key.c
@@ -0,0 +1,888 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include "cache.h"
+#include "backing_dev.h"
+#include "cache_dev.h"
+#include "dm_pcache.h"
+
+struct pcache_cache_kset_onmedia pcache_empty_kset = { 0 };
+
+void cache_key_init(struct pcache_cache_tree *cache_tree, struct pcache_cache_key *key)
+{
+ kref_init(&key->ref);
+ key->cache_tree = cache_tree;
+ INIT_LIST_HEAD(&key->list_node);
+ RB_CLEAR_NODE(&key->rb_node);
+}
+
+struct pcache_cache_key *cache_key_alloc(struct pcache_cache_tree *cache_tree, gfp_t gfp_mask)
+{
+ struct pcache_cache_key *key;
+
+ key = mempool_alloc(&cache_tree->key_pool, gfp_mask);
+ if (!key)
+ return NULL;
+
+ memset(key, 0, sizeof(struct pcache_cache_key));
+ cache_key_init(cache_tree, key);
+
+ return key;
+}
+
+/**
+ * cache_key_get - Increment the reference count of a cache key.
+ * @key: Pointer to the pcache_cache_key structure.
+ *
+ * This function increments the reference count of the specified cache key,
+ * ensuring that it is not freed while still in use.
+ */
+void cache_key_get(struct pcache_cache_key *key)
+{
+ kref_get(&key->ref);
+}
+
+/**
+ * cache_key_destroy - Free a cache key structure when its reference count drops to zero.
+ * @ref: Pointer to the kref structure.
+ *
+ * This function is called when the reference count of the cache key reaches zero.
+ * It frees the allocated cache key back to the slab cache.
+ */
+static void cache_key_destroy(struct kref *ref)
+{
+ struct pcache_cache_key *key = container_of(ref, struct pcache_cache_key, ref);
+ struct pcache_cache_tree *cache_tree = key->cache_tree;
+
+ mempool_free(key, &cache_tree->key_pool);
+}
+
+void cache_key_put(struct pcache_cache_key *key)
+{
+ kref_put(&key->ref, cache_key_destroy);
+}
+
+void cache_pos_advance(struct pcache_cache_pos *pos, u32 len)
+{
+ /* Ensure enough space remains in the current segment */
+ BUG_ON(cache_seg_remain(pos) < len);
+
+ pos->seg_off += len;
+}
+
+static void cache_key_encode(struct pcache_cache *cache,
+ struct pcache_cache_key_onmedia *key_onmedia,
+ struct pcache_cache_key *key)
+{
+ key_onmedia->off = key->off;
+ key_onmedia->len = key->len;
+
+ key_onmedia->cache_seg_id = key->cache_pos.cache_seg->cache_seg_id;
+ key_onmedia->cache_seg_off = key->cache_pos.seg_off;
+
+ key_onmedia->seg_gen = key->seg_gen;
+ key_onmedia->flags = key->flags;
+
+ if (cache_data_crc_on(cache))
+ key_onmedia->data_crc = cache_key_data_crc(key);
+}
+
+int cache_key_decode(struct pcache_cache *cache,
+ struct pcache_cache_key_onmedia *key_onmedia,
+ struct pcache_cache_key *key)
+{
+ struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
+
+ key->off = key_onmedia->off;
+ key->len = key_onmedia->len;
+
+ key->cache_pos.cache_seg = &cache->segments[key_onmedia->cache_seg_id];
+ key->cache_pos.seg_off = key_onmedia->cache_seg_off;
+
+ key->seg_gen = key_onmedia->seg_gen;
+ key->flags = key_onmedia->flags;
+
+ if (cache_data_crc_on(cache) &&
+ key_onmedia->data_crc != cache_key_data_crc(key)) {
+ pcache_dev_err(pcache, "key: %llu:%u seg %u:%u data_crc error: %x, expected: %x\n",
+ key->off, key->len, key->cache_pos.cache_seg->cache_seg_id,
+ key->cache_pos.seg_off, cache_key_data_crc(key), key_onmedia->data_crc);
+ return -EIO;
+ }
+
+ return 0;
+}
+
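+/*
+ * append_last_kset - Write a terminal kset that chains to the next segment.
+ * @cache: Pointer to the pcache_cache structure.
+ * @next_seg: Cache segment ID of the segment holding the next kset.
+ *
+ * A "last" kset carries no keys; it only records next_cache_seg_id so that
+ * replay and gc can follow the chain of ksets across cache segments.
+ */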
+static void append_last_kset(struct pcache_cache *cache, u32 next_seg)
+{
+ struct pcache_cache_kset_onmedia kset_onmedia = { 0 };
+
+ kset_onmedia.flags |= PCACHE_KSET_FLAGS_LAST;
+ kset_onmedia.next_cache_seg_id = next_seg;
+ kset_onmedia.magic = PCACHE_KSET_MAGIC;
+ kset_onmedia.crc = cache_kset_crc(&kset_onmedia);
+
+ memcpy_flushcache(get_key_head_addr(cache), &kset_onmedia, sizeof(struct pcache_cache_kset_onmedia));
+ pmem_wmb();
+ cache_pos_advance(&cache->key_head, sizeof(struct pcache_cache_kset_onmedia));
+}
+
+int cache_kset_close(struct pcache_cache *cache, struct pcache_cache_kset *kset)
+{
+ struct pcache_cache_kset_onmedia *kset_onmedia;
+ u32 kset_onmedia_size;
+ int ret;
+
+ kset_onmedia = &kset->kset_onmedia;
+
+ if (!kset_onmedia->key_num)
+ return 0;
+
+ kset_onmedia_size = struct_size(kset_onmedia, data, kset_onmedia->key_num);
+
+ spin_lock(&cache->key_head_lock);
+again:
+ /* Reserve space for the last kset */
+ if (cache_seg_remain(&cache->key_head) < kset_onmedia_size + sizeof(struct pcache_cache_kset_onmedia)) {
+ struct pcache_cache_segment *next_seg;
+
+ next_seg = get_cache_segment(cache);
+ if (!next_seg) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /* clear outdated kset in next seg */
+ memcpy_flushcache(next_seg->segment.data, &pcache_empty_kset,
+ sizeof(struct pcache_cache_kset_onmedia));
+ append_last_kset(cache, next_seg->cache_seg_id);
+ cache->key_head.cache_seg = next_seg;
+ cache->key_head.seg_off = 0;
+ goto again;
+ }
+
+ kset_onmedia->magic = PCACHE_KSET_MAGIC;
+ kset_onmedia->crc = cache_kset_crc(kset_onmedia);
+
+ /* clear outdated kset after current kset */
+ memcpy_flushcache(get_key_head_addr(cache) + kset_onmedia_size, &pcache_empty_kset,
+ sizeof(struct pcache_cache_kset_onmedia));
+ /* write current kset into segment */
+ memcpy_flushcache(get_key_head_addr(cache), kset_onmedia, kset_onmedia_size);
+ pmem_wmb();
+
+ /* reset kset_onmedia */
+ memset(kset_onmedia, 0, sizeof(struct pcache_cache_kset_onmedia));
+ cache_pos_advance(&cache->key_head, kset_onmedia_size);
+
+ ret = 0;
+out:
+ spin_unlock(&cache->key_head_lock);
+
+ return ret;
+}
+
+/**
+ * cache_key_append - Append a cache key to the related kset.
+ * @cache: Pointer to the pcache_cache structure.
+ * @key: Pointer to the cache key structure to append.
+ * @force_close: Need to close current kset if true.
+ *
+ * This function appends a cache key to the appropriate kset. If the kset
+ * is full, it closes the kset. If not, it queues a flush work to write
+ * the kset to media.
+ *
+ * Returns 0 on success, or a negative error code on failure.
+ */
+int cache_key_append(struct pcache_cache *cache, struct pcache_cache_key *key, bool force_close)
+{
+ struct pcache_cache_kset *kset;
+ struct pcache_cache_kset_onmedia *kset_onmedia;
+ struct pcache_cache_key_onmedia *key_onmedia;
+ u32 kset_id = get_kset_id(cache, key->off);
+ int ret = 0;
+
+ kset = get_kset(cache, kset_id);
+ kset_onmedia = &kset->kset_onmedia;
+
+ spin_lock(&kset->kset_lock);
+ key_onmedia = &kset_onmedia->data[kset_onmedia->key_num];
+ cache_key_encode(cache, key_onmedia, key);
+
+ /* Check if the current kset has reached the maximum number of keys */
+ if (++kset_onmedia->key_num == PCACHE_KSET_KEYS_MAX || force_close) {
+ /* If full, close the kset */
+ ret = cache_kset_close(cache, kset);
+ if (ret) {
+ kset_onmedia->key_num--;
+ goto out;
+ }
+ } else {
+ /* If not full, queue a delayed work to flush the kset */
+ queue_delayed_work(cache_get_wq(cache), &kset->flush_work, 1 * HZ);
+ }
+out:
+ spin_unlock(&kset->kset_lock);
+
+ return ret;
+}
+
+/**
+ * cache_subtree_walk - Traverse the cache tree.
+ * @ctx: Pointer to the context structure for traversal.
+ *
+ * This function traverses the cache tree starting from the specified node.
+ * It calls the appropriate callback functions based on the relationships
+ * between the keys in the cache tree.
+ *
+ * Returns 0 on success, or a negative error code on failure.
+ */
+int cache_subtree_walk(struct pcache_cache_subtree_walk_ctx *ctx)
+{
+ struct pcache_cache_key *key_tmp, *key;
+ struct rb_node *node_tmp;
+ int ret = SUBTREE_WALK_RET_OK;
+
+ key = ctx->key;
+ node_tmp = ctx->start_node;
+
+ while (node_tmp) {
+ if (ctx->walk_done && ctx->walk_done(ctx))
+ break;
+
+ key_tmp = CACHE_KEY(node_tmp);
+ /*
+ * If key_tmp ends before the start of key, continue to the next node.
+ * |----------|
+ *               |=====|
+ */
+ if (cache_key_lend(key_tmp) <= cache_key_lstart(key)) {
+ if (ctx->after) {
+ ret = ctx->after(key, key_tmp, ctx);
+ if (ret)
+ goto out;
+ }
+ goto next;
+ }
+
+ /*
+ * If key_tmp starts after the end of key, stop traversing.
+ *          |--------|
+ * |====|
+ */
+ if (cache_key_lstart(key_tmp) >= cache_key_lend(key)) {
+ if (ctx->before) {
+ ret = ctx->before(key, key_tmp, ctx);
+ if (ret)
+ goto out;
+ }
+ break;
+ }
+
+ /* Handle overlapping keys */
+ if (cache_key_lstart(key_tmp) >= cache_key_lstart(key)) {
+ /*
+ * If key_tmp encompasses key.
+ *     |----------------|    key_tmp
+ * |===========|             key
+ */
+ if (cache_key_lend(key_tmp) >= cache_key_lend(key)) {
+ if (ctx->overlap_tail) {
+ ret = ctx->overlap_tail(key, key_tmp, ctx);
+ if (ret)
+ goto out;
+ }
+ break;
+ }
+
+ /*
+ * If key_tmp is contained within key.
+ *    |----|                 key_tmp
+ * |==========|              key
+ */
+ if (ctx->overlap_contain) {
+ ret = ctx->overlap_contain(key, key_tmp, ctx);
+ if (ret)
+ goto out;
+ }
+
+ goto next;
+ }
+
+ /*
+ * If key_tmp starts before key and ends after key ends.
+ * |-----------|             key_tmp
+ *   |====|                  key
+ */
+ if (cache_key_lend(key_tmp) > cache_key_lend(key)) {
+ if (ctx->overlap_contained) {
+ ret = ctx->overlap_contained(key, key_tmp, ctx);
+ if (ret)
+ goto out;
+ }
+ break;
+ }
+
+ /*
+ * If key_tmp starts before key and ends within key.
+ * |--------|                key_tmp
+ *      |==========|         key
+ */
+ if (ctx->overlap_head) {
+ ret = ctx->overlap_head(key, key_tmp, ctx);
+ if (ret)
+ goto out;
+ }
+next:
+ node_tmp = rb_next(node_tmp);
+ }
+
+out:
+ if (ctx->walk_finally)
+ ret = ctx->walk_finally(ctx, ret);
+
+ return ret;
+}
+
+/**
+ * cache_subtree_search - Search for a key in the cache tree.
+ * @cache_subtree: Pointer to the cache tree structure.
+ * @key: Pointer to the cache key to search for.
+ * @parentp: Pointer to store the parent node of the found node.
+ * @newp: Pointer to store the location where the new node should be inserted.
+ * @delete_key_list: List to collect invalid keys for deletion.
+ *
+ * This function searches the cache tree for a specific key and returns
+ * the node that is the predecessor of the key, or the first node if the key is
+ * less than all keys in the tree. If any invalid keys are found during
+ * the search, they are added to the delete_key_list for later cleanup.
+ *
+ * Returns a pointer to the previous node.
+ */
+struct rb_node *cache_subtree_search(struct pcache_cache_subtree *cache_subtree, struct pcache_cache_key *key,
+ struct rb_node **parentp, struct rb_node ***newp,
+ struct list_head *delete_key_list)
+{
+ struct rb_node **new, *parent = NULL;
+ struct pcache_cache_key *key_tmp;
+ struct rb_node *prev_node = NULL;
+
+ new = &(cache_subtree->root.rb_node);
+ while (*new) {
+ key_tmp = container_of(*new, struct pcache_cache_key, rb_node);
+ if (cache_key_invalid(key_tmp))
+ list_add(&key_tmp->list_node, delete_key_list);
+
+ parent = *new;
+ if (key_tmp->off >= key->off) {
+ new = &((*new)->rb_left);
+ } else {
+ prev_node = *new;
+ new = &((*new)->rb_right);
+ }
+ }
+
+ if (!prev_node)
+ prev_node = rb_first(&cache_subtree->root);
+
+ if (parentp)
+ *parentp = parent;
+
+ if (newp)
+ *newp = new;
+
+ return prev_node;
+}
+
+static struct pcache_cache_key *get_pre_alloc_key(struct pcache_cache_subtree_walk_ctx *ctx)
+{
+ struct pcache_cache_key *key;
+
+ if (ctx->pre_alloc_key) {
+ key = ctx->pre_alloc_key;
+ ctx->pre_alloc_key = NULL;
+
+ return key;
+ }
+
+ return cache_key_alloc(ctx->cache_tree, GFP_NOWAIT);
+}
+
+/**
+ * fixup_overlap_tail - Adjust the key when it overlaps at the tail.
+ * @key: Pointer to the new cache key being inserted.
+ * @key_tmp: Pointer to the existing key that overlaps.
+ * @ctx: Pointer to the context for walking the cache tree.
+ *
+ * This function modifies the existing key (key_tmp) when there is an
+ * overlap at the tail with the new key. If the modified key becomes
+ * empty, it is deleted.
+ */
+static int fixup_overlap_tail(struct pcache_cache_key *key,
+ struct pcache_cache_key *key_tmp,
+ struct pcache_cache_subtree_walk_ctx *ctx)
+{
+ /*
+ *     |----------------|    key_tmp
+ * |===========|             key
+ */
+ BUG_ON(cache_key_empty(key));
+ if (cache_key_empty(key_tmp)) {
+ cache_key_delete(key_tmp);
+ return SUBTREE_WALK_RET_RESEARCH;
+ }
+
+ cache_key_cutfront(key_tmp, cache_key_lend(key) - cache_key_lstart(key_tmp));
+ if (key_tmp->len == 0) {
+ cache_key_delete(key_tmp);
+ return SUBTREE_WALK_RET_RESEARCH;
+ }
+
+ return SUBTREE_WALK_RET_OK;
+}
+
+/**
+ * fixup_overlap_contain - Handle case where new key completely contains an existing key.
+ * @key: Pointer to the new cache key being inserted.
+ * @key_tmp: Pointer to the existing key that is being contained.
+ * @ctx: Pointer to the context for walking the cache tree.
+ *
+ * This function deletes the existing key (key_tmp) when the new key
+ * completely contains it. It returns SUBTREE_WALK_RET_RESEARCH to indicate that the
+ * tree structure may have changed, necessitating a re-insertion of
+ * the new key.
+ */
+static int fixup_overlap_contain(struct pcache_cache_key *key,
+ struct pcache_cache_key *key_tmp,
+ struct pcache_cache_subtree_walk_ctx *ctx)
+{
+ /*
+ *    |----|                 key_tmp
+ * |==========|              key
+ */
+ BUG_ON(cache_key_empty(key));
+ cache_key_delete(key_tmp);
+
+ return SUBTREE_WALK_RET_RESEARCH;
+}
+
+/**
+ * fixup_overlap_contained - Handle overlap when a new key is contained in an existing key.
+ * @key: The new cache key being inserted.
+ * @key_tmp: The existing cache key that overlaps with the new key.
+ * @ctx: Context for the cache tree walk.
+ *
+ * This function adjusts the existing key if the new key is contained
+ * within it. If the existing key is empty, it indicates a placeholder key
+ * that was inserted during a miss read. This placeholder will later be
+ * updated with real data from the backing_dev, making it no longer an empty key.
+ *
+ * If we delete key or insert a key, the structure of the entire cache tree may change,
+ * requiring a full research of the tree to find a new insertion point.
+ */
+static int fixup_overlap_contained(struct pcache_cache_key *key,
+ struct pcache_cache_key *key_tmp, struct pcache_cache_subtree_walk_ctx *ctx)
+{
+ struct pcache_cache_tree *cache_tree = ctx->cache_tree;
+
+ /*
+ * |-----------|             key_tmp
+ *   |====|                  key
+ */
+ BUG_ON(cache_key_empty(key));
+ if (cache_key_empty(key_tmp)) {
+ /* If key_tmp is empty, don't split it;
+ * it's a placeholder key for miss reads that will be updated later.
+ */
+ cache_key_cutback(key_tmp, cache_key_lend(key_tmp) - cache_key_lstart(key));
+ if (key_tmp->len == 0) {
+ cache_key_delete(key_tmp);
+ return SUBTREE_WALK_RET_RESEARCH;
+ }
+ } else {
+ struct pcache_cache_key *key_fixup;
+ bool need_research = false;
+
+ key_fixup = get_pre_alloc_key(ctx);
+ if (!key_fixup)
+ return SUBTREE_WALK_RET_NEED_KEY;
+
+ cache_key_copy(key_fixup, key_tmp);
+
+ /* Split key_tmp based on the new key's range */
+ cache_key_cutback(key_tmp, cache_key_lend(key_tmp) - cache_key_lstart(key));
+ if (key_tmp->len == 0) {
+ cache_key_delete(key_tmp);
+ need_research = true;
+ }
+
+ /* Create a new portion for key_fixup */
+ cache_key_cutfront(key_fixup, cache_key_lend(key) - cache_key_lstart(key_tmp));
+ if (key_fixup->len == 0) {
+ cache_key_put(key_fixup);
+ } else {
+ /* Insert the new key into the cache */
+ cache_key_insert(cache_tree, key_fixup, false);
+ need_research = true;
+ }
+
+ if (need_research)
+ return SUBTREE_WALK_RET_RESEARCH;
+ }
+
+ return SUBTREE_WALK_RET_OK;
+}
+
+/**
+ * fixup_overlap_head - Handle overlap when a new key overlaps with the head of an existing key.
+ * @key: The new cache key being inserted.
+ * @key_tmp: The existing cache key that overlaps with the new key.
+ * @ctx: Context for the cache tree walk.
+ *
+ * This function adjusts the existing key if the new key overlaps
+ * with the beginning of it. If the resulting key length is zero
+ * after the adjustment, the key is deleted. This indicates that
+ * the key no longer holds valid data and requires the tree to be
+ * re-researched for a new insertion point.
+ */
+static int fixup_overlap_head(struct pcache_cache_key *key,
+ struct pcache_cache_key *key_tmp, struct pcache_cache_subtree_walk_ctx *ctx)
+{
+ /*
+ * |--------|                key_tmp
+ *      |==========|         key
+ */
+ BUG_ON(cache_key_empty(key));
+ /* Adjust key_tmp by cutting back based on the new key's start */
+ cache_key_cutback(key_tmp, cache_key_lend(key_tmp) - cache_key_lstart(key));
+ if (key_tmp->len == 0) {
+ /* If the adjusted key_tmp length is zero, delete it */
+ cache_key_delete(key_tmp);
+ return SUBTREE_WALK_RET_RESEARCH;
+ }
+
+ return SUBTREE_WALK_RET_OK;
+}
+
+/**
+ * cache_key_insert - Insert a new cache key into the cache tree.
+ * @cache_tree: Pointer to the cache_tree structure.
+ * @key: The cache key to insert.
+ * @fixup: If true, resolve overlaps with existing keys before linking the key.
+ *
+ * This function searches for the appropriate location to insert
+ * a new cache key into the cache tree. It handles key overlaps
+ * and ensures any invalid keys are removed before insertion.
+ */
+void cache_key_insert(struct pcache_cache_tree *cache_tree, struct pcache_cache_key *key, bool fixup)
+{
+ struct pcache_cache *cache = cache_tree->cache;
+ struct pcache_cache_subtree_walk_ctx walk_ctx = { 0 };
+ struct rb_node **new, *parent = NULL;
+ struct pcache_cache_subtree *cache_subtree;
+ struct pcache_cache_key *key_tmp = NULL, *key_next;
+ struct rb_node *prev_node = NULL;
+ LIST_HEAD(delete_key_list);
+ int ret;
+
+ cache_subtree = get_subtree(cache_tree, key->off);
+ key->cache_subtree = cache_subtree;
+search:
+ prev_node = cache_subtree_search(cache_subtree, key, &parent, &new, &delete_key_list);
+ if (!list_empty(&delete_key_list)) {
+ /* Remove invalid keys from the delete list */
+ list_for_each_entry_safe(key_tmp, key_next, &delete_key_list, list_node) {
+ list_del_init(&key_tmp->list_node);
+ cache_key_delete(key_tmp);
+ }
+ goto search;
+ }
+
+ if (fixup) {
+ /* Set up the context with the cache, start node, and new key */
+ walk_ctx.cache_tree = cache_tree;
+ walk_ctx.start_node = prev_node;
+ walk_ctx.key = key;
+
+ /* Assign overlap handling functions for different scenarios */
+ walk_ctx.overlap_tail = fixup_overlap_tail;
+ walk_ctx.overlap_head = fixup_overlap_head;
+ walk_ctx.overlap_contain = fixup_overlap_contain;
+ walk_ctx.overlap_contained = fixup_overlap_contained;
+
+ ret = cache_subtree_walk(&walk_ctx);
+ switch (ret) {
+ case SUBTREE_WALK_RET_OK:
+ break;
+ case SUBTREE_WALK_RET_RESEARCH:
+ goto search;
+ case SUBTREE_WALK_RET_NEED_KEY:
+ spin_unlock(&cache_subtree->tree_lock);
+ pcache_dev_debug(CACHE_TO_PCACHE(cache), "allocate pre_alloc_key with GFP_NOIO");
+ walk_ctx.pre_alloc_key = cache_key_alloc(cache_tree, GFP_NOIO);
+ spin_lock(&cache_subtree->tree_lock);
+ goto search;
+ default:
+ BUG();
+ }
+ }
+
+ if (walk_ctx.pre_alloc_key)
+ cache_key_put(walk_ctx.pre_alloc_key);
+
+ /* Link and insert the new key into the red-black tree */
+ rb_link_node(&key->rb_node, parent, new);
+ rb_insert_color(&key->rb_node, &cache_subtree->root);
+}
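+
+/*
+ * Note: callers are expected to hold the subtree's tree_lock; the
+ * SUBTREE_WALK_RET_NEED_KEY path above drops and re-takes it around a
+ * GFP_NOIO allocation.
+ */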
+
+/**
+ * clean_fn - Cleanup function to remove invalid keys from the cache tree.
+ * @work: Pointer to the work_struct associated with the cleanup.
+ *
+ * This function cleans up invalid keys from the cache tree in the background
+ * after a cache segment has been invalidated during cache garbage collection.
+ * It processes a maximum of PCACHE_CLEAN_KEYS_MAX keys per iteration and holds
+ * the tree lock to ensure thread safety.
+ */
+void clean_fn(struct work_struct *work)
+{
+ struct pcache_cache *cache = container_of(work, struct pcache_cache, clean_work);
+ struct pcache_cache_subtree *cache_subtree;
+ struct rb_node *node;
+ struct pcache_cache_key *key;
+ int i, count;
+
+ for (i = 0; i < cache->req_key_tree.n_subtrees; i++) {
+ cache_subtree = &cache->req_key_tree.subtrees[i];
+
+again:
+ if (pcache_is_stopping(CACHE_TO_PCACHE(cache)))
+ return;
+
+ /* Delete up to PCACHE_CLEAN_KEYS_MAX keys in one iteration */
+ count = 0;
+ spin_lock(&cache_subtree->tree_lock);
+ node = rb_first(&cache_subtree->root);
+ while (node) {
+ key = CACHE_KEY(node);
+ node = rb_next(node);
+ if (cache_key_invalid(key)) {
+ count++;
+ cache_key_delete(key);
+ }
+
+ if (count >= PCACHE_CLEAN_KEYS_MAX) {
+ /* Unlock and pause before continuing cleanup */
+ spin_unlock(&cache_subtree->tree_lock);
+ usleep_range(1000, 2000);
+ goto again;
+ }
+ }
+ spin_unlock(&cache_subtree->tree_lock);
+ }
+}
+
+/*
+ * kset_flush_fn - Flush work for a cache kset.
+ *
+ * This work runs when the delayed flush queued from cache_key_append()
+ * fires: a kset that fills up is closed immediately in append, while a
+ * partially filled kset is closed here later.
+ *
+ * If cache_kset_close() detects that a new segment is required to store
+ * the kset and no segment is available, it returns an error and the
+ * flush work is re-queued for a retry.
+ */
+void kset_flush_fn(struct work_struct *work)
+{
+ struct pcache_cache_kset *kset = container_of(work, struct pcache_cache_kset, flush_work.work);
+ struct pcache_cache *cache = kset->cache;
+ int ret;
+
+ if (pcache_is_stopping(CACHE_TO_PCACHE(cache)))
+ return;
+
+ spin_lock(&kset->kset_lock);
+ ret = cache_kset_close(cache, kset);
+ spin_unlock(&kset->kset_lock);
+
+ if (ret) {
+ /* Failed to flush kset, schedule a retry. */
+ queue_delayed_work(cache_get_wq(cache), &kset->flush_work, msecs_to_jiffies(100));
+ }
+}
+
+static int kset_replay(struct pcache_cache *cache, struct pcache_cache_kset_onmedia *kset_onmedia)
+{
+ struct pcache_cache_key_onmedia *key_onmedia;
+ struct pcache_cache_subtree *cache_subtree;
+ struct pcache_cache_key *key;
+ int ret;
+ int i;
+
+ for (i = 0; i < kset_onmedia->key_num; i++) {
+ key_onmedia = &kset_onmedia->data[i];
+
+ key = cache_key_alloc(&cache->req_key_tree, GFP_NOIO);
+ ret = cache_key_decode(cache, key_onmedia, key);
+ if (ret) {
+ cache_key_put(key);
+ goto err;
+ }
+
+ __set_bit(key->cache_pos.cache_seg->cache_seg_id, cache->seg_map);
+
+ /* Check if the segment generation is valid for insertion. */
+ if (key->seg_gen < key->cache_pos.cache_seg->gen) {
+ cache_key_put(key);
+ } else {
+ cache_subtree = get_subtree(&cache->req_key_tree, key->off);
+ spin_lock(&cache_subtree->tree_lock);
+ cache_key_insert(&cache->req_key_tree, key, true);
+ spin_unlock(&cache_subtree->tree_lock);
+ }
+
+ cache_seg_get(key->cache_pos.cache_seg);
+ }
+
+ return 0;
+err:
+ return ret;
+}
+
+int cache_replay(struct pcache_cache *cache)
+{
+ struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
+ struct pcache_cache_pos pos_tail;
+ struct pcache_cache_pos *pos;
+ struct pcache_cache_kset_onmedia *kset_onmedia;
+ u32 to_copy, count = 0;
+ int ret = 0;
+
+ kset_onmedia = kzalloc(PCACHE_KSET_ONMEDIA_SIZE_MAX, GFP_KERNEL);
+ if (!kset_onmedia)
+ return -ENOMEM;
+
+ cache_pos_copy(&pos_tail, &cache->key_tail);
+ pos = &pos_tail;
+
+ /*
+	 * During the cache replay stage nothing else accesses cache->seg_map,
+	 * so we can set bits here without taking cache->seg_map_lock.
+ */
+ __set_bit(pos->cache_seg->cache_seg_id, cache->seg_map);
+
+ while (true) {
+ to_copy = min(PCACHE_KSET_ONMEDIA_SIZE_MAX, PCACHE_SEG_SIZE - pos->seg_off);
+ ret = copy_mc_to_kernel(kset_onmedia, cache_pos_addr(pos), to_copy);
+ if (ret) {
+ ret = -EIO;
+ goto out;
+ }
+
+ if (kset_onmedia->magic != PCACHE_KSET_MAGIC ||
+ kset_onmedia->crc != cache_kset_crc(kset_onmedia)) {
+ break;
+ }
+
+ /* Process the last kset and prepare for the next segment. */
+ if (kset_onmedia->flags & PCACHE_KSET_FLAGS_LAST) {
+ struct pcache_cache_segment *next_seg;
+
+ pcache_dev_debug(pcache, "last kset replay, next: %u\n", kset_onmedia->next_cache_seg_id);
+
+ next_seg = &cache->segments[kset_onmedia->next_cache_seg_id];
+
+ pos->cache_seg = next_seg;
+ pos->seg_off = 0;
+
+ __set_bit(pos->cache_seg->cache_seg_id, cache->seg_map);
+ continue;
+ }
+
+ /* Replay the kset and check for errors. */
+ ret = kset_replay(cache, kset_onmedia);
+ if (ret)
+ goto out;
+
+ /* Advance the position after processing the kset. */
+ cache_pos_advance(pos, get_kset_onmedia_size(kset_onmedia));
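+		/* Yield the CPU periodically; a replay can walk through many ksets. */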
+ if (++count > 512) {
+ cond_resched();
+ count = 0;
+ }
+ }
+
+ /* Update the key_head position after replaying. */
+ spin_lock(&cache->key_head_lock);
+ cache_pos_copy(&cache->key_head, pos);
+ spin_unlock(&cache->key_head_lock);
+out:
+ kfree(kset_onmedia);
+ return ret;
+}
+
+int cache_tree_init(struct pcache_cache *cache, struct pcache_cache_tree *cache_tree, u32 n_subtrees)
+{
+ int ret;
+ u32 i;
+
+ cache_tree->cache = cache;
+ cache_tree->n_subtrees = n_subtrees;
+
+ ret = mempool_init_slab_pool(&cache_tree->key_pool, 1024, key_cache);
+ if (ret)
+ goto err;
+
+ /*
+ * Allocate and initialize the subtrees array.
+ * Each element is a cache tree structure that contains
+ * an RB tree root and a spinlock for protecting its contents.
+ */
+ cache_tree->subtrees = kvcalloc(cache_tree->n_subtrees, sizeof(struct pcache_cache_subtree), GFP_KERNEL);
+ if (!cache_tree->subtrees) {
+ ret = -ENOMEM;
+ goto key_pool_exit;
+ }
+
+ for (i = 0; i < cache_tree->n_subtrees; i++) {
+ struct pcache_cache_subtree *cache_subtree = &cache_tree->subtrees[i];
+
+ cache_subtree->root = RB_ROOT;
+ spin_lock_init(&cache_subtree->tree_lock);
+ }
+
+ return 0;
+
+key_pool_exit:
+ mempool_exit(&cache_tree->key_pool);
+err:
+ return ret;
+}
+
+void cache_tree_clear(struct pcache_cache_tree *cache_tree)
+{
+ struct pcache_cache_subtree *cache_subtree;
+ struct rb_node *node;
+ struct pcache_cache_key *key;
+ u32 i;
+
+ for (i = 0; i < cache_tree->n_subtrees; i++) {
+ cache_subtree = &cache_tree->subtrees[i];
+
+ spin_lock(&cache_subtree->tree_lock);
+ node = rb_first(&cache_subtree->root);
+ while (node) {
+ key = CACHE_KEY(node);
+ node = rb_next(node);
+
+ cache_key_delete(key);
+ }
+ spin_unlock(&cache_subtree->tree_lock);
+ }
+}
+
+void cache_tree_exit(struct pcache_cache_tree *cache_tree)
+{
+ cache_tree_clear(cache_tree);
+ kvfree(cache_tree->subtrees);
+ mempool_exit(&cache_tree->key_pool);
+}
diff --git a/drivers/md/dm-pcache/cache_req.c b/drivers/md/dm-pcache/cache_req.c
new file mode 100644
index 000000000000..27f94c1fa968
--- /dev/null
+++ b/drivers/md/dm-pcache/cache_req.c
@@ -0,0 +1,836 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "cache.h"
+#include "backing_dev.h"
+#include "cache_dev.h"
+#include "dm_pcache.h"
+
+static int cache_data_head_init(struct pcache_cache *cache)
+{
+ struct pcache_cache_segment *next_seg;
+ struct pcache_cache_data_head *data_head;
+
+ data_head = get_data_head(cache);
+ next_seg = get_cache_segment(cache);
+ if (!next_seg)
+ return -EBUSY;
+
+ cache_seg_get(next_seg);
+ data_head->head_pos.cache_seg = next_seg;
+ data_head->head_pos.seg_off = 0;
+
+ return 0;
+}
+
+/**
+ * cache_data_alloc - Allocate data for a cache key.
+ * @cache: Pointer to the cache structure.
+ * @key: Pointer to the cache key to allocate data for.
+ *
+ * This function tries to allocate space from the cache segment specified by the
+ * data head. If the remaining space in the segment is insufficient to allocate
+ * the requested length for the cache key, it will allocate whatever is available
+ * and adjust the key's length accordingly. This function does not allocate
+ * space that crosses segment boundaries.
+ */
+static int cache_data_alloc(struct pcache_cache *cache, struct pcache_cache_key *key)
+{
+ struct pcache_cache_data_head *data_head;
+ struct pcache_cache_pos *head_pos;
+ struct pcache_cache_segment *cache_seg;
+ u32 seg_remain;
+ u32 allocated = 0, to_alloc;
+ int ret = 0;
+
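+	/*
+	 * get_data_head() hands back a per-CPU data head; disable preemption
+	 * so this thread stays on one CPU's head for the whole allocation.
+	 */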
+ preempt_disable();
+ data_head = get_data_head(cache);
+again:
+ to_alloc = key->len - allocated;
+ if (!data_head->head_pos.cache_seg) {
+ seg_remain = 0;
+ } else {
+ cache_pos_copy(&key->cache_pos, &data_head->head_pos);
+ key->seg_gen = key->cache_pos.cache_seg->gen;
+
+ head_pos = &data_head->head_pos;
+ cache_seg = head_pos->cache_seg;
+ seg_remain = cache_seg_remain(head_pos);
+ }
+
+ if (seg_remain > to_alloc) {
+ /* If remaining space in segment is sufficient for the cache key, allocate it. */
+ cache_pos_advance(head_pos, to_alloc);
+ allocated += to_alloc;
+ cache_seg_get(cache_seg);
+ } else if (seg_remain) {
+ /* If remaining space is not enough, allocate the remaining space and adjust the cache key length. */
+ cache_pos_advance(head_pos, seg_remain);
+ key->len = seg_remain;
+
+ /* Get for key: obtain a reference to the cache segment for the key. */
+ cache_seg_get(cache_seg);
+ /* Put for head_pos->cache_seg: release the reference for the current head's segment. */
+ cache_seg_put(head_pos->cache_seg);
+ head_pos->cache_seg = NULL;
+ } else {
+ /* Initialize a new data head if no segment is available. */
+ ret = cache_data_head_init(cache);
+ if (ret)
+ goto out;
+
+ goto again;
+ }
+
+out:
+ preempt_enable();
+
+ return ret;
+}
+
+static int cache_copy_from_req_bio(struct pcache_cache *cache, struct pcache_cache_key *key,
+ struct pcache_request *pcache_req, u32 bio_off)
+{
+ struct pcache_cache_pos *pos = &key->cache_pos;
+ struct pcache_segment *segment;
+
+ segment = &pos->cache_seg->segment;
+
+ return segment_copy_from_bio(segment, pos->seg_off, key->len, pcache_req->bio, bio_off);
+}
+
+static int cache_copy_to_req_bio(struct pcache_cache *cache, struct pcache_request *pcache_req,
+ u32 bio_off, u32 len, struct pcache_cache_pos *pos, u64 key_gen)
+{
+ struct pcache_cache_segment *cache_seg = pos->cache_seg;
+ struct pcache_segment *segment = &cache_seg->segment;
+ int ret;
+
+ spin_lock(&cache_seg->gen_lock);
+ if (key_gen < cache_seg->gen) {
+ spin_unlock(&cache_seg->gen_lock);
+ return -EINVAL;
+ }
+
+ ret = segment_copy_to_bio(segment, pos->seg_off, len, pcache_req->bio, bio_off);
+ spin_unlock(&cache_seg->gen_lock);
+
+ return ret;
+}
+
+/**
+ * miss_read_end_req - Handle the end of a miss read request.
+ * @backing_req: Pointer to the request structure.
+ * @read_ret: Return value of read.
+ *
+ * This function is called when a backing request to read data from
+ * the backing_dev is completed. If the key associated with the request
+ * is empty (a placeholder), it allocates cache space for the key,
+ * copies the data read from the bio into the cache, and updates
+ * the key's status. If the key has been overwritten by a write
+ * request during this process, it will be deleted from the cache
+ * tree and no further action will be taken.
+ */
+static void miss_read_end_req(struct pcache_backing_dev_req *backing_req, int read_ret)
+{
+ void *priv_data = backing_req->priv_data;
+ struct pcache_request *pcache_req = backing_req->req.upper_req;
+ struct pcache_cache *cache = backing_req->backing_dev->cache;
+ int ret;
+
+ if (priv_data) {
+ struct pcache_cache_key *key;
+ struct pcache_cache_subtree *cache_subtree;
+
+ key = (struct pcache_cache_key *)priv_data;
+ cache_subtree = key->cache_subtree;
+
+		/*
+		 * If this key was deleted from the cache_subtree by a write,
+		 * key->flags has been cleared, so if cache_key_empty() returns
+		 * true the key is still present in the cache_subtree.
+		 */
+ spin_lock(&cache_subtree->tree_lock);
+ if (cache_key_empty(key)) {
+ /* Check if the backing request was successful. */
+ if (read_ret) {
+ cache_key_delete(key);
+ goto unlock;
+ }
+
+ /* Allocate cache space for the key and copy data from the backing_dev. */
+ ret = cache_data_alloc(cache, key);
+ if (ret) {
+ cache_key_delete(key);
+ goto unlock;
+ }
+
+ ret = cache_copy_from_req_bio(cache, key, pcache_req, backing_req->req.bio_off);
+ if (ret) {
+ cache_seg_put(key->cache_pos.cache_seg);
+ cache_key_delete(key);
+ goto unlock;
+ }
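+			/*
+			 * The placeholder now holds valid cached data: clear
+			 * EMPTY and mark it CLEAN (it matches the backing device).
+			 */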
+ key->flags &= ~PCACHE_CACHE_KEY_FLAGS_EMPTY;
+ key->flags |= PCACHE_CACHE_KEY_FLAGS_CLEAN;
+
+ /* Append the key to the cache. */
+ ret = cache_key_append(cache, key, false);
+ if (ret) {
+ cache_seg_put(key->cache_pos.cache_seg);
+ cache_key_delete(key);
+ goto unlock;
+ }
+ }
+unlock:
+ spin_unlock(&cache_subtree->tree_lock);
+ cache_key_put(key);
+ }
+}
+
+/**
+ * submit_cache_miss_req - Submit a backing request when cache data is missing
+ * @cache: The cache context that manages cache operations
+ * @backing_req: The cache request containing information about the read request
+ *
+ * This function is used to handle cases where a cache read request cannot locate
+ * the required data in the cache. When such a miss occurs during `cache_subtree_walk`,
+ * it triggers a backing read request to fetch data from the backing storage.
+ *
+ * If `backing_req->priv_data` is set, it points to a `pcache_cache_key`,
+ * representing a new placeholder key to be inserted into the cache. The
+ * function calls `cache_key_insert` to add that key to the request key
+ * tree before the backing request is submitted.
+ */
+static void submit_cache_miss_req(struct pcache_cache *cache, struct pcache_backing_dev_req *backing_req)
+{
+ if (backing_req->priv_data) {
+ struct pcache_cache_key *key;
+
+ /* Attempt to insert the key into the cache if priv_data is set */
+ key = (struct pcache_cache_key *)backing_req->priv_data;
+ cache_key_insert(&cache->req_key_tree, key, true);
+ }
+ backing_dev_req_submit(backing_req, false);
+}
+
+static void cache_miss_req_free(struct pcache_backing_dev_req *backing_req)
+{
+ struct pcache_cache_key *key;
+
+ if (backing_req->priv_data) {
+ key = backing_req->priv_data;
+ backing_req->priv_data = NULL;
+ cache_key_put(key); /* for ->priv_data */
+ cache_key_put(key); /* for init ref in alloc */
+ }
+
+ backing_dev_req_end(backing_req);
+}
+
+static struct pcache_backing_dev_req *cache_miss_req_alloc(struct pcache_cache *cache,
+ struct pcache_request *parent,
+ gfp_t gfp_mask)
+{
+ struct pcache_backing_dev *backing_dev = cache->backing_dev;
+ struct pcache_backing_dev_req *backing_req;
+ struct pcache_cache_key *key = NULL;
+ struct pcache_backing_dev_req_opts req_opts = { 0 };
+
+ req_opts.type = BACKING_DEV_REQ_TYPE_REQ;
+ req_opts.gfp_mask = gfp_mask;
+ req_opts.req.upper_req = parent;
+
+ backing_req = backing_dev_req_alloc(backing_dev, &req_opts);
+ if (!backing_req)
+ return NULL;
+
+ key = cache_key_alloc(&cache->req_key_tree, gfp_mask);
+ if (!key)
+ goto free_backing_req;
+
+ cache_key_get(key);
+ backing_req->priv_data = key;
+
+ return backing_req;
+
+free_backing_req:
+ cache_miss_req_free(backing_req);
+ return NULL;
+}
+
+static void cache_miss_req_init(struct pcache_cache *cache,
+ struct pcache_backing_dev_req *backing_req,
+ struct pcache_request *parent,
+ u32 off, u32 len, bool insert_key)
+{
+ struct pcache_cache_key *key;
+ struct pcache_backing_dev_req_opts req_opts = { 0 };
+
+ req_opts.type = BACKING_DEV_REQ_TYPE_REQ;
+ req_opts.req.upper_req = parent;
+ req_opts.req.req_off = off;
+ req_opts.req.len = len;
+ req_opts.end_fn = miss_read_end_req;
+
+ backing_dev_req_init(backing_req, &req_opts);
+
+ if (insert_key) {
+ key = backing_req->priv_data;
+ key->off = parent->off + off;
+ key->len = len;
+ key->flags |= PCACHE_CACHE_KEY_FLAGS_EMPTY;
+ } else {
+ key = backing_req->priv_data;
+ backing_req->priv_data = NULL;
+ cache_key_put(key);
+ cache_key_put(key);
+ }
+}
+
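+/*
+ * Return a backing request for a cache miss: prefer the request
+ * pre-allocated with GFP_NOIO outside the subtree lock, otherwise try a
+ * GFP_NOWAIT allocation. A NULL return makes the walk bail out with
+ * SUBTREE_WALK_RET_NEED_REQ so the caller can pre-allocate and retry.
+ */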
+static struct pcache_backing_dev_req *get_pre_alloc_req(struct pcache_cache_subtree_walk_ctx *ctx)
+{
+ struct pcache_cache *cache = ctx->cache_tree->cache;
+ struct pcache_request *pcache_req = ctx->pcache_req;
+ struct pcache_backing_dev_req *backing_req;
+
+ if (ctx->pre_alloc_req) {
+ backing_req = ctx->pre_alloc_req;
+ ctx->pre_alloc_req = NULL;
+
+ return backing_req;
+ }
+
+ return cache_miss_req_alloc(cache, pcache_req, GFP_NOWAIT);
+}
+
+/*
+ * In the process of walking the cache tree to locate cached data, this
+ * function handles the situation where the requested data range lies
+ * entirely before an existing cache node (`key_tmp`). This outcome
+ * signifies that the target data is absent from the cache (cache miss).
+ *
+ * To fulfill this portion of the read request, the function creates a
+ * backing request (`backing_req`) for the missing data range represented
+ * by `key`. It then appends this request to the submission list in the
+ * `ctx`, which will later be processed to retrieve the data from backing
+ * storage. After setting up the backing request, `req_done` in `ctx` is
+ * updated to reflect the length of the handled range, and the range
+ * in `key` is adjusted by trimming off the portion that is now handled.
+ *
+ * The scenario handled here:
+ *
+ * |--------| key_tmp (existing cached range)
+ * |====| key (requested range, preceding key_tmp)
+ *
+ * Since `key` is before `key_tmp`, it signifies that the requested data
+ * range is missing in the cache (cache miss) and needs retrieval from
+ * backing storage.
+ */
+static int read_before(struct pcache_cache_key *key, struct pcache_cache_key *key_tmp,
+ struct pcache_cache_subtree_walk_ctx *ctx)
+{
+ struct pcache_backing_dev_req *backing_req;
+ struct pcache_cache *cache = ctx->cache_tree->cache;
+
+ /*
+ * In this scenario, `key` represents a range that precedes `key_tmp`,
+ * meaning the requested data range is missing from the cache tree
+ * and must be retrieved from the backing_dev.
+ */
+ backing_req = get_pre_alloc_req(ctx);
+ if (!backing_req)
+ return SUBTREE_WALK_RET_NEED_REQ;
+
+ cache_miss_req_init(cache, backing_req, ctx->pcache_req, ctx->req_done, key->len, true);
+
+ list_add(&backing_req->node, ctx->submit_req_list);
+ ctx->req_done += key->len;
+ cache_key_cutfront(key, key->len);
+
+ return SUBTREE_WALK_RET_OK;
+}
+
+/*
+ * During cache_subtree_walk, this function manages a scenario where part of the
+ * requested data range overlaps with an existing cache node (`key_tmp`).
+ *
+ * |----------------| key_tmp (existing cached range)
+ * |===========| key (requested range, overlapping the tail of key_tmp)
+ */
+static int read_overlap_tail(struct pcache_cache_key *key, struct pcache_cache_key *key_tmp,
+ struct pcache_cache_subtree_walk_ctx *ctx)
+{
+ struct pcache_cache *cache = ctx->cache_tree->cache;
+ struct pcache_backing_dev_req *backing_req;
+ u32 io_len;
+ int ret;
+
+ /*
+ * Calculate the length of the non-overlapping portion of `key`
+ * before `key_tmp`, representing the data missing in the cache.
+ */
+ io_len = cache_key_lstart(key_tmp) - cache_key_lstart(key);
+ if (io_len) {
+ backing_req = get_pre_alloc_req(ctx);
+ if (!backing_req)
+ return SUBTREE_WALK_RET_NEED_REQ;
+
+ cache_miss_req_init(cache, backing_req, ctx->pcache_req, ctx->req_done, io_len, true);
+
+ list_add(&backing_req->node, ctx->submit_req_list);
+ ctx->req_done += io_len;
+ cache_key_cutfront(key, io_len);
+ }
+
+ /*
+ * Handle the overlapping portion by calculating the length of
+ * the remaining data in `key` that coincides with `key_tmp`.
+ */
+ io_len = cache_key_lend(key) - cache_key_lstart(key_tmp);
+ if (cache_key_empty(key_tmp)) {
+ backing_req = get_pre_alloc_req(ctx);
+ if (!backing_req)
+ return SUBTREE_WALK_RET_NEED_REQ;
+
+ cache_miss_req_init(cache, backing_req, ctx->pcache_req, ctx->req_done, io_len, false);
+ submit_cache_miss_req(cache, backing_req);
+ } else {
+ ret = cache_copy_to_req_bio(ctx->cache_tree->cache, ctx->pcache_req, ctx->req_done,
+ io_len, &key_tmp->cache_pos, key_tmp->seg_gen);
+ if (ret) {
+ if (ret == -EINVAL) {
+ cache_key_delete(key_tmp);
+ return SUBTREE_WALK_RET_RESEARCH;
+ }
+
+ ctx->ret = ret;
+ return SUBTREE_WALK_RET_ERR;
+ }
+ }
+
+ ctx->req_done += io_len;
+ cache_key_cutfront(key, io_len);
+
+ return SUBTREE_WALK_RET_OK;
+}
+
+/*
+ * |----| key_tmp (existing cached range)
+ * |==========| key (requested range)
+ */
+static int read_overlap_contain(struct pcache_cache_key *key, struct pcache_cache_key *key_tmp,
+ struct pcache_cache_subtree_walk_ctx *ctx)
+{
+ struct pcache_cache *cache = ctx->cache_tree->cache;
+ struct pcache_backing_dev_req *backing_req;
+ u32 io_len;
+ int ret;
+
+ /*
+ * Calculate the non-overlapping part of `key` before `key_tmp`
+ * to identify the missing data length.
+ */
+ io_len = cache_key_lstart(key_tmp) - cache_key_lstart(key);
+ if (io_len) {
+ backing_req = get_pre_alloc_req(ctx);
+ if (!backing_req)
+ return SUBTREE_WALK_RET_NEED_REQ;
+
+ cache_miss_req_init(cache, backing_req, ctx->pcache_req, ctx->req_done, io_len, true);
+
+ list_add(&backing_req->node, ctx->submit_req_list);
+
+ ctx->req_done += io_len;
+ cache_key_cutfront(key, io_len);
+ }
+
+ /*
+ * Handle the overlapping portion between `key` and `key_tmp`.
+ */
+ io_len = key_tmp->len;
+ if (cache_key_empty(key_tmp)) {
+ backing_req = get_pre_alloc_req(ctx);
+ if (!backing_req)
+ return SUBTREE_WALK_RET_NEED_REQ;
+
+ cache_miss_req_init(cache, backing_req, ctx->pcache_req, ctx->req_done, io_len, false);
+ submit_cache_miss_req(cache, backing_req);
+ } else {
+ ret = cache_copy_to_req_bio(ctx->cache_tree->cache, ctx->pcache_req, ctx->req_done,
+ io_len, &key_tmp->cache_pos, key_tmp->seg_gen);
+ if (ret) {
+ if (ret == -EINVAL) {
+ cache_key_delete(key_tmp);
+ return SUBTREE_WALK_RET_RESEARCH;
+ }
+
+ ctx->ret = ret;
+ return SUBTREE_WALK_RET_ERR;
+ }
+ }
+
+ ctx->req_done += io_len;
+ cache_key_cutfront(key, io_len);
+
+ return SUBTREE_WALK_RET_OK;
+}
+
+/*
+ * |-----------| key_tmp (existing cached range)
+ * |====| key (requested range, fully within key_tmp)
+ *
+ * If `key_tmp` contains valid cached data, this function copies the relevant
+ * portion to the request's bio. Otherwise, it sends a backing request to
+ * fetch the required data range.
+ */
+static int read_overlap_contained(struct pcache_cache_key *key, struct pcache_cache_key *key_tmp,
+ struct pcache_cache_subtree_walk_ctx *ctx)
+{
+ struct pcache_cache *cache = ctx->cache_tree->cache;
+ struct pcache_backing_dev_req *backing_req;
+ struct pcache_cache_pos pos;
+ int ret;
+
+ /*
+ * Check if `key_tmp` is empty, indicating a miss. If so, initiate
+ * a backing request to fetch the required data for `key`.
+ */
+ if (cache_key_empty(key_tmp)) {
+ backing_req = get_pre_alloc_req(ctx);
+ if (!backing_req)
+ return SUBTREE_WALK_RET_NEED_REQ;
+
+ cache_miss_req_init(cache, backing_req, ctx->pcache_req, ctx->req_done, key->len, false);
+ submit_cache_miss_req(cache, backing_req);
+ } else {
+ cache_pos_copy(&pos, &key_tmp->cache_pos);
+ cache_pos_advance(&pos, cache_key_lstart(key) - cache_key_lstart(key_tmp));
+
+ ret = cache_copy_to_req_bio(ctx->cache_tree->cache, ctx->pcache_req, ctx->req_done,
+ key->len, &pos, key_tmp->seg_gen);
+ if (ret) {
+ if (ret == -EINVAL) {
+ cache_key_delete(key_tmp);
+ return SUBTREE_WALK_RET_RESEARCH;
+ }
+
+ ctx->ret = ret;
+ return SUBTREE_WALK_RET_ERR;
+ }
+ }
+
+ ctx->req_done += key->len;
+ cache_key_cutfront(key, key->len);
+
+ return SUBTREE_WALK_RET_OK;
+}
+
+/*
+ * |--------| key_tmp (existing cached range)
+ * |==========| key (requested range, overlapping the head of key_tmp)
+ */
+static int read_overlap_head(struct pcache_cache_key *key, struct pcache_cache_key *key_tmp,
+ struct pcache_cache_subtree_walk_ctx *ctx)
+{
+ struct pcache_cache *cache = ctx->cache_tree->cache;
+ struct pcache_backing_dev_req *backing_req;
+ struct pcache_cache_pos pos;
+ u32 io_len;
+ int ret;
+
+ io_len = cache_key_lend(key_tmp) - cache_key_lstart(key);
+
+ if (cache_key_empty(key_tmp)) {
+ backing_req = get_pre_alloc_req(ctx);
+ if (!backing_req)
+ return SUBTREE_WALK_RET_NEED_REQ;
+
+ cache_miss_req_init(cache, backing_req, ctx->pcache_req, ctx->req_done, io_len, false);
+ submit_cache_miss_req(cache, backing_req);
+ } else {
+ cache_pos_copy(&pos, &key_tmp->cache_pos);
+ cache_pos_advance(&pos, cache_key_lstart(key) - cache_key_lstart(key_tmp));
+
+ ret = cache_copy_to_req_bio(ctx->cache_tree->cache, ctx->pcache_req, ctx->req_done,
+ io_len, &pos, key_tmp->seg_gen);
+ if (ret) {
+ if (ret == -EINVAL) {
+ cache_key_delete(key_tmp);
+ return SUBTREE_WALK_RET_RESEARCH;
+ }
+
+ ctx->ret = ret;
+ return SUBTREE_WALK_RET_ERR;
+ }
+ }
+
+ ctx->req_done += io_len;
+ cache_key_cutfront(key, io_len);
+
+ return SUBTREE_WALK_RET_OK;
+}
+
+/**
+ * read_walk_finally - Finalizes the cache read tree walk by submitting any
+ * remaining backing requests
+ * @ctx: Context structure holding information about the cache,
+ * read request, and submission list
+ * @ret: the walk's return value so far
+ *
+ * This function is called at the end of `cache_subtree_walk` during a
+ * cache read operation. It first iterates through the submission list of
+ * backing requests created during the walk, removing each request from
+ * the list and submitting it. Then, if the walk completed without error
+ * and part of `key` was not covered by any cache entry, it sends one
+ * more backing request for that remaining range.
+ *
+ * The scenarios managed here include:
+ * - Submitting each backing request enqueued on `ctx->submit_req_list`
+ *   during the walk.
+ * - Sending a backing request for the remaining length of `key` if it was
+ *   not fulfilled by existing cache entries.
+ *
+ * This ensures all necessary backing requests for cache misses are submitted
+ * to the backing storage to retrieve any data that could not be found in
+ * the cache.
+ */
+static int read_walk_finally(struct pcache_cache_subtree_walk_ctx *ctx, int ret)
+{
+ struct pcache_cache *cache = ctx->cache_tree->cache;
+ struct pcache_backing_dev_req *backing_req, *next_req;
+ struct pcache_cache_key *key = ctx->key;
+
+ list_for_each_entry_safe(backing_req, next_req, ctx->submit_req_list, node) {
+ list_del_init(&backing_req->node);
+ submit_cache_miss_req(ctx->cache_tree->cache, backing_req);
+ }
+
+ if (ret != SUBTREE_WALK_RET_OK)
+ return ret;
+
+ if (key->len) {
+ backing_req = get_pre_alloc_req(ctx);
+ if (!backing_req)
+ return SUBTREE_WALK_RET_NEED_REQ;
+
+ cache_miss_req_init(cache, backing_req, ctx->pcache_req, ctx->req_done, key->len, true);
+ submit_cache_miss_req(cache, backing_req);
+ ctx->req_done += key->len;
+ }
+
+ return SUBTREE_WALK_RET_OK;
+}
+
+/*
+ * This function is used within `cache_subtree_walk` to determine whether the
+ * read operation has covered the requested data length. It compares the
+ * amount of data processed (`ctx->req_done`) with the total data length
+ * specified in the original request (`ctx->pcache_req->data_len`).
+ *
+ * If `req_done` meets or exceeds the required data length, the function
+ * returns `true`, indicating the walk is complete. Otherwise, it returns `false`,
+ * signaling that additional data processing is needed to fulfill the request.
+ */
+static bool read_walk_done(struct pcache_cache_subtree_walk_ctx *ctx)
+{
+ return (ctx->req_done >= ctx->pcache_req->data_len);
+}
+
+/**
+ * cache_read - Process a read request by traversing the cache tree
+ * @cache: Cache structure holding cache trees and related configurations
+ * @pcache_req: Request structure with information about the data to read
+ *
+ * This function attempts to fulfill a read request by traversing the cache tree(s)
+ * to locate cached data for the requested range. If parts of the data are missing
+ * in the cache, backing requests are generated to retrieve the required segments.
+ *
+ * The function operates by initializing a key for the requested data range and
+ * preparing a context (`walk_ctx`) to manage the cache tree traversal. The context
+ * includes pointers to functions (e.g., `read_before`, `read_overlap_tail`) that handle
+ * specific conditions encountered during the traversal. The `walk_finally` and `walk_done`
+ * functions manage the end stages of the traversal, while the `delete_key_list` and
+ * `submit_req_list` lists track any keys to be deleted or requests to be submitted.
+ *
+ * The function first calculates the requested range and checks if it fits within the
+ * current cache tree (based on the tree's size limits). It then locks the cache tree
+ * and performs a search to locate any matching keys. If there are outdated keys,
+ * these are deleted, and the search is restarted to ensure accurate data retrieval.
+ *
+ * If the requested range spans multiple cache trees, the function moves on to the
+ * next tree once the current range has been processed. This continues until the
+ * entire requested data length has been handled.
+ */
+static int cache_read(struct pcache_cache *cache, struct pcache_request *pcache_req)
+{
+ struct pcache_cache_key key_data = { .off = pcache_req->off, .len = pcache_req->data_len };
+ struct pcache_cache_subtree *cache_subtree;
+ struct pcache_cache_key *key_tmp = NULL, *key_next;
+ struct rb_node *prev_node = NULL;
+ struct pcache_cache_key *key = &key_data;
+ struct pcache_cache_subtree_walk_ctx walk_ctx = { 0 };
+ struct pcache_backing_dev_req *backing_req, *next_req;
+ LIST_HEAD(delete_key_list);
+ LIST_HEAD(submit_req_list);
+ int ret;
+
+ walk_ctx.cache_tree = &cache->req_key_tree;
+ walk_ctx.req_done = 0;
+ walk_ctx.pcache_req = pcache_req;
+ walk_ctx.before = read_before;
+ walk_ctx.overlap_tail = read_overlap_tail;
+ walk_ctx.overlap_head = read_overlap_head;
+ walk_ctx.overlap_contain = read_overlap_contain;
+ walk_ctx.overlap_contained = read_overlap_contained;
+ walk_ctx.walk_finally = read_walk_finally;
+ walk_ctx.walk_done = read_walk_done;
+ walk_ctx.delete_key_list = &delete_key_list;
+ walk_ctx.submit_req_list = &submit_req_list;
+
+next:
+ key->off = pcache_req->off + walk_ctx.req_done;
+ key->len = pcache_req->data_len - walk_ctx.req_done;
+ if (key->len > PCACHE_CACHE_SUBTREE_SIZE - (key->off & PCACHE_CACHE_SUBTREE_SIZE_MASK))
+ key->len = PCACHE_CACHE_SUBTREE_SIZE - (key->off & PCACHE_CACHE_SUBTREE_SIZE_MASK);
+
+ cache_subtree = get_subtree(&cache->req_key_tree, key->off);
+ spin_lock(&cache_subtree->tree_lock);
+search:
+ prev_node = cache_subtree_search(cache_subtree, key, NULL, NULL, &delete_key_list);
+ if (!list_empty(&delete_key_list)) {
+ list_for_each_entry_safe(key_tmp, key_next, &delete_key_list, list_node) {
+ list_del_init(&key_tmp->list_node);
+ cache_key_delete(key_tmp);
+ }
+ goto search;
+ }
+
+ walk_ctx.start_node = prev_node;
+ walk_ctx.key = key;
+
+ ret = cache_subtree_walk(&walk_ctx);
+ if (ret == SUBTREE_WALK_RET_RESEARCH)
+ goto search;
+ spin_unlock(&cache_subtree->tree_lock);
+
+ if (ret == SUBTREE_WALK_RET_ERR) {
+ ret = walk_ctx.ret;
+ goto out;
+ }
+
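+	/*
+	 * The walk ran out of GFP_NOWAIT requests; now that the subtree lock
+	 * is released, pre-allocate one with GFP_NOIO and retry the walk.
+	 */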
+ if (ret == SUBTREE_WALK_RET_NEED_REQ) {
+ walk_ctx.pre_alloc_req = cache_miss_req_alloc(cache, pcache_req, GFP_NOIO);
+ pcache_dev_debug(CACHE_TO_PCACHE(cache), "allocate pre_alloc_req with GFP_NOIO");
+ }
+
+ if (walk_ctx.req_done < pcache_req->data_len)
+ goto next;
+ ret = 0;
+out:
+ if (walk_ctx.pre_alloc_req)
+ cache_miss_req_free(walk_ctx.pre_alloc_req);
+
+ list_for_each_entry_safe(backing_req, next_req, &submit_req_list, node) {
+ list_del_init(&backing_req->node);
+ backing_dev_req_end(backing_req);
+ }
+
+ return ret;
+}
+
+static int cache_write(struct pcache_cache *cache, struct pcache_request *pcache_req)
+{
+ struct pcache_cache_subtree *cache_subtree;
+ struct pcache_cache_key *key;
+ u64 offset = pcache_req->off;
+ u32 length = pcache_req->data_len;
+ u32 io_done = 0;
+ int ret;
+
+ while (true) {
+ if (io_done >= length)
+ break;
+
+ key = cache_key_alloc(&cache->req_key_tree, GFP_NOIO);
+ key->off = offset + io_done;
+ key->len = length - io_done;
+ if (key->len > PCACHE_CACHE_SUBTREE_SIZE - (key->off & PCACHE_CACHE_SUBTREE_SIZE_MASK))
+ key->len = PCACHE_CACHE_SUBTREE_SIZE - (key->off & PCACHE_CACHE_SUBTREE_SIZE_MASK);
+
+ ret = cache_data_alloc(cache, key);
+ if (ret) {
+ cache_key_put(key);
+ goto err;
+ }
+
+ ret = cache_copy_from_req_bio(cache, key, pcache_req, io_done);
+ if (ret) {
+ cache_seg_put(key->cache_pos.cache_seg);
+ cache_key_put(key);
+ goto err;
+ }
+
+ cache_subtree = get_subtree(&cache->req_key_tree, key->off);
+ spin_lock(&cache_subtree->tree_lock);
+ cache_key_insert(&cache->req_key_tree, key, true);
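+		/*
+		 * For FUA bios, ask cache_key_append() to persist the kset
+		 * immediately instead of from the delayed flush work.
+		 */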
+ ret = cache_key_append(cache, key, pcache_req->bio->bi_opf & REQ_FUA);
+ if (ret) {
+ cache_seg_put(key->cache_pos.cache_seg);
+ cache_key_delete(key);
+ goto unlock;
+ }
+
+ io_done += key->len;
+ spin_unlock(&cache_subtree->tree_lock);
+ }
+
+ return 0;
+unlock:
+ spin_unlock(&cache_subtree->tree_lock);
+err:
+ return ret;
+}
+
+/**
+ * cache_flush - Flush all ksets to persist any pending cache data
+ * @cache: Pointer to the cache structure
+ *
+ * This function iterates through all ksets associated with the provided `cache`
+ * and ensures that any data marked for persistence is written to media. For each
+ * kset, it acquires the kset lock, then invokes `cache_kset_close`, which handles
+ * the persistence logic for that kset.
+ *
+ * If `cache_kset_close` encounters an error, the function exits immediately with
+ * the respective error code, preventing the flush operation from proceeding to
+ * subsequent ksets.
+ */
+int cache_flush(struct pcache_cache *cache)
+{
+ struct pcache_cache_kset *kset;
+ int ret;
+ u32 i;
+
+ for (i = 0; i < cache->n_ksets; i++) {
+ kset = get_kset(cache, i);
+
+ spin_lock(&kset->kset_lock);
+ ret = cache_kset_close(cache, kset);
+ spin_unlock(&kset->kset_lock);
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int pcache_cache_handle_req(struct pcache_cache *cache, struct pcache_request *pcache_req)
+{
+ struct bio *bio = pcache_req->bio;
+
+ if (unlikely(bio->bi_opf & REQ_PREFLUSH))
+ return cache_flush(cache);
+
+ if (bio_data_dir(bio) == READ)
+ return cache_read(cache, pcache_req);
+
+ return cache_write(cache, pcache_req);
+}
diff --git a/drivers/md/dm-pcache/cache_segment.c b/drivers/md/dm-pcache/cache_segment.c
new file mode 100644
index 000000000000..f0b58980806e
--- /dev/null
+++ b/drivers/md/dm-pcache/cache_segment.c
@@ -0,0 +1,305 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "cache_dev.h"
+#include "cache.h"
+#include "backing_dev.h"
+#include "dm_pcache.h"
+
+static inline struct pcache_segment_info *get_seg_info_addr(struct pcache_cache_segment *cache_seg)
+{
+ struct pcache_segment_info *seg_info_addr;
+ u32 seg_id = cache_seg->segment.seg_id;
+ void *seg_addr;
+
+ seg_addr = CACHE_DEV_SEGMENT(cache_seg->cache->cache_dev, seg_id);
+ seg_info_addr = seg_addr + PCACHE_SEG_INFO_SIZE * cache_seg->info_index;
+
+ return seg_info_addr;
+}
+
+static void cache_seg_info_write(struct pcache_cache_segment *cache_seg)
+{
+ struct pcache_segment_info *seg_info_addr;
+ struct pcache_segment_info *seg_info = &cache_seg->cache_seg_info;
+
+ mutex_lock(&cache_seg->info_lock);
+ seg_info->header.seq++;
+ seg_info->header.crc = pcache_meta_crc(&seg_info->header, sizeof(struct pcache_segment_info));
+
+ seg_info_addr = get_seg_info_addr(cache_seg);
+ memcpy_flushcache(seg_info_addr, seg_info, sizeof(struct pcache_segment_info));
+ pmem_wmb();
+
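+	/* Rotate to the next of the PCACHE_META_INDEX_MAX metadata slots. */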
+ cache_seg->info_index = (cache_seg->info_index + 1) % PCACHE_META_INDEX_MAX;
+ mutex_unlock(&cache_seg->info_lock);
+}
+
+static int cache_seg_info_load(struct pcache_cache_segment *cache_seg)
+{
+ struct pcache_segment_info *cache_seg_info_addr_base, *cache_seg_info_addr;
+ struct pcache_cache_dev *cache_dev = cache_seg->cache->cache_dev;
+ struct dm_pcache *pcache = CACHE_DEV_TO_PCACHE(cache_dev);
+ u32 seg_id = cache_seg->segment.seg_id;
+ int ret = 0;
+
+ cache_seg_info_addr_base = CACHE_DEV_SEGMENT(cache_dev, seg_id);
+
+ mutex_lock(&cache_seg->info_lock);
+ cache_seg_info_addr = pcache_meta_find_latest(&cache_seg_info_addr_base->header,
+ sizeof(struct pcache_segment_info),
+ PCACHE_SEG_INFO_SIZE,
+ &cache_seg->cache_seg_info);
+ if (IS_ERR(cache_seg_info_addr)) {
+ ret = PTR_ERR(cache_seg_info_addr);
+ goto out;
+ } else if (!cache_seg_info_addr) {
+ ret = -EIO;
+ goto out;
+ }
+ cache_seg->info_index = cache_seg_info_addr - cache_seg_info_addr_base;
+out:
+ mutex_unlock(&cache_seg->info_lock);
+
+ if (ret)
+ pcache_dev_err(pcache, "can't read segment info of segment: %u, ret: %d\n",
+ cache_seg->segment.seg_id, ret);
+ return ret;
+}
+
+static int cache_seg_ctrl_load(struct pcache_cache_segment *cache_seg)
+{
+ struct pcache_cache_seg_ctrl *cache_seg_ctrl = cache_seg->cache_seg_ctrl;
+ struct pcache_cache_seg_gen cache_seg_gen, *cache_seg_gen_addr;
+ int ret = 0;
+
+ cache_seg_gen_addr = pcache_meta_find_latest(&cache_seg_ctrl->gen->header,
+ sizeof(struct pcache_cache_seg_gen),
+ sizeof(struct pcache_cache_seg_gen),
+ &cache_seg_gen);
+ if (IS_ERR(cache_seg_gen_addr)) {
+ ret = PTR_ERR(cache_seg_gen_addr);
+ goto out;
+ }
+
+ if (!cache_seg_gen_addr) {
+ cache_seg->gen = 0;
+ cache_seg->gen_seq = 0;
+ cache_seg->gen_index = 0;
+ goto out;
+ }
+
+ cache_seg->gen = cache_seg_gen.gen;
+ cache_seg->gen_seq = cache_seg_gen.header.seq;
+ cache_seg->gen_index = (cache_seg_gen_addr - cache_seg_ctrl->gen);
+out:
+
+ return ret;
+}
+
+static inline struct pcache_cache_seg_gen *get_cache_seg_gen_addr(struct pcache_cache_segment *cache_seg)
+{
+ struct pcache_cache_seg_ctrl *cache_seg_ctrl = cache_seg->cache_seg_ctrl;
+
+ return (cache_seg_ctrl->gen + cache_seg->gen_index);
+}
+
+/*
+ * cache_seg_ctrl_write - write cache segment control information
+ * @seg: the cache segment to update
+ *
+ * This function writes the control information of a cache segment to media.
+ *
+ * Although this updates shared control data, we intentionally do not use
+ * any locking here. All accesses to control information are single-threaded:
+ *
+ * - All reads occur during the init phase, where no concurrent writes
+ * can happen.
+ * - Writes happen once during init and once when the last reference
+ * to the segment is dropped in cache_seg_put().
+ *
+ * Both cases are guaranteed to be single-threaded, so there is no risk
+ * of concurrent read/write races.
+ */
+static void cache_seg_ctrl_write(struct pcache_cache_segment *cache_seg)
+{
+ struct pcache_cache_seg_gen cache_seg_gen;
+
+ cache_seg_gen.gen = cache_seg->gen;
+ cache_seg_gen.header.seq = ++cache_seg->gen_seq;
+ cache_seg_gen.header.crc = pcache_meta_crc(&cache_seg_gen.header,
+ sizeof(struct pcache_cache_seg_gen));
+
+ memcpy_flushcache(get_cache_seg_gen_addr(cache_seg), &cache_seg_gen, sizeof(struct pcache_cache_seg_gen));
+ pmem_wmb();
+
+ cache_seg->gen_index = (cache_seg->gen_index + 1) % PCACHE_META_INDEX_MAX;
+}
+
+static void cache_seg_ctrl_init(struct pcache_cache_segment *cache_seg)
+{
+ cache_seg->gen = 0;
+ cache_seg->gen_seq = 0;
+ cache_seg->gen_index = 0;
+ cache_seg_ctrl_write(cache_seg);
+}
+
+static int cache_seg_meta_load(struct pcache_cache_segment *cache_seg)
+{
+ int ret;
+
+ ret = cache_seg_info_load(cache_seg);
+ if (ret)
+ goto err;
+
+ ret = cache_seg_ctrl_load(cache_seg);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ return ret;
+}
+
+/**
+ * cache_seg_set_next_seg - Sets the ID of the next segment
+ * @cache_seg: Pointer to the cache segment structure.
+ * @seg_id: The segment ID to set as the next segment.
+ *
+ * A pcache_cache allocates multiple cache segments, which are linked together
+ * through next_seg. When loading a pcache_cache, the first cache segment can
+ * be found using cache->seg_id, which allows access to all the cache segments.
+ */
+void cache_seg_set_next_seg(struct pcache_cache_segment *cache_seg, u32 seg_id)
+{
+ cache_seg->cache_seg_info.flags |= PCACHE_SEG_INFO_FLAGS_HAS_NEXT;
+ cache_seg->cache_seg_info.next_seg = seg_id;
+ cache_seg_info_write(cache_seg);
+}
+
+int cache_seg_init(struct pcache_cache *cache, u32 seg_id, u32 cache_seg_id,
+ bool new_cache)
+{
+ struct pcache_cache_dev *cache_dev = cache->cache_dev;
+ struct pcache_cache_segment *cache_seg = &cache->segments[cache_seg_id];
+ struct pcache_segment_init_options seg_options = { 0 };
+ struct pcache_segment *segment = &cache_seg->segment;
+ int ret;
+
+ cache_seg->cache = cache;
+ cache_seg->cache_seg_id = cache_seg_id;
+ spin_lock_init(&cache_seg->gen_lock);
+ atomic_set(&cache_seg->refs, 0);
+ mutex_init(&cache_seg->info_lock);
+
+ /* init pcache_segment */
+ seg_options.type = PCACHE_SEGMENT_TYPE_CACHE_DATA;
+ seg_options.data_off = PCACHE_CACHE_SEG_CTRL_OFF + PCACHE_CACHE_SEG_CTRL_SIZE;
+ seg_options.seg_id = seg_id;
+ seg_options.seg_info = &cache_seg->cache_seg_info;
+ pcache_segment_init(cache_dev, segment, &seg_options);
+
+ cache_seg->cache_seg_ctrl = CACHE_DEV_SEGMENT(cache_dev, seg_id) + PCACHE_CACHE_SEG_CTRL_OFF;
+
+ if (new_cache) {
+ cache_dev_zero_range(cache_dev, CACHE_DEV_SEGMENT(cache_dev, seg_id),
+ PCACHE_SEG_INFO_SIZE * PCACHE_META_INDEX_MAX +
+ PCACHE_CACHE_SEG_CTRL_SIZE);
+
+ cache_seg_ctrl_init(cache_seg);
+
+ cache_seg->info_index = 0;
+ cache_seg_info_write(cache_seg);
+
+ /* clear outdated kset in segment */
+ memcpy_flushcache(segment->data, &pcache_empty_kset, sizeof(struct pcache_cache_kset_onmedia));
+ pmem_wmb();
+ } else {
+ ret = cache_seg_meta_load(cache_seg);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+err:
+ return ret;
+}
+
+/**
+ * get_cache_segment - Retrieves a free cache segment from the cache.
+ * @cache: Pointer to the cache structure.
+ *
+ * This function attempts to find a free cache segment that can be used.
+ * It locks the segment map and checks for the next available segment ID.
+ * If a free segment is found, it initializes it and returns a pointer to the
+ * cache segment structure. Returns NULL if no segments are available.
+ */
+struct pcache_cache_segment *get_cache_segment(struct pcache_cache *cache)
+{
+ struct pcache_cache_segment *cache_seg;
+ u32 seg_id;
+
+ spin_lock(&cache->seg_map_lock);
+again:
+ seg_id = find_next_zero_bit(cache->seg_map, cache->n_segs, cache->last_cache_seg);
+ if (seg_id == cache->n_segs) {
+ /* reset the hint of ->last_cache_seg and retry */
+ if (cache->last_cache_seg) {
+ cache->last_cache_seg = 0;
+ goto again;
+ }
+ cache->cache_full = true;
+ spin_unlock(&cache->seg_map_lock);
+ return NULL;
+ }
+
+ /*
+ * found an available cache_seg, mark it used in seg_map
+ * and update the search hint ->last_cache_seg
+ */
+ __set_bit(seg_id, cache->seg_map);
+ cache->last_cache_seg = seg_id;
+ spin_unlock(&cache->seg_map_lock);
+
+ cache_seg = &cache->segments[seg_id];
+ cache_seg->cache_seg_id = seg_id;
+
+ return cache_seg;
+}
+
+static void cache_seg_gen_increase(struct pcache_cache_segment *cache_seg)
+{
+ spin_lock(&cache_seg->gen_lock);
+ cache_seg->gen++;
+ spin_unlock(&cache_seg->gen_lock);
+
+ cache_seg_ctrl_write(cache_seg);
+}
+
+void cache_seg_get(struct pcache_cache_segment *cache_seg)
+{
+ atomic_inc(&cache_seg->refs);
+}
+
+static void cache_seg_invalidate(struct pcache_cache_segment *cache_seg)
+{
+ struct pcache_cache *cache;
+
+ cache = cache_seg->cache;
+ cache_seg_gen_increase(cache_seg);
+
+ spin_lock(&cache->seg_map_lock);
+ if (cache->cache_full)
+ cache->cache_full = false;
+ __clear_bit(cache_seg->cache_seg_id, cache->seg_map);
+ spin_unlock(&cache->seg_map_lock);
+
+ pcache_defer_reqs_kick(CACHE_TO_PCACHE(cache));
+	/* clean_work will remove invalid keys from the key_tree */
+ queue_work(cache_get_wq(cache), &cache->clean_work);
+}
+
+void cache_seg_put(struct pcache_cache_segment *cache_seg)
+{
+ if (atomic_dec_and_test(&cache_seg->refs))
+ cache_seg_invalidate(cache_seg);
+}
diff --git a/drivers/md/dm-pcache/cache_writeback.c b/drivers/md/dm-pcache/cache_writeback.c
new file mode 100644
index 000000000000..87a82b3fe836
--- /dev/null
+++ b/drivers/md/dm-pcache/cache_writeback.c
@@ -0,0 +1,261 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/bio.h>
+
+#include "cache.h"
+#include "backing_dev.h"
+#include "cache_dev.h"
+#include "dm_pcache.h"
+
+static void writeback_ctx_end(struct pcache_cache *cache, int ret)
+{
+ if (ret && !cache->writeback_ctx.ret) {
+ pcache_dev_err(CACHE_TO_PCACHE(cache), "writeback error: %d", ret);
+ cache->writeback_ctx.ret = ret;
+ }
+
+ if (!atomic_dec_and_test(&cache->writeback_ctx.pending))
+ return;
+
+ if (!cache->writeback_ctx.ret) {
+ backing_dev_flush(cache->backing_dev);
+
+ mutex_lock(&cache->dirty_tail_lock);
+ cache_pos_advance(&cache->dirty_tail, cache->writeback_ctx.advance);
+ cache_encode_dirty_tail(cache);
+ mutex_unlock(&cache->dirty_tail_lock);
+ }
+ queue_delayed_work(cache_get_wq(cache), &cache->writeback_work, 0);
+}
+
+static void writeback_end_req(struct pcache_backing_dev_req *backing_req, int ret)
+{
+ struct pcache_cache *cache = backing_req->priv_data;
+
+ mutex_lock(&cache->writeback_lock);
+ writeback_ctx_end(cache, ret);
+ mutex_unlock(&cache->writeback_lock);
+}
+
+static inline bool is_cache_clean(struct pcache_cache *cache, struct pcache_cache_pos *dirty_tail)
+{
+ struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
+ struct pcache_cache_kset_onmedia *kset_onmedia;
+ u32 to_copy;
+ void *addr;
+ int ret;
+
+ addr = cache_pos_addr(dirty_tail);
+ kset_onmedia = (struct pcache_cache_kset_onmedia *)cache->wb_kset_onmedia_buf;
+
+ to_copy = min(PCACHE_KSET_ONMEDIA_SIZE_MAX, PCACHE_SEG_SIZE - dirty_tail->seg_off);
+ ret = copy_mc_to_kernel(kset_onmedia, addr, to_copy);
+ if (ret) {
+ pcache_dev_err(pcache, "error to read kset: %d", ret);
+ return true;
+ }
+
+ /* Check if the magic number matches the expected value */
+ if (kset_onmedia->magic != PCACHE_KSET_MAGIC) {
+ pcache_dev_debug(pcache, "dirty_tail: %u:%u magic: %llx, not expected: %llx\n",
+ dirty_tail->cache_seg->cache_seg_id, dirty_tail->seg_off,
+ kset_onmedia->magic, PCACHE_KSET_MAGIC);
+ return true;
+ }
+
+ /* Verify the CRC checksum for data integrity */
+ if (kset_onmedia->crc != cache_kset_crc(kset_onmedia)) {
+ pcache_dev_debug(pcache, "dirty_tail: %u:%u crc: %x, not expected: %x\n",
+ dirty_tail->cache_seg->cache_seg_id, dirty_tail->seg_off,
+ cache_kset_crc(kset_onmedia), kset_onmedia->crc);
+ return true;
+ }
+
+ return false;
+}
+
+void cache_writeback_exit(struct pcache_cache *cache)
+{
+ cancel_delayed_work_sync(&cache->writeback_work);
+ backing_dev_flush(cache->backing_dev);
+ cache_tree_exit(&cache->writeback_key_tree);
+}
+
+int cache_writeback_init(struct pcache_cache *cache)
+{
+ int ret;
+
+ ret = cache_tree_init(cache, &cache->writeback_key_tree, 1);
+ if (ret)
+ goto err;
+
+ atomic_set(&cache->writeback_ctx.pending, 0);
+
+ /* Queue delayed work to start writeback handling */
+ queue_delayed_work(cache_get_wq(cache), &cache->writeback_work, 0);
+
+ return 0;
+err:
+ return ret;
+}
+
+static void cache_key_writeback(struct pcache_cache *cache, struct pcache_cache_key *key)
+{
+ struct pcache_backing_dev_req *writeback_req;
+ struct pcache_backing_dev_req_opts writeback_req_opts = { 0 };
+ struct pcache_cache_pos *pos;
+ void *addr;
+ u32 seg_remain, req_len, done = 0;
+
+ if (cache_key_clean(key))
+ return;
+
+ pos = &key->cache_pos;
+
+ seg_remain = cache_seg_remain(pos);
+ BUG_ON(seg_remain < key->len);
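+	/* A key never crosses a segment boundary, so seg_remain covers it. */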
+next_req:
+ addr = cache_pos_addr(pos) + done;
+ req_len = backing_dev_req_coalesced_max_len(addr, key->len - done);
+
+ writeback_req_opts.type = BACKING_DEV_REQ_TYPE_KMEM;
+ writeback_req_opts.gfp_mask = GFP_NOIO;
+ writeback_req_opts.end_fn = writeback_end_req;
+ writeback_req_opts.priv_data = cache;
+
+ writeback_req_opts.kmem.data = addr;
+ writeback_req_opts.kmem.opf = REQ_OP_WRITE;
+ writeback_req_opts.kmem.len = req_len;
+ writeback_req_opts.kmem.backing_off = key->off + done;
+
+ writeback_req = backing_dev_req_create(cache->backing_dev, &writeback_req_opts);
+
+ atomic_inc(&cache->writeback_ctx.pending);
+ backing_dev_req_submit(writeback_req, true);
+
+ done += req_len;
+ if (done < key->len)
+ goto next_req;
+}
+
+static void cache_wb_tree_writeback(struct pcache_cache *cache, u32 advance)
+{
+ struct pcache_cache_tree *cache_tree = &cache->writeback_key_tree;
+ struct pcache_cache_subtree *cache_subtree;
+ struct rb_node *node;
+ struct pcache_cache_key *key;
+ u32 i;
+
+ cache->writeback_ctx.ret = 0;
+ cache->writeback_ctx.advance = advance;
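+	/*
+	 * Bias pending with 1 so the context cannot complete while requests
+	 * are still being issued; the final writeback_ctx_end() drops it.
+	 */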
+ atomic_set(&cache->writeback_ctx.pending, 1);
+
+ for (i = 0; i < cache_tree->n_subtrees; i++) {
+ cache_subtree = &cache_tree->subtrees[i];
+
+ node = rb_first(&cache_subtree->root);
+ while (node) {
+ key = CACHE_KEY(node);
+ node = rb_next(node);
+
+ cache_key_writeback(cache, key);
+ cache_key_delete(key);
+ }
+ }
+ writeback_ctx_end(cache, 0);
+}
+
+static int cache_kset_insert_tree(struct pcache_cache *cache, struct pcache_cache_kset_onmedia *kset_onmedia)
+{
+ struct pcache_cache_key_onmedia *key_onmedia;
+ struct pcache_cache_subtree *cache_subtree;
+ struct pcache_cache_key *key;
+ int ret;
+ u32 i;
+
+ /* Iterate through all keys in the kset and write each back to storage */
+ for (i = 0; i < kset_onmedia->key_num; i++) {
+ key_onmedia = &kset_onmedia->data[i];
+
+ key = cache_key_alloc(&cache->writeback_key_tree, GFP_NOIO);
+ ret = cache_key_decode(cache, key_onmedia, key);
+ if (ret) {
+ cache_key_put(key);
+ goto clear_tree;
+ }
+
+ cache_subtree = get_subtree(&cache->writeback_key_tree, key->off);
+ spin_lock(&cache_subtree->tree_lock);
+ cache_key_insert(&cache->writeback_key_tree, key, true);
+ spin_unlock(&cache_subtree->tree_lock);
+ }
+
+ return 0;
+clear_tree:
+ cache_tree_clear(&cache->writeback_key_tree);
+ return ret;
+}
+
+static void last_kset_writeback(struct pcache_cache *cache,
+ struct pcache_cache_kset_onmedia *last_kset_onmedia)
+{
+ struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
+ struct pcache_cache_segment *next_seg;
+
+ pcache_dev_debug(pcache, "last kset, next: %u\n", last_kset_onmedia->next_cache_seg_id);
+
+ next_seg = &cache->segments[last_kset_onmedia->next_cache_seg_id];
+
+ mutex_lock(&cache->dirty_tail_lock);
+ cache->dirty_tail.cache_seg = next_seg;
+ cache->dirty_tail.seg_off = 0;
+ cache_encode_dirty_tail(cache);
+ mutex_unlock(&cache->dirty_tail_lock);
+}
+
+void cache_writeback_fn(struct work_struct *work)
+{
+ struct pcache_cache *cache = container_of(work, struct pcache_cache, writeback_work.work);
+ struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
+ struct pcache_cache_pos dirty_tail;
+ struct pcache_cache_kset_onmedia *kset_onmedia;
+ u32 delay;
+ int ret;
+
+ mutex_lock(&cache->writeback_lock);
+ if (atomic_read(&cache->writeback_ctx.pending))
+ goto unlock;
+
+ if (pcache_is_stopping(pcache))
+ goto unlock;
+
+ kset_onmedia = (struct pcache_cache_kset_onmedia *)cache->wb_kset_onmedia_buf;
+
+ mutex_lock(&cache->dirty_tail_lock);
+ cache_pos_copy(&dirty_tail, &cache->dirty_tail);
+ mutex_unlock(&cache->dirty_tail_lock);
+
+ if (is_cache_clean(cache, &dirty_tail)) {
+ delay = PCACHE_CACHE_WRITEBACK_INTERVAL;
+ goto queue_work;
+ }
+
+ if (kset_onmedia->flags & PCACHE_KSET_FLAGS_LAST) {
+ last_kset_writeback(cache, kset_onmedia);
+ delay = 0;
+ goto queue_work;
+ }
+
+ ret = cache_kset_insert_tree(cache, kset_onmedia);
+ if (ret) {
+ delay = PCACHE_CACHE_WRITEBACK_INTERVAL;
+ goto queue_work;
+ }
+
+ cache_wb_tree_writeback(cache, get_kset_onmedia_size(kset_onmedia));
+ delay = 0;
+queue_work:
+ queue_delayed_work(cache_get_wq(cache), &cache->writeback_work, delay);
+unlock:
+ mutex_unlock(&cache->writeback_lock);
+}
diff --git a/drivers/md/dm-pcache/dm_pcache.c b/drivers/md/dm-pcache/dm_pcache.c
new file mode 100644
index 000000000000..e5f5936fa6f0
--- /dev/null
+++ b/drivers/md/dm-pcache/dm_pcache.c
@@ -0,0 +1,497 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/bio.h>
+
+#include "../dm-core.h"
+#include "cache_dev.h"
+#include "backing_dev.h"
+#include "cache.h"
+#include "dm_pcache.h"
+
+void pcache_defer_reqs_kick(struct dm_pcache *pcache)
+{
+ struct pcache_cache *cache = &pcache->cache;
+
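+	/*
+	 * Deferred requests are waiting for cache space; kick them only
+	 * when the cache is not full.
+	 */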
+ spin_lock(&cache->seg_map_lock);
+ if (!cache->cache_full)
+ queue_work(pcache->task_wq, &pcache->defered_req_work);
+ spin_unlock(&cache->seg_map_lock);
+}
+
+static void defer_req(struct pcache_request *pcache_req)
+{
+ struct dm_pcache *pcache = pcache_req->pcache;
+
+ BUG_ON(!list_empty(&pcache_req->list_node));
+
+ spin_lock(&pcache->defered_req_list_lock);
+ list_add(&pcache_req->list_node, &pcache->defered_req_list);
+ pcache_defer_reqs_kick(pcache);
+ spin_unlock(&pcache->defered_req_list_lock);
+}
+
+static void defered_req_fn(struct work_struct *work)
+{
+ struct dm_pcache *pcache = container_of(work, struct dm_pcache, defered_req_work);
+ struct pcache_request *pcache_req;
+ LIST_HEAD(tmp_list);
+ int ret;
+
+ if (pcache_is_stopping(pcache))
+ return;
+
+ spin_lock(&pcache->defered_req_list_lock);
+ list_splice_init(&pcache->defered_req_list, &tmp_list);
+ spin_unlock(&pcache->defered_req_list_lock);
+
+ while (!list_empty(&tmp_list)) {
+ pcache_req = list_first_entry(&tmp_list,
+ struct pcache_request, list_node);
+ list_del_init(&pcache_req->list_node);
+ pcache_req->ret = 0;
+ ret = pcache_cache_handle_req(&pcache->cache, pcache_req);
+ if (ret == -EBUSY)
+ defer_req(pcache_req);
+ else
+ pcache_req_put(pcache_req, ret);
+ }
+}
+
+void pcache_req_get(struct pcache_request *pcache_req)
+{
+ kref_get(&pcache_req->ref);
+}
+
+static void end_req(struct kref *ref)
+{
+ struct pcache_request *pcache_req = container_of(ref, struct pcache_request, ref);
+ struct dm_pcache *pcache = pcache_req->pcache;
+ struct bio *bio = pcache_req->bio;
+ int ret = pcache_req->ret;
+
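+	/*
+	 * -EBUSY means the request could not get cache space: take a fresh
+	 * reference and retry it via the deferred list instead of completing.
+	 */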
+ if (ret == -EBUSY) {
+ pcache_req_get(pcache_req);
+ defer_req(pcache_req);
+ } else {
+ bio->bi_status = errno_to_blk_status(ret);
+ bio_endio(bio);
+
+ if (atomic_dec_and_test(&pcache->inflight_reqs))
+ wake_up(&pcache->inflight_wq);
+ }
+}
+
+void pcache_req_put(struct pcache_request *pcache_req, int ret)
+{
+ /* Set the return status if it is not already set */
+ if (ret && !pcache_req->ret)
+ pcache_req->ret = ret;
+
+ kref_put(&pcache_req->ref, end_req);
+}
+
+static bool at_least_one_arg(struct dm_arg_set *as, char **error)
+{
+ if (!as->argc) {
+ *error = "Insufficient args";
+ return false;
+ }
+
+ return true;
+}
+
+static int parse_cache_dev(struct dm_pcache *pcache, struct dm_arg_set *as,
+ char **error)
+{
+ int ret;
+
+ if (!at_least_one_arg(as, error))
+ return -EINVAL;
+ ret = dm_get_device(pcache->ti, dm_shift_arg(as),
+ BLK_OPEN_READ | BLK_OPEN_WRITE,
+ &pcache->cache_dev.dm_dev);
+ if (ret) {
+ *error = "Error opening cache device";
+ return ret;
+ }
+
+ return 0;
+}
+
+static int parse_backing_dev(struct dm_pcache *pcache, struct dm_arg_set *as,
+ char **error)
+{
+ int ret;
+
+ if (!at_least_one_arg(as, error))
+ return -EINVAL;
+
+ ret = dm_get_device(pcache->ti, dm_shift_arg(as),
+ BLK_OPEN_READ | BLK_OPEN_WRITE,
+ &pcache->backing_dev.dm_dev);
+ if (ret) {
+ *error = "Error opening backing device";
+ return ret;
+ }
+
+ return 0;
+}
+
+static void pcache_init_opts(struct pcache_cache_options *opts)
+{
+ opts->cache_mode = PCACHE_CACHE_MODE_WRITEBACK;
+ opts->data_crc = false;
+}
+
+static int parse_cache_opts(struct dm_pcache *pcache, struct dm_arg_set *as,
+ char **error)
+{
+ struct pcache_cache_options *opts = &pcache->opts;
+ static const struct dm_arg _args[] = {
+ {0, 4, "Invalid number of cache option arguments"},
+ };
+ unsigned int argc;
+ const char *arg;
+ int ret;
+
+ pcache_init_opts(opts);
+ if (!as->argc)
+ return 0;
+
+ ret = dm_read_arg_group(_args, as, &argc, error);
+ if (ret)
+ return -EINVAL;
+
+ while (argc) {
+ arg = dm_shift_arg(as);
+ argc--;
+
+ if (!strcmp(arg, "cache_mode")) {
+ arg = dm_shift_arg(as);
+ if (!strcmp(arg, "writeback")) {
+ opts->cache_mode = PCACHE_CACHE_MODE_WRITEBACK;
+ } else {
+ *error = "Invalid cache mode parameter";
+ return -EINVAL;
+ }
+ argc--;
+ } else if (!strcmp(arg, "data_crc")) {
+ arg = dm_shift_arg(as);
+ if (!strcmp(arg, "true")) {
+ opts->data_crc = true;
+ } else if (!strcmp(arg, "false")) {
+ opts->data_crc = false;
+ } else {
+ *error = "Invalid data crc parameter";
+ return -EINVAL;
+ }
+ argc--;
+ } else {
+ *error = "Unrecognised cache option requested";
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int pcache_start(struct dm_pcache *pcache, char **error)
+{
+ int ret;
+
+ ret = cache_dev_start(pcache);
+ if (ret) {
+ *error = "Failed to start cache dev";
+ return ret;
+ }
+
+ ret = backing_dev_start(pcache);
+ if (ret) {
+ *error = "Failed to start backing dev";
+ goto stop_cache;
+ }
+
+ ret = pcache_cache_start(pcache);
+ if (ret) {
+ *error = "Failed to start pcache";
+ goto stop_backing;
+ }
+
+ return 0;
+stop_backing:
+ backing_dev_stop(pcache);
+stop_cache:
+ cache_dev_stop(pcache);
+
+ return ret;
+}
+
+static void pcache_destroy_args(struct dm_pcache *pcache)
+{
+ if (pcache->cache_dev.dm_dev)
+ dm_put_device(pcache->ti, pcache->cache_dev.dm_dev);
+ if (pcache->backing_dev.dm_dev)
+ dm_put_device(pcache->ti, pcache->backing_dev.dm_dev);
+}
+
+static int pcache_parse_args(struct dm_pcache *pcache, unsigned int argc, char **argv,
+ char **error)
+{
+ struct dm_arg_set as;
+ int ret;
+
+ as.argc = argc;
+ as.argv = argv;
+
+ /*
+ * Parse cache device
+ */
+ ret = parse_cache_dev(pcache, &as, error);
+ if (ret)
+ return ret;
+ /*
+ * Parse backing device
+ */
+ ret = parse_backing_dev(pcache, &as, error);
+ if (ret)
+ goto out;
+ /*
+ * Parse optional arguments
+ */
+ ret = parse_cache_opts(pcache, &as, error);
+ if (ret)
+ goto out;
+
+ return 0;
+out:
+ pcache_destroy_args(pcache);
+ return ret;
+}
+
+static int dm_pcache_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+{
+ struct mapped_device *md = ti->table->md;
+ struct dm_pcache *pcache;
+ int ret;
+
+ if (md->map) {
+ ti->error = "Don't support table loading for live md";
+ return -EOPNOTSUPP;
+ }
+
+ /* Allocate memory for the cache structure */
+ pcache = kzalloc(sizeof(struct dm_pcache), GFP_KERNEL);
+ if (!pcache)
+ return -ENOMEM;
+
+ pcache->task_wq = alloc_workqueue("pcache-%s-wq", WQ_UNBOUND | WQ_MEM_RECLAIM,
+ 0, md->name);
+ if (!pcache->task_wq) {
+ ret = -ENOMEM;
+ goto free_pcache;
+ }
+
+ spin_lock_init(&pcache->defered_req_list_lock);
+ INIT_LIST_HEAD(&pcache->defered_req_list);
+ INIT_WORK(&pcache->defered_req_work, defered_req_fn);
+ pcache->ti = ti;
+
+ ret = pcache_parse_args(pcache, argc, argv, &ti->error);
+ if (ret)
+ goto destroy_wq;
+
+ ret = pcache_start(pcache, &ti->error);
+ if (ret)
+ goto destroy_args;
+
+ ti->num_flush_bios = 1;
+ ti->flush_supported = true;
+ ti->per_io_data_size = sizeof(struct pcache_request);
+ ti->private = pcache;
+ atomic_set(&pcache->inflight_reqs, 0);
+ atomic_set(&pcache->state, PCACHE_STATE_RUNNING);
+ init_waitqueue_head(&pcache->inflight_wq);
+
+ return 0;
+destroy_args:
+ pcache_destroy_args(pcache);
+destroy_wq:
+ destroy_workqueue(pcache->task_wq);
+free_pcache:
+ kfree(pcache);
+
+ return ret;
+}
+
+static void defer_req_stop(struct dm_pcache *pcache)
+{
+ struct pcache_request *pcache_req;
+ LIST_HEAD(tmp_list);
+
+ flush_work(&pcache->defered_req_work);
+
+ spin_lock(&pcache->defered_req_list_lock);
+ list_splice_init(&pcache->defered_req_list, &tmp_list);
+ spin_unlock(&pcache->defered_req_list_lock);
+
+ while (!list_empty(&tmp_list)) {
+ pcache_req = list_first_entry(&tmp_list,
+ struct pcache_request, list_node);
+ list_del_init(&pcache_req->list_node);
+ pcache_req_put(pcache_req, -EIO);
+ }
+}
+
+static void dm_pcache_dtr(struct dm_target *ti)
+{
+ struct dm_pcache *pcache;
+
+ pcache = ti->private;
+ atomic_set(&pcache->state, PCACHE_STATE_STOPPING);
+ defer_req_stop(pcache);
+
+ wait_event(pcache->inflight_wq,
+ atomic_read(&pcache->inflight_reqs) == 0);
+
+ pcache_cache_stop(pcache);
+ backing_dev_stop(pcache);
+ cache_dev_stop(pcache);
+
+ pcache_destroy_args(pcache);
+ drain_workqueue(pcache->task_wq);
+ destroy_workqueue(pcache->task_wq);
+
+ kfree(pcache);
+}
+
+static int dm_pcache_map_bio(struct dm_target *ti, struct bio *bio)
+{
+ struct pcache_request *pcache_req = dm_per_bio_data(bio, sizeof(struct pcache_request));
+ struct dm_pcache *pcache = ti->private;
+ int ret;
+
+ pcache_req->pcache = pcache;
+ kref_init(&pcache_req->ref);
+ pcache_req->ret = 0;
+ pcache_req->bio = bio;
+ pcache_req->off = (u64)bio->bi_iter.bi_sector << SECTOR_SHIFT;
+ pcache_req->data_len = bio->bi_iter.bi_size;
+ INIT_LIST_HEAD(&pcache_req->list_node);
+ atomic_inc(&pcache->inflight_reqs);
+
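+ /*
+ * -EBUSY means the cache cannot take the request right now: park it on
+ * defered_req_list and let the worker retry it later. Otherwise drop
+ * the submission reference; the request completes once all reference
+ * holders have released it.
+ */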
+ ret = pcache_cache_handle_req(&pcache->cache, pcache_req);
+ if (ret == -EBUSY)
+ defer_req(pcache_req);
+ else
+ pcache_req_put(pcache_req, ret);
+
+ return DM_MAPIO_SUBMITTED;
+}
+
+static void dm_pcache_status(struct dm_target *ti, status_type_t type,
+ unsigned int status_flags, char *result,
+ unsigned int maxlen)
+{
+ struct dm_pcache *pcache = ti->private;
+ struct pcache_cache_dev *cache_dev = &pcache->cache_dev;
+ struct pcache_backing_dev *backing_dev = &pcache->backing_dev;
+ struct pcache_cache *cache = &pcache->cache;
+ unsigned int sz = 0;
+
+ switch (type) {
+ case STATUSTYPE_INFO:
+ DMEMIT("%x %u %u %u %u %x %u:%u %u:%u %u:%u",
+ cache_dev->sb_flags,
+ cache_dev->seg_num,
+ cache->n_segs,
+ bitmap_weight(cache->seg_map, cache->n_segs),
+ pcache_cache_get_gc_percent(cache),
+ cache->cache_info.flags,
+ cache->key_head.cache_seg->cache_seg_id,
+ cache->key_head.seg_off,
+ cache->dirty_tail.cache_seg->cache_seg_id,
+ cache->dirty_tail.seg_off,
+ cache->key_tail.cache_seg->cache_seg_id,
+ cache->key_tail.seg_off);
+ break;
+ case STATUSTYPE_TABLE:
+ DMEMIT("%s %s 4 cache_mode writeback crc %s",
+ cache_dev->dm_dev->name,
+ backing_dev->dm_dev->name,
+ cache_data_crc_on(cache) ? "true" : "false");
+ break;
+ case STATUSTYPE_IMA:
+ *result = '\0';
+ break;
+ }
+}
+
+static int dm_pcache_message(struct dm_target *ti, unsigned int argc,
+ char **argv, char *result, unsigned int maxlen)
+{
+ struct dm_pcache *pcache = ti->private;
+ unsigned long val;
+
+ if (argc != 2)
+ goto err;
+
+ if (!strcasecmp(argv[0], "gc_percent")) {
+ if (kstrtoul(argv[1], 10, &val))
+ goto err;
+
+ return pcache_cache_set_gc_percent(&pcache->cache, val);
+ }
+err:
+ return -EINVAL;
+}
+
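+/*
+ * Illustrative runtime interaction (device name hypothetical):
+ *
+ *   dmsetup status pcache0                   # INFO line, see dm_pcache_status()
+ *   dmsetup message pcache0 0 gc_percent 70  # tune GC threshold, see above
+ */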
+static struct target_type dm_pcache_target = {
+ .name = "pcache",
+ .version = {0, 1, 0},
+ .module = THIS_MODULE,
+ .features = DM_TARGET_SINGLETON,
+ .ctr = dm_pcache_ctr,
+ .dtr = dm_pcache_dtr,
+ .map = dm_pcache_map_bio,
+ .status = dm_pcache_status,
+ .message = dm_pcache_message,
+};
+
+static int __init dm_pcache_init(void)
+{
+ int ret;
+
+ ret = pcache_backing_init();
+ if (ret)
+ goto err;
+
+ ret = pcache_cache_init();
+ if (ret)
+ goto backing_exit;
+
+ ret = dm_register_target(&dm_pcache_target);
+ if (ret)
+ goto cache_exit;
+ return 0;
+
+cache_exit:
+ pcache_cache_exit();
+backing_exit:
+ pcache_backing_exit();
+err:
+ return ret;
+}
+module_init(dm_pcache_init);
+
+static void __exit dm_pcache_exit(void)
+{
+ dm_unregister_target(&dm_pcache_target);
+ pcache_cache_exit();
+ pcache_backing_exit();
+}
+module_exit(dm_pcache_exit);
+
+MODULE_DESCRIPTION("dm-pcache Persistent Cache for block device");
+MODULE_AUTHOR("Dongsheng Yang <dongsheng.yang@linux.dev>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-pcache/dm_pcache.h b/drivers/md/dm-pcache/dm_pcache.h
new file mode 100644
index 000000000000..b4e06be0c0b9
--- /dev/null
+++ b/drivers/md/dm-pcache/dm_pcache.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _DM_PCACHE_H
+#define _DM_PCACHE_H
+#include <linux/device-mapper.h>
+
+#include "../dm-core.h"
+
+#define CACHE_DEV_TO_PCACHE(cache_dev) (container_of(cache_dev, struct dm_pcache, cache_dev))
+#define BACKING_DEV_TO_PCACHE(backing_dev) (container_of(backing_dev, struct dm_pcache, backing_dev))
+#define CACHE_TO_PCACHE(cache) (container_of(cache, struct dm_pcache, cache))
+
+#define PCACHE_STATE_RUNNING 1
+#define PCACHE_STATE_STOPPING 2
+
+struct pcache_cache_dev;
+struct pcache_backing_dev;
+struct pcache_cache;
+struct pcache_cache_options;
+struct dm_pcache {
+ struct dm_target *ti;
+ struct pcache_cache_dev cache_dev;
+ struct pcache_backing_dev backing_dev;
+ struct pcache_cache cache;
+ struct pcache_cache_options opts;
+
+ spinlock_t defered_req_list_lock;
+ struct list_head defered_req_list;
+ struct workqueue_struct *task_wq;
+
+ struct work_struct defered_req_work;
+
+ atomic_t state;
+ atomic_t inflight_reqs;
+ wait_queue_head_t inflight_wq;
+};
+
+static inline bool pcache_is_stopping(struct dm_pcache *pcache)
+{
+ return (atomic_read(&pcache->state) == PCACHE_STATE_STOPPING);
+}
+
+#define pcache_dev_err(pcache, fmt, ...) \
+ pcache_err("%s " fmt, pcache->ti->table->md->name, ##__VA_ARGS__)
+#define pcache_dev_info(pcache, fmt, ...) \
+ pcache_info("%s " fmt, pcache->ti->table->md->name, ##__VA_ARGS__)
+#define pcache_dev_debug(pcache, fmt, ...) \
+ pcache_debug("%s " fmt, pcache->ti->table->md->name, ##__VA_ARGS__)
+
+struct pcache_request {
+ struct dm_pcache *pcache;
+ struct bio *bio;
+
+ u64 off;
+ u32 data_len;
+
+ struct kref ref;
+ int ret;
+
+ struct list_head list_node;
+};
+
+void pcache_req_get(struct pcache_request *pcache_req);
+void pcache_req_put(struct pcache_request *pcache_req, int ret);
+
+void pcache_defer_reqs_kick(struct dm_pcache *pcache);
+
+#endif /* _DM_PCACHE_H */
diff --git a/drivers/md/dm-pcache/pcache_internal.h b/drivers/md/dm-pcache/pcache_internal.h
new file mode 100644
index 000000000000..d427e534727c
--- /dev/null
+++ b/drivers/md/dm-pcache/pcache_internal.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _PCACHE_INTERNAL_H
+#define _PCACHE_INTERNAL_H
+
+#include <linux/delay.h>
+#include <linux/crc32c.h>
+
+#define pcache_err(fmt, ...) \
+ pr_err("dm-pcache: %s:%u " fmt, __func__, __LINE__, ##__VA_ARGS__)
+#define pcache_info(fmt, ...) \
+ pr_info("dm-pcache: %s:%u " fmt, __func__, __LINE__, ##__VA_ARGS__)
+#define pcache_debug(fmt, ...) \
+ pr_debug("dm-pcache: %s:%u " fmt, __func__, __LINE__, ##__VA_ARGS__)
+
+#define PCACHE_KB (1024ULL)
+#define PCACHE_MB (1024 * PCACHE_KB)
+
+/* Maximum number of metadata indices */
+#define PCACHE_META_INDEX_MAX 2
+
+#define PCACHE_CRC_SEED 0x3B15A
+/*
+ * struct pcache_meta_header - PCACHE metadata header structure
+ * @crc: CRC checksum for validating metadata integrity.
+ * @seq: Sequence number to track metadata updates.
+ * @version: Metadata version.
+ * @res: Reserved space for future use.
+ */
+struct pcache_meta_header {
+ __u32 crc;
+ __u8 seq;
+ __u8 version;
+ __u16 res;
+};
+
+/*
+ * pcache_meta_crc - Calculate CRC for the given metadata header.
+ * @header: Pointer to the metadata header.
+ * @meta_size: Size of the metadata structure.
+ *
+ * Returns the CRC checksum calculated by excluding the CRC field itself.
+ */
+static inline u32 pcache_meta_crc(struct pcache_meta_header *header, u32 meta_size)
+{
+ return crc32c(PCACHE_CRC_SEED, (void *)header + 4, meta_size - 4);
+}
+
+/*
+ * pcache_meta_seq_after - Check if a sequence number is more recent, accounting for overflow.
+ * @seq1: First sequence number.
+ * @seq2: Second sequence number.
+ *
+ * Determines if @seq1 is more recent than @seq2 by calculating the signed
+ * difference between them. This approach allows handling sequence number
+ * overflow correctly because the difference wraps naturally, and any value
+ * greater than zero indicates that @seq1 is "after" @seq2. This method
+ * assumes 8-bit unsigned sequence numbers, where the difference wraps
+ * around if seq1 overflows past seq2.
+ *
+ * Returns:
+ * - true if @seq1 is more recent than @seq2, indicating it comes "after"
+ * - false otherwise.
+ */
+static inline bool pcache_meta_seq_after(u8 seq1, u8 seq2)
+{
+ return (s8)(seq1 - seq2) > 0;
+}
+
+/*
+ * pcache_meta_find_latest - Find the latest valid metadata.
+ * @header: Pointer to the first metadata slot.
+ * @meta_size: Size of the metadata structure.
+ * @meta_max_size: Size reserved for each metadata slot.
+ * @meta_ret: Buffer that receives a copy of the latest metadata.
+ *
+ * Finds the latest valid metadata by checking sequence numbers. If a
+ * valid entry with the highest sequence number is found, its pointer
+ * is returned and a copy is stored in @meta_ret. Returns NULL if no
+ * valid metadata is found, or an ERR_PTR() on a hardware memory error.
+ */
+static inline void __must_check *pcache_meta_find_latest(struct pcache_meta_header *header,
+ u32 meta_size, u32 meta_max_size,
+ void *meta_ret)
+{
+ struct pcache_meta_header *meta, *latest = NULL;
+ u32 i, seq_latest = 0;
+ void *meta_addr;
+
+ meta = meta_ret;
+
+ for (i = 0; i < PCACHE_META_INDEX_MAX; i++) {
+ meta_addr = (void *)header + (i * meta_max_size);
+ if (copy_mc_to_kernel(meta, meta_addr, meta_size)) {
+ pcache_err("hardware memory error when copy meta");
+ return ERR_PTR(-EIO);
+ }
+
+ /* Skip if CRC check fails, which means corrupted */
+ if (meta->crc != pcache_meta_crc(meta, meta_size))
+ continue;
+
+ /* Update latest if a more recent sequence is found */
+ if (!latest || pcache_meta_seq_after(meta->seq, seq_latest)) {
+ seq_latest = meta->seq;
+ latest = (void *)header + (i * meta_max_size);
+ }
+ }
+
+ if (!latest)
+ return NULL;
+
+ if (copy_mc_to_kernel(meta_ret, latest, meta_size)) {
+ pcache_err("hardware memory error");
+ return ERR_PTR(-EIO);
+ }
+
+ return latest;
+}
+
+#endif /* _PCACHE_INTERNAL_H */
diff --git a/drivers/md/dm-pcache/segment.c b/drivers/md/dm-pcache/segment.c
new file mode 100644
index 000000000000..7e9818701445
--- /dev/null
+++ b/drivers/md/dm-pcache/segment.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/dax.h>
+
+#include "pcache_internal.h"
+#include "cache_dev.h"
+#include "segment.h"
+
+int segment_copy_to_bio(struct pcache_segment *segment,
+ u32 data_off, u32 data_len, struct bio *bio, u32 bio_off)
+{
+ struct iov_iter iter;
+ size_t copied;
+ void *src;
+
+ iov_iter_bvec(&iter, ITER_DEST, &bio->bi_io_vec[bio->bi_iter.bi_idx],
+ bio_segments(bio), bio->bi_iter.bi_size);
+ iter.iov_offset = bio->bi_iter.bi_bvec_done;
+ if (bio_off)
+ iov_iter_advance(&iter, bio_off);
+
+ src = segment->data + data_off;
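+ /* machine-check-safe copy: a poisoned pmem read returns a short copy */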
+ copied = _copy_mc_to_iter(src, data_len, &iter);
+ if (copied != data_len)
+ return -EIO;
+
+ return 0;
+}
+
+int segment_copy_from_bio(struct pcache_segment *segment,
+ u32 data_off, u32 data_len, struct bio *bio, u32 bio_off)
+{
+ struct iov_iter iter;
+ size_t copied;
+ void *dst;
+
+ iov_iter_bvec(&iter, ITER_SOURCE, &bio->bi_io_vec[bio->bi_iter.bi_idx],
+ bio_segments(bio), bio->bi_iter.bi_size);
+ iter.iov_offset = bio->bi_iter.bi_bvec_done;
+ if (bio_off)
+ iov_iter_advance(&iter, bio_off);
+
+ dst = segment->data + data_off;
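+ /* flushcache copy plus the pmem_wmb() below make the write durable */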
+ copied = _copy_from_iter_flushcache(dst, data_len, &iter);
+ if (copied != data_len)
+ return -EIO;
+ pmem_wmb();
+
+ return 0;
+}
+
+void pcache_segment_init(struct pcache_cache_dev *cache_dev, struct pcache_segment *segment,
+ struct pcache_segment_init_options *options)
+{
+ segment->seg_info = options->seg_info;
+ segment_info_set_type(segment->seg_info, options->type);
+
+ segment->cache_dev = cache_dev;
+ segment->seg_id = options->seg_id;
+ segment->data_size = PCACHE_SEG_SIZE - options->data_off;
+ segment->data = CACHE_DEV_SEGMENT(cache_dev, options->seg_id) + options->data_off;
+}
diff --git a/drivers/md/dm-pcache/segment.h b/drivers/md/dm-pcache/segment.h
new file mode 100644
index 000000000000..deca1ddcb02b
--- /dev/null
+++ b/drivers/md/dm-pcache/segment.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _PCACHE_SEGMENT_H
+#define _PCACHE_SEGMENT_H
+
+#include <linux/bio.h>
+#include <linux/bitfield.h>
+
+#include "pcache_internal.h"
+
+struct pcache_segment_info {
+ struct pcache_meta_header header;
+ __u32 flags;
+ __u32 next_seg;
+};
+
+#define PCACHE_SEG_INFO_FLAGS_HAS_NEXT BIT(0)
+
+#define PCACHE_SEG_INFO_FLAGS_TYPE_MASK GENMASK(4, 1)
+#define PCACHE_SEGMENT_TYPE_CACHE_DATA 1
+
+static inline bool segment_info_has_next(struct pcache_segment_info *seg_info)
+{
+ return (seg_info->flags & PCACHE_SEG_INFO_FLAGS_HAS_NEXT);
+}
+
+static inline void segment_info_set_type(struct pcache_segment_info *seg_info, u8 type)
+{
+ seg_info->flags &= ~PCACHE_SEG_INFO_FLAGS_TYPE_MASK;
+ seg_info->flags |= FIELD_PREP(PCACHE_SEG_INFO_FLAGS_TYPE_MASK, type);
+}
+
+static inline u8 segment_info_get_type(struct pcache_segment_info *seg_info)
+{
+ return FIELD_GET(PCACHE_SEG_INFO_FLAGS_TYPE_MASK, seg_info->flags);
+}
+
+struct pcache_segment_pos {
+ struct pcache_segment *segment; /* Segment associated with the position */
+ u32 off; /* Offset within the segment */
+};
+
+struct pcache_segment_init_options {
+ u8 type;
+ u32 seg_id;
+ u32 data_off;
+
+ struct pcache_segment_info *seg_info;
+};
+
+struct pcache_segment {
+ struct pcache_cache_dev *cache_dev;
+
+ void *data;
+ u32 data_size;
+ u32 seg_id;
+
+ struct pcache_segment_info *seg_info;
+};
+
+int segment_copy_to_bio(struct pcache_segment *segment,
+ u32 data_off, u32 data_len, struct bio *bio, u32 bio_off);
+int segment_copy_from_bio(struct pcache_segment *segment,
+ u32 data_off, u32 data_len, struct bio *bio, u32 bio_off);
+
+static inline void segment_pos_advance(struct pcache_segment_pos *seg_pos, u32 len)
+{
+ BUG_ON(seg_pos->off + len > seg_pos->segment->data_size);
+
+ seg_pos->off += len;
+}
+
+void pcache_segment_init(struct pcache_cache_dev *cache_dev, struct pcache_segment *segment,
+ struct pcache_segment_init_options *options);
+#endif /* _PCACHE_SEGMENT_H */
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index f4b904e24328..c6f7129e43d3 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -3247,7 +3247,7 @@ size_check:
rs_reset_inconclusive_reshape(rs);
/* Start raid set read-only and assumed clean to change in raid_resume() */
- rs->md.ro = 1;
+ rs->md.ro = MD_RDONLY;
rs->md.in_sync = 1;
/* Has to be held on running the array */
@@ -3385,7 +3385,7 @@ static enum sync_state decipher_sync_action(struct mddev *mddev, unsigned long r
/* The MD sync thread can be done with io or be interrupted but still be running */
if (!test_bit(MD_RECOVERY_DONE, &recovery) &&
(test_bit(MD_RECOVERY_RUNNING, &recovery) ||
- (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery)))) {
+ (md_is_rdwr(mddev) && test_bit(MD_RECOVERY_NEEDED, &recovery)))) {
if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
return st_reshape;
@@ -3775,11 +3775,11 @@ static int raid_message(struct dm_target *ti, unsigned int argc, char **argv,
} else
return -EINVAL;
}
- if (mddev->ro == 2) {
+ if (mddev->ro == MD_AUTO_READ) {
/* A write to sync_action is enough to justify
* canceling read-auto mode
*/
- mddev->ro = 0;
+ mddev->ro = MD_RDWR;
if (!mddev->suspended)
md_wakeup_thread(mddev->sync_thread);
}
@@ -3860,6 +3860,7 @@ static void raid_postsuspend(struct dm_target *ti)
*/
md_stop_writes(&rs->md);
mddev_suspend(&rs->md, false);
+ rs->md.ro = MD_RDONLY;
}
}
@@ -3955,9 +3956,11 @@ static int __load_dirty_region_bitmap(struct raid_set *rs)
!test_and_set_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags)) {
struct mddev *mddev = &rs->md;
- r = mddev->bitmap_ops->load(mddev);
- if (r)
- DMERR("Failed to load bitmap");
+ if (md_bitmap_enabled(mddev, false)) {
+ r = mddev->bitmap_ops->load(mddev);
+ if (r)
+ DMERR("Failed to load bitmap");
+ }
}
return r;
@@ -3970,7 +3973,7 @@ static void rs_update_sbs(struct raid_set *rs)
int ro = mddev->ro;
set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
- mddev->ro = 0;
+ mddev->ro = MD_RDWR;
md_update_sb(mddev, 1);
mddev->ro = ro;
}
@@ -4072,10 +4075,12 @@ static int raid_preresume(struct dm_target *ti)
mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)))) {
int chunksize = to_bytes(rs->requested_bitmap_chunk_sectors) ?: mddev->bitmap_info.chunksize;
- r = mddev->bitmap_ops->resize(mddev, mddev->dev_sectors,
- chunksize, false);
- if (r)
- DMERR("Failed to resize bitmap");
+ if (md_bitmap_enabled(mddev, false)) {
+ r = mddev->bitmap_ops->resize(mddev, mddev->dev_sectors,
+ chunksize);
+ if (r)
+ DMERR("Failed to resize bitmap");
+ }
}
/* Check for any resize/reshape on @rs and adjust/initiate */
@@ -4127,7 +4132,7 @@ static void raid_resume(struct dm_target *ti)
WARN_ON_ONCE(rcu_dereference_protected(mddev->sync_thread,
lockdep_is_held(&mddev->reconfig_mutex)));
clear_bit(RT_FLAG_RS_FROZEN, &rs->runtime_flags);
- mddev->ro = 0;
+ mddev->ro = MD_RDWR;
mddev->in_sync = 0;
md_unfrozen_sync_thread(mddev);
mddev_unlock_and_resume(mddev);
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
index a4550975c27d..e9b47b659976 100644
--- a/drivers/md/dm-region-hash.c
+++ b/drivers/md/dm-region-hash.c
@@ -206,7 +206,7 @@ struct dm_region_hash *dm_region_hash_create(
rh->shift = RH_HASH_SHIFT;
rh->prime = RH_HASH_MULT;
- rh->buckets = vmalloc(array_size(nr_buckets, sizeof(*rh->buckets)));
+ rh->buckets = vmalloc_array(nr_buckets, sizeof(*rh->buckets));
if (!rh->buckets) {
DMERR("unable to allocate region hash bucket memory");
kfree(rh);
diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c
index bb1a70b5a215..50a52ca50b34 100644
--- a/drivers/md/dm-switch.c
+++ b/drivers/md/dm-switch.c
@@ -114,8 +114,8 @@ static int alloc_region_table(struct dm_target *ti, unsigned int nr_paths)
return -EINVAL;
}
- sctx->region_table = vmalloc(array_size(nr_slots,
- sizeof(region_table_slot_t)));
+ sctx->region_table = vmalloc_array(nr_slots,
+ sizeof(region_table_slot_t));
if (!sctx->region_table) {
ti->error = "Cannot allocate region table";
return -ENOMEM;
diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c
index 2af5a9514c05..8fede41adec0 100644
--- a/drivers/md/dm-target.c
+++ b/drivers/md/dm-target.c
@@ -263,7 +263,8 @@ static long io_err_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
static struct target_type error_target = {
.name = "error",
.version = {1, 7, 0},
- .features = DM_TARGET_WILDCARD | DM_TARGET_ZONED_HM,
+ .features = DM_TARGET_WILDCARD | DM_TARGET_ZONED_HM |
+ DM_TARGET_PASSES_INTEGRITY,
.ctr = io_err_ctr,
.dtr = io_err_dtr,
.map = io_err_map,
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 007bb93e5fca..c84149ba4e38 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -3031,8 +3031,8 @@ static struct pool *pool_create(struct mapped_device *pool_md,
}
pool->cell_sort_array =
- vmalloc(array_size(CELL_SORT_ARRAY_SIZE,
- sizeof(*pool->cell_sort_array)));
+ vmalloc_array(CELL_SORT_ARRAY_SIZE,
+ sizeof(*pool->cell_sort_array));
if (!pool->cell_sort_array) {
*error = "Error allocating cell sort array";
err_p = ERR_PTR(-ENOMEM);
diff --git a/drivers/md/dm-vdo/data-vio.c b/drivers/md/dm-vdo/data-vio.c
index 810002747091..262e11581f2d 100644
--- a/drivers/md/dm-vdo/data-vio.c
+++ b/drivers/md/dm-vdo/data-vio.c
@@ -17,6 +17,7 @@
#include <linux/minmax.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
+#include <linux/string.h>
#include <linux/wait.h>
#include "logger.h"
@@ -509,18 +510,6 @@ static void launch_data_vio(struct data_vio *data_vio, logical_block_number_t lb
vdo_enqueue_completion(completion, VDO_DEFAULT_Q_MAP_BIO_PRIORITY);
}
-static bool is_zero_block(char *block)
-{
- int i;
-
- for (i = 0; i < VDO_BLOCK_SIZE; i += sizeof(u64)) {
- if (*((u64 *) &block[i]))
- return false;
- }
-
- return true;
-}
-
static void copy_from_bio(struct bio *bio, char *data_ptr)
{
struct bio_vec biovec;
@@ -572,7 +561,7 @@ static void launch_bio(struct vdo *vdo, struct data_vio *data_vio, struct bio *b
* we acknowledge the bio.
*/
copy_from_bio(bio, data_vio->vio.data);
- data_vio->is_zero = is_zero_block(data_vio->vio.data);
+ data_vio->is_zero = mem_is_zero(data_vio->vio.data, VDO_BLOCK_SIZE);
data_vio->write = true;
}
@@ -1459,7 +1448,7 @@ static void modify_for_partial_write(struct vdo_completion *completion)
copy_from_bio(bio, data + data_vio->offset);
}
- data_vio->is_zero = is_zero_block(data);
+ data_vio->is_zero = mem_is_zero(data, VDO_BLOCK_SIZE);
data_vio->read = false;
launch_data_vio_logical_callback(data_vio,
continue_data_vio_with_block_map_slot);
diff --git a/drivers/md/dm-vdo/indexer/volume-index.c b/drivers/md/dm-vdo/indexer/volume-index.c
index 12f954a0c532..afb062e1f1fb 100644
--- a/drivers/md/dm-vdo/indexer/volume-index.c
+++ b/drivers/md/dm-vdo/indexer/volume-index.c
@@ -836,7 +836,7 @@ static int start_restoring_volume_sub_index(struct volume_sub_index *sub_index,
"%zu bytes decoded of %zu expected", offset,
sizeof(buffer));
if (result != VDO_SUCCESS)
- result = UDS_CORRUPT_DATA;
+ return UDS_CORRUPT_DATA;
if (memcmp(header.magic, MAGIC_START_5, MAGIC_SIZE) != 0) {
return vdo_log_warning_strerror(UDS_CORRUPT_DATA,
@@ -928,7 +928,7 @@ static int start_restoring_volume_index(struct volume_index *volume_index,
"%zu bytes decoded of %zu expected", offset,
sizeof(buffer));
if (result != VDO_SUCCESS)
- result = UDS_CORRUPT_DATA;
+ return UDS_CORRUPT_DATA;
if (memcmp(header.magic, MAGIC_START_6, MAGIC_SIZE) != 0)
return vdo_log_warning_strerror(UDS_CORRUPT_DATA,
diff --git a/drivers/md/dm-vdo/vio.c b/drivers/md/dm-vdo/vio.c
index e7f4153e55e3..8fc22fb14196 100644
--- a/drivers/md/dm-vdo/vio.c
+++ b/drivers/md/dm-vdo/vio.c
@@ -212,7 +212,7 @@ int vio_reset_bio_with_size(struct vio *vio, char *data, int size, bio_end_io_t
return VDO_SUCCESS;
bio->bi_ioprio = 0;
- bio->bi_io_vec = bio->bi_inline_vecs;
+ bio->bi_io_vec = bio_inline_vecs(bio);
bio->bi_max_vecs = vio->block_count + 1;
if (VDO_ASSERT(size <= vio_size, "specified size %d is not greater than allocated %d",
size, vio_size) != VDO_SUCCESS)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index a44e8c2dccee..f5e5e59b232b 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -403,9 +403,9 @@ static void do_deferred_remove(struct work_struct *w)
dm_deferred_remove();
}
-static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+static int dm_blk_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
- struct mapped_device *md = bdev->bd_disk->private_data;
+ struct mapped_device *md = disk->private_data;
return dm_get_geometry(md, geo);
}
@@ -490,18 +490,13 @@ u64 dm_start_time_ns_from_clone(struct bio *bio)
}
EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone);
-static inline bool bio_is_flush_with_data(struct bio *bio)
-{
- return ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size);
-}
-
static inline unsigned int dm_io_sectors(struct dm_io *io, struct bio *bio)
{
/*
* If REQ_PREFLUSH set, don't account payload, it will be
* submitted (and accounted) after this flush completes.
*/
- if (bio_is_flush_with_data(bio))
+ if (io->requeue_flush_with_data)
return 0;
if (unlikely(dm_io_flagged(io, DM_IO_WAS_SPLIT)))
return io->sectors;
@@ -590,6 +585,7 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio, gfp_t g
io = container_of(tio, struct dm_io, tio);
io->magic = DM_IO_MAGIC;
io->status = BLK_STS_OK;
+ io->requeue_flush_with_data = false;
/* one ref is for submission, the other is for completion */
atomic_set(&io->io_count, 2);
@@ -948,6 +944,7 @@ static void __dm_io_complete(struct dm_io *io, bool first_stage)
struct mapped_device *md = io->md;
blk_status_t io_error;
bool requeued;
+ bool requeue_flush_with_data;
requeued = dm_handle_requeue(io, first_stage);
if (requeued && first_stage)
@@ -964,6 +961,7 @@ static void __dm_io_complete(struct dm_io *io, bool first_stage)
__dm_start_io_acct(io);
dm_end_io_acct(io);
}
+ requeue_flush_with_data = io->requeue_flush_with_data;
free_io(io);
smp_wmb();
this_cpu_dec(*md->pending_io);
@@ -976,7 +974,7 @@ static void __dm_io_complete(struct dm_io *io, bool first_stage)
if (requeued)
return;
- if (bio_is_flush_with_data(bio)) {
+ if (unlikely(requeue_flush_with_data)) {
/*
* Preflush done for flush with data, reissue
* without REQ_PREFLUSH.
@@ -1996,12 +1994,30 @@ static void dm_split_and_process_bio(struct mapped_device *md,
}
init_clone_info(&ci, io, map, bio, is_abnormal);
- if (bio->bi_opf & REQ_PREFLUSH) {
+ if (unlikely((bio->bi_opf & REQ_PREFLUSH) != 0)) {
+ /*
+ * The "flush_bypasses_map" is set on targets where it is safe
+ * to skip the map function and submit bios directly to the
+ * underlying block devices - currently, it is set for dm-linear
+ * and dm-stripe.
+ *
+ * If we have just one underlying device (i.e. there is one
+ * linear target or multiple linear targets pointing to the same
+ * device), we can send the flush with data directly to it.
+ */
+ if (map->flush_bypasses_map) {
+ struct list_head *devices = dm_table_get_devices(map);
+ if (devices->next == devices->prev)
+ goto send_preflush_with_data;
+ }
+ if (bio->bi_iter.bi_size)
+ io->requeue_flush_with_data = true;
__send_empty_flush(&ci);
/* dm_io_complete submits any data associated with flush */
goto out;
}
+send_preflush_with_data:
if (static_branch_unlikely(&zoned_enabled) &&
(bio_op(bio) == REQ_OP_ZONE_RESET_ALL)) {
error = __send_zone_reset_all(&ci);
@@ -2908,7 +2924,7 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
{
bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
- int r;
+ int r = 0;
lockdep_assert_held(&md->suspend_lock);
@@ -2960,8 +2976,10 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
* Stop md->queue before flushing md->wq in case request-based
* dm defers requests to md->wq from md->queue.
*/
- if (dm_request_based(md))
+ if (map && dm_request_based(md)) {
dm_stop_queue(md->queue);
+ set_bit(DMF_QUEUE_STOPPED, &md->flags);
+ }
flush_workqueue(md->wq);
@@ -2970,7 +2988,8 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
* We call dm_wait_for_completion to wait for all existing requests
* to finish.
*/
- r = dm_wait_for_completion(md, task_state);
+ if (map)
+ r = dm_wait_for_completion(md, task_state);
if (!r)
set_bit(dmf_suspended_flag, &md->flags);
@@ -2983,7 +3002,7 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
if (r < 0) {
dm_queue_flush(md);
- if (dm_request_based(md))
+ if (test_and_clear_bit(DMF_QUEUE_STOPPED, &md->flags))
dm_start_queue(md->queue);
unlock_fs(md);
@@ -3067,7 +3086,7 @@ static int __dm_resume(struct mapped_device *md, struct dm_table *map)
* so that mapping of targets can work correctly.
* Request-based dm is queueing the deferred I/Os in its request_queue.
*/
- if (dm_request_based(md))
+ if (test_and_clear_bit(DMF_QUEUE_STOPPED, &md->flags))
dm_start_queue(md->queue);
unlock_fs(md);
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index 334b71404930..84b7e2af6dba 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -34,15 +34,6 @@
#include "md-bitmap.h"
#include "md-cluster.h"
-#define BITMAP_MAJOR_LO 3
-/* version 4 insists the bitmap is in little-endian order
- * with version 3, it is host-endian which is non-portable
- * Version 5 is currently set only for clustered devices
- */
-#define BITMAP_MAJOR_HI 4
-#define BITMAP_MAJOR_CLUSTERED 5
-#define BITMAP_MAJOR_HOSTENDIAN 3
-
/*
* in-memory bitmap:
*
@@ -224,6 +215,8 @@ struct bitmap {
int cluster_slot;
};
+static struct workqueue_struct *md_bitmap_wq;
+
static int __bitmap_resize(struct bitmap *bitmap, sector_t blocks,
int chunksize, bool init);
@@ -232,20 +225,19 @@ static inline char *bmname(struct bitmap *bitmap)
return bitmap->mddev ? mdname(bitmap->mddev) : "mdX";
}
-static bool __bitmap_enabled(struct bitmap *bitmap)
-{
- return bitmap->storage.filemap &&
- !test_bit(BITMAP_STALE, &bitmap->flags);
-}
-
-static bool bitmap_enabled(struct mddev *mddev)
+static bool bitmap_enabled(void *data, bool flush)
{
- struct bitmap *bitmap = mddev->bitmap;
+ struct bitmap *bitmap = data;
- if (!bitmap)
- return false;
+ if (!flush)
+ return true;
- return __bitmap_enabled(bitmap);
+ /*
+ * If caller want to flush bitmap pages to underlying disks, check if
+ * there are cached pages in filemap.
+ */
+ return !test_bit(BITMAP_STALE, &bitmap->flags) &&
+ bitmap->storage.filemap != NULL;
}
/*
@@ -484,7 +476,8 @@ static int __write_sb_page(struct md_rdev *rdev, struct bitmap *bitmap,
return -EINVAL;
}
- md_super_write(mddev, rdev, sboff + ps, (int)min(size, bitmap_limit), page);
+ md_write_metadata(mddev, rdev, sboff + ps, (int)min(size, bitmap_limit),
+ page, 0);
return 0;
}
@@ -1244,7 +1237,7 @@ static void __bitmap_unplug(struct bitmap *bitmap)
int dirty, need_write;
int writing = 0;
- if (!__bitmap_enabled(bitmap))
+ if (!bitmap_enabled(bitmap, true))
return;
/* look at each page to see if there are any set bits that need to be
@@ -1788,15 +1781,9 @@ static bool __bitmap_start_sync(struct bitmap *bitmap, sector_t offset,
sector_t *blocks, bool degraded)
{
bitmap_counter_t *bmc;
- bool rv;
+ bool rv = false;
- if (bitmap == NULL) {/* FIXME or bitmap set as 'failed' */
- *blocks = 1024;
- return true; /* always resync if no bitmap */
- }
spin_lock_irq(&bitmap->counts.lock);
-
- rv = false;
bmc = md_bitmap_get_counter(&bitmap->counts, offset, blocks, 0);
if (bmc) {
/* locked */
@@ -1845,10 +1832,6 @@ static void __bitmap_end_sync(struct bitmap *bitmap, sector_t offset,
bitmap_counter_t *bmc;
unsigned long flags;
- if (bitmap == NULL) {
- *blocks = 1024;
- return;
- }
spin_lock_irqsave(&bitmap->counts.lock, flags);
bmc = md_bitmap_get_counter(&bitmap->counts, offset, blocks, 0);
if (bmc == NULL)
@@ -2060,9 +2043,6 @@ static void bitmap_start_behind_write(struct mddev *mddev)
struct bitmap *bitmap = mddev->bitmap;
int bw;
- if (!bitmap)
- return;
-
atomic_inc(&bitmap->behind_writes);
bw = atomic_read(&bitmap->behind_writes);
if (bw > bitmap->behind_writes_used)
@@ -2076,9 +2056,6 @@ static void bitmap_end_behind_write(struct mddev *mddev)
{
struct bitmap *bitmap = mddev->bitmap;
- if (!bitmap)
- return;
-
if (atomic_dec_and_test(&bitmap->behind_writes))
wake_up(&bitmap->behind_wait);
pr_debug("dec write-behind count %d/%lu\n",
@@ -2593,15 +2570,14 @@ err:
return ret;
}
-static int bitmap_resize(struct mddev *mddev, sector_t blocks, int chunksize,
- bool init)
+static int bitmap_resize(struct mddev *mddev, sector_t blocks, int chunksize)
{
struct bitmap *bitmap = mddev->bitmap;
if (!bitmap)
return 0;
- return __bitmap_resize(bitmap, blocks, chunksize, init);
+ return __bitmap_resize(bitmap, blocks, chunksize, false);
}
static ssize_t
@@ -2990,12 +2966,19 @@ static struct attribute *md_bitmap_attrs[] = {
&max_backlog_used.attr,
NULL
};
-const struct attribute_group md_bitmap_group = {
+
+static struct attribute_group md_bitmap_group = {
.name = "bitmap",
.attrs = md_bitmap_attrs,
};
static struct bitmap_operations bitmap_ops = {
+ .head = {
+ .type = MD_BITMAP,
+ .id = ID_BITMAP,
+ .name = "bitmap",
+ },
+
.enabled = bitmap_enabled,
.create = bitmap_create,
.resize = bitmap_resize,
@@ -3013,6 +2996,9 @@ static struct bitmap_operations bitmap_ops = {
.start_write = bitmap_start_write,
.end_write = bitmap_end_write,
+ .start_discard = bitmap_start_write,
+ .end_discard = bitmap_end_write,
+
.start_sync = bitmap_start_sync,
.end_sync = bitmap_end_sync,
.cond_end_sync = bitmap_cond_end_sync,
@@ -3026,9 +3012,22 @@ static struct bitmap_operations bitmap_ops = {
.copy_from_slot = bitmap_copy_from_slot,
.set_pages = bitmap_set_pages,
.free = md_bitmap_free,
+
+ .group = &md_bitmap_group,
};
-void mddev_set_bitmap_ops(struct mddev *mddev)
+int md_bitmap_init(void)
+{
+ md_bitmap_wq = alloc_workqueue("md_bitmap", WQ_MEM_RECLAIM | WQ_UNBOUND,
+ 0);
+ if (!md_bitmap_wq)
+ return -ENOMEM;
+
+ return register_md_submodule(&bitmap_ops.head);
+}
+
+void md_bitmap_exit(void)
{
- mddev->bitmap_ops = &bitmap_ops;
+ destroy_workqueue(md_bitmap_wq);
+ unregister_md_submodule(&bitmap_ops.head);
}
diff --git a/drivers/md/md-bitmap.h b/drivers/md/md-bitmap.h
index 59e9dd45cfde..b42a28fa83a0 100644
--- a/drivers/md/md-bitmap.h
+++ b/drivers/md/md-bitmap.h
@@ -9,10 +9,26 @@
#define BITMAP_MAGIC 0x6d746962
+/*
+ * version 3 is host-endian order; it is deprecated and not used for new
+ * arrays
+ */
+#define BITMAP_MAJOR_LO 3
+#define BITMAP_MAJOR_HOSTENDIAN 3
+/* version 4 is little-endian order, the default value */
+#define BITMAP_MAJOR_HI 4
+/* version 5 is only used for cluster */
+#define BITMAP_MAJOR_CLUSTERED 5
+/* version 6 is only used for lockless bitmap */
+#define BITMAP_MAJOR_LOCKLESS 6
+
/* use these for bitmap->flags and bitmap->sb->state bit-fields */
enum bitmap_state {
- BITMAP_STALE = 1, /* the bitmap file is out of date or had -EIO */
+ BITMAP_STALE = 1, /* the bitmap file is out of date or had -EIO */
BITMAP_WRITE_ERROR = 2, /* A write error has occurred */
+ BITMAP_FIRST_USE = 3, /* llbitmap is just created */
+ BITMAP_CLEAN = 4, /* llbitmap is created with assume_clean */
+ BITMAP_DAEMON_BUSY = 5, /* llbitmap daemon is not finished after daemon_sleep */
BITMAP_HOSTENDIAN =15,
};
@@ -61,11 +77,15 @@ struct md_bitmap_stats {
struct file *file;
};
+typedef void (md_bitmap_fn)(struct mddev *mddev, sector_t offset,
+ unsigned long sectors);
+
struct bitmap_operations {
- bool (*enabled)(struct mddev *mddev);
+ struct md_submodule_head head;
+
+ bool (*enabled)(void *data, bool flush);
int (*create)(struct mddev *mddev);
- int (*resize)(struct mddev *mddev, sector_t blocks, int chunksize,
- bool init);
+ int (*resize)(struct mddev *mddev, sector_t blocks, int chunksize);
int (*load)(struct mddev *mddev);
void (*destroy)(struct mddev *mddev);
@@ -80,10 +100,13 @@ struct bitmap_operations {
void (*end_behind_write)(struct mddev *mddev);
void (*wait_behind_writes)(struct mddev *mddev);
- void (*start_write)(struct mddev *mddev, sector_t offset,
- unsigned long sectors);
- void (*end_write)(struct mddev *mddev, sector_t offset,
- unsigned long sectors);
+ md_bitmap_fn *start_write;
+ md_bitmap_fn *end_write;
+ md_bitmap_fn *start_discard;
+ md_bitmap_fn *end_discard;
+
+ sector_t (*skip_sync_blocks)(struct mddev *mddev, sector_t offset);
+ bool (*blocks_synced)(struct mddev *mddev, sector_t offset);
bool (*start_sync)(struct mddev *mddev, sector_t offset,
sector_t *blocks, bool degraded);
void (*end_sync)(struct mddev *mddev, sector_t offset, sector_t *blocks);
@@ -101,9 +124,75 @@ struct bitmap_operations {
sector_t *hi, bool clear_bits);
void (*set_pages)(void *data, unsigned long pages);
void (*free)(void *data);
+
+ struct attribute_group *group;
};
/* the bitmap API */
-void mddev_set_bitmap_ops(struct mddev *mddev);
+static inline bool md_bitmap_registered(struct mddev *mddev)
+{
+ return mddev->bitmap_ops != NULL;
+}
+
+static inline bool md_bitmap_enabled(struct mddev *mddev, bool flush)
+{
+ /* bitmap_ops must be registered before creating bitmap. */
+ if (!md_bitmap_registered(mddev))
+ return false;
+
+ if (!mddev->bitmap)
+ return false;
+
+ return mddev->bitmap_ops->enabled(mddev->bitmap, flush);
+}
+
+static inline bool md_bitmap_start_sync(struct mddev *mddev, sector_t offset,
+ sector_t *blocks, bool degraded)
+{
+ /* always resync if no bitmap */
+ if (!md_bitmap_enabled(mddev, false)) {
+ *blocks = 1024;
+ return true;
+ }
+
+ return mddev->bitmap_ops->start_sync(mddev, offset, blocks, degraded);
+}
+
+static inline void md_bitmap_end_sync(struct mddev *mddev, sector_t offset,
+ sector_t *blocks)
+{
+ if (!md_bitmap_enabled(mddev, false)) {
+ *blocks = 1024;
+ return;
+ }
+
+ mddev->bitmap_ops->end_sync(mddev, offset, blocks);
+}
+
+#ifdef CONFIG_MD_BITMAP
+int md_bitmap_init(void);
+void md_bitmap_exit(void);
+#else
+static inline int md_bitmap_init(void)
+{
+ return 0;
+}
+static inline void md_bitmap_exit(void)
+{
+}
+#endif
+
+#ifdef CONFIG_MD_LLBITMAP
+int md_llbitmap_init(void);
+void md_llbitmap_exit(void);
+#else
+static inline int md_llbitmap_init(void)
+{
+ return 0;
+}
+static inline void md_llbitmap_exit(void)
+{
+}
+#endif
#endif
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 6e9a0045f0ff..11f1e91d387d 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -630,7 +630,7 @@ static int process_recvd_msg(struct mddev *mddev, struct cluster_msg *msg)
if (le64_to_cpu(msg->high) != mddev->pers->size(mddev, 0, 0))
ret = mddev->bitmap_ops->resize(mddev,
le64_to_cpu(msg->high),
- 0, false);
+ 0);
break;
default:
ret = -1;
diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c
index 3e1f165c2d20..7033d982d377 100644
--- a/drivers/md/md-linear.c
+++ b/drivers/md/md-linear.c
@@ -257,18 +257,10 @@ static bool linear_make_request(struct mddev *mddev, struct bio *bio)
if (unlikely(bio_end_sector(bio) > end_sector)) {
/* This bio crosses a device boundary, so we have to split it */
- struct bio *split = bio_split(bio, end_sector - bio_sector,
- GFP_NOIO, &mddev->bio_set);
-
- if (IS_ERR(split)) {
- bio->bi_status = errno_to_blk_status(PTR_ERR(split));
- bio_endio(bio);
+ bio = bio_submit_split_bioset(bio, end_sector - bio_sector,
+ &mddev->bio_set);
+ if (!bio)
return true;
- }
-
- bio_chain(split, bio);
- submit_bio_noacct(bio);
- bio = split;
}
md_account_bio(mddev, &bio);
diff --git a/drivers/md/md-llbitmap.c b/drivers/md/md-llbitmap.c
new file mode 100644
index 000000000000..1eb434306162
--- /dev/null
+++ b/drivers/md/md-llbitmap.c
@@ -0,0 +1,1626 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/blkdev.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/sched.h>
+#include <linux/list.h>
+#include <linux/file.h>
+#include <linux/seq_file.h>
+#include <trace/events/block.h>
+
+#include "md.h"
+#include "md-bitmap.h"
+
+/*
+ * #### Background
+ *
+ * Redundant data is used to enhance data fault tolerance, and the storage
+ * methods for redundant data vary depending on the RAID levels. And it's
+ * important to maintain the consistency of redundant data.
+ *
+ * Bitmap is used to record which data blocks have been synchronized and which
+ * ones need to be resynchronized or recovered. Each bit in the bitmap
+ * represents a segment of data in the array. When a bit is set, it indicates
+ * that the multiple redundant copies of that data segment may not be
+ * consistent. Data synchronization can be performed based on the bitmap after
+ * power failure or readding a disk. If there is no bitmap, a full disk
+ * synchronization is required.
+ *
+ * #### Key Features
+ *
+ * - the IO fastpath is lockless; if the user issues lots of write IO to the
+ * same bitmap bit in a short time, only the first write has the additional
+ * overhead of updating the bitmap bit, and the following writes have none;
+ * - only written data is resynced or recovered, meaning that when creating a
+ * new array or replacing a disk, there is no need to do a full disk
+ * resync/recovery;
+ *
+ * #### Key Concepts
+ *
+ * ##### State Machine
+ *
+ * Each bit is one byte, contain 6 different states, see llbitmap_state. And
+ * there are total 8 different actions, see llbitmap_action, can change state:
+ *
+ * llbitmap state machine: transitions between states
+ *
+ * | | Startwrite | Startsync | Endsync | Abortsync|
+ * | --------- | ---------- | --------- | ------- | ------- |
+ * | Unwritten | Dirty | x | x | x |
+ * | Clean | Dirty | x | x | x |
+ * | Dirty | x | x | x | x |
+ * | NeedSync | x | Syncing | x | x |
+ * | Syncing | x | Syncing | Dirty | NeedSync |
+ *
+ * | | Reload | Daemon | Discard | Stale |
+ * | --------- | -------- | ------ | --------- | --------- |
+ * | Unwritten | x | x | x | x |
+ * | Clean | x | x | Unwritten | NeedSync |
+ * | Dirty | NeedSync | Clean | Unwritten | NeedSync |
+ * | NeedSync | x | x | Unwritten | x |
+ * | Syncing | NeedSync | x | Unwritten | NeedSync |
+ *
+ * Typical scenarios:
+ *
+ * 1) Create new array
+ * All bits will be set to Unwritten by default, if --assume-clean is set,
+ * all bits will be set to Clean instead.
+ *
+ * 2) write data, raid1/raid10 have full copy of data, while raid456 doesn't and
+ * rely on xor data
+ *
+ * 2.1) write new data to raid1/raid10:
+ * Unwritten --StartWrite--> Dirty
+ *
+ * 2.2) write new data to raid456:
+ * Unwritten --StartWrite--> NeedSync
+ *
+ * Because the initial recovery for raid456 is skipped, the xor data is not
+ * built yet; the bit must be set to NeedSync first, and after the lazy
+ * initial recovery finishes, the bit is finally set to Dirty (see 5.1 and
+ * 5.4);
+ *
+ * 2.3) cover write
+ * Clean --StartWrite--> Dirty
+ *
+ * 3) daemon, if the array is not degraded:
+ * Dirty --Daemon--> Clean
+ *
+ * 4) discard
+ * {Clean, Dirty, NeedSync, Syncing} --Discard--> Unwritten
+ *
+ * 5) resync and recover
+ *
+ * 5.1) common process
+ * NeedSync --Startsync--> Syncing --Endsync--> Dirty --Daemon--> Clean
+ *
+ * 5.2) resync after power failure
+ * Dirty --Reload--> NeedSync
+ *
+ * 5.3) recover while replacing with a new disk
+ * By default, the old bitmap framework will recover all data; llbitmap
+ * instead uses a new helper, see llbitmap_skip_sync_blocks, to
+ * skip recovery for bits other than Dirty or Clean;
+ *
+ * 5.4) lazy initial recover for raid5:
+ * By default, the old bitmap framework only allows a new recovery when there
+ * are spares (new disks); a new recovery flag, MD_RECOVERY_LAZY_RECOVER, is
+ * added to perform raid456 lazy recovery for set bits (from 2.2).
+ *
+ * 6) special handling for degraded arrays:
+ *
+ * - Dirty bits will never be cleared and the daemon will just do nothing, so
+ * that if a disk is readded, Clean bits can be skipped during recovery;
+ * - Dirty bits will convert to Syncing when a sync starts, to do data
+ * recovery for newly added disks;
+ * - New writes will convert bits to NeedSync directly;
+ *
+ * ##### Bitmap IO
+ *
+ * ##### Chunksize
+ *
+ * The default bitmap size is 128k, including a 1k bitmap super block, and
+ * the default size of the data segment represented by each bit (the
+ * chunksize) is 64k. The chunksize is doubled each time the total number of
+ * bits would otherwise be no less than 127k (see llbitmap_init).
+ *
+ * ##### READ
+ *
+ * While creating the bitmap, all pages will be allocated and read for
+ * llbitmap; there won't be any reads afterwards.
+ *
+ * ##### WRITE
+ *
+ * WRITE IO is divided into the logical_block_size of the array, and the dirty
+ * state of each block is tracked independently, for example:
+ *
+ * each page is 4k and contains 8 blocks; each block is 512 bytes and contains
+ * 512 bits;
+ *
+ * | page0 | page1 | ... | page 31 |
+ * | |
+ * | \-----------------------\
+ * | |
+ * | block0 | block1 | ... | block 8|
+ * | |
+ * | \-----------------\
+ * | |
+ * | bit0 | bit1 | ... | bit511 |
+ *
+ * In the IO path, if one bit is changed to Dirty or NeedSync, the
+ * corresponding subpage will be marked dirty, and such a block must be
+ * written first before the IO is issued. This behaviour will affect IO
+ * performance; to reduce the impact, if multiple bits are changed in the same
+ * block in a short time, all bits in this block will be changed to
+ * Dirty/NeedSync, so that there won't be any overhead until the daemon clears
+ * the dirty bits.
+ *
+ * ##### Dirty Bits synchronization
+ *
+ * The IO fast path will set bits to dirty, and those dirty bits will be
+ * cleared by the daemon after the IO is done. llbitmap_page_ctl is used to
+ * synchronize between the IO path and the daemon;
+ *
+ * IO path:
+ * 1) try to grab a reference; if that succeeds, set the expire time to 5s
+ * from now and return;
+ * 2) if grabbing a reference fails, wait for the daemon to finish clearing
+ * the dirty bits;
+ *
+ * Daemon (Daemon will be woken up every daemon_sleep seconds):
+ * For each page:
+ * 1) check if the page has expired; if not, skip this page; for an expired page:
+ * 2) suspend the page and wait for inflight write IO to be done;
+ * 3) change dirty page to clean;
+ * 4) resume the page;
+ */
+
+#define BITMAP_DATA_OFFSET 1024
+
+/* 64k is the max IO size of sync IO for raid1/raid10 */
+#define MIN_CHUNK_SIZE (64 * 2)
+
+/* By default, the daemon will be woken up every 30s */
+#define DEFAULT_DAEMON_SLEEP 30
+
+/*
+ * Dirtied bits that have not been accessed for more than 5s will be cleared
+ * by the daemon.
+ */
+#define DEFAULT_BARRIER_IDLE 5
+
+enum llbitmap_state {
+ /* No valid data, init state after assemble the array */
+ BitUnwritten = 0,
+ /* data is consistent */
+ BitClean,
+ /* data will be consistent after IO is done, set directly for writes */
+ BitDirty,
+ /*
+ * data needs to be resynchronized:
+ * 1) set directly for writes if the array is degraded, to prevent full
+ * disk synchronization after readding a disk;
+ * 2) reassemble the array after power failure, and dirty bits are
+ * found after reloading the bitmap;
+ * 3) set for first write for raid5, to build initial xor data lazily
+ */
+ BitNeedSync,
+ /* data is synchronizing */
+ BitSyncing,
+ BitStateCount,
+ BitNone = 0xff,
+};
+
+enum llbitmap_action {
+ /* User write new data, this is the only action from IO fast path */
+ BitmapActionStartwrite = 0,
+ /* Start recovery */
+ BitmapActionStartsync,
+ /* Finish recovery */
+ BitmapActionEndsync,
+ /* Failed recovery */
+ BitmapActionAbortsync,
+ /* Reassemble the array */
+ BitmapActionReload,
+ /* Daemon thread is trying to clear dirty bits */
+ BitmapActionDaemon,
+ /* Data is deleted */
+ BitmapActionDiscard,
+ /*
+ * Bitmap is stale; mark all bits except BitUnwritten as
+ * BitNeedSync.
+ */
+ BitmapActionStale,
+ BitmapActionCount,
+ /* Init state is BitUnwritten */
+ BitmapActionInit,
+};
+
+enum llbitmap_page_state {
+ LLPageFlush = 0,
+ LLPageDirty,
+};
+
+struct llbitmap_page_ctl {
+ char *state;
+ struct page *page;
+ unsigned long expire;
+ unsigned long flags;
+ wait_queue_head_t wait;
+ struct percpu_ref active;
+ /* Per block size dirty state, maximum 64k page / 1 sector = 128 */
+ unsigned long dirty[];
+};
+
+struct llbitmap {
+ struct mddev *mddev;
+ struct llbitmap_page_ctl **pctl;
+
+ unsigned int nr_pages;
+ unsigned int io_size;
+ unsigned int blocks_per_page;
+
+ /* shift of one chunk */
+ unsigned long chunkshift;
+ /* size of one chunk in sector */
+ unsigned long chunksize;
+ /* total number of chunks */
+ unsigned long chunks;
+ unsigned long last_end_sync;
+ /*
+ * time in seconds that dirty bits will be cleared if the page is not
+ * accessed.
+ */
+ unsigned long barrier_idle;
+ /* fires on first BitDirty state */
+ struct timer_list pending_timer;
+ struct work_struct daemon_work;
+
+ unsigned long flags;
+ __u64 events_cleared;
+
+ /* for slow disks */
+ atomic_t behind_writes;
+ wait_queue_head_t behind_wait;
+};
+
+struct llbitmap_unplug_work {
+ struct work_struct work;
+ struct llbitmap *llbitmap;
+ struct completion *done;
+};
+
+static struct workqueue_struct *md_llbitmap_io_wq;
+static struct workqueue_struct *md_llbitmap_unplug_wq;
+
+static char state_machine[BitStateCount][BitmapActionCount] = {
+ [BitUnwritten] = {
+ [BitmapActionStartwrite] = BitDirty,
+ [BitmapActionStartsync] = BitNone,
+ [BitmapActionEndsync] = BitNone,
+ [BitmapActionAbortsync] = BitNone,
+ [BitmapActionReload] = BitNone,
+ [BitmapActionDaemon] = BitNone,
+ [BitmapActionDiscard] = BitNone,
+ [BitmapActionStale] = BitNone,
+ },
+ [BitClean] = {
+ [BitmapActionStartwrite] = BitDirty,
+ [BitmapActionStartsync] = BitNone,
+ [BitmapActionEndsync] = BitNone,
+ [BitmapActionAbortsync] = BitNone,
+ [BitmapActionReload] = BitNone,
+ [BitmapActionDaemon] = BitNone,
+ [BitmapActionDiscard] = BitUnwritten,
+ [BitmapActionStale] = BitNeedSync,
+ },
+ [BitDirty] = {
+ [BitmapActionStartwrite] = BitNone,
+ [BitmapActionStartsync] = BitNone,
+ [BitmapActionEndsync] = BitNone,
+ [BitmapActionAbortsync] = BitNone,
+ [BitmapActionReload] = BitNeedSync,
+ [BitmapActionDaemon] = BitClean,
+ [BitmapActionDiscard] = BitUnwritten,
+ [BitmapActionStale] = BitNeedSync,
+ },
+ [BitNeedSync] = {
+ [BitmapActionStartwrite] = BitNone,
+ [BitmapActionStartsync] = BitSyncing,
+ [BitmapActionEndsync] = BitNone,
+ [BitmapActionAbortsync] = BitNone,
+ [BitmapActionReload] = BitNone,
+ [BitmapActionDaemon] = BitNone,
+ [BitmapActionDiscard] = BitUnwritten,
+ [BitmapActionStale] = BitNone,
+ },
+ [BitSyncing] = {
+ [BitmapActionStartwrite] = BitNone,
+ [BitmapActionStartsync] = BitSyncing,
+ [BitmapActionEndsync] = BitDirty,
+ [BitmapActionAbortsync] = BitNeedSync,
+ [BitmapActionReload] = BitNeedSync,
+ [BitmapActionDaemon] = BitNone,
+ [BitmapActionDiscard] = BitUnwritten,
+ [BitmapActionStale] = BitNeedSync,
+ },
+};
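+
+/*
+ * Example lookup: state_machine[BitDirty][BitmapActionDaemon] is BitClean,
+ * i.e. the daemon may clear a dirty bit once writes are done (scenario 3
+ * above); a BitNone entry means the action leaves the state unchanged.
+ */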
+
+static void __llbitmap_flush(struct mddev *mddev);
+
+static enum llbitmap_state llbitmap_read(struct llbitmap *llbitmap, loff_t pos)
+{
+ unsigned int idx;
+ unsigned int offset;
+
+ pos += BITMAP_DATA_OFFSET;
+ idx = pos >> PAGE_SHIFT;
+ offset = offset_in_page(pos);
+
+ return llbitmap->pctl[idx]->state[offset];
+}
+
+/* set all the bits in the subpage as dirty */
+static void llbitmap_infect_dirty_bits(struct llbitmap *llbitmap,
+ struct llbitmap_page_ctl *pctl,
+ unsigned int block)
+{
+ bool level_456 = raid_is_456(llbitmap->mddev);
+ unsigned int io_size = llbitmap->io_size;
+ int pos;
+
+ for (pos = block * io_size; pos < (block + 1) * io_size; pos++) {
+ switch (pctl->state[pos]) {
+ case BitUnwritten:
+ pctl->state[pos] = level_456 ? BitNeedSync : BitDirty;
+ break;
+ case BitClean:
+ pctl->state[pos] = BitDirty;
+ break;
+ };
+ }
+}
+
+static void llbitmap_set_page_dirty(struct llbitmap *llbitmap, int idx,
+ int offset)
+{
+ struct llbitmap_page_ctl *pctl = llbitmap->pctl[idx];
+ unsigned int io_size = llbitmap->io_size;
+ int block = offset / io_size;
+ int pos;
+
+ if (!test_bit(LLPageDirty, &pctl->flags))
+ set_bit(LLPageDirty, &pctl->flags);
+
+ /*
+ * For a degraded array, dirty bits will never be cleared, and we must
+ * resync all the dirty bits, hence skip infecting new dirty bits to
+ * avoid resyncing unnecessary data.
+ */
+ if (llbitmap->mddev->degraded) {
+ set_bit(block, pctl->dirty);
+ return;
+ }
+
+ /*
+ * The subpage usually contains a total of 512 bits. If any single bit
+ * within the subpage is marked as dirty, the entire sector will be
+ * written. To avoid impacting write performance, when multiple bits
+ * within the same sector are modified within llbitmap->barrier_idle,
+ * all bits in the sector will be collectively marked as dirty at once.
+ */
+ if (test_and_set_bit(block, pctl->dirty)) {
+ llbitmap_infect_dirty_bits(llbitmap, pctl, block);
+ return;
+ }
+
+ for (pos = block * io_size; pos < (block + 1) * io_size; pos++) {
+ if (pos == offset)
+ continue;
+ if (pctl->state[pos] == BitDirty ||
+ pctl->state[pos] == BitNeedSync) {
+ llbitmap_infect_dirty_bits(llbitmap, pctl, block);
+ return;
+ }
+ }
+}
+
+static void llbitmap_write(struct llbitmap *llbitmap, enum llbitmap_state state,
+ loff_t pos)
+{
+ unsigned int idx;
+ unsigned int bit;
+
+ pos += BITMAP_DATA_OFFSET;
+ idx = pos >> PAGE_SHIFT;
+ bit = offset_in_page(pos);
+
+ llbitmap->pctl[idx]->state[bit] = state;
+ if (state == BitDirty || state == BitNeedSync)
+ llbitmap_set_page_dirty(llbitmap, idx, bit);
+}
+
+static struct page *llbitmap_read_page(struct llbitmap *llbitmap, int idx)
+{
+ struct mddev *mddev = llbitmap->mddev;
+ struct page *page = NULL;
+ struct md_rdev *rdev;
+
+ if (llbitmap->pctl && llbitmap->pctl[idx])
+ page = llbitmap->pctl[idx]->page;
+ if (page)
+ return page;
+
+ page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!page)
+ return ERR_PTR(-ENOMEM);
+
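+ /* try each active rdev in turn; a failing rdev is marked via md_error() */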
+ rdev_for_each(rdev, mddev) {
+ sector_t sector;
+
+ if (rdev->raid_disk < 0 || test_bit(Faulty, &rdev->flags))
+ continue;
+
+ sector = mddev->bitmap_info.offset +
+ (idx << PAGE_SECTORS_SHIFT);
+
+ if (sync_page_io(rdev, sector, PAGE_SIZE, page, REQ_OP_READ,
+ true))
+ return page;
+
+ md_error(mddev, rdev);
+ }
+
+ __free_page(page);
+ return ERR_PTR(-EIO);
+}
+
+static void llbitmap_write_page(struct llbitmap *llbitmap, int idx)
+{
+ struct page *page = llbitmap->pctl[idx]->page;
+ struct mddev *mddev = llbitmap->mddev;
+ struct md_rdev *rdev;
+ int block;
+
+ for (block = 0; block < llbitmap->blocks_per_page; block++) {
+ struct llbitmap_page_ctl *pctl = llbitmap->pctl[idx];
+
+ if (!test_and_clear_bit(block, pctl->dirty))
+ continue;
+
+ rdev_for_each(rdev, mddev) {
+ sector_t sector;
+ sector_t bit_sector = llbitmap->io_size >> SECTOR_SHIFT;
+
+ if (rdev->raid_disk < 0 || test_bit(Faulty, &rdev->flags))
+ continue;
+
+ sector = mddev->bitmap_info.offset + rdev->sb_start +
+ (idx << PAGE_SECTORS_SHIFT) +
+ block * bit_sector;
+ md_write_metadata(mddev, rdev, sector,
+ llbitmap->io_size, page,
+ block * llbitmap->io_size);
+ }
+ }
+}
+
+static void active_release(struct percpu_ref *ref)
+{
+ struct llbitmap_page_ctl *pctl =
+ container_of(ref, struct llbitmap_page_ctl, active);
+
+ wake_up(&pctl->wait);
+}
+
+static void llbitmap_free_pages(struct llbitmap *llbitmap)
+{
+ int i;
+
+ if (!llbitmap->pctl)
+ return;
+
+ for (i = 0; i < llbitmap->nr_pages; i++) {
+ struct llbitmap_page_ctl *pctl = llbitmap->pctl[i];
+
+ if (!pctl || !pctl->page)
+ break;
+
+ __free_page(pctl->page);
+ percpu_ref_exit(&pctl->active);
+ }
+
+ kfree(llbitmap->pctl[0]);
+ kfree(llbitmap->pctl);
+ llbitmap->pctl = NULL;
+}
+
+static int llbitmap_cache_pages(struct llbitmap *llbitmap)
+{
+ struct llbitmap_page_ctl *pctl;
+ unsigned int nr_pages = DIV_ROUND_UP(llbitmap->chunks +
+ BITMAP_DATA_OFFSET, PAGE_SIZE);
+ unsigned int size = struct_size(pctl, dirty, BITS_TO_LONGS(
+ llbitmap->blocks_per_page));
+ int i;
+
+ llbitmap->pctl = kmalloc_array(nr_pages, sizeof(void *),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!llbitmap->pctl)
+ return -ENOMEM;
+
+ size = round_up(size, cache_line_size());
+ pctl = kmalloc_array(nr_pages, size, GFP_KERNEL | __GFP_ZERO);
+ if (!pctl) {
+ kfree(llbitmap->pctl);
+ return -ENOMEM;
+ }
+
+ llbitmap->nr_pages = nr_pages;
+
+ for (i = 0; i < nr_pages; i++, pctl = (void *)pctl + size) {
+ struct page *page = llbitmap_read_page(llbitmap, i);
+
+ llbitmap->pctl[i] = pctl;
+
+ if (IS_ERR(page)) {
+ llbitmap_free_pages(llbitmap);
+ return PTR_ERR(page);
+ }
+
+ if (percpu_ref_init(&pctl->active, active_release,
+ PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) {
+ __free_page(page);
+ llbitmap_free_pages(llbitmap);
+ return -ENOMEM;
+ }
+
+ pctl->page = page;
+ pctl->state = page_address(page);
+ init_waitqueue_head(&pctl->wait);
+ }
+
+ return 0;
+}
+
+static void llbitmap_init_state(struct llbitmap *llbitmap)
+{
+ enum llbitmap_state state = BitUnwritten;
+ unsigned long i;
+
+ if (test_and_clear_bit(BITMAP_CLEAN, &llbitmap->flags))
+ state = BitClean;
+
+ for (i = 0; i < llbitmap->chunks; i++)
+ llbitmap_write(llbitmap, state, i);
+}
+
+/* The return value is only used from resync, where @start == @end. */
+static enum llbitmap_state llbitmap_state_machine(struct llbitmap *llbitmap,
+ unsigned long start,
+ unsigned long end,
+ enum llbitmap_action action)
+{
+ struct mddev *mddev = llbitmap->mddev;
+ enum llbitmap_state state = BitNone;
+ bool level_456 = raid_is_456(llbitmap->mddev);
+ bool need_resync = false;
+ bool need_recovery = false;
+
+ if (test_bit(BITMAP_WRITE_ERROR, &llbitmap->flags))
+ return BitNone;
+
+ if (action == BitmapActionInit) {
+ llbitmap_init_state(llbitmap);
+ return BitNone;
+ }
+
+ while (start <= end) {
+ enum llbitmap_state c = llbitmap_read(llbitmap, start);
+
+ if (c < 0 || c >= BitStateCount) {
+ pr_err("%s: invalid bit %lu state %d action %d, forcing resync\n",
+ __func__, start, c, action);
+ state = BitNeedSync;
+ goto write_bitmap;
+ }
+
+ if (c == BitNeedSync)
+ need_resync = !mddev->degraded;
+
+ state = state_machine[c][action];
+
+write_bitmap:
+ if (unlikely(mddev->degraded)) {
+ /* For degraded array, mark new data as need sync. */
+ if (state == BitDirty &&
+ action == BitmapActionStartwrite)
+ state = BitNeedSync;
+ /*
+ * For degraded array, resync dirty data as well, noted
+ * if array is still degraded after resync is done, all
+ * new data will still be dirty until array is clean.
+ */
+ else if (c == BitDirty &&
+ action == BitmapActionStartsync)
+ state = BitSyncing;
+ } else if (c == BitUnwritten && state == BitDirty &&
+ action == BitmapActionStartwrite && level_456) {
+ /* Delay raid456 initial recovery to first write. */
+ state = BitNeedSync;
+ }
+
+ if (state == BitNone) {
+ start++;
+ continue;
+ }
+
+ llbitmap_write(llbitmap, state, start);
+
+ if (state == BitNeedSync)
+ need_resync = !mddev->degraded;
+ else if (state == BitDirty &&
+ !timer_pending(&llbitmap->pending_timer))
+ mod_timer(&llbitmap->pending_timer,
+ jiffies + mddev->bitmap_info.daemon_sleep * HZ);
+
+ start++;
+ }
+
+ if (need_resync && level_456)
+ need_recovery = true;
+
+ if (need_recovery) {
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ set_bit(MD_RECOVERY_LAZY_RECOVER, &mddev->recovery);
+ md_wakeup_thread(mddev->thread);
+ } else if (need_resync) {
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
+ md_wakeup_thread(mddev->thread);
+ }
+
+ return state;
+}
+
+static void llbitmap_raise_barrier(struct llbitmap *llbitmap, int page_idx)
+{
+ struct llbitmap_page_ctl *pctl = llbitmap->pctl[page_idx];
+
+retry:
+ if (likely(percpu_ref_tryget_live(&pctl->active))) {
+ WRITE_ONCE(pctl->expire, jiffies + llbitmap->barrier_idle * HZ);
+ return;
+ }
+
+ wait_event(pctl->wait, !percpu_ref_is_dying(&pctl->active));
+ goto retry;
+}
+
+static void llbitmap_release_barrier(struct llbitmap *llbitmap, int page_idx)
+{
+ struct llbitmap_page_ctl *pctl = llbitmap->pctl[page_idx];
+
+ percpu_ref_put(&pctl->active);
+}
+
+static int llbitmap_suspend_timeout(struct llbitmap *llbitmap, int page_idx)
+{
+ struct llbitmap_page_ctl *pctl = llbitmap->pctl[page_idx];
+
+ percpu_ref_kill(&pctl->active);
+
+ if (!wait_event_timeout(pctl->wait, percpu_ref_is_zero(&pctl->active),
+ llbitmap->mddev->bitmap_info.daemon_sleep * HZ))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static void llbitmap_resume(struct llbitmap *llbitmap, int page_idx)
+{
+ struct llbitmap_page_ctl *pctl = llbitmap->pctl[page_idx];
+
+ pctl->expire = LONG_MAX;
+ percpu_ref_resurrect(&pctl->active);
+ wake_up(&pctl->wait);
+}
+
+static int llbitmap_check_support(struct mddev *mddev)
+{
+ if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
+ pr_notice("md/llbitmap: %s: array with journal cannot have bitmap\n",
+ mdname(mddev));
+ return -EBUSY;
+ }
+
+ if (mddev->bitmap_info.space == 0 &&
+     mddev->bitmap_info.default_space == 0) {
+ pr_notice("md/llbitmap: %s: no space for bitmap\n",
+ mdname(mddev));
+ return -ENOSPC;
+ }
+
+ if (!mddev->persistent) {
+ pr_notice("md/llbitmap: %s: array must be persistent\n",
+ mdname(mddev));
+ return -EOPNOTSUPP;
+ }
+
+ if (mddev->bitmap_info.file) {
+ pr_notice("md/llbitmap: %s: doesn't support bitmap file\n",
+ mdname(mddev));
+ return -EOPNOTSUPP;
+ }
+
+ if (mddev->bitmap_info.external) {
+ pr_notice("md/llbitmap: %s: doesn't support external metadata\n",
+ mdname(mddev));
+ return -EOPNOTSUPP;
+ }
+
+ if (mddev_is_dm(mddev)) {
+ pr_notice("md/llbitmap: %s: doesn't support dm-raid\n",
+ mdname(mddev));
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int llbitmap_init(struct llbitmap *llbitmap)
+{
+ struct mddev *mddev = llbitmap->mddev;
+ sector_t blocks = mddev->resync_max_sectors;
+ unsigned long chunksize = MIN_CHUNK_SIZE;
+ unsigned long chunks = DIV_ROUND_UP_SECTOR_T(blocks, chunksize);
+ unsigned long space = mddev->bitmap_info.space << SECTOR_SHIFT;
+ int ret;
+
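+ /*
+ * One byte of bitmap space tracks one chunk, so double the
+ * chunksize until all chunks fit in the available space.
+ */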
+ while (chunks > space) {
+ chunksize = chunksize << 1;
+ chunks = DIV_ROUND_UP_SECTOR_T(blocks, chunksize);
+ }
+
+ llbitmap->barrier_idle = DEFAULT_BARRIER_IDLE;
+ llbitmap->chunkshift = ffz(~chunksize);
+ llbitmap->chunksize = chunksize;
+ llbitmap->chunks = chunks;
+ mddev->bitmap_info.daemon_sleep = DEFAULT_DAEMON_SLEEP;
+
+ ret = llbitmap_cache_pages(llbitmap);
+ if (ret)
+ return ret;
+
+ llbitmap_state_machine(llbitmap, 0, llbitmap->chunks - 1,
+ BitmapActionInit);
+ /* flush initial llbitmap to disk */
+ __llbitmap_flush(mddev);
+
+ return 0;
+}
+
+static int llbitmap_read_sb(struct llbitmap *llbitmap)
+{
+ struct mddev *mddev = llbitmap->mddev;
+ unsigned long daemon_sleep;
+ unsigned long chunksize;
+ unsigned long events;
+ struct page *sb_page;
+ bitmap_super_t *sb;
+ int ret = -EINVAL;
+
+ if (!mddev->bitmap_info.offset) {
+ pr_err("md/llbitmap: %s: no super block found", mdname(mddev));
+ return -EINVAL;
+ }
+
+ sb_page = llbitmap_read_page(llbitmap, 0);
+ if (IS_ERR(sb_page)) {
+ pr_err("md/llbitmap: %s: read super block failed",
+ mdname(mddev));
+ return -EIO;
+ }
+
+ sb = kmap_local_page(sb_page);
+ if (sb->magic != cpu_to_le32(BITMAP_MAGIC)) {
+ pr_err("md/llbitmap: %s: invalid super block magic number",
+ mdname(mddev));
+ goto out_put_page;
+ }
+
+ if (sb->version != cpu_to_le32(BITMAP_MAJOR_LOCKLESS)) {
+ pr_err("md/llbitmap: %s: invalid super block version",
+ mdname(mddev));
+ goto out_put_page;
+ }
+
+ if (memcmp(sb->uuid, mddev->uuid, 16)) {
+ pr_err("md/llbitmap: %s: bitmap superblock UUID mismatch\n",
+ mdname(mddev));
+ goto out_put_page;
+ }
+
+ if (mddev->bitmap_info.space == 0) {
+ int room = le32_to_cpu(sb->sectors_reserved);
+
+ if (room)
+ mddev->bitmap_info.space = room;
+ else
+ mddev->bitmap_info.space = mddev->bitmap_info.default_space;
+ }
+ llbitmap->flags = le32_to_cpu(sb->state);
+ if (test_and_clear_bit(BITMAP_FIRST_USE, &llbitmap->flags)) {
+ ret = llbitmap_init(llbitmap);
+ goto out_put_page;
+ }
+
+ chunksize = le32_to_cpu(sb->chunksize);
+ if (!is_power_of_2(chunksize)) {
+ pr_err("md/llbitmap: %s: chunksize not a power of 2",
+ mdname(mddev));
+ goto out_put_page;
+ }
+
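+ /* The on-disk chunksize must be big enough for all chunks to fit. */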
+ if (chunksize < DIV_ROUND_UP_SECTOR_T(mddev->resync_max_sectors,
+ mddev->bitmap_info.space << SECTOR_SHIFT)) {
+ pr_err("md/llbitmap: %s: chunksize too small %lu < %llu / %lu",
+ mdname(mddev), chunksize, mddev->resync_max_sectors,
+ mddev->bitmap_info.space);
+ goto out_put_page;
+ }
+
+ daemon_sleep = le32_to_cpu(sb->daemon_sleep);
+ if (daemon_sleep < 1 || daemon_sleep > MAX_SCHEDULE_TIMEOUT / HZ) {
+ pr_err("md/llbitmap: %s: daemon sleep %lu period out of range",
+ mdname(mddev), daemon_sleep);
+ goto out_put_page;
+ }
+
+ events = le64_to_cpu(sb->events);
+ if (events < mddev->events) {
+ pr_warn("md/llbitmap :%s: bitmap file is out of date (%lu < %llu) -- forcing full recovery",
+ mdname(mddev), events, mddev->events);
+ set_bit(BITMAP_STALE, &llbitmap->flags);
+ }
+
+ sb->sync_size = cpu_to_le64(mddev->resync_max_sectors);
+ mddev->bitmap_info.chunksize = chunksize;
+ mddev->bitmap_info.daemon_sleep = daemon_sleep;
+
+ llbitmap->barrier_idle = DEFAULT_BARRIER_IDLE;
+ llbitmap->chunksize = chunksize;
+ llbitmap->chunks = DIV_ROUND_UP_SECTOR_T(mddev->resync_max_sectors, chunksize);
+ llbitmap->chunkshift = ffz(~chunksize);
+ ret = llbitmap_cache_pages(llbitmap);
+
+out_put_page:
+ kunmap_local(sb);
+ __free_page(sb_page);
+ return ret;
+}
+
+static void llbitmap_pending_timer_fn(struct timer_list *pending_timer)
+{
+ struct llbitmap *llbitmap =
+ container_of(pending_timer, struct llbitmap, pending_timer);
+
+ if (work_busy(&llbitmap->daemon_work)) {
+ pr_warn("md/llbitmap: %s daemon_work not finished in %lu seconds\n",
+ mdname(llbitmap->mddev),
+ llbitmap->mddev->bitmap_info.daemon_sleep);
+ set_bit(BITMAP_DAEMON_BUSY, &llbitmap->flags);
+ return;
+ }
+
+ queue_work(md_llbitmap_io_wq, &llbitmap->daemon_work);
+}
+
+static void md_llbitmap_daemon_fn(struct work_struct *work)
+{
+ struct llbitmap *llbitmap =
+ container_of(work, struct llbitmap, daemon_work);
+ unsigned long start;
+ unsigned long end;
+ bool restart;
+ int idx;
+
+ if (llbitmap->mddev->degraded)
+ return;
+retry:
+ start = 0;
+ end = min(llbitmap->chunks, PAGE_SIZE - BITMAP_DATA_OFFSET) - 1;
+ restart = false;
+
+ for (idx = 0; idx < llbitmap->nr_pages; idx++) {
+ struct llbitmap_page_ctl *pctl = llbitmap->pctl[idx];
+
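+ /*
+ * Page 0 shares space with the superblock and thus covers fewer
+ * chunks (PAGE_SIZE - BITMAP_DATA_OFFSET); later pages cover
+ * PAGE_SIZE chunks each.
+ */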
+ if (idx > 0) {
+ start = end + 1;
+ end = min(end + PAGE_SIZE, llbitmap->chunks - 1);
+ }
+
+ if (!test_bit(LLPageFlush, &pctl->flags) &&
+ time_before(jiffies, pctl->expire)) {
+ restart = true;
+ continue;
+ }
+
+ if (llbitmap_suspend_timeout(llbitmap, idx) < 0) {
+ pr_warn("md/llbitmap: %s: %s waiting for page %d timeout\n",
+ mdname(llbitmap->mddev), __func__, idx);
+ continue;
+ }
+
+ llbitmap_state_machine(llbitmap, start, end, BitmapActionDaemon);
+ llbitmap_resume(llbitmap, idx);
+ }
+
+ /*
+ * If the daemon took a long time to finish, retry so that clearing
+ * dirty bits is not missed.
+ */
+ if (test_and_clear_bit(BITMAP_DAEMON_BUSY, &llbitmap->flags))
+ goto retry;
+
+ /* If some page is dirty but not expired, setup timer again */
+ if (restart)
+ mod_timer(&llbitmap->pending_timer,
+ jiffies + llbitmap->mddev->bitmap_info.daemon_sleep * HZ);
+}
+
+static int llbitmap_create(struct mddev *mddev)
+{
+ struct llbitmap *llbitmap;
+ int ret;
+
+ ret = llbitmap_check_support(mddev);
+ if (ret)
+ return ret;
+
+ llbitmap = kzalloc(sizeof(*llbitmap), GFP_KERNEL);
+ if (!llbitmap)
+ return -ENOMEM;
+
+ llbitmap->mddev = mddev;
+ llbitmap->io_size = bdev_logical_block_size(mddev->gendisk->part0);
+ llbitmap->blocks_per_page = PAGE_SIZE / llbitmap->io_size;
+
+ timer_setup(&llbitmap->pending_timer, llbitmap_pending_timer_fn, 0);
+ INIT_WORK(&llbitmap->daemon_work, md_llbitmap_daemon_fn);
+ atomic_set(&llbitmap->behind_writes, 0);
+ init_waitqueue_head(&llbitmap->behind_wait);
+
+ mutex_lock(&mddev->bitmap_info.mutex);
+ mddev->bitmap = llbitmap;
+ ret = llbitmap_read_sb(llbitmap);
+ mutex_unlock(&mddev->bitmap_info.mutex);
+ if (ret) {
+ kfree(llbitmap);
+ mddev->bitmap = NULL;
+ }
+
+ return ret;
+}
+
+static int llbitmap_resize(struct mddev *mddev, sector_t blocks, int chunksize)
+{
+ struct llbitmap *llbitmap = mddev->bitmap;
+ unsigned long chunks;
+
+ if (chunksize == 0)
+ chunksize = llbitmap->chunksize;
+
+ /* If there is enough space, leave the chunksize unchanged. */
+ chunks = DIV_ROUND_UP_SECTOR_T(blocks, chunksize);
+ while (chunks > mddev->bitmap_info.space << SECTOR_SHIFT) {
+ chunksize = chunksize << 1;
+ chunks = DIV_ROUND_UP_SECTOR_T(blocks, chunksize);
+ }
+
+ llbitmap->chunkshift = ffz(~chunksize);
+ llbitmap->chunksize = chunksize;
+ llbitmap->chunks = chunks;
+
+ return 0;
+}
+
+static int llbitmap_load(struct mddev *mddev)
+{
+ enum llbitmap_action action = BitmapActionReload;
+ struct llbitmap *llbitmap = mddev->bitmap;
+
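+ /* An out-of-date bitmap is reloaded with BitmapActionStale to force resync. */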
+ if (test_and_clear_bit(BITMAP_STALE, &llbitmap->flags))
+ action = BitmapActionStale;
+
+ llbitmap_state_machine(llbitmap, 0, llbitmap->chunks - 1, action);
+ return 0;
+}
+
+static void llbitmap_destroy(struct mddev *mddev)
+{
+ struct llbitmap *llbitmap = mddev->bitmap;
+
+ if (!llbitmap)
+ return;
+
+ mutex_lock(&mddev->bitmap_info.mutex);
+
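+ /* Stop the timer and drain pending work before freeing the pages. */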
+ timer_delete_sync(&llbitmap->pending_timer);
+ flush_workqueue(md_llbitmap_io_wq);
+ flush_workqueue(md_llbitmap_unplug_wq);
+
+ mddev->bitmap = NULL;
+ llbitmap_free_pages(llbitmap);
+ kfree(llbitmap);
+ mutex_unlock(&mddev->bitmap_info.mutex);
+}
+
+static void llbitmap_start_write(struct mddev *mddev, sector_t offset,
+ unsigned long sectors)
+{
+ struct llbitmap *llbitmap = mddev->bitmap;
+ unsigned long start = offset >> llbitmap->chunkshift;
+ unsigned long end = (offset + sectors - 1) >> llbitmap->chunkshift;
+ int page_start = (start + BITMAP_DATA_OFFSET) >> PAGE_SHIFT;
+ int page_end = (end + BITMAP_DATA_OFFSET) >> PAGE_SHIFT;
+
+ llbitmap_state_machine(llbitmap, start, end, BitmapActionStartwrite);
+
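+ /*
+ * Hold the per-page barrier so the daemon cannot suspend these pages
+ * while the write is in flight; released in llbitmap_end_write().
+ */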
+ while (page_start <= page_end) {
+ llbitmap_raise_barrier(llbitmap, page_start);
+ page_start++;
+ }
+}
+
+static void llbitmap_end_write(struct mddev *mddev, sector_t offset,
+ unsigned long sectors)
+{
+ struct llbitmap *llbitmap = mddev->bitmap;
+ unsigned long start = offset >> llbitmap->chunkshift;
+ unsigned long end = (offset + sectors - 1) >> llbitmap->chunkshift;
+ int page_start = (start + BITMAP_DATA_OFFSET) >> PAGE_SHIFT;
+ int page_end = (end + BITMAP_DATA_OFFSET) >> PAGE_SHIFT;
+
+ while (page_start <= page_end) {
+ llbitmap_release_barrier(llbitmap, page_start);
+ page_start++;
+ }
+}
+
+static void llbitmap_start_discard(struct mddev *mddev, sector_t offset,
+ unsigned long sectors)
+{
+ struct llbitmap *llbitmap = mddev->bitmap;
+ unsigned long start = DIV_ROUND_UP_SECTOR_T(offset, llbitmap->chunksize);
+ unsigned long end = (offset + sectors - 1) >> llbitmap->chunkshift;
+ int page_start = (start + BITMAP_DATA_OFFSET) >> PAGE_SHIFT;
+ int page_end = (end + BITMAP_DATA_OFFSET) >> PAGE_SHIFT;
+
+ llbitmap_state_machine(llbitmap, start, end, BitmapActionDiscard);
+
+ while (page_start <= page_end) {
+ llbitmap_raise_barrier(llbitmap, page_start);
+ page_start++;
+ }
+}
+
+static void llbitmap_end_discard(struct mddev *mddev, sector_t offset,
+ unsigned long sectors)
+{
+ struct llbitmap *llbitmap = mddev->bitmap;
+ unsigned long start = DIV_ROUND_UP_SECTOR_T(offset, llbitmap->chunksize);
+ unsigned long end = (offset + sectors - 1) >> llbitmap->chunkshift;
+ int page_start = (start + BITMAP_DATA_OFFSET) >> PAGE_SHIFT;
+ int page_end = (end + BITMAP_DATA_OFFSET) >> PAGE_SHIFT;
+
+ while (page_start <= page_end) {
+ llbitmap_release_barrier(llbitmap, page_start);
+ page_start++;
+ }
+}
+
+static void llbitmap_unplug_fn(struct work_struct *work)
+{
+ struct llbitmap_unplug_work *unplug_work =
+ container_of(work, struct llbitmap_unplug_work, work);
+ struct llbitmap *llbitmap = unplug_work->llbitmap;
+ struct blk_plug plug;
+ int i;
+
+ blk_start_plug(&plug);
+
+ for (i = 0; i < llbitmap->nr_pages; i++) {
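+ /*
+ * The plain test_bit check skips the atomic RMW for clean pages;
+ * test_and_clear_bit then ensures only one caller writes the page.
+ */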
+ if (!test_bit(LLPageDirty, &llbitmap->pctl[i]->flags) ||
+ !test_and_clear_bit(LLPageDirty, &llbitmap->pctl[i]->flags))
+ continue;
+
+ llbitmap_write_page(llbitmap, i);
+ }
+
+ blk_finish_plug(&plug);
+ md_super_wait(llbitmap->mddev);
+ complete(unplug_work->done);
+}
+
+static bool llbitmap_dirty(struct llbitmap *llbitmap)
+{
+ int i;
+
+ for (i = 0; i < llbitmap->nr_pages; i++)
+ if (test_bit(LLPageDirty, &llbitmap->pctl[i]->flags))
+ return true;
+
+ return false;
+}
+
+static void llbitmap_unplug(struct mddev *mddev, bool sync)
+{
+ DECLARE_COMPLETION_ONSTACK(done);
+ struct llbitmap *llbitmap = mddev->bitmap;
+ struct llbitmap_unplug_work unplug_work = {
+ .llbitmap = llbitmap,
+ .done = &done,
+ };
+
+ if (!llbitmap_dirty(llbitmap))
+ return;
+
+ /*
+ * Issuing new bitmap IO from the submit_bio() context would deadlock:
+ * - the original bio will wait for the bitmap bio to complete before
+ * it can be issued;
+ * - the bitmap bio will be added to current->bio_list and wait for
+ * the original bio to be issued first.
+ */
+ INIT_WORK_ONSTACK(&unplug_work.work, llbitmap_unplug_fn);
+ queue_work(md_llbitmap_unplug_wq, &unplug_work.work);
+ wait_for_completion(&done);
+ destroy_work_on_stack(&unplug_work.work);
+}
+
+/*
+ * Force writing all bitmap pages to disk; called when stopping the array, or
+ * every daemon_sleep seconds while sync_thread is running.
+ */
+static void __llbitmap_flush(struct mddev *mddev)
+{
+ struct llbitmap *llbitmap = mddev->bitmap;
+ struct blk_plug plug;
+ int i;
+
+ blk_start_plug(&plug);
+ for (i = 0; i < llbitmap->nr_pages; i++) {
+ struct llbitmap_page_ctl *pctl = llbitmap->pctl[i];
+
+ /* mark all blocks as dirty */
+ set_bit(LLPageDirty, &pctl->flags);
+ bitmap_fill(pctl->dirty, llbitmap->blocks_per_page);
+ llbitmap_write_page(llbitmap, i);
+ }
+ blk_finish_plug(&plug);
+ md_super_wait(llbitmap->mddev);
+}
+
+static void llbitmap_flush(struct mddev *mddev)
+{
+ struct llbitmap *llbitmap = mddev->bitmap;
+ int i;
+
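+ /* LLPageFlush makes the daemon handle every page regardless of expire. */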
+ for (i = 0; i < llbitmap->nr_pages; i++)
+ set_bit(LLPageFlush, &llbitmap->pctl[i]->flags);
+
+ timer_delete_sync(&llbitmap->pending_timer);
+ queue_work(md_llbitmap_io_wq, &llbitmap->daemon_work);
+ flush_work(&llbitmap->daemon_work);
+
+ __llbitmap_flush(mddev);
+}
+
+/* This is used for raid5 lazy initial recovery */
+static bool llbitmap_blocks_synced(struct mddev *mddev, sector_t offset)
+{
+ struct llbitmap *llbitmap = mddev->bitmap;
+ unsigned long p = offset >> llbitmap->chunkshift;
+ enum llbitmap_state c = llbitmap_read(llbitmap, p);
+
+ return c == BitClean || c == BitDirty;
+}
+
+static sector_t llbitmap_skip_sync_blocks(struct mddev *mddev, sector_t offset)
+{
+ struct llbitmap *llbitmap = mddev->bitmap;
+ unsigned long p = offset >> llbitmap->chunkshift;
+ int blocks = llbitmap->chunksize - (offset & (llbitmap->chunksize - 1));
+ enum llbitmap_state c = llbitmap_read(llbitmap, p);
+
+ /* always skip unwritten blocks */
+ if (c == BitUnwritten)
+ return blocks;
+
+ /* For degraded array, don't skip */
+ if (mddev->degraded)
+ return 0;
+
+ /* For resync also skip clean/dirty blocks */
+ if ((c == BitClean || c == BitDirty) &&
+ test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
+ !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
+ return blocks;
+
+ return 0;
+}
+
+static bool llbitmap_start_sync(struct mddev *mddev, sector_t offset,
+ sector_t *blocks, bool degraded)
+{
+ struct llbitmap *llbitmap = mddev->bitmap;
+ unsigned long p = offset >> llbitmap->chunkshift;
+
+ /*
+ * Handle one bit at a time; this is much simpler, and it doesn't
+ * matter if md_do_sync() loops a few more times.
+ */
+ *blocks = llbitmap->chunksize - (offset & (llbitmap->chunksize - 1));
+ return llbitmap_state_machine(llbitmap, p, p,
+ BitmapActionStartsync) == BitSyncing;
+}
+
+/* Something went wrong and sync_thread stopped at @offset */
+static void llbitmap_end_sync(struct mddev *mddev, sector_t offset,
+ sector_t *blocks)
+{
+ struct llbitmap *llbitmap = mddev->bitmap;
+ unsigned long p = offset >> llbitmap->chunkshift;
+
+ *blocks = llbitmap->chunksize - (offset & (llbitmap->chunksize - 1));
+ llbitmap_state_machine(llbitmap, p, llbitmap->chunks - 1,
+ BitmapActionAbortsync);
+}
+
+/* A full sync_thread is finished */
+static void llbitmap_close_sync(struct mddev *mddev)
+{
+ struct llbitmap *llbitmap = mddev->bitmap;
+ int i;
+
+ for (i = 0; i < llbitmap->nr_pages; i++) {
+ struct llbitmap_page_ctl *pctl = llbitmap->pctl[i];
+
+ /* let daemon_fn clear dirty bits immediately */
+ WRITE_ONCE(pctl->expire, jiffies);
+ }
+
+ llbitmap_state_machine(llbitmap, 0, llbitmap->chunks - 1,
+ BitmapActionEndsync);
+}
+
+/*
+ * sync_thread has reached @sector; update metadata every daemon_sleep seconds,
+ * in case sync_thread has to restart after a power failure.
+ */
+static void llbitmap_cond_end_sync(struct mddev *mddev, sector_t sector,
+ bool force)
+{
+ struct llbitmap *llbitmap = mddev->bitmap;
+
+ if (sector == 0) {
+ llbitmap->last_end_sync = jiffies;
+ return;
+ }
+
+ if (time_before(jiffies, llbitmap->last_end_sync +
+ HZ * mddev->bitmap_info.daemon_sleep))
+ return;
+
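+ /* Wait for in-flight resync IO so the completed position is stable. */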
+ wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
+
+ mddev->curr_resync_completed = sector;
+ set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
+ llbitmap_state_machine(llbitmap, 0, sector >> llbitmap->chunkshift,
+ BitmapActionEndsync);
+ __llbitmap_flush(mddev);
+
+ llbitmap->last_end_sync = jiffies;
+ sysfs_notify_dirent_safe(mddev->sysfs_completed);
+}
+
+static bool llbitmap_enabled(void *data, bool flush)
+{
+ struct llbitmap *llbitmap = data;
+
+ return llbitmap && !test_bit(BITMAP_WRITE_ERROR, &llbitmap->flags);
+}
+
+static void llbitmap_dirty_bits(struct mddev *mddev, unsigned long s,
+ unsigned long e)
+{
+ llbitmap_state_machine(mddev->bitmap, s, e, BitmapActionStartwrite);
+}
+
+static void llbitmap_write_sb(struct llbitmap *llbitmap)
+{
+ int nr_blocks = DIV_ROUND_UP(BITMAP_DATA_OFFSET, llbitmap->io_size);
+
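+ /* The superblock lives in the first BITMAP_DATA_OFFSET bytes of page 0. */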
+ bitmap_fill(llbitmap->pctl[0]->dirty, nr_blocks);
+ llbitmap_write_page(llbitmap, 0);
+ md_super_wait(llbitmap->mddev);
+}
+
+static void llbitmap_update_sb(void *data)
+{
+ struct llbitmap *llbitmap = data;
+ struct mddev *mddev = llbitmap->mddev;
+ struct page *sb_page;
+ bitmap_super_t *sb;
+
+ if (test_bit(BITMAP_WRITE_ERROR, &llbitmap->flags))
+ return;
+
+ sb_page = llbitmap_read_page(llbitmap, 0);
+ if (IS_ERR(sb_page)) {
+ pr_err("%s: %s: read super block failed", __func__,
+ mdname(mddev));
+ set_bit(BITMAP_WRITE_ERROR, &llbitmap->flags);
+ return;
+ }
+
+ if (mddev->events < llbitmap->events_cleared)
+ llbitmap->events_cleared = mddev->events;
+
+ sb = kmap_local_page(sb_page);
+ sb->events = cpu_to_le64(mddev->events);
+ sb->state = cpu_to_le32(llbitmap->flags);
+ sb->chunksize = cpu_to_le32(llbitmap->chunksize);
+ sb->sync_size = cpu_to_le64(mddev->resync_max_sectors);
+ sb->events_cleared = cpu_to_le64(llbitmap->events_cleared);
+ sb->sectors_reserved = cpu_to_le32(mddev->bitmap_info.space);
+ sb->daemon_sleep = cpu_to_le32(mddev->bitmap_info.daemon_sleep);
+
+ kunmap_local(sb);
+ llbitmap_write_sb(llbitmap);
+}
+
+static int llbitmap_get_stats(void *data, struct md_bitmap_stats *stats)
+{
+ struct llbitmap *llbitmap = data;
+
+ memset(stats, 0, sizeof(*stats));
+
+ stats->missing_pages = 0;
+ stats->pages = llbitmap->nr_pages;
+ stats->file_pages = llbitmap->nr_pages;
+
+ stats->behind_writes = atomic_read(&llbitmap->behind_writes);
+ stats->behind_wait = wq_has_sleeper(&llbitmap->behind_wait);
+ stats->events_cleared = llbitmap->events_cleared;
+
+ return 0;
+}
+
+/* just flag all pages as needing to be written */
+static void llbitmap_write_all(struct mddev *mddev)
+{
+ int i;
+ struct llbitmap *llbitmap = mddev->bitmap;
+
+ for (i = 0; i < llbitmap->nr_pages; i++) {
+ struct llbitmap_page_ctl *pctl = llbitmap->pctl[i];
+
+ set_bit(LLPageDirty, &pctl->flags);
+ bitmap_fill(pctl->dirty, llbitmap->blocks_per_page);
+ }
+}
+
+static void llbitmap_start_behind_write(struct mddev *mddev)
+{
+ struct llbitmap *llbitmap = mddev->bitmap;
+
+ atomic_inc(&llbitmap->behind_writes);
+}
+
+static void llbitmap_end_behind_write(struct mddev *mddev)
+{
+ struct llbitmap *llbitmap = mddev->bitmap;
+
+ if (atomic_dec_and_test(&llbitmap->behind_writes))
+ wake_up(&llbitmap->behind_wait);
+}
+
+static void llbitmap_wait_behind_writes(struct mddev *mddev)
+{
+ struct llbitmap *llbitmap = mddev->bitmap;
+
+ if (!llbitmap)
+ return;
+
+ wait_event(llbitmap->behind_wait,
+ atomic_read(&llbitmap->behind_writes) == 0);
+}
+
+static ssize_t bits_show(struct mddev *mddev, char *page)
+{
+ struct llbitmap *llbitmap;
+ int bits[BitStateCount] = {0};
+ loff_t start = 0;
+
+ mutex_lock(&mddev->bitmap_info.mutex);
+ llbitmap = mddev->bitmap;
+ if (!llbitmap || !llbitmap->pctl) {
+ mutex_unlock(&mddev->bitmap_info.mutex);
+ return sprintf(page, "no bitmap\n");
+ }
+
+ if (test_bit(BITMAP_WRITE_ERROR, &llbitmap->flags)) {
+ mutex_unlock(&mddev->bitmap_info.mutex);
+ return sprintf(page, "bitmap io error\n");
+ }
+
+ while (start < llbitmap->chunks) {
+ enum llbitmap_state c = llbitmap_read(llbitmap, start);
+
+ if (c < 0 || c >= BitStateCount)
+ pr_err("%s: invalid bit %llu state %d\n",
+ __func__, start, c);
+ else
+ bits[c]++;
+ start++;
+ }
+
+ mutex_unlock(&mddev->bitmap_info.mutex);
+ return sprintf(page, "unwritten %d\nclean %d\ndirty %d\nneed sync %d\nsyncing %d\n",
+ bits[BitUnwritten], bits[BitClean], bits[BitDirty],
+ bits[BitNeedSync], bits[BitSyncing]);
+}
+
+static struct md_sysfs_entry llbitmap_bits = __ATTR_RO(bits);
+
+static ssize_t metadata_show(struct mddev *mddev, char *page)
+{
+ struct llbitmap *llbitmap;
+ ssize_t ret;
+
+ mutex_lock(&mddev->bitmap_info.mutex);
+ llbitmap = mddev->bitmap;
+ if (!llbitmap) {
+ mutex_unlock(&mddev->bitmap_info.mutex);
+ return sprintf(page, "no bitmap\n");
+ }
+
+ ret = sprintf(page, "chunksize %lu\nchunkshift %lu\nchunks %lu\noffset %llu\ndaemon_sleep %lu\n",
+ llbitmap->chunksize, llbitmap->chunkshift,
+ llbitmap->chunks, mddev->bitmap_info.offset,
+ llbitmap->mddev->bitmap_info.daemon_sleep);
+ mutex_unlock(&mddev->bitmap_info.mutex);
+
+ return ret;
+}
+
+static struct md_sysfs_entry llbitmap_metadata = __ATTR_RO(metadata);
+
+static ssize_t
+daemon_sleep_show(struct mddev *mddev, char *page)
+{
+ return sprintf(page, "%lu\n", mddev->bitmap_info.daemon_sleep);
+}
+
+static ssize_t
+daemon_sleep_store(struct mddev *mddev, const char *buf, size_t len)
+{
+ unsigned long timeout;
+ int rv = kstrtoul(buf, 10, &timeout);
+
+ if (rv)
+ return rv;
+
+ mddev->bitmap_info.daemon_sleep = timeout;
+ return len;
+}
+
+static struct md_sysfs_entry llbitmap_daemon_sleep = __ATTR_RW(daemon_sleep);
+
+static ssize_t
+barrier_idle_show(struct mddev *mddev, char *page)
+{
+ struct llbitmap *llbitmap = mddev->bitmap;
+
+ return sprintf(page, "%lu\n", llbitmap->barrier_idle);
+}
+
+static ssize_t
+barrier_idle_store(struct mddev *mddev, const char *buf, size_t len)
+{
+ struct llbitmap *llbitmap = mddev->bitmap;
+ unsigned long timeout;
+ int rv = kstrtoul(buf, 10, &timeout);
+
+ if (rv)
+ return rv;
+
+ llbitmap->barrier_idle = timeout;
+ return len;
+}
+
+static struct md_sysfs_entry llbitmap_barrier_idle = __ATTR_RW(barrier_idle);
+
+static struct attribute *md_llbitmap_attrs[] = {
+ &llbitmap_bits.attr,
+ &llbitmap_metadata.attr,
+ &llbitmap_daemon_sleep.attr,
+ &llbitmap_barrier_idle.attr,
+ NULL
+};
+
+static struct attribute_group md_llbitmap_group = {
+ .name = "llbitmap",
+ .attrs = md_llbitmap_attrs,
+};
+
+static struct bitmap_operations llbitmap_ops = {
+ .head = {
+ .type = MD_BITMAP,
+ .id = ID_LLBITMAP,
+ .name = "llbitmap",
+ },
+
+ .enabled = llbitmap_enabled,
+ .create = llbitmap_create,
+ .resize = llbitmap_resize,
+ .load = llbitmap_load,
+ .destroy = llbitmap_destroy,
+
+ .start_write = llbitmap_start_write,
+ .end_write = llbitmap_end_write,
+ .start_discard = llbitmap_start_discard,
+ .end_discard = llbitmap_end_discard,
+ .unplug = llbitmap_unplug,
+ .flush = llbitmap_flush,
+
+ .start_behind_write = llbitmap_start_behind_write,
+ .end_behind_write = llbitmap_end_behind_write,
+ .wait_behind_writes = llbitmap_wait_behind_writes,
+
+ .blocks_synced = llbitmap_blocks_synced,
+ .skip_sync_blocks = llbitmap_skip_sync_blocks,
+ .start_sync = llbitmap_start_sync,
+ .end_sync = llbitmap_end_sync,
+ .close_sync = llbitmap_close_sync,
+ .cond_end_sync = llbitmap_cond_end_sync,
+
+ .update_sb = llbitmap_update_sb,
+ .get_stats = llbitmap_get_stats,
+ .dirty_bits = llbitmap_dirty_bits,
+ .write_all = llbitmap_write_all,
+
+ .group = &md_llbitmap_group,
+};
+
+int md_llbitmap_init(void)
+{
+ md_llbitmap_io_wq = alloc_workqueue("md_llbitmap_io",
+ WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
+ if (!md_llbitmap_io_wq)
+ return -ENOMEM;
+
+ md_llbitmap_unplug_wq = alloc_workqueue("md_llbitmap_unplug",
+ WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
+ if (!md_llbitmap_unplug_wq) {
+ destroy_workqueue(md_llbitmap_io_wq);
+ md_llbitmap_io_wq = NULL;
+ return -ENOMEM;
+ }
+
+ return register_md_submodule(&llbitmap_ops.head);
+}
+
+void md_llbitmap_exit(void)
+{
+ destroy_workqueue(md_llbitmap_io_wq);
+ md_llbitmap_io_wq = NULL;
+ destroy_workqueue(md_llbitmap_unplug_wq);
+ md_llbitmap_unplug_wq = NULL;
+ unregister_md_submodule(&llbitmap_ops.head);
+}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 4e033c26fdd4..41c476b40c7a 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -94,7 +94,6 @@ static struct workqueue_struct *md_wq;
* workqueue with reconfig_mutex grabbed.
*/
static struct workqueue_struct *md_misc_wq;
-struct workqueue_struct *md_bitmap_wq;
static int remove_and_add_spares(struct mddev *mddev,
struct md_rdev *this);
@@ -677,8 +676,64 @@ static void active_io_release(struct percpu_ref *ref)
static void no_op(struct percpu_ref *r) {}
+static bool mddev_set_bitmap_ops(struct mddev *mddev)
+{
+ struct bitmap_operations *old = mddev->bitmap_ops;
+ struct md_submodule_head *head;
+
+ if (mddev->bitmap_id == ID_BITMAP_NONE ||
+ (old && old->head.id == mddev->bitmap_id))
+ return true;
+
+ xa_lock(&md_submodule);
+ head = xa_load(&md_submodule, mddev->bitmap_id);
+
+ if (!head) {
+ pr_warn("md: can't find bitmap id %d\n", mddev->bitmap_id);
+ goto err;
+ }
+
+ if (head->type != MD_BITMAP) {
+ pr_warn("md: invalid bitmap id %d\n", mddev->bitmap_id);
+ goto err;
+ }
+
+ mddev->bitmap_ops = (void *)head;
+ xa_unlock(&md_submodule);
+
+ if (!mddev_is_dm(mddev) && mddev->bitmap_ops->group) {
+ if (sysfs_create_group(&mddev->kobj, mddev->bitmap_ops->group))
+ pr_warn("md: cannot register extra bitmap attributes for %s\n",
+ mdname(mddev));
+ else
+ /*
+ * Inform user with KOBJ_CHANGE about new bitmap
+ * attributes.
+ */
+ kobject_uevent(&mddev->kobj, KOBJ_CHANGE);
+ }
+ return true;
+
+err:
+ xa_unlock(&md_submodule);
+ return false;
+}
+
+static void mddev_clear_bitmap_ops(struct mddev *mddev)
+{
+ if (!mddev_is_dm(mddev) && mddev->bitmap_ops &&
+ mddev->bitmap_ops->group)
+ sysfs_remove_group(&mddev->kobj, mddev->bitmap_ops->group);
+
+ mddev->bitmap_ops = NULL;
+}
+
int mddev_init(struct mddev *mddev)
{
+ if (!IS_ENABLED(CONFIG_MD_BITMAP))
+ mddev->bitmap_id = ID_BITMAP_NONE;
+ else
+ mddev->bitmap_id = ID_BITMAP;
if (percpu_ref_init(&mddev->active_io, active_io_release,
PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
@@ -713,7 +768,6 @@ int mddev_init(struct mddev *mddev)
mddev->resync_min = 0;
mddev->resync_max = MaxSector;
mddev->level = LEVEL_NONE;
- mddev_set_bitmap_ops(mddev);
INIT_WORK(&mddev->sync_work, md_start_sync);
INIT_WORK(&mddev->del_work, mddev_delayed_delete);
@@ -1020,15 +1074,26 @@ static void super_written(struct bio *bio)
wake_up(&mddev->sb_wait);
}
-void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
- sector_t sector, int size, struct page *page)
+/**
+ * md_write_metadata - write metadata to underlying disk, including
+ * array superblock, badblocks, bitmap superblock and bitmap bits.
+ * @mddev: the array to write
+ * @rdev: the underlying disk to write
+ * @sector: the offset to @rdev
+ * @size: the length of the metadata
+ * @page: the metadata
+ * @offset: the offset to @page
+ *
+ * Write @size bytes of @page, starting from @offset, to @sector of @rdev.
+ * Increment mddev->pending_writes before returning, and decrement it on
+ * completion, waking up sb_wait. The caller must call md_super_wait() after
+ * issuing IO to all rdevs. If an error occurs, md_error() will be called and
+ * @rdev will be kicked out of @mddev.
+ */
+void md_write_metadata(struct mddev *mddev, struct md_rdev *rdev,
+ sector_t sector, int size, struct page *page,
+ unsigned int offset)
{
- /* write first size bytes of page to sector of rdev
- * Increment mddev->pending_writes before returning
- * and decrement it on completion, waking up sb_wait
- * if zero is reached.
- * If an error occurred, call md_error
- */
struct bio *bio;
if (!page)
@@ -1046,7 +1111,7 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
atomic_inc(&rdev->nr_pending);
bio->bi_iter.bi_sector = sector;
- __bio_add_page(bio, page, size, 0);
+ __bio_add_page(bio, page, size, offset);
bio->bi_private = rdev;
bio->bi_end_io = super_written;
@@ -1356,6 +1421,9 @@ static u64 md_bitmap_events_cleared(struct mddev *mddev)
struct md_bitmap_stats stats;
int err;
+ if (!md_bitmap_enabled(mddev, false))
+ return 0;
+
err = mddev->bitmap_ops->get_stats(mddev->bitmap, &stats);
if (err)
return 0;
@@ -1653,8 +1721,8 @@ super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
if ((u64)num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1)
num_sectors = (sector_t)(2ULL << 32) - 2;
do {
- md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
- rdev->sb_page);
+ md_write_metadata(rdev->mddev, rdev, rdev->sb_start,
+ rdev->sb_size, rdev->sb_page, 0);
} while (md_super_wait(rdev->mddev) < 0);
return num_sectors;
}
@@ -2302,8 +2370,8 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
sb->super_offset = cpu_to_le64(rdev->sb_start);
sb->sb_csum = calc_sb_1_csum(sb);
do {
- md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
- rdev->sb_page);
+ md_write_metadata(rdev->mddev, rdev, rdev->sb_start,
+ rdev->sb_size, rdev->sb_page, 0);
} while (md_super_wait(rdev->mddev) < 0);
return num_sectors;
@@ -2313,13 +2381,15 @@ static int
super_1_allow_new_offset(struct md_rdev *rdev,
unsigned long long new_offset)
{
+ struct mddev *mddev = rdev->mddev;
+
/* All necessary checks on new >= old have been done */
if (new_offset >= rdev->data_offset)
return 1;
/* with 1.0 metadata, there is no metadata to tread on
* so we can always move back */
- if (rdev->mddev->minor_version == 0)
+ if (mddev->minor_version == 0)
return 1;
/* otherwise we must be sure not to step on
@@ -2331,8 +2401,7 @@ super_1_allow_new_offset(struct md_rdev *rdev,
if (rdev->sb_start + (32+4)*2 > new_offset)
return 0;
- if (!rdev->mddev->bitmap_info.file) {
- struct mddev *mddev = rdev->mddev;
+ if (md_bitmap_registered(mddev) && !mddev->bitmap_info.file) {
struct md_bitmap_stats stats;
int err;
@@ -2804,24 +2873,24 @@ repeat:
mddev_add_trace_msg(mddev, "md md_update_sb");
rewrite:
- mddev->bitmap_ops->update_sb(mddev->bitmap);
+ if (md_bitmap_enabled(mddev, false))
+ mddev->bitmap_ops->update_sb(mddev->bitmap);
rdev_for_each(rdev, mddev) {
if (rdev->sb_loaded != 1)
continue; /* no noise on spare devices */
if (!test_bit(Faulty, &rdev->flags)) {
- md_super_write(mddev,rdev,
- rdev->sb_start, rdev->sb_size,
- rdev->sb_page);
+ md_write_metadata(mddev, rdev, rdev->sb_start,
+ rdev->sb_size, rdev->sb_page, 0);
pr_debug("md: (write) %pg's sb offset: %llu\n",
rdev->bdev,
(unsigned long long)rdev->sb_start);
rdev->sb_events = mddev->events;
if (rdev->badblocks.size) {
- md_super_write(mddev, rdev,
- rdev->badblocks.sector,
- rdev->badblocks.size << 9,
- rdev->bb_page);
+ md_write_metadata(mddev, rdev,
+ rdev->badblocks.sector,
+ rdev->badblocks.size << 9,
+ rdev->bb_page, 0);
rdev->badblocks.size = 0;
}
@@ -4150,6 +4219,86 @@ static struct md_sysfs_entry md_new_level =
__ATTR(new_level, 0664, new_level_show, new_level_store);
static ssize_t
+bitmap_type_show(struct mddev *mddev, char *page)
+{
+ struct md_submodule_head *head;
+ unsigned long i;
+ ssize_t len = 0;
+
+ if (mddev->bitmap_id == ID_BITMAP_NONE)
+ len += sprintf(page + len, "[none] ");
+ else
+ len += sprintf(page + len, "none ");
+
+ xa_lock(&md_submodule);
+ xa_for_each(&md_submodule, i, head) {
+ if (head->type != MD_BITMAP)
+ continue;
+
+ if (mddev->bitmap_id == head->id)
+ len += sprintf(page + len, "[%s] ", head->name);
+ else
+ len += sprintf(page + len, "%s ", head->name);
+ }
+ xa_unlock(&md_submodule);
+
+ len += sprintf(page + len, "\n");
+ return len;
+}
+
+static ssize_t
+bitmap_type_store(struct mddev *mddev, const char *buf, size_t len)
+{
+ struct md_submodule_head *head;
+ int id;
+ unsigned long i;
+ int err = 0;
+
+ xa_lock(&md_submodule);
+
+ if (mddev->bitmap_ops) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ if (cmd_match(buf, "none")) {
+ mddev->bitmap_id = ID_BITMAP_NONE;
+ goto out;
+ }
+
+ xa_for_each(&md_submodule, i, head) {
+ if (head->type == MD_BITMAP && cmd_match(buf, head->name)) {
+ mddev->bitmap_id = head->id;
+ goto out;
+ }
+ }
+
+ err = kstrtoint(buf, 10, &id);
+ if (err)
+ goto out;
+
+ if (id == ID_BITMAP_NONE) {
+ mddev->bitmap_id = id;
+ goto out;
+ }
+
+ head = xa_load(&md_submodule, id);
+ if (head && head->type == MD_BITMAP) {
+ mddev->bitmap_id = id;
+ goto out;
+ }
+
+ err = -ENOENT;
+
+out:
+ xa_unlock(&md_submodule);
+ return err ? err : len;
+}
+
+static struct md_sysfs_entry md_bitmap_type =
+__ATTR(bitmap_type, 0664, bitmap_type_show, bitmap_type_store);
+
+static ssize_t
layout_show(struct mddev *mddev, char *page)
{
/* just a number, not meaningful for all levels */
@@ -4680,6 +4829,9 @@ bitmap_store(struct mddev *mddev, const char *buf, size_t len)
unsigned long chunk, end_chunk;
int err;
+ if (!md_bitmap_enabled(mddev, false))
+ return len;
+
err = mddev_lock(mddev);
if (err)
return err;
@@ -5752,6 +5904,7 @@ __ATTR(serialize_policy, S_IRUGO | S_IWUSR, serialize_policy_show,
static struct attribute *md_default_attrs[] = {
&md_level.attr,
&md_new_level.attr,
+ &md_bitmap_type.attr,
&md_layout.attr,
&md_raid_disks.attr,
&md_uuid.attr,
@@ -5801,7 +5954,6 @@ static const struct attribute_group md_redundancy_group = {
static const struct attribute_group *md_attr_groups[] = {
&md_default_group,
- &md_bitmap_group,
NULL,
};
@@ -6133,6 +6285,26 @@ static void md_safemode_timeout(struct timer_list *t)
static int start_dirty_degraded;
+static int md_bitmap_create(struct mddev *mddev)
+{
+ if (mddev->bitmap_id == ID_BITMAP_NONE)
+ return -EINVAL;
+
+ if (!mddev_set_bitmap_ops(mddev))
+ return -ENOENT;
+
+ return mddev->bitmap_ops->create(mddev);
+}
+
+static void md_bitmap_destroy(struct mddev *mddev)
+{
+ if (!md_bitmap_registered(mddev))
+ return;
+
+ mddev->bitmap_ops->destroy(mddev);
+ mddev_clear_bitmap_ops(mddev);
+}
+
int md_run(struct mddev *mddev)
{
int err;
@@ -6299,7 +6471,7 @@ int md_run(struct mddev *mddev)
}
if (err == 0 && pers->sync_request &&
(mddev->bitmap_info.file || mddev->bitmap_info.offset)) {
- err = mddev->bitmap_ops->create(mddev);
+ err = md_bitmap_create(mddev);
if (err)
pr_warn("%s: failed to create bitmap (%d)\n",
mdname(mddev), err);
@@ -6372,7 +6544,7 @@ bitmap_abort:
pers->free(mddev, mddev->private);
mddev->private = NULL;
put_pers(pers);
- mddev->bitmap_ops->destroy(mddev);
+ md_bitmap_destroy(mddev);
abort:
bioset_exit(&mddev->io_clone_set);
exit_sync_set:
@@ -6392,10 +6564,12 @@ int do_md_run(struct mddev *mddev)
if (err)
goto out;
- err = mddev->bitmap_ops->load(mddev);
- if (err) {
- mddev->bitmap_ops->destroy(mddev);
- goto out;
+ if (md_bitmap_registered(mddev)) {
+ err = mddev->bitmap_ops->load(mddev);
+ if (err) {
+ md_bitmap_destroy(mddev);
+ goto out;
+ }
}
if (mddev_is_clustered(mddev))
@@ -6546,7 +6720,8 @@ static void __md_stop_writes(struct mddev *mddev)
mddev->pers->quiesce(mddev, 0);
}
- mddev->bitmap_ops->flush(mddev);
+ if (md_bitmap_enabled(mddev, true))
+ mddev->bitmap_ops->flush(mddev);
if (md_is_rdwr(mddev) &&
((!mddev->in_sync && !mddev_is_clustered(mddev)) ||
@@ -6573,7 +6748,8 @@ EXPORT_SYMBOL_GPL(md_stop_writes);
static void mddev_detach(struct mddev *mddev)
{
- mddev->bitmap_ops->wait_behind_writes(mddev);
+ if (md_bitmap_enabled(mddev, false))
+ mddev->bitmap_ops->wait_behind_writes(mddev);
if (mddev->pers && mddev->pers->quiesce && !is_md_suspended(mddev)) {
mddev->pers->quiesce(mddev, 1);
mddev->pers->quiesce(mddev, 0);
@@ -6589,7 +6765,7 @@ static void __md_stop(struct mddev *mddev)
{
struct md_personality *pers = mddev->pers;
- mddev->bitmap_ops->destroy(mddev);
+ md_bitmap_destroy(mddev);
mddev_detach(mddev);
spin_lock(&mddev->lock);
mddev->pers = NULL;
@@ -7307,6 +7483,9 @@ static int set_bitmap_file(struct mddev *mddev, int fd)
{
int err = 0;
+ if (!md_bitmap_registered(mddev))
+ return -EINVAL;
+
if (mddev->pers) {
if (!mddev->pers->quiesce || !mddev->thread)
return -EBUSY;
@@ -7363,16 +7542,16 @@ static int set_bitmap_file(struct mddev *mddev, int fd)
err = 0;
if (mddev->pers) {
if (fd >= 0) {
- err = mddev->bitmap_ops->create(mddev);
+ err = md_bitmap_create(mddev);
if (!err)
err = mddev->bitmap_ops->load(mddev);
if (err) {
- mddev->bitmap_ops->destroy(mddev);
+ md_bitmap_destroy(mddev);
fd = -1;
}
} else if (fd < 0) {
- mddev->bitmap_ops->destroy(mddev);
+ md_bitmap_destroy(mddev);
}
}
@@ -7679,12 +7858,12 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
mddev->bitmap_info.default_offset;
mddev->bitmap_info.space =
mddev->bitmap_info.default_space;
- rv = mddev->bitmap_ops->create(mddev);
+ rv = md_bitmap_create(mddev);
if (!rv)
rv = mddev->bitmap_ops->load(mddev);
if (rv)
- mddev->bitmap_ops->destroy(mddev);
+ md_bitmap_destroy(mddev);
} else {
struct md_bitmap_stats stats;
@@ -7710,7 +7889,7 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
put_cluster_ops(mddev);
mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY;
}
- mddev->bitmap_ops->destroy(mddev);
+ md_bitmap_destroy(mddev);
mddev->bitmap_info.offset = 0;
}
}
@@ -7747,9 +7926,9 @@ static int set_disk_faulty(struct mddev *mddev, dev_t dev)
* 4 sectors (with a BIG number of cylinders...). This drives
* dosfs just mad... ;-)
*/
-static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+static int md_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
- struct mddev *mddev = bdev->bd_disk->private_data;
+ struct mddev *mddev = disk->private_data;
geo->heads = 2;
geo->sectors = 4;
@@ -8491,6 +8670,9 @@ static void md_bitmap_status(struct seq_file *seq, struct mddev *mddev)
unsigned long chunk_kb;
int err;
+ if (!md_bitmap_enabled(mddev, false))
+ return;
+
err = mddev->bitmap_ops->get_stats(mddev->bitmap, &stats);
if (err)
return;
@@ -8873,18 +9055,24 @@ EXPORT_SYMBOL_GPL(md_submit_discard_bio);
static void md_bitmap_start(struct mddev *mddev,
struct md_io_clone *md_io_clone)
{
+ md_bitmap_fn *fn = unlikely(md_io_clone->rw == STAT_DISCARD) ?
+ mddev->bitmap_ops->start_discard :
+ mddev->bitmap_ops->start_write;
+
if (mddev->pers->bitmap_sector)
mddev->pers->bitmap_sector(mddev, &md_io_clone->offset,
&md_io_clone->sectors);
- mddev->bitmap_ops->start_write(mddev, md_io_clone->offset,
- md_io_clone->sectors);
+ fn(mddev, md_io_clone->offset, md_io_clone->sectors);
}
static void md_bitmap_end(struct mddev *mddev, struct md_io_clone *md_io_clone)
{
- mddev->bitmap_ops->end_write(mddev, md_io_clone->offset,
- md_io_clone->sectors);
+ md_bitmap_fn *fn = unlikely(md_io_clone->rw == STAT_DISCARD) ?
+ mddev->bitmap_ops->end_discard :
+ mddev->bitmap_ops->end_write;
+
+ fn(mddev, md_io_clone->offset, md_io_clone->sectors);
}
static void md_end_clone_io(struct bio *bio)
@@ -8893,7 +9081,7 @@ static void md_end_clone_io(struct bio *bio)
struct bio *orig_bio = md_io_clone->orig_bio;
struct mddev *mddev = md_io_clone->mddev;
- if (bio_data_dir(orig_bio) == WRITE && mddev->bitmap)
+ if (bio_data_dir(orig_bio) == WRITE && md_bitmap_enabled(mddev, false))
md_bitmap_end(mddev, md_io_clone);
if (bio->bi_status && !orig_bio->bi_status)
@@ -8920,9 +9108,10 @@ static void md_clone_bio(struct mddev *mddev, struct bio **bio)
if (blk_queue_io_stat(bdev->bd_disk->queue))
md_io_clone->start_time = bio_start_io_acct(*bio);
- if (bio_data_dir(*bio) == WRITE && mddev->bitmap) {
+ if (bio_data_dir(*bio) == WRITE && md_bitmap_enabled(mddev, false)) {
md_io_clone->offset = (*bio)->bi_iter.bi_sector;
md_io_clone->sectors = bio_sectors(*bio);
+ md_io_clone->rw = op_stat_group(bio_op(*bio));
md_bitmap_start(mddev, md_io_clone);
}
@@ -8944,7 +9133,7 @@ void md_free_cloned_bio(struct bio *bio)
struct bio *orig_bio = md_io_clone->orig_bio;
struct mddev *mddev = md_io_clone->mddev;
- if (bio_data_dir(orig_bio) == WRITE && mddev->bitmap)
+ if (bio_data_dir(orig_bio) == WRITE && md_bitmap_enabled(mddev, false))
md_bitmap_end(mddev, md_io_clone);
if (bio->bi_status && !orig_bio->bi_status)
@@ -9010,6 +9199,39 @@ static sector_t md_sync_max_sectors(struct mddev *mddev,
}
}
+/*
+ * If lazy recovery is requested and all rdevs are in sync, select the rdev
+ * with the highest index to perform recovery and build the initial xor data;
+ * this matches the behaviour of the old bitmap.
+ */
+static bool mddev_select_lazy_recover_rdev(struct mddev *mddev)
+{
+ struct md_rdev *recover_rdev = NULL;
+ struct md_rdev *rdev;
+ bool ret = false;
+
+ rcu_read_lock();
+ rdev_for_each_rcu(rdev, mddev) {
+ if (rdev->raid_disk < 0)
+ continue;
+
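+ /* Lazy recovery only applies when every active rdev is In_sync. */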
+ if (test_bit(Faulty, &rdev->flags) ||
+ !test_bit(In_sync, &rdev->flags))
+ break;
+
+ if (!recover_rdev || recover_rdev->raid_disk < rdev->raid_disk)
+ recover_rdev = rdev;
+ }
+
+ if (recover_rdev) {
+ clear_bit(In_sync, &recover_rdev->flags);
+ ret = true;
+ }
+
+ rcu_read_unlock();
+ return ret;
+}
+
static sector_t md_sync_position(struct mddev *mddev, enum sync_action action)
{
sector_t start = 0;
@@ -9041,6 +9263,14 @@ static sector_t md_sync_position(struct mddev *mddev, enum sync_action action)
start = rdev->recovery_offset;
rcu_read_unlock();
+ /*
+ * If there are no spares and raid456 lazy initial recovery is
+ * requested, start recovery from the beginning.
+ */
+ if (test_bit(MD_RECOVERY_LAZY_RECOVER, &mddev->recovery) &&
+ start == MaxSector && mddev_select_lazy_recover_rdev(mddev))
+ start = 0;
+
/* If there is a bitmap, we need to make sure all
* writes that started before we added a spare
* complete before we start doing a recovery.
@@ -9061,19 +9291,12 @@ static sector_t md_sync_position(struct mddev *mddev, enum sync_action action)
static bool sync_io_within_limit(struct mddev *mddev)
{
- int io_sectors;
-
/*
* For raid456, sync IO is stripe(4k) per IO, for other levels, it's
* RESYNC_PAGES(64k) per IO.
*/
- if (mddev->level == 4 || mddev->level == 5 || mddev->level == 6)
- io_sectors = 8;
- else
- io_sectors = 128;
-
return atomic_read(&mddev->recovery_active) <
- io_sectors * sync_io_depth(mddev);
+ (raid_is_456(mddev) ? 8 : 128) * sync_io_depth(mddev);
}
#define SYNC_MARKS 10
@@ -9283,6 +9506,12 @@ void md_do_sync(struct md_thread *thread)
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
break;
+ if (mddev->bitmap_ops && mddev->bitmap_ops->skip_sync_blocks) {
+ sectors = mddev->bitmap_ops->skip_sync_blocks(mddev, j);
+ if (sectors)
+ goto update;
+ }
+
sectors = mddev->pers->sync_request(mddev, j, max_sectors,
&skipped);
if (sectors == 0) {
@@ -9298,6 +9527,7 @@ void md_do_sync(struct md_thread *thread)
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
break;
+update:
j += sectors;
if (j > max_sectors)
/* when skipping, extra large numbers can be returned. */
@@ -9607,6 +9837,7 @@ static bool md_choose_sync_action(struct mddev *mddev, int *spares)
set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
+ clear_bit(MD_RECOVERY_LAZY_RECOVER, &mddev->recovery);
return true;
}
@@ -9615,6 +9846,7 @@ static bool md_choose_sync_action(struct mddev *mddev, int *spares)
remove_spares(mddev, NULL);
set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
+ clear_bit(MD_RECOVERY_LAZY_RECOVER, &mddev->recovery);
return true;
}
@@ -9624,7 +9856,7 @@ static bool md_choose_sync_action(struct mddev *mddev, int *spares)
* re-add.
*/
*spares = remove_and_add_spares(mddev, NULL);
- if (*spares) {
+ if (*spares || test_bit(MD_RECOVERY_LAZY_RECOVER, &mddev->recovery)) {
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
@@ -9682,7 +9914,7 @@ static void md_start_sync(struct work_struct *ws)
* We are adding a device or devices to an array which has the bitmap
* stored on all devices. So make sure all bitmap pages get written.
*/
- if (spares)
+ if (spares && md_bitmap_enabled(mddev, true))
mddev->bitmap_ops->write_all(mddev);
name = test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ?
@@ -9770,7 +10002,7 @@ static void unregister_sync_thread(struct mddev *mddev)
*/
void md_check_recovery(struct mddev *mddev)
{
- if (mddev->bitmap)
+ if (md_bitmap_enabled(mddev, false) && mddev->bitmap_ops->daemon_work)
mddev->bitmap_ops->daemon_work(mddev);
if (signal_pending(current)) {
@@ -9837,6 +10069,7 @@ void md_check_recovery(struct mddev *mddev)
}
clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
+ clear_bit(MD_RECOVERY_LAZY_RECOVER, &mddev->recovery);
clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
@@ -9947,6 +10180,7 @@ void md_reap_sync_thread(struct mddev *mddev)
clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+ clear_bit(MD_RECOVERY_LAZY_RECOVER, &mddev->recovery);
/*
* We call mddev->cluster_ops->update_size here because sync_size could
* be changed by md_update_sb, and MD_RECOVERY_RESHAPE is cleared,
@@ -10094,8 +10328,16 @@ static void md_geninit(void)
static int __init md_init(void)
{
- int ret = -ENOMEM;
+ int ret = md_bitmap_init();
+
+ if (ret)
+ return ret;
+ ret = md_llbitmap_init();
+ if (ret)
+ goto err_bitmap;
+
+ ret = -ENOMEM;
md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0);
if (!md_wq)
goto err_wq;
@@ -10104,11 +10346,6 @@ static int __init md_init(void)
if (!md_misc_wq)
goto err_misc_wq;
- md_bitmap_wq = alloc_workqueue("md_bitmap", WQ_MEM_RECLAIM | WQ_UNBOUND,
- 0);
- if (!md_bitmap_wq)
- goto err_bitmap_wq;
-
ret = __register_blkdev(MD_MAJOR, "md", md_probe);
if (ret < 0)
goto err_md;
@@ -10127,12 +10364,13 @@ static int __init md_init(void)
err_mdp:
unregister_blkdev(MD_MAJOR, "md");
err_md:
- destroy_workqueue(md_bitmap_wq);
-err_bitmap_wq:
destroy_workqueue(md_misc_wq);
err_misc_wq:
destroy_workqueue(md_wq);
err_wq:
+ md_llbitmap_exit();
+err_bitmap:
+ md_bitmap_exit();
return ret;
}
@@ -10150,7 +10388,7 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
ret = mddev->pers->resize(mddev, le64_to_cpu(sb->size));
if (ret)
pr_info("md-cluster: resize failed\n");
- else
+ else if (md_bitmap_enabled(mddev, false))
mddev->bitmap_ops->update_sb(mddev->bitmap);
}
@@ -10438,8 +10676,8 @@ static __exit void md_exit(void)
spin_unlock(&all_mddevs_lock);
destroy_workqueue(md_misc_wq);
- destroy_workqueue(md_bitmap_wq);
destroy_workqueue(md_wq);
+ md_bitmap_exit();
}
subsys_initcall(md_init);
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 51af29a03079..1979c2d4fe89 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -26,7 +26,7 @@
enum md_submodule_type {
MD_PERSONALITY = 0,
MD_CLUSTER,
- MD_BITMAP, /* TODO */
+ MD_BITMAP,
};
enum md_submodule_id {
@@ -38,8 +38,9 @@ enum md_submodule_id {
ID_RAID6 = 6,
ID_RAID10 = 10,
ID_CLUSTER,
- ID_BITMAP, /* TODO */
- ID_LLBITMAP, /* TODO */
+ ID_BITMAP,
+ ID_LLBITMAP,
+ ID_BITMAP_NONE,
};
struct md_submodule_head {
@@ -565,6 +566,7 @@ struct mddev {
struct percpu_ref writes_pending;
int sync_checkers; /* # of threads checking writes_pending */
+ enum md_submodule_id bitmap_id;
void *bitmap; /* the bitmap for the device */
struct bitmap_operations *bitmap_ops;
struct {
@@ -665,6 +667,8 @@ enum recovery_flags {
MD_RECOVERY_RESHAPE,
/* remote node is running resync thread */
MD_RESYNCING_REMOTE,
+ /* raid456 lazy initial recover */
+ MD_RECOVERY_LAZY_RECOVER,
};
enum md_ro_state {
@@ -796,7 +800,6 @@ struct md_sysfs_entry {
ssize_t (*show)(struct mddev *, char *);
ssize_t (*store)(struct mddev *, const char *, size_t);
};
-extern const struct attribute_group md_bitmap_group;
static inline struct kernfs_node *sysfs_get_dirent_safe(struct kernfs_node *sd, char *name)
{
@@ -873,6 +876,7 @@ struct md_io_clone {
unsigned long start_time;
sector_t offset;
unsigned long sectors;
+ enum stat_group rw;
struct bio bio_clone;
};
@@ -909,8 +913,9 @@ void md_account_bio(struct mddev *mddev, struct bio **bio);
void md_free_cloned_bio(struct bio *bio);
extern bool __must_check md_flush_request(struct mddev *mddev, struct bio *bio);
-extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
- sector_t sector, int size, struct page *page);
+void md_write_metadata(struct mddev *mddev, struct md_rdev *rdev,
+ sector_t sector, int size, struct page *page,
+ unsigned int offset);
extern int md_super_wait(struct mddev *mddev);
extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
struct page *page, blk_opf_t opf, bool metadata_op);
@@ -1013,7 +1018,6 @@ struct mdu_array_info_s;
struct mdu_disk_info_s;
extern int mdp_major;
-extern struct workqueue_struct *md_bitmap_wq;
void md_autostart_arrays(int part);
int md_set_array_info(struct mddev *mddev, struct mdu_array_info_s *info);
int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info);
@@ -1034,6 +1038,12 @@ static inline bool mddev_is_dm(struct mddev *mddev)
return !mddev->gendisk;
}
+static inline bool raid_is_456(struct mddev *mddev)
+{
+ return mddev->level == ID_RAID4 || mddev->level == ID_RAID5 ||
+ mddev->level == ID_RAID6;
+}
+
static inline void mddev_trace_remap(struct mddev *mddev, struct bio *bio,
sector_t sector)
{
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 419139ad7663..e443e478645a 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -464,21 +464,16 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
zone = find_zone(conf, &start);
if (bio_end_sector(bio) > zone->zone_end) {
- struct bio *split = bio_split(bio,
- zone->zone_end - bio->bi_iter.bi_sector, GFP_NOIO,
- &mddev->bio_set);
-
- if (IS_ERR(split)) {
- bio->bi_status = errno_to_blk_status(PTR_ERR(split));
- bio_endio(bio);
+ bio = bio_submit_split_bioset(bio,
+ zone->zone_end - bio->bi_iter.bi_sector,
+ &mddev->bio_set);
+ if (!bio)
return;
- }
- bio_chain(split, bio);
- submit_bio_noacct(bio);
- bio = split;
+
end = zone->zone_end;
- } else
+ } else {
end = bio_end_sector(bio);
+ }
orig_end = end;
if (zone != conf->strip_zone)
@@ -613,17 +608,10 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
: sector_div(sector, chunk_sects));
if (sectors < bio_sectors(bio)) {
- struct bio *split = bio_split(bio, sectors, GFP_NOIO,
+ bio = bio_submit_split_bioset(bio, sectors,
&mddev->bio_set);
-
- if (IS_ERR(split)) {
- bio->bi_status = errno_to_blk_status(PTR_ERR(split));
- bio_endio(bio);
+ if (!bio)
return true;
- }
- bio_chain(split, bio);
- raid0_map_submit_bio(mddev, bio);
- bio = split;
}
raid0_map_submit_bio(mddev, bio);
diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c
index 52881e6032da..521625756128 100644
--- a/drivers/md/raid1-10.c
+++ b/drivers/md/raid1-10.c
@@ -140,7 +140,7 @@ static inline bool raid1_add_bio_to_plug(struct mddev *mddev, struct bio *bio,
* If bitmap is not enabled, it's safe to submit the io directly, and
* this can get optimal performance.
*/
- if (!mddev->bitmap_ops->enabled(mddev)) {
+ if (!md_bitmap_enabled(mddev, true)) {
raid1_submit_write(bio);
return true;
}
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index d30b82beeb92..592a40233004 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -167,7 +167,7 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
bio = bio_kmalloc(RESYNC_PAGES, gfp_flags);
if (!bio)
goto out_free_bio;
- bio_init(bio, NULL, bio->bi_inline_vecs, RESYNC_PAGES, 0);
+ bio_init_inline(bio, NULL, RESYNC_PAGES, 0);
r1_bio->bios[j] = bio;
}
/*
@@ -1317,7 +1317,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
struct raid1_info *mirror;
struct bio *read_bio;
int max_sectors;
- int rdisk, error;
+ int rdisk;
bool r1bio_existed = !!r1_bio;
/*
@@ -1366,7 +1366,8 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
(unsigned long long)r1_bio->sector,
mirror->rdev->bdev);
- if (test_bit(WriteMostly, &mirror->rdev->flags)) {
+ if (test_bit(WriteMostly, &mirror->rdev->flags) &&
+ md_bitmap_enabled(mddev, false)) {
/*
* Reading from a write-mostly device must take care not to
* over-take any writes that are 'behind'
@@ -1376,16 +1377,13 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
}
if (max_sectors < bio_sectors(bio)) {
- struct bio *split = bio_split(bio, max_sectors,
- gfp, &conf->bio_split);
-
- if (IS_ERR(split)) {
- error = PTR_ERR(split);
+ bio = bio_submit_split_bioset(bio, max_sectors,
+ &conf->bio_split);
+ if (!bio) {
+ set_bit(R1BIO_Returned, &r1_bio->state);
goto err_handle;
}
- bio_chain(split, bio);
- submit_bio_noacct(bio);
- bio = split;
+
r1_bio->master_bio = bio;
r1_bio->sectors = max_sectors;
}
@@ -1413,8 +1411,6 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
err_handle:
atomic_dec(&mirror->rdev->nr_pending);
- bio->bi_status = errno_to_blk_status(error);
- set_bit(R1BIO_Uptodate, &r1_bio->state);
raid_end_bio_io(r1_bio);
}
@@ -1452,12 +1448,36 @@ retry:
return true;
}
+static void raid1_start_write_behind(struct mddev *mddev, struct r1bio *r1_bio,
+ struct bio *bio)
+{
+ unsigned long max_write_behind = mddev->bitmap_info.max_write_behind;
+ struct md_bitmap_stats stats;
+ int err;
+
+ /* Behind writes rely on the bitmap, see bitmap_operations. */
+ if (!md_bitmap_enabled(mddev, false))
+ return;
+
+ err = mddev->bitmap_ops->get_stats(mddev->bitmap, &stats);
+ if (err)
+ return;
+
+ /* Don't do behind IO if reader is waiting, or there are too many. */
+ if (!stats.behind_wait && stats.behind_writes < max_write_behind)
+ alloc_behind_master_bio(r1_bio, bio);
+
+ if (test_bit(R1BIO_BehindIO, &r1_bio->state))
+ mddev->bitmap_ops->start_behind_write(mddev);
+}
+
static void raid1_write_request(struct mddev *mddev, struct bio *bio,
int max_write_sectors)
{
struct r1conf *conf = mddev->private;
struct r1bio *r1_bio;
- int i, disks, k, error;
+ int i, disks, k;
unsigned long flags;
int first_clone;
int max_sectors;
@@ -1561,10 +1581,8 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
* complexity of supporting that is not worth
* the benefit.
*/
- if (bio->bi_opf & REQ_ATOMIC) {
- error = -EIO;
+ if (bio->bi_opf & REQ_ATOMIC)
goto err_handle;
- }
good_sectors = first_bad - r1_bio->sector;
if (good_sectors < max_sectors)
@@ -1584,16 +1602,13 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
max_sectors = min_t(int, max_sectors,
BIO_MAX_VECS * (PAGE_SIZE >> 9));
if (max_sectors < bio_sectors(bio)) {
- struct bio *split = bio_split(bio, max_sectors,
- GFP_NOIO, &conf->bio_split);
-
- if (IS_ERR(split)) {
- error = PTR_ERR(split);
+ bio = bio_submit_split_bioset(bio, max_sectors,
+ &conf->bio_split);
+ if (!bio) {
+ set_bit(R1BIO_Returned, &r1_bio->state);
goto err_handle;
}
- bio_chain(split, bio);
- submit_bio_noacct(bio);
- bio = split;
+
r1_bio->master_bio = bio;
r1_bio->sectors = max_sectors;
}
@@ -1612,22 +1627,8 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
continue;
if (first_clone) {
- unsigned long max_write_behind =
- mddev->bitmap_info.max_write_behind;
- struct md_bitmap_stats stats;
- int err;
-
- /* do behind I/O ?
- * Not if there are too many, or cannot
- * allocate memory, or a reader on WriteMostly
- * is waiting for behind writes to flush */
- err = mddev->bitmap_ops->get_stats(mddev->bitmap, &stats);
- if (!err && write_behind && !stats.behind_wait &&
- stats.behind_writes < max_write_behind)
- alloc_behind_master_bio(r1_bio, bio);
-
- if (test_bit(R1BIO_BehindIO, &r1_bio->state))
- mddev->bitmap_ops->start_behind_write(mddev);
+ if (write_behind)
+ raid1_start_write_behind(mddev, r1_bio, bio);
first_clone = 0;
}
@@ -1683,8 +1684,6 @@ err_handle:
}
}
- bio->bi_status = errno_to_blk_status(error);
- set_bit(R1BIO_Uptodate, &r1_bio->state);
raid_end_bio_io(r1_bio);
}
@@ -2057,7 +2056,7 @@ static void abort_sync_write(struct mddev *mddev, struct r1bio *r1_bio)
/* make sure these bits don't get cleared. */
do {
- mddev->bitmap_ops->end_sync(mddev, s, &sync_blocks);
+ md_bitmap_end_sync(mddev, s, &sync_blocks);
s += sync_blocks;
sectors_to_go -= sync_blocks;
} while (sectors_to_go > 0);
@@ -2804,12 +2803,13 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
* We can find the current address in mddev->curr_resync
*/
if (mddev->curr_resync < max_sector) /* aborted */
- mddev->bitmap_ops->end_sync(mddev, mddev->curr_resync,
- &sync_blocks);
+ md_bitmap_end_sync(mddev, mddev->curr_resync,
+ &sync_blocks);
else /* completed sync */
conf->fullsync = 0;
- mddev->bitmap_ops->close_sync(mddev);
+ if (md_bitmap_enabled(mddev, false))
+ mddev->bitmap_ops->close_sync(mddev);
close_sync(conf);
if (mddev_is_clustered(mddev)) {
@@ -2829,7 +2829,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
/* before building a request, check if we can skip these blocks.
* This call to bitmap_start_sync doesn't actually record anything
*/
- if (!mddev->bitmap_ops->start_sync(mddev, sector_nr, &sync_blocks, true) &&
+ if (!md_bitmap_start_sync(mddev, sector_nr, &sync_blocks, true) &&
!conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
/* We can skip this block, and probably several more */
*skipped = 1;
@@ -2846,10 +2846,11 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
/* we are incrementing sector_nr below. To be safe, we check against
* sector_nr + two times RESYNC_SECTORS
*/
-
- mddev->bitmap_ops->cond_end_sync(mddev, sector_nr,
- mddev_is_clustered(mddev) &&
- (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
+ if (md_bitmap_enabled(mddev, false))
+ mddev->bitmap_ops->cond_end_sync(mddev, sector_nr,
+ mddev_is_clustered(mddev) &&
+ (sector_nr + 2 * RESYNC_SECTORS >
+ conf->cluster_sync_high));
if (raise_barrier(conf, sector_nr))
return 0;
@@ -3004,8 +3005,8 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
if (len == 0)
break;
if (sync_blocks == 0) {
- if (!mddev->bitmap_ops->start_sync(mddev, sector_nr,
- &sync_blocks, still_degraded) &&
+ if (!md_bitmap_start_sync(mddev, sector_nr,
+ &sync_blocks, still_degraded) &&
!conf->fullsync &&
!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
break;
@@ -3325,15 +3326,17 @@ static int raid1_resize(struct mddev *mddev, sector_t sectors)
* worth it.
*/
sector_t newsize = raid1_size(mddev, sectors, 0);
- int ret;
if (mddev->external_size &&
mddev->array_sectors > newsize)
return -EINVAL;
- ret = mddev->bitmap_ops->resize(mddev, newsize, 0, false);
- if (ret)
- return ret;
+ if (md_bitmap_enabled(mddev, false)) {
+ int ret = mddev->bitmap_ops->resize(mddev, newsize, 0);
+
+ if (ret)
+ return ret;
+ }
md_set_array_sectors(mddev, newsize);
if (sectors > mddev->dev_sectors &&
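
A note on the md hunks above: every optional bitmap_ops call is now gated on md_bitmap_enabled(), while the common paths go through the md_bitmap_*() wrappers instead of the ops table. A minimal sketch of the guard pattern, assuming only what this diff shows (md_bitmap_enabled(mddev, flush) and the two-argument bitmap_ops->resize()); example_resize() is a hypothetical caller:

    /*
     * Sketch only -- mirrors the guard pattern this series applies to
     * raid1, raid10 and raid5; not the actual kernel code.
     */
    static int example_resize(struct mddev *mddev, sector_t newsize)
    {
            if (md_bitmap_enabled(mddev, false)) {
                    int ret = mddev->bitmap_ops->resize(mddev, newsize, 0);

                    if (ret)
                            return ret;
            }

            md_set_array_sectors(mddev, newsize);
            return 0;
    }
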
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index d236ef179cfb..2ebe35aaa534 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -178,7 +178,9 @@ enum r1bio_state {
* any write was successful. Otherwise we call when
* any write-behind write succeeds, otherwise we call
* with failure when last write completes (and all failed).
- * Record that bi_end_io was called with this flag...
+ *
+ * And for bio_split errors, record that bi_end_io was called
+ * with this flag...
*/
R1BIO_Returned,
/* If a write for this request means we can clear some
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 9832eefb2f15..14dcd5142eb4 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -163,14 +163,14 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
bio = bio_kmalloc(RESYNC_PAGES, gfp_flags);
if (!bio)
goto out_free_bio;
- bio_init(bio, NULL, bio->bi_inline_vecs, RESYNC_PAGES, 0);
+ bio_init_inline(bio, NULL, RESYNC_PAGES, 0);
r10_bio->devs[j].bio = bio;
if (!conf->have_replacement)
continue;
bio = bio_kmalloc(RESYNC_PAGES, gfp_flags);
if (!bio)
goto out_free_bio;
- bio_init(bio, NULL, bio->bi_inline_vecs, RESYNC_PAGES, 0);
+ bio_init_inline(bio, NULL, RESYNC_PAGES, 0);
r10_bio->devs[j].repl_bio = bio;
}
/*
@@ -322,10 +322,12 @@ static void raid_end_bio_io(struct r10bio *r10_bio)
struct bio *bio = r10_bio->master_bio;
struct r10conf *conf = r10_bio->mddev->private;
- if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
- bio->bi_status = BLK_STS_IOERR;
+ if (!test_and_set_bit(R10BIO_Returned, &r10_bio->state)) {
+ if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
+ bio->bi_status = BLK_STS_IOERR;
+ bio_endio(bio);
+ }
- bio_endio(bio);
/*
* Wake up any possible resync thread that waits for the device
* to go idle.
@@ -1154,7 +1156,6 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
int slot = r10_bio->read_slot;
struct md_rdev *err_rdev = NULL;
gfp_t gfp = GFP_NOIO;
- int error;
if (slot >= 0 && r10_bio->devs[slot].rdev) {
/*
@@ -1203,17 +1204,15 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
rdev->bdev,
(unsigned long long)r10_bio->sector);
if (max_sectors < bio_sectors(bio)) {
- struct bio *split = bio_split(bio, max_sectors,
- gfp, &conf->bio_split);
- if (IS_ERR(split)) {
- error = PTR_ERR(split);
- goto err_handle;
- }
- bio_chain(split, bio);
allow_barrier(conf);
- submit_bio_noacct(bio);
+ bio = bio_submit_split_bioset(bio, max_sectors,
+ &conf->bio_split);
wait_barrier(conf, false);
- bio = split;
+ if (!bio) {
+ set_bit(R10BIO_Returned, &r10_bio->state);
+ goto err_handle;
+ }
+
r10_bio->master_bio = bio;
r10_bio->sectors = max_sectors;
}
@@ -1241,8 +1240,6 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
return;
err_handle:
atomic_dec(&rdev->nr_pending);
- bio->bi_status = errno_to_blk_status(error);
- set_bit(R10BIO_Uptodate, &r10_bio->state);
raid_end_bio_io(r10_bio);
}
@@ -1351,7 +1348,6 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
int i, k;
sector_t sectors;
int max_sectors;
- int error;
if ((mddev_is_clustered(mddev) &&
mddev->cluster_ops->area_resyncing(mddev, WRITE,
@@ -1465,10 +1461,8 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
* complexity of supporting that is not worth
* the benefit.
*/
- if (bio->bi_opf & REQ_ATOMIC) {
- error = -EIO;
+ if (bio->bi_opf & REQ_ATOMIC)
goto err_handle;
- }
good_sectors = first_bad - dev_sector;
if (good_sectors < max_sectors)
@@ -1489,17 +1483,15 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
r10_bio->sectors = max_sectors;
if (r10_bio->sectors < bio_sectors(bio)) {
- struct bio *split = bio_split(bio, r10_bio->sectors,
- GFP_NOIO, &conf->bio_split);
- if (IS_ERR(split)) {
- error = PTR_ERR(split);
- goto err_handle;
- }
- bio_chain(split, bio);
allow_barrier(conf);
- submit_bio_noacct(bio);
+ bio = bio_submit_split_bioset(bio, r10_bio->sectors,
+ &conf->bio_split);
wait_barrier(conf, false);
- bio = split;
+ if (!bio) {
+ set_bit(R10BIO_Returned, &r10_bio->state);
+ goto err_handle;
+ }
+
r10_bio->master_bio = bio;
}
@@ -1531,8 +1523,6 @@ err_handle:
}
}
- bio->bi_status = errno_to_blk_status(error);
- set_bit(R10BIO_Uptodate, &r10_bio->state);
raid_end_bio_io(r10_bio);
}
@@ -1679,7 +1669,9 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
bio_endio(bio);
return 0;
}
+
bio_chain(split, bio);
+ trace_block_split(split, bio->bi_iter.bi_sector);
allow_barrier(conf);
/* Resend the first split part */
submit_bio_noacct(split);
@@ -1694,7 +1686,9 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
bio_endio(bio);
return 0;
}
+
bio_chain(split, bio);
+ trace_block_split(split, bio->bi_iter.bi_sector);
allow_barrier(conf);
/* Resend the second split part */
submit_bio_noacct(bio);
@@ -3221,15 +3215,13 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
if (mddev->curr_resync < max_sector) { /* aborted */
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
- mddev->bitmap_ops->end_sync(mddev,
- mddev->curr_resync,
- &sync_blocks);
+ md_bitmap_end_sync(mddev, mddev->curr_resync,
+ &sync_blocks);
else for (i = 0; i < conf->geo.raid_disks; i++) {
sector_t sect =
raid10_find_virt(conf, mddev->curr_resync, i);
- mddev->bitmap_ops->end_sync(mddev, sect,
- &sync_blocks);
+ md_bitmap_end_sync(mddev, sect, &sync_blocks);
}
} else {
/* completed sync */
@@ -3249,7 +3241,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
}
conf->fullsync = 0;
}
- mddev->bitmap_ops->close_sync(mddev);
+ if (md_bitmap_enabled(mddev, false))
+ mddev->bitmap_ops->close_sync(mddev);
close_sync(conf);
*skipped = 1;
return sectors_skipped;
@@ -3351,9 +3344,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
* we only need to recover the block if it is set in
* the bitmap
*/
- must_sync = mddev->bitmap_ops->start_sync(mddev, sect,
- &sync_blocks,
- true);
+ must_sync = md_bitmap_start_sync(mddev, sect,
+ &sync_blocks, true);
if (sync_blocks < max_sync)
max_sync = sync_blocks;
if (!must_sync &&
@@ -3396,9 +3388,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
}
}
- must_sync = mddev->bitmap_ops->start_sync(mddev, sect,
- &sync_blocks, still_degraded);
-
+ md_bitmap_start_sync(mddev, sect, &sync_blocks,
+ still_degraded);
any_working = 0;
for (j=0; j<conf->copies;j++) {
int k;
@@ -3570,13 +3561,13 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
* safety reason, which ensures curr_resync_completed is
* updated in bitmap_cond_end_sync.
*/
- mddev->bitmap_ops->cond_end_sync(mddev, sector_nr,
+ if (md_bitmap_enabled(mddev, false))
+ mddev->bitmap_ops->cond_end_sync(mddev, sector_nr,
mddev_is_clustered(mddev) &&
(sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
- if (!mddev->bitmap_ops->start_sync(mddev, sector_nr,
- &sync_blocks,
- mddev->degraded) &&
+ if (!md_bitmap_start_sync(mddev, sector_nr, &sync_blocks,
+ mddev->degraded) &&
!conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED,
&mddev->recovery)) {
/* We can skip this block */
@@ -4226,7 +4217,6 @@ static int raid10_resize(struct mddev *mddev, sector_t sectors)
*/
struct r10conf *conf = mddev->private;
sector_t oldsize, size;
- int ret;
if (mddev->reshape_position != MaxSector)
return -EBUSY;
@@ -4240,9 +4230,12 @@ static int raid10_resize(struct mddev *mddev, sector_t sectors)
mddev->array_sectors > size)
return -EINVAL;
- ret = mddev->bitmap_ops->resize(mddev, size, 0, false);
- if (ret)
- return ret;
+ if (md_bitmap_enabled(mddev, false)) {
+ int ret = mddev->bitmap_ops->resize(mddev, size, 0);
+
+ if (ret)
+ return ret;
+ }
md_set_array_sectors(mddev, size);
if (sectors > mddev->dev_sectors &&
@@ -4508,8 +4501,9 @@ static int raid10_start_reshape(struct mddev *mddev)
oldsize = raid10_size(mddev, 0, 0);
newsize = raid10_size(mddev, 0, conf->geo.raid_disks);
- if (!mddev_is_clustered(mddev)) {
- ret = mddev->bitmap_ops->resize(mddev, newsize, 0, false);
+ if (!mddev_is_clustered(mddev) &&
+ md_bitmap_enabled(mddev, false)) {
+ ret = mddev->bitmap_ops->resize(mddev, newsize, 0);
if (ret)
goto abort;
else
@@ -4531,13 +4525,14 @@ static int raid10_start_reshape(struct mddev *mddev)
MD_FEATURE_RESHAPE_ACTIVE)) || (oldsize == newsize))
goto out;
- ret = mddev->bitmap_ops->resize(mddev, newsize, 0, false);
+ /* cluster can't be set up without a bitmap */
+ ret = mddev->bitmap_ops->resize(mddev, newsize, 0);
if (ret)
goto abort;
ret = mddev->cluster_ops->resize_bitmaps(mddev, newsize, oldsize);
if (ret) {
- mddev->bitmap_ops->resize(mddev, oldsize, 0, false);
+ mddev->bitmap_ops->resize(mddev, oldsize, 0);
goto abort;
}
}
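
The split-error handling above relies on a contract worth spelling out: bio_submit_split_bioset() completes the parent bio itself when splitting fails and returns NULL, so raid_end_bio_io() must end the master bio at most once. A hedged sketch of the resulting idiom, using only the flags and calls these hunks introduce:

    /*
     * Sketch: the Returned bit makes the master-bio completion
     * idempotent, so an error completion already performed by the
     * split helper is never repeated here.
     */
    if (!test_and_set_bit(R10BIO_Returned, &r10_bio->state)) {
            if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
                    bio->bi_status = BLK_STS_IOERR;
            bio_endio(bio);
    }
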
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
index 3f16ad6904a9..da00a55f7a55 100644
--- a/drivers/md/raid10.h
+++ b/drivers/md/raid10.h
@@ -165,6 +165,8 @@ enum r10bio_state {
* so that raid10d knows what to do with them.
*/
R10BIO_ReadError,
+/* For bio_split errors, record that bi_end_io was called. */
+ R10BIO_Returned,
/* If a write for this request means we can clear some
* known-bad-block records, we set this flag.
*/
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index e385ef1355e8..24b32a0c95b4 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -4097,7 +4097,8 @@ static int handle_stripe_dirtying(struct r5conf *conf,
int disks)
{
int rmw = 0, rcw = 0, i;
- sector_t resync_offset = conf->mddev->resync_offset;
+ struct mddev *mddev = conf->mddev;
+ sector_t resync_offset = mddev->resync_offset;
/* Check whether resync is now happening or should start.
* If yes, then the array is dirty (after unclean shutdown or
@@ -4116,6 +4117,12 @@ static int handle_stripe_dirtying(struct r5conf *conf,
pr_debug("force RCW rmw_level=%u, resync_offset=%llu sh->sector=%llu\n",
conf->rmw_level, (unsigned long long)resync_offset,
(unsigned long long)sh->sector);
+ } else if (mddev->bitmap_ops && mddev->bitmap_ops->blocks_synced &&
+ !mddev->bitmap_ops->blocks_synced(mddev, sh->sector)) {
+ /* The initial recovery is not done, must read everything */
+ rcw = 1; rmw = 2;
+ pr_debug("force RCW by lazy recovery, sh->sector=%llu\n",
+ (unsigned long long)sh->sector);
} else for (i = disks; i--; ) {
/* would I have to read this buffer for read_modify_write */
struct r5dev *dev = &sh->dev[i];
@@ -4148,7 +4155,7 @@ static int handle_stripe_dirtying(struct r5conf *conf,
set_bit(STRIPE_HANDLE, &sh->state);
if ((rmw < rcw || (rmw == rcw && conf->rmw_level == PARITY_PREFER_RMW)) && rmw > 0) {
/* prefer read-modify-write, but need to get some data */
- mddev_add_trace_msg(conf->mddev, "raid5 rmw %llu %d",
+ mddev_add_trace_msg(mddev, "raid5 rmw %llu %d",
sh->sector, rmw);
for (i = disks; i--; ) {
@@ -4227,8 +4234,8 @@ static int handle_stripe_dirtying(struct r5conf *conf,
set_bit(STRIPE_DELAYED, &sh->state);
}
}
- if (rcw && !mddev_is_dm(conf->mddev))
- blk_add_trace_msg(conf->mddev->gendisk->queue,
+ if (rcw && !mddev_is_dm(mddev))
+ blk_add_trace_msg(mddev->gendisk->queue,
"raid5 rcw %llu %d %d %d",
(unsigned long long)sh->sector, rcw, qread,
test_bit(STRIPE_DELAYED, &sh->state));
@@ -4698,10 +4705,21 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
}
} else if (test_bit(In_sync, &rdev->flags))
set_bit(R5_Insync, &dev->flags);
- else if (sh->sector + RAID5_STRIPE_SECTORS(conf) <= rdev->recovery_offset)
- /* in sync if before recovery_offset */
- set_bit(R5_Insync, &dev->flags);
- else if (test_bit(R5_UPTODATE, &dev->flags) &&
+ else if (sh->sector + RAID5_STRIPE_SECTORS(conf) <=
+ rdev->recovery_offset) {
+ /*
+ * in sync if:
+ * - normal IO, or
+ * - resync IO that is not lazy recovery
+ *
+ * For lazy recovery, we have to treat an rdev without
+ * In_sync as failed, to build the initial xor data.
+ */
+ if (!test_bit(STRIPE_SYNCING, &sh->state) ||
+ !test_bit(MD_RECOVERY_LAZY_RECOVER,
+ &conf->mddev->recovery))
+ set_bit(R5_Insync, &dev->flags);
+ } else if (test_bit(R5_UPTODATE, &dev->flags) &&
test_bit(R5_Expanded, &dev->flags))
/* If we've reshaped into here, we assume it is Insync.
* We will shortly update recovery_offset to make
@@ -5468,17 +5486,17 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio)
{
- struct bio *split;
sector_t sector = raid_bio->bi_iter.bi_sector;
unsigned chunk_sects = mddev->chunk_sectors;
unsigned sectors = chunk_sects - (sector & (chunk_sects-1));
if (sectors < bio_sectors(raid_bio)) {
struct r5conf *conf = mddev->private;
- split = bio_split(raid_bio, sectors, GFP_NOIO, &conf->bio_split);
- bio_chain(split, raid_bio);
- submit_bio_noacct(raid_bio);
- raid_bio = split;
+
+ raid_bio = bio_submit_split_bioset(raid_bio, sectors,
+ &conf->bio_split);
+ if (!raid_bio)
+ return NULL;
}
if (!raid5_read_one_chunk(mddev, raid_bio))
@@ -6492,11 +6510,12 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n
}
if (mddev->curr_resync < max_sector) /* aborted */
- mddev->bitmap_ops->end_sync(mddev, mddev->curr_resync,
- &sync_blocks);
+ md_bitmap_end_sync(mddev, mddev->curr_resync,
+ &sync_blocks);
else /* completed sync */
conf->fullsync = 0;
- mddev->bitmap_ops->close_sync(mddev);
+ if (md_bitmap_enabled(mddev, false))
+ mddev->bitmap_ops->close_sync(mddev);
return 0;
}
@@ -6525,8 +6544,7 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n
}
if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
!conf->fullsync &&
- !mddev->bitmap_ops->start_sync(mddev, sector_nr, &sync_blocks,
- true) &&
+ !md_bitmap_start_sync(mddev, sector_nr, &sync_blocks, true) &&
sync_blocks >= RAID5_STRIPE_SECTORS(conf)) {
/* we can skip this block, and probably more */
do_div(sync_blocks, RAID5_STRIPE_SECTORS(conf));
@@ -6535,7 +6553,8 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n
return sync_blocks * RAID5_STRIPE_SECTORS(conf);
}
- mddev->bitmap_ops->cond_end_sync(mddev, sector_nr, false);
+ if (md_bitmap_enabled(mddev, false))
+ mddev->bitmap_ops->cond_end_sync(mddev, sector_nr, false);
sh = raid5_get_active_stripe(conf, NULL, sector_nr,
R5_GAS_NOBLOCK);
@@ -6557,9 +6576,7 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n
still_degraded = true;
}
- mddev->bitmap_ops->start_sync(mddev, sector_nr, &sync_blocks,
- still_degraded);
-
+ md_bitmap_start_sync(mddev, sector_nr, &sync_blocks, still_degraded);
set_bit(STRIPE_SYNC_REQUESTED, &sh->state);
set_bit(STRIPE_HANDLE, &sh->state);
@@ -6763,7 +6780,8 @@ static void raid5d(struct md_thread *thread)
/* Now is a good time to flush some bitmap updates */
conf->seq_flush++;
spin_unlock_irq(&conf->device_lock);
- mddev->bitmap_ops->unplug(mddev, true);
+ if (md_bitmap_enabled(mddev, true))
+ mddev->bitmap_ops->unplug(mddev, true);
spin_lock_irq(&conf->device_lock);
conf->seq_write = conf->seq_flush;
activate_bit_delay(conf, conf->temp_inactive_list);
@@ -8313,7 +8331,6 @@ static int raid5_resize(struct mddev *mddev, sector_t sectors)
*/
sector_t newsize;
struct r5conf *conf = mddev->private;
- int ret;
if (raid5_has_log(conf) || raid5_has_ppl(conf))
return -EINVAL;
@@ -8323,9 +8340,12 @@ static int raid5_resize(struct mddev *mddev, sector_t sectors)
mddev->array_sectors > newsize)
return -EINVAL;
- ret = mddev->bitmap_ops->resize(mddev, sectors, 0, false);
- if (ret)
- return ret;
+ if (md_bitmap_enabled(mddev, false)) {
+ int ret = mddev->bitmap_ops->resize(mddev, sectors, 0);
+
+ if (ret)
+ return ret;
+ }
md_set_array_sectors(mddev, newsize);
if (sectors > mddev->dev_sectors &&
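
For the handle_stripe_dirtying() change above: until the initial (lazy) recovery has covered a stripe, its parity cannot be trusted, so read-modify-write must be avoided in favour of reconstruct-write. A sketch of the decision, assuming blocks_synced() is an optional bitmap op as the NULL checks in the hunk suggest:

    /*
     * Sketch: force reconstruct-write while lazy recovery has not
     * reached this stripe. With rmw = 2 > rcw = 1, the RMW branch
     * (taken only when rmw < rcw, or rmw == rcw with RMW preferred)
     * is never chosen for such stripes.
     */
    if (mddev->bitmap_ops && mddev->bitmap_ops->blocks_synced &&
        !mddev->bitmap_ops->blocks_synced(mddev, sh->sector)) {
            rcw = 1;
            rmw = 2;
    }
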
diff --git a/drivers/media/cec/core/cec-core.c b/drivers/media/cec/core/cec-core.c
index e10bd588a586..d7259599029f 100644
--- a/drivers/media/cec/core/cec-core.c
+++ b/drivers/media/cec/core/cec-core.c
@@ -439,6 +439,6 @@ static void __exit cec_devnode_exit(void)
subsys_initcall(cec_devnode_init);
module_exit(cec_devnode_exit)
-MODULE_AUTHOR("Hans Verkuil <hansverk@cisco.com>");
+MODULE_AUTHOR("Hans Verkuil <hverkuil@kernel.org>");
MODULE_DESCRIPTION("Device node registration for cec drivers");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/cec/platform/cec-gpio/cec-gpio.c b/drivers/media/cec/platform/cec-gpio/cec-gpio.c
index 3c27789d8657..842555ed42c7 100644
--- a/drivers/media/cec/platform/cec-gpio/cec-gpio.c
+++ b/drivers/media/cec/platform/cec-gpio/cec-gpio.c
@@ -291,6 +291,6 @@ static struct platform_driver cec_gpio_pdrv = {
module_platform_driver(cec_gpio_pdrv);
-MODULE_AUTHOR("Hans Verkuil <hansverk@cisco.com>");
+MODULE_AUTHOR("Hans Verkuil <hverkuil@kernel.org>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CEC GPIO driver");
diff --git a/drivers/media/cec/platform/stm32/stm32-cec.c b/drivers/media/cec/platform/stm32/stm32-cec.c
index fea2d65acffc..1ec0cece0a5b 100644
--- a/drivers/media/cec/platform/stm32/stm32-cec.c
+++ b/drivers/media/cec/platform/stm32/stm32-cec.c
@@ -248,7 +248,6 @@ static const struct regmap_config stm32_cec_regmap_cfg = {
.val_bits = 32,
.reg_stride = sizeof(u32),
.max_register = 0x14,
- .fast_io = true,
};
static int stm32_cec_probe(struct platform_device *pdev)
diff --git a/drivers/media/cec/usb/extron-da-hd-4k-plus/Makefile b/drivers/media/cec/usb/extron-da-hd-4k-plus/Makefile
index 2e8f7f60263f..08d58524419f 100644
--- a/drivers/media/cec/usb/extron-da-hd-4k-plus/Makefile
+++ b/drivers/media/cec/usb/extron-da-hd-4k-plus/Makefile
@@ -1,8 +1,2 @@
extron-da-hd-4k-plus-cec-objs := extron-da-hd-4k-plus.o cec-splitter.o
obj-$(CONFIG_USB_EXTRON_DA_HD_4K_PLUS_CEC) := extron-da-hd-4k-plus-cec.o
-
-all:
- $(MAKE) -C $(KDIR) M=$(shell pwd) modules
-
-install:
- $(MAKE) -C $(KDIR) M=$(shell pwd) modules_install
diff --git a/drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.c b/drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.c
index 41d019b01ec0..e2eff17952ab 100644
--- a/drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.c
+++ b/drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.c
@@ -28,7 +28,7 @@
#include "extron-da-hd-4k-plus.h"
-MODULE_AUTHOR("Hans Verkuil <hansverk@cisco.com>");
+MODULE_AUTHOR("Hans Verkuil <hverkuil@kernel.org>");
MODULE_DESCRIPTION("Extron DA HD 4K PLUS HDMI CEC driver");
MODULE_LICENSE("GPL");
@@ -1252,7 +1252,7 @@ static int extron_s_output(struct file *file, void *priv, unsigned int o)
return o ? -EINVAL : 0;
}
-static int extron_g_edid(struct file *file, void *_fh,
+static int extron_g_edid(struct file *file, void *priv,
struct v4l2_edid *edid)
{
struct extron_port *port = video_drvdata(file);
@@ -1280,7 +1280,7 @@ static int extron_g_edid(struct file *file, void *_fh,
return 0;
}
-static int extron_s_edid(struct file *file, void *_fh, struct v4l2_edid *edid)
+static int extron_s_edid(struct file *file, void *priv, struct v4l2_edid *edid)
{
struct extron_port *port = video_drvdata(file);
diff --git a/drivers/media/cec/usb/pulse8/pulse8-cec.c b/drivers/media/cec/usb/pulse8/pulse8-cec.c
index 171366fe3544..60569f1670fe 100644
--- a/drivers/media/cec/usb/pulse8/pulse8-cec.c
+++ b/drivers/media/cec/usb/pulse8/pulse8-cec.c
@@ -2,7 +2,7 @@
/*
* Pulse Eight HDMI CEC driver
*
- * Copyright 2016 Hans Verkuil <hverkuil@xs4all.nl
+ * Copyright 2016 Hans Verkuil <hverkuil@kernel.org>
*/
/*
@@ -41,7 +41,7 @@
#include <media/cec.h>
-MODULE_AUTHOR("Hans Verkuil <hverkuil@xs4all.nl>");
+MODULE_AUTHOR("Hans Verkuil <hverkuil@kernel.org>");
MODULE_DESCRIPTION("Pulse Eight HDMI CEC driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/cec/usb/rainshadow/rainshadow-cec.c b/drivers/media/cec/usb/rainshadow/rainshadow-cec.c
index 6f8d6797c614..08f58456d682 100644
--- a/drivers/media/cec/usb/rainshadow/rainshadow-cec.c
+++ b/drivers/media/cec/usb/rainshadow/rainshadow-cec.c
@@ -2,7 +2,7 @@
/*
* RainShadow Tech HDMI CEC driver
*
- * Copyright 2016 Hans Verkuil <hverkuil@xs4all.nl
+ * Copyright 2016 Hans Verkuil <hverkuil@kernel.org>
*/
/*
@@ -31,7 +31,7 @@
#include <media/cec.h>
-MODULE_AUTHOR("Hans Verkuil <hverkuil@xs4all.nl>");
+MODULE_AUTHOR("Hans Verkuil <hverkuil@kernel.org>");
MODULE_DESCRIPTION("RainShadow Tech HDMI CEC driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/common/b2c2/flexcop-sram.c b/drivers/media/common/b2c2/flexcop-sram.c
index d97962eb0112..dba03769f263 100644
--- a/drivers/media/common/b2c2/flexcop-sram.c
+++ b/drivers/media/common/b2c2/flexcop-sram.c
@@ -352,7 +352,7 @@ static int flexcop_sram_detect(struct flexcop_device *fc)
sram_set_size(adapter, 0x10000);
sram_init(adapter);
write_reg_dw(adapter, 0x208, tmp);
- dprintk("%s: SRAM detection failed. Set to 32K \n", __func__);
+ dprintk("%s: SRAM detection failed. Set to 32K\n", __func__);
return 0;
}
diff --git a/drivers/media/common/b2c2/flexcop.c b/drivers/media/common/b2c2/flexcop.c
index e7a88a2d248c..8506de48ba45 100644
--- a/drivers/media/common/b2c2/flexcop.c
+++ b/drivers/media/common/b2c2/flexcop.c
@@ -170,7 +170,7 @@ static void flexcop_reset(struct flexcop_device *fc)
flexcop_ibi_value v210, v204;
/* reset the flexcop itself */
- fc->write_ibi_reg(fc,ctrl_208,ibi_zero);
+ fc->write_ibi_reg(fc, ctrl_208, ibi_zero);
v210.raw = 0;
v210.sw_reset_210.reset_block_000 = 1;
@@ -183,17 +183,17 @@ static void flexcop_reset(struct flexcop_device *fc)
v210.sw_reset_210.reset_block_700 = 1;
v210.sw_reset_210.Block_reset_enable = 0xb2;
v210.sw_reset_210.Special_controls = 0xc259;
- fc->write_ibi_reg(fc,sw_reset_210,v210);
+ fc->write_ibi_reg(fc, sw_reset_210, v210);
msleep(1);
/* reset the peripheral devices */
- v204 = fc->read_ibi_reg(fc,misc_204);
+ v204 = fc->read_ibi_reg(fc, misc_204);
v204.misc_204.Per_reset_sig = 0;
- fc->write_ibi_reg(fc,misc_204,v204);
+ fc->write_ibi_reg(fc, misc_204, v204);
msleep(1);
v204.misc_204.Per_reset_sig = 1;
- fc->write_ibi_reg(fc,misc_204,v204);
+ fc->write_ibi_reg(fc, misc_204, v204);
}
void flexcop_reset_block_300(struct flexcop_device *fc)
@@ -202,13 +202,13 @@ void flexcop_reset_block_300(struct flexcop_device *fc)
v210 = fc->read_ibi_reg(fc, sw_reset_210);
deb_rdump("208: %08x, 210: %08x\n", v208_save.raw, v210.raw);
- fc->write_ibi_reg(fc,ctrl_208,ibi_zero);
+ fc->write_ibi_reg(fc, ctrl_208, ibi_zero);
v210.sw_reset_210.reset_block_300 = 1;
v210.sw_reset_210.Block_reset_enable = 0xb2;
- fc->write_ibi_reg(fc,sw_reset_210,v210);
- fc->write_ibi_reg(fc,ctrl_208,v208_save);
+ fc->write_ibi_reg(fc, sw_reset_210, v210);
+ fc->write_ibi_reg(fc, ctrl_208, v208_save);
}
struct flexcop_device *flexcop_device_kmalloc(size_t bus_specific_len)
@@ -266,8 +266,8 @@ int flexcop_device_initialize(struct flexcop_device *fc)
if (fc->get_mac_addr(fc, 0) == 0) {
u8 *b = fc->dvb_adapter.proposed_mac;
info("MAC address = %pM", b);
- flexcop_set_mac_filter(fc,b);
- flexcop_mac_filter_ctrl(fc,1);
+ flexcop_set_mac_filter(fc, b);
+ flexcop_mac_filter_ctrl(fc, 1);
} else
warn("reading of MAC address failed.\n");
@@ -275,7 +275,7 @@ int flexcop_device_initialize(struct flexcop_device *fc)
if (ret)
goto error;
- flexcop_device_name(fc,"initialization of","complete");
+ flexcop_device_name(fc, "initialization of", "complete");
return 0;
error:
diff --git a/drivers/media/common/cx2341x.c b/drivers/media/common/cx2341x.c
index 1392bd6b0026..1ee159ef7f38 100644
--- a/drivers/media/common/cx2341x.c
+++ b/drivers/media/common/cx2341x.c
@@ -2,7 +2,7 @@
/*
* cx2341x - generic code for cx23415/6/8 based devices
*
- * Copyright (C) 2006 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2006 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c
index 1cd26faee503..d911021c1bb0 100644
--- a/drivers/media/common/videobuf2/videobuf2-v4l2.c
+++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c
@@ -973,18 +973,14 @@ EXPORT_SYMBOL_GPL(vb2_queue_change_type);
__poll_t vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
{
- struct video_device *vfd = video_devdata(file);
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
__poll_t res;
res = vb2_core_poll(q, file, wait);
- if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
- struct v4l2_fh *fh = file->private_data;
-
- poll_wait(file, &fh->wait, wait);
- if (v4l2_event_pending(fh))
- res |= EPOLLPRI;
- }
+ poll_wait(file, &fh->wait, wait);
+ if (v4l2_event_pending(fh))
+ res |= EPOLLPRI;
return res;
}
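
The vb2_poll() change above assumes every vb2 video device now uses struct v4l2_fh, which is why the V4L2_FL_USES_V4L2_FH test disappears and file_to_v4l2_fh() replaces the open-coded private_data access. A sketch of the simplified poll path as the hunk leaves it (file_to_v4l2_fh() is taken on trust from this diff, not documented here):

    struct v4l2_fh *fh = file_to_v4l2_fh(file);
    __poll_t res = vb2_core_poll(q, file, wait);

    /* Event polling is now unconditional: every vb2 device has a v4l2_fh. */
    poll_wait(file, &fh->wait, wait);
    if (v4l2_event_pending(fh))
            res |= EPOLLPRI;
    return res;
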
diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig
index 2ef2ff2a38ff..bcc97ca86ed5 100644
--- a/drivers/media/dvb-frontends/Kconfig
+++ b/drivers/media/dvb-frontends/Kconfig
@@ -163,7 +163,7 @@ config DVB_CX24123
A DVB-S tuner module. Say Y when you want to support this frontend.
config DVB_DS3000
- tristate "Montage Tehnology DS3000 based"
+ tristate "Montage Technology DS3000 based"
depends on DVB_CORE && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
@@ -270,7 +270,7 @@ config DVB_TDA826X
A DVB-S silicon tuner module. Say Y when you want to support this tuner.
config DVB_TS2020
- tristate "Montage Tehnology TS2020 based tuners"
+ tristate "Montage Technology TS2020 based tuners"
depends on DVB_CORE && I2C
select REGMAP_I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index 6237fe804a5c..cdd7ba5da0d5 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -27,7 +27,7 @@ config VIDEO_IR_I2C
menuconfig VIDEO_CAMERA_SENSOR
bool "Camera sensor devices"
- depends on MEDIA_CAMERA_SUPPORT && I2C
+ depends on MEDIA_CAMERA_SUPPORT && I2C && HAVE_CLK
select MEDIA_CONTROLLER
select V4L2_FWNODE
select VIDEO_V4L2_SUBDEV_API
@@ -70,6 +70,16 @@ config VIDEO_GC0308
To compile this driver as a module, choose M here: the
module will be called gc0308.
+config VIDEO_GC0310
+ tristate "GalaxyCore GC0310 sensor support"
+ select V4L2_CCI_I2C
+ help
+ This is a Video4Linux2 sensor-level driver for the GalaxyCore
+ GC0310 0.3MP sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called gc0310.
+
config VIDEO_GC05A2
tristate "GalaxyCore gc05a2 sensor support"
select V4L2_CCI_I2C
@@ -317,6 +327,7 @@ config VIDEO_MT9V011
config VIDEO_MT9V032
tristate "Micron MT9V032 sensor support"
+ depends on OF
select REGMAP_I2C
help
This is a Video4Linux2 sensor driver for the Micron
@@ -340,6 +351,16 @@ config VIDEO_OG01A1B
To compile this driver as a module, choose M here: the
module will be called og01a1b.
+config VIDEO_OG0VE1B
+ tristate "OmniVision OG0VE1B sensor support"
+ select V4L2_CCI_I2C
+ help
+ This is a Video4Linux2 sensor driver for the OmniVision
+ OG0VE1B camera.
+
+ To compile this driver as a module, choose M here: the
+ module will be called og0ve1b.
+
config VIDEO_OV01A10
tristate "OmniVision OV01A10 sensor support"
help
@@ -446,6 +467,16 @@ config VIDEO_OV2685
To compile this driver as a module, choose M here: the
module will be called ov2685.
+config VIDEO_OV2735
+ tristate "OmniVision OV2735 sensor support"
+ select V4L2_CCI_I2C
+ help
+ This is a Video4Linux2 sensor driver for the OmniVision
+ OV2735 camera.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ov2735.
+
config VIDEO_OV2740
tristate "OmniVision OV2740 sensor support"
depends on ACPI || COMPILE_TEST
@@ -542,24 +573,25 @@ config VIDEO_OV5695
To compile this driver as a module, choose M here: the
module will be called ov5695.
-config VIDEO_OV64A40
- tristate "OmniVision OV64A40 sensor support"
+config VIDEO_OV6211
+ tristate "OmniVision OV6211 sensor support"
select V4L2_CCI_I2C
help
This is a Video4Linux2 sensor driver for the OmniVision
- OV64A40 camera.
+ OV6211 camera.
To compile this driver as a module, choose M here: the
- module will be called ov64a40.
+ module will be called ov6211.
-config VIDEO_OV6650
- tristate "OmniVision OV6650 sensor support"
+config VIDEO_OV64A40
+ tristate "OmniVision OV64A40 sensor support"
+ select V4L2_CCI_I2C
help
This is a Video4Linux2 sensor driver for the OmniVision
- OV6650 camera.
+ OV64A40 camera.
To compile this driver as a module, choose M here: the
- module will be called ov6650.
+ module will be called ov64a40.
config VIDEO_OV7251
tristate "OmniVision OV7251 sensor support"
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index 5873d29433ee..57cdd8dc96f6 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -38,6 +38,7 @@ obj-$(CONFIG_VIDEO_DW9768) += dw9768.o
obj-$(CONFIG_VIDEO_DW9807_VCM) += dw9807-vcm.o
obj-$(CONFIG_VIDEO_ET8EK8) += et8ek8/
obj-$(CONFIG_VIDEO_GC0308) += gc0308.o
+obj-$(CONFIG_VIDEO_GC0310) += gc0310.o
obj-$(CONFIG_VIDEO_GC05A2) += gc05a2.o
obj-$(CONFIG_VIDEO_GC08A3) += gc08a3.o
obj-$(CONFIG_VIDEO_GC2145) += gc2145.o
@@ -81,6 +82,7 @@ obj-$(CONFIG_VIDEO_MT9V011) += mt9v011.o
obj-$(CONFIG_VIDEO_MT9V032) += mt9v032.o
obj-$(CONFIG_VIDEO_MT9V111) += mt9v111.o
obj-$(CONFIG_VIDEO_OG01A1B) += og01a1b.o
+obj-$(CONFIG_VIDEO_OG0VE1B) += og0ve1b.o
obj-$(CONFIG_VIDEO_OV01A10) += ov01a10.o
obj-$(CONFIG_VIDEO_OV02A10) += ov02a10.o
obj-$(CONFIG_VIDEO_OV02C10) += ov02c10.o
@@ -93,6 +95,7 @@ obj-$(CONFIG_VIDEO_OV2640) += ov2640.o
obj-$(CONFIG_VIDEO_OV2659) += ov2659.o
obj-$(CONFIG_VIDEO_OV2680) += ov2680.o
obj-$(CONFIG_VIDEO_OV2685) += ov2685.o
+obj-$(CONFIG_VIDEO_OV2735) += ov2735.o
obj-$(CONFIG_VIDEO_OV2740) += ov2740.o
obj-$(CONFIG_VIDEO_OV4689) += ov4689.o
obj-$(CONFIG_VIDEO_OV5640) += ov5640.o
@@ -103,8 +106,8 @@ obj-$(CONFIG_VIDEO_OV5670) += ov5670.o
obj-$(CONFIG_VIDEO_OV5675) += ov5675.o
obj-$(CONFIG_VIDEO_OV5693) += ov5693.o
obj-$(CONFIG_VIDEO_OV5695) += ov5695.o
+obj-$(CONFIG_VIDEO_OV6211) += ov6211.o
obj-$(CONFIG_VIDEO_OV64A40) += ov64a40.o
-obj-$(CONFIG_VIDEO_OV6650) += ov6650.o
obj-$(CONFIG_VIDEO_OV7251) += ov7251.o
obj-$(CONFIG_VIDEO_OV7640) += ov7640.o
obj-$(CONFIG_VIDEO_OV7670) += ov7670.o
diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c
index 5d90b8ab9b6d..378f4e6af12c 100644
--- a/drivers/media/i2c/adv7180.c
+++ b/drivers/media/i2c/adv7180.c
@@ -214,7 +214,6 @@ struct adv7180_state {
struct gpio_desc *pwdn_gpio;
struct gpio_desc *rst_gpio;
v4l2_std_id curr_norm;
- bool powered;
bool streaming;
u8 input;
@@ -274,6 +273,38 @@ static int adv7180_vpp_write(struct adv7180_state *state, unsigned int reg,
return i2c_smbus_write_byte_data(state->vpp_client, reg, value);
}
+static int adv7180_set_power(struct adv7180_state *state, bool on)
+{
+ u8 val;
+ int ret;
+
+ if (on)
+ val = ADV7180_PWR_MAN_ON;
+ else
+ val = ADV7180_PWR_MAN_OFF;
+
+ ret = adv7180_write(state, ADV7180_REG_PWR_MAN, val);
+ if (ret)
+ return ret;
+
+ if (state->chip_info->flags & ADV7180_FLAG_MIPI_CSI2) {
+ if (on) {
+ adv7180_csi_write(state, 0xDE, 0x02);
+ adv7180_csi_write(state, 0xD2, 0xF7);
+ adv7180_csi_write(state, 0xD8, 0x65);
+ adv7180_csi_write(state, 0xE0, 0x09);
+ adv7180_csi_write(state, 0x2C, 0x00);
+ if (state->field == V4L2_FIELD_NONE)
+ adv7180_csi_write(state, 0x1D, 0x80);
+ adv7180_csi_write(state, 0x00, 0x00);
+ } else {
+ adv7180_csi_write(state, 0x00, 0x80);
+ }
+ }
+
+ return 0;
+}
+
static v4l2_std_id adv7180_std_to_v4l2(u8 status1)
{
/* in case V4L2_IN_ST_NO_SIGNAL */
@@ -357,32 +388,27 @@ static inline struct adv7180_state *to_state(struct v4l2_subdev *sd)
static int adv7180_querystd(struct v4l2_subdev *sd, v4l2_std_id *std)
{
struct adv7180_state *state = to_state(sd);
- int err = mutex_lock_interruptible(&state->mutex);
- if (err)
- return err;
-
- if (state->streaming) {
- err = -EBUSY;
- goto unlock;
- }
+ int ret;
- err = adv7180_set_video_standard(state,
- ADV7180_STD_AD_PAL_BG_NTSC_J_SECAM);
- if (err)
- goto unlock;
+ guard(mutex)(&state->mutex);
- msleep(100);
- __adv7180_status(state, NULL, std);
+ /*
+ * We can't sample the standard while the device is streaming, as that
+ * would interfere with the capture session: the VID_SEL reg is touched.
+ */
+ if (state->streaming)
+ return -EBUSY;
- err = v4l2_std_to_adv7180(state->curr_norm);
- if (err < 0)
- goto unlock;
+ /* Set the standard to autodetect PAL B/G/H/I/D, NTSC J or SECAM */
+ ret = adv7180_set_video_standard(state,
+ ADV7180_STD_AD_PAL_BG_NTSC_J_SECAM);
+ if (ret)
+ return ret;
- err = adv7180_set_video_standard(state, err);
+ /* Allow some time for the autodetection to run. */
+ msleep(100);
-unlock:
- mutex_unlock(&state->mutex);
- return err;
+ return __adv7180_status(state, NULL, std);
}
static int adv7180_s_routing(struct v4l2_subdev *sd, u32 input,
@@ -437,22 +463,18 @@ static int adv7180_program_std(struct adv7180_state *state)
static int adv7180_s_std(struct v4l2_subdev *sd, v4l2_std_id std)
{
struct adv7180_state *state = to_state(sd);
- int ret = mutex_lock_interruptible(&state->mutex);
+ int ret;
- if (ret)
- return ret;
+ guard(mutex)(&state->mutex);
/* Make sure we can support this std */
ret = v4l2_std_to_adv7180(std);
if (ret < 0)
- goto out;
+ return ret;
state->curr_norm = std;
- ret = adv7180_program_std(state);
-out:
- mutex_unlock(&state->mutex);
- return ret;
+ return 0;
}
static int adv7180_g_std(struct v4l2_subdev *sd, v4l2_std_id *norm)
@@ -514,55 +536,6 @@ static void adv7180_set_reset_pin(struct adv7180_state *state, bool on)
}
}
-static int adv7180_set_power(struct adv7180_state *state, bool on)
-{
- u8 val;
- int ret;
-
- if (on)
- val = ADV7180_PWR_MAN_ON;
- else
- val = ADV7180_PWR_MAN_OFF;
-
- ret = adv7180_write(state, ADV7180_REG_PWR_MAN, val);
- if (ret)
- return ret;
-
- if (state->chip_info->flags & ADV7180_FLAG_MIPI_CSI2) {
- if (on) {
- adv7180_csi_write(state, 0xDE, 0x02);
- adv7180_csi_write(state, 0xD2, 0xF7);
- adv7180_csi_write(state, 0xD8, 0x65);
- adv7180_csi_write(state, 0xE0, 0x09);
- adv7180_csi_write(state, 0x2C, 0x00);
- if (state->field == V4L2_FIELD_NONE)
- adv7180_csi_write(state, 0x1D, 0x80);
- adv7180_csi_write(state, 0x00, 0x00);
- } else {
- adv7180_csi_write(state, 0x00, 0x80);
- }
- }
-
- return 0;
-}
-
-static int adv7180_s_power(struct v4l2_subdev *sd, int on)
-{
- struct adv7180_state *state = to_state(sd);
- int ret;
-
- ret = mutex_lock_interruptible(&state->mutex);
- if (ret)
- return ret;
-
- ret = adv7180_set_power(state, on);
- if (ret == 0)
- state->powered = on;
-
- mutex_unlock(&state->mutex);
- return ret;
-}
-
static const char * const test_pattern_menu[] = {
"Single color",
"Color bars",
@@ -601,11 +574,11 @@ static int adv7180_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct v4l2_subdev *sd = to_adv7180_sd(ctrl);
struct adv7180_state *state = to_state(sd);
- int ret = mutex_lock_interruptible(&state->mutex);
+ int ret = 0;
int val;
- if (ret)
- return ret;
+ lockdep_assert_held(&state->mutex);
+
val = ctrl->val;
switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
@@ -647,7 +620,6 @@ static int adv7180_s_ctrl(struct v4l2_ctrl *ctrl)
ret = -EINVAL;
}
- mutex_unlock(&state->mutex);
return ret;
}
@@ -668,6 +640,7 @@ static const struct v4l2_ctrl_config adv7180_ctrl_fast_switch = {
static int adv7180_init_controls(struct adv7180_state *state)
{
v4l2_ctrl_handler_init(&state->ctrl_hdl, 4);
+ state->ctrl_hdl.lock = &state->mutex;
v4l2_ctrl_new_std(&state->ctrl_hdl, &adv7180_ctrl_ops,
V4L2_CID_BRIGHTNESS, ADV7180_BRI_MIN,
@@ -700,7 +673,6 @@ static int adv7180_init_controls(struct adv7180_state *state)
v4l2_ctrl_handler_free(&state->ctrl_hdl);
return err;
}
- v4l2_ctrl_handler_setup(&state->ctrl_hdl);
return 0;
}
@@ -812,12 +784,7 @@ static int adv7180_set_pad_format(struct v4l2_subdev *sd,
ret = adv7180_mbus_fmt(sd, &format->format);
if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
- if (state->field != format->format.field) {
- state->field = format->format.field;
- adv7180_set_power(state, false);
- adv7180_set_field_mode(state);
- adv7180_set_power(state, true);
- }
+ state->field = format->format.field;
} else {
framefmt = v4l2_subdev_state_get_format(sd_state, 0);
*framefmt = format->format;
@@ -874,23 +841,117 @@ static int adv7180_g_tvnorms(struct v4l2_subdev *sd, v4l2_std_id *norm)
return 0;
}
-static int adv7180_s_stream(struct v4l2_subdev *sd, int enable)
+static int init_device(struct adv7180_state *state)
{
- struct adv7180_state *state = to_state(sd);
int ret;
- /* It's always safe to stop streaming, no need to take the lock */
- if (!enable) {
- state->streaming = enable;
- return 0;
+ lockdep_assert_held(&state->mutex);
+
+ ret = adv7180_program_std(state);
+ if (ret)
+ return ret;
+
+ adv7180_set_field_mode(state);
+
+ __v4l2_ctrl_handler_setup(&state->ctrl_hdl);
+
+ return ret;
+}
+
+static int adv7180_reset_device(struct adv7180_state *state)
+{
+ int ret;
+
+ lockdep_assert_held(&state->mutex);
+
+ adv7180_set_power_pin(state, true);
+ adv7180_set_reset_pin(state, false);
+
+ adv7180_write(state, ADV7180_REG_PWR_MAN, ADV7180_PWR_MAN_RES);
+ usleep_range(5000, 10000);
+
+ /*
+ * If the device's decoder is powered on after reset, power it off so
+ * the device can be configured.
+ */
+ if (state->chip_info->flags & ADV7180_FLAG_RESET_POWERED)
+ adv7180_set_power(state, false);
+
+ ret = state->chip_info->init(state);
+ if (ret)
+ return ret;
+
+ ret = init_device(state);
+ if (ret)
+ return ret;
+
+ /* register for interrupts */
+ if (state->irq > 0) {
+ /* config the Interrupt pin to be active low */
+ ret = adv7180_write(state, ADV7180_REG_ICONF1,
+ ADV7180_ICONF1_ACTIVE_LOW |
+ ADV7180_ICONF1_PSYNC_ONLY);
+ if (ret < 0)
+ return ret;
+
+ ret = adv7180_write(state, ADV7180_REG_IMR1, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = adv7180_write(state, ADV7180_REG_IMR2, 0);
+ if (ret < 0)
+ return ret;
+
+ /* enable AD change interrupts */
+ ret = adv7180_write(state, ADV7180_REG_IMR3,
+ ADV7180_IRQ3_AD_CHANGE);
+ if (ret < 0)
+ return ret;
+
+ ret = adv7180_write(state, ADV7180_REG_IMR4, 0);
+ if (ret < 0)
+ return ret;
}
+ /*
+ * If the device's decoder is powered on after reset, restore the power
+ * after configuration. This preserves the driver's previous behavior;
+ * not doing so results in the first 35+ frames captured being garbage.
+ */
+ if (state->chip_info->flags & ADV7180_FLAG_RESET_POWERED)
+ adv7180_set_power(state, true);
+
+ return 0;
+}
+
+static int adv7180_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct adv7180_state *state = to_state(sd);
+ int ret;
+
/* Must wait until querystd released the lock */
- ret = mutex_lock_interruptible(&state->mutex);
+ guard(mutex)(&state->mutex);
+
+ /*
+ * Always power off the decoder even if streaming is to be enabled, the
+ * decoder needs to be off for the device to be configured.
+ */
+ ret = adv7180_set_power(state, false);
if (ret)
return ret;
+
+ if (enable) {
+ ret = init_device(state);
+ if (ret)
+ return ret;
+
+ ret = adv7180_set_power(state, true);
+ if (ret)
+ return ret;
+ }
+
state->streaming = enable;
- mutex_unlock(&state->mutex);
+
return 0;
}
@@ -919,7 +980,6 @@ static const struct v4l2_subdev_video_ops adv7180_video_ops = {
};
static const struct v4l2_subdev_core_ops adv7180_core_ops = {
- .s_power = adv7180_s_power,
.subscribe_event = adv7180_subscribe_event,
.unsubscribe_event = v4l2_event_subdev_unsubscribe,
};
@@ -1343,62 +1403,6 @@ static const struct adv7180_chip_info adv7282_m_info = {
.select_input = adv7182_select_input,
};
-static int init_device(struct adv7180_state *state)
-{
- int ret;
-
- mutex_lock(&state->mutex);
-
- adv7180_set_power_pin(state, true);
- adv7180_set_reset_pin(state, false);
-
- adv7180_write(state, ADV7180_REG_PWR_MAN, ADV7180_PWR_MAN_RES);
- usleep_range(5000, 10000);
-
- ret = state->chip_info->init(state);
- if (ret)
- goto out_unlock;
-
- ret = adv7180_program_std(state);
- if (ret)
- goto out_unlock;
-
- adv7180_set_field_mode(state);
-
- /* register for interrupts */
- if (state->irq > 0) {
- /* config the Interrupt pin to be active low */
- ret = adv7180_write(state, ADV7180_REG_ICONF1,
- ADV7180_ICONF1_ACTIVE_LOW |
- ADV7180_ICONF1_PSYNC_ONLY);
- if (ret < 0)
- goto out_unlock;
-
- ret = adv7180_write(state, ADV7180_REG_IMR1, 0);
- if (ret < 0)
- goto out_unlock;
-
- ret = adv7180_write(state, ADV7180_REG_IMR2, 0);
- if (ret < 0)
- goto out_unlock;
-
- /* enable AD change interrupts interrupts */
- ret = adv7180_write(state, ADV7180_REG_IMR3,
- ADV7180_IRQ3_AD_CHANGE);
- if (ret < 0)
- goto out_unlock;
-
- ret = adv7180_write(state, ADV7180_REG_IMR4, 0);
- if (ret < 0)
- goto out_unlock;
- }
-
-out_unlock:
- mutex_unlock(&state->mutex);
-
- return ret;
-}
-
static int adv7180_probe(struct i2c_client *client)
{
struct device_node *np = client->dev.of_node;
@@ -1457,10 +1461,7 @@ static int adv7180_probe(struct i2c_client *client)
state->irq = client->irq;
mutex_init(&state->mutex);
state->curr_norm = V4L2_STD_NTSC;
- if (state->chip_info->flags & ADV7180_FLAG_RESET_POWERED)
- state->powered = true;
- else
- state->powered = false;
+
state->input = 0;
sd = &state->sd;
v4l2_i2c_subdev_init(sd, client, &adv7180_ops);
@@ -1477,7 +1478,9 @@ static int adv7180_probe(struct i2c_client *client)
if (ret)
goto err_free_ctrl;
- ret = init_device(state);
+ mutex_lock(&state->mutex);
+ ret = adv7180_reset_device(state);
+ mutex_unlock(&state->mutex);
if (ret)
goto err_media_entity_cleanup;
@@ -1549,6 +1552,8 @@ static int adv7180_suspend(struct device *dev)
struct v4l2_subdev *sd = dev_get_drvdata(dev);
struct adv7180_state *state = to_state(sd);
+ guard(mutex)(&state->mutex);
+
return adv7180_set_power(state, false);
}
@@ -1558,13 +1563,18 @@ static int adv7180_resume(struct device *dev)
struct adv7180_state *state = to_state(sd);
int ret;
- ret = init_device(state);
+ guard(mutex)(&state->mutex);
+
+ ret = adv7180_reset_device(state);
if (ret < 0)
return ret;
- ret = adv7180_set_power(state, state->powered);
- if (ret)
- return ret;
+ /* If we were streaming when suspending, restart the decoder. */
+ if (state->streaming) {
+ ret = adv7180_set_power(state, true);
+ if (ret)
+ return ret;
+ }
return 0;
}
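
Several adv7180 paths above trade mutex_lock_interruptible()/mutex_unlock() pairs for scope-based locking. A minimal sketch of the idiom, assuming guard() from <linux/cleanup.h> as these hunks use it; do_locked_work() is a hypothetical helper:

    #include <linux/cleanup.h>
    #include <linux/mutex.h>

    static int example_op(struct adv7180_state *state)
    {
            /* The mutex is released automatically on every return path. */
            guard(mutex)(&state->mutex);

            if (state->streaming)
                    return -EBUSY;

            return do_locked_work(state);   /* hypothetical */
    }

Note the behavioral shift: guard(mutex) takes the lock uninterruptibly, whereas the removed code could fail with -EINTR.
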
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index afed38596362..8fe7c2f72883 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -42,7 +42,7 @@ module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "debug level (0-2)");
MODULE_DESCRIPTION("Analog Devices ADV7604/10/11/12 video decoder driver");
-MODULE_AUTHOR("Hans Verkuil <hansverk@cisco.com>");
+MODULE_AUTHOR("Hans Verkuil <hverkuil@kernel.org>");
MODULE_AUTHOR("Mats Randgaard <mats.randgaard@cisco.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index 5545cd23e113..9780082db841 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -38,7 +38,7 @@ module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "debug level (0-2)");
MODULE_DESCRIPTION("Analog Devices ADV7842 video decoder driver");
-MODULE_AUTHOR("Hans Verkuil <hansverk@cisco.com>");
+MODULE_AUTHOR("Hans Verkuil <hverkuil@kernel.org>");
MODULE_AUTHOR("Martin Bugge <marbugge@cisco.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/i2c/ar0521.c b/drivers/media/i2c/ar0521.c
index 24873149096c..939bf590d4b2 100644
--- a/drivers/media/i2c/ar0521.c
+++ b/drivers/media/i2c/ar0521.c
@@ -1077,11 +1077,10 @@ static int ar0521_probe(struct i2c_client *client)
}
/* Get master clock (extclk) */
- sensor->extclk = devm_clk_get(dev, "extclk");
- if (IS_ERR(sensor->extclk)) {
- dev_err(dev, "failed to get extclk\n");
- return PTR_ERR(sensor->extclk);
- }
+ sensor->extclk = devm_v4l2_sensor_clk_get(dev, "extclk");
+ if (IS_ERR(sensor->extclk))
+ return dev_err_probe(dev, PTR_ERR(sensor->extclk),
+ "failed to get extclk\n");
sensor->extclk_freq = clk_get_rate(sensor->extclk);
diff --git a/drivers/media/i2c/cx25840/cx25840-core.c b/drivers/media/i2c/cx25840/cx25840-core.c
index a90a9e5705a0..a86306304330 100644
--- a/drivers/media/i2c/cx25840/cx25840-core.c
+++ b/drivers/media/i2c/cx25840/cx25840-core.c
@@ -9,10 +9,10 @@
* Changes by Tyler Trafford <tatrafford@comcast.net>
* - cleanup/rewrite for V4L2 API (2005)
*
- * VBI support by Hans Verkuil <hverkuil@xs4all.nl>.
+ * VBI support by Hans Verkuil <hverkuil@kernel.org>.
*
* NTSC sliced VBI support by Christopher Neufeld <television@cneufeld.ca>
- * with additional fixes by Hans Verkuil <hverkuil@xs4all.nl>.
+ * with additional fixes by Hans Verkuil <hverkuil@kernel.org>.
*
* CX23885 support by Steven Toth <stoth@linuxtv.org>.
*
diff --git a/drivers/media/i2c/ds90ub913.c b/drivers/media/i2c/ds90ub913.c
index a80da2b4a8fa..73150061ea45 100644
--- a/drivers/media/i2c/ds90ub913.c
+++ b/drivers/media/i2c/ds90ub913.c
@@ -333,8 +333,7 @@ static int _ub913_set_routing(struct v4l2_subdev *sd,
.quantization = V4L2_QUANTIZATION_LIM_RANGE,
.xfer_func = V4L2_XFER_FUNC_SRGB,
};
- struct v4l2_subdev_stream_configs *stream_configs;
- unsigned int i;
+ struct v4l2_subdev_route *route;
int ret;
ret = v4l2_subdev_routing_validate(sd, routing,
@@ -346,13 +345,15 @@ static int _ub913_set_routing(struct v4l2_subdev *sd,
if (ret)
return ret;
- stream_configs = &state->stream_configs;
+ for_each_active_route(&state->routing, route) {
+ struct v4l2_mbus_framefmt *fmt;
- for (i = 0; i < stream_configs->num_configs; i++) {
- if (stream_configs->configs[i].pad == UB913_PAD_SINK)
- stream_configs->configs[i].fmt = in_format;
- else
- stream_configs->configs[i].fmt = out_format;
+ fmt = v4l2_subdev_state_get_format(state, route->sink_pad,
+ route->sink_stream);
+ *fmt = in_format;
+ fmt = v4l2_subdev_state_get_format(state, route->source_pad,
+ route->source_stream);
+ *fmt = out_format;
}
return 0;
diff --git a/drivers/media/i2c/et8ek8/et8ek8_driver.c b/drivers/media/i2c/et8ek8/et8ek8_driver.c
index 7519863d77b1..2cb7b718782b 100644
--- a/drivers/media/i2c/et8ek8/et8ek8_driver.c
+++ b/drivers/media/i2c/et8ek8/et8ek8_driver.c
@@ -816,7 +816,6 @@ static int et8ek8_power_on(struct et8ek8_sensor *sensor)
{
struct v4l2_subdev *subdev = &sensor->subdev;
struct i2c_client *client = v4l2_get_subdevdata(subdev);
- unsigned int xclk_freq;
int val, rval;
rval = regulator_enable(sensor->vana);
@@ -825,17 +824,6 @@ static int et8ek8_power_on(struct et8ek8_sensor *sensor)
return rval;
}
- if (sensor->current_reglist)
- xclk_freq = sensor->current_reglist->mode.ext_clock;
- else
- xclk_freq = sensor->xclk_freq;
-
- rval = clk_set_rate(sensor->ext_clk, xclk_freq);
- if (rval < 0) {
- dev_err(&client->dev, "unable to set extclk clock freq to %u\n",
- xclk_freq);
- goto out;
- }
rval = clk_prepare_enable(sensor->ext_clk);
if (rval < 0) {
dev_err(&client->dev, "failed to enable extclk\n");
@@ -849,7 +837,7 @@ static int et8ek8_power_on(struct et8ek8_sensor *sensor)
gpiod_set_value(sensor->reset, 1);
- msleep(5000 * 1000 / xclk_freq + 1); /* Wait 5000 cycles */
+ msleep(5000 * 1000 / sensor->xclk_freq + 1); /* Wait 5000 cycles */
rval = et8ek8_i2c_reglist_find_write(client, &meta_reglist,
ET8EK8_REGLIST_POWERON);
@@ -1085,9 +1073,6 @@ static int et8ek8_set_frame_interval(struct v4l2_subdev *subdev,
if (!reglist)
return -EINVAL;
- if (sensor->current_reglist->mode.ext_clock != reglist->mode.ext_clock)
- return -EINVAL;
-
sensor->current_reglist = reglist;
et8ek8_update_controls(sensor);
@@ -1433,18 +1418,13 @@ static int et8ek8_probe(struct i2c_client *client)
return PTR_ERR(sensor->vana);
}
- sensor->ext_clk = devm_clk_get(dev, NULL);
- if (IS_ERR(sensor->ext_clk)) {
- dev_err(&client->dev, "could not get clock\n");
- return PTR_ERR(sensor->ext_clk);
- }
+ sensor->ext_clk = devm_v4l2_sensor_clk_get_legacy(dev, NULL, true,
+ 9600000);
+ if (IS_ERR(sensor->ext_clk))
+ return dev_err_probe(&client->dev, PTR_ERR(sensor->ext_clk),
+ "could not get clock\n");
- ret = of_property_read_u32(dev->of_node, "clock-frequency",
- &sensor->xclk_freq);
- if (ret) {
- dev_warn(dev, "can't get clock-frequency\n");
- return ret;
- }
+ sensor->xclk_freq = clk_get_rate(sensor->ext_clk);
mutex_init(&sensor->power_lock);
diff --git a/drivers/media/i2c/et8ek8/et8ek8_mode.c b/drivers/media/i2c/et8ek8/et8ek8_mode.c
index c9088eb0a812..914be1007099 100644
--- a/drivers/media/i2c/et8ek8/et8ek8_mode.c
+++ b/drivers/media/i2c/et8ek8/et8ek8_mode.c
@@ -44,7 +44,6 @@ static struct et8ek8_reglist mode1_poweron_mode2_16vga_2592x1968_12_07fps = {
.window_width = 2592,
.window_height = 1968,
.pixel_clock = 80000000,
- .ext_clock = 9600000,
.timeperframe = {
.numerator = 100,
.denominator = 1207
@@ -145,7 +144,6 @@ static struct et8ek8_reglist mode1_16vga_2592x1968_13_12fps_dpcm10_8 = {
.window_width = 2592,
.window_height = 1968,
.pixel_clock = 80000000,
- .ext_clock = 9600000,
.timeperframe = {
.numerator = 100,
.denominator = 1292
@@ -201,7 +199,6 @@ static struct et8ek8_reglist mode3_4vga_1296x984_29_99fps_dpcm10_8 = {
.window_width = 1296,
.window_height = 984,
.pixel_clock = 96533333,
- .ext_clock = 9600000,
.timeperframe = {
.numerator = 100,
.denominator = 3000
@@ -257,7 +254,6 @@ static struct et8ek8_reglist mode4_svga_864x656_29_88fps = {
.window_width = 864,
.window_height = 656,
.pixel_clock = 80000000,
- .ext_clock = 9600000,
.timeperframe = {
.numerator = 100,
.denominator = 2988
@@ -313,7 +309,6 @@ static struct et8ek8_reglist mode5_vga_648x492_29_93fps = {
.window_width = 648,
.window_height = 492,
.pixel_clock = 80000000,
- .ext_clock = 9600000,
.timeperframe = {
.numerator = 100,
.denominator = 2993
@@ -369,7 +364,6 @@ static struct et8ek8_reglist mode2_16vga_2592x1968_3_99fps = {
.window_width = 2592,
.window_height = 1968,
.pixel_clock = 80000000,
- .ext_clock = 9600000,
.timeperframe = {
.numerator = 100,
.denominator = 399
@@ -424,7 +418,6 @@ static struct et8ek8_reglist mode_648x492_5fps = {
.window_width = 648,
.window_height = 492,
.pixel_clock = 13333333,
- .ext_clock = 9600000,
.timeperframe = {
.numerator = 100,
.denominator = 499
@@ -480,7 +473,6 @@ static struct et8ek8_reglist mode3_4vga_1296x984_5fps = {
.window_width = 1296,
.window_height = 984,
.pixel_clock = 49400000,
- .ext_clock = 9600000,
.timeperframe = {
.numerator = 100,
.denominator = 501
@@ -536,7 +528,6 @@ static struct et8ek8_reglist mode_4vga_1296x984_25fps_dpcm10_8 = {
.window_width = 1296,
.window_height = 984,
.pixel_clock = 84266667,
- .ext_clock = 9600000,
.timeperframe = {
.numerator = 100,
.denominator = 2500
diff --git a/drivers/media/i2c/et8ek8/et8ek8_reg.h b/drivers/media/i2c/et8ek8/et8ek8_reg.h
index c90e74935f12..3305986c7c9c 100644
--- a/drivers/media/i2c/et8ek8/et8ek8_reg.h
+++ b/drivers/media/i2c/et8ek8/et8ek8_reg.h
@@ -37,7 +37,6 @@ struct et8ek8_mode {
u16 window_height;
u32 pixel_clock; /* in Hz */
- u32 ext_clock; /* in Hz */
struct v4l2_fract timeperframe;
u32 max_exp; /* Maximum exposure value */
u32 bus_format; /* MEDIA_BUS_FMT_ */
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-gc0310.c b/drivers/media/i2c/gc0310.c
index 7af4d66f42a0..7af4d66f42a0 100644
--- a/drivers/staging/media/atomisp/i2c/atomisp-gc0310.c
+++ b/drivers/media/i2c/gc0310.c
diff --git a/drivers/media/i2c/gc05a2.c b/drivers/media/i2c/gc05a2.c
index 3f7f3d5abeeb..8ba17f80fffe 100644
--- a/drivers/media/i2c/gc05a2.c
+++ b/drivers/media/i2c/gc05a2.c
@@ -1235,16 +1235,12 @@ static int gc05a2_probe(struct i2c_client *client)
return dev_err_probe(dev, PTR_ERR(gc05a2->regmap),
"failed to init CCI\n");
- gc05a2->xclk = devm_clk_get(dev, NULL);
+ gc05a2->xclk = devm_v4l2_sensor_clk_get_legacy(dev, NULL, true,
+ GC05A2_DEFAULT_CLK_FREQ);
if (IS_ERR(gc05a2->xclk))
return dev_err_probe(dev, PTR_ERR(gc05a2->xclk),
"failed to get xclk\n");
- ret = clk_set_rate(gc05a2->xclk, GC05A2_DEFAULT_CLK_FREQ);
- if (ret)
- return dev_err_probe(dev, ret,
- "failed to set xclk frequency\n");
-
ret = gc05a2_get_regulators(dev, gc05a2);
if (ret < 0)
return dev_err_probe(dev, ret,
diff --git a/drivers/media/i2c/gc08a3.c b/drivers/media/i2c/gc08a3.c
index 938709a677b6..11fd936db9c3 100644
--- a/drivers/media/i2c/gc08a3.c
+++ b/drivers/media/i2c/gc08a3.c
@@ -1199,16 +1199,12 @@ static int gc08a3_probe(struct i2c_client *client)
return dev_err_probe(dev, PTR_ERR(gc08a3->regmap),
"failed to init CCI\n");
- gc08a3->xclk = devm_clk_get(dev, NULL);
+ gc08a3->xclk = devm_v4l2_sensor_clk_get_legacy(dev, NULL, true,
+ GC08A3_DEFAULT_CLK_FREQ);
if (IS_ERR(gc08a3->xclk))
return dev_err_probe(dev, PTR_ERR(gc08a3->xclk),
"failed to get xclk\n");
- ret = clk_set_rate(gc08a3->xclk, GC08A3_DEFAULT_CLK_FREQ);
- if (ret)
- return dev_err_probe(dev, ret,
- "failed to set xclk frequency\n");
-
ret = gc08a3_get_regulators(dev, gc08a3);
if (ret < 0)
return dev_err_probe(dev, ret,
diff --git a/drivers/media/i2c/gc2145.c b/drivers/media/i2c/gc2145.c
index 559a851669aa..b215963a2648 100644
--- a/drivers/media/i2c/gc2145.c
+++ b/drivers/media/i2c/gc2145.c
@@ -1331,7 +1331,7 @@ static int gc2145_probe(struct i2c_client *client)
return -EINVAL;
/* Get system clock (xclk) */
- gc2145->xclk = devm_clk_get(dev, NULL);
+ gc2145->xclk = devm_v4l2_sensor_clk_get(dev, NULL);
if (IS_ERR(gc2145->xclk))
return dev_err_probe(dev, PTR_ERR(gc2145->xclk),
"failed to get xclk\n");
diff --git a/drivers/media/i2c/hi556.c b/drivers/media/i2c/hi556.c
index 076c19fcf99c..de573cee4451 100644
--- a/drivers/media/i2c/hi556.c
+++ b/drivers/media/i2c/hi556.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Intel Corporation.
-#include <linux/unaligned.h>
#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/delay.h>
@@ -10,6 +9,8 @@
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
+#include <linux/unaligned.h>
+
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
@@ -631,6 +632,8 @@ static const char * const hi556_supply_names[] = {
};
struct hi556 {
+ struct device *dev;
+
struct v4l2_subdev sd;
struct media_pad pad;
struct v4l2_ctrl_handler ctrl_handler;
@@ -715,7 +718,6 @@ static int hi556_write_reg(struct hi556 *hi556, u16 reg, u16 len, u32 val)
static int hi556_write_reg_list(struct hi556 *hi556,
const struct hi556_reg_list *r_list)
{
- struct i2c_client *client = v4l2_get_subdevdata(&hi556->sd);
unsigned int i;
int ret;
@@ -724,7 +726,7 @@ static int hi556_write_reg_list(struct hi556 *hi556,
HI556_REG_VALUE_16BIT,
r_list->regs[i].val);
if (ret) {
- dev_err_ratelimited(&client->dev,
+ dev_err_ratelimited(hi556->dev,
"failed to write reg 0x%4.4x. error = %d\n",
r_list->regs[i].address, ret);
return ret;
@@ -785,7 +787,6 @@ static int hi556_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct hi556 *hi556 = container_of(ctrl->handler,
struct hi556, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&hi556->sd);
s64 exposure_max;
int ret = 0;
@@ -801,7 +802,7 @@ static int hi556_set_ctrl(struct v4l2_ctrl *ctrl)
}
/* V4L2 controls values will be applied only when power is already up */
- if (!pm_runtime_get_if_in_use(&client->dev))
+ if (!pm_runtime_get_if_in_use(hi556->dev))
return 0;
switch (ctrl->id) {
@@ -835,7 +836,7 @@ static int hi556_set_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_put(&client->dev);
+ pm_runtime_put(hi556->dev);
return ret;
}
@@ -921,7 +922,6 @@ static void hi556_assign_pad_format(const struct hi556_mode *mode,
static int hi556_identify_module(struct hi556 *hi556)
{
- struct i2c_client *client = v4l2_get_subdevdata(&hi556->sd);
int ret;
u32 val;
@@ -934,7 +934,7 @@ static int hi556_identify_module(struct hi556 *hi556)
return ret;
if (val != HI556_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
+ dev_err(hi556->dev, "chip id mismatch: %x!=%x\n",
HI556_CHIP_ID, val);
return -ENXIO;
}
@@ -998,7 +998,6 @@ static int hi556_get_selection(struct v4l2_subdev *sd,
static int hi556_start_streaming(struct hi556 *hi556)
{
- struct i2c_client *client = v4l2_get_subdevdata(&hi556->sd);
const struct hi556_reg_list *reg_list;
int link_freq_index, ret;
@@ -1010,14 +1009,14 @@ static int hi556_start_streaming(struct hi556 *hi556)
reg_list = &link_freq_configs[link_freq_index].reg_list;
ret = hi556_write_reg_list(hi556, reg_list);
if (ret) {
- dev_err(&client->dev, "failed to set plls\n");
+ dev_err(hi556->dev, "failed to set plls\n");
return ret;
}
reg_list = &hi556->cur_mode->reg_list;
ret = hi556_write_reg_list(hi556, reg_list);
if (ret) {
- dev_err(&client->dev, "failed to set mode\n");
+ dev_err(hi556->dev, "failed to set mode\n");
return ret;
}
@@ -1029,7 +1028,7 @@ static int hi556_start_streaming(struct hi556 *hi556)
HI556_REG_VALUE_16BIT, HI556_MODE_STREAMING);
if (ret) {
- dev_err(&client->dev, "failed to set stream\n");
+ dev_err(hi556->dev, "failed to set stream\n");
return ret;
}
@@ -1038,22 +1037,19 @@ static int hi556_start_streaming(struct hi556 *hi556)
static void hi556_stop_streaming(struct hi556 *hi556)
{
- struct i2c_client *client = v4l2_get_subdevdata(&hi556->sd);
-
if (hi556_write_reg(hi556, HI556_REG_MODE_SELECT,
HI556_REG_VALUE_16BIT, HI556_MODE_STANDBY))
- dev_err(&client->dev, "failed to set stream\n");
+ dev_err(hi556->dev, "failed to set stream\n");
}
static int hi556_set_stream(struct v4l2_subdev *sd, int enable)
{
struct hi556 *hi556 = to_hi556(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = 0;
mutex_lock(&hi556->mutex);
if (enable) {
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(hi556->dev);
if (ret < 0) {
mutex_unlock(&hi556->mutex);
return ret;
@@ -1062,11 +1058,11 @@ static int hi556_set_stream(struct v4l2_subdev *sd, int enable)
ret = hi556_start_streaming(hi556);
if (ret) {
hi556_stop_streaming(hi556);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(hi556->dev);
}
} else {
hi556_stop_streaming(hi556);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(hi556->dev);
}
mutex_unlock(&hi556->mutex);
@@ -1217,7 +1213,6 @@ static int hi556_check_hwcfg(struct device *dev)
struct v4l2_fwnode_endpoint bus_cfg = {
.bus_type = V4L2_MBUS_CSI2_DPHY
};
- u32 mclk;
int ret = 0;
unsigned int i, j;
@@ -1235,18 +1230,6 @@ static int hi556_check_hwcfg(struct device *dev)
if (ret)
return dev_err_probe(dev, ret, "parsing endpoint failed\n");
- ret = fwnode_property_read_u32(fwnode, "clock-frequency", &mclk);
- if (ret) {
- dev_err(dev, "can't get clock frequency\n");
- goto check_hwcfg_error;
- }
-
- if (mclk != HI556_MCLK) {
- dev_err(dev, "external clock %d is not supported\n", mclk);
- ret = -EINVAL;
- goto check_hwcfg_error;
- }
-
if (bus_cfg.bus.mipi_csi2.num_data_lanes != 2) {
dev_err(dev, "number of CSI2 data lanes %d is not supported\n",
bus_cfg.bus.mipi_csi2.num_data_lanes);
@@ -1289,7 +1272,7 @@ static void hi556_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
v4l2_ctrl_handler_free(sd->ctrl_handler);
- pm_runtime_disable(&client->dev);
+ pm_runtime_disable(hi556->dev);
mutex_destroy(&hi556->mutex);
}
@@ -1336,6 +1319,7 @@ static int hi556_resume(struct device *dev)
static int hi556_probe(struct i2c_client *client)
{
struct hi556 *hi556;
+ unsigned long freq;
bool full_power;
int i, ret;
@@ -1347,40 +1331,48 @@ static int hi556_probe(struct i2c_client *client)
if (!hi556)
return -ENOMEM;
+ hi556->dev = &client->dev;
+
v4l2_i2c_subdev_init(&hi556->sd, client, &hi556_subdev_ops);
- hi556->reset_gpio = devm_gpiod_get_optional(&client->dev, "reset",
+ hi556->reset_gpio = devm_gpiod_get_optional(hi556->dev, "reset",
GPIOD_OUT_HIGH);
if (IS_ERR(hi556->reset_gpio))
- return dev_err_probe(&client->dev, PTR_ERR(hi556->reset_gpio),
+ return dev_err_probe(hi556->dev, PTR_ERR(hi556->reset_gpio),
"failed to get reset GPIO\n");
- hi556->clk = devm_clk_get_optional(&client->dev, "clk");
+ hi556->clk = devm_v4l2_sensor_clk_get(hi556->dev, "clk");
if (IS_ERR(hi556->clk))
- return dev_err_probe(&client->dev, PTR_ERR(hi556->clk),
+ return dev_err_probe(hi556->dev, PTR_ERR(hi556->clk),
"failed to get clock\n");
+ freq = clk_get_rate(hi556->clk);
+ if (freq != HI556_MCLK)
+ return dev_err_probe(hi556->dev, -EINVAL,
+ "external clock %lu is not supported\n",
+ freq);
+
for (i = 0; i < ARRAY_SIZE(hi556_supply_names); i++)
hi556->supplies[i].supply = hi556_supply_names[i];
- ret = devm_regulator_bulk_get(&client->dev,
+ ret = devm_regulator_bulk_get(hi556->dev,
ARRAY_SIZE(hi556_supply_names),
hi556->supplies);
if (ret)
- return dev_err_probe(&client->dev, ret,
+ return dev_err_probe(hi556->dev, ret,
"failed to get regulators\n");
- full_power = acpi_dev_state_d0(&client->dev);
+ full_power = acpi_dev_state_d0(hi556->dev);
if (full_power) {
		/* Ensure non-ACPI-managed resources are enabled */
- ret = hi556_resume(&client->dev);
+ ret = hi556_resume(hi556->dev);
if (ret)
- return dev_err_probe(&client->dev, ret,
+ return dev_err_probe(hi556->dev, ret,
"failed to power on sensor\n");
ret = hi556_identify_module(hi556);
if (ret) {
- dev_err(&client->dev, "failed to find sensor: %d\n", ret);
+ dev_err(hi556->dev, "failed to find sensor: %d\n", ret);
goto probe_error_power_off;
}
}
@@ -1389,7 +1381,7 @@ static int hi556_probe(struct i2c_client *client)
hi556->cur_mode = &supported_modes[0];
ret = hi556_init_controls(hi556);
if (ret) {
- dev_err(&client->dev, "failed to init controls: %d\n", ret);
+ dev_err(hi556->dev, "failed to init controls: %d\n", ret);
goto probe_error_v4l2_ctrl_handler_free;
}
@@ -1400,22 +1392,22 @@ static int hi556_probe(struct i2c_client *client)
hi556->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&hi556->sd.entity, 1, &hi556->pad);
if (ret) {
- dev_err(&client->dev, "failed to init entity pads: %d\n", ret);
+ dev_err(hi556->dev, "failed to init entity pads: %d\n", ret);
goto probe_error_v4l2_ctrl_handler_free;
}
ret = v4l2_async_register_subdev_sensor(&hi556->sd);
if (ret < 0) {
- dev_err(&client->dev, "failed to register V4L2 subdev: %d\n",
+ dev_err(hi556->dev, "failed to register V4L2 subdev: %d\n",
ret);
goto probe_error_media_entity_cleanup;
}
/* Set the device's state to active if it's in D0 state. */
if (full_power)
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
- pm_runtime_idle(&client->dev);
+ pm_runtime_set_active(hi556->dev);
+ pm_runtime_enable(hi556->dev);
+ pm_runtime_idle(hi556->dev);
return 0;
@@ -1428,7 +1420,7 @@ probe_error_v4l2_ctrl_handler_free:
probe_error_power_off:
if (full_power)
- hi556_suspend(&client->dev);
+ hi556_suspend(hi556->dev);
return ret;
}
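The hi556 conversion above also shows the common shape of the new probe-time clock handling used throughout this series: acquire the clock through the V4L2 helper, then validate the rate via clk_get_rate() instead of reading a 'clock-frequency' property by hand. A self-contained sketch of that shape follows; the function name is hypothetical, and devm_v4l2_sensor_clk_get() is the helper these patches introduce.

#include <linux/clk.h>
#include <linux/dev_printk.h>
#include <media/v4l2-common.h>

static int sensor_clk_check_sketch(struct device *dev, unsigned long want_hz,
				   struct clk **out)
{
	struct clk *clk;
	unsigned long freq;

	clk = devm_v4l2_sensor_clk_get(dev, NULL);
	if (IS_ERR(clk))
		return dev_err_probe(dev, PTR_ERR(clk),
				     "failed to get clock\n");

	/* Reject rates the sensor PLL settings were not written for. */
	freq = clk_get_rate(clk);
	if (freq != want_hz)
		return dev_err_probe(dev, -EINVAL,
				     "external clock %lu is not supported\n",
				     freq);

	*out = clk;
	return 0;
}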
diff --git a/drivers/media/i2c/hi846.c b/drivers/media/i2c/hi846.c
index 172772decd3d..a3f77b8434ca 100644
--- a/drivers/media/i2c/hi846.c
+++ b/drivers/media/i2c/hi846.c
@@ -2052,12 +2052,11 @@ static int hi846_probe(struct i2c_client *client)
return ret;
}
- hi846->clock = devm_clk_get(&client->dev, NULL);
- if (IS_ERR(hi846->clock)) {
- dev_err(&client->dev, "failed to get clock: %pe\n",
- hi846->clock);
- return PTR_ERR(hi846->clock);
- }
+ hi846->clock = devm_v4l2_sensor_clk_get(&client->dev, NULL);
+ if (IS_ERR(hi846->clock))
+ return dev_err_probe(&client->dev, PTR_ERR(hi846->clock),
+ "failed to get clock: %pe\n",
+ hi846->clock);
mclk_freq = clk_get_rate(hi846->clock);
if (mclk_freq != 25000000)
diff --git a/drivers/media/i2c/hi847.c b/drivers/media/i2c/hi847.c
index 546833f5b5f5..def01aa07b2f 100644
--- a/drivers/media/i2c/hi847.c
+++ b/drivers/media/i2c/hi847.c
@@ -1,12 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2022 Intel Corporation.
-#include <linux/unaligned.h>
#include <linux/acpi.h>
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
+#include <linux/unaligned.h>
+
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
@@ -2166,6 +2168,9 @@ static const struct hi847_mode supported_modes[] = {
};
struct hi847 {
+ struct device *dev;
+ struct clk *clk;
+
struct v4l2_subdev sd;
struct media_pad pad;
struct v4l2_ctrl_handler ctrl_handler;
@@ -2244,7 +2249,6 @@ static int hi847_write_reg(struct hi847 *hi847, u16 reg, u16 len, u32 val)
static int hi847_write_reg_list(struct hi847 *hi847,
const struct hi847_reg_list *r_list)
{
- struct i2c_client *client = v4l2_get_subdevdata(&hi847->sd);
unsigned int i;
int ret;
@@ -2253,7 +2257,7 @@ static int hi847_write_reg_list(struct hi847 *hi847,
HI847_REG_VALUE_16BIT,
r_list->regs[i].val);
if (ret) {
- dev_err_ratelimited(&client->dev,
+ dev_err_ratelimited(hi847->dev,
"failed to write reg 0x%4.4x. error = %d",
r_list->regs[i].address, ret);
return ret;
@@ -2408,7 +2412,6 @@ static int hi847_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct hi847 *hi847 = container_of(ctrl->handler,
struct hi847, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&hi847->sd);
s64 exposure_max;
int ret = 0;
@@ -2424,7 +2427,7 @@ static int hi847_set_ctrl(struct v4l2_ctrl *ctrl)
}
	/* V4L2 control values will be applied only when power is already up */
- if (!pm_runtime_get_if_in_use(&client->dev))
+ if (!pm_runtime_get_if_in_use(hi847->dev))
return 0;
switch (ctrl->id) {
@@ -2466,7 +2469,7 @@ static int hi847_set_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_put(&client->dev);
+ pm_runtime_put(hi847->dev);
return ret;
}
@@ -2557,7 +2560,6 @@ static void hi847_assign_pad_format(const struct hi847_mode *mode,
static int hi847_start_streaming(struct hi847 *hi847)
{
- struct i2c_client *client = v4l2_get_subdevdata(&hi847->sd);
const struct hi847_reg_list *reg_list;
int link_freq_index, ret;
@@ -2565,14 +2567,14 @@ static int hi847_start_streaming(struct hi847 *hi847)
reg_list = &link_freq_configs[link_freq_index].reg_list;
ret = hi847_write_reg_list(hi847, reg_list);
if (ret) {
- dev_err(&client->dev, "failed to set plls");
+ dev_err(hi847->dev, "failed to set plls");
return ret;
}
reg_list = &hi847->cur_mode->reg_list;
ret = hi847_write_reg_list(hi847, reg_list);
if (ret) {
- dev_err(&client->dev, "failed to set mode");
+ dev_err(hi847->dev, "failed to set mode");
return ret;
}
@@ -2587,7 +2589,7 @@ static int hi847_start_streaming(struct hi847 *hi847)
HI847_REG_VALUE_16BIT, HI847_MODE_STREAMING);
if (ret) {
- dev_err(&client->dev, "failed to set stream");
+ dev_err(hi847->dev, "failed to set stream");
return ret;
}
@@ -2596,28 +2598,25 @@ static int hi847_start_streaming(struct hi847 *hi847)
static void hi847_stop_streaming(struct hi847 *hi847)
{
- struct i2c_client *client = v4l2_get_subdevdata(&hi847->sd);
-
if (hi847_write_reg(hi847, HI847_REG_MODE_TG,
HI847_REG_VALUE_16BIT, HI847_REG_MODE_TG_DISABLE))
- dev_err(&client->dev, "failed to set stream 0x%x",
+ dev_err(hi847->dev, "failed to set stream 0x%x",
HI847_REG_MODE_TG);
if (hi847_write_reg(hi847, HI847_REG_MODE_SELECT,
HI847_REG_VALUE_16BIT, HI847_MODE_STANDBY))
- dev_err(&client->dev, "failed to set stream 0x%x",
+ dev_err(hi847->dev, "failed to set stream 0x%x",
HI847_REG_MODE_SELECT);
}
static int hi847_set_stream(struct v4l2_subdev *sd, int enable)
{
struct hi847 *hi847 = to_hi847(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = 0;
mutex_lock(&hi847->mutex);
if (enable) {
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(hi847->dev);
if (ret) {
mutex_unlock(&hi847->mutex);
return ret;
@@ -2627,11 +2626,11 @@ static int hi847_set_stream(struct v4l2_subdev *sd, int enable)
if (ret) {
enable = 0;
hi847_stop_streaming(hi847);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(hi847->dev);
}
} else {
hi847_stop_streaming(hi847);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(hi847->dev);
}
mutex_unlock(&hi847->mutex);
@@ -2768,7 +2767,6 @@ static const struct v4l2_subdev_internal_ops hi847_internal_ops = {
static int hi847_identify_module(struct hi847 *hi847)
{
- struct i2c_client *client = v4l2_get_subdevdata(&hi847->sd);
int ret;
u32 val;
@@ -2778,7 +2776,7 @@ static int hi847_identify_module(struct hi847 *hi847)
return ret;
if (val != HI847_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%x",
+ dev_err(hi847->dev, "chip id mismatch: %x!=%x",
HI847_CHIP_ID, val);
return -ENXIO;
}
@@ -2793,24 +2791,12 @@ static int hi847_check_hwcfg(struct device *dev)
struct v4l2_fwnode_endpoint bus_cfg = {
.bus_type = V4L2_MBUS_CSI2_DPHY
};
- u32 mclk;
int ret;
unsigned int i, j;
if (!fwnode)
return -ENXIO;
- ret = fwnode_property_read_u32(fwnode, "clock-frequency", &mclk);
- if (ret) {
- dev_err(dev, "can't get clock frequency");
- return ret;
- }
-
- if (mclk != HI847_MCLK) {
- dev_err(dev, "external clock %d is not supported", mclk);
- return -EINVAL;
- }
-
ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
if (!ep)
return -ENXIO;
@@ -2862,22 +2848,36 @@ static void hi847_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
v4l2_ctrl_handler_free(sd->ctrl_handler);
- pm_runtime_disable(&client->dev);
+ pm_runtime_disable(hi847->dev);
mutex_destroy(&hi847->mutex);
}
static int hi847_probe(struct i2c_client *client)
{
struct hi847 *hi847;
+ unsigned long freq;
int ret;
hi847 = devm_kzalloc(&client->dev, sizeof(*hi847), GFP_KERNEL);
if (!hi847)
return -ENOMEM;
- ret = hi847_check_hwcfg(&client->dev);
+ hi847->dev = &client->dev;
+
+ hi847->clk = devm_v4l2_sensor_clk_get(hi847->dev, NULL);
+ if (IS_ERR(hi847->clk))
+ return dev_err_probe(hi847->dev, PTR_ERR(hi847->clk),
+ "failed to get clock\n");
+
+ freq = clk_get_rate(hi847->clk);
+ if (freq != HI847_MCLK)
+ return dev_err_probe(hi847->dev, -EINVAL,
+ "external clock %lu is not supported\n",
+ freq);
+
+ ret = hi847_check_hwcfg(hi847->dev);
if (ret) {
- dev_err(&client->dev, "failed to get HW configuration: %d",
+ dev_err(hi847->dev, "failed to get HW configuration: %d",
ret);
return ret;
}
@@ -2885,7 +2885,7 @@ static int hi847_probe(struct i2c_client *client)
v4l2_i2c_subdev_init(&hi847->sd, client, &hi847_subdev_ops);
ret = hi847_identify_module(hi847);
if (ret) {
- dev_err(&client->dev, "failed to find sensor: %d", ret);
+ dev_err(hi847->dev, "failed to find sensor: %d", ret);
return ret;
}
@@ -2893,7 +2893,7 @@ static int hi847_probe(struct i2c_client *client)
hi847->cur_mode = &supported_modes[0];
ret = hi847_init_controls(hi847);
if (ret) {
- dev_err(&client->dev, "failed to init controls: %d", ret);
+ dev_err(hi847->dev, "failed to init controls: %d", ret);
goto probe_error_v4l2_ctrl_handler_free;
}
@@ -2904,20 +2904,20 @@ static int hi847_probe(struct i2c_client *client)
hi847->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&hi847->sd.entity, 1, &hi847->pad);
if (ret) {
- dev_err(&client->dev, "failed to init entity pads: %d", ret);
+ dev_err(hi847->dev, "failed to init entity pads: %d", ret);
goto probe_error_v4l2_ctrl_handler_free;
}
ret = v4l2_async_register_subdev_sensor(&hi847->sd);
if (ret < 0) {
- dev_err(&client->dev, "failed to register V4L2 subdev: %d",
+ dev_err(hi847->dev, "failed to register V4L2 subdev: %d",
ret);
goto probe_error_media_entity_cleanup;
}
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
- pm_runtime_idle(&client->dev);
+ pm_runtime_set_active(hi847->dev);
+ pm_runtime_enable(hi847->dev);
+ pm_runtime_idle(hi847->dev);
return 0;
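Both hi556 and hi847 additionally cache &client->dev in the driver state at probe time, so every helper stops re-deriving it through v4l2_get_subdevdata(). A trivial sketch of the pattern; the 'my_sensor' type is hypothetical.

#include <linux/device.h>
#include <media/v4l2-device.h>

struct my_sensor {
	struct device *dev;		/* cached &client->dev from probe */
	struct v4l2_subdev sd;
};

static void my_sensor_log(struct my_sensor *sensor)
{
	/*
	 * Before: struct i2c_client *client = v4l2_get_subdevdata(&sensor->sd);
	 *         dev_info(&client->dev, ...);
	 */
	dev_info(sensor->dev, "using the cached device pointer\n");
}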
diff --git a/drivers/media/i2c/imx208.c b/drivers/media/i2c/imx208.c
index 2b5a6ce7b1ae..d5350bb46f14 100644
--- a/drivers/media/i2c/imx208.c
+++ b/drivers/media/i2c/imx208.c
@@ -2,13 +2,15 @@
// Copyright (C) 2021 Intel Corporation
#include <linux/acpi.h>
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
+#include <linux/unaligned.h>
+
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
-#include <linux/unaligned.h>
#define IMX208_REG_MODE_SELECT 0x0100
#define IMX208_MODE_STANDBY 0x00
@@ -268,6 +270,9 @@ static const struct imx208_mode supported_modes[] = {
};
struct imx208 {
+ struct device *dev;
+ struct clk *clk;
+
struct v4l2_subdev sd;
struct media_pad pad;
@@ -372,7 +377,6 @@ static int imx208_write_reg(struct imx208 *imx208, u16 reg, u32 len, u32 val)
static int imx208_write_regs(struct imx208 *imx208,
const struct imx208_reg *regs, u32 len)
{
- struct i2c_client *client = v4l2_get_subdevdata(&imx208->sd);
unsigned int i;
int ret;
@@ -380,7 +384,7 @@ static int imx208_write_regs(struct imx208 *imx208,
ret = imx208_write_reg(imx208, regs[i].address, 1,
regs[i].val);
if (ret) {
- dev_err_ratelimited(&client->dev,
+ dev_err_ratelimited(imx208->dev,
"Failed to write reg 0x%4.4x. error = %d\n",
regs[i].address, ret);
@@ -431,14 +435,13 @@ static int imx208_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct imx208 *imx208 =
container_of(ctrl->handler, struct imx208, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&imx208->sd);
int ret;
/*
	 * Applying V4L2 control values only happens
* when power is up for streaming
*/
- if (!pm_runtime_get_if_in_use(&client->dev))
+ if (!pm_runtime_get_if_in_use(imx208->dev))
return 0;
switch (ctrl->id) {
@@ -471,13 +474,13 @@ static int imx208_set_ctrl(struct v4l2_ctrl *ctrl)
break;
default:
ret = -EINVAL;
- dev_err(&client->dev,
+ dev_err(imx208->dev,
"ctrl(id:0x%x,val:0x%x) is not handled\n",
ctrl->id, ctrl->val);
break;
}
- pm_runtime_put(&client->dev);
+ pm_runtime_put(imx208->dev);
return ret;
}
@@ -620,7 +623,6 @@ static int imx208_set_pad_format(struct v4l2_subdev *sd,
static int imx208_identify_module(struct imx208 *imx208)
{
- struct i2c_client *client = v4l2_get_subdevdata(&imx208->sd);
int ret;
u32 val;
@@ -630,13 +632,13 @@ static int imx208_identify_module(struct imx208 *imx208)
ret = imx208_read_reg(imx208, IMX208_REG_CHIP_ID,
2, &val);
if (ret) {
- dev_err(&client->dev, "failed to read chip id %x\n",
+ dev_err(imx208->dev, "failed to read chip id %x\n",
IMX208_CHIP_ID);
return ret;
}
if (val != IMX208_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
+ dev_err(imx208->dev, "chip id mismatch: %x!=%x\n",
IMX208_CHIP_ID, val);
return -EIO;
}
@@ -649,7 +651,6 @@ static int imx208_identify_module(struct imx208 *imx208)
/* Start streaming */
static int imx208_start_streaming(struct imx208 *imx208)
{
- struct i2c_client *client = v4l2_get_subdevdata(&imx208->sd);
const struct imx208_reg_list *reg_list;
int ret, link_freq_index;
@@ -662,7 +663,7 @@ static int imx208_start_streaming(struct imx208 *imx208)
reg_list = &link_freq_configs[link_freq_index].reg_list;
ret = imx208_write_regs(imx208, reg_list->regs, reg_list->num_of_regs);
if (ret) {
- dev_err(&client->dev, "%s failed to set plls\n", __func__);
+ dev_err(imx208->dev, "%s failed to set plls\n", __func__);
return ret;
}
@@ -670,7 +671,7 @@ static int imx208_start_streaming(struct imx208 *imx208)
reg_list = &imx208->cur_mode->reg_list;
ret = imx208_write_regs(imx208, reg_list->regs, reg_list->num_of_regs);
if (ret) {
- dev_err(&client->dev, "%s failed to set mode\n", __func__);
+ dev_err(imx208->dev, "%s failed to set mode\n", __func__);
return ret;
}
@@ -687,14 +688,13 @@ static int imx208_start_streaming(struct imx208 *imx208)
/* Stop streaming */
static int imx208_stop_streaming(struct imx208 *imx208)
{
- struct i2c_client *client = v4l2_get_subdevdata(&imx208->sd);
int ret;
/* set stream off register */
ret = imx208_write_reg(imx208, IMX208_REG_MODE_SELECT,
1, IMX208_MODE_STANDBY);
if (ret)
- dev_err(&client->dev, "%s failed to set stream\n", __func__);
+ dev_err(imx208->dev, "%s failed to set stream\n", __func__);
/*
* Return success even if it was an error, as there is nothing the
@@ -706,13 +706,12 @@ static int imx208_stop_streaming(struct imx208 *imx208)
static int imx208_set_stream(struct v4l2_subdev *sd, int enable)
{
struct imx208 *imx208 = to_imx208(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = 0;
mutex_lock(&imx208->imx208_mx);
if (enable) {
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(imx208->dev);
if (ret) {
mutex_unlock(&imx208->imx208_mx);
return ret;
@@ -727,7 +726,7 @@ static int imx208_set_stream(struct v4l2_subdev *sd, int enable)
goto err_rpm_put;
} else {
imx208_stop_streaming(imx208);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(imx208->dev);
}
mutex_unlock(&imx208->imx208_mx);
@@ -739,7 +738,7 @@ static int imx208_set_stream(struct v4l2_subdev *sd, int enable)
return ret;
err_rpm_put:
- pm_runtime_put(&client->dev);
+ pm_runtime_put(imx208->dev);
mutex_unlock(&imx208->imx208_mx);
return ret;
@@ -778,7 +777,7 @@ static int imx208_read_otp(struct imx208 *imx208)
if (imx208->otp_read)
goto out_unlock;
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(imx208->dev);
if (ret)
goto out_unlock;
@@ -805,7 +804,7 @@ static int imx208_read_otp(struct imx208 *imx208)
}
out_pm_put:
- pm_runtime_put(&client->dev);
+ pm_runtime_put(imx208->dev);
out_unlock:
mutex_unlock(&imx208->imx208_mx);
@@ -835,7 +834,6 @@ static const BIN_ATTR_RO(otp, IMX208_OTP_SIZE);
/* Initialize control handlers */
static int imx208_init_controls(struct imx208 *imx208)
{
- struct i2c_client *client = v4l2_get_subdevdata(&imx208->sd);
struct v4l2_ctrl_handler *ctrl_hdlr = &imx208->ctrl_handler;
s64 exposure_max;
s64 vblank_def;
@@ -914,7 +912,7 @@ static int imx208_init_controls(struct imx208 *imx208)
if (ctrl_hdlr->error) {
ret = ctrl_hdlr->error;
- dev_err(&client->dev, "%s control init failed (%d)\n",
+ dev_err(imx208->dev, "%s control init failed (%d)\n",
__func__, ret);
goto error;
}
@@ -938,31 +936,36 @@ static void imx208_free_controls(struct imx208 *imx208)
static int imx208_probe(struct i2c_client *client)
{
struct imx208 *imx208;
+ unsigned long freq;
int ret;
bool full_power;
- u32 val = 0;
-
- device_property_read_u32(&client->dev, "clock-frequency", &val);
- if (val != 19200000) {
- dev_err(&client->dev,
- "Unsupported clock-frequency %u. Expected 19200000.\n",
- val);
- return -EINVAL;
- }
imx208 = devm_kzalloc(&client->dev, sizeof(*imx208), GFP_KERNEL);
if (!imx208)
return -ENOMEM;
+ imx208->dev = &client->dev;
+
+ imx208->clk = devm_v4l2_sensor_clk_get(imx208->dev, NULL);
+ if (IS_ERR(imx208->clk))
+ return dev_err_probe(imx208->dev, PTR_ERR(imx208->clk),
+ "failed to get clock\n");
+
+ freq = clk_get_rate(imx208->clk);
+ if (freq != 19200000)
+ return dev_err_probe(imx208->dev, -EINVAL,
+ "external clock %lu is not supported\n",
+ freq);
+
/* Initialize subdev */
v4l2_i2c_subdev_init(&imx208->sd, client, &imx208_subdev_ops);
- full_power = acpi_dev_state_d0(&client->dev);
+ full_power = acpi_dev_state_d0(imx208->dev);
if (full_power) {
/* Check module identity */
ret = imx208_identify_module(imx208);
if (ret) {
- dev_err(&client->dev, "failed to find sensor: %d", ret);
+ dev_err(imx208->dev, "failed to find sensor: %d", ret);
goto error_probe;
}
}
@@ -972,7 +975,7 @@ static int imx208_probe(struct i2c_client *client)
ret = imx208_init_controls(imx208);
if (ret) {
- dev_err(&client->dev, "failed to init controls: %d", ret);
+ dev_err(imx208->dev, "failed to init controls: %d", ret);
goto error_probe;
}
@@ -985,7 +988,7 @@ static int imx208_probe(struct i2c_client *client)
imx208->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&imx208->sd.entity, 1, &imx208->pad);
if (ret) {
- dev_err(&client->dev, "%s failed:%d\n", __func__, ret);
+ dev_err(imx208->dev, "%s failed:%d\n", __func__, ret);
goto error_handler_free;
}
@@ -993,17 +996,17 @@ static int imx208_probe(struct i2c_client *client)
if (ret < 0)
goto error_media_entity;
- ret = device_create_bin_file(&client->dev, &bin_attr_otp);
+ ret = device_create_bin_file(imx208->dev, &bin_attr_otp);
if (ret) {
- dev_err(&client->dev, "sysfs otp creation failed\n");
+ dev_err(imx208->dev, "sysfs otp creation failed\n");
goto error_async_subdev;
}
/* Set the device's state to active if it's in D0 state. */
if (full_power)
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
- pm_runtime_idle(&client->dev);
+ pm_runtime_set_active(imx208->dev);
+ pm_runtime_enable(imx208->dev);
+ pm_runtime_idle(imx208->dev);
return 0;
@@ -1027,13 +1030,13 @@ static void imx208_remove(struct i2c_client *client)
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct imx208 *imx208 = to_imx208(sd);
- device_remove_bin_file(&client->dev, &bin_attr_otp);
+ device_remove_bin_file(imx208->dev, &bin_attr_otp);
v4l2_async_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
imx208_free_controls(imx208);
- pm_runtime_disable(&client->dev);
- pm_runtime_set_suspended(&client->dev);
+ pm_runtime_disable(imx208->dev);
+ pm_runtime_set_suspended(imx208->dev);
mutex_destroy(&imx208->imx208_mx);
}
diff --git a/drivers/media/i2c/imx214.c b/drivers/media/i2c/imx214.c
index a0cef9e61b41..94ebe625c9e6 100644
--- a/drivers/media/i2c/imx214.c
+++ b/drivers/media/i2c/imx214.c
@@ -881,6 +881,109 @@ static const struct v4l2_ctrl_ops imx214_ctrl_ops = {
.s_ctrl = imx214_set_ctrl,
};
+static int imx214_pll_calculate(struct imx214 *imx214, struct ccs_pll *pll,
+ unsigned int link_freq)
+{
+ struct ccs_pll_limits limits = {
+ .min_ext_clk_freq_hz = 6000000,
+ .max_ext_clk_freq_hz = 27000000,
+
+ .vt_fr = {
+ .min_pre_pll_clk_div = 1,
+ .max_pre_pll_clk_div = 15,
+ /* Value is educated guess as we don't have a spec */
+ .min_pll_ip_clk_freq_hz = 6000000,
+ /* Value is educated guess as we don't have a spec */
+ .max_pll_ip_clk_freq_hz = 12000000,
+ .min_pll_multiplier = 12,
+ .max_pll_multiplier = 1200,
+ .min_pll_op_clk_freq_hz = 338000000,
+ .max_pll_op_clk_freq_hz = 1200000000,
+ },
+ .vt_bk = {
+ .min_sys_clk_div = 2,
+ .max_sys_clk_div = 4,
+ .min_pix_clk_div = 5,
+ .max_pix_clk_div = 10,
+ .min_pix_clk_freq_hz = 30000000,
+ .max_pix_clk_freq_hz = 120000000,
+ },
+ .op_bk = {
+ .min_sys_clk_div = 1,
+ .max_sys_clk_div = 2,
+ .min_pix_clk_div = 6,
+ .max_pix_clk_div = 10,
+ .min_pix_clk_freq_hz = 30000000,
+ .max_pix_clk_freq_hz = 120000000,
+ },
+
+ .min_line_length_pck_bin = IMX214_PPL_DEFAULT,
+ .min_line_length_pck = IMX214_PPL_DEFAULT,
+ };
+ unsigned int num_lanes = imx214->bus_cfg.bus.mipi_csi2.num_data_lanes;
+
+ /*
+ * There are no documented constraints on the sys clock frequency, for
+ * either branch. Recover them based on the PLL output clock frequency
+ * and sys_clk_div limits on one hand, and the pix clock frequency and
+ * the pix_clk_div limits on the other hand.
+ */
+ limits.vt_bk.min_sys_clk_freq_hz =
+ max(limits.vt_fr.min_pll_op_clk_freq_hz / limits.vt_bk.max_sys_clk_div,
+ limits.vt_bk.min_pix_clk_freq_hz * limits.vt_bk.min_pix_clk_div);
+ limits.vt_bk.max_sys_clk_freq_hz =
+ min(limits.vt_fr.max_pll_op_clk_freq_hz / limits.vt_bk.min_sys_clk_div,
+ limits.vt_bk.max_pix_clk_freq_hz * limits.vt_bk.max_pix_clk_div);
+
+ limits.op_bk.min_sys_clk_freq_hz =
+ max(limits.vt_fr.min_pll_op_clk_freq_hz / limits.op_bk.max_sys_clk_div,
+ limits.op_bk.min_pix_clk_freq_hz * limits.op_bk.min_pix_clk_div);
+ limits.op_bk.max_sys_clk_freq_hz =
+ min(limits.vt_fr.max_pll_op_clk_freq_hz / limits.op_bk.min_sys_clk_div,
+ limits.op_bk.max_pix_clk_freq_hz * limits.op_bk.max_pix_clk_div);
+
+ memset(pll, 0, sizeof(*pll));
+
+ pll->bus_type = CCS_PLL_BUS_TYPE_CSI2_DPHY;
+ pll->op_lanes = num_lanes;
+ pll->vt_lanes = num_lanes;
+ pll->csi2.lanes = num_lanes;
+
+ pll->binning_horizontal = 1;
+ pll->binning_vertical = 1;
+ pll->scale_m = 1;
+ pll->scale_n = 1;
+ pll->bits_per_pixel =
+ IMX214_CSI_DATA_FORMAT_RAW10 & IMX214_BITS_PER_PIXEL_MASK;
+ pll->flags = CCS_PLL_FLAG_LANE_SPEED_MODEL;
+ pll->link_freq = link_freq;
+ pll->ext_clk_freq_hz = clk_get_rate(imx214->xclk);
+
+ return ccs_pll_calculate(imx214->dev, &limits, pll);
+}
+
+static int imx214_pll_update(struct imx214 *imx214)
+{
+ u64 link_freq;
+ int ret;
+
+ link_freq = imx214->bus_cfg.link_frequencies[imx214->link_freq->val];
+ ret = imx214_pll_calculate(imx214, &imx214->pll, link_freq);
+ if (ret) {
+ dev_err(imx214->dev, "PLL calculations failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = v4l2_ctrl_s_ctrl_int64(imx214->pixel_rate,
+ imx214->pll.pixel_rate_pixel_array);
+ if (ret) {
+ dev_err(imx214->dev, "failed to set pixel rate\n");
+ return ret;
+ }
+
+ return 0;
+}
+
static int imx214_ctrls_init(struct imx214 *imx214)
{
static const struct v4l2_area unit_size = {
@@ -1003,6 +1106,13 @@ static int imx214_ctrls_init(struct imx214 *imx214)
return ret;
}
+ ret = imx214_pll_update(imx214);
+ if (ret < 0) {
+ v4l2_ctrl_handler_free(ctrl_hdlr);
+ dev_err(imx214->dev, "failed to update PLL\n");
+ return ret;
+ }
+
imx214->sd.ctrl_handler = ctrl_hdlr;
return 0;
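To make the comment in imx214_pll_calculate() concrete, plugging the vt_bk constants into the recovery expressions gives:

  vt_bk.min_sys_clk_freq_hz = max(338 MHz / 4,  30 MHz * 5)   = 150 MHz
  vt_bk.max_sys_clk_freq_hz = min(1200 MHz / 2, 120 MHz * 10) = 600 MHz

and likewise 180 MHz and 1200 MHz for the op_bk branch.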
@@ -1029,8 +1139,8 @@ static int imx214_start_streaming(struct imx214 *imx214)
return ret;
}
- bit_rate_mbps = (imx214->pll.pixel_rate_csi / 1000000)
- * imx214->pll.bits_per_pixel;
+ bit_rate_mbps = imx214->pll.pixel_rate_csi / 1000000
+ * imx214->pll.bits_per_pixel;
ret = cci_write(imx214->regmap, IMX214_REG_REQ_LINK_BIT_RATE,
IMX214_LINK_BIT_RATE_MBPS(bit_rate_mbps), NULL);
if (ret) {
@@ -1115,109 +1225,6 @@ err_rpm_put:
return ret;
}
-static int imx214_pll_calculate(struct imx214 *imx214, struct ccs_pll *pll,
- unsigned int link_freq)
-{
- struct ccs_pll_limits limits = {
- .min_ext_clk_freq_hz = 6000000,
- .max_ext_clk_freq_hz = 27000000,
-
- .vt_fr = {
- .min_pre_pll_clk_div = 1,
- .max_pre_pll_clk_div = 15,
- /* Value is educated guess as we don't have a spec */
- .min_pll_ip_clk_freq_hz = 6000000,
- /* Value is educated guess as we don't have a spec */
- .max_pll_ip_clk_freq_hz = 12000000,
- .min_pll_multiplier = 12,
- .max_pll_multiplier = 1200,
- .min_pll_op_clk_freq_hz = 338000000,
- .max_pll_op_clk_freq_hz = 1200000000,
- },
- .vt_bk = {
- .min_sys_clk_div = 2,
- .max_sys_clk_div = 4,
- .min_pix_clk_div = 5,
- .max_pix_clk_div = 10,
- .min_pix_clk_freq_hz = 30000000,
- .max_pix_clk_freq_hz = 120000000,
- },
- .op_bk = {
- .min_sys_clk_div = 1,
- .max_sys_clk_div = 2,
- .min_pix_clk_div = 6,
- .max_pix_clk_div = 10,
- .min_pix_clk_freq_hz = 30000000,
- .max_pix_clk_freq_hz = 120000000,
- },
-
- .min_line_length_pck_bin = IMX214_PPL_DEFAULT,
- .min_line_length_pck = IMX214_PPL_DEFAULT,
- };
- unsigned int num_lanes = imx214->bus_cfg.bus.mipi_csi2.num_data_lanes;
-
- /*
- * There are no documented constraints on the sys clock frequency, for
- * either branch. Recover them based on the PLL output clock frequency
- * and sys_clk_div limits on one hand, and the pix clock frequency and
- * the pix_clk_div limits on the other hand.
- */
- limits.vt_bk.min_sys_clk_freq_hz =
- max(limits.vt_fr.min_pll_op_clk_freq_hz / limits.vt_bk.max_sys_clk_div,
- limits.vt_bk.min_pix_clk_freq_hz * limits.vt_bk.min_pix_clk_div);
- limits.vt_bk.max_sys_clk_freq_hz =
- min(limits.vt_fr.max_pll_op_clk_freq_hz / limits.vt_bk.min_sys_clk_div,
- limits.vt_bk.max_pix_clk_freq_hz * limits.vt_bk.max_pix_clk_div);
-
- limits.op_bk.min_sys_clk_freq_hz =
- max(limits.vt_fr.min_pll_op_clk_freq_hz / limits.op_bk.max_sys_clk_div,
- limits.op_bk.min_pix_clk_freq_hz * limits.op_bk.min_pix_clk_div);
- limits.op_bk.max_sys_clk_freq_hz =
- min(limits.vt_fr.max_pll_op_clk_freq_hz / limits.op_bk.min_sys_clk_div,
- limits.op_bk.max_pix_clk_freq_hz * limits.op_bk.max_pix_clk_div);
-
- memset(pll, 0, sizeof(*pll));
-
- pll->bus_type = CCS_PLL_BUS_TYPE_CSI2_DPHY;
- pll->op_lanes = num_lanes;
- pll->vt_lanes = num_lanes;
- pll->csi2.lanes = num_lanes;
-
- pll->binning_horizontal = 1;
- pll->binning_vertical = 1;
- pll->scale_m = 1;
- pll->scale_n = 1;
- pll->bits_per_pixel =
- IMX214_CSI_DATA_FORMAT_RAW10 & IMX214_BITS_PER_PIXEL_MASK;
- pll->flags = CCS_PLL_FLAG_LANE_SPEED_MODEL;
- pll->link_freq = link_freq;
- pll->ext_clk_freq_hz = clk_get_rate(imx214->xclk);
-
- return ccs_pll_calculate(imx214->dev, &limits, pll);
-}
-
-static int imx214_pll_update(struct imx214 *imx214)
-{
- u64 link_freq;
- int ret;
-
- link_freq = imx214->bus_cfg.link_frequencies[imx214->link_freq->val];
- ret = imx214_pll_calculate(imx214, &imx214->pll, link_freq);
- if (ret) {
- dev_err(imx214->dev, "PLL calculations failed: %d\n", ret);
- return ret;
- }
-
- ret = v4l2_ctrl_s_ctrl_int64(imx214->pixel_rate,
- imx214->pll.pixel_rate_pixel_array);
- if (ret) {
- dev_err(imx214->dev, "failed to set pixel rate\n");
- return ret;
- }
-
- return 0;
-}
-
static int imx214_get_frame_interval(struct v4l2_subdev *subdev,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_interval *fival)
@@ -1324,10 +1331,11 @@ static int imx214_identify_module(struct imx214 *imx214)
return 0;
}
-static int imx214_parse_fwnode(struct device *dev, struct imx214 *imx214)
+static int imx214_parse_fwnode(struct imx214 *imx214)
{
+ struct fwnode_handle *endpoint __free(fwnode_handle) = NULL;
struct v4l2_fwnode_endpoint *bus_cfg = &imx214->bus_cfg;
- struct fwnode_handle *endpoint;
+ struct device *dev = imx214->dev;
unsigned int i;
int ret;
@@ -1337,11 +1345,8 @@ static int imx214_parse_fwnode(struct device *dev, struct imx214 *imx214)
bus_cfg->bus_type = V4L2_MBUS_CSI2_DPHY;
ret = v4l2_fwnode_endpoint_alloc_parse(endpoint, bus_cfg);
- fwnode_handle_put(endpoint);
- if (ret) {
- dev_err_probe(dev, ret, "parsing endpoint node failed\n");
- goto error;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "parsing endpoint node failed\n");
/* Check the number of MIPI CSI2 data lanes */
if (bus_cfg->bus.mipi_csi2.num_data_lanes != 4) {
@@ -1357,18 +1362,16 @@ static int imx214_parse_fwnode(struct device *dev, struct imx214 *imx214)
u64 freq = bus_cfg->link_frequencies[i];
struct ccs_pll pll;
- if (!imx214_pll_calculate(imx214, &pll, freq))
- break;
if (freq == IMX214_DEFAULT_LINK_FREQ_LEGACY) {
dev_warn(dev,
"link-frequencies %d not supported, please review your DT. Continuing anyway\n",
IMX214_DEFAULT_LINK_FREQ);
freq = IMX214_DEFAULT_LINK_FREQ;
- if (imx214_pll_calculate(imx214, &pll, freq))
- continue;
bus_cfg->link_frequencies[i] = freq;
- break;
}
+
+ if (!imx214_pll_calculate(imx214, &pll, freq))
+ break;
}
if (i == bus_cfg->nr_of_link_frequencies)
@@ -1396,7 +1399,7 @@ static int imx214_probe(struct i2c_client *client)
imx214->dev = dev;
- imx214->xclk = devm_clk_get(dev, NULL);
+ imx214->xclk = devm_v4l2_sensor_clk_get(dev, NULL);
if (IS_ERR(imx214->xclk))
return dev_err_probe(dev, PTR_ERR(imx214->xclk),
"failed to get xclk\n");
@@ -1415,7 +1418,7 @@ static int imx214_probe(struct i2c_client *client)
return dev_err_probe(dev, PTR_ERR(imx214->regmap),
"failed to initialize CCI\n");
- ret = imx214_parse_fwnode(dev, imx214);
+ ret = imx214_parse_fwnode(imx214);
if (ret)
return ret;
@@ -1459,12 +1462,6 @@ static int imx214_probe(struct i2c_client *client)
pm_runtime_set_active(imx214->dev);
pm_runtime_enable(imx214->dev);
- ret = imx214_pll_update(imx214);
- if (ret < 0) {
- dev_err_probe(dev, ret, "failed to update PLL\n");
- goto error_subdev_cleanup;
- }
-
ret = v4l2_async_register_subdev_sensor(&imx214->sd);
if (ret < 0) {
dev_err_probe(dev, ret,
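The imx214_parse_fwnode() rework above leans on the kernel's scope-based cleanup support: tagging the endpoint with __free(fwnode_handle) makes fwnode_handle_put() run automatically when the pointer leaves scope, which is why the manual put and the error label disappear. A minimal standalone sketch (function name hypothetical):

#include <linux/cleanup.h>
#include <linux/fwnode.h>
#include <linux/property.h>

static int parse_endpoint_sketch(struct device *dev)
{
	/* Released via fwnode_handle_put() on every return path. */
	struct fwnode_handle *endpoint __free(fwnode_handle) =
		fwnode_graph_get_next_endpoint(dev_fwnode(dev), NULL);

	if (!endpoint)
		return -ENODEV;

	/* ... parse the endpoint; no explicit fwnode_handle_put() ... */
	return 0;
}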
diff --git a/drivers/media/i2c/imx219.c b/drivers/media/i2c/imx219.c
index 3faf48f34af4..c680aa6c3a55 100644
--- a/drivers/media/i2c/imx219.c
+++ b/drivers/media/i2c/imx219.c
@@ -1032,6 +1032,10 @@ static int imx219_power_on(struct device *dev)
goto reg_off;
}
+ /*
+	 * Note: this misinterprets reset assertion - do not re-use this code.
+	 * The XCLR pin is driven at the wrong logical level for a reset signal.
+ */
gpiod_set_value_cansleep(imx219->reset_gpio, 1);
usleep_range(IMX219_XCLR_MIN_DELAY_US,
IMX219_XCLR_MIN_DELAY_US + IMX219_XCLR_DELAY_RANGE_US);
@@ -1186,7 +1190,7 @@ static int imx219_probe(struct i2c_client *client)
"failed to initialize CCI\n");
/* Get system clock (xclk) */
- imx219->xclk = devm_clk_get(dev, NULL);
+ imx219->xclk = devm_v4l2_sensor_clk_get(dev, NULL);
if (IS_ERR(imx219->xclk))
return dev_err_probe(dev, PTR_ERR(imx219->xclk),
"failed to get xclk\n");
diff --git a/drivers/media/i2c/imx258.c b/drivers/media/i2c/imx258.c
index 9e30fce1f223..e50dcfd830f5 100644
--- a/drivers/media/i2c/imx258.c
+++ b/drivers/media/i2c/imx258.c
@@ -8,11 +8,12 @@
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
+#include <linux/unaligned.h>
+
#include <media/v4l2-cci.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
-#include <linux/unaligned.h>
#define IMX258_REG_MODE_SELECT CCI_REG8(0x0100)
#define IMX258_MODE_STANDBY 0x00
@@ -645,6 +646,8 @@ static const struct imx258_mode supported_modes[] = {
};
struct imx258 {
+ struct device *dev;
+
struct v4l2_subdev sd;
struct media_pad pad;
struct regmap *regmap;
@@ -751,7 +754,6 @@ static int imx258_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct imx258 *imx258 =
container_of(ctrl->handler, struct imx258, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&imx258->sd);
int ret = 0;
/*
@@ -765,7 +767,7 @@ static int imx258_set_ctrl(struct v4l2_ctrl *ctrl)
	 * Applying V4L2 control values only happens
* when power is up for streaming
*/
- if (pm_runtime_get_if_in_use(&client->dev) == 0)
+ if (pm_runtime_get_if_in_use(imx258->dev) == 0)
return 0;
switch (ctrl->id) {
@@ -811,14 +813,14 @@ static int imx258_set_ctrl(struct v4l2_ctrl *ctrl)
NULL);
break;
default:
- dev_info(&client->dev,
+ dev_info(imx258->dev,
"ctrl(id:0x%x,val:0x%x) is not handled\n",
ctrl->id, ctrl->val);
ret = -EINVAL;
break;
}
- pm_runtime_put(&client->dev);
+ pm_runtime_put(imx258->dev);
return ret;
}
@@ -1013,14 +1015,13 @@ static int imx258_get_selection(struct v4l2_subdev *sd,
/* Start streaming */
static int imx258_start_streaming(struct imx258 *imx258)
{
- struct i2c_client *client = v4l2_get_subdevdata(&imx258->sd);
const struct imx258_reg_list *reg_list;
const struct imx258_link_freq_config *link_freq_cfg;
int ret, link_freq_index;
ret = cci_write(imx258->regmap, IMX258_REG_RESET, 0x01, NULL);
if (ret) {
- dev_err(&client->dev, "%s failed to reset sensor\n", __func__);
+ dev_err(imx258->dev, "%s failed to reset sensor\n", __func__);
return ret;
}
@@ -1034,21 +1035,21 @@ static int imx258_start_streaming(struct imx258 *imx258)
reg_list = &link_freq_cfg->link_cfg[imx258->lane_mode_idx].reg_list;
ret = cci_multi_reg_write(imx258->regmap, reg_list->regs, reg_list->num_of_regs, NULL);
if (ret) {
- dev_err(&client->dev, "%s failed to set plls\n", __func__);
+ dev_err(imx258->dev, "%s failed to set plls\n", __func__);
return ret;
}
ret = cci_multi_reg_write(imx258->regmap, mode_common_regs,
ARRAY_SIZE(mode_common_regs), NULL);
if (ret) {
- dev_err(&client->dev, "%s failed to set common regs\n", __func__);
+ dev_err(imx258->dev, "%s failed to set common regs\n", __func__);
return ret;
}
ret = cci_multi_reg_write(imx258->regmap, imx258->variant_cfg->regs,
imx258->variant_cfg->num_regs, NULL);
if (ret) {
- dev_err(&client->dev, "%s failed to set variant config\n",
+ dev_err(imx258->dev, "%s failed to set variant config\n",
__func__);
return ret;
}
@@ -1057,7 +1058,7 @@ static int imx258_start_streaming(struct imx258 *imx258)
!!(imx258->csi2_flags & V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK),
NULL);
if (ret) {
- dev_err(&client->dev, "%s failed to set clock lane mode\n", __func__);
+ dev_err(imx258->dev, "%s failed to set clock lane mode\n", __func__);
return ret;
}
@@ -1065,7 +1066,7 @@ static int imx258_start_streaming(struct imx258 *imx258)
reg_list = &imx258->cur_mode->reg_list;
ret = cci_multi_reg_write(imx258->regmap, reg_list->regs, reg_list->num_of_regs, NULL);
if (ret) {
- dev_err(&client->dev, "%s failed to set mode\n", __func__);
+ dev_err(imx258->dev, "%s failed to set mode\n", __func__);
return ret;
}
@@ -1082,14 +1083,13 @@ static int imx258_start_streaming(struct imx258 *imx258)
/* Stop streaming */
static int imx258_stop_streaming(struct imx258 *imx258)
{
- struct i2c_client *client = v4l2_get_subdevdata(&imx258->sd);
int ret;
/* set stream off register */
ret = cci_write(imx258->regmap, IMX258_REG_MODE_SELECT,
IMX258_MODE_STANDBY, NULL);
if (ret)
- dev_err(&client->dev, "%s failed to set stream\n", __func__);
+ dev_err(imx258->dev, "%s failed to set stream\n", __func__);
/*
* Return success even if it was an error, as there is nothing the
@@ -1135,13 +1135,12 @@ static int imx258_power_off(struct device *dev)
static int imx258_set_stream(struct v4l2_subdev *sd, int enable)
{
struct imx258 *imx258 = to_imx258(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = 0;
mutex_lock(&imx258->mutex);
if (enable) {
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(imx258->dev);
if (ret < 0)
goto err_unlock;
@@ -1154,7 +1153,7 @@ static int imx258_set_stream(struct v4l2_subdev *sd, int enable)
goto err_rpm_put;
} else {
imx258_stop_streaming(imx258);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(imx258->dev);
}
mutex_unlock(&imx258->mutex);
@@ -1162,7 +1161,7 @@ static int imx258_set_stream(struct v4l2_subdev *sd, int enable)
return ret;
err_rpm_put:
- pm_runtime_put(&client->dev);
+ pm_runtime_put(imx258->dev);
err_unlock:
mutex_unlock(&imx258->mutex);
@@ -1172,20 +1171,19 @@ err_unlock:
/* Verify chip ID */
static int imx258_identify_module(struct imx258 *imx258)
{
- struct i2c_client *client = v4l2_get_subdevdata(&imx258->sd);
int ret;
u64 val;
ret = cci_read(imx258->regmap, IMX258_REG_CHIP_ID,
&val, NULL);
if (ret) {
- dev_err(&client->dev, "failed to read chip id %x\n",
+ dev_err(imx258->dev, "failed to read chip id %x\n",
IMX258_CHIP_ID);
return ret;
}
if (val != IMX258_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%llx\n",
+ dev_err(imx258->dev, "chip id mismatch: %x!=%llx\n",
IMX258_CHIP_ID, val);
return -EIO;
}
@@ -1217,7 +1215,6 @@ static const struct v4l2_subdev_internal_ops imx258_internal_ops = {
/* Initialize control handlers */
static int imx258_init_controls(struct imx258 *imx258)
{
- struct i2c_client *client = v4l2_get_subdevdata(&imx258->sd);
const struct imx258_link_freq_config *link_freq_cfgs;
struct v4l2_fwnode_device_properties props;
struct v4l2_ctrl_handler *ctrl_hdlr;
@@ -1308,12 +1305,12 @@ static int imx258_init_controls(struct imx258 *imx258)
if (ctrl_hdlr->error) {
ret = ctrl_hdlr->error;
- dev_err(&client->dev, "%s control init failed (%d)\n",
+ dev_err(imx258->dev, "%s control init failed (%d)\n",
__func__, ret);
goto error;
}
- ret = v4l2_fwnode_device_parse(&client->dev, &props);
+ ret = v4l2_fwnode_device_parse(imx258->dev, &props);
if (ret)
goto error;
@@ -1339,15 +1336,14 @@ static void imx258_free_controls(struct imx258 *imx258)
mutex_destroy(&imx258->mutex);
}
-static int imx258_get_regulators(struct imx258 *imx258,
- struct i2c_client *client)
+static int imx258_get_regulators(struct imx258 *imx258)
{
unsigned int i;
for (i = 0; i < IMX258_NUM_SUPPLIES; i++)
imx258->supplies[i].supply = imx258_supply_name[i];
- return devm_regulator_bulk_get(&client->dev,
+ return devm_regulator_bulk_get(imx258->dev,
IMX258_NUM_SUPPLIES, imx258->supplies);
}
@@ -1365,30 +1361,27 @@ static int imx258_probe(struct i2c_client *client)
if (!imx258)
return -ENOMEM;
+ imx258->dev = &client->dev;
+
imx258->regmap = devm_cci_regmap_init_i2c(client, 16);
if (IS_ERR(imx258->regmap)) {
ret = PTR_ERR(imx258->regmap);
- dev_err(&client->dev, "failed to initialize CCI: %d\n", ret);
+ dev_err(imx258->dev, "failed to initialize CCI: %d\n", ret);
return ret;
}
- ret = imx258_get_regulators(imx258, client);
+ ret = imx258_get_regulators(imx258);
if (ret)
- return dev_err_probe(&client->dev, ret,
+ return dev_err_probe(imx258->dev, ret,
"failed to get regulators\n");
- imx258->clk = devm_clk_get_optional(&client->dev, NULL);
+ imx258->clk = devm_v4l2_sensor_clk_get_legacy(imx258->dev, NULL, false,
+ 0);
if (IS_ERR(imx258->clk))
- return dev_err_probe(&client->dev, PTR_ERR(imx258->clk),
+ return dev_err_probe(imx258->dev, PTR_ERR(imx258->clk),
"error getting clock\n");
- if (!imx258->clk) {
- dev_dbg(&client->dev,
- "no clock provided, using clock-frequency property\n");
- device_property_read_u32(&client->dev, "clock-frequency", &val);
- } else {
- val = clk_get_rate(imx258->clk);
- }
+ val = clk_get_rate(imx258->clk);
switch (val) {
case 19200000:
@@ -1400,32 +1393,32 @@ static int imx258_probe(struct i2c_client *client)
imx258->link_freq_menu_items = link_freq_menu_items_24;
break;
default:
- dev_err(&client->dev, "input clock frequency of %u not supported\n",
+ dev_err(imx258->dev, "input clock frequency of %u not supported\n",
val);
return -EINVAL;
}
- endpoint = fwnode_graph_get_next_endpoint(dev_fwnode(&client->dev), NULL);
+ endpoint = fwnode_graph_get_next_endpoint(dev_fwnode(imx258->dev), NULL);
if (!endpoint) {
- dev_err(&client->dev, "Endpoint node not found\n");
+ dev_err(imx258->dev, "Endpoint node not found\n");
return -EINVAL;
}
ret = v4l2_fwnode_endpoint_alloc_parse(endpoint, &ep);
fwnode_handle_put(endpoint);
if (ret) {
- dev_err(&client->dev, "Parsing endpoint node failed\n");
+ dev_err(imx258->dev, "Parsing endpoint node failed\n");
return ret;
}
- ret = v4l2_link_freq_to_bitmap(&client->dev,
+ ret = v4l2_link_freq_to_bitmap(imx258->dev,
ep.link_frequencies,
ep.nr_of_link_frequencies,
imx258->link_freq_menu_items,
ARRAY_SIZE(link_freq_menu_items_19_2),
&imx258->link_freq_bitmap);
if (ret) {
- dev_err(&client->dev, "Link frequency not supported\n");
+ dev_err(imx258->dev, "Link frequency not supported\n");
goto error_endpoint_free;
}
@@ -1438,7 +1431,7 @@ static int imx258_probe(struct i2c_client *client)
imx258->lane_mode_idx = IMX258_4_LANE_MODE;
break;
default:
- dev_err(&client->dev, "Invalid data lanes: %u\n",
+ dev_err(imx258->dev, "Invalid data lanes: %u\n",
ep.bus.mipi_csi2.num_data_lanes);
ret = -EINVAL;
goto error_endpoint_free;
@@ -1446,7 +1439,7 @@ static int imx258_probe(struct i2c_client *client)
imx258->csi2_flags = ep.bus.mipi_csi2.flags;
- imx258->variant_cfg = device_get_match_data(&client->dev);
+ imx258->variant_cfg = device_get_match_data(imx258->dev);
if (!imx258->variant_cfg)
imx258->variant_cfg = &imx258_cfg;
@@ -1454,7 +1447,7 @@ static int imx258_probe(struct i2c_client *client)
v4l2_i2c_subdev_init(&imx258->sd, client, &imx258_subdev_ops);
/* Will be powered off via pm_runtime_idle */
- ret = imx258_power_on(&client->dev);
+ ret = imx258_power_on(imx258->dev);
if (ret)
goto error_endpoint_free;
@@ -1486,9 +1479,9 @@ static int imx258_probe(struct i2c_client *client)
if (ret < 0)
goto error_media_entity;
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
- pm_runtime_idle(&client->dev);
+ pm_runtime_set_active(imx258->dev);
+ pm_runtime_enable(imx258->dev);
+ pm_runtime_idle(imx258->dev);
v4l2_fwnode_endpoint_free(&ep);
return 0;
@@ -1500,7 +1493,7 @@ error_handler_free:
imx258_free_controls(imx258);
error_identify:
- imx258_power_off(&client->dev);
+ imx258_power_off(imx258->dev);
error_endpoint_free:
v4l2_fwnode_endpoint_free(&ep);
@@ -1517,10 +1510,10 @@ static void imx258_remove(struct i2c_client *client)
media_entity_cleanup(&sd->entity);
imx258_free_controls(imx258);
- pm_runtime_disable(&client->dev);
- if (!pm_runtime_status_suspended(&client->dev))
- imx258_power_off(&client->dev);
- pm_runtime_set_suspended(&client->dev);
+ pm_runtime_disable(imx258->dev);
+ if (!pm_runtime_status_suspended(imx258->dev))
+ imx258_power_off(imx258->dev);
+ pm_runtime_set_suspended(imx258->dev);
}
static const struct dev_pm_ops imx258_pm_ops = {
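The imx258 case illustrates the legacy helper's fallback=false/default=0 arguments: the removed probe code shows what is presumably being absorbed, namely falling back to the 'clock-frequency' property when no clk reference exists (typical on ACPI platforms), so that clk_get_rate() works uniformly afterwards. A hypothetical, simplified sketch of that fallback, reconstructed from the removed lines:

#include <linux/clk.h>
#include <linux/property.h>

static unsigned long sensor_clk_rate_sketch(struct device *dev)
{
	struct clk *clk;
	u32 val = 0;

	clk = devm_clk_get_optional(dev, NULL);
	if (IS_ERR(clk))
		return 0;

	if (clk)
		return clk_get_rate(clk);

	/* No clock provided (common on ACPI): use the firmware property. */
	device_property_read_u32(dev, "clock-frequency", &val);
	return val;
}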
diff --git a/drivers/media/i2c/imx274.c b/drivers/media/i2c/imx274.c
index a2b824986027..d86d08c29174 100644
--- a/drivers/media/i2c/imx274.c
+++ b/drivers/media/i2c/imx274.c
@@ -826,6 +826,8 @@ static int imx274_start_stream(struct stimx274 *priv)
* if rst = 0, keep it in reset;
* if rst = 1, bring it out of reset.
*
+ * Note: this misinterprets reset assertion - do not re-use this code.
+ * The XCLR pin is driven at the wrong logical level for a reset signal.
*/
static void imx274_reset(struct stimx274 *priv, int rst)
{
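For contrast with the code flagged in imx219 and imx274: with GPIO descriptors, gpiod_set_value_cansleep(reset, 1) means "assert reset" and the firmware flag (e.g. GPIO_ACTIVE_LOW on XCLR) carries the physical polarity. A conventional reset sequence would therefore look roughly like the sketch below; names and delays are illustrative only.

#include <linux/delay.h>
#include <linux/gpio/consumer.h>

static int sensor_reset_sketch(struct device *dev)
{
	struct gpio_desc *reset;

	/* GPIOD_OUT_HIGH = start with reset asserted; the DT/ACPI flag
	 * encodes whether "asserted" is physically low or high. */
	reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	usleep_range(500, 1000);		/* example reset hold time */
	gpiod_set_value_cansleep(reset, 0);	/* deassert: sensor runs */
	return 0;
}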
diff --git a/drivers/media/i2c/imx283.c b/drivers/media/i2c/imx283.c
index 67e8bb432d10..8ab63ad8f385 100644
--- a/drivers/media/i2c/imx283.c
+++ b/drivers/media/i2c/imx283.c
@@ -1460,11 +1460,10 @@ static int imx283_probe(struct i2c_client *client)
}
/* Get system clock (xclk) */
- imx283->xclk = devm_clk_get(imx283->dev, NULL);
- if (IS_ERR(imx283->xclk)) {
+ imx283->xclk = devm_v4l2_sensor_clk_get(imx283->dev, NULL);
+ if (IS_ERR(imx283->xclk))
return dev_err_probe(imx283->dev, PTR_ERR(imx283->xclk),
"failed to get xclk\n");
- }
xclk_freq = clk_get_rate(imx283->xclk);
for (i = 0; i < ARRAY_SIZE(imx283_frequencies); i++) {
diff --git a/drivers/media/i2c/imx290.c b/drivers/media/i2c/imx290.c
index ec172556612e..21cbc81cb2ed 100644
--- a/drivers/media/i2c/imx290.c
+++ b/drivers/media/i2c/imx290.c
@@ -1422,14 +1422,14 @@ static int imx290_get_regulators(struct device *dev, struct imx290 *imx290)
static int imx290_init_clk(struct imx290 *imx290)
{
u32 xclk_freq;
- int ret;
- ret = device_property_read_u32(imx290->dev, "clock-frequency",
- &xclk_freq);
- if (ret) {
- dev_err(imx290->dev, "Could not get xclk frequency\n");
- return ret;
- }
+ imx290->xclk = devm_v4l2_sensor_clk_get_legacy(imx290->dev, "xclk",
+ false, 0);
+ if (IS_ERR(imx290->xclk))
+ return dev_err_probe(imx290->dev, PTR_ERR(imx290->xclk),
+ "Could not get xclk\n");
+
+ xclk_freq = clk_get_rate(imx290->xclk);
	/* external clock must be 37.125 MHz or 74.25 MHz */
switch (xclk_freq) {
@@ -1445,12 +1445,6 @@ static int imx290_init_clk(struct imx290 *imx290)
return -EINVAL;
}
- ret = clk_set_rate(imx290->xclk, xclk_freq);
- if (ret) {
- dev_err(imx290->dev, "Could not set xclk frequency\n");
- return ret;
- }
-
return 0;
}
@@ -1596,11 +1590,6 @@ static int imx290_probe(struct i2c_client *client)
return ret;
/* Acquire resources. */
- imx290->xclk = devm_clk_get(dev, "xclk");
- if (IS_ERR(imx290->xclk))
- return dev_err_probe(dev, PTR_ERR(imx290->xclk),
- "Could not get xclk\n");
-
ret = imx290_get_regulators(dev, imx290);
if (ret < 0)
return dev_err_probe(dev, ret, "Cannot get regulators\n");
@@ -1611,7 +1600,7 @@ static int imx290_probe(struct i2c_client *client)
return dev_err_probe(dev, PTR_ERR(imx290->rst_gpio),
"Cannot get reset gpio\n");
- /* Initialize external clock frequency. */
+ /* Initialize external clock. */
ret = imx290_init_clk(imx290);
if (ret)
return ret;
diff --git a/drivers/media/i2c/imx296.c b/drivers/media/i2c/imx296.c
index 61116f4e3f76..69636db11a2b 100644
--- a/drivers/media/i2c/imx296.c
+++ b/drivers/media/i2c/imx296.c
@@ -921,7 +921,7 @@ static int imx296_read_temperature(struct imx296 *sensor, int *temp)
tmdout &= IMX296_TMDOUT_MASK;
- /* T(°C) = 246.312 - 0.304 * TMDOUT */;
+ /* T(°C) = 246.312 - 0.304 * TMDOUT */
*temp = 246312 - 304 * tmdout;
return imx296_write(sensor, IMX296_TMDCTRL, 0, NULL);
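Side note on the formula above: *temp is evidently reported in units of 0.001 °C, matching the 246.312 constant scaled by 1000. For example, TMDOUT = 500 gives 246312 - 304 * 500 = 94312, i.e. 94.312 °C.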
@@ -1043,7 +1043,7 @@ static int imx296_probe(struct i2c_client *client)
return dev_err_probe(sensor->dev, PTR_ERR(sensor->reset),
"failed to get reset GPIO\n");
- sensor->clk = devm_clk_get(sensor->dev, "inck");
+ sensor->clk = devm_v4l2_sensor_clk_get(sensor->dev, "inck");
if (IS_ERR(sensor->clk))
return dev_err_probe(sensor->dev, PTR_ERR(sensor->clk),
"failed to get clock\n");
diff --git a/drivers/media/i2c/imx319.c b/drivers/media/i2c/imx319.c
index 701840f4a5cc..953310ef3046 100644
--- a/drivers/media/i2c/imx319.c
+++ b/drivers/media/i2c/imx319.c
@@ -1,11 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Intel Corporation
-#include <linux/unaligned.h>
#include <linux/acpi.h>
+#include <linux/clk.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
+#include <linux/unaligned.h>
+
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
@@ -106,11 +108,12 @@ struct imx319_mode {
};
struct imx319_hwcfg {
- u32 ext_clk; /* sensor external clk */
unsigned long link_freq_bitmap;
};
struct imx319 {
+ struct device *dev;
+
struct v4l2_subdev sd;
struct media_pad pad;
@@ -1839,14 +1842,13 @@ static int imx319_write_reg(struct imx319 *imx319, u16 reg, u32 len, u32 val)
static int imx319_write_regs(struct imx319 *imx319,
const struct imx319_reg *regs, u32 len)
{
- struct i2c_client *client = v4l2_get_subdevdata(&imx319->sd);
int ret;
u32 i;
for (i = 0; i < len; i++) {
ret = imx319_write_reg(imx319, regs[i].address, 1, regs[i].val);
if (ret) {
- dev_err_ratelimited(&client->dev,
+ dev_err_ratelimited(imx319->dev,
"write reg 0x%4.4x return err %d",
regs[i].address, ret);
return ret;
@@ -1880,7 +1882,6 @@ static int imx319_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct imx319 *imx319 = container_of(ctrl->handler,
struct imx319, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&imx319->sd);
s64 max;
int ret;
@@ -1899,7 +1900,7 @@ static int imx319_set_ctrl(struct v4l2_ctrl *ctrl)
	 * Applying V4L2 control values only happens
* when power is up for streaming
*/
- if (!pm_runtime_get_if_in_use(&client->dev))
+ if (!pm_runtime_get_if_in_use(imx319->dev))
return 0;
switch (ctrl->id) {
@@ -1933,12 +1934,12 @@ static int imx319_set_ctrl(struct v4l2_ctrl *ctrl)
break;
default:
ret = -EINVAL;
- dev_info(&client->dev, "ctrl(id:0x%x,val:0x%x) is not handled",
+ dev_info(imx319->dev, "ctrl(id:0x%x,val:0x%x) is not handled",
ctrl->id, ctrl->val);
break;
}
- pm_runtime_put(&client->dev);
+ pm_runtime_put(imx319->dev);
return ret;
}
@@ -2087,7 +2088,6 @@ imx319_set_pad_format(struct v4l2_subdev *sd,
/* Verify chip ID */
static int imx319_identify_module(struct imx319 *imx319)
{
- struct i2c_client *client = v4l2_get_subdevdata(&imx319->sd);
int ret;
u32 val;
@@ -2099,7 +2099,7 @@ static int imx319_identify_module(struct imx319 *imx319)
return ret;
if (val != IMX319_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%x",
+ dev_err(imx319->dev, "chip id mismatch: %x!=%x",
IMX319_CHIP_ID, val);
return -EIO;
}
@@ -2112,7 +2112,6 @@ static int imx319_identify_module(struct imx319 *imx319)
/* Start streaming */
static int imx319_start_streaming(struct imx319 *imx319)
{
- struct i2c_client *client = v4l2_get_subdevdata(&imx319->sd);
const struct imx319_reg_list *reg_list;
int ret;
@@ -2124,7 +2123,7 @@ static int imx319_start_streaming(struct imx319 *imx319)
reg_list = &imx319_global_setting;
ret = imx319_write_regs(imx319, reg_list->regs, reg_list->num_of_regs);
if (ret) {
- dev_err(&client->dev, "failed to set global settings");
+ dev_err(imx319->dev, "failed to set global settings");
return ret;
}
@@ -2132,7 +2131,7 @@ static int imx319_start_streaming(struct imx319 *imx319)
reg_list = &imx319->cur_mode->reg_list;
ret = imx319_write_regs(imx319, reg_list->regs, reg_list->num_of_regs);
if (ret) {
- dev_err(&client->dev, "failed to set mode");
+ dev_err(imx319->dev, "failed to set mode");
return ret;
}
@@ -2160,13 +2159,12 @@ static int imx319_stop_streaming(struct imx319 *imx319)
static int imx319_set_stream(struct v4l2_subdev *sd, int enable)
{
struct imx319 *imx319 = to_imx319(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = 0;
mutex_lock(&imx319->mutex);
if (enable) {
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(imx319->dev);
if (ret < 0)
goto err_unlock;
@@ -2179,7 +2177,7 @@ static int imx319_set_stream(struct v4l2_subdev *sd, int enable)
goto err_rpm_put;
} else {
imx319_stop_streaming(imx319);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(imx319->dev);
}
/* vflip and hflip cannot change during streaming */
@@ -2191,7 +2189,7 @@ static int imx319_set_stream(struct v4l2_subdev *sd, int enable)
return ret;
err_rpm_put:
- pm_runtime_put(&client->dev);
+ pm_runtime_put(imx319->dev);
err_unlock:
mutex_unlock(&imx319->mutex);
@@ -2231,7 +2229,6 @@ static const struct v4l2_subdev_internal_ops imx319_internal_ops = {
/* Initialize control handlers */
static int imx319_init_controls(struct imx319 *imx319)
{
- struct i2c_client *client = v4l2_get_subdevdata(&imx319->sd);
struct v4l2_ctrl_handler *ctrl_hdlr;
s64 exposure_max;
s64 vblank_def;
@@ -2311,7 +2308,7 @@ static int imx319_init_controls(struct imx319 *imx319)
0, 0, imx319_test_pattern_menu);
if (ctrl_hdlr->error) {
ret = ctrl_hdlr->error;
- dev_err(&client->dev, "control init failed: %d", ret);
+ dev_err(imx319->dev, "control init failed: %d", ret);
goto error;
}
@@ -2350,20 +2347,6 @@ static struct imx319_hwcfg *imx319_get_hwcfg(struct device *dev)
if (!cfg)
goto out_err;
- ret = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency",
- &cfg->ext_clk);
- if (ret) {
- dev_err(dev, "can't get clock frequency");
- goto out_err;
- }
-
- dev_dbg(dev, "ext clk: %d", cfg->ext_clk);
- if (cfg->ext_clk != IMX319_EXT_CLK) {
- dev_err(dev, "external clock %d is not supported",
- cfg->ext_clk);
- goto out_err;
- }
-
ret = v4l2_link_freq_to_bitmap(dev, bus_cfg.link_frequencies,
bus_cfg.nr_of_link_frequencies,
link_freq_menu_items,
@@ -2385,6 +2368,8 @@ out_err:
static int imx319_probe(struct i2c_client *client)
{
struct imx319 *imx319;
+ unsigned long freq;
+ struct clk *clk;
bool full_power;
int ret;
@@ -2392,24 +2377,37 @@ static int imx319_probe(struct i2c_client *client)
if (!imx319)
return -ENOMEM;
+ imx319->dev = &client->dev;
+
mutex_init(&imx319->mutex);
+ clk = devm_v4l2_sensor_clk_get(imx319->dev, NULL);
+ if (IS_ERR(clk))
+ return dev_err_probe(imx319->dev, PTR_ERR(clk),
+ "failed to acquire clock\n");
+
+ freq = clk_get_rate(clk);
+ if (freq != IMX319_EXT_CLK)
+ return dev_err_probe(imx319->dev, -EINVAL,
+ "external clock %lu is not supported",
+ freq);
+
/* Initialize subdev */
v4l2_i2c_subdev_init(&imx319->sd, client, &imx319_subdev_ops);
- full_power = acpi_dev_state_d0(&client->dev);
+ full_power = acpi_dev_state_d0(imx319->dev);
if (full_power) {
/* Check module identity */
ret = imx319_identify_module(imx319);
if (ret) {
- dev_err(&client->dev, "failed to find sensor: %d", ret);
+ dev_err(imx319->dev, "failed to find sensor: %d", ret);
goto error_probe;
}
}
- imx319->hwcfg = imx319_get_hwcfg(&client->dev);
+ imx319->hwcfg = imx319_get_hwcfg(imx319->dev);
if (!imx319->hwcfg) {
- dev_err(&client->dev, "failed to get hwcfg");
+ dev_err(imx319->dev, "failed to get hwcfg");
ret = -ENODEV;
goto error_probe;
}
@@ -2419,7 +2417,7 @@ static int imx319_probe(struct i2c_client *client)
ret = imx319_init_controls(imx319);
if (ret) {
- dev_err(&client->dev, "failed to init controls: %d", ret);
+ dev_err(imx319->dev, "failed to init controls: %d", ret);
goto error_probe;
}
@@ -2434,27 +2432,27 @@ static int imx319_probe(struct i2c_client *client)
imx319->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&imx319->sd.entity, 1, &imx319->pad);
if (ret) {
- dev_err(&client->dev, "failed to init entity pads: %d", ret);
+ dev_err(imx319->dev, "failed to init entity pads: %d", ret);
goto error_handler_free;
}
/* Set the device's state to active if it's in D0 state. */
if (full_power)
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
+ pm_runtime_set_active(imx319->dev);
+ pm_runtime_enable(imx319->dev);
ret = v4l2_async_register_subdev_sensor(&imx319->sd);
if (ret < 0)
goto error_media_entity_pm;
- pm_runtime_idle(&client->dev);
+ pm_runtime_idle(imx319->dev);
return 0;
error_media_entity_pm:
- pm_runtime_disable(&client->dev);
+ pm_runtime_disable(imx319->dev);
if (full_power)
- pm_runtime_set_suspended(&client->dev);
+ pm_runtime_set_suspended(imx319->dev);
media_entity_cleanup(&imx319->sd.entity);
error_handler_free:
@@ -2475,9 +2473,9 @@ static void imx319_remove(struct i2c_client *client)
media_entity_cleanup(&sd->entity);
v4l2_ctrl_handler_free(sd->ctrl_handler);
- pm_runtime_disable(&client->dev);
- if (!pm_runtime_status_suspended(&client->dev))
- pm_runtime_set_suspended(&client->dev);
+ pm_runtime_disable(imx319->dev);
+ if (!pm_runtime_status_suspended(imx319->dev))
+ pm_runtime_set_suspended(imx319->dev);
mutex_destroy(&imx319->mutex);
}
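
The imx319 hunks above all follow one conversion pattern: the struct device pointer is cached in the driver state, and the external clock is acquired and rate-checked at probe time instead of reading a "clock-frequency" fwnode property. A minimal sketch of that pattern, with my_sensor and MY_SENSOR_EXT_CLK as illustrative stand-ins (the header for devm_v4l2_sensor_clk_get() is assumed here):

#include <linux/clk.h>
#include <linux/i2c.h>
#include <media/v4l2-common.h>	/* devm_v4l2_sensor_clk_get(), assumed header */

#define MY_SENSOR_EXT_CLK 19200000UL	/* illustrative supported rate */

struct my_sensor {
	struct device *dev;
};

static int my_sensor_probe(struct i2c_client *client)
{
	struct my_sensor *sensor;
	unsigned long freq;
	struct clk *clk;

	sensor = devm_kzalloc(&client->dev, sizeof(*sensor), GFP_KERNEL);
	if (!sensor)
		return -ENOMEM;

	/* Cache the device pointer; later code never touches client->dev. */
	sensor->dev = &client->dev;

	clk = devm_v4l2_sensor_clk_get(sensor->dev, NULL);
	if (IS_ERR(clk))
		return dev_err_probe(sensor->dev, PTR_ERR(clk),
				     "failed to acquire clock\n");

	/* Validate the rate from the clock itself, not from a property. */
	freq = clk_get_rate(clk);
	if (freq != MY_SENSOR_EXT_CLK)
		return dev_err_probe(sensor->dev, -EINVAL,
				     "external clock %lu is not supported\n",
				     freq);

	return 0;
}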
diff --git a/drivers/media/i2c/imx334.c b/drivers/media/i2c/imx334.c
index 846b9928d4e8..9654f9268056 100644
--- a/drivers/media/i2c/imx334.c
+++ b/drivers/media/i2c/imx334.c
@@ -118,6 +118,9 @@
#define IMX334_REG_TP CCI_REG8(0x329e)
#define IMX334_TP_COLOR_HBARS 0xa
#define IMX334_TP_COLOR_VBARS 0xb
+#define IMX334_TP_BLACK 0x0
+#define IMX334_TP_WHITE 0x1
+#define IMX334_TP_BLACK_GREY 0xc
#define IMX334_TPG_EN_DOUT CCI_REG8(0x329c)
#define IMX334_TP_ENABLE 0x1
@@ -398,12 +401,18 @@ static const char * const imx334_test_pattern_menu[] = {
"Disabled",
"Vertical Color Bars",
"Horizontal Color Bars",
+ "Black and Grey Bars",
+ "Black Color",
+ "White Color",
};
static const int imx334_test_pattern_val[] = {
IMX334_TP_DISABLE,
IMX334_TP_COLOR_HBARS,
IMX334_TP_COLOR_VBARS,
+ IMX334_TP_BLACK_GREY,
+ IMX334_TP_BLACK,
+ IMX334_TP_WHITE,
};
static const struct cci_reg_sequence raw10_framefmt_regs[] = {
@@ -997,7 +1006,7 @@ static int imx334_parse_hw_config(struct imx334 *imx334)
"failed to get reset gpio\n");
/* Get sensor input clock */
- imx334->inclk = devm_clk_get(imx334->dev, NULL);
+ imx334->inclk = devm_v4l2_sensor_clk_get(imx334->dev, NULL);
if (IS_ERR(imx334->inclk))
return dev_err_probe(imx334->dev, PTR_ERR(imx334->inclk),
"could not get inclk\n");
@@ -1070,6 +1079,10 @@ static int imx334_power_on(struct device *dev)
struct imx334 *imx334 = to_imx334(sd);
int ret;
+ /*
+ * Note: the reset assertion is misinterpreted here - do not re-use this
+ * code. The XCLR pin is driven at the wrong logical level for a reset
+ * signal.
+ */
gpiod_set_value_cansleep(imx334->reset_gpio, 1);
ret = clk_prepare_enable(imx334->inclk);
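
The three new imx334 test patterns extend two parallel arrays: the control value picked from the menu indexes the register value table directly, so both arrays must grow in the same order. A sketch of that contract, with hypothetical names:

static const char * const tp_menu[] = {
	"Disabled",
	"Black and Grey Bars",
};

static const int tp_val[] = {
	0x0,	/* index 0: "Disabled" */
	0xc,	/* index 1: "Black and Grey Bars" */
};

/* menu_index is ctrl->val from V4L2_CID_TEST_PATTERN */
static int tp_register_value(u32 menu_index)
{
	return tp_val[menu_index];
}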
diff --git a/drivers/media/i2c/imx335.c b/drivers/media/i2c/imx335.c
index 9b4db4cd4929..c043df2f15fb 100644
--- a/drivers/media/i2c/imx335.c
+++ b/drivers/media/i2c/imx335.c
@@ -1026,11 +1026,10 @@ static int imx335_parse_hw_config(struct imx335 *imx335)
}
/* Get sensor input clock */
- imx335->inclk = devm_clk_get(imx335->dev, NULL);
- if (IS_ERR(imx335->inclk)) {
- dev_err(imx335->dev, "could not get inclk\n");
- return PTR_ERR(imx335->inclk);
- }
+ imx335->inclk = devm_v4l2_sensor_clk_get(imx335->dev, NULL);
+ if (IS_ERR(imx335->inclk))
+ return dev_err_probe(imx335->dev, PTR_ERR(imx335->inclk),
+ "could not get inclk\n");
rate = clk_get_rate(imx335->inclk);
if (rate != IMX335_INCLK_RATE) {
diff --git a/drivers/media/i2c/imx355.c b/drivers/media/i2c/imx355.c
index b2dce67c0b6b..776107efe386 100644
--- a/drivers/media/i2c/imx355.c
+++ b/drivers/media/i2c/imx355.c
@@ -1,11 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Intel Corporation
-#include <linux/unaligned.h>
#include <linux/acpi.h>
+#include <linux/clk.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
+#include <linux/unaligned.h>
+
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
@@ -92,11 +94,13 @@ struct imx355_mode {
};
struct imx355_hwcfg {
- u32 ext_clk; /* sensor external clk */
unsigned long link_freq_bitmap;
};
struct imx355 {
+ struct device *dev;
+ struct clk *clk;
+
struct v4l2_subdev sd;
struct media_pad pad;
@@ -1136,14 +1140,13 @@ static int imx355_write_reg(struct imx355 *imx355, u16 reg, u32 len, u32 val)
static int imx355_write_regs(struct imx355 *imx355,
const struct imx355_reg *regs, u32 len)
{
- struct i2c_client *client = v4l2_get_subdevdata(&imx355->sd);
int ret;
u32 i;
for (i = 0; i < len; i++) {
ret = imx355_write_reg(imx355, regs[i].address, 1, regs[i].val);
if (ret) {
- dev_err_ratelimited(&client->dev,
+ dev_err_ratelimited(imx355->dev,
"write reg 0x%4.4x return err %d",
regs[i].address, ret);
@@ -1178,7 +1181,6 @@ static int imx355_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct imx355 *imx355 = container_of(ctrl->handler,
struct imx355, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&imx355->sd);
s64 max;
int ret;
@@ -1197,7 +1199,7 @@ static int imx355_set_ctrl(struct v4l2_ctrl *ctrl)
* Applying V4L2 control value only happens
* when power is up for streaming
*/
- if (!pm_runtime_get_if_in_use(&client->dev))
+ if (!pm_runtime_get_if_in_use(imx355->dev))
return 0;
switch (ctrl->id) {
@@ -1231,12 +1233,12 @@ static int imx355_set_ctrl(struct v4l2_ctrl *ctrl)
break;
default:
ret = -EINVAL;
- dev_info(&client->dev, "ctrl(id:0x%x,val:0x%x) is not handled",
+ dev_info(imx355->dev, "ctrl(id:0x%x,val:0x%x) is not handled",
ctrl->id, ctrl->val);
break;
}
- pm_runtime_put(&client->dev);
+ pm_runtime_put(imx355->dev);
return ret;
}
@@ -1385,7 +1387,6 @@ imx355_set_pad_format(struct v4l2_subdev *sd,
/* Start streaming */
static int imx355_start_streaming(struct imx355 *imx355)
{
- struct i2c_client *client = v4l2_get_subdevdata(&imx355->sd);
const struct imx355_reg_list *reg_list;
int ret;
@@ -1393,7 +1394,7 @@ static int imx355_start_streaming(struct imx355 *imx355)
reg_list = &imx355_global_setting;
ret = imx355_write_regs(imx355, reg_list->regs, reg_list->num_of_regs);
if (ret) {
- dev_err(&client->dev, "failed to set global settings");
+ dev_err(imx355->dev, "failed to set global settings");
return ret;
}
@@ -1401,7 +1402,7 @@ static int imx355_start_streaming(struct imx355 *imx355)
reg_list = &imx355->cur_mode->reg_list;
ret = imx355_write_regs(imx355, reg_list->regs, reg_list->num_of_regs);
if (ret) {
- dev_err(&client->dev, "failed to set mode");
+ dev_err(imx355->dev, "failed to set mode");
return ret;
}
@@ -1429,13 +1430,12 @@ static int imx355_stop_streaming(struct imx355 *imx355)
static int imx355_set_stream(struct v4l2_subdev *sd, int enable)
{
struct imx355 *imx355 = to_imx355(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = 0;
mutex_lock(&imx355->mutex);
if (enable) {
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(imx355->dev);
if (ret < 0)
goto err_unlock;
@@ -1448,7 +1448,7 @@ static int imx355_set_stream(struct v4l2_subdev *sd, int enable)
goto err_rpm_put;
} else {
imx355_stop_streaming(imx355);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(imx355->dev);
}
/* vflip and hflip cannot change during streaming */
@@ -1460,7 +1460,7 @@ static int imx355_set_stream(struct v4l2_subdev *sd, int enable)
return ret;
err_rpm_put:
- pm_runtime_put(&client->dev);
+ pm_runtime_put(imx355->dev);
err_unlock:
mutex_unlock(&imx355->mutex);
@@ -1470,7 +1470,6 @@ err_unlock:
/* Verify chip ID */
static int imx355_identify_module(struct imx355 *imx355)
{
- struct i2c_client *client = v4l2_get_subdevdata(&imx355->sd);
int ret;
u32 val;
@@ -1479,7 +1478,7 @@ static int imx355_identify_module(struct imx355 *imx355)
return ret;
if (val != IMX355_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%x",
+ dev_err(imx355->dev, "chip id mismatch: %x!=%x",
IMX355_CHIP_ID, val);
return -EIO;
}
@@ -1519,7 +1518,6 @@ static const struct v4l2_subdev_internal_ops imx355_internal_ops = {
/* Initialize control handlers */
static int imx355_init_controls(struct imx355 *imx355)
{
- struct i2c_client *client = v4l2_get_subdevdata(&imx355->sd);
struct v4l2_fwnode_device_properties props;
struct v4l2_ctrl_handler *ctrl_hdlr;
s64 exposure_max;
@@ -1600,11 +1598,11 @@ static int imx355_init_controls(struct imx355 *imx355)
0, 0, imx355_test_pattern_menu);
if (ctrl_hdlr->error) {
ret = ctrl_hdlr->error;
- dev_err(&client->dev, "control init failed: %d", ret);
+ dev_err(imx355->dev, "control init failed: %d", ret);
goto error;
}
- ret = v4l2_fwnode_device_parse(&client->dev, &props);
+ ret = v4l2_fwnode_device_parse(imx355->dev, &props);
if (ret)
goto error;
@@ -1648,20 +1646,6 @@ static struct imx355_hwcfg *imx355_get_hwcfg(struct device *dev)
if (!cfg)
goto out_err;
- ret = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency",
- &cfg->ext_clk);
- if (ret) {
- dev_err(dev, "can't get clock frequency");
- goto out_err;
- }
-
- dev_dbg(dev, "ext clk: %d", cfg->ext_clk);
- if (cfg->ext_clk != IMX355_EXT_CLK) {
- dev_err(dev, "external clock %d is not supported",
- cfg->ext_clk);
- goto out_err;
- }
-
ret = v4l2_link_freq_to_bitmap(dev, bus_cfg.link_frequencies,
bus_cfg.nr_of_link_frequencies,
link_freq_menu_items,
@@ -1683,27 +1667,41 @@ out_err:
static int imx355_probe(struct i2c_client *client)
{
struct imx355 *imx355;
+ unsigned long freq;
int ret;
imx355 = devm_kzalloc(&client->dev, sizeof(*imx355), GFP_KERNEL);
if (!imx355)
return -ENOMEM;
+ imx355->dev = &client->dev;
+
mutex_init(&imx355->mutex);
+ imx355->clk = devm_v4l2_sensor_clk_get(imx355->dev, NULL);
+ if (IS_ERR(imx355->clk))
+ return dev_err_probe(imx355->dev, PTR_ERR(imx355->clk),
+ "failed to get clock\n");
+
+ freq = clk_get_rate(imx355->clk);
+ if (freq != IMX355_EXT_CLK)
+ return dev_err_probe(imx355->dev, -EINVAL,
+ "external clock %lu is not supported\n",
+ freq);
+
/* Initialize subdev */
v4l2_i2c_subdev_init(&imx355->sd, client, &imx355_subdev_ops);
/* Check module identity */
ret = imx355_identify_module(imx355);
if (ret) {
- dev_err(&client->dev, "failed to find sensor: %d", ret);
+ dev_err(imx355->dev, "failed to find sensor: %d", ret);
goto error_probe;
}
- imx355->hwcfg = imx355_get_hwcfg(&client->dev);
+ imx355->hwcfg = imx355_get_hwcfg(imx355->dev);
if (!imx355->hwcfg) {
- dev_err(&client->dev, "failed to get hwcfg");
+ dev_err(imx355->dev, "failed to get hwcfg");
ret = -ENODEV;
goto error_probe;
}
@@ -1713,7 +1711,7 @@ static int imx355_probe(struct i2c_client *client)
ret = imx355_init_controls(imx355);
if (ret) {
- dev_err(&client->dev, "failed to init controls: %d", ret);
+ dev_err(imx355->dev, "failed to init controls: %d", ret);
goto error_probe;
}
@@ -1728,7 +1726,7 @@ static int imx355_probe(struct i2c_client *client)
imx355->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&imx355->sd.entity, 1, &imx355->pad);
if (ret) {
- dev_err(&client->dev, "failed to init entity pads: %d", ret);
+ dev_err(imx355->dev, "failed to init entity pads: %d", ret);
goto error_handler_free;
}
@@ -1736,9 +1734,9 @@ static int imx355_probe(struct i2c_client *client)
* Device is already turned on by i2c-core with ACPI domain PM.
* Enable runtime PM and turn off the device.
*/
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
- pm_runtime_idle(&client->dev);
+ pm_runtime_set_active(imx355->dev);
+ pm_runtime_enable(imx355->dev);
+ pm_runtime_idle(imx355->dev);
ret = v4l2_async_register_subdev_sensor(&imx355->sd);
if (ret < 0)
@@ -1747,8 +1745,8 @@ static int imx355_probe(struct i2c_client *client)
return 0;
error_media_entity_runtime_pm:
- pm_runtime_disable(&client->dev);
- pm_runtime_set_suspended(&client->dev);
+ pm_runtime_disable(imx355->dev);
+ pm_runtime_set_suspended(imx355->dev);
media_entity_cleanup(&imx355->sd.entity);
error_handler_free:
@@ -1769,8 +1767,8 @@ static void imx355_remove(struct i2c_client *client)
media_entity_cleanup(&sd->entity);
v4l2_ctrl_handler_free(sd->ctrl_handler);
- pm_runtime_disable(&client->dev);
- pm_runtime_set_suspended(&client->dev);
+ pm_runtime_disable(imx355->dev);
+ pm_runtime_set_suspended(imx355->dev);
mutex_destroy(&imx355->mutex);
}
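
Both imx319 and imx355 keep the same runtime-PM guard in their set_ctrl paths: hardware is written only while the sensor is already powered up; otherwise the control framework caches the value and __v4l2_ctrl_handler_setup() replays it at stream-on. A sketch of the guard, reusing the illustrative my_sensor state (my_sensor_apply_ctrl is a hypothetical helper):

#include <linux/pm_runtime.h>
#include <media/v4l2-ctrls.h>

struct my_sensor {
	struct device *dev;
	struct v4l2_ctrl_handler ctrl_handler;
};

/* hypothetical helper doing the actual register writes */
static int my_sensor_apply_ctrl(struct my_sensor *sensor,
				struct v4l2_ctrl *ctrl)
{
	return 0;
}

static int my_sensor_set_ctrl(struct v4l2_ctrl *ctrl)
{
	struct my_sensor *sensor = container_of(ctrl->handler,
						struct my_sensor,
						ctrl_handler);
	int ret;

	/*
	 * Returns 0 (without taking a reference) when the device is
	 * powered down; the cached control value is applied later.
	 */
	if (!pm_runtime_get_if_in_use(sensor->dev))
		return 0;

	ret = my_sensor_apply_ctrl(sensor, ctrl);

	pm_runtime_put(sensor->dev);

	return ret;
}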
diff --git a/drivers/media/i2c/imx412.c b/drivers/media/i2c/imx412.c
index c74097a59c42..7bbd639a9ddf 100644
--- a/drivers/media/i2c/imx412.c
+++ b/drivers/media/i2c/imx412.c
@@ -933,11 +933,10 @@ static int imx412_parse_hw_config(struct imx412 *imx412)
}
/* Get sensor input clock */
- imx412->inclk = devm_clk_get(imx412->dev, NULL);
- if (IS_ERR(imx412->inclk)) {
- dev_err(imx412->dev, "could not get inclk\n");
- return PTR_ERR(imx412->inclk);
- }
+ imx412->inclk = devm_v4l2_sensor_clk_get(imx412->dev, NULL);
+ if (IS_ERR(imx412->inclk))
+ return dev_err_probe(imx412->dev, PTR_ERR(imx412->inclk),
+ "could not get inclk\n");
rate = clk_get_rate(imx412->inclk);
if (rate != IMX412_INCLK_RATE) {
diff --git a/drivers/media/i2c/imx415.c b/drivers/media/i2c/imx415.c
index 276bf4d6f39d..0b424c17e880 100644
--- a/drivers/media/i2c/imx415.c
+++ b/drivers/media/i2c/imx415.c
@@ -1250,7 +1250,7 @@ static int imx415_parse_hw_config(struct imx415 *sensor)
return dev_err_probe(sensor->dev, PTR_ERR(sensor->reset),
"failed to get reset GPIO\n");
- sensor->clk = devm_clk_get(sensor->dev, NULL);
+ sensor->clk = devm_v4l2_sensor_clk_get(sensor->dev, NULL);
if (IS_ERR(sensor->clk))
return dev_err_probe(sensor->dev, PTR_ERR(sensor->clk),
"failed to get clock\n");
diff --git a/drivers/media/i2c/ir-kbd-i2c.c b/drivers/media/i2c/ir-kbd-i2c.c
index c84e1e0e6109..5588cdd7ec20 100644
--- a/drivers/media/i2c/ir-kbd-i2c.c
+++ b/drivers/media/i2c/ir-kbd-i2c.c
@@ -321,9 +321,9 @@ static int get_key_avermedia_cardbus(struct IR_i2c *ir, enum rc_proto *protocol,
static int ir_key_poll(struct IR_i2c *ir)
{
- enum rc_proto protocol;
- u32 scancode;
- u8 toggle;
+ enum rc_proto protocol = 0;
+ u32 scancode = 0;
+ u8 toggle = 0;
int rc;
dev_dbg(&ir->rc->dev, "%s\n", __func__);
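
The ir-kbd-i2c change zero-initializes the out-values because the various get_key() callbacks only write them when a keypress was actually decoded; on the rc <= 0 paths the variables would otherwise reach the caller (or trip compiler uninitialized-use warnings) with indeterminate contents. A sketch of the calling contract:

#include <media/ir-kbd-i2c.h>
#include <media/rc-core.h>

/*
 * rc > 0: keypress decoded and out-values written; rc == 0: no key;
 * rc < 0: error. Only the rc > 0 case fills in the out-values.
 */
static int poll_once(struct IR_i2c *ir)
{
	enum rc_proto protocol = 0;
	u32 scancode = 0;
	u8 toggle = 0;
	int rc;

	rc = ir->get_key(ir, &protocol, &scancode, &toggle);
	if (rc < 0)
		return rc;	/* out-values were never written */
	if (rc)
		rc_keydown(ir->rc, protocol, scancode, toggle);

	return 0;
}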
diff --git a/drivers/media/i2c/mt9m001.c b/drivers/media/i2c/mt9m001.c
index 12d3e86bdc0f..7a6114d18dfc 100644
--- a/drivers/media/i2c/mt9m001.c
+++ b/drivers/media/i2c/mt9m001.c
@@ -743,9 +743,10 @@ static int mt9m001_probe(struct i2c_client *client)
if (!mt9m001)
return -ENOMEM;
- mt9m001->clk = devm_clk_get(&client->dev, NULL);
+ mt9m001->clk = devm_v4l2_sensor_clk_get(&client->dev, NULL);
if (IS_ERR(mt9m001->clk))
- return PTR_ERR(mt9m001->clk);
+ return dev_err_probe(&client->dev, PTR_ERR(mt9m001->clk),
+ "failed to get the clock\n");
mt9m001->standby_gpio = devm_gpiod_get_optional(&client->dev, "standby",
GPIOD_OUT_LOW);
diff --git a/drivers/media/i2c/mt9m111.c b/drivers/media/i2c/mt9m111.c
index 9aa5dcda3805..05dcf37c6f01 100644
--- a/drivers/media/i2c/mt9m111.c
+++ b/drivers/media/i2c/mt9m111.c
@@ -1279,9 +1279,10 @@ static int mt9m111_probe(struct i2c_client *client)
return ret;
}
- mt9m111->clk = devm_clk_get(&client->dev, "mclk");
+ mt9m111->clk = devm_v4l2_sensor_clk_get(&client->dev, "mclk");
if (IS_ERR(mt9m111->clk))
- return PTR_ERR(mt9m111->clk);
+ return dev_err_probe(&client->dev, PTR_ERR(mt9m111->clk),
+ "failed to get mclk\n");
mt9m111->regulator = devm_regulator_get(&client->dev, "vdd");
if (IS_ERR(mt9m111->regulator)) {
diff --git a/drivers/media/i2c/mt9m114.c b/drivers/media/i2c/mt9m114.c
index aa3fd6c6c76c..51ebbe7ae996 100644
--- a/drivers/media/i2c/mt9m114.c
+++ b/drivers/media/i2c/mt9m114.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/types.h>
@@ -42,6 +43,9 @@
#define MT9M114_RESET_AND_MISC_CONTROL CCI_REG16(0x001a)
#define MT9M114_RESET_SOC BIT(0)
#define MT9M114_PAD_SLEW CCI_REG16(0x001e)
+#define MT9M114_PAD_SLEW_MIN 0
+#define MT9M114_PAD_SLEW_MAX 7
+#define MT9M114_PAD_SLEW_DEFAULT 7
#define MT9M114_PAD_CONTROL CCI_REG16(0x0032)
/* XDMA registers */
@@ -388,6 +392,7 @@ struct mt9m114 {
unsigned int pixrate;
bool streaming;
+ u32 pad_slew_rate;
/* Pixel Array */
struct {
@@ -645,9 +650,6 @@ static const struct cci_reg_sequence mt9m114_init[] = {
{ MT9M114_CAM_SENSOR_CFG_FINE_INTEG_TIME_MAX, 1459 },
{ MT9M114_CAM_SENSOR_CFG_FINE_CORRECTION, 96 },
{ MT9M114_CAM_SENSOR_CFG_REG_0_DATA, 32 },
-
- /* Miscellaneous settings */
- { MT9M114_PAD_SLEW, 0x0777 },
};
/* -----------------------------------------------------------------------------
@@ -779,6 +781,13 @@ static int mt9m114_initialize(struct mt9m114 *sensor)
if (ret < 0)
return ret;
+ value = sensor->pad_slew_rate
+ | sensor->pad_slew_rate << 4
+ | sensor->pad_slew_rate << 8;
+ cci_write(sensor->regmap, MT9M114_PAD_SLEW, value, &ret);
+ if (ret < 0)
+ return ret;
+
ret = mt9m114_set_state(sensor, MT9M114_SYS_STATE_ENTER_CONFIG_CHANGE);
if (ret < 0)
return ret;
@@ -1283,6 +1292,7 @@ static int mt9m114_pa_set_selection(struct v4l2_subdev *sd,
struct mt9m114 *sensor = pa_to_mt9m114(sd);
struct v4l2_mbus_framefmt *format;
struct v4l2_rect *crop;
+ int ret = 0;
if (sel->target != V4L2_SEL_TGT_CROP)
return -EINVAL;
@@ -1298,25 +1308,41 @@ static int mt9m114_pa_set_selection(struct v4l2_subdev *sd,
* binning, but binning is configured after setting the selection, so
* we can't tell here if it will be used.
*/
- crop->left = ALIGN(sel->r.left, 4);
- crop->top = ALIGN(sel->r.top, 2);
- crop->width = clamp_t(unsigned int, ALIGN(sel->r.width, 4),
- MT9M114_PIXEL_ARRAY_MIN_OUTPUT_WIDTH,
- MT9M114_PIXEL_ARRAY_WIDTH - crop->left);
- crop->height = clamp_t(unsigned int, ALIGN(sel->r.height, 2),
- MT9M114_PIXEL_ARRAY_MIN_OUTPUT_HEIGHT,
- MT9M114_PIXEL_ARRAY_HEIGHT - crop->top);
-
- sel->r = *crop;
+ sel->r.left = ALIGN(sel->r.left, 4);
+ sel->r.top = ALIGN(sel->r.top, 2);
+ sel->r.width = clamp_t(unsigned int, ALIGN(sel->r.width, 4),
+ MT9M114_PIXEL_ARRAY_MIN_OUTPUT_WIDTH,
+ MT9M114_PIXEL_ARRAY_WIDTH - sel->r.left);
+ sel->r.height = clamp_t(unsigned int, ALIGN(sel->r.height, 2),
+ MT9M114_PIXEL_ARRAY_MIN_OUTPUT_HEIGHT,
+ MT9M114_PIXEL_ARRAY_HEIGHT - sel->r.top);
+
+ /* Changing the selection size is not allowed in streaming state. */
+ if (sensor->streaming &&
+ (sel->r.height != crop->height || sel->r.width != crop->width))
+ return -EBUSY;
+
+ *crop = sel->r;
/* Reset the format. */
format->width = crop->width;
format->height = crop->height;
- if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE)
- mt9m114_pa_ctrl_update_blanking(sensor, format);
+ if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return ret;
- return 0;
+ mt9m114_pa_ctrl_update_blanking(sensor, format);
+
+ /* Apply values immediately if streaming. */
+ if (sensor->streaming) {
+ ret = mt9m114_configure_pa(sensor, state);
+ if (ret)
+ return ret;
+ /* Changing the cropping config requires a CONFIG_CHANGE. */
+ ret = mt9m114_set_state(sensor,
+ MT9M114_SYS_STATE_ENTER_CONFIG_CHANGE);
+ }
+ return ret;
}
static const struct v4l2_subdev_pad_ops mt9m114_pa_pad_ops = {
@@ -2360,6 +2386,17 @@ static int mt9m114_parse_dt(struct mt9m114 *sensor)
goto error;
}
+ sensor->pad_slew_rate = MT9M114_PAD_SLEW_DEFAULT;
+ device_property_read_u32(&sensor->client->dev, "slew-rate",
+ &sensor->pad_slew_rate);
+
+ if (sensor->pad_slew_rate < MT9M114_PAD_SLEW_MIN ||
+ sensor->pad_slew_rate > MT9M114_PAD_SLEW_MAX) {
+ dev_err(&sensor->client->dev, "Invalid slew-rate %u\n",
+ sensor->pad_slew_rate);
+ return -EINVAL;
+ }
+
return 0;
error:
@@ -2390,10 +2427,10 @@ static int mt9m114_probe(struct i2c_client *client)
return ret;
/* Acquire clocks, GPIOs and regulators. */
- sensor->clk = devm_clk_get(dev, NULL);
+ sensor->clk = devm_v4l2_sensor_clk_get(dev, NULL);
if (IS_ERR(sensor->clk)) {
- ret = PTR_ERR(sensor->clk);
- dev_err_probe(dev, ret, "Failed to get clock\n");
+ ret = dev_err_probe(dev, PTR_ERR(sensor->clk),
+ "Failed to get clock\n");
goto error_ep_free;
}
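
The new MT9M114_PAD_SLEW programming replicates the 3-bit slew rate into three register nibbles, so the default rate of 7 reproduces the 0x0777 value that was previously hard-coded in the init table. In isolation:

/* rate is in the 0..7 range enforced by mt9m114_parse_dt() */
static u16 pad_slew_value(u32 rate)
{
	return rate | rate << 4 | rate << 8;
}

/* pad_slew_value(7) == 0x0777, pad_slew_value(2) == 0x0222 */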
diff --git a/drivers/media/i2c/mt9p031.c b/drivers/media/i2c/mt9p031.c
index 4ef5fb06131d..1500ee4db47e 100644
--- a/drivers/media/i2c/mt9p031.c
+++ b/drivers/media/i2c/mt9p031.c
@@ -233,9 +233,10 @@ static int mt9p031_clk_setup(struct mt9p031 *mt9p031)
unsigned long ext_freq;
int ret;
- mt9p031->clk = devm_clk_get(&client->dev, NULL);
+ mt9p031->clk = devm_v4l2_sensor_clk_get(&client->dev, NULL);
if (IS_ERR(mt9p031->clk))
- return PTR_ERR(mt9p031->clk);
+ return dev_err_probe(&client->dev, PTR_ERR(mt9p031->clk),
+ "failed to get the clock\n");
ret = clk_set_rate(mt9p031->clk, mt9p031->ext_freq);
if (ret < 0)
@@ -1092,6 +1093,7 @@ static int mt9p031_parse_properties(struct mt9p031 *mt9p031, struct device *dev)
static int mt9p031_probe(struct i2c_client *client)
{
struct i2c_adapter *adapter = client->adapter;
+ const struct mt9p031_model_info *info;
struct mt9p031 *mt9p031;
unsigned int i;
int ret;
@@ -1112,7 +1114,8 @@ static int mt9p031_probe(struct i2c_client *client)
mt9p031->output_control = MT9P031_OUTPUT_CONTROL_DEF;
mt9p031->mode2 = MT9P031_READ_MODE_2_ROW_BLC;
- mt9p031->code = (uintptr_t)device_get_match_data(&client->dev);
+ info = device_get_match_data(&client->dev);
+ mt9p031->code = info->code;
mt9p031->regulators[0].supply = "vdd";
mt9p031->regulators[1].supply = "vdd_io";
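
The mt9p031 match data now points at a struct mt9p031_model_info instead of encoding the media bus code in the pointer itself, which removes the uintptr_t cast. A sketch of the shape this implies (the struct layout is an assumption; only the .code member is visible in the hunk):

struct mt9p031_model_info {
	u32 code;	/* media bus format produced by this model */
};

static const struct mt9p031_model_info my_color_info = {
	.code = MEDIA_BUS_FMT_SGRBG12_1X12,
};

static const struct of_device_id my_of_match[] = {
	{ .compatible = "aptina,mt9p031", .data = &my_color_info },
	{ /* sentinel */ }
};

/* in probe: info = device_get_match_data(dev); sensor->code = info->code; */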
diff --git a/drivers/media/i2c/mt9t112.c b/drivers/media/i2c/mt9t112.c
index 878dff9b7577..2d2c840fc002 100644
--- a/drivers/media/i2c/mt9t112.c
+++ b/drivers/media/i2c/mt9t112.c
@@ -1078,13 +1078,12 @@ static int mt9t112_probe(struct i2c_client *client)
v4l2_i2c_subdev_init(&priv->subdev, client, &mt9t112_subdev_ops);
- priv->clk = devm_clk_get(&client->dev, "extclk");
- if (PTR_ERR(priv->clk) == -ENOENT) {
+ priv->clk = devm_v4l2_sensor_clk_get(&client->dev, "extclk");
+ if (PTR_ERR(priv->clk) == -ENOENT)
priv->clk = NULL;
- } else if (IS_ERR(priv->clk)) {
- dev_err(&client->dev, "Unable to get clock \"extclk\"\n");
- return PTR_ERR(priv->clk);
- }
+ else if (IS_ERR(priv->clk))
+ return dev_err_probe(&client->dev, PTR_ERR(priv->clk),
+ "Unable to get clock \"extclk\"\n");
priv->standby_gpio = devm_gpiod_get_optional(&client->dev, "standby",
GPIOD_OUT_HIGH);
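
mt9t112 keeps its external clock optional: a missing "extclk" comes back as -ENOENT and is mapped to NULL, which the clk API accepts as a no-op clock, while every other error now fails probe through dev_err_probe(). A sketch of that fallback:

static struct clk *get_optional_extclk(struct device *dev)
{
	struct clk *clk = devm_v4l2_sensor_clk_get(dev, "extclk");

	/* Absent clock is fine: clk_prepare_enable(NULL) is a no-op. */
	if (PTR_ERR(clk) == -ENOENT)
		return NULL;

	return clk;	/* may still be ERR_PTR(); caller checks IS_ERR() */
}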
diff --git a/drivers/media/i2c/mt9v032.c b/drivers/media/i2c/mt9v032.c
index 302120ff125e..d4359d5b92bb 100644
--- a/drivers/media/i2c/mt9v032.c
+++ b/drivers/media/i2c/mt9v032.c
@@ -15,16 +15,15 @@
#include <linux/i2c.h>
#include <linux/log2.h>
#include <linux/mod_devicetable.h>
+#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/regmap.h>
#include <linux/slab.h>
-#include <linux/videodev2.h>
#include <linux/v4l2-mediabus.h>
-#include <linux/module.h>
+#include <linux/videodev2.h>
-#include <media/i2c/mt9v032.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
@@ -182,7 +181,16 @@ static const struct mt9v032_model_version mt9v032_versions[] = {
{ MT9V034_CHIP_ID_REV1, "MT9V024/MT9V034 rev1" },
};
+struct mt9v032_platform_data {
+ unsigned int clk_pol:1;
+
+ const s64 *link_freqs;
+ s64 link_def_freq;
+};
+
struct mt9v032 {
+ struct device *dev;
+
struct v4l2_subdev subdev;
struct media_pad pad;
@@ -205,7 +213,7 @@ struct mt9v032 {
struct gpio_desc *reset_gpio;
struct gpio_desc *standby_gpio;
- struct mt9v032_platform_data *pdata;
+ struct mt9v032_platform_data pdata;
const struct mt9v032_model_info *model;
const struct mt9v032_model_version *version;
@@ -330,7 +338,7 @@ static int __mt9v032_set_power(struct mt9v032 *mt9v032, bool on)
return ret;
/* Configure the pixel clock polarity */
- if (mt9v032->pdata && mt9v032->pdata->clk_pol) {
+ if (mt9v032->pdata.clk_pol) {
ret = regmap_write(map, mt9v032->model->data->pclk_reg,
MT9V032_PIXEL_CLOCK_INV_PXL_CLK);
if (ret < 0)
@@ -473,13 +481,12 @@ static int mt9v032_get_format(struct v4l2_subdev *subdev,
static void mt9v032_configure_pixel_rate(struct mt9v032 *mt9v032)
{
- struct i2c_client *client = v4l2_get_subdevdata(&mt9v032->subdev);
int ret;
ret = v4l2_ctrl_s_ctrl_int64(mt9v032->pixel_rate,
mt9v032->sysclk / mt9v032->hratio);
if (ret < 0)
- dev_warn(&client->dev, "failed to set pixel rate (%d)\n", ret);
+ dev_warn(mt9v032->dev, "failed to set pixel rate (%d)\n", ret);
}
static unsigned int mt9v032_calc_ratio(unsigned int input, unsigned int output)
@@ -682,7 +689,7 @@ static int mt9v032_s_ctrl(struct v4l2_ctrl *ctrl)
if (mt9v032->link_freq == NULL)
break;
- freq = mt9v032->pdata->link_freqs[mt9v032->link_freq->val];
+ freq = mt9v032->pdata.link_freqs[mt9v032->link_freq->val];
*mt9v032->pixel_rate->p_new.p_s64 = freq;
mt9v032->sysclk = freq;
break;
@@ -883,12 +890,12 @@ static int mt9v032_registered(struct v4l2_subdev *subdev)
u32 version;
int ret;
- dev_info(&client->dev, "Probing MT9V032 at address 0x%02x\n",
+ dev_info(mt9v032->dev, "Probing MT9V032 at address 0x%02x\n",
client->addr);
ret = mt9v032_power_on(mt9v032);
if (ret < 0) {
- dev_err(&client->dev, "MT9V032 power up failed\n");
+ dev_err(mt9v032->dev, "MT9V032 power up failed\n");
return ret;
}
@@ -898,7 +905,7 @@ static int mt9v032_registered(struct v4l2_subdev *subdev)
mt9v032_power_off(mt9v032);
if (ret < 0) {
- dev_err(&client->dev, "Failed reading chip version\n");
+ dev_err(mt9v032->dev, "Failed reading chip version\n");
return ret;
}
@@ -910,12 +917,12 @@ static int mt9v032_registered(struct v4l2_subdev *subdev)
}
if (mt9v032->version == NULL) {
- dev_err(&client->dev, "Unsupported chip version 0x%04x\n",
+ dev_err(mt9v032->dev, "Unsupported chip version 0x%04x\n",
version);
return -ENODEV;
}
- dev_info(&client->dev, "%s detected at address 0x%02x\n",
+ dev_info(mt9v032->dev, "%s detected at address 0x%02x\n",
mt9v032->version->name, client->addr);
mt9v032_configure_pixel_rate(mt9v032);
@@ -995,41 +1002,33 @@ static const struct regmap_config mt9v032_regmap_config = {
* Driver initialization and probing
*/
-static struct mt9v032_platform_data *
-mt9v032_get_pdata(struct i2c_client *client)
+static int mt9v032_get_pdata(struct mt9v032 *mt9v032)
{
- struct mt9v032_platform_data *pdata = NULL;
+ struct mt9v032_platform_data *pdata = &mt9v032->pdata;
struct v4l2_fwnode_endpoint endpoint = { .bus_type = 0 };
- struct device_node *np;
+ struct device_node *np __free(device_node) = NULL;
struct property *prop;
- if (!IS_ENABLED(CONFIG_OF) || !client->dev.of_node)
- return client->dev.platform_data;
-
- np = of_graph_get_endpoint_by_regs(client->dev.of_node, 0, -1);
+ np = of_graph_get_endpoint_by_regs(mt9v032->dev->of_node, 0, -1);
if (!np)
- return NULL;
+ return -EINVAL;
if (v4l2_fwnode_endpoint_parse(of_fwnode_handle(np), &endpoint) < 0)
- goto done;
-
- pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- goto done;
+ return -EINVAL;
prop = of_find_property(np, "link-frequencies", NULL);
if (prop) {
u64 *link_freqs;
size_t size = prop->length / sizeof(*link_freqs);
- link_freqs = devm_kcalloc(&client->dev, size,
+ link_freqs = devm_kcalloc(mt9v032->dev, size,
sizeof(*link_freqs), GFP_KERNEL);
if (!link_freqs)
- goto done;
+ return -EINVAL;
if (of_property_read_u64_array(np, "link-frequencies",
link_freqs, size) < 0)
- goto done;
+ return -EINVAL;
pdata->link_freqs = link_freqs;
pdata->link_def_freq = link_freqs[0];
@@ -1038,14 +1037,11 @@ mt9v032_get_pdata(struct i2c_client *client)
pdata->clk_pol = !!(endpoint.bus.parallel.flags &
V4L2_MBUS_PCLK_SAMPLE_RISING);
-done:
- of_node_put(np);
- return pdata;
+ return 0;
}
static int mt9v032_probe(struct i2c_client *client)
{
- struct mt9v032_platform_data *pdata = mt9v032_get_pdata(client);
struct mt9v032 *mt9v032;
unsigned int i;
int ret;
@@ -1054,27 +1050,35 @@ static int mt9v032_probe(struct i2c_client *client)
if (!mt9v032)
return -ENOMEM;
+ mt9v032->dev = &client->dev;
+
mt9v032->regmap = devm_regmap_init_i2c(client, &mt9v032_regmap_config);
if (IS_ERR(mt9v032->regmap))
return PTR_ERR(mt9v032->regmap);
- mt9v032->clk = devm_clk_get(&client->dev, NULL);
+ mt9v032->clk = devm_v4l2_sensor_clk_get(mt9v032->dev, NULL);
if (IS_ERR(mt9v032->clk))
- return PTR_ERR(mt9v032->clk);
+ return dev_err_probe(mt9v032->dev, PTR_ERR(mt9v032->clk),
+ "failed to get the clock\n");
- mt9v032->reset_gpio = devm_gpiod_get_optional(&client->dev, "reset",
+ mt9v032->reset_gpio = devm_gpiod_get_optional(mt9v032->dev, "reset",
GPIOD_OUT_HIGH);
if (IS_ERR(mt9v032->reset_gpio))
return PTR_ERR(mt9v032->reset_gpio);
- mt9v032->standby_gpio = devm_gpiod_get_optional(&client->dev, "standby",
+ mt9v032->standby_gpio = devm_gpiod_get_optional(mt9v032->dev, "standby",
GPIOD_OUT_LOW);
if (IS_ERR(mt9v032->standby_gpio))
return PTR_ERR(mt9v032->standby_gpio);
mutex_init(&mt9v032->power_lock);
- mt9v032->pdata = pdata;
- mt9v032->model = i2c_get_match_data(client);
+
+ ret = mt9v032_get_pdata(mt9v032);
+ if (ret)
+ return dev_err_probe(mt9v032->dev, -EINVAL,
+ "Failed to parse DT properties\n");
+
+ mt9v032->model = device_get_match_data(mt9v032->dev);
v4l2_ctrl_handler_init(&mt9v032->ctrls, 11 +
ARRAY_SIZE(mt9v032_aegc_controls));
@@ -1119,7 +1123,8 @@ static int mt9v032_probe(struct i2c_client *client)
v4l2_ctrl_new_std(&mt9v032->ctrls, &mt9v032_ctrl_ops,
V4L2_CID_PIXEL_RATE, 1, INT_MAX, 1, 1);
- if (pdata && pdata->link_freqs) {
+ if (mt9v032->pdata.link_freqs) {
+ const struct mt9v032_platform_data *pdata = &mt9v032->pdata;
unsigned int def = 0;
for (i = 0; pdata->link_freqs[i]; ++i) {
@@ -1139,7 +1144,7 @@ static int mt9v032_probe(struct i2c_client *client)
mt9v032->subdev.ctrl_handler = &mt9v032->ctrls;
if (mt9v032->ctrls.error) {
- dev_err(&client->dev, "control initialization error %d\n",
+ dev_err(mt9v032->dev, "control initialization error %d\n",
mt9v032->ctrls.error);
ret = mt9v032->ctrls.error;
goto err;
@@ -1177,7 +1182,7 @@ static int mt9v032_probe(struct i2c_client *client)
if (ret < 0)
goto err;
- mt9v032->subdev.dev = &client->dev;
+ mt9v032->subdev.dev = mt9v032->dev;
ret = v4l2_async_register_subdev(&mt9v032->subdev);
if (ret < 0)
goto err;
@@ -1261,19 +1266,6 @@ static const struct mt9v032_model_info mt9v032_models[] = {
},
};
-static const struct i2c_device_id mt9v032_id[] = {
- { "mt9v022", (kernel_ulong_t)&mt9v032_models[MT9V032_MODEL_V022_COLOR] },
- { "mt9v022m", (kernel_ulong_t)&mt9v032_models[MT9V032_MODEL_V022_MONO] },
- { "mt9v024", (kernel_ulong_t)&mt9v032_models[MT9V032_MODEL_V024_COLOR] },
- { "mt9v024m", (kernel_ulong_t)&mt9v032_models[MT9V032_MODEL_V024_MONO] },
- { "mt9v032", (kernel_ulong_t)&mt9v032_models[MT9V032_MODEL_V032_COLOR] },
- { "mt9v032m", (kernel_ulong_t)&mt9v032_models[MT9V032_MODEL_V032_MONO] },
- { "mt9v034", (kernel_ulong_t)&mt9v032_models[MT9V032_MODEL_V034_COLOR] },
- { "mt9v034m", (kernel_ulong_t)&mt9v032_models[MT9V032_MODEL_V034_MONO] },
- { /* Sentinel */ }
-};
-MODULE_DEVICE_TABLE(i2c, mt9v032_id);
-
static const struct of_device_id mt9v032_of_match[] = {
{ .compatible = "aptina,mt9v022", .data = &mt9v032_models[MT9V032_MODEL_V022_COLOR] },
{ .compatible = "aptina,mt9v022m", .data = &mt9v032_models[MT9V032_MODEL_V022_MONO] },
@@ -1294,7 +1286,6 @@ static struct i2c_driver mt9v032_driver = {
},
.probe = mt9v032_probe,
.remove = mt9v032_remove,
- .id_table = mt9v032_id,
};
module_i2c_driver(mt9v032_driver);
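
The reworked mt9v032_get_pdata() leans on scope-based cleanup: the __free(device_node) attribute runs of_node_put() when np leaves scope, which is what lets the done: label and the explicit put disappear. A minimal sketch (assigning at the declaration, as below, is the usual form):

#include <linux/cleanup.h>
#include <linux/of_graph.h>

static int parse_first_endpoint(struct device *dev)
{
	struct device_node *np __free(device_node) =
		of_graph_get_endpoint_by_regs(dev->of_node, 0, -1);

	if (!np)
		return -EINVAL;

	/* ... parse endpoint properties ... */

	return 0;	/* of_node_put(np) runs automatically on any return */
}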
diff --git a/drivers/media/i2c/mt9v111.c b/drivers/media/i2c/mt9v111.c
index 723fe138e7bc..b4f2703faa18 100644
--- a/drivers/media/i2c/mt9v111.c
+++ b/drivers/media/i2c/mt9v111.c
@@ -365,8 +365,6 @@ static int __mt9v111_power_on(struct v4l2_subdev *sd)
if (ret)
return ret;
- clk_set_rate(mt9v111->clk, mt9v111->sysclk);
-
gpiod_set_value(mt9v111->standby, 0);
usleep_range(500, 1000);
@@ -532,8 +530,8 @@ static int mt9v111_calc_frame_rate(struct mt9v111_dev *mt9v111,
static int mt9v111_hw_config(struct mt9v111_dev *mt9v111)
{
struct i2c_client *c = mt9v111->client;
- unsigned int ret;
u16 outfmtctrl2;
+ int ret;
/* Force device reset. */
ret = __mt9v111_hw_reset(mt9v111);
@@ -1129,9 +1127,10 @@ static int mt9v111_probe(struct i2c_client *client)
mt9v111->dev = &client->dev;
mt9v111->client = client;
- mt9v111->clk = devm_clk_get(&client->dev, NULL);
+ mt9v111->clk = devm_v4l2_sensor_clk_get(&client->dev, NULL);
if (IS_ERR(mt9v111->clk))
- return PTR_ERR(mt9v111->clk);
+ return dev_err_probe(&client->dev, PTR_ERR(mt9v111->clk),
+ "failed to get the clock\n");
mt9v111->sysclk = clk_get_rate(mt9v111->clk);
if (mt9v111->sysclk > MT9V111_MAX_CLKIN)
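
The s/unsigned int/int/ fix in mt9v111_hw_config() matters because kernel error codes are small negative integers: stored in an unsigned variable, the sign is lost and a less-than-zero test compiles to always-false. In isolation:

#include <linux/errno.h>

static bool demo_signedness(void)
{
	unsigned int uret = -EIO;	/* wraps to a huge positive value */
	int ret = -EIO;

	/*
	 * A plain "uret < 0" is compiled as always-false (and triggers
	 * -Wtype-limits); the sign only survives in the int variable.
	 */
	return (int)uret < 0 && ret < 0;	/* true, but only via the cast and ret */
}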
diff --git a/drivers/media/i2c/og01a1b.c b/drivers/media/i2c/og01a1b.c
index 78d5d406e4b7..c7184de6251a 100644
--- a/drivers/media/i2c/og01a1b.c
+++ b/drivers/media/i2c/og01a1b.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2022 Intel Corporation.
-#include <linux/unaligned.h>
#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/delay.h>
@@ -10,6 +9,8 @@
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
+#include <linux/unaligned.h>
+
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
@@ -421,6 +422,7 @@ static const struct og01a1b_mode supported_modes[] = {
};
struct og01a1b {
+ struct device *dev;
struct clk *xvclk;
struct gpio_desc *reset_gpio;
struct regulator *avdd;
@@ -512,7 +514,6 @@ static int og01a1b_write_reg(struct og01a1b *og01a1b, u16 reg, u16 len, u32 val)
static int og01a1b_write_reg_list(struct og01a1b *og01a1b,
const struct og01a1b_reg_list *r_list)
{
- struct i2c_client *client = v4l2_get_subdevdata(&og01a1b->sd);
unsigned int i;
int ret;
@@ -520,7 +521,7 @@ static int og01a1b_write_reg_list(struct og01a1b *og01a1b,
ret = og01a1b_write_reg(og01a1b, r_list->regs[i].address, 1,
r_list->regs[i].val);
if (ret) {
- dev_err_ratelimited(&client->dev,
+ dev_err_ratelimited(og01a1b->dev,
"failed to write reg 0x%4.4x. error = %d",
r_list->regs[i].address, ret);
return ret;
@@ -544,7 +545,6 @@ static int og01a1b_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct og01a1b *og01a1b = container_of(ctrl->handler,
struct og01a1b, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&og01a1b->sd);
s64 exposure_max;
int ret = 0;
@@ -560,7 +560,7 @@ static int og01a1b_set_ctrl(struct v4l2_ctrl *ctrl)
}
/* V4L2 controls values will be applied only when power is already up */
- if (!pm_runtime_get_if_in_use(&client->dev))
+ if (!pm_runtime_get_if_in_use(og01a1b->dev))
return 0;
switch (ctrl->id) {
@@ -596,7 +596,7 @@ static int og01a1b_set_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_put(&client->dev);
+ pm_runtime_put(og01a1b->dev);
return ret;
}
@@ -682,13 +682,12 @@ static void og01a1b_update_pad_format(const struct og01a1b_mode *mode,
{
fmt->width = mode->width;
fmt->height = mode->height;
- fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ fmt->code = MEDIA_BUS_FMT_Y10_1X10;
fmt->field = V4L2_FIELD_NONE;
}
static int og01a1b_start_streaming(struct og01a1b *og01a1b)
{
- struct i2c_client *client = v4l2_get_subdevdata(&og01a1b->sd);
const struct og01a1b_reg_list *reg_list;
int link_freq_index, ret;
@@ -697,14 +696,14 @@ static int og01a1b_start_streaming(struct og01a1b *og01a1b)
ret = og01a1b_write_reg_list(og01a1b, reg_list);
if (ret) {
- dev_err(&client->dev, "failed to set plls");
+ dev_err(og01a1b->dev, "failed to set plls");
return ret;
}
reg_list = &og01a1b->cur_mode->reg_list;
ret = og01a1b_write_reg_list(og01a1b, reg_list);
if (ret) {
- dev_err(&client->dev, "failed to set mode");
+ dev_err(og01a1b->dev, "failed to set mode");
return ret;
}
@@ -716,7 +715,7 @@ static int og01a1b_start_streaming(struct og01a1b *og01a1b)
OG01A1B_REG_VALUE_08BIT,
OG01A1B_MODE_STREAMING);
if (ret) {
- dev_err(&client->dev, "failed to set stream");
+ dev_err(og01a1b->dev, "failed to set stream");
return ret;
}
@@ -725,22 +724,19 @@ static int og01a1b_start_streaming(struct og01a1b *og01a1b)
static void og01a1b_stop_streaming(struct og01a1b *og01a1b)
{
- struct i2c_client *client = v4l2_get_subdevdata(&og01a1b->sd);
-
if (og01a1b_write_reg(og01a1b, OG01A1B_REG_MODE_SELECT,
OG01A1B_REG_VALUE_08BIT, OG01A1B_MODE_STANDBY))
- dev_err(&client->dev, "failed to set stream");
+ dev_err(og01a1b->dev, "failed to set stream");
}
static int og01a1b_set_stream(struct v4l2_subdev *sd, int enable)
{
struct og01a1b *og01a1b = to_og01a1b(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = 0;
mutex_lock(&og01a1b->mutex);
if (enable) {
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(og01a1b->dev);
if (ret) {
mutex_unlock(&og01a1b->mutex);
return ret;
@@ -750,11 +746,11 @@ static int og01a1b_set_stream(struct v4l2_subdev *sd, int enable)
if (ret) {
enable = 0;
og01a1b_stop_streaming(og01a1b);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(og01a1b->dev);
}
} else {
og01a1b_stop_streaming(og01a1b);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(og01a1b->dev);
}
mutex_unlock(&og01a1b->mutex);
@@ -828,7 +824,7 @@ static int og01a1b_enum_mbus_code(struct v4l2_subdev *sd,
if (code->index > 0)
return -EINVAL;
- code->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ code->code = MEDIA_BUS_FMT_Y10_1X10;
return 0;
}
@@ -840,7 +836,7 @@ static int og01a1b_enum_frame_size(struct v4l2_subdev *sd,
if (fse->index >= ARRAY_SIZE(supported_modes))
return -EINVAL;
- if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10)
+ if (fse->code != MEDIA_BUS_FMT_Y10_1X10)
return -EINVAL;
fse->min_width = supported_modes[fse->index].width;
@@ -889,7 +885,6 @@ static const struct v4l2_subdev_internal_ops og01a1b_internal_ops = {
static int og01a1b_identify_module(struct og01a1b *og01a1b)
{
- struct i2c_client *client = v4l2_get_subdevdata(&og01a1b->sd);
int ret;
u32 val;
@@ -899,7 +894,7 @@ static int og01a1b_identify_module(struct og01a1b *og01a1b)
return ret;
if (val != OG01A1B_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%x",
+ dev_err(og01a1b->dev, "chip id mismatch: %x!=%x",
OG01A1B_CHIP_ID, val);
return -ENXIO;
}
@@ -909,35 +904,18 @@ static int og01a1b_identify_module(struct og01a1b *og01a1b)
static int og01a1b_check_hwcfg(struct og01a1b *og01a1b)
{
- struct i2c_client *client = v4l2_get_subdevdata(&og01a1b->sd);
- struct device *dev = &client->dev;
+ struct device *dev = og01a1b->dev;
struct fwnode_handle *ep;
struct fwnode_handle *fwnode = dev_fwnode(dev);
struct v4l2_fwnode_endpoint bus_cfg = {
.bus_type = V4L2_MBUS_CSI2_DPHY
};
- u32 mclk;
int ret;
unsigned int i, j;
if (!fwnode)
return -ENXIO;
- ret = fwnode_property_read_u32(fwnode, "clock-frequency", &mclk);
- if (ret) {
- if (!og01a1b->xvclk) {
- dev_err(dev, "can't get clock frequency");
- return ret;
- }
-
- mclk = clk_get_rate(og01a1b->xvclk);
- }
-
- if (mclk != OG01A1B_MCLK) {
- dev_err(dev, "external clock %d is not supported", mclk);
- return -EINVAL;
- }
-
ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
if (!ep)
return -ENXIO;
@@ -1066,47 +1044,54 @@ static void og01a1b_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
v4l2_ctrl_handler_free(sd->ctrl_handler);
- pm_runtime_disable(&client->dev);
+ pm_runtime_disable(og01a1b->dev);
mutex_destroy(&og01a1b->mutex);
}
static int og01a1b_probe(struct i2c_client *client)
{
struct og01a1b *og01a1b;
+ unsigned long freq;
int ret;
og01a1b = devm_kzalloc(&client->dev, sizeof(*og01a1b), GFP_KERNEL);
if (!og01a1b)
return -ENOMEM;
+ og01a1b->dev = &client->dev;
+
v4l2_i2c_subdev_init(&og01a1b->sd, client, &og01a1b_subdev_ops);
- og01a1b->xvclk = devm_clk_get_optional(&client->dev, NULL);
- if (IS_ERR(og01a1b->xvclk)) {
- ret = PTR_ERR(og01a1b->xvclk);
- dev_err(&client->dev, "failed to get xvclk clock: %d\n", ret);
- return ret;
- }
+ og01a1b->xvclk = devm_v4l2_sensor_clk_get(og01a1b->dev, NULL);
+ if (IS_ERR(og01a1b->xvclk))
+ return dev_err_probe(og01a1b->dev, PTR_ERR(og01a1b->xvclk),
+ "failed to get xvclk clock\n");
+
+ freq = clk_get_rate(og01a1b->xvclk);
+ if (freq != OG01A1B_MCLK)
+ return dev_err_probe(og01a1b->dev, -EINVAL,
+ "external clock %lu is not supported",
+ freq);
ret = og01a1b_check_hwcfg(og01a1b);
if (ret) {
- dev_err(&client->dev, "failed to check HW configuration: %d",
+ dev_err(og01a1b->dev, "failed to check HW configuration: %d",
ret);
return ret;
}
- og01a1b->reset_gpio = devm_gpiod_get_optional(&client->dev, "reset",
+ og01a1b->reset_gpio = devm_gpiod_get_optional(og01a1b->dev, "reset",
GPIOD_OUT_LOW);
if (IS_ERR(og01a1b->reset_gpio)) {
- dev_err(&client->dev, "cannot get reset GPIO\n");
+ dev_err(og01a1b->dev, "cannot get reset GPIO\n");
return PTR_ERR(og01a1b->reset_gpio);
}
- og01a1b->avdd = devm_regulator_get_optional(&client->dev, "avdd");
+ og01a1b->avdd = devm_regulator_get_optional(og01a1b->dev, "avdd");
if (IS_ERR(og01a1b->avdd)) {
ret = PTR_ERR(og01a1b->avdd);
if (ret != -ENODEV) {
- dev_err_probe(&client->dev, ret,
+ dev_err_probe(og01a1b->dev, ret,
"Failed to get 'avdd' regulator\n");
return ret;
}
@@ -1114,11 +1099,11 @@ static int og01a1b_probe(struct i2c_client *client)
og01a1b->avdd = NULL;
}
- og01a1b->dovdd = devm_regulator_get_optional(&client->dev, "dovdd");
+ og01a1b->dovdd = devm_regulator_get_optional(og01a1b->dev, "dovdd");
if (IS_ERR(og01a1b->dovdd)) {
ret = PTR_ERR(og01a1b->dovdd);
if (ret != -ENODEV) {
- dev_err_probe(&client->dev, ret,
+ dev_err_probe(og01a1b->dev, ret,
"Failed to get 'dovdd' regulator\n");
return ret;
}
@@ -1126,11 +1111,11 @@ static int og01a1b_probe(struct i2c_client *client)
og01a1b->dovdd = NULL;
}
- og01a1b->dvdd = devm_regulator_get_optional(&client->dev, "dvdd");
+ og01a1b->dvdd = devm_regulator_get_optional(og01a1b->dev, "dvdd");
if (IS_ERR(og01a1b->dvdd)) {
ret = PTR_ERR(og01a1b->dvdd);
if (ret != -ENODEV) {
- dev_err_probe(&client->dev, ret,
+ dev_err_probe(og01a1b->dev, ret,
"Failed to get 'dvdd' regulator\n");
return ret;
}
@@ -1139,13 +1124,13 @@ static int og01a1b_probe(struct i2c_client *client)
}
/* The sensor must be powered on to read the CHIP_ID register */
- ret = og01a1b_power_on(&client->dev);
+ ret = og01a1b_power_on(og01a1b->dev);
if (ret)
return ret;
ret = og01a1b_identify_module(og01a1b);
if (ret) {
- dev_err(&client->dev, "failed to find sensor: %d", ret);
+ dev_err(og01a1b->dev, "failed to find sensor: %d", ret);
goto power_off;
}
@@ -1153,7 +1138,7 @@ static int og01a1b_probe(struct i2c_client *client)
og01a1b->cur_mode = &supported_modes[0];
ret = og01a1b_init_controls(og01a1b);
if (ret) {
- dev_err(&client->dev, "failed to init controls: %d", ret);
+ dev_err(og01a1b->dev, "failed to init controls: %d", ret);
goto probe_error_v4l2_ctrl_handler_free;
}
@@ -1164,21 +1149,21 @@ static int og01a1b_probe(struct i2c_client *client)
og01a1b->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&og01a1b->sd.entity, 1, &og01a1b->pad);
if (ret) {
- dev_err(&client->dev, "failed to init entity pads: %d", ret);
+ dev_err(og01a1b->dev, "failed to init entity pads: %d", ret);
goto probe_error_v4l2_ctrl_handler_free;
}
ret = v4l2_async_register_subdev_sensor(&og01a1b->sd);
if (ret < 0) {
- dev_err(&client->dev, "failed to register V4L2 subdev: %d",
+ dev_err(og01a1b->dev, "failed to register V4L2 subdev: %d",
ret);
goto probe_error_media_entity_cleanup;
}
/* Enable runtime PM and turn off the device */
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
- pm_runtime_idle(&client->dev);
+ pm_runtime_set_active(og01a1b->dev);
+ pm_runtime_enable(og01a1b->dev);
+ pm_runtime_idle(og01a1b->dev);
return 0;
@@ -1190,7 +1175,7 @@ probe_error_v4l2_ctrl_handler_free:
mutex_destroy(&og01a1b->mutex);
power_off:
- og01a1b_power_off(&client->dev);
+ og01a1b_power_off(og01a1b->dev);
return ret;
}
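
The og0ve1b driver added below is written against the v4l2-cci helpers. One convention worth noting before reading it: cci_write()/cci_read()/cci_multi_reg_write() take an optional int *err argument and become no-ops once *err is non-zero, so register sequences can be chained with a single check at the end. A sketch:

#include <media/v4l2-cci.h>

static int write_pair(struct regmap *map)
{
	int ret = 0;

	cci_write(map, CCI_REG8(0x0100), 0x01, &ret);
	/* skipped automatically if the first write already failed */
	cci_write(map, CCI_REG8(0x0101), 0x02, &ret);

	return ret;
}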
diff --git a/drivers/media/i2c/og0ve1b.c b/drivers/media/i2c/og0ve1b.c
new file mode 100644
index 000000000000..262d9df766fe
--- /dev/null
+++ b/drivers/media/i2c/og0ve1b.c
@@ -0,0 +1,816 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2024-2025 Linaro Ltd
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/units.h>
+#include <media/v4l2-cci.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+
+#define OG0VE1B_LINK_FREQ_500MHZ (500 * HZ_PER_MHZ)
+#define OG0VE1B_MCLK_FREQ_24MHZ (24 * HZ_PER_MHZ)
+
+#define OG0VE1B_REG_CHIP_ID CCI_REG24(0x300a)
+#define OG0VE1B_CHIP_ID 0xc75645
+
+#define OG0VE1B_REG_MODE_SELECT CCI_REG8(0x0100)
+#define OG0VE1B_MODE_STANDBY 0x00
+#define OG0VE1B_MODE_STREAMING BIT(0)
+
+#define OG0VE1B_REG_SOFTWARE_RST CCI_REG8(0x0103)
+#define OG0VE1B_SOFTWARE_RST BIT(0)
+
+/* Exposure controls from sensor */
+#define OG0VE1B_REG_EXPOSURE CCI_REG24(0x3500)
+#define OG0VE1B_EXPOSURE_MIN 1
+#define OG0VE1B_EXPOSURE_MAX_MARGIN 14
+#define OG0VE1B_EXPOSURE_STEP 1
+#define OG0VE1B_EXPOSURE_DEFAULT 554
+
+/* Analogue gain controls from sensor */
+#define OG0VE1B_REG_ANALOGUE_GAIN CCI_REG16(0x350a)
+#define OG0VE1B_ANALOGUE_GAIN_MIN 1
+#define OG0VE1B_ANALOGUE_GAIN_MAX 0x1ff
+#define OG0VE1B_ANALOGUE_GAIN_STEP 1
+#define OG0VE1B_ANALOGUE_GAIN_DEFAULT 16
+
+/* Test pattern */
+#define OG0VE1B_REG_PRE_ISP CCI_REG8(0x5e00)
+#define OG0VE1B_TEST_PATTERN_ENABLE BIT(7)
+
+#define to_og0ve1b(_sd) container_of(_sd, struct og0ve1b, sd)
+
+static const s64 og0ve1b_link_freq_menu[] = {
+ OG0VE1B_LINK_FREQ_500MHZ,
+};
+
+struct og0ve1b_reg_list {
+ const struct cci_reg_sequence *regs;
+ unsigned int num_regs;
+};
+
+struct og0ve1b_mode {
+ u32 width; /* Frame width in pixels */
+ u32 height; /* Frame height in pixels */
+ u32 hts; /* Horizontal timing size */
+ u32 vts; /* Default vertical timing size */
+ u32 bpp; /* Bits per pixel */
+
+ const struct og0ve1b_reg_list reg_list; /* Sensor register setting */
+};
+
+static const char * const og0ve1b_test_pattern_menu[] = {
+ "Disabled",
+ "Vertical Colour Bars",
+};
+
+static const char * const og0ve1b_supply_names[] = {
+ "avdd", /* Analog power */
+ "dovdd", /* Digital I/O power */
+ "dvdd", /* Digital core power */
+};
+
+#define OG0VE1B_NUM_SUPPLIES ARRAY_SIZE(og0ve1b_supply_names)
+
+struct og0ve1b {
+ struct device *dev;
+ struct regmap *regmap;
+ struct clk *xvclk;
+ struct gpio_desc *reset_gpio;
+ struct regulator_bulk_data supplies[OG0VE1B_NUM_SUPPLIES];
+
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+
+ struct v4l2_ctrl_handler ctrl_handler;
+
+ /* Saved register value */
+ u64 pre_isp;
+};
+
+static const struct cci_reg_sequence og0ve1b_640x480_120fps_mode[] = {
+ { CCI_REG8(0x30a0), 0x02 },
+ { CCI_REG8(0x30a1), 0x00 },
+ { CCI_REG8(0x30a2), 0x48 },
+ { CCI_REG8(0x30a3), 0x34 },
+ { CCI_REG8(0x30a4), 0xf7 },
+ { CCI_REG8(0x30a5), 0x00 },
+ { CCI_REG8(0x3082), 0x32 },
+ { CCI_REG8(0x3083), 0x01 },
+ { CCI_REG8(0x301c), 0xf0 },
+ { CCI_REG8(0x301e), 0x0b },
+ { CCI_REG8(0x3106), 0x10 },
+ { CCI_REG8(0x3708), 0x77 },
+ { CCI_REG8(0x3709), 0xf8 },
+ { CCI_REG8(0x3717), 0x00 },
+ { CCI_REG8(0x3782), 0x00 },
+ { CCI_REG8(0x3783), 0x47 },
+ { CCI_REG8(0x37a2), 0x00 },
+ { CCI_REG8(0x3503), 0x07 },
+ { CCI_REG8(0x3509), 0x10 },
+ { CCI_REG8(0x3600), 0x83 },
+ { CCI_REG8(0x3601), 0x21 },
+ { CCI_REG8(0x3602), 0xf1 },
+ { CCI_REG8(0x360a), 0x18 },
+ { CCI_REG8(0x360e), 0xb3 },
+ { CCI_REG8(0x3613), 0x20 },
+ { CCI_REG8(0x366a), 0x78 },
+ { CCI_REG8(0x3706), 0x63 },
+ { CCI_REG8(0x3713), 0x00 },
+ { CCI_REG8(0x3716), 0xb0 },
+ { CCI_REG8(0x37a1), 0x38 },
+ { CCI_REG8(0x3800), 0x00 },
+ { CCI_REG8(0x3801), 0x04 },
+ { CCI_REG8(0x3802), 0x00 },
+ { CCI_REG8(0x3803), 0x04 },
+ { CCI_REG8(0x3804), 0x02 },
+ { CCI_REG8(0x3805), 0x8b },
+ { CCI_REG8(0x3806), 0x01 },
+ { CCI_REG8(0x3807), 0xeb },
+ { CCI_REG8(0x3808), 0x02 }, /* output width */
+ { CCI_REG8(0x3809), 0x80 },
+ { CCI_REG8(0x380a), 0x01 }, /* output height */
+ { CCI_REG8(0x380b), 0xe0 },
+ { CCI_REG8(0x380c), 0x03 }, /* horizontal timing size */
+ { CCI_REG8(0x380d), 0x18 },
+ { CCI_REG8(0x380e), 0x02 }, /* vertical timing size */
+ { CCI_REG8(0x380f), 0x38 },
+ { CCI_REG8(0x3811), 0x04 },
+ { CCI_REG8(0x3813), 0x04 },
+ { CCI_REG8(0x3814), 0x11 },
+ { CCI_REG8(0x3815), 0x11 },
+ { CCI_REG8(0x3820), 0x00 },
+ { CCI_REG8(0x3821), 0x00 },
+ { CCI_REG8(0x3823), 0x04 },
+ { CCI_REG8(0x382a), 0x00 },
+ { CCI_REG8(0x382b), 0x03 },
+ { CCI_REG8(0x3840), 0x00 },
+ { CCI_REG8(0x389e), 0x00 },
+ { CCI_REG8(0x3c05), 0x08 },
+ { CCI_REG8(0x3c26), 0x02 },
+ { CCI_REG8(0x3c27), 0xc0 },
+ { CCI_REG8(0x3c28), 0x00 },
+ { CCI_REG8(0x3c29), 0x40 },
+ { CCI_REG8(0x3c2c), 0x00 },
+ { CCI_REG8(0x3c2d), 0x50 },
+ { CCI_REG8(0x3c2e), 0x02 },
+ { CCI_REG8(0x3c2f), 0x66 },
+ { CCI_REG8(0x3c33), 0x08 },
+ { CCI_REG8(0x3c35), 0x00 },
+ { CCI_REG8(0x3c36), 0x00 },
+ { CCI_REG8(0x3c37), 0x00 },
+ { CCI_REG8(0x3f52), 0x9b },
+ { CCI_REG8(0x4001), 0x42 },
+ { CCI_REG8(0x4004), 0x08 },
+ { CCI_REG8(0x4005), 0x00 },
+ { CCI_REG8(0x4007), 0x28 },
+ { CCI_REG8(0x4009), 0x40 },
+ { CCI_REG8(0x4307), 0x30 },
+ { CCI_REG8(0x4500), 0x80 },
+ { CCI_REG8(0x4501), 0x02 },
+ { CCI_REG8(0x4502), 0x47 },
+ { CCI_REG8(0x4504), 0x7f },
+ { CCI_REG8(0x4601), 0x48 },
+ { CCI_REG8(0x4800), 0x64 },
+ { CCI_REG8(0x4801), 0x0f },
+ { CCI_REG8(0x4806), 0x2f },
+ { CCI_REG8(0x4819), 0xaa },
+ { CCI_REG8(0x4823), 0x3e },
+ { CCI_REG8(0x5000), 0x85 },
+ { CCI_REG8(0x5e00), 0x0c },
+ { CCI_REG8(0x3899), 0x09 },
+ { CCI_REG8(0x4f00), 0x64 },
+ { CCI_REG8(0x4f02), 0x0a },
+ { CCI_REG8(0x4f05), 0x0e },
+ { CCI_REG8(0x4f06), 0x11 },
+ { CCI_REG8(0x4f08), 0x0b },
+ { CCI_REG8(0x4f0a), 0xc4 },
+ { CCI_REG8(0x4f20), 0x1f },
+ { CCI_REG8(0x4f25), 0x10 },
+ { CCI_REG8(0x3016), 0x10 },
+ { CCI_REG8(0x3017), 0x00 },
+ { CCI_REG8(0x3018), 0x00 },
+ { CCI_REG8(0x3019), 0x00 },
+ { CCI_REG8(0x301a), 0x00 },
+ { CCI_REG8(0x301b), 0x00 },
+ { CCI_REG8(0x301c), 0x72 },
+ { CCI_REG8(0x3037), 0x40 },
+ { CCI_REG8(0x4f2c), 0x00 },
+ { CCI_REG8(0x4f21), 0x00 },
+ { CCI_REG8(0x4f23), 0x00 },
+ { CCI_REG8(0x4f2a), 0x00 },
+ { CCI_REG8(0x3665), 0xe7 },
+ { CCI_REG8(0x3668), 0x48 },
+ { CCI_REG8(0x3671), 0x3c },
+ { CCI_REG8(0x389a), 0x02 },
+ { CCI_REG8(0x389b), 0x00 },
+ { CCI_REG8(0x303c), 0xa0 },
+ { CCI_REG8(0x300f), 0xf0 },
+ { CCI_REG8(0x304b), 0x0f },
+ { CCI_REG8(0x3662), 0x24 },
+ { CCI_REG8(0x3006), 0x40 },
+ { CCI_REG8(0x4f26), 0x45 },
+ { CCI_REG8(0x3607), 0x34 },
+ { CCI_REG8(0x3608), 0x01 },
+ { CCI_REG8(0x360a), 0x0c },
+ { CCI_REG8(0x360b), 0x86 },
+ { CCI_REG8(0x360c), 0xcc },
+ { CCI_REG8(0x3013), 0x00 },
+ { CCI_REG8(0x3083), 0x02 },
+ { CCI_REG8(0x3084), 0x12 },
+ { CCI_REG8(0x4601), 0x38 },
+ { CCI_REG8(0x366f), 0x3a },
+ { CCI_REG8(0x3713), 0x19 },
+ { CCI_REG8(0x37a2), 0x00 },
+ { CCI_REG8(0x3f43), 0x27 },
+ { CCI_REG8(0x3f45), 0x27 },
+ { CCI_REG8(0x3f47), 0x32 },
+ { CCI_REG8(0x3f49), 0x3e },
+ { CCI_REG8(0x3f4b), 0x20 },
+ { CCI_REG8(0x3f4d), 0x30 },
+ { CCI_REG8(0x4300), 0x3f },
+ { CCI_REG8(0x4009), 0x10 },
+ { CCI_REG8(0x3f02), 0x68 },
+ { CCI_REG8(0x3700), 0x8c },
+ { CCI_REG8(0x370b), 0x7e },
+ { CCI_REG8(0x3f47), 0x35 },
+};
+
+static const struct og0ve1b_mode supported_modes[] = {
+ {
+ .width = 640,
+ .height = 480,
+ .hts = 792,
+ .vts = 568,
+ .bpp = 8,
+ .reg_list = {
+ .regs = og0ve1b_640x480_120fps_mode,
+ .num_regs = ARRAY_SIZE(og0ve1b_640x480_120fps_mode),
+ },
+ },
+};
+
+static int og0ve1b_enable_test_pattern(struct og0ve1b *og0ve1b, u32 pattern)
+{
+ u64 val = og0ve1b->pre_isp;
+
+ if (pattern)
+ val |= OG0VE1B_TEST_PATTERN_ENABLE;
+ else
+ val &= ~OG0VE1B_TEST_PATTERN_ENABLE;
+
+ return cci_write(og0ve1b->regmap, OG0VE1B_REG_PRE_ISP, val, NULL);
+}
+
+static int og0ve1b_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct og0ve1b *og0ve1b = container_of(ctrl->handler, struct og0ve1b,
+ ctrl_handler);
+ int ret;
+
+ /* V4L2 controls are applied only when the sensor is powered up for streaming */
+ if (!pm_runtime_get_if_active(og0ve1b->dev))
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_ANALOGUE_GAIN:
+ ret = cci_write(og0ve1b->regmap, OG0VE1B_REG_ANALOGUE_GAIN,
+ ctrl->val, NULL);
+ break;
+ case V4L2_CID_EXPOSURE:
+ ret = cci_write(og0ve1b->regmap, OG0VE1B_REG_EXPOSURE,
+ ctrl->val << 4, NULL);
+ break;
+ case V4L2_CID_TEST_PATTERN:
+ ret = og0ve1b_enable_test_pattern(og0ve1b, ctrl->val);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ pm_runtime_put(og0ve1b->dev);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops og0ve1b_ctrl_ops = {
+ .s_ctrl = og0ve1b_set_ctrl,
+};
+
+static int og0ve1b_init_controls(struct og0ve1b *og0ve1b)
+{
+ struct v4l2_ctrl_handler *ctrl_hdlr = &og0ve1b->ctrl_handler;
+ const struct og0ve1b_mode *mode = &supported_modes[0];
+ struct v4l2_fwnode_device_properties props;
+ s64 exposure_max, pixel_rate, h_blank;
+ struct v4l2_ctrl *ctrl;
+ int ret;
+
+ v4l2_ctrl_handler_init(ctrl_hdlr, 9);
+
+ ctrl = v4l2_ctrl_new_int_menu(ctrl_hdlr, &og0ve1b_ctrl_ops,
+ V4L2_CID_LINK_FREQ,
+ ARRAY_SIZE(og0ve1b_link_freq_menu) - 1,
+ 0, og0ve1b_link_freq_menu);
+ if (ctrl)
+ ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ pixel_rate = og0ve1b_link_freq_menu[0] / mode->bpp;
+ v4l2_ctrl_new_std(ctrl_hdlr, &og0ve1b_ctrl_ops, V4L2_CID_PIXEL_RATE,
+ 0, pixel_rate, 1, pixel_rate);
+
+ h_blank = mode->hts - mode->width;
+ ctrl = v4l2_ctrl_new_std(ctrl_hdlr, &og0ve1b_ctrl_ops, V4L2_CID_HBLANK,
+ h_blank, h_blank, 1, h_blank);
+ if (ctrl)
+ ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ ctrl = v4l2_ctrl_new_std(ctrl_hdlr, &og0ve1b_ctrl_ops, V4L2_CID_VBLANK,
+ mode->vts - mode->height,
+ mode->vts - mode->height, 1,
+ mode->vts - mode->height);
+ if (ctrl)
+ ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ v4l2_ctrl_new_std(ctrl_hdlr, &og0ve1b_ctrl_ops, V4L2_CID_ANALOGUE_GAIN,
+ OG0VE1B_ANALOGUE_GAIN_MIN, OG0VE1B_ANALOGUE_GAIN_MAX,
+ OG0VE1B_ANALOGUE_GAIN_STEP,
+ OG0VE1B_ANALOGUE_GAIN_DEFAULT);
+
+ exposure_max = (mode->vts - OG0VE1B_EXPOSURE_MAX_MARGIN);
+ v4l2_ctrl_new_std(ctrl_hdlr, &og0ve1b_ctrl_ops,
+ V4L2_CID_EXPOSURE,
+ OG0VE1B_EXPOSURE_MIN, exposure_max,
+ OG0VE1B_EXPOSURE_STEP,
+ OG0VE1B_EXPOSURE_DEFAULT);
+
+ v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &og0ve1b_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(og0ve1b_test_pattern_menu) - 1,
+ 0, 0, og0ve1b_test_pattern_menu);
+
+ if (ctrl_hdlr->error)
+ return ctrl_hdlr->error;
+
+ ret = v4l2_fwnode_device_parse(og0ve1b->dev, &props);
+ if (ret)
+ goto error_free_hdlr;
+
+ ret = v4l2_ctrl_new_fwnode_properties(ctrl_hdlr, &og0ve1b_ctrl_ops,
+ &props);
+ if (ret)
+ goto error_free_hdlr;
+
+ og0ve1b->sd.ctrl_handler = ctrl_hdlr;
+
+ return 0;
+
+error_free_hdlr:
+ v4l2_ctrl_handler_free(ctrl_hdlr);
+
+ return ret;
+}
+
+static void og0ve1b_update_pad_format(const struct og0ve1b_mode *mode,
+ struct v4l2_mbus_framefmt *fmt)
+{
+ fmt->code = MEDIA_BUS_FMT_Y8_1X8;
+ fmt->width = mode->width;
+ fmt->height = mode->height;
+ fmt->field = V4L2_FIELD_NONE;
+ fmt->colorspace = V4L2_COLORSPACE_RAW;
+ fmt->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
+ fmt->xfer_func = V4L2_XFER_FUNC_NONE;
+}
+
+static int og0ve1b_enable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 pad,
+ u64 streams_mask)
+{
+ const struct og0ve1b_reg_list *reg_list = &supported_modes[0].reg_list;
+ struct og0ve1b *og0ve1b = to_og0ve1b(sd);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(og0ve1b->dev);
+ if (ret)
+ return ret;
+
+ /* Skip explicitly entering standby mode; the software reset covers it */
+ ret = cci_write(og0ve1b->regmap, OG0VE1B_REG_SOFTWARE_RST,
+ OG0VE1B_SOFTWARE_RST, NULL);
+ if (ret) {
+ dev_err(og0ve1b->dev, "failed to software reset: %d\n", ret);
+ goto error;
+ }
+
+ ret = cci_multi_reg_write(og0ve1b->regmap, reg_list->regs,
+ reg_list->num_regs, NULL);
+ if (ret) {
+ dev_err(og0ve1b->dev, "failed to set mode: %d\n", ret);
+ goto error;
+ }
+
+ ret = __v4l2_ctrl_handler_setup(og0ve1b->sd.ctrl_handler);
+ if (ret)
+ goto error;
+
+ ret = cci_write(og0ve1b->regmap, OG0VE1B_REG_MODE_SELECT,
+ OG0VE1B_MODE_STREAMING, NULL);
+ if (ret) {
+ dev_err(og0ve1b->dev, "failed to start streaming: %d\n", ret);
+ goto error;
+ }
+
+ return 0;
+
+error:
+ pm_runtime_put_autosuspend(og0ve1b->dev);
+
+ return ret;
+}
+
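+/* Stream off: return the sensor to standby and drop the runtime PM reference */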
+static int og0ve1b_disable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 pad,
+ u64 streams_mask)
+{
+ struct og0ve1b *og0ve1b = to_og0ve1b(sd);
+ int ret;
+
+ ret = cci_write(og0ve1b->regmap, OG0VE1B_REG_MODE_SELECT,
+ OG0VE1B_MODE_STANDBY, NULL);
+ if (ret)
+ dev_err(og0ve1b->dev, "failed to stop streaming: %d\n", ret);
+
+ pm_runtime_put_autosuspend(og0ve1b->dev);
+
+ return ret;
+}
+
+static int og0ve1b_set_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct v4l2_mbus_framefmt *format;
+ const struct og0ve1b_mode *mode;
+
+ format = v4l2_subdev_state_get_format(state, 0);
+
+ mode = v4l2_find_nearest_size(supported_modes,
+ ARRAY_SIZE(supported_modes),
+ width, height,
+ fmt->format.width,
+ fmt->format.height);
+
+ og0ve1b_update_pad_format(mode, &fmt->format);
+ *format = fmt->format;
+
+ return 0;
+}
+
+static int og0ve1b_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index > 0)
+ return -EINVAL;
+
+ code->code = MEDIA_BUS_FMT_Y8_1X8;
+
+ return 0;
+}
+
+static int og0ve1b_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ if (fse->index >= ARRAY_SIZE(supported_modes))
+ return -EINVAL;
+
+ if (fse->code != MEDIA_BUS_FMT_Y8_1X8)
+ return -EINVAL;
+
+ fse->min_width = supported_modes[fse->index].width;
+ fse->max_width = fse->min_width;
+ fse->min_height = supported_modes[fse->index].height;
+ fse->max_height = fse->min_height;
+
+ return 0;
+}
+
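+/* Initialise the subdev state with the first (and only) supported mode */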
+static int og0ve1b_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
+{
+ struct v4l2_subdev_format fmt = {
+ .which = V4L2_SUBDEV_FORMAT_TRY,
+ .pad = 0,
+ .format = {
+ .code = MEDIA_BUS_FMT_Y8_1X8,
+ .width = supported_modes[0].width,
+ .height = supported_modes[0].height,
+ },
+ };
+
+ og0ve1b_set_pad_format(sd, state, &fmt);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops og0ve1b_video_ops = {
+ .s_stream = v4l2_subdev_s_stream_helper,
+};
+
+static const struct v4l2_subdev_pad_ops og0ve1b_pad_ops = {
+ .set_fmt = og0ve1b_set_pad_format,
+ .get_fmt = v4l2_subdev_get_fmt,
+ .enum_mbus_code = og0ve1b_enum_mbus_code,
+ .enum_frame_size = og0ve1b_enum_frame_size,
+ .enable_streams = og0ve1b_enable_streams,
+ .disable_streams = og0ve1b_disable_streams,
+};
+
+static const struct v4l2_subdev_ops og0ve1b_subdev_ops = {
+ .video = &og0ve1b_video_ops,
+ .pad = &og0ve1b_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops og0ve1b_internal_ops = {
+ .init_state = og0ve1b_init_state,
+};
+
+static const struct media_entity_operations og0ve1b_subdev_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
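+/* Verify the chip ID and cache the current PRE_ISP register value */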
+static int og0ve1b_identify_sensor(struct og0ve1b *og0ve1b)
+{
+ u64 val;
+ int ret;
+
+ ret = cci_read(og0ve1b->regmap, OG0VE1B_REG_CHIP_ID, &val, NULL);
+ if (ret) {
+ dev_err(og0ve1b->dev, "failed to read chip id: %d\n", ret);
+ return ret;
+ }
+
+ if (val != OG0VE1B_CHIP_ID) {
+ dev_err(og0ve1b->dev, "chip id mismatch: %x!=%llx\n",
+ OG0VE1B_CHIP_ID, val);
+ return -ENODEV;
+ }
+
+ ret = cci_read(og0ve1b->regmap, OG0VE1B_REG_PRE_ISP,
+ &og0ve1b->pre_isp, NULL);
+ if (ret)
+ dev_err(og0ve1b->dev, "failed to read pre_isp: %d\n", ret);
+
+ return ret;
+}
+
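+/* Validate the fwnode CSI-2 endpoint and its advertised link frequencies */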
+static int og0ve1b_check_hwcfg(struct og0ve1b *og0ve1b)
+{
+ struct fwnode_handle *fwnode = dev_fwnode(og0ve1b->dev), *ep;
+ struct v4l2_fwnode_endpoint bus_cfg = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY,
+ };
+ unsigned long freq_bitmap;
+ int ret;
+
+ if (!fwnode)
+ return -ENODEV;
+
+ ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
+ if (!ep)
+ return -EINVAL;
+
+ ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg);
+ fwnode_handle_put(ep);
+ if (ret)
+ return ret;
+
+ ret = v4l2_link_freq_to_bitmap(og0ve1b->dev,
+ bus_cfg.link_frequencies,
+ bus_cfg.nr_of_link_frequencies,
+ og0ve1b_link_freq_menu,
+ ARRAY_SIZE(og0ve1b_link_freq_menu),
+ &freq_bitmap);
+
+ v4l2_fwnode_endpoint_free(&bus_cfg);
+
+ return ret;
+}
+
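+/* Power-up sequence: enable the supplies, release reset, then start XVCLK */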
+static int og0ve1b_power_on(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct og0ve1b *og0ve1b = to_og0ve1b(sd);
+ int ret;
+
+ ret = regulator_bulk_enable(OG0VE1B_NUM_SUPPLIES, og0ve1b->supplies);
+ if (ret)
+ return ret;
+
+ gpiod_set_value_cansleep(og0ve1b->reset_gpio, 0);
+ usleep_range(10 * USEC_PER_MSEC, 15 * USEC_PER_MSEC);
+
+ ret = clk_prepare_enable(og0ve1b->xvclk);
+ if (ret)
+ goto reset_gpio;
+
+ return 0;
+
+reset_gpio:
+ gpiod_set_value_cansleep(og0ve1b->reset_gpio, 1);
+
+ regulator_bulk_disable(OG0VE1B_NUM_SUPPLIES, og0ve1b->supplies);
+
+ return ret;
+}
+
+static int og0ve1b_power_off(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct og0ve1b *og0ve1b = to_og0ve1b(sd);
+
+ clk_disable_unprepare(og0ve1b->xvclk);
+
+ gpiod_set_value_cansleep(og0ve1b->reset_gpio, 1);
+
+ regulator_bulk_disable(OG0VE1B_NUM_SUPPLIES, og0ve1b->supplies);
+
+ return 0;
+}
+
+static int og0ve1b_probe(struct i2c_client *client)
+{
+ struct og0ve1b *og0ve1b;
+ unsigned long freq;
+ unsigned int i;
+ int ret;
+
+ og0ve1b = devm_kzalloc(&client->dev, sizeof(*og0ve1b), GFP_KERNEL);
+ if (!og0ve1b)
+ return -ENOMEM;
+
+ og0ve1b->dev = &client->dev;
+
+ v4l2_i2c_subdev_init(&og0ve1b->sd, client, &og0ve1b_subdev_ops);
+
+ og0ve1b->regmap = devm_cci_regmap_init_i2c(client, 16);
+ if (IS_ERR(og0ve1b->regmap))
+ return dev_err_probe(og0ve1b->dev, PTR_ERR(og0ve1b->regmap),
+ "failed to init CCI\n");
+
+ og0ve1b->xvclk = devm_v4l2_sensor_clk_get(og0ve1b->dev, NULL);
+ if (IS_ERR(og0ve1b->xvclk))
+ return dev_err_probe(og0ve1b->dev, PTR_ERR(og0ve1b->xvclk),
+ "failed to get XVCLK clock\n");
+
+ freq = clk_get_rate(og0ve1b->xvclk);
+ if (freq && freq != OG0VE1B_MCLK_FREQ_24MHZ)
+ return dev_err_probe(og0ve1b->dev, -EINVAL,
+ "XVCLK clock frequency %lu is not supported\n",
+ freq);
+
+ ret = og0ve1b_check_hwcfg(og0ve1b);
+ if (ret)
+ return dev_err_probe(og0ve1b->dev, ret,
+ "failed to check HW configuration\n");
+
+ og0ve1b->reset_gpio = devm_gpiod_get_optional(og0ve1b->dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(og0ve1b->reset_gpio))
+ return dev_err_probe(og0ve1b->dev, PTR_ERR(og0ve1b->reset_gpio),
+ "cannot get reset GPIO\n");
+
+ for (i = 0; i < OG0VE1B_NUM_SUPPLIES; i++)
+ og0ve1b->supplies[i].supply = og0ve1b_supply_names[i];
+
+ ret = devm_regulator_bulk_get(og0ve1b->dev, OG0VE1B_NUM_SUPPLIES,
+ og0ve1b->supplies);
+ if (ret)
+ return dev_err_probe(og0ve1b->dev, ret,
+ "failed to get supply regulators\n");
+
+ /* The sensor must be powered on to read the CHIP_ID register */
+ ret = og0ve1b_power_on(og0ve1b->dev);
+ if (ret)
+ return ret;
+
+ ret = og0ve1b_identify_sensor(og0ve1b);
+ if (ret) {
+ dev_err_probe(og0ve1b->dev, ret, "failed to find sensor\n");
+ goto power_off;
+ }
+
+ ret = og0ve1b_init_controls(og0ve1b);
+ if (ret) {
+ dev_err_probe(og0ve1b->dev, ret, "failed to init controls\n");
+ goto power_off;
+ }
+
+ og0ve1b->sd.state_lock = og0ve1b->ctrl_handler.lock;
+ og0ve1b->sd.internal_ops = &og0ve1b_internal_ops;
+ og0ve1b->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ og0ve1b->sd.entity.ops = &og0ve1b_subdev_entity_ops;
+ og0ve1b->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ og0ve1b->pad.flags = MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&og0ve1b->sd.entity, 1, &og0ve1b->pad);
+ if (ret) {
+ dev_err_probe(og0ve1b->dev, ret,
+ "failed to init media entity pads\n");
+ goto v4l2_ctrl_handler_free;
+ }
+
+ ret = v4l2_subdev_init_finalize(&og0ve1b->sd);
+ if (ret < 0) {
+ dev_err_probe(og0ve1b->dev, ret,
+ "failed to finalize subdev initialization\n");
+ goto media_entity_cleanup;
+ }
+
+ pm_runtime_set_active(og0ve1b->dev);
+ pm_runtime_enable(og0ve1b->dev);
+
+ ret = v4l2_async_register_subdev_sensor(&og0ve1b->sd);
+ if (ret < 0) {
+ dev_err_probe(og0ve1b->dev, ret,
+ "failed to register V4L2 subdev\n");
+ goto subdev_cleanup;
+ }
+
+ /* Runtime PM is already enabled; configure autosuspend and idle the sensor */
+ pm_runtime_set_autosuspend_delay(og0ve1b->dev, 1000);
+ pm_runtime_use_autosuspend(og0ve1b->dev);
+ pm_runtime_idle(og0ve1b->dev);
+
+ return 0;
+
+subdev_cleanup:
+ v4l2_subdev_cleanup(&og0ve1b->sd);
+ pm_runtime_disable(og0ve1b->dev);
+ pm_runtime_set_suspended(og0ve1b->dev);
+
+media_entity_cleanup:
+ media_entity_cleanup(&og0ve1b->sd.entity);
+
+v4l2_ctrl_handler_free:
+ v4l2_ctrl_handler_free(og0ve1b->sd.ctrl_handler);
+
+power_off:
+ og0ve1b_power_off(og0ve1b->dev);
+
+ return ret;
+}
+
+static void og0ve1b_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct og0ve1b *og0ve1b = to_og0ve1b(sd);
+
+ v4l2_async_unregister_subdev(sd);
+ v4l2_subdev_cleanup(sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+ pm_runtime_disable(og0ve1b->dev);
+
+ if (!pm_runtime_status_suspended(og0ve1b->dev)) {
+ og0ve1b_power_off(og0ve1b->dev);
+ pm_runtime_set_suspended(og0ve1b->dev);
+ }
+}
+
+static const struct dev_pm_ops og0ve1b_pm_ops = {
+ SET_RUNTIME_PM_OPS(og0ve1b_power_off, og0ve1b_power_on, NULL)
+};
+
+static const struct of_device_id og0ve1b_of_match[] = {
+ { .compatible = "ovti,og0ve1b" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, og0ve1b_of_match);
+
+static struct i2c_driver og0ve1b_i2c_driver = {
+ .driver = {
+ .name = "og0ve1b",
+ .pm = &og0ve1b_pm_ops,
+ .of_match_table = og0ve1b_of_match,
+ },
+ .probe = og0ve1b_probe,
+ .remove = og0ve1b_remove,
+};
+
+module_i2c_driver(og0ve1b_i2c_driver);
+
+MODULE_AUTHOR("Vladimir Zapolskiy <vladimir.zapolskiy@linaro.org>");
+MODULE_DESCRIPTION("OmniVision OG0VE1B sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/i2c/ov02a10.c b/drivers/media/i2c/ov02a10.c
index 6c30e1a0d814..70d9d7c43f18 100644
--- a/drivers/media/i2c/ov02a10.c
+++ b/drivers/media/i2c/ov02a10.c
@@ -100,7 +100,8 @@ struct ov02a10_mode {
};
struct ov02a10 {
- u32 eclk_freq;
+ struct device *dev;
+
/* Indication of MIPI transmission speed select */
u32 mipi_clock_voltage;
@@ -392,7 +393,7 @@ static int ov02a10_check_sensor_id(struct ov02a10 *ov02a10)
chip_id = le16_to_cpu((__force __le16)ret);
if ((chip_id & OV02A10_ID_MASK) != OV02A10_ID) {
- dev_err(&client->dev, "unexpected sensor id(0x%04x)\n", chip_id);
+ dev_err(ov02a10->dev, "unexpected sensor id(0x%04x)\n", chip_id);
return -EINVAL;
}
@@ -481,7 +482,7 @@ static int __ov02a10_start_stream(struct ov02a10 *ov02a10)
ret = i2c_smbus_write_byte_data(client, REG_MIRROR_FLIP_CONTROL,
REG_MIRROR_FLIP_ENABLE);
if (ret < 0) {
- dev_err(&client->dev, "failed to set orientation\n");
+ dev_err(ov02a10->dev, "failed to set orientation\n");
return ret;
}
ret = i2c_smbus_write_byte_data(client, REG_GLOBAL_EFFECTIVE,
@@ -530,7 +531,6 @@ static int ov02a10_init_state(struct v4l2_subdev *sd,
static int ov02a10_s_stream(struct v4l2_subdev *sd, int on)
{
struct ov02a10 *ov02a10 = to_ov02a10(sd);
- struct i2c_client *client = v4l2_get_subdevdata(&ov02a10->subdev);
int ret;
mutex_lock(&ov02a10->mutex);
@@ -541,7 +541,7 @@ static int ov02a10_s_stream(struct v4l2_subdev *sd, int on)
}
if (on) {
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(ov02a10->dev);
if (ret < 0)
goto unlock_and_return;
@@ -553,7 +553,7 @@ static int ov02a10_s_stream(struct v4l2_subdev *sd, int on)
}
} else {
__ov02a10_stop_stream(ov02a10);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov02a10->dev);
}
ov02a10->streaming = on;
@@ -562,7 +562,7 @@ static int ov02a10_s_stream(struct v4l2_subdev *sd, int on)
return 0;
err_rpm_put:
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov02a10->dev);
unlock_and_return:
mutex_unlock(&ov02a10->mutex);
@@ -662,7 +662,6 @@ static int ov02a10_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct ov02a10 *ov02a10 = container_of(ctrl->handler,
struct ov02a10, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&ov02a10->subdev);
s64 max_expo;
int ret;
@@ -678,7 +677,7 @@ static int ov02a10_set_ctrl(struct v4l2_ctrl *ctrl)
}
/* V4L2 controls values will be applied only when power is already up */
- if (!pm_runtime_get_if_in_use(&client->dev))
+ if (!pm_runtime_get_if_in_use(ov02a10->dev))
return 0;
switch (ctrl->id) {
@@ -699,7 +698,7 @@ static int ov02a10_set_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov02a10->dev);
return ret;
}
@@ -734,7 +733,6 @@ static const struct v4l2_ctrl_ops ov02a10_ctrl_ops = {
static int ov02a10_initialize_controls(struct ov02a10 *ov02a10)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov02a10->subdev);
const struct ov02a10_mode *mode;
struct v4l2_ctrl_handler *handler;
struct v4l2_ctrl *ctrl;
@@ -790,7 +788,7 @@ static int ov02a10_initialize_controls(struct ov02a10 *ov02a10)
if (handler->error) {
ret = handler->error;
- dev_err(&client->dev, "failed to init controls(%d)\n", ret);
+ dev_err(ov02a10->dev, "failed to init controls(%d)\n", ret);
goto err_free_handler;
}
@@ -866,6 +864,8 @@ static int ov02a10_probe(struct i2c_client *client)
if (!ov02a10)
return -ENOMEM;
+ ov02a10->dev = dev;
+
ret = ov02a10_check_hwcfg(dev, ov02a10);
if (ret)
return dev_err_probe(dev, ret,
@@ -885,22 +885,11 @@ static int ov02a10_probe(struct i2c_client *client)
ov02a10->fmt.code = MEDIA_BUS_FMT_SRGGB10_1X10;
}
- ov02a10->eclk = devm_clk_get(dev, "eclk");
+ ov02a10->eclk = devm_v4l2_sensor_clk_get_legacy(dev, "eclk", false, 0);
if (IS_ERR(ov02a10->eclk))
return dev_err_probe(dev, PTR_ERR(ov02a10->eclk),
"failed to get eclk\n");
- ret = device_property_read_u32(dev, "clock-frequency",
- &ov02a10->eclk_freq);
- if (ret < 0)
- return dev_err_probe(dev, ret,
- "failed to get eclk frequency\n");
-
- ret = clk_set_rate(ov02a10->eclk, ov02a10->eclk_freq);
- if (ret < 0)
- return dev_err_probe(dev, ret,
- "failed to set eclk frequency (24MHz)\n");
-
if (clk_get_rate(ov02a10->eclk) != OV02A10_ECLK_FREQ)
dev_warn(dev, "eclk mismatched, mode is based on 24MHz\n");
@@ -985,10 +974,10 @@ static void ov02a10_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
v4l2_ctrl_handler_free(sd->ctrl_handler);
- pm_runtime_disable(&client->dev);
- if (!pm_runtime_status_suspended(&client->dev))
- ov02a10_power_off(&client->dev);
- pm_runtime_set_suspended(&client->dev);
+ pm_runtime_disable(ov02a10->dev);
+ if (!pm_runtime_status_suspended(ov02a10->dev))
+ ov02a10_power_off(ov02a10->dev);
+ pm_runtime_set_suspended(ov02a10->dev);
mutex_destroy(&ov02a10->mutex);
}
diff --git a/drivers/media/i2c/ov02c10.c b/drivers/media/i2c/ov02c10.c
index 089a4fd9627c..8c4d85dc7922 100644
--- a/drivers/media/i2c/ov02c10.c
+++ b/drivers/media/i2c/ov02c10.c
@@ -9,7 +9,6 @@
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
-#include <linux/version.h>
#include <media/v4l2-cci.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
@@ -373,6 +372,8 @@ static const char * const ov02c10_supply_names[] = {
};
struct ov02c10 {
+ struct device *dev;
+
struct v4l2_subdev sd;
struct media_pad pad;
struct v4l2_ctrl_handler ctrl_handler;
@@ -418,7 +419,6 @@ static int ov02c10_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct ov02c10 *ov02c10 = container_of(ctrl->handler,
struct ov02c10, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&ov02c10->sd);
const u32 height = supported_modes[0].height;
s64 exposure_max;
int ret = 0;
@@ -434,7 +434,7 @@ static int ov02c10_set_ctrl(struct v4l2_ctrl *ctrl)
}
/* V4L2 controls values will be applied only when power is already up */
- if (!pm_runtime_get_if_in_use(&client->dev))
+ if (!pm_runtime_get_if_in_use(ov02c10->dev))
return 0;
switch (ctrl->id) {
@@ -467,7 +467,7 @@ static int ov02c10_set_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov02c10->dev);
return ret;
}
@@ -478,7 +478,6 @@ static const struct v4l2_ctrl_ops ov02c10_ctrl_ops = {
static int ov02c10_init_controls(struct ov02c10 *ov02c10)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov02c10->sd);
struct v4l2_ctrl_handler *ctrl_hdlr = &ov02c10->ctrl_handler;
const struct ov02c10_mode *mode = &supported_modes[0];
u32 vblank_min, vblank_max, vblank_default, vts_def;
@@ -542,7 +541,7 @@ static int ov02c10_init_controls(struct ov02c10 *ov02c10)
ARRAY_SIZE(ov02c10_test_pattern_menu) - 1,
0, 0, ov02c10_test_pattern_menu);
- ret = v4l2_fwnode_device_parse(&client->dev, &props);
+ ret = v4l2_fwnode_device_parse(ov02c10->dev, &props);
if (ret)
return ret;
@@ -570,12 +569,11 @@ static int ov02c10_enable_streams(struct v4l2_subdev *sd,
u32 pad, u64 streams_mask)
{
const struct ov02c10_mode *mode = &supported_modes[0];
- struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov02c10 *ov02c10 = to_ov02c10(sd);
const struct reg_sequence *reg_sequence;
int ret, sequence_length;
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(ov02c10->dev);
if (ret)
return ret;
@@ -584,7 +582,7 @@ static int ov02c10_enable_streams(struct v4l2_subdev *sd,
ret = regmap_multi_reg_write(ov02c10->regmap,
reg_sequence, sequence_length);
if (ret) {
- dev_err(&client->dev, "failed to set mode\n");
+ dev_err(ov02c10->dev, "failed to set mode\n");
goto out;
}
@@ -593,7 +591,7 @@ static int ov02c10_enable_streams(struct v4l2_subdev *sd,
ret = regmap_multi_reg_write(ov02c10->regmap,
reg_sequence, sequence_length);
if (ret) {
- dev_err(&client->dev, "failed to write lane settings\n");
+ dev_err(ov02c10->dev, "failed to write lane settings\n");
goto out;
}
@@ -604,7 +602,7 @@ static int ov02c10_enable_streams(struct v4l2_subdev *sd,
ret = cci_write(ov02c10->regmap, OV02C10_REG_STREAM_CONTROL, 1, NULL);
out:
if (ret)
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov02c10->dev);
return ret;
}
@@ -613,11 +611,10 @@ static int ov02c10_disable_streams(struct v4l2_subdev *sd,
struct v4l2_subdev_state *state,
u32 pad, u64 streams_mask)
{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov02c10 *ov02c10 = to_ov02c10(sd);
cci_write(ov02c10->regmap, OV02C10_REG_STREAM_CONTROL, 0, NULL);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov02c10->dev);
return 0;
}
@@ -778,7 +775,6 @@ static const struct v4l2_subdev_internal_ops ov02c10_internal_ops = {
static int ov02c10_identify_module(struct ov02c10 *ov02c10)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov02c10->sd);
u64 chip_id;
int ret;
@@ -787,7 +783,7 @@ static int ov02c10_identify_module(struct ov02c10 *ov02c10)
return ret;
if (chip_id != OV02C10_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%llx",
+ dev_err(ov02c10->dev, "chip id mismatch: %x!=%llx",
OV02C10_CHIP_ID, chip_id);
return -ENXIO;
}
@@ -795,14 +791,14 @@ static int ov02c10_identify_module(struct ov02c10 *ov02c10)
return 0;
}
-static int ov02c10_check_hwcfg(struct device *dev, struct ov02c10 *ov02c10)
+static int ov02c10_check_hwcfg(struct ov02c10 *ov02c10)
{
struct v4l2_fwnode_endpoint bus_cfg = {
.bus_type = V4L2_MBUS_CSI2_DPHY
};
+ struct device *dev = ov02c10->dev;
struct fwnode_handle *ep, *fwnode = dev_fwnode(dev);
unsigned long link_freq_bitmap;
- u32 mclk;
int ret;
/*
@@ -814,31 +810,6 @@ static int ov02c10_check_hwcfg(struct device *dev, struct ov02c10 *ov02c10)
return dev_err_probe(dev, -EPROBE_DEFER,
"waiting for fwnode graph endpoint\n");
- ov02c10->img_clk = devm_clk_get_optional(dev, NULL);
- if (IS_ERR(ov02c10->img_clk)) {
- fwnode_handle_put(ep);
- return dev_err_probe(dev, PTR_ERR(ov02c10->img_clk),
- "failed to get imaging clock\n");
- }
-
- if (ov02c10->img_clk) {
- mclk = clk_get_rate(ov02c10->img_clk);
- } else {
- ret = fwnode_property_read_u32(fwnode, "clock-frequency", &mclk);
- if (ret) {
- fwnode_handle_put(ep);
- return dev_err_probe(dev, ret,
- "reading clock-frequency property\n");
- }
- }
-
- if (mclk != OV02C10_MCLK) {
- fwnode_handle_put(ep);
- return dev_err_probe(dev, -EINVAL,
- "external clock %u is not supported\n",
- mclk);
- }
-
ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg);
fwnode_handle_put(ep);
if (ret)
@@ -873,35 +844,50 @@ check_hwcfg_error:
static void ov02c10_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov02c10 *ov02c10 = to_ov02c10(sd);
v4l2_async_unregister_subdev(sd);
v4l2_subdev_cleanup(sd);
media_entity_cleanup(&sd->entity);
v4l2_ctrl_handler_free(sd->ctrl_handler);
- pm_runtime_disable(&client->dev);
- if (!pm_runtime_status_suspended(&client->dev)) {
- ov02c10_power_off(&client->dev);
- pm_runtime_set_suspended(&client->dev);
+ pm_runtime_disable(ov02c10->dev);
+ if (!pm_runtime_status_suspended(ov02c10->dev)) {
+ ov02c10_power_off(ov02c10->dev);
+ pm_runtime_set_suspended(ov02c10->dev);
}
}
static int ov02c10_probe(struct i2c_client *client)
{
struct ov02c10 *ov02c10;
+ unsigned long freq;
int ret;
ov02c10 = devm_kzalloc(&client->dev, sizeof(*ov02c10), GFP_KERNEL);
if (!ov02c10)
return -ENOMEM;
+ ov02c10->dev = &client->dev;
+
+ ov02c10->img_clk = devm_v4l2_sensor_clk_get(ov02c10->dev, NULL);
+ if (IS_ERR(ov02c10->img_clk))
+ return dev_err_probe(ov02c10->dev, PTR_ERR(ov02c10->img_clk),
+ "failed to get imaging clock\n");
+
+ freq = clk_get_rate(ov02c10->img_clk);
+ if (freq != OV02C10_MCLK)
+ return dev_err_probe(ov02c10->dev, -EINVAL,
+ "external clock %lu is not supported",
+ freq);
+
v4l2_i2c_subdev_init(&ov02c10->sd, client, &ov02c10_subdev_ops);
/* Check HW config */
- ret = ov02c10_check_hwcfg(&client->dev, ov02c10);
+ ret = ov02c10_check_hwcfg(ov02c10);
if (ret)
return ret;
- ret = ov02c10_get_pm_resources(&client->dev);
+ ret = ov02c10_get_pm_resources(ov02c10->dev);
if (ret)
return ret;
@@ -909,21 +895,21 @@ static int ov02c10_probe(struct i2c_client *client)
if (IS_ERR(ov02c10->regmap))
return PTR_ERR(ov02c10->regmap);
- ret = ov02c10_power_on(&client->dev);
+ ret = ov02c10_power_on(ov02c10->dev);
if (ret) {
- dev_err_probe(&client->dev, ret, "failed to power on\n");
+ dev_err_probe(ov02c10->dev, ret, "failed to power on\n");
return ret;
}
ret = ov02c10_identify_module(ov02c10);
if (ret) {
- dev_err(&client->dev, "failed to find sensor: %d", ret);
+ dev_err(ov02c10->dev, "failed to find sensor: %d", ret);
goto probe_error_power_off;
}
ret = ov02c10_init_controls(ov02c10);
if (ret) {
- dev_err(&client->dev, "failed to init controls: %d", ret);
+ dev_err(ov02c10->dev, "failed to init controls: %d", ret);
goto probe_error_v4l2_ctrl_handler_free;
}
@@ -934,33 +920,33 @@ static int ov02c10_probe(struct i2c_client *client)
ov02c10->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&ov02c10->sd.entity, 1, &ov02c10->pad);
if (ret) {
- dev_err(&client->dev, "failed to init entity pads: %d", ret);
+ dev_err(ov02c10->dev, "failed to init entity pads: %d", ret);
goto probe_error_v4l2_ctrl_handler_free;
}
ov02c10->sd.state_lock = ov02c10->ctrl_handler.lock;
ret = v4l2_subdev_init_finalize(&ov02c10->sd);
if (ret < 0) {
- dev_err(&client->dev, "failed to init subdev: %d", ret);
+ dev_err(ov02c10->dev, "failed to init subdev: %d", ret);
goto probe_error_media_entity_cleanup;
}
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
+ pm_runtime_set_active(ov02c10->dev);
+ pm_runtime_enable(ov02c10->dev);
ret = v4l2_async_register_subdev_sensor(&ov02c10->sd);
if (ret < 0) {
- dev_err(&client->dev, "failed to register V4L2 subdev: %d",
+ dev_err(ov02c10->dev, "failed to register V4L2 subdev: %d",
ret);
goto probe_error_v4l2_subdev_cleanup;
}
- pm_runtime_idle(&client->dev);
+ pm_runtime_idle(ov02c10->dev);
return 0;
probe_error_v4l2_subdev_cleanup:
- pm_runtime_disable(&client->dev);
- pm_runtime_set_suspended(&client->dev);
+ pm_runtime_disable(ov02c10->dev);
+ pm_runtime_set_suspended(ov02c10->dev);
v4l2_subdev_cleanup(&ov02c10->sd);
probe_error_media_entity_cleanup:
@@ -970,7 +956,7 @@ probe_error_v4l2_ctrl_handler_free:
v4l2_ctrl_handler_free(ov02c10->sd.ctrl_handler);
probe_error_power_off:
- ov02c10_power_off(&client->dev);
+ ov02c10_power_off(ov02c10->dev);
return ret;
}
diff --git a/drivers/media/i2c/ov02e10.c b/drivers/media/i2c/ov02e10.c
index d74dc62e189d..4a64cba99991 100644
--- a/drivers/media/i2c/ov02e10.c
+++ b/drivers/media/i2c/ov02e10.c
@@ -226,6 +226,8 @@ static const char * const ov02e10_supply_names[] = {
};
struct ov02e10 {
+ struct device *dev;
+
struct regmap *regmap;
struct v4l2_subdev sd;
struct media_pad pad;
@@ -288,7 +290,6 @@ static int ov02e10_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct ov02e10 *ov02e10 = container_of(ctrl->handler,
struct ov02e10, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&ov02e10->sd);
s64 exposure_max;
int ret;
@@ -307,7 +308,7 @@ static int ov02e10_set_ctrl(struct v4l2_ctrl *ctrl)
}
/* V4L2 controls values will be applied only when power is already up */
- if (!pm_runtime_get_if_in_use(&client->dev))
+ if (!pm_runtime_get_if_in_use(ov02e10->dev))
return 0;
ret = cci_write(ov02e10->regmap, OV02E10_REG_COMMAND_UPDATE,
@@ -363,7 +364,7 @@ static int ov02e10_set_ctrl(struct v4l2_ctrl *ctrl)
cci_write(ov02e10->regmap, OV02E10_REG_COMMAND_UPDATE,
OV02E10_COMMAND_UPDATE, &ret);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov02e10->dev);
return ret;
}
@@ -374,7 +375,6 @@ static const struct v4l2_ctrl_ops ov02e10_ctrl_ops = {
static int ov02e10_init_controls(struct ov02e10 *ov02e10)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov02e10->sd);
struct v4l2_ctrl_handler *ctrl_hdlr = &ov02e10->ctrl_handler;
const struct ov02e10_mode *mode = ov02e10->cur_mode;
u32 vblank_min, vblank_max, vblank_def;
@@ -442,7 +442,7 @@ static int ov02e10_init_controls(struct ov02e10 *ov02e10)
ARRAY_SIZE(ov02e10_test_pattern_menu) - 1,
0, 0, ov02e10_test_pattern_menu);
- ret = v4l2_fwnode_device_parse(&client->dev, &props);
+ ret = v4l2_fwnode_device_parse(ov02e10->dev, &props);
if (ret)
return ret;
@@ -481,12 +481,11 @@ static int ov02e10_enable_streams(struct v4l2_subdev *sd,
struct v4l2_subdev_state *state,
u32 pad, u64 streams_mask)
{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov02e10 *ov02e10 = to_ov02e10(sd);
const struct reg_sequence_list *reg_list;
int ret;
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(ov02e10->dev);
if (ret)
return ret;
@@ -494,7 +493,7 @@ static int ov02e10_enable_streams(struct v4l2_subdev *sd,
ret = regmap_multi_reg_write(ov02e10->regmap, reg_list->regs,
reg_list->num_regs);
if (ret) {
- dev_err(&client->dev, "failed to set mode\n");
+ dev_err(ov02e10->dev, "failed to set mode\n");
goto out;
}
@@ -506,7 +505,7 @@ static int ov02e10_enable_streams(struct v4l2_subdev *sd,
out:
if (ret)
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov02e10->dev);
return ret;
}
@@ -515,11 +514,10 @@ static int ov02e10_disable_streams(struct v4l2_subdev *sd,
struct v4l2_subdev_state *state,
u32 pad, u64 streams_mask)
{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov02e10 *ov02e10 = to_ov02e10(sd);
ov02e10_set_stream_mode(ov02e10, 0);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov02e10->dev);
return 0;
}
@@ -724,7 +722,6 @@ static const struct v4l2_subdev_internal_ops ov02e10_internal_ops = {
static int ov02e10_identify_module(struct ov02e10 *ov02e10)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov02e10->sd);
int ret;
u64 val;
@@ -735,7 +732,7 @@ static int ov02e10_identify_module(struct ov02e10 *ov02e10)
return ret;
if (val != OV02E10_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
+ dev_err(ov02e10->dev, "chip id mismatch: %x!=%x\n",
OV02E10_CHIP_ID, (u32)val);
return -ENXIO;
}
@@ -743,15 +740,15 @@ static int ov02e10_identify_module(struct ov02e10 *ov02e10)
return 0;
}
-static int ov02e10_check_hwcfg(struct device *dev, struct ov02e10 *ov02e10)
+static int ov02e10_check_hwcfg(struct ov02e10 *ov02e10)
{
struct v4l2_fwnode_endpoint bus_cfg = {
.bus_type = V4L2_MBUS_CSI2_DPHY
};
+ struct device *dev = ov02e10->dev;
struct fwnode_handle *ep;
struct fwnode_handle *fwnode = dev_fwnode(dev);
unsigned long link_freq_bitmap;
- u32 ext_clk;
int ret;
ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
@@ -764,31 +761,6 @@ static int ov02e10_check_hwcfg(struct device *dev, struct ov02e10 *ov02e10)
if (ret)
return dev_err_probe(dev, ret, "parsing endpoint failed\n");
- ov02e10->img_clk = devm_clk_get_optional(dev, NULL);
- if (IS_ERR(ov02e10->img_clk)) {
- ret = dev_err_probe(dev, PTR_ERR(ov02e10->img_clk),
- "failed to get imaging clock\n");
- goto out_err;
- }
-
- if (ov02e10->img_clk) {
- ext_clk = clk_get_rate(ov02e10->img_clk);
- } else {
- ret = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency",
- &ext_clk);
- if (ret) {
- dev_err(dev, "can't get clock frequency\n");
- goto out_err;
- }
- }
-
- if (ext_clk != OV02E10_MCLK) {
- dev_err(dev, "external clock %d is not supported\n",
- ext_clk);
- ret = -EINVAL;
- goto out_err;
- }
-
if (bus_cfg.bus.mipi_csi2.num_data_lanes != OV02E10_DATA_LANES) {
dev_err(dev, "number of CSI2 data lanes %d is not supported\n",
bus_cfg.bus.mipi_csi2.num_data_lanes);
@@ -823,32 +795,47 @@ out_err:
static void ov02e10_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov02e10 *ov02e10 = to_ov02e10(sd);
v4l2_async_unregister_subdev(sd);
v4l2_subdev_cleanup(sd);
media_entity_cleanup(&sd->entity);
v4l2_ctrl_handler_free(sd->ctrl_handler);
- pm_runtime_disable(&client->dev);
+ pm_runtime_disable(ov02e10->dev);
- if (!pm_runtime_status_suspended(&client->dev)) {
- ov02e10_power_off(&client->dev);
- pm_runtime_set_suspended(&client->dev);
+ if (!pm_runtime_status_suspended(ov02e10->dev)) {
+ ov02e10_power_off(ov02e10->dev);
+ pm_runtime_set_suspended(ov02e10->dev);
}
}
static int ov02e10_probe(struct i2c_client *client)
{
struct ov02e10 *ov02e10;
+ unsigned long freq;
int ret;
ov02e10 = devm_kzalloc(&client->dev, sizeof(*ov02e10), GFP_KERNEL);
if (!ov02e10)
return -ENOMEM;
+ ov02e10->dev = &client->dev;
+
+ ov02e10->img_clk = devm_v4l2_sensor_clk_get(ov02e10->dev, NULL);
+ if (IS_ERR(ov02e10->img_clk))
+ return dev_err_probe(ov02e10->dev, PTR_ERR(ov02e10->img_clk),
+ "failed to get imaging clock\n");
+
+ freq = clk_get_rate(ov02e10->img_clk);
+ if (freq != OV02E10_MCLK)
+ return dev_err_probe(ov02e10->dev, -EINVAL,
+ "external clock %lu is not supported",
+ freq);
+
v4l2_i2c_subdev_init(&ov02e10->sd, client, &ov02e10_subdev_ops);
/* Check HW config */
- ret = ov02e10_check_hwcfg(&client->dev, ov02e10);
+ ret = ov02e10_check_hwcfg(ov02e10);
if (ret)
return ret;
@@ -857,27 +844,27 @@ static int ov02e10_probe(struct i2c_client *client)
if (IS_ERR(ov02e10->regmap))
return PTR_ERR(ov02e10->regmap);
- ret = ov02e10_get_pm_resources(&client->dev);
+ ret = ov02e10_get_pm_resources(ov02e10->dev);
if (ret)
return ret;
- ret = ov02e10_power_on(&client->dev);
+ ret = ov02e10_power_on(ov02e10->dev);
if (ret) {
- dev_err_probe(&client->dev, ret, "failed to power on\n");
+ dev_err_probe(ov02e10->dev, ret, "failed to power on\n");
return ret;
}
/* Check module identity */
ret = ov02e10_identify_module(ov02e10);
if (ret) {
- dev_err(&client->dev, "failed to find sensor: %d\n", ret);
+ dev_err(ov02e10->dev, "failed to find sensor: %d\n", ret);
goto probe_error_power_off;
}
ov02e10->cur_mode = &supported_modes[0];
ret = ov02e10_init_controls(ov02e10);
if (ret) {
- dev_err(&client->dev, "failed to init controls: %d\n", ret);
+ dev_err(ov02e10->dev, "failed to init controls: %d\n", ret);
goto probe_error_v4l2_ctrl_handler_free;
}
@@ -891,33 +878,33 @@ static int ov02e10_probe(struct i2c_client *client)
ov02e10->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&ov02e10->sd.entity, 1, &ov02e10->pad);
if (ret) {
- dev_err(&client->dev, "failed to init entity pads: %d", ret);
+ dev_err(ov02e10->dev, "failed to init entity pads: %d", ret);
goto probe_error_v4l2_ctrl_handler_free;
}
ov02e10->sd.state_lock = ov02e10->ctrl_handler.lock;
ret = v4l2_subdev_init_finalize(&ov02e10->sd);
if (ret < 0) {
- dev_err(&client->dev, "failed to init subdev: %d", ret);
+ dev_err(ov02e10->dev, "failed to init subdev: %d", ret);
goto probe_error_media_entity_cleanup;
}
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
+ pm_runtime_set_active(ov02e10->dev);
+ pm_runtime_enable(ov02e10->dev);
ret = v4l2_async_register_subdev_sensor(&ov02e10->sd);
if (ret < 0) {
- dev_err(&client->dev, "failed to register V4L2 subdev: %d",
+ dev_err(ov02e10->dev, "failed to register V4L2 subdev: %d",
ret);
goto probe_error_v4l2_subdev_cleanup;
}
- pm_runtime_idle(&client->dev);
+ pm_runtime_idle(ov02e10->dev);
return 0;
probe_error_v4l2_subdev_cleanup:
- pm_runtime_disable(&client->dev);
- pm_runtime_set_suspended(&client->dev);
+ pm_runtime_disable(ov02e10->dev);
+ pm_runtime_set_suspended(ov02e10->dev);
v4l2_subdev_cleanup(&ov02e10->sd);
probe_error_media_entity_cleanup:
@@ -927,7 +914,7 @@ probe_error_v4l2_ctrl_handler_free:
v4l2_ctrl_handler_free(ov02e10->sd.ctrl_handler);
probe_error_power_off:
- ov02e10_power_off(&client->dev);
+ ov02e10_power_off(ov02e10->dev);
return ret;
}
@@ -961,7 +948,7 @@ static struct i2c_driver ov02e10_i2c_driver = {
module_i2c_driver(ov02e10_i2c_driver);
-MODULE_AUTHOR("Jingjing Xiong <jingjing.xiong@intel.com>");
+MODULE_AUTHOR("Jingjing Xiong");
MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
MODULE_AUTHOR("Alan Stern <stern@rowland.harvard.edu>");
MODULE_AUTHOR("Bryan O'Donoghue <bryan.odonoghue@linaro.org>");
diff --git a/drivers/media/i2c/ov08d10.c b/drivers/media/i2c/ov08d10.c
index 1bacbdfa4298..43ec2a1f2fcf 100644
--- a/drivers/media/i2c/ov08d10.c
+++ b/drivers/media/i2c/ov08d10.c
@@ -515,12 +515,13 @@ static const char * const ov08d10_test_pattern_menu[] = {
};
struct ov08d10 {
+ struct device *dev;
+ struct clk *clk;
+
struct v4l2_subdev sd;
struct media_pad pad;
struct v4l2_ctrl_handler ctrl_handler;
- struct clk *xvclk;
-
/* V4L2 Controls */
struct v4l2_ctrl *link_freq;
struct v4l2_ctrl *pixel_rate;
@@ -663,7 +664,7 @@ static int ov08d10_write_reg_list(struct ov08d10 *ov08d10,
ret = i2c_smbus_write_byte_data(client, r_list->regs[i].address,
r_list->regs[i].val);
if (ret) {
- dev_err_ratelimited(&client->dev,
+ dev_err_ratelimited(ov08d10->dev,
"failed to write reg 0x%2.2x. error = %d",
r_list->regs[i].address, ret);
return ret;
@@ -849,7 +850,6 @@ static int ov08d10_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct ov08d10 *ov08d10 = container_of(ctrl->handler,
struct ov08d10, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&ov08d10->sd);
s64 exposure_max;
int ret;
@@ -865,7 +865,7 @@ static int ov08d10_set_ctrl(struct v4l2_ctrl *ctrl)
}
/* V4L2 controls values will be applied only when power is already up */
- if (!pm_runtime_get_if_in_use(&client->dev))
+ if (!pm_runtime_get_if_in_use(ov08d10->dev))
return 0;
switch (ctrl->id) {
@@ -901,7 +901,7 @@ static int ov08d10_set_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov08d10->dev);
return ret;
}
@@ -1025,32 +1025,32 @@ static int ov08d10_start_streaming(struct ov08d10 *ov08d10)
/* soft reset */
ret = i2c_smbus_write_byte_data(client, OV08D10_REG_PAGE, 0x00);
if (ret < 0) {
- dev_err(&client->dev, "failed to reset sensor");
+ dev_err(ov08d10->dev, "failed to reset sensor");
return ret;
}
ret = i2c_smbus_write_byte_data(client, 0x20, 0x0e);
if (ret < 0) {
- dev_err(&client->dev, "failed to reset sensor");
+ dev_err(ov08d10->dev, "failed to reset sensor");
return ret;
}
usleep_range(3000, 4000);
ret = i2c_smbus_write_byte_data(client, 0x20, 0x0b);
if (ret < 0) {
- dev_err(&client->dev, "failed to reset sensor");
+ dev_err(ov08d10->dev, "failed to reset sensor");
return ret;
}
/* update sensor setting */
ret = ov08d10_write_reg_list(ov08d10, reg_list);
if (ret) {
- dev_err(&client->dev, "failed to set plls");
+ dev_err(ov08d10->dev, "failed to set plls");
return ret;
}
reg_list = &ov08d10->cur_mode->reg_list;
ret = ov08d10_write_reg_list(ov08d10, reg_list);
if (ret) {
- dev_err(&client->dev, "failed to set mode");
+ dev_err(ov08d10->dev, "failed to set mode");
return ret;
}
@@ -1077,19 +1077,19 @@ static void ov08d10_stop_streaming(struct ov08d10 *ov08d10)
ret = i2c_smbus_write_byte_data(client, OV08D10_REG_PAGE, 0x00);
if (ret < 0) {
- dev_err(&client->dev, "failed to stop streaming");
+ dev_err(ov08d10->dev, "failed to stop streaming");
return;
}
ret = i2c_smbus_write_byte_data(client, OV08D10_REG_MODE_SELECT,
OV08D10_MODE_STANDBY);
if (ret < 0) {
- dev_err(&client->dev, "failed to stop streaming");
+ dev_err(ov08d10->dev, "failed to stop streaming");
return;
}
ret = i2c_smbus_write_byte_data(client, OV08D10_REG_PAGE, 0x01);
if (ret < 0) {
- dev_err(&client->dev, "failed to stop streaming");
+ dev_err(ov08d10->dev, "failed to stop streaming");
return;
}
}
@@ -1097,12 +1097,11 @@ static void ov08d10_stop_streaming(struct ov08d10 *ov08d10)
static int ov08d10_set_stream(struct v4l2_subdev *sd, int enable)
{
struct ov08d10 *ov08d10 = to_ov08d10(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = 0;
mutex_lock(&ov08d10->mutex);
if (enable) {
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(ov08d10->dev);
if (ret < 0) {
mutex_unlock(&ov08d10->mutex);
return ret;
@@ -1112,11 +1111,11 @@ static int ov08d10_set_stream(struct v4l2_subdev *sd, int enable)
if (ret) {
enable = 0;
ov08d10_stop_streaming(ov08d10);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov08d10->dev);
}
} else {
ov08d10_stop_streaming(ov08d10);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov08d10->dev);
}
/* vflip and hflip cannot change during streaming */
@@ -1293,7 +1292,7 @@ static int ov08d10_identify_module(struct ov08d10 *ov08d10)
chip_id = val | ret;
if ((chip_id & OV08D10_ID_MASK) != OV08D10_CHIP_ID) {
- dev_err(&client->dev, "unexpected sensor id(0x%04x)\n",
+ dev_err(ov08d10->dev, "unexpected sensor id(0x%04x)\n",
chip_id);
return -EINVAL;
}
@@ -1301,28 +1300,20 @@ static int ov08d10_identify_module(struct ov08d10 *ov08d10)
return 0;
}
-static int ov08d10_get_hwcfg(struct ov08d10 *ov08d10, struct device *dev)
+static int ov08d10_get_hwcfg(struct ov08d10 *ov08d10)
{
+ struct device *dev = ov08d10->dev;
struct fwnode_handle *ep;
struct fwnode_handle *fwnode = dev_fwnode(dev);
struct v4l2_fwnode_endpoint bus_cfg = {
.bus_type = V4L2_MBUS_CSI2_DPHY
};
- u32 xvclk_rate;
unsigned int i, j;
int ret;
if (!fwnode)
return -ENXIO;
- ret = fwnode_property_read_u32(fwnode, "clock-frequency", &xvclk_rate);
- if (ret)
- return ret;
-
- if (xvclk_rate != OV08D10_XVCLK_19_2)
- dev_warn(dev, "external clock rate %u is unsupported",
- xvclk_rate);
-
ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
if (!ep)
return -ENXIO;
@@ -1380,22 +1371,35 @@ static void ov08d10_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
v4l2_ctrl_handler_free(sd->ctrl_handler);
- pm_runtime_disable(&client->dev);
+ pm_runtime_disable(ov08d10->dev);
mutex_destroy(&ov08d10->mutex);
}
static int ov08d10_probe(struct i2c_client *client)
{
struct ov08d10 *ov08d10;
+ unsigned long freq;
int ret;
ov08d10 = devm_kzalloc(&client->dev, sizeof(*ov08d10), GFP_KERNEL);
if (!ov08d10)
return -ENOMEM;
- ret = ov08d10_get_hwcfg(ov08d10, &client->dev);
+ ov08d10->dev = &client->dev;
+
+ ov08d10->clk = devm_v4l2_sensor_clk_get(ov08d10->dev, NULL);
+ if (IS_ERR(ov08d10->clk))
+ return dev_err_probe(ov08d10->dev, PTR_ERR(ov08d10->clk),
+ "failed to get clock\n");
+
+ freq = clk_get_rate(ov08d10->clk);
+ if (freq != OV08D10_XVCLK_19_2)
+ dev_warn(ov08d10->dev,
+ "external clock rate %lu is not supported\n", freq);
+
+ ret = ov08d10_get_hwcfg(ov08d10);
if (ret) {
- dev_err(&client->dev, "failed to get HW configuration: %d",
+ dev_err(ov08d10->dev, "failed to get HW configuration: %d",
ret);
return ret;
}
@@ -1404,7 +1408,7 @@ static int ov08d10_probe(struct i2c_client *client)
ret = ov08d10_identify_module(ov08d10);
if (ret) {
- dev_err(&client->dev, "failed to find sensor: %d", ret);
+ dev_err(ov08d10->dev, "failed to find sensor: %d", ret);
return ret;
}
@@ -1412,7 +1416,7 @@ static int ov08d10_probe(struct i2c_client *client)
ov08d10->cur_mode = &ov08d10->priv_lane->sp_modes[0];
ret = ov08d10_init_controls(ov08d10);
if (ret) {
- dev_err(&client->dev, "failed to init controls: %d", ret);
+ dev_err(ov08d10->dev, "failed to init controls: %d", ret);
goto probe_error_v4l2_ctrl_handler_free;
}
@@ -1422,13 +1426,13 @@ static int ov08d10_probe(struct i2c_client *client)
ov08d10->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&ov08d10->sd.entity, 1, &ov08d10->pad);
if (ret) {
- dev_err(&client->dev, "failed to init entity pads: %d", ret);
+ dev_err(ov08d10->dev, "failed to init entity pads: %d", ret);
goto probe_error_v4l2_ctrl_handler_free;
}
ret = v4l2_async_register_subdev_sensor(&ov08d10->sd);
if (ret < 0) {
- dev_err(&client->dev, "failed to register V4L2 subdev: %d",
+ dev_err(ov08d10->dev, "failed to register V4L2 subdev: %d",
ret);
goto probe_error_media_entity_cleanup;
}
@@ -1437,9 +1441,9 @@ static int ov08d10_probe(struct i2c_client *client)
* Device is already turned on by i2c-core with ACPI domain PM.
* Enable runtime PM and turn off the device.
*/
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
- pm_runtime_idle(&client->dev);
+ pm_runtime_set_active(ov08d10->dev);
+ pm_runtime_enable(ov08d10->dev);
+ pm_runtime_idle(ov08d10->dev);
return 0;
diff --git a/drivers/media/i2c/ov08x40.c b/drivers/media/i2c/ov08x40.c
index e0094305ca2a..5eaf454f4763 100644
--- a/drivers/media/i2c/ov08x40.c
+++ b/drivers/media/i2c/ov08x40.c
@@ -1,15 +1,16 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2022 Intel Corporation.
-#include <linux/unaligned.h>
#include <linux/acpi.h>
#include <linux/clk.h>
-#include <linux/i2c.h>
+#include <linux/delay.h>
#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
#include <linux/module.h>
-#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
+#include <linux/unaligned.h>
+
#include <media/v4l2-common.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
@@ -1305,6 +1306,8 @@ static const char * const ov08x40_supply_names[] = {
};
struct ov08x40 {
+ struct device *dev;
+
struct v4l2_subdev sd;
struct media_pad pad;
@@ -1513,7 +1516,6 @@ static int ov08x40_write_reg(struct ov08x40 *ov08x,
static int ov08x40_write_regs(struct ov08x40 *ov08x,
const struct ov08x40_reg *regs, u32 len)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov08x->sd);
int ret;
u32 i;
@@ -1522,7 +1524,7 @@ static int ov08x40_write_regs(struct ov08x40 *ov08x,
regs[i].val);
if (ret) {
- dev_err_ratelimited(&client->dev,
+ dev_err_ratelimited(ov08x->dev,
"Failed to write reg 0x%4.4x. error = %d\n",
regs[i].address, ret);
@@ -1648,7 +1650,7 @@ static int ov08x40_set_ctrl_hflip(struct ov08x40 *ov08x, u32 ctrl_val)
return ov08x40_write_reg(ov08x, OV08X40_REG_MIRROR,
OV08X40_REG_VALUE_08BIT,
- ctrl_val ? val | BIT(2) : val & ~BIT(2));
+ ctrl_val ? val & ~BIT(2) : val | BIT(2));
}
static int ov08x40_set_ctrl_vflip(struct ov08x40 *ov08x, u32 ctrl_val)
@@ -1670,7 +1672,6 @@ static int ov08x40_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct ov08x40 *ov08x = container_of(ctrl->handler,
struct ov08x40, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&ov08x->sd);
s64 max;
int exp;
int fll;
@@ -1699,7 +1700,7 @@ static int ov08x40_set_ctrl(struct v4l2_ctrl *ctrl)
* Applying V4L2 control value only happens
* when power is up for streaming
*/
- if (!pm_runtime_get_if_in_use(&client->dev))
+ if (!pm_runtime_get_if_in_use(ov08x->dev))
return 0;
switch (ctrl->id) {
@@ -1737,13 +1738,13 @@ static int ov08x40_set_ctrl(struct v4l2_ctrl *ctrl)
ov08x40_set_ctrl_vflip(ov08x, ctrl->val);
break;
default:
- dev_info(&client->dev,
+ dev_info(ov08x->dev,
"ctrl(id:0x%x,val:0x%x) is not handled\n",
ctrl->id, ctrl->val);
break;
}
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov08x->dev);
return ret;
}
@@ -1912,7 +1913,6 @@ ov08x40_set_pad_format(struct v4l2_subdev *sd,
static int ov08x40_start_streaming(struct ov08x40 *ov08x)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov08x->sd);
const struct ov08x40_reg_list *reg_list;
int ret, link_freq_index;
@@ -1920,7 +1920,7 @@ static int ov08x40_start_streaming(struct ov08x40 *ov08x)
ret = ov08x40_write_reg(ov08x, OV08X40_REG_SOFTWARE_RST,
OV08X40_REG_VALUE_08BIT, OV08X40_SOFTWARE_RST);
if (ret) {
- dev_err(&client->dev, "%s failed to set powerup registers\n",
+ dev_err(ov08x->dev, "%s failed to set powerup registers\n",
__func__);
return ret;
}
@@ -1930,14 +1930,14 @@ static int ov08x40_start_streaming(struct ov08x40 *ov08x)
ret = ov08x40_write_reg_list(ov08x, reg_list);
if (ret) {
- dev_err(&client->dev, "%s failed to set plls\n", __func__);
+ dev_err(ov08x->dev, "%s failed to set plls\n", __func__);
return ret;
}
reg_list = &ov08x40_global_setting;
ret = ov08x40_write_reg_list(ov08x, reg_list);
if (ret) {
- dev_err(&client->dev, "%s failed to set global setting\n",
+ dev_err(ov08x->dev, "%s failed to set global setting\n",
__func__);
return ret;
}
@@ -1946,7 +1946,7 @@ static int ov08x40_start_streaming(struct ov08x40 *ov08x)
reg_list = &ov08x->cur_mode->reg_list;
ret = ov08x40_write_reg_list(ov08x, reg_list);
if (ret) {
- dev_err(&client->dev, "%s failed to set mode\n", __func__);
+ dev_err(ov08x->dev, "%s failed to set mode\n", __func__);
return ret;
}
@@ -1962,7 +1962,7 @@ static int ov08x40_start_streaming(struct ov08x40 *ov08x)
}
if (ret) {
- dev_err(&client->dev, "%s failed to set regs\n", __func__);
+ dev_err(ov08x->dev, "%s failed to set regs\n", __func__);
return ret;
}
@@ -1986,7 +1986,6 @@ static int ov08x40_stop_streaming(struct ov08x40 *ov08x)
/* Verify chip ID */
static int ov08x40_identify_module(struct ov08x40 *ov08x)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov08x->sd);
int ret;
u32 val;
@@ -1996,17 +1995,17 @@ static int ov08x40_identify_module(struct ov08x40 *ov08x)
ret = ov08x40_read_reg(ov08x, OV08X40_REG_CHIP_ID,
OV08X40_REG_VALUE_24BIT, &val);
if (ret) {
- dev_err(&client->dev, "error reading chip-id register: %d\n", ret);
+ dev_err(ov08x->dev, "error reading chip-id register: %d\n", ret);
return ret;
}
if (val != OV08X40_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
+ dev_err(ov08x->dev, "chip id mismatch: %x!=%x\n",
OV08X40_CHIP_ID, val);
return -ENXIO;
}
- dev_dbg(&client->dev, "chip id 0x%x\n", val);
+ dev_dbg(ov08x->dev, "chip id 0x%x\n", val);
ov08x->identified = true;
return 0;
@@ -2015,13 +2014,12 @@ static int ov08x40_identify_module(struct ov08x40 *ov08x)
static int ov08x40_set_stream(struct v4l2_subdev *sd, int enable)
{
struct ov08x40 *ov08x = to_ov08x40(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = 0;
mutex_lock(&ov08x->mutex);
if (enable) {
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(ov08x->dev);
if (ret < 0)
goto err_unlock;
@@ -2038,7 +2036,7 @@ static int ov08x40_set_stream(struct v4l2_subdev *sd, int enable)
goto err_rpm_put;
} else {
ov08x40_stop_streaming(ov08x);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov08x->dev);
}
mutex_unlock(&ov08x->mutex);
@@ -2046,7 +2044,7 @@ static int ov08x40_set_stream(struct v4l2_subdev *sd, int enable)
return ret;
err_rpm_put:
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov08x->dev);
err_unlock:
mutex_unlock(&ov08x->mutex);
@@ -2079,7 +2077,6 @@ static const struct v4l2_subdev_internal_ops ov08x40_internal_ops = {
static int ov08x40_init_controls(struct ov08x40 *ov08x)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov08x->sd);
struct v4l2_fwnode_device_properties props;
struct v4l2_ctrl_handler *ctrl_hdlr;
s64 exposure_max;
@@ -2160,12 +2157,12 @@ static int ov08x40_init_controls(struct ov08x40 *ov08x)
if (ctrl_hdlr->error) {
ret = ctrl_hdlr->error;
- dev_err(&client->dev, "%s control init failed (%d)\n",
+ dev_err(ov08x->dev, "%s control init failed (%d)\n",
__func__, ret);
goto error;
}
- ret = v4l2_fwnode_device_parse(&client->dev, &props);
+ ret = v4l2_fwnode_device_parse(ov08x->dev, &props);
if (ret)
goto error;
@@ -2191,11 +2188,12 @@ static void ov08x40_free_controls(struct ov08x40 *ov08x)
mutex_destroy(&ov08x->mutex);
}
-static int ov08x40_check_hwcfg(struct ov08x40 *ov08x, struct device *dev)
+static int ov08x40_check_hwcfg(struct ov08x40 *ov08x)
{
struct v4l2_fwnode_endpoint bus_cfg = {
.bus_type = V4L2_MBUS_CSI2_DPHY
};
+ struct device *dev = ov08x->dev;
struct fwnode_handle *ep;
struct fwnode_handle *fwnode = dev_fwnode(dev);
unsigned int i;
@@ -2232,23 +2230,14 @@ static int ov08x40_check_hwcfg(struct ov08x40 *ov08x, struct device *dev)
if (ret)
goto out_err;
- ov08x->xvclk = devm_clk_get_optional(dev, NULL);
+ ov08x->xvclk = devm_v4l2_sensor_clk_get(dev, NULL);
if (IS_ERR(ov08x->xvclk)) {
ret = dev_err_probe(dev, PTR_ERR(ov08x->xvclk),
"getting xvclk\n");
goto out_err;
}
- if (ov08x->xvclk) {
- xvclk_rate = clk_get_rate(ov08x->xvclk);
- } else {
- ret = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency",
- &xvclk_rate);
- if (ret) {
- dev_err(dev, "can't get clock frequency\n");
- goto out_err;
- }
- }
+ xvclk_rate = clk_get_rate(ov08x->xvclk);
if (xvclk_rate != OV08X40_XVCLK) {
dev_err(dev, "external clock %d is not supported\n",
xvclk_rate);
@@ -2294,19 +2283,21 @@ static int ov08x40_probe(struct i2c_client *client)
if (!ov08x)
return -ENOMEM;
+ ov08x->dev = &client->dev;
+
/* Check HW config */
- ret = ov08x40_check_hwcfg(ov08x, &client->dev);
+ ret = ov08x40_check_hwcfg(ov08x);
if (ret)
return ret;
/* Initialize subdev */
v4l2_i2c_subdev_init(&ov08x->sd, client, &ov08x40_subdev_ops);
- full_power = acpi_dev_state_d0(&client->dev);
+ full_power = acpi_dev_state_d0(ov08x->dev);
if (full_power) {
- ret = ov08x40_power_on(&client->dev);
+ ret = ov08x40_power_on(ov08x->dev);
if (ret) {
- dev_err(&client->dev, "failed to power on\n");
+ dev_err(ov08x->dev, "failed to power on\n");
return ret;
}
@@ -2333,7 +2324,7 @@ static int ov08x40_probe(struct i2c_client *client)
ov08x->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&ov08x->sd.entity, 1, &ov08x->pad);
if (ret) {
- dev_err(&client->dev, "%s failed:%d\n", __func__, ret);
+ dev_err(ov08x->dev, "%s failed:%d\n", __func__, ret);
goto error_handler_free;
}
@@ -2342,9 +2333,9 @@ static int ov08x40_probe(struct i2c_client *client)
goto error_media_entity;
if (full_power)
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
- pm_runtime_idle(&client->dev);
+ pm_runtime_set_active(ov08x->dev);
+ pm_runtime_enable(ov08x->dev);
+ pm_runtime_idle(ov08x->dev);
return 0;
@@ -2355,7 +2346,7 @@ error_handler_free:
ov08x40_free_controls(ov08x);
probe_power_off:
- ov08x40_power_off(&client->dev);
+ ov08x40_power_off(ov08x->dev);
return ret;
}
@@ -2369,10 +2360,10 @@ static void ov08x40_remove(struct i2c_client *client)
media_entity_cleanup(&sd->entity);
ov08x40_free_controls(ov08x);
- pm_runtime_disable(&client->dev);
- if (!pm_runtime_status_suspended(&client->dev))
- ov08x40_power_off(&client->dev);
- pm_runtime_set_suspended(&client->dev);
+ pm_runtime_disable(ov08x->dev);
+ if (!pm_runtime_status_suspended(ov08x->dev))
+ ov08x40_power_off(ov08x->dev);
+ pm_runtime_set_suspended(ov08x->dev);
}
static DEFINE_RUNTIME_DEV_PM_OPS(ov08x40_pm_ops, ov08x40_power_off,
diff --git a/drivers/media/i2c/ov13858.c b/drivers/media/i2c/ov13858.c
index 7a3fc1d28514..162b49046990 100644
--- a/drivers/media/i2c/ov13858.c
+++ b/drivers/media/i2c/ov13858.c
@@ -2,6 +2,7 @@
// Copyright (c) 2017 Intel Corporation.
#include <linux/acpi.h>
+#include <linux/clk.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
@@ -1028,6 +1029,9 @@ static const struct ov13858_mode supported_modes[] = {
};
struct ov13858 {
+ struct device *dev;
+ struct clk *clk;
+
struct v4l2_subdev sd;
struct media_pad pad;
@@ -1117,7 +1121,6 @@ static int ov13858_write_reg(struct ov13858 *ov13858, u16 reg, u32 len,
static int ov13858_write_regs(struct ov13858 *ov13858,
const struct ov13858_reg *regs, u32 len)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov13858->sd);
int ret;
u32 i;
@@ -1126,7 +1129,7 @@ static int ov13858_write_regs(struct ov13858 *ov13858,
regs[i].val);
if (ret) {
dev_err_ratelimited(
- &client->dev,
+ ov13858->dev,
"Failed to write reg 0x%4.4x. error = %d\n",
regs[i].address, ret);
@@ -1209,7 +1212,6 @@ static int ov13858_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct ov13858 *ov13858 = container_of(ctrl->handler,
struct ov13858, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&ov13858->sd);
s64 max;
int ret;
@@ -1228,7 +1230,7 @@ static int ov13858_set_ctrl(struct v4l2_ctrl *ctrl)
* Applying V4L2 control value only happens
* when power is up for streaming
*/
- if (!pm_runtime_get_if_in_use(&client->dev))
+ if (!pm_runtime_get_if_in_use(ov13858->dev))
return 0;
ret = 0;
@@ -1256,13 +1258,13 @@ static int ov13858_set_ctrl(struct v4l2_ctrl *ctrl)
ret = ov13858_enable_test_pattern(ov13858, ctrl->val);
break;
default:
- dev_info(&client->dev,
+ dev_info(ov13858->dev,
"ctrl(id:0x%x,val:0x%x) is not handled\n",
ctrl->id, ctrl->val);
break;
}
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov13858->dev);
return ret;
}
@@ -1408,7 +1410,6 @@ static int ov13858_get_skip_frames(struct v4l2_subdev *sd, u32 *frames)
/* Start streaming */
static int ov13858_start_streaming(struct ov13858 *ov13858)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov13858->sd);
const struct ov13858_reg_list *reg_list;
int ret, link_freq_index;
@@ -1416,7 +1417,7 @@ static int ov13858_start_streaming(struct ov13858 *ov13858)
ret = ov13858_write_reg(ov13858, OV13858_REG_SOFTWARE_RST,
OV13858_REG_VALUE_08BIT, OV13858_SOFTWARE_RST);
if (ret) {
- dev_err(&client->dev, "%s failed to set powerup registers\n",
+ dev_err(ov13858->dev, "%s failed to set powerup registers\n",
__func__);
return ret;
}
@@ -1426,7 +1427,7 @@ static int ov13858_start_streaming(struct ov13858 *ov13858)
reg_list = &link_freq_configs[link_freq_index].reg_list;
ret = ov13858_write_reg_list(ov13858, reg_list);
if (ret) {
- dev_err(&client->dev, "%s failed to set plls\n", __func__);
+ dev_err(ov13858->dev, "%s failed to set plls\n", __func__);
return ret;
}
@@ -1434,7 +1435,7 @@ static int ov13858_start_streaming(struct ov13858 *ov13858)
reg_list = &ov13858->cur_mode->reg_list;
ret = ov13858_write_reg_list(ov13858, reg_list);
if (ret) {
- dev_err(&client->dev, "%s failed to set mode\n", __func__);
+ dev_err(ov13858->dev, "%s failed to set mode\n", __func__);
return ret;
}
@@ -1458,13 +1459,12 @@ static int ov13858_stop_streaming(struct ov13858 *ov13858)
static int ov13858_set_stream(struct v4l2_subdev *sd, int enable)
{
struct ov13858 *ov13858 = to_ov13858(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = 0;
mutex_lock(&ov13858->mutex);
if (enable) {
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(ov13858->dev);
if (ret < 0)
goto err_unlock;
@@ -1477,7 +1477,7 @@ static int ov13858_set_stream(struct v4l2_subdev *sd, int enable)
goto err_rpm_put;
} else {
ov13858_stop_streaming(ov13858);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov13858->dev);
}
mutex_unlock(&ov13858->mutex);
@@ -1485,7 +1485,7 @@ static int ov13858_set_stream(struct v4l2_subdev *sd, int enable)
return ret;
err_rpm_put:
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov13858->dev);
err_unlock:
mutex_unlock(&ov13858->mutex);
@@ -1495,7 +1495,6 @@ err_unlock:
/* Verify chip ID */
static int ov13858_identify_module(struct ov13858 *ov13858)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov13858->sd);
int ret;
u32 val;
@@ -1505,7 +1504,7 @@ static int ov13858_identify_module(struct ov13858 *ov13858)
return ret;
if (val != OV13858_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
+ dev_err(ov13858->dev, "chip id mismatch: %x!=%x\n",
OV13858_CHIP_ID, val);
return -EIO;
}
@@ -1552,7 +1551,6 @@ static const struct v4l2_subdev_internal_ops ov13858_internal_ops = {
/* Initialize control handlers */
static int ov13858_init_controls(struct ov13858 *ov13858)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov13858->sd);
struct v4l2_fwnode_device_properties props;
struct v4l2_ctrl_handler *ctrl_hdlr;
s64 exposure_max;
@@ -1626,12 +1624,12 @@ static int ov13858_init_controls(struct ov13858 *ov13858)
0, 0, ov13858_test_pattern_menu);
if (ctrl_hdlr->error) {
ret = ctrl_hdlr->error;
- dev_err(&client->dev, "%s control init failed (%d)\n",
+ dev_err(ov13858->dev, "%s control init failed (%d)\n",
__func__, ret);
goto error;
}
- ret = v4l2_fwnode_device_parse(&client->dev, &props);
+ ret = v4l2_fwnode_device_parse(ov13858->dev, &props);
if (ret)
goto error;
@@ -1660,24 +1658,33 @@ static void ov13858_free_controls(struct ov13858 *ov13858)
static int ov13858_probe(struct i2c_client *client)
{
struct ov13858 *ov13858;
+ unsigned long freq;
int ret;
- u32 val = 0;
-
- device_property_read_u32(&client->dev, "clock-frequency", &val);
- if (val != 19200000)
- return -EINVAL;
ov13858 = devm_kzalloc(&client->dev, sizeof(*ov13858), GFP_KERNEL);
if (!ov13858)
return -ENOMEM;
+ ov13858->dev = &client->dev;
+
+ ov13858->clk = devm_v4l2_sensor_clk_get(ov13858->dev, NULL);
+ if (IS_ERR(ov13858->clk))
+ return dev_err_probe(ov13858->dev, PTR_ERR(ov13858->clk),
+ "failed to get clock\n");
+
+ freq = clk_get_rate(ov13858->clk);
+ if (freq != 19200000)
+ return dev_err_probe(ov13858->dev, -EINVAL,
+ "external clock %lu is not supported\n",
+ freq);
+
/* Initialize subdev */
v4l2_i2c_subdev_init(&ov13858->sd, client, &ov13858_subdev_ops);
/* Check module identity */
ret = ov13858_identify_module(ov13858);
if (ret) {
- dev_err(&client->dev, "failed to find sensor: %d\n", ret);
+ dev_err(ov13858->dev, "failed to find sensor: %d\n", ret);
return ret;
}
@@ -1699,7 +1706,7 @@ static int ov13858_probe(struct i2c_client *client)
ov13858->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&ov13858->sd.entity, 1, &ov13858->pad);
if (ret) {
- dev_err(&client->dev, "%s failed:%d\n", __func__, ret);
+ dev_err(ov13858->dev, "%s failed:%d\n", __func__, ret);
goto error_handler_free;
}
@@ -1711,9 +1718,9 @@ static int ov13858_probe(struct i2c_client *client)
* Device is already turned on by i2c-core with ACPI domain PM.
* Enable runtime PM and turn off the device.
*/
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
- pm_runtime_idle(&client->dev);
+ pm_runtime_set_active(ov13858->dev);
+ pm_runtime_enable(ov13858->dev);
+ pm_runtime_idle(ov13858->dev);
return 0;
@@ -1722,7 +1729,7 @@ error_media_entity:
error_handler_free:
ov13858_free_controls(ov13858);
- dev_err(&client->dev, "%s failed:%d\n", __func__, ret);
+ dev_err(ov13858->dev, "%s failed:%d\n", __func__, ret);
return ret;
}
@@ -1736,7 +1743,7 @@ static void ov13858_remove(struct i2c_client *client)
media_entity_cleanup(&sd->entity);
ov13858_free_controls(ov13858);
- pm_runtime_disable(&client->dev);
+ pm_runtime_disable(ov13858->dev);
}
static const struct i2c_device_id ov13858_id_table[] = {
diff --git a/drivers/media/i2c/ov13b10.c b/drivers/media/i2c/ov13b10.c
index e85c7d33a670..869bc78ed792 100644
--- a/drivers/media/i2c/ov13b10.c
+++ b/drivers/media/i2c/ov13b10.c
@@ -700,6 +700,8 @@ static const struct ov13b10_mode supported_2_lanes_modes[] = {
};
struct ov13b10 {
+ struct device *dev;
+
struct v4l2_subdev sd;
struct media_pad pad;
@@ -805,7 +807,6 @@ static int ov13b10_write_reg(struct ov13b10 *ov13b,
static int ov13b10_write_regs(struct ov13b10 *ov13b,
const struct ov13b10_reg *regs, u32 len)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov13b->sd);
int ret;
u32 i;
@@ -813,7 +814,7 @@ static int ov13b10_write_regs(struct ov13b10 *ov13b,
ret = ov13b10_write_reg(ov13b, regs[i].address, 1,
regs[i].val);
if (ret) {
- dev_err_ratelimited(&client->dev,
+ dev_err_ratelimited(ov13b->dev,
"Failed to write reg 0x%4.4x. error = %d\n",
regs[i].address, ret);
@@ -968,7 +969,6 @@ static int ov13b10_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct ov13b10 *ov13b = container_of(ctrl->handler,
struct ov13b10, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&ov13b->sd);
s64 max;
int ret;
@@ -987,7 +987,7 @@ static int ov13b10_set_ctrl(struct v4l2_ctrl *ctrl)
* Applying V4L2 control value only happens
* when power is up for streaming
*/
- if (!pm_runtime_get_if_in_use(&client->dev))
+ if (!pm_runtime_get_if_in_use(ov13b->dev))
return 0;
ret = 0;
@@ -1021,13 +1021,13 @@ static int ov13b10_set_ctrl(struct v4l2_ctrl *ctrl)
ov13b10_set_ctrl_vflip(ov13b, ctrl->val);
break;
default:
- dev_info(&client->dev,
+ dev_info(ov13b->dev,
"ctrl(id:0x%x,val:0x%x) is not handled\n",
ctrl->id, ctrl->val);
break;
}
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov13b->dev);
return ret;
}
@@ -1166,7 +1166,6 @@ ov13b10_set_pad_format(struct v4l2_subdev *sd,
/* Verify chip ID */
static int ov13b10_identify_module(struct ov13b10 *ov13b)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov13b->sd);
int ret;
u32 val;
@@ -1179,7 +1178,7 @@ static int ov13b10_identify_module(struct ov13b10 *ov13b)
return ret;
if (val != OV13B10_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
+ dev_err(ov13b->dev, "chip id mismatch: %x!=%x\n",
OV13B10_CHIP_ID, val);
return -EIO;
}
@@ -1234,7 +1233,6 @@ static int ov13b10_power_on(struct device *dev)
static int ov13b10_start_streaming(struct ov13b10 *ov13b)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov13b->sd);
const struct ov13b10_reg_list *reg_list;
int ret, link_freq_index;
@@ -1246,7 +1244,7 @@ static int ov13b10_start_streaming(struct ov13b10 *ov13b)
ret = ov13b10_write_reg(ov13b, OV13B10_REG_SOFTWARE_RST,
OV13B10_REG_VALUE_08BIT, OV13B10_SOFTWARE_RST);
if (ret) {
- dev_err(&client->dev, "%s failed to set powerup registers\n",
+ dev_err(ov13b->dev, "%s failed to set powerup registers\n",
__func__);
return ret;
}
@@ -1255,7 +1253,7 @@ static int ov13b10_start_streaming(struct ov13b10 *ov13b)
reg_list = &link_freq_configs[link_freq_index].reg_list;
ret = ov13b10_write_reg_list(ov13b, reg_list);
if (ret) {
- dev_err(&client->dev, "%s failed to set plls\n", __func__);
+ dev_err(ov13b->dev, "%s failed to set plls\n", __func__);
return ret;
}
@@ -1263,7 +1261,7 @@ static int ov13b10_start_streaming(struct ov13b10 *ov13b)
reg_list = &ov13b->cur_mode->reg_list;
ret = ov13b10_write_reg_list(ov13b, reg_list);
if (ret) {
- dev_err(&client->dev, "%s failed to set mode\n", __func__);
+ dev_err(ov13b->dev, "%s failed to set mode\n", __func__);
return ret;
}
@@ -1287,13 +1285,12 @@ static int ov13b10_stop_streaming(struct ov13b10 *ov13b)
static int ov13b10_set_stream(struct v4l2_subdev *sd, int enable)
{
struct ov13b10 *ov13b = to_ov13b10(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = 0;
mutex_lock(&ov13b->mutex);
if (enable) {
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(ov13b->dev);
if (ret < 0)
goto err_unlock;
@@ -1306,7 +1303,7 @@ static int ov13b10_set_stream(struct v4l2_subdev *sd, int enable)
goto err_rpm_put;
} else {
ov13b10_stop_streaming(ov13b);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov13b->dev);
}
mutex_unlock(&ov13b->mutex);
@@ -1314,7 +1311,7 @@ static int ov13b10_set_stream(struct v4l2_subdev *sd, int enable)
return ret;
err_rpm_put:
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov13b->dev);
err_unlock:
mutex_unlock(&ov13b->mutex);
@@ -1360,7 +1357,6 @@ static const struct v4l2_subdev_internal_ops ov13b10_internal_ops = {
/* Initialize control handlers */
static int ov13b10_init_controls(struct ov13b10 *ov13b)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov13b->sd);
struct v4l2_fwnode_device_properties props;
struct v4l2_ctrl_handler *ctrl_hdlr;
s64 exposure_max;
@@ -1443,12 +1439,12 @@ static int ov13b10_init_controls(struct ov13b10 *ov13b)
if (ctrl_hdlr->error) {
ret = ctrl_hdlr->error;
- dev_err(&client->dev, "%s control init failed (%d)\n",
+ dev_err(ov13b->dev, "%s control init failed (%d)\n",
__func__, ret);
goto error;
}
- ret = v4l2_fwnode_device_parse(&client->dev, &props);
+ ret = v4l2_fwnode_device_parse(ov13b->dev, &props);
if (ret)
goto error;
@@ -1474,44 +1470,49 @@ static void ov13b10_free_controls(struct ov13b10 *ov13b)
mutex_destroy(&ov13b->mutex);
}
-static int ov13b10_get_pm_resources(struct device *dev)
+static int ov13b10_get_pm_resources(struct ov13b10 *ov13b)
{
- struct v4l2_subdev *sd = dev_get_drvdata(dev);
- struct ov13b10 *ov13b = to_ov13b10(sd);
+ unsigned long freq;
int ret;
- ov13b->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ ov13b->reset = devm_gpiod_get_optional(ov13b->dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(ov13b->reset))
- return dev_err_probe(dev, PTR_ERR(ov13b->reset),
+ return dev_err_probe(ov13b->dev, PTR_ERR(ov13b->reset),
"failed to get reset gpio\n");
- ov13b->img_clk = devm_clk_get_optional(dev, NULL);
+ ov13b->img_clk = devm_v4l2_sensor_clk_get(ov13b->dev, NULL);
if (IS_ERR(ov13b->img_clk))
- return dev_err_probe(dev, PTR_ERR(ov13b->img_clk),
+ return dev_err_probe(ov13b->dev, PTR_ERR(ov13b->img_clk),
"failed to get imaging clock\n");
- ov13b->avdd = devm_regulator_get_optional(dev, "avdd");
+ freq = clk_get_rate(ov13b->img_clk);
+ if (freq != OV13B10_EXT_CLK)
+ return dev_err_probe(ov13b->dev, -EINVAL,
+ "external clock %lu is not supported\n",
+ freq);
+
+ ov13b->avdd = devm_regulator_get_optional(ov13b->dev, "avdd");
if (IS_ERR(ov13b->avdd)) {
ret = PTR_ERR(ov13b->avdd);
ov13b->avdd = NULL;
if (ret != -ENODEV)
- return dev_err_probe(dev, ret,
+ return dev_err_probe(ov13b->dev, ret,
"failed to get avdd regulator\n");
}
return 0;
}
-static int ov13b10_check_hwcfg(struct device *dev, struct ov13b10 *ov13b)
+static int ov13b10_check_hwcfg(struct ov13b10 *ov13b)
{
struct v4l2_fwnode_endpoint bus_cfg = {
.bus_type = V4L2_MBUS_CSI2_DPHY
};
+ struct device *dev = ov13b->dev;
struct fwnode_handle *ep;
struct fwnode_handle *fwnode = dev_fwnode(dev);
unsigned int i, j;
int ret;
- u32 ext_clk;
u8 dlane;
if (!fwnode)
@@ -1521,19 +1522,6 @@ static int ov13b10_check_hwcfg(struct device *dev, struct ov13b10 *ov13b)
if (!ep)
return -EPROBE_DEFER;
- ret = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency",
- &ext_clk);
- if (ret) {
- dev_err(dev, "can't get clock frequency");
- return ret;
- }
-
- if (ext_clk != OV13B10_EXT_CLK) {
- dev_err(dev, "external clock %d is not supported",
- ext_clk);
- return -EINVAL;
- }
-
ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg);
fwnode_handle_put(ep);
if (ret)
@@ -1602,32 +1590,34 @@ static int ov13b10_probe(struct i2c_client *client)
if (!ov13b)
return -ENOMEM;
+ ov13b->dev = &client->dev;
+
/* Check HW config */
- ret = ov13b10_check_hwcfg(&client->dev, ov13b);
+ ret = ov13b10_check_hwcfg(ov13b);
if (ret) {
- dev_err(&client->dev, "failed to check hwcfg: %d", ret);
+ dev_err(ov13b->dev, "failed to check hwcfg: %d", ret);
return ret;
}
/* Initialize subdev */
v4l2_i2c_subdev_init(&ov13b->sd, client, &ov13b10_subdev_ops);
- ret = ov13b10_get_pm_resources(&client->dev);
+ ret = ov13b10_get_pm_resources(ov13b);
if (ret)
return ret;
- full_power = acpi_dev_state_d0(&client->dev);
+ full_power = acpi_dev_state_d0(ov13b->dev);
if (full_power) {
- ret = ov13b10_power_on(&client->dev);
+ ret = ov13b10_power_on(ov13b->dev);
if (ret) {
- dev_err(&client->dev, "failed to power on\n");
+ dev_err(ov13b->dev, "failed to power on\n");
return ret;
}
/* Check module identity */
ret = ov13b10_identify_module(ov13b);
if (ret) {
- dev_err(&client->dev, "failed to find sensor: %d\n", ret);
+ dev_err(ov13b->dev, "failed to find sensor: %d\n", ret);
goto error_power_off;
}
}
@@ -1646,7 +1636,7 @@ static int ov13b10_probe(struct i2c_client *client)
ov13b->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&ov13b->sd.entity, 1, &ov13b->pad);
if (ret) {
- dev_err(&client->dev, "%s failed:%d\n", __func__, ret);
+ dev_err(ov13b->dev, "%s failed:%d\n", __func__, ret);
goto error_handler_free;
}
@@ -1657,9 +1647,9 @@ static int ov13b10_probe(struct i2c_client *client)
*/
/* Set the device's state to active if it's in D0 state. */
if (full_power)
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
- pm_runtime_idle(&client->dev);
+ pm_runtime_set_active(ov13b->dev);
+ pm_runtime_enable(ov13b->dev);
+ pm_runtime_idle(ov13b->dev);
ret = v4l2_async_register_subdev_sensor(&ov13b->sd);
if (ret < 0)
@@ -1668,17 +1658,17 @@ static int ov13b10_probe(struct i2c_client *client)
return 0;
error_media_entity_runtime_pm:
- pm_runtime_disable(&client->dev);
+ pm_runtime_disable(ov13b->dev);
if (full_power)
- pm_runtime_set_suspended(&client->dev);
+ pm_runtime_set_suspended(ov13b->dev);
media_entity_cleanup(&ov13b->sd.entity);
error_handler_free:
ov13b10_free_controls(ov13b);
- dev_err(&client->dev, "%s failed:%d\n", __func__, ret);
+ dev_err(ov13b->dev, "%s failed:%d\n", __func__, ret);
error_power_off:
- ov13b10_power_off(&client->dev);
+ ov13b10_power_off(ov13b->dev);
return ret;
}
@@ -1692,8 +1682,8 @@ static void ov13b10_remove(struct i2c_client *client)
media_entity_cleanup(&sd->entity);
ov13b10_free_controls(ov13b);
- pm_runtime_disable(&client->dev);
- pm_runtime_set_suspended(&client->dev);
+ pm_runtime_disable(ov13b->dev);
+ pm_runtime_set_suspended(ov13b->dev);
}
static DEFINE_RUNTIME_DEV_PM_OPS(ov13b10_pm_ops, ov13b10_suspend,
diff --git a/drivers/media/i2c/ov2659.c b/drivers/media/i2c/ov2659.c
index 586b31ba076b..061401b020fc 100644
--- a/drivers/media/i2c/ov2659.c
+++ b/drivers/media/i2c/ov2659.c
@@ -1437,9 +1437,10 @@ static int ov2659_probe(struct i2c_client *client)
ov2659->pdata = pdata;
ov2659->client = client;
- ov2659->clk = devm_clk_get(&client->dev, "xvclk");
+ ov2659->clk = devm_v4l2_sensor_clk_get(&client->dev, "xvclk");
if (IS_ERR(ov2659->clk))
- return PTR_ERR(ov2659->clk);
+ return dev_err_probe(&client->dev, PTR_ERR(ov2659->clk),
+ "failed to get xvclk\n");
ov2659->xvclk_frequency = clk_get_rate(ov2659->clk);
if (ov2659->xvclk_frequency < 6000000 ||
diff --git a/drivers/media/i2c/ov2680.c b/drivers/media/i2c/ov2680.c
index 7237fb27ecd0..78e63bd1b35b 100644
--- a/drivers/media/i2c/ov2680.c
+++ b/drivers/media/i2c/ov2680.c
@@ -1079,7 +1079,6 @@ static int ov2680_parse_dt(struct ov2680_dev *sensor)
struct device *dev = sensor->dev;
struct fwnode_handle *ep_fwnode;
struct gpio_desc *gpio;
- unsigned int rate = 0;
int i, ret;
/*
@@ -1114,38 +1113,14 @@ static int ov2680_parse_dt(struct ov2680_dev *sensor)
sensor->pwdn_gpio = gpio;
- sensor->xvclk = devm_clk_get_optional(dev, "xvclk");
+ sensor->xvclk = devm_v4l2_sensor_clk_get(dev, "xvclk");
if (IS_ERR(sensor->xvclk)) {
ret = dev_err_probe(dev, PTR_ERR(sensor->xvclk),
"xvclk clock missing or invalid\n");
goto out_free_bus_cfg;
}
- /*
- * We could have either a 24MHz or 19.2MHz clock rate from either DT or
- * ACPI... but we also need to support the weird IPU3 case which will
- * have an external clock AND a clock-frequency property. Check for the
- * clock-frequency property and if found, set that rate if we managed
- * to acquire a clock. This should cover the ACPI case. If the system
- * uses devicetree then the configured rate should already be set, so
- * we can just read it.
- */
- ret = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency",
- &rate);
- if (ret && !sensor->xvclk) {
- dev_err_probe(dev, ret, "invalid clock config\n");
- goto out_free_bus_cfg;
- }
-
- if (!ret && sensor->xvclk) {
- ret = clk_set_rate(sensor->xvclk, rate);
- if (ret) {
- dev_err_probe(dev, ret, "failed to set clock rate\n");
- goto out_free_bus_cfg;
- }
- }
-
- sensor->xvclk_freq = rate ?: clk_get_rate(sensor->xvclk);
+ sensor->xvclk_freq = clk_get_rate(sensor->xvclk);
for (i = 0; i < ARRAY_SIZE(ov2680_xvclk_freqs); i++) {
if (sensor->xvclk_freq == ov2680_xvclk_freqs[i])
diff --git a/drivers/media/i2c/ov2685.c b/drivers/media/i2c/ov2685.c
index 9b8481b8dcd4..4911a4eea126 100644
--- a/drivers/media/i2c/ov2685.c
+++ b/drivers/media/i2c/ov2685.c
@@ -783,16 +783,12 @@ static int ov2685_probe(struct i2c_client *client)
ov2685->client = client;
ov2685->cur_mode = &supported_modes[0];
- ov2685->xvclk = devm_clk_get(dev, "xvclk");
- if (IS_ERR(ov2685->xvclk)) {
- dev_err(dev, "Failed to get xvclk\n");
- return -EINVAL;
- }
- ret = clk_set_rate(ov2685->xvclk, OV2685_XVCLK_FREQ);
- if (ret < 0) {
- dev_err(dev, "Failed to set xvclk rate (24MHz)\n");
- return ret;
- }
+ ov2685->xvclk = devm_v4l2_sensor_clk_get_legacy(dev, "xvclk", true,
+ OV2685_XVCLK_FREQ);
+ if (IS_ERR(ov2685->xvclk))
+ return dev_err_probe(dev, PTR_ERR(ov2685->xvclk),
+ "Failed to get xvclk\n");
+
if (clk_get_rate(ov2685->xvclk) != OV2685_XVCLK_FREQ)
dev_warn(dev, "xvclk mismatched, modes are based on 24MHz\n");
diff --git a/drivers/media/i2c/ov2735.c b/drivers/media/i2c/ov2735.c
new file mode 100644
index 000000000000..b96600204141
--- /dev/null
+++ b/drivers/media/i2c/ov2735.c
@@ -0,0 +1,1109 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * V4L2 Support for the OV2735
+ *
+ * Copyright (C) 2025 Silicon Signals Pvt. Ltd.
+ *
+ * Based on Rockchip ov2735 Camera Driver
+ * Copyright (C) 2017 Fuzhou Rockchip Electronics Co., Ltd.
+ *
+ * Inspired by the ov8858, imx219 and imx283 camera drivers.
+ */
+
+#include <linux/array_size.h>
+#include <linux/bitops.h>
+#include <linux/cleanup.h>
+#include <linux/clk.h>
+#include <linux/container_of.h>
+#include <linux/delay.h>
+#include <linux/device/devres.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pm_runtime.h>
+#include <linux/property.h>
+#include <linux/regulator/consumer.h>
+#include <linux/time.h>
+#include <linux/types.h>
+#include <linux/units.h>
+
+#include <media/v4l2-cci.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-mediabus.h>
+
+#define OV2735_XCLK_FREQ (24 * HZ_PER_MHZ)
+
+/* Add page number in CCI private bits [31:28] of the register address */
+#define OV2735_PAGE_REG8(p, x) (((p) << CCI_REG_PRIVATE_SHIFT) | CCI_REG8(x))
+#define OV2735_PAGE_REG16(p, x) (((p) << CCI_REG_PRIVATE_SHIFT) | CCI_REG16(x))
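+/*
+ * Example: OV2735_PAGE_REG16(0x00, 0x02) encodes page 0, address 0x02;
+ * ov2735_page_access() recovers the page via (reg >> CCI_REG_PRIVATE_SHIFT)
+ * and ov2735_read()/ov2735_write() mask the page bits off before the CCI
+ * access.
+ */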
+
+#define OV2735_REG_PAGE_SELECT CCI_REG8(0xfd)
+
+/* Page 0 */
+#define OV2735_REG_CHIPID OV2735_PAGE_REG16(0x00, 0x02)
+#define OV2735_CHIPID 0x2735
+
+#define OV2735_REG_SOFT_RESET OV2735_PAGE_REG8(0x00, 0x20)
+
+/* Clock Settings */
+#define OV2735_REG_PLL_CTRL OV2735_PAGE_REG8(0x00, 0x2f)
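+/* Mask clearing pll_clk_sel (BIT[7]); keeps the pll_nc and pll_mc fields */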
+#define OV2735_PLL_CTRL_ENABLE 0x7f
+#define OV2735_REG_PLL_OUTDIV OV2735_PAGE_REG8(0x00, 0x34)
+#define OV2735_REG_CLK_MODE OV2735_PAGE_REG8(0x00, 0x30)
+#define OV2735_REG_CLOCK_REG1 OV2735_PAGE_REG8(0x00, 0x33)
+#define OV2735_REG_CLOCK_REG2 OV2735_PAGE_REG8(0x00, 0x35)
+
+/* Page 1 */
+#define OV2735_REG_STREAM_CTRL OV2735_PAGE_REG8(0x01, 0xa0)
+#define OV2735_STREAM_CTRL_ON 0x01
+#define OV2735_STREAM_CTRL_OFF 0x00
+
+#define OV2735_REG_UPDOWN_MIRROR OV2735_PAGE_REG8(0x01, 0x3f)
+#define OV2735_REG_BINNING_DAC_CODE_MODE OV2735_PAGE_REG8(0x01, 0x30)
+#define OV2735_REG_FRAME_LENGTH OV2735_PAGE_REG16(0x01, 0x0e)
+#define OV2735_FRAME_LENGTH_MAX 0x0fff
+#define OV2735_REG_FRAME_EXP_SEPERATE_EN OV2735_PAGE_REG8(0x01, 0x0d)
+#define OV2735_FRAME_EXP_SEPERATE_EN 0x10
+#define OV2735_REG_FRAME_SYNC OV2735_PAGE_REG8(0x01, 0x01)
+
+#define OV2735_REG_HBLANK OV2735_PAGE_REG16(0x01, 0x09)
+
+#define OV2735_REG_HS_MIPI OV2735_PAGE_REG8(0x01, 0xb1)
+#define OV2735_REG_MIPI_CTRL1 OV2735_PAGE_REG8(0x01, 0x92)
+#define OV2735_REG_MIPI_CTRL2 OV2735_PAGE_REG8(0x01, 0x94)
+#define OV2735_REG_MIPI_CTRL3 OV2735_PAGE_REG8(0x01, 0xa1)
+#define OV2735_REG_MIPI_CTRL4 OV2735_PAGE_REG8(0x01, 0xb2)
+#define OV2735_REG_MIPI_CTRL5 OV2735_PAGE_REG8(0x01, 0xb3)
+#define OV2735_REG_MIPI_CTRL6 OV2735_PAGE_REG8(0x01, 0xb4)
+#define OV2735_REG_MIPI_CTRL7 OV2735_PAGE_REG8(0x01, 0xb5)
+#define OV2735_REG_HIGH_SPEED OV2735_PAGE_REG8(0x01, 0x9d)
+#define OV2735_REG_PREPARE OV2735_PAGE_REG8(0x01, 0x95)
+#define OV2735_REG_R_HS_ZERO OV2735_PAGE_REG8(0x01, 0x96)
+#define OV2735_REG_TRAIL OV2735_PAGE_REG8(0x01, 0x98)
+#define OV2735_REG_R_CLK_ZERO OV2735_PAGE_REG8(0x01, 0x9c)
+#define OV2735_REG_MIPI_COLOMN_NUMBER OV2735_PAGE_REG16(0x01, 0x8e)
+#define OV2735_REG_MIPI_LINE_NUMBER OV2735_PAGE_REG16(0x01, 0x90)
+
+/* Timing control registers */
+#define OV2735_REG_TIMING_CTRL2 OV2735_PAGE_REG8(0x01, 0x1a)
+#define OV2735_REG_TIMING_CTRL3 OV2735_PAGE_REG8(0x01, 0x1c)
+#define OV2735_REG_TIMING_CTRL1 OV2735_PAGE_REG8(0x01, 0x16)
+#define OV2735_REG_RST_NUM OV2735_PAGE_REG16(0x01, 0x10)
+#define OV2735_REG_RST_NUM2 OV2735_PAGE_REG16(0x01, 0x32)
+#define OV2735_REG_BOOST_EN OV2735_PAGE_REG8(0x01, 0xd0)
+#define OV2735_REG_B2_NUM OV2735_PAGE_REG16(0x01, 0xd1)
+#define OV2735_REG_B4_NUM OV2735_PAGE_REG16(0x01, 0xd3)
+#define OV2735_REG_PIXEL_CYCLE_P0 OV2735_PAGE_REG8(0x01, 0x50)
+#define OV2735_REG_PIXEL_CYCLE_P1 OV2735_PAGE_REG8(0x01, 0x51)
+#define OV2735_REG_PIXEL_CYCLE_P2 OV2735_PAGE_REG8(0x01, 0x52)
+#define OV2735_REG_PIXEL_CYCLE_P3 OV2735_PAGE_REG8(0x01, 0x53)
+#define OV2735_REG_PIXEL_CYCLE_P5 OV2735_PAGE_REG8(0x01, 0x55)
+#define OV2735_REG_PIXEL_CYCLE_P7 OV2735_PAGE_REG16(0x01, 0x57)
+#define OV2735_REG_PIXEL_CYCLE_P9 OV2735_PAGE_REG8(0x01, 0x5a)
+#define OV2735_REG_PIXEL_CYCLE_P10 OV2735_PAGE_REG8(0x01, 0x5b)
+#define OV2735_REG_PIXEL_CYCLE_P12 OV2735_PAGE_REG8(0x01, 0x5d)
+#define OV2735_REG_PIXEL_CYCLE_P18 OV2735_PAGE_REG8(0x01, 0x64)
+#define OV2735_REG_PIXEL_CYCLE_P20 OV2735_PAGE_REG8(0x01, 0x66)
+#define OV2735_REG_PIXEL_CYCLE_P22 OV2735_PAGE_REG8(0x01, 0x68)
+#define OV2735_REG_PIXEL_CYCLE_P33 OV2735_PAGE_REG16(0x01, 0x74)
+#define OV2735_REG_PIXEL_CYCLE_P34 OV2735_PAGE_REG8(0x01, 0x76)
+#define OV2735_REG_PIXEL_CYCLE_P35_P36 OV2735_PAGE_REG8(0x01, 0x77)
+#define OV2735_REG_PIXEL_CYCLE_P37_P38 OV2735_PAGE_REG8(0x01, 0x78)
+#define OV2735_REG_PIXEL_CYCLE_P31 OV2735_PAGE_REG8(0x01, 0x72)
+#define OV2735_REG_PIXEL_CYCLE_P32 OV2735_PAGE_REG8(0x01, 0x73)
+#define OV2735_REG_PIXEL_CYCLE_P44 OV2735_PAGE_REG8(0x01, 0x7d)
+#define OV2735_REG_PIXEL_CYCLE_P45 OV2735_PAGE_REG8(0x01, 0x7e)
+#define OV2735_REG_PIXEL_BIAS_CTRL_RH_RL OV2735_PAGE_REG8(0x01, 0x8a)
+#define OV2735_REG_PIXEL_BIAS_CTRL_SH_SL OV2735_PAGE_REG8(0x01, 0x8b)
+
+/* Analog Control registers */
+#define OV2735_REG_ICOMP OV2735_PAGE_REG8(0x01, 0x19)
+#define OV2735_REG_PCP_RST_SEL OV2735_PAGE_REG8(0x01, 0x21)
+#define OV2735_REG_VNCP OV2735_PAGE_REG8(0x01, 0x20)
+#define OV2735_REG_ANALOG_CTRL3 OV2735_PAGE_REG8(0x01, 0x25)
+#define OV2735_REG_ANALOG_CTRL4 OV2735_PAGE_REG8(0x01, 0x26)
+#define OV2735_REG_ANALOG_CTRL5 OV2735_PAGE_REG8(0x01, 0x29)
+#define OV2735_REG_ANALOG_CTRL6 OV2735_PAGE_REG8(0x01, 0x2a)
+#define OV2735_REG_ANALOG_CTRL8 OV2735_PAGE_REG8(0x01, 0x2c)
+
+/* BLC registers */
+#define OV2735_REG_BLC_GAIN_BLUE OV2735_PAGE_REG8(0x01, 0x86)
+#define OV2735_REG_BLC_GAIN_RED OV2735_PAGE_REG8(0x01, 0x87)
+#define OV2735_REG_BLC_GAIN_GR OV2735_PAGE_REG8(0x01, 0x88)
+#define OV2735_REG_BLC_GAIN_GB OV2735_PAGE_REG8(0x01, 0x89)
+#define OV2735_REG_GB_SUBOFFSET OV2735_PAGE_REG8(0x01, 0xf0)
+#define OV2735_REG_BLUE_SUBOFFSET OV2735_PAGE_REG8(0x01, 0xf1)
+#define OV2735_REG_RED_SUBOFFSET OV2735_PAGE_REG8(0x01, 0xf2)
+#define OV2735_REG_GR_SUBOFFSET OV2735_PAGE_REG8(0x01, 0xf3)
+#define OV2735_REG_BLC_BPC_TH_P OV2735_PAGE_REG8(0x01, 0xfc)
+#define OV2735_REG_BLC_BPC_TH_N OV2735_PAGE_REG8(0x01, 0xfe)
+#define OV2735_REG_ABL OV2735_PAGE_REG8(0x01, 0xfb)
+
+#define OV2735_REG_TEST_PATTERN OV2735_PAGE_REG8(0x01, 0xb2)
+#define OV2735_TEST_PATTERN_ENABLE 0x01
+#define OV2735_TEST_PATTERN_DISABLE 0xfe
+
+#define OV2735_REG_LONG_EXPOSURE OV2735_PAGE_REG16(0x01, 0x03)
+#define OV2735_EXPOSURE_MIN 4
+#define OV2735_EXPOSURE_STEP 1
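+/* Maximum exposure is VTS - OV2735_EXPOSURE_MARGIN lines */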
+#define OV2735_EXPOSURE_MARGIN 4
+
+#define OV2735_REG_ANALOG_GAIN OV2735_PAGE_REG8(0x01, 0x24)
+#define OV2735_ANALOG_GAIN_MIN 0x10
+#define OV2735_ANALOG_GAIN_MAX 0xff
+#define OV2735_ANALOG_GAIN_STEP 1
+#define OV2735_ANALOG_GAIN_DEFAULT 0x10
+
+/* Page 2 */
+#define OV2735_REG_V_START OV2735_PAGE_REG16(0x02, 0xa0)
+#define OV2735_REG_V_SIZE OV2735_PAGE_REG16(0x02, 0xa2)
+#define OV2735_REG_H_START OV2735_PAGE_REG16(0x02, 0xa4)
+#define OV2735_REG_H_SIZE OV2735_PAGE_REG16(0x02, 0xa6)
+
+#define OV2735_LINK_FREQ_420MHZ (420 * HZ_PER_MHZ)
+#define OV2735_PIXEL_RATE (168 * HZ_PER_MHZ)
+
+/* OV2735 native and active pixel array size */
+static const struct v4l2_rect ov2735_native_area = {
+ .top = 0,
+ .left = 0,
+ .width = 1936,
+ .height = 1096,
+};
+
+static const struct v4l2_rect ov2735_active_area = {
+ .top = 8,
+ .left = 8,
+ .width = 1920,
+ .height = 1080,
+};
+
+static const char * const ov2735_supply_name[] = {
+ "avdd", /* Analog power */
+ "dovdd", /* Digital I/O power */
+ "dvdd", /* Digital core power */
+};
+
+/* PLL_OUT = [PLL_IN * (pll_nc + 3)] / [(pll_mc + 1) * (pll_outdiv + 1)] */
+struct ov2735_pll_parameters {
+ u8 pll_nc;
+ u8 pll_mc;
+ u8 pll_outdiv;
+};
+
+struct ov2735 {
+ struct device *dev;
+ struct regmap *cci;
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct clk *xclk;
+ struct gpio_desc *reset_gpio;
+ struct gpio_desc *enable_gpio;
+ struct regulator_bulk_data supplies[ARRAY_SIZE(ov2735_supply_name)];
+
+ /* V4L2 Controls */
+ struct v4l2_ctrl_handler handler;
+ struct v4l2_ctrl *link_freq;
+ struct v4l2_ctrl *pixel_rate;
+ struct v4l2_ctrl *hblank;
+ struct v4l2_ctrl *vblank;
+ struct v4l2_ctrl *gain;
+ struct v4l2_ctrl *exposure;
+ struct v4l2_ctrl *test_pattern;
+
+ u32 link_freq_index;
+
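+	/* Last page selected on the sensor; guarded by page_lock */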
+ u8 current_page;
+ struct mutex page_lock;
+};
+
+struct ov2735_mode {
+ u32 width;
+ u32 height;
+ u32 hts_def;
+ u32 vts_def;
+ u32 exp_def;
+ struct v4l2_rect crop;
+};
+
+static const struct cci_reg_sequence ov2735_common_regs[] = {
+ { OV2735_REG_CLK_MODE, 0x15 },
+ { OV2735_REG_CLOCK_REG1, 0x01 },
+ { OV2735_REG_CLOCK_REG2, 0x20 },
+ { OV2735_REG_BINNING_DAC_CODE_MODE, 0x00 },
+ { OV2735_REG_ABL, 0x73 },
+ { OV2735_REG_FRAME_SYNC, 0x01 },
+
+ /* Timing ctrl */
+ { OV2735_REG_TIMING_CTRL2, 0x6b },
+ { OV2735_REG_TIMING_CTRL3, 0xea },
+ { OV2735_REG_TIMING_CTRL1, 0x0c },
+ { OV2735_REG_RST_NUM, 0x0063 },
+ { OV2735_REG_RST_NUM2, 0x006f },
+ { OV2735_REG_BOOST_EN, 0x02 },
+ { OV2735_REG_B2_NUM, 0x0120 },
+ { OV2735_REG_B4_NUM, 0x042a },
+ { OV2735_REG_PIXEL_CYCLE_P0, 0x00 },
+ { OV2735_REG_PIXEL_CYCLE_P1, 0x2c },
+ { OV2735_REG_PIXEL_CYCLE_P2, 0x29 },
+ { OV2735_REG_PIXEL_CYCLE_P3, 0x00 },
+ { OV2735_REG_PIXEL_CYCLE_P5, 0x44 },
+ { OV2735_REG_PIXEL_CYCLE_P7, 0x0029 },
+ { OV2735_REG_PIXEL_CYCLE_P9, 0x00 },
+ { OV2735_REG_PIXEL_CYCLE_P10, 0x00 },
+ { OV2735_REG_PIXEL_CYCLE_P12, 0x00 },
+ { OV2735_REG_PIXEL_CYCLE_P18, 0x2f },
+ { OV2735_REG_PIXEL_CYCLE_P20, 0x62 },
+ { OV2735_REG_PIXEL_CYCLE_P22, 0x5b },
+ { OV2735_REG_PIXEL_CYCLE_P33, 0x0046 },
+ { OV2735_REG_PIXEL_CYCLE_P34, 0x36 },
+ { OV2735_REG_PIXEL_CYCLE_P35_P36, 0x4f },
+ { OV2735_REG_PIXEL_CYCLE_P37_P38, 0xef },
+ { OV2735_REG_PIXEL_CYCLE_P31, 0xcf },
+ { OV2735_REG_PIXEL_CYCLE_P32, 0x36 },
+ { OV2735_REG_PIXEL_CYCLE_P44, 0x0d },
+ { OV2735_REG_PIXEL_CYCLE_P45, 0x0d },
+ { OV2735_REG_PIXEL_BIAS_CTRL_RH_RL, 0x77 },
+ { OV2735_REG_PIXEL_BIAS_CTRL_SH_SL, 0x77 },
+
+ /* Analog ctrl */
+ { OV2735_REG_ANALOG_CTRL4, 0x5a },
+ { OV2735_REG_ANALOG_CTRL5, 0x01 },
+ { OV2735_REG_ANALOG_CTRL6, 0xd2 },
+ { OV2735_REG_ANALOG_CTRL8, 0x40 },
+ { OV2735_REG_PCP_RST_SEL, 0x00 },
+ { OV2735_REG_ICOMP, 0xc3 },
+
+ { OV2735_REG_HS_MIPI, 0x83 },
+ { OV2735_REG_MIPI_CTRL5, 0x0b },
+ { OV2735_REG_MIPI_CTRL6, 0x14 },
+ { OV2735_REG_HIGH_SPEED, 0x40 },
+ { OV2735_REG_MIPI_CTRL3, 0x05 },
+ { OV2735_REG_MIPI_CTRL2, 0x44 },
+ { OV2735_REG_PREPARE, 0x33 },
+ { OV2735_REG_R_HS_ZERO, 0x1f },
+ { OV2735_REG_TRAIL, 0x45 },
+ { OV2735_REG_R_CLK_ZERO, 0x10 },
+ { OV2735_REG_MIPI_CTRL7, 0x70 },
+ { OV2735_REG_ANALOG_CTRL3, 0xe0 },
+ { OV2735_REG_VNCP, 0x7b },
+
+ /* BLC */
+ { OV2735_REG_BLC_GAIN_BLUE, 0x77 },
+ { OV2735_REG_BLC_GAIN_GB, 0x77 },
+ { OV2735_REG_BLC_GAIN_RED, 0x74 },
+ { OV2735_REG_BLC_GAIN_GR, 0x74 },
+ { OV2735_REG_BLC_BPC_TH_P, 0xe0 },
+ { OV2735_REG_BLC_BPC_TH_N, 0xe0 },
+ { OV2735_REG_GB_SUBOFFSET, 0x40 },
+ { OV2735_REG_BLUE_SUBOFFSET, 0x40 },
+ { OV2735_REG_RED_SUBOFFSET, 0x40 },
+ { OV2735_REG_GR_SUBOFFSET, 0x40 },
+};
+
+static const struct ov2735_mode supported_modes[] = {
+ {
+ .width = 1920,
+ .height = 1080,
+ .exp_def = 399,
+ .hts_def = 2200,
+ .vts_def = 2545,
+ .crop = {
+ .top = 8,
+ .left = 8,
+ .width = 1920,
+ .height = 1080,
+ },
+ },
+};
+
+static const s64 link_freq_menu_items[] = {
+ OV2735_LINK_FREQ_420MHZ,
+};
+
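+/*
+ * Indexed by the same link_freq_index as link_freq_menu_items[]; keep the
+ * two arrays in sync.
+ */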
+static const struct ov2735_pll_parameters pll_configs[] = {
+	/* 420 MHz link frequency */
+ {
+ .pll_nc = 4,
+ .pll_mc = 0,
+ .pll_outdiv = 1,
+ },
+};
+
+static const char * const ov2735_test_pattern_menu[] = {
+ "Disabled",
+ "Vertical Color",
+};
+
+static int ov2735_page_access(struct ov2735 *ov2735, u32 reg, int *err)
+{
+ u8 page = reg >> CCI_REG_PRIVATE_SHIFT;
+ int ret = 0;
+
+ if (err && *err)
+ return *err;
+
+ guard(mutex)(&ov2735->page_lock);
+
+ /* Perform page access before read/write */
+ if (ov2735->current_page == page)
+ return ret;
+
+ ret = cci_write(ov2735->cci, OV2735_REG_PAGE_SELECT, page, err);
+ if (!ret)
+ ov2735->current_page = page;
+
+ return ret;
+}
+
+static int ov2735_read(struct ov2735 *ov2735, u32 reg, u64 *val, int *err)
+{
+ u32 addr = reg & ~CCI_REG_PRIVATE_MASK;
+ int ret;
+
+ ret = ov2735_page_access(ov2735, reg, err);
+ if (ret)
+ return ret;
+
+ return cci_read(ov2735->cci, addr, val, err);
+}
+
+static int ov2735_write(struct ov2735 *ov2735, u32 reg, u64 val, int *err)
+{
+ u32 addr = reg & ~CCI_REG_PRIVATE_MASK;
+ int ret;
+
+ ret = ov2735_page_access(ov2735, reg, err);
+ if (ret)
+ return ret;
+
+ return cci_write(ov2735->cci, addr, val, err);
+}
+
+static int ov2735_multi_reg_write(struct ov2735 *ov2735,
+ const struct cci_reg_sequence *regs,
+ unsigned int num_regs, int *err)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < num_regs; i++) {
+ ret = ov2735_write(ov2735, regs[i].reg, regs[i].val, err);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static inline struct ov2735 *to_ov2735(struct v4l2_subdev *_sd)
+{
+ return container_of_const(_sd, struct ov2735, sd);
+}
+
+static int ov2735_enable_test_pattern(struct ov2735 *ov2735, u32 pattern)
+{
+ int ret;
+ u64 val;
+
+ ret = ov2735_read(ov2735, OV2735_REG_TEST_PATTERN, &val, NULL);
+ if (ret)
+ return ret;
+
+ switch (pattern) {
+ case 0:
+ val &= ~OV2735_TEST_PATTERN_ENABLE;
+ break;
+ case 1:
+ val |= OV2735_TEST_PATTERN_ENABLE;
+ break;
+ }
+
+ return ov2735_write(ov2735, OV2735_REG_TEST_PATTERN, val, NULL);
+}
+
+static int ov2735_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ov2735 *ov2735 =
+ container_of_const(ctrl->handler, struct ov2735, handler);
+ struct v4l2_mbus_framefmt *fmt;
+ struct v4l2_subdev_state *state;
+ u64 vts;
+ int ret = 0;
+
+ state = v4l2_subdev_get_locked_active_state(&ov2735->sd);
+ fmt = v4l2_subdev_state_get_format(state, 0);
+
+ if (ctrl->id == V4L2_CID_VBLANK) {
+ /* Honour the VBLANK limits when setting exposure */
+ s64 max = fmt->height + ctrl->val - OV2735_EXPOSURE_MARGIN;
+
+ ret = __v4l2_ctrl_modify_range(ov2735->exposure,
+ ov2735->exposure->minimum, max,
+ ov2735->exposure->step,
+ ov2735->exposure->default_value);
+ if (ret)
+ return ret;
+ }
+
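+	/* Apply controls to the hardware only while it is powered up */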
+ if (pm_runtime_get_if_in_use(ov2735->dev) == 0)
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_EXPOSURE:
+ ov2735_write(ov2735, OV2735_REG_LONG_EXPOSURE, ctrl->val, &ret);
+ break;
+ case V4L2_CID_ANALOGUE_GAIN:
+ ov2735_write(ov2735, OV2735_REG_ANALOG_GAIN, ctrl->val, &ret);
+ break;
+ case V4L2_CID_HBLANK:
+ ov2735_write(ov2735, OV2735_REG_HBLANK, ctrl->val, &ret);
+ break;
+ case V4L2_CID_VBLANK:
+ vts = ctrl->val + fmt->height;
+ ov2735_write(ov2735, OV2735_REG_FRAME_EXP_SEPERATE_EN,
+ OV2735_FRAME_EXP_SEPERATE_EN, &ret);
+ ov2735_write(ov2735, OV2735_REG_FRAME_LENGTH, vts, &ret);
+ break;
+ case V4L2_CID_TEST_PATTERN:
+ ret = ov2735_enable_test_pattern(ov2735, ctrl->val);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
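+	/*
+	 * Kick OV2735_REG_FRAME_SYNC so the updated settings are latched
+	 * into the next frame (assumed grouped-register latch behaviour,
+	 * carried over from the vendor driver).
+	 */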
+ ov2735_write(ov2735, OV2735_REG_FRAME_SYNC, 0x01, &ret);
+
+ pm_runtime_put(ov2735->dev);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops ov2735_ctrl_ops = {
+ .s_ctrl = ov2735_set_ctrl,
+};
+
+static int ov2735_init_controls(struct ov2735 *ov2735)
+{
+ struct v4l2_ctrl_handler *ctrl_hdlr;
+ struct v4l2_fwnode_device_properties props;
+ const struct ov2735_mode *mode = &supported_modes[0];
+ u64 hblank_def, vblank_def, exp_max;
+ int ret;
+
+ ctrl_hdlr = &ov2735->handler;
+ v4l2_ctrl_handler_init(ctrl_hdlr, 9);
+
+ ov2735->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &ov2735_ctrl_ops,
+ V4L2_CID_PIXEL_RATE, 0,
+ OV2735_PIXEL_RATE, 1,
+ OV2735_PIXEL_RATE);
+
+ ov2735->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr, &ov2735_ctrl_ops,
+ V4L2_CID_LINK_FREQ,
+ ov2735->link_freq_index,
+ 0, link_freq_menu_items);
+ if (ov2735->link_freq)
+ ov2735->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ hblank_def = mode->hts_def - mode->width;
+ ov2735->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov2735_ctrl_ops,
+ V4L2_CID_HBLANK, hblank_def,
+ hblank_def, 1, hblank_def);
+
+ vblank_def = mode->vts_def - mode->height;
+ ov2735->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov2735_ctrl_ops,
+ V4L2_CID_VBLANK, vblank_def,
+ OV2735_FRAME_LENGTH_MAX - mode->height,
+ 1, vblank_def);
+
+ exp_max = mode->vts_def - OV2735_EXPOSURE_MARGIN;
+ ov2735->exposure =
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov2735_ctrl_ops,
+ V4L2_CID_EXPOSURE,
+ OV2735_EXPOSURE_MIN, exp_max,
+ OV2735_EXPOSURE_STEP, mode->exp_def);
+
+ ov2735->gain =
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov2735_ctrl_ops,
+ V4L2_CID_ANALOGUE_GAIN, OV2735_ANALOG_GAIN_MIN,
+ OV2735_ANALOG_GAIN_MAX, OV2735_ANALOG_GAIN_STEP,
+ OV2735_ANALOG_GAIN_DEFAULT);
+
+ ov2735->test_pattern =
+ v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &ov2735_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(ov2735_test_pattern_menu) - 1,
+ 0, 0, ov2735_test_pattern_menu);
+
+ if (ctrl_hdlr->error) {
+ ret = ctrl_hdlr->error;
+ dev_err(ov2735->dev, "control init failed (%d)\n", ret);
+ goto err_handler_free;
+ }
+
+ ret = v4l2_fwnode_device_parse(ov2735->dev, &props);
+ if (ret)
+ goto err_handler_free;
+
+ ret = v4l2_ctrl_new_fwnode_properties(ctrl_hdlr,
+ &ov2735_ctrl_ops, &props);
+ if (ret)
+ goto err_handler_free;
+
+ ov2735->sd.ctrl_handler = ctrl_hdlr;
+
+ return 0;
+
+err_handler_free:
+ v4l2_ctrl_handler_free(ctrl_hdlr);
+
+ return ret;
+}
+
+static int ov2735_set_pll_ctrl(struct ov2735 *ov2735)
+{
+ const struct ov2735_pll_parameters *pll_parameters;
+ u8 pll_ctrl;
+ u8 pll_outdiv;
+ int ret = 0;
+
+ pll_parameters = &pll_configs[ov2735->link_freq_index];
+
+ /* BIT[7]: pll_clk_sel, BIT[6:2]: pll_nc, BIT[1:0]: pll_mc */
+ pll_ctrl = ((pll_parameters->pll_nc << 2) | (pll_parameters->pll_mc << 0)) &
+ OV2735_PLL_CTRL_ENABLE;
+
+ pll_outdiv = pll_parameters->pll_outdiv;
+
+ ov2735_write(ov2735, OV2735_REG_PLL_CTRL, pll_ctrl, &ret);
+ ov2735_write(ov2735, OV2735_REG_PLL_OUTDIV, pll_outdiv, &ret);
+
+ return ret;
+}
+
+static int ov2735_set_framefmt(struct ov2735 *ov2735,
+ struct v4l2_subdev_state *state)
+{
+ const struct v4l2_mbus_framefmt *format;
+ const struct v4l2_rect *crop;
+ int ret = 0;
+
+ format = v4l2_subdev_state_get_format(state, 0);
+ crop = v4l2_subdev_state_get_crop(state, 0);
+
+ ov2735_write(ov2735, OV2735_REG_V_START, crop->top, &ret);
+ ov2735_write(ov2735, OV2735_REG_V_SIZE, format->height, &ret);
+ ov2735_write(ov2735, OV2735_REG_MIPI_LINE_NUMBER, format->height, &ret);
+ ov2735_write(ov2735, OV2735_REG_H_START, crop->left, &ret);
+ /* OV2735_REG_H_SIZE: Image half horizontal size */
+ ov2735_write(ov2735, OV2735_REG_H_SIZE, (format->width / 2), &ret);
+ ov2735_write(ov2735, OV2735_REG_MIPI_COLOMN_NUMBER, format->width, &ret);
+
+ return ret;
+}
+
+static int ov2735_enable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 pad,
+ u64 streams_mask)
+{
+ struct ov2735 *ov2735 = to_ov2735(sd);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(ov2735->dev);
+ if (ret < 0)
+ return ret;
+
+ /* Apply pll settings */
+ ret = ov2735_set_pll_ctrl(ov2735);
+ if (ret) {
+ dev_err(ov2735->dev, "failed to set frame format: %d\n", ret);
+ goto err_rpm_put;
+ }
+
+ ret = ov2735_multi_reg_write(ov2735, ov2735_common_regs,
+ ARRAY_SIZE(ov2735_common_regs), NULL);
+ if (ret) {
+ dev_err(ov2735->dev, "failed to write common registers\n");
+ goto err_rpm_put;
+ }
+
+ /* Apply format settings */
+ ret = ov2735_set_framefmt(ov2735, state);
+ if (ret) {
+ dev_err(ov2735->dev, "failed to set frame format: %d\n", ret);
+ goto err_rpm_put;
+ }
+
+	/* Apply user-configured control values */
+ ret = __v4l2_ctrl_handler_setup(ov2735->sd.ctrl_handler);
+ if (ret)
+ goto err_rpm_put;
+
+ ret = ov2735_write(ov2735, OV2735_REG_STREAM_CTRL,
+ OV2735_STREAM_CTRL_ON, NULL);
+ if (ret)
+ goto err_rpm_put;
+
+ return 0;
+
+err_rpm_put:
+ pm_runtime_put(ov2735->dev);
+ return ret;
+}
+
+static int ov2735_disable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 pad,
+ u64 streams_mask)
+{
+ struct ov2735 *ov2735 = to_ov2735(sd);
+ int ret;
+
+ ret = ov2735_write(ov2735, OV2735_REG_STREAM_CTRL,
+ OV2735_STREAM_CTRL_OFF, NULL);
+ if (ret)
+ dev_err(ov2735->dev, "%s failed to set stream\n", __func__);
+
+ pm_runtime_put(ov2735->dev);
+
+ return ret;
+}
+
+static int ov2735_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_selection *sel)
+{
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ sel->r = *v4l2_subdev_state_get_crop(sd_state, 0);
+ return 0;
+ case V4L2_SEL_TGT_NATIVE_SIZE:
+ sel->r = ov2735_native_area;
+ return 0;
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ sel->r = ov2735_active_area;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ov2735_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index)
+ return -EINVAL;
+
+ code->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+
+ return 0;
+}
+
+static int ov2735_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ if (fse->index >= ARRAY_SIZE(supported_modes))
+ return -EINVAL;
+
+ if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10)
+ return -EINVAL;
+
+ fse->min_width = supported_modes[fse->index].width;
+ fse->max_width = fse->min_width;
+ fse->min_height = supported_modes[fse->index].height;
+ fse->max_height = fse->min_height;
+
+ return 0;
+}
+
+static int ov2735_set_framing_limits(struct ov2735 *ov2735,
+ const struct ov2735_mode *mode)
+{
+ u32 hblank, vblank_def;
+ int ret;
+
+ hblank = mode->hts_def - mode->width;
+ ret = __v4l2_ctrl_modify_range(ov2735->hblank, hblank, hblank, 1,
+ hblank);
+ if (ret)
+ return ret;
+
+ vblank_def = mode->vts_def - mode->height;
+ return __v4l2_ctrl_modify_range(ov2735->vblank, vblank_def,
+ OV2735_FRAME_LENGTH_MAX - mode->height,
+ 1, vblank_def);
+}
+
+static int ov2735_set_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct v4l2_mbus_framefmt *format;
+ const struct ov2735_mode *mode;
+ struct v4l2_rect *crop;
+ struct ov2735 *ov2735 = to_ov2735(sd);
+ int ret;
+
+ format = v4l2_subdev_state_get_format(sd_state, 0);
+
+ mode = v4l2_find_nearest_size(supported_modes,
+ ARRAY_SIZE(supported_modes),
+ width, height,
+ fmt->format.width, fmt->format.height);
+
+ fmt->format.width = mode->width;
+ fmt->format.height = mode->height;
+ fmt->format.field = V4L2_FIELD_NONE;
+ fmt->format.colorspace = V4L2_COLORSPACE_RAW;
+ fmt->format.quantization = V4L2_QUANTIZATION_FULL_RANGE;
+ fmt->format.xfer_func = V4L2_XFER_FUNC_NONE;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ ret = ov2735_set_framing_limits(ov2735, mode);
+ if (ret)
+ return ret;
+ }
+
+ *format = fmt->format;
+
+ /* Initialize crop rectangle */
+ crop = v4l2_subdev_state_get_crop(sd_state, 0);
+ *crop = mode->crop;
+
+ return 0;
+}
+
+static int ov2735_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
+{
+ struct v4l2_subdev_format fmt = {
+ .which = V4L2_SUBDEV_FORMAT_TRY,
+ .format = {
+ .code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .width = supported_modes[0].width,
+ .height = supported_modes[0].height,
+ },
+ };
+
+ ov2735_set_pad_format(sd, state, &fmt);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops ov2735_video_ops = {
+ .s_stream = v4l2_subdev_s_stream_helper,
+};
+
+static const struct v4l2_subdev_pad_ops ov2735_pad_ops = {
+ .enum_mbus_code = ov2735_enum_mbus_code,
+ .get_fmt = v4l2_subdev_get_fmt,
+ .set_fmt = ov2735_set_pad_format,
+ .get_selection = ov2735_get_selection,
+ .enum_frame_size = ov2735_enum_frame_size,
+ .enable_streams = ov2735_enable_streams,
+ .disable_streams = ov2735_disable_streams,
+};
+
+static const struct v4l2_subdev_ops ov2735_subdev_ops = {
+ .video = &ov2735_video_ops,
+ .pad = &ov2735_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops ov2735_internal_ops = {
+ .init_state = ov2735_init_state,
+};
+
+static int ov2735_power_on(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct ov2735 *ov2735 = to_ov2735(sd);
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ov2735_supply_name),
+ ov2735->supplies);
+ if (ret) {
+ dev_err(ov2735->dev, "failed to enable regulators\n");
+ return ret;
+ }
+
+ gpiod_set_value_cansleep(ov2735->enable_gpio, 1);
+ /* T4: delay from PWDN pulling low to RSTB pulling high */
+ fsleep(4 * USEC_PER_MSEC);
+
+ ret = clk_prepare_enable(ov2735->xclk);
+ if (ret) {
+ dev_err(ov2735->dev, "failed to enable clock\n");
+ goto err_regulator_off;
+ }
+
+ gpiod_set_value_cansleep(ov2735->reset_gpio, 0);
+ /* T5: delay from RSTB pulling high to first I2C command */
+ fsleep(5 * USEC_PER_MSEC);
+
+ return 0;
+
+err_regulator_off:
+ regulator_bulk_disable(ARRAY_SIZE(ov2735_supply_name), ov2735->supplies);
+ return ret;
+}
+
+static int ov2735_power_off(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct ov2735 *ov2735 = to_ov2735(sd);
+
+ gpiod_set_value_cansleep(ov2735->enable_gpio, 0);
+ clk_disable_unprepare(ov2735->xclk);
+ gpiod_set_value_cansleep(ov2735->reset_gpio, 1);
+ regulator_bulk_disable(ARRAY_SIZE(ov2735_supply_name), ov2735->supplies);
+
+ return 0;
+}
+
+static int ov2735_get_regulators(struct ov2735 *ov2735)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(ov2735_supply_name); i++)
+ ov2735->supplies[i].supply = ov2735_supply_name[i];
+
+ return devm_regulator_bulk_get(ov2735->dev,
+ ARRAY_SIZE(ov2735_supply_name),
+ ov2735->supplies);
+}
+
+static int ov2735_identify_module(struct ov2735 *ov2735)
+{
+ u64 chip_id;
+ int ret;
+
+ ret = ov2735_read(ov2735, OV2735_REG_CHIPID, &chip_id, NULL);
+ if (ret)
+ return dev_err_probe(ov2735->dev, ret,
+ "failed to read chip id %x\n",
+ OV2735_CHIPID);
+
+ if (chip_id != OV2735_CHIPID)
+ return dev_err_probe(ov2735->dev, -EIO,
+ "chip id mismatch: %x!=%llx\n",
+ OV2735_CHIPID, chip_id);
+
+ return 0;
+}
+
+static int ov2735_parse_endpoint(struct ov2735 *ov2735)
+{
+ struct v4l2_fwnode_endpoint bus_cfg = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY,
+ };
+ struct fwnode_handle *ep;
+ unsigned long link_freq_bitmap;
+ int ret;
+
+ ep = fwnode_graph_get_next_endpoint(dev_fwnode(ov2735->dev), NULL);
+ if (!ep)
+ return dev_err_probe(ov2735->dev, -ENXIO,
+ "Failed to get next endpoint\n");
+
+ ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg);
+ fwnode_handle_put(ep);
+ if (ret)
+ return ret;
+
+ if (bus_cfg.bus.mipi_csi2.num_data_lanes != 2) {
+ ret = dev_err_probe(ov2735->dev, -EINVAL,
+ "only 2 data lanes are supported\n");
+ goto error_out;
+ }
+
+ ret = v4l2_link_freq_to_bitmap(ov2735->dev, bus_cfg.link_frequencies,
+ bus_cfg.nr_of_link_frequencies,
+ link_freq_menu_items,
+ ARRAY_SIZE(link_freq_menu_items),
+ &link_freq_bitmap);
+ if (ret) {
+		ret = dev_err_probe(ov2735->dev, -EINVAL,
+				    "only the 420 MHz link frequency is supported\n");
+ goto error_out;
+ }
+
+ ov2735->link_freq_index = __ffs(link_freq_bitmap);
+
+error_out:
+ v4l2_fwnode_endpoint_free(&bus_cfg);
+
+ return ret;
+}
+
+static int ov2735_probe(struct i2c_client *client)
+{
+ struct ov2735 *ov2735;
+ unsigned int xclk_freq;
+ int ret;
+
+ ov2735 = devm_kzalloc(&client->dev, sizeof(*ov2735), GFP_KERNEL);
+ if (!ov2735)
+ return -ENOMEM;
+
+ ov2735->dev = &client->dev;
+
+ v4l2_i2c_subdev_init(&ov2735->sd, client, &ov2735_subdev_ops);
+ ov2735->sd.internal_ops = &ov2735_internal_ops;
+
+ ov2735->cci = devm_cci_regmap_init_i2c(client, 8);
+ if (IS_ERR(ov2735->cci))
+ return dev_err_probe(ov2735->dev, PTR_ERR(ov2735->cci),
+ "failed to initialize CCI\n");
+
+	/* Set current page to 0 */
+ ov2735->current_page = 0;
+
+ ret = devm_mutex_init(ov2735->dev, &ov2735->page_lock);
+ if (ret)
+ return dev_err_probe(ov2735->dev, ret,
+ "Failed to initialize lock\n");
+
+ /* Get system clock (xvclk) */
+ ov2735->xclk = devm_v4l2_sensor_clk_get(ov2735->dev, NULL);
+ if (IS_ERR(ov2735->xclk))
+ return dev_err_probe(ov2735->dev, PTR_ERR(ov2735->xclk),
+ "failed to get xclk\n");
+
+ xclk_freq = clk_get_rate(ov2735->xclk);
+ if (xclk_freq != OV2735_XCLK_FREQ)
+ return dev_err_probe(ov2735->dev, -EINVAL,
+ "xclk frequency not supported: %u Hz\n",
+ xclk_freq);
+
+ ret = ov2735_get_regulators(ov2735);
+ if (ret)
+ return dev_err_probe(ov2735->dev, ret,
+ "failed to get regulators\n");
+
+ ret = ov2735_parse_endpoint(ov2735);
+ if (ret)
+ return dev_err_probe(ov2735->dev, ret,
+ "failed to parse endpoint configuration\n");
+
+ ov2735->reset_gpio = devm_gpiod_get_optional(ov2735->dev,
+ "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ov2735->reset_gpio))
+ return dev_err_probe(ov2735->dev, PTR_ERR(ov2735->reset_gpio),
+ "failed to get reset GPIO\n");
+
+ ov2735->enable_gpio = devm_gpiod_get_optional(ov2735->dev,
+ "enable", GPIOD_OUT_LOW);
+ if (IS_ERR(ov2735->enable_gpio))
+ return dev_err_probe(ov2735->dev, PTR_ERR(ov2735->enable_gpio),
+ "failed to get enable GPIO\n");
+
+ ret = ov2735_power_on(ov2735->dev);
+ if (ret)
+ return ret;
+
+ ret = ov2735_identify_module(ov2735);
+ if (ret)
+ goto error_power_off;
+
+ ret = ov2735_init_controls(ov2735);
+ if (ret)
+ goto error_power_off;
+
+ /* Initialize subdev */
+ ov2735->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ ov2735->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ ov2735->pad.flags = MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&ov2735->sd.entity, 1, &ov2735->pad);
+ if (ret) {
+ dev_err_probe(ov2735->dev, ret, "failed to init entity pads\n");
+ goto error_handler_free;
+ }
+
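+	/* Serialize subdev state with the control handler lock */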
+ ov2735->sd.state_lock = ov2735->handler.lock;
+ ret = v4l2_subdev_init_finalize(&ov2735->sd);
+ if (ret) {
+ dev_err_probe(ov2735->dev, ret, "subdev init error\n");
+ goto error_media_entity;
+ }
+
+ ret = devm_pm_runtime_get_noresume(ov2735->dev);
+ if (ret) {
+ dev_err_probe(ov2735->dev, ret,
+ "failed to get runtime PM noresume\n");
+ goto error_subdev_cleanup;
+ }
+
+ ret = devm_pm_runtime_set_active_enabled(ov2735->dev);
+ if (ret) {
+ dev_err_probe(ov2735->dev, ret,
+ "failed to set runtime PM active+enabled\n");
+ goto error_subdev_cleanup;
+ }
+
+ ret = v4l2_async_register_subdev_sensor(&ov2735->sd);
+ if (ret) {
+ dev_err_probe(ov2735->dev, ret,
+ "failed to register ov2735 sub-device\n");
+ goto error_subdev_cleanup;
+ }
+
+ return 0;
+
+error_subdev_cleanup:
+ v4l2_subdev_cleanup(&ov2735->sd);
+
+error_media_entity:
+ media_entity_cleanup(&ov2735->sd.entity);
+
+error_handler_free:
+ v4l2_ctrl_handler_free(ov2735->sd.ctrl_handler);
+
+error_power_off:
+ ov2735_power_off(ov2735->dev);
+
+ return ret;
+}
+
+static void ov2735_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov2735 *ov2735 = to_ov2735(sd);
+
+ v4l2_async_unregister_subdev(sd);
+ v4l2_subdev_cleanup(&ov2735->sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(ov2735->sd.ctrl_handler);
+}
+
+static DEFINE_RUNTIME_DEV_PM_OPS(ov2735_pm_ops,
+ ov2735_power_off, ov2735_power_on, NULL);
+
+static const struct of_device_id ov2735_id[] = {
+ { .compatible = "ovti,ov2735" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ov2735_id);
+
+static struct i2c_driver ov2735_driver = {
+ .driver = {
+ .name = "ov2735",
+ .pm = pm_ptr(&ov2735_pm_ops),
+ .of_match_table = ov2735_id,
+ },
+ .probe = ov2735_probe,
+ .remove = ov2735_remove,
+};
+module_i2c_driver(ov2735_driver);
+
+MODULE_DESCRIPTION("OV2735 Camera Sensor Driver");
+MODULE_AUTHOR("Hardevsinh Palaniya <hardevsinh.palaniya@siliconsignals.io>");
+MODULE_AUTHOR("Himanshu Bhavani <himanshu.bhavani@siliconsignals.io>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/i2c/ov2740.c b/drivers/media/i2c/ov2740.c
index 4e959534e6e7..fb590dfadda1 100644
--- a/drivers/media/i2c/ov2740.c
+++ b/drivers/media/i2c/ov2740.c
@@ -1,17 +1,17 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Intel Corporation.
-#include <linux/unaligned.h>
#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/module.h>
-#include <linux/pm_runtime.h>
#include <linux/nvmem-provider.h>
+#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
+#include <linux/unaligned.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
@@ -519,6 +519,8 @@ static const struct ov2740_mode supported_modes_180mhz[] = {
};
struct ov2740 {
+ struct device *dev;
+
struct v4l2_subdev sd;
struct media_pad pad;
struct v4l2_ctrl_handler ctrl_handler;
@@ -616,7 +618,6 @@ static int ov2740_write_reg(struct ov2740 *ov2740, u16 reg, u16 len, u32 val)
static int ov2740_write_reg_list(struct ov2740 *ov2740,
const struct ov2740_reg_list *r_list)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov2740->sd);
unsigned int i;
int ret;
@@ -624,7 +625,7 @@ static int ov2740_write_reg_list(struct ov2740 *ov2740,
ret = ov2740_write_reg(ov2740, r_list->regs[i].address, 1,
r_list->regs[i].val);
if (ret) {
- dev_err_ratelimited(&client->dev,
+ dev_err_ratelimited(ov2740->dev,
"write reg 0x%4.4x return err = %d\n",
r_list->regs[i].address, ret);
return ret;
@@ -636,7 +637,6 @@ static int ov2740_write_reg_list(struct ov2740 *ov2740,
static int ov2740_identify_module(struct ov2740 *ov2740)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov2740->sd);
int ret;
u32 val;
@@ -648,12 +648,12 @@ static int ov2740_identify_module(struct ov2740 *ov2740)
return ret;
if (val != OV2740_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x != %x\n",
+ dev_err(ov2740->dev, "chip id mismatch: %x != %x\n",
OV2740_CHIP_ID, val);
return -ENXIO;
}
- dev_dbg(&client->dev, "chip id: 0x%x\n", val);
+ dev_dbg(ov2740->dev, "chip id: 0x%x\n", val);
ov2740->identified = true;
@@ -704,7 +704,6 @@ static int ov2740_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct ov2740 *ov2740 = container_of(ctrl->handler,
struct ov2740, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&ov2740->sd);
s64 exposure_max;
int ret;
@@ -720,7 +719,7 @@ static int ov2740_set_ctrl(struct v4l2_ctrl *ctrl)
}
/* V4L2 controls values will be applied only when power is already up */
- if (!pm_runtime_get_if_in_use(&client->dev))
+ if (!pm_runtime_get_if_in_use(ov2740->dev))
return 0;
switch (ctrl->id) {
@@ -753,7 +752,7 @@ static int ov2740_set_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov2740->dev);
return ret;
}
@@ -764,7 +763,6 @@ static const struct v4l2_ctrl_ops ov2740_ctrl_ops = {
static int ov2740_init_controls(struct ov2740 *ov2740)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov2740->sd);
struct v4l2_ctrl_handler *ctrl_hdlr;
s64 exposure_max, h_blank, pixel_rate;
u32 vblank_min, vblank_max, vblank_default;
@@ -821,7 +819,7 @@ static int ov2740_init_controls(struct ov2740 *ov2740)
ARRAY_SIZE(ov2740_test_pattern_menu) - 1,
0, 0, ov2740_test_pattern_menu);
- ret = v4l2_fwnode_device_parse(&client->dev, &props);
+ ret = v4l2_fwnode_device_parse(ov2740->dev, &props);
if (ret) {
v4l2_ctrl_handler_free(ctrl_hdlr);
return ret;
@@ -940,7 +938,6 @@ err:
static int ov2740_start_streaming(struct ov2740 *ov2740)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov2740->sd);
const struct ov2740_reg_list *reg_list;
int link_freq_index;
int ret;
@@ -955,7 +952,7 @@ static int ov2740_start_streaming(struct ov2740 *ov2740)
/* Reset the sensor */
ret = ov2740_write_reg(ov2740, 0x0103, 1, 0x01);
if (ret) {
- dev_err(&client->dev, "failed to reset\n");
+ dev_err(ov2740->dev, "failed to reset\n");
return ret;
}
@@ -965,14 +962,14 @@ static int ov2740_start_streaming(struct ov2740 *ov2740)
reg_list = &link_freq_configs[link_freq_index].reg_list;
ret = ov2740_write_reg_list(ov2740, reg_list);
if (ret) {
- dev_err(&client->dev, "failed to set plls\n");
+ dev_err(ov2740->dev, "failed to set plls\n");
return ret;
}
reg_list = &ov2740->cur_mode->reg_list;
ret = ov2740_write_reg_list(ov2740, reg_list);
if (ret) {
- dev_err(&client->dev, "failed to set mode\n");
+ dev_err(ov2740->dev, "failed to set mode\n");
return ret;
}
@@ -983,31 +980,28 @@ static int ov2740_start_streaming(struct ov2740 *ov2740)
ret = ov2740_write_reg(ov2740, OV2740_REG_MODE_SELECT, 1,
OV2740_MODE_STREAMING);
if (ret)
- dev_err(&client->dev, "failed to start streaming\n");
+ dev_err(ov2740->dev, "failed to start streaming\n");
return ret;
}
static void ov2740_stop_streaming(struct ov2740 *ov2740)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov2740->sd);
-
if (ov2740_write_reg(ov2740, OV2740_REG_MODE_SELECT, 1,
OV2740_MODE_STANDBY))
- dev_err(&client->dev, "failed to stop streaming\n");
+ dev_err(ov2740->dev, "failed to stop streaming\n");
}
static int ov2740_set_stream(struct v4l2_subdev *sd, int enable)
{
struct ov2740 *ov2740 = to_ov2740(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
struct v4l2_subdev_state *sd_state;
int ret = 0;
sd_state = v4l2_subdev_lock_and_get_active_state(&ov2740->sd);
if (enable) {
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(ov2740->dev);
if (ret < 0)
goto out_unlock;
@@ -1015,11 +1009,11 @@ static int ov2740_set_stream(struct v4l2_subdev *sd, int enable)
if (ret) {
enable = 0;
ov2740_stop_streaming(ov2740);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov2740->dev);
}
} else {
ov2740_stop_streaming(ov2740);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov2740->dev);
}
out_unlock:
@@ -1131,16 +1125,14 @@ static const struct media_entity_operations ov2740_subdev_entity_ops = {
.link_validate = v4l2_subdev_link_validate,
};
-static int ov2740_check_hwcfg(struct device *dev)
+static int ov2740_check_hwcfg(struct ov2740 *ov2740)
{
- struct v4l2_subdev *sd = dev_get_drvdata(dev);
- struct ov2740 *ov2740 = to_ov2740(sd);
+ struct device *dev = ov2740->dev;
struct fwnode_handle *ep;
struct fwnode_handle *fwnode = dev_fwnode(dev);
struct v4l2_fwnode_endpoint bus_cfg = {
.bus_type = V4L2_MBUS_CSI2_DPHY
};
- u32 mclk;
int ret;
unsigned int i, j;
@@ -1153,20 +1145,6 @@ static int ov2740_check_hwcfg(struct device *dev)
return dev_err_probe(dev, -EPROBE_DEFER,
"waiting for fwnode graph endpoint\n");
- ret = fwnode_property_read_u32(fwnode, "clock-frequency", &mclk);
- if (ret) {
- fwnode_handle_put(ep);
- return dev_err_probe(dev, ret,
- "reading clock-frequency property\n");
- }
-
- if (mclk != OV2740_MCLK) {
- fwnode_handle_put(ep);
- return dev_err_probe(dev, -EINVAL,
- "external clock %d is not supported\n",
- mclk);
- }
-
ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg);
fwnode_handle_put(ep);
if (ret)
@@ -1270,7 +1248,7 @@ static int ov2740_register_nvmem(struct i2c_client *client,
struct regmap_config regmap_config = { };
struct nvmem_config nvmem_config = { };
struct regmap *regmap;
- struct device *dev = &client->dev;
+ struct device *dev = ov2740->dev;
nvm = devm_kzalloc(dev, sizeof(*nvm), GFP_KERNEL);
if (!nvm)
@@ -1349,6 +1327,7 @@ static int ov2740_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct ov2740 *ov2740;
+ unsigned long freq;
bool full_power;
unsigned int i;
int ret;
@@ -1357,10 +1336,12 @@ static int ov2740_probe(struct i2c_client *client)
if (!ov2740)
return -ENOMEM;
+ ov2740->dev = &client->dev;
+
v4l2_i2c_subdev_init(&ov2740->sd, client, &ov2740_subdev_ops);
ov2740->sd.internal_ops = &ov2740_internal_ops;
- ret = ov2740_check_hwcfg(dev);
+ ret = ov2740_check_hwcfg(ov2740);
if (ret)
return ret;
@@ -1384,11 +1365,17 @@ static int ov2740_probe(struct i2c_client *client)
msleep(20);
}
- ov2740->clk = devm_clk_get_optional(dev, "clk");
+ ov2740->clk = devm_v4l2_sensor_clk_get(dev, "clk");
if (IS_ERR(ov2740->clk))
return dev_err_probe(dev, PTR_ERR(ov2740->clk),
"failed to get clock\n");
+ freq = clk_get_rate(ov2740->clk);
+ if (freq != OV2740_MCLK)
+ return dev_err_probe(dev, -EINVAL,
+ "external clock %lu is not supported\n",
+ freq);
+
for (i = 0; i < ARRAY_SIZE(ov2740_supply_name); i++)
ov2740->supplies[i].supply = ov2740_supply_name[i];
@@ -1397,7 +1384,7 @@ static int ov2740_probe(struct i2c_client *client)
if (ret)
return dev_err_probe(dev, ret, "failed to get regulators\n");
- full_power = acpi_dev_state_d0(&client->dev);
+ full_power = acpi_dev_state_d0(ov2740->dev);
if (full_power) {
/* ACPI does not always clear the reset GPIO / enable the clock */
ret = ov2740_resume(dev);
@@ -1435,9 +1422,9 @@ static int ov2740_probe(struct i2c_client *client)
/* Set the device's state to active if it's in D0 state. */
if (full_power)
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
- pm_runtime_idle(&client->dev);
+ pm_runtime_set_active(ov2740->dev);
+ pm_runtime_enable(ov2740->dev);
+ pm_runtime_idle(ov2740->dev);
ret = v4l2_async_register_subdev_sensor(&ov2740->sd);
if (ret < 0) {
@@ -1447,13 +1434,13 @@ static int ov2740_probe(struct i2c_client *client)
ret = ov2740_register_nvmem(client, ov2740);
if (ret)
- dev_warn(&client->dev, "register nvmem failed, ret %d\n", ret);
+ dev_warn(ov2740->dev, "register nvmem failed, ret %d\n", ret);
return 0;
probe_error_v4l2_subdev_cleanup:
- pm_runtime_disable(&client->dev);
- pm_runtime_set_suspended(&client->dev);
+ pm_runtime_disable(ov2740->dev);
+ pm_runtime_set_suspended(ov2740->dev);
v4l2_subdev_cleanup(&ov2740->sd);
probe_error_media_entity_cleanup:
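
For reference, the clock-handling pattern these conversions converge on looks roughly like this (a minimal sketch, assuming devm_v4l2_sensor_clk_get() behaves as the call sites here imply, i.e. it also covers ACPI systems that only describe a "clock-frequency" property; the helper name ov2740_get_mclk() is illustrative, other identifiers mirror the ov2740 driver). The same conversions also cache a struct device pointer (ov2740->dev) so PM and logging calls no longer need to reach through the i2c_client:

static int ov2740_get_mclk(struct ov2740 *ov2740)
{
	unsigned long freq;

	/* Covers both DT-described clocks and ACPI "clock-frequency" */
	ov2740->clk = devm_v4l2_sensor_clk_get(ov2740->dev, "clk");
	if (IS_ERR(ov2740->clk))
		return dev_err_probe(ov2740->dev, PTR_ERR(ov2740->clk),
				     "failed to get clock\n");

	/* Validate the rate instead of parsing the property by hand */
	freq = clk_get_rate(ov2740->clk);
	if (freq != OV2740_MCLK)
		return dev_err_probe(ov2740->dev, -EINVAL,
				     "external clock %lu is not supported\n",
				     freq);

	return 0;
}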
diff --git a/drivers/media/i2c/ov4689.c b/drivers/media/i2c/ov4689.c
index 7d740ad3926f..a59d25b09b5b 100644
--- a/drivers/media/i2c/ov4689.c
+++ b/drivers/media/i2c/ov4689.c
@@ -907,20 +907,12 @@ static int ov4689_probe(struct i2c_client *client)
ov4689->cur_mode = &supported_modes[OV4689_MODE_2688_1520];
- ov4689->xvclk = devm_clk_get_optional(dev, NULL);
+ ov4689->xvclk = devm_v4l2_sensor_clk_get(dev, NULL);
if (IS_ERR(ov4689->xvclk))
return dev_err_probe(dev, PTR_ERR(ov4689->xvclk),
"Failed to get external clock\n");
- if (!ov4689->xvclk) {
- dev_dbg(dev,
- "No clock provided, using clock-frequency property\n");
- device_property_read_u32(dev, "clock-frequency",
- &ov4689->clock_rate);
- } else {
- ov4689->clock_rate = clk_get_rate(ov4689->xvclk);
- }
-
+ ov4689->clock_rate = clk_get_rate(ov4689->xvclk);
if (ov4689->clock_rate != OV4689_XVCLK_FREQ) {
dev_err(dev,
"External clock rate mismatch: got %d Hz, expected %d Hz\n",
diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
index 84198613381d..85ecc23b3587 100644
--- a/drivers/media/i2c/ov5640.c
+++ b/drivers/media/i2c/ov5640.c
@@ -3895,11 +3895,10 @@ static int ov5640_probe(struct i2c_client *client)
ov5640_dvp_default_fmt;
/* get system clock (xclk) */
- sensor->xclk = devm_clk_get(dev, "xclk");
- if (IS_ERR(sensor->xclk)) {
- dev_err(dev, "failed to get xclk\n");
- return PTR_ERR(sensor->xclk);
- }
+ sensor->xclk = devm_v4l2_sensor_clk_get(dev, "xclk");
+ if (IS_ERR(sensor->xclk))
+ return dev_err_probe(dev, PTR_ERR(sensor->xclk),
+ "failed to get xclk\n");
sensor->xclk_freq = clk_get_rate(sensor->xclk);
if (sensor->xclk_freq < OV5640_XCLK_MIN ||
diff --git a/drivers/media/i2c/ov5645.c b/drivers/media/i2c/ov5645.c
index 58c846a44376..b10d408034a1 100644
--- a/drivers/media/i2c/ov5645.c
+++ b/drivers/media/i2c/ov5645.c
@@ -1042,27 +1042,18 @@ static int ov5645_probe(struct i2c_client *client)
"invalid bus type, must be CSI2\n");
/* get system clock (xclk) */
- ov5645->xclk = devm_clk_get(dev, NULL);
+ ov5645->xclk = devm_v4l2_sensor_clk_get_legacy(dev, NULL, false, 0);
if (IS_ERR(ov5645->xclk))
return dev_err_probe(dev, PTR_ERR(ov5645->xclk),
"could not get xclk");
- ret = of_property_read_u32(dev->of_node, "clock-frequency", &xclk_freq);
- if (ret)
- return dev_err_probe(dev, ret,
- "could not get xclk frequency\n");
-
/* external clock must be 24MHz, allow 1% tolerance */
+ xclk_freq = clk_get_rate(ov5645->xclk);
if (xclk_freq < 23760000 || xclk_freq > 24240000)
return dev_err_probe(dev, -EINVAL,
"unsupported xclk frequency %u\n",
xclk_freq);
- ret = clk_set_rate(ov5645->xclk, xclk_freq);
- if (ret)
- return dev_err_probe(dev, ret,
- "could not set xclk frequency\n");
-
for (i = 0; i < OV5645_NUM_SUPPLIES; i++)
ov5645->supplies[i].supply = ov5645_supply_name[i];
diff --git a/drivers/media/i2c/ov5647.c b/drivers/media/i2c/ov5647.c
index a727beb9d57e..e193fef4fced 100644
--- a/drivers/media/i2c/ov5647.c
+++ b/drivers/media/i2c/ov5647.c
@@ -1398,11 +1398,10 @@ static int ov5647_probe(struct i2c_client *client)
}
}
- sensor->xclk = devm_clk_get(dev, NULL);
- if (IS_ERR(sensor->xclk)) {
- dev_err(dev, "could not get xclk");
- return PTR_ERR(sensor->xclk);
- }
+ sensor->xclk = devm_v4l2_sensor_clk_get(dev, NULL);
+ if (IS_ERR(sensor->xclk))
+ return dev_err_probe(dev, PTR_ERR(sensor->xclk),
+ "could not get xclk\n");
xclk_freq = clk_get_rate(sensor->xclk);
if (xclk_freq != 25000000) {
diff --git a/drivers/media/i2c/ov5648.c b/drivers/media/i2c/ov5648.c
index 4b86d2631bd1..f0b839cd65f1 100644
--- a/drivers/media/i2c/ov5648.c
+++ b/drivers/media/i2c/ov5648.c
@@ -1061,8 +1061,8 @@ static int ov5648_sw_standby(struct ov5648_sensor *sensor, int standby)
static int ov5648_chip_id_check(struct ov5648_sensor *sensor)
{
- u16 regs[] = { OV5648_CHIP_ID_H_REG, OV5648_CHIP_ID_L_REG };
- u8 values[] = { OV5648_CHIP_ID_H_VALUE, OV5648_CHIP_ID_L_VALUE };
+ static const u16 regs[] = { OV5648_CHIP_ID_H_REG, OV5648_CHIP_ID_L_REG };
+ static const u8 values[] = { OV5648_CHIP_ID_H_VALUE, OV5648_CHIP_ID_L_VALUE };
unsigned int i;
u8 value;
int ret;
@@ -2521,10 +2521,10 @@ static int ov5648_probe(struct i2c_client *client)
/* External Clock */
- sensor->xvclk = devm_clk_get(dev, NULL);
+ sensor->xvclk = devm_v4l2_sensor_clk_get(dev, NULL);
if (IS_ERR(sensor->xvclk)) {
- dev_err(dev, "failed to get external clock\n");
- ret = PTR_ERR(sensor->xvclk);
+ ret = dev_err_probe(dev, PTR_ERR(sensor->xvclk),
+ "failed to get external clock\n");
goto error_endpoint;
}
diff --git a/drivers/media/i2c/ov5670.c b/drivers/media/i2c/ov5670.c
index b9efb2d2276a..04b3183b7bcb 100644
--- a/drivers/media/i2c/ov5670.c
+++ b/drivers/media/i2c/ov5670.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017 Intel Corporation.
-#include <linux/unaligned.h>
#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/delay.h>
@@ -12,6 +11,8 @@
#include <linux/of.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
+#include <linux/unaligned.h>
+
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
@@ -1854,6 +1855,8 @@ static const struct ov5670_mode supported_modes[] = {
};
struct ov5670 {
+ struct device *dev;
+
struct v4l2_subdev sd;
struct media_pad pad;
struct v4l2_fwnode_endpoint endpoint;
@@ -1959,7 +1962,6 @@ static int ov5670_write_reg(struct ov5670 *ov5670, u16 reg, unsigned int len,
static int ov5670_write_regs(struct ov5670 *ov5670,
const struct ov5670_reg *regs, unsigned int len)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov5670->sd);
unsigned int i;
int ret;
@@ -1967,7 +1969,7 @@ static int ov5670_write_regs(struct ov5670 *ov5670,
ret = ov5670_write_reg(ov5670, regs[i].address, 1, regs[i].val);
if (ret) {
dev_err_ratelimited(
- &client->dev,
+ ov5670->dev,
"Failed to write reg 0x%4.4x. error = %d\n",
regs[i].address, ret);
@@ -2032,7 +2034,6 @@ static int ov5670_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct ov5670 *ov5670 = container_of(ctrl->handler,
struct ov5670, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&ov5670->sd);
s64 max;
int ret;
@@ -2048,7 +2049,7 @@ static int ov5670_set_ctrl(struct v4l2_ctrl *ctrl)
}
/* V4L2 controls values will be applied only when power is already up */
- if (!pm_runtime_get_if_in_use(&client->dev))
+ if (!pm_runtime_get_if_in_use(ov5670->dev))
return 0;
switch (ctrl->id) {
@@ -2080,12 +2081,12 @@ static int ov5670_set_ctrl(struct v4l2_ctrl *ctrl)
break;
default:
ret = -EINVAL;
- dev_info(&client->dev, "%s Unhandled id:0x%x, val:0x%x\n",
+ dev_info(ov5670->dev, "%s Unhandled id:0x%x, val:0x%x\n",
__func__, ctrl->id, ctrl->val);
break;
}
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov5670->dev);
return ret;
}
@@ -2099,7 +2100,6 @@ static int ov5670_init_controls(struct ov5670 *ov5670)
{
struct v4l2_mbus_config_mipi_csi2 *bus_mipi_csi2 =
&ov5670->endpoint.bus.mipi_csi2;
- struct i2c_client *client = v4l2_get_subdevdata(&ov5670->sd);
struct v4l2_fwnode_device_properties props;
struct v4l2_ctrl_handler *ctrl_hdlr;
unsigned int lanes_count;
@@ -2177,7 +2177,7 @@ static int ov5670_init_controls(struct ov5670 *ov5670)
goto error;
}
- ret = v4l2_fwnode_device_parse(&client->dev, &props);
+ ret = v4l2_fwnode_device_parse(ov5670->dev, &props);
if (ret)
goto error;
@@ -2350,7 +2350,6 @@ static int ov5670_get_skip_frames(struct v4l2_subdev *sd, u32 *frames)
/* Verify chip ID */
static int ov5670_identify_module(struct ov5670 *ov5670)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov5670->sd);
int ret;
u32 val;
@@ -2363,7 +2362,7 @@ static int ov5670_identify_module(struct ov5670 *ov5670)
return ret;
if (val != OV5670_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
+ dev_err(ov5670->dev, "chip id mismatch: %x!=%x\n",
OV5670_CHIP_ID, val);
return -ENXIO;
}
@@ -2389,7 +2388,6 @@ static int ov5670_mipi_configure(struct ov5670 *ov5670)
/* Prepare streaming by writing default values and customized values */
static int ov5670_start_streaming(struct ov5670 *ov5670)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov5670->sd);
const struct ov5670_reg_list *reg_list;
int link_freq_index;
int ret;
@@ -2402,7 +2400,7 @@ static int ov5670_start_streaming(struct ov5670 *ov5670)
ret = ov5670_write_reg(ov5670, OV5670_REG_SOFTWARE_RST,
OV5670_REG_VALUE_08BIT, OV5670_SOFTWARE_RST);
if (ret) {
- dev_err(&client->dev, "%s failed to set powerup registers\n",
+ dev_err(ov5670->dev, "%s failed to set powerup registers\n",
__func__);
return ret;
}
@@ -2412,7 +2410,7 @@ static int ov5670_start_streaming(struct ov5670 *ov5670)
reg_list = &link_freq_configs[link_freq_index].reg_list;
ret = ov5670_write_reg_list(ov5670, reg_list);
if (ret) {
- dev_err(&client->dev, "%s failed to set plls\n", __func__);
+ dev_err(ov5670->dev, "%s failed to set plls\n", __func__);
return ret;
}
@@ -2420,13 +2418,13 @@ static int ov5670_start_streaming(struct ov5670 *ov5670)
reg_list = &ov5670->cur_mode->reg_list;
ret = ov5670_write_reg_list(ov5670, reg_list);
if (ret) {
- dev_err(&client->dev, "%s failed to set mode\n", __func__);
+ dev_err(ov5670->dev, "%s failed to set mode\n", __func__);
return ret;
}
ret = ov5670_mipi_configure(ov5670);
if (ret) {
- dev_err(&client->dev, "%s failed to configure MIPI\n", __func__);
+ dev_err(ov5670->dev, "%s failed to configure MIPI\n", __func__);
return ret;
}
@@ -2438,7 +2436,7 @@ static int ov5670_start_streaming(struct ov5670 *ov5670)
ret = ov5670_write_reg(ov5670, OV5670_REG_MODE_SELECT,
OV5670_REG_VALUE_08BIT, OV5670_MODE_STREAMING);
if (ret) {
- dev_err(&client->dev, "%s failed to set stream\n", __func__);
+ dev_err(ov5670->dev, "%s failed to set stream\n", __func__);
return ret;
}
@@ -2447,13 +2445,12 @@ static int ov5670_start_streaming(struct ov5670 *ov5670)
static int ov5670_stop_streaming(struct ov5670 *ov5670)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov5670->sd);
int ret;
ret = ov5670_write_reg(ov5670, OV5670_REG_MODE_SELECT,
OV5670_REG_VALUE_08BIT, OV5670_MODE_STANDBY);
if (ret)
- dev_err(&client->dev, "%s failed to set stream\n", __func__);
+ dev_err(ov5670->dev, "%s failed to set stream\n", __func__);
/* Return success even if it was an error, as there is nothing the
* caller can do about it.
@@ -2464,13 +2461,12 @@ static int ov5670_stop_streaming(struct ov5670 *ov5670)
static int ov5670_set_stream(struct v4l2_subdev *sd, int enable)
{
struct ov5670 *ov5670 = to_ov5670(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = 0;
mutex_lock(&ov5670->mutex);
if (enable) {
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(ov5670->dev);
if (ret < 0)
goto unlock_and_return;
@@ -2479,12 +2475,12 @@ static int ov5670_set_stream(struct v4l2_subdev *sd, int enable)
goto error;
} else {
ret = ov5670_stop_streaming(ov5670);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov5670->dev);
}
goto unlock_and_return;
error:
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov5670->dev);
unlock_and_return:
mutex_unlock(&ov5670->mutex);
@@ -2621,26 +2617,23 @@ static const struct media_entity_operations ov5670_subdev_entity_ops = {
static int ov5670_regulators_probe(struct ov5670 *ov5670)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov5670->sd);
unsigned int i;
for (i = 0; i < OV5670_NUM_SUPPLIES; i++)
ov5670->supplies[i].supply = ov5670_supply_names[i];
- return devm_regulator_bulk_get(&client->dev, OV5670_NUM_SUPPLIES,
+ return devm_regulator_bulk_get(ov5670->dev, OV5670_NUM_SUPPLIES,
ov5670->supplies);
}
static int ov5670_gpio_probe(struct ov5670 *ov5670)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov5670->sd);
-
- ov5670->pwdn_gpio = devm_gpiod_get_optional(&client->dev, "powerdown",
+ ov5670->pwdn_gpio = devm_gpiod_get_optional(ov5670->dev, "powerdown",
GPIOD_OUT_LOW);
if (IS_ERR(ov5670->pwdn_gpio))
return PTR_ERR(ov5670->pwdn_gpio);
- ov5670->reset_gpio = devm_gpiod_get_optional(&client->dev, "reset",
+ ov5670->reset_gpio = devm_gpiod_get_optional(ov5670->dev, "reset",
GPIOD_OUT_LOW);
if (IS_ERR(ov5670->reset_gpio))
return PTR_ERR(ov5670->reset_gpio);
@@ -2660,18 +2653,16 @@ static int ov5670_probe(struct i2c_client *client)
if (!ov5670)
return -ENOMEM;
- ov5670->xvclk = devm_clk_get_optional(&client->dev, NULL);
- if (!IS_ERR_OR_NULL(ov5670->xvclk))
- input_clk = clk_get_rate(ov5670->xvclk);
- else if (!ov5670->xvclk || PTR_ERR(ov5670->xvclk) == -ENOENT)
- device_property_read_u32(&client->dev, "clock-frequency",
- &input_clk);
- else
- return dev_err_probe(&client->dev, PTR_ERR(ov5670->xvclk),
+ ov5670->dev = &client->dev;
+
+ ov5670->xvclk = devm_v4l2_sensor_clk_get(ov5670->dev, NULL);
+ if (IS_ERR(ov5670->xvclk))
+ return dev_err_probe(ov5670->dev, PTR_ERR(ov5670->xvclk),
"error getting clock\n");
+ input_clk = clk_get_rate(ov5670->xvclk);
if (input_clk != OV5670_XVCLK_FREQ) {
- dev_err(&client->dev,
+ dev_err(ov5670->dev,
"Unsupported clock frequency %u\n", input_clk);
return -EINVAL;
}
@@ -2682,20 +2673,20 @@ static int ov5670_probe(struct i2c_client *client)
ret = ov5670_regulators_probe(ov5670);
if (ret)
- return dev_err_probe(&client->dev, ret, "Regulators probe failed\n");
+ return dev_err_probe(ov5670->dev, ret, "Regulators probe failed\n");
ret = ov5670_gpio_probe(ov5670);
if (ret)
- return dev_err_probe(&client->dev, ret, "GPIO probe failed\n");
+ return dev_err_probe(ov5670->dev, ret, "GPIO probe failed\n");
/*
* Graph Endpoint. If it's missing we defer rather than fail, as this
* sensor is known to co-exist on systems with the IPU3 and so it might
* be created by the ipu-bridge.
*/
- handle = fwnode_graph_get_next_endpoint(dev_fwnode(&client->dev), NULL);
+ handle = fwnode_graph_get_next_endpoint(dev_fwnode(ov5670->dev), NULL);
if (!handle)
- return dev_err_probe(&client->dev, -EPROBE_DEFER,
+ return dev_err_probe(ov5670->dev, -EPROBE_DEFER,
"Endpoint for node get failed\n");
ov5670->endpoint.bus_type = V4L2_MBUS_CSI2_DPHY;
@@ -2704,20 +2695,20 @@ static int ov5670_probe(struct i2c_client *client)
ret = v4l2_fwnode_endpoint_alloc_parse(handle, &ov5670->endpoint);
fwnode_handle_put(handle);
if (ret)
- return dev_err_probe(&client->dev, ret, "Endpoint parse failed\n");
+ return dev_err_probe(ov5670->dev, ret, "Endpoint parse failed\n");
- full_power = acpi_dev_state_d0(&client->dev);
+ full_power = acpi_dev_state_d0(ov5670->dev);
if (full_power) {
- ret = ov5670_runtime_resume(&client->dev);
+ ret = ov5670_runtime_resume(ov5670->dev);
if (ret) {
- dev_err_probe(&client->dev, ret, "Power up failed\n");
+ dev_err_probe(ov5670->dev, ret, "Power up failed\n");
goto error_endpoint;
}
/* Check module identity */
ret = ov5670_identify_module(ov5670);
if (ret) {
- dev_err_probe(&client->dev, ret, "ov5670_identify_module() error\n");
+ dev_err_probe(ov5670->dev, ret, "ov5670_identify_module() error\n");
goto error_power_off;
}
}
@@ -2729,7 +2720,7 @@ static int ov5670_probe(struct i2c_client *client)
ret = ov5670_init_controls(ov5670);
if (ret) {
- dev_err_probe(&client->dev, ret, "ov5670_init_controls() error\n");
+ dev_err_probe(ov5670->dev, ret, "ov5670_init_controls() error\n");
goto error_mutex_destroy;
}
@@ -2742,28 +2733,28 @@ static int ov5670_probe(struct i2c_client *client)
ov5670->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&ov5670->sd.entity, 1, &ov5670->pad);
if (ret) {
- dev_err_probe(&client->dev, ret, "media_entity_pads_init() error\n");
+ dev_err_probe(ov5670->dev, ret, "media_entity_pads_init() error\n");
goto error_handler_free;
}
/* Set the device's state to active if it's in D0 state. */
if (full_power)
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
+ pm_runtime_set_active(ov5670->dev);
+ pm_runtime_enable(ov5670->dev);
/* Async register for subdev */
ret = v4l2_async_register_subdev_sensor(&ov5670->sd);
if (ret < 0) {
- dev_err_probe(&client->dev, ret, "v4l2_async_register_subdev() error\n");
+ dev_err_probe(ov5670->dev, ret, "v4l2_async_register_subdev() error\n");
goto error_pm_disable;
}
- pm_runtime_idle(&client->dev);
+ pm_runtime_idle(ov5670->dev);
return 0;
error_pm_disable:
- pm_runtime_disable(&client->dev);
+ pm_runtime_disable(ov5670->dev);
media_entity_cleanup(&ov5670->sd.entity);
@@ -2775,7 +2766,7 @@ error_mutex_destroy:
error_power_off:
if (full_power)
- ov5670_runtime_suspend(&client->dev);
+ ov5670_runtime_suspend(ov5670->dev);
error_endpoint:
v4l2_fwnode_endpoint_free(&ov5670->endpoint);
@@ -2793,8 +2784,8 @@ static void ov5670_remove(struct i2c_client *client)
v4l2_ctrl_handler_free(sd->ctrl_handler);
mutex_destroy(&ov5670->mutex);
- pm_runtime_disable(&client->dev);
- ov5670_runtime_suspend(&client->dev);
+ pm_runtime_disable(ov5670->dev);
+ ov5670_runtime_suspend(ov5670->dev);
v4l2_fwnode_endpoint_free(&ov5670->endpoint);
}
diff --git a/drivers/media/i2c/ov5675.c b/drivers/media/i2c/ov5675.c
index e7aec281e9a4..30e27d39ee44 100644
--- a/drivers/media/i2c/ov5675.c
+++ b/drivers/media/i2c/ov5675.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Intel Corporation.
-#include <linux/unaligned.h>
#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/delay.h>
@@ -11,6 +10,8 @@
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
+#include <linux/unaligned.h>
+
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
@@ -493,6 +494,8 @@ static const struct ov5675_mode supported_modes[] = {
};
struct ov5675 {
+ struct device *dev;
+
struct v4l2_subdev sd;
struct media_pad pad;
struct v4l2_ctrl_handler ctrl_handler;
@@ -584,7 +587,6 @@ static int ov5675_write_reg(struct ov5675 *ov5675, u16 reg, u16 len, u32 val)
static int ov5675_write_reg_list(struct ov5675 *ov5675,
const struct ov5675_reg_list *r_list)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov5675->sd);
unsigned int i;
int ret;
@@ -592,7 +594,7 @@ static int ov5675_write_reg_list(struct ov5675 *ov5675,
ret = ov5675_write_reg(ov5675, r_list->regs[i].address, 1,
r_list->regs[i].val);
if (ret) {
- dev_err_ratelimited(&client->dev,
+ dev_err_ratelimited(ov5675->dev,
"failed to write reg 0x%4.4x. error = %d",
r_list->regs[i].address, ret);
return ret;
@@ -700,7 +702,6 @@ static int ov5675_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct ov5675 *ov5675 = container_of(ctrl->handler,
struct ov5675, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&ov5675->sd);
s64 exposure_max;
int ret = 0;
@@ -716,7 +717,7 @@ static int ov5675_set_ctrl(struct v4l2_ctrl *ctrl)
}
/* V4L2 controls values will be applied only when power is already up */
- if (!pm_runtime_get_if_in_use(&client->dev))
+ if (!pm_runtime_get_if_in_use(ov5675->dev))
return 0;
switch (ctrl->id) {
@@ -765,7 +766,7 @@ static int ov5675_set_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov5675->dev);
return ret;
}
@@ -776,7 +777,6 @@ static const struct v4l2_ctrl_ops ov5675_ctrl_ops = {
static int ov5675_init_controls(struct ov5675 *ov5675)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov5675->sd);
struct v4l2_fwnode_device_properties props;
struct v4l2_ctrl_handler *ctrl_hdlr;
s64 exposure_max, h_blank;
@@ -839,7 +839,7 @@ static int ov5675_init_controls(struct ov5675 *ov5675)
return ctrl_hdlr->error;
}
- ret = v4l2_fwnode_device_parse(&client->dev, &props);
+ ret = v4l2_fwnode_device_parse(ov5675->dev, &props);
if (ret)
goto error;
@@ -869,7 +869,6 @@ static void ov5675_update_pad_format(const struct ov5675_mode *mode,
static int ov5675_identify_module(struct ov5675 *ov5675)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov5675->sd);
int ret;
u32 val;
@@ -882,7 +881,7 @@ static int ov5675_identify_module(struct ov5675 *ov5675)
return ret;
if (val != OV5675_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%x",
+ dev_err(ov5675->dev, "chip id mismatch: %x!=%x",
OV5675_CHIP_ID, val);
return -ENXIO;
}
@@ -894,7 +893,6 @@ static int ov5675_identify_module(struct ov5675 *ov5675)
static int ov5675_start_streaming(struct ov5675 *ov5675)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov5675->sd);
const struct ov5675_reg_list *reg_list;
int link_freq_index, ret;
@@ -906,14 +904,14 @@ static int ov5675_start_streaming(struct ov5675 *ov5675)
reg_list = &link_freq_configs[link_freq_index].reg_list;
ret = ov5675_write_reg_list(ov5675, reg_list);
if (ret) {
- dev_err(&client->dev, "failed to set plls");
+ dev_err(ov5675->dev, "failed to set plls");
return ret;
}
reg_list = &ov5675->cur_mode->reg_list;
ret = ov5675_write_reg_list(ov5675, reg_list);
if (ret) {
- dev_err(&client->dev, "failed to set mode");
+ dev_err(ov5675->dev, "failed to set mode");
return ret;
}
@@ -924,7 +922,7 @@ static int ov5675_start_streaming(struct ov5675 *ov5675)
ret = ov5675_write_reg(ov5675, OV5675_REG_MODE_SELECT,
OV5675_REG_VALUE_08BIT, OV5675_MODE_STREAMING);
if (ret) {
- dev_err(&client->dev, "failed to set stream");
+ dev_err(ov5675->dev, "failed to set stream");
return ret;
}
@@ -933,22 +931,19 @@ static int ov5675_start_streaming(struct ov5675 *ov5675)
static void ov5675_stop_streaming(struct ov5675 *ov5675)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov5675->sd);
-
if (ov5675_write_reg(ov5675, OV5675_REG_MODE_SELECT,
OV5675_REG_VALUE_08BIT, OV5675_MODE_STANDBY))
- dev_err(&client->dev, "failed to set stream");
+ dev_err(ov5675->dev, "failed to set stream");
}
static int ov5675_set_stream(struct v4l2_subdev *sd, int enable)
{
struct ov5675 *ov5675 = to_ov5675(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = 0;
mutex_lock(&ov5675->mutex);
if (enable) {
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(ov5675->dev);
if (ret < 0) {
mutex_unlock(&ov5675->mutex);
return ret;
@@ -958,11 +953,11 @@ static int ov5675_set_stream(struct v4l2_subdev *sd, int enable)
if (ret) {
enable = 0;
ov5675_stop_streaming(ov5675);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov5675->dev);
}
} else {
ov5675_stop_streaming(ov5675);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov5675->dev);
}
mutex_unlock(&ov5675->mutex);
@@ -1171,8 +1166,9 @@ static const struct v4l2_subdev_internal_ops ov5675_internal_ops = {
.open = ov5675_open,
};
-static int ov5675_get_hwcfg(struct ov5675 *ov5675, struct device *dev)
+static int ov5675_get_hwcfg(struct ov5675 *ov5675)
{
+ struct device *dev = ov5675->dev;
struct fwnode_handle *ep;
struct fwnode_handle *fwnode = dev_fwnode(dev);
struct v4l2_fwnode_endpoint bus_cfg = {
@@ -1185,24 +1181,13 @@ static int ov5675_get_hwcfg(struct ov5675 *ov5675, struct device *dev)
if (!fwnode)
return -ENXIO;
- ov5675->xvclk = devm_clk_get_optional(dev, NULL);
+ ov5675->xvclk = devm_v4l2_sensor_clk_get(dev, NULL);
if (IS_ERR(ov5675->xvclk))
return dev_err_probe(dev, PTR_ERR(ov5675->xvclk),
"failed to get xvclk: %ld\n",
PTR_ERR(ov5675->xvclk));
- if (ov5675->xvclk) {
- xvclk_rate = clk_get_rate(ov5675->xvclk);
- } else {
- ret = fwnode_property_read_u32(fwnode, "clock-frequency",
- &xvclk_rate);
-
- if (ret) {
- dev_err(dev, "can't get clock frequency");
- return ret;
- }
- }
-
+ xvclk_rate = clk_get_rate(ov5675->xvclk);
if (xvclk_rate != OV5675_XVCLK_19_2) {
dev_err(dev, "external clock rate %u is unsupported",
xvclk_rate);
@@ -1276,12 +1261,12 @@ static void ov5675_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
v4l2_ctrl_handler_free(sd->ctrl_handler);
- pm_runtime_disable(&client->dev);
+ pm_runtime_disable(ov5675->dev);
mutex_destroy(&ov5675->mutex);
- if (!pm_runtime_status_suspended(&client->dev))
- ov5675_power_off(&client->dev);
- pm_runtime_set_suspended(&client->dev);
+ if (!pm_runtime_status_suspended(ov5675->dev))
+ ov5675_power_off(ov5675->dev);
+ pm_runtime_set_suspended(ov5675->dev);
}
static int ov5675_probe(struct i2c_client *client)
@@ -1294,23 +1279,25 @@ static int ov5675_probe(struct i2c_client *client)
if (!ov5675)
return -ENOMEM;
- ret = ov5675_get_hwcfg(ov5675, &client->dev);
+ ov5675->dev = &client->dev;
+
+ ret = ov5675_get_hwcfg(ov5675);
if (ret)
return ret;
v4l2_i2c_subdev_init(&ov5675->sd, client, &ov5675_subdev_ops);
- ret = ov5675_power_on(&client->dev);
+ ret = ov5675_power_on(ov5675->dev);
if (ret) {
- dev_err(&client->dev, "failed to power on: %d\n", ret);
+ dev_err(ov5675->dev, "failed to power on: %d\n", ret);
return ret;
}
- full_power = acpi_dev_state_d0(&client->dev);
+ full_power = acpi_dev_state_d0(ov5675->dev);
if (full_power) {
ret = ov5675_identify_module(ov5675);
if (ret) {
- dev_err(&client->dev, "failed to find sensor: %d", ret);
+ dev_err(ov5675->dev, "failed to find sensor: %d", ret);
goto probe_power_off;
}
}
@@ -1319,7 +1306,7 @@ static int ov5675_probe(struct i2c_client *client)
ov5675->cur_mode = &supported_modes[0];
ret = ov5675_init_controls(ov5675);
if (ret) {
- dev_err(&client->dev, "failed to init controls: %d", ret);
+ dev_err(ov5675->dev, "failed to init controls: %d", ret);
goto probe_error_v4l2_ctrl_handler_free;
}
@@ -1330,22 +1317,22 @@ static int ov5675_probe(struct i2c_client *client)
ov5675->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&ov5675->sd.entity, 1, &ov5675->pad);
if (ret) {
- dev_err(&client->dev, "failed to init entity pads: %d", ret);
+ dev_err(ov5675->dev, "failed to init entity pads: %d", ret);
goto probe_error_v4l2_ctrl_handler_free;
}
ret = v4l2_async_register_subdev_sensor(&ov5675->sd);
if (ret < 0) {
- dev_err(&client->dev, "failed to register V4L2 subdev: %d",
+ dev_err(ov5675->dev, "failed to register V4L2 subdev: %d",
ret);
goto probe_error_media_entity_cleanup;
}
/* Set the device's state to active if it's in D0 state. */
if (full_power)
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
- pm_runtime_idle(&client->dev);
+ pm_runtime_set_active(ov5675->dev);
+ pm_runtime_enable(ov5675->dev);
+ pm_runtime_idle(ov5675->dev);
return 0;
@@ -1356,7 +1343,7 @@ probe_error_v4l2_ctrl_handler_free:
v4l2_ctrl_handler_free(ov5675->sd.ctrl_handler);
mutex_destroy(&ov5675->mutex);
probe_power_off:
- ov5675_power_off(&client->dev);
+ ov5675_power_off(ov5675->dev);
return ret;
}
diff --git a/drivers/media/i2c/ov5693.c b/drivers/media/i2c/ov5693.c
index 485efd15257e..d294477f9dd3 100644
--- a/drivers/media/i2c/ov5693.c
+++ b/drivers/media/i2c/ov5693.c
@@ -1289,25 +1289,13 @@ static int ov5693_probe(struct i2c_client *client)
v4l2_i2c_subdev_init(&ov5693->sd, client, &ov5693_ops);
- ov5693->xvclk = devm_clk_get_optional(&client->dev, "xvclk");
+ ov5693->xvclk = devm_v4l2_sensor_clk_get(&client->dev, "xvclk");
if (IS_ERR(ov5693->xvclk))
return dev_err_probe(&client->dev, PTR_ERR(ov5693->xvclk),
"failed to get xvclk: %ld\n",
PTR_ERR(ov5693->xvclk));
- if (ov5693->xvclk) {
- xvclk_rate = clk_get_rate(ov5693->xvclk);
- } else {
- ret = fwnode_property_read_u32(dev_fwnode(&client->dev),
- "clock-frequency",
- &xvclk_rate);
-
- if (ret) {
- dev_err(&client->dev, "can't get clock frequency");
- return ret;
- }
- }
-
+ xvclk_rate = clk_get_rate(ov5693->xvclk);
if (xvclk_rate != OV5693_XVCLK_FREQ)
dev_warn(&client->dev, "Found clk freq %u, expected %u\n",
xvclk_rate, OV5693_XVCLK_FREQ);
diff --git a/drivers/media/i2c/ov5695.c b/drivers/media/i2c/ov5695.c
index 663eccdfea6a..5bb6ce7b3237 100644
--- a/drivers/media/i2c/ov5695.c
+++ b/drivers/media/i2c/ov5695.c
@@ -1264,16 +1264,12 @@ static int ov5695_probe(struct i2c_client *client)
ov5695->client = client;
ov5695->cur_mode = &supported_modes[0];
- ov5695->xvclk = devm_clk_get(dev, "xvclk");
- if (IS_ERR(ov5695->xvclk)) {
- dev_err(dev, "Failed to get xvclk\n");
- return -EINVAL;
- }
- ret = clk_set_rate(ov5695->xvclk, OV5695_XVCLK_FREQ);
- if (ret < 0) {
- dev_err(dev, "Failed to set xvclk rate (24MHz)\n");
- return ret;
- }
+ ov5695->xvclk = devm_v4l2_sensor_clk_get_legacy(dev, "xvclk", true,
+ OV5695_XVCLK_FREQ);
+ if (IS_ERR(ov5695->xvclk))
+ return dev_err_probe(dev, PTR_ERR(ov5695->xvclk),
+ "Failed to get xvclk\n");
+
if (clk_get_rate(ov5695->xvclk) != OV5695_XVCLK_FREQ)
dev_warn(dev, "xvclk mismatched, modes are based on 24MHz\n");
diff --git a/drivers/media/i2c/ov6211.c b/drivers/media/i2c/ov6211.c
new file mode 100644
index 000000000000..e3ac5ecf27d1
--- /dev/null
+++ b/drivers/media/i2c/ov6211.c
@@ -0,0 +1,793 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2024-2025 Linaro Ltd
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/units.h>
+#include <media/v4l2-cci.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+
+#define OV6211_LINK_FREQ_480MHZ (480 * HZ_PER_MHZ)
+#define OV6211_MCLK_FREQ_24MHZ (24 * HZ_PER_MHZ)
+
+#define OV6211_REG_CHIP_ID CCI_REG16(0x300a)
+#define OV6211_CHIP_ID 0x6710
+
+#define OV6211_REG_MODE_SELECT CCI_REG8(0x0100)
+#define OV6211_MODE_STANDBY 0x00
+#define OV6211_MODE_STREAMING BIT(0)
+
+#define OV6211_REG_SOFTWARE_RST CCI_REG8(0x0103)
+#define OV6211_SOFTWARE_RST BIT(0)
+
+/* Exposure controls from sensor */
+#define OV6211_REG_EXPOSURE CCI_REG24(0x3500)
+#define OV6211_EXPOSURE_MIN 1
+#define OV6211_EXPOSURE_MAX_MARGIN 4
+#define OV6211_EXPOSURE_STEP 1
+#define OV6211_EXPOSURE_DEFAULT 210
+
+/* Analogue gain controls from sensor */
+#define OV6211_REG_ANALOGUE_GAIN CCI_REG16(0x350a)
+#define OV6211_ANALOGUE_GAIN_MIN 1
+#define OV6211_ANALOGUE_GAIN_MAX 0x3ff
+#define OV6211_ANALOGUE_GAIN_STEP 1
+#define OV6211_ANALOGUE_GAIN_DEFAULT 160
+
+/* Test pattern */
+#define OV6211_REG_PRE_ISP CCI_REG8(0x5e00)
+#define OV6211_TEST_PATTERN_ENABLE BIT(7)
+
+#define to_ov6211(_sd) container_of(_sd, struct ov6211, sd)
+
+static const s64 ov6211_link_freq_menu[] = {
+ OV6211_LINK_FREQ_480MHZ,
+};
+
+struct ov6211_reg_list {
+ const struct cci_reg_sequence *regs;
+ unsigned int num_regs;
+};
+
+struct ov6211_mode {
+ u32 width; /* Frame width in pixels */
+ u32 height; /* Frame height in pixels */
+ u32 hts; /* Horizontal timing size */
+ u32 vts; /* Default vertical timing size */
+ u32 bpp; /* Bits per pixel */
+
+ const struct ov6211_reg_list reg_list; /* Sensor register setting */
+};
+
+static const char * const ov6211_test_pattern_menu[] = {
+ "Disabled",
+ "Vertical Colour Bars",
+};
+
+static const char * const ov6211_supply_names[] = {
+ "avdd", /* Analog power */
+ "dovdd", /* Digital I/O power */
+ "dvdd", /* Digital core power */
+};
+
+#define OV6211_NUM_SUPPLIES ARRAY_SIZE(ov6211_supply_names)
+
+struct ov6211 {
+ struct device *dev;
+ struct regmap *regmap;
+ struct clk *xvclk;
+ struct gpio_desc *reset_gpio;
+ struct regulator_bulk_data supplies[OV6211_NUM_SUPPLIES];
+
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+
+ struct v4l2_ctrl_handler ctrl_handler;
+
+ /* Saved register values */
+ u64 pre_isp;
+};
+
+static const struct cci_reg_sequence ov6211_400x400_120fps_mode[] = {
+ { CCI_REG8(0x3005), 0x00 },
+ { CCI_REG8(0x3013), 0x12 },
+ { CCI_REG8(0x3014), 0x04 },
+ { CCI_REG8(0x3016), 0x10 },
+ { CCI_REG8(0x3017), 0x00 },
+ { CCI_REG8(0x3018), 0x00 },
+ { CCI_REG8(0x301a), 0x00 },
+ { CCI_REG8(0x301b), 0x00 },
+ { CCI_REG8(0x301c), 0x00 },
+ { CCI_REG8(0x3037), 0xf0 },
+ { CCI_REG8(0x3080), 0x01 },
+ { CCI_REG8(0x3081), 0x00 },
+ { CCI_REG8(0x3082), 0x01 },
+ { CCI_REG8(0x3098), 0x04 },
+ { CCI_REG8(0x3099), 0x28 },
+ { CCI_REG8(0x309a), 0x06 },
+ { CCI_REG8(0x309b), 0x04 },
+ { CCI_REG8(0x309c), 0x00 },
+ { CCI_REG8(0x309d), 0x00 },
+ { CCI_REG8(0x309e), 0x01 },
+ { CCI_REG8(0x309f), 0x00 },
+ { CCI_REG8(0x30b0), 0x08 },
+ { CCI_REG8(0x30b1), 0x02 },
+ { CCI_REG8(0x30b2), 0x00 },
+ { CCI_REG8(0x30b3), 0x28 },
+ { CCI_REG8(0x30b4), 0x02 },
+ { CCI_REG8(0x30b5), 0x00 },
+ { CCI_REG8(0x3106), 0xd9 },
+ { CCI_REG8(0x3503), 0x07 },
+ { CCI_REG8(0x3509), 0x10 },
+ { CCI_REG8(0x3600), 0xfc },
+ { CCI_REG8(0x3620), 0xb7 },
+ { CCI_REG8(0x3621), 0x05 },
+ { CCI_REG8(0x3626), 0x31 },
+ { CCI_REG8(0x3627), 0x40 },
+ { CCI_REG8(0x3632), 0xa3 },
+ { CCI_REG8(0x3633), 0x34 },
+ { CCI_REG8(0x3634), 0x40 },
+ { CCI_REG8(0x3636), 0x00 },
+ { CCI_REG8(0x3660), 0x80 },
+ { CCI_REG8(0x3662), 0x03 },
+ { CCI_REG8(0x3664), 0xf0 },
+ { CCI_REG8(0x366a), 0x10 },
+ { CCI_REG8(0x366b), 0x06 },
+ { CCI_REG8(0x3680), 0xf4 },
+ { CCI_REG8(0x3681), 0x50 },
+ { CCI_REG8(0x3682), 0x00 },
+ { CCI_REG8(0x3708), 0x20 },
+ { CCI_REG8(0x3709), 0x40 },
+ { CCI_REG8(0x370d), 0x03 },
+ { CCI_REG8(0x373b), 0x02 },
+ { CCI_REG8(0x373c), 0x08 },
+ { CCI_REG8(0x3742), 0x00 },
+ { CCI_REG8(0x3744), 0x16 },
+ { CCI_REG8(0x3745), 0x08 },
+ { CCI_REG8(0x3781), 0xfc },
+ { CCI_REG8(0x3788), 0x00 },
+ { CCI_REG8(0x3800), 0x00 },
+ { CCI_REG8(0x3801), 0x04 },
+ { CCI_REG8(0x3802), 0x00 },
+ { CCI_REG8(0x3803), 0x04 },
+ { CCI_REG8(0x3804), 0x01 },
+ { CCI_REG8(0x3805), 0x9b },
+ { CCI_REG8(0x3806), 0x01 },
+ { CCI_REG8(0x3807), 0x9b },
+ { CCI_REG8(0x3808), 0x01 }, /* output width */
+ { CCI_REG8(0x3809), 0x90 },
+ { CCI_REG8(0x380a), 0x01 }, /* output height */
+ { CCI_REG8(0x380b), 0x90 },
+ { CCI_REG8(0x380c), 0x05 }, /* horizontal timing size */
+ { CCI_REG8(0x380d), 0xf2 },
+ { CCI_REG8(0x380e), 0x01 }, /* vertical timing size */
+ { CCI_REG8(0x380f), 0xb6 },
+ { CCI_REG8(0x3810), 0x00 },
+ { CCI_REG8(0x3811), 0x04 },
+ { CCI_REG8(0x3812), 0x00 },
+ { CCI_REG8(0x3813), 0x04 },
+ { CCI_REG8(0x3814), 0x11 },
+ { CCI_REG8(0x3815), 0x11 },
+ { CCI_REG8(0x3820), 0x00 },
+ { CCI_REG8(0x3821), 0x00 },
+ { CCI_REG8(0x382b), 0xfa },
+ { CCI_REG8(0x382f), 0x04 },
+ { CCI_REG8(0x3832), 0x00 },
+ { CCI_REG8(0x3833), 0x05 },
+ { CCI_REG8(0x3834), 0x00 },
+ { CCI_REG8(0x3835), 0x05 },
+ { CCI_REG8(0x3882), 0x04 },
+ { CCI_REG8(0x3883), 0x00 },
+ { CCI_REG8(0x38a4), 0x10 },
+ { CCI_REG8(0x38a5), 0x00 },
+ { CCI_REG8(0x38b1), 0x03 },
+ { CCI_REG8(0x3b80), 0x00 },
+ { CCI_REG8(0x3b81), 0xff },
+ { CCI_REG8(0x3b82), 0x10 },
+ { CCI_REG8(0x3b83), 0x00 },
+ { CCI_REG8(0x3b84), 0x08 },
+ { CCI_REG8(0x3b85), 0x00 },
+ { CCI_REG8(0x3b86), 0x01 },
+ { CCI_REG8(0x3b87), 0x00 },
+ { CCI_REG8(0x3b88), 0x00 },
+ { CCI_REG8(0x3b89), 0x00 },
+ { CCI_REG8(0x3b8a), 0x00 },
+ { CCI_REG8(0x3b8b), 0x05 },
+ { CCI_REG8(0x3b8c), 0x00 },
+ { CCI_REG8(0x3b8d), 0x00 },
+ { CCI_REG8(0x3b8e), 0x01 },
+ { CCI_REG8(0x3b8f), 0xb2 },
+ { CCI_REG8(0x3b94), 0x05 },
+ { CCI_REG8(0x3b95), 0xf2 },
+ { CCI_REG8(0x3b96), 0xc0 },
+ { CCI_REG8(0x4004), 0x04 },
+ { CCI_REG8(0x404e), 0x01 },
+ { CCI_REG8(0x4801), 0x0f },
+ { CCI_REG8(0x4806), 0x0f },
+ { CCI_REG8(0x4837), 0x43 },
+ { CCI_REG8(0x5a08), 0x00 },
+ { CCI_REG8(0x5a01), 0x00 },
+ { CCI_REG8(0x5a03), 0x00 },
+ { CCI_REG8(0x5a04), 0x10 },
+ { CCI_REG8(0x5a05), 0xa0 },
+ { CCI_REG8(0x5a06), 0x0c },
+ { CCI_REG8(0x5a07), 0x78 },
+};
+
+static const struct ov6211_mode supported_modes[] = {
+ {
+ .width = 400,
+ .height = 400,
+ .hts = 1522,
+ .vts = 438,
+ .bpp = 8,
+ .reg_list = {
+ .regs = ov6211_400x400_120fps_mode,
+ .num_regs = ARRAY_SIZE(ov6211_400x400_120fps_mode),
+ },
+ },
+};
+
+static int ov6211_set_test_pattern(struct ov6211 *ov6211, u32 pattern)
+{
+ u64 val = ov6211->pre_isp;
+
+ if (pattern)
+ val |= OV6211_TEST_PATTERN_ENABLE;
+ else
+ val &= ~OV6211_TEST_PATTERN_ENABLE;
+
+ return cci_write(ov6211->regmap, OV6211_REG_PRE_ISP, val, NULL);
+}
+
+static int ov6211_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ov6211 *ov6211 = container_of(ctrl->handler, struct ov6211,
+ ctrl_handler);
+ int ret;
+
+	/* V4L2 controls are applied only when the sensor is powered up for streaming */
+ if (!pm_runtime_get_if_active(ov6211->dev))
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_ANALOGUE_GAIN:
+ ret = cci_write(ov6211->regmap, OV6211_REG_ANALOGUE_GAIN,
+ ctrl->val, NULL);
+ break;
+ case V4L2_CID_EXPOSURE:
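+		/* The exposure registers use 1/16-line units, hence the shift */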
+ ret = cci_write(ov6211->regmap, OV6211_REG_EXPOSURE,
+ ctrl->val << 4, NULL);
+ break;
+ case V4L2_CID_TEST_PATTERN:
+ ret = ov6211_set_test_pattern(ov6211, ctrl->val);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ pm_runtime_put(ov6211->dev);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops ov6211_ctrl_ops = {
+ .s_ctrl = ov6211_set_ctrl,
+};
+
+static int ov6211_init_controls(struct ov6211 *ov6211)
+{
+ struct v4l2_ctrl_handler *ctrl_hdlr = &ov6211->ctrl_handler;
+ const struct ov6211_mode *mode = &supported_modes[0];
+ struct v4l2_fwnode_device_properties props;
+ s64 exposure_max, pixel_rate, h_blank;
+ struct v4l2_ctrl *ctrl;
+ int ret;
+
+ v4l2_ctrl_handler_init(ctrl_hdlr, 9);
+
+ ctrl = v4l2_ctrl_new_int_menu(ctrl_hdlr, &ov6211_ctrl_ops,
+ V4L2_CID_LINK_FREQ,
+ ARRAY_SIZE(ov6211_link_freq_menu) - 1,
+ 0, ov6211_link_freq_menu);
+ if (ctrl)
+ ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ pixel_rate = ov6211_link_freq_menu[0] / mode->bpp;
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov6211_ctrl_ops, V4L2_CID_PIXEL_RATE,
+ 0, pixel_rate, 1, pixel_rate);
+
+ h_blank = mode->hts - mode->width;
+ ctrl = v4l2_ctrl_new_std(ctrl_hdlr, &ov6211_ctrl_ops, V4L2_CID_HBLANK,
+ h_blank, h_blank, 1, h_blank);
+ if (ctrl)
+ ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ ctrl = v4l2_ctrl_new_std(ctrl_hdlr, &ov6211_ctrl_ops, V4L2_CID_VBLANK,
+ mode->vts - mode->height,
+ mode->vts - mode->height, 1,
+ mode->vts - mode->height);
+ if (ctrl)
+ ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov6211_ctrl_ops, V4L2_CID_ANALOGUE_GAIN,
+ OV6211_ANALOGUE_GAIN_MIN, OV6211_ANALOGUE_GAIN_MAX,
+ OV6211_ANALOGUE_GAIN_STEP,
+ OV6211_ANALOGUE_GAIN_DEFAULT);
+
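+	/* Exposure must not exceed the frame length (VTS) minus a small margin */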
+ exposure_max = (mode->vts - OV6211_EXPOSURE_MAX_MARGIN);
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov6211_ctrl_ops,
+ V4L2_CID_EXPOSURE,
+ OV6211_EXPOSURE_MIN, exposure_max,
+ OV6211_EXPOSURE_STEP,
+ OV6211_EXPOSURE_DEFAULT);
+
+ v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &ov6211_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(ov6211_test_pattern_menu) - 1,
+ 0, 0, ov6211_test_pattern_menu);
+
+ if (ctrl_hdlr->error)
+ return ctrl_hdlr->error;
+
+ ret = v4l2_fwnode_device_parse(ov6211->dev, &props);
+ if (ret)
+ goto error_free_hdlr;
+
+ ret = v4l2_ctrl_new_fwnode_properties(ctrl_hdlr, &ov6211_ctrl_ops,
+ &props);
+ if (ret)
+ goto error_free_hdlr;
+
+ ov6211->sd.ctrl_handler = ctrl_hdlr;
+
+ return 0;
+
+error_free_hdlr:
+ v4l2_ctrl_handler_free(ctrl_hdlr);
+
+ return ret;
+}
+
+static void ov6211_update_pad_format(const struct ov6211_mode *mode,
+ struct v4l2_mbus_framefmt *fmt)
+{
+ fmt->code = MEDIA_BUS_FMT_Y8_1X8;
+ fmt->width = mode->width;
+ fmt->height = mode->height;
+ fmt->field = V4L2_FIELD_NONE;
+ fmt->colorspace = V4L2_COLORSPACE_RAW;
+ fmt->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
+ fmt->xfer_func = V4L2_XFER_FUNC_NONE;
+}
+
+static int ov6211_enable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 pad,
+ u64 streams_mask)
+{
+ const struct ov6211_reg_list *reg_list = &supported_modes[0].reg_list;
+ struct ov6211 *ov6211 = to_ov6211(sd);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(ov6211->dev);
+ if (ret)
+ return ret;
+
+	/* Software reset implies standby, so an explicit standby write is skipped */
+ ret = cci_write(ov6211->regmap, OV6211_REG_SOFTWARE_RST,
+ OV6211_SOFTWARE_RST, NULL);
+ if (ret) {
+ dev_err(ov6211->dev, "failed to software reset: %d\n", ret);
+ goto error;
+ }
+
+ ret = cci_multi_reg_write(ov6211->regmap, reg_list->regs,
+ reg_list->num_regs, NULL);
+ if (ret) {
+ dev_err(ov6211->dev, "failed to set mode: %d\n", ret);
+ goto error;
+ }
+
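+	/* Apply the cached control values now that the sensor is powered */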
+ ret = __v4l2_ctrl_handler_setup(ov6211->sd.ctrl_handler);
+ if (ret)
+ goto error;
+
+ ret = cci_write(ov6211->regmap, OV6211_REG_MODE_SELECT,
+ OV6211_MODE_STREAMING, NULL);
+ if (ret) {
+ dev_err(ov6211->dev, "failed to start streaming: %d\n", ret);
+ goto error;
+ }
+
+ return 0;
+
+error:
+ pm_runtime_put_autosuspend(ov6211->dev);
+
+ return ret;
+}
+
+static int ov6211_disable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 pad,
+ u64 streams_mask)
+{
+ struct ov6211 *ov6211 = to_ov6211(sd);
+ int ret;
+
+ ret = cci_write(ov6211->regmap, OV6211_REG_MODE_SELECT,
+ OV6211_MODE_STANDBY, NULL);
+ if (ret)
+ dev_err(ov6211->dev, "failed to stop streaming: %d\n", ret);
+
+ pm_runtime_put_autosuspend(ov6211->dev);
+
+ return ret;
+}
+
+static int ov6211_set_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct v4l2_mbus_framefmt *format;
+ const struct ov6211_mode *mode;
+
+ format = v4l2_subdev_state_get_format(state, 0);
+
+ mode = v4l2_find_nearest_size(supported_modes,
+ ARRAY_SIZE(supported_modes),
+ width, height,
+ fmt->format.width,
+ fmt->format.height);
+
+ ov6211_update_pad_format(mode, &fmt->format);
+ *format = fmt->format;
+
+ return 0;
+}
+
+static int ov6211_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index > 0)
+ return -EINVAL;
+
+ code->code = MEDIA_BUS_FMT_Y8_1X8;
+
+ return 0;
+}
+
+static int ov6211_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ if (fse->index >= ARRAY_SIZE(supported_modes))
+ return -EINVAL;
+
+ if (fse->code != MEDIA_BUS_FMT_Y8_1X8)
+ return -EINVAL;
+
+ fse->min_width = supported_modes[fse->index].width;
+ fse->max_width = fse->min_width;
+ fse->min_height = supported_modes[fse->index].height;
+ fse->max_height = fse->min_height;
+
+ return 0;
+}
+
+static int ov6211_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
+{
+ struct v4l2_subdev_format fmt = {
+ .which = V4L2_SUBDEV_FORMAT_TRY,
+ .pad = 0,
+ .format = {
+ .code = MEDIA_BUS_FMT_Y8_1X8,
+ .width = supported_modes[0].width,
+ .height = supported_modes[0].height,
+ },
+ };
+
+ ov6211_set_pad_format(sd, state, &fmt);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops ov6211_video_ops = {
+ .s_stream = v4l2_subdev_s_stream_helper,
+};
+
+static const struct v4l2_subdev_pad_ops ov6211_pad_ops = {
+ .set_fmt = ov6211_set_pad_format,
+ .get_fmt = v4l2_subdev_get_fmt,
+ .enum_mbus_code = ov6211_enum_mbus_code,
+ .enum_frame_size = ov6211_enum_frame_size,
+ .enable_streams = ov6211_enable_streams,
+ .disable_streams = ov6211_disable_streams,
+};
+
+static const struct v4l2_subdev_ops ov6211_subdev_ops = {
+ .video = &ov6211_video_ops,
+ .pad = &ov6211_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops ov6211_internal_ops = {
+ .init_state = ov6211_init_state,
+};
+
+static const struct media_entity_operations ov6211_subdev_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static int ov6211_identify_sensor(struct ov6211 *ov6211)
+{
+ u64 val;
+ int ret;
+
+ ret = cci_read(ov6211->regmap, OV6211_REG_CHIP_ID, &val, NULL);
+ if (ret) {
+ dev_err(ov6211->dev, "failed to read chip id: %d\n", ret);
+ return ret;
+ }
+
+ if (val != OV6211_CHIP_ID) {
+ dev_err(ov6211->dev, "chip id mismatch: %x!=%llx\n",
+ OV6211_CHIP_ID, val);
+ return -ENODEV;
+ }
+
+ ret = cci_read(ov6211->regmap, OV6211_REG_PRE_ISP,
+ &ov6211->pre_isp, NULL);
+ if (ret)
+ dev_err(ov6211->dev, "failed to read pre_isp: %d\n", ret);
+
+ return ret;
+}
+
+static int ov6211_check_hwcfg(struct ov6211 *ov6211)
+{
+ struct fwnode_handle *fwnode = dev_fwnode(ov6211->dev), *ep;
+ struct v4l2_fwnode_endpoint bus_cfg = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY,
+ };
+ unsigned long freq_bitmap;
+ int ret;
+
+ if (!fwnode)
+ return -ENODEV;
+
+ ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
+ if (!ep)
+ return -EINVAL;
+
+ ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg);
+ fwnode_handle_put(ep);
+ if (ret)
+ return ret;
+
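+	/* Ensure the firmware-declared link frequencies include a supported one */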
+ ret = v4l2_link_freq_to_bitmap(ov6211->dev, bus_cfg.link_frequencies,
+ bus_cfg.nr_of_link_frequencies,
+ ov6211_link_freq_menu,
+ ARRAY_SIZE(ov6211_link_freq_menu),
+ &freq_bitmap);
+
+ v4l2_fwnode_endpoint_free(&bus_cfg);
+
+ return ret;
+}
+
+static int ov6211_power_on(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct ov6211 *ov6211 = to_ov6211(sd);
+ int ret;
+
+ ret = regulator_bulk_enable(OV6211_NUM_SUPPLIES, ov6211->supplies);
+ if (ret)
+ return ret;
+
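+	/* De-assert reset and let the supplies settle before enabling the clock */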
+ gpiod_set_value_cansleep(ov6211->reset_gpio, 0);
+ usleep_range(10 * USEC_PER_MSEC, 15 * USEC_PER_MSEC);
+
+ ret = clk_prepare_enable(ov6211->xvclk);
+ if (ret)
+ goto reset_gpio;
+
+ return 0;
+
+reset_gpio:
+ gpiod_set_value_cansleep(ov6211->reset_gpio, 1);
+
+ regulator_bulk_disable(OV6211_NUM_SUPPLIES, ov6211->supplies);
+
+ return ret;
+}
+
+static int ov6211_power_off(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct ov6211 *ov6211 = to_ov6211(sd);
+
+ clk_disable_unprepare(ov6211->xvclk);
+
+ gpiod_set_value_cansleep(ov6211->reset_gpio, 1);
+
+ regulator_bulk_disable(OV6211_NUM_SUPPLIES, ov6211->supplies);
+
+ return 0;
+}
+
+static int ov6211_probe(struct i2c_client *client)
+{
+ struct ov6211 *ov6211;
+ unsigned long freq;
+ unsigned int i;
+ int ret;
+
+ ov6211 = devm_kzalloc(&client->dev, sizeof(*ov6211), GFP_KERNEL);
+ if (!ov6211)
+ return -ENOMEM;
+
+ ov6211->dev = &client->dev;
+
+ v4l2_i2c_subdev_init(&ov6211->sd, client, &ov6211_subdev_ops);
+
+ ov6211->regmap = devm_cci_regmap_init_i2c(client, 16);
+ if (IS_ERR(ov6211->regmap))
+ return dev_err_probe(ov6211->dev, PTR_ERR(ov6211->regmap),
+ "failed to init CCI\n");
+
+ ov6211->xvclk = devm_v4l2_sensor_clk_get(ov6211->dev, NULL);
+ if (IS_ERR(ov6211->xvclk))
+ return dev_err_probe(ov6211->dev, PTR_ERR(ov6211->xvclk),
+ "failed to get XVCLK clock\n");
+
+ freq = clk_get_rate(ov6211->xvclk);
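+	/* A rate of zero (not declared by firmware) is tolerated here */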
+ if (freq && freq != OV6211_MCLK_FREQ_24MHZ)
+ return dev_err_probe(ov6211->dev, -EINVAL,
+ "XVCLK clock frequency %lu is not supported\n",
+ freq);
+
+ ret = ov6211_check_hwcfg(ov6211);
+ if (ret)
+ return dev_err_probe(ov6211->dev, ret,
+ "failed to check HW configuration\n");
+
+ ov6211->reset_gpio = devm_gpiod_get_optional(ov6211->dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(ov6211->reset_gpio))
+ return dev_err_probe(ov6211->dev, PTR_ERR(ov6211->reset_gpio),
+ "cannot get reset GPIO\n");
+
+ for (i = 0; i < OV6211_NUM_SUPPLIES; i++)
+ ov6211->supplies[i].supply = ov6211_supply_names[i];
+
+ ret = devm_regulator_bulk_get(ov6211->dev, OV6211_NUM_SUPPLIES,
+ ov6211->supplies);
+ if (ret)
+ return dev_err_probe(ov6211->dev, ret,
+ "failed to get supply regulators\n");
+
+ /* The sensor must be powered on to read the CHIP_ID register */
+ ret = ov6211_power_on(ov6211->dev);
+ if (ret)
+ return ret;
+
+ ret = ov6211_identify_sensor(ov6211);
+ if (ret) {
+ dev_err_probe(ov6211->dev, ret, "failed to find sensor\n");
+ goto power_off;
+ }
+
+ ret = ov6211_init_controls(ov6211);
+ if (ret) {
+ dev_err_probe(ov6211->dev, ret, "failed to init controls\n");
+ goto power_off;
+ }
+
+ ov6211->sd.state_lock = ov6211->ctrl_handler.lock;
+ ov6211->sd.internal_ops = &ov6211_internal_ops;
+ ov6211->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ ov6211->sd.entity.ops = &ov6211_subdev_entity_ops;
+ ov6211->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ ov6211->pad.flags = MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&ov6211->sd.entity, 1, &ov6211->pad);
+ if (ret) {
+ dev_err_probe(ov6211->dev, ret,
+ "failed to init media entity pads\n");
+ goto v4l2_ctrl_handler_free;
+ }
+
+ ret = v4l2_subdev_init_finalize(&ov6211->sd);
+ if (ret < 0) {
+ dev_err_probe(ov6211->dev, ret,
+ "failed to init media entity pads\n");
+ goto media_entity_cleanup;
+ }
+
+ pm_runtime_set_active(ov6211->dev);
+ pm_runtime_enable(ov6211->dev);
+
+ ret = v4l2_async_register_subdev_sensor(&ov6211->sd);
+ if (ret < 0) {
+ dev_err_probe(ov6211->dev, ret,
+ "failed to register V4L2 subdev\n");
+ goto subdev_cleanup;
+ }
+
+	/* Let runtime PM idle the device; autosuspend after 1 s of inactivity */
+ pm_runtime_idle(ov6211->dev);
+ pm_runtime_set_autosuspend_delay(ov6211->dev, 1000);
+ pm_runtime_use_autosuspend(ov6211->dev);
+
+ return 0;
+
+subdev_cleanup:
+ v4l2_subdev_cleanup(&ov6211->sd);
+ pm_runtime_disable(ov6211->dev);
+ pm_runtime_set_suspended(ov6211->dev);
+
+media_entity_cleanup:
+ media_entity_cleanup(&ov6211->sd.entity);
+
+v4l2_ctrl_handler_free:
+ v4l2_ctrl_handler_free(ov6211->sd.ctrl_handler);
+
+power_off:
+ ov6211_power_off(ov6211->dev);
+
+ return ret;
+}
+
+static void ov6211_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov6211 *ov6211 = to_ov6211(sd);
+
+ v4l2_async_unregister_subdev(sd);
+ v4l2_subdev_cleanup(sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+ pm_runtime_disable(ov6211->dev);
+
+ if (!pm_runtime_status_suspended(ov6211->dev)) {
+ ov6211_power_off(ov6211->dev);
+ pm_runtime_set_suspended(ov6211->dev);
+ }
+}
+
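+/* Runtime suspend powers the sensor off; runtime resume powers it back on */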
+static const struct dev_pm_ops ov6211_pm_ops = {
+ SET_RUNTIME_PM_OPS(ov6211_power_off, ov6211_power_on, NULL)
+};
+
+static const struct of_device_id ov6211_of_match[] = {
+ { .compatible = "ovti,ov6211" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ov6211_of_match);
+
+static struct i2c_driver ov6211_i2c_driver = {
+ .driver = {
+ .name = "ov6211",
+ .pm = &ov6211_pm_ops,
+ .of_match_table = ov6211_of_match,
+ },
+ .probe = ov6211_probe,
+ .remove = ov6211_remove,
+};
+
+module_i2c_driver(ov6211_i2c_driver);
+
+MODULE_AUTHOR("Vladimir Zapolskiy <vladimir.zapolskiy@linaro.org>");
+MODULE_DESCRIPTION("OmniVision OV6211 sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/i2c/ov64a40.c b/drivers/media/i2c/ov64a40.c
index 2031cbd05c26..78b62c169b99 100644
--- a/drivers/media/i2c/ov64a40.c
+++ b/drivers/media/i2c/ov64a40.c
@@ -3546,7 +3546,7 @@ static int ov64a40_probe(struct i2c_client *client)
return PTR_ERR(ov64a40->cci);
}
- ov64a40->xclk = devm_clk_get(&client->dev, NULL);
+ ov64a40->xclk = devm_v4l2_sensor_clk_get(&client->dev, NULL);
if (IS_ERR(ov64a40->xclk))
return dev_err_probe(&client->dev, PTR_ERR(ov64a40->xclk),
"Failed to get clock\n");
diff --git a/drivers/media/i2c/ov6650.c b/drivers/media/i2c/ov6650.c
deleted file mode 100644
index 9c7627161142..000000000000
--- a/drivers/media/i2c/ov6650.c
+++ /dev/null
@@ -1,1149 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * V4L2 subdevice driver for OmniVision OV6650 Camera Sensor
- *
- * Copyright (C) 2010 Janusz Krzysztofik <jkrzyszt@tis.icnet.pl>
- *
- * Based on OmniVision OV96xx Camera Driver
- * Copyright (C) 2009 Marek Vasut <marek.vasut@gmail.com>
- *
- * Based on ov772x camera driver:
- * Copyright (C) 2008 Renesas Solutions Corp.
- * Kuninori Morimoto <morimoto.kuninori@renesas.com>
- *
- * Based on ov7670 and soc_camera_platform driver,
- * Copyright 2006-7 Jonathan Corbet <corbet@lwn.net>
- * Copyright (C) 2008 Magnus Damm
- * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
- *
- * Hardware specific bits initially based on former work by Matt Callow
- * drivers/media/video/omap/sensor_ov6650.c
- * Copyright (C) 2006 Matt Callow
- */
-
-#include <linux/bitops.h>
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/i2c.h>
-#include <linux/slab.h>
-#include <linux/v4l2-mediabus.h>
-#include <linux/module.h>
-
-#include <media/v4l2-ctrls.h>
-#include <media/v4l2-device.h>
-
-/* Register definitions */
-#define REG_GAIN 0x00 /* range 00 - 3F */
-#define REG_BLUE 0x01
-#define REG_RED 0x02
-#define REG_SAT 0x03 /* [7:4] saturation [0:3] reserved */
-#define REG_HUE 0x04 /* [7:6] rsrvd [5] hue en [4:0] hue */
-
-#define REG_BRT 0x06
-
-#define REG_PIDH 0x0a
-#define REG_PIDL 0x0b
-
-#define REG_AECH 0x10
-#define REG_CLKRC 0x11 /* Data Format and Internal Clock */
- /* [7:6] Input system clock (MHz)*/
- /* 00=8, 01=12, 10=16, 11=24 */
- /* [5:0]: Internal Clock Pre-Scaler */
-#define REG_COMA 0x12 /* [7] Reset */
-#define REG_COMB 0x13
-#define REG_COMC 0x14
-#define REG_COMD 0x15
-#define REG_COML 0x16
-#define REG_HSTRT 0x17
-#define REG_HSTOP 0x18
-#define REG_VSTRT 0x19
-#define REG_VSTOP 0x1a
-#define REG_PSHFT 0x1b
-#define REG_MIDH 0x1c
-#define REG_MIDL 0x1d
-#define REG_HSYNS 0x1e
-#define REG_HSYNE 0x1f
-#define REG_COME 0x20
-#define REG_YOFF 0x21
-#define REG_UOFF 0x22
-#define REG_VOFF 0x23
-#define REG_AEW 0x24
-#define REG_AEB 0x25
-#define REG_COMF 0x26
-#define REG_COMG 0x27
-#define REG_COMH 0x28
-#define REG_COMI 0x29
-
-#define REG_FRARL 0x2b
-#define REG_COMJ 0x2c
-#define REG_COMK 0x2d
-#define REG_AVGY 0x2e
-#define REG_REF0 0x2f
-#define REG_REF1 0x30
-#define REG_REF2 0x31
-#define REG_FRAJH 0x32
-#define REG_FRAJL 0x33
-#define REG_FACT 0x34
-#define REG_L1AEC 0x35
-#define REG_AVGU 0x36
-#define REG_AVGV 0x37
-
-#define REG_SPCB 0x60
-#define REG_SPCC 0x61
-#define REG_GAM1 0x62
-#define REG_GAM2 0x63
-#define REG_GAM3 0x64
-#define REG_SPCD 0x65
-
-#define REG_SPCE 0x68
-#define REG_ADCL 0x69
-
-#define REG_RMCO 0x6c
-#define REG_GMCO 0x6d
-#define REG_BMCO 0x6e
-
-
-/* Register bits, values, etc. */
-#define OV6650_PIDH 0x66 /* high byte of product ID number */
-#define OV6650_PIDL 0x50 /* low byte of product ID number */
-#define OV6650_MIDH 0x7F /* high byte of mfg ID */
-#define OV6650_MIDL 0xA2 /* low byte of mfg ID */
-
-#define DEF_GAIN 0x00
-#define DEF_BLUE 0x80
-#define DEF_RED 0x80
-
-#define SAT_SHIFT 4
-#define SAT_MASK (0xf << SAT_SHIFT)
-#define SET_SAT(x) (((x) << SAT_SHIFT) & SAT_MASK)
-
-#define HUE_EN BIT(5)
-#define HUE_MASK 0x1f
-#define DEF_HUE 0x10
-#define SET_HUE(x) (HUE_EN | ((x) & HUE_MASK))
-
-#define DEF_AECH 0x4D
-
-#define CLKRC_8MHz 0x00
-#define CLKRC_12MHz 0x40
-#define CLKRC_16MHz 0x80
-#define CLKRC_24MHz 0xc0
-#define CLKRC_DIV_MASK 0x3f
-#define GET_CLKRC_DIV(x) (((x) & CLKRC_DIV_MASK) + 1)
-#define DEF_CLKRC 0x00
-
-#define COMA_RESET BIT(7)
-#define COMA_QCIF BIT(5)
-#define COMA_RAW_RGB BIT(4)
-#define COMA_RGB BIT(3)
-#define COMA_BW BIT(2)
-#define COMA_WORD_SWAP BIT(1)
-#define COMA_BYTE_SWAP BIT(0)
-#define DEF_COMA 0x00
-
-#define COMB_FLIP_V BIT(7)
-#define COMB_FLIP_H BIT(5)
-#define COMB_BAND_FILTER BIT(4)
-#define COMB_AWB BIT(2)
-#define COMB_AGC BIT(1)
-#define COMB_AEC BIT(0)
-#define DEF_COMB 0x5f
-
-#define COML_ONE_CHANNEL BIT(7)
-
-#define DEF_HSTRT 0x24
-#define DEF_HSTOP 0xd4
-#define DEF_VSTRT 0x04
-#define DEF_VSTOP 0x94
-
-#define COMF_HREF_LOW BIT(4)
-
-#define COMJ_PCLK_RISING BIT(4)
-#define COMJ_VSYNC_HIGH BIT(0)
-
-/* supported resolutions */
-#define W_QCIF (DEF_HSTOP - DEF_HSTRT)
-#define W_CIF (W_QCIF << 1)
-#define H_QCIF (DEF_VSTOP - DEF_VSTRT)
-#define H_CIF (H_QCIF << 1)
-
-#define FRAME_RATE_MAX 30
-
-
-struct ov6650_reg {
- u8 reg;
- u8 val;
-};
-
-struct ov6650 {
- struct v4l2_subdev subdev;
- struct v4l2_ctrl_handler hdl;
- struct {
- /* exposure/autoexposure cluster */
- struct v4l2_ctrl *autoexposure;
- struct v4l2_ctrl *exposure;
- };
- struct {
- /* gain/autogain cluster */
- struct v4l2_ctrl *autogain;
- struct v4l2_ctrl *gain;
- };
- struct {
- /* blue/red/autowhitebalance cluster */
- struct v4l2_ctrl *autowb;
- struct v4l2_ctrl *blue;
- struct v4l2_ctrl *red;
- };
- struct clk *clk;
- bool half_scale; /* scale down output by 2 */
- struct v4l2_rect rect; /* sensor cropping window */
- struct v4l2_fract tpf; /* as requested with set_frame_interval */
- u32 code;
-};
-
-struct ov6650_xclk {
- unsigned long rate;
- u8 clkrc;
-};
-
-static const struct ov6650_xclk ov6650_xclk[] = {
-{
- .rate = 8000000,
- .clkrc = CLKRC_8MHz,
-},
-{
- .rate = 12000000,
- .clkrc = CLKRC_12MHz,
-},
-{
- .rate = 16000000,
- .clkrc = CLKRC_16MHz,
-},
-{
- .rate = 24000000,
- .clkrc = CLKRC_24MHz,
-},
-};
-
-static u32 ov6650_codes[] = {
- MEDIA_BUS_FMT_YUYV8_2X8,
- MEDIA_BUS_FMT_UYVY8_2X8,
- MEDIA_BUS_FMT_YVYU8_2X8,
- MEDIA_BUS_FMT_VYUY8_2X8,
- MEDIA_BUS_FMT_SBGGR8_1X8,
- MEDIA_BUS_FMT_Y8_1X8,
-};
-
-static const struct v4l2_mbus_framefmt ov6650_def_fmt = {
- .width = W_CIF,
- .height = H_CIF,
- .code = MEDIA_BUS_FMT_SBGGR8_1X8,
- .colorspace = V4L2_COLORSPACE_SRGB,
- .field = V4L2_FIELD_NONE,
- .ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT,
- .quantization = V4L2_QUANTIZATION_DEFAULT,
- .xfer_func = V4L2_XFER_FUNC_DEFAULT,
-};
-
-/* read a register */
-static int ov6650_reg_read(struct i2c_client *client, u8 reg, u8 *val)
-{
- int ret;
- u8 data = reg;
- struct i2c_msg msg = {
- .addr = client->addr,
- .flags = 0,
- .len = 1,
- .buf = &data,
- };
-
- ret = i2c_transfer(client->adapter, &msg, 1);
- if (ret < 0)
- goto err;
-
- msg.flags = I2C_M_RD;
- ret = i2c_transfer(client->adapter, &msg, 1);
- if (ret < 0)
- goto err;
-
- *val = data;
- return 0;
-
-err:
- dev_err(&client->dev, "Failed reading register 0x%02x!\n", reg);
- return ret;
-}
-
-/* write a register */
-static int ov6650_reg_write(struct i2c_client *client, u8 reg, u8 val)
-{
- int ret;
- unsigned char data[2] = { reg, val };
- struct i2c_msg msg = {
- .addr = client->addr,
- .flags = 0,
- .len = 2,
- .buf = data,
- };
-
- ret = i2c_transfer(client->adapter, &msg, 1);
- udelay(100);
-
- if (ret < 0) {
- dev_err(&client->dev, "Failed writing register 0x%02x!\n", reg);
- return ret;
- }
- return 0;
-}
-
-
-/* Read a register, alter its bits, write it back */
-static int ov6650_reg_rmw(struct i2c_client *client, u8 reg, u8 set, u8 mask)
-{
- u8 val;
- int ret;
-
- ret = ov6650_reg_read(client, reg, &val);
- if (ret) {
- dev_err(&client->dev,
- "[Read]-Modify-Write of register 0x%02x failed!\n",
- reg);
- return ret;
- }
-
- val &= ~mask;
- val |= set;
-
- ret = ov6650_reg_write(client, reg, val);
- if (ret)
- dev_err(&client->dev,
- "Read-Modify-[Write] of register 0x%02x failed!\n",
- reg);
-
- return ret;
-}
-
-static struct ov6650 *to_ov6650(const struct i2c_client *client)
-{
- return container_of(i2c_get_clientdata(client), struct ov6650, subdev);
-}
-
-/* Start/Stop streaming from the device */
-static int ov6650_s_stream(struct v4l2_subdev *sd, int enable)
-{
- return 0;
-}
-
-/* Get status of additional camera capabilities */
-static int ov6550_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct ov6650 *priv = container_of(ctrl->handler, struct ov6650, hdl);
- struct v4l2_subdev *sd = &priv->subdev;
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- uint8_t reg, reg2;
- int ret;
-
- switch (ctrl->id) {
- case V4L2_CID_AUTOGAIN:
- ret = ov6650_reg_read(client, REG_GAIN, &reg);
- if (!ret)
- priv->gain->val = reg;
- return ret;
- case V4L2_CID_AUTO_WHITE_BALANCE:
- ret = ov6650_reg_read(client, REG_BLUE, &reg);
- if (!ret)
- ret = ov6650_reg_read(client, REG_RED, &reg2);
- if (!ret) {
- priv->blue->val = reg;
- priv->red->val = reg2;
- }
- return ret;
- case V4L2_CID_EXPOSURE_AUTO:
- ret = ov6650_reg_read(client, REG_AECH, &reg);
- if (!ret)
- priv->exposure->val = reg;
- return ret;
- }
- return -EINVAL;
-}
-
-/* Set status of additional camera capabilities */
-static int ov6550_s_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct ov6650 *priv = container_of(ctrl->handler, struct ov6650, hdl);
- struct v4l2_subdev *sd = &priv->subdev;
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
-
- switch (ctrl->id) {
- case V4L2_CID_AUTOGAIN:
- ret = ov6650_reg_rmw(client, REG_COMB,
- ctrl->val ? COMB_AGC : 0, COMB_AGC);
- if (!ret && !ctrl->val)
- ret = ov6650_reg_write(client, REG_GAIN, priv->gain->val);
- return ret;
- case V4L2_CID_AUTO_WHITE_BALANCE:
- ret = ov6650_reg_rmw(client, REG_COMB,
- ctrl->val ? COMB_AWB : 0, COMB_AWB);
- if (!ret && !ctrl->val) {
- ret = ov6650_reg_write(client, REG_BLUE, priv->blue->val);
- if (!ret)
- ret = ov6650_reg_write(client, REG_RED,
- priv->red->val);
- }
- return ret;
- case V4L2_CID_SATURATION:
- return ov6650_reg_rmw(client, REG_SAT, SET_SAT(ctrl->val),
- SAT_MASK);
- case V4L2_CID_HUE:
- return ov6650_reg_rmw(client, REG_HUE, SET_HUE(ctrl->val),
- HUE_MASK);
- case V4L2_CID_BRIGHTNESS:
- return ov6650_reg_write(client, REG_BRT, ctrl->val);
- case V4L2_CID_EXPOSURE_AUTO:
- ret = ov6650_reg_rmw(client, REG_COMB, ctrl->val ==
- V4L2_EXPOSURE_AUTO ? COMB_AEC : 0, COMB_AEC);
- if (!ret && ctrl->val == V4L2_EXPOSURE_MANUAL)
- ret = ov6650_reg_write(client, REG_AECH,
- priv->exposure->val);
- return ret;
- case V4L2_CID_GAMMA:
- return ov6650_reg_write(client, REG_GAM1, ctrl->val);
- case V4L2_CID_VFLIP:
- return ov6650_reg_rmw(client, REG_COMB,
- ctrl->val ? COMB_FLIP_V : 0, COMB_FLIP_V);
- case V4L2_CID_HFLIP:
- return ov6650_reg_rmw(client, REG_COMB,
- ctrl->val ? COMB_FLIP_H : 0, COMB_FLIP_H);
- }
-
- return -EINVAL;
-}
-
-#ifdef CONFIG_VIDEO_ADV_DEBUG
-static int ov6650_get_register(struct v4l2_subdev *sd,
- struct v4l2_dbg_register *reg)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
- u8 val;
-
- if (reg->reg & ~0xff)
- return -EINVAL;
-
- reg->size = 1;
-
- ret = ov6650_reg_read(client, reg->reg, &val);
- if (!ret)
- reg->val = (__u64)val;
-
- return ret;
-}
-
-static int ov6650_set_register(struct v4l2_subdev *sd,
- const struct v4l2_dbg_register *reg)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
-
- if (reg->reg & ~0xff || reg->val & ~0xff)
- return -EINVAL;
-
- return ov6650_reg_write(client, reg->reg, reg->val);
-}
-#endif
-
-static int ov6650_s_power(struct v4l2_subdev *sd, int on)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct ov6650 *priv = to_ov6650(client);
- int ret = 0;
-
- if (on)
- ret = clk_prepare_enable(priv->clk);
- else
- clk_disable_unprepare(priv->clk);
-
- return ret;
-}
-
-static int ov6650_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_selection *sel)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct ov6650 *priv = to_ov6650(client);
- struct v4l2_rect *rect;
-
- if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
- /* pre-select try crop rectangle */
- rect = v4l2_subdev_state_get_crop(sd_state, 0);
-
- } else {
- /* pre-select active crop rectangle */
- rect = &priv->rect;
- }
-
- switch (sel->target) {
- case V4L2_SEL_TGT_CROP_BOUNDS:
- sel->r.left = DEF_HSTRT << 1;
- sel->r.top = DEF_VSTRT << 1;
- sel->r.width = W_CIF;
- sel->r.height = H_CIF;
- return 0;
-
- case V4L2_SEL_TGT_CROP:
- /* use selected crop rectangle */
- sel->r = *rect;
- return 0;
-
- default:
- return -EINVAL;
- }
-}
-
-static bool is_unscaled_ok(int width, int height, struct v4l2_rect *rect)
-{
- return width > rect->width >> 1 || height > rect->height >> 1;
-}
-
-static void ov6650_bind_align_crop_rectangle(struct v4l2_rect *rect)
-{
- v4l_bound_align_image(&rect->width, 2, W_CIF, 1,
- &rect->height, 2, H_CIF, 1, 0);
- v4l_bound_align_image(&rect->left, DEF_HSTRT << 1,
- (DEF_HSTRT << 1) + W_CIF - (__s32)rect->width, 1,
- &rect->top, DEF_VSTRT << 1,
- (DEF_VSTRT << 1) + H_CIF - (__s32)rect->height,
- 1, 0);
-}
-
-static int ov6650_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_selection *sel)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct ov6650 *priv = to_ov6650(client);
- int ret;
-
- if (sel->target != V4L2_SEL_TGT_CROP)
- return -EINVAL;
-
- ov6650_bind_align_crop_rectangle(&sel->r);
-
- if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
- struct v4l2_rect *crop =
- v4l2_subdev_state_get_crop(sd_state, 0);
- struct v4l2_mbus_framefmt *mf =
- v4l2_subdev_state_get_format(sd_state, 0);
- /* detect current pad config scaling factor */
- bool half_scale = !is_unscaled_ok(mf->width, mf->height, crop);
-
- /* store new crop rectangle */
- *crop = sel->r;
-
- /* adjust frame size */
- mf->width = crop->width >> half_scale;
- mf->height = crop->height >> half_scale;
-
- return 0;
- }
-
- /* V4L2_SUBDEV_FORMAT_ACTIVE */
-
- /* apply new crop rectangle */
- ret = ov6650_reg_write(client, REG_HSTRT, sel->r.left >> 1);
- if (!ret) {
- priv->rect.width += priv->rect.left - sel->r.left;
- priv->rect.left = sel->r.left;
- ret = ov6650_reg_write(client, REG_HSTOP,
- (sel->r.left + sel->r.width) >> 1);
- }
- if (!ret) {
- priv->rect.width = sel->r.width;
- ret = ov6650_reg_write(client, REG_VSTRT, sel->r.top >> 1);
- }
- if (!ret) {
- priv->rect.height += priv->rect.top - sel->r.top;
- priv->rect.top = sel->r.top;
- ret = ov6650_reg_write(client, REG_VSTOP,
- (sel->r.top + sel->r.height) >> 1);
- }
- if (!ret)
- priv->rect.height = sel->r.height;
-
- return ret;
-}
-
-static int ov6650_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_format *format)
-{
- struct v4l2_mbus_framefmt *mf = &format->format;
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct ov6650 *priv = to_ov6650(client);
-
- if (format->pad)
- return -EINVAL;
-
- /* initialize response with default media bus frame format */
- *mf = ov6650_def_fmt;
-
- /* update media bus format code and frame size */
- if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- struct v4l2_mbus_framefmt *try_fmt =
- v4l2_subdev_state_get_format(sd_state, 0);
-
- mf->width = try_fmt->width;
- mf->height = try_fmt->height;
- mf->code = try_fmt->code;
-
- } else {
- mf->width = priv->rect.width >> priv->half_scale;
- mf->height = priv->rect.height >> priv->half_scale;
- mf->code = priv->code;
- }
- return 0;
-}
-
-#define to_clkrc(div) ((div) - 1)
-
-/* set the format we will capture in */
-static int ov6650_s_fmt(struct v4l2_subdev *sd, u32 code, bool half_scale)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct ov6650 *priv = to_ov6650(client);
- u8 coma_set = 0, coma_mask = 0, coml_set, coml_mask;
- int ret;
-
- /* select color matrix configuration for given color encoding */
- switch (code) {
- case MEDIA_BUS_FMT_Y8_1X8:
- dev_dbg(&client->dev, "pixel format GREY8_1X8\n");
- coma_mask |= COMA_RGB | COMA_WORD_SWAP | COMA_BYTE_SWAP;
- coma_set |= COMA_BW;
- break;
- case MEDIA_BUS_FMT_YUYV8_2X8:
- dev_dbg(&client->dev, "pixel format YUYV8_2X8_LE\n");
- coma_mask |= COMA_RGB | COMA_BW | COMA_BYTE_SWAP;
- coma_set |= COMA_WORD_SWAP;
- break;
- case MEDIA_BUS_FMT_YVYU8_2X8:
- dev_dbg(&client->dev, "pixel format YVYU8_2X8_LE (untested)\n");
- coma_mask |= COMA_RGB | COMA_BW | COMA_WORD_SWAP |
- COMA_BYTE_SWAP;
- break;
- case MEDIA_BUS_FMT_UYVY8_2X8:
- dev_dbg(&client->dev, "pixel format YUYV8_2X8_BE\n");
- if (half_scale) {
- coma_mask |= COMA_RGB | COMA_BW | COMA_WORD_SWAP;
- coma_set |= COMA_BYTE_SWAP;
- } else {
- coma_mask |= COMA_RGB | COMA_BW;
- coma_set |= COMA_BYTE_SWAP | COMA_WORD_SWAP;
- }
- break;
- case MEDIA_BUS_FMT_VYUY8_2X8:
- dev_dbg(&client->dev, "pixel format YVYU8_2X8_BE (untested)\n");
- if (half_scale) {
- coma_mask |= COMA_RGB | COMA_BW;
- coma_set |= COMA_BYTE_SWAP | COMA_WORD_SWAP;
- } else {
- coma_mask |= COMA_RGB | COMA_BW | COMA_WORD_SWAP;
- coma_set |= COMA_BYTE_SWAP;
- }
- break;
- case MEDIA_BUS_FMT_SBGGR8_1X8:
- dev_dbg(&client->dev, "pixel format SBGGR8_1X8 (untested)\n");
- coma_mask |= COMA_BW | COMA_BYTE_SWAP | COMA_WORD_SWAP;
- coma_set |= COMA_RAW_RGB | COMA_RGB;
- break;
- default:
- dev_err(&client->dev, "Pixel format not handled: 0x%x\n", code);
- return -EINVAL;
- }
-
- if (code == MEDIA_BUS_FMT_Y8_1X8 ||
- code == MEDIA_BUS_FMT_SBGGR8_1X8) {
- coml_mask = COML_ONE_CHANNEL;
- coml_set = 0;
- } else {
- coml_mask = 0;
- coml_set = COML_ONE_CHANNEL;
- }
-
- if (half_scale) {
- dev_dbg(&client->dev, "max resolution: QCIF\n");
- coma_set |= COMA_QCIF;
- } else {
- dev_dbg(&client->dev, "max resolution: CIF\n");
- coma_mask |= COMA_QCIF;
- }
-
- ret = ov6650_reg_rmw(client, REG_COMA, coma_set, coma_mask);
- if (!ret) {
- priv->half_scale = half_scale;
-
- ret = ov6650_reg_rmw(client, REG_COML, coml_set, coml_mask);
- }
- if (!ret)
- priv->code = code;
-
- return ret;
-}
-
-static int ov6650_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_format *format)
-{
- struct v4l2_mbus_framefmt *mf = &format->format;
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct ov6650 *priv = to_ov6650(client);
- struct v4l2_rect *crop;
- bool half_scale;
-
- if (format->pad)
- return -EINVAL;
-
- switch (mf->code) {
- case MEDIA_BUS_FMT_Y10_1X10:
- mf->code = MEDIA_BUS_FMT_Y8_1X8;
- fallthrough;
- case MEDIA_BUS_FMT_Y8_1X8:
- case MEDIA_BUS_FMT_YVYU8_2X8:
- case MEDIA_BUS_FMT_YUYV8_2X8:
- case MEDIA_BUS_FMT_VYUY8_2X8:
- case MEDIA_BUS_FMT_UYVY8_2X8:
- break;
- default:
- mf->code = MEDIA_BUS_FMT_SBGGR8_1X8;
- fallthrough;
- case MEDIA_BUS_FMT_SBGGR8_1X8:
- break;
- }
-
- if (format->which == V4L2_SUBDEV_FORMAT_TRY)
- crop = v4l2_subdev_state_get_crop(sd_state, 0);
- else
- crop = &priv->rect;
-
- half_scale = !is_unscaled_ok(mf->width, mf->height, crop);
-
- if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- struct v4l2_mbus_framefmt *try_fmt =
- v4l2_subdev_state_get_format(sd_state, 0);
-
- /* store new mbus frame format code and size in pad config */
- try_fmt->width = crop->width >> half_scale;
- try_fmt->height = crop->height >> half_scale;
- try_fmt->code = mf->code;
-
- /* return default mbus frame format updated with pad config */
- *mf = ov6650_def_fmt;
- mf->width = try_fmt->width;
- mf->height = try_fmt->height;
- mf->code = try_fmt->code;
-
- } else {
- int ret = 0;
-
- /* apply new media bus frame format and scaling if changed */
- if (mf->code != priv->code || half_scale != priv->half_scale)
- ret = ov6650_s_fmt(sd, mf->code, half_scale);
- if (ret)
- return ret;
-
- /* return default format updated with active size and code */
- *mf = ov6650_def_fmt;
- mf->width = priv->rect.width >> priv->half_scale;
- mf->height = priv->rect.height >> priv->half_scale;
- mf->code = priv->code;
- }
- return 0;
-}
-
-static int ov6650_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_mbus_code_enum *code)
-{
- if (code->pad || code->index >= ARRAY_SIZE(ov6650_codes))
- return -EINVAL;
-
- code->code = ov6650_codes[code->index];
- return 0;
-}
-
-static int ov6650_enum_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_frame_interval_enum *fie)
-{
- int i;
-
- /* enumerate supported frame intervals not exceeding 1 second */
- if (fie->index > CLKRC_DIV_MASK ||
- GET_CLKRC_DIV(fie->index) > FRAME_RATE_MAX)
- return -EINVAL;
-
- for (i = 0; i < ARRAY_SIZE(ov6650_codes); i++)
- if (fie->code == ov6650_codes[i])
- break;
- if (i == ARRAY_SIZE(ov6650_codes))
- return -EINVAL;
-
- if (!fie->width || fie->width > W_CIF ||
- !fie->height || fie->height > H_CIF)
- return -EINVAL;
-
- fie->interval.numerator = GET_CLKRC_DIV(fie->index);
- fie->interval.denominator = FRAME_RATE_MAX;
-
- return 0;
-}
-
-static int ov6650_get_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_frame_interval *ival)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct ov6650 *priv = to_ov6650(client);
-
- /*
- * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
- * subdev active state API.
- */
- if (ival->which != V4L2_SUBDEV_FORMAT_ACTIVE)
- return -EINVAL;
-
- ival->interval = priv->tpf;
-
- dev_dbg(&client->dev, "Frame interval: %u/%u s\n",
- ival->interval.numerator, ival->interval.denominator);
-
- return 0;
-}
-
-static int ov6650_set_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_frame_interval *ival)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct ov6650 *priv = to_ov6650(client);
- struct v4l2_fract *tpf = &ival->interval;
- int div, ret;
-
- /*
- * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
- * subdev active state API.
- */
- if (ival->which != V4L2_SUBDEV_FORMAT_ACTIVE)
- return -EINVAL;
-
- if (tpf->numerator == 0 || tpf->denominator == 0)
- div = 1; /* Reset to full rate */
- else
- div = (tpf->numerator * FRAME_RATE_MAX) / tpf->denominator;
-
- if (div == 0)
- div = 1;
- else if (div > GET_CLKRC_DIV(CLKRC_DIV_MASK))
- div = GET_CLKRC_DIV(CLKRC_DIV_MASK);
-
- ret = ov6650_reg_rmw(client, REG_CLKRC, to_clkrc(div), CLKRC_DIV_MASK);
- if (!ret) {
- priv->tpf.numerator = div;
- priv->tpf.denominator = FRAME_RATE_MAX;
-
- *tpf = priv->tpf;
- }
-
- return ret;
-}
-
-/* Soft reset the camera. This has nothing to do with the RESET pin! */
-static int ov6650_reset(struct i2c_client *client)
-{
- int ret;
-
- dev_dbg(&client->dev, "reset\n");
-
- ret = ov6650_reg_rmw(client, REG_COMA, COMA_RESET, 0);
- if (ret)
- dev_err(&client->dev,
- "An error occurred while entering soft reset!\n");
-
- return ret;
-}
-
-/* program default register values */
-static int ov6650_prog_dflt(struct i2c_client *client, u8 clkrc)
-{
- int ret;
-
- dev_dbg(&client->dev, "initializing\n");
-
- ret = ov6650_reg_write(client, REG_COMA, 0); /* ~COMA_RESET */
- if (!ret)
- ret = ov6650_reg_write(client, REG_CLKRC, clkrc);
- if (!ret)
- ret = ov6650_reg_rmw(client, REG_COMB, 0, COMB_BAND_FILTER);
-
- return ret;
-}
-
-static int ov6650_video_probe(struct v4l2_subdev *sd)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct ov6650 *priv = to_ov6650(client);
- const struct ov6650_xclk *xclk = NULL;
- unsigned long rate;
- u8 pidh, pidl, midh, midl;
- int i, ret = 0;
-
- priv->clk = devm_clk_get(&client->dev, NULL);
- if (IS_ERR(priv->clk)) {
- ret = PTR_ERR(priv->clk);
- dev_err(&client->dev, "clk request err: %d\n", ret);
- return ret;
- }
-
- rate = clk_get_rate(priv->clk);
- for (i = 0; rate && i < ARRAY_SIZE(ov6650_xclk); i++) {
- if (rate != ov6650_xclk[i].rate)
- continue;
-
- xclk = &ov6650_xclk[i];
- dev_info(&client->dev, "using host default clock rate %lukHz\n",
- rate / 1000);
- break;
- }
- for (i = 0; !xclk && i < ARRAY_SIZE(ov6650_xclk); i++) {
- ret = clk_set_rate(priv->clk, ov6650_xclk[i].rate);
- if (ret || clk_get_rate(priv->clk) != ov6650_xclk[i].rate)
- continue;
-
- xclk = &ov6650_xclk[i];
- dev_info(&client->dev, "using negotiated clock rate %lukHz\n",
- xclk->rate / 1000);
- break;
- }
- if (!xclk) {
- dev_err(&client->dev, "unable to get supported clock rate\n");
- if (!ret)
- ret = -EINVAL;
- return ret;
- }
-
- ret = ov6650_s_power(sd, 1);
- if (ret < 0)
- return ret;
-
- msleep(20);
-
- /*
- * check and show product ID and manufacturer ID
- */
- ret = ov6650_reg_read(client, REG_PIDH, &pidh);
- if (!ret)
- ret = ov6650_reg_read(client, REG_PIDL, &pidl);
- if (!ret)
- ret = ov6650_reg_read(client, REG_MIDH, &midh);
- if (!ret)
- ret = ov6650_reg_read(client, REG_MIDL, &midl);
-
- if (ret)
- goto done;
-
- if ((pidh != OV6650_PIDH) || (pidl != OV6650_PIDL)) {
- dev_err(&client->dev, "Product ID error 0x%02x:0x%02x\n",
- pidh, pidl);
- ret = -ENODEV;
- goto done;
- }
-
- dev_info(&client->dev,
- "ov6650 Product ID 0x%02x:0x%02x Manufacturer ID 0x%02x:0x%02x\n",
- pidh, pidl, midh, midl);
-
- ret = ov6650_reset(client);
- if (!ret)
- ret = ov6650_prog_dflt(client, xclk->clkrc);
- if (!ret) {
- /* driver default frame format, no scaling */
- ret = ov6650_s_fmt(sd, ov6650_def_fmt.code, false);
- }
- if (!ret)
- ret = v4l2_ctrl_handler_setup(&priv->hdl);
-
-done:
- ov6650_s_power(sd, 0);
- return ret;
-}
-
-static const struct v4l2_ctrl_ops ov6550_ctrl_ops = {
- .g_volatile_ctrl = ov6550_g_volatile_ctrl,
- .s_ctrl = ov6550_s_ctrl,
-};
-
-static const struct v4l2_subdev_core_ops ov6650_core_ops = {
-#ifdef CONFIG_VIDEO_ADV_DEBUG
- .g_register = ov6650_get_register,
- .s_register = ov6650_set_register,
-#endif
- .s_power = ov6650_s_power,
-};
-
-/* Request bus settings on camera side */
-static int ov6650_get_mbus_config(struct v4l2_subdev *sd,
- unsigned int pad,
- struct v4l2_mbus_config *cfg)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- u8 comj, comf;
- int ret;
-
- ret = ov6650_reg_read(client, REG_COMJ, &comj);
- if (ret)
- return ret;
-
- ret = ov6650_reg_read(client, REG_COMF, &comf);
- if (ret)
- return ret;
-
- cfg->type = V4L2_MBUS_PARALLEL;
-
- cfg->bus.parallel.flags = V4L2_MBUS_MASTER | V4L2_MBUS_DATA_ACTIVE_HIGH
- | ((comj & COMJ_VSYNC_HIGH) ? V4L2_MBUS_VSYNC_ACTIVE_HIGH
- : V4L2_MBUS_VSYNC_ACTIVE_LOW)
- | ((comf & COMF_HREF_LOW) ? V4L2_MBUS_HSYNC_ACTIVE_LOW
- : V4L2_MBUS_HSYNC_ACTIVE_HIGH)
- | ((comj & COMJ_PCLK_RISING) ? V4L2_MBUS_PCLK_SAMPLE_RISING
- : V4L2_MBUS_PCLK_SAMPLE_FALLING);
- return 0;
-}
-
-static const struct v4l2_subdev_video_ops ov6650_video_ops = {
- .s_stream = ov6650_s_stream,
-};
-
-static const struct v4l2_subdev_pad_ops ov6650_pad_ops = {
- .enum_mbus_code = ov6650_enum_mbus_code,
- .enum_frame_interval = ov6650_enum_frame_interval,
- .get_selection = ov6650_get_selection,
- .set_selection = ov6650_set_selection,
- .get_fmt = ov6650_get_fmt,
- .set_fmt = ov6650_set_fmt,
- .get_frame_interval = ov6650_get_frame_interval,
- .set_frame_interval = ov6650_set_frame_interval,
- .get_mbus_config = ov6650_get_mbus_config,
-};
-
-static const struct v4l2_subdev_ops ov6650_subdev_ops = {
- .core = &ov6650_core_ops,
- .video = &ov6650_video_ops,
- .pad = &ov6650_pad_ops,
-};
-
-static const struct v4l2_subdev_internal_ops ov6650_internal_ops = {
- .registered = ov6650_video_probe,
-};
-
-/*
- * i2c_driver function
- */
-static int ov6650_probe(struct i2c_client *client)
-{
- struct ov6650 *priv;
- int ret;
-
- priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- v4l2_i2c_subdev_init(&priv->subdev, client, &ov6650_subdev_ops);
- v4l2_ctrl_handler_init(&priv->hdl, 13);
- v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
- V4L2_CID_VFLIP, 0, 1, 1, 0);
- v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
- V4L2_CID_HFLIP, 0, 1, 1, 0);
- priv->autogain = v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
- V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
- priv->gain = v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
- V4L2_CID_GAIN, 0, 0x3f, 1, DEF_GAIN);
- priv->autowb = v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
- V4L2_CID_AUTO_WHITE_BALANCE, 0, 1, 1, 1);
- priv->blue = v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
- V4L2_CID_BLUE_BALANCE, 0, 0xff, 1, DEF_BLUE);
- priv->red = v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
- V4L2_CID_RED_BALANCE, 0, 0xff, 1, DEF_RED);
- v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
- V4L2_CID_SATURATION, 0, 0xf, 1, 0x8);
- v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
- V4L2_CID_HUE, 0, HUE_MASK, 1, DEF_HUE);
- v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
- V4L2_CID_BRIGHTNESS, 0, 0xff, 1, 0x80);
- priv->autoexposure = v4l2_ctrl_new_std_menu(&priv->hdl,
- &ov6550_ctrl_ops, V4L2_CID_EXPOSURE_AUTO,
- V4L2_EXPOSURE_MANUAL, 0, V4L2_EXPOSURE_AUTO);
- priv->exposure = v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
- V4L2_CID_EXPOSURE, 0, 0xff, 1, DEF_AECH);
- v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
- V4L2_CID_GAMMA, 0, 0xff, 1, 0x12);
-
- priv->subdev.ctrl_handler = &priv->hdl;
- if (priv->hdl.error) {
- ret = priv->hdl.error;
- goto ectlhdlfree;
- }
-
- v4l2_ctrl_auto_cluster(2, &priv->autogain, 0, true);
- v4l2_ctrl_auto_cluster(3, &priv->autowb, 0, true);
- v4l2_ctrl_auto_cluster(2, &priv->autoexposure,
- V4L2_EXPOSURE_MANUAL, true);
-
- priv->rect.left = DEF_HSTRT << 1;
- priv->rect.top = DEF_VSTRT << 1;
- priv->rect.width = W_CIF;
- priv->rect.height = H_CIF;
-
- /* Hardware default frame interval */
- priv->tpf.numerator = GET_CLKRC_DIV(DEF_CLKRC);
- priv->tpf.denominator = FRAME_RATE_MAX;
-
- priv->subdev.internal_ops = &ov6650_internal_ops;
-
- ret = v4l2_async_register_subdev(&priv->subdev);
- if (!ret)
- return 0;
-ectlhdlfree:
- v4l2_ctrl_handler_free(&priv->hdl);
-
- return ret;
-}
-
-static void ov6650_remove(struct i2c_client *client)
-{
- struct ov6650 *priv = to_ov6650(client);
-
- v4l2_async_unregister_subdev(&priv->subdev);
- v4l2_ctrl_handler_free(&priv->hdl);
-}
-
-static const struct i2c_device_id ov6650_id[] = {
- { "ov6650" },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, ov6650_id);
-
-static struct i2c_driver ov6650_i2c_driver = {
- .driver = {
- .name = "ov6650",
- },
- .probe = ov6650_probe,
- .remove = ov6650_remove,
- .id_table = ov6650_id,
-};
-
-module_i2c_driver(ov6650_i2c_driver);
-
-MODULE_DESCRIPTION("V4L2 subdevice driver for OmniVision OV6650 camera sensor");
-MODULE_AUTHOR("Janusz Krzysztofik <jmkrzyszt@gmail.com");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/ov7251.c b/drivers/media/i2c/ov7251.c
index 31a42d81e970..27afc3fc0175 100644
--- a/drivers/media/i2c/ov7251.c
+++ b/drivers/media/i2c/ov7251.c
@@ -1630,7 +1630,6 @@ static int ov7251_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct ov7251 *ov7251;
- unsigned int rate = 0, clk_rate = 0;
int ret;
int i;
@@ -1646,33 +1645,12 @@ static int ov7251_probe(struct i2c_client *client)
return ret;
/* get system clock (xclk) */
- ov7251->xclk = devm_clk_get_optional(dev, NULL);
+ ov7251->xclk = devm_v4l2_sensor_clk_get(dev, NULL);
if (IS_ERR(ov7251->xclk))
return dev_err_probe(dev, PTR_ERR(ov7251->xclk),
"could not get xclk");
- /*
- * We could have either a 24MHz or 19.2MHz clock rate from either DT or
- * ACPI. We also need to support the IPU3 case which will have both an
- * external clock AND a clock-frequency property.
- */
- ret = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency",
- &rate);
- if (ret && !ov7251->xclk)
- return dev_err_probe(dev, ret, "invalid clock config\n");
-
- clk_rate = clk_get_rate(ov7251->xclk);
- ov7251->xclk_freq = clk_rate ? clk_rate : rate;
-
- if (ov7251->xclk_freq == 0)
- return dev_err_probe(dev, -EINVAL, "invalid clock frequency\n");
-
- if (!ret && ov7251->xclk) {
- ret = clk_set_rate(ov7251->xclk, rate);
- if (ret)
- return dev_err_probe(dev, ret,
- "failed to set clock rate\n");
- }
+ ov7251->xclk_freq = clk_get_rate(ov7251->xclk);
for (i = 0; i < ARRAY_SIZE(supported_xclk_rates); i++)
if (ov7251->xclk_freq == supported_xclk_rates[i])
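Note: this conversion, like the ov7740/ov8858/ov9282/ov9640/ov9650 ones below, replaces open-coded clock lookup with devm_v4l2_sensor_clk_get(). Judging from the code deleted here, the helper absorbs the ACPI/IPU3 special case where a "clock-frequency" property may stand in for, or accompany, a real clock reference. A sketch of the probe pattern the converted drivers converge on; the rate table, the function names, and the assumption that the helper is declared in media/v4l2-common.h are illustrative:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <media/v4l2-common.h>	/* assumed home of devm_v4l2_sensor_clk_get() */

static const unsigned long sensor_xclk_rates[] = { 19200000, 24000000 };

/* Illustrative sketch of the converted probe pattern. */
static int sensor_get_xclk(struct device *dev, struct clk **xclk,
			   unsigned long *freq)
{
	unsigned int i;

	*xclk = devm_v4l2_sensor_clk_get(dev, NULL);
	if (IS_ERR(*xclk))
		return dev_err_probe(dev, PTR_ERR(*xclk),
				     "could not get xclk\n");

	*freq = clk_get_rate(*xclk);
	for (i = 0; i < ARRAY_SIZE(sensor_xclk_rates); i++)
		if (*freq == sensor_xclk_rates[i])
			return 0;

	return dev_err_probe(dev, -EINVAL,
			     "unsupported xclk rate %lu\n", *freq);
}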
diff --git a/drivers/media/i2c/ov7740.c b/drivers/media/i2c/ov7740.c
index 1f1c0de8e510..632fb80469be 100644
--- a/drivers/media/i2c/ov7740.c
+++ b/drivers/media/i2c/ov7740.c
@@ -1036,13 +1036,10 @@ static int ov7740_probe(struct i2c_client *client)
if (!ov7740)
return -ENOMEM;
- ov7740->xvclk = devm_clk_get(&client->dev, "xvclk");
- if (IS_ERR(ov7740->xvclk)) {
- ret = PTR_ERR(ov7740->xvclk);
- dev_err(&client->dev,
- "OV7740: fail to get xvclk: %d\n", ret);
- return ret;
- }
+ ov7740->xvclk = devm_v4l2_sensor_clk_get(&client->dev, "xvclk");
+ if (IS_ERR(ov7740->xvclk))
+ return dev_err_probe(&client->dev, PTR_ERR(ov7740->xvclk),
+ "OV7740: fail to get xvclk\n");
ret = ov7740_probe_dt(client, ov7740);
if (ret)
diff --git a/drivers/media/i2c/ov8856.c b/drivers/media/i2c/ov8856.c
index 4b6874d2a104..e2998cfa0d18 100644
--- a/drivers/media/i2c/ov8856.c
+++ b/drivers/media/i2c/ov8856.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Intel Corporation.
-#include <linux/unaligned.h>
#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/delay.h>
@@ -10,6 +9,8 @@
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
+#include <linux/unaligned.h>
+
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
@@ -1414,6 +1415,8 @@ static const struct ov8856_reg_list bayer_offset_configs[] = {
};
struct ov8856 {
+ struct device *dev;
+
struct v4l2_subdev sd;
struct media_pad pad;
struct v4l2_ctrl_handler ctrl_handler;
@@ -1668,7 +1671,6 @@ static int ov8856_write_reg(struct ov8856 *ov8856, u16 reg, u16 len, u32 val)
static int ov8856_write_reg_list(struct ov8856 *ov8856,
const struct ov8856_reg_list *r_list)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov8856->sd);
unsigned int i;
int ret;
@@ -1676,7 +1678,7 @@ static int ov8856_write_reg_list(struct ov8856 *ov8856,
ret = ov8856_write_reg(ov8856, r_list->regs[i].address, 1,
r_list->regs[i].val);
if (ret) {
- dev_err_ratelimited(&client->dev,
+ dev_err_ratelimited(ov8856->dev,
"failed to write reg 0x%4.4x. error = %d",
r_list->regs[i].address, ret);
return ret;
@@ -1688,7 +1690,6 @@ static int ov8856_write_reg_list(struct ov8856 *ov8856,
static int ov8856_identify_module(struct ov8856 *ov8856)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov8856->sd);
int ret;
u32 val;
@@ -1701,7 +1702,7 @@ static int ov8856_identify_module(struct ov8856 *ov8856)
return ret;
if (val != OV8856_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%x",
+ dev_err(ov8856->dev, "chip id mismatch: %x!=%x",
OV8856_CHIP_ID, val);
return -ENXIO;
}
@@ -1818,7 +1819,6 @@ static int ov8856_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct ov8856 *ov8856 = container_of(ctrl->handler,
struct ov8856, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&ov8856->sd);
s64 exposure_max;
int ret = 0;
@@ -1834,7 +1834,7 @@ static int ov8856_set_ctrl(struct v4l2_ctrl *ctrl)
}
/* V4L2 controls values will be applied only when power is already up */
- if (!pm_runtime_get_if_in_use(&client->dev))
+ if (!pm_runtime_get_if_in_use(ov8856->dev))
return 0;
switch (ctrl->id) {
@@ -1876,7 +1876,7 @@ static int ov8856_set_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov8856->dev);
return ret;
}
@@ -1979,7 +1979,6 @@ static void ov8856_update_pad_format(struct ov8856 *ov8856,
static int ov8856_start_streaming(struct ov8856 *ov8856)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov8856->sd);
const struct ov8856_reg_list *reg_list;
int link_freq_index, ret;
@@ -1992,21 +1991,21 @@ static int ov8856_start_streaming(struct ov8856 *ov8856)
ret = ov8856_write_reg_list(ov8856, reg_list);
if (ret) {
- dev_err(&client->dev, "failed to set plls");
+ dev_err(ov8856->dev, "failed to set plls");
return ret;
}
reg_list = &ov8856->cur_mode->reg_list;
ret = ov8856_write_reg_list(ov8856, reg_list);
if (ret) {
- dev_err(&client->dev, "failed to set mode");
+ dev_err(ov8856->dev, "failed to set mode");
return ret;
}
reg_list = &bayer_offset_configs[ov8856->cur_mbus_index];
ret = ov8856_write_reg_list(ov8856, reg_list);
if (ret) {
- dev_err(&client->dev, "failed to set mbus format");
+ dev_err(ov8856->dev, "failed to set mbus format");
return ret;
}
@@ -2017,7 +2016,7 @@ static int ov8856_start_streaming(struct ov8856 *ov8856)
ret = ov8856_write_reg(ov8856, OV8856_REG_MODE_SELECT,
OV8856_REG_VALUE_08BIT, OV8856_MODE_STREAMING);
if (ret) {
- dev_err(&client->dev, "failed to set stream");
+ dev_err(ov8856->dev, "failed to set stream");
return ret;
}
@@ -2026,22 +2025,19 @@ static int ov8856_start_streaming(struct ov8856 *ov8856)
static void ov8856_stop_streaming(struct ov8856 *ov8856)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov8856->sd);
-
if (ov8856_write_reg(ov8856, OV8856_REG_MODE_SELECT,
OV8856_REG_VALUE_08BIT, OV8856_MODE_STANDBY))
- dev_err(&client->dev, "failed to set stream");
+ dev_err(ov8856->dev, "failed to set stream");
}
static int ov8856_set_stream(struct v4l2_subdev *sd, int enable)
{
struct ov8856 *ov8856 = to_ov8856(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = 0;
mutex_lock(&ov8856->mutex);
if (enable) {
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(ov8856->dev);
if (ret < 0) {
mutex_unlock(&ov8856->mutex);
return ret;
@@ -2051,11 +2047,11 @@ static int ov8856_set_stream(struct v4l2_subdev *sd, int enable)
if (ret) {
enable = 0;
ov8856_stop_streaming(ov8856);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov8856->dev);
}
} else {
ov8856_stop_streaming(ov8856);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov8856->dev);
}
mutex_unlock(&ov8856->mutex);
@@ -2255,8 +2251,9 @@ static const struct v4l2_subdev_internal_ops ov8856_internal_ops = {
};
-static int ov8856_get_hwcfg(struct ov8856 *ov8856, struct device *dev)
+static int ov8856_get_hwcfg(struct ov8856 *ov8856)
{
+ struct device *dev = ov8856->dev;
struct fwnode_handle *ep;
struct fwnode_handle *fwnode = dev_fwnode(dev);
struct v4l2_fwnode_endpoint bus_cfg = {
@@ -2269,21 +2266,17 @@ static int ov8856_get_hwcfg(struct ov8856 *ov8856, struct device *dev)
if (!fwnode)
return -ENXIO;
- ret = fwnode_property_read_u32(fwnode, "clock-frequency", &xvclk_rate);
- if (ret)
- return ret;
-
- if (!is_acpi_node(fwnode)) {
- ov8856->xvclk = devm_clk_get(dev, "xvclk");
- if (IS_ERR(ov8856->xvclk)) {
- dev_err_probe(dev, PTR_ERR(ov8856->xvclk),
- "could not get xvclk clock\n");
- return PTR_ERR(ov8856->xvclk);
- }
+ ov8856->xvclk = devm_v4l2_sensor_clk_get_legacy(dev, "xvclk", false, 0);
+ if (IS_ERR(ov8856->xvclk))
+ return dev_err_probe(dev, PTR_ERR(ov8856->xvclk),
+ "could not get xvclk clock\n");
- clk_set_rate(ov8856->xvclk, xvclk_rate);
- xvclk_rate = clk_get_rate(ov8856->xvclk);
+ xvclk_rate = clk_get_rate(ov8856->xvclk);
+ if (xvclk_rate != OV8856_XVCLK_19_2)
+ dev_warn(dev, "external clock rate %u is unsupported",
+ xvclk_rate);
+ if (!is_acpi_node(fwnode)) {
ov8856->reset_gpio = devm_gpiod_get_optional(dev, "reset",
GPIOD_OUT_LOW);
if (IS_ERR(ov8856->reset_gpio))
@@ -2299,10 +2292,6 @@ static int ov8856_get_hwcfg(struct ov8856 *ov8856, struct device *dev)
return ret;
}
- if (xvclk_rate != OV8856_XVCLK_19_2)
- dev_warn(dev, "external clock rate %u is unsupported",
- xvclk_rate);
-
ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
if (!ep)
return -ENXIO;
@@ -2365,10 +2354,10 @@ static void ov8856_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
v4l2_ctrl_handler_free(sd->ctrl_handler);
- pm_runtime_disable(&client->dev);
+ pm_runtime_disable(ov8856->dev);
mutex_destroy(&ov8856->mutex);
- ov8856_power_off(&client->dev);
+ ov8856_power_off(ov8856->dev);
}
static int ov8856_probe(struct i2c_client *client)
@@ -2381,23 +2370,25 @@ static int ov8856_probe(struct i2c_client *client)
if (!ov8856)
return -ENOMEM;
- ret = ov8856_get_hwcfg(ov8856, &client->dev);
+ ov8856->dev = &client->dev;
+
+ ret = ov8856_get_hwcfg(ov8856);
if (ret)
return ret;
v4l2_i2c_subdev_init(&ov8856->sd, client, &ov8856_subdev_ops);
- full_power = acpi_dev_state_d0(&client->dev);
+ full_power = acpi_dev_state_d0(ov8856->dev);
if (full_power) {
- ret = ov8856_power_on(&client->dev);
+ ret = ov8856_power_on(ov8856->dev);
if (ret) {
- dev_err(&client->dev, "failed to power on\n");
+ dev_err(ov8856->dev, "failed to power on\n");
return ret;
}
ret = ov8856_identify_module(ov8856);
if (ret) {
- dev_err(&client->dev, "failed to find sensor: %d", ret);
+ dev_err(ov8856->dev, "failed to find sensor: %d", ret);
goto probe_power_off;
}
}
@@ -2407,7 +2398,7 @@ static int ov8856_probe(struct i2c_client *client)
ov8856->cur_mbus_index = ov8856->cur_mode->default_mbus_index;
ret = ov8856_init_controls(ov8856);
if (ret) {
- dev_err(&client->dev, "failed to init controls: %d", ret);
+ dev_err(ov8856->dev, "failed to init controls: %d", ret);
goto probe_error_v4l2_ctrl_handler_free;
}
@@ -2418,22 +2409,22 @@ static int ov8856_probe(struct i2c_client *client)
ov8856->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&ov8856->sd.entity, 1, &ov8856->pad);
if (ret) {
- dev_err(&client->dev, "failed to init entity pads: %d", ret);
+ dev_err(ov8856->dev, "failed to init entity pads: %d", ret);
goto probe_error_v4l2_ctrl_handler_free;
}
ret = v4l2_async_register_subdev_sensor(&ov8856->sd);
if (ret < 0) {
- dev_err(&client->dev, "failed to register V4L2 subdev: %d",
+ dev_err(ov8856->dev, "failed to register V4L2 subdev: %d",
ret);
goto probe_error_media_entity_cleanup;
}
/* Set the device's state to active if it's in D0 state. */
if (full_power)
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
- pm_runtime_idle(&client->dev);
+ pm_runtime_set_active(ov8856->dev);
+ pm_runtime_enable(ov8856->dev);
+ pm_runtime_idle(ov8856->dev);
return 0;
@@ -2445,7 +2436,7 @@ probe_error_v4l2_ctrl_handler_free:
mutex_destroy(&ov8856->mutex);
probe_power_off:
- ov8856_power_off(&client->dev);
+ ov8856_power_off(ov8856->dev);
return ret;
}
diff --git a/drivers/media/i2c/ov8858.c b/drivers/media/i2c/ov8858.c
index 6b7193eaea1f..3f45f7fab833 100644
--- a/drivers/media/i2c/ov8858.c
+++ b/drivers/media/i2c/ov8858.c
@@ -1876,7 +1876,7 @@ static int ov8858_probe(struct i2c_client *client)
if (!ov8858)
return -ENOMEM;
- ov8858->xvclk = devm_clk_get(dev, "xvclk");
+ ov8858->xvclk = devm_v4l2_sensor_clk_get(dev, "xvclk");
if (IS_ERR(ov8858->xvclk))
return dev_err_probe(dev, PTR_ERR(ov8858->xvclk),
"Failed to get xvclk\n");
diff --git a/drivers/media/i2c/ov8865.c b/drivers/media/i2c/ov8865.c
index a2138f7988aa..a8586df14f77 100644
--- a/drivers/media/i2c/ov8865.c
+++ b/drivers/media/i2c/ov8865.c
@@ -2304,14 +2304,6 @@ static int ov8865_state_configure(struct ov8865_sensor *sensor,
if (sensor->state.streaming)
return -EBUSY;
- /* State will be configured at first power on otherwise. */
- if (pm_runtime_enabled(sensor->dev) &&
- !pm_runtime_suspended(sensor->dev)) {
- ret = ov8865_mode_configure(sensor, mode, mbus_code);
- if (ret)
- return ret;
- }
-
ret = ov8865_state_mipi_configure(sensor, mode, mbus_code);
if (ret)
return ret;
@@ -2384,10 +2376,10 @@ static int ov8865_sensor_init(struct ov8865_sensor *sensor)
}
/* Configure current mode. */
- ret = ov8865_state_configure(sensor, sensor->state.mode,
- sensor->state.mbus_code);
+ ret = ov8865_mode_configure(sensor, sensor->state.mode,
+ sensor->state.mbus_code);
if (ret) {
- dev_err(sensor->dev, "failed to configure state\n");
+ dev_err(sensor->dev, "failed to configure mode\n");
return ret;
}
@@ -2956,7 +2948,6 @@ static int ov8865_probe(struct i2c_client *client)
struct ov8865_sensor *sensor;
struct v4l2_subdev *subdev;
struct media_pad *pad;
- unsigned int rate = 0;
unsigned int i;
int ret;
@@ -3020,39 +3011,14 @@ static int ov8865_probe(struct i2c_client *client)
/* External Clock */
- sensor->extclk = devm_clk_get(dev, NULL);
- if (PTR_ERR(sensor->extclk) == -ENOENT) {
- dev_info(dev, "no external clock found, continuing...\n");
- sensor->extclk = NULL;
- } else if (IS_ERR(sensor->extclk)) {
- dev_err(dev, "failed to get external clock\n");
- ret = PTR_ERR(sensor->extclk);
- goto error_endpoint;
- }
-
- /*
- * We could have either a 24MHz or 19.2MHz clock rate from either dt or
- * ACPI...but we also need to support the weird IPU3 case which will
- * have an external clock AND a clock-frequency property. Check for the
- * clock-frequency property and if found, set that rate if we managed
- * to acquire a clock. This should cover the ACPI case. If the system
- * uses devicetree then the configured rate should already be set, so
- * we can just read it.
- */
- ret = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency",
- &rate);
- if (!ret && sensor->extclk) {
- ret = clk_set_rate(sensor->extclk, rate);
- if (ret) {
- dev_err_probe(dev, ret, "failed to set clock rate\n");
- goto error_endpoint;
- }
- } else if (ret && !sensor->extclk) {
- dev_err_probe(dev, ret, "invalid clock config\n");
+ sensor->extclk = devm_v4l2_sensor_clk_get(dev, NULL);
+ if (IS_ERR(sensor->extclk)) {
+ ret = dev_err_probe(dev, PTR_ERR(sensor->extclk),
+ "failed to get external clock\n");
goto error_endpoint;
}
- sensor->extclk_rate = rate ? rate : clk_get_rate(sensor->extclk);
+ sensor->extclk_rate = clk_get_rate(sensor->extclk);
for (i = 0; i < ARRAY_SIZE(supported_extclk_rates); i++) {
if (sensor->extclk_rate == supported_extclk_rates[i])
diff --git a/drivers/media/i2c/ov9282.c b/drivers/media/i2c/ov9282.c
index c882a021cf18..a9f6176e9729 100644
--- a/drivers/media/i2c/ov9282.c
+++ b/drivers/media/i2c/ov9282.c
@@ -1135,11 +1135,10 @@ static int ov9282_parse_hw_config(struct ov9282 *ov9282)
}
/* Get sensor input clock */
- ov9282->inclk = devm_clk_get(ov9282->dev, NULL);
- if (IS_ERR(ov9282->inclk)) {
- dev_err(ov9282->dev, "could not get inclk");
- return PTR_ERR(ov9282->inclk);
- }
+ ov9282->inclk = devm_v4l2_sensor_clk_get(ov9282->dev, NULL);
+ if (IS_ERR(ov9282->inclk))
+ return dev_err_probe(ov9282->dev, PTR_ERR(ov9282->inclk),
+ "could not get inclk\n");
ret = ov9282_configure_regulators(ov9282);
if (ret)
diff --git a/drivers/media/i2c/ov9640.c b/drivers/media/i2c/ov9640.c
index 01dbc0ba89c8..2190c52b1433 100644
--- a/drivers/media/i2c/ov9640.c
+++ b/drivers/media/i2c/ov9640.c
@@ -718,9 +718,10 @@ static int ov9640_probe(struct i2c_client *client)
priv->subdev.ctrl_handler = &priv->hdl;
- priv->clk = devm_clk_get(&client->dev, "mclk");
+ priv->clk = devm_v4l2_sensor_clk_get(&client->dev, "mclk");
if (IS_ERR(priv->clk)) {
- ret = PTR_ERR(priv->clk);
+ ret = dev_err_probe(&client->dev, PTR_ERR(priv->clk),
+ "failed to get mclk\n");
goto ectrlinit;
}
diff --git a/drivers/media/i2c/ov9650.c b/drivers/media/i2c/ov9650.c
index 026ea34d6291..c94e8fe29f22 100644
--- a/drivers/media/i2c/ov9650.c
+++ b/drivers/media/i2c/ov9650.c
@@ -1494,9 +1494,10 @@ static int ov965x_probe(struct i2c_client *client)
}
if (dev_fwnode(&client->dev)) {
- ov965x->clk = devm_clk_get(&client->dev, NULL);
+ ov965x->clk = devm_v4l2_sensor_clk_get(&client->dev, NULL);
if (IS_ERR(ov965x->clk))
- return PTR_ERR(ov965x->clk);
+ return dev_err_probe(&client->dev, PTR_ERR(ov965x->clk),
+ "failed to get the clock\n");
ov965x->mclk_frequency = clk_get_rate(ov965x->clk);
ret = ov965x_configure_gpios(ov965x);
diff --git a/drivers/media/i2c/ov9734.c b/drivers/media/i2c/ov9734.c
index cae3aeefb616..0eaf33807fc9 100644
--- a/drivers/media/i2c/ov9734.c
+++ b/drivers/media/i2c/ov9734.c
@@ -1,12 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Intel Corporation.
-#include <linux/unaligned.h>
#include <linux/acpi.h>
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
+#include <linux/unaligned.h>
+
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
@@ -321,6 +323,9 @@ static const struct ov9734_mode supported_modes[] = {
};
struct ov9734 {
+ struct device *dev;
+ struct clk *clk;
+
struct v4l2_subdev sd;
struct media_pad pad;
struct v4l2_ctrl_handler ctrl_handler;
@@ -414,7 +419,6 @@ static int ov9734_write_reg(struct ov9734 *ov9734, u16 reg, u16 len, u32 val)
static int ov9734_write_reg_list(struct ov9734 *ov9734,
const struct ov9734_reg_list *r_list)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov9734->sd);
unsigned int i;
int ret;
@@ -422,7 +426,7 @@ static int ov9734_write_reg_list(struct ov9734 *ov9734,
ret = ov9734_write_reg(ov9734, r_list->regs[i].address, 1,
r_list->regs[i].val);
if (ret) {
- dev_err_ratelimited(&client->dev,
+ dev_err_ratelimited(ov9734->dev,
"write reg 0x%4.4x return err = %d",
r_list->regs[i].address, ret);
return ret;
@@ -476,7 +480,6 @@ static int ov9734_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct ov9734 *ov9734 = container_of(ctrl->handler,
struct ov9734, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&ov9734->sd);
s64 exposure_max;
int ret = 0;
@@ -492,7 +495,7 @@ static int ov9734_set_ctrl(struct v4l2_ctrl *ctrl)
}
/* V4L2 controls values will be applied only when power is already up */
- if (!pm_runtime_get_if_in_use(&client->dev))
+ if (!pm_runtime_get_if_in_use(ov9734->dev))
return 0;
switch (ctrl->id) {
@@ -525,7 +528,7 @@ static int ov9734_set_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov9734->dev);
return ret;
}
@@ -610,7 +613,6 @@ static void ov9734_update_pad_format(const struct ov9734_mode *mode,
static int ov9734_start_streaming(struct ov9734 *ov9734)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov9734->sd);
const struct ov9734_reg_list *reg_list;
int link_freq_index, ret;
@@ -618,14 +620,14 @@ static int ov9734_start_streaming(struct ov9734 *ov9734)
reg_list = &link_freq_configs[link_freq_index].reg_list;
ret = ov9734_write_reg_list(ov9734, reg_list);
if (ret) {
- dev_err(&client->dev, "failed to set plls");
+ dev_err(ov9734->dev, "failed to set plls");
return ret;
}
reg_list = &ov9734->cur_mode->reg_list;
ret = ov9734_write_reg_list(ov9734, reg_list);
if (ret) {
- dev_err(&client->dev, "failed to set mode");
+ dev_err(ov9734->dev, "failed to set mode");
return ret;
}
@@ -636,30 +638,27 @@ static int ov9734_start_streaming(struct ov9734 *ov9734)
ret = ov9734_write_reg(ov9734, OV9734_REG_MODE_SELECT,
1, OV9734_MODE_STREAMING);
if (ret)
- dev_err(&client->dev, "failed to start stream");
+ dev_err(ov9734->dev, "failed to start stream");
return ret;
}
static void ov9734_stop_streaming(struct ov9734 *ov9734)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov9734->sd);
-
if (ov9734_write_reg(ov9734, OV9734_REG_MODE_SELECT,
1, OV9734_MODE_STANDBY))
- dev_err(&client->dev, "failed to stop stream");
+ dev_err(ov9734->dev, "failed to stop stream");
}
static int ov9734_set_stream(struct v4l2_subdev *sd, int enable)
{
struct ov9734 *ov9734 = to_ov9734(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = 0;
mutex_lock(&ov9734->mutex);
if (enable) {
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(ov9734->dev);
if (ret < 0) {
mutex_unlock(&ov9734->mutex);
return ret;
@@ -669,11 +668,11 @@ static int ov9734_set_stream(struct v4l2_subdev *sd, int enable)
if (ret) {
enable = 0;
ov9734_stop_streaming(ov9734);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov9734->dev);
}
} else {
ov9734_stop_streaming(ov9734);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov9734->dev);
}
mutex_unlock(&ov9734->mutex);
@@ -808,7 +807,6 @@ static const struct v4l2_subdev_internal_ops ov9734_internal_ops = {
static int ov9734_identify_module(struct ov9734 *ov9734)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov9734->sd);
int ret;
u32 val;
@@ -817,7 +815,7 @@ static int ov9734_identify_module(struct ov9734 *ov9734)
return ret;
if (val != OV9734_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%x",
+ dev_err(ov9734->dev, "chip id mismatch: %x!=%x",
OV9734_CHIP_ID, val);
return -ENXIO;
}
@@ -832,22 +830,12 @@ static int ov9734_check_hwcfg(struct device *dev)
struct v4l2_fwnode_endpoint bus_cfg = {
.bus_type = V4L2_MBUS_CSI2_DPHY
};
- u32 mclk;
int ret;
unsigned int i, j;
if (!fwnode)
return -ENXIO;
- ret = fwnode_property_read_u32(fwnode, "clock-frequency", &mclk);
- if (ret)
- return ret;
-
- if (mclk != OV9734_MCLK) {
- dev_err(dev, "external clock %d is not supported", mclk);
- return -EINVAL;
- }
-
ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
if (!ep)
return -ENXIO;
@@ -892,14 +880,15 @@ static void ov9734_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
v4l2_ctrl_handler_free(sd->ctrl_handler);
- pm_runtime_disable(&client->dev);
- pm_runtime_set_suspended(&client->dev);
+ pm_runtime_disable(ov9734->dev);
+ pm_runtime_set_suspended(ov9734->dev);
mutex_destroy(&ov9734->mutex);
}
static int ov9734_probe(struct i2c_client *client)
{
struct ov9734 *ov9734;
+ unsigned long freq;
int ret;
ret = ov9734_check_hwcfg(&client->dev);
@@ -913,10 +902,23 @@ static int ov9734_probe(struct i2c_client *client)
if (!ov9734)
return -ENOMEM;
+ ov9734->dev = &client->dev;
+
+ ov9734->clk = devm_v4l2_sensor_clk_get(ov9734->dev, NULL);
+ if (IS_ERR(ov9734->clk))
+ return dev_err_probe(ov9734->dev, PTR_ERR(ov9734->clk),
+ "failed to get clock\n");
+
+ freq = clk_get_rate(ov9734->clk);
+ if (freq != OV9734_MCLK)
+ return dev_err_probe(ov9734->dev, -EINVAL,
+ "external clock %lu is not supported",
+ freq);
+
v4l2_i2c_subdev_init(&ov9734->sd, client, &ov9734_subdev_ops);
ret = ov9734_identify_module(ov9734);
if (ret) {
- dev_err(&client->dev, "failed to find sensor: %d", ret);
+ dev_err(ov9734->dev, "failed to find sensor: %d", ret);
return ret;
}
@@ -924,7 +926,7 @@ static int ov9734_probe(struct i2c_client *client)
ov9734->cur_mode = &supported_modes[0];
ret = ov9734_init_controls(ov9734);
if (ret) {
- dev_err(&client->dev, "failed to init controls: %d", ret);
+ dev_err(ov9734->dev, "failed to init controls: %d", ret);
goto probe_error_v4l2_ctrl_handler_free;
}
@@ -935,7 +937,7 @@ static int ov9734_probe(struct i2c_client *client)
ov9734->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&ov9734->sd.entity, 1, &ov9734->pad);
if (ret) {
- dev_err(&client->dev, "failed to init entity pads: %d", ret);
+ dev_err(ov9734->dev, "failed to init entity pads: %d", ret);
goto probe_error_v4l2_ctrl_handler_free;
}
@@ -943,13 +945,13 @@ static int ov9734_probe(struct i2c_client *client)
* Device is already turned on by i2c-core with ACPI domain PM.
* Enable runtime PM and turn off the device.
*/
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
- pm_runtime_idle(&client->dev);
+ pm_runtime_set_active(ov9734->dev);
+ pm_runtime_enable(ov9734->dev);
+ pm_runtime_idle(ov9734->dev);
ret = v4l2_async_register_subdev_sensor(&ov9734->sd);
if (ret < 0) {
- dev_err(&client->dev, "failed to register V4L2 subdev: %d",
+ dev_err(ov9734->dev, "failed to register V4L2 subdev: %d",
ret);
goto probe_error_media_entity_cleanup_pm;
}
@@ -957,8 +959,8 @@ static int ov9734_probe(struct i2c_client *client)
return 0;
probe_error_media_entity_cleanup_pm:
- pm_runtime_disable(&client->dev);
- pm_runtime_set_suspended(&client->dev);
+ pm_runtime_disable(ov9734->dev);
+ pm_runtime_set_suspended(ov9734->dev);
media_entity_cleanup(&ov9734->sd.entity);
probe_error_v4l2_ctrl_handler_free:
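Note: besides the clock helper, the ov8856 and ov9734 changes above cache a struct device pointer in the driver state, so logging and runtime-PM calls no longer round-trip through v4l2_get_subdevdata() to reach &client->dev. A condensed sketch (my_sensor is a made-up name):

#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <media/v4l2-subdev.h>

/* Illustrative sketch of the cached-dev-pointer pattern. */
struct my_sensor {
	struct device *dev;		/* cached &client->dev */
	struct v4l2_subdev sd;
};

static int my_sensor_probe(struct i2c_client *client)
{
	struct my_sensor *sensor;

	sensor = devm_kzalloc(&client->dev, sizeof(*sensor), GFP_KERNEL);
	if (!sensor)
		return -ENOMEM;

	sensor->dev = &client->dev;

	/*
	 * Later code writes dev_err(sensor->dev, ...) and
	 * pm_runtime_put(sensor->dev) directly, with no
	 * v4l2_get_subdevdata(&sensor->sd) detour.
	 */
	return 0;
}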
diff --git a/drivers/media/i2c/rj54n1cb0c.c b/drivers/media/i2c/rj54n1cb0c.c
index b7ca39f63dba..6dfc91216851 100644
--- a/drivers/media/i2c/rj54n1cb0c.c
+++ b/drivers/media/i2c/rj54n1cb0c.c
@@ -1329,10 +1329,13 @@ static int rj54n1_probe(struct i2c_client *client)
V4L2_CID_GAIN, 0, 127, 1, 66);
v4l2_ctrl_new_std(&rj54n1->hdl, &rj54n1_ctrl_ops,
V4L2_CID_AUTO_WHITE_BALANCE, 0, 1, 1, 1);
- rj54n1->subdev.ctrl_handler = &rj54n1->hdl;
- if (rj54n1->hdl.error)
- return rj54n1->hdl.error;
+ if (rj54n1->hdl.error) {
+ ret = rj54n1->hdl.error;
+ goto err_free_ctrl;
+ }
+
+ rj54n1->subdev.ctrl_handler = &rj54n1->hdl;
rj54n1->clk_div = clk_div;
rj54n1->rect.left = RJ54N1_COLUMN_SKIP;
rj54n1->rect.top = RJ54N1_ROW_SKIP;
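Note: the rj54n1 hunk fixes the control-handler error path: the handler was previously installed on the subdev and, on error, the function returned without freeing the controls created before the failure. The corrected ordering checks hdl.error first and unwinds via v4l2_ctrl_handler_free(), which is safe on a partially built handler. Sketch with illustrative names (the empty ops struct is a stub):

#include <media/v4l2-ctrls.h>
#include <media/v4l2-subdev.h>

/* Illustrative sketch; a real driver fills in the ops. */
static const struct v4l2_ctrl_ops sensor_ctrl_ops;

struct sensor {
	struct v4l2_subdev subdev;
	struct v4l2_ctrl_handler hdl;
};

static int sensor_init_controls(struct sensor *priv)
{
	int ret;

	v4l2_ctrl_handler_init(&priv->hdl, 2);
	v4l2_ctrl_new_std(&priv->hdl, &sensor_ctrl_ops,
			  V4L2_CID_GAIN, 0, 127, 1, 66);
	v4l2_ctrl_new_std(&priv->hdl, &sensor_ctrl_ops,
			  V4L2_CID_AUTO_WHITE_BALANCE, 0, 1, 1, 1);

	if (priv->hdl.error) {
		ret = priv->hdl.error;
		/* frees the controls created before the failure */
		v4l2_ctrl_handler_free(&priv->hdl);
		return ret;
	}

	/* publish the handler only once it is fully built */
	priv->subdev.ctrl_handler = &priv->hdl;
	return 0;
}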
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
index 7716dfe2b8c9..ab31ee2b596b 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3-core.c
+++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
@@ -1368,10 +1368,6 @@ static int __s5c73m3_power_on(struct s5c73m3 *state)
goto err_reg_dis;
}
- ret = clk_set_rate(state->clock, state->mclk_frequency);
- if (ret < 0)
- goto err_reg_dis;
-
ret = clk_prepare_enable(state->clock);
if (ret < 0)
goto err_reg_dis;
@@ -1556,16 +1552,13 @@ static int s5c73m3_get_dt_data(struct s5c73m3 *state)
if (!node)
return -EINVAL;
- state->clock = devm_clk_get(dev, S5C73M3_CLK_NAME);
+ state->clock = devm_v4l2_sensor_clk_get_legacy(dev, S5C73M3_CLK_NAME,
+ false,
+ S5C73M3_DEFAULT_MCLK_FREQ);
if (IS_ERR(state->clock))
- return PTR_ERR(state->clock);
-
- if (of_property_read_u32(node, "clock-frequency",
- &state->mclk_frequency)) {
- state->mclk_frequency = S5C73M3_DEFAULT_MCLK_FREQ;
- dev_info(dev, "using default %u Hz clock frequency\n",
- state->mclk_frequency);
- }
+ return dev_err_probe(dev, PTR_ERR(state->clock),
+ "Failed to get the clock %s\n",
+ S5C73M3_CLK_NAME);
/* Request GPIO lines asserted */
state->stby = devm_gpiod_get(dev, "standby", GPIOD_OUT_HIGH);
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3.h b/drivers/media/i2c/s5c73m3/s5c73m3.h
index 627e80cf5b72..68a19c2c8db8 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3.h
+++ b/drivers/media/i2c/s5c73m3/s5c73m3.h
@@ -382,8 +382,6 @@ struct s5c73m3 {
struct clk *clock;
- /* External master clock frequency */
- u32 mclk_frequency;
/* Video bus type - MIPI-CSI2/parallel */
enum v4l2_mbus_type bus_type;
diff --git a/drivers/media/i2c/s5k5baf.c b/drivers/media/i2c/s5k5baf.c
index 24f399cd2124..d1d00eca8708 100644
--- a/drivers/media/i2c/s5k5baf.c
+++ b/drivers/media/i2c/s5k5baf.c
@@ -284,7 +284,6 @@ struct s5k5baf {
struct regulator_bulk_data supplies[S5K5BAF_NUM_SUPPLIES];
struct clk *clock;
- u32 mclk_frequency;
struct s5k5baf_fw *fw;
@@ -576,7 +575,7 @@ static void s5k5baf_hw_patch(struct s5k5baf *state)
static void s5k5baf_hw_set_clocks(struct s5k5baf *state)
{
- unsigned long mclk = state->mclk_frequency / 1000;
+ unsigned long mclk = clk_get_rate(state->clock) / 1000;
u16 status;
static const u16 nseq_clk_cfg[] = {
NSEQ(REG_I_USE_NPVI_CLOCKS,
@@ -946,10 +945,6 @@ static int s5k5baf_power_on(struct s5k5baf *state)
if (ret < 0)
goto err;
- ret = clk_set_rate(state->clock, state->mclk_frequency);
- if (ret < 0)
- goto err_reg_dis;
-
ret = clk_prepare_enable(state->clock);
if (ret < 0)
goto err_reg_dis;
@@ -1841,14 +1836,6 @@ static int s5k5baf_parse_device_node(struct s5k5baf *state, struct device *dev)
return -EINVAL;
}
- ret = of_property_read_u32(node, "clock-frequency",
- &state->mclk_frequency);
- if (ret < 0) {
- state->mclk_frequency = S5K5BAF_DEFAULT_MCLK_FREQ;
- dev_info(dev, "using default %u Hz clock frequency\n",
- state->mclk_frequency);
- }
-
node_ep = of_graph_get_endpoint_by_regs(node, 0, -1);
if (!node_ep) {
dev_err(dev, "no endpoint defined at node %pOF\n", node);
@@ -1967,9 +1954,11 @@ static int s5k5baf_probe(struct i2c_client *c)
if (ret < 0)
goto err_me;
- state->clock = devm_clk_get(state->sd.dev, S5K5BAF_CLK_NAME);
+ state->clock = devm_v4l2_sensor_clk_get_legacy(state->sd.dev,
+ S5K5BAF_CLK_NAME, false,
+ S5K5BAF_DEFAULT_MCLK_FREQ);
if (IS_ERR(state->clock)) {
- ret = -EPROBE_DEFER;
+ ret = PTR_ERR(state->clock);
goto err_me;
}
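Note: the s5k5baf hunk also fixes error propagation in probe: every clock-lookup failure used to be collapsed into -EPROBE_DEFER, masking real errors such as a bad DT reference. Returning PTR_ERR() preserves the actual code, and dev_err_probe() (as used by the sibling conversions) logs it while staying quiet for genuine deferrals. Sketch:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Illustrative only: propagate the real clock-lookup error. */
static int sensor_get_mclk(struct device *dev, struct clk **clk)
{
	*clk = devm_clk_get(dev, "mclk");
	if (IS_ERR(*clk))
		/*
		 * dev_err_probe() returns the error unchanged; for
		 * -EPROBE_DEFER it logs at debug level only and records
		 * the deferral reason instead of printing an error.
		 */
		return dev_err_probe(dev, PTR_ERR(*clk),
				     "failed to get mclk\n");
	return 0;
}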
diff --git a/drivers/media/i2c/s5k6a3.c b/drivers/media/i2c/s5k6a3.c
index 0c2674115b7b..ba6477e88da3 100644
--- a/drivers/media/i2c/s5k6a3.c
+++ b/drivers/media/i2c/s5k6a3.c
@@ -51,7 +51,6 @@ enum {
* @lock: mutex protecting the structure's members below
* @format: media bus format at the sensor's source pad
* @clock: pointer to &struct clk.
- * @clock_frequency: clock frequency
* @power_count: stores state if device is powered
*/
struct s5k6a3 {
@@ -63,7 +62,6 @@ struct s5k6a3 {
struct mutex lock;
struct v4l2_mbus_framefmt format;
struct clk *clock;
- u32 clock_frequency;
int power_count;
};
@@ -192,10 +190,6 @@ static int __s5k6a3_power_on(struct s5k6a3 *sensor)
int i = S5K6A3_SUPP_VDDA;
int ret;
- ret = clk_set_rate(sensor->clock, sensor->clock_frequency);
- if (ret < 0)
- return ret;
-
ret = pm_runtime_get(sensor->dev);
if (ret < 0)
goto error_rpm_put;
@@ -292,22 +286,18 @@ static int s5k6a3_probe(struct i2c_client *client)
mutex_init(&sensor->lock);
sensor->dev = dev;
- sensor->clock = devm_clk_get(sensor->dev, S5K6A3_CLK_NAME);
+ sensor->clock = devm_v4l2_sensor_clk_get_legacy(sensor->dev,
+ S5K6A3_CLK_NAME, false,
+ S5K6A3_DEFAULT_CLK_FREQ);
if (IS_ERR(sensor->clock))
- return PTR_ERR(sensor->clock);
+ return dev_err_probe(sensor->dev, PTR_ERR(sensor->clock),
+ "failed to get extclk\n");
sensor->gpio_reset = devm_gpiod_get(dev, NULL, GPIOD_OUT_HIGH);
ret = PTR_ERR_OR_ZERO(sensor->gpio_reset);
if (ret)
return ret;
- if (of_property_read_u32(dev->of_node, "clock-frequency",
- &sensor->clock_frequency)) {
- sensor->clock_frequency = S5K6A3_DEFAULT_CLK_FREQ;
- dev_info(dev, "using default %u Hz clock frequency\n",
- sensor->clock_frequency);
- }
-
for (i = 0; i < S5K6A3_NUM_SUPPLIES; i++)
sensor->supplies[i].supply = s5k6a3_supply_names[i];
diff --git a/drivers/media/i2c/saa6752hs.c b/drivers/media/i2c/saa6752hs.c
index 1ed8b5edb3fb..1c0031ba43b4 100644
--- a/drivers/media/i2c/saa6752hs.c
+++ b/drivers/media/i2c/saa6752hs.c
@@ -6,7 +6,7 @@
AC-3 support:
- Copyright (C) 2008 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2008 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/i2c/saa7115.c b/drivers/media/i2c/saa7115.c
index b8b8f206ec3a..48d6730d9271 100644
--- a/drivers/media/i2c/saa7115.c
+++ b/drivers/media/i2c/saa7115.c
@@ -18,7 +18,7 @@
// Added saa7115 support by Kevin Thayer <nufan_wfk at yahoo.com>
// (2/17/2003)
//
-// VBI support (2004) and cleanups (2005) by Hans Verkuil <hverkuil@xs4all.nl>
+// VBI support (2004) and cleanups (2005) by Hans Verkuil <hverkuil@kernel.org>
//
// Copyright (c) 2005-2006 Mauro Carvalho Chehab <mchehab@kernel.org>
// SAA7111, SAA7113 and SAA7118 support
diff --git a/drivers/media/i2c/saa7127.c b/drivers/media/i2c/saa7127.c
index 818ed19cf37b..a42a7ffe3768 100644
--- a/drivers/media/i2c/saa7127.c
+++ b/drivers/media/i2c/saa7127.c
@@ -25,7 +25,7 @@
* Copyright (C) 2004 Chris Kennedy <c@groovy.org>
*
* VBI additions & cleanup:
- * Copyright (C) 2004, 2005 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2004, 2005 Hans Verkuil <hverkuil@kernel.org>
*
* Note: the saa7126 is identical to the saa7127, and the saa7128 is
* identical to the saa7129, except that the saa7126 and saa7128 have
diff --git a/drivers/media/i2c/saa717x.c b/drivers/media/i2c/saa717x.c
index b0793bb0c02a..713331be947c 100644
--- a/drivers/media/i2c/saa717x.c
+++ b/drivers/media/i2c/saa717x.c
@@ -10,7 +10,7 @@
* Changes by T.Adachi (tadachi@tadachi-net.com)
* - support audio, video scaler etc, and checked the initialize sequence.
*
- * Cleaned up by Hans Verkuil <hverkuil@xs4all.nl>
+ * Cleaned up by Hans Verkuil <hverkuil@kernel.org>
*
* Note: this is a reversed engineered driver based on captures from
* the I2C bus under Windows. This chip is very similar to the saa7134,
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
index 1cc7636e446d..a0ca19359c43 100644
--- a/drivers/media/i2c/tc358743.c
+++ b/drivers/media/i2c/tc358743.c
@@ -38,7 +38,21 @@
static int debug;
module_param(debug, int, 0644);
-MODULE_PARM_DESC(debug, "debug level (0-3)");
+MODULE_PARM_DESC(debug, " debug level (0-3)");
+
+static int packet_type = 0x87;
+module_param(packet_type, int, 0644);
+MODULE_PARM_DESC(packet_type,
+ " Programmable Packet Type. Possible values:\n"
+ "\t\t 0x87: DRM InfoFrame (Default).\n"
+ "\t\t 0x01: Audio Clock Regeneration Packet\n"
+ "\t\t 0x02: Audio Sample Packet\n"
+ "\t\t 0x03: General Control Packet\n"
+ "\t\t 0x04: ACP Packet\n"
+ "\t\t 0x07: One Bit Audio Sample Packet\n"
+ "\t\t 0x08: DST Audio Packet\n"
+ "\t\t 0x09: High Bitrate Audio Stream Packet\n"
+ "\t\t 0x0a: Gamut Metadata Packet\n");
MODULE_DESCRIPTION("Toshiba TC358743 HDMI to CSI-2 bridge driver");
MODULE_AUTHOR("Ramakrishnan Muthukrishnan <ram@rkrishnan.org>");
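
Because packet_type is registered with mode 0644, it can be changed at runtime through sysfs and is picked up on the next VIDIOC_LOG_STATUS (see the comment added in print_infoframes() below). A userspace sketch, assuming the parameter path and a bridge node at /dev/video0:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/videodev2.h>

    int main(void)
    {
            FILE *p = fopen("/sys/module/tc358743/parameters/packet_type", "w");
            int fd = open("/dev/video0", O_RDWR);   /* bridge video node, assumed */

            if (!p || fd < 0)
                    return 1;
            fputs("4\n", p);                /* 0x04: monitor ACP packets */
            fclose(p);
            ioctl(fd, VIDIOC_LOG_STATUS);   /* driver re-programs TYP_ACP_SET */
            close(fd);
            return 0;
    }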
@@ -466,10 +480,29 @@ tc358743_debugfs_if_read(u32 type, void *priv, struct file *filp,
if (!is_hdmi(sd))
return 0;
- if (type != V4L2_DEBUGFS_IF_AVI)
+ switch (type) {
+ case V4L2_DEBUGFS_IF_AVI:
+ i2c_rd(sd, PK_AVI_0HEAD, buf, PK_AVI_LEN);
+ break;
+ case V4L2_DEBUGFS_IF_AUDIO:
+ i2c_rd(sd, PK_AUD_0HEAD, buf, PK_AUD_LEN);
+ break;
+ case V4L2_DEBUGFS_IF_SPD:
+ i2c_rd(sd, PK_SPD_0HEAD, buf, PK_SPD_LEN);
+ break;
+ case V4L2_DEBUGFS_IF_HDMI:
+ i2c_rd(sd, PK_VS_0HEAD, buf, PK_VS_LEN);
+ break;
+ case V4L2_DEBUGFS_IF_DRM:
+ i2c_rd(sd, PK_ACP_0HEAD, buf, PK_ACP_LEN);
+ break;
+ default:
return 0;
+ }
+
+ if (!buf[2])
+ return -ENOENT;
- i2c_rd(sd, PK_AVI_0HEAD, buf, PK_AVI_16BYTE - PK_AVI_0HEAD + 1);
len = buf[2] + 4;
if (len > V4L2_DEBUGFS_IF_MAX_LEN)
len = -ENOENT;
@@ -478,26 +511,69 @@ tc358743_debugfs_if_read(u32 type, void *priv, struct file *filp,
return len < 0 ? 0 : len;
}
-static void print_avi_infoframe(struct v4l2_subdev *sd)
+static void print_infoframes(struct v4l2_subdev *sd)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct device *dev = &client->dev;
union hdmi_infoframe frame;
- u8 buffer[HDMI_INFOFRAME_SIZE(AVI)] = {};
+ u8 buffer[V4L2_DEBUGFS_IF_MAX_LEN] = {};
+
+ /*
+ * Updating the ACP packet type here makes it possible to change
+ * the monitored packet type dynamically, without reloading the
+ * driver with a new packet_type module option value: write the
+ * new value to the module parameter, then call VIDIOC_LOG_STATUS.
+ */
+ i2c_wr8(sd, TYP_ACP_SET, packet_type);
if (!is_hdmi(sd)) {
- v4l2_info(sd, "DVI-D signal - AVI infoframe not supported\n");
+ v4l2_info(sd, "DVI-D signal - InfoFrames not supported\n");
return;
}
- i2c_rd(sd, PK_AVI_0HEAD, buffer, HDMI_INFOFRAME_SIZE(AVI));
+ i2c_rd(sd, PK_AVI_0HEAD, buffer, PK_AVI_LEN);
+ if (hdmi_infoframe_unpack(&frame, buffer, sizeof(buffer)) >= 0)
+ hdmi_infoframe_log(KERN_INFO, dev, &frame);
- if (hdmi_infoframe_unpack(&frame, buffer, sizeof(buffer)) < 0) {
- v4l2_err(sd, "%s: unpack of AVI infoframe failed\n", __func__);
- return;
+ i2c_rd(sd, PK_VS_0HEAD, buffer, PK_VS_LEN);
+ if (hdmi_infoframe_unpack(&frame, buffer, sizeof(buffer)) >= 0)
+ hdmi_infoframe_log(KERN_INFO, dev, &frame);
+
+ i2c_rd(sd, PK_AUD_0HEAD, buffer, PK_AUD_LEN);
+ if (hdmi_infoframe_unpack(&frame, buffer, sizeof(buffer)) >= 0)
+ hdmi_infoframe_log(KERN_INFO, dev, &frame);
+
+ i2c_rd(sd, PK_SPD_0HEAD, buffer, PK_SPD_LEN);
+ if (hdmi_infoframe_unpack(&frame, buffer, sizeof(buffer)) >= 0)
+ hdmi_infoframe_log(KERN_INFO, dev, &frame);
+
+ i2c_rd(sd, PK_ACP_0HEAD, buffer, PK_ACP_LEN);
+ if (buffer[0] == packet_type) {
+ if (packet_type < 0x80)
+ v4l2_info(sd, "Packet: %*ph\n", PK_ACP_LEN, buffer);
+ else if (packet_type != 0x87)
+ v4l2_info(sd, "InfoFrame: %*ph\n", PK_ACP_LEN, buffer);
+ else if (hdmi_infoframe_unpack(&frame, buffer,
+ sizeof(buffer)) >= 0)
+ hdmi_infoframe_log(KERN_INFO, dev, &frame);
}
- hdmi_infoframe_log(KERN_INFO, dev, &frame);
+ i2c_rd(sd, PK_MS_0HEAD, buffer, PK_MS_LEN);
+ if (buffer[2] && buffer[2] + 3 <= PK_MS_LEN)
+ v4l2_info(sd, "MPEG Source InfoFrame: %*ph\n",
+ buffer[2] + 3, buffer);
+
+ i2c_rd(sd, PK_ISRC1_0HEAD, buffer, PK_ISRC1_LEN);
+ if (buffer[0] == 0x05)
+ v4l2_info(sd, "ISRC1 Packet: %*ph\n",
+ PK_ISRC1_LEN, buffer);
+
+ i2c_rd(sd, PK_ISRC2_0HEAD, buffer, PK_ISRC2_LEN);
+ if (buffer[0] == 0x06)
+ v4l2_info(sd, "ISRC2 Packet: %*ph\n",
+ PK_ISRC2_LEN, buffer);
}
/* --------------- CTRLS --------------- */
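
For reference, the length returned by tc358743_debugfs_if_read() follows the HDMI InfoFrame layout: buf[2] is the payload length from the packet header, so len = buf[2] + 4 counts the 3-byte header, the checksum byte, and the payload. A standard AVI InfoFrame with buf[2] = 13 therefore reads back as 13 + 4 = 17 bytes, matching HDMI_INFOFRAME_SIZE(AVI).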
@@ -1375,7 +1451,7 @@ static int tc358743_log_status(struct v4l2_subdev *sd)
v4l2_info(sd, "Deep color mode: %d-bits per channel\n",
deep_color_mode[(i2c_rd8(sd, VI_STATUS1) &
MASK_S_DEEPCOLOR) >> 2]);
- print_avi_infoframe(sd);
+ print_infoframes(sd);
return 0;
}
@@ -2232,10 +2308,15 @@ static int tc358743_probe(struct i2c_client *client)
if (err < 0)
goto err_work_queues;
+ i2c_wr8(sd, TYP_ACP_SET, packet_type);
+ i2c_wr8(sd, PK_AUTO_CLR, 0xff);
+ i2c_wr8(sd, NO_PKT_CLR, MASK_NO_ACP_CLR);
+
state->debugfs_dir = debugfs_create_dir(sd->name, v4l2_debugfs_root());
state->infoframes = v4l2_debugfs_if_alloc(state->debugfs_dir,
- V4L2_DEBUGFS_IF_AVI, sd,
- tc358743_debugfs_if_read);
+ V4L2_DEBUGFS_IF_AVI | V4L2_DEBUGFS_IF_AUDIO |
+ V4L2_DEBUGFS_IF_SPD | V4L2_DEBUGFS_IF_HDMI |
+ V4L2_DEBUGFS_IF_DRM, sd, tc358743_debugfs_if_read);
v4l2_info(sd, "%s found @ 0x%x (%s)\n", client->name,
client->addr << 1, client->adapter->name);
@@ -2245,10 +2326,10 @@ static int tc358743_probe(struct i2c_client *client)
err_work_queues:
cec_unregister_adapter(state->cec_adap);
if (!state->i2c_client->irq) {
- timer_delete(&state->timer);
+ timer_delete_sync(&state->timer);
flush_work(&state->work_i2c_poll);
}
- cancel_delayed_work(&state->delayed_work_enable_hotplug);
+ cancel_delayed_work_sync(&state->delayed_work_enable_hotplug);
mutex_destroy(&state->confctl_mutex);
err_hdl:
media_entity_cleanup(&sd->entity);
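
The error path now uses timer_delete_sync() and cancel_delayed_work_sync(), which wait for a concurrently running handler to finish instead of merely dequeueing it, so the timer or work item cannot still be executing when the state behind it is torn down. A standalone sketch of the idiom, with the fields taken from the calls above:

    /* Teardown sketch; struct layout assumed from the diff. */
    static void tc_teardown(struct tc358743_state *state)
    {
            timer_delete_sync(&state->timer);       /* waits for the handler */
            flush_work(&state->work_i2c_poll);
            cancel_delayed_work_sync(&state->delayed_work_enable_hotplug);
            mutex_destroy(&state->confctl_mutex);
    }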
diff --git a/drivers/media/i2c/tc358743_regs.h b/drivers/media/i2c/tc358743_regs.h
index 2495878dc358..aae288f8add3 100644
--- a/drivers/media/i2c/tc358743_regs.h
+++ b/drivers/media/i2c/tc358743_regs.h
@@ -692,6 +692,8 @@
#define MASK_NCO_F0_MOD_42MHZ 0x00
#define MASK_NCO_F0_MOD_27MHZ 0x01
+#define TYP_ACP_SET 0x8706
+
#define PK_INT_MODE 0x8709
#define MASK_ISRC2_INT_MODE 0x80
#define MASK_ISRC_INT_MODE 0x40
@@ -702,6 +704,8 @@
#define MASK_AUD_INT_MODE 0x02
#define MASK_AVI_INT_MODE 0x01
+#define PK_AUTO_CLR 0x870a
+
#define NO_PKT_LIMIT 0x870B
#define MASK_NO_ACP_LIMIT 0xf0
#define SET_NO_ACP_LIMIT_MS(milliseconds) ((((milliseconds) / 80) << 4) & \
@@ -720,25 +724,44 @@
#define ERR_PK_LIMIT 0x870D
#define NO_PKT_LIMIT2 0x870E
#define PK_AVI_0HEAD 0x8710
-#define PK_AVI_1HEAD 0x8711
-#define PK_AVI_2HEAD 0x8712
#define PK_AVI_0BYTE 0x8713
-#define PK_AVI_1BYTE 0x8714
-#define PK_AVI_2BYTE 0x8715
-#define PK_AVI_3BYTE 0x8716
-#define PK_AVI_4BYTE 0x8717
-#define PK_AVI_5BYTE 0x8718
-#define PK_AVI_6BYTE 0x8719
-#define PK_AVI_7BYTE 0x871A
-#define PK_AVI_8BYTE 0x871B
-#define PK_AVI_9BYTE 0x871C
-#define PK_AVI_10BYTE 0x871D
-#define PK_AVI_11BYTE 0x871E
-#define PK_AVI_12BYTE 0x871F
-#define PK_AVI_13BYTE 0x8720
-#define PK_AVI_14BYTE 0x8721
-#define PK_AVI_15BYTE 0x8722
#define PK_AVI_16BYTE 0x8723
+#define PK_AVI_LEN (PK_AVI_16BYTE - PK_AVI_0HEAD + 1)
+
+#define PK_AUD_0HEAD 0x8730
+#define PK_AUD_0BYTE 0x8733
+#define PK_AUD_10BYTE 0x873d
+#define PK_AUD_LEN (PK_AUD_10BYTE - PK_AUD_0HEAD + 1)
+
+#define PK_MS_0HEAD 0x8740
+#define PK_MS_0BYTE 0x8743
+#define PK_MS_10BYTE 0x874d
+#define PK_MS_LEN (PK_MS_10BYTE - PK_MS_0HEAD + 1)
+
+#define PK_SPD_0HEAD 0x8750
+#define PK_SPD_0BYTE 0x8753
+#define PK_SPD_27BYTE 0x876e
+#define PK_SPD_LEN (PK_SPD_27BYTE - PK_SPD_0HEAD + 1)
+
+#define PK_VS_0HEAD 0x8770
+#define PK_VS_0BYTE 0x8773
+#define PK_VS_27BYTE 0x878e
+#define PK_VS_LEN (PK_VS_27BYTE - PK_VS_0HEAD + 1)
+
+#define PK_ACP_0HEAD 0x8790
+#define PK_ACP_0BYTE 0x8793
+#define PK_ACP_27BYTE 0x87ae
+#define PK_ACP_LEN (PK_ACP_27BYTE - PK_ACP_0HEAD + 1)
+
+#define PK_ISRC1_0HEAD 0x87b0
+#define PK_ISRC1_0BYTE 0x87b3
+#define PK_ISRC1_27BYTE 0x87c2
+#define PK_ISRC1_LEN (PK_ISRC1_27BYTE - PK_ISRC1_0HEAD + 1)
+
+#define PK_ISRC2_0HEAD 0x87d0
+#define PK_ISRC2_0BYTE 0x87d3
+#define PK_ISRC2_27BYTE 0x87ee
+#define PK_ISRC2_LEN (PK_ISRC2_27BYTE - PK_ISRC2_0HEAD + 1)
#define BKSV 0x8800
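
Each new *_LEN macro is the inclusive register span from the packet header to the last data byte. For example, PK_AVI_LEN = 0x8723 - 0x8710 + 1 = 20 bytes: the 3-byte header (0x8710..0x8712) plus 17 data bytes (0x8713..0x8723). A compile-time check of that arithmetic, as a sketch outside the patch (assumes linux/build_bug.h):

    /* Sanity checks for the inclusive-span arithmetic (not part of the patch). */
    static_assert(PK_AVI_LEN == 20, "AVI: 3-byte header + 17 data bytes");
    static_assert(PK_AUD_LEN == 14, "Audio: 3-byte header + 11 data bytes");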
diff --git a/drivers/media/i2c/tda9840.c b/drivers/media/i2c/tda9840.c
index d61da811c9da..e3b266db571f 100644
--- a/drivers/media/i2c/tda9840.c
+++ b/drivers/media/i2c/tda9840.c
@@ -3,7 +3,7 @@
tda9840 - i2c-driver for the tda9840 by SGS Thomson
Copyright (C) 1998-2003 Michael Hunold <michael@mihu.de>
- Copyright (C) 2008 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2008 Hans Verkuil <hverkuil@kernel.org>
The tda9840 is a stereo/dual sound processor with digital
identification. It can be found at address 0x84 on the i2c-bus.
diff --git a/drivers/media/i2c/tea6415c.c b/drivers/media/i2c/tea6415c.c
index 4aaf66353610..0cd2e6c52e20 100644
--- a/drivers/media/i2c/tea6415c.c
+++ b/drivers/media/i2c/tea6415c.c
@@ -3,7 +3,7 @@
tea6415c - i2c-driver for the tea6415c by SGS Thomson
Copyright (C) 1998-2003 Michael Hunold <michael@mihu.de>
- Copyright (C) 2008 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2008 Hans Verkuil <hverkuil@kernel.org>
The tea6415c is a bus controlled video-matrix-switch
with 8 inputs and 6 outputs.
diff --git a/drivers/media/i2c/tea6420.c b/drivers/media/i2c/tea6420.c
index 5c5ea3973251..400883fc0c0f 100644
--- a/drivers/media/i2c/tea6420.c
+++ b/drivers/media/i2c/tea6420.c
@@ -3,7 +3,7 @@
tea6420 - i2c-driver for the tea6420 by SGS Thomson
Copyright (C) 1998-2003 Michael Hunold <michael@mihu.de>
- Copyright (C) 2008 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2008 Hans Verkuil <hverkuil@kernel.org>
The tea6420 is a bus controlled audio-matrix with 5 stereo inputs,
4 stereo outputs and gain control for each output.
diff --git a/drivers/media/i2c/ths7303.c b/drivers/media/i2c/ths7303.c
index b7cedc5b3e8e..ff268ebeb4d9 100644
--- a/drivers/media/i2c/ths7303.c
+++ b/drivers/media/i2c/ths7303.c
@@ -7,7 +7,7 @@
* Author: Chaithrika U S <chaithrika@ti.com>
*
* Contributors:
- * Hans Verkuil <hansverk@cisco.com>
+ * Hans Verkuil <hverkuil@kernel.org>
* Lad, Prabhakar <prabhakar.lad@ti.com>
* Martin Bugge <marbugge@cisco.com>
*
diff --git a/drivers/media/i2c/tlv320aic23b.c b/drivers/media/i2c/tlv320aic23b.c
index b7b31b6192af..6f6bc5236565 100644
--- a/drivers/media/i2c/tlv320aic23b.c
+++ b/drivers/media/i2c/tlv320aic23b.c
@@ -7,7 +7,7 @@
* Based on wm8775 driver
*
* Copyright (C) 2004 Ulf Eklund <ivtv at eklund.to>
- * Copyright (C) 2005 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2005 Hans Verkuil <hverkuil@kernel.org>
*/
#include <linux/module.h>
diff --git a/drivers/media/i2c/upd64031a.c b/drivers/media/i2c/upd64031a.c
index 9d0b72a213be..a178af46e695 100644
--- a/drivers/media/i2c/upd64031a.c
+++ b/drivers/media/i2c/upd64031a.c
@@ -4,7 +4,7 @@
*
* 2003 by T.Adachi <tadachi@tadachi-net.com>
* 2003 by Takeru KOMORIYA <komoriya@paken.org>
- * 2006 by Hans Verkuil <hverkuil@xs4all.nl>
+ * 2006 by Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/i2c/upd64083.c b/drivers/media/i2c/upd64083.c
index 2e99ed5da42c..5421dc5e32c9 100644
--- a/drivers/media/i2c/upd64083.c
+++ b/drivers/media/i2c/upd64083.c
@@ -4,7 +4,7 @@
*
* 2003 by T.Adachi (tadachi@tadachi-net.com)
* 2003 by Takeru KOMORIYA <komoriya@paken.org>
- * 2006 by Hans Verkuil <hverkuil@xs4all.nl>
+ * 2006 by Hans Verkuil <hverkuil@kernel.org>
*/
#include <linux/module.h>
diff --git a/drivers/media/i2c/vd55g1.c b/drivers/media/i2c/vd55g1.c
index 7c39183dd44b..f09d6bf32641 100644
--- a/drivers/media/i2c/vd55g1.c
+++ b/drivers/media/i2c/vd55g1.c
@@ -66,7 +66,7 @@
#define VD55G1_REG_READOUT_CTRL CCI_REG8(0x052e)
#define VD55G1_READOUT_CTRL_BIN_MODE_NORMAL 0
#define VD55G1_READOUT_CTRL_BIN_MODE_DIGITAL_X2 1
-#define VD55G1_REG_DUSTER_CTRL CCI_REG8(0x03ea)
+#define VD55G1_REG_DUSTER_CTRL CCI_REG8(0x03ae)
#define VD55G1_DUSTER_ENABLE BIT(0)
#define VD55G1_DUSTER_DISABLE 0
#define VD55G1_DUSTER_DYN_ENABLE BIT(1)
@@ -1860,7 +1860,7 @@ static int vd55g1_probe(struct i2c_client *client)
if (ret)
return dev_err_probe(dev, ret, "Failed to get regulators\n");
- sensor->xclk = devm_clk_get(dev, NULL);
+ sensor->xclk = devm_v4l2_sensor_clk_get(dev, NULL);
if (IS_ERR(sensor->xclk))
return dev_err_probe(dev, PTR_ERR(sensor->xclk),
"Failed to get xclk\n");
diff --git a/drivers/media/i2c/vd56g3.c b/drivers/media/i2c/vd56g3.c
index d66e21ba4498..157acea9e286 100644
--- a/drivers/media/i2c/vd56g3.c
+++ b/drivers/media/i2c/vd56g3.c
@@ -1471,7 +1471,7 @@ static int vd56g3_probe(struct i2c_client *client)
if (ret)
return dev_err_probe(dev, ret, "Failed to get regulators\n");
- sensor->xclk = devm_clk_get(dev, NULL);
+ sensor->xclk = devm_v4l2_sensor_clk_get(dev, NULL);
if (IS_ERR(sensor->xclk))
return dev_err_probe(dev, PTR_ERR(sensor->xclk),
"Failed to get xclk\n");
diff --git a/drivers/media/i2c/vgxy61.c b/drivers/media/i2c/vgxy61.c
index 5b0479f3a3c0..d64d0099e6fe 100644
--- a/drivers/media/i2c/vgxy61.c
+++ b/drivers/media/i2c/vgxy61.c
@@ -1181,6 +1181,21 @@ static int vgxy61_s_stream(struct v4l2_subdev *sd, int enable)
return ret;
}
+static int vgxy61_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
+ struct v4l2_mbus_frame_desc *fd)
+{
+ struct vgxy61_dev *sensor = to_vgxy61_dev(sd);
+
+ fd->type = V4L2_MBUS_FRAME_DESC_TYPE_CSI2;
+ fd->num_entries = 1;
+ fd->entry[0].pixelcode = sensor->fmt.code;
+ fd->entry[0].stream = 0;
+ fd->entry[0].bus.csi2.vc = 0;
+ fd->entry[0].bus.csi2.dt = get_data_type_by_code(sensor->fmt.code);
+
+ return 0;
+}
+
static int vgxy61_set_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
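
The new get_frame_desc callback lets a connected CSI-2 receiver query the virtual channel and data type the sensor will transmit. A hedged sketch of the consumer side, where program_csi2_rx() stands in for whatever the receiver driver actually does:

    /* Receiver-side sketch; program_csi2_rx() is hypothetical. */
    struct v4l2_mbus_frame_desc fd = {};
    int ret = v4l2_subdev_call(sensor_sd, pad, get_frame_desc, 0, &fd);

    if (!ret && fd.num_entries)
            program_csi2_rx(fd.entry[0].bus.csi2.vc, fd.entry[0].bus.csi2.dt);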
@@ -1402,6 +1417,7 @@ static const struct v4l2_subdev_pad_ops vgxy61_pad_ops = {
.set_fmt = vgxy61_set_fmt,
.get_selection = vgxy61_get_selection,
.enum_frame_size = vgxy61_enum_frame_size,
+ .get_frame_desc = vgxy61_get_frame_desc,
};
static const struct v4l2_subdev_ops vgxy61_subdev_ops = {
@@ -1761,11 +1777,11 @@ static int vgxy61_probe(struct i2c_client *client)
return ret;
}
- sensor->xclk = devm_clk_get(dev, NULL);
- if (IS_ERR(sensor->xclk)) {
- dev_err(dev, "failed to get xclk\n");
- return PTR_ERR(sensor->xclk);
- }
+ sensor->xclk = devm_v4l2_sensor_clk_get(dev, NULL);
+ if (IS_ERR(sensor->xclk))
+ return dev_err_probe(dev, PTR_ERR(sensor->xclk),
+ "failed to get xclk\n");
+
sensor->clk_freq = clk_get_rate(sensor->xclk);
if (sensor->clk_freq < 6 * HZ_PER_MHZ ||
sensor->clk_freq > 27 * HZ_PER_MHZ) {
diff --git a/drivers/media/i2c/vp27smpx.c b/drivers/media/i2c/vp27smpx.c
index 06fd46a63c72..df21950be24f 100644
--- a/drivers/media/i2c/vp27smpx.c
+++ b/drivers/media/i2c/vp27smpx.c
@@ -2,7 +2,7 @@
/*
* vp27smpx - driver version 0.0.1
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
*
* Based on a tvaudio patch from Takahiro Adachi <tadachi@tadachi-net.com>
* and Kazuhiko Kawakami <kazz-0@mail.goo.ne.jp>
diff --git a/drivers/media/i2c/wm8739.c b/drivers/media/i2c/wm8739.c
index c091b78a5b41..72eb10339d06 100644
--- a/drivers/media/i2c/wm8739.c
+++ b/drivers/media/i2c/wm8739.c
@@ -4,7 +4,7 @@
*
* Copyright (C) 2005 T. Adachi <tadachi@tadachi-net.com>
*
- * Copyright (C) 2005 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2005 Hans Verkuil <hverkuil@kernel.org>
* - Cleanup
*/
diff --git a/drivers/media/i2c/wm8775.c b/drivers/media/i2c/wm8775.c
index 619b2988577c..56778d3bc28a 100644
--- a/drivers/media/i2c/wm8775.c
+++ b/drivers/media/i2c/wm8775.c
@@ -6,7 +6,7 @@
*
* Based on saa7115 driver
*
- * Copyright (C) 2005 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2005 Hans Verkuil <hverkuil@kernel.org>
* - Cleanup
* - V4L2 API update
* - sound fixes
diff --git a/drivers/media/mc/mc-devnode.c b/drivers/media/mc/mc-devnode.c
index 56444edaf136..6daa7aa99442 100644
--- a/drivers/media/mc/mc-devnode.c
+++ b/drivers/media/mc/mc-devnode.c
@@ -50,11 +50,6 @@ static void media_devnode_release(struct device *cd)
{
struct media_devnode *devnode = to_media_devnode(cd);
- mutex_lock(&media_devnode_lock);
- /* Mark device node number as free */
- clear_bit(devnode->minor, media_devnode_nums);
- mutex_unlock(&media_devnode_lock);
-
/* Release media_devnode and perform other cleanups as needed. */
if (devnode->release)
devnode->release(devnode);
@@ -281,6 +276,7 @@ void media_devnode_unregister(struct media_devnode *devnode)
/* Delete the cdev on this minor as well */
cdev_device_del(&devnode->cdev, &devnode->dev);
devnode->media_dev = NULL;
+ clear_bit(devnode->minor, media_devnode_nums);
mutex_unlock(&media_devnode_lock);
put_device(&devnode->dev);
diff --git a/drivers/media/mc/mc-entity.c b/drivers/media/mc/mc-entity.c
index 045590905582..9519a537bfa2 100644
--- a/drivers/media/mc/mc-entity.c
+++ b/drivers/media/mc/mc-entity.c
@@ -682,8 +682,8 @@ done:
return 0;
dev_dbg(walk->mdev->dev,
- "media pipeline: adding unconnected pads of '%s'\n",
- local->entity->name);
+ "media pipeline: adding unconnected pads of '%s' reachable from pad %u\n",
+ origin->entity->name, origin->index);
media_entity_for_each_pad(origin->entity, local) {
/*
@@ -691,7 +691,7 @@ done:
* (already discovered through iterating over links) and pads
* not internally connected.
*/
- if (origin == local || !local->num_links ||
+ if (origin == local || local->num_links ||
!media_entity_has_pad_interdep(origin->entity, origin->index,
local->index))
continue;
diff --git a/drivers/media/mc/mc-request.c b/drivers/media/mc/mc-request.c
index 5edfc2791ce7..f66f728b1b43 100644
--- a/drivers/media/mc/mc-request.c
+++ b/drivers/media/mc/mc-request.c
@@ -6,7 +6,7 @@
* Copyright (C) 2018 Intel Corporation
* Copyright (C) 2018 Google, Inc.
*
- * Author: Hans Verkuil <hansverk@cisco.com>
+ * Author: Hans Verkuil <hverkuil@kernel.org>
* Author: Sakari Ailus <sakari.ailus@linux.intel.com>
*/
diff --git a/drivers/media/pci/b2c2/flexcop-pci.c b/drivers/media/pci/b2c2/flexcop-pci.c
index 486c8ec0fa60..ab53c5b02c48 100644
--- a/drivers/media/pci/b2c2/flexcop-pci.c
+++ b/drivers/media/pci/b2c2/flexcop-pci.c
@@ -411,7 +411,7 @@ static void flexcop_pci_remove(struct pci_dev *pdev)
struct flexcop_pci *fc_pci = pci_get_drvdata(pdev);
if (irq_chk_intv > 0)
- cancel_delayed_work(&fc_pci->irq_check_work);
+ cancel_delayed_work_sync(&fc_pci->irq_check_work);
flexcop_pci_dma_exit(fc_pci);
flexcop_device_exit(fc_pci->fc_dev);
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index 9ce67f515843..17e4529e537a 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -1620,7 +1620,7 @@ static int bttv_g_std(struct file *file, void *priv, v4l2_std_id *id)
return 0;
}
-static int bttv_querystd(struct file *file, void *f, v4l2_std_id *id)
+static int bttv_querystd(struct file *file, void *priv, v4l2_std_id *id)
{
struct bttv *btv = video_drvdata(file);
@@ -1750,7 +1750,7 @@ static int bttv_s_frequency(struct file *file, void *priv,
return 0;
}
-static int bttv_log_status(struct file *file, void *f)
+static int bttv_log_status(struct file *file, void *priv)
{
struct video_device *vdev = video_devdata(file);
struct bttv *btv = video_drvdata(file);
@@ -1761,7 +1761,7 @@ static int bttv_log_status(struct file *file, void *f)
}
#ifdef CONFIG_VIDEO_ADV_DEBUG
-static int bttv_g_register(struct file *file, void *f,
+static int bttv_g_register(struct file *file, void *priv,
struct v4l2_dbg_register *reg)
{
struct bttv *btv = video_drvdata(file);
@@ -1774,7 +1774,7 @@ static int bttv_g_register(struct file *file, void *f,
return 0;
}
-static int bttv_s_register(struct file *file, void *f,
+static int bttv_s_register(struct file *file, void *priv,
const struct v4l2_dbg_register *reg)
{
struct bttv *btv = video_drvdata(file);
@@ -2159,7 +2159,7 @@ static int bttv_enum_fmt_vid_cap(struct file *file, void *priv,
return 0;
}
-static int bttv_g_parm(struct file *file, void *f,
+static int bttv_g_parm(struct file *file, void *priv,
struct v4l2_streamparm *parm)
{
struct bttv *btv = video_drvdata(file);
@@ -2208,7 +2208,7 @@ static int bttv_g_pixelaspect(struct file *file, void *priv,
return 0;
}
-static int bttv_g_selection(struct file *file, void *f, struct v4l2_selection *sel)
+static int bttv_g_selection(struct file *file, void *priv, struct v4l2_selection *sel)
{
struct bttv *btv = video_drvdata(file);
@@ -2232,7 +2232,7 @@ static int bttv_g_selection(struct file *file, void *f, struct v4l2_selection *s
return 0;
}
-static int bttv_s_selection(struct file *file, void *f, struct v4l2_selection *sel)
+static int bttv_s_selection(struct file *file, void *priv, struct v4l2_selection *sel)
{
struct bttv *btv = video_drvdata(file);
const struct v4l2_rect *b;
diff --git a/drivers/media/pci/bt8xx/bttv-vbi.c b/drivers/media/pci/bt8xx/bttv-vbi.c
index a71440611e46..0ca88a2400ee 100644
--- a/drivers/media/pci/bt8xx/bttv-vbi.c
+++ b/drivers/media/pci/bt8xx/bttv-vbi.c
@@ -241,7 +241,7 @@ static int try_fmt(struct v4l2_vbi_format *f, const struct bttv_tvnorm *tvnorm,
return 0;
}
-int bttv_try_fmt_vbi_cap(struct file *file, void *f, struct v4l2_format *frt)
+int bttv_try_fmt_vbi_cap(struct file *file, void *priv, struct v4l2_format *frt)
{
struct bttv *btv = video_drvdata(file);
const struct bttv_tvnorm *tvnorm;
@@ -258,7 +258,7 @@ int bttv_try_fmt_vbi_cap(struct file *file, void *f, struct v4l2_format *frt)
}
-int bttv_s_fmt_vbi_cap(struct file *file, void *f, struct v4l2_format *frt)
+int bttv_s_fmt_vbi_cap(struct file *file, void *priv, struct v4l2_format *frt)
{
struct bttv *btv = video_drvdata(file);
const struct bttv_tvnorm *tvnorm;
@@ -301,7 +301,7 @@ int bttv_s_fmt_vbi_cap(struct file *file, void *f, struct v4l2_format *frt)
}
-int bttv_g_fmt_vbi_cap(struct file *file, void *f, struct v4l2_format *frt)
+int bttv_g_fmt_vbi_cap(struct file *file, void *priv, struct v4l2_format *frt)
{
const struct bttv_tvnorm *tvnorm;
struct bttv *btv = video_drvdata(file);
diff --git a/drivers/media/pci/cobalt/cobalt-driver.c b/drivers/media/pci/cobalt/cobalt-driver.c
index 39e25cc53edb..b7695705fdee 100644
--- a/drivers/media/pci/cobalt/cobalt-driver.c
+++ b/drivers/media/pci/cobalt/cobalt-driver.c
@@ -44,7 +44,7 @@ module_param_named(ignore_err, cobalt_ignore_err, int, 0644);
MODULE_PARM_DESC(ignore_err,
"If set then ignore missing i2c adapters/receivers. Default: 0\n");
-MODULE_AUTHOR("Hans Verkuil <hansverk@cisco.com> & Morten Hestnes");
+MODULE_AUTHOR("Hans Verkuil <hverkuil@kernel.org> & Morten Hestnes");
MODULE_DESCRIPTION("cobalt driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/pci/cobalt/cobalt-v4l2.c b/drivers/media/pci/cobalt/cobalt-v4l2.c
index ae82427e3479..51fd9576c6c2 100644
--- a/drivers/media/pci/cobalt/cobalt-v4l2.c
+++ b/drivers/media/pci/cobalt/cobalt-v4l2.c
@@ -447,7 +447,7 @@ static int cobalt_cobaltc(struct cobalt *cobalt, unsigned int cmd, void *arg)
return 0;
}
-static int cobalt_g_register(struct file *file, void *priv_fh,
+static int cobalt_g_register(struct file *file, void *priv,
struct v4l2_dbg_register *reg)
{
struct cobalt_stream *s = video_drvdata(file);
@@ -456,7 +456,7 @@ static int cobalt_g_register(struct file *file, void *priv_fh,
return cobalt_cobaltc(cobalt, VIDIOC_DBG_G_REGISTER, reg);
}
-static int cobalt_s_register(struct file *file, void *priv_fh,
+static int cobalt_s_register(struct file *file, void *priv,
const struct v4l2_dbg_register *reg)
{
struct cobalt_stream *s = video_drvdata(file);
@@ -467,7 +467,7 @@ static int cobalt_s_register(struct file *file, void *priv_fh,
}
#endif
-static int cobalt_querycap(struct file *file, void *priv_fh,
+static int cobalt_querycap(struct file *file, void *priv,
struct v4l2_capability *vcap)
{
struct cobalt_stream *s = video_drvdata(file);
@@ -562,7 +562,7 @@ static void cobalt_video_input_status_show(struct cobalt_stream *s)
cobalt_info("rx%d: Packer: %x\n", rx, ioread32(&packer->control));
}
-static int cobalt_log_status(struct file *file, void *priv_fh)
+static int cobalt_log_status(struct file *file, void *priv)
{
struct cobalt_stream *s = video_drvdata(file);
struct cobalt *cobalt = s->cobalt;
@@ -596,7 +596,7 @@ static int cobalt_log_status(struct file *file, void *priv_fh)
return 0;
}
-static int cobalt_enum_dv_timings(struct file *file, void *priv_fh,
+static int cobalt_enum_dv_timings(struct file *file, void *priv,
struct v4l2_enum_dv_timings *timings)
{
struct cobalt_stream *s = video_drvdata(file);
@@ -613,7 +613,7 @@ static int cobalt_enum_dv_timings(struct file *file, void *priv_fh,
pad, enum_dv_timings, timings);
}
-static int cobalt_s_dv_timings(struct file *file, void *priv_fh,
+static int cobalt_s_dv_timings(struct file *file, void *priv,
struct v4l2_dv_timings *timings)
{
struct cobalt_stream *s = video_drvdata(file);
@@ -641,7 +641,7 @@ static int cobalt_s_dv_timings(struct file *file, void *priv_fh,
return err;
}
-static int cobalt_g_dv_timings(struct file *file, void *priv_fh,
+static int cobalt_g_dv_timings(struct file *file, void *priv,
struct v4l2_dv_timings *timings)
{
struct cobalt_stream *s = video_drvdata(file);
@@ -654,7 +654,7 @@ static int cobalt_g_dv_timings(struct file *file, void *priv_fh,
pad, g_dv_timings, 0, timings);
}
-static int cobalt_query_dv_timings(struct file *file, void *priv_fh,
+static int cobalt_query_dv_timings(struct file *file, void *priv,
struct v4l2_dv_timings *timings)
{
struct cobalt_stream *s = video_drvdata(file);
@@ -667,7 +667,7 @@ static int cobalt_query_dv_timings(struct file *file, void *priv_fh,
pad, query_dv_timings, 0, timings);
}
-static int cobalt_dv_timings_cap(struct file *file, void *priv_fh,
+static int cobalt_dv_timings_cap(struct file *file, void *priv,
struct v4l2_dv_timings_cap *cap)
{
struct cobalt_stream *s = video_drvdata(file);
@@ -677,7 +677,7 @@ static int cobalt_dv_timings_cap(struct file *file, void *priv_fh,
pad, dv_timings_cap, cap);
}
-static int cobalt_enum_fmt_vid_cap(struct file *file, void *priv_fh,
+static int cobalt_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
switch (f->index) {
@@ -697,7 +697,7 @@ static int cobalt_enum_fmt_vid_cap(struct file *file, void *priv_fh,
return 0;
}
-static int cobalt_g_fmt_vid_cap(struct file *file, void *priv_fh,
+static int cobalt_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct cobalt_stream *s = video_drvdata(file);
@@ -726,7 +726,7 @@ static int cobalt_g_fmt_vid_cap(struct file *file, void *priv_fh,
return 0;
}
-static int cobalt_try_fmt_vid_cap(struct file *file, void *priv_fh,
+static int cobalt_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct cobalt_stream *s = video_drvdata(file);
@@ -787,7 +787,7 @@ static int cobalt_try_fmt_vid_cap(struct file *file, void *priv_fh,
return 0;
}
-static int cobalt_s_fmt_vid_cap(struct file *file, void *priv_fh,
+static int cobalt_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct cobalt_stream *s = video_drvdata(file);
@@ -796,7 +796,7 @@ static int cobalt_s_fmt_vid_cap(struct file *file, void *priv_fh,
if (vb2_is_busy(&s->q))
return -EBUSY;
- if (cobalt_try_fmt_vid_cap(file, priv_fh, f))
+ if (cobalt_try_fmt_vid_cap(file, priv, f))
return -EINVAL;
s->width = pix->width;
@@ -821,7 +821,7 @@ static int cobalt_s_fmt_vid_cap(struct file *file, void *priv_fh,
return 0;
}
-static int cobalt_try_fmt_vid_out(struct file *file, void *priv_fh,
+static int cobalt_try_fmt_vid_out(struct file *file, void *priv,
struct v4l2_format *f)
{
struct v4l2_pix_format *pix = &f->fmt.pix;
@@ -862,7 +862,7 @@ static int cobalt_try_fmt_vid_out(struct file *file, void *priv_fh,
return 0;
}
-static int cobalt_g_fmt_vid_out(struct file *file, void *priv_fh,
+static int cobalt_g_fmt_vid_out(struct file *file, void *priv,
struct v4l2_format *f)
{
struct cobalt_stream *s = video_drvdata(file);
@@ -882,7 +882,7 @@ static int cobalt_g_fmt_vid_out(struct file *file, void *priv_fh,
return 0;
}
-static int cobalt_enum_fmt_vid_out(struct file *file, void *priv_fh,
+static int cobalt_enum_fmt_vid_out(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
switch (f->index) {
@@ -899,7 +899,7 @@ static int cobalt_enum_fmt_vid_out(struct file *file, void *priv_fh,
return 0;
}
-static int cobalt_s_fmt_vid_out(struct file *file, void *priv_fh,
+static int cobalt_s_fmt_vid_out(struct file *file, void *priv,
struct v4l2_format *f)
{
struct cobalt_stream *s = video_drvdata(file);
@@ -909,7 +909,7 @@ static int cobalt_s_fmt_vid_out(struct file *file, void *priv_fh,
};
u32 code;
- if (cobalt_try_fmt_vid_out(file, priv_fh, f))
+ if (cobalt_try_fmt_vid_out(file, priv, f))
return -EINVAL;
if (vb2_is_busy(&s->q) && (pix->pixelformat != s->pixfmt ||
@@ -942,7 +942,7 @@ static int cobalt_s_fmt_vid_out(struct file *file, void *priv_fh,
return 0;
}
-static int cobalt_enum_input(struct file *file, void *priv_fh,
+static int cobalt_enum_input(struct file *file, void *priv,
struct v4l2_input *inp)
{
struct cobalt_stream *s = video_drvdata(file);
@@ -963,7 +963,7 @@ static int cobalt_enum_input(struct file *file, void *priv_fh,
video, g_input_status, &inp->status);
}
-static int cobalt_g_input(struct file *file, void *priv_fh, unsigned int *i)
+static int cobalt_g_input(struct file *file, void *priv, unsigned int *i)
{
struct cobalt_stream *s = video_drvdata(file);
@@ -971,7 +971,7 @@ static int cobalt_g_input(struct file *file, void *priv_fh, unsigned int *i)
return 0;
}
-static int cobalt_s_input(struct file *file, void *priv_fh, unsigned int i)
+static int cobalt_s_input(struct file *file, void *priv, unsigned int i)
{
struct cobalt_stream *s = video_drvdata(file);
@@ -990,7 +990,7 @@ static int cobalt_s_input(struct file *file, void *priv_fh, unsigned int i)
ADV76XX_PAD_HDMI_PORT_A, 0, 0);
}
-static int cobalt_enum_output(struct file *file, void *priv_fh,
+static int cobalt_enum_output(struct file *file, void *priv,
struct v4l2_output *out)
{
if (out->index)
@@ -1001,18 +1001,18 @@ static int cobalt_enum_output(struct file *file, void *priv_fh,
return 0;
}
-static int cobalt_g_output(struct file *file, void *priv_fh, unsigned int *i)
+static int cobalt_g_output(struct file *file, void *priv, unsigned int *i)
{
*i = 0;
return 0;
}
-static int cobalt_s_output(struct file *file, void *priv_fh, unsigned int i)
+static int cobalt_s_output(struct file *file, void *priv, unsigned int i)
{
return i ? -EINVAL : 0;
}
-static int cobalt_g_edid(struct file *file, void *fh, struct v4l2_edid *edid)
+static int cobalt_g_edid(struct file *file, void *priv, struct v4l2_edid *edid)
{
struct cobalt_stream *s = video_drvdata(file);
u32 pad = edid->pad;
@@ -1026,7 +1026,7 @@ static int cobalt_g_edid(struct file *file, void *fh, struct v4l2_edid *edid)
return ret;
}
-static int cobalt_s_edid(struct file *file, void *fh, struct v4l2_edid *edid)
+static int cobalt_s_edid(struct file *file, void *priv, struct v4l2_edid *edid)
{
struct cobalt_stream *s = video_drvdata(file);
u32 pad = edid->pad;
@@ -1050,7 +1050,7 @@ static int cobalt_subscribe_event(struct v4l2_fh *fh,
return v4l2_ctrl_subscribe_event(fh, sub);
}
-static int cobalt_g_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
+static int cobalt_g_parm(struct file *file, void *priv, struct v4l2_streamparm *a)
{
struct cobalt_stream *s = video_drvdata(file);
struct v4l2_fract fps;
@@ -1065,7 +1065,7 @@ static int cobalt_g_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
return 0;
}
-static int cobalt_g_pixelaspect(struct file *file, void *fh,
+static int cobalt_g_pixelaspect(struct file *file, void *priv,
int type, struct v4l2_fract *f)
{
struct cobalt_stream *s = video_drvdata(file);
@@ -1084,7 +1084,7 @@ static int cobalt_g_pixelaspect(struct file *file, void *fh,
return err;
}
-static int cobalt_g_selection(struct file *file, void *fh,
+static int cobalt_g_selection(struct file *file, void *priv,
struct v4l2_selection *sel)
{
struct cobalt_stream *s = video_drvdata(file);
diff --git a/drivers/media/pci/cx18/cx18-audio.c b/drivers/media/pci/cx18/cx18-audio.c
index 8602d088601b..1464795619f9 100644
--- a/drivers/media/pci/cx18/cx18-audio.c
+++ b/drivers/media/pci/cx18/cx18-audio.c
@@ -4,7 +4,7 @@
*
* Derived from ivtv-audio.c
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
*/
#include "cx18-driver.h"
diff --git a/drivers/media/pci/cx18/cx18-audio.h b/drivers/media/pci/cx18/cx18-audio.h
index 36ce333ab07a..29cf89d38d60 100644
--- a/drivers/media/pci/cx18/cx18-audio.h
+++ b/drivers/media/pci/cx18/cx18-audio.h
@@ -4,7 +4,7 @@
*
* Derived from ivtv-audio.c
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
*/
int cx18_audio_set_io(struct cx18 *cx);
diff --git a/drivers/media/pci/cx18/cx18-av-audio.c b/drivers/media/pci/cx18/cx18-av-audio.c
index 78e05df9a7ba..644d8ca4519b 100644
--- a/drivers/media/pci/cx18/cx18-av-audio.c
+++ b/drivers/media/pci/cx18/cx18-av-audio.c
@@ -4,7 +4,7 @@
*
* Derived from cx25840-audio.c
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-av-core.c b/drivers/media/pci/cx18/cx18-av-core.c
index ee6e71157786..4fb19d26ee29 100644
--- a/drivers/media/pci/cx18/cx18-av-core.c
+++ b/drivers/media/pci/cx18/cx18-av-core.c
@@ -4,7 +4,7 @@
*
* Derived from cx25840-core.c
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-av-core.h b/drivers/media/pci/cx18/cx18-av-core.h
index 55aceb064b33..71ac9d7af28f 100644
--- a/drivers/media/pci/cx18/cx18-av-core.h
+++ b/drivers/media/pci/cx18/cx18-av-core.h
@@ -4,7 +4,7 @@
*
* Derived from cx25840-core.h
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-av-firmware.c b/drivers/media/pci/cx18/cx18-av-firmware.c
index 61aeb8c9af7f..02dde685a6ad 100644
--- a/drivers/media/pci/cx18/cx18-av-firmware.c
+++ b/drivers/media/pci/cx18/cx18-av-firmware.c
@@ -2,7 +2,7 @@
/*
* cx18 ADEC firmware functions
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-av-vbi.c b/drivers/media/pci/cx18/cx18-av-vbi.c
index 1a113aad9cd4..f9beeaeaa1cb 100644
--- a/drivers/media/pci/cx18/cx18-av-vbi.c
+++ b/drivers/media/pci/cx18/cx18-av-vbi.c
@@ -4,7 +4,7 @@
*
* Derived from cx25840-vbi.c
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/cx18/cx18-cards.c b/drivers/media/pci/cx18/cx18-cards.c
index f5a30959a367..bddb9e0fffe0 100644
--- a/drivers/media/pci/cx18/cx18-cards.c
+++ b/drivers/media/pci/cx18/cx18-cards.c
@@ -4,7 +4,7 @@
*
* Derived from ivtv-cards.c
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-cards.h b/drivers/media/pci/cx18/cx18-cards.h
index ae9cf5bfdd59..511123b741d5 100644
--- a/drivers/media/pci/cx18/cx18-cards.h
+++ b/drivers/media/pci/cx18/cx18-cards.h
@@ -4,7 +4,7 @@
*
* Derived from ivtv-cards.c
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-controls.c b/drivers/media/pci/cx18/cx18-controls.c
index bb5fc120473c..78eadad8b6e8 100644
--- a/drivers/media/pci/cx18/cx18-controls.c
+++ b/drivers/media/pci/cx18/cx18-controls.c
@@ -4,7 +4,7 @@
*
* Derived from ivtv-controls.c
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
*/
#include <linux/kernel.h>
#include <linux/slab.h>
diff --git a/drivers/media/pci/cx18/cx18-controls.h b/drivers/media/pci/cx18/cx18-controls.h
index 458a3595a2ae..99de78878a76 100644
--- a/drivers/media/pci/cx18/cx18-controls.h
+++ b/drivers/media/pci/cx18/cx18-controls.h
@@ -4,7 +4,7 @@
*
* Derived from ivtv-controls.h
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/cx18/cx18-driver.c b/drivers/media/pci/cx18/cx18-driver.c
index 743fcc961374..b62fd12c93c1 100644
--- a/drivers/media/pci/cx18/cx18-driver.c
+++ b/drivers/media/pci/cx18/cx18-driver.c
@@ -4,7 +4,7 @@
*
* Derived from ivtv-driver.c
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-driver.h b/drivers/media/pci/cx18/cx18-driver.h
index 485ca9747c4c..ef38903709d0 100644
--- a/drivers/media/pci/cx18/cx18-driver.h
+++ b/drivers/media/pci/cx18/cx18-driver.h
@@ -4,7 +4,7 @@
*
* Derived from ivtv-driver.h
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
@@ -414,7 +414,7 @@ static inline struct cx18_open_id *fh2id(struct v4l2_fh *fh)
static inline struct cx18_open_id *file2id(struct file *file)
{
- return fh2id(file->private_data);
+ return fh2id(file_to_v4l2_fh(file));
}
/* forward declaration of struct defined in cx18-cards.h */
diff --git a/drivers/media/pci/cx18/cx18-fileops.c b/drivers/media/pci/cx18/cx18-fileops.c
index cefa91b37f89..4944033dbb20 100644
--- a/drivers/media/pci/cx18/cx18-fileops.c
+++ b/drivers/media/pci/cx18/cx18-fileops.c
@@ -4,7 +4,7 @@
*
* Derived from ivtv-fileops.c
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
@@ -678,7 +678,7 @@ void cx18_stop_capture(struct cx18_stream *s, int gop_end)
int cx18_v4l2_close(struct file *filp)
{
- struct v4l2_fh *fh = filp->private_data;
+ struct v4l2_fh *fh = file_to_v4l2_fh(filp);
struct cx18_open_id *id = fh2id(fh);
struct cx18 *cx = id->cx;
struct cx18_stream *s = &cx->streams[id->type];
@@ -709,11 +709,11 @@ int cx18_v4l2_close(struct file *filp)
}
if (id->type == CX18_ENC_STREAM_TYPE_YUV &&
- filp->private_data == vdev->queue->owner) {
+ file_to_v4l2_fh(filp) == vdev->queue->owner) {
vb2_queue_release(vdev->queue);
vdev->queue->owner = NULL;
}
- v4l2_fh_del(fh);
+ v4l2_fh_del(fh, filp);
v4l2_fh_exit(fh);
/* 'Unclaim' this stream */
@@ -743,8 +743,7 @@ static int cx18_serialized_open(struct cx18_stream *s, struct file *filp)
item->type = s->type;
item->open_id = cx->open_id++;
- filp->private_data = &item->fh;
- v4l2_fh_add(&item->fh);
+ v4l2_fh_add(&item->fh, filp);
if (item->type == CX18_ENC_STREAM_TYPE_RAD &&
v4l2_fh_is_singular_file(filp)) {
@@ -752,7 +751,7 @@ static int cx18_serialized_open(struct cx18_stream *s, struct file *filp)
if (atomic_read(&cx->ana_capturing) > 0) {
/* switching to radio while capture is
in progress is not polite */
- v4l2_fh_del(&item->fh);
+ v4l2_fh_del(&item->fh, filp);
v4l2_fh_exit(&item->fh);
kfree(item);
return -EBUSY;
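
The v4l2_fh_add()/v4l2_fh_del() calls now take the struct file, and drivers fetch the fh through file_to_v4l2_fh() instead of dereferencing filp->private_data themselves; presumably the core now owns that pointer. A condensed sketch of the open/close pairing, based only on the calls visible in this diff:

    /* Open path: the core links the fh to the file. */
    v4l2_fh_init(&item->fh, video_dev);     /* video_dev: the stream's node, assumed */
    v4l2_fh_add(&item->fh, filp);           /* presumably also sets filp->private_data */

    /* Close path: retrieve it back through the accessor. */
    struct v4l2_fh *fh = file_to_v4l2_fh(filp);
    v4l2_fh_del(fh, filp);
    v4l2_fh_exit(fh);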
diff --git a/drivers/media/pci/cx18/cx18-fileops.h b/drivers/media/pci/cx18/cx18-fileops.h
index 943057b83d94..bc999ea85dc2 100644
--- a/drivers/media/pci/cx18/cx18-fileops.h
+++ b/drivers/media/pci/cx18/cx18-fileops.h
@@ -4,7 +4,7 @@
*
* Derived from ivtv-fileops.h
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
*/
/* Testing/Debugging */
diff --git a/drivers/media/pci/cx18/cx18-firmware.c b/drivers/media/pci/cx18/cx18-firmware.c
index 1b038b2802bf..94e17948fb30 100644
--- a/drivers/media/pci/cx18/cx18-firmware.c
+++ b/drivers/media/pci/cx18/cx18-firmware.c
@@ -2,7 +2,7 @@
/*
* cx18 firmware functions
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-firmware.h b/drivers/media/pci/cx18/cx18-firmware.h
index 1357f76d613e..8e63677284bd 100644
--- a/drivers/media/pci/cx18/cx18-firmware.h
+++ b/drivers/media/pci/cx18/cx18-firmware.h
@@ -2,7 +2,7 @@
/*
* cx18 firmware functions
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
*/
int cx18_firmware_init(struct cx18 *cx);
diff --git a/drivers/media/pci/cx18/cx18-gpio.c b/drivers/media/pci/cx18/cx18-gpio.c
index 485a6cbeb15a..4aea92639599 100644
--- a/drivers/media/pci/cx18/cx18-gpio.c
+++ b/drivers/media/pci/cx18/cx18-gpio.c
@@ -4,7 +4,7 @@
*
* Derived from ivtv-gpio.c
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-gpio.h b/drivers/media/pci/cx18/cx18-gpio.h
index 8d5797dea7f5..3eefc4644101 100644
--- a/drivers/media/pci/cx18/cx18-gpio.h
+++ b/drivers/media/pci/cx18/cx18-gpio.h
@@ -4,7 +4,7 @@
*
* Derived from ivtv-gpio.h
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-i2c.c b/drivers/media/pci/cx18/cx18-i2c.c
index a83435245251..a1abb0631cae 100644
--- a/drivers/media/pci/cx18/cx18-i2c.c
+++ b/drivers/media/pci/cx18/cx18-i2c.c
@@ -4,7 +4,7 @@
*
* Derived from ivtv-i2c.c
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-i2c.h b/drivers/media/pci/cx18/cx18-i2c.h
index 4526cb324fec..8ae3125c78c0 100644
--- a/drivers/media/pci/cx18/cx18-i2c.h
+++ b/drivers/media/pci/cx18/cx18-i2c.h
@@ -4,7 +4,7 @@
*
* Derived from ivtv-i2c.h
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
*/
int cx18_i2c_register(struct cx18 *cx, unsigned idx);
diff --git a/drivers/media/pci/cx18/cx18-io.c b/drivers/media/pci/cx18/cx18-io.c
index 50e4e8a598d4..1d3d006e6329 100644
--- a/drivers/media/pci/cx18/cx18-io.c
+++ b/drivers/media/pci/cx18/cx18-io.c
@@ -2,7 +2,7 @@
/*
* cx18 driver PCI memory mapped IO access routines
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-io.h b/drivers/media/pci/cx18/cx18-io.h
index 190b142d047e..1f0bfad7d0f0 100644
--- a/drivers/media/pci/cx18/cx18-io.h
+++ b/drivers/media/pci/cx18/cx18-io.h
@@ -2,7 +2,7 @@
/*
* cx18 driver PCI memory mapped IO access routines
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-ioctl.c b/drivers/media/pci/cx18/cx18-ioctl.c
index 9a1512b1ccaa..0f3019739d03 100644
--- a/drivers/media/pci/cx18/cx18-ioctl.c
+++ b/drivers/media/pci/cx18/cx18-ioctl.c
@@ -4,7 +4,7 @@
*
* Derived from ivtv-ioctl.c
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
@@ -52,7 +52,7 @@ static const struct v4l2_fmtdesc cx18_formats_mpeg[] = {
static int cx18_g_fmt_vid_cap(struct file *file, void *fh,
struct v4l2_format *fmt)
{
- struct cx18_open_id *id = fh2id(fh);
+ struct cx18_open_id *id = file2id(file);
struct cx18 *cx = id->cx;
struct cx18_stream *s = &cx->streams[id->type];
struct v4l2_pix_format *pixfmt = &fmt->fmt.pix;
@@ -76,7 +76,7 @@ static int cx18_g_fmt_vid_cap(struct file *file, void *fh,
static int cx18_try_fmt_vid_cap(struct file *file, void *fh,
struct v4l2_format *fmt)
{
- struct cx18_open_id *id = fh2id(fh);
+ struct cx18_open_id *id = file2id(file);
struct cx18 *cx = id->cx;
struct v4l2_pix_format *pixfmt = &fmt->fmt.pix;
int w = pixfmt->width;
@@ -121,7 +121,7 @@ static int cx18_try_fmt_vid_cap(struct file *file, void *fh,
static int cx18_s_fmt_vid_cap(struct file *file, void *fh,
struct v4l2_format *fmt)
{
- struct cx18_open_id *id = fh2id(fh);
+ struct cx18_open_id *id = file2id(file);
struct cx18 *cx = id->cx;
struct v4l2_subdev_format format = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
@@ -261,7 +261,7 @@ u16 cx18_get_service_set(struct v4l2_sliced_vbi_format *fmt)
static int cx18_g_fmt_vbi_cap(struct file *file, void *fh,
struct v4l2_format *fmt)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
struct v4l2_vbi_format *vbifmt = &fmt->fmt.vbi;
vbifmt->sampling_rate = 27000000;
@@ -280,7 +280,7 @@ static int cx18_g_fmt_vbi_cap(struct file *file, void *fh,
static int cx18_g_fmt_sliced_vbi_cap(struct file *file, void *fh,
struct v4l2_format *fmt)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
struct v4l2_sliced_vbi_format *vbifmt = &fmt->fmt.sliced;
/* sane, V4L2 spec compliant, defaults */
@@ -311,7 +311,7 @@ static int cx18_try_fmt_vbi_cap(struct file *file, void *fh,
static int cx18_try_fmt_sliced_vbi_cap(struct file *file, void *fh,
struct v4l2_format *fmt)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
struct v4l2_sliced_vbi_format *vbifmt = &fmt->fmt.sliced;
vbifmt->io_size = sizeof(struct v4l2_sliced_vbi_data) * 36;
@@ -330,7 +330,7 @@ static int cx18_try_fmt_sliced_vbi_cap(struct file *file, void *fh,
static int cx18_s_fmt_vbi_cap(struct file *file, void *fh,
struct v4l2_format *fmt)
{
- struct cx18_open_id *id = fh2id(fh);
+ struct cx18_open_id *id = file2id(file);
struct cx18 *cx = id->cx;
int ret;
@@ -360,7 +360,7 @@ static int cx18_s_fmt_vbi_cap(struct file *file, void *fh,
static int cx18_s_fmt_sliced_vbi_cap(struct file *file, void *fh,
struct v4l2_format *fmt)
{
- struct cx18_open_id *id = fh2id(fh);
+ struct cx18_open_id *id = file2id(file);
struct cx18 *cx = id->cx;
int ret;
struct v4l2_sliced_vbi_format *vbifmt = &fmt->fmt.sliced;
@@ -392,7 +392,7 @@ static int cx18_s_fmt_sliced_vbi_cap(struct file *file, void *fh,
static int cx18_g_register(struct file *file, void *fh,
struct v4l2_dbg_register *reg)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
if (reg->reg & 0x3)
return -EINVAL;
@@ -406,7 +406,7 @@ static int cx18_g_register(struct file *file, void *fh,
static int cx18_s_register(struct file *file, void *fh,
const struct v4l2_dbg_register *reg)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
if (reg->reg & 0x3)
return -EINVAL;
@@ -420,7 +420,7 @@ static int cx18_s_register(struct file *file, void *fh,
static int cx18_querycap(struct file *file, void *fh,
struct v4l2_capability *vcap)
{
- struct cx18_open_id *id = fh2id(fh);
+ struct cx18_open_id *id = file2id(file);
struct cx18 *cx = id->cx;
strscpy(vcap->driver, CX18_DRIVER_NAME, sizeof(vcap->driver));
@@ -431,14 +431,14 @@ static int cx18_querycap(struct file *file, void *fh,
static int cx18_enumaudio(struct file *file, void *fh, struct v4l2_audio *vin)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
return cx18_get_audio_input(cx, vin->index, vin);
}
static int cx18_g_audio(struct file *file, void *fh, struct v4l2_audio *vin)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
vin->index = cx->audio_input;
return cx18_get_audio_input(cx, vin->index, vin);
@@ -446,7 +446,7 @@ static int cx18_g_audio(struct file *file, void *fh, struct v4l2_audio *vin)
static int cx18_s_audio(struct file *file, void *fh, const struct v4l2_audio *vout)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
if (vout->index >= cx->nof_audio_inputs)
return -EINVAL;
@@ -457,7 +457,7 @@ static int cx18_s_audio(struct file *file, void *fh, const struct v4l2_audio *vo
static int cx18_enum_input(struct file *file, void *fh, struct v4l2_input *vin)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
/* set it to defaults from our table */
return cx18_get_input(cx, vin->index, vin);
@@ -466,7 +466,7 @@ static int cx18_enum_input(struct file *file, void *fh, struct v4l2_input *vin)
static int cx18_g_pixelaspect(struct file *file, void *fh,
int type, struct v4l2_fract *f)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
@@ -479,7 +479,7 @@ static int cx18_g_pixelaspect(struct file *file, void *fh,
static int cx18_g_selection(struct file *file, void *fh,
struct v4l2_selection *sel)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
@@ -499,7 +499,7 @@ static int cx18_g_selection(struct file *file, void *fh,
static int cx18_enum_fmt_vid_cap(struct file *file, void *fh,
struct v4l2_fmtdesc *fmt)
{
- struct cx18_open_id *id = fh2id(fh);
+ struct cx18_open_id *id = file2id(file);
if (id->type == CX18_ENC_STREAM_TYPE_YUV) {
if (fmt->index >= ARRAY_SIZE(cx18_formats_yuv))
@@ -515,7 +515,7 @@ static int cx18_enum_fmt_vid_cap(struct file *file, void *fh,
static int cx18_g_input(struct file *file, void *fh, unsigned int *i)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
*i = cx->active_input;
return 0;
@@ -523,7 +523,7 @@ static int cx18_g_input(struct file *file, void *fh, unsigned int *i)
int cx18_s_input(struct file *file, void *fh, unsigned int inp)
{
- struct cx18_open_id *id = fh2id(fh);
+ struct cx18_open_id *id = file2id(file);
struct cx18 *cx = id->cx;
v4l2_std_id std = V4L2_STD_ALL;
const struct cx18_card_video_input *card_input =
@@ -561,7 +561,7 @@ int cx18_s_input(struct file *file, void *fh, unsigned int inp)
static int cx18_g_frequency(struct file *file, void *fh,
struct v4l2_frequency *vf)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
if (vf->tuner != 0)
return -EINVAL;
@@ -572,7 +572,7 @@ static int cx18_g_frequency(struct file *file, void *fh,
int cx18_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf)
{
- struct cx18_open_id *id = fh2id(fh);
+ struct cx18_open_id *id = file2id(file);
struct cx18 *cx = id->cx;
if (vf->tuner != 0)
@@ -587,7 +587,7 @@ int cx18_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *v
static int cx18_g_std(struct file *file, void *fh, v4l2_std_id *std)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
*std = cx->std;
return 0;
@@ -595,7 +595,7 @@ static int cx18_g_std(struct file *file, void *fh, v4l2_std_id *std)
int cx18_s_std(struct file *file, void *fh, v4l2_std_id std)
{
- struct cx18_open_id *id = fh2id(fh);
+ struct cx18_open_id *id = file2id(file);
struct cx18 *cx = id->cx;
if ((std & V4L2_STD_ALL) == 0)
@@ -644,7 +644,7 @@ int cx18_s_std(struct file *file, void *fh, v4l2_std_id std)
static int cx18_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt)
{
- struct cx18_open_id *id = fh2id(fh);
+ struct cx18_open_id *id = file2id(file);
struct cx18 *cx = id->cx;
if (vt->index != 0)
@@ -656,7 +656,7 @@ static int cx18_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt
static int cx18_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
if (vt->index != 0)
return -EINVAL;
@@ -673,7 +673,7 @@ static int cx18_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
static int cx18_g_sliced_vbi_cap(struct file *file, void *fh,
struct v4l2_sliced_vbi_cap *cap)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
int set = cx->is_50hz ? V4L2_SLICED_VBI_625 : V4L2_SLICED_VBI_525;
int f, l;
@@ -794,7 +794,7 @@ static int cx18_process_idx_data(struct cx18_stream *s, struct cx18_mdl *mdl,
static int cx18_g_enc_index(struct file *file, void *fh,
struct v4l2_enc_idx *idx)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
struct cx18_stream *s = &cx->streams[CX18_ENC_STREAM_TYPE_IDX];
s32 tmp;
struct cx18_mdl *mdl;
@@ -841,7 +841,7 @@ static int cx18_g_enc_index(struct file *file, void *fh,
static int cx18_encoder_cmd(struct file *file, void *fh,
struct v4l2_encoder_cmd *enc)
{
- struct cx18_open_id *id = fh2id(fh);
+ struct cx18_open_id *id = file2id(file);
struct cx18 *cx = id->cx;
u32 h;
@@ -900,7 +900,7 @@ static int cx18_encoder_cmd(struct file *file, void *fh,
static int cx18_try_encoder_cmd(struct file *file, void *fh,
struct v4l2_encoder_cmd *enc)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
switch (enc->cmd) {
case V4L2_ENC_CMD_START:
@@ -932,7 +932,7 @@ static int cx18_try_encoder_cmd(struct file *file, void *fh,
static int cx18_log_status(struct file *file, void *fh)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
struct v4l2_input vidin;
struct v4l2_audio audin;
int i;
@@ -976,7 +976,7 @@ static int cx18_log_status(struct file *file, void *fh)
static long cx18_default(struct file *file, void *fh, bool valid_prio,
unsigned int cmd, void *arg)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
switch (cmd) {
case VIDIOC_INT_RESET: {
diff --git a/drivers/media/pci/cx18/cx18-ioctl.h b/drivers/media/pci/cx18/cx18-ioctl.h
index 221e2400fb3e..97cd9f99e22d 100644
--- a/drivers/media/pci/cx18/cx18-ioctl.h
+++ b/drivers/media/pci/cx18/cx18-ioctl.h
@@ -4,7 +4,7 @@
*
* Derived from ivtv-ioctl.h
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-irq.c b/drivers/media/pci/cx18/cx18-irq.c
index db63077821b1..0ef01e98255d 100644
--- a/drivers/media/pci/cx18/cx18-irq.c
+++ b/drivers/media/pci/cx18/cx18-irq.c
@@ -2,7 +2,7 @@
/*
* cx18 interrupt handling
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-irq.h b/drivers/media/pci/cx18/cx18-irq.h
index fdb585a72827..c1456d69dffa 100644
--- a/drivers/media/pci/cx18/cx18-irq.h
+++ b/drivers/media/pci/cx18/cx18-irq.h
@@ -2,7 +2,7 @@
/*
* cx18 interrupt handling
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-mailbox.c b/drivers/media/pci/cx18/cx18-mailbox.c
index a6457c23d18c..8c70b638a40c 100644
--- a/drivers/media/pci/cx18/cx18-mailbox.c
+++ b/drivers/media/pci/cx18/cx18-mailbox.c
@@ -2,7 +2,7 @@
/*
* cx18 mailbox functions
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-mailbox.h b/drivers/media/pci/cx18/cx18-mailbox.h
index 971382ac0eca..1752b8e1ca13 100644
--- a/drivers/media/pci/cx18/cx18-mailbox.h
+++ b/drivers/media/pci/cx18/cx18-mailbox.h
@@ -2,7 +2,7 @@
/*
* cx18 mailbox functions
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-queue.c b/drivers/media/pci/cx18/cx18-queue.c
index 013694bfcb1c..eeb5513b1d52 100644
--- a/drivers/media/pci/cx18/cx18-queue.c
+++ b/drivers/media/pci/cx18/cx18-queue.c
@@ -4,7 +4,7 @@
*
* Derived from ivtv-queue.c
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
@@ -379,15 +379,22 @@ int cx18_stream_alloc(struct cx18_stream *s)
break;
}
+ buf->dma_handle = dma_map_single(&s->cx->pci_dev->dev,
+ buf->buf, s->buf_size,
+ s->dma);
+ if (dma_mapping_error(&s->cx->pci_dev->dev, buf->dma_handle)) {
+ kfree(buf->buf);
+ kfree(mdl);
+ kfree(buf);
+ break;
+ }
+
INIT_LIST_HEAD(&mdl->list);
INIT_LIST_HEAD(&mdl->buf_list);
mdl->id = s->mdl_base_idx; /* a somewhat safe value */
cx18_enqueue(s, mdl, &s->q_idle);
INIT_LIST_HEAD(&buf->list);
- buf->dma_handle = dma_map_single(&s->cx->pci_dev->dev,
- buf->buf, s->buf_size,
- s->dma);
cx18_buf_sync_for_cpu(s, buf);
list_add_tail(&buf->list, &s->buf_pool);
}
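
The fix above checks dma_map_single() with dma_mapping_error() before the buffer joins any queue, and unwinds the partial allocation on failure, so a failed mapping can no longer leave an unmapped buffer in the pool. The general idiom, as a standalone sketch with hypothetical names:

    dma_addr_t handle = dma_map_single(dev, cpu_buf, size, DMA_FROM_DEVICE);

    if (dma_mapping_error(dev, handle)) {
            kfree(cpu_buf);         /* unwind before the buffer is published */
            return -ENOMEM;
    }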
diff --git a/drivers/media/pci/cx18/cx18-queue.h b/drivers/media/pci/cx18/cx18-queue.h
index 26f2097c0496..972f234ffead 100644
--- a/drivers/media/pci/cx18/cx18-queue.h
+++ b/drivers/media/pci/cx18/cx18-queue.h
@@ -4,7 +4,7 @@
*
* Derived from ivtv-queue.h
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-scb.c b/drivers/media/pci/cx18/cx18-scb.c
index 4a0edc1e42e7..670dd9b0a332 100644
--- a/drivers/media/pci/cx18/cx18-scb.c
+++ b/drivers/media/pci/cx18/cx18-scb.c
@@ -2,7 +2,7 @@
/*
* cx18 System Control Block initialization
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-scb.h b/drivers/media/pci/cx18/cx18-scb.h
index 841edc0712ab..900f7291979f 100644
--- a/drivers/media/pci/cx18/cx18-scb.h
+++ b/drivers/media/pci/cx18/cx18-scb.h
@@ -2,7 +2,7 @@
/*
* cx18 System Control Block initialization
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-streams.c b/drivers/media/pci/cx18/cx18-streams.c
index 42d6f7b90ede..48203ba16387 100644
--- a/drivers/media/pci/cx18/cx18-streams.c
+++ b/drivers/media/pci/cx18/cx18-streams.c
@@ -4,7 +4,7 @@
*
* Derived from ivtv-streams.c
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-streams.h b/drivers/media/pci/cx18/cx18-streams.h
index bba4349c5c2e..e01bed6b4827 100644
--- a/drivers/media/pci/cx18/cx18-streams.h
+++ b/drivers/media/pci/cx18/cx18-streams.h
@@ -4,7 +4,7 @@
*
* Derived from ivtv-streams.h
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-vbi.c b/drivers/media/pci/cx18/cx18-vbi.c
index c7cce38dd754..8dc4ce325935 100644
--- a/drivers/media/pci/cx18/cx18-vbi.c
+++ b/drivers/media/pci/cx18/cx18-vbi.c
@@ -4,7 +4,7 @@
*
* Derived from ivtv-vbi.c
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
*/
#include "cx18-driver.h"
diff --git a/drivers/media/pci/cx18/cx18-vbi.h b/drivers/media/pci/cx18/cx18-vbi.h
index a5f81c6159f0..41f5026696c4 100644
--- a/drivers/media/pci/cx18/cx18-vbi.h
+++ b/drivers/media/pci/cx18/cx18-vbi.h
@@ -4,7 +4,7 @@
*
* Derived from ivtv-vbi.h
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
*/
void cx18_process_vbi_data(struct cx18 *cx, struct cx18_mdl *mdl,
diff --git a/drivers/media/pci/cx18/cx18-version.h b/drivers/media/pci/cx18/cx18-version.h
index e7396182fc5a..e8636ac5d5a5 100644
--- a/drivers/media/pci/cx18/cx18-version.h
+++ b/drivers/media/pci/cx18/cx18-version.h
@@ -2,7 +2,7 @@
/*
* cx18 driver version information
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
*/
#ifndef CX18_VERSION_H
diff --git a/drivers/media/pci/cx18/cx18-video.c b/drivers/media/pci/cx18/cx18-video.c
index 2fde8c2d3fdc..86cd44053d34 100644
--- a/drivers/media/pci/cx18/cx18-video.c
+++ b/drivers/media/pci/cx18/cx18-video.c
@@ -2,7 +2,7 @@
/*
* cx18 video interface functions
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
*/
#include "cx18-driver.h"
diff --git a/drivers/media/pci/cx18/cx18-video.h b/drivers/media/pci/cx18/cx18-video.h
index f613975ca5f0..ef212f1c5b17 100644
--- a/drivers/media/pci/cx18/cx18-video.h
+++ b/drivers/media/pci/cx18/cx18-video.h
@@ -2,7 +2,7 @@
/*
* cx18 video interface functions
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
*/
void cx18_video_set_io(struct cx18 *cx);
diff --git a/drivers/media/pci/cx18/cx23418.h b/drivers/media/pci/cx18/cx23418.h
index 8859c0e8557f..22486f39bcda 100644
--- a/drivers/media/pci/cx18/cx23418.h
+++ b/drivers/media/pci/cx18/cx23418.h
@@ -2,7 +2,7 @@
/*
* cx18 header containing common defines.
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
*/
#ifndef CX23418_H
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c b/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c
index 6030bd23b4b9..d1fece6210ab 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c
@@ -109,7 +109,7 @@ static int csi2_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
case V4L2_EVENT_FRAME_SYNC:
return v4l2_event_subscribe(fh, sub, 10, NULL);
case V4L2_EVENT_CTRL:
- return v4l2_ctrl_subscribe_event(fh, sub);
+ return v4l2_ctrl_subdev_subscribe_event(sd, fh, sub);
default:
return -EINVAL;
}
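
[Note on the hunk above: v4l2_ctrl_subdev_subscribe_event() resolves the control handler through the subdev itself rather than through the video node's file handle, which is the correct lookup when the ioctl arrives on a subdev node. A reduced sketch of such a handler; the function name and event depth are illustrative, not taken from the ipu6 driver:]

#include <media/v4l2-ctrls.h>
#include <media/v4l2-event.h>
#include <media/v4l2-subdev.h>

static int demo_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
				struct v4l2_event_subscription *sub)
{
	switch (sub->type) {
	case V4L2_EVENT_FRAME_SYNC:
		/* queue depth 10: buffer up to ten frame-sync events */
		return v4l2_event_subscribe(fh, sub, 10, NULL);
	case V4L2_EVENT_CTRL:
		/* resolved via sd->ctrl_handler, not the video node */
		return v4l2_ctrl_subdev_subscribe_event(sd, fh, sub);
	default:
		return -EINVAL;
	}
}
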
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.c b/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.c
index 0a06de5c739c..463a0adf9e13 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.c
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.c
@@ -81,6 +81,12 @@ unsigned int ipu6_isys_mbus_code_to_mipi(u32 code)
case MEDIA_BUS_FMT_SGRBG8_1X8:
case MEDIA_BUS_FMT_SRGGB8_1X8:
return MIPI_CSI2_DT_RAW8;
+ case MEDIA_BUS_FMT_META_8:
+ case MEDIA_BUS_FMT_META_10:
+ case MEDIA_BUS_FMT_META_12:
+ case MEDIA_BUS_FMT_META_16:
+ case MEDIA_BUS_FMT_META_24:
+ return MIPI_CSI2_DT_EMBEDDED_8B;
default:
/* return unavailable MIPI data type - 0x3f */
WARN_ON(1);
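
[Note on the hunk above: the new cases map the generic MEDIA_BUS_FMT_META_* codes onto the MIPI CSI-2 "embedded 8-bit non-image data" type, so metadata streams get a valid data type instead of tripping the WARN_ON. A minimal sketch of the mapping, showing only a subset of formats:]

#include <linux/media-bus-format.h>
#include <media/mipi-csi2.h>

static u32 demo_mbus_code_to_mipi(u32 code)
{
	switch (code) {
	case MEDIA_BUS_FMT_SRGGB8_1X8:
		return MIPI_CSI2_DT_RAW8;
	case MEDIA_BUS_FMT_META_8:
	case MEDIA_BUS_FMT_META_16:
		/* metadata travels as embedded 8-bit non-image data */
		return MIPI_CSI2_DT_EMBEDDED_8B;
	default:
		return 0x3f;	/* unavailable MIPI data type */
	}
}
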
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-video.c b/drivers/media/pci/intel/ipu6/ipu6-isys-video.c
index 24a2ef93474c..f3f3bc0615e5 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-isys-video.c
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-video.c
@@ -1306,7 +1306,6 @@ int ipu6_isys_video_init(struct ipu6_isys_video *av)
__ipu6_isys_vidioc_try_fmt_meta_cap(av, &format_meta);
av->meta_fmt = format_meta.fmt.meta;
- set_bit(V4L2_FL_USES_V4L2_FH, &av->vdev.flags);
video_set_drvdata(&av->vdev, av);
ret = video_register_device(&av->vdev, VFL_TYPE_VIDEO, -1);
diff --git a/drivers/media/pci/ivtv/ivtv-alsa-pcm.c b/drivers/media/pci/ivtv/ivtv-alsa-pcm.c
index 8f346d7da9c8..269a799ec046 100644
--- a/drivers/media/pci/ivtv/ivtv-alsa-pcm.c
+++ b/drivers/media/pci/ivtv/ivtv-alsa-pcm.c
@@ -148,14 +148,12 @@ static int snd_ivtv_pcm_capture_open(struct snd_pcm_substream *substream)
s = &itv->streams[IVTV_ENC_STREAM_TYPE_PCM];
- v4l2_fh_init(&item.fh, &s->vdev);
item.itv = itv;
item.type = s->type;
/* See if the stream is available */
if (ivtv_claim_stream(&item, item.type)) {
/* No, it's already in use */
- v4l2_fh_exit(&item.fh);
snd_ivtv_unlock(itvsc);
return -EBUSY;
}
diff --git a/drivers/media/pci/ivtv/ivtv-cards.c b/drivers/media/pci/ivtv/ivtv-cards.c
index c8f4ed7ff2c6..f2ccf8e98664 100644
--- a/drivers/media/pci/ivtv/ivtv-cards.c
+++ b/drivers/media/pci/ivtv/ivtv-cards.c
@@ -2,7 +2,7 @@
/*
Functions to query card hardware
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-cards.h b/drivers/media/pci/ivtv/ivtv-cards.h
index c252733df340..af64e55d3b80 100644
--- a/drivers/media/pci/ivtv/ivtv-cards.h
+++ b/drivers/media/pci/ivtv/ivtv-cards.h
@@ -2,7 +2,7 @@
/*
Functions to query card hardware
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-controls.c b/drivers/media/pci/ivtv/ivtv-controls.c
index a0b9a5a5c7f1..f087a12c4ebd 100644
--- a/drivers/media/pci/ivtv/ivtv-controls.c
+++ b/drivers/media/pci/ivtv/ivtv-controls.c
@@ -2,7 +2,7 @@
/*
ioctl control functions
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-controls.h b/drivers/media/pci/ivtv/ivtv-controls.h
index 444c86a47e5a..d152691ea255 100644
--- a/drivers/media/pci/ivtv/ivtv-controls.h
+++ b/drivers/media/pci/ivtv/ivtv-controls.h
@@ -2,7 +2,7 @@
/*
ioctl control functions
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
index ecc20cd89926..72a8f76a41f4 100644
--- a/drivers/media/pci/ivtv/ivtv-driver.c
+++ b/drivers/media/pci/ivtv/ivtv-driver.c
@@ -1,22 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
ivtv driver initialization and card probing
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
Copyright (C) 2004 Chris Kennedy <c@groovy.org>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
/* Main Driver file for the ivtv project:
diff --git a/drivers/media/pci/ivtv/ivtv-driver.h b/drivers/media/pci/ivtv/ivtv-driver.h
index a6ffa99e16bc..f1f18911332e 100644
--- a/drivers/media/pci/ivtv/ivtv-driver.h
+++ b/drivers/media/pci/ivtv/ivtv-driver.h
@@ -1,22 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
ivtv driver internal defines and structures
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
Copyright (C) 2004 Chris Kennedy <c@groovy.org>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
#ifndef IVTV_DRIVER_H
@@ -322,6 +309,7 @@ struct ivtv_queue {
};
struct ivtv; /* forward reference */
+struct ivtv_open_id;
struct ivtv_stream {
/* These first four fields are always set, even if the stream
@@ -331,7 +319,7 @@ struct ivtv_stream {
const char *name; /* name of the stream */
int type; /* stream type */
- struct v4l2_fh *fh; /* pointer to the streaming filehandle */
+ struct ivtv_open_id *id; /* pointer to the streaming ivtv_open_id */
spinlock_t qlock; /* locks access to the queues */
unsigned long s_flags; /* status flags, see above */
int dma; /* can be PCI_DMA_TODEVICE, PCI_DMA_FROMDEVICE or PCI_DMA_NONE */
@@ -383,9 +371,9 @@ struct ivtv_open_id {
struct ivtv *itv;
};
-static inline struct ivtv_open_id *fh2id(struct v4l2_fh *fh)
+static inline struct ivtv_open_id *file2id(struct file *filp)
{
- return container_of(fh, struct ivtv_open_id, fh);
+ return container_of(file_to_v4l2_fh(filp), struct ivtv_open_id, fh);
}
struct yuv_frame_info
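
[Note on the hunk above: with file->private_data no longer the driver's business, lookups go through file_to_v4l2_fh() (the helper this series converts drivers to) plus container_of(). The accessor idiom, with an illustrative struct name:]

#include <linux/container_of.h>
#include <linux/fs.h>
#include <media/v4l2-fh.h>

struct demo_open_id {
	struct v4l2_fh fh;	/* embedded; need not be the first member */
	int type;
};

static inline struct demo_open_id *demo_file2id(struct file *filp)
{
	return container_of(file_to_v4l2_fh(filp), struct demo_open_id, fh);
}
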
diff --git a/drivers/media/pci/ivtv/ivtv-fileops.c b/drivers/media/pci/ivtv/ivtv-fileops.c
index cfa28d035586..ef9ec062c03a 100644
--- a/drivers/media/pci/ivtv/ivtv-fileops.c
+++ b/drivers/media/pci/ivtv/ivtv-fileops.c
@@ -3,7 +3,7 @@
file operation functions
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
Copyright (C) 2004 Chris Kennedy <c@groovy.org>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
@@ -39,16 +39,16 @@ int ivtv_claim_stream(struct ivtv_open_id *id, int type)
if (test_and_set_bit(IVTV_F_S_CLAIMED, &s->s_flags)) {
/* someone already claimed this stream */
- if (s->fh == &id->fh) {
+ if (s->id == id) {
/* yes, this file descriptor did. So that's OK. */
return 0;
}
- if (s->fh == NULL && (type == IVTV_DEC_STREAM_TYPE_VBI ||
+ if (s->id == NULL && (type == IVTV_DEC_STREAM_TYPE_VBI ||
type == IVTV_ENC_STREAM_TYPE_VBI)) {
/* VBI is handled already internally, now also assign
the file descriptor to this stream for external
reading of the stream. */
- s->fh = &id->fh;
+ s->id = id;
IVTV_DEBUG_INFO("Start Read VBI\n");
return 0;
}
@@ -56,7 +56,7 @@ int ivtv_claim_stream(struct ivtv_open_id *id, int type)
IVTV_DEBUG_INFO("Stream %d is busy\n", type);
return -EBUSY;
}
- s->fh = &id->fh;
+ s->id = id;
if (type == IVTV_DEC_STREAM_TYPE_VBI) {
/* Enable reinsertion interrupt */
ivtv_clear_irq_mask(itv, IVTV_IRQ_DEC_VBI_RE_INSERT);
@@ -94,7 +94,7 @@ void ivtv_release_stream(struct ivtv_stream *s)
struct ivtv *itv = s->itv;
struct ivtv_stream *s_vbi;
- s->fh = NULL;
+ s->id = NULL;
if ((s->type == IVTV_DEC_STREAM_TYPE_VBI || s->type == IVTV_ENC_STREAM_TYPE_VBI) &&
test_bit(IVTV_F_S_INTERNAL_USE, &s->s_flags)) {
/* this stream is still in use internally */
@@ -126,7 +126,7 @@ void ivtv_release_stream(struct ivtv_stream *s)
/* was already cleared */
return;
}
- if (s_vbi->fh) {
+ if (s_vbi->id) {
/* VBI stream still claimed by a file descriptor */
return;
}
@@ -359,7 +359,7 @@ static ssize_t ivtv_read(struct ivtv_stream *s, char __user *ubuf, size_t tot_co
size_t tot_written = 0;
int single_frame = 0;
- if (atomic_read(&itv->capturing) == 0 && s->fh == NULL) {
+ if (atomic_read(&itv->capturing) == 0 && s->id == NULL) {
/* shouldn't happen */
IVTV_DEBUG_WARN("Stream %s not initialized before read\n", s->name);
return -EIO;
@@ -502,7 +502,7 @@ int ivtv_start_capture(struct ivtv_open_id *id)
ssize_t ivtv_v4l2_read(struct file * filp, char __user *buf, size_t count, loff_t * pos)
{
- struct ivtv_open_id *id = fh2id(filp->private_data);
+ struct ivtv_open_id *id = file2id(filp);
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[id->type];
ssize_t rc;
@@ -564,7 +564,7 @@ static int ivtv_schedule_dma(struct ivtv_stream *s)
static ssize_t ivtv_write(struct file *filp, const char __user *user_buf, size_t count, loff_t *pos)
{
- struct ivtv_open_id *id = fh2id(filp->private_data);
+ struct ivtv_open_id *id = file2id(filp);
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[id->type];
struct yuv_playback_info *yi = &itv->yuv_info;
@@ -719,7 +719,7 @@ retry:
ssize_t ivtv_v4l2_write(struct file *filp, const char __user *user_buf, size_t count, loff_t *pos)
{
- struct ivtv_open_id *id = fh2id(filp->private_data);
+ struct ivtv_open_id *id = file2id(filp);
struct ivtv *itv = id->itv;
ssize_t res;
@@ -732,7 +732,7 @@ ssize_t ivtv_v4l2_write(struct file *filp, const char __user *user_buf, size_t c
__poll_t ivtv_v4l2_dec_poll(struct file *filp, poll_table *wait)
{
- struct ivtv_open_id *id = fh2id(filp->private_data);
+ struct ivtv_open_id *id = file2id(filp);
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[id->type];
__poll_t res = 0;
@@ -767,7 +767,7 @@ __poll_t ivtv_v4l2_dec_poll(struct file *filp, poll_table *wait)
__poll_t ivtv_v4l2_enc_poll(struct file *filp, poll_table *wait)
{
__poll_t req_events = poll_requested_events(wait);
- struct ivtv_open_id *id = fh2id(filp->private_data);
+ struct ivtv_open_id *id = file2id(filp);
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[id->type];
int eof = test_bit(IVTV_F_S_STREAMOFF, &s->s_flags);
@@ -831,7 +831,7 @@ void ivtv_stop_capture(struct ivtv_open_id *id, int gop_end)
id->type == IVTV_ENC_STREAM_TYPE_VBI) &&
test_bit(IVTV_F_S_INTERNAL_USE, &s->s_flags)) {
/* Also used internally, don't stop capturing */
- s->fh = NULL;
+ s->id = NULL;
}
else {
ivtv_stop_v4l2_encode_stream(s, gop_end);
@@ -877,8 +877,8 @@ static void ivtv_stop_decoding(struct ivtv_open_id *id, int flags, u64 pts)
int ivtv_v4l2_close(struct file *filp)
{
- struct v4l2_fh *fh = filp->private_data;
- struct ivtv_open_id *id = fh2id(fh);
+ struct v4l2_fh *fh = file_to_v4l2_fh(filp);
+ struct ivtv_open_id *id = file2id(filp);
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[id->type];
@@ -911,11 +911,11 @@ int ivtv_v4l2_close(struct file *filp)
ivtv_unmute(itv);
}
- v4l2_fh_del(fh);
+ v4l2_fh_del(fh, filp);
v4l2_fh_exit(fh);
/* Easy case first: this stream was never claimed by us */
- if (s->fh != &id->fh)
+ if (s->id != id)
goto close_done;
/* 'Unclaim' this stream */
@@ -998,9 +998,7 @@ static int ivtv_open(struct file *filp)
v4l2_fh_init(&item->fh, &s->vdev);
item->itv = itv;
item->type = s->type;
-
- filp->private_data = &item->fh;
- v4l2_fh_add(&item->fh);
+ v4l2_fh_add(&item->fh, filp);
if (item->type == IVTV_ENC_STREAM_TYPE_RAD &&
v4l2_fh_is_singular_file(filp)) {
@@ -1008,7 +1006,7 @@ static int ivtv_open(struct file *filp)
if (atomic_read(&itv->capturing) > 0) {
/* switching to radio while capture is
in progress is not polite */
- v4l2_fh_del(&item->fh);
+ v4l2_fh_del(&item->fh, filp);
v4l2_fh_exit(&item->fh);
kfree(item);
return -EBUSY;
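
[Note on the hunks above: v4l2_fh_add() now takes the struct file and sets file->private_data itself, so open() drops the manual assignment, and every unwind path must call v4l2_fh_del() with the same file. A sketch of an open() on those two-argument signatures as used throughout this series; demo_busy_check() is a hypothetical stand-in for the driver's real busy test:]

#include <linux/slab.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>

static inline bool demo_busy_check(struct video_device *vdev)
{
	return false;	/* stand-in for the driver's real busy test */
}

static int demo_open(struct file *filp)
{
	struct video_device *vdev = video_devdata(filp);
	struct demo_open_id *id = kzalloc(sizeof(*id), GFP_KERNEL);

	if (!id)
		return -ENOMEM;

	v4l2_fh_init(&id->fh, vdev);
	v4l2_fh_add(&id->fh, filp);	/* also sets filp->private_data */

	if (demo_busy_check(vdev)) {
		v4l2_fh_del(&id->fh, filp);
		v4l2_fh_exit(&id->fh);
		kfree(id);
		return -EBUSY;
	}
	return 0;
}
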
diff --git a/drivers/media/pci/ivtv/ivtv-fileops.h b/drivers/media/pci/ivtv/ivtv-fileops.h
index c2c01bba5d03..7bbe42b0030c 100644
--- a/drivers/media/pci/ivtv/ivtv-fileops.h
+++ b/drivers/media/pci/ivtv/ivtv-fileops.h
@@ -2,7 +2,7 @@
/*
file operation functions
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-firmware.c b/drivers/media/pci/ivtv/ivtv-firmware.c
index 56b25255faf5..1bc873ebef73 100644
--- a/drivers/media/pci/ivtv/ivtv-firmware.c
+++ b/drivers/media/pci/ivtv/ivtv-firmware.c
@@ -3,7 +3,7 @@
ivtv firmware functions.
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
Copyright (C) 2004 Chris Kennedy <c@groovy.org>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-firmware.h b/drivers/media/pci/ivtv/ivtv-firmware.h
index 393e94a8d0e4..ce94b6c385de 100644
--- a/drivers/media/pci/ivtv/ivtv-firmware.h
+++ b/drivers/media/pci/ivtv/ivtv-firmware.h
@@ -3,7 +3,7 @@
ivtv firmware functions.
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
Copyright (C) 2004 Chris Kennedy <c@groovy.org>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-gpio.c b/drivers/media/pci/ivtv/ivtv-gpio.c
index 6434c0d03a6d..d3477e1529c9 100644
--- a/drivers/media/pci/ivtv/ivtv-gpio.c
+++ b/drivers/media/pci/ivtv/ivtv-gpio.c
@@ -3,7 +3,7 @@
gpio functions.
Merging GPIO support into driver:
Copyright (C) 2004 Chris Kennedy <c@groovy.org>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-gpio.h b/drivers/media/pci/ivtv/ivtv-gpio.h
index 4ad817173f04..686c9b5e9c19 100644
--- a/drivers/media/pci/ivtv/ivtv-gpio.h
+++ b/drivers/media/pci/ivtv/ivtv-gpio.h
@@ -2,7 +2,7 @@
/*
gpio functions.
Copyright (C) 2004 Chris Kennedy <c@groovy.org>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-i2c.c b/drivers/media/pci/ivtv/ivtv-i2c.c
index c052c57c6dce..28cb22d6a892 100644
--- a/drivers/media/pci/ivtv/ivtv-i2c.c
+++ b/drivers/media/pci/ivtv/ivtv-i2c.c
@@ -2,7 +2,7 @@
/*
I2C functions
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-i2c.h b/drivers/media/pci/ivtv/ivtv-i2c.h
index 2d9cdaa682c5..504bbc1dee25 100644
--- a/drivers/media/pci/ivtv/ivtv-i2c.h
+++ b/drivers/media/pci/ivtv/ivtv-i2c.h
@@ -2,7 +2,7 @@
/*
I2C functions
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-ioctl.c b/drivers/media/pci/ivtv/ivtv-ioctl.c
index 7947dcd615e8..84c73bd22f2d 100644
--- a/drivers/media/pci/ivtv/ivtv-ioctl.c
+++ b/drivers/media/pci/ivtv/ivtv-ioctl.c
@@ -2,7 +2,7 @@
/*
ioctl system call
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
@@ -308,7 +308,7 @@ static int ivtv_video_command(struct ivtv *itv, struct ivtv_open_id *id,
static int ivtv_g_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_format *fmt)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
struct v4l2_sliced_vbi_format *vbifmt = &fmt->fmt.sliced;
vbifmt->reserved[0] = 0;
@@ -330,7 +330,7 @@ static int ivtv_g_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_fo
static int ivtv_g_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
- struct ivtv_open_id *id = fh2id(fh);
+ struct ivtv_open_id *id = file2id(file);
struct ivtv *itv = id->itv;
struct v4l2_pix_format *pixfmt = &fmt->fmt.pix;
@@ -353,7 +353,7 @@ static int ivtv_g_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f
static int ivtv_g_fmt_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
struct v4l2_vbi_format *vbifmt = &fmt->fmt.vbi;
vbifmt->sampling_rate = 27000000;
@@ -372,7 +372,7 @@ static int ivtv_g_fmt_vbi_cap(struct file *file, void *fh, struct v4l2_format *f
static int ivtv_g_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
struct v4l2_sliced_vbi_format *vbifmt = &fmt->fmt.sliced;
- struct ivtv_open_id *id = fh2id(fh);
+ struct ivtv_open_id *id = file2id(file);
struct ivtv *itv = id->itv;
vbifmt->reserved[0] = 0;
@@ -394,7 +394,7 @@ static int ivtv_g_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_fo
static int ivtv_g_fmt_vid_out(struct file *file, void *fh, struct v4l2_format *fmt)
{
- struct ivtv_open_id *id = fh2id(fh);
+ struct ivtv_open_id *id = file2id(file);
struct ivtv *itv = id->itv;
struct v4l2_pix_format *pixfmt = &fmt->fmt.pix;
@@ -434,8 +434,8 @@ static int ivtv_g_fmt_vid_out(struct file *file, void *fh, struct v4l2_format *f
static int ivtv_g_fmt_vid_out_overlay(struct file *file, void *fh, struct v4l2_format *fmt)
{
- struct ivtv *itv = fh2id(fh)->itv;
- struct ivtv_stream *s = &itv->streams[fh2id(fh)->type];
+ struct ivtv *itv = file2id(file)->itv;
+ struct ivtv_stream *s = &itv->streams[file2id(file)->type];
struct v4l2_window *winfmt = &fmt->fmt.win;
if (!(s->vdev.device_caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
@@ -461,7 +461,7 @@ static int ivtv_try_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_
static int ivtv_try_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
- struct ivtv_open_id *id = fh2id(fh);
+ struct ivtv_open_id *id = file2id(file);
struct ivtv *itv = id->itv;
int w = fmt->fmt.pix.width;
int h = fmt->fmt.pix.height;
@@ -490,7 +490,7 @@ static int ivtv_try_fmt_vbi_cap(struct file *file, void *fh, struct v4l2_format
static int ivtv_try_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
struct v4l2_sliced_vbi_format *vbifmt = &fmt->fmt.sliced;
- struct ivtv_open_id *id = fh2id(fh);
+ struct ivtv_open_id *id = file2id(file);
struct ivtv *itv = id->itv;
if (id->type == IVTV_DEC_STREAM_TYPE_VBI)
@@ -510,7 +510,7 @@ static int ivtv_try_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_
static int ivtv_try_fmt_vid_out(struct file *file, void *fh, struct v4l2_format *fmt)
{
- struct ivtv_open_id *id = fh2id(fh);
+ struct ivtv_open_id *id = file2id(file);
s32 w = fmt->fmt.pix.width;
s32 h = fmt->fmt.pix.height;
int field = fmt->fmt.pix.field;
@@ -544,8 +544,8 @@ static int ivtv_try_fmt_vid_out(struct file *file, void *fh, struct v4l2_format
static int ivtv_try_fmt_vid_out_overlay(struct file *file, void *fh, struct v4l2_format *fmt)
{
- struct ivtv *itv = fh2id(fh)->itv;
- struct ivtv_stream *s = &itv->streams[fh2id(fh)->type];
+ struct ivtv *itv = file2id(file)->itv;
+ struct ivtv_stream *s = &itv->streams[file2id(file)->type];
u32 chromakey = fmt->fmt.win.chromakey;
u8 global_alpha = fmt->fmt.win.global_alpha;
@@ -566,7 +566,7 @@ static int ivtv_s_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_fo
static int ivtv_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
- struct ivtv_open_id *id = fh2id(fh);
+ struct ivtv_open_id *id = file2id(file);
struct ivtv *itv = id->itv;
struct v4l2_subdev_format format = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
@@ -597,7 +597,7 @@ static int ivtv_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f
static int ivtv_s_fmt_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
if (!ivtv_raw_vbi(itv) && atomic_read(&itv->capturing) > 0)
return -EBUSY;
@@ -610,7 +610,7 @@ static int ivtv_s_fmt_vbi_cap(struct file *file, void *fh, struct v4l2_format *f
static int ivtv_s_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
struct v4l2_sliced_vbi_format *vbifmt = &fmt->fmt.sliced;
- struct ivtv_open_id *id = fh2id(fh);
+ struct ivtv_open_id *id = file2id(file);
struct ivtv *itv = id->itv;
int ret = ivtv_try_fmt_sliced_vbi_cap(file, fh, fmt);
@@ -628,7 +628,7 @@ static int ivtv_s_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_fo
static int ivtv_s_fmt_vid_out(struct file *file, void *fh, struct v4l2_format *fmt)
{
- struct ivtv_open_id *id = fh2id(fh);
+ struct ivtv_open_id *id = file2id(file);
struct ivtv *itv = id->itv;
struct yuv_playback_info *yi = &itv->yuv_info;
int ret = ivtv_try_fmt_vid_out(file, fh, fmt);
@@ -673,7 +673,7 @@ static int ivtv_s_fmt_vid_out(struct file *file, void *fh, struct v4l2_format *f
static int ivtv_s_fmt_vid_out_overlay(struct file *file, void *fh, struct v4l2_format *fmt)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
int ret = ivtv_try_fmt_vid_out_overlay(file, fh, fmt);
if (ret == 0) {
@@ -710,7 +710,7 @@ static int ivtv_itvc(struct ivtv *itv, bool get, u64 reg, u64 *val)
static int ivtv_g_register(struct file *file, void *fh, struct v4l2_dbg_register *reg)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
reg->size = 4;
return ivtv_itvc(itv, true, reg->reg, &reg->val);
@@ -718,7 +718,7 @@ static int ivtv_g_register(struct file *file, void *fh, struct v4l2_dbg_register
static int ivtv_s_register(struct file *file, void *fh, const struct v4l2_dbg_register *reg)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
u64 val = reg->val;
return ivtv_itvc(itv, false, reg->reg, &val);
@@ -727,7 +727,7 @@ static int ivtv_s_register(struct file *file, void *fh, const struct v4l2_dbg_re
static int ivtv_querycap(struct file *file, void *fh, struct v4l2_capability *vcap)
{
- struct ivtv_open_id *id = fh2id(file->private_data);
+ struct ivtv_open_id *id = file2id(file);
struct ivtv *itv = id->itv;
strscpy(vcap->driver, IVTV_DRIVER_NAME, sizeof(vcap->driver));
@@ -738,14 +738,14 @@ static int ivtv_querycap(struct file *file, void *fh, struct v4l2_capability *vc
static int ivtv_enumaudio(struct file *file, void *fh, struct v4l2_audio *vin)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
return ivtv_get_audio_input(itv, vin->index, vin);
}
static int ivtv_g_audio(struct file *file, void *fh, struct v4l2_audio *vin)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
vin->index = itv->audio_input;
return ivtv_get_audio_input(itv, vin->index, vin);
@@ -753,7 +753,7 @@ static int ivtv_g_audio(struct file *file, void *fh, struct v4l2_audio *vin)
static int ivtv_s_audio(struct file *file, void *fh, const struct v4l2_audio *vout)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
if (vout->index >= itv->nof_audio_inputs)
return -EINVAL;
@@ -766,7 +766,7 @@ static int ivtv_s_audio(struct file *file, void *fh, const struct v4l2_audio *vo
static int ivtv_enumaudout(struct file *file, void *fh, struct v4l2_audioout *vin)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
/* set it to defaults from our table */
return ivtv_get_audio_output(itv, vin->index, vin);
@@ -774,7 +774,7 @@ static int ivtv_enumaudout(struct file *file, void *fh, struct v4l2_audioout *vi
static int ivtv_g_audout(struct file *file, void *fh, struct v4l2_audioout *vin)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
vin->index = 0;
return ivtv_get_audio_output(itv, vin->index, vin);
@@ -782,7 +782,7 @@ static int ivtv_g_audout(struct file *file, void *fh, struct v4l2_audioout *vin)
static int ivtv_s_audout(struct file *file, void *fh, const struct v4l2_audioout *vout)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
if (itv->card->video_outputs == NULL || vout->index != 0)
return -EINVAL;
@@ -791,7 +791,7 @@ static int ivtv_s_audout(struct file *file, void *fh, const struct v4l2_audioout
static int ivtv_enum_input(struct file *file, void *fh, struct v4l2_input *vin)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
/* set it to defaults from our table */
return ivtv_get_input(itv, vin->index, vin);
@@ -799,7 +799,7 @@ static int ivtv_enum_input(struct file *file, void *fh, struct v4l2_input *vin)
static int ivtv_enum_output(struct file *file, void *fh, struct v4l2_output *vout)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
return ivtv_get_output(itv, vout->index, vout);
}
@@ -807,7 +807,7 @@ static int ivtv_enum_output(struct file *file, void *fh, struct v4l2_output *vou
static int ivtv_g_pixelaspect(struct file *file, void *fh,
int type, struct v4l2_fract *f)
{
- struct ivtv_open_id *id = fh2id(fh);
+ struct ivtv_open_id *id = file2id(file);
struct ivtv *itv = id->itv;
if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
@@ -825,7 +825,7 @@ static int ivtv_g_pixelaspect(struct file *file, void *fh,
static int ivtv_s_selection(struct file *file, void *fh,
struct v4l2_selection *sel)
{
- struct ivtv_open_id *id = fh2id(fh);
+ struct ivtv_open_id *id = file2id(file);
struct ivtv *itv = id->itv;
struct yuv_playback_info *yi = &itv->yuv_info;
struct v4l2_rect r = { 0, 0, 720, 0 };
@@ -868,7 +868,7 @@ static int ivtv_s_selection(struct file *file, void *fh,
static int ivtv_g_selection(struct file *file, void *fh,
struct v4l2_selection *sel)
{
- struct ivtv_open_id *id = fh2id(fh);
+ struct ivtv_open_id *id = file2id(file);
struct ivtv *itv = id->itv;
struct yuv_playback_info *yi = &itv->yuv_info;
struct v4l2_rect r = { 0, 0, 720, 0 };
@@ -924,8 +924,8 @@ static int ivtv_enum_fmt_vid_cap(struct file *file, void *fh, struct v4l2_fmtdes
.description = "MPEG",
.pixelformat = V4L2_PIX_FMT_MPEG,
};
- struct ivtv *itv = fh2id(fh)->itv;
- struct ivtv_stream *s = &itv->streams[fh2id(fh)->type];
+ struct ivtv *itv = file2id(file)->itv;
+ struct ivtv_stream *s = &itv->streams[file2id(file)->type];
if (fmt->index)
return -EINVAL;
@@ -951,8 +951,8 @@ static int ivtv_enum_fmt_vid_out(struct file *file, void *fh, struct v4l2_fmtdes
.description = "MPEG",
.pixelformat = V4L2_PIX_FMT_MPEG,
};
- struct ivtv *itv = fh2id(fh)->itv;
- struct ivtv_stream *s = &itv->streams[fh2id(fh)->type];
+ struct ivtv *itv = file2id(file)->itv;
+ struct ivtv_stream *s = &itv->streams[file2id(file)->type];
if (fmt->index)
return -EINVAL;
@@ -967,7 +967,7 @@ static int ivtv_enum_fmt_vid_out(struct file *file, void *fh, struct v4l2_fmtdes
static int ivtv_g_input(struct file *file, void *fh, unsigned int *i)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
*i = itv->active_input;
@@ -976,7 +976,7 @@ static int ivtv_g_input(struct file *file, void *fh, unsigned int *i)
int ivtv_s_input(struct file *file, void *fh, unsigned int inp)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
v4l2_std_id std;
int i;
@@ -1019,7 +1019,7 @@ int ivtv_s_input(struct file *file, void *fh, unsigned int inp)
static int ivtv_g_output(struct file *file, void *fh, unsigned int *i)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
return -EINVAL;
@@ -1031,7 +1031,7 @@ static int ivtv_g_output(struct file *file, void *fh, unsigned int *i)
static int ivtv_s_output(struct file *file, void *fh, unsigned int outp)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
if (outp >= itv->card->nof_outputs)
return -EINVAL;
@@ -1053,8 +1053,8 @@ static int ivtv_s_output(struct file *file, void *fh, unsigned int outp)
static int ivtv_g_frequency(struct file *file, void *fh, struct v4l2_frequency *vf)
{
- struct ivtv *itv = fh2id(fh)->itv;
- struct ivtv_stream *s = &itv->streams[fh2id(fh)->type];
+ struct ivtv *itv = file2id(file)->itv;
+ struct ivtv_stream *s = &itv->streams[file2id(file)->type];
if (s->vdev.vfl_dir)
return -ENOTTY;
@@ -1067,8 +1067,8 @@ static int ivtv_g_frequency(struct file *file, void *fh, struct v4l2_frequency *
int ivtv_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf)
{
- struct ivtv *itv = fh2id(fh)->itv;
- struct ivtv_stream *s = &itv->streams[fh2id(fh)->type];
+ struct ivtv *itv = file2id(file)->itv;
+ struct ivtv_stream *s = &itv->streams[file2id(file)->type];
if (s->vdev.vfl_dir)
return -ENOTTY;
@@ -1084,7 +1084,7 @@ int ivtv_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *v
static int ivtv_g_std(struct file *file, void *fh, v4l2_std_id *std)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
*std = itv->std;
return 0;
@@ -1157,7 +1157,7 @@ void ivtv_s_std_dec(struct ivtv *itv, v4l2_std_id std)
static int ivtv_s_std(struct file *file, void *fh, v4l2_std_id std)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
if ((std & V4L2_STD_ALL) == 0)
return -EINVAL;
@@ -1185,7 +1185,7 @@ static int ivtv_s_std(struct file *file, void *fh, v4l2_std_id std)
static int ivtv_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt)
{
- struct ivtv_open_id *id = fh2id(fh);
+ struct ivtv_open_id *id = file2id(file);
struct ivtv *itv = id->itv;
if (vt->index != 0)
@@ -1198,7 +1198,7 @@ static int ivtv_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt
static int ivtv_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
if (vt->index != 0)
return -EINVAL;
@@ -1214,7 +1214,7 @@ static int ivtv_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
static int ivtv_g_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_sliced_vbi_cap *cap)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
int set = itv->is_50hz ? V4L2_SLICED_VBI_625 : V4L2_SLICED_VBI_525;
int f, l;
@@ -1249,7 +1249,7 @@ static int ivtv_g_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_sliced
static int ivtv_g_enc_index(struct file *file, void *fh, struct v4l2_enc_idx *idx)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
struct v4l2_enc_idx_entry *e = idx->entry;
int entries;
int i;
@@ -1275,7 +1275,7 @@ static int ivtv_g_enc_index(struct file *file, void *fh, struct v4l2_enc_idx *id
static int ivtv_encoder_cmd(struct file *file, void *fh, struct v4l2_encoder_cmd *enc)
{
- struct ivtv_open_id *id = fh2id(fh);
+ struct ivtv_open_id *id = file2id(file);
struct ivtv *itv = id->itv;
@@ -1327,7 +1327,7 @@ static int ivtv_encoder_cmd(struct file *file, void *fh, struct v4l2_encoder_cmd
static int ivtv_try_encoder_cmd(struct file *file, void *fh, struct v4l2_encoder_cmd *enc)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
switch (enc->cmd) {
case V4L2_ENC_CMD_START:
@@ -1357,8 +1357,8 @@ static int ivtv_try_encoder_cmd(struct file *file, void *fh, struct v4l2_encoder
static int ivtv_g_fbuf(struct file *file, void *fh, struct v4l2_framebuffer *fb)
{
- struct ivtv *itv = fh2id(fh)->itv;
- struct ivtv_stream *s = &itv->streams[fh2id(fh)->type];
+ struct ivtv *itv = file2id(file)->itv;
+ struct ivtv_stream *s = &itv->streams[file2id(file)->type];
u32 data[CX2341X_MBOX_MAX_DATA];
struct yuv_playback_info *yi = &itv->yuv_info;
@@ -1444,9 +1444,9 @@ static int ivtv_g_fbuf(struct file *file, void *fh, struct v4l2_framebuffer *fb)
static int ivtv_s_fbuf(struct file *file, void *fh, const struct v4l2_framebuffer *fb)
{
- struct ivtv_open_id *id = fh2id(fh);
+ struct ivtv_open_id *id = file2id(file);
struct ivtv *itv = id->itv;
- struct ivtv_stream *s = &itv->streams[fh2id(fh)->type];
+ struct ivtv_stream *s = &itv->streams[file2id(file)->type];
struct yuv_playback_info *yi = &itv->yuv_info;
if (!(s->vdev.device_caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
@@ -1465,9 +1465,9 @@ static int ivtv_s_fbuf(struct file *file, void *fh, const struct v4l2_framebuffe
static int ivtv_overlay(struct file *file, void *fh, unsigned int on)
{
- struct ivtv_open_id *id = fh2id(fh);
+ struct ivtv_open_id *id = file2id(file);
struct ivtv *itv = id->itv;
- struct ivtv_stream *s = &itv->streams[fh2id(fh)->type];
+ struct ivtv_stream *s = &itv->streams[file2id(file)->type];
if (!(s->vdev.device_caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
return -ENOTTY;
@@ -1492,7 +1492,7 @@ static int ivtv_subscribe_event(struct v4l2_fh *fh, const struct v4l2_event_subs
static int ivtv_log_status(struct file *file, void *fh)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
u32 data[CX2341X_MBOX_MAX_DATA];
int has_output = itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT;
@@ -1584,7 +1584,7 @@ static int ivtv_log_status(struct file *file, void *fh)
static int ivtv_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *dec)
{
- struct ivtv_open_id *id = fh2id(file->private_data);
+ struct ivtv_open_id *id = file2id(file);
struct ivtv *itv = id->itv;
IVTV_DEBUG_IOCTL("VIDIOC_DECODER_CMD %d\n", dec->cmd);
@@ -1593,7 +1593,7 @@ static int ivtv_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd
static int ivtv_try_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *dec)
{
- struct ivtv_open_id *id = fh2id(file->private_data);
+ struct ivtv_open_id *id = file2id(file);
struct ivtv *itv = id->itv;
IVTV_DEBUG_IOCTL("VIDIOC_TRY_DECODER_CMD %d\n", dec->cmd);
@@ -1602,7 +1602,7 @@ static int ivtv_try_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder
static int ivtv_decoder_ioctls(struct file *filp, unsigned int cmd, void *arg)
{
- struct ivtv_open_id *id = fh2id(filp->private_data);
+ struct ivtv_open_id *id = file2id(filp);
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[id->type];
@@ -1645,7 +1645,7 @@ static int ivtv_decoder_ioctls(struct file *filp, unsigned int cmd, void *arg)
static long ivtv_default(struct file *file, void *fh, bool valid_prio,
unsigned int cmd, void *arg)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
if (!valid_prio) {
switch (cmd) {
diff --git a/drivers/media/pci/ivtv/ivtv-ioctl.h b/drivers/media/pci/ivtv/ivtv-ioctl.h
index 42c2516379fc..7f8c6f43d397 100644
--- a/drivers/media/pci/ivtv/ivtv-ioctl.h
+++ b/drivers/media/pci/ivtv/ivtv-ioctl.h
@@ -2,7 +2,7 @@
/*
ioctl system call
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-irq.c b/drivers/media/pci/ivtv/ivtv-irq.c
index 748c14e87963..05e0293b4d44 100644
--- a/drivers/media/pci/ivtv/ivtv-irq.c
+++ b/drivers/media/pci/ivtv/ivtv-irq.c
@@ -2,7 +2,7 @@
/* interrupt handling
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
Copyright (C) 2004 Chris Kennedy <c@groovy.org>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
@@ -305,7 +305,7 @@ static void dma_post(struct ivtv_stream *s)
ivtv_process_vbi_data(itv, buf, 0, s->type);
s->q_dma.bytesused += buf->bytesused;
}
- if (s->fh == NULL) {
+ if (s->id == NULL) {
ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0);
return;
}
@@ -330,7 +330,7 @@ static void dma_post(struct ivtv_stream *s)
set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
}
- if (s->fh)
+ if (s->id)
wake_up(&s->waitq);
}
@@ -351,7 +351,7 @@ void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock)
/* Insert buffer block for YUV if needed */
if (s->type == IVTV_DEC_STREAM_TYPE_YUV && f->offset_y) {
- if (yi->blanking_dmaptr) {
+ if (yi->blanking_ptr) {
s->sg_pending[idx].src = yi->blanking_dmaptr;
s->sg_pending[idx].dst = offset;
s->sg_pending[idx].size = 720 * 16;
diff --git a/drivers/media/pci/ivtv/ivtv-irq.h b/drivers/media/pci/ivtv/ivtv-irq.h
index b8b0703a1c82..8a780bea7de4 100644
--- a/drivers/media/pci/ivtv/ivtv-irq.h
+++ b/drivers/media/pci/ivtv/ivtv-irq.h
@@ -3,7 +3,7 @@
interrupt handling
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
Copyright (C) 2004 Chris Kennedy <c@groovy.org>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-mailbox.c b/drivers/media/pci/ivtv/ivtv-mailbox.c
index d3fdaaa903f1..cd7c9f2d473f 100644
--- a/drivers/media/pci/ivtv/ivtv-mailbox.c
+++ b/drivers/media/pci/ivtv/ivtv-mailbox.c
@@ -3,7 +3,7 @@
mailbox functions
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
Copyright (C) 2004 Chris Kennedy <c@groovy.org>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-mailbox.h b/drivers/media/pci/ivtv/ivtv-mailbox.h
index 537c90437e1d..364e7f51508e 100644
--- a/drivers/media/pci/ivtv/ivtv-mailbox.h
+++ b/drivers/media/pci/ivtv/ivtv-mailbox.h
@@ -2,7 +2,7 @@
/*
mailbox functions
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-queue.c b/drivers/media/pci/ivtv/ivtv-queue.c
index f9b192ab7e7c..f7d2d159d800 100644
--- a/drivers/media/pci/ivtv/ivtv-queue.c
+++ b/drivers/media/pci/ivtv/ivtv-queue.c
@@ -3,7 +3,7 @@
buffer queues.
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
Copyright (C) 2004 Chris Kennedy <c@groovy.org>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-queue.h b/drivers/media/pci/ivtv/ivtv-queue.h
index 983e99642364..9619745d6de1 100644
--- a/drivers/media/pci/ivtv/ivtv-queue.h
+++ b/drivers/media/pci/ivtv/ivtv-queue.h
@@ -3,7 +3,7 @@
buffer queues.
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
Copyright (C) 2004 Chris Kennedy <c@groovy.org>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-routing.c b/drivers/media/pci/ivtv/ivtv-routing.c
index 57d4d5a3cb87..b1dfc2e96d91 100644
--- a/drivers/media/pci/ivtv/ivtv-routing.c
+++ b/drivers/media/pci/ivtv/ivtv-routing.c
@@ -2,7 +2,7 @@
/*
Audio/video-routing-related ivtv functions.
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-routing.h b/drivers/media/pci/ivtv/ivtv-routing.h
index e4a0ae0694d2..69ddb66ef26f 100644
--- a/drivers/media/pci/ivtv/ivtv-routing.h
+++ b/drivers/media/pci/ivtv/ivtv-routing.h
@@ -2,7 +2,7 @@
/*
Audio/video-routing-related ivtv functions.
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-streams.c b/drivers/media/pci/ivtv/ivtv-streams.c
index ac085925d3cb..d98fe0c9d9f1 100644
--- a/drivers/media/pci/ivtv/ivtv-streams.c
+++ b/drivers/media/pci/ivtv/ivtv-streams.c
@@ -1,22 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
init/start/stop/exit stream functions
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
Copyright (C) 2004 Chris Kennedy <c@groovy.org>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
/* License: GPL
diff --git a/drivers/media/pci/ivtv/ivtv-streams.h b/drivers/media/pci/ivtv/ivtv-streams.h
index 5f35c57fcdfd..43d4ecd6dd6f 100644
--- a/drivers/media/pci/ivtv/ivtv-streams.h
+++ b/drivers/media/pci/ivtv/ivtv-streams.h
@@ -2,7 +2,7 @@
/*
init/start/stop/exit stream functions
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-udma.c b/drivers/media/pci/ivtv/ivtv-udma.c
index f467a00492f4..7dedf04f9f87 100644
--- a/drivers/media/pci/ivtv/ivtv-udma.c
+++ b/drivers/media/pci/ivtv/ivtv-udma.c
@@ -4,7 +4,7 @@
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
Copyright (C) 2004 Chris Kennedy <c@groovy.org>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-udma.h b/drivers/media/pci/ivtv/ivtv-udma.h
index 12b9426b2db2..3030fadfdbc7 100644
--- a/drivers/media/pci/ivtv/ivtv-udma.h
+++ b/drivers/media/pci/ivtv/ivtv-udma.h
@@ -2,7 +2,7 @@
/*
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
Copyright (C) 2004 Chris Kennedy <c@groovy.org>
- Copyright (C) 2006-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2006-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-vbi.c b/drivers/media/pci/ivtv/ivtv-vbi.c
index 80478b026d75..ae7a00f46257 100644
--- a/drivers/media/pci/ivtv/ivtv-vbi.c
+++ b/drivers/media/pci/ivtv/ivtv-vbi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
Vertical Blank Interval support functions
- Copyright (C) 2004-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2004-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-vbi.h b/drivers/media/pci/ivtv/ivtv-vbi.h
index 780f73d2ab6b..12fe27da544b 100644
--- a/drivers/media/pci/ivtv/ivtv-vbi.h
+++ b/drivers/media/pci/ivtv/ivtv-vbi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
Vertical Blank Interval support functions
- Copyright (C) 2004-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2004-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-version.h b/drivers/media/pci/ivtv/ivtv-version.h
index 996f1871e49c..21e26d1f66b8 100644
--- a/drivers/media/pci/ivtv/ivtv-version.h
+++ b/drivers/media/pci/ivtv/ivtv-version.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
ivtv driver version information
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-yuv.c b/drivers/media/pci/ivtv/ivtv-yuv.c
index 2d9274537725..71f040106647 100644
--- a/drivers/media/pci/ivtv/ivtv-yuv.c
+++ b/drivers/media/pci/ivtv/ivtv-yuv.c
@@ -125,7 +125,7 @@ static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma,
ivtv_udma_fill_sg_array(dma, y_buffer_offset, uv_buffer_offset, y_size);
/* If we've offset the y plane, ensure top area is blanked */
- if (f->offset_y && yi->blanking_dmaptr) {
+ if (f->offset_y && yi->blanking_ptr) {
dma->SGarray[dma->SG_length].size = cpu_to_le32(720*16);
dma->SGarray[dma->SG_length].src = cpu_to_le32(yi->blanking_dmaptr);
dma->SGarray[dma->SG_length].dst = cpu_to_le32(IVTV_DECODER_OFFSET + yuv_offset[frame]);
@@ -929,6 +929,12 @@ static void ivtv_yuv_init(struct ivtv *itv)
yi->blanking_dmaptr = dma_map_single(&itv->pdev->dev,
yi->blanking_ptr,
720 * 16, DMA_TO_DEVICE);
+ if (dma_mapping_error(&itv->pdev->dev, yi->blanking_dmaptr)) {
+ kfree(yi->blanking_ptr);
+ yi->blanking_ptr = NULL;
+ yi->blanking_dmaptr = 0;
+ IVTV_DEBUG_WARN("Failed to dma_map yuv blanking buffer\n");
+ }
} else {
yi->blanking_dmaptr = 0;
IVTV_DEBUG_WARN("Failed to allocate yuv blanking buffer\n");
diff --git a/drivers/media/pci/mgb4/mgb4_trigger.c b/drivers/media/pci/mgb4/mgb4_trigger.c
index 923650d53d4c..d7dddc5c8728 100644
--- a/drivers/media/pci/mgb4/mgb4_trigger.c
+++ b/drivers/media/pci/mgb4/mgb4_trigger.c
@@ -91,7 +91,7 @@ static irqreturn_t trigger_handler(int irq, void *p)
struct {
u32 data;
s64 ts __aligned(8);
- } scan;
+ } scan = { };
scan.data = mgb4_read_reg(&st->mgbdev->video, 0xA0);
mgb4_write_reg(&st->mgbdev->video, 0xA0, scan.data);
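
[Note on the hunk above: the aligned s64 timestamp forces four bytes of padding after 'data', and the whole on-stack object is copied to userspace through the IIO buffer, so '= { }' zeroes the padding and avoids leaking kernel stack bytes. A reduced sketch using the standard IIO push helper:]

#include <linux/iio/buffer.h>
#include <linux/iio/iio.h>
#include <linux/types.h>

static void demo_push(struct iio_dev *indio_dev, u32 value, s64 ts)
{
	struct {
		u32 data;
		s64 ts __aligned(8);	/* 4 bytes of padding precede this */
	} scan = { };			/* zero both fields and the padding */

	scan.data = value;
	iio_push_to_buffers_with_timestamp(indio_dev, &scan, ts);
}
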
diff --git a/drivers/media/pci/mgb4/mgb4_vin.c b/drivers/media/pci/mgb4/mgb4_vin.c
index 989e93f67f75..42c327bc50e1 100644
--- a/drivers/media/pci/mgb4/mgb4_vin.c
+++ b/drivers/media/pci/mgb4/mgb4_vin.c
@@ -610,8 +610,7 @@ static int vidioc_s_dv_timings(struct file *file, void *fh,
timings->bt.height < video_timings_cap.bt.min_height ||
timings->bt.height > video_timings_cap.bt.max_height)
return -EINVAL;
- if (timings->bt.width == vindev->timings.bt.width &&
- timings->bt.height == vindev->timings.bt.height)
+ if (v4l2_match_dv_timings(timings, &vindev->timings, 0, false))
return 0;
if (vb2_is_busy(&vindev->queue))
return -EBUSY;
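
[Note on the hunk above: the open-coded width/height comparison is replaced by v4l2_match_dv_timings(), which compares the full timing set; pclock_delta 0 demands an exact pixel-clock match and false disables reduced-fps matching. A minimal sketch of the call:]

#include <media/v4l2-dv-timings.h>

static bool demo_timings_unchanged(const struct v4l2_dv_timings *t_new,
				   const struct v4l2_dv_timings *t_cur)
{
	/* 0 = no pixel-clock tolerance, false = reduced-fps not equal */
	return v4l2_match_dv_timings(t_new, t_cur, 0, false);
}
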
diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
index c88939bce56b..4a51b873e47a 100644
--- a/drivers/media/pci/saa7134/saa7134-video.c
+++ b/drivers/media/pci/saa7134/saa7134-video.c
@@ -1302,7 +1302,7 @@ static int saa7134_g_pixelaspect(struct file *file, void *priv,
return 0;
}
-static int saa7134_g_selection(struct file *file, void *f, struct v4l2_selection *sel)
+static int saa7134_g_selection(struct file *file, void *priv, struct v4l2_selection *sel)
{
struct saa7134_dev *dev = video_drvdata(file);
@@ -1325,7 +1325,7 @@ static int saa7134_g_selection(struct file *file, void *f, struct v4l2_selection
return 0;
}
-static int saa7134_s_selection(struct file *file, void *f, struct v4l2_selection *sel)
+static int saa7134_s_selection(struct file *file, void *priv, struct v4l2_selection *sel)
{
struct saa7134_dev *dev = video_drvdata(file);
struct v4l2_rect *b = &dev->crop_bounds;
diff --git a/drivers/media/pci/saa7164/saa7164-encoder.c b/drivers/media/pci/saa7164/saa7164-encoder.c
index bf73e9e83f52..66d650b5f69a 100644
--- a/drivers/media/pci/saa7164/saa7164-encoder.c
+++ b/drivers/media/pci/saa7164/saa7164-encoder.c
@@ -219,7 +219,7 @@ int saa7164_s_std(struct saa7164_port *port, v4l2_std_id id)
static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id id)
{
- struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_encoder_fh *fh = to_saa7164_encoder_fh(file);
return saa7164_s_std(fh->port, id);
}
@@ -232,7 +232,7 @@ int saa7164_g_std(struct saa7164_port *port, v4l2_std_id *id)
static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *id)
{
- struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_encoder_fh *fh = to_saa7164_encoder_fh(file);
return saa7164_g_std(fh->port, id);
}
@@ -277,7 +277,7 @@ int saa7164_g_input(struct saa7164_port *port, unsigned int *i)
static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
{
- struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_encoder_fh *fh = to_saa7164_encoder_fh(file);
return saa7164_g_input(fh->port, i);
}
@@ -301,14 +301,14 @@ int saa7164_s_input(struct saa7164_port *port, unsigned int i)
static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
{
- struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_encoder_fh *fh = to_saa7164_encoder_fh(file);
return saa7164_s_input(fh->port, i);
}
int saa7164_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
{
- struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_encoder_fh *fh = to_saa7164_encoder_fh(file);
struct saa7164_port *port = fh->port;
struct saa7164_dev *dev = port->dev;
@@ -347,7 +347,7 @@ int saa7164_g_frequency(struct saa7164_port *port, struct v4l2_frequency *f)
static int vidioc_g_frequency(struct file *file, void *priv,
struct v4l2_frequency *f)
{
- struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_encoder_fh *fh = to_saa7164_encoder_fh(file);
return saa7164_g_frequency(fh->port, f);
}
@@ -400,7 +400,7 @@ int saa7164_s_frequency(struct saa7164_port *port,
static int vidioc_s_frequency(struct file *file, void *priv,
const struct v4l2_frequency *f)
{
- struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_encoder_fh *fh = to_saa7164_encoder_fh(file);
return saa7164_s_frequency(fh->port, f);
}
@@ -483,7 +483,7 @@ static int saa7164_s_ctrl(struct v4l2_ctrl *ctrl)
static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_encoder_fh *fh = to_saa7164_encoder_fh(file);
struct saa7164_port *port = fh->port;
struct saa7164_dev *dev = port->dev;
@@ -510,7 +510,7 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_encoder_fh *fh = to_saa7164_encoder_fh(file);
struct saa7164_port *port = fh->port;
f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
@@ -725,15 +725,14 @@ static int fops_open(struct file *file)
fh->port = port;
v4l2_fh_init(&fh->fh, video_devdata(file));
- v4l2_fh_add(&fh->fh);
- file->private_data = fh;
+ v4l2_fh_add(&fh->fh, file);
return 0;
}
static int fops_release(struct file *file)
{
- struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_encoder_fh *fh = to_saa7164_encoder_fh(file);
struct saa7164_port *port = fh->port;
struct saa7164_dev *dev = port->dev;
@@ -747,7 +746,7 @@ static int fops_release(struct file *file)
}
}
- v4l2_fh_del(&fh->fh);
+ v4l2_fh_del(&fh->fh, file);
v4l2_fh_exit(&fh->fh);
kfree(fh);
@@ -787,7 +786,7 @@ saa7164_user_buffer *saa7164_enc_next_buf(struct saa7164_port *port)
static ssize_t fops_read(struct file *file, char __user *buffer,
size_t count, loff_t *pos)
{
- struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_encoder_fh *fh = to_saa7164_encoder_fh(file);
struct saa7164_port *port = fh->port;
struct saa7164_user_buffer *ubuf = NULL;
struct saa7164_dev *dev = port->dev;
@@ -893,8 +892,7 @@ err:
static __poll_t fops_poll(struct file *file, poll_table *wait)
{
__poll_t req_events = poll_requested_events(wait);
- struct saa7164_encoder_fh *fh =
- (struct saa7164_encoder_fh *)file->private_data;
+ struct saa7164_encoder_fh *fh = to_saa7164_encoder_fh(file);
struct saa7164_port *port = fh->port;
__poll_t mask = v4l2_ctrl_poll(file, wait);
diff --git a/drivers/media/pci/saa7164/saa7164-vbi.c b/drivers/media/pci/saa7164/saa7164-vbi.c
index ac958a5fca78..57e4362c0d19 100644
--- a/drivers/media/pci/saa7164/saa7164-vbi.c
+++ b/drivers/media/pci/saa7164/saa7164-vbi.c
@@ -144,28 +144,28 @@ static int saa7164_vbi_initialize(struct saa7164_port *port)
/* -- V4L2 --------------------------------------------------------- */
static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id id)
{
- struct saa7164_vbi_fh *fh = file->private_data;
+ struct saa7164_vbi_fh *fh = to_saa7164_vbi_fh(file);
return saa7164_s_std(fh->port->enc_port, id);
}
static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *id)
{
- struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_vbi_fh *fh = to_saa7164_vbi_fh(file);
return saa7164_g_std(fh->port->enc_port, id);
}
static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
{
- struct saa7164_vbi_fh *fh = file->private_data;
+ struct saa7164_vbi_fh *fh = to_saa7164_vbi_fh(file);
return saa7164_g_input(fh->port->enc_port, i);
}
static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
{
- struct saa7164_vbi_fh *fh = file->private_data;
+ struct saa7164_vbi_fh *fh = to_saa7164_vbi_fh(file);
return saa7164_s_input(fh->port->enc_port, i);
}
@@ -173,7 +173,7 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
static int vidioc_g_frequency(struct file *file, void *priv,
struct v4l2_frequency *f)
{
- struct saa7164_vbi_fh *fh = file->private_data;
+ struct saa7164_vbi_fh *fh = to_saa7164_vbi_fh(file);
return saa7164_g_frequency(fh->port->enc_port, f);
}
@@ -181,7 +181,7 @@ static int vidioc_g_frequency(struct file *file, void *priv,
static int vidioc_s_frequency(struct file *file, void *priv,
const struct v4l2_frequency *f)
{
- struct saa7164_vbi_fh *fh = file->private_data;
+ struct saa7164_vbi_fh *fh = to_saa7164_vbi_fh(file);
int ret = saa7164_s_frequency(fh->port->enc_port, f);
if (ret == 0)
@@ -192,7 +192,7 @@ static int vidioc_s_frequency(struct file *file, void *priv,
static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct saa7164_vbi_fh *fh = file->private_data;
+ struct saa7164_vbi_fh *fh = to_saa7164_vbi_fh(file);
struct saa7164_port *port = fh->port;
struct saa7164_dev *dev = port->dev;
@@ -428,15 +428,14 @@ static int fops_open(struct file *file)
fh->port = port;
v4l2_fh_init(&fh->fh, video_devdata(file));
- v4l2_fh_add(&fh->fh);
- file->private_data = fh;
+ v4l2_fh_add(&fh->fh, file);
return 0;
}
static int fops_release(struct file *file)
{
- struct saa7164_vbi_fh *fh = file->private_data;
+ struct saa7164_vbi_fh *fh = to_saa7164_vbi_fh(file);
struct saa7164_port *port = fh->port;
struct saa7164_dev *dev = port->dev;
@@ -450,7 +449,7 @@ static int fops_release(struct file *file)
}
}
- v4l2_fh_del(&fh->fh);
+ v4l2_fh_del(&fh->fh, file);
v4l2_fh_exit(&fh->fh);
kfree(fh);
@@ -489,7 +488,7 @@ saa7164_user_buffer *saa7164_vbi_next_buf(struct saa7164_port *port)
static ssize_t fops_read(struct file *file, char __user *buffer,
size_t count, loff_t *pos)
{
- struct saa7164_vbi_fh *fh = file->private_data;
+ struct saa7164_vbi_fh *fh = to_saa7164_vbi_fh(file);
struct saa7164_port *port = fh->port;
struct saa7164_user_buffer *ubuf = NULL;
struct saa7164_dev *dev = port->dev;
@@ -596,7 +595,7 @@ err:
static __poll_t fops_poll(struct file *file, poll_table *wait)
{
- struct saa7164_vbi_fh *fh = (struct saa7164_vbi_fh *)file->private_data;
+ struct saa7164_vbi_fh *fh = to_saa7164_vbi_fh(file);
struct saa7164_port *port = fh->port;
__poll_t mask = 0;
diff --git a/drivers/media/pci/saa7164/saa7164.h b/drivers/media/pci/saa7164/saa7164.h
index e1bac1fe19d3..94e987e7b5e5 100644
--- a/drivers/media/pci/saa7164/saa7164.h
+++ b/drivers/media/pci/saa7164/saa7164.h
@@ -180,12 +180,22 @@ struct saa7164_encoder_fh {
atomic_t v4l_reading;
};
+static inline struct saa7164_encoder_fh *to_saa7164_encoder_fh(struct file *filp)
+{
+ return container_of(file_to_v4l2_fh(filp), struct saa7164_encoder_fh, fh);
+}
+
struct saa7164_vbi_fh {
struct v4l2_fh fh;
struct saa7164_port *port;
atomic_t v4l_reading;
};
+static inline struct saa7164_vbi_fh *to_saa7164_vbi_fh(struct file *filp)
+{
+ return container_of(file_to_v4l2_fh(filp), struct saa7164_vbi_fh, fh);
+}
+
struct saa7164_histogram_bucket {
u32 val;
u32 count;
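
Note that to_saa7164_vbi_fh() also fixes a latent type mixup: the VBI vidioc_g_std() earlier in this diff read file->private_data as a struct saa7164_encoder_fh, which only worked because both structs happen to share a layout. container_of() makes the recovered type explicit; simplified (the kernel macro adds compile-time type checking):

	#define container_of_sketch(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))
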
diff --git a/drivers/media/pci/tw68/tw68-core.c b/drivers/media/pci/tw68/tw68-core.c
index 35dd19b2427e..08b7ce1043aa 100644
--- a/drivers/media/pci/tw68/tw68-core.c
+++ b/drivers/media/pci/tw68/tw68-core.c
@@ -14,7 +14,7 @@
*
* Refactored and updated to the latest v4l core frameworks:
*
- * Copyright (C) 2014 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2014 Hans Verkuil <hverkuil@kernel.org>
*/
#include <linux/init.h>
@@ -37,7 +37,7 @@
MODULE_DESCRIPTION("v4l2 driver module for tw6800 based video capture cards");
MODULE_AUTHOR("William M. Brack");
-MODULE_AUTHOR("Hans Verkuil <hverkuil@xs4all.nl>");
+MODULE_AUTHOR("Hans Verkuil <hverkuil@kernel.org>");
MODULE_LICENSE("GPL");
static unsigned int latency = UNSET;
diff --git a/drivers/media/pci/tw68/tw68-reg.h b/drivers/media/pci/tw68/tw68-reg.h
index dcd9931b25cc..8aeef452ea8a 100644
--- a/drivers/media/pci/tw68/tw68-reg.h
+++ b/drivers/media/pci/tw68/tw68-reg.h
@@ -13,7 +13,7 @@
*
* Refactored and updated to the latest v4l core frameworks:
*
- * Copyright (C) 2014 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2014 Hans Verkuil <hverkuil@kernel.org>
*/
#ifndef _TW68_REG_H_
diff --git a/drivers/media/pci/tw68/tw68-risc.c b/drivers/media/pci/tw68/tw68-risc.c
index dacb136c4f3a..e793db6134e4 100644
--- a/drivers/media/pci/tw68/tw68-risc.c
+++ b/drivers/media/pci/tw68/tw68-risc.c
@@ -14,7 +14,7 @@
*
* Refactored and updated to the latest v4l core frameworks:
*
- * Copyright (C) 2014 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2014 Hans Verkuil <hverkuil@kernel.org>
*/
#include "tw68.h"
diff --git a/drivers/media/pci/tw68/tw68-video.c b/drivers/media/pci/tw68/tw68-video.c
index 77773dec48b8..6232bac170d0 100644
--- a/drivers/media/pci/tw68/tw68-video.c
+++ b/drivers/media/pci/tw68/tw68-video.c
@@ -13,7 +13,7 @@
*
* Refactored and updated to the latest v4l core frameworks:
*
- * Copyright (C) 2014 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2014 Hans Verkuil <hverkuil@kernel.org>
*/
#include <linux/module.h>
diff --git a/drivers/media/pci/tw68/tw68.h b/drivers/media/pci/tw68/tw68.h
index a1f422d6e600..66be6b3bb7b6 100644
--- a/drivers/media/pci/tw68/tw68.h
+++ b/drivers/media/pci/tw68/tw68.h
@@ -13,7 +13,7 @@
*
* Refactored and updated to the latest v4l core frameworks:
*
- * Copyright (C) 2014 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2014 Hans Verkuil <hverkuil@kernel.org>
*/
#include <linux/pci.h>
diff --git a/drivers/media/pci/zoran/zoran.h b/drivers/media/pci/zoran/zoran.h
index 1cd990468d3d..d05e222b3921 100644
--- a/drivers/media/pci/zoran/zoran.h
+++ b/drivers/media/pci/zoran/zoran.h
@@ -154,12 +154,6 @@ struct zoran_jpg_settings {
struct zoran;
-/* zoran_fh contains per-open() settings */
-struct zoran_fh {
- struct v4l2_fh fh;
- struct zoran *zr;
-};
-
struct card_info {
enum card_type type;
char name[32];
diff --git a/drivers/media/pci/zoran/zoran_card.c b/drivers/media/pci/zoran/zoran_card.c
index e31f9f19a48a..d81facf735d9 100644
--- a/drivers/media/pci/zoran/zoran_card.c
+++ b/drivers/media/pci/zoran/zoran_card.c
@@ -67,10 +67,6 @@ module_param(pass_through, int, 0644);
MODULE_PARM_DESC(pass_through,
"Pass TV signal through to TV-out when idling");
-int zr36067_debug = 1;
-module_param_named(debug, zr36067_debug, int, 0644);
-MODULE_PARM_DESC(debug, "Debug level (0-5)");
-
#define ZORAN_VERSION "0.10.1"
MODULE_DESCRIPTION("Zoran-36057/36067 JPEG codec driver");
diff --git a/drivers/media/pci/zoran/zoran_card.h b/drivers/media/pci/zoran/zoran_card.h
index 518cb426b446..c4f81777e6ce 100644
--- a/drivers/media/pci/zoran/zoran_card.h
+++ b/drivers/media/pci/zoran/zoran_card.h
@@ -12,8 +12,6 @@
#ifndef __ZORAN_CARD_H__
#define __ZORAN_CARD_H__
-extern int zr36067_debug;
-
/* Anybody who uses more than four? */
#define BUZ_MAX 4
diff --git a/drivers/media/pci/zoran/zoran_driver.c b/drivers/media/pci/zoran/zoran_driver.c
index f42f596d3e62..5b4d5dd06edb 100644
--- a/drivers/media/pci/zoran/zoran_driver.c
+++ b/drivers/media/pci/zoran/zoran_driver.c
@@ -245,7 +245,7 @@ static int zoran_set_input(struct zoran *zr, int input)
* ioctl routine
*/
-static int zoran_querycap(struct file *file, void *__fh, struct v4l2_capability *cap)
+static int zoran_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
{
struct zoran *zr = video_drvdata(file);
@@ -278,7 +278,7 @@ static int zoran_enum_fmt(struct zoran *zr, struct v4l2_fmtdesc *fmt, int flag)
return -EINVAL;
}
-static int zoran_enum_fmt_vid_cap(struct file *file, void *__fh,
+static int zoran_enum_fmt_vid_cap(struct file *file, void *fh,
struct v4l2_fmtdesc *f)
{
struct zoran *zr = video_drvdata(file);
@@ -286,7 +286,7 @@ static int zoran_enum_fmt_vid_cap(struct file *file, void *__fh,
return zoran_enum_fmt(zr, f, ZORAN_FORMAT_CAPTURE);
}
-static int zoran_g_fmt_vid_out(struct file *file, void *__fh,
+static int zoran_g_fmt_vid_out(struct file *file, void *fh,
struct v4l2_format *fmt)
{
struct zoran *zr = video_drvdata(file);
@@ -308,13 +308,13 @@ static int zoran_g_fmt_vid_out(struct file *file, void *__fh,
return 0;
}
-static int zoran_g_fmt_vid_cap(struct file *file, void *__fh,
+static int zoran_g_fmt_vid_cap(struct file *file, void *fh,
struct v4l2_format *fmt)
{
struct zoran *zr = video_drvdata(file);
if (zr->map_mode != ZORAN_MAP_MODE_RAW)
- return zoran_g_fmt_vid_out(file, __fh, fmt);
+ return zoran_g_fmt_vid_out(file, fh, fmt);
fmt->fmt.pix.width = zr->v4l_settings.width;
fmt->fmt.pix.height = zr->v4l_settings.height;
fmt->fmt.pix.sizeimage = zr->buffer_size;
@@ -328,7 +328,7 @@ static int zoran_g_fmt_vid_cap(struct file *file, void *__fh,
return 0;
}
-static int zoran_try_fmt_vid_out(struct file *file, void *__fh,
+static int zoran_try_fmt_vid_out(struct file *file, void *fh,
struct v4l2_format *fmt)
{
struct zoran *zr = video_drvdata(file);
@@ -391,7 +391,7 @@ static int zoran_try_fmt_vid_out(struct file *file, void *__fh,
return res;
}
-static int zoran_try_fmt_vid_cap(struct file *file, void *__fh,
+static int zoran_try_fmt_vid_cap(struct file *file, void *fh,
struct v4l2_format *fmt)
{
struct zoran *zr = video_drvdata(file);
@@ -399,7 +399,7 @@ static int zoran_try_fmt_vid_cap(struct file *file, void *__fh,
int i;
if (fmt->fmt.pix.pixelformat == V4L2_PIX_FMT_MJPEG)
- return zoran_try_fmt_vid_out(file, __fh, fmt);
+ return zoran_try_fmt_vid_out(file, fh, fmt);
for (i = 0; i < NUM_FORMATS; i++)
if (zoran_formats[i].fourcc == fmt->fmt.pix.pixelformat)
@@ -427,7 +427,7 @@ static int zoran_try_fmt_vid_cap(struct file *file, void *__fh,
return 0;
}
-static int zoran_s_fmt_vid_out(struct file *file, void *__fh,
+static int zoran_s_fmt_vid_out(struct file *file, void *fh,
struct v4l2_format *fmt)
{
struct zoran *zr = video_drvdata(file);
@@ -507,11 +507,10 @@ static int zoran_s_fmt_vid_out(struct file *file, void *__fh,
return res;
}
-static int zoran_s_fmt_vid_cap(struct file *file, void *__fh,
+static int zoran_s_fmt_vid_cap(struct file *file, void *fh,
struct v4l2_format *fmt)
{
struct zoran *zr = video_drvdata(file);
- struct zoran_fh *fh = __fh;
int i;
int res = 0;
@@ -556,7 +555,7 @@ static int zoran_s_fmt_vid_cap(struct file *file, void *__fh,
return res;
}
-static int zoran_g_std(struct file *file, void *__fh, v4l2_std_id *std)
+static int zoran_g_std(struct file *file, void *fh, v4l2_std_id *std)
{
struct zoran *zr = video_drvdata(file);
@@ -564,7 +563,7 @@ static int zoran_g_std(struct file *file, void *__fh, v4l2_std_id *std)
return 0;
}
-static int zoran_s_std(struct file *file, void *__fh, v4l2_std_id std)
+static int zoran_s_std(struct file *file, void *fh, v4l2_std_id std)
{
struct zoran *zr = video_drvdata(file);
int res = 0;
@@ -579,7 +578,7 @@ static int zoran_s_std(struct file *file, void *__fh, v4l2_std_id std)
return res;
}
-static int zoran_enum_input(struct file *file, void *__fh,
+static int zoran_enum_input(struct file *file, void *fh,
struct v4l2_input *inp)
{
struct zoran *zr = video_drvdata(file);
@@ -596,7 +595,7 @@ static int zoran_enum_input(struct file *file, void *__fh,
return 0;
}
-static int zoran_g_input(struct file *file, void *__fh, unsigned int *input)
+static int zoran_g_input(struct file *file, void *fh, unsigned int *input)
{
struct zoran *zr = video_drvdata(file);
@@ -605,7 +604,7 @@ static int zoran_g_input(struct file *file, void *__fh, unsigned int *input)
return 0;
}
-static int zoran_s_input(struct file *file, void *__fh, unsigned int input)
+static int zoran_s_input(struct file *file, void *fh, unsigned int input)
{
struct zoran *zr = video_drvdata(file);
int res;
@@ -618,7 +617,7 @@ static int zoran_s_input(struct file *file, void *__fh, unsigned int input)
}
/* cropping (sub-frame capture) */
-static int zoran_g_selection(struct file *file, void *__fh, struct v4l2_selection *sel)
+static int zoran_g_selection(struct file *file, void *fh, struct v4l2_selection *sel)
{
struct zoran *zr = video_drvdata(file);
@@ -653,7 +652,7 @@ static int zoran_g_selection(struct file *file, void *__fh, struct v4l2_selectio
return 0;
}
-static int zoran_s_selection(struct file *file, void *__fh, struct v4l2_selection *sel)
+static int zoran_s_selection(struct file *file, void *fh, struct v4l2_selection *sel)
{
struct zoran *zr = video_drvdata(file);
struct zoran_jpg_settings settings;
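
zoran keeps its state per device rather than per open file, so every handler derives it with video_drvdata(file); that is why struct zoran_fh and the __fh parameter naming can go. Spelled out, the lookup is:

	static struct zoran *file_to_zoran(struct file *file)
	{
		/* drvdata was attached with video_set_drvdata() at probe */
		return video_get_drvdata(video_devdata(file));
	}
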
diff --git a/drivers/media/platform/allegro-dvt/allegro-core.c b/drivers/media/platform/allegro-dvt/allegro-core.c
index eb03df0d8652..510c3c9661d9 100644
--- a/drivers/media/platform/allegro-dvt/allegro-core.c
+++ b/drivers/media/platform/allegro-dvt/allegro-core.c
@@ -197,8 +197,6 @@ static const struct regmap_config allegro_sram_config = {
.cache_type = REGCACHE_NONE,
};
-#define fh_to_channel(__fh) container_of(__fh, struct allegro_channel, fh)
-
struct allegro_channel {
struct allegro_dev *dev;
struct v4l2_fh fh;
@@ -302,6 +300,11 @@ struct allegro_channel {
unsigned int error;
};
+static inline struct allegro_channel *file_to_channel(struct file *filp)
+{
+ return container_of(file_to_v4l2_fh(filp), struct allegro_channel, fh);
+}
+
static inline int
allegro_channel_get_i_frame_qp(struct allegro_channel *channel)
{
@@ -3214,8 +3217,7 @@ static int allegro_open(struct file *file)
}
list_add(&channel->list, &dev->channels);
- file->private_data = &channel->fh;
- v4l2_fh_add(&channel->fh);
+ v4l2_fh_add(&channel->fh, file);
allegro_channel_adjust(channel);
@@ -3229,7 +3231,7 @@ error:
static int allegro_release(struct file *file)
{
- struct allegro_channel *channel = fh_to_channel(file->private_data);
+ struct allegro_channel *channel = file_to_channel(file);
v4l2_m2m_ctx_release(channel->fh.m2m_ctx);
@@ -3237,7 +3239,7 @@ static int allegro_release(struct file *file)
v4l2_ctrl_handler_free(&channel->ctrl_handler);
- v4l2_fh_del(&channel->fh);
+ v4l2_fh_del(&channel->fh, file);
v4l2_fh_exit(&channel->fh);
kfree(channel);
@@ -3280,7 +3282,7 @@ static int allegro_enum_fmt_vid(struct file *file, void *fh,
static int allegro_g_fmt_vid_cap(struct file *file, void *fh,
struct v4l2_format *f)
{
- struct allegro_channel *channel = fh_to_channel(fh);
+ struct allegro_channel *channel = file_to_channel(file);
f->fmt.pix.field = V4L2_FIELD_NONE;
f->fmt.pix.width = channel->width;
@@ -3322,7 +3324,7 @@ static int allegro_try_fmt_vid_cap(struct file *file, void *fh,
static int allegro_s_fmt_vid_cap(struct file *file, void *fh,
struct v4l2_format *f)
{
- struct allegro_channel *channel = fh_to_channel(fh);
+ struct allegro_channel *channel = file_to_channel(file);
struct vb2_queue *vq;
int err;
@@ -3346,7 +3348,7 @@ static int allegro_s_fmt_vid_cap(struct file *file, void *fh,
static int allegro_g_fmt_vid_out(struct file *file, void *fh,
struct v4l2_format *f)
{
- struct allegro_channel *channel = fh_to_channel(fh);
+ struct allegro_channel *channel = file_to_channel(file);
f->fmt.pix.field = V4L2_FIELD_NONE;
@@ -3393,7 +3395,7 @@ static int allegro_try_fmt_vid_out(struct file *file, void *fh,
static int allegro_s_fmt_vid_out(struct file *file, void *fh,
struct v4l2_format *f)
{
- struct allegro_channel *channel = fh_to_channel(fh);
+ struct allegro_channel *channel = file_to_channel(file);
int err;
err = allegro_try_fmt_vid_out(file, fh, f);
@@ -3434,7 +3436,7 @@ static int allegro_channel_cmd_start(struct allegro_channel *channel)
static int allegro_encoder_cmd(struct file *file, void *fh,
struct v4l2_encoder_cmd *cmd)
{
- struct allegro_channel *channel = fh_to_channel(fh);
+ struct allegro_channel *channel = file_to_channel(file);
int err;
err = v4l2_m2m_ioctl_try_encoder_cmd(file, fh, cmd);
@@ -3483,8 +3485,7 @@ static int allegro_enum_framesizes(struct file *file, void *fh,
static int allegro_ioctl_streamon(struct file *file, void *priv,
enum v4l2_buf_type type)
{
- struct v4l2_fh *fh = file->private_data;
- struct allegro_channel *channel = fh_to_channel(fh);
+ struct allegro_channel *channel = file_to_channel(file);
int err;
if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
@@ -3493,13 +3494,13 @@ static int allegro_ioctl_streamon(struct file *file, void *priv,
return err;
}
- return v4l2_m2m_streamon(file, fh->m2m_ctx, type);
+ return v4l2_m2m_streamon(file, channel->fh.m2m_ctx, type);
}
static int allegro_g_parm(struct file *file, void *fh,
struct v4l2_streamparm *a)
{
- struct allegro_channel *channel = fh_to_channel(fh);
+ struct allegro_channel *channel = file_to_channel(file);
struct v4l2_fract *timeperframe;
if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
@@ -3516,7 +3517,7 @@ static int allegro_g_parm(struct file *file, void *fh,
static int allegro_s_parm(struct file *file, void *fh,
struct v4l2_streamparm *a)
{
- struct allegro_channel *channel = fh_to_channel(fh);
+ struct allegro_channel *channel = file_to_channel(file);
struct v4l2_fract *timeperframe;
int div;
diff --git a/drivers/media/platform/amlogic/c3/mipi-csi2/c3-mipi-csi2.c b/drivers/media/platform/amlogic/c3/mipi-csi2/c3-mipi-csi2.c
index 1011ab3ebac7..b9e4ef3fc308 100644
--- a/drivers/media/platform/amlogic/c3/mipi-csi2/c3-mipi-csi2.c
+++ b/drivers/media/platform/amlogic/c3/mipi-csi2/c3-mipi-csi2.c
@@ -388,13 +388,12 @@ static void c3_mipi_csi_cfg_host(struct c3_csi_device *csi)
c3_mipi_csi_write(csi, CSI2_HOST_N_LANES, csi->bus.num_data_lanes - 1);
}
-static int c3_mipi_csi_start_stream(struct c3_csi_device *csi,
- struct v4l2_subdev *src_sd)
+static int c3_mipi_csi_start_stream(struct c3_csi_device *csi)
{
s64 link_freq;
s64 lane_rate;
- link_freq = v4l2_get_link_freq(src_sd->ctrl_handler, 0, 0);
+ link_freq = v4l2_get_link_freq(csi->src_pad, 0, 0);
if (link_freq < 0) {
dev_err(csi->dev,
"Unable to obtain link frequency: %lld\n", link_freq);
@@ -434,7 +433,7 @@ static int c3_mipi_csi_enable_streams(struct v4l2_subdev *sd,
pm_runtime_resume_and_get(csi->dev);
- c3_mipi_csi_start_stream(csi, src_sd);
+ c3_mipi_csi_start_stream(csi);
ret = v4l2_subdev_enable_streams(src_sd, csi->src_pad->index, BIT(0));
if (ret) {
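
c3_mipi_csi_start_stream() now queries the link frequency from its own source pad rather than reaching into the remote subdev's control handler. The lane-rate computation after the elided check is not visible here; a typical D-PHY derivation (an assumption, not taken from this diff) looks like:

	s64 link_freq, lane_rate;

	link_freq = v4l2_get_link_freq(csi->src_pad, 0, 0);
	if (link_freq < 0)
		return link_freq;	/* no LINK_FREQ/PIXEL_RATE to query */
	lane_rate = link_freq * 2;	/* D-PHY is DDR: 2 bits per clock per lane */
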
diff --git a/drivers/media/platform/amlogic/meson-ge2d/ge2d.c b/drivers/media/platform/amlogic/meson-ge2d/ge2d.c
index 0c004bb8ba05..5744853a4003 100644
--- a/drivers/media/platform/amlogic/meson-ge2d/ge2d.c
+++ b/drivers/media/platform/amlogic/meson-ge2d/ge2d.c
@@ -82,6 +82,11 @@ struct ge2d_ctx {
u32 xy_swap;
};
+static inline struct ge2d_ctx *file_to_ge2d_ctx(struct file *filp)
+{
+ return container_of(file_to_v4l2_fh(filp), struct ge2d_ctx, fh);
+}
+
struct meson_ge2d {
struct v4l2_device v4l2_dev;
struct v4l2_m2m_dev *m2m_dev;
@@ -452,7 +457,7 @@ static int vidioc_enum_fmt(struct file *file, void *priv, struct v4l2_fmtdesc *f
static int vidioc_g_selection(struct file *file, void *priv,
struct v4l2_selection *s)
{
- struct ge2d_ctx *ctx = priv;
+ struct ge2d_ctx *ctx = file_to_ge2d_ctx(file);
struct ge2d_frame *f;
bool use_frame = false;
@@ -502,7 +507,7 @@ static int vidioc_g_selection(struct file *file, void *priv,
static int vidioc_s_selection(struct file *file, void *priv,
struct v4l2_selection *s)
{
- struct ge2d_ctx *ctx = priv;
+ struct ge2d_ctx *ctx = file_to_ge2d_ctx(file);
struct meson_ge2d *ge2d = ctx->ge2d;
struct ge2d_frame *f;
int ret = 0;
@@ -569,8 +574,8 @@ static void vidioc_setup_cap_fmt(struct ge2d_ctx *ctx, struct v4l2_pix_format *f
static int vidioc_try_fmt_cap(struct file *file, void *priv, struct v4l2_format *f)
{
+ struct ge2d_ctx *ctx = file_to_ge2d_ctx(file);
const struct ge2d_fmt *fmt = find_fmt(f);
- struct ge2d_ctx *ctx = priv;
struct v4l2_pix_format fmt_cap;
vidioc_setup_cap_fmt(ctx, &fmt_cap);
@@ -590,7 +595,7 @@ static int vidioc_try_fmt_cap(struct file *file, void *priv, struct v4l2_format
static int vidioc_s_fmt_cap(struct file *file, void *priv, struct v4l2_format *f)
{
- struct ge2d_ctx *ctx = priv;
+ struct ge2d_ctx *ctx = file_to_ge2d_ctx(file);
struct meson_ge2d *ge2d = ctx->ge2d;
struct vb2_queue *vq;
struct ge2d_frame *frm;
@@ -626,7 +631,7 @@ static int vidioc_s_fmt_cap(struct file *file, void *priv, struct v4l2_format *f
static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
- struct ge2d_ctx *ctx = priv;
+ struct ge2d_ctx *ctx = file_to_ge2d_ctx(file);
struct vb2_queue *vq;
struct ge2d_frame *frm;
@@ -665,7 +670,7 @@ static int vidioc_try_fmt_out(struct file *file, void *priv, struct v4l2_format
static int vidioc_s_fmt_out(struct file *file, void *priv, struct v4l2_format *f)
{
- struct ge2d_ctx *ctx = priv;
+ struct ge2d_ctx *ctx = file_to_ge2d_ctx(file);
struct meson_ge2d *ge2d = ctx->ge2d;
struct vb2_queue *vq;
struct ge2d_frame *frm, *frm_cap;
@@ -855,8 +860,7 @@ static int ge2d_open(struct file *file)
return ret;
}
v4l2_fh_init(&ctx->fh, video_devdata(file));
- file->private_data = &ctx->fh;
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
ge2d_setup_ctrls(ctx);
@@ -871,8 +875,7 @@ static int ge2d_open(struct file *file)
static int ge2d_release(struct file *file)
{
- struct ge2d_ctx *ctx =
- container_of(file->private_data, struct ge2d_ctx, fh);
+ struct ge2d_ctx *ctx = file_to_ge2d_ctx(file);
struct meson_ge2d *ge2d = ctx->ge2d;
mutex_lock(&ge2d->mutex);
@@ -880,7 +883,7 @@ static int ge2d_release(struct file *file)
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
v4l2_ctrl_handler_free(&ctx->ctrl_handler);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
kfree(ctx);
diff --git a/drivers/media/platform/amphion/vpu.h b/drivers/media/platform/amphion/vpu.h
index cac0f1a64fea..bfd171a3ded4 100644
--- a/drivers/media/platform/amphion/vpu.h
+++ b/drivers/media/platform/amphion/vpu.h
@@ -328,7 +328,7 @@ static inline const char *vpu_core_type_desc(enum vpu_core_type type)
static inline struct vpu_inst *to_inst(struct file *filp)
{
- return container_of(filp->private_data, struct vpu_inst, fh);
+ return container_of(file_to_v4l2_fh(filp), struct vpu_inst, fh);
}
#define ctrl_to_inst(ctrl) \
diff --git a/drivers/media/platform/amphion/vpu_v4l2.c b/drivers/media/platform/amphion/vpu_v4l2.c
index 74668fa362e2..fcb2eff813ac 100644
--- a/drivers/media/platform/amphion/vpu_v4l2.c
+++ b/drivers/media/platform/amphion/vpu_v4l2.c
@@ -24,6 +24,11 @@
#include "vpu_msgs.h"
#include "vpu_helpers.h"
+static char *vpu_type_name(u32 type)
+{
+ return V4L2_TYPE_IS_OUTPUT(type) ? "output" : "capture";
+}
+
void vpu_inst_lock(struct vpu_inst *inst)
{
mutex_lock(&inst->lock);
@@ -42,7 +47,7 @@ dma_addr_t vpu_get_vb_phy_addr(struct vb2_buffer *vb, u32 plane_no)
vb->planes[plane_no].data_offset;
}
-unsigned int vpu_get_vb_length(struct vb2_buffer *vb, u32 plane_no)
+static unsigned int vpu_get_vb_length(struct vb2_buffer *vb, u32 plane_no)
{
if (plane_no >= vb->num_planes)
return 0;
@@ -81,7 +86,7 @@ void vpu_v4l2_set_error(struct vpu_inst *inst)
vpu_inst_unlock(inst);
}
-int vpu_notify_eos(struct vpu_inst *inst)
+static int vpu_notify_eos(struct vpu_inst *inst)
{
static const struct v4l2_event ev = {
.id = 0,
@@ -573,7 +578,8 @@ static void vpu_vb2_buf_finish(struct vb2_buffer *vb)
call_void_vop(inst, on_queue_empty, q->type);
}
-void vpu_vb2_buffers_return(struct vpu_inst *inst, unsigned int type, enum vb2_buffer_state state)
+static void vpu_vb2_buffers_return(struct vpu_inst *inst, unsigned int type,
+ enum vb2_buffer_state state)
{
struct vb2_v4l2_buffer *buf;
@@ -718,8 +724,6 @@ static int vpu_v4l2_release(struct vpu_inst *inst)
v4l2_ctrl_handler_free(&inst->ctrl_handler);
mutex_destroy(&inst->lock);
- v4l2_fh_del(&inst->fh);
- v4l2_fh_exit(&inst->fh);
call_void_vop(inst, cleanup);
@@ -756,7 +760,7 @@ int vpu_v4l2_open(struct file *file, struct vpu_inst *inst)
inst->min_buffer_cap = 2;
inst->min_buffer_out = 2;
v4l2_fh_init(&inst->fh, func->vfd);
- v4l2_fh_add(&inst->fh);
+ v4l2_fh_add(&inst->fh, file);
ret = call_vop(inst, ctrl_init);
if (ret)
@@ -770,7 +774,6 @@ int vpu_v4l2_open(struct file *file, struct vpu_inst *inst)
}
inst->fh.ctrl_handler = &inst->ctrl_handler;
- file->private_data = &inst->fh;
inst->state = VPU_CODEC_STATE_DEINIT;
inst->workqueue = alloc_ordered_workqueue("vpu_inst", WQ_MEM_RECLAIM);
if (inst->workqueue) {
@@ -788,6 +791,8 @@ int vpu_v4l2_open(struct file *file, struct vpu_inst *inst)
return 0;
error:
+ v4l2_fh_del(&inst->fh, file);
+ v4l2_fh_exit(&inst->fh);
vpu_inst_put(inst);
return ret;
}
@@ -807,6 +812,9 @@ int vpu_v4l2_close(struct file *file)
call_void_vop(inst, release);
vpu_inst_unlock(inst);
+ v4l2_fh_del(&inst->fh, file);
+ v4l2_fh_exit(&inst->fh);
+
vpu_inst_unregister(inst);
vpu_inst_put(inst);
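
Because v4l2_fh_del() now needs the struct file, amphion moves the del/exit pair out of vpu_v4l2_release(), which has no file pointer, into the open error path and vpu_v4l2_close(). The balanced lifecycle this establishes, in sketch form:

	/* open path */
	v4l2_fh_init(&inst->fh, video_devdata(file));
	v4l2_fh_add(&inst->fh, file);	/* also binds the fh to this file */

	/* open error unwind, and the normal close path */
	v4l2_fh_del(&inst->fh, file);
	v4l2_fh_exit(&inst->fh);
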
diff --git a/drivers/media/platform/amphion/vpu_v4l2.h b/drivers/media/platform/amphion/vpu_v4l2.h
index 56f2939fa84d..4a87b06ae520 100644
--- a/drivers/media/platform/amphion/vpu_v4l2.h
+++ b/drivers/media/platform/amphion/vpu_v4l2.h
@@ -26,15 +26,12 @@ void vpu_skip_frame(struct vpu_inst *inst, int count);
struct vb2_v4l2_buffer *vpu_find_buf_by_sequence(struct vpu_inst *inst, u32 type, u32 sequence);
struct vb2_v4l2_buffer *vpu_find_buf_by_idx(struct vpu_inst *inst, u32 type, u32 idx);
void vpu_v4l2_set_error(struct vpu_inst *inst);
-int vpu_notify_eos(struct vpu_inst *inst);
int vpu_notify_source_change(struct vpu_inst *inst);
int vpu_set_last_buffer_dequeued(struct vpu_inst *inst, bool eos);
-void vpu_vb2_buffers_return(struct vpu_inst *inst, unsigned int type, enum vb2_buffer_state state);
int vpu_get_num_buffers(struct vpu_inst *inst, u32 type);
bool vpu_is_source_empty(struct vpu_inst *inst);
dma_addr_t vpu_get_vb_phy_addr(struct vb2_buffer *vb, u32 plane_no);
-unsigned int vpu_get_vb_length(struct vb2_buffer *vb, u32 plane_no);
static inline struct vpu_format *vpu_get_format(struct vpu_inst *inst, u32 type)
{
if (V4L2_TYPE_IS_OUTPUT(type))
@@ -43,11 +40,6 @@ static inline struct vpu_format *vpu_get_format(struct vpu_inst *inst, u32 type)
return &inst->cap_format;
}
-static inline char *vpu_type_name(u32 type)
-{
- return V4L2_TYPE_IS_OUTPUT(type) ? "output" : "capture";
-}
-
static inline int vpu_vb_is_codecconfig(struct vb2_v4l2_buffer *vbuf)
{
#ifdef V4L2_BUF_FLAG_CODECCONFIG
diff --git a/drivers/media/platform/aspeed/aspeed-video.c b/drivers/media/platform/aspeed/aspeed-video.c
index 54cae0da9aca..b83e43245277 100644
--- a/drivers/media/platform/aspeed/aspeed-video.c
+++ b/drivers/media/platform/aspeed/aspeed-video.c
@@ -4,6 +4,7 @@
#include <linux/atomic.h>
#include <linux/bitfield.h>
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
@@ -25,6 +26,8 @@
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/ktime.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
@@ -203,6 +206,25 @@
#define VE_MEM_RESTRICT_START 0x310
#define VE_MEM_RESTRICT_END 0x314
+/* SCU's registers */
+#define SCU_MISC_CTRL 0xC0
+#define SCU_DPLL_SOURCE BIT(20)
+
+/* GFX's registers */
+#define GFX_CTRL 0x60
+#define GFX_CTRL_ENABLE BIT(0)
+#define GFX_CTRL_FMT GENMASK(9, 7)
+
+#define GFX_H_DISPLAY 0x70
+#define GFX_H_DISPLAY_DE GENMASK(28, 16)
+#define GFX_H_DISPLAY_TOTAL GENMASK(12, 0)
+
+#define GFX_V_DISPLAY 0x78
+#define GFX_V_DISPLAY_DE GENMASK(27, 16)
+#define GFX_V_DISPLAY_TOTAL GENMASK(11, 0)
+
+#define GFX_DISPLAY_ADDR 0x80
+
/*
* VIDEO_MODE_DETECT_DONE: a flag raised if signal lock
* VIDEO_RES_CHANGE: a flag raised if res_change work on-going
@@ -262,6 +284,7 @@ struct aspeed_video_perf {
/*
* struct aspeed_video - driver data
*
+ * version: holds the version of the ASPEED SoC
* res_work: holds the delayed_work for res-detection if unlock
* buffers: holds the list of buffer queued from user
* flags: holds the state of video
@@ -273,6 +296,7 @@ struct aspeed_video_perf {
* yuv420: a flag raised if JPEG subsampling is 420
* format: holds the video format
* hq_mode: a flag raised if HQ is enabled. Only for VIDEO_FMT_ASPEED
+ * input: holds the video input
* frame_rate: holds the frame_rate
* jpeg_quality: holds jpeq's quality (0~11)
* jpeg_hq_quality: holds hq's quality (1~12) only if hq_mode enabled
@@ -298,6 +322,9 @@ struct aspeed_video {
struct video_device vdev;
struct mutex video_lock; /* v4l2 and videobuf2 lock */
+ struct regmap *scu;
+ struct regmap *gfx;
+ u32 version;
u32 jpeg_mode;
u32 comp_size_read;
@@ -316,6 +343,7 @@ struct aspeed_video {
bool yuv420;
enum aspeed_video_format format;
bool hq_mode;
+ enum aspeed_video_input input;
unsigned int frame_rate;
unsigned int jpeg_quality;
unsigned int jpeg_hq_quality;
@@ -331,21 +359,25 @@ struct aspeed_video {
#define to_aspeed_video(x) container_of((x), struct aspeed_video, v4l2_dev)
struct aspeed_video_config {
+ u32 version;
u32 jpeg_mode;
u32 comp_size_read;
};
static const struct aspeed_video_config ast2400_config = {
+ .version = 4,
.jpeg_mode = AST2400_VE_SEQ_CTRL_JPEG_MODE,
.comp_size_read = AST2400_VE_COMP_SIZE_READ_BACK,
};
static const struct aspeed_video_config ast2500_config = {
+ .version = 5,
.jpeg_mode = AST2500_VE_SEQ_CTRL_JPEG_MODE,
.comp_size_read = AST2400_VE_COMP_SIZE_READ_BACK,
};
static const struct aspeed_video_config ast2600_config = {
+ .version = 6,
.jpeg_mode = AST2500_VE_SEQ_CTRL_JPEG_MODE,
.comp_size_read = AST2600_VE_COMP_SIZE_READ_BACK,
};
@@ -485,6 +517,7 @@ static const struct v4l2_dv_timings_cap aspeed_video_timings_cap = {
static const char * const format_str[] = {"Standard JPEG",
"Aspeed JPEG"};
+static const char * const input_str[] = {"HOST VGA", "BMC GFX"};
static unsigned int debug;
@@ -609,6 +642,14 @@ static int aspeed_video_start_frame(struct aspeed_video *video)
aspeed_video_free_buf(video, &video->bcd);
}
+ if (video->input == VIDEO_INPUT_GFX) {
+ u32 val;
+
+ /* point the capture input address at the current GFX frame buffer */
+ regmap_read(video->gfx, GFX_DISPLAY_ADDR, &val);
+ aspeed_video_write(video, VE_TGS_0, val);
+ }
+
spin_lock_irqsave(&video->lock, flags);
buf = list_first_entry_or_null(&video->buffers,
struct aspeed_video_buffer, link);
@@ -1026,9 +1067,23 @@ static void aspeed_video_get_timings(struct aspeed_video *v,
}
}
+static void aspeed_video_get_resolution_gfx(struct aspeed_video *video,
+ struct v4l2_bt_timings *det)
+{
+ u32 h_val, v_val;
+
+ regmap_read(video->gfx, GFX_H_DISPLAY, &h_val);
+ regmap_read(video->gfx, GFX_V_DISPLAY, &v_val);
+
+ det->width = FIELD_GET(GFX_H_DISPLAY_DE, h_val) + 1;
+ det->height = FIELD_GET(GFX_V_DISPLAY_DE, v_val) + 1;
+ video->v4l2_input_status = 0;
+}
+
#define res_check(v) test_and_clear_bit(VIDEO_MODE_DETECT_DONE, &(v)->flags)
-static void aspeed_video_get_resolution(struct aspeed_video *video)
+static void aspeed_video_get_resolution_vga(struct aspeed_video *video,
+ struct v4l2_bt_timings *det)
{
bool invalid_resolution = true;
int rc;
@@ -1036,7 +1091,6 @@ static void aspeed_video_get_resolution(struct aspeed_video *video)
u32 mds;
u32 src_lr_edge;
u32 src_tb_edge;
- struct v4l2_bt_timings *det = &video->detected_timings;
det->width = MIN_WIDTH;
det->height = MIN_HEIGHT;
@@ -1113,14 +1167,20 @@ static void aspeed_video_get_resolution(struct aspeed_video *video)
aspeed_video_get_timings(video, det);
- /*
- * Enable mode-detect watchdog, resolution-change watchdog and
- * automatic compression after frame capture.
- */
+ /* Enable mode-detect watchdog, resolution-change watchdog */
aspeed_video_update(video, VE_INTERRUPT_CTRL, 0,
VE_INTERRUPT_MODE_DETECT_WD);
- aspeed_video_update(video, VE_SEQ_CTRL, 0,
- VE_SEQ_CTRL_AUTO_COMP | VE_SEQ_CTRL_EN_WATCHDOG);
+ aspeed_video_update(video, VE_SEQ_CTRL, 0, VE_SEQ_CTRL_EN_WATCHDOG);
+}
+
+static void aspeed_video_get_resolution(struct aspeed_video *video)
+{
+ struct v4l2_bt_timings *det = &video->detected_timings;
+
+ if (video->input == VIDEO_INPUT_GFX)
+ aspeed_video_get_resolution_gfx(video, det);
+ else
+ aspeed_video_get_resolution_vga(video, det);
v4l2_dbg(1, debug, &video->v4l2_dev, "Got resolution: %dx%d\n",
det->width, det->height);
@@ -1156,7 +1216,7 @@ static void aspeed_video_set_resolution(struct aspeed_video *video)
aspeed_video_write(video, VE_SRC_SCANLINE_OFFSET, act->width * 4);
/* Don't use direct mode below 1024 x 768 (irqs don't fire) */
- if (size < DIRECT_FETCH_THRESHOLD) {
+ if (video->input == VIDEO_INPUT_VGA && size < DIRECT_FETCH_THRESHOLD) {
v4l2_dbg(1, debug, &video->v4l2_dev, "Capture: Sync Mode\n");
aspeed_video_write(video, VE_TGS_0,
FIELD_PREP(VE_TGS_FIRST,
@@ -1171,10 +1231,20 @@ static void aspeed_video_set_resolution(struct aspeed_video *video)
VE_CTRL_INT_DE | VE_CTRL_DIRECT_FETCH,
VE_CTRL_INT_DE);
} else {
+ u32 ctrl, val, bpp;
+
v4l2_dbg(1, debug, &video->v4l2_dev, "Capture: Direct Mode\n");
+ ctrl = VE_CTRL_DIRECT_FETCH;
+ if (video->input == VIDEO_INPUT_GFX) {
+ regmap_read(video->gfx, GFX_CTRL, &val);
+ bpp = FIELD_GET(GFX_CTRL_FMT, val) ? 32 : 16;
+ if (bpp == 16)
+ ctrl |= VE_CTRL_INT_DE;
+ aspeed_video_write(video, VE_TGS_1, act->width * (bpp >> 3));
+ }
aspeed_video_update(video, VE_CTRL,
VE_CTRL_INT_DE | VE_CTRL_DIRECT_FETCH,
- VE_CTRL_DIRECT_FETCH);
+ ctrl);
}
size *= 4;
@@ -1207,6 +1277,22 @@ err_mem:
aspeed_video_free_buf(video, &video->srcs[0]);
}
+/*
+ * Update the parameters that depend on the timings whenever they change.
+ *
+ * @video: the aspeed_video instance
+ * @timings: the new timings
+ */
+static void aspeed_video_update_timings(struct aspeed_video *video, struct v4l2_bt_timings *timings)
+{
+ video->active_timings = *timings;
+ aspeed_video_set_resolution(video);
+
+ video->pix_fmt.width = timings->width;
+ video->pix_fmt.height = timings->height;
+ video->pix_fmt.sizeimage = video->max_compressed_size;
+}
+
static void aspeed_video_update_regs(struct aspeed_video *video)
{
u8 jpeg_hq_quality = clamp((int)video->jpeg_hq_quality - 1, 0,
@@ -1219,6 +1305,8 @@ static void aspeed_video_update_regs(struct aspeed_video *video)
u32 ctrl = 0;
u32 seq_ctrl = 0;
+ v4l2_dbg(1, debug, &video->v4l2_dev, "input(%s)\n",
+ input_str[video->input]);
v4l2_dbg(1, debug, &video->v4l2_dev, "framerate(%d)\n",
video->frame_rate);
v4l2_dbg(1, debug, &video->v4l2_dev, "jpeg format(%s) subsample(%s)\n",
@@ -1234,6 +1322,9 @@ static void aspeed_video_update_regs(struct aspeed_video *video)
else
aspeed_video_update(video, VE_BCD_CTRL, VE_BCD_CTRL_EN_BCD, 0);
+ if (video->input == VIDEO_INPUT_VGA)
+ ctrl |= VE_CTRL_AUTO_OR_CURSOR;
+
if (video->frame_rate)
ctrl |= FIELD_PREP(VE_CTRL_FRC, video->frame_rate);
@@ -1252,7 +1343,9 @@ static void aspeed_video_update_regs(struct aspeed_video *video)
aspeed_video_update(video, VE_SEQ_CTRL,
video->jpeg_mode | VE_SEQ_CTRL_YUV420,
seq_ctrl);
- aspeed_video_update(video, VE_CTRL, VE_CTRL_FRC, ctrl);
+ aspeed_video_update(video, VE_CTRL,
+ VE_CTRL_FRC | VE_CTRL_AUTO_OR_CURSOR |
+ VE_CTRL_SOURCE, ctrl);
aspeed_video_update(video, VE_COMP_CTRL,
VE_COMP_CTRL_DCT_LUM | VE_COMP_CTRL_DCT_CHR |
VE_COMP_CTRL_EN_HQ | VE_COMP_CTRL_HQ_DCT_LUM |
@@ -1280,6 +1373,7 @@ static void aspeed_video_init_regs(struct aspeed_video *video)
aspeed_video_write(video, VE_JPEG_ADDR, video->jpeg.dma);
/* Set control registers */
+ aspeed_video_write(video, VE_SEQ_CTRL, VE_SEQ_CTRL_AUTO_COMP);
aspeed_video_write(video, VE_CTRL, ctrl);
aspeed_video_write(video, VE_COMP_CTRL, VE_COMP_CTRL_RSVD);
@@ -1311,12 +1405,7 @@ static void aspeed_video_start(struct aspeed_video *video)
aspeed_video_get_resolution(video);
/* Set timings since the device is being opened for the first time */
- video->active_timings = video->detected_timings;
- aspeed_video_set_resolution(video);
-
- video->pix_fmt.width = video->active_timings.width;
- video->pix_fmt.height = video->active_timings.height;
- video->pix_fmt.sizeimage = video->max_compressed_size;
+ aspeed_video_update_timings(video, &video->detected_timings);
}
static void aspeed_video_stop(struct aspeed_video *video)
@@ -1401,10 +1490,10 @@ static int aspeed_video_enum_input(struct file *file, void *fh,
{
struct aspeed_video *video = video_drvdata(file);
- if (inp->index)
+ if (inp->index >= VIDEO_INPUT_MAX)
return -EINVAL;
- strscpy(inp->name, "Host VGA capture", sizeof(inp->name));
+ snprintf(inp->name, sizeof(inp->name), "%s capture", input_str[inp->index]);
inp->type = V4L2_INPUT_TYPE_CAMERA;
inp->capabilities = V4L2_IN_CAP_DV_TIMINGS;
inp->status = video->v4l2_input_status;
@@ -1414,16 +1503,57 @@ static int aspeed_video_enum_input(struct file *file, void *fh,
static int aspeed_video_get_input(struct file *file, void *fh, unsigned int *i)
{
- *i = 0;
+ struct aspeed_video *video = video_drvdata(file);
+
+ *i = video->input;
return 0;
}
static int aspeed_video_set_input(struct file *file, void *fh, unsigned int i)
{
- if (i)
+ struct aspeed_video *video = video_drvdata(file);
+
+ if (i >= VIDEO_INPUT_MAX)
return -EINVAL;
+ if (i == video->input)
+ return 0;
+
+ if (vb2_is_busy(&video->queue))
+ return -EBUSY;
+
+ if (IS_ERR(video->scu)) {
+ v4l2_dbg(1, debug, &video->v4l2_dev,
+ "%s: scu isn't ready for input-control\n", __func__);
+ return -EINVAL;
+ }
+
+ if (IS_ERR(video->gfx) && i == VIDEO_INPUT_GFX) {
+ v4l2_dbg(1, debug, &video->v4l2_dev,
+ "%s: gfx isn't ready for GFX input\n", __func__);
+ return -EINVAL;
+ }
+
+ video->input = i;
+
+ if (video->version == 6) {
+ /* modify dpll source per current input */
+ if (video->input == VIDEO_INPUT_VGA)
+ regmap_update_bits(video->scu, SCU_MISC_CTRL,
+ SCU_DPLL_SOURCE, 0);
+ else
+ regmap_update_bits(video->scu, SCU_MISC_CTRL,
+ SCU_DPLL_SOURCE, SCU_DPLL_SOURCE);
+ }
+
+ aspeed_video_update_regs(video);
+
+ /* update signal status */
+ aspeed_video_get_resolution(video);
+ if (!video->v4l2_input_status)
+ aspeed_video_update_timings(video, &video->detected_timings);
+
return 0;
}
@@ -1527,13 +1657,7 @@ static int aspeed_video_set_dv_timings(struct file *file, void *fh,
if (vb2_is_busy(&video->queue))
return -EBUSY;
- video->active_timings = timings->bt;
-
- aspeed_video_set_resolution(video);
-
- video->pix_fmt.width = timings->bt.width;
- video->pix_fmt.height = timings->bt.height;
- video->pix_fmt.sizeimage = video->max_compressed_size;
+ aspeed_video_update_timings(video, &timings->bt);
timings->type = V4L2_DV_BT_656_1120;
@@ -1909,6 +2033,7 @@ static int aspeed_video_debugfs_show(struct seq_file *s, void *data)
val08 = aspeed_video_read(v, VE_CTRL);
if (FIELD_GET(VE_CTRL_DIRECT_FETCH, val08)) {
seq_printf(s, " %-20s:\tDirect fetch\n", "Mode");
+ seq_printf(s, " %-20s:\t%s\n", "Input", input_str[v->input]);
seq_printf(s, " %-20s:\t%s\n", "VGA bpp mode",
FIELD_GET(VE_CTRL_INT_DE, val08) ? "16" : "32");
} else {
@@ -2068,12 +2193,29 @@ static int aspeed_video_setup_video(struct aspeed_video *video)
return 0;
}
+/*
+ * Look up a syscon regmap without claiming resources such as clocks or
+ * resets, which could conflict with the peripheral's owning driver.
+ */
+static struct regmap *aspeed_regmap_lookup(struct device_node *np, const char *property)
+{
+ struct device_node *syscon_np __free(device_node) = of_parse_phandle(np, property, 0);
+
+ if (!syscon_np)
+ return ERR_PTR(-ENODEV);
+
+ return device_node_to_regmap(syscon_np);
+}
+
static int aspeed_video_init(struct aspeed_video *video)
{
int irq;
int rc;
struct device *dev = video->dev;
+ video->scu = aspeed_regmap_lookup(dev->of_node, "aspeed,scu");
+ video->gfx = aspeed_regmap_lookup(dev->of_node, "aspeed,gfx");
+
irq = irq_of_parse_and_map(dev->of_node, 0);
if (!irq) {
dev_err(dev, "Unable to find IRQ\n");
@@ -2165,6 +2307,7 @@ static int aspeed_video_probe(struct platform_device *pdev)
if (!config)
return -ENODEV;
+ video->version = config->version;
video->jpeg_mode = config->jpeg_mode;
video->comp_size_read = config->comp_size_read;
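
The new GFX timing fields are decoded with the usual bitfield.h helpers, as in aspeed_video_get_resolution_gfx() above. A worked example with an illustrative register value:

	u32 h_val = 0x031f0418;	/* hypothetical GFX_H_DISPLAY readout */
	u32 hde = FIELD_GET(GFX_H_DISPLAY_DE, h_val) + 1;	/* 0x31f + 1 = 800 */
	u32 htotal = FIELD_GET(GFX_H_DISPLAY_TOTAL, h_val) + 1;	/* 0x418 + 1 = 1049 */
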
diff --git a/drivers/media/platform/cadence/cdns-csi2rx.c b/drivers/media/platform/cadence/cdns-csi2rx.c
index 7f1ce95cdc3f..8c19f125da3e 100644
--- a/drivers/media/platform/cadence/cdns-csi2rx.c
+++ b/drivers/media/platform/cadence/cdns-csi2rx.c
@@ -5,8 +5,10 @@
* Copyright (C) 2017 Cadence Design Systems Inc.
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/export.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
@@ -17,6 +19,7 @@
#include <linux/reset.h>
#include <linux/slab.h>
+#include <media/cadence/cdns-csi2rx.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
@@ -52,7 +55,9 @@
#define CSI2RX_STREAM_DATA_CFG_VC_SELECT(n) BIT((n) + 16)
#define CSI2RX_STREAM_CFG_REG(n) (CSI2RX_STREAM_BASE(n) + 0x00c)
-#define CSI2RX_STREAM_CFG_FIFO_MODE_LARGE_BUF (1 << 8)
+#define CSI2RX_STREAM_CFG_FIFO_MODE_LARGE_BUF BIT(8)
+#define CSI2RX_STREAM_CFG_NUM_PIXELS_MASK GENMASK(5, 4)
+#define CSI2RX_STREAM_CFG_NUM_PIXELS(n) ((n) >> 1U)
#define CSI2RX_LANES_MAX 4
#define CSI2RX_STREAMS_MAX 4
@@ -87,7 +92,10 @@ enum csi2rx_pads {
struct csi2rx_fmt {
u32 code;
+ /* bit width of a single pixel on the CSI-2 bus */
u8 bpp;
+ /* max pixels per clock supported on output bus */
+ u8 max_pixels;
};
struct csi2rx_event {
@@ -132,6 +140,7 @@ struct csi2rx_priv {
struct reset_control *pixel_rst[CSI2RX_STREAMS_MAX];
struct phy *dphy;
+ u8 num_pixels[CSI2RX_STREAMS_MAX];
u8 lanes[CSI2RX_LANES_MAX];
u8 num_lanes;
u8 max_lanes;
@@ -149,22 +158,22 @@ struct csi2rx_priv {
};
static const struct csi2rx_fmt formats[] = {
- { .code = MEDIA_BUS_FMT_YUYV8_1X16, .bpp = 16, },
- { .code = MEDIA_BUS_FMT_UYVY8_1X16, .bpp = 16, },
- { .code = MEDIA_BUS_FMT_YVYU8_1X16, .bpp = 16, },
- { .code = MEDIA_BUS_FMT_VYUY8_1X16, .bpp = 16, },
- { .code = MEDIA_BUS_FMT_SBGGR8_1X8, .bpp = 8, },
- { .code = MEDIA_BUS_FMT_SGBRG8_1X8, .bpp = 8, },
- { .code = MEDIA_BUS_FMT_SGRBG8_1X8, .bpp = 8, },
- { .code = MEDIA_BUS_FMT_SRGGB8_1X8, .bpp = 8, },
- { .code = MEDIA_BUS_FMT_Y8_1X8, .bpp = 8, },
- { .code = MEDIA_BUS_FMT_SBGGR10_1X10, .bpp = 10, },
- { .code = MEDIA_BUS_FMT_SGBRG10_1X10, .bpp = 10, },
- { .code = MEDIA_BUS_FMT_SGRBG10_1X10, .bpp = 10, },
- { .code = MEDIA_BUS_FMT_SRGGB10_1X10, .bpp = 10, },
- { .code = MEDIA_BUS_FMT_RGB565_1X16, .bpp = 16, },
- { .code = MEDIA_BUS_FMT_RGB888_1X24, .bpp = 24, },
- { .code = MEDIA_BUS_FMT_BGR888_1X24, .bpp = 24, },
+ { .code = MEDIA_BUS_FMT_YUYV8_1X16, .bpp = 16, .max_pixels = 2, },
+ { .code = MEDIA_BUS_FMT_UYVY8_1X16, .bpp = 16, .max_pixels = 2, },
+ { .code = MEDIA_BUS_FMT_YVYU8_1X16, .bpp = 16, .max_pixels = 2, },
+ { .code = MEDIA_BUS_FMT_VYUY8_1X16, .bpp = 16, .max_pixels = 2, },
+ { .code = MEDIA_BUS_FMT_SBGGR8_1X8, .bpp = 8, .max_pixels = 4, },
+ { .code = MEDIA_BUS_FMT_SGBRG8_1X8, .bpp = 8, .max_pixels = 4, },
+ { .code = MEDIA_BUS_FMT_SGRBG8_1X8, .bpp = 8, .max_pixels = 4, },
+ { .code = MEDIA_BUS_FMT_SRGGB8_1X8, .bpp = 8, .max_pixels = 4, },
+ { .code = MEDIA_BUS_FMT_Y8_1X8, .bpp = 8, .max_pixels = 4, },
+ { .code = MEDIA_BUS_FMT_SBGGR10_1X10, .bpp = 10, .max_pixels = 2, },
+ { .code = MEDIA_BUS_FMT_SGBRG10_1X10, .bpp = 10, .max_pixels = 2, },
+ { .code = MEDIA_BUS_FMT_SGRBG10_1X10, .bpp = 10, .max_pixels = 2, },
+ { .code = MEDIA_BUS_FMT_SRGGB10_1X10, .bpp = 10, .max_pixels = 2, },
+ { .code = MEDIA_BUS_FMT_RGB565_1X16, .bpp = 16, .max_pixels = 1, },
+ { .code = MEDIA_BUS_FMT_RGB888_1X24, .bpp = 24, .max_pixels = 1, },
+ { .code = MEDIA_BUS_FMT_BGR888_1X24, .bpp = 24, .max_pixels = 1, },
};
static void csi2rx_configure_error_irq_mask(void __iomem *base,
@@ -370,7 +379,9 @@ static int csi2rx_start(struct csi2rx_priv *csi2rx)
reset_control_deassert(csi2rx->pixel_rst[i]);
- writel(CSI2RX_STREAM_CFG_FIFO_MODE_LARGE_BUF,
+ writel(CSI2RX_STREAM_CFG_FIFO_MODE_LARGE_BUF |
+ FIELD_PREP(CSI2RX_STREAM_CFG_NUM_PIXELS_MASK,
+ csi2rx->num_pixels[i]),
csi2rx->base + CSI2RX_STREAM_CFG_REG(i));
/*
@@ -569,6 +580,33 @@ static int csi2rx_init_state(struct v4l2_subdev *subdev,
return csi2rx_set_fmt(subdev, state, &format);
}
+int cdns_csi2rx_negotiate_ppc(struct v4l2_subdev *subdev, unsigned int pad,
+ u8 *ppc)
+{
+ struct csi2rx_priv *csi2rx = v4l2_subdev_to_csi2rx(subdev);
+ const struct csi2rx_fmt *csi_fmt;
+ struct v4l2_subdev_state *state;
+ struct v4l2_mbus_framefmt *fmt;
+
+ if (!ppc || pad < CSI2RX_PAD_SOURCE_STREAM0 || pad >= CSI2RX_PAD_MAX)
+ return -EINVAL;
+
+ state = v4l2_subdev_lock_and_get_active_state(subdev);
+ fmt = v4l2_subdev_state_get_format(state, pad);
+ csi_fmt = csi2rx_get_fmt_by_code(fmt->code);
+
+ /* Reduce requested PPC if it is too high */
+ *ppc = min(*ppc, csi_fmt->max_pixels);
+
+ v4l2_subdev_unlock_state(state);
+
+ csi2rx->num_pixels[pad - CSI2RX_PAD_SOURCE_STREAM0] =
+ CSI2RX_STREAM_CFG_NUM_PIXELS(*ppc);
+
+ return 0;
+}
+EXPORT_SYMBOL_FOR_MODULES(cdns_csi2rx_negotiate_ppc, "j721e-csi2rx");
+
static const struct v4l2_subdev_pad_ops csi2rx_pad_ops = {
.enum_mbus_code = csi2rx_enum_mbus_code,
.get_fmt = v4l2_subdev_get_fmt,
@@ -595,6 +633,7 @@ static const struct v4l2_subdev_internal_ops csi2rx_internal_ops = {
static const struct media_entity_operations csi2rx_media_ops = {
.link_validate = v4l2_subdev_link_validate,
+ .get_fwnode_pad = v4l2_subdev_get_fwnode_pad_1_to_1,
};
static int csi2rx_async_bound(struct v4l2_async_notifier *notifier,
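
cdns_csi2rx_negotiate_ppc() is exported only to the j721e-csi2rx consumer, whose half of the handshake is not in this diff. A hypothetical call site (subdev pointer, pad index and dev are placeholders):

	u8 ppc = 4;	/* request the widest output the format allows */
	int ret;

	ret = cdns_csi2rx_negotiate_ppc(bridge_sd, 1 /* first source pad, assumed */, &ppc);
	if (!ret)
		dev_dbg(dev, "bridge granted %u pixel(s) per clock\n", ppc);
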
diff --git a/drivers/media/platform/chips-media/coda/coda-common.c b/drivers/media/platform/chips-media/coda/coda-common.c
index e6e3f5ec24f6..9a57b042d9fd 100644
--- a/drivers/media/platform/chips-media/coda/coda-common.c
+++ b/drivers/media/platform/chips-media/coda/coda-common.c
@@ -56,6 +56,11 @@
#define fh_to_ctx(__fh) container_of(__fh, struct coda_ctx, fh)
+static inline struct coda_ctx *file_to_ctx(struct file *filp)
+{
+ return fh_to_ctx(file_to_v4l2_fh(filp));
+}
+
int coda_debug;
module_param(coda_debug, int, 0644);
MODULE_PARM_DESC(coda_debug, "Debug level (0-2)");
@@ -422,7 +427,7 @@ out:
static int coda_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct coda_ctx *ctx = fh_to_ctx(priv);
+ struct coda_ctx *ctx = file_to_ctx(file);
strscpy(cap->driver, CODA_NAME, sizeof(cap->driver));
strscpy(cap->card, coda_product_name(ctx->dev->devtype->product),
@@ -442,7 +447,7 @@ static int coda_enum_fmt(struct file *file, void *priv,
{
struct video_device *vdev = video_devdata(file);
const struct coda_video_device *cvd = to_coda_video_device(vdev);
- struct coda_ctx *ctx = fh_to_ctx(priv);
+ struct coda_ctx *ctx = file_to_ctx(file);
const u32 *formats;
if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
@@ -492,7 +497,7 @@ static int coda_g_fmt(struct file *file, void *priv,
struct v4l2_format *f)
{
struct coda_q_data *q_data;
- struct coda_ctx *ctx = fh_to_ctx(priv);
+ struct coda_ctx *ctx = file_to_ctx(file);
q_data = get_q_data(ctx, f->type);
if (!q_data)
@@ -653,7 +658,7 @@ static int coda_try_fmt(struct coda_ctx *ctx, const struct coda_codec *codec,
static int coda_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct coda_ctx *ctx = fh_to_ctx(priv);
+ struct coda_ctx *ctx = file_to_ctx(file);
const struct coda_q_data *q_data_src;
const struct coda_codec *codec;
struct vb2_queue *src_vq;
@@ -759,7 +764,7 @@ static void coda_set_default_colorspace(struct v4l2_pix_format *fmt)
static int coda_try_fmt_vid_out(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct coda_ctx *ctx = fh_to_ctx(priv);
+ struct coda_ctx *ctx = file_to_ctx(file);
struct coda_dev *dev = ctx->dev;
const struct coda_q_data *q_data_dst;
const struct coda_codec *codec;
@@ -853,7 +858,7 @@ static int coda_s_fmt(struct coda_ctx *ctx, struct v4l2_format *f,
static int coda_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct coda_ctx *ctx = fh_to_ctx(priv);
+ struct coda_ctx *ctx = file_to_ctx(file);
struct coda_q_data *q_data_src;
const struct coda_codec *codec;
struct v4l2_rect r;
@@ -905,7 +910,7 @@ static int coda_s_fmt_vid_cap(struct file *file, void *priv,
static int coda_s_fmt_vid_out(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct coda_ctx *ctx = fh_to_ctx(priv);
+ struct coda_ctx *ctx = file_to_ctx(file);
const struct coda_codec *codec;
struct v4l2_format f_cap;
struct vb2_queue *dst_vq;
@@ -961,7 +966,7 @@ static int coda_s_fmt_vid_out(struct file *file, void *priv,
static int coda_reqbufs(struct file *file, void *priv,
struct v4l2_requestbuffers *rb)
{
- struct coda_ctx *ctx = fh_to_ctx(priv);
+ struct coda_ctx *ctx = file_to_ctx(file);
int ret;
ret = v4l2_m2m_reqbufs(file, ctx->fh.m2m_ctx, rb);
@@ -981,7 +986,7 @@ static int coda_reqbufs(struct file *file, void *priv,
static int coda_qbuf(struct file *file, void *priv,
struct v4l2_buffer *buf)
{
- struct coda_ctx *ctx = fh_to_ctx(priv);
+ struct coda_ctx *ctx = file_to_ctx(file);
if (ctx->inst_type == CODA_INST_DECODER &&
buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
@@ -992,7 +997,7 @@ static int coda_qbuf(struct file *file, void *priv,
static int coda_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
{
- struct coda_ctx *ctx = fh_to_ctx(priv);
+ struct coda_ctx *ctx = file_to_ctx(file);
int ret;
ret = v4l2_m2m_dqbuf(file, ctx->fh.m2m_ctx, buf);
@@ -1020,7 +1025,7 @@ void coda_m2m_buf_done(struct coda_ctx *ctx, struct vb2_v4l2_buffer *buf,
static int coda_g_selection(struct file *file, void *fh,
struct v4l2_selection *s)
{
- struct coda_ctx *ctx = fh_to_ctx(fh);
+ struct coda_ctx *ctx = file_to_ctx(file);
struct coda_q_data *q_data;
struct v4l2_rect r, *rsel;
@@ -1066,7 +1071,7 @@ static int coda_g_selection(struct file *file, void *fh,
static int coda_s_selection(struct file *file, void *fh,
struct v4l2_selection *s)
{
- struct coda_ctx *ctx = fh_to_ctx(fh);
+ struct coda_ctx *ctx = file_to_ctx(file);
struct coda_q_data *q_data;
switch (s->target) {
@@ -1121,7 +1126,7 @@ static void coda_wake_up_capture_queue(struct coda_ctx *ctx)
static int coda_encoder_cmd(struct file *file, void *fh,
struct v4l2_encoder_cmd *ec)
{
- struct coda_ctx *ctx = fh_to_ctx(fh);
+ struct coda_ctx *ctx = file_to_ctx(file);
struct vb2_v4l2_buffer *buf;
int ret;
@@ -1202,7 +1207,7 @@ static bool coda_mark_last_dst_buf(struct coda_ctx *ctx)
static int coda_decoder_cmd(struct file *file, void *fh,
struct v4l2_decoder_cmd *dc)
{
- struct coda_ctx *ctx = fh_to_ctx(fh);
+ struct coda_ctx *ctx = file_to_ctx(file);
struct coda_dev *dev = ctx->dev;
struct vb2_v4l2_buffer *buf;
struct vb2_queue *dst_vq;
@@ -1281,7 +1286,7 @@ static int coda_decoder_cmd(struct file *file, void *fh,
static int coda_enum_framesizes(struct file *file, void *fh,
struct v4l2_frmsizeenum *fsize)
{
- struct coda_ctx *ctx = fh_to_ctx(fh);
+ struct coda_ctx *ctx = file_to_ctx(file);
struct coda_q_data *q_data_dst;
const struct coda_codec *codec;
@@ -1314,7 +1319,7 @@ static int coda_enum_framesizes(struct file *file, void *fh,
static int coda_enum_frameintervals(struct file *file, void *fh,
struct v4l2_frmivalenum *f)
{
- struct coda_ctx *ctx = fh_to_ctx(fh);
+ struct coda_ctx *ctx = file_to_ctx(file);
struct coda_q_data *q_data;
const struct coda_codec *codec;
@@ -1353,7 +1358,7 @@ static int coda_enum_frameintervals(struct file *file, void *fh,
static int coda_g_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
{
- struct coda_ctx *ctx = fh_to_ctx(fh);
+ struct coda_ctx *ctx = file_to_ctx(file);
struct v4l2_fract *tpf;
if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
@@ -1436,7 +1441,7 @@ static uint32_t coda_timeperframe_to_frate(struct v4l2_fract *timeperframe)
static int coda_s_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
{
- struct coda_ctx *ctx = fh_to_ctx(fh);
+ struct coda_ctx *ctx = file_to_ctx(file);
struct v4l2_fract *tpf;
if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
@@ -2637,8 +2642,7 @@ static int coda_open(struct file *file)
if (ctx->ops->seq_end_work)
INIT_WORK(&ctx->seq_end_work, ctx->ops->seq_end_work);
v4l2_fh_init(&ctx->fh, video_devdata(file));
- file->private_data = &ctx->fh;
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
ctx->dev = dev;
ctx->idx = idx;
@@ -2721,7 +2725,7 @@ err_clk_ahb:
err_clk_enable:
pm_runtime_put_sync(dev->dev);
err_pm_get:
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
err_coda_name_init:
ida_free(&dev->ida, ctx->idx);
@@ -2733,7 +2737,7 @@ err_coda_max:
static int coda_release(struct file *file)
{
struct coda_dev *dev = video_drvdata(file);
- struct coda_ctx *ctx = fh_to_ctx(file->private_data);
+ struct coda_ctx *ctx = file_to_ctx(file);
coda_dbg(1, ctx, "release instance (%p)\n", ctx);
@@ -2759,7 +2763,7 @@ static int coda_release(struct file *file)
clk_disable_unprepare(dev->clk_ahb);
clk_disable_unprepare(dev->clk_per);
pm_runtime_put_sync(dev->dev);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
ida_free(&dev->ida, ctx->idx);
if (ctx->ops->release)
diff --git a/drivers/media/platform/chips-media/wave5/wave5-helper.c b/drivers/media/platform/chips-media/wave5/wave5-helper.c
index 2c9d8cbca6e4..f03ad9c0de22 100644
--- a/drivers/media/platform/chips-media/wave5/wave5-helper.c
+++ b/drivers/media/platform/chips-media/wave5/wave5-helper.c
@@ -27,7 +27,7 @@ const char *state_to_str(enum vpu_instance_state state)
}
}
-void wave5_cleanup_instance(struct vpu_instance *inst)
+void wave5_cleanup_instance(struct vpu_instance *inst, struct file *filp)
{
int i;
@@ -46,7 +46,7 @@ void wave5_cleanup_instance(struct vpu_instance *inst)
wave5_vdi_free_dma_memory(inst->dev, &inst->bitstream_vbuf);
v4l2_ctrl_handler_free(&inst->v4l2_ctrl_hdl);
if (inst->v4l2_fh.vdev) {
- v4l2_fh_del(&inst->v4l2_fh);
+ v4l2_fh_del(&inst->v4l2_fh, filp);
v4l2_fh_exit(&inst->v4l2_fh);
}
list_del_init(&inst->list);
@@ -59,7 +59,7 @@ int wave5_vpu_release_device(struct file *filp,
int (*close_func)(struct vpu_instance *inst, u32 *fail_res),
char *name)
{
- struct vpu_instance *inst = wave5_to_vpu_inst(filp->private_data);
+ struct vpu_instance *inst = file_to_vpu_inst(filp);
int ret = 0;
v4l2_m2m_ctx_release(inst->v4l2_fh.m2m_ctx);
@@ -78,7 +78,7 @@ int wave5_vpu_release_device(struct file *filp,
}
}
- wave5_cleanup_instance(inst);
+ wave5_cleanup_instance(inst, filp);
return ret;
}
@@ -142,7 +142,7 @@ int wave5_vpu_subscribe_event(struct v4l2_fh *fh, const struct v4l2_event_subscr
int wave5_vpu_g_fmt_out(struct file *file, void *fh, struct v4l2_format *f)
{
- struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ struct vpu_instance *inst = file_to_vpu_inst(file);
int i;
f->fmt.pix_mp.width = inst->src_fmt.width;
diff --git a/drivers/media/platform/chips-media/wave5/wave5-helper.h b/drivers/media/platform/chips-media/wave5/wave5-helper.h
index 9937fce553fc..976a402e426f 100644
--- a/drivers/media/platform/chips-media/wave5/wave5-helper.h
+++ b/drivers/media/platform/chips-media/wave5/wave5-helper.h
@@ -14,7 +14,7 @@
#define MAX_FMTS 12
const char *state_to_str(enum vpu_instance_state state);
-void wave5_cleanup_instance(struct vpu_instance *inst);
+void wave5_cleanup_instance(struct vpu_instance *inst, struct file *filp);
int wave5_vpu_release_device(struct file *filp,
int (*close_func)(struct vpu_instance *inst, u32 *fail_res),
char *name);
diff --git a/drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c b/drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c
index a9ce032cc5a2..e3038c18ca36 100644
--- a/drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c
+++ b/drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c
@@ -505,7 +505,7 @@ static int wave5_vpu_dec_enum_fmt_cap(struct file *file, void *fh, struct v4l2_f
static int wave5_vpu_dec_try_fmt_cap(struct file *file, void *fh, struct v4l2_format *f)
{
- struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ struct vpu_instance *inst = file_to_vpu_inst(file);
struct dec_info *p_dec_info = &inst->codec_info->dec_info;
const struct v4l2_frmsize_stepwise *frmsize;
const struct vpu_format *vpu_fmt;
@@ -546,7 +546,7 @@ static int wave5_vpu_dec_try_fmt_cap(struct file *file, void *fh, struct v4l2_fo
static int wave5_vpu_dec_s_fmt_cap(struct file *file, void *fh, struct v4l2_format *f)
{
- struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ struct vpu_instance *inst = file_to_vpu_inst(file);
int i, ret;
dev_dbg(inst->dev->dev,
@@ -605,7 +605,7 @@ static int wave5_vpu_dec_s_fmt_cap(struct file *file, void *fh, struct v4l2_form
static int wave5_vpu_dec_g_fmt_cap(struct file *file, void *fh, struct v4l2_format *f)
{
- struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ struct vpu_instance *inst = file_to_vpu_inst(file);
int i;
f->fmt.pix_mp.width = inst->dst_fmt.width;
@@ -629,7 +629,7 @@ static int wave5_vpu_dec_g_fmt_cap(struct file *file, void *fh, struct v4l2_form
static int wave5_vpu_dec_enum_fmt_out(struct file *file, void *fh, struct v4l2_fmtdesc *f)
{
- struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ struct vpu_instance *inst = file_to_vpu_inst(file);
const struct vpu_format *vpu_fmt;
dev_dbg(inst->dev->dev, "%s: index: %u\n", __func__, f->index);
@@ -646,7 +646,7 @@ static int wave5_vpu_dec_enum_fmt_out(struct file *file, void *fh, struct v4l2_f
static int wave5_vpu_dec_try_fmt_out(struct file *file, void *fh, struct v4l2_format *f)
{
- struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ struct vpu_instance *inst = file_to_vpu_inst(file);
const struct v4l2_frmsize_stepwise *frmsize;
const struct vpu_format *vpu_fmt;
int width, height;
@@ -677,7 +677,7 @@ static int wave5_vpu_dec_try_fmt_out(struct file *file, void *fh, struct v4l2_fo
static int wave5_vpu_dec_s_fmt_out(struct file *file, void *fh, struct v4l2_format *f)
{
- struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ struct vpu_instance *inst = file_to_vpu_inst(file);
const struct vpu_format *vpu_fmt;
int i, ret;
@@ -726,7 +726,7 @@ static int wave5_vpu_dec_s_fmt_out(struct file *file, void *fh, struct v4l2_form
static int wave5_vpu_dec_g_selection(struct file *file, void *fh, struct v4l2_selection *s)
{
- struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ struct vpu_instance *inst = file_to_vpu_inst(file);
dev_dbg(inst->dev->dev, "%s: type: %u | target: %u\n", __func__, s->type, s->target);
@@ -760,7 +760,7 @@ static int wave5_vpu_dec_g_selection(struct file *file, void *fh, struct v4l2_se
static int wave5_vpu_dec_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
{
- struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ struct vpu_instance *inst = file_to_vpu_inst(file);
if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
@@ -871,7 +871,7 @@ unlock_and_return:
static int wave5_vpu_dec_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *dc)
{
- struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ struct vpu_instance *inst = file_to_vpu_inst(file);
struct v4l2_m2m_ctx *m2m_ctx = inst->v4l2_fh.m2m_ctx;
int ret;
@@ -1757,8 +1757,7 @@ static int wave5_vpu_open_dec(struct file *filp)
return -ENOMEM;
v4l2_fh_init(&inst->v4l2_fh, vdev);
- filp->private_data = &inst->v4l2_fh;
- v4l2_fh_add(&inst->v4l2_fh);
+ v4l2_fh_add(&inst->v4l2_fh, filp);
INIT_LIST_HEAD(&inst->list);
@@ -1836,7 +1835,7 @@ static int wave5_vpu_open_dec(struct file *filp)
return 0;
cleanup_inst:
- wave5_cleanup_instance(inst);
+ wave5_cleanup_instance(inst, filp);
return ret;
}
diff --git a/drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c b/drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c
index 35913a7de834..9bfaa9fb3ceb 100644
--- a/drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c
+++ b/drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c
@@ -360,7 +360,7 @@ static int wave5_vpu_enc_enum_framesizes(struct file *f, void *fh, struct v4l2_f
static int wave5_vpu_enc_enum_fmt_cap(struct file *file, void *fh, struct v4l2_fmtdesc *f)
{
- struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ struct vpu_instance *inst = file_to_vpu_inst(file);
const struct vpu_format *vpu_fmt;
dev_dbg(inst->dev->dev, "%s: index: %u\n", __func__, f->index);
@@ -377,7 +377,7 @@ static int wave5_vpu_enc_enum_fmt_cap(struct file *file, void *fh, struct v4l2_f
static int wave5_vpu_enc_try_fmt_cap(struct file *file, void *fh, struct v4l2_format *f)
{
- struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ struct vpu_instance *inst = file_to_vpu_inst(file);
const struct v4l2_frmsize_stepwise *frmsize;
const struct vpu_format *vpu_fmt;
int width, height;
@@ -411,7 +411,7 @@ static int wave5_vpu_enc_try_fmt_cap(struct file *file, void *fh, struct v4l2_fo
static int wave5_vpu_enc_s_fmt_cap(struct file *file, void *fh, struct v4l2_format *f)
{
- struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ struct vpu_instance *inst = file_to_vpu_inst(file);
int i, ret;
dev_dbg(inst->dev->dev, "%s: fourcc: %u width: %u height: %u num_planes: %u field: %u\n",
@@ -445,7 +445,7 @@ static int wave5_vpu_enc_s_fmt_cap(struct file *file, void *fh, struct v4l2_form
static int wave5_vpu_enc_g_fmt_cap(struct file *file, void *fh, struct v4l2_format *f)
{
- struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ struct vpu_instance *inst = file_to_vpu_inst(file);
int i;
f->fmt.pix_mp.width = inst->dst_fmt.width;
@@ -469,7 +469,7 @@ static int wave5_vpu_enc_g_fmt_cap(struct file *file, void *fh, struct v4l2_form
static int wave5_vpu_enc_enum_fmt_out(struct file *file, void *fh, struct v4l2_fmtdesc *f)
{
- struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ struct vpu_instance *inst = file_to_vpu_inst(file);
const struct vpu_format *vpu_fmt;
dev_dbg(inst->dev->dev, "%s: index: %u\n", __func__, f->index);
@@ -486,7 +486,7 @@ static int wave5_vpu_enc_enum_fmt_out(struct file *file, void *fh, struct v4l2_f
static int wave5_vpu_enc_try_fmt_out(struct file *file, void *fh, struct v4l2_format *f)
{
- struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ struct vpu_instance *inst = file_to_vpu_inst(file);
const struct v4l2_frmsize_stepwise *frmsize;
const struct vpu_format *vpu_fmt;
int width, height;
@@ -515,7 +515,7 @@ static int wave5_vpu_enc_try_fmt_out(struct file *file, void *fh, struct v4l2_fo
static int wave5_vpu_enc_s_fmt_out(struct file *file, void *fh, struct v4l2_format *f)
{
- struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ struct vpu_instance *inst = file_to_vpu_inst(file);
const struct vpu_format *vpu_fmt;
const struct v4l2_format_info *info;
int i, ret;
@@ -543,7 +543,7 @@ static int wave5_vpu_enc_s_fmt_out(struct file *file, void *fh, struct v4l2_form
if (!info)
return -EINVAL;
- inst->cbcr_interleave = (info->comp_planes == 2) ? true : false;
+ inst->cbcr_interleave = info->comp_planes == 2;
switch (inst->src_fmt.pixelformat) {
case V4L2_PIX_FMT_NV21:
@@ -576,7 +576,7 @@ static int wave5_vpu_enc_s_fmt_out(struct file *file, void *fh, struct v4l2_form
static int wave5_vpu_enc_g_selection(struct file *file, void *fh, struct v4l2_selection *s)
{
- struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ struct vpu_instance *inst = file_to_vpu_inst(file);
dev_dbg(inst->dev->dev, "%s: type: %u | target: %u\n", __func__, s->type, s->target);
@@ -605,7 +605,7 @@ static int wave5_vpu_enc_g_selection(struct file *file, void *fh, struct v4l2_se
static int wave5_vpu_enc_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
{
- struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ struct vpu_instance *inst = file_to_vpu_inst(file);
if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
return -EINVAL;
@@ -628,7 +628,7 @@ static int wave5_vpu_enc_s_selection(struct file *file, void *fh, struct v4l2_se
static int wave5_vpu_enc_encoder_cmd(struct file *file, void *fh, struct v4l2_encoder_cmd *ec)
{
- struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ struct vpu_instance *inst = file_to_vpu_inst(file);
struct v4l2_m2m_ctx *m2m_ctx = inst->v4l2_fh.m2m_ctx;
int ret;
@@ -661,7 +661,7 @@ static int wave5_vpu_enc_encoder_cmd(struct file *file, void *fh, struct v4l2_en
static int wave5_vpu_enc_g_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
{
- struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ struct vpu_instance *inst = file_to_vpu_inst(file);
dev_dbg(inst->dev->dev, "%s: type: %u\n", __func__, a->type);
@@ -681,7 +681,7 @@ static int wave5_vpu_enc_g_parm(struct file *file, void *fh, struct v4l2_streamp
static int wave5_vpu_enc_s_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
{
- struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ struct vpu_instance *inst = file_to_vpu_inst(file);
dev_dbg(inst->dev->dev, "%s: type: %u\n", __func__, a->type);
@@ -1582,8 +1582,7 @@ static int wave5_vpu_open_enc(struct file *filp)
return -ENOMEM;
v4l2_fh_init(&inst->v4l2_fh, vdev);
- filp->private_data = &inst->v4l2_fh;
- v4l2_fh_add(&inst->v4l2_fh);
+ v4l2_fh_add(&inst->v4l2_fh, filp);
INIT_LIST_HEAD(&inst->list);
@@ -1779,7 +1778,7 @@ static int wave5_vpu_open_enc(struct file *filp)
return 0;
cleanup_inst:
- wave5_cleanup_instance(inst);
+ wave5_cleanup_instance(inst, filp);
return ret;
}
diff --git a/drivers/media/platform/chips-media/wave5/wave5-vpu.h b/drivers/media/platform/chips-media/wave5/wave5-vpu.h
index 3847332551fc..5943bdaa9c4c 100644
--- a/drivers/media/platform/chips-media/wave5/wave5-vpu.h
+++ b/drivers/media/platform/chips-media/wave5/wave5-vpu.h
@@ -46,6 +46,11 @@ static inline struct vpu_instance *wave5_to_vpu_inst(struct v4l2_fh *vfh)
return container_of(vfh, struct vpu_instance, v4l2_fh);
}
+static inline struct vpu_instance *file_to_vpu_inst(struct file *filp)
+{
+ return wave5_to_vpu_inst(file_to_v4l2_fh(filp));
+}
+
static inline struct vpu_instance *wave5_ctrl_to_vpu_inst(struct v4l2_ctrl *vctrl)
{
return container_of(vctrl->handler, struct vpu_instance, v4l2_ctrl_hdl);
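A minimal sketch of the accessor pattern this series introduces per driver, assuming file_to_v4l2_fh() returns the struct v4l2_fh that v4l2_fh_add() registered for the given struct file (the core-side helper is defined outside these hunks); demo_ctx and file_to_demo_ctx are illustrative names, not from the patch:

	struct demo_ctx {
		struct v4l2_fh fh;	/* embedded per-open file handle */
		/* driver-private state follows */
	};

	static inline struct demo_ctx *file_to_demo_ctx(struct file *filp)
	{
		return container_of(file_to_v4l2_fh(filp), struct demo_ctx, fh);
	}

This is the same shape as file_to_vpu_inst() above: the per-open context is derived from the file handle itself rather than from the opaque priv pointer that used to alias file->private_data.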
diff --git a/drivers/media/platform/imagination/e5010-jpeg-enc.c b/drivers/media/platform/imagination/e5010-jpeg-enc.c
index ae868d9f73e1..c4e0097cb8b7 100644
--- a/drivers/media/platform/imagination/e5010-jpeg-enc.c
+++ b/drivers/media/platform/imagination/e5010-jpeg-enc.c
@@ -253,7 +253,7 @@ static int e5010_enum_fmt(struct file *file, void *priv, struct v4l2_fmtdesc *f)
{
int i, index = 0;
struct e5010_fmt *fmt = NULL;
- struct e5010_context *ctx = file->private_data;
+ struct e5010_context *ctx = to_e5010_context(file);
if (!V4L2_TYPE_IS_MULTIPLANAR(f->type)) {
v4l2_err(&ctx->e5010->v4l2_dev, "ENUMFMT with Invalid type: %d\n", f->type);
@@ -279,7 +279,7 @@ static int e5010_enum_fmt(struct file *file, void *priv, struct v4l2_fmtdesc *f)
static int e5010_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
- struct e5010_context *ctx = file->private_data;
+ struct e5010_context *ctx = to_e5010_context(file);
struct e5010_q_data *queue;
int i;
struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
@@ -380,14 +380,14 @@ static int e5010_jpeg_try_fmt(struct v4l2_format *f, struct e5010_context *ctx)
static int e5010_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
- struct e5010_context *ctx = file->private_data;
+ struct e5010_context *ctx = to_e5010_context(file);
return e5010_jpeg_try_fmt(f, ctx);
}
static int e5010_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
- struct e5010_context *ctx = file->private_data;
+ struct e5010_context *ctx = to_e5010_context(file);
struct vb2_queue *vq;
int ret = 0, i = 0;
struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
@@ -462,7 +462,7 @@ static int e5010_enum_framesizes(struct file *file, void *priv, struct v4l2_frms
static int e5010_g_selection(struct file *file, void *fh, struct v4l2_selection *s)
{
- struct e5010_context *ctx = file->private_data;
+ struct e5010_context *ctx = to_e5010_context(file);
struct e5010_q_data *queue;
if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
@@ -490,7 +490,7 @@ static int e5010_g_selection(struct file *file, void *fh, struct v4l2_selection
static int e5010_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
{
- struct e5010_context *ctx = file->private_data;
+ struct e5010_context *ctx = to_e5010_context(file);
struct e5010_q_data *queue;
struct vb2_queue *vq;
struct v4l2_rect base_rect;
@@ -742,8 +742,7 @@ static int e5010_open(struct file *file)
}
v4l2_fh_init(&ctx->fh, vdev);
- file->private_data = ctx;
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
ctx->e5010 = e5010;
ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(e5010->m2m_dev, ctx, queue_init);
@@ -770,7 +769,7 @@ static int e5010_open(struct file *file)
err_ctrls_setup:
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
exit:
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
mutex_unlock(&e5010->mutex);
free:
@@ -781,13 +780,13 @@ free:
static int e5010_release(struct file *file)
{
struct e5010_dev *e5010 = video_drvdata(file);
- struct e5010_context *ctx = file->private_data;
+ struct e5010_context *ctx = to_e5010_context(file);
dprintk(e5010, 1, "Releasing instance: 0x%p, m2m_ctx: 0x%p\n", ctx, ctx->fh.m2m_ctx);
mutex_lock(&e5010->mutex);
v4l2_ctrl_handler_free(&ctx->ctrl_handler);
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
kfree(ctx);
mutex_unlock(&e5010->mutex);
@@ -1262,7 +1261,7 @@ static void e5010_buf_queue(struct vb2_buffer *vb)
static int e5010_encoder_cmd(struct file *file, void *priv,
struct v4l2_encoder_cmd *cmd)
{
- struct e5010_context *ctx = file->private_data;
+ struct e5010_context *ctx = to_e5010_context(file);
int ret;
struct vb2_queue *cap_vq;
diff --git a/drivers/media/platform/imagination/e5010-jpeg-enc.h b/drivers/media/platform/imagination/e5010-jpeg-enc.h
index 71f49ead6898..da57bc1baa46 100644
--- a/drivers/media/platform/imagination/e5010-jpeg-enc.h
+++ b/drivers/media/platform/imagination/e5010-jpeg-enc.h
@@ -120,6 +120,11 @@ struct e5010_context {
u8 chroma_qp[QP_TABLE_SIZE];
};
+static inline struct e5010_context *to_e5010_context(struct file *filp)
+{
+ return container_of(file_to_v4l2_fh(filp), struct e5010_context, fh);
+}
+
/*
* Buffer structure
* Contains info for all buffers
diff --git a/drivers/media/platform/m2m-deinterlace.c b/drivers/media/platform/m2m-deinterlace.c
index 5188f3189096..e07e57d4206b 100644
--- a/drivers/media/platform/m2m-deinterlace.c
+++ b/drivers/media/platform/m2m-deinterlace.c
@@ -142,6 +142,11 @@ struct deinterlace_ctx {
struct dma_interleaved_template *xt;
};
+static inline struct deinterlace_ctx *file_to_ctx(struct file *filp)
+{
+ return container_of(file_to_v4l2_fh(filp), struct deinterlace_ctx, fh);
+}
+
/*
* mem2mem callbacks
*/
@@ -512,13 +517,13 @@ static int vidioc_g_fmt(struct deinterlace_ctx *ctx, struct v4l2_format *f)
static int vidioc_g_fmt_vid_out(struct file *file, void *priv,
struct v4l2_format *f)
{
- return vidioc_g_fmt(priv, f);
+ return vidioc_g_fmt(file_to_ctx(file), f);
}
static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- return vidioc_g_fmt(priv, f);
+ return vidioc_g_fmt(file_to_ctx(file), f);
}
static int vidioc_try_fmt(struct v4l2_format *f, struct deinterlace_fmt *fmt)
@@ -539,8 +544,8 @@ static int vidioc_try_fmt(struct v4l2_format *f, struct deinterlace_fmt *fmt)
static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
+ struct deinterlace_ctx *ctx = file_to_ctx(file);
struct deinterlace_fmt *fmt;
- struct deinterlace_ctx *ctx = priv;
fmt = find_format(f);
if (!fmt || !(fmt->types & MEM2MEM_CAPTURE))
@@ -633,20 +638,20 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
ret = vidioc_try_fmt_vid_cap(file, priv, f);
if (ret)
return ret;
- return vidioc_s_fmt(priv, f);
+ return vidioc_s_fmt(file_to_ctx(file), f);
}
static int vidioc_s_fmt_vid_out(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct deinterlace_ctx *ctx = priv;
+ struct deinterlace_ctx *ctx = file_to_ctx(file);
int ret;
ret = vidioc_try_fmt_vid_out(file, priv, f);
if (ret)
return ret;
- ret = vidioc_s_fmt(priv, f);
+ ret = vidioc_s_fmt(ctx, f);
if (!ret)
ctx->colorspace = f->fmt.pix.colorspace;
@@ -656,8 +661,8 @@ static int vidioc_s_fmt_vid_out(struct file *file, void *priv,
static int vidioc_streamon(struct file *file, void *priv,
enum v4l2_buf_type type)
{
+ struct deinterlace_ctx *ctx = file_to_ctx(file);
struct deinterlace_q_data *s_q_data, *d_q_data;
- struct deinterlace_ctx *ctx = priv;
s_q_data = get_q_data(V4L2_BUF_TYPE_VIDEO_OUTPUT);
d_q_data = get_q_data(V4L2_BUF_TYPE_VIDEO_CAPTURE);
@@ -842,7 +847,6 @@ static int deinterlace_open(struct file *file)
return -ENOMEM;
v4l2_fh_init(&ctx->fh, video_devdata(file));
- file->private_data = &ctx->fh;
ctx->dev = pcdev;
ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(pcdev->m2m_dev, ctx, &queue_init);
@@ -861,7 +865,7 @@ static int deinterlace_open(struct file *file)
}
ctx->colorspace = V4L2_COLORSPACE_REC709;
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
dprintk(pcdev, "Created instance %p, m2m_ctx: %p\n",
ctx, ctx->fh.m2m_ctx);
@@ -872,11 +876,11 @@ static int deinterlace_open(struct file *file)
static int deinterlace_release(struct file *file)
{
struct deinterlace_dev *pcdev = video_drvdata(file);
- struct deinterlace_ctx *ctx = file->private_data;
+ struct deinterlace_ctx *ctx = file_to_ctx(file);
dprintk(pcdev, "Releasing instance %p\n", ctx);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
kfree(ctx->xt);
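Each open()/release() conversion in this series follows one template; a sketch under the assumption that v4l2_fh_add() and v4l2_fh_del() now take the struct file and the core records the fh for later file_to_v4l2_fh() lookups (demo_* names are illustrative, reusing file_to_demo_ctx() from the sketch above):

	static int demo_open(struct file *file)
	{
		struct demo_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

		if (!ctx)
			return -ENOMEM;

		v4l2_fh_init(&ctx->fh, video_devdata(file));
		/* No file->private_data assignment: registering the fh
		 * against the file is what makes later lookups work. */
		v4l2_fh_add(&ctx->fh, file);
		return 0;
	}

	static int demo_release(struct file *file)
	{
		struct demo_ctx *ctx = file_to_demo_ctx(file);

		v4l2_fh_del(&ctx->fh, file);
		v4l2_fh_exit(&ctx->fh);
		kfree(ctx);
		return 0;
	}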
diff --git a/drivers/media/platform/marvell/cafe-driver.c b/drivers/media/platform/marvell/cafe-driver.c
index ef810249def6..f9796de92aa7 100644
--- a/drivers/media/platform/marvell/cafe-driver.c
+++ b/drivers/media/platform/marvell/cafe-driver.c
@@ -14,7 +14,7 @@
* Written by Jonathan Corbet, corbet@lwn.net.
*
* v4l2_device/v4l2_subdev conversion by:
- * Copyright (C) 2009 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2009 Hans Verkuil <hverkuil@kernel.org>
*/
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
index 7eb12449b63a..6268d651bdcf 100644
--- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
+++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
@@ -119,9 +119,9 @@ static inline struct mtk_jpeg_ctx *ctrl_to_ctx(struct v4l2_ctrl *ctrl)
return container_of(ctrl->handler, struct mtk_jpeg_ctx, ctrl_hdl);
}
-static inline struct mtk_jpeg_ctx *mtk_jpeg_fh_to_ctx(struct v4l2_fh *fh)
+static inline struct mtk_jpeg_ctx *mtk_jpeg_file_to_ctx(struct file *filp)
{
- return container_of(fh, struct mtk_jpeg_ctx, fh);
+ return container_of(file_to_v4l2_fh(filp), struct mtk_jpeg_ctx, fh);
}
static inline struct mtk_jpeg_src_buf *mtk_jpeg_vb2_to_srcbuf(
@@ -212,7 +212,7 @@ static int mtk_jpeg_enum_fmt(struct mtk_jpeg_fmt *mtk_jpeg_formats, int n,
static int mtk_jpeg_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- struct mtk_jpeg_ctx *ctx = mtk_jpeg_fh_to_ctx(priv);
+ struct mtk_jpeg_ctx *ctx = mtk_jpeg_file_to_ctx(file);
struct mtk_jpeg_dev *jpeg = ctx->jpeg;
return mtk_jpeg_enum_fmt(jpeg->variant->formats,
@@ -223,7 +223,7 @@ static int mtk_jpeg_enum_fmt_vid_cap(struct file *file, void *priv,
static int mtk_jpeg_enum_fmt_vid_out(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- struct mtk_jpeg_ctx *ctx = mtk_jpeg_fh_to_ctx(priv);
+ struct mtk_jpeg_ctx *ctx = mtk_jpeg_file_to_ctx(file);
struct mtk_jpeg_dev *jpeg = ctx->jpeg;
return mtk_jpeg_enum_fmt(jpeg->variant->formats,
@@ -305,7 +305,7 @@ static int mtk_jpeg_g_fmt_vid_mplane(struct file *file, void *priv,
struct vb2_queue *vq;
struct mtk_jpeg_q_data *q_data = NULL;
struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
- struct mtk_jpeg_ctx *ctx = mtk_jpeg_fh_to_ctx(priv);
+ struct mtk_jpeg_ctx *ctx = mtk_jpeg_file_to_ctx(file);
struct mtk_jpeg_dev *jpeg = ctx->jpeg;
int i;
@@ -351,7 +351,7 @@ static int mtk_jpeg_g_fmt_vid_mplane(struct file *file, void *priv,
static int mtk_jpeg_try_fmt_vid_cap_mplane(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct mtk_jpeg_ctx *ctx = mtk_jpeg_fh_to_ctx(priv);
+ struct mtk_jpeg_ctx *ctx = mtk_jpeg_file_to_ctx(file);
struct mtk_jpeg_dev *jpeg = ctx->jpeg;
struct mtk_jpeg_fmt *fmt;
@@ -380,7 +380,7 @@ static int mtk_jpeg_try_fmt_vid_cap_mplane(struct file *file, void *priv,
static int mtk_jpeg_try_fmt_vid_out_mplane(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct mtk_jpeg_ctx *ctx = mtk_jpeg_fh_to_ctx(priv);
+ struct mtk_jpeg_ctx *ctx = mtk_jpeg_file_to_ctx(file);
struct mtk_jpeg_dev *jpeg = ctx->jpeg;
struct mtk_jpeg_fmt *fmt;
@@ -470,7 +470,7 @@ static int mtk_jpeg_s_fmt_vid_out_mplane(struct file *file, void *priv,
if (ret)
return ret;
- return mtk_jpeg_s_fmt_mplane(mtk_jpeg_fh_to_ctx(priv), f,
+ return mtk_jpeg_s_fmt_mplane(mtk_jpeg_file_to_ctx(file), f,
MTK_JPEG_FMT_FLAG_OUTPUT);
}
@@ -483,7 +483,7 @@ static int mtk_jpeg_s_fmt_vid_cap_mplane(struct file *file, void *priv,
if (ret)
return ret;
- return mtk_jpeg_s_fmt_mplane(mtk_jpeg_fh_to_ctx(priv), f,
+ return mtk_jpeg_s_fmt_mplane(mtk_jpeg_file_to_ctx(file), f,
MTK_JPEG_FMT_FLAG_CAPTURE);
}
@@ -512,7 +512,7 @@ static int mtk_jpeg_subscribe_event(struct v4l2_fh *fh,
static int mtk_jpeg_enc_g_selection(struct file *file, void *priv,
struct v4l2_selection *s)
{
- struct mtk_jpeg_ctx *ctx = mtk_jpeg_fh_to_ctx(priv);
+ struct mtk_jpeg_ctx *ctx = mtk_jpeg_file_to_ctx(file);
if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
return -EINVAL;
@@ -537,7 +537,7 @@ static int mtk_jpeg_enc_g_selection(struct file *file, void *priv,
static int mtk_jpeg_dec_g_selection(struct file *file, void *priv,
struct v4l2_selection *s)
{
- struct mtk_jpeg_ctx *ctx = mtk_jpeg_fh_to_ctx(priv);
+ struct mtk_jpeg_ctx *ctx = mtk_jpeg_file_to_ctx(file);
if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
@@ -566,7 +566,7 @@ static int mtk_jpeg_dec_g_selection(struct file *file, void *priv,
static int mtk_jpeg_enc_s_selection(struct file *file, void *priv,
struct v4l2_selection *s)
{
- struct mtk_jpeg_ctx *ctx = mtk_jpeg_fh_to_ctx(priv);
+ struct mtk_jpeg_ctx *ctx = mtk_jpeg_file_to_ctx(file);
if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
return -EINVAL;
@@ -588,8 +588,8 @@ static int mtk_jpeg_enc_s_selection(struct file *file, void *priv,
static int mtk_jpeg_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
{
- struct v4l2_fh *fh = file->private_data;
- struct mtk_jpeg_ctx *ctx = mtk_jpeg_fh_to_ctx(priv);
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
+ struct mtk_jpeg_ctx *ctx = mtk_jpeg_file_to_ctx(file);
struct vb2_queue *vq;
struct vb2_buffer *vb;
struct mtk_jpeg_src_buf *jpeg_src_buf;
@@ -1171,8 +1171,7 @@ static int mtk_jpeg_open(struct file *file)
INIT_LIST_HEAD(&ctx->dst_done_queue);
spin_lock_init(&ctx->done_queue_lock);
v4l2_fh_init(&ctx->fh, vfd);
- file->private_data = &ctx->fh;
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
ctx->jpeg = jpeg;
ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(jpeg->m2m_dev, ctx,
@@ -1197,7 +1196,7 @@ static int mtk_jpeg_open(struct file *file)
return 0;
error:
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
mutex_unlock(&jpeg->lock);
free:
@@ -1208,12 +1207,12 @@ free:
static int mtk_jpeg_release(struct file *file)
{
struct mtk_jpeg_dev *jpeg = video_drvdata(file);
- struct mtk_jpeg_ctx *ctx = mtk_jpeg_fh_to_ctx(file->private_data);
+ struct mtk_jpeg_ctx *ctx = mtk_jpeg_file_to_ctx(file);
mutex_lock(&jpeg->lock);
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
kfree(ctx);
mutex_unlock(&jpeg->lock);
diff --git a/drivers/media/platform/mediatek/mdp/mtk_mdp_m2m.c b/drivers/media/platform/mediatek/mdp/mtk_mdp_m2m.c
index d0fd77dcf8e2..03c07948dfdd 100644
--- a/drivers/media/platform/mediatek/mdp/mtk_mdp_m2m.c
+++ b/drivers/media/platform/mediatek/mdp/mtk_mdp_m2m.c
@@ -348,9 +348,9 @@ static int mtk_mdp_try_crop(struct mtk_mdp_ctx *ctx, u32 type,
return 0;
}
-static inline struct mtk_mdp_ctx *fh_to_ctx(struct v4l2_fh *fh)
+static inline struct mtk_mdp_ctx *file_to_ctx(struct file *filp)
{
- return container_of(fh, struct mtk_mdp_ctx, fh);
+ return container_of(file_to_v4l2_fh(filp), struct mtk_mdp_ctx, fh);
}
static inline struct mtk_mdp_ctx *ctrl_to_ctx(struct v4l2_ctrl *ctrl)
@@ -589,7 +589,7 @@ static const struct vb2_ops mtk_mdp_m2m_qops = {
static int mtk_mdp_m2m_querycap(struct file *file, void *fh,
struct v4l2_capability *cap)
{
- struct mtk_mdp_ctx *ctx = fh_to_ctx(fh);
+ struct mtk_mdp_ctx *ctx = file_to_ctx(file);
struct mtk_mdp_dev *mdp = ctx->mdp_dev;
strscpy(cap->driver, MTK_MDP_MODULE_NAME, sizeof(cap->driver));
@@ -627,7 +627,7 @@ static int mtk_mdp_m2m_enum_fmt_vid_out(struct file *file, void *priv,
static int mtk_mdp_m2m_g_fmt_mplane(struct file *file, void *fh,
struct v4l2_format *f)
{
- struct mtk_mdp_ctx *ctx = fh_to_ctx(fh);
+ struct mtk_mdp_ctx *ctx = file_to_ctx(file);
struct mtk_mdp_frame *frame;
struct v4l2_pix_format_mplane *pix_mp;
int i;
@@ -666,7 +666,7 @@ static int mtk_mdp_m2m_g_fmt_mplane(struct file *file, void *fh,
static int mtk_mdp_m2m_try_fmt_mplane(struct file *file, void *fh,
struct v4l2_format *f)
{
- struct mtk_mdp_ctx *ctx = fh_to_ctx(fh);
+ struct mtk_mdp_ctx *ctx = file_to_ctx(file);
if (!mtk_mdp_try_fmt_mplane(ctx, f))
return -EINVAL;
@@ -676,7 +676,7 @@ static int mtk_mdp_m2m_try_fmt_mplane(struct file *file, void *fh,
static int mtk_mdp_m2m_s_fmt_mplane(struct file *file, void *fh,
struct v4l2_format *f)
{
- struct mtk_mdp_ctx *ctx = fh_to_ctx(fh);
+ struct mtk_mdp_ctx *ctx = file_to_ctx(file);
struct vb2_queue *vq;
struct mtk_mdp_frame *frame;
struct v4l2_pix_format_mplane *pix_mp;
@@ -722,7 +722,7 @@ static int mtk_mdp_m2m_s_fmt_mplane(struct file *file, void *fh,
static int mtk_mdp_m2m_reqbufs(struct file *file, void *fh,
struct v4l2_requestbuffers *reqbufs)
{
- struct mtk_mdp_ctx *ctx = fh_to_ctx(fh);
+ struct mtk_mdp_ctx *ctx = file_to_ctx(file);
return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
}
@@ -730,7 +730,7 @@ static int mtk_mdp_m2m_reqbufs(struct file *file, void *fh,
static int mtk_mdp_m2m_streamon(struct file *file, void *fh,
enum v4l2_buf_type type)
{
- struct mtk_mdp_ctx *ctx = fh_to_ctx(fh);
+ struct mtk_mdp_ctx *ctx = file_to_ctx(file);
int ret;
if (!mtk_mdp_ctx_state_is_set(ctx, MTK_MDP_VPU_INIT)) {
@@ -768,8 +768,8 @@ static inline bool mtk_mdp_is_target_crop(u32 target)
static int mtk_mdp_m2m_g_selection(struct file *file, void *fh,
struct v4l2_selection *s)
{
+ struct mtk_mdp_ctx *ctx = file_to_ctx(file);
struct mtk_mdp_frame *frame;
- struct mtk_mdp_ctx *ctx = fh_to_ctx(fh);
bool valid = false;
if (s->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
@@ -835,8 +835,8 @@ static int mtk_mdp_check_scaler_ratio(struct mtk_mdp_variant *var, int src_w,
static int mtk_mdp_m2m_s_selection(struct file *file, void *fh,
struct v4l2_selection *s)
{
+ struct mtk_mdp_ctx *ctx = file_to_ctx(file);
struct mtk_mdp_frame *frame;
- struct mtk_mdp_ctx *ctx = fh_to_ctx(fh);
struct v4l2_rect new_r;
struct mtk_mdp_variant *variant = ctx->mdp_dev->variant;
int ret;
@@ -1065,14 +1065,13 @@ static int mtk_mdp_m2m_open(struct file *file)
mutex_init(&ctx->slock);
ctx->id = mdp->id_counter++;
v4l2_fh_init(&ctx->fh, vfd);
- file->private_data = &ctx->fh;
ret = mtk_mdp_ctrls_create(ctx);
if (ret)
goto error_ctrls;
/* Use separate control handler per file handle */
ctx->fh.ctrl_handler = &ctx->ctrl_handler;
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
INIT_LIST_HEAD(&ctx->list);
ctx->mdp_dev = mdp;
@@ -1126,7 +1125,7 @@ err_load_vpu:
error_m2m_ctx:
v4l2_ctrl_handler_free(&ctx->ctrl_handler);
error_ctrls:
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
mutex_unlock(&mdp->lock);
err_lock:
@@ -1137,14 +1136,14 @@ err_lock:
static int mtk_mdp_m2m_release(struct file *file)
{
- struct mtk_mdp_ctx *ctx = fh_to_ctx(file->private_data);
+ struct mtk_mdp_ctx *ctx = file_to_ctx(file);
struct mtk_mdp_dev *mdp = ctx->mdp_dev;
flush_workqueue(mdp->job_wq);
mutex_lock(&mdp->lock);
v4l2_m2m_ctx_release(ctx->m2m_ctx);
v4l2_ctrl_handler_free(&ctx->ctrl_handler);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
mtk_mdp_vpu_deinit(&ctx->vpu);
mdp->ctx_num--;
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
index 683c066ed975..7fcb2fbdd64e 100644
--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
@@ -1530,6 +1530,9 @@ static const struct of_device_id mdp_comp_dt_ids[] __maybe_unused = {
}, {
.compatible = "mediatek,mt8195-mdp3-tcc",
.data = (void *)MDP_COMP_TYPE_TCC,
+ }, {
+ .compatible = "mediatek,mt8188-mdp3-rdma",
+ .data = (void *)MDP_COMP_TYPE_RDMA,
},
{}
};
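The new mt8188 entry is resolved like its siblings; a sketch of the usual lookup, assuming the driver maps the compatible string to a component type via a standard of_match_node() call (the call site is outside this hunk, and the enum name is inferred from the MDP_COMP_TYPE_* constants in the table):

	const struct of_device_id *of_id = of_match_node(mdp_comp_dt_ids, node);
	enum mdp_comp_type type;

	if (of_id)
		type = (enum mdp_comp_type)(uintptr_t)of_id->data;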
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c
index 8de2c8e4d333..6559d72d5d42 100644
--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c
@@ -282,7 +282,7 @@ static int mdp_probe(struct platform_device *pdev)
}
mdp->rproc_handle = scp_get_rproc(mdp->scp);
- dev_dbg(&pdev->dev, "MDP rproc_handle: %pK", mdp->rproc_handle);
+ dev_dbg(&pdev->dev, "MDP rproc_handle: %p", mdp->rproc_handle);
mutex_init(&mdp->vpu_lock);
mutex_init(&mdp->m2m_lock);
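The %pK to %p switch here (and in mtk-mdp3-vpu.c below) does not leak addresses: plain %p has printed a per-boot hashed pointer by default since v4.15, whereas %pK additionally depends on the kptr_restrict sysctl. Illustrative usage:

	/* Hashed by default; use %px only where the raw address is
	 * genuinely needed and safe to expose. */
	dev_dbg(&pdev->dev, "MDP rproc_handle: %p", mdp->rproc_handle);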
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c
index 59ce5cce0698..9ef956b565a7 100644
--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c
@@ -10,9 +10,9 @@
#include <media/videobuf2-dma-contig.h>
#include "mtk-mdp3-m2m.h"
-static inline struct mdp_m2m_ctx *fh_to_ctx(struct v4l2_fh *fh)
+static inline struct mdp_m2m_ctx *file_to_ctx(struct file *filp)
{
- return container_of(fh, struct mdp_m2m_ctx, fh);
+ return container_of(file_to_v4l2_fh(filp), struct mdp_m2m_ctx, fh);
}
static inline struct mdp_m2m_ctx *ctrl_to_ctx(struct v4l2_ctrl *ctrl)
@@ -285,7 +285,7 @@ static int mdp_m2m_querycap(struct file *file, void *fh,
static int mdp_m2m_enum_fmt_mplane(struct file *file, void *fh,
struct v4l2_fmtdesc *f)
{
- struct mdp_m2m_ctx *ctx = fh_to_ctx(fh);
+ struct mdp_m2m_ctx *ctx = file_to_ctx(file);
return mdp_enum_fmt_mplane(ctx->mdp_dev, f);
}
@@ -293,7 +293,7 @@ static int mdp_m2m_enum_fmt_mplane(struct file *file, void *fh,
static int mdp_m2m_g_fmt_mplane(struct file *file, void *fh,
struct v4l2_format *f)
{
- struct mdp_m2m_ctx *ctx = fh_to_ctx(fh);
+ struct mdp_m2m_ctx *ctx = file_to_ctx(file);
struct mdp_frame *frame;
struct v4l2_pix_format_mplane *pix_mp;
@@ -311,7 +311,7 @@ static int mdp_m2m_g_fmt_mplane(struct file *file, void *fh,
static int mdp_m2m_s_fmt_mplane(struct file *file, void *fh,
struct v4l2_format *f)
{
- struct mdp_m2m_ctx *ctx = fh_to_ctx(fh);
+ struct mdp_m2m_ctx *ctx = file_to_ctx(file);
struct mdp_frame *frame = ctx_get_frame(ctx, f->type);
struct mdp_frame *capture;
const struct mdp_format *fmt;
@@ -354,7 +354,7 @@ static int mdp_m2m_s_fmt_mplane(struct file *file, void *fh,
static int mdp_m2m_try_fmt_mplane(struct file *file, void *fh,
struct v4l2_format *f)
{
- struct mdp_m2m_ctx *ctx = fh_to_ctx(fh);
+ struct mdp_m2m_ctx *ctx = file_to_ctx(file);
if (!mdp_try_fmt_mplane(ctx->mdp_dev, f, &ctx->curr_param, ctx->id))
return -EINVAL;
@@ -365,7 +365,7 @@ static int mdp_m2m_try_fmt_mplane(struct file *file, void *fh,
static int mdp_m2m_g_selection(struct file *file, void *fh,
struct v4l2_selection *s)
{
- struct mdp_m2m_ctx *ctx = fh_to_ctx(fh);
+ struct mdp_m2m_ctx *ctx = file_to_ctx(file);
struct mdp_frame *frame;
bool valid = false;
@@ -417,7 +417,7 @@ static int mdp_m2m_g_selection(struct file *file, void *fh,
static int mdp_m2m_s_selection(struct file *file, void *fh,
struct v4l2_selection *s)
{
- struct mdp_m2m_ctx *ctx = fh_to_ctx(fh);
+ struct mdp_m2m_ctx *ctx = file_to_ctx(file);
struct mdp_frame *frame = ctx_get_frame(ctx, s->type);
struct mdp_frame *capture;
struct v4l2_rect r;
@@ -585,14 +585,13 @@ static int mdp_m2m_open(struct file *file)
ctx->mdp_dev = mdp;
v4l2_fh_init(&ctx->fh, vdev);
- file->private_data = &ctx->fh;
ret = mdp_m2m_ctrls_create(ctx);
if (ret)
goto err_exit_fh;
/* Use separate control handler per file handle */
ctx->fh.ctrl_handler = &ctx->ctrl_handler;
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
mutex_init(&ctx->ctx_lock);
ctx->m2m_ctx = v4l2_m2m_ctx_init(mdp->m2m_dev, ctx, mdp_m2m_queue_init);
@@ -629,7 +628,7 @@ err_release_m2m_ctx:
v4l2_m2m_ctx_release(ctx->m2m_ctx);
err_release_handler:
v4l2_ctrl_handler_free(&ctx->ctrl_handler);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
err_exit_fh:
v4l2_fh_exit(&ctx->fh);
ida_free(&mdp->mdp_ida, ctx->id);
@@ -643,7 +642,7 @@ err_free_ctx:
static int mdp_m2m_release(struct file *file)
{
- struct mdp_m2m_ctx *ctx = fh_to_ctx(file->private_data);
+ struct mdp_m2m_ctx *ctx = file_to_ctx(file);
struct mdp_dev *mdp = video_drvdata(file);
struct device *dev = &mdp->pdev->dev;
@@ -653,7 +652,7 @@ static int mdp_m2m_release(struct file *file)
mdp_vpu_put_locked(mdp);
v4l2_ctrl_handler_free(&ctx->ctrl_handler);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
ida_free(&mdp->mdp_ida, ctx->id);
mutex_unlock(&mdp->m2m_lock);
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-vpu.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-vpu.c
index da3a892ad867..fae3e1ad2df7 100644
--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-vpu.c
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-vpu.c
@@ -221,7 +221,7 @@ int mdp_vpu_dev_init(struct mdp_vpu_dev *vpu, struct mtk_scp *scp,
}
dev_dbg(&mdp->pdev->dev,
- "VPU param:%pK pa:%pad sz:%zx, work:%pK pa:%pad sz:%zx, config:%pK pa:%pad sz:%zx",
+ "VPU param:%p pa:%pad sz:%zx, work:%p pa:%pad sz:%zx, config:%p pa:%pad sz:%zx",
vpu->param, &vpu->param_addr, vpu->param_size,
vpu->work, &vpu->work_addr, vpu->work_size,
vpu->config, &vpu->config_addr, vpu->config_size);
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec.c b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec.c
index 98838217b97d..d691bd533103 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec.c
@@ -87,7 +87,7 @@ static int stateful_try_decoder_cmd(struct file *file, void *priv, struct v4l2_d
static int stateful_decoder_cmd(struct file *file, void *priv, struct v4l2_decoder_cmd *cmd)
{
- struct mtk_vcodec_dec_ctx *ctx = fh_to_dec_ctx(priv);
+ struct mtk_vcodec_dec_ctx *ctx = file_to_dec_ctx(file);
struct vb2_queue *src_vq, *dst_vq;
int ret;
@@ -132,7 +132,7 @@ static int stateless_try_decoder_cmd(struct file *file, void *priv, struct v4l2_
static int stateless_decoder_cmd(struct file *file, void *priv, struct v4l2_decoder_cmd *cmd)
{
- struct mtk_vcodec_dec_ctx *ctx = fh_to_dec_ctx(priv);
+ struct mtk_vcodec_dec_ctx *ctx = file_to_dec_ctx(file);
int ret;
ret = v4l2_m2m_ioctl_stateless_try_decoder_cmd(file, priv, cmd);
@@ -158,7 +158,7 @@ static int stateless_decoder_cmd(struct file *file, void *priv, struct v4l2_deco
static int vidioc_try_decoder_cmd(struct file *file, void *priv, struct v4l2_decoder_cmd *cmd)
{
- struct mtk_vcodec_dec_ctx *ctx = fh_to_dec_ctx(priv);
+ struct mtk_vcodec_dec_ctx *ctx = file_to_dec_ctx(file);
if (ctx->dev->vdec_pdata->uses_stateless_api)
return stateless_try_decoder_cmd(file, priv, cmd);
@@ -168,7 +168,7 @@ static int vidioc_try_decoder_cmd(struct file *file, void *priv, struct v4l2_dec
static int vidioc_decoder_cmd(struct file *file, void *priv, struct v4l2_decoder_cmd *cmd)
{
- struct mtk_vcodec_dec_ctx *ctx = fh_to_dec_ctx(priv);
+ struct mtk_vcodec_dec_ctx *ctx = file_to_dec_ctx(file);
if (ctx->dev->vdec_pdata->uses_stateless_api)
return stateless_decoder_cmd(file, priv, cmd);
@@ -233,7 +233,7 @@ void mtk_vcodec_dec_set_default_params(struct mtk_vcodec_dec_ctx *ctx)
static int vidioc_vdec_qbuf(struct file *file, void *priv,
struct v4l2_buffer *buf)
{
- struct mtk_vcodec_dec_ctx *ctx = fh_to_dec_ctx(priv);
+ struct mtk_vcodec_dec_ctx *ctx = file_to_dec_ctx(file);
if (ctx->state == MTK_STATE_ABORT) {
mtk_v4l2_vdec_err(ctx, "[%d] Call on QBUF after unrecoverable error", ctx->id);
@@ -246,7 +246,7 @@ static int vidioc_vdec_qbuf(struct file *file, void *priv,
static int vidioc_vdec_dqbuf(struct file *file, void *priv,
struct v4l2_buffer *buf)
{
- struct mtk_vcodec_dec_ctx *ctx = fh_to_dec_ctx(priv);
+ struct mtk_vcodec_dec_ctx *ctx = file_to_dec_ctx(file);
if (ctx->state == MTK_STATE_ABORT) {
mtk_v4l2_vdec_err(ctx, "[%d] Call on DQBUF after unrecoverable error", ctx->id);
@@ -259,7 +259,7 @@ static int vidioc_vdec_dqbuf(struct file *file, void *priv,
static int vidioc_vdec_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct mtk_vcodec_dec_ctx *ctx = fh_to_dec_ctx(priv);
+ struct mtk_vcodec_dec_ctx *ctx = file_to_dec_ctx(file);
struct device *dev = &ctx->dev->plat_dev->dev;
strscpy(cap->driver, dev->driver->name, sizeof(cap->driver));
@@ -354,7 +354,7 @@ static int vidioc_try_fmt_vid_cap_mplane(struct file *file, void *priv,
struct v4l2_format *f)
{
const struct mtk_video_fmt *fmt;
- struct mtk_vcodec_dec_ctx *ctx = fh_to_dec_ctx(priv);
+ struct mtk_vcodec_dec_ctx *ctx = file_to_dec_ctx(file);
const struct mtk_vcodec_dec_pdata *dec_pdata = ctx->dev->vdec_pdata;
fmt = mtk_vdec_find_format(f, dec_pdata);
@@ -372,7 +372,7 @@ static int vidioc_try_fmt_vid_out_mplane(struct file *file, void *priv,
{
struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
const struct mtk_video_fmt *fmt;
- struct mtk_vcodec_dec_ctx *ctx = fh_to_dec_ctx(priv);
+ struct mtk_vcodec_dec_ctx *ctx = file_to_dec_ctx(file);
const struct mtk_vcodec_dec_pdata *dec_pdata = ctx->dev->vdec_pdata;
fmt = mtk_vdec_find_format(f, dec_pdata);
@@ -393,7 +393,7 @@ static int vidioc_try_fmt_vid_out_mplane(struct file *file, void *priv,
static int vidioc_vdec_g_selection(struct file *file, void *priv,
struct v4l2_selection *s)
{
- struct mtk_vcodec_dec_ctx *ctx = fh_to_dec_ctx(priv);
+ struct mtk_vcodec_dec_ctx *ctx = file_to_dec_ctx(file);
struct mtk_q_data *q_data;
if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
@@ -442,7 +442,7 @@ static int vidioc_vdec_g_selection(struct file *file, void *priv,
static int vidioc_vdec_s_selection(struct file *file, void *priv,
struct v4l2_selection *s)
{
- struct mtk_vcodec_dec_ctx *ctx = fh_to_dec_ctx(priv);
+ struct mtk_vcodec_dec_ctx *ctx = file_to_dec_ctx(file);
if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
@@ -464,7 +464,7 @@ static int vidioc_vdec_s_selection(struct file *file, void *priv,
static int vidioc_vdec_s_fmt(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct mtk_vcodec_dec_ctx *ctx = fh_to_dec_ctx(priv);
+ struct mtk_vcodec_dec_ctx *ctx = file_to_dec_ctx(file);
struct v4l2_pix_format_mplane *pix_mp;
struct mtk_q_data *q_data;
int ret = 0;
@@ -594,7 +594,7 @@ static int vidioc_enum_framesizes(struct file *file, void *priv,
struct v4l2_frmsizeenum *fsize)
{
int i = 0;
- struct mtk_vcodec_dec_ctx *ctx = fh_to_dec_ctx(priv);
+ struct mtk_vcodec_dec_ctx *ctx = file_to_dec_ctx(file);
const struct mtk_vcodec_dec_pdata *dec_pdata = ctx->dev->vdec_pdata;
if (fsize->index != 0)
@@ -623,10 +623,10 @@ static int vidioc_enum_framesizes(struct file *file, void *priv,
return -EINVAL;
}
-static int vidioc_enum_fmt(struct v4l2_fmtdesc *f, void *priv,
+static int vidioc_enum_fmt(struct file *file, struct v4l2_fmtdesc *f,
bool output_queue)
{
- struct mtk_vcodec_dec_ctx *ctx = fh_to_dec_ctx(priv);
+ struct mtk_vcodec_dec_ctx *ctx = file_to_dec_ctx(file);
const struct mtk_vcodec_dec_pdata *dec_pdata = ctx->dev->vdec_pdata;
const struct mtk_video_fmt *fmt;
int i, j = 0;
@@ -660,19 +660,19 @@ static int vidioc_enum_fmt(struct v4l2_fmtdesc *f, void *priv,
static int vidioc_vdec_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- return vidioc_enum_fmt(f, priv, false);
+ return vidioc_enum_fmt(file, f, false);
}
static int vidioc_vdec_enum_fmt_vid_out(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- return vidioc_enum_fmt(f, priv, true);
+ return vidioc_enum_fmt(file, f, true);
}
static int vidioc_vdec_g_fmt(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct mtk_vcodec_dec_ctx *ctx = fh_to_dec_ctx(priv);
+ struct mtk_vcodec_dec_ctx *ctx = file_to_dec_ctx(file);
struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
struct vb2_queue *vq;
struct mtk_q_data *q_data;
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.c b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.c
index 9247d92d431d..46d176e6de63 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.c
@@ -206,8 +206,7 @@ static int fops_vcodec_open(struct file *file)
mutex_lock(&dev->dev_mutex);
ctx->id = dev->id_counter++;
v4l2_fh_init(&ctx->fh, video_devdata(file));
- file->private_data = &ctx->fh;
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
INIT_LIST_HEAD(&ctx->list);
ctx->dev = dev;
if (ctx->dev->vdec_pdata->is_subdev_supported) {
@@ -283,7 +282,7 @@ err_load_fw:
err_m2m_ctx_init:
v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
err_ctrls_setup:
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
kfree(ctx);
mutex_unlock(&dev->dev_mutex);
@@ -294,7 +293,7 @@ err_ctrls_setup:
static int fops_vcodec_release(struct file *file)
{
struct mtk_vcodec_dec_dev *dev = video_drvdata(file);
- struct mtk_vcodec_dec_ctx *ctx = fh_to_dec_ctx(file->private_data);
+ struct mtk_vcodec_dec_ctx *ctx = file_to_dec_ctx(file);
mtk_v4l2_vdec_dbg(0, ctx, "[%d] decoder", ctx->id);
mutex_lock(&dev->dev_mutex);
@@ -308,7 +307,7 @@ static int fops_vcodec_release(struct file *file)
v4l2_m2m_ctx_release(ctx->m2m_ctx);
mtk_vcodec_dec_release(ctx);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h
index aececca7ecf8..d047d7c580fb 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h
+++ b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h
@@ -314,6 +314,11 @@ static inline struct mtk_vcodec_dec_ctx *fh_to_dec_ctx(struct v4l2_fh *fh)
return container_of(fh, struct mtk_vcodec_dec_ctx, fh);
}
+static inline struct mtk_vcodec_dec_ctx *file_to_dec_ctx(struct file *filp)
+{
+ return fh_to_dec_ctx(file_to_v4l2_fh(filp));
+}
+
static inline struct mtk_vcodec_dec_ctx *ctrl_to_dec_ctx(struct v4l2_ctrl *ctrl)
{
return container_of(ctrl->handler, struct mtk_vcodec_dec_ctx, ctrl_hdl);
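Internal helpers that used to receive the opaque priv now thread the struct file through instead, as the vidioc_enum_fmt() rework above shows; a sketch of the shape with illustrative demo_* names (the unused void *priv stays in the wrappers because the v4l2_ioctl_ops prototypes require it):

	static int demo_enum_fmt(struct file *file, struct v4l2_fmtdesc *f,
				 bool output_queue)
	{
		struct demo_ctx *ctx = file_to_demo_ctx(file);

		/* enumerate capture or output formats for ctx */
		return 0;
	}

	static int demo_enum_fmt_vid_cap(struct file *file, void *priv,
					 struct v4l2_fmtdesc *f)
	{
		return demo_enum_fmt(file, f, false);
	}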
diff --git a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c
index a01dc25a7699..d815e962ab89 100644
--- a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c
+++ b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c
@@ -159,7 +159,7 @@ static int vidioc_enum_framesizes(struct file *file, void *fh,
struct v4l2_frmsizeenum *fsize)
{
const struct mtk_video_fmt *fmt;
- struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(fh);
+ struct mtk_vcodec_enc_ctx *ctx = file_to_enc_ctx(file);
if (fsize->index != 0)
return -EINVAL;
@@ -183,7 +183,7 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
const struct mtk_vcodec_enc_pdata *pdata =
- fh_to_enc_ctx(priv)->dev->venc_pdata;
+ file_to_enc_ctx(file)->dev->venc_pdata;
return vidioc_enum_fmt(f, pdata->capture_formats,
pdata->num_capture_formats);
@@ -193,15 +193,14 @@ static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
const struct mtk_vcodec_enc_pdata *pdata =
- fh_to_enc_ctx(priv)->dev->venc_pdata;
+ file_to_enc_ctx(file)->dev->venc_pdata;
return vidioc_enum_fmt(f, pdata->output_formats,
pdata->num_output_formats);
}
-static int mtk_vcodec_enc_get_chip_name(void *priv)
+static int mtk_vcodec_enc_get_chip_name(struct mtk_vcodec_enc_ctx *ctx)
{
- struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(priv);
struct device *dev = &ctx->dev->plat_dev->dev;
if (of_device_is_compatible(dev->of_node, "mediatek,mt8173-vcodec-enc"))
@@ -221,9 +220,9 @@ static int mtk_vcodec_enc_get_chip_name(void *priv)
static int vidioc_venc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(priv);
+ struct mtk_vcodec_enc_ctx *ctx = file_to_enc_ctx(file);
struct device *dev = &ctx->dev->plat_dev->dev;
- int platform_name = mtk_vcodec_enc_get_chip_name(priv);
+ int platform_name = mtk_vcodec_enc_get_chip_name(ctx);
strscpy(cap->driver, dev->driver->name, sizeof(cap->driver));
snprintf(cap->card, sizeof(cap->card), "MT%d video encoder", platform_name);
@@ -234,7 +233,7 @@ static int vidioc_venc_querycap(struct file *file, void *priv,
static int vidioc_venc_s_parm(struct file *file, void *priv,
struct v4l2_streamparm *a)
{
- struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(priv);
+ struct mtk_vcodec_enc_ctx *ctx = file_to_enc_ctx(file);
struct v4l2_fract *timeperframe = &a->parm.output.timeperframe;
if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
@@ -257,7 +256,7 @@ static int vidioc_venc_s_parm(struct file *file, void *priv,
static int vidioc_venc_g_parm(struct file *file, void *priv,
struct v4l2_streamparm *a)
{
- struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(priv);
+ struct mtk_vcodec_enc_ctx *ctx = file_to_enc_ctx(file);
if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
return -EINVAL;
@@ -414,7 +413,7 @@ static void mtk_venc_set_param(struct mtk_vcodec_enc_ctx *ctx,
static int vidioc_venc_s_fmt_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(priv);
+ struct mtk_vcodec_enc_ctx *ctx = file_to_enc_ctx(file);
const struct mtk_vcodec_enc_pdata *pdata = ctx->dev->venc_pdata;
struct vb2_queue *vq;
struct mtk_q_data *q_data = mtk_venc_get_q_data(ctx, f->type);
@@ -469,7 +468,7 @@ static int vidioc_venc_s_fmt_cap(struct file *file, void *priv,
static int vidioc_venc_s_fmt_out(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(priv);
+ struct mtk_vcodec_enc_ctx *ctx = file_to_enc_ctx(file);
const struct mtk_vcodec_enc_pdata *pdata = ctx->dev->venc_pdata;
struct vb2_queue *vq;
struct mtk_q_data *q_data = mtk_venc_get_q_data(ctx, f->type);
@@ -524,7 +523,7 @@ static int vidioc_venc_g_fmt(struct file *file, void *priv,
struct v4l2_format *f)
{
struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
- struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(priv);
+ struct mtk_vcodec_enc_ctx *ctx = file_to_enc_ctx(file);
struct vb2_queue *vq;
struct mtk_q_data *q_data = mtk_venc_get_q_data(ctx, f->type);
int i;
@@ -557,7 +556,7 @@ static int vidioc_try_fmt_vid_cap_mplane(struct file *file, void *priv,
struct v4l2_format *f)
{
const struct mtk_video_fmt *fmt;
- struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(priv);
+ struct mtk_vcodec_enc_ctx *ctx = file_to_enc_ctx(file);
const struct mtk_vcodec_enc_pdata *pdata = ctx->dev->venc_pdata;
fmt = mtk_venc_find_format(f->fmt.pix.pixelformat, pdata);
@@ -579,7 +578,7 @@ static int vidioc_try_fmt_vid_out_mplane(struct file *file, void *priv,
struct v4l2_format *f)
{
const struct mtk_video_fmt *fmt;
- struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(priv);
+ struct mtk_vcodec_enc_ctx *ctx = file_to_enc_ctx(file);
const struct mtk_vcodec_enc_pdata *pdata = ctx->dev->venc_pdata;
fmt = mtk_venc_find_format(f->fmt.pix.pixelformat, pdata);
@@ -600,7 +599,7 @@ static int vidioc_try_fmt_vid_out_mplane(struct file *file, void *priv,
static int vidioc_venc_g_selection(struct file *file, void *priv,
struct v4l2_selection *s)
{
- struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(priv);
+ struct mtk_vcodec_enc_ctx *ctx = file_to_enc_ctx(file);
struct mtk_q_data *q_data = mtk_venc_get_q_data(ctx, s->type);
if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
@@ -630,7 +629,7 @@ static int vidioc_venc_g_selection(struct file *file, void *priv,
static int vidioc_venc_s_selection(struct file *file, void *priv,
struct v4l2_selection *s)
{
- struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(priv);
+ struct mtk_vcodec_enc_ctx *ctx = file_to_enc_ctx(file);
struct mtk_q_data *q_data = mtk_venc_get_q_data(ctx, s->type);
if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
@@ -655,7 +654,7 @@ static int vidioc_venc_s_selection(struct file *file, void *priv,
static int vidioc_venc_qbuf(struct file *file, void *priv,
struct v4l2_buffer *buf)
{
- struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(priv);
+ struct mtk_vcodec_enc_ctx *ctx = file_to_enc_ctx(file);
if (ctx->state == MTK_STATE_ABORT) {
mtk_v4l2_venc_err(ctx, "[%d] Call on QBUF after unrecoverable error",
@@ -669,7 +668,7 @@ static int vidioc_venc_qbuf(struct file *file, void *priv,
static int vidioc_venc_dqbuf(struct file *file, void *priv,
struct v4l2_buffer *buf)
{
- struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(priv);
+ struct mtk_vcodec_enc_ctx *ctx = file_to_enc_ctx(file);
int ret;
if (ctx->state == MTK_STATE_ABORT) {
@@ -707,7 +706,7 @@ static int vidioc_venc_dqbuf(struct file *file, void *priv,
static int vidioc_encoder_cmd(struct file *file, void *priv,
struct v4l2_encoder_cmd *cmd)
{
- struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(priv);
+ struct mtk_vcodec_enc_ctx *ctx = file_to_enc_ctx(file);
struct vb2_queue *src_vq, *dst_vq;
int ret;
diff --git a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.c b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.c
index a1e4483abcdb..fb1c3bdc2dae 100644
--- a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.c
+++ b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.c
@@ -129,8 +129,7 @@ static int fops_vcodec_open(struct file *file)
*/
ctx->id = dev->id_counter++;
v4l2_fh_init(&ctx->fh, video_devdata(file));
- file->private_data = &ctx->fh;
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
INIT_LIST_HEAD(&ctx->list);
ctx->dev = dev;
init_waitqueue_head(&ctx->queue[0]);
@@ -192,7 +191,7 @@ err_load_fw:
err_m2m_ctx_init:
v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
err_ctrls_setup:
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
kfree(ctx);
mutex_unlock(&dev->dev_mutex);
@@ -203,14 +202,14 @@ err_ctrls_setup:
static int fops_vcodec_release(struct file *file)
{
struct mtk_vcodec_enc_dev *dev = video_drvdata(file);
- struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(file->private_data);
+ struct mtk_vcodec_enc_ctx *ctx = file_to_enc_ctx(file);
mtk_v4l2_venc_dbg(1, ctx, "[%d] encoder", ctx->id);
mutex_lock(&dev->dev_mutex);
v4l2_m2m_ctx_release(ctx->m2m_ctx);
mtk_vcodec_enc_release(ctx);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
diff --git a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.h b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.h
index 0bd85d0fb379..5b304a551236 100644
--- a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.h
+++ b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.h
@@ -217,9 +217,9 @@ struct mtk_vcodec_enc_dev {
struct mtk_vcodec_dbgfs dbgfs;
};
-static inline struct mtk_vcodec_enc_ctx *fh_to_enc_ctx(struct v4l2_fh *fh)
+static inline struct mtk_vcodec_enc_ctx *file_to_enc_ctx(struct file *filp)
{
- return container_of(fh, struct mtk_vcodec_enc_ctx, fh);
+ return container_of(file_to_v4l2_fh(filp), struct mtk_vcodec_enc_ctx, fh);
}
static inline struct mtk_vcodec_enc_ctx *ctrl_to_enc_ctx(struct v4l2_ctrl *ctrl)
diff --git a/drivers/media/platform/nvidia/tegra-vde/v4l2.c b/drivers/media/platform/nvidia/tegra-vde/v4l2.c
index e3726cab0c82..d94978ae2baf 100644
--- a/drivers/media/platform/nvidia/tegra-vde/v4l2.c
+++ b/drivers/media/platform/nvidia/tegra-vde/v4l2.c
@@ -46,9 +46,9 @@ static const struct v4l2_ctrl_config ctrl_cfgs[] = {
},
};
-static inline struct tegra_ctx *fh_to_tegra_ctx(struct v4l2_fh *fh)
+static inline struct tegra_ctx *file_to_tegra_ctx(struct file *file)
{
- return container_of(fh, struct tegra_ctx, fh);
+ return container_of(file_to_v4l2_fh(file), struct tegra_ctx, fh);
}
static void tegra_set_control_data(struct tegra_ctx *ctx, void *data, u32 id)
@@ -506,7 +506,7 @@ static int tegra_querycap(struct file *file, void *priv,
static int tegra_enum_decoded_fmt(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- struct tegra_ctx *ctx = fh_to_tegra_ctx(priv);
+ struct tegra_ctx *ctx = file_to_tegra_ctx(file);
if (WARN_ON(!ctx->coded_fmt_desc))
return -EINVAL;
@@ -522,7 +522,7 @@ static int tegra_enum_decoded_fmt(struct file *file, void *priv,
static int tegra_g_decoded_fmt(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct tegra_ctx *ctx = fh_to_tegra_ctx(priv);
+ struct tegra_ctx *ctx = file_to_tegra_ctx(file);
*f = ctx->decoded_fmt;
return 0;
@@ -531,8 +531,8 @@ static int tegra_g_decoded_fmt(struct file *file, void *priv,
static int tegra_try_decoded_fmt(struct file *file, void *priv,
struct v4l2_format *f)
{
+ struct tegra_ctx *ctx = file_to_tegra_ctx(file);
struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
- struct tegra_ctx *ctx = fh_to_tegra_ctx(priv);
const struct tegra_coded_fmt_desc *coded_desc;
unsigned int i;
@@ -571,7 +571,7 @@ static int tegra_try_decoded_fmt(struct file *file, void *priv,
static int tegra_s_decoded_fmt(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct tegra_ctx *ctx = fh_to_tegra_ctx(priv);
+ struct tegra_ctx *ctx = file_to_tegra_ctx(file);
struct vb2_queue *vq;
int err;
@@ -593,7 +593,7 @@ static int tegra_s_decoded_fmt(struct file *file, void *priv,
static int tegra_enum_coded_fmt(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- struct tegra_ctx *ctx = fh_to_tegra_ctx(priv);
+ struct tegra_ctx *ctx = file_to_tegra_ctx(file);
const struct tegra_vde_soc *soc = ctx->vde->soc;
if (f->index >= soc->num_coded_fmts)
@@ -607,7 +607,7 @@ static int tegra_enum_coded_fmt(struct file *file, void *priv,
static int tegra_g_coded_fmt(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct tegra_ctx *ctx = fh_to_tegra_ctx(priv);
+ struct tegra_ctx *ctx = file_to_tegra_ctx(file);
*f = ctx->coded_fmt;
return 0;
@@ -631,7 +631,7 @@ static int tegra_try_coded_fmt(struct file *file, void *priv,
struct v4l2_format *f)
{
struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
- struct tegra_ctx *ctx = fh_to_tegra_ctx(priv);
+ struct tegra_ctx *ctx = file_to_tegra_ctx(file);
const struct tegra_vde_soc *soc = ctx->vde->soc;
int size = pix_mp->plane_fmt[0].sizeimage;
const struct tegra_coded_fmt_desc *desc;
@@ -656,7 +656,7 @@ static int tegra_try_coded_fmt(struct file *file, void *priv,
static int tegra_s_coded_fmt(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct tegra_ctx *ctx = fh_to_tegra_ctx(priv);
+ struct tegra_ctx *ctx = file_to_tegra_ctx(file);
struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx;
const struct tegra_coded_fmt_desc *desc;
struct vb2_queue *peer_vq, *vq;
@@ -718,7 +718,7 @@ static int tegra_s_coded_fmt(struct file *file, void *priv,
static int tegra_enum_framesizes(struct file *file, void *priv,
struct v4l2_frmsizeenum *fsize)
{
- struct tegra_ctx *ctx = fh_to_tegra_ctx(priv);
+ struct tegra_ctx *ctx = file_to_tegra_ctx(file);
const struct tegra_coded_fmt_desc *fmt;
if (fsize->index)
@@ -832,14 +832,13 @@ static int tegra_open(struct file *file)
goto free_ctrls;
}
- file->private_data = &ctx->fh;
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
tegra_reset_coded_fmt(ctx);
- tegra_try_coded_fmt(file, file->private_data, &ctx->coded_fmt);
+ tegra_try_coded_fmt(file, &ctx->fh, &ctx->coded_fmt);
tegra_reset_decoded_fmt(ctx);
- tegra_try_decoded_fmt(file, file->private_data, &ctx->decoded_fmt);
+ tegra_try_decoded_fmt(file, &ctx->fh, &ctx->decoded_fmt);
return 0;
@@ -853,11 +852,11 @@ free_ctx:
static int tegra_release(struct file *file)
{
- struct v4l2_fh *fh = file->private_data;
- struct tegra_ctx *ctx = fh_to_tegra_ctx(fh);
+ struct tegra_ctx *ctx = file_to_tegra_ctx(file);
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
struct tegra_vde *vde = ctx->vde;
- v4l2_fh_del(fh);
+ v4l2_fh_del(fh, file);
v4l2_m2m_ctx_release(fh->m2m_ctx);
v4l2_ctrl_handler_free(&ctx->hdl);
v4l2_fh_exit(fh);
diff --git a/drivers/media/platform/nxp/dw100/dw100.c b/drivers/media/platform/nxp/dw100/dw100.c
index 3d1db1121bf9..97744c7b7c03 100644
--- a/drivers/media/platform/nxp/dw100/dw100.c
+++ b/drivers/media/platform/nxp/dw100/dw100.c
@@ -266,7 +266,7 @@ static inline int dw100_dump_regs(struct seq_file *m)
static inline struct dw100_ctx *dw100_file2ctx(struct file *file)
{
- return container_of(file->private_data, struct dw100_ctx, fh);
+ return container_of(file_to_v4l2_fh(file), struct dw100_ctx, fh);
}
static struct dw100_q_data *dw100_get_q_data(struct dw100_ctx *ctx,
@@ -607,7 +607,6 @@ static int dw100_open(struct file *file)
mutex_init(&ctx->vq_mutex);
v4l2_fh_init(&ctx->fh, video_devdata(file));
- file->private_data = &ctx->fh;
ctx->dw_dev = dw_dev;
ctx->q_data[DW100_QUEUE_SRC].fmt = &formats[0];
@@ -651,7 +650,7 @@ static int dw100_open(struct file *file)
goto err;
}
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
return 0;
@@ -668,7 +667,7 @@ static int dw100_release(struct file *file)
{
struct dw100_ctx *ctx = dw100_file2ctx(file);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
v4l2_ctrl_handler_free(&ctx->hdl);
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
diff --git a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
index 8681dd193033..df3ccdf767ba 100644
--- a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
+++ b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
@@ -644,9 +644,9 @@ static void print_mxc_buf(struct mxc_jpeg_dev *jpeg, struct vb2_buffer *buf,
}
}
-static inline struct mxc_jpeg_ctx *mxc_jpeg_fh_to_ctx(struct v4l2_fh *fh)
+static inline struct mxc_jpeg_ctx *mxc_jpeg_file_to_ctx(struct file *filp)
{
- return container_of(fh, struct mxc_jpeg_ctx, fh);
+ return container_of(file_to_v4l2_fh(filp), struct mxc_jpeg_ctx, fh);
}
static int enum_fmt(const struct mxc_jpeg_fmt *mxc_formats, int n,
@@ -1604,8 +1604,8 @@ end:
static int mxc_jpeg_decoder_cmd(struct file *file, void *priv,
struct v4l2_decoder_cmd *cmd)
{
- struct v4l2_fh *fh = file->private_data;
- struct mxc_jpeg_ctx *ctx = mxc_jpeg_fh_to_ctx(fh);
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
+ struct mxc_jpeg_ctx *ctx = mxc_jpeg_file_to_ctx(file);
unsigned long flags;
int ret;
@@ -1637,8 +1637,8 @@ static int mxc_jpeg_decoder_cmd(struct file *file, void *priv,
static int mxc_jpeg_encoder_cmd(struct file *file, void *priv,
struct v4l2_encoder_cmd *cmd)
{
- struct v4l2_fh *fh = file->private_data;
- struct mxc_jpeg_ctx *ctx = mxc_jpeg_fh_to_ctx(fh);
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
+ struct mxc_jpeg_ctx *ctx = mxc_jpeg_file_to_ctx(file);
unsigned long flags;
int ret;
@@ -2200,8 +2200,7 @@ static int mxc_jpeg_open(struct file *file)
}
v4l2_fh_init(&ctx->fh, mxc_vfd);
- file->private_data = &ctx->fh;
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
ctx->mxc_jpeg = mxc_jpeg;
@@ -2234,7 +2233,7 @@ static int mxc_jpeg_open(struct file *file)
err_ctrls_setup:
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
error:
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
mutex_unlock(&mxc_jpeg->lock);
free:
@@ -2256,7 +2255,7 @@ static int mxc_jpeg_querycap(struct file *file, void *priv,
static int mxc_jpeg_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- struct mxc_jpeg_ctx *ctx = mxc_jpeg_fh_to_ctx(priv);
+ struct mxc_jpeg_ctx *ctx = mxc_jpeg_file_to_ctx(file);
struct mxc_jpeg_q_data *q_data = mxc_jpeg_get_q_data(ctx, f->type);
if (ctx->mxc_jpeg->mode == MXC_JPEG_ENCODE) {
@@ -2296,7 +2295,7 @@ static int mxc_jpeg_enum_fmt_vid_cap(struct file *file, void *priv,
static int mxc_jpeg_enum_fmt_vid_out(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- struct mxc_jpeg_ctx *ctx = mxc_jpeg_fh_to_ctx(priv);
+ struct mxc_jpeg_ctx *ctx = mxc_jpeg_file_to_ctx(file);
u32 type = ctx->mxc_jpeg->mode == MXC_JPEG_DECODE ? MXC_JPEG_FMT_TYPE_ENC :
MXC_JPEG_FMT_TYPE_RAW;
int ret;
@@ -2437,7 +2436,7 @@ static int mxc_jpeg_try_fmt(struct v4l2_format *f,
static int mxc_jpeg_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct mxc_jpeg_ctx *ctx = mxc_jpeg_fh_to_ctx(priv);
+ struct mxc_jpeg_ctx *ctx = mxc_jpeg_file_to_ctx(file);
struct mxc_jpeg_dev *jpeg = ctx->mxc_jpeg;
struct device *dev = jpeg->dev;
struct mxc_jpeg_q_data tmp_q;
@@ -2456,7 +2455,7 @@ static int mxc_jpeg_try_fmt_vid_cap(struct file *file, void *priv,
static int mxc_jpeg_try_fmt_vid_out(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct mxc_jpeg_ctx *ctx = mxc_jpeg_fh_to_ctx(priv);
+ struct mxc_jpeg_ctx *ctx = mxc_jpeg_file_to_ctx(file);
struct mxc_jpeg_dev *jpeg = ctx->mxc_jpeg;
struct device *dev = jpeg->dev;
struct mxc_jpeg_q_data tmp_q;
@@ -2508,20 +2507,20 @@ static int mxc_jpeg_s_fmt(struct mxc_jpeg_ctx *ctx,
static int mxc_jpeg_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- return mxc_jpeg_s_fmt(mxc_jpeg_fh_to_ctx(priv), f);
+ return mxc_jpeg_s_fmt(mxc_jpeg_file_to_ctx(file), f);
}
static int mxc_jpeg_s_fmt_vid_out(struct file *file, void *priv,
struct v4l2_format *f)
{
int ret;
- struct mxc_jpeg_ctx *ctx = mxc_jpeg_fh_to_ctx(priv);
+ struct mxc_jpeg_ctx *ctx = mxc_jpeg_file_to_ctx(file);
struct vb2_queue *dst_vq;
struct mxc_jpeg_q_data *q_data_cap;
enum v4l2_buf_type cap_type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
struct v4l2_format fc;
- ret = mxc_jpeg_s_fmt(mxc_jpeg_fh_to_ctx(priv), f);
+ ret = mxc_jpeg_s_fmt(ctx, f);
if (ret)
return ret;
@@ -2550,7 +2549,7 @@ static int mxc_jpeg_s_fmt_vid_out(struct file *file, void *priv,
static int mxc_jpeg_g_fmt_vid(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct mxc_jpeg_ctx *ctx = mxc_jpeg_fh_to_ctx(priv);
+ struct mxc_jpeg_ctx *ctx = mxc_jpeg_file_to_ctx(file);
struct mxc_jpeg_dev *jpeg = ctx->mxc_jpeg;
struct device *dev = jpeg->dev;
struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
@@ -2588,7 +2587,7 @@ static int mxc_jpeg_g_fmt_vid(struct file *file, void *priv,
static int mxc_jpeg_dec_g_selection(struct file *file, void *fh, struct v4l2_selection *s)
{
- struct mxc_jpeg_ctx *ctx = mxc_jpeg_fh_to_ctx(fh);
+ struct mxc_jpeg_ctx *ctx = mxc_jpeg_file_to_ctx(file);
struct mxc_jpeg_q_data *q_data_cap;
if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE && s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
@@ -2617,7 +2616,7 @@ static int mxc_jpeg_dec_g_selection(struct file *file, void *fh, struct v4l2_sel
static int mxc_jpeg_enc_g_selection(struct file *file, void *fh, struct v4l2_selection *s)
{
- struct mxc_jpeg_ctx *ctx = mxc_jpeg_fh_to_ctx(fh);
+ struct mxc_jpeg_ctx *ctx = mxc_jpeg_file_to_ctx(file);
struct mxc_jpeg_q_data *q_data_out;
if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT && s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
@@ -2645,7 +2644,7 @@ static int mxc_jpeg_enc_g_selection(struct file *file, void *fh, struct v4l2_sel
static int mxc_jpeg_g_selection(struct file *file, void *fh, struct v4l2_selection *s)
{
- struct mxc_jpeg_ctx *ctx = mxc_jpeg_fh_to_ctx(fh);
+ struct mxc_jpeg_ctx *ctx = mxc_jpeg_file_to_ctx(file);
if (ctx->mxc_jpeg->mode == MXC_JPEG_DECODE)
return mxc_jpeg_dec_g_selection(file, fh, s);
@@ -2655,7 +2654,7 @@ static int mxc_jpeg_g_selection(struct file *file, void *fh, struct v4l2_selecti
static int mxc_jpeg_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
{
- struct mxc_jpeg_ctx *ctx = mxc_jpeg_fh_to_ctx(fh);
+ struct mxc_jpeg_ctx *ctx = mxc_jpeg_file_to_ctx(file);
struct mxc_jpeg_q_data *q_data_out;
if (ctx->mxc_jpeg->mode != MXC_JPEG_ENCODE)
@@ -2735,7 +2734,7 @@ static const struct v4l2_ioctl_ops mxc_jpeg_ioctl_ops = {
static int mxc_jpeg_release(struct file *file)
{
struct mxc_jpeg_dev *mxc_jpeg = video_drvdata(file);
- struct mxc_jpeg_ctx *ctx = mxc_jpeg_fh_to_ctx(file->private_data);
+ struct mxc_jpeg_ctx *ctx = mxc_jpeg_file_to_ctx(file);
struct device *dev = mxc_jpeg->dev;
mutex_lock(&mxc_jpeg->lock);
@@ -2747,7 +2746,7 @@ static int mxc_jpeg_release(struct file *file)
ctx->slot);
v4l2_ctrl_handler_free(&ctx->ctrl_handler);
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
kfree(ctx);
mutex_unlock(&mxc_jpeg->lock);
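
A side effect visible throughout the mxc-jpeg hunks: ioctl handlers stop dereferencing the opaque priv/fh argument and derive their context from the struct file instead. A hedged sketch with hypothetical names:

static int foo_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
	/* priv still arrives from the v4l2 core but is no longer used. */
	struct foo_ctx *ctx = file_to_foo_ctx(file);

	*f = ctx->fmt;	/* ctx->fmt is a hypothetical cached format */
	return 0;
}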
diff --git a/drivers/media/platform/nxp/imx-mipi-csis.c b/drivers/media/platform/nxp/imx-mipi-csis.c
index 2beb5f43c2c0..d5de7854f579 100644
--- a/drivers/media/platform/nxp/imx-mipi-csis.c
+++ b/drivers/media/platform/nxp/imx-mipi-csis.c
@@ -54,23 +54,19 @@
/* CSIS common control */
#define MIPI_CSIS_CMN_CTRL 0x04
-#define MIPI_CSIS_CMN_CTRL_UPDATE_SHADOW BIT(16)
-#define MIPI_CSIS_CMN_CTRL_INTER_MODE BIT(10)
+#define MIPI_CSIS_CMN_CTRL_UPDATE_SHADOW(n) BIT((n) + 16)
+#define MIPI_CSIS_CMN_CTRL_INTERLEAVE_MODE_DT BIT(10)
+#define MIPI_CSIS_CMN_CTRL_LANE_NUMBER(n) ((n) << 8)
+#define MIPI_CSIS_CMN_CTRL_LANE_NUMBER_MASK GENMASK(9, 8)
#define MIPI_CSIS_CMN_CTRL_UPDATE_SHADOW_CTRL BIT(2)
-#define MIPI_CSIS_CMN_CTRL_RESET BIT(1)
-#define MIPI_CSIS_CMN_CTRL_ENABLE BIT(0)
-
-#define MIPI_CSIS_CMN_CTRL_LANE_NR_OFFSET 8
-#define MIPI_CSIS_CMN_CTRL_LANE_NR_MASK (3 << 8)
+#define MIPI_CSIS_CMN_CTRL_SW_RESET BIT(1)
+#define MIPI_CSIS_CMN_CTRL_CSI_EN BIT(0)
/* CSIS clock control */
#define MIPI_CSIS_CLK_CTRL 0x08
-#define MIPI_CSIS_CLK_CTRL_CLKGATE_TRAIL_CH3(x) ((x) << 28)
-#define MIPI_CSIS_CLK_CTRL_CLKGATE_TRAIL_CH2(x) ((x) << 24)
-#define MIPI_CSIS_CLK_CTRL_CLKGATE_TRAIL_CH1(x) ((x) << 20)
-#define MIPI_CSIS_CLK_CTRL_CLKGATE_TRAIL_CH0(x) ((x) << 16)
-#define MIPI_CSIS_CLK_CTRL_CLKGATE_EN_MSK (0xf << 4)
-#define MIPI_CSIS_CLK_CTRL_WCLK_SRC BIT(0)
+#define MIPI_CSIS_CLK_CTRL_CLKGATE_TRAIL(n, x) ((x) << ((n) * 4 + 16))
+#define MIPI_CSIS_CLK_CTRL_CLKGATE_EN_MASK GENMASK(7, 4)
+#define MIPI_CSIS_CLK_CTRL_WCLK_SRC(n) BIT(n)
/* CSIS Interrupt mask */
#define MIPI_CSIS_INT_MSK 0x10
@@ -87,7 +83,7 @@
#define MIPI_CSIS_INT_MSK_ERR_WRONG_CFG BIT(3)
#define MIPI_CSIS_INT_MSK_ERR_ECC BIT(2)
#define MIPI_CSIS_INT_MSK_ERR_CRC BIT(1)
-#define MIPI_CSIS_INT_MSK_ERR_UNKNOWN BIT(0)
+#define MIPI_CSIS_INT_MSK_ERR_ID BIT(0)
/* CSIS Interrupt source */
#define MIPI_CSIS_INT_SRC 0x14
@@ -98,16 +94,16 @@
#define MIPI_CSIS_INT_SRC_ODD_AFTER BIT(28)
#define MIPI_CSIS_INT_SRC_ODD (0x3 << 28)
#define MIPI_CSIS_INT_SRC_NON_IMAGE_DATA (0xf << 28)
-#define MIPI_CSIS_INT_SRC_FRAME_START BIT(24)
-#define MIPI_CSIS_INT_SRC_FRAME_END BIT(20)
-#define MIPI_CSIS_INT_SRC_ERR_SOT_HS BIT(16)
-#define MIPI_CSIS_INT_SRC_ERR_LOST_FS BIT(12)
-#define MIPI_CSIS_INT_SRC_ERR_LOST_FE BIT(8)
-#define MIPI_CSIS_INT_SRC_ERR_OVER BIT(4)
+#define MIPI_CSIS_INT_SRC_FRAME_START(n) BIT((n) + 24)
+#define MIPI_CSIS_INT_SRC_FRAME_END(n) BIT((n) + 20)
+#define MIPI_CSIS_INT_SRC_ERR_SOT_HS(n) BIT((n) + 16)
+#define MIPI_CSIS_INT_SRC_ERR_LOST_FS(n) BIT((n) + 12)
+#define MIPI_CSIS_INT_SRC_ERR_LOST_FE(n) BIT((n) + 8)
+#define MIPI_CSIS_INT_SRC_ERR_OVER(n) BIT((n) + 4)
#define MIPI_CSIS_INT_SRC_ERR_WRONG_CFG BIT(3)
#define MIPI_CSIS_INT_SRC_ERR_ECC BIT(2)
#define MIPI_CSIS_INT_SRC_ERR_CRC BIT(1)
-#define MIPI_CSIS_INT_SRC_ERR_UNKNOWN BIT(0)
+#define MIPI_CSIS_INT_SRC_ERR_ID BIT(0)
#define MIPI_CSIS_INT_SRC_ERRORS 0xfffff
/* D-PHY status control */
@@ -123,8 +119,8 @@
#define MIPI_CSIS_DPHY_CMN_CTRL_HSSETTLE_MASK GENMASK(31, 24)
#define MIPI_CSIS_DPHY_CMN_CTRL_CLKSETTLE(n) ((n) << 22)
#define MIPI_CSIS_DPHY_CMN_CTRL_CLKSETTLE_MASK GENMASK(23, 22)
-#define MIPI_CSIS_DPHY_CMN_CTRL_DPDN_SWAP_CLK BIT(6)
-#define MIPI_CSIS_DPHY_CMN_CTRL_DPDN_SWAP_DAT BIT(5)
+#define MIPI_CSIS_DPHY_CMN_CTRL_S_DPDN_SWAP_CLK BIT(6)
+#define MIPI_CSIS_DPHY_CMN_CTRL_S_DPDN_SWAP_DAT BIT(5)
#define MIPI_CSIS_DPHY_CMN_CTRL_ENABLE_DAT BIT(1)
#define MIPI_CSIS_DPHY_CMN_CTRL_ENABLE_CLK BIT(0)
#define MIPI_CSIS_DPHY_CMN_CTRL_ENABLE (0x1f << 0)
@@ -174,26 +170,28 @@
/* ISP Configuration register */
#define MIPI_CSIS_ISP_CONFIG_CH(n) (0x40 + (n) * 0x10)
-#define MIPI_CSIS_ISPCFG_MEM_FULL_GAP_MSK (0xff << 24)
+#define MIPI_CSIS_ISPCFG_MEM_FULL_GAP_MASK GENMASK(31, 24)
#define MIPI_CSIS_ISPCFG_MEM_FULL_GAP(x) ((x) << 24)
#define MIPI_CSIS_ISPCFG_PIXEL_MODE_SINGLE (0 << 12)
#define MIPI_CSIS_ISPCFG_PIXEL_MODE_DUAL (1 << 12)
#define MIPI_CSIS_ISPCFG_PIXEL_MODE_QUAD (2 << 12) /* i.MX8M[MNP] only */
-#define MIPI_CSIS_ISPCFG_PIXEL_MASK (3 << 12)
-#define MIPI_CSIS_ISPCFG_ALIGN_32BIT BIT(11)
-#define MIPI_CSIS_ISPCFG_FMT(fmt) ((fmt) << 2)
-#define MIPI_CSIS_ISPCFG_FMT_MASK (0x3f << 2)
+#define MIPI_CSIS_ISPCFG_PIXEL_MODE_MASK GENMASK(13, 12)
+#define MIPI_CSIS_ISPCFG_PARALLEL BIT(11)
+#define MIPI_CSIS_ISPCFG_DATAFORMAT(fmt) ((fmt) << 2)
+#define MIPI_CSIS_ISPCFG_DATAFORMAT_MASK GENMASK(7, 2)
/* ISP Image Resolution register */
#define MIPI_CSIS_ISP_RESOL_CH(n) (0x44 + (n) * 0x10)
+#define MIPI_CSIS_ISP_RESOL_VRESOL(n) ((n) << 16)
+#define MIPI_CSIS_ISP_RESOL_HRESOL(n) ((n) << 0)
#define CSIS_MAX_PIX_WIDTH 0xffff
#define CSIS_MAX_PIX_HEIGHT 0xffff
/* ISP SYNC register */
#define MIPI_CSIS_ISP_SYNC_CH(n) (0x48 + (n) * 0x10)
-#define MIPI_CSIS_ISP_SYNC_HSYNC_LINTV_OFFSET 18
-#define MIPI_CSIS_ISP_SYNC_VSYNC_SINTV_OFFSET 12
-#define MIPI_CSIS_ISP_SYNC_VSYNC_EINTV_OFFSET 0
+#define MIPI_CSIS_ISP_SYNC_HSYNC_LINTV(n) ((n) << 18)
+#define MIPI_CSIS_ISP_SYNC_VSYNC_SINTV(n) ((n) << 12)
+#define MIPI_CSIS_ISP_SYNC_VSYNC_EINTV(n) ((n) << 0)
/* ISP shadow registers */
#define MIPI_CSIS_SDW_CONFIG_CH(n) (0x80 + (n) * 0x10)
@@ -203,23 +201,23 @@
/* Debug control register */
#define MIPI_CSIS_DBG_CTRL 0xc0
#define MIPI_CSIS_DBG_INTR_MSK 0xc4
-#define MIPI_CSIS_DBG_INTR_MSK_DT_NOT_SUPPORT BIT(25)
-#define MIPI_CSIS_DBG_INTR_MSK_DT_IGNORE BIT(24)
-#define MIPI_CSIS_DBG_INTR_MSK_ERR_FRAME_SIZE BIT(20)
-#define MIPI_CSIS_DBG_INTR_MSK_TRUNCATED_FRAME BIT(16)
-#define MIPI_CSIS_DBG_INTR_MSK_EARLY_FE BIT(12)
-#define MIPI_CSIS_DBG_INTR_MSK_EARLY_FS BIT(8)
-#define MIPI_CSIS_DBG_INTR_MSK_CAM_VSYNC_FALL BIT(4)
-#define MIPI_CSIS_DBG_INTR_MSK_CAM_VSYNC_RISE BIT(0)
+#define MIPI_CSIS_DBG_INTR_MSK_DT_NOT_SUPPORT BIT(25)
+#define MIPI_CSIS_DBG_INTR_MSK_DT_IGNORE BIT(24)
+#define MIPI_CSIS_DBG_INTR_MSK_ERR_FRAME_SIZE(n) BIT((n) + 20)
+#define MIPI_CSIS_DBG_INTR_MSK_TRUNCATED_FRAME(n) BIT((n) + 16)
+#define MIPI_CSIS_DBG_INTR_MSK_EARLY_FE(n) BIT((n) + 12)
+#define MIPI_CSIS_DBG_INTR_MSK_EARLY_FS(n) BIT((n) + 8)
+#define MIPI_CSIS_DBG_INTR_MSK_CAM_VSYNC_FALL(n) BIT((n) + 4)
+#define MIPI_CSIS_DBG_INTR_MSK_CAM_VSYNC_RISE(n) BIT((n) + 0)
#define MIPI_CSIS_DBG_INTR_SRC 0xc8
-#define MIPI_CSIS_DBG_INTR_SRC_DT_NOT_SUPPORT BIT(25)
-#define MIPI_CSIS_DBG_INTR_SRC_DT_IGNORE BIT(24)
-#define MIPI_CSIS_DBG_INTR_SRC_ERR_FRAME_SIZE BIT(20)
-#define MIPI_CSIS_DBG_INTR_SRC_TRUNCATED_FRAME BIT(16)
-#define MIPI_CSIS_DBG_INTR_SRC_EARLY_FE BIT(12)
-#define MIPI_CSIS_DBG_INTR_SRC_EARLY_FS BIT(8)
-#define MIPI_CSIS_DBG_INTR_SRC_CAM_VSYNC_FALL BIT(4)
-#define MIPI_CSIS_DBG_INTR_SRC_CAM_VSYNC_RISE BIT(0)
+#define MIPI_CSIS_DBG_INTR_SRC_DT_NOT_SUPPORT BIT(25)
+#define MIPI_CSIS_DBG_INTR_SRC_DT_IGNORE BIT(24)
+#define MIPI_CSIS_DBG_INTR_SRC_ERR_FRAME_SIZE(n) BIT((n) + 20)
+#define MIPI_CSIS_DBG_INTR_SRC_TRUNCATED_FRAME(n) BIT((n) + 16)
+#define MIPI_CSIS_DBG_INTR_SRC_EARLY_FE(n) BIT((n) + 12)
+#define MIPI_CSIS_DBG_INTR_SRC_EARLY_FS(n) BIT((n) + 8)
+#define MIPI_CSIS_DBG_INTR_SRC_CAM_VSYNC_FALL(n) BIT((n) + 4)
+#define MIPI_CSIS_DBG_INTR_SRC_CAM_VSYNC_RISE(n) BIT((n) + 0)
#define MIPI_CSIS_FRAME_COUNTER_CH(n) (0x0100 + (n) * 4)
@@ -228,10 +226,11 @@
#define MIPI_CSIS_PKTDATA_EVEN 0x3000
#define MIPI_CSIS_PKTDATA_SIZE SZ_4K
-#define DEFAULT_SCLK_CSIS_FREQ 166000000UL
+#define MIPI_CSIS_MAX_CHANNELS 4
struct mipi_csis_event {
bool debug;
+ unsigned int channel;
u32 mask;
const char * const name;
unsigned int counter;
@@ -239,33 +238,70 @@ struct mipi_csis_event {
static const struct mipi_csis_event mipi_csis_events[] = {
/* Errors */
- { false, MIPI_CSIS_INT_SRC_ERR_SOT_HS, "SOT Error" },
- { false, MIPI_CSIS_INT_SRC_ERR_LOST_FS, "Lost Frame Start Error" },
- { false, MIPI_CSIS_INT_SRC_ERR_LOST_FE, "Lost Frame End Error" },
- { false, MIPI_CSIS_INT_SRC_ERR_OVER, "FIFO Overflow Error" },
- { false, MIPI_CSIS_INT_SRC_ERR_WRONG_CFG, "Wrong Configuration Error" },
- { false, MIPI_CSIS_INT_SRC_ERR_ECC, "ECC Error" },
- { false, MIPI_CSIS_INT_SRC_ERR_CRC, "CRC Error" },
- { false, MIPI_CSIS_INT_SRC_ERR_UNKNOWN, "Unknown Error" },
- { true, MIPI_CSIS_DBG_INTR_SRC_DT_NOT_SUPPORT, "Data Type Not Supported" },
- { true, MIPI_CSIS_DBG_INTR_SRC_DT_IGNORE, "Data Type Ignored" },
- { true, MIPI_CSIS_DBG_INTR_SRC_ERR_FRAME_SIZE, "Frame Size Error" },
- { true, MIPI_CSIS_DBG_INTR_SRC_TRUNCATED_FRAME, "Truncated Frame" },
- { true, MIPI_CSIS_DBG_INTR_SRC_EARLY_FE, "Early Frame End" },
- { true, MIPI_CSIS_DBG_INTR_SRC_EARLY_FS, "Early Frame Start" },
+ { false, 0, MIPI_CSIS_INT_SRC_ERR_SOT_HS(0), "SOT 0 Error" },
+ { false, 0, MIPI_CSIS_INT_SRC_ERR_SOT_HS(1), "SOT 1 Error" },
+ { false, 0, MIPI_CSIS_INT_SRC_ERR_SOT_HS(2), "SOT 2 Error" },
+ { false, 0, MIPI_CSIS_INT_SRC_ERR_SOT_HS(3), "SOT 3 Error" },
+ { false, 0, MIPI_CSIS_INT_SRC_ERR_LOST_FS(0), "Lost Frame Start Error 0" },
+ { false, 1, MIPI_CSIS_INT_SRC_ERR_LOST_FS(1), "Lost Frame Start Error 1" },
+ { false, 2, MIPI_CSIS_INT_SRC_ERR_LOST_FS(2), "Lost Frame Start Error 2" },
+ { false, 3, MIPI_CSIS_INT_SRC_ERR_LOST_FS(3), "Lost Frame Start Error 3" },
+ { false, 0, MIPI_CSIS_INT_SRC_ERR_LOST_FE(0), "Lost Frame End Error 0" },
+ { false, 1, MIPI_CSIS_INT_SRC_ERR_LOST_FE(1), "Lost Frame End Error 1" },
+ { false, 2, MIPI_CSIS_INT_SRC_ERR_LOST_FE(2), "Lost Frame End Error 2" },
+ { false, 3, MIPI_CSIS_INT_SRC_ERR_LOST_FE(3), "Lost Frame End Error 3" },
+ { false, 0, MIPI_CSIS_INT_SRC_ERR_OVER(0), "FIFO Overflow Error 0" },
+ { false, 1, MIPI_CSIS_INT_SRC_ERR_OVER(1), "FIFO Overflow Error 1" },
+ { false, 2, MIPI_CSIS_INT_SRC_ERR_OVER(2), "FIFO Overflow Error 2" },
+ { false, 3, MIPI_CSIS_INT_SRC_ERR_OVER(3), "FIFO Overflow Error 3" },
+ { false, 0, MIPI_CSIS_INT_SRC_ERR_WRONG_CFG, "Wrong Configuration Error" },
+ { false, 0, MIPI_CSIS_INT_SRC_ERR_ECC, "ECC Error" },
+ { false, 0, MIPI_CSIS_INT_SRC_ERR_CRC, "CRC Error" },
+ { false, 0, MIPI_CSIS_INT_SRC_ERR_ID, "Unknown ID Error" },
+ { true, 0, MIPI_CSIS_DBG_INTR_SRC_DT_NOT_SUPPORT, "Data Type Not Supported" },
+ { true, 0, MIPI_CSIS_DBG_INTR_SRC_DT_IGNORE, "Data Type Ignored" },
+ { true, 0, MIPI_CSIS_DBG_INTR_SRC_ERR_FRAME_SIZE(0), "Frame Size Error 0" },
+ { true, 1, MIPI_CSIS_DBG_INTR_SRC_ERR_FRAME_SIZE(1), "Frame Size Error 1" },
+ { true, 2, MIPI_CSIS_DBG_INTR_SRC_ERR_FRAME_SIZE(2), "Frame Size Error 2" },
+ { true, 3, MIPI_CSIS_DBG_INTR_SRC_ERR_FRAME_SIZE(3), "Frame Size Error 3" },
+ { true, 0, MIPI_CSIS_DBG_INTR_SRC_TRUNCATED_FRAME(0), "Truncated Frame 0" },
+ { true, 1, MIPI_CSIS_DBG_INTR_SRC_TRUNCATED_FRAME(1), "Truncated Frame 1" },
+ { true, 2, MIPI_CSIS_DBG_INTR_SRC_TRUNCATED_FRAME(2), "Truncated Frame 2" },
+ { true, 3, MIPI_CSIS_DBG_INTR_SRC_TRUNCATED_FRAME(3), "Truncated Frame 3" },
+ { true, 0, MIPI_CSIS_DBG_INTR_SRC_EARLY_FE(0), "Early Frame End 0" },
+ { true, 1, MIPI_CSIS_DBG_INTR_SRC_EARLY_FE(1), "Early Frame End 1" },
+ { true, 2, MIPI_CSIS_DBG_INTR_SRC_EARLY_FE(2), "Early Frame End 2" },
+ { true, 3, MIPI_CSIS_DBG_INTR_SRC_EARLY_FE(3), "Early Frame End 3" },
+ { true, 0, MIPI_CSIS_DBG_INTR_SRC_EARLY_FS(0), "Early Frame Start 0" },
+ { true, 1, MIPI_CSIS_DBG_INTR_SRC_EARLY_FS(1), "Early Frame Start 1" },
+ { true, 2, MIPI_CSIS_DBG_INTR_SRC_EARLY_FS(2), "Early Frame Start 2" },
+ { true, 3, MIPI_CSIS_DBG_INTR_SRC_EARLY_FS(3), "Early Frame Start 3" },
/* Non-image data receive events */
- { false, MIPI_CSIS_INT_SRC_EVEN_BEFORE, "Non-image data before even frame" },
- { false, MIPI_CSIS_INT_SRC_EVEN_AFTER, "Non-image data after even frame" },
- { false, MIPI_CSIS_INT_SRC_ODD_BEFORE, "Non-image data before odd frame" },
- { false, MIPI_CSIS_INT_SRC_ODD_AFTER, "Non-image data after odd frame" },
+ { false, 0, MIPI_CSIS_INT_SRC_EVEN_BEFORE, "Non-image data before even frame" },
+ { false, 0, MIPI_CSIS_INT_SRC_EVEN_AFTER, "Non-image data after even frame" },
+ { false, 0, MIPI_CSIS_INT_SRC_ODD_BEFORE, "Non-image data before odd frame" },
+ { false, 0, MIPI_CSIS_INT_SRC_ODD_AFTER, "Non-image data after odd frame" },
/* Frame start/end */
- { false, MIPI_CSIS_INT_SRC_FRAME_START, "Frame Start" },
- { false, MIPI_CSIS_INT_SRC_FRAME_END, "Frame End" },
- { true, MIPI_CSIS_DBG_INTR_SRC_CAM_VSYNC_FALL, "VSYNC Falling Edge" },
- { true, MIPI_CSIS_DBG_INTR_SRC_CAM_VSYNC_RISE, "VSYNC Rising Edge" },
+ { false, 0, MIPI_CSIS_INT_SRC_FRAME_START(0), "Frame Start 0" },
+ { false, 1, MIPI_CSIS_INT_SRC_FRAME_START(1), "Frame Start 1" },
+ { false, 2, MIPI_CSIS_INT_SRC_FRAME_START(2), "Frame Start 2" },
+ { false, 3, MIPI_CSIS_INT_SRC_FRAME_START(3), "Frame Start 3" },
+ { false, 0, MIPI_CSIS_INT_SRC_FRAME_END(0), "Frame End 0" },
+ { false, 1, MIPI_CSIS_INT_SRC_FRAME_END(1), "Frame End 1" },
+ { false, 2, MIPI_CSIS_INT_SRC_FRAME_END(2), "Frame End 2" },
+ { false, 3, MIPI_CSIS_INT_SRC_FRAME_END(3), "Frame End 3" },
+ { true, 0, MIPI_CSIS_DBG_INTR_SRC_CAM_VSYNC_FALL(0), "VSYNC Falling Edge 0" },
+ { true, 1, MIPI_CSIS_DBG_INTR_SRC_CAM_VSYNC_FALL(1), "VSYNC Falling Edge 1" },
+ { true, 2, MIPI_CSIS_DBG_INTR_SRC_CAM_VSYNC_FALL(2), "VSYNC Falling Edge 2" },
+ { true, 3, MIPI_CSIS_DBG_INTR_SRC_CAM_VSYNC_FALL(3), "VSYNC Falling Edge 3" },
+ { true, 0, MIPI_CSIS_DBG_INTR_SRC_CAM_VSYNC_RISE(0), "VSYNC Rising Edge 0" },
+ { true, 1, MIPI_CSIS_DBG_INTR_SRC_CAM_VSYNC_RISE(1), "VSYNC Rising Edge 1" },
+ { true, 2, MIPI_CSIS_DBG_INTR_SRC_CAM_VSYNC_RISE(2), "VSYNC Rising Edge 2" },
+ { true, 3, MIPI_CSIS_DBG_INTR_SRC_CAM_VSYNC_RISE(3), "VSYNC Rising Edge 3" },
};
-#define MIPI_CSIS_NUM_EVENTS ARRAY_SIZE(mipi_csis_events)
+#define MIPI_CSIS_NUM_EVENTS ARRAY_SIZE(mipi_csis_events)
+#define MIPI_CSIS_NUM_ERROR_EVENTS 38
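
The reworked defines fold the channel index into the bit position, e.g. MIPI_CSIS_INT_SRC_FRAME_START(n) expands to BIT((n) + 24), so channel 2's frame-start flag is BIT(26). An illustrative helper (not part of the patch) that builds a combined mask for the active channels:

static u32 mipi_csis_frame_start_mask(unsigned int num_channels)
{
	u32 mask = 0;
	unsigned int n;

	/* OR together BIT(n + 24) for each active channel. */
	for (n = 0; n < num_channels; n++)
		mask |= MIPI_CSIS_INT_SRC_FRAME_START(n);

	return mask;
}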
enum mipi_csis_clk {
MIPI_CSIS_CLK_PCLK,
@@ -297,7 +333,9 @@ struct mipi_csis_device {
struct clk_bulk_data *clks;
struct reset_control *mrst;
struct regulator *mipi_phy_regulator;
+
const struct mipi_csis_info *info;
+ unsigned int num_channels;
struct v4l2_subdev sd;
struct media_pad pads[CSIS_PADS_NUM];
@@ -517,7 +555,7 @@ static void mipi_csis_sw_reset(struct mipi_csis_device *csis)
u32 val = mipi_csis_read(csis, MIPI_CSIS_CMN_CTRL);
mipi_csis_write(csis, MIPI_CSIS_CMN_CTRL,
- val | MIPI_CSIS_CMN_CTRL_RESET);
+ val | MIPI_CSIS_CMN_CTRL_SW_RESET);
usleep_range(10, 20);
}
@@ -527,9 +565,9 @@ static void mipi_csis_system_enable(struct mipi_csis_device *csis, int on)
val = mipi_csis_read(csis, MIPI_CSIS_CMN_CTRL);
if (on)
- val |= MIPI_CSIS_CMN_CTRL_ENABLE;
+ val |= MIPI_CSIS_CMN_CTRL_CSI_EN;
else
- val &= ~MIPI_CSIS_CMN_CTRL_ENABLE;
+ val &= ~MIPI_CSIS_CMN_CTRL_CSI_EN;
mipi_csis_write(csis, MIPI_CSIS_CMN_CTRL, val);
val = mipi_csis_read(csis, MIPI_CSIS_DPHY_CMN_CTRL);
@@ -549,8 +587,8 @@ static void __mipi_csis_set_format(struct mipi_csis_device *csis,
/* Color format */
val = mipi_csis_read(csis, MIPI_CSIS_ISP_CONFIG_CH(0));
- val &= ~(MIPI_CSIS_ISPCFG_ALIGN_32BIT | MIPI_CSIS_ISPCFG_FMT_MASK
- | MIPI_CSIS_ISPCFG_PIXEL_MASK);
+ val &= ~(MIPI_CSIS_ISPCFG_PARALLEL | MIPI_CSIS_ISPCFG_PIXEL_MODE_MASK |
+ MIPI_CSIS_ISPCFG_DATAFORMAT_MASK);
/*
* YUV 4:2:2 can be transferred with 8 or 16 bits per clock sample
@@ -568,24 +606,23 @@ static void __mipi_csis_set_format(struct mipi_csis_device *csis,
if (csis_fmt->data_type == MIPI_CSI2_DT_YUV422_8B)
val |= MIPI_CSIS_ISPCFG_PIXEL_MODE_DUAL;
- val |= MIPI_CSIS_ISPCFG_FMT(csis_fmt->data_type);
+ val |= MIPI_CSIS_ISPCFG_DATAFORMAT(csis_fmt->data_type);
mipi_csis_write(csis, MIPI_CSIS_ISP_CONFIG_CH(0), val);
/* Pixel resolution */
- val = format->width | (format->height << 16);
- mipi_csis_write(csis, MIPI_CSIS_ISP_RESOL_CH(0), val);
+ mipi_csis_write(csis, MIPI_CSIS_ISP_RESOL_CH(0),
+ MIPI_CSIS_ISP_RESOL_VRESOL(format->height) |
+ MIPI_CSIS_ISP_RESOL_HRESOL(format->width));
}
static int mipi_csis_calculate_params(struct mipi_csis_device *csis,
const struct csis_pix_format *csis_fmt)
{
- struct media_pad *src_pad =
- &csis->source.sd->entity.pads[csis->source.pad->index];
s64 link_freq;
u32 lane_rate;
/* Calculate the line rate from the pixel rate. */
- link_freq = v4l2_get_link_freq(src_pad, csis_fmt->width,
+ link_freq = v4l2_get_link_freq(csis->source.pad, csis_fmt->width,
csis->bus.num_data_lanes * 2);
if (link_freq < 0) {
dev_err(csis->dev, "Unable to obtain link frequency: %d\n",
@@ -635,10 +672,10 @@ static void mipi_csis_set_params(struct mipi_csis_device *csis,
u32 val;
val = mipi_csis_read(csis, MIPI_CSIS_CMN_CTRL);
- val &= ~MIPI_CSIS_CMN_CTRL_LANE_NR_MASK;
- val |= (lanes - 1) << MIPI_CSIS_CMN_CTRL_LANE_NR_OFFSET;
+ val &= ~MIPI_CSIS_CMN_CTRL_LANE_NUMBER_MASK;
+ val |= MIPI_CSIS_CMN_CTRL_LANE_NUMBER(lanes - 1);
if (csis->info->version == MIPI_CSIS_V3_3)
- val |= MIPI_CSIS_CMN_CTRL_INTER_MODE;
+ val |= MIPI_CSIS_CMN_CTRL_INTERLEAVE_MODE_DT;
mipi_csis_write(csis, MIPI_CSIS_CMN_CTRL, val);
__mipi_csis_set_format(csis, format, csis_fmt);
@@ -647,15 +684,15 @@ static void mipi_csis_set_params(struct mipi_csis_device *csis,
MIPI_CSIS_DPHY_CMN_CTRL_HSSETTLE(csis->hs_settle) |
MIPI_CSIS_DPHY_CMN_CTRL_CLKSETTLE(csis->clk_settle));
- val = (0 << MIPI_CSIS_ISP_SYNC_HSYNC_LINTV_OFFSET)
- | (0 << MIPI_CSIS_ISP_SYNC_VSYNC_SINTV_OFFSET)
- | (0 << MIPI_CSIS_ISP_SYNC_VSYNC_EINTV_OFFSET);
- mipi_csis_write(csis, MIPI_CSIS_ISP_SYNC_CH(0), val);
+ mipi_csis_write(csis, MIPI_CSIS_ISP_SYNC_CH(0),
+ MIPI_CSIS_ISP_SYNC_HSYNC_LINTV(0) |
+ MIPI_CSIS_ISP_SYNC_VSYNC_SINTV(0) |
+ MIPI_CSIS_ISP_SYNC_VSYNC_EINTV(0));
val = mipi_csis_read(csis, MIPI_CSIS_CLK_CTRL);
- val |= MIPI_CSIS_CLK_CTRL_WCLK_SRC;
- val |= MIPI_CSIS_CLK_CTRL_CLKGATE_TRAIL_CH0(15);
- val &= ~MIPI_CSIS_CLK_CTRL_CLKGATE_EN_MSK;
+ val |= MIPI_CSIS_CLK_CTRL_WCLK_SRC(0);
+ val |= MIPI_CSIS_CLK_CTRL_CLKGATE_TRAIL(0, 15);
+ val &= ~MIPI_CSIS_CLK_CTRL_CLKGATE_EN_MASK;
mipi_csis_write(csis, MIPI_CSIS_CLK_CTRL, val);
mipi_csis_write(csis, MIPI_CSIS_DPHY_BCTRL_L,
@@ -671,7 +708,7 @@ static void mipi_csis_set_params(struct mipi_csis_device *csis,
/* Update the shadow register. */
val = mipi_csis_read(csis, MIPI_CSIS_CMN_CTRL);
mipi_csis_write(csis, MIPI_CSIS_CMN_CTRL,
- val | MIPI_CSIS_CMN_CTRL_UPDATE_SHADOW |
+ val | MIPI_CSIS_CMN_CTRL_UPDATE_SHADOW(0) |
MIPI_CSIS_CMN_CTRL_UPDATE_SHADOW_CTRL);
}
@@ -704,12 +741,17 @@ static int mipi_csis_clk_get(struct mipi_csis_device *csis)
if (ret < 0)
return ret;
- /* Set clock rate */
- ret = clk_set_rate(csis->clks[MIPI_CSIS_CLK_WRAP].clk,
- csis->clk_frequency);
- if (ret < 0)
- dev_err(csis->dev, "set rate=%d failed: %d\n",
- csis->clk_frequency, ret);
+ if (csis->clk_frequency) {
+ /*
+ * Set the clock rate. This is deprecated and kept only for
+ * backward compatibility with old device trees.
+ */
+ ret = clk_set_rate(csis->clks[MIPI_CSIS_CLK_WRAP].clk,
+ csis->clk_frequency);
+ if (ret < 0)
+ dev_err(csis->dev, "set rate=%d failed: %d\n",
+ csis->clk_frequency, ret);
+ }
return ret;
}
@@ -757,16 +799,19 @@ static irqreturn_t mipi_csis_irq_handler(int irq, void *dev_id)
/* Update the event/error counters */
if ((status & MIPI_CSIS_INT_SRC_ERRORS) || csis->debug.enable) {
- for (i = 0; i < MIPI_CSIS_NUM_EVENTS; i++) {
+ for (i = 0; i < ARRAY_SIZE(csis->events); i++) {
struct mipi_csis_event *event = &csis->events[i];
+ if (event->channel >= csis->num_channels)
+ continue;
+
if ((!event->debug && (status & event->mask)) ||
(event->debug && (dbg_status & event->mask)))
event->counter++;
}
}
- if (status & MIPI_CSIS_INT_SRC_FRAME_START)
+ if (status & MIPI_CSIS_INT_SRC_FRAME_START(0))
mipi_csis_queue_event_sof(csis);
spin_unlock_irqrestore(&csis->slock, flags);
@@ -843,7 +888,7 @@ static void mipi_csis_clear_counters(struct mipi_csis_device *csis)
static void mipi_csis_log_counters(struct mipi_csis_device *csis, bool non_errors)
{
unsigned int num_events = non_errors ? MIPI_CSIS_NUM_EVENTS
- : MIPI_CSIS_NUM_EVENTS - 8;
+ : MIPI_CSIS_NUM_ERROR_EVENTS;
unsigned int counters[MIPI_CSIS_NUM_EVENTS];
unsigned long flags;
unsigned int i;
@@ -854,45 +899,67 @@ static void mipi_csis_log_counters(struct mipi_csis_device *csis, bool non_error
spin_unlock_irqrestore(&csis->slock, flags);
for (i = 0; i < num_events; ++i) {
+ const struct mipi_csis_event *event = &csis->events[i];
+
+ if (event->channel >= csis->num_channels)
+ continue;
+
if (counters[i] > 0 || csis->debug.enable)
dev_info(csis->dev, "%s events: %d\n",
- csis->events[i].name,
- counters[i]);
+ event->name, counters[i]);
}
}
+struct mipi_csis_reg_info {
+ u32 addr;
+ unsigned int offset;
+ const char * const name;
+};
+
+static void mipi_csis_dump_channel_reg(struct mipi_csis_device *csis,
+ const struct mipi_csis_reg_info *reg,
+ unsigned int channel)
+{
+ dev_info(csis->dev, "%16s%u: 0x%08x\n", reg->name, channel,
+ mipi_csis_read(csis, reg->addr + channel * reg->offset));
+}
+
static int mipi_csis_dump_regs(struct mipi_csis_device *csis)
{
- static const struct {
- u32 offset;
- const char * const name;
- } registers[] = {
- { MIPI_CSIS_CMN_CTRL, "CMN_CTRL" },
- { MIPI_CSIS_CLK_CTRL, "CLK_CTRL" },
- { MIPI_CSIS_INT_MSK, "INT_MSK" },
- { MIPI_CSIS_DPHY_STATUS, "DPHY_STATUS" },
- { MIPI_CSIS_DPHY_CMN_CTRL, "DPHY_CMN_CTRL" },
- { MIPI_CSIS_DPHY_SCTRL_L, "DPHY_SCTRL_L" },
- { MIPI_CSIS_DPHY_SCTRL_H, "DPHY_SCTRL_H" },
- { MIPI_CSIS_ISP_CONFIG_CH(0), "ISP_CONFIG_CH0" },
- { MIPI_CSIS_ISP_RESOL_CH(0), "ISP_RESOL_CH0" },
- { MIPI_CSIS_SDW_CONFIG_CH(0), "SDW_CONFIG_CH0" },
- { MIPI_CSIS_SDW_RESOL_CH(0), "SDW_RESOL_CH0" },
- { MIPI_CSIS_DBG_CTRL, "DBG_CTRL" },
- { MIPI_CSIS_FRAME_COUNTER_CH(0), "FRAME_COUNTER_CH0" },
+ static const struct mipi_csis_reg_info common_registers[] = {
+ { MIPI_CSIS_CMN_CTRL, 0, "CMN_CTRL" },
+ { MIPI_CSIS_CLK_CTRL, 0, "CLK_CTRL" },
+ { MIPI_CSIS_INT_MSK, 0, "INT_MSK" },
+ { MIPI_CSIS_DPHY_STATUS, 0, "DPHY_STATUS" },
+ { MIPI_CSIS_DPHY_CMN_CTRL, 0, "DPHY_CMN_CTRL" },
+ { MIPI_CSIS_DPHY_SCTRL_L, 0, "DPHY_SCTRL_L" },
+ { MIPI_CSIS_DPHY_SCTRL_H, 0, "DPHY_SCTRL_H" },
+ { MIPI_CSIS_DBG_CTRL, 0, "DBG_CTRL" },
+ };
+ static const struct mipi_csis_reg_info channel_registers[] = {
+ { MIPI_CSIS_ISP_CONFIG_CH(0), 0x10, "ISP_CONFIG_CH" },
+ { MIPI_CSIS_ISP_RESOL_CH(0), 0x10, "ISP_RESOL_CH" },
+ { MIPI_CSIS_SDW_CONFIG_CH(0), 0x10, "SDW_CONFIG_CH" },
+ { MIPI_CSIS_SDW_RESOL_CH(0), 0x10, "SDW_RESOL_CH" },
+ { MIPI_CSIS_FRAME_COUNTER_CH(0), 4, "FRAME_COUNTER_CH" },
};
-
- unsigned int i;
- u32 cfg;
if (!pm_runtime_get_if_in_use(csis->dev))
return 0;
dev_info(csis->dev, "--- REGISTERS ---\n");
- for (i = 0; i < ARRAY_SIZE(registers); i++) {
- cfg = mipi_csis_read(csis, registers[i].offset);
- dev_info(csis->dev, "%14s: 0x%08x\n", registers[i].name, cfg);
+ for (unsigned int i = 0; i < ARRAY_SIZE(common_registers); i++) {
+ const struct mipi_csis_reg_info *reg = &common_registers[i];
+
+ dev_info(csis->dev, "%17s: 0x%08x\n", reg->name,
+ mipi_csis_read(csis, reg->addr));
+ }
+
+ for (unsigned int chan = 0; chan < csis->num_channels; chan++) {
+ for (unsigned int i = 0; i < ARRAY_SIZE(channel_registers); ++i)
+ mipi_csis_dump_channel_reg(csis, &channel_registers[i],
+ chan);
}
pm_runtime_put(csis->dev);
@@ -998,7 +1065,7 @@ err_unlock:
}
static int mipi_csis_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_state *state,
struct v4l2_subdev_mbus_code_enum *code)
{
/*
@@ -1011,7 +1078,7 @@ static int mipi_csis_enum_mbus_code(struct v4l2_subdev *sd,
if (code->index > 0)
return -EINVAL;
- fmt = v4l2_subdev_state_get_format(sd_state, code->pad);
+ fmt = v4l2_subdev_state_get_format(state, code->pad);
code->code = fmt->code;
return 0;
}
@@ -1028,10 +1095,10 @@ static int mipi_csis_enum_mbus_code(struct v4l2_subdev *sd,
}
static int mipi_csis_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_state *state,
struct v4l2_subdev_format *sdformat)
{
- struct csis_pix_format const *csis_fmt;
+ const struct csis_pix_format *csis_fmt;
struct v4l2_mbus_framefmt *fmt;
unsigned int align;
@@ -1040,7 +1107,7 @@ static int mipi_csis_set_fmt(struct v4l2_subdev *sd,
* modified.
*/
if (sdformat->pad == CSIS_PAD_SOURCE)
- return v4l2_subdev_get_fmt(sd, sd_state, sdformat);
+ return v4l2_subdev_get_fmt(sd, state, sdformat);
if (sdformat->pad != CSIS_PAD_SINK)
return -EINVAL;
@@ -1078,7 +1145,7 @@ static int mipi_csis_set_fmt(struct v4l2_subdev *sd,
&sdformat->format.height, 1,
CSIS_MAX_PIX_HEIGHT, 0, 0);
- fmt = v4l2_subdev_state_get_format(sd_state, sdformat->pad);
+ fmt = v4l2_subdev_state_get_format(state, sdformat->pad);
fmt->code = csis_fmt->code;
fmt->width = sdformat->format.width;
@@ -1092,7 +1159,7 @@ static int mipi_csis_set_fmt(struct v4l2_subdev *sd,
sdformat->format = *fmt;
/* Propagate the format from sink to source. */
- fmt = v4l2_subdev_state_get_format(sd_state, CSIS_PAD_SOURCE);
+ fmt = v4l2_subdev_state_get_format(state, CSIS_PAD_SOURCE);
*fmt = sdformat->format;
/* The format on the source pad might change due to unpacking. */
@@ -1132,7 +1199,7 @@ static int mipi_csis_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
}
static int mipi_csis_init_state(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state)
+ struct v4l2_subdev_state *state)
{
struct v4l2_subdev_format fmt = {
.pad = CSIS_PAD_SINK,
@@ -1149,7 +1216,7 @@ static int mipi_csis_init_state(struct v4l2_subdev *sd,
V4L2_MAP_QUANTIZATION_DEFAULT(false, fmt.format.colorspace,
fmt.format.ycbcr_enc);
- return mipi_csis_set_fmt(sd, sd_state, &fmt);
+ return mipi_csis_set_fmt(sd, state, &fmt);
}
static int mipi_csis_log_status(struct v4l2_subdev *sd)
@@ -1413,9 +1480,13 @@ static int mipi_csis_parse_dt(struct mipi_csis_device *csis)
{
struct device_node *node = csis->dev->of_node;
- if (of_property_read_u32(node, "clock-frequency",
- &csis->clk_frequency))
- csis->clk_frequency = DEFAULT_SCLK_CSIS_FREQ;
+ of_property_read_u32(node, "clock-frequency", &csis->clk_frequency);
+
+ csis->num_channels = 1;
+ of_property_read_u32(node, "fsl,num-channels", &csis->num_channels);
+ if (csis->num_channels < 1 || csis->num_channels > MIPI_CSIS_MAX_CHANNELS)
+ return dev_err_probe(csis->dev, -EINVAL,
+ "Invalid fsl,num-channels value\n");
return 0;
}
@@ -1440,10 +1511,8 @@ static int mipi_csis_probe(struct platform_device *pdev)
/* Parse DT properties. */
ret = mipi_csis_parse_dt(csis);
- if (ret < 0) {
- dev_err(dev, "Failed to parse device tree: %d\n", ret);
+ if (ret < 0)
return ret;
- }
/* Acquire resources. */
csis->regs = devm_platform_ioremap_resource(pdev, 0);
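
mipi_csis_parse_dt() now treats clock-frequency as optional and bounds the new fsl,num-channels property, reporting failures through dev_err_probe() so the caller's own dev_err() can go away. A sketch of that validation pattern, in a hypothetical wrapper:

static int foo_parse_dt(struct device *dev, struct device_node *node,
			u32 *channels)
{
	*channels = 1;	/* default when the property is absent */
	of_property_read_u32(node, "fsl,num-channels", channels);

	/* dev_err_probe() logs and returns the error code in one step. */
	if (*channels < 1 || *channels > MIPI_CSIS_MAX_CHANNELS)
		return dev_err_probe(dev, -EINVAL,
				     "Invalid fsl,num-channels value\n");
	return 0;
}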
diff --git a/drivers/media/platform/nxp/imx-pxp.c b/drivers/media/platform/nxp/imx-pxp.c
index 7f8ffbac582f..6cc9b07ea53a 100644
--- a/drivers/media/platform/nxp/imx-pxp.c
+++ b/drivers/media/platform/nxp/imx-pxp.c
@@ -248,7 +248,7 @@ struct pxp_ctx {
static inline struct pxp_ctx *file2ctx(struct file *file)
{
- return container_of(file->private_data, struct pxp_ctx, fh);
+ return container_of(file_to_v4l2_fh(file), struct pxp_ctx, fh);
}
static struct pxp_q_data *get_q_data(struct pxp_ctx *ctx,
@@ -1660,7 +1660,6 @@ static int pxp_open(struct file *file)
}
v4l2_fh_init(&ctx->fh, video_devdata(file));
- file->private_data = &ctx->fh;
ctx->dev = dev;
hdl = &ctx->hdl;
v4l2_ctrl_handler_init(hdl, 4);
@@ -1699,7 +1698,7 @@ static int pxp_open(struct file *file)
goto open_unlock;
}
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
atomic_inc(&dev->num_inst);
dprintk(dev, "Created instance: %p, m2m_ctx: %p\n",
@@ -1717,7 +1716,7 @@ static int pxp_release(struct file *file)
dprintk(dev, "Releasing instance %p\n", ctx);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
v4l2_ctrl_handler_free(&ctx->hdl);
mutex_lock(&dev->dev_mutex);
diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c
index 981648a03113..adc8d9960bf0 100644
--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c
+++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c
@@ -374,6 +374,8 @@ static int mxc_isi_pm_suspend(struct device *dev)
mxc_isi_video_suspend(pipe);
}
+ mxc_isi_m2m_suspend(&isi->m2m);
+
return pm_runtime_force_suspend(dev);
}
@@ -403,6 +405,12 @@ static int mxc_isi_pm_resume(struct device *dev)
}
}
+ ret = mxc_isi_m2m_resume(&isi->m2m);
+ if (ret) {
+ dev_err(dev, "Failed to resume ISI (%d) for m2m\n", ret);
+ err = ret;
+ }
+
return err;
}
diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.h b/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.h
index 206995bedca4..e84af5127e4e 100644
--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.h
+++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.h
@@ -202,9 +202,8 @@ struct mxc_isi_video {
struct video_device vdev;
struct media_pad pad;
- /* Protects is_streaming, and the vdev and vb2_q operations */
+ /* Protects the vdev and vb2_q operations */
struct mutex lock;
- bool is_streaming;
struct v4l2_pix_format_mplane pix;
const struct mxc_isi_format_info *fmtinfo;
@@ -343,6 +342,8 @@ int mxc_isi_video_buffer_prepare(struct mxc_isi_dev *isi, struct vb2_buffer *vb2
#ifdef CONFIG_VIDEO_IMX8_ISI_M2M
int mxc_isi_m2m_register(struct mxc_isi_dev *isi, struct v4l2_device *v4l2_dev);
int mxc_isi_m2m_unregister(struct mxc_isi_dev *isi);
+void mxc_isi_m2m_suspend(struct mxc_isi_m2m *m2m);
+int mxc_isi_m2m_resume(struct mxc_isi_m2m *m2m);
#else
static inline int mxc_isi_m2m_register(struct mxc_isi_dev *isi,
struct v4l2_device *v4l2_dev)
@@ -353,6 +354,13 @@ static inline int mxc_isi_m2m_unregister(struct mxc_isi_dev *isi)
{
return 0;
}
+static inline void mxc_isi_m2m_suspend(struct mxc_isi_m2m *m2m)
+{
+}
+static inline int mxc_isi_m2m_resume(struct mxc_isi_m2m *m2m)
+{
+ return 0;
+}
#endif
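
The header keeps the usual conditional-stub convention: when CONFIG_VIDEO_IMX8_ISI_M2M is disabled, static inline no-ops stand in for the new suspend/resume entry points so imx8-isi-core.c builds without #ifdefs at the call sites. A generic sketch of the idiom, with hypothetical names:

#ifdef CONFIG_FOO_FEATURE
int foo_feature_resume(struct foo_dev *dev);
#else
static inline int foo_feature_resume(struct foo_dev *dev)
{
	return 0;	/* feature compiled out: nothing to do */
}
#endif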
int mxc_isi_channel_acquire(struct mxc_isi_pipe *pipe,
@@ -362,7 +370,7 @@ void mxc_isi_channel_get(struct mxc_isi_pipe *pipe);
void mxc_isi_channel_put(struct mxc_isi_pipe *pipe);
void mxc_isi_channel_enable(struct mxc_isi_pipe *pipe);
void mxc_isi_channel_disable(struct mxc_isi_pipe *pipe);
-int mxc_isi_channel_chain(struct mxc_isi_pipe *pipe, bool bypass);
+int mxc_isi_channel_chain(struct mxc_isi_pipe *pipe);
void mxc_isi_channel_unchain(struct mxc_isi_pipe *pipe);
void mxc_isi_channel_config(struct mxc_isi_pipe *pipe,
diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-hw.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-hw.c
index 5623914f95e6..9225a7ac1c3e 100644
--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-hw.c
+++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-hw.c
@@ -587,7 +587,7 @@ void mxc_isi_channel_release(struct mxc_isi_pipe *pipe)
*
* TODO: Support secondary line buffer for downscaling YUV420 images.
*/
-int mxc_isi_channel_chain(struct mxc_isi_pipe *pipe, bool bypass)
+int mxc_isi_channel_chain(struct mxc_isi_pipe *pipe)
{
/* Channel chaining requires both line and output buffer. */
const u8 resources = MXC_ISI_CHANNEL_RES_OUTPUT_BUF
diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c
index 22e49d3a1287..00afcbfbdde4 100644
--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c
+++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c
@@ -43,7 +43,6 @@ struct mxc_isi_m2m_ctx_queue_data {
struct v4l2_pix_format_mplane format;
const struct mxc_isi_format_info *info;
u32 sequence;
- bool streaming;
};
struct mxc_isi_m2m_ctx {
@@ -74,9 +73,9 @@ to_isi_m2m_buffer(struct vb2_v4l2_buffer *buf)
return container_of(buf, struct mxc_isi_m2m_buffer, buf.vb);
}
-static inline struct mxc_isi_m2m_ctx *to_isi_m2m_ctx(struct v4l2_fh *fh)
+static inline struct mxc_isi_m2m_ctx *file_to_isi_m2m_ctx(struct file *filp)
{
- return container_of(fh, struct mxc_isi_m2m_ctx, fh);
+ return container_of(file_to_v4l2_fh(filp), struct mxc_isi_m2m_ctx, fh);
}
static inline struct mxc_isi_m2m_ctx_queue_data *
@@ -236,6 +235,70 @@ static void mxc_isi_m2m_vb2_buffer_queue(struct vb2_buffer *vb2)
v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}
+static int mxc_isi_m2m_vb2_prepare_streaming(struct vb2_queue *q)
+{
+ struct mxc_isi_m2m_ctx *ctx = vb2_get_drv_priv(q);
+ const struct v4l2_pix_format_mplane *out_pix = &ctx->queues.out.format;
+ const struct v4l2_pix_format_mplane *cap_pix = &ctx->queues.cap.format;
+ const struct mxc_isi_format_info *cap_info = ctx->queues.cap.info;
+ const struct mxc_isi_format_info *out_info = ctx->queues.out.info;
+ struct mxc_isi_m2m *m2m = ctx->m2m;
+ int ret;
+
+ guard(mutex)(&m2m->lock);
+
+ if (m2m->usage_count == INT_MAX)
+ return -EOVERFLOW;
+
+ ret = pm_runtime_resume_and_get(m2m->isi->dev);
+ if (ret)
+ return ret;
+
+ /*
+ * Acquire the pipe and initialize the channel with the first user of
+ * the M2M device.
+ */
+ if (m2m->usage_count == 0) {
+ bool bypass = cap_pix->width == out_pix->width &&
+ cap_pix->height == out_pix->height &&
+ cap_info->encoding == out_info->encoding;
+
+ ret = mxc_isi_channel_acquire(m2m->pipe,
+ &mxc_isi_m2m_frame_write_done,
+ bypass);
+ if (ret)
+ goto err_pm;
+
+ mxc_isi_channel_get(m2m->pipe);
+ }
+
+ m2m->usage_count++;
+
+ /*
+ * Allocate resources for the channel, counting how many users require
+ * buffer chaining.
+ */
+ if (!ctx->chained && out_pix->width > MXC_ISI_MAX_WIDTH_UNCHAINED) {
+ ret = mxc_isi_channel_chain(m2m->pipe);
+ if (ret)
+ goto err_deinit;
+
+ m2m->chained_count++;
+ ctx->chained = true;
+ }
+
+ return 0;
+
+err_deinit:
+ if (--m2m->usage_count == 0) {
+ mxc_isi_channel_put(m2m->pipe);
+ mxc_isi_channel_release(m2m->pipe);
+ }
+err_pm:
+ pm_runtime_put(m2m->isi->dev);
+ return ret;
+}
+
static int mxc_isi_m2m_vb2_start_streaming(struct vb2_queue *q,
unsigned int count)
{
@@ -265,13 +328,46 @@ static void mxc_isi_m2m_vb2_stop_streaming(struct vb2_queue *q)
}
}
+static void mxc_isi_m2m_vb2_unprepare_streaming(struct vb2_queue *q)
+{
+ struct mxc_isi_m2m_ctx *ctx = vb2_get_drv_priv(q);
+ struct mxc_isi_m2m *m2m = ctx->m2m;
+
+ guard(mutex)(&m2m->lock);
+
+ /*
+ * If the last context is this one, reset it to make sure the device
+ * will be reconfigured when streaming is restarted.
+ */
+ if (m2m->last_ctx == ctx)
+ m2m->last_ctx = NULL;
+
+ /* Free the channel resources if this is the last chained context. */
+ if (ctx->chained && --m2m->chained_count == 0)
+ mxc_isi_channel_unchain(m2m->pipe);
+ ctx->chained = false;
+
+ /* Turn off the light with the last user. */
+ if (--m2m->usage_count == 0) {
+ mxc_isi_channel_disable(m2m->pipe);
+ mxc_isi_channel_put(m2m->pipe);
+ mxc_isi_channel_release(m2m->pipe);
+ }
+
+ WARN_ON(m2m->usage_count < 0);
+
+ pm_runtime_put(m2m->isi->dev);
+}
+
static const struct vb2_ops mxc_isi_m2m_vb2_qops = {
.queue_setup = mxc_isi_m2m_vb2_queue_setup,
.buf_init = mxc_isi_m2m_vb2_buffer_init,
.buf_prepare = mxc_isi_m2m_vb2_buffer_prepare,
.buf_queue = mxc_isi_m2m_vb2_buffer_queue,
+ .prepare_streaming = mxc_isi_m2m_vb2_prepare_streaming,
.start_streaming = mxc_isi_m2m_vb2_start_streaming,
.stop_streaming = mxc_isi_m2m_vb2_stop_streaming,
+ .unprepare_streaming = mxc_isi_m2m_vb2_unprepare_streaming,
};
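
mxc_isi_m2m_vb2_prepare_streaming() above relies on guard(mutex) from <linux/cleanup.h>: the mutex is released automatically on every return path, which is why the error labels only undo reference counts and never unlock. A minimal sketch of the idiom, assuming a hypothetical struct foo_dev:

static int foo_prepare(struct foo_dev *dev)
{
	guard(mutex)(&dev->lock);	/* unlocked automatically at scope exit */

	if (dev->usage_count == INT_MAX)
		return -EOVERFLOW;	/* no explicit unlock needed */

	dev->usage_count++;
	return 0;
}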
static int mxc_isi_m2m_queue_init(void *priv, struct vb2_queue *src_vq,
@@ -427,7 +523,7 @@ static int mxc_isi_m2m_try_fmt_vid(struct file *file, void *fh,
const enum mxc_isi_video_type type =
f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ?
MXC_ISI_VIDEO_M2M_OUT : MXC_ISI_VIDEO_M2M_CAP;
- struct mxc_isi_m2m_ctx *ctx = to_isi_m2m_ctx(fh);
+ struct mxc_isi_m2m_ctx *ctx = file_to_isi_m2m_ctx(file);
__mxc_isi_m2m_try_fmt_vid(ctx, &f->fmt.pix_mp, type);
@@ -437,7 +533,7 @@ static int mxc_isi_m2m_try_fmt_vid(struct file *file, void *fh,
static int mxc_isi_m2m_g_fmt_vid(struct file *file, void *fh,
struct v4l2_format *f)
{
- struct mxc_isi_m2m_ctx *ctx = to_isi_m2m_ctx(fh);
+ struct mxc_isi_m2m_ctx *ctx = file_to_isi_m2m_ctx(file);
const struct mxc_isi_m2m_ctx_queue_data *qdata =
mxc_isi_m2m_ctx_qdata(ctx, f->type);
@@ -452,7 +548,7 @@ static int mxc_isi_m2m_s_fmt_vid(struct file *file, void *fh,
const enum mxc_isi_video_type type =
f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ?
MXC_ISI_VIDEO_M2M_OUT : MXC_ISI_VIDEO_M2M_CAP;
- struct mxc_isi_m2m_ctx *ctx = to_isi_m2m_ctx(fh);
+ struct mxc_isi_m2m_ctx *ctx = file_to_isi_m2m_ctx(file);
struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
const struct mxc_isi_format_info *info;
struct vb2_queue *vq;
@@ -481,136 +577,6 @@ static int mxc_isi_m2m_s_fmt_vid(struct file *file, void *fh,
return 0;
}
-static int mxc_isi_m2m_streamon(struct file *file, void *fh,
- enum v4l2_buf_type type)
-{
- struct mxc_isi_m2m_ctx *ctx = to_isi_m2m_ctx(fh);
- struct mxc_isi_m2m_ctx_queue_data *q = mxc_isi_m2m_ctx_qdata(ctx, type);
- const struct v4l2_pix_format_mplane *out_pix = &ctx->queues.out.format;
- const struct v4l2_pix_format_mplane *cap_pix = &ctx->queues.cap.format;
- const struct mxc_isi_format_info *cap_info = ctx->queues.cap.info;
- const struct mxc_isi_format_info *out_info = ctx->queues.out.info;
- struct mxc_isi_m2m *m2m = ctx->m2m;
- bool bypass;
- int ret;
-
- if (q->streaming)
- return 0;
-
- mutex_lock(&m2m->lock);
-
- if (m2m->usage_count == INT_MAX) {
- ret = -EOVERFLOW;
- goto unlock;
- }
-
- bypass = cap_pix->width == out_pix->width &&
- cap_pix->height == out_pix->height &&
- cap_info->encoding == out_info->encoding;
-
- /*
- * Acquire the pipe and initialize the channel with the first user of
- * the M2M device.
- */
- if (m2m->usage_count == 0) {
- ret = mxc_isi_channel_acquire(m2m->pipe,
- &mxc_isi_m2m_frame_write_done,
- bypass);
- if (ret)
- goto unlock;
-
- mxc_isi_channel_get(m2m->pipe);
- }
-
- m2m->usage_count++;
-
- /*
- * Allocate resources for the channel, counting how many users require
- * buffer chaining.
- */
- if (!ctx->chained && out_pix->width > MXC_ISI_MAX_WIDTH_UNCHAINED) {
- ret = mxc_isi_channel_chain(m2m->pipe, bypass);
- if (ret)
- goto deinit;
-
- m2m->chained_count++;
- ctx->chained = true;
- }
-
- /*
- * Drop the lock to start the stream, as the .device_run() operation
- * needs to acquire it.
- */
- mutex_unlock(&m2m->lock);
- ret = v4l2_m2m_ioctl_streamon(file, fh, type);
- if (ret) {
- /* Reacquire the lock for the cleanup path. */
- mutex_lock(&m2m->lock);
- goto unchain;
- }
-
- q->streaming = true;
-
- return 0;
-
-unchain:
- if (ctx->chained && --m2m->chained_count == 0)
- mxc_isi_channel_unchain(m2m->pipe);
- ctx->chained = false;
-
-deinit:
- if (--m2m->usage_count == 0) {
- mxc_isi_channel_put(m2m->pipe);
- mxc_isi_channel_release(m2m->pipe);
- }
-
-unlock:
- mutex_unlock(&m2m->lock);
- return ret;
-}
-
-static int mxc_isi_m2m_streamoff(struct file *file, void *fh,
- enum v4l2_buf_type type)
-{
- struct mxc_isi_m2m_ctx *ctx = to_isi_m2m_ctx(fh);
- struct mxc_isi_m2m_ctx_queue_data *q = mxc_isi_m2m_ctx_qdata(ctx, type);
- struct mxc_isi_m2m *m2m = ctx->m2m;
-
- v4l2_m2m_ioctl_streamoff(file, fh, type);
-
- if (!q->streaming)
- return 0;
-
- mutex_lock(&m2m->lock);
-
- /*
- * If the last context is this one, reset it to make sure the device
- * will be reconfigured when streaming is restarted.
- */
- if (m2m->last_ctx == ctx)
- m2m->last_ctx = NULL;
-
- /* Free the channel resources if this is the last chained context. */
- if (ctx->chained && --m2m->chained_count == 0)
- mxc_isi_channel_unchain(m2m->pipe);
- ctx->chained = false;
-
- /* Turn off the light with the last user. */
- if (--m2m->usage_count == 0) {
- mxc_isi_channel_disable(m2m->pipe);
- mxc_isi_channel_put(m2m->pipe);
- mxc_isi_channel_release(m2m->pipe);
- }
-
- WARN_ON(m2m->usage_count < 0);
-
- mutex_unlock(&m2m->lock);
-
- q->streaming = false;
-
- return 0;
-}
-
static const struct v4l2_ioctl_ops mxc_isi_m2m_ioctl_ops = {
.vidioc_querycap = mxc_isi_m2m_querycap,
@@ -631,8 +597,8 @@ static const struct v4l2_ioctl_ops mxc_isi_m2m_ioctl_ops = {
.vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
.vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
- .vidioc_streamon = mxc_isi_m2m_streamon,
- .vidioc_streamoff = mxc_isi_m2m_streamoff,
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
@@ -668,7 +634,6 @@ static int mxc_isi_m2m_open(struct file *file)
mutex_init(&ctx->vb2_lock);
v4l2_fh_init(&ctx->fh, vdev);
- file->private_data = &ctx->fh;
ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(m2m->m2m_dev, ctx,
&mxc_isi_m2m_queue_init);
@@ -685,16 +650,10 @@ static int mxc_isi_m2m_open(struct file *file)
if (ret)
goto err_ctx;
- ret = pm_runtime_resume_and_get(m2m->isi->dev);
- if (ret)
- goto err_ctrls;
-
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
return 0;
-err_ctrls:
- mxc_isi_m2m_ctx_ctrls_delete(ctx);
err_ctx:
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
err_fh:
@@ -706,20 +665,17 @@ err_fh:
static int mxc_isi_m2m_release(struct file *file)
{
- struct mxc_isi_m2m *m2m = video_drvdata(file);
- struct mxc_isi_m2m_ctx *ctx = to_isi_m2m_ctx(file->private_data);
+ struct mxc_isi_m2m_ctx *ctx = file_to_isi_m2m_ctx(file);
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
mxc_isi_m2m_ctx_ctrls_delete(ctx);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
mutex_destroy(&ctx->vb2_lock);
kfree(ctx);
- pm_runtime_put(m2m->isi->dev);
-
return 0;
}
@@ -733,6 +689,40 @@ static const struct v4l2_file_operations mxc_isi_m2m_fops = {
};
/* -----------------------------------------------------------------------------
+ * Suspend & resume
+ */
+
+void mxc_isi_m2m_suspend(struct mxc_isi_m2m *m2m)
+{
+ if (m2m->usage_count == 0)
+ return;
+
+ v4l2_m2m_suspend(m2m->m2m_dev);
+
+ if (m2m->chained_count > 0)
+ mxc_isi_channel_unchain(m2m->pipe);
+
+ mxc_isi_channel_disable(m2m->pipe);
+ mxc_isi_channel_put(m2m->pipe);
+}
+
+int mxc_isi_m2m_resume(struct mxc_isi_m2m *m2m)
+{
+ if (m2m->usage_count == 0)
+ return 0;
+
+ mxc_isi_channel_get(m2m->pipe);
+
+ if (m2m->chained_count > 0)
+ mxc_isi_channel_chain(m2m->pipe);
+
+ m2m->last_ctx = NULL;
+ v4l2_m2m_resume(m2m->m2m_dev);
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
* Registration
*/
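
The net effect of this file's changes is that the custom VIDIOC_STREAMON/STREAMOFF wrappers disappear: resource setup moves into the vb2 .prepare_streaming()/.unprepare_streaming() callbacks, which vb2 invokes from STREAMON (before .start_streaming()) and balances at STREAMOFF, so setup errors still surface from VIDIOC_STREAMON while the per-queue streaming flags become unnecessary. A sketch of the resulting ops table, with hypothetical handlers:

static const struct vb2_ops foo_vb2_qops = {
	.queue_setup		= foo_vb2_queue_setup,
	.buf_queue		= foo_vb2_buffer_queue,
	/* Acquire pipeline/channel resources; errors reach VIDIOC_STREAMON. */
	.prepare_streaming	= foo_vb2_prepare_streaming,
	.start_streaming	= foo_vb2_start_streaming,
	.stop_streaming		= foo_vb2_stop_streaming,
	/* Balanced release, called at STREAMOFF or queue teardown. */
	.unprepare_streaming	= foo_vb2_unprepare_streaming,
};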
diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-pipe.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-pipe.c
index d76eb58deb09..a41c51dd9ce0 100644
--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-pipe.c
+++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-pipe.c
@@ -855,7 +855,7 @@ int mxc_isi_pipe_acquire(struct mxc_isi_pipe *pipe,
/* Chain the channel if needed for wide resolutions. */
if (sink_fmt->width > MXC_ISI_MAX_WIDTH_UNCHAINED) {
- ret = mxc_isi_channel_chain(pipe, bypass);
+ ret = mxc_isi_channel_chain(pipe);
if (ret)
mxc_isi_channel_release(pipe);
}
diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c
index 8654150728a8..13682bf6e9f8 100644
--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c
+++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c
@@ -937,6 +937,47 @@ static void mxc_isi_video_init_channel(struct mxc_isi_video *video)
mxc_isi_channel_set_output_format(pipe, video->fmtinfo, &video->pix);
}
+static int mxc_isi_vb2_prepare_streaming(struct vb2_queue *q)
+{
+ struct mxc_isi_video *video = vb2_get_drv_priv(q);
+ struct media_device *mdev = &video->pipe->isi->media_dev;
+ struct media_pipeline *pipe;
+ int ret;
+
+ /* Get a pipeline for the video node and start it. */
+ scoped_guard(mutex, &mdev->graph_mutex) {
+ ret = mxc_isi_pipe_acquire(video->pipe,
+ &mxc_isi_video_frame_write_done);
+ if (ret)
+ return ret;
+
+ pipe = media_entity_pipeline(&video->vdev.entity)
+ ? : &video->pipe->pipe;
+
+ ret = __video_device_pipeline_start(&video->vdev, pipe);
+ if (ret)
+ goto err_release;
+ }
+
+ /* Verify that the video format matches the output of the subdev. */
+ ret = mxc_isi_video_validate_format(video);
+ if (ret)
+ goto err_stop;
+
+ /* Allocate buffers for discard operation. */
+ ret = mxc_isi_video_alloc_discard_buffers(video);
+ if (ret)
+ goto err_stop;
+
+ return 0;
+
+err_stop:
+ video_device_pipeline_stop(&video->vdev);
+err_release:
+ mxc_isi_pipe_release(video->pipe);
+ return ret;
+}
+
static int mxc_isi_vb2_start_streaming(struct vb2_queue *q, unsigned int count)
{
struct mxc_isi_video *video = vb2_get_drv_priv(q);
@@ -985,13 +1026,24 @@ static void mxc_isi_vb2_stop_streaming(struct vb2_queue *q)
mxc_isi_video_return_buffers(video, VB2_BUF_STATE_ERROR);
}
+static void mxc_isi_vb2_unprepare_streaming(struct vb2_queue *q)
+{
+ struct mxc_isi_video *video = vb2_get_drv_priv(q);
+
+ mxc_isi_video_free_discard_buffers(video);
+ video_device_pipeline_stop(&video->vdev);
+ mxc_isi_pipe_release(video->pipe);
+}
+
static const struct vb2_ops mxc_isi_vb2_qops = {
.queue_setup = mxc_isi_vb2_queue_setup,
.buf_init = mxc_isi_vb2_buffer_init,
.buf_prepare = mxc_isi_vb2_buffer_prepare,
.buf_queue = mxc_isi_vb2_buffer_queue,
+ .prepare_streaming = mxc_isi_vb2_prepare_streaming,
.start_streaming = mxc_isi_vb2_start_streaming,
.stop_streaming = mxc_isi_vb2_stop_streaming,
+ .unprepare_streaming = mxc_isi_vb2_unprepare_streaming,
};
/* -----------------------------------------------------------------------------
@@ -1145,97 +1197,6 @@ static int mxc_isi_video_s_fmt(struct file *file, void *priv,
return 0;
}
-static int mxc_isi_video_streamon(struct file *file, void *priv,
- enum v4l2_buf_type type)
-{
- struct mxc_isi_video *video = video_drvdata(file);
- struct media_device *mdev = &video->pipe->isi->media_dev;
- struct media_pipeline *pipe;
- int ret;
-
- if (vb2_queue_is_busy(&video->vb2_q, file))
- return -EBUSY;
-
- /*
- * Get a pipeline for the video node and start it. This must be done
- * here and not in the queue .start_streaming() handler, so that
- * pipeline start errors can be reported from VIDIOC_STREAMON and not
- * delayed until subsequent VIDIOC_QBUF calls.
- */
- mutex_lock(&mdev->graph_mutex);
-
- ret = mxc_isi_pipe_acquire(video->pipe, &mxc_isi_video_frame_write_done);
- if (ret) {
- mutex_unlock(&mdev->graph_mutex);
- return ret;
- }
-
- pipe = media_entity_pipeline(&video->vdev.entity) ? : &video->pipe->pipe;
-
- ret = __video_device_pipeline_start(&video->vdev, pipe);
- if (ret) {
- mutex_unlock(&mdev->graph_mutex);
- goto err_release;
- }
-
- mutex_unlock(&mdev->graph_mutex);
-
- /* Verify that the video format matches the output of the subdev. */
- ret = mxc_isi_video_validate_format(video);
- if (ret)
- goto err_stop;
-
- /* Allocate buffers for discard operation. */
- ret = mxc_isi_video_alloc_discard_buffers(video);
- if (ret)
- goto err_stop;
-
- ret = vb2_streamon(&video->vb2_q, type);
- if (ret)
- goto err_free;
-
- video->is_streaming = true;
-
- return 0;
-
-err_free:
- mxc_isi_video_free_discard_buffers(video);
-err_stop:
- video_device_pipeline_stop(&video->vdev);
-err_release:
- mxc_isi_pipe_release(video->pipe);
- return ret;
-}
-
-static void mxc_isi_video_cleanup_streaming(struct mxc_isi_video *video)
-{
- lockdep_assert_held(&video->lock);
-
- if (!video->is_streaming)
- return;
-
- mxc_isi_video_free_discard_buffers(video);
- video_device_pipeline_stop(&video->vdev);
- mxc_isi_pipe_release(video->pipe);
-
- video->is_streaming = false;
-}
-
-static int mxc_isi_video_streamoff(struct file *file, void *priv,
- enum v4l2_buf_type type)
-{
- struct mxc_isi_video *video = video_drvdata(file);
- int ret;
-
- ret = vb2_ioctl_streamoff(file, priv, type);
- if (ret)
- return ret;
-
- mxc_isi_video_cleanup_streaming(video);
-
- return 0;
-}
-
static int mxc_isi_video_enum_framesizes(struct file *file, void *priv,
struct v4l2_frmsizeenum *fsize)
{
@@ -1291,9 +1252,8 @@ static const struct v4l2_ioctl_ops mxc_isi_video_ioctl_ops = {
.vidioc_expbuf = vb2_ioctl_expbuf,
.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
.vidioc_create_bufs = vb2_ioctl_create_bufs,
-
- .vidioc_streamon = mxc_isi_video_streamon,
- .vidioc_streamoff = mxc_isi_video_streamoff,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
.vidioc_enum_framesizes = mxc_isi_video_enum_framesizes,
@@ -1332,10 +1292,6 @@ static int mxc_isi_video_release(struct file *file)
if (ret)
dev_err(video->pipe->isi->dev, "%s fail\n", __func__);
- mutex_lock(&video->lock);
- mxc_isi_video_cleanup_streaming(video);
- mutex_unlock(&video->lock);
-
pm_runtime_put(video->pipe->isi->dev);
return ret;
}
@@ -1357,7 +1313,7 @@ void mxc_isi_video_suspend(struct mxc_isi_pipe *pipe)
{
struct mxc_isi_video *video = &pipe->video;
- if (!video->is_streaming)
+ if (!vb2_is_streaming(&video->vb2_q))
return;
mxc_isi_pipe_disable(pipe);
@@ -1388,7 +1344,7 @@ int mxc_isi_video_resume(struct mxc_isi_pipe *pipe)
{
struct mxc_isi_video *video = &pipe->video;
- if (!video->is_streaming)
+ if (!vb2_is_streaming(&video->vb2_q))
return 0;
mxc_isi_video_init_channel(video);
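
With the driver-private is_streaming flag gone, the suspend/resume paths query the queue state that vb2 already tracks. A short sketch, with a hypothetical quiesce helper:

static void foo_video_suspend(struct foo_video *video)
{
	/* vb2 tracks the queue state; no driver flag needed. */
	if (!vb2_is_streaming(&video->vb2_q))
		return;

	foo_pipe_disable(video->pipe);	/* hypothetical quiesce helper */
}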
diff --git a/drivers/media/platform/nxp/mx2_emmaprp.c b/drivers/media/platform/nxp/mx2_emmaprp.c
index 0c6cc120fd2a..3aae8c0b690c 100644
--- a/drivers/media/platform/nxp/mx2_emmaprp.c
+++ b/drivers/media/platform/nxp/mx2_emmaprp.c
@@ -214,6 +214,11 @@ struct emmaprp_ctx {
struct emmaprp_q_data q_data[2];
};
+static inline struct emmaprp_ctx *file_to_emmaprp_ctx(struct file *filp)
+{
+ return container_of(file_to_v4l2_fh(filp), struct emmaprp_ctx, fh);
+}
+
static struct emmaprp_q_data *get_q_data(struct emmaprp_ctx *ctx,
enum v4l2_buf_type type)
{
@@ -451,13 +456,13 @@ static int vidioc_g_fmt(struct emmaprp_ctx *ctx, struct v4l2_format *f)
static int vidioc_g_fmt_vid_out(struct file *file, void *priv,
struct v4l2_format *f)
{
- return vidioc_g_fmt(priv, f);
+ return vidioc_g_fmt(file_to_emmaprp_ctx(file), f);
}
static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- return vidioc_g_fmt(priv, f);
+ return vidioc_g_fmt(file_to_emmaprp_ctx(file), f);
}
static int vidioc_try_fmt(struct v4l2_format *f)
@@ -497,8 +502,8 @@ static int vidioc_try_fmt(struct v4l2_format *f)
static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
+ struct emmaprp_ctx *ctx = file_to_emmaprp_ctx(file);
struct emmaprp_fmt *fmt;
- struct emmaprp_ctx *ctx = priv;
fmt = find_format(f);
if (!fmt || !(fmt->types & MEM2MEM_CAPTURE)) {
@@ -514,8 +519,8 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_try_fmt_vid_out(struct file *file, void *priv,
struct v4l2_format *f)
{
+ struct emmaprp_ctx *ctx = file_to_emmaprp_ctx(file);
struct emmaprp_fmt *fmt;
- struct emmaprp_ctx *ctx = priv;
fmt = find_format(f);
if (!fmt || !(fmt->types & MEM2MEM_OUTPUT)) {
@@ -575,7 +580,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
if (ret)
return ret;
- return vidioc_s_fmt(priv, f);
+ return vidioc_s_fmt(file_to_emmaprp_ctx(file), f);
}
static int vidioc_s_fmt_vid_out(struct file *file, void *priv,
@@ -587,7 +592,7 @@ static int vidioc_s_fmt_vid_out(struct file *file, void *priv,
if (ret)
return ret;
- return vidioc_s_fmt(priv, f);
+ return vidioc_s_fmt(file_to_emmaprp_ctx(file), f);
}
static const struct v4l2_ioctl_ops emmaprp_ioctl_ops = {
@@ -725,7 +730,6 @@ static int emmaprp_open(struct file *file)
return -ENOMEM;
v4l2_fh_init(&ctx->fh, video_devdata(file));
- file->private_data = &ctx->fh;
ctx->dev = pcdev;
if (mutex_lock_interruptible(&pcdev->dev_mutex)) {
@@ -747,7 +751,7 @@ static int emmaprp_open(struct file *file)
clk_prepare_enable(pcdev->clk_emma_ahb);
ctx->q_data[V4L2_M2M_SRC].fmt = &formats[1];
ctx->q_data[V4L2_M2M_DST].fmt = &formats[0];
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
mutex_unlock(&pcdev->dev_mutex);
dprintk(pcdev, "Created instance %p, m2m_ctx: %p\n", ctx, ctx->fh.m2m_ctx);
@@ -758,14 +762,14 @@ static int emmaprp_open(struct file *file)
static int emmaprp_release(struct file *file)
{
struct emmaprp_dev *pcdev = video_drvdata(file);
- struct emmaprp_ctx *ctx = file->private_data;
+ struct emmaprp_ctx *ctx = file_to_emmaprp_ctx(file);
dprintk(pcdev, "Releasing instance %p\n", ctx);
mutex_lock(&pcdev->dev_mutex);
clk_disable_unprepare(pcdev->clk_emma_ahb);
clk_disable_unprepare(pcdev->clk_emma_ipg);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
mutex_unlock(&pcdev->dev_mutex);
diff --git a/drivers/media/platform/qcom/camss/Makefile b/drivers/media/platform/qcom/camss/Makefile
index d26a9c24a430..23960d02877d 100644
--- a/drivers/media/platform/qcom/camss/Makefile
+++ b/drivers/media/platform/qcom/camss/Makefile
@@ -6,9 +6,10 @@ qcom-camss-objs += \
camss-csid.o \
camss-csid-4-1.o \
camss-csid-4-7.o \
+ camss-csid-340.o \
camss-csid-680.o \
camss-csid-gen2.o \
- camss-csid-780.o \
+ camss-csid-gen3.o \
camss-csiphy-2ph-1-0.o \
camss-csiphy-3ph-1-0.o \
camss-csiphy.o \
@@ -17,9 +18,10 @@ qcom-camss-objs += \
camss-vfe-4-7.o \
camss-vfe-4-8.o \
camss-vfe-17x.o \
+ camss-vfe-340.o \
camss-vfe-480.o \
camss-vfe-680.o \
- camss-vfe-780.o \
+ camss-vfe-gen3.o \
camss-vfe-gen1.o \
camss-vfe.o \
camss-video.o \
diff --git a/drivers/media/platform/qcom/camss/camss-csid-340.c b/drivers/media/platform/qcom/camss/camss-csid-340.c
new file mode 100644
index 000000000000..22a30510fb79
--- /dev/null
+++ b/drivers/media/platform/qcom/camss/camss-csid-340.c
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Qualcomm MSM Camera Subsystem - CSID (CSI Decoder) Module 340
+ *
+ * Copyright (c) 2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/completion.h>
+#include <linux/bitfield.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+
+#include "camss.h"
+#include "camss-csid.h"
+#include "camss-csid-gen2.h"
+
+#define CSID_RST_STROBES (0x010)
+#define CSID_RST_SW_REGS BIT(0)
+#define CSID_RST_IRQ BIT(1)
+#define CSID_RST_IFE_CLK BIT(2)
+#define CSID_RST_PHY_CLK BIT(3)
+#define CSID_RST_CSID_CLK BIT(4)
+
+#define CSID_IRQ_STATUS (0x070)
+#define CSID_IRQ_MASK (0x074)
+#define CSID_IRQ_MASK_RST_DONE BIT(0)
+#define CSID_IRQ_CLEAR (0x078)
+#define CSID_IRQ_CMD (0x080)
+#define CSID_IRQ_CMD_CLEAR BIT(0)
+
+#define CSID_CSI2_RX_CFG0 (0x100)
+#define CSI2_RX_CFG0_NUM_ACTIVE_LANES_MASK GENMASK(1, 0)
+#define CSI2_RX_CFG0_DLX_INPUT_SEL_MASK GENMASK(17, 4)
+#define CSI2_RX_CFG0_PHY_NUM_SEL_MASK GENMASK(21, 20)
+#define CSI2_RX_CFG0_PHY_NUM_SEL_BASE_IDX 1
+#define CSI2_RX_CFG0_PHY_TYPE_SEL BIT(24)
+
+#define CSID_CSI2_RX_CFG1 (0x104)
+#define CSI2_RX_CFG1_PACKET_ECC_CORRECTION_EN BIT(0)
+#define CSI2_RX_CFG1_MISR_EN BIT(6)
+#define CSI2_RX_CFG1_CGC_MODE BIT(7)
+
+#define CSID_RDI_CFG0(rdi) (0x300 + 0x100 * (rdi))
+#define CSID_RDI_CFG0_BYTE_CNTR_EN BIT(0)
+#define CSID_RDI_CFG0_TIMESTAMP_EN BIT(1)
+#define CSID_RDI_CFG0_DECODE_FORMAT_MASK GENMASK(15, 12)
+#define CSID_RDI_CFG0_DECODE_FORMAT_NOP CSID_RDI_CFG0_DECODE_FORMAT_MASK
+#define CSID_RDI_CFG0_DT_MASK GENMASK(21, 16)
+#define CSID_RDI_CFG0_VC_MASK GENMASK(23, 22)
+#define CSID_RDI_CFG0_DTID_MASK GENMASK(28, 27)
+#define CSID_RDI_CFG0_ENABLE BIT(31)
+
+#define CSID_RDI_CTRL(rdi) (0x308 + 0x100 * (rdi))
+#define CSID_RDI_CTRL_HALT_AT_FRAME_BOUNDARY 0
+#define CSID_RDI_CTRL_RESUME_AT_FRAME_BOUNDARY 1
+
+static void __csid_configure_rx(struct csid_device *csid,
+ struct csid_phy_config *phy, int vc)
+{
+ u32 val;
+
+ val = FIELD_PREP(CSI2_RX_CFG0_NUM_ACTIVE_LANES_MASK, phy->lane_cnt - 1);
+ val |= FIELD_PREP(CSI2_RX_CFG0_DLX_INPUT_SEL_MASK, phy->lane_assign);
+ val |= FIELD_PREP(CSI2_RX_CFG0_PHY_NUM_SEL_MASK,
+ phy->csiphy_id + CSI2_RX_CFG0_PHY_NUM_SEL_BASE_IDX);
+ writel_relaxed(val, csid->base + CSID_CSI2_RX_CFG0);
+
+ val = CSI2_RX_CFG1_PACKET_ECC_CORRECTION_EN;
+ writel_relaxed(val, csid->base + CSID_CSI2_RX_CFG1);
+}
+
+static void __csid_ctrl_rdi(struct csid_device *csid, int enable, u8 rdi)
+{
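+	/*
+	 * !!enable resolves to CSID_RDI_CTRL_RESUME_AT_FRAME_BOUNDARY (1) or
+	 * CSID_RDI_CTRL_HALT_AT_FRAME_BOUNDARY (0).
+	 */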
+ writel_relaxed(!!enable, csid->base + CSID_RDI_CTRL(rdi));
+}
+
+static void __csid_configure_rdi_stream(struct csid_device *csid, u8 enable, u8 vc)
+{
+ struct v4l2_mbus_framefmt *input_format = &csid->fmt[MSM_CSID_PAD_FIRST_SRC + vc];
+ const struct csid_format_info *format = csid_get_fmt_entry(csid->res->formats->formats,
+ csid->res->formats->nformats,
+ input_format->code);
+ u8 lane_cnt = csid->phy.lane_cnt;
+ u8 dt_id;
+ u32 val;
+
+ if (!lane_cnt)
+ lane_cnt = 4;
+
+ /*
+ * DT_ID is a two bit bitfield that is concatenated with
+ * the four least significant bits of the five bit VC
+ * bitfield to generate an internal CID value.
+ *
+ * CSID_RDI_CFG0(vc)
+ * DT_ID : 28:27
+ * VC : 26:22
+ * DT : 21:16
+ *
+ * CID : VC 3:0 << 2 | DT_ID 1:0
+ */
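+	/*
+	 * Illustrative example: vc = 2 gives dt_id = 2 & 0x3 = 2, so the
+	 * internal CID is (2 & 0xf) << 2 | 2 = 10.
+	 */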
+ dt_id = vc & 0x03;
+
+ val = CSID_RDI_CFG0_DECODE_FORMAT_NOP; /* only for RDI path */
+ val |= FIELD_PREP(CSID_RDI_CFG0_DT_MASK, format->data_type);
+ val |= FIELD_PREP(CSID_RDI_CFG0_VC_MASK, vc);
+ val |= FIELD_PREP(CSID_RDI_CFG0_DTID_MASK, dt_id);
+
+ if (enable)
+ val |= CSID_RDI_CFG0_ENABLE;
+
+ dev_dbg(csid->camss->dev, "CSID%u: Stream %s (dt:0x%x vc=%u)\n",
+ csid->id, enable ? "enable" : "disable", format->data_type, vc);
+
+ writel_relaxed(val, csid->base + CSID_RDI_CFG0(vc));
+}
+
+static void csid_configure_stream(struct csid_device *csid, u8 enable)
+{
+ int i;
+
+ for (i = 0; i < MSM_CSID_MAX_SRC_STREAMS; i++) {
+ if (csid->phy.en_vc & BIT(i)) {
+ __csid_configure_rdi_stream(csid, enable, i);
+ __csid_configure_rx(csid, &csid->phy, i);
+ __csid_ctrl_rdi(csid, enable, i);
+ }
+ }
+}
+
+static int csid_reset(struct csid_device *csid)
+{
+ unsigned long time;
+
+ writel_relaxed(CSID_IRQ_MASK_RST_DONE, csid->base + CSID_IRQ_MASK);
+ writel_relaxed(CSID_IRQ_MASK_RST_DONE, csid->base + CSID_IRQ_CLEAR);
+ writel_relaxed(CSID_IRQ_CMD_CLEAR, csid->base + CSID_IRQ_CMD);
+
+ reinit_completion(&csid->reset_complete);
+
+ /* Reset with registers preserved */
+ writel(CSID_RST_IRQ | CSID_RST_IFE_CLK | CSID_RST_PHY_CLK | CSID_RST_CSID_CLK,
+ csid->base + CSID_RST_STROBES);
+
+ time = wait_for_completion_timeout(&csid->reset_complete,
+ msecs_to_jiffies(CSID_RESET_TIMEOUT_MS));
+ if (!time) {
+ dev_err(csid->camss->dev, "CSID%u: reset timeout\n", csid->id);
+ return -EIO;
+ }
+
+ dev_dbg(csid->camss->dev, "CSID%u: reset done\n", csid->id);
+
+ return 0;
+}
+
+static irqreturn_t csid_isr(int irq, void *dev)
+{
+ struct csid_device *csid = dev;
+ u32 val;
+
+ val = readl_relaxed(csid->base + CSID_IRQ_STATUS);
+ writel_relaxed(val, csid->base + CSID_IRQ_CLEAR);
+ writel_relaxed(CSID_IRQ_CMD_CLEAR, csid->base + CSID_IRQ_CMD);
+
+ if (val & CSID_IRQ_MASK_RST_DONE)
+ complete(&csid->reset_complete);
+ else
+ dev_warn_ratelimited(csid->camss->dev, "Spurious CSID interrupt\n");
+
+ return IRQ_HANDLED;
+}
+
+static int csid_configure_testgen_pattern(struct csid_device *csid, s32 val)
+{
+	return -EOPNOTSUPP; /* the test pattern generator is not part of this CSID */
+}
+
+static void csid_subdev_init(struct csid_device *csid) {}
+
+const struct csid_hw_ops csid_ops_340 = {
+ .configure_testgen_pattern = csid_configure_testgen_pattern,
+ .configure_stream = csid_configure_stream,
+ .hw_version = csid_hw_version,
+ .isr = csid_isr,
+ .reset = csid_reset,
+ .src_pad_code = csid_src_pad_code,
+ .subdev_init = csid_subdev_init,
+};
diff --git a/drivers/media/platform/qcom/camss/camss-csid-780.c b/drivers/media/platform/qcom/camss/camss-csid-gen3.c
index 4c720d177731..664245cf6eb0 100644
--- a/drivers/media/platform/qcom/camss/camss-csid-780.c
+++ b/drivers/media/platform/qcom/camss/camss-csid-gen3.c
@@ -13,7 +13,7 @@
#include "camss.h"
#include "camss-csid.h"
-#include "camss-csid-780.h"
+#include "camss-csid-gen3.h"
#define CSID_IO_PATH_CFG0(csid) (0x4 * (csid))
#define OUTPUT_IFE_EN 0x100
@@ -45,8 +45,12 @@
#define CSID_CSI2_RX_IRQ_CLEAR 0xA4
#define CSID_CSI2_RX_IRQ_SET 0xA8
+#define IS_CSID_690(csid) ((csid->camss->res->version == CAMSS_8775P) \
+ || (csid->camss->res->version == CAMSS_8300))
#define CSID_BUF_DONE_IRQ_STATUS 0x8C
-#define BUF_DONE_IRQ_STATUS_RDI_OFFSET (csid_is_lite(csid) ? 1 : 14)
+#define BUF_DONE_IRQ_STATUS_RDI_OFFSET (csid_is_lite(csid) ?\
+ 1 : (IS_CSID_690(csid) ?\
+ 13 : 14))
#define CSID_BUF_DONE_IRQ_MASK 0x90
#define CSID_BUF_DONE_IRQ_CLEAR 0x94
#define CSID_BUF_DONE_IRQ_SET 0x98
@@ -59,6 +63,7 @@
#define CSID_CSI2_RX_CFG0 0x200
#define CSI2_RX_CFG0_NUM_ACTIVE_LANES 0
+#define CSI2_RX_CFG0_VC_MODE 3
#define CSI2_RX_CFG0_DL0_INPUT_SEL 4
#define CSI2_RX_CFG0_PHY_NUM_SEL 20
@@ -66,7 +71,9 @@
#define CSI2_RX_CFG1_ECC_CORRECTION_EN BIT(0)
#define CSI2_RX_CFG1_VC_MODE BIT(2)
-#define CSID_RDI_CFG0(rdi) (0x500 + 0x100 * (rdi))
+#define CSID_RDI_CFG0(rdi) (csid_is_lite(csid) && IS_CSID_690(csid) ?\
+ (0x300 + 0x100 * (rdi)) :\
+ (0x500 + 0x100 * (rdi)))
#define RDI_CFG0_TIMESTAMP_EN BIT(6)
#define RDI_CFG0_TIMESTAMP_STB_SEL BIT(8)
#define RDI_CFG0_DECODE_FORMAT 12
@@ -75,10 +82,14 @@
#define RDI_CFG0_DT_ID 27
#define RDI_CFG0_EN BIT(31)
-#define CSID_RDI_CTRL(rdi) (0x504 + 0x100 * (rdi))
+#define CSID_RDI_CTRL(rdi) (csid_is_lite(csid) && IS_CSID_690(csid) ?\
+ (0x304 + 0x100 * (rdi)) :\
+ (0x504 + 0x100 * (rdi)))
#define RDI_CTRL_START_CMD BIT(0)
-#define CSID_RDI_CFG1(rdi) (0x510 + 0x100 * (rdi))
+#define CSID_RDI_CFG1(rdi) (csid_is_lite(csid) && IS_CSID_690(csid) ?\
+ (0x310 + 0x100 * (rdi)) :\
+ (0x510 + 0x100 * (rdi)))
#define RDI_CFG1_DROP_H_EN BIT(5)
#define RDI_CFG1_DROP_V_EN BIT(6)
#define RDI_CFG1_CROP_H_EN BIT(7)
@@ -86,9 +97,12 @@
#define RDI_CFG1_PIX_STORE BIT(10)
#define RDI_CFG1_PACKING_FORMAT_MIPI BIT(15)
-#define CSID_RDI_IRQ_SUBSAMPLE_PATTERN(rdi) (0x548 + 0x100 * (rdi))
-#define CSID_RDI_IRQ_SUBSAMPLE_PERIOD(rdi) (0x54C + 0x100 * (rdi))
-
+#define CSID_RDI_IRQ_SUBSAMPLE_PATTERN(rdi) (csid_is_lite(csid) && IS_CSID_690(csid) ?\
+ (0x348 + 0x100 * (rdi)) :\
+ (0x548 + 0x100 * (rdi)))
+#define CSID_RDI_IRQ_SUBSAMPLE_PERIOD(rdi) (csid_is_lite(csid) && IS_CSID_690(csid) ?\
+ (0x34C + 0x100 * (rdi)) :\
+ (0x54C + 0x100 * (rdi)))
#define CSI2_RX_CFG0_PHY_SEL_BASE_IDX 1
static void __csid_configure_rx(struct csid_device *csid,
@@ -259,7 +273,7 @@ static irqreturn_t csid_isr(int irq, void *dev)
if (buf_done_val & BIT(BUF_DONE_IRQ_STATUS_RDI_OFFSET + i)) {
/*
- * For Titan 780, bus done and RUP IRQ have been moved to
+	 * For Titan Gen3, buf done and RUP IRQs have been moved to
	 * the CSID from the VFE. Once the CSID receives buf done, it needs to
	 * notify the VFE of this event and trigger the VFE to handle the buf
	 * done processing.
*/
@@ -325,7 +339,7 @@ static void csid_subdev_init(struct csid_device *csid)
csid->testgen.nmodes = CSID_PAYLOAD_MODE_DISABLED;
}
-const struct csid_hw_ops csid_ops_780 = {
+const struct csid_hw_ops csid_ops_gen3 = {
.configure_stream = csid_configure_stream,
.configure_testgen_pattern = csid_configure_testgen_pattern,
.hw_version = csid_hw_version,
diff --git a/drivers/media/platform/qcom/camss/camss-csid-780.h b/drivers/media/platform/qcom/camss/camss-csid-gen3.h
index a990c66a60ff..6ee62da770c1 100644
--- a/drivers/media/platform/qcom/camss/camss-csid-780.h
+++ b/drivers/media/platform/qcom/camss/camss-csid-gen3.h
@@ -1,13 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * camss-csid-780.h
+ * camss-csid-gen3.h
*
* Qualcomm MSM Camera Subsystem - CSID (CSI Decoder) Module Generation 3
*
* Copyright (c) 2024 Qualcomm Technologies, Inc.
*/
-#ifndef __QC_MSM_CAMSS_CSID_780_H__
-#define __QC_MSM_CAMSS_CSID_780_H__
+#ifndef __QC_MSM_CAMSS_CSID_GEN3_H__
+#define __QC_MSM_CAMSS_CSID_GEN3_H__
#define DECODE_FORMAT_UNCOMPRESSED_8_BIT 0x1
#define DECODE_FORMAT_UNCOMPRESSED_10_BIT 0x2
@@ -22,4 +22,4 @@
#define PLAIN_FORMAT_PLAIN16 0x1 /* supports DPCM, UNCOMPRESSED_10/16_BIT */
#define PLAIN_FORMAT_PLAIN32 0x2 /* supports UNCOMPRESSED_20_BIT */
-#endif /* __QC_MSM_CAMSS_CSID_780_H__ */
+#endif /* __QC_MSM_CAMSS_CSID_GEN3_H__ */
diff --git a/drivers/media/platform/qcom/camss/camss-csid.h b/drivers/media/platform/qcom/camss/camss-csid.h
index 9dc826d8c8f6..aedc96ed84b2 100644
--- a/drivers/media/platform/qcom/camss/camss-csid.h
+++ b/drivers/media/platform/qcom/camss/camss-csid.h
@@ -213,9 +213,10 @@ extern const struct csid_formats csid_formats_gen2;
extern const struct csid_hw_ops csid_ops_4_1;
extern const struct csid_hw_ops csid_ops_4_7;
+extern const struct csid_hw_ops csid_ops_340;
extern const struct csid_hw_ops csid_ops_680;
extern const struct csid_hw_ops csid_ops_gen2;
-extern const struct csid_hw_ops csid_ops_780;
+extern const struct csid_hw_ops csid_ops_gen3;
/*
* csid_is_lite - Check if CSID is CSID lite.
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
index 88c0ba495c32..a229ba04b158 100644
--- a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
+++ b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
@@ -64,6 +64,85 @@ struct csiphy_lane_regs {
u32 csiphy_param_type;
};
+/* 5nm 2PH v 1.3.0 2p5Gbps 4 lane DPHY mode */
+static const struct
+csiphy_lane_regs lane_regs_sa8775p[] = {
+ {0x0724, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0728, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0700, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x070C, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0738, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x072C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0734, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0710, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x071C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0714, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x073C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0704, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0720, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0708, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x0024, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0000, 0x8D, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0034, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0010, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x001C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0014, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0020, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0008, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x0224, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0200, 0x8D, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0238, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x022C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0234, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0210, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x021C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0214, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x023C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0204, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0220, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0208, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x0424, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0400, 0x8D, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0434, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0410, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x041C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0414, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0420, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0408, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x0624, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0600, 0x8D, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0638, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x062C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0634, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0610, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x061C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0614, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x063C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0604, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0620, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0608, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x005C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0060, 0xFD, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0064, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x025C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0260, 0xFD, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0264, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x045C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0460, 0xFD, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0464, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x065C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0660, 0xFD, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0664, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+};
+
/* GEN2 1.0 2PH */
static const struct
csiphy_lane_regs lane_regs_sdm845[] = {
@@ -319,6 +398,90 @@ csiphy_lane_regs lane_regs_sm8250[] = {
{0x0884, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
};
+/* 14nm 2PH v 2.0.1 2p5Gbps 4 lane DPHY mode */
+static const struct
+csiphy_lane_regs lane_regs_qcm2290[] = {
+ {0x0030, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x002c, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0034, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0028, 0x04, 0x00, CSIPHY_DNP_PARAMS},
+ {0x003c, 0xb8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x001c, 0x0a, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0000, 0xd7, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0004, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0020, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0008, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x000c, 0xff, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0010, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0038, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0060, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0064, 0x3f, 0x00, CSIPHY_DEFAULT_PARAMS},
+
+ {0x0730, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x072c, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0734, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0728, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x073c, 0xb8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x071c, 0x0a, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0700, 0xc0, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0704, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0720, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0708, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x070c, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0710, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0738, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0760, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0764, 0x3f, 0x00, CSIPHY_DEFAULT_PARAMS},
+
+ {0x0230, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x022c, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0234, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0228, 0x04, 0x00, CSIPHY_DNP_PARAMS},
+ {0x023c, 0xb8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x021c, 0x0a, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0200, 0xd7, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0204, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0220, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0208, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x020c, 0xff, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0210, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0238, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0260, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0264, 0x3f, 0x00, CSIPHY_DEFAULT_PARAMS},
+
+ {0x0430, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x042c, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0434, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0428, 0x04, 0x00, CSIPHY_DNP_PARAMS},
+ {0x043c, 0xb8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x041c, 0x0a, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0400, 0xd7, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0404, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0420, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0408, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x040C, 0xff, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0410, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0438, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0460, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0464, 0x3f, 0x00, CSIPHY_DEFAULT_PARAMS},
+
+ {0x0630, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x062c, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0634, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0628, 0x04, 0x00, CSIPHY_DNP_PARAMS},
+ {0x063c, 0xb8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x061c, 0x0a, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0600, 0xd7, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0604, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0620, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0608, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x060C, 0xff, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0610, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0638, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0660, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0664, 0x3f, 0x00, CSIPHY_DEFAULT_PARAMS},
+};
+
/* GEN2 2.1.2 2PH DPHY mode */
static const struct
csiphy_lane_regs lane_regs_sm8550[] = {
@@ -744,11 +907,14 @@ static bool csiphy_is_gen2(u32 version)
bool ret = false;
switch (version) {
+ case CAMSS_2290:
case CAMSS_7280:
case CAMSS_8250:
case CAMSS_8280XP:
+ case CAMSS_8300:
case CAMSS_845:
case CAMSS_8550:
+ case CAMSS_8775P:
case CAMSS_X1E80100:
ret = true;
break;
@@ -829,6 +995,10 @@ static int csiphy_init(struct csiphy_device *csiphy)
regs->lane_regs = &lane_regs_sdm845[0];
regs->lane_array_size = ARRAY_SIZE(lane_regs_sdm845);
break;
+ case CAMSS_2290:
+ regs->lane_regs = &lane_regs_qcm2290[0];
+ regs->lane_array_size = ARRAY_SIZE(lane_regs_qcm2290);
+ break;
case CAMSS_7280:
case CAMSS_8250:
regs->lane_regs = &lane_regs_sm8250[0];
@@ -848,6 +1018,11 @@ static int csiphy_init(struct csiphy_device *csiphy)
regs->lane_array_size = ARRAY_SIZE(lane_regs_sm8550);
regs->offset = 0x1000;
break;
+ case CAMSS_8300:
+ case CAMSS_8775P:
+ regs->lane_regs = &lane_regs_sa8775p[0];
+ regs->lane_array_size = ARRAY_SIZE(lane_regs_sa8775p);
+ break;
default:
break;
}
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-340.c b/drivers/media/platform/qcom/camss/camss-vfe-340.c
new file mode 100644
index 000000000000..30d7630b3e8b
--- /dev/null
+++ b/drivers/media/platform/qcom/camss/camss-vfe-340.c
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Qualcomm MSM Camera Subsystem - VFE (Video Front End) Module 340 (TFE)
+ *
+ * Copyright (c) 2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/delay.h>
+#include <linux/bitfield.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+
+#include "camss.h"
+#include "camss-vfe.h"
+
+#define TFE_GLOBAL_RESET_CMD (0x014)
+#define TFE_GLOBAL_RESET_CMD_CORE BIT(0)
+
+#define TFE_REG_UPDATE_CMD (0x02c)
+
+#define TFE_IRQ_CMD (0x030)
+#define TFE_IRQ_CMD_CLEAR BIT(0)
+#define TFE_IRQ_MASK_0 (0x034)
+#define TFE_IRQ_MASK_0_RST_DONE BIT(0)
+#define TFE_IRQ_MASK_0_BUS_WR BIT(1)
+#define TFE_IRQ_MASK_1 (0x038)
+#define TFE_IRQ_MASK_2 (0x03c)
+#define TFE_IRQ_CLEAR_0 (0x040)
+
+#define TFE_IRQ_STATUS_0 (0x04c)
+
+#define BUS_REG(a) (0xa00 + (a))
+
+#define TFE_BUS_IRQ_MASK_0 BUS_REG(0x18)
+#define TFE_BUS_IRQ_MASK_RUP_DONE_MASK GENMASK(3, 0)
+#define TFE_BUS_IRQ_MASK_RUP_DONE(sc) FIELD_PREP(TFE_BUS_IRQ_MASK_RUP_DONE_MASK, BIT(sc))
+#define TFE_BUS_IRQ_MASK_BUF_DONE_MASK GENMASK(15, 8)
+#define TFE_BUS_IRQ_MASK_BUF_DONE(sg) FIELD_PREP(TFE_BUS_IRQ_MASK_BUF_DONE_MASK, BIT(sg))
+#define TFE_BUS_IRQ_MASK_0_CONS_VIOL BIT(28)
+#define TFE_BUS_IRQ_MASK_0_VIOL BIT(30)
+#define TFE_BUS_IRQ_MASK_0_IMG_VIOL BIT(31)
+
+#define TFE_BUS_IRQ_MASK_1 BUS_REG(0x1c)
+#define TFE_BUS_IRQ_CLEAR_0 BUS_REG(0x20)
+#define TFE_BUS_IRQ_STATUS_0 BUS_REG(0x28)
+#define TFE_BUS_IRQ_CMD BUS_REG(0x30)
+#define TFE_BUS_IRQ_CMD_CLEAR BIT(0)
+
+#define TFE_BUS_STATUS_CLEAR BUS_REG(0x60)
+#define TFE_BUS_VIOLATION_STATUS BUS_REG(0x64)
+#define TFE_BUS_OVERFLOW_STATUS BUS_REG(0x68)
+#define TFE_BUS_IMAGE_SZ_VIOLATION_STATUS BUS_REG(0x70)
+
+#define TFE_BUS_CLIENT_CFG(c) BUS_REG(0x200 + (c) * 0x100)
+#define TFE_BUS_CLIENT_CFG_EN BIT(0)
+#define TFE_BUS_CLIENT_CFG_MODE_FRAME BIT(16)
+#define TFE_BUS_IMAGE_ADDR(c) BUS_REG(0x204 + (c) * 0x100)
+#define TFE_BUS_FRAME_INCR(c) BUS_REG(0x208 + (c) * 0x100)
+#define TFE_BUS_IMAGE_CFG_0(c) BUS_REG(0x20c + (c) * 0x100)
+#define TFE_BUS_IMAGE_CFG_0_DEFAULT 0xffff
+#define TFE_BUS_IMAGE_CFG_1(c) BUS_REG(0x210 + (c) * 0x100)
+#define TFE_BUS_IMAGE_CFG_2(c) BUS_REG(0x214 + (c) * 0x100)
+#define TFE_BUS_IMAGE_CFG_2_DEFAULT 0xffff
+#define TFE_BUS_PACKER_CFG(c) BUS_REG(0x218 + (c) * 0x100)
+#define TFE_BUS_PACKER_CFG_FMT_PLAIN64 0xa
+#define TFE_BUS_IRQ_SUBSAMPLE_CFG_0(c) BUS_REG(0x230 + (c) * 0x100)
+#define TFE_BUS_IRQ_SUBSAMPLE_CFG_1(c) BUS_REG(0x234 + (c) * 0x100)
+#define TFE_BUS_FRAMEDROP_CFG_0(c) BUS_REG(0x238 + (c) * 0x100)
+#define TFE_BUS_FRAMEDROP_CFG_1(c) BUS_REG(0x23c + (c) * 0x100)
+
+/*
+ * TODO: differentiate the port ID based on the requested type (RDI, BHIST, etc.)
+ *
+ * TFE write master IDs (clients)
+ *
+ * BAYER 0
+ * IDEAL_RAW 1
+ * STATS_TINTLESS_BG 2
+ * STATS_BHIST 3
+ * STATS_AWB_BG 4
+ * STATS_AEC_BG 5
+ * STATS_BAF 6
+ * RDI0 7
+ * RDI1 8
+ * RDI2 9
+ */
+#define RDI_WM(n) (7 + (n))
+#define TFE_WM_NUM 10
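+/* e.g. RDI_WM(0) = 7 and RDI_WM(2) = 9, matching the client list above */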
+
+enum tfe_iface {
+ TFE_IFACE_PIX,
+ TFE_IFACE_RDI0,
+ TFE_IFACE_RDI1,
+ TFE_IFACE_RDI2,
+ TFE_IFACE_NUM
+};
+
+enum tfe_subgroups {
+ TFE_SUBGROUP_BAYER,
+ TFE_SUBGROUP_IDEAL_RAW,
+ TFE_SUBGROUP_HDR,
+ TFE_SUBGROUP_BG,
+ TFE_SUBGROUP_BAF,
+ TFE_SUBGROUP_RDI0,
+ TFE_SUBGROUP_RDI1,
+ TFE_SUBGROUP_RDI2,
+ TFE_SUBGROUP_NUM
+};
+
+static enum tfe_iface tfe_line_iface_map[VFE_LINE_NUM_MAX] = {
+ [VFE_LINE_RDI0] = TFE_IFACE_RDI0,
+ [VFE_LINE_RDI1] = TFE_IFACE_RDI1,
+ [VFE_LINE_RDI2] = TFE_IFACE_RDI2,
+ [VFE_LINE_PIX] = TFE_IFACE_PIX,
+};
+
+static enum vfe_line_id tfe_subgroup_line_map[TFE_SUBGROUP_NUM] = {
+ [TFE_SUBGROUP_BAYER] = VFE_LINE_PIX,
+ [TFE_SUBGROUP_IDEAL_RAW] = VFE_LINE_PIX,
+ [TFE_SUBGROUP_HDR] = VFE_LINE_PIX,
+ [TFE_SUBGROUP_BG] = VFE_LINE_PIX,
+ [TFE_SUBGROUP_BAF] = VFE_LINE_PIX,
+ [TFE_SUBGROUP_RDI0] = VFE_LINE_RDI0,
+ [TFE_SUBGROUP_RDI1] = VFE_LINE_RDI1,
+ [TFE_SUBGROUP_RDI2] = VFE_LINE_RDI2,
+};
+
+static inline enum tfe_iface __line_to_iface(enum vfe_line_id line_id)
+{
+ if (line_id <= VFE_LINE_NONE || line_id >= VFE_LINE_NUM_MAX) {
+ pr_warn("VFE: Invalid line %d\n", line_id);
+ return TFE_IFACE_RDI0;
+ }
+
+ return tfe_line_iface_map[line_id];
+}
+
+static inline enum vfe_line_id __iface_to_line(unsigned int iface)
+{
+ int i;
+
+ for (i = 0; i < VFE_LINE_NUM_MAX; i++) {
+ if (tfe_line_iface_map[i] == iface)
+ return i;
+ }
+
+ return VFE_LINE_NONE;
+}
+
+static inline enum vfe_line_id __subgroup_to_line(enum tfe_subgroups sg)
+{
+ if (sg >= TFE_SUBGROUP_NUM)
+ return VFE_LINE_NONE;
+
+ return tfe_subgroup_line_map[sg];
+}
+
+static void vfe_global_reset(struct vfe_device *vfe)
+{
+ writel(TFE_IRQ_MASK_0_RST_DONE, vfe->base + TFE_IRQ_MASK_0);
+ writel(TFE_GLOBAL_RESET_CMD_CORE, vfe->base + TFE_GLOBAL_RESET_CMD);
+}
+
+static irqreturn_t vfe_isr(int irq, void *dev)
+{
+ struct vfe_device *vfe = dev;
+ u32 status;
+ int i;
+
+ status = readl_relaxed(vfe->base + TFE_IRQ_STATUS_0);
+ writel_relaxed(status, vfe->base + TFE_IRQ_CLEAR_0);
+ writel_relaxed(TFE_IRQ_CMD_CLEAR, vfe->base + TFE_IRQ_CMD);
+
+ if (status & TFE_IRQ_MASK_0_RST_DONE) {
+		dev_dbg(vfe->camss->dev, "VFE%u: Reset done!\n", vfe->id);
+ vfe_isr_reset_ack(vfe);
+ }
+
+ if (status & TFE_IRQ_MASK_0_BUS_WR) {
+ u32 bus_status = readl_relaxed(vfe->base + TFE_BUS_IRQ_STATUS_0);
+
+ writel_relaxed(bus_status, vfe->base + TFE_BUS_IRQ_CLEAR_0);
+ writel_relaxed(TFE_BUS_IRQ_CMD_CLEAR, vfe->base + TFE_BUS_IRQ_CMD);
+
+ for (i = 0; i < TFE_IFACE_NUM; i++) {
+ if (bus_status & TFE_BUS_IRQ_MASK_RUP_DONE(i))
+ vfe->res->hw_ops->reg_update_clear(vfe, __iface_to_line(i));
+ }
+
+ for (i = 0; i < TFE_SUBGROUP_NUM; i++) {
+ if (bus_status & TFE_BUS_IRQ_MASK_BUF_DONE(i))
+ vfe_buf_done(vfe, __subgroup_to_line(i));
+ }
+
+ if (bus_status & TFE_BUS_IRQ_MASK_0_CONS_VIOL)
+ dev_err_ratelimited(vfe->camss->dev, "VFE%u: Bad config violation",
+ vfe->id);
+
+ if (bus_status & TFE_BUS_IRQ_MASK_0_VIOL)
+ dev_err_ratelimited(vfe->camss->dev, "VFE%u: Input data violation",
+ vfe->id);
+
+ if (bus_status & TFE_BUS_IRQ_MASK_0_IMG_VIOL)
+ dev_err_ratelimited(vfe->camss->dev, "VFE%u: Image size violation",
+ vfe->id);
+ }
+
+ status = readl_relaxed(vfe->base + TFE_BUS_OVERFLOW_STATUS);
+ if (status) {
+ writel_relaxed(status, vfe->base + TFE_BUS_STATUS_CLEAR);
+ for (i = 0; i < TFE_WM_NUM; i++) {
+ if (status & BIT(i))
+ dev_err_ratelimited(vfe->camss->dev,
+ "VFE%u: bus overflow for wm %u\n",
+ vfe->id, i);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int vfe_halt(struct vfe_device *vfe)
+{
+ /* rely on vfe_disable_output() to stop the VFE */
+ return 0;
+}
+
+static void vfe_enable_irq(struct vfe_device *vfe)
+{
+ writel(TFE_IRQ_MASK_0_RST_DONE | TFE_IRQ_MASK_0_BUS_WR,
+ vfe->base + TFE_IRQ_MASK_0);
+ writel(TFE_BUS_IRQ_MASK_RUP_DONE_MASK | TFE_BUS_IRQ_MASK_BUF_DONE_MASK |
+ TFE_BUS_IRQ_MASK_0_CONS_VIOL | TFE_BUS_IRQ_MASK_0_VIOL |
+ TFE_BUS_IRQ_MASK_0_IMG_VIOL, vfe->base + TFE_BUS_IRQ_MASK_0);
+}
+
+static void vfe_wm_update(struct vfe_device *vfe, u8 rdi, u32 addr,
+ struct vfe_line *line)
+{
+ u8 wm = RDI_WM(rdi);
+
+ writel_relaxed(addr, vfe->base + TFE_BUS_IMAGE_ADDR(wm));
+}
+
+static void vfe_wm_start(struct vfe_device *vfe, u8 rdi, struct vfe_line *line)
+{
+ struct v4l2_pix_format_mplane *pix = &line->video_out.active_fmt.fmt.pix_mp;
+ u32 stride = pix->plane_fmt[0].bytesperline;
+ u8 wm = RDI_WM(rdi);
+
+ /* Configuration for plain RDI frames */
+ writel_relaxed(TFE_BUS_IMAGE_CFG_0_DEFAULT, vfe->base + TFE_BUS_IMAGE_CFG_0(wm));
+ writel_relaxed(0u, vfe->base + TFE_BUS_IMAGE_CFG_1(wm));
+ writel_relaxed(TFE_BUS_IMAGE_CFG_2_DEFAULT, vfe->base + TFE_BUS_IMAGE_CFG_2(wm));
+ writel_relaxed(stride * pix->height, vfe->base + TFE_BUS_FRAME_INCR(wm));
+ writel_relaxed(TFE_BUS_PACKER_CFG_FMT_PLAIN64, vfe->base + TFE_BUS_PACKER_CFG(wm));
+
+ /* No dropped frames, one irq per frame */
+ writel_relaxed(0, vfe->base + TFE_BUS_FRAMEDROP_CFG_0(wm));
+ writel_relaxed(1, vfe->base + TFE_BUS_FRAMEDROP_CFG_1(wm));
+ writel_relaxed(0, vfe->base + TFE_BUS_IRQ_SUBSAMPLE_CFG_0(wm));
+ writel_relaxed(1, vfe->base + TFE_BUS_IRQ_SUBSAMPLE_CFG_1(wm));
+
+ vfe_enable_irq(vfe);
+
+ writel(TFE_BUS_CLIENT_CFG_EN | TFE_BUS_CLIENT_CFG_MODE_FRAME,
+ vfe->base + TFE_BUS_CLIENT_CFG(wm));
+
+ dev_dbg(vfe->camss->dev, "VFE%u: Started RDI%u width %u height %u stride %u\n",
+ vfe->id, rdi, pix->width, pix->height, stride);
+}
+
+static void vfe_wm_stop(struct vfe_device *vfe, u8 rdi)
+{
+ u8 wm = RDI_WM(rdi);
+
+ writel(0, vfe->base + TFE_BUS_CLIENT_CFG(wm));
+
+ dev_dbg(vfe->camss->dev, "VFE%u: Stopped RDI%u\n", vfe->id, rdi);
+}
+
+static const struct camss_video_ops vfe_video_ops_340 = {
+ .queue_buffer = vfe_queue_buffer_v2,
+ .flush_buffers = vfe_flush_buffers,
+};
+
+static void vfe_subdev_init(struct device *dev, struct vfe_device *vfe)
+{
+	vfe->video_ops = vfe_video_ops_340;
+}
+
+static void vfe_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id)
+{
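+	/* accumulate pending updates per interface; vfe_reg_update_clear() drops the bit */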
+ vfe->reg_update |= BIT(__line_to_iface(line_id));
+ writel_relaxed(vfe->reg_update, vfe->base + TFE_REG_UPDATE_CMD);
+}
+
+static void vfe_reg_update_clear(struct vfe_device *vfe, enum vfe_line_id line_id)
+{
+ vfe->reg_update &= ~BIT(__line_to_iface(line_id));
+}
+
+const struct vfe_hw_ops vfe_ops_340 = {
+ .global_reset = vfe_global_reset,
+ .hw_version = vfe_hw_version,
+ .isr = vfe_isr,
+ .pm_domain_off = vfe_pm_domain_off,
+ .pm_domain_on = vfe_pm_domain_on,
+ .subdev_init = vfe_subdev_init,
+ .vfe_disable = vfe_disable,
+ .vfe_enable = vfe_enable_v2,
+ .vfe_halt = vfe_halt,
+ .vfe_wm_start = vfe_wm_start,
+ .vfe_wm_stop = vfe_wm_stop,
+ .vfe_buf_done = vfe_buf_done,
+ .vfe_wm_update = vfe_wm_update,
+ .reg_update = vfe_reg_update,
+ .reg_update_clear = vfe_reg_update_clear,
+};
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-780.c b/drivers/media/platform/qcom/camss/camss-vfe-gen3.c
index b9812d70f91b..22579617def7 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe-780.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe-gen3.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Qualcomm MSM Camera Subsystem - VFE (Video Front End) Module v780 (SM8550)
+ * Qualcomm MSM Camera Subsystem - VFE (Video Front End) Module gen3
*
* Copyright (c) 2024 Qualcomm Technologies, Inc.
*/
@@ -12,13 +12,44 @@
#include "camss.h"
#include "camss-vfe.h"
-#define BUS_REG_BASE (vfe_is_lite(vfe) ? 0x200 : 0xC00)
+#define IS_VFE_690(vfe) \
+ ((vfe->camss->res->version == CAMSS_8775P) \
+ || (vfe->camss->res->version == CAMSS_8300))
+
+#define BUS_REG_BASE_690 \
+ (vfe_is_lite(vfe) ? 0x480 : 0x400)
+#define BUS_REG_BASE_780 \
+ (vfe_is_lite(vfe) ? 0x200 : 0xC00)
+#define BUS_REG_BASE \
+ (IS_VFE_690(vfe) ? BUS_REG_BASE_690 : BUS_REG_BASE_780)
+
+#define VFE_TOP_CORE_CFG (0x24)
+#define VFE_DISABLE_DSCALING_DS4 BIT(21)
+#define VFE_DISABLE_DSCALING_DS16 BIT(22)
+
+#define VFE_BUS_WM_TEST_BUS_CTRL_690 (BUS_REG_BASE + 0xFC)
+#define VFE_BUS_WM_TEST_BUS_CTRL_780 (BUS_REG_BASE + 0xDC)
+#define VFE_BUS_WM_TEST_BUS_CTRL \
+ (IS_VFE_690(vfe) ? VFE_BUS_WM_TEST_BUS_CTRL_690 \
+ : VFE_BUS_WM_TEST_BUS_CTRL_780)
+/*
+ * Bus client mapping:
+ *
+ * Full VFE:
+ * VFE_690: 16 = RDI0, 17 = RDI1, 18 = RDI2
+ * VFE_780: 23 = RDI0, 24 = RDI1, 25 = RDI2
+ *
+ * VFE LITE:
+ * VFE_690 : 0 = RDI0, 1 = RDI1, 2 = RDI2, 3 = RDI3, 4 = RDI4, 5 = RDI5
+ * VFE_780 : 0 = RDI0, 1 = RDI1, 2 = RDI2, 3 = RDI3, 4 = RDI4
+ */
+#define RDI_WM_690(n) ((vfe_is_lite(vfe) ? 0x0 : 0x10) + (n))
+#define RDI_WM_780(n) ((vfe_is_lite(vfe) ? 0x0 : 0x17) + (n))
+#define RDI_WM(n) (IS_VFE_690(vfe) ? RDI_WM_690(n) : RDI_WM_780(n))
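+/* e.g. for a full (non-lite) VFE, RDI_WM(1) is 0x11 (17) on 690 and 0x18 (24) on 780 */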
#define VFE_BUS_WM_CGC_OVERRIDE (BUS_REG_BASE + 0x08)
#define WM_CGC_OVERRIDE_ALL (0x7FFFFFF)
-#define VFE_BUS_WM_TEST_BUS_CTRL (BUS_REG_BASE + 0xDC)
-
#define VFE_BUS_WM_CFG(n) (BUS_REG_BASE + 0x200 + (n) * 0x100)
#define WM_CFG_EN BIT(0)
#define WM_VIR_FRM_EN BIT(1)
@@ -39,17 +70,6 @@
#define VFE_BUS_WM_MMU_PREFETCH_CFG(n) (BUS_REG_BASE + 0x260 + (n) * 0x100)
#define VFE_BUS_WM_MMU_PREFETCH_MAX_OFFSET(n) (BUS_REG_BASE + 0x264 + (n) * 0x100)
-/*
- * Bus client mapping:
- *
- * Full VFE:
- * 23 = RDI0, 24 = RDI1, 25 = RDI2
- *
- * VFE LITE:
- * 0 = RDI0, 1 = RDI1, 2 = RDI3, 4 = RDI4
- */
-#define RDI_WM(n) ((vfe_is_lite(vfe) ? 0x0 : 0x17) + (n))
-
static void vfe_wm_start(struct vfe_device *vfe, u8 wm, struct vfe_line *line)
{
struct v4l2_pix_format_mplane *pix =
@@ -62,14 +82,24 @@ static void vfe_wm_start(struct vfe_device *vfe, u8 wm, struct vfe_line *line)
writel(0x0, vfe->base + VFE_BUS_WM_TEST_BUS_CTRL);
- writel(ALIGN(pix->plane_fmt[0].bytesperline, 16) * pix->height >> 8,
- vfe->base + VFE_BUS_WM_FRAME_INCR(wm));
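+	/* 690 programs the frame increment in bytes, 780 in 256-byte units */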
+ if (IS_VFE_690(vfe))
+ writel(ALIGN(pix->plane_fmt[0].bytesperline, 16) * pix->height,
+ vfe->base + VFE_BUS_WM_FRAME_INCR(wm));
+ else
+ writel(ALIGN(pix->plane_fmt[0].bytesperline, 16) * pix->height >> 8,
+ vfe->base + VFE_BUS_WM_FRAME_INCR(wm));
+
writel((WM_IMAGE_CFG_0_DEFAULT_WIDTH & 0xFFFF),
vfe->base + VFE_BUS_WM_IMAGE_CFG_0(wm));
writel(WM_IMAGE_CFG_2_DEFAULT_STRIDE,
vfe->base + VFE_BUS_WM_IMAGE_CFG_2(wm));
writel(0, vfe->base + VFE_BUS_WM_PACKER_CFG(wm));
+	/* Top core config: disable the DS4/DS16 downscaling paths */
+ if (IS_VFE_690(vfe))
+ writel(VFE_DISABLE_DSCALING_DS4 | VFE_DISABLE_DSCALING_DS16,
+ vfe->base + VFE_TOP_CORE_CFG);
+
/* no dropped frames, one irq per frame */
writel(0, vfe->base + VFE_BUS_WM_FRAMEDROP_PERIOD(wm));
writel(1, vfe->base + VFE_BUS_WM_FRAMEDROP_PATTERN(wm));
@@ -92,7 +122,11 @@ static void vfe_wm_update(struct vfe_device *vfe, u8 wm, u32 addr,
struct vfe_line *line)
{
wm = RDI_WM(wm);
- writel((addr >> 8) & 0xFFFFFFFF, vfe->base + VFE_BUS_WM_IMAGE_ADDR(wm));
+
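+	/* 690 takes the full buffer address, 780 expects it in 256-byte units */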
+ if (IS_VFE_690(vfe))
+ writel(addr, vfe->base + VFE_BUS_WM_IMAGE_ADDR(wm));
+ else
+ writel((addr >> 8), vfe->base + VFE_BUS_WM_IMAGE_ADDR(wm));
dev_dbg(vfe->camss->dev, "wm:%d, image buf addr:0x%x\n",
wm, addr);
@@ -113,14 +147,14 @@ static inline void vfe_reg_update_clear(struct vfe_device *vfe,
camss_reg_update(vfe->camss, vfe->id, port_id, true);
}
-static const struct camss_video_ops vfe_video_ops_780 = {
+static const struct camss_video_ops vfe_video_ops_gen3 = {
.queue_buffer = vfe_queue_buffer_v2,
.flush_buffers = vfe_flush_buffers,
};
static void vfe_subdev_init(struct device *dev, struct vfe_device *vfe)
{
- vfe->video_ops = vfe_video_ops_780;
+ vfe->video_ops = vfe_video_ops_gen3;
}
static void vfe_global_reset(struct vfe_device *vfe)
@@ -140,7 +174,7 @@ static int vfe_halt(struct vfe_device *vfe)
return 0;
}
-const struct vfe_hw_ops vfe_ops_780 = {
+const struct vfe_hw_ops vfe_ops_gen3 = {
.global_reset = vfe_global_reset,
.hw_version = vfe_hw_version,
.isr = vfe_isr,
diff --git a/drivers/media/platform/qcom/camss/camss-vfe.c b/drivers/media/platform/qcom/camss/camss-vfe.c
index 4bca6c3abaff..dff8d0a1e8c2 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe.c
@@ -340,12 +340,15 @@ static u32 vfe_src_pad_code(struct vfe_line *line, u32 sink_code,
}
break;
case CAMSS_660:
+ case CAMSS_2290:
case CAMSS_7280:
case CAMSS_8x96:
case CAMSS_8250:
case CAMSS_8280XP:
+ case CAMSS_8300:
case CAMSS_845:
case CAMSS_8550:
+ case CAMSS_8775P:
case CAMSS_X1E80100:
switch (sink_code) {
case MEDIA_BUS_FMT_YUYV8_1X16:
@@ -910,7 +913,24 @@ static int vfe_match_clock_names(struct vfe_device *vfe,
return (!strcmp(clock->name, vfe_name) ||
!strcmp(clock->name, vfe_lite_name) ||
- !strcmp(clock->name, "vfe_lite"));
+ !strcmp(clock->name, "vfe_lite") ||
+ !strcmp(clock->name, "camnoc_axi"));
+}
+
+/*
+ * vfe_check_clock_levels - Check whether a clock has any non-zero level
+ * @clock: clock data
+ *
+ * Return true if there is at least one non-zero clock level, false otherwise.
+ */
+static bool vfe_check_clock_levels(struct camss_clock *clock)
+{
+ int i;
+
+ for (i = 0; i < clock->nfreqs; i++)
+ if (clock->freq[i])
+ return true;
+ return false;
}
/*
@@ -936,7 +956,7 @@ static int vfe_set_clock_rates(struct vfe_device *vfe)
for (i = 0; i < vfe->nclocks; i++) {
struct camss_clock *clock = &vfe->clock[i];
- if (vfe_match_clock_names(vfe, clock)) {
+ if (vfe_match_clock_names(vfe, clock) && vfe_check_clock_levels(clock)) {
u64 min_rate = 0;
long rate;
@@ -1017,7 +1037,7 @@ static int vfe_check_clock_rates(struct vfe_device *vfe)
for (i = 0; i < vfe->nclocks; i++) {
struct camss_clock *clock = &vfe->clock[i];
- if (vfe_match_clock_names(vfe, clock)) {
+ if (vfe_match_clock_names(vfe, clock) && vfe_check_clock_levels(clock)) {
u64 min_rate = 0;
unsigned long rate;
@@ -1972,8 +1992,10 @@ static int vfe_bpl_align(struct vfe_device *vfe)
case CAMSS_7280:
case CAMSS_8250:
case CAMSS_8280XP:
+ case CAMSS_8300:
case CAMSS_845:
case CAMSS_8550:
+ case CAMSS_8775P:
case CAMSS_X1E80100:
ret = 16;
break;
diff --git a/drivers/media/platform/qcom/camss/camss-vfe.h b/drivers/media/platform/qcom/camss/camss-vfe.h
index a23f666be753..0300efdb1c46 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe.h
+++ b/drivers/media/platform/qcom/camss/camss-vfe.h
@@ -242,9 +242,10 @@ extern const struct vfe_hw_ops vfe_ops_4_1;
extern const struct vfe_hw_ops vfe_ops_4_7;
extern const struct vfe_hw_ops vfe_ops_4_8;
extern const struct vfe_hw_ops vfe_ops_170;
+extern const struct vfe_hw_ops vfe_ops_340;
extern const struct vfe_hw_ops vfe_ops_480;
extern const struct vfe_hw_ops vfe_ops_680;
-extern const struct vfe_hw_ops vfe_ops_780;
+extern const struct vfe_hw_ops vfe_ops_gen3;
int vfe_get(struct vfe_device *vfe);
void vfe_put(struct vfe_device *vfe);
diff --git a/drivers/media/platform/qcom/camss/camss-video.c b/drivers/media/platform/qcom/camss/camss-video.c
index 8d05802d1735..831486e14754 100644
--- a/drivers/media/platform/qcom/camss/camss-video.c
+++ b/drivers/media/platform/qcom/camss/camss-video.c
@@ -604,50 +604,11 @@ static const struct v4l2_ioctl_ops msm_vid_ioctl_ops = {
* V4L2 file operations
*/
-static int video_open(struct file *file)
-{
- struct video_device *vdev = video_devdata(file);
- struct camss_video *video = video_drvdata(file);
- struct v4l2_fh *vfh;
- int ret;
-
- mutex_lock(&video->lock);
-
- vfh = kzalloc(sizeof(*vfh), GFP_KERNEL);
- if (vfh == NULL) {
- ret = -ENOMEM;
- goto error_alloc;
- }
-
- v4l2_fh_init(vfh, vdev);
- v4l2_fh_add(vfh);
-
- file->private_data = vfh;
-
- mutex_unlock(&video->lock);
-
- return 0;
-
-error_alloc:
- mutex_unlock(&video->lock);
-
- return ret;
-}
-
-static int video_release(struct file *file)
-{
- vb2_fop_release(file);
-
- file->private_data = NULL;
-
- return 0;
-}
-
static const struct v4l2_file_operations msm_vid_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = video_ioctl2,
- .open = video_open,
- .release = video_release,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
.poll = vb2_fop_poll,
.mmap = vb2_fop_mmap,
.read = vb2_fop_read,
diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c
index e08e70b93824..2fbcd0e343aa 100644
--- a/drivers/media/platform/qcom/camss/camss.c
+++ b/drivers/media/platform/qcom/camss/camss.c
@@ -515,6 +515,140 @@ static const struct camss_subdev_resources vfe_res_8x96[] = {
}
};
+static const struct camss_subdev_resources csiphy_res_2290[] = {
+ /* CSIPHY0 */
+ {
+ .regulators = { "vdd-csiphy-1p2", "vdd-csiphy-1p8" },
+ .clock = { "top_ahb", "ahb", "csiphy0", "csiphy0_timer" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 240000000, 341330000, 384000000 },
+ { 100000000, 200000000, 268800000 } },
+ .reg = { "csiphy0" },
+ .interrupt = { "csiphy0" },
+ .csiphy = {
+ .id = 0,
+ .hw_ops = &csiphy_ops_3ph_1_0,
+ .formats = &csiphy_formats_sdm845
+ }
+ },
+
+ /* CSIPHY1 */
+ {
+ .regulators = { "vdd-csiphy-1p2", "vdd-csiphy-1p8" },
+ .clock = { "top_ahb", "ahb", "csiphy1", "csiphy1_timer" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 240000000, 341330000, 384000000 },
+ { 100000000, 200000000, 268800000 } },
+ .reg = { "csiphy1" },
+ .interrupt = { "csiphy1" },
+ .csiphy = {
+ .id = 1,
+ .hw_ops = &csiphy_ops_3ph_1_0,
+ .formats = &csiphy_formats_sdm845
+ }
+ }
+};
+
+static const struct camss_subdev_resources csid_res_2290[] = {
+ /* CSID0 */
+ {
+ .regulators = {},
+ .clock = { "top_ahb", "ahb", "csi0", "vfe0_cphy_rx", "vfe0" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 192000000, 240000000, 384000000, 426400000 },
+ { 0 },
+ { 0 } },
+ .reg = { "csid0" },
+ .interrupt = { "csid0" },
+ .csid = {
+ .hw_ops = &csid_ops_340,
+ .parent_dev_ops = &vfe_parent_dev_ops,
+ .formats = &csid_formats_gen2
+ }
+ },
+
+ /* CSID1 */
+ {
+ .regulators = {},
+ .clock = { "top_ahb", "ahb", "csi1", "vfe1_cphy_rx", "vfe1" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 192000000, 240000000, 384000000, 426400000 },
+ { 0 },
+ { 0 } },
+ .reg = { "csid1" },
+ .interrupt = { "csid1" },
+ .csid = {
+ .hw_ops = &csid_ops_340,
+ .parent_dev_ops = &vfe_parent_dev_ops,
+ .formats = &csid_formats_gen2
+ }
+ }
+};
+
+static const struct camss_subdev_resources vfe_res_2290[] = {
+ /* VFE0 */
+ {
+ .regulators = {},
+ .clock = { "top_ahb", "ahb", "axi", "vfe0", "camnoc_rt_axi", "camnoc_nrt_axi" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 0 },
+ { 19200000, 153600000, 192000000, 256000000, 384000000, 460800000 },
+ { 0 },
+ { 0 }, },
+ .reg = { "vfe0" },
+ .interrupt = { "vfe0" },
+ .vfe = {
+ .line_num = 4,
+ .hw_ops = &vfe_ops_340,
+ .formats_rdi = &vfe_formats_rdi_845,
+ .formats_pix = &vfe_formats_pix_845
+ }
+ },
+
+ /* VFE1 */
+ {
+ .regulators = {},
+ .clock = { "top_ahb", "ahb", "axi", "vfe1", "camnoc_rt_axi", "camnoc_nrt_axi" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 0 },
+ { 19200000, 153600000, 192000000, 256000000, 384000000, 460800000 },
+ { 0 },
+ { 0 }, },
+ .reg = { "vfe1" },
+ .interrupt = { "vfe1" },
+ .vfe = {
+ .line_num = 4,
+ .hw_ops = &vfe_ops_340,
+ .formats_rdi = &vfe_formats_rdi_845,
+ .formats_pix = &vfe_formats_pix_845
+ }
+ },
+};
+
+static const struct resources_icc icc_res_2290[] = {
+ {
+ .name = "ahb",
+ .icc_bw_tbl.avg = 150000,
+ .icc_bw_tbl.peak = 300000,
+ },
+ {
+ .name = "hf_mnoc",
+ .icc_bw_tbl.avg = 2097152,
+ .icc_bw_tbl.peak = 3000000,
+ },
+ {
+ .name = "sf_mnoc",
+ .icc_bw_tbl.avg = 2097152,
+ .icc_bw_tbl.peak = 3000000,
+ },
+};
+
static const struct camss_subdev_resources csiphy_res_660[] = {
/* CSIPHY0 */
{
@@ -2285,7 +2419,7 @@ static const struct camss_subdev_resources csid_res_8550[] = {
.csid = {
.is_lite = false,
.parent_dev_ops = &vfe_parent_dev_ops,
- .hw_ops = &csid_ops_780,
+ .hw_ops = &csid_ops_gen3,
.formats = &csid_formats_gen2
}
},
@@ -2300,7 +2434,7 @@ static const struct camss_subdev_resources csid_res_8550[] = {
.csid = {
.is_lite = false,
.parent_dev_ops = &vfe_parent_dev_ops,
- .hw_ops = &csid_ops_780,
+ .hw_ops = &csid_ops_gen3,
.formats = &csid_formats_gen2
}
},
@@ -2315,7 +2449,7 @@ static const struct camss_subdev_resources csid_res_8550[] = {
.csid = {
.is_lite = false,
.parent_dev_ops = &vfe_parent_dev_ops,
- .hw_ops = &csid_ops_780,
+ .hw_ops = &csid_ops_gen3,
.formats = &csid_formats_gen2
}
},
@@ -2330,7 +2464,7 @@ static const struct camss_subdev_resources csid_res_8550[] = {
.csid = {
.is_lite = true,
.parent_dev_ops = &vfe_parent_dev_ops,
- .hw_ops = &csid_ops_780,
+ .hw_ops = &csid_ops_gen3,
.formats = &csid_formats_gen2
}
},
@@ -2345,7 +2479,7 @@ static const struct camss_subdev_resources csid_res_8550[] = {
.csid = {
.is_lite = true,
.parent_dev_ops = &vfe_parent_dev_ops,
- .hw_ops = &csid_ops_780,
+ .hw_ops = &csid_ops_gen3,
.formats = &csid_formats_gen2
}
}
@@ -2371,7 +2505,7 @@ static const struct camss_subdev_resources vfe_res_8550[] = {
.is_lite = false,
.has_pd = true,
.pd_name = "ife0",
- .hw_ops = &vfe_ops_780,
+ .hw_ops = &vfe_ops_gen3,
.formats_rdi = &vfe_formats_rdi_845,
.formats_pix = &vfe_formats_pix_845
}
@@ -2395,7 +2529,7 @@ static const struct camss_subdev_resources vfe_res_8550[] = {
.is_lite = false,
.has_pd = true,
.pd_name = "ife1",
- .hw_ops = &vfe_ops_780,
+ .hw_ops = &vfe_ops_gen3,
.formats_rdi = &vfe_formats_rdi_845,
.formats_pix = &vfe_formats_pix_845
}
@@ -2419,7 +2553,7 @@ static const struct camss_subdev_resources vfe_res_8550[] = {
.is_lite = false,
.has_pd = true,
.pd_name = "ife2",
- .hw_ops = &vfe_ops_780,
+ .hw_ops = &vfe_ops_gen3,
.formats_rdi = &vfe_formats_rdi_845,
.formats_pix = &vfe_formats_pix_845
}
@@ -2441,7 +2575,7 @@ static const struct camss_subdev_resources vfe_res_8550[] = {
.vfe = {
.line_num = 4,
.is_lite = true,
- .hw_ops = &vfe_ops_780,
+ .hw_ops = &vfe_ops_gen3,
.formats_rdi = &vfe_formats_rdi_845,
.formats_pix = &vfe_formats_pix_845
}
@@ -2463,7 +2597,7 @@ static const struct camss_subdev_resources vfe_res_8550[] = {
.vfe = {
.line_num = 4,
.is_lite = true,
- .hw_ops = &vfe_ops_780,
+ .hw_ops = &vfe_ops_gen3,
.formats_rdi = &vfe_formats_rdi_845,
.formats_pix = &vfe_formats_pix_845
}
@@ -2483,6 +2617,467 @@ static const struct resources_icc icc_res_sm8550[] = {
},
};
+static const struct camss_subdev_resources csiphy_res_8300[] = {
+ /* CSIPHY0 */
+ {
+ .regulators = { "vdda-phy", "vdda-pll" },
+
+ .clock = { "csiphy_rx", "csiphy0", "csiphy0_timer" },
+ .clock_rate = {
+ { 400000000 },
+ { 0 },
+ { 400000000 },
+ },
+ .reg = { "csiphy0" },
+ .interrupt = { "csiphy0" },
+ .csiphy = {
+ .id = 0,
+ .hw_ops = &csiphy_ops_3ph_1_0,
+ .formats = &csiphy_formats_sdm845,
+ }
+ },
+ /* CSIPHY1 */
+ {
+ .regulators = { "vdda-phy", "vdda-pll" },
+
+ .clock = { "csiphy_rx", "csiphy1", "csiphy1_timer" },
+ .clock_rate = {
+ { 400000000 },
+ { 0 },
+ { 400000000 },
+ },
+ .reg = { "csiphy1" },
+ .interrupt = { "csiphy1" },
+ .csiphy = {
+ .id = 1,
+ .hw_ops = &csiphy_ops_3ph_1_0,
+ .formats = &csiphy_formats_sdm845,
+ }
+ },
+ /* CSIPHY2 */
+ {
+ .regulators = { "vdda-phy", "vdda-pll" },
+
+ .clock = { "csiphy_rx", "csiphy2", "csiphy2_timer" },
+ .clock_rate = {
+ { 400000000 },
+ { 0 },
+ { 400000000 },
+ },
+ .reg = { "csiphy2" },
+ .interrupt = { "csiphy2" },
+ .csiphy = {
+ .id = 2,
+ .hw_ops = &csiphy_ops_3ph_1_0,
+ .formats = &csiphy_formats_sdm845,
+ }
+ },
+};
+
+static const struct camss_subdev_resources csiphy_res_8775p[] = {
+ /* CSIPHY0 */
+ {
+ .regulators = { "vdda-phy", "vdda-pll" },
+ .clock = { "csiphy_rx", "csiphy0", "csiphy0_timer"},
+ .clock_rate = {
+ { 400000000 },
+ { 0 },
+ { 400000000 },
+ },
+ .reg = { "csiphy0" },
+ .interrupt = { "csiphy0" },
+ .csiphy = {
+ .id = 0,
+ .hw_ops = &csiphy_ops_3ph_1_0,
+ .formats = &csiphy_formats_sdm845
+ }
+ },
+ /* CSIPHY1 */
+ {
+ .regulators = { "vdda-phy", "vdda-pll" },
+ .clock = { "csiphy_rx", "csiphy1", "csiphy1_timer"},
+ .clock_rate = {
+ { 400000000 },
+ { 0 },
+ { 400000000 },
+ },
+ .reg = { "csiphy1" },
+ .interrupt = { "csiphy1" },
+ .csiphy = {
+ .id = 1,
+ .hw_ops = &csiphy_ops_3ph_1_0,
+ .formats = &csiphy_formats_sdm845
+ }
+ },
+ /* CSIPHY2 */
+ {
+ .regulators = { "vdda-phy", "vdda-pll" },
+ .clock = { "csiphy_rx", "csiphy2", "csiphy2_timer"},
+ .clock_rate = {
+ { 400000000 },
+ { 0 },
+ { 400000000 },
+ },
+ .reg = { "csiphy2" },
+ .interrupt = { "csiphy2" },
+ .csiphy = {
+ .id = 2,
+ .hw_ops = &csiphy_ops_3ph_1_0,
+ .formats = &csiphy_formats_sdm845
+ }
+ },
+ /* CSIPHY3 */
+ {
+ .regulators = { "vdda-phy", "vdda-pll" },
+ .clock = { "csiphy_rx", "csiphy3", "csiphy3_timer"},
+ .clock_rate = {
+ { 400000000 },
+ { 0 },
+ { 400000000 },
+ },
+ .reg = { "csiphy3" },
+ .interrupt = { "csiphy3" },
+ .csiphy = {
+ .id = 3,
+ .hw_ops = &csiphy_ops_3ph_1_0,
+ .formats = &csiphy_formats_sdm845
+ }
+ },
+};
+
+static const struct camss_subdev_resources csid_res_8775p[] = {
+ /* CSID0 */
+ {
+ .regulators = {},
+ .clock = { "csid", "csiphy_rx"},
+ .clock_rate = {
+ { 400000000, 400000000},
+ { 400000000, 400000000}
+ },
+ .reg = { "csid0" },
+ .interrupt = { "csid0" },
+ .csid = {
+ .is_lite = false,
+ .hw_ops = &csid_ops_gen3,
+ .parent_dev_ops = &vfe_parent_dev_ops,
+ .formats = &csid_formats_gen2
+ }
+ },
+ /* CSID1 */
+ {
+ .regulators = {},
+ .clock = { "csid", "csiphy_rx"},
+ .clock_rate = {
+ { 400000000, 400000000},
+ { 400000000, 400000000}
+ },
+ .reg = { "csid1" },
+ .interrupt = { "csid1" },
+ .csid = {
+ .is_lite = false,
+ .hw_ops = &csid_ops_gen3,
+ .parent_dev_ops = &vfe_parent_dev_ops,
+ .formats = &csid_formats_gen2
+ }
+ },
+
+ /* CSID2 (lite) */
+ {
+ .regulators = {},
+ .clock = { "cpas_vfe_lite", "vfe_lite_ahb",
+ "vfe_lite_csid", "vfe_lite_cphy_rx",
+ "vfe_lite"},
+ .clock_rate = {
+ { 0, 0, 400000000, 400000000, 0},
+ { 0, 0, 400000000, 480000000, 0}
+ },
+ .reg = { "csid_lite0" },
+ .interrupt = { "csid_lite0" },
+ .csid = {
+ .is_lite = true,
+ .hw_ops = &csid_ops_gen3,
+ .parent_dev_ops = &vfe_parent_dev_ops,
+ .formats = &csid_formats_gen2
+ }
+ },
+ /* CSID3 (lite) */
+ {
+ .regulators = {},
+ .clock = { "cpas_vfe_lite", "vfe_lite_ahb",
+ "vfe_lite_csid", "vfe_lite_cphy_rx",
+ "vfe_lite"},
+ .clock_rate = {
+ { 0, 0, 400000000, 400000000, 0},
+ { 0, 0, 400000000, 480000000, 0}
+ },
+ .reg = { "csid_lite1" },
+ .interrupt = { "csid_lite1" },
+ .csid = {
+ .is_lite = true,
+ .hw_ops = &csid_ops_gen3,
+ .parent_dev_ops = &vfe_parent_dev_ops,
+ .formats = &csid_formats_gen2
+ }
+ },
+ /* CSID4 (lite) */
+ {
+ .regulators = {},
+ .clock = { "cpas_vfe_lite", "vfe_lite_ahb",
+ "vfe_lite_csid", "vfe_lite_cphy_rx",
+ "vfe_lite"},
+ .clock_rate = {
+ { 0, 0, 400000000, 400000000, 0},
+ { 0, 0, 400000000, 480000000, 0}
+ },
+ .reg = { "csid_lite2" },
+ .interrupt = { "csid_lite2" },
+ .csid = {
+ .is_lite = true,
+ .hw_ops = &csid_ops_gen3,
+ .parent_dev_ops = &vfe_parent_dev_ops,
+ .formats = &csid_formats_gen2
+ }
+ },
+ /* CSID5 (lite) */
+ {
+ .regulators = {},
+ .clock = { "cpas_vfe_lite", "vfe_lite_ahb",
+ "vfe_lite_csid", "vfe_lite_cphy_rx",
+ "vfe_lite"},
+ .clock_rate = {
+ { 0, 0, 400000000, 400000000, 0},
+ { 0, 0, 400000000, 480000000, 0}
+ },
+ .reg = { "csid_lite3" },
+ .interrupt = { "csid_lite3" },
+ .csid = {
+ .is_lite = true,
+ .hw_ops = &csid_ops_gen3,
+ .parent_dev_ops = &vfe_parent_dev_ops,
+ .formats = &csid_formats_gen2
+ }
+ },
+ /* CSID6 (lite) */
+ {
+ .regulators = {},
+ .clock = { "cpas_vfe_lite", "vfe_lite_ahb",
+ "vfe_lite_csid", "vfe_lite_cphy_rx",
+ "vfe_lite"},
+ .clock_rate = {
+ { 0, 0, 400000000, 400000000, 0},
+ { 0, 0, 400000000, 480000000, 0}
+ },
+ .reg = { "csid_lite4" },
+ .interrupt = { "csid_lite4" },
+ .csid = {
+ .is_lite = true,
+ .hw_ops = &csid_ops_gen3,
+ .parent_dev_ops = &vfe_parent_dev_ops,
+ .formats = &csid_formats_gen2
+ }
+ },
+};
+
+static const struct camss_subdev_resources vfe_res_8775p[] = {
+ /* VFE0 */
+ {
+ .regulators = {},
+ .clock = { "cpas_vfe0", "vfe0", "vfe0_fast_ahb",
+ "cpas_ahb", "gcc_axi_hf",
+ "cpas_fast_ahb_clk",
+ "camnoc_axi"},
+ .clock_rate = {
+ { 0 },
+ { 480000000 },
+ { 300000000, 400000000 },
+ { 300000000, 400000000 },
+ { 0 },
+ { 300000000, 400000000 },
+ { 400000000 },
+ },
+ .reg = { "vfe0" },
+ .interrupt = { "vfe0" },
+ .vfe = {
+ .line_num = 3,
+ .is_lite = false,
+ .has_pd = false,
+ .pd_name = NULL,
+ .hw_ops = &vfe_ops_gen3,
+ .formats_rdi = &vfe_formats_rdi_845,
+ .formats_pix = &vfe_formats_pix_845
+ }
+ },
+ /* VFE1 */
+ {
+ .regulators = {},
+ .clock = { "cpas_vfe1", "vfe1", "vfe1_fast_ahb",
+ "cpas_ahb", "gcc_axi_hf",
+ "cpas_fast_ahb_clk",
+ "camnoc_axi"},
+ .clock_rate = {
+ { 0 },
+ { 480000000 },
+ { 300000000, 400000000 },
+ { 300000000, 400000000 },
+ { 0 },
+ { 300000000, 400000000 },
+ { 400000000 },
+ },
+ .reg = { "vfe1" },
+ .interrupt = { "vfe1" },
+ .vfe = {
+ .line_num = 3,
+ .is_lite = false,
+ .has_pd = false,
+ .pd_name = NULL,
+ .hw_ops = &vfe_ops_gen3,
+ .formats_rdi = &vfe_formats_rdi_845,
+ .formats_pix = &vfe_formats_pix_845
+ }
+ },
+ /* VFE2 (lite) */
+ {
+ .regulators = {},
+ .clock = { "cpas_vfe_lite", "vfe_lite_ahb",
+ "vfe_lite_csid", "vfe_lite_cphy_rx",
+ "vfe_lite"},
+ .clock_rate = {
+ { 0, 0, 0, 0 },
+ { 300000000, 400000000, 400000000, 400000000 },
+ { 400000000, 400000000, 400000000, 400000000 },
+ { 400000000, 400000000, 400000000, 400000000 },
+ { 480000000, 600000000, 600000000, 600000000 },
+ },
+ .reg = { "vfe_lite0" },
+ .interrupt = { "vfe_lite0" },
+ .vfe = {
+ .line_num = 4,
+ .is_lite = true,
+ .hw_ops = &vfe_ops_gen3,
+ .formats_rdi = &vfe_formats_rdi_845,
+ .formats_pix = &vfe_formats_pix_845
+ }
+ },
+ /* VFE3 (lite) */
+ {
+ .regulators = {},
+ .clock = { "cpas_vfe_lite", "vfe_lite_ahb",
+ "vfe_lite_csid", "vfe_lite_cphy_rx",
+ "vfe_lite"},
+ .clock_rate = {
+ { 0, 0, 0, 0 },
+ { 300000000, 400000000, 400000000, 400000000 },
+ { 400000000, 400000000, 400000000, 400000000 },
+ { 400000000, 400000000, 400000000, 400000000 },
+ { 480000000, 600000000, 600000000, 600000000 },
+ },
+ .reg = { "vfe_lite1" },
+ .interrupt = { "vfe_lite1" },
+ .vfe = {
+ .line_num = 4,
+ .is_lite = true,
+ .hw_ops = &vfe_ops_gen3,
+ .formats_rdi = &vfe_formats_rdi_845,
+ .formats_pix = &vfe_formats_pix_845
+ }
+ },
+ /* VFE4 (lite) */
+ {
+ .regulators = {},
+ .clock = { "cpas_vfe_lite", "vfe_lite_ahb",
+ "vfe_lite_csid", "vfe_lite_cphy_rx",
+ "vfe_lite"},
+ .clock_rate = {
+ { 0, 0, 0, 0 },
+ { 300000000, 400000000, 400000000, 400000000 },
+ { 400000000, 400000000, 400000000, 400000000 },
+ { 400000000, 400000000, 400000000, 400000000 },
+ { 480000000, 600000000, 600000000, 600000000 },
+ },
+ .reg = { "vfe_lite2" },
+ .interrupt = { "vfe_lite2" },
+ .vfe = {
+ .line_num = 4,
+ .is_lite = true,
+ .hw_ops = &vfe_ops_gen3,
+ .formats_rdi = &vfe_formats_rdi_845,
+ .formats_pix = &vfe_formats_pix_845
+ }
+ },
+ /* VFE5 (lite) */
+ {
+ .regulators = {},
+ .clock = { "cpas_vfe_lite", "vfe_lite_ahb",
+ "vfe_lite_csid", "vfe_lite_cphy_rx",
+ "vfe_lite"},
+ .clock_rate = {
+ { 0, 0, 0, 0 },
+ { 300000000, 400000000, 400000000, 400000000 },
+ { 400000000, 400000000, 400000000, 400000000 },
+ { 400000000, 400000000, 400000000, 400000000 },
+ { 480000000, 600000000, 600000000, 600000000 },
+ },
+ .reg = { "vfe_lite3" },
+ .interrupt = { "vfe_lite3" },
+ .vfe = {
+ .line_num = 4,
+ .is_lite = true,
+ .hw_ops = &vfe_ops_gen3,
+ .formats_rdi = &vfe_formats_rdi_845,
+ .formats_pix = &vfe_formats_pix_845
+ }
+ },
+ /* VFE6 (lite) */
+ {
+ .regulators = {},
+ .clock = { "cpas_vfe_lite", "vfe_lite_ahb",
+ "vfe_lite_csid", "vfe_lite_cphy_rx",
+ "vfe_lite"},
+ .clock_rate = {
+ { 0, 0, 0, 0 },
+ { 300000000, 400000000, 400000000, 400000000 },
+ { 400000000, 400000000, 400000000, 400000000 },
+ { 400000000, 400000000, 400000000, 400000000 },
+ { 480000000, 600000000, 600000000, 600000000 },
+ },
+ .reg = { "vfe_lite4" },
+ .interrupt = { "vfe_lite4" },
+ .vfe = {
+ .line_num = 4,
+ .is_lite = true,
+ .hw_ops = &vfe_ops_gen3,
+ .formats_rdi = &vfe_formats_rdi_845,
+ .formats_pix = &vfe_formats_pix_845
+ }
+ },
+};
+
+static const struct resources_icc icc_res_qcs8300[] = {
+ {
+ .name = "ahb",
+ .icc_bw_tbl.avg = 38400,
+ .icc_bw_tbl.peak = 76800,
+ },
+ {
+ .name = "hf_0",
+ .icc_bw_tbl.avg = 2097152,
+ .icc_bw_tbl.peak = 2097152,
+ },
+};
+
+static const struct resources_icc icc_res_sa8775p[] = {
+ {
+ .name = "ahb",
+ .icc_bw_tbl.avg = 38400,
+ .icc_bw_tbl.peak = 76800,
+ },
+ {
+ .name = "hf_0",
+ .icc_bw_tbl.avg = 2097152,
+ .icc_bw_tbl.peak = 2097152,
+ },
+};
+
static const struct camss_subdev_resources csiphy_res_x1e80100[] = {
/* CSIPHY0 */
{
@@ -3041,9 +3636,6 @@ static int camss_of_parse_ports(struct camss *camss)
for_each_endpoint_of_node(dev->of_node, node) {
struct camss_async_subdev *csd;
- if (!of_device_is_available(node))
- continue;
-
remote = of_graph_get_remote_port_parent(node);
if (!remote) {
dev_err(dev, "Cannot get remote parent\n");
@@ -3143,7 +3735,6 @@ static int camss_init_subdevices(struct camss *camss)
}
/*
- * camss_link_entities - Register subdev nodes and create links
* camss_link_err - print error in case link creation fails
* @src_name: name for source of the link
* @sink_name: name for sink of the link
@@ -3557,7 +4148,6 @@ static int camss_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct camss *camss;
- int num_subdevs;
int ret;
camss = devm_kzalloc(dev, sizeof(*camss), GFP_KERNEL);
@@ -3628,17 +4218,15 @@ static int camss_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
- num_subdevs = camss_of_parse_ports(camss);
- if (num_subdevs < 0) {
- ret = num_subdevs;
+ ret = camss_of_parse_ports(camss);
+ if (ret < 0)
goto err_v4l2_device_unregister;
- }
ret = camss_register_entities(camss);
if (ret < 0)
goto err_v4l2_device_unregister;
- ret = camss->res->link_entities(camss);
+ ret = camss_link_entities(camss);
if (ret < 0)
goto err_register_subdevs;
@@ -3648,23 +4236,12 @@ static int camss_probe(struct platform_device *pdev)
goto err_register_subdevs;
}
- if (num_subdevs) {
- camss->notifier.ops = &camss_subdev_notifier_ops;
-
- ret = v4l2_async_nf_register(&camss->notifier);
- if (ret) {
- dev_err(dev,
- "Failed to register async subdev nodes: %d\n",
- ret);
- goto err_media_device_unregister;
- }
- } else {
- ret = v4l2_device_register_subdev_nodes(&camss->v4l2_dev);
- if (ret < 0) {
- dev_err(dev, "Failed to register subdev nodes: %d\n",
- ret);
- goto err_media_device_unregister;
- }
+ camss->notifier.ops = &camss_subdev_notifier_ops;
+ ret = v4l2_async_nf_register(&camss->notifier);
+ if (ret) {
+ dev_err(dev,
+ "Failed to register async subdev nodes: %d\n", ret);
+ goto err_media_device_unregister;
}
return 0;
@@ -3723,7 +4300,6 @@ static const struct camss_resources msm8916_resources = {
.csiphy_num = ARRAY_SIZE(csiphy_res_8x16),
.csid_num = ARRAY_SIZE(csid_res_8x16),
.vfe_num = ARRAY_SIZE(vfe_res_8x16),
- .link_entities = camss_link_entities
};
static const struct camss_resources msm8953_resources = {
@@ -3737,7 +4313,6 @@ static const struct camss_resources msm8953_resources = {
.csiphy_num = ARRAY_SIZE(csiphy_res_8x96),
.csid_num = ARRAY_SIZE(csid_res_8x53),
.vfe_num = ARRAY_SIZE(vfe_res_8x53),
- .link_entities = camss_link_entities
};
static const struct camss_resources msm8996_resources = {
@@ -3749,7 +4324,46 @@ static const struct camss_resources msm8996_resources = {
.csiphy_num = ARRAY_SIZE(csiphy_res_8x96),
.csid_num = ARRAY_SIZE(csid_res_8x96),
.vfe_num = ARRAY_SIZE(vfe_res_8x96),
- .link_entities = camss_link_entities
+};
+
+static const struct camss_resources qcm2290_resources = {
+ .version = CAMSS_2290,
+ .csiphy_res = csiphy_res_2290,
+ .csid_res = csid_res_2290,
+ .vfe_res = vfe_res_2290,
+ .icc_res = icc_res_2290,
+ .icc_path_num = ARRAY_SIZE(icc_res_2290),
+ .csiphy_num = ARRAY_SIZE(csiphy_res_2290),
+ .csid_num = ARRAY_SIZE(csid_res_2290),
+ .vfe_num = ARRAY_SIZE(vfe_res_2290),
+};
+
+static const struct camss_resources qcs8300_resources = {
+ .version = CAMSS_8300,
+ .pd_name = "top",
+ .csiphy_res = csiphy_res_8300,
+ .csid_res = csid_res_8775p,
+ .csid_wrapper_res = &csid_wrapper_res_sm8550,
+ .vfe_res = vfe_res_8775p,
+ .icc_res = icc_res_qcs8300,
+ .csiphy_num = ARRAY_SIZE(csiphy_res_8300),
+ .csid_num = ARRAY_SIZE(csid_res_8775p),
+ .vfe_num = ARRAY_SIZE(vfe_res_8775p),
+ .icc_path_num = ARRAY_SIZE(icc_res_qcs8300),
+};
+
+static const struct camss_resources sa8775p_resources = {
+ .version = CAMSS_8775P,
+ .pd_name = "top",
+ .csiphy_res = csiphy_res_8775p,
+ .csid_res = csid_res_8775p,
+ .csid_wrapper_res = &csid_wrapper_res_sm8550,
+ .vfe_res = vfe_res_8775p,
+ .icc_res = icc_res_sa8775p,
+ .csiphy_num = ARRAY_SIZE(csiphy_res_8775p),
+ .csid_num = ARRAY_SIZE(csid_res_8775p),
+ .vfe_num = ARRAY_SIZE(vfe_res_8775p),
+ .icc_path_num = ARRAY_SIZE(icc_res_sa8775p),
};
static const struct camss_resources sdm660_resources = {
@@ -3761,7 +4375,6 @@ static const struct camss_resources sdm660_resources = {
.csiphy_num = ARRAY_SIZE(csiphy_res_660),
.csid_num = ARRAY_SIZE(csid_res_660),
.vfe_num = ARRAY_SIZE(vfe_res_660),
- .link_entities = camss_link_entities
};
static const struct camss_resources sdm670_resources = {
@@ -3772,7 +4385,6 @@ static const struct camss_resources sdm670_resources = {
.csiphy_num = ARRAY_SIZE(csiphy_res_670),
.csid_num = ARRAY_SIZE(csid_res_670),
.vfe_num = ARRAY_SIZE(vfe_res_670),
- .link_entities = camss_link_entities
};
static const struct camss_resources sdm845_resources = {
@@ -3784,7 +4396,6 @@ static const struct camss_resources sdm845_resources = {
.csiphy_num = ARRAY_SIZE(csiphy_res_845),
.csid_num = ARRAY_SIZE(csid_res_845),
.vfe_num = ARRAY_SIZE(vfe_res_845),
- .link_entities = camss_link_entities
};
static const struct camss_resources sm8250_resources = {
@@ -3798,7 +4409,6 @@ static const struct camss_resources sm8250_resources = {
.csiphy_num = ARRAY_SIZE(csiphy_res_8250),
.csid_num = ARRAY_SIZE(csid_res_8250),
.vfe_num = ARRAY_SIZE(vfe_res_8250),
- .link_entities = camss_link_entities
};
static const struct camss_resources sc8280xp_resources = {
@@ -3813,7 +4423,6 @@ static const struct camss_resources sc8280xp_resources = {
.csiphy_num = ARRAY_SIZE(csiphy_res_sc8280xp),
.csid_num = ARRAY_SIZE(csid_res_sc8280xp),
.vfe_num = ARRAY_SIZE(vfe_res_sc8280xp),
- .link_entities = camss_link_entities
};
static const struct camss_resources sc7280_resources = {
@@ -3827,7 +4436,6 @@ static const struct camss_resources sc7280_resources = {
.csiphy_num = ARRAY_SIZE(csiphy_res_7280),
.csid_num = ARRAY_SIZE(csid_res_7280),
.vfe_num = ARRAY_SIZE(vfe_res_7280),
- .link_entities = camss_link_entities
};
static const struct camss_resources sm8550_resources = {
@@ -3842,7 +4450,6 @@ static const struct camss_resources sm8550_resources = {
.csiphy_num = ARRAY_SIZE(csiphy_res_8550),
.csid_num = ARRAY_SIZE(csid_res_8550),
.vfe_num = ARRAY_SIZE(vfe_res_8550),
- .link_entities = camss_link_entities
};
static const struct camss_resources x1e80100_resources = {
@@ -3857,13 +4464,15 @@ static const struct camss_resources x1e80100_resources = {
.csiphy_num = ARRAY_SIZE(csiphy_res_x1e80100),
.csid_num = ARRAY_SIZE(csid_res_x1e80100),
.vfe_num = ARRAY_SIZE(vfe_res_x1e80100),
- .link_entities = camss_link_entities
};
static const struct of_device_id camss_dt_match[] = {
{ .compatible = "qcom,msm8916-camss", .data = &msm8916_resources },
{ .compatible = "qcom,msm8953-camss", .data = &msm8953_resources },
{ .compatible = "qcom,msm8996-camss", .data = &msm8996_resources },
+ { .compatible = "qcom,qcm2290-camss", .data = &qcm2290_resources },
+ { .compatible = "qcom,qcs8300-camss", .data = &qcs8300_resources },
+ { .compatible = "qcom,sa8775p-camss", .data = &sa8775p_resources },
{ .compatible = "qcom,sc7280-camss", .data = &sc7280_resources },
{ .compatible = "qcom,sc8280xp-camss", .data = &sc8280xp_resources },
{ .compatible = "qcom,sdm660-camss", .data = &sdm660_resources },
diff --git a/drivers/media/platform/qcom/camss/camss.h b/drivers/media/platform/qcom/camss/camss.h
index 63c0afee154a..a70fbc78ccc3 100644
--- a/drivers/media/platform/qcom/camss/camss.h
+++ b/drivers/media/platform/qcom/camss/camss.h
@@ -78,14 +78,17 @@ enum pm_domain {
enum camss_version {
CAMSS_660,
+ CAMSS_2290,
CAMSS_7280,
CAMSS_8x16,
CAMSS_8x53,
CAMSS_8x96,
CAMSS_8250,
CAMSS_8280XP,
+ CAMSS_8300,
CAMSS_845,
CAMSS_8550,
+ CAMSS_8775P,
CAMSS_X1E80100,
};
@@ -107,7 +110,6 @@ struct camss_resources {
const unsigned int csiphy_num;
const unsigned int csid_num;
const unsigned int vfe_num;
- int (*link_entities)(struct camss *camss);
};
struct camss {
diff --git a/drivers/media/platform/qcom/iris/Makefile b/drivers/media/platform/qcom/iris/Makefile
index e86d00ee6f15..13270cd6d899 100644
--- a/drivers/media/platform/qcom/iris/Makefile
+++ b/drivers/media/platform/qcom/iris/Makefile
@@ -1,5 +1,5 @@
-qcom-iris-objs += \
- iris_buffer.o \
+qcom-iris-objs += iris_buffer.o \
+ iris_common.o \
iris_core.o \
iris_ctrls.o \
iris_firmware.o \
@@ -19,6 +19,7 @@ qcom-iris-objs += \
iris_vidc.o \
iris_vb2.o \
iris_vdec.o \
+ iris_venc.o \
iris_vpu2.o \
iris_vpu3x.o \
iris_vpu_buffer.o \
diff --git a/drivers/media/platform/qcom/iris/iris_buffer.c b/drivers/media/platform/qcom/iris/iris_buffer.c
index 6425e4919e3b..c0900038e7de 100644
--- a/drivers/media/platform/qcom/iris/iris_buffer.c
+++ b/drivers/media/platform/qcom/iris/iris_buffer.c
@@ -63,7 +63,12 @@
static u32 iris_yuv_buffer_size_nv12(struct iris_inst *inst)
{
u32 y_plane, uv_plane, y_stride, uv_stride, y_scanlines, uv_scanlines;
- struct v4l2_format *f = inst->fmt_dst;
+ struct v4l2_format *f;
+
+ if (inst->domain == DECODER)
+ f = inst->fmt_dst;
+ else
+ f = inst->fmt_src;
y_stride = ALIGN(f->fmt.pix_mp.width, Y_STRIDE_ALIGN);
uv_stride = ALIGN(f->fmt.pix_mp.width, UV_STRIDE_ALIGN);
@@ -194,7 +199,7 @@ static u32 iris_yuv_buffer_size_qc08c(struct iris_inst *inst)
return ALIGN(y_meta_plane + y_plane + uv_meta_plane + uv_plane, PIXELS_4K);
}
-static u32 iris_bitstream_buffer_size(struct iris_inst *inst)
+static u32 iris_dec_bitstream_buffer_size(struct iris_inst *inst)
{
struct platform_inst_caps *caps = inst->core->iris_platform_data->inst_caps;
u32 base_res_mbs = NUM_MBS_4K;
@@ -219,18 +224,58 @@ static u32 iris_bitstream_buffer_size(struct iris_inst *inst)
return ALIGN(frame_size, PIXELS_4K);
}
+static u32 iris_enc_bitstream_buffer_size(struct iris_inst *inst)
+{
+ u32 aligned_width, aligned_height, bitstream_size, yuv_size;
+ int bitrate_mode, frame_rc;
+ struct v4l2_format *f;
+
+ f = inst->fmt_dst;
+
+ bitrate_mode = inst->fw_caps[BITRATE_MODE].value;
+ frame_rc = inst->fw_caps[FRAME_RC_ENABLE].value;
+
+ aligned_width = ALIGN(f->fmt.pix_mp.width, 32);
+ aligned_height = ALIGN(f->fmt.pix_mp.height, 32);
+ bitstream_size = aligned_width * aligned_height * 3;
+ yuv_size = (aligned_width * aligned_height * 3) >> 1;
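+	/*
+	 * Worst-case estimate for one compressed frame: start from
+	 * 3 bytes per pixel and scale down at higher resolutions.
+	 */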
+ if (aligned_width * aligned_height > (4096 * 2176))
+ /* bitstream_size = 0.25 * yuv_size; */
+ bitstream_size = (bitstream_size >> 3);
+ else if (aligned_width * aligned_height > (1280 * 720))
+ /* bitstream_size = 0.5 * yuv_size; */
+ bitstream_size = (bitstream_size >> 2);
+
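+	/* Give unconstrained (RC off or CQ mode) sessions extra headroom. */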
+ if ((!frame_rc || bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CQ) &&
+ bitstream_size < yuv_size)
+ bitstream_size = (bitstream_size << 1);
+
+ return ALIGN(bitstream_size, 4096);
+}
+
int iris_get_buffer_size(struct iris_inst *inst,
enum iris_buffer_type buffer_type)
{
- switch (buffer_type) {
- case BUF_INPUT:
- return iris_bitstream_buffer_size(inst);
- case BUF_OUTPUT:
- return iris_yuv_buffer_size_nv12(inst);
- case BUF_DPB:
- return iris_yuv_buffer_size_qc08c(inst);
- default:
- return 0;
+ if (inst->domain == DECODER) {
+ switch (buffer_type) {
+ case BUF_INPUT:
+ return iris_dec_bitstream_buffer_size(inst);
+ case BUF_OUTPUT:
+ return iris_yuv_buffer_size_nv12(inst);
+ case BUF_DPB:
+ return iris_yuv_buffer_size_qc08c(inst);
+ default:
+ return 0;
+ }
+ } else {
+ switch (buffer_type) {
+ case BUF_INPUT:
+ return iris_yuv_buffer_size_nv12(inst);
+ case BUF_OUTPUT:
+ return iris_enc_bitstream_buffer_size(inst);
+ default:
+ return 0;
+ }
}
}
@@ -239,7 +284,7 @@ static void iris_fill_internal_buf_info(struct iris_inst *inst,
{
struct iris_buffers *buffers = &inst->buffers[buffer_type];
- buffers->size = iris_vpu_buf_size(inst, buffer_type);
+ buffers->size = inst->core->iris_platform_data->get_vpu_buffer_size(inst, buffer_type);
buffers->min_count = iris_vpu_buf_count(inst, buffer_type);
}
@@ -249,16 +294,30 @@ void iris_get_internal_buffers(struct iris_inst *inst, u32 plane)
const u32 *internal_buf_type;
u32 internal_buffer_count, i;
- if (V4L2_TYPE_IS_OUTPUT(plane)) {
- internal_buf_type = platform_data->dec_ip_int_buf_tbl;
- internal_buffer_count = platform_data->dec_ip_int_buf_tbl_size;
- for (i = 0; i < internal_buffer_count; i++)
- iris_fill_internal_buf_info(inst, internal_buf_type[i]);
+ if (inst->domain == DECODER) {
+ if (V4L2_TYPE_IS_OUTPUT(plane)) {
+ internal_buf_type = platform_data->dec_ip_int_buf_tbl;
+ internal_buffer_count = platform_data->dec_ip_int_buf_tbl_size;
+ for (i = 0; i < internal_buffer_count; i++)
+ iris_fill_internal_buf_info(inst, internal_buf_type[i]);
+ } else {
+ internal_buf_type = platform_data->dec_op_int_buf_tbl;
+ internal_buffer_count = platform_data->dec_op_int_buf_tbl_size;
+ for (i = 0; i < internal_buffer_count; i++)
+ iris_fill_internal_buf_info(inst, internal_buf_type[i]);
+ }
} else {
- internal_buf_type = platform_data->dec_op_int_buf_tbl;
- internal_buffer_count = platform_data->dec_op_int_buf_tbl_size;
- for (i = 0; i < internal_buffer_count; i++)
- iris_fill_internal_buf_info(inst, internal_buf_type[i]);
+ if (V4L2_TYPE_IS_OUTPUT(plane)) {
+ internal_buf_type = platform_data->enc_ip_int_buf_tbl;
+ internal_buffer_count = platform_data->enc_ip_int_buf_tbl_size;
+ for (i = 0; i < internal_buffer_count; i++)
+ iris_fill_internal_buf_info(inst, internal_buf_type[i]);
+ } else {
+ internal_buf_type = platform_data->enc_op_int_buf_tbl;
+ internal_buffer_count = platform_data->enc_op_int_buf_tbl_size;
+ for (i = 0; i < internal_buffer_count; i++)
+ iris_fill_internal_buf_info(inst, internal_buf_type[i]);
+ }
}
}
@@ -299,12 +358,22 @@ int iris_create_internal_buffers(struct iris_inst *inst, u32 plane)
const u32 *internal_buf_type;
int ret;
- if (V4L2_TYPE_IS_OUTPUT(plane)) {
- internal_buf_type = platform_data->dec_ip_int_buf_tbl;
- internal_buffer_count = platform_data->dec_ip_int_buf_tbl_size;
+ if (inst->domain == DECODER) {
+ if (V4L2_TYPE_IS_OUTPUT(plane)) {
+ internal_buf_type = platform_data->dec_ip_int_buf_tbl;
+ internal_buffer_count = platform_data->dec_ip_int_buf_tbl_size;
+ } else {
+ internal_buf_type = platform_data->dec_op_int_buf_tbl;
+ internal_buffer_count = platform_data->dec_op_int_buf_tbl_size;
+ }
} else {
- internal_buf_type = platform_data->dec_op_int_buf_tbl;
- internal_buffer_count = platform_data->dec_op_int_buf_tbl_size;
+ if (V4L2_TYPE_IS_OUTPUT(plane)) {
+ internal_buf_type = platform_data->enc_ip_int_buf_tbl;
+ internal_buffer_count = platform_data->enc_ip_int_buf_tbl_size;
+ } else {
+ internal_buf_type = platform_data->enc_op_int_buf_tbl;
+ internal_buffer_count = platform_data->enc_op_int_buf_tbl_size;
+ }
}
for (i = 0; i < internal_buffer_count; i++) {
@@ -334,6 +403,29 @@ int iris_queue_buffer(struct iris_inst *inst, struct iris_buffer *buf)
return 0;
}
+int iris_queue_internal_deferred_buffers(struct iris_inst *inst, enum iris_buffer_type buffer_type)
+{
+ struct iris_buffer *buffer, *next;
+ struct iris_buffers *buffers;
+	int ret;
+
+ buffers = &inst->buffers[buffer_type];
+ list_for_each_entry_safe(buffer, next, &buffers->list, list) {
+ if (buffer->attr & BUF_ATTR_PENDING_RELEASE)
+ continue;
+ if (buffer->attr & BUF_ATTR_QUEUED)
+ continue;
+
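+		/* Queue only the buffers that were previously deferred. */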
+ if (buffer->attr & BUF_ATTR_DEFERRED) {
+ ret = iris_queue_buffer(inst, buffer);
+ if (ret)
+ return ret;
+ }
+ }
+
+	return 0;
+}
+
int iris_queue_internal_buffers(struct iris_inst *inst, u32 plane)
{
const struct iris_platform_data *platform_data = inst->core->iris_platform_data;
@@ -343,12 +435,22 @@ int iris_queue_internal_buffers(struct iris_inst *inst, u32 plane)
u32 internal_buffer_count, i;
int ret;
- if (V4L2_TYPE_IS_OUTPUT(plane)) {
- internal_buf_type = platform_data->dec_ip_int_buf_tbl;
- internal_buffer_count = platform_data->dec_ip_int_buf_tbl_size;
+ if (inst->domain == DECODER) {
+ if (V4L2_TYPE_IS_OUTPUT(plane)) {
+ internal_buf_type = platform_data->dec_ip_int_buf_tbl;
+ internal_buffer_count = platform_data->dec_ip_int_buf_tbl_size;
+ } else {
+ internal_buf_type = platform_data->dec_op_int_buf_tbl;
+ internal_buffer_count = platform_data->dec_op_int_buf_tbl_size;
+ }
} else {
- internal_buf_type = platform_data->dec_op_int_buf_tbl;
- internal_buffer_count = platform_data->dec_op_int_buf_tbl_size;
+ if (V4L2_TYPE_IS_OUTPUT(plane)) {
+ internal_buf_type = platform_data->enc_ip_int_buf_tbl;
+ internal_buffer_count = platform_data->enc_ip_int_buf_tbl_size;
+ } else {
+ internal_buf_type = platform_data->enc_op_int_buf_tbl;
+ internal_buffer_count = platform_data->enc_op_int_buf_tbl_size;
+ }
}
for (i = 0; i < internal_buffer_count; i++) {
@@ -358,6 +460,10 @@ int iris_queue_internal_buffers(struct iris_inst *inst, u32 plane)
continue;
if (buffer->attr & BUF_ATTR_QUEUED)
continue;
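+		/* DPB buffers can be queued to firmware only while streaming; defer them until then. */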
+ if (buffer->type == BUF_DPB && inst->state != IRIS_INST_STREAMING) {
+ buffer->attr |= BUF_ATTR_DEFERRED;
+ continue;
+ }
ret = iris_queue_buffer(inst, buffer);
if (ret)
return ret;
@@ -388,12 +494,22 @@ static int iris_destroy_internal_buffers(struct iris_inst *inst, u32 plane, bool
u32 i, len;
int ret;
- if (V4L2_TYPE_IS_OUTPUT(plane)) {
- internal_buf_type = platform_data->dec_ip_int_buf_tbl;
- len = platform_data->dec_ip_int_buf_tbl_size;
+ if (inst->domain == DECODER) {
+ if (V4L2_TYPE_IS_OUTPUT(plane)) {
+ internal_buf_type = platform_data->dec_ip_int_buf_tbl;
+ len = platform_data->dec_ip_int_buf_tbl_size;
+ } else {
+ internal_buf_type = platform_data->dec_op_int_buf_tbl;
+ len = platform_data->dec_op_int_buf_tbl_size;
+ }
} else {
- internal_buf_type = platform_data->dec_op_int_buf_tbl;
- len = platform_data->dec_op_int_buf_tbl_size;
+ if (V4L2_TYPE_IS_OUTPUT(plane)) {
+ internal_buf_type = platform_data->enc_ip_int_buf_tbl;
+ len = platform_data->enc_ip_int_buf_tbl_size;
+ } else {
+ internal_buf_type = platform_data->enc_op_int_buf_tbl;
+ len = platform_data->enc_op_int_buf_tbl_size;
+ }
}
for (i = 0; i < len; i++) {
@@ -413,6 +529,19 @@ static int iris_destroy_internal_buffers(struct iris_inst *inst, u32 plane, bool
}
}
+ if (force) {
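+		/*
+		 * A forced teardown also releases the session-long buffers:
+		 * PERSIST for decode sessions, ARP for encode sessions.
+		 */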
+ if (inst->domain == DECODER)
+ buffers = &inst->buffers[BUF_PERSIST];
+ else
+ buffers = &inst->buffers[BUF_ARP];
+
+ list_for_each_entry_safe(buf, next, &buffers->list, list) {
+ ret = iris_destroy_internal_buffer(inst, buf);
+ if (ret)
+ return ret;
+ }
+ }
+
return 0;
}
@@ -455,8 +584,13 @@ static int iris_release_input_internal_buffers(struct iris_inst *inst)
u32 internal_buffer_count, i;
int ret;
- internal_buf_type = platform_data->dec_ip_int_buf_tbl;
- internal_buffer_count = platform_data->dec_ip_int_buf_tbl_size;
+ if (inst->domain == DECODER) {
+ internal_buf_type = platform_data->dec_ip_int_buf_tbl;
+ internal_buffer_count = platform_data->dec_ip_int_buf_tbl_size;
+ } else {
+ internal_buf_type = platform_data->enc_ip_int_buf_tbl;
+ internal_buffer_count = platform_data->enc_ip_int_buf_tbl_size;
+ }
for (i = 0; i < internal_buffer_count; i++) {
ret = iris_release_internal_buffers(inst, internal_buf_type[i]);
@@ -467,9 +601,9 @@ static int iris_release_input_internal_buffers(struct iris_inst *inst)
return 0;
}
-int iris_alloc_and_queue_persist_bufs(struct iris_inst *inst)
+int iris_alloc_and_queue_persist_bufs(struct iris_inst *inst, enum iris_buffer_type buffer_type)
{
- struct iris_buffers *buffers = &inst->buffers[BUF_PERSIST];
+ struct iris_buffers *buffers = &inst->buffers[buffer_type];
struct iris_buffer *buffer, *next;
int ret;
u32 i;
@@ -477,10 +611,10 @@ int iris_alloc_and_queue_persist_bufs(struct iris_inst *inst)
if (!list_empty(&buffers->list))
return 0;
- iris_fill_internal_buf_info(inst, BUF_PERSIST);
+ iris_fill_internal_buf_info(inst, buffer_type);
for (i = 0; i < buffers->min_count; i++) {
- ret = iris_create_internal_buffer(inst, BUF_PERSIST, i);
+ ret = iris_create_internal_buffer(inst, buffer_type, i);
if (ret)
return ret;
}
@@ -614,6 +748,8 @@ int iris_vb2_buffer_done(struct iris_inst *inst, struct iris_buffer *buf)
vb2 = &vbuf->vb2_buf;
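+	/* Carry the driver flags over before the error path below returns early. */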
+ vbuf->flags |= buf->flags;
+
if (buf->flags & V4L2_BUF_FLAG_ERROR) {
state = VB2_BUF_STATE_ERROR;
vb2_set_plane_payload(vb2, 0, 0);
@@ -622,8 +758,6 @@ int iris_vb2_buffer_done(struct iris_inst *inst, struct iris_buffer *buf)
return 0;
}
- vbuf->flags |= buf->flags;
-
if (V4L2_TYPE_IS_CAPTURE(type)) {
vb2_set_plane_payload(vb2, 0, buf->data_size);
vbuf->sequence = inst->sequence_cap++;
diff --git a/drivers/media/platform/qcom/iris/iris_buffer.h b/drivers/media/platform/qcom/iris/iris_buffer.h
index 00825ad2dc3a..325d30fce5c9 100644
--- a/drivers/media/platform/qcom/iris/iris_buffer.h
+++ b/drivers/media/platform/qcom/iris/iris_buffer.h
@@ -25,6 +25,8 @@ struct iris_inst;
* @BUF_DPB: buffer to store display picture buffers for reference
* @BUF_PERSIST: buffer to store session context data
* @BUF_SCRATCH_1: buffer to store decoding/encoding context data for HW
+ * @BUF_SCRATCH_2: buffer to store encoding context data for HW
+ * @BUF_VPSS: buffer to store VPSS context data for HW
* @BUF_TYPE_MAX: max buffer types
*/
enum iris_buffer_type {
@@ -38,6 +40,8 @@ enum iris_buffer_type {
BUF_DPB,
BUF_PERSIST,
BUF_SCRATCH_1,
+ BUF_SCRATCH_2,
+ BUF_VPSS,
BUF_TYPE_MAX,
};
@@ -105,10 +109,11 @@ int iris_get_buffer_size(struct iris_inst *inst, enum iris_buffer_type buffer_ty
void iris_get_internal_buffers(struct iris_inst *inst, u32 plane);
int iris_create_internal_buffers(struct iris_inst *inst, u32 plane);
int iris_queue_internal_buffers(struct iris_inst *inst, u32 plane);
+int iris_queue_internal_deferred_buffers(struct iris_inst *inst, enum iris_buffer_type buffer_type);
int iris_destroy_internal_buffer(struct iris_inst *inst, struct iris_buffer *buffer);
int iris_destroy_all_internal_buffers(struct iris_inst *inst, u32 plane);
int iris_destroy_dequeued_internal_buffers(struct iris_inst *inst, u32 plane);
-int iris_alloc_and_queue_persist_bufs(struct iris_inst *inst);
+int iris_alloc_and_queue_persist_bufs(struct iris_inst *inst, enum iris_buffer_type buf_type);
int iris_alloc_and_queue_input_int_bufs(struct iris_inst *inst);
int iris_queue_buffer(struct iris_inst *inst, struct iris_buffer *buf);
int iris_queue_deferred_buffers(struct iris_inst *inst, enum iris_buffer_type buf_type);
diff --git a/drivers/media/platform/qcom/iris/iris_common.c b/drivers/media/platform/qcom/iris/iris_common.c
new file mode 100644
index 000000000000..9fc663bdaf3f
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_common.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <media/v4l2-mem2mem.h>
+
+#include "iris_common.h"
+#include "iris_ctrls.h"
+#include "iris_instance.h"
+#include "iris_power.h"
+
+int iris_vb2_buffer_to_driver(struct vb2_buffer *vb2, struct iris_buffer *buf)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb2);
+
+ buf->type = iris_v4l2_type_to_driver(vb2->type);
+ buf->index = vb2->index;
+ buf->fd = vb2->planes[0].m.fd;
+ buf->buffer_size = vb2->planes[0].length;
+ buf->data_offset = vb2->planes[0].data_offset;
+ buf->data_size = vb2->planes[0].bytesused - vb2->planes[0].data_offset;
+ buf->flags = vbuf->flags;
+ buf->timestamp = vb2->timestamp;
+ buf->attr = 0;
+
+ return 0;
+}
+
+void iris_set_ts_metadata(struct iris_inst *inst, struct vb2_v4l2_buffer *vbuf)
+{
+ u32 mask = V4L2_BUF_FLAG_TIMECODE | V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ struct vb2_buffer *vb = &vbuf->vb2_buf;
+ u64 ts_us = vb->timestamp;
+
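+	/* tss[] is used as a ring; wrap the write index when it reaches the end. */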
+ if (inst->metadata_idx >= ARRAY_SIZE(inst->tss))
+ inst->metadata_idx = 0;
+
+ do_div(ts_us, NSEC_PER_USEC);
+
+ inst->tss[inst->metadata_idx].flags = vbuf->flags & mask;
+ inst->tss[inst->metadata_idx].tc = vbuf->timecode;
+ inst->tss[inst->metadata_idx].ts_us = ts_us;
+ inst->tss[inst->metadata_idx].ts_ns = vb->timestamp;
+
+ inst->metadata_idx++;
+}
+
+int iris_process_streamon_input(struct iris_inst *inst)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ enum iris_inst_sub_state set_sub_state = 0;
+ int ret;
+
+ iris_scale_power(inst);
+
+ ret = hfi_ops->session_start(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (ret)
+ return ret;
+
+ if (inst->sub_state & IRIS_INST_SUB_INPUT_PAUSE) {
+ ret = iris_inst_change_sub_state(inst, IRIS_INST_SUB_INPUT_PAUSE, 0);
+ if (ret)
+ return ret;
+ }
+
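+	/*
+	 * A decoder that is draining or handling a resolution change must
+	 * keep its input port paused until the capture queue restarts.
+	 */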
+ if (inst->domain == DECODER &&
+ (inst->sub_state & IRIS_INST_SUB_DRC ||
+ inst->sub_state & IRIS_INST_SUB_DRAIN ||
+ inst->sub_state & IRIS_INST_SUB_FIRST_IPSC)) {
+ if (!(inst->sub_state & IRIS_INST_SUB_INPUT_PAUSE)) {
+ if (hfi_ops->session_pause) {
+ ret = hfi_ops->session_pause(inst,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (ret)
+ return ret;
+ }
+ set_sub_state = IRIS_INST_SUB_INPUT_PAUSE;
+ }
+ }
+
+ ret = iris_inst_state_change_streamon(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (ret)
+ return ret;
+
+ inst->last_buffer_dequeued = false;
+
+ return iris_inst_change_sub_state(inst, 0, set_sub_state);
+}
+
+int iris_process_streamon_output(struct iris_inst *inst)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ bool drain_active = false, drc_active = false;
+ enum iris_inst_sub_state clear_sub_state = 0;
+ int ret = 0;
+
+ iris_scale_power(inst);
+
+ drain_active = inst->sub_state & IRIS_INST_SUB_DRAIN &&
+ inst->sub_state & IRIS_INST_SUB_DRAIN_LAST;
+
+ drc_active = inst->sub_state & IRIS_INST_SUB_DRC &&
+ inst->sub_state & IRIS_INST_SUB_DRC_LAST;
+
+ if (drc_active)
+ clear_sub_state = IRIS_INST_SUB_DRC | IRIS_INST_SUB_DRC_LAST;
+ else if (drain_active)
+ clear_sub_state = IRIS_INST_SUB_DRAIN | IRIS_INST_SUB_DRAIN_LAST;
+
+ if (inst->domain == DECODER && inst->sub_state & IRIS_INST_SUB_INPUT_PAUSE) {
+ ret = iris_alloc_and_queue_input_int_bufs(inst);
+ if (ret)
+ return ret;
+ ret = iris_set_stage(inst, STAGE);
+ if (ret)
+ return ret;
+ ret = iris_set_pipe(inst, PIPE);
+ if (ret)
+ return ret;
+ }
+
+ if (inst->state == IRIS_INST_INPUT_STREAMING &&
+ inst->sub_state & IRIS_INST_SUB_INPUT_PAUSE) {
+ if (!drain_active)
+ ret = hfi_ops->session_resume_drc(inst,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ else if (hfi_ops->session_resume_drain)
+ ret = hfi_ops->session_resume_drain(inst,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (ret)
+ return ret;
+ clear_sub_state |= IRIS_INST_SUB_INPUT_PAUSE;
+ }
+
+ if (inst->sub_state & IRIS_INST_SUB_FIRST_IPSC)
+ clear_sub_state |= IRIS_INST_SUB_FIRST_IPSC;
+
+ ret = hfi_ops->session_start(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (ret)
+ return ret;
+
+ if (inst->sub_state & IRIS_INST_SUB_OUTPUT_PAUSE)
+ clear_sub_state |= IRIS_INST_SUB_OUTPUT_PAUSE;
+
+ ret = iris_inst_state_change_streamon(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (ret)
+ return ret;
+
+ inst->last_buffer_dequeued = false;
+
+ return iris_inst_change_sub_state(inst, clear_sub_state, 0);
+}
+
+static void iris_flush_deferred_buffers(struct iris_inst *inst,
+ enum iris_buffer_type type)
+{
+ struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
+ struct v4l2_m2m_buffer *buffer, *n;
+ struct iris_buffer *buf;
+
+ if (type == BUF_INPUT) {
+ v4l2_m2m_for_each_src_buf_safe(m2m_ctx, buffer, n) {
+ buf = to_iris_buffer(&buffer->vb);
+ if (buf->attr & BUF_ATTR_DEFERRED) {
+ if (!(buf->attr & BUF_ATTR_BUFFER_DONE)) {
+ buf->attr |= BUF_ATTR_BUFFER_DONE;
+ buf->data_size = 0;
+ iris_vb2_buffer_done(inst, buf);
+ }
+ }
+ }
+ } else {
+ v4l2_m2m_for_each_dst_buf_safe(m2m_ctx, buffer, n) {
+ buf = to_iris_buffer(&buffer->vb);
+ if (buf->attr & BUF_ATTR_DEFERRED) {
+ if (!(buf->attr & BUF_ATTR_BUFFER_DONE)) {
+ buf->attr |= BUF_ATTR_BUFFER_DONE;
+ buf->data_size = 0;
+ iris_vb2_buffer_done(inst, buf);
+ }
+ }
+ }
+ }
+}
+
+static void iris_kill_session(struct iris_inst *inst)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+
+ if (!inst->session_id)
+ return;
+
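+	/* Force-close the session and park the instance in the error state. */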
+ hfi_ops->session_close(inst);
+ iris_inst_change_state(inst, IRIS_INST_ERROR);
+}
+
+int iris_session_streamoff(struct iris_inst *inst, u32 plane)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ enum iris_buffer_type buffer_type;
+ int ret;
+
+ switch (plane) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ buffer_type = BUF_INPUT;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ buffer_type = BUF_OUTPUT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = hfi_ops->session_stop(inst, plane);
+ if (ret)
+ goto error;
+
+ ret = iris_inst_state_change_streamoff(inst, plane);
+ if (ret)
+ goto error;
+
+ iris_flush_deferred_buffers(inst, buffer_type);
+
+ return 0;
+
+error:
+ iris_kill_session(inst);
+ iris_flush_deferred_buffers(inst, buffer_type);
+
+ return ret;
+}
diff --git a/drivers/media/platform/qcom/iris/iris_common.h b/drivers/media/platform/qcom/iris/iris_common.h
new file mode 100644
index 000000000000..b2a27b781c9a
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_common.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __IRIS_COMMON_H__
+#define __IRIS_COMMON_H__
+
+#include <linux/types.h>
+
+struct iris_inst;
+struct iris_buffer;
+struct vb2_buffer;
+struct vb2_v4l2_buffer;
+
+int iris_vb2_buffer_to_driver(struct vb2_buffer *vb2, struct iris_buffer *buf);
+void iris_set_ts_metadata(struct iris_inst *inst, struct vb2_v4l2_buffer *vbuf);
+int iris_process_streamon_input(struct iris_inst *inst);
+int iris_process_streamon_output(struct iris_inst *inst);
+int iris_session_streamoff(struct iris_inst *inst, u32 plane);
+
+#endif
diff --git a/drivers/media/platform/qcom/iris/iris_core.c b/drivers/media/platform/qcom/iris/iris_core.c
index 0fa0a3b549a2..8406c48d635b 100644
--- a/drivers/media/platform/qcom/iris/iris_core.c
+++ b/drivers/media/platform/qcom/iris/iris_core.c
@@ -15,10 +15,12 @@ void iris_core_deinit(struct iris_core *core)
pm_runtime_resume_and_get(core->dev);
mutex_lock(&core->lock);
- iris_fw_unload(core);
- iris_vpu_power_off(core);
- iris_hfi_queues_deinit(core);
- core->state = IRIS_CORE_DEINIT;
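+	/* Skip the teardown if the core has already been deinitialized. */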
+ if (core->state != IRIS_CORE_DEINIT) {
+ iris_fw_unload(core);
+ iris_vpu_power_off(core);
+ iris_hfi_queues_deinit(core);
+ core->state = IRIS_CORE_DEINIT;
+ }
mutex_unlock(&core->lock);
pm_runtime_put_sync(core->dev);
diff --git a/drivers/media/platform/qcom/iris/iris_core.h b/drivers/media/platform/qcom/iris/iris_core.h
index aeeac32a1f6d..fb194c967ad4 100644
--- a/drivers/media/platform/qcom/iris/iris_core.h
+++ b/drivers/media/platform/qcom/iris/iris_core.h
@@ -25,6 +25,11 @@ struct icc_info {
#define IRIS_FW_VERSION_LENGTH 128
#define IFACEQ_CORE_PKT_SIZE (1024 * 4)
+enum domain_type {
+ ENCODER = BIT(0),
+ DECODER = BIT(1),
+};
+
/**
* struct iris_core - holds core parameters valid for all instances
*
@@ -33,8 +38,10 @@ struct icc_info {
* @irq: iris irq
* @v4l2_dev: a holder for v4l2 device structure
* @vdev_dec: iris video device structure for decoder
+ * @vdev_enc: iris video device structure for encoder
* @iris_v4l2_file_ops: iris v4l2 file ops
- * @iris_v4l2_ioctl_ops: iris v4l2 ioctl ops
+ * @iris_v4l2_ioctl_ops_dec: iris v4l2 ioctl ops for decoder
+ * @iris_v4l2_ioctl_ops_enc: iris v4l2 ioctl ops for encoder
* @iris_vb2_ops: iris vb2 ops
* @icc_tbl: table of iris interconnects
* @icc_count: count of iris interconnects
@@ -64,7 +71,8 @@ struct icc_info {
* @intr_status: interrupt status
* @sys_error_handler: a delayed work for handling system fatal error
* @instances: a list_head of all instances
- * @inst_fw_caps: an array of supported instance capabilities
+ * @inst_fw_caps_dec: an array of instance capabilities supported by the decoder
+ * @inst_fw_caps_enc: an array of instance capabilities supported by the encoder
*/
struct iris_core {
@@ -73,8 +81,10 @@ struct iris_core {
int irq;
struct v4l2_device v4l2_dev;
struct video_device *vdev_dec;
+ struct video_device *vdev_enc;
const struct v4l2_file_operations *iris_v4l2_file_ops;
- const struct v4l2_ioctl_ops *iris_v4l2_ioctl_ops;
+ const struct v4l2_ioctl_ops *iris_v4l2_ioctl_ops_dec;
+ const struct v4l2_ioctl_ops *iris_v4l2_ioctl_ops_enc;
const struct vb2_ops *iris_vb2_ops;
struct icc_bulk_data *icc_tbl;
u32 icc_count;
@@ -104,7 +114,9 @@ struct iris_core {
u32 intr_status;
struct delayed_work sys_error_handler;
struct list_head instances;
- struct platform_inst_fw_cap inst_fw_caps[INST_FW_CAP_MAX];
+	/* Encoder and decoder cap IDs overlap, so two separate arrays are required. */
+ struct platform_inst_fw_cap inst_fw_caps_dec[INST_FW_CAP_MAX];
+ struct platform_inst_fw_cap inst_fw_caps_enc[INST_FW_CAP_MAX];
};
int iris_core_init(struct iris_core *core);
diff --git a/drivers/media/platform/qcom/iris/iris_ctrls.c b/drivers/media/platform/qcom/iris/iris_ctrls.c
index 9136b723c0f2..754a5ad718bc 100644
--- a/drivers/media/platform/qcom/iris/iris_ctrls.c
+++ b/drivers/media/platform/qcom/iris/iris_ctrls.c
@@ -7,8 +7,13 @@
#include <media/v4l2-mem2mem.h>
#include "iris_ctrls.h"
+#include "iris_hfi_gen1_defines.h"
+#include "iris_hfi_gen2_defines.h"
#include "iris_instance.h"
+#define CABAC_MAX_BITRATE 160000000
+#define CAVLC_MAX_BITRATE 220000000
+
static inline bool iris_valid_cap_id(enum platform_inst_fw_cap_type cap_id)
{
return cap_id >= 1 && cap_id < INST_FW_CAP_MAX;
@@ -31,6 +36,68 @@ static enum platform_inst_fw_cap_type iris_get_cap_id(u32 id)
return LEVEL_VP9;
case V4L2_CID_MPEG_VIDEO_HEVC_TIER:
return TIER;
+ case V4L2_CID_MPEG_VIDEO_HEADER_MODE:
+ return HEADER_MODE;
+ case V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR:
+ return PREPEND_SPSPPS_TO_IDR;
+ case V4L2_CID_MPEG_VIDEO_BITRATE:
+ return BITRATE;
+ case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK:
+ return BITRATE_PEAK;
+ case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
+ return BITRATE_MODE;
+ case V4L2_CID_MPEG_VIDEO_FRAME_SKIP_MODE:
+ return FRAME_SKIP_MODE;
+ case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE:
+ return FRAME_RC_ENABLE;
+ case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
+ return GOP_SIZE;
+ case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
+ return ENTROPY_MODE;
+ case V4L2_CID_MPEG_VIDEO_H264_MIN_QP:
+ return MIN_FRAME_QP_H264;
+ case V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP:
+ return MIN_FRAME_QP_HEVC;
+ case V4L2_CID_MPEG_VIDEO_H264_MAX_QP:
+ return MAX_FRAME_QP_H264;
+ case V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP:
+ return MAX_FRAME_QP_HEVC;
+ case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MIN_QP:
+ return I_FRAME_MIN_QP_H264;
+ case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MIN_QP:
+ return I_FRAME_MIN_QP_HEVC;
+ case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MIN_QP:
+ return P_FRAME_MIN_QP_H264;
+ case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MIN_QP:
+ return P_FRAME_MIN_QP_HEVC;
+ case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MIN_QP:
+ return B_FRAME_MIN_QP_H264;
+ case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MIN_QP:
+ return B_FRAME_MIN_QP_HEVC;
+ case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MAX_QP:
+ return I_FRAME_MAX_QP_H264;
+ case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MAX_QP:
+ return I_FRAME_MAX_QP_HEVC;
+ case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MAX_QP:
+ return P_FRAME_MAX_QP_H264;
+ case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MAX_QP:
+ return P_FRAME_MAX_QP_HEVC;
+ case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MAX_QP:
+ return B_FRAME_MAX_QP_H264;
+ case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MAX_QP:
+ return B_FRAME_MAX_QP_HEVC;
+ case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP:
+ return I_FRAME_QP_H264;
+ case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP:
+ return I_FRAME_QP_HEVC;
+ case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP:
+ return P_FRAME_QP_H264;
+ case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP:
+ return P_FRAME_QP_HEVC;
+ case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP:
+ return B_FRAME_QP_H264;
+ case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP:
+ return B_FRAME_QP_HEVC;
default:
return INST_FW_CAP_MAX;
}
@@ -56,12 +123,74 @@ static u32 iris_get_v4l2_id(enum platform_inst_fw_cap_type cap_id)
return V4L2_CID_MPEG_VIDEO_VP9_LEVEL;
case TIER:
return V4L2_CID_MPEG_VIDEO_HEVC_TIER;
+ case HEADER_MODE:
+ return V4L2_CID_MPEG_VIDEO_HEADER_MODE;
+ case PREPEND_SPSPPS_TO_IDR:
+ return V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR;
+ case BITRATE:
+ return V4L2_CID_MPEG_VIDEO_BITRATE;
+ case BITRATE_PEAK:
+ return V4L2_CID_MPEG_VIDEO_BITRATE_PEAK;
+ case BITRATE_MODE:
+ return V4L2_CID_MPEG_VIDEO_BITRATE_MODE;
+ case FRAME_SKIP_MODE:
+ return V4L2_CID_MPEG_VIDEO_FRAME_SKIP_MODE;
+ case FRAME_RC_ENABLE:
+ return V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE;
+ case GOP_SIZE:
+ return V4L2_CID_MPEG_VIDEO_GOP_SIZE;
+ case ENTROPY_MODE:
+ return V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE;
+ case MIN_FRAME_QP_H264:
+ return V4L2_CID_MPEG_VIDEO_H264_MIN_QP;
+ case MIN_FRAME_QP_HEVC:
+ return V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP;
+ case MAX_FRAME_QP_H264:
+ return V4L2_CID_MPEG_VIDEO_H264_MAX_QP;
+ case MAX_FRAME_QP_HEVC:
+ return V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP;
+ case I_FRAME_MIN_QP_H264:
+ return V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MIN_QP;
+ case I_FRAME_MIN_QP_HEVC:
+ return V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MIN_QP;
+ case P_FRAME_MIN_QP_H264:
+ return V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MIN_QP;
+ case P_FRAME_MIN_QP_HEVC:
+ return V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MIN_QP;
+ case B_FRAME_MIN_QP_H264:
+ return V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MIN_QP;
+ case B_FRAME_MIN_QP_HEVC:
+ return V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MIN_QP;
+ case I_FRAME_MAX_QP_H264:
+ return V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MAX_QP;
+ case I_FRAME_MAX_QP_HEVC:
+ return V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MAX_QP;
+ case P_FRAME_MAX_QP_H264:
+ return V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MAX_QP;
+ case P_FRAME_MAX_QP_HEVC:
+ return V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MAX_QP;
+ case B_FRAME_MAX_QP_H264:
+ return V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MAX_QP;
+ case B_FRAME_MAX_QP_HEVC:
+ return V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MAX_QP;
+ case I_FRAME_QP_H264:
+ return V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP;
+ case I_FRAME_QP_HEVC:
+ return V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP;
+ case P_FRAME_QP_H264:
+ return V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP;
+ case P_FRAME_QP_HEVC:
+ return V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP;
+ case B_FRAME_QP_H264:
+ return V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP;
+ case B_FRAME_QP_HEVC:
+ return V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP;
default:
return 0;
}
}
-static int iris_vdec_op_s_ctrl(struct v4l2_ctrl *ctrl)
+static int iris_op_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct iris_inst *inst = container_of(ctrl->handler, struct iris_inst, ctrl_handler);
enum platform_inst_fw_cap_type cap_id;
@@ -82,11 +211,16 @@ static int iris_vdec_op_s_ctrl(struct v4l2_ctrl *ctrl)
inst->fw_caps[cap_id].value = ctrl->val;
+ if (vb2_is_streaming(q)) {
+ if (cap[cap_id].set)
+ cap[cap_id].set(inst, cap_id);
+ }
+
return 0;
}
static const struct v4l2_ctrl_ops iris_ctrl_ops = {
- .s_ctrl = iris_vdec_op_s_ctrl,
+ .s_ctrl = iris_op_s_ctrl,
};
int iris_ctrls_init(struct iris_inst *inst)
@@ -101,7 +235,10 @@ int iris_ctrls_init(struct iris_inst *inst)
num_ctrls++;
}
- /* Adding 1 to num_ctrls to include V4L2_CID_MIN_BUFFERS_FOR_CAPTURE */
+	/*
+	 * Add 1 to num_ctrls to include V4L2_CID_MIN_BUFFERS_FOR_CAPTURE
+	 * for the decoder and V4L2_CID_MIN_BUFFERS_FOR_OUTPUT for the
+	 * encoder.
+	 */
ret = v4l2_ctrl_handler_init(&inst->ctrl_handler, num_ctrls + 1);
if (ret)
@@ -143,8 +280,13 @@ int iris_ctrls_init(struct iris_inst *inst)
ctrl_idx++;
}
- v4l2_ctrl_new_std(&inst->ctrl_handler, NULL,
- V4L2_CID_MIN_BUFFERS_FOR_CAPTURE, 1, 32, 1, 4);
+ if (inst->domain == DECODER) {
+ v4l2_ctrl_new_std(&inst->ctrl_handler, NULL,
+ V4L2_CID_MIN_BUFFERS_FOR_CAPTURE, 1, 32, 1, 4);
+ } else {
+ v4l2_ctrl_new_std(&inst->ctrl_handler, NULL,
+ V4L2_CID_MIN_BUFFERS_FOR_OUTPUT, 1, 32, 1, 4);
+ }
ret = inst->ctrl_handler.error;
if (ret)
@@ -162,32 +304,57 @@ void iris_session_init_caps(struct iris_core *core)
struct platform_inst_fw_cap *caps;
u32 i, num_cap, cap_id;
- caps = core->iris_platform_data->inst_fw_caps;
- num_cap = core->iris_platform_data->inst_fw_caps_size;
+ caps = core->iris_platform_data->inst_fw_caps_dec;
+ num_cap = core->iris_platform_data->inst_fw_caps_dec_size;
+
+ for (i = 0; i < num_cap; i++) {
+ cap_id = caps[i].cap_id;
+ if (!iris_valid_cap_id(cap_id))
+ continue;
+
+ core->inst_fw_caps_dec[cap_id].cap_id = caps[i].cap_id;
+ core->inst_fw_caps_dec[cap_id].min = caps[i].min;
+ core->inst_fw_caps_dec[cap_id].max = caps[i].max;
+ core->inst_fw_caps_dec[cap_id].step_or_mask = caps[i].step_or_mask;
+ core->inst_fw_caps_dec[cap_id].value = caps[i].value;
+ core->inst_fw_caps_dec[cap_id].flags = caps[i].flags;
+ core->inst_fw_caps_dec[cap_id].hfi_id = caps[i].hfi_id;
+ core->inst_fw_caps_dec[cap_id].set = caps[i].set;
+ }
+
+ caps = core->iris_platform_data->inst_fw_caps_enc;
+ num_cap = core->iris_platform_data->inst_fw_caps_enc_size;
for (i = 0; i < num_cap; i++) {
cap_id = caps[i].cap_id;
if (!iris_valid_cap_id(cap_id))
continue;
- core->inst_fw_caps[cap_id].cap_id = caps[i].cap_id;
- core->inst_fw_caps[cap_id].min = caps[i].min;
- core->inst_fw_caps[cap_id].max = caps[i].max;
- core->inst_fw_caps[cap_id].step_or_mask = caps[i].step_or_mask;
- core->inst_fw_caps[cap_id].value = caps[i].value;
- core->inst_fw_caps[cap_id].flags = caps[i].flags;
- core->inst_fw_caps[cap_id].hfi_id = caps[i].hfi_id;
- core->inst_fw_caps[cap_id].set = caps[i].set;
+ core->inst_fw_caps_enc[cap_id].cap_id = caps[i].cap_id;
+ core->inst_fw_caps_enc[cap_id].min = caps[i].min;
+ core->inst_fw_caps_enc[cap_id].max = caps[i].max;
+ core->inst_fw_caps_enc[cap_id].step_or_mask = caps[i].step_or_mask;
+ core->inst_fw_caps_enc[cap_id].value = caps[i].value;
+ core->inst_fw_caps_enc[cap_id].flags = caps[i].flags;
+ core->inst_fw_caps_enc[cap_id].hfi_id = caps[i].hfi_id;
+ core->inst_fw_caps_enc[cap_id].set = caps[i].set;
}
}
static u32 iris_get_port_info(struct iris_inst *inst,
enum platform_inst_fw_cap_type cap_id)
{
- if (inst->fw_caps[cap_id].flags & CAP_FLAG_INPUT_PORT)
- return HFI_PORT_BITSTREAM;
- else if (inst->fw_caps[cap_id].flags & CAP_FLAG_OUTPUT_PORT)
- return HFI_PORT_RAW;
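+	/* The bitstream port is the decoder's input but the encoder's output. */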
+ if (inst->domain == DECODER) {
+ if (inst->fw_caps[cap_id].flags & CAP_FLAG_INPUT_PORT)
+ return HFI_PORT_BITSTREAM;
+ else if (inst->fw_caps[cap_id].flags & CAP_FLAG_OUTPUT_PORT)
+ return HFI_PORT_RAW;
+ } else {
+ if (inst->fw_caps[cap_id].flags & CAP_FLAG_INPUT_PORT)
+ return HFI_PORT_RAW;
+ else if (inst->fw_caps[cap_id].flags & CAP_FLAG_OUTPUT_PORT)
+ return HFI_PORT_BITSTREAM;
+ }
return HFI_PORT_NONE;
}
@@ -227,8 +394,10 @@ int iris_set_stage(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id
u32 width = inp_f->fmt.pix_mp.width;
u32 work_mode = STAGE_2;
- if (iris_res_is_less_than(width, height, 1280, 720))
- work_mode = STAGE_1;
+ if (inst->domain == DECODER) {
+ if (iris_res_is_less_than(width, height, 1280, 720))
+ work_mode = STAGE_1;
+ }
return hfi_ops->session_set_property(inst, hfi_id,
HFI_HOST_FLAGS_NONE,
@@ -250,6 +419,470 @@ int iris_set_pipe(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id)
&work_route, sizeof(u32));
}
+int iris_set_profile(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ u32 hfi_id, hfi_value;
+
+ if (inst->codec == V4L2_PIX_FMT_H264) {
+ hfi_id = inst->fw_caps[PROFILE_H264].hfi_id;
+ hfi_value = inst->fw_caps[PROFILE_H264].value;
+ } else {
+ hfi_id = inst->fw_caps[PROFILE_HEVC].hfi_id;
+ hfi_value = inst->fw_caps[PROFILE_HEVC].value;
+ }
+
+ return hfi_ops->session_set_property(inst, hfi_id,
+ HFI_HOST_FLAGS_NONE,
+ iris_get_port_info(inst, cap_id),
+ HFI_PAYLOAD_U32_ENUM,
+ &hfi_value, sizeof(u32));
+}
+
+int iris_set_level(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ u32 hfi_id, hfi_value;
+
+ if (inst->codec == V4L2_PIX_FMT_H264) {
+ hfi_id = inst->fw_caps[LEVEL_H264].hfi_id;
+ hfi_value = inst->fw_caps[LEVEL_H264].value;
+ } else {
+ hfi_id = inst->fw_caps[LEVEL_HEVC].hfi_id;
+ hfi_value = inst->fw_caps[LEVEL_HEVC].value;
+ }
+
+ return hfi_ops->session_set_property(inst, hfi_id,
+ HFI_HOST_FLAGS_NONE,
+ iris_get_port_info(inst, cap_id),
+ HFI_PAYLOAD_U32_ENUM,
+ &hfi_value, sizeof(u32));
+}
+
+int iris_set_profile_level_gen1(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ u32 hfi_id = inst->fw_caps[cap_id].hfi_id;
+ struct hfi_profile_level pl;
+
+ if (inst->codec == V4L2_PIX_FMT_H264) {
+ pl.profile = inst->fw_caps[PROFILE_H264].value;
+ pl.level = inst->fw_caps[LEVEL_H264].value;
+ } else {
+ pl.profile = inst->fw_caps[PROFILE_HEVC].value;
+ pl.level = inst->fw_caps[LEVEL_HEVC].value;
+ }
+
+ return hfi_ops->session_set_property(inst, hfi_id,
+ HFI_HOST_FLAGS_NONE,
+ iris_get_port_info(inst, cap_id),
+ HFI_PAYLOAD_U32_ENUM,
+					     &pl, sizeof(pl));
+}
+
+int iris_set_header_mode_gen1(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ u32 header_mode = inst->fw_caps[cap_id].value;
+ u32 hfi_id = inst->fw_caps[cap_id].hfi_id;
+ u32 hfi_val;
+
+ if (header_mode == V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE)
+ hfi_val = 0;
+ else
+ hfi_val = 1;
+
+ return hfi_ops->session_set_property(inst, hfi_id,
+ HFI_HOST_FLAGS_NONE,
+ iris_get_port_info(inst, cap_id),
+ HFI_PAYLOAD_U32,
+ &hfi_val, sizeof(u32));
+}
+
+int iris_set_header_mode_gen2(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ u32 prepend_sps_pps = inst->fw_caps[PREPEND_SPSPPS_TO_IDR].value;
+ u32 header_mode = inst->fw_caps[cap_id].value;
+ u32 hfi_id = inst->fw_caps[cap_id].hfi_id;
+ u32 hfi_val;
+
+ if (prepend_sps_pps)
+ hfi_val = HFI_SEQ_HEADER_PREFIX_WITH_SYNC_FRAME;
+ else if (header_mode == V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME)
+ hfi_val = HFI_SEQ_HEADER_JOINED_WITH_1ST_FRAME;
+ else
+ hfi_val = HFI_SEQ_HEADER_SEPERATE_FRAME;
+
+ return hfi_ops->session_set_property(inst, hfi_id,
+ HFI_HOST_FLAGS_NONE,
+ iris_get_port_info(inst, cap_id),
+ HFI_PAYLOAD_U32_ENUM,
+ &hfi_val, sizeof(u32));
+}
+
+int iris_set_bitrate(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ u32 entropy_mode = inst->fw_caps[ENTROPY_MODE].value;
+ u32 bitrate = inst->fw_caps[cap_id].value;
+ u32 hfi_id = inst->fw_caps[cap_id].hfi_id;
+ u32 max_bitrate;
+
+	/* HEVC always uses CABAC; for H.264 the entropy mode decides the cap. */
+	if (inst->codec == V4L2_PIX_FMT_HEVC ||
+	    entropy_mode == V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC)
+		max_bitrate = CABAC_MAX_BITRATE;
+	else
+		max_bitrate = CAVLC_MAX_BITRATE;
+
+ bitrate = min(bitrate, max_bitrate);
+
+ return hfi_ops->session_set_property(inst, hfi_id,
+ HFI_HOST_FLAGS_NONE,
+ iris_get_port_info(inst, cap_id),
+ HFI_PAYLOAD_U32,
+ &bitrate, sizeof(u32));
+}
+
+int iris_set_peak_bitrate(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ u32 rc_mode = inst->fw_caps[BITRATE_MODE].value;
+ u32 peak_bitrate = inst->fw_caps[cap_id].value;
+ u32 bitrate = inst->fw_caps[BITRATE].value;
+ u32 hfi_id = inst->fw_caps[cap_id].hfi_id;
+
+ if (rc_mode != V4L2_MPEG_VIDEO_BITRATE_MODE_CBR)
+ return 0;
+
+ if (inst->fw_caps[cap_id].flags & CAP_FLAG_CLIENT_SET) {
+ if (peak_bitrate < bitrate)
+ peak_bitrate = bitrate;
+ } else {
+ peak_bitrate = bitrate;
+ }
+
+ inst->fw_caps[cap_id].value = peak_bitrate;
+
+ return hfi_ops->session_set_property(inst, hfi_id,
+ HFI_HOST_FLAGS_NONE,
+ iris_get_port_info(inst, cap_id),
+ HFI_PAYLOAD_U32,
+ &peak_bitrate, sizeof(u32));
+}
+
+int iris_set_bitrate_mode_gen1(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ u32 bitrate_mode = inst->fw_caps[BITRATE_MODE].value;
+ u32 frame_rc = inst->fw_caps[FRAME_RC_ENABLE].value;
+ u32 frame_skip = inst->fw_caps[FRAME_SKIP_MODE].value;
+ u32 hfi_id = inst->fw_caps[cap_id].hfi_id;
+ u32 rc_mode = 0;
+
+ if (!frame_rc)
+ rc_mode = HFI_RATE_CONTROL_OFF;
+ else if (bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR)
+ rc_mode = frame_skip ? HFI_RATE_CONTROL_VBR_VFR : HFI_RATE_CONTROL_VBR_CFR;
+ else if (bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR)
+ rc_mode = frame_skip ? HFI_RATE_CONTROL_CBR_VFR : HFI_RATE_CONTROL_CBR_CFR;
+ else if (bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CQ)
+ rc_mode = HFI_RATE_CONTROL_CQ;
+
+ inst->hfi_rc_type = rc_mode;
+
+ return hfi_ops->session_set_property(inst, hfi_id,
+ HFI_HOST_FLAGS_NONE,
+ iris_get_port_info(inst, cap_id),
+ HFI_PAYLOAD_U32_ENUM,
+ &rc_mode, sizeof(u32));
+}
+
+int iris_set_bitrate_mode_gen2(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ u32 bitrate_mode = inst->fw_caps[BITRATE_MODE].value;
+ u32 frame_rc = inst->fw_caps[FRAME_RC_ENABLE].value;
+ u32 frame_skip = inst->fw_caps[FRAME_SKIP_MODE].value;
+ u32 hfi_id = inst->fw_caps[cap_id].hfi_id;
+ u32 rc_mode = 0;
+
+ if (!frame_rc)
+ rc_mode = HFI_RC_OFF;
+ else if (bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR)
+ rc_mode = HFI_RC_VBR_CFR;
+ else if (bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR)
+ rc_mode = frame_skip ? HFI_RC_CBR_VFR : HFI_RC_CBR_CFR;
+ else if (bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CQ)
+ rc_mode = HFI_RC_CQ;
+
+ inst->hfi_rc_type = rc_mode;
+
+ return hfi_ops->session_set_property(inst, hfi_id,
+ HFI_HOST_FLAGS_NONE,
+ iris_get_port_info(inst, cap_id),
+ HFI_PAYLOAD_U32_ENUM,
+ &rc_mode, sizeof(u32));
+}
+
+int iris_set_entropy_mode_gen1(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ u32 entropy_mode = inst->fw_caps[cap_id].value;
+ u32 hfi_id = inst->fw_caps[cap_id].hfi_id;
+ u32 hfi_val;
+
+ if (inst->codec != V4L2_PIX_FMT_H264)
+ return 0;
+
+ hfi_val = (entropy_mode == V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC) ?
+ HFI_H264_ENTROPY_CAVLC : HFI_H264_ENTROPY_CABAC;
+
+ return hfi_ops->session_set_property(inst, hfi_id,
+ HFI_HOST_FLAGS_NONE,
+ iris_get_port_info(inst, cap_id),
+ HFI_PAYLOAD_U32,
+ &hfi_val, sizeof(u32));
+}
+
+int iris_set_entropy_mode_gen2(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ u32 entropy_mode = inst->fw_caps[cap_id].value;
+ u32 hfi_id = inst->fw_caps[cap_id].hfi_id;
+ u32 profile;
+
+ if (inst->codec != V4L2_PIX_FMT_H264)
+ return 0;
+
+ profile = inst->fw_caps[PROFILE_H264].value;
+
+ if (profile == V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE ||
+ profile == V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE)
+ entropy_mode = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC;
+
+ inst->fw_caps[cap_id].value = entropy_mode;
+
+ return hfi_ops->session_set_property(inst, hfi_id,
+ HFI_HOST_FLAGS_NONE,
+ iris_get_port_info(inst, cap_id),
+ HFI_PAYLOAD_U32,
+ &entropy_mode, sizeof(u32));
+}
+
+int iris_set_min_qp(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ u32 i_qp_enable = 0, p_qp_enable = 0, b_qp_enable = 0;
+ u32 i_frame_qp = 0, p_frame_qp = 0, b_frame_qp = 0;
+ u32 min_qp_enable = 0, client_qp_enable = 0;
+ u32 hfi_id = inst->fw_caps[cap_id].hfi_id;
+ u32 hfi_val;
+
+ if (inst->codec == V4L2_PIX_FMT_H264) {
+ if (inst->fw_caps[MIN_FRAME_QP_H264].flags & CAP_FLAG_CLIENT_SET)
+ min_qp_enable = 1;
+ if (min_qp_enable ||
+ (inst->fw_caps[I_FRAME_MIN_QP_H264].flags & CAP_FLAG_CLIENT_SET))
+ i_qp_enable = 1;
+ if (min_qp_enable ||
+ (inst->fw_caps[P_FRAME_MIN_QP_H264].flags & CAP_FLAG_CLIENT_SET))
+ p_qp_enable = 1;
+ if (min_qp_enable ||
+ (inst->fw_caps[B_FRAME_MIN_QP_H264].flags & CAP_FLAG_CLIENT_SET))
+ b_qp_enable = 1;
+ } else {
+ if (inst->fw_caps[MIN_FRAME_QP_HEVC].flags & CAP_FLAG_CLIENT_SET)
+ min_qp_enable = 1;
+ if (min_qp_enable ||
+ (inst->fw_caps[I_FRAME_MIN_QP_HEVC].flags & CAP_FLAG_CLIENT_SET))
+ i_qp_enable = 1;
+ if (min_qp_enable ||
+ (inst->fw_caps[P_FRAME_MIN_QP_HEVC].flags & CAP_FLAG_CLIENT_SET))
+ p_qp_enable = 1;
+ if (min_qp_enable ||
+ (inst->fw_caps[B_FRAME_MIN_QP_HEVC].flags & CAP_FLAG_CLIENT_SET))
+ b_qp_enable = 1;
+ }
+
+ client_qp_enable = i_qp_enable | p_qp_enable << 1 | b_qp_enable << 2;
+ if (!client_qp_enable)
+ return 0;
+
+ if (inst->codec == V4L2_PIX_FMT_H264) {
+ i_frame_qp = max(inst->fw_caps[I_FRAME_MIN_QP_H264].value,
+ inst->fw_caps[MIN_FRAME_QP_H264].value);
+ p_frame_qp = max(inst->fw_caps[P_FRAME_MIN_QP_H264].value,
+ inst->fw_caps[MIN_FRAME_QP_H264].value);
+ b_frame_qp = max(inst->fw_caps[B_FRAME_MIN_QP_H264].value,
+ inst->fw_caps[MIN_FRAME_QP_H264].value);
+ } else {
+ i_frame_qp = max(inst->fw_caps[I_FRAME_MIN_QP_HEVC].value,
+ inst->fw_caps[MIN_FRAME_QP_HEVC].value);
+ p_frame_qp = max(inst->fw_caps[P_FRAME_MIN_QP_HEVC].value,
+ inst->fw_caps[MIN_FRAME_QP_HEVC].value);
+ b_frame_qp = max(inst->fw_caps[B_FRAME_MIN_QP_HEVC].value,
+ inst->fw_caps[MIN_FRAME_QP_HEVC].value);
+ }
+
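+	/* Pack the QPs: I in bits 0-7, P in 8-15, B in 16-23, per-type enable bits in 24-26. */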
+ hfi_val = i_frame_qp | p_frame_qp << 8 | b_frame_qp << 16 | client_qp_enable << 24;
+
+ return hfi_ops->session_set_property(inst, hfi_id,
+ HFI_HOST_FLAGS_NONE,
+ iris_get_port_info(inst, cap_id),
+ HFI_PAYLOAD_32_PACKED,
+ &hfi_val, sizeof(u32));
+}
+
+int iris_set_max_qp(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ u32 i_qp_enable = 0, p_qp_enable = 0, b_qp_enable = 0;
+ u32 max_qp_enable = 0, client_qp_enable;
+ u32 i_frame_qp, p_frame_qp, b_frame_qp;
+ u32 hfi_id = inst->fw_caps[cap_id].hfi_id;
+ u32 hfi_val;
+
+ if (inst->codec == V4L2_PIX_FMT_H264) {
+ if (inst->fw_caps[MAX_FRAME_QP_H264].flags & CAP_FLAG_CLIENT_SET)
+ max_qp_enable = 1;
+ if (max_qp_enable ||
+ (inst->fw_caps[I_FRAME_MAX_QP_H264].flags & CAP_FLAG_CLIENT_SET))
+ i_qp_enable = 1;
+ if (max_qp_enable ||
+ (inst->fw_caps[P_FRAME_MAX_QP_H264].flags & CAP_FLAG_CLIENT_SET))
+ p_qp_enable = 1;
+ if (max_qp_enable ||
+ (inst->fw_caps[B_FRAME_MAX_QP_H264].flags & CAP_FLAG_CLIENT_SET))
+ b_qp_enable = 1;
+ } else {
+ if (inst->fw_caps[MAX_FRAME_QP_HEVC].flags & CAP_FLAG_CLIENT_SET)
+ max_qp_enable = 1;
+ if (max_qp_enable ||
+ (inst->fw_caps[I_FRAME_MAX_QP_HEVC].flags & CAP_FLAG_CLIENT_SET))
+ i_qp_enable = 1;
+ if (max_qp_enable ||
+ (inst->fw_caps[P_FRAME_MAX_QP_HEVC].flags & CAP_FLAG_CLIENT_SET))
+ p_qp_enable = 1;
+ if (max_qp_enable ||
+ (inst->fw_caps[B_FRAME_MAX_QP_HEVC].flags & CAP_FLAG_CLIENT_SET))
+ b_qp_enable = 1;
+ }
+
+ client_qp_enable = i_qp_enable | p_qp_enable << 1 | b_qp_enable << 2;
+ if (!client_qp_enable)
+ return 0;
+
+ if (inst->codec == V4L2_PIX_FMT_H264) {
+ i_frame_qp = min(inst->fw_caps[I_FRAME_MAX_QP_H264].value,
+ inst->fw_caps[MAX_FRAME_QP_H264].value);
+ p_frame_qp = min(inst->fw_caps[P_FRAME_MAX_QP_H264].value,
+ inst->fw_caps[MAX_FRAME_QP_H264].value);
+ b_frame_qp = min(inst->fw_caps[B_FRAME_MAX_QP_H264].value,
+ inst->fw_caps[MAX_FRAME_QP_H264].value);
+ } else {
+ i_frame_qp = min(inst->fw_caps[I_FRAME_MAX_QP_HEVC].value,
+ inst->fw_caps[MAX_FRAME_QP_HEVC].value);
+ p_frame_qp = min(inst->fw_caps[P_FRAME_MAX_QP_HEVC].value,
+ inst->fw_caps[MAX_FRAME_QP_HEVC].value);
+ b_frame_qp = min(inst->fw_caps[B_FRAME_MAX_QP_HEVC].value,
+ inst->fw_caps[MAX_FRAME_QP_HEVC].value);
+ }
+
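+	/* Same packed layout as in iris_set_min_qp(). */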
+ hfi_val = i_frame_qp | p_frame_qp << 8 | b_frame_qp << 16 |
+ client_qp_enable << 24;
+
+ return hfi_ops->session_set_property(inst, hfi_id,
+ HFI_HOST_FLAGS_NONE,
+ iris_get_port_info(inst, cap_id),
+ HFI_PAYLOAD_32_PACKED,
+ &hfi_val, sizeof(u32));
+}
+
+int iris_set_frame_qp(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ u32 i_qp_enable = 0, p_qp_enable = 0, b_qp_enable = 0, client_qp_enable;
+ u32 i_frame_qp, p_frame_qp, b_frame_qp;
+ u32 hfi_id = inst->fw_caps[cap_id].hfi_id;
+ struct vb2_queue *q;
+ u32 hfi_val;
+
+	q = v4l2_m2m_get_dst_vq(inst->m2m_ctx);
+	if (vb2_is_streaming(q) && inst->hfi_rc_type != HFI_RC_OFF)
+		return 0;
+
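+	/*
+	 * With rate control disabled the firmware relies on the frame QPs
+	 * directly, so enable all of them unconditionally.
+	 */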
+ if (inst->hfi_rc_type == HFI_RC_OFF) {
+ i_qp_enable = 1;
+ p_qp_enable = 1;
+ b_qp_enable = 1;
+ } else {
+ if (inst->codec == V4L2_PIX_FMT_H264) {
+ if (inst->fw_caps[I_FRAME_QP_H264].flags & CAP_FLAG_CLIENT_SET)
+ i_qp_enable = 1;
+ if (inst->fw_caps[P_FRAME_QP_H264].flags & CAP_FLAG_CLIENT_SET)
+ p_qp_enable = 1;
+ if (inst->fw_caps[B_FRAME_QP_H264].flags & CAP_FLAG_CLIENT_SET)
+ b_qp_enable = 1;
+ } else {
+ if (inst->fw_caps[I_FRAME_QP_HEVC].flags & CAP_FLAG_CLIENT_SET)
+ i_qp_enable = 1;
+ if (inst->fw_caps[P_FRAME_QP_HEVC].flags & CAP_FLAG_CLIENT_SET)
+ p_qp_enable = 1;
+ if (inst->fw_caps[B_FRAME_QP_HEVC].flags & CAP_FLAG_CLIENT_SET)
+ b_qp_enable = 1;
+ }
+ }
+
+ client_qp_enable = i_qp_enable | p_qp_enable << 1 | b_qp_enable << 2;
+ if (!client_qp_enable)
+ return 0;
+
+ if (inst->codec == V4L2_PIX_FMT_H264) {
+ i_frame_qp = inst->fw_caps[I_FRAME_QP_H264].value;
+ p_frame_qp = inst->fw_caps[P_FRAME_QP_H264].value;
+ b_frame_qp = inst->fw_caps[B_FRAME_QP_H264].value;
+ } else {
+ i_frame_qp = inst->fw_caps[I_FRAME_QP_HEVC].value;
+ p_frame_qp = inst->fw_caps[P_FRAME_QP_HEVC].value;
+ b_frame_qp = inst->fw_caps[B_FRAME_QP_HEVC].value;
+ }
+
+ hfi_val = i_frame_qp | p_frame_qp << 8 | b_frame_qp << 16 |
+ client_qp_enable << 24;
+
+ return hfi_ops->session_set_property(inst, hfi_id,
+ HFI_HOST_FLAGS_NONE,
+ iris_get_port_info(inst, cap_id),
+ HFI_PAYLOAD_32_PACKED,
+ &hfi_val, sizeof(u32));
+}
+
+int iris_set_qp_range(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ struct hfi_quantization_range_v2 range;
+ u32 hfi_id = inst->fw_caps[cap_id].hfi_id;
+
+ if (inst->codec == V4L2_PIX_FMT_HEVC) {
+ range.min_qp.qp_packed = inst->fw_caps[MIN_FRAME_QP_HEVC].value;
+ range.max_qp.qp_packed = inst->fw_caps[MAX_FRAME_QP_HEVC].value;
+ } else {
+ range.min_qp.qp_packed = inst->fw_caps[MIN_FRAME_QP_H264].value;
+ range.max_qp.qp_packed = inst->fw_caps[MAX_FRAME_QP_H264].value;
+ }
+
+ return hfi_ops->session_set_property(inst, hfi_id,
+ HFI_HOST_FLAGS_NONE,
+ iris_get_port_info(inst, cap_id),
+ HFI_PAYLOAD_32_PACKED,
+ &range, sizeof(range));
+}
+
int iris_set_properties(struct iris_inst *inst, u32 plane)
{
const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
diff --git a/drivers/media/platform/qcom/iris/iris_ctrls.h b/drivers/media/platform/qcom/iris/iris_ctrls.h
index 9b5741868933..30af333cc494 100644
--- a/drivers/media/platform/qcom/iris/iris_ctrls.h
+++ b/drivers/media/platform/qcom/iris/iris_ctrls.h
@@ -17,6 +17,21 @@ int iris_set_u32_enum(struct iris_inst *inst, enum platform_inst_fw_cap_type cap
int iris_set_stage(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id);
int iris_set_pipe(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id);
int iris_set_u32(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id);
+int iris_set_profile(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id);
+int iris_set_level(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id);
+int iris_set_profile_level_gen1(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id);
+int iris_set_header_mode_gen1(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id);
+int iris_set_header_mode_gen2(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id);
+int iris_set_bitrate(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id);
+int iris_set_peak_bitrate(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id);
+int iris_set_bitrate_mode_gen1(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id);
+int iris_set_bitrate_mode_gen2(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id);
+int iris_set_entropy_mode_gen1(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id);
+int iris_set_entropy_mode_gen2(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id);
+int iris_set_min_qp(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id);
+int iris_set_max_qp(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id);
+int iris_set_frame_qp(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id);
+int iris_set_qp_range(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id);
int iris_set_properties(struct iris_inst *inst, u32 plane);
#endif
diff --git a/drivers/media/platform/qcom/iris/iris_firmware.c b/drivers/media/platform/qcom/iris/iris_firmware.c
index f1b5cd56db32..9ab499fad946 100644
--- a/drivers/media/platform/qcom/iris/iris_firmware.c
+++ b/drivers/media/platform/qcom/iris/iris_firmware.c
@@ -60,16 +60,7 @@ static int iris_load_fw_to_memory(struct iris_core *core, const char *fw_name)
ret = qcom_mdt_load(dev, firmware, fw_name,
pas_id, mem_virt, mem_phys, res_size, NULL);
- if (ret)
- goto err_mem_unmap;
-
- ret = qcom_scm_pas_auth_and_reset(pas_id);
- if (ret)
- goto err_mem_unmap;
-
- return ret;
-err_mem_unmap:
memunmap(mem_virt);
err_release_fw:
release_firmware(firmware);
@@ -94,6 +85,12 @@ int iris_fw_load(struct iris_core *core)
return -ENOMEM;
}
+ ret = qcom_scm_pas_auth_and_reset(core->iris_platform_data->pas_id);
+ if (ret) {
+ dev_err(core->dev, "auth and reset failed: %d\n", ret);
+ return ret;
+ }
+
ret = qcom_scm_mem_protect_video_var(cp_config->cp_start,
cp_config->cp_size,
cp_config->cp_nonpixel_start,
diff --git a/drivers/media/platform/qcom/iris/iris_hfi_common.h b/drivers/media/platform/qcom/iris/iris_hfi_common.h
index 9e6aadb83783..b51471fb32c7 100644
--- a/drivers/media/platform/qcom/iris/iris_hfi_common.h
+++ b/drivers/media/platform/qcom/iris/iris_hfi_common.h
@@ -102,7 +102,7 @@ enum hfi_matrix_coefficients {
struct iris_hfi_prop_type_handle {
u32 type;
- int (*handle)(struct iris_inst *inst);
+ int (*handle)(struct iris_inst *inst, u32 plane);
};
struct iris_hfi_command_ops {
diff --git a/drivers/media/platform/qcom/iris/iris_hfi_gen1_command.c b/drivers/media/platform/qcom/iris/iris_hfi_gen1_command.c
index 5fc30d54af4d..e1788c266bb1 100644
--- a/drivers/media/platform/qcom/iris/iris_hfi_gen1_command.c
+++ b/drivers/media/platform/qcom/iris/iris_hfi_gen1_command.c
@@ -21,6 +21,10 @@ static u32 iris_hfi_gen1_buf_type_from_driver(enum iris_buffer_type buffer_type)
return HFI_BUFFER_INTERNAL_SCRATCH;
case BUF_SCRATCH_1:
return HFI_BUFFER_INTERNAL_SCRATCH_1;
+ case BUF_SCRATCH_2:
+ return HFI_BUFFER_INTERNAL_SCRATCH_2;
+ case BUF_ARP:
+ return HFI_BUFFER_INTERNAL_PERSIST;
default:
return -EINVAL;
}
@@ -109,7 +113,12 @@ static int iris_hfi_gen1_session_open(struct iris_inst *inst)
packet.shdr.hdr.size = sizeof(struct hfi_session_open_pkt);
packet.shdr.hdr.pkt_type = HFI_CMD_SYS_SESSION_INIT;
packet.shdr.session_id = inst->session_id;
- packet.session_domain = HFI_SESSION_TYPE_DEC;
+
+ if (inst->domain == DECODER)
+ packet.session_domain = HFI_SESSION_TYPE_DEC;
+ else
+ packet.session_domain = HFI_SESSION_TYPE_ENC;
+
packet.session_codec = codec;
reinit_completion(&inst->completion);
@@ -184,47 +193,70 @@ static int iris_hfi_gen1_session_stop(struct iris_inst *inst, u32 plane)
u32 flush_type = 0;
int ret = 0;
- if ((V4L2_TYPE_IS_OUTPUT(plane) &&
- inst->state == IRIS_INST_INPUT_STREAMING) ||
- (V4L2_TYPE_IS_CAPTURE(plane) &&
- inst->state == IRIS_INST_OUTPUT_STREAMING) ||
- inst->state == IRIS_INST_ERROR) {
- reinit_completion(&inst->completion);
- iris_hfi_gen1_packet_session_cmd(inst, &pkt, HFI_CMD_SESSION_STOP);
- ret = iris_hfi_queue_cmd_write(core, &pkt, pkt.shdr.hdr.size);
- if (!ret)
- ret = iris_wait_for_session_response(inst, false);
-
- reinit_completion(&inst->completion);
- iris_hfi_gen1_packet_session_cmd(inst, &pkt, HFI_CMD_SESSION_RELEASE_RESOURCES);
- ret = iris_hfi_queue_cmd_write(core, &pkt, pkt.shdr.hdr.size);
- if (!ret)
- ret = iris_wait_for_session_response(inst, false);
-
- iris_inst_change_sub_state(inst, IRIS_INST_SUB_LOAD_RESOURCES, 0);
+ if (inst->domain == DECODER) {
+ if (inst->state == IRIS_INST_STREAMING) {
+ if (V4L2_TYPE_IS_OUTPUT(plane))
+ flush_type = HFI_FLUSH_ALL;
+ else if (V4L2_TYPE_IS_CAPTURE(plane))
+ flush_type = HFI_FLUSH_OUTPUT;
+
+ reinit_completion(&inst->flush_completion);
+
+ flush_pkt.shdr.hdr.size = sizeof(struct hfi_session_flush_pkt);
+ flush_pkt.shdr.hdr.pkt_type = HFI_CMD_SESSION_FLUSH;
+ flush_pkt.shdr.session_id = inst->session_id;
+ flush_pkt.flush_type = flush_type;
+
+ ret = iris_hfi_queue_cmd_write(core, &flush_pkt, flush_pkt.shdr.hdr.size);
+ if (!ret) {
+ inst->flush_responses_pending++;
+ ret = iris_wait_for_session_response(inst, true);
+ }
+ } else if (inst->sub_state & IRIS_INST_SUB_LOAD_RESOURCES) {
+ reinit_completion(&inst->completion);
+ iris_hfi_gen1_packet_session_cmd(inst, &pkt, HFI_CMD_SESSION_STOP);
+ ret = iris_hfi_queue_cmd_write(core, &pkt, pkt.shdr.hdr.size);
+ if (!ret)
+ ret = iris_wait_for_session_response(inst, false);
+
+ reinit_completion(&inst->completion);
+ iris_hfi_gen1_packet_session_cmd(inst, &pkt,
+ HFI_CMD_SESSION_RELEASE_RESOURCES);
+ ret = iris_hfi_queue_cmd_write(core, &pkt, pkt.shdr.hdr.size);
+ if (!ret)
+ ret = iris_wait_for_session_response(inst, false);
+
+ iris_inst_change_sub_state(inst, IRIS_INST_SUB_LOAD_RESOURCES, 0);
+
+ iris_helper_buffers_done(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+ VB2_BUF_STATE_ERROR);
+ iris_helper_buffers_done(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
+ VB2_BUF_STATE_ERROR);
+ }
+ } else {
+ if (inst->state == IRIS_INST_STREAMING ||
+ inst->state == IRIS_INST_INPUT_STREAMING ||
+ inst->state == IRIS_INST_ERROR) {
+ reinit_completion(&inst->completion);
+ iris_hfi_gen1_packet_session_cmd(inst, &pkt, HFI_CMD_SESSION_STOP);
+ ret = iris_hfi_queue_cmd_write(core, &pkt, pkt.shdr.hdr.size);
+ if (!ret)
+ ret = iris_wait_for_session_response(inst, false);
+
+ reinit_completion(&inst->completion);
+ iris_hfi_gen1_packet_session_cmd(inst, &pkt,
+ HFI_CMD_SESSION_RELEASE_RESOURCES);
+ ret = iris_hfi_queue_cmd_write(core, &pkt, pkt.shdr.hdr.size);
+ if (!ret)
+ ret = iris_wait_for_session_response(inst, false);
+
+ iris_inst_change_sub_state(inst, IRIS_INST_SUB_LOAD_RESOURCES, 0);
+ }
iris_helper_buffers_done(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
VB2_BUF_STATE_ERROR);
iris_helper_buffers_done(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
VB2_BUF_STATE_ERROR);
- } else if (inst->state == IRIS_INST_STREAMING) {
- if (V4L2_TYPE_IS_OUTPUT(plane))
- flush_type = HFI_FLUSH_ALL;
- else if (V4L2_TYPE_IS_CAPTURE(plane))
- flush_type = HFI_FLUSH_OUTPUT;
-
- reinit_completion(&inst->flush_completion);
-
- flush_pkt.shdr.hdr.size = sizeof(struct hfi_session_flush_pkt);
- flush_pkt.shdr.hdr.pkt_type = HFI_CMD_SESSION_FLUSH;
- flush_pkt.shdr.session_id = inst->session_id;
- flush_pkt.flush_type = flush_type;
-
- ret = iris_hfi_queue_cmd_write(core, &flush_pkt, flush_pkt.shdr.hdr.size);
- if (!ret) {
- inst->flush_responses_pending++;
- ret = iris_wait_for_session_response(inst, true);
- }
}
return ret;
@@ -241,23 +273,44 @@ static int iris_hfi_gen1_session_continue(struct iris_inst *inst, u32 plane)
static int iris_hfi_gen1_queue_input_buffer(struct iris_inst *inst, struct iris_buffer *buf)
{
- struct hfi_session_empty_buffer_compressed_pkt ip_pkt;
-
- ip_pkt.shdr.hdr.size = sizeof(struct hfi_session_empty_buffer_compressed_pkt);
- ip_pkt.shdr.hdr.pkt_type = HFI_CMD_SESSION_EMPTY_BUFFER;
- ip_pkt.shdr.session_id = inst->session_id;
- ip_pkt.time_stamp_hi = upper_32_bits(buf->timestamp);
- ip_pkt.time_stamp_lo = lower_32_bits(buf->timestamp);
- ip_pkt.flags = buf->flags;
- ip_pkt.mark_target = 0;
- ip_pkt.mark_data = 0;
- ip_pkt.offset = buf->data_offset;
- ip_pkt.alloc_len = buf->buffer_size;
- ip_pkt.filled_len = buf->data_size;
- ip_pkt.input_tag = buf->index;
- ip_pkt.packet_buffer = buf->device_addr;
-
- return iris_hfi_queue_cmd_write(inst->core, &ip_pkt, ip_pkt.shdr.hdr.size);
+ struct hfi_session_empty_buffer_compressed_pkt com_ip_pkt = {0};
+ struct hfi_session_empty_buffer_uncompressed_pkt uncom_ip_pkt = {0};
+
+ if (inst->domain == DECODER) {
+ com_ip_pkt.shdr.hdr.size = sizeof(struct hfi_session_empty_buffer_compressed_pkt);
+ com_ip_pkt.shdr.hdr.pkt_type = HFI_CMD_SESSION_EMPTY_BUFFER;
+ com_ip_pkt.shdr.session_id = inst->session_id;
+ com_ip_pkt.time_stamp_hi = upper_32_bits(buf->timestamp);
+ com_ip_pkt.time_stamp_lo = lower_32_bits(buf->timestamp);
+ com_ip_pkt.flags = buf->flags;
+ com_ip_pkt.mark_target = 0;
+ com_ip_pkt.mark_data = 0;
+ com_ip_pkt.offset = buf->data_offset;
+ com_ip_pkt.alloc_len = buf->buffer_size;
+ com_ip_pkt.filled_len = buf->data_size;
+ com_ip_pkt.input_tag = buf->index;
+ com_ip_pkt.packet_buffer = buf->device_addr;
+ return iris_hfi_queue_cmd_write(inst->core, &com_ip_pkt,
+ com_ip_pkt.shdr.hdr.size);
+ }
+
+ uncom_ip_pkt.shdr.hdr.size =
+ sizeof(struct hfi_session_empty_buffer_uncompressed_pkt);
+ uncom_ip_pkt.shdr.hdr.pkt_type = HFI_CMD_SESSION_EMPTY_BUFFER;
+ uncom_ip_pkt.shdr.session_id = inst->session_id;
+ uncom_ip_pkt.time_stamp_hi = upper_32_bits(buf->timestamp);
+ uncom_ip_pkt.time_stamp_lo = lower_32_bits(buf->timestamp);
+ uncom_ip_pkt.view_id = 0;
+ uncom_ip_pkt.flags = buf->flags;
+ uncom_ip_pkt.mark_target = 0;
+ uncom_ip_pkt.mark_data = 0;
+ uncom_ip_pkt.offset = buf->data_offset;
+ uncom_ip_pkt.alloc_len = buf->buffer_size;
+ uncom_ip_pkt.filled_len = buf->data_size;
+ uncom_ip_pkt.input_tag = buf->index;
+ uncom_ip_pkt.packet_buffer = buf->device_addr;
+
+ return iris_hfi_queue_cmd_write(inst->core, &uncom_ip_pkt,
+ uncom_ip_pkt.shdr.hdr.size);
}
static int iris_hfi_gen1_queue_output_buffer(struct iris_inst *inst, struct iris_buffer *buf)
@@ -330,6 +383,8 @@ static int iris_hfi_gen1_session_queue_buffer(struct iris_inst *inst, struct iri
case BUF_PERSIST:
case BUF_BIN:
case BUF_SCRATCH_1:
+ case BUF_SCRATCH_2:
+ case BUF_ARP:
return iris_hfi_gen1_queue_internal_buffer(inst, buf);
default:
return -EINVAL;
@@ -395,16 +450,31 @@ exit:
static int iris_hfi_gen1_session_drain(struct iris_inst *inst, u32 plane)
{
- struct hfi_session_empty_buffer_compressed_pkt ip_pkt = {0};
+ if (inst->domain == DECODER) {
+ struct hfi_session_empty_buffer_compressed_pkt ip_pkt = {0};
+
+ ip_pkt.shdr.hdr.size = sizeof(struct hfi_session_empty_buffer_compressed_pkt);
+ ip_pkt.shdr.hdr.pkt_type = HFI_CMD_SESSION_EMPTY_BUFFER;
+ ip_pkt.shdr.session_id = inst->session_id;
+ ip_pkt.flags = HFI_BUFFERFLAG_EOS;
+ ip_pkt.packet_buffer = 0xdeadb000;
+
+ return iris_hfi_queue_cmd_write(inst->core, &ip_pkt, ip_pkt.shdr.hdr.size);
+ }
+
+ if (inst->domain == ENCODER) {
+ struct hfi_session_empty_buffer_uncompressed_pkt ip_pkt = {0};
- ip_pkt.shdr.hdr.size = sizeof(struct hfi_session_empty_buffer_compressed_pkt);
- ip_pkt.shdr.hdr.pkt_type = HFI_CMD_SESSION_EMPTY_BUFFER;
- ip_pkt.shdr.session_id = inst->session_id;
- ip_pkt.flags = HFI_BUFFERFLAG_EOS;
- if (inst->codec == V4L2_PIX_FMT_VP9)
+ ip_pkt.shdr.hdr.size = sizeof(struct hfi_session_empty_buffer_uncompressed_pkt);
+ ip_pkt.shdr.hdr.pkt_type = HFI_CMD_SESSION_EMPTY_BUFFER;
+ ip_pkt.shdr.session_id = inst->session_id;
+ ip_pkt.flags = HFI_BUFFERFLAG_EOS;
ip_pkt.packet_buffer = 0xdeadb000;
- return iris_hfi_queue_cmd_write(inst->core, &ip_pkt, ip_pkt.shdr.hdr.size);
+ return iris_hfi_queue_cmd_write(inst->core, &ip_pkt, ip_pkt.shdr.hdr.size);
+ }
+
+ return -EINVAL;
}
static int
@@ -507,6 +577,114 @@ iris_hfi_gen1_packet_session_set_property(struct hfi_session_set_property_pkt *p
packet->shdr.hdr.size += sizeof(u32) + sizeof(*wm);
break;
}
+ case HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT: {
+ struct hfi_profile_level *in = pdata, *pl = prop_data;
+
+ pl->level = in->level;
+ pl->profile = in->profile;
+ if (!pl->profile)
+ /* Profile not supported, falling back to high */
+ pl->profile = V4L2_MPEG_VIDEO_H264_PROFILE_HIGH;
+
+ if (!pl->level)
+ /* Level not supported, falling back to 1 */
+ pl->level = 1;
+
+ packet->shdr.hdr.size += sizeof(u32) + sizeof(*pl);
+ break;
+ }
+ case HFI_PROPERTY_CONFIG_VENC_SYNC_FRAME_SEQUENCE_HEADER: {
+ struct hfi_enable *en = prop_data;
+ u32 *in = pdata;
+
+ en->enable = *in;
+ packet->shdr.hdr.size += sizeof(u32) + sizeof(*en);
+ break;
+ }
+ case HFI_PROPERTY_CONFIG_VENC_TARGET_BITRATE: {
+ struct hfi_bitrate *brate = prop_data;
+ u32 *in = pdata;
+
+ brate->bitrate = *in;
+ brate->layer_id = 0;
+ packet->shdr.hdr.size += sizeof(u32) + sizeof(*brate);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VENC_RATE_CONTROL: {
+ u32 *in = pdata;
+
+ switch (*in) {
+ case HFI_RATE_CONTROL_OFF:
+ case HFI_RATE_CONTROL_CBR_CFR:
+ case HFI_RATE_CONTROL_CBR_VFR:
+ case HFI_RATE_CONTROL_VBR_CFR:
+ case HFI_RATE_CONTROL_VBR_VFR:
+ case HFI_RATE_CONTROL_CQ:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ packet->data[1] = *in;
+ packet->shdr.hdr.size += sizeof(u32) * 2;
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VENC_H264_ENTROPY_CONTROL: {
+ struct hfi_h264_entropy_control *entropy = prop_data;
+ u32 *in = pdata;
+
+ entropy->entropy_mode = *in;
+ if (entropy->entropy_mode == HFI_H264_ENTROPY_CABAC)
+ entropy->cabac_model = HFI_H264_CABAC_MODEL_0;
+ packet->shdr.hdr.size += sizeof(u32) + sizeof(*entropy);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VENC_SESSION_QP_RANGE_V2: {
+ struct hfi_quantization_range_v2 *range = prop_data;
+ struct hfi_quantization_range_v2 *in = pdata;
+ u32 min_qp, max_qp;
+
+ min_qp = in->min_qp.qp_packed;
+ max_qp = in->max_qp.qp_packed;
+
+ /*
+ * We'll be packing in the QP, so make sure we
+ * won't be losing data when masking.
+ */
+ if (min_qp > 0xff || max_qp > 0xff)
+ return -ERANGE;
+
+ range->min_qp.layer_id = 0xFF;
+ range->max_qp.layer_id = 0xFF;
+ range->min_qp.qp_packed = (min_qp & 0xFF) | ((min_qp & 0xFF) << 8) |
+ ((min_qp & 0xFF) << 16);
+ range->max_qp.qp_packed = (max_qp & 0xFF) | ((max_qp & 0xFF) << 8) |
+ ((max_qp & 0xFF) << 16);
+ range->min_qp.enable = 7;
+ range->max_qp.enable = 7;
+ packet->shdr.hdr.size += sizeof(u32) + sizeof(*range);
+ break;
+ }
+ case HFI_PROPERTY_CONFIG_FRAME_RATE: {
+ struct hfi_framerate *frate = prop_data;
+ struct hfi_framerate *in = pdata;
+
+ frate->buffer_type = in->buffer_type;
+ frate->framerate = in->framerate;
+ packet->shdr.hdr.size += sizeof(u32) + sizeof(*frate);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO: {
+ struct hfi_uncompressed_plane_actual_info *plane_actual_info = prop_data;
+ struct hfi_uncompressed_plane_actual_info *in = pdata;
+
+ plane_actual_info->buffer_type = in->buffer_type;
+ plane_actual_info->num_planes = in->num_planes;
+ plane_actual_info->plane_format[0] = in->plane_format[0];
+ if (in->num_planes > 1)
+ plane_actual_info->plane_format[1] = in->plane_format[1];
+ packet->shdr.hdr.size += sizeof(u32) + sizeof(*plane_actual_info);
+ break;
+ }
default:
return -EINVAL;
}
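
The HFI_PROPERTY_PARAM_VENC_SESSION_QP_RANGE_V2 case above replicates a single 8-bit QP across three byte lanes (I/P/B) after the -ERANGE guard. A quick standalone check of that replication arithmetic, with illustrative bounds:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t min_qp = 10, max_qp = 44;

            /* same replication as the driver: byte 0 = I, byte 1 = P, byte 2 = B */
            uint32_t min_packed = (min_qp & 0xFF) | ((min_qp & 0xFF) << 8) |
                                  ((min_qp & 0xFF) << 16);
            uint32_t max_packed = (max_qp & 0xFF) | ((max_qp & 0xFF) << 8) |
                                  ((max_qp & 0xFF) << 16);

            printf("min 0x%06x max 0x%06x\n", min_packed, max_packed);
            /* prints: min 0x0a0a0a max 0x2c2c2c */
            return 0;
    }
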
@@ -549,7 +727,7 @@ static int iris_hfi_gen1_session_set_property(struct iris_inst *inst, u32 packet
return hfi_gen1_set_property(inst, packet_type, payload, payload_size);
}
-static int iris_hfi_gen1_set_resolution(struct iris_inst *inst)
+static int iris_hfi_gen1_set_resolution(struct iris_inst *inst, u32 plane)
{
u32 ptype = HFI_PROPERTY_PARAM_FRAME_SIZE;
struct hfi_framesize fs;
@@ -564,14 +742,18 @@ static int iris_hfi_gen1_set_resolution(struct iris_inst *inst)
if (ret)
return ret;
}
- fs.buffer_type = HFI_BUFFER_OUTPUT2;
+ if (inst->domain == DECODER)
+ fs.buffer_type = HFI_BUFFER_OUTPUT2;
+ else
+ fs.buffer_type = HFI_BUFFER_OUTPUT;
+
fs.width = inst->fmt_dst->fmt.pix_mp.width;
fs.height = inst->fmt_dst->fmt.pix_mp.height;
return hfi_gen1_set_property(inst, ptype, &fs, sizeof(fs));
}
-static int iris_hfi_gen1_decide_core(struct iris_inst *inst)
+static int iris_hfi_gen1_decide_core(struct iris_inst *inst, u32 plane)
{
const u32 ptype = HFI_PROPERTY_CONFIG_VIDEOCORES_USAGE;
struct hfi_videocores_usage_type cu;
@@ -581,36 +763,45 @@ static int iris_hfi_gen1_decide_core(struct iris_inst *inst)
return hfi_gen1_set_property(inst, ptype, &cu, sizeof(cu));
}
-static int iris_hfi_gen1_set_raw_format(struct iris_inst *inst)
+static int iris_hfi_gen1_set_raw_format(struct iris_inst *inst, u32 plane)
{
const u32 ptype = HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT;
- u32 pixelformat = inst->fmt_dst->fmt.pix_mp.pixelformat;
struct hfi_uncompressed_format_select fmt;
+ u32 pixelformat;
int ret;
- if (iris_split_mode_enabled(inst)) {
- fmt.buffer_type = HFI_BUFFER_OUTPUT;
- fmt.format = pixelformat == V4L2_PIX_FMT_NV12 ? HFI_COLOR_FORMAT_NV12_UBWC : 0;
+ if (inst->domain == DECODER) {
+ pixelformat = inst->fmt_dst->fmt.pix_mp.pixelformat;
+ if (iris_split_mode_enabled(inst)) {
+ fmt.buffer_type = HFI_BUFFER_OUTPUT;
+ fmt.format = pixelformat == V4L2_PIX_FMT_NV12 ?
+ HFI_COLOR_FORMAT_NV12_UBWC : 0;
- ret = hfi_gen1_set_property(inst, ptype, &fmt, sizeof(fmt));
- if (ret)
- return ret;
+ ret = hfi_gen1_set_property(inst, ptype, &fmt, sizeof(fmt));
+ if (ret)
+ return ret;
- fmt.buffer_type = HFI_BUFFER_OUTPUT2;
- fmt.format = pixelformat == V4L2_PIX_FMT_NV12 ? HFI_COLOR_FORMAT_NV12 : 0;
+ fmt.buffer_type = HFI_BUFFER_OUTPUT2;
+ fmt.format = pixelformat == V4L2_PIX_FMT_NV12 ? HFI_COLOR_FORMAT_NV12 : 0;
- ret = hfi_gen1_set_property(inst, ptype, &fmt, sizeof(fmt));
+ ret = hfi_gen1_set_property(inst, ptype, &fmt, sizeof(fmt));
+ } else {
+ fmt.buffer_type = HFI_BUFFER_OUTPUT;
+ fmt.format = pixelformat == V4L2_PIX_FMT_NV12 ? HFI_COLOR_FORMAT_NV12 : 0;
+
+ ret = hfi_gen1_set_property(inst, ptype, &fmt, sizeof(fmt));
+ }
} else {
- fmt.buffer_type = HFI_BUFFER_OUTPUT;
+ pixelformat = inst->fmt_src->fmt.pix_mp.pixelformat;
+ fmt.buffer_type = HFI_BUFFER_INPUT;
fmt.format = pixelformat == V4L2_PIX_FMT_NV12 ? HFI_COLOR_FORMAT_NV12 : 0;
-
ret = hfi_gen1_set_property(inst, ptype, &fmt, sizeof(fmt));
}
return ret;
}
-static int iris_hfi_gen1_set_format_constraints(struct iris_inst *inst)
+static int iris_hfi_gen1_set_format_constraints(struct iris_inst *inst, u32 plane)
{
const u32 ptype = HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_CONSTRAINTS_INFO;
struct hfi_uncompressed_plane_actual_constraints_info pconstraint;
@@ -630,7 +821,7 @@ static int iris_hfi_gen1_set_format_constraints(struct iris_inst *inst)
return hfi_gen1_set_property(inst, ptype, &pconstraint, sizeof(pconstraint));
}
-static int iris_hfi_gen1_set_num_bufs(struct iris_inst *inst)
+static int iris_hfi_gen1_set_num_bufs(struct iris_inst *inst, u32 plane)
{
u32 ptype = HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL;
struct hfi_buffer_count_actual buf_count;
@@ -644,20 +835,28 @@ static int iris_hfi_gen1_set_num_bufs(struct iris_inst *inst)
if (ret)
return ret;
- if (iris_split_mode_enabled(inst)) {
- buf_count.type = HFI_BUFFER_OUTPUT;
- buf_count.count_actual = VIDEO_MAX_FRAME;
- buf_count.count_min_host = VIDEO_MAX_FRAME;
+ if (inst->domain == DECODER) {
+ if (iris_split_mode_enabled(inst)) {
+ buf_count.type = HFI_BUFFER_OUTPUT;
+ buf_count.count_actual = VIDEO_MAX_FRAME;
+ buf_count.count_min_host = VIDEO_MAX_FRAME;
- ret = hfi_gen1_set_property(inst, ptype, &buf_count, sizeof(buf_count));
- if (ret)
- return ret;
+ ret = hfi_gen1_set_property(inst, ptype, &buf_count, sizeof(buf_count));
+ if (ret)
+ return ret;
- buf_count.type = HFI_BUFFER_OUTPUT2;
- buf_count.count_actual = iris_vpu_buf_count(inst, BUF_DPB);
- buf_count.count_min_host = iris_vpu_buf_count(inst, BUF_DPB);
+ buf_count.type = HFI_BUFFER_OUTPUT2;
+ buf_count.count_actual = iris_vpu_buf_count(inst, BUF_DPB);
+ buf_count.count_min_host = iris_vpu_buf_count(inst, BUF_DPB);
- ret = hfi_gen1_set_property(inst, ptype, &buf_count, sizeof(buf_count));
+ ret = hfi_gen1_set_property(inst, ptype, &buf_count, sizeof(buf_count));
+ } else {
+ buf_count.type = HFI_BUFFER_OUTPUT;
+ buf_count.count_actual = VIDEO_MAX_FRAME;
+ buf_count.count_min_host = VIDEO_MAX_FRAME;
+
+ ret = hfi_gen1_set_property(inst, ptype, &buf_count, sizeof(buf_count));
+ }
} else {
buf_count.type = HFI_BUFFER_OUTPUT;
buf_count.count_actual = VIDEO_MAX_FRAME;
@@ -669,7 +868,7 @@ static int iris_hfi_gen1_set_num_bufs(struct iris_inst *inst)
return ret;
}
-static int iris_hfi_gen1_set_multistream(struct iris_inst *inst)
+static int iris_hfi_gen1_set_multistream(struct iris_inst *inst, u32 plane)
{
u32 ptype = HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM;
struct hfi_multi_stream multi = {0};
@@ -704,7 +903,7 @@ static int iris_hfi_gen1_set_multistream(struct iris_inst *inst)
return ret;
}
-static int iris_hfi_gen1_set_bufsize(struct iris_inst *inst)
+static int iris_hfi_gen1_set_bufsize(struct iris_inst *inst, u32 plane)
{
const u32 ptype = HFI_PROPERTY_PARAM_BUFFER_SIZE_ACTUAL;
struct hfi_buffer_size_actual bufsz;
@@ -712,7 +911,7 @@ static int iris_hfi_gen1_set_bufsize(struct iris_inst *inst)
if (iris_split_mode_enabled(inst)) {
bufsz.type = HFI_BUFFER_OUTPUT;
- bufsz.size = iris_vpu_buf_size(inst, BUF_DPB);
+ bufsz.size = inst->core->iris_platform_data->get_vpu_buffer_size(inst, BUF_DPB);
ret = hfi_gen1_set_property(inst, ptype, &bufsz, sizeof(bufsz));
if (ret)
@@ -739,14 +938,49 @@ static int iris_hfi_gen1_set_bufsize(struct iris_inst *inst)
return ret;
}
+static int iris_hfi_gen1_set_frame_rate(struct iris_inst *inst, u32 plane)
+{
+ const u32 ptype = HFI_PROPERTY_CONFIG_FRAME_RATE;
+ struct hfi_framerate frate;
+
+ if (V4L2_TYPE_IS_OUTPUT(plane))
+ return 0;
+
+ frate.buffer_type = HFI_BUFFER_OUTPUT;
+ frate.framerate = inst->frame_rate << 16;
+
+ return hfi_gen1_set_property(inst, ptype, &frate, sizeof(frate));
+}
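
The `inst->frame_rate << 16` here is Q16 fixed point: the integer frame rate moves into the upper half-word, leaving the lower 16 bits for a fractional part. A standalone illustration (not driver code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t fps = 30;
            uint32_t q16 = fps << 16;           /* 30.0 fps in Q16 */

            printf("0x%08x (%u)\n", q16, q16);  /* 0x001e0000 (1966080) */
            printf("%.2f fps\n", q16 / 65536.0);
            return 0;
    }
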
+
+static int iris_hfi_gen1_set_stride(struct iris_inst *inst, u32 plane)
+{
+ const u32 ptype = HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO;
+ struct hfi_uncompressed_plane_actual_info plane_actual_info;
+
+ plane_actual_info.buffer_type = HFI_BUFFER_INPUT;
+ plane_actual_info.num_planes = 2;
+ plane_actual_info.plane_format[0].actual_stride =
+ ALIGN(inst->fmt_src->fmt.pix_mp.width, 128);
+ plane_actual_info.plane_format[0].actual_plane_buffer_height =
+ ALIGN(inst->fmt_src->fmt.pix_mp.height, 32);
+ plane_actual_info.plane_format[1].actual_stride =
+ ALIGN(inst->fmt_src->fmt.pix_mp.width, 128);
+ plane_actual_info.plane_format[1].actual_plane_buffer_height =
+ (ALIGN(inst->fmt_src->fmt.pix_mp.height, 32)) / 2;
+
+ return hfi_gen1_set_property(inst, ptype, &plane_actual_info, sizeof(plane_actual_info));
+}
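
The stride programming above assumes NV12 input with the luma stride aligned to 128 bytes, the luma plane height to 32 lines, and the chroma plane at half that height. Worked numbers for a 1920x1080 source (standalone sketch; ALIGN is redefined locally to mirror the kernel macro):

    #include <stdint.h>
    #include <stdio.h>

    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
            uint32_t width = 1920, height = 1080;

            printf("luma stride:   %u\n", ALIGN(width, 128));      /* 1920 */
            printf("luma height:   %u\n", ALIGN(height, 32));      /* 1088 */
            printf("chroma height: %u\n", ALIGN(height, 32) / 2);  /* 544  */
            return 0;
    }
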
+
static int iris_hfi_gen1_session_set_config_params(struct iris_inst *inst, u32 plane)
{
+ const struct iris_hfi_prop_type_handle *handler = NULL;
+ u32 handler_size = 0;
struct iris_core *core = inst->core;
u32 config_params_size, i, j;
const u32 *config_params;
int ret;
- static const struct iris_hfi_prop_type_handle prop_type_handle_inp_arr[] = {
+ static const struct iris_hfi_prop_type_handle vdec_prop_type_handle_inp_arr[] = {
{HFI_PROPERTY_PARAM_FRAME_SIZE,
iris_hfi_gen1_set_resolution},
{HFI_PROPERTY_CONFIG_VIDEOCORES_USAGE,
@@ -763,7 +997,7 @@ static int iris_hfi_gen1_session_set_config_params(struct iris_inst *inst, u32 p
iris_hfi_gen1_set_bufsize},
};
- static const struct iris_hfi_prop_type_handle prop_type_handle_out_arr[] = {
+ static const struct iris_hfi_prop_type_handle vdec_prop_type_handle_out_arr[] = {
{HFI_PROPERTY_PARAM_FRAME_SIZE,
iris_hfi_gen1_set_resolution},
{HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT,
@@ -778,29 +1012,43 @@ static int iris_hfi_gen1_session_set_config_params(struct iris_inst *inst, u32 p
iris_hfi_gen1_set_bufsize},
};
- config_params = core->iris_platform_data->input_config_params_default;
- config_params_size = core->iris_platform_data->input_config_params_default_size;
-
- if (V4L2_TYPE_IS_OUTPUT(plane)) {
- for (i = 0; i < config_params_size; i++) {
- for (j = 0; j < ARRAY_SIZE(prop_type_handle_inp_arr); j++) {
- if (prop_type_handle_inp_arr[j].type == config_params[i]) {
- ret = prop_type_handle_inp_arr[j].handle(inst);
- if (ret)
- return ret;
- break;
- }
- }
+ static const struct iris_hfi_prop_type_handle venc_prop_type_handle_inp_arr[] = {
+ {HFI_PROPERTY_CONFIG_FRAME_RATE,
+ iris_hfi_gen1_set_frame_rate},
+ {HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO,
+ iris_hfi_gen1_set_stride},
+ {HFI_PROPERTY_PARAM_FRAME_SIZE,
+ iris_hfi_gen1_set_resolution},
+ {HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT,
+ iris_hfi_gen1_set_raw_format},
+ {HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL,
+ iris_hfi_gen1_set_num_bufs},
+ };
+
+ if (inst->domain == DECODER) {
+ config_params = core->iris_platform_data->dec_input_config_params_default;
+ config_params_size = core->iris_platform_data->dec_input_config_params_default_size;
+ if (V4L2_TYPE_IS_OUTPUT(plane)) {
+ handler = vdec_prop_type_handle_inp_arr;
+ handler_size = ARRAY_SIZE(vdec_prop_type_handle_inp_arr);
+ } else if (V4L2_TYPE_IS_CAPTURE(plane)) {
+ handler = vdec_prop_type_handle_out_arr;
+ handler_size = ARRAY_SIZE(vdec_prop_type_handle_out_arr);
}
- } else if (V4L2_TYPE_IS_CAPTURE(plane)) {
- for (i = 0; i < config_params_size; i++) {
- for (j = 0; j < ARRAY_SIZE(prop_type_handle_out_arr); j++) {
- if (prop_type_handle_out_arr[j].type == config_params[i]) {
- ret = prop_type_handle_out_arr[j].handle(inst);
- if (ret)
- return ret;
- break;
- }
+ } else {
+ config_params = core->iris_platform_data->enc_input_config_params;
+ config_params_size = core->iris_platform_data->enc_input_config_params_size;
+ handler = venc_prop_type_handle_inp_arr;
+ handler_size = ARRAY_SIZE(venc_prop_type_handle_inp_arr);
+ }
+
+ for (i = 0; i < config_params_size; i++) {
+ for (j = 0; j < handler_size; j++) {
+ if (handler[j].type == config_params[i]) {
+ ret = handler[j].handle(inst, plane);
+ if (ret)
+ return ret;
+ break;
}
}
}
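
The restructured dispatch above collapses the decoder-input, decoder-output and encoder-input cases into a single table walk: each property ID from the platform data is looked up in the selected handler array and its callback invoked with the plane. A minimal standalone sketch of the same pattern, with simplified types and hypothetical property IDs and handler names:

    #include <stddef.h>
    #include <stdio.h>

    typedef int (*prop_handler)(int plane);

    struct prop_type_handle {
            unsigned int type;
            prop_handler handle;
    };

    static int set_frame_size(int plane)
    {
            printf("frame size, plane %d\n", plane);
            return 0;
    }

    static int set_raw_format(int plane)
    {
            printf("raw format, plane %d\n", plane);
            return 0;
    }

    int main(void)
    {
            static const struct prop_type_handle handlers[] = {
                    { 0x1001, set_frame_size },
                    { 0x1003, set_raw_format },
            };
            static const unsigned int config_params[] = { 0x1003, 0x1001 };
            size_t i, j;

            /* same walk as the driver: platform list drives handler order */
            for (i = 0; i < sizeof(config_params) / sizeof(config_params[0]); i++)
                    for (j = 0; j < sizeof(handlers) / sizeof(handlers[0]); j++)
                            if (handlers[j].type == config_params[i]) {
                                    if (handlers[j].handle(2)) /* plane id, arbitrary */
                                            return 1;
                                    break;
                            }
            return 0;
    }
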
diff --git a/drivers/media/platform/qcom/iris/iris_hfi_gen1_defines.h b/drivers/media/platform/qcom/iris/iris_hfi_gen1_defines.h
index d4d119ca98b0..42226ccee3d9 100644
--- a/drivers/media/platform/qcom/iris/iris_hfi_gen1_defines.h
+++ b/drivers/media/platform/qcom/iris/iris_hfi_gen1_defines.h
@@ -10,6 +10,7 @@
#define HFI_VIDEO_ARCH_OX 0x1
+#define HFI_SESSION_TYPE_ENC 1
#define HFI_SESSION_TYPE_DEC 2
#define HFI_VIDEO_CODEC_H264 0x00000002
@@ -73,18 +74,22 @@
#define HFI_BUFFER_INPUT 0x1
#define HFI_BUFFER_OUTPUT 0x2
#define HFI_BUFFER_OUTPUT2 0x3
+#define HFI_BUFFER_INTERNAL_PERSIST 0x4
#define HFI_BUFFER_INTERNAL_PERSIST_1 0x5
#define HFI_BUFFER_INTERNAL_SCRATCH 0x6
#define HFI_BUFFER_INTERNAL_SCRATCH_1 0x7
+#define HFI_BUFFER_INTERNAL_SCRATCH_2 0x8
#define HFI_PROPERTY_SYS_CODEC_POWER_PLANE_CTRL 0x5
#define HFI_PROPERTY_SYS_IMAGE_VERSION 0x6
#define HFI_PROPERTY_PARAM_FRAME_SIZE 0x1001
+#define HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO 0x1002
#define HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT 0x1003
#define HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT 0x1005
#define HFI_PROPERTY_PARAM_WORK_MODE 0x1015
#define HFI_PROPERTY_PARAM_WORK_ROUTE 0x1017
+#define HFI_PROPERTY_CONFIG_FRAME_RATE 0x2001
#define HFI_PROPERTY_CONFIG_VIDEOCORES_USAGE 0x2002
#define HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM 0x1003001
@@ -111,15 +116,32 @@
#define HFI_MSG_SESSION_RELEASE_RESOURCES 0x22100a
#define HFI_MSG_SESSION_RELEASE_BUFFERS 0x22100c
-#define HFI_PICTURE_I 0x00000001
-#define HFI_PICTURE_P 0x00000002
-#define HFI_PICTURE_B 0x00000004
-#define HFI_PICTURE_IDR 0x00000008
+#define HFI_GEN1_PICTURE_I 0x00000001
+#define HFI_GEN1_PICTURE_P 0x00000002
+#define HFI_GEN1_PICTURE_B 0x00000004
+#define HFI_GEN1_PICTURE_IDR 0x00000008
#define HFI_FRAME_NOTCODED 0x7f002000
#define HFI_FRAME_YUV 0x7f004000
#define HFI_UNUSED_PICT 0x10000000
-#define HFI_BUFFERFLAG_DATACORRUPT 0x00000008
-#define HFI_BUFFERFLAG_DROP_FRAME 0x20000000
+#define HFI_BUFFERFLAG_DATACORRUPT 0x00000008
+#define HFI_BUFFERFLAG_DROP_FRAME 0x20000000
+#define HFI_RATE_CONTROL_OFF 0x1000001
+#define HFI_RATE_CONTROL_VBR_VFR 0x1000002
+#define HFI_RATE_CONTROL_VBR_CFR 0x1000003
+#define HFI_RATE_CONTROL_CBR_VFR 0x1000004
+#define HFI_RATE_CONTROL_CBR_CFR 0x1000005
+#define HFI_RATE_CONTROL_CQ 0x1000008
+
+#define HFI_H264_ENTROPY_CAVLC 0x1
+#define HFI_H264_ENTROPY_CABAC 0x2
+
+#define HFI_PROPERTY_PARAM_VENC_H264_ENTROPY_CONTROL 0x2005002
+#define HFI_PROPERTY_PARAM_VENC_H264_DEBLOCK_CONTROL 0x2005003
+#define HFI_PROPERTY_PARAM_VENC_RATE_CONTROL 0x2005004
+#define HFI_PROPERTY_PARAM_VENC_SESSION_QP_RANGE_V2 0x2005009
+#define HFI_PROPERTY_PARAM_VENC_MAX_NUM_B_FRAMES 0x2005020
+#define HFI_PROPERTY_CONFIG_VENC_TARGET_BITRATE 0x2006001
+#define HFI_PROPERTY_CONFIG_VENC_SYNC_FRAME_SEQUENCE_HEADER 0x2006008
struct hfi_pkt_hdr {
u32 size;
@@ -194,6 +216,23 @@ struct hfi_session_empty_buffer_compressed_pkt {
u32 data;
};
+struct hfi_session_empty_buffer_uncompressed_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 view_id;
+ u32 time_stamp_hi;
+ u32 time_stamp_lo;
+ u32 flags;
+ u32 mark_target;
+ u32 mark_data;
+ u32 alloc_len;
+ u32 filled_len;
+ u32 offset;
+ u32 input_tag;
+ u32 packet_buffer;
+ u32 extradata_buffer;
+ u32 data;
+};
+
struct hfi_session_fill_buffer_pkt {
struct hfi_session_hdr_pkt shdr;
u32 stream_id;
@@ -340,6 +379,17 @@ struct hfi_uncompressed_plane_actual_constraints_info {
struct hfi_uncompressed_plane_constraints plane_format[2];
};
+struct hfi_uncompressed_plane_actual {
+ int actual_stride;
+ u32 actual_plane_buffer_height;
+};
+
+struct hfi_uncompressed_plane_actual_info {
+ u32 buffer_type;
+ u32 num_planes;
+ struct hfi_uncompressed_plane_actual plane_format[2];
+};
+
struct hfi_buffer_count_actual {
u32 type;
u32 count_actual;
@@ -367,6 +417,36 @@ struct hfi_buffer_requirements {
u32 alignment;
};
+struct hfi_bitrate {
+ u32 bitrate;
+ u32 layer_id;
+};
+
+#define HFI_H264_CABAC_MODEL_0 0x1
+
+struct hfi_h264_entropy_control {
+ u32 entropy_mode;
+ u32 cabac_model;
+};
+
+struct hfi_quantization_v2 {
+ u32 qp_packed;
+ u32 layer_id;
+ u32 enable;
+ u32 reserved[3];
+};
+
+struct hfi_quantization_range_v2 {
+ struct hfi_quantization_v2 min_qp;
+ struct hfi_quantization_v2 max_qp;
+ u32 reserved[4];
+};
+
+struct hfi_framerate {
+ u32 buffer_type;
+ u32 framerate;
+};
+
struct hfi_event_data {
u32 error;
u32 height;
@@ -398,6 +478,26 @@ struct hfi_msg_session_empty_buffer_done_pkt {
u32 data[];
};
+struct hfi_msg_session_fbd_compressed_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 time_stamp_hi;
+ u32 time_stamp_lo;
+ u32 error_type;
+ u32 flags;
+ u32 mark_target;
+ u32 mark_data;
+ u32 stats;
+ u32 offset;
+ u32 alloc_len;
+ u32 filled_len;
+ u32 input_tag;
+ u32 output_tag;
+ u32 picture_type;
+ u32 packet_buffer;
+ u32 extradata_buffer;
+ u32 data[];
+};
+
struct hfi_msg_session_fbd_uncompressed_plane0_pkt {
struct hfi_session_hdr_pkt shdr;
u32 stream_id;
diff --git a/drivers/media/platform/qcom/iris/iris_hfi_gen1_response.c b/drivers/media/platform/qcom/iris/iris_hfi_gen1_response.c
index 8d1ce8a19a45..8e864c239e29 100644
--- a/drivers/media/platform/qcom/iris/iris_hfi_gen1_response.c
+++ b/drivers/media/platform/qcom/iris/iris_hfi_gen1_response.c
@@ -387,24 +387,43 @@ error:
static void iris_hfi_gen1_session_ftb_done(struct iris_inst *inst, void *packet)
{
- struct hfi_msg_session_fbd_uncompressed_plane0_pkt *pkt = packet;
+ struct hfi_msg_session_fbd_uncompressed_plane0_pkt *uncom_pkt = packet;
+ struct hfi_msg_session_fbd_compressed_pkt *com_pkt = packet;
struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
struct v4l2_m2m_buffer *m2m_buffer, *n;
struct hfi_session_flush_pkt flush_pkt;
- u32 timestamp_hi = pkt->time_stamp_hi;
- u32 timestamp_lo = pkt->time_stamp_lo;
+ u32 timestamp_hi;
+ u32 timestamp_lo;
struct iris_core *core = inst->core;
- u32 filled_len = pkt->filled_len;
- u32 pic_type = pkt->picture_type;
- u32 output_tag = pkt->output_tag;
+ u32 filled_len;
+ u32 pic_type;
+ u32 output_tag;
struct iris_buffer *buf, *iter;
struct iris_buffers *buffers;
- u32 hfi_flags = pkt->flags;
- u32 offset = pkt->offset;
+ u32 hfi_flags;
+ u32 offset;
u64 timestamp_us = 0;
bool found = false;
u32 flags = 0;
+ if (inst->domain == DECODER) {
+ timestamp_hi = uncom_pkt->time_stamp_hi;
+ timestamp_lo = uncom_pkt->time_stamp_lo;
+ filled_len = uncom_pkt->filled_len;
+ pic_type = uncom_pkt->picture_type;
+ output_tag = uncom_pkt->output_tag;
+ hfi_flags = uncom_pkt->flags;
+ offset = uncom_pkt->offset;
+ } else {
+ timestamp_hi = com_pkt->time_stamp_hi;
+ timestamp_lo = com_pkt->time_stamp_lo;
+ filled_len = com_pkt->filled_len;
+ pic_type = com_pkt->picture_type;
+ output_tag = com_pkt->output_tag;
+ hfi_flags = com_pkt->flags;
+ offset = com_pkt->offset;
+ }
+
if ((hfi_flags & HFI_BUFFERFLAG_EOS) && !filled_len) {
reinit_completion(&inst->flush_completion);
@@ -416,11 +435,10 @@ static void iris_hfi_gen1_session_ftb_done(struct iris_inst *inst, void *packet)
inst->flush_responses_pending++;
iris_inst_sub_state_change_drain_last(inst);
-
- return;
}
- if (iris_split_mode_enabled(inst) && pkt->stream_id == 0) {
+ if (iris_split_mode_enabled(inst) && inst->domain == DECODER &&
+ uncom_pkt->stream_id == 0) {
buffers = &inst->buffers[BUF_DPB];
if (!buffers)
goto error;
@@ -461,8 +479,14 @@ static void iris_hfi_gen1_session_ftb_done(struct iris_inst *inst, void *packet)
timestamp_us = timestamp_hi;
timestamp_us = (timestamp_us << 32) | timestamp_lo;
} else {
- if (pkt->stream_id == 1 && !inst->last_buffer_dequeued) {
- if (iris_drc_pending(inst)) {
+ if (inst->domain == DECODER && uncom_pkt->stream_id == 1 &&
+ !inst->last_buffer_dequeued) {
+ if (iris_drc_pending(inst) || iris_drain_pending(inst)) {
+ flags |= V4L2_BUF_FLAG_LAST;
+ inst->last_buffer_dequeued = true;
+ }
+ } else if (inst->domain == ENCODER) {
+ if (!inst->last_buffer_dequeued && iris_drain_pending(inst)) {
flags |= V4L2_BUF_FLAG_LAST;
inst->last_buffer_dequeued = true;
}
@@ -471,14 +495,14 @@ static void iris_hfi_gen1_session_ftb_done(struct iris_inst *inst, void *packet)
buf->timestamp = timestamp_us;
switch (pic_type) {
- case HFI_PICTURE_IDR:
- case HFI_PICTURE_I:
+ case HFI_GEN1_PICTURE_IDR:
+ case HFI_GEN1_PICTURE_I:
flags |= V4L2_BUF_FLAG_KEYFRAME;
break;
- case HFI_PICTURE_P:
+ case HFI_GEN1_PICTURE_P:
flags |= V4L2_BUF_FLAG_PFRAME;
break;
- case HFI_PICTURE_B:
+ case HFI_GEN1_PICTURE_B:
flags |= V4L2_BUF_FLAG_BFRAME;
break;
case HFI_FRAME_NOTCODED:
@@ -553,7 +577,7 @@ static const struct iris_hfi_gen1_response_pkt_info pkt_infos[] = {
},
{
.pkt = HFI_MSG_SESSION_FILL_BUFFER,
- .pkt_sz = sizeof(struct hfi_msg_session_fbd_uncompressed_plane0_pkt),
+ .pkt_sz = sizeof(struct hfi_msg_session_fbd_compressed_pkt),
},
{
.pkt = HFI_MSG_SESSION_FLUSH,
diff --git a/drivers/media/platform/qcom/iris/iris_hfi_gen2_command.c b/drivers/media/platform/qcom/iris/iris_hfi_gen2_command.c
index 7ca5ae13d62b..4ce71a142508 100644
--- a/drivers/media/platform/qcom/iris/iris_hfi_gen2_command.c
+++ b/drivers/media/platform/qcom/iris/iris_hfi_gen2_command.c
@@ -88,33 +88,63 @@ static int iris_hfi_gen2_sys_pc_prep(struct iris_core *core)
return ret;
}
-static u32 iris_hfi_gen2_get_port(u32 plane)
+static u32 iris_hfi_gen2_get_port(struct iris_inst *inst, u32 plane)
{
- switch (plane) {
- case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
- return HFI_PORT_BITSTREAM;
- case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
- return HFI_PORT_RAW;
- default:
- return HFI_PORT_NONE;
+ if (inst->domain == DECODER) {
+ switch (plane) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ return HFI_PORT_BITSTREAM;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ return HFI_PORT_RAW;
+ default:
+ return HFI_PORT_NONE;
+ }
+ } else {
+ switch (plane) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ return HFI_PORT_RAW;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ return HFI_PORT_BITSTREAM;
+ default:
+ return HFI_PORT_NONE;
+ }
}
}
-static u32 iris_hfi_gen2_get_port_from_buf_type(enum iris_buffer_type buffer_type)
+static u32 iris_hfi_gen2_get_port_from_buf_type(struct iris_inst *inst,
+ enum iris_buffer_type buffer_type)
{
- switch (buffer_type) {
- case BUF_INPUT:
- case BUF_BIN:
- case BUF_COMV:
- case BUF_NON_COMV:
- case BUF_LINE:
- return HFI_PORT_BITSTREAM;
- case BUF_OUTPUT:
- case BUF_DPB:
- return HFI_PORT_RAW;
- case BUF_PERSIST:
- default:
- return HFI_PORT_NONE;
+ if (inst->domain == DECODER) {
+ switch (buffer_type) {
+ case BUF_INPUT:
+ case BUF_BIN:
+ case BUF_COMV:
+ case BUF_NON_COMV:
+ case BUF_LINE:
+ return HFI_PORT_BITSTREAM;
+ case BUF_OUTPUT:
+ case BUF_DPB:
+ return HFI_PORT_RAW;
+ case BUF_PERSIST:
+ default:
+ return HFI_PORT_NONE;
+ }
+ } else {
+ switch (buffer_type) {
+ case BUF_INPUT:
+ case BUF_VPSS:
+ return HFI_PORT_RAW;
+ case BUF_OUTPUT:
+ case BUF_BIN:
+ case BUF_COMV:
+ case BUF_NON_COMV:
+ case BUF_LINE:
+ case BUF_SCRATCH_2:
+ return HFI_PORT_BITSTREAM;
+ case BUF_ARP:
+ default:
+ return HFI_PORT_NONE;
+ }
}
}
@@ -136,34 +166,77 @@ static int iris_hfi_gen2_session_set_property(struct iris_inst *inst, u32 packet
inst_hfi_gen2->packet->size);
}
-static int iris_hfi_gen2_set_bitstream_resolution(struct iris_inst *inst)
+static int iris_hfi_gen2_set_raw_resolution(struct iris_inst *inst, u32 plane)
{
- struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
- u32 port = iris_hfi_gen2_get_port(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
u32 resolution = inst->fmt_src->fmt.pix_mp.width << 16 |
inst->fmt_src->fmt.pix_mp.height;
+ u32 port = iris_hfi_gen2_get_port(inst, plane);
- inst_hfi_gen2->src_subcr_params.bitstream_resolution = resolution;
+ return iris_hfi_gen2_session_set_property(inst,
+ HFI_PROP_RAW_RESOLUTION,
+ HFI_HOST_FLAGS_NONE,
+ port,
+ HFI_PAYLOAD_32_PACKED,
+ &resolution,
+ sizeof(u32));
+}
+
+static int iris_hfi_gen2_set_bitstream_resolution(struct iris_inst *inst, u32 plane)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ u32 port = iris_hfi_gen2_get_port(inst, plane);
+ enum hfi_packet_payload_info payload_type;
+ u32 resolution, codec_align;
+
+ if (inst->domain == DECODER) {
+ resolution = inst->fmt_src->fmt.pix_mp.width << 16 |
+ inst->fmt_src->fmt.pix_mp.height;
+ inst_hfi_gen2->src_subcr_params.bitstream_resolution = resolution;
+ payload_type = HFI_PAYLOAD_U32;
+ } else {
+ codec_align = inst->codec == V4L2_PIX_FMT_HEVC ? 32 : 16;
+ resolution = ALIGN(inst->fmt_dst->fmt.pix_mp.width, codec_align) << 16 |
+ ALIGN(inst->fmt_dst->fmt.pix_mp.height, codec_align);
+ inst_hfi_gen2->dst_subcr_params.bitstream_resolution = resolution;
+ payload_type = HFI_PAYLOAD_32_PACKED;
+ }
return iris_hfi_gen2_session_set_property(inst,
HFI_PROP_BITSTREAM_RESOLUTION,
HFI_HOST_FLAGS_NONE,
port,
- HFI_PAYLOAD_U32,
+ payload_type,
&resolution,
sizeof(u32));
}
-static int iris_hfi_gen2_set_crop_offsets(struct iris_inst *inst)
+static int iris_hfi_gen2_set_crop_offsets(struct iris_inst *inst, u32 plane)
{
- u32 bottom_offset = (inst->fmt_src->fmt.pix_mp.height - inst->crop.height);
- u32 right_offset = (inst->fmt_src->fmt.pix_mp.width - inst->crop.width);
struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
- u32 port = iris_hfi_gen2_get_port(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
- u32 left_offset = inst->crop.left;
- u32 top_offset = inst->crop.top;
+ u32 port = iris_hfi_gen2_get_port(inst, plane);
+ u32 bottom_offset, right_offset;
+ u32 left_offset, top_offset;
u32 payload[2];
+ if (inst->domain == DECODER) {
+ if (V4L2_TYPE_IS_OUTPUT(plane)) {
+ bottom_offset = (inst->fmt_src->fmt.pix_mp.height - inst->crop.height);
+ right_offset = (inst->fmt_src->fmt.pix_mp.width - inst->crop.width);
+ left_offset = inst->crop.left;
+ top_offset = inst->crop.top;
+ } else {
+ bottom_offset = (inst->fmt_dst->fmt.pix_mp.height - inst->compose.height);
+ right_offset = (inst->fmt_dst->fmt.pix_mp.width - inst->compose.width);
+ left_offset = inst->compose.left;
+ top_offset = inst->compose.top;
+ }
+ } else {
+ bottom_offset = (inst->fmt_src->fmt.pix_mp.height - inst->crop.height);
+ right_offset = (inst->fmt_src->fmt.pix_mp.width - inst->crop.width);
+ left_offset = inst->crop.left;
+ top_offset = inst->crop.top;
+ }
+
payload[0] = FIELD_PREP(GENMASK(31, 16), left_offset) | top_offset;
payload[1] = FIELD_PREP(GENMASK(31, 16), right_offset) | bottom_offset;
inst_hfi_gen2->src_subcr_params.crop_offsets[0] = payload[0];
@@ -178,10 +251,10 @@ static int iris_hfi_gen2_set_crop_offsets(struct iris_inst *inst)
sizeof(u64));
}
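
FIELD_PREP(GENMASK(31, 16), x) | y packs two 16-bit offsets per word: left/top into payload[0], right/bottom into payload[1]. The same packing written out as plain shifts, with illustrative offsets:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t left = 64, top = 32, right = 0, bottom = 8;
            uint32_t payload[2];

            payload[0] = left << 16 | top;   /* FIELD_PREP(GENMASK(31, 16), left) | top */
            payload[1] = right << 16 | bottom;

            printf("0x%08x 0x%08x\n", payload[0], payload[1]);
            /* prints: 0x00400020 0x00000008 */
            return 0;
    }
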
-static int iris_hfi_gen2_set_bit_depth(struct iris_inst *inst)
+static int iris_hfi_gen2_set_bit_depth(struct iris_inst *inst, u32 plane)
{
struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
- u32 port = iris_hfi_gen2_get_port(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ u32 port = iris_hfi_gen2_get_port(inst, plane);
u32 bitdepth = BIT_DEPTH_8;
inst_hfi_gen2->src_subcr_params.bit_depth = bitdepth;
@@ -195,10 +268,10 @@ static int iris_hfi_gen2_set_bit_depth(struct iris_inst *inst)
sizeof(u32));
}
-static int iris_hfi_gen2_set_coded_frames(struct iris_inst *inst)
+static int iris_hfi_gen2_set_coded_frames(struct iris_inst *inst, u32 plane)
{
struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
- u32 port = iris_hfi_gen2_get_port(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ u32 port = iris_hfi_gen2_get_port(inst, plane);
u32 coded_frames = 0;
if (inst->fw_caps[CODED_FRAMES].value == CODED_FRAMES_PROGRESSIVE)
@@ -214,11 +287,11 @@ static int iris_hfi_gen2_set_coded_frames(struct iris_inst *inst)
sizeof(u32));
}
-static int iris_hfi_gen2_set_min_output_count(struct iris_inst *inst)
+static int iris_hfi_gen2_set_min_output_count(struct iris_inst *inst, u32 plane)
{
struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
- u32 port = iris_hfi_gen2_get_port(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
u32 min_output = inst->buffers[BUF_OUTPUT].min_count;
+ u32 port = iris_hfi_gen2_get_port(inst, plane);
inst_hfi_gen2->src_subcr_params.fw_min_count = min_output;
@@ -231,10 +304,10 @@ static int iris_hfi_gen2_set_min_output_count(struct iris_inst *inst)
sizeof(u32));
}
-static int iris_hfi_gen2_set_picture_order_count(struct iris_inst *inst)
+static int iris_hfi_gen2_set_picture_order_count(struct iris_inst *inst, u32 plane)
{
struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
- u32 port = iris_hfi_gen2_get_port(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ u32 port = iris_hfi_gen2_get_port(inst, plane);
u32 poc = 0;
inst_hfi_gen2->src_subcr_params.pic_order_cnt = poc;
@@ -248,16 +321,16 @@ static int iris_hfi_gen2_set_picture_order_count(struct iris_inst *inst)
sizeof(u32));
}
-static int iris_hfi_gen2_set_colorspace(struct iris_inst *inst)
+static int iris_hfi_gen2_set_colorspace(struct iris_inst *inst, u32 plane)
{
struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
- u32 port = iris_hfi_gen2_get_port(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
struct v4l2_pix_format_mplane *pixmp = &inst->fmt_src->fmt.pix_mp;
u32 video_signal_type_present_flag = 0, color_info;
u32 matrix_coeff = HFI_MATRIX_COEFF_RESERVED;
u32 video_format = UNSPECIFIED_COLOR_FORMAT;
u32 full_range = V4L2_QUANTIZATION_DEFAULT;
u32 transfer_char = HFI_TRANSFER_RESERVED;
+ u32 port = iris_hfi_gen2_get_port(inst, plane);
u32 colour_description_present_flag = 0;
u32 primaries = HFI_PRIMARIES_RESERVED;
@@ -291,10 +364,10 @@ static int iris_hfi_gen2_set_colorspace(struct iris_inst *inst)
sizeof(u32));
}
-static int iris_hfi_gen2_set_profile(struct iris_inst *inst)
+static int iris_hfi_gen2_set_profile(struct iris_inst *inst, u32 plane)
{
struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
- u32 port = iris_hfi_gen2_get_port(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ u32 port = iris_hfi_gen2_get_port(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
u32 profile = 0;
switch (inst->codec) {
@@ -320,10 +393,10 @@ static int iris_hfi_gen2_set_profile(struct iris_inst *inst)
sizeof(u32));
}
-static int iris_hfi_gen2_set_level(struct iris_inst *inst)
+static int iris_hfi_gen2_set_level(struct iris_inst *inst, u32 plane)
{
struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
- u32 port = iris_hfi_gen2_get_port(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ u32 port = iris_hfi_gen2_get_port(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
u32 level = 0;
switch (inst->codec) {
@@ -349,33 +422,47 @@ static int iris_hfi_gen2_set_level(struct iris_inst *inst)
sizeof(u32));
}
-static int iris_hfi_gen2_set_colorformat(struct iris_inst *inst)
+static int iris_hfi_gen2_set_colorformat(struct iris_inst *inst, u32 plane)
{
- u32 port = iris_hfi_gen2_get_port(V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ u32 port = iris_hfi_gen2_get_port(inst, plane);
u32 hfi_colorformat, pixelformat;
- pixelformat = inst->fmt_dst->fmt.pix_mp.pixelformat;
- hfi_colorformat = pixelformat == V4L2_PIX_FMT_NV12 ? HFI_COLOR_FMT_NV12 : 0;
+ if (inst->domain == DECODER)
+ pixelformat = inst->fmt_dst->fmt.pix_mp.pixelformat;
+ else
+ pixelformat = inst->fmt_src->fmt.pix_mp.pixelformat;
+
+ hfi_colorformat = pixelformat == V4L2_PIX_FMT_NV12 ? HFI_COLOR_FMT_NV12 : 0;
return iris_hfi_gen2_session_set_property(inst,
HFI_PROP_COLOR_FORMAT,
HFI_HOST_FLAGS_NONE,
port,
- HFI_PAYLOAD_U32,
+ HFI_PAYLOAD_U32_ENUM,
&hfi_colorformat,
sizeof(u32));
}
-static int iris_hfi_gen2_set_linear_stride_scanline(struct iris_inst *inst)
+static int iris_hfi_gen2_set_linear_stride_scanline(struct iris_inst *inst, u32 plane)
{
- u32 port = iris_hfi_gen2_get_port(V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
- u32 pixelformat = inst->fmt_dst->fmt.pix_mp.pixelformat;
- u32 scanline_y = inst->fmt_dst->fmt.pix_mp.height;
- u32 stride_y = inst->fmt_dst->fmt.pix_mp.width;
- u32 scanline_uv = scanline_y / 2;
- u32 stride_uv = stride_y;
+ u32 pixelformat, stride_y, stride_uv, scanline_y, scanline_uv;
+ u32 port = iris_hfi_gen2_get_port(inst, plane);
u32 payload[2];
+ if (inst->domain == DECODER) {
+ pixelformat = inst->fmt_dst->fmt.pix_mp.pixelformat;
+ stride_y = inst->fmt_dst->fmt.pix_mp.width;
+ scanline_y = inst->fmt_dst->fmt.pix_mp.height;
+ } else {
+ pixelformat = inst->fmt_src->fmt.pix_mp.pixelformat;
+ stride_y = ALIGN(inst->fmt_src->fmt.pix_mp.width, 128);
+ scanline_y = ALIGN(inst->fmt_src->fmt.pix_mp.height, 32);
+ }
+
+ stride_uv = stride_y;
+ scanline_uv = scanline_y / 2;
+
if (pixelformat != V4L2_PIX_FMT_NV12)
return 0;
@@ -386,15 +473,15 @@ static int iris_hfi_gen2_set_linear_stride_scanline(struct iris_inst *inst)
HFI_PROP_LINEAR_STRIDE_SCANLINE,
HFI_HOST_FLAGS_NONE,
port,
- HFI_PAYLOAD_U64,
+ HFI_PAYLOAD_64_PACKED,
&payload,
sizeof(u64));
}
-static int iris_hfi_gen2_set_tier(struct iris_inst *inst)
+static int iris_hfi_gen2_set_tier(struct iris_inst *inst, u32 plane)
{
struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
- u32 port = iris_hfi_gen2_get_port(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ u32 port = iris_hfi_gen2_get_port(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
u32 tier = inst->fw_caps[TIER].value;
inst_hfi_gen2->src_subcr_params.tier = tier;
@@ -408,14 +495,29 @@ static int iris_hfi_gen2_set_tier(struct iris_inst *inst)
sizeof(u32));
}
+static int iris_hfi_gen2_set_frame_rate(struct iris_inst *inst, u32 plane)
+{
+ u32 port = iris_hfi_gen2_get_port(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ u32 frame_rate = inst->frame_rate << 16;
+
+ return iris_hfi_gen2_session_set_property(inst,
+ HFI_PROP_FRAME_RATE,
+ HFI_HOST_FLAGS_NONE,
+ port,
+ HFI_PAYLOAD_Q16,
+ &frame_rate,
+ sizeof(u32));
+}
+
static int iris_hfi_gen2_session_set_config_params(struct iris_inst *inst, u32 plane)
{
- struct iris_core *core = inst->core;
+ const struct iris_platform_data *pdata = inst->core->iris_platform_data;
u32 config_params_size = 0, i, j;
const u32 *config_params = NULL;
int ret;
static const struct iris_hfi_prop_type_handle prop_type_handle_arr[] = {
+ {HFI_PROP_RAW_RESOLUTION, iris_hfi_gen2_set_raw_resolution },
{HFI_PROP_BITSTREAM_RESOLUTION, iris_hfi_gen2_set_bitstream_resolution },
{HFI_PROP_CROP_OFFSETS, iris_hfi_gen2_set_crop_offsets },
{HFI_PROP_CODED_FRAMES, iris_hfi_gen2_set_coded_frames },
@@ -428,29 +530,35 @@ static int iris_hfi_gen2_session_set_config_params(struct iris_inst *inst, u32 p
{HFI_PROP_COLOR_FORMAT, iris_hfi_gen2_set_colorformat },
{HFI_PROP_LINEAR_STRIDE_SCANLINE, iris_hfi_gen2_set_linear_stride_scanline },
{HFI_PROP_TIER, iris_hfi_gen2_set_tier },
+ {HFI_PROP_FRAME_RATE, iris_hfi_gen2_set_frame_rate },
};
- if (V4L2_TYPE_IS_OUTPUT(plane)) {
- switch (inst->codec) {
- case V4L2_PIX_FMT_H264:
- config_params = core->iris_platform_data->input_config_params_default;
- config_params_size =
- core->iris_platform_data->input_config_params_default_size;
- break;
- case V4L2_PIX_FMT_HEVC:
- config_params = core->iris_platform_data->input_config_params_hevc;
- config_params_size =
- core->iris_platform_data->input_config_params_hevc_size;
- break;
- case V4L2_PIX_FMT_VP9:
- config_params = core->iris_platform_data->input_config_params_vp9;
- config_params_size =
- core->iris_platform_data->input_config_params_vp9_size;
- break;
+ if (inst->domain == DECODER) {
+ if (V4L2_TYPE_IS_OUTPUT(plane)) {
+ if (inst->codec == V4L2_PIX_FMT_H264) {
+ config_params = pdata->dec_input_config_params_default;
+ config_params_size = pdata->dec_input_config_params_default_size;
+ } else if (inst->codec == V4L2_PIX_FMT_HEVC) {
+ config_params = pdata->dec_input_config_params_hevc;
+ config_params_size = pdata->dec_input_config_params_hevc_size;
+ } else if (inst->codec == V4L2_PIX_FMT_VP9) {
+ config_params = pdata->dec_input_config_params_vp9;
+ config_params_size = pdata->dec_input_config_params_vp9_size;
+ } else {
+ return -EINVAL;
+ }
+ } else {
+ config_params = pdata->dec_output_config_params;
+ config_params_size = pdata->dec_output_config_params_size;
}
} else {
- config_params = core->iris_platform_data->output_config_params;
- config_params_size = core->iris_platform_data->output_config_params_size;
+ if (V4L2_TYPE_IS_OUTPUT(plane)) {
+ config_params = pdata->enc_input_config_params;
+ config_params_size = pdata->enc_input_config_params_size;
+ } else {
+ config_params = pdata->enc_output_config_params;
+ config_params_size = pdata->enc_output_config_params_size;
+ }
}
if (!config_params || !config_params_size)
@@ -459,7 +567,7 @@ static int iris_hfi_gen2_session_set_config_params(struct iris_inst *inst, u32 p
for (i = 0; i < config_params_size; i++) {
for (j = 0; j < ARRAY_SIZE(prop_type_handle_arr); j++) {
if (prop_type_handle_arr[j].type == config_params[i]) {
- ret = prop_type_handle_arr[j].handle(inst);
+ ret = prop_type_handle_arr[j].handle(inst, plane);
if (ret)
return ret;
break;
@@ -477,14 +585,19 @@ static int iris_hfi_gen2_session_set_codec(struct iris_inst *inst)
switch (inst->codec) {
case V4L2_PIX_FMT_H264:
- codec = HFI_CODEC_DECODE_AVC;
+ if (inst->domain == ENCODER)
+ codec = HFI_CODEC_ENCODE_AVC;
+ else
+ codec = HFI_CODEC_DECODE_AVC;
break;
case V4L2_PIX_FMT_HEVC:
- codec = HFI_CODEC_DECODE_HEVC;
+ if (inst->domain == ENCODER)
+ codec = HFI_CODEC_ENCODE_HEVC;
+ else
+ codec = HFI_CODEC_DECODE_HEVC;
break;
case V4L2_PIX_FMT_VP9:
codec = HFI_CODEC_DECODE_VP9;
break;
}
iris_hfi_gen2_packet_session_property(inst,
@@ -550,9 +663,11 @@ static int iris_hfi_gen2_session_open(struct iris_inst *inst)
if (ret)
goto fail_free_packet;
- ret = iris_hfi_gen2_session_set_default_header(inst);
- if (ret)
- goto fail_free_packet;
+ if (inst->domain == DECODER) {
+ ret = iris_hfi_gen2_session_set_default_header(inst);
+ if (ret)
+ goto fail_free_packet;
+ }
return 0;
@@ -601,7 +716,7 @@ static int iris_hfi_gen2_session_subscribe_mode(struct iris_inst *inst,
cmd,
(HFI_HOST_FLAGS_RESPONSE_REQUIRED |
HFI_HOST_FLAGS_INTR_REQUIRED),
- iris_hfi_gen2_get_port(plane),
+ iris_hfi_gen2_get_port(inst, plane),
inst->session_id,
payload_type,
payload,
@@ -623,6 +738,9 @@ static int iris_hfi_gen2_subscribe_change_param(struct iris_inst *inst, u32 plan
u32 hfi_port = 0, i;
int ret;
+ if (inst->domain == ENCODER)
+ return 0;
+
if ((V4L2_TYPE_IS_OUTPUT(plane) && inst_hfi_gen2->ipsc_properties_set) ||
(V4L2_TYPE_IS_CAPTURE(plane) && inst_hfi_gen2->opsc_properties_set)) {
dev_err(core->dev, "invalid plane\n");
@@ -631,19 +749,19 @@ static int iris_hfi_gen2_subscribe_change_param(struct iris_inst *inst, u32 plan
switch (inst->codec) {
case V4L2_PIX_FMT_H264:
- change_param = core->iris_platform_data->input_config_params_default;
+ change_param = core->iris_platform_data->dec_input_config_params_default;
change_param_size =
- core->iris_platform_data->input_config_params_default_size;
+ core->iris_platform_data->dec_input_config_params_default_size;
break;
case V4L2_PIX_FMT_HEVC:
- change_param = core->iris_platform_data->input_config_params_hevc;
+ change_param = core->iris_platform_data->dec_input_config_params_hevc;
change_param_size =
- core->iris_platform_data->input_config_params_hevc_size;
+ core->iris_platform_data->dec_input_config_params_hevc_size;
break;
case V4L2_PIX_FMT_VP9:
- change_param = core->iris_platform_data->input_config_params_vp9;
+ change_param = core->iris_platform_data->dec_input_config_params_vp9;
change_param_size =
- core->iris_platform_data->input_config_params_vp9_size;
+ core->iris_platform_data->dec_input_config_params_vp9_size;
break;
}
@@ -664,7 +782,7 @@ static int iris_hfi_gen2_subscribe_change_param(struct iris_inst *inst, u32 plan
if (V4L2_TYPE_IS_OUTPUT(plane)) {
inst_hfi_gen2->ipsc_properties_set = true;
} else {
- hfi_port = iris_hfi_gen2_get_port(V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ hfi_port = iris_hfi_gen2_get_port(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
memcpy(&inst_hfi_gen2->dst_subcr_params,
&inst_hfi_gen2->src_subcr_params,
sizeof(inst_hfi_gen2->src_subcr_params));
@@ -759,6 +877,9 @@ static int iris_hfi_gen2_subscribe_property(struct iris_inst *inst, u32 plane)
payload[0] = HFI_MODE_PROPERTY;
+ if (inst->domain == ENCODER)
+ return 0;
+
if (V4L2_TYPE_IS_OUTPUT(plane)) {
subscribe_prop_size = core->iris_platform_data->dec_input_prop_size;
subcribe_prop = core->iris_platform_data->dec_input_prop;
@@ -810,7 +931,7 @@ static int iris_hfi_gen2_session_start(struct iris_inst *inst, u32 plane)
HFI_CMD_START,
(HFI_HOST_FLAGS_RESPONSE_REQUIRED |
HFI_HOST_FLAGS_INTR_REQUIRED),
- iris_hfi_gen2_get_port(plane),
+ iris_hfi_gen2_get_port(inst, plane),
inst->session_id,
HFI_PAYLOAD_NONE,
NULL,
@@ -832,7 +953,7 @@ static int iris_hfi_gen2_session_stop(struct iris_inst *inst, u32 plane)
(HFI_HOST_FLAGS_RESPONSE_REQUIRED |
HFI_HOST_FLAGS_INTR_REQUIRED |
HFI_HOST_FLAGS_NON_DISCARDABLE),
- iris_hfi_gen2_get_port(plane),
+ iris_hfi_gen2_get_port(inst, plane),
inst->session_id,
HFI_PAYLOAD_NONE,
NULL,
@@ -854,7 +975,7 @@ static int iris_hfi_gen2_session_pause(struct iris_inst *inst, u32 plane)
HFI_CMD_PAUSE,
(HFI_HOST_FLAGS_RESPONSE_REQUIRED |
HFI_HOST_FLAGS_INTR_REQUIRED),
- iris_hfi_gen2_get_port(plane),
+ iris_hfi_gen2_get_port(inst, plane),
inst->session_id,
HFI_PAYLOAD_NONE,
NULL,
@@ -873,7 +994,7 @@ static int iris_hfi_gen2_session_resume_drc(struct iris_inst *inst, u32 plane)
HFI_CMD_RESUME,
(HFI_HOST_FLAGS_RESPONSE_REQUIRED |
HFI_HOST_FLAGS_INTR_REQUIRED),
- iris_hfi_gen2_get_port(plane),
+ iris_hfi_gen2_get_port(inst, plane),
inst->session_id,
HFI_PAYLOAD_U32,
&payload,
@@ -892,7 +1013,7 @@ static int iris_hfi_gen2_session_resume_drain(struct iris_inst *inst, u32 plane)
HFI_CMD_RESUME,
(HFI_HOST_FLAGS_RESPONSE_REQUIRED |
HFI_HOST_FLAGS_INTR_REQUIRED),
- iris_hfi_gen2_get_port(plane),
+ iris_hfi_gen2_get_port(inst, plane),
inst->session_id,
HFI_PAYLOAD_U32,
&payload,
@@ -914,7 +1035,7 @@ static int iris_hfi_gen2_session_drain(struct iris_inst *inst, u32 plane)
(HFI_HOST_FLAGS_RESPONSE_REQUIRED |
HFI_HOST_FLAGS_INTR_REQUIRED |
HFI_HOST_FLAGS_NON_DISCARDABLE),
- iris_hfi_gen2_get_port(plane),
+ iris_hfi_gen2_get_port(inst, plane),
inst->session_id,
HFI_PAYLOAD_NONE,
NULL,
@@ -924,13 +1045,19 @@ static int iris_hfi_gen2_session_drain(struct iris_inst *inst, u32 plane)
inst_hfi_gen2->packet->size);
}
-static u32 iris_hfi_gen2_buf_type_from_driver(enum iris_buffer_type buffer_type)
+static u32 iris_hfi_gen2_buf_type_from_driver(u32 domain, enum iris_buffer_type buffer_type)
{
switch (buffer_type) {
case BUF_INPUT:
- return HFI_BUFFER_BITSTREAM;
+ if (domain == DECODER)
+ return HFI_BUFFER_BITSTREAM;
+ else
+ return HFI_BUFFER_RAW;
case BUF_OUTPUT:
- return HFI_BUFFER_RAW;
+ if (domain == DECODER)
+ return HFI_BUFFER_RAW;
+ else
+ return HFI_BUFFER_BITSTREAM;
case BUF_BIN:
return HFI_BUFFER_BIN;
case BUF_COMV:
@@ -940,9 +1067,14 @@ static u32 iris_hfi_gen2_buf_type_from_driver(enum iris_buffer_type buffer_type)
case BUF_LINE:
return HFI_BUFFER_LINE;
case BUF_DPB:
+ case BUF_SCRATCH_2:
return HFI_BUFFER_DPB;
case BUF_PERSIST:
return HFI_BUFFER_PERSIST;
+ case BUF_ARP:
+ return HFI_BUFFER_ARP;
+ case BUF_VPSS:
+ return HFI_BUFFER_VPSS;
default:
return 0;
}
@@ -965,16 +1097,17 @@ static int iris_set_num_comv(struct iris_inst *inst)
&num_comv, sizeof(u32));
}
-static void iris_hfi_gen2_get_buffer(struct iris_buffer *buffer, struct iris_hfi_buffer *buf)
+static void iris_hfi_gen2_get_buffer(u32 domain, struct iris_buffer *buffer,
+ struct iris_hfi_buffer *buf)
{
memset(buf, 0, sizeof(*buf));
- buf->type = iris_hfi_gen2_buf_type_from_driver(buffer->type);
+ buf->type = iris_hfi_gen2_buf_type_from_driver(domain, buffer->type);
buf->index = buffer->index;
buf->base_address = buffer->device_addr;
buf->addr_offset = 0;
buf->buffer_size = buffer->buffer_size;
- if (buffer->type == BUF_INPUT)
+ if (domain == DECODER && buffer->type == BUF_INPUT)
buf->buffer_size = ALIGN(buffer->buffer_size, 256);
buf->data_offset = buffer->data_offset;
buf->data_size = buffer->data_size;
@@ -991,14 +1124,14 @@ static int iris_hfi_gen2_session_queue_buffer(struct iris_inst *inst, struct iri
u32 port;
int ret;
- iris_hfi_gen2_get_buffer(buffer, &hfi_buffer);
+ iris_hfi_gen2_get_buffer(inst->domain, buffer, &hfi_buffer);
if (buffer->type == BUF_COMV) {
ret = iris_set_num_comv(inst);
if (ret)
return ret;
}
- port = iris_hfi_gen2_get_port_from_buf_type(buffer->type);
+ port = iris_hfi_gen2_get_port_from_buf_type(inst, buffer->type);
iris_hfi_gen2_packet_session_command(inst,
HFI_CMD_BUFFER,
HFI_HOST_FLAGS_INTR_REQUIRED,
@@ -1018,9 +1151,9 @@ static int iris_hfi_gen2_session_release_buffer(struct iris_inst *inst, struct i
struct iris_hfi_buffer hfi_buffer;
u32 port;
- iris_hfi_gen2_get_buffer(buffer, &hfi_buffer);
+ iris_hfi_gen2_get_buffer(inst->domain, buffer, &hfi_buffer);
hfi_buffer.flags |= HFI_BUF_HOST_FLAG_RELEASE;
- port = iris_hfi_gen2_get_port_from_buf_type(buffer->type);
+ port = iris_hfi_gen2_get_port_from_buf_type(inst, buffer->type);
iris_hfi_gen2_packet_session_command(inst,
HFI_CMD_BUFFER,
diff --git a/drivers/media/platform/qcom/iris/iris_hfi_gen2_defines.h b/drivers/media/platform/qcom/iris/iris_hfi_gen2_defines.h
index 5f13dc11bea5..aa1f795f5626 100644
--- a/drivers/media/platform/qcom/iris/iris_hfi_gen2_defines.h
+++ b/drivers/media/platform/qcom/iris/iris_hfi_gen2_defines.h
@@ -49,18 +49,48 @@
#define HFI_PROP_TIER 0x03000109
#define HFI_PROP_STAGE 0x0300010a
#define HFI_PROP_PIPE 0x0300010b
+#define HFI_PROP_FRAME_RATE 0x0300010c
#define HFI_PROP_LUMA_CHROMA_BIT_DEPTH 0x0300010f
#define HFI_PROP_CODED_FRAMES 0x03000120
#define HFI_PROP_CABAC_SESSION 0x03000121
#define HFI_PROP_BUFFER_HOST_MAX_COUNT 0x03000123
#define HFI_PROP_BUFFER_FW_MIN_OUTPUT_COUNT 0x03000124
#define HFI_PROP_PIC_ORDER_CNT_TYPE 0x03000128
+
+enum hfi_rate_control {
+ HFI_RC_VBR_CFR = 0x00000000,
+ HFI_RC_CBR_CFR = 0x00000001,
+ HFI_RC_CQ = 0x00000002,
+ HFI_RC_OFF = 0x00000003,
+ HFI_RC_CBR_VFR = 0x00000004,
+ HFI_RC_LOSSLESS = 0x00000005,
+};
+
+#define HFI_PROP_RATE_CONTROL 0x0300012a
+#define HFI_PROP_QP_PACKED 0x0300012e
+#define HFI_PROP_MIN_QP_PACKED 0x0300012f
+#define HFI_PROP_MAX_QP_PACKED 0x03000130
+#define HFI_PROP_TOTAL_BITRATE 0x0300013b
+#define HFI_PROP_MAX_GOP_FRAMES 0x03000146
+#define HFI_PROP_MAX_B_FRAMES 0x03000147
#define HFI_PROP_QUALITY_MODE 0x03000148
+
+enum hfi_seq_header_mode {
+ HFI_SEQ_HEADER_SEPERATE_FRAME = 0x00000001,
+ HFI_SEQ_HEADER_JOINED_WITH_1ST_FRAME = 0x00000002,
+ HFI_SEQ_HEADER_PREFIX_WITH_SYNC_FRAME = 0x00000004,
+ HFI_SEQ_HEADER_METADATA = 0x00000008,
+};
+
+#define HFI_PROP_SEQ_HEADER_MODE 0x03000149
#define HFI_PROP_SIGNAL_COLOR_INFO 0x03000155
#define HFI_PROP_PICTURE_TYPE 0x03000162
#define HFI_PROP_DEC_DEFAULT_HEADER 0x03000168
#define HFI_PROP_DEC_START_FROM_RAP_FRAME 0x03000169
#define HFI_PROP_NO_OUTPUT 0x0300016a
+#define HFI_PROP_BUFFER_MARK 0x0300016c
+#define HFI_PROP_RAW_RESOLUTION 0x03000178
+#define HFI_PROP_TOTAL_PEAK_BITRATE 0x0300017C
#define HFI_PROP_COMV_BUFFER_COUNT 0x03000193
#define HFI_PROP_END 0x03FFFFFF
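/*
 * Assumption, for illustration only: HFI_PROP_QP_PACKED (and its MIN/MAX
 * variants added above) carry the I-, P- and B-frame QPs packed into a
 * single u32, one byte per frame type. A payload builder along these lines
 * would pair with the per-frame-type QP caps defined later in this series.
 */
static u32 iris_hfi_gen2_pack_qp(u32 i_qp, u32 p_qp, u32 b_qp)
{
	return i_qp | (p_qp << 8) | (b_qp << 16);
}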
@@ -111,13 +141,13 @@ enum hfi_codec_type {
};
enum hfi_picture_type {
- HFI_PICTURE_IDR = 0x00000001,
- HFI_PICTURE_P = 0x00000002,
- HFI_PICTURE_B = 0x00000004,
- HFI_PICTURE_I = 0x00000008,
- HFI_PICTURE_CRA = 0x00000010,
- HFI_PICTURE_BLA = 0x00000020,
- HFI_PICTURE_NOSHOW = 0x00000040,
+ HFI_GEN2_PICTURE_IDR = 0x00000001,
+ HFI_GEN2_PICTURE_P = 0x00000002,
+ HFI_GEN2_PICTURE_B = 0x00000004,
+ HFI_GEN2_PICTURE_I = 0x00000008,
+ HFI_GEN2_PICTURE_CRA = 0x00000010,
+ HFI_GEN2_PICTURE_BLA = 0x00000020,
+ HFI_GEN2_PICTURE_NOSHOW = 0x00000040,
};
enum hfi_buffer_type {
diff --git a/drivers/media/platform/qcom/iris/iris_hfi_gen2_response.c b/drivers/media/platform/qcom/iris/iris_hfi_gen2_response.c
index a8c30fc5c0d0..2f1f118eae4f 100644
--- a/drivers/media/platform/qcom/iris/iris_hfi_gen2_response.c
+++ b/drivers/media/platform/qcom/iris/iris_hfi_gen2_response.c
@@ -29,7 +29,8 @@ struct iris_hfi_gen2_packet_handle {
int (*handle)(struct iris_inst *inst, struct iris_hfi_packet *pkt);
};
-static u32 iris_hfi_gen2_buf_type_to_driver(enum hfi_buffer_type buf_type)
+static u32 iris_hfi_gen2_buf_type_to_driver(struct iris_inst *inst,
+ enum hfi_buffer_type buf_type)
{
switch (buf_type) {
case HFI_BUFFER_BITSTREAM:
@@ -47,7 +48,10 @@ static u32 iris_hfi_gen2_buf_type_to_driver(enum hfi_buffer_type buf_type)
case HFI_BUFFER_LINE:
return BUF_LINE;
case HFI_BUFFER_DPB:
- return BUF_DPB;
+ if (inst->domain == DECODER)
+ return BUF_DPB;
+ else
+ return BUF_SCRATCH_2;
case HFI_BUFFER_PERSIST:
return BUF_PERSIST;
default:
@@ -87,17 +91,18 @@ static bool iris_hfi_gen2_is_valid_hfi_port(u32 port, u32 buffer_type)
static int iris_hfi_gen2_get_driver_buffer_flags(struct iris_inst *inst, u32 hfi_flags)
{
- u32 keyframe = HFI_PICTURE_IDR | HFI_PICTURE_I | HFI_PICTURE_CRA | HFI_PICTURE_BLA;
struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ u32 keyframe = HFI_GEN2_PICTURE_IDR | HFI_GEN2_PICTURE_I |
+ HFI_GEN2_PICTURE_CRA | HFI_GEN2_PICTURE_BLA;
u32 driver_flags = 0;
- if (inst_hfi_gen2->hfi_frame_info.picture_type & HFI_PICTURE_NOSHOW)
+ if (inst_hfi_gen2->hfi_frame_info.picture_type & HFI_GEN2_PICTURE_NOSHOW)
driver_flags |= V4L2_BUF_FLAG_ERROR;
else if (inst_hfi_gen2->hfi_frame_info.picture_type & keyframe)
driver_flags |= V4L2_BUF_FLAG_KEYFRAME;
- else if (inst_hfi_gen2->hfi_frame_info.picture_type & HFI_PICTURE_P)
+ else if (inst_hfi_gen2->hfi_frame_info.picture_type & HFI_GEN2_PICTURE_P)
driver_flags |= V4L2_BUF_FLAG_PFRAME;
- else if (inst_hfi_gen2->hfi_frame_info.picture_type & HFI_PICTURE_B)
+ else if (inst_hfi_gen2->hfi_frame_info.picture_type & HFI_GEN2_PICTURE_B)
driver_flags |= V4L2_BUF_FLAG_BFRAME;
if (inst_hfi_gen2->hfi_frame_info.data_corrupt || inst_hfi_gen2->hfi_frame_info.overflow)
@@ -420,11 +425,10 @@ static void iris_hfi_gen2_handle_dequeue_buffers(struct iris_inst *inst)
static int iris_hfi_gen2_handle_release_internal_buffer(struct iris_inst *inst,
struct iris_hfi_buffer *buffer)
{
- u32 buf_type = iris_hfi_gen2_buf_type_to_driver(buffer->type);
+ u32 buf_type = iris_hfi_gen2_buf_type_to_driver(inst, buffer->type);
struct iris_buffers *buffers = &inst->buffers[buf_type];
struct iris_buffer *buf, *iter;
bool found = false;
- int ret = 0;
list_for_each_entry(iter, &buffers->list, list) {
if (iter->device_addr == buffer->base_address) {
@@ -437,10 +441,8 @@ static int iris_hfi_gen2_handle_release_internal_buffer(struct iris_inst *inst,
return -EINVAL;
buf->attr &= ~BUF_ATTR_QUEUED;
- if (buf->attr & BUF_ATTR_PENDING_RELEASE)
- ret = iris_destroy_internal_buffer(inst, buf);
- return ret;
+ return iris_destroy_internal_buffer(inst, buf);
}
static int iris_hfi_gen2_handle_session_stop(struct iris_inst *inst,
@@ -478,12 +480,22 @@ static int iris_hfi_gen2_handle_session_buffer(struct iris_inst *inst,
if (!iris_hfi_gen2_is_valid_hfi_port(pkt->port, buffer->type))
return 0;
- if (buffer->type == HFI_BUFFER_BITSTREAM)
- return iris_hfi_gen2_handle_input_buffer(inst, buffer);
- else if (buffer->type == HFI_BUFFER_RAW)
- return iris_hfi_gen2_handle_output_buffer(inst, buffer);
- else
- return iris_hfi_gen2_handle_release_internal_buffer(inst, buffer);
+ if (inst->domain == DECODER) {
+ if (buffer->type == HFI_BUFFER_BITSTREAM)
+ return iris_hfi_gen2_handle_input_buffer(inst, buffer);
+ else if (buffer->type == HFI_BUFFER_RAW)
+ return iris_hfi_gen2_handle_output_buffer(inst, buffer);
+ else
+ return iris_hfi_gen2_handle_release_internal_buffer(inst, buffer);
+ } else {
+ if (buffer->type == HFI_BUFFER_RAW)
+ return iris_hfi_gen2_handle_input_buffer(inst, buffer);
+ else if (buffer->type == HFI_BUFFER_BITSTREAM)
+ return iris_hfi_gen2_handle_output_buffer(inst, buffer);
+ else
+ return iris_hfi_gen2_handle_release_internal_buffer(inst, buffer);
+ }
+ return 0;
}
static int iris_hfi_gen2_handle_session_drain(struct iris_inst *inst,
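/*
 * Illustrative only, not part of this patch: the routing above reduces to
 * "which HFI buffer type is the host-facing input queue" flipping with the
 * session domain, which a predicate like this captures.
 */
static bool iris_hfi_buf_is_input(u32 domain, u32 hfi_buf_type)
{
	if (domain == DECODER)
		return hfi_buf_type == HFI_BUFFER_BITSTREAM;

	return hfi_buf_type == HFI_BUFFER_RAW;
}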
diff --git a/drivers/media/platform/qcom/iris/iris_instance.h b/drivers/media/platform/qcom/iris/iris_instance.h
index 0e1f5799b72d..5982d7adefea 100644
--- a/drivers/media/platform/qcom/iris/iris_instance.h
+++ b/drivers/media/platform/qcom/iris/iris_instance.h
@@ -12,6 +12,20 @@
#include "iris_core.h"
#include "iris_utils.h"
+#define DEFAULT_WIDTH 320
+#define DEFAULT_HEIGHT 240
+
+enum iris_fmt_type {
+ IRIS_FMT_H264,
+ IRIS_FMT_HEVC,
+ IRIS_FMT_VP9,
+};
+
+struct iris_fmt {
+ u32 pixfmt;
+ u32 type;
+};
+
/**
* struct iris_inst - holds per video instance parameters
*
@@ -24,7 +38,9 @@
* @fmt_src: structure of v4l2_format for source
* @fmt_dst: structure of v4l2_format for destination
* @ctrl_handler: reference of v4l2 ctrl handler
* @domain: domain type, encoder or decoder
* @crop: structure of crop info
+ * @compose: structure of compose info
* @completion: structure of signal completions
* @flush_completion: structure of signal completions for flush cmd
* @flush_responses_pending: counter to track number of pending flush responses
@@ -45,6 +61,9 @@
* @metadata_idx: index for metadata buffer
* @codec: codec type
* @last_buffer_dequeued: a flag to indicate that last buffer is sent by driver
+ * @frame_rate: frame rate of current instance
+ * @operating_rate: operating rate of current instance
+ * @hfi_rc_type: rate control type
*/
struct iris_inst {
@@ -57,7 +76,9 @@ struct iris_inst {
struct v4l2_format *fmt_src;
struct v4l2_format *fmt_dst;
struct v4l2_ctrl_handler ctrl_handler;
+ enum domain_type domain;
struct iris_hfi_rect_desc crop;
+ struct iris_hfi_rect_desc compose;
struct completion completion;
struct completion flush_completion;
u32 flush_responses_pending;
@@ -78,6 +99,9 @@ struct iris_inst {
u32 metadata_idx;
u32 codec;
bool last_buffer_dequeued;
+ u32 frame_rate;
+ u32 operating_rate;
+ u32 hfi_rc_type;
};
#endif
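/*
 * A sketch of how the new struct iris_fmt is presumably meant to be used:
 * per-domain format tables keyed by enum iris_fmt_type. This table is
 * hypothetical and only illustrates the pixfmt/type pairing.
 */
static const struct iris_fmt iris_enc_formats[] = {
	[IRIS_FMT_H264] = { .pixfmt = V4L2_PIX_FMT_H264, .type = IRIS_FMT_H264 },
	[IRIS_FMT_HEVC] = { .pixfmt = V4L2_PIX_FMT_HEVC, .type = IRIS_FMT_HEVC },
};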
diff --git a/drivers/media/platform/qcom/iris/iris_platform_common.h b/drivers/media/platform/qcom/iris/iris_platform_common.h
index adafdce8a856..58d05e0a112e 100644
--- a/drivers/media/platform/qcom/iris/iris_platform_common.h
+++ b/drivers/media/platform/qcom/iris/iris_platform_common.h
@@ -7,6 +7,7 @@
#define __IRIS_PLATFORM_COMMON_H__
#include <linux/bits.h>
+#include "iris_buffer.h"
struct iris_core;
struct iris_inst;
@@ -21,7 +22,13 @@ struct iris_inst;
#define DEFAULT_MAX_HOST_BUF_COUNT 64
#define DEFAULT_MAX_HOST_BURST_BUF_COUNT 256
#define DEFAULT_FPS 30
-#define NUM_MBS_8K ((8192 * 4352) / 256)
+#define MAXIMUM_FPS 480
+#define NUM_MBS_8K ((8192 * 4352) / 256)
+#define MIN_QP_8BIT 1
+#define MAX_QP 51
+#define MAX_QP_HEVC 63
+#define DEFAULT_QP 20
+#define BITRATE_DEFAULT 20000000
enum stage_type {
STAGE_1 = 1,
@@ -38,11 +45,15 @@ extern struct iris_platform_data qcs8300_data;
extern struct iris_platform_data sm8250_data;
extern struct iris_platform_data sm8550_data;
extern struct iris_platform_data sm8650_data;
+extern struct iris_platform_data sm8750_data;
enum platform_clk_type {
- IRIS_AXI_CLK,
+ IRIS_AXI_CLK, /* AXI0 on platforms with multiple AXI clocks */
IRIS_CTRL_CLK,
IRIS_HW_CLK,
+ IRIS_AXI1_CLK,
+ IRIS_CTRL_FREERUN_CLK,
+ IRIS_HW_FREERUN_CLK,
};
struct platform_clk_data {
@@ -78,6 +89,8 @@ struct platform_inst_caps {
u32 mb_cycles_fw;
u32 mb_cycles_fw_vpp;
u32 num_comv;
+ u32 max_frame_rate;
+ u32 max_operating_rate;
};
enum platform_inst_fw_cap_type {
@@ -88,6 +101,7 @@ enum platform_inst_fw_cap_type {
LEVEL_HEVC,
LEVEL_VP9,
INPUT_BUF_HOST_MAX_COUNT,
+ OUTPUT_BUF_HOST_MAX_COUNT,
STAGE,
PIPE,
POC,
@@ -95,6 +109,37 @@ enum platform_inst_fw_cap_type {
BIT_DEPTH,
RAP_FRAME,
TIER,
+ HEADER_MODE,
+ PREPEND_SPSPPS_TO_IDR,
+ BITRATE,
+ BITRATE_PEAK,
+ BITRATE_MODE,
+ FRAME_SKIP_MODE,
+ FRAME_RC_ENABLE,
+ GOP_SIZE,
+ ENTROPY_MODE,
+ MIN_FRAME_QP_H264,
+ MIN_FRAME_QP_HEVC,
+ MAX_FRAME_QP_H264,
+ MAX_FRAME_QP_HEVC,
+ I_FRAME_MIN_QP_H264,
+ I_FRAME_MIN_QP_HEVC,
+ P_FRAME_MIN_QP_H264,
+ P_FRAME_MIN_QP_HEVC,
+ B_FRAME_MIN_QP_H264,
+ B_FRAME_MIN_QP_HEVC,
+ I_FRAME_MAX_QP_H264,
+ I_FRAME_MAX_QP_HEVC,
+ P_FRAME_MAX_QP_H264,
+ P_FRAME_MAX_QP_HEVC,
+ B_FRAME_MAX_QP_H264,
+ B_FRAME_MAX_QP_HEVC,
+ I_FRAME_QP_H264,
+ I_FRAME_QP_HEVC,
+ P_FRAME_QP_H264,
+ P_FRAME_QP_HEVC,
+ B_FRAME_QP_H264,
+ B_FRAME_QP_HEVC,
INST_FW_CAP_MAX,
};
@@ -149,6 +194,7 @@ struct iris_platform_data {
void (*init_hfi_command_ops)(struct iris_core *core);
void (*init_hfi_response_ops)(struct iris_core *core);
struct iris_inst *(*get_instance)(void);
+ u32 (*get_vpu_buffer_size)(struct iris_inst *inst, enum iris_buffer_type buffer_type);
const struct vpu_ops *vpu_ops;
void (*set_preset_registers)(struct iris_core *core);
const struct icc_info *icc_tbl;
@@ -169,8 +215,10 @@ struct iris_platform_data {
const char *fwname;
u32 pas_id;
struct platform_inst_caps *inst_caps;
- struct platform_inst_fw_cap *inst_fw_caps;
- u32 inst_fw_caps_size;
+ struct platform_inst_fw_cap *inst_fw_caps_dec;
+ u32 inst_fw_caps_dec_size;
+ struct platform_inst_fw_cap *inst_fw_caps_enc;
+ u32 inst_fw_caps_enc_size;
struct tz_cp_config *tz_cp_config_data;
u32 core_arch;
u32 hw_response_timeout;
@@ -179,14 +227,20 @@ struct iris_platform_data {
u32 max_session_count;
/* max number of macroblocks per frame supported */
u32 max_core_mbpf;
- const u32 *input_config_params_default;
- unsigned int input_config_params_default_size;
- const u32 *input_config_params_hevc;
- unsigned int input_config_params_hevc_size;
- const u32 *input_config_params_vp9;
- unsigned int input_config_params_vp9_size;
- const u32 *output_config_params;
- unsigned int output_config_params_size;
+ /* max number of macroblocks per second supported */
+ u32 max_core_mbps;
+ const u32 *dec_input_config_params_default;
+ unsigned int dec_input_config_params_default_size;
+ const u32 *dec_input_config_params_hevc;
+ unsigned int dec_input_config_params_hevc_size;
+ const u32 *dec_input_config_params_vp9;
+ unsigned int dec_input_config_params_vp9_size;
+ const u32 *dec_output_config_params;
+ unsigned int dec_output_config_params_size;
+ const u32 *enc_input_config_params;
+ unsigned int enc_input_config_params_size;
+ const u32 *enc_output_config_params;
+ unsigned int enc_output_config_params_size;
const u32 *dec_input_prop;
unsigned int dec_input_prop_size;
const u32 *dec_output_prop_avc;
@@ -199,6 +253,10 @@ struct iris_platform_data {
unsigned int dec_ip_int_buf_tbl_size;
const u32 *dec_op_int_buf_tbl;
unsigned int dec_op_int_buf_tbl_size;
+ const u32 *enc_ip_int_buf_tbl;
+ unsigned int enc_ip_int_buf_tbl_size;
+ const u32 *enc_op_int_buf_tbl;
+ unsigned int enc_op_int_buf_tbl_size;
};
#endif
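/*
 * With inst_fw_caps split per domain, instance setup must pick the right
 * table. A minimal sketch of the selection (assumed helper, not part of
 * this patch):
 */
static void iris_get_inst_fw_caps(struct iris_inst *inst,
				  struct platform_inst_fw_cap **caps,
				  u32 *count)
{
	const struct iris_platform_data *pdata = inst->core->iris_platform_data;

	if (inst->domain == ENCODER) {
		*caps = pdata->inst_fw_caps_enc;
		*count = pdata->inst_fw_caps_enc_size;
	} else {
		*caps = pdata->inst_fw_caps_dec;
		*count = pdata->inst_fw_caps_dec_size;
	}
}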
diff --git a/drivers/media/platform/qcom/iris/iris_platform_gen2.c b/drivers/media/platform/qcom/iris/iris_platform_gen2.c
index d3026b2bcb70..36d69cc73986 100644
--- a/drivers/media/platform/qcom/iris/iris_platform_gen2.c
+++ b/drivers/media/platform/qcom/iris/iris_platform_gen2.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2025 Linaro Ltd
*/
#include "iris_core.h"
@@ -8,14 +9,17 @@
#include "iris_hfi_gen2.h"
#include "iris_hfi_gen2_defines.h"
#include "iris_platform_common.h"
+#include "iris_vpu_buffer.h"
#include "iris_vpu_common.h"
#include "iris_platform_qcs8300.h"
#include "iris_platform_sm8650.h"
+#include "iris_platform_sm8750.h"
#define VIDEO_ARCH_LX 1
+#define BITRATE_MAX 245000000
-static struct platform_inst_fw_cap inst_fw_cap_sm8550[] = {
+static struct platform_inst_fw_cap inst_fw_cap_sm8550_dec[] = {
{
.cap_id = PROFILE_H264,
.min = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE,
@@ -199,6 +203,393 @@ static struct platform_inst_fw_cap inst_fw_cap_sm8550[] = {
},
};
+static struct platform_inst_fw_cap inst_fw_cap_sm8550_enc[] = {
+ {
+ .cap_id = PROFILE_H264,
+ .min = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE,
+ .max = V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) |
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH) |
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE) |
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_MAIN) |
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_HIGH),
+ .value = V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
+ .hfi_id = HFI_PROP_PROFILE,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ .set = iris_set_profile,
+ },
+ {
+ .cap_id = PROFILE_HEVC,
+ .min = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN,
+ .max = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10),
+ .value = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN,
+ .hfi_id = HFI_PROP_PROFILE,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ .set = iris_set_profile,
+ },
+ {
+ .cap_id = LEVEL_H264,
+ .min = V4L2_MPEG_VIDEO_H264_LEVEL_1_0,
+ .max = V4L2_MPEG_VIDEO_H264_LEVEL_6_0,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1B) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_2) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_3) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_2_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_2_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_2_2) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_3_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_3_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_3_2) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_4_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_4_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_4_2) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_5_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_5_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_5_2) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_6_0),
+ .value = V4L2_MPEG_VIDEO_H264_LEVEL_5_0,
+ .hfi_id = HFI_PROP_LEVEL,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ .set = iris_set_level,
+ },
+ {
+ .cap_id = LEVEL_HEVC,
+ .min = V4L2_MPEG_VIDEO_HEVC_LEVEL_1,
+ .max = V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_1) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_2) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_2_1) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_3) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_3_1) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_4) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_4_1) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_5) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_5_2) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_6) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_6_1) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2),
+ .value = V4L2_MPEG_VIDEO_HEVC_LEVEL_5,
+ .hfi_id = HFI_PROP_LEVEL,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ .set = iris_set_level,
+ },
+ {
+ .cap_id = STAGE,
+ .min = STAGE_1,
+ .max = STAGE_2,
+ .step_or_mask = 1,
+ .value = STAGE_2,
+ .hfi_id = HFI_PROP_STAGE,
+ .set = iris_set_stage,
+ },
+ {
+ .cap_id = HEADER_MODE,
+ .min = V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE,
+ .max = V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE) |
+ BIT(V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME),
+ .value = V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME,
+ .hfi_id = HFI_PROP_SEQ_HEADER_MODE,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ .set = iris_set_header_mode_gen2,
+ },
+ {
+ .cap_id = PREPEND_SPSPPS_TO_IDR,
+ .min = 0,
+ .max = 1,
+ .step_or_mask = 1,
+ .value = 0,
+ },
+ {
+ .cap_id = BITRATE,
+ .min = 1,
+ .max = BITRATE_MAX,
+ .step_or_mask = 1,
+ .value = BITRATE_DEFAULT,
+ .hfi_id = HFI_PROP_TOTAL_BITRATE,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_INPUT_PORT |
+ CAP_FLAG_DYNAMIC_ALLOWED,
+ .set = iris_set_bitrate,
+ },
+ {
+ .cap_id = BITRATE_PEAK,
+ .min = 1,
+ .max = BITRATE_MAX,
+ .step_or_mask = 1,
+ .value = BITRATE_DEFAULT,
+ .hfi_id = HFI_PROP_TOTAL_PEAK_BITRATE,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_INPUT_PORT |
+ CAP_FLAG_DYNAMIC_ALLOWED,
+ .set = iris_set_peak_bitrate,
+ },
+ {
+ .cap_id = BITRATE_MODE,
+ .min = V4L2_MPEG_VIDEO_BITRATE_MODE_VBR,
+ .max = V4L2_MPEG_VIDEO_BITRATE_MODE_CBR,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_BITRATE_MODE_VBR) |
+ BIT(V4L2_MPEG_VIDEO_BITRATE_MODE_CBR),
+ .value = V4L2_MPEG_VIDEO_BITRATE_MODE_VBR,
+ .hfi_id = HFI_PROP_RATE_CONTROL,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ .set = iris_set_bitrate_mode_gen2,
+ },
+ {
+ .cap_id = FRAME_SKIP_MODE,
+ .min = V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_DISABLED,
+ .max = V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_DISABLED) |
+ BIT(V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_LEVEL_LIMIT) |
+ BIT(V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT),
+ .value = V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_DISABLED,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ },
+ {
+ .cap_id = FRAME_RC_ENABLE,
+ .min = 0,
+ .max = 1,
+ .step_or_mask = 1,
+ .value = 1,
+ },
+ {
+ .cap_id = GOP_SIZE,
+ .min = 0,
+ .max = INT_MAX,
+ .step_or_mask = 1,
+ .value = 2 * DEFAULT_FPS - 1,
+ .hfi_id = HFI_PROP_MAX_GOP_FRAMES,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_INPUT_PORT |
+ CAP_FLAG_DYNAMIC_ALLOWED,
+ .set = iris_set_u32,
+ },
+ {
+ .cap_id = ENTROPY_MODE,
+ .min = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC,
+ .max = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC) |
+ BIT(V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC),
+ .value = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC,
+ .hfi_id = HFI_PROP_CABAC_SESSION,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ .set = iris_set_entropy_mode_gen2,
+ },
+ {
+ .cap_id = MIN_FRAME_QP_H264,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MIN_QP_8BIT,
+ .hfi_id = HFI_PROP_MIN_QP_PACKED,
+ .flags = CAP_FLAG_OUTPUT_PORT,
+ .set = iris_set_min_qp,
+ },
+ {
+ .cap_id = MIN_FRAME_QP_HEVC,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MIN_QP_8BIT,
+ .hfi_id = HFI_PROP_MIN_QP_PACKED,
+ .flags = CAP_FLAG_OUTPUT_PORT,
+ .set = iris_set_min_qp,
+ },
+ {
+ .cap_id = MAX_FRAME_QP_H264,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MAX_QP,
+ .hfi_id = HFI_PROP_MAX_QP_PACKED,
+ .flags = CAP_FLAG_OUTPUT_PORT,
+ .set = iris_set_max_qp,
+ },
+ {
+ .cap_id = MAX_FRAME_QP_HEVC,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MAX_QP,
+ .hfi_id = HFI_PROP_MAX_QP_PACKED,
+ .flags = CAP_FLAG_OUTPUT_PORT,
+ .set = iris_set_max_qp,
+ },
+ {
+ .cap_id = I_FRAME_MIN_QP_H264,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MIN_QP_8BIT,
+ },
+ {
+ .cap_id = I_FRAME_MIN_QP_HEVC,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MIN_QP_8BIT,
+ },
+ {
+ .cap_id = P_FRAME_MIN_QP_H264,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MIN_QP_8BIT,
+ },
+ {
+ .cap_id = P_FRAME_MIN_QP_HEVC,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MIN_QP_8BIT,
+ },
+ {
+ .cap_id = B_FRAME_MIN_QP_H264,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MIN_QP_8BIT,
+ },
+ {
+ .cap_id = B_FRAME_MIN_QP_HEVC,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MIN_QP_8BIT,
+ },
+ {
+ .cap_id = I_FRAME_MAX_QP_H264,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MAX_QP,
+ },
+ {
+ .cap_id = I_FRAME_MAX_QP_HEVC,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MAX_QP,
+ },
+ {
+ .cap_id = P_FRAME_MAX_QP_H264,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MAX_QP,
+ },
+ {
+ .cap_id = P_FRAME_MAX_QP_HEVC,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MAX_QP,
+ },
+ {
+ .cap_id = B_FRAME_MAX_QP_H264,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MAX_QP,
+ },
+ {
+ .cap_id = B_FRAME_MAX_QP_HEVC,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MAX_QP,
+ },
+ {
+ .cap_id = I_FRAME_QP_H264,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = DEFAULT_QP,
+ .hfi_id = HFI_PROP_QP_PACKED,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_INPUT_PORT |
+ CAP_FLAG_DYNAMIC_ALLOWED,
+ .set = iris_set_frame_qp,
+ },
+ {
+ .cap_id = I_FRAME_QP_HEVC,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = DEFAULT_QP,
+ .hfi_id = HFI_PROP_QP_PACKED,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_INPUT_PORT |
+ CAP_FLAG_DYNAMIC_ALLOWED,
+ .set = iris_set_frame_qp,
+ },
+ {
+ .cap_id = P_FRAME_QP_H264,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = DEFAULT_QP,
+ .hfi_id = HFI_PROP_QP_PACKED,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_INPUT_PORT |
+ CAP_FLAG_DYNAMIC_ALLOWED,
+ .set = iris_set_frame_qp,
+ },
+ {
+ .cap_id = P_FRAME_QP_HEVC,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = DEFAULT_QP,
+ .hfi_id = HFI_PROP_QP_PACKED,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_INPUT_PORT |
+ CAP_FLAG_DYNAMIC_ALLOWED,
+ .set = iris_set_frame_qp,
+ },
+ {
+ .cap_id = B_FRAME_QP_H264,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = DEFAULT_QP,
+ .hfi_id = HFI_PROP_QP_PACKED,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_INPUT_PORT |
+ CAP_FLAG_DYNAMIC_ALLOWED,
+ .set = iris_set_frame_qp,
+ },
+ {
+ .cap_id = B_FRAME_QP_HEVC,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = DEFAULT_QP,
+ .hfi_id = HFI_PROP_QP_PACKED,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_INPUT_PORT |
+ CAP_FLAG_DYNAMIC_ALLOWED,
+ .set = iris_set_frame_qp,
+ },
+ {
+ .cap_id = INPUT_BUF_HOST_MAX_COUNT,
+ .min = DEFAULT_MAX_HOST_BUF_COUNT,
+ .max = DEFAULT_MAX_HOST_BURST_BUF_COUNT,
+ .step_or_mask = 1,
+ .value = DEFAULT_MAX_HOST_BUF_COUNT,
+ .hfi_id = HFI_PROP_BUFFER_HOST_MAX_COUNT,
+ .flags = CAP_FLAG_INPUT_PORT,
+ .set = iris_set_u32,
+ },
+ {
+ .cap_id = OUTPUT_BUF_HOST_MAX_COUNT,
+ .min = DEFAULT_MAX_HOST_BUF_COUNT,
+ .max = DEFAULT_MAX_HOST_BURST_BUF_COUNT,
+ .step_or_mask = 1,
+ .value = DEFAULT_MAX_HOST_BUF_COUNT,
+ .hfi_id = HFI_PROP_BUFFER_HOST_MAX_COUNT,
+ .flags = CAP_FLAG_OUTPUT_PORT,
+ .set = iris_set_u32,
+ },
+};
+
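/*
 * Reading aid, an assumption based on usage in the table above: for
 * CAP_FLAG_MENU entries step_or_mask is a bitmask of permitted menu items,
 * otherwise it is a plain step. A validity check would therefore look
 * roughly like this:
 */
static bool iris_cap_value_allowed(const struct platform_inst_fw_cap *cap,
				   u32 val)
{
	if (val < cap->min || val > cap->max)
		return false;

	if (cap->flags & CAP_FLAG_MENU)
		return cap->step_or_mask & BIT(val);

	return (val - cap->min) % cap->step_or_mask == 0;
}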
static struct platform_inst_caps platform_inst_cap_sm8550 = {
.min_frame_width = 96,
.max_frame_width = 8192,
@@ -209,6 +600,8 @@ static struct platform_inst_caps platform_inst_cap_sm8550 = {
.mb_cycles_fw = 489583,
.mb_cycles_fw_vpp = 66234,
.num_comv = 0,
+ .max_frame_rate = MAXIMUM_FPS,
+ .max_operating_rate = MAXIMUM_FPS,
};
static void iris_set_sm8550_preset_registers(struct iris_core *core)
@@ -289,11 +682,25 @@ static const u32 sm8550_vdec_input_config_param_vp9[] = {
HFI_PROP_LEVEL,
};
+static const u32 sm8550_venc_input_config_params[] = {
+ HFI_PROP_COLOR_FORMAT,
+ HFI_PROP_RAW_RESOLUTION,
+ HFI_PROP_CROP_OFFSETS,
+ HFI_PROP_LINEAR_STRIDE_SCANLINE,
+ HFI_PROP_SIGNAL_COLOR_INFO,
+};
+
static const u32 sm8550_vdec_output_config_params[] = {
HFI_PROP_COLOR_FORMAT,
HFI_PROP_LINEAR_STRIDE_SCANLINE,
};
+static const u32 sm8550_venc_output_config_params[] = {
+ HFI_PROP_BITSTREAM_RESOLUTION,
+ HFI_PROP_CROP_OFFSETS,
+ HFI_PROP_FRAME_RATE,
+};
+
static const u32 sm8550_vdec_subscribe_input_properties[] = {
HFI_PROP_NO_OUTPUT,
};
@@ -322,10 +729,19 @@ static const u32 sm8550_dec_op_int_buf_tbl[] = {
BUF_DPB,
};
+static const u32 sm8550_enc_op_int_buf_tbl[] = {
+ BUF_BIN,
+ BUF_COMV,
+ BUF_NON_COMV,
+ BUF_LINE,
+ BUF_SCRATCH_2,
+};
+
struct iris_platform_data sm8550_data = {
.get_instance = iris_hfi_gen2_get_instance,
.init_hfi_command_ops = iris_hfi_gen2_command_ops_init,
.init_hfi_response_ops = iris_hfi_gen2_response_ops_init,
+ .get_vpu_buffer_size = iris_vpu_buf_size,
.vpu_ops = &iris_vpu3_ops,
.set_preset_registers = iris_set_sm8550_preset_registers,
.icc_tbl = sm8550_icc_table,
@@ -345,8 +761,10 @@ struct iris_platform_data sm8550_data = {
.fwname = "qcom/vpu/vpu30_p4.mbn",
.pas_id = IRIS_PAS_ID,
.inst_caps = &platform_inst_cap_sm8550,
- .inst_fw_caps = inst_fw_cap_sm8550,
- .inst_fw_caps_size = ARRAY_SIZE(inst_fw_cap_sm8550),
+ .inst_fw_caps_dec = inst_fw_cap_sm8550_dec,
+ .inst_fw_caps_dec_size = ARRAY_SIZE(inst_fw_cap_sm8550_dec),
+ .inst_fw_caps_enc = inst_fw_cap_sm8550_enc,
+ .inst_fw_caps_enc_size = ARRAY_SIZE(inst_fw_cap_sm8550_enc),
.tz_cp_config_data = &tz_cp_config_sm8550,
.core_arch = VIDEO_ARCH_LX,
.hw_response_timeout = HW_RESPONSE_TIMEOUT_VALUE,
@@ -354,22 +772,33 @@ struct iris_platform_data sm8550_data = {
.num_vpp_pipe = 4,
.max_session_count = 16,
.max_core_mbpf = NUM_MBS_8K * 2,
- .input_config_params_default =
+ .max_core_mbps = ((7680 * 4320) / 256) * 60,
+ .dec_input_config_params_default =
sm8550_vdec_input_config_params_default,
- .input_config_params_default_size =
+ .dec_input_config_params_default_size =
ARRAY_SIZE(sm8550_vdec_input_config_params_default),
- .input_config_params_hevc =
+ .dec_input_config_params_hevc =
sm8550_vdec_input_config_param_hevc,
- .input_config_params_hevc_size =
+ .dec_input_config_params_hevc_size =
ARRAY_SIZE(sm8550_vdec_input_config_param_hevc),
- .input_config_params_vp9 =
+ .dec_input_config_params_vp9 =
sm8550_vdec_input_config_param_vp9,
- .input_config_params_vp9_size =
+ .dec_input_config_params_vp9_size =
ARRAY_SIZE(sm8550_vdec_input_config_param_vp9),
- .output_config_params =
+ .dec_output_config_params =
sm8550_vdec_output_config_params,
- .output_config_params_size =
+ .dec_output_config_params_size =
ARRAY_SIZE(sm8550_vdec_output_config_params),
+
+ .enc_input_config_params =
+ sm8550_venc_input_config_params,
+ .enc_input_config_params_size =
+ ARRAY_SIZE(sm8550_venc_input_config_params),
+ .enc_output_config_params =
+ sm8550_venc_output_config_params,
+ .enc_output_config_params_size =
+ ARRAY_SIZE(sm8550_venc_output_config_params),
+
.dec_input_prop = sm8550_vdec_subscribe_input_properties,
.dec_input_prop_size = ARRAY_SIZE(sm8550_vdec_subscribe_input_properties),
.dec_output_prop_avc = sm8550_vdec_subscribe_output_properties_avc,
@@ -386,6 +815,9 @@ struct iris_platform_data sm8550_data = {
.dec_ip_int_buf_tbl_size = ARRAY_SIZE(sm8550_dec_ip_int_buf_tbl),
.dec_op_int_buf_tbl = sm8550_dec_op_int_buf_tbl,
.dec_op_int_buf_tbl_size = ARRAY_SIZE(sm8550_dec_op_int_buf_tbl),
+
+ .enc_op_int_buf_tbl = sm8550_enc_op_int_buf_tbl,
+ .enc_op_int_buf_tbl_size = ARRAY_SIZE(sm8550_enc_op_int_buf_tbl),
};
/*
@@ -399,6 +831,7 @@ struct iris_platform_data sm8650_data = {
.get_instance = iris_hfi_gen2_get_instance,
.init_hfi_command_ops = iris_hfi_gen2_command_ops_init,
.init_hfi_response_ops = iris_hfi_gen2_response_ops_init,
+ .get_vpu_buffer_size = iris_vpu33_buf_size,
.vpu_ops = &iris_vpu33_ops,
.set_preset_registers = iris_set_sm8550_preset_registers,
.icc_tbl = sm8550_icc_table,
@@ -420,8 +853,10 @@ struct iris_platform_data sm8650_data = {
.fwname = "qcom/vpu/vpu33_p4.mbn",
.pas_id = IRIS_PAS_ID,
.inst_caps = &platform_inst_cap_sm8550,
- .inst_fw_caps = inst_fw_cap_sm8550,
- .inst_fw_caps_size = ARRAY_SIZE(inst_fw_cap_sm8550),
+ .inst_fw_caps_dec = inst_fw_cap_sm8550_dec,
+ .inst_fw_caps_dec_size = ARRAY_SIZE(inst_fw_cap_sm8550_dec),
+ .inst_fw_caps_enc = inst_fw_cap_sm8550_enc,
+ .inst_fw_caps_enc_size = ARRAY_SIZE(inst_fw_cap_sm8550_enc),
.tz_cp_config_data = &tz_cp_config_sm8550,
.core_arch = VIDEO_ARCH_LX,
.hw_response_timeout = HW_RESPONSE_TIMEOUT_VALUE,
@@ -429,22 +864,33 @@ struct iris_platform_data sm8650_data = {
.num_vpp_pipe = 4,
.max_session_count = 16,
.max_core_mbpf = NUM_MBS_8K * 2,
- .input_config_params_default =
+ .max_core_mbps = ((7680 * 4320) / 256) * 60,
+ .dec_input_config_params_default =
sm8550_vdec_input_config_params_default,
- .input_config_params_default_size =
+ .dec_input_config_params_default_size =
ARRAY_SIZE(sm8550_vdec_input_config_params_default),
- .input_config_params_hevc =
+ .dec_input_config_params_hevc =
sm8550_vdec_input_config_param_hevc,
- .input_config_params_hevc_size =
+ .dec_input_config_params_hevc_size =
ARRAY_SIZE(sm8550_vdec_input_config_param_hevc),
- .input_config_params_vp9 =
+ .dec_input_config_params_vp9 =
sm8550_vdec_input_config_param_vp9,
- .input_config_params_vp9_size =
+ .dec_input_config_params_vp9_size =
ARRAY_SIZE(sm8550_vdec_input_config_param_vp9),
- .output_config_params =
+ .dec_output_config_params =
sm8550_vdec_output_config_params,
- .output_config_params_size =
+ .dec_output_config_params_size =
ARRAY_SIZE(sm8550_vdec_output_config_params),
+
+ .enc_input_config_params =
+ sm8550_venc_input_config_params,
+ .enc_input_config_params_size =
+ ARRAY_SIZE(sm8550_venc_input_config_params),
+ .enc_output_config_params =
+ sm8550_venc_output_config_params,
+ .enc_output_config_params_size =
+ ARRAY_SIZE(sm8550_venc_output_config_params),
+
.dec_input_prop = sm8550_vdec_subscribe_input_properties,
.dec_input_prop_size = ARRAY_SIZE(sm8550_vdec_subscribe_input_properties),
.dec_output_prop_avc = sm8550_vdec_subscribe_output_properties_avc,
@@ -461,6 +907,90 @@ struct iris_platform_data sm8650_data = {
.dec_ip_int_buf_tbl_size = ARRAY_SIZE(sm8550_dec_ip_int_buf_tbl),
.dec_op_int_buf_tbl = sm8550_dec_op_int_buf_tbl,
.dec_op_int_buf_tbl_size = ARRAY_SIZE(sm8550_dec_op_int_buf_tbl),
+
+ .enc_op_int_buf_tbl = sm8550_enc_op_int_buf_tbl,
+ .enc_op_int_buf_tbl_size = ARRAY_SIZE(sm8550_enc_op_int_buf_tbl),
+};
+
+struct iris_platform_data sm8750_data = {
+ .get_instance = iris_hfi_gen2_get_instance,
+ .init_hfi_command_ops = iris_hfi_gen2_command_ops_init,
+ .init_hfi_response_ops = iris_hfi_gen2_response_ops_init,
+ .vpu_ops = &iris_vpu35_ops,
+ .set_preset_registers = iris_set_sm8550_preset_registers,
+ .icc_tbl = sm8550_icc_table,
+ .icc_tbl_size = ARRAY_SIZE(sm8550_icc_table),
+ .clk_rst_tbl = sm8750_clk_reset_table,
+ .clk_rst_tbl_size = ARRAY_SIZE(sm8750_clk_reset_table),
+ .bw_tbl_dec = sm8550_bw_table_dec,
+ .bw_tbl_dec_size = ARRAY_SIZE(sm8550_bw_table_dec),
+ .pmdomain_tbl = sm8550_pmdomain_table,
+ .pmdomain_tbl_size = ARRAY_SIZE(sm8550_pmdomain_table),
+ .opp_pd_tbl = sm8550_opp_pd_table,
+ .opp_pd_tbl_size = ARRAY_SIZE(sm8550_opp_pd_table),
+ .clk_tbl = sm8750_clk_table,
+ .clk_tbl_size = ARRAY_SIZE(sm8750_clk_table),
+ /* Upper bound of DMA address range */
+ .dma_mask = 0xe0000000 - 1,
+ .fwname = "qcom/vpu/vpu35_p4.mbn",
+ .pas_id = IRIS_PAS_ID,
+ .inst_caps = &platform_inst_cap_sm8550,
+ .inst_fw_caps_dec = inst_fw_cap_sm8550_dec,
+ .inst_fw_caps_dec_size = ARRAY_SIZE(inst_fw_cap_sm8550_dec),
+ .inst_fw_caps_enc = inst_fw_cap_sm8550_enc,
+ .inst_fw_caps_enc_size = ARRAY_SIZE(inst_fw_cap_sm8550_enc),
+ .tz_cp_config_data = &tz_cp_config_sm8550,
+ .core_arch = VIDEO_ARCH_LX,
+ .hw_response_timeout = HW_RESPONSE_TIMEOUT_VALUE,
+ .ubwc_config = &ubwc_config_sm8550,
+ .num_vpp_pipe = 4,
+ .max_session_count = 16,
+ .max_core_mbpf = NUM_MBS_8K * 2,
+ .dec_input_config_params_default =
+ sm8550_vdec_input_config_params_default,
+ .dec_input_config_params_default_size =
+ ARRAY_SIZE(sm8550_vdec_input_config_params_default),
+ .dec_input_config_params_hevc =
+ sm8550_vdec_input_config_param_hevc,
+ .dec_input_config_params_hevc_size =
+ ARRAY_SIZE(sm8550_vdec_input_config_param_hevc),
+ .dec_input_config_params_vp9 =
+ sm8550_vdec_input_config_param_vp9,
+ .dec_input_config_params_vp9_size =
+ ARRAY_SIZE(sm8550_vdec_input_config_param_vp9),
+ .dec_output_config_params =
+ sm8550_vdec_output_config_params,
+ .dec_output_config_params_size =
+ ARRAY_SIZE(sm8550_vdec_output_config_params),
+
+ .enc_input_config_params =
+ sm8550_venc_input_config_params,
+ .enc_input_config_params_size =
+ ARRAY_SIZE(sm8550_venc_input_config_params),
+ .enc_output_config_params =
+ sm8550_venc_output_config_params,
+ .enc_output_config_params_size =
+ ARRAY_SIZE(sm8550_venc_output_config_params),
+
+ .dec_input_prop = sm8550_vdec_subscribe_input_properties,
+ .dec_input_prop_size = ARRAY_SIZE(sm8550_vdec_subscribe_input_properties),
+ .dec_output_prop_avc = sm8550_vdec_subscribe_output_properties_avc,
+ .dec_output_prop_avc_size =
+ ARRAY_SIZE(sm8550_vdec_subscribe_output_properties_avc),
+ .dec_output_prop_hevc = sm8550_vdec_subscribe_output_properties_hevc,
+ .dec_output_prop_hevc_size =
+ ARRAY_SIZE(sm8550_vdec_subscribe_output_properties_hevc),
+ .dec_output_prop_vp9 = sm8550_vdec_subscribe_output_properties_vp9,
+ .dec_output_prop_vp9_size =
+ ARRAY_SIZE(sm8550_vdec_subscribe_output_properties_vp9),
+
+ .dec_ip_int_buf_tbl = sm8550_dec_ip_int_buf_tbl,
+ .dec_ip_int_buf_tbl_size = ARRAY_SIZE(sm8550_dec_ip_int_buf_tbl),
+ .dec_op_int_buf_tbl = sm8550_dec_op_int_buf_tbl,
+ .dec_op_int_buf_tbl_size = ARRAY_SIZE(sm8550_dec_op_int_buf_tbl),
+
+ .enc_op_int_buf_tbl = sm8550_enc_op_int_buf_tbl,
+ .enc_op_int_buf_tbl_size = ARRAY_SIZE(sm8550_enc_op_int_buf_tbl),
};
/*
@@ -472,6 +1002,7 @@ struct iris_platform_data qcs8300_data = {
.get_instance = iris_hfi_gen2_get_instance,
.init_hfi_command_ops = iris_hfi_gen2_command_ops_init,
.init_hfi_response_ops = iris_hfi_gen2_response_ops_init,
+ .get_vpu_buffer_size = iris_vpu_buf_size,
.vpu_ops = &iris_vpu3_ops,
.set_preset_registers = iris_set_sm8550_preset_registers,
.icc_tbl = sm8550_icc_table,
@@ -491,8 +1022,10 @@ struct iris_platform_data qcs8300_data = {
.fwname = "qcom/vpu/vpu30_p4_s6.mbn",
.pas_id = IRIS_PAS_ID,
.inst_caps = &platform_inst_cap_qcs8300,
- .inst_fw_caps = inst_fw_cap_qcs8300,
- .inst_fw_caps_size = ARRAY_SIZE(inst_fw_cap_qcs8300),
+ .inst_fw_caps_dec = inst_fw_cap_qcs8300_dec,
+ .inst_fw_caps_dec_size = ARRAY_SIZE(inst_fw_cap_qcs8300_dec),
+ .inst_fw_caps_enc = inst_fw_cap_qcs8300_enc,
+ .inst_fw_caps_enc_size = ARRAY_SIZE(inst_fw_cap_qcs8300_enc),
.tz_cp_config_data = &tz_cp_config_sm8550,
.core_arch = VIDEO_ARCH_LX,
.hw_response_timeout = HW_RESPONSE_TIMEOUT_VALUE,
@@ -500,22 +1033,33 @@ struct iris_platform_data qcs8300_data = {
.num_vpp_pipe = 2,
.max_session_count = 16,
.max_core_mbpf = ((4096 * 2176) / 256) * 4,
- .input_config_params_default =
+ .max_core_mbps = (((3840 * 2176) / 256) * 120),
+ .dec_input_config_params_default =
sm8550_vdec_input_config_params_default,
- .input_config_params_default_size =
+ .dec_input_config_params_default_size =
ARRAY_SIZE(sm8550_vdec_input_config_params_default),
- .input_config_params_hevc =
+ .dec_input_config_params_hevc =
sm8550_vdec_input_config_param_hevc,
- .input_config_params_hevc_size =
+ .dec_input_config_params_hevc_size =
ARRAY_SIZE(sm8550_vdec_input_config_param_hevc),
- .input_config_params_vp9 =
+ .dec_input_config_params_vp9 =
sm8550_vdec_input_config_param_vp9,
- .input_config_params_vp9_size =
+ .dec_input_config_params_vp9_size =
ARRAY_SIZE(sm8550_vdec_input_config_param_vp9),
- .output_config_params =
+ .dec_output_config_params =
sm8550_vdec_output_config_params,
- .output_config_params_size =
+ .dec_output_config_params_size =
ARRAY_SIZE(sm8550_vdec_output_config_params),
+
+ .enc_input_config_params =
+ sm8550_venc_input_config_params,
+ .enc_input_config_params_size =
+ ARRAY_SIZE(sm8550_venc_input_config_params),
+ .enc_output_config_params =
+ sm8550_venc_output_config_params,
+ .enc_output_config_params_size =
+ ARRAY_SIZE(sm8550_venc_output_config_params),
+
.dec_input_prop = sm8550_vdec_subscribe_input_properties,
.dec_input_prop_size = ARRAY_SIZE(sm8550_vdec_subscribe_input_properties),
.dec_output_prop_avc = sm8550_vdec_subscribe_output_properties_avc,
@@ -532,4 +1076,7 @@ struct iris_platform_data qcs8300_data = {
.dec_ip_int_buf_tbl_size = ARRAY_SIZE(sm8550_dec_ip_int_buf_tbl),
.dec_op_int_buf_tbl = sm8550_dec_op_int_buf_tbl,
.dec_op_int_buf_tbl_size = ARRAY_SIZE(sm8550_dec_op_int_buf_tbl),
+
+ .enc_op_int_buf_tbl = sm8550_enc_op_int_buf_tbl,
+ .enc_op_int_buf_tbl_size = ARRAY_SIZE(sm8550_enc_op_int_buf_tbl),
};
diff --git a/drivers/media/platform/qcom/iris/iris_platform_qcs8300.h b/drivers/media/platform/qcom/iris/iris_platform_qcs8300.h
index a8d66ed388a3..35ea0efade73 100644
--- a/drivers/media/platform/qcom/iris/iris_platform_qcs8300.h
+++ b/drivers/media/platform/qcom/iris/iris_platform_qcs8300.h
@@ -3,7 +3,9 @@
* Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
-static struct platform_inst_fw_cap inst_fw_cap_qcs8300[] = {
+#define BITRATE_MAX 245000000
+
+static struct platform_inst_fw_cap inst_fw_cap_qcs8300_dec[] = {
{
.cap_id = PROFILE_H264,
.min = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE,
@@ -187,6 +189,352 @@ static struct platform_inst_fw_cap inst_fw_cap_qcs8300[] = {
},
};
+static struct platform_inst_fw_cap inst_fw_cap_qcs8300_enc[] = {
+ {
+ .cap_id = PROFILE_H264,
+ .min = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE,
+ .max = V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) |
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH) |
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE) |
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_MAIN) |
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_HIGH),
+ .value = V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
+ .hfi_id = HFI_PROP_PROFILE,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ },
+ {
+ .cap_id = PROFILE_HEVC,
+ .min = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN,
+ .max = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10),
+ .value = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN,
+ .hfi_id = HFI_PROP_PROFILE,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ },
+ {
+ .cap_id = LEVEL_H264,
+ .min = V4L2_MPEG_VIDEO_H264_LEVEL_1_0,
+ .max = V4L2_MPEG_VIDEO_H264_LEVEL_6_0,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1B) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_2) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_3) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_2_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_2_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_2_2) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_3_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_3_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_3_2) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_4_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_4_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_4_2) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_5_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_5_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_5_2) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_6_0),
+ .value = V4L2_MPEG_VIDEO_H264_LEVEL_5_0,
+ .hfi_id = HFI_PROP_LEVEL,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ },
+ {
+ .cap_id = LEVEL_HEVC,
+ .min = V4L2_MPEG_VIDEO_HEVC_LEVEL_1,
+ .max = V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_1) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_2) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_2_1) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_3) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_3_1) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_4) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_4_1) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_5) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_5_2) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_6) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_6_1) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2),
+ .value = V4L2_MPEG_VIDEO_HEVC_LEVEL_5,
+ .hfi_id = HFI_PROP_LEVEL,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ },
+ {
+ .cap_id = STAGE,
+ .min = STAGE_1,
+ .max = STAGE_2,
+ .step_or_mask = 1,
+ .value = STAGE_2,
+ .hfi_id = HFI_PROP_STAGE,
+ },
+ {
+ .cap_id = HEADER_MODE,
+ .min = V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE,
+ .max = V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE) |
+ BIT(V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME),
+ .value = V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME,
+ .hfi_id = HFI_PROP_SEQ_HEADER_MODE,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ },
+ {
+ .cap_id = PREPEND_SPSPPS_TO_IDR,
+ .min = 0,
+ .max = 1,
+ .step_or_mask = 1,
+ .value = 0,
+ },
+ {
+ .cap_id = BITRATE,
+ .min = 1,
+ .max = BITRATE_MAX,
+ .step_or_mask = 1,
+ .value = BITRATE_DEFAULT,
+ .hfi_id = HFI_PROP_TOTAL_BITRATE,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_INPUT_PORT |
+ CAP_FLAG_DYNAMIC_ALLOWED,
+ },
+ {
+ .cap_id = BITRATE_PEAK,
+ .min = 1,
+ .max = BITRATE_MAX,
+ .step_or_mask = 1,
+ .value = BITRATE_DEFAULT,
+ .hfi_id = HFI_PROP_TOTAL_PEAK_BITRATE,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_INPUT_PORT |
+ CAP_FLAG_DYNAMIC_ALLOWED,
+ },
+ {
+ .cap_id = BITRATE_MODE,
+ .min = V4L2_MPEG_VIDEO_BITRATE_MODE_VBR,
+ .max = V4L2_MPEG_VIDEO_BITRATE_MODE_CBR,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_BITRATE_MODE_VBR) |
+ BIT(V4L2_MPEG_VIDEO_BITRATE_MODE_CBR),
+ .value = V4L2_MPEG_VIDEO_BITRATE_MODE_VBR,
+ .hfi_id = HFI_PROP_RATE_CONTROL,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ },
+ {
+ .cap_id = FRAME_SKIP_MODE,
+ .min = V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_DISABLED,
+ .max = V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_DISABLED) |
+ BIT(V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_LEVEL_LIMIT) |
+ BIT(V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT),
+ .value = V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_DISABLED,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ },
+ {
+ .cap_id = FRAME_RC_ENABLE,
+ .min = 0,
+ .max = 1,
+ .step_or_mask = 1,
+ .value = 1,
+ },
+ {
+ .cap_id = GOP_SIZE,
+ .min = 0,
+ .max = INT_MAX,
+ .step_or_mask = 1,
+ .value = 2 * DEFAULT_FPS - 1,
+ .hfi_id = HFI_PROP_MAX_GOP_FRAMES,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_INPUT_PORT |
+ CAP_FLAG_DYNAMIC_ALLOWED,
+ },
+ {
+ .cap_id = ENTROPY_MODE,
+ .min = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC,
+ .max = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC) |
+ BIT(V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC),
+ .value = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC,
+ .hfi_id = HFI_PROP_CABAC_SESSION,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ },
+ {
+ .cap_id = MIN_FRAME_QP_H264,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MIN_QP_8BIT,
+ .hfi_id = HFI_PROP_MIN_QP_PACKED,
+ .flags = CAP_FLAG_OUTPUT_PORT,
+ },
+ {
+ .cap_id = MIN_FRAME_QP_HEVC,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MIN_QP_8BIT,
+ .hfi_id = HFI_PROP_MIN_QP_PACKED,
+ .flags = CAP_FLAG_OUTPUT_PORT,
+ },
+ {
+ .cap_id = MAX_FRAME_QP_H264,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MAX_QP,
+ .hfi_id = HFI_PROP_MAX_QP_PACKED,
+ .flags = CAP_FLAG_OUTPUT_PORT,
+ },
+ {
+ .cap_id = MAX_FRAME_QP_HEVC,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MAX_QP,
+ .hfi_id = HFI_PROP_MAX_QP_PACKED,
+ .flags = CAP_FLAG_OUTPUT_PORT,
+ },
+ {
+ .cap_id = I_FRAME_MIN_QP_H264,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MIN_QP_8BIT,
+ },
+ {
+ .cap_id = I_FRAME_MIN_QP_HEVC,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MIN_QP_8BIT,
+ },
+ {
+ .cap_id = P_FRAME_MIN_QP_H264,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MIN_QP_8BIT,
+ },
+ {
+ .cap_id = P_FRAME_MIN_QP_HEVC,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MIN_QP_8BIT,
+ },
+ {
+ .cap_id = B_FRAME_MIN_QP_H264,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MIN_QP_8BIT,
+ },
+ {
+ .cap_id = B_FRAME_MIN_QP_HEVC,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MIN_QP_8BIT,
+ },
+ {
+ .cap_id = I_FRAME_MAX_QP_H264,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MAX_QP,
+ },
+ {
+ .cap_id = I_FRAME_MAX_QP_HEVC,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MAX_QP,
+ },
+ {
+ .cap_id = P_FRAME_MAX_QP_H264,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MAX_QP,
+ },
+ {
+ .cap_id = P_FRAME_MAX_QP_HEVC,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MAX_QP,
+ },
+ {
+ .cap_id = B_FRAME_MAX_QP_H264,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MAX_QP,
+ },
+ {
+ .cap_id = B_FRAME_MAX_QP_HEVC,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MAX_QP,
+ },
+ {
+ .cap_id = I_FRAME_QP_H264,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = DEFAULT_QP,
+ .hfi_id = HFI_PROP_QP_PACKED,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_INPUT_PORT |
+ CAP_FLAG_DYNAMIC_ALLOWED,
+ },
+ {
+ .cap_id = I_FRAME_QP_HEVC,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = DEFAULT_QP,
+ .hfi_id = HFI_PROP_QP_PACKED,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_INPUT_PORT |
+ CAP_FLAG_DYNAMIC_ALLOWED,
+ },
+ {
+ .cap_id = P_FRAME_QP_H264,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = DEFAULT_QP,
+ .hfi_id = HFI_PROP_QP_PACKED,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_INPUT_PORT |
+ CAP_FLAG_DYNAMIC_ALLOWED,
+ },
+ {
+ .cap_id = P_FRAME_QP_HEVC,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = DEFAULT_QP,
+ .hfi_id = HFI_PROP_QP_PACKED,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_INPUT_PORT |
+ CAP_FLAG_DYNAMIC_ALLOWED,
+ },
+ {
+ .cap_id = B_FRAME_QP_H264,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = DEFAULT_QP,
+ .hfi_id = HFI_PROP_QP_PACKED,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_INPUT_PORT |
+ CAP_FLAG_DYNAMIC_ALLOWED,
+ },
+ {
+ .cap_id = B_FRAME_QP_HEVC,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = DEFAULT_QP,
+ .hfi_id = HFI_PROP_QP_PACKED,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_INPUT_PORT |
+ CAP_FLAG_DYNAMIC_ALLOWED,
+ },
+};
+
static struct platform_inst_caps platform_inst_cap_qcs8300 = {
.min_frame_width = 96,
.max_frame_width = 4096,
@@ -197,4 +545,6 @@ static struct platform_inst_caps platform_inst_cap_qcs8300 = {
.mb_cycles_fw = 326389,
.mb_cycles_fw_vpp = 44156,
.num_comv = 0,
+ .max_frame_rate = MAXIMUM_FPS,
+ .max_operating_rate = MAXIMUM_FPS,
};
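/*
 * Sketch of how the new max_core_mbps limit might be enforced, assuming
 * 16x16 macroblocks as in the NUM_MBS_8K definition earlier in this file.
 * Hypothetical helper, not part of this patch.
 */
static bool iris_load_within_mbps_limit(struct iris_core *core,
					u32 width, u32 height, u32 fps)
{
	u32 mbpf = DIV_ROUND_UP(width, 16) * DIV_ROUND_UP(height, 16);

	return (u64)mbpf * fps <= core->iris_platform_data->max_core_mbps;
}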
diff --git a/drivers/media/platform/qcom/iris/iris_platform_sm8250.c b/drivers/media/platform/qcom/iris/iris_platform_sm8250.c
index 8d0816a67ae0..16486284f8ac 100644
--- a/drivers/media/platform/qcom/iris/iris_platform_sm8250.c
+++ b/drivers/media/platform/qcom/iris/iris_platform_sm8250.c
@@ -9,9 +9,15 @@
#include "iris_resources.h"
#include "iris_hfi_gen1.h"
#include "iris_hfi_gen1_defines.h"
+#include "iris_vpu_buffer.h"
#include "iris_vpu_common.h"
-static struct platform_inst_fw_cap inst_fw_cap_sm8250[] = {
+#define BITRATE_MIN 32000
+#define BITRATE_MAX 160000000
+#define BITRATE_PEAK_DEFAULT (BITRATE_DEFAULT * 2)
+#define BITRATE_STEP 100
+
+static struct platform_inst_fw_cap inst_fw_cap_sm8250_dec[] = {
{
.cap_id = PIPE,
.min = PIPE_1,
@@ -32,6 +38,200 @@ static struct platform_inst_fw_cap inst_fw_cap_sm8250[] = {
},
};
+static struct platform_inst_fw_cap inst_fw_cap_sm8250_enc[] = {
+ {
+ .cap_id = STAGE,
+ .min = STAGE_1,
+ .max = STAGE_2,
+ .step_or_mask = 1,
+ .value = STAGE_2,
+ .hfi_id = HFI_PROPERTY_PARAM_WORK_MODE,
+ .set = iris_set_stage,
+ },
+ {
+ .cap_id = PROFILE_H264,
+ .min = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE,
+ .max = V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) |
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE) |
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_MAIN) |
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_HIGH) |
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH) |
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH),
+ .value = V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
+ .hfi_id = HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ .set = iris_set_profile_level_gen1,
+ },
+ {
+ .cap_id = PROFILE_HEVC,
+ .min = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN,
+ .max = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10),
+ .value = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN,
+ .hfi_id = HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ .set = iris_set_profile_level_gen1,
+ },
+ {
+ .cap_id = LEVEL_H264,
+ .min = V4L2_MPEG_VIDEO_H264_LEVEL_1_0,
+ .max = V4L2_MPEG_VIDEO_H264_LEVEL_5_1,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1B) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_2) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_3) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_2_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_2_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_2_2) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_3_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_3_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_3_2) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_4_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_4_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_4_2) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_5_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_5_1),
+ .value = V4L2_MPEG_VIDEO_H264_LEVEL_1_0,
+ .hfi_id = HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ .set = iris_set_profile_level_gen1,
+ },
+ {
+ .cap_id = LEVEL_HEVC,
+ .min = V4L2_MPEG_VIDEO_HEVC_LEVEL_1,
+ .max = V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_1) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_2) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_2_1) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_3) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_3_1) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_4) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_4_1) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_5) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_5_2) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_6) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_6_1) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2),
+ .value = V4L2_MPEG_VIDEO_HEVC_LEVEL_1,
+ .hfi_id = HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ .set = iris_set_profile_level_gen1,
+ },
+ {
+ .cap_id = HEADER_MODE,
+ .min = V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE,
+ .max = V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE) |
+ BIT(V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME),
+ .value = V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME,
+ .hfi_id = HFI_PROPERTY_CONFIG_VENC_SYNC_FRAME_SEQUENCE_HEADER,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ .set = iris_set_header_mode_gen1,
+ },
+ {
+ .cap_id = BITRATE,
+ .min = BITRATE_MIN,
+ .max = BITRATE_MAX,
+ .step_or_mask = BITRATE_STEP,
+ .value = BITRATE_DEFAULT,
+ .hfi_id = HFI_PROPERTY_CONFIG_VENC_TARGET_BITRATE,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_INPUT_PORT |
+ CAP_FLAG_DYNAMIC_ALLOWED,
+ .set = iris_set_bitrate,
+ },
+ {
+ .cap_id = BITRATE_MODE,
+ .min = V4L2_MPEG_VIDEO_BITRATE_MODE_VBR,
+ .max = V4L2_MPEG_VIDEO_BITRATE_MODE_CBR,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_BITRATE_MODE_VBR) |
+ BIT(V4L2_MPEG_VIDEO_BITRATE_MODE_CBR),
+ .value = V4L2_MPEG_VIDEO_BITRATE_MODE_VBR,
+ .hfi_id = HFI_PROPERTY_PARAM_VENC_RATE_CONTROL,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ .set = iris_set_bitrate_mode_gen1,
+ },
+ {
+ .cap_id = FRAME_SKIP_MODE,
+ .min = V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_DISABLED,
+ .max = V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_DISABLED) |
+ BIT(V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT),
+ .value = V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_DISABLED,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ },
+ {
+ .cap_id = FRAME_RC_ENABLE,
+ .min = 0,
+ .max = 1,
+ .step_or_mask = 1,
+ .value = 1,
+ },
+ {
+ .cap_id = GOP_SIZE,
+ .min = 0,
+ .max = (1 << 16) - 1,
+ .step_or_mask = 1,
+ .value = 30,
+ .set = iris_set_u32,
+ },
+ {
+ .cap_id = ENTROPY_MODE,
+ .min = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC,
+ .max = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC) |
+ BIT(V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC),
+ .value = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC,
+ .hfi_id = HFI_PROPERTY_PARAM_VENC_H264_ENTROPY_CONTROL,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ .set = iris_set_entropy_mode_gen1,
+ },
+ {
+ .cap_id = MIN_FRAME_QP_H264,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MIN_QP_8BIT,
+ .hfi_id = HFI_PROPERTY_PARAM_VENC_SESSION_QP_RANGE_V2,
+ .flags = CAP_FLAG_OUTPUT_PORT,
+ .set = iris_set_qp_range,
+ },
+ {
+ .cap_id = MIN_FRAME_QP_HEVC,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP_HEVC,
+ .step_or_mask = 1,
+ .value = MIN_QP_8BIT,
+ .hfi_id = HFI_PROPERTY_PARAM_VENC_SESSION_QP_RANGE_V2,
+ .flags = CAP_FLAG_OUTPUT_PORT,
+ .set = iris_set_qp_range,
+ },
+ {
+ .cap_id = MAX_FRAME_QP_H264,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MAX_QP,
+ .hfi_id = HFI_PROPERTY_PARAM_VENC_SESSION_QP_RANGE_V2,
+ .flags = CAP_FLAG_OUTPUT_PORT,
+ .set = iris_set_qp_range,
+ },
+ {
+ .cap_id = MAX_FRAME_QP_HEVC,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP_HEVC,
+ .step_or_mask = 1,
+ .value = MAX_QP_HEVC,
+ .hfi_id = HFI_PROPERTY_PARAM_VENC_SESSION_QP_RANGE_V2,
+ .flags = CAP_FLAG_OUTPUT_PORT,
+ .set = iris_set_qp_range,
+ },
+};
+
static struct platform_inst_caps platform_inst_cap_sm8250 = {
.min_frame_width = 128,
.max_frame_width = 8192,
@@ -40,6 +240,8 @@ static struct platform_inst_caps platform_inst_cap_sm8250 = {
.max_mbpf = 138240,
.mb_cycles_vsp = 25,
.mb_cycles_vpp = 200,
+ .max_frame_rate = MAXIMUM_FPS,
+ .max_operating_rate = MAXIMUM_FPS,
};
static void iris_set_sm8250_preset_registers(struct iris_core *core)
@@ -89,6 +291,14 @@ static const u32 sm8250_vdec_input_config_param_default[] = {
HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE,
};
+static const u32 sm8250_venc_input_config_param[] = {
+ HFI_PROPERTY_CONFIG_FRAME_RATE,
+ HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO,
+ HFI_PROPERTY_PARAM_FRAME_SIZE,
+ HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT,
+ HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL,
+};
+
static const u32 sm8250_dec_ip_int_buf_tbl[] = {
BUF_BIN,
BUF_SCRATCH_1,
@@ -98,10 +308,17 @@ static const u32 sm8250_dec_op_int_buf_tbl[] = {
BUF_DPB,
};
+static const u32 sm8250_enc_ip_int_buf_tbl[] = {
+ BUF_BIN,
+ BUF_SCRATCH_1,
+ BUF_SCRATCH_2,
+};
+
struct iris_platform_data sm8250_data = {
.get_instance = iris_hfi_gen1_get_instance,
.init_hfi_command_ops = &iris_hfi_gen1_command_ops_init,
.init_hfi_response_ops = iris_hfi_gen1_response_ops_init,
+ .get_vpu_buffer_size = iris_vpu_buf_size,
.vpu_ops = &iris_vpu2_ops,
.set_preset_registers = iris_set_sm8250_preset_registers,
.icc_tbl = sm8250_icc_table,
@@ -121,20 +338,29 @@ struct iris_platform_data sm8250_data = {
.fwname = "qcom/vpu-1.0/venus.mbn",
.pas_id = IRIS_PAS_ID,
.inst_caps = &platform_inst_cap_sm8250,
- .inst_fw_caps = inst_fw_cap_sm8250,
- .inst_fw_caps_size = ARRAY_SIZE(inst_fw_cap_sm8250),
+ .inst_fw_caps_dec = inst_fw_cap_sm8250_dec,
+ .inst_fw_caps_dec_size = ARRAY_SIZE(inst_fw_cap_sm8250_dec),
+ .inst_fw_caps_enc = inst_fw_cap_sm8250_enc,
+ .inst_fw_caps_enc_size = ARRAY_SIZE(inst_fw_cap_sm8250_enc),
.tz_cp_config_data = &tz_cp_config_sm8250,
.hw_response_timeout = HW_RESPONSE_TIMEOUT_VALUE,
.num_vpp_pipe = 4,
.max_session_count = 16,
.max_core_mbpf = NUM_MBS_8K,
- .input_config_params_default =
+ .max_core_mbps = ((7680 * 4320) / 256) * 60,
+ .dec_input_config_params_default =
sm8250_vdec_input_config_param_default,
- .input_config_params_default_size =
+ .dec_input_config_params_default_size =
ARRAY_SIZE(sm8250_vdec_input_config_param_default),
+ .enc_input_config_params = sm8250_venc_input_config_param,
+ .enc_input_config_params_size =
+ ARRAY_SIZE(sm8250_venc_input_config_param),
.dec_ip_int_buf_tbl = sm8250_dec_ip_int_buf_tbl,
.dec_ip_int_buf_tbl_size = ARRAY_SIZE(sm8250_dec_ip_int_buf_tbl),
.dec_op_int_buf_tbl = sm8250_dec_op_int_buf_tbl,
.dec_op_int_buf_tbl_size = ARRAY_SIZE(sm8250_dec_op_int_buf_tbl),
+ .enc_ip_int_buf_tbl = sm8250_enc_ip_int_buf_tbl,
+ .enc_ip_int_buf_tbl_size = ARRAY_SIZE(sm8250_enc_ip_int_buf_tbl),
};
diff --git a/drivers/media/platform/qcom/iris/iris_platform_sm8750.h b/drivers/media/platform/qcom/iris/iris_platform_sm8750.h
new file mode 100644
index 000000000000..719056656a5b
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_platform_sm8750.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2025 Linaro Ltd
+ */
+
+#ifndef __MEDIA_IRIS_PLATFORM_SM8750_H__
+#define __MEDIA_IRIS_PLATFORM_SM8750_H__
+
+static const char * const sm8750_clk_reset_table[] = {
+ "bus0", "bus1", "core", "vcodec0_core"
+};
+
+static const struct platform_clk_data sm8750_clk_table[] = {
+ {IRIS_AXI_CLK, "iface" },
+ {IRIS_CTRL_CLK, "core" },
+ {IRIS_HW_CLK, "vcodec0_core" },
+ {IRIS_AXI1_CLK, "iface1" },
+ {IRIS_CTRL_FREERUN_CLK, "core_freerun" },
+ {IRIS_HW_FREERUN_CLK, "vcodec0_core_freerun" },
+};
+
+#endif
diff --git a/drivers/media/platform/qcom/iris/iris_probe.c b/drivers/media/platform/qcom/iris/iris_probe.c
index 4e6e92357968..00e99be16e08 100644
--- a/drivers/media/platform/qcom/iris/iris_probe.c
+++ b/drivers/media/platform/qcom/iris/iris_probe.c
@@ -146,7 +146,7 @@ static int iris_init_resources(struct iris_core *core)
return iris_init_resets(core);
}
-static int iris_register_video_device(struct iris_core *core)
+static int iris_register_video_device(struct iris_core *core, enum domain_type type)
{
struct video_device *vdev;
int ret;
@@ -155,19 +155,29 @@ static int iris_register_video_device(struct iris_core *core)
if (!vdev)
return -ENOMEM;
- strscpy(vdev->name, "qcom-iris-decoder", sizeof(vdev->name));
vdev->release = video_device_release;
vdev->fops = core->iris_v4l2_file_ops;
- vdev->ioctl_ops = core->iris_v4l2_ioctl_ops;
vdev->vfl_dir = VFL_DIR_M2M;
vdev->v4l2_dev = &core->v4l2_dev;
vdev->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
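+
+ /* One video node per domain: name, ioctl ops and core pointer differ. */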
+ if (type == DECODER) {
+ strscpy(vdev->name, "qcom-iris-decoder", sizeof(vdev->name));
+ vdev->ioctl_ops = core->iris_v4l2_ioctl_ops_dec;
+ core->vdev_dec = vdev;
+ } else if (type == ENCODER) {
+ strscpy(vdev->name, "qcom-iris-encoder", sizeof(vdev->name));
+ vdev->ioctl_ops = core->iris_v4l2_ioctl_ops_enc;
+ core->vdev_enc = vdev;
+ } else {
+ ret = -EINVAL;
+ goto err_vdev_release;
+ }
+
ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
if (ret)
goto err_vdev_release;
- core->vdev_dec = vdev;
video_set_drvdata(vdev, core);
return 0;
@@ -189,6 +199,7 @@ static void iris_remove(struct platform_device *pdev)
iris_core_deinit(core);
video_unregister_device(core->vdev_dec);
+ video_unregister_device(core->vdev_enc);
v4l2_device_unregister(&core->v4l2_dev);
@@ -258,17 +269,21 @@ static int iris_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = iris_register_video_device(core);
+ ret = iris_register_video_device(core, DECODER);
if (ret)
goto err_v4l2_unreg;
+ ret = iris_register_video_device(core, ENCODER);
+ if (ret)
+ goto err_vdev_unreg_dec;
+
platform_set_drvdata(pdev, core);
dma_mask = core->iris_platform_data->dma_mask;
ret = dma_set_mask_and_coherent(dev, dma_mask);
if (ret)
- goto err_vdev_unreg;
+ goto err_vdev_unreg_enc;
dma_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
dma_set_seg_boundary(&pdev->dev, DMA_BIT_MASK(32));
@@ -277,11 +292,13 @@ static int iris_probe(struct platform_device *pdev)
pm_runtime_use_autosuspend(core->dev);
ret = devm_pm_runtime_enable(core->dev);
if (ret)
- goto err_vdev_unreg;
+ goto err_vdev_unreg_enc;
return 0;
-err_vdev_unreg:
+err_vdev_unreg_enc:
+ video_unregister_device(core->vdev_enc);
+err_vdev_unreg_dec:
video_unregister_device(core->vdev_dec);
err_v4l2_unreg:
v4l2_device_unregister(&core->v4l2_dev);
@@ -353,6 +370,10 @@ static const struct of_device_id iris_dt_match[] = {
.compatible = "qcom,sm8650-iris",
.data = &sm8650_data,
},
+ {
+ .compatible = "qcom,sm8750-iris",
+ .data = &sm8750_data,
+ },
{ },
};
MODULE_DEVICE_TABLE(of, iris_dt_match);
diff --git a/drivers/media/platform/qcom/iris/iris_state.c b/drivers/media/platform/qcom/iris/iris_state.c
index 104e1687ad39..d14472414750 100644
--- a/drivers/media/platform/qcom/iris/iris_state.c
+++ b/drivers/media/platform/qcom/iris/iris_state.c
@@ -122,7 +122,8 @@ static bool iris_inst_allow_sub_state(struct iris_inst *inst, enum iris_inst_sub
return false;
case IRIS_INST_OUTPUT_STREAMING:
if (sub_state & (IRIS_INST_SUB_DRC_LAST |
- IRIS_INST_SUB_DRAIN_LAST | IRIS_INST_SUB_OUTPUT_PAUSE))
+ IRIS_INST_SUB_DRAIN_LAST | IRIS_INST_SUB_OUTPUT_PAUSE |
+ IRIS_INST_SUB_LOAD_RESOURCES))
return true;
return false;
case IRIS_INST_STREAMING:
@@ -251,7 +252,7 @@ bool iris_drc_pending(struct iris_inst *inst)
inst->sub_state & IRIS_INST_SUB_DRC_LAST;
}
-static inline bool iris_drain_pending(struct iris_inst *inst)
+bool iris_drain_pending(struct iris_inst *inst)
{
return inst->sub_state & IRIS_INST_SUB_DRAIN &&
inst->sub_state & IRIS_INST_SUB_DRAIN_LAST;
@@ -262,11 +263,11 @@ bool iris_allow_cmd(struct iris_inst *inst, u32 cmd)
struct vb2_queue *src_q = v4l2_m2m_get_src_vq(inst->m2m_ctx);
struct vb2_queue *dst_q = v4l2_m2m_get_dst_vq(inst->m2m_ctx);
- if (cmd == V4L2_DEC_CMD_START) {
+ if (cmd == V4L2_DEC_CMD_START || cmd == V4L2_ENC_CMD_START) {
if (vb2_is_streaming(src_q) || vb2_is_streaming(dst_q))
if (iris_drc_pending(inst) || iris_drain_pending(inst))
return true;
- } else if (cmd == V4L2_DEC_CMD_STOP) {
+ } else if (cmd == V4L2_DEC_CMD_STOP || cmd == V4L2_ENC_CMD_STOP) {
if (vb2_is_streaming(src_q))
if (inst->sub_state != IRIS_INST_SUB_DRAIN)
return true;
diff --git a/drivers/media/platform/qcom/iris/iris_state.h b/drivers/media/platform/qcom/iris/iris_state.h
index e718386dbe04..b09fa54cf17e 100644
--- a/drivers/media/platform/qcom/iris/iris_state.h
+++ b/drivers/media/platform/qcom/iris/iris_state.h
@@ -141,5 +141,6 @@ int iris_inst_sub_state_change_drc_last(struct iris_inst *inst);
int iris_inst_sub_state_change_pause(struct iris_inst *inst, u32 plane);
bool iris_allow_cmd(struct iris_inst *inst, u32 cmd);
bool iris_drc_pending(struct iris_inst *inst);
+bool iris_drain_pending(struct iris_inst *inst);
#endif
diff --git a/drivers/media/platform/qcom/iris/iris_utils.c b/drivers/media/platform/qcom/iris/iris_utils.c
index 83c70d6a2d90..85c70a62b1fd 100644
--- a/drivers/media/platform/qcom/iris/iris_utils.c
+++ b/drivers/media/platform/qcom/iris/iris_utils.c
@@ -88,3 +88,39 @@ struct iris_inst *iris_get_instance(struct iris_core *core, u32 session_id)
mutex_unlock(&core->lock);
return NULL;
}
+
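+/*
+ * Enforce the per-SoC macroblocks-per-frame budget: the summed
+ * macroblock load of all open instances must stay within
+ * max_core_mbpf, otherwise the new configuration is rejected.
+ */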
+int iris_check_core_mbpf(struct iris_inst *inst)
+{
+ struct iris_core *core = inst->core;
+ struct iris_inst *instance;
+ u32 total_mbpf = 0;
+
+ mutex_lock(&core->lock);
+ list_for_each_entry(instance, &core->instances, list)
+ total_mbpf += iris_get_mbpf(instance);
+ mutex_unlock(&core->lock);
+
+ if (total_mbpf > core->iris_platform_data->max_core_mbpf)
+ return -ENOMEM;
+
+ return 0;
+}
+
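+/*
+ * Enforce the per-SoC macroblocks-per-second budget. Each instance
+ * contributes mbpf * max(frame_rate, operating_rate), so the check
+ * is conservative when the two rates differ.
+ */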
+int iris_check_core_mbps(struct iris_inst *inst)
+{
+ struct iris_core *core = inst->core;
+ struct iris_inst *instance;
+ u32 total_mbps = 0, fps = 0;
+
+ mutex_lock(&core->lock);
+ list_for_each_entry(instance, &core->instances, list) {
+ fps = max(instance->frame_rate, instance->operating_rate);
+ total_mbps += iris_get_mbpf(instance) * fps;
+ }
+ mutex_unlock(&core->lock);
+
+ if (total_mbps > core->iris_platform_data->max_core_mbps)
+ return -ENOMEM;
+
+ return 0;
+}
diff --git a/drivers/media/platform/qcom/iris/iris_utils.h b/drivers/media/platform/qcom/iris/iris_utils.h
index 49869cf7a376..75740181122f 100644
--- a/drivers/media/platform/qcom/iris/iris_utils.h
+++ b/drivers/media/platform/qcom/iris/iris_utils.h
@@ -49,5 +49,7 @@ struct iris_inst *iris_get_instance(struct iris_core *core, u32 session_id);
void iris_helper_buffers_done(struct iris_inst *inst, unsigned int type,
enum vb2_buffer_state state);
int iris_wait_for_session_response(struct iris_inst *inst, bool is_flush);
+int iris_check_core_mbpf(struct iris_inst *inst);
+int iris_check_core_mbps(struct iris_inst *inst);
#endif
diff --git a/drivers/media/platform/qcom/iris/iris_vb2.c b/drivers/media/platform/qcom/iris/iris_vb2.c
index 8b17c7c39487..139b821f7952 100644
--- a/drivers/media/platform/qcom/iris/iris_vb2.c
+++ b/drivers/media/platform/qcom/iris/iris_vb2.c
@@ -7,28 +7,13 @@
#include <media/v4l2-event.h>
#include <media/v4l2-mem2mem.h>
+#include "iris_common.h"
#include "iris_instance.h"
#include "iris_vb2.h"
#include "iris_vdec.h"
+#include "iris_venc.h"
#include "iris_power.h"
-static int iris_check_core_mbpf(struct iris_inst *inst)
-{
- struct iris_core *core = inst->core;
- struct iris_inst *instance;
- u32 total_mbpf = 0;
-
- mutex_lock(&core->lock);
- list_for_each_entry(instance, &core->instances, list)
- total_mbpf += iris_get_mbpf(instance);
- mutex_unlock(&core->lock);
-
- if (total_mbpf > core->iris_platform_data->max_core_mbpf)
- return -ENOMEM;
-
- return 0;
-}
-
static int iris_check_inst_mbpf(struct iris_inst *inst)
{
struct platform_inst_caps *caps;
@@ -173,9 +158,6 @@ int iris_vb2_start_streaming(struct vb2_queue *q, unsigned int count)
inst = vb2_get_drv_priv(q);
- if (V4L2_TYPE_IS_CAPTURE(q->type) && inst->state == IRIS_INST_INIT)
- return 0;
-
mutex_lock(&inst->lock);
if (inst->state == IRIS_INST_ERROR) {
ret = -EBUSY;
@@ -194,16 +176,35 @@ int iris_vb2_start_streaming(struct vb2_queue *q, unsigned int count)
if (ret)
goto error;
- if (V4L2_TYPE_IS_OUTPUT(q->type))
- ret = iris_vdec_streamon_input(inst);
- else if (V4L2_TYPE_IS_CAPTURE(q->type))
- ret = iris_vdec_streamon_output(inst);
+ if (V4L2_TYPE_IS_OUTPUT(q->type)) {
+ if (inst->domain == DECODER)
+ ret = iris_vdec_streamon_input(inst);
+ else
+ ret = iris_venc_streamon_input(inst);
+ } else if (V4L2_TYPE_IS_CAPTURE(q->type)) {
+ if (inst->domain == DECODER)
+ ret = iris_vdec_streamon_output(inst);
+ else
+ ret = iris_venc_streamon_output(inst);
+ }
if (ret)
goto error;
buf_type = iris_v4l2_type_to_driver(q->type);
- ret = iris_queue_deferred_buffers(inst, buf_type);
+ if (inst->domain == DECODER) {
+ if (inst->state == IRIS_INST_STREAMING)
+ ret = iris_queue_internal_deferred_buffers(inst, BUF_DPB);
+ if (!ret)
+ ret = iris_queue_deferred_buffers(inst, buf_type);
+ } else {
+ if (inst->state == IRIS_INST_STREAMING) {
+ ret = iris_queue_deferred_buffers(inst, BUF_INPUT);
+ if (!ret)
+ ret = iris_queue_deferred_buffers(inst, BUF_OUTPUT);
+ }
+ }
+
if (ret)
goto error;
@@ -235,7 +236,7 @@ void iris_vb2_stop_streaming(struct vb2_queue *q)
!V4L2_TYPE_IS_CAPTURE(q->type))
goto exit;
- ret = iris_vdec_session_streamoff(inst, q->type);
+ ret = iris_session_streamoff(inst, q->type);
if (ret)
goto exit;
@@ -326,7 +327,10 @@ void iris_vb2_buf_queue(struct vb2_buffer *vb2)
v4l2_m2m_buf_queue(m2m_ctx, vbuf);
- ret = iris_vdec_qbuf(inst, vbuf);
+ if (inst->domain == DECODER)
+ ret = iris_vdec_qbuf(inst, vbuf);
+ else
+ ret = iris_venc_qbuf(inst, vbuf);
exit:
if (ret) {
diff --git a/drivers/media/platform/qcom/iris/iris_vdec.c b/drivers/media/platform/qcom/iris/iris_vdec.c
index d670b51c5839..ae13c3e1b426 100644
--- a/drivers/media/platform/qcom/iris/iris_vdec.c
+++ b/drivers/media/platform/qcom/iris/iris_vdec.c
@@ -7,14 +7,13 @@
#include <media/v4l2-mem2mem.h>
#include "iris_buffer.h"
+#include "iris_common.h"
#include "iris_ctrls.h"
#include "iris_instance.h"
#include "iris_power.h"
#include "iris_vdec.h"
#include "iris_vpu_buffer.h"
-#define DEFAULT_WIDTH 320
-#define DEFAULT_HEIGHT 240
#define DEFAULT_CODEC_ALIGNMENT 16
int iris_vdec_inst_init(struct iris_inst *inst)
@@ -56,7 +55,7 @@ int iris_vdec_inst_init(struct iris_inst *inst)
inst->buffers[BUF_OUTPUT].min_count = iris_vpu_buf_count(inst, BUF_OUTPUT);
inst->buffers[BUF_OUTPUT].size = f->fmt.pix_mp.plane_fmt[0].sizeimage;
- memcpy(&inst->fw_caps[0], &core->inst_fw_caps[0],
+ memcpy(&inst->fw_caps[0], &core->inst_fw_caps_dec[0],
INST_FW_CAP_MAX * sizeof(struct platform_inst_fw_cap));
return iris_ctrls_init(inst);
@@ -158,7 +157,7 @@ int iris_vdec_try_fmt(struct iris_inst *inst, struct v4l2_format *f)
}
break;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
- if (!fmt) {
+ if (f->fmt.pix_mp.pixelformat != V4L2_PIX_FMT_NV12) {
f_inst = inst->fmt_dst;
f->fmt.pix_mp.pixelformat = f_inst->fmt.pix_mp.pixelformat;
f->fmt.pix_mp.width = f_inst->fmt.pix_mp.width;
@@ -265,6 +264,19 @@ int iris_vdec_s_fmt(struct iris_inst *inst, struct v4l2_format *f)
return 0;
}
+int iris_vdec_validate_format(struct iris_inst *inst, u32 pixelformat)
+{
+ const struct iris_fmt *fmt = NULL;
+
+ if (pixelformat != V4L2_PIX_FMT_NV12) {
+ fmt = find_format(inst, pixelformat, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (!fmt)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
int iris_vdec_subscribe_event(struct iris_inst *inst, const struct v4l2_event_subscription *sub)
{
int ret = 0;
@@ -301,125 +313,6 @@ void iris_vdec_src_change(struct iris_inst *inst)
v4l2_event_queue_fh(&inst->fh, &event);
}
-
-static void iris_vdec_flush_deferred_buffers(struct iris_inst *inst,
- enum iris_buffer_type type)
-{
- struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
- struct v4l2_m2m_buffer *buffer, *n;
- struct iris_buffer *buf;
-
- if (type == BUF_INPUT) {
- v4l2_m2m_for_each_src_buf_safe(m2m_ctx, buffer, n) {
- buf = to_iris_buffer(&buffer->vb);
- if (buf->attr & BUF_ATTR_DEFERRED) {
- if (!(buf->attr & BUF_ATTR_BUFFER_DONE)) {
- buf->attr |= BUF_ATTR_BUFFER_DONE;
- buf->data_size = 0;
- iris_vb2_buffer_done(inst, buf);
- }
- }
- }
- } else {
- v4l2_m2m_for_each_dst_buf_safe(m2m_ctx, buffer, n) {
- buf = to_iris_buffer(&buffer->vb);
- if (buf->attr & BUF_ATTR_DEFERRED) {
- if (!(buf->attr & BUF_ATTR_BUFFER_DONE)) {
- buf->attr |= BUF_ATTR_BUFFER_DONE;
- buf->data_size = 0;
- iris_vb2_buffer_done(inst, buf);
- }
- }
- }
- }
-}
-
-static void iris_vdec_kill_session(struct iris_inst *inst)
-{
- const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
-
- if (!inst->session_id)
- return;
-
- hfi_ops->session_close(inst);
- iris_inst_change_state(inst, IRIS_INST_ERROR);
-}
-
-int iris_vdec_session_streamoff(struct iris_inst *inst, u32 plane)
-{
- const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
- enum iris_buffer_type buffer_type;
- int ret;
-
- switch (plane) {
- case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
- buffer_type = BUF_INPUT;
- break;
- case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
- buffer_type = BUF_OUTPUT;
- break;
- default:
- return -EINVAL;
- }
-
- ret = hfi_ops->session_stop(inst, plane);
- if (ret)
- goto error;
-
- ret = iris_inst_state_change_streamoff(inst, plane);
- if (ret)
- goto error;
-
- iris_vdec_flush_deferred_buffers(inst, buffer_type);
-
- return 0;
-
-error:
- iris_vdec_kill_session(inst);
- iris_vdec_flush_deferred_buffers(inst, buffer_type);
-
- return ret;
-}
-
-static int iris_vdec_process_streamon_input(struct iris_inst *inst)
-{
- const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
- enum iris_inst_sub_state set_sub_state = 0;
- int ret;
-
- iris_scale_power(inst);
-
- ret = hfi_ops->session_start(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
- if (ret)
- return ret;
-
- if (inst->sub_state & IRIS_INST_SUB_INPUT_PAUSE) {
- ret = iris_inst_change_sub_state(inst, IRIS_INST_SUB_INPUT_PAUSE, 0);
- if (ret)
- return ret;
- }
-
- if (inst->sub_state & IRIS_INST_SUB_DRC ||
- inst->sub_state & IRIS_INST_SUB_DRAIN ||
- inst->sub_state & IRIS_INST_SUB_FIRST_IPSC) {
- if (!(inst->sub_state & IRIS_INST_SUB_INPUT_PAUSE)) {
- if (hfi_ops->session_pause) {
- ret = hfi_ops->session_pause(inst,
- V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
- if (ret)
- return ret;
- }
- set_sub_state = IRIS_INST_SUB_INPUT_PAUSE;
- }
- }
-
- ret = iris_inst_state_change_streamon(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
- if (ret)
- return ret;
-
- return iris_inst_change_sub_state(inst, 0, set_sub_state);
-}
-
int iris_vdec_streamon_input(struct iris_inst *inst)
{
int ret;
@@ -428,7 +321,7 @@ int iris_vdec_streamon_input(struct iris_inst *inst)
if (ret)
return ret;
- ret = iris_alloc_and_queue_persist_bufs(inst);
+ ret = iris_alloc_and_queue_persist_bufs(inst, BUF_PERSIST);
if (ret)
return ret;
@@ -446,71 +339,7 @@ int iris_vdec_streamon_input(struct iris_inst *inst)
if (ret)
return ret;
- return iris_vdec_process_streamon_input(inst);
-}
-
-static int iris_vdec_process_streamon_output(struct iris_inst *inst)
-{
- const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
- bool drain_active = false, drc_active = false;
- enum iris_inst_sub_state clear_sub_state = 0;
- int ret = 0;
-
- iris_scale_power(inst);
-
- drain_active = inst->sub_state & IRIS_INST_SUB_DRAIN &&
- inst->sub_state & IRIS_INST_SUB_DRAIN_LAST;
-
- drc_active = inst->sub_state & IRIS_INST_SUB_DRC &&
- inst->sub_state & IRIS_INST_SUB_DRC_LAST;
-
- if (drc_active)
- clear_sub_state = IRIS_INST_SUB_DRC | IRIS_INST_SUB_DRC_LAST;
- else if (drain_active)
- clear_sub_state = IRIS_INST_SUB_DRAIN | IRIS_INST_SUB_DRAIN_LAST;
-
- if (inst->sub_state & IRIS_INST_SUB_INPUT_PAUSE) {
- ret = iris_alloc_and_queue_input_int_bufs(inst);
- if (ret)
- return ret;
- ret = iris_set_stage(inst, STAGE);
- if (ret)
- return ret;
- ret = iris_set_pipe(inst, PIPE);
- if (ret)
- return ret;
- }
-
- if (inst->state == IRIS_INST_INPUT_STREAMING &&
- inst->sub_state & IRIS_INST_SUB_INPUT_PAUSE) {
- if (!drain_active)
- ret = hfi_ops->session_resume_drc(inst,
- V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
- else if (hfi_ops->session_resume_drain)
- ret = hfi_ops->session_resume_drain(inst,
- V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
- if (ret)
- return ret;
- clear_sub_state |= IRIS_INST_SUB_INPUT_PAUSE;
- }
-
- if (inst->sub_state & IRIS_INST_SUB_FIRST_IPSC)
- clear_sub_state |= IRIS_INST_SUB_FIRST_IPSC;
-
- ret = hfi_ops->session_start(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
- if (ret)
- return ret;
-
- if (inst->sub_state & IRIS_INST_SUB_OUTPUT_PAUSE)
- clear_sub_state |= IRIS_INST_SUB_OUTPUT_PAUSE;
-
- ret = iris_inst_state_change_streamon(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
- if (ret)
- return ret;
-
- inst->last_buffer_dequeued = false;
-
- return iris_inst_change_sub_state(inst, clear_sub_state, 0);
+ return iris_process_streamon_input(inst);
}
int iris_vdec_streamon_output(struct iris_inst *inst)
@@ -532,7 +361,7 @@ int iris_vdec_streamon_output(struct iris_inst *inst)
if (ret)
return ret;
- ret = iris_vdec_process_streamon_output(inst);
+ ret = iris_process_streamon_output(inst);
if (ret)
goto error;
@@ -543,49 +372,11 @@ int iris_vdec_streamon_output(struct iris_inst *inst)
return ret;
error:
- iris_vdec_session_streamoff(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ iris_session_streamoff(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
return ret;
}
-static int
-iris_vdec_vb2_buffer_to_driver(struct vb2_buffer *vb2, struct iris_buffer *buf)
-{
- struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb2);
-
- buf->type = iris_v4l2_type_to_driver(vb2->type);
- buf->index = vb2->index;
- buf->fd = vb2->planes[0].m.fd;
- buf->buffer_size = vb2->planes[0].length;
- buf->data_offset = vb2->planes[0].data_offset;
- buf->data_size = vb2->planes[0].bytesused - vb2->planes[0].data_offset;
- buf->flags = vbuf->flags;
- buf->timestamp = vb2->timestamp;
- buf->attr = 0;
-
- return 0;
-}
-
-static void
-iris_set_ts_metadata(struct iris_inst *inst, struct vb2_v4l2_buffer *vbuf)
-{
- u32 mask = V4L2_BUF_FLAG_TIMECODE | V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
- struct vb2_buffer *vb = &vbuf->vb2_buf;
- u64 ts_us = vb->timestamp;
-
- if (inst->metadata_idx >= ARRAY_SIZE(inst->tss))
- inst->metadata_idx = 0;
-
- do_div(ts_us, NSEC_PER_USEC);
-
- inst->tss[inst->metadata_idx].flags = vbuf->flags & mask;
- inst->tss[inst->metadata_idx].tc = vbuf->timecode;
- inst->tss[inst->metadata_idx].ts_us = ts_us;
- inst->tss[inst->metadata_idx].ts_ns = vb->timestamp;
-
- inst->metadata_idx++;
-}
-
int iris_vdec_qbuf(struct iris_inst *inst, struct vb2_v4l2_buffer *vbuf)
{
struct iris_buffer *buf = to_iris_buffer(vbuf);
@@ -593,7 +384,7 @@ int iris_vdec_qbuf(struct iris_inst *inst, struct vb2_v4l2_buffer *vbuf)
struct vb2_queue *q;
int ret;
- ret = iris_vdec_vb2_buffer_to_driver(vb2, buf);
+ ret = iris_vb2_buffer_to_driver(vb2, buf);
if (ret)
return ret;
diff --git a/drivers/media/platform/qcom/iris/iris_vdec.h b/drivers/media/platform/qcom/iris/iris_vdec.h
index cd7aab66dc7c..ec1ce55d1375 100644
--- a/drivers/media/platform/qcom/iris/iris_vdec.h
+++ b/drivers/media/platform/qcom/iris/iris_vdec.h
@@ -8,22 +8,12 @@
struct iris_inst;
-enum iris_fmt_type {
- IRIS_FMT_H264,
- IRIS_FMT_HEVC,
- IRIS_FMT_VP9,
-};
-
-struct iris_fmt {
- u32 pixfmt;
- u32 type;
-};
-
int iris_vdec_inst_init(struct iris_inst *inst);
void iris_vdec_inst_deinit(struct iris_inst *inst);
int iris_vdec_enum_fmt(struct iris_inst *inst, struct v4l2_fmtdesc *f);
int iris_vdec_try_fmt(struct iris_inst *inst, struct v4l2_format *f);
int iris_vdec_s_fmt(struct iris_inst *inst, struct v4l2_format *f);
+int iris_vdec_validate_format(struct iris_inst *inst, u32 pixelformat);
int iris_vdec_subscribe_event(struct iris_inst *inst, const struct v4l2_event_subscription *sub);
void iris_vdec_src_change(struct iris_inst *inst);
int iris_vdec_streamon_input(struct iris_inst *inst);
@@ -31,6 +21,5 @@ int iris_vdec_streamon_output(struct iris_inst *inst);
int iris_vdec_qbuf(struct iris_inst *inst, struct vb2_v4l2_buffer *vbuf);
int iris_vdec_start_cmd(struct iris_inst *inst);
int iris_vdec_stop_cmd(struct iris_inst *inst);
-int iris_vdec_session_streamoff(struct iris_inst *inst, u32 plane);
#endif
diff --git a/drivers/media/platform/qcom/iris/iris_venc.c b/drivers/media/platform/qcom/iris/iris_venc.c
new file mode 100644
index 000000000000..099bd5ed4ae0
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_venc.c
@@ -0,0 +1,579 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <media/v4l2-event.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "iris_buffer.h"
+#include "iris_common.h"
+#include "iris_ctrls.h"
+#include "iris_instance.h"
+#include "iris_power.h"
+#include "iris_venc.h"
+#include "iris_vpu_buffer.h"
+
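+/*
+ * Set up encoder defaults: compressed H.264 on the capture queue,
+ * NV12 on the output queue (width aligned to 128, height to 32),
+ * a full-frame crop, default frame/operating rates, and a private
+ * copy of the platform encoder firmware capabilities.
+ */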
+int iris_venc_inst_init(struct iris_inst *inst)
+{
+ struct iris_core *core = inst->core;
+ struct v4l2_format *f;
+
+ inst->fmt_src = kzalloc(sizeof(*inst->fmt_src), GFP_KERNEL);
+ inst->fmt_dst = kzalloc(sizeof(*inst->fmt_dst), GFP_KERNEL);
+ if (!inst->fmt_src || !inst->fmt_dst) {
+ kfree(inst->fmt_src);
+ kfree(inst->fmt_dst);
+ return -ENOMEM;
+ }
+
+ f = inst->fmt_dst;
+ f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ f->fmt.pix_mp.width = DEFAULT_WIDTH;
+ f->fmt.pix_mp.height = DEFAULT_HEIGHT;
+ f->fmt.pix_mp.pixelformat = V4L2_PIX_FMT_H264;
+ inst->codec = f->fmt.pix_mp.pixelformat;
+ f->fmt.pix_mp.num_planes = 1;
+ f->fmt.pix_mp.plane_fmt[0].bytesperline = 0;
+ f->fmt.pix_mp.plane_fmt[0].sizeimage = iris_get_buffer_size(inst, BUF_OUTPUT);
+ f->fmt.pix_mp.field = V4L2_FIELD_NONE;
+ f->fmt.pix_mp.colorspace = V4L2_COLORSPACE_DEFAULT;
+ f->fmt.pix_mp.xfer_func = V4L2_XFER_FUNC_DEFAULT;
+ f->fmt.pix_mp.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ f->fmt.pix_mp.quantization = V4L2_QUANTIZATION_DEFAULT;
+ inst->buffers[BUF_OUTPUT].min_count = iris_vpu_buf_count(inst, BUF_OUTPUT);
+ inst->buffers[BUF_OUTPUT].size = f->fmt.pix_mp.plane_fmt[0].sizeimage;
+
+ f = inst->fmt_src;
+ f->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ f->fmt.pix_mp.pixelformat = V4L2_PIX_FMT_NV12;
+ f->fmt.pix_mp.width = ALIGN(DEFAULT_WIDTH, 128);
+ f->fmt.pix_mp.height = ALIGN(DEFAULT_HEIGHT, 32);
+ f->fmt.pix_mp.num_planes = 1;
+ f->fmt.pix_mp.plane_fmt[0].bytesperline = ALIGN(DEFAULT_WIDTH, 128);
+ f->fmt.pix_mp.plane_fmt[0].sizeimage = iris_get_buffer_size(inst, BUF_INPUT);
+ f->fmt.pix_mp.field = V4L2_FIELD_NONE;
+ f->fmt.pix_mp.colorspace = V4L2_COLORSPACE_DEFAULT;
+ f->fmt.pix_mp.xfer_func = V4L2_XFER_FUNC_DEFAULT;
+ f->fmt.pix_mp.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ f->fmt.pix_mp.quantization = V4L2_QUANTIZATION_DEFAULT;
+ inst->buffers[BUF_INPUT].min_count = iris_vpu_buf_count(inst, BUF_INPUT);
+ inst->buffers[BUF_INPUT].size = f->fmt.pix_mp.plane_fmt[0].sizeimage;
+
+ inst->crop.left = 0;
+ inst->crop.top = 0;
+ inst->crop.width = f->fmt.pix_mp.width;
+ inst->crop.height = f->fmt.pix_mp.height;
+
+ inst->operating_rate = DEFAULT_FPS;
+ inst->frame_rate = DEFAULT_FPS;
+
+ memcpy(&inst->fw_caps[0], &core->inst_fw_caps_enc[0],
+ INST_FW_CAP_MAX * sizeof(struct platform_inst_fw_cap));
+
+ return iris_ctrls_init(inst);
+}
+
+void iris_venc_inst_deinit(struct iris_inst *inst)
+{
+ kfree(inst->fmt_dst);
+ kfree(inst->fmt_src);
+}
+
+static const struct iris_fmt iris_venc_formats[] = {
+ [IRIS_FMT_H264] = {
+ .pixfmt = V4L2_PIX_FMT_H264,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
+ },
+ [IRIS_FMT_HEVC] = {
+ .pixfmt = V4L2_PIX_FMT_HEVC,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
+ },
+};
+
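+/*
+ * Look up a supported format by fourcc; returns NULL when the fourcc
+ * is unknown or does not belong to the requested queue type.
+ */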
+static const struct iris_fmt *
+find_format(struct iris_inst *inst, u32 pixfmt, u32 type)
+{
+ const struct iris_fmt *fmt = iris_venc_formats;
+ unsigned int size = ARRAY_SIZE(iris_venc_formats);
+ unsigned int i;
+
+ for (i = 0; i < size; i++) {
+ if (fmt[i].pixfmt == pixfmt)
+ break;
+ }
+
+ if (i == size || fmt[i].type != type)
+ return NULL;
+
+ return &fmt[i];
+}
+
+static const struct iris_fmt *
+find_format_by_index(struct iris_inst *inst, u32 index, u32 type)
+{
+ const struct iris_fmt *fmt = iris_venc_formats;
+ unsigned int size = ARRAY_SIZE(iris_venc_formats);
+
+ if (index >= size || fmt[index].type != type)
+ return NULL;
+
+ return &fmt[index];
+}
+
+int iris_venc_enum_fmt(struct iris_inst *inst, struct v4l2_fmtdesc *f)
+{
+ const struct iris_fmt *fmt;
+
+ switch (f->type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ if (f->index)
+ return -EINVAL;
+ f->pixelformat = V4L2_PIX_FMT_NV12;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ fmt = find_format_by_index(inst, f->index, f->type);
+ if (!fmt)
+ return -EINVAL;
+
+ f->pixelformat = fmt->pixfmt;
+ f->flags = V4L2_FMT_FLAG_COMPRESSED | V4L2_FMT_FLAG_ENC_CAP_FRAME_INTERVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int iris_venc_try_fmt(struct iris_inst *inst, struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
+ const struct iris_fmt *fmt;
+ struct v4l2_format *f_inst;
+
+ memset(pixmp->reserved, 0, sizeof(pixmp->reserved));
+ fmt = find_format(inst, pixmp->pixelformat, f->type);
+ switch (f->type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ if (f->fmt.pix_mp.pixelformat != V4L2_PIX_FMT_NV12) {
+ f_inst = inst->fmt_src;
+ f->fmt.pix_mp.width = f_inst->fmt.pix_mp.width;
+ f->fmt.pix_mp.height = f_inst->fmt.pix_mp.height;
+ f->fmt.pix_mp.pixelformat = f_inst->fmt.pix_mp.pixelformat;
+ }
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ if (!fmt) {
+ f_inst = inst->fmt_dst;
+ f->fmt.pix_mp.width = f_inst->fmt.pix_mp.width;
+ f->fmt.pix_mp.height = f_inst->fmt.pix_mp.height;
+ f->fmt.pix_mp.pixelformat = f_inst->fmt.pix_mp.pixelformat;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (pixmp->field == V4L2_FIELD_ANY)
+ pixmp->field = V4L2_FIELD_NONE;
+
+ pixmp->num_planes = 1;
+
+ return 0;
+}
+
+static int iris_venc_s_fmt_output(struct iris_inst *inst, struct v4l2_format *f)
+{
+ struct v4l2_format *fmt;
+
+ iris_venc_try_fmt(inst, f);
+
+ if (!(find_format(inst, f->fmt.pix_mp.pixelformat, f->type)))
+ return -EINVAL;
+
+ fmt = inst->fmt_dst;
+ fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ fmt->fmt.pix_mp.num_planes = 1;
+ fmt->fmt.pix_mp.plane_fmt[0].bytesperline = 0;
+ fmt->fmt.pix_mp.plane_fmt[0].sizeimage = iris_get_buffer_size(inst, BUF_OUTPUT);
+
+ if (f->fmt.pix_mp.colorspace != V4L2_COLORSPACE_DEFAULT &&
+ f->fmt.pix_mp.colorspace != V4L2_COLORSPACE_REC709)
+ f->fmt.pix_mp.colorspace = V4L2_COLORSPACE_DEFAULT;
+ fmt->fmt.pix_mp.colorspace = f->fmt.pix_mp.colorspace;
+ fmt->fmt.pix_mp.xfer_func = f->fmt.pix_mp.xfer_func;
+ fmt->fmt.pix_mp.ycbcr_enc = f->fmt.pix_mp.ycbcr_enc;
+ fmt->fmt.pix_mp.quantization = f->fmt.pix_mp.quantization;
+
+ inst->buffers[BUF_OUTPUT].min_count = iris_vpu_buf_count(inst, BUF_OUTPUT);
+ inst->buffers[BUF_OUTPUT].size = fmt->fmt.pix_mp.plane_fmt[0].sizeimage;
+ fmt->fmt.pix_mp.pixelformat = f->fmt.pix_mp.pixelformat;
+ inst->codec = f->fmt.pix_mp.pixelformat;
+ memcpy(f, fmt, sizeof(struct v4l2_format));
+
+ return 0;
+}
+
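+/*
+ * Apply a raw-format (OUTPUT queue) selection: only NV12 is accepted,
+ * dimensions are aligned for the hardware (128x32), colorimetry is
+ * propagated to the capture queue, and a resolution change resets the
+ * crop rectangle and reapplies the capture format.
+ */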
+static int iris_venc_s_fmt_input(struct iris_inst *inst, struct v4l2_format *f)
+{
+ struct v4l2_format *fmt, *output_fmt;
+
+ iris_venc_try_fmt(inst, f);
+
+ if (f->fmt.pix_mp.pixelformat != V4L2_PIX_FMT_NV12)
+ return -EINVAL;
+
+ fmt = inst->fmt_src;
+ fmt->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ fmt->fmt.pix_mp.width = ALIGN(f->fmt.pix_mp.width, 128);
+ fmt->fmt.pix_mp.height = ALIGN(f->fmt.pix_mp.height, 32);
+ fmt->fmt.pix_mp.num_planes = 1;
+ fmt->fmt.pix_mp.pixelformat = f->fmt.pix_mp.pixelformat;
+ fmt->fmt.pix_mp.plane_fmt[0].bytesperline = ALIGN(f->fmt.pix_mp.width, 128);
+ fmt->fmt.pix_mp.plane_fmt[0].sizeimage = iris_get_buffer_size(inst, BUF_INPUT);
+
+ fmt->fmt.pix_mp.colorspace = f->fmt.pix_mp.colorspace;
+ fmt->fmt.pix_mp.xfer_func = f->fmt.pix_mp.xfer_func;
+ fmt->fmt.pix_mp.ycbcr_enc = f->fmt.pix_mp.ycbcr_enc;
+ fmt->fmt.pix_mp.quantization = f->fmt.pix_mp.quantization;
+
+ output_fmt = inst->fmt_dst;
+ output_fmt->fmt.pix_mp.width = fmt->fmt.pix_mp.width;
+ output_fmt->fmt.pix_mp.height = fmt->fmt.pix_mp.height;
+ output_fmt->fmt.pix_mp.colorspace = fmt->fmt.pix_mp.colorspace;
+ output_fmt->fmt.pix_mp.xfer_func = fmt->fmt.pix_mp.xfer_func;
+ output_fmt->fmt.pix_mp.ycbcr_enc = fmt->fmt.pix_mp.ycbcr_enc;
+ output_fmt->fmt.pix_mp.quantization = fmt->fmt.pix_mp.quantization;
+
+ inst->buffers[BUF_INPUT].min_count = iris_vpu_buf_count(inst, BUF_INPUT);
+ inst->buffers[BUF_INPUT].size = fmt->fmt.pix_mp.plane_fmt[0].sizeimage;
+
+ if (f->fmt.pix_mp.width != inst->crop.width ||
+ f->fmt.pix_mp.height != inst->crop.height) {
+ inst->crop.top = 0;
+ inst->crop.left = 0;
+ inst->crop.width = fmt->fmt.pix_mp.width;
+ inst->crop.height = fmt->fmt.pix_mp.height;
+
+ iris_venc_s_fmt_output(inst, output_fmt);
+ }
+
+ memcpy(f, fmt, sizeof(struct v4l2_format));
+
+ return 0;
+}
+
+int iris_venc_s_fmt(struct iris_inst *inst, struct v4l2_format *f)
+{
+ struct vb2_queue *q;
+
+ q = v4l2_m2m_get_vq(inst->m2m_ctx, f->type);
+ if (!q)
+ return -EINVAL;
+
+ if (vb2_is_busy(q))
+ return -EBUSY;
+
+ switch (f->type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ return iris_venc_s_fmt_input(inst, f);
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ return iris_venc_s_fmt_output(inst, f);
+ default:
+ return -EINVAL;
+ }
+}
+
+int iris_venc_validate_format(struct iris_inst *inst, u32 pixelformat)
+{
+ const struct iris_fmt *fmt = NULL;
+
+ if (pixelformat != V4L2_PIX_FMT_NV12) {
+ fmt = find_format(inst, pixelformat, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (!fmt)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int iris_venc_subscribe_event(struct iris_inst *inst,
+ const struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_EOS:
+ return v4l2_event_subscribe(&inst->fh, sub, 0, NULL);
+ case V4L2_EVENT_CTRL:
+ return v4l2_ctrl_subscribe_event(&inst->fh, sub);
+ default:
+ return -EINVAL;
+ }
+}
+
+int iris_venc_s_selection(struct iris_inst *inst, struct v4l2_selection *s)
+{
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP:
+ s->r.left = 0;
+ s->r.top = 0;
+
+ if (s->r.width > inst->fmt_src->fmt.pix_mp.width ||
+ s->r.height > inst->fmt_src->fmt.pix_mp.height)
+ return -EINVAL;
+
+ inst->crop.left = s->r.left;
+ inst->crop.top = s->r.top;
+ inst->crop.width = s->r.width;
+ inst->crop.height = s->r.height;
+ inst->fmt_dst->fmt.pix_mp.width = inst->crop.width;
+ inst->fmt_dst->fmt.pix_mp.height = inst->crop.height;
+ return iris_venc_s_fmt_output(inst, inst->fmt_dst);
+ default:
+ return -EINVAL;
+ }
+}
+
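+/*
+ * VIDIOC_S_PARM: convert timeperframe to an integer fps, clamp it
+ * against the platform maximum and, if the queue is already
+ * streaming, re-validate the aggregate core load before committing
+ * the new frame rate (capture) or operating rate (output).
+ */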
+int iris_venc_s_param(struct iris_inst *inst, struct v4l2_streamparm *s_parm)
+{
+ struct platform_inst_caps *caps = inst->core->iris_platform_data->inst_caps;
+ struct vb2_queue *src_q = v4l2_m2m_get_src_vq(inst->m2m_ctx);
+ struct vb2_queue *dst_q = v4l2_m2m_get_dst_vq(inst->m2m_ctx);
+ struct v4l2_fract *timeperframe = NULL;
+ u32 default_rate = DEFAULT_FPS;
+ bool is_frame_rate = false;
+ u64 us_per_frame, fps;
+ u32 max_rate;
+
+ int ret = 0;
+ if (s_parm->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ timeperframe = &s_parm->parm.output.timeperframe;
+ max_rate = caps->max_operating_rate;
+ s_parm->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
+ } else {
+ timeperframe = &s_parm->parm.capture.timeperframe;
+ is_frame_rate = true;
+ max_rate = caps->max_frame_rate;
+ s_parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+ }
+
+ if (!timeperframe->numerator)
+ timeperframe->numerator = 1;
+ if (!timeperframe->denominator)
+ timeperframe->denominator = default_rate;
+
+ us_per_frame = timeperframe->numerator * (u64)USEC_PER_SEC;
+ do_div(us_per_frame, timeperframe->denominator);
+
+ if (!us_per_frame)
+ return -EINVAL;
+
+ fps = (u64)USEC_PER_SEC;
+ do_div(fps, us_per_frame);
+ if (fps > max_rate) {
+ ret = -ENOMEM;
+ goto reset_rate;
+ }
+
+ if (is_frame_rate)
+ inst->frame_rate = (u32)fps;
+ else
+ inst->operating_rate = (u32)fps;
+
+ if ((s_parm->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE && vb2_is_streaming(src_q)) ||
+ (s_parm->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE && vb2_is_streaming(dst_q))) {
+ ret = iris_check_core_mbpf(inst);
+ if (ret)
+ goto reset_rate;
+ ret = iris_check_core_mbps(inst);
+ if (ret)
+ goto reset_rate;
+ }
+
+ return 0;
+
+reset_rate:
+ if (ret) {
+ if (is_frame_rate)
+ inst->frame_rate = default_rate;
+ else
+ inst->operating_rate = default_rate;
+ }
+
+ return ret;
+}
+
+int iris_venc_g_param(struct iris_inst *inst, struct v4l2_streamparm *s_parm)
+{
+ struct v4l2_fract *timeperframe = NULL;
+
+ if (s_parm->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ timeperframe = &s_parm->parm.output.timeperframe;
+ timeperframe->numerator = 1;
+ timeperframe->denominator = inst->operating_rate;
+ s_parm->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
+ } else {
+ timeperframe = &s_parm->parm.capture.timeperframe;
+ timeperframe->numerator = 1;
+ timeperframe->denominator = inst->frame_rate;
+ s_parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+ }
+
+ return 0;
+}
+
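+/*
+ * Output (raw input) queue streamon: push the encoder properties,
+ * allocate and queue the session-persistent (ARP) buffer, recreate
+ * the plane's internal buffers, then start the firmware input plane.
+ */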
+int iris_venc_streamon_input(struct iris_inst *inst)
+{
+ int ret;
+
+ ret = iris_set_properties(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (ret)
+ return ret;
+
+ ret = iris_alloc_and_queue_persist_bufs(inst, BUF_ARP);
+ if (ret)
+ return ret;
+
+ iris_get_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+
+ ret = iris_destroy_dequeued_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (ret)
+ return ret;
+
+ ret = iris_create_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (ret)
+ return ret;
+
+ ret = iris_queue_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (ret)
+ return ret;
+
+ return iris_process_streamon_input(inst);
+}
+
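+/*
+ * Capture (bitstream) queue streamon mirrors the input path for the
+ * capture plane; failures tear the session back down through
+ * iris_session_streamoff().
+ */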
+int iris_venc_streamon_output(struct iris_inst *inst)
+{
+ int ret;
+
+ ret = iris_set_properties(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (ret)
+ goto error;
+
+ ret = iris_alloc_and_queue_persist_bufs(inst, BUF_ARP);
+ if (ret)
+ goto error;
+
+ iris_get_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+
+ ret = iris_destroy_dequeued_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (ret)
+ goto error;
+
+ ret = iris_create_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (ret)
+ goto error;
+
+ ret = iris_queue_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (ret)
+ goto error;
+
+ ret = iris_process_streamon_output(inst);
+ if (ret)
+ goto error;
+
+ return ret;
+
+error:
+ iris_session_streamoff(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+
+ return ret;
+}
+
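+/*
+ * Queue a buffer to firmware; buffers arriving before the queue is
+ * streaming are marked deferred and flushed in start_streaming.
+ */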
+int iris_venc_qbuf(struct iris_inst *inst, struct vb2_v4l2_buffer *vbuf)
+{
+ struct iris_buffer *buf = to_iris_buffer(vbuf);
+ struct vb2_buffer *vb2 = &vbuf->vb2_buf;
+ struct vb2_queue *q;
+ int ret;
+
+ ret = iris_vb2_buffer_to_driver(vb2, buf);
+ if (ret)
+ return ret;
+
+ if (buf->type == BUF_INPUT)
+ iris_set_ts_metadata(inst, vbuf);
+
+ q = v4l2_m2m_get_vq(inst->m2m_ctx, vb2->type);
+ if (!vb2_is_streaming(q)) {
+ buf->attr |= BUF_ATTR_DEFERRED;
+ return 0;
+ }
+
+ iris_scale_power(inst);
+
+ return iris_queue_buffer(inst, buf);
+}
+
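+/*
+ * V4L2_ENC_CMD_START after a drain: clear the last-buffer state on
+ * the capture queue, resume whichever planes firmware paused for the
+ * drain, and drop the DRAIN sub-states so encoding can continue.
+ */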
+int iris_venc_start_cmd(struct iris_inst *inst)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ enum iris_inst_sub_state clear_sub_state = 0;
+ struct vb2_queue *dst_vq;
+ int ret;
+
+ dst_vq = v4l2_m2m_get_dst_vq(inst->m2m_ctx);
+
+ if (inst->sub_state & IRIS_INST_SUB_DRAIN &&
+ inst->sub_state & IRIS_INST_SUB_DRAIN_LAST) {
+ vb2_clear_last_buffer_dequeued(dst_vq);
+ clear_sub_state = IRIS_INST_SUB_DRAIN | IRIS_INST_SUB_DRAIN_LAST;
+ if (inst->sub_state & IRIS_INST_SUB_INPUT_PAUSE) {
+ if (hfi_ops->session_resume_drain) {
+ ret = hfi_ops->session_resume_drain(inst,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (ret)
+ return ret;
+ }
+ clear_sub_state |= IRIS_INST_SUB_INPUT_PAUSE;
+ }
+ if (inst->sub_state & IRIS_INST_SUB_OUTPUT_PAUSE) {
+ if (hfi_ops->session_resume_drain) {
+ ret = hfi_ops->session_resume_drain(inst,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (ret)
+ return ret;
+ }
+ clear_sub_state |= IRIS_INST_SUB_OUTPUT_PAUSE;
+ }
+ } else {
+ dev_err(inst->core->dev, "start called before receiving last_flag\n");
+ iris_inst_change_state(inst, IRIS_INST_ERROR);
+ return -EBUSY;
+ }
+
+ inst->last_buffer_dequeued = false;
+
+ return iris_inst_change_sub_state(inst, clear_sub_state, 0);
+}
+
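+/*
+ * V4L2_ENC_CMD_STOP: issue a firmware drain on the raw-input (OUTPUT)
+ * plane and set the DRAIN sub-state; the drain completes when
+ * firmware returns the last-flag buffer.
+ */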
+int iris_venc_stop_cmd(struct iris_inst *inst)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ int ret;
+
+ ret = hfi_ops->session_drain(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (ret)
+ return ret;
+
+ ret = iris_inst_change_sub_state(inst, 0, IRIS_INST_SUB_DRAIN);
+
+ iris_scale_power(inst);
+
+ return ret;
+}
diff --git a/drivers/media/platform/qcom/iris/iris_venc.h b/drivers/media/platform/qcom/iris/iris_venc.h
new file mode 100644
index 000000000000..c4db7433da53
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_venc.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _IRIS_VENC_H_
+#define _IRIS_VENC_H_
+
+struct iris_inst;
+
+int iris_venc_inst_init(struct iris_inst *inst);
+void iris_venc_inst_deinit(struct iris_inst *inst);
+int iris_venc_enum_fmt(struct iris_inst *inst, struct v4l2_fmtdesc *f);
+int iris_venc_try_fmt(struct iris_inst *inst, struct v4l2_format *f);
+int iris_venc_s_fmt(struct iris_inst *inst, struct v4l2_format *f);
+int iris_venc_validate_format(struct iris_inst *inst, u32 pixelformat);
+int iris_venc_subscribe_event(struct iris_inst *inst, const struct v4l2_event_subscription *sub);
+int iris_venc_s_selection(struct iris_inst *inst, struct v4l2_selection *s);
+int iris_venc_g_param(struct iris_inst *inst, struct v4l2_streamparm *s_parm);
+int iris_venc_s_param(struct iris_inst *inst, struct v4l2_streamparm *s_parm);
+int iris_venc_streamon_input(struct iris_inst *inst);
+int iris_venc_streamon_output(struct iris_inst *inst);
+int iris_venc_qbuf(struct iris_inst *inst, struct vb2_v4l2_buffer *vbuf);
+int iris_venc_start_cmd(struct iris_inst *inst);
+int iris_venc_stop_cmd(struct iris_inst *inst);
+
+#endif
diff --git a/drivers/media/platform/qcom/iris/iris_vidc.c b/drivers/media/platform/qcom/iris/iris_vidc.c
index c417e8c31f80..d38d0f6961cd 100644
--- a/drivers/media/platform/qcom/iris/iris_vidc.c
+++ b/drivers/media/platform/qcom/iris/iris_vidc.c
@@ -12,6 +12,7 @@
#include "iris_vidc.h"
#include "iris_instance.h"
#include "iris_vdec.h"
+#include "iris_venc.h"
#include "iris_vb2.h"
#include "iris_vpu_buffer.h"
#include "iris_platform_common.h"
@@ -21,16 +22,19 @@
#define STEP_WIDTH 1
#define STEP_HEIGHT 1
-static void iris_v4l2_fh_init(struct iris_inst *inst)
+static void iris_v4l2_fh_init(struct iris_inst *inst, struct file *filp)
{
- v4l2_fh_init(&inst->fh, inst->core->vdev_dec);
+ if (inst->domain == ENCODER)
+ v4l2_fh_init(&inst->fh, inst->core->vdev_enc);
+ else if (inst->domain == DECODER)
+ v4l2_fh_init(&inst->fh, inst->core->vdev_dec);
inst->fh.ctrl_handler = &inst->ctrl_handler;
- v4l2_fh_add(&inst->fh);
+ v4l2_fh_add(&inst->fh, filp);
}
-static void iris_v4l2_fh_deinit(struct iris_inst *inst)
+static void iris_v4l2_fh_deinit(struct iris_inst *inst, struct file *filp)
{
- v4l2_fh_del(&inst->fh);
+ v4l2_fh_del(&inst->fh, filp);
inst->fh.ctrl_handler = NULL;
v4l2_fh_exit(&inst->fh);
}
@@ -67,9 +71,9 @@ static void iris_remove_session(struct iris_inst *inst)
mutex_unlock(&core->lock);
}
-static inline struct iris_inst *iris_get_inst(struct file *filp, void *fh)
+static inline struct iris_inst *iris_get_inst(struct file *filp)
{
- return container_of(filp->private_data, struct iris_inst, fh);
+ return container_of(file_to_v4l2_fh(filp), struct iris_inst, fh);
}
static void iris_m2m_device_run(void *priv)
@@ -126,9 +130,19 @@ iris_m2m_queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_
int iris_open(struct file *filp)
{
struct iris_core *core = video_drvdata(filp);
+ struct video_device *vdev;
struct iris_inst *inst;
+ u32 session_type;
int ret;
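+
+ /* The video node this file was opened on selects the session domain. */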
+ vdev = video_devdata(filp);
+ if (strcmp(vdev->name, "qcom-iris-decoder") == 0)
+ session_type = DECODER;
+ else if (strcmp(vdev->name, "qcom-iris-encoder") == 0)
+ session_type = ENCODER;
+ else
+ return -EINVAL;
+
ret = pm_runtime_resume_and_get(core->dev);
if (ret < 0)
return ret;
@@ -147,6 +161,7 @@ int iris_open(struct file *filp)
return -ENOMEM;
inst->core = core;
+ inst->domain = session_type;
inst->session_id = hash32_ptr(inst);
inst->state = IRIS_INST_DEINIT;
@@ -161,10 +176,12 @@ int iris_open(struct file *filp)
INIT_LIST_HEAD(&inst->buffers[BUF_DPB].list);
INIT_LIST_HEAD(&inst->buffers[BUF_PERSIST].list);
INIT_LIST_HEAD(&inst->buffers[BUF_SCRATCH_1].list);
+ INIT_LIST_HEAD(&inst->buffers[BUF_SCRATCH_2].list);
+ INIT_LIST_HEAD(&inst->buffers[BUF_VPSS].list);
init_completion(&inst->completion);
init_completion(&inst->flush_completion);
- iris_v4l2_fh_init(inst);
+ iris_v4l2_fh_init(inst, filp);
inst->m2m_dev = v4l2_m2m_init(&iris_m2m_ops);
if (IS_ERR_OR_NULL(inst->m2m_dev)) {
@@ -178,14 +195,16 @@ int iris_open(struct file *filp)
goto fail_m2m_release;
}
- ret = iris_vdec_inst_init(inst);
+ if (inst->domain == DECODER)
+ ret = iris_vdec_inst_init(inst);
+ else if (inst->domain == ENCODER)
+ ret = iris_venc_inst_init(inst);
if (ret)
goto fail_m2m_ctx_release;
iris_add_session(inst);
inst->fh.m2m_ctx = inst->m2m_ctx;
- filp->private_data = &inst->fh;
return 0;
@@ -194,7 +213,7 @@ fail_m2m_ctx_release:
fail_m2m_release:
v4l2_m2m_release(inst->m2m_dev);
fail_v4l2_fh_deinit:
- iris_v4l2_fh_deinit(inst);
+ iris_v4l2_fh_deinit(inst, filp);
mutex_destroy(&inst->ctx_q_lock);
mutex_destroy(&inst->lock);
kfree(inst);
@@ -240,26 +259,42 @@ static void iris_check_num_queued_internal_buffers(struct iris_inst *inst, u32 p
for (i = 0; i < internal_buffer_count; i++) {
buffers = &inst->buffers[internal_buf_type[i]];
+ count = 0;
list_for_each_entry_safe(buf, next, &buffers->list, list)
count++;
if (count)
dev_err(inst->core->dev, "%d buffer of type %d not released",
count, internal_buf_type[i]);
}
+
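+ /* Session-lifetime buffers (PERSIST for decode, ARP for encode) are tracked separately. */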
+ if (inst->domain == DECODER)
+ buffers = &inst->buffers[BUF_PERSIST];
+ else
+ buffers = &inst->buffers[BUF_ARP];
+
+ count = 0;
+ list_for_each_entry_safe(buf, next, &buffers->list, list)
+ count++;
+ if (count)
+ dev_err(inst->core->dev, "%d buffer of type %d not released",
+ count, inst->domain == DECODER ? BUF_PERSIST : BUF_ARP);
}
int iris_close(struct file *filp)
{
- struct iris_inst *inst = iris_get_inst(filp, NULL);
+ struct iris_inst *inst = iris_get_inst(filp);
v4l2_ctrl_handler_free(&inst->ctrl_handler);
v4l2_m2m_ctx_release(inst->m2m_ctx);
v4l2_m2m_release(inst->m2m_dev);
mutex_lock(&inst->lock);
- iris_vdec_inst_deinit(inst);
+ if (inst->domain == DECODER)
+ iris_vdec_inst_deinit(inst);
+ else if (inst->domain == ENCODER)
+ iris_venc_inst_deinit(inst);
iris_session_close(inst);
iris_inst_change_state(inst, IRIS_INST_DEINIT);
- iris_v4l2_fh_deinit(inst);
+ iris_v4l2_fh_deinit(inst, filp);
iris_destroy_all_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
iris_destroy_all_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
iris_check_num_queued_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
@@ -269,25 +304,34 @@ int iris_close(struct file *filp)
mutex_destroy(&inst->ctx_q_lock);
mutex_destroy(&inst->lock);
kfree(inst);
- filp->private_data = NULL;
return 0;
}
static int iris_enum_fmt(struct file *filp, void *fh, struct v4l2_fmtdesc *f)
{
- struct iris_inst *inst = iris_get_inst(filp, NULL);
+ struct iris_inst *inst = iris_get_inst(filp);
- return iris_vdec_enum_fmt(inst, f);
+ if (inst->domain == DECODER)
+ return iris_vdec_enum_fmt(inst, f);
+ else if (inst->domain == ENCODER)
+ return iris_venc_enum_fmt(inst, f);
+ else
+ return -EINVAL;
}
static int iris_try_fmt_vid_mplane(struct file *filp, void *fh, struct v4l2_format *f)
{
- struct iris_inst *inst = iris_get_inst(filp, NULL);
- int ret;
+ struct iris_inst *inst = iris_get_inst(filp);
+ int ret = 0;
mutex_lock(&inst->lock);
- ret = iris_vdec_try_fmt(inst, f);
+
+ if (inst->domain == DECODER)
+ ret = iris_vdec_try_fmt(inst, f);
+ else if (inst->domain == ENCODER)
+ ret = iris_venc_try_fmt(inst, f);
+
mutex_unlock(&inst->lock);
return ret;
@@ -295,11 +339,16 @@ static int iris_try_fmt_vid_mplane(struct file *filp, void *fh, struct v4l2_form
static int iris_s_fmt_vid_mplane(struct file *filp, void *fh, struct v4l2_format *f)
{
- struct iris_inst *inst = iris_get_inst(filp, NULL);
- int ret;
+ struct iris_inst *inst = iris_get_inst(filp);
+ int ret = 0;
mutex_lock(&inst->lock);
- ret = iris_vdec_s_fmt(inst, f);
+
+ if (inst->domain == DECODER)
+ ret = iris_vdec_s_fmt(inst, f);
+ else if (inst->domain == ENCODER)
+ ret = iris_venc_s_fmt(inst, f);
+
mutex_unlock(&inst->lock);
return ret;
@@ -307,7 +356,7 @@ static int iris_s_fmt_vid_mplane(struct file *filp, void *fh, struct v4l2_format
static int iris_g_fmt_vid_mplane(struct file *filp, void *fh, struct v4l2_format *f)
{
- struct iris_inst *inst = iris_get_inst(filp, NULL);
+ struct iris_inst *inst = iris_get_inst(filp);
int ret = 0;
mutex_lock(&inst->lock);
@@ -326,15 +375,20 @@ static int iris_g_fmt_vid_mplane(struct file *filp, void *fh, struct v4l2_format
static int iris_enum_framesizes(struct file *filp, void *fh,
struct v4l2_frmsizeenum *fsize)
{
- struct iris_inst *inst = iris_get_inst(filp, NULL);
+ struct iris_inst *inst = iris_get_inst(filp);
struct platform_inst_caps *caps;
+ int ret = 0;
if (fsize->index)
return -EINVAL;
- if (fsize->pixel_format != V4L2_PIX_FMT_H264 &&
- fsize->pixel_format != V4L2_PIX_FMT_NV12)
- return -EINVAL;
+ if (inst->domain == DECODER)
+ ret = iris_vdec_validate_format(inst, fsize->pixel_format);
+ else
+ ret = iris_venc_validate_format(inst, fsize->pixel_format);
+
+ if (ret)
+ return ret;
caps = inst->core->iris_platform_data->inst_caps;
@@ -346,55 +400,174 @@ static int iris_enum_framesizes(struct file *filp, void *fh,
fsize->stepwise.max_height = caps->max_frame_height;
fsize->stepwise.step_height = STEP_HEIGHT;
+ return ret;
+}
+
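+/*
+ * Frame intervals are reported as a stepwise range whose minimum
+ * interval (maximum rate) is derived from the per-core MB/s budget:
+ * fps = max_core_mbps / mbpf, capped at MAXIMUM_FPS.
+ */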
+static int iris_enum_frameintervals(struct file *filp, void *fh,
+ struct v4l2_frmivalenum *fival)
+{
+ struct iris_inst *inst = iris_get_inst(filp);
+ struct iris_core *core = inst->core;
+ struct platform_inst_caps *caps;
+ u32 fps, mbpf;
+ int ret = 0;
+
+ if (inst->domain == DECODER)
+ return -ENOTTY;
+
+ if (fival->index)
+ return -EINVAL;
+
+ ret = iris_venc_validate_format(inst, fival->pixel_format);
+ if (ret)
+ return ret;
+
+ if (!fival->width || !fival->height)
+ return -EINVAL;
+
+ caps = inst->core->iris_platform_data->inst_caps;
+ if (fival->width > caps->max_frame_width ||
+ fival->width < caps->min_frame_width ||
+ fival->height > caps->max_frame_height ||
+ fival->height < caps->min_frame_height)
+ return -EINVAL;
+
+ mbpf = NUM_MBS_PER_FRAME(fival->height, fival->width);
+ fps = DIV_ROUND_UP(core->iris_platform_data->max_core_mbps, mbpf);
+
+ fival->type = V4L2_FRMIVAL_TYPE_STEPWISE;
+ fival->stepwise.min.numerator = 1;
+ fival->stepwise.min.denominator = min_t(u32, fps, MAXIMUM_FPS);
+ fival->stepwise.max.numerator = 1;
+ fival->stepwise.max.denominator = 1;
+ fival->stepwise.step.numerator = 1;
+ fival->stepwise.step.denominator = MAXIMUM_FPS;
+
return 0;
}
static int iris_querycap(struct file *filp, void *fh, struct v4l2_capability *cap)
{
+ struct iris_inst *inst = iris_get_inst(filp);
+
strscpy(cap->driver, IRIS_DRV_NAME, sizeof(cap->driver));
- strscpy(cap->card, "Iris Decoder", sizeof(cap->card));
+
+ if (inst->domain == DECODER)
+ strscpy(cap->card, "Iris Decoder", sizeof(cap->card));
+ else
+ strscpy(cap->card, "Iris Encoder", sizeof(cap->card));
return 0;
}
static int iris_g_selection(struct file *filp, void *fh, struct v4l2_selection *s)
{
- struct iris_inst *inst = iris_get_inst(filp, NULL);
+ struct iris_inst *inst = iris_get_inst(filp);
- if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+ inst->domain == DECODER)
return -EINVAL;
- switch (s->target) {
- case V4L2_SEL_TGT_CROP_BOUNDS:
- case V4L2_SEL_TGT_CROP_DEFAULT:
- case V4L2_SEL_TGT_CROP:
- case V4L2_SEL_TGT_COMPOSE_BOUNDS:
- case V4L2_SEL_TGT_COMPOSE_PADDED:
- case V4L2_SEL_TGT_COMPOSE_DEFAULT:
- case V4L2_SEL_TGT_COMPOSE:
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
+ inst->domain == ENCODER)
+ return -EINVAL;
+
+ if (inst->domain == DECODER) {
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ case V4L2_SEL_TGT_COMPOSE_PADDED:
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ case V4L2_SEL_TGT_COMPOSE:
+ s->r.left = inst->crop.left;
+ s->r.top = inst->crop.top;
+ s->r.width = inst->crop.width;
+ s->r.height = inst->crop.height;
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else if (inst->domain == ENCODER) {
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ s->r.width = inst->fmt_src->fmt.pix_mp.width;
+ s->r.height = inst->fmt_src->fmt.pix_mp.height;
+ break;
+ case V4L2_SEL_TGT_CROP:
+ s->r.width = inst->crop.width;
+ s->r.height = inst->crop.height;
+ break;
+ default:
+ return -EINVAL;
+ }
s->r.left = inst->crop.left;
s->r.top = inst->crop.top;
- s->r.width = inst->crop.width;
- s->r.height = inst->crop.height;
- break;
- default:
- return -EINVAL;
}
return 0;
}
+static int iris_s_selection(struct file *filp, void *fh, struct v4l2_selection *s)
+{
+ struct iris_inst *inst = iris_get_inst(filp);
+
+ if (inst->domain == DECODER)
+ return -EINVAL;
+ else if (inst->domain == ENCODER)
+ return iris_venc_s_selection(inst, s);
+
+ return -EINVAL;
+}
+
static int iris_subscribe_event(struct v4l2_fh *fh, const struct v4l2_event_subscription *sub)
{
struct iris_inst *inst = container_of(fh, struct iris_inst, fh);
- return iris_vdec_subscribe_event(inst, sub);
+ if (inst->domain == DECODER)
+ return iris_vdec_subscribe_event(inst, sub);
+ else if (inst->domain == ENCODER)
+ return iris_venc_subscribe_event(inst, sub);
+
+ return -EINVAL;
+}
+
+static int iris_s_parm(struct file *filp, void *fh, struct v4l2_streamparm *a)
+{
+ struct iris_inst *inst = iris_get_inst(filp);
+
+ if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
+ a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return -EINVAL;
+
+ if (inst->domain == ENCODER)
+ return iris_venc_s_param(inst, a);
+ else
+ return -EINVAL;
+}
+
+static int iris_g_parm(struct file *filp, void *fh, struct v4l2_streamparm *a)
+{
+ struct iris_inst *inst = iris_get_inst(filp);
+
+ if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
+ a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return -EINVAL;
+
+ if (inst->domain == ENCODER)
+ return iris_venc_g_param(inst, a);
+ else
+ return -EINVAL;
}
static int iris_dec_cmd(struct file *filp, void *fh,
struct v4l2_decoder_cmd *dec)
{
- struct iris_inst *inst = iris_get_inst(filp, NULL);
+ struct iris_inst *inst = iris_get_inst(filp);
int ret = 0;
mutex_lock(&inst->lock);
@@ -424,6 +597,39 @@ unlock:
return ret;
}
+static int iris_enc_cmd(struct file *filp, void *fh,
+ struct v4l2_encoder_cmd *enc)
+{
+ struct iris_inst *inst = iris_get_inst(filp);
+ int ret = 0;
+
+ mutex_lock(&inst->lock);
+
+ ret = v4l2_m2m_ioctl_encoder_cmd(filp, fh, enc);
+ if (ret)
+ goto unlock;
+
+ if (inst->state == IRIS_INST_DEINIT)
+ goto unlock;
+
+ if (!iris_allow_cmd(inst, enc->cmd)) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ if (enc->cmd == V4L2_ENC_CMD_START)
+ ret = iris_venc_start_cmd(inst);
+ else if (enc->cmd == V4L2_ENC_CMD_STOP)
+ ret = iris_venc_stop_cmd(inst);
+ else
+ ret = -EINVAL;
+
+unlock:
+ mutex_unlock(&inst->lock);
+
+ return ret;
+}
+
static struct v4l2_file_operations iris_v4l2_file_ops = {
.owner = THIS_MODULE,
.open = iris_open,
@@ -443,7 +649,7 @@ static const struct vb2_ops iris_vb2_ops = {
.buf_queue = iris_vb2_buf_queue,
};
-static const struct v4l2_ioctl_ops iris_v4l2_ioctl_ops = {
+static const struct v4l2_ioctl_ops iris_v4l2_ioctl_ops_dec = {
.vidioc_enum_fmt_vid_cap = iris_enum_fmt,
.vidioc_enum_fmt_vid_out = iris_enum_fmt,
.vidioc_try_fmt_vid_cap_mplane = iris_try_fmt_vid_mplane,
@@ -471,9 +677,42 @@ static const struct v4l2_ioctl_ops iris_v4l2_ioctl_ops = {
.vidioc_decoder_cmd = iris_dec_cmd,
};
+static const struct v4l2_ioctl_ops iris_v4l2_ioctl_ops_enc = {
+ .vidioc_enum_fmt_vid_cap = iris_enum_fmt,
+ .vidioc_enum_fmt_vid_out = iris_enum_fmt,
+ .vidioc_try_fmt_vid_cap_mplane = iris_try_fmt_vid_mplane,
+ .vidioc_try_fmt_vid_out_mplane = iris_try_fmt_vid_mplane,
+ .vidioc_s_fmt_vid_cap_mplane = iris_s_fmt_vid_mplane,
+ .vidioc_s_fmt_vid_out_mplane = iris_s_fmt_vid_mplane,
+ .vidioc_g_fmt_vid_cap_mplane = iris_g_fmt_vid_mplane,
+ .vidioc_g_fmt_vid_out_mplane = iris_g_fmt_vid_mplane,
+ .vidioc_enum_framesizes = iris_enum_framesizes,
+ .vidioc_enum_frameintervals = iris_enum_frameintervals,
+ .vidioc_querycap = iris_querycap,
+ .vidioc_subscribe_event = iris_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+ .vidioc_g_selection = iris_g_selection,
+ .vidioc_s_selection = iris_s_selection,
+ .vidioc_s_parm = iris_s_parm,
+ .vidioc_g_parm = iris_g_parm,
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_remove_bufs = v4l2_m2m_ioctl_remove_bufs,
+ .vidioc_try_encoder_cmd = v4l2_m2m_ioctl_try_encoder_cmd,
+ .vidioc_encoder_cmd = iris_enc_cmd,
+};
+
void iris_init_ops(struct iris_core *core)
{
core->iris_v4l2_file_ops = &iris_v4l2_file_ops;
core->iris_vb2_ops = &iris_vb2_ops;
- core->iris_v4l2_ioctl_ops = &iris_v4l2_ioctl_ops;
+ core->iris_v4l2_ioctl_ops_dec = &iris_v4l2_ioctl_ops_dec;
+ core->iris_v4l2_ioctl_ops_enc = &iris_v4l2_ioctl_ops_enc;
}
diff --git a/drivers/media/platform/qcom/iris/iris_vpu2.c b/drivers/media/platform/qcom/iris/iris_vpu2.c
index 7cf1bfc352d3..de7d142316d2 100644
--- a/drivers/media/platform/qcom/iris/iris_vpu2.c
+++ b/drivers/media/platform/qcom/iris/iris_vpu2.c
@@ -34,6 +34,8 @@ static u64 iris_vpu2_calc_freq(struct iris_inst *inst, size_t data_size)
const struct vpu_ops iris_vpu2_ops = {
.power_off_hw = iris_vpu_power_off_hw,
+ .power_on_hw = iris_vpu_power_on_hw,
.power_off_controller = iris_vpu_power_off_controller,
+ .power_on_controller = iris_vpu_power_on_controller,
.calc_freq = iris_vpu2_calc_freq,
};
diff --git a/drivers/media/platform/qcom/iris/iris_vpu3x.c b/drivers/media/platform/qcom/iris/iris_vpu3x.c
index 9b7c9a1495ee..339776a0b467 100644
--- a/drivers/media/platform/qcom/iris/iris_vpu3x.c
+++ b/drivers/media/platform/qcom/iris/iris_vpu3x.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2025 Linaro Ltd
*/
#include <linux/iopoll.h>
@@ -19,8 +20,13 @@
#define WRAPPER_IRIS_CPU_NOC_LPI_CONTROL (WRAPPER_BASE_OFFS + 0x5C)
#define REQ_POWER_DOWN_PREP BIT(0)
#define WRAPPER_IRIS_CPU_NOC_LPI_STATUS (WRAPPER_BASE_OFFS + 0x60)
+#define NOC_LPI_STATUS_DONE BIT(0) /* Indicates the NOC handshake is complete */
+#define NOC_LPI_STATUS_DENY BIT(1) /* Indicates the NOC handshake is denied */
+#define NOC_LPI_STATUS_ACTIVE BIT(2) /* Indicates the NOC is active */
#define WRAPPER_CORE_CLOCK_CONFIG (WRAPPER_BASE_OFFS + 0x88)
#define CORE_CLK_RUN 0x0
+/* VPU v3.5 */
+#define WRAPPER_IRIS_VCODEC_VPU_WRAPPER_SPARE_0 (WRAPPER_BASE_OFFS + 0x78)
#define WRAPPER_TZ_CTL_AXI_CLOCK_CONFIG (WRAPPER_TZ_BASE_OFFS + 0x14)
#define CTL_AXI_CLK_HALT BIT(0)
@@ -52,6 +58,8 @@
#define AON_WRAPPER_MVP_NOC_CORE_CLK_CONTROL (AON_BASE_OFFS + 0x20)
#define NOC_HALT BIT(0)
#define AON_WRAPPER_SPARE (AON_BASE_OFFS + 0x28)
+#define AON_WRAPPER_MVP_VIDEO_CTL_NOC_LPI_CONTROL (AON_BASE_OFFS + 0x2C)
+#define AON_WRAPPER_MVP_VIDEO_CTL_NOC_LPI_STATUS (AON_BASE_OFFS + 0x30)
static bool iris_vpu3x_hw_power_collapsed(struct iris_core *core)
{
@@ -109,7 +117,9 @@ disable_power:
static void iris_vpu33_power_off_hardware(struct iris_core *core)
{
+ bool handshake_done = false, handshake_busy = false;
u32 reg_val = 0, value, i;
+ u32 count = 0;
int ret;
if (iris_vpu3x_hw_power_collapsed(core))
@@ -128,13 +138,36 @@ static void iris_vpu33_power_off_hardware(struct iris_core *core)
goto disable_power;
}
+ /* Retry up to 1000 times as recommended by hardware documentation */
+ do {
+ /* set MNoC to low power */
+ writel(REQ_POWER_DOWN_PREP, core->reg_base + AON_WRAPPER_MVP_NOC_LPI_CONTROL);
+
+ udelay(15);
+
+ value = readl(core->reg_base + AON_WRAPPER_MVP_NOC_LPI_STATUS);
+
+ handshake_done = value & NOC_LPI_STATUS_DONE;
+ handshake_busy = value & (NOC_LPI_STATUS_DENY | NOC_LPI_STATUS_ACTIVE);
+
+ if (handshake_done || !handshake_busy)
+ break;
+
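+		/* Handshake denied or still active: withdraw the request and retry */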
+ writel(0, core->reg_base + AON_WRAPPER_MVP_NOC_LPI_CONTROL);
+
+ udelay(15);
+
+ } while (++count < 1000);
+
+ if (!handshake_done && handshake_busy)
+ dev_err(core->dev, "LPI handshake timeout\n");
+
ret = readl_poll_timeout(core->reg_base + AON_WRAPPER_MVP_NOC_LPI_STATUS,
reg_val, reg_val & BIT(0), 200, 2000);
if (ret)
goto disable_power;
- /* set MNoC to low power, set PD_NOC_QREQ (bit 0) */
- writel(BIT(0), core->reg_base + AON_WRAPPER_MVP_NOC_LPI_CONTROL);
+ writel(0, core->reg_base + AON_WRAPPER_MVP_NOC_LPI_CONTROL);
writel(CORE_BRIDGE_SW_RESET | CORE_BRIDGE_HW_RESET_DISABLE,
core->reg_base + CPU_CS_AHB_BRIDGE_SYNC_RESET);
@@ -225,6 +258,158 @@ disable_power:
return 0;
}
+static int iris_vpu35_power_on_hw(struct iris_core *core)
+{
+ int ret;
+
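+	/*
+	 * Power-on order: hardware GDSC first, then the AXI bus clock, the
+	 * free-running clock and the core clock; finally hand the GDSC over
+	 * to hardware control.
+	 */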
+ ret = iris_enable_power_domains(core, core->pmdomain_tbl->pd_devs[IRIS_HW_POWER_DOMAIN]);
+ if (ret)
+ return ret;
+
+ ret = iris_prepare_enable_clock(core, IRIS_AXI_CLK);
+ if (ret)
+ goto err_disable_power;
+
+ ret = iris_prepare_enable_clock(core, IRIS_HW_FREERUN_CLK);
+ if (ret)
+ goto err_disable_axi_clk;
+
+ ret = iris_prepare_enable_clock(core, IRIS_HW_CLK);
+ if (ret)
+ goto err_disable_hw_free_clk;
+
+ ret = dev_pm_genpd_set_hwmode(core->pmdomain_tbl->pd_devs[IRIS_HW_POWER_DOMAIN], true);
+ if (ret)
+ goto err_disable_hw_clk;
+
+ return 0;
+
+err_disable_hw_clk:
+ iris_disable_unprepare_clock(core, IRIS_HW_CLK);
+err_disable_hw_free_clk:
+ iris_disable_unprepare_clock(core, IRIS_HW_FREERUN_CLK);
+err_disable_axi_clk:
+ iris_disable_unprepare_clock(core, IRIS_AXI_CLK);
+err_disable_power:
+ iris_disable_power_domains(core, core->pmdomain_tbl->pd_devs[IRIS_HW_POWER_DOMAIN]);
+
+ return ret;
+}
+
+static void iris_vpu35_power_off_hw(struct iris_core *core)
+{
+ iris_vpu33_power_off_hardware(core);
+
+ iris_disable_unprepare_clock(core, IRIS_HW_FREERUN_CLK);
+ iris_disable_unprepare_clock(core, IRIS_AXI_CLK);
+}
+
+static int iris_vpu35_power_off_controller(struct iris_core *core)
+{
+ u32 clk_rst_tbl_size = core->iris_platform_data->clk_rst_tbl_size;
+ unsigned int count = 0;
+ u32 val = 0;
+ bool handshake_done, handshake_busy;
+ int ret;
+
+ writel(MSK_SIGNAL_FROM_TENSILICA | MSK_CORE_POWER_ON, core->reg_base + CPU_CS_X2RPMH);
+
+ writel(REQ_POWER_DOWN_PREP, core->reg_base + WRAPPER_IRIS_CPU_NOC_LPI_CONTROL);
+
+ ret = readl_poll_timeout(core->reg_base + WRAPPER_IRIS_CPU_NOC_LPI_STATUS,
+ val, val & BIT(0), 200, 2000);
+ if (ret)
+ goto disable_power;
+
+ writel(0, core->reg_base + WRAPPER_IRIS_CPU_NOC_LPI_CONTROL);
+
+ /* Retry up to 1000 times as recommended by hardware documentation */
+ do {
+ /* set MNoC to low power */
+ writel(REQ_POWER_DOWN_PREP, core->reg_base + AON_WRAPPER_MVP_VIDEO_CTL_NOC_LPI_CONTROL);
+
+ udelay(15);
+
+ val = readl(core->reg_base + AON_WRAPPER_MVP_VIDEO_CTL_NOC_LPI_STATUS);
+
+ handshake_done = val & NOC_LPI_STATUS_DONE;
+ handshake_busy = val & (NOC_LPI_STATUS_DENY | NOC_LPI_STATUS_ACTIVE);
+
+ if (handshake_done || !handshake_busy)
+ break;
+
+ writel(0, core->reg_base + AON_WRAPPER_MVP_VIDEO_CTL_NOC_LPI_CONTROL);
+
+ udelay(15);
+
+ } while (++count < 1000);
+
+ if (!handshake_done && handshake_busy)
+ dev_err(core->dev, "LPI handshake timeout\n");
+
+ ret = readl_poll_timeout(core->reg_base + AON_WRAPPER_MVP_VIDEO_CTL_NOC_LPI_STATUS,
+ val, val & BIT(0), 200, 2000);
+ if (ret)
+ goto disable_power;
+
+ writel(0, core->reg_base + AON_WRAPPER_MVP_VIDEO_CTL_NOC_LPI_CONTROL);
+
+ writel(0, core->reg_base + WRAPPER_DEBUG_BRIDGE_LPI_CONTROL);
+
+ ret = readl_poll_timeout(core->reg_base + WRAPPER_DEBUG_BRIDGE_LPI_STATUS,
+ val, val == 0, 200, 2000);
+ if (ret)
+ goto disable_power;
+
+disable_power:
+ iris_disable_unprepare_clock(core, IRIS_CTRL_CLK);
+ iris_disable_unprepare_clock(core, IRIS_CTRL_FREERUN_CLK);
+ iris_disable_unprepare_clock(core, IRIS_AXI1_CLK);
+
+ iris_disable_power_domains(core, core->pmdomain_tbl->pd_devs[IRIS_CTRL_POWER_DOMAIN]);
+
+ reset_control_bulk_reset(clk_rst_tbl_size, core->resets);
+
+ return 0;
+}
+
+static int iris_vpu35_power_on_controller(struct iris_core *core)
+{
+ int ret;
+
+ ret = iris_enable_power_domains(core, core->pmdomain_tbl->pd_devs[IRIS_CTRL_POWER_DOMAIN]);
+ if (ret)
+ return ret;
+
+ ret = iris_prepare_enable_clock(core, IRIS_AXI1_CLK);
+ if (ret)
+ goto err_disable_power;
+
+ ret = iris_prepare_enable_clock(core, IRIS_CTRL_FREERUN_CLK);
+ if (ret)
+ goto err_disable_axi1_clk;
+
+ ret = iris_prepare_enable_clock(core, IRIS_CTRL_CLK);
+ if (ret)
+ goto err_disable_ctrl_free_clk;
+
+ return 0;
+
+err_disable_ctrl_free_clk:
+ iris_disable_unprepare_clock(core, IRIS_CTRL_FREERUN_CLK);
+err_disable_axi1_clk:
+ iris_disable_unprepare_clock(core, IRIS_AXI1_CLK);
+err_disable_power:
+ iris_disable_power_domains(core, core->pmdomain_tbl->pd_devs[IRIS_CTRL_POWER_DOMAIN]);
+
+ return ret;
+}
+
+static void iris_vpu35_program_bootup_registers(struct iris_core *core)
+{
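+	/* VPU v3.5 firmware expects SPARE_0 set at boot; its exact semantics are not documented here */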
+ writel(0x1, core->reg_base + WRAPPER_IRIS_VCODEC_VPU_WRAPPER_SPARE_0);
+}
+
static u64 iris_vpu3x_calculate_frequency(struct iris_inst *inst, size_t data_size)
{
struct platform_inst_caps *caps = inst->core->iris_platform_data->inst_caps;
@@ -264,12 +449,25 @@ static u64 iris_vpu3x_calculate_frequency(struct iris_inst *inst, size_t data_si
const struct vpu_ops iris_vpu3_ops = {
.power_off_hw = iris_vpu3_power_off_hardware,
+ .power_on_hw = iris_vpu_power_on_hw,
.power_off_controller = iris_vpu_power_off_controller,
+ .power_on_controller = iris_vpu_power_on_controller,
.calc_freq = iris_vpu3x_calculate_frequency,
};
const struct vpu_ops iris_vpu33_ops = {
.power_off_hw = iris_vpu33_power_off_hardware,
+ .power_on_hw = iris_vpu_power_on_hw,
.power_off_controller = iris_vpu33_power_off_controller,
+ .power_on_controller = iris_vpu_power_on_controller,
+ .calc_freq = iris_vpu3x_calculate_frequency,
+};
+
+const struct vpu_ops iris_vpu35_ops = {
+ .power_off_hw = iris_vpu35_power_off_hw,
+ .power_on_hw = iris_vpu35_power_on_hw,
+ .power_off_controller = iris_vpu35_power_off_controller,
+ .power_on_controller = iris_vpu35_power_on_controller,
+ .program_bootup_registers = iris_vpu35_program_bootup_registers,
.calc_freq = iris_vpu3x_calculate_frequency,
};
diff --git a/drivers/media/platform/qcom/iris/iris_vpu_buffer.c b/drivers/media/platform/qcom/iris/iris_vpu_buffer.c
index f92fd39fe310..4463be05ce16 100644
--- a/drivers/media/platform/qcom/iris/iris_vpu_buffer.c
+++ b/drivers/media/platform/qcom/iris/iris_vpu_buffer.c
@@ -5,6 +5,14 @@
#include "iris_instance.h"
#include "iris_vpu_buffer.h"
+#include "iris_hfi_gen1_defines.h"
+#include "iris_hfi_gen2_defines.h"
+
+#define HFI_MAX_COL_FRAME 6
+
+#ifndef SYSTEM_LAL_TILE10
+#define SYSTEM_LAL_TILE10 192
+#endif
static u32 size_h264d_hw_bin_buffer(u32 frame_width, u32 frame_height, u32 num_vpp_pipes)
{
@@ -548,6 +556,858 @@ static u32 iris_vpu_dec_scratch1_size(struct iris_inst *inst)
iris_vpu_dec_line_size(inst);
}
+static inline u32 size_bin_bitstream_enc(u32 width, u32 height,
+ u32 rc_type)
+{
+ u32 aligned_height = ALIGN(height, 32);
+ u32 aligned_width = ALIGN(width, 32);
+ u32 frame_size = width * height * 3;
+ u32 mbs_per_frame;
+
+	/*
+	 * Encoder output size calculation, on width/height aligned to 32.
+	 * frame_size starts at YUV size * 2 (width * height * 3), then:
+	 * resolution < 720p            : YUV size * 4
+	 * resolution >= 720p and <= 4K : YUV size / 2
+	 * resolution > 4K              : YUV size / 4
+	 */
+
+ mbs_per_frame = (ALIGN(aligned_height, 16) * ALIGN(aligned_width, 16)) / 256;
+
+ if (mbs_per_frame < NUM_MBS_720P)
+ frame_size = frame_size << 1;
+ else if (mbs_per_frame <= NUM_MBS_4K)
+ frame_size = frame_size >> 2;
+ else
+ frame_size = frame_size >> 3;
+
+ if (rc_type == HFI_RATE_CONTROL_OFF || rc_type == HFI_RATE_CONTROL_CQ ||
+ rc_type == HFI_RC_OFF || rc_type == HFI_RC_CQ)
+ frame_size = frame_size << 1;
+
+	/*
+	 * For opaque color formats the bit depth is only known with the
+	 * first ETB, and buffers already allocated for 8-bit would not
+	 * suffice for 10-bit, so size for 10-bit by default:
+	 * size = size * 1.25.
+	 */
+ frame_size *= 5;
+ frame_size /= 4;
+
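+	/*
+	 * Example: 1920x1080 is 8160 macroblocks, i.e. between 720p and 4K,
+	 * so frame_size = (1920 * 1080 * 3) >> 2 = 1555200 bytes, then
+	 * * 5 / 4 = 1944000, aligned up to 1945600 (~1.9 MB).
+	 */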
+ return ALIGN(frame_size, SZ_4K);
+}
+
+static inline u32 hfi_buffer_bin_enc(u32 width, u32 height,
+ u32 work_mode, u32 lcu_size,
+ u32 num_vpp_pipes, u32 rc_type)
+{
+ u32 sao_bin_buffer_size, padded_bin_size, bitstream_size;
+ u32 total_bitbin_buffers, size_single_pipe, bitbin_size;
+ u32 aligned_height = ALIGN(height, lcu_size);
+ u32 aligned_width = ALIGN(width, lcu_size);
+
+ bitstream_size = size_bin_bitstream_enc(width, height, rc_type);
+ bitstream_size = ALIGN(bitstream_size, 256);
+
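+	/* Two-stage encoding triple-buffers the bin, each ~1.7x the estimated bitstream */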
+ if (work_mode == STAGE_2) {
+ total_bitbin_buffers = 3;
+ bitbin_size = bitstream_size * 17 / 10;
+ bitbin_size = ALIGN(bitbin_size, 256);
+ } else {
+ total_bitbin_buffers = 1;
+ bitstream_size = aligned_width * aligned_height * 3;
+ bitbin_size = ALIGN(bitstream_size, 256);
+ }
+
+ if (num_vpp_pipes > 2)
+ size_single_pipe = bitbin_size / 2;
+ else
+ size_single_pipe = bitbin_size;
+
+ size_single_pipe = ALIGN(size_single_pipe, 256);
+ sao_bin_buffer_size = (64 * (((width + 32) * (height + 32)) >> 10)) + 384;
+ padded_bin_size = ALIGN(size_single_pipe, 256);
+ size_single_pipe = sao_bin_buffer_size + padded_bin_size;
+ size_single_pipe = ALIGN(size_single_pipe, 256);
+ bitbin_size = size_single_pipe * num_vpp_pipes;
+
+ return ALIGN(bitbin_size, 256) * total_bitbin_buffers + 512;
+}
+
+static u32 iris_vpu_enc_bin_size(struct iris_inst *inst)
+{
+ u32 num_vpp_pipes = inst->core->iris_platform_data->num_vpp_pipe;
+ u32 stage = inst->fw_caps[STAGE].value;
+ struct v4l2_format *f = inst->fmt_dst;
+ u32 height = f->fmt.pix_mp.height;
+ u32 width = f->fmt.pix_mp.width;
+ u32 lcu_size;
+
+ if (inst->codec == V4L2_PIX_FMT_HEVC)
+ lcu_size = 32;
+ else
+ lcu_size = 16;
+
+ return hfi_buffer_bin_enc(width, height, stage, lcu_size,
+ num_vpp_pipes, inst->hfi_rc_type);
+}
+
+static inline
+u32 hfi_buffer_comv_enc(u32 frame_width, u32 frame_height, u32 lcu_size,
+ u32 num_recon, u32 standard)
+{
+ u32 height_in_lcus = ((frame_height) + (lcu_size) - 1) / (lcu_size);
+ u32 width_in_lcus = ((frame_width) + (lcu_size) - 1) / (lcu_size);
+ u32 num_lcu_in_frame = width_in_lcus * height_in_lcus;
+ u32 mb_height = ((frame_height) + 15) >> 4;
+ u32 mb_width = ((frame_width) + 15) >> 4;
+ u32 size_colloc_mv, size_colloc_rc;
+
+ size_colloc_mv = (standard == HFI_CODEC_ENCODE_HEVC) ?
+ (16 * ((num_lcu_in_frame << 2) + 32)) :
+ (3 * 16 * (width_in_lcus * height_in_lcus + 32));
+ size_colloc_mv = ALIGN(size_colloc_mv, 256) * num_recon;
+ size_colloc_rc = (((mb_width + 7) >> 3) * 16 * 2 * mb_height);
+ size_colloc_rc = ALIGN(size_colloc_rc, 256) * HFI_MAX_COL_FRAME;
+
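+	/*
+	 * Example (HEVC 1920x1080, num_recon = 2): 60x34 = 2040 32x32 LCUs
+	 * give size_colloc_mv = 16 * ((2040 << 2) + 32) * 2 = 262144 bytes
+	 * and size_colloc_rc = ALIGN(15 * 16 * 2 * 68, 256) * 6 = 196608.
+	 */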
+ return size_colloc_mv + size_colloc_rc;
+}
+
+static u32 iris_vpu_enc_comv_size(struct iris_inst *inst)
+{
+ struct v4l2_format *f = inst->fmt_dst;
+ u32 height = f->fmt.pix_mp.height;
+ u32 width = f->fmt.pix_mp.width;
+ u32 num_recon = 1;
+ u32 lcu_size = 16;
+
+ if (inst->codec == V4L2_PIX_FMT_HEVC) {
+ lcu_size = 32;
+ return hfi_buffer_comv_enc(width, height, lcu_size,
+ num_recon + 1, HFI_CODEC_ENCODE_HEVC);
+ }
+
+ return hfi_buffer_comv_enc(width, height, lcu_size,
+ num_recon + 1, HFI_CODEC_ENCODE_AVC);
+}
+
+static inline
+u32 size_frame_rc_buf_size(u32 standard, u32 frame_height_coded,
+ u32 num_vpp_pipes_enc)
+{
+ u32 size = 0;
+
+ size = (standard == HFI_CODEC_ENCODE_HEVC) ?
+ (256 + 16 * (14 + ((((frame_height_coded) >> 5) + 7) >> 3))) :
+ (256 + 16 * (14 + ((((frame_height_coded) >> 4) + 7) >> 3)));
+ size *= 11;
+
+ if (num_vpp_pipes_enc > 1)
+ size = ALIGN(size, 256) * num_vpp_pipes_enc;
+
+ return ALIGN(size, 512) * HFI_MAX_COL_FRAME;
+}
+
+static inline
+u32 size_enc_slice_info_buf(u32 num_lcu_in_frame)
+{
+ return ALIGN((256 + (num_lcu_in_frame << 4)), 256);
+}
+
+static inline u32 enc_bitcnt_buf_size(u32 num_lcu_in_frame)
+{
+ return ALIGN((256 + (4 * (num_lcu_in_frame))), 256);
+}
+
+static inline u32 enc_bitmap_buf_size(u32 num_lcu_in_frame)
+{
+ return ALIGN((256 + ((num_lcu_in_frame) >> 3)), 256);
+}
+
+static inline u32 size_override_buf(u32 num_lcumb)
+{
+ return ALIGN(((16 * (((num_lcumb) + 7) >> 3))), 256) * 2;
+}
+
+static inline u32 size_ir_buf(u32 num_lcu_in_frame)
+{
+ return ALIGN((((((num_lcu_in_frame) << 1) + 7) & (~7)) * 3), 256);
+}
+
+static inline
+u32 size_linebuff_data(bool is_ten_bit, u32 frame_width_coded)
+{
+ return is_ten_bit ?
+ (((((10 * (frame_width_coded) + 1024) + (256 - 1)) &
+ (~(256 - 1))) * 1) +
+ (((((10 * (frame_width_coded) + 1024) >> 1) + (256 - 1)) &
+ (~(256 - 1))) * 2)) :
+ (((((8 * (frame_width_coded) + 1024) + (256 - 1)) &
+ (~(256 - 1))) * 1) +
+ (((((8 * (frame_width_coded) + 1024) >> 1) + (256 - 1)) &
+ (~(256 - 1))) * 2));
+}
+
+static inline
+u32 size_left_linebuff_ctrl(u32 standard, u32 frame_height_coded,
+ u32 num_vpp_pipes_enc)
+{
+ u32 size = 0;
+
+ size = standard == HFI_CODEC_ENCODE_HEVC ?
+ (((frame_height_coded) +
+ (32)) / 32 * 4 * 16) :
+ (((frame_height_coded) + 15) / 16 * 5 * 16);
+
+ if ((num_vpp_pipes_enc) > 1) {
+ size += 512;
+ size = ALIGN(size, 512) *
+ num_vpp_pipes_enc;
+ }
+
+ return ALIGN(size, 256);
+}
+
+static inline
+u32 size_left_linebuff_recon_pix(bool is_ten_bit, u32 frame_height_coded,
+ u32 num_vpp_pipes_enc)
+{
+ return (((is_ten_bit + 1) * 2 * (frame_height_coded) + 256) +
+ (256 << (num_vpp_pipes_enc - 1)) - 1) &
+ (~((256 << (num_vpp_pipes_enc - 1)) - 1)) * 1;
+}
+
+static inline
+u32 size_top_linebuff_ctrl_fe(u32 frame_width_coded, u32 standard)
+{
+ return standard == HFI_CODEC_ENCODE_HEVC ?
+ ALIGN((64 * ((frame_width_coded) >> 5)), 256) :
+ ALIGN((256 + 16 * ((frame_width_coded) >> 4)), 256);
+}
+
+static inline
+u32 size_left_linebuff_ctrl_fe(u32 frame_height_coded, u32 num_vpp_pipes_enc)
+{
+ return (((256 + 64 * ((frame_height_coded) >> 4)) +
+ (256 << (num_vpp_pipes_enc - 1)) - 1) &
+ (~((256 << (num_vpp_pipes_enc - 1)) - 1)) * 1) *
+ num_vpp_pipes_enc;
+}
+
+static inline
+u32 size_left_linebuff_metadata_recon_y(u32 frame_height_coded,
+ bool is_ten_bit,
+ u32 num_vpp_pipes_enc)
+{
+ return ALIGN(((256 + 64 * ((frame_height_coded) /
+ (8 * (is_ten_bit ? 4 : 8))))), 256) * num_vpp_pipes_enc;
+}
+
+static inline
+u32 size_left_linebuff_metadata_recon_uv(u32 frame_height_coded,
+ bool is_ten_bit,
+ u32 num_vpp_pipes_enc)
+{
+ return ALIGN(((256 + 64 * ((frame_height_coded) /
+ (4 * (is_ten_bit ? 4 : 8))))), 256) * num_vpp_pipes_enc;
+}
+
+static inline
+u32 size_linebuff_recon_pix(bool is_ten_bit, u32 frame_width_coded)
+{
+ return ALIGN(((is_ten_bit ? 3 : 2) * (frame_width_coded)), 256);
+}
+
+static inline
+u32 size_line_buf_ctrl(u32 frame_width_coded)
+{
+ return ALIGN(frame_width_coded, 256);
+}
+
+static inline
+u32 size_line_buf_ctrl_id2(u32 frame_width_coded)
+{
+ return ALIGN(frame_width_coded, 256);
+}
+
+static inline u32 size_line_buf_sde(u32 frame_width_coded)
+{
+ return ALIGN((256 + (16 * ((frame_width_coded) >> 4))), 256);
+}
+
+static inline
+u32 size_vpss_line_buf(u32 num_vpp_pipes_enc, u32 frame_height_coded,
+ u32 frame_width_coded)
+{
+ return ALIGN(((((((8192) >> 2) << 5) * (num_vpp_pipes_enc)) + 64) +
+ (((((max_t(u32, (frame_width_coded),
+ (frame_height_coded)) + 3) >> 2) << 5) + 256) * 16)), 256);
+}
+
+static inline
+u32 size_vpss_line_buf_vpu33(u32 num_vpp_pipes_enc, u32 frame_height_coded,
+ u32 frame_width_coded)
+{
+ u32 vpss_4tap_top, vpss_4tap_left, vpss_div2_top;
+ u32 vpss_div2_left, vpss_top_lb, vpss_left_lb;
+ u32 size_left, size_top;
+ u32 max_width_height;
+
+ max_width_height = max_t(u32, frame_width_coded, frame_height_coded);
+ vpss_4tap_top = ((((max_width_height * 2) + 3) >> 2) << 4) + 256;
+ vpss_4tap_left = (((8192 + 3) >> 2) << 5) + 64;
+ vpss_div2_top = (((max_width_height + 3) >> 2) << 4) + 256;
+ vpss_div2_left = ((((max_width_height * 2) + 3) >> 2) << 5) + 64;
+ vpss_top_lb = (frame_width_coded + 1) << 3;
+ vpss_left_lb = (frame_height_coded << 3) * num_vpp_pipes_enc;
+ size_left = (vpss_4tap_left + vpss_div2_left) * 2 * num_vpp_pipes_enc;
+ size_top = (vpss_4tap_top + vpss_div2_top) * 2;
+
+ return ALIGN(size_left + size_top + vpss_top_lb + vpss_left_lb, DMA_ALIGNMENT);
+}
+
+static inline
+u32 size_top_line_buf_first_stg_sao(u32 frame_width_coded)
+{
+ return ALIGN((16 * ((frame_width_coded) >> 5)), 256);
+}
+
+static inline
+u32 size_enc_ref_buffer(u32 frame_width, u32 frame_height)
+{
+ u32 u_chroma_buffer_height = ALIGN(frame_height >> 1, 32);
+ u32 u_buffer_height = ALIGN(frame_height, 32);
+ u32 u_buffer_width = ALIGN(frame_width, 32);
+
+ return (u_buffer_height + u_chroma_buffer_height) * u_buffer_width;
+}
+
+static inline
+u32 size_enc_ten_bit_ref_buffer(u32 frame_width, u32 frame_height)
+{
+ u32 ref_luma_stride_in_bytes = ((frame_width + SYSTEM_LAL_TILE10 - 1) / SYSTEM_LAL_TILE10) *
+ SYSTEM_LAL_TILE10;
+ u32 ref_buf_height = (frame_height + (32 - 1)) & (~(32 - 1));
+ u32 u_ref_stride, luma_size;
+ u32 ref_chrm_height_in_bytes;
+ u32 chroma_size;
+
+ u_ref_stride = 4 * (ref_luma_stride_in_bytes / 3);
+ u_ref_stride = (u_ref_stride + (128 - 1)) & (~(128 - 1));
+ luma_size = ref_buf_height * u_ref_stride;
+ luma_size = (luma_size + (4096 - 1)) & (~(4096 - 1));
+
+ ref_chrm_height_in_bytes = (((frame_height + 1) >> 1) + (32 - 1)) & (~(32 - 1));
+ chroma_size = u_ref_stride * ref_chrm_height_in_bytes;
+ chroma_size = (chroma_size + (4096 - 1)) & (~(4096 - 1));
+
+ return luma_size + chroma_size;
+}
+
+static inline
+u32 hfi_ubwc_calc_metadata_plane_stride(u32 frame_width,
+ u32 metadata_stride_multiple,
+ u32 tile_width_in_pels)
+{
+ return ALIGN(((frame_width + (tile_width_in_pels - 1)) / tile_width_in_pels),
+ metadata_stride_multiple);
+}
+
+static inline
+u32 hfi_ubwc_metadata_plane_bufheight(u32 frame_height,
+ u32 metadata_height_multiple,
+ u32 tile_height_in_pels)
+{
+ return ALIGN(((frame_height + (tile_height_in_pels - 1)) / tile_height_in_pels),
+ metadata_height_multiple);
+}
+
+static inline
+u32 hfi_ubwc_metadata_plane_buffer_size(u32 _metadata_stride, u32 _metadata_buf_height)
+{
+	return ALIGN(_metadata_stride * _metadata_buf_height, 4096);
+}
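+
+/*
+ * Example (NV12C, 1920x1080): stride = ALIGN(1920 / 32, 64) = 64,
+ * bufheight = ALIGN(DIV_ROUND_UP(1080, 8), 16) = 144, so each metadata
+ * plane takes ALIGN(64 * 144, 4096) = 12288 bytes.
+ */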
+
+static inline
+u32 hfi_buffer_non_comv_enc(u32 frame_width, u32 frame_height,
+ u32 num_vpp_pipes_enc, u32 lcu_size, u32 standard)
+{
+ u32 height_in_lcus = ((frame_height) + (lcu_size) - 1) / (lcu_size);
+ u32 width_in_lcus = ((frame_width) + (lcu_size) - 1) / (lcu_size);
+ u32 num_lcu_in_frame = width_in_lcus * height_in_lcus;
+ u32 frame_height_coded = height_in_lcus * (lcu_size);
+ u32 frame_width_coded = width_in_lcus * (lcu_size);
+ u32 num_lcumb, frame_rc_buf_size;
+
+ num_lcumb = (frame_height_coded / lcu_size) *
+ ((frame_width_coded + lcu_size * 8) / lcu_size);
+ frame_rc_buf_size = size_frame_rc_buf_size(standard, frame_height_coded,
+ num_vpp_pipes_enc);
+ return size_enc_slice_info_buf(num_lcu_in_frame) +
+ SIZE_SLICE_CMD_BUFFER +
+ SIZE_SPS_PPS_SLICE_HDR +
+ frame_rc_buf_size +
+ enc_bitcnt_buf_size(num_lcu_in_frame) +
+ enc_bitmap_buf_size(num_lcu_in_frame) +
+ SIZE_BSE_SLICE_CMD_BUF +
+ SIZE_LAMBDA_LUT +
+ size_override_buf(num_lcumb) +
+ size_ir_buf(num_lcu_in_frame);
+}
+
+static u32 iris_vpu_enc_non_comv_size(struct iris_inst *inst)
+{
+ u32 num_vpp_pipes = inst->core->iris_platform_data->num_vpp_pipe;
+ struct v4l2_format *f = inst->fmt_dst;
+ u32 height = f->fmt.pix_mp.height;
+ u32 width = f->fmt.pix_mp.width;
+ u32 lcu_size = 16;
+
+ if (inst->codec == V4L2_PIX_FMT_HEVC) {
+ lcu_size = 32;
+ return hfi_buffer_non_comv_enc(width, height, num_vpp_pipes,
+ lcu_size, HFI_CODEC_ENCODE_HEVC) +
+ SIZE_ONE_SLICE_BUF;
+ }
+
+ return hfi_buffer_non_comv_enc(width, height, num_vpp_pipes,
+ lcu_size, HFI_CODEC_ENCODE_AVC);
+}
+
+static inline
+u32 hfi_buffer_line_enc_base(u32 frame_width, u32 frame_height, bool is_ten_bit,
+ u32 num_vpp_pipes_enc, u32 lcu_size, u32 standard)
+{
+ u32 width_in_lcus = ((frame_width) + (lcu_size) - 1) / (lcu_size);
+ u32 height_in_lcus = ((frame_height) + (lcu_size) - 1) / (lcu_size);
+ u32 frame_height_coded = height_in_lcus * (lcu_size);
+ u32 frame_width_coded = width_in_lcus * (lcu_size);
+ u32 line_buff_data_size, left_line_buff_ctrl_size;
+ u32 left_line_buff_metadata_recon__uv__size;
+ u32 left_line_buff_metadata_recon__y__size;
+ u32 left_line_buff_recon_pix_size;
+ u32 top_line_buff_ctrl_fe_size;
+ u32 line_buff_recon_pix_size;
+
+ line_buff_data_size = size_linebuff_data(is_ten_bit, frame_width_coded);
+ left_line_buff_ctrl_size =
+ size_left_linebuff_ctrl(standard, frame_height_coded, num_vpp_pipes_enc);
+ left_line_buff_recon_pix_size =
+ size_left_linebuff_recon_pix(is_ten_bit, frame_height_coded,
+ num_vpp_pipes_enc);
+ top_line_buff_ctrl_fe_size =
+ size_top_linebuff_ctrl_fe(frame_width_coded, standard);
+ left_line_buff_metadata_recon__y__size =
+ size_left_linebuff_metadata_recon_y(frame_height_coded, is_ten_bit,
+ num_vpp_pipes_enc);
+ left_line_buff_metadata_recon__uv__size =
+ size_left_linebuff_metadata_recon_uv(frame_height_coded, is_ten_bit,
+ num_vpp_pipes_enc);
+ line_buff_recon_pix_size = size_linebuff_recon_pix(is_ten_bit, frame_width_coded);
+
+ return size_line_buf_ctrl(frame_width_coded) +
+ size_line_buf_ctrl_id2(frame_width_coded) +
+ line_buff_data_size +
+ left_line_buff_ctrl_size +
+ left_line_buff_recon_pix_size +
+ top_line_buff_ctrl_fe_size +
+ left_line_buff_metadata_recon__y__size +
+ left_line_buff_metadata_recon__uv__size +
+ line_buff_recon_pix_size +
+ size_left_linebuff_ctrl_fe(frame_height_coded, num_vpp_pipes_enc) +
+ size_line_buf_sde(frame_width_coded) +
+ size_top_line_buf_first_stg_sao(frame_width_coded);
+}
+
+static inline
+u32 hfi_buffer_line_enc(u32 frame_width, u32 frame_height, bool is_ten_bit,
+ u32 num_vpp_pipes_enc, u32 lcu_size, u32 standard)
+{
+ u32 width_in_lcus = ((frame_width) + (lcu_size) - 1) / (lcu_size);
+ u32 height_in_lcus = ((frame_height) + (lcu_size) - 1) / (lcu_size);
+ u32 frame_height_coded = height_in_lcus * (lcu_size);
+ u32 frame_width_coded = width_in_lcus * (lcu_size);
+
+ return hfi_buffer_line_enc_base(frame_width, frame_height, is_ten_bit,
+ num_vpp_pipes_enc, lcu_size, standard) +
+ size_vpss_line_buf(num_vpp_pipes_enc, frame_height_coded, frame_width_coded);
+}
+
+static inline
+u32 hfi_buffer_line_enc_vpu33(u32 frame_width, u32 frame_height, bool is_ten_bit,
+ u32 num_vpp_pipes_enc, u32 lcu_size, u32 standard)
+{
+ u32 width_in_lcus = ((frame_width) + (lcu_size) - 1) / (lcu_size);
+ u32 height_in_lcus = ((frame_height) + (lcu_size) - 1) / (lcu_size);
+ u32 frame_height_coded = height_in_lcus * (lcu_size);
+ u32 frame_width_coded = width_in_lcus * (lcu_size);
+
+ return hfi_buffer_line_enc_base(frame_width, frame_height, is_ten_bit,
+ num_vpp_pipes_enc, lcu_size, standard) +
+ size_vpss_line_buf_vpu33(num_vpp_pipes_enc, frame_height_coded,
+ frame_width_coded);
+}
+
+static u32 iris_vpu_enc_line_size(struct iris_inst *inst)
+{
+ u32 num_vpp_pipes = inst->core->iris_platform_data->num_vpp_pipe;
+ struct v4l2_format *f = inst->fmt_dst;
+ u32 height = f->fmt.pix_mp.height;
+ u32 width = f->fmt.pix_mp.width;
+ u32 lcu_size = 16;
+
+ if (inst->codec == V4L2_PIX_FMT_HEVC) {
+ lcu_size = 32;
+ return hfi_buffer_line_enc(width, height, 0, num_vpp_pipes,
+ lcu_size, HFI_CODEC_ENCODE_HEVC);
+ }
+
+ return hfi_buffer_line_enc(width, height, 0, num_vpp_pipes,
+ lcu_size, HFI_CODEC_ENCODE_AVC);
+}
+
+static u32 iris_vpu33_enc_line_size(struct iris_inst *inst)
+{
+ u32 num_vpp_pipes = inst->core->iris_platform_data->num_vpp_pipe;
+ struct v4l2_format *f = inst->fmt_dst;
+ u32 height = f->fmt.pix_mp.height;
+ u32 width = f->fmt.pix_mp.width;
+ u32 lcu_size = 16;
+
+ if (inst->codec == V4L2_PIX_FMT_HEVC) {
+ lcu_size = 32;
+ return hfi_buffer_line_enc_vpu33(width, height, 0, num_vpp_pipes,
+ lcu_size, HFI_CODEC_ENCODE_HEVC);
+ }
+
+ return hfi_buffer_line_enc_vpu33(width, height, 0, num_vpp_pipes,
+ lcu_size, HFI_CODEC_ENCODE_AVC);
+}
+
+static inline
+u32 hfi_buffer_dpb_enc(u32 frame_width, u32 frame_height, bool is_ten_bit)
+{
+ u32 metadata_stride, metadata_buf_height, meta_size_y, meta_size_c;
+ u32 ten_bit_ref_buf_size = 0, ref_buf_size = 0;
+ u32 size;
+
+ if (!is_ten_bit) {
+ ref_buf_size = size_enc_ref_buffer(frame_width, frame_height);
+ metadata_stride =
+ hfi_ubwc_calc_metadata_plane_stride(frame_width, 64,
+ HFI_COL_FMT_NV12C_Y_TILE_WIDTH);
+ metadata_buf_height =
+ hfi_ubwc_metadata_plane_bufheight(frame_height, 16,
+ HFI_COL_FMT_NV12C_Y_TILE_HEIGHT);
+ meta_size_y =
+ hfi_ubwc_metadata_plane_buffer_size(metadata_stride, metadata_buf_height);
+ meta_size_c =
+ hfi_ubwc_metadata_plane_buffer_size(metadata_stride, metadata_buf_height);
+ size = ref_buf_size + meta_size_y + meta_size_c;
+ } else {
+ ten_bit_ref_buf_size = size_enc_ten_bit_ref_buffer(frame_width, frame_height);
+ metadata_stride =
+ hfi_ubwc_calc_metadata_plane_stride(frame_width,
+ IRIS_METADATA_STRIDE_MULTIPLE,
+ HFI_COL_FMT_TP10C_Y_TILE_WIDTH);
+ metadata_buf_height =
+ hfi_ubwc_metadata_plane_bufheight(frame_height,
+ IRIS_METADATA_HEIGHT_MULTIPLE,
+ HFI_COL_FMT_TP10C_Y_TILE_HEIGHT);
+ meta_size_y =
+ hfi_ubwc_metadata_plane_buffer_size(metadata_stride, metadata_buf_height);
+ meta_size_c =
+ hfi_ubwc_metadata_plane_buffer_size(metadata_stride, metadata_buf_height);
+ size = ten_bit_ref_buf_size + meta_size_y + meta_size_c;
+ }
+
+ return size;
+}
+
+static u32 iris_vpu_enc_arp_size(struct iris_inst *inst)
+{
+ return HFI_BUFFER_ARP_ENC;
+}
+
+inline bool is_scaling_enabled(struct iris_inst *inst)
+{
+ return inst->crop.left != inst->compose.left ||
+ inst->crop.top != inst->compose.top ||
+ inst->crop.width != inst->compose.width ||
+ inst->crop.height != inst->compose.height;
+}
+
+static inline
+u32 hfi_buffer_vpss_enc(u32 dswidth, u32 dsheight, bool ds_enable,
+ u32 blur, bool is_ten_bit)
+{
+ if (ds_enable || blur)
+ return hfi_buffer_dpb_enc(dswidth, dsheight, is_ten_bit);
+
+ return 0;
+}
+
+static inline u32 hfi_buffer_scratch1_enc(u32 frame_width, u32 frame_height,
+ u32 lcu_size, u32 num_ref,
+ bool ten_bit, u32 num_vpp_pipes,
+ bool is_h265)
+{
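+	/*
+	 * Aggregate scratch sizing: combines the same line-buffer,
+	 * rate-control, collocated-MV and command-buffer components that
+	 * the helpers above compute individually.
+	 */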
+ u32 line_buf_ctrl_size, line_buf_data_size, leftline_buf_ctrl_size;
+ u32 line_buf_sde_size, sps_pps_slice_hdr, topline_buf_ctrl_size_FE;
+ u32 leftline_buf_ctrl_size_FE, line_buf_recon_pix_size;
+ u32 leftline_buf_recon_pix_size, lambda_lut_size, override_buffer_size;
+ u32 col_mv_buf_size, vpp_reg_buffer_size, ir_buffer_size;
+ u32 vpss_line_buf, leftline_buf_meta_recony, h265e_colrcbuf_size;
+ u32 h265e_framerc_bufsize, h265e_lcubitcnt_bufsize;
+ u32 h265e_lcubitmap_bufsize, se_stats_bufsize;
+ u32 bse_reg_buffer_size, bse_slice_cmd_buffer_size, slice_info_bufsize;
+ u32 line_buf_ctrl_size_buffid2, slice_cmd_buffer_size;
+ u32 width_lcu_num, height_lcu_num, width_coded, height_coded;
+ u32 frame_num_lcu, linebuf_meta_recon_uv, topline_bufsize_fe_1stg_sao;
+ u32 vpss_line_buffer_size_1;
+ u32 bit_depth, num_lcu_mb;
+
+ width_lcu_num = (frame_width + lcu_size - 1) / lcu_size;
+ height_lcu_num = (frame_height + lcu_size - 1) / lcu_size;
+ frame_num_lcu = width_lcu_num * height_lcu_num;
+ width_coded = width_lcu_num * lcu_size;
+ height_coded = height_lcu_num * lcu_size;
+ num_lcu_mb = (height_coded / lcu_size) *
+ ((width_coded + lcu_size * 8) / lcu_size);
+ slice_info_bufsize = 256 + (frame_num_lcu << 4);
+ slice_info_bufsize = ALIGN(slice_info_bufsize, 256);
+ line_buf_ctrl_size = ALIGN(width_coded, 256);
+ line_buf_ctrl_size_buffid2 = ALIGN(width_coded, 256);
+
+ bit_depth = ten_bit ? 10 : 8;
+ line_buf_data_size =
+ (((((bit_depth * width_coded + 1024) + (256 - 1)) &
+ (~(256 - 1))) * 1) +
+ (((((bit_depth * width_coded + 1024) >> 1) + (256 - 1)) &
+ (~(256 - 1))) * 2));
+
+ leftline_buf_ctrl_size = is_h265 ? ((height_coded + 32) / 32 * 4 * 16) :
+ ((height_coded + 15) / 16 * 5 * 16);
+
+ if (num_vpp_pipes > 1) {
+ leftline_buf_ctrl_size += 512;
+ leftline_buf_ctrl_size =
+ ALIGN(leftline_buf_ctrl_size, 512) * num_vpp_pipes;
+ }
+
+ leftline_buf_ctrl_size = ALIGN(leftline_buf_ctrl_size, 256);
+ leftline_buf_recon_pix_size =
+ (((ten_bit + 1) * 2 * (height_coded) + 256) +
+ (256 << (num_vpp_pipes - 1)) - 1) &
+ (~((256 << (num_vpp_pipes - 1)) - 1)) * 1;
+
+ topline_buf_ctrl_size_FE = is_h265 ? (64 * (width_coded >> 5)) :
+ (256 + 16 * (width_coded >> 4));
+ topline_buf_ctrl_size_FE = ALIGN(topline_buf_ctrl_size_FE, 256);
+ leftline_buf_ctrl_size_FE =
+ (((256 + 64 * (height_coded >> 4)) +
+ (256 << (num_vpp_pipes - 1)) - 1) &
+ (~((256 << (num_vpp_pipes - 1)) - 1)) * 1) *
+ num_vpp_pipes;
+ leftline_buf_meta_recony =
+ (256 + 64 * ((height_coded) / (8 * (ten_bit ? 4 : 8))));
+ leftline_buf_meta_recony = ALIGN(leftline_buf_meta_recony, 256);
+ leftline_buf_meta_recony = leftline_buf_meta_recony * num_vpp_pipes;
+ linebuf_meta_recon_uv =
+ (256 + 64 * ((height_coded) / (4 * (ten_bit ? 4 : 8))));
+ linebuf_meta_recon_uv = ALIGN(linebuf_meta_recon_uv, 256);
+ linebuf_meta_recon_uv = linebuf_meta_recon_uv * num_vpp_pipes;
+ line_buf_recon_pix_size = ((ten_bit ? 3 : 2) * width_coded);
+ line_buf_recon_pix_size = ALIGN(line_buf_recon_pix_size, 256);
+ slice_cmd_buffer_size = ALIGN(20480, 256);
+ sps_pps_slice_hdr = 2048 + 4096;
+ col_mv_buf_size =
+ is_h265 ? (16 * ((frame_num_lcu << 2) + 32)) :
+ (3 * 16 * (width_lcu_num * height_lcu_num + 32));
+ col_mv_buf_size = ALIGN(col_mv_buf_size, 256) * (num_ref + 1);
+ h265e_colrcbuf_size =
+ (((width_lcu_num + 7) >> 3) * 16 * 2 * height_lcu_num);
+ if (num_vpp_pipes > 1)
+ h265e_colrcbuf_size =
+ ALIGN(h265e_colrcbuf_size, 256) * num_vpp_pipes;
+
+ h265e_colrcbuf_size =
+ ALIGN(h265e_colrcbuf_size, 256) * HFI_MAX_COL_FRAME;
+ h265e_framerc_bufsize =
+ (is_h265) ?
+ (256 + 16 * (14 + (((height_coded >> 5) + 7) >> 3))) :
+ (256 + 16 * (14 + (((height_coded >> 4) + 7) >> 3)));
+ h265e_framerc_bufsize *= 6;
+ if (num_vpp_pipes > 1)
+ h265e_framerc_bufsize =
+ ALIGN(h265e_framerc_bufsize, 256) * num_vpp_pipes;
+
+ h265e_framerc_bufsize =
+ ALIGN(h265e_framerc_bufsize, 512) * HFI_MAX_COL_FRAME;
+ h265e_lcubitcnt_bufsize = 256 + 4 * frame_num_lcu;
+ h265e_lcubitcnt_bufsize = ALIGN(h265e_lcubitcnt_bufsize, 256);
+ h265e_lcubitmap_bufsize = 256 + (frame_num_lcu >> 3);
+ h265e_lcubitmap_bufsize = ALIGN(h265e_lcubitmap_bufsize, 256);
+ line_buf_sde_size = 256 + 16 * (width_coded >> 4);
+ line_buf_sde_size = ALIGN(line_buf_sde_size, 256);
+ if ((width_coded * height_coded) > (4096 * 2160))
+ se_stats_bufsize = 0;
+ else if ((width_coded * height_coded) > (1920 * 1088))
+ se_stats_bufsize = (40 * 4 * frame_num_lcu + 256 + 256);
+ else
+ se_stats_bufsize = (1024 * frame_num_lcu + 256 + 256);
+
+ se_stats_bufsize = ALIGN(se_stats_bufsize, 256) * 2;
+ bse_slice_cmd_buffer_size = (((8192 << 2) + 7) & (~7)) * 6;
+ bse_reg_buffer_size = (((512 << 3) + 7) & (~7)) * 4;
+ vpp_reg_buffer_size = (((2048 << 3) + 31) & (~31)) * 10;
+ lambda_lut_size = 256 * 11;
+ override_buffer_size = 16 * ((num_lcu_mb + 7) >> 3);
+ override_buffer_size = ALIGN(override_buffer_size, 256) * 2;
+ ir_buffer_size = (((frame_num_lcu << 1) + 7) & (~7)) * 3;
+ vpss_line_buffer_size_1 = (((8192 >> 2) << 5) * num_vpp_pipes) + 64;
+ vpss_line_buf =
+ (((((max(width_coded, height_coded) + 3) >> 2) << 5) + 256) *
+ 16) +
+ vpss_line_buffer_size_1;
+ topline_bufsize_fe_1stg_sao = 16 * (width_coded >> 5);
+ topline_bufsize_fe_1stg_sao = ALIGN(topline_bufsize_fe_1stg_sao, 256);
+
+ return line_buf_ctrl_size + line_buf_data_size +
+ line_buf_ctrl_size_buffid2 + leftline_buf_ctrl_size +
+ vpss_line_buf + col_mv_buf_size + topline_buf_ctrl_size_FE +
+ leftline_buf_ctrl_size_FE + line_buf_recon_pix_size +
+ leftline_buf_recon_pix_size + leftline_buf_meta_recony +
+ linebuf_meta_recon_uv + h265e_colrcbuf_size +
+ h265e_framerc_bufsize + h265e_lcubitcnt_bufsize +
+ h265e_lcubitmap_bufsize + line_buf_sde_size +
+ topline_bufsize_fe_1stg_sao + override_buffer_size +
+ bse_reg_buffer_size + vpp_reg_buffer_size + sps_pps_slice_hdr +
+ slice_cmd_buffer_size + bse_slice_cmd_buffer_size +
+ ir_buffer_size + slice_info_bufsize + lambda_lut_size +
+ se_stats_bufsize + 1024;
+}
+
+static u32 iris_vpu_enc_scratch1_size(struct iris_inst *inst)
+{
+ u32 num_vpp_pipes = inst->core->iris_platform_data->num_vpp_pipe;
+ struct v4l2_format *f = inst->fmt_dst;
+ u32 frame_height = f->fmt.pix_mp.height;
+ u32 frame_width = f->fmt.pix_mp.width;
+ u32 num_ref = 1;
+ u32 lcu_size;
+ bool is_h265;
+
+ if (inst->codec == V4L2_PIX_FMT_H264) {
+ lcu_size = 16;
+ is_h265 = false;
+ } else if (inst->codec == V4L2_PIX_FMT_HEVC) {
+ lcu_size = 32;
+ is_h265 = true;
+ } else {
+ return 0;
+ }
+
+ return hfi_buffer_scratch1_enc(frame_width, frame_height, lcu_size,
+ num_ref, false, num_vpp_pipes, is_h265);
+}
+
+static inline u32 ubwc_metadata_plane_stride(u32 width,
+ u32 metadata_stride_multi,
+ u32 tile_width_pels)
+{
+ return ALIGN(((width + (tile_width_pels - 1)) / tile_width_pels),
+ metadata_stride_multi);
+}
+
+static inline u32 ubwc_metadata_plane_bufheight(u32 height,
+ u32 metadata_height_multi,
+ u32 tile_height_pels)
+{
+ return ALIGN(((height + (tile_height_pels - 1)) / tile_height_pels),
+ metadata_height_multi);
+}
+
+static inline u32 ubwc_metadata_plane_buffer_size(u32 metadata_stride,
+ u32 metadata_buf_height)
+{
+ return ALIGN(metadata_stride * metadata_buf_height, SZ_4K);
+}
+
+static inline u32 hfi_buffer_scratch2_enc(u32 frame_width, u32 frame_height,
+ u32 num_ref, bool ten_bit)
+{
+ u32 aligned_width, aligned_height, chroma_height, ref_buf_height;
+ u32 metadata_stride, meta_buf_height, meta_size_y, meta_size_c;
+ u32 ref_luma_stride_bytes, ref_chroma_height_bytes;
+ u32 ref_buf_size, ref_stride;
+ u32 luma_size, chroma_size;
+ u32 size;
+
+ if (!ten_bit) {
+ aligned_height = ALIGN(frame_height, 32);
+ chroma_height = frame_height >> 1;
+ chroma_height = ALIGN(chroma_height, 32);
+ aligned_width = ALIGN(frame_width, 128);
+ metadata_stride =
+ ubwc_metadata_plane_stride(frame_width, 64, 32);
+ meta_buf_height =
+ ubwc_metadata_plane_bufheight(frame_height, 16, 8);
+ meta_size_y = ubwc_metadata_plane_buffer_size(metadata_stride,
+ meta_buf_height);
+ meta_size_c = ubwc_metadata_plane_buffer_size(metadata_stride,
+ meta_buf_height);
+ size = (aligned_height + chroma_height) * aligned_width +
+ meta_size_y + meta_size_c;
+ size = (size * (num_ref + 3)) + 4096;
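+		/*
+		 * Example (8-bit 1920x1080, num_ref = 1): pixel planes take
+		 * (1088 + 544) * 1920 = 3133440 bytes plus 2 * 12288 bytes of
+		 * UBWC metadata, so size = (3158016 * 4) + 4096, about 12 MB.
+		 */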
+ } else {
+ ref_buf_height = (frame_height + (32 - 1)) & (~(32 - 1));
+ ref_luma_stride_bytes = ((frame_width + 192 - 1) / 192) * 192;
+ ref_stride = 4 * (ref_luma_stride_bytes / 3);
+ ref_stride = (ref_stride + (128 - 1)) & (~(128 - 1));
+ luma_size = ref_buf_height * ref_stride;
+ ref_chroma_height_bytes =
+ (((frame_height + 1) >> 1) + (32 - 1)) & (~(32 - 1));
+ chroma_size = ref_stride * ref_chroma_height_bytes;
+ luma_size = (luma_size + (SZ_4K - 1)) & (~(SZ_4K - 1));
+ chroma_size = (chroma_size + (SZ_4K - 1)) & (~(SZ_4K - 1));
+ ref_buf_size = luma_size + chroma_size;
+ metadata_stride =
+ ubwc_metadata_plane_stride(frame_width, 64, 48);
+ meta_buf_height =
+ ubwc_metadata_plane_bufheight(frame_height, 16, 4);
+ meta_size_y = ubwc_metadata_plane_buffer_size(metadata_stride,
+ meta_buf_height);
+ meta_size_c = ubwc_metadata_plane_buffer_size(metadata_stride,
+ meta_buf_height);
+ size = ref_buf_size + meta_size_y + meta_size_c;
+ size = (size * (num_ref + 3)) + 4096;
+ }
+
+ return size;
+}
+
+static u32 iris_vpu_enc_scratch2_size(struct iris_inst *inst)
+{
+ struct v4l2_format *f = inst->fmt_dst;
+ u32 frame_width = f->fmt.pix_mp.width;
+ u32 frame_height = f->fmt.pix_mp.height;
+ u32 num_ref = 1;
+
+ return hfi_buffer_scratch2_enc(frame_width, frame_height, num_ref,
+ false);
+}
+
+static u32 iris_vpu_enc_vpss_size(struct iris_inst *inst)
+{
+ u32 ds_enable = is_scaling_enabled(inst);
+ struct v4l2_format *f = inst->fmt_dst;
+ u32 height = f->fmt.pix_mp.height;
+ u32 width = f->fmt.pix_mp.width;
+
+ return hfi_buffer_vpss_enc(width, height, ds_enable, 0, 0);
+}
+
static int output_min_count(struct iris_inst *inst)
{
int output_min_count = 4;
@@ -571,10 +1431,10 @@ struct iris_vpu_buf_type_handle {
u32 (*handle)(struct iris_inst *inst);
};
-int iris_vpu_buf_size(struct iris_inst *inst, enum iris_buffer_type buffer_type)
+u32 iris_vpu_buf_size(struct iris_inst *inst, enum iris_buffer_type buffer_type)
{
- const struct iris_vpu_buf_type_handle *buf_type_handle_arr;
- u32 size = 0, buf_type_handle_size, i;
+ const struct iris_vpu_buf_type_handle *buf_type_handle_arr = NULL;
+ u32 size = 0, buf_type_handle_size = 0, i;
static const struct iris_vpu_buf_type_handle dec_internal_buf_type_handle[] = {
{BUF_BIN, iris_vpu_dec_bin_size },
@@ -586,8 +1446,24 @@ int iris_vpu_buf_size(struct iris_inst *inst, enum iris_buffer_type buffer_type)
{BUF_SCRATCH_1, iris_vpu_dec_scratch1_size },
};
- buf_type_handle_size = ARRAY_SIZE(dec_internal_buf_type_handle);
- buf_type_handle_arr = dec_internal_buf_type_handle;
+ static const struct iris_vpu_buf_type_handle enc_internal_buf_type_handle[] = {
+ {BUF_BIN, iris_vpu_enc_bin_size },
+ {BUF_COMV, iris_vpu_enc_comv_size },
+ {BUF_NON_COMV, iris_vpu_enc_non_comv_size },
+ {BUF_LINE, iris_vpu_enc_line_size },
+ {BUF_ARP, iris_vpu_enc_arp_size },
+ {BUF_VPSS, iris_vpu_enc_vpss_size },
+ {BUF_SCRATCH_1, iris_vpu_enc_scratch1_size },
+ {BUF_SCRATCH_2, iris_vpu_enc_scratch2_size },
+ };
+
+ if (inst->domain == DECODER) {
+ buf_type_handle_size = ARRAY_SIZE(dec_internal_buf_type_handle);
+ buf_type_handle_arr = dec_internal_buf_type_handle;
+ } else if (inst->domain == ENCODER) {
+ buf_type_handle_size = ARRAY_SIZE(enc_internal_buf_type_handle);
+ buf_type_handle_arr = enc_internal_buf_type_handle;
+ }
for (i = 0; i < buf_type_handle_size; i++) {
if (buf_type_handle_arr[i].type == buffer_type) {
@@ -599,6 +1475,34 @@ int iris_vpu_buf_size(struct iris_inst *inst, enum iris_buffer_type buffer_type)
return size;
}
+u32 iris_vpu33_buf_size(struct iris_inst *inst, enum iris_buffer_type buffer_type)
+{
+ u32 size = 0, i;
+
+ static const struct iris_vpu_buf_type_handle enc_internal_buf_type_handle[] = {
+ {BUF_BIN, iris_vpu_enc_bin_size },
+ {BUF_COMV, iris_vpu_enc_comv_size },
+ {BUF_NON_COMV, iris_vpu_enc_non_comv_size },
+ {BUF_LINE, iris_vpu33_enc_line_size },
+ {BUF_ARP, iris_vpu_enc_arp_size },
+ {BUF_VPSS, iris_vpu_enc_vpss_size },
+ {BUF_SCRATCH_1, iris_vpu_enc_scratch1_size },
+ {BUF_SCRATCH_2, iris_vpu_enc_scratch2_size },
+ };
+
+ if (inst->domain == DECODER)
+ return iris_vpu_buf_size(inst, buffer_type);
+
+ for (i = 0; i < ARRAY_SIZE(enc_internal_buf_type_handle); i++) {
+ if (enc_internal_buf_type_handle[i].type == buffer_type) {
+ size = enc_internal_buf_type_handle[i].handle(inst);
+ break;
+ }
+ }
+
+ return size;
+}
+
static u32 internal_buffer_count(struct iris_inst *inst,
enum iris_buffer_type buffer_type)
{
@@ -628,7 +1532,10 @@ int iris_vpu_buf_count(struct iris_inst *inst, enum iris_buffer_type buffer_type
case BUF_INPUT:
return MIN_BUFFERS;
case BUF_OUTPUT:
- return output_min_count(inst);
+ if (inst->domain == ENCODER)
+ return MIN_BUFFERS;
+ else
+ return output_min_count(inst);
case BUF_BIN:
case BUF_COMV:
case BUF_NON_COMV:
@@ -636,6 +1543,9 @@ int iris_vpu_buf_count(struct iris_inst *inst, enum iris_buffer_type buffer_type
case BUF_PERSIST:
return internal_buffer_count(inst, buffer_type);
case BUF_SCRATCH_1:
+ case BUF_SCRATCH_2:
+ case BUF_VPSS:
+ case BUF_ARP:
return 1; /* internal buffer count needed by firmware is 1 */
case BUF_DPB:
return iris_vpu_dpb_count(inst);
diff --git a/drivers/media/platform/qcom/iris/iris_vpu_buffer.h b/drivers/media/platform/qcom/iris/iris_vpu_buffer.h
index ee95fd20b794..04f0b7400a1e 100644
--- a/drivers/media/platform/qcom/iris/iris_vpu_buffer.h
+++ b/drivers/media/platform/qcom/iris/iris_vpu_buffer.h
@@ -41,6 +41,7 @@ struct iris_inst;
#define SIZE_SLIST_BUF_H265 (BIT(10))
#define H265_DISPLAY_BUF_SIZE (3072)
#define H265_NUM_FRM_INFO (48)
+#define SIZE_ONE_SLICE_BUF 256
#define VP9_NUM_FRAME_INFO_BUF 32
#define VP9_NUM_PROBABILITY_TABLE_BUF (VP9_NUM_FRAME_INFO_BUF + 4)
@@ -80,6 +81,26 @@ struct iris_inst;
#define MAX_PE_NBR_DATA_LCU64_LINE_BUFFER_SIZE 384
#define MAX_FE_NBR_DATA_LUMA_LINE_BUFFER_SIZE 640
+#define SIZE_SLICE_CMD_BUFFER (ALIGN(20480, 256))
+#define SIZE_SPS_PPS_SLICE_HDR (2048 + 4096)
+#define SIZE_BSE_SLICE_CMD_BUF ((((8192 << 2) + 7) & (~7)) * 3)
+#define SIZE_LAMBDA_LUT (256 * 11)
+
+#define HFI_COL_FMT_NV12C_Y_TILE_HEIGHT (8)
+#define HFI_COL_FMT_NV12C_Y_TILE_WIDTH (32)
+#define HFI_COL_FMT_TP10C_Y_TILE_HEIGHT (4)
+#define HFI_COL_FMT_TP10C_Y_TILE_WIDTH (48)
+
+#define IRIS_METADATA_STRIDE_MULTIPLE 64
+#define IRIS_METADATA_HEIGHT_MULTIPLE 16
+
+#define HFI_BUFFER_ARP_ENC 204800
+
+#define MAX_WIDTH 4096
+#define MAX_HEIGHT 2304
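+/* Frame-size thresholds in 16x16 macroblocks; 720p uses a 16-aligned height of 736 */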
+#define NUM_MBS_4K (DIV_ROUND_UP(MAX_WIDTH, 16) * DIV_ROUND_UP(MAX_HEIGHT, 16))
+#define NUM_MBS_720P (((ALIGN(1280, 16)) >> 4) * ((ALIGN(736, 16)) >> 4))
+
static inline u32 size_h264d_lb_fe_top_data(u32 frame_width)
{
return MAX_FE_NBR_DATA_LUMA_LINE_BUFFER_SIZE * ALIGN(frame_width, 16) * 3;
@@ -125,7 +146,8 @@ static inline u32 size_h264d_qp(u32 frame_width, u32 frame_height)
return DIV_ROUND_UP(frame_width, 64) * DIV_ROUND_UP(frame_height, 64) * 128;
}
-int iris_vpu_buf_size(struct iris_inst *inst, enum iris_buffer_type buffer_type);
+u32 iris_vpu_buf_size(struct iris_inst *inst, enum iris_buffer_type buffer_type);
+u32 iris_vpu33_buf_size(struct iris_inst *inst, enum iris_buffer_type buffer_type);
int iris_vpu_buf_count(struct iris_inst *inst, enum iris_buffer_type buffer_type);
#endif
diff --git a/drivers/media/platform/qcom/iris/iris_vpu_common.c b/drivers/media/platform/qcom/iris/iris_vpu_common.c
index 268e45acaa7c..bb98950e018f 100644
--- a/drivers/media/platform/qcom/iris/iris_vpu_common.c
+++ b/drivers/media/platform/qcom/iris/iris_vpu_common.c
@@ -84,6 +84,7 @@ static void iris_vpu_interrupt_init(struct iris_core *core)
static void iris_vpu_setup_ucregion_memory_map(struct iris_core *core)
{
u32 queue_size, value;
+ const struct vpu_ops *vpu_ops = core->iris_platform_data->vpu_ops;
/* Iris hardware requires 4K queue alignment */
queue_size = ALIGN(sizeof(struct iris_hfi_queue_table_header) +
@@ -105,6 +106,9 @@ static void iris_vpu_setup_ucregion_memory_map(struct iris_core *core)
value = (u32)core->sfr_daddr + core->iris_platform_data->core_arch;
writel(value, core->reg_base + SFR_ADDR);
}
+
+ if (vpu_ops->program_bootup_registers)
+ vpu_ops->program_bootup_registers(core);
}
int iris_vpu_boot_firmware(struct iris_core *core)
@@ -271,7 +275,7 @@ void iris_vpu_power_off(struct iris_core *core)
disable_irq_nosync(core->irq);
}
-static int iris_vpu_power_on_controller(struct iris_core *core)
+int iris_vpu_power_on_controller(struct iris_core *core)
{
u32 rst_tbl_size = core->iris_platform_data->clk_rst_tbl_size;
int ret;
@@ -302,7 +306,7 @@ err_disable_power:
return ret;
}
-static int iris_vpu_power_on_hw(struct iris_core *core)
+int iris_vpu_power_on_hw(struct iris_core *core)
{
int ret;
@@ -337,11 +341,11 @@ int iris_vpu_power_on(struct iris_core *core)
if (ret)
goto err;
- ret = iris_vpu_power_on_controller(core);
+ ret = core->iris_platform_data->vpu_ops->power_on_controller(core);
if (ret)
goto err_unvote_icc;
- ret = iris_vpu_power_on_hw(core);
+ ret = core->iris_platform_data->vpu_ops->power_on_hw(core);
if (ret)
goto err_power_off_ctrl;
@@ -359,7 +363,7 @@ int iris_vpu_power_on(struct iris_core *core)
return 0;
err_power_off_ctrl:
- iris_vpu_power_off_controller(core);
+ core->iris_platform_data->vpu_ops->power_off_controller(core);
err_unvote_icc:
iris_unset_icc_bw(core);
err:
diff --git a/drivers/media/platform/qcom/iris/iris_vpu_common.h b/drivers/media/platform/qcom/iris/iris_vpu_common.h
index 93b7fa27be3b..d636e287457a 100644
--- a/drivers/media/platform/qcom/iris/iris_vpu_common.h
+++ b/drivers/media/platform/qcom/iris/iris_vpu_common.h
@@ -11,10 +11,14 @@ struct iris_core;
extern const struct vpu_ops iris_vpu2_ops;
extern const struct vpu_ops iris_vpu3_ops;
extern const struct vpu_ops iris_vpu33_ops;
+extern const struct vpu_ops iris_vpu35_ops;
struct vpu_ops {
void (*power_off_hw)(struct iris_core *core);
+ int (*power_on_hw)(struct iris_core *core);
int (*power_off_controller)(struct iris_core *core);
+ int (*power_on_controller)(struct iris_core *core);
+ void (*program_bootup_registers)(struct iris_core *core);
u64 (*calc_freq)(struct iris_inst *inst, size_t data_size);
};
@@ -23,6 +27,8 @@ void iris_vpu_raise_interrupt(struct iris_core *core);
void iris_vpu_clear_interrupt(struct iris_core *core);
int iris_vpu_watchdog(struct iris_core *core, u32 intr_status);
int iris_vpu_prepare_pc(struct iris_core *core);
+int iris_vpu_power_on_controller(struct iris_core *core);
+int iris_vpu_power_on_hw(struct iris_core *core);
int iris_vpu_power_on(struct iris_core *core);
int iris_vpu_power_off_controller(struct iris_core *core);
void iris_vpu_power_off_hw(struct iris_core *core);
diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
index 4c049c694d9c..abf959b8f3a6 100644
--- a/drivers/media/platform/qcom/venus/core.c
+++ b/drivers/media/platform/qcom/venus/core.c
@@ -254,14 +254,19 @@ err:
static void venus_assign_register_offsets(struct venus_core *core)
{
- if (IS_IRIS2(core) || IS_IRIS2_1(core)) {
- core->vbif_base = core->base + VBIF_BASE;
+ if (IS_IRIS2(core) || IS_IRIS2_1(core) || IS_AR50_LITE(core)) {
core->cpu_base = core->base + CPU_BASE_V6;
core->cpu_cs_base = core->base + CPU_CS_BASE_V6;
core->cpu_ic_base = core->base + CPU_IC_BASE_V6;
core->wrapper_base = core->base + WRAPPER_BASE_V6;
core->wrapper_tz_base = core->base + WRAPPER_TZ_BASE_V6;
- core->aon_base = core->base + AON_BASE_V6;
+ if (IS_AR50_LITE(core)) {
+ core->vbif_base = NULL;
+ core->aon_base = NULL;
+ } else {
+ core->vbif_base = core->base + VBIF_BASE;
+ core->aon_base = core->base + AON_BASE_V6;
+ }
} else {
core->vbif_base = core->base + VBIF_BASE;
core->cpu_base = core->base + CPU_BASE;
@@ -448,24 +453,18 @@ static int venus_probe(struct platform_device *pdev)
if (ret < 0)
goto err_runtime_disable;
- if (core->res->dec_nodename || core->res->enc_nodename) {
- ret = venus_add_dynamic_nodes(core);
- if (ret)
- goto err_runtime_disable;
- }
-
- ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
- if (ret)
- goto err_remove_dynamic_nodes;
-
ret = venus_firmware_init(core);
if (ret)
- goto err_of_depopulate;
+ goto err_runtime_disable;
ret = venus_boot(core);
if (ret)
goto err_firmware_deinit;
+ ret = venus_firmware_cfg(core);
+ if (ret)
+ goto err_venus_shutdown;
+
ret = hfi_core_resume(core, true);
if (ret)
goto err_venus_shutdown;
@@ -474,34 +473,48 @@ static int venus_probe(struct platform_device *pdev)
if (ret)
goto err_venus_shutdown;
- ret = venus_enumerate_codecs(core, VIDC_SESSION_TYPE_DEC);
+ ret = venus_firmware_check(core);
if (ret)
goto err_core_deinit;
+ if (core->res->dec_nodename || core->res->enc_nodename) {
+ ret = venus_add_dynamic_nodes(core);
+ if (ret)
+ goto err_core_deinit;
+ }
+
+ ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
+ if (ret)
+ goto err_remove_dynamic_nodes;
+
+ ret = venus_enumerate_codecs(core, VIDC_SESSION_TYPE_DEC);
+ if (ret)
+ goto err_of_depopulate;
+
ret = venus_enumerate_codecs(core, VIDC_SESSION_TYPE_ENC);
if (ret)
- goto err_core_deinit;
+ goto err_of_depopulate;
ret = pm_runtime_put_sync(dev);
if (ret) {
pm_runtime_get_noresume(dev);
- goto err_core_deinit;
+ goto err_of_depopulate;
}
venus_dbgfs_init(core);
return 0;
+err_of_depopulate:
+ of_platform_depopulate(dev);
+err_remove_dynamic_nodes:
+ venus_remove_dynamic_nodes(core);
err_core_deinit:
hfi_core_deinit(core, false);
err_venus_shutdown:
venus_shutdown(core);
err_firmware_deinit:
venus_firmware_deinit(core);
-err_of_depopulate:
- of_platform_depopulate(dev);
-err_remove_dynamic_nodes:
- venus_remove_dynamic_nodes(core);
err_runtime_disable:
pm_runtime_put_noidle(dev);
pm_runtime_disable(dev);
@@ -596,7 +609,7 @@ err_cpucfg_path:
return ret;
}
-void venus_close_common(struct venus_inst *inst)
+void venus_close_common(struct venus_inst *inst, struct file *filp)
{
/*
* Make sure we don't have IRQ/IRQ-thread currently running
@@ -607,7 +620,7 @@ void venus_close_common(struct venus_inst *inst)
v4l2_m2m_ctx_release(inst->m2m_ctx);
v4l2_m2m_release(inst->m2m_dev);
hfi_session_destroy(inst);
- v4l2_fh_del(&inst->fh);
+ v4l2_fh_del(&inst->fh, filp);
v4l2_fh_exit(&inst->fh);
v4l2_ctrl_handler_free(&inst->ctrl_handler);
@@ -1057,15 +1070,65 @@ static const struct venus_resources sc7280_res = {
.enc_nodename = "video-encoder",
};
+static const struct bw_tbl qcm2290_bw_table_dec[] = {
+ { 352800, 597000, 0, 746000, 0 }, /* 1080p@30 + 720p@30 */
+ { 244800, 413000, 0, 516000, 0 }, /* 1080p@30 */
+ { 216000, 364000, 0, 454000, 0 }, /* 720p@60 */
+ { 108000, 182000, 0, 227000, 0 }, /* 720p@30 */
+};
+
+static const struct bw_tbl qcm2290_bw_table_enc[] = {
+ { 352800, 396000, 0, 0, 0 }, /* 1080p@30 + 720p@30 */
+ { 244800, 275000, 0, 0, 0 }, /* 1080p@30 */
+ { 216000, 242000, 0, 0, 0 }, /* 720p@60 */
+ { 108000, 121000, 0, 0, 0 }, /* 720p@30 */
+};
+
+static const struct firmware_version min_fw = {
+ .major = 6, .minor = 0, .rev = 55,
+};
+
+static const struct venus_resources qcm2290_res = {
+ .bw_tbl_dec = qcm2290_bw_table_dec,
+ .bw_tbl_dec_size = ARRAY_SIZE(qcm2290_bw_table_dec),
+ .bw_tbl_enc = qcm2290_bw_table_enc,
+ .bw_tbl_enc_size = ARRAY_SIZE(qcm2290_bw_table_enc),
+ .clks = { "core", "iface", "bus", "throttle" },
+ .clks_num = 4,
+ .vcodec0_clks = { "vcodec0_core", "vcodec0_bus" },
+ .vcodec_clks_num = 2,
+ .vcodec_pmdomains = (const char *[]) { "venus", "vcodec0" },
+ .vcodec_pmdomains_num = 2,
+ .opp_pmdomain = (const char *[]) { "cx" },
+ .vcodec_num = 1,
+ .hfi_version = HFI_VERSION_4XX,
+ .vpu_version = VPU_VERSION_AR50_LITE,
+ .max_load = 352800,
+ .num_vpp_pipes = 1,
+ .vmem_id = VIDC_RESOURCE_NONE,
+ .vmem_size = 0,
+ .vmem_addr = 0,
+ .cp_start = 0,
+ .cp_size = 0x70800000,
+ .cp_nonpixel_start = 0x1000000,
+ .cp_nonpixel_size = 0x24800000,
+ .dma_mask = 0xe0000000 - 1,
+ .fwname = "qcom/venus-6.0/venus.mbn",
+ .dec_nodename = "video-decoder",
+ .enc_nodename = "video-encoder",
+ .min_fw = &min_fw,
+};
+
static const struct of_device_id venus_dt_match[] = {
{ .compatible = "qcom,msm8916-venus", .data = &msm8916_res, },
{ .compatible = "qcom,msm8996-venus", .data = &msm8996_res, },
{ .compatible = "qcom,msm8998-venus", .data = &msm8998_res, },
+ { .compatible = "qcom,qcm2290-venus", .data = &qcm2290_res, },
+ { .compatible = "qcom,sc7180-venus", .data = &sc7180_res, },
+ { .compatible = "qcom,sc7280-venus", .data = &sc7280_res, },
{ .compatible = "qcom,sdm660-venus", .data = &sdm660_res, },
{ .compatible = "qcom,sdm845-venus", .data = &sdm845_res, },
{ .compatible = "qcom,sdm845-venus-v2", .data = &sdm845_res_v2, },
- { .compatible = "qcom,sc7180-venus", .data = &sc7180_res, },
- { .compatible = "qcom,sc7280-venus", .data = &sc7280_res, },
{ .compatible = "qcom,sm8250-venus", .data = &sm8250_res, },
{ }
};
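Editor's note: the match table above gains the QCM2290 entry and is re-sorted alphabetically. A kernel-style sketch of how a matched entry's .data pointer reaches the driver; the surrounding probe code is assumed, not quoted from core.c:

#include <linux/of_device.h>
#include <linux/platform_device.h>

/* Kernel-style sketch, not standalone: fetch the per-SoC resource table. */
static int sketch_probe(struct platform_device *pdev)
{
	/* Returns the .data of the matching venus_dt_match entry, e.g.
	 * &qcm2290_res for a "qcom,qcm2290-venus" node. */
	const struct venus_resources *res =
		of_device_get_match_data(&pdev->dev);

	if (!res)
		return -ENODEV;

	return 0;
}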
diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h
index 5b1ba1c69adb..7506f5d0f609 100644
--- a/drivers/media/platform/qcom/venus/core.h
+++ b/drivers/media/platform/qcom/venus/core.h
@@ -58,6 +58,12 @@ enum vpu_version {
VPU_VERSION_IRIS2_1,
};
+struct firmware_version {
+ u32 major;
+ u32 minor;
+ u32 rev;
+};
+
struct venus_resources {
u64 dma_mask;
const struct freq_tbl *freq_tbl;
@@ -94,6 +100,7 @@ struct venus_resources {
const char *fwname;
const char *enc_nodename;
const char *dec_nodename;
+ const struct firmware_version *min_fw;
};
enum venus_fmt {
@@ -231,11 +238,7 @@ struct venus_core {
unsigned int core0_usage_count;
unsigned int core1_usage_count;
struct dentry *root;
- struct venus_img_version {
- u32 major;
- u32 minor;
- u32 rev;
- } venus_ver;
+ struct firmware_version venus_ver;
unsigned long dump_core;
struct of_changeset *ocs;
bool hwmode_dev;
@@ -530,12 +533,17 @@ struct venus_inst {
#define IS_IRIS2(core) ((core)->res->vpu_version == VPU_VERSION_IRIS2)
#define IS_IRIS2_1(core) ((core)->res->vpu_version == VPU_VERSION_IRIS2_1)
+static inline bool is_lite(struct venus_core *core)
+{
+ return IS_AR50_LITE(core);
+}
+
#define ctrl_to_inst(ctrl) \
container_of((ctrl)->handler, struct venus_inst, ctrl_handler)
static inline struct venus_inst *to_inst(struct file *filp)
{
- return container_of(filp->private_data, struct venus_inst, fh);
+ return container_of(file_to_v4l2_fh(filp), struct venus_inst, fh);
}
static inline void *to_hfi_priv(struct venus_core *core)
@@ -573,5 +581,5 @@ is_fw_rev_or_older(struct venus_core *core, u32 vmajor, u32 vminor, u32 vrev)
(core)->venus_ver.rev <= vrev);
}
-void venus_close_common(struct venus_inst *inst);
+void venus_close_common(struct venus_inst *inst, struct file *filp);
#endif
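Editor's note: to_inst() above recovers the enclosing venus_inst from the v4l2_fh returned by file_to_v4l2_fh(). A standalone userspace sketch of the container_of arithmetic behind it (the kernel macro additionally type-checks the member):

#include <assert.h>
#include <stddef.h>

/* Simplified container_of: step back from a member to its parent. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fh { int unused; };			/* stand-in for struct v4l2_fh */
struct inst { int id; struct fh fh; };		/* stand-in for venus_inst */

int main(void)
{
	struct inst inst = { .id = 42 };
	struct fh *fhp = &inst.fh;	/* what file_to_v4l2_fh() would yield */

	assert(container_of(fhp, struct inst, fh)->id == 42);
	return 0;
}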
diff --git a/drivers/media/platform/qcom/venus/firmware.c b/drivers/media/platform/qcom/venus/firmware.c
index 66a18830e66d..af0ac40bec9b 100644
--- a/drivers/media/platform/qcom/venus/firmware.c
+++ b/drivers/media/platform/qcom/venus/firmware.c
@@ -30,7 +30,7 @@ static void venus_reset_cpu(struct venus_core *core)
u32 fw_size = core->fw.mapped_mem_size;
void __iomem *wrapper_base;
- if (IS_IRIS2_1(core))
+ if (IS_IRIS2(core) || IS_IRIS2_1(core))
wrapper_base = core->wrapper_tz_base;
else
wrapper_base = core->wrapper_base;
@@ -42,7 +42,7 @@ static void venus_reset_cpu(struct venus_core *core)
writel(fw_size, wrapper_base + WRAPPER_NONPIX_START_ADDR);
writel(fw_size, wrapper_base + WRAPPER_NONPIX_END_ADDR);
- if (IS_IRIS2_1(core)) {
+ if (IS_IRIS2(core) || IS_IRIS2_1(core)) {
/* Bring XTSS out of reset */
writel(0, wrapper_base + WRAPPER_TZ_XTSS_SW_RESET);
} else {
@@ -68,7 +68,7 @@ int venus_set_hw_state(struct venus_core *core, bool resume)
if (resume) {
venus_reset_cpu(core);
} else {
- if (IS_IRIS2_1(core))
+ if (IS_IRIS2(core) || IS_IRIS2_1(core))
writel(WRAPPER_XTSS_SW_RESET_BIT,
core->wrapper_tz_base + WRAPPER_TZ_XTSS_SW_RESET);
else
@@ -136,8 +136,8 @@ static int venus_load_fw(struct venus_core *core, const char *fwname,
ret = qcom_mdt_load(dev, mdt, fwname, VENUS_PAS_ID,
mem_va, *mem_phys, *mem_size, NULL);
else
- ret = qcom_mdt_load_no_init(dev, mdt, fwname, VENUS_PAS_ID,
- mem_va, *mem_phys, *mem_size, NULL);
+ ret = qcom_mdt_load_no_init(dev, mdt, fwname, mem_va,
+ *mem_phys, *mem_size, NULL);
memunmap(mem_va);
err_release_fw:
@@ -181,7 +181,7 @@ static int venus_shutdown_no_tz(struct venus_core *core)
void __iomem *wrapper_base = core->wrapper_base;
void __iomem *wrapper_tz_base = core->wrapper_tz_base;
- if (IS_IRIS2_1(core)) {
+ if (IS_IRIS2(core) || IS_IRIS2_1(core)) {
/* Assert the reset to XTSS */
reg = readl(wrapper_tz_base + WRAPPER_TZ_XTSS_SW_RESET);
reg |= WRAPPER_XTSS_SW_RESET_BIT;
@@ -207,6 +207,16 @@ static int venus_shutdown_no_tz(struct venus_core *core)
return 0;
}
+int venus_firmware_cfg(struct venus_core *core)
+{
+ void __iomem *cpu_cs_base = core->cpu_cs_base;
+
+ if (IS_AR50_LITE(core))
+ writel(CPU_CS_VCICMD_ARP_OFF, cpu_cs_base + CPU_CS_VCICMD);
+
+ return 0;
+}
+
int venus_boot(struct venus_core *core)
{
struct device *dev = core->dev;
@@ -280,6 +290,26 @@ int venus_shutdown(struct venus_core *core)
return ret;
}
+int venus_firmware_check(struct venus_core *core)
+{
+ const struct firmware_version *req = core->res->min_fw;
+ const struct firmware_version *run = &core->venus_ver;
+
+ if (!req)
+ return 0;
+
+ if (!is_fw_rev_or_newer(core, req->major, req->minor, req->rev))
+ goto error;
+
+ return 0;
+error:
+	dev_err(core->dev, "firmware version %u.%u.%u is older than required %u.%u.%u\n",
+		run->major, run->minor, run->rev,
+		req->major, req->minor, req->rev);
+
+ return -EINVAL;
+}
+
int venus_firmware_init(struct venus_core *core)
{
struct platform_device_info info;
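Editor's note: venus_firmware_check() above rejects firmware older than the platform's min_fw triple. A standalone sketch of the lexicographic comparison; the driver's is_fw_rev_or_newer() helper is assumed to behave equivalently:

#include <stdbool.h>
#include <stdio.h>

struct fw_ver { unsigned int major, minor, rev; };

/* True when 'run' is the same as or newer than 'req'. */
static bool ver_at_least(struct fw_ver run, struct fw_ver req)
{
	if (run.major != req.major)
		return run.major > req.major;
	if (run.minor != req.minor)
		return run.minor > req.minor;
	return run.rev >= req.rev;
}

int main(void)
{
	struct fw_ver req = { 6, 0, 55 };	/* qcm2290's min_fw */
	struct fw_ver run = { 6, 0, 54 };	/* one revision too old */

	printf("%s\n", ver_at_least(run, req) ? "ok" : "too old");
	return 0;
}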
diff --git a/drivers/media/platform/qcom/venus/firmware.h b/drivers/media/platform/qcom/venus/firmware.h
index aaccd847fa30..87e1d922b369 100644
--- a/drivers/media/platform/qcom/venus/firmware.h
+++ b/drivers/media/platform/qcom/venus/firmware.h
@@ -9,6 +9,8 @@ struct device;
int venus_firmware_init(struct venus_core *core);
void venus_firmware_deinit(struct venus_core *core);
+int venus_firmware_check(struct venus_core *core);
+int venus_firmware_cfg(struct venus_core *core);
int venus_boot(struct venus_core *core);
int venus_shutdown(struct venus_core *core);
int venus_set_hw_state(struct venus_core *core, bool suspend);
diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c
index 8295542e1a7c..2e4363f82231 100644
--- a/drivers/media/platform/qcom/venus/helpers.c
+++ b/drivers/media/platform/qcom/venus/helpers.c
@@ -1715,11 +1715,17 @@ int venus_helper_session_init(struct venus_inst *inst)
if (ret)
return ret;
- inst->clk_data.vpp_freq = hfi_platform_get_codec_vpp_freq(version, codec,
+ inst->clk_data.vpp_freq = hfi_platform_get_codec_vpp_freq(inst->core,
+ version,
+ codec,
session_type);
- inst->clk_data.vsp_freq = hfi_platform_get_codec_vsp_freq(version, codec,
+ inst->clk_data.vsp_freq = hfi_platform_get_codec_vsp_freq(inst->core,
+ version,
+ codec,
session_type);
- inst->clk_data.low_power_freq = hfi_platform_get_codec_lp_freq(version, codec,
+ inst->clk_data.low_power_freq = hfi_platform_get_codec_lp_freq(inst->core,
+ version,
+ codec,
session_type);
return 0;
diff --git a/drivers/media/platform/qcom/venus/hfi_msgs.c b/drivers/media/platform/qcom/venus/hfi_msgs.c
index cf0d97cbc463..47b99d5b5af7 100644
--- a/drivers/media/platform/qcom/venus/hfi_msgs.c
+++ b/drivers/media/platform/qcom/venus/hfi_msgs.c
@@ -277,7 +277,12 @@ static void hfi_sys_init_done(struct venus_core *core, struct venus_inst *inst,
done:
core->error = error;
- complete(&core->done);
+ /*
+	 * When core_init must validate the firmware version, signalling the
+	 * completion is deferred until the version string has been retrieved.
+ */
+ if (!core->res->min_fw)
+ complete(&core->done);
}
static void
@@ -328,6 +333,10 @@ done:
if (!IS_ERR(smem_tbl_ptr) && smem_blk_sz >= SMEM_IMG_OFFSET_VENUS + VER_STR_SZ)
memcpy(smem_tbl_ptr + SMEM_IMG_OFFSET_VENUS,
img_ver, VER_STR_SZ);
+
+	/* core_init may still be waiting on the version check; complete now */
+ if (core->res->min_fw)
+ complete(&core->done);
}
static void hfi_sys_property_info(struct venus_core *core,
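Editor's note: the two hfi_msgs.c hunks above move the complete() call between handlers. A kernel-style sketch of the waiter side they pair with; venus_hfi_core_init() as the trigger name and the one-second timeout are assumptions for illustration, not quoted code:

reinit_completion(&core->done);
ret = venus_hfi_core_init(core);	/* assumed name; sends SYS_INIT */
if (ret)
	return ret;
/*
 * With res->min_fw set, core->done is signalled from the image-version
 * handler instead of hfi_sys_init_done(), so core->venus_ver is already
 * populated when the wait returns.
 */
if (!wait_for_completion_timeout(&core->done, msecs_to_jiffies(1000)))
	return -ETIMEDOUT;
ret = venus_firmware_check(core);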
diff --git a/drivers/media/platform/qcom/venus/hfi_parser.c b/drivers/media/platform/qcom/venus/hfi_parser.c
index 1b3db2caa99f..92765f9c8873 100644
--- a/drivers/media/platform/qcom/venus/hfi_parser.c
+++ b/drivers/media/platform/qcom/venus/hfi_parser.c
@@ -282,7 +282,7 @@ static int hfi_platform_parser(struct venus_core *core, struct venus_inst *inst)
return ret;
if (plat->capabilities)
- caps = plat->capabilities(&entries);
+ caps = plat->capabilities(core, &entries);
if (!caps || !entries || !count)
return -EINVAL;
diff --git a/drivers/media/platform/qcom/venus/hfi_platform.c b/drivers/media/platform/qcom/venus/hfi_platform.c
index 643e5aa138f5..cde7f93045ac 100644
--- a/drivers/media/platform/qcom/venus/hfi_platform.c
+++ b/drivers/media/platform/qcom/venus/hfi_platform.c
@@ -21,7 +21,9 @@ const struct hfi_platform *hfi_platform_get(enum hfi_version version)
}
unsigned long
-hfi_platform_get_codec_vpp_freq(enum hfi_version version, u32 codec, u32 session_type)
+hfi_platform_get_codec_vpp_freq(struct venus_core *core,
+ enum hfi_version version, u32 codec,
+ u32 session_type)
{
const struct hfi_platform *plat;
unsigned long freq = 0;
@@ -31,13 +33,15 @@ hfi_platform_get_codec_vpp_freq(enum hfi_version version, u32 codec, u32 session
return 0;
if (plat->codec_vpp_freq)
- freq = plat->codec_vpp_freq(session_type, codec);
+ freq = plat->codec_vpp_freq(core, session_type, codec);
return freq;
}
unsigned long
-hfi_platform_get_codec_vsp_freq(enum hfi_version version, u32 codec, u32 session_type)
+hfi_platform_get_codec_vsp_freq(struct venus_core *core,
+ enum hfi_version version, u32 codec,
+ u32 session_type)
{
const struct hfi_platform *plat;
unsigned long freq = 0;
@@ -47,13 +51,15 @@ hfi_platform_get_codec_vsp_freq(enum hfi_version version, u32 codec, u32 session
return 0;
if (plat->codec_vpp_freq)
- freq = plat->codec_vsp_freq(session_type, codec);
+ freq = plat->codec_vsp_freq(core, session_type, codec);
return freq;
}
unsigned long
-hfi_platform_get_codec_lp_freq(enum hfi_version version, u32 codec, u32 session_type)
+hfi_platform_get_codec_lp_freq(struct venus_core *core,
+ enum hfi_version version, u32 codec,
+ u32 session_type)
{
const struct hfi_platform *plat;
unsigned long freq = 0;
@@ -63,13 +69,14 @@ hfi_platform_get_codec_lp_freq(enum hfi_version version, u32 codec, u32 session_
return 0;
if (plat->codec_lp_freq)
- freq = plat->codec_lp_freq(session_type, codec);
+ freq = plat->codec_lp_freq(core, session_type, codec);
return freq;
}
int
-hfi_platform_get_codecs(struct venus_core *core, u32 *enc_codecs, u32 *dec_codecs, u32 *count)
+hfi_platform_get_codecs(struct venus_core *core, u32 *enc_codecs,
+ u32 *dec_codecs, u32 *count)
{
const struct hfi_platform *plat;
@@ -78,7 +85,7 @@ hfi_platform_get_codecs(struct venus_core *core, u32 *enc_codecs, u32 *dec_codec
return -EINVAL;
if (plat->codecs)
- plat->codecs(enc_codecs, dec_codecs, count);
+ plat->codecs(core, enc_codecs, dec_codecs, count);
if (IS_IRIS2_1(core)) {
*enc_codecs &= ~HFI_VIDEO_CODEC_VP8;
diff --git a/drivers/media/platform/qcom/venus/hfi_platform.h b/drivers/media/platform/qcom/venus/hfi_platform.h
index ec89a90a8129..5e4f8013a6b1 100644
--- a/drivers/media/platform/qcom/venus/hfi_platform.h
+++ b/drivers/media/platform/qcom/venus/hfi_platform.h
@@ -47,11 +47,16 @@ struct hfi_platform_codec_freq_data {
};
struct hfi_platform {
- unsigned long (*codec_vpp_freq)(u32 session_type, u32 codec);
- unsigned long (*codec_vsp_freq)(u32 session_type, u32 codec);
- unsigned long (*codec_lp_freq)(u32 session_type, u32 codec);
- void (*codecs)(u32 *enc_codecs, u32 *dec_codecs, u32 *count);
- const struct hfi_plat_caps *(*capabilities)(unsigned int *entries);
+ unsigned long (*codec_vpp_freq)(struct venus_core *core,
+ u32 session_type, u32 codec);
+ unsigned long (*codec_vsp_freq)(struct venus_core *core,
+ u32 session_type, u32 codec);
+ unsigned long (*codec_lp_freq)(struct venus_core *core,
+ u32 session_type, u32 codec);
+ void (*codecs)(struct venus_core *core, u32 *enc_codecs,
+ u32 *dec_codecs, u32 *count);
+ const struct hfi_plat_caps *(*capabilities)(struct venus_core *core,
+ unsigned int *entries);
int (*bufreq)(struct hfi_plat_buffers_params *params, u32 session_type,
u32 buftype, struct hfi_buffer_requirements *bufreq);
};
@@ -60,12 +65,15 @@ extern const struct hfi_platform hfi_plat_v4;
extern const struct hfi_platform hfi_plat_v6;
const struct hfi_platform *hfi_platform_get(enum hfi_version version);
-unsigned long hfi_platform_get_codec_vpp_freq(enum hfi_version version, u32 codec,
- u32 session_type);
-unsigned long hfi_platform_get_codec_vsp_freq(enum hfi_version version, u32 codec,
- u32 session_type);
-unsigned long hfi_platform_get_codec_lp_freq(enum hfi_version version, u32 codec,
- u32 session_type);
-int hfi_platform_get_codecs(struct venus_core *core, u32 *enc_codecs, u32 *dec_codecs,
- u32 *count);
+unsigned long hfi_platform_get_codec_vpp_freq(struct venus_core *core,
+ enum hfi_version version,
+ u32 codec, u32 session_type);
+unsigned long hfi_platform_get_codec_vsp_freq(struct venus_core *core,
+ enum hfi_version version,
+ u32 codec, u32 session_type);
+unsigned long hfi_platform_get_codec_lp_freq(struct venus_core *core,
+ enum hfi_version version,
+ u32 codec, u32 session_type);
+int hfi_platform_get_codecs(struct venus_core *core, u32 *enc_codecs,
+ u32 *dec_codecs, u32 *count);
#endif
diff --git a/drivers/media/platform/qcom/venus/hfi_platform_v4.c b/drivers/media/platform/qcom/venus/hfi_platform_v4.c
index e3f0a90a567b..cda888b56b5d 100644
--- a/drivers/media/platform/qcom/venus/hfi_platform_v4.c
+++ b/drivers/media/platform/qcom/venus/hfi_platform_v4.c
@@ -2,6 +2,7 @@
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*/
+#include "core.h"
#include "hfi_platform.h"
static const struct hfi_plat_caps caps[] = {
@@ -245,20 +246,150 @@ static const struct hfi_plat_caps caps[] = {
.num_fmts = 4,
} };
-static const struct hfi_plat_caps *get_capabilities(unsigned int *entries)
+static const struct hfi_plat_caps caps_lite[] = {
{
- *entries = ARRAY_SIZE(caps);
- return caps;
+ .codec = HFI_VIDEO_CODEC_H264,
+ .domain = VIDC_SESSION_TYPE_DEC,
+ .caps[0] = {HFI_CAPABILITY_FRAME_WIDTH, 128, 1920, 1},
+ .caps[1] = {HFI_CAPABILITY_FRAME_HEIGHT, 128, 1920, 1},
+ .caps[2] = {HFI_CAPABILITY_MBS_PER_FRAME, 64, 8160, 1},
+	.caps[3] = {HFI_CAPABILITY_BITRATE, 1, 60000000, 1},
+ .caps[4] = {HFI_CAPABILITY_MBS_PER_SECOND, 64, 244800, 1},
+ .caps[5] = {HFI_CAPABILITY_FRAMERATE, 1, 120, 1},
+ .caps[6] = {HFI_CAPABILITY_MAX_VIDEOCORES, 0, 1, 1},
+ .num_caps = 7,
+	.pl[0] = {HFI_H264_PROFILE_BASELINE, HFI_H264_LEVEL_5},
+ .pl[1] = {HFI_H264_PROFILE_MAIN, HFI_H264_LEVEL_5},
+ .pl[2] = {HFI_H264_PROFILE_HIGH, HFI_H264_LEVEL_5},
+ .pl[3] = {HFI_H264_PROFILE_CONSTRAINED_BASE, HFI_H264_LEVEL_5},
+ .pl[4] = {HFI_H264_PROFILE_CONSTRAINED_HIGH, HFI_H264_LEVEL_5},
+ .num_pl = 5,
+ .fmts[0] = {HFI_BUFFER_OUTPUT, HFI_COLOR_FORMAT_NV12_UBWC},
+ .fmts[1] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12_UBWC},
+ .fmts[2] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12},
+ .fmts[3] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV21},
+ .num_fmts = 4,
+}, {
+ .codec = HFI_VIDEO_CODEC_HEVC,
+ .domain = VIDC_SESSION_TYPE_DEC,
+ .caps[0] = {HFI_CAPABILITY_FRAME_WIDTH, 128, 1920, 1},
+ .caps[1] = {HFI_CAPABILITY_FRAME_HEIGHT, 128, 1920, 1},
+ .caps[2] = {HFI_CAPABILITY_MBS_PER_FRAME, 64, 8160, 1},
+	.caps[3] = {HFI_CAPABILITY_BITRATE, 1, 60000000, 1},
+ .caps[4] = {HFI_CAPABILITY_MBS_PER_SECOND, 64, 244800, 1},
+ .caps[5] = {HFI_CAPABILITY_FRAMERATE, 1, 120, 1},
+ .caps[6] = {HFI_CAPABILITY_MAX_VIDEOCORES, 0, 1, 1},
+ .num_caps = 7,
+	.pl[0] = {HFI_HEVC_PROFILE_MAIN, HFI_HEVC_LEVEL_5 | HFI_HEVC_TIER_HIGH0 << 28},
+	.pl[1] = {HFI_HEVC_PROFILE_MAIN10, HFI_HEVC_LEVEL_5 | HFI_HEVC_TIER_HIGH0 << 28},
+ .num_pl = 2,
+ .fmts[0] = {HFI_BUFFER_OUTPUT, HFI_COLOR_FORMAT_NV12_UBWC},
+ .fmts[1] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12_UBWC},
+ .fmts[2] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12},
+ .fmts[3] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV21},
+ .num_fmts = 4,
+}, {
+ .codec = HFI_VIDEO_CODEC_VP9,
+ .domain = VIDC_SESSION_TYPE_DEC,
+ .caps[0] = {HFI_CAPABILITY_FRAME_WIDTH, 128, 1920, 1},
+ .caps[1] = {HFI_CAPABILITY_FRAME_HEIGHT, 128, 1920, 1},
+ .caps[2] = {HFI_CAPABILITY_MBS_PER_FRAME, 64, 8160, 1},
+	.caps[3] = {HFI_CAPABILITY_BITRATE, 1, 60000000, 1},
+ .caps[4] = {HFI_CAPABILITY_MBS_PER_SECOND, 64, 244800, 1},
+ .caps[5] = {HFI_CAPABILITY_FRAMERATE, 1, 120, 1},
+ .caps[6] = {HFI_CAPABILITY_MAX_VIDEOCORES, 0, 1, 1},
+ .num_caps = 7,
+ .pl[0] = {HFI_VP9_PROFILE_P0, 200},
+ .pl[1] = {HFI_VP9_PROFILE_P2_10B, 200},
+ .num_pl = 2,
+ .fmts[0] = {HFI_BUFFER_OUTPUT, HFI_COLOR_FORMAT_NV12_UBWC},
+ .fmts[1] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12_UBWC},
+ .fmts[2] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12},
+ .fmts[3] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV21},
+ .num_fmts = 4,
+}, {
+ .codec = HFI_VIDEO_CODEC_H264,
+ .domain = VIDC_SESSION_TYPE_ENC,
+ .caps[0] = {HFI_CAPABILITY_FRAME_WIDTH, 128, 1920, 1},
+ .caps[1] = {HFI_CAPABILITY_FRAME_HEIGHT, 128, 1920, 1},
+ .caps[2] = {HFI_CAPABILITY_MBS_PER_FRAME, 64, 8160, 1},
+	.caps[3] = {HFI_CAPABILITY_BITRATE, 1, 60000000, 1},
+ .caps[4] = {HFI_CAPABILITY_MBS_PER_SECOND, 64, 244800, 1},
+ .caps[5] = {HFI_CAPABILITY_FRAMERATE, 1, 120, 1},
+ .caps[6] = {HFI_CAPABILITY_MAX_VIDEOCORES, 0, 1, 1},
+ .caps[7] = {HFI_CAPABILITY_HIER_P_NUM_ENH_LAYERS, 0, 6, 1},
+ .caps[8] = {HFI_CAPABILITY_ENC_LTR_COUNT, 0, 4, 1},
+ .caps[9] = {HFI_CAPABILITY_MBS_PER_SECOND_POWERSAVE, 0, 244800, 1},
+ .caps[10] = {HFI_CAPABILITY_I_FRAME_QP, 0, 51, 1},
+ .caps[11] = {HFI_CAPABILITY_P_FRAME_QP, 0, 51, 1},
+ .caps[12] = {HFI_CAPABILITY_B_FRAME_QP, 0, 51, 1},
+ .caps[13] = {HFI_CAPABILITY_SLICE_BYTE, 1, 10, 1},
+ .caps[14] = {HFI_CAPABILITY_SLICE_MB, 1, 10, 1},
+ .num_caps = 15,
+ .pl[0] = {HFI_H264_PROFILE_BASELINE, HFI_H264_LEVEL_5},
+ .pl[1] = {HFI_H264_PROFILE_MAIN, HFI_H264_LEVEL_5},
+ .pl[2] = {HFI_H264_PROFILE_HIGH, HFI_H264_LEVEL_5},
+ .pl[3] = {HFI_H264_PROFILE_CONSTRAINED_BASE, HFI_H264_LEVEL_5},
+ .pl[4] = {HFI_H264_PROFILE_CONSTRAINED_HIGH, HFI_H264_LEVEL_5},
+ .num_pl = 5,
+ .fmts[0] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_NV12},
+ .fmts[1] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_NV12_UBWC},
+ .num_fmts = 2,
+}, {
+ .codec = HFI_VIDEO_CODEC_HEVC,
+ .domain = VIDC_SESSION_TYPE_ENC,
+ .caps[0] = {HFI_CAPABILITY_FRAME_WIDTH, 128, 1920, 1},
+ .caps[1] = {HFI_CAPABILITY_FRAME_HEIGHT, 128, 1920, 1},
+ .caps[2] = {HFI_CAPABILITY_MBS_PER_FRAME, 64, 8160, 1},
+	.caps[3] = {HFI_CAPABILITY_BITRATE, 1, 60000000, 1},
+ .caps[4] = {HFI_CAPABILITY_MBS_PER_SECOND, 64, 244800, 1},
+ .caps[5] = {HFI_CAPABILITY_FRAMERATE, 1, 120, 1},
+ .caps[6] = {HFI_CAPABILITY_MAX_VIDEOCORES, 0, 1, 1},
+ .caps[7] = {HFI_CAPABILITY_HIER_P_NUM_ENH_LAYERS, 0, 6, 1},
+ .caps[8] = {HFI_CAPABILITY_ENC_LTR_COUNT, 0, 4, 1},
+ .caps[9] = {HFI_CAPABILITY_MBS_PER_SECOND_POWERSAVE, 0, 244800, 1},
+ .caps[10] = {HFI_CAPABILITY_I_FRAME_QP, 0, 51, 1},
+ .caps[11] = {HFI_CAPABILITY_P_FRAME_QP, 0, 51, 1},
+ .caps[12] = {HFI_CAPABILITY_B_FRAME_QP, 0, 51, 1},
+ .caps[13] = {HFI_CAPABILITY_SLICE_BYTE, 1, 10, 1},
+ .caps[14] = {HFI_CAPABILITY_SLICE_MB, 1, 10, 1},
+ .num_caps = 15,
+ .pl[0] = {HFI_HEVC_PROFILE_MAIN, HFI_HEVC_LEVEL_5 | HFI_HEVC_TIER_HIGH0},
+ .pl[1] = {HFI_HEVC_PROFILE_MAIN10, HFI_HEVC_LEVEL_5 | HFI_HEVC_TIER_HIGH0},
+ .num_pl = 2,
+ .fmts[0] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_NV12},
+ .fmts[1] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_NV12_UBWC},
+ .num_fmts = 2,
+} };
+
+static const struct hfi_plat_caps *get_capabilities(struct venus_core *core,
+ unsigned int *entries)
+{
+ *entries = is_lite(core) ? ARRAY_SIZE(caps_lite) : ARRAY_SIZE(caps);
+
+ return is_lite(core) ? caps_lite : caps;
}
-static void get_codecs(u32 *enc_codecs, u32 *dec_codecs, u32 *count)
+static void get_codecs(struct venus_core *core,
+ u32 *enc_codecs, u32 *dec_codecs, u32 *count)
{
- *enc_codecs = HFI_VIDEO_CODEC_H264 | HFI_VIDEO_CODEC_HEVC |
- HFI_VIDEO_CODEC_VP8;
- *dec_codecs = HFI_VIDEO_CODEC_H264 | HFI_VIDEO_CODEC_HEVC |
- HFI_VIDEO_CODEC_VP8 | HFI_VIDEO_CODEC_VP9 |
- HFI_VIDEO_CODEC_MPEG2;
- *count = 8;
+ const struct hfi_plat_caps *caps;
+ unsigned int num;
+ size_t i;
+
+ *enc_codecs = 0;
+ *dec_codecs = 0;
+
+ caps = get_capabilities(core, &num);
+
+ for (i = 0; i < num; caps++, i++) {
+ if (caps->domain == VIDC_SESSION_TYPE_ENC)
+ *enc_codecs |= caps->codec;
+ else
+ *dec_codecs |= caps->codec;
+ }
+
+ *count = num;
}
static const struct hfi_platform_codec_freq_data codec_freq_data[] = {
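Editor's note: get_codecs() above now derives the codec bitmasks from whichever capability table is active, instead of hard-coding them. A standalone sketch of the derivation, with illustrative codec bits:

#include <stdio.h>

enum { TYPE_ENC = 1, TYPE_DEC = 2 };
struct cap { unsigned int codec; unsigned int domain; };

static const struct cap table[] = {
	{ 1u << 0, TYPE_DEC },	/* H.264 decode */
	{ 1u << 1, TYPE_DEC },	/* HEVC decode */
	{ 1u << 0, TYPE_ENC },	/* H.264 encode */
};

int main(void)
{
	unsigned int enc = 0, dec = 0;
	size_t i, count = sizeof(table) / sizeof(table[0]);

	for (i = 0; i < count; i++) {
		if (table[i].domain == TYPE_ENC)
			enc |= table[i].codec;
		else
			dec |= table[i].codec;
	}
	printf("enc=%#x dec=%#x count=%zu\n", enc, dec, count);
	return 0;
}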
@@ -272,13 +403,29 @@ static const struct hfi_platform_codec_freq_data codec_freq_data[] = {
{ V4L2_PIX_FMT_VP9, VIDC_SESSION_TYPE_DEC, 200, 10, 200 },
};
+static const struct hfi_platform_codec_freq_data codec_freq_data_lite[] = {
+ { V4L2_PIX_FMT_H264, VIDC_SESSION_TYPE_DEC, 440, 0, 440 },
+ { V4L2_PIX_FMT_HEVC, VIDC_SESSION_TYPE_DEC, 440, 0, 440 },
+ { V4L2_PIX_FMT_VP9, VIDC_SESSION_TYPE_DEC, 440, 0, 440 },
+ { V4L2_PIX_FMT_H264, VIDC_SESSION_TYPE_ENC, 675, 0, 675 },
+ { V4L2_PIX_FMT_HEVC, VIDC_SESSION_TYPE_ENC, 675, 0, 675 },
+};
+
static const struct hfi_platform_codec_freq_data *
-get_codec_freq_data(u32 session_type, u32 pixfmt)
+get_codec_freq_data(struct venus_core *core, u32 session_type, u32 pixfmt)
{
- const struct hfi_platform_codec_freq_data *data = codec_freq_data;
- unsigned int i, data_size = ARRAY_SIZE(codec_freq_data);
+ const struct hfi_platform_codec_freq_data *data;
+ unsigned int i, data_size;
const struct hfi_platform_codec_freq_data *found = NULL;
+ if (is_lite(core)) {
+ data = codec_freq_data_lite;
+ data_size = ARRAY_SIZE(codec_freq_data_lite);
+ } else {
+ data = codec_freq_data;
+ data_size = ARRAY_SIZE(codec_freq_data);
+ }
+
for (i = 0; i < data_size; i++) {
if (data[i].pixfmt == pixfmt && data[i].session_type == session_type) {
found = &data[i];
@@ -289,33 +436,36 @@ get_codec_freq_data(u32 session_type, u32 pixfmt)
return found;
}
-static unsigned long codec_vpp_freq(u32 session_type, u32 codec)
+static unsigned long codec_vpp_freq(struct venus_core *core,
+ u32 session_type, u32 codec)
{
const struct hfi_platform_codec_freq_data *data;
- data = get_codec_freq_data(session_type, codec);
+ data = get_codec_freq_data(core, session_type, codec);
if (data)
return data->vpp_freq;
return 0;
}
-static unsigned long codec_vsp_freq(u32 session_type, u32 codec)
+static unsigned long codec_vsp_freq(struct venus_core *core,
+ u32 session_type, u32 codec)
{
const struct hfi_platform_codec_freq_data *data;
- data = get_codec_freq_data(session_type, codec);
+ data = get_codec_freq_data(core, session_type, codec);
if (data)
return data->vsp_freq;
return 0;
}
-static unsigned long codec_lp_freq(u32 session_type, u32 codec)
+static unsigned long codec_lp_freq(struct venus_core *core,
+ u32 session_type, u32 codec)
{
const struct hfi_platform_codec_freq_data *data;
- data = get_codec_freq_data(session_type, codec);
+ data = get_codec_freq_data(core, session_type, codec);
if (data)
return data->low_power_freq;
diff --git a/drivers/media/platform/qcom/venus/hfi_platform_v6.c b/drivers/media/platform/qcom/venus/hfi_platform_v6.c
index 4e8af645f8b9..d8568c08cc36 100644
--- a/drivers/media/platform/qcom/venus/hfi_platform_v6.c
+++ b/drivers/media/platform/qcom/venus/hfi_platform_v6.c
@@ -2,6 +2,7 @@
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*/
+#include "core.h"
#include "hfi_platform.h"
static const struct hfi_plat_caps caps[] = {
@@ -245,14 +246,22 @@ static const struct hfi_plat_caps caps[] = {
.num_fmts = 4,
} };
-static const struct hfi_plat_caps *get_capabilities(unsigned int *entries)
+static const struct hfi_plat_caps *get_capabilities(struct venus_core *core,
+ unsigned int *entries)
{
+ if (is_lite(core))
+ return NULL;
+
*entries = ARRAY_SIZE(caps);
return caps;
}
-static void get_codecs(u32 *enc_codecs, u32 *dec_codecs, u32 *count)
+static void get_codecs(struct venus_core *core, u32 *enc_codecs,
+ u32 *dec_codecs, u32 *count)
{
+ if (is_lite(core))
+ return;
+
*enc_codecs = HFI_VIDEO_CODEC_H264 | HFI_VIDEO_CODEC_HEVC |
HFI_VIDEO_CODEC_VP8;
*dec_codecs = HFI_VIDEO_CODEC_H264 | HFI_VIDEO_CODEC_HEVC |
@@ -273,12 +282,15 @@ static const struct hfi_platform_codec_freq_data codec_freq_data[] = {
};
static const struct hfi_platform_codec_freq_data *
-get_codec_freq_data(u32 session_type, u32 pixfmt)
+get_codec_freq_data(struct venus_core *core, u32 session_type, u32 pixfmt)
{
const struct hfi_platform_codec_freq_data *data = codec_freq_data;
unsigned int i, data_size = ARRAY_SIZE(codec_freq_data);
const struct hfi_platform_codec_freq_data *found = NULL;
+ if (is_lite(core))
+ return NULL;
+
for (i = 0; i < data_size; i++) {
if (data[i].pixfmt == pixfmt && data[i].session_type == session_type) {
found = &data[i];
@@ -289,33 +301,36 @@ get_codec_freq_data(u32 session_type, u32 pixfmt)
return found;
}
-static unsigned long codec_vpp_freq(u32 session_type, u32 codec)
+static unsigned long codec_vpp_freq(struct venus_core *core, u32 session_type,
+ u32 codec)
{
const struct hfi_platform_codec_freq_data *data;
- data = get_codec_freq_data(session_type, codec);
+ data = get_codec_freq_data(core, session_type, codec);
if (data)
return data->vpp_freq;
return 0;
}
-static unsigned long codec_vsp_freq(u32 session_type, u32 codec)
+static unsigned long codec_vsp_freq(struct venus_core *core, u32 session_type,
+ u32 codec)
{
const struct hfi_platform_codec_freq_data *data;
- data = get_codec_freq_data(session_type, codec);
+ data = get_codec_freq_data(core, session_type, codec);
if (data)
return data->vsp_freq;
return 0;
}
-static unsigned long codec_lp_freq(u32 session_type, u32 codec)
+static unsigned long codec_lp_freq(struct venus_core *core, u32 session_type,
+ u32 codec)
{
const struct hfi_platform_codec_freq_data *data;
- data = get_codec_freq_data(session_type, codec);
+ data = get_codec_freq_data(core, session_type, codec);
if (data)
return data->low_power_freq;
diff --git a/drivers/media/platform/qcom/venus/hfi_venus.c b/drivers/media/platform/qcom/venus/hfi_venus.c
index cec7f5964d3d..d3da35f67fd5 100644
--- a/drivers/media/platform/qcom/venus/hfi_venus.c
+++ b/drivers/media/platform/qcom/venus/hfi_venus.c
@@ -380,7 +380,7 @@ static void venus_soft_int(struct venus_hfi_device *hdev)
void __iomem *cpu_ic_base = hdev->core->cpu_ic_base;
u32 clear_bit;
- if (IS_V6(hdev->core))
+ if (IS_V6(hdev->core) || (IS_V4(hdev->core) && is_lite(hdev->core)))
clear_bit = BIT(CPU_IC_SOFTINT_H2A_SHIFT_V6);
else
clear_bit = BIT(CPU_IC_SOFTINT_H2A_SHIFT);
@@ -501,9 +501,11 @@ static int venus_boot_core(struct venus_hfi_device *hdev)
if (count >= max_tries)
ret = -ETIMEDOUT;
- if (IS_IRIS2(hdev->core) || IS_IRIS2_1(hdev->core)) {
+ if (IS_IRIS2(hdev->core) || IS_IRIS2_1(hdev->core) || IS_AR50_LITE(hdev->core)) {
writel(0x1, cpu_cs_base + CPU_CS_H2XSOFTINTEN_V6);
- writel(0x0, cpu_cs_base + CPU_CS_X2RPMH_V6);
+
+ if (!IS_AR50_LITE(hdev->core))
+ writel(0x0, cpu_cs_base + CPU_CS_X2RPMH_V6);
}
return ret;
@@ -569,6 +571,9 @@ static int venus_halt_axi(struct venus_hfi_device *hdev)
u32 mask_val;
int ret;
+ if (IS_AR50_LITE(hdev->core))
+ return 0;
+
if (IS_IRIS2(hdev->core) || IS_IRIS2_1(hdev->core)) {
writel(0x3, cpu_cs_base + CPU_CS_X2RPMH_V6);
@@ -1138,7 +1143,13 @@ static irqreturn_t venus_isr(struct venus_core *core)
wrapper_base = hdev->core->wrapper_base;
status = readl(wrapper_base + WRAPPER_INTR_STATUS);
- if (IS_IRIS2(core) || IS_IRIS2_1(core)) {
+
+ if (IS_AR50_LITE(core)) {
+ if (status & WRAPPER_INTR_STATUS_A2H_MASK ||
+ status & WRAPPER_INTR_STATUS_A2HWD_MASK_V4_LITE ||
+ status & CPU_CS_SCIACMDARG0_INIT_IDLE_MSG_MASK)
+ hdev->irq_status = status;
+ } else if (IS_IRIS2(core) || IS_IRIS2_1(core)) {
if (status & WRAPPER_INTR_STATUS_A2H_MASK ||
status & WRAPPER_INTR_STATUS_A2HWD_MASK_V6 ||
status & CPU_CS_SCIACMDARG0_INIT_IDLE_MSG_MASK)
@@ -1150,7 +1161,7 @@ static irqreturn_t venus_isr(struct venus_core *core)
hdev->irq_status = status;
}
writel(1, cpu_cs_base + CPU_CS_A2HSOFTINTCLR);
- if (!(IS_IRIS2(core) || IS_IRIS2_1(core)))
+ if (!(IS_IRIS2(core) || IS_IRIS2_1(core) || IS_AR50_LITE(core)))
writel(status, wrapper_base + WRAPPER_INTR_CLEAR);
return IRQ_WAKE_THREAD;
@@ -1535,7 +1546,7 @@ static bool venus_cpu_and_video_core_idle(struct venus_hfi_device *hdev)
void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
u32 ctrl_status, cpu_status;
- if (IS_IRIS2(hdev->core) || IS_IRIS2_1(hdev->core))
+ if (IS_IRIS2(hdev->core) || IS_IRIS2_1(hdev->core) || IS_AR50_LITE(hdev->core))
cpu_status = readl(wrapper_tz_base + WRAPPER_TZ_CPU_STATUS_V6);
else
cpu_status = readl(wrapper_base + WRAPPER_CPU_STATUS);
@@ -1555,7 +1566,7 @@ static bool venus_cpu_idle_and_pc_ready(struct venus_hfi_device *hdev)
void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
u32 ctrl_status, cpu_status;
- if (IS_IRIS2(hdev->core) || IS_IRIS2_1(hdev->core))
+ if (IS_IRIS2(hdev->core) || IS_IRIS2_1(hdev->core) || IS_AR50_LITE(hdev->core))
cpu_status = readl(wrapper_tz_base + WRAPPER_TZ_CPU_STATUS_V6);
else
cpu_status = readl(wrapper_base + WRAPPER_CPU_STATUS);
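Editor's note: the three OR-ed bit tests in the AR50_LITE branch of venus_isr() above are equivalent to one combined-mask test. A standalone sketch; mask values are illustrative except A2HWD_V4_LITE (0x10, from the hfi_venus_io.h hunk below):

#include <stdio.h>

#define A2H		0x4	/* illustrative value */
#define A2HWD_V4_LITE	0x10	/* WRAPPER_INTR_STATUS_A2HWD_MASK_V4_LITE */
#define INIT_IDLE	0x2	/* illustrative value */

int main(void)
{
	unsigned int status = 0x10;	/* pretend-read of WRAPPER_INTR_STATUS */

	/* equivalent to the three separate tests in the driver */
	if (status & (A2H | A2HWD_V4_LITE | INIT_IDLE))
		printf("latch irq_status=%#x\n", status);
	return 0;
}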
diff --git a/drivers/media/platform/qcom/venus/hfi_venus_io.h b/drivers/media/platform/qcom/venus/hfi_venus_io.h
index 9735a246ce36..f2c3064c44ae 100644
--- a/drivers/media/platform/qcom/venus/hfi_venus_io.h
+++ b/drivers/media/platform/qcom/venus/hfi_venus_io.h
@@ -51,6 +51,9 @@
/* Venus cpu */
#define CPU_CS_SCIACMDARG3 0x58
+#define CPU_CS_VCICMD 0x20
+#define CPU_CS_VCICMD_ARP_OFF BIT(0)
+
#define SFR_ADDR 0x5c
#define MMAP_ADDR 0x60
#define UC_REGION_ADDR 0x64
@@ -100,6 +103,7 @@
#define WRAPPER_INTR_MASK_A2HCPU_MASK 0x4
#define WRAPPER_INTR_MASK_A2HCPU_SHIFT 0x2
+#define WRAPPER_INTR_STATUS_A2HWD_MASK_V4_LITE 0x10
#define WRAPPER_INTR_STATUS_A2HWD_MASK_V6 0x8
#define WRAPPER_INTR_MASK_A2HWD_BASK_V6 0x8
diff --git a/drivers/media/platform/qcom/venus/pm_helpers.c b/drivers/media/platform/qcom/venus/pm_helpers.c
index e32f8862a9f9..f0269524ac70 100644
--- a/drivers/media/platform/qcom/venus/pm_helpers.c
+++ b/drivers/media/platform/qcom/venus/pm_helpers.c
@@ -40,6 +40,8 @@ static int core_clks_get(struct venus_core *core)
static int core_clks_enable(struct venus_core *core)
{
+ const struct freq_tbl *freq_tbl = core->res->freq_tbl;
+ unsigned int freq_tbl_size = core->res->freq_tbl_size;
const struct venus_resources *res = core->res;
struct device *dev = core->dev;
unsigned long freq = 0;
@@ -48,11 +50,16 @@ static int core_clks_enable(struct venus_core *core)
int ret;
opp = dev_pm_opp_find_freq_ceil(dev, &freq);
- if (!IS_ERR(opp))
+ if (IS_ERR(opp)) {
+ if (!freq_tbl)
+ return -ENODEV;
+ freq = freq_tbl[freq_tbl_size - 1].freq;
+ } else {
dev_pm_opp_put(opp);
+ }
for (i = 0; i < res->clks_num; i++) {
- if (IS_V6(core)) {
+ if (IS_V6(core) || (IS_V4(core) && is_lite(core))) {
ret = clk_set_rate(core->clks[i], freq);
if (ret)
goto err;
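Editor's note: the pm_helpers.c hunk above adds a fallback for SoCs without an OPP table. The pattern in isolation, as a kernel-style sketch; dev_pm_opp_find_freq_ceil() returns the lowest OPP at or above *freq, updates *freq, and takes a reference that must be dropped:

unsigned long freq = 0;
struct dev_pm_opp *opp;

opp = dev_pm_opp_find_freq_ceil(dev, &freq);	/* freq updated on success */
if (IS_ERR(opp)) {
	if (!freq_tbl)
		return -ENODEV;		/* no OPPs and no legacy table */
	freq = freq_tbl[freq_tbl_size - 1].freq;	/* last legacy entry */
} else {
	dev_pm_opp_put(opp);		/* drop the OPP reference */
}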
diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c
index 29b0d6a5303d..55c27345b7d8 100644
--- a/drivers/media/platform/qcom/venus/vdec.c
+++ b/drivers/media/platform/qcom/venus/vdec.c
@@ -1732,9 +1732,8 @@ static int vdec_open(struct file *file)
v4l2_fh_init(&inst->fh, core->vdev_dec);
inst->fh.ctrl_handler = &inst->ctrl_handler;
- v4l2_fh_add(&inst->fh);
+ v4l2_fh_add(&inst->fh, file);
inst->fh.m2m_ctx = inst->m2m_ctx;
- file->private_data = &inst->fh;
return 0;
@@ -1755,7 +1754,7 @@ static int vdec_close(struct file *file)
vdec_pm_get(inst);
cancel_work_sync(&inst->delayed_process_work);
- venus_close_common(inst);
+ venus_close_common(inst, file);
ida_destroy(&inst->dpb_ids);
vdec_pm_put(inst, false);
diff --git a/drivers/media/platform/qcom/venus/venc.c b/drivers/media/platform/qcom/venus/venc.c
index c0a0ccdded80..fba07557a399 100644
--- a/drivers/media/platform/qcom/venus/venc.c
+++ b/drivers/media/platform/qcom/venus/venc.c
@@ -1515,9 +1515,8 @@ static int venc_open(struct file *file)
v4l2_fh_init(&inst->fh, core->vdev_enc);
inst->fh.ctrl_handler = &inst->ctrl_handler;
- v4l2_fh_add(&inst->fh);
+ v4l2_fh_add(&inst->fh, file);
inst->fh.m2m_ctx = inst->m2m_ctx;
- file->private_data = &inst->fh;
return 0;
@@ -1537,7 +1536,7 @@ static int venc_close(struct file *file)
struct venus_inst *inst = to_inst(file);
venc_pm_get(inst);
- venus_close_common(inst);
+ venus_close_common(inst, file);
inst->enc_state = VENUS_ENC_STATE_DEINIT;
venc_pm_put(inst, false);
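Editor's note: vdec.c and venc.c above migrate to the v4l2_fh API introduced elsewhere in this series, where v4l2_fh_add()/v4l2_fh_del() take the struct file and the manual file->private_data assignment disappears. A kernel-style sketch of the resulting open/release pair; the demo_* names are illustrative:

#include <linux/slab.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>

struct demo_ctx {
	struct v4l2_fh fh;
};

static int demo_open(struct file *file)
{
	struct demo_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return -ENOMEM;
	v4l2_fh_init(&ctx->fh, video_devdata(file));
	v4l2_fh_add(&ctx->fh, file);	/* binds the fh to the file */
	return 0;
}

static int demo_release(struct file *file)
{
	struct demo_ctx *ctx =
		container_of(file_to_v4l2_fh(file), struct demo_ctx, fh);

	v4l2_fh_del(&ctx->fh, file);
	v4l2_fh_exit(&ctx->fh);
	kfree(ctx);
	return 0;
}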
diff --git a/drivers/media/platform/raspberrypi/rp1-cfe/csi2.c b/drivers/media/platform/raspberrypi/rp1-cfe/csi2.c
index 35c2ab1e2cd4..2c5b4d24b4e6 100644
--- a/drivers/media/platform/raspberrypi/rp1-cfe/csi2.c
+++ b/drivers/media/platform/raspberrypi/rp1-cfe/csi2.c
@@ -525,7 +525,7 @@ static const struct v4l2_subdev_internal_ops csi2_internal_ops = {
int csi2_init(struct csi2_device *csi2, struct dentry *debugfs)
{
- unsigned int ret;
+ int ret;
spin_lock_init(&csi2->errors_lock);
diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-core.c b/drivers/media/platform/renesas/rcar-vin/rcar-core.c
index f73729f59671..100105b620e3 100644
--- a/drivers/media/platform/renesas/rcar-vin/rcar-core.c
+++ b/drivers/media/platform/renesas/rcar-vin/rcar-core.c
@@ -849,7 +849,7 @@ static int rvin_isp_init(struct rvin_dev *vin)
* Suspend / Resume
*/
-static int __maybe_unused rvin_suspend(struct device *dev)
+static int rvin_suspend(struct device *dev)
{
struct rvin_dev *vin = dev_get_drvdata(dev);
@@ -861,7 +861,7 @@ static int __maybe_unused rvin_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused rvin_resume(struct device *dev)
+static int rvin_resume(struct device *dev)
{
struct rvin_dev *vin = dev_get_drvdata(dev);
@@ -1276,13 +1276,13 @@ static void rcar_vin_remove(struct platform_device *pdev)
rvin_dma_unregister(vin);
}
-static SIMPLE_DEV_PM_OPS(rvin_pm_ops, rvin_suspend, rvin_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(rvin_pm_ops, rvin_suspend, rvin_resume);
static struct platform_driver rcar_vin_driver = {
.driver = {
.name = "rcar-vin",
.suppress_bind_attrs = true,
- .pm = &rvin_pm_ops,
+ .pm = pm_sleep_ptr(&rvin_pm_ops),
.of_match_table = rvin_of_id_table,
},
.probe = rcar_vin_probe,
diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-v4l2.c b/drivers/media/platform/renesas/rcar-vin/rcar-v4l2.c
index 62eddf3a35fc..079dbaf016c2 100644
--- a/drivers/media/platform/renesas/rcar-vin/rcar-v4l2.c
+++ b/drivers/media/platform/renesas/rcar-vin/rcar-v4l2.c
@@ -588,8 +588,6 @@ static int rvin_open(struct file *file)
if (ret)
goto err_pm;
- file->private_data = vin;
-
ret = v4l2_fh_open(file);
if (ret)
goto err_unlock;
diff --git a/drivers/media/platform/renesas/rcar_drif.c b/drivers/media/platform/renesas/rcar_drif.c
index fc8b6bbef793..11bf47fb8266 100644
--- a/drivers/media/platform/renesas/rcar_drif.c
+++ b/drivers/media/platform/renesas/rcar_drif.c
@@ -1446,18 +1446,18 @@ static void rcar_drif_remove(struct platform_device *pdev)
}
/* FIXME: Implement suspend/resume support */
-static int __maybe_unused rcar_drif_suspend(struct device *dev)
+static int rcar_drif_suspend(struct device *dev)
{
return 0;
}
-static int __maybe_unused rcar_drif_resume(struct device *dev)
+static int rcar_drif_resume(struct device *dev)
{
return 0;
}
-static SIMPLE_DEV_PM_OPS(rcar_drif_pm_ops, rcar_drif_suspend,
- rcar_drif_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(rcar_drif_pm_ops, rcar_drif_suspend,
+ rcar_drif_resume);
static const struct of_device_id rcar_drif_of_table[] = {
{ .compatible = "renesas,rcar-gen3-drif" },
@@ -1470,8 +1470,8 @@ static struct platform_driver rcar_drif_driver = {
.driver = {
.name = RCAR_DRIF_DRV_NAME,
.of_match_table = rcar_drif_of_table,
- .pm = &rcar_drif_pm_ops,
- },
+ .pm = pm_sleep_ptr(&rcar_drif_pm_ops),
+ },
.probe = rcar_drif_probe,
.remove = rcar_drif_remove,
};
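Editor's note: the rcar-vin and rcar_drif conversions above follow one pattern: DEFINE_SIMPLE_DEV_PM_OPS() plus pm_sleep_ptr() let the compiler see, and discard, the callbacks when CONFIG_PM_SLEEP is disabled, so the __maybe_unused annotations become unnecessary. A minimal kernel-style sketch with illustrative names:

#include <linux/platform_device.h>
#include <linux/pm.h>

static int demo_suspend(struct device *dev) { return 0; }
static int demo_resume(struct device *dev) { return 0; }

static DEFINE_SIMPLE_DEV_PM_OPS(demo_pm_ops, demo_suspend, demo_resume);

static struct platform_driver demo_driver = {
	.driver = {
		.name = "demo",
		/* NULL when CONFIG_PM_SLEEP=n; the ops are then discarded */
		.pm = pm_sleep_ptr(&demo_pm_ops),
	},
};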
diff --git a/drivers/media/platform/renesas/rcar_fdp1.c b/drivers/media/platform/renesas/rcar_fdp1.c
index 5d453a7a8988..e615c56083f1 100644
--- a/drivers/media/platform/renesas/rcar_fdp1.c
+++ b/drivers/media/platform/renesas/rcar_fdp1.c
@@ -630,9 +630,9 @@ struct fdp1_ctx {
struct fdp1_field_buffer *previous;
};
-static inline struct fdp1_ctx *fh_to_ctx(struct v4l2_fh *fh)
+static inline struct fdp1_ctx *file_to_ctx(struct file *filp)
{
- return container_of(fh, struct fdp1_ctx, fh);
+ return container_of(file_to_v4l2_fh(filp), struct fdp1_ctx, fh);
}
static struct fdp1_q_data *get_q_data(struct fdp1_ctx *ctx,
@@ -1406,8 +1406,8 @@ static int fdp1_enum_fmt_vid_out(struct file *file, void *priv,
static int fdp1_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
+ struct fdp1_ctx *ctx = file_to_ctx(file);
struct fdp1_q_data *q_data;
- struct fdp1_ctx *ctx = fh_to_ctx(priv);
if (!v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type))
return -EINVAL;
@@ -1584,7 +1584,7 @@ static void fdp1_try_fmt_capture(struct fdp1_ctx *ctx,
static int fdp1_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
- struct fdp1_ctx *ctx = fh_to_ctx(priv);
+ struct fdp1_ctx *ctx = file_to_ctx(file);
if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
fdp1_try_fmt_output(ctx, NULL, &f->fmt.pix_mp);
@@ -1655,7 +1655,7 @@ static void fdp1_set_format(struct fdp1_ctx *ctx,
static int fdp1_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
- struct fdp1_ctx *ctx = fh_to_ctx(priv);
+ struct fdp1_ctx *ctx = file_to_ctx(file);
struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx;
struct vb2_queue *vq = v4l2_m2m_get_vq(m2m_ctx, f->type);
@@ -2088,7 +2088,6 @@ static int fdp1_open(struct file *file)
}
v4l2_fh_init(&ctx->fh, video_devdata(file));
- file->private_data = &ctx->fh;
ctx->fdp1 = fdp1;
/* Initialise Queues */
@@ -2137,7 +2136,7 @@ static int fdp1_open(struct file *file)
if (ret < 0)
goto error_pm;
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
dprintk(fdp1, "Created instance: %p, m2m_ctx: %p\n",
ctx, ctx->fh.m2m_ctx);
@@ -2158,11 +2157,11 @@ done:
static int fdp1_release(struct file *file)
{
struct fdp1_dev *fdp1 = video_drvdata(file);
- struct fdp1_ctx *ctx = fh_to_ctx(file->private_data);
+ struct fdp1_ctx *ctx = file_to_ctx(file);
dprintk(fdp1, "Releasing instance %p\n", ctx);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
v4l2_ctrl_handler_free(&ctx->hdl);
mutex_lock(&fdp1->dev_mutex);
@@ -2409,7 +2408,7 @@ static void fdp1_remove(struct platform_device *pdev)
rcar_fcp_put(fdp1->fcp);
}
-static int __maybe_unused fdp1_pm_runtime_suspend(struct device *dev)
+static int fdp1_pm_runtime_suspend(struct device *dev)
{
struct fdp1_dev *fdp1 = dev_get_drvdata(dev);
@@ -2418,7 +2417,7 @@ static int __maybe_unused fdp1_pm_runtime_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused fdp1_pm_runtime_resume(struct device *dev)
+static int fdp1_pm_runtime_resume(struct device *dev)
{
struct fdp1_dev *fdp1 = dev_get_drvdata(dev);
@@ -2429,9 +2428,7 @@ static int __maybe_unused fdp1_pm_runtime_resume(struct device *dev)
}
static const struct dev_pm_ops fdp1_pm_ops = {
- SET_RUNTIME_PM_OPS(fdp1_pm_runtime_suspend,
- fdp1_pm_runtime_resume,
- NULL)
+ RUNTIME_PM_OPS(fdp1_pm_runtime_suspend, fdp1_pm_runtime_resume, NULL)
};
static const struct of_device_id fdp1_dt_ids[] = {
@@ -2446,7 +2443,7 @@ static struct platform_driver fdp1_pdrv = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = fdp1_dt_ids,
- .pm = &fdp1_pm_ops,
+ .pm = pm_ptr(&fdp1_pm_ops),
},
};
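Editor's note: the runtime-PM counterpart used by fdp1 above (and by ceu and vsp1 below): RUNTIME_PM_OPS() inside a dev_pm_ops, referenced through pm_ptr(), which keys off CONFIG_PM rather than CONFIG_PM_SLEEP. A kernel-style sketch with illustrative names:

#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int demo_runtime_suspend(struct device *dev) { return 0; }
static int demo_runtime_resume(struct device *dev) { return 0; }

static const struct dev_pm_ops demo_pm_ops = {
	RUNTIME_PM_OPS(demo_runtime_suspend, demo_runtime_resume, NULL)
};

/* in the platform_driver: .pm = pm_ptr(&demo_pm_ops), */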
diff --git a/drivers/media/platform/renesas/rcar_jpu.c b/drivers/media/platform/renesas/rcar_jpu.c
index 81038df71bb5..46ea259a2bb9 100644
--- a/drivers/media/platform/renesas/rcar_jpu.c
+++ b/drivers/media/platform/renesas/rcar_jpu.c
@@ -480,9 +480,9 @@ static struct jpu_ctx *ctrl_to_ctx(struct v4l2_ctrl *c)
return container_of(c->handler, struct jpu_ctx, ctrl_handler);
}
-static struct jpu_ctx *fh_to_ctx(struct v4l2_fh *fh)
+static struct jpu_ctx *file_to_ctx(struct file *filp)
{
- return container_of(fh, struct jpu_ctx, fh);
+ return container_of(file_to_v4l2_fh(filp), struct jpu_ctx, fh);
}
static void jpu_set_tbl(struct jpu *jpu, u32 reg, const unsigned int *tbl,
@@ -656,7 +656,7 @@ static u8 jpu_parse_hdr(void *buffer, unsigned long size, unsigned int *width,
static int jpu_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct jpu_ctx *ctx = fh_to_ctx(priv);
+ struct jpu_ctx *ctx = file_to_ctx(file);
if (ctx->encoder)
strscpy(cap->card, DRV_NAME " encoder", sizeof(cap->card));
@@ -714,7 +714,7 @@ static int jpu_enum_fmt(struct v4l2_fmtdesc *f, u32 type)
static int jpu_enum_fmt_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- struct jpu_ctx *ctx = fh_to_ctx(priv);
+ struct jpu_ctx *ctx = file_to_ctx(file);
return jpu_enum_fmt(f, ctx->encoder ? JPU_ENC_CAPTURE :
JPU_DEC_CAPTURE);
@@ -723,7 +723,7 @@ static int jpu_enum_fmt_cap(struct file *file, void *priv,
static int jpu_enum_fmt_out(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- struct jpu_ctx *ctx = fh_to_ctx(priv);
+ struct jpu_ctx *ctx = file_to_ctx(file);
return jpu_enum_fmt(f, ctx->encoder ? JPU_ENC_OUTPUT : JPU_DEC_OUTPUT);
}
@@ -823,7 +823,7 @@ static int __jpu_try_fmt(struct jpu_ctx *ctx, struct jpu_fmt **fmtinfo,
static int jpu_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
- struct jpu_ctx *ctx = fh_to_ctx(priv);
+ struct jpu_ctx *ctx = file_to_ctx(file);
if (!v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type))
return -EINVAL;
@@ -834,7 +834,7 @@ static int jpu_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
static int jpu_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
struct vb2_queue *vq;
- struct jpu_ctx *ctx = fh_to_ctx(priv);
+ struct jpu_ctx *ctx = file_to_ctx(file);
struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx;
struct jpu_fmt *fmtinfo;
struct jpu_q_data *q_data;
@@ -863,8 +863,8 @@ static int jpu_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
static int jpu_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
+ struct jpu_ctx *ctx = file_to_ctx(file);
struct jpu_q_data *q_data;
- struct jpu_ctx *ctx = fh_to_ctx(priv);
if (!v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type))
return -EINVAL;
@@ -897,8 +897,8 @@ static const struct v4l2_ctrl_ops jpu_ctrl_ops = {
static int jpu_streamon(struct file *file, void *priv, enum v4l2_buf_type type)
{
- struct jpu_ctx *ctx = fh_to_ctx(priv);
struct jpu_q_data *src_q_data, *dst_q_data, *orig, adj, *ref;
+ struct jpu_ctx *ctx = file_to_ctx(file);
enum v4l2_buf_type adj_type;
src_q_data = jpu_get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
@@ -1226,8 +1226,7 @@ static int jpu_open(struct file *file)
v4l2_fh_init(&ctx->fh, vfd);
ctx->fh.ctrl_handler = &ctx->ctrl_handler;
- file->private_data = &ctx->fh;
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
ctx->jpu = jpu;
ctx->encoder = vfd == &jpu->vfd_encoder;
@@ -1272,7 +1271,7 @@ jpu_reset_rollback:
device_prepare_rollback:
mutex_unlock(&jpu->mutex);
v4l_prepare_rollback:
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
kfree(ctx);
return ret;
@@ -1280,12 +1279,12 @@ v4l_prepare_rollback:
static int jpu_release(struct file *file)
{
+ struct jpu_ctx *ctx = file_to_ctx(file);
struct jpu *jpu = video_drvdata(file);
- struct jpu_ctx *ctx = fh_to_ctx(file->private_data);
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
v4l2_ctrl_handler_free(&ctx->ctrl_handler);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
kfree(ctx);
@@ -1745,6 +1744,6 @@ static struct platform_driver jpu_driver = {
module_platform_driver(jpu_driver);
MODULE_ALIAS("platform:" DRV_NAME);
-MODULE_AUTHOR("Mikhail Ulianov <mikhail.ulyanov@cogentembedded.com>");
+MODULE_AUTHOR("Mikhail Ulianov");
MODULE_DESCRIPTION("Renesas R-Car JPEG processing unit driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/renesas/renesas-ceu.c b/drivers/media/platform/renesas/renesas-ceu.c
index 8cceafe491b1..deed49d0fb10 100644
--- a/drivers/media/platform/renesas/renesas-ceu.c
+++ b/drivers/media/platform/renesas/renesas-ceu.c
@@ -1048,7 +1048,7 @@ static int ceu_init_mbus_fmt(struct ceu_device *ceudev)
/*
* ceu_runtime_resume() - soft-reset the interface and turn sensor power on.
*/
-static int __maybe_unused ceu_runtime_resume(struct device *dev)
+static int ceu_runtime_resume(struct device *dev)
{
struct ceu_device *ceudev = dev_get_drvdata(dev);
struct v4l2_subdev *v4l2_sd = ceudev->sd->v4l2_sd;
@@ -1064,7 +1064,7 @@ static int __maybe_unused ceu_runtime_resume(struct device *dev)
* ceu_runtime_suspend() - disable capture and interrupts and soft-reset.
* Turn sensor power off.
*/
-static int __maybe_unused ceu_runtime_suspend(struct device *dev)
+static int ceu_runtime_suspend(struct device *dev)
{
struct ceu_device *ceudev = dev_get_drvdata(dev);
struct v4l2_subdev *v4l2_sd = ceudev->sd->v4l2_sd;
@@ -1709,15 +1709,13 @@ static void ceu_remove(struct platform_device *pdev)
}
static const struct dev_pm_ops ceu_pm_ops = {
- SET_RUNTIME_PM_OPS(ceu_runtime_suspend,
- ceu_runtime_resume,
- NULL)
+ RUNTIME_PM_OPS(ceu_runtime_suspend, ceu_runtime_resume, NULL)
};
static struct platform_driver ceu_driver = {
.driver = {
.name = DRIVER_NAME,
- .pm = &ceu_pm_ops,
+ .pm = pm_ptr(&ceu_pm_ops),
.of_match_table = of_match_ptr(ceu_of_match),
},
.probe = ceu_probe,
diff --git a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-core.c b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-core.c
index 806acc8f9728..3c5fbd857371 100644
--- a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-core.c
+++ b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-core.c
@@ -367,7 +367,6 @@ static const struct rzg2l_cru_info rzg3e_cru_info = {
.enable_interrupts = rzg3e_cru_enable_interrupts,
.disable_interrupts = rzg3e_cru_disable_interrupts,
.fifo_empty = rzg3e_fifo_empty,
- .csi_setup = rzg3e_cru_csi2_setup,
};
static const u16 rzg2l_cru_regs[] = {
@@ -412,7 +411,6 @@ static const struct rzg2l_cru_info rzg2l_cru_info = {
.enable_interrupts = rzg2l_cru_enable_interrupts,
.disable_interrupts = rzg2l_cru_disable_interrupts,
.fifo_empty = rzg2l_fifo_empty,
- .csi_setup = rzg2l_cru_csi2_setup,
};
static const struct of_device_id rzg2l_cru_of_id_table[] = {
diff --git a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-cru.h b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-cru.h
index be95b41c37df..3a200db15730 100644
--- a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-cru.h
+++ b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-cru.h
@@ -92,9 +92,6 @@ struct rzg2l_cru_info {
void (*enable_interrupts)(struct rzg2l_cru_dev *cru);
void (*disable_interrupts)(struct rzg2l_cru_dev *cru);
bool (*fifo_empty)(struct rzg2l_cru_dev *cru);
- void (*csi_setup)(struct rzg2l_cru_dev *cru,
- const struct rzg2l_cru_ip_format *ip_fmt,
- u8 csi_vc);
};
/**
@@ -204,11 +201,5 @@ void rzg3e_cru_disable_interrupts(struct rzg2l_cru_dev *cru);
bool rzg2l_fifo_empty(struct rzg2l_cru_dev *cru);
bool rzg3e_fifo_empty(struct rzg2l_cru_dev *cru);
-void rzg2l_cru_csi2_setup(struct rzg2l_cru_dev *cru,
- const struct rzg2l_cru_ip_format *ip_fmt,
- u8 csi_vc);
-void rzg3e_cru_csi2_setup(struct rzg2l_cru_dev *cru,
- const struct rzg2l_cru_ip_format *ip_fmt,
- u8 csi_vc);
#endif
diff --git a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c
index a8817a7066b2..162e2ace6931 100644
--- a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c
+++ b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c
@@ -257,30 +257,18 @@ static void rzg2l_cru_initialize_axi(struct rzg2l_cru_dev *cru)
rzg2l_cru_write(cru, AMnAXIATTR, amnaxiattr);
}
-void rzg3e_cru_csi2_setup(struct rzg2l_cru_dev *cru,
- const struct rzg2l_cru_ip_format *ip_fmt,
- u8 csi_vc)
+static void rzg2l_cru_csi2_setup(struct rzg2l_cru_dev *cru,
+ const struct rzg2l_cru_ip_format *ip_fmt,
+ u8 csi_vc)
{
const struct rzg2l_cru_info *info = cru->info;
u32 icnmc = ICnMC_INF(ip_fmt->datatype);
- icnmc |= rzg2l_cru_read(cru, info->image_conv) & ~ICnMC_INF_MASK;
-
- /* Set virtual channel CSI2 */
- icnmc |= ICnMC_VCSEL(csi_vc);
-
- rzg2l_cru_write(cru, ICnSVCNUM, csi_vc);
- rzg2l_cru_write(cru, ICnSVC, ICnSVC_SVC0(0) | ICnSVC_SVC1(1) |
- ICnSVC_SVC2(2) | ICnSVC_SVC3(3));
- rzg2l_cru_write(cru, info->image_conv, icnmc);
-}
-
-void rzg2l_cru_csi2_setup(struct rzg2l_cru_dev *cru,
- const struct rzg2l_cru_ip_format *ip_fmt,
- u8 csi_vc)
-{
- const struct rzg2l_cru_info *info = cru->info;
- u32 icnmc = ICnMC_INF(ip_fmt->datatype);
+ if (cru->info->regs[ICnSVC]) {
+ rzg2l_cru_write(cru, ICnSVCNUM, csi_vc);
+ rzg2l_cru_write(cru, ICnSVC, ICnSVC_SVC0(0) | ICnSVC_SVC1(1) |
+ ICnSVC_SVC2(2) | ICnSVC_SVC3(3));
+ }
icnmc |= rzg2l_cru_read(cru, info->image_conv) & ~ICnMC_INF_MASK;
@@ -299,7 +287,7 @@ static int rzg2l_cru_initialize_image_conv(struct rzg2l_cru_dev *cru,
const struct rzg2l_cru_ip_format *cru_ip_fmt;
cru_ip_fmt = rzg2l_cru_ip_code_to_fmt(ip_sd_fmt->code);
- info->csi_setup(cru, cru_ip_fmt, csi_vc);
+ rzg2l_cru_csi2_setup(cru, cru_ip_fmt, csi_vc);
/* Output format */
cru_video_fmt = rzg2l_cru_ip_format_to_fmt(cru->format.pixelformat);
@@ -1078,7 +1066,6 @@ static int rzg2l_cru_open(struct file *file)
if (ret)
return ret;
- file->private_data = cru;
ret = v4l2_fh_open(file);
if (ret)
goto err_unlock;
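Editor's note: the rzg2l-cru change above drops the per-SoC csi_setup callback and instead gates the ICnSVC writes on whether the register exists in the per-SoC register table. A standalone sketch of that table-driven dispatch; the 0x318 offset is illustrative:

#include <stdio.h>

enum { REG_ICnSVC, REG_MAX };

struct soc_info {
	unsigned short regs[REG_MAX];	/* 0 = register absent on this SoC */
};

static void csi2_setup(const struct soc_info *info)
{
	if (info->regs[REG_ICnSVC])
		printf("program ICnSVC at %#x\n", info->regs[REG_ICnSVC]);
	else
		printf("skip ICnSVC\n");
}

int main(void)
{
	const struct soc_info rzg2l = { .regs = { 0 } };
	const struct soc_info rzg3e = { .regs = { [REG_ICnSVC] = 0x318 } };

	csi2_setup(&rzg2l);
	csi2_setup(&rzg3e);
	return 0;
}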
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_drv.c b/drivers/media/platform/renesas/vsp1/vsp1_drv.c
index b8d06e88c475..6c64657fc4f3 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_drv.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_drv.c
@@ -618,7 +618,7 @@ void vsp1_device_put(struct vsp1_device *vsp1)
* Power Management
*/
-static int __maybe_unused vsp1_pm_suspend(struct device *dev)
+static int vsp1_pm_suspend(struct device *dev)
{
struct vsp1_device *vsp1 = dev_get_drvdata(dev);
@@ -634,7 +634,7 @@ static int __maybe_unused vsp1_pm_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused vsp1_pm_resume(struct device *dev)
+static int vsp1_pm_resume(struct device *dev)
{
struct vsp1_device *vsp1 = dev_get_drvdata(dev);
@@ -650,7 +650,7 @@ static int __maybe_unused vsp1_pm_resume(struct device *dev)
return 0;
}
-static int __maybe_unused vsp1_pm_runtime_suspend(struct device *dev)
+static int vsp1_pm_runtime_suspend(struct device *dev)
{
struct vsp1_device *vsp1 = dev_get_drvdata(dev);
@@ -660,7 +660,7 @@ static int __maybe_unused vsp1_pm_runtime_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused vsp1_pm_runtime_resume(struct device *dev)
+static int vsp1_pm_runtime_resume(struct device *dev)
{
struct vsp1_device *vsp1 = dev_get_drvdata(dev);
int ret;
@@ -693,8 +693,8 @@ done:
}
static const struct dev_pm_ops vsp1_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(vsp1_pm_suspend, vsp1_pm_resume)
- SET_RUNTIME_PM_OPS(vsp1_pm_runtime_suspend, vsp1_pm_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(vsp1_pm_suspend, vsp1_pm_resume)
+ RUNTIME_PM_OPS(vsp1_pm_runtime_suspend, vsp1_pm_runtime_resume, NULL)
};
/* -----------------------------------------------------------------------------
@@ -1042,7 +1042,7 @@ static struct platform_driver vsp1_platform_driver = {
.remove = vsp1_remove,
.driver = {
.name = "vsp1",
- .pm = &vsp1_pm_ops,
+ .pm = pm_ptr(&vsp1_pm_ops),
.of_match_table = vsp1_of_match,
},
};
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_histo.c b/drivers/media/platform/renesas/vsp1/vsp1_histo.c
index c762202877ba..390ea50f1595 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_histo.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_histo.c
@@ -392,7 +392,7 @@ static const struct v4l2_subdev_ops histo_ops = {
static int histo_v4l2_querycap(struct file *file, void *fh,
struct v4l2_capability *cap)
{
- struct v4l2_fh *vfh = file->private_data;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
struct vsp1_histogram *histo = vdev_to_histo(vfh->vdev);
cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_STREAMING
@@ -409,7 +409,7 @@ static int histo_v4l2_querycap(struct file *file, void *fh,
static int histo_v4l2_enum_format(struct file *file, void *fh,
struct v4l2_fmtdesc *f)
{
- struct v4l2_fh *vfh = file->private_data;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
struct vsp1_histogram *histo = vdev_to_histo(vfh->vdev);
if (f->index > 0 || f->type != histo->queue.type)
@@ -423,7 +423,7 @@ static int histo_v4l2_enum_format(struct file *file, void *fh,
static int histo_v4l2_get_format(struct file *file, void *fh,
struct v4l2_format *format)
{
- struct v4l2_fh *vfh = file->private_data;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
struct vsp1_histogram *histo = vdev_to_histo(vfh->vdev);
struct v4l2_meta_format *meta = &format->fmt.meta;
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_video.c b/drivers/media/platform/renesas/vsp1/vsp1_video.c
index bc66fbdde3cc..75f9a1a85d55 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_video.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_video.c
@@ -896,7 +896,7 @@ static const struct vb2_ops vsp1_video_queue_qops = {
static int
vsp1_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
{
- struct v4l2_fh *vfh = file->private_data;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
struct vsp1_video *video = to_vsp1_video(vfh->vdev);
cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_STREAMING
@@ -912,7 +912,7 @@ vsp1_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
static int vsp1_video_enum_format(struct file *file, void *fh,
struct v4l2_fmtdesc *f)
{
- struct v4l2_fh *vfh = file->private_data;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
struct vsp1_video *video = to_vsp1_video(vfh->vdev);
const struct vsp1_format_info *info;
@@ -933,7 +933,7 @@ static int vsp1_video_enum_format(struct file *file, void *fh,
static int
vsp1_video_get_format(struct file *file, void *fh, struct v4l2_format *format)
{
- struct v4l2_fh *vfh = file->private_data;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
struct vsp1_video *video = to_vsp1_video(vfh->vdev);
if (format->type != video->queue.type)
@@ -949,7 +949,7 @@ vsp1_video_get_format(struct file *file, void *fh, struct v4l2_format *format)
static int
vsp1_video_try_format(struct file *file, void *fh, struct v4l2_format *format)
{
- struct v4l2_fh *vfh = file->private_data;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
struct vsp1_video *video = to_vsp1_video(vfh->vdev);
if (format->type != video->queue.type)
@@ -961,7 +961,7 @@ vsp1_video_try_format(struct file *file, void *fh, struct v4l2_format *format)
static int
vsp1_video_set_format(struct file *file, void *fh, struct v4l2_format *format)
{
- struct v4l2_fh *vfh = file->private_data;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
struct vsp1_video *video = to_vsp1_video(vfh->vdev);
const struct vsp1_format_info *info;
int ret;
@@ -991,7 +991,7 @@ done:
static int
vsp1_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
{
- struct v4l2_fh *vfh = file->private_data;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
struct vsp1_video *video = to_vsp1_video(vfh->vdev);
struct media_device *mdev = &video->vsp1->media_dev;
struct vsp1_pipeline *pipe;
@@ -1079,13 +1079,11 @@ static int vsp1_video_open(struct file *file)
return -ENOMEM;
v4l2_fh_init(vfh, &video->video);
- v4l2_fh_add(vfh);
-
- file->private_data = vfh;
+ v4l2_fh_add(vfh, file);
ret = vsp1_device_get(video->vsp1);
if (ret < 0) {
- v4l2_fh_del(vfh);
+ v4l2_fh_del(vfh, file);
v4l2_fh_exit(vfh);
kfree(vfh);
}
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_vspx.c b/drivers/media/platform/renesas/vsp1/vsp1_vspx.c
index a754b92232bd..1673479be0ff 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_vspx.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_vspx.c
@@ -286,6 +286,7 @@ void vsp1_isp_free_buffer(struct device *dev,
dma_free_coherent(bus_master, buffer_desc->size, buffer_desc->cpu_addr,
buffer_desc->dma_addr);
}
+EXPORT_SYMBOL_GPL(vsp1_isp_free_buffer);
/**
* vsp1_isp_start_streaming - Start processing VSPX jobs
diff --git a/drivers/media/platform/rockchip/rga/rga.c b/drivers/media/platform/rockchip/rga/rga.c
index 3dccab5fa4a1..776046de979a 100644
--- a/drivers/media/platform/rockchip/rga/rga.c
+++ b/drivers/media/platform/rockchip/rga/rga.c
@@ -395,8 +395,7 @@ static int rga_open(struct file *file)
return ret;
}
v4l2_fh_init(&ctx->fh, video_devdata(file));
- file->private_data = &ctx->fh;
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
rga_setup_ctrls(ctx);
@@ -411,8 +410,7 @@ static int rga_open(struct file *file)
static int rga_release(struct file *file)
{
- struct rga_ctx *ctx =
- container_of(file->private_data, struct rga_ctx, fh);
+ struct rga_ctx *ctx = file_to_rga_ctx(file);
struct rockchip_rga *rga = ctx->rga;
mutex_lock(&rga->mutex);
@@ -420,7 +418,7 @@ static int rga_release(struct file *file)
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
v4l2_ctrl_handler_free(&ctx->ctrl_handler);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
kfree(ctx);
@@ -448,7 +446,7 @@ vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *cap)
return 0;
}
-static int vidioc_enum_fmt(struct file *file, void *prv, struct v4l2_fmtdesc *f)
+static int vidioc_enum_fmt(struct file *file, void *priv, struct v4l2_fmtdesc *f)
{
struct rga_fmt *fmt;
@@ -461,10 +459,10 @@ static int vidioc_enum_fmt(struct file *file, void *prv, struct v4l2_fmtdesc *f)
return 0;
}
-static int vidioc_g_fmt(struct file *file, void *prv, struct v4l2_format *f)
+static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
struct v4l2_pix_format_mplane *pix_fmt = &f->fmt.pix_mp;
- struct rga_ctx *ctx = prv;
+ struct rga_ctx *ctx = file_to_rga_ctx(file);
struct vb2_queue *vq;
struct rga_frame *frm;
@@ -483,7 +481,7 @@ static int vidioc_g_fmt(struct file *file, void *prv, struct v4l2_format *f)
return 0;
}
-static int vidioc_try_fmt(struct file *file, void *prv, struct v4l2_format *f)
+static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
struct v4l2_pix_format_mplane *pix_fmt = &f->fmt.pix_mp;
struct rga_fmt *fmt;
@@ -503,10 +501,10 @@ static int vidioc_try_fmt(struct file *file, void *prv, struct v4l2_format *f)
return 0;
}
-static int vidioc_s_fmt(struct file *file, void *prv, struct v4l2_format *f)
+static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
struct v4l2_pix_format_mplane *pix_fmt = &f->fmt.pix_mp;
- struct rga_ctx *ctx = prv;
+ struct rga_ctx *ctx = file_to_rga_ctx(file);
struct rockchip_rga *rga = ctx->rga;
struct vb2_queue *vq;
struct rga_frame *frm;
@@ -516,7 +514,7 @@ static int vidioc_s_fmt(struct file *file, void *prv, struct v4l2_format *f)
/* Adjust all values accordingly to the hardware capabilities
* and chosen format.
*/
- ret = vidioc_try_fmt(file, prv, f);
+ ret = vidioc_try_fmt(file, priv, f);
if (ret)
return ret;
vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
@@ -560,10 +558,10 @@ static int vidioc_s_fmt(struct file *file, void *prv, struct v4l2_format *f)
return 0;
}
-static int vidioc_g_selection(struct file *file, void *prv,
+static int vidioc_g_selection(struct file *file, void *priv,
struct v4l2_selection *s)
{
- struct rga_ctx *ctx = prv;
+ struct rga_ctx *ctx = file_to_rga_ctx(file);
struct rga_frame *f;
bool use_frame = false;
@@ -608,10 +606,10 @@ static int vidioc_g_selection(struct file *file, void *prv,
return 0;
}
-static int vidioc_s_selection(struct file *file, void *prv,
+static int vidioc_s_selection(struct file *file, void *priv,
struct v4l2_selection *s)
{
- struct rga_ctx *ctx = prv;
+ struct rga_ctx *ctx = file_to_rga_ctx(file);
struct rockchip_rga *rga = ctx->rga;
struct rga_frame *f;
int ret = 0;
diff --git a/drivers/media/platform/rockchip/rga/rga.h b/drivers/media/platform/rockchip/rga/rga.h
index 530e12de73c4..72a28b120fab 100644
--- a/drivers/media/platform/rockchip/rga/rga.h
+++ b/drivers/media/platform/rockchip/rga/rga.h
@@ -68,6 +68,11 @@ struct rga_ctx {
u32 fill_color;
};
+static inline struct rga_ctx *file_to_rga_ctx(struct file *filp)
+{
+ return container_of(file_to_v4l2_fh(filp), struct rga_ctx, fh);
+}
+
struct rockchip_rga {
struct v4l2_device v4l2_dev;
struct v4l2_m2m_dev *m2m_dev;
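
The new file_to_rga_ctx() helper works because v4l2_fh_add() stored the embedded &ctx->fh for the file, and container_of() subtracts the member offset to recover the enclosing context. The same shape, with hypothetical names, is what the per-driver helpers introduced throughout this series all reduce to:

#include <linux/container_of.h>
#include <linux/fs.h>
#include <media/v4l2-fh.h>

struct demo_ctx {
	struct v4l2_fh fh;	/* embedded handle registered in open() */
	int some_state;
};

static inline struct demo_ctx *file_to_demo_ctx(struct file *filp)
{
	/* file_to_v4l2_fh() returns &ctx->fh; step back to the ctx. */
	return container_of(file_to_v4l2_fh(filp), struct demo_ctx, fh);
}
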
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
index 5f187f9efc7b..6028ecdd23de 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
@@ -24,6 +24,7 @@
#include "rkisp1-regs.h"
struct dentry;
+struct dev_pm_domain_list;
struct regmap;
/*
@@ -55,7 +56,7 @@ struct regmap;
#define RKISP1_BUS_INFO "platform:" RKISP1_DRIVER_NAME
/* maximum number of clocks */
-#define RKISP1_MAX_BUS_CLK 8
+#define RKISP1_MAX_BUS_CLK 4
/* a bitmask of the ready stats */
#define RKISP1_STATS_MEAS_MASK (RKISP1_CIF_ISP_AWB_DONE | \
@@ -139,27 +140,31 @@ enum rkisp1_feature {
/*
* struct rkisp1_info - Model-specific ISP Information
*
- * @clks: array of ISP clock names
- * @clk_size: number of entries in the @clks array
+ * @num_clocks: number of clocks used by this ISP model
* @isrs: array of ISP interrupt descriptors
* @isr_size: number of entries in the @isrs array
* @isp_ver: ISP version
* @features: bitmask of rkisp1_feature features implemented by the ISP
* @max_width: maximum input frame width
* @max_height: maximum input frame height
+ * @pm_domains.names: names of the power domains
+ * @pm_domains.count: number of power domains
*
* This structure contains information about the ISP specific to a particular
* ISP model, version, or integration in a particular SoC.
*/
struct rkisp1_info {
- const char * const *clks;
- unsigned int clk_size;
+ unsigned int num_clocks;
const struct rkisp1_isr_data *isrs;
unsigned int isr_size;
enum rkisp1_cif_isp_version isp_ver;
unsigned int features;
unsigned int max_width;
unsigned int max_height;
+ struct {
+ const char * const *names;
+ unsigned int count;
+ } pm_domains;
};
/*
@@ -483,6 +488,7 @@ struct rkisp1_debug {
* @dev: a pointer to the struct device
* @clk_size: number of clocks
* @clks: array of clocks
+ * @pm_domains: power domains
* @gasket: the gasket - i.MX8MP only
* @gasket_id: the gasket ID (0 or 1) - i.MX8MP only
* @v4l2_dev: v4l2_device variable
@@ -507,6 +513,7 @@ struct rkisp1_device {
struct device *dev;
unsigned int clk_size;
struct clk_bulk_data clks[RKISP1_MAX_BUS_CLK];
+ struct dev_pm_domain_list *pm_domains;
struct regmap *gasket;
unsigned int gasket_id;
struct v4l2_device v4l2_dev;
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
index dc65a7924f8a..1791c02a40ae 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
@@ -8,6 +8,7 @@
* Copyright (C) 2017 Rockchip Electronics Co., Ltd.
*/
+#include <linux/build_bug.h>
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/mfd/syscon.h>
@@ -16,6 +17,7 @@
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-mc.h>
@@ -491,13 +493,6 @@ static irqreturn_t rkisp1_isr(int irq, void *ctx)
return ret;
}
-static const char * const px30_isp_clks[] = {
- "isp",
- "aclk",
- "hclk",
- "pclk",
-};
-
static const struct rkisp1_isr_data px30_isp_isrs[] = {
{ "isp", rkisp1_isp_isr, BIT(RKISP1_IRQ_ISP) },
{ "mi", rkisp1_capture_isr, BIT(RKISP1_IRQ_MI) },
@@ -505,8 +500,7 @@ static const struct rkisp1_isr_data px30_isp_isrs[] = {
};
static const struct rkisp1_info px30_isp_info = {
- .clks = px30_isp_clks,
- .clk_size = ARRAY_SIZE(px30_isp_clks),
+ .num_clocks = 4,
.isrs = px30_isp_isrs,
.isr_size = ARRAY_SIZE(px30_isp_isrs),
.isp_ver = RKISP1_V12,
@@ -518,19 +512,12 @@ static const struct rkisp1_info px30_isp_info = {
.max_height = 2448,
};
-static const char * const rk3399_isp_clks[] = {
- "isp",
- "aclk",
- "hclk",
-};
-
static const struct rkisp1_isr_data rk3399_isp_isrs[] = {
{ NULL, rkisp1_isr, BIT(RKISP1_IRQ_ISP) | BIT(RKISP1_IRQ_MI) | BIT(RKISP1_IRQ_MIPI) },
};
static const struct rkisp1_info rk3399_isp_info = {
- .clks = rk3399_isp_clks,
- .clk_size = ARRAY_SIZE(rk3399_isp_clks),
+ .num_clocks = 3,
.isrs = rk3399_isp_isrs,
.isr_size = ARRAY_SIZE(rk3399_isp_isrs),
.isp_ver = RKISP1_V10,
@@ -542,19 +529,17 @@ static const struct rkisp1_info rk3399_isp_info = {
.max_height = 3312,
};
-static const char * const imx8mp_isp_clks[] = {
- "isp",
- "hclk",
- "aclk",
-};
-
static const struct rkisp1_isr_data imx8mp_isp_isrs[] = {
{ NULL, rkisp1_isr, BIT(RKISP1_IRQ_ISP) | BIT(RKISP1_IRQ_MI) },
};
+static const char * const imx8mp_isp_pm_domains[] = {
+ "isp",
+ "csi2",
+};
+
static const struct rkisp1_info imx8mp_isp_info = {
- .clks = imx8mp_isp_clks,
- .clk_size = ARRAY_SIZE(imx8mp_isp_clks),
+ .num_clocks = 3,
.isrs = imx8mp_isp_isrs,
.isr_size = ARRAY_SIZE(imx8mp_isp_isrs),
.isp_ver = RKISP1_V_IMX8MP,
@@ -563,6 +548,10 @@ static const struct rkisp1_info imx8mp_isp_info = {
| RKISP1_FEATURE_COMPAND,
.max_width = 4096,
.max_height = 3072,
+ .pm_domains = {
+ .names = imx8mp_isp_pm_domains,
+ .count = ARRAY_SIZE(imx8mp_isp_pm_domains),
+ },
};
static const struct of_device_id rkisp1_of_match[] = {
@@ -582,6 +571,81 @@ static const struct of_device_id rkisp1_of_match[] = {
};
MODULE_DEVICE_TABLE(of, rkisp1_of_match);
+static const char * const rkisp1_clk_names[] = {
+ "isp",
+ "aclk",
+ "hclk",
+ "pclk",
+};
+
+static int rkisp1_init_clocks(struct rkisp1_device *rkisp1)
+{
+ const struct rkisp1_info *info = rkisp1->info;
+ unsigned int i;
+ int ret;
+
+ static_assert(ARRAY_SIZE(rkisp1_clk_names) == ARRAY_SIZE(rkisp1->clks));
+
+ for (i = 0; i < info->num_clocks; i++)
+ rkisp1->clks[i].id = rkisp1_clk_names[i];
+
+ ret = devm_clk_bulk_get(rkisp1->dev, info->num_clocks, rkisp1->clks);
+ if (ret)
+ return ret;
+
+ rkisp1->clk_size = info->num_clocks;
+
+ /*
+ * On i.MX8MP the pclk clock is needed to access the HDR stitching
+ * registers, but wasn't required by the DT bindings. Try to acquire it as
+ * an optional clock to avoid breaking backward compatibility.
+ */
+ if (info->isp_ver == RKISP1_V_IMX8MP) {
+ struct clk *clk;
+
+ clk = devm_clk_get_optional(rkisp1->dev, "pclk");
+ if (IS_ERR(clk))
+ return dev_err_probe(rkisp1->dev, PTR_ERR(clk),
+ "Failed to acquire pclk clock\n");
+
+ if (clk)
+ rkisp1->clks[rkisp1->clk_size++].clk = clk;
+ }
+
+ return 0;
+}
+
+static int rkisp1_init_pm_domains(struct rkisp1_device *rkisp1)
+{
+ const struct rkisp1_info *info = rkisp1->info;
+ struct dev_pm_domain_attach_data pm_domain_data = {
+ .pd_names = info->pm_domains.names,
+ .num_pd_names = info->pm_domains.count,
+ };
+ int ret;
+
+ /*
+ * Most platforms have a single power domain, which the PM domain core
+ * automatically attaches at probe time. When that's the case there's
+ * nothing to do here.
+ */
+ if (rkisp1->dev->pm_domain)
+ return 0;
+
+ if (!pm_domain_data.num_pd_names)
+ return 0;
+
+ ret = devm_pm_domain_attach_list(rkisp1->dev, &pm_domain_data,
+ &rkisp1->pm_domains);
+ if (ret < 0) {
+ dev_err_probe(rkisp1->dev, ret,
+ "Failed to attach power domains\n");
+ return ret;
+ }
+
+ return 0;
+}
+
static int rkisp1_probe(struct platform_device *pdev)
{
const struct rkisp1_info *info;
@@ -639,12 +703,13 @@ static int rkisp1_probe(struct platform_device *pdev)
}
}
- for (i = 0; i < info->clk_size; i++)
- rkisp1->clks[i].id = info->clks[i];
- ret = devm_clk_bulk_get(dev, info->clk_size, rkisp1->clks);
+ ret = rkisp1_init_clocks(rkisp1);
+ if (ret)
+ return ret;
+
+ ret = rkisp1_init_pm_domains(rkisp1);
if (ret)
return ret;
- rkisp1->clk_size = info->clk_size;
if (info->isp_ver == RKISP1_V_IMX8MP) {
unsigned int id;
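
Two notes on the rkisp1 refactoring above: the per-SoC clock name arrays collapse into one shared table plus a count (clk_bulk_get matches clocks by name, so the ordering differences in the old per-SoC arrays don't matter), and explicit power-domain attachment is only needed when a device sits in more than one domain, since the PM core attaches a single domain automatically at probe time. A condensed sketch of the multi-domain case, with hypothetical domain names:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/pm_domain.h>

static int demo_attach_domains(struct device *dev)
{
	static const char * const pd_names[] = { "isp", "csi2" };
	struct dev_pm_domain_attach_data data = {
		.pd_names = pd_names,
		.num_pd_names = ARRAY_SIZE(pd_names),
	};
	struct dev_pm_domain_list *list;
	int ret;

	/* A single domain is attached by the PM core automatically. */
	if (dev->pm_domain)
		return 0;

	ret = devm_pm_domain_attach_list(dev, &data, &list);

	return ret < 0 ? ret : 0;	/* >0 means domains were attached */
}
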
diff --git a/drivers/media/platform/rockchip/rkvdec/rkvdec.c b/drivers/media/platform/rockchip/rkvdec/rkvdec.c
index d3b31f461194..6e606d73ff51 100644
--- a/drivers/media/platform/rockchip/rkvdec/rkvdec.c
+++ b/drivers/media/platform/rockchip/rkvdec/rkvdec.c
@@ -354,7 +354,7 @@ static int rkvdec_try_capture_fmt(struct file *file, void *priv,
struct v4l2_format *f)
{
struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
- struct rkvdec_ctx *ctx = fh_to_rkvdec_ctx(priv);
+ struct rkvdec_ctx *ctx = file_to_rkvdec_ctx(file);
const struct rkvdec_coded_fmt_desc *coded_desc;
/*
@@ -387,7 +387,7 @@ static int rkvdec_try_output_fmt(struct file *file, void *priv,
struct v4l2_format *f)
{
struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
- struct rkvdec_ctx *ctx = fh_to_rkvdec_ctx(priv);
+ struct rkvdec_ctx *ctx = file_to_rkvdec_ctx(file);
const struct rkvdec_coded_fmt_desc *desc;
desc = rkvdec_find_coded_fmt_desc(pix_mp->pixelformat);
@@ -418,7 +418,7 @@ static int rkvdec_try_output_fmt(struct file *file, void *priv,
static int rkvdec_s_capture_fmt(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct rkvdec_ctx *ctx = fh_to_rkvdec_ctx(priv);
+ struct rkvdec_ctx *ctx = file_to_rkvdec_ctx(file);
struct vb2_queue *vq;
int ret;
@@ -439,7 +439,7 @@ static int rkvdec_s_capture_fmt(struct file *file, void *priv,
static int rkvdec_s_output_fmt(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct rkvdec_ctx *ctx = fh_to_rkvdec_ctx(priv);
+ struct rkvdec_ctx *ctx = file_to_rkvdec_ctx(file);
struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx;
const struct rkvdec_coded_fmt_desc *desc;
struct v4l2_format *cap_fmt;
@@ -504,7 +504,7 @@ static int rkvdec_s_output_fmt(struct file *file, void *priv,
static int rkvdec_g_output_fmt(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct rkvdec_ctx *ctx = fh_to_rkvdec_ctx(priv);
+ struct rkvdec_ctx *ctx = file_to_rkvdec_ctx(file);
*f = ctx->coded_fmt;
return 0;
@@ -513,7 +513,7 @@ static int rkvdec_g_output_fmt(struct file *file, void *priv,
static int rkvdec_g_capture_fmt(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct rkvdec_ctx *ctx = fh_to_rkvdec_ctx(priv);
+ struct rkvdec_ctx *ctx = file_to_rkvdec_ctx(file);
*f = ctx->decoded_fmt;
return 0;
@@ -532,7 +532,7 @@ static int rkvdec_enum_output_fmt(struct file *file, void *priv,
static int rkvdec_enum_capture_fmt(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- struct rkvdec_ctx *ctx = fh_to_rkvdec_ctx(priv);
+ struct rkvdec_ctx *ctx = file_to_rkvdec_ctx(file);
u32 fourcc;
fourcc = rkvdec_enum_decoded_fmt(ctx, f->index, ctx->image_fmt);
@@ -937,8 +937,7 @@ static int rkvdec_open(struct file *filp)
if (ret)
goto err_cleanup_m2m_ctx;
- filp->private_data = &ctx->fh;
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, filp);
return 0;
@@ -952,9 +951,9 @@ err_free_ctx:
static int rkvdec_release(struct file *filp)
{
- struct rkvdec_ctx *ctx = fh_to_rkvdec_ctx(filp->private_data);
+ struct rkvdec_ctx *ctx = file_to_rkvdec_ctx(filp);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, filp);
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
v4l2_fh_exit(&ctx->fh);
diff --git a/drivers/media/platform/rockchip/rkvdec/rkvdec.h b/drivers/media/platform/rockchip/rkvdec/rkvdec.h
index f6e8bf38add3..481aaa4bffe9 100644
--- a/drivers/media/platform/rockchip/rkvdec/rkvdec.h
+++ b/drivers/media/platform/rockchip/rkvdec/rkvdec.h
@@ -124,9 +124,9 @@ struct rkvdec_ctx {
void *priv;
};
-static inline struct rkvdec_ctx *fh_to_rkvdec_ctx(struct v4l2_fh *fh)
+static inline struct rkvdec_ctx *file_to_rkvdec_ctx(struct file *filp)
{
- return container_of(fh, struct rkvdec_ctx, fh);
+ return container_of(file_to_v4l2_fh(filp), struct rkvdec_ctx, fh);
}
struct rkvdec_aux_buf {
diff --git a/drivers/media/platform/samsung/exynos-gsc/gsc-core.h b/drivers/media/platform/samsung/exynos-gsc/gsc-core.h
index b9777e07fb6d..265221abf4dc 100644
--- a/drivers/media/platform/samsung/exynos-gsc/gsc-core.h
+++ b/drivers/media/platform/samsung/exynos-gsc/gsc-core.h
@@ -85,7 +85,6 @@ enum gsc_yuv_fmt {
GSC_CRCB,
};
-#define fh_to_ctx(__fh) container_of(__fh, struct gsc_ctx, fh)
#define is_rgb(x) (!!((x) & 0x1))
#define is_yuv420(x) (!!((x) & 0x2))
#define is_yuv422(x) (!!((x) & 0x4))
@@ -381,6 +380,11 @@ struct gsc_ctx {
enum v4l2_colorspace out_colorspace;
};
+static inline struct gsc_ctx *file_to_ctx(struct file *filp)
+{
+ return container_of(file_to_v4l2_fh(filp), struct gsc_ctx, fh);
+}
+
void gsc_set_prefbuf(struct gsc_dev *gsc, struct gsc_frame *frm);
int gsc_register_m2m_device(struct gsc_dev *gsc);
void gsc_unregister_m2m_device(struct gsc_dev *gsc);
diff --git a/drivers/media/platform/samsung/exynos-gsc/gsc-m2m.c b/drivers/media/platform/samsung/exynos-gsc/gsc-m2m.c
index 4bda1c369c44..722e2531e23f 100644
--- a/drivers/media/platform/samsung/exynos-gsc/gsc-m2m.c
+++ b/drivers/media/platform/samsung/exynos-gsc/gsc-m2m.c
@@ -297,7 +297,7 @@ static int gsc_m2m_enum_fmt(struct file *file, void *priv,
static int gsc_m2m_g_fmt_mplane(struct file *file, void *fh,
struct v4l2_format *f)
{
- struct gsc_ctx *ctx = fh_to_ctx(fh);
+ struct gsc_ctx *ctx = file_to_ctx(file);
return gsc_g_fmt_mplane(ctx, f);
}
@@ -305,7 +305,7 @@ static int gsc_m2m_g_fmt_mplane(struct file *file, void *fh,
static int gsc_m2m_try_fmt_mplane(struct file *file, void *fh,
struct v4l2_format *f)
{
- struct gsc_ctx *ctx = fh_to_ctx(fh);
+ struct gsc_ctx *ctx = file_to_ctx(file);
return gsc_try_fmt_mplane(ctx, f);
}
@@ -313,7 +313,7 @@ static int gsc_m2m_try_fmt_mplane(struct file *file, void *fh,
static int gsc_m2m_s_fmt_mplane(struct file *file, void *fh,
struct v4l2_format *f)
{
- struct gsc_ctx *ctx = fh_to_ctx(fh);
+ struct gsc_ctx *ctx = file_to_ctx(file);
struct vb2_queue *vq;
struct gsc_frame *frame;
struct v4l2_pix_format_mplane *pix;
@@ -359,7 +359,7 @@ static int gsc_m2m_s_fmt_mplane(struct file *file, void *fh,
static int gsc_m2m_reqbufs(struct file *file, void *fh,
struct v4l2_requestbuffers *reqbufs)
{
- struct gsc_ctx *ctx = fh_to_ctx(fh);
+ struct gsc_ctx *ctx = file_to_ctx(file);
struct gsc_dev *gsc = ctx->gsc_dev;
u32 max_cnt;
@@ -374,35 +374,35 @@ static int gsc_m2m_reqbufs(struct file *file, void *fh,
static int gsc_m2m_expbuf(struct file *file, void *fh,
struct v4l2_exportbuffer *eb)
{
- struct gsc_ctx *ctx = fh_to_ctx(fh);
+ struct gsc_ctx *ctx = file_to_ctx(file);
return v4l2_m2m_expbuf(file, ctx->m2m_ctx, eb);
}
static int gsc_m2m_querybuf(struct file *file, void *fh,
struct v4l2_buffer *buf)
{
- struct gsc_ctx *ctx = fh_to_ctx(fh);
+ struct gsc_ctx *ctx = file_to_ctx(file);
return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
}
static int gsc_m2m_qbuf(struct file *file, void *fh,
struct v4l2_buffer *buf)
{
- struct gsc_ctx *ctx = fh_to_ctx(fh);
+ struct gsc_ctx *ctx = file_to_ctx(file);
return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
}
static int gsc_m2m_dqbuf(struct file *file, void *fh,
struct v4l2_buffer *buf)
{
- struct gsc_ctx *ctx = fh_to_ctx(fh);
+ struct gsc_ctx *ctx = file_to_ctx(file);
return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
}
static int gsc_m2m_streamon(struct file *file, void *fh,
enum v4l2_buf_type type)
{
- struct gsc_ctx *ctx = fh_to_ctx(fh);
+ struct gsc_ctx *ctx = file_to_ctx(file);
/* The source and target color format need to be set */
if (V4L2_TYPE_IS_OUTPUT(type)) {
@@ -418,7 +418,7 @@ static int gsc_m2m_streamon(struct file *file, void *fh,
static int gsc_m2m_streamoff(struct file *file, void *fh,
enum v4l2_buf_type type)
{
- struct gsc_ctx *ctx = fh_to_ctx(fh);
+ struct gsc_ctx *ctx = file_to_ctx(file);
return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
}
@@ -440,8 +440,8 @@ static int is_rectangle_enclosed(struct v4l2_rect *a, struct v4l2_rect *b)
static int gsc_m2m_g_selection(struct file *file, void *fh,
struct v4l2_selection *s)
{
+ struct gsc_ctx *ctx = file_to_ctx(file);
struct gsc_frame *frame;
- struct gsc_ctx *ctx = fh_to_ctx(fh);
if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
(s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT))
@@ -478,7 +478,7 @@ static int gsc_m2m_s_selection(struct file *file, void *fh,
struct v4l2_selection *s)
{
struct gsc_frame *frame;
- struct gsc_ctx *ctx = fh_to_ctx(fh);
+ struct gsc_ctx *ctx = file_to_ctx(file);
struct gsc_variant *variant = ctx->gsc_dev->variant;
struct v4l2_selection sel = *s;
int ret;
@@ -625,8 +625,7 @@ static int gsc_m2m_open(struct file *file)
/* Use separate control handler per file handle */
ctx->fh.ctrl_handler = &ctx->ctrl_handler;
- file->private_data = &ctx->fh;
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
ctx->gsc_dev = gsc;
/* Default color format */
@@ -655,7 +654,7 @@ static int gsc_m2m_open(struct file *file)
error_ctrls:
gsc_ctrls_delete(ctx);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
error_fh:
v4l2_fh_exit(&ctx->fh);
kfree(ctx);
@@ -666,7 +665,7 @@ unlock:
static int gsc_m2m_release(struct file *file)
{
- struct gsc_ctx *ctx = fh_to_ctx(file->private_data);
+ struct gsc_ctx *ctx = file_to_ctx(file);
struct gsc_dev *gsc = ctx->gsc_dev;
pr_debug("pid: %d, state: 0x%lx, refcnt= %d",
@@ -676,7 +675,7 @@ static int gsc_m2m_release(struct file *file)
v4l2_m2m_ctx_release(ctx->m2m_ctx);
gsc_ctrls_delete(ctx);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
if (--gsc->m2m.refcnt <= 0)
@@ -690,7 +689,7 @@ static int gsc_m2m_release(struct file *file)
static __poll_t gsc_m2m_poll(struct file *file,
struct poll_table_struct *wait)
{
- struct gsc_ctx *ctx = fh_to_ctx(file->private_data);
+ struct gsc_ctx *ctx = file_to_ctx(file);
struct gsc_dev *gsc = ctx->gsc_dev;
__poll_t ret;
@@ -705,7 +704,7 @@ static __poll_t gsc_m2m_poll(struct file *file,
static int gsc_m2m_mmap(struct file *file, struct vm_area_struct *vma)
{
- struct gsc_ctx *ctx = fh_to_ctx(file->private_data);
+ struct gsc_ctx *ctx = file_to_ctx(file);
struct gsc_dev *gsc = ctx->gsc_dev;
int ret;
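
Most of the gsc buffer ioctls reduce to one-line delegations into the v4l2-mem2mem helpers once the context is derived from the file; a representative sketch, reusing the file_to_ctx() helper added to gsc-core.h above:

#include <media/v4l2-mem2mem.h>

static int demo_m2m_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
{
	struct gsc_ctx *ctx = file_to_ctx(file);

	/* The m2m core queues onto the right vb2 queue for buf->type. */
	return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
}
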
diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-core.h b/drivers/media/platform/samsung/exynos4-is/fimc-core.h
index 63385152a2ff..c23cbdee7afc 100644
--- a/drivers/media/platform/samsung/exynos4-is/fimc-core.h
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-core.h
@@ -496,7 +496,10 @@ struct fimc_ctx {
struct fimc_ctrls ctrls;
};
-#define fh_to_ctx(__fh) container_of(__fh, struct fimc_ctx, fh)
+static inline struct fimc_ctx *file_to_ctx(struct file *filp)
+{
+ return container_of(file_to_v4l2_fh(filp), struct fimc_ctx, fh);
+}
static inline void set_frame_bounds(struct fimc_frame *f, u32 width, u32 height)
{
diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-m2m.c b/drivers/media/platform/samsung/exynos4-is/fimc-m2m.c
index 951433c8e92a..562c57f186c6 100644
--- a/drivers/media/platform/samsung/exynos4-is/fimc-m2m.c
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-m2m.c
@@ -249,7 +249,7 @@ static int fimc_m2m_enum_fmt(struct file *file, void *priv,
static int fimc_m2m_g_fmt_mplane(struct file *file, void *fh,
struct v4l2_format *f)
{
- struct fimc_ctx *ctx = fh_to_ctx(fh);
+ struct fimc_ctx *ctx = file_to_ctx(file);
const struct fimc_frame *frame = ctx_get_frame(ctx, f->type);
if (IS_ERR(frame))
@@ -308,7 +308,7 @@ static int fimc_try_fmt_mplane(struct fimc_ctx *ctx, struct v4l2_format *f)
static int fimc_m2m_try_fmt_mplane(struct file *file, void *fh,
struct v4l2_format *f)
{
- struct fimc_ctx *ctx = fh_to_ctx(fh);
+ struct fimc_ctx *ctx = file_to_ctx(file);
return fimc_try_fmt_mplane(ctx, f);
}
@@ -337,7 +337,7 @@ static void __set_frame_format(struct fimc_frame *frame,
static int fimc_m2m_s_fmt_mplane(struct file *file, void *fh,
struct v4l2_format *f)
{
- struct fimc_ctx *ctx = fh_to_ctx(fh);
+ struct fimc_ctx *ctx = file_to_ctx(file);
struct fimc_dev *fimc = ctx->fimc_dev;
const struct fimc_fmt *fmt;
struct vb2_queue *vq;
@@ -376,7 +376,7 @@ static int fimc_m2m_s_fmt_mplane(struct file *file, void *fh,
static int fimc_m2m_g_selection(struct file *file, void *fh,
struct v4l2_selection *s)
{
- struct fimc_ctx *ctx = fh_to_ctx(fh);
+ struct fimc_ctx *ctx = file_to_ctx(file);
const struct fimc_frame *frame;
frame = ctx_get_frame(ctx, s->type);
@@ -484,7 +484,7 @@ static int fimc_m2m_try_selection(struct fimc_ctx *ctx,
static int fimc_m2m_s_selection(struct file *file, void *fh,
struct v4l2_selection *s)
{
- struct fimc_ctx *ctx = fh_to_ctx(fh);
+ struct fimc_ctx *ctx = file_to_ctx(file);
struct fimc_dev *fimc = ctx->fimc_dev;
struct fimc_frame *f;
int ret;
@@ -634,8 +634,7 @@ static int fimc_m2m_open(struct file *file)
/* Use separate control handler per file handle */
ctx->fh.ctrl_handler = &ctx->ctrls.handler;
- file->private_data = &ctx->fh;
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
/* Setup the device context for memory-to-memory mode */
ctx->state = FIMC_CTX_M2M;
@@ -664,7 +663,7 @@ error_m2m_ctx:
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
error_c:
fimc_ctrls_delete(ctx);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
error_fh:
v4l2_fh_exit(&ctx->fh);
kfree(ctx);
@@ -675,7 +674,7 @@ unlock:
static int fimc_m2m_release(struct file *file)
{
- struct fimc_ctx *ctx = fh_to_ctx(file->private_data);
+ struct fimc_ctx *ctx = file_to_ctx(file);
struct fimc_dev *fimc = ctx->fimc_dev;
dbg("pid: %d, state: 0x%lx, refcnt= %d",
@@ -685,7 +684,7 @@ static int fimc_m2m_release(struct file *file)
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
fimc_ctrls_delete(ctx);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
if (--fimc->m2m.refcnt <= 0)
diff --git a/drivers/media/platform/samsung/s3c-camif/camif-capture.c b/drivers/media/platform/samsung/s3c-camif/camif-capture.c
index 3e566b65f417..ed1a1d693293 100644
--- a/drivers/media/platform/samsung/s3c-camif/camif-capture.c
+++ b/drivers/media/platform/samsung/s3c-camif/camif-capture.c
@@ -572,7 +572,7 @@ static int s3c_camif_close(struct file *file)
mutex_lock(&camif->lock);
- if (vp->owner == file->private_data) {
+ if (vp->owner == file_to_v4l2_fh(file)) {
camif_stop_capture(vp);
vb2_queue_release(&vp->vb_queue);
vp->owner = NULL;
@@ -595,7 +595,7 @@ static __poll_t s3c_camif_poll(struct file *file,
__poll_t ret;
mutex_lock(&camif->lock);
- if (vp->owner && vp->owner != file->private_data)
+ if (vp->owner && vp->owner != file_to_v4l2_fh(file))
ret = EPOLLERR;
else
ret = vb2_poll(&vp->vb_queue, file, wait);
@@ -609,7 +609,7 @@ static int s3c_camif_mmap(struct file *file, struct vm_area_struct *vma)
struct camif_vp *vp = video_drvdata(file);
int ret;
- if (vp->owner && vp->owner != file->private_data)
+ if (vp->owner && vp->owner != file_to_v4l2_fh(file))
ret = -EBUSY;
else
ret = vb2_mmap(&vp->vb_queue, vma);
@@ -791,7 +791,7 @@ static int s3c_camif_vidioc_s_fmt(struct file *file, void *priv,
out_frame->rect.top = 0;
if (vp->owner == NULL)
- vp->owner = priv;
+ vp->owner = file_to_v4l2_fh(file);
pr_debug("%ux%u. payload: %u. fmt: 0x%08x. %d %d. sizeimage: %d. bpl: %d\n",
out_frame->f_width, out_frame->f_height, vp->payload,
@@ -841,7 +841,7 @@ static int s3c_camif_streamon(struct file *file, void *priv,
if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
- if (vp->owner && vp->owner != priv)
+ if (vp->owner && vp->owner != file_to_v4l2_fh(file))
return -EBUSY;
if (s3c_vp_active(vp))
@@ -872,7 +872,7 @@ static int s3c_camif_streamoff(struct file *file, void *priv,
if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
- if (vp->owner && vp->owner != priv)
+ if (vp->owner && vp->owner != file_to_v4l2_fh(file))
return -EBUSY;
ret = vb2_streamoff(&vp->vb_queue, type);
@@ -888,9 +888,9 @@ static int s3c_camif_reqbufs(struct file *file, void *priv,
int ret;
pr_debug("[vp%d] rb count: %d, owner: %p, priv: %p\n",
- vp->id, rb->count, vp->owner, priv);
+ vp->id, rb->count, vp->owner, file_to_v4l2_fh(file));
- if (vp->owner && vp->owner != priv)
+ if (vp->owner && vp->owner != file_to_v4l2_fh(file))
return -EBUSY;
if (rb->count)
@@ -910,7 +910,7 @@ static int s3c_camif_reqbufs(struct file *file, void *priv,
vp->reqbufs_count = rb->count;
if (vp->owner == NULL && rb->count > 0)
- vp->owner = priv;
+ vp->owner = file_to_v4l2_fh(file);
return ret;
}
@@ -929,7 +929,7 @@ static int s3c_camif_qbuf(struct file *file, void *priv,
pr_debug("[vp%d]\n", vp->id);
- if (vp->owner && vp->owner != priv)
+ if (vp->owner && vp->owner != file_to_v4l2_fh(file))
return -EBUSY;
return vb2_qbuf(&vp->vb_queue, vp->vdev.v4l2_dev->mdev, buf);
@@ -942,7 +942,7 @@ static int s3c_camif_dqbuf(struct file *file, void *priv,
pr_debug("[vp%d] sequence: %d\n", vp->id, vp->frame_sequence);
- if (vp->owner && vp->owner != priv)
+ if (vp->owner && vp->owner != file_to_v4l2_fh(file))
return -EBUSY;
return vb2_dqbuf(&vp->vb_queue, buf, file->f_flags & O_NONBLOCK);
@@ -954,14 +954,14 @@ static int s3c_camif_create_bufs(struct file *file, void *priv,
struct camif_vp *vp = video_drvdata(file);
int ret;
- if (vp->owner && vp->owner != priv)
+ if (vp->owner && vp->owner != file_to_v4l2_fh(file))
return -EBUSY;
create->count = max_t(u32, 1, create->count);
ret = vb2_create_bufs(&vp->vb_queue, create);
if (!ret && vp->owner == NULL)
- vp->owner = priv;
+ vp->owner = file_to_v4l2_fh(file);
return ret;
}
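
s3c-camif keeps an exclusive-owner model: the first file handle to set a format or request buffers becomes vp->owner, and the conversion simply swaps the stale priv pointer for file_to_v4l2_fh(file) in every comparison. The two are equivalent because v4l2_fh_add() stored that same pointer for the file. The pattern in isolation, reusing the driver's names from the hunks above:

static int demo_streamon(struct file *file, void *priv,
			 enum v4l2_buf_type type)
{
	struct camif_vp *vp = video_drvdata(file);

	/* Only the owning file handle may start streaming. */
	if (vp->owner && vp->owner != file_to_v4l2_fh(file))
		return -EBUSY;

	return vb2_streamon(&vp->vb_queue, type);
}
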
diff --git a/drivers/media/platform/samsung/s5p-g2d/g2d.c b/drivers/media/platform/samsung/s5p-g2d/g2d.c
index ffed16a34493..ffb9bee6cb9d 100644
--- a/drivers/media/platform/samsung/s5p-g2d/g2d.c
+++ b/drivers/media/platform/samsung/s5p-g2d/g2d.c
@@ -25,7 +25,10 @@
#include "g2d.h"
#include "g2d-regs.h"
-#define fh2ctx(__fh) container_of(__fh, struct g2d_ctx, fh)
+static inline struct g2d_ctx *file2ctx(struct file *filp)
+{
+ return container_of(file_to_v4l2_fh(filp), struct g2d_ctx, fh);
+}
static struct g2d_fmt formats[] = {
{
@@ -254,8 +257,7 @@ static int g2d_open(struct file *file)
return ret;
}
v4l2_fh_init(&ctx->fh, video_devdata(file));
- file->private_data = &ctx->fh;
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
g2d_setup_ctrls(ctx);
@@ -272,13 +274,13 @@ static int g2d_open(struct file *file)
static int g2d_release(struct file *file)
{
struct g2d_dev *dev = video_drvdata(file);
- struct g2d_ctx *ctx = fh2ctx(file->private_data);
+ struct g2d_ctx *ctx = file2ctx(file);
mutex_lock(&dev->mutex);
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
mutex_unlock(&dev->mutex);
v4l2_ctrl_handler_free(&ctx->ctrl_handler);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
kfree(ctx);
v4l2_info(&dev->v4l2_dev, "instance closed\n");
@@ -295,7 +297,7 @@ static int vidioc_querycap(struct file *file, void *priv,
return 0;
}
-static int vidioc_enum_fmt(struct file *file, void *prv, struct v4l2_fmtdesc *f)
+static int vidioc_enum_fmt(struct file *file, void *priv, struct v4l2_fmtdesc *f)
{
if (f->index >= NUM_FORMATS)
return -EINVAL;
@@ -303,9 +305,9 @@ static int vidioc_enum_fmt(struct file *file, void *prv, struct v4l2_fmtdesc *f)
return 0;
}
-static int vidioc_g_fmt(struct file *file, void *prv, struct v4l2_format *f)
+static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
- struct g2d_ctx *ctx = prv;
+ struct g2d_ctx *ctx = file2ctx(file);
struct vb2_queue *vq;
struct g2d_frame *frm;
@@ -325,7 +327,7 @@ static int vidioc_g_fmt(struct file *file, void *prv, struct v4l2_format *f)
return 0;
}
-static int vidioc_try_fmt(struct file *file, void *prv, struct v4l2_format *f)
+static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
struct g2d_fmt *fmt;
enum v4l2_field *field;
@@ -355,9 +357,9 @@ static int vidioc_try_fmt(struct file *file, void *prv, struct v4l2_format *f)
return 0;
}
-static int vidioc_s_fmt(struct file *file, void *prv, struct v4l2_format *f)
+static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
- struct g2d_ctx *ctx = prv;
+ struct g2d_ctx *ctx = file2ctx(file);
struct g2d_dev *dev = ctx->dev;
struct vb2_queue *vq;
struct g2d_frame *frm;
@@ -366,7 +368,7 @@ static int vidioc_s_fmt(struct file *file, void *prv, struct v4l2_format *f)
/* Adjust all values accordingly to the hardware capabilities
* and chosen format. */
- ret = vidioc_try_fmt(file, prv, f);
+ ret = vidioc_try_fmt(file, priv, f);
if (ret)
return ret;
vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
@@ -395,10 +397,10 @@ static int vidioc_s_fmt(struct file *file, void *prv, struct v4l2_format *f)
return 0;
}
-static int vidioc_g_selection(struct file *file, void *prv,
+static int vidioc_g_selection(struct file *file, void *priv,
struct v4l2_selection *s)
{
- struct g2d_ctx *ctx = prv;
+ struct g2d_ctx *ctx = file2ctx(file);
struct g2d_frame *f;
f = get_frame(ctx, s->type);
@@ -445,10 +447,10 @@ static int vidioc_g_selection(struct file *file, void *prv,
return 0;
}
-static int vidioc_try_selection(struct file *file, void *prv,
+static int vidioc_try_selection(struct file *file, void *priv,
const struct v4l2_selection *s)
{
- struct g2d_ctx *ctx = prv;
+ struct g2d_ctx *ctx = file2ctx(file);
struct g2d_dev *dev = ctx->dev;
struct g2d_frame *f;
@@ -473,14 +475,14 @@ static int vidioc_try_selection(struct file *file, void *prv,
return 0;
}
-static int vidioc_s_selection(struct file *file, void *prv,
+static int vidioc_s_selection(struct file *file, void *priv,
struct v4l2_selection *s)
{
- struct g2d_ctx *ctx = prv;
+ struct g2d_ctx *ctx = file2ctx(file);
struct g2d_frame *f;
int ret;
- ret = vidioc_try_selection(file, prv, s);
+ ret = vidioc_try_selection(file, priv, s);
if (ret)
return ret;
f = get_frame(ctx, s->type);
diff --git a/drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c b/drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c
index ac4cf269456a..81792f7f8b16 100644
--- a/drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c
@@ -580,9 +580,9 @@ static inline struct s5p_jpeg_ctx *ctrl_to_ctx(struct v4l2_ctrl *c)
return container_of(c->handler, struct s5p_jpeg_ctx, ctrl_handler);
}
-static inline struct s5p_jpeg_ctx *fh_to_ctx(struct v4l2_fh *fh)
+static inline struct s5p_jpeg_ctx *file_to_ctx(struct file *filp)
{
- return container_of(fh, struct s5p_jpeg_ctx, fh);
+ return container_of(file_to_v4l2_fh(filp), struct s5p_jpeg_ctx, fh);
}
static int s5p_jpeg_to_user_subsampling(struct s5p_jpeg_ctx *ctx)
@@ -965,8 +965,7 @@ static int s5p_jpeg_open(struct file *file)
v4l2_fh_init(&ctx->fh, vfd);
/* Use separate control handler per file handle */
ctx->fh.ctrl_handler = &ctx->ctrl_handler;
- file->private_data = &ctx->fh;
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
ctx->jpeg = jpeg;
if (vfd == jpeg->vfd_encoder) {
@@ -1001,7 +1000,7 @@ static int s5p_jpeg_open(struct file *file)
return 0;
error:
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
mutex_unlock(&jpeg->lock);
free:
@@ -1011,13 +1010,13 @@ free:
static int s5p_jpeg_release(struct file *file)
{
+ struct s5p_jpeg_ctx *ctx = file_to_ctx(file);
struct s5p_jpeg *jpeg = video_drvdata(file);
- struct s5p_jpeg_ctx *ctx = fh_to_ctx(file->private_data);
mutex_lock(&jpeg->lock);
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
v4l2_ctrl_handler_free(&ctx->ctrl_handler);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
kfree(ctx);
mutex_unlock(&jpeg->lock);
@@ -1249,7 +1248,7 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
static int s5p_jpeg_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_jpeg_ctx *ctx = file_to_ctx(file);
if (ctx->mode == S5P_JPEG_ENCODE) {
strscpy(cap->driver, S5P_JPEG_M2M_NAME,
@@ -1297,7 +1296,7 @@ static int enum_fmt(struct s5p_jpeg_ctx *ctx,
static int s5p_jpeg_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_jpeg_ctx *ctx = file_to_ctx(file);
if (ctx->mode == S5P_JPEG_ENCODE)
return enum_fmt(ctx, sjpeg_formats, SJPEG_NUM_FORMATS, f,
@@ -1310,7 +1309,7 @@ static int s5p_jpeg_enum_fmt_vid_cap(struct file *file, void *priv,
static int s5p_jpeg_enum_fmt_vid_out(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_jpeg_ctx *ctx = file_to_ctx(file);
if (ctx->mode == S5P_JPEG_ENCODE)
return enum_fmt(ctx, sjpeg_formats, SJPEG_NUM_FORMATS, f,
@@ -1336,7 +1335,7 @@ static int s5p_jpeg_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
struct vb2_queue *vq;
struct s5p_jpeg_q_data *q_data = NULL;
struct v4l2_pix_format *pix = &f->fmt.pix;
- struct s5p_jpeg_ctx *ct = fh_to_ctx(priv);
+ struct s5p_jpeg_ctx *ct = file_to_ctx(file);
vq = v4l2_m2m_get_vq(ct->fh.m2m_ctx, f->type);
if (!vq)
@@ -1476,7 +1475,7 @@ static int vidioc_try_fmt(struct v4l2_format *f, struct s5p_jpeg_fmt *fmt,
static int s5p_jpeg_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_jpeg_ctx *ctx = file_to_ctx(file);
struct v4l2_pix_format *pix = &f->fmt.pix;
struct s5p_jpeg_fmt *fmt;
int ret;
@@ -1535,7 +1534,7 @@ exit:
static int s5p_jpeg_try_fmt_vid_out(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_jpeg_ctx *ctx = file_to_ctx(file);
struct s5p_jpeg_fmt *fmt;
fmt = s5p_jpeg_find_format(ctx, f->fmt.pix.pixelformat,
@@ -1682,7 +1681,7 @@ static int s5p_jpeg_s_fmt_vid_cap(struct file *file, void *priv,
if (ret)
return ret;
- return s5p_jpeg_s_fmt(fh_to_ctx(priv), f);
+ return s5p_jpeg_s_fmt(file_to_ctx(file), f);
}
static int s5p_jpeg_s_fmt_vid_out(struct file *file, void *priv,
@@ -1694,7 +1693,7 @@ static int s5p_jpeg_s_fmt_vid_out(struct file *file, void *priv,
if (ret)
return ret;
- return s5p_jpeg_s_fmt(fh_to_ctx(priv), f);
+ return s5p_jpeg_s_fmt(file_to_ctx(file), f);
}
static int s5p_jpeg_subscribe_event(struct v4l2_fh *fh,
@@ -1791,7 +1790,7 @@ static int exynos3250_jpeg_try_crop(struct s5p_jpeg_ctx *ctx,
static int s5p_jpeg_g_selection(struct file *file, void *priv,
struct v4l2_selection *s)
{
- struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_jpeg_ctx *ctx = file_to_ctx(file);
if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
@@ -1828,7 +1827,7 @@ static int s5p_jpeg_g_selection(struct file *file, void *priv,
static int s5p_jpeg_s_selection(struct file *file, void *fh,
struct v4l2_selection *s)
{
- struct s5p_jpeg_ctx *ctx = fh_to_ctx(file->private_data);
+ struct s5p_jpeg_ctx *ctx = file_to_ctx(file);
struct v4l2_rect *rect = &s->r;
int ret = -EINVAL;
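
Several of the open() paths above (gsc, fimc, s5p-jpeg, bdisp) share one detail worth noting: the control handler is assigned to the fh before v4l2_fh_add(), giving every open file handle its own control state rather than sharing a device-global handler. A minimal sketch with hypothetical names:

#include <media/v4l2-ctrls.h>
#include <media/v4l2-fh.h>

struct demo_ctx {
	struct v4l2_fh fh;
	struct v4l2_ctrl_handler ctrl_handler;
};

static int demo_open_ctrls(struct demo_ctx *ctx, struct video_device *vfd,
			   struct file *file)
{
	int ret;

	v4l2_fh_init(&ctx->fh, vfd);

	ret = v4l2_ctrl_handler_init(&ctx->ctrl_handler, 4);
	if (ret)
		return ret;

	/* Must be set before v4l2_fh_add() exposes the handle. */
	ctx->fh.ctrl_handler = &ctx->ctrl_handler;
	v4l2_fh_add(&ctx->fh, file);

	return 0;
}
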
diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
index 73fdcd362265..4948d734eb02 100644
--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
@@ -801,8 +801,7 @@ static int s5p_mfc_open(struct file *file)
}
init_waitqueue_head(&ctx->queue);
v4l2_fh_init(&ctx->fh, vdev);
- file->private_data = &ctx->fh;
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
ctx->dev = dev;
INIT_LIST_HEAD(&ctx->src_queue);
INIT_LIST_HEAD(&ctx->dst_queue);
@@ -877,7 +876,7 @@ static int s5p_mfc_open(struct file *file)
/* Init videobuf2 queue for CAPTURE */
q = &ctx->vq_dst;
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
- q->drv_priv = &ctx->fh;
+ q->drv_priv = ctx;
q->lock = &dev->mfc_mutex;
if (vdev == dev->vfd_dec) {
q->io_modes = VB2_MMAP;
@@ -904,7 +903,7 @@ static int s5p_mfc_open(struct file *file)
/* Init videobuf2 queue for OUTPUT */
q = &ctx->vq_src;
q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
- q->drv_priv = &ctx->fh;
+ q->drv_priv = ctx;
q->lock = &dev->mfc_mutex;
if (vdev == dev->vfd_dec) {
q->io_modes = VB2_MMAP;
@@ -956,7 +955,7 @@ err_ctrls_setup:
err_bad_node:
dev->ctx[ctx->num] = NULL;
err_no_ctx:
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
kfree(ctx);
err_alloc:
@@ -970,7 +969,7 @@ err_enter:
/* Release MFC context */
static int s5p_mfc_release(struct file *file)
{
- struct s5p_mfc_ctx *ctx = fh_to_ctx(file->private_data);
+ struct s5p_mfc_ctx *ctx = file_to_ctx(file);
struct s5p_mfc_dev *dev = ctx->dev;
/* if dev is null, do cleanup that doesn't need dev */
@@ -1011,7 +1010,7 @@ static int s5p_mfc_release(struct file *file)
if (dev)
dev->ctx[ctx->num] = NULL;
s5p_mfc_dec_ctrls_delete(ctx);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
/* vdev is gone if dev is null */
if (dev)
v4l2_fh_exit(&ctx->fh);
@@ -1027,7 +1026,7 @@ static int s5p_mfc_release(struct file *file)
static __poll_t s5p_mfc_poll(struct file *file,
struct poll_table_struct *wait)
{
- struct s5p_mfc_ctx *ctx = fh_to_ctx(file->private_data);
+ struct s5p_mfc_ctx *ctx = file_to_ctx(file);
struct s5p_mfc_dev *dev = ctx->dev;
struct vb2_queue *src_q, *dst_q;
struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
@@ -1078,7 +1077,7 @@ end:
/* Mmap */
static int s5p_mfc_mmap(struct file *file, struct vm_area_struct *vma)
{
- struct s5p_mfc_ctx *ctx = fh_to_ctx(file->private_data);
+ struct s5p_mfc_ctx *ctx = file_to_ctx(file);
unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
int ret;
diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v6.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v6.c
index 47bc3014b5d8..f7c682fca645 100644
--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v6.c
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v6.c
@@ -14,8 +14,7 @@
#include "s5p_mfc_opr.h"
#include "s5p_mfc_cmd_v6.h"
-static int s5p_mfc_cmd_host2risc_v6(struct s5p_mfc_dev *dev, int cmd,
- const struct s5p_mfc_cmd_args *args)
+static int s5p_mfc_cmd_host2risc_v6(struct s5p_mfc_dev *dev, int cmd)
{
mfc_debug(2, "Issue the command: %d\n", cmd);
@@ -31,7 +30,6 @@ static int s5p_mfc_cmd_host2risc_v6(struct s5p_mfc_dev *dev, int cmd,
static int s5p_mfc_sys_init_cmd_v6(struct s5p_mfc_dev *dev)
{
- struct s5p_mfc_cmd_args h2r_args;
const struct s5p_mfc_buf_size_v6 *buf_size = dev->variant->buf_size->priv;
int ret;
@@ -41,33 +39,23 @@ static int s5p_mfc_sys_init_cmd_v6(struct s5p_mfc_dev *dev)
mfc_write(dev, dev->ctx_buf.dma, S5P_FIMV_CONTEXT_MEM_ADDR_V6);
mfc_write(dev, buf_size->dev_ctx, S5P_FIMV_CONTEXT_MEM_SIZE_V6);
- return s5p_mfc_cmd_host2risc_v6(dev, S5P_FIMV_H2R_CMD_SYS_INIT_V6,
- &h2r_args);
+ return s5p_mfc_cmd_host2risc_v6(dev, S5P_FIMV_H2R_CMD_SYS_INIT_V6);
}
static int s5p_mfc_sleep_cmd_v6(struct s5p_mfc_dev *dev)
{
- struct s5p_mfc_cmd_args h2r_args;
-
- memset(&h2r_args, 0, sizeof(struct s5p_mfc_cmd_args));
- return s5p_mfc_cmd_host2risc_v6(dev, S5P_FIMV_H2R_CMD_SLEEP_V6,
- &h2r_args);
+ return s5p_mfc_cmd_host2risc_v6(dev, S5P_FIMV_H2R_CMD_SLEEP_V6);
}
static int s5p_mfc_wakeup_cmd_v6(struct s5p_mfc_dev *dev)
{
- struct s5p_mfc_cmd_args h2r_args;
-
- memset(&h2r_args, 0, sizeof(struct s5p_mfc_cmd_args));
- return s5p_mfc_cmd_host2risc_v6(dev, S5P_FIMV_H2R_CMD_WAKEUP_V6,
- &h2r_args);
+ return s5p_mfc_cmd_host2risc_v6(dev, S5P_FIMV_H2R_CMD_WAKEUP_V6);
}
/* Open a new instance and get its number */
static int s5p_mfc_open_inst_cmd_v6(struct s5p_mfc_ctx *ctx)
{
struct s5p_mfc_dev *dev = ctx->dev;
- struct s5p_mfc_cmd_args h2r_args;
int codec_type;
mfc_debug(2, "Requested codec mode: %d\n", ctx->codec_mode);
@@ -129,23 +117,20 @@ static int s5p_mfc_open_inst_cmd_v6(struct s5p_mfc_ctx *ctx)
mfc_write(dev, ctx->ctx.size, S5P_FIMV_CONTEXT_MEM_SIZE_V6);
mfc_write(dev, 0, S5P_FIMV_D_CRC_CTRL_V6); /* no crc */
- return s5p_mfc_cmd_host2risc_v6(dev, S5P_FIMV_H2R_CMD_OPEN_INSTANCE_V6,
- &h2r_args);
+ return s5p_mfc_cmd_host2risc_v6(dev, S5P_FIMV_H2R_CMD_OPEN_INSTANCE_V6);
}
/* Close instance */
static int s5p_mfc_close_inst_cmd_v6(struct s5p_mfc_ctx *ctx)
{
struct s5p_mfc_dev *dev = ctx->dev;
- struct s5p_mfc_cmd_args h2r_args;
int ret = 0;
dev->curr_ctx = ctx->num;
if (ctx->state != MFCINST_FREE) {
mfc_write(dev, ctx->inst_no, S5P_FIMV_INSTANCE_ID_V6);
ret = s5p_mfc_cmd_host2risc_v6(dev,
- S5P_FIMV_H2R_CMD_CLOSE_INSTANCE_V6,
- &h2r_args);
+ S5P_FIMV_H2R_CMD_CLOSE_INSTANCE_V6);
} else {
ret = -EINVAL;
}
@@ -153,9 +138,15 @@ static int s5p_mfc_close_inst_cmd_v6(struct s5p_mfc_ctx *ctx)
return ret;
}
+static int s5p_mfc_cmd_host2risc_v6_args(struct s5p_mfc_dev *dev, int cmd,
+ const struct s5p_mfc_cmd_args *ignored)
+{
+ return s5p_mfc_cmd_host2risc_v6(dev, cmd);
+}
+
/* Initialize cmd function pointers for MFC v6 */
static const struct s5p_mfc_hw_cmds s5p_mfc_cmds_v6 = {
- .cmd_host2risc = s5p_mfc_cmd_host2risc_v6,
+ .cmd_host2risc = s5p_mfc_cmd_host2risc_v6_args,
.sys_init_cmd = s5p_mfc_sys_init_cmd_v6,
.sleep_cmd = s5p_mfc_sleep_cmd_v6,
.wakeup_cmd = s5p_mfc_wakeup_cmd_v6,
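
The MFC v6 command path never used the args parameter, so the hunks above drop it from the internal helpers and keep the s5p_mfc_hw_cmds table signature intact with a thin adapter that discards the argument. A self-contained illustration of that adapter shape, with hypothetical names:

#include <linux/errno.h>

/* The ops table demands the wide signature; v6 needs only the command. */
struct demo_args { int unused; };

static int demo_host2risc(int cmd)
{
	return cmd >= 0 ? 0 : -EINVAL;	/* stand-in for the MMIO writes */
}

static int demo_host2risc_args(int cmd, const struct demo_args *ignored)
{
	return demo_host2risc(cmd);
}

struct demo_cmds {
	int (*cmd_host2risc)(int cmd, const struct demo_args *args);
};

static const struct demo_cmds demo_cmds_v6 = {
	.cmd_host2risc = demo_host2risc_args,
};
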
diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_common.h b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_common.h
index 86c316c1ff8f..58dc1768082c 100644
--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_common.h
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_common.h
@@ -767,7 +767,11 @@ struct mfc_control {
#define s5p_mfc_hw_call(f, op, args...) \
((f && f->op) ? f->op(args) : (typeof(f->op(args)))(-ENODEV))
-#define fh_to_ctx(__fh) container_of(__fh, struct s5p_mfc_ctx, fh)
+static inline struct s5p_mfc_ctx *file_to_ctx(struct file *filp)
+{
+ return container_of(file_to_v4l2_fh(filp), struct s5p_mfc_ctx, fh);
+}
+
#define ctrl_to_ctx(__ctrl) \
container_of((__ctrl)->handler, struct s5p_mfc_ctx, ctrl_handler)
diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_dec.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_dec.c
index 3efbc3367906..afd28beabfde 100644
--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_dec.c
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_dec.c
@@ -330,7 +330,7 @@ static int vidioc_enum_fmt(struct file *file, struct v4l2_fmtdesc *f,
return 0;
}
-static int vidioc_enum_fmt_vid_cap(struct file *file, void *pirv,
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
return vidioc_enum_fmt(file, f, false);
@@ -345,7 +345,7 @@ static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
/* Get format */
static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
- struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_mfc_ctx *ctx = file_to_ctx(file);
struct v4l2_pix_format_mplane *pix_mp;
mfc_debug_enter();
@@ -442,7 +442,7 @@ static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
struct s5p_mfc_dev *dev = video_drvdata(file);
- struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_mfc_ctx *ctx = file_to_ctx(file);
int ret = 0;
struct v4l2_pix_format_mplane *pix_mp;
const struct s5p_mfc_buf_size *buf_size = dev->variant->buf_size;
@@ -598,7 +598,7 @@ static int vidioc_reqbufs(struct file *file, void *priv,
struct v4l2_requestbuffers *reqbufs)
{
struct s5p_mfc_dev *dev = video_drvdata(file);
- struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_mfc_ctx *ctx = file_to_ctx(file);
if (reqbufs->memory != V4L2_MEMORY_MMAP) {
mfc_debug(2, "Only V4L2_MEMORY_MMAP is supported\n");
@@ -619,7 +619,7 @@ static int vidioc_reqbufs(struct file *file, void *priv,
static int vidioc_querybuf(struct file *file, void *priv,
struct v4l2_buffer *buf)
{
- struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_mfc_ctx *ctx = file_to_ctx(file);
int ret;
int i;
@@ -647,7 +647,7 @@ static int vidioc_querybuf(struct file *file, void *priv,
/* Queue a buffer */
static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
{
- struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_mfc_ctx *ctx = file_to_ctx(file);
if (ctx->state == MFCINST_ERROR) {
mfc_err("Call on QBUF after unrecoverable error\n");
@@ -666,7 +666,7 @@ static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
const struct v4l2_event ev = {
.type = V4L2_EVENT_EOS
};
- struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_mfc_ctx *ctx = file_to_ctx(file);
int ret;
if (ctx->state == MFCINST_ERROR) {
@@ -695,7 +695,7 @@ static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
static int vidioc_expbuf(struct file *file, void *priv,
struct v4l2_exportbuffer *eb)
{
- struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_mfc_ctx *ctx = file_to_ctx(file);
if (eb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
return vb2_expbuf(&ctx->vq_src, eb);
@@ -708,7 +708,7 @@ static int vidioc_expbuf(struct file *file, void *priv,
static int vidioc_streamon(struct file *file, void *priv,
enum v4l2_buf_type type)
{
- struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_mfc_ctx *ctx = file_to_ctx(file);
int ret = -EINVAL;
mfc_debug_enter();
@@ -724,7 +724,7 @@ static int vidioc_streamon(struct file *file, void *priv,
static int vidioc_streamoff(struct file *file, void *priv,
enum v4l2_buf_type type)
{
- struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_mfc_ctx *ctx = file_to_ctx(file);
if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
return vb2_streamoff(&ctx->vq_src, type);
@@ -801,7 +801,7 @@ static const struct v4l2_ctrl_ops s5p_mfc_dec_ctrl_ops = {
static int vidioc_g_selection(struct file *file, void *priv,
struct v4l2_selection *s)
{
- struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_mfc_ctx *ctx = file_to_ctx(file);
struct s5p_mfc_dev *dev = ctx->dev;
u32 left, right, top, bottom;
u32 width, height;
@@ -856,7 +856,7 @@ static int vidioc_g_selection(struct file *file, void *priv,
static int vidioc_decoder_cmd(struct file *file, void *priv,
struct v4l2_decoder_cmd *cmd)
{
- struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_mfc_ctx *ctx = file_to_ctx(file);
struct s5p_mfc_dev *dev = ctx->dev;
struct s5p_mfc_buf *buf;
unsigned long flags;
@@ -937,7 +937,7 @@ static int s5p_mfc_queue_setup(struct vb2_queue *vq,
unsigned int *plane_count, unsigned int psize[],
struct device *alloc_devs[])
{
- struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
+ struct s5p_mfc_ctx *ctx = vb2_get_drv_priv(vq);
struct s5p_mfc_dev *dev = ctx->dev;
const struct v4l2_format_info *format;
@@ -1006,7 +1006,7 @@ static int s5p_mfc_buf_init(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct vb2_queue *vq = vb->vb2_queue;
- struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
+ struct s5p_mfc_ctx *ctx = vb2_get_drv_priv(vq);
unsigned int i;
if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
@@ -1068,7 +1068,7 @@ static int s5p_mfc_buf_init(struct vb2_buffer *vb)
static int s5p_mfc_start_streaming(struct vb2_queue *q, unsigned int count)
{
- struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
+ struct s5p_mfc_ctx *ctx = vb2_get_drv_priv(q);
struct s5p_mfc_dev *dev = ctx->dev;
v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
@@ -1085,7 +1085,7 @@ static int s5p_mfc_start_streaming(struct vb2_queue *q, unsigned int count)
static void s5p_mfc_stop_streaming(struct vb2_queue *q)
{
unsigned long flags;
- struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
+ struct s5p_mfc_ctx *ctx = vb2_get_drv_priv(q);
struct s5p_mfc_dev *dev = ctx->dev;
int aborted = 0;
@@ -1130,7 +1130,7 @@ static void s5p_mfc_stop_streaming(struct vb2_queue *q)
static void s5p_mfc_buf_queue(struct vb2_buffer *vb)
{
struct vb2_queue *vq = vb->vb2_queue;
- struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
+ struct s5p_mfc_ctx *ctx = vb2_get_drv_priv(vq);
struct s5p_mfc_dev *dev = ctx->dev;
unsigned long flags;
struct s5p_mfc_buf *mfc_buf;
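
The vb2 queue hookup changes in the same spirit: q->drv_priv now stores the context itself instead of &ctx->fh, so the queue ops fetch it with vb2_get_drv_priv() and the fh_to_ctx(vq->drv_priv) indirection disappears. A sketch of one queue op under that convention:

#include <linux/errno.h>
#include <media/videobuf2-core.h>

static int demo_queue_setup(struct vb2_queue *vq, unsigned int *num_buffers,
			    unsigned int *num_planes, unsigned int sizes[],
			    struct device *alloc_devs[])
{
	/* drv_priv was set to the context in open(): q->drv_priv = ctx; */
	struct s5p_mfc_ctx *ctx = vb2_get_drv_priv(vq);

	return ctx ? 0 : -EINVAL;	/* real buffer sizing elided */
}
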
diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c
index 6c603dcd5664..3f8701e5614f 100644
--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c
@@ -1375,7 +1375,7 @@ static int vidioc_enum_fmt(struct file *file, struct v4l2_fmtdesc *f,
return -EINVAL;
}
-static int vidioc_enum_fmt_vid_cap(struct file *file, void *pirv,
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
return vidioc_enum_fmt(file, f, false);
@@ -1389,8 +1389,8 @@ static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
- struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
+ struct s5p_mfc_ctx *ctx = file_to_ctx(file);
mfc_debug(2, "f->type = %d ctx->state = %d\n", f->type, ctx->state);
if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
@@ -1472,8 +1472,8 @@ static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
+ struct s5p_mfc_ctx *ctx = file_to_ctx(file);
struct s5p_mfc_dev *dev = video_drvdata(file);
- struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
int ret = 0;
@@ -1531,7 +1531,7 @@ static int vidioc_reqbufs(struct file *file, void *priv,
struct v4l2_requestbuffers *reqbufs)
{
struct s5p_mfc_dev *dev = video_drvdata(file);
- struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_mfc_ctx *ctx = file_to_ctx(file);
int ret = 0;
/* if memory is not mmp or userptr or dmabuf return error */
@@ -1601,7 +1601,7 @@ static int vidioc_reqbufs(struct file *file, void *priv,
static int vidioc_querybuf(struct file *file, void *priv,
struct v4l2_buffer *buf)
{
- struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_mfc_ctx *ctx = file_to_ctx(file);
int ret = 0;
/* if memory is not mmp or userptr or dmabuf return error */
@@ -1636,7 +1636,7 @@ static int vidioc_querybuf(struct file *file, void *priv,
/* Queue a buffer */
static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
{
- struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_mfc_ctx *ctx = file_to_ctx(file);
if (ctx->state == MFCINST_ERROR) {
mfc_err("Call on QBUF after unrecoverable error\n");
@@ -1657,10 +1657,10 @@ static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
/* Dequeue a buffer */
static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
{
+ struct s5p_mfc_ctx *ctx = file_to_ctx(file);
const struct v4l2_event ev = {
.type = V4L2_EVENT_EOS
};
- struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
int ret;
if (ctx->state == MFCINST_ERROR) {
@@ -1685,7 +1685,7 @@ static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
static int vidioc_expbuf(struct file *file, void *priv,
struct v4l2_exportbuffer *eb)
{
- struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_mfc_ctx *ctx = file_to_ctx(file);
if (eb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
return vb2_expbuf(&ctx->vq_src, eb);
@@ -1698,7 +1698,7 @@ static int vidioc_expbuf(struct file *file, void *priv,
static int vidioc_streamon(struct file *file, void *priv,
enum v4l2_buf_type type)
{
- struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_mfc_ctx *ctx = file_to_ctx(file);
if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
return vb2_streamon(&ctx->vq_src, type);
@@ -1711,7 +1711,7 @@ static int vidioc_streamon(struct file *file, void *priv,
static int vidioc_streamoff(struct file *file, void *priv,
enum v4l2_buf_type type)
{
- struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_mfc_ctx *ctx = file_to_ctx(file);
if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
return vb2_streamoff(&ctx->vq_src, type);
@@ -2284,7 +2284,7 @@ static const struct v4l2_ctrl_ops s5p_mfc_enc_ctrl_ops = {
static int vidioc_s_parm(struct file *file, void *priv,
struct v4l2_streamparm *a)
{
- struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_mfc_ctx *ctx = file_to_ctx(file);
if (a->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
ctx->enc_params.rc_framerate_num =
@@ -2301,7 +2301,7 @@ static int vidioc_s_parm(struct file *file, void *priv,
static int vidioc_g_parm(struct file *file, void *priv,
struct v4l2_streamparm *a)
{
- struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_mfc_ctx *ctx = file_to_ctx(file);
if (a->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
a->parm.output.timeperframe.denominator =
@@ -2318,7 +2318,7 @@ static int vidioc_g_parm(struct file *file, void *priv,
static int vidioc_encoder_cmd(struct file *file, void *priv,
struct v4l2_encoder_cmd *cmd)
{
- struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_mfc_ctx *ctx = file_to_ctx(file);
struct s5p_mfc_dev *dev = ctx->dev;
struct s5p_mfc_buf *buf;
unsigned long flags;
@@ -2418,7 +2418,7 @@ static int s5p_mfc_queue_setup(struct vb2_queue *vq,
unsigned int *buf_count, unsigned int *plane_count,
unsigned int psize[], struct device *alloc_devs[])
{
- struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
+ struct s5p_mfc_ctx *ctx = vb2_get_drv_priv(vq);
struct s5p_mfc_dev *dev = ctx->dev;
if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
@@ -2477,7 +2477,7 @@ static int s5p_mfc_buf_init(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct vb2_queue *vq = vb->vb2_queue;
- struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
+ struct s5p_mfc_ctx *ctx = vb2_get_drv_priv(vq);
unsigned int i;
int ret;
@@ -2516,7 +2516,7 @@ static int s5p_mfc_buf_init(struct vb2_buffer *vb)
static int s5p_mfc_buf_prepare(struct vb2_buffer *vb)
{
struct vb2_queue *vq = vb->vb2_queue;
- struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
+ struct s5p_mfc_ctx *ctx = vb2_get_drv_priv(vq);
int ret;
if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
@@ -2557,7 +2557,7 @@ static int s5p_mfc_buf_prepare(struct vb2_buffer *vb)
static int s5p_mfc_start_streaming(struct vb2_queue *q, unsigned int count)
{
- struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
+ struct s5p_mfc_ctx *ctx = vb2_get_drv_priv(q);
struct s5p_mfc_dev *dev = ctx->dev;
if (IS_MFCV6_PLUS(dev) &&
@@ -2588,7 +2588,7 @@ static int s5p_mfc_start_streaming(struct vb2_queue *q, unsigned int count)
static void s5p_mfc_stop_streaming(struct vb2_queue *q)
{
unsigned long flags;
- struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
+ struct s5p_mfc_ctx *ctx = vb2_get_drv_priv(q);
struct s5p_mfc_dev *dev = ctx->dev;
if ((ctx->state == MFCINST_FINISHING ||
@@ -2617,7 +2617,7 @@ static void s5p_mfc_stop_streaming(struct vb2_queue *q)
static void s5p_mfc_buf_queue(struct vb2_buffer *vb)
{
struct vb2_queue *vq = vb->vb2_queue;
- struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
+ struct s5p_mfc_ctx *ctx = vb2_get_drv_priv(vq);
struct s5p_mfc_dev *dev = ctx->dev;
unsigned long flags;
struct s5p_mfc_buf *mfc_buf;
diff --git a/drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c
index 1eb934490c0b..56169b70652d 100644
--- a/drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c
+++ b/drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c
@@ -33,7 +33,10 @@
#define BDISP_MIN_H 1
#define BDISP_MAX_H 8191
-#define fh_to_ctx(__fh) container_of(__fh, struct bdisp_ctx, fh)
+static inline struct bdisp_ctx *file_to_ctx(struct file *filp)
+{
+ return container_of(file_to_v4l2_fh(filp), struct bdisp_ctx, fh);
+}
enum bdisp_dev_flags {
ST_M2M_OPEN, /* Driver opened */
@@ -603,8 +606,7 @@ static int bdisp_open(struct file *file)
/* Use separate control handler per file handle */
ctx->fh.ctrl_handler = &ctx->ctrl_handler;
- file->private_data = &ctx->fh;
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
/* Default format */
ctx->src = bdisp_dflt_fmt;
@@ -630,7 +632,7 @@ static int bdisp_open(struct file *file)
error_ctrls:
bdisp_ctrls_delete(ctx);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
error_fh:
v4l2_fh_exit(&ctx->fh);
bdisp_hw_free_nodes(ctx);
@@ -644,7 +646,7 @@ unlock:
static int bdisp_release(struct file *file)
{
- struct bdisp_ctx *ctx = fh_to_ctx(file->private_data);
+ struct bdisp_ctx *ctx = file_to_ctx(file);
struct bdisp_dev *bdisp = ctx->bdisp_dev;
dev_dbg(bdisp->dev, "%s\n", __func__);
@@ -655,7 +657,7 @@ static int bdisp_release(struct file *file)
bdisp_ctrls_delete(ctx);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
if (--bdisp->m2m.refcnt <= 0)
@@ -682,7 +684,7 @@ static const struct v4l2_file_operations bdisp_fops = {
static int bdisp_querycap(struct file *file, void *fh,
struct v4l2_capability *cap)
{
- struct bdisp_ctx *ctx = fh_to_ctx(fh);
+ struct bdisp_ctx *ctx = file_to_ctx(file);
struct bdisp_dev *bdisp = ctx->bdisp_dev;
strscpy(cap->driver, bdisp->pdev->name, sizeof(cap->driver));
@@ -694,7 +696,7 @@ static int bdisp_querycap(struct file *file, void *fh,
static int bdisp_enum_fmt(struct file *file, void *fh, struct v4l2_fmtdesc *f)
{
- struct bdisp_ctx *ctx = fh_to_ctx(fh);
+ struct bdisp_ctx *ctx = file_to_ctx(file);
const struct bdisp_fmt *fmt;
if (f->index >= ARRAY_SIZE(bdisp_formats))
@@ -714,7 +716,7 @@ static int bdisp_enum_fmt(struct file *file, void *fh, struct v4l2_fmtdesc *f)
static int bdisp_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
{
- struct bdisp_ctx *ctx = fh_to_ctx(fh);
+ struct bdisp_ctx *ctx = file_to_ctx(file);
struct v4l2_pix_format *pix;
struct bdisp_frame *frame = ctx_get_frame(ctx, f->type);
@@ -738,7 +740,7 @@ static int bdisp_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
static int bdisp_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
{
- struct bdisp_ctx *ctx = fh_to_ctx(fh);
+ struct bdisp_ctx *ctx = file_to_ctx(file);
struct v4l2_pix_format *pix = &f->fmt.pix;
const struct bdisp_fmt *format;
u32 in_w, in_h;
@@ -788,7 +790,7 @@ static int bdisp_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
static int bdisp_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
{
- struct bdisp_ctx *ctx = fh_to_ctx(fh);
+ struct bdisp_ctx *ctx = file_to_ctx(file);
struct vb2_queue *vq;
struct bdisp_frame *frame;
struct v4l2_pix_format *pix;
@@ -841,8 +843,8 @@ static int bdisp_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
static int bdisp_g_selection(struct file *file, void *fh,
struct v4l2_selection *s)
{
+ struct bdisp_ctx *ctx = file_to_ctx(file);
struct bdisp_frame *frame;
- struct bdisp_ctx *ctx = fh_to_ctx(fh);
frame = ctx_get_frame(ctx, s->type);
if (IS_ERR(frame)) {
@@ -919,8 +921,8 @@ static int is_rect_enclosed(struct v4l2_rect *a, struct v4l2_rect *b)
static int bdisp_s_selection(struct file *file, void *fh,
struct v4l2_selection *s)
{
+ struct bdisp_ctx *ctx = file_to_ctx(file);
struct bdisp_frame *frame;
- struct bdisp_ctx *ctx = fh_to_ctx(fh);
struct v4l2_rect *in, out;
bool valid = false;
@@ -997,7 +999,7 @@ static int bdisp_s_selection(struct file *file, void *fh,
static int bdisp_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
{
- struct bdisp_ctx *ctx = fh_to_ctx(fh);
+ struct bdisp_ctx *ctx = file_to_ctx(file);
if ((type == V4L2_BUF_TYPE_VIDEO_OUTPUT) &&
!bdisp_ctx_state_is_set(BDISP_SRC_FMT, ctx)) {
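bdisp follows the series-wide file-handle pattern: v4l2_fh_add() and v4l2_fh_del() now take the struct file and bind the fh to file->private_data internally, so the manual assignment goes away and the ioctl handlers derive their context from the file. A hedged sketch of the resulting open/release shape; demo_ctx, demo_open and demo_release are illustrative:

	struct demo_ctx { struct v4l2_fh fh; };

	static int demo_open(struct file *file)
	{
		struct demo_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

		if (!ctx)
			return -ENOMEM;
		v4l2_fh_init(&ctx->fh, video_devdata(file));
		v4l2_fh_add(&ctx->fh, file);	/* also wires up file->private_data */
		return 0;
	}

	static int demo_release(struct file *file)
	{
		struct demo_ctx *ctx =
			container_of(file_to_v4l2_fh(file), struct demo_ctx, fh);

		v4l2_fh_del(&ctx->fh, file);
		v4l2_fh_exit(&ctx->fh);
		kfree(ctx);
		return 0;
	}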
diff --git a/drivers/media/platform/st/sti/delta/delta-mjpeg-dec.c b/drivers/media/platform/st/sti/delta/delta-mjpeg-dec.c
index 0533d4a083d2..a078f1107300 100644
--- a/drivers/media/platform/st/sti/delta/delta-mjpeg-dec.c
+++ b/drivers/media/platform/st/sti/delta/delta-mjpeg-dec.c
@@ -239,7 +239,7 @@ static int delta_mjpeg_ipc_open(struct delta_ctx *pctx)
return 0;
}
-static int delta_mjpeg_ipc_decode(struct delta_ctx *pctx, struct delta_au *au)
+static int delta_mjpeg_ipc_decode(struct delta_ctx *pctx, dma_addr_t pstart, dma_addr_t pend)
{
struct delta_dev *delta = pctx->dev;
struct delta_mjpeg_ctx *ctx = to_ctx(pctx);
@@ -256,8 +256,8 @@ static int delta_mjpeg_ipc_decode(struct delta_ctx *pctx, struct delta_au *au)
memset(params, 0, sizeof(*params));
- params->picture_start_addr_p = (u32)(au->paddr);
- params->picture_end_addr_p = (u32)(au->paddr + au->size - 1);
+ params->picture_start_addr_p = pstart;
+ params->picture_end_addr_p = pend;
/*
* !WARNING!
@@ -374,12 +374,14 @@ static int delta_mjpeg_decode(struct delta_ctx *pctx, struct delta_au *pau)
struct delta_dev *delta = pctx->dev;
struct delta_mjpeg_ctx *ctx = to_ctx(pctx);
int ret;
- struct delta_au au = *pau;
+ void *au_vaddr = pau->vaddr;
+ dma_addr_t au_dma = pau->paddr;
+ size_t au_size = pau->size;
unsigned int data_offset = 0;
struct mjpeg_header *header = &ctx->header_struct;
if (!ctx->header) {
- ret = delta_mjpeg_read_header(pctx, au.vaddr, au.size,
+ ret = delta_mjpeg_read_header(pctx, au_vaddr, au_size,
header, &data_offset);
if (ret) {
pctx->stream_errors++;
@@ -405,17 +407,17 @@ static int delta_mjpeg_decode(struct delta_ctx *pctx, struct delta_au *pau)
goto err;
}
- ret = delta_mjpeg_read_header(pctx, au.vaddr, au.size,
+ ret = delta_mjpeg_read_header(pctx, au_vaddr, au_size,
ctx->header, &data_offset);
if (ret) {
pctx->stream_errors++;
goto err;
}
- au.paddr += data_offset;
- au.vaddr += data_offset;
+ au_dma += data_offset;
+ au_vaddr += data_offset;
- ret = delta_mjpeg_ipc_decode(pctx, &au);
+ ret = delta_mjpeg_ipc_decode(pctx, au_dma, au_dma + au_size - 1);
if (ret)
goto err;
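Passing the picture window as two dma_addr_t values spares copying the whole struct delta_au onto the stack and drops the lossy u32 casts on bus addresses. A sketch of how a caller computes the window, mirroring the au_dma/au_size locals above (demo_send_picture is illustrative; as in the hunk, the size is not reduced by the header offset):

	static int demo_send_picture(struct delta_ctx *pctx, struct delta_au *au,
				     unsigned int data_offset)
	{
		dma_addr_t start = au->paddr + data_offset;	/* first byte */
		dma_addr_t end = start + au->size - 1;		/* last byte, inclusive */

		return delta_mjpeg_ipc_decode(pctx, start, end);
	}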
diff --git a/drivers/media/platform/st/sti/delta/delta-v4l2.c b/drivers/media/platform/st/sti/delta/delta-v4l2.c
index 196e6a40335d..6c1a53c771f7 100644
--- a/drivers/media/platform/st/sti/delta/delta-v4l2.c
+++ b/drivers/media/platform/st/sti/delta/delta-v4l2.c
@@ -24,7 +24,11 @@
#define DELTA_PREFIX "[---:----]"
-#define to_ctx(__fh) container_of(__fh, struct delta_ctx, fh)
+static inline struct delta_ctx *file_to_ctx(struct file *filp)
+{
+ return container_of(file_to_v4l2_fh(filp), struct delta_ctx, fh);
+}
+
#define to_au(__vbuf) container_of(__vbuf, struct delta_au, vbuf)
#define to_frame(__vbuf) container_of(__vbuf, struct delta_frame, vbuf)
@@ -382,7 +386,7 @@ static int delta_open_decoder(struct delta_ctx *ctx, u32 streamformat,
static int delta_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct delta_ctx *ctx = to_ctx(file->private_data);
+ struct delta_ctx *ctx = file_to_ctx(file);
struct delta_dev *delta = ctx->dev;
strscpy(cap->driver, DELTA_NAME, sizeof(cap->driver));
@@ -396,7 +400,7 @@ static int delta_querycap(struct file *file, void *priv,
static int delta_enum_fmt_stream(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- struct delta_ctx *ctx = to_ctx(file->private_data);
+ struct delta_ctx *ctx = file_to_ctx(file);
struct delta_dev *delta = ctx->dev;
if (unlikely(f->index >= delta->nb_of_streamformats))
@@ -410,7 +414,7 @@ static int delta_enum_fmt_stream(struct file *file, void *priv,
static int delta_enum_fmt_frame(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- struct delta_ctx *ctx = to_ctx(file->private_data);
+ struct delta_ctx *ctx = file_to_ctx(file);
struct delta_dev *delta = ctx->dev;
if (unlikely(f->index >= delta->nb_of_pixelformats))
@@ -424,7 +428,7 @@ static int delta_enum_fmt_frame(struct file *file, void *priv,
static int delta_g_fmt_stream(struct file *file, void *fh,
struct v4l2_format *f)
{
- struct delta_ctx *ctx = to_ctx(file->private_data);
+ struct delta_ctx *ctx = file_to_ctx(file);
struct delta_dev *delta = ctx->dev;
struct v4l2_pix_format *pix = &f->fmt.pix;
struct delta_streaminfo *streaminfo = &ctx->streaminfo;
@@ -452,7 +456,7 @@ static int delta_g_fmt_stream(struct file *file, void *fh,
static int delta_g_fmt_frame(struct file *file, void *fh, struct v4l2_format *f)
{
- struct delta_ctx *ctx = to_ctx(file->private_data);
+ struct delta_ctx *ctx = file_to_ctx(file);
struct delta_dev *delta = ctx->dev;
struct v4l2_pix_format *pix = &f->fmt.pix;
struct delta_frameinfo *frameinfo = &ctx->frameinfo;
@@ -491,7 +495,7 @@ static int delta_g_fmt_frame(struct file *file, void *fh, struct v4l2_format *f)
static int delta_try_fmt_stream(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct delta_ctx *ctx = to_ctx(file->private_data);
+ struct delta_ctx *ctx = file_to_ctx(file);
struct delta_dev *delta = ctx->dev;
struct v4l2_pix_format *pix = &f->fmt.pix;
u32 streamformat = pix->pixelformat;
@@ -545,7 +549,7 @@ static int delta_try_fmt_stream(struct file *file, void *priv,
static int delta_try_fmt_frame(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct delta_ctx *ctx = to_ctx(file->private_data);
+ struct delta_ctx *ctx = file_to_ctx(file);
struct delta_dev *delta = ctx->dev;
struct v4l2_pix_format *pix = &f->fmt.pix;
u32 pixelformat = pix->pixelformat;
@@ -605,7 +609,7 @@ static int delta_try_fmt_frame(struct file *file, void *priv,
static int delta_s_fmt_stream(struct file *file, void *fh,
struct v4l2_format *f)
{
- struct delta_ctx *ctx = to_ctx(file->private_data);
+ struct delta_ctx *ctx = file_to_ctx(file);
struct delta_dev *delta = ctx->dev;
struct vb2_queue *vq;
struct v4l2_pix_format *pix = &f->fmt.pix;
@@ -641,7 +645,7 @@ static int delta_s_fmt_stream(struct file *file, void *fh,
static int delta_s_fmt_frame(struct file *file, void *fh, struct v4l2_format *f)
{
- struct delta_ctx *ctx = to_ctx(file->private_data);
+ struct delta_ctx *ctx = file_to_ctx(file);
struct delta_dev *delta = ctx->dev;
const struct delta_dec *dec = ctx->dec;
struct v4l2_pix_format *pix = &f->fmt.pix;
@@ -721,7 +725,7 @@ static int delta_s_fmt_frame(struct file *file, void *fh, struct v4l2_format *f)
static int delta_g_selection(struct file *file, void *fh,
struct v4l2_selection *s)
{
- struct delta_ctx *ctx = to_ctx(fh);
+ struct delta_ctx *ctx = file_to_ctx(file);
struct delta_frameinfo *frameinfo = &ctx->frameinfo;
struct v4l2_rect crop;
@@ -803,7 +807,7 @@ static int delta_try_decoder_cmd(struct file *file, void *fh,
return 0;
}
-static int delta_decoder_stop_cmd(struct delta_ctx *ctx, void *fh)
+static int delta_decoder_stop_cmd(struct delta_ctx *ctx)
{
const struct delta_dec *dec = ctx->dec;
struct delta_dev *delta = ctx->dev;
@@ -866,14 +870,14 @@ delay_eos:
static int delta_decoder_cmd(struct file *file, void *fh,
struct v4l2_decoder_cmd *cmd)
{
- struct delta_ctx *ctx = to_ctx(fh);
+ struct delta_ctx *ctx = file_to_ctx(file);
int ret = 0;
ret = delta_try_decoder_cmd(file, fh, cmd);
if (ret)
return ret;
- return delta_decoder_stop_cmd(ctx, fh);
+ return delta_decoder_stop_cmd(ctx);
}
static int delta_subscribe_event(struct v4l2_fh *fh,
@@ -1633,8 +1637,7 @@ static int delta_open(struct file *file)
ctx->dev = delta;
v4l2_fh_init(&ctx->fh, video_devdata(file));
- file->private_data = &ctx->fh;
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
INIT_WORK(&ctx->run_work, delta_run_work);
mutex_init(&ctx->lock);
@@ -1679,7 +1682,7 @@ static int delta_open(struct file *file)
return 0;
err_fh_del:
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
kfree(ctx);
err:
@@ -1690,7 +1693,7 @@ err:
static int delta_release(struct file *file)
{
- struct delta_ctx *ctx = to_ctx(file->private_data);
+ struct delta_ctx *ctx = file_to_ctx(file);
struct delta_dev *delta = ctx->dev;
const struct delta_dec *dec = ctx->dec;
@@ -1707,7 +1710,7 @@ static int delta_release(struct file *file)
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
/* disable ST231 clocks */
diff --git a/drivers/media/platform/st/sti/hva/hva-v4l2.c b/drivers/media/platform/st/sti/hva/hva-v4l2.c
index 5366c0f92549..3581b73a99b8 100644
--- a/drivers/media/platform/st/sti/hva/hva-v4l2.c
+++ b/drivers/media/platform/st/sti/hva/hva-v4l2.c
@@ -36,7 +36,10 @@
#define to_type_str(type) (type == V4L2_BUF_TYPE_VIDEO_OUTPUT ? \
"frame" : "stream")
-#define fh_to_ctx(f) (container_of(f, struct hva_ctx, fh))
+static inline struct hva_ctx *file_to_ctx(struct file *filp)
+{
+ return container_of(file_to_v4l2_fh(filp), struct hva_ctx, fh);
+}
/* registry of available encoders */
static const struct hva_enc *hva_encoders[] = {
@@ -254,7 +257,7 @@ static void hva_dbg_summary(struct hva_ctx *ctx)
static int hva_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct hva_ctx *ctx = file_to_ctx(file);
struct hva_dev *hva = ctx_to_hdev(ctx);
strscpy(cap->driver, HVA_NAME, sizeof(cap->driver));
@@ -268,7 +271,7 @@ static int hva_querycap(struct file *file, void *priv,
static int hva_enum_fmt_stream(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct hva_ctx *ctx = file_to_ctx(file);
struct hva_dev *hva = ctx_to_hdev(ctx);
if (unlikely(f->index >= hva->nb_of_streamformats))
@@ -282,7 +285,7 @@ static int hva_enum_fmt_stream(struct file *file, void *priv,
static int hva_enum_fmt_frame(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct hva_ctx *ctx = file_to_ctx(file);
struct hva_dev *hva = ctx_to_hdev(ctx);
if (unlikely(f->index >= hva->nb_of_pixelformats))
@@ -295,7 +298,7 @@ static int hva_enum_fmt_frame(struct file *file, void *priv,
static int hva_g_fmt_stream(struct file *file, void *fh, struct v4l2_format *f)
{
- struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct hva_ctx *ctx = file_to_ctx(file);
struct hva_streaminfo *streaminfo = &ctx->streaminfo;
f->fmt.pix.width = streaminfo->width;
@@ -314,7 +317,7 @@ static int hva_g_fmt_stream(struct file *file, void *fh, struct v4l2_format *f)
static int hva_g_fmt_frame(struct file *file, void *fh, struct v4l2_format *f)
{
- struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct hva_ctx *ctx = file_to_ctx(file);
struct hva_frameinfo *frameinfo = &ctx->frameinfo;
f->fmt.pix.width = frameinfo->width;
@@ -335,7 +338,7 @@ static int hva_g_fmt_frame(struct file *file, void *fh, struct v4l2_format *f)
static int hva_try_fmt_stream(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct hva_ctx *ctx = file_to_ctx(file);
struct device *dev = ctx_to_dev(ctx);
struct v4l2_pix_format *pix = &f->fmt.pix;
u32 streamformat = pix->pixelformat;
@@ -399,7 +402,7 @@ static int hva_try_fmt_stream(struct file *file, void *priv,
static int hva_try_fmt_frame(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct hva_ctx *ctx = file_to_ctx(file);
struct device *dev = ctx_to_dev(ctx);
struct v4l2_pix_format *pix = &f->fmt.pix;
u32 pixelformat = pix->pixelformat;
@@ -449,7 +452,7 @@ static int hva_try_fmt_frame(struct file *file, void *priv,
static int hva_s_fmt_stream(struct file *file, void *fh, struct v4l2_format *f)
{
- struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct hva_ctx *ctx = file_to_ctx(file);
struct device *dev = ctx_to_dev(ctx);
struct vb2_queue *vq;
int ret;
@@ -479,7 +482,7 @@ static int hva_s_fmt_stream(struct file *file, void *fh, struct v4l2_format *f)
static int hva_s_fmt_frame(struct file *file, void *fh, struct v4l2_format *f)
{
- struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct hva_ctx *ctx = file_to_ctx(file);
struct device *dev = ctx_to_dev(ctx);
struct v4l2_pix_format *pix = &f->fmt.pix;
struct vb2_queue *vq;
@@ -517,7 +520,7 @@ static int hva_s_fmt_frame(struct file *file, void *fh, struct v4l2_format *f)
static int hva_g_parm(struct file *file, void *fh, struct v4l2_streamparm *sp)
{
- struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct hva_ctx *ctx = file_to_ctx(file);
struct v4l2_fract *time_per_frame = &ctx->ctrls.time_per_frame;
if (sp->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
@@ -533,7 +536,7 @@ static int hva_g_parm(struct file *file, void *fh, struct v4l2_streamparm *sp)
static int hva_s_parm(struct file *file, void *fh, struct v4l2_streamparm *sp)
{
- struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct hva_ctx *ctx = file_to_ctx(file);
struct v4l2_fract *time_per_frame = &ctx->ctrls.time_per_frame;
if (sp->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
@@ -553,7 +556,7 @@ static int hva_s_parm(struct file *file, void *fh, struct v4l2_streamparm *sp)
static int hva_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
{
- struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct hva_ctx *ctx = file_to_ctx(file);
struct device *dev = ctx_to_dev(ctx);
if (buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
@@ -1171,8 +1174,7 @@ static int hva_open(struct file *file)
INIT_WORK(&ctx->run_work, hva_run_work);
v4l2_fh_init(&ctx->fh, video_devdata(file));
- file->private_data = &ctx->fh;
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
ret = hva_ctrls_setup(ctx);
if (ret) {
@@ -1216,7 +1218,7 @@ static int hva_open(struct file *file)
err_ctrls:
v4l2_ctrl_handler_free(&ctx->ctrl_handler);
err_fh:
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
kfree(ctx);
out:
@@ -1225,7 +1227,7 @@ out:
static int hva_release(struct file *file)
{
- struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct hva_ctx *ctx = file_to_ctx(file);
struct hva_dev *hva = ctx_to_hdev(ctx);
struct device *dev = ctx_to_dev(ctx);
const struct hva_enc *enc = ctx->enc;
@@ -1247,7 +1249,7 @@ static int hva_release(struct file *file)
v4l2_ctrl_handler_free(&ctx->ctrl_handler);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
diff --git a/drivers/media/platform/st/sti/hva/hva.h b/drivers/media/platform/st/sti/hva/hva.h
index ba6b893416ec..1fe561082a74 100644
--- a/drivers/media/platform/st/sti/hva/hva.h
+++ b/drivers/media/platform/st/sti/hva/hva.h
@@ -13,8 +13,6 @@
#include <media/videobuf2-v4l2.h>
#include <media/v4l2-mem2mem.h>
-#define fh_to_ctx(f) (container_of(f, struct hva_ctx, fh))
-
#define hva_to_dev(h) (h->dev)
#define ctx_to_dev(c) (c->hva_dev->dev)
diff --git a/drivers/media/platform/st/stm32/dma2d/dma2d.c b/drivers/media/platform/st/stm32/dma2d/dma2d.c
index 48fa781aab06..468c247ba328 100644
--- a/drivers/media/platform/st/stm32/dma2d/dma2d.c
+++ b/drivers/media/platform/st/stm32/dma2d/dma2d.c
@@ -45,7 +45,10 @@
* whole of a destination image with a pixel format conversion.
*/
-#define fh2ctx(__fh) container_of(__fh, struct dma2d_ctx, fh)
+static inline struct dma2d_ctx *file2ctx(struct file *filp)
+{
+ return container_of(file_to_v4l2_fh(filp), struct dma2d_ctx, fh);
+}
static const struct dma2d_fmt formats[] = {
{
@@ -301,8 +304,7 @@ static int dma2d_open(struct file *file)
}
v4l2_fh_init(&ctx->fh, video_devdata(file));
- file->private_data = &ctx->fh;
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
dma2d_setup_ctrls(ctx);
@@ -318,13 +320,13 @@ static int dma2d_open(struct file *file)
static int dma2d_release(struct file *file)
{
struct dma2d_dev *dev = video_drvdata(file);
- struct dma2d_ctx *ctx = fh2ctx(file->private_data);
+ struct dma2d_ctx *ctx = file2ctx(file);
mutex_lock(&dev->mutex);
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
mutex_unlock(&dev->mutex);
v4l2_ctrl_handler_free(&ctx->ctrl_handler);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
kfree(ctx);
@@ -341,7 +343,7 @@ static int vidioc_querycap(struct file *file, void *priv,
return 0;
}
-static int vidioc_enum_fmt(struct file *file, void *prv, struct v4l2_fmtdesc *f)
+static int vidioc_enum_fmt(struct file *file, void *priv, struct v4l2_fmtdesc *f)
{
if (f->index >= NUM_FORMATS)
return -EINVAL;
@@ -350,9 +352,9 @@ static int vidioc_enum_fmt(struct file *file, void *prv, struct v4l2_fmtdesc *f)
return 0;
}
-static int vidioc_g_fmt(struct file *file, void *prv, struct v4l2_format *f)
+static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
- struct dma2d_ctx *ctx = prv;
+ struct dma2d_ctx *ctx = file2ctx(file);
struct vb2_queue *vq;
struct dma2d_frame *frm;
@@ -375,9 +377,9 @@ static int vidioc_g_fmt(struct file *file, void *prv, struct v4l2_format *f)
return 0;
}
-static int vidioc_try_fmt(struct file *file, void *prv, struct v4l2_format *f)
+static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
- struct dma2d_ctx *ctx = prv;
+ struct dma2d_ctx *ctx = file2ctx(file);
struct dma2d_fmt *fmt;
enum v4l2_field *field;
u32 fourcc = f->fmt.pix.pixelformat;
@@ -418,9 +420,9 @@ static int vidioc_try_fmt(struct file *file, void *prv, struct v4l2_format *f)
return 0;
}
-static int vidioc_s_fmt(struct file *file, void *prv, struct v4l2_format *f)
+static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
- struct dma2d_ctx *ctx = prv;
+ struct dma2d_ctx *ctx = file2ctx(file);
struct vb2_queue *vq;
struct dma2d_frame *frm;
struct dma2d_fmt *fmt;
@@ -429,7 +431,7 @@ static int vidioc_s_fmt(struct file *file, void *prv, struct v4l2_format *f)
/* Adjust all values accordingly to the hardware capabilities
* and chosen format.
*/
- ret = vidioc_try_fmt(file, prv, f);
+ ret = vidioc_try_fmt(file, priv, f);
if (ret)
return ret;
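The dma2d handlers used to cast the void *priv argument straight to the context, which only worked while the v4l2_fh sat at offset zero of struct dma2d_ctx. Deriving the context from the file with container_of() is layout-independent. A sketch of the two shapes, with demo_ctx names as illustration:

	struct demo_ctx {
		struct v4l2_fh fh;	/* the old cast required this to be first */
		int other_state;
	};

	static struct demo_ctx *old_ctx_from_priv(void *prv)
	{
		return prv;		/* relies on fh being the first member */
	}

	static struct demo_ctx *new_ctx_from_file(struct file *file)
	{
		return container_of(file_to_v4l2_fh(file), struct demo_ctx, fh);
	}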
diff --git a/drivers/media/platform/st/stm32/stm32-csi.c b/drivers/media/platform/st/stm32/stm32-csi.c
index b69048144cc1..fd2b6dfbd44c 100644
--- a/drivers/media/platform/st/stm32/stm32-csi.c
+++ b/drivers/media/platform/st/stm32/stm32-csi.c
@@ -443,8 +443,7 @@ static void stm32_csi_phy_reg_write(struct stm32_csi_dev *csidev,
static int stm32_csi_start(struct stm32_csi_dev *csidev,
struct v4l2_subdev_state *state)
{
- struct media_pad *src_pad =
- &csidev->s_subdev->entity.pads[csidev->s_subdev_pad_nb];
+ struct media_pad *src_pad;
const struct stm32_csi_mbps_phy_reg *phy_regs = NULL;
struct v4l2_mbus_framefmt *sink_fmt;
const struct stm32_csi_fmts *fmt;
@@ -466,6 +465,7 @@ static int stm32_csi_start(struct stm32_csi_dev *csidev,
if (!csidev->s_subdev)
return -EIO;
+ src_pad = &csidev->s_subdev->entity.pads[csidev->s_subdev_pad_nb];
link_freq = v4l2_get_link_freq(src_pad,
fmt->bpp, 2 * csidev->num_lanes);
if (link_freq < 0)
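stm32_csi_start() used to dereference csidev->s_subdev in the src_pad initializer, several lines before the !csidev->s_subdev check; deferring the assignment until after the check removes the potential NULL dereference. The shape of the fix, reduced to a sketch:

	static int demo_start(struct stm32_csi_dev *csidev)
	{
		struct media_pad *src_pad;	/* no dereference in the declaration */

		if (!csidev->s_subdev)
			return -EIO;

		/* safe: the subdev pointer was validated above */
		src_pad = &csidev->s_subdev->entity.pads[csidev->s_subdev_pad_nb];
		(void)src_pad;
		return 0;
	}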
diff --git a/drivers/media/platform/st/stm32/stm32-dcmi.c b/drivers/media/platform/st/stm32/stm32-dcmi.c
index d94c61b8569d..13762861b769 100644
--- a/drivers/media/platform/st/stm32/stm32-dcmi.c
+++ b/drivers/media/platform/st/stm32/stm32-dcmi.c
@@ -1701,8 +1701,8 @@ static int dcmi_framesizes_init(struct stm32_dcmi *dcmi)
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
.code = dcmi->sd_format->mbus_code,
};
- unsigned int ret;
unsigned int i;
+ int ret;
/* Allocate discrete framesizes array */
while (!v4l2_subdev_call(subdev, pad, enum_frame_size,
@@ -1808,8 +1808,8 @@ static int dcmi_graph_notify_bound(struct v4l2_async_notifier *notifier,
struct v4l2_async_connection *asd)
{
struct stm32_dcmi *dcmi = notifier_to_dcmi(notifier);
- unsigned int ret;
int src_pad;
+ int ret;
dev_dbg(dcmi->dev, "Subdev \"%s\" bound\n", subdev->name);
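Both stm32-dcmi hunks fix the same bug class: the subdev and async-notifier calls return negative errno values, and storing them in an unsigned int makes any ret-is-negative test vacuously false. A minimal illustration:

	static int demo_check(void)
	{
		unsigned int bad = -EINVAL;	/* wraps to a huge positive value */
		int good = -EINVAL;

		if (bad < 0)			/* unsigned compare: never true */
			return -1;		/* unreachable error path */

		if (good < 0)			/* signed compare: works as intended */
			return good;

		return 0;
	}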
diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi_capture.c b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi_capture.c
index 76356bc7f10e..65879f4802c0 100644
--- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi_capture.c
+++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi_capture.c
@@ -711,7 +711,7 @@ static void sun6i_csi_capture_format_prepare(struct v4l2_format *format)
pix_format->xfer_func = V4L2_XFER_FUNC_DEFAULT;
}
-static int sun6i_csi_capture_querycap(struct file *file, void *private,
+static int sun6i_csi_capture_querycap(struct file *file, void *priv,
struct v4l2_capability *capability)
{
struct sun6i_csi_device *csi_dev = video_drvdata(file);
@@ -725,7 +725,7 @@ static int sun6i_csi_capture_querycap(struct file *file, void *private,
return 0;
}
-static int sun6i_csi_capture_enum_fmt(struct file *file, void *private,
+static int sun6i_csi_capture_enum_fmt(struct file *file, void *priv,
struct v4l2_fmtdesc *fmtdesc)
{
u32 index = fmtdesc->index;
@@ -738,7 +738,7 @@ static int sun6i_csi_capture_enum_fmt(struct file *file, void *private,
return 0;
}
-static int sun6i_csi_capture_g_fmt(struct file *file, void *private,
+static int sun6i_csi_capture_g_fmt(struct file *file, void *priv,
struct v4l2_format *format)
{
struct sun6i_csi_device *csi_dev = video_drvdata(file);
@@ -748,7 +748,7 @@ static int sun6i_csi_capture_g_fmt(struct file *file, void *private,
return 0;
}
-static int sun6i_csi_capture_s_fmt(struct file *file, void *private,
+static int sun6i_csi_capture_s_fmt(struct file *file, void *priv,
struct v4l2_format *format)
{
struct sun6i_csi_device *csi_dev = video_drvdata(file);
@@ -764,7 +764,7 @@ static int sun6i_csi_capture_s_fmt(struct file *file, void *private,
return 0;
}
-static int sun6i_csi_capture_try_fmt(struct file *file, void *private,
+static int sun6i_csi_capture_try_fmt(struct file *file, void *priv,
struct v4l2_format *format)
{
sun6i_csi_capture_format_prepare(format);
@@ -772,7 +772,7 @@ static int sun6i_csi_capture_try_fmt(struct file *file, void *private,
return 0;
}
-static int sun6i_csi_capture_enum_input(struct file *file, void *private,
+static int sun6i_csi_capture_enum_input(struct file *file, void *priv,
struct v4l2_input *input)
{
if (input->index != 0)
@@ -784,7 +784,7 @@ static int sun6i_csi_capture_enum_input(struct file *file, void *private,
return 0;
}
-static int sun6i_csi_capture_g_input(struct file *file, void *private,
+static int sun6i_csi_capture_g_input(struct file *file, void *priv,
unsigned int *index)
{
*index = 0;
@@ -792,7 +792,7 @@ static int sun6i_csi_capture_g_input(struct file *file, void *private,
return 0;
}
-static int sun6i_csi_capture_s_input(struct file *file, void *private,
+static int sun6i_csi_capture_s_input(struct file *file, void *priv,
unsigned int index)
{
if (index != 0)
diff --git a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
index 3e7f2df70408..eb519afb30ca 100644
--- a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
+++ b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
@@ -309,7 +309,7 @@ static void deinterlace_init(struct deinterlace_dev *dev)
static inline struct deinterlace_ctx *deinterlace_file2ctx(struct file *file)
{
- return container_of(file->private_data, struct deinterlace_ctx, fh);
+ return container_of(file_to_v4l2_fh(file), struct deinterlace_ctx, fh);
}
static bool deinterlace_check_format(u32 pixelformat)
@@ -730,7 +730,6 @@ static int deinterlace_open(struct file *file)
deinterlace_prepare_format(&ctx->dst_fmt);
v4l2_fh_init(&ctx->fh, video_devdata(file));
- file->private_data = &ctx->fh;
ctx->dev = dev;
ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx,
@@ -740,7 +739,7 @@ static int deinterlace_open(struct file *file)
goto err_free;
}
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
mutex_unlock(&dev->dev_mutex);
@@ -756,12 +755,11 @@ err_free:
static int deinterlace_release(struct file *file)
{
struct deinterlace_dev *dev = video_drvdata(file);
- struct deinterlace_ctx *ctx = container_of(file->private_data,
- struct deinterlace_ctx, fh);
+ struct deinterlace_ctx *ctx = deinterlace_file2ctx(file);
mutex_lock(&dev->dev_mutex);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
diff --git a/drivers/media/platform/sunxi/sun8i-rotate/sun8i_rotate.c b/drivers/media/platform/sunxi/sun8i-rotate/sun8i_rotate.c
index abd10b218aa1..89992feaab60 100644
--- a/drivers/media/platform/sunxi/sun8i-rotate/sun8i_rotate.c
+++ b/drivers/media/platform/sunxi/sun8i-rotate/sun8i_rotate.c
@@ -170,7 +170,7 @@ static irqreturn_t rotate_irq(int irq, void *data)
static inline struct rotate_ctx *rotate_file2ctx(struct file *file)
{
- return container_of(file->private_data, struct rotate_ctx, fh);
+ return container_of(file_to_v4l2_fh(file), struct rotate_ctx, fh);
}
static void rotate_prepare_format(struct v4l2_pix_format *pix_fmt)
@@ -659,7 +659,6 @@ static int rotate_open(struct file *file)
rotate_set_cap_format(ctx, &ctx->dst_fmt, ctx->rotate);
v4l2_fh_init(&ctx->fh, video_devdata(file));
- file->private_data = &ctx->fh;
ctx->dev = dev;
ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx,
@@ -669,7 +668,7 @@ static int rotate_open(struct file *file)
goto err_free;
}
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
ret = rotate_setup_ctrls(ctx);
if (ret)
@@ -691,13 +690,12 @@ err_free:
static int rotate_release(struct file *file)
{
struct rotate_dev *dev = video_drvdata(file);
- struct rotate_ctx *ctx = container_of(file->private_data,
- struct rotate_ctx, fh);
+ struct rotate_ctx *ctx = rotate_file2ctx(file);
mutex_lock(&dev->dev_mutex);
v4l2_ctrl_handler_free(&ctx->ctrl_handler);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
diff --git a/drivers/media/platform/synopsys/hdmirx/snps_hdmirx.c b/drivers/media/platform/synopsys/hdmirx/snps_hdmirx.c
index 7af6765532e3..b7d278b3889f 100644
--- a/drivers/media/platform/synopsys/hdmirx/snps_hdmirx.c
+++ b/drivers/media/platform/synopsys/hdmirx/snps_hdmirx.c
@@ -459,7 +459,7 @@ static bool port_no_link(struct snps_hdmirx_dev *hdmirx_dev)
return !tx_5v_power_present(hdmirx_dev);
}
-static int hdmirx_query_dv_timings(struct file *file, void *_fh,
+static int hdmirx_query_dv_timings(struct file *file, void *priv,
struct v4l2_dv_timings *timings)
{
struct hdmirx_stream *stream = video_drvdata(file);
@@ -751,7 +751,7 @@ static int hdmirx_dv_timings_cap(struct file *file, void *fh,
return 0;
}
-static int hdmirx_enum_dv_timings(struct file *file, void *_fh,
+static int hdmirx_enum_dv_timings(struct file *file, void *priv,
struct v4l2_enum_dv_timings *timings)
{
return v4l2_enum_dv_timings_cap(timings, &hdmirx_timings_cap, NULL, NULL);
@@ -1323,7 +1323,7 @@ static int hdmirx_g_fmt_vid_cap_mplane(struct file *file, void *fh,
return 0;
}
-static int hdmirx_g_dv_timings(struct file *file, void *_fh,
+static int hdmirx_g_dv_timings(struct file *file, void *priv,
struct v4l2_dv_timings *timings)
{
struct hdmirx_stream *stream = video_drvdata(file);
@@ -1339,7 +1339,7 @@ static int hdmirx_g_dv_timings(struct file *file, void *_fh,
return 0;
}
-static int hdmirx_s_dv_timings(struct file *file, void *_fh,
+static int hdmirx_s_dv_timings(struct file *file, void *priv,
struct v4l2_dv_timings *timings)
{
struct hdmirx_stream *stream = video_drvdata(file);
diff --git a/drivers/media/platform/synopsys/hdmirx/snps_hdmirx.h b/drivers/media/platform/synopsys/hdmirx/snps_hdmirx.h
index 220ab99ca611..b13f58e31944 100644
--- a/drivers/media/platform/synopsys/hdmirx/snps_hdmirx.h
+++ b/drivers/media/platform/synopsys/hdmirx/snps_hdmirx.h
@@ -8,10 +8,12 @@
#ifndef DW_HDMIRX_H
#define DW_HDMIRX_H
+#include <linux/bitfield.h>
#include <linux/bitops.h>
+#include <linux/hw_bitfield.h>
-#define UPDATE(x, h, l) (((x) << (l)) & GENMASK((h), (l)))
-#define HIWORD_UPDATE(v, h, l) (((v) << (l)) | (GENMASK((h), (l)) << 16))
+#define UPDATE(x, h, l) FIELD_PREP(GENMASK((h), (l)), (x))
+#define HIWORD_UPDATE(v, h, l) FIELD_PREP_WM16(GENMASK((h), (l)), (v))
/* SYS_GRF */
#define SYS_GRF_SOC_CON1 0x0304
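The open-coded shift-and-mask macros become the generic bitfield helpers: FIELD_PREP() masks the value into GENMASK(h, l), and FIELD_PREP_WM16() from <linux/hw_bitfield.h> additionally places the write-enable mask in the upper 16 bits, as HIWORD-masked registers expect. A worked example for a field at bits 5:4 with value 0x2 (demo_field is illustrative):

	#include <linux/bitfield.h>
	#include <linux/hw_bitfield.h>

	static u32 demo_field(void)
	{
		/* UPDATE(0x2, 5, 4): 0x2 << 4, masked to bits 5:4 -> 0x20 */
		u32 v = FIELD_PREP(GENMASK(5, 4), 0x2);

		/*
		 * HIWORD_UPDATE(0x2, 5, 4): the same value plus the
		 * write mask GENMASK(5, 4) << 16 in bits 21:20 -> 0x300020
		 */
		u32 wm = FIELD_PREP_WM16(GENMASK(5, 4), 0x2);

		return v | wm;	/* == 0x300020 */
	}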
diff --git a/drivers/media/platform/ti/Kconfig b/drivers/media/platform/ti/Kconfig
index bab998c4179a..3bc4aa35887e 100644
--- a/drivers/media/platform/ti/Kconfig
+++ b/drivers/media/platform/ti/Kconfig
@@ -67,7 +67,8 @@ config VIDEO_TI_J721E_CSI2RX
tristate "TI J721E CSI2RX wrapper layer driver"
depends on VIDEO_DEV && VIDEO_V4L2_SUBDEV_API
depends on MEDIA_SUPPORT && MEDIA_CONTROLLER
- depends on (PHY_CADENCE_DPHY_RX && VIDEO_CADENCE_CSI2RX) || COMPILE_TEST
+ depends on VIDEO_CADENCE_CSI2RX
+ depends on PHY_CADENCE_DPHY_RX || COMPILE_TEST
depends on ARCH_K3 || COMPILE_TEST
select VIDEOBUF2_DMA_CONTIG
select V4L2_FWNODE
diff --git a/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c b/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
index b628d6e081db..b75aa363d1bf 100644
--- a/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
+++ b/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
@@ -13,7 +13,9 @@
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <media/cadence/cdns-csi2rx.h>
#include <media/mipi-csi2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
@@ -28,6 +30,7 @@
#define SHIM_DMACNTX 0x20
#define SHIM_DMACNTX_EN BIT(31)
#define SHIM_DMACNTX_YUV422 GENMASK(27, 26)
+#define SHIM_DMACNTX_DUAL_PCK_CFG BIT(24)
#define SHIM_DMACNTX_SIZE GENMASK(21, 20)
#define SHIM_DMACNTX_FMT GENMASK(5, 0)
#define SHIM_DMACNTX_YUV422_MODE_11 3
@@ -39,6 +42,7 @@
#define SHIM_PSI_CFG0_SRC_TAG GENMASK(15, 0)
#define SHIM_PSI_CFG0_DST_TAG GENMASK(31, 16)
+#define TI_CSI2RX_MAX_PIX_PER_CLK 4
#define PSIL_WORD_SIZE_BYTES 16
/*
* There are no hard limits on the width or height. The DMA engine can handle
@@ -52,6 +56,8 @@
#define DRAIN_TIMEOUT_MS 50
#define DRAIN_BUFFER_SIZE SZ_32K
+#define CSI2RX_BRIDGE_SOURCE_PAD 1
+
struct ti_csi2rx_fmt {
u32 fourcc; /* Four character code. */
u32 code; /* Mbus code. */
@@ -107,6 +113,7 @@ struct ti_csi2rx_dev {
struct v4l2_format v_fmt;
struct ti_csi2rx_dma dma;
u32 sequence;
+ u8 pix_per_clk;
};
static const struct ti_csi2rx_fmt ti_csi2rx_formats[] = {
@@ -299,7 +306,7 @@ static int ti_csi2rx_enum_fmt_vid_cap(struct file *file, void *priv,
return 0;
}
-static int ti_csi2rx_g_fmt_vid_cap(struct file *file, void *prov,
+static int ti_csi2rx_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct ti_csi2rx_dev *csi = video_drvdata(file);
@@ -426,8 +433,9 @@ static int csi_async_notifier_complete(struct v4l2_async_notifier *notifier)
if (ret)
return ret;
- ret = v4l2_create_fwnode_links_to_pad(csi->source, &csi->pad,
- MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
+ ret = media_create_pad_link(&csi->source->entity, CSI2RX_BRIDGE_SOURCE_PAD,
+ &vdev->entity, csi->pad.index,
+ MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
if (ret) {
video_unregister_device(vdev);
@@ -450,25 +458,23 @@ static int ti_csi2rx_notifier_register(struct ti_csi2rx_dev *csi)
{
struct fwnode_handle *fwnode;
struct v4l2_async_connection *asc;
- struct device_node *node;
int ret;
- node = of_get_child_by_name(csi->dev->of_node, "csi-bridge");
- if (!node)
+ fwnode = fwnode_get_named_child_node(csi->dev->fwnode, "csi-bridge");
+ if (!fwnode)
return -EINVAL;
- fwnode = of_fwnode_handle(node);
- if (!fwnode) {
- of_node_put(node);
- return -EINVAL;
- }
-
v4l2_async_nf_init(&csi->notifier, &csi->v4l2_dev);
csi->notifier.ops = &csi_async_notifier_ops;
asc = v4l2_async_nf_add_fwnode(&csi->notifier, fwnode,
struct v4l2_async_connection);
- of_node_put(node);
+ /*
+ * Calling v4l2_async_nf_add_fwnode() grabs a refcount, so drop
+ * the one taken by fwnode_get_named_child_node().

+ */
+ fwnode_handle_put(fwnode);
+
if (IS_ERR(asc)) {
v4l2_async_nf_cleanup(&csi->notifier);
return PTR_ERR(asc);
@@ -483,6 +489,26 @@ static int ti_csi2rx_notifier_register(struct ti_csi2rx_dev *csi)
return 0;
}
+/* Request maximum possible pixels per clock from the bridge */
+static void ti_csi2rx_request_max_ppc(struct ti_csi2rx_dev *csi)
+{
+ u8 ppc = TI_CSI2RX_MAX_PIX_PER_CLK;
+ struct media_pad *pad;
+ int ret;
+
+ pad = media_entity_remote_source_pad_unique(&csi->vdev.entity);
+ if (IS_ERR(pad))
+ return;
+
+ ret = cdns_csi2rx_negotiate_ppc(csi->source, pad->index, &ppc);
+ if (ret) {
+ dev_warn(csi->dev, "NUM_PIXELS negotiation failed: %d\n", ret);
+ csi->pix_per_clk = 1;
+ } else {
+ csi->pix_per_clk = ppc;
+ }
+}
+
static void ti_csi2rx_setup_shim(struct ti_csi2rx_dev *csi)
{
const struct ti_csi2rx_fmt *fmt;
@@ -494,6 +520,9 @@ static void ti_csi2rx_setup_shim(struct ti_csi2rx_dev *csi)
reg = SHIM_CNTL_PIX_RST;
writel(reg, csi->shim + SHIM_CNTL);
+ /* Negotiate pixel count from the source */
+ ti_csi2rx_request_max_ppc(csi);
+
reg = SHIM_DMACNTX_EN;
reg |= FIELD_PREP(SHIM_DMACNTX_FMT, fmt->csi_dt);
@@ -522,14 +551,18 @@ static void ti_csi2rx_setup_shim(struct ti_csi2rx_dev *csi)
case V4L2_PIX_FMT_YVYU:
reg |= FIELD_PREP(SHIM_DMACNTX_YUV422,
SHIM_DMACNTX_YUV422_MODE_11);
+ /* Multiple pixels are handled differently for packed YUV */
+ if (csi->pix_per_clk == 2)
+ reg |= SHIM_DMACNTX_DUAL_PCK_CFG;
+ reg |= FIELD_PREP(SHIM_DMACNTX_SIZE, fmt->size);
break;
default:
- /* Ignore if not YUV 4:2:2 */
+ /* By default we change the shift size for multiple pixels */
+ reg |= FIELD_PREP(SHIM_DMACNTX_SIZE,
+ fmt->size + (csi->pix_per_clk >> 1));
break;
}
- reg |= FIELD_PREP(SHIM_DMACNTX_SIZE, fmt->size);
-
writel(reg, csi->shim + SHIM_DMACNTX);
reg = FIELD_PREP(SHIM_PSI_CFG0_SRC_TAG, 0) |
@@ -1120,7 +1153,7 @@ static int ti_csi2rx_probe(struct platform_device *pdev)
if (ret)
goto err_vb2q;
- ret = of_platform_populate(csi->dev->of_node, NULL, NULL, csi->dev);
+ ret = devm_of_platform_populate(csi->dev);
if (ret) {
dev_err(csi->dev, "Failed to create children: %d\n", ret);
goto err_subdev;
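In the setup_shim() hunk above, the DMA word-size field for formats other than packed YUV now scales with the negotiated pixels per clock: with ppc in {1, 2, 4}, (ppc >> 1) evaluates to 0, 1 and 2, i.e. log2(ppc) for those values, so each doubling of pixels per clock widens the transfer size by one step. Reduced to a sketch (demo_dmacntx_size is illustrative):

	static u32 demo_dmacntx_size(u32 fmt_size, u8 pix_per_clk)
	{
		/* ppc 1 -> +0, ppc 2 -> +1, ppc 4 -> +2 */
		return FIELD_PREP(SHIM_DMACNTX_SIZE, fmt_size + (pix_per_clk >> 1));
	}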
diff --git a/drivers/media/platform/ti/omap/omap_vout.c b/drivers/media/platform/ti/omap/omap_vout.c
index a87d5030ac35..22782e9f1f4e 100644
--- a/drivers/media/platform/ti/omap/omap_vout.c
+++ b/drivers/media/platform/ti/omap/omap_vout.c
@@ -1236,7 +1236,7 @@ static int vidioc_g_fbuf(struct file *file, void *fh,
return 0;
}
-static int vidioc_enum_output(struct file *file, void *priv_fh,
+static int vidioc_enum_output(struct file *file, void *priv,
struct v4l2_output *out)
{
if (out->index)
@@ -1246,13 +1246,13 @@ static int vidioc_enum_output(struct file *file, void *priv_fh,
return 0;
}
-static int vidioc_g_output(struct file *file, void *priv_fh, unsigned int *i)
+static int vidioc_g_output(struct file *file, void *priv, unsigned int *i)
{
*i = 0;
return 0;
}
-static int vidioc_s_output(struct file *file, void *priv_fh, unsigned int i)
+static int vidioc_s_output(struct file *file, void *priv, unsigned int i)
{
return i ? -EINVAL : 0;
}
diff --git a/drivers/media/platform/ti/omap3isp/ispccdc.c b/drivers/media/platform/ti/omap3isp/ispccdc.c
index 7d0c723dcd11..55ee14e8b449 100644
--- a/drivers/media/platform/ti/omap3isp/ispccdc.c
+++ b/drivers/media/platform/ti/omap3isp/ispccdc.c
@@ -1873,12 +1873,6 @@ static int ccdc_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
return v4l2_event_subscribe(fh, sub, OMAP3ISP_CCDC_NEVENTS, NULL);
}
-static int ccdc_unsubscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
- struct v4l2_event_subscription *sub)
-{
- return v4l2_event_unsubscribe(fh, sub);
-}
-
/*
* ccdc_set_stream - Enable/Disable streaming on the CCDC module
* @sd: ISP CCDC V4L2 subdevice
@@ -2487,7 +2481,7 @@ static int ccdc_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
static const struct v4l2_subdev_core_ops ccdc_v4l2_core_ops = {
.ioctl = ccdc_ioctl,
.subscribe_event = ccdc_subscribe_event,
- .unsubscribe_event = ccdc_unsubscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
};
/* V4L2 subdev video operations */
diff --git a/drivers/media/platform/ti/omap3isp/isph3a_aewb.c b/drivers/media/platform/ti/omap3isp/isph3a_aewb.c
index e6c54c4bbfca..ae93da9c4542 100644
--- a/drivers/media/platform/ti/omap3isp/isph3a_aewb.c
+++ b/drivers/media/platform/ti/omap3isp/isph3a_aewb.c
@@ -269,7 +269,7 @@ static const struct ispstat_ops h3a_aewb_ops = {
static const struct v4l2_subdev_core_ops h3a_aewb_subdev_core_ops = {
.ioctl = h3a_aewb_ioctl,
.subscribe_event = omap3isp_stat_subscribe_event,
- .unsubscribe_event = omap3isp_stat_unsubscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
};
static const struct v4l2_subdev_video_ops h3a_aewb_subdev_video_ops = {
diff --git a/drivers/media/platform/ti/omap3isp/isph3a_af.c b/drivers/media/platform/ti/omap3isp/isph3a_af.c
index de7b116d0122..ca478da4ad34 100644
--- a/drivers/media/platform/ti/omap3isp/isph3a_af.c
+++ b/drivers/media/platform/ti/omap3isp/isph3a_af.c
@@ -334,7 +334,7 @@ static const struct ispstat_ops h3a_af_ops = {
static const struct v4l2_subdev_core_ops h3a_af_subdev_core_ops = {
.ioctl = h3a_af_ioctl,
.subscribe_event = omap3isp_stat_subscribe_event,
- .unsubscribe_event = omap3isp_stat_unsubscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
};
static const struct v4l2_subdev_video_ops h3a_af_subdev_video_ops = {
diff --git a/drivers/media/platform/ti/omap3isp/isphist.c b/drivers/media/platform/ti/omap3isp/isphist.c
index 0ef78aace6da..7851ad13d84f 100644
--- a/drivers/media/platform/ti/omap3isp/isphist.c
+++ b/drivers/media/platform/ti/omap3isp/isphist.c
@@ -456,7 +456,7 @@ static const struct ispstat_ops hist_ops = {
static const struct v4l2_subdev_core_ops hist_subdev_core_ops = {
.ioctl = hist_ioctl,
.subscribe_event = omap3isp_stat_subscribe_event,
- .unsubscribe_event = omap3isp_stat_unsubscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
};
static const struct v4l2_subdev_video_ops hist_subdev_video_ops = {
diff --git a/drivers/media/platform/ti/omap3isp/ispstat.c b/drivers/media/platform/ti/omap3isp/ispstat.c
index d3da68408ecb..07bd62a93d99 100644
--- a/drivers/media/platform/ti/omap3isp/ispstat.c
+++ b/drivers/media/platform/ti/omap3isp/ispstat.c
@@ -1010,13 +1010,6 @@ int omap3isp_stat_subscribe_event(struct v4l2_subdev *subdev,
return v4l2_event_subscribe(fh, sub, STAT_NEVENTS, NULL);
}
-int omap3isp_stat_unsubscribe_event(struct v4l2_subdev *subdev,
- struct v4l2_fh *fh,
- struct v4l2_event_subscription *sub)
-{
- return v4l2_event_unsubscribe(fh, sub);
-}
-
void omap3isp_stat_unregister_entities(struct ispstat *stat)
{
v4l2_device_unregister_subdev(&stat->subdev);
diff --git a/drivers/media/platform/ti/omap3isp/ispstat.h b/drivers/media/platform/ti/omap3isp/ispstat.h
index b548e617cf62..59842c4a9c33 100644
--- a/drivers/media/platform/ti/omap3isp/ispstat.h
+++ b/drivers/media/platform/ti/omap3isp/ispstat.h
@@ -135,9 +135,6 @@ void omap3isp_stat_cleanup(struct ispstat *stat);
int omap3isp_stat_subscribe_event(struct v4l2_subdev *subdev,
struct v4l2_fh *fh,
struct v4l2_event_subscription *sub);
-int omap3isp_stat_unsubscribe_event(struct v4l2_subdev *subdev,
- struct v4l2_fh *fh,
- struct v4l2_event_subscription *sub);
int omap3isp_stat_s_stream(struct v4l2_subdev *subdev, int enable);
int omap3isp_stat_busy(struct ispstat *stat);
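Every wrapper removed in these omap3isp hunks was an identical one-line forward to v4l2_event_unsubscribe(); the V4L2 core already exports v4l2_event_subdev_unsubscribe() with exactly that behaviour, so the core ops can point at it directly. The removed shape, for reference:

	static int demo_unsubscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
					  struct v4l2_event_subscription *sub)
	{
		return v4l2_event_unsubscribe(fh, sub);
	}

	static const struct v4l2_subdev_core_ops demo_core_ops = {
		.unsubscribe_event = v4l2_event_subdev_unsubscribe,	/* same effect */
	};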
diff --git a/drivers/media/platform/ti/omap3isp/ispvideo.c b/drivers/media/platform/ti/omap3isp/ispvideo.c
index 78e30298c7ad..0e7f0bf2b346 100644
--- a/drivers/media/platform/ti/omap3isp/ispvideo.c
+++ b/drivers/media/platform/ti/omap3isp/ispvideo.c
@@ -657,7 +657,7 @@ isp_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
static int
isp_video_get_format(struct file *file, void *fh, struct v4l2_format *format)
{
- struct isp_video_fh *vfh = to_isp_video_fh(fh);
+ struct isp_video_fh *vfh = file_to_isp_video_fh(file);
struct isp_video *video = video_drvdata(file);
if (format->type != video->type)
@@ -673,7 +673,7 @@ isp_video_get_format(struct file *file, void *fh, struct v4l2_format *format)
static int
isp_video_set_format(struct file *file, void *fh, struct v4l2_format *format)
{
- struct isp_video_fh *vfh = to_isp_video_fh(fh);
+ struct isp_video_fh *vfh = file_to_isp_video_fh(file);
struct isp_video *video = video_drvdata(file);
struct v4l2_mbus_framefmt fmt;
@@ -858,7 +858,7 @@ isp_video_set_selection(struct file *file, void *fh, struct v4l2_selection *sel)
static int
isp_video_get_param(struct file *file, void *fh, struct v4l2_streamparm *a)
{
- struct isp_video_fh *vfh = to_isp_video_fh(fh);
+ struct isp_video_fh *vfh = file_to_isp_video_fh(file);
struct isp_video *video = video_drvdata(file);
if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
@@ -876,7 +876,7 @@ isp_video_get_param(struct file *file, void *fh, struct v4l2_streamparm *a)
static int
isp_video_set_param(struct file *file, void *fh, struct v4l2_streamparm *a)
{
- struct isp_video_fh *vfh = to_isp_video_fh(fh);
+ struct isp_video_fh *vfh = file_to_isp_video_fh(file);
struct isp_video *video = video_drvdata(file);
if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
@@ -894,7 +894,7 @@ isp_video_set_param(struct file *file, void *fh, struct v4l2_streamparm *a)
static int
isp_video_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *rb)
{
- struct isp_video_fh *vfh = to_isp_video_fh(fh);
+ struct isp_video_fh *vfh = file_to_isp_video_fh(file);
struct isp_video *video = video_drvdata(file);
int ret;
@@ -908,7 +908,7 @@ isp_video_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *rb)
static int
isp_video_querybuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
- struct isp_video_fh *vfh = to_isp_video_fh(fh);
+ struct isp_video_fh *vfh = file_to_isp_video_fh(file);
struct isp_video *video = video_drvdata(file);
int ret;
@@ -922,7 +922,7 @@ isp_video_querybuf(struct file *file, void *fh, struct v4l2_buffer *b)
static int
isp_video_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
- struct isp_video_fh *vfh = to_isp_video_fh(fh);
+ struct isp_video_fh *vfh = file_to_isp_video_fh(file);
struct isp_video *video = video_drvdata(file);
int ret;
@@ -936,7 +936,7 @@ isp_video_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
static int
isp_video_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
- struct isp_video_fh *vfh = to_isp_video_fh(fh);
+ struct isp_video_fh *vfh = file_to_isp_video_fh(file);
struct isp_video *video = video_drvdata(file);
int ret;
@@ -1074,7 +1074,7 @@ static int isp_video_check_external_subdevs(struct isp_video *video,
static int
isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
{
- struct isp_video_fh *vfh = to_isp_video_fh(fh);
+ struct isp_video_fh *vfh = file_to_isp_video_fh(file);
struct isp_video *video = video_drvdata(file);
enum isp_pipeline_state state;
struct isp_pipeline *pipe;
@@ -1180,7 +1180,7 @@ err_enum_init:
static int
isp_video_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
{
- struct isp_video_fh *vfh = to_isp_video_fh(fh);
+ struct isp_video_fh *vfh = file_to_isp_video_fh(file);
struct isp_video *video = video_drvdata(file);
struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
enum isp_pipeline_state state;
@@ -1297,7 +1297,7 @@ static int isp_video_open(struct file *file)
return -ENOMEM;
v4l2_fh_init(&handle->vfh, &video->video);
- v4l2_fh_add(&handle->vfh);
+ v4l2_fh_add(&handle->vfh, file);
/* If this is the first user, initialise the pipeline. */
if (omap3isp_get(video->isp) == NULL) {
@@ -1333,11 +1333,10 @@ static int isp_video_open(struct file *file)
handle->timeperframe.denominator = 1;
handle->video = video;
- file->private_data = &handle->vfh;
done:
if (ret < 0) {
- v4l2_fh_del(&handle->vfh);
+ v4l2_fh_del(&handle->vfh, file);
v4l2_fh_exit(&handle->vfh);
kfree(handle);
}
@@ -1348,8 +1347,8 @@ done:
static int isp_video_release(struct file *file)
{
struct isp_video *video = video_drvdata(file);
- struct v4l2_fh *vfh = file->private_data;
- struct isp_video_fh *handle = to_isp_video_fh(vfh);
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
+ struct isp_video_fh *handle = file_to_isp_video_fh(file);
/* Disable streaming and free the buffers queue resources. */
isp_video_streamoff(file, vfh, video->type);
@@ -1361,10 +1360,9 @@ static int isp_video_release(struct file *file)
v4l2_pipeline_pm_put(&video->video.entity);
/* Release the file handle. */
- v4l2_fh_del(vfh);
+ v4l2_fh_del(vfh, file);
v4l2_fh_exit(vfh);
kfree(handle);
- file->private_data = NULL;
omap3isp_put(video->isp);
@@ -1373,7 +1371,7 @@ static int isp_video_release(struct file *file)
static __poll_t isp_video_poll(struct file *file, poll_table *wait)
{
- struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);
+ struct isp_video_fh *vfh = file_to_isp_video_fh(file);
struct isp_video *video = video_drvdata(file);
__poll_t ret;
@@ -1386,7 +1384,7 @@ static __poll_t isp_video_poll(struct file *file, poll_table *wait)
static int isp_video_mmap(struct file *file, struct vm_area_struct *vma)
{
- struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);
+ struct isp_video_fh *vfh = file_to_isp_video_fh(file);
return vb2_mmap(&vfh->queue, vma);
}
diff --git a/drivers/media/platform/ti/omap3isp/ispvideo.h b/drivers/media/platform/ti/omap3isp/ispvideo.h
index 1d23df576e6b..537da59cff62 100644
--- a/drivers/media/platform/ti/omap3isp/ispvideo.h
+++ b/drivers/media/platform/ti/omap3isp/ispvideo.h
@@ -194,7 +194,11 @@ struct isp_video_fh {
struct v4l2_fract timeperframe;
};
-#define to_isp_video_fh(fh) container_of(fh, struct isp_video_fh, vfh)
+static inline struct isp_video_fh *file_to_isp_video_fh(struct file *filp)
+{
+ return container_of(file_to_v4l2_fh(filp), struct isp_video_fh, vfh);
+}
+
#define isp_video_queue_to_isp_video_fh(q) \
container_of(q, struct isp_video_fh, queue)
diff --git a/drivers/media/platform/ti/vpe/vpe.c b/drivers/media/platform/ti/vpe/vpe.c
index 636d76ecebcd..6029d4e8e0bd 100644
--- a/drivers/media/platform/ti/vpe/vpe.c
+++ b/drivers/media/platform/ti/vpe/vpe.c
@@ -422,6 +422,10 @@ struct vpe_ctx {
unsigned int src_mv_buf_selector;
};
+static inline struct vpe_ctx *to_vpe_ctx(struct file *filp)
+{
+ return container_of(file_to_v4l2_fh(filp), struct vpe_ctx, fh);
+}
/*
* M2M devices get 2 queues.
@@ -1562,7 +1566,7 @@ static int vpe_enum_fmt(struct file *file, void *priv,
static int vpe_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
- struct vpe_ctx *ctx = file->private_data;
+ struct vpe_ctx *ctx = to_vpe_ctx(file);
struct vb2_queue *vq;
struct vpe_q_data *q_data;
@@ -1719,7 +1723,7 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
static int vpe_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
- struct vpe_ctx *ctx = file->private_data;
+ struct vpe_ctx *ctx = to_vpe_ctx(file);
struct vpe_fmt *fmt = find_format(f);
if (V4L2_TYPE_IS_OUTPUT(f->type))
@@ -1783,7 +1787,7 @@ static int __vpe_s_fmt(struct vpe_ctx *ctx, struct v4l2_format *f)
static int vpe_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
int ret;
- struct vpe_ctx *ctx = file->private_data;
+ struct vpe_ctx *ctx = to_vpe_ctx(file);
ret = vpe_try_fmt(file, priv, f);
if (ret)
@@ -1871,7 +1875,7 @@ static int __vpe_try_selection(struct vpe_ctx *ctx, struct v4l2_selection *s)
static int vpe_g_selection(struct file *file, void *fh,
struct v4l2_selection *s)
{
- struct vpe_ctx *ctx = file->private_data;
+ struct vpe_ctx *ctx = to_vpe_ctx(file);
struct vpe_q_data *q_data;
struct v4l2_pix_format_mplane *pix;
bool use_c_rect = false;
@@ -1935,7 +1939,7 @@ static int vpe_g_selection(struct file *file, void *fh,
static int vpe_s_selection(struct file *file, void *fh,
struct v4l2_selection *s)
{
- struct vpe_ctx *ctx = file->private_data;
+ struct vpe_ctx *ctx = to_vpe_ctx(file);
struct vpe_q_data *q_data;
struct v4l2_selection sel = *s;
int ret;
@@ -2306,7 +2310,6 @@ static int vpe_open(struct file *file)
init_adb_hdrs(ctx);
v4l2_fh_init(&ctx->fh, video_devdata(file));
- file->private_data = ctx;
hdl = &ctx->hdl;
v4l2_ctrl_handler_init(hdl, 1);
@@ -2360,7 +2363,7 @@ static int vpe_open(struct file *file)
goto exit_fh;
}
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
/*
* for now, just report the creation of the first instance, we can later
@@ -2400,7 +2403,7 @@ free_ctx:
static int vpe_release(struct file *file)
{
struct vpe_dev *dev = video_drvdata(file);
- struct vpe_ctx *ctx = file->private_data;
+ struct vpe_ctx *ctx = to_vpe_ctx(file);
vpe_dbg(dev, "releasing instance %p\n", ctx);
@@ -2418,7 +2421,7 @@ static int vpe_release(struct file *file)
vpdma_free_desc_buf(&ctx->sc_coeff_v);
vpdma_free_desc_buf(&ctx->sc_coeff_h);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
v4l2_ctrl_handler_free(&ctx->hdl);
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
diff --git a/drivers/media/platform/verisilicon/hantro.h b/drivers/media/platform/verisilicon/hantro.h
index 81328c63b796..e0fdc4535b2d 100644
--- a/drivers/media/platform/verisilicon/hantro.h
+++ b/drivers/media/platform/verisilicon/hantro.h
@@ -382,9 +382,9 @@ extern int hantro_debug;
pr_err("%s:%d: " fmt, __func__, __LINE__, ##args)
/* Structure access helpers. */
-static __always_inline struct hantro_ctx *fh_to_ctx(struct v4l2_fh *fh)
+static __always_inline struct hantro_ctx *file_to_ctx(struct file *filp)
{
- return container_of(fh, struct hantro_ctx, fh);
+ return container_of(file_to_v4l2_fh(filp), struct hantro_ctx, fh);
}
/* Register accessors. */
diff --git a/drivers/media/platform/verisilicon/hantro_drv.c b/drivers/media/platform/verisilicon/hantro_drv.c
index fa972effd4a2..e0c11fe8b55c 100644
--- a/drivers/media/platform/verisilicon/hantro_drv.c
+++ b/drivers/media/platform/verisilicon/hantro_drv.c
@@ -662,8 +662,7 @@ static int hantro_open(struct file *filp)
}
v4l2_fh_init(&ctx->fh, vdev);
- filp->private_data = &ctx->fh;
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, filp);
hantro_reset_fmts(ctx);
@@ -677,7 +676,7 @@ static int hantro_open(struct file *filp)
return 0;
err_fh_free:
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, filp);
v4l2_fh_exit(&ctx->fh);
err_ctx_free:
kfree(ctx);
@@ -686,15 +685,14 @@ err_ctx_free:
static int hantro_release(struct file *filp)
{
- struct hantro_ctx *ctx =
- container_of(filp->private_data, struct hantro_ctx, fh);
+ struct hantro_ctx *ctx = file_to_ctx(filp);
/*
* No need for extra locking because this was the last reference
* to this file.
*/
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, filp);
v4l2_fh_exit(&ctx->fh);
v4l2_ctrl_handler_free(&ctx->ctrl_handler);
kfree(ctx);
@@ -917,6 +915,8 @@ static int hantro_add_func(struct hantro_dev *vpu, unsigned int funcid)
vpu->decoder = func;
v4l2_disable_ioctl(vfd, VIDIOC_TRY_ENCODER_CMD);
v4l2_disable_ioctl(vfd, VIDIOC_ENCODER_CMD);
+ v4l2_disable_ioctl(vfd, VIDIOC_G_SELECTION);
+ v4l2_disable_ioctl(vfd, VIDIOC_S_SELECTION);
}
video_set_drvdata(vfd, vpu);
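With VIDIOC_G_SELECTION/VIDIOC_S_SELECTION disabled on the decoder node at registration time, the core returns -ENOTTY before the handlers run, which is why the is_encoder guards disappear from the hantro_v4l2.c selection handlers below and only the buffer-type check remains. Sketch of the registration-time gating:

	static void demo_gate_decoder_ioctls(struct video_device *vfd)
	{
		/* the core rejects these before the handler is called */
		v4l2_disable_ioctl(vfd, VIDIOC_G_SELECTION);
		v4l2_disable_ioctl(vfd, VIDIOC_S_SELECTION);
	}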
diff --git a/drivers/media/platform/verisilicon/hantro_v4l2.c b/drivers/media/platform/verisilicon/hantro_v4l2.c
index 7c3515cf7d64..fcf3bd9bcda2 100644
--- a/drivers/media/platform/verisilicon/hantro_v4l2.c
+++ b/drivers/media/platform/verisilicon/hantro_v4l2.c
@@ -185,7 +185,7 @@ static int vidioc_querycap(struct file *file, void *priv,
static int vidioc_enum_framesizes(struct file *file, void *priv,
struct v4l2_frmsizeenum *fsize)
{
- struct hantro_ctx *ctx = fh_to_ctx(priv);
+ struct hantro_ctx *ctx = file_to_ctx(file);
const struct hantro_fmt *fmt;
fmt = hantro_find_format(ctx, fsize->pixel_format);
@@ -217,7 +217,7 @@ static int vidioc_enum_fmt(struct file *file, void *priv,
struct v4l2_fmtdesc *f, bool capture)
{
- struct hantro_ctx *ctx = fh_to_ctx(priv);
+ struct hantro_ctx *ctx = file_to_ctx(file);
const struct hantro_fmt *fmt, *formats;
unsigned int num_fmts, i, j = 0;
bool skip_mode_none, enum_all_formats;
@@ -297,7 +297,7 @@ static int vidioc_g_fmt_out_mplane(struct file *file, void *priv,
struct v4l2_format *f)
{
struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
- struct hantro_ctx *ctx = fh_to_ctx(priv);
+ struct hantro_ctx *ctx = file_to_ctx(file);
vpu_debug(4, "f->type = %d\n", f->type);
@@ -310,7 +310,7 @@ static int vidioc_g_fmt_cap_mplane(struct file *file, void *priv,
struct v4l2_format *f)
{
struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
- struct hantro_ctx *ctx = fh_to_ctx(priv);
+ struct hantro_ctx *ctx = file_to_ctx(file);
vpu_debug(4, "f->type = %d\n", f->type);
@@ -398,13 +398,13 @@ static int hantro_try_fmt(const struct hantro_ctx *ctx,
static int vidioc_try_fmt_cap_mplane(struct file *file, void *priv,
struct v4l2_format *f)
{
- return hantro_try_fmt(fh_to_ctx(priv), &f->fmt.pix_mp, f->type);
+ return hantro_try_fmt(file_to_ctx(file), &f->fmt.pix_mp, f->type);
}
static int vidioc_try_fmt_out_mplane(struct file *file, void *priv,
struct v4l2_format *f)
{
- return hantro_try_fmt(fh_to_ctx(priv), &f->fmt.pix_mp, f->type);
+ return hantro_try_fmt(file_to_ctx(file), &f->fmt.pix_mp, f->type);
}
static void
@@ -648,23 +648,22 @@ static int hantro_set_fmt_cap(struct hantro_ctx *ctx,
static int
vidioc_s_fmt_out_mplane(struct file *file, void *priv, struct v4l2_format *f)
{
- return hantro_set_fmt_out(fh_to_ctx(priv), &f->fmt.pix_mp, HANTRO_AUTO_POSTPROC);
+ return hantro_set_fmt_out(file_to_ctx(file), &f->fmt.pix_mp, HANTRO_AUTO_POSTPROC);
}
static int
vidioc_s_fmt_cap_mplane(struct file *file, void *priv, struct v4l2_format *f)
{
- return hantro_set_fmt_cap(fh_to_ctx(priv), &f->fmt.pix_mp);
+ return hantro_set_fmt_cap(file_to_ctx(file), &f->fmt.pix_mp);
}
static int vidioc_g_selection(struct file *file, void *priv,
struct v4l2_selection *sel)
{
- struct hantro_ctx *ctx = fh_to_ctx(priv);
+ struct hantro_ctx *ctx = file_to_ctx(file);
/* Crop only supported on source. */
- if (!ctx->is_encoder ||
- sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
return -EINVAL;
switch (sel->target) {
@@ -691,13 +690,12 @@ static int vidioc_g_selection(struct file *file, void *priv,
static int vidioc_s_selection(struct file *file, void *priv,
struct v4l2_selection *sel)
{
- struct hantro_ctx *ctx = fh_to_ctx(priv);
+ struct hantro_ctx *ctx = file_to_ctx(file);
struct v4l2_rect *rect = &sel->r;
struct vb2_queue *vq;
/* Crop only supported on source. */
- if (!ctx->is_encoder ||
- sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
return -EINVAL;
/* Change not allowed if the queue is streaming. */
@@ -738,7 +736,7 @@ static const struct v4l2_event hantro_eos_event = {
static int vidioc_encoder_cmd(struct file *file, void *priv,
struct v4l2_encoder_cmd *ec)
{
- struct hantro_ctx *ctx = fh_to_ctx(priv);
+ struct hantro_ctx *ctx = file_to_ctx(file);
int ret;
ret = v4l2_m2m_ioctl_try_encoder_cmd(file, priv, ec);
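/*
 * Editor's note: the '!ctx->is_encoder' tests could be dropped from
 * vidioc_g_selection() and vidioc_s_selection() above because the
 * hantro_drv.c hunk earlier in this diff now disables those ioctls on
 * the decoder video node at registration time:
 *
 *     v4l2_disable_ioctl(vfd, VIDIOC_G_SELECTION);
 *     v4l2_disable_ioctl(vfd, VIDIOC_S_SELECTION);
 *
 * so the V4L2 core rejects the calls before these handlers ever run.
 */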
diff --git a/drivers/media/platform/verisilicon/imx8m_vpu_hw.c b/drivers/media/platform/verisilicon/imx8m_vpu_hw.c
index 35799da534ed..f9f276385c11 100644
--- a/drivers/media/platform/verisilicon/imx8m_vpu_hw.c
+++ b/drivers/media/platform/verisilicon/imx8m_vpu_hw.c
@@ -234,24 +234,6 @@ static const struct hantro_fmt imx8m_vpu_g2_dec_fmts[] = {
},
};
-static irqreturn_t imx8m_vpu_g1_irq(int irq, void *dev_id)
-{
- struct hantro_dev *vpu = dev_id;
- enum vb2_buffer_state state;
- u32 status;
-
- status = vdpu_read(vpu, G1_REG_INTERRUPT);
- state = (status & G1_REG_INTERRUPT_DEC_RDY_INT) ?
- VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR;
-
- vdpu_write(vpu, 0, G1_REG_INTERRUPT);
- vdpu_write(vpu, G1_REG_CONFIG_DEC_CLK_GATE_E, G1_REG_CONFIG);
-
- hantro_irq_done(vpu, state);
-
- return IRQ_HANDLED;
-}
-
static int imx8mq_vpu_hw_init(struct hantro_dev *vpu)
{
vpu->ctrl_base = vpu->reg_bases[vpu->variant->num_regs - 1];
@@ -328,7 +310,7 @@ static const struct hantro_codec_ops imx8mq_vpu_g2_codec_ops[] = {
*/
static const struct hantro_irq imx8mq_irqs[] = {
- { "g1", imx8m_vpu_g1_irq },
+ { "g1", hantro_g1_irq },
};
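/*
 * Editor's note: the deleted imx8m_vpu_g1_irq() above was the generic
 * G1 interrupt sequence (read G1_REG_INTERRUPT, pick VB2_BUF_STATE_DONE
 * or VB2_BUF_STATE_ERROR from the DEC_RDY bit, ack the interrupt,
 * re-enable clock gating, then hantro_irq_done()). The shared
 * hantro_g1_irq() handler presumably implements exactly that sequence,
 * which is why the local copy can go.
 */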
static const struct hantro_irq imx8mq_g2_irqs[] = {
diff --git a/drivers/media/platform/xilinx/xilinx-dma.c b/drivers/media/platform/xilinx/xilinx-dma.c
index 18bfa6001909..fcfe0883aba5 100644
--- a/drivers/media/platform/xilinx/xilinx-dma.c
+++ b/drivers/media/platform/xilinx/xilinx-dma.c
@@ -469,7 +469,7 @@ static const struct vb2_ops xvip_dma_queue_qops = {
static int
xvip_dma_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
{
- struct v4l2_fh *vfh = file->private_data;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
cap->capabilities = dma->xdev->v4l2_caps | V4L2_CAP_STREAMING |
@@ -491,7 +491,7 @@ xvip_dma_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
static int
xvip_dma_enum_format(struct file *file, void *fh, struct v4l2_fmtdesc *f)
{
- struct v4l2_fh *vfh = file->private_data;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
if (f->index > 0)
@@ -505,7 +505,7 @@ xvip_dma_enum_format(struct file *file, void *fh, struct v4l2_fmtdesc *f)
static int
xvip_dma_get_format(struct file *file, void *fh, struct v4l2_format *format)
{
- struct v4l2_fh *vfh = file->private_data;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
format->fmt.pix = dma->format;
@@ -565,7 +565,7 @@ __xvip_dma_try_format(struct xvip_dma *dma, struct v4l2_pix_format *pix,
static int
xvip_dma_try_format(struct file *file, void *fh, struct v4l2_format *format)
{
- struct v4l2_fh *vfh = file->private_data;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
__xvip_dma_try_format(dma, &format->fmt.pix, NULL);
@@ -575,7 +575,7 @@ xvip_dma_try_format(struct file *file, void *fh, struct v4l2_format *format)
static int
xvip_dma_set_format(struct file *file, void *fh, struct v4l2_format *format)
{
- struct v4l2_fh *vfh = file->private_data;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
const struct xvip_video_format *info;
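/*
 * Editor's note: file_to_v4l2_fh() replaces open-coded reads of
 * file->private_data throughout this series. A plausible definition,
 * assuming the V4L2 core still parks the struct v4l2_fh pointer in
 * file->private_data on open:
 */
static inline struct v4l2_fh *file_to_v4l2_fh(struct file *file)
{
	/* sketch only; the actual accessor may hide more indirection */
	return file->private_data;
}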
diff --git a/drivers/media/radio/Kconfig b/drivers/media/radio/Kconfig
index 72776d08046a..bbbdd054ba64 100644
--- a/drivers/media/radio/Kconfig
+++ b/drivers/media/radio/Kconfig
@@ -141,23 +141,6 @@ config RADIO_TIMBERDALE
found behind the Timberdale FPGA on the Russellville board.
Enabling this driver will automatically select the DSP and tuner.
-config RADIO_WL1273
- tristate "Texas Instruments WL1273 I2C FM Radio"
- depends on I2C
- select MFD_CORE
- select MFD_WL1273_CORE
- select FW_LOADER
- help
- Choose Y here if you have this FM radio chip.
-
- In order to control your radio card, you will need to use programs
- that are compatible with the Video For Linux 2 API. Information on
- this API and pointers to "v4l2" programs may be found at
- <file:Documentation/userspace-api/media/index.rst>.
-
- To compile this driver as a module, choose M here: the
- module will be called radio-wl1273.
-
config USB_DSBR
tristate "D-Link/GemTek USB FM radio support"
depends on USB
diff --git a/drivers/media/radio/Makefile b/drivers/media/radio/Makefile
index 1ff46f3a6ed3..f54693ebdb30 100644
--- a/drivers/media/radio/Makefile
+++ b/drivers/media/radio/Makefile
@@ -30,7 +30,6 @@ obj-$(CONFIG_RADIO_TERRATEC) += radio-terratec.o
obj-$(CONFIG_RADIO_TIMBERDALE) += radio-timb.o
obj-$(CONFIG_RADIO_TRUST) += radio-trust.o
obj-$(CONFIG_RADIO_TYPHOON) += radio-typhoon.o
-obj-$(CONFIG_RADIO_WL1273) += radio-wl1273.o
obj-$(CONFIG_RADIO_ZOLTRIX) += radio-zoltrix.o
obj-$(CONFIG_USB_DSBR) += dsbr100.o
diff --git a/drivers/media/radio/radio-aimslab.c b/drivers/media/radio/radio-aimslab.c
index 3c8c17d64821..2c1d413e8636 100644
--- a/drivers/media/radio/radio-aimslab.c
+++ b/drivers/media/radio/radio-aimslab.c
@@ -4,7 +4,7 @@
*
* Copyright 1997 M. Kirkwood
*
- * Converted to the radio-isa framework by Hans Verkuil <hansverk@cisco.com>
+ * Converted to the radio-isa framework by Hans Verkuil <hverkuil@kernel.org>
* Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@kernel.org>
* Converted to new API by Alan Cox <alan@lxorguk.ukuu.org.uk>
* Various bugfixes and enhancements by Russell Kroll <rkroll@exploits.org>
diff --git a/drivers/media/radio/radio-aztech.c b/drivers/media/radio/radio-aztech.c
index d989c0b3966f..0a4667bb7034 100644
--- a/drivers/media/radio/radio-aztech.c
+++ b/drivers/media/radio/radio-aztech.c
@@ -2,7 +2,7 @@
/*
* radio-aztech.c - Aztech radio card driver
*
- * Converted to the radio-isa framework by Hans Verkuil <hverkuil@xs4all.nl>
+ * Converted to the radio-isa framework by Hans Verkuil <hverkuil@kernel.org>
* Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@kernel.org>
* Adapted to support the Video for Linux API by
* Russell Kroll <rkroll@exploits.org>. Based on original tuner code by:
diff --git a/drivers/media/radio/radio-gemtek.c b/drivers/media/radio/radio-gemtek.c
index 5ca6274c45bd..a3265f1dd189 100644
--- a/drivers/media/radio/radio-gemtek.c
+++ b/drivers/media/radio/radio-gemtek.c
@@ -15,7 +15,7 @@
* Converted to new API by Alan Cox <alan@lxorguk.ukuu.org.uk>
* Various bugfixes and enhancements by Russell Kroll <rkroll@exploits.org>
*
- * Converted to the radio-isa framework by Hans Verkuil <hansverk@cisco.com>
+ * Converted to the radio-isa framework by Hans Verkuil <hverkuil@kernel.org>
* Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@kernel.org>
*
* Note: this card seems to swap the left and right audio channels!
diff --git a/drivers/media/radio/radio-isa.c b/drivers/media/radio/radio-isa.c
index 4f87c76a2a96..1a144536ffa7 100644
--- a/drivers/media/radio/radio-isa.c
+++ b/drivers/media/radio/radio-isa.c
@@ -4,7 +4,7 @@
* This takes care of all the V4L2 scaffolding, allowing the ISA drivers
* to concentrate on the actual hardware operation.
*
- * Copyright (C) 2012 Hans Verkuil <hansverk@cisco.com>
+ * Copyright (C) 2012 Hans Verkuil <hverkuil@kernel.org>
*/
#include <linux/module.h>
diff --git a/drivers/media/radio/radio-isa.h b/drivers/media/radio/radio-isa.h
index 0f3db473da5e..62ff5c3fb5d5 100644
--- a/drivers/media/radio/radio-isa.h
+++ b/drivers/media/radio/radio-isa.h
@@ -4,7 +4,7 @@
* This takes care of all the V4L2 scaffolding, allowing the ISA drivers
* to concentrate on the actual hardware operation.
*
- * Copyright (C) 2012 Hans Verkuil <hansverk@cisco.com>
+ * Copyright (C) 2012 Hans Verkuil <hverkuil@kernel.org>
*/
#ifndef _RADIO_ISA_H_
diff --git a/drivers/media/radio/radio-keene.c b/drivers/media/radio/radio-keene.c
index a35648316aa8..f3b57f0cb1ec 100644
--- a/drivers/media/radio/radio-keene.c
+++ b/drivers/media/radio/radio-keene.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * Copyright (c) 2012 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (c) 2012 Hans Verkuil <hverkuil@kernel.org>
*/
/* kernel includes */
@@ -18,7 +18,7 @@
#include <linux/mutex.h>
/* driver and module definitions */
-MODULE_AUTHOR("Hans Verkuil <hverkuil@xs4all.nl>");
+MODULE_AUTHOR("Hans Verkuil <hverkuil@kernel.org>");
MODULE_DESCRIPTION("Keene FM Transmitter driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/radio/radio-miropcm20.c b/drivers/media/radio/radio-miropcm20.c
index 27f058c5e677..67712ab3d564 100644
--- a/drivers/media/radio/radio-miropcm20.c
+++ b/drivers/media/radio/radio-miropcm20.c
@@ -23,7 +23,7 @@
* This code has been reintroduced and converted to use
* the new V4L2 RDS API by:
*
- * Hans Verkuil <hansverk@cisco.com>
+ * Hans Verkuil <hverkuil@kernel.org>
*/
#include <linux/module.h>
diff --git a/drivers/media/radio/radio-raremono.c b/drivers/media/radio/radio-raremono.c
index 64c7452c05b5..f60775b005e1 100644
--- a/drivers/media/radio/radio-raremono.c
+++ b/drivers/media/radio/radio-raremono.c
@@ -25,14 +25,14 @@
*
 * The USB protocol has been reverse engineered using wireshark, initially
* by Dinesh Ram <dinesh.ram@cern.ch> and finished by Hans Verkuil
- * <hverkuil@xs4all.nl>.
+ * <hverkuil@kernel.org>.
*
* Sadly the firmware used in this product hides lots of goodies since the
* si4734 has more features than are supported by the firmware. Oh well...
*/
/* driver and module definitions */
-MODULE_AUTHOR("Hans Verkuil <hverkuil@xs4all.nl>");
+MODULE_AUTHOR("Hans Verkuil <hverkuil@kernel.org>");
MODULE_DESCRIPTION("Thanko's Raremono AM/FM/SW Receiver USB driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/radio/radio-rtrack2.c b/drivers/media/radio/radio-rtrack2.c
index 16b13a63bfed..efc02069bf9d 100644
--- a/drivers/media/radio/radio-rtrack2.c
+++ b/drivers/media/radio/radio-rtrack2.c
@@ -7,7 +7,7 @@
* Converted to new API by Alan Cox <alan@lxorguk.ukuu.org.uk>
* Various bugfixes and enhancements by Russell Kroll <rkroll@exploits.org>
*
- * Converted to the radio-isa framework by Hans Verkuil <hansverk@cisco.com>
+ * Converted to the radio-isa framework by Hans Verkuil <hverkuil@kernel.org>
* Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@kernel.org>
*
* Fully tested with actual hardware and the v4l2-compliance tool.
diff --git a/drivers/media/radio/radio-terratec.c b/drivers/media/radio/radio-terratec.c
index 720080634454..43817dd0a0fe 100644
--- a/drivers/media/radio/radio-terratec.c
+++ b/drivers/media/radio/radio-terratec.c
@@ -17,7 +17,7 @@
* Frequency control is done digitally -- ie out(port,encodefreq(95.8));
* Volume Control is done digitally
*
- * Converted to the radio-isa framework by Hans Verkuil <hansverk@cisco.com>
+ * Converted to the radio-isa framework by Hans Verkuil <hverkuil@kernel.org>
* Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@kernel.org>
*/
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c
deleted file mode 100644
index f55217ccf2b8..000000000000
--- a/drivers/media/radio/radio-wl1273.c
+++ /dev/null
@@ -1,2159 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Driver for the Texas Instruments WL1273 FM radio.
- *
- * Copyright (C) 2011 Nokia Corporation
- * Author: Matti J. Aaltonen <matti.j.aaltonen@nokia.com>
- */
-
-#include <linux/delay.h>
-#include <linux/firmware.h>
-#include <linux/interrupt.h>
-#include <linux/mfd/wl1273-core.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <media/v4l2-common.h>
-#include <media/v4l2-ctrls.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-ioctl.h>
-
-#define DRIVER_DESC "Wl1273 FM Radio"
-
-#define WL1273_POWER_SET_OFF 0
-#define WL1273_POWER_SET_FM BIT(0)
-#define WL1273_POWER_SET_RDS BIT(1)
-#define WL1273_POWER_SET_RETENTION BIT(4)
-
-#define WL1273_PUPD_SET_OFF 0x00
-#define WL1273_PUPD_SET_ON 0x01
-#define WL1273_PUPD_SET_RETENTION 0x10
-
-#define WL1273_FREQ(x) (x * 10000 / 625)
-#define WL1273_INV_FREQ(x) (x * 625 / 10000)
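/*
 * Editor's note: this driver advertises V4L2_TUNER_CAP_LOW, so V4L2
 * frequency values are in units of 62.5 Hz while the driver works in
 * kHz. The conversion factor is 1000 / 62.5 = 16, written as 10000/625
 * to stay in integer math: WL1273_FREQ(87500) == 87500 * 16 == 1400000
 * for 87.5 MHz, and WL1273_INV_FREQ() is the exact inverse.
 */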
-
-/*
- * static int radio_nr - The number of the radio device
- *
- * The default is 0.
- */
-static int radio_nr;
-module_param(radio_nr, int, 0);
-MODULE_PARM_DESC(radio_nr, "The number of the radio device. Default = 0");
-
-struct wl1273_device {
- char *bus_type;
-
- u8 forbidden;
- unsigned int preemphasis;
- unsigned int spacing;
- unsigned int tx_power;
- unsigned int rx_frequency;
- unsigned int tx_frequency;
- unsigned int rangelow;
- unsigned int rangehigh;
- unsigned int band;
- bool stereo;
-
- /* RDS */
- unsigned int rds_on;
-
- wait_queue_head_t read_queue;
- struct mutex lock; /* for serializing fm radio operations */
- struct completion busy;
-
- unsigned char *buffer;
- unsigned int buf_size;
- unsigned int rd_index;
- unsigned int wr_index;
-
- /* Selected interrupts */
- u16 irq_flags;
- u16 irq_received;
-
- struct v4l2_ctrl_handler ctrl_handler;
- struct v4l2_device v4l2dev;
- struct video_device videodev;
- struct device *dev;
- struct wl1273_core *core;
- struct file *owner;
- char *write_buf;
- unsigned int rds_users;
-};
-
-#define WL1273_IRQ_MASK (WL1273_FR_EVENT | \
- WL1273_POW_ENB_EVENT)
-
-/*
- * static unsigned int rds_buf - the number of RDS buffer blocks used.
- *
- * The default number is 100.
- */
-static unsigned int rds_buf = 100;
-module_param(rds_buf, uint, 0);
-MODULE_PARM_DESC(rds_buf, "Number of RDS buffer entries. Default = 100");
-
-static int wl1273_fm_write_fw(struct wl1273_core *core,
- __u8 *fw, int len)
-{
- struct i2c_client *client = core->client;
- struct i2c_msg msg;
- int i, r = 0;
-
- msg.addr = client->addr;
- msg.flags = 0;
-
- for (i = 0; i <= len; i++) {
- msg.len = fw[0];
- msg.buf = fw + 1;
-
- fw += msg.len + 1;
- dev_dbg(&client->dev, "%s:len[%d]: %d\n", __func__, i, msg.len);
-
- r = i2c_transfer(client->adapter, &msg, 1);
- if (r < 0 && i < len + 1)
- break;
- }
-
- dev_dbg(&client->dev, "%s: i: %d\n", __func__, i);
- dev_dbg(&client->dev, "%s: len + 1: %d\n", __func__, len + 1);
-
- /* Last transfer always fails. */
- if (i == len || r == 1)
- r = 0;
-
- return r;
-}
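/*
 * Editor's note: the loop above runs len + 1 times (i = 0..len) and,
 * per the "Last transfer always fails" comment, the final check
 * deliberately forgives an I2C error on the last iteration: r < 0 with
 * i == len is still reported to the caller as success.
 */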
-
-#define WL1273_FIFO_HAS_DATA(status) (1 << 5 & status)
-#define WL1273_RDS_CORRECTABLE_ERROR (1 << 3)
-#define WL1273_RDS_UNCORRECTABLE_ERROR (1 << 4)
-
-static int wl1273_fm_rds(struct wl1273_device *radio)
-{
- struct wl1273_core *core = radio->core;
- struct i2c_client *client = core->client;
- u16 val;
- u8 b0 = WL1273_RDS_DATA_GET, status;
- struct v4l2_rds_data rds = { 0, 0, 0 };
- struct i2c_msg msg[] = {
- {
- .addr = client->addr,
- .flags = 0,
- .buf = &b0,
- .len = 1,
- },
- {
- .addr = client->addr,
- .flags = I2C_M_RD,
- .buf = (u8 *) &rds,
- .len = sizeof(rds),
- }
- };
- int r;
-
- if (core->mode != WL1273_MODE_RX)
- return 0;
-
- r = core->read(core, WL1273_RDS_SYNC_GET, &val);
- if (r)
- return r;
-
- if ((val & 0x01) == 0) {
- /* RDS decoder not synchronized */
- return -EAGAIN;
- }
-
- /* copy all four RDS blocks to internal buffer */
- do {
- r = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
- if (r != ARRAY_SIZE(msg)) {
- dev_err(radio->dev, WL1273_FM_DRIVER_NAME
- ": %s: read_rds error r == %i)\n",
- __func__, r);
- }
-
- status = rds.block;
-
- if (!WL1273_FIFO_HAS_DATA(status))
- break;
-
- /* copy bits 0-2 (the block ID) to bits 3-5 */
- rds.block = V4L2_RDS_BLOCK_MSK & status;
- rds.block |= rds.block << 3;
-
- /* copy the error bits to standard positions */
- if (WL1273_RDS_UNCORRECTABLE_ERROR & status) {
- rds.block |= V4L2_RDS_BLOCK_ERROR;
- rds.block &= ~V4L2_RDS_BLOCK_CORRECTED;
- } else if (WL1273_RDS_CORRECTABLE_ERROR & status) {
- rds.block &= ~V4L2_RDS_BLOCK_ERROR;
- rds.block |= V4L2_RDS_BLOCK_CORRECTED;
- }
-
- /* copy RDS block to internal buffer */
- memcpy(&radio->buffer[radio->wr_index], &rds, RDS_BLOCK_SIZE);
- radio->wr_index += 3;
-
- /* wrap write pointer */
- if (radio->wr_index >= radio->buf_size)
- radio->wr_index = 0;
-
- /* check for overflow & start over */
- if (radio->wr_index == radio->rd_index) {
- dev_dbg(radio->dev, "RDS OVERFLOW");
-
- radio->rd_index = 0;
- radio->wr_index = 0;
- break;
- }
- } while (WL1273_FIFO_HAS_DATA(status));
-
- /* wake up read queue */
- if (radio->wr_index != radio->rd_index)
- wake_up_interruptible(&radio->read_queue);
-
- return 0;
-}
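/*
 * Editor's note: each ring-buffer entry above is one struct
 * v4l2_rds_data, i.e. RDS_BLOCK_SIZE == 3 bytes (LSB, MSB, block
 * id/status), which is why the write pointer advances by 3 per block
 * and the read side in wl1273_fm_fops_read() copies whole 3-byte
 * blocks back to userspace.
 */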
-
-static irqreturn_t wl1273_fm_irq_thread_handler(int irq, void *dev_id)
-{
- struct wl1273_device *radio = dev_id;
- struct wl1273_core *core = radio->core;
- u16 flags;
- int r;
-
- r = core->read(core, WL1273_FLAG_GET, &flags);
- if (r)
- goto out;
-
- if (flags & WL1273_BL_EVENT) {
- radio->irq_received = flags;
- dev_dbg(radio->dev, "IRQ: BL\n");
- }
-
- if (flags & WL1273_RDS_EVENT) {
- msleep(200);
-
- wl1273_fm_rds(radio);
- }
-
- if (flags & WL1273_BBLK_EVENT)
- dev_dbg(radio->dev, "IRQ: BBLK\n");
-
- if (flags & WL1273_LSYNC_EVENT)
- dev_dbg(radio->dev, "IRQ: LSYNC\n");
-
- if (flags & WL1273_LEV_EVENT) {
- u16 level;
-
- r = core->read(core, WL1273_RSSI_LVL_GET, &level);
- if (r)
- goto out;
-
- if (level > 14)
- dev_dbg(radio->dev, "IRQ: LEV: 0x%x04\n", level);
- }
-
- if (flags & WL1273_IFFR_EVENT)
- dev_dbg(radio->dev, "IRQ: IFFR\n");
-
- if (flags & WL1273_PI_EVENT)
- dev_dbg(radio->dev, "IRQ: PI\n");
-
- if (flags & WL1273_PD_EVENT)
- dev_dbg(radio->dev, "IRQ: PD\n");
-
- if (flags & WL1273_STIC_EVENT)
- dev_dbg(radio->dev, "IRQ: STIC\n");
-
- if (flags & WL1273_MAL_EVENT)
- dev_dbg(radio->dev, "IRQ: MAL\n");
-
- if (flags & WL1273_POW_ENB_EVENT) {
- complete(&radio->busy);
- dev_dbg(radio->dev, "NOT BUSY\n");
- dev_dbg(radio->dev, "IRQ: POW_ENB\n");
- }
-
- if (flags & WL1273_SCAN_OVER_EVENT)
- dev_dbg(radio->dev, "IRQ: SCAN_OVER\n");
-
- if (flags & WL1273_ERROR_EVENT)
- dev_dbg(radio->dev, "IRQ: ERROR\n");
-
- if (flags & WL1273_FR_EVENT) {
- u16 freq;
-
- dev_dbg(radio->dev, "IRQ: FR:\n");
-
- if (core->mode == WL1273_MODE_RX) {
- r = core->write(core, WL1273_TUNER_MODE_SET,
- TUNER_MODE_STOP_SEARCH);
- if (r) {
- dev_err(radio->dev,
- "%s: TUNER_MODE_SET fails: %d\n",
- __func__, r);
- goto out;
- }
-
- r = core->read(core, WL1273_FREQ_SET, &freq);
- if (r)
- goto out;
-
- if (radio->band == WL1273_BAND_JAPAN)
- radio->rx_frequency = WL1273_BAND_JAPAN_LOW +
- freq * 50;
- else
- radio->rx_frequency = WL1273_BAND_OTHER_LOW +
- freq * 50;
- /*
- * The driver works better with this msleep;
- * the documentation doesn't mention it.
- */
- usleep_range(10000, 15000);
-
- dev_dbg(radio->dev, "%dkHz\n", radio->rx_frequency);
-
- } else {
- r = core->read(core, WL1273_CHANL_SET, &freq);
- if (r)
- goto out;
-
- dev_dbg(radio->dev, "%dkHz\n", freq);
- }
- dev_dbg(radio->dev, "%s: NOT BUSY\n", __func__);
- }
-
-out:
- core->write(core, WL1273_INT_MASK_SET, radio->irq_flags);
- complete(&radio->busy);
-
- return IRQ_HANDLED;
-}
-
-static int wl1273_fm_set_tx_freq(struct wl1273_device *radio, unsigned int freq)
-{
- struct wl1273_core *core = radio->core;
- int r = 0;
- unsigned long t;
-
- if (freq < WL1273_BAND_TX_LOW) {
- dev_err(radio->dev,
- "Frequency out of range: %d < %d\n", freq,
- WL1273_BAND_TX_LOW);
- return -ERANGE;
- }
-
- if (freq > WL1273_BAND_TX_HIGH) {
- dev_err(radio->dev,
- "Frequency out of range: %d > %d\n", freq,
- WL1273_BAND_TX_HIGH);
- return -ERANGE;
- }
-
- /*
- * The driver works better with this sleep;
- * the documentation doesn't mention it.
- */
- usleep_range(5000, 10000);
-
- dev_dbg(radio->dev, "%s: freq: %d kHz\n", __func__, freq);
-
- /* Set the current tx channel */
- r = core->write(core, WL1273_CHANL_SET, freq / 10);
- if (r)
- return r;
-
- reinit_completion(&radio->busy);
-
- /* wait for the FR IRQ */
- t = wait_for_completion_timeout(&radio->busy, msecs_to_jiffies(2000));
- if (!t)
- return -ETIMEDOUT;
-
- dev_dbg(radio->dev, "WL1273_CHANL_SET: %lu\n", t);
-
- /* Enable the output power */
- r = core->write(core, WL1273_POWER_ENB_SET, 1);
- if (r)
- return r;
-
- reinit_completion(&radio->busy);
-
- /* wait for the POWER_ENB IRQ */
- t = wait_for_completion_timeout(&radio->busy, msecs_to_jiffies(1000));
- if (!t)
- return -ETIMEDOUT;
-
- radio->tx_frequency = freq;
- dev_dbg(radio->dev, "WL1273_POWER_ENB_SET: %lu\n", t);
-
- return 0;
-}
-
-static int wl1273_fm_set_rx_freq(struct wl1273_device *radio, unsigned int freq)
-{
- struct wl1273_core *core = radio->core;
- int r, f;
- unsigned long t;
-
- if (freq < radio->rangelow) {
- dev_err(radio->dev,
- "Frequency out of range: %d < %d\n", freq,
- radio->rangelow);
- r = -ERANGE;
- goto err;
- }
-
- if (freq > radio->rangehigh) {
- dev_err(radio->dev,
- "Frequency out of range: %d > %d\n", freq,
- radio->rangehigh);
- r = -ERANGE;
- goto err;
- }
-
- dev_dbg(radio->dev, "%s: %dkHz\n", __func__, freq);
-
- core->write(core, WL1273_INT_MASK_SET, radio->irq_flags);
-
- if (radio->band == WL1273_BAND_JAPAN)
- f = (freq - WL1273_BAND_JAPAN_LOW) / 50;
- else
- f = (freq - WL1273_BAND_OTHER_LOW) / 50;
-
- r = core->write(core, WL1273_FREQ_SET, f);
- if (r) {
- dev_err(radio->dev, "FREQ_SET fails\n");
- goto err;
- }
-
- r = core->write(core, WL1273_TUNER_MODE_SET, TUNER_MODE_PRESET);
- if (r) {
- dev_err(radio->dev, "TUNER_MODE_SET fails\n");
- goto err;
- }
-
- reinit_completion(&radio->busy);
-
- t = wait_for_completion_timeout(&radio->busy, msecs_to_jiffies(2000));
- if (!t) {
- dev_err(radio->dev, "%s: TIMEOUT\n", __func__);
- return -ETIMEDOUT;
- }
-
- radio->rd_index = 0;
- radio->wr_index = 0;
- radio->rx_frequency = freq;
- return 0;
-err:
- return r;
-}
-
-static int wl1273_fm_get_freq(struct wl1273_device *radio)
-{
- struct wl1273_core *core = radio->core;
- unsigned int freq;
- u16 f;
- int r;
-
- if (core->mode == WL1273_MODE_RX) {
- r = core->read(core, WL1273_FREQ_SET, &f);
- if (r)
- return r;
-
- dev_dbg(radio->dev, "Freq get: 0x%04x\n", f);
- if (radio->band == WL1273_BAND_JAPAN)
- freq = WL1273_BAND_JAPAN_LOW + 50 * f;
- else
- freq = WL1273_BAND_OTHER_LOW + 50 * f;
- } else {
- r = core->read(core, WL1273_CHANL_SET, &f);
- if (r)
- return r;
-
- freq = f * 10;
- }
-
- return freq;
-}
-
-/**
- * wl1273_fm_upload_firmware_patch() - Upload the firmware.
- * @radio: A pointer to the device struct.
- *
- * The firmware file consists of arrays of bytes where the first byte
- * gives the array length. The first byte in the file gives the
- * number of these arrays.
- */
-static int wl1273_fm_upload_firmware_patch(struct wl1273_device *radio)
-{
- struct wl1273_core *core = radio->core;
- unsigned int packet_num;
- const struct firmware *fw_p;
- const char *fw_name = "radio-wl1273-fw.bin";
- struct device *dev = radio->dev;
- __u8 *ptr;
- int r;
-
- dev_dbg(dev, "%s:\n", __func__);
-
- /*
- * Uploading the firmware patch is not always necessary,
- * so we only print an info message.
- */
- if (request_firmware(&fw_p, fw_name, dev)) {
- dev_info(dev, "%s - %s not found\n", __func__, fw_name);
-
- return 0;
- }
-
- ptr = (__u8 *) fw_p->data;
- packet_num = ptr[0];
- dev_dbg(dev, "%s: packets: %d\n", __func__, packet_num);
-
- r = wl1273_fm_write_fw(core, ptr + 1, packet_num);
- if (r) {
- dev_err(dev, "FW upload error: %d\n", r);
- goto out;
- }
-
- /* ignore possible error here */
- core->write(core, WL1273_RESET, 0);
-
- dev_dbg(dev, "%s - download OK, r: %d\n", __func__, r);
-out:
- release_firmware(fw_p);
- return r;
-}
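/*
 * Editor's note: a worked example of the patch-file layout described in
 * the kernel-doc above (first byte = number of packets, each packet
 * prefixed by its own length byte):
 *
 *   03 02 aa bb 01 cc 03 dd ee ff
 *    |  |        |     `-- packet 2: 3 bytes dd ee ff
 *    |  |        `-- packet 1: 1 byte cc
 *    |  `-- packet 0: 2 bytes aa bb
 *    `-- 3 packets in total
 *
 * wl1273_fm_write_fw() then walks the packets with fw = data + 1 and
 * len = data[0], sending each one as a single I2C message.
 */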
-
-static int wl1273_fm_stop(struct wl1273_device *radio)
-{
- struct wl1273_core *core = radio->core;
-
- if (core->mode == WL1273_MODE_RX) {
- int r = core->write(core, WL1273_POWER_SET,
- WL1273_POWER_SET_OFF);
- if (r)
- dev_err(radio->dev, "%s: POWER_SET fails: %d\n",
- __func__, r);
- } else if (core->mode == WL1273_MODE_TX) {
- int r = core->write(core, WL1273_PUPD_SET,
- WL1273_PUPD_SET_OFF);
- if (r)
- dev_err(radio->dev,
- "%s: PUPD_SET fails: %d\n", __func__, r);
- }
-
- if (core->pdata->disable) {
- core->pdata->disable();
- dev_dbg(radio->dev, "Back to reset\n");
- }
-
- return 0;
-}
-
-static int wl1273_fm_start(struct wl1273_device *radio, int new_mode)
-{
- struct wl1273_core *core = radio->core;
- struct wl1273_fm_platform_data *pdata = core->pdata;
- struct device *dev = radio->dev;
- int r = -EINVAL;
-
- if (pdata->enable && core->mode == WL1273_MODE_OFF) {
- dev_dbg(radio->dev, "Out of reset\n");
-
- pdata->enable();
- msleep(250);
- }
-
- if (new_mode == WL1273_MODE_RX) {
- u16 val = WL1273_POWER_SET_FM;
-
- if (radio->rds_on)
- val |= WL1273_POWER_SET_RDS;
-
- /* If this fails try again */
- r = core->write(core, WL1273_POWER_SET, val);
- if (r) {
- msleep(100);
-
- r = core->write(core, WL1273_POWER_SET, val);
- if (r) {
- dev_err(dev, "%s: POWER_SET fails\n", __func__);
- goto fail;
- }
- }
-
- /* rds buffer configuration */
- radio->wr_index = 0;
- radio->rd_index = 0;
-
- } else if (new_mode == WL1273_MODE_TX) {
- /* If this fails try again once */
- r = core->write(core, WL1273_PUPD_SET, WL1273_PUPD_SET_ON);
- if (r) {
- msleep(100);
- r = core->write(core, WL1273_PUPD_SET,
- WL1273_PUPD_SET_ON);
- if (r) {
- dev_err(dev, "%s: PUPD_SET fails\n", __func__);
- goto fail;
- }
- }
-
- if (radio->rds_on) {
- r = core->write(core, WL1273_RDS_DATA_ENB, 1);
- if (r) {
- dev_err(dev, "%s: RDS_DATA_ENB ON fails\n",
- __func__);
- goto fail;
- }
- } else {
- r = core->write(core, WL1273_RDS_DATA_ENB, 0);
- if (r) {
- dev_err(dev, "%s: RDS_DATA_ENB OFF fails\n",
- __func__);
- goto fail;
- }
- }
- } else {
- dev_warn(dev, "%s: Illegal mode.\n", __func__);
- }
-
- if (core->mode == WL1273_MODE_OFF) {
- r = wl1273_fm_upload_firmware_patch(radio);
- if (r)
- dev_warn(dev, "Firmware upload failed.\n");
-
- /*
- * Sometimes the chip is in a wrong power state at this point.
- * So we set the power once again.
- */
- if (new_mode == WL1273_MODE_RX) {
- u16 val = WL1273_POWER_SET_FM;
-
- if (radio->rds_on)
- val |= WL1273_POWER_SET_RDS;
-
- r = core->write(core, WL1273_POWER_SET, val);
- if (r) {
- dev_err(dev, "%s: POWER_SET fails\n", __func__);
- goto fail;
- }
- } else if (new_mode == WL1273_MODE_TX) {
- r = core->write(core, WL1273_PUPD_SET,
- WL1273_PUPD_SET_ON);
- if (r) {
- dev_err(dev, "%s: PUPD_SET fails\n", __func__);
- goto fail;
- }
- }
- }
-
- return 0;
-fail:
- if (pdata->disable)
- pdata->disable();
-
- dev_dbg(dev, "%s: return: %d\n", __func__, r);
- return r;
-}
-
-static int wl1273_fm_suspend(struct wl1273_device *radio)
-{
- struct wl1273_core *core = radio->core;
- int r;
-
- /* Cannot go from OFF to SUSPENDED */
- if (core->mode == WL1273_MODE_RX)
- r = core->write(core, WL1273_POWER_SET,
- WL1273_POWER_SET_RETENTION);
- else if (core->mode == WL1273_MODE_TX)
- r = core->write(core, WL1273_PUPD_SET,
- WL1273_PUPD_SET_RETENTION);
- else
- r = -EINVAL;
-
- if (r) {
- dev_err(radio->dev, "%s: POWER_SET fails: %d\n", __func__, r);
- goto out;
- }
-
-out:
- return r;
-}
-
-static int wl1273_fm_set_mode(struct wl1273_device *radio, int mode)
-{
- struct wl1273_core *core = radio->core;
- struct device *dev = radio->dev;
- int old_mode;
- int r;
-
- dev_dbg(dev, "%s\n", __func__);
- dev_dbg(dev, "Forbidden modes: 0x%02x\n", radio->forbidden);
-
- old_mode = core->mode;
- if (mode & radio->forbidden) {
- r = -EPERM;
- goto out;
- }
-
- switch (mode) {
- case WL1273_MODE_RX:
- case WL1273_MODE_TX:
- r = wl1273_fm_start(radio, mode);
- if (r) {
- dev_err(dev, "%s: Cannot start.\n", __func__);
- wl1273_fm_stop(radio);
- goto out;
- }
-
- core->mode = mode;
- r = core->write(core, WL1273_INT_MASK_SET, radio->irq_flags);
- if (r) {
- dev_err(dev, "INT_MASK_SET fails.\n");
- goto out;
- }
-
- /* remember previous settings */
- if (mode == WL1273_MODE_RX) {
- r = wl1273_fm_set_rx_freq(radio, radio->rx_frequency);
- if (r) {
- dev_err(dev, "set freq fails: %d.\n", r);
- goto out;
- }
-
- r = core->set_volume(core, core->volume);
- if (r) {
- dev_err(dev, "set volume fails: %d.\n", r);
- goto out;
- }
-
- dev_dbg(dev, "%s: Set vol: %d.\n", __func__,
- core->volume);
- } else {
- r = wl1273_fm_set_tx_freq(radio, radio->tx_frequency);
- if (r) {
- dev_err(dev, "set freq fails: %d.\n", r);
- goto out;
- }
- }
-
- dev_dbg(radio->dev, "%s: Set audio mode.\n", __func__);
-
- r = core->set_audio(core, core->audio_mode);
- if (r)
- dev_err(dev, "Cannot set audio mode.\n");
- break;
-
- case WL1273_MODE_OFF:
- r = wl1273_fm_stop(radio);
- if (r)
- dev_err(dev, "%s: Off fails: %d\n", __func__, r);
- else
- core->mode = WL1273_MODE_OFF;
-
- break;
-
- case WL1273_MODE_SUSPENDED:
- r = wl1273_fm_suspend(radio);
- if (r)
- dev_err(dev, "%s: Suspend fails: %d\n", __func__, r);
- else
- core->mode = WL1273_MODE_SUSPENDED;
-
- break;
-
- default:
- dev_err(dev, "%s: Unknown mode: %d\n", __func__, mode);
- r = -EINVAL;
- break;
- }
-out:
- if (r)
- core->mode = old_mode;
-
- return r;
-}
-
-static int wl1273_fm_set_seek(struct wl1273_device *radio,
- unsigned int wrap_around,
- unsigned int seek_upward,
- int level)
-{
- struct wl1273_core *core = radio->core;
- int r = 0;
- unsigned int dir = (seek_upward == 0) ? 0 : 1;
- unsigned int f;
-
- f = radio->rx_frequency;
- dev_dbg(radio->dev, "rx_frequency: %d\n", f);
-
- if (dir && f + radio->spacing <= radio->rangehigh)
- r = wl1273_fm_set_rx_freq(radio, f + radio->spacing);
- else if (dir && wrap_around)
- r = wl1273_fm_set_rx_freq(radio, radio->rangelow);
- else if (f - radio->spacing >= radio->rangelow)
- r = wl1273_fm_set_rx_freq(radio, f - radio->spacing);
- else if (wrap_around)
- r = wl1273_fm_set_rx_freq(radio, radio->rangehigh);
-
- if (r)
- goto out;
-
- if (level < SCHAR_MIN || level > SCHAR_MAX)
- return -EINVAL;
-
- reinit_completion(&radio->busy);
- dev_dbg(radio->dev, "%s: BUSY\n", __func__);
-
- r = core->write(core, WL1273_INT_MASK_SET, radio->irq_flags);
- if (r)
- goto out;
-
- dev_dbg(radio->dev, "%s\n", __func__);
-
- r = core->write(core, WL1273_SEARCH_LVL_SET, level);
- if (r)
- goto out;
-
- r = core->write(core, WL1273_SEARCH_DIR_SET, dir);
- if (r)
- goto out;
-
- r = core->write(core, WL1273_TUNER_MODE_SET, TUNER_MODE_AUTO_SEEK);
- if (r)
- goto out;
-
- /* wait for the FR IRQ */
- wait_for_completion_timeout(&radio->busy, msecs_to_jiffies(1000));
- if (!(radio->irq_received & WL1273_BL_EVENT)) {
- r = -ETIMEDOUT;
- goto out;
- }
-
- radio->irq_received &= ~WL1273_BL_EVENT;
-
- if (!wrap_around)
- goto out;
-
- /* Wrap around */
- dev_dbg(radio->dev, "Wrap around in HW seek.\n");
-
- if (seek_upward)
- f = radio->rangelow;
- else
- f = radio->rangehigh;
-
- r = wl1273_fm_set_rx_freq(radio, f);
- if (r)
- goto out;
-
- reinit_completion(&radio->busy);
- dev_dbg(radio->dev, "%s: BUSY\n", __func__);
-
- r = core->write(core, WL1273_TUNER_MODE_SET, TUNER_MODE_AUTO_SEEK);
- if (r)
- goto out;
-
- /* wait for the FR IRQ */
- if (!wait_for_completion_timeout(&radio->busy, msecs_to_jiffies(1000)))
- r = -ETIMEDOUT;
-out:
- dev_dbg(radio->dev, "%s: Err: %d\n", __func__, r);
- return r;
-}
-
-/**
- * wl1273_fm_get_tx_ctune() - Get the TX tuning capacitor value.
- * @radio: A pointer to the device struct.
- */
-static unsigned int wl1273_fm_get_tx_ctune(struct wl1273_device *radio)
-{
- struct wl1273_core *core = radio->core;
- struct device *dev = radio->dev;
- u16 val;
- int r;
-
- if (core->mode == WL1273_MODE_OFF ||
- core->mode == WL1273_MODE_SUSPENDED)
- return -EPERM;
-
- r = core->read(core, WL1273_READ_FMANT_TUNE_VALUE, &val);
- if (r) {
- dev_err(dev, "%s: read error: %d\n", __func__, r);
- goto out;
- }
-
-out:
- return val;
-}
-
-/**
- * wl1273_fm_set_preemphasis() - Set the TX pre-emphasis value.
- * @radio: A pointer to the device struct.
- * @preemphasis: The new pre-emphasis value.
- *
- * Possible pre-emphasis values are: V4L2_PREEMPHASIS_DISABLED,
- * V4L2_PREEMPHASIS_50_uS and V4L2_PREEMPHASIS_75_uS.
- */
-static int wl1273_fm_set_preemphasis(struct wl1273_device *radio,
- unsigned int preemphasis)
-{
- struct wl1273_core *core = radio->core;
- int r;
- u16 em;
-
- if (core->mode == WL1273_MODE_OFF ||
- core->mode == WL1273_MODE_SUSPENDED)
- return -EPERM;
-
- mutex_lock(&core->lock);
-
- switch (preemphasis) {
- case V4L2_PREEMPHASIS_DISABLED:
- em = 1;
- break;
- case V4L2_PREEMPHASIS_50_uS:
- em = 0;
- break;
- case V4L2_PREEMPHASIS_75_uS:
- em = 2;
- break;
- default:
- r = -EINVAL;
- goto out;
- }
-
- r = core->write(core, WL1273_PREMPH_SET, em);
- if (r)
- goto out;
-
- radio->preemphasis = preemphasis;
-
-out:
- mutex_unlock(&core->lock);
- return r;
-}
-
-static int wl1273_fm_rds_on(struct wl1273_device *radio)
-{
- struct wl1273_core *core = radio->core;
- int r;
-
- dev_dbg(radio->dev, "%s\n", __func__);
- if (radio->rds_on)
- return 0;
-
- r = core->write(core, WL1273_POWER_SET,
- WL1273_POWER_SET_FM | WL1273_POWER_SET_RDS);
- if (r)
- goto out;
-
- r = wl1273_fm_set_rx_freq(radio, radio->rx_frequency);
- if (r)
- dev_err(radio->dev, "set freq fails: %d.\n", r);
-out:
- return r;
-}
-
-static int wl1273_fm_rds_off(struct wl1273_device *radio)
-{
- struct wl1273_core *core = radio->core;
- int r;
-
- if (!radio->rds_on)
- return 0;
-
- radio->irq_flags &= ~WL1273_RDS_EVENT;
-
- r = core->write(core, WL1273_INT_MASK_SET, radio->irq_flags);
- if (r)
- goto out;
-
- /* Service pending read */
- wake_up_interruptible(&radio->read_queue);
-
- dev_dbg(radio->dev, "%s\n", __func__);
-
- r = core->write(core, WL1273_POWER_SET, WL1273_POWER_SET_FM);
- if (r)
- goto out;
-
- r = wl1273_fm_set_rx_freq(radio, radio->rx_frequency);
- if (r)
- dev_err(radio->dev, "set freq fails: %d.\n", r);
-out:
- dev_dbg(radio->dev, "%s: exiting...\n", __func__);
-
- return r;
-}
-
-static int wl1273_fm_set_rds(struct wl1273_device *radio, unsigned int new_mode)
-{
- int r = 0;
- struct wl1273_core *core = radio->core;
-
- if (core->mode == WL1273_MODE_OFF ||
- core->mode == WL1273_MODE_SUSPENDED)
- return -EPERM;
-
- if (new_mode == WL1273_RDS_RESET) {
- r = core->write(core, WL1273_RDS_CNTRL_SET, 1);
- return r;
- }
-
- if (core->mode == WL1273_MODE_TX && new_mode == WL1273_RDS_OFF) {
- r = core->write(core, WL1273_RDS_DATA_ENB, 0);
- } else if (core->mode == WL1273_MODE_TX && new_mode == WL1273_RDS_ON) {
- r = core->write(core, WL1273_RDS_DATA_ENB, 1);
- } else if (core->mode == WL1273_MODE_RX && new_mode == WL1273_RDS_OFF) {
- r = wl1273_fm_rds_off(radio);
- } else if (core->mode == WL1273_MODE_RX && new_mode == WL1273_RDS_ON) {
- r = wl1273_fm_rds_on(radio);
- } else {
- dev_err(radio->dev, "%s: Unknown mode: %d\n",
- __func__, new_mode);
- r = -EINVAL;
- }
-
- if (!r)
- radio->rds_on = new_mode == WL1273_RDS_ON;
-
- return r;
-}
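/*
 * Editor's note: the mode/RDS combinations handled above, for quick
 * reference:
 *   any mode + RDS_RESET -> write RDS_CNTRL_SET = 1 and return
 *   TX + RDS_ON          -> RDS_DATA_ENB = 1
 *   TX + RDS_OFF         -> RDS_DATA_ENB = 0
 *   RX + RDS_ON          -> wl1273_fm_rds_on()  (power FM+RDS, retune)
 *   RX + RDS_OFF         -> wl1273_fm_rds_off() (mask RDS IRQ, FM only)
 * On success, radio->rds_on tracks whether the result was RDS_ON.
 */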
-
-static ssize_t wl1273_fm_fops_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct wl1273_device *radio = video_get_drvdata(video_devdata(file));
- struct wl1273_core *core = radio->core;
- u16 val;
- int r;
-
- dev_dbg(radio->dev, "%s\n", __func__);
-
- if (core->mode != WL1273_MODE_TX)
- return count;
-
- if (radio->rds_users == 0) {
- dev_warn(radio->dev, "%s: RDS not on.\n", __func__);
- return 0;
- }
-
- if (mutex_lock_interruptible(&core->lock))
- return -EINTR;
- /*
- * Multiple processes can open the device, but only
- * one gets to write to it.
- */
- if (radio->owner && radio->owner != file) {
- r = -EBUSY;
- goto out;
- }
- radio->owner = file;
-
- /* Manual Mode */
- if (count > 255)
- val = 255;
- else
- val = count;
-
- core->write(core, WL1273_RDS_CONFIG_DATA_SET, val);
-
- if (copy_from_user(radio->write_buf + 1, buf, val)) {
- r = -EFAULT;
- goto out;
- }
-
- dev_dbg(radio->dev, "Count: %d\n", val);
- dev_dbg(radio->dev, "From user: \"%s\"\n", radio->write_buf);
-
- radio->write_buf[0] = WL1273_RDS_DATA_SET;
- core->write_data(core, radio->write_buf, val + 1);
-
- r = val;
-out:
- mutex_unlock(&core->lock);
-
- return r;
-}
-
-static __poll_t wl1273_fm_fops_poll(struct file *file,
- struct poll_table_struct *pts)
-{
- struct wl1273_device *radio = video_get_drvdata(video_devdata(file));
- struct wl1273_core *core = radio->core;
-
- if (radio->owner && radio->owner != file)
- return EPOLLERR;
-
- radio->owner = file;
-
- if (core->mode == WL1273_MODE_RX) {
- poll_wait(file, &radio->read_queue, pts);
-
- if (radio->rd_index != radio->wr_index)
- return EPOLLIN | EPOLLRDNORM;
-
- } else if (core->mode == WL1273_MODE_TX) {
- return EPOLLOUT | EPOLLWRNORM;
- }
-
- return 0;
-}
-
-static int wl1273_fm_fops_open(struct file *file)
-{
- struct wl1273_device *radio = video_get_drvdata(video_devdata(file));
- struct wl1273_core *core = radio->core;
- int r = 0;
-
- dev_dbg(radio->dev, "%s\n", __func__);
-
- if (core->mode == WL1273_MODE_RX && radio->rds_on &&
- !radio->rds_users) {
- dev_dbg(radio->dev, "%s: Mode: %d\n", __func__, core->mode);
-
- if (mutex_lock_interruptible(&core->lock))
- return -EINTR;
-
- radio->irq_flags |= WL1273_RDS_EVENT;
-
- r = core->write(core, WL1273_INT_MASK_SET,
- radio->irq_flags);
- if (r) {
- mutex_unlock(&core->lock);
- goto out;
- }
-
- radio->rds_users++;
-
- mutex_unlock(&core->lock);
- }
-out:
- return r;
-}
-
-static int wl1273_fm_fops_release(struct file *file)
-{
- struct wl1273_device *radio = video_get_drvdata(video_devdata(file));
- struct wl1273_core *core = radio->core;
- int r = 0;
-
- dev_dbg(radio->dev, "%s\n", __func__);
-
- if (radio->rds_users > 0) {
- radio->rds_users--;
- if (radio->rds_users == 0) {
- mutex_lock(&core->lock);
-
- radio->irq_flags &= ~WL1273_RDS_EVENT;
-
- if (core->mode == WL1273_MODE_RX) {
- r = core->write(core,
- WL1273_INT_MASK_SET,
- radio->irq_flags);
- if (r) {
- mutex_unlock(&core->lock);
- goto out;
- }
- }
- mutex_unlock(&core->lock);
- }
- }
-
- if (file == radio->owner)
- radio->owner = NULL;
-out:
- return r;
-}
-
-static ssize_t wl1273_fm_fops_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- int r = 0;
- struct wl1273_device *radio = video_get_drvdata(video_devdata(file));
- struct wl1273_core *core = radio->core;
- unsigned int block_count = 0;
- u16 val;
-
- dev_dbg(radio->dev, "%s\n", __func__);
-
- if (core->mode != WL1273_MODE_RX)
- return 0;
-
- if (radio->rds_users == 0) {
- dev_warn(radio->dev, "%s: RDS not on.\n", __func__);
- return 0;
- }
-
- if (mutex_lock_interruptible(&core->lock))
- return -EINTR;
-
- /*
- * Multiple processes can open the device, but only
- * one at a time gets read access.
- */
- if (radio->owner && radio->owner != file) {
- r = -EBUSY;
- goto out;
- }
- radio->owner = file;
-
- r = core->read(core, WL1273_RDS_SYNC_GET, &val);
- if (r) {
- dev_err(radio->dev, "%s: Get RDS_SYNC fails.\n", __func__);
- goto out;
- } else if (val == 0) {
- dev_info(radio->dev, "RDS_SYNC: Not synchronized\n");
- r = -ENODATA;
- goto out;
- }
-
- /* block if no new data available */
- while (radio->wr_index == radio->rd_index) {
- if (file->f_flags & O_NONBLOCK) {
- r = -EWOULDBLOCK;
- goto out;
- }
-
- dev_dbg(radio->dev, "%s: Wait for RDS data.\n", __func__);
- if (wait_event_interruptible(radio->read_queue,
- radio->wr_index !=
- radio->rd_index) < 0) {
- r = -EINTR;
- goto out;
- }
- }
-
- /* calculate block count from byte count */
- count /= RDS_BLOCK_SIZE;
-
- /* copy RDS blocks from the internal buffer and to user buffer */
- while (block_count < count) {
- if (radio->rd_index == radio->wr_index)
- break;
-
- /* always transfer complete RDS blocks */
- if (copy_to_user(buf, &radio->buffer[radio->rd_index],
- RDS_BLOCK_SIZE))
- break;
-
- /* increment and wrap the read pointer */
- radio->rd_index += RDS_BLOCK_SIZE;
- if (radio->rd_index >= radio->buf_size)
- radio->rd_index = 0;
-
- /* increment counters */
- block_count++;
- buf += RDS_BLOCK_SIZE;
- r += RDS_BLOCK_SIZE;
- }
-
-out:
- dev_dbg(radio->dev, "%s: exit\n", __func__);
- mutex_unlock(&core->lock);
-
- return r;
-}
-
-static const struct v4l2_file_operations wl1273_fops = {
- .owner = THIS_MODULE,
- .read = wl1273_fm_fops_read,
- .write = wl1273_fm_fops_write,
- .poll = wl1273_fm_fops_poll,
- .unlocked_ioctl = video_ioctl2,
- .open = wl1273_fm_fops_open,
- .release = wl1273_fm_fops_release,
-};
-
-static int wl1273_fm_vidioc_querycap(struct file *file, void *priv,
- struct v4l2_capability *capability)
-{
- struct wl1273_device *radio = video_get_drvdata(video_devdata(file));
-
- dev_dbg(radio->dev, "%s\n", __func__);
-
- strscpy(capability->driver, WL1273_FM_DRIVER_NAME,
- sizeof(capability->driver));
- strscpy(capability->card, "TI Wl1273 FM Radio",
- sizeof(capability->card));
- strscpy(capability->bus_info, radio->bus_type,
- sizeof(capability->bus_info));
- return 0;
-}
-
-static int wl1273_fm_vidioc_g_input(struct file *file, void *priv,
- unsigned int *i)
-{
- struct wl1273_device *radio = video_get_drvdata(video_devdata(file));
-
- dev_dbg(radio->dev, "%s\n", __func__);
-
- *i = 0;
-
- return 0;
-}
-
-static int wl1273_fm_vidioc_s_input(struct file *file, void *priv,
- unsigned int i)
-{
- struct wl1273_device *radio = video_get_drvdata(video_devdata(file));
-
- dev_dbg(radio->dev, "%s\n", __func__);
-
- if (i != 0)
- return -EINVAL;
-
- return 0;
-}
-
-/**
- * wl1273_fm_set_tx_power() - Set the transmission power value.
- * @radio: A pointer to the device struct.
- * @power: The new power value.
- */
-static int wl1273_fm_set_tx_power(struct wl1273_device *radio, u16 power)
-{
- struct wl1273_core *core = radio->core;
- int r;
-
- if (core->mode == WL1273_MODE_OFF ||
- core->mode == WL1273_MODE_SUSPENDED)
- return -EPERM;
-
- mutex_lock(&core->lock);
-
- /* Convert the dBuV value to the chip's representation */
- r = core->write(core, WL1273_POWER_LEV_SET, 122 - power);
- if (r)
- goto out;
-
- radio->tx_power = power;
-
-out:
- mutex_unlock(&core->lock);
- return r;
-}
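/*
 * Editor's note: the register encoding is apparently inverted relative
 * to the requested dBuV level (122 - power), so e.g. the driver's
 * probe-time default of 118 dBuV lands in WL1273_POWER_LEV_SET as 4.
 */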
-
-#define WL1273_SPACING_50kHz 1
-#define WL1273_SPACING_100kHz 2
-#define WL1273_SPACING_200kHz 4
-
-static int wl1273_fm_tx_set_spacing(struct wl1273_device *radio,
- unsigned int spacing)
-{
- struct wl1273_core *core = radio->core;
- int r;
-
- if (spacing == 0) {
- r = core->write(core, WL1273_SCAN_SPACING_SET,
- WL1273_SPACING_100kHz);
- radio->spacing = 100;
- } else if (spacing - 50000 < 25000) {
- r = core->write(core, WL1273_SCAN_SPACING_SET,
- WL1273_SPACING_50kHz);
- radio->spacing = 50;
- } else if (spacing - 100000 < 50000) {
- r = core->write(core, WL1273_SCAN_SPACING_SET,
- WL1273_SPACING_100kHz);
- radio->spacing = 100;
- } else {
- r = core->write(core, WL1273_SCAN_SPACING_SET,
- WL1273_SPACING_200kHz);
- radio->spacing = 200;
- }
-
- return r;
-}
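/*
 * Editor's note: 'spacing' is unsigned, so the two range checks above
 * rely on wrap-around: for values below 50000 both subtractions wrap to
 * huge numbers and the checks fail. The effective mapping is:
 *   0                 -> 100 kHz (default)
 *   [50000, 75000)    -> 50 kHz
 *   [100000, 150000)  -> 100 kHz
 *   anything else     -> 200 kHz
 */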
-
-static int wl1273_fm_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct wl1273_device *radio = ctrl->priv;
- struct wl1273_core *core = radio->core;
-
- dev_dbg(radio->dev, "%s\n", __func__);
-
- if (mutex_lock_interruptible(&core->lock))
- return -EINTR;
-
- switch (ctrl->id) {
- case V4L2_CID_TUNE_ANTENNA_CAPACITOR:
- ctrl->val = wl1273_fm_get_tx_ctune(radio);
- break;
-
- default:
- dev_warn(radio->dev, "%s: Unknown IOCTL: %d\n",
- __func__, ctrl->id);
- break;
- }
-
- mutex_unlock(&core->lock);
-
- return 0;
-}
-
-#define WL1273_MUTE_SOFT_ENABLE (1 << 0)
-#define WL1273_MUTE_AC (1 << 1)
-#define WL1273_MUTE_HARD_LEFT (1 << 2)
-#define WL1273_MUTE_HARD_RIGHT (1 << 3)
-#define WL1273_MUTE_SOFT_FORCE (1 << 4)
-
-static inline struct wl1273_device *to_radio(struct v4l2_ctrl *ctrl)
-{
- return container_of(ctrl->handler, struct wl1273_device, ctrl_handler);
-}
-
-static int wl1273_fm_s_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct wl1273_device *radio = to_radio(ctrl);
- struct wl1273_core *core = radio->core;
- int r = 0;
-
- dev_dbg(radio->dev, "%s\n", __func__);
-
- switch (ctrl->id) {
- case V4L2_CID_AUDIO_MUTE:
- if (mutex_lock_interruptible(&core->lock))
- return -EINTR;
-
- if (core->mode == WL1273_MODE_RX && ctrl->val)
- r = core->write(core,
- WL1273_MUTE_STATUS_SET,
- WL1273_MUTE_HARD_LEFT |
- WL1273_MUTE_HARD_RIGHT);
- else if (core->mode == WL1273_MODE_RX)
- r = core->write(core,
- WL1273_MUTE_STATUS_SET, 0x0);
- else if (core->mode == WL1273_MODE_TX && ctrl->val)
- r = core->write(core, WL1273_MUTE, 1);
- else if (core->mode == WL1273_MODE_TX)
- r = core->write(core, WL1273_MUTE, 0);
-
- mutex_unlock(&core->lock);
- break;
-
- case V4L2_CID_AUDIO_VOLUME:
- if (ctrl->val == 0)
- r = wl1273_fm_set_mode(radio, WL1273_MODE_OFF);
- else
- r = core->set_volume(core, core->volume);
- break;
-
- case V4L2_CID_TUNE_PREEMPHASIS:
- r = wl1273_fm_set_preemphasis(radio, ctrl->val);
- break;
-
- case V4L2_CID_TUNE_POWER_LEVEL:
- r = wl1273_fm_set_tx_power(radio, ctrl->val);
- break;
-
- default:
- dev_warn(radio->dev, "%s: Unknown IOCTL: %d\n",
- __func__, ctrl->id);
- break;
- }
-
- dev_dbg(radio->dev, "%s\n", __func__);
- return r;
-}
-
-static int wl1273_fm_vidioc_g_audio(struct file *file, void *priv,
- struct v4l2_audio *audio)
-{
- struct wl1273_device *radio = video_get_drvdata(video_devdata(file));
-
- dev_dbg(radio->dev, "%s\n", __func__);
-
- if (audio->index > 1)
- return -EINVAL;
-
- strscpy(audio->name, "Radio", sizeof(audio->name));
- audio->capability = V4L2_AUDCAP_STEREO;
-
- return 0;
-}
-
-static int wl1273_fm_vidioc_s_audio(struct file *file, void *priv,
- const struct v4l2_audio *audio)
-{
- struct wl1273_device *radio = video_get_drvdata(video_devdata(file));
-
- dev_dbg(radio->dev, "%s\n", __func__);
-
- if (audio->index != 0)
- return -EINVAL;
-
- return 0;
-}
-
-#define WL1273_RDS_NOT_SYNCHRONIZED 0
-#define WL1273_RDS_SYNCHRONIZED 1
-
-static int wl1273_fm_vidioc_g_tuner(struct file *file, void *priv,
- struct v4l2_tuner *tuner)
-{
- struct wl1273_device *radio = video_get_drvdata(video_devdata(file));
- struct wl1273_core *core = radio->core;
- u16 val;
- int r;
-
- dev_dbg(radio->dev, "%s\n", __func__);
-
- if (tuner->index > 0)
- return -EINVAL;
-
- strscpy(tuner->name, WL1273_FM_DRIVER_NAME, sizeof(tuner->name));
- tuner->type = V4L2_TUNER_RADIO;
-
- tuner->rangelow = WL1273_FREQ(WL1273_BAND_JAPAN_LOW);
- tuner->rangehigh = WL1273_FREQ(WL1273_BAND_OTHER_HIGH);
-
- tuner->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_RDS |
- V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_RDS_BLOCK_IO |
- V4L2_TUNER_CAP_HWSEEK_BOUNDED | V4L2_TUNER_CAP_HWSEEK_WRAP;
-
- if (radio->stereo)
- tuner->audmode = V4L2_TUNER_MODE_STEREO;
- else
- tuner->audmode = V4L2_TUNER_MODE_MONO;
-
- if (core->mode != WL1273_MODE_RX)
- return 0;
-
- if (mutex_lock_interruptible(&core->lock))
- return -EINTR;
-
- r = core->read(core, WL1273_STEREO_GET, &val);
- if (r)
- goto out;
-
- if (val == 1)
- tuner->rxsubchans = V4L2_TUNER_SUB_STEREO;
- else
- tuner->rxsubchans = V4L2_TUNER_SUB_MONO;
-
- r = core->read(core, WL1273_RSSI_LVL_GET, &val);
- if (r)
- goto out;
-
- tuner->signal = (s16) val;
- dev_dbg(radio->dev, "Signal: %d\n", tuner->signal);
-
- tuner->afc = 0;
-
- r = core->read(core, WL1273_RDS_SYNC_GET, &val);
- if (r)
- goto out;
-
- if (val == WL1273_RDS_SYNCHRONIZED)
- tuner->rxsubchans |= V4L2_TUNER_SUB_RDS;
-out:
- mutex_unlock(&core->lock);
-
- return r;
-}
-
-static int wl1273_fm_vidioc_s_tuner(struct file *file, void *priv,
- const struct v4l2_tuner *tuner)
-{
- struct wl1273_device *radio = video_get_drvdata(video_devdata(file));
- struct wl1273_core *core = radio->core;
- int r = 0;
-
- dev_dbg(radio->dev, "%s\n", __func__);
- dev_dbg(radio->dev, "tuner->index: %d\n", tuner->index);
- dev_dbg(radio->dev, "tuner->name: %s\n", tuner->name);
- dev_dbg(radio->dev, "tuner->capability: 0x%04x\n", tuner->capability);
- dev_dbg(radio->dev, "tuner->rxsubchans: 0x%04x\n", tuner->rxsubchans);
- dev_dbg(radio->dev, "tuner->rangelow: %d\n", tuner->rangelow);
- dev_dbg(radio->dev, "tuner->rangehigh: %d\n", tuner->rangehigh);
-
- if (tuner->index > 0)
- return -EINVAL;
-
- if (mutex_lock_interruptible(&core->lock))
- return -EINTR;
-
- r = wl1273_fm_set_mode(radio, WL1273_MODE_RX);
- if (r)
- goto out;
-
- if (tuner->rxsubchans & V4L2_TUNER_SUB_RDS)
- r = wl1273_fm_set_rds(radio, WL1273_RDS_ON);
- else
- r = wl1273_fm_set_rds(radio, WL1273_RDS_OFF);
-
- if (r)
- dev_warn(radio->dev, "%s: RDS fails: %d\n", __func__, r);
-
- if (tuner->audmode == V4L2_TUNER_MODE_MONO) {
- r = core->write(core, WL1273_MOST_MODE_SET, WL1273_RX_MONO);
- if (r < 0) {
- dev_warn(radio->dev, "%s: MOST_MODE fails: %d\n",
- __func__, r);
- goto out;
- }
- radio->stereo = false;
- } else if (tuner->audmode == V4L2_TUNER_MODE_STEREO) {
- r = core->write(core, WL1273_MOST_MODE_SET, WL1273_RX_STEREO);
- if (r < 0) {
- dev_warn(radio->dev, "%s: MOST_MODE fails: %d\n",
- __func__, r);
- goto out;
- }
- radio->stereo = true;
- } else {
- dev_err(radio->dev, "%s: tuner->audmode: %d\n",
- __func__, tuner->audmode);
- r = -EINVAL;
- goto out;
- }
-
-out:
- mutex_unlock(&core->lock);
-
- return r;
-}
-
-static int wl1273_fm_vidioc_g_frequency(struct file *file, void *priv,
- struct v4l2_frequency *freq)
-{
- struct wl1273_device *radio = video_get_drvdata(video_devdata(file));
- struct wl1273_core *core = radio->core;
-
- dev_dbg(radio->dev, "%s\n", __func__);
-
- if (mutex_lock_interruptible(&core->lock))
- return -EINTR;
-
- freq->type = V4L2_TUNER_RADIO;
- freq->frequency = WL1273_FREQ(wl1273_fm_get_freq(radio));
-
- mutex_unlock(&core->lock);
-
- return 0;
-}
-
-static int wl1273_fm_vidioc_s_frequency(struct file *file, void *priv,
- const struct v4l2_frequency *freq)
-{
- struct wl1273_device *radio = video_get_drvdata(video_devdata(file));
- struct wl1273_core *core = radio->core;
- int r;
-
- dev_dbg(radio->dev, "%s: %d\n", __func__, freq->frequency);
-
- if (freq->type != V4L2_TUNER_RADIO) {
- dev_dbg(radio->dev,
- "freq->type != V4L2_TUNER_RADIO: %d\n", freq->type);
- return -EINVAL;
- }
-
- if (mutex_lock_interruptible(&core->lock))
- return -EINTR;
-
- if (core->mode == WL1273_MODE_RX) {
- dev_dbg(radio->dev, "freq: %d\n", freq->frequency);
-
- r = wl1273_fm_set_rx_freq(radio,
- WL1273_INV_FREQ(freq->frequency));
- if (r)
- dev_warn(radio->dev, WL1273_FM_DRIVER_NAME
- ": set frequency failed with %d\n", r);
- } else {
- r = wl1273_fm_set_tx_freq(radio,
- WL1273_INV_FREQ(freq->frequency));
- if (r)
- dev_warn(radio->dev, WL1273_FM_DRIVER_NAME
- ": set frequency failed with %d\n", r);
- }
-
- mutex_unlock(&core->lock);
-
- dev_dbg(radio->dev, "wl1273_vidioc_s_frequency: DONE\n");
- return r;
-}
-
-#define WL1273_DEFAULT_SEEK_LEVEL 7
-
-static int wl1273_fm_vidioc_s_hw_freq_seek(struct file *file, void *priv,
- const struct v4l2_hw_freq_seek *seek)
-{
- struct wl1273_device *radio = video_get_drvdata(video_devdata(file));
- struct wl1273_core *core = radio->core;
- int r;
-
- dev_dbg(radio->dev, "%s\n", __func__);
-
- if (seek->tuner != 0 || seek->type != V4L2_TUNER_RADIO)
- return -EINVAL;
-
- if (file->f_flags & O_NONBLOCK)
- return -EWOULDBLOCK;
-
- if (mutex_lock_interruptible(&core->lock))
- return -EINTR;
-
- r = wl1273_fm_set_mode(radio, WL1273_MODE_RX);
- if (r)
- goto out;
-
- r = wl1273_fm_tx_set_spacing(radio, seek->spacing);
- if (r)
- dev_warn(radio->dev, "HW seek failed: %d\n", r);
-
- r = wl1273_fm_set_seek(radio, seek->wrap_around, seek->seek_upward,
- WL1273_DEFAULT_SEEK_LEVEL);
- if (r)
- dev_warn(radio->dev, "HW seek failed: %d\n", r);
-
-out:
- mutex_unlock(&core->lock);
- return r;
-}
-
-static int wl1273_fm_vidioc_s_modulator(struct file *file, void *priv,
- const struct v4l2_modulator *modulator)
-{
- struct wl1273_device *radio = video_get_drvdata(video_devdata(file));
- struct wl1273_core *core = radio->core;
- int r = 0;
-
- dev_dbg(radio->dev, "%s\n", __func__);
-
- if (modulator->index > 0)
- return -EINVAL;
-
- if (mutex_lock_interruptible(&core->lock))
- return -EINTR;
-
- r = wl1273_fm_set_mode(radio, WL1273_MODE_TX);
- if (r)
- goto out;
-
- if (modulator->txsubchans & V4L2_TUNER_SUB_RDS)
- r = wl1273_fm_set_rds(radio, WL1273_RDS_ON);
- else
- r = wl1273_fm_set_rds(radio, WL1273_RDS_OFF);
-
- if (modulator->txsubchans & V4L2_TUNER_SUB_MONO)
- r = core->write(core, WL1273_MONO_SET, WL1273_TX_MONO);
- else
- r = core->write(core, WL1273_MONO_SET,
- WL1273_RX_STEREO);
- if (r < 0)
- dev_warn(radio->dev, WL1273_FM_DRIVER_NAME
- "MONO_SET fails: %d\n", r);
-out:
- mutex_unlock(&core->lock);
-
- return r;
-}
-
-static int wl1273_fm_vidioc_g_modulator(struct file *file, void *priv,
- struct v4l2_modulator *modulator)
-{
- struct wl1273_device *radio = video_get_drvdata(video_devdata(file));
- struct wl1273_core *core = radio->core;
- u16 val;
- int r;
-
- dev_dbg(radio->dev, "%s\n", __func__);
-
- strscpy(modulator->name, WL1273_FM_DRIVER_NAME,
- sizeof(modulator->name));
-
- modulator->rangelow = WL1273_FREQ(WL1273_BAND_JAPAN_LOW);
- modulator->rangehigh = WL1273_FREQ(WL1273_BAND_OTHER_HIGH);
-
- modulator->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_RDS |
- V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_RDS_BLOCK_IO;
-
- if (core->mode != WL1273_MODE_TX)
- return 0;
-
- if (mutex_lock_interruptible(&core->lock))
- return -EINTR;
-
- r = core->read(core, WL1273_MONO_SET, &val);
- if (r)
- goto out;
-
- if (val == WL1273_TX_STEREO)
- modulator->txsubchans = V4L2_TUNER_SUB_STEREO;
- else
- modulator->txsubchans = V4L2_TUNER_SUB_MONO;
-
- if (radio->rds_on)
- modulator->txsubchans |= V4L2_TUNER_SUB_RDS;
-out:
- mutex_unlock(&core->lock);
-
- return 0;
-}
-
-static int wl1273_fm_vidioc_log_status(struct file *file, void *priv)
-{
- struct wl1273_device *radio = video_get_drvdata(video_devdata(file));
- struct wl1273_core *core = radio->core;
- struct device *dev = radio->dev;
- u16 val;
- int r;
-
- dev_info(dev, DRIVER_DESC);
-
- if (core->mode == WL1273_MODE_OFF) {
- dev_info(dev, "Mode: Off\n");
- return 0;
- }
-
- if (core->mode == WL1273_MODE_SUSPENDED) {
- dev_info(dev, "Mode: Suspended\n");
- return 0;
- }
-
- r = core->read(core, WL1273_ASIC_ID_GET, &val);
- if (r)
- dev_err(dev, "%s: Get ASIC_ID fails.\n", __func__);
- else
- dev_info(dev, "ASIC_ID: 0x%04x\n", val);
-
- r = core->read(core, WL1273_ASIC_VER_GET, &val);
- if (r)
- dev_err(dev, "%s: Get ASIC_VER fails.\n", __func__);
- else
- dev_info(dev, "ASIC Version: 0x%04x\n", val);
-
- r = core->read(core, WL1273_FIRM_VER_GET, &val);
- if (r)
- dev_err(dev, "%s: Get FIRM_VER fails.\n", __func__);
- else
- dev_info(dev, "FW version: %d(0x%04x)\n", val, val);
-
- r = core->read(core, WL1273_BAND_SET, &val);
- if (r)
- dev_err(dev, "%s: Get BAND fails.\n", __func__);
- else
- dev_info(dev, "BAND: %d\n", val);
-
- if (core->mode == WL1273_MODE_TX) {
- r = core->read(core, WL1273_PUPD_SET, &val);
- if (r)
- dev_err(dev, "%s: Get PUPD fails.\n", __func__);
- else
- dev_info(dev, "PUPD: 0x%04x\n", val);
-
- r = core->read(core, WL1273_CHANL_SET, &val);
- if (r)
- dev_err(dev, "%s: Get CHANL fails.\n", __func__);
- else
- dev_info(dev, "Tx frequency: %dkHz\n", val*10);
- } else if (core->mode == WL1273_MODE_RX) {
- int bf = radio->rangelow;
-
- r = core->read(core, WL1273_FREQ_SET, &val);
- if (r)
- dev_err(dev, "%s: Get FREQ fails.\n", __func__);
- else
- dev_info(dev, "RX Frequency: %dkHz\n", bf + val*50);
-
- r = core->read(core, WL1273_MOST_MODE_SET, &val);
- if (r)
- dev_err(dev, "%s: Get MOST_MODE fails.\n",
- __func__);
- else if (val == 0)
- dev_info(dev, "MOST_MODE: Stereo according to blend\n");
- else if (val == 1)
- dev_info(dev, "MOST_MODE: Force mono output\n");
- else
- dev_info(dev, "MOST_MODE: Unexpected value: %d\n", val);
-
- r = core->read(core, WL1273_MOST_BLEND_SET, &val);
- if (r)
- dev_err(dev, "%s: Get MOST_BLEND fails.\n", __func__);
- else if (val == 0)
- dev_info(dev,
- "MOST_BLEND: Switched blend & hysteresis.\n");
- else if (val == 1)
- dev_info(dev, "MOST_BLEND: Soft blend.\n");
- else
- dev_info(dev, "MOST_BLEND: Unexpected val: %d\n", val);
-
- r = core->read(core, WL1273_STEREO_GET, &val);
- if (r)
- dev_err(dev, "%s: Get STEREO fails.\n", __func__);
- else if (val == 0)
- dev_info(dev, "STEREO: Not detected\n");
- else if (val == 1)
- dev_info(dev, "STEREO: Detected\n");
- else
- dev_info(dev, "STEREO: Unexpected value: %d\n", val);
-
- r = core->read(core, WL1273_RSSI_LVL_GET, &val);
- if (r)
- dev_err(dev, "%s: Get RSSI_LVL fails.\n", __func__);
- else
- dev_info(dev, "RX signal strength: %d\n", (s16) val);
-
- r = core->read(core, WL1273_POWER_SET, &val);
- if (r)
- dev_err(dev, "%s: Get POWER fails.\n", __func__);
- else
- dev_info(dev, "POWER: 0x%04x\n", val);
-
- r = core->read(core, WL1273_INT_MASK_SET, &val);
- if (r)
- dev_err(dev, "%s: Get INT_MASK fails.\n", __func__);
- else
- dev_info(dev, "INT_MASK: 0x%04x\n", val);
-
- r = core->read(core, WL1273_RDS_SYNC_GET, &val);
- if (r)
- dev_err(dev, "%s: Get RDS_SYNC fails.\n",
- __func__);
- else if (val == 0)
- dev_info(dev, "RDS_SYNC: Not synchronized\n");
-
- else if (val == 1)
- dev_info(dev, "RDS_SYNC: Synchronized\n");
- else
- dev_info(dev, "RDS_SYNC: Unexpected value: %d\n", val);
-
- r = core->read(core, WL1273_I2S_MODE_CONFIG_SET, &val);
- if (r)
- dev_err(dev, "%s: Get I2S_MODE_CONFIG fails.\n",
- __func__);
- else
- dev_info(dev, "I2S_MODE_CONFIG: 0x%04x\n", val);
-
- r = core->read(core, WL1273_VOLUME_SET, &val);
- if (r)
- dev_err(dev, "%s: Get VOLUME fails.\n", __func__);
- else
- dev_info(dev, "VOLUME: 0x%04x\n", val);
- }
-
- return 0;
-}
-
-static void wl1273_vdev_release(struct video_device *dev)
-{
-}
-
-static const struct v4l2_ctrl_ops wl1273_ctrl_ops = {
- .s_ctrl = wl1273_fm_s_ctrl,
- .g_volatile_ctrl = wl1273_fm_g_volatile_ctrl,
-};
-
-static const struct v4l2_ioctl_ops wl1273_ioctl_ops = {
- .vidioc_querycap = wl1273_fm_vidioc_querycap,
- .vidioc_g_input = wl1273_fm_vidioc_g_input,
- .vidioc_s_input = wl1273_fm_vidioc_s_input,
- .vidioc_g_audio = wl1273_fm_vidioc_g_audio,
- .vidioc_s_audio = wl1273_fm_vidioc_s_audio,
- .vidioc_g_tuner = wl1273_fm_vidioc_g_tuner,
- .vidioc_s_tuner = wl1273_fm_vidioc_s_tuner,
- .vidioc_g_frequency = wl1273_fm_vidioc_g_frequency,
- .vidioc_s_frequency = wl1273_fm_vidioc_s_frequency,
- .vidioc_s_hw_freq_seek = wl1273_fm_vidioc_s_hw_freq_seek,
- .vidioc_g_modulator = wl1273_fm_vidioc_g_modulator,
- .vidioc_s_modulator = wl1273_fm_vidioc_s_modulator,
- .vidioc_log_status = wl1273_fm_vidioc_log_status,
-};
-
-static const struct video_device wl1273_viddev_template = {
- .fops = &wl1273_fops,
- .ioctl_ops = &wl1273_ioctl_ops,
- .name = WL1273_FM_DRIVER_NAME,
- .release = wl1273_vdev_release,
- .vfl_dir = VFL_DIR_TX,
- .device_caps = V4L2_CAP_HW_FREQ_SEEK | V4L2_CAP_TUNER |
- V4L2_CAP_RADIO | V4L2_CAP_AUDIO |
- V4L2_CAP_RDS_CAPTURE | V4L2_CAP_MODULATOR |
- V4L2_CAP_RDS_OUTPUT,
-};
-
-static void wl1273_fm_radio_remove(struct platform_device *pdev)
-{
- struct wl1273_device *radio = platform_get_drvdata(pdev);
- struct wl1273_core *core = radio->core;
-
- dev_info(&pdev->dev, "%s.\n", __func__);
-
- free_irq(core->client->irq, radio);
- core->pdata->free_resources();
-
- v4l2_ctrl_handler_free(&radio->ctrl_handler);
- video_unregister_device(&radio->videodev);
- v4l2_device_unregister(&radio->v4l2dev);
-}
-
-static int wl1273_fm_radio_probe(struct platform_device *pdev)
-{
- struct wl1273_core **core = pdev->dev.platform_data;
- struct wl1273_device *radio;
- struct v4l2_ctrl *ctrl;
- int r = 0;
-
- pr_debug("%s\n", __func__);
-
- if (!core) {
- dev_err(&pdev->dev, "No platform data.\n");
- r = -EINVAL;
- goto pdata_err;
- }
-
- radio = devm_kzalloc(&pdev->dev, sizeof(*radio), GFP_KERNEL);
- if (!radio) {
- r = -ENOMEM;
- goto pdata_err;
- }
-
- /* RDS buffer allocation */
- radio->buf_size = rds_buf * RDS_BLOCK_SIZE;
- radio->buffer = devm_kzalloc(&pdev->dev, radio->buf_size, GFP_KERNEL);
- if (!radio->buffer) {
- pr_err("Cannot allocate memory for RDS buffer.\n");
- r = -ENOMEM;
- goto pdata_err;
- }
-
- radio->core = *core;
- radio->irq_flags = WL1273_IRQ_MASK;
- radio->dev = &radio->core->client->dev;
- radio->rds_on = false;
- radio->core->mode = WL1273_MODE_OFF;
- radio->tx_power = 118;
- radio->core->audio_mode = WL1273_AUDIO_ANALOG;
- radio->band = WL1273_BAND_OTHER;
- radio->core->i2s_mode = WL1273_I2S_DEF_MODE;
- radio->core->channel_number = 2;
- radio->core->volume = WL1273_DEFAULT_VOLUME;
- radio->rx_frequency = WL1273_BAND_OTHER_LOW;
- radio->tx_frequency = WL1273_BAND_OTHER_HIGH;
- radio->rangelow = WL1273_BAND_OTHER_LOW;
- radio->rangehigh = WL1273_BAND_OTHER_HIGH;
- radio->stereo = true;
- radio->bus_type = "I2C";
-
- if (radio->core->pdata->request_resources) {
- r = radio->core->pdata->request_resources(radio->core->client);
- if (r) {
- dev_err(radio->dev, WL1273_FM_DRIVER_NAME
- ": Cannot get platform data\n");
- goto pdata_err;
- }
-
- dev_dbg(radio->dev, "irq: %d\n", radio->core->client->irq);
-
- r = request_threaded_irq(radio->core->client->irq, NULL,
- wl1273_fm_irq_thread_handler,
- IRQF_ONESHOT | IRQF_TRIGGER_FALLING,
- "wl1273-fm", radio);
- if (r < 0) {
- dev_err(radio->dev, WL1273_FM_DRIVER_NAME
- ": Unable to register IRQ handler: %d\n", r);
- goto err_request_irq;
- }
- } else {
- dev_err(radio->dev, WL1273_FM_DRIVER_NAME ": Core WL1273 IRQ not configured");
- r = -EINVAL;
- goto pdata_err;
- }
-
- init_completion(&radio->busy);
- init_waitqueue_head(&radio->read_queue);
-
- radio->write_buf = devm_kzalloc(&pdev->dev, 256, GFP_KERNEL);
- if (!radio->write_buf) {
- r = -ENOMEM;
- goto write_buf_err;
- }
-
- radio->dev = &pdev->dev;
- radio->v4l2dev.ctrl_handler = &radio->ctrl_handler;
- radio->rds_users = 0;
-
- r = v4l2_device_register(&pdev->dev, &radio->v4l2dev);
- if (r) {
- dev_err(&pdev->dev, "Cannot register v4l2_device.\n");
- goto write_buf_err;
- }
-
- /* V4L2 configuration */
- radio->videodev = wl1273_viddev_template;
-
- radio->videodev.v4l2_dev = &radio->v4l2dev;
-
- v4l2_ctrl_handler_init(&radio->ctrl_handler, 6);
-
- /* add in ascending ID order */
- v4l2_ctrl_new_std(&radio->ctrl_handler, &wl1273_ctrl_ops,
- V4L2_CID_AUDIO_VOLUME, 0, WL1273_MAX_VOLUME, 1,
- WL1273_DEFAULT_VOLUME);
-
- v4l2_ctrl_new_std(&radio->ctrl_handler, &wl1273_ctrl_ops,
- V4L2_CID_AUDIO_MUTE, 0, 1, 1, 1);
-
- v4l2_ctrl_new_std_menu(&radio->ctrl_handler, &wl1273_ctrl_ops,
- V4L2_CID_TUNE_PREEMPHASIS,
- V4L2_PREEMPHASIS_75_uS, 0x03,
- V4L2_PREEMPHASIS_50_uS);
-
- v4l2_ctrl_new_std(&radio->ctrl_handler, &wl1273_ctrl_ops,
- V4L2_CID_TUNE_POWER_LEVEL, 91, 122, 1, 118);
-
- ctrl = v4l2_ctrl_new_std(&radio->ctrl_handler, &wl1273_ctrl_ops,
- V4L2_CID_TUNE_ANTENNA_CAPACITOR,
- 0, 255, 1, 255);
- if (ctrl)
- ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
-
- if (radio->ctrl_handler.error) {
- r = radio->ctrl_handler.error;
- dev_err(&pdev->dev, "Ctrl handler error: %d\n", r);
- goto handler_init_err;
- }
-
- video_set_drvdata(&radio->videodev, radio);
- platform_set_drvdata(pdev, radio);
-
- /* register video device */
- r = video_register_device(&radio->videodev, VFL_TYPE_RADIO, radio_nr);
- if (r) {
- dev_err(&pdev->dev, WL1273_FM_DRIVER_NAME
- ": Could not register video device\n");
- goto handler_init_err;
- }
-
- return 0;
-
-handler_init_err:
- v4l2_ctrl_handler_free(&radio->ctrl_handler);
- v4l2_device_unregister(&radio->v4l2dev);
-write_buf_err:
- free_irq(radio->core->client->irq, radio);
-err_request_irq:
- radio->core->pdata->free_resources();
-pdata_err:
- return r;
-}
-
-static struct platform_driver wl1273_fm_radio_driver = {
- .probe = wl1273_fm_radio_probe,
- .remove = wl1273_fm_radio_remove,
- .driver = {
- .name = "wl1273_fm_radio",
- },
-};
-
-module_platform_driver(wl1273_fm_radio_driver);
-
-MODULE_AUTHOR("Matti Aaltonen <matti.j.aaltonen@nokia.com>");
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:wl1273_fm_radio");
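
The wl1273_fm_radio_probe() unwind above follows the kernel's goto-ladder idiom: each label releases only what was acquired before the failing step, in reverse order (handler_init_err frees the ctrl handler and v4l2 device, write_buf_err frees the IRQ, err_request_irq releases the platform resources). A minimal userspace sketch of the idiom, with generic names standing in for the driver's resources:

#include <stdio.h>
#include <stdlib.h>

static int acquire_irq(void)   { return 0; }   /* stands in for request_threaded_irq() */
static void release_irq(void)  { puts("irq released"); }
static int register_dev(void)  { return -1; }  /* stands in for v4l2 registration; fails here */

static int probe(void)
{
	int r;

	r = acquire_irq();
	if (r)
		goto out;                /* nothing acquired yet */

	r = register_dev();
	if (r)
		goto err_free_irq;       /* undo only what already succeeded */

	return 0;

err_free_irq:
	release_irq();                   /* unwind in reverse acquisition order */
out:
	return r;
}

int main(void)
{
	return probe() ? EXIT_FAILURE : EXIT_SUCCESS;
}
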
diff --git a/drivers/media/radio/radio-zoltrix.c b/drivers/media/radio/radio-zoltrix.c
index 099b7af6a410..e043bee52384 100644
--- a/drivers/media/radio/radio-zoltrix.c
+++ b/drivers/media/radio/radio-zoltrix.c
@@ -30,7 +30,7 @@
* 2006-07-24 - Converted to V4L2 API
* by Mauro Carvalho Chehab <mchehab@kernel.org>
*
- * Converted to the radio-isa framework by Hans Verkuil <hansverk@cisco.com>
+ * Converted to the radio-isa framework by Hans Verkuil <hverkuil@kernel.org>
*
* Note that this is the driver for the Zoltrix Radio Plus.
* This driver does not work for the Zoltrix Radio Plus 108 or the
diff --git a/drivers/media/radio/si4713/radio-platform-si4713.c b/drivers/media/radio/si4713/radio-platform-si4713.c
index 67b4afadc95a..4132968110e3 100644
--- a/drivers/media/radio/si4713/radio-platform-si4713.c
+++ b/drivers/media/radio/si4713/radio-platform-si4713.c
@@ -75,35 +75,35 @@ static inline struct v4l2_device *get_v4l2_dev(struct file *file)
return &((struct radio_si4713_device *)video_drvdata(file))->v4l2_dev;
}
-static int radio_si4713_g_modulator(struct file *file, void *p,
+static int radio_si4713_g_modulator(struct file *file, void *priv,
struct v4l2_modulator *vm)
{
return v4l2_device_call_until_err(get_v4l2_dev(file), 0, tuner,
g_modulator, vm);
}
-static int radio_si4713_s_modulator(struct file *file, void *p,
+static int radio_si4713_s_modulator(struct file *file, void *priv,
const struct v4l2_modulator *vm)
{
return v4l2_device_call_until_err(get_v4l2_dev(file), 0, tuner,
s_modulator, vm);
}
-static int radio_si4713_g_frequency(struct file *file, void *p,
+static int radio_si4713_g_frequency(struct file *file, void *priv,
struct v4l2_frequency *vf)
{
return v4l2_device_call_until_err(get_v4l2_dev(file), 0, tuner,
g_frequency, vf);
}
-static int radio_si4713_s_frequency(struct file *file, void *p,
+static int radio_si4713_s_frequency(struct file *file, void *priv,
const struct v4l2_frequency *vf)
{
return v4l2_device_call_until_err(get_v4l2_dev(file), 0, tuner,
s_frequency, vf);
}
-static long radio_si4713_default(struct file *file, void *p,
+static long radio_si4713_default(struct file *file, void *priv,
bool valid_prio, unsigned int cmd, void *arg)
{
return v4l2_device_call_until_err(get_v4l2_dev(file), 0, core,
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index f5221b018808..35b9e07003d8 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -39,11 +39,6 @@
#define DISPLAY_MINOR_BASE 144
#define DEVICE_NAME "lcd%d"
-#define BUF_CHUNK_SIZE 8
-#define BUF_SIZE 128
-
-#define BIT_DURATION 250 /* each bit received is 250us */
-
#define IMON_CLOCK_ENABLE_PACKETS 2
/*** P R O T O T Y P E S ***/
@@ -536,7 +531,9 @@ static int display_open(struct inode *inode, struct file *file)
mutex_lock(&ictx->lock);
- if (!ictx->display_supported) {
+ if (ictx->disconnected) {
+ retval = -ENODEV;
+ } else if (!ictx->display_supported) {
pr_err("display not supported by device\n");
retval = -ENODEV;
} else if (ictx->display_isopen) {
@@ -598,6 +595,11 @@ static int send_packet(struct imon_context *ictx)
int retval = 0;
struct usb_ctrlrequest *control_req = NULL;
+ lockdep_assert_held(&ictx->lock);
+
+ if (ictx->disconnected)
+ return -ENODEV;
+
/* Check if we need to use control or interrupt urb */
if (!ictx->tx_control) {
pipe = usb_sndintpipe(ictx->usbdev_intf0,
@@ -645,12 +647,15 @@ static int send_packet(struct imon_context *ictx)
smp_rmb(); /* ensure later readers know we're not busy */
pr_err_ratelimited("error submitting urb(%d)\n", retval);
} else {
- /* Wait for transmission to complete (or abort) */
- retval = wait_for_completion_interruptible(
- &ictx->tx.finished);
- if (retval) {
+ /* Wait for transmission to complete (or abort or timeout) */
+ retval = wait_for_completion_interruptible_timeout(&ictx->tx.finished, 10 * HZ);
+ if (retval <= 0) {
usb_kill_urb(ictx->tx_urb);
pr_err_ratelimited("task interrupted\n");
+ if (retval < 0)
+ ictx->tx.status = retval;
+ else
+ ictx->tx.status = -ETIMEDOUT;
}
ictx->tx.busy = false;
@@ -949,12 +954,14 @@ static ssize_t vfd_write(struct file *file, const char __user *buf,
static const unsigned char vfd_packet6[] = {
0x01, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF };
- if (ictx->disconnected)
- return -ENODEV;
-
if (mutex_lock_interruptible(&ictx->lock))
return -ERESTARTSYS;
+ if (ictx->disconnected) {
+ retval = -ENODEV;
+ goto exit;
+ }
+
if (!ictx->dev_present_intf0) {
pr_err_ratelimited("no iMON device present\n");
retval = -ENODEV;
@@ -1029,11 +1036,13 @@ static ssize_t lcd_write(struct file *file, const char __user *buf,
int retval = 0;
struct imon_context *ictx = file->private_data;
- if (ictx->disconnected)
- return -ENODEV;
-
mutex_lock(&ictx->lock);
+ if (ictx->disconnected) {
+ retval = -ENODEV;
+ goto exit;
+ }
+
if (!ictx->display_supported) {
pr_err_ratelimited("no iMON display present\n");
retval = -ENODEV;
@@ -1121,7 +1130,7 @@ static int imon_ir_change_protocol(struct rc_dev *rc, u64 *rc_proto)
int retval;
struct imon_context *ictx = rc->priv;
struct device *dev = ictx->dev;
- bool unlock = false;
+ const bool unlock = mutex_trylock(&ictx->lock);
unsigned char ir_proto_packet[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86 };
@@ -1148,8 +1157,6 @@ static int imon_ir_change_protocol(struct rc_dev *rc, u64 *rc_proto)
memcpy(ictx->usb_tx_buf, &ir_proto_packet, sizeof(ir_proto_packet));
- unlock = mutex_trylock(&ictx->lock);
-
retval = send_packet(ictx);
if (retval)
goto out;
@@ -1745,14 +1752,6 @@ static void usb_rx_callback_intf0(struct urb *urb)
if (!ictx)
return;
- /*
- * if we get a callback before we're done configuring the hardware, we
- * can't yet process the data, as there's nowhere to send it, but we
- * still need to submit a new rx URB to avoid wedging the hardware
- */
- if (!ictx->dev_present_intf0)
- goto out;
-
switch (urb->status) {
case -ENOENT: /* usbcore unlink successful! */
return;
@@ -1761,16 +1760,29 @@ static void usb_rx_callback_intf0(struct urb *urb)
break;
case 0:
- imon_incoming_packet(ictx, urb, intfnum);
+ /*
+ * if we get a callback before we're done configuring the hardware, we
+ * can't yet process the data, as there's nowhere to send it, but we
+ * still need to submit a new rx URB to avoid wedging the hardware
+ */
+ if (ictx->dev_present_intf0)
+ imon_incoming_packet(ictx, urb, intfnum);
break;
+ case -ECONNRESET:
+ case -EILSEQ:
+ case -EPROTO:
+ case -EPIPE:
+ dev_warn(ictx->dev, "imon %s: status(%d)\n",
+ __func__, urb->status);
+ return;
+
default:
dev_warn(ictx->dev, "imon %s: status(%d): ignored\n",
__func__, urb->status);
break;
}
-out:
usb_submit_urb(ictx->rx_urb_intf0, GFP_ATOMIC);
}
@@ -1786,14 +1798,6 @@ static void usb_rx_callback_intf1(struct urb *urb)
if (!ictx)
return;
- /*
- * if we get a callback before we're done configuring the hardware, we
- * can't yet process the data, as there's nowhere to send it, but we
- * still need to submit a new rx URB to avoid wedging the hardware
- */
- if (!ictx->dev_present_intf1)
- goto out;
-
switch (urb->status) {
case -ENOENT: /* usbcore unlink successful! */
return;
@@ -1802,16 +1806,29 @@ static void usb_rx_callback_intf1(struct urb *urb)
break;
case 0:
- imon_incoming_packet(ictx, urb, intfnum);
+ /*
+ * if we get a callback before we're done configuring the hardware, we
+ * can't yet process the data, as there's nowhere to send it, but we
+ * still need to submit a new rx URB to avoid wedging the hardware
+ */
+ if (ictx->dev_present_intf1)
+ imon_incoming_packet(ictx, urb, intfnum);
break;
+ case -ECONNRESET:
+ case -EILSEQ:
+ case -EPROTO:
+ case -EPIPE:
+ dev_warn(ictx->dev, "imon %s: status(%d)\n",
+ __func__, urb->status);
+ return;
+
default:
dev_warn(ictx->dev, "imon %s: status(%d): ignored\n",
__func__, urb->status);
break;
}
-out:
usb_submit_urb(ictx->rx_urb_intf1, GFP_ATOMIC);
}
@@ -2499,7 +2516,11 @@ static void imon_disconnect(struct usb_interface *interface)
int ifnum;
ictx = usb_get_intfdata(interface);
+
+ mutex_lock(&ictx->lock);
ictx->disconnected = true;
+ mutex_unlock(&ictx->lock);
+
dev = ictx->dev;
ifnum = interface->cur_altsetting->desc.bInterfaceNumber;
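
The send_packet() hunk above replaces the unbounded wait_for_completion_interruptible() with wait_for_completion_interruptible_timeout(..., 10 * HZ), so a device that never completes the transmit URB can no longer block the writer indefinitely. A sketch of how the three-way return value is decoded, assuming an imon-like context (the type and field names here are stand-ins, not the driver's):

/* wait_for_completion_interruptible_timeout() returns:
 *   > 0  completed; value is the remaining time in jiffies
 *   == 0 timed out
 *   < 0  interrupted by a signal (-ERESTARTSYS)
 */
static int send_and_wait(struct tx_ctx *c)      /* hypothetical context type */
{
	long t = wait_for_completion_interruptible_timeout(&c->finished,
							   10 * HZ);
	if (t > 0)
		return c->status;       /* the URB completion handler set this */

	usb_kill_urb(c->urb);           /* abort the in-flight URB */
	return t ? (int)t : -ETIMEDOUT; /* interrupted vs. timed out */
}
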
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index a2257dc2f25d..7d4942925993 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -736,11 +736,11 @@ int lirc_register(struct rc_dev *dev)
cdev_init(&dev->lirc_cdev, &lirc_fops);
+ get_device(&dev->dev);
+
err = cdev_device_add(&dev->lirc_cdev, &dev->lirc_dev);
if (err)
- goto out_ida;
-
- get_device(&dev->dev);
+ goto out_put_device;
switch (dev->driver_type) {
case RC_DRIVER_SCANCODE:
@@ -764,7 +764,8 @@ int lirc_register(struct rc_dev *dev)
return 0;
-out_ida:
+out_put_device:
+ put_device(&dev->lirc_dev);
ida_free(&lirc_ida, minor);
return err;
}
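
The lirc_register() reordering above takes the device reference before cdev_device_add() instead of after it, so the error label can drop the reference unconditionally rather than having to track whether the get already happened. A minimal sketch of that take-the-reference-first shape, with generic names in place of the lirc structures:

static int register_with_ref(struct device *d)
{
	int err;

	get_device(d);                  /* pin the device up front */

	err = add_chardev(d);           /* hypothetical; may fail  */
	if (err)
		goto out_put_device;

	return 0;

out_put_device:
	put_device(d);                  /* always balances the get above */
	return err;
}
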
diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c
index d89a4cfe3c89..a49173f54a4d 100644
--- a/drivers/media/rc/redrat3.c
+++ b/drivers/media/rc/redrat3.c
@@ -422,7 +422,7 @@ static int redrat3_send_cmd(int cmd, struct redrat3_dev *rr3)
static int redrat3_enable_detector(struct redrat3_dev *rr3)
{
struct device *dev = rr3->dev;
- u8 ret;
+ int ret;
ret = redrat3_send_cmd(RR3_RC_DET_ENABLE, rr3);
if (ret != 0)
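
The redrat3 fix widens ret from u8 to int: redrat3_send_cmd() returns negative errno values, which an unsigned byte silently wraps (for example -19 becomes 237), so the error code propagated onward was garbage even though the ret != 0 check still fired. A small, runnable demonstration of the truncation (plain C, not driver code):

#include <errno.h>
#include <stdio.h>

typedef unsigned char u8;

static int send_cmd(void)
{
	return -ENODEV;                 /* -19 on Linux */
}

int main(void)
{
	u8  narrow = send_cmd();        /* wraps to 237 */
	int wide   = send_cmd();

	printf("u8  ret = %d\n", narrow);   /* 237: sign and value lost */
	printf("int ret = %d\n", wide);     /* -19: intact */
	return 0;
}
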
diff --git a/drivers/media/test-drivers/vicodec/vicodec-core.c b/drivers/media/test-drivers/vicodec/vicodec-core.c
index c45f5cf12ded..a3df3a33237e 100644
--- a/drivers/media/test-drivers/vicodec/vicodec-core.c
+++ b/drivers/media/test-drivers/vicodec/vicodec-core.c
@@ -26,7 +26,7 @@
#include "codec-v4l2-fwht.h"
MODULE_DESCRIPTION("Virtual codec device");
-MODULE_AUTHOR("Hans Verkuil <hansverk@cisco.com>");
+MODULE_AUTHOR("Hans Verkuil <hverkuil@kernel.org>");
MODULE_LICENSE("GPL v2");
static bool multiplanar;
@@ -144,7 +144,7 @@ static const struct v4l2_event vicodec_eos_event = {
static inline struct vicodec_ctx *file2ctx(struct file *file)
{
- return container_of(file->private_data, struct vicodec_ctx, fh);
+ return container_of(file_to_v4l2_fh(file), struct vicodec_ctx, fh);
}
static struct vicodec_q_data *get_q_data(struct vicodec_ctx *ctx,
@@ -1207,20 +1207,20 @@ static int vidioc_s_selection(struct file *file, void *priv,
return 0;
}
-static int vicodec_encoder_cmd(struct file *file, void *fh,
+static int vicodec_encoder_cmd(struct file *file, void *priv,
struct v4l2_encoder_cmd *ec)
{
struct vicodec_ctx *ctx = file2ctx(file);
int ret;
- ret = v4l2_m2m_ioctl_try_encoder_cmd(file, fh, ec);
+ ret = v4l2_m2m_ioctl_try_encoder_cmd(file, priv, ec);
if (ret < 0)
return ret;
if (!vb2_is_streaming(&ctx->fh.m2m_ctx->out_q_ctx.q))
return 0;
- ret = v4l2_m2m_ioctl_encoder_cmd(file, fh, ec);
+ ret = v4l2_m2m_ioctl_encoder_cmd(file, priv, ec);
if (ret < 0)
return ret;
@@ -1235,7 +1235,7 @@ static int vicodec_encoder_cmd(struct file *file, void *fh,
return 0;
}
-static int vicodec_decoder_cmd(struct file *file, void *fh,
+static int vicodec_decoder_cmd(struct file *file, void *priv,
struct v4l2_decoder_cmd *dc)
{
struct vicodec_ctx *ctx = file2ctx(file);
@@ -1247,14 +1247,14 @@ static int vicodec_decoder_cmd(struct file *file, void *fh,
*/
WARN_ON(ctx->is_stateless);
- ret = v4l2_m2m_ioctl_try_decoder_cmd(file, fh, dc);
+ ret = v4l2_m2m_ioctl_try_decoder_cmd(file, priv, dc);
if (ret < 0)
return ret;
if (!vb2_is_streaming(&ctx->fh.m2m_ctx->out_q_ctx.q))
return 0;
- ret = v4l2_m2m_ioctl_decoder_cmd(file, fh, dc);
+ ret = v4l2_m2m_ioctl_decoder_cmd(file, priv, dc);
if (ret < 0)
return ret;
@@ -1269,7 +1269,7 @@ static int vicodec_decoder_cmd(struct file *file, void *fh,
return 0;
}
-static int vicodec_enum_framesizes(struct file *file, void *fh,
+static int vicodec_enum_framesizes(struct file *file, void *priv,
struct v4l2_frmsizeenum *fsize)
{
switch (fsize->pixel_format) {
@@ -1848,7 +1848,6 @@ static int vicodec_open(struct file *file)
ctx->is_stateless = true;
v4l2_fh_init(&ctx->fh, video_devdata(file));
- file->private_data = &ctx->fh;
ctx->dev = dev;
hdl = &ctx->hdl;
v4l2_ctrl_handler_init(hdl, 5);
@@ -1932,7 +1931,7 @@ static int vicodec_open(struct file *file)
goto open_unlock;
}
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
open_unlock:
mutex_unlock(vfd->lock);
@@ -1947,7 +1946,7 @@ static int vicodec_release(struct file *file)
mutex_lock(vfd->lock);
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
mutex_unlock(vfd->lock);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
v4l2_ctrl_handler_free(&ctx->hdl);
kvfree(ctx->state.compressed_frame);
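
The vicodec conversion (and the vim2m, visl and vivid ones that follow) drops the manual file->private_data bookkeeping in favor of file_to_v4l2_fh(), then recovers the driver context with container_of() on the embedded v4l2_fh. A runnable userspace sketch of that recovery step, using simplified stand-in types rather than the real V4L2 definitions:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fh { int refcount; };            /* stands in for struct v4l2_fh */

struct ctx {                            /* stands in for vicodec_ctx    */
	int id;
	struct fh fh;                   /* embedded, not a pointer      */
};

int main(void)
{
	struct ctx c = { .id = 42 };
	struct fh *fh = &c.fh;          /* what the core hands back     */

	/* Walk back from the embedded member to the enclosing struct. */
	struct ctx *back = container_of(fh, struct ctx, fh);
	printf("id = %d\n", back->id);  /* prints 42 */
	return 0;
}
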
diff --git a/drivers/media/test-drivers/vim2m.c b/drivers/media/test-drivers/vim2m.c
index 1d1a9e768505..86c32699111a 100644
--- a/drivers/media/test-drivers/vim2m.c
+++ b/drivers/media/test-drivers/vim2m.c
@@ -191,9 +191,7 @@ static void get_alignment(u32 fourcc,
struct vim2m_dev {
struct v4l2_device v4l2_dev;
struct video_device vfd;
-#ifdef CONFIG_MEDIA_CONTROLLER
struct media_device mdev;
-#endif
atomic_t num_inst;
struct mutex dev_mutex;
@@ -236,7 +234,7 @@ struct vim2m_ctx {
static inline struct vim2m_ctx *file2ctx(struct file *file)
{
- return container_of(file->private_data, struct vim2m_ctx, fh);
+ return container_of(file_to_v4l2_fh(file), struct vim2m_ctx, fh);
}
static struct vim2m_q_data *get_q_data(struct vim2m_ctx *ctx,
@@ -268,9 +266,6 @@ static const char *type_name(enum v4l2_buf_type type)
}
}
-#define CLIP(__color) \
- (u8)(((__color) > 0xff) ? 0xff : (((__color) < 0) ? 0 : (__color)))
-
static void copy_line(struct vim2m_q_data *q_data_out,
u8 *src, u8 *dst, bool reverse)
{
@@ -1389,7 +1384,6 @@ static int vim2m_open(struct file *file)
}
v4l2_fh_init(&ctx->fh, video_devdata(file));
- file->private_data = &ctx->fh;
ctx->dev = dev;
hdl = &ctx->hdl;
v4l2_ctrl_handler_init(hdl, 4);
@@ -1433,7 +1427,7 @@ static int vim2m_open(struct file *file)
goto open_unlock;
}
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
atomic_inc(&dev->num_inst);
dprintk(dev, 1, "Created instance: %p, m2m_ctx: %p\n",
@@ -1451,7 +1445,7 @@ static int vim2m_release(struct file *file)
dprintk(dev, 1, "Releasing instance %p\n", ctx);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
v4l2_ctrl_handler_free(&ctx->hdl);
mutex_lock(&dev->dev_mutex);
@@ -1470,9 +1464,7 @@ static void vim2m_device_release(struct video_device *vdev)
v4l2_device_unregister(&dev->v4l2_dev);
v4l2_m2m_release(dev->m2m_dev);
-#ifdef CONFIG_MEDIA_CONTROLLER
media_device_cleanup(&dev->mdev);
-#endif
kfree(dev);
}
@@ -1543,7 +1535,6 @@ static int vim2m_probe(struct platform_device *pdev)
goto error_dev;
}
-#ifdef CONFIG_MEDIA_CONTROLLER
dev->mdev.dev = &pdev->dev;
strscpy(dev->mdev.model, "vim2m", sizeof(dev->mdev.model));
strscpy(dev->mdev.bus_info, "platform:vim2m",
@@ -1551,7 +1542,6 @@ static int vim2m_probe(struct platform_device *pdev)
media_device_init(&dev->mdev);
dev->mdev.ops = &m2m_media_ops;
dev->v4l2_dev.mdev = &dev->mdev;
-#endif
ret = video_register_device(vfd, VFL_TYPE_VIDEO, 0);
if (ret) {
@@ -1562,7 +1552,6 @@ static int vim2m_probe(struct platform_device *pdev)
v4l2_info(&dev->v4l2_dev,
"Device registered as /dev/video%d\n", vfd->num);
-#ifdef CONFIG_MEDIA_CONTROLLER
ret = v4l2_m2m_register_media_controller(dev->m2m_dev, vfd,
MEDIA_ENT_F_PROC_VIDEO_SCALER);
if (ret) {
@@ -1575,13 +1564,11 @@ static int vim2m_probe(struct platform_device *pdev)
v4l2_err(&dev->v4l2_dev, "Failed to register mem2mem media device\n");
goto error_m2m_mc;
}
-#endif
+
return 0;
-#ifdef CONFIG_MEDIA_CONTROLLER
error_m2m_mc:
v4l2_m2m_unregister_media_controller(dev->m2m_dev);
-#endif
error_v4l2:
video_unregister_device(&dev->vfd);
/* vim2m_device_release called by video_unregister_device to release various objects */
@@ -1602,10 +1589,8 @@ static void vim2m_remove(struct platform_device *pdev)
v4l2_info(&dev->v4l2_dev, "Removing " MEM2MEM_NAME);
-#ifdef CONFIG_MEDIA_CONTROLLER
media_device_unregister(&dev->mdev);
v4l2_m2m_unregister_media_controller(dev->m2m_dev);
-#endif
video_unregister_device(&dev->vfd);
}
diff --git a/drivers/media/test-drivers/vimc/vimc-capture.c b/drivers/media/test-drivers/vimc/vimc-capture.c
index 10df039278e7..7f6124025fc9 100644
--- a/drivers/media/test-drivers/vimc/vimc-capture.c
+++ b/drivers/media/test-drivers/vimc/vimc-capture.c
@@ -57,8 +57,6 @@ static int vimc_capture_querycap(struct file *file, void *priv,
{
strscpy(cap->driver, VIMC_PDEV_NAME, sizeof(cap->driver));
strscpy(cap->card, KBUILD_MODNAME, sizeof(cap->card));
- snprintf(cap->bus_info, sizeof(cap->bus_info),
- "platform:%s", VIMC_PDEV_NAME);
return 0;
}
@@ -169,7 +167,7 @@ static int vimc_capture_enum_fmt_vid_cap(struct file *file, void *priv,
return 0;
}
-static int vimc_capture_enum_framesizes(struct file *file, void *fh,
+static int vimc_capture_enum_framesizes(struct file *file, void *priv,
struct v4l2_frmsizeenum *fsize)
{
const struct vimc_pix_map *vpix;
diff --git a/drivers/media/test-drivers/vimc/vimc-core.c b/drivers/media/test-drivers/vimc/vimc-core.c
index c812fa9f0650..f632c77e52f5 100644
--- a/drivers/media/test-drivers/vimc/vimc-core.c
+++ b/drivers/media/test-drivers/vimc/vimc-core.c
@@ -366,8 +366,6 @@ static int vimc_probe(struct platform_device *pdev)
/* Initialize media device */
strscpy(vimc->mdev.model, VIMC_MDEV_MODEL_NAME,
sizeof(vimc->mdev.model));
- snprintf(vimc->mdev.bus_info, sizeof(vimc->mdev.bus_info),
- "platform:%s", VIMC_PDEV_NAME);
vimc->mdev.dev = &pdev->dev;
media_device_init(&vimc->mdev);
diff --git a/drivers/media/test-drivers/visl/visl-core.c b/drivers/media/test-drivers/visl/visl-core.c
index 5bf3136b36eb..26c6c6835f79 100644
--- a/drivers/media/test-drivers/visl/visl-core.c
+++ b/drivers/media/test-drivers/visl/visl-core.c
@@ -341,7 +341,6 @@ static int visl_open(struct file *file)
ctx->tpg_str_buf = kzalloc(TPG_STR_BUF_SZ, GFP_KERNEL);
v4l2_fh_init(&ctx->fh, video_devdata(file));
- file->private_data = &ctx->fh;
ctx->dev = dev;
rc = visl_init_ctrls(ctx);
@@ -361,7 +360,7 @@ static int visl_open(struct file *file)
if (rc)
goto free_m2m_ctx;
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
dprintk(dev, "Created instance: %p, m2m_ctx: %p\n",
ctx, ctx->fh.m2m_ctx);
@@ -390,7 +389,7 @@ static int visl_release(struct file *file)
dprintk(dev, "Releasing instance %p\n", ctx);
tpg_free(&ctx->tpg);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
v4l2_ctrl_handler_free(&ctx->hdl);
mutex_lock(&dev->dev_mutex);
diff --git a/drivers/media/test-drivers/visl/visl.h b/drivers/media/test-drivers/visl/visl.h
index 434e9efbf9b2..2971e8b37ff6 100644
--- a/drivers/media/test-drivers/visl/visl.h
+++ b/drivers/media/test-drivers/visl/visl.h
@@ -163,12 +163,7 @@ struct visl_ctrl_desc {
static inline struct visl_ctx *visl_file_to_ctx(struct file *file)
{
- return container_of(file->private_data, struct visl_ctx, fh);
-}
-
-static inline struct visl_ctx *visl_v4l2fh_to_ctx(struct v4l2_fh *v4l2_fh)
-{
- return container_of(v4l2_fh, struct visl_ctx, fh);
+ return container_of(file_to_v4l2_fh(file), struct visl_ctx, fh);
}
void *visl_find_control_data(struct visl_ctx *ctx, u32 id);
diff --git a/drivers/media/test-drivers/vivid/vivid-cec.c b/drivers/media/test-drivers/vivid/vivid-cec.c
index 356a988dd6a1..2d15fdd5d999 100644
--- a/drivers/media/test-drivers/vivid/vivid-cec.c
+++ b/drivers/media/test-drivers/vivid/vivid-cec.c
@@ -327,7 +327,7 @@ static int vivid_received(struct cec_adapter *adap, struct cec_msg *msg)
char osd[14];
if (!cec_is_sink(adap))
- return -ENOMSG;
+ break;
cec_ops_set_osd_string(msg, &disp_ctl, osd);
switch (disp_ctl) {
case CEC_OP_DISP_CTL_DEFAULT:
@@ -348,7 +348,7 @@ static int vivid_received(struct cec_adapter *adap, struct cec_msg *msg)
cec_transmit_msg(adap, &reply, false);
break;
}
- break;
+ return 0;
}
case CEC_MSG_VENDOR_COMMAND_WITH_ID: {
u32 vendor_id;
@@ -379,7 +379,7 @@ static int vivid_received(struct cec_adapter *adap, struct cec_msg *msg)
if (size == 1) {
// Ignore even op values
if (!(vendor_cmd[0] & 1))
- break;
+ return 0;
reply.len = msg->len;
memcpy(reply.msg + 1, msg->msg + 1, msg->len - 1);
reply.msg[msg->len - 1]++;
@@ -388,12 +388,10 @@ static int vivid_received(struct cec_adapter *adap, struct cec_msg *msg)
CEC_OP_ABORT_INVALID_OP);
}
cec_transmit_msg(adap, &reply, false);
- break;
+ return 0;
}
- default:
- return -ENOMSG;
}
- return 0;
+ return -ENOMSG;
}
static const struct cec_adap_ops vivid_cec_adap_ops = {
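
The vivid-cec rework above inverts the switch's exit convention: handled opcodes now return 0 from inside their case, and any path that merely breaks out of the switch (an unknown opcode, or a sink-only message received by a source) falls through to a single return -ENOMSG at the end. A compact, runnable illustration of the pattern, with made-up opcodes in place of the CEC messages:

#include <errno.h>
#include <stdio.h>

enum { MSG_PING = 1, MSG_ECHO = 2 };

static int dispatch(int op, int is_sink)
{
	switch (op) {
	case MSG_PING:
		if (!is_sink)
			break;          /* bail out: treated as unhandled */
		puts("ping handled");
		return 0;
	case MSG_ECHO:
		puts("echo handled");
		return 0;
	}
	return -ENOMSG;                 /* single tail for every unhandled path */
}

int main(void)
{
	printf("%d %d %d\n", dispatch(MSG_PING, 1),
	       dispatch(MSG_PING, 0), dispatch(99, 1));
	return 0;
}
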
diff --git a/drivers/media/test-drivers/vivid/vivid-core.c b/drivers/media/test-drivers/vivid/vivid-core.c
index 8d56168c72aa..86506be36acb 100644
--- a/drivers/media/test-drivers/vivid/vivid-core.c
+++ b/drivers/media/test-drivers/vivid/vivid-core.c
@@ -277,49 +277,49 @@ static int vidioc_querycap(struct file *file, void *priv,
return 0;
}
-static int vidioc_s_hw_freq_seek(struct file *file, void *fh, const struct v4l2_hw_freq_seek *a)
+static int vidioc_s_hw_freq_seek(struct file *file, void *priv, const struct v4l2_hw_freq_seek *a)
{
struct video_device *vdev = video_devdata(file);
if (vdev->vfl_type == VFL_TYPE_RADIO)
- return vivid_radio_rx_s_hw_freq_seek(file, fh, a);
+ return vivid_radio_rx_s_hw_freq_seek(file, priv, a);
return -ENOTTY;
}
-static int vidioc_enum_freq_bands(struct file *file, void *fh, struct v4l2_frequency_band *band)
+static int vidioc_enum_freq_bands(struct file *file, void *priv, struct v4l2_frequency_band *band)
{
struct video_device *vdev = video_devdata(file);
if (vdev->vfl_type == VFL_TYPE_RADIO)
- return vivid_radio_rx_enum_freq_bands(file, fh, band);
+ return vivid_radio_rx_enum_freq_bands(file, priv, band);
if (vdev->vfl_type == VFL_TYPE_SDR)
- return vivid_sdr_enum_freq_bands(file, fh, band);
+ return vivid_sdr_enum_freq_bands(file, priv, band);
return -ENOTTY;
}
-static int vidioc_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
+static int vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *vt)
{
struct video_device *vdev = video_devdata(file);
if (vdev->vfl_type == VFL_TYPE_RADIO)
- return vivid_radio_rx_g_tuner(file, fh, vt);
+ return vivid_radio_rx_g_tuner(file, priv, vt);
if (vdev->vfl_type == VFL_TYPE_SDR)
- return vivid_sdr_g_tuner(file, fh, vt);
- return vivid_video_g_tuner(file, fh, vt);
+ return vivid_sdr_g_tuner(file, priv, vt);
+ return vivid_video_g_tuner(file, priv, vt);
}
-static int vidioc_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt)
+static int vidioc_s_tuner(struct file *file, void *priv, const struct v4l2_tuner *vt)
{
struct video_device *vdev = video_devdata(file);
if (vdev->vfl_type == VFL_TYPE_RADIO)
- return vivid_radio_rx_s_tuner(file, fh, vt);
+ return vivid_radio_rx_s_tuner(file, priv, vt);
if (vdev->vfl_type == VFL_TYPE_SDR)
- return vivid_sdr_s_tuner(file, fh, vt);
- return vivid_video_s_tuner(file, fh, vt);
+ return vivid_sdr_s_tuner(file, priv, vt);
+ return vivid_video_s_tuner(file, priv, vt);
}
-static int vidioc_g_frequency(struct file *file, void *fh, struct v4l2_frequency *vf)
+static int vidioc_g_frequency(struct file *file, void *priv, struct v4l2_frequency *vf)
{
struct vivid_dev *dev = video_drvdata(file);
struct video_device *vdev = video_devdata(file);
@@ -329,11 +329,11 @@ static int vidioc_g_frequency(struct file *file, void *fh, struct v4l2_frequency
vdev->vfl_dir == VFL_DIR_RX ?
&dev->radio_rx_freq : &dev->radio_tx_freq, vf);
if (vdev->vfl_type == VFL_TYPE_SDR)
- return vivid_sdr_g_frequency(file, fh, vf);
- return vivid_video_g_frequency(file, fh, vf);
+ return vivid_sdr_g_frequency(file, priv, vf);
+ return vivid_video_g_frequency(file, priv, vf);
}
-static int vidioc_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf)
+static int vidioc_s_frequency(struct file *file, void *priv, const struct v4l2_frequency *vf)
{
struct vivid_dev *dev = video_drvdata(file);
struct video_device *vdev = video_devdata(file);
@@ -343,113 +343,113 @@ static int vidioc_s_frequency(struct file *file, void *fh, const struct v4l2_fre
vdev->vfl_dir == VFL_DIR_RX ?
&dev->radio_rx_freq : &dev->radio_tx_freq, vf);
if (vdev->vfl_type == VFL_TYPE_SDR)
- return vivid_sdr_s_frequency(file, fh, vf);
- return vivid_video_s_frequency(file, fh, vf);
+ return vivid_sdr_s_frequency(file, priv, vf);
+ return vivid_video_s_frequency(file, priv, vf);
}
-static int vidioc_overlay(struct file *file, void *fh, unsigned i)
+static int vidioc_overlay(struct file *file, void *priv, unsigned i)
{
struct video_device *vdev = video_devdata(file);
if (vdev->vfl_dir == VFL_DIR_RX)
return -ENOTTY;
- return vivid_vid_out_overlay(file, fh, i);
+ return vivid_vid_out_overlay(file, priv, i);
}
-static int vidioc_g_fbuf(struct file *file, void *fh, struct v4l2_framebuffer *a)
+static int vidioc_g_fbuf(struct file *file, void *priv, struct v4l2_framebuffer *a)
{
struct video_device *vdev = video_devdata(file);
if (vdev->vfl_dir == VFL_DIR_RX)
return -ENOTTY;
- return vivid_vid_out_g_fbuf(file, fh, a);
+ return vivid_vid_out_g_fbuf(file, priv, a);
}
-static int vidioc_s_fbuf(struct file *file, void *fh, const struct v4l2_framebuffer *a)
+static int vidioc_s_fbuf(struct file *file, void *priv, const struct v4l2_framebuffer *a)
{
struct video_device *vdev = video_devdata(file);
if (vdev->vfl_dir == VFL_DIR_RX)
return -ENOTTY;
- return vivid_vid_out_s_fbuf(file, fh, a);
+ return vivid_vid_out_s_fbuf(file, priv, a);
}
-static int vidioc_s_std(struct file *file, void *fh, v4l2_std_id id)
+static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id id)
{
struct video_device *vdev = video_devdata(file);
if (vdev->vfl_dir == VFL_DIR_RX)
- return vivid_vid_cap_s_std(file, fh, id);
- return vivid_vid_out_s_std(file, fh, id);
+ return vivid_vid_cap_s_std(file, priv, id);
+ return vivid_vid_out_s_std(file, priv, id);
}
-static int vidioc_s_dv_timings(struct file *file, void *fh, struct v4l2_dv_timings *timings)
+static int vidioc_s_dv_timings(struct file *file, void *priv, struct v4l2_dv_timings *timings)
{
struct video_device *vdev = video_devdata(file);
if (vdev->vfl_dir == VFL_DIR_RX)
- return vivid_vid_cap_s_dv_timings(file, fh, timings);
- return vivid_vid_out_s_dv_timings(file, fh, timings);
+ return vivid_vid_cap_s_dv_timings(file, priv, timings);
+ return vivid_vid_out_s_dv_timings(file, priv, timings);
}
-static int vidioc_g_pixelaspect(struct file *file, void *fh,
+static int vidioc_g_pixelaspect(struct file *file, void *priv,
int type, struct v4l2_fract *f)
{
struct video_device *vdev = video_devdata(file);
if (vdev->vfl_dir == VFL_DIR_RX)
- return vivid_vid_cap_g_pixelaspect(file, fh, type, f);
- return vivid_vid_out_g_pixelaspect(file, fh, type, f);
+ return vivid_vid_cap_g_pixelaspect(file, priv, type, f);
+ return vivid_vid_out_g_pixelaspect(file, priv, type, f);
}
-static int vidioc_g_selection(struct file *file, void *fh,
+static int vidioc_g_selection(struct file *file, void *priv,
struct v4l2_selection *sel)
{
struct video_device *vdev = video_devdata(file);
if (vdev->vfl_dir == VFL_DIR_RX)
- return vivid_vid_cap_g_selection(file, fh, sel);
- return vivid_vid_out_g_selection(file, fh, sel);
+ return vivid_vid_cap_g_selection(file, priv, sel);
+ return vivid_vid_out_g_selection(file, priv, sel);
}
-static int vidioc_s_selection(struct file *file, void *fh,
+static int vidioc_s_selection(struct file *file, void *priv,
struct v4l2_selection *sel)
{
struct video_device *vdev = video_devdata(file);
if (vdev->vfl_dir == VFL_DIR_RX)
- return vivid_vid_cap_s_selection(file, fh, sel);
- return vivid_vid_out_s_selection(file, fh, sel);
+ return vivid_vid_cap_s_selection(file, priv, sel);
+ return vivid_vid_out_s_selection(file, priv, sel);
}
-static int vidioc_g_parm(struct file *file, void *fh,
+static int vidioc_g_parm(struct file *file, void *priv,
struct v4l2_streamparm *parm)
{
struct video_device *vdev = video_devdata(file);
if (vdev->vfl_type == VFL_TYPE_TOUCH)
- return vivid_g_parm_tch(file, fh, parm);
+ return vivid_g_parm_tch(file, priv, parm);
if (vdev->vfl_dir == VFL_DIR_RX)
- return vivid_vid_cap_g_parm(file, fh, parm);
- return vivid_vid_out_g_parm(file, fh, parm);
+ return vivid_vid_cap_g_parm(file, priv, parm);
+ return vivid_vid_out_g_parm(file, priv, parm);
}
-static int vidioc_s_parm(struct file *file, void *fh,
+static int vidioc_s_parm(struct file *file, void *priv,
struct v4l2_streamparm *parm)
{
struct video_device *vdev = video_devdata(file);
if (vdev->vfl_dir == VFL_DIR_RX)
- return vivid_vid_cap_s_parm(file, fh, parm);
+ return vivid_vid_cap_s_parm(file, priv, parm);
return -ENOTTY;
}
-static int vidioc_log_status(struct file *file, void *fh)
+static int vidioc_log_status(struct file *file, void *priv)
{
struct vivid_dev *dev = video_drvdata(file);
struct video_device *vdev = video_devdata(file);
- v4l2_ctrl_log_status(file, fh);
+ v4l2_ctrl_log_status(file, priv);
if (vdev->vfl_dir == VFL_DIR_RX && vdev->vfl_type == VFL_TYPE_VIDEO)
tpg_log_status(&dev->tpg);
return 0;
@@ -654,11 +654,11 @@ static int vivid_fop_release(struct file *file)
v4l2_info(&dev->v4l2_dev, "reconnect\n");
vivid_reconnect(dev);
}
- if (file->private_data == dev->radio_rx_rds_owner) {
+ if (file_to_v4l2_fh(file) == dev->radio_rx_rds_owner) {
dev->radio_rx_rds_last_block = 0;
dev->radio_rx_rds_owner = NULL;
}
- if (file->private_data == dev->radio_tx_rds_owner) {
+ if (file_to_v4l2_fh(file) == dev->radio_tx_rds_owner) {
dev->radio_tx_rds_last_block = 0;
dev->radio_tx_rds_owner = NULL;
}
diff --git a/drivers/media/test-drivers/vivid/vivid-radio-rx.c b/drivers/media/test-drivers/vivid/vivid-radio-rx.c
index 79c1723bd84c..b5e3026f883e 100644
--- a/drivers/media/test-drivers/vivid/vivid-radio-rx.c
+++ b/drivers/media/test-drivers/vivid/vivid-radio-rx.c
@@ -42,13 +42,13 @@ ssize_t vivid_radio_rx_read(struct file *file, char __user *buf,
if (mutex_lock_interruptible(&dev->mutex))
return -ERESTARTSYS;
if (dev->radio_rx_rds_owner &&
- file->private_data != dev->radio_rx_rds_owner) {
+ file_to_v4l2_fh(file) != dev->radio_rx_rds_owner) {
mutex_unlock(&dev->mutex);
return -EBUSY;
}
if (dev->radio_rx_rds_owner == NULL) {
vivid_radio_rds_init(dev);
- dev->radio_rx_rds_owner = file->private_data;
+ dev->radio_rx_rds_owner = file_to_v4l2_fh(file);
}
retry:
@@ -133,7 +133,7 @@ __poll_t vivid_radio_rx_poll(struct file *file, struct poll_table_struct *wait)
return EPOLLIN | EPOLLRDNORM | v4l2_ctrl_poll(file, wait);
}
-int vivid_radio_rx_enum_freq_bands(struct file *file, void *fh, struct v4l2_frequency_band *band)
+int vivid_radio_rx_enum_freq_bands(struct file *file, void *priv, struct v4l2_frequency_band *band)
{
if (band->tuner != 0)
return -EINVAL;
@@ -145,7 +145,7 @@ int vivid_radio_rx_enum_freq_bands(struct file *file, void *fh, struct v4l2_freq
return 0;
}
-int vivid_radio_rx_s_hw_freq_seek(struct file *file, void *fh, const struct v4l2_hw_freq_seek *a)
+int vivid_radio_rx_s_hw_freq_seek(struct file *file, void *priv, const struct v4l2_hw_freq_seek *a)
{
struct vivid_dev *dev = video_drvdata(file);
unsigned low, high;
@@ -214,7 +214,7 @@ int vivid_radio_rx_s_hw_freq_seek(struct file *file, void *fh, const struct v4l2
return 0;
}
-int vivid_radio_rx_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
+int vivid_radio_rx_g_tuner(struct file *file, void *priv, struct v4l2_tuner *vt)
{
struct vivid_dev *dev = video_drvdata(file);
int delta = 800;
@@ -267,7 +267,7 @@ int vivid_radio_rx_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
return 0;
}
-int vivid_radio_rx_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt)
+int vivid_radio_rx_s_tuner(struct file *file, void *priv, const struct v4l2_tuner *vt)
{
struct vivid_dev *dev = video_drvdata(file);
diff --git a/drivers/media/test-drivers/vivid/vivid-radio-rx.h b/drivers/media/test-drivers/vivid/vivid-radio-rx.h
index c9c7849f6f99..a2ae17c78ece 100644
--- a/drivers/media/test-drivers/vivid/vivid-radio-rx.h
+++ b/drivers/media/test-drivers/vivid/vivid-radio-rx.h
@@ -11,9 +11,9 @@
ssize_t vivid_radio_rx_read(struct file *, char __user *, size_t, loff_t *);
__poll_t vivid_radio_rx_poll(struct file *file, struct poll_table_struct *wait);
-int vivid_radio_rx_enum_freq_bands(struct file *file, void *fh, struct v4l2_frequency_band *band);
-int vivid_radio_rx_s_hw_freq_seek(struct file *file, void *fh, const struct v4l2_hw_freq_seek *a);
-int vivid_radio_rx_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt);
-int vivid_radio_rx_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt);
+int vivid_radio_rx_enum_freq_bands(struct file *file, void *priv, struct v4l2_frequency_band *band);
+int vivid_radio_rx_s_hw_freq_seek(struct file *file, void *priv, const struct v4l2_hw_freq_seek *a);
+int vivid_radio_rx_g_tuner(struct file *file, void *priv, struct v4l2_tuner *vt);
+int vivid_radio_rx_s_tuner(struct file *file, void *priv, const struct v4l2_tuner *vt);
#endif
diff --git a/drivers/media/test-drivers/vivid/vivid-radio-tx.c b/drivers/media/test-drivers/vivid/vivid-radio-tx.c
index 049d40b948bb..ada60722066e 100644
--- a/drivers/media/test-drivers/vivid/vivid-radio-tx.c
+++ b/drivers/media/test-drivers/vivid/vivid-radio-tx.c
@@ -39,11 +39,11 @@ ssize_t vivid_radio_tx_write(struct file *file, const char __user *buf,
if (mutex_lock_interruptible(&dev->mutex))
return -ERESTARTSYS;
if (dev->radio_tx_rds_owner &&
- file->private_data != dev->radio_tx_rds_owner) {
+ file_to_v4l2_fh(file) != dev->radio_tx_rds_owner) {
mutex_unlock(&dev->mutex);
return -EBUSY;
}
- dev->radio_tx_rds_owner = file->private_data;
+ dev->radio_tx_rds_owner = file_to_v4l2_fh(file);
retry:
timestamp = ktime_sub(ktime_get(), dev->radio_rds_init_time);
@@ -96,7 +96,7 @@ __poll_t vivid_radio_tx_poll(struct file *file, struct poll_table_struct *wait)
return EPOLLOUT | EPOLLWRNORM | v4l2_ctrl_poll(file, wait);
}
-int vidioc_g_modulator(struct file *file, void *fh, struct v4l2_modulator *a)
+int vidioc_g_modulator(struct file *file, void *priv, struct v4l2_modulator *a)
{
struct vivid_dev *dev = video_drvdata(file);
@@ -115,7 +115,7 @@ int vidioc_g_modulator(struct file *file, void *fh, struct v4l2_modulator *a)
return 0;
}
-int vidioc_s_modulator(struct file *file, void *fh, const struct v4l2_modulator *a)
+int vidioc_s_modulator(struct file *file, void *priv, const struct v4l2_modulator *a)
{
struct vivid_dev *dev = video_drvdata(file);
diff --git a/drivers/media/test-drivers/vivid/vivid-radio-tx.h b/drivers/media/test-drivers/vivid/vivid-radio-tx.h
index c2bf1e7e634a..20cb6f1363ff 100644
--- a/drivers/media/test-drivers/vivid/vivid-radio-tx.h
+++ b/drivers/media/test-drivers/vivid/vivid-radio-tx.h
@@ -11,7 +11,7 @@
ssize_t vivid_radio_tx_write(struct file *, const char __user *, size_t, loff_t *);
__poll_t vivid_radio_tx_poll(struct file *file, struct poll_table_struct *wait);
-int vidioc_g_modulator(struct file *file, void *fh, struct v4l2_modulator *a);
-int vidioc_s_modulator(struct file *file, void *fh, const struct v4l2_modulator *a);
+int vidioc_g_modulator(struct file *file, void *priv, struct v4l2_modulator *a);
+int vidioc_s_modulator(struct file *file, void *priv, const struct v4l2_modulator *a);
#endif
diff --git a/drivers/media/test-drivers/vivid/vivid-sdr-cap.c b/drivers/media/test-drivers/vivid/vivid-sdr-cap.c
index c633fc2ed664..2664a593e8e1 100644
--- a/drivers/media/test-drivers/vivid/vivid-sdr-cap.c
+++ b/drivers/media/test-drivers/vivid/vivid-sdr-cap.c
@@ -344,7 +344,7 @@ const struct vb2_ops vivid_sdr_cap_qops = {
.buf_request_complete = sdr_cap_buf_request_complete,
};
-int vivid_sdr_enum_freq_bands(struct file *file, void *fh,
+int vivid_sdr_enum_freq_bands(struct file *file, void *priv,
struct v4l2_frequency_band *band)
{
switch (band->tuner) {
@@ -363,7 +363,7 @@ int vivid_sdr_enum_freq_bands(struct file *file, void *fh,
}
}
-int vivid_sdr_g_frequency(struct file *file, void *fh,
+int vivid_sdr_g_frequency(struct file *file, void *priv,
struct v4l2_frequency *vf)
{
struct vivid_dev *dev = video_drvdata(file);
@@ -382,7 +382,7 @@ int vivid_sdr_g_frequency(struct file *file, void *fh,
}
}
-int vivid_sdr_s_frequency(struct file *file, void *fh,
+int vivid_sdr_s_frequency(struct file *file, void *priv,
const struct v4l2_frequency *vf)
{
struct vivid_dev *dev = video_drvdata(file);
@@ -423,7 +423,7 @@ int vivid_sdr_s_frequency(struct file *file, void *fh,
}
}
-int vivid_sdr_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
+int vivid_sdr_g_tuner(struct file *file, void *priv, struct v4l2_tuner *vt)
{
switch (vt->index) {
case 0:
@@ -447,14 +447,14 @@ int vivid_sdr_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
}
}
-int vivid_sdr_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt)
+int vivid_sdr_s_tuner(struct file *file, void *priv, const struct v4l2_tuner *vt)
{
if (vt->index > 1)
return -EINVAL;
return 0;
}
-int vidioc_enum_fmt_sdr_cap(struct file *file, void *fh, struct v4l2_fmtdesc *f)
+int vidioc_enum_fmt_sdr_cap(struct file *file, void *priv, struct v4l2_fmtdesc *f)
{
if (f->index >= ARRAY_SIZE(formats))
return -EINVAL;
@@ -462,7 +462,7 @@ int vidioc_enum_fmt_sdr_cap(struct file *file, void *fh, struct v4l2_fmtdesc *f)
return 0;
}
-int vidioc_g_fmt_sdr_cap(struct file *file, void *fh, struct v4l2_format *f)
+int vidioc_g_fmt_sdr_cap(struct file *file, void *priv, struct v4l2_format *f)
{
struct vivid_dev *dev = video_drvdata(file);
@@ -471,7 +471,7 @@ int vidioc_g_fmt_sdr_cap(struct file *file, void *fh, struct v4l2_format *f)
return 0;
}
-int vidioc_s_fmt_sdr_cap(struct file *file, void *fh, struct v4l2_format *f)
+int vidioc_s_fmt_sdr_cap(struct file *file, void *priv, struct v4l2_format *f)
{
struct vivid_dev *dev = video_drvdata(file);
struct vb2_queue *q = &dev->vb_sdr_cap_q;
@@ -495,7 +495,7 @@ int vidioc_s_fmt_sdr_cap(struct file *file, void *fh, struct v4l2_format *f)
return 0;
}
-int vidioc_try_fmt_sdr_cap(struct file *file, void *fh, struct v4l2_format *f)
+int vidioc_try_fmt_sdr_cap(struct file *file, void *priv, struct v4l2_format *f)
{
int i;
diff --git a/drivers/media/test-drivers/vivid/vivid-sdr-cap.h b/drivers/media/test-drivers/vivid/vivid-sdr-cap.h
index 813c9248e5a7..3d8eeabbfc10 100644
--- a/drivers/media/test-drivers/vivid/vivid-sdr-cap.h
+++ b/drivers/media/test-drivers/vivid/vivid-sdr-cap.h
@@ -8,15 +8,15 @@
#ifndef _VIVID_SDR_CAP_H_
#define _VIVID_SDR_CAP_H_
-int vivid_sdr_enum_freq_bands(struct file *file, void *fh, struct v4l2_frequency_band *band);
-int vivid_sdr_g_frequency(struct file *file, void *fh, struct v4l2_frequency *vf);
-int vivid_sdr_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf);
-int vivid_sdr_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt);
-int vivid_sdr_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt);
-int vidioc_enum_fmt_sdr_cap(struct file *file, void *fh, struct v4l2_fmtdesc *f);
-int vidioc_g_fmt_sdr_cap(struct file *file, void *fh, struct v4l2_format *f);
-int vidioc_s_fmt_sdr_cap(struct file *file, void *fh, struct v4l2_format *f);
-int vidioc_try_fmt_sdr_cap(struct file *file, void *fh, struct v4l2_format *f);
+int vivid_sdr_enum_freq_bands(struct file *file, void *priv, struct v4l2_frequency_band *band);
+int vivid_sdr_g_frequency(struct file *file, void *priv, struct v4l2_frequency *vf);
+int vivid_sdr_s_frequency(struct file *file, void *priv, const struct v4l2_frequency *vf);
+int vivid_sdr_g_tuner(struct file *file, void *priv, struct v4l2_tuner *vt);
+int vivid_sdr_s_tuner(struct file *file, void *priv, const struct v4l2_tuner *vt);
+int vidioc_enum_fmt_sdr_cap(struct file *file, void *priv, struct v4l2_fmtdesc *f);
+int vidioc_g_fmt_sdr_cap(struct file *file, void *priv, struct v4l2_format *f);
+int vidioc_s_fmt_sdr_cap(struct file *file, void *priv, struct v4l2_format *f);
+int vidioc_try_fmt_sdr_cap(struct file *file, void *priv, struct v4l2_format *f);
void vivid_sdr_cap_process(struct vivid_dev *dev, struct vivid_buffer *buf);
extern const struct vb2_ops vivid_sdr_cap_qops;
diff --git a/drivers/media/test-drivers/vivid/vivid-vbi-cap.c b/drivers/media/test-drivers/vivid/vivid-vbi-cap.c
index a09f62c66c33..791382a54b4f 100644
--- a/drivers/media/test-drivers/vivid/vivid-vbi-cap.c
+++ b/drivers/media/test-drivers/vivid/vivid-vbi-cap.c
@@ -282,7 +282,7 @@ void vivid_fill_service_lines(struct v4l2_sliced_vbi_format *vbi, u32 service_se
}
}
-int vidioc_g_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt)
+int vidioc_g_fmt_sliced_vbi_cap(struct file *file, void *priv, struct v4l2_format *fmt)
{
struct vivid_dev *dev = video_drvdata(file);
struct v4l2_sliced_vbi_format *vbi = &fmt->fmt.sliced;
@@ -294,7 +294,7 @@ int vidioc_g_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_format
return 0;
}
-int vidioc_try_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt)
+int vidioc_try_fmt_sliced_vbi_cap(struct file *file, void *priv, struct v4l2_format *fmt)
{
struct vivid_dev *dev = video_drvdata(file);
struct v4l2_sliced_vbi_format *vbi = &fmt->fmt.sliced;
@@ -310,11 +310,11 @@ int vidioc_try_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_forma
return 0;
}
-int vidioc_s_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt)
+int vidioc_s_fmt_sliced_vbi_cap(struct file *file, void *priv, struct v4l2_format *fmt)
{
struct vivid_dev *dev = video_drvdata(file);
struct v4l2_sliced_vbi_format *vbi = &fmt->fmt.sliced;
- int ret = vidioc_try_fmt_sliced_vbi_cap(file, fh, fmt);
+ int ret = vidioc_try_fmt_sliced_vbi_cap(file, priv, fmt);
if (ret)
return ret;
@@ -324,7 +324,7 @@ int vidioc_s_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_format
return 0;
}
-int vidioc_g_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_sliced_vbi_cap *cap)
+int vidioc_g_sliced_vbi_cap(struct file *file, void *priv, struct v4l2_sliced_vbi_cap *cap)
{
struct vivid_dev *dev = video_drvdata(file);
struct video_device *vdev = video_devdata(file);
diff --git a/drivers/media/test-drivers/vivid/vivid-vbi-cap.h b/drivers/media/test-drivers/vivid/vivid-vbi-cap.h
index 91d2de01381c..ec2d200c9e0d 100644
--- a/drivers/media/test-drivers/vivid/vivid-vbi-cap.h
+++ b/drivers/media/test-drivers/vivid/vivid-vbi-cap.h
@@ -16,10 +16,10 @@ int vidioc_g_fmt_vbi_cap(struct file *file, void *priv,
struct v4l2_format *f);
int vidioc_s_fmt_vbi_cap(struct file *file, void *priv,
struct v4l2_format *f);
-int vidioc_g_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt);
-int vidioc_try_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt);
-int vidioc_s_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt);
-int vidioc_g_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_sliced_vbi_cap *cap);
+int vidioc_g_fmt_sliced_vbi_cap(struct file *file, void *priv, struct v4l2_format *fmt);
+int vidioc_try_fmt_sliced_vbi_cap(struct file *file, void *priv, struct v4l2_format *fmt);
+int vidioc_s_fmt_sliced_vbi_cap(struct file *file, void *priv, struct v4l2_format *fmt);
+int vidioc_g_sliced_vbi_cap(struct file *file, void *priv, struct v4l2_sliced_vbi_cap *cap);
void vivid_fill_service_lines(struct v4l2_sliced_vbi_format *vbi, u32 service_set);
diff --git a/drivers/media/test-drivers/vivid/vivid-vbi-out.c b/drivers/media/test-drivers/vivid/vivid-vbi-out.c
index b7a09d2f394e..7b3ea96744bb 100644
--- a/drivers/media/test-drivers/vivid/vivid-vbi-out.c
+++ b/drivers/media/test-drivers/vivid/vivid-vbi-out.c
@@ -168,7 +168,7 @@ int vidioc_s_fmt_vbi_out(struct file *file, void *priv,
return 0;
}
-int vidioc_g_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_format *fmt)
+int vidioc_g_fmt_sliced_vbi_out(struct file *file, void *priv, struct v4l2_format *fmt)
{
struct vivid_dev *dev = video_drvdata(file);
struct v4l2_sliced_vbi_format *vbi = &fmt->fmt.sliced;
@@ -180,7 +180,7 @@ int vidioc_g_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_format
return 0;
}
-int vidioc_try_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_format *fmt)
+int vidioc_try_fmt_sliced_vbi_out(struct file *file, void *priv, struct v4l2_format *fmt)
{
struct vivid_dev *dev = video_drvdata(file);
struct v4l2_sliced_vbi_format *vbi = &fmt->fmt.sliced;
@@ -196,12 +196,12 @@ int vidioc_try_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_forma
return 0;
}
-int vidioc_s_fmt_sliced_vbi_out(struct file *file, void *fh,
+int vidioc_s_fmt_sliced_vbi_out(struct file *file, void *priv,
struct v4l2_format *fmt)
{
struct vivid_dev *dev = video_drvdata(file);
struct v4l2_sliced_vbi_format *vbi = &fmt->fmt.sliced;
- int ret = vidioc_try_fmt_sliced_vbi_out(file, fh, fmt);
+ int ret = vidioc_try_fmt_sliced_vbi_out(file, priv, fmt);
if (ret)
return ret;
diff --git a/drivers/media/test-drivers/vivid/vivid-vbi-out.h b/drivers/media/test-drivers/vivid/vivid-vbi-out.h
index 76584940cdaf..a28e55519ade 100644
--- a/drivers/media/test-drivers/vivid/vivid-vbi-out.h
+++ b/drivers/media/test-drivers/vivid/vivid-vbi-out.h
@@ -13,9 +13,9 @@ int vidioc_g_fmt_vbi_out(struct file *file, void *priv,
struct v4l2_format *f);
int vidioc_s_fmt_vbi_out(struct file *file, void *priv,
struct v4l2_format *f);
-int vidioc_g_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_format *fmt);
-int vidioc_try_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_format *fmt);
-int vidioc_s_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_format *fmt);
+int vidioc_g_fmt_sliced_vbi_out(struct file *file, void *priv, struct v4l2_format *fmt);
+int vidioc_try_fmt_sliced_vbi_out(struct file *file, void *priv, struct v4l2_format *fmt);
+int vidioc_s_fmt_sliced_vbi_out(struct file *file, void *priv, struct v4l2_format *fmt);
extern const struct vb2_ops vivid_vbi_out_qops;
diff --git a/drivers/media/test-drivers/vivid/vivid-vid-cap.c b/drivers/media/test-drivers/vivid/vivid-vid-cap.c
index 2e4c1ed37cd2..8b3162e82032 100644
--- a/drivers/media/test-drivers/vivid/vivid-vid-cap.c
+++ b/drivers/media/test-drivers/vivid/vivid-vid-cap.c
@@ -899,7 +899,7 @@ int vivid_vid_cap_g_selection(struct file *file, void *priv,
return 0;
}
-int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
+int vivid_vid_cap_s_selection(struct file *file, void *priv, struct v4l2_selection *s)
{
struct vivid_dev *dev = video_drvdata(file);
struct v4l2_rect *crop = &dev->crop_cap;
@@ -1222,7 +1222,7 @@ int vidioc_s_input(struct file *file, void *priv, unsigned i)
return 0;
}
-int vidioc_enumaudio(struct file *file, void *fh, struct v4l2_audio *vin)
+int vidioc_enumaudio(struct file *file, void *priv, struct v4l2_audio *vin)
{
if (vin->index >= ARRAY_SIZE(vivid_audio_inputs))
return -EINVAL;
@@ -1230,7 +1230,7 @@ int vidioc_enumaudio(struct file *file, void *fh, struct v4l2_audio *vin)
return 0;
}
-int vidioc_g_audio(struct file *file, void *fh, struct v4l2_audio *vin)
+int vidioc_g_audio(struct file *file, void *priv, struct v4l2_audio *vin)
{
struct vivid_dev *dev = video_drvdata(file);
@@ -1240,7 +1240,7 @@ int vidioc_g_audio(struct file *file, void *fh, struct v4l2_audio *vin)
return 0;
}
-int vidioc_s_audio(struct file *file, void *fh, const struct v4l2_audio *vin)
+int vidioc_s_audio(struct file *file, void *priv, const struct v4l2_audio *vin)
{
struct vivid_dev *dev = video_drvdata(file);
@@ -1252,7 +1252,7 @@ int vidioc_s_audio(struct file *file, void *fh, const struct v4l2_audio *vin)
return 0;
}
-int vivid_video_g_frequency(struct file *file, void *fh, struct v4l2_frequency *vf)
+int vivid_video_g_frequency(struct file *file, void *priv, struct v4l2_frequency *vf)
{
struct vivid_dev *dev = video_drvdata(file);
@@ -1262,7 +1262,7 @@ int vivid_video_g_frequency(struct file *file, void *fh, struct v4l2_frequency *
return 0;
}
-int vivid_video_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf)
+int vivid_video_s_frequency(struct file *file, void *priv, const struct v4l2_frequency *vf)
{
struct vivid_dev *dev = video_drvdata(file);
@@ -1274,7 +1274,7 @@ int vivid_video_s_frequency(struct file *file, void *fh, const struct v4l2_frequ
return 0;
}
-int vivid_video_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt)
+int vivid_video_s_tuner(struct file *file, void *priv, const struct v4l2_tuner *vt)
{
struct vivid_dev *dev = video_drvdata(file);
@@ -1286,7 +1286,7 @@ int vivid_video_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt
return 0;
}
-int vivid_video_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
+int vivid_video_g_tuner(struct file *file, void *priv, struct v4l2_tuner *vt)
{
struct vivid_dev *dev = video_drvdata(file);
enum tpg_quality qual;
@@ -1490,7 +1490,7 @@ static bool valid_cvt_gtf_timings(struct v4l2_dv_timings *timings)
return false;
}
-int vivid_vid_cap_s_dv_timings(struct file *file, void *_fh,
+int vivid_vid_cap_s_dv_timings(struct file *file, void *priv,
struct v4l2_dv_timings *timings)
{
struct vivid_dev *dev = video_drvdata(file);
@@ -1513,7 +1513,7 @@ int vivid_vid_cap_s_dv_timings(struct file *file, void *_fh,
return 0;
}
-int vidioc_query_dv_timings(struct file *file, void *_fh,
+int vidioc_query_dv_timings(struct file *file, void *priv,
struct v4l2_dv_timings *timings)
{
struct vivid_dev *dev = video_drvdata(file);
@@ -1600,7 +1600,7 @@ void vivid_update_connected_outputs(struct vivid_dev *dev)
}
}
-int vidioc_s_edid(struct file *file, void *_fh,
+int vidioc_s_edid(struct file *file, void *priv,
struct v4l2_edid *edid)
{
struct vivid_dev *dev = video_drvdata(file);
@@ -1638,7 +1638,7 @@ int vidioc_s_edid(struct file *file, void *_fh,
return 0;
}
-int vidioc_enum_framesizes(struct file *file, void *fh,
+int vidioc_enum_framesizes(struct file *file, void *priv,
struct v4l2_frmsizeenum *fsize)
{
struct vivid_dev *dev = video_drvdata(file);
diff --git a/drivers/media/test-drivers/vivid/vivid-vid-cap.h b/drivers/media/test-drivers/vivid/vivid-vid-cap.h
index 7a8daf0af2ca..38a99f7e038e 100644
--- a/drivers/media/test-drivers/vivid/vivid-vid-cap.h
+++ b/drivers/media/test-drivers/vivid/vivid-vid-cap.h
@@ -29,7 +29,7 @@ int vidioc_g_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f);
int vidioc_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f);
int vidioc_s_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f);
int vivid_vid_cap_g_selection(struct file *file, void *priv, struct v4l2_selection *sel);
-int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection *s);
+int vivid_vid_cap_s_selection(struct file *file, void *priv, struct v4l2_selection *s);
int vivid_vid_cap_g_pixelaspect(struct file *file, void *priv, int type, struct v4l2_fract *f);
int vidioc_enum_fmt_vid_overlay(struct file *file, void *priv, struct v4l2_fmtdesc *f);
int vidioc_g_fmt_vid_overlay(struct file *file, void *priv, struct v4l2_format *f);
@@ -38,19 +38,19 @@ int vidioc_s_fmt_vid_overlay(struct file *file, void *priv, struct v4l2_format *
int vidioc_enum_input(struct file *file, void *priv, struct v4l2_input *inp);
int vidioc_g_input(struct file *file, void *priv, unsigned *i);
int vidioc_s_input(struct file *file, void *priv, unsigned i);
-int vidioc_enumaudio(struct file *file, void *fh, struct v4l2_audio *vin);
-int vidioc_g_audio(struct file *file, void *fh, struct v4l2_audio *vin);
-int vidioc_s_audio(struct file *file, void *fh, const struct v4l2_audio *vin);
-int vivid_video_g_frequency(struct file *file, void *fh, struct v4l2_frequency *vf);
-int vivid_video_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf);
-int vivid_video_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt);
-int vivid_video_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt);
+int vidioc_enumaudio(struct file *file, void *priv, struct v4l2_audio *vin);
+int vidioc_g_audio(struct file *file, void *priv, struct v4l2_audio *vin);
+int vidioc_s_audio(struct file *file, void *priv, const struct v4l2_audio *vin);
+int vivid_video_g_frequency(struct file *file, void *priv, struct v4l2_frequency *vf);
+int vivid_video_s_frequency(struct file *file, void *priv, const struct v4l2_frequency *vf);
+int vivid_video_s_tuner(struct file *file, void *priv, const struct v4l2_tuner *vt);
+int vivid_video_g_tuner(struct file *file, void *priv, struct v4l2_tuner *vt);
int vidioc_querystd(struct file *file, void *priv, v4l2_std_id *id);
int vivid_vid_cap_s_std(struct file *file, void *priv, v4l2_std_id id);
-int vivid_vid_cap_s_dv_timings(struct file *file, void *_fh, struct v4l2_dv_timings *timings);
-int vidioc_query_dv_timings(struct file *file, void *_fh, struct v4l2_dv_timings *timings);
-int vidioc_s_edid(struct file *file, void *_fh, struct v4l2_edid *edid);
-int vidioc_enum_framesizes(struct file *file, void *fh, struct v4l2_frmsizeenum *fsize);
+int vivid_vid_cap_s_dv_timings(struct file *file, void *priv, struct v4l2_dv_timings *timings);
+int vidioc_query_dv_timings(struct file *file, void *priv, struct v4l2_dv_timings *timings);
+int vidioc_s_edid(struct file *file, void *priv, struct v4l2_edid *edid);
+int vidioc_enum_framesizes(struct file *file, void *priv, struct v4l2_frmsizeenum *fsize);
int vidioc_enum_frameintervals(struct file *file, void *priv, struct v4l2_frmivalenum *fival);
int vivid_vid_cap_g_parm(struct file *file, void *priv, struct v4l2_streamparm *parm);
int vivid_vid_cap_s_parm(struct file *file, void *priv, struct v4l2_streamparm *parm);
diff --git a/drivers/media/test-drivers/vivid/vivid-vid-common.c b/drivers/media/test-drivers/vivid/vivid-vid-common.c
index df7678db67fb..786a1aa3b26b 100644
--- a/drivers/media/test-drivers/vivid/vivid-vid-common.c
+++ b/drivers/media/test-drivers/vivid/vivid-vid-common.c
@@ -1021,7 +1021,7 @@ int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *id)
return 0;
}
-int vidioc_g_dv_timings(struct file *file, void *_fh,
+int vidioc_g_dv_timings(struct file *file, void *priv,
struct v4l2_dv_timings *timings)
{
struct vivid_dev *dev = video_drvdata(file);
@@ -1039,7 +1039,7 @@ int vidioc_g_dv_timings(struct file *file, void *_fh,
return 0;
}
-int vidioc_enum_dv_timings(struct file *file, void *_fh,
+int vidioc_enum_dv_timings(struct file *file, void *priv,
struct v4l2_enum_dv_timings *timings)
{
struct vivid_dev *dev = video_drvdata(file);
@@ -1056,7 +1056,7 @@ int vidioc_enum_dv_timings(struct file *file, void *_fh,
NULL, NULL);
}
-int vidioc_dv_timings_cap(struct file *file, void *_fh,
+int vidioc_dv_timings_cap(struct file *file, void *priv,
struct v4l2_dv_timings_cap *cap)
{
struct vivid_dev *dev = video_drvdata(file);
@@ -1073,7 +1073,7 @@ int vidioc_dv_timings_cap(struct file *file, void *_fh,
return 0;
}
-int vidioc_g_edid(struct file *file, void *_fh,
+int vidioc_g_edid(struct file *file, void *priv,
struct v4l2_edid *edid)
{
struct vivid_dev *dev = video_drvdata(file);
diff --git a/drivers/media/test-drivers/vivid/vivid-vid-common.h b/drivers/media/test-drivers/vivid/vivid-vid-common.h
index c49ac85abaed..fb5878174dba 100644
--- a/drivers/media/test-drivers/vivid/vivid-vid-common.h
+++ b/drivers/media/test-drivers/vivid/vivid-vid-common.h
@@ -32,10 +32,10 @@ int vivid_vid_adjust_sel(unsigned flags, struct v4l2_rect *r);
int vivid_enum_fmt_vid(struct file *file, void *priv, struct v4l2_fmtdesc *f);
int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *id);
-int vidioc_g_dv_timings(struct file *file, void *_fh, struct v4l2_dv_timings *timings);
-int vidioc_enum_dv_timings(struct file *file, void *_fh, struct v4l2_enum_dv_timings *timings);
-int vidioc_dv_timings_cap(struct file *file, void *_fh, struct v4l2_dv_timings_cap *cap);
-int vidioc_g_edid(struct file *file, void *_fh, struct v4l2_edid *edid);
+int vidioc_g_dv_timings(struct file *file, void *priv, struct v4l2_dv_timings *timings);
+int vidioc_enum_dv_timings(struct file *file, void *priv, struct v4l2_enum_dv_timings *timings);
+int vidioc_dv_timings_cap(struct file *file, void *priv, struct v4l2_dv_timings_cap *cap);
+int vidioc_g_edid(struct file *file, void *priv, struct v4l2_edid *edid);
int vidioc_subscribe_event(struct v4l2_fh *fh, const struct v4l2_event_subscription *sub);
#endif
diff --git a/drivers/media/test-drivers/vivid/vivid-vid-out.c b/drivers/media/test-drivers/vivid/vivid-vid-out.c
index c3398bce6c15..8c037b90833e 100644
--- a/drivers/media/test-drivers/vivid/vivid-vid-out.c
+++ b/drivers/media/test-drivers/vivid/vivid-vid-out.c
@@ -672,7 +672,7 @@ int vivid_vid_out_g_selection(struct file *file, void *priv,
return 0;
}
-int vivid_vid_out_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
+int vivid_vid_out_s_selection(struct file *file, void *priv, struct v4l2_selection *s)
{
struct vivid_dev *dev = video_drvdata(file);
struct v4l2_rect *crop = &dev->crop_out;
@@ -880,7 +880,7 @@ int vidioc_s_fmt_vid_out_overlay(struct file *file, void *priv,
return ret;
}
-int vivid_vid_out_overlay(struct file *file, void *fh, unsigned i)
+int vivid_vid_out_overlay(struct file *file, void *priv, unsigned i)
{
struct vivid_dev *dev = video_drvdata(file);
@@ -893,7 +893,7 @@ int vivid_vid_out_overlay(struct file *file, void *fh, unsigned i)
return 0;
}
-int vivid_vid_out_g_fbuf(struct file *file, void *fh,
+int vivid_vid_out_g_fbuf(struct file *file, void *priv,
struct v4l2_framebuffer *a)
{
struct vivid_dev *dev = video_drvdata(file);
@@ -920,7 +920,7 @@ int vivid_vid_out_g_fbuf(struct file *file, void *fh,
return 0;
}
-int vivid_vid_out_s_fbuf(struct file *file, void *fh,
+int vivid_vid_out_s_fbuf(struct file *file, void *priv,
const struct v4l2_framebuffer *a)
{
struct vivid_dev *dev = video_drvdata(file);
@@ -1016,7 +1016,7 @@ int vidioc_s_output(struct file *file, void *priv, unsigned o)
return 0;
}
-int vidioc_enumaudout(struct file *file, void *fh, struct v4l2_audioout *vout)
+int vidioc_enumaudout(struct file *file, void *priv, struct v4l2_audioout *vout)
{
if (vout->index >= ARRAY_SIZE(vivid_audio_outputs))
return -EINVAL;
@@ -1024,7 +1024,7 @@ int vidioc_enumaudout(struct file *file, void *fh, struct v4l2_audioout *vout)
return 0;
}
-int vidioc_g_audout(struct file *file, void *fh, struct v4l2_audioout *vout)
+int vidioc_g_audout(struct file *file, void *priv, struct v4l2_audioout *vout)
{
struct vivid_dev *dev = video_drvdata(file);
@@ -1034,7 +1034,7 @@ int vidioc_g_audout(struct file *file, void *fh, struct v4l2_audioout *vout)
return 0;
}
-int vidioc_s_audout(struct file *file, void *fh, const struct v4l2_audioout *vout)
+int vidioc_s_audout(struct file *file, void *priv, const struct v4l2_audioout *vout)
{
struct vivid_dev *dev = video_drvdata(file);
@@ -1072,7 +1072,7 @@ static bool valid_cvt_gtf_timings(struct v4l2_dv_timings *timings)
return false;
}
-int vivid_vid_out_s_dv_timings(struct file *file, void *_fh,
+int vivid_vid_out_s_dv_timings(struct file *file, void *priv,
struct v4l2_dv_timings *timings)
{
struct vivid_dev *dev = video_drvdata(file);
diff --git a/drivers/media/test-drivers/vivid/vivid-vid-out.h b/drivers/media/test-drivers/vivid/vivid-vid-out.h
index 8d56314f4ea1..1d03891a5de5 100644
--- a/drivers/media/test-drivers/vivid/vivid-vid-out.h
+++ b/drivers/media/test-drivers/vivid/vivid-vid-out.h
@@ -22,23 +22,23 @@ int vidioc_g_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *f);
int vidioc_try_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *f);
int vidioc_s_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *f);
int vivid_vid_out_g_selection(struct file *file, void *priv, struct v4l2_selection *sel);
-int vivid_vid_out_s_selection(struct file *file, void *fh, struct v4l2_selection *s);
+int vivid_vid_out_s_selection(struct file *file, void *priv, struct v4l2_selection *s);
int vivid_vid_out_g_pixelaspect(struct file *file, void *priv, int type, struct v4l2_fract *f);
int vidioc_enum_fmt_vid_out_overlay(struct file *file, void *priv, struct v4l2_fmtdesc *f);
int vidioc_g_fmt_vid_out_overlay(struct file *file, void *priv, struct v4l2_format *f);
int vidioc_try_fmt_vid_out_overlay(struct file *file, void *priv, struct v4l2_format *f);
int vidioc_s_fmt_vid_out_overlay(struct file *file, void *priv, struct v4l2_format *f);
-int vivid_vid_out_overlay(struct file *file, void *fh, unsigned i);
-int vivid_vid_out_g_fbuf(struct file *file, void *fh, struct v4l2_framebuffer *a);
-int vivid_vid_out_s_fbuf(struct file *file, void *fh, const struct v4l2_framebuffer *a);
+int vivid_vid_out_overlay(struct file *file, void *priv, unsigned i);
+int vivid_vid_out_g_fbuf(struct file *file, void *priv, struct v4l2_framebuffer *a);
+int vivid_vid_out_s_fbuf(struct file *file, void *priv, const struct v4l2_framebuffer *a);
int vidioc_enum_output(struct file *file, void *priv, struct v4l2_output *out);
int vidioc_g_output(struct file *file, void *priv, unsigned *i);
int vidioc_s_output(struct file *file, void *priv, unsigned i);
-int vidioc_enumaudout(struct file *file, void *fh, struct v4l2_audioout *vout);
-int vidioc_g_audout(struct file *file, void *fh, struct v4l2_audioout *vout);
-int vidioc_s_audout(struct file *file, void *fh, const struct v4l2_audioout *vout);
+int vidioc_enumaudout(struct file *file, void *priv, struct v4l2_audioout *vout);
+int vidioc_g_audout(struct file *file, void *priv, struct v4l2_audioout *vout);
+int vidioc_s_audout(struct file *file, void *priv, const struct v4l2_audioout *vout);
int vivid_vid_out_s_std(struct file *file, void *priv, v4l2_std_id id);
-int vivid_vid_out_s_dv_timings(struct file *file, void *_fh, struct v4l2_dv_timings *timings);
+int vivid_vid_out_s_dv_timings(struct file *file, void *priv, struct v4l2_dv_timings *timings);
int vivid_vid_out_g_parm(struct file *file, void *priv, struct v4l2_streamparm *parm);
#endif
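
The vivid hunks above are a pure rename: the unused per-open cookie becomes priv in both the .c files and the headers, and no handler dereferences it — each one recovers its state from the file pointer instead. A minimal sketch of that idiom (the foo_* names are illustrative, not part of the patch):

#include <linux/videodev2.h>
#include <media/v4l2-dev.h>

struct foo_dev {
	v4l2_std_id std;
};

/* The priv cookie is ignored; driver state comes from video_drvdata(),
 * which returns whatever was registered with video_set_drvdata(). */
static int foo_g_std(struct file *file, void *priv, v4l2_std_id *id)
{
	struct foo_dev *dev = video_drvdata(file);

	*id = dev->std;
	return 0;
}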
diff --git a/drivers/media/tuners/xc4000.c b/drivers/media/tuners/xc4000.c
index 3cf54d776d36..b44c97e4e5ec 100644
--- a/drivers/media/tuners/xc4000.c
+++ b/drivers/media/tuners/xc4000.c
@@ -1087,12 +1087,12 @@ fail:
static void xc_debug_dump(struct xc4000_priv *priv)
{
- u16 adc_envelope;
+ u16 adc_envelope = 0;
u32 freq_error_hz = 0;
- u16 lock_status;
+ u16 lock_status = 0;
u32 hsync_freq_hz = 0;
- u16 frame_lines;
- u16 quality;
+ u16 frame_lines = 0;
+ u16 quality = 0;
u16 signal = 0;
u16 noise = 0;
u8 hw_majorversion = 0, hw_minorversion = 0;
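
The xc4000 initializers (and the matching xc5000 hunk below) close a debug-path hole: each of these locals is filled by a tuner register read that can fail without writing its output, after which xc_debug_dump() printed uninitialized stack data. Zeroing at declaration makes the failure case print a defined value. A self-contained sketch of the pattern, with xc_read_reg() as a stand-in for the driver's I2C accessor:

#include <linux/printk.h>
#include <linux/types.h>

/* Stand-in: a real read goes over I2C and can fail, in which case *val
 * is left untouched. */
static int xc_read_reg(u16 reg, u16 *val)
{
	return -EIO;		/* simulate the failing read */
}

static void debug_dump(void)
{
	u16 lock_status = 0;	/* stays 0 when the read fails */

	xc_read_reg(0x0002, &lock_status);
	pr_debug("*** lock status = 0x%04x\n", lock_status);
}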
diff --git a/drivers/media/tuners/xc5000.c b/drivers/media/tuners/xc5000.c
index 30aa4ee958bd..a28481edd22e 100644
--- a/drivers/media/tuners/xc5000.c
+++ b/drivers/media/tuners/xc5000.c
@@ -622,14 +622,14 @@ static int xc5000_fwupload(struct dvb_frontend *fe,
static void xc_debug_dump(struct xc5000_priv *priv)
{
- u16 adc_envelope;
+ u16 adc_envelope = 0;
u32 freq_error_hz = 0;
- u16 lock_status;
+ u16 lock_status = 0;
u32 hsync_freq_hz = 0;
- u16 frame_lines;
- u16 quality;
- u16 snr;
- u16 totalgain;
+ u16 frame_lines = 0;
+ u16 quality = 0;
+ u16 snr = 0;
+ u16 totalgain = 0;
u8 hw_majorversion = 0, hw_minorversion = 0;
u8 fw_majorversion = 0, fw_minorversion = 0;
u16 fw_buildversion = 0;
@@ -1304,7 +1304,7 @@ static void xc5000_release(struct dvb_frontend *fe)
mutex_lock(&xc5000_list_mutex);
if (priv) {
- cancel_delayed_work(&priv->timer_sleep);
+ cancel_delayed_work_sync(&priv->timer_sleep);
hybrid_tuner_release_state(priv);
}
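
The xc5000_release() fix is about lifetime: cancel_delayed_work() only removes a not-yet-started item, so a callback that was already running could still touch priv after hybrid_tuner_release_state() freed it. cancel_delayed_work_sync() additionally waits for a running callback to finish; it may sleep, which is fine here because release runs in process context under a mutex. The general rule, sketched with placeholder names:

#include <linux/slab.h>
#include <linux/workqueue.h>

struct foo_priv {
	struct delayed_work timer_sleep;
};

static void foo_release(struct foo_priv *priv)
{
	/* Cancels a pending timer AND waits for an in-flight callback;
	 * plain cancel_delayed_work() would only do the former. */
	cancel_delayed_work_sync(&priv->timer_sleep);
	kfree(priv);		/* cannot race the work item anymore */
}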
diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
index e5dff969ed57..fbaa542c8259 100644
--- a/drivers/media/usb/au0828/au0828-video.c
+++ b/drivers/media/usb/au0828/au0828-video.c
@@ -1921,9 +1921,8 @@ int au0828_analog_register(struct au0828_dev *dev,
iface_desc = interface->cur_altsetting;
for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
endpoint = &iface_desc->endpoint[i].desc;
- if (((endpoint->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
- == USB_DIR_IN) &&
- ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
+ if (usb_endpoint_dir_in(endpoint) &&
+ (usb_endpoint_type(endpoint)
== USB_ENDPOINT_XFER_ISOC)) {
/* we find our isoc in endpoint */
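
The open-coded mask tests removed from au0828 have long had one-line equivalents in <linux/usb.h>; the stk1160-core hunk further down goes one step further and uses the combined helper. All three spellings are equivalent:

#include <linux/usb.h>

static bool ep_is_isoc_in(const struct usb_endpoint_descriptor *epd)
{
	/* open-coded form this patch removes */
	bool manual = ((epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN) &&
		      ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_ISOC);
	/* split helpers, as used by au0828 above */
	bool split = usb_endpoint_dir_in(epd) &&
		     usb_endpoint_type(epd) == USB_ENDPOINT_XFER_ISOC;
	/* single combined helper, as used by stk1160 below */
	bool combined = usb_endpoint_is_isoc_in(epd);

	return manual && split && combined;	/* the three always agree */
}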
diff --git a/drivers/media/usb/cx231xx/cx231xx-417.c b/drivers/media/usb/cx231xx/cx231xx-417.c
index e585c8f6e4c5..c695a97e202b 100644
--- a/drivers/media/usb/cx231xx/cx231xx-417.c
+++ b/drivers/media/usb/cx231xx/cx231xx-417.c
@@ -1499,7 +1499,7 @@ static int vidioc_g_selection(struct file *file, void *priv,
return 0;
}
-static int vidioc_g_std(struct file *file, void *fh0, v4l2_std_id *norm)
+static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *norm)
{
struct cx231xx *dev = video_drvdata(file);
diff --git a/drivers/media/usb/em28xx/Kconfig b/drivers/media/usb/em28xx/Kconfig
index cb61fd6cc6c6..3122d4bdfc59 100644
--- a/drivers/media/usb/em28xx/Kconfig
+++ b/drivers/media/usb/em28xx/Kconfig
@@ -68,6 +68,7 @@ config VIDEO_EM28XX_DVB
select MEDIA_TUNER_XC5000 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_MT2060 if MEDIA_SUBDRV_AUTOSELECT
select DVB_MXL692 if MEDIA_SUBDRV_AUTOSELECT
+ select GPIOLIB_LEGACY if GPIOLIB && DVB_CXD2820R
help
This adds support for DVB cards based on the
Empiatech em28xx chips.
diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
index 9fce59979e3b..b94f5c70ab75 100644
--- a/drivers/media/usb/em28xx/em28xx-dvb.c
+++ b/drivers/media/usb/em28xx/em28xx-dvb.c
@@ -727,7 +727,7 @@ static int em28xx_pctv_290e_set_lna(struct dvb_frontend *fe)
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct em28xx_i2c_bus *i2c_bus = fe->dvb->priv;
struct em28xx *dev = i2c_bus->dev;
-#ifdef CONFIG_GPIOLIB
+#ifdef CONFIG_GPIOLIB_LEGACY
struct em28xx_dvb *dvb = dev->dvb;
int ret;
unsigned long flags;
@@ -1705,7 +1705,7 @@ static int em28xx_dvb_init(struct em28xx *dev)
goto out_free;
}
-#ifdef CONFIG_GPIOLIB
+#ifdef CONFIG_GPIOLIB_LEGACY
/* enable LNA for DVB-T, DVB-T2 and DVB-C */
result = gpio_request_one(dvb->lna_gpio,
GPIOF_OUT_INIT_LOW, NULL);
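
The em28xx Kconfig and ifdef changes track the GPIO subsystem: the numeric gpio_request_one()/gpio_set_value() API now sits behind CONFIG_GPIOLIB_LEGACY, so the driver must select that symbol and guard its LNA code with the new option. The guarded call site keeps the legacy shape (a fragment, as in the hunk above — descriptor-based gpiod_* calls are the modern replacement and need no such guard):

#ifdef CONFIG_GPIOLIB_LEGACY
	result = gpio_request_one(dvb->lna_gpio, GPIOF_OUT_INIT_LOW, NULL);
#endif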
diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c
index 25edd2189654..3fc15d16df8e 100644
--- a/drivers/media/usb/gspca/gspca.c
+++ b/drivers/media/usb/gspca/gspca.c
@@ -1029,15 +1029,15 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
return 0;
}
-static int vidioc_g_fmt_vid_cap(struct file *file, void *_priv,
+static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *fmt)
{
struct gspca_dev *gspca_dev = video_drvdata(file);
- u32 priv = fmt->fmt.pix.priv;
+ u32 fmt_priv = fmt->fmt.pix.priv;
fmt->fmt.pix = gspca_dev->pixfmt;
/* some drivers use priv internally, so keep the original value */
- fmt->fmt.pix.priv = priv;
+ fmt->fmt.pix.priv = fmt_priv;
return 0;
}
@@ -1075,24 +1075,24 @@ static int try_fmt_vid_cap(struct gspca_dev *gspca_dev,
return mode; /* used when s_fmt */
}
-static int vidioc_try_fmt_vid_cap(struct file *file, void *_priv,
+static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *fmt)
{
struct gspca_dev *gspca_dev = video_drvdata(file);
- u32 priv = fmt->fmt.pix.priv;
+ u32 fmt_priv = fmt->fmt.pix.priv;
if (try_fmt_vid_cap(gspca_dev, fmt) < 0)
return -EINVAL;
/* some drivers use priv internally, so keep the original value */
- fmt->fmt.pix.priv = priv;
+ fmt->fmt.pix.priv = fmt_priv;
return 0;
}
-static int vidioc_s_fmt_vid_cap(struct file *file, void *_priv,
+static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *fmt)
{
struct gspca_dev *gspca_dev = video_drvdata(file);
- u32 priv = fmt->fmt.pix.priv;
+ u32 fmt_priv = fmt->fmt.pix.priv;
int mode;
if (vb2_is_busy(&gspca_dev->queue))
@@ -1109,7 +1109,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *_priv,
else
gspca_dev->pixfmt = gspca_dev->cam.cam_mode[mode];
/* some drivers use priv internally, so keep the original value */
- fmt->fmt.pix.priv = priv;
+ fmt->fmt.pix.priv = fmt_priv;
return 0;
}
diff --git a/drivers/media/usb/hdpvr/hdpvr-video.c b/drivers/media/usb/hdpvr/hdpvr-video.c
index fd7d2a9d0449..8c7ae362d992 100644
--- a/drivers/media/usb/hdpvr/hdpvr-video.c
+++ b/drivers/media/usb/hdpvr/hdpvr-video.c
@@ -52,6 +52,11 @@ struct hdpvr_fh {
bool legacy_mode;
};
+static inline struct hdpvr_fh *file_to_hdpvr_fh(struct file *file)
+{
+ return container_of(file_to_v4l2_fh(file), struct hdpvr_fh, fh);
+}
+
static uint list_size(struct list_head *list)
{
struct list_head *tmp;
@@ -380,8 +385,7 @@ static int hdpvr_open(struct file *file)
return -ENOMEM;
fh->legacy_mode = true;
v4l2_fh_init(&fh->fh, video_devdata(file));
- v4l2_fh_add(&fh->fh);
- file->private_data = fh;
+ v4l2_fh_add(&fh->fh, file);
return 0;
}
@@ -390,7 +394,7 @@ static int hdpvr_release(struct file *file)
struct hdpvr_device *dev = video_drvdata(file);
mutex_lock(&dev->io_mutex);
- if (file->private_data == dev->owner) {
+ if (file_to_v4l2_fh(file) == dev->owner) {
hdpvr_stop_streaming(dev);
dev->owner = NULL;
}
@@ -426,7 +430,7 @@ static ssize_t hdpvr_read(struct file *file, char __user *buffer, size_t count,
mutex_unlock(&dev->io_mutex);
goto err;
}
- dev->owner = file->private_data;
+ dev->owner = file_to_v4l2_fh(file);
print_buffer_status();
}
mutex_unlock(&dev->io_mutex);
@@ -541,7 +545,7 @@ static __poll_t hdpvr_poll(struct file *filp, poll_table *wait)
"start_streaming failed\n");
dev->status = STATUS_IDLE;
} else {
- dev->owner = filp->private_data;
+ dev->owner = file_to_v4l2_fh(filp);
}
print_buffer_status();
@@ -586,11 +590,11 @@ static int vidioc_querycap(struct file *file, void *priv,
return 0;
}
-static int vidioc_s_std(struct file *file, void *_fh,
+static int vidioc_s_std(struct file *file, void *priv,
v4l2_std_id std)
{
struct hdpvr_device *dev = video_drvdata(file);
- struct hdpvr_fh *fh = _fh;
+ struct hdpvr_fh *fh = file_to_hdpvr_fh(file);
u8 std_type = 1;
if (!fh->legacy_mode && dev->options.video_input == HDPVR_COMPONENT)
@@ -606,11 +610,12 @@ static int vidioc_s_std(struct file *file, void *_fh,
return hdpvr_config_call(dev, CTRL_VIDEO_STD_TYPE, std_type);
}
-static int vidioc_g_std(struct file *file, void *_fh,
+static int vidioc_g_std(struct file *file, void *priv,
v4l2_std_id *std)
{
struct hdpvr_device *dev = video_drvdata(file);
- struct hdpvr_fh *fh = _fh;
+ struct hdpvr_fh *fh = file_to_hdpvr_fh(file);
+
if (!fh->legacy_mode && dev->options.video_input == HDPVR_COMPONENT)
return -ENODATA;
@@ -618,11 +623,11 @@ static int vidioc_g_std(struct file *file, void *_fh,
return 0;
}
-static int vidioc_querystd(struct file *file, void *_fh, v4l2_std_id *a)
+static int vidioc_querystd(struct file *file, void *priv, v4l2_std_id *a)
{
struct hdpvr_device *dev = video_drvdata(file);
+ struct hdpvr_fh *fh = file_to_hdpvr_fh(file);
struct hdpvr_video_info vid_info;
- struct hdpvr_fh *fh = _fh;
int ret;
*a = V4L2_STD_UNKNOWN;
@@ -637,11 +642,11 @@ static int vidioc_querystd(struct file *file, void *_fh, v4l2_std_id *a)
return ret;
}
-static int vidioc_s_dv_timings(struct file *file, void *_fh,
+static int vidioc_s_dv_timings(struct file *file, void *priv,
struct v4l2_dv_timings *timings)
{
struct hdpvr_device *dev = video_drvdata(file);
- struct hdpvr_fh *fh = _fh;
+ struct hdpvr_fh *fh = file_to_hdpvr_fh(file);
int i;
fh->legacy_mode = false;
@@ -660,11 +665,11 @@ static int vidioc_s_dv_timings(struct file *file, void *_fh,
return 0;
}
-static int vidioc_g_dv_timings(struct file *file, void *_fh,
+static int vidioc_g_dv_timings(struct file *file, void *priv,
struct v4l2_dv_timings *timings)
{
struct hdpvr_device *dev = video_drvdata(file);
- struct hdpvr_fh *fh = _fh;
+ struct hdpvr_fh *fh = file_to_hdpvr_fh(file);
fh->legacy_mode = false;
if (dev->options.video_input)
@@ -673,11 +678,11 @@ static int vidioc_g_dv_timings(struct file *file, void *_fh,
return 0;
}
-static int vidioc_query_dv_timings(struct file *file, void *_fh,
+static int vidioc_query_dv_timings(struct file *file, void *priv,
struct v4l2_dv_timings *timings)
{
struct hdpvr_device *dev = video_drvdata(file);
- struct hdpvr_fh *fh = _fh;
+ struct hdpvr_fh *fh = file_to_hdpvr_fh(file);
struct hdpvr_video_info vid_info;
bool interlaced;
int ret = 0;
@@ -715,11 +720,11 @@ static int vidioc_query_dv_timings(struct file *file, void *_fh,
return ret;
}
-static int vidioc_enum_dv_timings(struct file *file, void *_fh,
+static int vidioc_enum_dv_timings(struct file *file, void *priv,
struct v4l2_enum_dv_timings *timings)
{
struct hdpvr_device *dev = video_drvdata(file);
- struct hdpvr_fh *fh = _fh;
+ struct hdpvr_fh *fh = file_to_hdpvr_fh(file);
fh->legacy_mode = false;
memset(timings->reserved, 0, sizeof(timings->reserved));
@@ -731,11 +736,11 @@ static int vidioc_enum_dv_timings(struct file *file, void *_fh,
return 0;
}
-static int vidioc_dv_timings_cap(struct file *file, void *_fh,
+static int vidioc_dv_timings_cap(struct file *file, void *priv,
struct v4l2_dv_timings_cap *cap)
{
struct hdpvr_device *dev = video_drvdata(file);
- struct hdpvr_fh *fh = _fh;
+ struct hdpvr_fh *fh = file_to_hdpvr_fh(file);
fh->legacy_mode = false;
if (dev->options.video_input)
@@ -758,7 +763,7 @@ static const char *iname[] = {
[HDPVR_COMPOSITE] = "Composite",
};
-static int vidioc_enum_input(struct file *file, void *_fh, struct v4l2_input *i)
+static int vidioc_enum_input(struct file *file, void *priv, struct v4l2_input *i)
{
unsigned int n;
@@ -778,7 +783,7 @@ static int vidioc_enum_input(struct file *file, void *_fh, struct v4l2_input *i)
return 0;
}
-static int vidioc_s_input(struct file *file, void *_fh,
+static int vidioc_s_input(struct file *file, void *priv,
unsigned int index)
{
struct hdpvr_device *dev = video_drvdata(file);
@@ -812,7 +817,7 @@ static int vidioc_s_input(struct file *file, void *_fh,
return retval;
}
-static int vidioc_g_input(struct file *file, void *private_data,
+static int vidioc_g_input(struct file *file, void *priv,
unsigned int *index)
{
struct hdpvr_device *dev = video_drvdata(file);
@@ -844,7 +849,7 @@ static int vidioc_enumaudio(struct file *file, void *priv,
return 0;
}
-static int vidioc_s_audio(struct file *file, void *private_data,
+static int vidioc_s_audio(struct file *file, void *priv,
const struct v4l2_audio *audio)
{
struct hdpvr_device *dev = video_drvdata(file);
@@ -863,7 +868,7 @@ static int vidioc_s_audio(struct file *file, void *private_data,
return retval;
}
-static int vidioc_g_audio(struct file *file, void *private_data,
+static int vidioc_g_audio(struct file *file, void *priv,
struct v4l2_audio *audio)
{
struct hdpvr_device *dev = video_drvdata(file);
@@ -980,7 +985,7 @@ static int hdpvr_s_ctrl(struct v4l2_ctrl *ctrl)
return ret;
}
-static int vidioc_enum_fmt_vid_cap(struct file *file, void *private_data,
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
if (f->index != 0)
@@ -991,11 +996,11 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *private_data,
return 0;
}
-static int vidioc_g_fmt_vid_cap(struct file *file, void *_fh,
+static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct hdpvr_device *dev = video_drvdata(file);
- struct hdpvr_fh *fh = _fh;
+ struct hdpvr_fh *fh = file_to_hdpvr_fh(file);
int ret;
/*
@@ -1048,7 +1053,7 @@ static int vidioc_encoder_cmd(struct file *filp, void *priv,
switch (a->cmd) {
case V4L2_ENC_CMD_START:
- if (dev->owner && filp->private_data != dev->owner) {
+ if (dev->owner && file_to_v4l2_fh(filp) != dev->owner) {
res = -EBUSY;
break;
}
@@ -1056,12 +1061,12 @@ static int vidioc_encoder_cmd(struct file *filp, void *priv,
break;
res = hdpvr_start_streaming(dev);
if (!res)
- dev->owner = filp->private_data;
+ dev->owner = file_to_v4l2_fh(filp);
else
dev->status = STATUS_IDLE;
break;
case V4L2_ENC_CMD_STOP:
- if (dev->owner && filp->private_data != dev->owner) {
+ if (dev->owner && file_to_v4l2_fh(filp) != dev->owner) {
res = -EBUSY;
break;
}
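
file_to_hdpvr_fh() above, and to_pvr2_v4l2_fh() in the next file, are the same pattern: once the V4L2 core hands back the struct v4l2_fh for an open file, a driver that wraps the fh in a larger per-open structure recovers it with container_of(). The only structural requirement is that the v4l2_fh is embedded by value. Generic shape, with foo_* placeholders (file_to_v4l2_fh() is the accessor this series converts drivers to):

#include <linux/container_of.h>
#include <media/v4l2-fh.h>

struct foo_fh {
	struct v4l2_fh fh;	/* must be embedded for container_of() */
	bool legacy_mode;
};

static inline struct foo_fh *file_to_foo_fh(struct file *file)
{
	/* Walk back from the embedded member to the enclosing struct. */
	return container_of(file_to_v4l2_fh(file), struct foo_fh, fh);
}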
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
index ad38e1240541..f9535a484738 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
@@ -21,8 +21,6 @@
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
-struct pvr2_v4l2_dev;
-struct pvr2_v4l2_fh;
struct pvr2_v4l2;
struct pvr2_v4l2_dev {
@@ -48,6 +46,11 @@ struct pvr2_v4l2_fh {
unsigned int input_cnt;
};
+static inline struct pvr2_v4l2_fh *to_pvr2_v4l2_fh(struct file *filp)
+{
+ return container_of(file_to_v4l2_fh(filp), struct pvr2_v4l2_fh, fh);
+}
+
struct pvr2_v4l2 {
struct pvr2_channel channel;
@@ -108,7 +111,7 @@ static struct v4l2_format pvr_format [] = {
*/
static int pvr2_querycap(struct file *file, void *priv, struct v4l2_capability *cap)
{
- struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2_fh *fh = to_pvr2_v4l2_fh(file);
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
strscpy(cap->driver, "pvrusb2", sizeof(cap->driver));
@@ -123,7 +126,7 @@ static int pvr2_querycap(struct file *file, void *priv, struct v4l2_capability *
static int pvr2_g_std(struct file *file, void *priv, v4l2_std_id *std)
{
- struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2_fh *fh = to_pvr2_v4l2_fh(file);
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
int val = 0;
int ret;
@@ -136,7 +139,7 @@ static int pvr2_g_std(struct file *file, void *priv, v4l2_std_id *std)
static int pvr2_s_std(struct file *file, void *priv, v4l2_std_id std)
{
- struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2_fh *fh = to_pvr2_v4l2_fh(file);
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
int ret;
@@ -148,7 +151,7 @@ static int pvr2_s_std(struct file *file, void *priv, v4l2_std_id std)
static int pvr2_querystd(struct file *file, void *priv, v4l2_std_id *std)
{
- struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2_fh *fh = to_pvr2_v4l2_fh(file);
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
int val = 0;
int ret;
@@ -161,7 +164,7 @@ static int pvr2_querystd(struct file *file, void *priv, v4l2_std_id *std)
static int pvr2_enum_input(struct file *file, void *priv, struct v4l2_input *vi)
{
- struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2_fh *fh = to_pvr2_v4l2_fh(file);
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
struct pvr2_ctrl *cptr;
struct v4l2_input tmp;
@@ -209,7 +212,7 @@ static int pvr2_enum_input(struct file *file, void *priv, struct v4l2_input *vi)
static int pvr2_g_input(struct file *file, void *priv, unsigned int *i)
{
- struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2_fh *fh = to_pvr2_v4l2_fh(file);
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
unsigned int idx;
struct pvr2_ctrl *cptr;
@@ -231,7 +234,7 @@ static int pvr2_g_input(struct file *file, void *priv, unsigned int *i)
static int pvr2_s_input(struct file *file, void *priv, unsigned int inp)
{
- struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2_fh *fh = to_pvr2_v4l2_fh(file);
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
int ret;
@@ -286,7 +289,7 @@ static int pvr2_s_audio(struct file *file, void *priv, const struct v4l2_audio *
static int pvr2_g_tuner(struct file *file, void *priv, struct v4l2_tuner *vt)
{
- struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2_fh *fh = to_pvr2_v4l2_fh(file);
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
if (vt->index != 0)
@@ -298,7 +301,7 @@ static int pvr2_g_tuner(struct file *file, void *priv, struct v4l2_tuner *vt)
static int pvr2_s_tuner(struct file *file, void *priv, const struct v4l2_tuner *vt)
{
- struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2_fh *fh = to_pvr2_v4l2_fh(file);
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
int ret;
@@ -314,7 +317,7 @@ static int pvr2_s_tuner(struct file *file, void *priv, const struct v4l2_tuner *
static int pvr2_s_frequency(struct file *file, void *priv, const struct v4l2_frequency *vf)
{
- struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2_fh *fh = to_pvr2_v4l2_fh(file);
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
unsigned long fv;
struct v4l2_tuner vt;
@@ -349,7 +352,7 @@ static int pvr2_s_frequency(struct file *file, void *priv, const struct v4l2_fre
static int pvr2_g_frequency(struct file *file, void *priv, struct v4l2_frequency *vf)
{
- struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2_fh *fh = to_pvr2_v4l2_fh(file);
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
int val = 0;
int cur_input;
@@ -391,7 +394,7 @@ static int pvr2_enum_fmt_vid_cap(struct file *file, void *priv, struct v4l2_fmtd
static int pvr2_g_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *vf)
{
- struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2_fh *fh = to_pvr2_v4l2_fh(file);
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
int val;
@@ -411,7 +414,7 @@ static int pvr2_g_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format
static int pvr2_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *vf)
{
- struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2_fh *fh = to_pvr2_v4l2_fh(file);
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
int lmin, lmax, ldef;
struct pvr2_ctrl *hcp, *vcp;
@@ -449,7 +452,7 @@ static int pvr2_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_forma
static int pvr2_s_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *vf)
{
- struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2_fh *fh = to_pvr2_v4l2_fh(file);
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
struct pvr2_ctrl *hcp, *vcp;
int ret = pvr2_try_fmt_vid_cap(file, fh, vf);
@@ -466,7 +469,7 @@ static int pvr2_s_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format
static int pvr2_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
{
- struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2_fh *fh = to_pvr2_v4l2_fh(file);
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
struct pvr2_v4l2_dev *pdi = fh->pdi;
int ret;
@@ -485,7 +488,7 @@ static int pvr2_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
static int pvr2_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
{
- struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2_fh *fh = to_pvr2_v4l2_fh(file);
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
if (!fh->pdi->stream) {
@@ -500,7 +503,7 @@ static int pvr2_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
static int pvr2_query_ext_ctrl(struct file *file, void *priv,
struct v4l2_query_ext_ctrl *vc)
{
- struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2_fh *fh = to_pvr2_v4l2_fh(file);
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
struct pvr2_ctrl *cptr;
int val;
@@ -561,7 +564,7 @@ static int pvr2_query_ext_ctrl(struct file *file, void *priv,
static int pvr2_querymenu(struct file *file, void *priv, struct v4l2_querymenu *vm)
{
- struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2_fh *fh = to_pvr2_v4l2_fh(file);
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
unsigned int cnt = 0;
int ret;
@@ -577,7 +580,7 @@ static int pvr2_querymenu(struct file *file, void *priv, struct v4l2_querymenu *
static int pvr2_g_ext_ctrls(struct file *file, void *priv,
struct v4l2_ext_controls *ctls)
{
- struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2_fh *fh = to_pvr2_v4l2_fh(file);
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
struct v4l2_ext_control *ctrl;
struct pvr2_ctrl *cptr;
@@ -612,7 +615,7 @@ static int pvr2_g_ext_ctrls(struct file *file, void *priv,
static int pvr2_s_ext_ctrls(struct file *file, void *priv,
struct v4l2_ext_controls *ctls)
{
- struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2_fh *fh = to_pvr2_v4l2_fh(file);
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
struct v4l2_ext_control *ctrl;
unsigned int idx;
@@ -637,7 +640,7 @@ commit:
static int pvr2_try_ext_ctrls(struct file *file, void *priv,
struct v4l2_ext_controls *ctls)
{
- struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2_fh *fh = to_pvr2_v4l2_fh(file);
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
struct v4l2_ext_control *ctrl;
struct pvr2_ctrl *pctl;
@@ -659,7 +662,7 @@ static int pvr2_try_ext_ctrls(struct file *file, void *priv,
static int pvr2_g_pixelaspect(struct file *file, void *priv,
int type, struct v4l2_fract *f)
{
- struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2_fh *fh = to_pvr2_v4l2_fh(file);
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
struct v4l2_cropcap cap = { .type = type };
int ret;
@@ -675,7 +678,7 @@ static int pvr2_g_pixelaspect(struct file *file, void *priv,
static int pvr2_g_selection(struct file *file, void *priv,
struct v4l2_selection *sel)
{
- struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2_fh *fh = to_pvr2_v4l2_fh(file);
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
struct v4l2_cropcap cap;
int val = 0;
@@ -726,7 +729,7 @@ static int pvr2_g_selection(struct file *file, void *priv,
static int pvr2_s_selection(struct file *file, void *priv,
struct v4l2_selection *sel)
{
- struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2_fh *fh = to_pvr2_v4l2_fh(file);
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
int ret;
@@ -758,7 +761,7 @@ commit:
static int pvr2_log_status(struct file *file, void *priv)
{
- struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2_fh *fh = to_pvr2_v4l2_fh(file);
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
pvr2_hdw_trigger_module_log(hdw);
@@ -882,7 +885,7 @@ static void pvr2_v4l2_internal_check(struct pvr2_channel *chp)
static int pvr2_v4l2_release(struct file *file)
{
- struct pvr2_v4l2_fh *fhp = file->private_data;
+ struct pvr2_v4l2_fh *fhp = to_pvr2_v4l2_fh(file);
struct pvr2_v4l2 *vp = fhp->pdi->v4lp;
struct pvr2_hdw *hdw = fhp->channel.mc_head->hdw;
@@ -897,9 +900,8 @@ static int pvr2_v4l2_release(struct file *file)
fhp->rhp = NULL;
}
- v4l2_fh_del(&fhp->fh);
+ v4l2_fh_del(&fhp->fh, file);
v4l2_fh_exit(&fhp->fh);
- file->private_data = NULL;
pvr2_channel_done(&fhp->channel);
pvr2_trace(PVR2_TRACE_STRUCT,
@@ -1000,10 +1002,9 @@ static int pvr2_v4l2_open(struct file *file)
}
fhp->file = file;
- file->private_data = fhp;
fhp->fw_mode_flag = pvr2_hdw_cpufw_get_enabled(hdw);
- v4l2_fh_add(&fhp->fh);
+ v4l2_fh_add(&fhp->fh, file);
return 0;
}
@@ -1055,7 +1056,7 @@ static int pvr2_v4l2_iosetup(struct pvr2_v4l2_fh *fh)
static ssize_t pvr2_v4l2_read(struct file *file,
char __user *buff, size_t count, loff_t *ppos)
{
- struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2_fh *fh = to_pvr2_v4l2_fh(file);
int ret;
if (fh->fw_mode_flag) {
@@ -1117,7 +1118,7 @@ static ssize_t pvr2_v4l2_read(struct file *file,
static __poll_t pvr2_v4l2_poll(struct file *file, poll_table *wait)
{
__poll_t mask = 0;
- struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2_fh *fh = to_pvr2_v4l2_fh(file);
int ret;
if (fh->fw_mode_flag) {
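
The pvrusb2 open/release hunks show the new fh registration shape used throughout this series: v4l2_fh_add() and v4l2_fh_del() now take the struct file, and the drivers' manual file->private_data assignments disappear, so the core presumably owns that linkage now (the core-side change is not part of this excerpt; treat the exact semantics as the series' contract). Condensed, reusing the foo_fh sketch above:

static int foo_open(struct file *file)
{
	struct foo_fh *fh = kzalloc(sizeof(*fh), GFP_KERNEL);

	if (!fh)
		return -ENOMEM;
	v4l2_fh_init(&fh->fh, video_devdata(file));
	v4l2_fh_add(&fh->fh, file);	/* no file->private_data = fh */
	return 0;
}

static int foo_release(struct file *file)
{
	struct foo_fh *fh = file_to_foo_fh(file);

	v4l2_fh_del(&fh->fh, file);
	v4l2_fh_exit(&fh->fh);
	kfree(fh);
	return 0;
}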
diff --git a/drivers/media/usb/stk1160/stk1160-core.c b/drivers/media/usb/stk1160/stk1160-core.c
index ce717502ea4c..25d725c2ab3c 100644
--- a/drivers/media/usb/stk1160/stk1160-core.c
+++ b/drivers/media/usb/stk1160/stk1160-core.c
@@ -195,8 +195,7 @@ static int stk1160_scan_usb(struct usb_interface *intf, struct usb_device *udev,
if (udev->speed == USB_SPEED_HIGH)
size = size * hb_mult(sizedescr);
- if (usb_endpoint_xfer_isoc(desc) &&
- usb_endpoint_dir_in(desc)) {
+ if (usb_endpoint_is_isoc_in(desc)) {
switch (desc->bEndpointAddress) {
case STK1160_EP_AUDIO:
has_audio = true;
diff --git a/drivers/media/usb/stk1160/stk1160-video.c b/drivers/media/usb/stk1160/stk1160-video.c
index 416cb74377eb..f4baf9263286 100644
--- a/drivers/media/usb/stk1160/stk1160-video.c
+++ b/drivers/media/usb/stk1160/stk1160-video.c
@@ -408,8 +408,13 @@ static int stk1160_fill_urb(struct stk1160 *dev, struct stk1160_urb *stk_urb,
stk_urb->transfer_buffer = usb_alloc_noncoherent(dev->udev, sb_size,
GFP_KERNEL, &stk_urb->dma,
DMA_FROM_DEVICE, &stk_urb->sgt);
- if (!stk_urb->transfer_buffer)
+ if (!stk_urb->transfer_buffer) {
+ /*
+		 * If the buffer allocation failed, exit but still return 0,
+		 * since the driver can keep working with fewer buffers.
+ */
goto free_urb;
+ }
stk_urb->dev = dev;
return 0;
diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
index efe609d70877..2905505c240c 100644
--- a/drivers/media/usb/uvc/uvc_ctrl.c
+++ b/drivers/media/usb/uvc/uvc_ctrl.c
@@ -376,6 +376,15 @@ static const struct uvc_control_info uvc_ctrls[] = {
| UVC_CTRL_FLAG_GET_DEF
| UVC_CTRL_FLAG_AUTO_UPDATE,
},
+ {
+ .entity = UVC_GUID_CHROMEOS_XU,
+ .selector = UVC_CROSXU_CONTROL_IQ_PROFILE,
+ .index = 3,
+ .size = 1,
+ .flags = UVC_CTRL_FLAG_SET_CUR
+ | UVC_CTRL_FLAG_GET_RANGE
+ | UVC_CTRL_FLAG_RESTORE,
+ },
};
static const u32 uvc_control_classes[] = {
@@ -384,6 +393,19 @@ static const u32 uvc_control_classes[] = {
};
static const int exposure_auto_mapping[] = { 2, 1, 4, 8 };
+static const int cros_colorfx_mapping[] = {
+ 1, /* V4L2_COLORFX_NONE */
+ -1, /* V4L2_COLORFX_BW */
+ -1, /* V4L2_COLORFX_SEPIA */
+ -1, /* V4L2_COLORFX_NEGATIVE */
+ -1, /* V4L2_COLORFX_EMBOSS */
+ -1, /* V4L2_COLORFX_SKETCH */
+ -1, /* V4L2_COLORFX_SKY_BLUE */
+ -1, /* V4L2_COLORFX_GRASS_GREEN */
+ -1, /* V4L2_COLORFX_SKIN_WHITEN */
+ 0, /* V4L2_COLORFX_VIVID */
+};
+
static bool uvc_ctrl_mapping_is_compound(struct uvc_control_mapping *mapping)
{
@@ -975,6 +997,18 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
.data_type = UVC_CTRL_DATA_TYPE_BITMASK,
.name = "Region of Interest Auto Ctrls",
},
+ {
+ .id = V4L2_CID_COLORFX,
+ .entity = UVC_GUID_CHROMEOS_XU,
+ .selector = UVC_CROSXU_CONTROL_IQ_PROFILE,
+ .size = 8,
+ .offset = 0,
+ .v4l2_type = V4L2_CTRL_TYPE_MENU,
+ .data_type = UVC_CTRL_DATA_TYPE_ENUM,
+ .menu_mapping = cros_colorfx_mapping,
+ .menu_mask = BIT(V4L2_COLORFX_VIVID) |
+ BIT(V4L2_COLORFX_NONE),
+ },
};
/* ------------------------------------------------------------------------
@@ -1619,7 +1653,7 @@ static int __uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
}
if (ret == -EIO) {
- dev_warn_ratelimited(&chain->dev->udev->dev,
+ dev_warn_ratelimited(&chain->dev->intf->dev,
"UVC non compliance: Error %d querying master control %x (%s)\n",
ret, master_map->id,
uvc_map_get_name(master_map));
@@ -1643,7 +1677,7 @@ static int __uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
ret = __uvc_queryctrl_boundaries(chain, ctrl, mapping, v4l2_ctrl);
if (ret && !mapping->disabled) {
- dev_warn(&chain->dev->udev->dev,
+ dev_warn(&chain->dev->intf->dev,
"UVC non compliance: permanently disabling control %x (%s), due to error %d\n",
mapping->id, uvc_map_get_name(mapping), ret);
mapping->disabled = true;
@@ -1858,7 +1892,7 @@ static int uvc_ctrl_set_handle(struct uvc_control *ctrl, struct uvc_fh *handle)
lockdep_assert_held(&handle->chain->ctrl_mutex);
if (ctrl->handle) {
- dev_warn_ratelimited(&handle->stream->dev->udev->dev,
+ dev_warn_ratelimited(&handle->stream->dev->intf->dev,
"UVC non compliance: Setting an async control with a pending operation.");
if (ctrl->handle == handle)
@@ -1956,7 +1990,7 @@ static void uvc_ctrl_status_event_work(struct work_struct *work)
w->urb->interval = dev->int_ep->desc.bInterval;
ret = usb_submit_urb(w->urb, GFP_KERNEL);
if (ret < 0)
- dev_err(&dev->udev->dev,
+ dev_err(&dev->intf->dev,
"Failed to resubmit status URB (%d).\n", ret);
}
@@ -2895,7 +2929,7 @@ int uvc_ctrl_restore_values(struct uvc_device *dev)
if (!ctrl->initialized || !ctrl->modified ||
(ctrl->info.flags & UVC_CTRL_FLAG_RESTORE) == 0)
continue;
- dev_dbg(&dev->udev->dev,
+ dev_dbg(&dev->intf->dev,
"restoring control %pUl/%u/%u\n",
ctrl->info.entity, ctrl->info.index,
ctrl->info.selector);
@@ -3181,15 +3215,6 @@ static void uvc_ctrl_init_ctrl(struct uvc_video_chain *chain,
{
unsigned int i;
- /*
- * XU controls initialization requires querying the device for control
- * information. As some buggy UVC devices will crash when queried
- * repeatedly in a tight loop, delay XU controls initialization until
- * first use.
- */
- if (UVC_ENTITY_TYPE(ctrl->entity) == UVC_VC_EXTENSION_UNIT)
- return;
-
for (i = 0; i < ARRAY_SIZE(uvc_ctrls); ++i) {
const struct uvc_control_info *info = &uvc_ctrls[i];
@@ -3307,7 +3332,6 @@ int uvc_ctrl_init_device(struct uvc_device *dev)
void uvc_ctrl_cleanup_fh(struct uvc_fh *handle)
{
struct uvc_entity *entity;
- int i;
guard(mutex)(&handle->chain->ctrl_mutex);
@@ -3325,7 +3349,7 @@ void uvc_ctrl_cleanup_fh(struct uvc_fh *handle)
if (!WARN_ON(handle->pending_async_ctrls))
return;
- for (i = 0; i < handle->pending_async_ctrls; i++)
+ for (unsigned int i = 0; i < handle->pending_async_ctrls; i++)
uvc_pm_put(handle->stream->dev);
}
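
cros_colorfx_mapping is a menu translation table: it is indexed by the V4L2 menu value (V4L2_COLORFX_*), the stored number is the byte sent to the Chrome OS XU, and -1 marks effects the hardware cannot do. Userspace can never select a -1 slot because menu_mask only exposes NONE and VIVID. A hedged sketch of the lookup such a table implies — the real translation is done generically by the uvc_ctrl core, not by per-mapping code:

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/videodev2.h>

static const int cros_colorfx_wire[] = {
	1,				/* V4L2_COLORFX_NONE  -> profile 1 */
	-1, -1, -1, -1, -1, -1, -1, -1,	/* unsupported effects */
	0,				/* V4L2_COLORFX_VIVID -> profile 0 */
};

static int colorfx_to_wire(u32 index)
{
	const u32 mask = BIT(V4L2_COLORFX_NONE) | BIT(V4L2_COLORFX_VIVID);

	if (index >= ARRAY_SIZE(cros_colorfx_wire) || !(BIT(index) & mask))
		return -EINVAL;		/* hidden by menu_mask */
	return cros_colorfx_wire[index];
}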
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index 775bede0d93d..fa61f1d0ea2c 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -137,6 +137,9 @@ struct uvc_entity *uvc_entity_by_id(struct uvc_device *dev, int id)
{
struct uvc_entity *entity;
+ if (id == UVC_INVALID_ENTITY_ID)
+ return NULL;
+
list_for_each_entry(entity, &dev->entities, list) {
if (entity->id == id)
return entity;
@@ -183,8 +186,6 @@ static void uvc_stream_delete(struct uvc_streaming *stream)
if (stream->async_wq)
destroy_workqueue(stream->async_wq);
- mutex_destroy(&stream->mutex);
-
usb_put_intf(stream->intf);
kfree(stream->formats);
@@ -201,8 +202,6 @@ static struct uvc_streaming *uvc_stream_new(struct uvc_device *dev,
if (stream == NULL)
return NULL;
- mutex_init(&stream->mutex);
-
stream->dev = dev;
stream->intf = usb_get_intf(intf);
stream->intfnum = intf->cur_altsetting->desc.bInterfaceNumber;
@@ -795,14 +794,27 @@ static const u8 uvc_media_transport_input_guid[16] =
UVC_GUID_UVC_MEDIA_TRANSPORT_INPUT;
static const u8 uvc_processing_guid[16] = UVC_GUID_UVC_PROCESSING;
-static struct uvc_entity *uvc_alloc_entity(u16 type, u16 id,
- unsigned int num_pads, unsigned int extra_size)
+static struct uvc_entity *uvc_alloc_new_entity(struct uvc_device *dev, u16 type,
+ u16 id, unsigned int num_pads,
+ unsigned int extra_size)
{
struct uvc_entity *entity;
unsigned int num_inputs;
unsigned int size;
unsigned int i;
+ /* Per UVC 1.1+ spec 3.7.2, the ID should be non-zero. */
+ if (id == 0) {
+ dev_err(&dev->intf->dev, "Found Unit with invalid ID 0\n");
+ id = UVC_INVALID_ENTITY_ID;
+ }
+
+ /* Per UVC 1.1+ spec 3.7.2, the ID is unique. */
+ if (uvc_entity_by_id(dev, id)) {
+ dev_err(&dev->intf->dev, "Found multiple Units with ID %u\n", id);
+ id = UVC_INVALID_ENTITY_ID;
+ }
+
extra_size = roundup(extra_size, sizeof(*entity->pads));
if (num_pads)
num_inputs = type & UVC_TERM_OUTPUT ? num_pads : num_pads - 1;
@@ -812,7 +824,7 @@ static struct uvc_entity *uvc_alloc_entity(u16 type, u16 id,
+ num_inputs;
entity = kzalloc(size, GFP_KERNEL);
if (entity == NULL)
- return NULL;
+ return ERR_PTR(-ENOMEM);
entity->id = id;
entity->type = type;
@@ -882,7 +894,7 @@ static int uvc_parse_vendor_control(struct uvc_device *dev,
unsigned int n, p;
int handled = 0;
- switch (le16_to_cpu(dev->udev->descriptor.idVendor)) {
+ switch (le16_to_cpu(udev->descriptor.idVendor)) {
case 0x046d: /* Logitech */
if (buffer[1] != 0x41 || buffer[2] != 0x01)
break;
@@ -924,10 +936,10 @@ static int uvc_parse_vendor_control(struct uvc_device *dev,
break;
}
- unit = uvc_alloc_entity(UVC_VC_EXTENSION_UNIT, buffer[3],
- p + 1, 2*n);
- if (unit == NULL)
- return -ENOMEM;
+ unit = uvc_alloc_new_entity(dev, UVC_VC_EXTENSION_UNIT,
+ buffer[3], p + 1, 2 * n);
+ if (IS_ERR(unit))
+ return PTR_ERR(unit);
memcpy(unit->guid, &buffer[4], 16);
unit->extension.bNumControls = buffer[20];
@@ -1036,10 +1048,10 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
return -EINVAL;
}
- term = uvc_alloc_entity(type | UVC_TERM_INPUT, buffer[3],
- 1, n + p);
- if (term == NULL)
- return -ENOMEM;
+ term = uvc_alloc_new_entity(dev, type | UVC_TERM_INPUT,
+ buffer[3], 1, n + p);
+ if (IS_ERR(term))
+ return PTR_ERR(term);
if (UVC_ENTITY_TYPE(term) == UVC_ITT_CAMERA) {
term->camera.bControlSize = n;
@@ -1095,10 +1107,10 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
return 0;
}
- term = uvc_alloc_entity(type | UVC_TERM_OUTPUT, buffer[3],
- 1, 0);
- if (term == NULL)
- return -ENOMEM;
+ term = uvc_alloc_new_entity(dev, type | UVC_TERM_OUTPUT,
+ buffer[3], 1, 0);
+ if (IS_ERR(term))
+ return PTR_ERR(term);
memcpy(term->baSourceID, &buffer[7], 1);
@@ -1117,9 +1129,10 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
return -EINVAL;
}
- unit = uvc_alloc_entity(buffer[2], buffer[3], p + 1, 0);
- if (unit == NULL)
- return -ENOMEM;
+ unit = uvc_alloc_new_entity(dev, buffer[2], buffer[3],
+ p + 1, 0);
+ if (IS_ERR(unit))
+ return PTR_ERR(unit);
memcpy(unit->baSourceID, &buffer[5], p);
@@ -1139,9 +1152,9 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
return -EINVAL;
}
- unit = uvc_alloc_entity(buffer[2], buffer[3], 2, n);
- if (unit == NULL)
- return -ENOMEM;
+ unit = uvc_alloc_new_entity(dev, buffer[2], buffer[3], 2, n);
+ if (IS_ERR(unit))
+ return PTR_ERR(unit);
memcpy(unit->baSourceID, &buffer[4], 1);
unit->processing.wMaxMultiplier =
@@ -1168,9 +1181,10 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
return -EINVAL;
}
- unit = uvc_alloc_entity(buffer[2], buffer[3], p + 1, n);
- if (unit == NULL)
- return -ENOMEM;
+ unit = uvc_alloc_new_entity(dev, buffer[2], buffer[3],
+ p + 1, n);
+ if (IS_ERR(unit))
+ return PTR_ERR(unit);
memcpy(unit->guid, &buffer[4], 16);
unit->extension.bNumControls = buffer[20];
@@ -1315,9 +1329,10 @@ static int uvc_gpio_parse(struct uvc_device *dev)
return dev_err_probe(&dev->intf->dev, irq,
"No IRQ for privacy GPIO\n");
- unit = uvc_alloc_entity(UVC_EXT_GPIO_UNIT, UVC_EXT_GPIO_UNIT_ID, 0, 1);
- if (!unit)
- return -ENOMEM;
+ unit = uvc_alloc_new_entity(dev, UVC_EXT_GPIO_UNIT,
+ UVC_EXT_GPIO_UNIT_ID, 0, 1);
+ if (IS_ERR(unit))
+ return PTR_ERR(unit);
unit->gpio.gpio_privacy = gpio_privacy;
unit->gpio.irq = irq;
@@ -1868,7 +1883,7 @@ static int uvc_scan_device(struct uvc_device *dev)
uvc_scan_fallback(dev);
if (list_empty(&dev->chains)) {
- dev_info(&dev->udev->dev, "No valid video chain found.\n");
+ dev_info(&dev->intf->dev, "No valid video chain found.\n");
return -ENODEV;
}
@@ -1958,11 +1973,11 @@ static void uvc_unregister_video(struct uvc_device *dev)
list_for_each_entry(stream, &dev->streams, list) {
/* Nothing to do here, continue. */
- if (!video_is_registered(&stream->vdev))
+ if (!video_is_registered(&stream->queue.vdev))
continue;
- vb2_video_unregister_device(&stream->vdev);
- vb2_video_unregister_device(&stream->meta.vdev);
+ vb2_video_unregister_device(&stream->queue.vdev);
+ vb2_video_unregister_device(&stream->meta.queue.vdev);
/*
* Now both vdevs are not streaming and all the ioctls will
@@ -1984,12 +1999,12 @@ static void uvc_unregister_video(struct uvc_device *dev)
int uvc_register_video_device(struct uvc_device *dev,
struct uvc_streaming *stream,
- struct video_device *vdev,
struct uvc_video_queue *queue,
enum v4l2_buf_type type,
const struct v4l2_file_operations *fops,
const struct v4l2_ioctl_ops *ioctl_ops)
{
+ struct video_device *vdev = &queue->vdev;
int ret;
/* Initialize the video buffers queue. */
@@ -2071,9 +2086,9 @@ static int uvc_register_video(struct uvc_device *dev,
uvc_debugfs_init_stream(stream);
/* Register the device with V4L. */
- return uvc_register_video_device(dev, stream, &stream->vdev,
- &stream->queue, stream->type,
- &uvc_fops, &uvc_ioctl_ops);
+ return uvc_register_video_device(dev, stream, &stream->queue,
+ stream->type, &uvc_fops,
+ &uvc_ioctl_ops);
}
/*
@@ -2092,7 +2107,7 @@ static int uvc_register_terms(struct uvc_device *dev,
stream = uvc_stream_by_id(dev, term->id);
if (stream == NULL) {
- dev_info(&dev->udev->dev,
+ dev_info(&dev->intf->dev,
"No streaming interface found for terminal %u.",
term->id);
continue;
@@ -2109,7 +2124,7 @@ static int uvc_register_terms(struct uvc_device *dev,
*/
uvc_meta_register(stream);
- term->vdev = &stream->vdev;
+ term->vdev = &stream->queue.vdev;
}
return 0;
@@ -2128,7 +2143,7 @@ static int uvc_register_chains(struct uvc_device *dev)
#ifdef CONFIG_MEDIA_CONTROLLER
ret = uvc_mc_register_entities(chain);
if (ret < 0)
- dev_info(&dev->udev->dev,
+ dev_info(&dev->intf->dev,
"Failed to register entities (%d).\n", ret);
#endif
}
@@ -2229,23 +2244,23 @@ static int uvc_probe(struct usb_interface *intf,
if (ret < 0)
goto error;
- dev_info(&dev->udev->dev, "Found UVC %u.%02x device %s (%04x:%04x)\n",
+ dev_info(&dev->intf->dev, "Found UVC %u.%02x device %s (%04x:%04x)\n",
dev->uvc_version >> 8, dev->uvc_version & 0xff,
udev->product ? udev->product : "<unnamed>",
le16_to_cpu(udev->descriptor.idVendor),
le16_to_cpu(udev->descriptor.idProduct));
if (dev->quirks != dev->info->quirks) {
- dev_info(&dev->udev->dev,
+ dev_info(&dev->intf->dev,
"Forcing device quirks to 0x%x by module parameter for testing purpose.\n",
dev->quirks);
- dev_info(&dev->udev->dev,
+ dev_info(&dev->intf->dev,
"Please report required quirks to the linux-media mailing list.\n");
}
if (dev->info->uvc_version) {
dev->uvc_version = dev->info->uvc_version;
- dev_info(&dev->udev->dev, "Forcing UVC version to %u.%02x\n",
+ dev_info(&dev->intf->dev, "Forcing UVC version to %u.%02x\n",
dev->uvc_version >> 8, dev->uvc_version & 0xff);
}
@@ -2281,21 +2296,21 @@ static int uvc_probe(struct usb_interface *intf,
/* Initialize the interrupt URB. */
ret = uvc_status_init(dev);
if (ret < 0) {
- dev_info(&dev->udev->dev,
+ dev_info(&dev->intf->dev,
"Unable to initialize the status endpoint (%d), status interrupt will not be supported.\n",
ret);
}
ret = uvc_gpio_init_irq(dev);
if (ret < 0) {
- dev_err(&dev->udev->dev,
+ dev_err(&dev->intf->dev,
"Unable to request privacy GPIO IRQ (%d)\n", ret);
goto error;
}
ret = uvc_meta_init(dev);
if (ret < 0) {
- dev_err(&dev->udev->dev,
+ dev_err(&dev->intf->dev,
"Error initializing the metadata formats (%d)\n", ret);
goto error;
}
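
uvc_alloc_new_entity() changes the failure contract from NULL to the kernel's encoded-pointer convention, which is why every caller flips from "if (!unit) return -ENOMEM" to "if (IS_ERR(unit)) return PTR_ERR(unit)". Note that the new ID checks deliberately do not fail: a spec-violating ID is replaced by the UVC_INVALID_ENTITY_ID sentinel (which uvc_entity_by_id() now refuses to match) and parsing continues, so only allocation failure is encoded. The convention in isolation:

#include <linux/err.h>
#include <linux/slab.h>

struct foo {
	int id;
};

static struct foo *foo_alloc(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return ERR_PTR(-ENOMEM);	/* errno encoded in the pointer */
	return f;
}

static int foo_parse(void)
{
	struct foo *f = foo_alloc();

	if (IS_ERR(f))
		return PTR_ERR(f);	/* decode and propagate the errno */
	kfree(f);
	return 0;
}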
diff --git a/drivers/media/usb/uvc/uvc_entity.c b/drivers/media/usb/uvc/uvc_entity.c
index cc68dd24eb42..3823ac9c8045 100644
--- a/drivers/media/usb/uvc/uvc_entity.c
+++ b/drivers/media/usb/uvc/uvc_entity.c
@@ -140,7 +140,7 @@ int uvc_mc_register_entities(struct uvc_video_chain *chain)
list_for_each_entry(entity, &chain->entities, chain) {
ret = uvc_mc_init_entity(chain, entity);
if (ret < 0) {
- dev_info(&chain->dev->udev->dev,
+ dev_info(&chain->dev->intf->dev,
"Failed to initialize entity for entity %u\n",
entity->id);
return ret;
@@ -150,7 +150,7 @@ int uvc_mc_register_entities(struct uvc_video_chain *chain)
list_for_each_entry(entity, &chain->entities, chain) {
ret = uvc_mc_create_links(chain, entity);
if (ret < 0) {
- dev_info(&chain->dev->udev->dev,
+ dev_info(&chain->dev->intf->dev,
"Failed to create links for entity %u\n",
entity->id);
return ret;
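
The udev->dev to intf->dev switch repeated across the uvc files is small but deliberate: messages logged against the USB interface are attributed to the exact instance the driver bound (e.g. "1-2:1.0"), which stays unambiguous with multi-interface devices or several identical cameras, while the parent usb_device only names the device as a whole. Mechanically it is just a different struct device passed to dev_*():

#include <linux/usb.h>

struct foo_device {
	struct usb_device *udev;
	struct usb_interface *intf;
};

static void foo_log(struct foo_device *dev)
{
	/* before: attributed to the whole USB device */
	dev_warn(&dev->udev->dev, "something happened\n");
	/* after: attributed to the interface the driver is bound to */
	dev_warn(&dev->intf->dev, "something happened\n");
}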
diff --git a/drivers/media/usb/uvc/uvc_metadata.c b/drivers/media/usb/uvc/uvc_metadata.c
index 229e08ff323e..c23b174965c3 100644
--- a/drivers/media/usb/uvc/uvc_metadata.c
+++ b/drivers/media/usb/uvc/uvc_metadata.c
@@ -23,10 +23,10 @@
* V4L2 ioctls
*/
-static int uvc_meta_v4l2_querycap(struct file *file, void *fh,
+static int uvc_meta_v4l2_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct v4l2_fh *vfh = file->private_data;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
struct uvc_streaming *stream = video_get_drvdata(vfh->vdev);
struct uvc_video_chain *chain = stream->chain;
@@ -39,28 +39,26 @@ static int uvc_meta_v4l2_querycap(struct file *file, void *fh,
return 0;
}
-static int uvc_meta_v4l2_get_format(struct file *file, void *fh,
+static int uvc_meta_v4l2_get_format(struct file *file, void *priv,
struct v4l2_format *format)
{
- struct v4l2_fh *vfh = file->private_data;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
struct uvc_streaming *stream = video_get_drvdata(vfh->vdev);
struct v4l2_meta_format *fmt = &format->fmt.meta;
if (format->type != vfh->vdev->queue->type)
return -EINVAL;
- memset(fmt, 0, sizeof(*fmt));
-
fmt->dataformat = stream->meta.format;
fmt->buffersize = UVC_METADATA_BUF_SIZE;
return 0;
}
-static int uvc_meta_v4l2_try_format(struct file *file, void *fh,
+static int uvc_meta_v4l2_try_format(struct file *file, void *priv,
struct v4l2_format *format)
{
- struct v4l2_fh *vfh = file->private_data;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
struct uvc_streaming *stream = video_get_drvdata(vfh->vdev);
struct uvc_device *dev = stream->dev;
struct v4l2_meta_format *fmt = &format->fmt.meta;
@@ -69,11 +67,12 @@ static int uvc_meta_v4l2_try_format(struct file *file, void *fh,
if (format->type != vfh->vdev->queue->type)
return -EINVAL;
- for (unsigned int i = 0; i < dev->nmeta_formats; i++)
+ for (unsigned int i = 0; i < dev->nmeta_formats; i++) {
if (dev->meta_formats[i] == fmt->dataformat) {
fmeta = fmt->dataformat;
break;
}
+ }
memset(fmt, 0, sizeof(*fmt));
@@ -83,15 +82,15 @@ static int uvc_meta_v4l2_try_format(struct file *file, void *fh,
return 0;
}
-static int uvc_meta_v4l2_set_format(struct file *file, void *fh,
+static int uvc_meta_v4l2_set_format(struct file *file, void *priv,
struct v4l2_format *format)
{
- struct v4l2_fh *vfh = file->private_data;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
struct uvc_streaming *stream = video_get_drvdata(vfh->vdev);
struct v4l2_meta_format *fmt = &format->fmt.meta;
int ret;
- ret = uvc_meta_v4l2_try_format(file, fh, format);
+ ret = uvc_meta_v4l2_try_format(file, priv, format);
if (ret < 0)
return ret;
@@ -100,37 +99,28 @@ static int uvc_meta_v4l2_set_format(struct file *file, void *fh,
* Metadata buffers would still be perfectly parseable, but it's more
* consistent and cleaner to disallow that.
*/
- mutex_lock(&stream->mutex);
-
if (vb2_is_busy(&stream->meta.queue.queue))
- ret = -EBUSY;
- else
- stream->meta.format = fmt->dataformat;
+ return -EBUSY;
- mutex_unlock(&stream->mutex);
+ stream->meta.format = fmt->dataformat;
- return ret;
+ return 0;
}
-static int uvc_meta_v4l2_enum_formats(struct file *file, void *fh,
+static int uvc_meta_v4l2_enum_formats(struct file *file, void *priv,
struct v4l2_fmtdesc *fdesc)
{
- struct v4l2_fh *vfh = file->private_data;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
struct uvc_streaming *stream = video_get_drvdata(vfh->vdev);
struct uvc_device *dev = stream->dev;
- u32 i = fdesc->index;
if (fdesc->type != vfh->vdev->queue->type)
return -EINVAL;
- if (i >= dev->nmeta_formats)
+ if (fdesc->index >= dev->nmeta_formats)
return -EINVAL;
- memset(fdesc, 0, sizeof(*fdesc));
-
- fdesc->type = vfh->vdev->queue->type;
- fdesc->index = i;
- fdesc->pixelformat = dev->meta_formats[i];
+ fdesc->pixelformat = dev->meta_formats[fdesc->index];
return 0;
}
@@ -177,7 +167,6 @@ static struct uvc_entity *uvc_meta_find_msxu(struct uvc_device *dev)
return NULL;
}
-#define MSXU_CONTROL_METADATA 0x9
static int uvc_meta_detect_msxu(struct uvc_device *dev)
{
u32 *data __free(kfree) = NULL;
@@ -196,9 +185,12 @@ static int uvc_meta_detect_msxu(struct uvc_device *dev)
if (!data)
return -ENOMEM;
- /* Check if the metadata is already enabled. */
+ /*
+ * Check if the metadata is already enabled, or if the device always
+ * returns metadata.
+ */
ret = uvc_query_ctrl(dev, UVC_GET_CUR, entity->id, dev->intfnum,
- MSXU_CONTROL_METADATA, data, sizeof(*data));
+ UVC_MSXU_CONTROL_METADATA, data, sizeof(*data));
if (ret)
return 0;
@@ -208,21 +200,23 @@ static int uvc_meta_detect_msxu(struct uvc_device *dev)
}
/*
- * We have seen devices that require 1 to enable the metadata, others
- * requiring a value != 1 and others requiring a value >1. Luckily for
- * us, the value from GET_MAX seems to work all the time.
+ * Set the value of UVC_MSXU_CONTROL_METADATA to the value reported by
+ * GET_MAX to enable production of MSXU metadata. The GET_MAX request
+ * reports the maximum size of the metadata; if its value is 0, MSXU
+ * metadata is not supported. For more information, see
+ * https://learn.microsoft.com/en-us/windows-hardware/drivers/stream/uvc-extensions-1-5#2229-metadata-control
*/
ret = uvc_query_ctrl(dev, UVC_GET_MAX, entity->id, dev->intfnum,
- MSXU_CONTROL_METADATA, data, sizeof(*data));
+ UVC_MSXU_CONTROL_METADATA, data, sizeof(*data));
if (ret || !*data)
return 0;
/*
- * If we can set MSXU_CONTROL_METADATA, the device will report
+ * If we can set UVC_MSXU_CONTROL_METADATA, the device will report
* metadata.
*/
ret = uvc_query_ctrl(dev, UVC_SET_CUR, entity->id, dev->intfnum,
- MSXU_CONTROL_METADATA, data, sizeof(*data));
+ UVC_MSXU_CONTROL_METADATA, data, sizeof(*data));
if (!ret)
dev->quirks |= UVC_QUIRK_MSXU_META;
@@ -232,12 +226,11 @@ static int uvc_meta_detect_msxu(struct uvc_device *dev)
int uvc_meta_register(struct uvc_streaming *stream)
{
struct uvc_device *dev = stream->dev;
- struct video_device *vdev = &stream->meta.vdev;
struct uvc_video_queue *queue = &stream->meta.queue;
stream->meta.format = V4L2_META_FMT_UVC;
- return uvc_register_video_device(dev, stream, vdev, queue,
+ return uvc_register_video_device(dev, stream, queue,
V4L2_BUF_TYPE_META_CAPTURE,
&uvc_meta_fops, &uvc_meta_ioctl_ops);
}
diff --git a/drivers/media/usb/uvc/uvc_status.c b/drivers/media/usb/uvc/uvc_status.c
index ee01dce4b783..231cfee8e7c2 100644
--- a/drivers/media/usb/uvc/uvc_status.c
+++ b/drivers/media/usb/uvc/uvc_status.c
@@ -62,7 +62,8 @@ static int uvc_input_init(struct uvc_device *dev)
__set_bit(EV_KEY, input->evbit);
__set_bit(KEY_CAMERA, input->keybit);
- if ((ret = input_register_device(input)) < 0)
+ ret = input_register_device(input);
+ if (ret < 0)
goto error;
dev->input = input;
@@ -215,7 +216,7 @@ static void uvc_status_complete(struct urb *urb)
return;
default:
- dev_warn(&dev->udev->dev,
+ dev_warn(&dev->intf->dev,
"Non-zero status (%d) in status completion handler.\n",
urb->status);
return;
@@ -247,7 +248,7 @@ static void uvc_status_complete(struct urb *urb)
urb->interval = dev->int_ep->desc.bInterval;
ret = usb_submit_urb(urb, GFP_ATOMIC);
if (ret < 0)
- dev_err(&dev->udev->dev,
+ dev_err(&dev->intf->dev,
"Failed to resubmit status URB (%d).\n", ret);
}
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
index 160f9cf6e6db..9e4a251eca88 100644
--- a/drivers/media/usb/uvc/uvc_v4l2.c
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
@@ -329,14 +329,12 @@ static int uvc_v4l2_try_format(struct uvc_streaming *stream,
* developers test their webcams with the Linux driver as well as with
* the Windows driver).
*/
- mutex_lock(&stream->mutex);
if (stream->dev->quirks & UVC_QUIRK_PROBE_EXTRAFIELDS)
probe->dwMaxVideoFrameSize =
stream->ctrl.dwMaxVideoFrameSize;
/* Probe the device. */
ret = uvc_probe_video(stream, probe);
- mutex_unlock(&stream->mutex);
if (ret < 0)
return ret;
@@ -388,26 +386,22 @@ static int uvc_v4l2_try_format(struct uvc_streaming *stream,
return ret;
}
-static int uvc_ioctl_g_fmt(struct file *file, void *fh,
+static int uvc_ioctl_g_fmt(struct file *file, void *priv,
struct v4l2_format *fmt)
{
- struct uvc_fh *handle = fh;
+ struct uvc_fh *handle = to_uvc_fh(file);
struct uvc_streaming *stream = handle->stream;
const struct uvc_format *format;
const struct uvc_frame *frame;
- int ret = 0;
if (fmt->type != stream->type)
return -EINVAL;
- mutex_lock(&stream->mutex);
format = stream->cur_format;
frame = stream->cur_frame;
- if (format == NULL || frame == NULL) {
- ret = -EINVAL;
- goto done;
- }
+ if (!format || !frame)
+ return -EINVAL;
fmt->fmt.pix.pixelformat = format->fcc;
fmt->fmt.pix.width = frame->wWidth;
@@ -419,15 +413,13 @@ static int uvc_ioctl_g_fmt(struct file *file, void *fh,
fmt->fmt.pix.xfer_func = format->xfer_func;
fmt->fmt.pix.ycbcr_enc = format->ycbcr_enc;
-done:
- mutex_unlock(&stream->mutex);
- return ret;
+ return 0;
}
-static int uvc_ioctl_s_fmt(struct file *file, void *fh,
+static int uvc_ioctl_s_fmt(struct file *file, void *priv,
struct v4l2_format *fmt)
{
- struct uvc_fh *handle = fh;
+ struct uvc_fh *handle = to_uvc_fh(file);
struct uvc_streaming *stream = handle->stream;
struct uvc_streaming_control probe;
const struct uvc_format *format;
@@ -441,35 +433,27 @@ static int uvc_ioctl_s_fmt(struct file *file, void *fh,
if (ret < 0)
return ret;
- mutex_lock(&stream->mutex);
- if (vb2_is_busy(&stream->queue.queue)) {
- ret = -EBUSY;
- goto done;
- }
+ if (vb2_is_busy(&stream->queue.queue))
+ return -EBUSY;
stream->ctrl = probe;
stream->cur_format = format;
stream->cur_frame = frame;
-done:
- mutex_unlock(&stream->mutex);
- return ret;
+ return 0;
}
-static int uvc_ioctl_g_parm(struct file *file, void *fh,
+static int uvc_ioctl_g_parm(struct file *file, void *priv,
struct v4l2_streamparm *parm)
{
u32 numerator, denominator;
- struct uvc_fh *handle = fh;
+ struct uvc_fh *handle = to_uvc_fh(file);
struct uvc_streaming *stream = handle->stream;
if (parm->type != stream->type)
return -EINVAL;
- mutex_lock(&stream->mutex);
numerator = stream->ctrl.dwFrameInterval;
- mutex_unlock(&stream->mutex);
-
denominator = 10000000;
v4l2_simplify_fraction(&numerator, &denominator, 8, 333);
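The interval-to-fraction conversion above deserves a worked example. UVC reports frame intervals in 100 ns units, so an interval of 333333 corresponds to 333333/10000000 seconds; v4l2_simplify_fraction() reduces that via continued fractions (the same example its own documentation cites further below). A minimal sketch:

	/* 333333 * 100 ns per frame, i.e. 333333/10000000 s. */
	u32 num = 333333, den = 10000000;

	v4l2_simplify_fraction(&num, &den, 8, 333);
	/* num/den is now 1/30: a ~33.3 ms frame period, i.e. ~30 fps. */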
@@ -493,10 +477,10 @@ static int uvc_ioctl_g_parm(struct file *file, void *fh,
return 0;
}
-static int uvc_ioctl_s_parm(struct file *file, void *fh,
+static int uvc_ioctl_s_parm(struct file *file, void *priv,
struct v4l2_streamparm *parm)
{
- struct uvc_fh *handle = fh;
+ struct uvc_fh *handle = to_uvc_fh(file);
struct uvc_streaming *stream = handle->stream;
struct uvc_streaming_control probe;
struct v4l2_fract timeperframe;
@@ -519,12 +503,8 @@ static int uvc_ioctl_s_parm(struct file *file, void *fh,
uvc_dbg(stream->dev, FORMAT, "Setting frame interval to %u/%u (%u)\n",
timeperframe.numerator, timeperframe.denominator, interval);
- mutex_lock(&stream->mutex);
-
- if (uvc_queue_streaming(&stream->queue)) {
- mutex_unlock(&stream->mutex);
+ if (uvc_queue_streaming(&stream->queue))
return -EBUSY;
- }
format = stream->cur_format;
frame = stream->cur_frame;
@@ -556,14 +536,11 @@ static int uvc_ioctl_s_parm(struct file *file, void *fh,
/* Probe the device with the new settings. */
ret = uvc_probe_video(stream, &probe);
- if (ret < 0) {
- mutex_unlock(&stream->mutex);
+ if (ret < 0)
return ret;
- }
stream->ctrl = probe;
stream->cur_frame = frame;
- mutex_unlock(&stream->mutex);
/* Return the actual frame period. */
timeperframe.numerator = probe.dwFrameInterval;
@@ -599,18 +576,17 @@ static int uvc_v4l2_open(struct file *file)
if (!handle)
return -ENOMEM;
- v4l2_fh_init(&handle->vfh, &stream->vdev);
- v4l2_fh_add(&handle->vfh);
+ v4l2_fh_init(&handle->vfh, &stream->queue.vdev);
+ v4l2_fh_add(&handle->vfh, file);
handle->chain = stream->chain;
handle->stream = stream;
- file->private_data = handle;
return 0;
}
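This open() conversion is the template for the whole series: v4l2_fh_add() now takes the struct file and sets file->private_data itself, so drivers must no longer assign it by hand. A minimal sketch of the new pattern for a driver with per-file state (my_fh and my_open are hypothetical names):

	struct my_fh {
		struct v4l2_fh vfh;
		/* driver-private per-file state follows */
	};

	static int my_open(struct file *file)
	{
		struct video_device *vdev = video_devdata(file);
		struct my_fh *handle = kzalloc(sizeof(*handle), GFP_KERNEL);

		if (!handle)
			return -ENOMEM;

		v4l2_fh_init(&handle->vfh, vdev);
		/* Also sets file->private_data to &handle->vfh. */
		v4l2_fh_add(&handle->vfh, file);
		return 0;
	}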
static int uvc_v4l2_release(struct file *file)
{
- struct uvc_fh *handle = file->private_data;
+ struct uvc_fh *handle = to_uvc_fh(file);
struct uvc_streaming *stream = handle->stream;
uvc_dbg(stream->dev, CALLS, "%s\n", __func__);
@@ -623,10 +599,10 @@ static int uvc_v4l2_release(struct file *file)
return 0;
}
-static int uvc_ioctl_querycap(struct file *file, void *fh,
+static int uvc_ioctl_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct uvc_fh *handle = file->private_data;
+ struct uvc_fh *handle = to_uvc_fh(file);
struct uvc_video_chain *chain = handle->chain;
struct uvc_streaming *stream = handle->stream;
@@ -639,10 +615,10 @@ static int uvc_ioctl_querycap(struct file *file, void *fh,
return 0;
}
-static int uvc_ioctl_enum_fmt(struct file *file, void *fh,
+static int uvc_ioctl_enum_fmt(struct file *file, void *priv,
struct v4l2_fmtdesc *fmt)
{
- struct uvc_fh *handle = fh;
+ struct uvc_fh *handle = to_uvc_fh(file);
struct uvc_streaming *stream = handle->stream;
enum v4l2_buf_type type = fmt->type;
const struct uvc_format *format;
@@ -663,20 +639,20 @@ static int uvc_ioctl_enum_fmt(struct file *file, void *fh,
return 0;
}
-static int uvc_ioctl_try_fmt(struct file *file, void *fh,
+static int uvc_ioctl_try_fmt(struct file *file, void *priv,
struct v4l2_format *fmt)
{
- struct uvc_fh *handle = fh;
+ struct uvc_fh *handle = to_uvc_fh(file);
struct uvc_streaming *stream = handle->stream;
struct uvc_streaming_control probe;
return uvc_v4l2_try_format(stream, fmt, &probe, NULL, NULL);
}
-static int uvc_ioctl_enum_input(struct file *file, void *fh,
+static int uvc_ioctl_enum_input(struct file *file, void *priv,
struct v4l2_input *input)
{
- struct uvc_fh *handle = fh;
+ struct uvc_fh *handle = to_uvc_fh(file);
struct uvc_video_chain *chain = handle->chain;
const struct uvc_entity *selector = chain->selector;
struct uvc_entity *iterm = NULL;
@@ -716,9 +692,9 @@ static int uvc_ioctl_enum_input(struct file *file, void *fh,
return 0;
}
-static int uvc_ioctl_g_input(struct file *file, void *fh, unsigned int *input)
+static int uvc_ioctl_g_input(struct file *file, void *priv, unsigned int *input)
{
- struct uvc_fh *handle = fh;
+ struct uvc_fh *handle = to_uvc_fh(file);
struct uvc_video_chain *chain = handle->chain;
u8 *buf;
int ret;
@@ -744,9 +720,9 @@ static int uvc_ioctl_g_input(struct file *file, void *fh, unsigned int *input)
return ret;
}
-static int uvc_ioctl_s_input(struct file *file, void *fh, unsigned int input)
+static int uvc_ioctl_s_input(struct file *file, void *priv, unsigned int input)
{
- struct uvc_fh *handle = fh;
+ struct uvc_fh *handle = to_uvc_fh(file);
struct uvc_streaming *stream = handle->stream;
struct uvc_video_chain *chain = handle->chain;
u8 *buf;
@@ -778,10 +754,10 @@ static int uvc_ioctl_s_input(struct file *file, void *fh, unsigned int input)
return ret;
}
-static int uvc_ioctl_query_ext_ctrl(struct file *file, void *fh,
+static int uvc_ioctl_query_ext_ctrl(struct file *file, void *priv,
struct v4l2_query_ext_ctrl *qec)
{
- struct uvc_fh *handle = fh;
+ struct uvc_fh *handle = to_uvc_fh(file);
struct uvc_video_chain *chain = handle->chain;
return uvc_query_v4l2_ctrl(chain, qec);
@@ -806,10 +782,10 @@ static int uvc_ctrl_check_access(struct uvc_video_chain *chain,
return ret;
}
-static int uvc_ioctl_g_ext_ctrls(struct file *file, void *fh,
+static int uvc_ioctl_g_ext_ctrls(struct file *file, void *priv,
struct v4l2_ext_controls *ctrls)
{
- struct uvc_fh *handle = fh;
+ struct uvc_fh *handle = to_uvc_fh(file);
struct uvc_video_chain *chain = handle->chain;
struct v4l2_ext_control *ctrl = ctrls->controls;
unsigned int i;
@@ -890,35 +866,35 @@ static int uvc_ioctl_s_try_ext_ctrls(struct uvc_fh *handle,
return uvc_ctrl_rollback(handle);
}
-static int uvc_ioctl_s_ext_ctrls(struct file *file, void *fh,
+static int uvc_ioctl_s_ext_ctrls(struct file *file, void *priv,
struct v4l2_ext_controls *ctrls)
{
- struct uvc_fh *handle = fh;
+ struct uvc_fh *handle = to_uvc_fh(file);
return uvc_ioctl_s_try_ext_ctrls(handle, ctrls, VIDIOC_S_EXT_CTRLS);
}
-static int uvc_ioctl_try_ext_ctrls(struct file *file, void *fh,
+static int uvc_ioctl_try_ext_ctrls(struct file *file, void *priv,
struct v4l2_ext_controls *ctrls)
{
- struct uvc_fh *handle = fh;
+ struct uvc_fh *handle = to_uvc_fh(file);
return uvc_ioctl_s_try_ext_ctrls(handle, ctrls, VIDIOC_TRY_EXT_CTRLS);
}
-static int uvc_ioctl_querymenu(struct file *file, void *fh,
+static int uvc_ioctl_querymenu(struct file *file, void *priv,
struct v4l2_querymenu *qm)
{
- struct uvc_fh *handle = fh;
+ struct uvc_fh *handle = to_uvc_fh(file);
struct uvc_video_chain *chain = handle->chain;
return uvc_query_v4l2_menu(chain, qm);
}
-static int uvc_ioctl_g_selection(struct file *file, void *fh,
+static int uvc_ioctl_g_selection(struct file *file, void *priv,
struct v4l2_selection *sel)
{
- struct uvc_fh *handle = fh;
+ struct uvc_fh *handle = to_uvc_fh(file);
struct uvc_streaming *stream = handle->stream;
if (sel->type != stream->type)
@@ -941,18 +917,16 @@ static int uvc_ioctl_g_selection(struct file *file, void *fh,
sel->r.left = 0;
sel->r.top = 0;
- mutex_lock(&stream->mutex);
sel->r.width = stream->cur_frame->wWidth;
sel->r.height = stream->cur_frame->wHeight;
- mutex_unlock(&stream->mutex);
return 0;
}
-static int uvc_ioctl_enum_framesizes(struct file *file, void *fh,
+static int uvc_ioctl_enum_framesizes(struct file *file, void *priv,
struct v4l2_frmsizeenum *fsize)
{
- struct uvc_fh *handle = fh;
+ struct uvc_fh *handle = to_uvc_fh(file);
struct uvc_streaming *stream = handle->stream;
const struct uvc_format *format = NULL;
const struct uvc_frame *frame = NULL;
@@ -989,10 +963,10 @@ static int uvc_ioctl_enum_framesizes(struct file *file, void *fh,
return 0;
}
-static int uvc_ioctl_enum_frameintervals(struct file *file, void *fh,
+static int uvc_ioctl_enum_frameintervals(struct file *file, void *priv,
struct v4l2_frmivalenum *fival)
{
- struct uvc_fh *handle = fh;
+ struct uvc_fh *handle = to_uvc_fh(file);
struct uvc_streaming *stream = handle->stream;
const struct uvc_format *format = NULL;
const struct uvc_frame *frame = NULL;
@@ -1061,10 +1035,10 @@ static int uvc_ioctl_subscribe_event(struct v4l2_fh *fh,
}
}
-static long uvc_ioctl_default(struct file *file, void *fh, bool valid_prio,
+static long uvc_ioctl_default(struct file *file, void *priv, bool valid_prio,
unsigned int cmd, void *arg)
{
- struct uvc_fh *handle = fh;
+ struct uvc_fh *handle = to_uvc_fh(file);
struct uvc_video_chain *chain = handle->chain;
switch (cmd) {
@@ -1170,7 +1144,7 @@ static int uvc_v4l2_put_xu_query(const struct uvc_xu_control_query *kp,
static long uvc_v4l2_compat_ioctl32(struct file *file,
unsigned int cmd, unsigned long arg)
{
- struct uvc_fh *handle = file->private_data;
+ struct uvc_fh *handle = to_uvc_fh(file);
union {
struct uvc_xu_control_mapping xmap;
struct uvc_xu_control_query xqry;
@@ -1221,7 +1195,7 @@ static long uvc_v4l2_compat_ioctl32(struct file *file,
static long uvc_v4l2_unlocked_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
- struct uvc_fh *handle = file->private_data;
+ struct uvc_fh *handle = to_uvc_fh(file);
unsigned int converted_cmd = v4l2_translate_cmd(cmd);
int ret;
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index a5013a7fbca4..784be9330320 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -95,14 +95,14 @@ int uvc_query_ctrl(struct uvc_device *dev, u8 query, u8 unit,
*/
if (ret > 0 && query != UVC_GET_INFO) {
memset(data + ret, 0, size - ret);
- dev_warn_once(&dev->udev->dev,
+ dev_warn_once(&dev->intf->dev,
"UVC non compliance: %s control %u on unit %u returned %d bytes when we expected %u.\n",
uvc_query_name(query), cs, unit, ret, size);
return 0;
}
if (ret != -EPIPE) {
- dev_err(&dev->udev->dev,
+ dev_err(&dev->intf->dev,
"Failed to query (%s) UVC control %u on unit %u: %d (exp. %u).\n",
uvc_query_name(query), cs, unit, ret, size);
return ret < 0 ? ret : -EPIPE;
@@ -119,7 +119,7 @@ int uvc_query_ctrl(struct uvc_device *dev, u8 query, u8 unit,
*(u8 *)data = tmp;
if (ret != 1) {
- dev_err_ratelimited(&dev->udev->dev,
+ dev_err_ratelimited(&dev->intf->dev,
"Failed to query (%s) UVC error code control %u on unit %u: %d (exp. 1).\n",
uvc_query_name(query), cs, unit, ret);
return ret < 0 ? ret : -EPIPE;
@@ -266,7 +266,7 @@ static void uvc_fixup_video_ctrl(struct uvc_streaming *stream,
if (stream->intf->num_altsetting > 1 &&
ctrl->dwMaxPayloadTransferSize > stream->maxpsize) {
dev_warn_ratelimited(&stream->intf->dev,
- "UVC non compliance: the max payload transmission size (%u) exceeds the size of the ep max packet (%u). Using the max size.\n",
+ "UVC non compliance: Reducing max payload transfer size (%u) to fit endpoint limit (%u).\n",
ctrl->dwMaxPayloadTransferSize,
stream->maxpsize);
ctrl->dwMaxPayloadTransferSize = stream->maxpsize;
@@ -1691,7 +1691,7 @@ static void uvc_video_complete(struct urb *urb)
struct uvc_streaming *stream = uvc_urb->stream;
struct uvc_video_queue *queue = &stream->queue;
struct uvc_video_queue *qmeta = &stream->meta.queue;
- struct vb2_queue *vb2_qmeta = stream->meta.vdev.queue;
+ struct vb2_queue *vb2_qmeta = stream->meta.queue.vdev.queue;
struct uvc_buffer *buf = NULL;
struct uvc_buffer *buf_meta = NULL;
unsigned long flags;
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index 757254fc4fe9..24292efbe47d 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -41,6 +41,8 @@
#define UVC_EXT_GPIO_UNIT 0x7ffe
#define UVC_EXT_GPIO_UNIT_ID 0x100
+#define UVC_INVALID_ENTITY_ID 0xffff
+
/* ------------------------------------------------------------------------
* Driver specific constants.
*/
@@ -328,6 +330,7 @@ struct uvc_buffer {
#define UVC_QUEUE_DISCONNECTED (1 << 0)
struct uvc_video_queue {
+ struct video_device vdev;
struct vb2_queue queue;
struct mutex mutex; /*
* Serializes vb2_queue and
@@ -450,7 +453,6 @@ struct uvc_urb {
struct uvc_streaming {
struct list_head list;
struct uvc_device *dev;
- struct video_device vdev;
struct uvc_video_chain *chain;
atomic_t active;
@@ -469,12 +471,6 @@ struct uvc_streaming {
const struct uvc_format *cur_format;
const struct uvc_frame *cur_frame;
- /*
- * Protect access to ctrl, cur_format, cur_frame and hardware video
- * probe control.
- */
- struct mutex mutex;
-
/* Buffers queue. */
unsigned int frozen : 1;
struct uvc_video_queue queue;
@@ -483,7 +479,6 @@ struct uvc_streaming {
struct uvc_buffer *meta_buf);
struct {
- struct video_device vdev;
struct uvc_video_queue queue;
u32 format;
} meta;
@@ -637,6 +632,11 @@ struct uvc_fh {
unsigned int pending_async_ctrls;
};
+static inline struct uvc_fh *to_uvc_fh(struct file *filp)
+{
+ return container_of(file_to_v4l2_fh(filp), struct uvc_fh, vfh);
+}
+
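The new to_uvc_fh() helper is an instance of a general pattern this series enables: with file->private_data owned by the core, a driver recovers its own handle by wrapping file_to_v4l2_fh() in container_of() over its embedded v4l2_fh. A sketch for a hypothetical my_fh (as in the open() sketch earlier):

	static inline struct my_fh *to_my_fh(struct file *filp)
	{
		return container_of(file_to_v4l2_fh(filp), struct my_fh, vfh);
	}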
/* ------------------------------------------------------------------------
* Debugging, printing and logging
*/
@@ -667,7 +667,7 @@ extern unsigned int uvc_hw_timestamps_param;
#define uvc_dbg(_dev, flag, fmt, ...) \
do { \
if (uvc_dbg_param & UVC_DBG_##flag) \
- dev_printk(KERN_DEBUG, &(_dev)->udev->dev, fmt, \
+ dev_printk(KERN_DEBUG, &(_dev)->intf->dev, fmt, \
##__VA_ARGS__); \
} while (0)
@@ -680,7 +680,7 @@ do { \
#define uvc_warn_once(_dev, warn, fmt, ...) \
do { \
if (!test_and_set_bit(warn, &(_dev)->warnings)) \
- dev_info(&(_dev)->udev->dev, fmt, ##__VA_ARGS__); \
+ dev_info(&(_dev)->intf->dev, fmt, ##__VA_ARGS__); \
} while (0)
/* --------------------------------------------------------------------------
@@ -733,7 +733,6 @@ int uvc_meta_register(struct uvc_streaming *stream);
int uvc_register_video_device(struct uvc_device *dev,
struct uvc_streaming *stream,
- struct video_device *vdev,
struct uvc_video_queue *queue,
enum v4l2_buf_type type,
const struct v4l2_file_operations *fops,
diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
index 6e585bc76367..b367d479d6b3 100644
--- a/drivers/media/v4l2-core/v4l2-common.c
+++ b/drivers/media/v4l2-core/v4l2-common.c
@@ -34,6 +34,9 @@
* Added Gerd Knorrs v4l1 enhancements (Justin Schoeman)
*/
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
@@ -509,8 +512,9 @@ int v4l2_fill_pixfmt(struct v4l2_pix_format *pixfmt, u32 pixelformat,
}
EXPORT_SYMBOL_GPL(v4l2_fill_pixfmt);
-s64 __v4l2_get_link_freq_ctrl(struct v4l2_ctrl_handler *handler,
- unsigned int mul, unsigned int div)
+#ifdef CONFIG_MEDIA_CONTROLLER
+static s64 v4l2_get_link_freq_ctrl(struct v4l2_ctrl_handler *handler,
+ unsigned int mul, unsigned int div)
{
struct v4l2_ctrl *ctrl;
s64 freq;
@@ -545,11 +549,9 @@ s64 __v4l2_get_link_freq_ctrl(struct v4l2_ctrl_handler *handler,
return freq > 0 ? freq : -EINVAL;
}
-EXPORT_SYMBOL_GPL(__v4l2_get_link_freq_ctrl);
-#ifdef CONFIG_MEDIA_CONTROLLER
-s64 __v4l2_get_link_freq_pad(struct media_pad *pad, unsigned int mul,
- unsigned int div)
+s64 v4l2_get_link_freq(const struct media_pad *pad, unsigned int mul,
+ unsigned int div)
{
struct v4l2_mbus_config mbus_config = {};
struct v4l2_subdev *sd;
@@ -568,10 +570,10 @@ s64 __v4l2_get_link_freq_pad(struct media_pad *pad, unsigned int mul,
* Fall back to using the link frequency control if the media bus config
* doesn't provide a link frequency.
*/
- return __v4l2_get_link_freq_ctrl(sd->ctrl_handler, mul, div);
+ return v4l2_get_link_freq_ctrl(sd->ctrl_handler, mul, div);
}
-EXPORT_SYMBOL_GPL(__v4l2_get_link_freq_pad);
-#endif /* CONFIG_MEDIA_CONTROLLER */
+EXPORT_SYMBOL_GPL(v4l2_get_link_freq);
+#endif
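With the __v4l2_get_link_freq_ctrl()/__v4l2_get_link_freq_pad() pair folded into a single media-controller-only v4l2_get_link_freq(), a receiver driver queries the frequency from the remote pad directly. A hedged caller sketch (sensor_pad, bpp and lanes are assumptions; mul/div only matter for the V4L2_CID_PIXEL_RATE fallback, where the conventional arguments are bits per sample and 2 * lane count on a D-PHY bus):

	s64 freq = v4l2_get_link_freq(sensor_pad, bpp, 2 * lanes);

	if (freq < 0)
		return freq;	/* -ENOENT, -EINVAL, ... */
	dev_dbg(dev, "link frequency: %lld Hz\n", freq);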
/*
* Simplify a fraction using a simple continued fraction decomposition. The
@@ -705,3 +707,73 @@ int v4l2_link_freq_to_bitmap(struct device *dev, const u64 *fw_link_freqs,
return 0;
}
EXPORT_SYMBOL_GPL(v4l2_link_freq_to_bitmap);
+
+struct clk *__devm_v4l2_sensor_clk_get(struct device *dev, const char *id,
+ bool legacy, bool fixed_rate,
+ unsigned long clk_rate)
+{
+ bool of_node = is_of_node(dev_fwnode(dev));
+ const char *clk_id __free(kfree) = NULL;
+ struct clk_hw *clk_hw;
+ struct clk *clk;
+ u32 rate = clk_rate;
+ int ret = 0;
+
+ clk = devm_clk_get_optional(dev, id);
+ if (IS_ERR(clk))
+ return clk;
+
+ /*
+ * If the caller didn't request a fixed rate, or requested a rate of 0,
+ * retrieve the rate from the clock-frequency property. A return value of
+ * -EINVAL indicates the property is absent and is not a failure. Other
+ * errors, or success with a clock-frequency value of 0, are hard failures.
+ */
+ if (!fixed_rate || !clk_rate) {
+ ret = device_property_read_u32(dev, "clock-frequency", &rate);
+ if ((ret && ret != -EINVAL) || (!ret && !rate))
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (clk) {
+ /*
+ * On non-OF platforms, or when legacy behaviour is requested,
+ * set the clock rate if a rate has been specified by the caller
+ * or by the clock-frequency property.
+ */
+ if (rate && (!of_node || legacy)) {
+ ret = clk_set_rate(clk, rate);
+ if (ret) {
+ dev_err(dev, "Failed to set clock rate: %u\n",
+ rate);
+ return ERR_PTR(ret);
+ }
+ }
+ return clk;
+ }
+
+ /*
+ * Register a dummy fixed clock on non-OF platforms or when legacy
+ * behaviour is requested. This requires the common clock framework.
+ */
+ if (!IS_ENABLED(CONFIG_COMMON_CLK) || (of_node && !legacy))
+ return ERR_PTR(-ENOENT);
+
+ /* We need a rate to create a clock. */
+ if (ret)
+ return ERR_PTR(ret == -EINVAL ? -EPROBE_DEFER : ret);
+
+ if (!id) {
+ clk_id = kasprintf(GFP_KERNEL, "clk-%s", dev_name(dev));
+ if (!clk_id)
+ return ERR_PTR(-ENOMEM);
+ id = clk_id;
+ }
+
+ clk_hw = devm_clk_hw_register_fixed_rate(dev, id, NULL, 0, rate);
+ if (IS_ERR(clk_hw))
+ return ERR_CAST(clk_hw);
+
+ return clk_hw->clk;
+}
+EXPORT_SYMBOL_GPL(__devm_v4l2_sensor_clk_get);
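A hedged usage sketch for the new helper; only the double-underscore symbol above appears in this patch, so calling it directly here is an assumption (a public wrapper presumably supplies the legacy/fixed_rate arguments):

	static int my_sensor_probe(struct device *dev)
	{
		struct clk *xclk;

		/* No fixed rate: take it from the "clock-frequency" property. */
		xclk = __devm_v4l2_sensor_clk_get(dev, NULL, false, false, 0);
		if (IS_ERR(xclk))
			return PTR_ERR(xclk);

		return clk_prepare_enable(xclk);
	}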
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index 8c07400bd280..2c88e1175a10 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -8,7 +8,7 @@
* Copyright (C) 2001,2002 Andi Kleen, SuSE Labs
* Copyright (C) 2003 Pavel Machek (pavel@ucw.cz)
* Copyright (C) 2005 Philippe De Muyter (phdm@macqel.be)
- * Copyright (C) 2008 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2008 Hans Verkuil <hverkuil@kernel.org>
*
* These routines maintain argument size conversion between 32bit and 64bit
* ioctls.
@@ -672,15 +672,12 @@ struct v4l2_ext_control32 {
static inline bool ctrl_is_pointer(struct file *file, u32 id)
{
struct video_device *vdev = video_devdata(file);
- struct v4l2_fh *fh = NULL;
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
struct v4l2_ctrl_handler *hdl = NULL;
struct v4l2_query_ext_ctrl qec = { id };
const struct v4l2_ioctl_ops *ops = vdev->ioctl_ops;
- if (test_bit(V4L2_FL_USES_V4L2_FH, &vdev->flags))
- fh = file->private_data;
-
- if (fh && fh->ctrl_handler)
+ if (fh->ctrl_handler)
hdl = fh->ctrl_handler;
else if (vdev->ctrl_handler)
hdl = vdev->ctrl_handler;
@@ -694,7 +691,7 @@ static inline bool ctrl_is_pointer(struct file *file, u32 id)
if (!ops || !ops->vidioc_query_ext_ctrl)
return false;
- return !ops->vidioc_query_ext_ctrl(file, fh, &qec) &&
+ return !ops->vidioc_query_ext_ctrl(file, NULL, &qec) &&
(qec.flags & V4L2_CTRL_FLAG_HAS_PAYLOAD);
}
diff --git a/drivers/media/v4l2-core/v4l2-ctrls-api.c b/drivers/media/v4l2-core/v4l2-ctrls-api.c
index d49a68b36c28..0078a04c5445 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls-api.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls-api.c
@@ -2,7 +2,7 @@
/*
* V4L2 controls framework uAPI implementation:
*
- * Copyright (C) 2010-2021 Hans Verkuil <hverkuil-cisco@xs4all.nl>
+ * Copyright (C) 2010-2021 Hans Verkuil <hverkuil@kernel.org>
*/
#define pr_fmt(fmt) "v4l2-ctrls: " fmt
@@ -1250,14 +1250,17 @@ EXPORT_SYMBOL(v4l2_querymenu);
* VIDIOC_LOG_STATUS helpers
*/
-int v4l2_ctrl_log_status(struct file *file, void *fh)
+int v4l2_ctrl_log_status(struct file *file, void *priv)
{
struct video_device *vfd = video_devdata(file);
- struct v4l2_fh *vfh = file->private_data;
- if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) && vfd->v4l2_dev)
+ if (vfd->v4l2_dev) {
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
+
v4l2_ctrl_handler_log_status(vfh->ctrl_handler,
vfd->v4l2_dev->name);
+ }
+
return 0;
}
EXPORT_SYMBOL(v4l2_ctrl_log_status);
@@ -1348,7 +1351,7 @@ EXPORT_SYMBOL(v4l2_ctrl_subdev_subscribe_event);
*/
__poll_t v4l2_ctrl_poll(struct file *file, struct poll_table_struct *wait)
{
- struct v4l2_fh *fh = file->private_data;
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
poll_wait(file, &fh->wait, wait);
if (v4l2_event_pending(fh))
diff --git a/drivers/media/v4l2-core/v4l2-ctrls-core.c b/drivers/media/v4l2-core/v4l2-ctrls-core.c
index 98b960775e87..85d07ef44f62 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls-core.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls-core.c
@@ -2,7 +2,7 @@
/*
* V4L2 controls framework core implementation.
*
- * Copyright (C) 2010-2021 Hans Verkuil <hverkuil-cisco@xs4all.nl>
+ * Copyright (C) 2010-2021 Hans Verkuil <hverkuil@kernel.org>
*/
#include <linux/export.h>
diff --git a/drivers/media/v4l2-core/v4l2-ctrls-defs.c b/drivers/media/v4l2-core/v4l2-ctrls-defs.c
index 1ea52011247a..ad41f65374e2 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls-defs.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls-defs.c
@@ -2,7 +2,7 @@
/*
* V4L2 controls framework control definitions.
*
- * Copyright (C) 2010-2021 Hans Verkuil <hverkuil-cisco@xs4all.nl>
+ * Copyright (C) 2010-2021 Hans Verkuil <hverkuil@kernel.org>
*/
#include <linux/export.h>
diff --git a/drivers/media/v4l2-core/v4l2-ctrls-priv.h b/drivers/media/v4l2-core/v4l2-ctrls-priv.h
index aba6176fab6c..f4cf273ecf30 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls-priv.h
+++ b/drivers/media/v4l2-core/v4l2-ctrls-priv.h
@@ -2,7 +2,7 @@
/*
* V4L2 controls framework private header.
*
- * Copyright (C) 2010-2021 Hans Verkuil <hverkuil-cisco@xs4all.nl>
+ * Copyright (C) 2010-2021 Hans Verkuil <hverkuil@kernel.org>
*/
#ifndef _V4L2_CTRLS_PRIV_H_
diff --git a/drivers/media/v4l2-core/v4l2-ctrls-request.c b/drivers/media/v4l2-core/v4l2-ctrls-request.c
index c637049d7a2b..e77f722b36a4 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls-request.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls-request.c
@@ -2,7 +2,7 @@
/*
* V4L2 controls framework Request API implementation.
*
- * Copyright (C) 2018-2021 Hans Verkuil <hverkuil-cisco@xs4all.nl>
+ * Copyright (C) 2018-2021 Hans Verkuil <hverkuil@kernel.org>
*/
#define pr_fmt(fmt) "v4l2-ctrls: " fmt
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index c369235113d9..10a126e50c1c 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -411,7 +411,7 @@ static int v4l2_mmap(struct file *filp, struct vm_area_struct *vm)
static int v4l2_open(struct inode *inode, struct file *filp)
{
struct video_device *vdev;
- int ret = 0;
+ int ret;
/* Check if the video device is available */
mutex_lock(&videodev_lock);
@@ -424,16 +424,27 @@ static int v4l2_open(struct inode *inode, struct file *filp)
/* and increase the device refcount */
video_get(vdev);
mutex_unlock(&videodev_lock);
- if (vdev->fops->open) {
- if (video_is_registered(vdev))
- ret = vdev->fops->open(filp);
- else
- ret = -ENODEV;
+
+ if (!video_is_registered(vdev)) {
+ ret = -ENODEV;
+ goto done;
+ }
+
+ ret = vdev->fops->open(filp);
+ if (ret)
+ goto done;
+
+ /* All drivers must use v4l2_fh. */
+ if (WARN_ON(!test_bit(V4L2_FL_USES_V4L2_FH, &vdev->flags))) {
+ vdev->fops->release(filp);
+ ret = -ENODEV;
}
+done:
if (vdev->dev_debug & V4L2_DEV_DEBUG_FOP)
dprintk("%s: open (%d)\n",
video_device_node_name(vdev), ret);
+
/* decrease the refcount in case of an error */
if (ret)
video_put(vdev);
@@ -444,7 +455,7 @@ static int v4l2_open(struct inode *inode, struct file *filp)
static int v4l2_release(struct inode *inode, struct file *filp)
{
struct video_device *vdev = video_devdata(filp);
- int ret = 0;
+ int ret;
/*
* We need to serialize the release() with queueing new requests.
@@ -452,14 +463,12 @@ static int v4l2_release(struct inode *inode, struct file *filp)
* operation, and that should not be mixed with queueing a new
* request at the same time.
*/
- if (vdev->fops->release) {
- if (v4l2_device_supports_requests(vdev->v4l2_dev)) {
- mutex_lock(&vdev->v4l2_dev->mdev->req_queue_mutex);
- ret = vdev->fops->release(filp);
- mutex_unlock(&vdev->v4l2_dev->mdev->req_queue_mutex);
- } else {
- ret = vdev->fops->release(filp);
- }
+ if (v4l2_device_supports_requests(vdev->v4l2_dev)) {
+ mutex_lock(&vdev->v4l2_dev->mdev->req_queue_mutex);
+ ret = vdev->fops->release(filp);
+ mutex_unlock(&vdev->v4l2_dev->mdev->req_queue_mutex);
+ } else {
+ ret = vdev->fops->release(filp);
}
if (vdev->dev_debug & V4L2_DEV_DEBUG_FOP)
@@ -922,6 +931,9 @@ int __video_register_device(struct video_device *vdev,
/* the device_caps field MUST be set for all but subdevs */
if (WARN_ON(type != VFL_TYPE_SUBDEV && !vdev->device_caps))
return -EINVAL;
+ /* the open and release file operations are mandatory */
+ if (WARN_ON(!vdev->fops || !vdev->fops->open || !vdev->fops->release))
+ return -EINVAL;
/* v4l2_fh support */
spin_lock_init(&vdev->fh_lock);
@@ -1114,8 +1126,7 @@ void video_unregister_device(struct video_device *vdev)
*/
clear_bit(V4L2_FL_REGISTERED, &vdev->flags);
mutex_unlock(&videodev_lock);
- if (test_bit(V4L2_FL_USES_V4L2_FH, &vdev->flags))
- v4l2_event_wake_all(vdev);
+ v4l2_event_wake_all(vdev);
device_unregister(&vdev->dev);
}
EXPORT_SYMBOL(video_unregister_device);
diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
index 5e537454f5cd..63b12ef9d4d9 100644
--- a/drivers/media/v4l2-core/v4l2-device.c
+++ b/drivers/media/v4l2-core/v4l2-device.c
@@ -2,7 +2,7 @@
/*
V4L2 device support.
- Copyright (C) 2008 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2008 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
index 7710cb26bea0..346d1b0e10ce 100644
--- a/drivers/media/v4l2-core/v4l2-dv-timings.c
+++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
@@ -1226,6 +1226,7 @@ DEBUGFS_FOPS(avi, V4L2_DEBUGFS_IF_AVI);
DEBUGFS_FOPS(audio, V4L2_DEBUGFS_IF_AUDIO);
DEBUGFS_FOPS(spd, V4L2_DEBUGFS_IF_SPD);
DEBUGFS_FOPS(hdmi, V4L2_DEBUGFS_IF_HDMI);
+DEBUGFS_FOPS(drm, V4L2_DEBUGFS_IF_DRM);
struct v4l2_debugfs_if *v4l2_debugfs_if_alloc(struct dentry *root, u32 if_types,
void *priv,
@@ -1255,6 +1256,9 @@ struct v4l2_debugfs_if *v4l2_debugfs_if_alloc(struct dentry *root, u32 if_types,
if (if_types & V4L2_DEBUGFS_IF_HDMI)
debugfs_create_file("hdmi", 0400, infoframes->if_dir,
infoframes, &infoframe_hdmi_fops);
+ if (if_types & V4L2_DEBUGFS_IF_DRM)
+ debugfs_create_file("hdr_drm", 0400, infoframes->if_dir,
+ infoframes, &infoframe_drm_fops);
return infoframes;
}
EXPORT_SYMBOL_GPL(v4l2_debugfs_if_alloc);
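With the new V4L2_DEBUGFS_IF_DRM type, a driver that already exposes infoframes can add the HDR Dynamic Range and Mastering one by OR-ing the flag into if_types. A sketch (my_if_read is a hypothetical read callback with whatever signature v4l2_debugfs_if_alloc() expects):

	state->infoframes = v4l2_debugfs_if_alloc(debugfs_root,
				V4L2_DEBUGFS_IF_AVI | V4L2_DEBUGFS_IF_DRM,
				state, my_if_read);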
diff --git a/drivers/media/v4l2-core/v4l2-fh.c b/drivers/media/v4l2-core/v4l2-fh.c
index 90eec79ee995..df3ba9d4674b 100644
--- a/drivers/media/v4l2-core/v4l2-fh.c
+++ b/drivers/media/v4l2-core/v4l2-fh.c
@@ -41,10 +41,12 @@ void v4l2_fh_init(struct v4l2_fh *fh, struct video_device *vdev)
}
EXPORT_SYMBOL_GPL(v4l2_fh_init);
-void v4l2_fh_add(struct v4l2_fh *fh)
+void v4l2_fh_add(struct v4l2_fh *fh, struct file *filp)
{
unsigned long flags;
+ filp->private_data = fh;
+
v4l2_prio_open(fh->vdev->prio, &fh->prio);
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
list_add(&fh->list, &fh->vdev->fh_list);
@@ -57,16 +59,15 @@ int v4l2_fh_open(struct file *filp)
struct video_device *vdev = video_devdata(filp);
struct v4l2_fh *fh = kzalloc(sizeof(*fh), GFP_KERNEL);
- filp->private_data = fh;
if (fh == NULL)
return -ENOMEM;
v4l2_fh_init(fh, vdev);
- v4l2_fh_add(fh);
+ v4l2_fh_add(fh, filp);
return 0;
}
EXPORT_SYMBOL_GPL(v4l2_fh_open);
-void v4l2_fh_del(struct v4l2_fh *fh)
+void v4l2_fh_del(struct v4l2_fh *fh, struct file *filp)
{
unsigned long flags;
@@ -74,6 +75,8 @@ void v4l2_fh_del(struct v4l2_fh *fh)
list_del_init(&fh->list);
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
v4l2_prio_close(fh->vdev->prio, fh->prio);
+
+ filp->private_data = NULL;
}
EXPORT_SYMBOL_GPL(v4l2_fh_del);
@@ -90,13 +93,12 @@ EXPORT_SYMBOL_GPL(v4l2_fh_exit);
int v4l2_fh_release(struct file *filp)
{
- struct v4l2_fh *fh = filp->private_data;
+ struct v4l2_fh *fh = file_to_v4l2_fh(filp);
if (fh) {
- v4l2_fh_del(fh);
+ v4l2_fh_del(fh, filp);
v4l2_fh_exit(fh);
kfree(fh);
- filp->private_data = NULL;
}
return 0;
}
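For drivers without per-file state the stock helpers remain sufficient, and after this change they also keep file->private_data consistent on both paths. A sketch of the usual fops wiring:

	static const struct v4l2_file_operations my_fops = {
		.owner		= THIS_MODULE,
		.open		= v4l2_fh_open,
		.release	= v4l2_fh_release,
		.unlocked_ioctl	= video_ioctl2,
		.poll		= v4l2_ctrl_poll,
	};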
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 46da373066f4..01cf52c3ea33 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -1089,8 +1089,8 @@ static void v4l_sanitize_format(struct v4l2_format *fmt)
}
}
-static int v4l_querycap(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_querycap(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct v4l2_capability *cap = (struct v4l2_capability *)arg;
struct video_device *vfd = video_devdata(file);
@@ -1103,7 +1103,7 @@ static int v4l_querycap(const struct v4l2_ioctl_ops *ops,
media_set_bus_info(cap->bus_info, sizeof(cap->bus_info),
vfd->dev_parent);
- ret = ops->vidioc_querycap(file, fh, cap);
+ ret = ops->vidioc_querycap(file, NULL, cap);
/*
* Drivers must not change device_caps, so check for this and
@@ -1123,8 +1123,8 @@ static int v4l_querycap(const struct v4l2_ioctl_ops *ops,
return ret;
}
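From here on the core passes NULL as the opaque cookie, so the second argument of every vidioc handler is dead; handlers must derive their state from the file pointer, exactly as the uvc conversions above do. A hedged handler-side sketch:

	static int my_querycap(struct file *file, void *priv,
			       struct v4l2_capability *cap)
	{
		struct v4l2_fh *vfh = file_to_v4l2_fh(file);	/* priv is NULL */

		strscpy(cap->driver, "mydrv", sizeof(cap->driver));
		strscpy(cap->card, vfh->vdev->name, sizeof(cap->card));
		return 0;
	}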
-static int v4l_g_input(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_g_input(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd = video_devdata(file);
@@ -1133,11 +1133,11 @@ static int v4l_g_input(const struct v4l2_ioctl_ops *ops,
return 0;
}
- return ops->vidioc_g_input(file, fh, arg);
+ return ops->vidioc_g_input(file, NULL, arg);
}
-static int v4l_g_output(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_g_output(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd = video_devdata(file);
@@ -1146,11 +1146,11 @@ static int v4l_g_output(const struct v4l2_ioctl_ops *ops,
return 0;
}
- return ops->vidioc_g_output(file, fh, arg);
+ return ops->vidioc_g_output(file, NULL, arg);
}
-static int v4l_s_input(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_s_input(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd = video_devdata(file);
int ret;
@@ -1162,22 +1162,22 @@ static int v4l_s_input(const struct v4l2_ioctl_ops *ops,
if (vfd->device_caps & V4L2_CAP_IO_MC)
return *(int *)arg ? -EINVAL : 0;
- return ops->vidioc_s_input(file, fh, *(unsigned int *)arg);
+ return ops->vidioc_s_input(file, NULL, *(unsigned int *)arg);
}
-static int v4l_s_output(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_s_output(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd = video_devdata(file);
if (vfd->device_caps & V4L2_CAP_IO_MC)
return *(int *)arg ? -EINVAL : 0;
- return ops->vidioc_s_output(file, fh, *(unsigned int *)arg);
+ return ops->vidioc_s_output(file, NULL, *(unsigned int *)arg);
}
-static int v4l_g_priority(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_g_priority(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd;
u32 *p = arg;
@@ -1187,22 +1187,20 @@ static int v4l_g_priority(const struct v4l2_ioctl_ops *ops,
return 0;
}
-static int v4l_s_priority(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_s_priority(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd;
struct v4l2_fh *vfh;
u32 *p = arg;
vfd = video_devdata(file);
- if (!test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags))
- return -ENOTTY;
- vfh = file->private_data;
+ vfh = file_to_v4l2_fh(file);
return v4l2_prio_change(vfd->prio, &vfh->prio, *p);
}
-static int v4l_enuminput(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_enuminput(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_input *p = arg;
@@ -1224,11 +1222,11 @@ static int v4l_enuminput(const struct v4l2_ioctl_ops *ops,
return 0;
}
- return ops->vidioc_enum_input(file, fh, p);
+ return ops->vidioc_enum_input(file, NULL, p);
}
-static int v4l_enumoutput(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_enumoutput(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_output *p = arg;
@@ -1250,7 +1248,7 @@ static int v4l_enumoutput(const struct v4l2_ioctl_ops *ops,
return 0;
}
- return ops->vidioc_enum_output(file, fh, p);
+ return ops->vidioc_enum_output(file, NULL, p);
}
static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
@@ -1589,8 +1587,8 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
fmt->flags |= flags;
}
-static int v4l_enum_fmt(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_enum_fmt(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vdev = video_devdata(file);
struct v4l2_fmtdesc *p = arg;
@@ -1620,12 +1618,12 @@ static int v4l_enum_fmt(const struct v4l2_ioctl_ops *ops,
if (unlikely(!ops->vidioc_enum_fmt_vid_cap))
break;
- ret = ops->vidioc_enum_fmt_vid_cap(file, fh, arg);
+ ret = ops->vidioc_enum_fmt_vid_cap(file, NULL, arg);
break;
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
if (unlikely(!ops->vidioc_enum_fmt_vid_overlay))
break;
- ret = ops->vidioc_enum_fmt_vid_overlay(file, fh, arg);
+ ret = ops->vidioc_enum_fmt_vid_overlay(file, NULL, arg);
break;
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
@@ -1637,27 +1635,27 @@ static int v4l_enum_fmt(const struct v4l2_ioctl_ops *ops,
if (unlikely(!ops->vidioc_enum_fmt_vid_out))
break;
- ret = ops->vidioc_enum_fmt_vid_out(file, fh, arg);
+ ret = ops->vidioc_enum_fmt_vid_out(file, NULL, arg);
break;
case V4L2_BUF_TYPE_SDR_CAPTURE:
if (unlikely(!ops->vidioc_enum_fmt_sdr_cap))
break;
- ret = ops->vidioc_enum_fmt_sdr_cap(file, fh, arg);
+ ret = ops->vidioc_enum_fmt_sdr_cap(file, NULL, arg);
break;
case V4L2_BUF_TYPE_SDR_OUTPUT:
if (unlikely(!ops->vidioc_enum_fmt_sdr_out))
break;
- ret = ops->vidioc_enum_fmt_sdr_out(file, fh, arg);
+ ret = ops->vidioc_enum_fmt_sdr_out(file, NULL, arg);
break;
case V4L2_BUF_TYPE_META_CAPTURE:
if (unlikely(!ops->vidioc_enum_fmt_meta_cap))
break;
- ret = ops->vidioc_enum_fmt_meta_cap(file, fh, arg);
+ ret = ops->vidioc_enum_fmt_meta_cap(file, NULL, arg);
break;
case V4L2_BUF_TYPE_META_OUTPUT:
if (unlikely(!ops->vidioc_enum_fmt_meta_out))
break;
- ret = ops->vidioc_enum_fmt_meta_out(file, fh, arg);
+ ret = ops->vidioc_enum_fmt_meta_out(file, NULL, arg);
break;
}
if (ret == 0)
@@ -1680,8 +1678,8 @@ static void v4l_pix_format_touch(struct v4l2_pix_format *p)
p->xfer_func = 0;
}
-static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct v4l2_format *p = arg;
struct video_device *vfd = video_devdata(file);
@@ -1697,50 +1695,50 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops,
if (unlikely(!ops->vidioc_g_fmt_vid_cap))
break;
p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
- ret = ops->vidioc_g_fmt_vid_cap(file, fh, arg);
+ ret = ops->vidioc_g_fmt_vid_cap(file, NULL, arg);
/* just in case the driver zeroed it again */
p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
if (vfd->vfl_type == VFL_TYPE_TOUCH)
v4l_pix_format_touch(&p->fmt.pix);
return ret;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
- return ops->vidioc_g_fmt_vid_cap_mplane(file, fh, arg);
+ return ops->vidioc_g_fmt_vid_cap_mplane(file, NULL, arg);
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
- return ops->vidioc_g_fmt_vid_overlay(file, fh, arg);
+ return ops->vidioc_g_fmt_vid_overlay(file, NULL, arg);
case V4L2_BUF_TYPE_VBI_CAPTURE:
- return ops->vidioc_g_fmt_vbi_cap(file, fh, arg);
+ return ops->vidioc_g_fmt_vbi_cap(file, NULL, arg);
case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
- return ops->vidioc_g_fmt_sliced_vbi_cap(file, fh, arg);
+ return ops->vidioc_g_fmt_sliced_vbi_cap(file, NULL, arg);
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
if (unlikely(!ops->vidioc_g_fmt_vid_out))
break;
p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
- ret = ops->vidioc_g_fmt_vid_out(file, fh, arg);
+ ret = ops->vidioc_g_fmt_vid_out(file, NULL, arg);
/* just in case the driver zeroed it again */
p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
return ret;
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
- return ops->vidioc_g_fmt_vid_out_mplane(file, fh, arg);
+ return ops->vidioc_g_fmt_vid_out_mplane(file, NULL, arg);
case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
- return ops->vidioc_g_fmt_vid_out_overlay(file, fh, arg);
+ return ops->vidioc_g_fmt_vid_out_overlay(file, NULL, arg);
case V4L2_BUF_TYPE_VBI_OUTPUT:
- return ops->vidioc_g_fmt_vbi_out(file, fh, arg);
+ return ops->vidioc_g_fmt_vbi_out(file, NULL, arg);
case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
- return ops->vidioc_g_fmt_sliced_vbi_out(file, fh, arg);
+ return ops->vidioc_g_fmt_sliced_vbi_out(file, NULL, arg);
case V4L2_BUF_TYPE_SDR_CAPTURE:
- return ops->vidioc_g_fmt_sdr_cap(file, fh, arg);
+ return ops->vidioc_g_fmt_sdr_cap(file, NULL, arg);
case V4L2_BUF_TYPE_SDR_OUTPUT:
- return ops->vidioc_g_fmt_sdr_out(file, fh, arg);
+ return ops->vidioc_g_fmt_sdr_out(file, NULL, arg);
case V4L2_BUF_TYPE_META_CAPTURE:
- return ops->vidioc_g_fmt_meta_cap(file, fh, arg);
+ return ops->vidioc_g_fmt_meta_cap(file, NULL, arg);
case V4L2_BUF_TYPE_META_OUTPUT:
- return ops->vidioc_g_fmt_meta_out(file, fh, arg);
+ return ops->vidioc_g_fmt_meta_out(file, NULL, arg);
}
return -EINVAL;
}
-static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct v4l2_format *p = arg;
struct video_device *vfd = video_devdata(file);
@@ -1760,7 +1758,7 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
if (unlikely(!ops->vidioc_s_fmt_vid_cap))
break;
memset_after(p, 0, fmt.pix);
- ret = ops->vidioc_s_fmt_vid_cap(file, fh, arg);
+ ret = ops->vidioc_s_fmt_vid_cap(file, NULL, arg);
/* just in case the driver zeroed it again */
p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
if (vfd->vfl_type == VFL_TYPE_TOUCH)
@@ -1773,7 +1771,7 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
for (i = 0; i < p->fmt.pix_mp.num_planes; i++)
memset_after(&p->fmt.pix_mp.plane_fmt[i],
0, bytesperline);
- return ops->vidioc_s_fmt_vid_cap_mplane(file, fh, arg);
+ return ops->vidioc_s_fmt_vid_cap_mplane(file, NULL, arg);
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
if (unlikely(!ops->vidioc_s_fmt_vid_overlay))
break;
@@ -1781,22 +1779,22 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
p->fmt.win.clips = NULL;
p->fmt.win.clipcount = 0;
p->fmt.win.bitmap = NULL;
- return ops->vidioc_s_fmt_vid_overlay(file, fh, arg);
+ return ops->vidioc_s_fmt_vid_overlay(file, NULL, arg);
case V4L2_BUF_TYPE_VBI_CAPTURE:
if (unlikely(!ops->vidioc_s_fmt_vbi_cap))
break;
memset_after(p, 0, fmt.vbi.flags);
- return ops->vidioc_s_fmt_vbi_cap(file, fh, arg);
+ return ops->vidioc_s_fmt_vbi_cap(file, NULL, arg);
case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_cap))
break;
memset_after(p, 0, fmt.sliced.io_size);
- return ops->vidioc_s_fmt_sliced_vbi_cap(file, fh, arg);
+ return ops->vidioc_s_fmt_sliced_vbi_cap(file, NULL, arg);
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
if (unlikely(!ops->vidioc_s_fmt_vid_out))
break;
memset_after(p, 0, fmt.pix);
- ret = ops->vidioc_s_fmt_vid_out(file, fh, arg);
+ ret = ops->vidioc_s_fmt_vid_out(file, NULL, arg);
/* just in case the driver zeroed it again */
p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
return ret;
@@ -1807,7 +1805,7 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
for (i = 0; i < p->fmt.pix_mp.num_planes; i++)
memset_after(&p->fmt.pix_mp.plane_fmt[i],
0, bytesperline);
- return ops->vidioc_s_fmt_vid_out_mplane(file, fh, arg);
+ return ops->vidioc_s_fmt_vid_out_mplane(file, NULL, arg);
case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
if (unlikely(!ops->vidioc_s_fmt_vid_out_overlay))
break;
@@ -1815,43 +1813,43 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
p->fmt.win.clips = NULL;
p->fmt.win.clipcount = 0;
p->fmt.win.bitmap = NULL;
- return ops->vidioc_s_fmt_vid_out_overlay(file, fh, arg);
+ return ops->vidioc_s_fmt_vid_out_overlay(file, NULL, arg);
case V4L2_BUF_TYPE_VBI_OUTPUT:
if (unlikely(!ops->vidioc_s_fmt_vbi_out))
break;
memset_after(p, 0, fmt.vbi.flags);
- return ops->vidioc_s_fmt_vbi_out(file, fh, arg);
+ return ops->vidioc_s_fmt_vbi_out(file, NULL, arg);
case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_out))
break;
memset_after(p, 0, fmt.sliced.io_size);
- return ops->vidioc_s_fmt_sliced_vbi_out(file, fh, arg);
+ return ops->vidioc_s_fmt_sliced_vbi_out(file, NULL, arg);
case V4L2_BUF_TYPE_SDR_CAPTURE:
if (unlikely(!ops->vidioc_s_fmt_sdr_cap))
break;
memset_after(p, 0, fmt.sdr.buffersize);
- return ops->vidioc_s_fmt_sdr_cap(file, fh, arg);
+ return ops->vidioc_s_fmt_sdr_cap(file, NULL, arg);
case V4L2_BUF_TYPE_SDR_OUTPUT:
if (unlikely(!ops->vidioc_s_fmt_sdr_out))
break;
memset_after(p, 0, fmt.sdr.buffersize);
- return ops->vidioc_s_fmt_sdr_out(file, fh, arg);
+ return ops->vidioc_s_fmt_sdr_out(file, NULL, arg);
case V4L2_BUF_TYPE_META_CAPTURE:
if (unlikely(!ops->vidioc_s_fmt_meta_cap))
break;
memset_after(p, 0, fmt.meta);
- return ops->vidioc_s_fmt_meta_cap(file, fh, arg);
+ return ops->vidioc_s_fmt_meta_cap(file, NULL, arg);
case V4L2_BUF_TYPE_META_OUTPUT:
if (unlikely(!ops->vidioc_s_fmt_meta_out))
break;
memset_after(p, 0, fmt.meta);
- return ops->vidioc_s_fmt_meta_out(file, fh, arg);
+ return ops->vidioc_s_fmt_meta_out(file, NULL, arg);
}
return -EINVAL;
}
-static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct v4l2_format *p = arg;
struct video_device *vfd = video_devdata(file);
@@ -1868,7 +1866,7 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
if (unlikely(!ops->vidioc_try_fmt_vid_cap))
break;
memset_after(p, 0, fmt.pix);
- ret = ops->vidioc_try_fmt_vid_cap(file, fh, arg);
+ ret = ops->vidioc_try_fmt_vid_cap(file, NULL, arg);
/* just in case the driver zeroed it again */
p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
if (vfd->vfl_type == VFL_TYPE_TOUCH)
@@ -1881,7 +1879,7 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
for (i = 0; i < p->fmt.pix_mp.num_planes; i++)
memset_after(&p->fmt.pix_mp.plane_fmt[i],
0, bytesperline);
- return ops->vidioc_try_fmt_vid_cap_mplane(file, fh, arg);
+ return ops->vidioc_try_fmt_vid_cap_mplane(file, NULL, arg);
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
if (unlikely(!ops->vidioc_try_fmt_vid_overlay))
break;
@@ -1889,22 +1887,22 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
p->fmt.win.clips = NULL;
p->fmt.win.clipcount = 0;
p->fmt.win.bitmap = NULL;
- return ops->vidioc_try_fmt_vid_overlay(file, fh, arg);
+ return ops->vidioc_try_fmt_vid_overlay(file, NULL, arg);
case V4L2_BUF_TYPE_VBI_CAPTURE:
if (unlikely(!ops->vidioc_try_fmt_vbi_cap))
break;
memset_after(p, 0, fmt.vbi.flags);
- return ops->vidioc_try_fmt_vbi_cap(file, fh, arg);
+ return ops->vidioc_try_fmt_vbi_cap(file, NULL, arg);
case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_cap))
break;
memset_after(p, 0, fmt.sliced.io_size);
- return ops->vidioc_try_fmt_sliced_vbi_cap(file, fh, arg);
+ return ops->vidioc_try_fmt_sliced_vbi_cap(file, NULL, arg);
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
if (unlikely(!ops->vidioc_try_fmt_vid_out))
break;
memset_after(p, 0, fmt.pix);
- ret = ops->vidioc_try_fmt_vid_out(file, fh, arg);
+ ret = ops->vidioc_try_fmt_vid_out(file, NULL, arg);
/* just in case the driver zeroed it again */
p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
return ret;
@@ -1915,7 +1913,7 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
for (i = 0; i < p->fmt.pix_mp.num_planes; i++)
memset_after(&p->fmt.pix_mp.plane_fmt[i],
0, bytesperline);
- return ops->vidioc_try_fmt_vid_out_mplane(file, fh, arg);
+ return ops->vidioc_try_fmt_vid_out_mplane(file, NULL, arg);
case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
if (unlikely(!ops->vidioc_try_fmt_vid_out_overlay))
break;
@@ -1923,55 +1921,55 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
p->fmt.win.clips = NULL;
p->fmt.win.clipcount = 0;
p->fmt.win.bitmap = NULL;
- return ops->vidioc_try_fmt_vid_out_overlay(file, fh, arg);
+ return ops->vidioc_try_fmt_vid_out_overlay(file, NULL, arg);
case V4L2_BUF_TYPE_VBI_OUTPUT:
if (unlikely(!ops->vidioc_try_fmt_vbi_out))
break;
memset_after(p, 0, fmt.vbi.flags);
- return ops->vidioc_try_fmt_vbi_out(file, fh, arg);
+ return ops->vidioc_try_fmt_vbi_out(file, NULL, arg);
case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_out))
break;
memset_after(p, 0, fmt.sliced.io_size);
- return ops->vidioc_try_fmt_sliced_vbi_out(file, fh, arg);
+ return ops->vidioc_try_fmt_sliced_vbi_out(file, NULL, arg);
case V4L2_BUF_TYPE_SDR_CAPTURE:
if (unlikely(!ops->vidioc_try_fmt_sdr_cap))
break;
memset_after(p, 0, fmt.sdr.buffersize);
- return ops->vidioc_try_fmt_sdr_cap(file, fh, arg);
+ return ops->vidioc_try_fmt_sdr_cap(file, NULL, arg);
case V4L2_BUF_TYPE_SDR_OUTPUT:
if (unlikely(!ops->vidioc_try_fmt_sdr_out))
break;
memset_after(p, 0, fmt.sdr.buffersize);
- return ops->vidioc_try_fmt_sdr_out(file, fh, arg);
+ return ops->vidioc_try_fmt_sdr_out(file, NULL, arg);
case V4L2_BUF_TYPE_META_CAPTURE:
if (unlikely(!ops->vidioc_try_fmt_meta_cap))
break;
memset_after(p, 0, fmt.meta);
- return ops->vidioc_try_fmt_meta_cap(file, fh, arg);
+ return ops->vidioc_try_fmt_meta_cap(file, NULL, arg);
case V4L2_BUF_TYPE_META_OUTPUT:
if (unlikely(!ops->vidioc_try_fmt_meta_out))
break;
memset_after(p, 0, fmt.meta);
- return ops->vidioc_try_fmt_meta_out(file, fh, arg);
+ return ops->vidioc_try_fmt_meta_out(file, NULL, arg);
}
return -EINVAL;
}
-static int v4l_streamon(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_streamon(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
- return ops->vidioc_streamon(file, fh, *(unsigned int *)arg);
+ return ops->vidioc_streamon(file, NULL, *(unsigned int *)arg);
}
-static int v4l_streamoff(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_streamoff(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
- return ops->vidioc_streamoff(file, fh, *(unsigned int *)arg);
+ return ops->vidioc_streamoff(file, NULL, *(unsigned int *)arg);
}
-static int v4l_g_tuner(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_g_tuner(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_tuner *p = arg;
@@ -1979,14 +1977,14 @@ static int v4l_g_tuner(const struct v4l2_ioctl_ops *ops,
p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
- err = ops->vidioc_g_tuner(file, fh, p);
+ err = ops->vidioc_g_tuner(file, NULL, p);
if (!err)
p->capability |= V4L2_TUNER_CAP_FREQ_BANDS;
return err;
}
-static int v4l_s_tuner(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_s_tuner(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_tuner *p = arg;
@@ -1997,11 +1995,11 @@ static int v4l_s_tuner(const struct v4l2_ioctl_ops *ops,
return ret;
p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
- return ops->vidioc_s_tuner(file, fh, p);
+ return ops->vidioc_s_tuner(file, NULL, p);
}
static int v4l_g_modulator(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+ struct file *file, void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_modulator *p = arg;
@@ -2010,14 +2008,14 @@ static int v4l_g_modulator(const struct v4l2_ioctl_ops *ops,
if (vfd->vfl_type == VFL_TYPE_RADIO)
p->type = V4L2_TUNER_RADIO;
- err = ops->vidioc_g_modulator(file, fh, p);
+ err = ops->vidioc_g_modulator(file, NULL, p);
if (!err)
p->capability |= V4L2_TUNER_CAP_FREQ_BANDS;
return err;
}
static int v4l_s_modulator(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+ struct file *file, void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_modulator *p = arg;
@@ -2025,11 +2023,11 @@ static int v4l_s_modulator(const struct v4l2_ioctl_ops *ops,
if (vfd->vfl_type == VFL_TYPE_RADIO)
p->type = V4L2_TUNER_RADIO;
- return ops->vidioc_s_modulator(file, fh, p);
+ return ops->vidioc_s_modulator(file, NULL, p);
}
static int v4l_g_frequency(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+ struct file *file, void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_frequency *p = arg;
@@ -2039,11 +2037,11 @@ static int v4l_g_frequency(const struct v4l2_ioctl_ops *ops,
else
p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
- return ops->vidioc_g_frequency(file, fh, p);
+ return ops->vidioc_g_frequency(file, NULL, p);
}
static int v4l_s_frequency(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+ struct file *file, void *arg)
{
struct video_device *vfd = video_devdata(file);
const struct v4l2_frequency *p = arg;
@@ -2062,11 +2060,11 @@ static int v4l_s_frequency(const struct v4l2_ioctl_ops *ops,
if (type != p->type)
return -EINVAL;
}
- return ops->vidioc_s_frequency(file, fh, p);
+ return ops->vidioc_s_frequency(file, NULL, p);
}
-static int v4l_enumstd(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_enumstd(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_standard *p = arg;
@@ -2074,8 +2072,8 @@ static int v4l_enumstd(const struct v4l2_ioctl_ops *ops,
return v4l_video_std_enumstd(p, vfd->tvnorms);
}
-static int v4l_s_std(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_s_std(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd = video_devdata(file);
v4l2_std_id id = *(v4l2_std_id *)arg, norm;
@@ -2089,11 +2087,11 @@ static int v4l_s_std(const struct v4l2_ioctl_ops *ops,
return -EINVAL;
/* Calls the specific handler */
- return ops->vidioc_s_std(file, fh, norm);
+ return ops->vidioc_s_std(file, NULL, norm);
}
-static int v4l_querystd(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_querystd(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd = video_devdata(file);
v4l2_std_id *p = arg;
@@ -2111,11 +2109,11 @@ static int v4l_querystd(const struct v4l2_ioctl_ops *ops,
* their efforts to improve the standards detection.
*/
*p = vfd->tvnorms;
- return ops->vidioc_querystd(file, fh, arg);
+ return ops->vidioc_querystd(file, NULL, arg);
}
static int v4l_s_hw_freq_seek(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+ struct file *file, void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_hw_freq_seek *p = arg;
@@ -2133,26 +2131,26 @@ static int v4l_s_hw_freq_seek(const struct v4l2_ioctl_ops *ops,
V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
if (p->type != type)
return -EINVAL;
- return ops->vidioc_s_hw_freq_seek(file, fh, p);
+ return ops->vidioc_s_hw_freq_seek(file, NULL, p);
}
-static int v4l_s_fbuf(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_s_fbuf(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct v4l2_framebuffer *p = arg;
p->base = NULL;
- return ops->vidioc_s_fbuf(file, fh, p);
+ return ops->vidioc_s_fbuf(file, NULL, p);
}
-static int v4l_overlay(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_overlay(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
- return ops->vidioc_overlay(file, fh, *(unsigned int *)arg);
+ return ops->vidioc_overlay(file, NULL, *(unsigned int *)arg);
}
-static int v4l_reqbufs(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_reqbufs(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_requestbuffers *p = arg;
@@ -2167,38 +2165,38 @@ static int v4l_reqbufs(const struct v4l2_ioctl_ops *ops,
if (is_valid_ioctl(vfd, VIDIOC_REMOVE_BUFS))
p->capabilities = V4L2_BUF_CAP_SUPPORTS_REMOVE_BUFS;
- return ops->vidioc_reqbufs(file, fh, p);
+ return ops->vidioc_reqbufs(file, NULL, p);
}
-static int v4l_querybuf(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_querybuf(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct v4l2_buffer *p = arg;
int ret = check_fmt(file, p->type);
- return ret ? ret : ops->vidioc_querybuf(file, fh, p);
+ return ret ? ret : ops->vidioc_querybuf(file, NULL, p);
}
-static int v4l_qbuf(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_qbuf(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct v4l2_buffer *p = arg;
int ret = check_fmt(file, p->type);
- return ret ? ret : ops->vidioc_qbuf(file, fh, p);
+ return ret ? ret : ops->vidioc_qbuf(file, NULL, p);
}
-static int v4l_dqbuf(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_dqbuf(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct v4l2_buffer *p = arg;
int ret = check_fmt(file, p->type);
- return ret ? ret : ops->vidioc_dqbuf(file, fh, p);
+ return ret ? ret : ops->vidioc_dqbuf(file, NULL, p);
}
static int v4l_create_bufs(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+ struct file *file, void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_create_buffers *create = arg;
@@ -2215,7 +2213,7 @@ static int v4l_create_bufs(const struct v4l2_ioctl_ops *ops,
if (is_valid_ioctl(vfd, VIDIOC_REMOVE_BUFS))
create->capabilities = V4L2_BUF_CAP_SUPPORTS_REMOVE_BUFS;
- ret = ops->vidioc_create_bufs(file, fh, create);
+ ret = ops->vidioc_create_bufs(file, NULL, create);
if (create->format.type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
create->format.type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
@@ -2225,27 +2223,27 @@ static int v4l_create_bufs(const struct v4l2_ioctl_ops *ops,
}
static int v4l_prepare_buf(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+ struct file *file, void *arg)
{
struct v4l2_buffer *b = arg;
int ret = check_fmt(file, b->type);
- return ret ? ret : ops->vidioc_prepare_buf(file, fh, b);
+ return ret ? ret : ops->vidioc_prepare_buf(file, NULL, b);
}
static int v4l_remove_bufs(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+ struct file *file, void *arg)
{
struct v4l2_remove_buffers *remove = arg;
if (ops->vidioc_remove_bufs)
- return ops->vidioc_remove_bufs(file, fh, remove);
+ return ops->vidioc_remove_bufs(file, NULL, remove);
return -ENOTTY;
}
-static int v4l_g_parm(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_g_parm(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_streamparm *p = arg;
@@ -2255,20 +2253,20 @@ static int v4l_g_parm(const struct v4l2_ioctl_ops *ops,
if (ret)
return ret;
if (ops->vidioc_g_parm)
- return ops->vidioc_g_parm(file, fh, p);
+ return ops->vidioc_g_parm(file, NULL, p);
if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
return -EINVAL;
if (vfd->device_caps & V4L2_CAP_READWRITE)
p->parm.capture.readbuffers = 2;
- ret = ops->vidioc_g_std(file, fh, &std);
+ ret = ops->vidioc_g_std(file, NULL, &std);
if (ret == 0)
v4l2_video_std_frame_period(std, &p->parm.capture.timeperframe);
return ret;
}
-static int v4l_s_parm(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_s_parm(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct v4l2_streamparm *p = arg;
int ret = check_fmt(file, p->type);
@@ -2288,17 +2286,16 @@ static int v4l_s_parm(const struct v4l2_ioctl_ops *ops,
p->parm.capture.extendedmode = 0;
p->parm.capture.capturemode &= V4L2_MODE_HIGHQUALITY;
}
- return ops->vidioc_s_parm(file, fh, p);
+ return ops->vidioc_s_parm(file, NULL, p);
}
-static int v4l_queryctrl(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_queryctrl(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_query_ext_ctrl qec = {};
struct v4l2_queryctrl *p = arg;
- struct v4l2_fh *vfh =
- test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
int ret;
if (vfh && vfh->ctrl_handler)
@@ -2310,7 +2307,7 @@ static int v4l_queryctrl(const struct v4l2_ioctl_ops *ops,
/* Simulate query_ext_ctr using query_ctrl. */
qec.id = p->id;
- ret = ops->vidioc_query_ext_ctrl(file, fh, &qec);
+ ret = ops->vidioc_query_ext_ctrl(file, NULL, &qec);
if (ret)
return ret;
v4l2_query_ext_ctrl_to_v4l2_queryctrl(p, &qec);
@@ -2318,46 +2315,43 @@ static int v4l_queryctrl(const struct v4l2_ioctl_ops *ops,
}
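
The hunks above rely on file_to_v4l2_fh(), whose definition is not part of this excerpt. Since the series converts every video node to store its struct v4l2_fh in file->private_data, the helper is presumably a trivial accessor along these lines:

/* Sketch only -- the real helper is defined outside this diff and may differ. */
static inline struct v4l2_fh *file_to_v4l2_fh(struct file *file)
{
	/*
	 * Every opened video node now keeps its v4l2_fh here, so the old
	 * V4L2_FL_USES_V4L2_FH test is no longer needed.
	 */
	return file->private_data;
}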
static int v4l_query_ext_ctrl(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+ struct file *file, void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_query_ext_ctrl *p = arg;
- struct v4l2_fh *vfh =
- test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
if (vfh && vfh->ctrl_handler)
return v4l2_query_ext_ctrl(vfh->ctrl_handler, p);
if (vfd->ctrl_handler)
return v4l2_query_ext_ctrl(vfd->ctrl_handler, p);
if (ops->vidioc_query_ext_ctrl)
- return ops->vidioc_query_ext_ctrl(file, fh, p);
+ return ops->vidioc_query_ext_ctrl(file, NULL, p);
return -ENOTTY;
}
-static int v4l_querymenu(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_querymenu(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_querymenu *p = arg;
- struct v4l2_fh *vfh =
- test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
if (vfh && vfh->ctrl_handler)
return v4l2_querymenu(vfh->ctrl_handler, p);
if (vfd->ctrl_handler)
return v4l2_querymenu(vfd->ctrl_handler, p);
if (ops->vidioc_querymenu)
- return ops->vidioc_querymenu(file, fh, p);
+ return ops->vidioc_querymenu(file, NULL, p);
return -ENOTTY;
}
-static int v4l_g_ctrl(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_g_ctrl(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_control *p = arg;
- struct v4l2_fh *vfh =
- test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
struct v4l2_ext_controls ctrls;
struct v4l2_ext_control ctrl;
@@ -2374,7 +2368,7 @@ static int v4l_g_ctrl(const struct v4l2_ioctl_ops *ops,
ctrl.id = p->id;
ctrl.value = p->value;
if (check_ext_ctrls(&ctrls, VIDIOC_G_CTRL)) {
- int ret = ops->vidioc_g_ext_ctrls(file, fh, &ctrls);
+ int ret = ops->vidioc_g_ext_ctrls(file, NULL, &ctrls);
if (ret == 0)
p->value = ctrl.value;
@@ -2383,13 +2377,12 @@ static int v4l_g_ctrl(const struct v4l2_ioctl_ops *ops,
return -EINVAL;
}
-static int v4l_s_ctrl(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_s_ctrl(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_control *p = arg;
- struct v4l2_fh *vfh =
- test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
struct v4l2_ext_controls ctrls;
struct v4l2_ext_control ctrl;
int ret;
@@ -2408,18 +2401,17 @@ static int v4l_s_ctrl(const struct v4l2_ioctl_ops *ops,
ctrl.value = p->value;
if (!check_ext_ctrls(&ctrls, VIDIOC_S_CTRL))
return -EINVAL;
- ret = ops->vidioc_s_ext_ctrls(file, fh, &ctrls);
+ ret = ops->vidioc_s_ext_ctrls(file, NULL, &ctrls);
p->value = ctrl.value;
return ret;
}
static int v4l_g_ext_ctrls(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+ struct file *file, void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_ext_controls *p = arg;
- struct v4l2_fh *vfh =
- test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
p->error_idx = p->count;
if (vfh && vfh->ctrl_handler)
@@ -2431,16 +2423,15 @@ static int v4l_g_ext_ctrls(const struct v4l2_ioctl_ops *ops,
if (ops->vidioc_g_ext_ctrls == NULL)
return -ENOTTY;
return check_ext_ctrls(p, VIDIOC_G_EXT_CTRLS) ?
- ops->vidioc_g_ext_ctrls(file, fh, p) : -EINVAL;
+ ops->vidioc_g_ext_ctrls(file, NULL, p) : -EINVAL;
}
static int v4l_s_ext_ctrls(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+ struct file *file, void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_ext_controls *p = arg;
- struct v4l2_fh *vfh =
- test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
p->error_idx = p->count;
if (vfh && vfh->ctrl_handler)
@@ -2452,16 +2443,15 @@ static int v4l_s_ext_ctrls(const struct v4l2_ioctl_ops *ops,
if (ops->vidioc_s_ext_ctrls == NULL)
return -ENOTTY;
return check_ext_ctrls(p, VIDIOC_S_EXT_CTRLS) ?
- ops->vidioc_s_ext_ctrls(file, fh, p) : -EINVAL;
+ ops->vidioc_s_ext_ctrls(file, NULL, p) : -EINVAL;
}
static int v4l_try_ext_ctrls(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+ struct file *file, void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_ext_controls *p = arg;
- struct v4l2_fh *vfh =
- test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
p->error_idx = p->count;
if (vfh && vfh->ctrl_handler)
@@ -2473,7 +2463,7 @@ static int v4l_try_ext_ctrls(const struct v4l2_ioctl_ops *ops,
if (ops->vidioc_try_ext_ctrls == NULL)
return -ENOTTY;
return check_ext_ctrls(p, VIDIOC_TRY_EXT_CTRLS) ?
- ops->vidioc_try_ext_ctrls(file, fh, p) : -EINVAL;
+ ops->vidioc_try_ext_ctrls(file, NULL, p) : -EINVAL;
}
/*
@@ -2486,7 +2476,7 @@ static int v4l_try_ext_ctrls(const struct v4l2_ioctl_ops *ops,
* type and drivers don't need to check for both.
*/
static int v4l_g_selection(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+ struct file *file, void *arg)
{
struct v4l2_selection *p = arg;
u32 old_type = p->type;
@@ -2496,13 +2486,13 @@ static int v4l_g_selection(const struct v4l2_ioctl_ops *ops,
p->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
else if (p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
p->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
- ret = ops->vidioc_g_selection(file, fh, p);
+ ret = ops->vidioc_g_selection(file, NULL, p);
p->type = old_type;
return ret;
}
static int v4l_s_selection(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+ struct file *file, void *arg)
{
struct v4l2_selection *p = arg;
u32 old_type = p->type;
@@ -2512,13 +2502,13 @@ static int v4l_s_selection(const struct v4l2_ioctl_ops *ops,
p->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
else if (p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
p->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
- ret = ops->vidioc_s_selection(file, fh, p);
+ ret = ops->vidioc_s_selection(file, NULL, p);
p->type = old_type;
return ret;
}
-static int v4l_g_crop(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_g_crop(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_crop *p = arg;
@@ -2539,7 +2529,7 @@ static int v4l_g_crop(const struct v4l2_ioctl_ops *ops,
s.target = s.target == V4L2_SEL_TGT_COMPOSE ?
V4L2_SEL_TGT_CROP : V4L2_SEL_TGT_COMPOSE;
- ret = v4l_g_selection(ops, file, fh, &s);
+ ret = v4l_g_selection(ops, file, &s);
/* copying results to old structure on success */
if (!ret)
@@ -2547,8 +2537,8 @@ static int v4l_g_crop(const struct v4l2_ioctl_ops *ops,
return ret;
}
-static int v4l_s_crop(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_s_crop(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_crop *p = arg;
@@ -2569,11 +2559,11 @@ static int v4l_s_crop(const struct v4l2_ioctl_ops *ops,
s.target = s.target == V4L2_SEL_TGT_COMPOSE ?
V4L2_SEL_TGT_CROP : V4L2_SEL_TGT_COMPOSE;
- return v4l_s_selection(ops, file, fh, &s);
+ return v4l_s_selection(ops, file, &s);
}
-static int v4l_cropcap(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_cropcap(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_cropcap *p = arg;
@@ -2597,7 +2587,7 @@ static int v4l_cropcap(const struct v4l2_ioctl_ops *ops,
return -ENOTTY;
if (ops->vidioc_g_pixelaspect)
- ret = ops->vidioc_g_pixelaspect(file, fh, s.type,
+ ret = ops->vidioc_g_pixelaspect(file, NULL, s.type,
&p->pixelaspect);
/*
@@ -2619,7 +2609,7 @@ static int v4l_cropcap(const struct v4l2_ioctl_ops *ops,
s.target = s.target == V4L2_SEL_TGT_COMPOSE_BOUNDS ?
V4L2_SEL_TGT_CROP_BOUNDS : V4L2_SEL_TGT_COMPOSE_BOUNDS;
- ret = v4l_g_selection(ops, file, fh, &s);
+ ret = v4l_g_selection(ops, file, &s);
if (ret)
return ret;
p->bounds = s.r;
@@ -2630,7 +2620,7 @@ static int v4l_cropcap(const struct v4l2_ioctl_ops *ops,
else
s.target = V4L2_SEL_TGT_CROP_DEFAULT;
- ret = v4l_g_selection(ops, file, fh, &s);
+ ret = v4l_g_selection(ops, file, &s);
if (ret)
return ret;
p->defrect = s.r;
@@ -2638,8 +2628,8 @@ static int v4l_cropcap(const struct v4l2_ioctl_ops *ops,
return 0;
}
-static int v4l_log_status(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_log_status(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd = video_devdata(file);
int ret;
@@ -2647,7 +2637,7 @@ static int v4l_log_status(const struct v4l2_ioctl_ops *ops,
if (vfd->v4l2_dev)
pr_info("%s: ================= START STATUS =================\n",
vfd->v4l2_dev->name);
- ret = ops->vidioc_log_status(file, fh);
+ ret = ops->vidioc_log_status(file, NULL);
if (vfd->v4l2_dev)
pr_info("%s: ================== END STATUS ==================\n",
vfd->v4l2_dev->name);
@@ -2655,7 +2645,7 @@ static int v4l_log_status(const struct v4l2_ioctl_ops *ops,
}
static int v4l_dbg_g_register(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+ struct file *file, void *arg)
{
#ifdef CONFIG_VIDEO_ADV_DEBUG
struct v4l2_dbg_register *p = arg;
@@ -2675,7 +2665,7 @@ static int v4l_dbg_g_register(const struct v4l2_ioctl_ops *ops,
}
if (ops->vidioc_g_register && p->match.type == V4L2_CHIP_MATCH_BRIDGE &&
(ops->vidioc_g_chip_info || p->match.addr == 0))
- return ops->vidioc_g_register(file, fh, p);
+ return ops->vidioc_g_register(file, NULL, p);
return -EINVAL;
#else
return -ENOTTY;
@@ -2683,7 +2673,7 @@ static int v4l_dbg_g_register(const struct v4l2_ioctl_ops *ops,
}
static int v4l_dbg_s_register(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+ struct file *file, void *arg)
{
#ifdef CONFIG_VIDEO_ADV_DEBUG
const struct v4l2_dbg_register *p = arg;
@@ -2703,7 +2693,7 @@ static int v4l_dbg_s_register(const struct v4l2_ioctl_ops *ops,
}
if (ops->vidioc_s_register && p->match.type == V4L2_CHIP_MATCH_BRIDGE &&
(ops->vidioc_g_chip_info || p->match.addr == 0))
- return ops->vidioc_s_register(file, fh, p);
+ return ops->vidioc_s_register(file, NULL, p);
return -EINVAL;
#else
return -ENOTTY;
@@ -2711,7 +2701,7 @@ static int v4l_dbg_s_register(const struct v4l2_ioctl_ops *ops,
}
static int v4l_dbg_g_chip_info(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+ struct file *file, void *arg)
{
#ifdef CONFIG_VIDEO_ADV_DEBUG
struct video_device *vfd = video_devdata(file);
@@ -2727,7 +2717,7 @@ static int v4l_dbg_g_chip_info(const struct v4l2_ioctl_ops *ops,
p->flags |= V4L2_CHIP_FL_READABLE;
strscpy(p->name, vfd->v4l2_dev->name, sizeof(p->name));
if (ops->vidioc_g_chip_info)
- return ops->vidioc_g_chip_info(file, fh, arg);
+ return ops->vidioc_g_chip_info(file, NULL, arg);
if (p->match.addr)
return -EINVAL;
return 0;
@@ -2753,26 +2743,32 @@ static int v4l_dbg_g_chip_info(const struct v4l2_ioctl_ops *ops,
#endif
}
-static int v4l_dqevent(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_dqevent(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
- return v4l2_event_dequeue(fh, arg, file->f_flags & O_NONBLOCK);
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
+
+ return v4l2_event_dequeue(vfh, arg, file->f_flags & O_NONBLOCK);
}
static int v4l_subscribe_event(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+ struct file *file, void *arg)
{
- return ops->vidioc_subscribe_event(fh, arg);
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
+
+ return ops->vidioc_subscribe_event(vfh, arg);
}
static int v4l_unsubscribe_event(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+ struct file *file, void *arg)
{
- return ops->vidioc_unsubscribe_event(fh, arg);
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
+
+ return ops->vidioc_unsubscribe_event(vfh, arg);
}
static int v4l_g_sliced_vbi_cap(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+ struct file *file, void *arg)
{
struct v4l2_sliced_vbi_cap *p = arg;
int ret = check_fmt(file, p->type);
@@ -2783,11 +2779,11 @@ static int v4l_g_sliced_vbi_cap(const struct v4l2_ioctl_ops *ops,
/* Clear up to type, everything after type is zeroed already */
memset(p, 0, offsetof(struct v4l2_sliced_vbi_cap, type));
- return ops->vidioc_g_sliced_vbi_cap(file, fh, p);
+ return ops->vidioc_g_sliced_vbi_cap(file, NULL, p);
}
static int v4l_enum_freq_bands(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+ struct file *file, void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_frequency_band *p = arg;
@@ -2805,7 +2801,7 @@ static int v4l_enum_freq_bands(const struct v4l2_ioctl_ops *ops,
return -EINVAL;
}
if (ops->vidioc_enum_freq_bands) {
- err = ops->vidioc_enum_freq_bands(file, fh, p);
+ err = ops->vidioc_enum_freq_bands(file, NULL, p);
if (err != -ENOTTY)
return err;
}
@@ -2817,7 +2813,7 @@ static int v4l_enum_freq_bands(const struct v4l2_ioctl_ops *ops,
if (p->index)
return -EINVAL;
- err = ops->vidioc_g_tuner(file, fh, &t);
+ err = ops->vidioc_g_tuner(file, NULL, &t);
if (err)
return err;
p->capability = t.capability | V4L2_TUNER_CAP_FREQ_BANDS;
@@ -2836,7 +2832,7 @@ static int v4l_enum_freq_bands(const struct v4l2_ioctl_ops *ops,
return -EINVAL;
if (p->index)
return -EINVAL;
- err = ops->vidioc_g_modulator(file, fh, &m);
+ err = ops->vidioc_g_modulator(file, NULL, &m);
if (err)
return err;
p->capability = m.capability | V4L2_TUNER_CAP_FREQ_BANDS;
@@ -2853,7 +2849,7 @@ struct v4l2_ioctl_info {
u32 flags;
const char * const name;
int (*func)(const struct v4l2_ioctl_ops *ops, struct file *file,
- void *fh, void *p);
+ void *p);
void (*debug)(const void *arg, bool write_only);
};
@@ -2874,9 +2870,9 @@ struct v4l2_ioctl_info {
#define DEFINE_V4L_STUB_FUNC(_vidioc) \
static int v4l_stub_ ## _vidioc( \
const struct v4l2_ioctl_ops *ops, \
- struct file *file, void *fh, void *p) \
+ struct file *file, void *p) \
{ \
- return ops->vidioc_ ## _vidioc(file, fh, p); \
+ return ops->vidioc_ ## _vidioc(file, NULL, p); \
}
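
For illustration, expanding the updated stub macro for the vidioc_g_fbuf op shows the new calling convention, with the fh argument pinned to NULL (expansion shown for reference, not part of the patch):

static int v4l_stub_g_fbuf(const struct v4l2_ioctl_ops *ops,
			   struct file *file, void *p)
{
	return ops->vidioc_g_fbuf(file, NULL, p);
}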
#define IOCTL_INFO(_ioctl, _func, _debug, _flags) \
@@ -3072,8 +3068,7 @@ static long __video_do_ioctl(struct file *file,
bool write_only = false;
struct v4l2_ioctl_info default_info;
const struct v4l2_ioctl_info *info;
- void *fh = file->private_data;
- struct v4l2_fh *vfh = NULL;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
int dev_debug = vfd->dev_debug;
long ret = -ENOTTY;
@@ -3083,9 +3078,6 @@ static long __video_do_ioctl(struct file *file,
return ret;
}
- if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags))
- vfh = file->private_data;
-
/*
* We need to serialize streamon/off with queueing new requests.
* These ioctls may trigger the cancellation of a streaming
@@ -3117,10 +3109,10 @@ static long __video_do_ioctl(struct file *file,
info = &v4l2_ioctls[_IOC_NR(cmd)];
if (!is_valid_ioctl(vfd, cmd) &&
- !((info->flags & INFO_FL_CTRL) && vfh && vfh->ctrl_handler))
+ !((info->flags & INFO_FL_CTRL) && vfh->ctrl_handler))
goto done;
- if (vfh && (info->flags & INFO_FL_PRIO)) {
+ if (info->flags & INFO_FL_PRIO) {
ret = v4l2_prio_check(vfd->prio, vfh->prio);
if (ret)
goto done;
@@ -3134,12 +3126,12 @@ static long __video_do_ioctl(struct file *file,
write_only = _IOC_DIR(cmd) == _IOC_WRITE;
if (info != &default_info) {
- ret = info->func(ops, file, fh, arg);
+ ret = info->func(ops, file, arg);
} else if (!ops->vidioc_default) {
ret = -ENOTTY;
} else {
- ret = ops->vidioc_default(file, fh,
- vfh ? v4l2_prio_check(vfd->prio, vfh->prio) >= 0 : 0,
+ ret = ops->vidioc_default(file, NULL,
+ v4l2_prio_check(vfd->prio, vfh->prio) >= 0,
cmd, arg);
}
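
Because vfh is now always valid, the default-ioctl path computes the priority check unconditionally; the call above is equivalent to this sketch:

/* Sketch of the simplified vidioc_default invocation. */
bool valid_prio = v4l2_prio_check(vfd->prio, vfh->prio) >= 0;

ret = ops->vidioc_default(file, NULL, valid_prio, cmd, arg);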
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index eb22d6172462..21acd9bc8607 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -951,7 +951,7 @@ static __poll_t v4l2_m2m_poll_for_data(struct file *file,
__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct poll_table_struct *wait)
{
- struct video_device *vfd = video_devdata(file);
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
struct vb2_queue *src_q = v4l2_m2m_get_src_vq(m2m_ctx);
struct vb2_queue *dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);
__poll_t req_events = poll_requested_events(wait);
@@ -970,13 +970,9 @@ __poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
if (req_events & (EPOLLOUT | EPOLLWRNORM | EPOLLIN | EPOLLRDNORM))
rc = v4l2_m2m_poll_for_data(file, m2m_ctx, wait);
- if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
- struct v4l2_fh *fh = file->private_data;
-
- poll_wait(file, &fh->wait, wait);
- if (v4l2_event_pending(fh))
- rc |= EPOLLPRI;
- }
+ poll_wait(file, &fh->wait, wait);
+ if (v4l2_event_pending(fh))
+ rc |= EPOLLPRI;
return rc;
}
@@ -1004,7 +1000,7 @@ unsigned long v4l2_m2m_get_unmapped_area(struct file *file, unsigned long addr,
unsigned long len, unsigned long pgoff,
unsigned long flags)
{
- struct v4l2_fh *fh = file->private_data;
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
unsigned long offset = pgoff << PAGE_SHIFT;
struct vb2_queue *vq;
@@ -1371,7 +1367,7 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_request_queue);
int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
struct v4l2_requestbuffers *rb)
{
- struct v4l2_fh *fh = file->private_data;
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb);
}
@@ -1380,7 +1376,7 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_reqbufs);
int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv,
struct v4l2_create_buffers *create)
{
- struct v4l2_fh *fh = file->private_data;
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
return v4l2_m2m_create_bufs(file, fh->m2m_ctx, create);
}
@@ -1389,7 +1385,7 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_create_bufs);
int v4l2_m2m_ioctl_remove_bufs(struct file *file, void *priv,
struct v4l2_remove_buffers *remove)
{
- struct v4l2_fh *fh = file->private_data;
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
struct vb2_queue *q = v4l2_m2m_get_vq(fh->m2m_ctx, remove->type);
if (!q)
@@ -1404,7 +1400,7 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_remove_bufs);
int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv,
struct v4l2_buffer *buf)
{
- struct v4l2_fh *fh = file->private_data;
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
return v4l2_m2m_querybuf(file, fh->m2m_ctx, buf);
}
@@ -1413,7 +1409,7 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_querybuf);
int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv,
struct v4l2_buffer *buf)
{
- struct v4l2_fh *fh = file->private_data;
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf);
}
@@ -1422,7 +1418,7 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_qbuf);
int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv,
struct v4l2_buffer *buf)
{
- struct v4l2_fh *fh = file->private_data;
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
return v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf);
}
@@ -1431,7 +1427,7 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_dqbuf);
int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *priv,
struct v4l2_buffer *buf)
{
- struct v4l2_fh *fh = file->private_data;
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
return v4l2_m2m_prepare_buf(file, fh->m2m_ctx, buf);
}
@@ -1440,7 +1436,7 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_prepare_buf);
int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv,
struct v4l2_exportbuffer *eb)
{
- struct v4l2_fh *fh = file->private_data;
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb);
}
@@ -1449,7 +1445,7 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_expbuf);
int v4l2_m2m_ioctl_streamon(struct file *file, void *priv,
enum v4l2_buf_type type)
{
- struct v4l2_fh *fh = file->private_data;
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
return v4l2_m2m_streamon(file, fh->m2m_ctx, type);
}
@@ -1458,13 +1454,13 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamon);
int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv,
enum v4l2_buf_type type)
{
- struct v4l2_fh *fh = file->private_data;
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
return v4l2_m2m_streamoff(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamoff);
-int v4l2_m2m_ioctl_try_encoder_cmd(struct file *file, void *fh,
+int v4l2_m2m_ioctl_try_encoder_cmd(struct file *file, void *priv,
struct v4l2_encoder_cmd *ec)
{
if (ec->cmd != V4L2_ENC_CMD_STOP && ec->cmd != V4L2_ENC_CMD_START)
@@ -1475,7 +1471,7 @@ int v4l2_m2m_ioctl_try_encoder_cmd(struct file *file, void *fh,
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_try_encoder_cmd);
-int v4l2_m2m_ioctl_try_decoder_cmd(struct file *file, void *fh,
+int v4l2_m2m_ioctl_try_decoder_cmd(struct file *file, void *priv,
struct v4l2_decoder_cmd *dc)
{
if (dc->cmd != V4L2_DEC_CMD_STOP && dc->cmd != V4L2_DEC_CMD_START)
@@ -1542,7 +1538,7 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_decoder_cmd);
int v4l2_m2m_ioctl_encoder_cmd(struct file *file, void *priv,
struct v4l2_encoder_cmd *ec)
{
- struct v4l2_fh *fh = file->private_data;
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
return v4l2_m2m_encoder_cmd(file, fh->m2m_ctx, ec);
}
@@ -1551,13 +1547,13 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_encoder_cmd);
int v4l2_m2m_ioctl_decoder_cmd(struct file *file, void *priv,
struct v4l2_decoder_cmd *dc)
{
- struct v4l2_fh *fh = file->private_data;
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
return v4l2_m2m_decoder_cmd(file, fh->m2m_ctx, dc);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_decoder_cmd);
-int v4l2_m2m_ioctl_stateless_try_decoder_cmd(struct file *file, void *fh,
+int v4l2_m2m_ioctl_stateless_try_decoder_cmd(struct file *file, void *priv,
struct v4l2_decoder_cmd *dc)
{
if (dc->cmd != V4L2_DEC_CMD_FLUSH)
@@ -1572,7 +1568,7 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_stateless_try_decoder_cmd);
int v4l2_m2m_ioctl_stateless_decoder_cmd(struct file *file, void *priv,
struct v4l2_decoder_cmd *dc)
{
- struct v4l2_fh *fh = file->private_data;
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
struct vb2_v4l2_buffer *out_vb, *cap_vb;
struct v4l2_m2m_dev *m2m_dev = fh->m2m_ctx->m2m_dev;
unsigned long flags;
@@ -1617,7 +1613,7 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_stateless_decoder_cmd);
int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
- struct v4l2_fh *fh = file->private_data;
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
return v4l2_m2m_mmap(file, fh->m2m_ctx, vma);
}
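
With these helpers resolving the file handle themselves, an m2m driver's fops table stays a thin pass-through; a minimal hypothetical wiring (names are illustrative, not from this patch):

static const struct v4l2_file_operations my_m2m_fops = {
	.owner		= THIS_MODULE,
	.open		= my_open,		/* v4l2_fh_init() + v4l2_fh_add() */
	.release	= my_release,
	.poll		= v4l2_m2m_fop_poll,
	.unlocked_ioctl	= video_ioctl2,
	.mmap		= v4l2_m2m_fop_mmap,
};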
@@ -1625,7 +1621,7 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap);
__poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait)
{
- struct v4l2_fh *fh = file->private_data;
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
__poll_t ret;
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index 4fd25fea3b58..1da953629010 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -26,6 +26,30 @@
#include <media/v4l2-fh.h>
#include <media/v4l2-ioctl.h>
+/**
+ * struct v4l2_subdev_stream_config - Used for storing stream configuration.
+ *
+ * @pad: pad number
+ * @stream: stream number
+ * @enabled: has the stream been enabled with v4l2_subdev_enable_streams()
+ * @fmt: &struct v4l2_mbus_framefmt
+ * @crop: &struct v4l2_rect to be used for crop
+ * @compose: &struct v4l2_rect to be used for compose
+ * @interval: frame interval
+ *
+ * This structure stores configuration for a stream.
+ */
+struct v4l2_subdev_stream_config {
+ u32 pad;
+ u32 stream;
+ bool enabled;
+
+ struct v4l2_mbus_framefmt fmt;
+ struct v4l2_rect crop;
+ struct v4l2_rect compose;
+ struct v4l2_fract interval;
+};
+
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
/*
* The Streams API is an experimental feature. To use the Streams API, set
@@ -86,8 +110,7 @@ static int subdev_open(struct file *file)
}
v4l2_fh_init(&subdev_fh->vfh, vdev);
- v4l2_fh_add(&subdev_fh->vfh);
- file->private_data = &subdev_fh->vfh;
+ v4l2_fh_add(&subdev_fh->vfh, file);
if (sd->v4l2_dev->mdev && sd->entity.graph_obj.mdev->dev) {
struct module *owner;
@@ -110,7 +133,7 @@ static int subdev_open(struct file *file)
err:
module_put(subdev_fh->owner);
- v4l2_fh_del(&subdev_fh->vfh);
+ v4l2_fh_del(&subdev_fh->vfh, file);
v4l2_fh_exit(&subdev_fh->vfh);
subdev_fh_free(subdev_fh);
kfree(subdev_fh);
@@ -122,17 +145,16 @@ static int subdev_close(struct file *file)
{
struct video_device *vdev = video_devdata(file);
struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
- struct v4l2_fh *vfh = file->private_data;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);
if (sd->internal_ops && sd->internal_ops->close)
sd->internal_ops->close(sd, subdev_fh);
module_put(subdev_fh->owner);
- v4l2_fh_del(vfh);
+ v4l2_fh_del(vfh, file);
v4l2_fh_exit(vfh);
subdev_fh_free(subdev_fh);
kfree(subdev_fh);
- file->private_data = NULL;
return 0;
}
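
The v4l2_fh_add()/v4l2_fh_del() definitions are outside this excerpt; judging by the removed file->private_data assignments here, they now take the file pointer so they can manage it themselves, presumably:

/* Assumed prototypes -- the actual declarations live in media/v4l2-fh.h. */
void v4l2_fh_add(struct v4l2_fh *fh, struct file *file);	/* also sets file->private_data */
void v4l2_fh_del(struct v4l2_fh *fh, struct file *file);	/* also clears file->private_data */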
@@ -612,7 +634,7 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg,
{
struct video_device *vdev = video_devdata(file);
struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
- struct v4l2_fh *vfh = file->private_data;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);
bool ro_subdev = test_bit(V4L2_FL_SUBDEV_RO_DEVNODE, &vdev->flags);
bool streams_subdev = sd->flags & V4L2_SUBDEV_FL_STREAMS;
@@ -1135,7 +1157,7 @@ static long subdev_do_ioctl_lock(struct file *file, unsigned int cmd, void *arg)
if (video_is_registered(vdev)) {
struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
- struct v4l2_fh *vfh = file->private_data;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);
struct v4l2_subdev_state *state;
@@ -1192,7 +1214,7 @@ static __poll_t subdev_poll(struct file *file, poll_table *wait)
{
struct video_device *vdev = video_devdata(file);
struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
- struct v4l2_fh *fh = file->private_data;
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
return EPOLLERR;
diff --git a/drivers/memory/samsung/exynos-srom.c b/drivers/memory/samsung/exynos-srom.c
index e73dd330af47..d913fb901973 100644
--- a/drivers/memory/samsung/exynos-srom.c
+++ b/drivers/memory/samsung/exynos-srom.c
@@ -121,20 +121,18 @@ static int exynos_srom_probe(struct platform_device *pdev)
return -ENOMEM;
srom->dev = dev;
- srom->reg_base = of_iomap(np, 0);
- if (!srom->reg_base) {
+ srom->reg_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(srom->reg_base)) {
dev_err(&pdev->dev, "iomap of exynos srom controller failed\n");
- return -ENOMEM;
+ return PTR_ERR(srom->reg_base);
}
platform_set_drvdata(pdev, srom);
srom->reg_offset = exynos_srom_alloc_reg_dump(exynos_srom_offsets,
ARRAY_SIZE(exynos_srom_offsets));
- if (!srom->reg_offset) {
- iounmap(srom->reg_base);
+ if (!srom->reg_offset)
return -ENOMEM;
- }
for_each_child_of_node(np, child) {
if (exynos_srom_configure_bank(srom, child)) {
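
The devm conversion above is what lets the explicit iounmap() calls go away: the mapping is released by devres on probe failure or unbind, so the remaining error returns (such as the -ENOMEM for reg_offset) no longer leak it. In sketch form:

/* devm-managed equivalent of the removed manual cleanup (sketch). */
srom->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(srom->reg_base))
	return PTR_ERR(srom->reg_base);	/* nothing to unmap by hand */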
diff --git a/drivers/memory/stm32_omm.c b/drivers/memory/stm32_omm.c
index bee2ecc8c2b9..5d06623f3f68 100644
--- a/drivers/memory/stm32_omm.c
+++ b/drivers/memory/stm32_omm.c
@@ -238,7 +238,7 @@ static int stm32_omm_configure(struct device *dev)
if (mux & CR_MUXEN) {
ret = of_property_read_u32(dev->of_node, "st,omm-req2ack-ns",
&req2ack);
- if (!ret && !req2ack) {
+ if (!ret && req2ack) {
req2ack = DIV_ROUND_UP(req2ack, NSEC_PER_SEC / clk_rate_max) - 1;
if (req2ack > 256)
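
With the corrected test, the conversion only runs for a non-zero req2ack; a worked example with assumed values:

/*
 * Illustrative numbers, not from the patch:
 *   clk_rate_max = 200000000 Hz  ->  one cycle = NSEC_PER_SEC / clk_rate_max = 5 ns
 *   st,omm-req2ack-ns = 20       ->  DIV_ROUND_UP(20, 5) - 1 = 3 cycles programmed
 * The old "!req2ack" test ran the conversion only for req2ack == 0, where
 * DIV_ROUND_UP(0, 5) - 1 underflows the unsigned value and trips the > 256 check.
 */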
diff --git a/drivers/memory/tegra/tegra210.c b/drivers/memory/tegra/tegra210.c
index 8ab6498dbe7d..cfa61dd88557 100644
--- a/drivers/memory/tegra/tegra210.c
+++ b/drivers/memory/tegra/tegra210.c
@@ -9,11 +9,11 @@
static const struct tegra_mc_client tegra210_mc_clients[] = {
{
- .id = 0x00,
+ .id = TEGRA210_MC_PTCR,
.name = "ptcr",
.swgroup = TEGRA_SWGROUP_PTC,
}, {
- .id = 0x01,
+ .id = TEGRA210_MC_DISPLAY0A,
.name = "display0a",
.swgroup = TEGRA_SWGROUP_DC,
.regs = {
@@ -29,7 +29,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x02,
+ .id = TEGRA210_MC_DISPLAY0AB,
.name = "display0ab",
.swgroup = TEGRA_SWGROUP_DCB,
.regs = {
@@ -45,7 +45,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x03,
+ .id = TEGRA210_MC_DISPLAY0B,
.name = "display0b",
.swgroup = TEGRA_SWGROUP_DC,
.regs = {
@@ -61,7 +61,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x04,
+ .id = TEGRA210_MC_DISPLAY0BB,
.name = "display0bb",
.swgroup = TEGRA_SWGROUP_DCB,
.regs = {
@@ -77,7 +77,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x05,
+ .id = TEGRA210_MC_DISPLAY0C,
.name = "display0c",
.swgroup = TEGRA_SWGROUP_DC,
.regs = {
@@ -93,7 +93,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x06,
+ .id = TEGRA210_MC_DISPLAY0CB,
.name = "display0cb",
.swgroup = TEGRA_SWGROUP_DCB,
.regs = {
@@ -109,7 +109,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x0e,
+ .id = TEGRA210_MC_AFIR,
.name = "afir",
.swgroup = TEGRA_SWGROUP_AFI,
.regs = {
@@ -125,7 +125,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x0f,
+ .id = TEGRA210_MC_AVPCARM7R,
.name = "avpcarm7r",
.swgroup = TEGRA_SWGROUP_AVPC,
.regs = {
@@ -141,7 +141,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x10,
+ .id = TEGRA210_MC_DISPLAYHC,
.name = "displayhc",
.swgroup = TEGRA_SWGROUP_DC,
.regs = {
@@ -157,7 +157,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x11,
+ .id = TEGRA210_MC_DISPLAYHCB,
.name = "displayhcb",
.swgroup = TEGRA_SWGROUP_DCB,
.regs = {
@@ -173,7 +173,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x15,
+ .id = TEGRA210_MC_HDAR,
.name = "hdar",
.swgroup = TEGRA_SWGROUP_HDA,
.regs = {
@@ -189,7 +189,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x16,
+ .id = TEGRA210_MC_HOST1XDMAR,
.name = "host1xdmar",
.swgroup = TEGRA_SWGROUP_HC,
.regs = {
@@ -205,7 +205,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x17,
+ .id = TEGRA210_MC_HOST1XR,
.name = "host1xr",
.swgroup = TEGRA_SWGROUP_HC,
.regs = {
@@ -221,7 +221,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x1c,
+ .id = TEGRA210_MC_NVENCSRD,
.name = "nvencsrd",
.swgroup = TEGRA_SWGROUP_NVENC,
.regs = {
@@ -237,7 +237,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x1d,
+ .id = TEGRA210_MC_PPCSAHBDMAR,
.name = "ppcsahbdmar",
.swgroup = TEGRA_SWGROUP_PPCS,
.regs = {
@@ -253,7 +253,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x1e,
+ .id = TEGRA210_MC_PPCSAHBSLVR,
.name = "ppcsahbslvr",
.swgroup = TEGRA_SWGROUP_PPCS,
.regs = {
@@ -269,7 +269,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x1f,
+ .id = TEGRA210_MC_SATAR,
.name = "satar",
.swgroup = TEGRA_SWGROUP_SATA,
.regs = {
@@ -285,7 +285,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x27,
+ .id = TEGRA210_MC_MPCORER,
.name = "mpcorer",
.swgroup = TEGRA_SWGROUP_MPCORE,
.regs = {
@@ -297,7 +297,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x2b,
+ .id = TEGRA210_MC_NVENCSWR,
.name = "nvencswr",
.swgroup = TEGRA_SWGROUP_NVENC,
.regs = {
@@ -313,7 +313,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x31,
+ .id = TEGRA210_MC_AFIW,
.name = "afiw",
.swgroup = TEGRA_SWGROUP_AFI,
.regs = {
@@ -329,7 +329,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x32,
+ .id = TEGRA210_MC_AVPCARM7W,
.name = "avpcarm7w",
.swgroup = TEGRA_SWGROUP_AVPC,
.regs = {
@@ -345,7 +345,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x35,
+ .id = TEGRA210_MC_HDAW,
.name = "hdaw",
.swgroup = TEGRA_SWGROUP_HDA,
.regs = {
@@ -361,7 +361,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x36,
+ .id = TEGRA210_MC_HOST1XW,
.name = "host1xw",
.swgroup = TEGRA_SWGROUP_HC,
.regs = {
@@ -377,7 +377,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x39,
+ .id = TEGRA210_MC_MPCOREW,
.name = "mpcorew",
.swgroup = TEGRA_SWGROUP_MPCORE,
.regs = {
@@ -389,7 +389,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x3b,
+ .id = TEGRA210_MC_PPCSAHBDMAW,
.name = "ppcsahbdmaw",
.swgroup = TEGRA_SWGROUP_PPCS,
.regs = {
@@ -405,7 +405,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x3c,
+ .id = TEGRA210_MC_PPCSAHBSLVW,
.name = "ppcsahbslvw",
.swgroup = TEGRA_SWGROUP_PPCS,
.regs = {
@@ -421,7 +421,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x3d,
+ .id = TEGRA210_MC_SATAW,
.name = "sataw",
.swgroup = TEGRA_SWGROUP_SATA,
.regs = {
@@ -437,7 +437,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x44,
+ .id = TEGRA210_MC_ISPRA,
.name = "ispra",
.swgroup = TEGRA_SWGROUP_ISP2,
.regs = {
@@ -453,7 +453,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x46,
+ .id = TEGRA210_MC_ISPWA,
.name = "ispwa",
.swgroup = TEGRA_SWGROUP_ISP2,
.regs = {
@@ -469,7 +469,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x47,
+ .id = TEGRA210_MC_ISPWB,
.name = "ispwb",
.swgroup = TEGRA_SWGROUP_ISP2,
.regs = {
@@ -485,7 +485,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x4a,
+ .id = TEGRA210_MC_XUSB_HOSTR,
.name = "xusb_hostr",
.swgroup = TEGRA_SWGROUP_XUSB_HOST,
.regs = {
@@ -501,7 +501,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x4b,
+ .id = TEGRA210_MC_XUSB_HOSTW,
.name = "xusb_hostw",
.swgroup = TEGRA_SWGROUP_XUSB_HOST,
.regs = {
@@ -517,7 +517,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x4c,
+ .id = TEGRA210_MC_XUSB_DEVR,
.name = "xusb_devr",
.swgroup = TEGRA_SWGROUP_XUSB_DEV,
.regs = {
@@ -533,7 +533,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x4d,
+ .id = TEGRA210_MC_XUSB_DEVW,
.name = "xusb_devw",
.swgroup = TEGRA_SWGROUP_XUSB_DEV,
.regs = {
@@ -549,7 +549,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x4e,
+ .id = TEGRA210_MC_ISPRAB,
.name = "isprab",
.swgroup = TEGRA_SWGROUP_ISP2B,
.regs = {
@@ -565,7 +565,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x50,
+ .id = TEGRA210_MC_ISPWAB,
.name = "ispwab",
.swgroup = TEGRA_SWGROUP_ISP2B,
.regs = {
@@ -581,7 +581,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x51,
+ .id = TEGRA210_MC_ISPWBB,
.name = "ispwbb",
.swgroup = TEGRA_SWGROUP_ISP2B,
.regs = {
@@ -597,7 +597,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x54,
+ .id = TEGRA210_MC_TSECSRD,
.name = "tsecsrd",
.swgroup = TEGRA_SWGROUP_TSEC,
.regs = {
@@ -613,7 +613,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x55,
+ .id = TEGRA210_MC_TSECSWR,
.name = "tsecswr",
.swgroup = TEGRA_SWGROUP_TSEC,
.regs = {
@@ -629,7 +629,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x56,
+ .id = TEGRA210_MC_A9AVPSCR,
.name = "a9avpscr",
.swgroup = TEGRA_SWGROUP_A9AVP,
.regs = {
@@ -645,7 +645,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x57,
+ .id = TEGRA210_MC_A9AVPSCW,
.name = "a9avpscw",
.swgroup = TEGRA_SWGROUP_A9AVP,
.regs = {
@@ -661,7 +661,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x58,
+ .id = TEGRA210_MC_GPUSRD,
.name = "gpusrd",
.swgroup = TEGRA_SWGROUP_GPU,
.regs = {
@@ -678,7 +678,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x59,
+ .id = TEGRA210_MC_GPUSWR,
.name = "gpuswr",
.swgroup = TEGRA_SWGROUP_GPU,
.regs = {
@@ -695,7 +695,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x5a,
+ .id = TEGRA210_MC_DISPLAYT,
.name = "displayt",
.swgroup = TEGRA_SWGROUP_DC,
.regs = {
@@ -711,7 +711,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x60,
+ .id = TEGRA210_MC_SDMMCRA,
.name = "sdmmcra",
.swgroup = TEGRA_SWGROUP_SDMMC1A,
.regs = {
@@ -727,7 +727,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x61,
+ .id = TEGRA210_MC_SDMMCRAA,
.name = "sdmmcraa",
.swgroup = TEGRA_SWGROUP_SDMMC2A,
.regs = {
@@ -743,7 +743,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x62,
+ .id = TEGRA210_MC_SDMMCR,
.name = "sdmmcr",
.swgroup = TEGRA_SWGROUP_SDMMC3A,
.regs = {
@@ -759,7 +759,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x63,
+ .id = TEGRA210_MC_SDMMCRAB,
.swgroup = TEGRA_SWGROUP_SDMMC4A,
.name = "sdmmcrab",
.regs = {
@@ -775,7 +775,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x64,
+ .id = TEGRA210_MC_SDMMCWA,
.name = "sdmmcwa",
.swgroup = TEGRA_SWGROUP_SDMMC1A,
.regs = {
@@ -791,7 +791,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x65,
+ .id = TEGRA210_MC_SDMMCWAA,
.name = "sdmmcwaa",
.swgroup = TEGRA_SWGROUP_SDMMC2A,
.regs = {
@@ -807,7 +807,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x66,
+ .id = TEGRA210_MC_SDMMCW,
.name = "sdmmcw",
.swgroup = TEGRA_SWGROUP_SDMMC3A,
.regs = {
@@ -823,7 +823,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x67,
+ .id = TEGRA210_MC_SDMMCWAB,
.name = "sdmmcwab",
.swgroup = TEGRA_SWGROUP_SDMMC4A,
.regs = {
@@ -839,7 +839,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x6c,
+ .id = TEGRA210_MC_VICSRD,
.name = "vicsrd",
.swgroup = TEGRA_SWGROUP_VIC,
.regs = {
@@ -855,7 +855,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x6d,
+ .id = TEGRA210_MC_VICSWR,
.name = "vicswr",
.swgroup = TEGRA_SWGROUP_VIC,
.regs = {
@@ -871,7 +871,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x72,
+ .id = TEGRA210_MC_VIW,
.name = "viw",
.swgroup = TEGRA_SWGROUP_VI,
.regs = {
@@ -887,7 +887,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x73,
+ .id = TEGRA210_MC_DISPLAYD,
.name = "displayd",
.swgroup = TEGRA_SWGROUP_DC,
.regs = {
@@ -903,7 +903,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x78,
+ .id = TEGRA210_MC_NVDECSRD,
.name = "nvdecsrd",
.swgroup = TEGRA_SWGROUP_NVDEC,
.regs = {
@@ -919,7 +919,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x79,
+ .id = TEGRA210_MC_NVDECSWR,
.name = "nvdecswr",
.swgroup = TEGRA_SWGROUP_NVDEC,
.regs = {
@@ -935,7 +935,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x7a,
+ .id = TEGRA210_MC_APER,
.name = "aper",
.swgroup = TEGRA_SWGROUP_APE,
.regs = {
@@ -951,7 +951,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x7b,
+ .id = TEGRA210_MC_APEW,
.name = "apew",
.swgroup = TEGRA_SWGROUP_APE,
.regs = {
@@ -967,7 +967,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x7e,
+ .id = TEGRA210_MC_NVJPGRD,
.name = "nvjpgsrd",
.swgroup = TEGRA_SWGROUP_NVJPG,
.regs = {
@@ -983,7 +983,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x7f,
+ .id = TEGRA210_MC_NVJPGWR,
.name = "nvjpgswr",
.swgroup = TEGRA_SWGROUP_NVJPG,
.regs = {
@@ -999,7 +999,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x80,
+ .id = TEGRA210_MC_SESRD,
.name = "sesrd",
.swgroup = TEGRA_SWGROUP_SE,
.regs = {
@@ -1015,7 +1015,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x81,
+ .id = TEGRA210_MC_SESWR,
.name = "seswr",
.swgroup = TEGRA_SWGROUP_SE,
.regs = {
@@ -1031,7 +1031,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x82,
+ .id = TEGRA210_MC_AXIAPR,
.name = "axiapr",
.swgroup = TEGRA_SWGROUP_AXIAP,
.regs = {
@@ -1047,7 +1047,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x83,
+ .id = TEGRA210_MC_AXIAPW,
.name = "axiapw",
.swgroup = TEGRA_SWGROUP_AXIAP,
.regs = {
@@ -1063,7 +1063,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x84,
+ .id = TEGRA210_MC_ETRR,
.name = "etrr",
.swgroup = TEGRA_SWGROUP_ETR,
.regs = {
@@ -1079,7 +1079,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x85,
+ .id = TEGRA210_MC_ETRW,
.name = "etrw",
.swgroup = TEGRA_SWGROUP_ETR,
.regs = {
@@ -1095,7 +1095,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x86,
+ .id = TEGRA210_MC_TSECSRDB,
.name = "tsecsrdb",
.swgroup = TEGRA_SWGROUP_TSECB,
.regs = {
@@ -1111,7 +1111,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x87,
+ .id = TEGRA210_MC_TSECSWRB,
.name = "tsecswrb",
.swgroup = TEGRA_SWGROUP_TSECB,
.regs = {
@@ -1127,7 +1127,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x88,
+ .id = TEGRA210_MC_GPUSRD2,
.name = "gpusrd2",
.swgroup = TEGRA_SWGROUP_GPU,
.regs = {
@@ -1144,7 +1144,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x89,
+ .id = TEGRA210_MC_GPUSWR2,
.name = "gpuswr2",
.swgroup = TEGRA_SWGROUP_GPU,
.regs = {
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
index d34892782f6e..1af157ce0a63 100644
--- a/drivers/memstick/core/ms_block.c
+++ b/drivers/memstick/core/ms_block.c
@@ -1953,10 +1953,10 @@ static void msb_data_clear(struct msb_data *msb)
msb->card = NULL;
}
-static int msb_bd_getgeo(struct block_device *bdev,
+static int msb_bd_getgeo(struct gendisk *disk,
struct hd_geometry *geo)
{
- struct msb_data *msb = bdev->bd_disk->private_data;
+ struct msb_data *msb = disk->private_data;
*geo = msb->geometry;
return 0;
}
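
This follows the block layer switching the getgeo method from block_device to gendisk; assuming that prototype change, the driver's ops table needs no other edits:

/* Sketch -- the real msb_bdops in this file carries more methods. */
static const struct block_device_operations msb_bdops_sketch = {
	.owner	= THIS_MODULE,
	.getgeo	= msb_bd_getgeo,	/* now int (*)(struct gendisk *, struct hd_geometry *) */
};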
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index c9853d887d28..e507bb11c802 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -189,10 +189,10 @@ static void mspro_block_bd_free_disk(struct gendisk *disk)
kfree(msb);
}
-static int mspro_block_bd_getgeo(struct block_device *bdev,
+static int mspro_block_bd_getgeo(struct gendisk *disk,
struct hd_geometry *geo)
{
- struct mspro_block_data *msb = bdev->bd_disk->private_data;
+ struct mspro_block_data *msb = disk->private_data;
geo->heads = msb->heads;
geo->sectors = msb->sectors_per_track;
@@ -560,8 +560,7 @@ has_int_reg:
t_offset += msb->current_page * msb->page_size;
sg_set_page(&t_sg,
- nth_page(sg_page(&(msb->req_sg[msb->current_seg])),
- t_offset >> PAGE_SHIFT),
+ sg_page(&(msb->req_sg[msb->current_seg])) + (t_offset >> PAGE_SHIFT),
msb->page_size, offset_in_page(t_offset));
memstick_init_req_sg(*mrq, msb->data_dir == READ
diff --git a/drivers/memstick/host/jmb38x_ms.c b/drivers/memstick/host/jmb38x_ms.c
index cddddb3a5a27..79e66e30417c 100644
--- a/drivers/memstick/host/jmb38x_ms.c
+++ b/drivers/memstick/host/jmb38x_ms.c
@@ -317,8 +317,7 @@ static int jmb38x_ms_transfer_data(struct jmb38x_ms_host *host)
unsigned int p_off;
if (host->req->long_data) {
- pg = nth_page(sg_page(&host->req->sg),
- off >> PAGE_SHIFT);
+ pg = sg_page(&host->req->sg) + (off >> PAGE_SHIFT);
p_off = offset_in_page(off);
p_cnt = PAGE_SIZE - p_off;
p_cnt = min(p_cnt, length);
diff --git a/drivers/memstick/host/tifm_ms.c b/drivers/memstick/host/tifm_ms.c
index db7f3a088fb0..0b6a90661eee 100644
--- a/drivers/memstick/host/tifm_ms.c
+++ b/drivers/memstick/host/tifm_ms.c
@@ -201,8 +201,7 @@ static unsigned int tifm_ms_transfer_data(struct tifm_ms *host)
unsigned int p_off;
if (host->req->long_data) {
- pg = nth_page(sg_page(&host->req->sg),
- off >> PAGE_SHIFT);
+ pg = sg_page(&host->req->sg) + (off >> PAGE_SHIFT);
p_off = offset_in_page(off);
p_cnt = PAGE_SIZE - p_off;
p_cnt = min(p_cnt, length);
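
Both memstick hosts drop nth_page() the same way; this relies on the pages of one scatterlist entry being contiguous, so indexing reduces to plain pointer arithmetic:

/*
 * Equivalence assumed by the change: for a contiguous sg entry,
 *	nth_page(sg_page(sg), n) == sg_page(sg) + n
 * hence the page holding byte offset 'off' is:
 */
struct page *pg = sg_page(&host->req->sg) + (off >> PAGE_SHIFT);
unsigned int p_off = offset_in_page(off);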
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 3a64dc7a7e27..3304f8824cf7 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -2074,7 +2074,7 @@ mptscsih_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
* This is anyone's guess, quite frankly.
*/
int
-mptscsih_bios_param(struct scsi_device * sdev, struct block_device *bdev,
+mptscsih_bios_param(struct scsi_device * sdev, struct gendisk *unused,
sector_t capacity, int geom[])
{
int heads;
diff --git a/drivers/message/fusion/mptscsih.h b/drivers/message/fusion/mptscsih.h
index 8c2bb2331fc1..f9678d48100c 100644
--- a/drivers/message/fusion/mptscsih.h
+++ b/drivers/message/fusion/mptscsih.h
@@ -123,7 +123,7 @@ extern int mptscsih_abort(struct scsi_cmnd * SCpnt);
extern int mptscsih_dev_reset(struct scsi_cmnd * SCpnt);
extern int mptscsih_bus_reset(struct scsi_cmnd * SCpnt);
extern int mptscsih_host_reset(struct scsi_cmnd *SCpnt);
-extern int mptscsih_bios_param(struct scsi_device * sdev, struct block_device *bdev, sector_t capacity, int geom[]);
+extern int mptscsih_bios_param(struct scsi_device * sdev, struct gendisk *unused, sector_t capacity, int geom[]);
extern int mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r);
extern int mptscsih_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r);
extern int mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r);
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 67b54e0fd452..6cec1858947b 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -139,7 +139,7 @@ config MFD_AAT2870_CORE
config MFD_AT91_USART
tristate "AT91 USART Driver"
select MFD_CORE
- depends on ARCH_AT91 || ARCH_LAN969X || COMPILE_TEST
+ depends on ARCH_MICROCHIP || COMPILE_TEST
help
Select this to get support for AT91 USART IP. This is a wrapper
over at91-usart-serial driver and usart-spi-driver. Only one function
diff --git a/drivers/mfd/max77705.c b/drivers/mfd/max77705.c
index 62dbc63efa8d..e1a9bfd65856 100644
--- a/drivers/mfd/max77705.c
+++ b/drivers/mfd/max77705.c
@@ -108,6 +108,9 @@ static int max77705_i2c_probe(struct i2c_client *i2c)
if (pmic_rev != MAX77705_PASS3)
return dev_err_probe(dev, -ENODEV, "Rev.0x%x is not tested\n", pmic_rev);
+ /* Active Discharge Enable */
+ regmap_update_bits(max77705->regmap, MAX77705_PMIC_REG_MAINCTRL1, 1, 1);
+
ret = devm_regmap_add_irq_chip(dev, max77705->regmap,
i2c->irq,
IRQF_ONESHOT, 0,
diff --git a/drivers/misc/mei/Kconfig b/drivers/misc/mei/Kconfig
index 7575fee96cc6..f8b04e49e4ba 100644
--- a/drivers/misc/mei/Kconfig
+++ b/drivers/misc/mei/Kconfig
@@ -81,6 +81,19 @@ config INTEL_MEI_VSC
This driver can also be built as a module. If so, the module
will be called mei-vsc.
+config INTEL_MEI_LB
+ tristate "Intel Late Binding (LB) support on ME Interface"
+ depends on INTEL_MEI_ME
+ depends on DRM_XE
+ help
+ Enable support for Intel Late Binding (LB) via the MEI interface.
+
+ Late Binding is a method for applying firmware updates at runtime,
+ allowing the Intel Xe driver to load firmware payloads such as
+ fan controller or voltage regulator configuration. These updates are
+ authenticated and versioned, and do not require firmware flashing
+ or system reboot.
+
source "drivers/misc/mei/hdcp/Kconfig"
source "drivers/misc/mei/pxp/Kconfig"
source "drivers/misc/mei/gsc_proxy/Kconfig"
diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile
index 6f9fdbf1a495..a203ed766b33 100644
--- a/drivers/misc/mei/Makefile
+++ b/drivers/misc/mei/Makefile
@@ -31,6 +31,7 @@ CFLAGS_mei-trace.o = -I$(src)
obj-$(CONFIG_INTEL_MEI_HDCP) += hdcp/
obj-$(CONFIG_INTEL_MEI_PXP) += pxp/
obj-$(CONFIG_INTEL_MEI_GSC_PROXY) += gsc_proxy/
+obj-$(CONFIG_INTEL_MEI_LB) += mei_lb.o
obj-$(CONFIG_INTEL_MEI_VSC_HW) += mei-vsc-hw.o
mei-vsc-hw-y := vsc-tp.o
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 5cc3ad07d5be..09aae8f9d225 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -615,6 +615,19 @@ u8 mei_cldev_ver(const struct mei_cl_device *cldev)
EXPORT_SYMBOL_GPL(mei_cldev_ver);
/**
+ * mei_cldev_mtu - maximum message size the client can send and receive
+ *
+ * @cldev: mei client device
+ *
+ * Return: MTU in bytes, or 0 if the client is not connected
+ */
+size_t mei_cldev_mtu(const struct mei_cl_device *cldev)
+{
+ return mei_cl_mtu(cldev->cl);
+}
+EXPORT_SYMBOL_GPL(mei_cldev_mtu);
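
The new accessor is consumed by the mei_lb driver below to size requests; caller-side it looks like this sketch (mirroring mei_lb_push_payload() further down):

if (struct_size(req, payload, payload_size) > mei_cldev_mtu(cldev))
	return -EMSGSIZE;	/* request does not fit in one MEI message */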
+
+/**
* mei_cldev_enabled - check whether the device is enabled
*
* @cldev: mei client device
diff --git a/drivers/misc/mei/mei_lb.c b/drivers/misc/mei/mei_lb.c
new file mode 100644
index 000000000000..77686b108d3c
--- /dev/null
+++ b/drivers/misc/mei/mei_lb.c
@@ -0,0 +1,312 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025 Intel Corporation
+ */
+
+#include <linux/component.h>
+#include <linux/mei_cl_bus.h>
+#include <linux/module.h>
+#include <linux/overflow.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/uuid.h>
+
+#include <drm/intel/i915_component.h>
+#include <drm/intel/intel_lb_mei_interface.h>
+
+#include "mkhi.h"
+
+/**
+ * DOC: Late Binding Firmware Update/Upload
+ *
+ * Late Binding is a firmware update/upload mechanism that allows configuration
+ * payloads to be securely delivered and applied at runtime, rather than
+ * being embedded in the system firmware image (e.g., IFWI or SPI flash).
+ *
+ * This mechanism is used to update device-level configuration such as:
+ * - Fan controller
+ * - Voltage regulator (VR)
+ *
+ * Key Characteristics:
+ * ---------------------
+ * - Runtime Delivery:
+ * Firmware blobs are loaded by the host driver (e.g., Xe KMD)
+ * after the GPU or SoC has booted.
+ *
+ * - Secure and Authenticated:
+ * All payloads are signed and verified by the authentication firmware.
+ *
+ * - No Firmware Flashing Required:
+ * Updates are applied in volatile memory and do not require SPI flash
+ * modification or system reboot.
+ *
+ * - Re-entrant:
+ * Multiple updates of the same or different types can be applied
+ * sequentially within a single boot session.
+ *
+ * - Version Controlled:
+ * Each payload includes version and security version number (SVN)
+ * metadata to support anti-rollback enforcement.
+ *
+ * Upload Flow:
+ * ------------
+ * 1. Host driver (KMD or user-space tool) loads the late binding firmware.
+ * 2. Firmware is passed to the MEI interface and forwarded to
+ * authentication firmware.
+ * 3. Authentication firmware authenticates the payload and extracts
+ * command and data arrays.
+ * 4. Authentication firmware delivers the configuration to PUnit/PCODE.
+ * 5. Status is returned back to the host via MEI.
+ */
+
+#define INTEL_LB_CMD 0x12
+#define INTEL_LB_RSP (INTEL_LB_CMD | 0x80)
+
+#define INTEL_LB_SEND_TIMEOUT_MSEC 3000
+#define INTEL_LB_RECV_TIMEOUT_MSEC 3000
+
+/**
+ * struct mei_lb_req - Late Binding request structure
+ * @header: MKHI message header (see struct mkhi_msg_hdr)
+ * @type: Type of the Late Binding payload
+ * @flags: Flags to be passed to the authentication firmware (e.g. %INTEL_LB_FLAGS_IS_PERSISTENT)
+ * @reserved: Reserved for future use by authentication firmware, must be set to 0
+ * @payload_size: Size of the payload data in bytes
+ * @payload: Payload data to be sent to the authentication firmware
+ */
+struct mei_lb_req {
+ struct mkhi_msg_hdr header;
+ __le32 type;
+ __le32 flags;
+ __le32 reserved[2];
+ __le32 payload_size;
+ u8 payload[] __counted_by(payload_size);
+} __packed;
+
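+/*
+ * Wire-size sketch (illustrative; assumes the usual 4-byte MKHI
+ * header): the fixed part of the request is 4 + 4 + 4 + 8 + 4 = 24
+ * bytes, so struct_size(req, payload, payload_size) evaluates to
+ * 24 + payload_size, which is the value checked against the client
+ * MTU in mei_lb_push_payload().
+ */
+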
+/**
+ * struct mei_lb_rsp - Late Binding response structure
+ * @header: MKHI message header (see struct mkhi_msg_hdr)
+ * @type: Type of the Late Binding payload
+ * @reserved: Reserved for future use by authentication firmware, must be set to 0
+ * @status: Status returned by authentication firmware (see &enum intel_lb_status)
+ */
+struct mei_lb_rsp {
+ struct mkhi_msg_hdr header;
+ __le32 type;
+ __le32 reserved[2];
+ __le32 status;
+} __packed;
+
+static bool mei_lb_check_response(const struct device *dev, ssize_t bytes,
+ struct mei_lb_rsp *rsp)
+{
+	/*
+	 * The received message may be smaller than the full message size when
+	 * the reply contains only the MKHI header with the result field set to
+	 * an error code. Check the header size and content first so an exact
+	 * error can be reported, then proceed to the whole message.
+	 */
+ if (bytes < sizeof(rsp->header)) {
+ dev_err(dev, "Received less than header size from the firmware: %zd < %zu\n",
+ bytes, sizeof(rsp->header));
+ return false;
+ }
+ if (rsp->header.group_id != MKHI_GROUP_ID_GFX) {
+ dev_err(dev, "Mismatch group id: 0x%x instead of 0x%x\n",
+ rsp->header.group_id, MKHI_GROUP_ID_GFX);
+ return false;
+ }
+ if (rsp->header.command != INTEL_LB_RSP) {
+ dev_err(dev, "Mismatch command: 0x%x instead of 0x%x\n",
+ rsp->header.command, INTEL_LB_RSP);
+ return false;
+ }
+ if (rsp->header.result) {
+ dev_err(dev, "Error in result: 0x%x\n", rsp->header.result);
+ return false;
+ }
+ if (bytes < sizeof(*rsp)) {
+ dev_err(dev, "Received less than message size from the firmware: %zd < %zu\n",
+ bytes, sizeof(*rsp));
+ return false;
+ }
+
+ return true;
+}
+
+static int mei_lb_push_payload(struct device *dev,
+ enum intel_lb_type type, u32 flags,
+ const void *payload, size_t payload_size)
+{
+ struct mei_cl_device *cldev;
+ struct mei_lb_req *req = NULL;
+ struct mei_lb_rsp rsp;
+ size_t req_size;
+ ssize_t bytes;
+ int ret;
+
+ cldev = to_mei_cl_device(dev);
+
+ ret = mei_cldev_enable(cldev);
+ if (ret) {
+ dev_dbg(dev, "Failed to enable firmware client. %d\n", ret);
+ return ret;
+ }
+
+ req_size = struct_size(req, payload, payload_size);
+ if (req_size > mei_cldev_mtu(cldev)) {
+ dev_err(dev, "Payload is too big: %zu\n", payload_size);
+ ret = -EMSGSIZE;
+ goto end;
+ }
+
+ req = kmalloc(req_size, GFP_KERNEL);
+ if (!req) {
+ ret = -ENOMEM;
+ goto end;
+ }
+
+ req->header.group_id = MKHI_GROUP_ID_GFX;
+ req->header.command = INTEL_LB_CMD;
+ req->type = cpu_to_le32(type);
+ req->flags = cpu_to_le32(flags);
+ req->reserved[0] = 0;
+ req->reserved[1] = 0;
+ req->payload_size = cpu_to_le32(payload_size);
+ memcpy(req->payload, payload, payload_size);
+
+ bytes = mei_cldev_send_timeout(cldev, (u8 *)req, req_size,
+ INTEL_LB_SEND_TIMEOUT_MSEC);
+ if (bytes < 0) {
+ dev_err(dev, "Failed to send late binding request to firmware. %zd\n", bytes);
+ ret = bytes;
+ goto end;
+ }
+
+ bytes = mei_cldev_recv_timeout(cldev, (u8 *)&rsp, sizeof(rsp),
+ INTEL_LB_RECV_TIMEOUT_MSEC);
+ if (bytes < 0) {
+ dev_err(dev, "Failed to receive late binding reply from MEI firmware. %zd\n",
+ bytes);
+ ret = bytes;
+ goto end;
+ }
+ if (!mei_lb_check_response(dev, bytes, &rsp)) {
+ dev_err(dev, "Bad response from the firmware. header: %02x %02x %02x %02x\n",
+ rsp.header.group_id, rsp.header.command,
+ rsp.header.reserved, rsp.header.result);
+ ret = -EPROTO;
+ goto end;
+ }
+
+ dev_dbg(dev, "status = %u\n", le32_to_cpu(rsp.status));
+ ret = (int)le32_to_cpu(rsp.status);
+end:
+ mei_cldev_disable(cldev);
+ kfree(req);
+ return ret;
+}
+
+static const struct intel_lb_component_ops mei_lb_ops = {
+ .push_payload = mei_lb_push_payload,
+};
+
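+/*
+ * Consumer sketch (hypothetical; the real counterpart lives on the
+ * Intel Xe side): the GPU driver registers an INTEL_COMPONENT_LB
+ * component and receives &mei_lb_ops as bind data. The example_*
+ * names and the payload type value are placeholders:
+ *
+ *	static int example_lb_bind(struct device *dev,
+ *				   struct device *mei_dev, void *data)
+ *	{
+ *		const struct intel_lb_component_ops *ops = data;
+ *
+ *		return ops->push_payload(mei_dev, example_payload_type,
+ *					 INTEL_LB_FLAGS_IS_PERSISTENT,
+ *					 example_blob, example_blob_size);
+ *	}
+ */
+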
+static int mei_lb_component_master_bind(struct device *dev)
+{
+ return component_bind_all(dev, (void *)&mei_lb_ops);
+}
+
+static void mei_lb_component_master_unbind(struct device *dev)
+{
+ component_unbind_all(dev, (void *)&mei_lb_ops);
+}
+
+static const struct component_master_ops mei_lb_component_master_ops = {
+ .bind = mei_lb_component_master_bind,
+ .unbind = mei_lb_component_master_unbind,
+};
+
+static int mei_lb_component_match(struct device *dev, int subcomponent,
+ void *data)
+{
+	/*
+	 * Check that the requester is an Intel %PCI_CLASS_DISPLAY_VGA or
+	 * %PCI_CLASS_DISPLAY_OTHER device, and that it is the grandparent
+	 * of the late binding MEI client device.
+	 */
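+	/*
+	 * Expected hierarchy (illustrative), for a discrete card with an
+	 * on-board MEI controller:
+	 *
+	 *	requester (dev): GPU PCI device, VGA or DISPLAY_OTHER class
+	 *	    `-- MEI device
+	 *	          `-- late binding MEI client device (data)
+	 */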
+ struct device *base = data;
+ struct pci_dev *pdev;
+
+ if (!dev)
+ return 0;
+
+ if (!dev_is_pci(dev))
+ return 0;
+
+ pdev = to_pci_dev(dev);
+
+ if (pdev->vendor != PCI_VENDOR_ID_INTEL)
+ return 0;
+
+ if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8) &&
+ pdev->class != (PCI_CLASS_DISPLAY_OTHER << 8))
+ return 0;
+
+ if (subcomponent != INTEL_COMPONENT_LB)
+ return 0;
+
+ base = base->parent;
+ if (!base) /* mei device */
+ return 0;
+
+ base = base->parent; /* pci device */
+
+ return !!base && dev == base;
+}
+
+static int mei_lb_probe(struct mei_cl_device *cldev,
+ const struct mei_cl_device_id *id)
+{
+ struct component_match *master_match = NULL;
+ int ret;
+
+ component_match_add_typed(&cldev->dev, &master_match,
+ mei_lb_component_match, &cldev->dev);
+ if (IS_ERR_OR_NULL(master_match))
+ return -ENOMEM;
+
+ ret = component_master_add_with_match(&cldev->dev,
+ &mei_lb_component_master_ops,
+ master_match);
+ if (ret < 0)
+ dev_err(&cldev->dev, "Failed to add late binding master component. %d\n", ret);
+
+ return ret;
+}
+
+static void mei_lb_remove(struct mei_cl_device *cldev)
+{
+ component_master_del(&cldev->dev, &mei_lb_component_master_ops);
+}
+
+#define MEI_GUID_MKHI UUID_LE(0xe2c2afa2, 0x3817, 0x4d19, \
+ 0x9d, 0x95, 0x6, 0xb1, 0x6b, 0x58, 0x8a, 0x5d)
+
+static const struct mei_cl_device_id mei_lb_tbl[] = {
+ { .uuid = MEI_GUID_MKHI, .version = MEI_CL_VERSION_ANY },
+ { }
+};
+MODULE_DEVICE_TABLE(mei, mei_lb_tbl);
+
+static struct mei_cl_driver mei_lb_driver = {
+ .id_table = mei_lb_tbl,
+ .name = "mei_lb",
+ .probe = mei_lb_probe,
+ .remove = mei_lb_remove,
+};
+
+module_mei_cl_driver(mei_lb_driver);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MEI Late Binding Firmware Update/Upload");
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 6653fc53c951..6df51ee8db62 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -1806,7 +1806,7 @@ static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info,
* the list after acquiring the lock.
*/
get_page(newpage);
- ret = MIGRATEPAGE_SUCCESS;
+ ret = 0;
}
/* Update the balloon list under the @pages_lock */
@@ -1817,7 +1817,7 @@ static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info,
* If we succeed just insert it to the list and update the statistics
* under the lock.
*/
- if (ret == MIGRATEPAGE_SUCCESS) {
+ if (!ret) {
balloon_page_insert(&b->b_dev_info, newpage);
__count_vm_event(BALLOON_MIGRATE);
}
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index a74e75df93b0..9399bf6c766a 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -439,9 +439,9 @@ static void mmc_blk_release(struct gendisk *disk)
}
static int
-mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+mmc_blk_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
- geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
+ geo->cylinders = get_capacity(disk) / (4 * 16);
geo->heads = 4;
geo->sectors = 16;
return 0;
diff --git a/drivers/mmc/host/dw_mmc-rockchip.c b/drivers/mmc/host/dw_mmc-rockchip.c
index d2aec6cf9773..82dd906bb002 100644
--- a/drivers/mmc/host/dw_mmc-rockchip.c
+++ b/drivers/mmc/host/dw_mmc-rockchip.c
@@ -6,6 +6,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
+#include <linux/hw_bitfield.h>
#include <linux/mmc/host.h>
#include <linux/of_address.h>
#include <linux/mmc/slot-gpio.h>
@@ -24,8 +25,6 @@
#define ROCKCHIP_MMC_DELAYNUM_OFFSET 2
#define ROCKCHIP_MMC_DELAYNUM_MASK (0xff << ROCKCHIP_MMC_DELAYNUM_OFFSET)
#define ROCKCHIP_MMC_DELAY_ELEMENT_PSEC 60
-#define HIWORD_UPDATE(val, mask, shift) \
- ((val) << (shift) | (mask) << ((shift) + 16))
static const unsigned int freqs[] = { 100000, 200000, 300000, 400000 };
@@ -148,9 +147,11 @@ static int rockchip_mmc_set_internal_phase(struct dw_mci *host, bool sample, int
raw_value |= nineties;
if (sample)
- mci_writel(host, TIMING_CON1, HIWORD_UPDATE(raw_value, 0x07ff, 1));
+ mci_writel(host, TIMING_CON1,
+ FIELD_PREP_WM16(GENMASK(11, 1), raw_value));
else
- mci_writel(host, TIMING_CON0, HIWORD_UPDATE(raw_value, 0x07ff, 1));
+ mci_writel(host, TIMING_CON0,
+ FIELD_PREP_WM16(GENMASK(11, 1), raw_value));
dev_dbg(host->dev, "set %s_phase(%d) delay_nums=%u actual_degrees=%d\n",
sample ? "sample" : "drv", degrees, delay_num,
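/*
 * Equivalence sketch (assuming FIELD_PREP_WM16() from hw_bitfield.h
 * encodes the write-enable mask in the upper 16 bits): for
 * GENMASK(11, 1),
 *
 *	FIELD_PREP_WM16(GENMASK(11, 1), v)
 *		== (v << 1) | (GENMASK(11, 1) << 16)
 *		== (v << 1) | (0x07ff << 17)
 *		== HIWORD_UPDATE(v, 0x07ff, 1)
 */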
diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c
index ac636efd911d..2cd69c9e9571 100644
--- a/drivers/mmc/host/tifm_sd.c
+++ b/drivers/mmc/host/tifm_sd.c
@@ -191,7 +191,7 @@ static void tifm_sd_transfer_data(struct tifm_sd *host)
}
off = sg[host->sg_pos].offset + host->block_pos;
- pg = nth_page(sg_page(&sg[host->sg_pos]), off >> PAGE_SHIFT);
+ pg = sg_page(&sg[host->sg_pos]) + (off >> PAGE_SHIFT);
p_off = offset_in_page(off);
p_cnt = PAGE_SIZE - p_off;
p_cnt = min(p_cnt, cnt);
@@ -240,7 +240,7 @@ static void tifm_sd_bounce_block(struct tifm_sd *host, struct mmc_data *r_data)
}
off = sg[host->sg_pos].offset + host->block_pos;
- pg = nth_page(sg_page(&sg[host->sg_pos]), off >> PAGE_SHIFT);
+ pg = sg_page(&sg[host->sg_pos]) + (off >> PAGE_SHIFT);
p_off = offset_in_page(off);
p_cnt = PAGE_SIZE - p_off;
p_cnt = min(p_cnt, cnt);
diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c
index 85b49c07918b..3bccf800339b 100644
--- a/drivers/mmc/host/usdhi6rol0.c
+++ b/drivers/mmc/host/usdhi6rol0.c
@@ -323,7 +323,7 @@ static void usdhi6_blk_bounce(struct usdhi6_host *host,
host->head_pg.page = host->pg.page;
host->head_pg.mapped = host->pg.mapped;
- host->pg.page = nth_page(host->pg.page, 1);
+ host->pg.page = host->pg.page + 1;
host->pg.mapped = kmap(host->pg.page);
host->blk_page = host->bounce_buf;
@@ -503,7 +503,7 @@ static void usdhi6_sg_advance(struct usdhi6_host *host)
/* We cannot get here after crossing a page border */
/* Next page in the same SG */
- host->pg.page = nth_page(sg_page(host->sg), host->page_idx);
+ host->pg.page = sg_page(host->sg) + host->page_idx;
host->pg.mapped = kmap(host->pg.page);
host->blk_page = host->pg.mapped;
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 847c11542f02..28e09d080440 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -246,9 +246,9 @@ unlock:
blktrans_dev_put(dev);
}
-static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+static int blktrans_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
- struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;
+ struct mtd_blktrans_dev *dev = disk->private_data;
int ret = -ENXIO;
mutex_lock(&dev->lock);
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index 39cc0a6a4d37..b53fd147fa65 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -282,12 +282,12 @@ static void ubiblock_release(struct gendisk *gd)
mutex_unlock(&dev->dev_mutex);
}
-static int ubiblock_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+static int ubiblock_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
/* Some tools might require this information */
geo->heads = 1;
geo->cylinders = 1;
- geo->sectors = get_capacity(bdev->bd_disk);
+ geo->sectors = get_capacity(disk);
geo->start = 0;
return 0;
}
diff --git a/drivers/net/Space.c b/drivers/net/Space.c
index dc50797a2ed0..c01e2c2f7d6c 100644
--- a/drivers/net/Space.c
+++ b/drivers/net/Space.c
@@ -67,8 +67,7 @@ static int netdev_boot_setup_add(char *name, struct ifmap *map)
s = dev_boot_setup;
for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
- memset(s[i].name, 0, sizeof(s[i].name));
- strscpy(s[i].name, name, IFNAMSIZ);
+ strscpy_pad(s[i].name, name);
memcpy(&s[i].map, map, sizeof(s[i].map));
break;
}
diff --git a/drivers/net/amt.c b/drivers/net/amt.c
index ed86537b2f61..902c817a0dea 100644
--- a/drivers/net/amt.c
+++ b/drivers/net/amt.c
@@ -11,6 +11,7 @@
#include <linux/net.h>
#include <linux/igmp.h>
#include <linux/workqueue.h>
+#include <net/flow.h>
#include <net/pkt_sched.h>
#include <net/net_namespace.h>
#include <net/ip.h>
@@ -28,6 +29,7 @@
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/inet_common.h>
+#include <net/inet_dscp.h>
#include <net/ip6_checksum.h>
static struct workqueue_struct *amt_wq;
@@ -1018,7 +1020,7 @@ static bool amt_send_membership_update(struct amt_dev *amt,
fl4.flowi4_oif = amt->stream_dev->ifindex;
fl4.daddr = amt->remote_ip;
fl4.saddr = amt->local_ip;
- fl4.flowi4_tos = AMT_TOS;
+ fl4.flowi4_dscp = inet_dsfield_to_dscp(AMT_TOS);
fl4.flowi4_proto = IPPROTO_UDP;
rt = ip_route_output_key(amt->net, &fl4);
if (IS_ERR(rt)) {
@@ -1133,7 +1135,7 @@ static bool amt_send_membership_query(struct amt_dev *amt,
fl4.flowi4_oif = amt->stream_dev->ifindex;
fl4.daddr = tunnel->ip4;
fl4.saddr = amt->local_ip;
- fl4.flowi4_tos = AMT_TOS;
+ fl4.flowi4_dscp = inet_dsfield_to_dscp(AMT_TOS);
fl4.flowi4_proto = IPPROTO_UDP;
rt = ip_route_output_key(amt->net, &fl4);
if (IS_ERR(rt)) {
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 4edc8e6b6b64..49717b7b82a2 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -436,6 +436,7 @@ static void __ad_actor_update_port(struct port *port)
port->actor_system = BOND_AD_INFO(bond).system.sys_mac_addr;
port->actor_system_priority = BOND_AD_INFO(bond).system.sys_priority;
+ port->actor_port_priority = SLAVE_AD_INFO(port->slave)->port_priority;
}
/* Conversions */
@@ -746,6 +747,18 @@ static int __agg_active_ports(struct aggregator *agg)
return active;
}
+static unsigned int __agg_ports_priority(const struct aggregator *agg)
+{
+ struct port *port = agg->lag_ports;
+ unsigned int prio = 0;
+
+ for (; port; port = port->next_port_in_aggregator)
+ if (port->is_enabled)
+ prio += port->actor_port_priority;
+
+ return prio;
+}
+
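+/*
+ * Selection sketch (illustrative): with the default actor_port_prio
+ * of 255, an aggregator with two enabled ports totals 510 and wins
+ * over an aggregator whose single port was raised to 400; equal
+ * totals fall through to the BOND_AD_COUNT comparison in
+ * ad_agg_selection_test().
+ */
+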
/**
* __get_agg_bandwidth - get the total bandwidth of an aggregator
* @aggregator: the aggregator we're looking at
@@ -1707,6 +1720,9 @@ static struct aggregator *ad_agg_selection_test(struct aggregator *best,
* 4. Therefore, current and best both have partner replies or
* both do not, so perform selection policy:
*
+ * BOND_AD_PRIO: Select by total priority of ports. If priority
+ * is equal, select by count.
+ *
* BOND_AD_COUNT: Select by count of ports. If count is equal,
* select by bandwidth.
*
@@ -1728,6 +1744,14 @@ static struct aggregator *ad_agg_selection_test(struct aggregator *best,
return best;
switch (__get_agg_selection_mode(curr->lag_ports)) {
+ case BOND_AD_PRIO:
+ if (__agg_ports_priority(curr) > __agg_ports_priority(best))
+ return curr;
+
+ if (__agg_ports_priority(curr) < __agg_ports_priority(best))
+ return best;
+
+ fallthrough;
case BOND_AD_COUNT:
if (__agg_active_ports(curr) > __agg_active_ports(best))
return curr;
@@ -1793,6 +1817,10 @@ static int agg_device_up(const struct aggregator *agg)
* (slaves), and reselect whenever a link state change takes place or the
* set of slaves in the bond changes.
*
+ * BOND_AD_PRIO: select the aggregator with highest total priority of ports
+ * (slaves), and reselect whenever a link state change takes place or the
+ * set of slaves in the bond changes.
+ *
* FIXME: this function MUST be called with the first agg in the bond, or
* __get_active_agg() won't work correctly. This function should be better
* called with the bond itself, and retrieve the first agg from it.
@@ -2209,6 +2237,9 @@ void bond_3ad_bind_slave(struct slave *slave)
ad_initialize_port(port, &bond->params);
+ /* Port priority is initialized. Update it to slave's ad info */
+ SLAVE_AD_INFO(slave)->port_priority = port->actor_port_priority;
+
port->slave = slave;
port->actor_port_number = SLAVE_AD_INFO(slave)->id;
/* key is determined according to the link speed, duplex and
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 57be04f6cb11..4da619210c1f 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -142,8 +142,7 @@ module_param(downdelay, int, 0);
MODULE_PARM_DESC(downdelay, "Delay before considering link down, "
"in milliseconds");
module_param(use_carrier, int, 0);
-MODULE_PARM_DESC(use_carrier, "Use netif_carrier_ok (vs MII ioctls) in miimon; "
- "0 for off, 1 for on (default)");
+MODULE_PARM_DESC(use_carrier, "option obsolete, use_carrier cannot be disabled");
module_param(mode, charp, 0);
MODULE_PARM_DESC(mode, "Mode of operation; 0 for balance-rr, "
"1 for active-backup, 2 for balance-xor, "
@@ -830,77 +829,6 @@ const char *bond_slave_link_status(s8 link)
}
}
-/* if <dev> supports MII link status reporting, check its link status.
- *
- * We either do MII/ETHTOOL ioctls, or check netif_carrier_ok(),
- * depending upon the setting of the use_carrier parameter.
- *
- * Return either BMSR_LSTATUS, meaning that the link is up (or we
- * can't tell and just pretend it is), or 0, meaning that the link is
- * down.
- *
- * If reporting is non-zero, instead of faking link up, return -1 if
- * both ETHTOOL and MII ioctls fail (meaning the device does not
- * support them). If use_carrier is set, return whatever it says.
- * It'd be nice if there was a good way to tell if a driver supports
- * netif_carrier, but there really isn't.
- */
-static int bond_check_dev_link(struct bonding *bond,
- struct net_device *slave_dev, int reporting)
-{
- const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
- struct mii_ioctl_data *mii;
- struct ifreq ifr;
- int ret;
-
- if (!reporting && !netif_running(slave_dev))
- return 0;
-
- if (bond->params.use_carrier)
- return netif_carrier_ok(slave_dev) ? BMSR_LSTATUS : 0;
-
- /* Try to get link status using Ethtool first. */
- if (slave_dev->ethtool_ops->get_link) {
- netdev_lock_ops(slave_dev);
- ret = slave_dev->ethtool_ops->get_link(slave_dev);
- netdev_unlock_ops(slave_dev);
-
- return ret ? BMSR_LSTATUS : 0;
- }
-
- /* Ethtool can't be used, fallback to MII ioctls. */
- if (slave_ops->ndo_eth_ioctl) {
- /* TODO: set pointer to correct ioctl on a per team member
- * bases to make this more efficient. that is, once
- * we determine the correct ioctl, we will always
- * call it and not the others for that team
- * member.
- */
-
- /* We cannot assume that SIOCGMIIPHY will also read a
- * register; not all network drivers (e.g., e100)
- * support that.
- */
-
- /* Yes, the mii is overlaid on the ifreq.ifr_ifru */
- strscpy_pad(ifr.ifr_name, slave_dev->name, IFNAMSIZ);
- mii = if_mii(&ifr);
-
- if (dev_eth_ioctl(slave_dev, &ifr, SIOCGMIIPHY) == 0) {
- mii->reg_num = MII_BMSR;
- if (dev_eth_ioctl(slave_dev, &ifr, SIOCGMIIREG) == 0)
- return mii->val_out & BMSR_LSTATUS;
- }
- }
-
- /* If reporting, report that either there's no ndo_eth_ioctl,
- * or both SIOCGMIIREG and get_link failed (meaning that we
- * cannot report link status). If not reporting, pretend
- * we're ok.
- */
- return reporting ? -1 : BMSR_LSTATUS;
-}
-
/*----------------------------- Multicast list ------------------------------*/
/* Push the promiscuity flag down to appropriate slaves */
@@ -1966,7 +1894,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
struct slave *new_slave = NULL, *prev_slave;
struct sockaddr_storage ss;
- int link_reporting;
int res = 0, i;
if (slave_dev->flags & IFF_MASTER &&
@@ -1976,12 +1903,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
return -EPERM;
}
- if (!bond->params.use_carrier &&
- slave_dev->ethtool_ops->get_link == NULL &&
- slave_ops->ndo_eth_ioctl == NULL) {
- slave_warn(bond_dev, slave_dev, "no link monitoring support\n");
- }
-
/* already in-use? */
if (netdev_is_rx_handler_busy(slave_dev)) {
SLAVE_NL_ERR(bond_dev, slave_dev, extack,
@@ -2196,29 +2117,10 @@ skip_mac_set:
new_slave->last_tx = new_slave->last_rx;
- if (bond->params.miimon && !bond->params.use_carrier) {
- link_reporting = bond_check_dev_link(bond, slave_dev, 1);
-
- if ((link_reporting == -1) && !bond->params.arp_interval) {
- /* miimon is set but a bonded network driver
- * does not support ETHTOOL/MII and
- * arp_interval is not set. Note: if
- * use_carrier is enabled, we will never go
- * here (because netif_carrier is always
- * supported); thus, we don't need to change
- * the messages for netif_carrier.
- */
- slave_warn(bond_dev, slave_dev, "MII and ETHTOOL support not available for slave, and arp_interval/arp_ip_target module parameters not specified, thus bonding will not detect link failures! see bonding.txt for details\n");
- } else if (link_reporting == -1) {
- /* unable get link status using mii/ethtool */
- slave_warn(bond_dev, slave_dev, "can't get link status from slave; the network driver associated with this interface does not support MII or ETHTOOL link status reporting, thus miimon has no effect on this interface\n");
- }
- }
-
/* check for initial state */
new_slave->link = BOND_LINK_NOCHANGE;
if (bond->params.miimon) {
- if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) {
+ if (netif_carrier_ok(slave_dev)) {
if (bond->params.updelay) {
bond_set_slave_link_state(new_slave,
BOND_LINK_BACK,
@@ -2760,7 +2662,7 @@ static int bond_miimon_inspect(struct bonding *bond)
bond_for_each_slave_rcu(bond, slave, iter) {
bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
- link_state = bond_check_dev_link(bond, slave->dev, 0);
+ link_state = netif_carrier_ok(slave->dev);
switch (slave->link) {
case BOND_LINK_UP:
@@ -4411,7 +4313,7 @@ void bond_work_init_all(struct bonding *bond)
INIT_DELAYED_WORK(&bond->slave_arr_work, bond_slave_arr_handler);
}
-static void bond_work_cancel_all(struct bonding *bond)
+void bond_work_cancel_all(struct bonding *bond)
{
cancel_delayed_work_sync(&bond->mii_work);
cancel_delayed_work_sync(&bond->arp_work);
@@ -6257,10 +6159,10 @@ static int __init bond_check_params(struct bond_params *params)
downdelay = 0;
}
- if ((use_carrier != 0) && (use_carrier != 1)) {
- pr_warn("Warning: use_carrier module parameter (%d), not of valid value (0/1), so it was set to 1\n",
- use_carrier);
- use_carrier = 1;
+ if (use_carrier != 1) {
+ pr_err("Error: invalid use_carrier parameter (%d)\n",
+ use_carrier);
+ return -EINVAL;
}
if (num_peer_notif < 0 || num_peer_notif > 255) {
@@ -6507,7 +6409,6 @@ static int __init bond_check_params(struct bond_params *params)
params->updelay = updelay;
params->downdelay = downdelay;
params->peer_notif_delay = 0;
- params->use_carrier = use_carrier;
params->lacp_active = 1;
params->lacp_fast = lacp_fast;
params->primary[0] = 0;
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index 57fff2421f1b..286f11c517f7 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -28,6 +28,7 @@ static size_t bond_get_slave_size(const struct net_device *bond_dev,
nla_total_size(sizeof(u8)) + /* IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE */
nla_total_size(sizeof(u16)) + /* IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE */
nla_total_size(sizeof(s32)) + /* IFLA_BOND_SLAVE_PRIO */
+ nla_total_size(sizeof(u16)) + /* IFLA_BOND_SLAVE_ACTOR_PORT_PRIO */
0;
}
@@ -77,6 +78,10 @@ static int bond_fill_slave_info(struct sk_buff *skb,
ad_port->partner_oper.port_state))
goto nla_put_failure;
}
+
+ if (nla_put_u16(skb, IFLA_BOND_SLAVE_ACTOR_PORT_PRIO,
+ SLAVE_AD_INFO(slave)->port_priority))
+ goto nla_put_failure;
}
return 0;
@@ -130,6 +135,7 @@ static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
static const struct nla_policy bond_slave_policy[IFLA_BOND_SLAVE_MAX + 1] = {
[IFLA_BOND_SLAVE_QUEUE_ID] = { .type = NLA_U16 },
[IFLA_BOND_SLAVE_PRIO] = { .type = NLA_S32 },
+ [IFLA_BOND_SLAVE_ACTOR_PORT_PRIO] = { .type = NLA_U16 },
};
static int bond_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -180,6 +186,16 @@ static int bond_slave_changelink(struct net_device *bond_dev,
return err;
}
+ if (data[IFLA_BOND_SLAVE_ACTOR_PORT_PRIO]) {
+ u16 ad_prio = nla_get_u16(data[IFLA_BOND_SLAVE_ACTOR_PORT_PRIO]);
+
+ bond_opt_slave_initval(&newval, &slave_dev, ad_prio);
+ err = __bond_opt_set(bond, BOND_OPT_ACTOR_PORT_PRIO, &newval,
+ data[IFLA_BOND_SLAVE_ACTOR_PORT_PRIO], extack);
+ if (err)
+ return err;
+ }
+
return 0;
}
@@ -259,13 +275,11 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
return err;
}
if (data[IFLA_BOND_USE_CARRIER]) {
- int use_carrier = nla_get_u8(data[IFLA_BOND_USE_CARRIER]);
-
- bond_opt_initval(&newval, use_carrier);
- err = __bond_opt_set(bond, BOND_OPT_USE_CARRIER, &newval,
- data[IFLA_BOND_USE_CARRIER], extack);
- if (err)
- return err;
+ if (nla_get_u8(data[IFLA_BOND_USE_CARRIER]) != 1) {
+ NL_SET_ERR_MSG_ATTR(extack, data[IFLA_BOND_USE_CARRIER],
+ "option obsolete, use_carrier cannot be disabled");
+ return -EINVAL;
+ }
}
if (data[IFLA_BOND_ARP_INTERVAL]) {
int arp_interval = nla_get_u32(data[IFLA_BOND_ARP_INTERVAL]);
@@ -579,20 +593,22 @@ static int bond_newlink(struct net_device *bond_dev,
struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct bonding *bond = netdev_priv(bond_dev);
struct nlattr **data = params->data;
struct nlattr **tb = params->tb;
int err;
- err = bond_changelink(bond_dev, tb, data, extack);
- if (err < 0)
+ err = register_netdevice(bond_dev);
+ if (err)
return err;
- err = register_netdevice(bond_dev);
- if (!err) {
- struct bonding *bond = netdev_priv(bond_dev);
+ netif_carrier_off(bond_dev);
+ bond_work_init_all(bond);
- netif_carrier_off(bond_dev);
- bond_work_init_all(bond);
+ err = bond_changelink(bond_dev, tb, data, extack);
+ if (err) {
+ bond_work_cancel_all(bond);
+ unregister_netdevice(bond_dev);
}
return err;
@@ -688,7 +704,7 @@ static int bond_fill_info(struct sk_buff *skb,
bond->params.peer_notif_delay * bond->params.miimon))
goto nla_put_failure;
- if (nla_put_u8(skb, IFLA_BOND_USE_CARRIER, bond->params.use_carrier))
+ if (nla_put_u8(skb, IFLA_BOND_USE_CARRIER, 1))
goto nla_put_failure;
if (nla_put_u32(skb, IFLA_BOND_ARP_INTERVAL, bond->params.arp_interval))
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 3b6f815c55ff..495a87f2ea7c 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -79,6 +79,8 @@ static int bond_option_tlb_dynamic_lb_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_ad_actor_sys_prio_set(struct bonding *bond,
const struct bond_opt_value *newval);
+static int bond_option_actor_port_prio_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
static int bond_option_ad_actor_system_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_ad_user_port_key_set(struct bonding *bond,
@@ -160,10 +162,11 @@ static const struct bond_opt_value bond_lacp_rate_tbl[] = {
};
static const struct bond_opt_value bond_ad_select_tbl[] = {
- { "stable", BOND_AD_STABLE, BOND_VALFLAG_DEFAULT},
- { "bandwidth", BOND_AD_BANDWIDTH, 0},
- { "count", BOND_AD_COUNT, 0},
- { NULL, -1, 0},
+ { "stable", BOND_AD_STABLE, BOND_VALFLAG_DEFAULT},
+ { "bandwidth", BOND_AD_BANDWIDTH, 0},
+ { "count", BOND_AD_COUNT, 0},
+ { "actor_port_prio", BOND_AD_PRIO, 0},
+ { NULL, -1, 0},
};
static const struct bond_opt_value bond_num_peer_notif_tbl[] = {
@@ -187,7 +190,6 @@ static const struct bond_opt_value bond_primary_reselect_tbl[] = {
};
static const struct bond_opt_value bond_use_carrier_tbl[] = {
- { "off", 0, 0},
{ "on", 1, BOND_VALFLAG_DEFAULT},
{ NULL, -1, 0}
};
@@ -223,6 +225,13 @@ static const struct bond_opt_value bond_ad_actor_sys_prio_tbl[] = {
{ NULL, -1, 0},
};
+static const struct bond_opt_value bond_actor_port_prio_tbl[] = {
+ { "minval", 0, BOND_VALFLAG_MIN},
+ { "maxval", 65535, BOND_VALFLAG_MAX},
+ { "default", 255, BOND_VALFLAG_DEFAULT},
+ { NULL, -1, 0},
+};
+
static const struct bond_opt_value bond_ad_user_port_key_tbl[] = {
{ "minval", 0, BOND_VALFLAG_MIN | BOND_VALFLAG_DEFAULT},
{ "maxval", 1023, BOND_VALFLAG_MAX},
@@ -370,7 +379,7 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
[BOND_OPT_AD_SELECT] = {
.id = BOND_OPT_AD_SELECT,
.name = "ad_select",
- .desc = "803.ad aggregation selection logic",
+ .desc = "802.3ad aggregation selection logic",
.flags = BOND_OPTFLAG_IFDOWN,
.values = bond_ad_select_tbl,
.set = bond_option_ad_select_set
@@ -419,7 +428,7 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
[BOND_OPT_USE_CARRIER] = {
.id = BOND_OPT_USE_CARRIER,
.name = "use_carrier",
- .desc = "Use netif_carrier_ok (vs MII ioctls) in miimon",
+ .desc = "option obsolete, use_carrier cannot be disabled",
.values = bond_use_carrier_tbl,
.set = bond_option_use_carrier_set
},
@@ -484,6 +493,13 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
.values = bond_ad_actor_sys_prio_tbl,
.set = bond_option_ad_actor_sys_prio_set,
},
+ [BOND_OPT_ACTOR_PORT_PRIO] = {
+ .id = BOND_OPT_ACTOR_PORT_PRIO,
+ .name = "actor_port_prio",
+ .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_8023AD)),
+ .values = bond_actor_port_prio_tbl,
+ .set = bond_option_actor_port_prio_set,
+ },
[BOND_OPT_AD_ACTOR_SYSTEM] = {
.id = BOND_OPT_AD_ACTOR_SYSTEM,
.name = "ad_actor_system",
@@ -1091,10 +1107,6 @@ static int bond_option_peer_notif_delay_set(struct bonding *bond,
static int bond_option_use_carrier_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
- netdev_dbg(bond->dev, "Setting use_carrier to %llu\n",
- newval->value);
- bond->params.use_carrier = newval->value;
-
return 0;
}
@@ -1817,6 +1829,26 @@ static int bond_option_ad_actor_sys_prio_set(struct bonding *bond,
return 0;
}
+static int bond_option_actor_port_prio_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
+{
+ struct slave *slave;
+
+ slave = bond_slave_get_rtnl(newval->slave_dev);
+ if (!slave) {
+ netdev_dbg(bond->dev, "%s called on NULL slave\n", __func__);
+ return -ENODEV;
+ }
+
+ netdev_dbg(newval->slave_dev, "Setting actor_port_prio to %llu\n",
+ newval->value);
+
+ SLAVE_AD_INFO(slave)->port_priority = newval->value;
+ bond_3ad_update_ad_actor_settings(bond);
+
+ return 0;
+}
+
static int bond_option_ad_actor_system_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 1e13bb170515..9a75ad3181ab 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -467,14 +467,12 @@ static ssize_t bonding_show_primary_reselect(struct device *d,
static DEVICE_ATTR(primary_reselect, 0644,
bonding_show_primary_reselect, bonding_sysfs_store_option);
-/* Show the use_carrier flag. */
+/* use_carrier is obsolete, but print value for compatibility */
static ssize_t bonding_show_carrier(struct device *d,
struct device_attribute *attr,
char *buf)
{
- struct bonding *bond = to_bond(d);
-
- return sysfs_emit(buf, "%d\n", bond->params.use_carrier);
+ return sysfs_emit(buf, "1\n");
}
static DEVICE_ATTR(use_carrier, 0644,
bonding_show_carrier, bonding_sysfs_store_option);
diff --git a/drivers/net/can/dev/calc_bittiming.c b/drivers/net/can/dev/calc_bittiming.c
index a94bd67c670c..394d6974f481 100644
--- a/drivers/net/can/dev/calc_bittiming.c
+++ b/drivers/net/can/dev/calc_bittiming.c
@@ -173,13 +173,15 @@ int can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
void can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const,
const struct can_bittiming *dbt,
- u32 *ctrlmode, u32 ctrlmode_supported)
+ u32 tdc_mask, u32 *ctrlmode, u32 ctrlmode_supported)
{
- if (!tdc_const || !(ctrlmode_supported & CAN_CTRLMODE_TDC_AUTO))
+ u32 tdc_auto = tdc_mask & CAN_CTRLMODE_TDC_AUTO_MASK;
+
+ if (!tdc_const || !(ctrlmode_supported & tdc_auto))
return;
- *ctrlmode &= ~CAN_CTRLMODE_FD_TDC_MASK;
+ *ctrlmode &= ~tdc_mask;
/* As specified in ISO 11898-1 section 11.3.3 "Transmitter
* delay compensation" (TDC) is only applicable if data BRP is
@@ -193,6 +195,6 @@ void can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const,
if (sample_point_in_tc < tdc_const->tdco_min)
return;
tdc->tdco = min(sample_point_in_tc, tdc_const->tdco_max);
- *ctrlmode |= CAN_CTRLMODE_TDC_AUTO;
+ *ctrlmode |= tdc_auto;
}
}
diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c
index 3913971125de..15ccedbb3f8d 100644
--- a/drivers/net/can/dev/dev.c
+++ b/drivers/net/can/dev/dev.c
@@ -4,17 +4,17 @@
* Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
*/
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/netdevice.h>
-#include <linux/if_arp.h>
-#include <linux/workqueue.h>
#include <linux/can.h>
#include <linux/can/can-ml.h>
#include <linux/can/dev.h>
#include <linux/can/skb.h>
#include <linux/gpio/consumer.h>
+#include <linux/if_arp.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
static void can_update_state_error_stats(struct net_device *dev,
enum can_state new_state)
@@ -88,6 +88,39 @@ const char *can_get_state_str(const enum can_state state)
}
EXPORT_SYMBOL_GPL(can_get_state_str);
+const char *can_get_ctrlmode_str(u32 ctrlmode)
+{
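+	/* ctrlmode & ~(ctrlmode - 1) isolates the lowest set bit, so only
+	 * the least significant requested flag is decoded per call
+	 */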
+ switch (ctrlmode & ~(ctrlmode - 1)) {
+ case 0:
+ return "none";
+ case CAN_CTRLMODE_LOOPBACK:
+ return "loopback";
+ case CAN_CTRLMODE_LISTENONLY:
+ return "listen-only";
+ case CAN_CTRLMODE_3_SAMPLES:
+ return "triple-sampling";
+ case CAN_CTRLMODE_ONE_SHOT:
+ return "one-shot";
+ case CAN_CTRLMODE_BERR_REPORTING:
+ return "berr-reporting";
+ case CAN_CTRLMODE_FD:
+ return "fd";
+ case CAN_CTRLMODE_PRESUME_ACK:
+ return "presume-ack";
+ case CAN_CTRLMODE_FD_NON_ISO:
+ return "fd-non-iso";
+ case CAN_CTRLMODE_CC_LEN8_DLC:
+ return "cc-len8-dlc";
+ case CAN_CTRLMODE_TDC_AUTO:
+ return "fd-tdc-auto";
+ case CAN_CTRLMODE_TDC_MANUAL:
+ return "fd-tdc-manual";
+ default:
+ return "<unknown>";
+ }
+}
+EXPORT_SYMBOL_GPL(can_get_ctrlmode_str);
+
static enum can_state can_state_err_to_state(u16 err)
{
if (err < CAN_ERROR_WARNING_THRESHOLD)
@@ -240,6 +273,8 @@ void can_setup(struct net_device *dev)
{
dev->type = ARPHRD_CAN;
dev->mtu = CAN_MTU;
+ dev->min_mtu = CAN_MTU;
+ dev->max_mtu = CAN_MTU;
dev->hard_header_len = 0;
dev->addr_len = 0;
dev->tx_queue_len = 10;
@@ -309,6 +344,21 @@ void free_candev(struct net_device *dev)
}
EXPORT_SYMBOL_GPL(free_candev);
+void can_set_default_mtu(struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ if (priv->ctrlmode & CAN_CTRLMODE_FD) {
+ dev->mtu = CANFD_MTU;
+ dev->min_mtu = CANFD_MTU;
+ dev->max_mtu = CANFD_MTU;
+ } else {
+ dev->mtu = CAN_MTU;
+ dev->min_mtu = CAN_MTU;
+ dev->max_mtu = CAN_MTU;
+ }
+}
+
/* changing MTU and control mode for CAN/CANFD devices */
int can_change_mtu(struct net_device *dev, int new_mtu)
{
@@ -347,6 +397,26 @@ int can_change_mtu(struct net_device *dev, int new_mtu)
}
EXPORT_SYMBOL_GPL(can_change_mtu);
+/* helper to define static CAN controller features at device creation time */
+int can_set_static_ctrlmode(struct net_device *dev, u32 static_mode)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ /* alloc_candev() succeeded => netdev_priv() is valid at this point */
+ if (priv->ctrlmode_supported & static_mode) {
+ netdev_warn(dev,
+ "Controller features can not be supported and static at the same time\n");
+ return -EINVAL;
+ }
+ priv->ctrlmode = static_mode;
+
+	/* override the MTU that was set by default in can_setup() */
+ can_set_default_mtu(dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(can_set_static_ctrlmode);
+
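+/*
+ * Usage sketch (hypothetical driver): a controller that only speaks
+ * CAN FD would call, right after alloc_candev():
+ *
+ *	err = can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD);
+ *
+ * which pins CAN_CTRLMODE_FD and lets can_set_default_mtu() switch
+ * the MTU to CANFD_MTU.
+ */
+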
/* generic implementation of netdev_ops::ndo_eth_ioctl for CAN devices
* supporting hardware timestamps
*/
diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c
index d9f6ab3efb97..0591406b6f32 100644
--- a/drivers/net/can/dev/netlink.c
+++ b/drivers/net/can/dev/netlink.c
@@ -36,116 +36,245 @@ static const struct nla_policy can_tdc_policy[IFLA_CAN_TDC_MAX + 1] = {
[IFLA_CAN_TDC_TDCF] = { .type = NLA_U32 },
};
-static int can_validate_bittiming(const struct can_bittiming *bt,
- struct netlink_ext_ack *extack)
+static int can_validate_bittiming(struct nlattr *data[],
+ struct netlink_ext_ack *extack,
+ int ifla_can_bittiming)
{
+ struct can_bittiming *bt;
+
+ if (!data[ifla_can_bittiming])
+ return 0;
+
+ static_assert(__alignof__(*bt) <= NLA_ALIGNTO);
+ bt = nla_data(data[ifla_can_bittiming]);
+
/* sample point is in one-tenth of a percent */
if (bt->sample_point >= 1000) {
NL_SET_ERR_MSG(extack, "sample point must be between 0 and 100%");
-
return -EINVAL;
}
return 0;
}
-static int can_validate(struct nlattr *tb[], struct nlattr *data[],
- struct netlink_ext_ack *extack)
+static int can_validate_tdc(struct nlattr *data_tdc,
+ struct netlink_ext_ack *extack, u32 tdc_flags)
{
- bool is_can_fd = false;
+ bool tdc_manual = tdc_flags & CAN_CTRLMODE_TDC_MANUAL_MASK;
+ bool tdc_auto = tdc_flags & CAN_CTRLMODE_TDC_AUTO_MASK;
+ int err;
+
+ if (tdc_auto && tdc_manual) {
+ NL_SET_ERR_MSG(extack,
+ "TDC manual and auto modes are mutually exclusive");
+ return -EOPNOTSUPP;
+ }
+
+	/* If one of the CAN_CTRLMODE_TDC_* flags is set, then the TDC
+	 * parameters must be provided as well, and vice-versa
+	 */
+ if ((tdc_auto || tdc_manual) && !data_tdc) {
+ NL_SET_ERR_MSG(extack, "TDC parameters are missing");
+ return -EOPNOTSUPP;
+ }
+ if (!(tdc_auto || tdc_manual) && data_tdc) {
+ NL_SET_ERR_MSG(extack, "TDC mode (auto or manual) is missing");
+ return -EOPNOTSUPP;
+ }
+
+ /* If providing TDC parameters, at least TDCO is needed. TDCV
+ * is needed if and only if CAN_CTRLMODE_TDC_MANUAL is set
+ */
+ if (data_tdc) {
+ struct nlattr *tb_tdc[IFLA_CAN_TDC_MAX + 1];
+
+ err = nla_parse_nested(tb_tdc, IFLA_CAN_TDC_MAX,
+ data_tdc, can_tdc_policy, extack);
+ if (err)
+ return err;
+
+ if (tb_tdc[IFLA_CAN_TDC_TDCV]) {
+ if (tdc_auto) {
+ NL_SET_ERR_MSG(extack,
+ "TDCV is incompatible with TDC auto mode");
+ return -EOPNOTSUPP;
+ }
+ } else {
+ if (tdc_manual) {
+ NL_SET_ERR_MSG(extack,
+ "TDC manual mode requires TDCV");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ if (!tb_tdc[IFLA_CAN_TDC_TDCO]) {
+ NL_SET_ERR_MSG(extack, "TDCO is missing");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
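+/*
+ * Matrix implied by can_validate_tdc() (illustrative): TDC_AUTO or
+ * TDC_MANUAL without IFLA_CAN_TDC fails ("parameters are missing");
+ * IFLA_CAN_TDC without either mode flag fails (mode missing); TDCV
+ * together with TDC_AUTO fails; TDC_MANUAL without TDCV fails; TDCO
+ * is always required. E.g. TDC_MANUAL + TDCV + TDCO passes.
+ */
+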
+static int can_validate_databittiming(struct nlattr *data[],
+ struct netlink_ext_ack *extack,
+ int ifla_can_data_bittiming, u32 flags)
+{
+ struct nlattr *data_tdc;
+ const char *type;
+ u32 tdc_flags;
+ bool is_on;
int err;
/* Make sure that valid CAN FD configurations always consist of
* - nominal/arbitration bittiming
* - data bittiming
* - control mode with CAN_CTRLMODE_FD set
- * - TDC parameters are coherent (details below)
+ * - TDC parameters are coherent (details in can_validate_tdc())
*/
+ if (ifla_can_data_bittiming == IFLA_CAN_DATA_BITTIMING) {
+ data_tdc = data[IFLA_CAN_TDC];
+ tdc_flags = flags & CAN_CTRLMODE_FD_TDC_MASK;
+ is_on = flags & CAN_CTRLMODE_FD;
+ type = "FD";
+ } else {
+		return -EOPNOTSUPP; /* Placeholder for CAN XL */
+ }
+
+ if (is_on) {
+ if (!data[IFLA_CAN_BITTIMING] || !data[ifla_can_data_bittiming]) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "Provide both nominal and %s data bittiming",
+ type);
+ return -EOPNOTSUPP;
+ }
+ } else {
+ if (data[ifla_can_data_bittiming]) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "%s data bittiming requires CAN %s",
+ type, type);
+ return -EOPNOTSUPP;
+ }
+ if (data_tdc) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "%s TDC requires CAN %s",
+ type, type);
+ return -EOPNOTSUPP;
+ }
+ }
+
+ err = can_validate_bittiming(data, extack, ifla_can_data_bittiming);
+ if (err)
+ return err;
+
+ err = can_validate_tdc(data_tdc, extack, tdc_flags);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int can_validate(struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ u32 flags = 0;
+ int err;
+
if (!data)
return 0;
if (data[IFLA_CAN_CTRLMODE]) {
struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
- u32 tdc_flags = cm->flags & CAN_CTRLMODE_FD_TDC_MASK;
- is_can_fd = cm->flags & cm->mask & CAN_CTRLMODE_FD;
+ flags = cm->flags & cm->mask;
+ }
- /* CAN_CTRLMODE_TDC_{AUTO,MANUAL} are mutually exclusive */
- if (tdc_flags == CAN_CTRLMODE_FD_TDC_MASK)
- return -EOPNOTSUPP;
- /* If one of the CAN_CTRLMODE_TDC_* flag is set then
- * TDC must be set and vice-versa
- */
- if (!!tdc_flags != !!data[IFLA_CAN_TDC])
- return -EOPNOTSUPP;
- /* If providing TDC parameters, at least TDCO is
- * needed. TDCV is needed if and only if
- * CAN_CTRLMODE_TDC_MANUAL is set
- */
- if (data[IFLA_CAN_TDC]) {
- struct nlattr *tb_tdc[IFLA_CAN_TDC_MAX + 1];
+ err = can_validate_bittiming(data, extack, IFLA_CAN_BITTIMING);
+ if (err)
+ return err;
- err = nla_parse_nested(tb_tdc, IFLA_CAN_TDC_MAX,
- data[IFLA_CAN_TDC],
- can_tdc_policy, extack);
- if (err)
- return err;
+ err = can_validate_databittiming(data, extack,
+ IFLA_CAN_DATA_BITTIMING, flags);
+ if (err)
+ return err;
- if (tb_tdc[IFLA_CAN_TDC_TDCV]) {
- if (tdc_flags & CAN_CTRLMODE_TDC_AUTO)
- return -EOPNOTSUPP;
- } else {
- if (tdc_flags & CAN_CTRLMODE_TDC_MANUAL)
- return -EOPNOTSUPP;
- }
+ return 0;
+}
- if (!tb_tdc[IFLA_CAN_TDC_TDCO])
- return -EOPNOTSUPP;
- }
- }
+static int can_ctrlmode_changelink(struct net_device *dev,
+ struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ struct can_ctrlmode *cm;
+ u32 ctrlstatic, maskedflags, notsupp, ctrlstatic_missing;
- if (data[IFLA_CAN_BITTIMING]) {
- struct can_bittiming bt;
+ if (!data[IFLA_CAN_CTRLMODE])
+ return 0;
- memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt));
- err = can_validate_bittiming(&bt, extack);
- if (err)
- return err;
- }
+ /* Do not allow changing controller mode while running */
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
- if (is_can_fd) {
- if (!data[IFLA_CAN_BITTIMING] || !data[IFLA_CAN_DATA_BITTIMING])
- return -EOPNOTSUPP;
+ cm = nla_data(data[IFLA_CAN_CTRLMODE]);
+ ctrlstatic = can_get_static_ctrlmode(priv);
+ maskedflags = cm->flags & cm->mask;
+ notsupp = maskedflags & ~(priv->ctrlmode_supported | ctrlstatic);
+ ctrlstatic_missing = (maskedflags & ctrlstatic) ^ ctrlstatic;
+
+ if (notsupp) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "requested control mode %s not supported",
+ can_get_ctrlmode_str(notsupp));
+ return -EOPNOTSUPP;
}
- if (data[IFLA_CAN_DATA_BITTIMING] || data[IFLA_CAN_TDC]) {
- if (!is_can_fd)
- return -EOPNOTSUPP;
+ /* do not check for static fd-non-iso if 'fd' is disabled */
+ if (!(maskedflags & CAN_CTRLMODE_FD))
+ ctrlstatic &= ~CAN_CTRLMODE_FD_NON_ISO;
+
+ if (ctrlstatic_missing) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "missing required %s static control mode",
+ can_get_ctrlmode_str(ctrlstatic_missing));
+ return -EOPNOTSUPP;
}
- if (data[IFLA_CAN_DATA_BITTIMING]) {
- struct can_bittiming bt;
+	/* If a top-level dependency flag is provided, reset all its dependencies */
+ if (cm->mask & CAN_CTRLMODE_FD)
+ priv->ctrlmode &= ~CAN_CTRLMODE_FD_TDC_MASK;
- memcpy(&bt, nla_data(data[IFLA_CAN_DATA_BITTIMING]), sizeof(bt));
- err = can_validate_bittiming(&bt, extack);
- if (err)
- return err;
+ /* clear bits to be modified and copy the flag values */
+ priv->ctrlmode &= ~cm->mask;
+ priv->ctrlmode |= maskedflags;
+
+ /* Wipe potential leftovers from previous CAN FD config */
+ if (!(priv->ctrlmode & CAN_CTRLMODE_FD)) {
+ memset(&priv->fd.data_bittiming, 0,
+ sizeof(priv->fd.data_bittiming));
+ priv->ctrlmode &= ~CAN_CTRLMODE_FD_TDC_MASK;
+ memset(&priv->fd.tdc, 0, sizeof(priv->fd.tdc));
}
+ can_set_default_mtu(dev);
+
return 0;
}
-static int can_tdc_changelink(struct can_priv *priv, const struct nlattr *nla,
+static int can_tdc_changelink(struct data_bittiming_params *dbt_params,
+ const struct nlattr *nla,
struct netlink_ext_ack *extack)
{
struct nlattr *tb_tdc[IFLA_CAN_TDC_MAX + 1];
struct can_tdc tdc = { 0 };
- const struct can_tdc_const *tdc_const = priv->fd.tdc_const;
+ const struct can_tdc_const *tdc_const = dbt_params->tdc_const;
int err;
- if (!tdc_const || !can_fd_tdc_is_enabled(priv))
+ if (!tdc_const) {
+ NL_SET_ERR_MSG(extack, "The device does not support TDC");
return -EOPNOTSUPP;
+ }
err = nla_parse_nested(tb_tdc, IFLA_CAN_TDC_MAX, nla,
can_tdc_policy, extack);
@@ -179,69 +308,107 @@ static int can_tdc_changelink(struct can_priv *priv, const struct nlattr *nla,
tdc.tdcf = tdcf;
}
- priv->fd.tdc = tdc;
+ dbt_params->tdc = tdc;
return 0;
}
-static int can_changelink(struct net_device *dev, struct nlattr *tb[],
- struct nlattr *data[],
- struct netlink_ext_ack *extack)
+static int can_dbt_changelink(struct net_device *dev, struct nlattr *data[],
+ bool fd, struct netlink_ext_ack *extack)
{
+ struct nlattr *data_bittiming, *data_tdc;
struct can_priv *priv = netdev_priv(dev);
- bool fd_tdc_flag_provided = false;
+ struct data_bittiming_params *dbt_params;
+ struct can_bittiming dbt;
+ bool need_tdc_calc = false;
+ u32 tdc_mask;
int err;
- /* We need synchronization with dev->stop() */
- ASSERT_RTNL();
+ if (fd) {
+ data_bittiming = data[IFLA_CAN_DATA_BITTIMING];
+ data_tdc = data[IFLA_CAN_TDC];
+ dbt_params = &priv->fd;
+ tdc_mask = CAN_CTRLMODE_FD_TDC_MASK;
+ } else {
+		return -EOPNOTSUPP; /* Placeholder for CAN XL */
+ }
- if (data[IFLA_CAN_CTRLMODE]) {
- struct can_ctrlmode *cm;
- u32 ctrlstatic;
- u32 maskedflags;
+ if (!data_bittiming)
+ return 0;
- /* Do not allow changing controller mode while running */
- if (dev->flags & IFF_UP)
- return -EBUSY;
- cm = nla_data(data[IFLA_CAN_CTRLMODE]);
- ctrlstatic = can_get_static_ctrlmode(priv);
- maskedflags = cm->flags & cm->mask;
+ /* Do not allow changing bittiming while running */
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
- /* check whether provided bits are allowed to be passed */
- if (maskedflags & ~(priv->ctrlmode_supported | ctrlstatic))
- return -EOPNOTSUPP;
+ /* Calculate bittiming parameters based on data_bittiming_const
+ * if set, otherwise pass bitrate directly via do_set_bitrate().
+ * Bail out if neither is given.
+ */
+ if (!dbt_params->data_bittiming_const && !dbt_params->do_set_data_bittiming &&
+ !dbt_params->data_bitrate_const)
+ return -EOPNOTSUPP;
- /* do not check for static fd-non-iso if 'fd' is disabled */
- if (!(maskedflags & CAN_CTRLMODE_FD))
- ctrlstatic &= ~CAN_CTRLMODE_FD_NON_ISO;
+ memcpy(&dbt, nla_data(data_bittiming), sizeof(dbt));
+ err = can_get_bittiming(dev, &dbt, dbt_params->data_bittiming_const,
+ dbt_params->data_bitrate_const,
+ dbt_params->data_bitrate_const_cnt, extack);
+ if (err)
+ return err;
- /* make sure static options are provided by configuration */
- if ((maskedflags & ctrlstatic) != ctrlstatic)
- return -EOPNOTSUPP;
+ if (priv->bitrate_max && dbt.bitrate > priv->bitrate_max) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "CAN data bitrate %u bps surpasses transceiver capabilities of %u bps",
+ dbt.bitrate, priv->bitrate_max);
+ return -EINVAL;
+ }
- /* clear bits to be modified and copy the flag values */
- priv->ctrlmode &= ~cm->mask;
- priv->ctrlmode |= maskedflags;
+ memset(&dbt_params->tdc, 0, sizeof(dbt_params->tdc));
+ if (data[IFLA_CAN_CTRLMODE]) {
+ struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
- /* CAN_CTRLMODE_FD can only be set when driver supports FD */
- if (priv->ctrlmode & CAN_CTRLMODE_FD) {
- dev->mtu = CANFD_MTU;
- } else {
- dev->mtu = CAN_MTU;
- memset(&priv->fd.data_bittiming, 0,
- sizeof(priv->fd.data_bittiming));
- priv->ctrlmode &= ~CAN_CTRLMODE_FD_TDC_MASK;
- memset(&priv->fd.tdc, 0, sizeof(priv->fd.tdc));
+ need_tdc_calc = !(cm->mask & tdc_mask);
+ }
+ if (data_tdc) {
+ /* TDC parameters are provided: use them */
+ err = can_tdc_changelink(dbt_params, data_tdc, extack);
+ if (err) {
+ priv->ctrlmode &= ~tdc_mask;
+ return err;
}
-
- fd_tdc_flag_provided = cm->mask & CAN_CTRLMODE_FD_TDC_MASK;
- /* CAN_CTRLMODE_TDC_{AUTO,MANUAL} are mutually
- * exclusive: make sure to turn the other one off
+ } else if (need_tdc_calc) {
+	/* Neither TDC parameters nor TDC flags were provided:
+	 * do the calculation
*/
- if (fd_tdc_flag_provided)
- priv->ctrlmode &= cm->flags | ~CAN_CTRLMODE_FD_TDC_MASK;
+ can_calc_tdco(&dbt_params->tdc, dbt_params->tdc_const, &dbt,
+ tdc_mask, &priv->ctrlmode, priv->ctrlmode_supported);
+ } /* else: both CAN_CTRLMODE_TDC_{AUTO,MANUAL} are explicitly
+ * turned off. TDC is disabled: do nothing
+ */
+
+ memcpy(&dbt_params->data_bittiming, &dbt, sizeof(dbt));
+
+ if (dbt_params->do_set_data_bittiming) {
+ /* Finally, set the bit-timing registers */
+ err = dbt_params->do_set_data_bittiming(dev);
+ if (err)
+ return err;
}
+ return 0;
+}
+
+static int can_changelink(struct net_device *dev, struct nlattr *tb[],
+ struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ int err;
+
+ /* We need synchronization with dev->stop() */
+ ASSERT_RTNL();
+
+ can_ctrlmode_changelink(dev, data, extack);
+
if (data[IFLA_CAN_BITTIMING]) {
struct can_bittiming bt;
@@ -312,75 +479,21 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
return err;
}
- if (data[IFLA_CAN_DATA_BITTIMING]) {
- struct can_bittiming dbt;
-
- /* Do not allow changing bittiming while running */
- if (dev->flags & IFF_UP)
- return -EBUSY;
-
- /* Calculate bittiming parameters based on
- * data_bittiming_const if set, otherwise pass bitrate
- * directly via do_set_bitrate(). Bail out if neither
- * is given.
- */
- if (!priv->fd.data_bittiming_const && !priv->fd.do_set_data_bittiming &&
- !priv->fd.data_bitrate_const)
- return -EOPNOTSUPP;
-
- memcpy(&dbt, nla_data(data[IFLA_CAN_DATA_BITTIMING]),
- sizeof(dbt));
- err = can_get_bittiming(dev, &dbt,
- priv->fd.data_bittiming_const,
- priv->fd.data_bitrate_const,
- priv->fd.data_bitrate_const_cnt,
- extack);
- if (err)
- return err;
-
- if (priv->bitrate_max && dbt.bitrate > priv->bitrate_max) {
- NL_SET_ERR_MSG_FMT(extack,
- "CANFD data bitrate %u bps surpasses transceiver capabilities of %u bps",
- dbt.bitrate, priv->bitrate_max);
- return -EINVAL;
- }
-
- memset(&priv->fd.tdc, 0, sizeof(priv->fd.tdc));
- if (data[IFLA_CAN_TDC]) {
- /* TDC parameters are provided: use them */
- err = can_tdc_changelink(priv, data[IFLA_CAN_TDC],
- extack);
- if (err) {
- priv->ctrlmode &= ~CAN_CTRLMODE_FD_TDC_MASK;
- return err;
- }
- } else if (!fd_tdc_flag_provided) {
- /* Neither of TDC parameters nor TDC flags are
- * provided: do calculation
- */
- can_calc_tdco(&priv->fd.tdc, priv->fd.tdc_const, &dbt,
- &priv->ctrlmode, priv->ctrlmode_supported);
- } /* else: both CAN_CTRLMODE_TDC_{AUTO,MANUAL} are explicitly
- * turned off. TDC is disabled: do nothing
- */
-
- memcpy(&priv->fd.data_bittiming, &dbt, sizeof(dbt));
-
- if (priv->fd.do_set_data_bittiming) {
- /* Finally, set the bit-timing registers */
- err = priv->fd.do_set_data_bittiming(dev);
- if (err)
- return err;
- }
- }
+ /* CAN FD */
+ err = can_dbt_changelink(dev, data, true, extack);
+ if (err)
+ return err;
if (data[IFLA_CAN_TERMINATION]) {
const u16 termval = nla_get_u16(data[IFLA_CAN_TERMINATION]);
const unsigned int num_term = priv->termination_const_cnt;
unsigned int i;
- if (!priv->do_set_termination)
+ if (!priv->do_set_termination) {
+ NL_SET_ERR_MSG(extack,
+ "Termination is not configurable on this device");
return -EOPNOTSUPP;
+ }
/* check whether given value is supported by the interface */
for (i = 0; i < num_term; i++) {
@@ -401,38 +514,55 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
return 0;
}
-static size_t can_tdc_get_size(const struct net_device *dev)
+static size_t can_tdc_get_size(struct data_bittiming_params *dbt_params,
+ u32 tdc_flags)
{
- struct can_priv *priv = netdev_priv(dev);
+ bool tdc_manual = tdc_flags & CAN_CTRLMODE_TDC_MANUAL_MASK;
size_t size;
- if (!priv->fd.tdc_const)
+ if (!dbt_params->tdc_const)
return 0;
size = nla_total_size(0); /* nest IFLA_CAN_TDC */
- if (priv->ctrlmode_supported & CAN_CTRLMODE_TDC_MANUAL) {
+ if (tdc_manual) {
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCV_MIN */
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCV_MAX */
}
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCO_MIN */
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCO_MAX */
- if (priv->fd.tdc_const->tdcf_max) {
+ if (dbt_params->tdc_const->tdcf_max) {
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCF_MIN */
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCF_MAX */
}
- if (can_fd_tdc_is_enabled(priv)) {
- if (priv->ctrlmode & CAN_CTRLMODE_TDC_MANUAL ||
- priv->fd.do_get_auto_tdcv)
+ if (tdc_flags) {
+ if (tdc_manual || dbt_params->do_get_auto_tdcv)
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCV */
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCO */
- if (priv->fd.tdc_const->tdcf_max)
+ if (dbt_params->tdc_const->tdcf_max)
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCF */
}
return size;
}
+static size_t can_data_bittiming_get_size(struct data_bittiming_params *dbt_params,
+ u32 tdc_flags)
+{
+ size_t size = 0;
+
+ if (dbt_params->data_bittiming.bitrate) /* IFLA_CAN_DATA_BITTIMING */
+ size += nla_total_size(sizeof(dbt_params->data_bittiming));
+ if (dbt_params->data_bittiming_const) /* IFLA_CAN_DATA_BITTIMING_CONST */
+ size += nla_total_size(sizeof(*dbt_params->data_bittiming_const));
+ if (dbt_params->data_bitrate_const) /* IFLA_CAN_DATA_BITRATE_CONST */
+ size += nla_total_size(sizeof(*dbt_params->data_bitrate_const) *
+ dbt_params->data_bitrate_const_cnt);
+ size += can_tdc_get_size(dbt_params, tdc_flags);/* IFLA_CAN_TDC */
+
+ return size;
+}
+
static size_t can_ctrlmode_ext_get_size(void)
{
return nla_total_size(0) + /* nest IFLA_CAN_CTRLMODE_EXT */
@@ -454,10 +584,6 @@ static size_t can_get_size(const struct net_device *dev)
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_RESTART_MS */
if (priv->do_get_berr_counter) /* IFLA_CAN_BERR_COUNTER */
size += nla_total_size(sizeof(struct can_berr_counter));
- if (priv->fd.data_bittiming.bitrate) /* IFLA_CAN_DATA_BITTIMING */
- size += nla_total_size(sizeof(struct can_bittiming));
- if (priv->fd.data_bittiming_const) /* IFLA_CAN_DATA_BITTIMING_CONST */
- size += nla_total_size(sizeof(struct can_bittiming_const));
if (priv->termination_const) {
size += nla_total_size(sizeof(priv->termination)); /* IFLA_CAN_TERMINATION */
size += nla_total_size(sizeof(*priv->termination_const) * /* IFLA_CAN_TERMINATION_CONST */
@@ -466,31 +592,69 @@ static size_t can_get_size(const struct net_device *dev)
if (priv->bitrate_const) /* IFLA_CAN_BITRATE_CONST */
size += nla_total_size(sizeof(*priv->bitrate_const) *
priv->bitrate_const_cnt);
- if (priv->fd.data_bitrate_const) /* IFLA_CAN_DATA_BITRATE_CONST */
- size += nla_total_size(sizeof(*priv->fd.data_bitrate_const) *
- priv->fd.data_bitrate_const_cnt);
size += sizeof(priv->bitrate_max); /* IFLA_CAN_BITRATE_MAX */
- size += can_tdc_get_size(dev); /* IFLA_CAN_TDC */
size += can_ctrlmode_ext_get_size(); /* IFLA_CAN_CTRLMODE_EXT */
+ size += can_data_bittiming_get_size(&priv->fd,
+ priv->ctrlmode & CAN_CTRLMODE_FD_TDC_MASK);
+
return size;
}
-static int can_tdc_fill_info(struct sk_buff *skb, const struct net_device *dev)
+static int can_bittiming_fill_info(struct sk_buff *skb, int ifla_can_bittiming,
+ struct can_bittiming *bittiming)
+{
+ return bittiming->bitrate != CAN_BITRATE_UNSET &&
+ bittiming->bitrate != CAN_BITRATE_UNKNOWN &&
+ nla_put(skb, ifla_can_bittiming, sizeof(*bittiming), bittiming);
+}
+
+static int can_bittiming_const_fill_info(struct sk_buff *skb,
+ int ifla_can_bittiming_const,
+ const struct can_bittiming_const *bittiming_const)
+{
+ return bittiming_const &&
+ nla_put(skb, ifla_can_bittiming_const,
+ sizeof(*bittiming_const), bittiming_const);
+}
+
+static int can_bitrate_const_fill_info(struct sk_buff *skb,
+ int ifla_can_bitrate_const,
+ const u32 *bitrate_const, unsigned int cnt)
+{
+ return bitrate_const &&
+ nla_put(skb, ifla_can_bitrate_const,
+ sizeof(*bitrate_const) * cnt, bitrate_const);
+}
+
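Note the calling convention of the three fill helpers above: each returns nonzero only when its attribute applies and nla_put() fails, so can_fill_info() can chain them with ||. The shape, reduced to one hypothetical helper:

#include <net/netlink.h>

/* Nonzero only when the attribute applies and nla_put_u32() fails. */
static int demo_fill_u32(struct sk_buff *skb, int attrtype, u32 val, bool valid)
{
	return valid && nla_put_u32(skb, attrtype, val);
}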
+static int can_tdc_fill_info(struct sk_buff *skb, const struct net_device *dev,
+ int ifla_can_tdc)
{
- struct nlattr *nest;
struct can_priv *priv = netdev_priv(dev);
- struct can_tdc *tdc = &priv->fd.tdc;
- const struct can_tdc_const *tdc_const = priv->fd.tdc_const;
+ struct data_bittiming_params *dbt_params;
+ const struct can_tdc_const *tdc_const;
+ struct can_tdc *tdc;
+ struct nlattr *nest;
+ bool tdc_is_enabled, tdc_manual;
+
+ if (ifla_can_tdc == IFLA_CAN_TDC) {
+ dbt_params = &priv->fd;
+ tdc_is_enabled = can_fd_tdc_is_enabled(priv);
+ tdc_manual = priv->ctrlmode & CAN_CTRLMODE_TDC_MANUAL;
+ } else {
+		return -EOPNOTSUPP; /* Placeholder for CAN XL */
+ }
+ tdc_const = dbt_params->tdc_const;
+ tdc = &dbt_params->tdc;
if (!tdc_const)
return 0;
- nest = nla_nest_start(skb, IFLA_CAN_TDC);
+ nest = nla_nest_start(skb, ifla_can_tdc);
if (!nest)
return -EMSGSIZE;
- if (priv->ctrlmode_supported & CAN_CTRLMODE_TDC_MANUAL &&
+ if (tdc_manual &&
(nla_put_u32(skb, IFLA_CAN_TDC_TDCV_MIN, tdc_const->tdcv_min) ||
nla_put_u32(skb, IFLA_CAN_TDC_TDCV_MAX, tdc_const->tdcv_max)))
goto err_cancel;
@@ -502,15 +666,15 @@ static int can_tdc_fill_info(struct sk_buff *skb, const struct net_device *dev)
nla_put_u32(skb, IFLA_CAN_TDC_TDCF_MAX, tdc_const->tdcf_max)))
goto err_cancel;
- if (can_fd_tdc_is_enabled(priv)) {
+ if (tdc_is_enabled) {
u32 tdcv;
int err = -EINVAL;
- if (priv->ctrlmode & CAN_CTRLMODE_TDC_MANUAL) {
+ if (tdc_manual) {
tdcv = tdc->tdcv;
err = 0;
- } else if (priv->fd.do_get_auto_tdcv) {
- err = priv->fd.do_get_auto_tdcv(dev, &tdcv);
+ } else if (dbt_params->do_get_auto_tdcv) {
+ err = dbt_params->do_get_auto_tdcv(dev, &tdcv);
}
if (!err && nla_put_u32(skb, IFLA_CAN_TDC_TDCV, tdcv))
goto err_cancel;
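The TDC attributes are emitted inside a nest, and any mid-way failure cancels the whole nest instead of leaving a half-filled attribute in the skb. The idiom in isolation (demo names and attribute numbers are placeholders):

#include <net/netlink.h>

static int demo_fill_nest(struct sk_buff *skb, int nest_type, u32 a, u32 b)
{
	struct nlattr *nest = nla_nest_start(skb, nest_type);

	if (!nest)
		return -EMSGSIZE;
	if (nla_put_u32(skb, 1, a) ||	/* placeholder attribute types */
	    nla_put_u32(skb, 2, b))
		goto err_cancel;
	nla_nest_end(skb, nest);
	return 0;

err_cancel:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}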
@@ -558,14 +722,11 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
if (priv->do_get_state)
priv->do_get_state(dev, &state);
- if ((priv->bittiming.bitrate != CAN_BITRATE_UNSET &&
- priv->bittiming.bitrate != CAN_BITRATE_UNKNOWN &&
- nla_put(skb, IFLA_CAN_BITTIMING,
- sizeof(priv->bittiming), &priv->bittiming)) ||
+ if (can_bittiming_fill_info(skb, IFLA_CAN_BITTIMING,
+ &priv->bittiming) ||
- (priv->bittiming_const &&
- nla_put(skb, IFLA_CAN_BITTIMING_CONST,
- sizeof(*priv->bittiming_const), priv->bittiming_const)) ||
+ can_bittiming_const_fill_info(skb, IFLA_CAN_BITTIMING_CONST,
+ priv->bittiming_const) ||
nla_put(skb, IFLA_CAN_CLOCK, sizeof(priv->clock), &priv->clock) ||
nla_put_u32(skb, IFLA_CAN_STATE, state) ||
@@ -576,14 +737,11 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
!priv->do_get_berr_counter(dev, &bec) &&
nla_put(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec)) ||
- (priv->fd.data_bittiming.bitrate &&
- nla_put(skb, IFLA_CAN_DATA_BITTIMING,
- sizeof(priv->fd.data_bittiming), &priv->fd.data_bittiming)) ||
+ can_bittiming_fill_info(skb, IFLA_CAN_DATA_BITTIMING,
+ &priv->fd.data_bittiming) ||
- (priv->fd.data_bittiming_const &&
- nla_put(skb, IFLA_CAN_DATA_BITTIMING_CONST,
- sizeof(*priv->fd.data_bittiming_const),
- priv->fd.data_bittiming_const)) ||
+ can_bittiming_const_fill_info(skb, IFLA_CAN_DATA_BITTIMING_CONST,
+ priv->fd.data_bittiming_const) ||
(priv->termination_const &&
(nla_put_u16(skb, IFLA_CAN_TERMINATION, priv->termination) ||
@@ -592,23 +750,19 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
priv->termination_const_cnt,
priv->termination_const))) ||
- (priv->bitrate_const &&
- nla_put(skb, IFLA_CAN_BITRATE_CONST,
- sizeof(*priv->bitrate_const) *
- priv->bitrate_const_cnt,
- priv->bitrate_const)) ||
+ can_bitrate_const_fill_info(skb, IFLA_CAN_BITRATE_CONST,
+ priv->bitrate_const,
+ priv->bitrate_const_cnt) ||
- (priv->fd.data_bitrate_const &&
- nla_put(skb, IFLA_CAN_DATA_BITRATE_CONST,
- sizeof(*priv->fd.data_bitrate_const) *
- priv->fd.data_bitrate_const_cnt,
- priv->fd.data_bitrate_const)) ||
+ can_bitrate_const_fill_info(skb, IFLA_CAN_DATA_BITRATE_CONST,
+ priv->fd.data_bitrate_const,
+ priv->fd.data_bitrate_const_cnt) ||
(nla_put(skb, IFLA_CAN_BITRATE_MAX,
sizeof(priv->bitrate_max),
&priv->bitrate_max)) ||
- can_tdc_fill_info(skb, dev) ||
+ can_tdc_fill_info(skb, dev, IFLA_CAN_TDC) ||
can_ctrlmode_ext_fill_info(skb, priv)
)
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index fe74dbd2c966..e1d725979685 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -2213,11 +2213,9 @@ static int m_can_set_coalesce(struct net_device *dev,
cdev->tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
if (cdev->rx_coalesce_usecs_irq)
- cdev->irq_timer_wait =
- ns_to_ktime(cdev->rx_coalesce_usecs_irq * NSEC_PER_USEC);
+ cdev->irq_timer_wait = us_to_ktime(cdev->rx_coalesce_usecs_irq);
else
- cdev->irq_timer_wait =
- ns_to_ktime(cdev->tx_coalesce_usecs_irq * NSEC_PER_USEC);
+ cdev->irq_timer_wait = us_to_ktime(cdev->tx_coalesce_usecs_irq);
return 0;
}
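us_to_ktime() is a drop-in replacement for the open-coded multiplication: it yields the same ktime_t as ns_to_ktime(x * NSEC_PER_USEC) while keeping the unit conversion in one helper. A small sketch:

#include <linux/ktime.h>
#include <linux/printk.h>

static void demo_timeouts(void)
{
	/* two spellings of the same 500 us interval */
	ktime_t a = us_to_ktime(500);
	ktime_t b = ns_to_ktime(500 * NSEC_PER_USEC);

	if (a != b)	/* never taken: the helper is pure sugar */
		pr_warn("ktime mismatch\n");
}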
diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c
index 77292afaed22..b5bc80ac7876 100644
--- a/drivers/net/can/peak_canfd/peak_canfd.c
+++ b/drivers/net/can/peak_canfd/peak_canfd.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2007, 2011 Wolfgang Grandegger <wg@grandegger.com>
- * Copyright (C) 2012 Stephane Grosjean <s.grosjean@peak-system.com>
*
- * Copyright (C) 2016 PEAK System-Technik GmbH
+ * Copyright (C) 2016-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*/
#include <linux/can.h>
diff --git a/drivers/net/can/peak_canfd/peak_canfd_user.h b/drivers/net/can/peak_canfd/peak_canfd_user.h
index a72719dc3b74..60c6542028cf 100644
--- a/drivers/net/can/peak_canfd/peak_canfd_user.h
+++ b/drivers/net/can/peak_canfd/peak_canfd_user.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* CAN driver for PEAK System micro-CAN based adapters
*
- * Copyright (C) 2003-2011 PEAK System-Technik GmbH
- * Copyright (C) 2011-2013 Stephane Grosjean <s.grosjean@peak-system.com>
+ * Copyright (C) 2003-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*/
#ifndef PEAK_CANFD_USER_H
#define PEAK_CANFD_USER_H
diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c
index 1df3c4b54f03..93558e33bc02 100644
--- a/drivers/net/can/peak_canfd/peak_pciefd_main.c
+++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c
@@ -1,10 +1,10 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2007, 2011 Wolfgang Grandegger <wg@grandegger.com>
- * Copyright (C) 2012 Stephane Grosjean <s.grosjean@peak-system.com>
*
* Derived from the PCAN project file driver/src/pcan_pci.c:
*
- * Copyright (C) 2001-2006 PEAK System-Technik GmbH
+ * Copyright (C) 2001-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*/
#include <linux/kernel.h>
@@ -19,7 +19,7 @@
#include "peak_canfd_user.h"
-MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>");
+MODULE_AUTHOR("Stéphane Grosjean <stephane.grosjean@hms-networks.com>");
MODULE_DESCRIPTION("Socket-CAN driver for PEAK PCAN PCIe/M.2 FD family cards");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c
index 87c134bcd48d..5f85f4e27205 100644
--- a/drivers/net/can/rcar/rcar_can.c
+++ b/drivers/net/can/rcar/rcar_can.c
@@ -5,6 +5,8 @@
* Copyright (C) 2013 Renesas Solutions Corp.
*/
+#include <linux/bitfield.h>
+#include <linux/bits.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
@@ -16,6 +18,7 @@
#include <linux/can/dev.h>
#include <linux/clk.h>
#include <linux/of.h>
+#include <linux/pm_runtime.h>
#define RCAR_CAN_DRV_NAME "rcar_can"
@@ -92,7 +95,6 @@ struct rcar_can_priv {
struct net_device *ndev;
struct napi_struct napi;
struct rcar_can_regs __iomem *regs;
- struct clk *clk;
struct clk *can_clk;
u32 tx_head;
u32 tx_tail;
@@ -113,100 +115,102 @@ static const struct can_bittiming_const rcar_can_bittiming_const = {
};
/* Control Register bits */
-#define RCAR_CAN_CTLR_BOM (3 << 11) /* Bus-Off Recovery Mode Bits */
-#define RCAR_CAN_CTLR_BOM_ENT (1 << 11) /* Entry to halt mode */
- /* at bus-off entry */
-#define RCAR_CAN_CTLR_SLPM (1 << 10)
-#define RCAR_CAN_CTLR_CANM (3 << 8) /* Operating Mode Select Bit */
-#define RCAR_CAN_CTLR_CANM_HALT (1 << 9)
-#define RCAR_CAN_CTLR_CANM_RESET (1 << 8)
-#define RCAR_CAN_CTLR_CANM_FORCE_RESET (3 << 8)
-#define RCAR_CAN_CTLR_MLM (1 << 3) /* Message Lost Mode Select */
-#define RCAR_CAN_CTLR_IDFM (3 << 1) /* ID Format Mode Select Bits */
-#define RCAR_CAN_CTLR_IDFM_MIXED (1 << 2) /* Mixed ID mode */
-#define RCAR_CAN_CTLR_MBM (1 << 0) /* Mailbox Mode select */
+#define RCAR_CAN_CTLR_BOM GENMASK(12, 11) /* Bus-Off Recovery Mode Bits */
+#define RCAR_CAN_CTLR_BOM_ENT 1 /* Entry to halt mode */
+ /* at bus-off entry */
+#define RCAR_CAN_CTLR_SLPM BIT(10) /* Sleep Mode */
+#define RCAR_CAN_CTLR_CANM GENMASK(9, 8) /* Operating Mode Select Bit */
+#define RCAR_CAN_CTLR_CANM_OPER 0 /* Operation Mode */
+#define RCAR_CAN_CTLR_CANM_RESET 1 /* Reset Mode */
+#define RCAR_CAN_CTLR_CANM_HALT 2 /* Halt Mode */
+#define RCAR_CAN_CTLR_CANM_FORCE_RESET 3 /* Reset Mode (forcible) */
+#define RCAR_CAN_CTLR_MLM BIT(3) /* Message Lost Mode Select */
+#define RCAR_CAN_CTLR_IDFM GENMASK(2, 1) /* ID Format Mode Select Bits */
+#define RCAR_CAN_CTLR_IDFM_STD 0 /* Standard ID mode */
+#define RCAR_CAN_CTLR_IDFM_EXT 1 /* Extended ID mode */
+#define RCAR_CAN_CTLR_IDFM_MIXED 2 /* Mixed ID mode */
+#define RCAR_CAN_CTLR_MBM BIT(0) /* Mailbox Mode select */
/* Status Register bits */
-#define RCAR_CAN_STR_RSTST (1 << 8) /* Reset Status Bit */
+#define RCAR_CAN_STR_RSTST BIT(8) /* Reset Status Bit */
/* FIFO Received ID Compare Registers 0 and 1 bits */
-#define RCAR_CAN_FIDCR_IDE (1 << 31) /* ID Extension Bit */
-#define RCAR_CAN_FIDCR_RTR (1 << 30) /* Remote Transmission Request Bit */
+#define RCAR_CAN_FIDCR_IDE BIT(31) /* ID Extension Bit */
+#define RCAR_CAN_FIDCR_RTR BIT(30) /* Remote Transmission Request Bit */
/* Receive FIFO Control Register bits */
-#define RCAR_CAN_RFCR_RFEST (1 << 7) /* Receive FIFO Empty Status Flag */
-#define RCAR_CAN_RFCR_RFE (1 << 0) /* Receive FIFO Enable */
+#define RCAR_CAN_RFCR_RFEST BIT(7) /* Receive FIFO Empty Status Flag */
+#define RCAR_CAN_RFCR_RFE BIT(0) /* Receive FIFO Enable */
/* Transmit FIFO Control Register bits */
-#define RCAR_CAN_TFCR_TFUST (7 << 1) /* Transmit FIFO Unsent Message */
- /* Number Status Bits */
-#define RCAR_CAN_TFCR_TFUST_SHIFT 1 /* Offset of Transmit FIFO Unsent */
- /* Message Number Status Bits */
-#define RCAR_CAN_TFCR_TFE (1 << 0) /* Transmit FIFO Enable */
-
-#define RCAR_CAN_N_RX_MKREGS1 2 /* Number of mask registers */
- /* for Rx mailboxes 0-31 */
+#define RCAR_CAN_TFCR_TFUST GENMASK(3, 1) /* Transmit FIFO Unsent Message */
+ /* Number Status Bits */
+#define RCAR_CAN_TFCR_TFE BIT(0) /* Transmit FIFO Enable */
+
+#define RCAR_CAN_N_RX_MKREGS1 2 /* Number of mask registers */
+ /* for Rx mailboxes 0-31 */
#define RCAR_CAN_N_RX_MKREGS2 8
/* Bit Configuration Register settings */
-#define RCAR_CAN_BCR_TSEG1(x) (((x) & 0x0f) << 20)
-#define RCAR_CAN_BCR_BPR(x) (((x) & 0x3ff) << 8)
-#define RCAR_CAN_BCR_SJW(x) (((x) & 0x3) << 4)
-#define RCAR_CAN_BCR_TSEG2(x) ((x) & 0x07)
+#define RCAR_CAN_BCR_TSEG1 GENMASK(23, 20)
+#define RCAR_CAN_BCR_BRP GENMASK(17, 8)
+#define RCAR_CAN_BCR_SJW GENMASK(5, 4)
+#define RCAR_CAN_BCR_TSEG2 GENMASK(2, 0)
/* Mailbox and Mask Registers bits */
-#define RCAR_CAN_IDE (1 << 31)
-#define RCAR_CAN_RTR (1 << 30)
-#define RCAR_CAN_SID_SHIFT 18
+#define RCAR_CAN_IDE BIT(31) /* ID Extension */
+#define RCAR_CAN_RTR BIT(30) /* Remote Transmission Request */
+#define RCAR_CAN_SID GENMASK(28, 18) /* Standard ID */
+#define RCAR_CAN_EID GENMASK(28, 0) /* Extended ID */
/* Mailbox Interrupt Enable Register 1 bits */
-#define RCAR_CAN_MIER1_RXFIE (1 << 28) /* Receive FIFO Interrupt Enable */
-#define RCAR_CAN_MIER1_TXFIE (1 << 24) /* Transmit FIFO Interrupt Enable */
+#define RCAR_CAN_MIER1_RXFIE BIT(28) /* Receive FIFO Interrupt Enable */
+#define RCAR_CAN_MIER1_TXFIE BIT(24) /* Transmit FIFO Interrupt Enable */
/* Interrupt Enable Register bits */
-#define RCAR_CAN_IER_ERSIE (1 << 5) /* Error (ERS) Interrupt Enable Bit */
-#define RCAR_CAN_IER_RXFIE (1 << 4) /* Reception FIFO Interrupt */
- /* Enable Bit */
-#define RCAR_CAN_IER_TXFIE (1 << 3) /* Transmission FIFO Interrupt */
- /* Enable Bit */
+#define RCAR_CAN_IER_ERSIE BIT(5) /* Error (ERS) Interrupt Enable Bit */
+#define RCAR_CAN_IER_RXFIE BIT(4) /* Reception FIFO Interrupt */
+ /* Enable Bit */
+#define RCAR_CAN_IER_TXFIE BIT(3) /* Transmission FIFO Interrupt */
+ /* Enable Bit */
/* Interrupt Status Register bits */
-#define RCAR_CAN_ISR_ERSF (1 << 5) /* Error (ERS) Interrupt Status Bit */
-#define RCAR_CAN_ISR_RXFF (1 << 4) /* Reception FIFO Interrupt */
- /* Status Bit */
-#define RCAR_CAN_ISR_TXFF (1 << 3) /* Transmission FIFO Interrupt */
- /* Status Bit */
+#define RCAR_CAN_ISR_ERSF BIT(5) /* Error (ERS) Interrupt Status Bit */
+#define RCAR_CAN_ISR_RXFF BIT(4) /* Reception FIFO Interrupt */
+ /* Status Bit */
+#define RCAR_CAN_ISR_TXFF BIT(3) /* Transmission FIFO Interrupt */
+ /* Status Bit */
/* Error Interrupt Enable Register bits */
-#define RCAR_CAN_EIER_BLIE (1 << 7) /* Bus Lock Interrupt Enable */
-#define RCAR_CAN_EIER_OLIE (1 << 6) /* Overload Frame Transmit */
- /* Interrupt Enable */
-#define RCAR_CAN_EIER_ORIE (1 << 5) /* Receive Overrun Interrupt Enable */
-#define RCAR_CAN_EIER_BORIE (1 << 4) /* Bus-Off Recovery Interrupt Enable */
-#define RCAR_CAN_EIER_BOEIE (1 << 3) /* Bus-Off Entry Interrupt Enable */
-#define RCAR_CAN_EIER_EPIE (1 << 2) /* Error Passive Interrupt Enable */
-#define RCAR_CAN_EIER_EWIE (1 << 1) /* Error Warning Interrupt Enable */
-#define RCAR_CAN_EIER_BEIE (1 << 0) /* Bus Error Interrupt Enable */
+#define RCAR_CAN_EIER_BLIE BIT(7) /* Bus Lock Interrupt Enable */
+#define RCAR_CAN_EIER_OLIE BIT(6) /* Overload Frame Transmit */
+ /* Interrupt Enable */
+#define RCAR_CAN_EIER_ORIE BIT(5) /* Receive Overrun Interrupt Enable */
+#define RCAR_CAN_EIER_BORIE BIT(4) /* Bus-Off Recovery Interrupt Enable */
+#define RCAR_CAN_EIER_BOEIE BIT(3) /* Bus-Off Entry Interrupt Enable */
+#define RCAR_CAN_EIER_EPIE BIT(2) /* Error Passive Interrupt Enable */
+#define RCAR_CAN_EIER_EWIE BIT(1) /* Error Warning Interrupt Enable */
+#define RCAR_CAN_EIER_BEIE BIT(0) /* Bus Error Interrupt Enable */
/* Error Interrupt Factor Judge Register bits */
-#define RCAR_CAN_EIFR_BLIF (1 << 7) /* Bus Lock Detect Flag */
-#define RCAR_CAN_EIFR_OLIF (1 << 6) /* Overload Frame Transmission */
- /* Detect Flag */
-#define RCAR_CAN_EIFR_ORIF (1 << 5) /* Receive Overrun Detect Flag */
-#define RCAR_CAN_EIFR_BORIF (1 << 4) /* Bus-Off Recovery Detect Flag */
-#define RCAR_CAN_EIFR_BOEIF (1 << 3) /* Bus-Off Entry Detect Flag */
-#define RCAR_CAN_EIFR_EPIF (1 << 2) /* Error Passive Detect Flag */
-#define RCAR_CAN_EIFR_EWIF (1 << 1) /* Error Warning Detect Flag */
-#define RCAR_CAN_EIFR_BEIF (1 << 0) /* Bus Error Detect Flag */
+#define RCAR_CAN_EIFR_BLIF BIT(7) /* Bus Lock Detect Flag */
+#define RCAR_CAN_EIFR_OLIF BIT(6) /* Overload Frame Transmission */
+ /* Detect Flag */
+#define RCAR_CAN_EIFR_ORIF BIT(5) /* Receive Overrun Detect Flag */
+#define RCAR_CAN_EIFR_BORIF BIT(4) /* Bus-Off Recovery Detect Flag */
+#define RCAR_CAN_EIFR_BOEIF BIT(3) /* Bus-Off Entry Detect Flag */
+#define RCAR_CAN_EIFR_EPIF BIT(2) /* Error Passive Detect Flag */
+#define RCAR_CAN_EIFR_EWIF BIT(1) /* Error Warning Detect Flag */
+#define RCAR_CAN_EIFR_BEIF BIT(0) /* Bus Error Detect Flag */
/* Error Code Store Register bits */
-#define RCAR_CAN_ECSR_EDPM (1 << 7) /* Error Display Mode Select Bit */
-#define RCAR_CAN_ECSR_ADEF (1 << 6) /* ACK Delimiter Error Flag */
-#define RCAR_CAN_ECSR_BE0F (1 << 5) /* Bit Error (dominant) Flag */
-#define RCAR_CAN_ECSR_BE1F (1 << 4) /* Bit Error (recessive) Flag */
-#define RCAR_CAN_ECSR_CEF (1 << 3) /* CRC Error Flag */
-#define RCAR_CAN_ECSR_AEF (1 << 2) /* ACK Error Flag */
-#define RCAR_CAN_ECSR_FEF (1 << 1) /* Form Error Flag */
-#define RCAR_CAN_ECSR_SEF (1 << 0) /* Stuff Error Flag */
+#define RCAR_CAN_ECSR_EDPM BIT(7) /* Error Display Mode Select Bit */
+#define RCAR_CAN_ECSR_ADEF BIT(6) /* ACK Delimiter Error Flag */
+#define RCAR_CAN_ECSR_BE0F BIT(5) /* Bit Error (dominant) Flag */
+#define RCAR_CAN_ECSR_BE1F BIT(4) /* Bit Error (recessive) Flag */
+#define RCAR_CAN_ECSR_CEF BIT(3) /* CRC Error Flag */
+#define RCAR_CAN_ECSR_AEF BIT(2) /* ACK Error Flag */
+#define RCAR_CAN_ECSR_FEF BIT(1) /* Form Error Flag */
+#define RCAR_CAN_ECSR_SEF BIT(0) /* Stuff Error Flag */
#define RCAR_CAN_NAPI_WEIGHT 4
#define MAX_STR_READS 0x100
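The register defines above trade hand-shifted constants for GENMASK()/BIT() masks so FIELD_PREP()/FIELD_GET() can do the shifting and masking at the point of use. A minimal round trip with the TSEG1 layout used here (DEMO_* names are illustrative):

#include <linux/bitfield.h>
#include <linux/bits.h>

#define DEMO_BCR_TSEG1	GENMASK(23, 20)

static u32 demo_pack_tseg1(u32 bcr, u32 tseg1)
{
	bcr &= ~DEMO_BCR_TSEG1;		/* clear the field */
	return bcr | FIELD_PREP(DEMO_BCR_TSEG1, tseg1);
}

static u32 demo_unpack_tseg1(u32 bcr)
{
	return FIELD_GET(DEMO_BCR_TSEG1, bcr);	/* shift back down */
}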
@@ -248,35 +252,35 @@ static void rcar_can_error(struct net_device *ndev)
if (ecsr & RCAR_CAN_ECSR_ADEF) {
netdev_dbg(priv->ndev, "ACK Delimiter Error\n");
tx_errors++;
- writeb(~RCAR_CAN_ECSR_ADEF, &priv->regs->ecsr);
+ writeb((u8)~RCAR_CAN_ECSR_ADEF, &priv->regs->ecsr);
if (skb)
cf->data[3] = CAN_ERR_PROT_LOC_ACK_DEL;
}
if (ecsr & RCAR_CAN_ECSR_BE0F) {
netdev_dbg(priv->ndev, "Bit Error (dominant)\n");
tx_errors++;
- writeb(~RCAR_CAN_ECSR_BE0F, &priv->regs->ecsr);
+ writeb((u8)~RCAR_CAN_ECSR_BE0F, &priv->regs->ecsr);
if (skb)
cf->data[2] |= CAN_ERR_PROT_BIT0;
}
if (ecsr & RCAR_CAN_ECSR_BE1F) {
netdev_dbg(priv->ndev, "Bit Error (recessive)\n");
tx_errors++;
- writeb(~RCAR_CAN_ECSR_BE1F, &priv->regs->ecsr);
+ writeb((u8)~RCAR_CAN_ECSR_BE1F, &priv->regs->ecsr);
if (skb)
cf->data[2] |= CAN_ERR_PROT_BIT1;
}
if (ecsr & RCAR_CAN_ECSR_CEF) {
netdev_dbg(priv->ndev, "CRC Error\n");
rx_errors++;
- writeb(~RCAR_CAN_ECSR_CEF, &priv->regs->ecsr);
+ writeb((u8)~RCAR_CAN_ECSR_CEF, &priv->regs->ecsr);
if (skb)
cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
}
if (ecsr & RCAR_CAN_ECSR_AEF) {
netdev_dbg(priv->ndev, "ACK Error\n");
tx_errors++;
- writeb(~RCAR_CAN_ECSR_AEF, &priv->regs->ecsr);
+ writeb((u8)~RCAR_CAN_ECSR_AEF, &priv->regs->ecsr);
if (skb) {
cf->can_id |= CAN_ERR_ACK;
cf->data[3] = CAN_ERR_PROT_LOC_ACK;
@@ -285,14 +289,14 @@ static void rcar_can_error(struct net_device *ndev)
if (ecsr & RCAR_CAN_ECSR_FEF) {
netdev_dbg(priv->ndev, "Form Error\n");
rx_errors++;
- writeb(~RCAR_CAN_ECSR_FEF, &priv->regs->ecsr);
+ writeb((u8)~RCAR_CAN_ECSR_FEF, &priv->regs->ecsr);
if (skb)
cf->data[2] |= CAN_ERR_PROT_FORM;
}
if (ecsr & RCAR_CAN_ECSR_SEF) {
netdev_dbg(priv->ndev, "Stuff Error\n");
rx_errors++;
- writeb(~RCAR_CAN_ECSR_SEF, &priv->regs->ecsr);
+ writeb((u8)~RCAR_CAN_ECSR_SEF, &priv->regs->ecsr);
if (skb)
cf->data[2] |= CAN_ERR_PROT_STUFF;
}
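These status registers clear a flag when 0 is written to its bit while 1s leave the other flags untouched, hence the ~FLAG writes. The new (u8) casts change no behaviour: ~BIT(n) is promoted to a wide integer and writeb() truncates to 8 bits anyway; the cast just makes the truncation explicit and silences conversion warnings. In miniature (DEMO_FLAG is illustrative):

#include <linux/bits.h>
#include <linux/io.h>

#define DEMO_FLAG	BIT(5)

static void demo_clear_flag(void __iomem *reg)
{
	/* write-0-to-clear: bit 5 -> 0, every other bit stays 1 */
	writeb((u8)~DEMO_FLAG, reg);
}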
@@ -300,14 +304,14 @@ static void rcar_can_error(struct net_device *ndev)
priv->can.can_stats.bus_error++;
ndev->stats.rx_errors += rx_errors;
ndev->stats.tx_errors += tx_errors;
- writeb(~RCAR_CAN_EIFR_BEIF, &priv->regs->eifr);
+ writeb((u8)~RCAR_CAN_EIFR_BEIF, &priv->regs->eifr);
}
if (eifr & RCAR_CAN_EIFR_EWIF) {
netdev_dbg(priv->ndev, "Error warning interrupt\n");
priv->can.state = CAN_STATE_ERROR_WARNING;
priv->can.can_stats.error_warning++;
/* Clear interrupt condition */
- writeb(~RCAR_CAN_EIFR_EWIF, &priv->regs->eifr);
+ writeb((u8)~RCAR_CAN_EIFR_EWIF, &priv->regs->eifr);
if (skb)
cf->data[1] = txerr > rxerr ? CAN_ERR_CRTL_TX_WARNING :
CAN_ERR_CRTL_RX_WARNING;
@@ -317,7 +321,7 @@ static void rcar_can_error(struct net_device *ndev)
priv->can.state = CAN_STATE_ERROR_PASSIVE;
priv->can.can_stats.error_passive++;
/* Clear interrupt condition */
- writeb(~RCAR_CAN_EIFR_EPIF, &priv->regs->eifr);
+ writeb((u8)~RCAR_CAN_EIFR_EPIF, &priv->regs->eifr);
if (skb)
cf->data[1] = txerr > rxerr ? CAN_ERR_CRTL_TX_PASSIVE :
CAN_ERR_CRTL_RX_PASSIVE;
@@ -329,7 +333,7 @@ static void rcar_can_error(struct net_device *ndev)
writeb(priv->ier, &priv->regs->ier);
priv->can.state = CAN_STATE_BUS_OFF;
/* Clear interrupt condition */
- writeb(~RCAR_CAN_EIFR_BOEIF, &priv->regs->eifr);
+ writeb((u8)~RCAR_CAN_EIFR_BOEIF, &priv->regs->eifr);
priv->can.can_stats.bus_off++;
can_bus_off(ndev);
if (skb)
@@ -343,7 +347,7 @@ static void rcar_can_error(struct net_device *ndev)
netdev_dbg(priv->ndev, "Receive overrun error interrupt\n");
ndev->stats.rx_over_errors++;
ndev->stats.rx_errors++;
- writeb(~RCAR_CAN_EIFR_ORIF, &priv->regs->eifr);
+ writeb((u8)~RCAR_CAN_EIFR_ORIF, &priv->regs->eifr);
if (skb) {
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
@@ -354,7 +358,7 @@ static void rcar_can_error(struct net_device *ndev)
"Overload Frame Transmission error interrupt\n");
ndev->stats.rx_over_errors++;
ndev->stats.rx_errors++;
- writeb(~RCAR_CAN_EIFR_OLIF, &priv->regs->eifr);
+ writeb((u8)~RCAR_CAN_EIFR_OLIF, &priv->regs->eifr);
if (skb) {
cf->can_id |= CAN_ERR_PROT;
cf->data[2] |= CAN_ERR_PROT_OVERLOAD;
@@ -372,10 +376,9 @@ static void rcar_can_tx_done(struct net_device *ndev)
u8 isr;
while (1) {
- u8 unsent = readb(&priv->regs->tfcr);
+ u8 unsent = FIELD_GET(RCAR_CAN_TFCR_TFUST,
+ readb(&priv->regs->tfcr));
- unsent = (unsent & RCAR_CAN_TFCR_TFUST) >>
- RCAR_CAN_TFCR_TFUST_SHIFT;
if (priv->tx_head - priv->tx_tail <= unsent)
break;
stats->tx_packets++;
@@ -420,15 +423,16 @@ static irqreturn_t rcar_can_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void rcar_can_set_bittiming(struct net_device *dev)
+static void rcar_can_set_bittiming(struct net_device *ndev)
{
- struct rcar_can_priv *priv = netdev_priv(dev);
+ struct rcar_can_priv *priv = netdev_priv(ndev);
struct can_bittiming *bt = &priv->can.bittiming;
u32 bcr;
- bcr = RCAR_CAN_BCR_TSEG1(bt->phase_seg1 + bt->prop_seg - 1) |
- RCAR_CAN_BCR_BPR(bt->brp - 1) | RCAR_CAN_BCR_SJW(bt->sjw - 1) |
- RCAR_CAN_BCR_TSEG2(bt->phase_seg2 - 1);
+ bcr = FIELD_PREP(RCAR_CAN_BCR_TSEG1, bt->phase_seg1 + bt->prop_seg - 1) |
+ FIELD_PREP(RCAR_CAN_BCR_BRP, bt->brp - 1) |
+ FIELD_PREP(RCAR_CAN_BCR_SJW, bt->sjw - 1) |
+ FIELD_PREP(RCAR_CAN_BCR_TSEG2, bt->phase_seg2 - 1);
/* Don't overwrite CLKR with 32-bit BCR access; CLKR has 8-bit access.
* All the registers are big-endian but they get byte-swapped on 32-bit
* read/write (but not on 8-bit, contrary to the manuals)...
@@ -452,16 +456,17 @@ static void rcar_can_start(struct net_device *ndev)
ctlr &= ~RCAR_CAN_CTLR_SLPM;
writew(ctlr, &priv->regs->ctlr);
/* Go to reset mode */
- ctlr |= RCAR_CAN_CTLR_CANM_FORCE_RESET;
+ ctlr |= FIELD_PREP(RCAR_CAN_CTLR_CANM, RCAR_CAN_CTLR_CANM_FORCE_RESET);
writew(ctlr, &priv->regs->ctlr);
for (i = 0; i < MAX_STR_READS; i++) {
if (readw(&priv->regs->str) & RCAR_CAN_STR_RSTST)
break;
}
rcar_can_set_bittiming(ndev);
- ctlr |= RCAR_CAN_CTLR_IDFM_MIXED; /* Select mixed ID mode */
- ctlr |= RCAR_CAN_CTLR_BOM_ENT; /* Entry to halt mode automatically */
- /* at bus-off */
+ /* Select mixed ID mode */
+ ctlr |= FIELD_PREP(RCAR_CAN_CTLR_IDFM, RCAR_CAN_CTLR_IDFM_MIXED);
+ /* Entry to halt mode automatically at bus-off */
+ ctlr |= FIELD_PREP(RCAR_CAN_CTLR_BOM, RCAR_CAN_CTLR_BOM_ENT);
ctlr |= RCAR_CAN_CTLR_MBM; /* Select FIFO mailbox mode */
ctlr |= RCAR_CAN_CTLR_MLM; /* Overrun mode */
writew(ctlr, &priv->regs->ctlr);
@@ -491,7 +496,9 @@ static void rcar_can_start(struct net_device *ndev)
priv->can.state = CAN_STATE_ERROR_ACTIVE;
/* Go to operation mode */
- writew(ctlr & ~RCAR_CAN_CTLR_CANM, &priv->regs->ctlr);
+ ctlr &= ~RCAR_CAN_CTLR_CANM;
+ ctlr |= FIELD_PREP(RCAR_CAN_CTLR_CANM, RCAR_CAN_CTLR_CANM_OPER);
+ writew(ctlr, &priv->regs->ctlr);
for (i = 0; i < MAX_STR_READS; i++) {
if (!(readw(&priv->regs->str) & RCAR_CAN_STR_RSTST))
break;
@@ -506,29 +513,28 @@ static int rcar_can_open(struct net_device *ndev)
struct rcar_can_priv *priv = netdev_priv(ndev);
int err;
- err = clk_prepare_enable(priv->clk);
+ err = pm_runtime_resume_and_get(ndev->dev.parent);
if (err) {
- netdev_err(ndev,
- "failed to enable peripheral clock, error %d\n",
- err);
+ netdev_err(ndev, "pm_runtime_resume_and_get() failed %pe\n",
+ ERR_PTR(err));
goto out;
}
err = clk_prepare_enable(priv->can_clk);
if (err) {
- netdev_err(ndev, "failed to enable CAN clock, error %d\n",
- err);
- goto out_clock;
+ netdev_err(ndev, "failed to enable CAN clock: %pe\n",
+ ERR_PTR(err));
+ goto out_rpm;
}
err = open_candev(ndev);
if (err) {
- netdev_err(ndev, "open_candev() failed, error %d\n", err);
+ netdev_err(ndev, "open_candev() failed %pe\n", ERR_PTR(err));
goto out_can_clock;
}
napi_enable(&priv->napi);
err = request_irq(ndev->irq, rcar_can_interrupt, 0, ndev->name, ndev);
if (err) {
- netdev_err(ndev, "request_irq(%d) failed, error %d\n",
- ndev->irq, err);
+ netdev_err(ndev, "request_irq(%d) failed %pe\n", ndev->irq,
+ ERR_PTR(err));
goto out_close;
}
rcar_can_start(ndev);
@@ -539,8 +545,8 @@ out_close:
close_candev(ndev);
out_can_clock:
clk_disable_unprepare(priv->can_clk);
-out_clock:
- clk_disable_unprepare(priv->clk);
+out_rpm:
+ pm_runtime_put(ndev->dev.parent);
out:
return err;
}
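The driver drops its private clkp1 handle in favour of runtime PM: pm_runtime_resume_and_get() powers the device and already drops its reference on failure, and every successful get is balanced by pm_runtime_put() on the error and close paths. The pairing, reduced to a sketch that assumes pm_runtime_enable() ran in probe:

#include <linux/pm_runtime.h>

static int demo_open(struct device *parent)
{
	int err = pm_runtime_resume_and_get(parent);

	if (err)	/* no put needed: the helper cleaned up */
		return err;
	/* ... device is powered, do the real work ... */
	return 0;
}

static void demo_close(struct device *parent)
{
	pm_runtime_put(parent);	/* balance the successful get */
}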
@@ -553,7 +559,7 @@ static void rcar_can_stop(struct net_device *ndev)
/* Go to (force) reset mode */
ctlr = readw(&priv->regs->ctlr);
- ctlr |= RCAR_CAN_CTLR_CANM_FORCE_RESET;
+ ctlr |= FIELD_PREP(RCAR_CAN_CTLR_CANM, RCAR_CAN_CTLR_CANM_FORCE_RESET);
writew(ctlr, &priv->regs->ctlr);
for (i = 0; i < MAX_STR_READS; i++) {
if (readw(&priv->regs->str) & RCAR_CAN_STR_RSTST)
@@ -578,7 +584,7 @@ static int rcar_can_close(struct net_device *ndev)
free_irq(ndev->irq, ndev);
napi_disable(&priv->napi);
clk_disable_unprepare(priv->can_clk);
- clk_disable_unprepare(priv->clk);
+ pm_runtime_put(ndev->dev.parent);
close_candev(ndev);
return 0;
}
@@ -594,9 +600,10 @@ static netdev_tx_t rcar_can_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
if (cf->can_id & CAN_EFF_FLAG) /* Extended frame format */
- data = (cf->can_id & CAN_EFF_MASK) | RCAR_CAN_IDE;
+ data = FIELD_PREP(RCAR_CAN_EID, cf->can_id & CAN_EFF_MASK) |
+ RCAR_CAN_IDE;
else /* Standard frame format */
- data = (cf->can_id & CAN_SFF_MASK) << RCAR_CAN_SID_SHIFT;
+ data = FIELD_PREP(RCAR_CAN_SID, cf->can_id & CAN_SFF_MASK);
if (cf->can_id & CAN_RTR_FLAG) { /* Remote transmission request */
data |= RCAR_CAN_RTR;
@@ -651,9 +658,9 @@ static void rcar_can_rx_pkt(struct rcar_can_priv *priv)
data = readl(&priv->regs->mb[RCAR_CAN_RX_FIFO_MBX].id);
if (data & RCAR_CAN_IDE)
- cf->can_id = (data & CAN_EFF_MASK) | CAN_EFF_FLAG;
+ cf->can_id = FIELD_GET(RCAR_CAN_EID, data) | CAN_EFF_FLAG;
else
- cf->can_id = (data >> RCAR_CAN_SID_SHIFT) & CAN_SFF_MASK;
+ cf->can_id = FIELD_GET(RCAR_CAN_SID, data);
dlc = readb(&priv->regs->mb[RCAR_CAN_RX_FIFO_MBX].dlc);
cf->len = can_cc_dlc2len(dlc);
@@ -715,18 +722,21 @@ static int rcar_can_do_set_mode(struct net_device *ndev, enum can_mode mode)
}
}
-static int rcar_can_get_berr_counter(const struct net_device *dev,
+static int rcar_can_get_berr_counter(const struct net_device *ndev,
struct can_berr_counter *bec)
{
- struct rcar_can_priv *priv = netdev_priv(dev);
+ struct rcar_can_priv *priv = netdev_priv(ndev);
int err;
- err = clk_prepare_enable(priv->clk);
+ err = pm_runtime_resume_and_get(ndev->dev.parent);
if (err)
return err;
+
bec->txerr = readb(&priv->regs->tecr);
bec->rxerr = readb(&priv->regs->recr);
- clk_disable_unprepare(priv->clk);
+
+ pm_runtime_put(ndev->dev.parent);
+
return 0;
}
@@ -738,6 +748,7 @@ static const char * const clock_names[] = {
static int rcar_can_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct rcar_can_priv *priv;
struct net_device *ndev;
void __iomem *addr;
@@ -745,7 +756,7 @@ static int rcar_can_probe(struct platform_device *pdev)
int err = -ENODEV;
int irq;
- of_property_read_u32(pdev->dev.of_node, "renesas,can-clock-select",
+ of_property_read_u32(dev->of_node, "renesas,can-clock-select",
&clock_select);
irq = platform_get_irq(pdev, 0);
@@ -762,30 +773,21 @@ static int rcar_can_probe(struct platform_device *pdev)
ndev = alloc_candev(sizeof(struct rcar_can_priv), RCAR_CAN_FIFO_DEPTH);
if (!ndev) {
- dev_err(&pdev->dev, "alloc_candev() failed\n");
err = -ENOMEM;
goto fail;
}
priv = netdev_priv(ndev);
- priv->clk = devm_clk_get(&pdev->dev, "clkp1");
- if (IS_ERR(priv->clk)) {
- err = PTR_ERR(priv->clk);
- dev_err(&pdev->dev, "cannot get peripheral clock, error %d\n",
- err);
- goto fail_clk;
- }
-
if (!(BIT(clock_select) & RCAR_SUPPORTED_CLOCKS)) {
err = -EINVAL;
- dev_err(&pdev->dev, "invalid CAN clock selected\n");
+ dev_err(dev, "invalid CAN clock selected\n");
goto fail_clk;
}
- priv->can_clk = devm_clk_get(&pdev->dev, clock_names[clock_select]);
+ priv->can_clk = devm_clk_get(dev, clock_names[clock_select]);
if (IS_ERR(priv->can_clk)) {
+ dev_err(dev, "cannot get CAN clock: %pe\n", priv->can_clk);
err = PTR_ERR(priv->can_clk);
- dev_err(&pdev->dev, "cannot get CAN clock, error %d\n", err);
goto fail_clk;
}
@@ -802,21 +804,24 @@ static int rcar_can_probe(struct platform_device *pdev)
priv->can.do_get_berr_counter = rcar_can_get_berr_counter;
priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING;
platform_set_drvdata(pdev, ndev);
- SET_NETDEV_DEV(ndev, &pdev->dev);
+ SET_NETDEV_DEV(ndev, dev);
netif_napi_add_weight(ndev, &priv->napi, rcar_can_rx_poll,
RCAR_CAN_NAPI_WEIGHT);
+
+ pm_runtime_enable(dev);
+
err = register_candev(ndev);
if (err) {
- dev_err(&pdev->dev, "register_candev() failed, error %d\n",
- err);
- goto fail_candev;
+ dev_err(dev, "register_candev() failed %pe\n", ERR_PTR(err));
+ goto fail_rpm;
}
- dev_info(&pdev->dev, "device registered (IRQ%d)\n", ndev->irq);
+ dev_info(dev, "device registered (IRQ%d)\n", ndev->irq);
return 0;
-fail_candev:
+fail_rpm:
+ pm_runtime_disable(dev);
netif_napi_del(&priv->napi);
fail_clk:
free_candev(ndev);
@@ -830,6 +835,7 @@ static void rcar_can_remove(struct platform_device *pdev)
struct rcar_can_priv *priv = netdev_priv(ndev);
unregister_candev(ndev);
+ pm_runtime_disable(&pdev->dev);
netif_napi_del(&priv->napi);
free_candev(ndev);
}
@@ -847,28 +853,28 @@ static int rcar_can_suspend(struct device *dev)
netif_device_detach(ndev);
ctlr = readw(&priv->regs->ctlr);
- ctlr |= RCAR_CAN_CTLR_CANM_HALT;
+ ctlr |= FIELD_PREP(RCAR_CAN_CTLR_CANM, RCAR_CAN_CTLR_CANM_HALT);
writew(ctlr, &priv->regs->ctlr);
ctlr |= RCAR_CAN_CTLR_SLPM;
writew(ctlr, &priv->regs->ctlr);
priv->can.state = CAN_STATE_SLEEPING;
- clk_disable(priv->clk);
+ pm_runtime_put(dev);
return 0;
}
static int rcar_can_resume(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
- struct rcar_can_priv *priv = netdev_priv(ndev);
int err;
if (!netif_running(ndev))
return 0;
- err = clk_enable(priv->clk);
+ err = pm_runtime_resume_and_get(dev);
if (err) {
- netdev_err(ndev, "clk_enable() failed, error %d\n", err);
+ netdev_err(ndev, "pm_runtime_resume_and_get() failed %pe\n",
+ ERR_PTR(err));
return err;
}
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index 7e8b1d2f1af6..45d36adb51b7 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -103,22 +103,13 @@
/* Channel register bits */
/* RSCFDnCmCFG - Classical CAN only */
-#define RCANFD_CFG_SJW(x) (((x) & 0x3) << 24)
-#define RCANFD_CFG_TSEG2(x) (((x) & 0x7) << 20)
-#define RCANFD_CFG_TSEG1(x) (((x) & 0xf) << 16)
-#define RCANFD_CFG_BRP(x) (((x) & 0x3ff) << 0)
+#define RCANFD_CFG_SJW GENMASK(25, 24)
+#define RCANFD_CFG_TSEG2 GENMASK(22, 20)
+#define RCANFD_CFG_TSEG1 GENMASK(19, 16)
+#define RCANFD_CFG_BRP GENMASK(9, 0)
/* RSCFDnCFDCmNCFG - CAN FD only */
-#define RCANFD_NCFG_NTSEG2(gpriv, x) \
- (((x) & ((gpriv)->info->nom_bittiming->tseg2_max - 1)) << (gpriv)->info->sh->ntseg2)
-
-#define RCANFD_NCFG_NTSEG1(gpriv, x) \
- (((x) & ((gpriv)->info->nom_bittiming->tseg1_max - 1)) << (gpriv)->info->sh->ntseg1)
-
-#define RCANFD_NCFG_NSJW(gpriv, x) \
- (((x) & ((gpriv)->info->nom_bittiming->sjw_max - 1)) << (gpriv)->info->sh->nsjw)
-
-#define RCANFD_NCFG_NBRP(x) (((x) & 0x3ff) << 0)
+#define RCANFD_NCFG_NBRP GENMASK(9, 0)
/* RSCFDnCFDCmCTR / RSCFDnCmCTR */
#define RCANFD_CCTR_CTME BIT(24)
@@ -178,15 +169,7 @@
#define RCANFD_CERFL_ERR(x) ((x) & (0x7fff)) /* above bits 14:0 */
/* RSCFDnCFDCmDCFG */
-#define RCANFD_DCFG_DSJW(gpriv, x) (((x) & ((gpriv)->info->data_bittiming->sjw_max - 1)) << 24)
-
-#define RCANFD_DCFG_DTSEG2(gpriv, x) \
- (((x) & ((gpriv)->info->data_bittiming->tseg2_max - 1)) << (gpriv)->info->sh->dtseg2)
-
-#define RCANFD_DCFG_DTSEG1(gpriv, x) \
- (((x) & ((gpriv)->info->data_bittiming->tseg1_max - 1)) << (gpriv)->info->sh->dtseg1)
-
-#define RCANFD_DCFG_DBRP(x) (((x) & 0xff) << 0)
+#define RCANFD_DCFG_DBRP GENMASK(7, 0)
/* RSCFDnCFDCmFDCFG */
#define RCANFD_GEN4_FDCFG_CLOE BIT(30)
@@ -1389,6 +1372,41 @@ static irqreturn_t rcar_canfd_channel_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static inline u32 rcar_canfd_compute_nominal_bit_rate_cfg(struct rcar_canfd_channel *priv,
+ u16 tseg1, u16 tseg2, u16 sjw, u16 brp)
+{
+ struct rcar_canfd_global *gpriv = priv->gpriv;
+ const struct rcar_canfd_hw_info *info = gpriv->info;
+ u32 ntseg1, ntseg2, nsjw, nbrp;
+
+ if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || gpriv->info->shared_can_regs) {
+ ntseg1 = (tseg1 & (info->nom_bittiming->tseg1_max - 1)) << info->sh->ntseg1;
+ ntseg2 = (tseg2 & (info->nom_bittiming->tseg2_max - 1)) << info->sh->ntseg2;
+ nsjw = (sjw & (info->nom_bittiming->sjw_max - 1)) << info->sh->nsjw;
+ nbrp = FIELD_PREP(RCANFD_NCFG_NBRP, brp);
+ } else {
+ ntseg1 = FIELD_PREP(RCANFD_CFG_TSEG1, tseg1);
+ ntseg2 = FIELD_PREP(RCANFD_CFG_TSEG2, tseg2);
+ nsjw = FIELD_PREP(RCANFD_CFG_SJW, sjw);
+ nbrp = FIELD_PREP(RCANFD_CFG_BRP, brp);
+ }
+
+ return (ntseg1 | ntseg2 | nsjw | nbrp);
+}
+
+static inline u32 rcar_canfd_compute_data_bit_rate_cfg(const struct rcar_canfd_hw_info *info,
+ u16 tseg1, u16 tseg2, u16 sjw, u16 brp)
+{
+ u32 dtseg1, dtseg2, dsjw, dbrp;
+
+ dtseg1 = (tseg1 & (info->data_bittiming->tseg1_max - 1)) << info->sh->dtseg1;
+ dtseg2 = (tseg2 & (info->data_bittiming->tseg2_max - 1)) << info->sh->dtseg2;
+ dsjw = (sjw & (info->data_bittiming->sjw_max - 1)) << 24;
+ dbrp = FIELD_PREP(RCANFD_DCFG_DBRP, brp);
+
+ return (dtseg1 | dtseg2 | dsjw | dbrp);
+}
+
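Both helpers expect the already-decremented values, since the hardware encodes each timing field as N-1; the callers below pass tseg1 = prop_seg + phase_seg1 - 1 and so on. A sketch of deriving those register inputs from a struct can_bittiming:

#include <linux/can/dev.h>

static void demo_bt_to_regvals(const struct can_bittiming *bt,
			       u16 *tseg1, u16 *tseg2, u16 *sjw, u16 *brp)
{
	/* the hardware encodes every field as value - 1 */
	*brp = bt->brp - 1;
	*sjw = bt->sjw - 1;
	*tseg1 = bt->prop_seg + bt->phase_seg1 - 1;
	*tseg2 = bt->phase_seg2 - 1;
}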
static void rcar_canfd_set_bittiming(struct net_device *ndev)
{
u32 mask = RCANFD_FDCFG_TDCO | RCANFD_FDCFG_TDCE | RCANFD_FDCFG_TDCOC;
@@ -1407,15 +1425,7 @@ static void rcar_canfd_set_bittiming(struct net_device *ndev)
sjw = bt->sjw - 1;
tseg1 = bt->prop_seg + bt->phase_seg1 - 1;
tseg2 = bt->phase_seg2 - 1;
-
- if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || gpriv->info->shared_can_regs) {
- cfg = (RCANFD_NCFG_NTSEG1(gpriv, tseg1) | RCANFD_NCFG_NBRP(brp) |
- RCANFD_NCFG_NSJW(gpriv, sjw) | RCANFD_NCFG_NTSEG2(gpriv, tseg2));
- } else {
- cfg = (RCANFD_CFG_TSEG1(tseg1) | RCANFD_CFG_BRP(brp) |
- RCANFD_CFG_SJW(sjw) | RCANFD_CFG_TSEG2(tseg2));
- }
-
+ cfg = rcar_canfd_compute_nominal_bit_rate_cfg(priv, tseg1, tseg2, sjw, brp);
rcar_canfd_write(priv->base, RCANFD_CCFG(ch), cfg);
if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD))
@@ -1426,10 +1436,7 @@ static void rcar_canfd_set_bittiming(struct net_device *ndev)
sjw = dbt->sjw - 1;
tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1;
tseg2 = dbt->phase_seg2 - 1;
-
- cfg = (RCANFD_DCFG_DTSEG1(gpriv, tseg1) | RCANFD_DCFG_DBRP(brp) |
- RCANFD_DCFG_DSJW(gpriv, sjw) | RCANFD_DCFG_DTSEG2(gpriv, tseg2));
-
+ cfg = rcar_canfd_compute_data_bit_rate_cfg(gpriv->info, tseg1, tseg2, sjw, brp);
writel(cfg, &gpriv->fcbase[ch].dcfg);
/* Transceiver Delay Compensation */
@@ -1913,7 +1920,10 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
priv->can.fd.do_get_auto_tdcv = rcar_canfd_get_auto_tdcv;
} else {
/* Controller starts in Classical CAN only mode */
- priv->can.bittiming_const = &rcar_canfd_bittiming_const;
+ if (gpriv->info->shared_can_regs)
+ priv->can.bittiming_const = gpriv->info->nom_bittiming;
+ else
+ priv->can.bittiming_const = &rcar_canfd_bittiming_const;
priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING;
}
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index da396d641e24..10d88cbda465 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -1,11 +1,11 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2007, 2011 Wolfgang Grandegger <wg@grandegger.com>
- * Copyright (C) 2012 Stephane Grosjean <s.grosjean@peak-system.com>
*
* Derived from the PCAN project file driver/src/pcan_pci.c:
*
- * Copyright (C) 2001-2006 PEAK System-Technik GmbH
+ * Copyright (C) 2001-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*/
#include <linux/kernel.h>
@@ -22,7 +22,7 @@
#include "sja1000.h"
-MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>");
+MODULE_AUTHOR("Stéphane Grosjean <stephane.grosjean@hms-networks.com>");
MODULE_DESCRIPTION("Socket-CAN driver for PEAK PCAN PCI family cards");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/sja1000/peak_pcmcia.c b/drivers/net/can/sja1000/peak_pcmcia.c
index ce18e9e56059..e1610b527d13 100644
--- a/drivers/net/can/sja1000/peak_pcmcia.c
+++ b/drivers/net/can/sja1000/peak_pcmcia.c
@@ -1,10 +1,10 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2010-2012 Stephane Grosjean <s.grosjean@peak-system.com>
- *
* CAN driver for PEAK-System PCAN-PC Card
* Derived from the PCAN project file driver/src/pcan_pccard.c
- * Copyright (C) 2006-2010 PEAK System-Technik GmbH
+ *
+ * Copyright (C) 2006-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*/
#include <linux/kernel.h>
#include <linux/module.h>
@@ -19,7 +19,7 @@
#include <linux/can/dev.h>
#include "sja1000.h"
-MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>");
+MODULE_AUTHOR("Stéphane Grosjean <stephane.grosjean@hms-networks.com>");
MODULE_DESCRIPTION("CAN driver for PEAK-System PCAN-PC Cards");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c
index 963ea8510dd9..6d4b643e135f 100644
--- a/drivers/net/can/spi/hi311x.c
+++ b/drivers/net/can/spi/hi311x.c
@@ -896,7 +896,8 @@ static int hi3110_can_probe(struct spi_device *spi)
if (ret)
goto out_clk;
- priv->wq = alloc_workqueue("hi3110_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM,
+ priv->wq = alloc_workqueue("hi3110_wq",
+ WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU,
0);
if (!priv->wq) {
ret = -ENOMEM;
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index 313e1d241f01..b797e08499d7 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -1378,7 +1378,8 @@ static int mcp251x_can_probe(struct spi_device *spi)
if (ret)
goto out_clk;
- priv->wq = alloc_workqueue("mcp251x_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM,
+ priv->wq = alloc_workqueue("mcp251x_wq",
+ WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU,
0);
if (!priv->wq) {
ret = -ENOMEM;
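Both SPI drivers (hi311x and mcp251x) now pass WQ_PERCPU explicitly, keeping their historical per-CPU workqueue placement spelled out while the workqueue core moves toward unbound-by-default allocation. In isolation, assuming a kernel that defines WQ_PERCPU:

#include <linux/workqueue.h>

static struct workqueue_struct *demo_alloc_wq(void)
{
	/* WQ_PERCPU makes the traditional per-CPU placement explicit */
	return alloc_workqueue("demo_wq",
			       WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU, 0);
}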
diff --git a/drivers/net/can/usb/esd_usb.c b/drivers/net/can/usb/esd_usb.c
index 27a3818885c2..9bc1824d7be6 100644
--- a/drivers/net/can/usb/esd_usb.c
+++ b/drivers/net/can/usb/esd_usb.c
@@ -9,6 +9,7 @@
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
+#include <linux/err.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/netdevice.h>
@@ -274,6 +275,7 @@ struct esd_usb {
int net_count;
u32 version;
int rxinitdone;
+ int in_usb_disconnect;
void *rxbuf[ESD_USB_MAX_RX_URBS];
dma_addr_t rxbuf_dma[ESD_USB_MAX_RX_URBS];
};
@@ -480,7 +482,7 @@ static void esd_usb_tx_done_msg(struct esd_usb_net_priv *priv,
static void esd_usb_read_bulk_callback(struct urb *urb)
{
struct esd_usb *dev = urb->context;
- int retval;
+ int err;
int pos = 0;
int i;
@@ -496,7 +498,7 @@ static void esd_usb_read_bulk_callback(struct urb *urb)
default:
dev_info(dev->udev->dev.parent,
- "Rx URB aborted (%d)\n", urb->status);
+ "Rx URB aborted (%pe)\n", ERR_PTR(urb->status));
goto resubmit_urb;
}
@@ -539,15 +541,15 @@ resubmit_urb:
urb->transfer_buffer, ESD_USB_RX_BUFFER_SIZE,
esd_usb_read_bulk_callback, dev);
- retval = usb_submit_urb(urb, GFP_ATOMIC);
- if (retval == -ENODEV) {
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err == -ENODEV) {
for (i = 0; i < dev->net_count; i++) {
if (dev->nets[i])
netif_device_detach(dev->nets[i]->netdev);
}
- } else if (retval) {
+ } else if (err) {
dev_err(dev->udev->dev.parent,
- "failed resubmitting read bulk urb: %d\n", retval);
+ "failed resubmitting read bulk urb: %pe\n", ERR_PTR(err));
}
}
@@ -572,7 +574,7 @@ static void esd_usb_write_bulk_callback(struct urb *urb)
return;
if (urb->status)
- netdev_info(netdev, "Tx URB aborted (%d)\n", urb->status);
+ netdev_info(netdev, "Tx URB aborted (%pe)\n", ERR_PTR(urb->status));
netif_trans_update(netdev);
}
@@ -758,7 +760,7 @@ out:
if (err == -ENODEV)
netif_device_detach(netdev);
if (err)
- netdev_err(netdev, "couldn't start device: %d\n", err);
+ netdev_err(netdev, "couldn't start device: %pe\n", ERR_PTR(err));
kfree(msg);
return err;
@@ -800,7 +802,6 @@ static int esd_usb_open(struct net_device *netdev)
/* finally start device */
err = esd_usb_start(priv);
if (err) {
- netdev_warn(netdev, "couldn't start device: %d\n", err);
close_candev(netdev);
return err;
}
@@ -923,7 +924,7 @@ static netdev_tx_t esd_usb_start_xmit(struct sk_buff *skb,
if (err == -ENODEV)
netif_device_detach(netdev);
else
- netdev_warn(netdev, "failed tx_urb %d\n", err);
+ netdev_warn(netdev, "failed tx_urb %pe\n", ERR_PTR(err));
goto releasebuf;
}
@@ -947,10 +948,11 @@ nourbmem:
return ret;
}
-static int esd_usb_close(struct net_device *netdev)
+/* Stop interface */
+static int esd_usb_stop(struct esd_usb_net_priv *priv)
{
- struct esd_usb_net_priv *priv = netdev_priv(netdev);
union esd_usb_msg *msg;
+ int err;
int i;
msg = kmalloc(sizeof(*msg), GFP_KERNEL);
@@ -964,8 +966,11 @@ static int esd_usb_close(struct net_device *netdev)
msg->filter.option = ESD_USB_ID_ENABLE; /* start with segment 0 */
for (i = 0; i <= ESD_USB_MAX_ID_SEGMENT; i++)
msg->filter.mask[i] = 0;
- if (esd_usb_send_msg(priv->usb, msg) < 0)
- netdev_err(netdev, "sending idadd message failed\n");
+ err = esd_usb_send_msg(priv->usb, msg);
+ if (err < 0) {
+ netdev_err(priv->netdev, "sending idadd message failed: %pe\n", ERR_PTR(err));
+ goto bail;
+ }
/* set CAN controller to reset mode */
msg->hdr.len = sizeof(struct esd_usb_set_baudrate_msg) / sizeof(u32); /* # of 32bit words */
@@ -973,8 +978,25 @@ static int esd_usb_close(struct net_device *netdev)
msg->setbaud.net = priv->index;
msg->setbaud.rsvd = 0;
msg->setbaud.baud = cpu_to_le32(ESD_USB_NO_BAUDRATE);
- if (esd_usb_send_msg(priv->usb, msg) < 0)
- netdev_err(netdev, "sending setbaud message failed\n");
+ err = esd_usb_send_msg(priv->usb, msg);
+ if (err < 0)
+ netdev_err(priv->netdev, "sending setbaud message failed: %pe\n", ERR_PTR(err));
+
+bail:
+ kfree(msg);
+
+ return err;
+}
+
+static int esd_usb_close(struct net_device *netdev)
+{
+ struct esd_usb_net_priv *priv = netdev_priv(netdev);
+ int err = 0;
+
+ if (!priv->usb->in_usb_disconnect) {
+		/* Pointless during usb_disconnect(): the device is already gone */
+ err = esd_usb_stop(priv);
+ }
priv->can.state = CAN_STATE_STOPPED;
@@ -982,9 +1004,7 @@ static int esd_usb_close(struct net_device *netdev)
close_candev(netdev);
- kfree(msg);
-
- return 0;
+ return err;
}
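Splitting esd_usb_stop() out of esd_usb_close() lets close skip the idadd/setbaud control transfers once disconnect has started: with the device physically gone, those URBs can only fail. The guard, boiled down to a sketch (demo_* names are illustrative; in_usb_disconnect is the flag added above):

struct demo_priv { int in_usb_disconnect; };

static int demo_stop(struct demo_priv *priv)
{
	return 0;	/* stand-in for the real shutdown messages */
}

static int demo_close(struct demo_priv *priv)
{
	int err = 0;

	if (!priv->in_usb_disconnect)	/* device may still answer */
		err = demo_stop(priv);

	/* local teardown (CAN state, candev) is safe either way */
	return err;
}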
static const struct net_device_ops esd_usb_netdev_ops = {
@@ -1251,14 +1271,14 @@ static int esd_usb_probe_one_net(struct usb_interface *intf, int index)
err = register_candev(netdev);
if (err) {
- dev_err(&intf->dev, "couldn't register CAN device: %d\n", err);
+ dev_err(&intf->dev, "couldn't register CAN device: %pe\n", ERR_PTR(err));
free_candev(netdev);
err = -ENOMEM;
goto done;
}
dev->nets[index] = priv;
- netdev_info(netdev, "device %s registered\n", netdev->name);
+ netdev_info(netdev, "registered\n");
done:
return err;
@@ -1354,9 +1374,11 @@ static void esd_usb_disconnect(struct usb_interface *intf)
usb_set_intfdata(intf, NULL);
if (dev) {
+ dev->in_usb_disconnect = 1;
for (i = 0; i < dev->net_count; i++) {
if (dev->nets[i]) {
netdev = dev->nets[i]->netdev;
+ netdev_info(netdev, "unregister\n");
unregister_netdev(netdev);
free_candev(netdev);
}
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index 6b293a9056c2..9278a1522aae 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -3,8 +3,8 @@
* CAN driver for PEAK System PCAN-USB adapter
* Derived from the PCAN project file driver/src/pcan_usb.c
*
- * Copyright (C) 2003-2010 PEAK System-Technik GmbH
- * Copyright (C) 2011-2012 Stephane Grosjean <s.grosjean@peak-system.com>
+ * Copyright (C) 2003-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*
* Many thanks to Klaus Hitschler <klaus.hitschler@gmx.de>
*/
@@ -919,7 +919,7 @@ static int pcan_usb_init(struct peak_usb_device *dev)
CAN_CTRLMODE_LOOPBACK;
} else {
dev_info(dev->netdev->dev.parent,
- "Firmware update available. Please contact support@peak-system.com\n");
+ "Firmware update available. Please contact support.peak@hms-networks.com\n");
}
return 0;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index dd5caa1c302b..c74302ca7cee 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -3,8 +3,8 @@
* CAN driver for PEAK System USB adapters
* Derived from the PCAN project file driver/src/pcan_usb_core.c
*
- * Copyright (C) 2003-2010 PEAK System-Technik GmbH
- * Copyright (C) 2010-2012 Stephane Grosjean <s.grosjean@peak-system.com>
+ * Copyright (C) 2003-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*
* Many thanks to Klaus Hitschler <klaus.hitschler@gmx.de>
*/
@@ -24,7 +24,7 @@
#include "pcan_usb_core.h"
-MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>");
+MODULE_AUTHOR("Stéphane Grosjean <stephane.grosjean@hms-networks.com>");
MODULE_DESCRIPTION("CAN driver for PEAK-System USB adapters");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.h b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
index abab00930b9d..d1c1897d47b9 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.h
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
@@ -3,8 +3,8 @@
* CAN driver for PEAK System USB adapters
* Derived from the PCAN project file driver/src/pcan_usb_core.c
*
- * Copyright (C) 2003-2010 PEAK System-Technik GmbH
- * Copyright (C) 2010-2012 Stephane Grosjean <s.grosjean@peak-system.com>
+ * Copyright (C) 2003-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*
* Many thanks to Klaus Hitschler <klaus.hitschler@gmx.de>
*/
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index ebefc274b50a..be84191cde56 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -2,7 +2,8 @@
/*
* CAN driver for PEAK System PCAN-USB FD / PCAN-USB Pro FD adapter
*
- * Copyright (C) 2013-2014 Stephane Grosjean <s.grosjean@peak-system.com>
+ * Copyright (C) 2013-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*/
#include <linux/ethtool.h>
#include <linux/module.h>
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index f736196383ac..7be286293b1a 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -3,8 +3,8 @@
* CAN driver for PEAK System PCAN-USB Pro adapter
* Derived from the PCAN project file driver/src/pcan_usbpro.c
*
- * Copyright (C) 2003-2011 PEAK System-Technik GmbH
- * Copyright (C) 2011-2012 Stephane Grosjean <s.grosjean@peak-system.com>
+ * Copyright (C) 2003-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*/
#include <linux/ethtool.h>
#include <linux/module.h>
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.h b/drivers/net/can/usb/peak_usb/pcan_usb_pro.h
index 28e740af905d..162c7546d3a8 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.h
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.h
@@ -3,8 +3,8 @@
* CAN driver for PEAK System PCAN-USB Pro adapter
* Derived from the PCAN project file driver/src/pcan_usbpro_fw.h
*
- * Copyright (C) 2003-2011 PEAK System-Technik GmbH
- * Copyright (C) 2011-2012 Stephane Grosjean <s.grosjean@peak-system.com>
+ * Copyright (C) 2003-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*/
#ifndef PCAN_USB_PRO_H
#define PCAN_USB_PRO_H
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index f67e85807100..fdc662aea279 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -156,7 +156,7 @@ static const struct ethtool_ops vcan_ethtool_ops = {
static void vcan_setup(struct net_device *dev)
{
dev->type = ARPHRD_CAN;
- dev->mtu = CANFD_MTU;
+ dev->mtu = CANXL_MTU;
dev->hard_header_len = 0;
dev->addr_len = 0;
dev->tx_queue_len = 0;
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
index 99a78a757167..b2c19f8c5f8e 100644
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@ -156,7 +156,7 @@ static void vxcan_setup(struct net_device *dev)
struct can_ml_priv *can_ml;
dev->type = ARPHRD_CAN;
- dev->mtu = CANFD_MTU;
+ dev->mtu = CANXL_MTU;
dev->hard_header_len = 0;
dev->addr_len = 0;
dev->tx_queue_len = 0;
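Raising the default MTU of both virtual drivers from CANFD_MTU to CANXL_MTU lets Classical CAN, CAN FD and CAN XL frames all pass through; each constant is the size of the corresponding UAPI frame structure, with CAN XL carrying up to 2048 data bytes. A userspace-style check, assuming <linux/can.h> from a kernel recent enough to define CANXL_MTU:

#include <linux/can.h>
#include <stdio.h>

int main(void)
{
	printf("CAN_MTU=%zu CANFD_MTU=%zu CANXL_MTU=%zu\n",
	       (size_t)CAN_MTU, (size_t)CANFD_MTU, (size_t)CANXL_MTU);
	return 0;
}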
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index ec759f8cb0e2..4d9af691b989 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -26,13 +26,7 @@ config NET_DSA_LOOP
source "drivers/net/dsa/hirschmann/Kconfig"
-config NET_DSA_LANTIQ_GSWIP
- tristate "Lantiq / Intel GSWIP"
- depends on HAS_IOMEM
- select NET_DSA_TAG_GSWIP
- help
- This enables support for the Lantiq / Intel GSWIP 2.1 found in
- the xrx200 / VR9 SoC.
+source "drivers/net/dsa/lantiq/Kconfig"
config NET_DSA_MT7530
tristate "MediaTek MT7530 and MT7531 Ethernet switch support"
@@ -99,6 +93,14 @@ config NET_DSA_RZN1_A5PSW
This driver supports the A5PSW switch, which is embedded in Renesas
RZ/N1 SoC.
+config NET_DSA_KS8995
+ tristate "Micrel KS8995 family 5-ports 10/100 Ethernet switches"
+ depends on SPI
+ select NET_DSA_TAG_NONE
+ help
+	  This driver supports the Micrel KS8995 family of 10/100 Mbit Ethernet
+	  switches, managed over SPI.
+
config NET_DSA_SMSC_LAN9303
tristate
select NET_DSA_TAG_LAN9303
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index cb9a97340e58..0f8ff4a1a313 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -2,10 +2,7 @@
obj-$(CONFIG_NET_DSA_BCM_SF2) += bcm-sf2.o
bcm-sf2-objs := bcm_sf2.o bcm_sf2_cfp.o
obj-$(CONFIG_NET_DSA_LOOP) += dsa_loop.o
-ifdef CONFIG_NET_DSA_LOOP
-obj-$(CONFIG_FIXED_PHY) += dsa_loop_bdinfo.o
-endif
-obj-$(CONFIG_NET_DSA_LANTIQ_GSWIP) += lantiq_gswip.o
+obj-$(CONFIG_NET_DSA_KS8995) += ks8995.o
obj-$(CONFIG_NET_DSA_MT7530) += mt7530.o
obj-$(CONFIG_NET_DSA_MT7530_MDIO) += mt7530-mdio.o
obj-$(CONFIG_NET_DSA_MT7530_MMIO) += mt7530-mmio.o
@@ -19,6 +16,7 @@ obj-$(CONFIG_NET_DSA_VITESSE_VSC73XX_PLATFORM) += vitesse-vsc73xx-platform.o
obj-$(CONFIG_NET_DSA_VITESSE_VSC73XX_SPI) += vitesse-vsc73xx-spi.o
obj-y += b53/
obj-y += hirschmann/
+obj-y += lantiq/
obj-y += microchip/
obj-y += mv88e6xxx/
obj-y += ocelot/
diff --git a/drivers/net/dsa/b53/b53_mmap.c b/drivers/net/dsa/b53/b53_mmap.c
index f06c3e0cc42a..f4a59d8fbdd6 100644
--- a/drivers/net/dsa/b53/b53_mmap.c
+++ b/drivers/net/dsa/b53/b53_mmap.c
@@ -29,8 +29,13 @@
#include "b53_priv.h"
#define BCM63XX_EPHY_REG 0x3C
+#define BCM63268_GPHY_REG 0x54
+
+#define GPHY_CTRL_LOW_PWR BIT(3)
+#define GPHY_CTRL_IDDQ_BIAS BIT(0)
struct b53_phy_info {
+ u32 gphy_port_mask;
u32 ephy_enable_mask;
u32 ephy_port_mask;
u32 ephy_bias_bit;
@@ -65,6 +70,7 @@ static const struct b53_phy_info bcm6368_ephy_info = {
static const u32 bcm63268_ephy_offsets[] = {4, 9, 14};
static const struct b53_phy_info bcm63268_ephy_info = {
+ .gphy_port_mask = BIT(3),
.ephy_enable_mask = GENMASK(4, 0),
.ephy_port_mask = GENMASK((ARRAY_SIZE(bcm63268_ephy_offsets) - 1), 0),
.ephy_bias_bit = 24,
@@ -290,13 +296,30 @@ static int bcm63xx_ephy_set(struct b53_device *dev, int port, bool enable)
return regmap_update_bits(gpio_ctrl, BCM63XX_EPHY_REG, mask, val);
}
+static int bcm63268_gphy_set(struct b53_device *dev, bool enable)
+{
+ struct b53_mmap_priv *priv = dev->priv;
+ struct regmap *gpio_ctrl = priv->gpio_ctrl;
+ u32 mask = GPHY_CTRL_IDDQ_BIAS | GPHY_CTRL_LOW_PWR;
+ u32 val = 0;
+
+ if (!enable)
+ val = mask;
+
+ return regmap_update_bits(gpio_ctrl, BCM63268_GPHY_REG, mask, val);
+}
+
static void b53_mmap_phy_enable(struct b53_device *dev, int port)
{
struct b53_mmap_priv *priv = dev->priv;
int ret = 0;
- if (priv->phy_info && (BIT(port) & priv->phy_info->ephy_port_mask))
- ret = bcm63xx_ephy_set(dev, port, true);
+ if (priv->phy_info) {
+ if (BIT(port) & priv->phy_info->ephy_port_mask)
+ ret = bcm63xx_ephy_set(dev, port, true);
+ else if (BIT(port) & priv->phy_info->gphy_port_mask)
+ ret = bcm63268_gphy_set(dev, true);
+ }
if (!ret)
priv->phys_enabled |= BIT(port);
@@ -307,8 +330,12 @@ static void b53_mmap_phy_disable(struct b53_device *dev, int port)
struct b53_mmap_priv *priv = dev->priv;
int ret = 0;
- if (priv->phy_info && (BIT(port) & priv->phy_info->ephy_port_mask))
- ret = bcm63xx_ephy_set(dev, port, false);
+ if (priv->phy_info) {
+ if (BIT(port) & priv->phy_info->ephy_port_mask)
+ ret = bcm63xx_ephy_set(dev, port, false);
+ else if (BIT(port) & priv->phy_info->gphy_port_mask)
+ ret = bcm63268_gphy_set(dev, false);
+ }
if (!ret)
priv->phys_enabled &= ~BIT(port);
diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
index d8a35f25a4c8..650d93226d9f 100644
--- a/drivers/net/dsa/dsa_loop.c
+++ b/drivers/net/dsa/dsa_loop.c
@@ -17,7 +17,19 @@
#include <linux/dsa/loop.h>
#include <net/dsa.h>
-#include "dsa_loop.h"
+#define DSA_LOOP_NUM_PORTS 6
+#define DSA_LOOP_CPU_PORT (DSA_LOOP_NUM_PORTS - 1)
+#define NUM_FIXED_PHYS (DSA_LOOP_NUM_PORTS - 2)
+
+struct dsa_loop_pdata {
+ /* Must be first, such that dsa_register_switch() can access this
+ * without gory pointer manipulations
+ */
+ struct dsa_chip_data cd;
+ const char *name;
+ unsigned int enabled_ports;
+ const char *netdev;
+};
static struct dsa_loop_mib_entry dsa_loop_mibs[] = {
[DSA_LOOP_PHY_READ_OK] = { "phy_read_ok", },
@@ -27,6 +39,7 @@ static struct dsa_loop_mib_entry dsa_loop_mibs[] = {
};
static struct phy_device *phydevs[PHY_MAX_ADDR];
+static struct mdio_device *switch_mdiodev;
enum dsa_loop_devlink_resource_id {
DSA_LOOP_DEVLINK_PARAM_ID_VTU,
@@ -382,17 +395,48 @@ static struct mdio_driver dsa_loop_drv = {
.shutdown = dsa_loop_drv_shutdown,
};
-#define NUM_FIXED_PHYS (DSA_LOOP_NUM_PORTS - 2)
-
static void dsa_loop_phydevs_unregister(void)
{
- unsigned int i;
-
- for (i = 0; i < NUM_FIXED_PHYS; i++)
- if (!IS_ERR(phydevs[i])) {
+ for (int i = 0; i < NUM_FIXED_PHYS; i++) {
+ if (!IS_ERR(phydevs[i]))
fixed_phy_unregister(phydevs[i]);
- phy_device_free(phydevs[i]);
- }
+ }
+}
+
+static int __init dsa_loop_create_switch_mdiodev(void)
+{
+ static struct dsa_loop_pdata dsa_loop_pdata = {
+ .cd = {
+ .port_names[0] = "lan1",
+ .port_names[1] = "lan2",
+ .port_names[2] = "lan3",
+ .port_names[3] = "lan4",
+ .port_names[DSA_LOOP_CPU_PORT] = "cpu",
+ },
+ .name = "DSA mockup driver",
+ .enabled_ports = 0x1f,
+ .netdev = "eth0",
+ };
+ struct mii_bus *bus;
+ int ret = -ENODEV;
+
+ bus = mdio_find_bus("fixed-0");
+ if (WARN_ON(!bus))
+ return ret;
+
+ switch_mdiodev = mdio_device_create(bus, 31);
+ if (IS_ERR(switch_mdiodev))
+ goto out;
+
+ strscpy(switch_mdiodev->modalias, "dsa-loop");
+ switch_mdiodev->dev.platform_data = &dsa_loop_pdata;
+
+ ret = mdio_device_register(switch_mdiodev);
+ if (ret)
+ mdio_device_free(switch_mdiodev);
+out:
+ put_device(&bus->dev);
+ return ret;
}
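With the board-info shim removed, the mock switch creates its MDIO device at module init and must tear it down symmetrically; note also the put_device() on the bus, which drops the reference taken by mdio_find_bus(). The destroy half of the lifecycle, as a sketch:

#include <linux/mdio.h>

/* Teardown mirrors creation: unregister first, then free. */
static void demo_mdiodev_destroy(struct mdio_device *mdiodev)
{
	mdio_device_remove(mdiodev);
	mdio_device_free(mdiodev);
}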
static int __init dsa_loop_init(void)
@@ -402,14 +446,22 @@ static int __init dsa_loop_init(void)
.speed = SPEED_100,
.duplex = DUPLEX_FULL,
};
- unsigned int i, ret;
+ unsigned int i;
+ int ret;
+
+ ret = dsa_loop_create_switch_mdiodev();
+ if (ret)
+ return ret;
for (i = 0; i < NUM_FIXED_PHYS; i++)
phydevs[i] = fixed_phy_register(&status, NULL);
ret = mdio_driver_register(&dsa_loop_drv);
- if (ret)
+ if (ret) {
dsa_loop_phydevs_unregister();
+ mdio_device_remove(switch_mdiodev);
+ mdio_device_free(switch_mdiodev);
+ }
return ret;
}
@@ -419,10 +471,11 @@ static void __exit dsa_loop_exit(void)
{
mdio_driver_unregister(&dsa_loop_drv);
dsa_loop_phydevs_unregister();
+ mdio_device_remove(switch_mdiodev);
+ mdio_device_free(switch_mdiodev);
}
module_exit(dsa_loop_exit);
-MODULE_SOFTDEP("pre: dsa_loop_bdinfo");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Florian Fainelli");
MODULE_DESCRIPTION("DSA loopback driver");
diff --git a/drivers/net/dsa/dsa_loop.h b/drivers/net/dsa/dsa_loop.h
deleted file mode 100644
index 93e5c15d0efd..000000000000
--- a/drivers/net/dsa/dsa_loop.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __DSA_LOOP_H
-#define __DSA_LOOP_H
-
-struct dsa_chip_data;
-
-struct dsa_loop_pdata {
- /* Must be first, such that dsa_register_switch() can access this
- * without gory pointer manipulations
- */
- struct dsa_chip_data cd;
- const char *name;
- unsigned int enabled_ports;
- const char *netdev;
-};
-
-#define DSA_LOOP_NUM_PORTS 6
-#define DSA_LOOP_CPU_PORT (DSA_LOOP_NUM_PORTS - 1)
-
-#endif /* __DSA_LOOP_H */
diff --git a/drivers/net/dsa/dsa_loop_bdinfo.c b/drivers/net/dsa/dsa_loop_bdinfo.c
deleted file mode 100644
index 14ca42491512..000000000000
--- a/drivers/net/dsa/dsa_loop_bdinfo.c
+++ /dev/null
@@ -1,36 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/phy.h>
-#include <net/dsa.h>
-
-#include "dsa_loop.h"
-
-static struct dsa_loop_pdata dsa_loop_pdata = {
- .cd = {
- .port_names[0] = "lan1",
- .port_names[1] = "lan2",
- .port_names[2] = "lan3",
- .port_names[3] = "lan4",
- .port_names[DSA_LOOP_CPU_PORT] = "cpu",
- },
- .name = "DSA mockup driver",
- .enabled_ports = 0x1f,
- .netdev = "eth0",
-};
-
-static const struct mdio_board_info bdinfo = {
- .bus_id = "fixed-0",
- .modalias = "dsa-loop",
- .mdio_addr = 31,
- .platform_data = &dsa_loop_pdata,
-};
-
-static int __init dsa_loop_bdinfo_init(void)
-{
- return mdiobus_register_board_info(&bdinfo, 1);
-}
-arch_initcall(dsa_loop_bdinfo_init)
-
-MODULE_DESCRIPTION("DSA mock-up switch driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/dsa/ks8995.c
index d135b061d810..5c4c83e00477 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/dsa/ks8995.c
@@ -3,6 +3,7 @@
* SPI driver for Micrel/Kendin KS8995M and KSZ8864RMN ethernet switches
*
* Copyright (C) 2008 Gabor Juhos <juhosg at openwrt.org>
+ * Copyright (C) 2025 Linus Walleij <linus.walleij@linaro.org>
*
* This file was based on: drivers/spi/at25.c
* Copyright (C) 2006 David Brownell
@@ -10,6 +11,9 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/bits.h>
+#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -17,8 +21,8 @@
#include <linux/device.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
-
#include <linux/spi/spi.h>
+#include <net/dsa.h>
#define DRV_VERSION "0.1.1"
#define DRV_DESC "Micrel KS8995 Ethernet switch SPI driver"
@@ -29,18 +33,59 @@
#define KS8995_REG_ID1 0x01 /* Chip ID1 */
#define KS8995_REG_GC0 0x02 /* Global Control 0 */
+
+#define KS8995_GC0_P5_PHY BIT(3) /* Port 5 PHY enabled */
+
#define KS8995_REG_GC1 0x03 /* Global Control 1 */
#define KS8995_REG_GC2 0x04 /* Global Control 2 */
+
+#define KS8995_GC2_HUGE BIT(2) /* Huge packet support */
+#define KS8995_GC2_LEGAL BIT(1) /* Legal size override */
+
#define KS8995_REG_GC3 0x05 /* Global Control 3 */
#define KS8995_REG_GC4 0x06 /* Global Control 4 */
+
+#define KS8995_GC4_10BT BIT(4) /* Force switch to 10Mbit */
+#define KS8995_GC4_MII_FLOW BIT(5) /* MII full-duplex flow control enable */
+#define KS8995_GC4_MII_HD BIT(6) /* MII half-duplex mode enable */
+
#define KS8995_REG_GC5 0x07 /* Global Control 5 */
#define KS8995_REG_GC6 0x08 /* Global Control 6 */
#define KS8995_REG_GC7 0x09 /* Global Control 7 */
#define KS8995_REG_GC8 0x0a /* Global Control 8 */
#define KS8995_REG_GC9 0x0b /* Global Control 9 */
-#define KS8995_REG_PC(p, r) ((0x10 * p) + r) /* Port Control */
-#define KS8995_REG_PS(p, r) ((0x10 * p) + r + 0xe) /* Port Status */
+#define KS8995_GC9_SPECIAL BIT(0) /* Special tagging mode (DSA) */
+
+/* In DSA the ports 1-4 are numbered 0-3 and the CPU port is port 4 */
+#define KS8995_REG_PC(p, r) (0x10 + (0x10 * (p)) + (r)) /* Port Control */
+#define KS8995_REG_PS(p, r) (0x1e + (0x10 * (p)) + (r)) /* Port Status */
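+/* For illustration: KS8995_REG_PC(0, KS8995_REG_PC2) evaluates to
+ * 0x10 + 0x00 + 0x02 = 0x12, i.e. Port Control 2 of DSA port 0
+ * (front-panel port 1).
+ */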
+
+#define KS8995_REG_PC0 0x00 /* Port Control 0 */
+#define KS8995_REG_PC1 0x01 /* Port Control 1 */
+#define KS8995_REG_PC2 0x02 /* Port Control 2 */
+#define KS8995_REG_PC3 0x03 /* Port Control 3 */
+#define KS8995_REG_PC4 0x04 /* Port Control 4 */
+#define KS8995_REG_PC5 0x05 /* Port Control 5 */
+#define KS8995_REG_PC6 0x06 /* Port Control 6 */
+#define KS8995_REG_PC7 0x07 /* Port Control 7 */
+#define KS8995_REG_PC8 0x08 /* Port Control 8 */
+#define KS8995_REG_PC9 0x09 /* Port Control 9 */
+#define KS8995_REG_PC10 0x0a /* Port Control 10 */
+#define KS8995_REG_PC11 0x0b /* Port Control 11 */
+#define KS8995_REG_PC12 0x0c /* Port Control 12 */
+#define KS8995_REG_PC13 0x0d /* Port Control 13 */
+
+#define KS8995_PC0_TAG_INS BIT(2) /* Enable tag insertion on port */
+#define KS8995_PC0_TAG_REM BIT(1) /* Enable tag removal on port */
+#define KS8995_PC0_PRIO_EN BIT(0) /* Enable priority handling */
+
+#define KS8995_PC2_TXEN BIT(2) /* Enable TX on port */
+#define KS8995_PC2_RXEN BIT(1) /* Enable RX on port */
+#define KS8995_PC2_LEARN_DIS BIT(0) /* Disable learning on port */
+
+#define KS8995_PC13_TXDIS BIT(6) /* Disable transmitter */
+#define KS8995_PC13_PWDN BIT(3) /* Power down */
#define KS8995_REG_TPC0 0x60 /* TOS Priority Control 0 */
#define KS8995_REG_TPC1 0x61 /* TOS Priority Control 1 */
@@ -91,6 +136,8 @@
#define KS8995_CMD_WRITE 0x02U
#define KS8995_CMD_READ 0x03U
+#define KS8995_CPU_PORT 4
+#define KS8995_NUM_PORTS 5 /* 5 ports including the CPU port */
#define KS8995_RESET_DELAY 10 /* usec */
enum ks8995_chip_variant {
@@ -138,11 +185,14 @@ static const struct ks8995_chip_params ks8995_chip[] = {
struct ks8995_switch {
struct spi_device *spi;
+ struct device *dev;
+ struct dsa_switch *ds;
struct mutex lock;
struct gpio_desc *reset_gpio;
struct bin_attribute regs_attr;
const struct ks8995_chip_params *chip;
int revision_id;
+ unsigned int max_mtu[KS8995_NUM_PORTS];
};
static const struct spi_device_id ks8995_id[] = {
@@ -288,30 +338,6 @@ static int ks8995_reset(struct ks8995_switch *ks)
return ks8995_start(ks);
}
-static ssize_t ks8995_registers_read(struct file *filp, struct kobject *kobj,
- const struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count)
-{
- struct device *dev;
- struct ks8995_switch *ks8995;
-
- dev = kobj_to_dev(kobj);
- ks8995 = dev_get_drvdata(dev);
-
- return ks8995_read(ks8995, buf, off, count);
-}
-
-static ssize_t ks8995_registers_write(struct file *filp, struct kobject *kobj,
- const struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count)
-{
- struct device *dev;
- struct ks8995_switch *ks8995;
-
- dev = kobj_to_dev(kobj);
- ks8995 = dev_get_drvdata(dev);
-
- return ks8995_write(ks8995, buf, off, count);
-}
-
/* ks8995_get_revision - get chip revision
* @ks: pointer to switch instance
*
@@ -395,14 +421,325 @@ err_out:
return err;
}
-static const struct bin_attribute ks8995_registers_attr = {
- .attr = {
- .name = "registers",
- .mode = 0600,
- },
- .size = KS8995_REGS_SIZE,
- .read = ks8995_registers_read,
- .write = ks8995_registers_write,
+static int ks8995_check_config(struct ks8995_switch *ks)
+{
+ int ret;
+ u8 val;
+
+ ret = ks8995_read_reg(ks, KS8995_REG_GC0, &val);
+ if (ret) {
+ dev_err(ks->dev, "failed to read KS8995_REG_GC0\n");
+ return ret;
+ }
+
+ dev_dbg(ks->dev, "port 5 PHY %senabled\n",
+ (val & KS8995_GC0_P5_PHY) ? "" : "not ");
+
+ val |= KS8995_GC0_P5_PHY;
+ ret = ks8995_write_reg(ks, KS8995_REG_GC0, val);
+ if (ret) {
+ dev_err(ks->dev, "failed to set KS8995_REG_GC0\n");
+ return ret;
+ }
+
+ dev_dbg(ks->dev, "set KS8995_REG_GC0 to 0x%02x\n", val);
+
+ return 0;
+}
+
+static void
+ks8995_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+}
+
+static void
+ks8995_mac_link_up(struct phylink_config *config, struct phy_device *phydev,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex, bool tx_pause, bool rx_pause)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ks8995_switch *ks = dp->ds->priv;
+ int port = dp->index;
+ int ret;
+ u8 val;
+
+ /* Allow forcing the mode on the fixed CPU port, no autonegotiation.
+ * We assume autonegotiation works on the PHY-facing ports.
+ */
+ if (port != KS8995_CPU_PORT)
+ return;
+
+ dev_dbg(ks->dev, "MAC link up on CPU port (%d)\n", port);
+
+ ret = ks8995_read_reg(ks, KS8995_REG_GC4, &val);
+ if (ret) {
+ dev_err(ks->dev, "failed to read KS8995_REG_GC4\n");
+ return;
+ }
+
+ /* Force the MII speed setting */
+ switch (speed) {
+ case SPEED_10:
+ dev_dbg(ks->dev, "set switch MII to 10Mbit mode\n");
+ val |= KS8995_GC4_10BT;
+ break;
+ case SPEED_100:
+ default:
+ dev_dbg(ks->dev, "set switch MII to 100Mbit mode\n");
+ val &= ~KS8995_GC4_10BT;
+ break;
+ }
+
+ if (duplex == DUPLEX_HALF) {
+ dev_dbg(ks->dev, "set switch MII to half duplex\n");
+ val |= KS8995_GC4_MII_HD;
+ } else {
+ dev_dbg(ks->dev, "set switch MII to full duplex\n");
+ val &= ~KS8995_GC4_MII_HD;
+ }
+
+ dev_dbg(ks->dev, "set KS8995_REG_GC4 to %02x\n", val);
+
+ /* Enable the CPU port */
+ ret = ks8995_write_reg(ks, KS8995_REG_GC4, val);
+ if (ret)
+ dev_err(ks->dev, "failed to set KS8995_REG_GC4\n");
+}
+
+static void
+ks8995_mac_link_down(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ks8995_switch *ks = dp->ds->priv;
+ int port = dp->index;
+
+ if (port != KS8995_CPU_PORT)
+ return;
+
+ dev_dbg(ks->dev, "MAC link down on CPU port (%d)\n", port);
+
+ /* Disable the CPU port */
+}
+
+static const struct phylink_mac_ops ks8995_phylink_mac_ops = {
+ .mac_config = ks8995_mac_config,
+ .mac_link_up = ks8995_mac_link_up,
+ .mac_link_down = ks8995_mac_link_down,
+};
+
+static enum dsa_tag_protocol
+ks8995_get_tag_protocol(struct dsa_switch *ds,
+ int port,
+ enum dsa_tag_protocol mp)
+{
+ /* The hardware actually uses a 6-byte KS8995 tagging protocol,
+ * but without support for it we fall back to no tagging.
+ */
+ return DSA_TAG_PROTO_NONE;
+}
+
+static int ks8995_setup(struct dsa_switch *ds)
+{
+ return 0;
+}
+
+static int ks8995_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct ks8995_switch *ks = ds->priv;
+
+ dev_dbg(ks->dev, "enable port %d\n", port);
+
+ return 0;
+}
+
+static void ks8995_port_disable(struct dsa_switch *ds, int port)
+{
+ struct ks8995_switch *ks = ds->priv;
+
+ dev_dbg(ks->dev, "disable port %d\n", port);
+}
+
+static int ks8995_port_pre_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ /* We support enabling/disabling learning */
+ if (flags.mask & ~(BR_LEARNING))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ks8995_port_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ struct ks8995_switch *ks = ds->priv;
+ int ret;
+ u8 val;
+
+ if (flags.mask & BR_LEARNING) {
+ ret = ks8995_read_reg(ks, KS8995_REG_PC(port, KS8995_REG_PC2), &val);
+ if (ret) {
+ dev_err(ks->dev, "failed to read KS8995_REG_PC2 on port %d\n", port);
+ return ret;
+ }
+
+ if (flags.val & BR_LEARNING)
+ val &= ~KS8995_PC2_LEARN_DIS;
+ else
+ val |= KS8995_PC2_LEARN_DIS;
+
+ ret = ks8995_write_reg(ks, KS8995_REG_PC(port, KS8995_REG_PC2), val);
+ if (ret) {
+ dev_err(ks->dev, "failed to write KS8995_REG_PC2 on port %d\n", port);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void ks8995_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
+{
+ struct ks8995_switch *ks = ds->priv;
+ int ret;
+ u8 val;
+
+ ret = ks8995_read_reg(ks, KS8995_REG_PC(port, KS8995_REG_PC2), &val);
+ if (ret) {
+ dev_err(ks->dev, "failed to read KS8995_REG_PC2 on port %d\n", port);
+ return;
+ }
+
+ /* Set the bits for the different STP states in accordance with
+ * the datasheet, pages 36-37 "Spanning tree support".
+ */
+ switch (state) {
+ case BR_STATE_DISABLED:
+ case BR_STATE_BLOCKING:
+ case BR_STATE_LISTENING:
+ val &= ~KS8995_PC2_TXEN;
+ val &= ~KS8995_PC2_RXEN;
+ val |= KS8995_PC2_LEARN_DIS;
+ break;
+ case BR_STATE_LEARNING:
+ val &= ~KS8995_PC2_TXEN;
+ val &= ~KS8995_PC2_RXEN;
+ val &= ~KS8995_PC2_LEARN_DIS;
+ break;
+ case BR_STATE_FORWARDING:
+ val |= KS8995_PC2_TXEN;
+ val |= KS8995_PC2_RXEN;
+ val &= ~KS8995_PC2_LEARN_DIS;
+ break;
+ default:
+ dev_err(ks->dev, "unknown bridge state requested\n");
+ return;
+ }
+
+ ret = ks8995_write_reg(ks, KS8995_REG_PC(port, KS8995_REG_PC2), val);
+ if (ret) {
+ dev_err(ks->dev, "failed to write KS8995_REG_PC2 on port %d\n", port);
+ return;
+ }
+
+ dev_dbg(ks->dev, "set KS8995_REG_PC2 for port %d to %02x\n", port, val);
+}
+
+static void ks8995_phylink_get_caps(struct dsa_switch *dsa, int port,
+ struct phylink_config *config)
+{
+ unsigned long *interfaces = config->supported_interfaces;
+
+ if (port == KS8995_CPU_PORT)
+ __set_bit(PHY_INTERFACE_MODE_MII, interfaces);
+
+ if (port <= 3) {
+ /* Internal PHYs */
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL, interfaces);
+ /* phylib default */
+ __set_bit(PHY_INTERFACE_MODE_MII, interfaces);
+ }
+
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100;
+}
+
+/* Huge packet support allows packets of up to 1916 bytes "inclusive",
+ * which means that tags are included. If the bit is not set the limit
+ * is 1536 bytes "inclusive". We present the length without tags or
+ * Ethernet headers. The setting affects all ports.
+ */
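+/* Worked example: an MTU of 1500 on all ports gives a frame limit of
+ * 1500 + VLAN_ETH_HLEN (18) + ETH_FCS_LEN (4) = 1522 bytes, which fits
+ * the default limit, so neither GC2 bit gets set.
+ */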
+static int ks8995_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+{
+ struct ks8995_switch *ks = ds->priv;
+ unsigned int max_mtu;
+ int ret;
+ u8 val;
+ int i;
+
+ ks->max_mtu[port] = new_mtu;
+
+ /* The MTU is a switch-global setting: the largest MTU
+ * configured on any one port becomes the MTU for the
+ * entire switch.
+ */
+ max_mtu = ETH_DATA_LEN;
+ for (i = 0; i < KS8995_NUM_PORTS; i++) {
+ if (ks->max_mtu[i] > max_mtu)
+ max_mtu = ks->max_mtu[i];
+ }
+
+ /* Translate to layer 2 size.
+ * Add the Ethernet header, a possible VLAN tag and the FCS.
+ * For ETH_DATA_LEN (1500 bytes) this adds up to 1522 bytes.
+ */
+ max_mtu += VLAN_ETH_HLEN;
+ max_mtu += ETH_FCS_LEN;
+
+ ret = ks8995_read_reg(ks, KS8995_REG_GC2, &val);
+ if (ret) {
+ dev_err(ks->dev, "failed to read KS8995_REG_GC2\n");
+ return ret;
+ }
+
+ if (max_mtu <= 1522) {
+ val &= ~KS8995_GC2_HUGE;
+ val &= ~KS8995_GC2_LEGAL;
+ } else if (max_mtu > 1522 && max_mtu <= 1536) {
+ /* This accepts packets up to 1536 bytes */
+ val &= ~KS8995_GC2_HUGE;
+ val |= KS8995_GC2_LEGAL;
+ } else {
+ /* This accepts packets up to 1916 bytes */
+ val |= KS8995_GC2_HUGE;
+ val |= KS8995_GC2_LEGAL;
+ }
+
+ dev_dbg(ks->dev, "new max MTU %d bytes (inclusive)\n", max_mtu);
+
+ ret = ks8995_write_reg(ks, KS8995_REG_GC2, val);
+ if (ret)
+ dev_err(ks->dev, "failed to set KS8995_REG_GC2\n");
+
+ return ret;
+}
+
+static int ks8995_get_max_mtu(struct dsa_switch *ds, int port)
+{
+ return 1916 - ETH_HLEN - ETH_FCS_LEN;
+}
+
+static const struct dsa_switch_ops ks8995_ds_ops = {
+ .get_tag_protocol = ks8995_get_tag_protocol,
+ .setup = ks8995_setup,
+ .port_pre_bridge_flags = ks8995_port_pre_bridge_flags,
+ .port_bridge_flags = ks8995_port_bridge_flags,
+ .port_enable = ks8995_port_enable,
+ .port_disable = ks8995_port_disable,
+ .port_stp_state_set = ks8995_port_stp_state_set,
+ .port_change_mtu = ks8995_change_mtu,
+ .port_max_mtu = ks8995_get_max_mtu,
+ .phylink_get_caps = ks8995_phylink_get_caps,
};
/* ------------------------------------------------------------------------ */
@@ -423,6 +760,7 @@ static int ks8995_probe(struct spi_device *spi)
mutex_init(&ks->lock);
ks->spi = spi;
+ ks->dev = &spi->dev;
ks->chip = &ks8995_chip[variant];
ks->reset_gpio = devm_gpiod_get_optional(&spi->dev, "reset",
@@ -438,9 +776,15 @@ static int ks8995_probe(struct spi_device *spi)
if (err)
return err;
- /* de-assert switch reset */
- /* FIXME: this likely requires a delay */
- gpiod_set_value_cansleep(ks->reset_gpio, 0);
+ if (ks->reset_gpio) {
+ /*
+ * If a reset line was obtained, wait for 100us after
+ * de-asserting RESET before accessing any registers, see
+ * the KS8995MA datasheet, page 44.
+ */
+ gpiod_set_value_cansleep(ks->reset_gpio, 0);
+ udelay(100);
+ }
spi_set_drvdata(spi, ks);
@@ -456,24 +800,32 @@ static int ks8995_probe(struct spi_device *spi)
if (err)
return err;
- memcpy(&ks->regs_attr, &ks8995_registers_attr, sizeof(ks->regs_attr));
- ks->regs_attr.size = ks->chip->regs_size;
-
err = ks8995_reset(ks);
if (err)
return err;
- sysfs_attr_init(&ks->regs_attr.attr);
- err = sysfs_create_bin_file(&spi->dev.kobj, &ks->regs_attr);
- if (err) {
- dev_err(&spi->dev, "unable to create sysfs file, err=%d\n",
- err);
- return err;
- }
-
dev_info(&spi->dev, "%s device found, Chip ID:%x, Revision:%x\n",
ks->chip->name, ks->chip->chip_id, ks->revision_id);
+ err = ks8995_check_config(ks);
+ if (err)
+ return err;
+
+ ks->ds = devm_kzalloc(&spi->dev, sizeof(*ks->ds), GFP_KERNEL);
+ if (!ks->ds)
+ return -ENOMEM;
+
+ ks->ds->dev = &spi->dev;
+ ks->ds->num_ports = KS8995_NUM_PORTS;
+ ks->ds->ops = &ks8995_ds_ops;
+ ks->ds->phylink_mac_ops = &ks8995_phylink_mac_ops;
+ ks->ds->priv = ks;
+
+ err = dsa_register_switch(ks->ds);
+ if (err)
+ return dev_err_probe(&spi->dev, err,
+ "unable to register DSA switch\n");
+
return 0;
}
@@ -481,8 +833,7 @@ static void ks8995_remove(struct spi_device *spi)
{
struct ks8995_switch *ks = spi_get_drvdata(spi);
- sysfs_remove_bin_file(&spi->dev.kobj, &ks->regs_attr);
-
+ dsa_unregister_switch(ks->ds);
/* assert reset */
gpiod_set_value_cansleep(ks->reset_gpio, 1);
}
diff --git a/drivers/net/dsa/lantiq/Kconfig b/drivers/net/dsa/lantiq/Kconfig
new file mode 100644
index 000000000000..1cb053c823f7
--- /dev/null
+++ b/drivers/net/dsa/lantiq/Kconfig
@@ -0,0 +1,7 @@
+config NET_DSA_LANTIQ_GSWIP
+ tristate "Lantiq / Intel GSWIP"
+ depends on HAS_IOMEM
+ select NET_DSA_TAG_GSWIP
+ help
+ This enables support for the Lantiq / Intel GSWIP 2.1 found in
+ the xrx200 / VR9 SoC.
diff --git a/drivers/net/dsa/lantiq/Makefile b/drivers/net/dsa/lantiq/Makefile
new file mode 100644
index 000000000000..849f85ebebd6
--- /dev/null
+++ b/drivers/net/dsa/lantiq/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_NET_DSA_LANTIQ_GSWIP) += lantiq_gswip.o
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq/lantiq_gswip.c
index 84dc6e517acf..2169c0814a48 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq/lantiq_gswip.c
@@ -25,7 +25,9 @@
* between all LAN ports by default.
*/
-#include <linux/clk.h>
+#include "lantiq_gswip.h"
+#include "lantiq_pce.h"
+
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
@@ -39,258 +41,13 @@
#include <linux/of_platform.h>
#include <linux/phy.h>
#include <linux/phylink.h>
-#include <linux/platform_device.h>
-#include <linux/regmap.h>
-#include <linux/reset.h>
-#include <net/dsa.h>
#include <dt-bindings/mips/lantiq_rcu_gphy.h>
-#include "lantiq_pce.h"
-
-/* GSWIP MDIO Registers */
-#define GSWIP_MDIO_GLOB 0x00
-#define GSWIP_MDIO_GLOB_ENABLE BIT(15)
-#define GSWIP_MDIO_CTRL 0x08
-#define GSWIP_MDIO_CTRL_BUSY BIT(12)
-#define GSWIP_MDIO_CTRL_RD BIT(11)
-#define GSWIP_MDIO_CTRL_WR BIT(10)
-#define GSWIP_MDIO_CTRL_PHYAD_MASK 0x1f
-#define GSWIP_MDIO_CTRL_PHYAD_SHIFT 5
-#define GSWIP_MDIO_CTRL_REGAD_MASK 0x1f
-#define GSWIP_MDIO_READ 0x09
-#define GSWIP_MDIO_WRITE 0x0A
-#define GSWIP_MDIO_MDC_CFG0 0x0B
-#define GSWIP_MDIO_MDC_CFG1 0x0C
-#define GSWIP_MDIO_PHYp(p) (0x15 - (p))
-#define GSWIP_MDIO_PHY_LINK_MASK 0x6000
-#define GSWIP_MDIO_PHY_LINK_AUTO 0x0000
-#define GSWIP_MDIO_PHY_LINK_DOWN 0x4000
-#define GSWIP_MDIO_PHY_LINK_UP 0x2000
-#define GSWIP_MDIO_PHY_SPEED_MASK 0x1800
-#define GSWIP_MDIO_PHY_SPEED_AUTO 0x1800
-#define GSWIP_MDIO_PHY_SPEED_M10 0x0000
-#define GSWIP_MDIO_PHY_SPEED_M100 0x0800
-#define GSWIP_MDIO_PHY_SPEED_G1 0x1000
-#define GSWIP_MDIO_PHY_FDUP_MASK 0x0600
-#define GSWIP_MDIO_PHY_FDUP_AUTO 0x0000
-#define GSWIP_MDIO_PHY_FDUP_EN 0x0200
-#define GSWIP_MDIO_PHY_FDUP_DIS 0x0600
-#define GSWIP_MDIO_PHY_FCONTX_MASK 0x0180
-#define GSWIP_MDIO_PHY_FCONTX_AUTO 0x0000
-#define GSWIP_MDIO_PHY_FCONTX_EN 0x0100
-#define GSWIP_MDIO_PHY_FCONTX_DIS 0x0180
-#define GSWIP_MDIO_PHY_FCONRX_MASK 0x0060
-#define GSWIP_MDIO_PHY_FCONRX_AUTO 0x0000
-#define GSWIP_MDIO_PHY_FCONRX_EN 0x0020
-#define GSWIP_MDIO_PHY_FCONRX_DIS 0x0060
-#define GSWIP_MDIO_PHY_ADDR_MASK 0x001f
-#define GSWIP_MDIO_PHY_MASK (GSWIP_MDIO_PHY_ADDR_MASK | \
- GSWIP_MDIO_PHY_FCONRX_MASK | \
- GSWIP_MDIO_PHY_FCONTX_MASK | \
- GSWIP_MDIO_PHY_LINK_MASK | \
- GSWIP_MDIO_PHY_SPEED_MASK | \
- GSWIP_MDIO_PHY_FDUP_MASK)
-
-/* GSWIP MII Registers */
-#define GSWIP_MII_CFGp(p) (0x2 * (p))
-#define GSWIP_MII_CFG_RESET BIT(15)
-#define GSWIP_MII_CFG_EN BIT(14)
-#define GSWIP_MII_CFG_ISOLATE BIT(13)
-#define GSWIP_MII_CFG_LDCLKDIS BIT(12)
-#define GSWIP_MII_CFG_RGMII_IBS BIT(8)
-#define GSWIP_MII_CFG_RMII_CLK BIT(7)
-#define GSWIP_MII_CFG_MODE_MIIP 0x0
-#define GSWIP_MII_CFG_MODE_MIIM 0x1
-#define GSWIP_MII_CFG_MODE_RMIIP 0x2
-#define GSWIP_MII_CFG_MODE_RMIIM 0x3
-#define GSWIP_MII_CFG_MODE_RGMII 0x4
-#define GSWIP_MII_CFG_MODE_GMII 0x9
-#define GSWIP_MII_CFG_MODE_MASK 0xf
-#define GSWIP_MII_CFG_RATE_M2P5 0x00
-#define GSWIP_MII_CFG_RATE_M25 0x10
-#define GSWIP_MII_CFG_RATE_M125 0x20
-#define GSWIP_MII_CFG_RATE_M50 0x30
-#define GSWIP_MII_CFG_RATE_AUTO 0x40
-#define GSWIP_MII_CFG_RATE_MASK 0x70
-#define GSWIP_MII_PCDU0 0x01
-#define GSWIP_MII_PCDU1 0x03
-#define GSWIP_MII_PCDU5 0x05
-#define GSWIP_MII_PCDU_TXDLY_MASK GENMASK(2, 0)
-#define GSWIP_MII_PCDU_RXDLY_MASK GENMASK(9, 7)
-
-/* GSWIP Core Registers */
-#define GSWIP_SWRES 0x000
-#define GSWIP_SWRES_R1 BIT(1) /* GSWIP Software reset */
-#define GSWIP_SWRES_R0 BIT(0) /* GSWIP Hardware reset */
-#define GSWIP_VERSION 0x013
-#define GSWIP_VERSION_REV_SHIFT 0
-#define GSWIP_VERSION_REV_MASK GENMASK(7, 0)
-#define GSWIP_VERSION_MOD_SHIFT 8
-#define GSWIP_VERSION_MOD_MASK GENMASK(15, 8)
-#define GSWIP_VERSION_2_0 0x100
-#define GSWIP_VERSION_2_1 0x021
-#define GSWIP_VERSION_2_2 0x122
-#define GSWIP_VERSION_2_2_ETC 0x022
-
-#define GSWIP_BM_RAM_VAL(x) (0x043 - (x))
-#define GSWIP_BM_RAM_ADDR 0x044
-#define GSWIP_BM_RAM_CTRL 0x045
-#define GSWIP_BM_RAM_CTRL_BAS BIT(15)
-#define GSWIP_BM_RAM_CTRL_OPMOD BIT(5)
-#define GSWIP_BM_RAM_CTRL_ADDR_MASK GENMASK(4, 0)
-#define GSWIP_BM_QUEUE_GCTRL 0x04A
-#define GSWIP_BM_QUEUE_GCTRL_GL_MOD BIT(10)
-/* buffer management Port Configuration Register */
-#define GSWIP_BM_PCFGp(p) (0x080 + ((p) * 2))
-#define GSWIP_BM_PCFG_CNTEN BIT(0) /* RMON Counter Enable */
-#define GSWIP_BM_PCFG_IGCNT BIT(1) /* Ingres Special Tag RMON count */
-/* buffer management Port Control Register */
-#define GSWIP_BM_RMON_CTRLp(p) (0x81 + ((p) * 2))
-#define GSWIP_BM_CTRL_RMON_RAM1_RES BIT(0) /* Software Reset for RMON RAM 1 */
-#define GSWIP_BM_CTRL_RMON_RAM2_RES BIT(1) /* Software Reset for RMON RAM 2 */
-
-/* PCE */
-#define GSWIP_PCE_TBL_KEY(x) (0x447 - (x))
-#define GSWIP_PCE_TBL_MASK 0x448
-#define GSWIP_PCE_TBL_VAL(x) (0x44D - (x))
-#define GSWIP_PCE_TBL_ADDR 0x44E
-#define GSWIP_PCE_TBL_CTRL 0x44F
-#define GSWIP_PCE_TBL_CTRL_BAS BIT(15)
-#define GSWIP_PCE_TBL_CTRL_TYPE BIT(13)
-#define GSWIP_PCE_TBL_CTRL_VLD BIT(12)
-#define GSWIP_PCE_TBL_CTRL_KEYFORM BIT(11)
-#define GSWIP_PCE_TBL_CTRL_GMAP_MASK GENMASK(10, 7)
-#define GSWIP_PCE_TBL_CTRL_OPMOD_MASK GENMASK(6, 5)
-#define GSWIP_PCE_TBL_CTRL_OPMOD_ADRD 0x00
-#define GSWIP_PCE_TBL_CTRL_OPMOD_ADWR 0x20
-#define GSWIP_PCE_TBL_CTRL_OPMOD_KSRD 0x40
-#define GSWIP_PCE_TBL_CTRL_OPMOD_KSWR 0x60
-#define GSWIP_PCE_TBL_CTRL_ADDR_MASK GENMASK(4, 0)
-#define GSWIP_PCE_PMAP1 0x453 /* Monitoring port map */
-#define GSWIP_PCE_PMAP2 0x454 /* Default Multicast port map */
-#define GSWIP_PCE_PMAP3 0x455 /* Default Unknown Unicast port map */
-#define GSWIP_PCE_GCTRL_0 0x456
-#define GSWIP_PCE_GCTRL_0_MTFL BIT(0) /* MAC Table Flushing */
-#define GSWIP_PCE_GCTRL_0_MC_VALID BIT(3)
-#define GSWIP_PCE_GCTRL_0_VLAN BIT(14) /* VLAN aware Switching */
-#define GSWIP_PCE_GCTRL_1 0x457
-#define GSWIP_PCE_GCTRL_1_MAC_GLOCK BIT(2) /* MAC Address table lock */
-#define GSWIP_PCE_GCTRL_1_MAC_GLOCK_MOD BIT(3) /* Mac address table lock forwarding mode */
-#define GSWIP_PCE_PCTRL_0p(p) (0x480 + ((p) * 0xA))
-#define GSWIP_PCE_PCTRL_0_TVM BIT(5) /* Transparent VLAN mode */
-#define GSWIP_PCE_PCTRL_0_VREP BIT(6) /* VLAN Replace Mode */
-#define GSWIP_PCE_PCTRL_0_INGRESS BIT(11) /* Accept special tag in ingress */
-#define GSWIP_PCE_PCTRL_0_PSTATE_LISTEN 0x0
-#define GSWIP_PCE_PCTRL_0_PSTATE_RX 0x1
-#define GSWIP_PCE_PCTRL_0_PSTATE_TX 0x2
-#define GSWIP_PCE_PCTRL_0_PSTATE_LEARNING 0x3
-#define GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING 0x7
-#define GSWIP_PCE_PCTRL_0_PSTATE_MASK GENMASK(2, 0)
-#define GSWIP_PCE_VCTRL(p) (0x485 + ((p) * 0xA))
-#define GSWIP_PCE_VCTRL_UVR BIT(0) /* Unknown VLAN Rule */
-#define GSWIP_PCE_VCTRL_VIMR BIT(3) /* VLAN Ingress Member violation rule */
-#define GSWIP_PCE_VCTRL_VEMR BIT(4) /* VLAN Egress Member violation rule */
-#define GSWIP_PCE_VCTRL_VSR BIT(5) /* VLAN Security */
-#define GSWIP_PCE_VCTRL_VID0 BIT(6) /* Priority Tagged Rule */
-#define GSWIP_PCE_DEFPVID(p) (0x486 + ((p) * 0xA))
-
-#define GSWIP_MAC_FLEN 0x8C5
-#define GSWIP_MAC_CTRL_0p(p) (0x903 + ((p) * 0xC))
-#define GSWIP_MAC_CTRL_0_PADEN BIT(8)
-#define GSWIP_MAC_CTRL_0_FCS_EN BIT(7)
-#define GSWIP_MAC_CTRL_0_FCON_MASK 0x0070
-#define GSWIP_MAC_CTRL_0_FCON_AUTO 0x0000
-#define GSWIP_MAC_CTRL_0_FCON_RX 0x0010
-#define GSWIP_MAC_CTRL_0_FCON_TX 0x0020
-#define GSWIP_MAC_CTRL_0_FCON_RXTX 0x0030
-#define GSWIP_MAC_CTRL_0_FCON_NONE 0x0040
-#define GSWIP_MAC_CTRL_0_FDUP_MASK 0x000C
-#define GSWIP_MAC_CTRL_0_FDUP_AUTO 0x0000
-#define GSWIP_MAC_CTRL_0_FDUP_EN 0x0004
-#define GSWIP_MAC_CTRL_0_FDUP_DIS 0x000C
-#define GSWIP_MAC_CTRL_0_GMII_MASK 0x0003
-#define GSWIP_MAC_CTRL_0_GMII_AUTO 0x0000
-#define GSWIP_MAC_CTRL_0_GMII_MII 0x0001
-#define GSWIP_MAC_CTRL_0_GMII_RGMII 0x0002
-#define GSWIP_MAC_CTRL_2p(p) (0x905 + ((p) * 0xC))
-#define GSWIP_MAC_CTRL_2_LCHKL BIT(2) /* Frame Length Check Long Enable */
-#define GSWIP_MAC_CTRL_2_MLEN BIT(3) /* Maximum Untagged Frame Lnegth */
-
-/* Ethernet Switch Fetch DMA Port Control Register */
-#define GSWIP_FDMA_PCTRLp(p) (0xA80 + ((p) * 0x6))
-#define GSWIP_FDMA_PCTRL_EN BIT(0) /* FDMA Port Enable */
-#define GSWIP_FDMA_PCTRL_STEN BIT(1) /* Special Tag Insertion Enable */
-#define GSWIP_FDMA_PCTRL_VLANMOD_MASK GENMASK(4, 3) /* VLAN Modification Control */
-#define GSWIP_FDMA_PCTRL_VLANMOD_SHIFT 3 /* VLAN Modification Control */
-#define GSWIP_FDMA_PCTRL_VLANMOD_DIS (0x0 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
-#define GSWIP_FDMA_PCTRL_VLANMOD_PRIO (0x1 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
-#define GSWIP_FDMA_PCTRL_VLANMOD_ID (0x2 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
-#define GSWIP_FDMA_PCTRL_VLANMOD_BOTH (0x3 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
-
-/* Ethernet Switch Store DMA Port Control Register */
-#define GSWIP_SDMA_PCTRLp(p) (0xBC0 + ((p) * 0x6))
-#define GSWIP_SDMA_PCTRL_EN BIT(0) /* SDMA Port Enable */
-#define GSWIP_SDMA_PCTRL_FCEN BIT(1) /* Flow Control Enable */
-#define GSWIP_SDMA_PCTRL_PAUFWD BIT(3) /* Pause Frame Forwarding */
-
-#define GSWIP_TABLE_ACTIVE_VLAN 0x01
-#define GSWIP_TABLE_VLAN_MAPPING 0x02
-#define GSWIP_TABLE_MAC_BRIDGE 0x0b
-#define GSWIP_TABLE_MAC_BRIDGE_KEY3_FID GENMASK(5, 0) /* Filtering identifier */
-#define GSWIP_TABLE_MAC_BRIDGE_VAL0_PORT GENMASK(7, 4) /* Port on learned entries */
-#define GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC BIT(0) /* Static, non-aging entry */
-
-#define XRX200_GPHY_FW_ALIGN (16 * 1024)
-
-/* Maximum packet size supported by the switch. In theory this should be 10240,
- * but long packets currently cause lock-ups with an MTU of over 2526. Medium
- * packets are sometimes dropped (e.g. TCP over 2477, UDP over 2516-2519, ICMP
- * over 2526), hence an MTU value of 2400 seems safe. This issue only affects
- * packet reception. This is probably caused by the PPA engine, which is on the
- * RX part of the device. Packet transmission works properly up to 10240.
- */
-#define GSWIP_MAX_PACKET_LENGTH 2400
-
-struct gswip_hw_info {
- int max_ports;
- int cpu_port;
- const struct dsa_switch_ops *ops;
-};
-
struct xway_gphy_match_data {
char *fe_firmware_name;
char *ge_firmware_name;
};
-struct gswip_gphy_fw {
- struct clk *clk_gate;
- struct reset_control *reset;
- u32 fw_addr_offset;
- char *fw_name;
-};
-
-struct gswip_vlan {
- struct net_device *bridge;
- u16 vid;
- u8 fid;
-};
-
-struct gswip_priv {
- __iomem void *gswip;
- __iomem void *mdio;
- __iomem void *mii;
- const struct gswip_hw_info *hw_info;
- const struct xway_gphy_match_data *gphy_fw_name_cfg;
- struct dsa_switch *ds;
- struct device *dev;
- struct regmap *rcu_regmap;
- struct gswip_vlan vlans[64];
- int num_gphy_fw;
- struct gswip_gphy_fw *gphy_fw;
- u32 port_vlan_filter;
- struct mutex pce_table_lock;
-};
-
struct gswip_pce_table_entry {
u16 index; // PCE_TBL_ADDR.ADDR = pData->table_index
u16 table; // PCE_TBL_CTRL.ADDR = pData->table
@@ -426,15 +183,29 @@ static void gswip_mii_mask(struct gswip_priv *priv, u32 clear, u32 set,
static void gswip_mii_mask_cfg(struct gswip_priv *priv, u32 clear, u32 set,
int port)
{
- /* There's no MII_CFG register for the CPU port */
- if (!dsa_is_cpu_port(priv->ds, port))
- gswip_mii_mask(priv, clear, set, GSWIP_MII_CFGp(port));
+ int reg_port;
+
+ /* MII_CFG register only exists for MII ports */
+ if (!(priv->hw_info->mii_ports & BIT(port)))
+ return;
+
+ reg_port = port + priv->hw_info->mii_port_reg_offset;
+
+ gswip_mii_mask(priv, clear, set, GSWIP_MII_CFGp(reg_port));
}
static void gswip_mii_mask_pcdu(struct gswip_priv *priv, u32 clear, u32 set,
int port)
{
- switch (port) {
+ int reg_port;
+
+ /* MII_PCDU register only exists for MII ports */
+ if (!(priv->hw_info->mii_ports & BIT(port)))
+ return;
+
+ reg_port = port + priv->hw_info->mii_port_reg_offset;
+
+ switch (reg_port) {
case 0:
gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU0);
break;
@@ -515,6 +286,9 @@ static int gswip_mdio(struct gswip_priv *priv)
int err = 0;
mdio_np = of_get_compatible_child(switch_np, "lantiq,xrx200-mdio");
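+ /* Also accept a child node named just "mdio"; presumably for device
+ * trees that use the generic node name rather than the
+ * "lantiq,xrx200-mdio" compatible.
+ */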
+ if (!mdio_np)
+ mdio_np = of_get_child_by_name(switch_np, "mdio");
+
if (!of_device_is_available(mdio_np))
goto out_put_node;
@@ -654,7 +428,6 @@ static int gswip_add_single_port_br(struct gswip_priv *priv, int port, bool add)
{
struct gswip_pce_table_entry vlan_active = {0,};
struct gswip_pce_table_entry vlan_mapping = {0,};
- unsigned int cpu_port = priv->hw_info->cpu_port;
int err;
vlan_active.index = port + 1;
@@ -674,7 +447,7 @@ static int gswip_add_single_port_br(struct gswip_priv *priv, int port, bool add)
vlan_mapping.index = port + 1;
vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
vlan_mapping.val[0] = 0 /* vid */;
- vlan_mapping.val[1] = BIT(port) | BIT(cpu_port);
+ vlan_mapping.val[1] = BIT(port) | dsa_cpu_ports(priv->ds);
vlan_mapping.val[2] = 0;
err = gswip_pce_table_entry_write(priv, &vlan_mapping);
if (err) {
@@ -747,15 +520,15 @@ static int gswip_pce_load_microcode(struct gswip_priv *priv)
GSWIP_PCE_TBL_CTRL_OPMOD_ADWR, GSWIP_PCE_TBL_CTRL);
gswip_switch_w(priv, 0, GSWIP_PCE_TBL_MASK);
- for (i = 0; i < ARRAY_SIZE(gswip_pce_microcode); i++) {
+ for (i = 0; i < priv->hw_info->pce_microcode_size; i++) {
gswip_switch_w(priv, i, GSWIP_PCE_TBL_ADDR);
- gswip_switch_w(priv, gswip_pce_microcode[i].val_0,
+ gswip_switch_w(priv, (*priv->hw_info->pce_microcode)[i].val_0,
GSWIP_PCE_TBL_VAL(0));
- gswip_switch_w(priv, gswip_pce_microcode[i].val_1,
+ gswip_switch_w(priv, (*priv->hw_info->pce_microcode)[i].val_1,
GSWIP_PCE_TBL_VAL(1));
- gswip_switch_w(priv, gswip_pce_microcode[i].val_2,
+ gswip_switch_w(priv, (*priv->hw_info->pce_microcode)[i].val_2,
GSWIP_PCE_TBL_VAL(2));
- gswip_switch_w(priv, gswip_pce_microcode[i].val_3,
+ gswip_switch_w(priv, (*priv->hw_info->pce_microcode)[i].val_3,
GSWIP_PCE_TBL_VAL(3));
/* start the table access: */
@@ -813,10 +586,10 @@ static int gswip_port_vlan_filtering(struct dsa_switch *ds, int port,
static int gswip_setup(struct dsa_switch *ds)
{
+ unsigned int cpu_ports = dsa_cpu_ports(ds);
struct gswip_priv *priv = ds->priv;
- unsigned int cpu_port = priv->hw_info->cpu_port;
- int i;
- int err;
+ struct dsa_port *cpu_dp;
+ int err, i;
gswip_switch_w(priv, GSWIP_SWRES_R0, GSWIP_SWRES);
usleep_range(5000, 10000);
@@ -838,9 +611,9 @@ static int gswip_setup(struct dsa_switch *ds)
}
/* Default unknown Broadcast/Multicast/Unicast port maps */
- gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP1);
- gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP2);
- gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP3);
+ gswip_switch_w(priv, cpu_ports, GSWIP_PCE_PMAP1);
+ gswip_switch_w(priv, cpu_ports, GSWIP_PCE_PMAP2);
+ gswip_switch_w(priv, cpu_ports, GSWIP_PCE_PMAP3);
/* Deactivate MDIO PHY auto polling. Some PHYs as the AR8030 have an
* interoperability problem with this auto polling mechanism because
@@ -863,19 +636,28 @@ static int gswip_setup(struct dsa_switch *ds)
/* Configure the MDIO Clock 2.5 MHz */
gswip_mdio_mask(priv, 0xff, 0x09, GSWIP_MDIO_MDC_CFG1);
+ /* bring up the mdio bus */
+ err = gswip_mdio(priv);
+ if (err) {
+ dev_err(priv->dev, "mdio bus setup failed\n");
+ return err;
+ }
+
 	/* Disable the xMII interface and clear its isolation bit */
for (i = 0; i < priv->hw_info->max_ports; i++)
gswip_mii_mask_cfg(priv,
GSWIP_MII_CFG_EN | GSWIP_MII_CFG_ISOLATE,
0, i);
- /* enable special tag insertion on cpu port */
- gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_STEN,
- GSWIP_FDMA_PCTRLp(cpu_port));
+ dsa_switch_for_each_cpu_port(cpu_dp, ds) {
+ /* enable special tag insertion on cpu port */
+ gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_STEN,
+ GSWIP_FDMA_PCTRLp(cpu_dp->index));
- /* accept special tag in ingress direction */
- gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_INGRESS,
- GSWIP_PCE_PCTRL_0p(cpu_port));
+ /* accept special tag in ingress direction */
+ gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_INGRESS,
+ GSWIP_PCE_PCTRL_0p(cpu_dp->index));
+ }
gswip_switch_mask(priv, 0, GSWIP_BM_QUEUE_GCTRL_GL_MOD,
GSWIP_BM_QUEUE_GCTRL);
@@ -904,7 +686,9 @@ static enum dsa_tag_protocol gswip_get_tag_protocol(struct dsa_switch *ds,
int port,
enum dsa_tag_protocol mp)
{
- return DSA_TAG_PROTO_GSWIP;
+ struct gswip_priv *priv = ds->priv;
+
+ return priv->hw_info->tag_protocol;
}
static int gswip_vlan_active_create(struct gswip_priv *priv,
@@ -971,7 +755,6 @@ static int gswip_vlan_add_unaware(struct gswip_priv *priv,
{
struct gswip_pce_table_entry vlan_mapping = {0,};
unsigned int max_ports = priv->hw_info->max_ports;
- unsigned int cpu_port = priv->hw_info->cpu_port;
bool active_vlan_created = false;
int idx = -1;
int i;
@@ -1011,7 +794,7 @@ static int gswip_vlan_add_unaware(struct gswip_priv *priv,
}
/* Update the VLAN mapping entry and write it to the switch */
- vlan_mapping.val[1] |= BIT(cpu_port);
+ vlan_mapping.val[1] |= dsa_cpu_ports(priv->ds);
vlan_mapping.val[1] |= BIT(port);
err = gswip_pce_table_entry_write(priv, &vlan_mapping);
if (err) {
@@ -1033,7 +816,7 @@ static int gswip_vlan_add_aware(struct gswip_priv *priv,
{
struct gswip_pce_table_entry vlan_mapping = {0,};
unsigned int max_ports = priv->hw_info->max_ports;
- unsigned int cpu_port = priv->hw_info->cpu_port;
+ unsigned int cpu_ports = dsa_cpu_ports(priv->ds);
bool active_vlan_created = false;
int idx = -1;
int fid = -1;
@@ -1080,8 +863,8 @@ static int gswip_vlan_add_aware(struct gswip_priv *priv,
vlan_mapping.val[0] = vid;
/* Update the VLAN mapping entry and write it to the switch */
- vlan_mapping.val[1] |= BIT(cpu_port);
- vlan_mapping.val[2] |= BIT(cpu_port);
+ vlan_mapping.val[1] |= cpu_ports;
+ vlan_mapping.val[2] |= cpu_ports;
vlan_mapping.val[1] |= BIT(port);
if (untagged)
vlan_mapping.val[2] &= ~BIT(port);
@@ -1108,7 +891,6 @@ static int gswip_vlan_remove(struct gswip_priv *priv,
{
struct gswip_pce_table_entry vlan_mapping = {0,};
unsigned int max_ports = priv->hw_info->max_ports;
- unsigned int cpu_port = priv->hw_info->cpu_port;
int idx = -1;
int i;
int err;
@@ -1144,7 +926,7 @@ static int gswip_vlan_remove(struct gswip_priv *priv,
}
/* In case all ports are removed from the bridge, remove the VLAN */
- if ((vlan_mapping.val[1] & ~BIT(cpu_port)) == 0) {
+ if (!(vlan_mapping.val[1] & ~dsa_cpu_ports(priv->ds))) {
err = gswip_vlan_active_remove(priv, idx);
if (err) {
dev_err(priv->dev, "failed to write active VLAN: %d\n",
@@ -1564,6 +1346,14 @@ static void gswip_xrx300_phylink_get_caps(struct dsa_switch *ds, int port,
MAC_10 | MAC_100 | MAC_1000;
}
+static void gswip_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ struct gswip_priv *priv = ds->priv;
+
+ priv->hw_info->phylink_get_caps(ds, port, config);
+}
+
static void gswip_port_set_link(struct gswip_priv *priv, int port, bool link)
{
u32 mdio_phy;
@@ -1682,6 +1472,10 @@ static void gswip_phylink_mac_config(struct phylink_config *config,
miicfg |= GSWIP_MII_CFG_LDCLKDIS;
switch (state->interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ return;
case PHY_INTERFACE_MODE_MII:
case PHY_INTERFACE_MODE_INTERNAL:
miicfg |= GSWIP_MII_CFG_MODE_MIIM;
@@ -1830,13 +1624,26 @@ static int gswip_get_sset_count(struct dsa_switch *ds, int port, int sset)
return ARRAY_SIZE(gswip_rmon_cnt);
}
+static struct phylink_pcs *gswip_phylink_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct gswip_priv *priv = dp->ds->priv;
+
+ if (priv->hw_info->mac_select_pcs)
+ return priv->hw_info->mac_select_pcs(config, interface);
+
+ return NULL;
+}
+
static const struct phylink_mac_ops gswip_phylink_mac_ops = {
- .mac_config = gswip_phylink_mac_config,
- .mac_link_down = gswip_phylink_mac_link_down,
- .mac_link_up = gswip_phylink_mac_link_up,
+ .mac_config = gswip_phylink_mac_config,
+ .mac_link_down = gswip_phylink_mac_link_down,
+ .mac_link_up = gswip_phylink_mac_link_up,
+ .mac_select_pcs = gswip_phylink_mac_select_pcs,
};
-static const struct dsa_switch_ops gswip_xrx200_switch_ops = {
+static const struct dsa_switch_ops gswip_switch_ops = {
.get_tag_protocol = gswip_get_tag_protocol,
.setup = gswip_setup,
.port_setup = gswip_port_setup,
@@ -1854,30 +1661,7 @@ static const struct dsa_switch_ops gswip_xrx200_switch_ops = {
.port_fdb_dump = gswip_port_fdb_dump,
.port_change_mtu = gswip_port_change_mtu,
.port_max_mtu = gswip_port_max_mtu,
- .phylink_get_caps = gswip_xrx200_phylink_get_caps,
- .get_strings = gswip_get_strings,
- .get_ethtool_stats = gswip_get_ethtool_stats,
- .get_sset_count = gswip_get_sset_count,
-};
-
-static const struct dsa_switch_ops gswip_xrx300_switch_ops = {
- .get_tag_protocol = gswip_get_tag_protocol,
- .setup = gswip_setup,
- .port_enable = gswip_port_enable,
- .port_disable = gswip_port_disable,
- .port_bridge_join = gswip_port_bridge_join,
- .port_bridge_leave = gswip_port_bridge_leave,
- .port_fast_age = gswip_port_fast_age,
- .port_vlan_filtering = gswip_port_vlan_filtering,
- .port_vlan_add = gswip_port_vlan_add,
- .port_vlan_del = gswip_port_vlan_del,
- .port_stp_state_set = gswip_port_stp_state_set,
- .port_fdb_add = gswip_port_fdb_add,
- .port_fdb_del = gswip_port_fdb_del,
- .port_fdb_dump = gswip_port_fdb_dump,
- .port_change_mtu = gswip_port_change_mtu,
- .port_max_mtu = gswip_port_max_mtu,
- .phylink_get_caps = gswip_xrx300_phylink_get_caps,
+ .phylink_get_caps = gswip_phylink_get_caps,
.get_strings = gswip_get_strings,
.get_ethtool_stats = gswip_get_ethtool_stats,
.get_sset_count = gswip_get_sset_count,
@@ -1946,8 +1730,7 @@ static int gswip_gphy_fw_load(struct gswip_priv *priv, struct gswip_gphy_fw *gph
memcpy(fw_addr, fw->data, fw->size);
} else {
release_firmware(fw);
- return dev_err_probe(dev, -ENOMEM,
- "failed to alloc firmware memory\n");
+ return -ENOMEM;
}
release_firmware(fw);
@@ -2104,6 +1887,30 @@ remove_gphy:
return err;
}
+static int gswip_validate_cpu_port(struct dsa_switch *ds)
+{
+ struct gswip_priv *priv = ds->priv;
+ struct dsa_port *cpu_dp;
+ int cpu_port = -1;
+
+ dsa_switch_for_each_cpu_port(cpu_dp, ds) {
+ if (cpu_port != -1)
+ return dev_err_probe(ds->dev, -EINVAL,
+ "only a single CPU port is supported\n");
+
+ cpu_port = cpu_dp->index;
+ }
+
+ if (cpu_port == -1)
+ return dev_err_probe(ds->dev, -EINVAL, "no CPU port defined\n");
+
+ if (BIT(cpu_port) & ~priv->hw_info->allowed_cpu_ports)
+ return dev_err_probe(ds->dev, -EINVAL,
+ "unsupported CPU port defined\n");
+
+ return 0;
+}
+
static int gswip_probe(struct platform_device *pdev)
{
struct device_node *np, *gphy_fw_np;
@@ -2140,12 +1947,22 @@ static int gswip_probe(struct platform_device *pdev)
priv->ds->dev = dev;
priv->ds->num_ports = priv->hw_info->max_ports;
priv->ds->priv = priv;
- priv->ds->ops = priv->hw_info->ops;
+ priv->ds->ops = &gswip_switch_ops;
priv->ds->phylink_mac_ops = &gswip_phylink_mac_ops;
priv->dev = dev;
mutex_init(&priv->pce_table_lock);
version = gswip_switch_r(priv, GSWIP_VERSION);
+ /* The hardware has the 'major/minor' version bytes in the wrong order
+ * preventing numerical comparisons. Construct a 16-bit unsigned integer
+ * having the REV field as most significant byte and the MOD field as
+ * least significant byte. This is equivalent to swab16() on the
+ * register value, but leaves the source variable untouched.
+ */
+ priv->version = GSWIP_VERSION_REV(version) << 8 |
+ GSWIP_VERSION_MOD(version);
+
np = dev->of_node;
switch (version) {
case GSWIP_VERSION_2_0:
@@ -2174,30 +1991,20 @@ static int gswip_probe(struct platform_device *pdev)
"gphy fw probe failed\n");
}
- /* bring up the mdio bus */
- err = gswip_mdio(priv);
- if (err) {
- dev_err_probe(dev, err, "mdio probe failed\n");
- goto gphy_fw_remove;
- }
-
err = dsa_register_switch(priv->ds);
if (err) {
dev_err_probe(dev, err, "dsa switch registration failed\n");
goto gphy_fw_remove;
}
- if (!dsa_is_cpu_port(priv->ds, priv->hw_info->cpu_port)) {
- err = dev_err_probe(dev, -EINVAL,
- "wrong CPU port defined, HW only supports port: %i\n",
- priv->hw_info->cpu_port);
+
+ err = gswip_validate_cpu_port(priv->ds);
+ if (err)
goto disable_switch;
- }
platform_set_drvdata(pdev, priv);
dev_info(dev, "probed GSWIP version %lx mod %lx\n",
- (version & GSWIP_VERSION_REV_MASK) >> GSWIP_VERSION_REV_SHIFT,
- (version & GSWIP_VERSION_MOD_MASK) >> GSWIP_VERSION_MOD_SHIFT);
+ GSWIP_VERSION_REV(version), GSWIP_VERSION_MOD(version));
return 0;
disable_switch:
@@ -2240,14 +2047,24 @@ static void gswip_shutdown(struct platform_device *pdev)
static const struct gswip_hw_info gswip_xrx200 = {
.max_ports = 7,
- .cpu_port = 6,
- .ops = &gswip_xrx200_switch_ops,
+ .allowed_cpu_ports = BIT(6),
+ .mii_ports = BIT(0) | BIT(1) | BIT(5),
+ .mii_port_reg_offset = 0,
+ .phylink_get_caps = gswip_xrx200_phylink_get_caps,
+ .pce_microcode = &gswip_pce_microcode,
+ .pce_microcode_size = ARRAY_SIZE(gswip_pce_microcode),
+ .tag_protocol = DSA_TAG_PROTO_GSWIP,
};
static const struct gswip_hw_info gswip_xrx300 = {
.max_ports = 7,
- .cpu_port = 6,
- .ops = &gswip_xrx300_switch_ops,
+ .allowed_cpu_ports = BIT(6),
+ .mii_ports = BIT(0) | BIT(5),
+ .mii_port_reg_offset = 0,
+ .phylink_get_caps = gswip_xrx300_phylink_get_caps,
+ .pce_microcode = &gswip_pce_microcode,
+ .pce_microcode_size = ARRAY_SIZE(gswip_pce_microcode),
+ .tag_protocol = DSA_TAG_PROTO_GSWIP,
};
static const struct of_device_id gswip_of_match[] = {
diff --git a/drivers/net/dsa/lantiq/lantiq_gswip.h b/drivers/net/dsa/lantiq/lantiq_gswip.h
new file mode 100644
index 000000000000..2df9c8e8cfd0
--- /dev/null
+++ b/drivers/net/dsa/lantiq/lantiq_gswip.h
@@ -0,0 +1,276 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LANTIQ_GSWIP_H
+#define __LANTIQ_GSWIP_H
+
+#include <linux/clk.h>
+#include <linux/mutex.h>
+#include <linux/phylink.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/swab.h>
+#include <net/dsa.h>
+
+/* GSWIP MDIO Registers */
+#define GSWIP_MDIO_GLOB 0x00
+#define GSWIP_MDIO_GLOB_ENABLE BIT(15)
+#define GSWIP_MDIO_CTRL 0x08
+#define GSWIP_MDIO_CTRL_BUSY BIT(12)
+#define GSWIP_MDIO_CTRL_RD BIT(11)
+#define GSWIP_MDIO_CTRL_WR BIT(10)
+#define GSWIP_MDIO_CTRL_PHYAD_MASK 0x1f
+#define GSWIP_MDIO_CTRL_PHYAD_SHIFT 5
+#define GSWIP_MDIO_CTRL_REGAD_MASK 0x1f
+#define GSWIP_MDIO_READ 0x09
+#define GSWIP_MDIO_WRITE 0x0A
+#define GSWIP_MDIO_MDC_CFG0 0x0B
+#define GSWIP_MDIO_MDC_CFG1 0x0C
+#define GSWIP_MDIO_PHYp(p) (0x15 - (p))
+#define GSWIP_MDIO_PHY_LINK_MASK 0x6000
+#define GSWIP_MDIO_PHY_LINK_AUTO 0x0000
+#define GSWIP_MDIO_PHY_LINK_DOWN 0x4000
+#define GSWIP_MDIO_PHY_LINK_UP 0x2000
+#define GSWIP_MDIO_PHY_SPEED_MASK 0x1800
+#define GSWIP_MDIO_PHY_SPEED_AUTO 0x1800
+#define GSWIP_MDIO_PHY_SPEED_M10 0x0000
+#define GSWIP_MDIO_PHY_SPEED_M100 0x0800
+#define GSWIP_MDIO_PHY_SPEED_G1 0x1000
+#define GSWIP_MDIO_PHY_FDUP_MASK 0x0600
+#define GSWIP_MDIO_PHY_FDUP_AUTO 0x0000
+#define GSWIP_MDIO_PHY_FDUP_EN 0x0200
+#define GSWIP_MDIO_PHY_FDUP_DIS 0x0600
+#define GSWIP_MDIO_PHY_FCONTX_MASK 0x0180
+#define GSWIP_MDIO_PHY_FCONTX_AUTO 0x0000
+#define GSWIP_MDIO_PHY_FCONTX_EN 0x0100
+#define GSWIP_MDIO_PHY_FCONTX_DIS 0x0180
+#define GSWIP_MDIO_PHY_FCONRX_MASK 0x0060
+#define GSWIP_MDIO_PHY_FCONRX_AUTO 0x0000
+#define GSWIP_MDIO_PHY_FCONRX_EN 0x0020
+#define GSWIP_MDIO_PHY_FCONRX_DIS 0x0060
+#define GSWIP_MDIO_PHY_ADDR_MASK 0x001f
+#define GSWIP_MDIO_PHY_MASK (GSWIP_MDIO_PHY_ADDR_MASK | \
+ GSWIP_MDIO_PHY_FCONRX_MASK | \
+ GSWIP_MDIO_PHY_FCONTX_MASK | \
+ GSWIP_MDIO_PHY_LINK_MASK | \
+ GSWIP_MDIO_PHY_SPEED_MASK | \
+ GSWIP_MDIO_PHY_FDUP_MASK)
+
+/* GSWIP MII Registers */
+#define GSWIP_MII_CFGp(p) (0x2 * (p))
+#define GSWIP_MII_CFG_RESET BIT(15)
+#define GSWIP_MII_CFG_EN BIT(14)
+#define GSWIP_MII_CFG_ISOLATE BIT(13)
+#define GSWIP_MII_CFG_LDCLKDIS BIT(12)
+#define GSWIP_MII_CFG_RGMII_IBS BIT(8)
+#define GSWIP_MII_CFG_RMII_CLK BIT(7)
+#define GSWIP_MII_CFG_MODE_MIIP 0x0
+#define GSWIP_MII_CFG_MODE_MIIM 0x1
+#define GSWIP_MII_CFG_MODE_RMIIP 0x2
+#define GSWIP_MII_CFG_MODE_RMIIM 0x3
+#define GSWIP_MII_CFG_MODE_RGMII 0x4
+#define GSWIP_MII_CFG_MODE_GMII 0x9
+#define GSWIP_MII_CFG_MODE_MASK 0xf
+#define GSWIP_MII_CFG_RATE_M2P5 0x00
+#define GSWIP_MII_CFG_RATE_M25 0x10
+#define GSWIP_MII_CFG_RATE_M125 0x20
+#define GSWIP_MII_CFG_RATE_M50 0x30
+#define GSWIP_MII_CFG_RATE_AUTO 0x40
+#define GSWIP_MII_CFG_RATE_MASK 0x70
+#define GSWIP_MII_PCDU0 0x01
+#define GSWIP_MII_PCDU1 0x03
+#define GSWIP_MII_PCDU5 0x05
+#define GSWIP_MII_PCDU_TXDLY_MASK GENMASK(2, 0)
+#define GSWIP_MII_PCDU_RXDLY_MASK GENMASK(9, 7)
+
+/* GSWIP Core Registers */
+#define GSWIP_SWRES 0x000
+#define GSWIP_SWRES_R1 BIT(1) /* GSWIP Software reset */
+#define GSWIP_SWRES_R0 BIT(0) /* GSWIP Hardware reset */
+#define GSWIP_VERSION 0x013
+#define GSWIP_VERSION_REV_MASK GENMASK(7, 0)
+#define GSWIP_VERSION_MOD_MASK GENMASK(15, 8)
+#define GSWIP_VERSION_REV(v) FIELD_GET(GSWIP_VERSION_REV_MASK, v)
+#define GSWIP_VERSION_MOD(v) FIELD_GET(GSWIP_VERSION_MOD_MASK, v)
+#define GSWIP_VERSION_2_0 0x100
+#define GSWIP_VERSION_2_1 0x021
+#define GSWIP_VERSION_2_2 0x122
+#define GSWIP_VERSION_2_2_ETC 0x022
+/* The hardware has the 'major/minor' version bytes in the wrong order
+ * preventing numerical comparisons. Swap the bytes of the 16-bit value
+ * to end up with REV being the most significant byte and MOD being the
+ * least significant byte, which then allows comparing it with the
+ * value stored in struct gswip_priv.
+ */
+#define GSWIP_VERSION_GE(priv, ver) ((priv)->version >= swab16(ver))
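+/* Example: GSWIP_VERSION_2_1 is 0x021 (REV 0x21, MOD 0x00) and is
+ * stored as 0x2100 in priv->version, so GSWIP_VERSION_GE(priv,
+ * GSWIP_VERSION_2_1) compares priv->version >= swab16(0x021) == 0x2100.
+ */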
+
+#define GSWIP_BM_RAM_VAL(x) (0x043 - (x))
+#define GSWIP_BM_RAM_ADDR 0x044
+#define GSWIP_BM_RAM_CTRL 0x045
+#define GSWIP_BM_RAM_CTRL_BAS BIT(15)
+#define GSWIP_BM_RAM_CTRL_OPMOD BIT(5)
+#define GSWIP_BM_RAM_CTRL_ADDR_MASK GENMASK(4, 0)
+#define GSWIP_BM_QUEUE_GCTRL 0x04A
+#define GSWIP_BM_QUEUE_GCTRL_GL_MOD BIT(10)
+/* buffer management Port Configuration Register */
+#define GSWIP_BM_PCFGp(p) (0x080 + ((p) * 2))
+#define GSWIP_BM_PCFG_CNTEN BIT(0) /* RMON Counter Enable */
+#define GSWIP_BM_PCFG_IGCNT BIT(1) /* Ingress Special Tag RMON count */
+/* buffer management Port Control Register */
+#define GSWIP_BM_RMON_CTRLp(p) (0x81 + ((p) * 2))
+#define GSWIP_BM_CTRL_RMON_RAM1_RES BIT(0) /* Software Reset for RMON RAM 1 */
+#define GSWIP_BM_CTRL_RMON_RAM2_RES BIT(1) /* Software Reset for RMON RAM 2 */
+
+/* PCE */
+#define GSWIP_PCE_TBL_KEY(x) (0x447 - (x))
+#define GSWIP_PCE_TBL_MASK 0x448
+#define GSWIP_PCE_TBL_VAL(x) (0x44D - (x))
+#define GSWIP_PCE_TBL_ADDR 0x44E
+#define GSWIP_PCE_TBL_CTRL 0x44F
+#define GSWIP_PCE_TBL_CTRL_BAS BIT(15)
+#define GSWIP_PCE_TBL_CTRL_TYPE BIT(13)
+#define GSWIP_PCE_TBL_CTRL_VLD BIT(12)
+#define GSWIP_PCE_TBL_CTRL_KEYFORM BIT(11)
+#define GSWIP_PCE_TBL_CTRL_GMAP_MASK GENMASK(10, 7)
+#define GSWIP_PCE_TBL_CTRL_OPMOD_MASK GENMASK(6, 5)
+#define GSWIP_PCE_TBL_CTRL_OPMOD_ADRD 0x00
+#define GSWIP_PCE_TBL_CTRL_OPMOD_ADWR 0x20
+#define GSWIP_PCE_TBL_CTRL_OPMOD_KSRD 0x40
+#define GSWIP_PCE_TBL_CTRL_OPMOD_KSWR 0x60
+#define GSWIP_PCE_TBL_CTRL_ADDR_MASK GENMASK(4, 0)
+#define GSWIP_PCE_PMAP1 0x453 /* Monitoring port map */
+#define GSWIP_PCE_PMAP2 0x454 /* Default Multicast port map */
+#define GSWIP_PCE_PMAP3 0x455 /* Default Unknown Unicast port map */
+#define GSWIP_PCE_GCTRL_0 0x456
+#define GSWIP_PCE_GCTRL_0_MTFL BIT(0) /* MAC Table Flushing */
+#define GSWIP_PCE_GCTRL_0_MC_VALID BIT(3)
+#define GSWIP_PCE_GCTRL_0_VLAN BIT(14) /* VLAN aware Switching */
+#define GSWIP_PCE_GCTRL_1 0x457
+#define GSWIP_PCE_GCTRL_1_MAC_GLOCK BIT(2) /* MAC Address table lock */
+#define GSWIP_PCE_GCTRL_1_MAC_GLOCK_MOD BIT(3) /* MAC address table lock forwarding mode */
+#define GSWIP_PCE_PCTRL_0p(p) (0x480 + ((p) * 0xA))
+#define GSWIP_PCE_PCTRL_0_TVM BIT(5) /* Transparent VLAN mode */
+#define GSWIP_PCE_PCTRL_0_VREP BIT(6) /* VLAN Replace Mode */
+#define GSWIP_PCE_PCTRL_0_INGRESS BIT(11) /* Accept special tag in ingress */
+#define GSWIP_PCE_PCTRL_0_PSTATE_LISTEN 0x0
+#define GSWIP_PCE_PCTRL_0_PSTATE_RX 0x1
+#define GSWIP_PCE_PCTRL_0_PSTATE_TX 0x2
+#define GSWIP_PCE_PCTRL_0_PSTATE_LEARNING 0x3
+#define GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING 0x7
+#define GSWIP_PCE_PCTRL_0_PSTATE_MASK GENMASK(2, 0)
+#define GSWIP_PCE_VCTRL(p) (0x485 + ((p) * 0xA))
+#define GSWIP_PCE_VCTRL_UVR BIT(0) /* Unknown VLAN Rule */
+#define GSWIP_PCE_VCTRL_VIMR BIT(3) /* VLAN Ingress Member violation rule */
+#define GSWIP_PCE_VCTRL_VEMR BIT(4) /* VLAN Egress Member violation rule */
+#define GSWIP_PCE_VCTRL_VSR BIT(5) /* VLAN Security */
+#define GSWIP_PCE_VCTRL_VID0 BIT(6) /* Priority Tagged Rule */
+#define GSWIP_PCE_DEFPVID(p) (0x486 + ((p) * 0xA))
+
+#define GSWIP_MAC_FLEN 0x8C5
+#define GSWIP_MAC_CTRL_0p(p) (0x903 + ((p) * 0xC))
+#define GSWIP_MAC_CTRL_0_PADEN BIT(8)
+#define GSWIP_MAC_CTRL_0_FCS_EN BIT(7)
+#define GSWIP_MAC_CTRL_0_FCON_MASK 0x0070
+#define GSWIP_MAC_CTRL_0_FCON_AUTO 0x0000
+#define GSWIP_MAC_CTRL_0_FCON_RX 0x0010
+#define GSWIP_MAC_CTRL_0_FCON_TX 0x0020
+#define GSWIP_MAC_CTRL_0_FCON_RXTX 0x0030
+#define GSWIP_MAC_CTRL_0_FCON_NONE 0x0040
+#define GSWIP_MAC_CTRL_0_FDUP_MASK 0x000C
+#define GSWIP_MAC_CTRL_0_FDUP_AUTO 0x0000
+#define GSWIP_MAC_CTRL_0_FDUP_EN 0x0004
+#define GSWIP_MAC_CTRL_0_FDUP_DIS 0x000C
+#define GSWIP_MAC_CTRL_0_GMII_MASK 0x0003
+#define GSWIP_MAC_CTRL_0_GMII_AUTO 0x0000
+#define GSWIP_MAC_CTRL_0_GMII_MII 0x0001
+#define GSWIP_MAC_CTRL_0_GMII_RGMII 0x0002
+#define GSWIP_MAC_CTRL_2p(p) (0x905 + ((p) * 0xC))
+#define GSWIP_MAC_CTRL_2_LCHKL BIT(2) /* Frame Length Check Long Enable */
+#define GSWIP_MAC_CTRL_2_MLEN BIT(3) /* Maximum Untagged Frame Length */
+
+/* Ethernet Switch Fetch DMA Port Control Register */
+#define GSWIP_FDMA_PCTRLp(p) (0xA80 + ((p) * 0x6))
+#define GSWIP_FDMA_PCTRL_EN BIT(0) /* FDMA Port Enable */
+#define GSWIP_FDMA_PCTRL_STEN BIT(1) /* Special Tag Insertion Enable */
+#define GSWIP_FDMA_PCTRL_VLANMOD_MASK GENMASK(4, 3) /* VLAN Modification Control */
+#define GSWIP_FDMA_PCTRL_VLANMOD_SHIFT 3 /* VLAN Modification Control */
+#define GSWIP_FDMA_PCTRL_VLANMOD_DIS (0x0 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
+#define GSWIP_FDMA_PCTRL_VLANMOD_PRIO (0x1 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
+#define GSWIP_FDMA_PCTRL_VLANMOD_ID (0x2 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
+#define GSWIP_FDMA_PCTRL_VLANMOD_BOTH (0x3 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
+
+/* Ethernet Switch Store DMA Port Control Register */
+#define GSWIP_SDMA_PCTRLp(p) (0xBC0 + ((p) * 0x6))
+#define GSWIP_SDMA_PCTRL_EN BIT(0) /* SDMA Port Enable */
+#define GSWIP_SDMA_PCTRL_FCEN BIT(1) /* Flow Control Enable */
+#define GSWIP_SDMA_PCTRL_PAUFWD BIT(3) /* Pause Frame Forwarding */
+
+#define GSWIP_TABLE_ACTIVE_VLAN 0x01
+#define GSWIP_TABLE_VLAN_MAPPING 0x02
+#define GSWIP_TABLE_MAC_BRIDGE 0x0b
+#define GSWIP_TABLE_MAC_BRIDGE_KEY3_FID GENMASK(5, 0) /* Filtering identifier */
+#define GSWIP_TABLE_MAC_BRIDGE_VAL0_PORT GENMASK(7, 4) /* Port on learned entries */
+#define GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC BIT(0) /* Static, non-aging entry */
+
+#define XRX200_GPHY_FW_ALIGN (16 * 1024)
+
+/* Maximum packet size supported by the switch. In theory this should be 10240,
+ * but long packets currently cause lock-ups with an MTU of over 2526. Medium
+ * packets are sometimes dropped (e.g. TCP over 2477, UDP over 2516-2519, ICMP
+ * over 2526), hence an MTU value of 2400 seems safe. This issue only affects
+ * packet reception. This is probably caused by the PPA engine, which is on the
+ * RX part of the device. Packet transmission works properly up to 10240.
+ */
+#define GSWIP_MAX_PACKET_LENGTH 2400
+
+struct gswip_pce_microcode {
+ u16 val_3;
+ u16 val_2;
+ u16 val_1;
+ u16 val_0;
+};
+
+struct gswip_hw_info {
+ int max_ports;
+ unsigned int allowed_cpu_ports;
+ unsigned int mii_ports;
+ int mii_port_reg_offset;
+ const struct gswip_pce_microcode (*pce_microcode)[];
+ size_t pce_microcode_size;
+ enum dsa_tag_protocol tag_protocol;
+ void (*phylink_get_caps)(struct dsa_switch *ds, int port,
+ struct phylink_config *config);
+ struct phylink_pcs *(*mac_select_pcs)(struct phylink_config *config,
+ phy_interface_t interface);
+};
+
+struct gswip_gphy_fw {
+ struct clk *clk_gate;
+ struct reset_control *reset;
+ u32 fw_addr_offset;
+ char *fw_name;
+};
+
+struct gswip_vlan {
+ struct net_device *bridge;
+ u16 vid;
+ u8 fid;
+};
+
+struct gswip_priv {
+ __iomem void *gswip;
+ __iomem void *mdio;
+ __iomem void *mii;
+ const struct gswip_hw_info *hw_info;
+ const struct xway_gphy_match_data *gphy_fw_name_cfg;
+ struct dsa_switch *ds;
+ struct device *dev;
+ struct regmap *rcu_regmap;
+ struct gswip_vlan vlans[64];
+ int num_gphy_fw;
+ struct gswip_gphy_fw *gphy_fw;
+ u32 port_vlan_filter;
+ struct mutex pce_table_lock;
+ u16 version;
+};
+
+#endif /* __LANTIQ_GSWIP_H */
diff --git a/drivers/net/dsa/lantiq_pce.h b/drivers/net/dsa/lantiq/lantiq_pce.h
index e2be31f3672a..659f9a0638d9 100644
--- a/drivers/net/dsa/lantiq_pce.h
+++ b/drivers/net/dsa/lantiq/lantiq_pce.h
@@ -7,6 +7,8 @@
* Copyright (C) 2017 - 2018 Hauke Mehrtens <hauke@hauke-m.de>
*/
+#include "lantiq_gswip.h"
+
enum {
OUT_MAC0 = 0,
OUT_MAC1,
@@ -74,13 +76,6 @@ enum {
FLAG_NO, /*13*/
};
-struct gswip_pce_microcode {
- u16 val_3;
- u16 val_2;
- u16 val_1;
- u16 val_0;
-};
-
#define MC_ENTRY(val, msk, ns, out, len, type, flags, ipv4_len) \
{ val, msk, ((ns) << 10 | (out) << 4 | (len) >> 1),\
((len) & 1) << 15 | (type) << 13 | (flags) << 9 | (ipv4_len) << 8 }
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 9568cc391fe3..a962055bfdbd 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -23,6 +23,7 @@
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/micrel_phy.h>
+#include <linux/pinctrl/consumer.h>
#include <net/dsa.h>
#include <net/ieee8021q.h>
#include <net/pkt_cls.h>
@@ -5345,6 +5346,38 @@ static int ksz_parse_drive_strength(struct ksz_device *dev)
return 0;
}
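+/* The KSZ8463 latches bootstrap configuration from the RXD0/RXD1 pins
+ * while reset is asserted. If the board wires them up as "straps-rxd"
+ * GPIOs, drive RXD0 low and RXD1 high (presumably selecting the SPI bus
+ * mode, per the function name) under the "reset" pinctrl state, then
+ * release them to the default state once reset is de-asserted.
+ */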
+static int ksz8463_configure_straps_spi(struct ksz_device *dev)
+{
+ struct pinctrl *pinctrl;
+ struct gpio_desc *rxd0;
+ struct gpio_desc *rxd1;
+
+ rxd0 = devm_gpiod_get_index_optional(dev->dev, "straps-rxd", 0, GPIOD_OUT_LOW);
+ if (IS_ERR(rxd0))
+ return PTR_ERR(rxd0);
+
+ rxd1 = devm_gpiod_get_index_optional(dev->dev, "straps-rxd", 1, GPIOD_OUT_HIGH);
+ if (IS_ERR(rxd1))
+ return PTR_ERR(rxd1);
+
+ if (!rxd0 && !rxd1)
+ return 0;
+
+ if ((rxd0 && !rxd1) || (rxd1 && !rxd0))
+ return -EINVAL;
+
+ pinctrl = devm_pinctrl_get_select(dev->dev, "reset");
+ if (IS_ERR(pinctrl))
+ return PTR_ERR(pinctrl);
+
+ return 0;
+}
+
+static int ksz8463_release_straps_spi(struct ksz_device *dev)
+{
+ return pinctrl_select_default_state(dev->dev);
+}
+
int ksz_switch_register(struct ksz_device *dev)
{
const struct ksz_chip_data *info;
@@ -5360,10 +5393,22 @@ int ksz_switch_register(struct ksz_device *dev)
return PTR_ERR(dev->reset_gpio);
if (dev->reset_gpio) {
+ if (of_device_is_compatible(dev->dev->of_node, "microchip,ksz8463")) {
+ ret = ksz8463_configure_straps_spi(dev);
+ if (ret)
+ return ret;
+ }
+
gpiod_set_value_cansleep(dev->reset_gpio, 1);
usleep_range(10000, 12000);
gpiod_set_value_cansleep(dev->reset_gpio, 0);
msleep(100);
+
+ if (of_device_is_compatible(dev->dev->of_node, "microchip,ksz8463")) {
+ ret = ksz8463_release_straps_spi(dev);
+ if (ret)
+ return ret;
+ }
}
mutex_init(&dev->dev_mutex);
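
The KSZ8463 samples its RXD strap pins when reset is released, so the hunk above drives both strap GPIOs and moves the pins to a dedicated "reset" pinctrl state before toggling the reset line, then returns them to the default state. Condensed flow, assuming the device tree provides both "straps-rxd" GPIOs and a "reset" pinctrl state:

/* Condensed sequence; not a verbatim copy of the driver. */
ret = ksz8463_configure_straps_spi(dev);	/* RXD0 low, RXD1 high */
if (ret)
	return ret;

gpiod_set_value_cansleep(dev->reset_gpio, 1);
usleep_range(10000, 12000);
gpiod_set_value_cansleep(dev->reset_gpio, 0);	/* straps sampled here */
msleep(100);

ret = ksz8463_release_straps_spi(dev);		/* default pinctrl state */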
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 2281d6ab8c9a..b4d48997bf46 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -3965,6 +3965,8 @@ static void mv88e6xxx_teardown(struct dsa_switch *ds)
mv88e6xxx_teardown_devlink_params(ds);
dsa_devlink_resources_unregister(ds);
mv88e6xxx_teardown_devlink_regions_global(ds);
+ mv88e6xxx_hwtstamp_free(chip);
+ mv88e6xxx_ptp_free(chip);
mv88e6xxx_mdios_unregister(chip);
}
@@ -4105,7 +4107,7 @@ unlock:
mv88e6xxx_reg_unlock(chip);
if (err)
- goto out_mdios;
+ goto out_hwtstamp;
/* Have to be called without holding the register lock, since
* they take the devlink lock, and we later take the locks in
@@ -4114,7 +4116,7 @@ unlock:
*/
err = mv88e6xxx_setup_devlink_resources(ds);
if (err)
- goto out_mdios;
+ goto out_hwtstamp;
err = mv88e6xxx_setup_devlink_params(ds);
if (err)
@@ -4130,7 +4132,9 @@ out_params:
mv88e6xxx_teardown_devlink_params(ds);
out_resources:
dsa_devlink_resources_unregister(ds);
-out_mdios:
+out_hwtstamp:
+ mv88e6xxx_hwtstamp_free(chip);
+ mv88e6xxx_ptp_free(chip);
mv88e6xxx_mdios_unregister(chip);
return err;
@@ -5088,7 +5092,7 @@ static const struct mv88e6xxx_ops mv88e6250_ops = {
.vtu_getnext = mv88e6185_g1_vtu_getnext,
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
.avb_ops = &mv88e6352_avb_ops,
- .ptp_ops = &mv88e6250_ptp_ops,
+ .ptp_ops = &mv88e6352_ptp_ops,
.phylink_get_caps = mv88e6250_phylink_get_caps,
.set_max_frame_size = mv88e6185_g1_set_max_frame_size,
};
@@ -7439,11 +7443,6 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev)
chip = ds->priv;
- if (chip->info->ptp_support) {
- mv88e6xxx_hwtstamp_free(chip);
- mv88e6xxx_ptp_free(chip);
- }
-
mv88e6xxx_unregister_switch(chip);
mv88e6xxx_g1_vtu_prob_irq_free(chip);
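
Moving mv88e6xxx_hwtstamp_free()/mv88e6xxx_ptp_free() out of .remove() and into the DSA teardown path keeps the error unwind strictly LIFO: the renamed out_hwtstamp label now undoes PTP state before the MDIO buses, mirroring setup order in reverse. The general shape of the idiom, as a generic sketch:

/* Generic goto-unwind ladder; labels mirror setup order in reverse. */
int setup(void)
{
	int err;

	err = init_mdios();
	if (err)
		return err;
	err = init_ptp();
	if (err)
		goto out_mdios;
	err = init_devlink();
	if (err)
		goto out_ptp;
	return 0;

out_ptp:
	free_ptp();
out_mdios:
	free_mdios();
	return err;
}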
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index feddf505c918..2f211e55cb47 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -424,8 +424,6 @@ struct mv88e6xxx_chip {
struct ptp_clock_info ptp_clock_info;
struct delayed_work tai_event_work;
struct ptp_pin_desc pin_config[MV88E6XXX_MAX_GPIO];
- u16 trig_config;
- u16 evcap_config;
u16 enable_count;
/* Current ingress and egress monitor ports */
diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.c b/drivers/net/dsa/mv88e6xxx/hwtstamp.c
index f663799b0b3b..6e6472a3b75a 100644
--- a/drivers/net/dsa/mv88e6xxx/hwtstamp.c
+++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.c
@@ -570,7 +570,7 @@ int mv88e6xxx_hwtstamp_setup(struct mv88e6xxx_chip *chip)
}
/* Set the ethertype of L2 PTP messages */
- err = mv88e6xxx_ptp_write(chip, MV88E6XXX_PTP_GC_ETYPE, ETH_P_1588);
+ err = mv88e6xxx_ptp_write(chip, MV88E6XXX_PTP_ETHERTYPE, ETH_P_1588);
if (err)
return err;
diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.h b/drivers/net/dsa/mv88e6xxx/hwtstamp.h
index 22e4acc957f0..c359821d5a6e 100644
--- a/drivers/net/dsa/mv88e6xxx/hwtstamp.h
+++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.h
@@ -124,6 +124,7 @@ void mv88e6xxx_port_txtstamp(struct dsa_switch *ds, int port,
int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port,
struct kernel_ethtool_ts_info *info);
+long mv88e6xxx_hwtstamp_work(struct ptp_clock_info *ptp);
int mv88e6xxx_hwtstamp_setup(struct mv88e6xxx_chip *chip);
void mv88e6xxx_hwtstamp_free(struct mv88e6xxx_chip *chip);
int mv88e6352_hwtstamp_port_enable(struct mv88e6xxx_chip *chip, int port);
diff --git a/drivers/net/dsa/mv88e6xxx/ptp.c b/drivers/net/dsa/mv88e6xxx/ptp.c
index e8c9207e932e..f7603573d3a9 100644
--- a/drivers/net/dsa/mv88e6xxx/ptp.c
+++ b/drivers/net/dsa/mv88e6xxx/ptp.c
@@ -144,7 +144,7 @@ static u64 mv88e6352_ptp_clock_read(struct cyclecounter *cc)
u16 phc_time[2];
int err;
- err = mv88e6xxx_tai_read(chip, MV88E6XXX_TAI_TIME_LO, phc_time,
+ err = mv88e6xxx_tai_read(chip, MV88E6352_TAI_TIME_LO, phc_time,
ARRAY_SIZE(phc_time));
if (err)
return 0;
@@ -158,7 +158,7 @@ static u64 mv88e6165_ptp_clock_read(struct cyclecounter *cc)
u16 phc_time[2];
int err;
- err = mv88e6xxx_tai_read(chip, MV88E6XXX_PTP_GC_TIME_LO, phc_time,
+ err = mv88e6xxx_tai_read(chip, MV88E6165_PTP_GC_TIME_LO, phc_time,
ARRAY_SIZE(phc_time));
if (err)
return 0;
@@ -167,42 +167,26 @@ static u64 mv88e6165_ptp_clock_read(struct cyclecounter *cc)
}
/* mv88e6352_config_eventcap - configure TAI event capture
- * @event: PTP_CLOCK_PPS (internal) or PTP_CLOCK_EXTTS (external)
* @rising: zero for falling-edge trigger, else rising-edge trigger
*
* This will also reset the capture sequence counter.
*/
-static int mv88e6352_config_eventcap(struct mv88e6xxx_chip *chip, int event,
- int rising)
+static int mv88e6352_config_eventcap(struct mv88e6xxx_chip *chip, int rising)
{
- u16 global_config;
- u16 cap_config;
+ u16 evcap_config;
int err;
- chip->evcap_config = MV88E6XXX_TAI_CFG_CAP_OVERWRITE |
- MV88E6XXX_TAI_CFG_CAP_CTR_START;
+ evcap_config = MV88E6352_TAI_CFG_CAP_OVERWRITE |
+ MV88E6352_TAI_CFG_CAP_CTR_START;
if (!rising)
- chip->evcap_config |= MV88E6XXX_TAI_CFG_EVREQ_FALLING;
+ evcap_config |= MV88E6352_TAI_CFG_EVREQ_FALLING;
- global_config = (chip->evcap_config | chip->trig_config);
- err = mv88e6xxx_tai_write(chip, MV88E6XXX_TAI_CFG, global_config);
+ err = mv88e6xxx_tai_write(chip, MV88E6352_TAI_CFG, evcap_config);
if (err)
return err;
- if (event == PTP_CLOCK_PPS) {
- cap_config = MV88E6XXX_TAI_EVENT_STATUS_CAP_TRIG;
- } else if (event == PTP_CLOCK_EXTTS) {
- /* if STATUS_CAP_TRIG is unset we capture PTP_EVREQ events */
- cap_config = 0;
- } else {
- return -EINVAL;
- }
-
/* Write the capture config; this also clears the capture counter */
- err = mv88e6xxx_tai_write(chip, MV88E6XXX_TAI_EVENT_STATUS,
- cap_config);
-
- return err;
+ return mv88e6xxx_tai_write(chip, MV88E6352_TAI_EVENT_STATUS, 0);
}
static void mv88e6352_tai_event_work(struct work_struct *ugly)
@@ -215,7 +199,7 @@ static void mv88e6352_tai_event_work(struct work_struct *ugly)
int err;
mv88e6xxx_reg_lock(chip);
- err = mv88e6xxx_tai_read(chip, MV88E6XXX_TAI_EVENT_STATUS,
+ err = mv88e6xxx_tai_read(chip, MV88E6352_TAI_EVENT_STATUS,
status, ARRAY_SIZE(status));
mv88e6xxx_reg_unlock(chip);
@@ -223,19 +207,19 @@ static void mv88e6352_tai_event_work(struct work_struct *ugly)
dev_err(chip->dev, "failed to read TAI status register\n");
return;
}
- if (status[0] & MV88E6XXX_TAI_EVENT_STATUS_ERROR) {
+ if (status[0] & MV88E6352_TAI_EVENT_STATUS_ERROR) {
dev_warn(chip->dev, "missed event capture\n");
return;
}
- if (!(status[0] & MV88E6XXX_TAI_EVENT_STATUS_VALID))
+ if (!(status[0] & MV88E6352_TAI_EVENT_STATUS_VALID))
goto out;
raw_ts = ((u32)status[2] << 16) | status[1];
/* Clear the valid bit so the next timestamp can come in */
- status[0] &= ~MV88E6XXX_TAI_EVENT_STATUS_VALID;
+ status[0] &= ~MV88E6352_TAI_EVENT_STATUS_VALID;
mv88e6xxx_reg_lock(chip);
- err = mv88e6xxx_tai_write(chip, MV88E6XXX_TAI_EVENT_STATUS, status[0]);
+ err = mv88e6xxx_tai_write(chip, MV88E6352_TAI_EVENT_STATUS, status[0]);
mv88e6xxx_reg_unlock(chip);
if (err) {
dev_err(chip->dev, "failed to write TAI status register\n");
@@ -355,7 +339,7 @@ static int mv88e6352_ptp_enable_extts(struct mv88e6xxx_chip *chip,
schedule_delayed_work(&chip->tai_event_work,
TAI_EVENT_WORK_INTERVAL);
- err = mv88e6352_config_eventcap(chip, PTP_CLOCK_EXTTS, rising);
+ err = mv88e6352_config_eventcap(chip, rising);
} else {
func = MV88E6352_G2_SCRATCH_GPIO_PCTL_GPIO;
@@ -413,29 +397,6 @@ const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops = {
(1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ),
};
-const struct mv88e6xxx_ptp_ops mv88e6250_ptp_ops = {
- .clock_read = mv88e6352_ptp_clock_read,
- .ptp_enable = mv88e6352_ptp_enable,
- .ptp_verify = mv88e6352_ptp_verify,
- .event_work = mv88e6352_tai_event_work,
- .port_enable = mv88e6352_hwtstamp_port_enable,
- .port_disable = mv88e6352_hwtstamp_port_disable,
- .n_ext_ts = 1,
- .arr0_sts_reg = MV88E6XXX_PORT_PTP_ARR0_STS,
- .arr1_sts_reg = MV88E6XXX_PORT_PTP_ARR1_STS,
- .dep_sts_reg = MV88E6XXX_PORT_PTP_DEP_STS,
- .rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ),
-};
-
const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = {
.clock_read = mv88e6352_ptp_clock_read,
.ptp_enable = mv88e6352_ptp_enable,
@@ -590,6 +551,7 @@ int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip)
return 0;
}
+/* This must never be called holding the register lock */
void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip)
{
if (chip->ptp_clock) {
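
The "must never be called holding the register lock" comment is load-bearing: ptp_clock_unregister() synchronously cancels the delayed TAI event work, and that worker itself takes the register lock (see mv88e6352_tai_event_work() above), so calling mv88e6xxx_ptp_free() under the lock could deadlock. Illustrative interleaving, assuming the cancel is synchronous:

/*
 *   CPU0 (holding reg lock)         CPU1 (tai_event_work)
 *   -----------------------         ---------------------------------
 *   mv88e6xxx_ptp_free(chip)
 *     ptp_clock_unregister()        mv88e6xxx_reg_lock(chip) <- blocks
 *       cancel work, wait... <----- never completes: deadlock
 */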
diff --git a/drivers/net/dsa/mv88e6xxx/ptp.h b/drivers/net/dsa/mv88e6xxx/ptp.h
index 6c4d09adc93c..95bdddb0bf39 100644
--- a/drivers/net/dsa/mv88e6xxx/ptp.h
+++ b/drivers/net/dsa/mv88e6xxx/ptp.h
@@ -16,132 +16,56 @@
#include "chip.h"
/* Offset 0x00: TAI Global Config */
-#define MV88E6XXX_TAI_CFG 0x00
-#define MV88E6XXX_TAI_CFG_CAP_OVERWRITE 0x8000
-#define MV88E6XXX_TAI_CFG_CAP_CTR_START 0x4000
-#define MV88E6XXX_TAI_CFG_EVREQ_FALLING 0x2000
-#define MV88E6XXX_TAI_CFG_TRIG_ACTIVE_LO 0x1000
-#define MV88E6XXX_TAI_CFG_IRL_ENABLE 0x0400
-#define MV88E6XXX_TAI_CFG_TRIG_IRQ_EN 0x0200
-#define MV88E6XXX_TAI_CFG_EVREQ_IRQ_EN 0x0100
-#define MV88E6XXX_TAI_CFG_TRIG_LOCK 0x0080
-#define MV88E6XXX_TAI_CFG_BLOCK_UPDATE 0x0008
-#define MV88E6XXX_TAI_CFG_MULTI_PTP 0x0004
-#define MV88E6XXX_TAI_CFG_TRIG_MODE_ONESHOT 0x0002
-#define MV88E6XXX_TAI_CFG_TRIG_ENABLE 0x0001
+#define MV88E6352_TAI_CFG 0x00
+#define MV88E6352_TAI_CFG_CAP_OVERWRITE 0x8000
+#define MV88E6352_TAI_CFG_CAP_CTR_START 0x4000
+#define MV88E6352_TAI_CFG_EVREQ_FALLING 0x2000
+#define MV88E6352_TAI_CFG_TRIG_ACTIVE_LO 0x1000
+#define MV88E6352_TAI_CFG_IRL_ENABLE 0x0400
+#define MV88E6352_TAI_CFG_TRIG_IRQ_EN 0x0200
+#define MV88E6352_TAI_CFG_EVREQ_IRQ_EN 0x0100
+#define MV88E6352_TAI_CFG_TRIG_LOCK 0x0080
+#define MV88E6352_TAI_CFG_BLOCK_UPDATE 0x0008
+#define MV88E6352_TAI_CFG_MULTI_PTP 0x0004
+#define MV88E6352_TAI_CFG_TRIG_MODE_ONESHOT 0x0002
+#define MV88E6352_TAI_CFG_TRIG_ENABLE 0x0001
/* Offset 0x01: Timestamp Clock Period (ps) */
#define MV88E6XXX_TAI_CLOCK_PERIOD 0x01
-/* Offset 0x02/0x03: Trigger Generation Amount */
-#define MV88E6XXX_TAI_TRIG_GEN_AMOUNT_LO 0x02
-#define MV88E6XXX_TAI_TRIG_GEN_AMOUNT_HI 0x03
-
-/* Offset 0x04: Clock Compensation */
-#define MV88E6XXX_TAI_TRIG_CLOCK_COMP 0x04
-
-/* Offset 0x05: Trigger Configuration */
-#define MV88E6XXX_TAI_TRIG_CFG 0x05
-
-/* Offset 0x06: Ingress Rate Limiter Clock Generation Amount */
-#define MV88E6XXX_TAI_IRL_AMOUNT 0x06
-
-/* Offset 0x07: Ingress Rate Limiter Compensation */
-#define MV88E6XXX_TAI_IRL_COMP 0x07
-
-/* Offset 0x08: Ingress Rate Limiter Compensation */
-#define MV88E6XXX_TAI_IRL_COMP_PS 0x08
-
/* Offset 0x09: Event Status */
-#define MV88E6XXX_TAI_EVENT_STATUS 0x09
-#define MV88E6XXX_TAI_EVENT_STATUS_CAP_TRIG 0x4000
-#define MV88E6XXX_TAI_EVENT_STATUS_ERROR 0x0200
-#define MV88E6XXX_TAI_EVENT_STATUS_VALID 0x0100
-#define MV88E6XXX_TAI_EVENT_STATUS_CTR_MASK 0x00ff
-
-/* Offset 0x0A/0x0B: Event Time */
-#define MV88E6XXX_TAI_EVENT_TIME_LO 0x0a
-#define MV88E6XXX_TAI_EVENT_TYPE_HI 0x0b
+#define MV88E6352_TAI_EVENT_STATUS 0x09
+#define MV88E6352_TAI_EVENT_STATUS_ERROR 0x0200
+#define MV88E6352_TAI_EVENT_STATUS_VALID 0x0100
+#define MV88E6352_TAI_EVENT_STATUS_CTR_MASK 0x00ff
+/* Offset 0x0A/0x0B: Event Time Lo/Hi. Always read with Event Status. */
/* Offset 0x0E/0x0F: PTP Global Time */
-#define MV88E6XXX_TAI_TIME_LO 0x0e
-#define MV88E6XXX_TAI_TIME_HI 0x0f
-
-/* Offset 0x10/0x11: Trig Generation Time */
-#define MV88E6XXX_TAI_TRIG_TIME_LO 0x10
-#define MV88E6XXX_TAI_TRIG_TIME_HI 0x11
-
-/* Offset 0x12: Lock Status */
-#define MV88E6XXX_TAI_LOCK_STATUS 0x12
-
-/* Offset 0x00: Ether Type */
-#define MV88E6XXX_PTP_GC_ETYPE 0x00
+#define MV88E6352_TAI_TIME_LO 0x0e
+#define MV88E6352_TAI_TIME_HI 0x0f
/* 6165 Global Control Registers */
-/* Offset 0x00: Ether Type */
-#define MV88E6XXX_PTP_GC_ETYPE 0x00
-
-/* Offset 0x01: Message ID */
-#define MV88E6XXX_PTP_GC_MESSAGE_ID 0x01
-
-/* Offset 0x02: Time Stamp Arrive Time */
-#define MV88E6XXX_PTP_GC_TS_ARR_PTR 0x02
-
-/* Offset 0x03: Port Arrival Interrupt Enable */
-#define MV88E6XXX_PTP_GC_PORT_ARR_INT_EN 0x03
-
-/* Offset 0x04: Port Departure Interrupt Enable */
-#define MV88E6XXX_PTP_GC_PORT_DEP_INT_EN 0x04
-
-/* Offset 0x05: Configuration */
-#define MV88E6XXX_PTP_GC_CONFIG 0x05
-#define MV88E6XXX_PTP_GC_CONFIG_DIS_OVERWRITE BIT(1)
-#define MV88E6XXX_PTP_GC_CONFIG_DIS_TS BIT(0)
-
-/* Offset 0x8: Interrupt Status */
-#define MV88E6XXX_PTP_GC_INT_STATUS 0x08
-
/* Offset 0x9/0xa: Global Time */
-#define MV88E6XXX_PTP_GC_TIME_LO 0x09
-#define MV88E6XXX_PTP_GC_TIME_HI 0x0A
+#define MV88E6165_PTP_GC_TIME_LO 0x09
+#define MV88E6165_PTP_GC_TIME_HI 0x0A
-/* 6165 Per Port Registers */
+/* 6165 Per Port Registers. The arrival and departure registers are a
+ * common block consisting of status, two time registers and the sequence ID
+ */
/* Offset 0: Arrival Time 0 Status */
#define MV88E6165_PORT_PTP_ARR0_STS 0x00
-/* Offset 0x01/0x02: PTP Arrival 0 Time */
-#define MV88E6165_PORT_PTP_ARR0_TIME_LO 0x01
-#define MV88E6165_PORT_PTP_ARR0_TIME_HI 0x02
-
-/* Offset 0x03: PTP Arrival 0 Sequence ID */
-#define MV88E6165_PORT_PTP_ARR0_SEQID 0x03
-
/* Offset 0x04: PTP Arrival 1 Status */
#define MV88E6165_PORT_PTP_ARR1_STS 0x04
-/* Offset 0x05/0x6E: PTP Arrival 1 Time */
-#define MV88E6165_PORT_PTP_ARR1_TIME_LO 0x05
-#define MV88E6165_PORT_PTP_ARR1_TIME_HI 0x06
-
-/* Offset 0x07: PTP Arrival 1 Sequence ID */
-#define MV88E6165_PORT_PTP_ARR1_SEQID 0x07
-
/* Offset 0x08: PTP Departure Status */
#define MV88E6165_PORT_PTP_DEP_STS 0x08
-/* Offset 0x09/0x0a: PTP Deperture Time */
-#define MV88E6165_PORT_PTP_DEP_TIME_LO 0x09
-#define MV88E6165_PORT_PTP_DEP_TIME_HI 0x0a
-
-/* Offset 0x0b: PTP Departure Sequence ID */
-#define MV88E6165_PORT_PTP_DEP_SEQID 0x0b
-
/* Offset 0x0d: Port Status */
#define MV88E6164_PORT_STATUS 0x0d
#ifdef CONFIG_NET_DSA_MV88E6XXX_PTP
-long mv88e6xxx_hwtstamp_work(struct ptp_clock_info *ptp);
int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip);
void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip);
@@ -149,17 +73,11 @@ void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip);
ptp_clock_info)
extern const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops;
-extern const struct mv88e6xxx_ptp_ops mv88e6250_ptp_ops;
extern const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops;
extern const struct mv88e6xxx_ptp_ops mv88e6390_ptp_ops;
#else /* !CONFIG_NET_DSA_MV88E6XXX_PTP */
-static inline long mv88e6xxx_hwtstamp_work(struct ptp_clock_info *ptp)
-{
- return -1;
-}
-
static inline int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip)
{
return 0;
@@ -170,7 +88,6 @@ static inline void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip)
}
static const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops = {};
-static const struct mv88e6xxx_ptp_ops mv88e6250_ptp_ops = {};
static const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = {};
static const struct mv88e6xxx_ptp_ops mv88e6390_ptp_ops = {};
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index 2dd4e56e1cf1..20ab558fde24 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -1153,6 +1153,9 @@ static void felix_phylink_get_caps(struct dsa_switch *ds, int port,
__set_bit(ocelot->ports[port]->phy_mode,
config->supported_interfaces);
+ if (ocelot->ports[port]->phy_mode == PHY_INTERFACE_MODE_USXGMII)
+ __set_bit(PHY_INTERFACE_MODE_10G_QXGMII,
+ config->supported_interfaces);
}
static void felix_phylink_mac_config(struct phylink_config *config,
@@ -1359,6 +1362,7 @@ static const u32 felix_phy_match_table[PHY_INTERFACE_MODE_MAX] = {
[PHY_INTERFACE_MODE_SGMII] = OCELOT_PORT_MODE_SGMII,
[PHY_INTERFACE_MODE_QSGMII] = OCELOT_PORT_MODE_QSGMII,
[PHY_INTERFACE_MODE_USXGMII] = OCELOT_PORT_MODE_USXGMII,
+ [PHY_INTERFACE_MODE_10G_QXGMII] = OCELOT_PORT_MODE_10G_QXGMII,
[PHY_INTERFACE_MODE_1000BASEX] = OCELOT_PORT_MODE_1000BASEX,
[PHY_INTERFACE_MODE_2500BASEX] = OCELOT_PORT_MODE_2500BASEX,
};
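
felix_phy_match_table translates a phy_interface_t into an OCELOT_PORT_MODE_* capability bit, which is then tested against the per-port mask (vsc9959_port_modes[] below). A simplified reconstruction of the validation step that consumes the table:

/* Simplified reconstruction of the table lookup. */
static int felix_validate_phy_mode(struct felix *felix, int port,
				   phy_interface_t phy_mode)
{
	u32 modes = felix->info->port_modes[port];

	if (felix_phy_match_table[phy_mode] & modes)
		return 0;
	return -EOPNOTSUPP;
}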
diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h
index 211991f494e3..a657b190c5d7 100644
--- a/drivers/net/dsa/ocelot/felix.h
+++ b/drivers/net/dsa/ocelot/felix.h
@@ -12,8 +12,9 @@
#define OCELOT_PORT_MODE_SGMII BIT(1)
#define OCELOT_PORT_MODE_QSGMII BIT(2)
#define OCELOT_PORT_MODE_2500BASEX BIT(3)
-#define OCELOT_PORT_MODE_USXGMII BIT(4)
+#define OCELOT_PORT_MODE_USXGMII BIT(4) /* compatibility */
#define OCELOT_PORT_MODE_1000BASEX BIT(5)
+#define OCELOT_PORT_MODE_10G_QXGMII BIT(6)
struct device_node;
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index 7b35d24c38d7..8cf4c8986587 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -34,7 +34,8 @@
OCELOT_PORT_MODE_QSGMII | \
OCELOT_PORT_MODE_1000BASEX | \
OCELOT_PORT_MODE_2500BASEX | \
- OCELOT_PORT_MODE_USXGMII)
+ OCELOT_PORT_MODE_USXGMII | \
+ OCELOT_PORT_MODE_10G_QXGMII)
static const u32 vsc9959_port_modes[VSC9959_NUM_PORTS] = {
VSC9959_PORT_MODE_SERDES,
diff --git a/drivers/net/dsa/realtek/realtek.h b/drivers/net/dsa/realtek/realtek.h
index a1b2e0b529d5..c03485a80d93 100644
--- a/drivers/net/dsa/realtek/realtek.h
+++ b/drivers/net/dsa/realtek/realtek.h
@@ -19,9 +19,6 @@
struct phylink_mac_ops;
struct realtek_ops;
-struct dentry;
-struct inode;
-struct file;
struct rtl8366_mib_counter {
unsigned int base;
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index f86d4557d8d7..aead145dd91d 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -188,6 +188,7 @@ source "drivers/net/ethernet/sis/Kconfig"
source "drivers/net/ethernet/sfc/Kconfig"
source "drivers/net/ethernet/smsc/Kconfig"
source "drivers/net/ethernet/socionext/Kconfig"
+source "drivers/net/ethernet/spacemit/Kconfig"
source "drivers/net/ethernet/stmicro/Kconfig"
source "drivers/net/ethernet/sun/Kconfig"
source "drivers/net/ethernet/sunplus/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 67182339469a..998dd628b202 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -91,6 +91,7 @@ obj-$(CONFIG_NET_VENDOR_SOLARFLARE) += sfc/
obj-$(CONFIG_NET_VENDOR_SGI) += sgi/
obj-$(CONFIG_NET_VENDOR_SMSC) += smsc/
obj-$(CONFIG_NET_VENDOR_SOCIONEXT) += socionext/
+obj-$(CONFIG_NET_VENDOR_SPACEMIT) += spacemit/
obj-$(CONFIG_NET_VENDOR_STMICRO) += stmicro/
obj-$(CONFIG_NET_VENDOR_SUN) += sun/
obj-$(CONFIG_NET_VENDOR_SUNPLUS) += sunplus/
diff --git a/drivers/net/ethernet/airoha/airoha_eth.c b/drivers/net/ethernet/airoha/airoha_eth.c
index e6b802e3d844..81ea01a652b9 100644
--- a/drivers/net/ethernet/airoha/airoha_eth.c
+++ b/drivers/net/ethernet/airoha/airoha_eth.c
@@ -698,7 +698,8 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
reason = FIELD_GET(AIROHA_RXD4_PPE_CPU_REASON, msg1);
if (reason == PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
- airoha_ppe_check_skb(eth->ppe, q->skb, hash);
+ airoha_ppe_check_skb(&eth->ppe->dev, q->skb, hash,
+ false);
done++;
napi_gro_receive(&q->napi, q->skb);
@@ -2599,13 +2600,15 @@ static int airoha_dev_setup_tc_block_cb(enum tc_setup_type type,
void *type_data, void *cb_priv)
{
struct net_device *dev = cb_priv;
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ struct airoha_eth *eth = port->qdma->eth;
if (!tc_can_offload(dev))
return -EOPNOTSUPP;
switch (type) {
case TC_SETUP_CLSFLOWER:
- return airoha_ppe_setup_tc_block_cb(dev, type_data);
+ return airoha_ppe_setup_tc_block_cb(&eth->ppe->dev, type_data);
case TC_SETUP_CLSMATCHALL:
return airoha_dev_tc_matchall(dev, type_data);
default:
diff --git a/drivers/net/ethernet/airoha/airoha_eth.h b/drivers/net/ethernet/airoha/airoha_eth.h
index a970b789cf23..cd13c1c1224f 100644
--- a/drivers/net/ethernet/airoha/airoha_eth.h
+++ b/drivers/net/ethernet/airoha/airoha_eth.h
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/reset.h>
+#include <linux/soc/airoha/airoha_offload.h>
#include <net/dsa.h>
#define AIROHA_MAX_NUM_GDM_PORTS 4
@@ -229,10 +230,6 @@ struct airoha_hw_stats {
};
enum {
- PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED = 0x0f,
-};
-
-enum {
AIROHA_FOE_STATE_INVALID,
AIROHA_FOE_STATE_UNBIND,
AIROHA_FOE_STATE_BIND,
@@ -252,6 +249,10 @@ enum {
#define AIROHA_FOE_MAC_SMAC_ID GENMASK(20, 16)
#define AIROHA_FOE_MAC_PPPOE_ID GENMASK(15, 0)
+#define AIROHA_FOE_MAC_WDMA_QOS GENMASK(15, 12)
+#define AIROHA_FOE_MAC_WDMA_BAND BIT(11)
+#define AIROHA_FOE_MAC_WDMA_WCID GENMASK(10, 0)
+
struct airoha_foe_mac_info_common {
u16 vlan1;
u16 etype;
@@ -470,7 +471,6 @@ struct airoha_flow_table_entry {
};
};
- struct airoha_foe_entry data;
struct hlist_node l2_subflow_node; /* PPE L2 subflow entry */
u32 hash;
@@ -479,6 +479,16 @@ struct airoha_flow_table_entry {
struct rhash_head node;
unsigned long cookie;
+
+ /* Must be last --ends in a flexible-array member. */
+ struct airoha_foe_entry data;
+};
+
+struct airoha_wdma_info {
+ u8 idx;
+ u8 queue;
+ u16 wcid;
+ u8 bss;
};
/* RX queue to IRQ mapping: BIT(q) in IRQ(n) */
@@ -535,6 +545,7 @@ struct airoha_gdm_port {
#define AIROHA_RXD4_FOE_ENTRY GENMASK(15, 0)
struct airoha_ppe {
+ struct airoha_ppe_dev dev;
struct airoha_eth *eth;
void *foe;
@@ -609,9 +620,9 @@ static inline bool airhoa_is_lan_gdm_port(struct airoha_gdm_port *port)
bool airoha_is_valid_gdm_port(struct airoha_eth *eth,
struct airoha_gdm_port *port);
-void airoha_ppe_check_skb(struct airoha_ppe *ppe, struct sk_buff *skb,
- u16 hash);
-int airoha_ppe_setup_tc_block_cb(struct net_device *dev, void *type_data);
+void airoha_ppe_check_skb(struct airoha_ppe_dev *dev, struct sk_buff *skb,
+ u16 hash, bool rx_wlan);
+int airoha_ppe_setup_tc_block_cb(struct airoha_ppe_dev *dev, void *type_data);
int airoha_ppe_init(struct airoha_eth *eth);
void airoha_ppe_deinit(struct airoha_eth *eth);
void airoha_ppe_init_upd_mem(struct airoha_gdm_port *port);
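
The "Must be last" comment on airoha_flow_table_entry::data follows from C's flexible-array rules: struct airoha_foe_entry ends in a flexible array member, and a struct with such a tail may only be embedded as the final member of its container (the kernel's -Wflex-array-member-not-at-end warning enforces this). Minimal standalone illustration:

struct foe {
	unsigned int ib1;
	unsigned int data[];	/* flexible array member */
};

struct entry_ok {
	unsigned long cookie;
	struct foe data;	/* fine: last member of the container */
};

/*
 * struct entry_bad {
 *	struct foe data;	// warned: flex-array member not at end
 *	unsigned long cookie;
 * };
 */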
diff --git a/drivers/net/ethernet/airoha/airoha_npu.c b/drivers/net/ethernet/airoha/airoha_npu.c
index a802f95df99d..8c883f2b2d36 100644
--- a/drivers/net/ethernet/airoha/airoha_npu.c
+++ b/drivers/net/ethernet/airoha/airoha_npu.c
@@ -13,7 +13,6 @@
#include <linux/regmap.h>
#include "airoha_eth.h"
-#include "airoha_npu.h"
#define NPU_EN7581_FIRMWARE_DATA "airoha/en7581_npu_data.bin"
#define NPU_EN7581_FIRMWARE_RV32 "airoha/en7581_npu_rv32.bin"
@@ -42,6 +41,22 @@
#define REG_CR_MBQ8_CTRL(_n) (NPU_MBOX_BASE_ADDR + 0x0b0 + ((_n) << 2))
#define REG_CR_NPU_MIB(_n) (NPU_MBOX_BASE_ADDR + 0x140 + ((_n) << 2))
+#define NPU_WLAN_BASE_ADDR 0x30d000
+
+#define REG_IRQ_STATUS (NPU_WLAN_BASE_ADDR + 0x030)
+#define REG_IRQ_RXDONE(_n) (NPU_WLAN_BASE_ADDR + ((_n) << 2) + 0x034)
+#define NPU_IRQ_RX_MASK(_n) ((_n) == 1 ? BIT(17) : BIT(16))
+
+#define REG_TX_BASE(_n) (NPU_WLAN_BASE_ADDR + ((_n) << 4) + 0x080)
+#define REG_TX_DSCP_NUM(_n) (NPU_WLAN_BASE_ADDR + ((_n) << 4) + 0x084)
+#define REG_TX_CPU_IDX(_n) (NPU_WLAN_BASE_ADDR + ((_n) << 4) + 0x088)
+#define REG_TX_DMA_IDX(_n) (NPU_WLAN_BASE_ADDR + ((_n) << 4) + 0x08c)
+
+#define REG_RX_BASE(_n) (NPU_WLAN_BASE_ADDR + ((_n) << 4) + 0x180)
+#define REG_RX_DSCP_NUM(_n) (NPU_WLAN_BASE_ADDR + ((_n) << 4) + 0x184)
+#define REG_RX_CPU_IDX(_n) (NPU_WLAN_BASE_ADDR + ((_n) << 4) + 0x188)
+#define REG_RX_DMA_IDX(_n) (NPU_WLAN_BASE_ADDR + ((_n) << 4) + 0x18c)
+
#define NPU_TIMER_BASE_ADDR 0x310100
#define REG_WDT_TIMER_CTRL(_n) (NPU_TIMER_BASE_ADDR + ((_n) * 0x100))
#define WDT_EN_MASK BIT(25)
@@ -124,6 +139,13 @@ struct ppe_mbox_data {
};
};
+struct wlan_mbox_data {
+ u32 ifindex:4;
+ u32 func_type:4;
+ u32 func_id;
+ DECLARE_FLEX_ARRAY(u8, d);
+};
+
static int airoha_npu_send_msg(struct airoha_npu *npu, int func_id,
void *p, int size)
{
@@ -357,15 +379,13 @@ out:
return err;
}
-static int airoha_npu_stats_setup(struct airoha_npu *npu,
- dma_addr_t foe_stats_addr)
+static int airoha_npu_ppe_stats_setup(struct airoha_npu *npu,
+ dma_addr_t foe_stats_addr,
+ u32 num_stats_entries)
{
- int err, size = PPE_STATS_NUM_ENTRIES * sizeof(*npu->stats);
+ int err, size = num_stats_entries * sizeof(*npu->stats);
struct ppe_mbox_data *ppe_data;
- if (!size) /* flow stats are disabled */
- return 0;
-
ppe_data = kzalloc(sizeof(*ppe_data), GFP_ATOMIC);
if (!ppe_data)
return -ENOMEM;
@@ -390,7 +410,137 @@ out:
return err;
}
-struct airoha_npu *airoha_npu_get(struct device *dev, dma_addr_t *stats_addr)
+static int airoha_npu_wlan_msg_send(struct airoha_npu *npu, int ifindex,
+ enum airoha_npu_wlan_set_cmd func_id,
+ void *data, int data_len, gfp_t gfp)
+{
+ struct wlan_mbox_data *wlan_data;
+ int err, len;
+
+ len = sizeof(*wlan_data) + data_len;
+ wlan_data = kzalloc(len, gfp);
+ if (!wlan_data)
+ return -ENOMEM;
+
+ wlan_data->ifindex = ifindex;
+ wlan_data->func_type = NPU_OP_SET;
+ wlan_data->func_id = func_id;
+ memcpy(wlan_data->d, data, data_len);
+
+ err = airoha_npu_send_msg(npu, NPU_FUNC_WIFI, wlan_data, len);
+ kfree(wlan_data);
+
+ return err;
+}
+
+static int airoha_npu_wlan_msg_get(struct airoha_npu *npu, int ifindex,
+ enum airoha_npu_wlan_get_cmd func_id,
+ void *data, int data_len, gfp_t gfp)
+{
+ struct wlan_mbox_data *wlan_data;
+ int err, len;
+
+ len = sizeof(*wlan_data) + data_len;
+ wlan_data = kzalloc(len, gfp);
+ if (!wlan_data)
+ return -ENOMEM;
+
+ wlan_data->ifindex = ifindex;
+ wlan_data->func_type = NPU_OP_GET;
+ wlan_data->func_id = func_id;
+
+ err = airoha_npu_send_msg(npu, NPU_FUNC_WIFI, wlan_data, len);
+ if (!err)
+ memcpy(data, wlan_data->d, data_len);
+ kfree(wlan_data);
+
+ return err;
+}
+
+static int
+airoha_npu_wlan_set_reserved_memory(struct airoha_npu *npu,
+ int ifindex, const char *name,
+ enum airoha_npu_wlan_set_cmd func_id)
+{
+ struct device *dev = npu->dev;
+ struct resource res;
+ int err;
+ u32 val;
+
+ err = of_reserved_mem_region_to_resource_byname(dev->of_node, name,
+ &res);
+ if (err)
+ return err;
+
+ val = res.start;
+ return airoha_npu_wlan_msg_send(npu, ifindex, func_id, &val,
+ sizeof(val), GFP_KERNEL);
+}
+
+static int airoha_npu_wlan_init_memory(struct airoha_npu *npu)
+{
+ enum airoha_npu_wlan_set_cmd cmd = WLAN_FUNC_SET_WAIT_NPU_BAND0_ONCPU;
+ u32 val = 0;
+ int err;
+
+ err = airoha_npu_wlan_msg_send(npu, 1, cmd, &val, sizeof(val),
+ GFP_KERNEL);
+ if (err)
+ return err;
+
+ cmd = WLAN_FUNC_SET_WAIT_TX_BUF_CHECK_ADDR;
+ err = airoha_npu_wlan_set_reserved_memory(npu, 0, "tx-bufid", cmd);
+ if (err)
+ return err;
+
+ cmd = WLAN_FUNC_SET_WAIT_PKT_BUF_ADDR;
+ err = airoha_npu_wlan_set_reserved_memory(npu, 0, "pkt", cmd);
+ if (err)
+ return err;
+
+ cmd = WLAN_FUNC_SET_WAIT_TX_PKT_BUF_ADDR;
+ err = airoha_npu_wlan_set_reserved_memory(npu, 0, "tx-pkt", cmd);
+ if (err)
+ return err;
+
+ cmd = WLAN_FUNC_SET_WAIT_IS_FORCE_TO_CPU;
+ return airoha_npu_wlan_msg_send(npu, 0, cmd, &val, sizeof(val),
+ GFP_KERNEL);
+}
+
+static u32 airoha_npu_wlan_queue_addr_get(struct airoha_npu *npu, int qid,
+ bool xmit)
+{
+ if (xmit)
+ return REG_TX_BASE(qid + 2);
+
+ return REG_RX_BASE(qid);
+}
+
+static void airoha_npu_wlan_irq_status_set(struct airoha_npu *npu, u32 val)
+{
+ regmap_write(npu->regmap, REG_IRQ_STATUS, val);
+}
+
+static u32 airoha_npu_wlan_irq_status_get(struct airoha_npu *npu, int q)
+{
+ u32 val;
+
+ regmap_read(npu->regmap, REG_IRQ_STATUS, &val);
+ return val;
+}
+
+static void airoha_npu_wlan_irq_enable(struct airoha_npu *npu, int q)
+{
+ regmap_set_bits(npu->regmap, REG_IRQ_RXDONE(q), NPU_IRQ_RX_MASK(q));
+}
+
+static void airoha_npu_wlan_irq_disable(struct airoha_npu *npu, int q)
+{
+ regmap_clear_bits(npu->regmap, REG_IRQ_RXDONE(q), NPU_IRQ_RX_MASK(q));
+}
+
+struct airoha_npu *airoha_npu_get(struct device *dev)
{
struct platform_device *pdev;
struct device_node *np;
@@ -429,17 +579,6 @@ struct airoha_npu *airoha_npu_get(struct device *dev, dma_addr_t *stats_addr)
goto error_module_put;
}
- if (stats_addr) {
- int err;
-
- err = airoha_npu_stats_setup(npu, *stats_addr);
- if (err) {
- dev_err(dev, "failed to allocate npu stats buffer\n");
- npu = ERR_PTR(err);
- goto error_module_put;
- }
- }
-
return npu;
error_module_put:
@@ -491,8 +630,17 @@ static int airoha_npu_probe(struct platform_device *pdev)
npu->dev = dev;
npu->ops.ppe_init = airoha_npu_ppe_init;
npu->ops.ppe_deinit = airoha_npu_ppe_deinit;
+ npu->ops.ppe_init_stats = airoha_npu_ppe_stats_setup;
npu->ops.ppe_flush_sram_entries = airoha_npu_ppe_flush_sram_entries;
npu->ops.ppe_foe_commit_entry = airoha_npu_foe_commit_entry;
+ npu->ops.wlan_init_reserved_memory = airoha_npu_wlan_init_memory;
+ npu->ops.wlan_send_msg = airoha_npu_wlan_msg_send;
+ npu->ops.wlan_get_msg = airoha_npu_wlan_msg_get;
+ npu->ops.wlan_get_queue_addr = airoha_npu_wlan_queue_addr_get;
+ npu->ops.wlan_set_irq_status = airoha_npu_wlan_irq_status_set;
+ npu->ops.wlan_get_irq_status = airoha_npu_wlan_irq_status_get;
+ npu->ops.wlan_enable_irq = airoha_npu_wlan_irq_enable;
+ npu->ops.wlan_disable_irq = airoha_npu_wlan_irq_disable;
npu->regmap = devm_regmap_init_mmio(dev, base, &regmap_config);
if (IS_ERR(npu->regmap))
@@ -529,6 +677,15 @@ static int airoha_npu_probe(struct platform_device *pdev)
INIT_WORK(&core->wdt_work, airoha_npu_wdt_work);
}
+ /* wlan IRQ lines */
+ for (i = 0; i < ARRAY_SIZE(npu->irqs); i++) {
+ irq = platform_get_irq(pdev, i + ARRAY_SIZE(npu->cores) + 1);
+ if (irq < 0)
+ return irq;
+
+ npu->irqs[i] = irq;
+ }
+
err = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
if (err)
return err;
@@ -550,8 +707,7 @@ static int airoha_npu_probe(struct platform_device *pdev)
usleep_range(1000, 2000);
/* enable NPU cores */
- /* do not start core3 since it is used for WiFi offloading */
- regmap_write(npu->regmap, REG_CR_BOOT_CONFIG, 0xf7);
+ regmap_write(npu->regmap, REG_CR_BOOT_CONFIG, 0xff);
regmap_write(npu->regmap, REG_CR_BOOT_TRIGGER, 0x1);
msleep(100);
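
The new wlan helpers reuse the existing NPU mailbox: build a wlan_mbox_data carrying ifindex/func_type/func_id plus payload and send it as NPU_FUNC_WIFI. External users reach them through npu->ops; a sketch of pushing one u32 parameter (the command name is from the patch, the value is made up):

u32 val = 1;	/* made-up parameter value */
int err;

err = npu->ops.wlan_send_msg(npu, 0, WLAN_FUNC_SET_WAIT_IS_FORCE_TO_CPU,
			     &val, sizeof(val), GFP_KERNEL);
if (err)
	dev_err(npu->dev, "wlan mailbox write failed: %d\n", err);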
diff --git a/drivers/net/ethernet/airoha/airoha_npu.h b/drivers/net/ethernet/airoha/airoha_npu.h
deleted file mode 100644
index 98ec3be74ce4..000000000000
--- a/drivers/net/ethernet/airoha/airoha_npu.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2025 AIROHA Inc
- * Author: Lorenzo Bianconi <lorenzo@kernel.org>
- */
-
-#define NPU_NUM_CORES 8
-
-struct airoha_npu {
- struct device *dev;
- struct regmap *regmap;
-
- struct airoha_npu_core {
- struct airoha_npu *npu;
- /* protect concurrent npu memory accesses */
- spinlock_t lock;
- struct work_struct wdt_work;
- } cores[NPU_NUM_CORES];
-
- struct airoha_foe_stats __iomem *stats;
-
- struct {
- int (*ppe_init)(struct airoha_npu *npu);
- int (*ppe_deinit)(struct airoha_npu *npu);
- int (*ppe_flush_sram_entries)(struct airoha_npu *npu,
- dma_addr_t foe_addr,
- int sram_num_entries);
- int (*ppe_foe_commit_entry)(struct airoha_npu *npu,
- dma_addr_t foe_addr,
- u32 entry_size, u32 hash,
- bool ppe2);
- } ops;
-};
-
-struct airoha_npu *airoha_npu_get(struct device *dev, dma_addr_t *stats_addr);
-void airoha_npu_put(struct airoha_npu *npu);
diff --git a/drivers/net/ethernet/airoha/airoha_ppe.c b/drivers/net/ethernet/airoha/airoha_ppe.c
index 88694b08afa1..691361b25407 100644
--- a/drivers/net/ethernet/airoha/airoha_ppe.c
+++ b/drivers/net/ethernet/airoha/airoha_ppe.c
@@ -6,11 +6,12 @@
#include <linux/ip.h>
#include <linux/ipv6.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/rhashtable.h>
#include <net/ipv6.h>
#include <net/pkt_cls.h>
-#include "airoha_npu.h"
#include "airoha_regs.h"
#include "airoha_eth.h"
@@ -190,6 +191,31 @@ static int airoha_ppe_flow_mangle_ipv4(const struct flow_action_entry *act,
return 0;
}
+static int airoha_ppe_get_wdma_info(struct net_device *dev, const u8 *addr,
+ struct airoha_wdma_info *info)
+{
+ struct net_device_path_stack stack;
+ struct net_device_path *path;
+ int err;
+
+ if (!dev)
+ return -ENODEV;
+
+ err = dev_fill_forward_path(dev, addr, &stack);
+ if (err)
+ return err;
+
+ path = &stack.path[stack.num_paths - 1];
+ if (path->type != DEV_PATH_MTK_WDMA)
+ return -1;
+
+ info->idx = path->mtk_wdma.wdma_idx;
+ info->bss = path->mtk_wdma.bss;
+ info->wcid = path->mtk_wdma.wcid;
+
+ return 0;
+}
+
static int airoha_get_dsa_port(struct net_device **dev)
{
#if IS_ENABLED(CONFIG_NET_DSA)
@@ -220,9 +246,9 @@ static int airoha_ppe_foe_entry_prepare(struct airoha_eth *eth,
struct airoha_flow_data *data,
int l4proto)
{
- int dsa_port = airoha_get_dsa_port(&dev);
+ u32 qdata = FIELD_PREP(AIROHA_FOE_SHAPER_ID, 0x7f), ports_pad, val;
+ int wlan_etype = -EINVAL, dsa_port = airoha_get_dsa_port(&dev);
struct airoha_foe_mac_info_common *l2;
- u32 qdata, ports_pad, val;
u8 smac_id = 0xf;
memset(hwe, 0, sizeof(*hwe));
@@ -236,31 +262,47 @@ static int airoha_ppe_foe_entry_prepare(struct airoha_eth *eth,
AIROHA_FOE_IB1_BIND_TTL;
hwe->ib1 = val;
- val = FIELD_PREP(AIROHA_FOE_IB2_PORT_AG, 0x1f) |
- AIROHA_FOE_IB2_PSE_QOS;
- if (dsa_port >= 0)
- val |= FIELD_PREP(AIROHA_FOE_IB2_NBQ, dsa_port);
-
+ val = FIELD_PREP(AIROHA_FOE_IB2_PORT_AG, 0x1f);
if (dev) {
- struct airoha_gdm_port *port = netdev_priv(dev);
- u8 pse_port;
-
- if (!airoha_is_valid_gdm_port(eth, port))
- return -EINVAL;
-
- if (dsa_port >= 0)
- pse_port = port->id == 4 ? FE_PSE_PORT_GDM4 : port->id;
- else
- pse_port = 2; /* uplink relies on GDM2 loopback */
- val |= FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT, pse_port);
-
- /* For downlink traffic consume SRAM memory for hw forwarding
- * descriptors queue.
- */
- if (airhoa_is_lan_gdm_port(port))
- val |= AIROHA_FOE_IB2_FAST_PATH;
-
- smac_id = port->id;
+ struct airoha_wdma_info info = {};
+
+ if (!airoha_ppe_get_wdma_info(dev, data->eth.h_dest, &info)) {
+ val |= FIELD_PREP(AIROHA_FOE_IB2_NBQ, info.idx) |
+ FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT,
+ FE_PSE_PORT_CDM4);
+ qdata |= FIELD_PREP(AIROHA_FOE_ACTDP, info.bss);
+ wlan_etype = FIELD_PREP(AIROHA_FOE_MAC_WDMA_BAND,
+ info.idx) |
+ FIELD_PREP(AIROHA_FOE_MAC_WDMA_WCID,
+ info.wcid);
+ } else {
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ u8 pse_port;
+
+ if (!airoha_is_valid_gdm_port(eth, port))
+ return -EINVAL;
+
+ if (dsa_port >= 0)
+ pse_port = port->id == 4 ? FE_PSE_PORT_GDM4
+ : port->id;
+ else
+ pse_port = 2; /* uplink relies on GDM2
+ * loopback
+ */
+
+ val |= FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT, pse_port) |
+ AIROHA_FOE_IB2_PSE_QOS;
+ /* For downlink traffic consume SRAM memory for hw
+ * forwarding descriptors queue.
+ */
+ if (airhoa_is_lan_gdm_port(port))
+ val |= AIROHA_FOE_IB2_FAST_PATH;
+ if (dsa_port >= 0)
+ val |= FIELD_PREP(AIROHA_FOE_IB2_NBQ,
+ dsa_port);
+
+ smac_id = port->id;
+ }
}
if (is_multicast_ether_addr(data->eth.h_dest))
@@ -272,7 +314,6 @@ static int airoha_ppe_foe_entry_prepare(struct airoha_eth *eth,
if (type == PPE_PKT_TYPE_IPV6_ROUTE_3T)
hwe->ipv6.ports = ports_pad;
- qdata = FIELD_PREP(AIROHA_FOE_SHAPER_ID, 0x7f);
if (type == PPE_PKT_TYPE_BRIDGE) {
airoha_ppe_foe_set_bridge_addrs(&hwe->bridge, &data->eth);
hwe->bridge.data = qdata;
@@ -313,7 +354,9 @@ static int airoha_ppe_foe_entry_prepare(struct airoha_eth *eth,
l2->vlan2 = data->vlan.hdr[1].id;
}
- if (dsa_port >= 0) {
+ if (wlan_etype >= 0) {
+ l2->etype = wlan_etype;
+ } else if (dsa_port >= 0) {
l2->etype = BIT(dsa_port);
l2->etype |= !data->vlan.num ? BIT(15) : 0;
} else if (data->pppoe.num) {
@@ -490,6 +533,10 @@ static void airoha_ppe_foe_flow_stats_update(struct airoha_ppe *ppe,
meter = &hwe->ipv4.l2.meter;
}
+ pse_port = FIELD_GET(AIROHA_FOE_IB2_PSE_PORT, *ib2);
+ if (pse_port == FE_PSE_PORT_CDM4)
+ return;
+
airoha_ppe_foe_flow_stat_entry_reset(ppe, npu, index);
val = FIELD_GET(AIROHA_FOE_CHANNEL | AIROHA_FOE_QID, *data);
@@ -500,7 +547,6 @@ static void airoha_ppe_foe_flow_stats_update(struct airoha_ppe *ppe,
AIROHA_FOE_IB2_PSE_QOS | AIROHA_FOE_IB2_FAST_PATH);
*meter |= FIELD_PREP(AIROHA_FOE_TUNNEL_MTU, val);
- pse_port = FIELD_GET(AIROHA_FOE_IB2_PSE_PORT, *ib2);
nbq = pse_port == 1 ? 6 : 5;
*ib2 &= ~(AIROHA_FOE_IB2_NBQ | AIROHA_FOE_IB2_PSE_PORT |
AIROHA_FOE_IB2_PSE_QOS);
@@ -570,7 +616,7 @@ static bool airoha_ppe_foe_compare_entry(struct airoha_flow_table_entry *e,
static int airoha_ppe_foe_commit_entry(struct airoha_ppe *ppe,
struct airoha_foe_entry *e,
- u32 hash)
+ u32 hash, bool rx_wlan)
{
struct airoha_foe_entry *hwe = ppe->foe + hash * sizeof(*hwe);
u32 ts = airoha_ppe_get_timestamp(ppe);
@@ -593,7 +639,8 @@ static int airoha_ppe_foe_commit_entry(struct airoha_ppe *ppe,
goto unlock;
}
- airoha_ppe_foe_flow_stats_update(ppe, npu, hwe, hash);
+ if (!rx_wlan)
+ airoha_ppe_foe_flow_stats_update(ppe, npu, hwe, hash);
if (hash < PPE_SRAM_NUM_ENTRIES) {
dma_addr_t addr = ppe->foe_dma + hash * sizeof(*hwe);
@@ -619,7 +666,7 @@ static void airoha_ppe_foe_remove_flow(struct airoha_ppe *ppe,
e->data.ib1 &= ~AIROHA_FOE_IB1_BIND_STATE;
e->data.ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_STATE,
AIROHA_FOE_STATE_INVALID);
- airoha_ppe_foe_commit_entry(ppe, &e->data, e->hash);
+ airoha_ppe_foe_commit_entry(ppe, &e->data, e->hash, false);
e->hash = 0xffff;
}
if (e->type == FLOW_TYPE_L2_SUBFLOW) {
@@ -658,7 +705,7 @@ static void airoha_ppe_foe_flow_remove_entry(struct airoha_ppe *ppe,
static int
airoha_ppe_foe_commit_subflow_entry(struct airoha_ppe *ppe,
struct airoha_flow_table_entry *e,
- u32 hash)
+ u32 hash, bool rx_wlan)
{
u32 mask = AIROHA_FOE_IB1_BIND_PACKET_TYPE | AIROHA_FOE_IB1_BIND_UDP;
struct airoha_foe_entry *hwe_p, hwe;
@@ -699,14 +746,14 @@ airoha_ppe_foe_commit_subflow_entry(struct airoha_ppe *ppe,
}
hwe.bridge.data = e->data.bridge.data;
- airoha_ppe_foe_commit_entry(ppe, &hwe, hash);
+ airoha_ppe_foe_commit_entry(ppe, &hwe, hash, rx_wlan);
return 0;
}
static void airoha_ppe_foe_insert_entry(struct airoha_ppe *ppe,
struct sk_buff *skb,
- u32 hash)
+ u32 hash, bool rx_wlan)
{
struct airoha_flow_table_entry *e;
struct airoha_foe_bridge br = {};
@@ -739,7 +786,7 @@ static void airoha_ppe_foe_insert_entry(struct airoha_ppe *ppe,
if (!airoha_ppe_foe_compare_entry(e, hwe))
continue;
- airoha_ppe_foe_commit_entry(ppe, &e->data, hash);
+ airoha_ppe_foe_commit_entry(ppe, &e->data, hash, rx_wlan);
commit_done = true;
e->hash = hash;
}
@@ -751,7 +798,7 @@ static void airoha_ppe_foe_insert_entry(struct airoha_ppe *ppe,
e = rhashtable_lookup_fast(&ppe->l2_flows, &br,
airoha_l2_flow_table_params);
if (e)
- airoha_ppe_foe_commit_subflow_entry(ppe, e, hash);
+ airoha_ppe_foe_commit_subflow_entry(ppe, e, hash, rx_wlan);
unlock:
spin_unlock_bh(&ppe_lock);
}
@@ -890,11 +937,10 @@ static int airoha_ppe_entry_idle_time(struct airoha_ppe *ppe,
return airoha_ppe_get_entry_idle_time(ppe, e->data.ib1);
}
-static int airoha_ppe_flow_offload_replace(struct airoha_gdm_port *port,
+static int airoha_ppe_flow_offload_replace(struct airoha_eth *eth,
struct flow_cls_offload *f)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
- struct airoha_eth *eth = port->qdma->eth;
struct airoha_flow_table_entry *e;
struct airoha_flow_data data = {};
struct net_device *odev = NULL;
@@ -1091,10 +1137,9 @@ free_entry:
return err;
}
-static int airoha_ppe_flow_offload_destroy(struct airoha_gdm_port *port,
+static int airoha_ppe_flow_offload_destroy(struct airoha_eth *eth,
struct flow_cls_offload *f)
{
- struct airoha_eth *eth = port->qdma->eth;
struct airoha_flow_table_entry *e;
e = rhashtable_lookup(&eth->flow_table, &f->cookie,
@@ -1137,10 +1182,9 @@ void airoha_ppe_foe_entry_get_stats(struct airoha_ppe *ppe, u32 hash,
rcu_read_unlock();
}
-static int airoha_ppe_flow_offload_stats(struct airoha_gdm_port *port,
+static int airoha_ppe_flow_offload_stats(struct airoha_eth *eth,
struct flow_cls_offload *f)
{
- struct airoha_eth *eth = port->qdma->eth;
struct airoha_flow_table_entry *e;
u32 idle;
@@ -1164,16 +1208,16 @@ static int airoha_ppe_flow_offload_stats(struct airoha_gdm_port *port,
return 0;
}
-static int airoha_ppe_flow_offload_cmd(struct airoha_gdm_port *port,
+static int airoha_ppe_flow_offload_cmd(struct airoha_eth *eth,
struct flow_cls_offload *f)
{
switch (f->command) {
case FLOW_CLS_REPLACE:
- return airoha_ppe_flow_offload_replace(port, f);
+ return airoha_ppe_flow_offload_replace(eth, f);
case FLOW_CLS_DESTROY:
- return airoha_ppe_flow_offload_destroy(port, f);
+ return airoha_ppe_flow_offload_destroy(eth, f);
case FLOW_CLS_STATS:
- return airoha_ppe_flow_offload_stats(port, f);
+ return airoha_ppe_flow_offload_stats(eth, f);
default:
break;
}
@@ -1199,12 +1243,11 @@ static int airoha_ppe_flush_sram_entries(struct airoha_ppe *ppe,
static struct airoha_npu *airoha_ppe_npu_get(struct airoha_eth *eth)
{
- struct airoha_npu *npu = airoha_npu_get(eth->dev,
- &eth->ppe->foe_stats_dma);
+ struct airoha_npu *npu = airoha_npu_get(eth->dev);
if (IS_ERR(npu)) {
request_module("airoha-npu");
- npu = airoha_npu_get(eth->dev, &eth->ppe->foe_stats_dma);
+ npu = airoha_npu_get(eth->dev);
}
return npu;
@@ -1213,6 +1256,7 @@ static struct airoha_npu *airoha_ppe_npu_get(struct airoha_eth *eth)
static int airoha_ppe_offload_setup(struct airoha_eth *eth)
{
struct airoha_npu *npu = airoha_ppe_npu_get(eth);
+ struct airoha_ppe *ppe = eth->ppe;
int err;
if (IS_ERR(npu))
@@ -1222,12 +1266,19 @@ static int airoha_ppe_offload_setup(struct airoha_eth *eth)
if (err)
goto error_npu_put;
- airoha_ppe_hw_init(eth->ppe);
- err = airoha_ppe_flush_sram_entries(eth->ppe, npu);
+ if (PPE_STATS_NUM_ENTRIES) {
+ err = npu->ops.ppe_init_stats(npu, ppe->foe_stats_dma,
+ PPE_STATS_NUM_ENTRIES);
+ if (err)
+ goto error_npu_put;
+ }
+
+ airoha_ppe_hw_init(ppe);
+ err = airoha_ppe_flush_sram_entries(ppe, npu);
if (err)
goto error_npu_put;
- airoha_ppe_foe_flow_stats_reset(eth->ppe, npu);
+ airoha_ppe_foe_flow_stats_reset(ppe, npu);
rcu_assign_pointer(eth->npu, npu);
synchronize_rcu();
@@ -1240,11 +1291,10 @@ error_npu_put:
return err;
}
-int airoha_ppe_setup_tc_block_cb(struct net_device *dev, void *type_data)
+int airoha_ppe_setup_tc_block_cb(struct airoha_ppe_dev *dev, void *type_data)
{
- struct airoha_gdm_port *port = netdev_priv(dev);
- struct flow_cls_offload *cls = type_data;
- struct airoha_eth *eth = port->qdma->eth;
+ struct airoha_ppe *ppe = dev->priv;
+ struct airoha_eth *eth = ppe->eth;
int err = 0;
mutex_lock(&flow_offload_mutex);
@@ -1252,16 +1302,17 @@ int airoha_ppe_setup_tc_block_cb(struct net_device *dev, void *type_data)
if (!eth->npu)
err = airoha_ppe_offload_setup(eth);
if (!err)
- err = airoha_ppe_flow_offload_cmd(port, cls);
+ err = airoha_ppe_flow_offload_cmd(eth, type_data);
mutex_unlock(&flow_offload_mutex);
return err;
}
-void airoha_ppe_check_skb(struct airoha_ppe *ppe, struct sk_buff *skb,
- u16 hash)
+void airoha_ppe_check_skb(struct airoha_ppe_dev *dev, struct sk_buff *skb,
+ u16 hash, bool rx_wlan)
{
+ struct airoha_ppe *ppe = dev->priv;
u16 now, diff;
if (hash > PPE_HASH_MASK)
@@ -1273,7 +1324,7 @@ void airoha_ppe_check_skb(struct airoha_ppe *ppe, struct sk_buff *skb,
return;
ppe->foe_check_time[hash] = now;
- airoha_ppe_foe_insert_entry(ppe, skb, hash);
+ airoha_ppe_foe_insert_entry(ppe, skb, hash, rx_wlan);
}
void airoha_ppe_init_upd_mem(struct airoha_gdm_port *port)
@@ -1297,6 +1348,61 @@ void airoha_ppe_init_upd_mem(struct airoha_gdm_port *port)
PPE_UPDMEM_WR_MASK | PPE_UPDMEM_REQ_MASK);
}
+struct airoha_ppe_dev *airoha_ppe_get_dev(struct device *dev)
+{
+ struct platform_device *pdev;
+ struct device_node *np;
+ struct airoha_eth *eth;
+
+ np = of_parse_phandle(dev->of_node, "airoha,eth", 0);
+ if (!np)
+ return ERR_PTR(-ENODEV);
+
+ pdev = of_find_device_by_node(np);
+ if (!pdev) {
+ dev_err(dev, "cannot find device node %s\n", np->name);
+ of_node_put(np);
+ return ERR_PTR(-ENODEV);
+ }
+ of_node_put(np);
+
+ if (!try_module_get(THIS_MODULE)) {
+ dev_err(dev, "failed to get the device driver module\n");
+ goto error_pdev_put;
+ }
+
+ eth = platform_get_drvdata(pdev);
+ if (!eth)
+ goto error_module_put;
+
+ if (!device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_SUPPLIER)) {
+ dev_err(&pdev->dev,
+ "failed to create device link to consumer %s\n",
+ dev_name(dev));
+ goto error_module_put;
+ }
+
+ return &eth->ppe->dev;
+
+error_module_put:
+ module_put(THIS_MODULE);
+error_pdev_put:
+ platform_device_put(pdev);
+
+ return ERR_PTR(-ENODEV);
+}
+EXPORT_SYMBOL_GPL(airoha_ppe_get_dev);
+
+void airoha_ppe_put_dev(struct airoha_ppe_dev *dev)
+{
+ struct airoha_ppe *ppe = dev->priv;
+ struct airoha_eth *eth = ppe->eth;
+
+ module_put(THIS_MODULE);
+ put_device(eth->dev);
+}
+EXPORT_SYMBOL_GPL(airoha_ppe_put_dev);
+
int airoha_ppe_init(struct airoha_eth *eth)
{
struct airoha_ppe *ppe;
@@ -1306,6 +1412,10 @@ int airoha_ppe_init(struct airoha_eth *eth)
if (!ppe)
return -ENOMEM;
+ ppe->dev.ops.setup_tc_block_cb = airoha_ppe_setup_tc_block_cb;
+ ppe->dev.ops.check_skb = airoha_ppe_check_skb;
+ ppe->dev.priv = ppe;
+
foe_size = PPE_NUM_ENTRIES * sizeof(struct airoha_foe_entry);
ppe->foe = dmam_alloc_coherent(eth->dev, foe_size, &ppe->foe_dma,
GFP_KERNEL);
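
airoha_ppe_get_dev()/airoha_ppe_put_dev() hand a refcounted PPE handle to an external consumer (the WLAN offload path): the "airoha,eth" phandle lookup pins the provider module, and a device link keeps the ethernet driver bound while the handle is live. Consumer-side sketch; only the two exports and the ops signatures come from the patch:

struct airoha_ppe_dev *ppe_dev;

ppe_dev = airoha_ppe_get_dev(&pdev->dev);  /* follows "airoha,eth" phandle */
if (IS_ERR(ppe_dev))
	return PTR_ERR(ppe_dev);

/* On an RX packet flagged for flow-table insertion: */
ppe_dev->ops.check_skb(ppe_dev, skb, hash, true);	/* rx_wlan */

/* On teardown: */
airoha_ppe_put_dev(ppe_dev);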
diff --git a/drivers/net/ethernet/airoha/airoha_regs.h b/drivers/net/ethernet/airoha/airoha_regs.h
index 150c85995cc1..e1c15c20be8e 100644
--- a/drivers/net/ethernet/airoha/airoha_regs.h
+++ b/drivers/net/ethernet/airoha/airoha_regs.h
@@ -237,8 +237,8 @@
#define PPE_FLOW_CFG_IP4_TCP_FRAG_MASK BIT(6)
#define REG_PPE_IP_PROTO_CHK(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x208)
-#define PPE_IP_PROTO_CHK_IPV4_MASK GENMASK(15, 0)
-#define PPE_IP_PROTO_CHK_IPV6_MASK GENMASK(31, 16)
+#define PPE_IP_PROTO_CHK_IPV4_MASK GENMASK(31, 16)
+#define PPE_IP_PROTO_CHK_IPV6_MASK GENMASK(15, 0)
#define REG_PPE_TB_CFG(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x21c)
#define PPE_SRAM_TB_NUM_ENTRY_MASK GENMASK(26, 24)
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index a81d3a7a3bb9..fe3479b84a1f 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -865,7 +865,10 @@ static u32 ena_get_rxfh_indir_size(struct net_device *netdev)
static u32 ena_get_rxfh_key_size(struct net_device *netdev)
{
- return ENA_HASH_KEY_SIZE;
+ struct ena_adapter *adapter = netdev_priv(netdev);
+ struct ena_rss *rss = &adapter->ena_dev->rss;
+
+ return rss->hash_key ? ENA_HASH_KEY_SIZE : 0;
}
static int ena_indirection_table_set(struct ena_adapter *adapter,
diff --git a/drivers/net/ethernet/amd/pds_core/main.c b/drivers/net/ethernet/amd/pds_core/main.c
index 9b81e1c260c2..c7a2eff57632 100644
--- a/drivers/net/ethernet/amd/pds_core/main.c
+++ b/drivers/net/ethernet/amd/pds_core/main.c
@@ -280,7 +280,7 @@ static int pdsc_init_pf(struct pdsc *pdsc)
goto err_out_del_dev;
}
- hr = devl_health_reporter_create(dl, &pdsc_fw_reporter_ops, 0, pdsc);
+ hr = devl_health_reporter_create(dl, &pdsc_fw_reporter_ops, pdsc);
if (IS_ERR(hr)) {
devl_unlock(dl);
dev_warn(pdsc->dev, "Failed to create fw reporter: %pe\n", hr);
diff --git a/drivers/net/ethernet/amd/xgbe/Makefile b/drivers/net/ethernet/amd/xgbe/Makefile
index 5b0ab6240cf2..980e27652237 100644
--- a/drivers/net/ethernet/amd/xgbe/Makefile
+++ b/drivers/net/ethernet/amd/xgbe/Makefile
@@ -3,7 +3,7 @@ obj-$(CONFIG_AMD_XGBE) += amd-xgbe.o
amd-xgbe-objs := xgbe-main.o xgbe-drv.o xgbe-dev.o \
xgbe-desc.o xgbe-ethtool.o xgbe-mdio.o \
- xgbe-hwtstamp.o xgbe-ptp.o \
+ xgbe-hwtstamp.o xgbe-ptp.o xgbe-pps.o \
xgbe-i2c.o xgbe-phy-v1.o xgbe-phy-v2.o \
xgbe-platform.o
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index 009fbc9b11ce..62b01de93db4 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -227,7 +227,11 @@
#define MAC_TICSNR 0x0d5C
#define MAC_TECNR 0x0d60
#define MAC_TECSNR 0x0d64
-
+#define MAC_PPSCR 0x0d70
+#define MAC_PPS0_TTSR 0x0d80
+#define MAC_PPS0_TTNSR 0x0d84
+#define MAC_PPS0_INTERVAL 0x0d88
+#define MAC_PPS0_WIDTH 0x0d8C
#define MAC_QTFCR_INC 4
#define MAC_MACA_INC 4
#define MAC_HTR_INC 4
@@ -235,6 +239,18 @@
#define MAC_RQC2_INC 4
#define MAC_RQC2_Q_PER_REG 4
+/* PPS helpers */
+#define PPSEN0 BIT(4)
+#define MAC_PPSx_TTSR(x) ((MAC_PPS0_TTSR) + ((x) * 0x10))
+#define MAC_PPSx_TTNSR(x) ((MAC_PPS0_TTNSR) + ((x) * 0x10))
+#define MAC_PPSx_INTERVAL(x) ((MAC_PPS0_INTERVAL) + ((x) * 0x10))
+#define MAC_PPSx_WIDTH(x) ((MAC_PPS0_WIDTH) + ((x) * 0x10))
+#define PPS_MAXIDX(x) ((((x) + 1) * 8) - 1)
+#define PPS_MINIDX(x) ((x) * 8)
+#define XGBE_PPSCMD_STOP 0x5
+#define XGBE_PPSCMD_START 0x2
+#define XGBE_PPSTARGET_PULSE 0x2
+
/* MAC register entry bit positions and sizes */
#define MAC_HWF0R_ADDMACADRSEL_INDEX 18
#define MAC_HWF0R_ADDMACADRSEL_WIDTH 5
@@ -496,8 +512,10 @@
#define MAC_VR_SNPSVER_WIDTH 8
#define MAC_VR_USERVER_INDEX 16
#define MAC_VR_USERVER_WIDTH 8
+#define MAC_PPSx_TTNSR_TRGTBUSY0_INDEX 31
+#define MAC_PPSx_TTNSR_TRGTBUSY0_WIDTH 1
/* MMC register offsets */
#define MMC_CR 0x0800
#define MMC_RISR 0x0804
#define MMC_TISR 0x0808
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 2e9b95a94f89..f0989aa01855 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -691,6 +691,21 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
hw_feat->pps_out_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
hw_feat->aux_snap_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, AUXSNAPNUM);
+ /* Sanity check and warn if hardware reports more than supported */
+ if (hw_feat->pps_out_num > XGBE_MAX_PPS_OUT) {
+ dev_warn(pdata->dev,
+ "Hardware reports %u PPS outputs, limiting to %u\n",
+ hw_feat->pps_out_num, XGBE_MAX_PPS_OUT);
+ hw_feat->pps_out_num = XGBE_MAX_PPS_OUT;
+ }
+
+ if (hw_feat->aux_snap_num > XGBE_MAX_AUX_SNAP) {
+ dev_warn(pdata->dev,
+ "Hardware reports %u aux snapshot inputs, limiting to %u\n",
+ hw_feat->aux_snap_num, XGBE_MAX_AUX_SNAP);
+ hw_feat->aux_snap_num = XGBE_MAX_AUX_SNAP;
+ }
+
/* Translate the Hash Table size into actual number */
switch (hw_feat->hash_table_size) {
case 0:
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index be0d2c7d08dc..b6e1b67a2d0e 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -329,6 +329,7 @@ static int xgbe_get_coalesce(struct net_device *netdev,
ec->rx_coalesce_usecs = pdata->rx_usecs;
ec->rx_max_coalesced_frames = pdata->rx_frames;
+ ec->tx_coalesce_usecs = pdata->tx_usecs;
ec->tx_max_coalesced_frames = pdata->tx_frames;
return 0;
@@ -342,7 +343,8 @@ static int xgbe_set_coalesce(struct net_device *netdev,
struct xgbe_prv_data *pdata = netdev_priv(netdev);
struct xgbe_hw_if *hw_if = &pdata->hw_if;
unsigned int rx_frames, rx_riwt, rx_usecs;
- unsigned int tx_frames;
+ unsigned int tx_frames, tx_usecs;
+ unsigned int jiffy_us = jiffies_to_usecs(1);
rx_riwt = hw_if->usec_to_riwt(pdata, ec->rx_coalesce_usecs);
rx_usecs = ec->rx_coalesce_usecs;
@@ -364,20 +366,42 @@ static int xgbe_set_coalesce(struct net_device *netdev,
return -EINVAL;
}
+ tx_usecs = ec->tx_coalesce_usecs;
tx_frames = ec->tx_max_coalesced_frames;
/* Check the bounds of values for Tx */
+ if (!tx_usecs) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "tx-usecs must not be 0");
+ return -EINVAL;
+ }
+ if (tx_usecs > XGMAC_MAX_COAL_TX_TICK) {
+ NL_SET_ERR_MSG_FMT_MOD(extack, "tx-usecs is limited to %d usec",
+ XGMAC_MAX_COAL_TX_TICK);
+ return -EINVAL;
+ }
if (tx_frames > pdata->tx_desc_count) {
netdev_err(netdev, "tx-frames is limited to %d frames\n",
pdata->tx_desc_count);
return -EINVAL;
}
+ /* Round tx-usecs down to a multiple of the jiffy granularity */
+ if (tx_usecs % jiffy_us) {
+ tx_usecs = rounddown(tx_usecs, jiffy_us);
+ if (!tx_usecs)
+ tx_usecs = jiffy_us;
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "tx-usecs rounded to %u usec due to jiffy granularity (%u usec)",
+ tx_usecs, jiffy_us);
+ }
+
pdata->rx_riwt = rx_riwt;
pdata->rx_usecs = rx_usecs;
pdata->rx_frames = rx_frames;
hw_if->config_rx_coalesce(pdata);
+ pdata->tx_usecs = tx_usecs;
pdata->tx_frames = tx_frames;
hw_if->config_tx_coalesce(pdata);
@@ -440,7 +464,7 @@ static int xgbe_set_rxfh(struct net_device *netdev,
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
struct xgbe_hw_if *hw_if = &pdata->hw_if;
- unsigned int ret;
+ int ret;
if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
rxfh->hfunc != ETH_RSS_HASH_TOP) {
@@ -709,7 +733,7 @@ out:
}
static const struct ethtool_ops xgbe_ethtool_ops = {
- .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES,
.get_drvinfo = xgbe_get_drvinfo,
.get_msglevel = xgbe_get_msglevel,
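
The Tx coalescing timer is driven from jiffies, so tx-usecs values that are not a whole number of jiffies would silently behave like the next lower multiple; the hunk makes that explicit by rounding down (and clamping to one jiffy). Standalone arithmetic check, assuming HZ=250 so one jiffy is 4000 us:

#include <stdio.h>

/* Mirror of the rounding in xgbe_set_coalesce(), outside the kernel. */
static unsigned int round_tx_usecs(unsigned int tx_usecs,
				   unsigned int jiffy_us)
{
	if (tx_usecs % jiffy_us) {
		tx_usecs -= tx_usecs % jiffy_us;	/* rounddown() */
		if (!tx_usecs)
			tx_usecs = jiffy_us;
	}
	return tx_usecs;
}

int main(void)
{
	printf("%u\n", round_tx_usecs(1000, 4000));	/* -> 4000 */
	printf("%u\n", round_tx_usecs(9000, 4000));	/* -> 8000 */
	printf("%u\n", round_tx_usecs(8000, 4000));	/* -> 8000, unchanged */
	return 0;
}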
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
index d40011e8ddf2..65eb7b577b65 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
@@ -70,7 +70,7 @@ static int xgbe_i2c_set_enable(struct xgbe_prv_data *pdata, bool enable)
static int xgbe_i2c_disable(struct xgbe_prv_data *pdata)
{
- unsigned int ret;
+ int ret;
ret = xgbe_i2c_set_enable(pdata, false);
if (ret) {
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
index 23c39e92e783..a56efc1bee33 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -2902,7 +2902,7 @@ static void xgbe_phy_sfp_setup(struct xgbe_prv_data *pdata)
static int xgbe_phy_int_mdio_reset(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
- unsigned int ret;
+ int ret;
ret = pdata->hw_if.set_gpio(pdata, phy_data->mdio_reset_gpio);
if (ret)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pps.c b/drivers/net/ethernet/amd/xgbe/xgbe-pps.c
new file mode 100644
index 000000000000..6d03ae7ab36f
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-pps.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
+/*
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
+ *
+ * Author: Raju Rangoju <Raju.Rangoju@amd.com>
+ */
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+static u32 get_pps_mask(unsigned int x)
+{
+ return GENMASK(PPS_MAXIDX(x), PPS_MINIDX(x));
+}
+
+static u32 get_pps_cmd(unsigned int x, u32 val)
+{
+ return (val & GENMASK(3, 0)) << PPS_MINIDX(x);
+}
+
+static u32 get_target_mode_sel(unsigned int x, u32 val)
+{
+ return (val & GENMASK(1, 0)) << (PPS_MAXIDX(x) - 2);
+}
+
+int xgbe_pps_config(struct xgbe_prv_data *pdata,
+ struct xgbe_pps_config *cfg, int index, bool on)
+{
+ unsigned int ppscr = 0;
+ unsigned int tnsec;
+ u64 period;
+
+ /* Check if target time register is busy */
+ tnsec = XGMAC_IOREAD(pdata, MAC_PPSx_TTNSR(index));
+ if (XGMAC_GET_BITS(tnsec, MAC_PPSx_TTNSR, TRGTBUSY0))
+ return -EBUSY;
+
+ ppscr = XGMAC_IOREAD(pdata, MAC_PPSCR);
+ ppscr &= ~get_pps_mask(index);
+
+ if (!on) {
+ /* Disable PPS output */
+ ppscr |= get_pps_cmd(index, XGBE_PPSCMD_STOP);
+ ppscr |= PPSEN0;
+ XGMAC_IOWRITE(pdata, MAC_PPSCR, ppscr);
+
+ return 0;
+ }
+
+ /* Configure start time */
+ XGMAC_IOWRITE(pdata, MAC_PPSx_TTSR(index), cfg->start.tv_sec);
+ XGMAC_IOWRITE(pdata, MAC_PPSx_TTNSR(index), cfg->start.tv_nsec);
+
+ period = cfg->period.tv_sec * NSEC_PER_SEC + cfg->period.tv_nsec;
+ period = div_u64(period, XGBE_V2_TSTAMP_SSINC);
+
+ if (period < 4)
+ return -EINVAL;
+
+ /* Configure interval and pulse width (50% duty cycle) */
+ XGMAC_IOWRITE(pdata, MAC_PPSx_INTERVAL(index), period - 1);
+ XGMAC_IOWRITE(pdata, MAC_PPSx_WIDTH(index), (period >> 1) - 1);
+
+ /* Enable PPS with pulse train mode */
+ ppscr |= get_pps_cmd(index, XGBE_PPSCMD_START);
+ ppscr |= get_target_mode_sel(index, XGBE_PPSTARGET_PULSE);
+ ppscr |= PPSEN0;
+
+ XGMAC_IOWRITE(pdata, MAC_PPSCR, ppscr);
+
+ return 0;
+}
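
Each PPS output owns an 8-bit slice of MAC_PPSCR: output x occupies bits [8x+7:8x], the command goes in the slice's low nibble, and the two target-mode-select bits sit just below the slice's top bit. Standalone check of the helper arithmetic for output index 1:

#include <stdint.h>
#include <stdio.h>

#define PPS_MAXIDX(x)	((((x) + 1) * 8) - 1)
#define PPS_MINIDX(x)	((x) * 8)
#define GENMASK32(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))

int main(void)
{
	unsigned int x = 1;	/* PPS output index 1 */
	uint32_t mask = GENMASK32(PPS_MAXIDX(x), PPS_MINIDX(x));
	uint32_t cmd = (0x2 /* START */ & 0xf) << PPS_MINIDX(x);
	uint32_t mode = (0x2 /* PULSE */ & 0x3) << (PPS_MAXIDX(x) - 2);

	/* mask=0x0000ff00 cmd=0x00000200 mode=0x00004000 */
	printf("mask=0x%08x cmd=0x%08x mode=0x%08x\n", mask, cmd, mode);
	return 0;
}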
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
index 3658afc7801d..0e0b8ec3b504 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
@@ -106,7 +106,29 @@ static int xgbe_settime(struct ptp_clock_info *info,
static int xgbe_enable(struct ptp_clock_info *info,
struct ptp_clock_request *request, int on)
{
- return -EOPNOTSUPP;
+ struct xgbe_prv_data *pdata = container_of(info, struct xgbe_prv_data,
+ ptp_clock_info);
+ struct xgbe_pps_config *pps_cfg;
+ unsigned long flags;
+ int ret;
+
+ dev_dbg(pdata->dev, "rq->type %d on %d\n", request->type, on);
+
+ if (request->type != PTP_CLK_REQ_PEROUT)
+ return -EOPNOTSUPP;
+
+ pps_cfg = &pdata->pps[request->perout.index];
+
+ pps_cfg->start.tv_sec = request->perout.start.sec;
+ pps_cfg->start.tv_nsec = request->perout.start.nsec;
+ pps_cfg->period.tv_sec = request->perout.period.sec;
+ pps_cfg->period.tv_nsec = request->perout.period.nsec;
+
+ spin_lock_irqsave(&pdata->tstamp_lock, flags);
+ ret = xgbe_pps_config(pdata, pps_cfg, request->perout.index, on);
+ spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+
+ return ret;
}
void xgbe_ptp_register(struct xgbe_prv_data *pdata)
@@ -122,6 +144,8 @@ void xgbe_ptp_register(struct xgbe_prv_data *pdata)
info->adjtime = xgbe_adjtime;
info->gettimex64 = xgbe_gettimex;
info->settime64 = xgbe_settime;
+ info->n_per_out = pdata->hw_feat.pps_out_num;
+ info->n_ext_ts = pdata->hw_feat.aux_snap_num;
info->enable = xgbe_enable;
clock = ptp_clock_register(info, pdata->dev);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index d7e03e292ec4..e8bbb6805901 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -142,6 +142,10 @@
#define XGBE_V2_TSTAMP_SNSINC 0
#define XGBE_V2_PTP_ACT_CLK_FREQ 1000000000
+/* Define maximum supported values */
+#define XGBE_MAX_PPS_OUT 4
+#define XGBE_MAX_AUX_SNAP 4
+
/* Driver PMT macros */
#define XGMAC_DRIVER_CONTEXT 1
#define XGMAC_IOCTL_CONTEXT 2
@@ -168,6 +172,7 @@
/* Default coalescing parameters */
#define XGMAC_INIT_DMA_TX_USECS 1000
#define XGMAC_INIT_DMA_TX_FRAMES 25
+#define XGMAC_MAX_COAL_TX_TICK 100000
#define XGMAC_MAX_DMA_RIWT 0xff
#define XGMAC_INIT_DMA_RX_USECS 30
@@ -672,6 +677,11 @@ struct xgbe_ext_stats {
u64 rx_vxlan_csum_errors;
};
+struct xgbe_pps_config {
+ struct timespec64 start;
+ struct timespec64 period;
+};
+
struct xgbe_hw_if {
int (*tx_complete)(struct xgbe_ring_desc *);
@@ -1142,6 +1152,9 @@ struct xgbe_prv_data {
struct sk_buff *tx_tstamp_skb;
u64 tx_tstamp;
+ /* Pulse Per Second output */
+ struct xgbe_pps_config pps[XGBE_MAX_PPS_OUT];
+
/* DCB support */
struct ieee_ets *ets;
struct ieee_pfc *pfc;
@@ -1304,6 +1317,10 @@ void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
int xgbe_init_ptp(struct xgbe_prv_data *pdata);
void xgbe_update_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec,
unsigned int nsec);
+
+int xgbe_pps_config(struct xgbe_prv_data *pdata, struct xgbe_pps_config *cfg,
+ int index, bool on);
+
#ifdef CONFIG_DEBUG_FS
void xgbe_debugfs_init(struct xgbe_prv_data *);
void xgbe_debugfs_exit(struct xgbe_prv_data *);
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 0fc10e6c6902..9fdef874f5ca 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -257,6 +257,7 @@ config BNGE
tristate "Broadcom Ethernet device support"
depends on PCI
select NET_DEVLINK
+ select PAGE_POOL
help
This driver supports Broadcom 50/100/200/400/800 gigabit Ethernet cards.
The module will be called bng_en. To compile this driver as a module,
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge.h b/drivers/net/ethernet/broadcom/bnge/bnge.h
index 6fb3683b6b04..7aed5f81cd51 100644
--- a/drivers/net/ethernet/broadcom/bnge/bnge.h
+++ b/drivers/net/ethernet/broadcom/bnge/bnge.h
@@ -102,6 +102,10 @@ struct bnge_dev {
u16 chip_num;
u8 chip_rev;
+#if BITS_PER_LONG == 32
+	/* Ensure 64-bit doorbell writes are atomic on 32-bit systems. */
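Wait, this anchor is wrong; see replace below.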
+ spinlock_t db_lock;
+#endif
int db_offset; /* db_offset within db_size */
int db_size;
@@ -129,6 +133,7 @@ struct bnge_dev {
unsigned long state;
#define BNGE_STATE_DRV_REGISTERED 0
+#define BNGE_STATE_OPEN 1
u64 fw_cap;
@@ -155,6 +160,7 @@ struct bnge_dev {
u16 rss_indir_tbl_entries;
u32 rss_cap;
+ u32 rss_hash_cfg;
u16 rx_nr_rings;
u16 tx_nr_rings;
@@ -213,6 +219,27 @@ static inline bool bnge_is_agg_reqd(struct bnge_dev *bd)
return true;
}
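+
+/*
+ * 64-bit doorbells must reach the device as a single write.  On 32-bit
+ * kernels lo_hi_writeq() issues two 32-bit writes, so db_lock keeps
+ * concurrent writers from interleaving the halves.
+ */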
+static inline void bnge_writeq(struct bnge_dev *bd, u64 val,
+ void __iomem *addr)
+{
+#if BITS_PER_LONG == 32
+ spin_lock(&bd->db_lock);
+ lo_hi_writeq(val, addr);
+ spin_unlock(&bd->db_lock);
+#else
+ writeq(val, addr);
+#endif
+}
+
+/* For TX and RX ring doorbells */
+static inline void bnge_db_write(struct bnge_dev *bd, struct bnge_db_info *db,
+ u32 idx)
+{
+ bnge_writeq(bd, db->db_key64 | DB_RING_IDX(db, idx),
+ db->doorbell);
+}
+
bool bnge_aux_registered(struct bnge_dev *bd);
+u16 bnge_aux_get_msix(struct bnge_dev *bd);
#endif /* _BNGE_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_core.c b/drivers/net/ethernet/broadcom/bnge/bnge_core.c
index 68da656f2894..2c72dd34d50d 100644
--- a/drivers/net/ethernet/broadcom/bnge/bnge_core.c
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_core.c
@@ -96,6 +96,16 @@ static void bnge_fw_unregister_dev(struct bnge_dev *bd)
bnge_free_ctx_mem(bd);
}
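+
+/* Default RSS hash: 2-tuple over IPv4/IPv6 plus 4-tuple over TCP and UDP. */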
+static void bnge_set_dflt_rss_hash_type(struct bnge_dev *bd)
+{
+ bd->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
+}
+
static int bnge_fw_register_dev(struct bnge_dev *bd)
{
int rc;
@@ -137,6 +147,8 @@ static int bnge_fw_register_dev(struct bnge_dev *bd)
goto err_func_unrgtr;
}
+ bnge_set_dflt_rss_hash_type(bd);
+
return 0;
err_func_unrgtr:
@@ -296,6 +308,10 @@ static int bnge_probe_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_config_uninit;
}
+#if BITS_PER_LONG == 32
+ spin_lock_init(&bd->db_lock);
+#endif
+
rc = bnge_alloc_irqs(bd);
if (rc) {
dev_err(&pdev->dev, "Error IRQ allocation rc = %d\n", rc);
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_db.h b/drivers/net/ethernet/broadcom/bnge/bnge_db.h
new file mode 100644
index 000000000000..950ed582f1d8
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_db.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Broadcom */
+
+#ifndef _BNGE_DB_H_
+#define _BNGE_DB_H_
+
+/* 64-bit doorbell */
+#define DBR_EPOCH_SFT 24
+#define DBR_TOGGLE_SFT 25
+#define DBR_XID_SFT 32
+#define DBR_PATH_L2 (0x1ULL << 56)
+#define DBR_VALID (0x1ULL << 58)
+#define DBR_TYPE_SQ (0x0ULL << 60)
+#define DBR_TYPE_SRQ (0x2ULL << 60)
+#define DBR_TYPE_CQ (0x4ULL << 60)
+#define DBR_TYPE_CQ_ARMALL (0x6ULL << 60)
+#define DBR_TYPE_NQ (0xaULL << 60)
+#define DBR_TYPE_NQ_ARM (0xbULL << 60)
+#define DBR_TYPE_NQ_MASK (0xeULL << 60)
+
+struct bnge_db_info {
+ void __iomem *doorbell;
+ u64 db_key64;
+ u32 db_ring_mask;
+ u32 db_epoch_mask;
+ u8 db_epoch_shift;
+};
+
+#define DB_EPOCH(db, idx) (((idx) & (db)->db_epoch_mask) << \
+ ((db)->db_epoch_shift))
+#define DB_RING_IDX(db, idx) (((idx) & (db)->db_ring_mask) | \
+ DB_EPOCH(db, idx))
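+
+/*
+ * The epoch bit flips each time the producer index wraps, letting the
+ * device tell a full ring from an empty one.  E.g. a 2048-entry ring has
+ * db_ring_mask = 0x7ff, db_epoch_mask = 0x800 and db_epoch_shift =
+ * DBR_EPOCH_SFT - ilog2(0x800) = 13 (see bnge_set_db_mask()), so
+ * DB_RING_IDX(db, 2048) gives ring index 0 with bit 24 set.
+ */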
+
+#endif /* _BNGE_DB_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c
index 5c178fade065..198f49b40dbf 100644
--- a/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c
@@ -6,6 +6,8 @@
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/bnxt/hsi.h>
+#include <linux/if_vlan.h>
+#include <net/netdev_queues.h>
#include "bnge.h"
#include "bnge_hwrm.h"
@@ -701,3 +703,483 @@ qportcfg_exit:
bnge_hwrm_req_drop(bd, req);
return rc;
}
+
+int bnge_hwrm_vnic_set_hds(struct bnge_net *bn, struct bnge_vnic_info *vnic)
+{
+ u16 hds_thresh = (u16)bn->netdev->cfg_pending->hds_thresh;
+ struct hwrm_vnic_plcmodes_cfg_input *req;
+ struct bnge_dev *bd = bn->bd;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_VNIC_PLCMODES_CFG);
+ if (rc)
+ return rc;
+
+ req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT);
+ req->enables = cpu_to_le32(BNGE_PLC_EN_JUMBO_THRES_VALID);
+ req->jumbo_thresh = cpu_to_le16(bn->rx_buf_use_size);
+
+ if (bnge_is_agg_reqd(bd)) {
+ req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
+ VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
+ req->enables |=
+ cpu_to_le32(BNGE_PLC_EN_HDS_THRES_VALID);
+ req->hds_threshold = cpu_to_le16(hds_thresh);
+ }
+ req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
+ return bnge_hwrm_req_send(bd, req);
+}
+
+int bnge_hwrm_vnic_ctx_alloc(struct bnge_dev *bd,
+ struct bnge_vnic_info *vnic, u16 ctx_idx)
+{
+ struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp;
+ struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);
+ if (rc)
+ return rc;
+
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (!rc)
+ vnic->fw_rss_cos_lb_ctx[ctx_idx] =
+ le16_to_cpu(resp->rss_cos_lb_ctx_id);
+ bnge_hwrm_req_drop(bd, req);
+
+ return rc;
+}
+
+static void
+__bnge_hwrm_vnic_set_rss(struct bnge_net *bn,
+ struct hwrm_vnic_rss_cfg_input *req,
+ struct bnge_vnic_info *vnic)
+{
+ struct bnge_dev *bd = bn->bd;
+
+ bnge_fill_hw_rss_tbl(bn, vnic);
+ req->flags |= VNIC_RSS_CFG_REQ_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT;
+
+ req->hash_type = cpu_to_le32(bd->rss_hash_cfg);
+ req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
+ req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
+ req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
+}
+
+int bnge_hwrm_vnic_set_rss(struct bnge_net *bn,
+ struct bnge_vnic_info *vnic, bool set_rss)
+{
+ struct hwrm_vnic_rss_cfg_input *req;
+ struct bnge_dev *bd = bn->bd;
+ dma_addr_t ring_tbl_map;
+ u32 i, nr_ctxs;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_VNIC_RSS_CFG);
+ if (rc)
+ return rc;
+
+ req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
+ if (!set_rss)
+ return bnge_hwrm_req_send(bd, req);
+
+ __bnge_hwrm_vnic_set_rss(bn, req, vnic);
+ ring_tbl_map = vnic->rss_table_dma_addr;
+ nr_ctxs = bnge_cal_nr_rss_ctxs(bd->rx_nr_rings);
+
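+	/* Program one RSS table slice per ring-table-pair context. */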
+ bnge_hwrm_req_hold(bd, req);
+ for (i = 0; i < nr_ctxs; ring_tbl_map += BNGE_RSS_TABLE_SIZE, i++) {
+ req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
+ req->ring_table_pair_index = i;
+ req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (rc)
+ goto exit;
+ }
+
+exit:
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+int bnge_hwrm_vnic_cfg(struct bnge_net *bn, struct bnge_vnic_info *vnic)
+{
+ struct bnge_rx_ring_info *rxr = &bn->rx_ring[0];
+ struct hwrm_vnic_cfg_input *req;
+ struct bnge_dev *bd = bn->bd;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_VNIC_CFG);
+ if (rc)
+ return rc;
+
+ req->default_rx_ring_id =
+ cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
+ req->default_cmpl_ring_id =
+ cpu_to_le16(bnge_cp_ring_for_rx(rxr));
+ req->enables =
+ cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
+ VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
+ vnic->mru = bd->netdev->mtu + ETH_HLEN + VLAN_HLEN;
+ req->mru = cpu_to_le16(vnic->mru);
+
+ req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
+
+ if (bd->flags & BNGE_EN_STRIP_VLAN)
+ req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
+ if (vnic->vnic_id == BNGE_VNIC_DEFAULT && bnge_aux_registered(bd))
+ req->flags |= cpu_to_le32(BNGE_VNIC_CFG_ROCE_DUAL_MODE);
+
+ return bnge_hwrm_req_send(bd, req);
+}
+
+void bnge_hwrm_update_rss_hash_cfg(struct bnge_net *bn)
+{
+ struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
+ struct hwrm_vnic_rss_qcfg_output *resp;
+ struct hwrm_vnic_rss_qcfg_input *req;
+ struct bnge_dev *bd = bn->bd;
+
+ if (bnge_hwrm_req_init(bd, req, HWRM_VNIC_RSS_QCFG))
+ return;
+
+ req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
+	/* All contexts are configured with the same hash_type; context zero always exists. */
+ req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
+ resp = bnge_hwrm_req_hold(bd, req);
+ if (!bnge_hwrm_req_send(bd, req))
+ bd->rss_hash_cfg =
+ le32_to_cpu(resp->hash_type) ?: bd->rss_hash_cfg;
+ bnge_hwrm_req_drop(bd, req);
+}
+
+int bnge_hwrm_l2_filter_free(struct bnge_dev *bd, struct bnge_l2_filter *fltr)
+{
+ struct hwrm_cfa_l2_filter_free_input *req;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_CFA_L2_FILTER_FREE);
+ if (rc)
+ return rc;
+
+ req->l2_filter_id = fltr->base.filter_id;
+ return bnge_hwrm_req_send(bd, req);
+}
+
+int bnge_hwrm_l2_filter_alloc(struct bnge_dev *bd, struct bnge_l2_filter *fltr)
+{
+ struct hwrm_cfa_l2_filter_alloc_output *resp;
+ struct hwrm_cfa_l2_filter_alloc_input *req;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_CFA_L2_FILTER_ALLOC);
+ if (rc)
+ return rc;
+
+ req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
+
+ req->flags |= cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
+ req->dst_id = cpu_to_le16(fltr->base.fw_vnic_id);
+ req->enables =
+ cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
+ CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
+ CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
+ ether_addr_copy(req->l2_addr, fltr->l2_key.dst_mac_addr);
+ eth_broadcast_addr(req->l2_addr_mask);
+
+ if (fltr->l2_key.vlan) {
+ req->enables |=
+ cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN |
+ CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK |
+ CFA_L2_FILTER_ALLOC_REQ_ENABLES_NUM_VLANS);
+ req->num_vlans = 1;
+ req->l2_ivlan = cpu_to_le16(fltr->l2_key.vlan);
+ req->l2_ivlan_mask = cpu_to_le16(0xfff);
+ }
+
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (!rc)
+ fltr->base.filter_id = resp->l2_filter_id;
+
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+int bnge_hwrm_cfa_l2_set_rx_mask(struct bnge_dev *bd,
+ struct bnge_vnic_info *vnic)
+{
+ struct hwrm_cfa_l2_set_rx_mask_input *req;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_CFA_L2_SET_RX_MASK);
+ if (rc)
+ return rc;
+
+ req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
+ if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) {
+ req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
+ req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
+ }
+ req->mask = cpu_to_le32(vnic->rx_mask);
+ return bnge_hwrm_req_send_silent(bd, req);
+}
+
+int bnge_hwrm_vnic_alloc(struct bnge_dev *bd, struct bnge_vnic_info *vnic,
+ unsigned int nr_rings)
+{
+ struct hwrm_vnic_alloc_output *resp;
+ struct hwrm_vnic_alloc_input *req;
+ unsigned int i;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_VNIC_ALLOC);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < BNGE_MAX_CTX_PER_VNIC; i++)
+ vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
+ if (vnic->vnic_id == BNGE_VNIC_DEFAULT)
+ req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
+
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (!rc)
+ vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+void bnge_hwrm_vnic_free_one(struct bnge_dev *bd, struct bnge_vnic_info *vnic)
+{
+ if (vnic->fw_vnic_id != INVALID_HW_RING_ID) {
+ struct hwrm_vnic_free_input *req;
+
+ if (bnge_hwrm_req_init(bd, req, HWRM_VNIC_FREE))
+ return;
+
+ req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
+
+ bnge_hwrm_req_send(bd, req);
+ vnic->fw_vnic_id = INVALID_HW_RING_ID;
+ }
+}
+
+void bnge_hwrm_vnic_ctx_free_one(struct bnge_dev *bd,
+ struct bnge_vnic_info *vnic, u16 ctx_idx)
+{
+ struct hwrm_vnic_rss_cos_lb_ctx_free_input *req;
+
+ if (bnge_hwrm_req_init(bd, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE))
+ return;
+
+ req->rss_cos_lb_ctx_id =
+ cpu_to_le16(vnic->fw_rss_cos_lb_ctx[ctx_idx]);
+
+ bnge_hwrm_req_send(bd, req);
+ vnic->fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
+}
+
+void bnge_hwrm_stat_ctx_free(struct bnge_net *bn)
+{
+ struct hwrm_stat_ctx_free_input *req;
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ if (bnge_hwrm_req_init(bd, req, HWRM_STAT_CTX_FREE))
+ return;
+
+ bnge_hwrm_req_hold(bd, req);
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+ struct bnge_nq_ring_info *nqr = &bnapi->nq_ring;
+
+ if (nqr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
+ req->stat_ctx_id = cpu_to_le32(nqr->hw_stats_ctx_id);
+ bnge_hwrm_req_send(bd, req);
+
+ nqr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
+ }
+ }
+ bnge_hwrm_req_drop(bd, req);
+}
+
+int bnge_hwrm_stat_ctx_alloc(struct bnge_net *bn)
+{
+ struct hwrm_stat_ctx_alloc_output *resp;
+ struct hwrm_stat_ctx_alloc_input *req;
+ struct bnge_dev *bd = bn->bd;
+ int rc, i;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_STAT_CTX_ALLOC);
+ if (rc)
+ return rc;
+
+ req->stats_dma_length = cpu_to_le16(bd->hw_ring_stats_size);
+ req->update_period_ms = cpu_to_le32(bn->stats_coal_ticks / 1000);
+
+ resp = bnge_hwrm_req_hold(bd, req);
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+ struct bnge_nq_ring_info *nqr = &bnapi->nq_ring;
+
+ req->stats_dma_addr = cpu_to_le64(nqr->stats.hw_stats_map);
+
+ rc = bnge_hwrm_req_send(bd, req);
+ if (rc)
+ break;
+
+ nqr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
+ bn->grp_info[i].fw_stats_ctx = nqr->hw_stats_ctx_id;
+ }
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+int hwrm_ring_free_send_msg(struct bnge_net *bn,
+ struct bnge_ring_struct *ring,
+ u32 ring_type, int cmpl_ring_id)
+{
+ struct hwrm_ring_free_input *req;
+ struct bnge_dev *bd = bn->bd;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_RING_FREE);
+ if (rc)
+ goto exit;
+
+ req->cmpl_ring = cpu_to_le16(cmpl_ring_id);
+ req->ring_type = ring_type;
+ req->ring_id = cpu_to_le16(ring->fw_ring_id);
+
+ bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ bnge_hwrm_req_drop(bd, req);
+exit:
+ if (rc) {
+ netdev_err(bd->netdev, "hwrm_ring_free type %d failed. rc:%d\n", ring_type, rc);
+ return -EIO;
+ }
+ return 0;
+}
+
+int hwrm_ring_alloc_send_msg(struct bnge_net *bn,
+ struct bnge_ring_struct *ring,
+ u32 ring_type, u32 map_index)
+{
+ struct bnge_ring_mem_info *rmem = &ring->ring_mem;
+ struct bnge_ring_grp_info *grp_info;
+ struct hwrm_ring_alloc_output *resp;
+ struct hwrm_ring_alloc_input *req;
+ struct bnge_dev *bd = bn->bd;
+ u16 ring_id, flags = 0;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_RING_ALLOC);
+ if (rc)
+ goto exit;
+
+ req->enables = 0;
+ if (rmem->nr_pages > 1) {
+ req->page_tbl_addr = cpu_to_le64(rmem->dma_pg_tbl);
+ /* Page size is in log2 units */
+ req->page_size = BNGE_PAGE_SHIFT;
+ req->page_tbl_depth = 1;
+ } else {
+ req->page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
+ }
+ req->fbo = 0;
+ /* Association of ring index with doorbell index and MSIX number */
+ req->logical_id = cpu_to_le16(map_index);
+
+ switch (ring_type) {
+ case HWRM_RING_ALLOC_TX: {
+ struct bnge_tx_ring_info *txr;
+
+ txr = container_of(ring, struct bnge_tx_ring_info,
+ tx_ring_struct);
+ req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
+ /* Association of transmit ring with completion ring */
+ grp_info = &bn->grp_info[ring->grp_idx];
+ req->cmpl_ring_id = cpu_to_le16(bnge_cp_ring_for_tx(txr));
+ req->length = cpu_to_le32(bn->tx_ring_mask + 1);
+ req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
+ req->queue_id = cpu_to_le16(ring->queue_id);
+ req->flags = cpu_to_le16(flags);
+ break;
+ }
+ case HWRM_RING_ALLOC_RX:
+ req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
+ req->length = cpu_to_le32(bn->rx_ring_mask + 1);
+
+ /* Association of rx ring with stats context */
+ grp_info = &bn->grp_info[ring->grp_idx];
+ req->rx_buf_size = cpu_to_le16(bn->rx_buf_use_size);
+ req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
+ req->enables |=
+ cpu_to_le32(RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
+ if (NET_IP_ALIGN == 2)
+ flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
+ req->flags = cpu_to_le16(flags);
+ break;
+ case HWRM_RING_ALLOC_AGG:
+ req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
+ /* Association of agg ring with rx ring */
+ grp_info = &bn->grp_info[ring->grp_idx];
+ req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
+ req->rx_buf_size = cpu_to_le16(BNGE_RX_PAGE_SIZE);
+ req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
+ req->enables |=
+ cpu_to_le32(RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
+ RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
+ req->length = cpu_to_le32(bn->rx_agg_ring_mask + 1);
+ break;
+ case HWRM_RING_ALLOC_CMPL:
+ req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
+ req->length = cpu_to_le32(bn->cp_ring_mask + 1);
+ /* Association of cp ring with nq */
+ grp_info = &bn->grp_info[map_index];
+ req->nq_ring_id = cpu_to_le16(grp_info->nq_fw_ring_id);
+ req->cq_handle = cpu_to_le64(ring->handle);
+ req->enables |=
+ cpu_to_le32(RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
+ break;
+ case HWRM_RING_ALLOC_NQ:
+ req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
+ req->length = cpu_to_le32(bn->cp_ring_mask + 1);
+ req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
+ break;
+ default:
+ netdev_err(bn->netdev, "hwrm alloc invalid ring type %d\n", ring_type);
+ return -EINVAL;
+ }
+
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ ring_id = le16_to_cpu(resp->ring_id);
+ bnge_hwrm_req_drop(bd, req);
+
+exit:
+ if (rc) {
+ netdev_err(bd->netdev, "hwrm_ring_alloc type %d failed. rc:%d\n", ring_type, rc);
+ return -EIO;
+ }
+ ring->fw_ring_id = ring_id;
+ return rc;
+}
+
+int bnge_hwrm_set_async_event_cr(struct bnge_dev *bd, int idx)
+{
+ struct hwrm_func_cfg_input *req;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_CFG);
+ if (rc)
+ return rc;
+
+ req->fid = cpu_to_le16(0xffff);
+ req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
+ req->async_event_cr = cpu_to_le16(idx);
+ return bnge_hwrm_req_send(bd, req);
+}
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.h b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.h
index 6c03923eb559..042f28e84a05 100644
--- a/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.h
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.h
@@ -4,6 +4,13 @@
#ifndef _BNGE_HWRM_LIB_H_
#define _BNGE_HWRM_LIB_H_
+#define BNGE_PLC_EN_JUMBO_THRES_VALID \
+ VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID
+#define BNGE_PLC_EN_HDS_THRES_VALID \
+ VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID
+#define BNGE_VNIC_CFG_ROCE_DUAL_MODE \
+ VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE
+
int bnge_hwrm_ver_get(struct bnge_dev *bd);
int bnge_hwrm_func_reset(struct bnge_dev *bd);
int bnge_hwrm_fw_set_time(struct bnge_dev *bd);
@@ -24,4 +31,28 @@ int bnge_hwrm_func_qcfg(struct bnge_dev *bd);
int bnge_hwrm_func_resc_qcaps(struct bnge_dev *bd);
int bnge_hwrm_queue_qportcfg(struct bnge_dev *bd);
+int bnge_hwrm_vnic_set_hds(struct bnge_net *bn, struct bnge_vnic_info *vnic);
+int bnge_hwrm_vnic_ctx_alloc(struct bnge_dev *bd,
+ struct bnge_vnic_info *vnic, u16 ctx_idx);
+int bnge_hwrm_vnic_set_rss(struct bnge_net *bn,
+ struct bnge_vnic_info *vnic, bool set_rss);
+int bnge_hwrm_vnic_cfg(struct bnge_net *bn, struct bnge_vnic_info *vnic);
+void bnge_hwrm_update_rss_hash_cfg(struct bnge_net *bn);
+int bnge_hwrm_vnic_alloc(struct bnge_dev *bd, struct bnge_vnic_info *vnic,
+ unsigned int nr_rings);
+void bnge_hwrm_vnic_free_one(struct bnge_dev *bd, struct bnge_vnic_info *vnic);
+void bnge_hwrm_vnic_ctx_free_one(struct bnge_dev *bd,
+ struct bnge_vnic_info *vnic, u16 ctx_idx);
+int bnge_hwrm_l2_filter_free(struct bnge_dev *bd, struct bnge_l2_filter *fltr);
+int bnge_hwrm_l2_filter_alloc(struct bnge_dev *bd, struct bnge_l2_filter *fltr);
+int bnge_hwrm_cfa_l2_set_rx_mask(struct bnge_dev *bd,
+ struct bnge_vnic_info *vnic);
+void bnge_hwrm_stat_ctx_free(struct bnge_net *bn);
+int bnge_hwrm_stat_ctx_alloc(struct bnge_net *bn);
+int hwrm_ring_free_send_msg(struct bnge_net *bn, struct bnge_ring_struct *ring,
+ u32 ring_type, int cmpl_ring_id);
+int hwrm_ring_alloc_send_msg(struct bnge_net *bn,
+ struct bnge_ring_struct *ring,
+ u32 ring_type, u32 map_index);
+int bnge_hwrm_set_async_event_cr(struct bnge_dev *bd, int idx);
#endif /* _BNGE_HWRM_LIB_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
index 02254934f3d0..832eeb960bd2 100644
--- a/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
@@ -14,10 +14,2194 @@
#include <linux/if.h>
#include <net/ip.h>
#include <linux/skbuff.h>
+#include <net/page_pool/helpers.h>
#include "bnge.h"
#include "bnge_hwrm_lib.h"
#include "bnge_ethtool.h"
+#include "bnge_rmem.h"
+
+#define BNGE_RING_TO_TC_OFF(bd, tx) \
+ ((tx) % (bd)->tx_nr_rings_per_tc)
+
+#define BNGE_RING_TO_TC(bd, tx) \
+ ((tx) / (bd)->tx_nr_rings_per_tc)
+
+#define BNGE_TC_TO_RING_BASE(bd, tc) \
+ ((tc) * (bd)->tx_nr_rings_per_tc)
+
+static void bnge_free_stats_mem(struct bnge_net *bn,
+ struct bnge_stats_mem *stats)
+{
+ struct bnge_dev *bd = bn->bd;
+
+ if (stats->hw_stats) {
+ dma_free_coherent(bd->dev, stats->len, stats->hw_stats,
+ stats->hw_stats_map);
+ stats->hw_stats = NULL;
+ }
+}
+
+static int bnge_alloc_stats_mem(struct bnge_net *bn,
+ struct bnge_stats_mem *stats)
+{
+ struct bnge_dev *bd = bn->bd;
+
+ stats->hw_stats = dma_alloc_coherent(bd->dev, stats->len,
+ &stats->hw_stats_map, GFP_KERNEL);
+ if (!stats->hw_stats)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void bnge_free_ring_stats(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ if (!bn->bnapi)
+ return;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+ struct bnge_nq_ring_info *nqr = &bnapi->nq_ring;
+
+ bnge_free_stats_mem(bn, &nqr->stats);
+ }
+}
+
+static int bnge_alloc_ring_stats(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ u32 size, i;
+ int rc;
+
+ size = bd->hw_ring_stats_size;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+ struct bnge_nq_ring_info *nqr = &bnapi->nq_ring;
+
+ nqr->stats.len = size;
+ rc = bnge_alloc_stats_mem(bn, &nqr->stats);
+ if (rc)
+ goto err_free_ring_stats;
+
+ nqr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
+ }
+ return 0;
+
+err_free_ring_stats:
+ bnge_free_ring_stats(bn);
+ return rc;
+}
+
+static void bnge_free_nq_desc_arr(struct bnge_nq_ring_info *nqr)
+{
+ struct bnge_ring_struct *ring = &nqr->ring_struct;
+
+ kfree(nqr->desc_ring);
+ nqr->desc_ring = NULL;
+ ring->ring_mem.pg_arr = NULL;
+ kfree(nqr->desc_mapping);
+ nqr->desc_mapping = NULL;
+ ring->ring_mem.dma_arr = NULL;
+}
+
+static void bnge_free_cp_desc_arr(struct bnge_cp_ring_info *cpr)
+{
+ struct bnge_ring_struct *ring = &cpr->ring_struct;
+
+ kfree(cpr->desc_ring);
+ cpr->desc_ring = NULL;
+ ring->ring_mem.pg_arr = NULL;
+ kfree(cpr->desc_mapping);
+ cpr->desc_mapping = NULL;
+ ring->ring_mem.dma_arr = NULL;
+}
+
+static int bnge_alloc_nq_desc_arr(struct bnge_nq_ring_info *nqr, int n)
+{
+ nqr->desc_ring = kcalloc(n, sizeof(*nqr->desc_ring), GFP_KERNEL);
+ if (!nqr->desc_ring)
+ return -ENOMEM;
+
+ nqr->desc_mapping = kcalloc(n, sizeof(*nqr->desc_mapping), GFP_KERNEL);
+ if (!nqr->desc_mapping)
+ goto err_free_desc_ring;
+ return 0;
+
+err_free_desc_ring:
+ kfree(nqr->desc_ring);
+ nqr->desc_ring = NULL;
+ return -ENOMEM;
+}
+
+static int bnge_alloc_cp_desc_arr(struct bnge_cp_ring_info *cpr, int n)
+{
+ cpr->desc_ring = kcalloc(n, sizeof(*cpr->desc_ring), GFP_KERNEL);
+ if (!cpr->desc_ring)
+ return -ENOMEM;
+
+ cpr->desc_mapping = kcalloc(n, sizeof(*cpr->desc_mapping), GFP_KERNEL);
+ if (!cpr->desc_mapping)
+ goto err_free_desc_ring;
+ return 0;
+
+err_free_desc_ring:
+ kfree(cpr->desc_ring);
+ cpr->desc_ring = NULL;
+ return -ENOMEM;
+}
+
+static void bnge_free_nq_arrays(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+
+ bnge_free_nq_desc_arr(&bnapi->nq_ring);
+ }
+}
+
+static int bnge_alloc_nq_arrays(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i, rc;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+
+ rc = bnge_alloc_nq_desc_arr(&bnapi->nq_ring, bn->cp_nr_pages);
+ if (rc)
+ goto err_free_nq_arrays;
+ }
+ return 0;
+
+err_free_nq_arrays:
+ bnge_free_nq_arrays(bn);
+ return rc;
+}
+
+static void bnge_free_nq_tree(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+ struct bnge_nq_ring_info *nqr;
+ struct bnge_ring_struct *ring;
+ int j;
+
+ nqr = &bnapi->nq_ring;
+ ring = &nqr->ring_struct;
+
+ bnge_free_ring(bd, &ring->ring_mem);
+
+ if (!nqr->cp_ring_arr)
+ continue;
+
+ for (j = 0; j < nqr->cp_ring_count; j++) {
+ struct bnge_cp_ring_info *cpr = &nqr->cp_ring_arr[j];
+
+ ring = &cpr->ring_struct;
+ bnge_free_ring(bd, &ring->ring_mem);
+ bnge_free_cp_desc_arr(cpr);
+ }
+ kfree(nqr->cp_ring_arr);
+ nqr->cp_ring_arr = NULL;
+ nqr->cp_ring_count = 0;
+ }
+}
+
+static int alloc_one_cp_ring(struct bnge_net *bn,
+ struct bnge_cp_ring_info *cpr)
+{
+ struct bnge_ring_mem_info *rmem;
+ struct bnge_ring_struct *ring;
+ struct bnge_dev *bd = bn->bd;
+ int rc;
+
+ rc = bnge_alloc_cp_desc_arr(cpr, bn->cp_nr_pages);
+ if (rc)
+ return -ENOMEM;
+ ring = &cpr->ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bn->cp_nr_pages;
+ rmem->page_size = HW_CMPD_RING_SIZE;
+ rmem->pg_arr = (void **)cpr->desc_ring;
+ rmem->dma_arr = cpr->desc_mapping;
+ rmem->flags = BNGE_RMEM_RING_PTE_FLAG;
+ rc = bnge_alloc_ring(bd, rmem);
+ if (rc)
+ goto err_free_cp_desc_arr;
+ return rc;
+
+err_free_cp_desc_arr:
+ bnge_free_cp_desc_arr(cpr);
+ return rc;
+}
+
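+/*
+ * Build the NQ tree: each NQ carries one completion ring for its RX ring
+ * (if any) plus one per TC for TX.  With shared channels an NQ serves
+ * both RX and TX; otherwise the first rx_nr_rings NQs handle RX and the
+ * remainder handle TX.
+ */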
+static int bnge_alloc_nq_tree(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i, j, ulp_msix, rc;
+ int tcs = 1;
+
+ ulp_msix = bnge_aux_get_msix(bd);
+ for (i = 0, j = 0; i < bd->nq_nr_rings; i++) {
+ bool sh = !!(bd->flags & BNGE_EN_SHARED_CHNL);
+ struct bnge_napi *bnapi = bn->bnapi[i];
+ struct bnge_nq_ring_info *nqr;
+ struct bnge_cp_ring_info *cpr;
+ struct bnge_ring_struct *ring;
+ int cp_count = 0, k;
+ int rx = 0, tx = 0;
+
+ nqr = &bnapi->nq_ring;
+ nqr->bnapi = bnapi;
+ ring = &nqr->ring_struct;
+
+ rc = bnge_alloc_ring(bd, &ring->ring_mem);
+ if (rc)
+ goto err_free_nq_tree;
+
+ ring->map_idx = ulp_msix + i;
+
+ if (i < bd->rx_nr_rings) {
+ cp_count++;
+ rx = 1;
+ }
+
+ if ((sh && i < bd->tx_nr_rings) ||
+ (!sh && i >= bd->rx_nr_rings)) {
+ cp_count += tcs;
+ tx = 1;
+ }
+
+ nqr->cp_ring_arr = kcalloc(cp_count, sizeof(*cpr),
+ GFP_KERNEL);
+ if (!nqr->cp_ring_arr) {
+ rc = -ENOMEM;
+ goto err_free_nq_tree;
+ }
+
+ nqr->cp_ring_count = cp_count;
+
+ for (k = 0; k < cp_count; k++) {
+ cpr = &nqr->cp_ring_arr[k];
+ rc = alloc_one_cp_ring(bn, cpr);
+ if (rc)
+ goto err_free_nq_tree;
+
+ cpr->bnapi = bnapi;
+ cpr->cp_idx = k;
+ if (!k && rx) {
+ bn->rx_ring[i].rx_cpr = cpr;
+ cpr->cp_ring_type = BNGE_NQ_HDL_TYPE_RX;
+ } else {
+ int n, tc = k - rx;
+
+ n = BNGE_TC_TO_RING_BASE(bd, tc) + j;
+ bn->tx_ring[n].tx_cpr = cpr;
+ cpr->cp_ring_type = BNGE_NQ_HDL_TYPE_TX;
+ }
+ }
+ if (tx)
+ j++;
+ }
+ return 0;
+
+err_free_nq_tree:
+ bnge_free_nq_tree(bn);
+ return rc;
+}
+
+static bool bnge_separate_head_pool(struct bnge_rx_ring_info *rxr)
+{
+ return rxr->need_head_pool || PAGE_SIZE > BNGE_RX_PAGE_SIZE;
+}
+
+static void bnge_free_one_rx_ring_bufs(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr)
+{
+ int i, max_idx;
+
+ if (!rxr->rx_buf_ring)
+ return;
+
+ max_idx = bn->rx_nr_pages * RX_DESC_CNT;
+
+ for (i = 0; i < max_idx; i++) {
+ struct bnge_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
+ void *data = rx_buf->data;
+
+ if (!data)
+ continue;
+
+ rx_buf->data = NULL;
+ page_pool_free_va(rxr->head_pool, data, true);
+ }
+}
+
+static void bnge_free_one_agg_ring_bufs(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr)
+{
+ int i, max_idx;
+
+ if (!rxr->rx_agg_buf_ring)
+ return;
+
+ max_idx = bn->rx_agg_nr_pages * RX_DESC_CNT;
+
+ for (i = 0; i < max_idx; i++) {
+ struct bnge_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_buf_ring[i];
+ netmem_ref netmem = rx_agg_buf->netmem;
+
+ if (!netmem)
+ continue;
+
+ rx_agg_buf->netmem = 0;
+ __clear_bit(i, rxr->rx_agg_bmap);
+
+ page_pool_recycle_direct_netmem(rxr->page_pool, netmem);
+ }
+}
+
+static void bnge_free_one_rx_ring_pair_bufs(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr)
+{
+ bnge_free_one_rx_ring_bufs(bn, rxr);
+ bnge_free_one_agg_ring_bufs(bn, rxr);
+}
+
+static void bnge_free_rx_ring_pair_bufs(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ if (!bn->rx_ring)
+ return;
+
+ for (i = 0; i < bd->rx_nr_rings; i++)
+ bnge_free_one_rx_ring_pair_bufs(bn, &bn->rx_ring[i]);
+}
+
+static void bnge_free_all_rings_bufs(struct bnge_net *bn)
+{
+ bnge_free_rx_ring_pair_bufs(bn);
+}
+
+static void bnge_free_rx_rings(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ for (i = 0; i < bd->rx_nr_rings; i++) {
+ struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];
+ struct bnge_ring_struct *ring;
+
+ page_pool_destroy(rxr->page_pool);
+ page_pool_destroy(rxr->head_pool);
+ rxr->page_pool = rxr->head_pool = NULL;
+
+ kfree(rxr->rx_agg_bmap);
+ rxr->rx_agg_bmap = NULL;
+
+ ring = &rxr->rx_ring_struct;
+ bnge_free_ring(bd, &ring->ring_mem);
+
+ ring = &rxr->rx_agg_ring_struct;
+ bnge_free_ring(bd, &ring->ring_mem);
+ }
+}
+
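+/*
+ * Each RX ring owns a page_pool for aggregation buffers.  If that pool
+ * hands out unreadable netmem (or PAGE_SIZE exceeds BNGE_RX_PAGE_SIZE),
+ * a separate readable head_pool is created for header buffers; otherwise
+ * head_pool takes an extra reference on the same pool.
+ */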
+static int bnge_alloc_rx_page_pool(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr,
+ int numa_node)
+{
+ const unsigned int agg_size_fac = PAGE_SIZE / BNGE_RX_PAGE_SIZE;
+ const unsigned int rx_size_fac = PAGE_SIZE / SZ_4K;
+ struct page_pool_params pp = { 0 };
+ struct bnge_dev *bd = bn->bd;
+ struct page_pool *pool;
+
+ pp.pool_size = bn->rx_agg_ring_size / agg_size_fac;
+ pp.nid = numa_node;
+ pp.netdev = bn->netdev;
+ pp.dev = bd->dev;
+ pp.dma_dir = bn->rx_dir;
+ pp.max_len = PAGE_SIZE;
+ pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV |
+ PP_FLAG_ALLOW_UNREADABLE_NETMEM;
+ pp.queue_idx = rxr->bnapi->index;
+
+ pool = page_pool_create(&pp);
+ if (IS_ERR(pool))
+ return PTR_ERR(pool);
+ rxr->page_pool = pool;
+
+ rxr->need_head_pool = page_pool_is_unreadable(pool);
+ if (bnge_separate_head_pool(rxr)) {
+ pp.pool_size = min(bn->rx_ring_size / rx_size_fac, 1024);
+ pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+ pool = page_pool_create(&pp);
+ if (IS_ERR(pool))
+ goto err_destroy_pp;
+ } else {
+ page_pool_get(pool);
+ }
+ rxr->head_pool = pool;
+ return 0;
+
+err_destroy_pp:
+ page_pool_destroy(rxr->page_pool);
+ rxr->page_pool = NULL;
+ return PTR_ERR(pool);
+}
+
+static void bnge_enable_rx_page_pool(struct bnge_rx_ring_info *rxr)
+{
+ page_pool_enable_direct_recycling(rxr->head_pool, &rxr->bnapi->napi);
+ page_pool_enable_direct_recycling(rxr->page_pool, &rxr->bnapi->napi);
+}
+
+static int bnge_alloc_rx_agg_bmap(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr)
+{
+ u16 mem_size;
+
+ rxr->rx_agg_bmap_size = bn->rx_agg_ring_mask + 1;
+ mem_size = rxr->rx_agg_bmap_size / 8;
+ rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
+ if (!rxr->rx_agg_bmap)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int bnge_alloc_rx_rings(struct bnge_net *bn)
+{
+ int i, rc = 0, agg_rings = 0, cpu;
+ struct bnge_dev *bd = bn->bd;
+
+ if (bnge_is_agg_reqd(bd))
+ agg_rings = 1;
+
+ for (i = 0; i < bd->rx_nr_rings; i++) {
+ struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];
+ struct bnge_ring_struct *ring;
+ int cpu_node;
+
+ ring = &rxr->rx_ring_struct;
+
+ cpu = cpumask_local_spread(i, dev_to_node(bd->dev));
+ cpu_node = cpu_to_node(cpu);
+ netdev_dbg(bn->netdev, "Allocating page pool for rx_ring[%d] on numa_node: %d\n",
+ i, cpu_node);
+ rc = bnge_alloc_rx_page_pool(bn, rxr, cpu_node);
+ if (rc)
+ goto err_free_rx_rings;
+ bnge_enable_rx_page_pool(rxr);
+
+ rc = bnge_alloc_ring(bd, &ring->ring_mem);
+ if (rc)
+ goto err_free_rx_rings;
+
+ ring->grp_idx = i;
+ if (agg_rings) {
+ ring = &rxr->rx_agg_ring_struct;
+ rc = bnge_alloc_ring(bd, &ring->ring_mem);
+ if (rc)
+ goto err_free_rx_rings;
+
+ ring->grp_idx = i;
+ rc = bnge_alloc_rx_agg_bmap(bn, rxr);
+ if (rc)
+ goto err_free_rx_rings;
+ }
+ }
+ return rc;
+
+err_free_rx_rings:
+ bnge_free_rx_rings(bn);
+ return rc;
+}
+
+static void bnge_free_tx_rings(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ for (i = 0; i < bd->tx_nr_rings; i++) {
+ struct bnge_tx_ring_info *txr = &bn->tx_ring[i];
+ struct bnge_ring_struct *ring;
+
+ ring = &txr->tx_ring_struct;
+
+ bnge_free_ring(bd, &ring->ring_mem);
+ }
+}
+
+static int bnge_alloc_tx_rings(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i, j, rc;
+
+ for (i = 0, j = 0; i < bd->tx_nr_rings; i++) {
+ struct bnge_tx_ring_info *txr = &bn->tx_ring[i];
+ struct bnge_ring_struct *ring;
+ u8 qidx;
+
+ ring = &txr->tx_ring_struct;
+
+ rc = bnge_alloc_ring(bd, &ring->ring_mem);
+ if (rc)
+ goto err_free_tx_rings;
+
+ ring->grp_idx = txr->bnapi->index;
+ qidx = bd->tc_to_qidx[j];
+ ring->queue_id = bd->q_info[qidx].queue_id;
+ if (BNGE_RING_TO_TC_OFF(bd, i) == (bd->tx_nr_rings_per_tc - 1))
+ j++;
+ }
+ return 0;
+
+err_free_tx_rings:
+ bnge_free_tx_rings(bn);
+ return rc;
+}
+
+static void bnge_free_vnic_attributes(struct bnge_net *bn)
+{
+ struct pci_dev *pdev = bn->bd->pdev;
+ struct bnge_vnic_info *vnic;
+ int i;
+
+ if (!bn->vnic_info)
+ return;
+
+ for (i = 0; i < bn->nr_vnics; i++) {
+ vnic = &bn->vnic_info[i];
+
+ kfree(vnic->uc_list);
+ vnic->uc_list = NULL;
+
+ if (vnic->mc_list) {
+ dma_free_coherent(&pdev->dev, vnic->mc_list_size,
+ vnic->mc_list, vnic->mc_list_mapping);
+ vnic->mc_list = NULL;
+ }
+
+ if (vnic->rss_table) {
+ dma_free_coherent(&pdev->dev, vnic->rss_table_size,
+ vnic->rss_table,
+ vnic->rss_table_dma_addr);
+ vnic->rss_table = NULL;
+ }
+
+ vnic->rss_hash_key = NULL;
+ vnic->flags = 0;
+ }
+}
+
+static int bnge_alloc_vnic_attributes(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ struct bnge_vnic_info *vnic;
+ int i, size;
+
+ for (i = 0; i < bn->nr_vnics; i++) {
+ vnic = &bn->vnic_info[i];
+
+ if (vnic->flags & BNGE_VNIC_UCAST_FLAG) {
+ int mem_size = (BNGE_MAX_UC_ADDRS - 1) * ETH_ALEN;
+
+ vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
+ if (!vnic->uc_list)
+ goto err_free_vnic_attributes;
+ }
+
+ if (vnic->flags & BNGE_VNIC_MCAST_FLAG) {
+ vnic->mc_list_size = BNGE_MAX_MC_ADDRS * ETH_ALEN;
+ vnic->mc_list =
+ dma_alloc_coherent(bd->dev,
+ vnic->mc_list_size,
+ &vnic->mc_list_mapping,
+ GFP_KERNEL);
+ if (!vnic->mc_list)
+ goto err_free_vnic_attributes;
+ }
+
+ /* Allocate rss table and hash key */
+ size = L1_CACHE_ALIGN(BNGE_MAX_RSS_TABLE_SIZE);
+
+ vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
+ vnic->rss_table = dma_alloc_coherent(bd->dev,
+ vnic->rss_table_size,
+ &vnic->rss_table_dma_addr,
+ GFP_KERNEL);
+ if (!vnic->rss_table)
+ goto err_free_vnic_attributes;
+
+ vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
+ vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
+ }
+ return 0;
+
+err_free_vnic_attributes:
+ bnge_free_vnic_attributes(bn);
+ return -ENOMEM;
+}
+
+static int bnge_alloc_vnics(struct bnge_net *bn)
+{
+ int num_vnics;
+
+	/* Allocate only 1 VNIC for now.
+	 * Additional VNICs based on RFS/NTUPLE will be added in future
+	 * patches.
+	 */
+ num_vnics = 1;
+
+ bn->vnic_info = kcalloc(num_vnics, sizeof(struct bnge_vnic_info),
+ GFP_KERNEL);
+ if (!bn->vnic_info)
+ return -ENOMEM;
+
+ bn->nr_vnics = num_vnics;
+
+ return 0;
+}
+
+static void bnge_free_vnics(struct bnge_net *bn)
+{
+ kfree(bn->vnic_info);
+ bn->vnic_info = NULL;
+ bn->nr_vnics = 0;
+}
+
+static void bnge_free_ring_grps(struct bnge_net *bn)
+{
+ kfree(bn->grp_info);
+ bn->grp_info = NULL;
+}
+
+static int bnge_init_ring_grps(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ bn->grp_info = kcalloc(bd->nq_nr_rings,
+ sizeof(struct bnge_ring_grp_info),
+ GFP_KERNEL);
+ if (!bn->grp_info)
+ return -ENOMEM;
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ bn->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
+ bn->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
+ bn->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
+ bn->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
+ bn->grp_info[i].nq_fw_ring_id = INVALID_HW_RING_ID;
+ }
+
+ return 0;
+}
+
+static void bnge_free_core(struct bnge_net *bn)
+{
+ bnge_free_vnic_attributes(bn);
+ bnge_free_tx_rings(bn);
+ bnge_free_rx_rings(bn);
+ bnge_free_nq_tree(bn);
+ bnge_free_nq_arrays(bn);
+ bnge_free_ring_stats(bn);
+ bnge_free_ring_grps(bn);
+ bnge_free_vnics(bn);
+ kfree(bn->tx_ring_map);
+ bn->tx_ring_map = NULL;
+ kfree(bn->tx_ring);
+ bn->tx_ring = NULL;
+ kfree(bn->rx_ring);
+ bn->rx_ring = NULL;
+ kfree(bn->bnapi);
+ bn->bnapi = NULL;
+}
+
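+/*
+ * bn->bnapi is a single allocation: an L1-cache-aligned array of
+ * pointers followed by the bnge_napi structs themselves, so one
+ * kzalloc()/kfree() pair covers both.
+ */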
+static int bnge_alloc_core(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i, j, size, arr_size;
+ int rc = -ENOMEM;
+ void *bnapi;
+
+ arr_size = L1_CACHE_ALIGN(sizeof(struct bnge_napi *) *
+ bd->nq_nr_rings);
+ size = L1_CACHE_ALIGN(sizeof(struct bnge_napi));
+ bnapi = kzalloc(arr_size + size * bd->nq_nr_rings, GFP_KERNEL);
+ if (!bnapi)
+ return rc;
+
+ bn->bnapi = bnapi;
+ bnapi += arr_size;
+ for (i = 0; i < bd->nq_nr_rings; i++, bnapi += size) {
+ struct bnge_nq_ring_info *nqr;
+
+ bn->bnapi[i] = bnapi;
+ bn->bnapi[i]->index = i;
+ bn->bnapi[i]->bn = bn;
+ nqr = &bn->bnapi[i]->nq_ring;
+ nqr->ring_struct.ring_mem.flags = BNGE_RMEM_RING_PTE_FLAG;
+ }
+
+ bn->rx_ring = kcalloc(bd->rx_nr_rings,
+ sizeof(struct bnge_rx_ring_info),
+ GFP_KERNEL);
+ if (!bn->rx_ring)
+ goto err_free_core;
+
+ for (i = 0; i < bd->rx_nr_rings; i++) {
+ struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];
+
+ rxr->rx_ring_struct.ring_mem.flags =
+ BNGE_RMEM_RING_PTE_FLAG;
+ rxr->rx_agg_ring_struct.ring_mem.flags =
+ BNGE_RMEM_RING_PTE_FLAG;
+ rxr->bnapi = bn->bnapi[i];
+ bn->bnapi[i]->rx_ring = &bn->rx_ring[i];
+ }
+
+ bn->tx_ring = kcalloc(bd->tx_nr_rings,
+ sizeof(struct bnge_tx_ring_info),
+ GFP_KERNEL);
+ if (!bn->tx_ring)
+ goto err_free_core;
+
+ bn->tx_ring_map = kcalloc(bd->tx_nr_rings, sizeof(u16),
+ GFP_KERNEL);
+ if (!bn->tx_ring_map)
+ goto err_free_core;
+
+ if (bd->flags & BNGE_EN_SHARED_CHNL)
+ j = 0;
+ else
+ j = bd->rx_nr_rings;
+
+ for (i = 0; i < bd->tx_nr_rings; i++) {
+ struct bnge_tx_ring_info *txr = &bn->tx_ring[i];
+ struct bnge_napi *bnapi2;
+ int k;
+
+ txr->tx_ring_struct.ring_mem.flags = BNGE_RMEM_RING_PTE_FLAG;
+ bn->tx_ring_map[i] = i;
+ k = j + BNGE_RING_TO_TC_OFF(bd, i);
+
+ bnapi2 = bn->bnapi[k];
+ txr->txq_index = i;
+ txr->tx_napi_idx =
+ BNGE_RING_TO_TC(bd, txr->txq_index);
+ bnapi2->tx_ring[txr->tx_napi_idx] = txr;
+ txr->bnapi = bnapi2;
+ }
+
+ rc = bnge_alloc_ring_stats(bn);
+ if (rc)
+ goto err_free_core;
+
+ rc = bnge_alloc_vnics(bn);
+ if (rc)
+ goto err_free_core;
+
+ rc = bnge_alloc_nq_arrays(bn);
+ if (rc)
+ goto err_free_core;
+
+ bnge_init_ring_struct(bn);
+
+ rc = bnge_alloc_rx_rings(bn);
+ if (rc)
+ goto err_free_core;
+
+ rc = bnge_alloc_tx_rings(bn);
+ if (rc)
+ goto err_free_core;
+
+ rc = bnge_alloc_nq_tree(bn);
+ if (rc)
+ goto err_free_core;
+
+ bn->vnic_info[BNGE_VNIC_DEFAULT].flags |= BNGE_VNIC_RSS_FLAG |
+ BNGE_VNIC_MCAST_FLAG |
+ BNGE_VNIC_UCAST_FLAG;
+ rc = bnge_alloc_vnic_attributes(bn);
+ if (rc)
+ goto err_free_core;
+ return 0;
+
+err_free_core:
+ bnge_free_core(bn);
+ return rc;
+}
+
+u16 bnge_cp_ring_for_rx(struct bnge_rx_ring_info *rxr)
+{
+ return rxr->rx_cpr->ring_struct.fw_ring_id;
+}
+
+u16 bnge_cp_ring_for_tx(struct bnge_tx_ring_info *txr)
+{
+ return txr->tx_cpr->ring_struct.fw_ring_id;
+}
+
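+/*
+ * NQ doorbells are written with DBR_TYPE_NQ_MASK, i.e. without re-arming
+ * the notification interrupt; CQ doorbells use DBR_TYPE_CQ_ARMALL to
+ * re-arm for all completion events.
+ */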
+static void bnge_db_nq(struct bnge_net *bn, struct bnge_db_info *db, u32 idx)
+{
+ bnge_writeq(bn->bd, db->db_key64 | DBR_TYPE_NQ_MASK |
+ DB_RING_IDX(db, idx), db->doorbell);
+}
+
+static void bnge_db_cq(struct bnge_net *bn, struct bnge_db_info *db, u32 idx)
+{
+ bnge_writeq(bn->bd, db->db_key64 | DBR_TYPE_CQ_ARMALL |
+ DB_RING_IDX(db, idx), db->doorbell);
+}
+
+static int bnge_cp_num_to_irq_num(struct bnge_net *bn, int n)
+{
+ struct bnge_napi *bnapi = bn->bnapi[n];
+ struct bnge_nq_ring_info *nqr;
+
+ nqr = &bnapi->nq_ring;
+
+ return nqr->ring_struct.map_idx;
+}
+
+static irqreturn_t bnge_msix(int irq, void *dev_instance)
+{
+ /* NAPI scheduling to be added in a future patch */
+ return IRQ_HANDLED;
+}
+
+static void bnge_init_nq_tree(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i, j;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_nq_ring_info *nqr = &bn->bnapi[i]->nq_ring;
+ struct bnge_ring_struct *ring = &nqr->ring_struct;
+
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ for (j = 0; j < nqr->cp_ring_count; j++) {
+ struct bnge_cp_ring_info *cpr = &nqr->cp_ring_arr[j];
+
+ ring = &cpr->ring_struct;
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ }
+ }
+}
+
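+/*
+ * RX buffer allocation: aggregation buffers come straight from the
+ * ring's page_pool; when PAGE_SIZE exceeds BNGE_RX_PAGE_SIZE they are
+ * handed out as page frags so several buffers share one page.  Header
+ * buffers come from head_pool as frags of bn->rx_buf_size bytes.
+ */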
+static netmem_ref __bnge_alloc_rx_netmem(struct bnge_net *bn,
+ dma_addr_t *mapping,
+ struct bnge_rx_ring_info *rxr,
+ unsigned int *offset,
+ gfp_t gfp)
+{
+ netmem_ref netmem;
+
+ if (PAGE_SIZE > BNGE_RX_PAGE_SIZE) {
+ netmem = page_pool_alloc_frag_netmem(rxr->page_pool, offset,
+ BNGE_RX_PAGE_SIZE, gfp);
+ } else {
+ netmem = page_pool_alloc_netmems(rxr->page_pool, gfp);
+ *offset = 0;
+ }
+ if (!netmem)
+ return 0;
+
+ *mapping = page_pool_get_dma_addr_netmem(netmem) + *offset;
+ return netmem;
+}
+
+static u8 *__bnge_alloc_rx_frag(struct bnge_net *bn, dma_addr_t *mapping,
+ struct bnge_rx_ring_info *rxr,
+ gfp_t gfp)
+{
+ unsigned int offset;
+ struct page *page;
+
+ page = page_pool_alloc_frag(rxr->head_pool, &offset,
+ bn->rx_buf_size, gfp);
+ if (!page)
+ return NULL;
+
+ *mapping = page_pool_get_dma_addr(page) + bn->rx_dma_offset + offset;
+ return page_address(page) + offset;
+}
+
+static int bnge_alloc_rx_data(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr,
+ u16 prod, gfp_t gfp)
+{
+ struct bnge_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[RING_RX(bn, prod)];
+ struct rx_bd *rxbd;
+ dma_addr_t mapping;
+ u8 *data;
+
+ rxbd = &rxr->rx_desc_ring[RX_RING(bn, prod)][RX_IDX(prod)];
+ data = __bnge_alloc_rx_frag(bn, &mapping, rxr, gfp);
+ if (!data)
+ return -ENOMEM;
+
+ rx_buf->data = data;
+ rx_buf->data_ptr = data + bn->rx_offset;
+ rx_buf->mapping = mapping;
+
+ rxbd->rx_bd_haddr = cpu_to_le64(mapping);
+
+ return 0;
+}
+
+static int bnge_alloc_one_rx_ring_bufs(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr,
+ int ring_nr)
+{
+ u32 prod = rxr->rx_prod;
+ int i, rc = 0;
+
+ for (i = 0; i < bn->rx_ring_size; i++) {
+ rc = bnge_alloc_rx_data(bn, rxr, prod, GFP_KERNEL);
+ if (rc)
+ break;
+ prod = NEXT_RX(prod);
+ }
+
+ /* Abort if not a single buffer can be allocated */
+ if (rc && !i) {
+ netdev_err(bn->netdev,
+ "RX ring %d: allocated %d/%d buffers, abort\n",
+ ring_nr, i, bn->rx_ring_size);
+ return rc;
+ }
+
+ rxr->rx_prod = prod;
+
+ if (i < bn->rx_ring_size)
+ netdev_warn(bn->netdev,
+ "RX ring %d: allocated %d/%d buffers, continuing\n",
+ ring_nr, i, bn->rx_ring_size);
+ return 0;
+}
+
+static u16 bnge_find_next_agg_idx(struct bnge_rx_ring_info *rxr, u16 idx)
+{
+ u16 next, max = rxr->rx_agg_bmap_size;
+
+ next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
+ if (next >= max)
+ next = find_first_zero_bit(rxr->rx_agg_bmap, max);
+ return next;
+}
+
+static int bnge_alloc_rx_netmem(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr,
+ u16 prod, gfp_t gfp)
+{
+ struct bnge_sw_rx_agg_bd *rx_agg_buf;
+ u16 sw_prod = rxr->rx_sw_agg_prod;
+ unsigned int offset = 0;
+ struct rx_bd *rxbd;
+ dma_addr_t mapping;
+ netmem_ref netmem;
+
+ rxbd = &rxr->rx_agg_desc_ring[RX_AGG_RING(bn, prod)][RX_IDX(prod)];
+ netmem = __bnge_alloc_rx_netmem(bn, &mapping, rxr, &offset, gfp);
+ if (!netmem)
+ return -ENOMEM;
+
+ if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
+ sw_prod = bnge_find_next_agg_idx(rxr, sw_prod);
+
+ __set_bit(sw_prod, rxr->rx_agg_bmap);
+ rx_agg_buf = &rxr->rx_agg_buf_ring[sw_prod];
+ rxr->rx_sw_agg_prod = RING_RX_AGG(bn, NEXT_RX_AGG(sw_prod));
+
+ rx_agg_buf->netmem = netmem;
+ rx_agg_buf->offset = offset;
+ rx_agg_buf->mapping = mapping;
+ rxbd->rx_bd_haddr = cpu_to_le64(mapping);
+ rxbd->rx_bd_opaque = sw_prod;
+ return 0;
+}
+
+static int bnge_alloc_one_agg_ring_bufs(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr,
+ int ring_nr)
+{
+ u32 prod = rxr->rx_agg_prod;
+ int i, rc = 0;
+
+ for (i = 0; i < bn->rx_agg_ring_size; i++) {
+ rc = bnge_alloc_rx_netmem(bn, rxr, prod, GFP_KERNEL);
+ if (rc)
+ break;
+ prod = NEXT_RX_AGG(prod);
+ }
+
+ if (rc && i < MAX_SKB_FRAGS) {
+ netdev_err(bn->netdev,
+ "Agg ring %d: allocated %d/%d buffers (min %d), abort\n",
+ ring_nr, i, bn->rx_agg_ring_size, MAX_SKB_FRAGS);
+ goto err_free_one_agg_ring_bufs;
+ }
+
+ rxr->rx_agg_prod = prod;
+
+ if (i < bn->rx_agg_ring_size)
+ netdev_warn(bn->netdev,
+ "Agg ring %d: allocated %d/%d buffers, continuing\n",
+ ring_nr, i, bn->rx_agg_ring_size);
+ return 0;
+
+err_free_one_agg_ring_bufs:
+ bnge_free_one_agg_ring_bufs(bn, rxr);
+ return -ENOMEM;
+}
+
+static int bnge_alloc_one_rx_ring_pair_bufs(struct bnge_net *bn, int ring_nr)
+{
+ struct bnge_rx_ring_info *rxr = &bn->rx_ring[ring_nr];
+ int rc;
+
+ rc = bnge_alloc_one_rx_ring_bufs(bn, rxr, ring_nr);
+ if (rc)
+ return rc;
+
+ if (bnge_is_agg_reqd(bn->bd)) {
+ rc = bnge_alloc_one_agg_ring_bufs(bn, rxr, ring_nr);
+ if (rc)
+ goto err_free_one_rx_ring_bufs;
+ }
+ return 0;
+
+err_free_one_rx_ring_bufs:
+ bnge_free_one_rx_ring_bufs(bn, rxr);
+ return rc;
+}
+
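+/* Pre-fill every RX BD with the common len/flags/type word and its producer index. */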
+static void bnge_init_rxbd_pages(struct bnge_ring_struct *ring, u32 type)
+{
+ struct rx_bd **rx_desc_ring;
+ u32 prod;
+ int i;
+
+ rx_desc_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
+ for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
+ struct rx_bd *rxbd = rx_desc_ring[i];
+ int j;
+
+ for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
+ rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
+ rxbd->rx_bd_opaque = prod;
+ }
+ }
+}
+
+static void bnge_init_one_rx_ring_rxbd(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr)
+{
+ struct bnge_ring_struct *ring;
+ u32 type;
+
+ type = (bn->rx_buf_use_size << RX_BD_LEN_SHIFT) |
+ RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
+
+ if (NET_IP_ALIGN == 2)
+ type |= RX_BD_FLAGS_SOP;
+
+ ring = &rxr->rx_ring_struct;
+ bnge_init_rxbd_pages(ring, type);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+}
+
+static void bnge_init_one_agg_ring_rxbd(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr)
+{
+ struct bnge_ring_struct *ring;
+ u32 type;
+
+ ring = &rxr->rx_agg_ring_struct;
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ if (bnge_is_agg_reqd(bn->bd)) {
+ type = ((u32)BNGE_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
+ RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
+
+ bnge_init_rxbd_pages(ring, type);
+ }
+}
+
+static void bnge_init_one_rx_ring_pair(struct bnge_net *bn, int ring_nr)
+{
+ struct bnge_rx_ring_info *rxr;
+
+ rxr = &bn->rx_ring[ring_nr];
+ bnge_init_one_rx_ring_rxbd(bn, rxr);
+
+ netif_queue_set_napi(bn->netdev, ring_nr, NETDEV_QUEUE_TYPE_RX,
+ &rxr->bnapi->napi);
+
+ bnge_init_one_agg_ring_rxbd(bn, rxr);
+}
+
+static int bnge_alloc_rx_ring_pair_bufs(struct bnge_net *bn)
+{
+ int i, rc;
+
+ for (i = 0; i < bn->bd->rx_nr_rings; i++) {
+ rc = bnge_alloc_one_rx_ring_pair_bufs(bn, i);
+ if (rc)
+ goto err_free_rx_ring_pair_bufs;
+ }
+ return 0;
+
+err_free_rx_ring_pair_bufs:
+ bnge_free_rx_ring_pair_bufs(bn);
+ return rc;
+}
+
+static void bnge_init_rx_rings(struct bnge_net *bn)
+{
+ int i;
+
+#define BNGE_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
+#define BNGE_RX_DMA_OFFSET NET_SKB_PAD
+ bn->rx_offset = BNGE_RX_OFFSET;
+ bn->rx_dma_offset = BNGE_RX_DMA_OFFSET;
+
+ for (i = 0; i < bn->bd->rx_nr_rings; i++)
+ bnge_init_one_rx_ring_pair(bn, i);
+}
+
+static void bnge_init_tx_rings(struct bnge_net *bn)
+{
+ int i;
+
+ bn->tx_wake_thresh = max(bn->tx_ring_size / 2, BNGE_MIN_TX_DESC_CNT);
+
+ for (i = 0; i < bn->bd->tx_nr_rings; i++) {
+ struct bnge_tx_ring_info *txr = &bn->tx_ring[i];
+ struct bnge_ring_struct *ring = &txr->tx_ring_struct;
+
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+
+ netif_queue_set_napi(bn->netdev, i, NETDEV_QUEUE_TYPE_TX,
+ &txr->bnapi->napi);
+ }
+}
+
+static void bnge_init_vnics(struct bnge_net *bn)
+{
+ struct bnge_vnic_info *vnic0 = &bn->vnic_info[BNGE_VNIC_DEFAULT];
+ int i;
+
+ for (i = 0; i < bn->nr_vnics; i++) {
+ struct bnge_vnic_info *vnic = &bn->vnic_info[i];
+ int j;
+
+ vnic->fw_vnic_id = INVALID_HW_RING_ID;
+ vnic->vnic_id = i;
+ for (j = 0; j < BNGE_MAX_CTX_PER_VNIC; j++)
+ vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
+
+ if (bn->vnic_info[i].rss_hash_key) {
+ if (i == BNGE_VNIC_DEFAULT) {
+ u8 *key = (void *)vnic->rss_hash_key;
+ int k;
+
+ if (!bn->rss_hash_key_valid &&
+ !bn->rss_hash_key_updated) {
+ get_random_bytes(bn->rss_hash_key,
+ HW_HASH_KEY_SIZE);
+ bn->rss_hash_key_updated = true;
+ }
+
+ memcpy(vnic->rss_hash_key, bn->rss_hash_key,
+ HW_HASH_KEY_SIZE);
+
+ if (!bn->rss_hash_key_updated)
+ continue;
+
+ bn->rss_hash_key_updated = false;
+ bn->rss_hash_key_valid = true;
+
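+				/* Fold the first 8 key bytes, MSB first, into toeplitz_prefix. */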
+ bn->toeplitz_prefix = 0;
+ for (k = 0; k < 8; k++) {
+ bn->toeplitz_prefix <<= 8;
+ bn->toeplitz_prefix |= key[k];
+ }
+ } else {
+ memcpy(vnic->rss_hash_key, vnic0->rss_hash_key,
+ HW_HASH_KEY_SIZE);
+ }
+ }
+ }
+}
+
+static void bnge_set_db_mask(struct bnge_net *bn, struct bnge_db_info *db,
+ u32 ring_type)
+{
+ switch (ring_type) {
+ case HWRM_RING_ALLOC_TX:
+ db->db_ring_mask = bn->tx_ring_mask;
+ break;
+ case HWRM_RING_ALLOC_RX:
+ db->db_ring_mask = bn->rx_ring_mask;
+ break;
+ case HWRM_RING_ALLOC_AGG:
+ db->db_ring_mask = bn->rx_agg_ring_mask;
+ break;
+ case HWRM_RING_ALLOC_CMPL:
+ case HWRM_RING_ALLOC_NQ:
+ db->db_ring_mask = bn->cp_ring_mask;
+ break;
+ }
+ db->db_epoch_mask = db->db_ring_mask + 1;
+ db->db_epoch_shift = DBR_EPOCH_SFT - ilog2(db->db_epoch_mask);
+}
+
+static void bnge_set_db(struct bnge_net *bn, struct bnge_db_info *db,
+ u32 ring_type, u32 map_idx, u32 xid)
+{
+ struct bnge_dev *bd = bn->bd;
+
+ switch (ring_type) {
+ case HWRM_RING_ALLOC_TX:
+ db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
+ break;
+ case HWRM_RING_ALLOC_RX:
+ case HWRM_RING_ALLOC_AGG:
+ db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
+ break;
+ case HWRM_RING_ALLOC_CMPL:
+ db->db_key64 = DBR_PATH_L2;
+ break;
+ case HWRM_RING_ALLOC_NQ:
+ db->db_key64 = DBR_PATH_L2;
+ break;
+ }
+ db->db_key64 |= ((u64)xid << DBR_XID_SFT) | DBR_VALID;
+
+ db->doorbell = bd->bar1 + bd->db_offset;
+ bnge_set_db_mask(bn, db, ring_type);
+}
+
+static int bnge_hwrm_cp_ring_alloc(struct bnge_net *bn,
+ struct bnge_cp_ring_info *cpr)
+{
+ const u32 type = HWRM_RING_ALLOC_CMPL;
+ struct bnge_napi *bnapi = cpr->bnapi;
+ struct bnge_ring_struct *ring;
+ u32 map_idx = bnapi->index;
+ int rc;
+
+ ring = &cpr->ring_struct;
+ ring->handle = BNGE_SET_NQ_HDL(cpr);
+ rc = hwrm_ring_alloc_send_msg(bn, ring, type, map_idx);
+ if (rc)
+ return rc;
+
+ bnge_set_db(bn, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
+ bnge_db_cq(bn, &cpr->cp_db, cpr->cp_raw_cons);
+
+ return 0;
+}
+
+static int bnge_hwrm_tx_ring_alloc(struct bnge_net *bn,
+ struct bnge_tx_ring_info *txr, u32 tx_idx)
+{
+ struct bnge_ring_struct *ring = &txr->tx_ring_struct;
+ const u32 type = HWRM_RING_ALLOC_TX;
+ int rc;
+
+ rc = hwrm_ring_alloc_send_msg(bn, ring, type, tx_idx);
+ if (rc)
+ return rc;
+
+ bnge_set_db(bn, &txr->tx_db, type, tx_idx, ring->fw_ring_id);
+
+ return 0;
+}
+
+static int bnge_hwrm_rx_agg_ring_alloc(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr)
+{
+ struct bnge_ring_struct *ring = &rxr->rx_agg_ring_struct;
+ u32 type = HWRM_RING_ALLOC_AGG;
+ struct bnge_dev *bd = bn->bd;
+ u32 grp_idx = ring->grp_idx;
+ u32 map_idx;
+ int rc;
+
+ map_idx = grp_idx + bd->rx_nr_rings;
+ rc = hwrm_ring_alloc_send_msg(bn, ring, type, map_idx);
+ if (rc)
+ return rc;
+
+ bnge_set_db(bn, &rxr->rx_agg_db, type, map_idx,
+ ring->fw_ring_id);
+ bnge_db_write(bn->bd, &rxr->rx_agg_db, rxr->rx_agg_prod);
+ bnge_db_write(bn->bd, &rxr->rx_db, rxr->rx_prod);
+ bn->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
+
+ return 0;
+}
+
+static int bnge_hwrm_rx_ring_alloc(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr)
+{
+ struct bnge_ring_struct *ring = &rxr->rx_ring_struct;
+ struct bnge_napi *bnapi = rxr->bnapi;
+ u32 type = HWRM_RING_ALLOC_RX;
+ u32 map_idx = bnapi->index;
+ int rc;
+
+ rc = hwrm_ring_alloc_send_msg(bn, ring, type, map_idx);
+ if (rc)
+ return rc;
+
+ bnge_set_db(bn, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
+ bn->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
+
+ return 0;
+}
+
+static int bnge_hwrm_ring_alloc(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ bool agg_rings;
+ int i, rc = 0;
+
+ agg_rings = !!(bnge_is_agg_reqd(bd));
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+ struct bnge_nq_ring_info *nqr = &bnapi->nq_ring;
+ struct bnge_ring_struct *ring = &nqr->ring_struct;
+ u32 type = HWRM_RING_ALLOC_NQ;
+ u32 map_idx = ring->map_idx;
+ unsigned int vector;
+
+ vector = bd->irq_tbl[map_idx].vector;
+ disable_irq_nosync(vector);
+ rc = hwrm_ring_alloc_send_msg(bn, ring, type, map_idx);
+ if (rc) {
+ enable_irq(vector);
+ goto err_out;
+ }
+ bnge_set_db(bn, &nqr->nq_db, type, map_idx, ring->fw_ring_id);
+ bnge_db_nq(bn, &nqr->nq_db, nqr->nq_raw_cons);
+ enable_irq(vector);
+ bn->grp_info[i].nq_fw_ring_id = ring->fw_ring_id;
+
+ if (!i) {
+ rc = bnge_hwrm_set_async_event_cr(bd, ring->fw_ring_id);
+ if (rc)
+ netdev_warn(bn->netdev, "Failed to set async event completion ring.\n");
+ }
+ }
+
+ for (i = 0; i < bd->tx_nr_rings; i++) {
+ struct bnge_tx_ring_info *txr = &bn->tx_ring[i];
+
+ rc = bnge_hwrm_cp_ring_alloc(bn, txr->tx_cpr);
+ if (rc)
+ goto err_out;
+ rc = bnge_hwrm_tx_ring_alloc(bn, txr, i);
+ if (rc)
+ goto err_out;
+ }
+
+ for (i = 0; i < bd->rx_nr_rings; i++) {
+ struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];
+ struct bnge_cp_ring_info *cpr;
+ struct bnge_ring_struct *ring;
+ struct bnge_napi *bnapi;
+ u32 map_idx, type;
+
+ rc = bnge_hwrm_rx_ring_alloc(bn, rxr);
+ if (rc)
+ goto err_out;
+		/* With agg rings, agg buffers must be posted first; the RX
+		 * doorbell is then written in bnge_hwrm_rx_agg_ring_alloc().
+		 */
+ if (!agg_rings)
+ bnge_db_write(bn->bd, &rxr->rx_db, rxr->rx_prod);
+
+ cpr = rxr->rx_cpr;
+ bnapi = rxr->bnapi;
+ type = HWRM_RING_ALLOC_CMPL;
+ map_idx = bnapi->index;
+
+ ring = &cpr->ring_struct;
+ ring->handle = BNGE_SET_NQ_HDL(cpr);
+ rc = hwrm_ring_alloc_send_msg(bn, ring, type, map_idx);
+ if (rc)
+ goto err_out;
+ bnge_set_db(bn, &cpr->cp_db, type, map_idx,
+ ring->fw_ring_id);
+ bnge_db_cq(bn, &cpr->cp_db, cpr->cp_raw_cons);
+ }
+
+ if (agg_rings) {
+ for (i = 0; i < bd->rx_nr_rings; i++) {
+ rc = bnge_hwrm_rx_agg_ring_alloc(bn, &bn->rx_ring[i]);
+ if (rc)
+ goto err_out;
+ }
+ }
+err_out:
+ return rc;
+}
+
+void bnge_fill_hw_rss_tbl(struct bnge_net *bn, struct bnge_vnic_info *vnic)
+{
+ __le16 *ring_tbl = vnic->rss_table;
+ struct bnge_rx_ring_info *rxr;
+ struct bnge_dev *bd = bn->bd;
+ u16 tbl_size, i;
+
+ tbl_size = bnge_get_rxfh_indir_size(bd);
+
+ for (i = 0; i < tbl_size; i++) {
+ u16 ring_id, j;
+
+ j = bd->rss_indir_tbl[i];
+ rxr = &bn->rx_ring[j];
+
+ ring_id = rxr->rx_ring_struct.fw_ring_id;
+ *ring_tbl++ = cpu_to_le16(ring_id);
+ ring_id = bnge_cp_ring_for_rx(rxr);
+ *ring_tbl++ = cpu_to_le16(ring_id);
+ }
+}
+
+static int bnge_hwrm_vnic_rss_cfg(struct bnge_net *bn,
+ struct bnge_vnic_info *vnic)
+{
+ int rc;
+
+ rc = bnge_hwrm_vnic_set_rss(bn, vnic, true);
+ if (rc) {
+ netdev_err(bn->netdev, "hwrm vnic %d set rss failure rc: %d\n",
+ vnic->vnic_id, rc);
+ return rc;
+ }
+ rc = bnge_hwrm_vnic_cfg(bn, vnic);
+ if (rc)
+ netdev_err(bn->netdev, "hwrm vnic %d cfg failure rc: %d\n",
+ vnic->vnic_id, rc);
+ return rc;
+}
+
+static int bnge_setup_vnic(struct bnge_net *bn, struct bnge_vnic_info *vnic)
+{
+ struct bnge_dev *bd = bn->bd;
+ int rc, i, nr_ctxs;
+
+ nr_ctxs = bnge_cal_nr_rss_ctxs(bd->rx_nr_rings);
+ for (i = 0; i < nr_ctxs; i++) {
+ rc = bnge_hwrm_vnic_ctx_alloc(bd, vnic, i);
+ if (rc) {
+ netdev_err(bn->netdev, "hwrm vnic %d ctx %d alloc failure rc: %d\n",
+ vnic->vnic_id, i, rc);
+ return -ENOMEM;
+ }
+ bn->rsscos_nr_ctxs++;
+ }
+
+ rc = bnge_hwrm_vnic_rss_cfg(bn, vnic);
+ if (rc)
+ return rc;
+
+ if (bnge_is_agg_reqd(bd)) {
+ rc = bnge_hwrm_vnic_set_hds(bn, vnic);
+ if (rc)
+ netdev_err(bn->netdev, "hwrm vnic %d set hds failure rc: %d\n",
+ vnic->vnic_id, rc);
+ }
+ return rc;
+}
+
+static void bnge_del_l2_filter(struct bnge_net *bn, struct bnge_l2_filter *fltr)
+{
+ if (!refcount_dec_and_test(&fltr->refcnt))
+ return;
+ hlist_del_rcu(&fltr->base.hash);
+ kfree_rcu(fltr, base.rcu);
+}
+
+static void bnge_init_l2_filter(struct bnge_net *bn,
+ struct bnge_l2_filter *fltr,
+ struct bnge_l2_key *key, u32 idx)
+{
+ struct hlist_head *head;
+
+ ether_addr_copy(fltr->l2_key.dst_mac_addr, key->dst_mac_addr);
+ fltr->l2_key.vlan = key->vlan;
+ fltr->base.type = BNGE_FLTR_TYPE_L2;
+
+ head = &bn->l2_fltr_hash_tbl[idx];
+ hlist_add_head_rcu(&fltr->base.hash, head);
+ refcount_set(&fltr->refcnt, 1);
+}
+
+static struct bnge_l2_filter *__bnge_lookup_l2_filter(struct bnge_net *bn,
+ struct bnge_l2_key *key,
+ u32 idx)
+{
+ struct bnge_l2_filter *fltr;
+ struct hlist_head *head;
+
+ head = &bn->l2_fltr_hash_tbl[idx];
+ hlist_for_each_entry_rcu(fltr, head, base.hash) {
+ struct bnge_l2_key *l2_key = &fltr->l2_key;
+
+ if (ether_addr_equal(l2_key->dst_mac_addr, key->dst_mac_addr) &&
+ l2_key->vlan == key->vlan)
+ return fltr;
+ }
+ return NULL;
+}
+
+static struct bnge_l2_filter *bnge_lookup_l2_filter(struct bnge_net *bn,
+ struct bnge_l2_key *key,
+ u32 idx)
+{
+ struct bnge_l2_filter *fltr;
+
+ rcu_read_lock();
+ fltr = __bnge_lookup_l2_filter(bn, key, idx);
+ if (fltr)
+ refcount_inc(&fltr->refcnt);
+ rcu_read_unlock();
+ return fltr;
+}
+
+static struct bnge_l2_filter *bnge_alloc_l2_filter(struct bnge_net *bn,
+ struct bnge_l2_key *key,
+ gfp_t gfp)
+{
+ struct bnge_l2_filter *fltr;
+ u32 idx;
+
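+ /* Hash the key to a bucket and reuse any matching filter already present. */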
+ idx = jhash2(&key->filter_key, BNGE_L2_KEY_SIZE, bn->hash_seed) &
+ BNGE_L2_FLTR_HASH_MASK;
+ fltr = bnge_lookup_l2_filter(bn, key, idx);
+ if (fltr)
+ return fltr;
+
+ fltr = kzalloc(sizeof(*fltr), gfp);
+ if (!fltr)
+ return ERR_PTR(-ENOMEM);
+
+ bnge_init_l2_filter(bn, fltr, key, idx);
+ return fltr;
+}
+
+static int bnge_hwrm_set_vnic_filter(struct bnge_net *bn, u16 vnic_id, u16 idx,
+ const u8 *mac_addr)
+{
+ struct bnge_l2_filter *fltr;
+ struct bnge_l2_key key;
+ int rc;
+
+ ether_addr_copy(key.dst_mac_addr, mac_addr);
+ key.vlan = 0;
+ fltr = bnge_alloc_l2_filter(bn, &key, GFP_KERNEL);
+ if (IS_ERR(fltr))
+ return PTR_ERR(fltr);
+
+ fltr->base.fw_vnic_id = bn->vnic_info[vnic_id].fw_vnic_id;
+ rc = bnge_hwrm_l2_filter_alloc(bn->bd, fltr);
+ if (rc)
+ goto err_del_l2_filter;
+ bn->vnic_info[vnic_id].l2_filters[idx] = fltr;
+ return rc;
+
+err_del_l2_filter:
+ bnge_del_l2_filter(bn, fltr);
+ return rc;
+}
+
+static bool bnge_mc_list_updated(struct bnge_net *bn, u32 *rx_mask)
+{
+ struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
+ struct net_device *dev = bn->netdev;
+ struct netdev_hw_addr *ha;
+ int mc_count = 0, off = 0;
+ bool update = false;
+ u8 *haddr;
+
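+ /* Mirror the netdev MC list; fall back to ALL_MCAST if it overflows. */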
+ netdev_for_each_mc_addr(ha, dev) {
+ if (mc_count >= BNGE_MAX_MC_ADDRS) {
+ *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
+ vnic->mc_list_count = 0;
+ return false;
+ }
+ haddr = ha->addr;
+ if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
+ memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
+ update = true;
+ }
+ off += ETH_ALEN;
+ mc_count++;
+ }
+ if (mc_count)
+ *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
+
+ if (mc_count != vnic->mc_list_count) {
+ vnic->mc_list_count = mc_count;
+ update = true;
+ }
+ return update;
+}
+
+static bool bnge_uc_list_updated(struct bnge_net *bn)
+{
+ struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
+ struct net_device *dev = bn->netdev;
+ struct netdev_hw_addr *ha;
+ int off = 0;
+
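+ /* Entry 0 holds the device MAC, so the netdev UC list is offset by one. */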
+ if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
+ return true;
+
+ netdev_for_each_uc_addr(ha, dev) {
+ if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
+ return true;
+
+ off += ETH_ALEN;
+ }
+ return false;
+}
+
+static bool bnge_promisc_ok(struct bnge_net *bn)
+{
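+ /* Placeholder: promiscuous mode is always permitted for now. */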
+ return true;
+}
+
+static int bnge_cfg_def_vnic(struct bnge_net *bn)
+{
+ struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
+ struct net_device *dev = bn->netdev;
+ struct bnge_dev *bd = bn->bd;
+ struct netdev_hw_addr *ha;
+ int i, off = 0, rc;
+ bool uc_update;
+
+ netif_addr_lock_bh(dev);
+ uc_update = bnge_uc_list_updated(bn);
+ netif_addr_unlock_bh(dev);
+
+ if (!uc_update)
+ goto skip_uc;
+
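+ /* Remove all UC filters except index 0, which holds the device MAC. */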
+ for (i = 1; i < vnic->uc_filter_count; i++) {
+ struct bnge_l2_filter *fltr = vnic->l2_filters[i];
+
+ bnge_hwrm_l2_filter_free(bd, fltr);
+ bnge_del_l2_filter(bn, fltr);
+ }
+
+ vnic->uc_filter_count = 1;
+
+ netif_addr_lock_bh(dev);
+ if (netdev_uc_count(dev) > (BNGE_MAX_UC_ADDRS - 1)) {
+ vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
+ } else {
+ netdev_for_each_uc_addr(ha, dev) {
+ memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
+ off += ETH_ALEN;
+ vnic->uc_filter_count++;
+ }
+ }
+ netif_addr_unlock_bh(dev);
+
+ for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
+ rc = bnge_hwrm_set_vnic_filter(bn, 0, i, vnic->uc_list + off);
+ if (rc) {
+ netdev_err(dev, "HWRM vnic filter failure rc: %d\n", rc);
+ vnic->uc_filter_count = i;
+ return rc;
+ }
+ }
+
+skip_uc:
+ if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
+ !bnge_promisc_ok(bn))
+ vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
+ rc = bnge_hwrm_cfa_l2_set_rx_mask(bd, vnic);
+ if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) {
+ netdev_info(dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
+ rc);
+ vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
+ vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
+ vnic->mc_list_count = 0;
+ rc = bnge_hwrm_cfa_l2_set_rx_mask(bd, vnic);
+ }
+ if (rc)
+ netdev_err(dev, "HWRM cfa l2 rx mask failure rc: %d\n",
+ rc);
+
+ return rc;
+}
+
+static void bnge_hwrm_vnic_free(struct bnge_net *bn)
+{
+ int i;
+
+ for (i = 0; i < bn->nr_vnics; i++)
+ bnge_hwrm_vnic_free_one(bn->bd, &bn->vnic_info[i]);
+}
+
+static void bnge_hwrm_vnic_ctx_free(struct bnge_net *bn)
+{
+ int i, j;
+
+ for (i = 0; i < bn->nr_vnics; i++) {
+ struct bnge_vnic_info *vnic = &bn->vnic_info[i];
+
+ for (j = 0; j < BNGE_MAX_CTX_PER_VNIC; j++) {
+ if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
+ bnge_hwrm_vnic_ctx_free_one(bn->bd, vnic, j);
+ }
+ }
+ bn->rsscos_nr_ctxs = 0;
+}
+
+static void bnge_hwrm_clear_vnic_filter(struct bnge_net *bn)
+{
+ struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
+ int i;
+
+ for (i = 0; i < vnic->uc_filter_count; i++) {
+ struct bnge_l2_filter *fltr = vnic->l2_filters[i];
+
+ bnge_hwrm_l2_filter_free(bn->bd, fltr);
+ bnge_del_l2_filter(bn, fltr);
+ }
+
+ vnic->uc_filter_count = 0;
+}
+
+static void bnge_clear_vnic(struct bnge_net *bn)
+{
+ bnge_hwrm_clear_vnic_filter(bn);
+ bnge_hwrm_vnic_free(bn);
+ bnge_hwrm_vnic_ctx_free(bn);
+}
+
+static void bnge_hwrm_rx_ring_free(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr,
+ bool close_path)
+{
+ struct bnge_ring_struct *ring = &rxr->rx_ring_struct;
+ u32 grp_idx = rxr->bnapi->index;
+ u32 cmpl_ring_id;
+
+ if (ring->fw_ring_id == INVALID_HW_RING_ID)
+ return;
+
+ cmpl_ring_id = bnge_cp_ring_for_rx(rxr);
+ hwrm_ring_free_send_msg(bn, ring,
+ RING_FREE_REQ_RING_TYPE_RX,
+ close_path ? cmpl_ring_id :
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ bn->grp_info[grp_idx].rx_fw_ring_id = INVALID_HW_RING_ID;
+}
+
+static void bnge_hwrm_rx_agg_ring_free(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr,
+ bool close_path)
+{
+ struct bnge_ring_struct *ring = &rxr->rx_agg_ring_struct;
+ u32 grp_idx = rxr->bnapi->index;
+ u32 cmpl_ring_id;
+
+ if (ring->fw_ring_id == INVALID_HW_RING_ID)
+ return;
+
+ cmpl_ring_id = bnge_cp_ring_for_rx(rxr);
+ hwrm_ring_free_send_msg(bn, ring, RING_FREE_REQ_RING_TYPE_RX_AGG,
+ close_path ? cmpl_ring_id :
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ bn->grp_info[grp_idx].agg_fw_ring_id = INVALID_HW_RING_ID;
+}
+
+static void bnge_hwrm_tx_ring_free(struct bnge_net *bn,
+ struct bnge_tx_ring_info *txr,
+ bool close_path)
+{
+ struct bnge_ring_struct *ring = &txr->tx_ring_struct;
+ u32 cmpl_ring_id;
+
+ if (ring->fw_ring_id == INVALID_HW_RING_ID)
+ return;
+
+ cmpl_ring_id = close_path ? bnge_cp_ring_for_tx(txr) :
+ INVALID_HW_RING_ID;
+ hwrm_ring_free_send_msg(bn, ring, RING_FREE_REQ_RING_TYPE_TX,
+ cmpl_ring_id);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+}
+
+static void bnge_hwrm_cp_ring_free(struct bnge_net *bn,
+ struct bnge_cp_ring_info *cpr)
+{
+ struct bnge_ring_struct *ring;
+
+ ring = &cpr->ring_struct;
+ if (ring->fw_ring_id == INVALID_HW_RING_ID)
+ return;
+
+ hwrm_ring_free_send_msg(bn, ring, RING_FREE_REQ_RING_TYPE_L2_CMPL,
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+}
+
+static void bnge_hwrm_ring_free(struct bnge_net *bn, bool close_path)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ if (!bn->bnapi)
+ return;
+
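+ /* Free TX and RX rings first, then completion rings, then the NQs. */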
+ for (i = 0; i < bd->tx_nr_rings; i++)
+ bnge_hwrm_tx_ring_free(bn, &bn->tx_ring[i], close_path);
+
+ for (i = 0; i < bd->rx_nr_rings; i++) {
+ bnge_hwrm_rx_ring_free(bn, &bn->rx_ring[i], close_path);
+ bnge_hwrm_rx_agg_ring_free(bn, &bn->rx_ring[i], close_path);
+ }
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+ struct bnge_nq_ring_info *nqr;
+ struct bnge_ring_struct *ring;
+ int j;
+
+ nqr = &bnapi->nq_ring;
+ for (j = 0; j < nqr->cp_ring_count && nqr->cp_ring_arr; j++)
+ bnge_hwrm_cp_ring_free(bn, &nqr->cp_ring_arr[j]);
+
+ ring = &nqr->ring_struct;
+ if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+ hwrm_ring_free_send_msg(bn, ring,
+ RING_FREE_REQ_RING_TYPE_NQ,
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ bn->grp_info[i].nq_fw_ring_id = INVALID_HW_RING_ID;
+ }
+ }
+}
+
+static void bnge_setup_msix(struct bnge_net *bn)
+{
+ struct net_device *dev = bn->netdev;
+ struct bnge_dev *bd = bn->bd;
+ int len, i;
+
+ len = sizeof(bd->irq_tbl[0].name);
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ int map_idx = bnge_cp_num_to_irq_num(bn, i);
+ char *attr;
+
+ if (bd->flags & BNGE_EN_SHARED_CHNL)
+ attr = "TxRx";
+ else if (i < bd->rx_nr_rings)
+ attr = "rx";
+ else
+ attr = "tx";
+
+ snprintf(bd->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
+ attr, i);
+ bd->irq_tbl[map_idx].handler = bnge_msix;
+ }
+}
+
+static int bnge_setup_interrupts(struct bnge_net *bn)
+{
+ struct net_device *dev = bn->netdev;
+ struct bnge_dev *bd = bn->bd;
+
+ bnge_setup_msix(bn);
+
+ return netif_set_real_num_queues(dev, bd->tx_nr_rings, bd->rx_nr_rings);
+}
+
+static void bnge_hwrm_resource_free(struct bnge_net *bn, bool close_path)
+{
+ bnge_clear_vnic(bn);
+ bnge_hwrm_ring_free(bn, close_path);
+ bnge_hwrm_stat_ctx_free(bn);
+}
+
+static void bnge_free_irq(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ struct bnge_irq *irq;
+ int i;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ int map_idx = bnge_cp_num_to_irq_num(bn, i);
+
+ irq = &bd->irq_tbl[map_idx];
+ if (irq->requested) {
+ if (irq->have_cpumask) {
+ irq_set_affinity_hint(irq->vector, NULL);
+ free_cpumask_var(irq->cpu_mask);
+ irq->have_cpumask = 0;
+ }
+ free_irq(irq->vector, bn->bnapi[i]);
+ }
+
+ irq->requested = 0;
+ }
+}
+
+static int bnge_request_irq(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i, rc;
+
+ rc = bnge_setup_interrupts(bn);
+ if (rc) {
+ netdev_err(bn->netdev, "bnge_setup_interrupts err: %d\n", rc);
+ return rc;
+ }
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ int map_idx = bnge_cp_num_to_irq_num(bn, i);
+ struct bnge_irq *irq = &bd->irq_tbl[map_idx];
+
+ rc = request_irq(irq->vector, irq->handler, 0, irq->name,
+ bn->bnapi[i]);
+ if (rc)
+ goto err_free_irq;
+
+ netif_napi_set_irq_locked(&bn->bnapi[i]->napi, irq->vector);
+ irq->requested = 1;
+
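+ /* Hint IRQ affinity to a CPU local to the device's NUMA node. */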
+ if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
+ int numa_node = dev_to_node(&bd->pdev->dev);
+
+ irq->have_cpumask = 1;
+ cpumask_set_cpu(cpumask_local_spread(i, numa_node),
+ irq->cpu_mask);
+ rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
+ if (rc) {
+ netdev_warn(bn->netdev,
+ "Set affinity failed, IRQ = %d\n",
+ irq->vector);
+ goto err_free_irq;
+ }
+ }
+ }
+ return 0;
+
+err_free_irq:
+ bnge_free_irq(bn);
+ return rc;
+}
+
+static int bnge_init_chip(struct bnge_net *bn)
+{
+ struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
+ struct bnge_dev *bd = bn->bd;
+ int rc;
+
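+/* Default stats coalescing interval, in usecs. */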
+#define BNGE_DEF_STATS_COAL_TICKS 1000000
+ bn->stats_coal_ticks = BNGE_DEF_STATS_COAL_TICKS;
+
+ rc = bnge_hwrm_stat_ctx_alloc(bn);
+ if (rc) {
+ netdev_err(bn->netdev, "hwrm stat ctx alloc failure rc: %d\n", rc);
+ goto err_out;
+ }
+
+ rc = bnge_hwrm_ring_alloc(bn);
+ if (rc) {
+ netdev_err(bn->netdev, "hwrm ring alloc failure rc: %d\n", rc);
+ goto err_out;
+ }
+
+ rc = bnge_hwrm_vnic_alloc(bd, vnic, bd->rx_nr_rings);
+ if (rc) {
+ netdev_err(bn->netdev, "hwrm vnic alloc failure rc: %d\n", rc);
+ goto err_out;
+ }
+
+ rc = bnge_setup_vnic(bn, vnic);
+ if (rc)
+ goto err_out;
+
+ if (bd->rss_cap & BNGE_RSS_CAP_RSS_HASH_TYPE_DELTA)
+ bnge_hwrm_update_rss_hash_cfg(bn);
+
+ /* Add the unicast MAC filter for the default VNIC (0). */
+ rc = bnge_hwrm_set_vnic_filter(bn, 0, 0, bn->netdev->dev_addr);
+ if (rc) {
+ netdev_err(bn->netdev, "HWRM vnic filter failure rc: %d\n", rc);
+ goto err_out;
+ }
+ vnic->uc_filter_count = 1;
+
+ vnic->rx_mask = 0;
+
+ if (bn->netdev->flags & IFF_BROADCAST)
+ vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
+
+ if (bn->netdev->flags & IFF_PROMISC)
+ vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
+
+ if (bn->netdev->flags & IFF_ALLMULTI) {
+ vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
+ vnic->mc_list_count = 0;
+ } else if (bn->netdev->flags & IFF_MULTICAST) {
+ u32 mask = 0;
+
+ bnge_mc_list_updated(bn, &mask);
+ vnic->rx_mask |= mask;
+ }
+
+ rc = bnge_cfg_def_vnic(bn);
+ if (rc)
+ goto err_out;
+ return 0;
+
+err_out:
+ bnge_hwrm_resource_free(bn, 0);
+ return rc;
+}
+
+static int bnge_napi_poll(struct napi_struct *napi, int budget)
+{
+ int work_done = 0;
+
+ /* Defer the NAPI implementation to a later patch series. */
+ napi_complete_done(napi, work_done);
+
+ return work_done;
+}
+
+static void bnge_init_napi(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ struct bnge_napi *bnapi;
+ int i;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ bnapi = bn->bnapi[i];
+ netif_napi_add_config_locked(bn->netdev, &bnapi->napi,
+ bnge_napi_poll, bnapi->index);
+ }
+}
+
+static void bnge_del_napi(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ for (i = 0; i < bd->rx_nr_rings; i++)
+ netif_queue_set_napi(bn->netdev, i, NETDEV_QUEUE_TYPE_RX, NULL);
+ for (i = 0; i < bd->tx_nr_rings; i++)
+ netif_queue_set_napi(bn->netdev, i, NETDEV_QUEUE_TYPE_TX, NULL);
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+
+ __netif_napi_del_locked(&bnapi->napi);
+ }
+
+ /* Wait for RCU grace period after removing NAPI instances */
+ synchronize_net();
+}
+
+static int bnge_init_nic(struct bnge_net *bn)
+{
+ int rc;
+
+ bnge_init_nq_tree(bn);
+
+ bnge_init_rx_rings(bn);
+ rc = bnge_alloc_rx_ring_pair_bufs(bn);
+ if (rc)
+ return rc;
+
+ bnge_init_tx_rings(bn);
+
+ rc = bnge_init_ring_grps(bn);
+ if (rc)
+ goto err_free_rx_ring_pair_bufs;
+
+ bnge_init_vnics(bn);
+
+ rc = bnge_init_chip(bn);
+ if (rc)
+ goto err_free_ring_grps;
+ return rc;
+
+err_free_ring_grps:
+ bnge_free_ring_grps(bn);
+ return rc;
+
+err_free_rx_ring_pair_bufs:
+ bnge_free_rx_ring_pair_bufs(bn);
+ return rc;
+}
+
+static int bnge_open_core(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int rc;
+
+ netif_carrier_off(bn->netdev);
+
+ rc = bnge_reserve_rings(bd);
+ if (rc) {
+ netdev_err(bn->netdev, "bnge_reserve_rings err: %d\n", rc);
+ return rc;
+ }
+
+ rc = bnge_alloc_core(bn);
+ if (rc) {
+ netdev_err(bn->netdev, "bnge_alloc_core err: %d\n", rc);
+ return rc;
+ }
+
+ bnge_init_napi(bn);
+ rc = bnge_request_irq(bn);
+ if (rc) {
+ netdev_err(bn->netdev, "bnge_request_irq err: %d\n", rc);
+ goto err_del_napi;
+ }
+
+ rc = bnge_init_nic(bn);
+ if (rc) {
+ netdev_err(bn->netdev, "bnge_init_nic err: %d\n", rc);
+ goto err_free_irq;
+ }
+ set_bit(BNGE_STATE_OPEN, &bd->state);
+ return 0;
+
+err_free_irq:
+ bnge_free_irq(bn);
+err_del_napi:
+ bnge_del_napi(bn);
+ bnge_free_core(bn);
+ return rc;
+}
static netdev_tx_t bnge_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
@@ -28,11 +2212,42 @@ static netdev_tx_t bnge_start_xmit(struct sk_buff *skb, struct net_device *dev)
static int bnge_open(struct net_device *dev)
{
+ struct bnge_net *bn = netdev_priv(dev);
+ int rc;
+
+ rc = bnge_open_core(bn);
+ if (rc)
+ netdev_err(dev, "bnge_open_core err: %d\n", rc);
+
+ return rc;
+}
+
+static int bnge_shutdown_nic(struct bnge_net *bn)
+{
+ /* TODO: close_path = 0 until we make NAPI functional */
+ bnge_hwrm_resource_free(bn, 0);
return 0;
}
+static void bnge_close_core(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+
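+ /* Tear down in the reverse order of bnge_open_core(). */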
+ clear_bit(BNGE_STATE_OPEN, &bd->state);
+ bnge_shutdown_nic(bn);
+ bnge_free_all_rings_bufs(bn);
+ bnge_free_irq(bn);
+ bnge_del_napi(bn);
+
+ bnge_free_core(bn);
+}
+
static int bnge_close(struct net_device *dev)
{
+ struct bnge_net *bn = netdev_priv(dev);
+
+ bnge_close_core(bn);
+
return 0;
}
@@ -238,6 +2453,7 @@ int bnge_netdev_alloc(struct bnge_dev *bd, int max_irqs)
bn->rx_ring_size = BNGE_DEFAULT_RX_RING_SIZE;
bn->tx_ring_size = BNGE_DEFAULT_TX_RING_SIZE;
+ bn->rx_dir = DMA_FROM_DEVICE;
bnge_set_tpa_flags(bd);
bnge_set_ring_params(bd);
@@ -245,6 +2461,7 @@ int bnge_netdev_alloc(struct bnge_dev *bd, int max_irqs)
bnge_init_l2_fltr_tbl(bn);
bnge_init_mac_addr(bd);
+ netdev->request_ops_lock = true;
rc = register_netdev(netdev);
if (rc) {
dev_err(bd->dev, "Register netdev failed rc: %d\n", rc);
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_netdev.h b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.h
index a650d71a58db..fb3b961536ba 100644
--- a/drivers/net/ethernet/broadcom/bnge/bnge_netdev.h
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.h
@@ -5,6 +5,9 @@
#define _BNGE_NETDEV_H_
#include <linux/bnxt/hsi.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/refcount.h>
+#include "bnge_db.h"
struct tx_bd {
__le32 tx_bd_len_flags_type;
@@ -113,11 +116,25 @@ struct bnge_sw_rx_bd {
};
struct bnge_sw_rx_agg_bd {
- struct page *page;
+ netmem_ref netmem;
unsigned int offset;
dma_addr_t mapping;
};
+#define HWRM_RING_ALLOC_TX 0x1
+#define HWRM_RING_ALLOC_RX 0x2
+#define HWRM_RING_ALLOC_AGG 0x4
+#define HWRM_RING_ALLOC_CMPL 0x8
+#define HWRM_RING_ALLOC_NQ 0x10
+
+struct bnge_ring_grp_info {
+ u16 fw_stats_ctx;
+ u16 fw_grp_id;
+ u16 rx_fw_ring_id;
+ u16 agg_fw_ring_id;
+ u16 nq_fw_ring_id;
+};
+
#define BNGE_RX_COPY_THRESH 256
#define BNGE_HW_FEATURE_VLAN_ALL_RX \
@@ -133,6 +150,32 @@ enum {
#define BNGE_NET_EN_TPA (BNGE_NET_EN_GRO | BNGE_NET_EN_LRO)
+/* Minimum TX BDs for a TX packet with MAX_SKB_FRAGS + 1 buffers (linear
+ * data plus fragments). We need one extra BD because the first TX BD is
+ * always a long BD.
+ */
+#define BNGE_MIN_TX_DESC_CNT (MAX_SKB_FRAGS + 2)
+
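+/* Each 16-byte BD gives 2^(BNGE_PAGE_SHIFT - 4) descriptors per page; these
+ * macros split a ring index into a page number and an in-page offset.
+ */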
+#define RX_RING(bn, x) (((x) & (bn)->rx_ring_mask) >> (BNGE_PAGE_SHIFT - 4))
+#define RX_AGG_RING(bn, x) (((x) & (bn)->rx_agg_ring_mask) >> \
+ (BNGE_PAGE_SHIFT - 4))
+#define RX_IDX(x) ((x) & (RX_DESC_CNT - 1))
+
+#define TX_RING(bn, x) (((x) & (bn)->tx_ring_mask) >> (BNGE_PAGE_SHIFT - 4))
+#define TX_IDX(x) ((x) & (TX_DESC_CNT - 1))
+
+#define CP_RING(x) (((x) & ~(CP_DESC_CNT - 1)) >> (BNGE_PAGE_SHIFT - 4))
+#define CP_IDX(x) ((x) & (CP_DESC_CNT - 1))
+
+#define RING_RX(bn, idx) ((idx) & (bn)->rx_ring_mask)
+#define NEXT_RX(idx) ((idx) + 1)
+
+#define RING_RX_AGG(bn, idx) ((idx) & (bn)->rx_agg_ring_mask)
+#define NEXT_RX_AGG(idx) ((idx) + 1)
+
+#define BNGE_NQ_HDL_TYPE_SHIFT 24
+#define BNGE_NQ_HDL_TYPE_RX 0x00
+#define BNGE_NQ_HDL_TYPE_TX 0x01
+
struct bnge_net {
struct bnge_dev *bd;
struct net_device *netdev;
@@ -164,6 +207,30 @@ struct bnge_net {
struct hlist_head l2_fltr_hash_tbl[BNGE_L2_FLTR_HASH_SIZE];
u32 hash_seed;
u64 toeplitz_prefix;
+
+ struct bnge_napi **bnapi;
+
+ struct bnge_rx_ring_info *rx_ring;
+ struct bnge_tx_ring_info *tx_ring;
+
+ u16 *tx_ring_map;
+ enum dma_data_direction rx_dir;
+
+ /* grp_info indexed by napi/nq index */
+ struct bnge_ring_grp_info *grp_info;
+ struct bnge_vnic_info *vnic_info;
+ int nr_vnics;
+ int total_irqs;
+
+ u32 tx_wake_thresh;
+ u16 rx_offset;
+ u16 rx_dma_offset;
+
+ u8 rss_hash_key[HW_HASH_KEY_SIZE];
+ u8 rss_hash_key_valid:1;
+ u8 rss_hash_key_updated:1;
+ int rsscos_nr_ctxs;
+ u32 stats_coal_ticks;
};
#define BNGE_DEFAULT_RX_RING_SIZE 511
@@ -203,4 +270,185 @@ void bnge_set_ring_params(struct bnge_dev *bd);
#define BNGE_MAX_RX_JUM_DESC_CNT (RX_DESC_CNT * MAX_RX_AGG_PAGES - 1)
#define BNGE_MAX_TX_DESC_CNT (TX_DESC_CNT * MAX_TX_PAGES - 1)
+#define BNGE_MAX_TXR_PER_NAPI 8
+
+#define bnge_for_each_napi_tx(iter, bnapi, txr) \
+ for (iter = 0, txr = (bnapi)->tx_ring[0]; txr; \
+ txr = (iter < BNGE_MAX_TXR_PER_NAPI - 1) ? \
+ (bnapi)->tx_ring[++iter] : NULL)
+
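+/* Encode a CP ring handle: ring type in the top byte, ring index below. */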
+#define BNGE_SET_NQ_HDL(cpr) \
+ (((cpr)->cp_ring_type << BNGE_NQ_HDL_TYPE_SHIFT) | (cpr)->cp_idx)
+
+struct bnge_stats_mem {
+ u64 *sw_stats;
+ u64 *hw_masks;
+ void *hw_stats;
+ dma_addr_t hw_stats_map;
+ int len;
+};
+
+struct bnge_cp_ring_info {
+ struct bnge_napi *bnapi;
+ dma_addr_t *desc_mapping;
+ struct tx_cmp **desc_ring;
+ struct bnge_ring_struct ring_struct;
+ u8 cp_ring_type;
+ u8 cp_idx;
+ u32 cp_raw_cons;
+ struct bnge_db_info cp_db;
+};
+
+struct bnge_nq_ring_info {
+ struct bnge_napi *bnapi;
+ dma_addr_t *desc_mapping;
+ struct nqe_cn **desc_ring;
+ struct bnge_ring_struct ring_struct;
+ u32 nq_raw_cons;
+ struct bnge_db_info nq_db;
+
+ struct bnge_stats_mem stats;
+ u32 hw_stats_ctx_id;
+
+ int cp_ring_count;
+ struct bnge_cp_ring_info *cp_ring_arr;
+};
+
+struct bnge_rx_ring_info {
+ struct bnge_napi *bnapi;
+ struct bnge_cp_ring_info *rx_cpr;
+ u16 rx_prod;
+ u16 rx_agg_prod;
+ u16 rx_sw_agg_prod;
+ u16 rx_next_cons;
+ struct bnge_db_info rx_db;
+ struct bnge_db_info rx_agg_db;
+
+ struct rx_bd *rx_desc_ring[MAX_RX_PAGES];
+ struct bnge_sw_rx_bd *rx_buf_ring;
+
+ struct rx_bd *rx_agg_desc_ring[MAX_RX_AGG_PAGES];
+ struct bnge_sw_rx_agg_bd *rx_agg_buf_ring;
+
+ unsigned long *rx_agg_bmap;
+ u16 rx_agg_bmap_size;
+
+ dma_addr_t rx_desc_mapping[MAX_RX_PAGES];
+ dma_addr_t rx_agg_desc_mapping[MAX_RX_AGG_PAGES];
+
+ struct bnge_ring_struct rx_ring_struct;
+ struct bnge_ring_struct rx_agg_ring_struct;
+ struct page_pool *page_pool;
+ struct page_pool *head_pool;
+ bool need_head_pool;
+};
+
+struct bnge_tx_ring_info {
+ struct bnge_napi *bnapi;
+ struct bnge_cp_ring_info *tx_cpr;
+ u16 tx_prod;
+ u16 tx_cons;
+ u16 tx_hw_cons;
+ u16 txq_index;
+ u8 tx_napi_idx;
+ u8 kick_pending;
+ struct bnge_db_info tx_db;
+
+ struct tx_bd *tx_desc_ring[MAX_TX_PAGES];
+ struct bnge_sw_tx_bd *tx_buf_ring;
+
+ dma_addr_t tx_desc_mapping[MAX_TX_PAGES];
+
+ u32 dev_state;
+#define BNGE_DEV_STATE_CLOSING 0x1
+
+ struct bnge_ring_struct tx_ring_struct;
+};
+
+struct bnge_napi {
+ struct napi_struct napi;
+ struct bnge_net *bn;
+ int index;
+
+ struct bnge_nq_ring_info nq_ring;
+ struct bnge_rx_ring_info *rx_ring;
+ struct bnge_tx_ring_info *tx_ring[BNGE_MAX_TXR_PER_NAPI];
+};
+
+#define INVALID_STATS_CTX_ID -1
+#define BNGE_VNIC_DEFAULT 0
+#define BNGE_MAX_UC_ADDRS 4
+
+struct bnge_vnic_info {
+ u16 fw_vnic_id;
+#define BNGE_MAX_CTX_PER_VNIC 8
+ u16 fw_rss_cos_lb_ctx[BNGE_MAX_CTX_PER_VNIC];
+ u16 mru;
+ /* Index 0 always holds the dev_addr filter. */
+ struct bnge_l2_filter *l2_filters[BNGE_MAX_UC_ADDRS];
+ u16 uc_filter_count;
+ u8 *uc_list;
+ dma_addr_t rss_table_dma_addr;
+ __le16 *rss_table;
+ dma_addr_t rss_hash_key_dma_addr;
+ u64 *rss_hash_key;
+ int rss_table_size;
+#define BNGE_RSS_TABLE_ENTRIES 64
+#define BNGE_RSS_TABLE_SIZE (BNGE_RSS_TABLE_ENTRIES * 4)
+#define BNGE_RSS_TABLE_MAX_TBL 8
+#define BNGE_MAX_RSS_TABLE_SIZE \
+ (BNGE_RSS_TABLE_SIZE * BNGE_RSS_TABLE_MAX_TBL)
+ u32 rx_mask;
+
+ u8 *mc_list;
+ int mc_list_size;
+ int mc_list_count;
+ dma_addr_t mc_list_mapping;
+#define BNGE_MAX_MC_ADDRS 16
+
+ u32 flags;
+#define BNGE_VNIC_RSS_FLAG 1
+#define BNGE_VNIC_MCAST_FLAG 4
+#define BNGE_VNIC_UCAST_FLAG 8
+ u32 vnic_id;
+};
+
+struct bnge_filter_base {
+ struct hlist_node hash;
+ struct list_head list;
+ __le64 filter_id;
+ u8 type;
+#define BNGE_FLTR_TYPE_L2 2
+ u8 flags;
+ u16 rxq;
+ u16 fw_vnic_id;
+ u16 vf_idx;
+ unsigned long state;
+#define BNGE_FLTR_VALID 0
+#define BNGE_FLTR_FW_DELETED 2
+
+ struct rcu_head rcu;
+};
+
+struct bnge_l2_key {
+ union {
+ struct {
+ u8 dst_mac_addr[ETH_ALEN];
+ u16 vlan;
+ };
+ u32 filter_key;
+ };
+};
+
+#define BNGE_L2_KEY_SIZE (sizeof(struct bnge_l2_key) / 4)
+struct bnge_l2_filter {
+ /* base filter must be the first member */
+ struct bnge_filter_base base;
+ struct bnge_l2_key l2_key;
+ refcount_t refcnt;
+};
+
+u16 bnge_cp_ring_for_rx(struct bnge_rx_ring_info *rxr);
+u16 bnge_cp_ring_for_tx(struct bnge_tx_ring_info *txr);
+void bnge_fill_hw_rss_tbl(struct bnge_net *bn, struct bnge_vnic_info *vnic);
#endif /* _BNGE_NETDEV_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_resc.c b/drivers/net/ethernet/broadcom/bnge/bnge_resc.c
index c79a3607a1b7..62ebe03a0dcf 100644
--- a/drivers/net/ethernet/broadcom/bnge/bnge_resc.c
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_resc.c
@@ -46,7 +46,7 @@ static int bnge_aux_get_dflt_msix(struct bnge_dev *bd)
return min_t(int, roce_msix, num_online_cpus() + 1);
}
-static u16 bnge_aux_get_msix(struct bnge_dev *bd)
+u16 bnge_aux_get_msix(struct bnge_dev *bd)
{
if (bnge_is_roce_en(bd))
return bd->aux_num_msix;
@@ -164,7 +164,7 @@ static int bnge_adjust_rings(struct bnge_dev *bd, u16 *rx,
return bnge_fix_rings_count(rx, tx, max_nq, sh);
}
-static int bnge_cal_nr_rss_ctxs(u16 rx_rings)
+int bnge_cal_nr_rss_ctxs(u16 rx_rings)
{
if (!rx_rings)
return 0;
@@ -184,7 +184,7 @@ static u16 bnge_get_total_vnics(struct bnge_dev *bd, u16 rx_rings)
return 1;
}
-static u32 bnge_get_rxfh_indir_size(struct bnge_dev *bd)
+u32 bnge_get_rxfh_indir_size(struct bnge_dev *bd)
{
return bnge_cal_nr_rss_ctxs(bd->rx_nr_rings) *
BNGE_RSS_TABLE_ENTRIES;
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_resc.h b/drivers/net/ethernet/broadcom/bnge/bnge_resc.h
index 54ef1c7d8822..0d6213b27580 100644
--- a/drivers/net/ethernet/broadcom/bnge/bnge_resc.h
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_resc.h
@@ -72,6 +72,8 @@ void bnge_free_irqs(struct bnge_dev *bd);
int bnge_net_init_dflt_config(struct bnge_dev *bd);
void bnge_net_uninit_dflt_config(struct bnge_dev *bd);
void bnge_aux_init_dflt_config(struct bnge_dev *bd);
+u32 bnge_get_rxfh_indir_size(struct bnge_dev *bd);
+int bnge_cal_nr_rss_ctxs(u16 rx_rings);
static inline u32
bnge_adjust_pow_two(u32 total_ent, u16 ent_per_blk)
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_rmem.c b/drivers/net/ethernet/broadcom/bnge/bnge_rmem.c
index 52ada65943a0..79f5ce2e5d08 100644
--- a/drivers/net/ethernet/broadcom/bnge/bnge_rmem.c
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_rmem.c
@@ -95,7 +95,7 @@ int bnge_alloc_ring(struct bnge_dev *bd, struct bnge_ring_mem_info *rmem)
&rmem->dma_arr[i],
GFP_KERNEL);
if (!rmem->pg_arr[i])
- return -ENOMEM;
+ goto err_free_ring;
if (rmem->ctx_mem)
bnge_init_ctx_mem(rmem->ctx_mem, rmem->pg_arr[i],
@@ -116,10 +116,13 @@ int bnge_alloc_ring(struct bnge_dev *bd, struct bnge_ring_mem_info *rmem)
if (rmem->vmem_size) {
*rmem->vmem = vzalloc(rmem->vmem_size);
if (!(*rmem->vmem))
- return -ENOMEM;
+ goto err_free_ring;
}
-
return 0;
+
+err_free_ring:
+ bnge_free_ring(bd, rmem);
+ return -ENOMEM;
}
static int bnge_alloc_ctx_one_lvl(struct bnge_dev *bd,
@@ -436,3 +439,61 @@ skip_rdma:
return 0;
}
+
+void bnge_init_ring_struct(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i, j;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+ struct bnge_ring_mem_info *rmem;
+ struct bnge_nq_ring_info *nqr;
+ struct bnge_rx_ring_info *rxr;
+ struct bnge_tx_ring_info *txr;
+ struct bnge_ring_struct *ring;
+
+ nqr = &bnapi->nq_ring;
+ ring = &nqr->ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bn->cp_nr_pages;
+ rmem->page_size = HW_CMPD_RING_SIZE;
+ rmem->pg_arr = (void **)nqr->desc_ring;
+ rmem->dma_arr = nqr->desc_mapping;
+ rmem->vmem_size = 0;
+
+ rxr = bnapi->rx_ring;
+ if (!rxr)
+ goto skip_rx;
+
+ ring = &rxr->rx_ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bn->rx_nr_pages;
+ rmem->page_size = HW_RXBD_RING_SIZE;
+ rmem->pg_arr = (void **)rxr->rx_desc_ring;
+ rmem->dma_arr = rxr->rx_desc_mapping;
+ rmem->vmem_size = SW_RXBD_RING_SIZE * bn->rx_nr_pages;
+ rmem->vmem = (void **)&rxr->rx_buf_ring;
+
+ ring = &rxr->rx_agg_ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bn->rx_agg_nr_pages;
+ rmem->page_size = HW_RXBD_RING_SIZE;
+ rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
+ rmem->dma_arr = rxr->rx_agg_desc_mapping;
+ rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bn->rx_agg_nr_pages;
+ rmem->vmem = (void **)&rxr->rx_agg_buf_ring;
+
+skip_rx:
+ bnge_for_each_napi_tx(j, bnapi, txr) {
+ ring = &txr->tx_ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bn->tx_nr_pages;
+ rmem->page_size = HW_TXBD_RING_SIZE;
+ rmem->pg_arr = (void **)txr->tx_desc_ring;
+ rmem->dma_arr = txr->tx_desc_mapping;
+ rmem->vmem_size = SW_TXBD_RING_SIZE * bn->tx_nr_pages;
+ rmem->vmem = (void **)&txr->tx_buf_ring;
+ }
+ }
+}
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_rmem.h b/drivers/net/ethernet/broadcom/bnge/bnge_rmem.h
index 300f1d8268ef..341c7f81ed09 100644
--- a/drivers/net/ethernet/broadcom/bnge/bnge_rmem.h
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_rmem.h
@@ -6,6 +6,7 @@
struct bnge_ctx_mem_type;
struct bnge_dev;
+struct bnge_net;
#define PTU_PTE_VALID 0x1UL
#define PTU_PTE_LAST 0x2UL
@@ -180,9 +181,22 @@ struct bnge_ctx_mem_info {
struct bnge_ctx_mem_type ctx_arr[BNGE_CTX_V2_MAX];
};
+struct bnge_ring_struct {
+ struct bnge_ring_mem_info ring_mem;
+
+ u16 fw_ring_id;
+ union {
+ u16 grp_idx;
+ u16 map_idx; /* Used by NQs */
+ };
+ u32 handle;
+ u8 queue_id;
+};
+
int bnge_alloc_ring(struct bnge_dev *bd, struct bnge_ring_mem_info *rmem);
void bnge_free_ring(struct bnge_dev *bd, struct bnge_ring_mem_info *rmem);
int bnge_alloc_ctx_mem(struct bnge_dev *bd);
void bnge_free_ctx_mem(struct bnge_dev *bd);
+void bnge_init_ring_struct(struct bnge_net *bn);
#endif /* _BNGE_RMEM_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 0daa08cecaf2..3fc33b1b4dfb 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -142,6 +142,7 @@ static const struct {
[NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
[NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
[NETXTREME_E_P7_VF] = { "Broadcom BCM5760X Virtual Function" },
+ [NETXTREME_E_P7_VF_HV] = { "Broadcom BCM5760X Virtual Function for Hyper-V" },
};
static const struct pci_device_id bnxt_pci_tbl[] = {
@@ -217,6 +218,7 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
{ PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
{ PCI_VDEVICE(BROADCOM, 0x1819), .driver_data = NETXTREME_E_P7_VF },
+ { PCI_VDEVICE(BROADCOM, 0x181b), .driver_data = NETXTREME_E_P7_VF_HV },
{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
#endif
{ 0 }
@@ -263,6 +265,8 @@ const u16 bnxt_bstore_to_trace[] = {
[BNXT_CTX_CA1] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA1_TRACE,
[BNXT_CTX_CA2] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA2_TRACE,
[BNXT_CTX_RIGP1] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP1_TRACE,
+ [BNXT_CTX_KONG] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_AFM_KONG_HWRM_TRACE,
+ [BNXT_CTX_QPC] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ERR_QPC_TRACE,
};
static struct workqueue_struct *bnxt_pf_wq;
@@ -315,7 +319,8 @@ static bool bnxt_vf_pciid(enum board_idx idx)
return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
- idx == NETXTREME_E_P5_VF_HV || idx == NETXTREME_E_P7_VF);
+ idx == NETXTREME_E_P5_VF_HV || idx == NETXTREME_E_P7_VF ||
+ idx == NETXTREME_E_P7_VF_HV);
}
#define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
@@ -3797,8 +3802,7 @@ static void bnxt_free_rx_rings(struct bnxt *bp)
xdp_rxq_info_unreg(&rxr->xdp_rxq);
page_pool_destroy(rxr->page_pool);
- if (bnxt_separate_head_pool(rxr))
- page_pool_destroy(rxr->head_pool);
+ page_pool_destroy(rxr->head_pool);
rxr->page_pool = rxr->head_pool = NULL;
kfree(rxr->rx_agg_bmap);
@@ -3845,6 +3849,8 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
pool = page_pool_create(&pp);
if (IS_ERR(pool))
goto err_destroy_pp;
+ } else {
+ page_pool_get(pool);
}
rxr->head_pool = pool;
@@ -6832,7 +6838,7 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
req->lb_rule = cpu_to_le16(0xffff);
vnic_mru:
- vnic->mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
+ vnic->mru = bp->dev->mtu + VLAN_ETH_HLEN;
req->mru = cpu_to_le16(vnic->mru);
req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
@@ -6969,6 +6975,8 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
bp->rss_cap |= BNXT_RSS_CAP_ESP_V4_RSS_CAP;
if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP)
bp->rss_cap |= BNXT_RSS_CAP_ESP_V6_RSS_CAP;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPV6_FLOW_LABEL_CAP)
+ bp->rss_cap |= BNXT_RSS_CAP_IPV6_FLOW_LABEL_RSS_CAP;
if (flags & VNIC_QCAPS_RESP_FLAGS_RE_FLUSH_CAP)
bp->fw_cap |= BNXT_FW_CAP_VNIC_RE_FLUSH;
}
@@ -9144,7 +9152,7 @@ static int bnxt_hwrm_func_backing_store_cfg_v2(struct bnxt *bp,
return rc;
}
-static int bnxt_backing_store_cfg_v2(struct bnxt *bp, u32 ena)
+static int bnxt_backing_store_cfg_v2(struct bnxt *bp)
{
struct bnxt_ctx_mem_info *ctx = bp->ctx;
struct bnxt_ctx_mem_type *ctxm;
@@ -9152,7 +9160,7 @@ static int bnxt_backing_store_cfg_v2(struct bnxt *bp, u32 ena)
int rc = 0;
u16 type;
- for (type = BNXT_CTX_SRT; type <= BNXT_CTX_RIGP1; type++) {
+ for (type = BNXT_CTX_SRT; type <= BNXT_CTX_QPC; type++) {
ctxm = &ctx->ctx_arr[type];
if (!bnxt_bs_trace_avail(bp, type))
continue;
@@ -9170,12 +9178,13 @@ static int bnxt_backing_store_cfg_v2(struct bnxt *bp, u32 ena)
}
if (last_type == BNXT_CTX_INV) {
- if (!ena)
+ for (type = 0; type < BNXT_CTX_MAX; type++) {
+ ctxm = &ctx->ctx_arr[type];
+ if (ctxm->mem_valid)
+ last_type = type;
+ }
+ if (last_type == BNXT_CTX_INV)
return 0;
- else if (ena & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM)
- last_type = BNXT_CTX_MAX - 1;
- else
- last_type = BNXT_CTX_L2_MAX - 1;
}
ctx->ctx_arr[last_type].last = 1;
@@ -9302,6 +9311,10 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
return 0;
+ ena = 0;
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
+ goto skip_legacy;
+
ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
l2_qps = ctxm->qp_l2_entries;
qp1_qps = ctxm->qp_qp1_entries;
@@ -9310,7 +9323,6 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
srqs = ctxm->srq_l2_entries;
max_srqs = ctxm->max_entries;
- ena = 0;
if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
pg_lvl = 2;
if (BNXT_SW_RES_LMT(bp)) {
@@ -9404,8 +9416,9 @@ skip_rdma:
ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
+skip_legacy:
if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
- rc = bnxt_backing_store_cfg_v2(bp, ena);
+ rc = bnxt_backing_store_cfg_v2(bp);
else
rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
if (rc) {
@@ -9619,10 +9632,10 @@ no_ptp:
static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
+ u32 flags, flags_ext, flags_ext2, flags_ext3;
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
struct hwrm_func_qcaps_output *resp;
struct hwrm_func_qcaps_input *req;
- struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
- u32 flags, flags_ext, flags_ext2;
int rc;
rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
@@ -9689,6 +9702,12 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
(flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_ROCE_VF_RESOURCE_MGMT_SUPPORTED))
bp->fw_cap |= BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED;
+ flags_ext3 = le32_to_cpu(resp->flags_ext3);
+ if (flags_ext3 & FUNC_QCAPS_RESP_FLAGS_EXT3_ROCE_VF_DYN_ALLOC_SUPPORT)
+ bp->fw_cap |= BNXT_FW_CAP_ROCE_VF_DYN_ALLOC_SUPPORT;
+ if (flags_ext3 & FUNC_QCAPS_RESP_FLAGS_EXT3_MIRROR_ON_ROCE_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_MIRROR_ON_ROCE;
+
bp->tx_push_thresh = 0;
if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
BNXT_FW_MAJ(bp) > 217)
@@ -13261,12 +13280,6 @@ static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
mdio->val_in);
- case SIOCSHWTSTAMP:
- return bnxt_hwtstamp_set(dev, ifr);
-
- case SIOCGHWTSTAMP:
- return bnxt_hwtstamp_get(dev, ifr);
-
default:
/* do nothing */
break;
@@ -14731,6 +14744,23 @@ static bool bnxt_fw_pre_resv_vnics(struct bnxt *bp)
return false;
}
+static void bnxt_hwrm_pfcwd_qcaps(struct bnxt *bp)
+{
+ struct hwrm_queue_pfcwd_timeout_qcaps_output *resp;
+ struct hwrm_queue_pfcwd_timeout_qcaps_input *req;
+ int rc;
+
+ bp->max_pfcwd_tmo_ms = 0;
+ rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCWD_TIMEOUT_QCAPS);
+ if (rc)
+ return;
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send_silent(bp, req);
+ if (!rc)
+ bp->max_pfcwd_tmo_ms = le16_to_cpu(resp->max_pfcwd_timeout);
+ hwrm_req_drop(bp, req);
+}
+
static int bnxt_fw_init_one_p1(struct bnxt *bp)
{
int rc;
@@ -14808,6 +14838,7 @@ static int bnxt_fw_init_one_p2(struct bnxt *bp)
if (bnxt_fw_pre_resv_vnics(bp))
bp->fw_cap |= BNXT_FW_CAP_PRE_RESV_VNICS;
+ bnxt_hwrm_pfcwd_qcaps(bp);
bnxt_hwrm_func_qcfg(bp);
bnxt_hwrm_vnic_qcaps(bp);
bnxt_hwrm_port_led_qcaps(bp);
@@ -15771,6 +15802,8 @@ static const struct net_device_ops bnxt_netdev_ops = {
.ndo_xdp_xmit = bnxt_xdp_xmit,
.ndo_bridge_getlink = bnxt_bridge_getlink,
.ndo_bridge_setlink = bnxt_bridge_setlink,
+ .ndo_hwtstamp_get = bnxt_hwtstamp_get,
+ .ndo_hwtstamp_set = bnxt_hwtstamp_set,
};
static void bnxt_get_queue_stats_rx(struct net_device *dev, int i,
@@ -15922,8 +15955,7 @@ err_rxq_info_unreg:
xdp_rxq_info_unreg(&clone->xdp_rxq);
err_page_pool_destroy:
page_pool_destroy(clone->page_pool);
- if (bnxt_separate_head_pool(clone))
- page_pool_destroy(clone->head_pool);
+ page_pool_destroy(clone->head_pool);
clone->page_pool = NULL;
clone->head_pool = NULL;
return rc;
@@ -15941,8 +15973,7 @@ static void bnxt_queue_mem_free(struct net_device *dev, void *qmem)
xdp_rxq_info_unreg(&rxr->xdp_rxq);
page_pool_destroy(rxr->page_pool);
- if (bnxt_separate_head_pool(rxr))
- page_pool_destroy(rxr->head_pool);
+ page_pool_destroy(rxr->head_pool);
rxr->page_pool = NULL;
rxr->head_pool = NULL;
@@ -16071,7 +16102,7 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
napi_enable_locked(&bnapi->napi);
bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
- mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
+ mru = bp->dev->mtu + VLAN_ETH_HLEN;
for (i = 0; i < bp->nr_vnics; i++) {
vnic = &bp->vnic_info[i];
@@ -16152,7 +16183,7 @@ static void bnxt_remove_one(struct pci_dev *pdev)
struct bnxt *bp = netdev_priv(dev);
if (BNXT_PF(bp))
- bnxt_sriov_disable(bp);
+ __bnxt_sriov_disable(bp);
bnxt_rdma_aux_device_del(bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index fda0d3cc6227..741b2d854789 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1968,10 +1968,12 @@ struct bnxt_ctx_mem_type {
#define BNXT_CTX_CA1 FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA1_TRACE
#define BNXT_CTX_CA2 FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA2_TRACE
#define BNXT_CTX_RIGP1 FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP1_TRACE
+#define BNXT_CTX_KONG FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_AFM_KONG_HWRM_TRACE
+#define BNXT_CTX_QPC FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_ERR_QPC_TRACE
#define BNXT_CTX_MAX (BNXT_CTX_TIM + 1)
#define BNXT_CTX_L2_MAX (BNXT_CTX_FTQM + 1)
-#define BNXT_CTX_V2_MAX (BNXT_CTX_RIGP1 + 1)
+#define BNXT_CTX_V2_MAX (BNXT_CTX_QPC + 1)
#define BNXT_CTX_INV ((u16)-1)
struct bnxt_ctx_mem_info {
@@ -2130,6 +2132,7 @@ enum board_idx {
NETXTREME_E_P5_VF,
NETXTREME_E_P5_VF_HV,
NETXTREME_E_P7_VF,
+ NETXTREME_E_P7_VF_HV,
};
#define BNXT_TRACE_BUF_MAGIC_BYTE ((u8)0xbc)
@@ -2407,6 +2410,7 @@ struct bnxt {
#define BNXT_RSS_CAP_ESP_V4_RSS_CAP BIT(6)
#define BNXT_RSS_CAP_ESP_V6_RSS_CAP BIT(7)
#define BNXT_RSS_CAP_MULTI_RSS_CTX BIT(8)
+#define BNXT_RSS_CAP_IPV6_FLOW_LABEL_RSS_CAP BIT(9)
u8 rss_hash_key[HW_HASH_KEY_SIZE];
u8 rss_hash_key_valid:1;
@@ -2422,6 +2426,8 @@ struct bnxt {
u8 max_q;
u8 num_tc;
+ u16 max_pfcwd_tmo_ms;
+
u8 tph_mode;
unsigned int current_interval;
@@ -2475,6 +2481,7 @@ struct bnxt {
#define BNXT_FW_CAP_ENABLE_RDMA_SRIOV BIT_ULL(5)
#define BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED BIT_ULL(6)
#define BNXT_FW_CAP_KONG_MB_CHNL BIT_ULL(7)
+ #define BNXT_FW_CAP_ROCE_VF_DYN_ALLOC_SUPPORT BIT_ULL(8)
#define BNXT_FW_CAP_OVS_64BIT_HANDLE BIT_ULL(10)
#define BNXT_FW_CAP_TRUSTED_VF BIT_ULL(11)
#define BNXT_FW_CAP_ERROR_RECOVERY BIT_ULL(13)
@@ -2507,6 +2514,7 @@ struct bnxt {
#define BNXT_FW_CAP_VNIC_RE_FLUSH BIT_ULL(40)
#define BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS BIT_ULL(41)
#define BNXT_FW_CAP_NPAR_1_2 BIT_ULL(42)
+ #define BNXT_FW_CAP_MIRROR_ON_ROCE BIT_ULL(43)
u32 fw_dbg_cap;
@@ -2519,6 +2527,8 @@ struct bnxt {
#define BNXT_SUPPORTS_MULTI_RSS_CTX(bp) \
(BNXT_PF(bp) && BNXT_SUPPORTS_NTUPLE_VNIC(bp) && \
((bp)->rss_cap & BNXT_RSS_CAP_MULTI_RSS_CTX))
+#define BNXT_ROCE_VF_DYN_ALLOC_CAP(bp) \
+ ((bp)->fw_cap & BNXT_FW_CAP_ROCE_VF_DYN_ALLOC_SUPPORT)
#define BNXT_SUPPORTS_QUEUE_API(bp) \
(BNXT_PF(bp) && BNXT_SUPPORTS_NTUPLE_VNIC(bp) && \
((bp)->fw_cap & BNXT_FW_CAP_VNIC_RE_FLUSH))
@@ -2528,6 +2538,8 @@ struct bnxt {
((bp)->fw_cap & BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED)
#define BNXT_SW_RES_LMT(bp) \
((bp)->fw_cap & BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS)
+#define BNXT_MIRROR_ON_ROCE_CAP(bp) \
+ ((bp)->fw_cap & BNXT_FW_CAP_MIRROR_ON_ROCE)
u32 hwrm_spec_code;
u16 hwrm_cmd_seq;
@@ -2542,6 +2554,7 @@ struct bnxt {
u16 fw_rx_stats_ext_size;
u16 fw_tx_stats_ext_size;
u16 hw_ring_stats_size;
+ u16 pcie_stat_len;
u8 pri2cos_idx[8];
u8 pri2cos_valid;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
index 18d6c94d5cb8..0181ab1f2dfd 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
@@ -36,6 +36,8 @@ static const u16 bnxt_bstore_to_seg_id[] = {
[BNXT_CTX_CA1] = BNXT_CTX_MEM_SEG_CA1,
[BNXT_CTX_CA2] = BNXT_CTX_MEM_SEG_CA2,
[BNXT_CTX_RIGP1] = BNXT_CTX_MEM_SEG_RIGP1,
+ [BNXT_CTX_KONG] = BNXT_CTX_MEM_SEG_KONG,
+ [BNXT_CTX_QPC] = BNXT_CTX_MEM_SEG_QPC,
};
static int bnxt_dbg_hwrm_log_buffer_flush(struct bnxt *bp, u16 type, u32 flags,
@@ -359,7 +361,7 @@ static u32 bnxt_get_ctx_coredump(struct bnxt *bp, void *buf, u32 offset,
if (buf)
buf += offset;
- for (type = 0 ; type <= BNXT_CTX_RIGP1; type++) {
+ for (type = 0; type < BNXT_CTX_V2_MAX; type++) {
struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
bool trace = bnxt_bs_trace_avail(bp, type);
u32 seg_id = bnxt_bstore_to_seg_id[type];
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h
index d1cd6387f3ab..c087df88154a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h
@@ -102,6 +102,8 @@ struct bnxt_driver_segment_record {
#define BNXT_CTX_MEM_SEG_CA1 0x9
#define BNXT_CTX_MEM_SEG_CA2 0xa
#define BNXT_CTX_MEM_SEG_RIGP1 0xb
+#define BNXT_CTX_MEM_SEG_QPC 0xc
+#define BNXT_CTX_MEM_SEG_KONG 0xd
#define BNXT_CRASH_DUMP_LEN (8 << 20)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 4c4581b0342e..02961d93ed35 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -40,12 +40,6 @@ bnxt_dl_flash_update(struct devlink *dl,
struct bnxt *bp = bnxt_get_bp_from_dl(dl);
int rc;
- if (!BNXT_PF(bp)) {
- NL_SET_ERR_MSG_MOD(extack,
- "flash update not supported from a VF");
- return -EPERM;
- }
-
devlink_flash_update_status_notify(dl, "Preparing to flash", NULL, 0, 0);
rc = bnxt_flash_package_from_fw_obj(bp->dev, params->fw, 0, extack);
if (!rc)
@@ -220,7 +214,7 @@ __bnxt_dl_reporter_create(struct bnxt *bp,
{
struct devlink_health_reporter *reporter;
- reporter = devlink_health_reporter_create(bp->dl, ops, 0, bp);
+ reporter = devlink_health_reporter_create(bp->dl, ops, bp);
if (IS_ERR(reporter)) {
netdev_warn(bp->dev, "Failed to create %s health reporter, rc = %ld\n",
ops->name, PTR_ERR(reporter));
@@ -1080,16 +1074,9 @@ static int __bnxt_hwrm_nvm_req(struct bnxt *bp,
static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
union devlink_param_value *val)
{
- struct hwrm_nvm_get_variable_input *req = msg;
const struct bnxt_dl_nvm_param *nvm_param;
int i;
- /* Get/Set NVM CFG parameter is supported only on PFs */
- if (BNXT_VF(bp)) {
- hwrm_req_drop(bp, req);
- return -EPERM;
- }
-
for (i = 0; i < ARRAY_SIZE(nvm_params); i++) {
nvm_param = &nvm_params[i];
if (nvm_param->id == param_id)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 1b37612b1c01..41686a6f84b5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -1584,6 +1584,8 @@ static u64 get_ethtool_ipv6_rss(struct bnxt *bp)
{
if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6)
return RXH_IP_SRC | RXH_IP_DST;
+ if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6_FLOW_LABEL)
+ return RXH_IP_SRC | RXH_IP_DST | RXH_IP6_FL;
return 0;
}
@@ -1662,13 +1664,18 @@ static int bnxt_set_rxfh_fields(struct net_device *dev,
if (cmd->data == RXH_4TUPLE)
tuple = 4;
- else if (cmd->data == RXH_2TUPLE)
+ else if (cmd->data == RXH_2TUPLE ||
+ cmd->data == (RXH_2TUPLE | RXH_IP6_FL))
tuple = 2;
else if (!cmd->data)
tuple = 0;
else
return -EINVAL;
+ if (cmd->data & RXH_IP6_FL &&
+ !(bp->rss_cap & BNXT_RSS_CAP_IPV6_FLOW_LABEL_RSS_CAP))
+ return -EINVAL;
+
if (cmd->flow_type == TCP_V4_FLOW) {
rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
if (tuple == 4)
@@ -1732,10 +1739,15 @@ static int bnxt_set_rxfh_fields(struct net_device *dev,
case AH_V6_FLOW:
case ESP_V6_FLOW:
case IPV6_FLOW:
- if (tuple == 2)
+ rss_hash_cfg &= ~(VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6_FLOW_LABEL);
+ if (!tuple)
+ break;
+ if (cmd->data & RXH_IP6_FL)
+ rss_hash_cfg |=
+ VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6_FLOW_LABEL;
+ else if (tuple == 2)
rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
- else if (!tuple)
- rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
break;
}
@@ -2049,38 +2061,52 @@ static void bnxt_get_drvinfo(struct net_device *dev,
static int bnxt_get_regs_len(struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
- int reg_len;
if (!BNXT_PF(bp))
return -EOPNOTSUPP;
- reg_len = BNXT_PXP_REG_LEN;
+ return BNXT_PXP_REG_LEN + bp->pcie_stat_len;
+}
+
+static void *
+__bnxt_hwrm_pcie_qstats(struct bnxt *bp, struct hwrm_pcie_qstats_input *req)
+{
+ struct pcie_ctx_hw_stats_v2 *hw_pcie_stats;
+ dma_addr_t hw_pcie_stats_addr;
+ int rc;
+
+ hw_pcie_stats = hwrm_req_dma_slice(bp, req, sizeof(*hw_pcie_stats),
+ &hw_pcie_stats_addr);
+ if (!hw_pcie_stats)
+ return NULL;
- if (bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED)
- reg_len += sizeof(struct pcie_ctx_hw_stats);
+ req->pcie_stat_size = cpu_to_le16(sizeof(*hw_pcie_stats));
+ req->pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr);
+ rc = hwrm_req_send(bp, req);
- return reg_len;
+ return rc ? NULL : hw_pcie_stats;
}
#define BNXT_PCIE_32B_ENTRY(start, end) \
- { offsetof(struct pcie_ctx_hw_stats, start), \
- offsetof(struct pcie_ctx_hw_stats, end) }
+ { offsetof(struct pcie_ctx_hw_stats_v2, start),\
+ offsetof(struct pcie_ctx_hw_stats_v2, end) }
static const struct {
u16 start;
u16 end;
} bnxt_pcie_32b_entries[] = {
BNXT_PCIE_32B_ENTRY(pcie_ltssm_histogram[0], pcie_ltssm_histogram[3]),
+ BNXT_PCIE_32B_ENTRY(pcie_tl_credit_nph_histogram[0], unused_1),
+ BNXT_PCIE_32B_ENTRY(pcie_rd_latency_histogram[0], unused_2),
};
static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
void *_p)
{
- struct pcie_ctx_hw_stats *hw_pcie_stats;
+ struct hwrm_pcie_qstats_output *resp;
struct hwrm_pcie_qstats_input *req;
struct bnxt *bp = netdev_priv(dev);
- dma_addr_t hw_pcie_stats_addr;
- int rc;
+ u8 *src;
regs->version = 0;
if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_REG_ACCESS_RESTRICTED))
@@ -2092,24 +2118,21 @@ static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
if (hwrm_req_init(bp, req, HWRM_PCIE_QSTATS))
return;
- hw_pcie_stats = hwrm_req_dma_slice(bp, req, sizeof(*hw_pcie_stats),
- &hw_pcie_stats_addr);
- if (!hw_pcie_stats) {
- hwrm_req_drop(bp, req);
- return;
- }
-
- regs->version = 1;
- hwrm_req_hold(bp, req); /* hold on to slice */
- req->pcie_stat_size = cpu_to_le16(sizeof(*hw_pcie_stats));
- req->pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr);
- rc = hwrm_req_send(bp, req);
- if (!rc) {
+ resp = hwrm_req_hold(bp, req);
+ src = __bnxt_hwrm_pcie_qstats(bp, req);
+ if (src) {
u8 *dst = (u8 *)(_p + BNXT_PXP_REG_LEN);
- u8 *src = (u8 *)hw_pcie_stats;
- int i, j;
+ int i, j, len;
+
+ len = min(bp->pcie_stat_len, le16_to_cpu(resp->pcie_stat_size));
+ if (len <= sizeof(struct pcie_ctx_hw_stats))
+ regs->version = 1;
+ else if (len < sizeof(struct pcie_ctx_hw_stats_v2))
+ regs->version = 2;
+ else
+ regs->version = 3;
- for (i = 0, j = 0; i < sizeof(*hw_pcie_stats); ) {
+ for (i = 0, j = 0; i < len; ) {
if (i >= bnxt_pcie_32b_entries[j].start &&
i <= bnxt_pcie_32b_entries[j].end) {
u32 *dst32 = (u32 *)(dst + i);
@@ -3185,7 +3208,8 @@ static int bnxt_get_fecparam(struct net_device *dev,
}
static void bnxt_get_fec_stats(struct net_device *dev,
- struct ethtool_fec_stats *fec_stats)
+ struct ethtool_fec_stats *fec_stats,
+ struct ethtool_fec_hist *hist)
{
struct bnxt *bp = netdev_priv(dev);
u64 *rx;
@@ -4376,12 +4400,42 @@ static int bnxt_get_eee(struct net_device *dev, struct ethtool_keee *edata)
return 0;
}
+static int bnxt_hwrm_pfcwd_qcfg(struct bnxt *bp, u16 *val)
+{
+ struct hwrm_queue_pfcwd_timeout_qcfg_output *resp;
+ struct hwrm_queue_pfcwd_timeout_qcfg_input *req;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCWD_TIMEOUT_QCFG);
+ if (rc)
+ return rc;
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
+ if (!rc)
+ *val = le16_to_cpu(resp->pfcwd_timeout_value);
+ hwrm_req_drop(bp, req);
+ return rc;
+}
+
+static int bnxt_hwrm_pfcwd_cfg(struct bnxt *bp, u16 val)
+{
+ struct hwrm_queue_pfcwd_timeout_cfg_input *req;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCWD_TIMEOUT_CFG);
+ if (rc)
+ return rc;
+ req->pfcwd_timeout_value = cpu_to_le16(val);
+ rc = hwrm_req_send(bp, req);
+ return rc;
+}
+
static int bnxt_set_tunable(struct net_device *dev,
const struct ethtool_tunable *tuna,
const void *data)
{
struct bnxt *bp = netdev_priv(dev);
- u32 rx_copybreak;
+ u32 rx_copybreak, val;
switch (tuna->id) {
case ETHTOOL_RX_COPYBREAK:
@@ -4394,6 +4448,15 @@ static int bnxt_set_tunable(struct net_device *dev,
bp->rx_copybreak = rx_copybreak;
}
return 0;
+ case ETHTOOL_PFC_PREVENTION_TOUT:
+ if (BNXT_VF(bp) || !bp->max_pfcwd_tmo_ms)
+ return -EOPNOTSUPP;
+
+ val = *(u16 *)data;
+ if (val > bp->max_pfcwd_tmo_ms &&
+ val != PFC_STORM_PREVENTION_AUTO)
+ return -EINVAL;
+ return bnxt_hwrm_pfcwd_cfg(bp, val);
default:
return -EOPNOTSUPP;
}
@@ -4408,6 +4471,10 @@ static int bnxt_get_tunable(struct net_device *dev,
case ETHTOOL_RX_COPYBREAK:
*(u32 *)data = bp->rx_copybreak;
break;
+ case ETHTOOL_PFC_PREVENTION_TOUT:
+ if (!bp->max_pfcwd_tmo_ms)
+ return -EOPNOTSUPP;
+ return bnxt_hwrm_pfcwd_qcfg(bp, data);
default:
return -EOPNOTSUPP;
}
@@ -5254,6 +5321,26 @@ static int bnxt_get_ts_info(struct net_device *dev,
return 0;
}
+static void bnxt_hwrm_pcie_qstats(struct bnxt *bp)
+{
+ struct hwrm_pcie_qstats_output *resp;
+ struct hwrm_pcie_qstats_input *req;
+
+ bp->pcie_stat_len = 0;
+ if (!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
+ return;
+
+ if (hwrm_req_init(bp, req, HWRM_PCIE_QSTATS))
+ return;
+
+ resp = hwrm_req_hold(bp, req);
+ if (__bnxt_hwrm_pcie_qstats(bp, req))
+ bp->pcie_stat_len = min_t(u16,
+ le16_to_cpu(resp->pcie_stat_size),
+ sizeof(struct pcie_ctx_hw_stats_v2));
+ hwrm_req_drop(bp, req);
+}
+
void bnxt_ethtool_init(struct bnxt *bp)
{
struct hwrm_selftest_qlist_output *resp;
@@ -5262,6 +5349,7 @@ void bnxt_ethtool_init(struct bnxt *bp)
struct net_device *dev = bp->dev;
int i, rc;
+ bnxt_hwrm_pcie_qstats(bp);
if (!(bp->fw_cap & BNXT_FW_CAP_PKG_VER))
bnxt_get_pkgver(dev);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index ca660e6d28a4..db81cf6d5289 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -560,10 +560,11 @@ static int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
return bnxt_ptp_cfg_tstamp_filters(bp);
}
-int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+int bnxt_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *stmpconf,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = netdev_priv(dev);
- struct hwtstamp_config stmpconf;
struct bnxt_ptp_cfg *ptp;
u16 old_rxctl;
int old_rx_filter, rc;
@@ -573,17 +574,14 @@ int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
if (!ptp)
return -EOPNOTSUPP;
- if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
- return -EFAULT;
-
- if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
- stmpconf.tx_type != HWTSTAMP_TX_OFF)
+ if (stmpconf->tx_type != HWTSTAMP_TX_ON &&
+ stmpconf->tx_type != HWTSTAMP_TX_OFF)
return -ERANGE;
old_rx_filter = ptp->rx_filter;
old_rxctl = ptp->rxctl;
old_tx_tstamp_en = ptp->tx_tstamp_en;
- switch (stmpconf.rx_filter) {
+ switch (stmpconf->rx_filter) {
case HWTSTAMP_FILTER_NONE:
ptp->rxctl = 0;
ptp->rx_filter = HWTSTAMP_FILTER_NONE;
@@ -616,7 +614,7 @@ int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
return -ERANGE;
}
- if (stmpconf.tx_type == HWTSTAMP_TX_ON)
+ if (stmpconf->tx_type == HWTSTAMP_TX_ON)
ptp->tx_tstamp_en = 1;
else
ptp->tx_tstamp_en = 0;
@@ -625,9 +623,8 @@ int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
if (rc)
goto ts_set_err;
- stmpconf.rx_filter = ptp->rx_filter;
- return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
- -EFAULT : 0;
+ stmpconf->rx_filter = ptp->rx_filter;
+ return 0;
ts_set_err:
ptp->rx_filter = old_rx_filter;
@@ -636,22 +633,22 @@ ts_set_err:
return rc;
}
-int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+int bnxt_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *stmpconf)
{
struct bnxt *bp = netdev_priv(dev);
- struct hwtstamp_config stmpconf;
struct bnxt_ptp_cfg *ptp;
ptp = bp->ptp_cfg;
if (!ptp)
return -EOPNOTSUPP;
- stmpconf.flags = 0;
- stmpconf.tx_type = ptp->tx_tstamp_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ stmpconf->flags = 0;
+ stmpconf->tx_type = ptp->tx_tstamp_en ? HWTSTAMP_TX_ON
+ : HWTSTAMP_TX_OFF;
- stmpconf.rx_filter = ptp->rx_filter;
- return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
- -EFAULT : 0;
+ stmpconf->rx_filter = ptp->rx_filter;
+ return 0;
}
static int bnxt_map_regs(struct bnxt *bp, u32 *reg_arr, int count, int reg_win)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
index 0481161d26ef..8cc2fae47644 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
@@ -160,8 +160,11 @@ void bnxt_ptp_update_current_time(struct bnxt *bp);
void bnxt_ptp_pps_event(struct bnxt *bp, u32 data1, u32 data2);
int bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp);
void bnxt_ptp_reapply_pps(struct bnxt *bp);
-int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr);
-int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr);
+int bnxt_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *stmpconf,
+ struct netlink_ext_ack *extack);
+int bnxt_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *stmpconf);
void bnxt_ptp_free_txts_skbs(struct bnxt_ptp_cfg *ptp);
int bnxt_ptp_get_txts_prod(struct bnxt_ptp_cfg *ptp, u16 *prod);
void bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb, u16 prod);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 480e18a32caa..80fed2c07b9e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -541,6 +541,13 @@ static void bnxt_hwrm_roce_sriov_cfg(struct bnxt *bp, int num_vfs)
if (rc)
goto err;
+ /* With VF dynamic resource allocation, the driver provisions maximum
+ * resources to every VF. FW allocates resources to VFs on the fly, so
+ * always divide the resources by 1 rather than by the VF count.
+ */
+ if (BNXT_ROCE_VF_DYN_ALLOC_CAP(bp))
+ num_vfs = 1;
+
cfg_req->fid = cpu_to_le16(0xffff);
cfg_req->enables2 =
cpu_to_le32(FUNC_CFG_REQ_ENABLES2_ROCE_MAX_AV_PER_VF |
@@ -734,7 +741,7 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
FUNC_CFG_REQ_ENABLES_NUM_VNICS |
FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);
- mtu = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
+ mtu = bp->dev->mtu + VLAN_ETH_HLEN;
req->mru = cpu_to_le16(mtu);
req->admin_mtu = cpu_to_le16(mtu);
@@ -919,7 +926,7 @@ err_out1:
return rc;
}
-void bnxt_sriov_disable(struct bnxt *bp)
+void __bnxt_sriov_disable(struct bnxt *bp)
{
u16 num_vfs = pci_num_vf(bp->pdev);
@@ -943,6 +950,14 @@ void bnxt_sriov_disable(struct bnxt *bp)
devl_unlock(bp->dl);
bnxt_free_vf_resources(bp);
+}
+
+static void bnxt_sriov_disable(struct bnxt *bp)
+{
+ if (!pci_num_vf(bp->pdev))
+ return;
+
+ __bnxt_sriov_disable(bp);
/* Reclaim all resources for the PF. */
rtnl_lock();
@@ -1321,7 +1336,7 @@ int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
return 0;
}
-void bnxt_sriov_disable(struct bnxt *bp)
+void __bnxt_sriov_disable(struct bnxt *bp)
{
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
index 9a4bacba477b..e4979d729312 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
@@ -38,7 +38,7 @@ bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf);
int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trust);
int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs);
int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset);
-void bnxt_sriov_disable(struct bnxt *);
+void __bnxt_sriov_disable(struct bnxt *bp);
void bnxt_hwrm_exec_fwd_req(struct bnxt *);
void bnxt_update_vf_mac(struct bnxt *);
int bnxt_approve_mac(struct bnxt *, const u8 *, bool);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index 61cf201bb0dc..f8c2c72b382d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -100,6 +100,12 @@ void bnxt_set_dflt_ulp_stat_ctxs(struct bnxt *bp)
if (BNXT_PF(bp) && !bp->pf.port_id &&
bp->port_count > 1)
bp->edev->ulp_num_ctxs++;
+
+ /* Reserve one additional stat_ctx when the device is capable
+ * of supporting port mirroring on RDMA device.
+ */
+ if (BNXT_MIRROR_ON_ROCE_CAP(bp))
+ bp->edev->ulp_num_ctxs++;
}
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 58d579dca3f1..3e77a96e5a3e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -468,9 +468,8 @@ bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, u8 num_frags,
if (!skb)
return NULL;
- xdp_update_skb_shared_info(skb, num_frags,
- sinfo->xdp_frags_size,
- BNXT_RX_PAGE_SIZE * num_frags,
- xdp_buff_is_frag_pfmemalloc(xdp));
+ xdp_update_skb_frags_info(skb, num_frags, sinfo->xdp_frags_size,
+ BNXT_RX_PAGE_SIZE * num_frags,
+ xdp_buff_get_skb_flags(xdp));
return skb;
}
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index b4dc93a48718..7f00ec7fd7b9 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -13929,22 +13929,20 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
}
-static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+static int tg3_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *stmpconf,
+ struct netlink_ext_ack *extack)
{
struct tg3 *tp = netdev_priv(dev);
- struct hwtstamp_config stmpconf;
if (!tg3_flag(tp, PTP_CAPABLE))
return -EOPNOTSUPP;
- if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
- return -EFAULT;
-
- if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
- stmpconf.tx_type != HWTSTAMP_TX_OFF)
+ if (stmpconf->tx_type != HWTSTAMP_TX_ON &&
+ stmpconf->tx_type != HWTSTAMP_TX_OFF)
return -ERANGE;
- switch (stmpconf.rx_filter) {
+ switch (stmpconf->rx_filter) {
case HWTSTAMP_FILTER_NONE:
tp->rxptpctl = 0;
break;
@@ -14004,74 +14002,72 @@ static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
tw32(TG3_RX_PTP_CTL,
tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
- if (stmpconf.tx_type == HWTSTAMP_TX_ON)
+ if (stmpconf->tx_type == HWTSTAMP_TX_ON)
tg3_flag_set(tp, TX_TSTAMP_EN);
else
tg3_flag_clear(tp, TX_TSTAMP_EN);
- return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
- -EFAULT : 0;
+ return 0;
}
-static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+static int tg3_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *stmpconf)
{
struct tg3 *tp = netdev_priv(dev);
- struct hwtstamp_config stmpconf;
if (!tg3_flag(tp, PTP_CAPABLE))
return -EOPNOTSUPP;
- stmpconf.flags = 0;
- stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
- HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
+ stmpconf->flags = 0;
+ stmpconf->tx_type = tg3_flag(tp, TX_TSTAMP_EN) ?
+ HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
switch (tp->rxptpctl) {
case 0:
- stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_NONE;
break;
case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
break;
case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
break;
case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
break;
case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
break;
case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
break;
case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
break;
case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
break;
case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
break;
case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
break;
case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
break;
case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
break;
case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
break;
default:
WARN_ON_ONCE(1);
return -ERANGE;
}
- return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
- -EFAULT : 0;
+ return 0;
}
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -14126,12 +14122,6 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return err;
- case SIOCSHWTSTAMP:
- return tg3_hwtstamp_set(dev, ifr);
-
- case SIOCGHWTSTAMP:
- return tg3_hwtstamp_get(dev, ifr);
-
default:
/* do nothing */
break;
@@ -14407,6 +14397,8 @@ static const struct net_device_ops tg3_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = tg3_poll_controller,
#endif
+ .ndo_hwtstamp_get = tg3_hwtstamp_get,
+ .ndo_hwtstamp_set = tg3_hwtstamp_set,
};
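
/* With .ndo_hwtstamp_get/.ndo_hwtstamp_set populated, the net core handles
 * the SIOCGHWTSTAMP/SIOCSHWTSTAMP ioctls itself, including the
 * copy_from_user()/copy_to_user() of struct hwtstamp_config, and hands the
 * driver an already-translated kernel_hwtstamp_config -- hence the deleted
 * ioctl cases and ifreq plumbing in tg3_ioctl() and the hwtstamp helpers.
 */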
static void tg3_get_eeprom_size(struct tg3 *tp)
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index c9a5c8beb2fa..0830c48973aa 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -184,6 +184,13 @@
#define GEM_DCFG8 0x029C /* Design Config 8 */
#define GEM_DCFG10 0x02A4 /* Design Config 10 */
#define GEM_DCFG12 0x02AC /* Design Config 12 */
+#define GEM_ENST_START_TIME_Q0 0x0800 /* ENST Q0 start time */
+#define GEM_ENST_START_TIME_Q1 0x0804 /* ENST Q1 start time */
+#define GEM_ENST_ON_TIME_Q0 0x0820 /* ENST Q0 on time */
+#define GEM_ENST_ON_TIME_Q1 0x0824 /* ENST Q1 on time */
+#define GEM_ENST_OFF_TIME_Q0 0x0840 /* ENST Q0 off time */
+#define GEM_ENST_OFF_TIME_Q1 0x0844 /* ENST Q1 off time */
+#define GEM_ENST_CONTROL 0x0880 /* ENST control register */
#define GEM_USX_CONTROL 0x0A80 /* High speed PCS control register */
#define GEM_USX_STATUS 0x0A88 /* High speed PCS status register */
@@ -213,14 +220,19 @@
#define GEM_ISR(hw_q) (0x0400 + ((hw_q) << 2))
#define GEM_TBQP(hw_q) (0x0440 + ((hw_q) << 2))
-#define GEM_TBQPH(hw_q) (0x04C8)
#define GEM_RBQP(hw_q) (0x0480 + ((hw_q) << 2))
#define GEM_RBQS(hw_q) (0x04A0 + ((hw_q) << 2))
-#define GEM_RBQPH(hw_q) (0x04D4)
#define GEM_IER(hw_q) (0x0600 + ((hw_q) << 2))
#define GEM_IDR(hw_q) (0x0620 + ((hw_q) << 2))
#define GEM_IMR(hw_q) (0x0640 + ((hw_q) << 2))
+#define GEM_ENST_START_TIME(hw_q) (0x0800 + ((hw_q) << 2))
+#define GEM_ENST_ON_TIME(hw_q) (0x0820 + ((hw_q) << 2))
+#define GEM_ENST_OFF_TIME(hw_q) (0x0840 + ((hw_q) << 2))
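+/* Sanity check: for hw_q 0 and 1 these expand to the fixed per-queue
+ * addresses defined earlier, e.g. GEM_ENST_START_TIME(1) == 0x0804 ==
+ * GEM_ENST_START_TIME_Q1.
+ */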
+
+/* Bitfields in ENST_CONTROL */
+#define GEM_ENST_DISABLE_QUEUE_OFFSET 16
+
/* Bitfields in NCR */
#define MACB_LB_OFFSET 0 /* reserved */
#define MACB_LB_SIZE 1
@@ -554,6 +566,23 @@
#define GEM_HIGH_SPEED_OFFSET 26
#define GEM_HIGH_SPEED_SIZE 1
+/* Bitfields in ENST_START_TIME_Qx. */
+#define GEM_START_TIME_SEC_OFFSET 30
+#define GEM_START_TIME_SEC_SIZE 2
+#define GEM_START_TIME_NSEC_OFFSET 0
+#define GEM_START_TIME_NSEC_SIZE 30
+
+/* Bitfields in ENST_ON_TIME_Qx. */
+#define GEM_ON_TIME_OFFSET 0
+#define GEM_ON_TIME_SIZE 17
+
+/* Bitfields in ENST_OFF_TIME_Qx. */
+#define GEM_OFF_TIME_OFFSET 0
+#define GEM_OFF_TIME_SIZE 17
+
+/* Hardware ENST timing registers granularity */
+#define ENST_TIME_GRANULARITY_NS 8
+
/* Bitfields in USX_CONTROL. */
#define GEM_USX_CTRL_SPEED_OFFSET 14
#define GEM_USX_CTRL_SPEED_SIZE 3
@@ -739,6 +768,7 @@
#define MACB_CAPS_MIIONRGMII 0x00000200
#define MACB_CAPS_NEED_TSUCLK 0x00000400
#define MACB_CAPS_QUEUE_DISABLE 0x00000800
+#define MACB_CAPS_QBV 0x00001000
#define MACB_CAPS_PCS 0x01000000
#define MACB_CAPS_HIGH_SPEED 0x02000000
#define MACB_CAPS_CLK_HW_CHG 0x04000000
@@ -1214,10 +1244,13 @@ struct macb_queue {
unsigned int IDR;
unsigned int IMR;
unsigned int TBQP;
- unsigned int TBQPH;
unsigned int RBQS;
unsigned int RBQP;
- unsigned int RBQPH;
+
+ /* ENST register offsets for this queue */
+ unsigned int ENST_START_TIME;
+ unsigned int ENST_ON_TIME;
+ unsigned int ENST_OFF_TIME;
/* Lock to protect tx_head and tx_tail */
spinlock_t tx_ptr_lock;
@@ -1397,6 +1430,19 @@ static inline bool gem_has_ptp(struct macb *bp)
return IS_ENABLED(CONFIG_MACB_USE_HWSTAMP) && (bp->caps & MACB_CAPS_GEM_HAS_PTP);
}
+/* ENST Helper functions */
+static inline u64 enst_ns_to_hw_units(size_t ns, u32 speed_mbps)
+{
+ return DIV_ROUND_UP((ns) * (speed_mbps),
+ (ENST_TIME_GRANULARITY_NS * 1000));
+}
+
+static inline u64 enst_max_hw_interval(u32 speed_mbps)
+{
+ return DIV_ROUND_UP(GENMASK(GEM_ON_TIME_SIZE - 1, 0) *
+ ENST_TIME_GRANULARITY_NS * 1000, (speed_mbps));
+}
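+
+/* A worked example of the conversion, assuming a 1000 Mbps link: one byte
+ * then takes 8 ns on the wire, matching ENST_TIME_GRANULARITY_NS, so the
+ * helpers express intervals in byte times:
+ *
+ *   enst_ns_to_hw_units(300000, 1000)
+ *     == DIV_ROUND_UP(300000 * 1000, 8 * 1000) == 37500 byte times
+ *
+ * and, with a 17-bit ON/OFF field,
+ *
+ *   enst_max_hw_interval(1000)
+ *     == DIV_ROUND_UP(0x1ffff * 8 * 1000, 1000) == 1048568 ns (~1.05 ms).
+ */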
+
/**
* struct macb_platform_data - platform data for MACB Ethernet used for PCI registration
* @pclk: platform clock
@@ -1407,4 +1453,21 @@ struct macb_platform_data {
struct clk *hclk;
};
+/**
+ * struct macb_queue_enst_config - Configuration for Enhanced Scheduled Traffic
+ * @start_time_mask: Bitmask representing the start time for the queue
+ * @on_time_bytes: "on" time nsec expressed in bytes
+ * @off_time_bytes: "off" time nsec expressed in bytes
+ * @queue_id: Identifier for the queue
+ *
+ * This structure holds the configuration parameters for an ENST queue,
+ * used to control time-based transmission scheduling in the MACB driver.
+ */
+struct macb_queue_enst_config {
+ u32 start_time_mask;
+ u32 on_time_bytes;
+ u32 off_time_bytes;
+ u8 queue_id;
+};
+
#endif /* _MACB_H */
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index c769b7dbd3ba..ca2386b83473 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -36,6 +36,7 @@
#include <linux/reset.h>
#include <linux/firmware/xlnx-zynqmp.h>
#include <linux/inetdevice.h>
+#include <net/pkt_sched.h>
#include "macb.h"
/* This structure is only used for MACB on SiFive FU540 devices */
@@ -51,14 +52,10 @@ struct sifive_fu540_macb_mgmt {
#define DEFAULT_RX_RING_SIZE 512 /* must be power of 2 */
#define MIN_RX_RING_SIZE 64
#define MAX_RX_RING_SIZE 8192
-#define RX_RING_BYTES(bp) (macb_dma_desc_get_size(bp) \
- * (bp)->rx_ring_size)
#define DEFAULT_TX_RING_SIZE 512 /* must be power of 2 */
#define MIN_TX_RING_SIZE 64
#define MAX_TX_RING_SIZE 4096
-#define TX_RING_BYTES(bp) (macb_dma_desc_get_size(bp) \
- * (bp)->tx_ring_size)
/* level of occupied TX descriptors under which we wake up TX process */
#define MACB_TX_WAKEUP_THRESH(bp) (3 * (bp)->tx_ring_size / 4)
@@ -278,9 +275,9 @@ static void macb_set_hwaddr(struct macb *bp)
u32 bottom;
u16 top;
- bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
+ bottom = get_unaligned_le32(bp->dev->dev_addr);
macb_or_gem_writel(bp, SA1B, bottom);
- top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
+ top = get_unaligned_le16(bp->dev->dev_addr + 4);
macb_or_gem_writel(bp, SA1T, top);
if (gem_has_ptp(bp)) {
@@ -495,19 +492,19 @@ static void macb_init_buffers(struct macb *bp)
struct macb_queue *queue;
unsigned int q;
- for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
- queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B)
- queue_writel(queue, RBQPH,
- upper_32_bits(queue->rx_ring_dma));
+ /* Single register for all queues' high 32 bits. */
+ if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
+ macb_writel(bp, RBQPH,
+ upper_32_bits(bp->queues[0].rx_ring_dma));
+ macb_writel(bp, TBQPH,
+ upper_32_bits(bp->queues[0].tx_ring_dma));
+ }
#endif
+
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B)
- queue_writel(queue, TBQPH,
- upper_32_bits(queue->tx_ring_dma));
-#endif
}
}
@@ -1166,10 +1163,6 @@ static void macb_tx_error_task(struct work_struct *work)
/* Reinitialize the TX desc queue */
queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B)
- queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
-#endif
/* Make TX ring reflect state of hardware */
queue->tx_head = 0;
queue->tx_tail = 0;
@@ -2474,35 +2467,42 @@ static void macb_free_rx_buffers(struct macb *bp)
}
}
+static unsigned int macb_tx_ring_size_per_queue(struct macb *bp)
+{
+ return macb_dma_desc_get_size(bp) * bp->tx_ring_size + bp->tx_bd_rd_prefetch;
+}
+
+static unsigned int macb_rx_ring_size_per_queue(struct macb *bp)
+{
+ return macb_dma_desc_get_size(bp) * bp->rx_ring_size + bp->rx_bd_rd_prefetch;
+}
+
static void macb_free_consistent(struct macb *bp)
{
+ struct device *dev = &bp->pdev->dev;
struct macb_queue *queue;
unsigned int q;
- int size;
+ size_t size;
if (bp->rx_ring_tieoff) {
- dma_free_coherent(&bp->pdev->dev, macb_dma_desc_get_size(bp),
+ dma_free_coherent(dev, macb_dma_desc_get_size(bp),
bp->rx_ring_tieoff, bp->rx_ring_tieoff_dma);
bp->rx_ring_tieoff = NULL;
}
bp->macbgem_ops.mog_free_rx_buffers(bp);
+ size = bp->num_queues * macb_tx_ring_size_per_queue(bp);
+ dma_free_coherent(dev, size, bp->queues[0].tx_ring, bp->queues[0].tx_ring_dma);
+
+ size = bp->num_queues * macb_rx_ring_size_per_queue(bp);
+ dma_free_coherent(dev, size, bp->queues[0].rx_ring, bp->queues[0].rx_ring_dma);
+
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
kfree(queue->tx_skb);
queue->tx_skb = NULL;
- if (queue->tx_ring) {
- size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
- dma_free_coherent(&bp->pdev->dev, size,
- queue->tx_ring, queue->tx_ring_dma);
- queue->tx_ring = NULL;
- }
- if (queue->rx_ring) {
- size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
- dma_free_coherent(&bp->pdev->dev, size,
- queue->rx_ring, queue->rx_ring_dma);
- queue->rx_ring = NULL;
- }
+ queue->tx_ring = NULL;
+ queue->rx_ring = NULL;
}
}
@@ -2544,35 +2544,45 @@ static int macb_alloc_rx_buffers(struct macb *bp)
static int macb_alloc_consistent(struct macb *bp)
{
+ struct device *dev = &bp->pdev->dev;
+ dma_addr_t tx_dma, rx_dma;
struct macb_queue *queue;
unsigned int q;
- int size;
+ void *tx, *rx;
+ size_t size;
+
+ /*
+ * Upper 32 bits of the Tx/Rx DMA descriptor base must match across all
+ * queues! We cannot enforce this guarantee; the best we can do is make a
+ * single allocation and hope it lands in alloc_pages(), which guarantees
+ * natural alignment of physical addresses.
+ */
+
+ size = bp->num_queues * macb_tx_ring_size_per_queue(bp);
+ tx = dma_alloc_coherent(dev, size, &tx_dma, GFP_KERNEL);
+ if (!tx || upper_32_bits(tx_dma) != upper_32_bits(tx_dma + size - 1))
+ goto out_err;
+ netdev_dbg(bp->dev, "Allocated %zu bytes for %u TX rings at %08lx (mapped %p)\n",
+ size, bp->num_queues, (unsigned long)tx_dma, tx);
+
+ size = bp->num_queues * macb_rx_ring_size_per_queue(bp);
+ rx = dma_alloc_coherent(dev, size, &rx_dma, GFP_KERNEL);
+ if (!rx || upper_32_bits(rx_dma) != upper_32_bits(rx_dma + size - 1))
+ goto out_err;
+ netdev_dbg(bp->dev, "Allocated %zu bytes for %u RX rings at %08lx (mapped %p)\n",
+ size, bp->num_queues, (unsigned long)rx_dma, rx);
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
- size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
- queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
- &queue->tx_ring_dma,
- GFP_KERNEL);
- if (!queue->tx_ring)
- goto out_err;
- netdev_dbg(bp->dev,
- "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n",
- q, size, (unsigned long)queue->tx_ring_dma,
- queue->tx_ring);
+ queue->tx_ring = tx + macb_tx_ring_size_per_queue(bp) * q;
+ queue->tx_ring_dma = tx_dma + macb_tx_ring_size_per_queue(bp) * q;
+
+ queue->rx_ring = rx + macb_rx_ring_size_per_queue(bp) * q;
+ queue->rx_ring_dma = rx_dma + macb_rx_ring_size_per_queue(bp) * q;
size = bp->tx_ring_size * sizeof(struct macb_tx_skb);
queue->tx_skb = kmalloc(size, GFP_KERNEL);
if (!queue->tx_skb)
goto out_err;
-
- size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
- queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
- &queue->rx_ring_dma, GFP_KERNEL);
- if (!queue->rx_ring)
- goto out_err;
- netdev_dbg(bp->dev,
- "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
- size, (unsigned long)queue->rx_ring_dma, queue->rx_ring);
}
if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
goto out_err;
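
/* The upper_32_bits() straddle checks above exist because GEM now has a
 * single TBQPH/RBQPH register shared by all queues (see
 * macb_init_buffers()): every ring must live in the same 4 GiB window, so
 * the first and last byte of each block must share their upper 32 bits.
 */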
@@ -4088,6 +4098,223 @@ static void macb_restore_features(struct macb *bp)
macb_set_rxflow_feature(bp, features);
}
+static int macb_taprio_setup_replace(struct net_device *ndev,
+ struct tc_taprio_qopt_offload *conf)
+{
+ u64 total_on_time = 0, start_time_sec = 0, start_time = conf->base_time;
+ u32 configured_queues = 0, speed = 0, start_time_nsec;
+ struct macb_queue_enst_config *enst_queue;
+ struct tc_taprio_sched_entry *entry;
+ struct macb *bp = netdev_priv(ndev);
+ struct ethtool_link_ksettings kset;
+ struct macb_queue *queue;
+ size_t i;
+ int err;
+
+ if (conf->num_entries > bp->num_queues) {
+ netdev_err(ndev, "Too many TAPRIO entries: %zu > %d queues\n",
+ conf->num_entries, bp->num_queues);
+ return -EINVAL;
+ }
+
+ if (conf->base_time < 0) {
+ netdev_err(ndev, "Invalid base_time: must be 0 or positive, got %lld\n",
+ conf->base_time);
+ return -ERANGE;
+ }
+
+ /* Get the current link speed */
+ err = phylink_ethtool_ksettings_get(bp->phylink, &kset);
+ if (unlikely(err)) {
+ netdev_err(ndev, "Failed to get link settings: %d\n", err);
+ return err;
+ }
+
+ speed = kset.base.speed;
+ if (unlikely(speed == 0 || speed == SPEED_UNKNOWN)) {
+ netdev_err(ndev, "Invalid speed: %u\n", speed);
+ return -EINVAL;
+ }
+
+ enst_queue = kcalloc(conf->num_entries, sizeof(*enst_queue), GFP_KERNEL);
+ if (unlikely(!enst_queue))
+ return -ENOMEM;
+
+ /* Pre-validate all entries before making any hardware changes */
+ for (i = 0; i < conf->num_entries; i++) {
+ entry = &conf->entries[i];
+
+ if (entry->command != TC_TAPRIO_CMD_SET_GATES) {
+ netdev_err(ndev, "Entry %zu: unsupported command %d\n",
+ i, entry->command);
+ err = -EOPNOTSUPP;
+ goto cleanup;
+ }
+
+ /* Validate gate_mask: must be nonzero, single queue, and within range */
+ if (!is_power_of_2(entry->gate_mask)) {
+ netdev_err(ndev, "Entry %zu: gate_mask 0x%x is not a power of 2 (only one queue per entry allowed)\n",
+ i, entry->gate_mask);
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ /* gate_mask must not select queues outside the valid queue_mask */
+ if (entry->gate_mask & ~bp->queue_mask) {
+ netdev_err(ndev, "Entry %zu: gate_mask 0x%x exceeds queue range (max_queues=%d)\n",
+ i, entry->gate_mask, bp->num_queues);
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ /* Check for start time limits */
+ start_time_sec = start_time;
+ start_time_nsec = do_div(start_time_sec, NSEC_PER_SEC);
+ if (start_time_sec > GENMASK(GEM_START_TIME_SEC_SIZE - 1, 0)) {
+ netdev_err(ndev, "Entry %zu: Start time %llu s exceeds hardware limit\n",
+ i, start_time_sec);
+ err = -ERANGE;
+ goto cleanup;
+ }
+
+ /* Check for on time limit */
+ if (entry->interval > enst_max_hw_interval(speed)) {
+ netdev_err(ndev, "Entry %zu: interval %u ns exceeds hardware limit %llu ns\n",
+ i, entry->interval, enst_max_hw_interval(speed));
+ err = -ERANGE;
+ goto cleanup;
+ }
+
+ /* Check for off time limit */
+ if ((conf->cycle_time - entry->interval) > enst_max_hw_interval(speed)) {
+ netdev_err(ndev, "Entry %zu: off_time %llu ns exceeds hardware limit %llu ns\n",
+ i, conf->cycle_time - entry->interval,
+ enst_max_hw_interval(speed));
+ err = -ERANGE;
+ goto cleanup;
+ }
+
+ enst_queue[i].queue_id = order_base_2(entry->gate_mask);
+ enst_queue[i].start_time_mask =
+ (start_time_sec << GEM_START_TIME_SEC_OFFSET) |
+ start_time_nsec;
+ enst_queue[i].on_time_bytes =
+ enst_ns_to_hw_units(entry->interval, speed);
+ enst_queue[i].off_time_bytes =
+ enst_ns_to_hw_units(conf->cycle_time - entry->interval, speed);
+
+ configured_queues |= entry->gate_mask;
+ total_on_time += entry->interval;
+ start_time += entry->interval;
+ }
+
+ /* Check total interval doesn't exceed cycle time */
+ if (total_on_time > conf->cycle_time) {
+ netdev_err(ndev, "Total ON %llu ns exceeds cycle time %llu ns\n",
+ total_on_time, conf->cycle_time);
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ netdev_dbg(ndev, "TAPRIO setup: %zu entries, base_time=%lld ns, cycle_time=%llu ns\n",
+ conf->num_entries, conf->base_time, conf->cycle_time);
+
+ /* All validations passed - proceed with hardware configuration */
+ scoped_guard(spinlock_irqsave, &bp->lock) {
+ /* Disable ENST queues if running before configuring */
+ gem_writel(bp, ENST_CONTROL,
+ bp->queue_mask << GEM_ENST_DISABLE_QUEUE_OFFSET);
+
+ for (i = 0; i < conf->num_entries; i++) {
+ queue = &bp->queues[enst_queue[i].queue_id];
+ /* Configure queue timing registers */
+ queue_writel(queue, ENST_START_TIME,
+ enst_queue[i].start_time_mask);
+ queue_writel(queue, ENST_ON_TIME,
+ enst_queue[i].on_time_bytes);
+ queue_writel(queue, ENST_OFF_TIME,
+ enst_queue[i].off_time_bytes);
+ }
+
+ /* Enable ENST for all configured queues in one write */
+ gem_writel(bp, ENST_CONTROL, configured_queues);
+ }
+
+ netdev_info(ndev, "TAPRIO configuration completed successfully: %zu entries, %d queues configured\n",
+ conf->num_entries, hweight32(configured_queues));
+
+cleanup:
+ kfree(enst_queue);
+ return err;
+}
+
+static void macb_taprio_destroy(struct net_device *ndev)
+{
+ struct macb *bp = netdev_priv(ndev);
+ struct macb_queue *queue;
+ u32 enst_disable_mask;
+ unsigned int q;
+
+ netdev_reset_tc(ndev);
+ enst_disable_mask = bp->queue_mask << GEM_ENST_DISABLE_QUEUE_OFFSET;
+
+ scoped_guard(spinlock_irqsave, &bp->lock) {
+ /* Single disable command for all queues */
+ gem_writel(bp, ENST_CONTROL, enst_disable_mask);
+
+ /* Clear all queue ENST registers in batch */
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ queue_writel(queue, ENST_START_TIME, 0);
+ queue_writel(queue, ENST_ON_TIME, 0);
+ queue_writel(queue, ENST_OFF_TIME, 0);
+ }
+ }
+ netdev_info(ndev, "TAPRIO destroy: All gates disabled\n");
+}
+
+static int macb_setup_taprio(struct net_device *ndev,
+ struct tc_taprio_qopt_offload *taprio)
+{
+ struct macb *bp = netdev_priv(ndev);
+ int err = 0;
+
+ if (unlikely(!(ndev->hw_features & NETIF_F_HW_TC)))
+ return -EOPNOTSUPP;
+
+ /* Check if Device is in runtime suspend */
+ if (unlikely(pm_runtime_suspended(&bp->pdev->dev))) {
+ netdev_err(ndev, "Device is in runtime suspend\n");
+ return -EOPNOTSUPP;
+ }
+
+ switch (taprio->cmd) {
+ case TAPRIO_CMD_REPLACE:
+ err = macb_taprio_setup_replace(ndev, taprio);
+ break;
+ case TAPRIO_CMD_DESTROY:
+ macb_taprio_destroy(ndev);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
+static int macb_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+{
+ if (!dev || !type_data)
+ return -EINVAL;
+
+ switch (type) {
+ case TC_SETUP_QDISC_TAPRIO:
+ return macb_setup_taprio(dev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
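+
+/* A minimal sketch of driving this offload from userspace; the interface
+ * name, handle and times are illustrative only. Each sched-entry gate mask
+ * must select exactly one queue, per the validation in
+ * macb_taprio_setup_replace(), and flags 0x2 requests full offload:
+ *
+ *   tc qdisc replace dev eth0 parent root handle 100 taprio \
+ *       num_tc 2 map 0 1 queues 1@0 1@1 base-time 0 \
+ *       sched-entry S 0x1 300000 sched-entry S 0x2 300000 \
+ *       flags 0x2
+ */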
+
static const struct net_device_ops macb_netdev_ops = {
.ndo_open = macb_open,
.ndo_stop = macb_close,
@@ -4105,6 +4332,7 @@ static const struct net_device_ops macb_netdev_ops = {
.ndo_features_check = macb_features_check,
.ndo_hwtstamp_set = macb_hwtstamp_set,
.ndo_hwtstamp_get = macb_hwtstamp_get,
+ .ndo_setup_tc = macb_setup_tc,
};
/* Configure peripheral capabilities according to device tree
@@ -4309,12 +4537,6 @@ static int macb_init(struct platform_device *pdev)
queue->TBQP = GEM_TBQP(hw_q - 1);
queue->RBQP = GEM_RBQP(hw_q - 1);
queue->RBQS = GEM_RBQS(hw_q - 1);
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
- queue->TBQPH = GEM_TBQPH(hw_q - 1);
- queue->RBQPH = GEM_RBQPH(hw_q - 1);
- }
-#endif
} else {
/* queue0 uses legacy registers */
queue->ISR = MACB_ISR;
@@ -4323,14 +4545,12 @@ static int macb_init(struct platform_device *pdev)
queue->IMR = MACB_IMR;
queue->TBQP = MACB_TBQP;
queue->RBQP = MACB_RBQP;
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
- queue->TBQPH = MACB_TBQPH;
- queue->RBQPH = MACB_RBQPH;
- }
-#endif
}
+ queue->ENST_START_TIME = GEM_ENST_START_TIME(hw_q);
+ queue->ENST_ON_TIME = GEM_ENST_ON_TIME(hw_q);
+ queue->ENST_OFF_TIME = GEM_ENST_OFF_TIME(hw_q);
+
/* get irq: here we use the linux queue index, not the hardware
* queue index. the queue irq definitions in the device tree
* must remove the optional gaps that could exist in the
@@ -4383,6 +4603,10 @@ static int macb_init(struct platform_device *pdev)
dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
if (bp->caps & MACB_CAPS_SG_DISABLED)
dev->hw_features &= ~NETIF_F_SG;
+ /* Enable HW_TC if hardware supports QBV */
+ if (bp->caps & MACB_CAPS_QBV)
+ dev->hw_features |= NETIF_F_HW_TC;
+
dev->features = dev->hw_features;
/* Check RX Flow Filters support.
@@ -4826,36 +5050,45 @@ static unsigned long fu540_macb_tx_recalc_rate(struct clk_hw *hw,
return mgmt->rate;
}
-static long fu540_macb_tx_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
-{
- if (WARN_ON(rate < 2500000))
- return 2500000;
- else if (rate == 2500000)
- return 2500000;
- else if (WARN_ON(rate < 13750000))
- return 2500000;
- else if (WARN_ON(rate < 25000000))
- return 25000000;
- else if (rate == 25000000)
- return 25000000;
- else if (WARN_ON(rate < 75000000))
- return 25000000;
- else if (WARN_ON(rate < 125000000))
- return 125000000;
- else if (rate == 125000000)
- return 125000000;
-
- WARN_ON(rate > 125000000);
+static int fu540_macb_tx_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ if (WARN_ON(req->rate < 2500000))
+ req->rate = 2500000;
+ else if (req->rate == 2500000)
+ req->rate = 2500000;
+ else if (WARN_ON(req->rate < 13750000))
+ req->rate = 2500000;
+ else if (WARN_ON(req->rate < 25000000))
+ req->rate = 25000000;
+ else if (req->rate == 25000000)
+ req->rate = 25000000;
+ else if (WARN_ON(req->rate < 75000000))
+ req->rate = 25000000;
+ else if (WARN_ON(req->rate < 125000000))
+ req->rate = 125000000;
+ else if (req->rate == 125000000)
+ req->rate = 125000000;
+ else if (WARN_ON(req->rate > 125000000))
+ req->rate = 125000000;
+ else
+ req->rate = 125000000;
- return 125000000;
+ return 0;
}
static int fu540_macb_tx_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
- rate = fu540_macb_tx_round_rate(hw, rate, &parent_rate);
- if (rate != 125000000)
+ struct clk_rate_request req;
+ int ret;
+
+ clk_hw_init_rate_request(hw, &req, rate);
+ ret = fu540_macb_tx_determine_rate(hw, &req);
+ if (ret != 0)
+ return ret;
+
+ if (req.rate != 125000000)
iowrite32(1, mgmt->reg);
else
iowrite32(0, mgmt->reg);
@@ -4866,7 +5099,7 @@ static int fu540_macb_tx_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops fu540_c000_ops = {
.recalc_rate = fu540_macb_tx_recalc_rate,
- .round_rate = fu540_macb_tx_round_rate,
+ .determine_rate = fu540_macb_tx_determine_rate,
.set_rate = fu540_macb_tx_set_rate,
};
@@ -5127,8 +5360,9 @@ static const struct macb_config sama7g5_emac_config = {
static const struct macb_config versal_config = {
.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO |
- MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH | MACB_CAPS_NEED_TSUCLK |
- MACB_CAPS_QUEUE_DISABLE,
+ MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH |
+ MACB_CAPS_NEED_TSUCLK | MACB_CAPS_QUEUE_DISABLE |
+ MACB_CAPS_QBV,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = init_reset_optional,
@@ -5136,6 +5370,17 @@ static const struct macb_config versal_config = {
.usrio = &macb_default_usrio,
};
+static const struct macb_config raspberrypi_rp1_config = {
+ .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_CLK_HW_CHG |
+ MACB_CAPS_JUMBO |
+ MACB_CAPS_GEM_HAS_PTP,
+ .dma_burst_length = 16,
+ .clk_init = macb_clk_init,
+ .init = macb_init,
+ .usrio = &macb_default_usrio,
+ .jumbo_max_len = 10240,
+};
+
static const struct of_device_id macb_dt_ids[] = {
{ .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
{ .compatible = "cdns,macb" },
@@ -5156,6 +5401,7 @@ static const struct of_device_id macb_dt_ids[] = {
{ .compatible = "microchip,mpfs-macb", .data = &mpfs_config },
{ .compatible = "microchip,sama7g5-gem", .data = &sama7g5_gem_config },
{ .compatible = "microchip,sama7g5-emac", .data = &sama7g5_emac_config },
+ { .compatible = "raspberrypi,rp1-gem", .data = &raspberrypi_rp1_config },
{ .compatible = "xlnx,zynqmp-gem", .data = &zynqmp_config},
{ .compatible = "xlnx,zynq-gem", .data = &zynq_config },
{ .compatible = "xlnx,versal-gem", .data = &versal_config},
@@ -5452,6 +5698,11 @@ static int __maybe_unused macb_suspend(struct device *dev)
*/
tmp = macb_readl(bp, NCR);
macb_writel(bp, NCR, tmp & ~(MACB_BIT(TE) | MACB_BIT(RE)));
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ if (!(bp->caps & MACB_CAPS_QUEUE_DISABLE))
+ macb_writel(bp, RBQPH,
+ upper_32_bits(bp->rx_ring_tieoff_dma));
+#endif
for (q = 0, queue = bp->queues; q < bp->num_queues;
++q, ++queue) {
/* Disable RX queues */
@@ -5461,10 +5712,6 @@ static int __maybe_unused macb_suspend(struct device *dev)
/* Tie off RX queues */
queue_writel(queue, RBQP,
lower_32_bits(bp->rx_ring_tieoff_dma));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- queue_writel(queue, RBQPH,
- upper_32_bits(bp->rx_ring_tieoff_dma));
-#endif
}
/* Disable all interrupts */
queue_writel(queue, IDR, -1);
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
index 674c54831875..215dac201b4a 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_core.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -472,7 +472,7 @@ int setup_rx_oom_poll_fn(struct net_device *netdev)
q_no = lio->linfo.rxpciq[q].s.q_no;
wq = &lio->rxq_status_wq[q_no];
wq->wq = alloc_workqueue("rxq-oom-status",
- WQ_MEM_RECLAIM, 0);
+ WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!wq->wq) {
dev_err(&oct->pci_dev->dev, "unable to create cavium rxq oom status wq\n");
return -ENOMEM;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 1d79f6eaa41f..8e2fcec26ea1 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -526,7 +526,8 @@ static inline int setup_link_status_change_wq(struct net_device *netdev)
struct octeon_device *oct = lio->oct_dev;
lio->link_status_wq.wq = alloc_workqueue("link-status",
- WQ_MEM_RECLAIM, 0);
+ WQ_MEM_RECLAIM | WQ_PERCPU,
+ 0);
if (!lio->link_status_wq.wq) {
dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
return -1;
@@ -659,7 +660,8 @@ static inline int setup_sync_octeon_time_wq(struct net_device *netdev)
struct octeon_device *oct = lio->oct_dev;
lio->sync_octeon_time_wq.wq =
- alloc_workqueue("update-octeon-time", WQ_MEM_RECLAIM, 0);
+ alloc_workqueue("update-octeon-time",
+ WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!lio->sync_octeon_time_wq.wq) {
dev_err(&oct->pci_dev->dev, "Unable to create wq to update octeon time\n");
return -1;
@@ -1734,7 +1736,7 @@ static inline int setup_tx_poll_fn(struct net_device *netdev)
struct octeon_device *oct = lio->oct_dev;
lio->txq_status_wq.wq = alloc_workqueue("txq-status",
- WQ_MEM_RECLAIM, 0);
+ WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!lio->txq_status_wq.wq) {
dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n");
return -1;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index 62c2eadc33e3..3230dff5ba05 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -304,7 +304,8 @@ static int setup_link_status_change_wq(struct net_device *netdev)
struct octeon_device *oct = lio->oct_dev;
lio->link_status_wq.wq = alloc_workqueue("link-status",
- WQ_MEM_RECLAIM, 0);
+ WQ_MEM_RECLAIM | WQ_PERCPU,
+ 0);
if (!lio->link_status_wq.wq) {
dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
return -1;
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
index 12105ffb5dac..d7cfb20eea00 100644
--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -132,7 +132,7 @@ int octeon_init_instr_queue(struct octeon_device *oct,
oct->fn_list.setup_iq_regs(oct, iq_no);
oct->check_db_wq[iq_no].wq = alloc_workqueue("check_iq_db",
- WQ_MEM_RECLAIM,
+ WQ_MEM_RECLAIM | WQ_PERCPU,
0);
if (!oct->check_db_wq[iq_no].wq) {
vfree(iq->request_list);
diff --git a/drivers/net/ethernet/cavium/liquidio/response_manager.c b/drivers/net/ethernet/cavium/liquidio/response_manager.c
index 861050966e18..de1a8335b545 100644
--- a/drivers/net/ethernet/cavium/liquidio/response_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/response_manager.c
@@ -39,7 +39,8 @@ int octeon_setup_response_list(struct octeon_device *oct)
}
spin_lock_init(&oct->cmd_resp_wqlock);
- oct->dma_comp_wq.wq = alloc_workqueue("dma-comp", WQ_MEM_RECLAIM, 0);
+ oct->dma_comp_wq.wq = alloc_workqueue("dma-comp",
+ WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!oct->dma_comp_wq.wq) {
dev_err(&oct->pci_dev->dev, "failed to create wq thread\n");
return -ENOMEM;
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
index 6f6525983130..4ee970f3bad6 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
@@ -171,7 +171,7 @@ static void chtls_purge_receive_queue(struct sock *sk)
struct sk_buff *skb;
while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
- skb_dst_set(skb, (void *)NULL);
+ skb_dstref_steal(skb);
kfree_skb(skb);
}
}
@@ -194,7 +194,7 @@ static void chtls_purge_recv_queue(struct sock *sk)
struct sk_buff *skb;
while ((skb = __skb_dequeue(&tlsk->sk_recv_queue)) != NULL) {
- skb_dst_set(skb, NULL);
+ skb_dstref_steal(skb);
kfree_skb(skb);
}
}
@@ -505,7 +505,7 @@ static void reset_listen_child(struct sock *child)
chtls_send_reset(child, CPL_ABORT_SEND_RST, skb);
sock_orphan(child);
- INC_ORPHAN_COUNT(child);
+ tcp_orphan_count_inc();
if (child->sk_state == TCP_CLOSE)
inet_csk_destroy_sock(child);
}
@@ -870,7 +870,7 @@ static void do_abort_syn_rcv(struct sock *child, struct sock *parent)
* created only after 3 way handshake is done.
*/
sock_orphan(child);
- INC_ORPHAN_COUNT(child);
+ tcp_orphan_count_inc();
chtls_release_resources(child);
chtls_conn_done(child);
} else {
@@ -951,6 +951,7 @@ static unsigned int chtls_select_mss(const struct chtls_sock *csk,
struct tcp_sock *tp;
unsigned int mss;
struct sock *sk;
+ u16 user_mss;
mss = ntohs(req->tcpopt.mss);
sk = csk->sk;
@@ -969,8 +970,9 @@ static unsigned int chtls_select_mss(const struct chtls_sock *csk,
tcpoptsz += round_up(TCPOLEN_TIMESTAMP, 4);
tp->advmss = dst_metric_advmss(dst);
- if (USER_MSS(tp) && tp->advmss > USER_MSS(tp))
- tp->advmss = USER_MSS(tp);
+ user_mss = USER_MSS(tp);
+ if (user_mss && tp->advmss > user_mss)
+ tp->advmss = user_mss;
if (tp->advmss > pmtu - iphdrsz)
tp->advmss = pmtu - iphdrsz;
if (mss && tp->advmss > mss)
@@ -1734,7 +1736,7 @@ static int chtls_rx_data(struct chtls_dev *cdev, struct sk_buff *skb)
pr_err("can't find conn. for hwtid %u.\n", hwtid);
return -EINVAL;
}
- skb_dst_set(skb, NULL);
+ skb_dstref_steal(skb);
process_cpl_msg(chtls_recv_data, sk, skb);
return 0;
}
@@ -1786,7 +1788,7 @@ static int chtls_rx_pdu(struct chtls_dev *cdev, struct sk_buff *skb)
pr_err("can't find conn. for hwtid %u.\n", hwtid);
return -EINVAL;
}
- skb_dst_set(skb, NULL);
+ skb_dstref_steal(skb);
process_cpl_msg(chtls_recv_pdu, sk, skb);
return 0;
}
@@ -1855,7 +1857,7 @@ static int chtls_rx_cmp(struct chtls_dev *cdev, struct sk_buff *skb)
pr_err("can't find conn. for hwtid %u.\n", hwtid);
return -EINVAL;
}
- skb_dst_set(skb, NULL);
+ skb_dstref_steal(skb);
process_cpl_msg(chtls_rx_hdr, sk, skb);
return 0;
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h
index f61ca657601c..29ceff5a5fcb 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h
@@ -90,12 +90,11 @@ struct deferred_skb_cb {
#define SND_WSCALE(tp) ((tp)->rx_opt.snd_wscale)
#define RCV_WSCALE(tp) ((tp)->rx_opt.rcv_wscale)
-#define USER_MSS(tp) ((tp)->rx_opt.user_mss)
+#define USER_MSS(tp) (READ_ONCE((tp)->rx_opt.user_mss))
#define TS_RECENT_STAMP(tp) ((tp)->rx_opt.ts_recent_stamp)
#define WSCALE_OK(tp) ((tp)->rx_opt.wscale_ok)
#define TSTAMP_OK(tp) ((tp)->rx_opt.tstamp_ok)
#define SACK_OK(tp) ((tp)->rx_opt.sack_ok)
-#define INC_ORPHAN_COUNT(sk) this_cpu_inc(*(sk)->sk_prot->orphan_count)
/* TLS SKB */
#define skb_ulp_tls_inline(skb) (ULP_SKB_CB(skb)->ulp.tls.ofld)
@@ -171,14 +170,14 @@ static inline void chtls_set_req_addr(struct request_sock *oreq,
static inline void chtls_free_skb(struct sock *sk, struct sk_buff *skb)
{
- skb_dst_set(skb, NULL);
+ skb_dstref_steal(skb);
__skb_unlink(skb, &sk->sk_receive_queue);
__kfree_skb(skb);
}
static inline void chtls_kfree_skb(struct sock *sk, struct sk_buff *skb)
{
- skb_dst_set(skb, NULL);
+ skb_dstref_steal(skb);
__skb_unlink(skb, &sk->sk_receive_queue);
kfree_skb(skb);
}
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
index 465fa8077964..4036db466e18 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
@@ -1434,7 +1434,7 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
continue;
found_ok_skb:
if (!skb->len) {
- skb_dst_set(skb, NULL);
+ skb_dstref_steal(skb);
__skb_unlink(skb, &sk->sk_receive_queue);
kfree_skb(skb);
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 6bbf6e5584e5..1996d2e4e3e2 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -964,15 +964,18 @@ receive_packet (struct net_device *dev)
} else {
struct sk_buff *skb;
+ skb = NULL;
/* Small skbuffs for short packets */
- if (pkt_len > copy_thresh) {
+ if (pkt_len <= copy_thresh)
+ skb = netdev_alloc_skb_ip_align(dev, pkt_len);
+ if (!skb) {
dma_unmap_single(&np->pdev->dev,
desc_to_dma(desc),
np->rx_buf_sz,
DMA_FROM_DEVICE);
skb_put (skb = np->rx_skbuff[entry], pkt_len);
np->rx_skbuff[entry] = NULL;
- } else if ((skb = netdev_alloc_skb_ip_align(dev, pkt_len))) {
+ } else {
dma_sync_single_for_cpu(&np->pdev->dev,
desc_to_dma(desc),
np->rx_buf_sz,
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 0f4efd505332..c96d1d6ba8fe 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -4884,7 +4884,7 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
priv->tx_tstamp_type = HWTSTAMP_TX_OFF;
priv->rx_tstamp = false;
- priv->dpaa2_ptp_wq = alloc_workqueue("dpaa2_ptp_wq", 0, 0);
+ priv->dpaa2_ptp_wq = alloc_workqueue("dpaa2_ptp_wq", WQ_PERCPU, 0);
if (!priv->dpaa2_ptp_wq) {
err = -ENOMEM;
goto err_wq_alloc;
diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig
index 54b0f0a5a6bb..117038104b69 100644
--- a/drivers/net/ethernet/freescale/enetc/Kconfig
+++ b/drivers/net/ethernet/freescale/enetc/Kconfig
@@ -28,6 +28,7 @@ config NXP_NTMP
config FSL_ENETC
tristate "ENETC PF driver"
+ depends on PTP_1588_CLOCK_OPTIONAL
depends on PCI_MSI
select FSL_ENETC_CORE
select FSL_ENETC_IERB
@@ -45,6 +46,7 @@ config FSL_ENETC
config NXP_ENETC4
tristate "ENETC4 PF driver"
+ depends on PTP_1588_CLOCK_OPTIONAL
depends on PCI_MSI
select FSL_ENETC_CORE
select FSL_ENETC_MDIO
@@ -62,6 +64,7 @@ config NXP_ENETC4
config FSL_ENETC_VF
tristate "ENETC VF driver"
+ depends on PTP_1588_CLOCK_OPTIONAL
depends on PCI_MSI
select FSL_ENETC_CORE
select FSL_ENETC_MDIO
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index e4287725832e..aae462a0cf5a 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -221,22 +221,111 @@ static void enetc_unwind_tx_frame(struct enetc_bdr *tx_ring, int count, int i)
}
}
+static void enetc_set_one_step_ts(struct enetc_si *si, bool udp, int offset)
+{
+ u32 val = ENETC_PM0_SINGLE_STEP_EN;
+
+ val |= ENETC_SET_SINGLE_STEP_OFFSET(offset);
+ if (udp)
+ val |= ENETC_PM0_SINGLE_STEP_CH;
+
+ /* The "Correction" field of a packet is updated based on the
+ * current time and the timestamp provided.
+ */
+ enetc_port_mac_wr(si, ENETC_PM0_SINGLE_STEP, val);
+}
+
+static void enetc4_set_one_step_ts(struct enetc_si *si, bool udp, int offset)
+{
+ u32 val = PM_SINGLE_STEP_EN;
+
+ val |= PM_SINGLE_STEP_OFFSET_SET(offset);
+ if (udp)
+ val |= PM_SINGLE_STEP_CH;
+
+ enetc_port_mac_wr(si, ENETC4_PM_SINGLE_STEP(0), val);
+}
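+
+/* Both helpers program the same three fields -- the enable bit, the byte
+ * offset of the PTP correction field, and the UDP checksum-update flag
+ * (*_SINGLE_STEP_CH) -- just at revision-specific register addresses;
+ * enetc_update_ptp_sync_msg() below picks one via is_enetc_rev1().
+ */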
+
+static u32 enetc_update_ptp_sync_msg(struct enetc_ndev_priv *priv,
+ struct sk_buff *skb, bool csum_offload)
+{
+ struct enetc_skb_cb *enetc_cb = ENETC_SKB_CB(skb);
+ u16 tstamp_off = enetc_cb->origin_tstamp_off;
+ u16 corr_off = enetc_cb->correction_off;
+ struct enetc_si *si = priv->si;
+ struct enetc_hw *hw = &si->hw;
+ __be32 new_sec_l, new_nsec;
+ __be16 new_sec_h;
+ u32 lo, hi, nsec;
+ u8 *data;
+ u64 sec;
+
+ lo = enetc_rd_hot(hw, ENETC_SICTR0);
+ hi = enetc_rd_hot(hw, ENETC_SICTR1);
+ sec = (u64)hi << 32 | lo;
+ nsec = do_div(sec, 1000000000);
+
+ /* Update originTimestamp field of Sync packet
+ * - 48-bit seconds field
+ * - 32-bit nanoseconds field
+ *
+ * In addition, if csum_offload is false, the UDP checksum needs
+ * to be updated by software after updating originTimestamp field,
+ * otherwise the hardware will calculate the wrong checksum when
+ * updating the correction field and update it to the packet.
+ */
+
+ data = skb_mac_header(skb);
+ new_sec_h = htons((sec >> 32) & 0xffff);
+ new_sec_l = htonl(sec & 0xffffffff);
+ new_nsec = htonl(nsec);
+ if (enetc_cb->udp && !csum_offload) {
+ struct udphdr *uh = udp_hdr(skb);
+ __be32 old_sec_l, old_nsec;
+ __be16 old_sec_h;
+
+ old_sec_h = *(__be16 *)(data + tstamp_off);
+ inet_proto_csum_replace2(&uh->check, skb, old_sec_h,
+ new_sec_h, false);
+
+ old_sec_l = *(__be32 *)(data + tstamp_off + 2);
+ inet_proto_csum_replace4(&uh->check, skb, old_sec_l,
+ new_sec_l, false);
+
+ old_nsec = *(__be32 *)(data + tstamp_off + 6);
+ inet_proto_csum_replace4(&uh->check, skb, old_nsec,
+ new_nsec, false);
+ }
+
+ *(__be16 *)(data + tstamp_off) = new_sec_h;
+ *(__be32 *)(data + tstamp_off + 2) = new_sec_l;
+ *(__be32 *)(data + tstamp_off + 6) = new_nsec;
+
+ /* Configure single-step register */
+ if (is_enetc_rev1(si))
+ enetc_set_one_step_ts(si, enetc_cb->udp, corr_off);
+ else
+ enetc4_set_one_step_ts(si, enetc_cb->udp, corr_off);
+
+ return lo & ENETC_TXBD_TSTAMP;
+}
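+
+/* Layout written above, relative to skb_mac_header(skb) + origin_tstamp_off:
+ *
+ *   +0  __be16  originTimestamp seconds, upper 16 bits
+ *   +2  __be32  originTimestamp seconds, lower 32 bits
+ *   +6  __be32  originTimestamp nanoseconds
+ *
+ * For UDP Sync frames without checksum offload, each store is paired with
+ * an inet_proto_csum_replace*() call so uh->check stays correct before the
+ * hardware later rewrites the correction field.
+ */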
+
static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
{
bool do_vlan, do_onestep_tstamp = false, do_twostep_tstamp = false;
struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev);
- struct enetc_hw *hw = &priv->si->hw;
+ struct enetc_skb_cb *enetc_cb = ENETC_SKB_CB(skb);
struct enetc_tx_swbd *tx_swbd;
int len = skb_headlen(skb);
union enetc_tx_bd temp_bd;
- u8 msgtype, twostep, udp;
+ bool csum_offload = false;
union enetc_tx_bd *txbd;
- u16 offset1, offset2;
int i, count = 0;
skb_frag_t *frag;
unsigned int f;
dma_addr_t dma;
u8 flags = 0;
+ u32 tstamp;
enetc_clear_tx_bd(&temp_bd);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -256,11 +345,19 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
temp_bd.l4_aux = FIELD_PREP(ENETC_TX_BD_L4T,
ENETC_TXBD_L4T_UDP);
flags |= ENETC_TXBD_FLAGS_CSUM_LSO | ENETC_TXBD_FLAGS_L4CS;
+ csum_offload = true;
} else if (skb_checksum_help(skb)) {
return 0;
}
}
+ if (enetc_cb->flag & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
+ do_onestep_tstamp = true;
+ tstamp = enetc_update_ptp_sync_msg(priv, skb, csum_offload);
+ } else if (enetc_cb->flag & ENETC_F_TX_TSTAMP) {
+ do_twostep_tstamp = true;
+ }
+
i = tx_ring->next_to_use;
txbd = ENETC_TXBD(*tx_ring, i);
prefetchw(txbd);
@@ -280,17 +377,6 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
count++;
do_vlan = skb_vlan_tag_present(skb);
- if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
- if (enetc_ptp_parse(skb, &udp, &msgtype, &twostep, &offset1,
- &offset2) ||
- msgtype != PTP_MSGTYPE_SYNC || twostep)
- WARN_ONCE(1, "Bad packet for one-step timestamping\n");
- else
- do_onestep_tstamp = true;
- } else if (skb->cb[0] & ENETC_F_TX_TSTAMP) {
- do_twostep_tstamp = true;
- }
-
tx_swbd->do_twostep_tstamp = do_twostep_tstamp;
tx_swbd->qbv_en = !!(priv->active_offloads & ENETC_F_QBV);
tx_swbd->check_wb = tx_swbd->do_twostep_tstamp || tx_swbd->qbv_en;
@@ -333,65 +419,9 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
}
if (do_onestep_tstamp) {
- __be32 new_sec_l, new_nsec;
- u32 lo, hi, nsec, val;
- __be16 new_sec_h;
- u8 *data;
- u64 sec;
-
- lo = enetc_rd_hot(hw, ENETC_SICTR0);
- hi = enetc_rd_hot(hw, ENETC_SICTR1);
- sec = (u64)hi << 32 | lo;
- nsec = do_div(sec, 1000000000);
-
/* Configure extension BD */
- temp_bd.ext.tstamp = cpu_to_le32(lo & 0x3fffffff);
+ temp_bd.ext.tstamp = cpu_to_le32(tstamp);
e_flags |= ENETC_TXBD_E_FLAGS_ONE_STEP_PTP;
-
- /* Update originTimestamp field of Sync packet
- * - 48 bits seconds field
- * - 32 bits nanseconds field
- *
- * In addition, the UDP checksum needs to be updated
- * by software after updating originTimestamp field,
- * otherwise the hardware will calculate the wrong
- * checksum when updating the correction field and
- * update it to the packet.
- */
- data = skb_mac_header(skb);
- new_sec_h = htons((sec >> 32) & 0xffff);
- new_sec_l = htonl(sec & 0xffffffff);
- new_nsec = htonl(nsec);
- if (udp) {
- struct udphdr *uh = udp_hdr(skb);
- __be32 old_sec_l, old_nsec;
- __be16 old_sec_h;
-
- old_sec_h = *(__be16 *)(data + offset2);
- inet_proto_csum_replace2(&uh->check, skb, old_sec_h,
- new_sec_h, false);
-
- old_sec_l = *(__be32 *)(data + offset2 + 2);
- inet_proto_csum_replace4(&uh->check, skb, old_sec_l,
- new_sec_l, false);
-
- old_nsec = *(__be32 *)(data + offset2 + 6);
- inet_proto_csum_replace4(&uh->check, skb, old_nsec,
- new_nsec, false);
- }
-
- *(__be16 *)(data + offset2) = new_sec_h;
- *(__be32 *)(data + offset2 + 2) = new_sec_l;
- *(__be32 *)(data + offset2 + 6) = new_nsec;
-
- /* Configure single-step register */
- val = ENETC_PM0_SINGLE_STEP_EN;
- val |= ENETC_SET_SINGLE_STEP_OFFSET(offset1);
- if (udp)
- val |= ENETC_PM0_SINGLE_STEP_CH;
-
- enetc_port_mac_wr(priv->si, ENETC_PM0_SINGLE_STEP,
- val);
} else if (do_twostep_tstamp) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
e_flags |= ENETC_TXBD_E_FLAGS_TWO_STEP_PTP;
@@ -938,12 +968,13 @@ err_chained_bd:
static netdev_tx_t enetc_start_xmit(struct sk_buff *skb,
struct net_device *ndev)
{
+ struct enetc_skb_cb *enetc_cb = ENETC_SKB_CB(skb);
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_bdr *tx_ring;
int count;
/* Queue one-step Sync packet if already locked */
- if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
+ if (enetc_cb->flag & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
if (test_and_set_bit_lock(ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS,
&priv->flags)) {
skb_queue_tail(&priv->tx_skbs, skb);
@@ -1005,24 +1036,29 @@ drop_packet_err:
netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev)
{
+ struct enetc_skb_cb *enetc_cb = ENETC_SKB_CB(skb);
struct enetc_ndev_priv *priv = netdev_priv(ndev);
u8 udp, msgtype, twostep;
u16 offset1, offset2;
- /* Mark tx timestamp type on skb->cb[0] if requires */
+ /* Mark tx timestamp type on enetc_cb->flag if required */
if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
- (priv->active_offloads & ENETC_F_TX_TSTAMP_MASK)) {
- skb->cb[0] = priv->active_offloads & ENETC_F_TX_TSTAMP_MASK;
- } else {
- skb->cb[0] = 0;
- }
+ (priv->active_offloads & ENETC_F_TX_TSTAMP_MASK))
+ enetc_cb->flag = priv->active_offloads & ENETC_F_TX_TSTAMP_MASK;
+ else
+ enetc_cb->flag = 0;
/* Fall back to two-step timestamp if not one-step Sync packet */
- if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
+ if (enetc_cb->flag & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
if (enetc_ptp_parse(skb, &udp, &msgtype, &twostep,
&offset1, &offset2) ||
- msgtype != PTP_MSGTYPE_SYNC || twostep != 0)
- skb->cb[0] = ENETC_F_TX_TSTAMP;
+ msgtype != PTP_MSGTYPE_SYNC || twostep != 0) {
+ enetc_cb->flag = ENETC_F_TX_TSTAMP;
+ } else {
+ enetc_cb->udp = !!udp;
+ enetc_cb->correction_off = offset1;
+ enetc_cb->origin_tstamp_off = offset2;
+ }
}
return enetc_start_xmit(skb, ndev);
@@ -1214,7 +1250,9 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
if (xdp_frame) {
xdp_return_frame(xdp_frame);
} else if (skb) {
- if (unlikely(skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP)) {
+ struct enetc_skb_cb *enetc_cb = ENETC_SKB_CB(skb);
+
+ if (unlikely(enetc_cb->flag & ENETC_F_TX_ONESTEP_SYNC_TSTAMP)) {
/* Start work to release lock for next one-step
* timestamping packet. And send one skb in
* tx_skbs queue if has.
@@ -1397,8 +1435,7 @@ static void enetc_get_offloads(struct enetc_bdr *rx_ring,
__vlan_hwaccel_put_tag(skb, tpid, le16_to_cpu(rxbd->r.vlan_opt));
}
- if (IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK) &&
- (priv->active_offloads & ENETC_F_RX_TSTAMP))
+ if (priv->active_offloads & ENETC_F_RX_TSTAMP)
enetc_get_rx_tstamp(rx_ring->ndev, rxbd, skb);
}
@@ -3301,7 +3338,7 @@ int enetc_hwtstamp_set(struct net_device *ndev,
struct enetc_ndev_priv *priv = netdev_priv(ndev);
int err, new_offloads = priv->active_offloads;
- if (!IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK))
+ if (!enetc_ptp_clock_is_enabled(priv->si))
return -EOPNOTSUPP;
switch (config->tx_type) {
@@ -3351,7 +3388,7 @@ int enetc_hwtstamp_get(struct net_device *ndev,
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- if (!IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK))
+ if (!enetc_ptp_clock_is_enabled(priv->si))
return -EOPNOTSUPP;
if (priv->active_offloads & ENETC_F_TX_ONESTEP_SYNC_TSTAMP)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index 62e8ee4d2f04..0ec010a7d640 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -54,6 +54,15 @@ struct enetc_tx_swbd {
u8 qbv_en:1;
};
+struct enetc_skb_cb {
+ u8 flag;
+ bool udp;
+ u16 correction_off;
+ u16 origin_tstamp_off;
+};
+
+#define ENETC_SKB_CB(skb) ((struct enetc_skb_cb *)((skb)->cb))
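+
+/* enetc_skb_cb is overlaid on skb->cb[], which is 48 bytes. A compile-time
+ * guard such as the following (a sketch, not part of this patch) would
+ * catch accidental growth:
+ *
+ *   static_assert(sizeof(struct enetc_skb_cb) <=
+ *                 sizeof_field(struct sk_buff, cb));
+ */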
+
struct enetc_lso_t {
bool ipv6;
bool tcp;
@@ -217,7 +226,7 @@ static inline union enetc_rx_bd *enetc_rxbd(struct enetc_bdr *rx_ring, int i)
{
int hw_idx = i;
- if (IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK) && rx_ring->ext_en)
+ if (rx_ring->ext_en)
hw_idx = 2 * i;
return &(((union enetc_rx_bd *)rx_ring->bd_base)[hw_idx]);
@@ -231,7 +240,7 @@ static inline void enetc_rxbd_next(struct enetc_bdr *rx_ring,
new_rxbd++;
- if (IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK) && rx_ring->ext_en)
+ if (rx_ring->ext_en)
new_rxbd++;
if (unlikely(++new_index == rx_ring->bd_count)) {
@@ -484,9 +493,6 @@ struct enetc_msg_cmd_set_primary_mac {
#define ENETC_CBDR_TIMEOUT 1000 /* usecs */
-/* PTP driver exports */
-extern int enetc_phc_index;
-
/* SI common */
u32 enetc_port_mac_rd(struct enetc_si *si, u32 reg);
void enetc_port_mac_wr(struct enetc_si *si, u32 reg, u32 val);
@@ -589,6 +595,14 @@ static inline void enetc_cbd_free_data_mem(struct enetc_si *si, int size,
void enetc_reset_ptcmsdur(struct enetc_hw *hw);
void enetc_set_ptcmsdur(struct enetc_hw *hw, u32 *queue_max_sdu);
+static inline bool enetc_ptp_clock_is_enabled(struct enetc_si *si)
+{
+ if (is_enetc_rev1(si))
+ return IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK);
+
+ return IS_ENABLED(CONFIG_PTP_NETC_V4_TIMER);
+}
+
#ifdef CONFIG_FSL_ENETC_QOS
int enetc_qos_query_caps(struct net_device *ndev, void *type_data);
int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_hw.h b/drivers/net/ethernet/freescale/enetc/enetc4_hw.h
index aa25b445d301..19bf0e89cdc2 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc4_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc4_hw.h
@@ -171,6 +171,12 @@
/* Port MAC 0/1 Pause Quanta Threshold Register */
#define ENETC4_PM_PAUSE_THRESH(mac) (0x5064 + (mac) * 0x400)
+#define ENETC4_PM_SINGLE_STEP(mac) (0x50c0 + (mac) * 0x400)
+#define PM_SINGLE_STEP_CH BIT(6)
+#define PM_SINGLE_STEP_OFFSET GENMASK(15, 7)
+#define PM_SINGLE_STEP_OFFSET_SET(o) FIELD_PREP(PM_SINGLE_STEP_OFFSET, o)
+#define PM_SINGLE_STEP_EN BIT(31)
+
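These bitfields describe the per-MAC one-step timestamping control register. A sketch of how a value might be composed — the programming site is not part of this hunk, so the variable names and the exact write helper are assumptions:

	u32 val = PM_SINGLE_STEP_EN |			/* enable one-step update */
		  PM_SINGLE_STEP_OFFSET_SET(offset) |	/* correction field offset */
		  (udp ? PM_SINGLE_STEP_CH : 0);	/* recompute UDP checksum */

	enetc_port_wr(hw, ENETC4_PM_SINGLE_STEP(0), val);
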
/* Port MAC 0 Interface Mode Control Register */
#define ENETC4_PM_IF_MODE(mac) (0x5300 + (mac) * 0x400)
#define PM_IF_MODE_IFMODE GENMASK(2, 0)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_pf.c b/drivers/net/ethernet/freescale/enetc/enetc4_pf.c
index b3dc1afeefd1..82c443b28b15 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc4_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc4_pf.c
@@ -569,6 +569,9 @@ static const struct net_device_ops enetc4_ndev_ops = {
.ndo_set_features = enetc4_pf_set_features,
.ndo_vlan_rx_add_vid = enetc_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = enetc_vlan_rx_del_vid,
+ .ndo_eth_ioctl = enetc_ioctl,
+ .ndo_hwtstamp_get = enetc_hwtstamp_get,
+ .ndo_hwtstamp_set = enetc_hwtstamp_set,
};
static struct phylink_pcs *
@@ -1016,8 +1019,7 @@ static int enetc4_pf_probe(struct pci_dev *pdev,
err = devm_add_action_or_reset(dev, enetc4_pci_remove, pdev);
if (err)
- return dev_err_probe(dev, err,
- "Add enetc4_pci_remove() action failed\n");
+ return err;
/* si is the private data. */
si = pci_get_drvdata(pdev);
@@ -1030,7 +1032,7 @@ static int enetc4_pf_probe(struct pci_dev *pdev,
err = enetc_get_driver_data(si);
if (err)
return dev_err_probe(dev, err,
- "Could not get VF driver data\n");
+ "Could not get PF driver data\n");
err = enetc4_pf_struct_init(si);
if (err)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index 961e76cd8489..71d052de669a 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -4,6 +4,9 @@
#include <linux/ethtool_netlink.h>
#include <linux/net_tstamp.h>
#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/ptp_clock_kernel.h>
+
#include "enetc.h"
static const u32 enetc_si_regs[] = {
@@ -877,23 +880,58 @@ static int enetc_set_coalesce(struct net_device *ndev,
return 0;
}
-static int enetc_get_ts_info(struct net_device *ndev,
- struct kernel_ethtool_ts_info *info)
+static int enetc_get_phc_index_by_pdev(struct enetc_si *si)
{
- struct enetc_ndev_priv *priv = netdev_priv(ndev);
- int *phc_idx;
-
- phc_idx = symbol_get(enetc_phc_index);
- if (phc_idx) {
- info->phc_index = *phc_idx;
- symbol_put(enetc_phc_index);
+ struct pci_bus *bus = si->pdev->bus;
+ struct pci_dev *timer_pdev;
+ unsigned int devfn;
+ int phc_index;
+
+ switch (si->revision) {
+ case ENETC_REV_1_0:
+ devfn = PCI_DEVFN(0, 4);
+ break;
+ case ENETC_REV_4_1:
+ devfn = PCI_DEVFN(24, 0);
+ break;
+ default:
+ return -1;
}
- if (!IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK)) {
- info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
+ timer_pdev = pci_get_domain_bus_and_slot(pci_domain_nr(bus),
+ bus->number, devfn);
+ if (!timer_pdev)
+ return -1;
- return 0;
- }
+ phc_index = ptp_clock_index_by_dev(&timer_pdev->dev);
+ pci_dev_put(timer_pdev);
+
+ return phc_index;
+}
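A worked example of the devfn constants above (PCI_DEVFN(slot, fn) packs the slot into bits 7:3 and the function into bits 2:0):

	/* rev 1.0: PCI_DEVFN(0, 4)  = (0 << 3)  | 4 = 0x04 (device 0, function 4)
	 * rev 4.1: PCI_DEVFN(24, 0) = (24 << 3) | 0 = 0xc0 (device 24, function 0)
	 */
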
+
+static int enetc_get_phc_index(struct enetc_si *si)
+{
+ struct device_node *np = si->pdev->dev.of_node;
+ struct device_node *timer_np;
+ int phc_index;
+
+ if (!np)
+ return enetc_get_phc_index_by_pdev(si);
+
+ timer_np = of_parse_phandle(np, "ptp-timer", 0);
+ if (!timer_np)
+ return enetc_get_phc_index_by_pdev(si);
+
+ phc_index = ptp_clock_index_by_of_node(timer_np);
+ of_node_put(timer_np);
+
+ return phc_index;
+}
+
+static void enetc_get_ts_generic_info(struct net_device *ndev,
+ struct kernel_ethtool_ts_info *info)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
@@ -908,6 +946,27 @@ static int enetc_get_ts_info(struct net_device *ndev,
info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_ALL);
+}
+
+static int enetc_get_ts_info(struct net_device *ndev,
+ struct kernel_ethtool_ts_info *info)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_si *si = priv->si;
+
+ if (!enetc_ptp_clock_is_enabled(si))
+ goto timestamp_tx_sw;
+
+ info->phc_index = enetc_get_phc_index(si);
+ if (info->phc_index < 0)
+ goto timestamp_tx_sw;
+
+ enetc_get_ts_generic_info(ndev, info);
+
+ return 0;
+
+timestamp_tx_sw:
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
return 0;
}
@@ -1296,6 +1355,7 @@ const struct ethtool_ops enetc4_pf_ethtool_ops = {
.get_rxfh = enetc_get_rxfh,
.set_rxfh = enetc_set_rxfh,
.get_rxfh_fields = enetc_get_rxfh_fields,
+ .get_ts_info = enetc_get_ts_info,
};
void enetc_set_ethtool_ops(struct net_device *ndev)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index 73763e8f4879..377c96325814 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -614,6 +614,7 @@ enum enetc_txbd_flags {
#define ENETC_TXBD_STATS_WIN BIT(7)
#define ENETC_TXBD_TXSTART_MASK GENMASK(24, 0)
#define ENETC_TXBD_FLAGS_OFFSET 24
+#define ENETC_TXBD_TSTAMP GENMASK(29, 0)
static inline __le32 enetc_txbd_set_tx_start(u64 tx_start, u8 flags)
{
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
index 5243fc031058..b8413d3b4f16 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
@@ -7,9 +7,6 @@
#include "enetc.h"
-int enetc_phc_index = -1;
-EXPORT_SYMBOL_GPL(enetc_phc_index);
-
static struct ptp_clock_info enetc_ptp_caps = {
.owner = THIS_MODULE,
.name = "ENETC PTP clock",
@@ -92,7 +89,6 @@ static int enetc_ptp_probe(struct pci_dev *pdev,
if (err)
goto err_no_clock;
- enetc_phc_index = ptp_qoriq->phc_index;
pci_set_drvdata(pdev, ptp_qoriq);
return 0;
@@ -118,7 +114,6 @@ static void enetc_ptp_remove(struct pci_dev *pdev)
{
struct ptp_qoriq *ptp_qoriq = pci_get_drvdata(pdev);
- enetc_phc_index = -1;
ptp_qoriq_free(ptp_qoriq);
pci_free_irq_vectors(pdev);
kfree(ptp_qoriq);
diff --git a/drivers/net/ethernet/freescale/enetc/ntmp.c b/drivers/net/ethernet/freescale/enetc/ntmp.c
index ba32c1bbd9e1..0c1d343253bf 100644
--- a/drivers/net/ethernet/freescale/enetc/ntmp.c
+++ b/drivers/net/ethernet/freescale/enetc/ntmp.c
@@ -52,24 +52,19 @@ int ntmp_init_cbdr(struct netc_cbdr *cbdr, struct device *dev,
cbdr->addr_base_align = PTR_ALIGN(cbdr->addr_base,
NTMP_BASE_ADDR_ALIGN);
- cbdr->next_to_clean = 0;
- cbdr->next_to_use = 0;
spin_lock_init(&cbdr->ring_lock);
+ cbdr->next_to_use = netc_read(cbdr->regs.pir);
+ cbdr->next_to_clean = netc_read(cbdr->regs.cir);
+
/* Step 1: Configure the base address of the Control BD Ring */
netc_write(cbdr->regs.bar0, lower_32_bits(cbdr->dma_base_align));
netc_write(cbdr->regs.bar1, upper_32_bits(cbdr->dma_base_align));
- /* Step 2: Configure the producer index register */
- netc_write(cbdr->regs.pir, cbdr->next_to_clean);
-
- /* Step 3: Configure the consumer index register */
- netc_write(cbdr->regs.cir, cbdr->next_to_use);
-
- /* Step4: Configure the number of BDs of the Control BD Ring */
+ /* Step 2: Configure the number of BDs of the Control BD Ring */
netc_write(cbdr->regs.lenr, cbdr->bd_num);
- /* Step 5: Enable the Control BD Ring */
+ /* Step 3: Enable the Control BD Ring */
netc_write(cbdr->regs.mr, NETC_CBDR_MR_EN);
return 0;
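
Beyond renumbering the comments, the hunk changes behaviour: the producer/consumer indices are now read back from the PIR/CIR registers instead of being forced to zero. The rationale below is inferred, not stated in the patch:

	/* Inferred rationale: reading PIR/CIR back keeps the software ring
	 * indices coherent with hardware state on re-initialisation, where
	 * zeroing them could desynchronise an already-programmed ring.
	 */
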
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 5c8fdcef759b..41e0d85d15da 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -348,10 +348,11 @@ struct bufdesc_ex {
* the skbuffer directly.
*/
+#define FEC_DRV_RESERVE_SPACE (XDP_PACKET_HEADROOM + \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define FEC_ENET_XDP_HEADROOM (XDP_PACKET_HEADROOM)
#define FEC_ENET_RX_PAGES 256
-#define FEC_ENET_RX_FRSIZE (PAGE_SIZE - FEC_ENET_XDP_HEADROOM \
- - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define FEC_ENET_RX_FRSIZE (PAGE_SIZE - FEC_DRV_RESERVE_SPACE)
#define FEC_ENET_RX_FRPPG (PAGE_SIZE / FEC_ENET_RX_FRSIZE)
#define RX_RING_SIZE (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
#define FEC_ENET_TX_FRSIZE 2048
@@ -513,6 +514,9 @@ struct bufdesc_ex {
*/
#define FEC_QUIRK_HAS_MDIO_C45 BIT(24)
+/* Jumbo Frame support */
+#define FEC_QUIRK_JUMBO_FRAME BIT(25)
+
struct bufdesc_prop {
int qid;
/* Address of Rx and Tx buffers */
@@ -619,6 +623,9 @@ struct fec_enet_private {
unsigned int total_tx_ring_size;
unsigned int total_rx_ring_size;
+ unsigned int max_buf_size;
+ unsigned int pagepool_order;
+ unsigned int rx_frame_size;
struct platform_device *pdev;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index adf1f2bbcbb1..1edcfaee6819 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -167,7 +167,8 @@ static const struct fec_devinfo fec_imx8qm_info = {
FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
- FEC_QUIRK_DELAYED_CLKS_SUPPORT | FEC_QUIRK_HAS_MDIO_C45,
+ FEC_QUIRK_DELAYED_CLKS_SUPPORT | FEC_QUIRK_HAS_MDIO_C45 |
+ FEC_QUIRK_JUMBO_FRAME,
};
static const struct fec_devinfo fec_s32v234_info = {
@@ -233,6 +234,7 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
 * 2048 byte skbufs are allocated. However, alignment requirements
 * vary between FEC variants. Worst case is 64, so round down by 64.
*/
+#define MAX_JUMBO_BUF_SIZE (round_down(16384 - FEC_DRV_RESERVE_SPACE - 64, 64))
#define PKT_MAXBUF_SIZE (round_down(2048 - 64, 64))
#define PKT_MINBUF_SIZE 64
@@ -253,9 +255,9 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
defined(CONFIG_ARM64)
-#define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16)
+#define OPT_ARCH_HAS_MAX_FL 1
#else
-#define OPT_FRAME_SIZE 0
+#define OPT_ARCH_HAS_MAX_FL 0
#endif
/* FEC MII MMFR bits definition */
@@ -470,14 +472,14 @@ fec_enet_create_page_pool(struct fec_enet_private *fep,
{
struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
struct page_pool_params pp_params = {
- .order = 0,
+ .order = fep->pagepool_order,
.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
.pool_size = size,
.nid = dev_to_node(&fep->pdev->dev),
.dev = &fep->pdev->dev,
.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
.offset = FEC_ENET_XDP_HEADROOM,
- .max_len = FEC_ENET_RX_FRSIZE,
+ .max_len = fep->rx_frame_size,
};
int err;
@@ -1083,7 +1085,7 @@ static void fec_enet_enable_ring(struct net_device *ndev)
for (i = 0; i < fep->num_rx_queues; i++) {
rxq = fep->rx_queue[i];
writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i));
- writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i));
+ writel(fep->max_buf_size, fep->hwp + FEC_R_BUFF_SIZE(i));
/* enable DMA1/2 */
if (i)
@@ -1145,8 +1147,11 @@ static void
fec_restart(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- u32 rcntl = OPT_FRAME_SIZE | FEC_RCR_MII;
u32 ecntl = FEC_ECR_ETHEREN;
+ u32 rcntl = FEC_RCR_MII;
+
+ if (OPT_ARCH_HAS_MAX_FL)
+ rcntl |= (fep->netdev->mtu + ETH_HLEN + ETH_FCS_LEN) << 16;
if (fep->bufdesc_ex)
fec_ptp_save_state(fep);
@@ -1191,7 +1196,7 @@ fec_restart(struct net_device *ndev)
else
val &= ~FEC_RACC_OPTIONS;
writel(val, fep->hwp + FEC_RACC);
- writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL);
+ writel(min(fep->rx_frame_size, fep->max_buf_size), fep->hwp + FEC_FTRL);
}
#endif
@@ -1278,8 +1283,18 @@ fec_restart(struct net_device *ndev)
if (fep->quirks & FEC_QUIRK_ENET_MAC) {
/* enable ENET endian swap */
ecntl |= FEC_ECR_BYTESWP;
- /* enable ENET store and forward mode */
- writel(FEC_TXWMRK_STRFWD, fep->hwp + FEC_X_WMRK);
+
+ /* When Jumbo Frame is enabled, the FIFO may not be large enough
+ * to hold an entire frame. In such cases, if the MTU exceeds
+ * (PKT_MAXBUF_SIZE - ETH_HLEN - ETH_FCS_LEN), configure the interface
+ * to operate in cut-through mode, triggered by the FIFO threshold.
+ * Otherwise, enable the ENET store-and-forward mode.
+ */
+ if ((fep->quirks & FEC_QUIRK_JUMBO_FRAME) &&
+ (ndev->mtu > (PKT_MAXBUF_SIZE - ETH_HLEN - ETH_FCS_LEN)))
+ writel(0xF, fep->hwp + FEC_X_WMRK);
+ else
+ writel(FEC_TXWMRK_STRFWD, fep->hwp + FEC_X_WMRK);
}
if (fep->bufdesc_ex)
@@ -1780,7 +1795,7 @@ fec_enet_rx_queue(struct net_device *ndev, u16 queue_id, int budget)
* These get messed up if we get called due to a busy condition.
*/
bdp = rxq->bd.cur;
- xdp_init_buff(&xdp, PAGE_SIZE, &rxq->xdp_rxq);
+ xdp_init_buff(&xdp, PAGE_SIZE << fep->pagepool_order, &rxq->xdp_rxq);
while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {
@@ -1850,7 +1865,8 @@ fec_enet_rx_queue(struct net_device *ndev, u16 queue_id, int budget)
* include that when passing upstream as it messes up
* bridging applications.
*/
- skb = build_skb(page_address(page), PAGE_SIZE);
+ skb = build_skb(page_address(page),
+ PAGE_SIZE << fep->pagepool_order);
if (unlikely(!skb)) {
page_pool_recycle_direct(rxq->page_pool, page);
ndev->stats.rx_dropped++;
@@ -4021,6 +4037,23 @@ static int fec_hwtstamp_set(struct net_device *ndev,
return fec_ptp_set(ndev, config, extack);
}
+static int fec_change_mtu(struct net_device *ndev, int new_mtu)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ int order;
+
+ if (netif_running(ndev))
+ return -EBUSY;
+
+ order = get_order(new_mtu + ETH_HLEN + ETH_FCS_LEN
+ + FEC_DRV_RESERVE_SPACE);
+ fep->rx_frame_size = (PAGE_SIZE << order) - FEC_DRV_RESERVE_SPACE;
+ fep->pagepool_order = order;
+ WRITE_ONCE(ndev->mtu, new_mtu);
+
+ return 0;
+}
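A worked example of the order computation, assuming 4 KiB pages, XDP_PACKET_HEADROOM = 256 and SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) = 320 (both vary by architecture and config):

	/* FEC_DRV_RESERVE_SPACE = 256 + 320 = 576
	 * new_mtu = 9000:
	 *   get_order(9000 + ETH_HLEN(14) + ETH_FCS_LEN(4) + 576)
	 *     = get_order(9594) = 2              -> 16 KiB per buffer
	 *   rx_frame_size = (4096 << 2) - 576 = 15808
	 */
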
+
static const struct net_device_ops fec_netdev_ops = {
.ndo_open = fec_enet_open,
.ndo_stop = fec_enet_close,
@@ -4030,6 +4063,7 @@ static const struct net_device_ops fec_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = fec_timeout,
.ndo_set_mac_address = fec_set_mac_address,
+ .ndo_change_mtu = fec_change_mtu,
.ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_set_features = fec_set_features,
.ndo_bpf = fec_enet_bpf,
@@ -4560,7 +4594,15 @@ fec_probe(struct platform_device *pdev)
fec_enet_clk_enable(ndev, false);
pinctrl_pm_select_sleep_state(&pdev->dev);
- ndev->max_mtu = PKT_MAXBUF_SIZE - ETH_HLEN - ETH_FCS_LEN;
+ fep->pagepool_order = 0;
+ fep->rx_frame_size = FEC_ENET_RX_FRSIZE;
+
+ if (fep->quirks & FEC_QUIRK_JUMBO_FRAME)
+ fep->max_buf_size = MAX_JUMBO_BUF_SIZE;
+ else
+ fep->max_buf_size = PKT_MAXBUF_SIZE;
+
+ ndev->max_mtu = fep->max_buf_size - ETH_HLEN - ETH_FCS_LEN;
ret = register_netdev(ndev);
if (ret)
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index a39fcea6a77a..f27ff625fe29 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -14,8 +14,6 @@
#include <linux/device.h>
#include <linux/phy.h>
#include <linux/netdevice.h>
-#include <linux/phy_fixed.h>
-#include <linux/phylink.h>
#include <linux/etherdevice.h>
#include <linux/libfdt_env.h>
#include <linux/platform_device.h>
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c b/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
index ba83dbf4ed22..1966dba512f8 100644
--- a/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
+++ b/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
@@ -930,7 +930,8 @@ static void fun_get_rmon_stats(struct net_device *netdev,
}
static void fun_get_fec_stats(struct net_device *netdev,
- struct ethtool_fec_stats *stats)
+ struct ethtool_fec_stats *stats,
+ struct ethtool_fec_hist *hist)
{
const struct funeth_priv *fp = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c b/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
index 8f5021e59e0a..0e2b703c673a 100644
--- a/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
@@ -260,6 +260,11 @@ struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv,
.offset = xdp ? XDP_PACKET_HEADROOM : 0,
};
+ if (priv->header_split_enabled) {
+ pp.flags |= PP_FLAG_ALLOW_UNREADABLE_NETMEM;
+ pp.queue_idx = rx->q_num;
+ }
+
return page_pool_create(&pp);
}
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index 7380c2b7a2d8..55393b784317 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -718,6 +718,24 @@ static int gve_rx_xsk_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
return 0;
}
+static void gve_dma_sync(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct gve_rx_buf_state_dqo *buf_state, u16 buf_len)
+{
+ struct gve_rx_slot_page_info *page_info = &buf_state->page_info;
+
+ if (rx->dqo.page_pool) {
+ page_pool_dma_sync_netmem_for_cpu(rx->dqo.page_pool,
+ page_info->netmem,
+ page_info->page_offset,
+ buf_len);
+ } else {
+ dma_sync_single_range_for_cpu(&priv->pdev->dev, buf_state->addr,
+ page_info->page_offset +
+ page_info->pad,
+ buf_len, DMA_FROM_DEVICE);
+ }
+}
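The split in gve_dma_sync() follows the buffer's provenance; the summary below is an inference from the two branches, not text from the patch:

	/* Sync primitive by buffer provenance (assumed rule):
	 *
	 *   page_pool-backed (possibly net_iov) -> page_pool_dma_sync_netmem_for_cpu()
	 *   driver-mapped kernel page           -> dma_sync_single_range_for_cpu()
	 *
	 * A net_iov has no CPU mapping for the raw DMA sync to operate on.
	 */
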
+
/* Returns 0 if descriptor is completed successfully.
* Returns -EINVAL if descriptor is invalid.
* Returns -ENOMEM if data cannot be copied to skb.
@@ -793,13 +811,18 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
rx->rx_hsplit_unsplit_pkt += unsplit;
rx->rx_hsplit_bytes += hdr_len;
u64_stats_update_end(&rx->statss);
+ } else if (!rx->ctx.skb_head && rx->dqo.page_pool &&
+ netmem_is_net_iov(buf_state->page_info.netmem)) {
+ /* When header split is disabled, the header lands in the packet
+ * buffer. If that buffer is a net_iov, it cannot be easily mapped
+ * into kernel address space to access the header required to
+ * process the packet.
+ */
+ goto error;
}
/* Sync the portion of dma buffer for CPU to read. */
- dma_sync_single_range_for_cpu(&priv->pdev->dev, buf_state->addr,
- buf_state->page_info.page_offset +
- buf_state->page_info.pad,
- buf_len, DMA_FROM_DEVICE);
+ gve_dma_sync(priv, rx, buf_state, buf_len);
/* Append to current skb if one exists. */
if (rx->ctx.skb_head) {
@@ -837,7 +860,9 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
u64_stats_update_end(&rx->statss);
}
- if (eop && buf_len <= priv->rx_copybreak) {
+ if (eop && buf_len <= priv->rx_copybreak &&
+ !(rx->dqo.page_pool &&
+ netmem_is_net_iov(buf_state->page_info.netmem))) {
rx->ctx.skb_head = gve_rx_copy(priv->dev, napi,
&buf_state->page_info, buf_len);
if (unlikely(!rx->ctx.skb_head))
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c
index 2e64dc1ab355..0b92a2e5e986 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c
@@ -417,7 +417,7 @@ static int hbg_pci_init(struct pci_dev *pdev)
priv->io_base = pcim_iomap_table(pdev)[0];
if (!priv->io_base)
- return dev_err_probe(dev, -ENOMEM, "failed to get io base\n");
+ return -ENOMEM;
pci_set_master(pdev);
return 0;
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
index 8b7b476ed7fb..37791de47f6f 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
@@ -278,8 +278,7 @@ int hbg_mdio_init(struct hbg_priv *priv)
mdio_bus = devm_mdiobus_alloc(dev);
if (!mdio_bus)
- return dev_err_probe(dev, -ENOMEM,
- "failed to alloc MDIO bus\n");
+ return -ENOMEM;
mdio_bus->parent = dev;
mdio_bus->priv = priv;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 0255c8acb744..4cce4f4ba6b0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -843,7 +843,7 @@ static int hns3_dbg_bd_file_init(struct hnae3_handle *handle, u32 cmd)
entry_dir = hns3_dbg_dentry[hns3_dbg_cmd[cmd].dentry].dentry;
max_queue_num = hns3_get_max_available_channels(handle);
- data = devm_kzalloc(&handle->pdev->dev, max_queue_num * sizeof(*data),
+ data = devm_kcalloc(&handle->pdev->dev, max_queue_num, sizeof(*data),
GFP_KERNEL);
if (!data)
return -ENOMEM;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index d5454e126c85..a5eefa28454c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -1659,7 +1659,8 @@ static void hns3_set_msglevel(struct net_device *netdev, u32 msg_level)
}
static void hns3_get_fec_stats(struct net_device *netdev,
- struct ethtool_fec_stats *fec_stats)
+ struct ethtool_fec_stats *fec_stats,
+ struct ethtool_fec_hist *hist)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
@@ -1927,6 +1928,31 @@ static int hns3_set_tx_spare_buf_size(struct net_device *netdev,
return ret;
}
+static int hns3_check_tx_copybreak(struct net_device *netdev, u32 copybreak)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+
+ if (copybreak < priv->min_tx_copybreak) {
+ netdev_err(netdev, "tx copybreak %u should be no less than %u!\n",
+ copybreak, priv->min_tx_copybreak);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int hns3_check_tx_spare_buf_size(struct net_device *netdev, u32 buf_size)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+
+ if (buf_size < priv->min_tx_spare_buf_size) {
+ netdev_err(netdev,
+ "tx spare buf size %u should be no less than %u!\n",
+ buf_size, priv->min_tx_spare_buf_size);
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int hns3_set_tunable(struct net_device *netdev,
const struct ethtool_tunable *tuna,
const void *data)
@@ -1943,6 +1969,10 @@ static int hns3_set_tunable(struct net_device *netdev,
switch (tuna->id) {
case ETHTOOL_TX_COPYBREAK:
+ ret = hns3_check_tx_copybreak(netdev, *(u32 *)data);
+ if (ret)
+ return ret;
+
priv->tx_copybreak = *(u32 *)data;
for (i = 0; i < h->kinfo.num_tqps; i++)
@@ -1957,6 +1987,10 @@ static int hns3_set_tunable(struct net_device *netdev,
break;
case ETHTOOL_TX_COPYBREAK_BUF_SIZE:
+ ret = hns3_check_tx_spare_buf_size(netdev, *(u32 *)data);
+ if (ret)
+ return ret;
+
old_tx_spare_buf_size = h->kinfo.tx_spare_buf_size;
new_tx_spare_buf_size = *(u32 *)data;
netdev_info(netdev, "request to set tx spare buf size from %u to %u\n",
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index f209a05e2033..9d34d28ff168 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -2182,8 +2182,8 @@ static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}
-static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
- struct hclge_pkt_buf_alloc *buf_alloc)
+static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
+ struct hclge_pkt_buf_alloc *buf_alloc)
{
#define COMPENSATE_BUFFER 0x3C00
#define COMPENSATE_HALF_MPS_NUM 5
@@ -12912,7 +12912,8 @@ static int __init hclge_init(void)
{
pr_debug("%s is initializing\n", HCLGE_NAME);
- hclge_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGE_NAME);
+ hclge_wq = alloc_workqueue("%s", WQ_UNBOUND, 0,
+ HCLGE_NAME);
if (!hclge_wq) {
pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
return -ENOMEM;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c
index 03e42512a2d5..300bc267a259 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c
@@ -443,8 +443,9 @@ int hinic_health_reporters_create(struct hinic_devlink_priv *priv)
struct devlink *devlink = priv_to_devlink(priv);
priv->hw_fault_reporter =
- devlink_health_reporter_create(devlink, &hinic_hw_fault_reporter_ops,
- 0, priv);
+ devlink_health_reporter_create(devlink,
+ &hinic_hw_fault_reporter_ops,
+ priv);
if (IS_ERR(priv->hw_fault_reporter)) {
dev_warn(&priv->hwdev->hwif->pdev->dev, "Failed to create hw fault reporter, err: %ld\n",
PTR_ERR(priv->hw_fault_reporter));
@@ -452,8 +453,9 @@ int hinic_health_reporters_create(struct hinic_devlink_priv *priv)
}
priv->fw_fault_reporter =
- devlink_health_reporter_create(devlink, &hinic_fw_fault_reporter_ops,
- 0, priv);
+ devlink_health_reporter_create(devlink,
+ &hinic_fw_fault_reporter_ops,
+ priv);
if (IS_ERR(priv->fw_fault_reporter)) {
dev_warn(&priv->hwdev->hwif->pdev->dev, "Failed to create fw fault reporter, err: %ld\n",
PTR_ERR(priv->fw_fault_reporter));
diff --git a/drivers/net/ethernet/huawei/hinic3/Makefile b/drivers/net/ethernet/huawei/hinic3/Makefile
index 509dfbfb0e96..c3efa45a6a42 100644
--- a/drivers/net/ethernet/huawei/hinic3/Makefile
+++ b/drivers/net/ethernet/huawei/hinic3/Makefile
@@ -3,7 +3,9 @@
obj-$(CONFIG_HINIC3) += hinic3.o
-hinic3-objs := hinic3_common.o \
+hinic3-objs := hinic3_cmdq.o \
+ hinic3_common.o \
+ hinic3_eqs.o \
hinic3_hw_cfg.o \
hinic3_hw_comm.o \
hinic3_hwdev.o \
@@ -12,10 +14,12 @@ hinic3-objs := hinic3_common.o \
hinic3_lld.o \
hinic3_main.o \
hinic3_mbox.o \
+ hinic3_mgmt.o \
hinic3_netdev_ops.o \
hinic3_nic_cfg.o \
hinic3_nic_io.o \
hinic3_queue_common.o \
+ hinic3_rss.o \
hinic3_rx.o \
hinic3_tx.o \
hinic3_wq.o
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.c b/drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.c
new file mode 100644
index 000000000000..ef539d1b69a3
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.c
@@ -0,0 +1,915 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+
+#include "hinic3_cmdq.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
+#include "hinic3_mbox.h"
+
+#define CMDQ_BUF_SIZE 2048
+#define CMDQ_WQEBB_SIZE 64
+
+#define CMDQ_CMD_TIMEOUT 5000
+#define CMDQ_ENABLE_WAIT_TIMEOUT 300
+
+#define CMDQ_CTXT_CURR_WQE_PAGE_PFN_MASK GENMASK_ULL(51, 0)
+#define CMDQ_CTXT_EQ_ID_MASK GENMASK_ULL(60, 53)
+#define CMDQ_CTXT_CEQ_ARM_MASK BIT_ULL(61)
+#define CMDQ_CTXT_CEQ_EN_MASK BIT_ULL(62)
+#define CMDQ_CTXT_HW_BUSY_BIT_MASK BIT_ULL(63)
+
+#define CMDQ_CTXT_WQ_BLOCK_PFN_MASK GENMASK_ULL(51, 0)
+#define CMDQ_CTXT_CI_MASK GENMASK_ULL(63, 52)
+#define CMDQ_CTXT_SET(val, member) \
+ FIELD_PREP(CMDQ_CTXT_##member##_MASK, val)
+
+#define CMDQ_WQE_HDR_BUFDESC_LEN_MASK GENMASK(7, 0)
+#define CMDQ_WQE_HDR_COMPLETE_FMT_MASK BIT(15)
+#define CMDQ_WQE_HDR_DATA_FMT_MASK BIT(22)
+#define CMDQ_WQE_HDR_COMPLETE_REQ_MASK BIT(23)
+#define CMDQ_WQE_HDR_COMPLETE_SECT_LEN_MASK GENMASK(28, 27)
+#define CMDQ_WQE_HDR_CTRL_LEN_MASK GENMASK(30, 29)
+#define CMDQ_WQE_HDR_HW_BUSY_BIT_MASK BIT(31)
+#define CMDQ_WQE_HDR_SET(val, member) \
+ FIELD_PREP(CMDQ_WQE_HDR_##member##_MASK, val)
+#define CMDQ_WQE_HDR_GET(val, member) \
+ FIELD_GET(CMDQ_WQE_HDR_##member##_MASK, le32_to_cpu(val))
+
+#define CMDQ_CTRL_PI_MASK GENMASK(15, 0)
+#define CMDQ_CTRL_CMD_MASK GENMASK(23, 16)
+#define CMDQ_CTRL_MOD_MASK GENMASK(28, 24)
+#define CMDQ_CTRL_HW_BUSY_BIT_MASK BIT(31)
+#define CMDQ_CTRL_SET(val, member) \
+ FIELD_PREP(CMDQ_CTRL_##member##_MASK, val)
+#define CMDQ_CTRL_GET(val, member) \
+ FIELD_GET(CMDQ_CTRL_##member##_MASK, val)
+
+#define CMDQ_WQE_ERRCODE_VAL_MASK GENMASK(30, 0)
+#define CMDQ_WQE_ERRCODE_GET(val, member) \
+ FIELD_GET(CMDQ_WQE_ERRCODE_##member##_MASK, le32_to_cpu(val))
+
+#define CMDQ_DB_INFO_HI_PROD_IDX_MASK GENMASK(7, 0)
+#define CMDQ_DB_INFO_SET(val, member) \
+ FIELD_PREP(CMDQ_DB_INFO_##member##_MASK, val)
+
+#define CMDQ_DB_HEAD_QUEUE_TYPE_MASK BIT(23)
+#define CMDQ_DB_HEAD_CMDQ_TYPE_MASK GENMASK(26, 24)
+#define CMDQ_DB_HEAD_SET(val, member) \
+ FIELD_PREP(CMDQ_DB_HEAD_##member##_MASK, val)
+
+#define CMDQ_CEQE_TYPE_MASK GENMASK(2, 0)
+#define CMDQ_CEQE_GET(val, member) \
+ FIELD_GET(CMDQ_CEQE_##member##_MASK, le32_to_cpu(val))
+
+#define CMDQ_WQE_HEADER(wqe) ((struct cmdq_header *)(wqe))
+#define CMDQ_WQE_COMPLETED(ctrl_info) \
+ CMDQ_CTRL_GET(le32_to_cpu(ctrl_info), HW_BUSY_BIT)
+
+#define CMDQ_PFN(addr) ((addr) >> 12)
+
+/* cmdq work queue's chip logical address table is up to 512B */
+#define CMDQ_WQ_CLA_SIZE 512
+
+/* Completion codes: send, direct sync, force stop */
+#define CMDQ_SEND_CMPT_CODE 10
+#define CMDQ_DIRECT_SYNC_CMPT_CODE 11
+#define CMDQ_FORCE_STOP_CMPT_CODE 12
+
+enum cmdq_data_format {
+ CMDQ_DATA_SGE = 0,
+ CMDQ_DATA_DIRECT = 1,
+};
+
+enum cmdq_ctrl_sect_len {
+ CMDQ_CTRL_SECT_LEN = 1,
+ CMDQ_CTRL_DIRECT_SECT_LEN = 2,
+};
+
+enum cmdq_bufdesc_len {
+ CMDQ_BUFDESC_LCMD_LEN = 2,
+ CMDQ_BUFDESC_SCMD_LEN = 3,
+};
+
+enum cmdq_completion_format {
+ CMDQ_COMPLETE_DIRECT = 0,
+ CMDQ_COMPLETE_SGE = 1,
+};
+
+enum cmdq_cmd_type {
+ CMDQ_CMD_DIRECT_RESP,
+ CMDQ_CMD_SGE_RESP,
+};
+
+#define CMDQ_WQE_NUM_WQEBBS 1
+
+static struct cmdq_wqe *cmdq_read_wqe(struct hinic3_wq *wq, u16 *ci)
+{
+ if (hinic3_wq_get_used(wq) == 0)
+ return NULL;
+
+ *ci = wq->cons_idx & wq->idx_mask;
+
+ return get_q_element(&wq->qpages, wq->cons_idx, NULL);
+}
+
+struct hinic3_cmd_buf *hinic3_alloc_cmd_buf(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_cmd_buf *cmd_buf;
+ struct hinic3_cmdqs *cmdqs;
+
+ cmdqs = hwdev->cmdqs;
+
+ cmd_buf = kmalloc(sizeof(*cmd_buf), GFP_ATOMIC);
+ if (!cmd_buf)
+ return NULL;
+
+ cmd_buf->buf = dma_pool_alloc(cmdqs->cmd_buf_pool, GFP_ATOMIC,
+ &cmd_buf->dma_addr);
+ if (!cmd_buf->buf) {
+ dev_err(hwdev->dev, "Failed to allocate cmdq cmd buf from the pool\n");
+ goto err_free_cmd_buf;
+ }
+
+ cmd_buf->size = cpu_to_le16(CMDQ_BUF_SIZE);
+ refcount_set(&cmd_buf->ref_cnt, 1);
+
+ return cmd_buf;
+
+err_free_cmd_buf:
+ kfree(cmd_buf);
+
+ return NULL;
+}
+
+void hinic3_free_cmd_buf(struct hinic3_hwdev *hwdev,
+ struct hinic3_cmd_buf *cmd_buf)
+{
+ struct hinic3_cmdqs *cmdqs;
+
+ if (!refcount_dec_and_test(&cmd_buf->ref_cnt))
+ return;
+
+ cmdqs = hwdev->cmdqs;
+
+ dma_pool_free(cmdqs->cmd_buf_pool, cmd_buf->buf, cmd_buf->dma_addr);
+ kfree(cmd_buf);
+}
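A hypothetical caller sketch (names are illustrative, not from the patch) showing the buffer lifecycle: the submit path takes its own reference on buf_in, so the caller can drop its reference unconditionally afterwards:

	static int example_issue_cmd(struct hinic3_hwdev *hwdev, u8 mod, u8 cmd,
				     const void *req, u16 len)
	{
		struct hinic3_cmd_buf *buf;
		__le64 out = 0;
		int err;

		buf = hinic3_alloc_cmd_buf(hwdev);
		if (!buf)
			return -ENOMEM;

		/* req must fit the 2 KiB pool buffer (CMDQ_BUF_SIZE) */
		memcpy(buf->buf, req, len);
		buf->size = cpu_to_le16(len);

		err = hinic3_cmdq_direct_resp(hwdev, mod, cmd, buf, &out);

		/* drops the caller's reference; the completion path drops its own */
		hinic3_free_cmd_buf(hwdev, buf);

		return err;
	}
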
+
+static void cmdq_clear_cmd_buf(struct hinic3_cmdq_cmd_info *cmd_info,
+ struct hinic3_hwdev *hwdev)
+{
+ if (cmd_info->buf_in) {
+ hinic3_free_cmd_buf(hwdev, cmd_info->buf_in);
+ cmd_info->buf_in = NULL;
+ }
+}
+
+static void clear_wqe_complete_bit(struct hinic3_cmdq *cmdq,
+ struct cmdq_wqe *wqe, u16 ci)
+{
+ struct cmdq_header *hdr = CMDQ_WQE_HEADER(wqe);
+ __le32 header_info = hdr->header_info;
+ enum cmdq_data_format df;
+ struct cmdq_ctrl *ctrl;
+
+ df = CMDQ_WQE_HDR_GET(header_info, DATA_FMT);
+ if (df == CMDQ_DATA_SGE)
+ ctrl = &wqe->wqe_lcmd.ctrl;
+ else
+ ctrl = &wqe->wqe_scmd.ctrl;
+
+ /* clear HW busy bit */
+ ctrl->ctrl_info = 0;
+ cmdq->cmd_infos[ci].cmd_type = HINIC3_CMD_TYPE_NONE;
+ wmb(); /* ensure the wqe is cleared before updating ci */
+ hinic3_wq_put_wqebbs(&cmdq->wq, CMDQ_WQE_NUM_WQEBBS);
+}
+
+static void cmdq_update_cmd_status(struct hinic3_cmdq *cmdq, u16 prod_idx,
+ struct cmdq_wqe *wqe)
+{
+ struct hinic3_cmdq_cmd_info *cmd_info;
+ struct cmdq_wqe_lcmd *wqe_lcmd;
+ __le32 status_info;
+
+ wqe_lcmd = &wqe->wqe_lcmd;
+ cmd_info = &cmdq->cmd_infos[prod_idx];
+ if (cmd_info->errcode) {
+ status_info = wqe_lcmd->status.status_info;
+ *cmd_info->errcode = CMDQ_WQE_ERRCODE_GET(status_info, VAL);
+ }
+
+ if (cmd_info->direct_resp)
+ *cmd_info->direct_resp = wqe_lcmd->completion.resp.direct.val;
+}
+
+static void cmdq_sync_cmd_handler(struct hinic3_cmdq *cmdq,
+ struct cmdq_wqe *wqe, u16 ci)
+{
+ spin_lock(&cmdq->cmdq_lock);
+ cmdq_update_cmd_status(cmdq, ci, wqe);
+ if (cmdq->cmd_infos[ci].cmpt_code) {
+ *cmdq->cmd_infos[ci].cmpt_code = CMDQ_DIRECT_SYNC_CMPT_CODE;
+ cmdq->cmd_infos[ci].cmpt_code = NULL;
+ }
+
+ /* Ensure that completion code has been updated before updating done */
+ smp_wmb();
+ if (cmdq->cmd_infos[ci].done) {
+ complete(cmdq->cmd_infos[ci].done);
+ cmdq->cmd_infos[ci].done = NULL;
+ }
+ spin_unlock(&cmdq->cmdq_lock);
+
+ cmdq_clear_cmd_buf(&cmdq->cmd_infos[ci], cmdq->hwdev);
+ clear_wqe_complete_bit(cmdq, wqe, ci);
+}
+
+void hinic3_cmdq_ceq_handler(struct hinic3_hwdev *hwdev, __le32 ceqe_data)
+{
+ enum hinic3_cmdq_type cmdq_type = CMDQ_CEQE_GET(ceqe_data, TYPE);
+ struct hinic3_cmdqs *cmdqs = hwdev->cmdqs;
+ struct hinic3_cmdq_cmd_info *cmd_info;
+ struct cmdq_wqe_lcmd *wqe_lcmd;
+ struct hinic3_cmdq *cmdq;
+ struct cmdq_wqe *wqe;
+ __le32 ctrl_info;
+ u16 ci;
+
+ if (unlikely(cmdq_type >= ARRAY_SIZE(cmdqs->cmdq)))
+ return;
+
+ cmdq = &cmdqs->cmdq[cmdq_type];
+ while ((wqe = cmdq_read_wqe(&cmdq->wq, &ci)) != NULL) {
+ cmd_info = &cmdq->cmd_infos[ci];
+ switch (cmd_info->cmd_type) {
+ case HINIC3_CMD_TYPE_NONE:
+ return;
+ case HINIC3_CMD_TYPE_TIMEOUT:
+ dev_warn(hwdev->dev, "Cmdq timeout, q_id: %u, ci: %u\n",
+ cmdq_type, ci);
+ fallthrough;
+ case HINIC3_CMD_TYPE_FAKE_TIMEOUT:
+ cmdq_clear_cmd_buf(cmd_info, hwdev);
+ clear_wqe_complete_bit(cmdq, wqe, ci);
+ break;
+ default:
+ /* Only the arm bit uses an scmd wqe;
+ * all other wqes are lcmd.
+ */
+ wqe_lcmd = &wqe->wqe_lcmd;
+ ctrl_info = wqe_lcmd->ctrl.ctrl_info;
+ if (!CMDQ_WQE_COMPLETED(ctrl_info))
+ return;
+
+ dma_rmb(); /* read the wqe payload only after seeing the busy bit */
+ /* For FORCE_STOP cmd_type, we also need to wait for
+ * the firmware processing to complete to prevent the
+ * firmware from accessing the released cmd_buf
+ */
+ if (cmd_info->cmd_type == HINIC3_CMD_TYPE_FORCE_STOP) {
+ cmdq_clear_cmd_buf(cmd_info, hwdev);
+ clear_wqe_complete_bit(cmdq, wqe, ci);
+ } else {
+ cmdq_sync_cmd_handler(cmdq, wqe, ci);
+ }
+
+ break;
+ }
+ }
+}
+
+static int wait_cmdqs_enable(struct hinic3_cmdqs *cmdqs)
+{
+ unsigned long end;
+
+ end = jiffies + msecs_to_jiffies(CMDQ_ENABLE_WAIT_TIMEOUT);
+ do {
+ if (cmdqs->status & HINIC3_CMDQ_ENABLE)
+ return 0;
+ usleep_range(1000, 2000);
+ } while (time_before(jiffies, end) && !cmdqs->disable_flag);
+
+ cmdqs->disable_flag = 1;
+
+ return -EBUSY;
+}
+
+static void cmdq_set_completion(struct cmdq_completion *complete,
+ struct hinic3_cmd_buf *buf_out)
+{
+ struct hinic3_sge *sge = &complete->resp.sge;
+
+ hinic3_set_sge(sge, buf_out->dma_addr, cpu_to_le32(CMDQ_BUF_SIZE));
+}
+
+static struct cmdq_wqe *cmdq_get_wqe(struct hinic3_wq *wq, u16 *pi)
+{
+ if (!hinic3_wq_free_wqebbs(wq))
+ return NULL;
+
+ return hinic3_wq_get_one_wqebb(wq, pi);
+}
+
+static void cmdq_set_lcmd_bufdesc(struct cmdq_wqe_lcmd *wqe,
+ struct hinic3_cmd_buf *buf_in)
+{
+ hinic3_set_sge(&wqe->buf_desc.sge, buf_in->dma_addr,
+ (__force __le32)buf_in->size);
+}
+
+static void cmdq_set_db(struct hinic3_cmdq *cmdq,
+ enum hinic3_cmdq_type cmdq_type, u16 prod_idx)
+{
+ u8 __iomem *db_base = cmdq->hwdev->cmdqs->cmdqs_db_base;
+ u16 db_ofs = (prod_idx & 0xFF) << 3;
+ struct cmdq_db db;
+
+ db.db_info = cpu_to_le32(CMDQ_DB_INFO_SET(prod_idx >> 8, HI_PROD_IDX));
+ db.db_head = cpu_to_le32(CMDQ_DB_HEAD_SET(1, QUEUE_TYPE) |
+ CMDQ_DB_HEAD_SET(cmdq_type, CMDQ_TYPE));
+ writeq(*(u64 *)&db, db_base + db_ofs);
+}
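A worked example of the doorbell split: the low 8 bits of the producer index select the doorbell offset, while the high bits travel in db_info:

	/* prod_idx = 0x123:
	 *   db_ofs  = (0x123 & 0xFF) << 3 = 0x118
	 *   db_info = CMDQ_DB_INFO_SET(0x123 >> 8, HI_PROD_IDX) = 0x01
	 */
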
+
+static void cmdq_wqe_fill(struct cmdq_wqe *hw_wqe,
+ const struct cmdq_wqe *shadow_wqe)
+{
+ const struct cmdq_header *src = (struct cmdq_header *)shadow_wqe;
+ struct cmdq_header *dst = (struct cmdq_header *)hw_wqe;
+ size_t len;
+
+ len = sizeof(struct cmdq_wqe) - sizeof(struct cmdq_header);
+ memcpy(dst + 1, src + 1, len);
+ /* Ensure the wqe body is fully written before publishing the header */
+ wmb();
+ WRITE_ONCE(*dst, *src);
+}
+
+static void cmdq_prepare_wqe_ctrl(struct cmdq_wqe *wqe, u8 wrapped,
+ u8 mod, u8 cmd, u16 prod_idx,
+ enum cmdq_completion_format complete_format,
+ enum cmdq_data_format data_format,
+ enum cmdq_bufdesc_len buf_len)
+{
+ struct cmdq_header *hdr = CMDQ_WQE_HEADER(wqe);
+ enum cmdq_ctrl_sect_len ctrl_len;
+ struct cmdq_wqe_lcmd *wqe_lcmd;
+ struct cmdq_wqe_scmd *wqe_scmd;
+ struct cmdq_ctrl *ctrl;
+
+ if (data_format == CMDQ_DATA_SGE) {
+ wqe_lcmd = &wqe->wqe_lcmd;
+ wqe_lcmd->status.status_info = 0;
+ ctrl = &wqe_lcmd->ctrl;
+ ctrl_len = CMDQ_CTRL_SECT_LEN;
+ } else {
+ wqe_scmd = &wqe->wqe_scmd;
+ wqe_scmd->status.status_info = 0;
+ ctrl = &wqe_scmd->ctrl;
+ ctrl_len = CMDQ_CTRL_DIRECT_SECT_LEN;
+ }
+
+ ctrl->ctrl_info =
+ cpu_to_le32(CMDQ_CTRL_SET(prod_idx, PI) |
+ CMDQ_CTRL_SET(cmd, CMD) |
+ CMDQ_CTRL_SET(mod, MOD));
+
+ hdr->header_info =
+ cpu_to_le32(CMDQ_WQE_HDR_SET(buf_len, BUFDESC_LEN) |
+ CMDQ_WQE_HDR_SET(complete_format, COMPLETE_FMT) |
+ CMDQ_WQE_HDR_SET(data_format, DATA_FMT) |
+ CMDQ_WQE_HDR_SET(1, COMPLETE_REQ) |
+ CMDQ_WQE_HDR_SET(3, COMPLETE_SECT_LEN) |
+ CMDQ_WQE_HDR_SET(ctrl_len, CTRL_LEN) |
+ CMDQ_WQE_HDR_SET(wrapped, HW_BUSY_BIT));
+}
+
+static void cmdq_set_lcmd_wqe(struct cmdq_wqe *wqe,
+ enum cmdq_cmd_type cmd_type,
+ struct hinic3_cmd_buf *buf_in,
+ struct hinic3_cmd_buf *buf_out,
+ u8 wrapped, u8 mod, u8 cmd, u16 prod_idx)
+{
+ enum cmdq_completion_format complete_format = CMDQ_COMPLETE_DIRECT;
+ struct cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd;
+
+ switch (cmd_type) {
+ case CMDQ_CMD_DIRECT_RESP:
+ wqe_lcmd->completion.resp.direct.val = 0;
+ break;
+ case CMDQ_CMD_SGE_RESP:
+ if (buf_out) {
+ complete_format = CMDQ_COMPLETE_SGE;
+ cmdq_set_completion(&wqe_lcmd->completion, buf_out);
+ }
+ break;
+ }
+
+ cmdq_prepare_wqe_ctrl(wqe, wrapped, mod, cmd, prod_idx, complete_format,
+ CMDQ_DATA_SGE, CMDQ_BUFDESC_LCMD_LEN);
+ cmdq_set_lcmd_bufdesc(wqe_lcmd, buf_in);
+}
+
+static int hinic3_cmdq_sync_timeout_check(struct hinic3_cmdq *cmdq,
+ struct cmdq_wqe *wqe, u16 pi)
+{
+ struct cmdq_wqe_lcmd *wqe_lcmd;
+ struct cmdq_ctrl *ctrl;
+ __le32 ctrl_info;
+
+ wqe_lcmd = &wqe->wqe_lcmd;
+ ctrl = &wqe_lcmd->ctrl;
+ ctrl_info = ctrl->ctrl_info;
+ if (!CMDQ_WQE_COMPLETED(ctrl_info)) {
+ dev_dbg(cmdq->hwdev->dev, "Cmdq sync command check busy bit not set\n");
+ return -EFAULT;
+ }
+ cmdq_update_cmd_status(cmdq, pi, wqe);
+
+ return 0;
+}
+
+static void clear_cmd_info(struct hinic3_cmdq_cmd_info *cmd_info,
+ const struct hinic3_cmdq_cmd_info *saved_cmd_info)
+{
+ if (cmd_info->errcode == saved_cmd_info->errcode)
+ cmd_info->errcode = NULL;
+
+ if (cmd_info->done == saved_cmd_info->done)
+ cmd_info->done = NULL;
+
+ if (cmd_info->direct_resp == saved_cmd_info->direct_resp)
+ cmd_info->direct_resp = NULL;
+}
+
+static int wait_cmdq_sync_cmd_completion(struct hinic3_cmdq *cmdq,
+ struct hinic3_cmdq_cmd_info *cmd_info,
+ struct hinic3_cmdq_cmd_info *saved_cmd_info,
+ u64 curr_msg_id, u16 curr_prod_idx,
+ struct cmdq_wqe *curr_wqe,
+ u32 timeout)
+{
+ ulong timeo = msecs_to_jiffies(timeout);
+ int err;
+
+ if (wait_for_completion_timeout(saved_cmd_info->done, timeo))
+ return 0;
+
+ spin_lock_bh(&cmdq->cmdq_lock);
+ if (cmd_info->cmpt_code == saved_cmd_info->cmpt_code)
+ cmd_info->cmpt_code = NULL;
+
+ if (*saved_cmd_info->cmpt_code == CMDQ_DIRECT_SYNC_CMPT_CODE) {
+ dev_dbg(cmdq->hwdev->dev, "Cmdq direct sync command has been completed\n");
+ spin_unlock_bh(&cmdq->cmdq_lock);
+ return 0;
+ }
+
+ if (curr_msg_id == cmd_info->cmdq_msg_id) {
+ err = hinic3_cmdq_sync_timeout_check(cmdq, curr_wqe,
+ curr_prod_idx);
+ if (err)
+ cmd_info->cmd_type = HINIC3_CMD_TYPE_TIMEOUT;
+ else
+ cmd_info->cmd_type = HINIC3_CMD_TYPE_FAKE_TIMEOUT;
+ } else {
+ err = -ETIMEDOUT;
+ dev_err(cmdq->hwdev->dev,
+ "Cmdq sync command current msg id mismatch cmd_info msg id\n");
+ }
+
+ clear_cmd_info(cmd_info, saved_cmd_info);
+ spin_unlock_bh(&cmdq->cmdq_lock);
+
+ return err;
+}
+
+static int cmdq_sync_cmd_direct_resp(struct hinic3_cmdq *cmdq, u8 mod, u8 cmd,
+ struct hinic3_cmd_buf *buf_in,
+ __le64 *out_param)
+{
+ struct hinic3_cmdq_cmd_info *cmd_info, saved_cmd_info;
+ int cmpt_code = CMDQ_SEND_CMPT_CODE;
+ struct cmdq_wqe *curr_wqe, wqe = {};
+ struct hinic3_wq *wq = &cmdq->wq;
+ u16 curr_prod_idx, next_prod_idx;
+ struct completion done;
+ u64 curr_msg_id;
+ int errcode;
+ u8 wrapped;
+ int err;
+
+ spin_lock_bh(&cmdq->cmdq_lock);
+ curr_wqe = cmdq_get_wqe(wq, &curr_prod_idx);
+ if (!curr_wqe) {
+ spin_unlock_bh(&cmdq->cmdq_lock);
+ return -EBUSY;
+ }
+
+ wrapped = cmdq->wrapped;
+ next_prod_idx = curr_prod_idx + CMDQ_WQE_NUM_WQEBBS;
+ if (next_prod_idx >= wq->q_depth) {
+ cmdq->wrapped ^= 1;
+ next_prod_idx -= wq->q_depth;
+ }
+
+ cmd_info = &cmdq->cmd_infos[curr_prod_idx];
+ init_completion(&done);
+ refcount_inc(&buf_in->ref_cnt);
+ cmd_info->cmd_type = HINIC3_CMD_TYPE_DIRECT_RESP;
+ cmd_info->done = &done;
+ cmd_info->errcode = &errcode;
+ cmd_info->direct_resp = out_param;
+ cmd_info->cmpt_code = &cmpt_code;
+ cmd_info->buf_in = buf_in;
+ saved_cmd_info = *cmd_info;
+ cmdq_set_lcmd_wqe(&wqe, CMDQ_CMD_DIRECT_RESP, buf_in, NULL,
+ wrapped, mod, cmd, curr_prod_idx);
+
+ cmdq_wqe_fill(curr_wqe, &wqe);
+ (cmd_info->cmdq_msg_id)++;
+ curr_msg_id = cmd_info->cmdq_msg_id;
+ cmdq_set_db(cmdq, HINIC3_CMDQ_SYNC, next_prod_idx);
+ spin_unlock_bh(&cmdq->cmdq_lock);
+
+ err = wait_cmdq_sync_cmd_completion(cmdq, cmd_info, &saved_cmd_info,
+ curr_msg_id, curr_prod_idx,
+ curr_wqe, CMDQ_CMD_TIMEOUT);
+ if (err) {
+ dev_err(cmdq->hwdev->dev,
+ "Cmdq sync command timeout, mod: %u, cmd: %u, prod idx: 0x%x\n",
+ mod, cmd, curr_prod_idx);
+ err = -ETIMEDOUT;
+ }
+
+ if (cmpt_code == CMDQ_FORCE_STOP_CMPT_CODE) {
+ dev_dbg(cmdq->hwdev->dev,
+ "Force stop cmdq cmd, mod: %u, cmd: %u\n", mod, cmd);
+ err = -EAGAIN;
+ }
+
+ smp_rmb(); /* read error code after completion */
+
+ return err ? err : errcode;
+}
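The wrap bit written into the WQE header is how hardware distinguishes fresh entries from stale ones; a small worked trace (q_depth = 8 for brevity):

	/* wrapped = 1: WQEs at pi 0..7 are posted with HW_BUSY_BIT = 1.
	 * pi reaches 8 -> wrapped ^= 1, pi -= q_depth.
	 * wrapped = 0: the second lap posts HW_BUSY_BIT = 0, so a leftover
	 * '1' in a slot is recognisable as belonging to the previous lap.
	 */
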
+
+int hinic3_cmdq_direct_resp(struct hinic3_hwdev *hwdev, u8 mod, u8 cmd,
+ struct hinic3_cmd_buf *buf_in, __le64 *out_param)
+{
+ struct hinic3_cmdqs *cmdqs;
+ int err;
+
+ cmdqs = hwdev->cmdqs;
+ err = wait_cmdqs_enable(cmdqs);
+ if (err) {
+ dev_err(hwdev->dev, "Cmdq is disabled\n");
+ return err;
+ }
+
+ err = cmdq_sync_cmd_direct_resp(&cmdqs->cmdq[HINIC3_CMDQ_SYNC],
+ mod, cmd, buf_in, out_param);
+
+ return err;
+}
+
+static void cmdq_init_queue_ctxt(struct hinic3_hwdev *hwdev, u8 cmdq_id,
+ struct comm_cmdq_ctxt_info *ctxt_info)
+{
+ const struct hinic3_cmdqs *cmdqs;
+ u64 cmdq_first_block_paddr, pfn;
+ const struct hinic3_wq *wq;
+
+ cmdqs = hwdev->cmdqs;
+ wq = &cmdqs->cmdq[cmdq_id].wq;
+ pfn = CMDQ_PFN(hinic3_wq_get_first_wqe_page_addr(wq));
+
+ ctxt_info->curr_wqe_page_pfn =
+ cpu_to_le64(CMDQ_CTXT_SET(1, HW_BUSY_BIT) |
+ CMDQ_CTXT_SET(1, CEQ_EN) |
+ CMDQ_CTXT_SET(1, CEQ_ARM) |
+ CMDQ_CTXT_SET(0, EQ_ID) |
+ CMDQ_CTXT_SET(pfn, CURR_WQE_PAGE_PFN));
+
+ if (!hinic3_wq_is_0_level_cla(wq)) {
+ cmdq_first_block_paddr = cmdqs->wq_block_paddr;
+ pfn = CMDQ_PFN(cmdq_first_block_paddr);
+ }
+
+ ctxt_info->wq_block_pfn = cpu_to_le64(CMDQ_CTXT_SET(wq->cons_idx, CI) |
+ CMDQ_CTXT_SET(pfn, WQ_BLOCK_PFN));
+}
+
+static int init_cmdq(struct hinic3_cmdq *cmdq, struct hinic3_hwdev *hwdev,
+ enum hinic3_cmdq_type q_type)
+{
+ cmdq->cmdq_type = q_type;
+ cmdq->wrapped = 1;
+ cmdq->hwdev = hwdev;
+
+ spin_lock_init(&cmdq->cmdq_lock);
+
+ cmdq->cmd_infos = kcalloc(cmdq->wq.q_depth, sizeof(*cmdq->cmd_infos),
+ GFP_KERNEL);
+ if (!cmdq->cmd_infos)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int hinic3_set_cmdq_ctxt(struct hinic3_hwdev *hwdev, u8 cmdq_id)
+{
+ struct comm_cmd_set_cmdq_ctxt cmdq_ctxt = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ cmdq_init_queue_ctxt(hwdev, cmdq_id, &cmdq_ctxt.ctxt);
+ cmdq_ctxt.func_id = hinic3_global_func_id(hwdev);
+ cmdq_ctxt.cmdq_id = cmdq_id;
+
+ mgmt_msg_params_init_default(&msg_params, &cmdq_ctxt,
+ sizeof(cmdq_ctxt));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_SET_CMDQ_CTXT, &msg_params);
+ if (err || cmdq_ctxt.head.status) {
+ dev_err(hwdev->dev, "Failed to set cmdq ctxt, err: %d, status: 0x%x\n",
+ err, cmdq_ctxt.head.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int hinic3_set_cmdq_ctxts(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_cmdqs *cmdqs = hwdev->cmdqs;
+ u8 cmdq_type;
+ int err;
+
+ for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++) {
+ err = hinic3_set_cmdq_ctxt(hwdev, cmdq_type);
+ if (err)
+ return err;
+ }
+
+ cmdqs->status |= HINIC3_CMDQ_ENABLE;
+ cmdqs->disable_flag = 0;
+
+ return 0;
+}
+
+static int create_cmdq_wq(struct hinic3_hwdev *hwdev,
+ struct hinic3_cmdqs *cmdqs)
+{
+ u8 cmdq_type;
+ int err;
+
+ for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++) {
+ err = hinic3_wq_create(hwdev, &cmdqs->cmdq[cmdq_type].wq,
+ CMDQ_DEPTH, CMDQ_WQEBB_SIZE);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to create cmdq wq\n");
+ goto err_destroy_wq;
+ }
+ }
+
+ /* With 1-level Chip Logical Address (CLA), the wq page addresses
+ * of all cmdqs must be placed in a single wq block.
+ */
+ if (!hinic3_wq_is_0_level_cla(&cmdqs->cmdq[HINIC3_CMDQ_SYNC].wq)) {
+ if (cmdqs->cmdq[HINIC3_CMDQ_SYNC].wq.qpages.num_pages >
+ CMDQ_WQ_CLA_SIZE / sizeof(u64)) {
+ err = -EINVAL;
+ dev_err(hwdev->dev,
+ "Cmdq number of wq pages exceeds limit: %lu\n",
+ CMDQ_WQ_CLA_SIZE / sizeof(u64));
+ goto err_destroy_wq;
+ }
+
+ cmdqs->wq_block_vaddr =
+ dma_alloc_coherent(hwdev->dev, HINIC3_MIN_PAGE_SIZE,
+ &cmdqs->wq_block_paddr, GFP_KERNEL);
+ if (!cmdqs->wq_block_vaddr) {
+ err = -ENOMEM;
+ goto err_destroy_wq;
+ }
+
+ for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++)
+ memcpy((u8 *)cmdqs->wq_block_vaddr +
+ CMDQ_WQ_CLA_SIZE * cmdq_type,
+ cmdqs->cmdq[cmdq_type].wq.wq_block_vaddr,
+ cmdqs->cmdq[cmdq_type].wq.qpages.num_pages *
+ sizeof(__be64));
+ }
+
+ return 0;
+
+err_destroy_wq:
+ while (cmdq_type > 0) {
+ cmdq_type--;
+ hinic3_wq_destroy(hwdev, &cmdqs->cmdq[cmdq_type].wq);
+ }
+
+ return err;
+}
+
+static void destroy_cmdq_wq(struct hinic3_hwdev *hwdev,
+ struct hinic3_cmdqs *cmdqs)
+{
+ u8 cmdq_type;
+
+ if (cmdqs->wq_block_vaddr)
+ dma_free_coherent(hwdev->dev, HINIC3_MIN_PAGE_SIZE,
+ cmdqs->wq_block_vaddr, cmdqs->wq_block_paddr);
+
+ for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++)
+ hinic3_wq_destroy(hwdev, &cmdqs->cmdq[cmdq_type].wq);
+}
+
+static int init_cmdqs(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_cmdqs *cmdqs;
+
+ cmdqs = kzalloc(sizeof(*cmdqs), GFP_KERNEL);
+ if (!cmdqs)
+ return -ENOMEM;
+
+ hwdev->cmdqs = cmdqs;
+ cmdqs->hwdev = hwdev;
+ cmdqs->cmdq_num = hwdev->max_cmdq;
+
+ cmdqs->cmd_buf_pool = dma_pool_create("hinic3_cmdq", hwdev->dev,
+ CMDQ_BUF_SIZE, CMDQ_BUF_SIZE, 0);
+ if (!cmdqs->cmd_buf_pool) {
+ dev_err(hwdev->dev, "Failed to create cmdq buffer pool\n");
+ kfree(cmdqs);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void cmdq_flush_sync_cmd(struct hinic3_cmdq_cmd_info *cmd_info)
+{
+ if (cmd_info->cmd_type != HINIC3_CMD_TYPE_DIRECT_RESP)
+ return;
+
+ cmd_info->cmd_type = HINIC3_CMD_TYPE_FORCE_STOP;
+
+ if (cmd_info->cmpt_code &&
+ *cmd_info->cmpt_code == CMDQ_SEND_CMPT_CODE)
+ *cmd_info->cmpt_code = CMDQ_FORCE_STOP_CMPT_CODE;
+
+ if (cmd_info->done) {
+ complete(cmd_info->done);
+ cmd_info->done = NULL;
+ cmd_info->cmpt_code = NULL;
+ cmd_info->direct_resp = NULL;
+ cmd_info->errcode = NULL;
+ }
+}
+
+static void hinic3_cmdq_flush_cmd(struct hinic3_cmdq *cmdq)
+{
+ struct hinic3_cmdq_cmd_info *cmd_info;
+ u16 ci;
+
+ spin_lock_bh(&cmdq->cmdq_lock);
+ while (cmdq_read_wqe(&cmdq->wq, &ci)) {
+ hinic3_wq_put_wqebbs(&cmdq->wq, CMDQ_WQE_NUM_WQEBBS);
+ cmd_info = &cmdq->cmd_infos[ci];
+ if (cmd_info->cmd_type == HINIC3_CMD_TYPE_DIRECT_RESP)
+ cmdq_flush_sync_cmd(cmd_info);
+ }
+ spin_unlock_bh(&cmdq->cmdq_lock);
+}
+
+void hinic3_cmdq_flush_sync_cmd(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_cmdq *cmdq;
+ u16 wqe_cnt, wqe_idx, i;
+ struct hinic3_wq *wq;
+
+ cmdq = &hwdev->cmdqs->cmdq[HINIC3_CMDQ_SYNC];
+ spin_lock_bh(&cmdq->cmdq_lock);
+ wq = &cmdq->wq;
+ wqe_cnt = hinic3_wq_get_used(wq);
+ for (i = 0; i < wqe_cnt; i++) {
+ wqe_idx = (wq->cons_idx + i) & wq->idx_mask;
+ cmdq_flush_sync_cmd(cmdq->cmd_infos + wqe_idx);
+ }
+ spin_unlock_bh(&cmdq->cmdq_lock);
+}
+
+static void hinic3_cmdq_reset_all_cmd_buf(struct hinic3_cmdq *cmdq)
+{
+ u16 i;
+
+ for (i = 0; i < cmdq->wq.q_depth; i++)
+ cmdq_clear_cmd_buf(&cmdq->cmd_infos[i], cmdq->hwdev);
+}
+
+int hinic3_reinit_cmdq_ctxts(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_cmdqs *cmdqs = hwdev->cmdqs;
+ u8 cmdq_type;
+
+ for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++) {
+ hinic3_cmdq_flush_cmd(&cmdqs->cmdq[cmdq_type]);
+ hinic3_cmdq_reset_all_cmd_buf(&cmdqs->cmdq[cmdq_type]);
+ cmdqs->cmdq[cmdq_type].wrapped = 1;
+ hinic3_wq_reset(&cmdqs->cmdq[cmdq_type].wq);
+ }
+
+ return hinic3_set_cmdq_ctxts(hwdev);
+}
+
+int hinic3_cmdqs_init(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_cmdqs *cmdqs;
+ void __iomem *db_base;
+ u8 cmdq_type;
+ int err;
+
+ err = init_cmdqs(hwdev);
+ if (err)
+ goto err_out;
+
+ cmdqs = hwdev->cmdqs;
+ err = create_cmdq_wq(hwdev, cmdqs);
+ if (err)
+ goto err_free_cmdqs;
+
+ err = hinic3_alloc_db_addr(hwdev, &db_base, NULL);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to allocate doorbell address\n");
+ goto err_destroy_cmdq_wq;
+ }
+ cmdqs->cmdqs_db_base = db_base;
+
+ for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++) {
+ err = init_cmdq(&cmdqs->cmdq[cmdq_type], hwdev, cmdq_type);
+ if (err) {
+ dev_err(hwdev->dev,
+ "Failed to initialize cmdq type : %d\n",
+ cmdq_type);
+ goto err_free_cmd_infos;
+ }
+ }
+
+ err = hinic3_set_cmdq_ctxts(hwdev);
+ if (err)
+ goto err_free_cmd_infos;
+
+ return 0;
+
+err_free_cmd_infos:
+ while (cmdq_type > 0) {
+ cmdq_type--;
+ kfree(cmdqs->cmdq[cmdq_type].cmd_infos);
+ }
+
+ hinic3_free_db_addr(hwdev, cmdqs->cmdqs_db_base);
+
+err_destroy_cmdq_wq:
+ destroy_cmdq_wq(hwdev, cmdqs);
+
+err_free_cmdqs:
+ dma_pool_destroy(cmdqs->cmd_buf_pool);
+ kfree(cmdqs);
+
+err_out:
+ return err;
+}
+
+void hinic3_cmdqs_free(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_cmdqs *cmdqs = hwdev->cmdqs;
+ u8 cmdq_type;
+
+ cmdqs->status &= ~HINIC3_CMDQ_ENABLE;
+
+ for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++) {
+ hinic3_cmdq_flush_cmd(&cmdqs->cmdq[cmdq_type]);
+ hinic3_cmdq_reset_all_cmd_buf(&cmdqs->cmdq[cmdq_type]);
+ kfree(cmdqs->cmdq[cmdq_type].cmd_infos);
+ }
+
+ hinic3_free_db_addr(hwdev, cmdqs->cmdqs_db_base);
+ destroy_cmdq_wq(hwdev, cmdqs);
+ dma_pool_destroy(cmdqs->cmd_buf_pool);
+ kfree(cmdqs);
+}
+
+bool hinic3_cmdq_idle(struct hinic3_cmdq *cmdq)
+{
+ return hinic3_wq_get_used(&cmdq->wq) == 0;
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.h b/drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.h
new file mode 100644
index 000000000000..f99c386a2780
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_CMDQ_H_
+#define _HINIC3_CMDQ_H_
+
+#include <linux/dmapool.h>
+
+#include "hinic3_hw_intf.h"
+#include "hinic3_wq.h"
+
+#define CMDQ_DEPTH 4096
+
+struct cmdq_db {
+ __le32 db_head;
+ __le32 db_info;
+};
+
+/* hw defined cmdq wqe header */
+struct cmdq_header {
+ __le32 header_info;
+ __le32 saved_data;
+};
+
+struct cmdq_lcmd_bufdesc {
+ struct hinic3_sge sge;
+ __le64 rsvd2;
+ __le64 rsvd3;
+};
+
+struct cmdq_status {
+ __le32 status_info;
+};
+
+struct cmdq_ctrl {
+ __le32 ctrl_info;
+};
+
+struct cmdq_direct_resp {
+ __le64 val;
+ __le64 rsvd;
+};
+
+struct cmdq_completion {
+ union {
+ struct hinic3_sge sge;
+ struct cmdq_direct_resp direct;
+ } resp;
+};
+
+struct cmdq_wqe_scmd {
+ struct cmdq_header header;
+ __le64 rsvd3;
+ struct cmdq_status status;
+ struct cmdq_ctrl ctrl;
+ struct cmdq_completion completion;
+ __le32 rsvd10[6];
+};
+
+struct cmdq_wqe_lcmd {
+ struct cmdq_header header;
+ struct cmdq_status status;
+ struct cmdq_ctrl ctrl;
+ struct cmdq_completion completion;
+ struct cmdq_lcmd_bufdesc buf_desc;
+};
+
+struct cmdq_wqe {
+ union {
+ struct cmdq_wqe_scmd wqe_scmd;
+ struct cmdq_wqe_lcmd wqe_lcmd;
+ };
+};
+
+static_assert(sizeof(struct cmdq_wqe) == 64);
+
+enum hinic3_cmdq_type {
+ HINIC3_CMDQ_SYNC = 0,
+ HINIC3_MAX_CMDQ_TYPES = 4
+};
+
+enum hinic3_cmdq_status {
+ HINIC3_CMDQ_ENABLE = BIT(0),
+};
+
+enum hinic3_cmdq_cmd_type {
+ HINIC3_CMD_TYPE_NONE,
+ HINIC3_CMD_TYPE_DIRECT_RESP,
+ HINIC3_CMD_TYPE_FAKE_TIMEOUT,
+ HINIC3_CMD_TYPE_TIMEOUT,
+ HINIC3_CMD_TYPE_FORCE_STOP,
+};
+
+struct hinic3_cmd_buf {
+ void *buf;
+ dma_addr_t dma_addr;
+ __le16 size;
+ refcount_t ref_cnt;
+};
+
+struct hinic3_cmdq_cmd_info {
+ enum hinic3_cmdq_cmd_type cmd_type;
+ struct completion *done;
+ int *errcode;
+ /* completion code */
+ int *cmpt_code;
+ __le64 *direct_resp;
+ u64 cmdq_msg_id;
+ struct hinic3_cmd_buf *buf_in;
+};
+
+struct hinic3_cmdq {
+ struct hinic3_wq wq;
+ enum hinic3_cmdq_type cmdq_type;
+ u8 wrapped;
+ /* synchronize command submission with completions via event queue */
+ spinlock_t cmdq_lock;
+ struct hinic3_cmdq_cmd_info *cmd_infos;
+ struct hinic3_hwdev *hwdev;
+};
+
+struct hinic3_cmdqs {
+ struct hinic3_hwdev *hwdev;
+ struct hinic3_cmdq cmdq[HINIC3_MAX_CMDQ_TYPES];
+ struct dma_pool *cmd_buf_pool;
+ /* doorbell area */
+ u8 __iomem *cmdqs_db_base;
+
+ /* When a command queue uses multiple memory pages (1-level CLA), this
+ * block holds the aggregated indirection table for all command queues
+ * of cmdqs. Not used for a small cmdq (0-level CLA).
+ */
+ dma_addr_t wq_block_paddr;
+ void *wq_block_vaddr;
+
+ u32 status;
+ u32 disable_flag;
+ u8 cmdq_num;
+};
+
+int hinic3_cmdqs_init(struct hinic3_hwdev *hwdev);
+void hinic3_cmdqs_free(struct hinic3_hwdev *hwdev);
+
+struct hinic3_cmd_buf *hinic3_alloc_cmd_buf(struct hinic3_hwdev *hwdev);
+void hinic3_free_cmd_buf(struct hinic3_hwdev *hwdev,
+ struct hinic3_cmd_buf *cmd_buf);
+void hinic3_cmdq_ceq_handler(struct hinic3_hwdev *hwdev, __le32 ceqe_data);
+
+int hinic3_cmdq_direct_resp(struct hinic3_hwdev *hwdev, u8 mod, u8 cmd,
+ struct hinic3_cmd_buf *buf_in, __le64 *out_param);
+
+void hinic3_cmdq_flush_sync_cmd(struct hinic3_hwdev *hwdev);
+int hinic3_reinit_cmdq_ctxts(struct hinic3_hwdev *hwdev);
+bool hinic3_cmdq_idle(struct hinic3_cmdq *cmdq);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_common.c b/drivers/net/ethernet/huawei/hinic3/hinic3_common.c
index 0aa42068728c..fe4778d152cf 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_common.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_common.c
@@ -3,6 +3,7 @@
#include <linux/delay.h>
#include <linux/dma-mapping.h>
+#include <linux/iopoll.h>
#include "hinic3_common.h"
@@ -51,3 +52,25 @@ void hinic3_dma_free_coherent_align(struct device *dev,
dma_free_coherent(dev, mem_align->real_size,
mem_align->ori_vaddr, mem_align->ori_paddr);
}
+
+int hinic3_wait_for_timeout(void *priv_data, wait_cpl_handler handler,
+ u32 wait_total_ms, u32 wait_once_us)
+{
+ enum hinic3_wait_return ret;
+ int err;
+
+ err = read_poll_timeout(handler, ret, ret == HINIC3_WAIT_PROCESS_CPL,
+ wait_once_us, wait_total_ms * USEC_PER_MSEC,
+ false, priv_data);
+
+ return err;
+}
+
+/* Data provided to/by the cmdq is arranged in structs with little-endian
+ * fields, but every dword (32 bits) must be swapped, since the HW swaps it
+ * again when copying it from/to host memory.
+ */
+void hinic3_cmdq_buf_swab32(void *data, int len)
+{
+ swab32_array(data, len / sizeof(u32));
+}
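A hypothetical poller built on hinic3_wait_for_timeout() (the handler name is illustrative): wait up to 10 ms, checking every 100 us, for the sync cmdq to drain:

	static enum hinic3_wait_return check_cmdq_idle(void *priv_data)
	{
		struct hinic3_cmdq *cmdq = priv_data;

		return hinic3_cmdq_idle(cmdq) ? HINIC3_WAIT_PROCESS_CPL :
						HINIC3_WAIT_PROCESS_WAITING;
	}

	/* err = hinic3_wait_for_timeout(cmdq, check_cmdq_idle, 10, 100); */
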
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_common.h b/drivers/net/ethernet/huawei/hinic3/hinic3_common.h
index bb795dace04c..a8fabfae90fb 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_common.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_common.h
@@ -18,10 +18,37 @@ struct hinic3_dma_addr_align {
dma_addr_t align_paddr;
};
+enum hinic3_wait_return {
+ HINIC3_WAIT_PROCESS_CPL = 0,
+ HINIC3_WAIT_PROCESS_WAITING = 1,
+};
+
+struct hinic3_sge {
+ __le32 hi_addr;
+ __le32 lo_addr;
+ __le32 len;
+ __le32 rsvd;
+};
+
+static inline void hinic3_set_sge(struct hinic3_sge *sge, dma_addr_t addr,
+ __le32 len)
+{
+ sge->hi_addr = cpu_to_le32(upper_32_bits(addr));
+ sge->lo_addr = cpu_to_le32(lower_32_bits(addr));
+ sge->len = len;
+ sge->rsvd = 0;
+}
+
int hinic3_dma_zalloc_coherent_align(struct device *dev, u32 size, u32 align,
gfp_t flag,
struct hinic3_dma_addr_align *mem_align);
void hinic3_dma_free_coherent_align(struct device *dev,
struct hinic3_dma_addr_align *mem_align);
+typedef enum hinic3_wait_return (*wait_cpl_handler)(void *priv_data);
+int hinic3_wait_for_timeout(void *priv_data, wait_cpl_handler handler,
+ u32 wait_total_ms, u32 wait_once_us);
+
+void hinic3_cmdq_buf_swab32(void *data, int len);
+
#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_csr.h b/drivers/net/ethernet/huawei/hinic3/hinic3_csr.h
new file mode 100644
index 000000000000..e7417e8efa99
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_csr.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_CSR_H_
+#define _HINIC3_CSR_H_
+
+#define HINIC3_CFG_REGS_FLAG 0x40000000
+#define HINIC3_REGS_FLAG_MASK 0x3FFFFFFF
+
+#define HINIC3_VF_CFG_REG_OFFSET 0x2000
+
+/* HW interface registers */
+#define HINIC3_CSR_FUNC_ATTR0_ADDR (HINIC3_CFG_REGS_FLAG + 0x0)
+#define HINIC3_CSR_FUNC_ATTR1_ADDR (HINIC3_CFG_REGS_FLAG + 0x4)
+#define HINIC3_CSR_FUNC_ATTR2_ADDR (HINIC3_CFG_REGS_FLAG + 0x8)
+#define HINIC3_CSR_FUNC_ATTR3_ADDR (HINIC3_CFG_REGS_FLAG + 0xC)
+#define HINIC3_CSR_FUNC_ATTR4_ADDR (HINIC3_CFG_REGS_FLAG + 0x10)
+#define HINIC3_CSR_FUNC_ATTR5_ADDR (HINIC3_CFG_REGS_FLAG + 0x14)
+#define HINIC3_CSR_FUNC_ATTR6_ADDR (HINIC3_CFG_REGS_FLAG + 0x18)
+
+#define HINIC3_FUNC_CSR_MAILBOX_DATA_OFF 0x80
+#define HINIC3_FUNC_CSR_MAILBOX_CONTROL_OFF (HINIC3_CFG_REGS_FLAG + 0x0100)
+#define HINIC3_FUNC_CSR_MAILBOX_INT_OFF (HINIC3_CFG_REGS_FLAG + 0x0104)
+#define HINIC3_FUNC_CSR_MAILBOX_RESULT_H_OFF (HINIC3_CFG_REGS_FLAG + 0x0108)
+#define HINIC3_FUNC_CSR_MAILBOX_RESULT_L_OFF (HINIC3_CFG_REGS_FLAG + 0x010C)
+
+#define HINIC3_CSR_DMA_ATTR_TBL_ADDR (HINIC3_CFG_REGS_FLAG + 0x380)
+#define HINIC3_CSR_DMA_ATTR_INDIR_IDX_ADDR (HINIC3_CFG_REGS_FLAG + 0x390)
+
+/* MSI-X registers */
+#define HINIC3_CSR_FUNC_MSI_CLR_WR_ADDR (HINIC3_CFG_REGS_FLAG + 0x58)
+
+#define HINIC3_MSI_CLR_INDIR_RESEND_TIMER_CLR_MASK BIT(0)
+#define HINIC3_MSI_CLR_INDIR_INT_MSK_SET_MASK BIT(1)
+#define HINIC3_MSI_CLR_INDIR_INT_MSK_CLR_MASK BIT(2)
+#define HINIC3_MSI_CLR_INDIR_AUTO_MSK_SET_MASK BIT(3)
+#define HINIC3_MSI_CLR_INDIR_AUTO_MSK_CLR_MASK BIT(4)
+#define HINIC3_MSI_CLR_INDIR_SIMPLE_INDIR_IDX_MASK GENMASK(31, 22)
+#define HINIC3_MSI_CLR_INDIR_SET(val, member) \
+ FIELD_PREP(HINIC3_MSI_CLR_INDIR_##member##_MASK, val)
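+
+/* Example (illustrative): clearing the resend timer of msix entry 3 builds
+ *	HINIC3_MSI_CLR_INDIR_SET(1, RESEND_TIMER_CLR) |
+ *	HINIC3_MSI_CLR_INDIR_SET(3, SIMPLE_INDIR_IDX)
+ * i.e. BIT(0) | (3 << 22), presumably written to
+ * HINIC3_CSR_FUNC_MSI_CLR_WR_ADDR by hinic3_msix_intr_clear_resend_bit().
+ */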
+
+/* EQ registers */
+#define HINIC3_AEQ_INDIR_IDX_ADDR (HINIC3_CFG_REGS_FLAG + 0x210)
+#define HINIC3_CEQ_INDIR_IDX_ADDR (HINIC3_CFG_REGS_FLAG + 0x290)
+
+#define HINIC3_EQ_INDIR_IDX_ADDR(type) \
+ ((type == HINIC3_AEQ) ? HINIC3_AEQ_INDIR_IDX_ADDR : \
+ HINIC3_CEQ_INDIR_IDX_ADDR)
+
+#define HINIC3_AEQ_MTT_OFF_BASE_ADDR (HINIC3_CFG_REGS_FLAG + 0x240)
+#define HINIC3_CEQ_MTT_OFF_BASE_ADDR (HINIC3_CFG_REGS_FLAG + 0x2C0)
+
+#define HINIC3_CSR_EQ_PAGE_OFF_STRIDE 8
+
+#define HINIC3_AEQ_HI_PHYS_ADDR_REG(pg_num) \
+ (HINIC3_AEQ_MTT_OFF_BASE_ADDR + (pg_num) * \
+ HINIC3_CSR_EQ_PAGE_OFF_STRIDE)
+
+#define HINIC3_AEQ_LO_PHYS_ADDR_REG(pg_num) \
+ (HINIC3_AEQ_MTT_OFF_BASE_ADDR + (pg_num) * \
+ HINIC3_CSR_EQ_PAGE_OFF_STRIDE + 4)
+
+#define HINIC3_CEQ_HI_PHYS_ADDR_REG(pg_num) \
+ (HINIC3_CEQ_MTT_OFF_BASE_ADDR + (pg_num) * \
+ HINIC3_CSR_EQ_PAGE_OFF_STRIDE)
+
+#define HINIC3_CEQ_LO_PHYS_ADDR_REG(pg_num) \
+ (HINIC3_CEQ_MTT_OFF_BASE_ADDR + (pg_num) * \
+ HINIC3_CSR_EQ_PAGE_OFF_STRIDE + 4)
+
+#define HINIC3_CSR_AEQ_CTRL_0_ADDR (HINIC3_CFG_REGS_FLAG + 0x200)
+#define HINIC3_CSR_AEQ_CTRL_1_ADDR (HINIC3_CFG_REGS_FLAG + 0x204)
+#define HINIC3_CSR_AEQ_PROD_IDX_ADDR (HINIC3_CFG_REGS_FLAG + 0x20C)
+#define HINIC3_CSR_AEQ_CI_SIMPLE_INDIR_ADDR (HINIC3_CFG_REGS_FLAG + 0x50)
+
+#define HINIC3_CSR_CEQ_PROD_IDX_ADDR (HINIC3_CFG_REGS_FLAG + 0x28c)
+#define HINIC3_CSR_CEQ_CI_SIMPLE_INDIR_ADDR (HINIC3_CFG_REGS_FLAG + 0x54)
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_eqs.c b/drivers/net/ethernet/huawei/hinic3/hinic3_eqs.c
new file mode 100644
index 000000000000..01686472985b
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_eqs.c
@@ -0,0 +1,776 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/delay.h>
+
+#include "hinic3_csr.h"
+#include "hinic3_eqs.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
+#include "hinic3_mbox.h"
+
+#define AEQ_CTRL_0_INTR_IDX_MASK GENMASK(9, 0)
+#define AEQ_CTRL_0_DMA_ATTR_MASK GENMASK(17, 12)
+#define AEQ_CTRL_0_PCI_INTF_IDX_MASK GENMASK(22, 20)
+#define AEQ_CTRL_0_INTR_MODE_MASK BIT(31)
+#define AEQ_CTRL_0_SET(val, member) \
+ FIELD_PREP(AEQ_CTRL_0_##member##_MASK, val)
+
+#define AEQ_CTRL_1_LEN_MASK GENMASK(20, 0)
+#define AEQ_CTRL_1_ELEM_SIZE_MASK GENMASK(25, 24)
+#define AEQ_CTRL_1_PAGE_SIZE_MASK GENMASK(31, 28)
+#define AEQ_CTRL_1_SET(val, member) \
+ FIELD_PREP(AEQ_CTRL_1_##member##_MASK, val)
+
+#define CEQ_CTRL_0_INTR_IDX_MASK GENMASK(9, 0)
+#define CEQ_CTRL_0_DMA_ATTR_MASK GENMASK(17, 12)
+#define CEQ_CTRL_0_LIMIT_KICK_MASK GENMASK(23, 20)
+#define CEQ_CTRL_0_PCI_INTF_IDX_MASK GENMASK(25, 24)
+#define CEQ_CTRL_0_PAGE_SIZE_MASK GENMASK(30, 27)
+#define CEQ_CTRL_0_INTR_MODE_MASK BIT(31)
+#define CEQ_CTRL_0_SET(val, member) \
+ FIELD_PREP(CEQ_CTRL_0_##member##_MASK, val)
+
+#define CEQ_CTRL_1_LEN_MASK GENMASK(19, 0)
+#define CEQ_CTRL_1_SET(val, member) \
+ FIELD_PREP(CEQ_CTRL_1_##member##_MASK, val)
+
+#define CEQE_TYPE_MASK GENMASK(25, 23)
+#define CEQE_TYPE(type) \
+ FIELD_GET(CEQE_TYPE_MASK, le32_to_cpu(type))
+
+#define CEQE_DATA_MASK GENMASK(25, 0)
+#define CEQE_DATA(data) ((data) & cpu_to_le32(CEQE_DATA_MASK))
+
+#define EQ_ELEM_DESC_TYPE_MASK GENMASK(6, 0)
+#define EQ_ELEM_DESC_SRC_MASK BIT(7)
+#define EQ_ELEM_DESC_SIZE_MASK GENMASK(15, 8)
+#define EQ_ELEM_DESC_WRAPPED_MASK BIT(31)
+#define EQ_ELEM_DESC_GET(val, member) \
+ FIELD_GET(EQ_ELEM_DESC_##member##_MASK, le32_to_cpu(val))
+
+#define EQ_CI_SIMPLE_INDIR_CI_MASK GENMASK(20, 0)
+#define EQ_CI_SIMPLE_INDIR_ARMED_MASK BIT(21)
+#define EQ_CI_SIMPLE_INDIR_AEQ_IDX_MASK GENMASK(31, 30)
+#define EQ_CI_SIMPLE_INDIR_CEQ_IDX_MASK GENMASK(31, 24)
+#define EQ_CI_SIMPLE_INDIR_SET(val, member) \
+ FIELD_PREP(EQ_CI_SIMPLE_INDIR_##member##_MASK, val)
+
+#define EQ_CI_SIMPLE_INDIR_REG_ADDR(eq) \
+ (((eq)->type == HINIC3_AEQ) ? \
+ HINIC3_CSR_AEQ_CI_SIMPLE_INDIR_ADDR : \
+ HINIC3_CSR_CEQ_CI_SIMPLE_INDIR_ADDR)
+
+#define EQ_PROD_IDX_REG_ADDR(eq) \
+ (((eq)->type == HINIC3_AEQ) ? \
+ HINIC3_CSR_AEQ_PROD_IDX_ADDR : HINIC3_CSR_CEQ_PROD_IDX_ADDR)
+
+#define EQ_HI_PHYS_ADDR_REG(type, pg_num) \
+ (((type) == HINIC3_AEQ) ? \
+ HINIC3_AEQ_HI_PHYS_ADDR_REG(pg_num) : \
+ HINIC3_CEQ_HI_PHYS_ADDR_REG(pg_num))
+
+#define EQ_LO_PHYS_ADDR_REG(type, pg_num) \
+ (((type) == HINIC3_AEQ) ? \
+ HINIC3_AEQ_LO_PHYS_ADDR_REG(pg_num) : \
+ HINIC3_CEQ_LO_PHYS_ADDR_REG(pg_num))
+
+#define EQ_MSIX_RESEND_TIMER_CLEAR 1
+
+#define HINIC3_EQ_MAX_PAGES(eq) \
+ ((eq)->type == HINIC3_AEQ ? \
+ HINIC3_AEQ_MAX_PAGES : HINIC3_CEQ_MAX_PAGES)
+
+#define HINIC3_TASK_PROCESS_EQE_LIMIT 1024
+#define HINIC3_EQ_UPDATE_CI_STEP 64
+#define HINIC3_EQS_WQ_NAME "hinic3_eqs"
+
+#define HINIC3_EQ_VALID_SHIFT 31
+#define HINIC3_EQ_WRAPPED(eq) \
+ ((eq)->wrapped << HINIC3_EQ_VALID_SHIFT)
+
+#define HINIC3_EQ_WRAPPED_SHIFT 20
+#define HINIC3_EQ_CONS_IDX(eq) \
+ ((eq)->cons_idx | ((eq)->wrapped << HINIC3_EQ_WRAPPED_SHIFT))
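+
+/* Example (illustrative): with cons_idx = 5 and wrapped = 1,
+ * HINIC3_EQ_CONS_IDX() yields 5 | (1 << 20) = 0x100005 and
+ * HINIC3_EQ_WRAPPED() yields 1 << 31. The handlers below compare an
+ * element's wrapped bit against the driver's copy: equality means HW has
+ * not written that element yet.
+ */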
+
+static const struct hinic3_aeq_elem *get_curr_aeq_elem(const struct hinic3_eq *eq)
+{
+ return get_q_element(&eq->qpages, eq->cons_idx, NULL);
+}
+
+static const __be32 *get_curr_ceq_elem(const struct hinic3_eq *eq)
+{
+ return get_q_element(&eq->qpages, eq->cons_idx, NULL);
+}
+
+int hinic3_aeq_register_cb(struct hinic3_hwdev *hwdev,
+ enum hinic3_aeq_type event,
+ hinic3_aeq_event_cb hwe_cb)
+{
+ struct hinic3_aeqs *aeqs;
+
+ aeqs = hwdev->aeqs;
+ aeqs->aeq_cb[event] = hwe_cb;
+ spin_lock_init(&aeqs->aeq_lock);
+
+ return 0;
+}
+
+void hinic3_aeq_unregister_cb(struct hinic3_hwdev *hwdev,
+ enum hinic3_aeq_type event)
+{
+ struct hinic3_aeqs *aeqs;
+
+ aeqs = hwdev->aeqs;
+
+ spin_lock_bh(&aeqs->aeq_lock);
+ aeqs->aeq_cb[event] = NULL;
+ spin_unlock_bh(&aeqs->aeq_lock);
+}
+
+int hinic3_ceq_register_cb(struct hinic3_hwdev *hwdev,
+ enum hinic3_ceq_event event,
+ hinic3_ceq_event_cb callback)
+{
+ struct hinic3_ceqs *ceqs;
+
+ ceqs = hwdev->ceqs;
+ ceqs->ceq_cb[event] = callback;
+ spin_lock_init(&ceqs->ceq_lock);
+
+ return 0;
+}
+
+void hinic3_ceq_unregister_cb(struct hinic3_hwdev *hwdev,
+ enum hinic3_ceq_event event)
+{
+ struct hinic3_ceqs *ceqs;
+
+ ceqs = hwdev->ceqs;
+
+ spin_lock_bh(&ceqs->ceq_lock);
+ ceqs->ceq_cb[event] = NULL;
+ spin_unlock_bh(&ceqs->ceq_lock);
+}
+
+/* Set consumer index in the hw. */
+static void set_eq_cons_idx(struct hinic3_eq *eq, u32 arm_state)
+{
+ u32 addr = EQ_CI_SIMPLE_INDIR_REG_ADDR(eq);
+ u32 eq_wrap_ci, val;
+
+ eq_wrap_ci = HINIC3_EQ_CONS_IDX(eq);
+ val = EQ_CI_SIMPLE_INDIR_SET(arm_state, ARMED);
+ if (eq->type == HINIC3_AEQ) {
+ val = val |
+ EQ_CI_SIMPLE_INDIR_SET(eq_wrap_ci, CI) |
+ EQ_CI_SIMPLE_INDIR_SET(eq->q_id, AEQ_IDX);
+ } else {
+ val = val |
+ EQ_CI_SIMPLE_INDIR_SET(eq_wrap_ci, CI) |
+ EQ_CI_SIMPLE_INDIR_SET(eq->q_id, CEQ_IDX);
+ }
+
+ hinic3_hwif_write_reg(eq->hwdev->hwif, addr, val);
+}
+
+static struct hinic3_ceqs *ceq_to_ceqs(const struct hinic3_eq *eq)
+{
+ return container_of(eq, struct hinic3_ceqs, ceq[eq->q_id]);
+}
+
+static void ceq_event_handler(struct hinic3_ceqs *ceqs, __le32 ceqe)
+{
+ enum hinic3_ceq_event event = CEQE_TYPE(ceqe);
+ struct hinic3_hwdev *hwdev = ceqs->hwdev;
+ __le32 ceqe_data = CEQE_DATA(ceqe);
+
+ if (event >= HINIC3_MAX_CEQ_EVENTS) {
+ dev_warn(hwdev->dev, "Ceq unknown event:%d, ceqe data: 0x%x\n",
+ event, ceqe_data);
+ return;
+ }
+
+ spin_lock_bh(&ceqs->ceq_lock);
+ if (ceqs->ceq_cb[event])
+ ceqs->ceq_cb[event](hwdev, ceqe_data);
+
+ spin_unlock_bh(&ceqs->ceq_lock);
+}
+
+static struct hinic3_aeqs *aeq_to_aeqs(const struct hinic3_eq *eq)
+{
+ return container_of(eq, struct hinic3_aeqs, aeq[eq->q_id]);
+}
+
+static void aeq_event_handler(struct hinic3_aeqs *aeqs, __le32 aeqe,
+ const struct hinic3_aeq_elem *aeqe_pos)
+{
+ struct hinic3_hwdev *hwdev = aeqs->hwdev;
+ u8 data[HINIC3_AEQE_DATA_SIZE], size;
+ enum hinic3_aeq_type event;
+ hinic3_aeq_event_cb hwe_cb;
+
+ if (EQ_ELEM_DESC_GET(aeqe, SRC))
+ return;
+
+ event = EQ_ELEM_DESC_GET(aeqe, TYPE);
+ if (event >= HINIC3_MAX_AEQ_EVENTS) {
+ dev_warn(hwdev->dev, "Aeq unknown event:%d\n", event);
+ return;
+ }
+
+ memcpy(data, aeqe_pos->aeqe_data, HINIC3_AEQE_DATA_SIZE);
+ swab32_array((u32 *)data, HINIC3_AEQE_DATA_SIZE / sizeof(u32));
+ size = EQ_ELEM_DESC_GET(aeqe, SIZE);
+
+ spin_lock_bh(&aeqs->aeq_lock);
+ hwe_cb = aeqs->aeq_cb[event];
+ if (hwe_cb)
+ hwe_cb(aeqs->hwdev, data, size);
+ spin_unlock_bh(&aeqs->aeq_lock);
+}
+
+static int aeq_irq_handler(struct hinic3_eq *eq)
+{
+ const struct hinic3_aeq_elem *aeqe_pos;
+ struct hinic3_aeqs *aeqs;
+ u32 i, eqe_cnt = 0;
+ __le32 aeqe;
+
+ aeqs = aeq_to_aeqs(eq);
+ for (i = 0; i < HINIC3_TASK_PROCESS_EQE_LIMIT; i++) {
+ aeqe_pos = get_curr_aeq_elem(eq);
+ aeqe = (__force __le32)swab32((__force __u32)aeqe_pos->desc);
+ /* HW updates the wrapped bit when it adds an eq element */
+ if (EQ_ELEM_DESC_GET(aeqe, WRAPPED) == eq->wrapped)
+ return 0;
+
+ /* Prevent speculative reads from element */
+ dma_rmb();
+ aeq_event_handler(aeqs, aeqe, aeqe_pos);
+ eq->cons_idx++;
+ if (eq->cons_idx == eq->eq_len) {
+ eq->cons_idx = 0;
+ eq->wrapped = !eq->wrapped;
+ }
+
+ if (++eqe_cnt >= HINIC3_EQ_UPDATE_CI_STEP) {
+ eqe_cnt = 0;
+ set_eq_cons_idx(eq, HINIC3_EQ_NOT_ARMED);
+ }
+ }
+
+ return -EAGAIN;
+}
+
+static int ceq_irq_handler(struct hinic3_eq *eq)
+{
+ struct hinic3_ceqs *ceqs;
+ u32 eqe_cnt = 0;
+ __be32 ceqe_raw;
+ __le32 ceqe;
+ u32 i;
+
+ ceqs = ceq_to_ceqs(eq);
+ for (i = 0; i < HINIC3_TASK_PROCESS_EQE_LIMIT; i++) {
+ ceqe_raw = *get_curr_ceq_elem(eq);
+ ceqe = (__force __le32)swab32((__force __u32)ceqe_raw);
+
+ /* HW updates the wrapped bit when it adds an eq element */
+ if (EQ_ELEM_DESC_GET(ceqe, WRAPPED) == eq->wrapped)
+ return 0;
+
+ ceq_event_handler(ceqs, ceqe);
+ eq->cons_idx++;
+ if (eq->cons_idx == eq->eq_len) {
+ eq->cons_idx = 0;
+ eq->wrapped = !eq->wrapped;
+ }
+
+ if (++eqe_cnt >= HINIC3_EQ_UPDATE_CI_STEP) {
+ eqe_cnt = 0;
+ set_eq_cons_idx(eq, HINIC3_EQ_NOT_ARMED);
+ }
+ }
+
+ return -EAGAIN;
+}
+
+static void reschedule_aeq_handler(struct hinic3_eq *eq)
+{
+ struct hinic3_aeqs *aeqs = aeq_to_aeqs(eq);
+
+ queue_work(aeqs->workq, &eq->aeq_work);
+}
+
+static int eq_irq_handler(struct hinic3_eq *eq)
+{
+ int err;
+
+ if (eq->type == HINIC3_AEQ)
+ err = aeq_irq_handler(eq);
+ else
+ err = ceq_irq_handler(eq);
+
+ set_eq_cons_idx(eq, err ? HINIC3_EQ_NOT_ARMED :
+ HINIC3_EQ_ARMED);
+
+ return err;
+}
+
+static void aeq_irq_work(struct work_struct *work)
+{
+ struct hinic3_eq *eq = container_of(work, struct hinic3_eq, aeq_work);
+ int err;
+
+ err = eq_irq_handler(eq);
+ if (err)
+ reschedule_aeq_handler(eq);
+}
+
+static irqreturn_t aeq_interrupt(int irq, void *data)
+{
+ struct workqueue_struct *workq;
+ struct hinic3_eq *aeq = data;
+ struct hinic3_hwdev *hwdev;
+ struct hinic3_aeqs *aeqs;
+
+ aeqs = aeq_to_aeqs(aeq);
+ hwdev = aeq->hwdev;
+
+ /* clear resend timer cnt register */
+ workq = aeqs->workq;
+ hinic3_msix_intr_clear_resend_bit(hwdev, aeq->msix_entry_idx,
+ EQ_MSIX_RESEND_TIMER_CLEAR);
+ queue_work(workq, &aeq->aeq_work);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ceq_interrupt(int irq, void *data)
+{
+ struct hinic3_eq *ceq = data;
+ int err;
+
+ /* clear resend timer counters */
+ hinic3_msix_intr_clear_resend_bit(ceq->hwdev, ceq->msix_entry_idx,
+ EQ_MSIX_RESEND_TIMER_CLEAR);
+ err = eq_irq_handler(ceq);
+ if (err)
+ return IRQ_NONE;
+
+ return IRQ_HANDLED;
+}
+
+static int hinic3_set_ceq_ctrl_reg(struct hinic3_hwdev *hwdev, u16 q_id,
+ u32 ctrl0, u32 ctrl1)
+{
+ struct comm_cmd_set_ceq_ctrl_reg ceq_ctrl = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ ceq_ctrl.func_id = hinic3_global_func_id(hwdev);
+ ceq_ctrl.q_id = q_id;
+ ceq_ctrl.ctrl0 = ctrl0;
+ ceq_ctrl.ctrl1 = ctrl1;
+
+ mgmt_msg_params_init_default(&msg_params, &ceq_ctrl, sizeof(ceq_ctrl));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_SET_CEQ_CTRL_REG, &msg_params);
+ if (err || ceq_ctrl.head.status) {
+ dev_err(hwdev->dev, "Failed to set ceq %u ctrl reg, err: %d status: 0x%x\n",
+ q_id, err, ceq_ctrl.head.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int set_eq_ctrls(struct hinic3_eq *eq)
+{
+ struct hinic3_hwif *hwif = eq->hwdev->hwif;
+ struct hinic3_queue_pages *qpages;
+ u8 pci_intf_idx, elem_size;
+ u32 mask, ctrl0, ctrl1;
+ u32 page_size_val;
+ int err;
+
+ qpages = &eq->qpages;
+ page_size_val = ilog2(qpages->page_size / HINIC3_MIN_PAGE_SIZE);
+ pci_intf_idx = hwif->attr.pci_intf_idx;
+
+ if (eq->type == HINIC3_AEQ) {
+ /* set ctrl0 using read-modify-write */
+ mask = AEQ_CTRL_0_INTR_IDX_MASK |
+ AEQ_CTRL_0_DMA_ATTR_MASK |
+ AEQ_CTRL_0_PCI_INTF_IDX_MASK |
+ AEQ_CTRL_0_INTR_MODE_MASK;
+ ctrl0 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_AEQ_CTRL_0_ADDR);
+ ctrl0 = (ctrl0 & ~mask) |
+ AEQ_CTRL_0_SET(eq->msix_entry_idx, INTR_IDX) |
+ AEQ_CTRL_0_SET(0, DMA_ATTR) |
+ AEQ_CTRL_0_SET(pci_intf_idx, PCI_INTF_IDX) |
+ AEQ_CTRL_0_SET(HINIC3_INTR_MODE_ARMED, INTR_MODE);
+ hinic3_hwif_write_reg(hwif, HINIC3_CSR_AEQ_CTRL_0_ADDR, ctrl0);
+
+ /* HW expects log2(number of 32 byte units). */
+ elem_size = qpages->elem_size_shift - 5;
+ ctrl1 = AEQ_CTRL_1_SET(eq->eq_len, LEN) |
+ AEQ_CTRL_1_SET(elem_size, ELEM_SIZE) |
+ AEQ_CTRL_1_SET(page_size_val, PAGE_SIZE);
+ hinic3_hwif_write_reg(hwif, HINIC3_CSR_AEQ_CTRL_1_ADDR, ctrl1);
+ } else {
+ ctrl0 = CEQ_CTRL_0_SET(eq->msix_entry_idx, INTR_IDX) |
+ CEQ_CTRL_0_SET(0, DMA_ATTR) |
+ CEQ_CTRL_0_SET(0, LIMIT_KICK) |
+ CEQ_CTRL_0_SET(pci_intf_idx, PCI_INTF_IDX) |
+ CEQ_CTRL_0_SET(page_size_val, PAGE_SIZE) |
+ CEQ_CTRL_0_SET(HINIC3_INTR_MODE_ARMED, INTR_MODE);
+
+ ctrl1 = CEQ_CTRL_1_SET(eq->eq_len, LEN);
+
+ /* set ceq ctrl reg through mgmt cpu */
+ err = hinic3_set_ceq_ctrl_reg(eq->hwdev, eq->q_id, ctrl0,
+ ctrl1);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void ceq_elements_init(struct hinic3_eq *eq, u32 init_val)
+{
+ __be32 *ceqe;
+ u32 i;
+
+ for (i = 0; i < eq->eq_len; i++) {
+ ceqe = get_q_element(&eq->qpages, i, NULL);
+ *ceqe = cpu_to_be32(init_val);
+ }
+
+ wmb(); /* Ensure initialized ceq elements are visible to HW */
+}
+
+static void aeq_elements_init(struct hinic3_eq *eq, u32 init_val)
+{
+ struct hinic3_aeq_elem *aeqe;
+ u32 i;
+
+ for (i = 0; i < eq->eq_len; i++) {
+ aeqe = get_q_element(&eq->qpages, i, NULL);
+ aeqe->desc = cpu_to_be32(init_val);
+ }
+
+ wmb(); /* Ensure initialized aeq elements are visible to HW */
+}
+
+static void eq_elements_init(struct hinic3_eq *eq, u32 init_val)
+{
+ if (eq->type == HINIC3_AEQ)
+ aeq_elements_init(eq, init_val);
+ else
+ ceq_elements_init(eq, init_val);
+}
+
+static int alloc_eq_pages(struct hinic3_eq *eq)
+{
+ struct hinic3_hwif *hwif = eq->hwdev->hwif;
+ struct hinic3_queue_pages *qpages;
+ dma_addr_t page_paddr;
+ u32 reg, init_val;
+ u16 pg_idx;
+ int err;
+
+ qpages = &eq->qpages;
+ err = hinic3_queue_pages_alloc(eq->hwdev, qpages, HINIC3_MIN_PAGE_SIZE);
+ if (err)
+ return err;
+
+ for (pg_idx = 0; pg_idx < qpages->num_pages; pg_idx++) {
+ page_paddr = qpages->pages[pg_idx].align_paddr;
+ reg = EQ_HI_PHYS_ADDR_REG(eq->type, pg_idx);
+ hinic3_hwif_write_reg(hwif, reg, upper_32_bits(page_paddr));
+ reg = EQ_LO_PHYS_ADDR_REG(eq->type, pg_idx);
+ hinic3_hwif_write_reg(hwif, reg, lower_32_bits(page_paddr));
+ }
+
+ init_val = HINIC3_EQ_WRAPPED(eq);
+ eq_elements_init(eq, init_val);
+
+ return 0;
+}
+
+static void eq_calc_page_size_and_num(struct hinic3_eq *eq, u32 elem_size)
+{
+ u32 max_pages, min_page_size, page_size, total_size;
+
+ /* No need for complicated arithmetic. All values must be powers of 2, so
+ * multiplications yield powers of 2 and divisions yield powers of 2
+ * without remainder.
+ */
+ max_pages = HINIC3_EQ_MAX_PAGES(eq);
+ min_page_size = HINIC3_MIN_PAGE_SIZE;
+ total_size = eq->eq_len * elem_size;
+
+ if (total_size <= max_pages * min_page_size)
+ page_size = min_page_size;
+ else
+ page_size = total_size / max_pages;
+
+ hinic3_queue_pages_init(&eq->qpages, eq->eq_len, page_size, elem_size);
+}
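+
+/* Worked example (illustrative): an AEQ of eq_len = 0x10000 with 64-byte
+ * elements needs 4 MB. HINIC3_AEQ_MAX_PAGES (4) pages of
+ * HINIC3_MIN_PAGE_SIZE (4 KB) cover only 16 KB, so page_size becomes
+ * 4 MB / 4 = 1 MB, still a power of 2 as required.
+ */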
+
+static int request_eq_irq(struct hinic3_eq *eq)
+{
+ int err;
+
+ if (eq->type == HINIC3_AEQ) {
+ INIT_WORK(&eq->aeq_work, aeq_irq_work);
+ snprintf(eq->irq_name, sizeof(eq->irq_name),
+ "hinic3_aeq%u@pci:%s", eq->q_id,
+ pci_name(eq->hwdev->pdev));
+ err = request_irq(eq->irq_id, aeq_interrupt, 0,
+ eq->irq_name, eq);
+ } else {
+ snprintf(eq->irq_name, sizeof(eq->irq_name),
+ "hinic3_ceq%u@pci:%s", eq->q_id,
+ pci_name(eq->hwdev->pdev));
+ err = request_threaded_irq(eq->irq_id, NULL, ceq_interrupt,
+ IRQF_ONESHOT, eq->irq_name, eq);
+ }
+
+ return err;
+}
+
+static void reset_eq(struct hinic3_eq *eq)
+{
+ /* clear eq_len to force eqe drop in hardware */
+ if (eq->type == HINIC3_AEQ)
+ hinic3_hwif_write_reg(eq->hwdev->hwif,
+ HINIC3_CSR_AEQ_CTRL_1_ADDR, 0);
+ else
+ hinic3_set_ceq_ctrl_reg(eq->hwdev, eq->q_id, 0, 0);
+
+ hinic3_hwif_write_reg(eq->hwdev->hwif, EQ_PROD_IDX_REG_ADDR(eq), 0);
+}
+
+static int init_eq(struct hinic3_eq *eq, struct hinic3_hwdev *hwdev, u16 q_id,
+ u32 q_len, enum hinic3_eq_type type,
+ struct msix_entry *msix_entry)
+{
+ u32 elem_size;
+ int err;
+
+ eq->hwdev = hwdev;
+ eq->q_id = q_id;
+ eq->type = type;
+ eq->eq_len = q_len;
+
+ /* Indirect access should set q_id first */
+ hinic3_hwif_write_reg(hwdev->hwif, HINIC3_EQ_INDIR_IDX_ADDR(eq->type),
+ eq->q_id);
+
+ reset_eq(eq);
+
+ eq->cons_idx = 0;
+ eq->wrapped = 0;
+
+ elem_size = (type == HINIC3_AEQ) ? HINIC3_AEQE_SIZE : HINIC3_CEQE_SIZE;
+ eq_calc_page_size_and_num(eq, elem_size);
+
+ err = alloc_eq_pages(eq);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to allocate pages for eq\n");
+ return err;
+ }
+
+ eq->msix_entry_idx = msix_entry->entry;
+ eq->irq_id = msix_entry->vector;
+
+ err = set_eq_ctrls(eq);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to set ctrls for eq\n");
+ goto err_free_queue_pages;
+ }
+
+ set_eq_cons_idx(eq, HINIC3_EQ_ARMED);
+
+ err = request_eq_irq(eq);
+ if (err) {
+ dev_err(hwdev->dev,
+ "Failed to request irq for the eq, err: %d\n", err);
+ goto err_free_queue_pages;
+ }
+
+ hinic3_set_msix_state(hwdev, eq->msix_entry_idx, HINIC3_MSIX_DISABLE);
+
+ return 0;
+
+err_free_queue_pages:
+ hinic3_queue_pages_free(hwdev, &eq->qpages);
+
+ return err;
+}
+
+static void remove_eq(struct hinic3_eq *eq)
+{
+ hinic3_set_msix_state(eq->hwdev, eq->msix_entry_idx,
+ HINIC3_MSIX_DISABLE);
+ free_irq(eq->irq_id, eq);
+ /* Indirect access should set q_id first */
+ hinic3_hwif_write_reg(eq->hwdev->hwif,
+ HINIC3_EQ_INDIR_IDX_ADDR(eq->type),
+ eq->q_id);
+
+ if (eq->type == HINIC3_AEQ) {
+ disable_work_sync(&eq->aeq_work);
+ /* clear eq_len to avoid hw access host memory */
+ hinic3_hwif_write_reg(eq->hwdev->hwif,
+ HINIC3_CSR_AEQ_CTRL_1_ADDR, 0);
+ } else {
+ hinic3_set_ceq_ctrl_reg(eq->hwdev, eq->q_id, 0, 0);
+ }
+
+ /* update consumer index to avoid invalid interrupt */
+ eq->cons_idx = hinic3_hwif_read_reg(eq->hwdev->hwif,
+ EQ_PROD_IDX_REG_ADDR(eq));
+ set_eq_cons_idx(eq, HINIC3_EQ_NOT_ARMED);
+ hinic3_queue_pages_free(eq->hwdev, &eq->qpages);
+}
+
+int hinic3_aeqs_init(struct hinic3_hwdev *hwdev, u16 num_aeqs,
+ struct msix_entry *msix_entries)
+{
+ struct hinic3_aeqs *aeqs;
+ u16 q_id;
+ int err;
+
+ aeqs = kzalloc(sizeof(*aeqs), GFP_KERNEL);
+ if (!aeqs)
+ return -ENOMEM;
+
+ hwdev->aeqs = aeqs;
+ aeqs->hwdev = hwdev;
+ aeqs->num_aeqs = num_aeqs;
+ aeqs->workq = alloc_workqueue(HINIC3_EQS_WQ_NAME, WQ_MEM_RECLAIM,
+ HINIC3_MAX_AEQS);
+ if (!aeqs->workq) {
+ dev_err(hwdev->dev, "Failed to initialize aeq workqueue\n");
+ err = -ENOMEM;
+ goto err_free_aeqs;
+ }
+
+ for (q_id = 0; q_id < num_aeqs; q_id++) {
+ err = init_eq(&aeqs->aeq[q_id], hwdev, q_id,
+ HINIC3_DEFAULT_AEQ_LEN, HINIC3_AEQ,
+ &msix_entries[q_id]);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init aeq %u\n",
+ q_id);
+ goto err_remove_eqs;
+ }
+ }
+ for (q_id = 0; q_id < num_aeqs; q_id++)
+ hinic3_set_msix_state(hwdev, aeqs->aeq[q_id].msix_entry_idx,
+ HINIC3_MSIX_ENABLE);
+
+ return 0;
+
+err_remove_eqs:
+ while (q_id > 0) {
+ q_id--;
+ remove_eq(&aeqs->aeq[q_id]);
+ }
+
+ destroy_workqueue(aeqs->workq);
+
+err_free_aeqs:
+ kfree(aeqs);
+
+ return err;
+}
+
+void hinic3_aeqs_free(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_aeqs *aeqs = hwdev->aeqs;
+ enum hinic3_aeq_type aeq_event;
+ struct hinic3_eq *eq;
+ u16 q_id;
+
+ for (q_id = 0; q_id < aeqs->num_aeqs; q_id++) {
+ eq = aeqs->aeq + q_id;
+ remove_eq(eq);
+ hinic3_free_irq(hwdev, eq->irq_id);
+ }
+
+ for (aeq_event = 0; aeq_event < HINIC3_MAX_AEQ_EVENTS; aeq_event++)
+ hinic3_aeq_unregister_cb(hwdev, aeq_event);
+
+ destroy_workqueue(aeqs->workq);
+
+ kfree(aeqs);
+}
+
+int hinic3_ceqs_init(struct hinic3_hwdev *hwdev, u16 num_ceqs,
+ struct msix_entry *msix_entries)
+{
+ struct hinic3_ceqs *ceqs;
+ u16 q_id;
+ int err;
+
+ ceqs = kzalloc(sizeof(*ceqs), GFP_KERNEL);
+ if (!ceqs)
+ return -ENOMEM;
+
+ hwdev->ceqs = ceqs;
+ ceqs->hwdev = hwdev;
+ ceqs->num_ceqs = num_ceqs;
+
+ for (q_id = 0; q_id < num_ceqs; q_id++) {
+ err = init_eq(&ceqs->ceq[q_id], hwdev, q_id,
+ HINIC3_DEFAULT_CEQ_LEN, HINIC3_CEQ,
+ &msix_entries[q_id]);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init ceq %u\n",
+ q_id);
+ goto err_free_ceqs;
+ }
+ }
+ for (q_id = 0; q_id < num_ceqs; q_id++)
+ hinic3_set_msix_state(hwdev, ceqs->ceq[q_id].msix_entry_idx,
+ HINIC3_MSIX_ENABLE);
+
+ return 0;
+
+err_free_ceqs:
+ while (q_id > 0) {
+ q_id--;
+ remove_eq(&ceqs->ceq[q_id]);
+ }
+
+ kfree(ceqs);
+
+ return err;
+}
+
+void hinic3_ceqs_free(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_ceqs *ceqs = hwdev->ceqs;
+ enum hinic3_ceq_event ceq_event;
+ struct hinic3_eq *eq;
+ u16 q_id;
+
+ for (q_id = 0; q_id < ceqs->num_ceqs; q_id++) {
+ eq = ceqs->ceq + q_id;
+ remove_eq(eq);
+ hinic3_free_irq(hwdev, eq->irq_id);
+ }
+
+ for (ceq_event = 0; ceq_event < HINIC3_MAX_CEQ_EVENTS; ceq_event++)
+ hinic3_ceq_unregister_cb(hwdev, ceq_event);
+
+ kfree(ceqs);
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_eqs.h b/drivers/net/ethernet/huawei/hinic3/hinic3_eqs.h
new file mode 100644
index 000000000000..005a6e0745b3
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_eqs.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_EQS_H_
+#define _HINIC3_EQS_H_
+
+#include <linux/interrupt.h>
+
+#include "hinic3_hw_cfg.h"
+#include "hinic3_queue_common.h"
+
+#define HINIC3_MAX_AEQS 4
+#define HINIC3_MAX_CEQS 32
+
+#define HINIC3_AEQ_MAX_PAGES 4
+#define HINIC3_CEQ_MAX_PAGES 8
+
+#define HINIC3_AEQE_SIZE 64
+#define HINIC3_CEQE_SIZE 4
+
+#define HINIC3_AEQE_DESC_SIZE 4
+#define HINIC3_AEQE_DATA_SIZE (HINIC3_AEQE_SIZE - HINIC3_AEQE_DESC_SIZE)
+
+#define HINIC3_DEFAULT_AEQ_LEN 0x10000
+#define HINIC3_DEFAULT_CEQ_LEN 0x10000
+
+#define HINIC3_EQ_IRQ_NAME_LEN 64
+
+#define HINIC3_EQ_USLEEP_LOW_BOUND 900
+#define HINIC3_EQ_USLEEP_HIGH_BOUND 1000
+
+enum hinic3_eq_type {
+ HINIC3_AEQ = 0,
+ HINIC3_CEQ = 1,
+};
+
+enum hinic3_eq_intr_mode {
+ HINIC3_INTR_MODE_ARMED = 0,
+ HINIC3_INTR_MODE_ALWAYS = 1,
+};
+
+enum hinic3_eq_ci_arm_state {
+ HINIC3_EQ_NOT_ARMED = 0,
+ HINIC3_EQ_ARMED = 1,
+};
+
+struct hinic3_eq {
+ struct hinic3_hwdev *hwdev;
+ struct hinic3_queue_pages qpages;
+ u16 q_id;
+ enum hinic3_eq_type type;
+ u32 eq_len;
+ u32 cons_idx;
+ u8 wrapped;
+ u32 irq_id;
+ u16 msix_entry_idx;
+ char irq_name[HINIC3_EQ_IRQ_NAME_LEN];
+ struct work_struct aeq_work;
+};
+
+struct hinic3_aeq_elem {
+ u8 aeqe_data[HINIC3_AEQE_DATA_SIZE];
+ __be32 desc;
+};
+
+enum hinic3_aeq_type {
+ HINIC3_HW_INTER_INT = 0,
+ HINIC3_MBX_FROM_FUNC = 1,
+ HINIC3_MSG_FROM_FW = 2,
+ HINIC3_MAX_AEQ_EVENTS = 6,
+};
+
+typedef void (*hinic3_aeq_event_cb)(struct hinic3_hwdev *hwdev, u8 *data,
+ u8 size);
+
+struct hinic3_aeqs {
+ struct hinic3_hwdev *hwdev;
+ hinic3_aeq_event_cb aeq_cb[HINIC3_MAX_AEQ_EVENTS];
+ struct hinic3_eq aeq[HINIC3_MAX_AEQS];
+ u16 num_aeqs;
+ struct workqueue_struct *workq;
+ /* lock for aeq event flag */
+ spinlock_t aeq_lock;
+};
+
+enum hinic3_ceq_event {
+ HINIC3_CMDQ = 3,
+ HINIC3_MAX_CEQ_EVENTS = 6,
+};
+
+typedef void (*hinic3_ceq_event_cb)(struct hinic3_hwdev *hwdev,
+ __le32 ceqe_data);
+
+struct hinic3_ceqs {
+ struct hinic3_hwdev *hwdev;
+
+ hinic3_ceq_event_cb ceq_cb[HINIC3_MAX_CEQ_EVENTS];
+
+ struct hinic3_eq ceq[HINIC3_MAX_CEQS];
+ u16 num_ceqs;
+ /* lock for ceq event flag */
+ spinlock_t ceq_lock;
+};
+
+int hinic3_aeqs_init(struct hinic3_hwdev *hwdev, u16 num_aeqs,
+ struct msix_entry *msix_entries);
+void hinic3_aeqs_free(struct hinic3_hwdev *hwdev);
+int hinic3_aeq_register_cb(struct hinic3_hwdev *hwdev,
+ enum hinic3_aeq_type event,
+ hinic3_aeq_event_cb hwe_cb);
+void hinic3_aeq_unregister_cb(struct hinic3_hwdev *hwdev,
+ enum hinic3_aeq_type event);
+int hinic3_ceqs_init(struct hinic3_hwdev *hwdev, u16 num_ceqs,
+ struct msix_entry *msix_entries);
+void hinic3_ceqs_free(struct hinic3_hwdev *hwdev);
+int hinic3_ceq_register_cb(struct hinic3_hwdev *hwdev,
+ enum hinic3_ceq_event event,
+ hinic3_ceq_event_cb callback);
+void hinic3_ceq_unregister_cb(struct hinic3_hwdev *hwdev,
+ enum hinic3_ceq_event event);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.c b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.c
index 87d9450c30ca..7827c1f626db 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.c
@@ -8,6 +8,217 @@
#include "hinic3_hwif.h"
#include "hinic3_mbox.h"
+#define HINIC3_CFG_MAX_QP 256
+
+static void hinic3_parse_pub_res_cap(struct hinic3_hwdev *hwdev,
+ struct hinic3_dev_cap *cap,
+ const struct cfg_cmd_dev_cap *dev_cap,
+ enum hinic3_func_type type)
+{
+ cap->port_id = dev_cap->port_id;
+ cap->supp_svcs_bitmap = dev_cap->svc_cap_en;
+}
+
+static void hinic3_parse_l2nic_res_cap(struct hinic3_hwdev *hwdev,
+ struct hinic3_dev_cap *cap,
+ const struct cfg_cmd_dev_cap *dev_cap,
+ enum hinic3_func_type type)
+{
+ struct hinic3_nic_service_cap *nic_svc_cap = &cap->nic_svc_cap;
+
+ nic_svc_cap->max_sqs = min(dev_cap->nic_max_sq_id + 1,
+ HINIC3_CFG_MAX_QP);
+}
+
+static void hinic3_parse_dev_cap(struct hinic3_hwdev *hwdev,
+ const struct cfg_cmd_dev_cap *dev_cap,
+ enum hinic3_func_type type)
+{
+ struct hinic3_dev_cap *cap = &hwdev->cfg_mgmt->cap;
+
+ /* Public resource */
+ hinic3_parse_pub_res_cap(hwdev, cap, dev_cap, type);
+
+ /* L2 NIC resource */
+ if (hinic3_support_nic(hwdev))
+ hinic3_parse_l2nic_res_cap(hwdev, cap, dev_cap, type);
+}
+
+static int get_cap_from_fw(struct hinic3_hwdev *hwdev,
+ enum hinic3_func_type type)
+{
+ struct mgmt_msg_params msg_params = {};
+ struct cfg_cmd_dev_cap dev_cap = {};
+ int err;
+
+ dev_cap.func_id = hinic3_global_func_id(hwdev);
+
+ mgmt_msg_params_init_default(&msg_params, &dev_cap, sizeof(dev_cap));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_CFGM,
+ CFG_CMD_GET_DEV_CAP, &msg_params);
+ if (err || dev_cap.head.status) {
+ dev_err(hwdev->dev,
+ "Failed to get capability from FW, err: %d, status: 0x%x\n",
+ err, dev_cap.head.status);
+ return -EIO;
+ }
+
+ hinic3_parse_dev_cap(hwdev, &dev_cap, type);
+
+ return 0;
+}
+
+static int hinic3_init_irq_info(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_cfg_mgmt_info *cfg_mgmt = hwdev->cfg_mgmt;
+ struct hinic3_hwif *hwif = hwdev->hwif;
+ u16 intr_num = hwif->attr.num_irqs;
+ struct hinic3_irq_info *irq_info;
+ u16 intr_needed;
+
+ intr_needed = hwif->attr.msix_flex_en ? (hwif->attr.num_aeqs +
+ hwif->attr.num_ceqs + hwif->attr.num_sq) : intr_num;
+ if (intr_needed > intr_num) {
+ dev_warn(hwdev->dev, "Irq num cfg %d is less than the needed irq num %d msix_flex_en %d\n",
+ intr_num, intr_needed, hwdev->hwif->attr.msix_flex_en);
+ intr_needed = intr_num;
+ }
+
+ irq_info = &cfg_mgmt->irq_info;
+ irq_info->irq = kcalloc(intr_num, sizeof(struct hinic3_irq),
+ GFP_KERNEL);
+ if (!irq_info->irq)
+ return -ENOMEM;
+
+ irq_info->num_irq_hw = intr_needed;
+ mutex_init(&irq_info->irq_mutex);
+
+ return 0;
+}
+
+static int hinic3_init_irq_alloc_info(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_cfg_mgmt_info *cfg_mgmt = hwdev->cfg_mgmt;
+ struct hinic3_irq *irq = cfg_mgmt->irq_info.irq;
+ u16 nreq = cfg_mgmt->irq_info.num_irq_hw;
+ struct pci_dev *pdev = hwdev->pdev;
+ int actual_irq;
+ u16 i;
+
+ actual_irq = pci_alloc_irq_vectors(pdev, 2, nreq, PCI_IRQ_MSIX);
+ if (actual_irq < 0) {
+ dev_err(hwdev->dev, "Alloc msix entries with threshold 2 failed. actual_irq: %d\n",
+ actual_irq);
+ return -ENOMEM;
+ }
+
+ nreq = actual_irq;
+ cfg_mgmt->irq_info.num_irq = nreq;
+
+ for (i = 0; i < nreq; ++i) {
+ irq[i].msix_entry_idx = i;
+ irq[i].irq_id = pci_irq_vector(pdev, i);
+ irq[i].allocated = false;
+ }
+
+ return 0;
+}
+
+int hinic3_init_cfg_mgmt(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_cfg_mgmt_info *cfg_mgmt;
+ int err;
+
+ cfg_mgmt = kzalloc(sizeof(*cfg_mgmt), GFP_KERNEL);
+ if (!cfg_mgmt)
+ return -ENOMEM;
+
+ hwdev->cfg_mgmt = cfg_mgmt;
+
+ err = hinic3_init_irq_info(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init hinic3_irq_mgmt_info, err: %d\n",
+ err);
+ goto err_free_cfg_mgmt;
+ }
+
+ err = hinic3_init_irq_alloc_info(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init hinic3_irq_info, err: %d\n",
+ err);
+ goto err_free_irq_info;
+ }
+
+ return 0;
+
+err_free_irq_info:
+ kfree(cfg_mgmt->irq_info.irq);
+ cfg_mgmt->irq_info.irq = NULL;
+err_free_cfg_mgmt:
+ kfree(cfg_mgmt);
+
+ return err;
+}
+
+void hinic3_free_cfg_mgmt(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_cfg_mgmt_info *cfg_mgmt = hwdev->cfg_mgmt;
+
+ pci_free_irq_vectors(hwdev->pdev);
+ kfree(cfg_mgmt->irq_info.irq);
+ cfg_mgmt->irq_info.irq = NULL;
+ kfree(cfg_mgmt);
+}
+
+int hinic3_alloc_irqs(struct hinic3_hwdev *hwdev, u16 num,
+ struct msix_entry *alloc_arr, u16 *act_num)
+{
+ struct hinic3_irq_info *irq_info;
+ struct hinic3_irq *curr;
+ u16 i, found = 0;
+
+ irq_info = &hwdev->cfg_mgmt->irq_info;
+ mutex_lock(&irq_info->irq_mutex);
+ for (i = 0; i < irq_info->num_irq && found < num; i++) {
+ curr = irq_info->irq + i;
+ if (curr->allocated)
+ continue;
+ curr->allocated = true;
+ alloc_arr[found].vector = curr->irq_id;
+ alloc_arr[found].entry = curr->msix_entry_idx;
+ found++;
+ }
+ mutex_unlock(&irq_info->irq_mutex);
+
+ *act_num = found;
+
+ return found == 0 ? -ENOMEM : 0;
+}
+
+void hinic3_free_irq(struct hinic3_hwdev *hwdev, u32 irq_id)
+{
+ struct hinic3_irq_info *irq_info;
+ struct hinic3_irq *curr;
+ u16 i;
+
+ irq_info = &hwdev->cfg_mgmt->irq_info;
+ mutex_lock(&irq_info->irq_mutex);
+ for (i = 0; i < irq_info->num_irq; i++) {
+ curr = irq_info->irq + i;
+ if (curr->irq_id == irq_id) {
+ curr->allocated = false;
+ break;
+ }
+ }
+ mutex_unlock(&irq_info->irq_mutex);
+}
+
+int hinic3_init_capability(struct hinic3_hwdev *hwdev)
+{
+ return get_cap_from_fw(hwdev, HINIC3_FUNC_TYPE_VF);
+}
+
bool hinic3_support_nic(struct hinic3_hwdev *hwdev)
{
return hwdev->cfg_mgmt->cap.supp_svcs_bitmap &
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.h
index e017b1ae9f05..58806199bf54 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.h
@@ -42,10 +42,14 @@ struct hinic3_cfg_mgmt_info {
struct hinic3_dev_cap cap;
};
+int hinic3_init_cfg_mgmt(struct hinic3_hwdev *hwdev);
+void hinic3_free_cfg_mgmt(struct hinic3_hwdev *hwdev);
+
int hinic3_alloc_irqs(struct hinic3_hwdev *hwdev, u16 num,
struct msix_entry *alloc_arr, u16 *act_num);
void hinic3_free_irq(struct hinic3_hwdev *hwdev, u32 irq_id);
+int hinic3_init_capability(struct hinic3_hwdev *hwdev);
bool hinic3_support_nic(struct hinic3_hwdev *hwdev);
u16 hinic3_func_max_qnum(struct hinic3_hwdev *hwdev);
u8 hinic3_physical_port_id(struct hinic3_hwdev *hwdev);
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c
index 434696ce7dc2..89638813df40 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c
@@ -3,11 +3,43 @@
#include <linux/delay.h>
+#include "hinic3_cmdq.h"
#include "hinic3_hw_comm.h"
#include "hinic3_hwdev.h"
#include "hinic3_hwif.h"
#include "hinic3_mbox.h"
+int hinic3_set_interrupt_cfg_direct(struct hinic3_hwdev *hwdev,
+ const struct hinic3_interrupt_info *info)
+{
+ struct comm_cmd_cfg_msix_ctrl_reg msix_cfg = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ msix_cfg.func_id = hinic3_global_func_id(hwdev);
+ msix_cfg.msix_index = info->msix_index;
+ msix_cfg.opcode = MGMT_MSG_CMD_OP_SET;
+
+ msix_cfg.lli_credit_cnt = info->lli_credit_limit;
+ msix_cfg.lli_timer_cnt = info->lli_timer_cfg;
+ msix_cfg.pending_cnt = info->pending_limit;
+ msix_cfg.coalesce_timer_cnt = info->coalesc_timer_cfg;
+ msix_cfg.resend_timer_cnt = info->resend_timer_cfg;
+
+ mgmt_msg_params_init_default(&msg_params, &msix_cfg, sizeof(msix_cfg));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_CFG_MSIX_CTRL_REG, &msg_params);
+ if (err || msix_cfg.head.status) {
+ dev_err(hwdev->dev,
+ "Failed to set interrupt config, err: %d, status: 0x%x\n",
+ err, msix_cfg.head.status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
int hinic3_func_reset(struct hinic3_hwdev *hwdev, u16 func_id, u64 reset_flag)
{
struct comm_cmd_func_reset func_reset = {};
@@ -30,3 +62,365 @@ int hinic3_func_reset(struct hinic3_hwdev *hwdev, u16 func_id, u64 reset_flag)
return 0;
}
+
+static int hinic3_comm_features_nego(struct hinic3_hwdev *hwdev, u8 opcode,
+ u64 *s_feature, u16 size)
+{
+ struct comm_cmd_feature_nego feature_nego = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ feature_nego.func_id = hinic3_global_func_id(hwdev);
+ feature_nego.opcode = opcode;
+ if (opcode == MGMT_MSG_CMD_OP_SET)
+ memcpy(feature_nego.s_feature, s_feature,
+ array_size(size, sizeof(u64)));
+
+ mgmt_msg_params_init_default(&msg_params, &feature_nego,
+ sizeof(feature_nego));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_FEATURE_NEGO, &msg_params);
+ if (err || feature_nego.head.status) {
+ dev_err(hwdev->dev, "Failed to negotiate feature, err: %d, status: 0x%x\n",
+ err, feature_nego.head.status);
+ return -EINVAL;
+ }
+
+ if (opcode == MGMT_MSG_CMD_OP_GET)
+ memcpy(s_feature, feature_nego.s_feature,
+ array_size(size, sizeof(u64)));
+
+ return 0;
+}
+
+int hinic3_get_comm_features(struct hinic3_hwdev *hwdev, u64 *s_feature,
+ u16 size)
+{
+ return hinic3_comm_features_nego(hwdev, MGMT_MSG_CMD_OP_GET, s_feature,
+ size);
+}
+
+int hinic3_set_comm_features(struct hinic3_hwdev *hwdev, u64 *s_feature,
+ u16 size)
+{
+ return hinic3_comm_features_nego(hwdev, MGMT_MSG_CMD_OP_SET, s_feature,
+ size);
+}
+
+int hinic3_get_global_attr(struct hinic3_hwdev *hwdev,
+ struct comm_global_attr *attr)
+{
+ struct comm_cmd_get_glb_attr get_attr = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ mgmt_msg_params_init_default(&msg_params, &get_attr, sizeof(get_attr));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_GET_GLOBAL_ATTR, &msg_params);
+ if (err || get_attr.head.status) {
+ dev_err(hwdev->dev,
+ "Failed to get global attribute, err: %d, status: 0x%x\n",
+ err, get_attr.head.status);
+ return -EIO;
+ }
+
+ memcpy(attr, &get_attr.attr, sizeof(*attr));
+
+ return 0;
+}
+
+int hinic3_set_func_svc_used_state(struct hinic3_hwdev *hwdev, u16 svc_type,
+ u8 state)
+{
+ struct comm_cmd_set_func_svc_used_state used_state = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ used_state.func_id = hinic3_global_func_id(hwdev);
+ used_state.svc_type = svc_type;
+ used_state.used_state = state;
+
+ mgmt_msg_params_init_default(&msg_params, &used_state,
+ sizeof(used_state));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_SET_FUNC_SVC_USED_STATE,
+ &msg_params);
+ if (err || used_state.head.status) {
+ dev_err(hwdev->dev,
+ "Failed to set func service used state, err: %d, status: 0x%x\n",
+ err, used_state.head.status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int hinic3_set_dma_attr_tbl(struct hinic3_hwdev *hwdev, u8 entry_idx, u8 st,
+ u8 at, u8 ph, u8 no_snooping, u8 tph_en)
+{
+ struct comm_cmd_set_dma_attr dma_attr = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ dma_attr.func_id = hinic3_global_func_id(hwdev);
+ dma_attr.entry_idx = entry_idx;
+ dma_attr.st = st;
+ dma_attr.at = at;
+ dma_attr.ph = ph;
+ dma_attr.no_snooping = no_snooping;
+ dma_attr.tph_en = tph_en;
+
+ mgmt_msg_params_init_default(&msg_params, &dma_attr, sizeof(dma_attr));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_SET_DMA_ATTR, &msg_params);
+ if (err || dma_attr.head.status) {
+ dev_err(hwdev->dev, "Failed to set dma attr, err: %d, status: 0x%x\n",
+ err, dma_attr.head.status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int hinic3_set_wq_page_size(struct hinic3_hwdev *hwdev, u16 func_idx,
+ u32 page_size)
+{
+ struct comm_cmd_cfg_wq_page_size page_size_info = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ page_size_info.func_id = func_idx;
+ page_size_info.page_size = ilog2(page_size / HINIC3_MIN_PAGE_SIZE);
+ page_size_info.opcode = MGMT_MSG_CMD_OP_SET;
+
+ mgmt_msg_params_init_default(&msg_params, &page_size_info,
+ sizeof(page_size_info));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_CFG_PAGESIZE, &msg_params);
+ if (err || page_size_info.head.status) {
+ dev_err(hwdev->dev,
+ "Failed to set wq page size, err: %d, status: 0x%x\n",
+ err, page_size_info.head.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int hinic3_set_cmdq_depth(struct hinic3_hwdev *hwdev, u16 cmdq_depth)
+{
+ struct comm_cmd_set_root_ctxt root_ctxt = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ root_ctxt.func_id = hinic3_global_func_id(hwdev);
+
+ root_ctxt.set_cmdq_depth = 1;
+ root_ctxt.cmdq_depth = ilog2(cmdq_depth);
+
+ mgmt_msg_params_init_default(&msg_params, &root_ctxt,
+ sizeof(root_ctxt));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_SET_VAT, &msg_params);
+ if (err || root_ctxt.head.status) {
+ dev_err(hwdev->dev,
+ "Failed to set cmdq depth, err: %d, status: 0x%x\n",
+ err, root_ctxt.head.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+#define HINIC3_WAIT_CMDQ_IDLE_TIMEOUT 5000
+
+static enum hinic3_wait_return check_cmdq_stop_handler(void *priv_data)
+{
+ struct hinic3_hwdev *hwdev = priv_data;
+ enum hinic3_cmdq_type cmdq_type;
+ struct hinic3_cmdqs *cmdqs;
+
+ cmdqs = hwdev->cmdqs;
+ for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++) {
+ if (!hinic3_cmdq_idle(&cmdqs->cmdq[cmdq_type]))
+ return HINIC3_WAIT_PROCESS_WAITING;
+ }
+
+ return HINIC3_WAIT_PROCESS_CPL;
+}
+
+static int wait_cmdq_stop(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_cmdqs *cmdqs = hwdev->cmdqs;
+ enum hinic3_cmdq_type cmdq_type;
+ int err;
+
+ if (!(cmdqs->status & HINIC3_CMDQ_ENABLE))
+ return 0;
+
+ cmdqs->status &= ~HINIC3_CMDQ_ENABLE;
+ err = hinic3_wait_for_timeout(hwdev, check_cmdq_stop_handler,
+ HINIC3_WAIT_CMDQ_IDLE_TIMEOUT,
+ USEC_PER_MSEC);
+
+ if (err)
+ goto err_reenable_cmdq;
+
+ return 0;
+
+err_reenable_cmdq:
+ for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++) {
+ if (!hinic3_cmdq_idle(&cmdqs->cmdq[cmdq_type]))
+ dev_err(hwdev->dev, "Cmdq %d is busy\n", cmdq_type);
+ }
+ cmdqs->status |= HINIC3_CMDQ_ENABLE;
+
+ return err;
+}
+
+int hinic3_func_rx_tx_flush(struct hinic3_hwdev *hwdev)
+{
+ struct comm_cmd_clear_resource clear_db = {};
+ struct comm_cmd_clear_resource clr_res = {};
+ struct hinic3_hwif *hwif = hwdev->hwif;
+ struct mgmt_msg_params msg_params = {};
+ int ret = 0;
+ int err;
+
+ err = wait_cmdq_stop(hwdev);
+ if (err) {
+ dev_warn(hwdev->dev, "CMDQ is still working, CMDQ timeout value is unreasonable\n");
+ ret = err;
+ }
+
+ hinic3_toggle_doorbell(hwif, DISABLE_DOORBELL);
+
+ clear_db.func_id = hwif->attr.func_global_idx;
+ mgmt_msg_params_init_default(&msg_params, &clear_db, sizeof(clear_db));
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_FLUSH_DOORBELL, &msg_params);
+ if (err || clear_db.head.status) {
+ dev_warn(hwdev->dev, "Failed to flush doorbell, err: %d, status: 0x%x\n",
+ err, clear_db.head.status);
+ if (err)
+ ret = err;
+ else
+ ret = -EFAULT;
+ }
+
+ clr_res.func_id = hwif->attr.func_global_idx;
+ msg_params.buf_in = &clr_res;
+ msg_params.in_size = sizeof(clr_res);
+ err = hinic3_send_mbox_to_mgmt_no_ack(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_START_FLUSH,
+ &msg_params);
+ if (err) {
+ dev_warn(hwdev->dev, "Failed to notice flush message, err: %d\n",
+ err);
+ ret = err;
+ }
+
+ hinic3_toggle_doorbell(hwif, ENABLE_DOORBELL);
+
+ err = hinic3_reinit_cmdq_ctxts(hwdev);
+ if (err) {
+ dev_warn(hwdev->dev, "Failed to reinit cmdq\n");
+ ret = err;
+ }
+
+ return ret;
+}
+
+static int get_hw_rx_buf_size_idx(int rx_buf_sz, u16 *buf_sz_idx)
+{
+ /* Supported RX buffer sizes in bytes. Configured by array index. */
+ static const int supported_sizes[16] = {
+ [0] = 32, [1] = 64, [2] = 96, [3] = 128,
+ [4] = 192, [5] = 256, [6] = 384, [7] = 512,
+ [8] = 768, [9] = 1024, [10] = 1536, [11] = 2048,
+ [12] = 3072, [13] = 4096, [14] = 8192, [15] = 16384,
+ };
+ u16 idx;
+
+ /* Scan from biggest to smallest and choose the largest supported size
+ * that is equal to or smaller than the requested one. With a smaller
+ * value HW under-utilizes posted buffers; with a bigger value HW may
+ * overrun posted buffers.
+ */
+ idx = ARRAY_SIZE(supported_sizes);
+ while (idx > 0) {
+ idx--;
+ if (supported_sizes[idx] <= rx_buf_sz) {
+ *buf_sz_idx = idx;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
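+
+/* Example (illustrative): rx_buf_sz = 2048 selects index 11 exactly;
+ * rx_buf_sz = 3000 also selects index 11 (2048 bytes), under-utilizing the
+ * posted buffers rather than risking overrun; rx_buf_sz = 16 has no smaller
+ * supported size and fails with -EINVAL.
+ */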
+
+int hinic3_set_root_ctxt(struct hinic3_hwdev *hwdev, u32 rq_depth, u32 sq_depth,
+ int rx_buf_sz)
+{
+ struct comm_cmd_set_root_ctxt root_ctxt = {};
+ struct mgmt_msg_params msg_params = {};
+ u16 buf_sz_idx;
+ int err;
+
+ err = get_hw_rx_buf_size_idx(rx_buf_sz, &buf_sz_idx);
+ if (err)
+ return err;
+
+ root_ctxt.func_id = hinic3_global_func_id(hwdev);
+
+ root_ctxt.set_cmdq_depth = 0;
+ root_ctxt.cmdq_depth = 0;
+
+ root_ctxt.lro_en = 1;
+
+ root_ctxt.rq_depth = ilog2(rq_depth);
+ root_ctxt.rx_buf_sz = buf_sz_idx;
+ root_ctxt.sq_depth = ilog2(sq_depth);
+
+ mgmt_msg_params_init_default(&msg_params, &root_ctxt,
+ sizeof(root_ctxt));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_SET_VAT, &msg_params);
+ if (err || root_ctxt.head.status) {
+ dev_err(hwdev->dev,
+ "Failed to set root context, err: %d, status: 0x%x\n",
+ err, root_ctxt.head.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int hinic3_clean_root_ctxt(struct hinic3_hwdev *hwdev)
+{
+ struct comm_cmd_set_root_ctxt root_ctxt = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ root_ctxt.func_id = hinic3_global_func_id(hwdev);
+
+ mgmt_msg_params_init_default(&msg_params, &root_ctxt,
+ sizeof(root_ctxt));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_SET_VAT, &msg_params);
+ if (err || root_ctxt.head.status) {
+ dev_err(hwdev->dev,
+ "Failed to set root context, err: %d, status: 0x%x\n",
+ err, root_ctxt.head.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h
index c33a1c77da9c..304f5691f0c2 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h
@@ -8,6 +8,40 @@
struct hinic3_hwdev;
+#define HINIC3_WQ_PAGE_SIZE_ORDER 8
+
+struct hinic3_interrupt_info {
+ u32 lli_set;
+ u32 interrupt_coalesc_set;
+ u16 msix_index;
+ u8 lli_credit_limit;
+ u8 lli_timer_cfg;
+ u8 pending_limit;
+ u8 coalesc_timer_cfg;
+ u8 resend_timer_cfg;
+};
+
+int hinic3_set_interrupt_cfg_direct(struct hinic3_hwdev *hwdev,
+ const struct hinic3_interrupt_info *info);
int hinic3_func_reset(struct hinic3_hwdev *hwdev, u16 func_id, u64 reset_flag);
+int hinic3_get_comm_features(struct hinic3_hwdev *hwdev, u64 *s_feature,
+ u16 size);
+int hinic3_set_comm_features(struct hinic3_hwdev *hwdev, u64 *s_feature,
+ u16 size);
+int hinic3_get_global_attr(struct hinic3_hwdev *hwdev,
+ struct comm_global_attr *attr);
+int hinic3_set_func_svc_used_state(struct hinic3_hwdev *hwdev, u16 svc_type,
+ u8 state);
+int hinic3_set_dma_attr_tbl(struct hinic3_hwdev *hwdev, u8 entry_idx, u8 st,
+ u8 at, u8 ph, u8 no_snooping, u8 tph_en);
+
+int hinic3_set_wq_page_size(struct hinic3_hwdev *hwdev, u16 func_idx,
+ u32 page_size);
+int hinic3_set_cmdq_depth(struct hinic3_hwdev *hwdev, u16 cmdq_depth);
+int hinic3_func_rx_tx_flush(struct hinic3_hwdev *hwdev);
+int hinic3_set_root_ctxt(struct hinic3_hwdev *hwdev, u32 rq_depth, u32 sq_depth,
+ int rx_buf_sz);
+int hinic3_clean_root_ctxt(struct hinic3_hwdev *hwdev);
+
#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h
index 22c84093efa2..623cf2d14cbc 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h
@@ -51,6 +51,48 @@ static inline void mgmt_msg_params_init_default(struct mgmt_msg_params *msg_para
msg_params->timeout_ms = 0;
}
+enum cfg_cmd {
+ CFG_CMD_GET_DEV_CAP = 0,
+};
+
+/* Device capabilities, defined by hw */
+struct cfg_cmd_dev_cap {
+ struct mgmt_msg_head head;
+
+ u16 func_id;
+ u16 rsvd1;
+
+ /* Public resources */
+ u8 host_id;
+ u8 ep_id;
+ u8 er_id;
+ u8 port_id;
+
+ u16 host_total_func;
+ u8 host_pf_num;
+ u8 pf_id_start;
+ u16 host_vf_num;
+ u16 vf_id_start;
+ u8 host_oq_id_mask_val;
+ u8 timer_en;
+ u8 host_valid_bitmap;
+ u8 rsvd_host;
+
+ u16 svc_cap_en;
+ u16 max_vf;
+ u8 flexq_en;
+ u8 valid_cos_bitmap;
+ u8 port_cos_valid_bitmap;
+ u8 rsvd2[45];
+
+ /* l2nic */
+ u16 nic_max_sq_id;
+ u16 nic_max_rq_id;
+ u16 nic_default_num_queues;
+
+ u8 rsvd3[250];
+};
+
/* COMM Commands between Driver to fw */
enum comm_cmd {
/* Commands for clearing FLR and resources */
@@ -70,6 +112,20 @@ enum comm_cmd {
COMM_CMD_SET_DMA_ATTR = 25,
};
+struct comm_cmd_cfg_msix_ctrl_reg {
+ struct mgmt_msg_head head;
+ u16 func_id;
+ u8 opcode;
+ u8 rsvd1;
+ u16 msix_index;
+ u8 pending_cnt;
+ u8 coalesce_timer_cnt;
+ u8 resend_timer_cnt;
+ u8 lli_timer_cnt;
+ u8 lli_credit_cnt;
+ u8 rsvd2[5];
+};
+
enum comm_func_reset_bits {
COMM_FUNC_RESET_BIT_FLUSH = BIT(0),
COMM_FUNC_RESET_BIT_MQM = BIT(1),
@@ -84,6 +140,11 @@ enum comm_func_reset_bits {
COMM_FUNC_RESET_BIT_NIC = BIT(13),
};
+#define COMM_FUNC_RESET_FLAG \
+ (COMM_FUNC_RESET_BIT_COMM | COMM_FUNC_RESET_BIT_COMM_CMD_CH | \
+ COMM_FUNC_RESET_BIT_FLUSH | COMM_FUNC_RESET_BIT_MQM | \
+ COMM_FUNC_RESET_BIT_SMF | COMM_FUNC_RESET_BIT_PF_BW_CFG)
+
struct comm_cmd_func_reset {
struct mgmt_msg_head head;
u16 func_id;
@@ -100,6 +161,96 @@ struct comm_cmd_feature_nego {
u64 s_feature[COMM_MAX_FEATURE_QWORD];
};
+struct comm_global_attr {
+ u8 max_host_num;
+ u8 max_pf_num;
+ u16 vf_id_start;
+ /* for api cmd to mgmt cpu */
+ u8 mgmt_host_node_id;
+ u8 cmdq_num;
+ u8 rsvd1[34];
+};
+
+struct comm_cmd_get_glb_attr {
+ struct mgmt_msg_head head;
+ struct comm_global_attr attr;
+};
+
+enum comm_func_svc_type {
+ COMM_FUNC_SVC_T_COMM = 0,
+ COMM_FUNC_SVC_T_NIC = 1,
+};
+
+struct comm_cmd_set_func_svc_used_state {
+ struct mgmt_msg_head head;
+ u16 func_id;
+ u16 svc_type;
+ u8 used_state;
+ u8 rsvd[35];
+};
+
+struct comm_cmd_set_dma_attr {
+ struct mgmt_msg_head head;
+ u16 func_id;
+ u8 entry_idx;
+ u8 st;
+ u8 at;
+ u8 ph;
+ u8 no_snooping;
+ u8 tph_en;
+ u32 resv1;
+};
+
+struct comm_cmd_set_ceq_ctrl_reg {
+ struct mgmt_msg_head head;
+ u16 func_id;
+ u16 q_id;
+ u32 ctrl0;
+ u32 ctrl1;
+ u32 rsvd1;
+};
+
+struct comm_cmd_cfg_wq_page_size {
+ struct mgmt_msg_head head;
+ u16 func_id;
+ u8 opcode;
+ /* real_size = 4KB * 2^page_size; range (0~20) must be validated by driver */
+ u8 page_size;
+ u32 rsvd1;
+};
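+
+/* Example (illustrative): hinic3_set_wq_page_size() encodes the field as
+ * ilog2(page_size / 4 KB); with HINIC3_WQ_PAGE_SIZE_ORDER = 8 the driver
+ * requests 4 KB << 8 = 1 MB pages, i.e. page_size = 8.
+ */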
+
+struct comm_cmd_set_root_ctxt {
+ struct mgmt_msg_head head;
+ u16 func_id;
+ u8 set_cmdq_depth;
+ u8 cmdq_depth;
+ u16 rx_buf_sz;
+ u8 lro_en;
+ u8 rsvd1;
+ u16 sq_depth;
+ u16 rq_depth;
+ u64 rsvd2;
+};
+
+struct comm_cmdq_ctxt_info {
+ __le64 curr_wqe_page_pfn;
+ __le64 wq_block_pfn;
+};
+
+struct comm_cmd_set_cmdq_ctxt {
+ struct mgmt_msg_head head;
+ u16 func_id;
+ u8 cmdq_id;
+ u8 rsvd1[5];
+ struct comm_cmdq_ctxt_info ctxt;
+};
+
+struct comm_cmd_clear_resource {
+ struct mgmt_msg_head head;
+ u16 func_id;
+ u16 rsvd1[3];
+};
+
/* Services supported by HW. HW uses these values when delivering events.
* HW supports multiple services that are not yet supported by driver
* (e.g. RoCE).
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c b/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c
index 6e8788a64925..95a213133be9 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c
@@ -1,24 +1,557 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+#include "hinic3_cmdq.h"
+#include "hinic3_csr.h"
+#include "hinic3_eqs.h"
#include "hinic3_hw_comm.h"
#include "hinic3_hwdev.h"
#include "hinic3_hwif.h"
#include "hinic3_mbox.h"
#include "hinic3_mgmt.h"
+#define HINIC3_PCIE_SNOOP 0
+#define HINIC3_PCIE_TPH_DISABLE 0
+
+#define HINIC3_DMA_ATTR_INDIR_IDX_MASK GENMASK(9, 0)
+#define HINIC3_DMA_ATTR_INDIR_IDX_SET(val, member) \
+ FIELD_PREP(HINIC3_DMA_ATTR_INDIR_##member##_MASK, val)
+
+#define HINIC3_DMA_ATTR_ENTRY_ST_MASK GENMASK(7, 0)
+#define HINIC3_DMA_ATTR_ENTRY_AT_MASK GENMASK(9, 8)
+#define HINIC3_DMA_ATTR_ENTRY_PH_MASK GENMASK(11, 10)
+#define HINIC3_DMA_ATTR_ENTRY_NO_SNOOPING_MASK BIT(12)
+#define HINIC3_DMA_ATTR_ENTRY_TPH_EN_MASK BIT(13)
+#define HINIC3_DMA_ATTR_ENTRY_SET(val, member) \
+ FIELD_PREP(HINIC3_DMA_ATTR_ENTRY_##member##_MASK, val)
+
+#define HINIC3_PCIE_ST_DISABLE 0
+#define HINIC3_PCIE_AT_DISABLE 0
+#define HINIC3_PCIE_PH_DISABLE 0
+#define HINIC3_PCIE_MSIX_ATTR_ENTRY 0
+
+#define HINIC3_DEFAULT_EQ_MSIX_PENDING_LIMIT 0
+#define HINIC3_DEFAULT_EQ_MSIX_COALESC_TIMER_CFG 0xFF
+#define HINIC3_DEFAULT_EQ_MSIX_RESEND_TIMER_CFG 7
+
+#define HINIC3_HWDEV_WQ_NAME "hinic3_hardware"
+#define HINIC3_WQ_MAX_REQ 10
+
+enum hinic3_hwdev_init_state {
+ HINIC3_HWDEV_MBOX_INITED = 2,
+ HINIC3_HWDEV_CMDQ_INITED = 3,
+};
+
+static int hinic3_comm_aeqs_init(struct hinic3_hwdev *hwdev)
+{
+ struct msix_entry aeq_msix_entries[HINIC3_MAX_AEQS];
+ u16 num_aeqs, resp_num_irq, i;
+ int err;
+
+ num_aeqs = hwdev->hwif->attr.num_aeqs;
+ if (num_aeqs > HINIC3_MAX_AEQS) {
+ dev_warn(hwdev->dev, "Adjust aeq num to %d\n",
+ HINIC3_MAX_AEQS);
+ num_aeqs = HINIC3_MAX_AEQS;
+ }
+ err = hinic3_alloc_irqs(hwdev, num_aeqs, aeq_msix_entries,
+ &resp_num_irq);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to alloc aeq irqs, num_aeqs: %u\n",
+ num_aeqs);
+ return err;
+ }
+
+ if (resp_num_irq < num_aeqs) {
+ dev_warn(hwdev->dev, "Adjust aeq num to %u\n",
+ resp_num_irq);
+ num_aeqs = resp_num_irq;
+ }
+
+ err = hinic3_aeqs_init(hwdev, num_aeqs, aeq_msix_entries);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init aeqs\n");
+ goto err_free_irqs;
+ }
+
+ return 0;
+
+err_free_irqs:
+ for (i = 0; i < num_aeqs; i++)
+ hinic3_free_irq(hwdev, aeq_msix_entries[i].vector);
+
+ return err;
+}
+
+static int hinic3_comm_ceqs_init(struct hinic3_hwdev *hwdev)
+{
+ struct msix_entry ceq_msix_entries[HINIC3_MAX_CEQS];
+ u16 num_ceqs, resp_num_irq, i;
+ int err;
+
+ num_ceqs = hwdev->hwif->attr.num_ceqs;
+ if (num_ceqs > HINIC3_MAX_CEQS) {
+ dev_warn(hwdev->dev, "Adjust ceq num to %d\n",
+ HINIC3_MAX_CEQS);
+ num_ceqs = HINIC3_MAX_CEQS;
+ }
+
+ err = hinic3_alloc_irqs(hwdev, num_ceqs, ceq_msix_entries,
+ &resp_num_irq);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to alloc ceq irqs, num_ceqs: %u\n",
+ num_ceqs);
+ return err;
+ }
+
+ if (resp_num_irq < num_ceqs) {
+ dev_warn(hwdev->dev, "Adjust ceq num to %u\n",
+ resp_num_irq);
+ num_ceqs = resp_num_irq;
+ }
+
+ err = hinic3_ceqs_init(hwdev, num_ceqs, ceq_msix_entries);
+ if (err) {
+ dev_err(hwdev->dev,
+ "Failed to init ceqs, err:%d\n", err);
+ goto err_free_irqs;
+ }
+
+ return 0;
+
+err_free_irqs:
+ for (i = 0; i < num_ceqs; i++)
+ hinic3_free_irq(hwdev, ceq_msix_entries[i].vector);
+
+ return err;
+}
+
+static int hinic3_comm_mbox_init(struct hinic3_hwdev *hwdev)
+{
+ int err;
+
+ err = hinic3_init_mbox(hwdev);
+ if (err)
+ return err;
+
+ hinic3_aeq_register_cb(hwdev, HINIC3_MBX_FROM_FUNC,
+ hinic3_mbox_func_aeqe_handler);
+ hinic3_aeq_register_cb(hwdev, HINIC3_MSG_FROM_FW,
+ hinic3_mgmt_msg_aeqe_handler);
+
+ set_bit(HINIC3_HWDEV_MBOX_INITED, &hwdev->func_state);
+
+ return 0;
+}
+
+static void hinic3_comm_mbox_free(struct hinic3_hwdev *hwdev)
+{
+ spin_lock_bh(&hwdev->channel_lock);
+ clear_bit(HINIC3_HWDEV_MBOX_INITED, &hwdev->func_state);
+ spin_unlock_bh(&hwdev->channel_lock);
+ hinic3_aeq_unregister_cb(hwdev, HINIC3_MBX_FROM_FUNC);
+ hinic3_aeq_unregister_cb(hwdev, HINIC3_MSG_FROM_FW);
+ hinic3_free_mbox(hwdev);
+}
+
+static int init_aeqs_msix_attr(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_aeqs *aeqs = hwdev->aeqs;
+ struct hinic3_interrupt_info info = {};
+ struct hinic3_eq *eq;
+ u16 q_id;
+ int err;
+
+ info.interrupt_coalesc_set = 1;
+ info.pending_limit = HINIC3_DEFAULT_EQ_MSIX_PENDING_LIMIT;
+ info.coalesc_timer_cfg = HINIC3_DEFAULT_EQ_MSIX_COALESC_TIMER_CFG;
+ info.resend_timer_cfg = HINIC3_DEFAULT_EQ_MSIX_RESEND_TIMER_CFG;
+
+ for (q_id = 0; q_id < aeqs->num_aeqs; q_id++) {
+ eq = &aeqs->aeq[q_id];
+ info.msix_index = eq->msix_entry_idx;
+ err = hinic3_set_interrupt_cfg_direct(hwdev, &info);
+ if (err) {
+ dev_err(hwdev->dev, "Set msix attr for aeq %d failed\n",
+ q_id);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int init_ceqs_msix_attr(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_ceqs *ceqs = hwdev->ceqs;
+ struct hinic3_interrupt_info info = {};
+ struct hinic3_eq *eq;
+ u16 q_id;
+ int err;
+
+ info.interrupt_coalesc_set = 1;
+ info.pending_limit = HINIC3_DEFAULT_EQ_MSIX_PENDING_LIMIT;
+ info.coalesc_timer_cfg = HINIC3_DEFAULT_EQ_MSIX_COALESC_TIMER_CFG;
+ info.resend_timer_cfg = HINIC3_DEFAULT_EQ_MSIX_RESEND_TIMER_CFG;
+
+ for (q_id = 0; q_id < ceqs->num_ceqs; q_id++) {
+ eq = &ceqs->ceq[q_id];
+ info.msix_index = eq->msix_entry_idx;
+ err = hinic3_set_interrupt_cfg_direct(hwdev, &info);
+ if (err) {
+ dev_err(hwdev->dev, "Set msix attr for ceq %u failed\n",
+ q_id);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int init_basic_mgmt_channel(struct hinic3_hwdev *hwdev)
+{
+ int err;
+
+ err = hinic3_comm_aeqs_init(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init async event queues\n");
+ return err;
+ }
+
+ err = hinic3_comm_mbox_init(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init mailbox\n");
+ goto err_free_comm_aeqs;
+ }
+
+ err = init_aeqs_msix_attr(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init aeqs msix attr\n");
+ goto err_free_comm_mbox;
+ }
+
+ return 0;
+
+err_free_comm_mbox:
+ hinic3_comm_mbox_free(hwdev);
+err_free_comm_aeqs:
+ hinic3_aeqs_free(hwdev);
+
+ return err;
+}
+
+static void free_base_mgmt_channel(struct hinic3_hwdev *hwdev)
+{
+ hinic3_comm_mbox_free(hwdev);
+ hinic3_aeqs_free(hwdev);
+}
+
+static int dma_attr_table_init(struct hinic3_hwdev *hwdev)
+{
+ u32 addr, val, dst_attr;
+
+ /* Indirect access, set entry_idx first */
+ addr = HINIC3_CSR_DMA_ATTR_INDIR_IDX_ADDR;
+ val = hinic3_hwif_read_reg(hwdev->hwif, addr);
+ val &= ~HINIC3_DMA_ATTR_ENTRY_AT_MASK;
+ val |= HINIC3_DMA_ATTR_INDIR_IDX_SET(HINIC3_PCIE_MSIX_ATTR_ENTRY, IDX);
+ hinic3_hwif_write_reg(hwdev->hwif, addr, val);
+
+ addr = HINIC3_CSR_DMA_ATTR_TBL_ADDR;
+ val = hinic3_hwif_read_reg(hwdev->hwif, addr);
+
+ dst_attr = HINIC3_DMA_ATTR_ENTRY_SET(HINIC3_PCIE_ST_DISABLE, ST) |
+ HINIC3_DMA_ATTR_ENTRY_SET(HINIC3_PCIE_AT_DISABLE, AT) |
+ HINIC3_DMA_ATTR_ENTRY_SET(HINIC3_PCIE_PH_DISABLE, PH) |
+ HINIC3_DMA_ATTR_ENTRY_SET(HINIC3_PCIE_SNOOP, NO_SNOOPING) |
+ HINIC3_DMA_ATTR_ENTRY_SET(HINIC3_PCIE_TPH_DISABLE, TPH_EN);
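+ /* HW already holds the desired attributes; skip the update. */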
+ if (val == dst_attr)
+ return 0;
+
+ return hinic3_set_dma_attr_tbl(hwdev,
+ HINIC3_PCIE_MSIX_ATTR_ENTRY,
+ HINIC3_PCIE_ST_DISABLE,
+ HINIC3_PCIE_AT_DISABLE,
+ HINIC3_PCIE_PH_DISABLE,
+ HINIC3_PCIE_SNOOP,
+ HINIC3_PCIE_TPH_DISABLE);
+}
+
+static int init_basic_attributes(struct hinic3_hwdev *hwdev)
+{
+ struct comm_global_attr glb_attr;
+ int err;
+
+ err = hinic3_func_reset(hwdev, hinic3_global_func_id(hwdev),
+ COMM_FUNC_RESET_FLAG);
+ if (err)
+ return err;
+
+ err = hinic3_get_comm_features(hwdev, hwdev->features,
+ COMM_MAX_FEATURE_QWORD);
+ if (err)
+ return err;
+
+ dev_dbg(hwdev->dev, "Comm hw features: 0x%llx\n", hwdev->features[0]);
+
+ err = hinic3_get_global_attr(hwdev, &glb_attr);
+ if (err)
+ return err;
+
+ err = hinic3_set_func_svc_used_state(hwdev, COMM_FUNC_SVC_T_COMM, 1);
+ if (err)
+ return err;
+
+ err = dma_attr_table_init(hwdev);
+ if (err)
+ return err;
+
+ hwdev->max_cmdq = min(glb_attr.cmdq_num, HINIC3_MAX_CMDQ_TYPES);
+ dev_dbg(hwdev->dev,
+ "global attribute: max_host: 0x%x, max_pf: 0x%x, vf_id_start: 0x%x, mgmt node id: 0x%x, cmdq_num: 0x%x\n",
+ glb_attr.max_host_num, glb_attr.max_pf_num,
+ glb_attr.vf_id_start, glb_attr.mgmt_host_node_id,
+ glb_attr.cmdq_num);
+
+ return 0;
+}
+
+static int hinic3_comm_cmdqs_init(struct hinic3_hwdev *hwdev)
+{
+ int err;
+
+ err = hinic3_cmdqs_init(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init cmd queues\n");
+ return err;
+ }
+
+ hinic3_ceq_register_cb(hwdev, HINIC3_CMDQ, hinic3_cmdq_ceq_handler);
+
+ err = hinic3_set_cmdq_depth(hwdev, CMDQ_DEPTH);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to set cmdq depth\n");
+ goto err_free_cmdqs;
+ }
+
+ set_bit(HINIC3_HWDEV_CMDQ_INITED, &hwdev->func_state);
+
+ return 0;
+
+err_free_cmdqs:
+ hinic3_cmdqs_free(hwdev);
+
+ return err;
+}
+
+static void hinic3_comm_cmdqs_free(struct hinic3_hwdev *hwdev)
+{
+ spin_lock_bh(&hwdev->channel_lock);
+ clear_bit(HINIC3_HWDEV_CMDQ_INITED, &hwdev->func_state);
+ spin_unlock_bh(&hwdev->channel_lock);
+
+ hinic3_ceq_unregister_cb(hwdev, HINIC3_CMDQ);
+ hinic3_cmdqs_free(hwdev);
+}
+
+static int init_cmdqs_channel(struct hinic3_hwdev *hwdev)
+{
+ int err;
+
+ err = hinic3_comm_ceqs_init(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init completion event queues\n");
+ return err;
+ }
+
+ err = init_ceqs_msix_attr(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init ceqs msix attr\n");
+ goto err_free_ceqs;
+ }
+
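+ /* Use a larger WQ page size for cmdqs; the error path below restores
+ * the minimum page size.
+ */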
+ hwdev->wq_page_size = HINIC3_MIN_PAGE_SIZE << HINIC3_WQ_PAGE_SIZE_ORDER;
+ err = hinic3_set_wq_page_size(hwdev, hinic3_global_func_id(hwdev),
+ hwdev->wq_page_size);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to set wq page size\n");
+ goto err_free_ceqs;
+ }
+
+ err = hinic3_comm_cmdqs_init(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init cmd queues\n");
+ goto err_reset_wq_page_size;
+ }
+
+ return 0;
+
+err_reset_wq_page_size:
+ hinic3_set_wq_page_size(hwdev, hinic3_global_func_id(hwdev),
+ HINIC3_MIN_PAGE_SIZE);
+err_free_ceqs:
+ hinic3_ceqs_free(hwdev);
+
+ return err;
+}
+
+static void hinic3_free_cmdqs_channel(struct hinic3_hwdev *hwdev)
+{
+ hinic3_comm_cmdqs_free(hwdev);
+ hinic3_ceqs_free(hwdev);
+}
+
+static int hinic3_init_comm_ch(struct hinic3_hwdev *hwdev)
+{
+ int err;
+
+ err = init_basic_mgmt_channel(hwdev);
+ if (err)
+ return err;
+
+ err = init_basic_attributes(hwdev);
+ if (err)
+ goto err_free_basic_mgmt_ch;
+
+ err = init_cmdqs_channel(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init cmdq channel\n");
+ goto err_clear_func_svc_used_state;
+ }
+
+ return 0;
+
+err_clear_func_svc_used_state:
+ hinic3_set_func_svc_used_state(hwdev, COMM_FUNC_SVC_T_COMM, 0);
+err_free_basic_mgmt_ch:
+ free_base_mgmt_channel(hwdev);
+
+ return err;
+}
+
+static void hinic3_uninit_comm_ch(struct hinic3_hwdev *hwdev)
+{
+ hinic3_free_cmdqs_channel(hwdev);
+ hinic3_set_func_svc_used_state(hwdev, COMM_FUNC_SVC_T_COMM, 0);
+ free_base_mgmt_channel(hwdev);
+}
+
+static DEFINE_IDA(hinic3_adev_ida);
+
+static int hinic3_adev_idx_alloc(void)
+{
+ return ida_alloc(&hinic3_adev_ida, GFP_KERNEL);
+}
+
+static void hinic3_adev_idx_free(int id)
+{
+ ida_free(&hinic3_adev_ida, id);
+}
+
int hinic3_init_hwdev(struct pci_dev *pdev)
{
- /* Completed by later submission due to LoC limit. */
- return -EFAULT;
+ struct hinic3_pcidev *pci_adapter = pci_get_drvdata(pdev);
+ struct hinic3_hwdev *hwdev;
+ int err;
+
+ hwdev = kzalloc(sizeof(*hwdev), GFP_KERNEL);
+ if (!hwdev)
+ return -ENOMEM;
+
+ pci_adapter->hwdev = hwdev;
+ hwdev->adapter = pci_adapter;
+ hwdev->pdev = pci_adapter->pdev;
+ hwdev->dev = &pci_adapter->pdev->dev;
+ hwdev->func_state = 0;
+ hwdev->dev_id = hinic3_adev_idx_alloc();
+ spin_lock_init(&hwdev->channel_lock);
+
+ err = hinic3_init_hwif(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init hwif\n");
+ goto err_free_hwdev;
+ }
+
+ hwdev->workq = alloc_workqueue(HINIC3_HWDEV_WQ_NAME, WQ_MEM_RECLAIM,
+ HINIC3_WQ_MAX_REQ);
+ if (!hwdev->workq) {
+ dev_err(hwdev->dev, "Failed to alloc hardware workq\n");
+ err = -ENOMEM;
+ goto err_free_hwif;
+ }
+
+ err = hinic3_init_cfg_mgmt(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init config mgmt\n");
+ goto err_destroy_workqueue;
+ }
+
+ err = hinic3_init_comm_ch(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init communication channel\n");
+ goto err_free_cfg_mgmt;
+ }
+
+ err = hinic3_init_capability(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init capability\n");
+ goto err_uninit_comm_ch;
+ }
+
+ err = hinic3_set_comm_features(hwdev, hwdev->features,
+ COMM_MAX_FEATURE_QWORD);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to set comm features\n");
+ goto err_uninit_comm_ch;
+ }
+
+ return 0;
+
+err_uninit_comm_ch:
+ hinic3_uninit_comm_ch(hwdev);
+err_free_cfg_mgmt:
+ hinic3_free_cfg_mgmt(hwdev);
+err_destroy_workqueue:
+ destroy_workqueue(hwdev->workq);
+err_free_hwif:
+ hinic3_free_hwif(hwdev);
+err_free_hwdev:
+ pci_adapter->hwdev = NULL;
+ hinic3_adev_idx_free(hwdev->dev_id);
+ kfree(hwdev);
+
+ return err;
}
void hinic3_free_hwdev(struct hinic3_hwdev *hwdev)
{
- /* Completed by later submission due to LoC limit. */
+ u64 drv_features[COMM_MAX_FEATURE_QWORD] = {};
+
+ hinic3_set_comm_features(hwdev, drv_features, COMM_MAX_FEATURE_QWORD);
+ hinic3_func_rx_tx_flush(hwdev);
+ hinic3_uninit_comm_ch(hwdev);
+ hinic3_free_cfg_mgmt(hwdev);
+ destroy_workqueue(hwdev->workq);
+ hinic3_free_hwif(hwdev);
+ hinic3_adev_idx_free(hwdev->dev_id);
+ kfree(hwdev);
}
void hinic3_set_api_stop(struct hinic3_hwdev *hwdev)
{
- /* Completed by later submission due to LoC limit. */
+ struct hinic3_mbox *mbox;
+
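+ /* Abort any in-flight mailbox transaction and flush pending
+ * synchronous cmdq commands so their waiters bail out instead of
+ * blocking.
+ */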
+ spin_lock_bh(&hwdev->channel_lock);
+ if (test_bit(HINIC3_HWDEV_MBOX_INITED, &hwdev->func_state)) {
+ mbox = hwdev->mbox;
+ spin_lock(&mbox->mbox_lock);
+ if (mbox->event_flag == MBOX_EVENT_START)
+ mbox->event_flag = MBOX_EVENT_TIMEOUT;
+ spin_unlock(&mbox->mbox_lock);
+ }
+
+ if (test_bit(HINIC3_HWDEV_CMDQ_INITED, &hwdev->func_state))
+ hinic3_cmdq_flush_sync_cmd(hwdev);
+
+ spin_unlock_bh(&hwdev->channel_lock);
}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c b/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c
index 0865453bf0e7..f76f140fb6f7 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c
@@ -6,13 +6,428 @@
#include <linux/io.h>
#include "hinic3_common.h"
+#include "hinic3_csr.h"
#include "hinic3_hwdev.h"
#include "hinic3_hwif.h"
+#define HINIC3_HWIF_READY_TIMEOUT 10000
+#define HINIC3_DB_AND_OUTBOUND_EN_TIMEOUT 60000
+#define HINIC3_PCIE_LINK_DOWN 0xFFFFFFFF
+
+/* config BAR4/5 4MB, DB & DWQE both 2MB */
+#define HINIC3_DB_DWQE_SIZE 0x00400000
+
+/* db/dwqe page size: 4K */
+#define HINIC3_DB_PAGE_SIZE 0x00001000
+#define HINIC3_DWQE_OFFSET 0x00000800
+#define HINIC3_DB_MAX_AREAS (HINIC3_DB_DWQE_SIZE / HINIC3_DB_PAGE_SIZE)
+
+#define HINIC3_MAX_MSIX_ENTRY 2048
+
+#define HINIC3_AF0_FUNC_GLOBAL_IDX_MASK GENMASK(11, 0)
+#define HINIC3_AF0_P2P_IDX_MASK GENMASK(16, 12)
+#define HINIC3_AF0_PCI_INTF_IDX_MASK GENMASK(19, 17)
+#define HINIC3_AF0_FUNC_TYPE_MASK BIT(28)
+#define HINIC3_AF0_GET(val, member) \
+ FIELD_GET(HINIC3_AF0_##member##_MASK, val)
+
+#define HINIC3_AF1_AEQS_PER_FUNC_MASK GENMASK(9, 8)
+#define HINIC3_AF1_MGMT_INIT_STATUS_MASK BIT(30)
+#define HINIC3_AF1_GET(val, member) \
+ FIELD_GET(HINIC3_AF1_##member##_MASK, val)
+
+#define HINIC3_AF2_CEQS_PER_FUNC_MASK GENMASK(8, 0)
+#define HINIC3_AF2_IRQS_PER_FUNC_MASK GENMASK(26, 16)
+#define HINIC3_AF2_GET(val, member) \
+ FIELD_GET(HINIC3_AF2_##member##_MASK, val)
+
+#define HINIC3_AF4_DOORBELL_CTRL_MASK BIT(0)
+#define HINIC3_AF4_GET(val, member) \
+ FIELD_GET(HINIC3_AF4_##member##_MASK, val)
+#define HINIC3_AF4_SET(val, member) \
+ FIELD_PREP(HINIC3_AF4_##member##_MASK, val)
+
+#define HINIC3_AF5_OUTBOUND_CTRL_MASK BIT(0)
+#define HINIC3_AF5_GET(val, member) \
+ FIELD_GET(HINIC3_AF5_##member##_MASK, val)
+
+#define HINIC3_AF6_PF_STATUS_MASK GENMASK(15, 0)
+#define HINIC3_AF6_FUNC_MAX_SQ_MASK GENMASK(31, 23)
+#define HINIC3_AF6_MSIX_FLEX_EN_MASK BIT(22)
+#define HINIC3_AF6_GET(val, member) \
+ FIELD_GET(HINIC3_AF6_##member##_MASK, val)
+
+#define HINIC3_GET_REG_ADDR(reg) ((reg) & (HINIC3_REGS_FLAG_MASK))
+
+static void __iomem *hinic3_reg_addr(struct hinic3_hwif *hwif, u32 reg)
+{
+ return hwif->cfg_regs_base + HINIC3_GET_REG_ADDR(reg);
+}
+
+u32 hinic3_hwif_read_reg(struct hinic3_hwif *hwif, u32 reg)
+{
+ void __iomem *addr = hinic3_reg_addr(hwif, reg);
+
+ return ioread32be(addr);
+}
+
+void hinic3_hwif_write_reg(struct hinic3_hwif *hwif, u32 reg, u32 val)
+{
+ void __iomem *addr = hinic3_reg_addr(hwif, reg);
+
+ iowrite32be(val, addr);
+}
+
+static enum hinic3_wait_return check_hwif_ready_handler(void *priv_data)
+{
+ struct hinic3_hwdev *hwdev = priv_data;
+ u32 attr1;
+
+ attr1 = hinic3_hwif_read_reg(hwdev->hwif, HINIC3_CSR_FUNC_ATTR1_ADDR);
+
+ return HINIC3_AF1_GET(attr1, MGMT_INIT_STATUS) ?
+ HINIC3_WAIT_PROCESS_CPL : HINIC3_WAIT_PROCESS_WAITING;
+}
+
+static int wait_hwif_ready(struct hinic3_hwdev *hwdev)
+{
+ return hinic3_wait_for_timeout(hwdev, check_hwif_ready_handler,
+ HINIC3_HWIF_READY_TIMEOUT,
+ USEC_PER_MSEC);
+}
+
+/* Set attr struct from HW attr values. */
+static void set_hwif_attr(struct hinic3_func_attr *attr, u32 attr0, u32 attr1,
+ u32 attr2, u32 attr3, u32 attr6)
+{
+ attr->func_global_idx = HINIC3_AF0_GET(attr0, FUNC_GLOBAL_IDX);
+ attr->port_to_port_idx = HINIC3_AF0_GET(attr0, P2P_IDX);
+ attr->pci_intf_idx = HINIC3_AF0_GET(attr0, PCI_INTF_IDX);
+ attr->func_type = HINIC3_AF0_GET(attr0, FUNC_TYPE);
+
+ attr->num_aeqs = BIT(HINIC3_AF1_GET(attr1, AEQS_PER_FUNC));
+ attr->num_ceqs = HINIC3_AF2_GET(attr2, CEQS_PER_FUNC);
+ attr->num_irqs = HINIC3_AF2_GET(attr2, IRQS_PER_FUNC);
+ if (attr->num_irqs > HINIC3_MAX_MSIX_ENTRY)
+ attr->num_irqs = HINIC3_MAX_MSIX_ENTRY;
+
+ attr->num_sq = HINIC3_AF6_GET(attr6, FUNC_MAX_SQ);
+ attr->msix_flex_en = HINIC3_AF6_GET(attr6, MSIX_FLEX_EN);
+}
+
+/* Read attributes from HW and set attribute struct. */
+static int init_hwif_attr(struct hinic3_hwdev *hwdev)
+{
+ u32 attr0, attr1, attr2, attr3, attr6;
+ struct hinic3_hwif *hwif;
+
+ hwif = hwdev->hwif;
+ attr0 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR0_ADDR);
+ if (attr0 == HINIC3_PCIE_LINK_DOWN)
+ return -EFAULT;
+
+ attr1 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR1_ADDR);
+ if (attr1 == HINIC3_PCIE_LINK_DOWN)
+ return -EFAULT;
+
+ attr2 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR2_ADDR);
+ if (attr2 == HINIC3_PCIE_LINK_DOWN)
+ return -EFAULT;
+
+ attr3 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR3_ADDR);
+ if (attr3 == HINIC3_PCIE_LINK_DOWN)
+ return -EFAULT;
+
+ attr6 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR6_ADDR);
+ if (attr6 == HINIC3_PCIE_LINK_DOWN)
+ return -EFAULT;
+
+ set_hwif_attr(&hwif->attr, attr0, attr1, attr2, attr3, attr6);
+
+ if (!hwif->attr.num_ceqs) {
+ dev_err(hwdev->dev, "Ceq num cfg in fw is zero\n");
+ return -EFAULT;
+ }
+
+ if (!hwif->attr.num_irqs) {
+ dev_err(hwdev->dev,
+ "Irq num cfg in fw is zero, msix_flex_en %d\n",
+ hwif->attr.msix_flex_en);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static enum hinic3_doorbell_ctrl hinic3_get_doorbell_ctrl_status(struct hinic3_hwif *hwif)
+{
+ u32 attr4 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR4_ADDR);
+
+ return HINIC3_AF4_GET(attr4, DOORBELL_CTRL);
+}
+
+static enum hinic3_outbound_ctrl hinic3_get_outbound_ctrl_status(struct hinic3_hwif *hwif)
+{
+ u32 attr5 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR5_ADDR);
+
+ return HINIC3_AF5_GET(attr5, OUTBOUND_CTRL);
+}
+
+void hinic3_toggle_doorbell(struct hinic3_hwif *hwif,
+ enum hinic3_doorbell_ctrl flag)
+{
+ u32 addr, attr4;
+
+ addr = HINIC3_CSR_FUNC_ATTR4_ADDR;
+ attr4 = hinic3_hwif_read_reg(hwif, addr);
+
+ attr4 &= ~HINIC3_AF4_DOORBELL_CTRL_MASK;
+ attr4 |= HINIC3_AF4_SET(flag, DOORBELL_CTRL);
+
+ hinic3_hwif_write_reg(hwif, addr, attr4);
+}
+
+static int db_area_idx_init(struct hinic3_hwif *hwif, u64 db_base_phy,
+ u8 __iomem *db_base, u64 db_dwqe_len)
+{
+ struct hinic3_db_area *db_area = &hwif->db_area;
+ u32 db_max_areas;
+
+ hwif->db_base_phy = db_base_phy;
+ hwif->db_base = db_base;
+ hwif->db_dwqe_len = db_dwqe_len;
+
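+ /* Cap the number of doorbell pages at the 4MB DB+DWQE BAR layout limit. */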
+ db_max_areas = db_dwqe_len > HINIC3_DB_DWQE_SIZE ?
+ HINIC3_DB_MAX_AREAS : db_dwqe_len / HINIC3_DB_PAGE_SIZE;
+ db_area->db_bitmap_array = bitmap_zalloc(db_max_areas, GFP_KERNEL);
+ if (!db_area->db_bitmap_array)
+ return -ENOMEM;
+
+ db_area->db_max_areas = db_max_areas;
+ spin_lock_init(&db_area->idx_lock);
+
+ return 0;
+}
+
+static void db_area_idx_free(struct hinic3_db_area *db_area)
+{
+ bitmap_free(db_area->db_bitmap_array);
+}
+
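+/* Find and claim the first free doorbell page index in the bitmap. */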
+static int get_db_idx(struct hinic3_hwif *hwif, u32 *idx)
+{
+ struct hinic3_db_area *db_area = &hwif->db_area;
+ u32 pg_idx;
+
+ spin_lock(&db_area->idx_lock);
+ pg_idx = find_first_zero_bit(db_area->db_bitmap_array,
+ db_area->db_max_areas);
+ if (pg_idx == db_area->db_max_areas) {
+ spin_unlock(&db_area->idx_lock);
+ return -ENOMEM;
+ }
+ set_bit(pg_idx, db_area->db_bitmap_array);
+ spin_unlock(&db_area->idx_lock);
+
+ *idx = pg_idx;
+
+ return 0;
+}
+
+static void free_db_idx(struct hinic3_hwif *hwif, u32 idx)
+{
+ struct hinic3_db_area *db_area = &hwif->db_area;
+
+ spin_lock(&db_area->idx_lock);
+ clear_bit(idx, db_area->db_bitmap_array);
+ spin_unlock(&db_area->idx_lock);
+}
+
+void hinic3_free_db_addr(struct hinic3_hwdev *hwdev, const u8 __iomem *db_base)
+{
+ struct hinic3_hwif *hwif;
+ uintptr_t distance;
+ u32 idx;
+
+ hwif = hwdev->hwif;
+ distance = db_base - hwif->db_base;
+ idx = distance / HINIC3_DB_PAGE_SIZE;
+
+ free_db_idx(hwif, idx);
+}
+
+int hinic3_alloc_db_addr(struct hinic3_hwdev *hwdev, void __iomem **db_base,
+ void __iomem **dwqe_base)
+{
+ struct hinic3_hwif *hwif;
+ u8 __iomem *addr;
+ u32 idx;
+ int err;
+
+ hwif = hwdev->hwif;
+
+ err = get_db_idx(hwif, &idx);
+ if (err)
+ return err;
+
+ addr = hwif->db_base + idx * HINIC3_DB_PAGE_SIZE;
+ *db_base = addr;
+
+ if (dwqe_base)
+ *dwqe_base = addr + HINIC3_DWQE_OFFSET;
+
+ return 0;
+}
+
void hinic3_set_msix_state(struct hinic3_hwdev *hwdev, u16 msix_idx,
enum hinic3_msix_state flag)
{
- /* Completed by later submission due to LoC limit. */
+ struct hinic3_hwif *hwif;
+ u8 int_msk = 1;
+ u32 mask_bits;
+ u32 addr;
+
+ hwif = hwdev->hwif;
+
+ if (flag)
+ mask_bits = HINIC3_MSI_CLR_INDIR_SET(int_msk, INT_MSK_SET);
+ else
+ mask_bits = HINIC3_MSI_CLR_INDIR_SET(int_msk, INT_MSK_CLR);
+ mask_bits = mask_bits |
+ HINIC3_MSI_CLR_INDIR_SET(msix_idx, SIMPLE_INDIR_IDX);
+
+ addr = HINIC3_CSR_FUNC_MSI_CLR_WR_ADDR;
+ hinic3_hwif_write_reg(hwif, addr, mask_bits);
+}
+
+static void disable_all_msix(struct hinic3_hwdev *hwdev)
+{
+ u16 num_irqs = hwdev->hwif->attr.num_irqs;
+ u16 i;
+
+ for (i = 0; i < num_irqs; i++)
+ hinic3_set_msix_state(hwdev, i, HINIC3_MSIX_DISABLE);
+}
+
+void hinic3_msix_intr_clear_resend_bit(struct hinic3_hwdev *hwdev, u16 msix_idx,
+ u8 clear_resend_en)
+{
+ struct hinic3_hwif *hwif;
+ u32 msix_ctrl, addr;
+
+ hwif = hwdev->hwif;
+
+ msix_ctrl = HINIC3_MSI_CLR_INDIR_SET(msix_idx, SIMPLE_INDIR_IDX) |
+ HINIC3_MSI_CLR_INDIR_SET(clear_resend_en, RESEND_TIMER_CLR);
+
+ addr = HINIC3_CSR_FUNC_MSI_CLR_WR_ADDR;
+ hinic3_hwif_write_reg(hwif, addr, msix_ctrl);
+}
+
+void hinic3_set_msix_auto_mask_state(struct hinic3_hwdev *hwdev, u16 msix_idx,
+ enum hinic3_msix_auto_mask flag)
+{
+ struct hinic3_hwif *hwif;
+ u32 mask_bits;
+ u32 addr;
+
+ hwif = hwdev->hwif;
+
+ if (flag)
+ mask_bits = HINIC3_MSI_CLR_INDIR_SET(1, AUTO_MSK_SET);
+ else
+ mask_bits = HINIC3_MSI_CLR_INDIR_SET(1, AUTO_MSK_CLR);
+
+ mask_bits = mask_bits |
+ HINIC3_MSI_CLR_INDIR_SET(msix_idx, SIMPLE_INDIR_IDX);
+
+ addr = HINIC3_CSR_FUNC_MSI_CLR_WR_ADDR;
+ hinic3_hwif_write_reg(hwif, addr, mask_bits);
+}
+
+static enum hinic3_wait_return check_db_outbound_enable_handler(void *priv_data)
+{
+ enum hinic3_outbound_ctrl outbound_ctrl;
+ struct hinic3_hwif *hwif = priv_data;
+ enum hinic3_doorbell_ctrl db_ctrl;
+
+ db_ctrl = hinic3_get_doorbell_ctrl_status(hwif);
+ outbound_ctrl = hinic3_get_outbound_ctrl_status(hwif);
+ if (outbound_ctrl == ENABLE_OUTBOUND && db_ctrl == ENABLE_DOORBELL)
+ return HINIC3_WAIT_PROCESS_CPL;
+
+ return HINIC3_WAIT_PROCESS_WAITING;
+}
+
+static int wait_until_doorbell_and_outbound_enabled(struct hinic3_hwif *hwif)
+{
+ return hinic3_wait_for_timeout(hwif, check_db_outbound_enable_handler,
+ HINIC3_DB_AND_OUTBOUND_EN_TIMEOUT,
+ USEC_PER_MSEC);
+}
+
+int hinic3_init_hwif(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_pcidev *pci_adapter = hwdev->adapter;
+ struct hinic3_hwif *hwif;
+ u32 attr1, attr4, attr5;
+ int err;
+
+ hwif = kzalloc(sizeof(*hwif), GFP_KERNEL);
+ if (!hwif)
+ return -ENOMEM;
+
+ hwdev->hwif = hwif;
+ hwif->cfg_regs_base = (u8 __iomem *)pci_adapter->cfg_reg_base +
+ HINIC3_VF_CFG_REG_OFFSET;
+
+ err = db_area_idx_init(hwif, pci_adapter->db_base_phy,
+ pci_adapter->db_base,
+ pci_adapter->db_dwqe_len);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init db area.\n");
+ goto err_free_hwif;
+ }
+
+ err = wait_hwif_ready(hwdev);
+ if (err) {
+ attr1 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR1_ADDR);
+ dev_err(hwdev->dev, "Chip status is not ready, attr1:0x%x\n",
+ attr1);
+ goto err_free_db_area_idx;
+ }
+
+ err = init_hwif_attr(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Init hwif attr failed\n");
+ goto err_free_db_area_idx;
+ }
+
+ err = wait_until_doorbell_and_outbound_enabled(hwif);
+ if (err) {
+ attr4 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR4_ADDR);
+ attr5 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR5_ADDR);
+ dev_err(hwdev->dev, "HW doorbell/outbound is disabled, attr4 0x%x attr5 0x%x\n",
+ attr4, attr5);
+ goto err_free_db_area_idx;
+ }
+
+ disable_all_msix(hwdev);
+
+ return 0;
+
+err_free_db_area_idx:
+ db_area_idx_free(&hwif->db_area);
+err_free_hwif:
+ kfree(hwif);
+
+ return err;
+}
+
+void hinic3_free_hwif(struct hinic3_hwdev *hwdev)
+{
+ db_area_idx_free(&hwdev->hwif->db_area);
+ kfree(hwdev->hwif);
}
u16 hinic3_global_func_id(struct hinic3_hwdev *hwdev)
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h
index 513c9680e6b6..c02904e861cc 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h
@@ -45,13 +45,45 @@ struct hinic3_hwif {
struct hinic3_func_attr attr;
};
+enum hinic3_outbound_ctrl {
+ ENABLE_OUTBOUND = 0x0,
+ DISABLE_OUTBOUND = 0x1,
+};
+
+enum hinic3_doorbell_ctrl {
+ ENABLE_DOORBELL = 0,
+ DISABLE_DOORBELL = 1,
+};
+
enum hinic3_msix_state {
HINIC3_MSIX_ENABLE,
HINIC3_MSIX_DISABLE,
};
+enum hinic3_msix_auto_mask {
+ HINIC3_CLR_MSIX_AUTO_MASK,
+ HINIC3_SET_MSIX_AUTO_MASK,
+};
+
+u32 hinic3_hwif_read_reg(struct hinic3_hwif *hwif, u32 reg);
+void hinic3_hwif_write_reg(struct hinic3_hwif *hwif, u32 reg, u32 val);
+
+void hinic3_toggle_doorbell(struct hinic3_hwif *hwif,
+ enum hinic3_doorbell_ctrl flag);
+
+int hinic3_alloc_db_addr(struct hinic3_hwdev *hwdev, void __iomem **db_base,
+ void __iomem **dwqe_base);
+void hinic3_free_db_addr(struct hinic3_hwdev *hwdev, const u8 __iomem *db_base);
+
+int hinic3_init_hwif(struct hinic3_hwdev *hwdev);
+void hinic3_free_hwif(struct hinic3_hwdev *hwdev);
+
void hinic3_set_msix_state(struct hinic3_hwdev *hwdev, u16 msix_idx,
enum hinic3_msix_state flag);
+void hinic3_msix_intr_clear_resend_bit(struct hinic3_hwdev *hwdev, u16 msix_idx,
+ u8 clear_resend_en);
+void hinic3_set_msix_auto_mask_state(struct hinic3_hwdev *hwdev, u16 msix_idx,
+ enum hinic3_msix_auto_mask flag);
u16 hinic3_global_func_id(struct hinic3_hwdev *hwdev);
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c b/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c
index 8b92eed25edf..a69b361225e9 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c
@@ -38,19 +38,19 @@ static int hinic3_poll(struct napi_struct *napi, int budget)
return work_done;
}
-void qp_add_napi(struct hinic3_irq_cfg *irq_cfg)
+static void qp_add_napi(struct hinic3_irq_cfg *irq_cfg)
{
struct hinic3_nic_dev *nic_dev = netdev_priv(irq_cfg->netdev);
+ netif_napi_add(nic_dev->netdev, &irq_cfg->napi, hinic3_poll);
netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id,
NETDEV_QUEUE_TYPE_RX, &irq_cfg->napi);
netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id,
NETDEV_QUEUE_TYPE_TX, &irq_cfg->napi);
- netif_napi_add(nic_dev->netdev, &irq_cfg->napi, hinic3_poll);
napi_enable(&irq_cfg->napi);
}
-void qp_del_napi(struct hinic3_irq_cfg *irq_cfg)
+static void qp_del_napi(struct hinic3_irq_cfg *irq_cfg)
{
napi_disable(&irq_cfg->napi);
netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id,
@@ -60,3 +60,135 @@ void qp_del_napi(struct hinic3_irq_cfg *irq_cfg)
netif_stop_subqueue(irq_cfg->netdev, irq_cfg->irq_id);
netif_napi_del(&irq_cfg->napi);
}
+
+static irqreturn_t qp_irq(int irq, void *data)
+{
+ struct hinic3_irq_cfg *irq_cfg = data;
+ struct hinic3_nic_dev *nic_dev;
+
+ nic_dev = netdev_priv(irq_cfg->netdev);
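+ /* Clear the HW resend timer for this vector before scheduling NAPI. */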
+ hinic3_msix_intr_clear_resend_bit(nic_dev->hwdev,
+ irq_cfg->msix_entry_idx, 1);
+
+ napi_schedule(&irq_cfg->napi);
+
+ return IRQ_HANDLED;
+}
+
+static int hinic3_request_irq(struct hinic3_irq_cfg *irq_cfg, u16 q_id)
+{
+ struct hinic3_interrupt_info info = {};
+ struct hinic3_nic_dev *nic_dev;
+ struct net_device *netdev;
+ int err;
+
+ netdev = irq_cfg->netdev;
+ nic_dev = netdev_priv(netdev);
+ qp_add_napi(irq_cfg);
+
+ info.msix_index = irq_cfg->msix_entry_idx;
+ info.interrupt_coalesc_set = 1;
+ info.pending_limit = nic_dev->intr_coalesce[q_id].pending_limit;
+ info.coalesc_timer_cfg =
+ nic_dev->intr_coalesce[q_id].coalesce_timer_cfg;
+ info.resend_timer_cfg = nic_dev->intr_coalesce[q_id].resend_timer_cfg;
+ err = hinic3_set_interrupt_cfg_direct(nic_dev->hwdev, &info);
+ if (err) {
+ netdev_err(netdev, "Failed to set RX interrupt coalescing attribute.\n");
+ qp_del_napi(irq_cfg);
+ return err;
+ }
+
+ err = request_irq(irq_cfg->irq_id, qp_irq, 0, irq_cfg->irq_name,
+ irq_cfg);
+ if (err) {
+ qp_del_napi(irq_cfg);
+ return err;
+ }
+
+ irq_set_affinity_hint(irq_cfg->irq_id, &irq_cfg->affinity_mask);
+
+ return 0;
+}
+
+static void hinic3_release_irq(struct hinic3_irq_cfg *irq_cfg)
+{
+ irq_set_affinity_hint(irq_cfg->irq_id, NULL);
+ free_irq(irq_cfg->irq_id, irq_cfg);
+}
+
+int hinic3_qps_irq_init(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct pci_dev *pdev = nic_dev->pdev;
+ struct hinic3_irq_cfg *irq_cfg;
+ struct msix_entry *msix_entry;
+ u32 local_cpu;
+ u16 q_id;
+ int err;
+
+ for (q_id = 0; q_id < nic_dev->q_params.num_qps; q_id++) {
+ msix_entry = &nic_dev->qps_msix_entries[q_id];
+ irq_cfg = &nic_dev->q_params.irq_cfg[q_id];
+
+ irq_cfg->irq_id = msix_entry->vector;
+ irq_cfg->msix_entry_idx = msix_entry->entry;
+ irq_cfg->netdev = netdev;
+ irq_cfg->txq = &nic_dev->txqs[q_id];
+ irq_cfg->rxq = &nic_dev->rxqs[q_id];
+ nic_dev->rxqs[q_id].irq_cfg = irq_cfg;
+
+ local_cpu = cpumask_local_spread(q_id, dev_to_node(&pdev->dev));
+ cpumask_set_cpu(local_cpu, &irq_cfg->affinity_mask);
+
+ snprintf(irq_cfg->irq_name, sizeof(irq_cfg->irq_name),
+ "%s_qp%u", netdev->name, q_id);
+
+ err = hinic3_request_irq(irq_cfg, q_id);
+ if (err) {
+ netdev_err(netdev, "Failed to request Rx irq\n");
+ goto err_release_irqs;
+ }
+
+ hinic3_set_msix_auto_mask_state(nic_dev->hwdev,
+ irq_cfg->msix_entry_idx,
+ HINIC3_SET_MSIX_AUTO_MASK);
+ hinic3_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx,
+ HINIC3_MSIX_ENABLE);
+ }
+
+ return 0;
+
+err_release_irqs:
+ while (q_id > 0) {
+ q_id--;
+ irq_cfg = &nic_dev->q_params.irq_cfg[q_id];
+ qp_del_napi(irq_cfg);
+ hinic3_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx,
+ HINIC3_MSIX_DISABLE);
+ hinic3_set_msix_auto_mask_state(nic_dev->hwdev,
+ irq_cfg->msix_entry_idx,
+ HINIC3_CLR_MSIX_AUTO_MASK);
+ hinic3_release_irq(irq_cfg);
+ }
+
+ return err;
+}
+
+void hinic3_qps_irq_uninit(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_irq_cfg *irq_cfg;
+ u16 q_id;
+
+ for (q_id = 0; q_id < nic_dev->q_params.num_qps; q_id++) {
+ irq_cfg = &nic_dev->q_params.irq_cfg[q_id];
+ qp_del_napi(irq_cfg);
+ hinic3_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx,
+ HINIC3_MSIX_DISABLE);
+ hinic3_set_msix_auto_mask_state(nic_dev->hwdev,
+ irq_cfg->msix_entry_idx,
+ HINIC3_CLR_MSIX_AUTO_MASK);
+ hinic3_release_irq(irq_cfg);
+ }
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_lld.c b/drivers/net/ethernet/huawei/hinic3/hinic3_lld.c
index 4827326e6a59..3db8241a3b0c 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_lld.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_lld.c
@@ -8,6 +8,7 @@
#include "hinic3_hwdev.h"
#include "hinic3_lld.h"
#include "hinic3_mgmt.h"
+#include "hinic3_pci_id_tbl.h"
#define HINIC3_VF_PCI_CFG_REG_BAR 0
#define HINIC3_PCI_INTR_REG_BAR 2
@@ -121,6 +122,7 @@ static int hinic3_attach_aux_devices(struct hinic3_hwdev *hwdev)
goto err_del_adevs;
}
mutex_unlock(&pci_adapter->pdev_mutex);
+
return 0;
err_del_adevs:
@@ -132,6 +134,7 @@ err_del_adevs:
}
}
mutex_unlock(&pci_adapter->pdev_mutex);
+
return -ENOMEM;
}
@@ -153,6 +156,7 @@ struct hinic3_hwdev *hinic3_adev_get_hwdev(struct auxiliary_device *adev)
struct hinic3_adev *hadev;
hadev = container_of(adev, struct hinic3_adev, adev);
+
return hadev->hwdev;
}
@@ -307,6 +311,7 @@ static void hinic3_func_uninit(struct pci_dev *pdev)
{
struct hinic3_pcidev *pci_adapter = pci_get_drvdata(pdev);
+ hinic3_flush_mgmt_workq(pci_adapter->hwdev);
hinic3_detach_aux_devices(pci_adapter->hwdev);
hinic3_free_hwdev(pci_adapter->hwdev);
}
@@ -333,6 +338,7 @@ err_unmap_bar:
err_out:
dev_err(&pdev->dev, "PCIe device probe function failed\n");
+
return err;
}
@@ -365,6 +371,7 @@ err_uninit_pci:
err_out:
dev_err(&pdev->dev, "PCIe device probe failed\n");
+
return err;
}
@@ -377,7 +384,7 @@ static void hinic3_remove(struct pci_dev *pdev)
}
static const struct pci_device_id hinic3_pci_table[] = {
- /* Completed by later submission due to LoC limit. */
+ {PCI_VDEVICE(HUAWEI, PCI_DEV_ID_HINIC3_VF), 0},
{0, 0}
};
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_main.c b/drivers/net/ethernet/huawei/hinic3/hinic3_main.c
index 497f2a36f35d..6d87d4d895ba 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_main.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_main.c
@@ -12,17 +12,59 @@
#include "hinic3_nic_cfg.h"
#include "hinic3_nic_dev.h"
#include "hinic3_nic_io.h"
+#include "hinic3_rss.h"
#include "hinic3_rx.h"
#include "hinic3_tx.h"
#define HINIC3_NIC_DRV_DESC "Intelligent Network Interface Card Driver"
-#define HINIC3_RX_BUF_LEN 2048
-#define HINIC3_LRO_REPLENISH_THLD 256
-#define HINIC3_NIC_DEV_WQ_NAME "hinic3_nic_dev_wq"
+#define HINIC3_RX_BUF_LEN 2048
+#define HINIC3_LRO_REPLENISH_THLD 256
+#define HINIC3_NIC_DEV_WQ_NAME "hinic3_nic_dev_wq"
-#define HINIC3_SQ_DEPTH 1024
-#define HINIC3_RQ_DEPTH 1024
+#define HINIC3_SQ_DEPTH 1024
+#define HINIC3_RQ_DEPTH 1024
+
+#define HINIC3_DEFAULT_TXRX_MSIX_PENDING_LIMIT 2
+#define HINIC3_DEFAULT_TXRX_MSIX_COALESC_TIMER_CFG 25
+#define HINIC3_DEFAULT_TXRX_MSIX_RESEND_TIMER_CFG 7
+
+static void init_intr_coal_param(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_intr_coal_info *info;
+ u16 i;
+
+ for (i = 0; i < nic_dev->max_qps; i++) {
+ info = &nic_dev->intr_coalesce[i];
+ info->pending_limit = HINIC3_DEFAULT_TXRX_MSIX_PENDING_LIMIT;
+ info->coalesce_timer_cfg = HINIC3_DEFAULT_TXRX_MSIX_COALESC_TIMER_CFG;
+ info->resend_timer_cfg = HINIC3_DEFAULT_TXRX_MSIX_RESEND_TIMER_CFG;
+ }
+}
+
+static int hinic3_init_intr_coalesce(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ nic_dev->intr_coalesce = kcalloc(nic_dev->max_qps,
+ sizeof(*nic_dev->intr_coalesce),
+ GFP_KERNEL);
+
+ if (!nic_dev->intr_coalesce)
+ return -ENOMEM;
+
+ init_intr_coal_param(netdev);
+
+ return 0;
+}
+
+static void hinic3_free_intr_coalesce(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ kfree(nic_dev->intr_coalesce);
+}
static int hinic3_alloc_txrxqs(struct net_device *netdev)
{
@@ -42,8 +84,17 @@ static int hinic3_alloc_txrxqs(struct net_device *netdev)
goto err_free_txqs;
}
+ err = hinic3_init_intr_coalesce(netdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init_intr_coalesce\n");
+ goto err_free_rxqs;
+ }
+
return 0;
+err_free_rxqs:
+ hinic3_free_rxqs(netdev);
+
err_free_txqs:
hinic3_free_txqs(netdev);
@@ -52,6 +103,7 @@ err_free_txqs:
static void hinic3_free_txrxqs(struct net_device *netdev)
{
+ hinic3_free_intr_coalesce(netdev);
hinic3_free_rxqs(netdev);
hinic3_free_txqs(netdev);
}
@@ -83,6 +135,8 @@ static int hinic3_sw_init(struct net_device *netdev)
nic_dev->q_params.sq_depth = HINIC3_SQ_DEPTH;
nic_dev->q_params.rq_depth = HINIC3_RQ_DEPTH;
+ hinic3_try_to_enable_rss(netdev);
+
/* VF driver always uses random MAC address. During VM migration to a
* new device, the new device should learn the VMs old MAC rather than
* provide its own MAC. The product design assumes that every VF is
@@ -94,7 +148,7 @@ static int hinic3_sw_init(struct net_device *netdev)
hinic3_global_func_id(hwdev));
if (err) {
dev_err(hwdev->dev, "Failed to set default MAC\n");
- return err;
+ goto err_clear_rss_config;
}
err = hinic3_alloc_txrxqs(netdev);
@@ -108,6 +162,8 @@ static int hinic3_sw_init(struct net_device *netdev)
err_del_mac:
hinic3_del_mac(hwdev, netdev->dev_addr, 0,
hinic3_global_func_id(hwdev));
+err_clear_rss_config:
+ hinic3_clear_rss_config(netdev);
return err;
}
@@ -119,6 +175,7 @@ static void hinic3_sw_uninit(struct net_device *netdev)
hinic3_free_txrxqs(netdev);
hinic3_del_mac(nic_dev->hwdev, netdev->dev_addr, 0,
hinic3_global_func_id(nic_dev->hwdev));
+ hinic3_clear_rss_config(netdev);
}
static void hinic3_assign_netdev_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c b/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c
index e74d1eb09730..cf67e26acece 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c
@@ -4,13 +4,857 @@
#include <linux/dma-mapping.h>
#include "hinic3_common.h"
+#include "hinic3_csr.h"
#include "hinic3_hwdev.h"
#include "hinic3_hwif.h"
#include "hinic3_mbox.h"
+#define MBOX_INT_DST_AEQN_MASK GENMASK(11, 10)
+#define MBOX_INT_SRC_RESP_AEQN_MASK GENMASK(13, 12)
+#define MBOX_INT_STAT_DMA_MASK GENMASK(19, 14)
+/* TX size, expressed in 4-byte units */
+#define MBOX_INT_TX_SIZE_MASK GENMASK(24, 20)
+/* SO_RO == strong order, relaxed order */
+#define MBOX_INT_STAT_DMA_SO_RO_MASK GENMASK(26, 25)
+#define MBOX_INT_WB_EN_MASK BIT(28)
+#define MBOX_INT_SET(val, field) \
+ FIELD_PREP(MBOX_INT_##field##_MASK, val)
+
+#define MBOX_CTRL_TRIGGER_AEQE_MASK BIT(0)
+#define MBOX_CTRL_TX_STATUS_MASK BIT(1)
+#define MBOX_CTRL_DST_FUNC_MASK GENMASK(28, 16)
+#define MBOX_CTRL_SET(val, field) \
+ FIELD_PREP(MBOX_CTRL_##field##_MASK, val)
+
+#define MBOX_MSG_POLLING_TIMEOUT_MS 8000 /* send msg segment timeout */
+#define MBOX_COMP_POLLING_TIMEOUT_MS 40000 /* wait for response timeout */
+
+#define MBOX_MAX_BUF_SZ 2048
+#define MBOX_HEADER_SZ 8
+
+/* MBOX size is 64B, 8B for mbox_header, 8B reserved */
+#define MBOX_SEG_LEN 48
+#define MBOX_SEG_LEN_ALIGN 4
+#define MBOX_WB_STATUS_LEN 16
+
+#define MBOX_SEQ_ID_START_VAL 0
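+/* A 2048B message is split into at most 43 segments (seq ids 0..42) of 48B
+ * each, so the last segment carries at most 2048 - 42 * 48 = 32 bytes.
+ */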
+#define MBOX_SEQ_ID_MAX_VAL 42
+#define MBOX_LAST_SEG_MAX_LEN \
+ (MBOX_MAX_BUF_SZ - MBOX_SEQ_ID_MAX_VAL * MBOX_SEG_LEN)
+
+/* mbox write back status is 16B; only the first 4B are used */
+#define MBOX_WB_STATUS_ERRCODE_MASK 0xFFFF
+#define MBOX_WB_STATUS_MASK 0xFF
+#define MBOX_WB_ERROR_CODE_MASK 0xFF00
+#define MBOX_WB_STATUS_FINISHED_SUCCESS 0xFF
+#define MBOX_WB_STATUS_NOT_FINISHED 0x00
+
+#define MBOX_STATUS_FINISHED(wb) \
+ (((wb) & MBOX_WB_STATUS_MASK) != MBOX_WB_STATUS_NOT_FINISHED)
+#define MBOX_STATUS_SUCCESS(wb) \
+ (((wb) & MBOX_WB_STATUS_MASK) == \
+ MBOX_WB_STATUS_FINISHED_SUCCESS)
+#define MBOX_STATUS_ERRCODE(wb) \
+ ((wb) & MBOX_WB_ERROR_CODE_MASK)
+
+#define MBOX_DMA_MSG_QUEUE_DEPTH 32
+#define MBOX_AREA(hwif) \
+ ((hwif)->cfg_regs_base + HINIC3_FUNC_CSR_MAILBOX_DATA_OFF)
+
+#define MBOX_MQ_CI_OFFSET \
+ (HINIC3_CFG_REGS_FLAG + HINIC3_FUNC_CSR_MAILBOX_DATA_OFF + \
+ MBOX_HEADER_SZ + MBOX_SEG_LEN)
+
+#define MBOX_MQ_SYNC_CI_MASK GENMASK(7, 0)
+#define MBOX_MQ_ASYNC_CI_MASK GENMASK(15, 8)
+#define MBOX_MQ_CI_GET(val, field) \
+ FIELD_GET(MBOX_MQ_##field##_CI_MASK, val)
+
+#define MBOX_MGMT_FUNC_ID 0x1FFF
+#define MBOX_COMM_F_MBOX_SEGMENT BIT(3)
+
+static u8 *get_mbox_body_from_hdr(u8 *header)
+{
+ return header + MBOX_HEADER_SZ;
+}
+
+static struct hinic3_msg_desc *get_mbox_msg_desc(struct hinic3_mbox *mbox,
+ enum mbox_msg_direction_type dir,
+ u16 src_func_id)
+{
+ struct hinic3_msg_channel *msg_ch;
+
+ msg_ch = (src_func_id == MBOX_MGMT_FUNC_ID) ?
+ &mbox->mgmt_msg : mbox->func_msg;
+
+ return (dir == MBOX_MSG_SEND) ?
+ &msg_ch->recv_msg : &msg_ch->resp_msg;
+}
+
+static void resp_mbox_handler(struct hinic3_mbox *mbox,
+ const struct hinic3_msg_desc *msg_desc)
+{
+ spin_lock(&mbox->mbox_lock);
+ if (msg_desc->msg_info.msg_id == mbox->send_msg_id &&
+ mbox->event_flag == MBOX_EVENT_START)
+ mbox->event_flag = MBOX_EVENT_SUCCESS;
+ spin_unlock(&mbox->mbox_lock);
+}
+
+static bool mbox_segment_valid(struct hinic3_mbox *mbox,
+ struct hinic3_msg_desc *msg_desc,
+ __le64 mbox_header)
+{
+ u8 seq_id, seg_len, msg_id, mod;
+ __le16 src_func_idx, cmd;
+
+ seq_id = MBOX_MSG_HEADER_GET(mbox_header, SEQID);
+ seg_len = MBOX_MSG_HEADER_GET(mbox_header, SEG_LEN);
+ msg_id = MBOX_MSG_HEADER_GET(mbox_header, MSG_ID);
+ mod = MBOX_MSG_HEADER_GET(mbox_header, MODULE);
+ cmd = cpu_to_le16(MBOX_MSG_HEADER_GET(mbox_header, CMD));
+ src_func_idx = cpu_to_le16(MBOX_MSG_HEADER_GET(mbox_header,
+ SRC_GLB_FUNC_IDX));
+
+ if (seq_id > MBOX_SEQ_ID_MAX_VAL || seg_len > MBOX_SEG_LEN ||
+ (seq_id == MBOX_SEQ_ID_MAX_VAL && seg_len > MBOX_LAST_SEG_MAX_LEN))
+ goto err_seg;
+
+ if (seq_id == 0) {
+ msg_desc->seq_id = seq_id;
+ msg_desc->msg_info.msg_id = msg_id;
+ msg_desc->mod = mod;
+ msg_desc->cmd = cmd;
+ } else {
+ if (seq_id != msg_desc->seq_id + 1 ||
+ msg_id != msg_desc->msg_info.msg_id ||
+ mod != msg_desc->mod || cmd != msg_desc->cmd)
+ goto err_seg;
+
+ msg_desc->seq_id = seq_id;
+ }
+
+ return true;
+
+err_seg:
+ dev_err(mbox->hwdev->dev,
+ "Mailbox segment check failed, src func id: 0x%x, front seg info: seq id: 0x%x, msg id: 0x%x, mod: 0x%x, cmd: 0x%x\n",
+ src_func_idx, msg_desc->seq_id, msg_desc->msg_info.msg_id,
+ msg_desc->mod, msg_desc->cmd);
+ dev_err(mbox->hwdev->dev,
+ "Current seg info: seg len: 0x%x, seq id: 0x%x, msg id: 0x%x, mod: 0x%x, cmd: 0x%x\n",
+ seg_len, seq_id, msg_id, mod, cmd);
+
+ return false;
+}
+
+static void recv_mbox_handler(struct hinic3_mbox *mbox,
+ u8 *header, struct hinic3_msg_desc *msg_desc)
+{
+ __le64 mbox_header = *((__force __le64 *)header);
+ u8 *mbox_body = get_mbox_body_from_hdr(header);
+ u8 seq_id, seg_len;
+ int pos;
+
+ if (!mbox_segment_valid(mbox, msg_desc, mbox_header)) {
+ msg_desc->seq_id = MBOX_SEQ_ID_MAX_VAL;
+ return;
+ }
+
+ seq_id = MBOX_MSG_HEADER_GET(mbox_header, SEQID);
+ seg_len = MBOX_MSG_HEADER_GET(mbox_header, SEG_LEN);
+
+ pos = seq_id * MBOX_SEG_LEN;
+ memcpy(msg_desc->msg + pos, mbox_body, seg_len);
+
+ if (!MBOX_MSG_HEADER_GET(mbox_header, LAST))
+ return;
+
+ msg_desc->msg_len = cpu_to_le16(MBOX_MSG_HEADER_GET(mbox_header,
+ MSG_LEN));
+ msg_desc->msg_info.status = MBOX_MSG_HEADER_GET(mbox_header, STATUS);
+
+ if (MBOX_MSG_HEADER_GET(mbox_header, DIRECTION) == MBOX_MSG_RESP)
+ resp_mbox_handler(mbox, msg_desc);
+}
+
+void hinic3_mbox_func_aeqe_handler(struct hinic3_hwdev *hwdev, u8 *header,
+ u8 size)
+{
+ __le64 mbox_header = *((__force __le64 *)header);
+ enum mbox_msg_direction_type dir;
+ struct hinic3_msg_desc *msg_desc;
+ struct hinic3_mbox *mbox;
+ u16 src_func_id;
+
+ mbox = hwdev->mbox;
+ dir = MBOX_MSG_HEADER_GET(mbox_header, DIRECTION);
+ src_func_id = MBOX_MSG_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX);
+ msg_desc = get_mbox_msg_desc(mbox, dir, src_func_id);
+ recv_mbox_handler(mbox, header, msg_desc);
+}
+
+static int init_mbox_dma_queue(struct hinic3_hwdev *hwdev,
+ struct mbox_dma_queue *mq)
+{
+ u32 size;
+
+ mq->depth = MBOX_DMA_MSG_QUEUE_DEPTH;
+ mq->prod_idx = 0;
+ mq->cons_idx = 0;
+
+ size = mq->depth * MBOX_MAX_BUF_SZ;
+ mq->dma_buf_vaddr = dma_alloc_coherent(hwdev->dev, size,
+ &mq->dma_buf_paddr,
+ GFP_KERNEL);
+ if (!mq->dma_buf_vaddr)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void uninit_mbox_dma_queue(struct hinic3_hwdev *hwdev,
+ struct mbox_dma_queue *mq)
+{
+ dma_free_coherent(hwdev->dev, mq->depth * MBOX_MAX_BUF_SZ,
+ mq->dma_buf_vaddr, mq->dma_buf_paddr);
+}
+
+static int hinic3_init_mbox_dma_queue(struct hinic3_mbox *mbox)
+{
+ u32 val;
+ int err;
+
+ err = init_mbox_dma_queue(mbox->hwdev, &mbox->sync_msg_queue);
+ if (err)
+ return err;
+
+ err = init_mbox_dma_queue(mbox->hwdev, &mbox->async_msg_queue);
+ if (err) {
+ uninit_mbox_dma_queue(mbox->hwdev, &mbox->sync_msg_queue);
+ return err;
+ }
+
+ val = hinic3_hwif_read_reg(mbox->hwdev->hwif, MBOX_MQ_CI_OFFSET);
+ val &= ~MBOX_MQ_SYNC_CI_MASK;
+ val &= ~MBOX_MQ_ASYNC_CI_MASK;
+ hinic3_hwif_write_reg(mbox->hwdev->hwif, MBOX_MQ_CI_OFFSET, val);
+
+ return 0;
+}
+
+static void hinic3_uninit_mbox_dma_queue(struct hinic3_mbox *mbox)
+{
+ uninit_mbox_dma_queue(mbox->hwdev, &mbox->sync_msg_queue);
+ uninit_mbox_dma_queue(mbox->hwdev, &mbox->async_msg_queue);
+}
+
+static int alloc_mbox_msg_channel(struct hinic3_msg_channel *msg_ch)
+{
+ msg_ch->resp_msg.msg = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL);
+ if (!msg_ch->resp_msg.msg)
+ return -ENOMEM;
+
+ msg_ch->recv_msg.msg = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL);
+ if (!msg_ch->recv_msg.msg) {
+ kfree(msg_ch->resp_msg.msg);
+ return -ENOMEM;
+ }
+
+ msg_ch->resp_msg.seq_id = MBOX_SEQ_ID_MAX_VAL;
+ msg_ch->recv_msg.seq_id = MBOX_SEQ_ID_MAX_VAL;
+
+ return 0;
+}
+
+static void free_mbox_msg_channel(struct hinic3_msg_channel *msg_ch)
+{
+ kfree(msg_ch->recv_msg.msg);
+ kfree(msg_ch->resp_msg.msg);
+}
+
+static int init_mgmt_msg_channel(struct hinic3_mbox *mbox)
+{
+ int err;
+
+ err = alloc_mbox_msg_channel(&mbox->mgmt_msg);
+ if (err) {
+ dev_err(mbox->hwdev->dev, "Failed to alloc mgmt message channel\n");
+ return err;
+ }
+
+ err = hinic3_init_mbox_dma_queue(mbox);
+ if (err) {
+ dev_err(mbox->hwdev->dev, "Failed to init mbox dma queue\n");
+ free_mbox_msg_channel(&mbox->mgmt_msg);
+ return err;
+ }
+
+ return 0;
+}
+
+static void uninit_mgmt_msg_channel(struct hinic3_mbox *mbox)
+{
+ hinic3_uninit_mbox_dma_queue(mbox);
+ free_mbox_msg_channel(&mbox->mgmt_msg);
+}
+
+static int hinic3_init_func_mbox_msg_channel(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_mbox *mbox;
+ int err;
+
+ mbox = hwdev->mbox;
+ mbox->func_msg = kzalloc(sizeof(*mbox->func_msg), GFP_KERNEL);
+ if (!mbox->func_msg)
+ return -ENOMEM;
+
+ err = alloc_mbox_msg_channel(mbox->func_msg);
+ if (err)
+ goto err_free_func_msg;
+
+ return 0;
+
+err_free_func_msg:
+ kfree(mbox->func_msg);
+ mbox->func_msg = NULL;
+
+ return err;
+}
+
+static void hinic3_uninit_func_mbox_msg_channel(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_mbox *mbox = hwdev->mbox;
+
+ free_mbox_msg_channel(mbox->func_msg);
+ kfree(mbox->func_msg);
+ mbox->func_msg = NULL;
+}
+
+static void prepare_send_mbox(struct hinic3_mbox *mbox)
+{
+ struct hinic3_send_mbox *send_mbox = &mbox->send_mbox;
+
+ send_mbox->data = MBOX_AREA(mbox->hwdev->hwif);
+}
+
+static int alloc_mbox_wb_status(struct hinic3_mbox *mbox)
+{
+ struct hinic3_send_mbox *send_mbox = &mbox->send_mbox;
+ struct hinic3_hwdev *hwdev = mbox->hwdev;
+ u32 addr_h, addr_l;
+
+ send_mbox->wb_vaddr = dma_alloc_coherent(hwdev->dev,
+ MBOX_WB_STATUS_LEN,
+ &send_mbox->wb_paddr,
+ GFP_KERNEL);
+ if (!send_mbox->wb_vaddr)
+ return -ENOMEM;
+
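+ /* Program the write-back buffer address into the function CSRs so HW
+ * can report mailbox send status there.
+ */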
+ addr_h = upper_32_bits(send_mbox->wb_paddr);
+ addr_l = lower_32_bits(send_mbox->wb_paddr);
+ hinic3_hwif_write_reg(hwdev->hwif, HINIC3_FUNC_CSR_MAILBOX_RESULT_H_OFF,
+ addr_h);
+ hinic3_hwif_write_reg(hwdev->hwif, HINIC3_FUNC_CSR_MAILBOX_RESULT_L_OFF,
+ addr_l);
+
+ return 0;
+}
+
+static void free_mbox_wb_status(struct hinic3_mbox *mbox)
+{
+ struct hinic3_send_mbox *send_mbox = &mbox->send_mbox;
+ struct hinic3_hwdev *hwdev = mbox->hwdev;
+
+ hinic3_hwif_write_reg(hwdev->hwif, HINIC3_FUNC_CSR_MAILBOX_RESULT_H_OFF,
+ 0);
+ hinic3_hwif_write_reg(hwdev->hwif, HINIC3_FUNC_CSR_MAILBOX_RESULT_L_OFF,
+ 0);
+
+ dma_free_coherent(hwdev->dev, MBOX_WB_STATUS_LEN,
+ send_mbox->wb_vaddr, send_mbox->wb_paddr);
+}
+
+static int hinic3_mbox_pre_init(struct hinic3_hwdev *hwdev,
+ struct hinic3_mbox *mbox)
+{
+ mbox->hwdev = hwdev;
+ mutex_init(&mbox->mbox_send_lock);
+ spin_lock_init(&mbox->mbox_lock);
+
+ mbox->workq = create_singlethread_workqueue(HINIC3_MBOX_WQ_NAME);
+ if (!mbox->workq) {
+ dev_err(hwdev->dev, "Failed to initialize MBOX workqueue\n");
+ return -ENOMEM;
+ }
+ hwdev->mbox = mbox;
+
+ return 0;
+}
+
+int hinic3_init_mbox(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_mbox *mbox;
+ int err;
+
+ mbox = kzalloc(sizeof(*mbox), GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ err = hinic3_mbox_pre_init(hwdev, mbox);
+ if (err)
+ goto err_free_mbox;
+
+ err = init_mgmt_msg_channel(mbox);
+ if (err)
+ goto err_destroy_workqueue;
+
+ err = hinic3_init_func_mbox_msg_channel(hwdev);
+ if (err)
+ goto err_uninit_mgmt_msg_ch;
+
+ err = alloc_mbox_wb_status(mbox);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to alloc mbox write back status\n");
+ goto err_uninit_func_mbox_msg_ch;
+ }
+
+ prepare_send_mbox(mbox);
+
+ return 0;
+
+err_uninit_func_mbox_msg_ch:
+ hinic3_uninit_func_mbox_msg_channel(hwdev);
+
+err_uninit_mgmt_msg_ch:
+ uninit_mgmt_msg_channel(mbox);
+
+err_destroy_workqueue:
+ destroy_workqueue(mbox->workq);
+
+err_free_mbox:
+ kfree(mbox);
+
+ return err;
+}
+
+void hinic3_free_mbox(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_mbox *mbox = hwdev->mbox;
+
+ destroy_workqueue(mbox->workq);
+ free_mbox_wb_status(mbox);
+ hinic3_uninit_func_mbox_msg_channel(hwdev);
+ uninit_mgmt_msg_channel(mbox);
+ kfree(mbox);
+}
+
+#define MBOX_DMA_MSG_INIT_XOR_VAL 0x5a5a5a5a
+#define MBOX_XOR_DATA_ALIGN 4
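+/* Fold the message into a single dword by XOR, seeded with a fixed pattern;
+ * the result travels in mbox_dma_msg so the receiver can verify the copy.
+ */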
+static u32 mbox_dma_msg_xor(u32 *data, u32 msg_len)
+{
+ u32 xor = MBOX_DMA_MSG_INIT_XOR_VAL;
+ u32 dw_len = msg_len / sizeof(u32);
+ u32 i;
+
+ for (i = 0; i < dw_len; i++)
+ xor ^= data[i];
+
+ return xor;
+}
+
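+/* MBOX_DMA_MSG_QUEUE_DEPTH is a power of two, so producer/consumer indexes
+ * wrap with a simple mask.
+ */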
+#define MBOX_MQ_ID_MASK(mq, idx) ((idx) & ((mq)->depth - 1))
+
+static bool is_msg_queue_full(struct mbox_dma_queue *mq)
+{
+ return MBOX_MQ_ID_MASK(mq, (mq)->prod_idx + 1) ==
+ MBOX_MQ_ID_MASK(mq, (mq)->cons_idx);
+}
+
+static int mbox_prepare_dma_entry(struct hinic3_mbox *mbox,
+ struct mbox_dma_queue *mq,
+ struct mbox_dma_msg *dma_msg,
+ const void *msg, u32 msg_len)
+{
+ u64 dma_addr, offset;
+ void *dma_vaddr;
+
+ if (is_msg_queue_full(mq)) {
+ dev_err(mbox->hwdev->dev, "Mbox sync message queue is busy, pi: %u, ci: %u\n",
+ mq->prod_idx, MBOX_MQ_ID_MASK(mq, mq->cons_idx));
+ return -EBUSY;
+ }
+
+ /* copy data to DMA buffer */
+ offset = mq->prod_idx * MBOX_MAX_BUF_SZ;
+ dma_vaddr = (u8 *)mq->dma_buf_vaddr + offset;
+ memcpy(dma_vaddr, msg, msg_len);
+ dma_addr = mq->dma_buf_paddr + offset;
+ dma_msg->dma_addr_high = cpu_to_le32(upper_32_bits(dma_addr));
+ dma_msg->dma_addr_low = cpu_to_le32(lower_32_bits(dma_addr));
+ dma_msg->msg_len = cpu_to_le32(msg_len);
+ /* The firmware reads the message in 4-byte units, so compute the XOR
+ * over the 4B-aligned length.
+ */
+ dma_msg->xor = cpu_to_le32(mbox_dma_msg_xor(dma_vaddr,
+ ALIGN(msg_len, MBOX_XOR_DATA_ALIGN)));
+ mq->prod_idx++;
+ mq->prod_idx = MBOX_MQ_ID_MASK(mq, mq->prod_idx);
+
+ return 0;
+}
+
+static int mbox_prepare_dma_msg(struct hinic3_mbox *mbox,
+ enum mbox_msg_ack_type ack_type,
+ struct mbox_dma_msg *dma_msg, const void *msg,
+ u32 msg_len)
+{
+ struct mbox_dma_queue *mq;
+ u32 val;
+
+ val = hinic3_hwif_read_reg(mbox->hwdev->hwif, MBOX_MQ_CI_OFFSET);
+ if (ack_type == MBOX_MSG_ACK) {
+ mq = &mbox->sync_msg_queue;
+ mq->cons_idx = MBOX_MQ_CI_GET(val, SYNC);
+ } else {
+ mq = &mbox->async_msg_queue;
+ mq->cons_idx = MBOX_MQ_CI_GET(val, ASYNC);
+ }
+
+ return mbox_prepare_dma_entry(mbox, mq, dma_msg, msg, msg_len);
+}
+
+static void clear_mbox_status(struct hinic3_send_mbox *mbox)
+{
+ __be64 *wb_status = mbox->wb_vaddr;
+
+ *wb_status = 0;
+ /* ensure the cleared status lands before the send is triggered */
+ wmb();
+}
+
+static void mbox_dword_write(const void *src, void __iomem *dst, u32 count)
+{
+ const __le32 *src32 = src;
+ u32 __iomem *dst32 = dst;
+ u32 i;
+
+ /* Data written to the mbox is laid out in structs with little-endian
+ * fields, but every dword (32 bits) must be byte-swapped when written
+ * to HW since the HW will swap it again.
+ */
+ for (i = 0; i < count; i++)
+ __raw_writel(swab32((__force __u32)src32[i]), dst32 + i);
+}
+
+static void mbox_copy_header(struct hinic3_hwdev *hwdev,
+ struct hinic3_send_mbox *mbox, __le64 *header)
+{
+ mbox_dword_write(header, mbox->data, MBOX_HEADER_SZ / sizeof(__le32));
+}
+
+static void mbox_copy_send_data(struct hinic3_hwdev *hwdev,
+ struct hinic3_send_mbox *mbox, void *seg,
+ u32 seg_len)
+{
+ u32 __iomem *dst = (u32 __iomem *)(mbox->data + MBOX_HEADER_SZ);
+ u32 count, leftover, last_dword;
+ const __le32 *src = seg;
+
+ count = seg_len / sizeof(u32);
+ leftover = seg_len % sizeof(u32);
+ if (count > 0)
+ mbox_dword_write(src, dst, count);
+
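+ /* Pad trailing bytes into a zeroed dword so the final write is 4B wide. */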
+ if (leftover > 0) {
+ last_dword = 0;
+ memcpy(&last_dword, src + count, leftover);
+ mbox_dword_write(&last_dword, dst + count, 1);
+ }
+}
+
+static void write_mbox_msg_attr(struct hinic3_mbox *mbox,
+ u16 dst_func, u16 dst_aeqn, u32 seg_len)
+{
+ struct hinic3_hwif *hwif = mbox->hwdev->hwif;
+ u32 mbox_int, mbox_ctrl, tx_size;
+
+ tx_size = ALIGN(seg_len + MBOX_HEADER_SZ, MBOX_SEG_LEN_ALIGN) >> 2;
+
+ mbox_int = MBOX_INT_SET(dst_aeqn, DST_AEQN) |
+ MBOX_INT_SET(0, STAT_DMA) |
+ MBOX_INT_SET(tx_size, TX_SIZE) |
+ MBOX_INT_SET(0, STAT_DMA_SO_RO) |
+ MBOX_INT_SET(1, WB_EN);
+
+ mbox_ctrl = MBOX_CTRL_SET(1, TX_STATUS) |
+ MBOX_CTRL_SET(0, TRIGGER_AEQE) |
+ MBOX_CTRL_SET(dst_func, DST_FUNC);
+
+ hinic3_hwif_write_reg(hwif, HINIC3_FUNC_CSR_MAILBOX_INT_OFF, mbox_int);
+ hinic3_hwif_write_reg(hwif, HINIC3_FUNC_CSR_MAILBOX_CONTROL_OFF,
+ mbox_ctrl);
+}
+
+static u16 get_mbox_status(const struct hinic3_send_mbox *mbox)
+{
+ __be64 *wb_status = mbox->wb_vaddr;
+ u64 wb_val;
+
+ wb_val = be64_to_cpu(*wb_status);
+ /* verify reading before check */
+ rmb();
+
+ return wb_val & MBOX_WB_STATUS_ERRCODE_MASK;
+}
+
+static enum hinic3_wait_return check_mbox_wb_status(void *priv_data)
+{
+ struct hinic3_mbox *mbox = priv_data;
+ u16 wb_status;
+
+ wb_status = get_mbox_status(&mbox->send_mbox);
+
+ return MBOX_STATUS_FINISHED(wb_status) ?
+ HINIC3_WAIT_PROCESS_CPL : HINIC3_WAIT_PROCESS_WAITING;
+}
+
+static int send_mbox_seg(struct hinic3_mbox *mbox, __le64 header,
+ u16 dst_func, void *seg, u32 seg_len, void *msg_info)
+{
+ struct hinic3_send_mbox *send_mbox = &mbox->send_mbox;
+ struct hinic3_hwdev *hwdev = mbox->hwdev;
+ u8 num_aeqs = hwdev->hwif->attr.num_aeqs;
+ enum mbox_msg_direction_type dir;
+ u16 dst_aeqn, wb_status, errcode;
+ int err;
+
+ /* mbox to mgmt cpu, hardware doesn't care about dst aeq id */
+ if (num_aeqs > MBOX_MSG_AEQ_FOR_MBOX) {
+ dir = MBOX_MSG_HEADER_GET(header, DIRECTION);
+ dst_aeqn = (dir == MBOX_MSG_SEND) ?
+ MBOX_MSG_AEQ_FOR_EVENT : MBOX_MSG_AEQ_FOR_MBOX;
+ } else {
+ dst_aeqn = 0;
+ }
+
+ clear_mbox_status(send_mbox);
+ mbox_copy_header(hwdev, send_mbox, &header);
+ mbox_copy_send_data(hwdev, send_mbox, seg, seg_len);
+ write_mbox_msg_attr(mbox, dst_func, dst_aeqn, seg_len);
+
+ err = hinic3_wait_for_timeout(mbox, check_mbox_wb_status,
+ MBOX_MSG_POLLING_TIMEOUT_MS,
+ USEC_PER_MSEC);
+ wb_status = get_mbox_status(send_mbox);
+ if (err) {
+ dev_err(hwdev->dev, "Send mailbox segment timeout, wb status: 0x%x\n",
+ wb_status);
+ return err;
+ }
+
+ if (!MBOX_STATUS_SUCCESS(wb_status)) {
+ dev_err(hwdev->dev,
+ "Send mailbox segment to function %u error, wb status: 0x%x\n",
+ dst_func, wb_status);
+ errcode = MBOX_STATUS_ERRCODE(wb_status);
+ return errcode ? errcode : -EFAULT;
+ }
+
+ return 0;
+}
+
+static int send_mbox_msg(struct hinic3_mbox *mbox, u8 mod, u16 cmd,
+ const void *msg, u32 msg_len, u16 dst_func,
+ enum mbox_msg_direction_type direction,
+ enum mbox_msg_ack_type ack_type,
+ struct mbox_msg_info *msg_info)
+{
+ enum mbox_msg_data_type data_type = MBOX_MSG_DATA_INLINE;
+ struct hinic3_hwdev *hwdev = mbox->hwdev;
+ struct mbox_dma_msg dma_msg;
+ u32 seg_len = MBOX_SEG_LEN;
+ __le64 header = 0;
+ u32 seq_id = 0;
+ u16 rsp_aeq_id;
+ u8 *msg_seg;
+ int err = 0;
+ u32 left;
+
+ if (hwdev->hwif->attr.num_aeqs > MBOX_MSG_AEQ_FOR_MBOX)
+ rsp_aeq_id = MBOX_MSG_AEQ_FOR_MBOX;
+ else
+ rsp_aeq_id = 0;
+
+ if (dst_func == MBOX_MGMT_FUNC_ID &&
+ !(hwdev->features[0] & MBOX_COMM_F_MBOX_SEGMENT)) {
+ err = mbox_prepare_dma_msg(mbox, ack_type, &dma_msg,
+ msg, msg_len);
+ if (err)
+ goto err_send;
+
+ msg = &dma_msg;
+ msg_len = sizeof(dma_msg);
+ data_type = MBOX_MSG_DATA_DMA;
+ }
+
+ msg_seg = (u8 *)msg;
+ left = msg_len;
+
+ header = cpu_to_le64(MBOX_MSG_HEADER_SET(msg_len, MSG_LEN) |
+ MBOX_MSG_HEADER_SET(mod, MODULE) |
+ MBOX_MSG_HEADER_SET(seg_len, SEG_LEN) |
+ MBOX_MSG_HEADER_SET(ack_type, NO_ACK) |
+ MBOX_MSG_HEADER_SET(data_type, DATA_TYPE) |
+ MBOX_MSG_HEADER_SET(MBOX_SEQ_ID_START_VAL, SEQID) |
+ MBOX_MSG_HEADER_SET(direction, DIRECTION) |
+ MBOX_MSG_HEADER_SET(cmd, CMD) |
+ MBOX_MSG_HEADER_SET(msg_info->msg_id, MSG_ID) |
+ MBOX_MSG_HEADER_SET(rsp_aeq_id, AEQ_ID) |
+ MBOX_MSG_HEADER_SET(MBOX_MSG_FROM_MBOX, SOURCE) |
+ MBOX_MSG_HEADER_SET(!!msg_info->status, STATUS));
+
+ while (!(MBOX_MSG_HEADER_GET(header, LAST))) {
+ if (left <= MBOX_SEG_LEN) {
+ header &= cpu_to_le64(~MBOX_MSG_HEADER_SEG_LEN_MASK);
+ header |=
+ cpu_to_le64(MBOX_MSG_HEADER_SET(left, SEG_LEN) |
+ MBOX_MSG_HEADER_SET(1, LAST));
+ seg_len = left;
+ }
+
+ err = send_mbox_seg(mbox, header, dst_func, msg_seg,
+ seg_len, msg_info);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to send mbox seg, seq_id=0x%llx\n",
+ MBOX_MSG_HEADER_GET(header, SEQID));
+ goto err_send;
+ }
+
+ left -= MBOX_SEG_LEN;
+ msg_seg += MBOX_SEG_LEN;
+ seq_id++;
+ header &= cpu_to_le64(~MBOX_MSG_HEADER_SEG_LEN_MASK);
+ header |= cpu_to_le64(MBOX_MSG_HEADER_SET(seq_id, SEQID));
+ }
+
+err_send:
+ return err;
+}
+
+static void set_mbox_to_func_event(struct hinic3_mbox *mbox,
+ enum mbox_event_state event_flag)
+{
+ spin_lock(&mbox->mbox_lock);
+ mbox->event_flag = event_flag;
+ spin_unlock(&mbox->mbox_lock);
+}
+
+static enum hinic3_wait_return check_mbox_msg_finish(void *priv_data)
+{
+ struct hinic3_mbox *mbox = priv_data;
+
+ return (mbox->event_flag == MBOX_EVENT_SUCCESS) ?
+ HINIC3_WAIT_PROCESS_CPL : HINIC3_WAIT_PROCESS_WAITING;
+}
+
+static int wait_mbox_msg_completion(struct hinic3_mbox *mbox,
+ u32 timeout)
+{
+ u32 wait_time;
+ int err;
+
+ wait_time = (timeout != 0) ? timeout : MBOX_COMP_POLLING_TIMEOUT_MS;
+ err = hinic3_wait_for_timeout(mbox, check_mbox_msg_finish,
+ wait_time, USEC_PER_MSEC);
+ if (err) {
+ set_mbox_to_func_event(mbox, MBOX_EVENT_TIMEOUT);
+ return err;
+ }
+ set_mbox_to_func_event(mbox, MBOX_EVENT_END);
+
+ return 0;
+}
+
int hinic3_send_mbox_to_mgmt(struct hinic3_hwdev *hwdev, u8 mod, u16 cmd,
const struct mgmt_msg_params *msg_params)
{
- /* Completed by later submission due to LoC limit. */
- return -EFAULT;
+ struct hinic3_mbox *mbox = hwdev->mbox;
+ struct mbox_msg_info msg_info = {};
+ struct hinic3_msg_desc *msg_desc;
+ u32 msg_len;
+ int err;
+
+ /* expect response message */
+ msg_desc = get_mbox_msg_desc(mbox, MBOX_MSG_RESP, MBOX_MGMT_FUNC_ID);
+ mutex_lock(&mbox->mbox_send_lock);
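+ /* MSG_ID is a 4-bit header field, so ids wrap at 16 */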
+ msg_info.msg_id = (mbox->send_msg_id + 1) & 0xF;
+ mbox->send_msg_id = msg_info.msg_id;
+ set_mbox_to_func_event(mbox, MBOX_EVENT_START);
+
+ err = send_mbox_msg(mbox, mod, cmd, msg_params->buf_in,
+ msg_params->in_size, MBOX_MGMT_FUNC_ID,
+ MBOX_MSG_SEND, MBOX_MSG_ACK, &msg_info);
+ if (err) {
+ dev_err(hwdev->dev, "Send mailbox mod %u, cmd %u failed, msg_id: %u, err: %d\n",
+ mod, cmd, msg_info.msg_id, err);
+ set_mbox_to_func_event(mbox, MBOX_EVENT_FAIL);
+ goto err_send;
+ }
+
+ if (wait_mbox_msg_completion(mbox, msg_params->timeout_ms)) {
+ dev_err(hwdev->dev,
+ "Send mbox msg timeout, msg_id: %u\n", msg_info.msg_id);
+ err = -ETIMEDOUT;
+ goto err_send;
+ }
+
+ if (mod != msg_desc->mod || cmd != le16_to_cpu(msg_desc->cmd)) {
+ dev_err(hwdev->dev,
+ "Invalid response mbox message, mod: 0x%x, cmd: 0x%x, expect mod: 0x%x, cmd: 0x%x\n",
+ msg_desc->mod, msg_desc->cmd, mod, cmd);
+ err = -EFAULT;
+ goto err_send;
+ }
+
+ if (msg_desc->msg_info.status) {
+ err = msg_desc->msg_info.status;
+ goto err_send;
+ }
+
+ if (msg_params->buf_out) {
+ msg_len = le16_to_cpu(msg_desc->msg_len);
+ if (msg_len != msg_params->expected_out_size) {
+ dev_err(hwdev->dev,
+ "Invalid response mbox message length: %u for mod %d cmd %u, expected length: %u\n",
+ msg_desc->msg_len, mod, cmd,
+ msg_params->expected_out_size);
+ err = -EFAULT;
+ goto err_send;
+ }
+
+ memcpy(msg_params->buf_out, msg_desc->msg, msg_len);
+ }
+
+err_send:
+ mutex_unlock(&mbox->mbox_send_lock);
+
+ return err;
+}
+
+int hinic3_send_mbox_to_mgmt_no_ack(struct hinic3_hwdev *hwdev, u8 mod, u16 cmd,
+ const struct mgmt_msg_params *msg_params)
+{
+ struct hinic3_mbox *mbox = hwdev->mbox;
+ struct mbox_msg_info msg_info = {};
+ int err;
+
+ mutex_lock(&mbox->mbox_send_lock);
+ err = send_mbox_msg(mbox, mod, cmd, msg_params->buf_in,
+ msg_params->in_size, MBOX_MGMT_FUNC_ID,
+ MBOX_MSG_SEND, MBOX_MSG_NO_ACK, &msg_info);
+ if (err)
+ dev_err(hwdev->dev, "Send mailbox no ack failed\n");
+
+ mutex_unlock(&mbox->mbox_send_lock);
+
+ return err;
}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h b/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h
index d7a6c37b7eff..e71629e95086 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h
@@ -8,8 +8,134 @@
#include <linux/mutex.h>
struct hinic3_hwdev;
+struct mgmt_msg_params;
+
+#define MBOX_MSG_HEADER_SRC_GLB_FUNC_IDX_MASK GENMASK_ULL(12, 0)
+#define MBOX_MSG_HEADER_STATUS_MASK BIT_ULL(13)
+#define MBOX_MSG_HEADER_SOURCE_MASK BIT_ULL(15)
+#define MBOX_MSG_HEADER_AEQ_ID_MASK GENMASK_ULL(17, 16)
+#define MBOX_MSG_HEADER_MSG_ID_MASK GENMASK_ULL(21, 18)
+#define MBOX_MSG_HEADER_CMD_MASK GENMASK_ULL(31, 22)
+#define MBOX_MSG_HEADER_MSG_LEN_MASK GENMASK_ULL(42, 32)
+#define MBOX_MSG_HEADER_MODULE_MASK GENMASK_ULL(47, 43)
+#define MBOX_MSG_HEADER_SEG_LEN_MASK GENMASK_ULL(53, 48)
+#define MBOX_MSG_HEADER_NO_ACK_MASK BIT_ULL(54)
+#define MBOX_MSG_HEADER_DATA_TYPE_MASK BIT_ULL(55)
+#define MBOX_MSG_HEADER_SEQID_MASK GENMASK_ULL(61, 56)
+#define MBOX_MSG_HEADER_LAST_MASK BIT_ULL(62)
+#define MBOX_MSG_HEADER_DIRECTION_MASK BIT_ULL(63)
+
+#define MBOX_MSG_HEADER_SET(val, member) \
+ FIELD_PREP(MBOX_MSG_HEADER_##member##_MASK, val)
+#define MBOX_MSG_HEADER_GET(val, member) \
+ FIELD_GET(MBOX_MSG_HEADER_##member##_MASK, le64_to_cpu(val))
+
+/* Identifies whether a segment belongs to a message or to a response. A VF is
+ * only expected to send messages and receive responses; the PF driver may
+ * receive messages and send responses.
+ */
+enum mbox_msg_direction_type {
+ MBOX_MSG_SEND = 0,
+ MBOX_MSG_RESP = 1,
+};
+
+/* Indicates if mbox message expects a response (ack) or not */
+enum mbox_msg_ack_type {
+ MBOX_MSG_ACK = 0,
+ MBOX_MSG_NO_ACK = 1,
+};
+
+enum mbox_msg_data_type {
+ MBOX_MSG_DATA_INLINE = 0,
+ MBOX_MSG_DATA_DMA = 1,
+};
+
+enum mbox_msg_src_type {
+ MBOX_MSG_FROM_MBOX = 1,
+};
+
+enum mbox_msg_aeq_type {
+ MBOX_MSG_AEQ_FOR_EVENT = 0,
+ MBOX_MSG_AEQ_FOR_MBOX = 1,
+};
+
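A sketch (illustrative only, not part of the patch) of how the FIELD_PREP/FIELD_GET accessors above compose and decode the 64-bit mailbox header for a short single-segment inline request; all variable values here are assumed for the example.

	u64 header =
		MBOX_MSG_HEADER_SET(func_id, SRC_GLB_FUNC_IDX) |
		MBOX_MSG_HEADER_SET(MBOX_MSG_AEQ_FOR_MBOX, AEQ_ID) |
		MBOX_MSG_HEADER_SET(msg_id, MSG_ID) |
		MBOX_MSG_HEADER_SET(cmd, CMD) |
		MBOX_MSG_HEADER_SET(msg_len, MSG_LEN) |
		MBOX_MSG_HEADER_SET(mod, MODULE) |
		MBOX_MSG_HEADER_SET(msg_len, SEG_LEN) |     /* one segment */
		MBOX_MSG_HEADER_SET(MBOX_MSG_ACK, NO_ACK) | /* ack expected */
		MBOX_MSG_HEADER_SET(MBOX_MSG_DATA_INLINE, DATA_TYPE) |
		MBOX_MSG_HEADER_SET(0, SEQID) |
		MBOX_MSG_HEADER_SET(1, LAST) |
		MBOX_MSG_HEADER_SET(MBOX_MSG_SEND, DIRECTION);

	/* the receiver decodes fields from the little-endian header */
	u8 rx_mod = MBOX_MSG_HEADER_GET(hdr, MODULE);
	u16 rx_cmd = MBOX_MSG_HEADER_GET(hdr, CMD);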
+#define HINIC3_MBOX_WQ_NAME "hinic3_mbox"
+
+struct mbox_msg_info {
+ u8 msg_id;
+ u8 status;
+};
+
+struct hinic3_msg_desc {
+ u8 *msg;
+ __le16 msg_len;
+ u8 seq_id;
+ u8 mod;
+ __le16 cmd;
+ struct mbox_msg_info msg_info;
+};
+
+struct hinic3_msg_channel {
+ struct hinic3_msg_desc resp_msg;
+ struct hinic3_msg_desc recv_msg;
+};
+
+struct hinic3_send_mbox {
+ u8 __iomem *data;
+ void *wb_vaddr;
+ dma_addr_t wb_paddr;
+};
+
+enum mbox_event_state {
+ MBOX_EVENT_START = 0,
+ MBOX_EVENT_FAIL = 1,
+ MBOX_EVENT_SUCCESS = 2,
+ MBOX_EVENT_TIMEOUT = 3,
+ MBOX_EVENT_END = 4,
+};
+
+struct mbox_dma_msg {
+ __le32 xor;
+ __le32 dma_addr_high;
+ __le32 dma_addr_low;
+ __le32 msg_len;
+ __le64 rsvd;
+};
+
+struct mbox_dma_queue {
+ void *dma_buf_vaddr;
+ dma_addr_t dma_buf_paddr;
+ u16 depth;
+ u16 prod_idx;
+ u16 cons_idx;
+};
+
+struct hinic3_mbox {
+ struct hinic3_hwdev *hwdev;
+ /* serializes mbox message sending and ack handling */
+ struct mutex mbox_send_lock;
+ struct hinic3_send_mbox send_mbox;
+ struct mbox_dma_queue sync_msg_queue;
+ struct mbox_dma_queue async_msg_queue;
+ struct workqueue_struct *workq;
+ /* driver and MGMT CPU */
+ struct hinic3_msg_channel mgmt_msg;
+ /* VF to PF */
+ struct hinic3_msg_channel *func_msg;
+ u8 send_msg_id;
+ enum mbox_event_state event_flag;
+ /* lock for mbox event flag */
+ spinlock_t mbox_lock;
+};
+
+void hinic3_mbox_func_aeqe_handler(struct hinic3_hwdev *hwdev, u8 *header,
+ u8 size);
+int hinic3_init_mbox(struct hinic3_hwdev *hwdev);
+void hinic3_free_mbox(struct hinic3_hwdev *hwdev);
int hinic3_send_mbox_to_mgmt(struct hinic3_hwdev *hwdev, u8 mod, u16 cmd,
const struct mgmt_msg_params *msg_params);
+int hinic3_send_mbox_to_mgmt_no_ack(struct hinic3_hwdev *hwdev, u8 mod, u16 cmd,
+ const struct mgmt_msg_params *msg_params);
#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.c b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.c
new file mode 100644
index 000000000000..c38d10cd7fac
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include "hinic3_eqs.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_mbox.h"
+#include "hinic3_mgmt.h"
+
+void hinic3_flush_mgmt_workq(struct hinic3_hwdev *hwdev)
+{
+ if (hwdev->aeqs)
+ flush_workqueue(hwdev->aeqs->workq);
+}
+
+void hinic3_mgmt_msg_aeqe_handler(struct hinic3_hwdev *hwdev, u8 *header,
+ u8 size)
+{
+ if (MBOX_MSG_HEADER_GET(*(__force __le64 *)header, SOURCE) ==
+ MBOX_MSG_FROM_MBOX)
+ hinic3_mbox_func_aeqe_handler(hwdev, header, size);
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h
index 4edabeb32112..bbef3b32a6ec 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h
@@ -9,5 +9,7 @@
struct hinic3_hwdev;
void hinic3_flush_mgmt_workq(struct hinic3_hwdev *hwdev);
+void hinic3_mgmt_msg_aeqe_handler(struct hinic3_hwdev *hwdev,
+ u8 *header, u8 size);
#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h
index c4434efdc7f7..6cc0345c39e4 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h
@@ -56,12 +56,105 @@ struct l2nic_cmd_update_mac {
u8 new_mac[ETH_ALEN];
};
+struct l2nic_cmd_set_ci_attr {
+ struct mgmt_msg_head msg_head;
+ u16 func_idx;
+ u8 dma_attr_off;
+ u8 pending_limit;
+ u8 coalescing_time;
+ u8 intr_en;
+ u16 intr_idx;
+ u32 l2nic_sqn;
+ u32 rsvd;
+ u64 ci_addr;
+};
+
+struct l2nic_cmd_clear_qp_resource {
+ struct mgmt_msg_head msg_head;
+ u16 func_id;
+ u16 rsvd1;
+};
+
struct l2nic_cmd_force_pkt_drop {
struct mgmt_msg_head msg_head;
u8 port;
u8 rsvd1[3];
};
+struct l2nic_cmd_set_vport_state {
+ struct mgmt_msg_head msg_head;
+ u16 func_id;
+ u16 rsvd1;
+ /* 0--disable, 1--enable */
+ u8 state;
+ u8 rsvd2[3];
+};
+
+struct l2nic_cmd_set_dcb_state {
+ struct mgmt_msg_head head;
+ u16 func_id;
+ /* 0 - get dcb state, 1 - set dcb state */
+ u8 op_code;
+ /* 0 - disable, 1 - enable dcb */
+ u8 state;
+ /* 0 - disable, 1 - enable dcb */
+ u8 port_state;
+ u8 rsvd[7];
+};
+
+#define L2NIC_RSS_TYPE_VALID_MASK BIT(23)
+#define L2NIC_RSS_TYPE_TCP_IPV6_EXT_MASK BIT(24)
+#define L2NIC_RSS_TYPE_IPV6_EXT_MASK BIT(25)
+#define L2NIC_RSS_TYPE_TCP_IPV6_MASK BIT(26)
+#define L2NIC_RSS_TYPE_IPV6_MASK BIT(27)
+#define L2NIC_RSS_TYPE_TCP_IPV4_MASK BIT(28)
+#define L2NIC_RSS_TYPE_IPV4_MASK BIT(29)
+#define L2NIC_RSS_TYPE_UDP_IPV6_MASK BIT(30)
+#define L2NIC_RSS_TYPE_UDP_IPV4_MASK BIT(31)
+#define L2NIC_RSS_TYPE_SET(val, member) \
+ FIELD_PREP(L2NIC_RSS_TYPE_##member##_MASK, val)
+#define L2NIC_RSS_TYPE_GET(val, member) \
+ FIELD_GET(L2NIC_RSS_TYPE_##member##_MASK, val)
+
+#define L2NIC_RSS_INDIR_SIZE 256
+#define L2NIC_RSS_KEY_SIZE 40
+
+/* IEEE 802.1Qaz std */
+#define L2NIC_DCB_COS_MAX 0x8
+
+struct l2nic_cmd_set_rss_ctx_tbl {
+ struct mgmt_msg_head msg_head;
+ u16 func_id;
+ u16 rsvd1;
+ u32 context;
+};
+
+struct l2nic_cmd_cfg_rss_engine {
+ struct mgmt_msg_head msg_head;
+ u16 func_id;
+ u8 opcode;
+ u8 hash_engine;
+ u8 rsvd1[4];
+};
+
+struct l2nic_cmd_cfg_rss_hash_key {
+ struct mgmt_msg_head msg_head;
+ u16 func_id;
+ u8 opcode;
+ u8 rsvd1;
+ u8 key[L2NIC_RSS_KEY_SIZE];
+};
+
+struct l2nic_cmd_cfg_rss {
+ struct mgmt_msg_head msg_head;
+ u16 func_id;
+ u8 rss_en;
+ u8 rq_priority_number;
+ u8 prio_tc[L2NIC_DCB_COS_MAX];
+ u16 num_qps;
+ u16 rsvd1;
+};
+
/* Commands between NIC to fw */
enum l2nic_cmd {
/* FUNC CFG */
@@ -82,6 +175,32 @@ enum l2nic_cmd {
L2NIC_CMD_MAX = 256,
};
+struct l2nic_cmd_rss_set_indir_tbl {
+ __le32 rsvd[4];
+ __le16 entry[L2NIC_RSS_INDIR_SIZE];
+};
+
+/* NIC CMDQ MODE */
+enum l2nic_ucode_cmd {
+ L2NIC_UCODE_CMD_MODIFY_QUEUE_CTX = 0,
+ L2NIC_UCODE_CMD_CLEAN_QUEUE_CTX = 1,
+ L2NIC_UCODE_CMD_SET_RSS_INDIR_TBL = 4,
+};
+
+/* hilink mac group command */
+enum mag_cmd {
+ MAG_CMD_GET_LINK_STATUS = 7,
+};
+
+/* firmware also uses this cmd to report link events to the driver */
+struct mag_cmd_get_link_status {
+ struct mgmt_msg_head head;
+ u8 port_id;
+ /* 0:link down 1:link up */
+ u8 status;
+ u8 rsvd0[2];
+};
+
enum hinic3_nic_feature_cap {
HINIC3_NIC_F_CSUM = BIT(0),
HINIC3_NIC_F_SCTP_CRC = BIT(1),
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c b/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c
index 71104a6b8bef..0fa3c7900225 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c
@@ -8,19 +8,437 @@
#include "hinic3_nic_cfg.h"
#include "hinic3_nic_dev.h"
#include "hinic3_nic_io.h"
+#include "hinic3_rss.h"
#include "hinic3_rx.h"
#include "hinic3_tx.h"
+/* Try to modify the number of IRQs to the target number and return the
+ * actual number of IRQs.
+ */
+static u16 hinic3_qp_irq_change(struct net_device *netdev,
+ u16 dst_num_qp_irq)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct msix_entry *qps_msix_entries;
+ u16 resp_irq_num, irq_num_gap, i;
+ u16 idx;
+ int err;
+
+ qps_msix_entries = nic_dev->qps_msix_entries;
+ if (dst_num_qp_irq > nic_dev->num_qp_irq) {
+ irq_num_gap = dst_num_qp_irq - nic_dev->num_qp_irq;
+ err = hinic3_alloc_irqs(nic_dev->hwdev, irq_num_gap,
+ &qps_msix_entries[nic_dev->num_qp_irq],
+ &resp_irq_num);
+ if (err) {
+ netdev_err(netdev, "Failed to alloc irqs\n");
+ return nic_dev->num_qp_irq;
+ }
+
+ nic_dev->num_qp_irq += resp_irq_num;
+ } else if (dst_num_qp_irq < nic_dev->num_qp_irq) {
+ irq_num_gap = nic_dev->num_qp_irq - dst_num_qp_irq;
+ for (i = 0; i < irq_num_gap; i++) {
+ idx = (nic_dev->num_qp_irq - i) - 1;
+ hinic3_free_irq(nic_dev->hwdev,
+ qps_msix_entries[idx].vector);
+ qps_msix_entries[idx].vector = 0;
+ qps_msix_entries[idx].entry = 0;
+ }
+ nic_dev->num_qp_irq = dst_num_qp_irq;
+ }
+
+ return nic_dev->num_qp_irq;
+}
+
+static void hinic3_config_num_qps(struct net_device *netdev,
+ struct hinic3_dyna_txrxq_params *q_params)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ u16 alloc_num_irq, cur_num_irq;
+ u16 dst_num_irq;
+
+ if (!test_bit(HINIC3_RSS_ENABLE, &nic_dev->flags))
+ q_params->num_qps = 1;
+
+ if (nic_dev->num_qp_irq >= q_params->num_qps)
+ goto out;
+
+ cur_num_irq = nic_dev->num_qp_irq;
+
+ alloc_num_irq = hinic3_qp_irq_change(netdev, q_params->num_qps);
+ if (alloc_num_irq < q_params->num_qps) {
+ q_params->num_qps = alloc_num_irq;
+ netdev_warn(netdev, "Cannot get enough irqs, adjusting num_qps to %u\n",
+ q_params->num_qps);
+
+ /* The current irqs may be in use; we must keep them */
+ dst_num_irq = max_t(u16, cur_num_irq, q_params->num_qps);
+ hinic3_qp_irq_change(netdev, dst_num_irq);
+ }
+
+out:
+ netdev_dbg(netdev, "Actual num_qps is %u\n",
+ q_params->num_qps);
+}
+
+static int hinic3_setup_num_qps(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ nic_dev->num_qp_irq = 0;
+
+ nic_dev->qps_msix_entries = kcalloc(nic_dev->max_qps,
+ sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!nic_dev->qps_msix_entries)
+ return -ENOMEM;
+
+ hinic3_config_num_qps(netdev, &nic_dev->q_params);
+
+ return 0;
+}
+
+static void hinic3_destroy_num_qps(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ u16 i;
+
+ for (i = 0; i < nic_dev->num_qp_irq; i++)
+ hinic3_free_irq(nic_dev->hwdev,
+ nic_dev->qps_msix_entries[i].vector);
+
+ kfree(nic_dev->qps_msix_entries);
+}
+
+static int hinic3_alloc_txrxq_resources(struct net_device *netdev,
+ struct hinic3_dyna_txrxq_params *q_params)
+{
+ int err;
+
+ q_params->txqs_res = kcalloc(q_params->num_qps,
+ sizeof(*q_params->txqs_res), GFP_KERNEL);
+ if (!q_params->txqs_res)
+ return -ENOMEM;
+
+ q_params->rxqs_res = kcalloc(q_params->num_qps,
+ sizeof(*q_params->rxqs_res), GFP_KERNEL);
+ if (!q_params->rxqs_res) {
+ err = -ENOMEM;
+ goto err_free_txqs_res_arr;
+ }
+
+ q_params->irq_cfg = kcalloc(q_params->num_qps,
+ sizeof(*q_params->irq_cfg), GFP_KERNEL);
+ if (!q_params->irq_cfg) {
+ err = -ENOMEM;
+ goto err_free_rxqs_res_arr;
+ }
+
+ err = hinic3_alloc_txqs_res(netdev, q_params->num_qps,
+ q_params->sq_depth, q_params->txqs_res);
+ if (err) {
+ netdev_err(netdev, "Failed to alloc txqs resource\n");
+ goto err_free_irq_cfg;
+ }
+
+ err = hinic3_alloc_rxqs_res(netdev, q_params->num_qps,
+ q_params->rq_depth, q_params->rxqs_res);
+ if (err) {
+ netdev_err(netdev, "Failed to alloc rxqs resource\n");
+ goto err_free_txqs_res;
+ }
+
+ return 0;
+
+err_free_txqs_res:
+ hinic3_free_txqs_res(netdev, q_params->num_qps, q_params->sq_depth,
+ q_params->txqs_res);
+err_free_irq_cfg:
+ kfree(q_params->irq_cfg);
+ q_params->irq_cfg = NULL;
+err_free_rxqs_res_arr:
+ kfree(q_params->rxqs_res);
+ q_params->rxqs_res = NULL;
+err_free_txqs_res_arr:
+ kfree(q_params->txqs_res);
+ q_params->txqs_res = NULL;
+
+ return err;
+}
+
+static void hinic3_free_txrxq_resources(struct net_device *netdev,
+ struct hinic3_dyna_txrxq_params *q_params)
+{
+ hinic3_free_rxqs_res(netdev, q_params->num_qps, q_params->rq_depth,
+ q_params->rxqs_res);
+ hinic3_free_txqs_res(netdev, q_params->num_qps, q_params->sq_depth,
+ q_params->txqs_res);
+
+ kfree(q_params->irq_cfg);
+ q_params->irq_cfg = NULL;
+
+ kfree(q_params->rxqs_res);
+ q_params->rxqs_res = NULL;
+
+ kfree(q_params->txqs_res);
+ q_params->txqs_res = NULL;
+}
+
+static int hinic3_configure_txrxqs(struct net_device *netdev,
+ struct hinic3_dyna_txrxq_params *q_params)
+{
+ int err;
+
+ err = hinic3_configure_txqs(netdev, q_params->num_qps,
+ q_params->sq_depth, q_params->txqs_res);
+ if (err) {
+ netdev_err(netdev, "Failed to configure txqs\n");
+ return err;
+ }
+
+ err = hinic3_configure_rxqs(netdev, q_params->num_qps,
+ q_params->rq_depth, q_params->rxqs_res);
+ if (err) {
+ netdev_err(netdev, "Failed to configure rxqs\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int hinic3_configure(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ int err;
+
+ netdev->min_mtu = HINIC3_MIN_MTU_SIZE;
+ netdev->max_mtu = HINIC3_MAX_JUMBO_FRAME_SIZE;
+ err = hinic3_set_port_mtu(netdev, netdev->mtu);
+ if (err) {
+ netdev_err(netdev, "Failed to set mtu\n");
+ return err;
+ }
+
+ /* Ensure DCB is disabled */
+ hinic3_sync_dcb_state(nic_dev->hwdev, 1, 0);
+
+ if (test_bit(HINIC3_RSS_ENABLE, &nic_dev->flags)) {
+ err = hinic3_rss_init(netdev);
+ if (err) {
+ netdev_err(netdev, "Failed to init rss\n");
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void hinic3_remove_configure(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ if (test_bit(HINIC3_RSS_ENABLE, &nic_dev->flags))
+ hinic3_rss_uninit(netdev);
+}
+
+static int hinic3_alloc_channel_resources(struct net_device *netdev,
+ struct hinic3_dyna_qp_params *qp_params,
+ struct hinic3_dyna_txrxq_params *trxq_params)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ int err;
+
+ qp_params->num_qps = trxq_params->num_qps;
+ qp_params->sq_depth = trxq_params->sq_depth;
+ qp_params->rq_depth = trxq_params->rq_depth;
+
+ err = hinic3_alloc_qps(nic_dev, qp_params);
+ if (err) {
+ netdev_err(netdev, "Failed to alloc qps\n");
+ return err;
+ }
+
+ err = hinic3_alloc_txrxq_resources(netdev, trxq_params);
+ if (err) {
+ netdev_err(netdev, "Failed to alloc txrxq resources\n");
+ hinic3_free_qps(nic_dev, qp_params);
+ return err;
+ }
+
+ return 0;
+}
+
+static void hinic3_free_channel_resources(struct net_device *netdev,
+ struct hinic3_dyna_qp_params *qp_params,
+ struct hinic3_dyna_txrxq_params *trxq_params)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ hinic3_free_txrxq_resources(netdev, trxq_params);
+ hinic3_free_qps(nic_dev, qp_params);
+}
+
+static int hinic3_open_channel(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ int err;
+
+ err = hinic3_init_qp_ctxts(nic_dev);
+ if (err) {
+ netdev_err(netdev, "Failed to init qps\n");
+ return err;
+ }
+
+ err = hinic3_configure_txrxqs(netdev, &nic_dev->q_params);
+ if (err) {
+ netdev_err(netdev, "Failed to configure txrxqs\n");
+ goto err_free_qp_ctxts;
+ }
+
+ err = hinic3_qps_irq_init(netdev);
+ if (err) {
+ netdev_err(netdev, "Failed to init txrxq irq\n");
+ goto err_free_qp_ctxts;
+ }
+
+ err = hinic3_configure(netdev);
+ if (err) {
+ netdev_err(netdev, "Failed to configure netdev\n");
+ goto err_uninit_qps_irq;
+ }
+
+ return 0;
+
+err_uninit_qps_irq:
+ hinic3_qps_irq_uninit(netdev);
+err_free_qp_ctxts:
+ hinic3_free_qp_ctxts(nic_dev);
+
+ return err;
+}
+
+static void hinic3_close_channel(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ hinic3_remove_configure(netdev);
+ hinic3_qps_irq_uninit(netdev);
+ hinic3_free_qp_ctxts(nic_dev);
+}
+
+static int hinic3_vport_up(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ bool link_status_up;
+ u16 glb_func_id;
+ int err;
+
+ glb_func_id = hinic3_global_func_id(nic_dev->hwdev);
+ err = hinic3_set_vport_enable(nic_dev->hwdev, glb_func_id, true);
+ if (err) {
+ netdev_err(netdev, "Failed to enable vport\n");
+ goto err_flush_qps_res;
+ }
+
+ err = netif_set_real_num_queues(netdev, nic_dev->q_params.num_qps,
+ nic_dev->q_params.num_qps);
+ if (err) {
+ netdev_err(netdev, "Failed to set real number of queues\n");
+ goto err_flush_qps_res;
+ }
+ netif_tx_start_all_queues(netdev);
+
+ err = hinic3_get_link_status(nic_dev->hwdev, &link_status_up);
+ if (!err && link_status_up)
+ netif_carrier_on(netdev);
+
+ return 0;
+
+err_flush_qps_res:
+ hinic3_flush_qps_res(nic_dev->hwdev);
+ /* wait to guarantee that no packets will be sent to host */
+ msleep(100);
+
+ return err;
+}
+
+static void hinic3_vport_down(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ u16 glb_func_id;
+
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
+
+ glb_func_id = hinic3_global_func_id(nic_dev->hwdev);
+ hinic3_set_vport_enable(nic_dev->hwdev, glb_func_id, false);
+
+ hinic3_flush_txqs(netdev);
+ /* wait to guarantee that no packets will be sent to host */
+ msleep(100);
+ hinic3_flush_qps_res(nic_dev->hwdev);
+}
+
static int hinic3_open(struct net_device *netdev)
{
- /* Completed by later submission due to LoC limit. */
- return -EFAULT;
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_dyna_qp_params qp_params;
+ int err;
+
+ err = hinic3_init_nicio_res(nic_dev);
+ if (err) {
+ netdev_err(netdev, "Failed to init nicio resources\n");
+ return err;
+ }
+
+ err = hinic3_setup_num_qps(netdev);
+ if (err) {
+ netdev_err(netdev, "Failed to setup num_qps\n");
+ goto err_free_nicio_res;
+ }
+
+ err = hinic3_alloc_channel_resources(netdev, &qp_params,
+ &nic_dev->q_params);
+ if (err)
+ goto err_destroy_num_qps;
+
+ hinic3_init_qps(nic_dev, &qp_params);
+
+ err = hinic3_open_channel(netdev);
+ if (err)
+ goto err_uninit_qps;
+
+ err = hinic3_vport_up(netdev);
+ if (err)
+ goto err_close_channel;
+
+ return 0;
+
+err_close_channel:
+ hinic3_close_channel(netdev);
+err_uninit_qps:
+ hinic3_uninit_qps(nic_dev, &qp_params);
+ hinic3_free_channel_resources(netdev, &qp_params, &nic_dev->q_params);
+err_destroy_num_qps:
+ hinic3_destroy_num_qps(netdev);
+err_free_nicio_res:
+ hinic3_free_nicio_res(nic_dev);
+
+ return err;
}
static int hinic3_close(struct net_device *netdev)
{
- /* Completed by later submission due to LoC limit. */
- return -EFAULT;
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_dyna_qp_params qp_params;
+
+ hinic3_vport_down(netdev);
+ hinic3_close_channel(netdev);
+ hinic3_uninit_qps(nic_dev, &qp_params);
+ hinic3_free_channel_resources(netdev, &qp_params, &nic_dev->q_params);
+
+ return 0;
}
static int hinic3_change_mtu(struct net_device *netdev, int new_mtu)
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c
index 5b1a91a18c67..979f47ca77f9 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c
@@ -39,6 +39,12 @@ static int hinic3_feature_nego(struct hinic3_hwdev *hwdev, u8 opcode,
return 0;
}
+int hinic3_get_nic_feature_from_hw(struct hinic3_nic_dev *nic_dev)
+{
+ return hinic3_feature_nego(nic_dev->hwdev, MGMT_MSG_CMD_OP_GET,
+ &nic_dev->nic_io->feature_cap, 1);
+}
+
int hinic3_set_nic_feature_to_hw(struct hinic3_nic_dev *nic_dev)
{
return hinic3_feature_nego(nic_dev->hwdev, MGMT_MSG_CMD_OP_SET,
@@ -82,6 +88,23 @@ static int hinic3_set_function_table(struct hinic3_hwdev *hwdev, u32 cfg_bitmap,
return 0;
}
+int hinic3_init_function_table(struct hinic3_nic_dev *nic_dev)
+{
+ struct hinic3_nic_io *nic_io = nic_dev->nic_io;
+ struct l2nic_func_tbl_cfg func_tbl_cfg = {};
+ u32 cfg_bitmap;
+
+ func_tbl_cfg.mtu = 0x3FFF; /* default, max mtu */
+ func_tbl_cfg.rx_wqe_buf_size = nic_io->rx_buf_len;
+
+ cfg_bitmap = BIT(L2NIC_FUNC_TBL_CFG_INIT) |
+ BIT(L2NIC_FUNC_TBL_CFG_MTU) |
+ BIT(L2NIC_FUNC_TBL_CFG_RX_BUF_SIZE);
+
+ return hinic3_set_function_table(nic_dev->hwdev, cfg_bitmap,
+ &func_tbl_cfg);
+}
+
int hinic3_set_port_mtu(struct net_device *netdev, u16 new_mtu)
{
struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
@@ -89,6 +112,7 @@ int hinic3_set_port_mtu(struct net_device *netdev, u16 new_mtu)
struct hinic3_hwdev *hwdev = nic_dev->hwdev;
func_tbl_cfg.mtu = new_mtu;
+
return hinic3_set_function_table(hwdev, BIT(L2NIC_FUNC_TBL_CFG_MTU),
&func_tbl_cfg);
}
@@ -206,6 +230,63 @@ int hinic3_update_mac(struct hinic3_hwdev *hwdev, const u8 *old_mac,
err, mac_info.msg_head.status);
return -EIO;
}
+
+ return 0;
+}
+
+int hinic3_set_ci_table(struct hinic3_hwdev *hwdev, struct hinic3_sq_attr *attr)
+{
+ struct l2nic_cmd_set_ci_attr cons_idx_attr = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ cons_idx_attr.func_idx = hinic3_global_func_id(hwdev);
+ cons_idx_attr.dma_attr_off = attr->dma_attr_off;
+ cons_idx_attr.pending_limit = attr->pending_limit;
+ cons_idx_attr.coalescing_time = attr->coalescing_time;
+
+ if (attr->intr_en) {
+ cons_idx_attr.intr_en = attr->intr_en;
+ cons_idx_attr.intr_idx = attr->intr_idx;
+ }
+
+ cons_idx_attr.l2nic_sqn = attr->l2nic_sqn;
+ cons_idx_attr.ci_addr = attr->ci_dma_base;
+
+ mgmt_msg_params_init_default(&msg_params, &cons_idx_attr,
+ sizeof(cons_idx_attr));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_SET_SQ_CI_ATTR, &msg_params);
+ if (err || cons_idx_attr.msg_head.status) {
+ dev_err(hwdev->dev,
+ "Failed to set ci attribute table, err: %d, status: 0x%x\n",
+ err, cons_idx_attr.msg_head.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int hinic3_flush_qps_res(struct hinic3_hwdev *hwdev)
+{
+ struct l2nic_cmd_clear_qp_resource sq_res = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ sq_res.func_id = hinic3_global_func_id(hwdev);
+
+ mgmt_msg_params_init_default(&msg_params, &sq_res, sizeof(sq_res));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_CLEAR_QP_RESOURCE,
+ &msg_params);
+ if (err || sq_res.msg_head.status) {
+ dev_err(hwdev->dev, "Failed to clear sq resources, err: %d, status: 0x%x\n",
+ err, sq_res.msg_head.status);
+ return -EINVAL;
+ }
+
return 0;
}
@@ -231,3 +312,74 @@ int hinic3_force_drop_tx_pkt(struct hinic3_hwdev *hwdev)
return pkt_drop.msg_head.status;
}
+
+int hinic3_sync_dcb_state(struct hinic3_hwdev *hwdev, u8 op_code, u8 state)
+{
+ struct l2nic_cmd_set_dcb_state dcb_state = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ dcb_state.op_code = op_code;
+ dcb_state.state = state;
+ dcb_state.func_id = hinic3_global_func_id(hwdev);
+
+ mgmt_msg_params_init_default(&msg_params, &dcb_state,
+ sizeof(dcb_state));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_QOS_DCB_STATE, &msg_params);
+ if (err || dcb_state.head.status) {
+ dev_err(hwdev->dev,
+ "Failed to set dcb state, err: %d, status: 0x%x\n",
+ err, dcb_state.head.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int hinic3_get_link_status(struct hinic3_hwdev *hwdev, bool *link_status_up)
+{
+ struct mag_cmd_get_link_status get_link = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ get_link.port_id = hinic3_physical_port_id(hwdev);
+
+ mgmt_msg_params_init_default(&msg_params, &get_link, sizeof(get_link));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_HILINK,
+ MAG_CMD_GET_LINK_STATUS, &msg_params);
+ if (err || get_link.head.status) {
+ dev_err(hwdev->dev, "Failed to get link state, err: %d, status: 0x%x\n",
+ err, get_link.head.status);
+ return -EIO;
+ }
+
+ *link_status_up = !!get_link.status;
+
+ return 0;
+}
+
+int hinic3_set_vport_enable(struct hinic3_hwdev *hwdev, u16 func_id,
+ bool enable)
+{
+ struct l2nic_cmd_set_vport_state en_state = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ en_state.func_id = func_id;
+ en_state.state = enable ? 1 : 0;
+
+ mgmt_msg_params_init_default(&msg_params, &en_state, sizeof(en_state));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_SET_VPORT_ENABLE, &msg_params);
+ if (err || en_state.msg_head.status) {
+ dev_err(hwdev->dev, "Failed to set vport state, err: %d, status: 0x%x\n",
+ err, en_state.msg_head.status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h
index bf9ce51dc401..b83b567fa542 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h
@@ -22,11 +22,23 @@ enum hinic3_nic_event_type {
HINIC3_NIC_EVENT_LINK_UP = 1,
};
+struct hinic3_sq_attr {
+ u8 dma_attr_off;
+ u8 pending_limit;
+ u8 coalescing_time;
+ u8 intr_en;
+ u16 intr_idx;
+ u32 l2nic_sqn;
+ u64 ci_dma_base;
+};
+
+int hinic3_get_nic_feature_from_hw(struct hinic3_nic_dev *nic_dev);
int hinic3_set_nic_feature_to_hw(struct hinic3_nic_dev *nic_dev);
bool hinic3_test_support(struct hinic3_nic_dev *nic_dev,
enum hinic3_nic_feature_cap feature_bits);
void hinic3_update_nic_feature(struct hinic3_nic_dev *nic_dev, u64 feature_cap);
+int hinic3_init_function_table(struct hinic3_nic_dev *nic_dev);
int hinic3_set_port_mtu(struct net_device *netdev, u16 new_mtu);
int hinic3_set_mac(struct hinic3_hwdev *hwdev, const u8 *mac_addr, u16 vlan_id,
@@ -36,6 +48,14 @@ int hinic3_del_mac(struct hinic3_hwdev *hwdev, const u8 *mac_addr, u16 vlan_id,
int hinic3_update_mac(struct hinic3_hwdev *hwdev, const u8 *old_mac,
u8 *new_mac, u16 vlan_id, u16 func_id);
+int hinic3_set_ci_table(struct hinic3_hwdev *hwdev,
+ struct hinic3_sq_attr *attr);
+int hinic3_flush_qps_res(struct hinic3_hwdev *hwdev);
int hinic3_force_drop_tx_pkt(struct hinic3_hwdev *hwdev);
+int hinic3_sync_dcb_state(struct hinic3_hwdev *hwdev, u8 op_code, u8 state);
+int hinic3_get_link_status(struct hinic3_hwdev *hwdev, bool *link_status_up);
+int hinic3_set_vport_enable(struct hinic3_hwdev *hwdev, u16 func_id,
+ bool enable);
+
#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h
index c994fc9b6ee0..5ba83261616c 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h
@@ -51,6 +51,12 @@ struct hinic3_dyna_txrxq_params {
struct hinic3_irq_cfg *irq_cfg;
};
+struct hinic3_intr_coal_info {
+ u8 pending_limit;
+ u8 coalesce_timer_cfg;
+ u8 resend_timer_cfg;
+};
+
struct hinic3_nic_dev {
struct pci_dev *pdev;
struct net_device *netdev;
@@ -67,16 +73,21 @@ struct hinic3_nic_dev {
struct hinic3_txq *txqs;
struct hinic3_rxq *rxqs;
+ enum hinic3_rss_hash_type rss_hash_type;
+ struct hinic3_rss_type rss_type;
+ u8 *rss_hkey;
+ u16 *rss_indir;
+
u16 num_qp_irq;
struct msix_entry *qps_msix_entries;
+ struct hinic3_intr_coal_info *intr_coalesce;
+
bool link_status_up;
};
void hinic3_set_netdev_ops(struct net_device *netdev);
-
-/* Temporary prototypes. Functions become static in later submission. */
-void qp_add_napi(struct hinic3_irq_cfg *irq_cfg);
-void qp_del_napi(struct hinic3_irq_cfg *irq_cfg);
+int hinic3_qps_irq_init(struct net_device *netdev);
+void hinic3_qps_irq_uninit(struct net_device *netdev);
#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c
index 34a1f5bd5ac1..d86cd1ba4605 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+#include "hinic3_cmdq.h"
#include "hinic3_hw_comm.h"
#include "hinic3_hw_intf.h"
#include "hinic3_hwdev.h"
@@ -9,13 +10,876 @@
#include "hinic3_nic_dev.h"
#include "hinic3_nic_io.h"
+#define HINIC3_DEFAULT_TX_CI_PENDING_LIMIT 1
+#define HINIC3_DEFAULT_TX_CI_COALESCING_TIME 1
+#define HINIC3_DEFAULT_DROP_THD_ON (0xFFFF)
+#define HINIC3_DEFAULT_DROP_THD_OFF 0
+
+#define HINIC3_CI_Q_ADDR_SIZE (64)
+
+#define HINIC3_CI_TABLE_SIZE(num_qps) \
+ (ALIGN((num_qps) * HINIC3_CI_Q_ADDR_SIZE, HINIC3_MIN_PAGE_SIZE))
+
+#define HINIC3_CI_VADDR(base_addr, q_id) \
+ ((u8 *)(base_addr) + (q_id) * HINIC3_CI_Q_ADDR_SIZE)
+
+#define HINIC3_CI_PADDR(base_paddr, q_id) \
+ ((base_paddr) + (q_id) * HINIC3_CI_Q_ADDR_SIZE)
+
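A hedged worked example of the CI-table layout these macros encode; the queue count is assumed, and HINIC3_MIN_PAGE_SIZE (defined elsewhere in the driver) is taken to be 4 KiB for the arithmetic.

	/* Sketch: 16 QPs, one 64-byte slot per SQ. The table size is
	 * ALIGN(16 * 64, HINIC3_MIN_PAGE_SIZE), i.e. 4096 bytes for a
	 * 4 KiB minimum page; queue 5's slot starts at offset 5 * 64 = 320.
	 */
	void *q5_ci_va = HINIC3_CI_VADDR(nic_io->ci_vaddr_base, 5);
	dma_addr_t q5_ci_pa = HINIC3_CI_PADDR(nic_io->ci_dma_base, 5);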
+#define SQ_WQ_PREFETCH_MAX 1
+#define SQ_WQ_PREFETCH_MIN 1
+#define SQ_WQ_PREFETCH_THRESHOLD 16
+
+#define RQ_WQ_PREFETCH_MAX 4
+#define RQ_WQ_PREFETCH_MIN 1
+#define RQ_WQ_PREFETCH_THRESHOLD 256
+
+/* max queue ctxts per cmdq buffer: a 2048-byte buffer holds an 8-byte
+ * header plus (2048 - 8) / 64 = 31 64-byte queue ctxts
+ */
+#define HINIC3_Q_CTXT_MAX 31
+
+enum hinic3_qp_ctxt_type {
+ HINIC3_QP_CTXT_TYPE_SQ = 0,
+ HINIC3_QP_CTXT_TYPE_RQ = 1,
+};
+
+struct hinic3_qp_ctxt_hdr {
+ __le16 num_queues;
+ __le16 queue_type;
+ __le16 start_qid;
+ __le16 rsvd;
+};
+
+struct hinic3_sq_ctxt {
+ __le32 ci_pi;
+ __le32 drop_mode_sp;
+ __le32 wq_pfn_hi_owner;
+ __le32 wq_pfn_lo;
+
+ __le32 rsvd0;
+ __le32 pkt_drop_thd;
+ __le32 global_sq_id;
+ __le32 vlan_ceq_attr;
+
+ __le32 pref_cache;
+ __le32 pref_ci_owner;
+ __le32 pref_wq_pfn_hi_ci;
+ __le32 pref_wq_pfn_lo;
+
+ __le32 rsvd8;
+ __le32 rsvd9;
+ __le32 wq_block_pfn_hi;
+ __le32 wq_block_pfn_lo;
+};
+
+struct hinic3_rq_ctxt {
+ __le32 ci_pi;
+ __le32 ceq_attr;
+ __le32 wq_pfn_hi_type_owner;
+ __le32 wq_pfn_lo;
+
+ __le32 rsvd[3];
+ __le32 cqe_sge_len;
+
+ __le32 pref_cache;
+ __le32 pref_ci_owner;
+ __le32 pref_wq_pfn_hi_ci;
+ __le32 pref_wq_pfn_lo;
+
+ __le32 pi_paddr_hi;
+ __le32 pi_paddr_lo;
+ __le32 wq_block_pfn_hi;
+ __le32 wq_block_pfn_lo;
+};
+
+struct hinic3_sq_ctxt_block {
+ struct hinic3_qp_ctxt_hdr cmdq_hdr;
+ struct hinic3_sq_ctxt sq_ctxt[HINIC3_Q_CTXT_MAX];
+};
+
+struct hinic3_rq_ctxt_block {
+ struct hinic3_qp_ctxt_hdr cmdq_hdr;
+ struct hinic3_rq_ctxt rq_ctxt[HINIC3_Q_CTXT_MAX];
+};
+
+struct hinic3_clean_queue_ctxt {
+ struct hinic3_qp_ctxt_hdr cmdq_hdr;
+ __le32 rsvd;
+};
+
+#define SQ_CTXT_SIZE(num_sqs) \
+ (sizeof(struct hinic3_qp_ctxt_hdr) + \
+ (num_sqs) * sizeof(struct hinic3_sq_ctxt))
+
+#define RQ_CTXT_SIZE(num_rqs) \
+ (sizeof(struct hinic3_qp_ctxt_hdr) + \
+ (num_rqs) * sizeof(struct hinic3_rq_ctxt))
+
+#define SQ_CTXT_PREF_CI_HI_SHIFT 12
+#define SQ_CTXT_PREF_CI_HI(val) ((val) >> SQ_CTXT_PREF_CI_HI_SHIFT)
+
+#define SQ_CTXT_PI_IDX_MASK GENMASK(15, 0)
+#define SQ_CTXT_CI_IDX_MASK GENMASK(31, 16)
+#define SQ_CTXT_CI_PI_SET(val, member) \
+ FIELD_PREP(SQ_CTXT_##member##_MASK, val)
+
+#define SQ_CTXT_MODE_SP_FLAG_MASK BIT(0)
+#define SQ_CTXT_MODE_PKT_DROP_MASK BIT(1)
+#define SQ_CTXT_MODE_SET(val, member) \
+ FIELD_PREP(SQ_CTXT_MODE_##member##_MASK, val)
+
+#define SQ_CTXT_WQ_PAGE_HI_PFN_MASK GENMASK(19, 0)
+#define SQ_CTXT_WQ_PAGE_OWNER_MASK BIT(23)
+#define SQ_CTXT_WQ_PAGE_SET(val, member) \
+ FIELD_PREP(SQ_CTXT_WQ_PAGE_##member##_MASK, val)
+
+#define SQ_CTXT_PKT_DROP_THD_ON_MASK GENMASK(15, 0)
+#define SQ_CTXT_PKT_DROP_THD_OFF_MASK GENMASK(31, 16)
+#define SQ_CTXT_PKT_DROP_THD_SET(val, member) \
+ FIELD_PREP(SQ_CTXT_PKT_DROP_##member##_MASK, val)
+
+#define SQ_CTXT_GLOBAL_SQ_ID_MASK GENMASK(12, 0)
+#define SQ_CTXT_GLOBAL_QUEUE_ID_SET(val, member) \
+ FIELD_PREP(SQ_CTXT_##member##_MASK, val)
+
+#define SQ_CTXT_VLAN_INSERT_MODE_MASK GENMASK(20, 19)
+#define SQ_CTXT_VLAN_CEQ_EN_MASK BIT(23)
+#define SQ_CTXT_VLAN_CEQ_SET(val, member) \
+ FIELD_PREP(SQ_CTXT_VLAN_##member##_MASK, val)
+
+#define SQ_CTXT_PREF_CACHE_THRESHOLD_MASK GENMASK(13, 0)
+#define SQ_CTXT_PREF_CACHE_MAX_MASK GENMASK(24, 14)
+#define SQ_CTXT_PREF_CACHE_MIN_MASK GENMASK(31, 25)
+
+#define SQ_CTXT_PREF_CI_HI_MASK GENMASK(3, 0)
+#define SQ_CTXT_PREF_OWNER_MASK BIT(4)
+
+#define SQ_CTXT_PREF_WQ_PFN_HI_MASK GENMASK(19, 0)
+#define SQ_CTXT_PREF_CI_LOW_MASK GENMASK(31, 20)
+#define SQ_CTXT_PREF_SET(val, member) \
+ FIELD_PREP(SQ_CTXT_PREF_##member##_MASK, val)
+
+#define SQ_CTXT_WQ_BLOCK_PFN_HI_MASK GENMASK(22, 0)
+#define SQ_CTXT_WQ_BLOCK_SET(val, member) \
+ FIELD_PREP(SQ_CTXT_WQ_BLOCK_##member##_MASK, val)
+
+#define RQ_CTXT_PI_IDX_MASK GENMASK(15, 0)
+#define RQ_CTXT_CI_IDX_MASK GENMASK(31, 16)
+#define RQ_CTXT_CI_PI_SET(val, member) \
+ FIELD_PREP(RQ_CTXT_##member##_MASK, val)
+
+#define RQ_CTXT_CEQ_ATTR_INTR_MASK GENMASK(30, 21)
+#define RQ_CTXT_CEQ_ATTR_EN_MASK BIT(31)
+#define RQ_CTXT_CEQ_ATTR_SET(val, member) \
+ FIELD_PREP(RQ_CTXT_CEQ_ATTR_##member##_MASK, val)
+
+#define RQ_CTXT_WQ_PAGE_HI_PFN_MASK GENMASK(19, 0)
+#define RQ_CTXT_WQ_PAGE_WQE_TYPE_MASK GENMASK(29, 28)
+#define RQ_CTXT_WQ_PAGE_OWNER_MASK BIT(31)
+#define RQ_CTXT_WQ_PAGE_SET(val, member) \
+ FIELD_PREP(RQ_CTXT_WQ_PAGE_##member##_MASK, val)
+
+#define RQ_CTXT_CQE_LEN_MASK GENMASK(29, 28)
+#define RQ_CTXT_CQE_LEN_SET(val, member) \
+ FIELD_PREP(RQ_CTXT_##member##_MASK, val)
+
+#define RQ_CTXT_PREF_CACHE_THRESHOLD_MASK GENMASK(13, 0)
+#define RQ_CTXT_PREF_CACHE_MAX_MASK GENMASK(24, 14)
+#define RQ_CTXT_PREF_CACHE_MIN_MASK GENMASK(31, 25)
+
+#define RQ_CTXT_PREF_CI_HI_MASK GENMASK(3, 0)
+#define RQ_CTXT_PREF_OWNER_MASK BIT(4)
+
+#define RQ_CTXT_PREF_WQ_PFN_HI_MASK GENMASK(19, 0)
+#define RQ_CTXT_PREF_CI_LOW_MASK GENMASK(31, 20)
+#define RQ_CTXT_PREF_SET(val, member) \
+ FIELD_PREP(RQ_CTXT_PREF_##member##_MASK, val)
+
+#define RQ_CTXT_WQ_BLOCK_PFN_HI_MASK GENMASK(22, 0)
+#define RQ_CTXT_WQ_BLOCK_SET(val, member) \
+ FIELD_PREP(RQ_CTXT_WQ_BLOCK_##member##_MASK, val)
+
+#define WQ_PAGE_PFN_SHIFT 12
+#define WQ_BLOCK_PFN_SHIFT 9
+#define WQ_PAGE_PFN(page_addr) ((page_addr) >> WQ_PAGE_PFN_SHIFT)
+#define WQ_BLOCK_PFN(page_addr) ((page_addr) >> WQ_BLOCK_PFN_SHIFT)
+
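A short illustration of the PFN conversions (the DMA address below is invented): hardware consumes WQ page addresses as 4 KiB frame numbers and WQ block addresses as 512-byte frame numbers, each split into hi/lo 32-bit context words.

	u64 page_pfn = WQ_PAGE_PFN(0x123456000ULL); /* >> 12 -> 0x123456 */
	u32 pfn_hi = upper_32_bits(page_pfn);       /* 0x0      */
	u32 pfn_lo = lower_32_bits(page_pfn);       /* 0x123456 */
	u64 blk_pfn = WQ_BLOCK_PFN(0x123456000ULL); /* >> 9 -> 0x91a2b0 */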
int hinic3_init_nic_io(struct hinic3_nic_dev *nic_dev)
{
- /* Completed by later submission due to LoC limit. */
- return -EFAULT;
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic3_nic_io *nic_io;
+ int err;
+
+ nic_io = kzalloc(sizeof(*nic_io), GFP_KERNEL);
+ if (!nic_io)
+ return -ENOMEM;
+
+ nic_dev->nic_io = nic_io;
+
+ err = hinic3_set_func_svc_used_state(hwdev, COMM_FUNC_SVC_T_NIC, 1);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to set function svc used state\n");
+ goto err_free_nicio;
+ }
+
+ err = hinic3_init_function_table(nic_dev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init function table\n");
+ goto err_clear_func_svc_used_state;
+ }
+
+ nic_io->rx_buf_len = nic_dev->rx_buf_len;
+
+ err = hinic3_get_nic_feature_from_hw(nic_dev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to get nic features\n");
+ goto err_clear_func_svc_used_state;
+ }
+
+ nic_io->feature_cap &= HINIC3_NIC_F_ALL_MASK;
+ nic_io->feature_cap &= HINIC3_NIC_DRV_DEFAULT_FEATURE;
+ dev_dbg(hwdev->dev, "nic features: 0x%llx\n", nic_io->feature_cap);
+
+ return 0;
+
+err_clear_func_svc_used_state:
+ hinic3_set_func_svc_used_state(hwdev, COMM_FUNC_SVC_T_NIC, 0);
+err_free_nicio:
+ nic_dev->nic_io = NULL;
+ kfree(nic_io);
+
+ return err;
}
void hinic3_free_nic_io(struct hinic3_nic_dev *nic_dev)
{
- /* Completed by later submission due to LoC limit. */
+ struct hinic3_nic_io *nic_io = nic_dev->nic_io;
+
+ hinic3_set_func_svc_used_state(nic_dev->hwdev, COMM_FUNC_SVC_T_NIC, 0);
+ nic_dev->nic_io = NULL;
+ kfree(nic_io);
+}
+
+int hinic3_init_nicio_res(struct hinic3_nic_dev *nic_dev)
+{
+ struct hinic3_nic_io *nic_io = nic_dev->nic_io;
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ void __iomem *db_base;
+ int err;
+
+ nic_io->max_qps = hinic3_func_max_qnum(hwdev);
+
+ err = hinic3_alloc_db_addr(hwdev, &db_base, NULL);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to allocate doorbell for sqs\n");
+ return err;
+ }
+ nic_io->sqs_db_addr = db_base;
+
+ err = hinic3_alloc_db_addr(hwdev, &db_base, NULL);
+ if (err) {
+ hinic3_free_db_addr(hwdev, nic_io->sqs_db_addr);
+ dev_err(hwdev->dev, "Failed to allocate doorbell for rqs\n");
+ return err;
+ }
+ nic_io->rqs_db_addr = db_base;
+
+ nic_io->ci_vaddr_base =
+ dma_alloc_coherent(hwdev->dev,
+ HINIC3_CI_TABLE_SIZE(nic_io->max_qps),
+ &nic_io->ci_dma_base,
+ GFP_KERNEL);
+ if (!nic_io->ci_vaddr_base) {
+ hinic3_free_db_addr(hwdev, nic_io->sqs_db_addr);
+ hinic3_free_db_addr(hwdev, nic_io->rqs_db_addr);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void hinic3_free_nicio_res(struct hinic3_nic_dev *nic_dev)
+{
+ struct hinic3_nic_io *nic_io = nic_dev->nic_io;
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+
+ dma_free_coherent(hwdev->dev,
+ HINIC3_CI_TABLE_SIZE(nic_io->max_qps),
+ nic_io->ci_vaddr_base, nic_io->ci_dma_base);
+
+ hinic3_free_db_addr(hwdev, nic_io->sqs_db_addr);
+ hinic3_free_db_addr(hwdev, nic_io->rqs_db_addr);
+}
+
+static int hinic3_create_sq(struct hinic3_hwdev *hwdev,
+ struct hinic3_io_queue *sq,
+ u16 q_id, u32 sq_depth, u16 sq_msix_idx)
+{
+ int err;
+
+ /* SQ is in use; hardware requires the initial owner value to be 1 */
+ sq->owner = 1;
+
+ sq->q_id = q_id;
+ sq->msix_entry_idx = sq_msix_idx;
+
+ err = hinic3_wq_create(hwdev, &sq->wq, sq_depth,
+ BIT(HINIC3_SQ_WQEBB_SHIFT));
+ if (err) {
+ dev_err(hwdev->dev, "Failed to create tx queue %u wq\n",
+ q_id);
+ return err;
+ }
+
+ return 0;
+}
+
+static int hinic3_create_rq(struct hinic3_hwdev *hwdev,
+ struct hinic3_io_queue *rq,
+ u16 q_id, u32 rq_depth, u16 rq_msix_idx)
+{
+ int err;
+
+ rq->q_id = q_id;
+ rq->msix_entry_idx = rq_msix_idx;
+
+ err = hinic3_wq_create(hwdev, &rq->wq, rq_depth,
+ BIT(HINIC3_RQ_WQEBB_SHIFT +
+ HINIC3_NORMAL_RQ_WQE));
+ if (err) {
+ dev_err(hwdev->dev, "Failed to create rx queue %u wq\n",
+ q_id);
+ return err;
+ }
+
+ return 0;
+}
+
+static int hinic3_create_qp(struct hinic3_hwdev *hwdev,
+ struct hinic3_io_queue *sq,
+ struct hinic3_io_queue *rq, u16 q_id, u32 sq_depth,
+ u32 rq_depth, u16 qp_msix_idx)
+{
+ int err;
+
+ err = hinic3_create_sq(hwdev, sq, q_id, sq_depth, qp_msix_idx);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to create sq, qid: %u\n",
+ q_id);
+ return err;
+ }
+
+ err = hinic3_create_rq(hwdev, rq, q_id, rq_depth, qp_msix_idx);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to create rq, qid: %u\n",
+ q_id);
+ goto err_destroy_sq_wq;
+ }
+
+ return 0;
+
+err_destroy_sq_wq:
+ hinic3_wq_destroy(hwdev, &sq->wq);
+
+ return err;
+}
+
+static void hinic3_destroy_qp(struct hinic3_hwdev *hwdev,
+ struct hinic3_io_queue *sq,
+ struct hinic3_io_queue *rq)
+{
+ hinic3_wq_destroy(hwdev, &sq->wq);
+ hinic3_wq_destroy(hwdev, &rq->wq);
+}
+
+int hinic3_alloc_qps(struct hinic3_nic_dev *nic_dev,
+ struct hinic3_dyna_qp_params *qp_params)
+{
+ struct msix_entry *qps_msix_entries = nic_dev->qps_msix_entries;
+ struct hinic3_nic_io *nic_io = nic_dev->nic_io;
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic3_io_queue *sqs;
+ struct hinic3_io_queue *rqs;
+ u16 q_id;
+ int err;
+
+ if (qp_params->num_qps > nic_io->max_qps || !qp_params->num_qps)
+ return -EINVAL;
+
+ sqs = kcalloc(qp_params->num_qps, sizeof(*sqs), GFP_KERNEL);
+ if (!sqs) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ rqs = kcalloc(qp_params->num_qps, sizeof(*rqs), GFP_KERNEL);
+ if (!rqs) {
+ err = -ENOMEM;
+ goto err_free_sqs;
+ }
+
+ for (q_id = 0; q_id < qp_params->num_qps; q_id++) {
+ err = hinic3_create_qp(hwdev, &sqs[q_id], &rqs[q_id], q_id,
+ qp_params->sq_depth, qp_params->rq_depth,
+ qps_msix_entries[q_id].entry);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to allocate qp %u, err: %d\n",
+ q_id, err);
+ goto err_destroy_qp;
+ }
+ }
+
+ qp_params->sqs = sqs;
+ qp_params->rqs = rqs;
+
+ return 0;
+
+err_destroy_qp:
+ while (q_id > 0) {
+ q_id--;
+ hinic3_destroy_qp(hwdev, &sqs[q_id], &rqs[q_id]);
+ }
+ kfree(rqs);
+err_free_sqs:
+ kfree(sqs);
+err_out:
+ return err;
+}
+
+void hinic3_free_qps(struct hinic3_nic_dev *nic_dev,
+ struct hinic3_dyna_qp_params *qp_params)
+{
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ u16 q_id;
+
+ for (q_id = 0; q_id < qp_params->num_qps; q_id++)
+ hinic3_destroy_qp(hwdev, &qp_params->sqs[q_id],
+ &qp_params->rqs[q_id]);
+
+ kfree(qp_params->sqs);
+ kfree(qp_params->rqs);
+}
+
+void hinic3_init_qps(struct hinic3_nic_dev *nic_dev,
+ struct hinic3_dyna_qp_params *qp_params)
+{
+ struct hinic3_nic_io *nic_io = nic_dev->nic_io;
+ struct hinic3_io_queue *sqs = qp_params->sqs;
+ struct hinic3_io_queue *rqs = qp_params->rqs;
+ u16 q_id;
+
+ nic_io->num_qps = qp_params->num_qps;
+ nic_io->sq = qp_params->sqs;
+ nic_io->rq = qp_params->rqs;
+ for (q_id = 0; q_id < nic_io->num_qps; q_id++) {
+ sqs[q_id].cons_idx_addr =
+ (u16 *)HINIC3_CI_VADDR(nic_io->ci_vaddr_base, q_id);
+ /* clear ci value */
+ WRITE_ONCE(*sqs[q_id].cons_idx_addr, 0);
+
+ sqs[q_id].db_addr = nic_io->sqs_db_addr;
+ rqs[q_id].db_addr = nic_io->rqs_db_addr;
+ }
+}
+
+void hinic3_uninit_qps(struct hinic3_nic_dev *nic_dev,
+ struct hinic3_dyna_qp_params *qp_params)
+{
+ struct hinic3_nic_io *nic_io = nic_dev->nic_io;
+
+ qp_params->sqs = nic_io->sq;
+ qp_params->rqs = nic_io->rq;
+ qp_params->num_qps = nic_io->num_qps;
+}
+
+static void hinic3_qp_prepare_cmdq_header(struct hinic3_qp_ctxt_hdr *qp_ctxt_hdr,
+ enum hinic3_qp_ctxt_type ctxt_type,
+ u16 num_queues, u16 q_id)
+{
+ qp_ctxt_hdr->queue_type = cpu_to_le16(ctxt_type);
+ qp_ctxt_hdr->num_queues = cpu_to_le16(num_queues);
+ qp_ctxt_hdr->start_qid = cpu_to_le16(q_id);
+ qp_ctxt_hdr->rsvd = 0;
+}
+
+static void hinic3_sq_prepare_ctxt(struct hinic3_io_queue *sq, u16 sq_id,
+ struct hinic3_sq_ctxt *sq_ctxt)
+{
+ u64 wq_page_addr, wq_page_pfn, wq_block_pfn;
+ u32 wq_block_pfn_hi, wq_block_pfn_lo;
+ u32 wq_page_pfn_hi, wq_page_pfn_lo;
+ u16 pi_start, ci_start;
+
+ ci_start = hinic3_get_sq_local_ci(sq);
+ pi_start = hinic3_get_sq_local_pi(sq);
+
+ wq_page_addr = hinic3_wq_get_first_wqe_page_addr(&sq->wq);
+
+ wq_page_pfn = WQ_PAGE_PFN(wq_page_addr);
+ wq_page_pfn_hi = upper_32_bits(wq_page_pfn);
+ wq_page_pfn_lo = lower_32_bits(wq_page_pfn);
+
+ wq_block_pfn = WQ_BLOCK_PFN(sq->wq.wq_block_paddr);
+ wq_block_pfn_hi = upper_32_bits(wq_block_pfn);
+ wq_block_pfn_lo = lower_32_bits(wq_block_pfn);
+
+ sq_ctxt->ci_pi =
+ cpu_to_le32(SQ_CTXT_CI_PI_SET(ci_start, CI_IDX) |
+ SQ_CTXT_CI_PI_SET(pi_start, PI_IDX));
+
+ sq_ctxt->drop_mode_sp =
+ cpu_to_le32(SQ_CTXT_MODE_SET(0, SP_FLAG) |
+ SQ_CTXT_MODE_SET(0, PKT_DROP));
+
+ sq_ctxt->wq_pfn_hi_owner =
+ cpu_to_le32(SQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) |
+ SQ_CTXT_WQ_PAGE_SET(1, OWNER));
+
+ sq_ctxt->wq_pfn_lo = cpu_to_le32(wq_page_pfn_lo);
+
+ sq_ctxt->pkt_drop_thd =
+ cpu_to_le32(SQ_CTXT_PKT_DROP_THD_SET(HINIC3_DEFAULT_DROP_THD_ON, THD_ON) |
+ SQ_CTXT_PKT_DROP_THD_SET(HINIC3_DEFAULT_DROP_THD_OFF, THD_OFF));
+
+ sq_ctxt->global_sq_id =
+ cpu_to_le32(SQ_CTXT_GLOBAL_QUEUE_ID_SET((u32)sq_id,
+ GLOBAL_SQ_ID));
+
+ /* enable insert c-vlan by default */
+ sq_ctxt->vlan_ceq_attr =
+ cpu_to_le32(SQ_CTXT_VLAN_CEQ_SET(0, CEQ_EN) |
+ SQ_CTXT_VLAN_CEQ_SET(1, INSERT_MODE));
+
+ sq_ctxt->rsvd0 = 0;
+
+ sq_ctxt->pref_cache =
+ cpu_to_le32(SQ_CTXT_PREF_SET(SQ_WQ_PREFETCH_MIN, CACHE_MIN) |
+ SQ_CTXT_PREF_SET(SQ_WQ_PREFETCH_MAX, CACHE_MAX) |
+ SQ_CTXT_PREF_SET(SQ_WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD));
+
+ sq_ctxt->pref_ci_owner =
+ cpu_to_le32(SQ_CTXT_PREF_SET(SQ_CTXT_PREF_CI_HI(ci_start), CI_HI) |
+ SQ_CTXT_PREF_SET(1, OWNER));
+
+ sq_ctxt->pref_wq_pfn_hi_ci =
+ cpu_to_le32(SQ_CTXT_PREF_SET(ci_start, CI_LOW) |
+ SQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_PFN_HI));
+
+ sq_ctxt->pref_wq_pfn_lo = cpu_to_le32(wq_page_pfn_lo);
+
+ sq_ctxt->wq_block_pfn_hi =
+ cpu_to_le32(SQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, PFN_HI));
+
+ sq_ctxt->wq_block_pfn_lo = cpu_to_le32(wq_block_pfn_lo);
+}
+
+static void hinic3_rq_prepare_ctxt_get_wq_info(struct hinic3_io_queue *rq,
+ u32 *wq_page_pfn_hi,
+ u32 *wq_page_pfn_lo,
+ u32 *wq_block_pfn_hi,
+ u32 *wq_block_pfn_lo)
+{
+ u64 wq_page_addr, wq_page_pfn, wq_block_pfn;
+
+ wq_page_addr = hinic3_wq_get_first_wqe_page_addr(&rq->wq);
+
+ wq_page_pfn = WQ_PAGE_PFN(wq_page_addr);
+ *wq_page_pfn_hi = upper_32_bits(wq_page_pfn);
+ *wq_page_pfn_lo = lower_32_bits(wq_page_pfn);
+
+ wq_block_pfn = WQ_BLOCK_PFN(rq->wq.wq_block_paddr);
+ *wq_block_pfn_hi = upper_32_bits(wq_block_pfn);
+ *wq_block_pfn_lo = lower_32_bits(wq_block_pfn);
+}
+
+static void hinic3_rq_prepare_ctxt(struct hinic3_io_queue *rq,
+ struct hinic3_rq_ctxt *rq_ctxt)
+{
+ u32 wq_block_pfn_hi, wq_block_pfn_lo;
+ u32 wq_page_pfn_hi, wq_page_pfn_lo;
+ u16 pi_start, ci_start;
+
+ ci_start = (rq->wq.cons_idx & rq->wq.idx_mask) << HINIC3_NORMAL_RQ_WQE;
+ pi_start = (rq->wq.prod_idx & rq->wq.idx_mask) << HINIC3_NORMAL_RQ_WQE;
+
+ hinic3_rq_prepare_ctxt_get_wq_info(rq, &wq_page_pfn_hi, &wq_page_pfn_lo,
+ &wq_block_pfn_hi, &wq_block_pfn_lo);
+
+ rq_ctxt->ci_pi =
+ cpu_to_le32(RQ_CTXT_CI_PI_SET(ci_start, CI_IDX) |
+ RQ_CTXT_CI_PI_SET(pi_start, PI_IDX));
+
+ rq_ctxt->ceq_attr =
+ cpu_to_le32(RQ_CTXT_CEQ_ATTR_SET(0, EN) |
+ RQ_CTXT_CEQ_ATTR_SET(rq->msix_entry_idx, INTR));
+
+ rq_ctxt->wq_pfn_hi_type_owner =
+ cpu_to_le32(RQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) |
+ RQ_CTXT_WQ_PAGE_SET(1, OWNER));
+
+ /* use 16-byte WQE */
+ rq_ctxt->wq_pfn_hi_type_owner |=
+ cpu_to_le32(RQ_CTXT_WQ_PAGE_SET(2, WQE_TYPE));
+ rq_ctxt->cqe_sge_len = cpu_to_le32(RQ_CTXT_CQE_LEN_SET(1, CQE_LEN));
+
+ rq_ctxt->wq_pfn_lo = cpu_to_le32(wq_page_pfn_lo);
+
+ rq_ctxt->pref_cache =
+ cpu_to_le32(RQ_CTXT_PREF_SET(RQ_WQ_PREFETCH_MIN, CACHE_MIN) |
+ RQ_CTXT_PREF_SET(RQ_WQ_PREFETCH_MAX, CACHE_MAX) |
+ RQ_CTXT_PREF_SET(RQ_WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD));
+
+ rq_ctxt->pref_ci_owner =
+ cpu_to_le32(RQ_CTXT_PREF_SET(SQ_CTXT_PREF_CI_HI(ci_start), CI_HI) |
+ RQ_CTXT_PREF_SET(1, OWNER));
+
+ rq_ctxt->pref_wq_pfn_hi_ci =
+ cpu_to_le32(RQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_PFN_HI) |
+ RQ_CTXT_PREF_SET(ci_start, CI_LOW));
+
+ rq_ctxt->pref_wq_pfn_lo = cpu_to_le32(wq_page_pfn_lo);
+
+ rq_ctxt->wq_block_pfn_hi =
+ cpu_to_le32(RQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, PFN_HI));
+
+ rq_ctxt->wq_block_pfn_lo = cpu_to_le32(wq_block_pfn_lo);
+}
+
+static int init_sq_ctxts(struct hinic3_nic_dev *nic_dev)
+{
+ struct hinic3_nic_io *nic_io = nic_dev->nic_io;
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic3_sq_ctxt_block *sq_ctxt_block;
+ u16 q_id, curr_id, max_ctxts, i;
+ struct hinic3_sq_ctxt *sq_ctxt;
+ struct hinic3_cmd_buf *cmd_buf;
+ struct hinic3_io_queue *sq;
+ __le64 out_param;
+ int err = 0;
+
+ cmd_buf = hinic3_alloc_cmd_buf(hwdev);
+ if (!cmd_buf) {
+ dev_err(hwdev->dev, "Failed to allocate cmd buf\n");
+ return -ENOMEM;
+ }
+
+ q_id = 0;
+ while (q_id < nic_io->num_qps) {
+ sq_ctxt_block = cmd_buf->buf;
+ sq_ctxt = sq_ctxt_block->sq_ctxt;
+
+ max_ctxts = (nic_io->num_qps - q_id) > HINIC3_Q_CTXT_MAX ?
+ HINIC3_Q_CTXT_MAX : (nic_io->num_qps - q_id);
+
+ hinic3_qp_prepare_cmdq_header(&sq_ctxt_block->cmdq_hdr,
+ HINIC3_QP_CTXT_TYPE_SQ, max_ctxts,
+ q_id);
+
+ for (i = 0; i < max_ctxts; i++) {
+ curr_id = q_id + i;
+ sq = &nic_io->sq[curr_id];
+ hinic3_sq_prepare_ctxt(sq, curr_id, &sq_ctxt[i]);
+ }
+
+ hinic3_cmdq_buf_swab32(sq_ctxt_block, sizeof(*sq_ctxt_block));
+
+ cmd_buf->size = cpu_to_le16(SQ_CTXT_SIZE(max_ctxts));
+ err = hinic3_cmdq_direct_resp(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_UCODE_CMD_MODIFY_QUEUE_CTX,
+ cmd_buf, &out_param);
+ if (err || out_param) {
+ dev_err(hwdev->dev, "Failed to set SQ ctxts, err: %d, out_param: 0x%llx\n",
+ err, out_param);
+ err = -EFAULT;
+ break;
+ }
+
+ q_id += max_ctxts;
+ }
+
+ hinic3_free_cmd_buf(hwdev, cmd_buf);
+
+ return err;
+}
+
+static int init_rq_ctxts(struct hinic3_nic_dev *nic_dev)
+{
+ struct hinic3_nic_io *nic_io = nic_dev->nic_io;
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic3_rq_ctxt_block *rq_ctxt_block;
+ u16 q_id, curr_id, max_ctxts, i;
+ struct hinic3_rq_ctxt *rq_ctxt;
+ struct hinic3_cmd_buf *cmd_buf;
+ struct hinic3_io_queue *rq;
+ __le64 out_param;
+ int err = 0;
+
+ cmd_buf = hinic3_alloc_cmd_buf(hwdev);
+ if (!cmd_buf) {
+ dev_err(hwdev->dev, "Failed to allocate cmd buf\n");
+ return -ENOMEM;
+ }
+
+ q_id = 0;
+ while (q_id < nic_io->num_qps) {
+ rq_ctxt_block = cmd_buf->buf;
+ rq_ctxt = rq_ctxt_block->rq_ctxt;
+
+ max_ctxts = (nic_io->num_qps - q_id) > HINIC3_Q_CTXT_MAX ?
+ HINIC3_Q_CTXT_MAX : (nic_io->num_qps - q_id);
+
+ hinic3_qp_prepare_cmdq_header(&rq_ctxt_block->cmdq_hdr,
+ HINIC3_QP_CTXT_TYPE_RQ, max_ctxts,
+ q_id);
+
+ for (i = 0; i < max_ctxts; i++) {
+ curr_id = q_id + i;
+ rq = &nic_io->rq[curr_id];
+ hinic3_rq_prepare_ctxt(rq, &rq_ctxt[i]);
+ }
+
+ hinic3_cmdq_buf_swab32(rq_ctxt_block, sizeof(*rq_ctxt_block));
+
+ cmd_buf->size = cpu_to_le16(RQ_CTXT_SIZE(max_ctxts));
+
+ err = hinic3_cmdq_direct_resp(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_UCODE_CMD_MODIFY_QUEUE_CTX,
+ cmd_buf, &out_param);
+ if (err || out_param) {
+ dev_err(hwdev->dev, "Failed to set RQ ctxts, err: %d, out_param: 0x%llx\n",
+ err, out_param);
+ err = -EFAULT;
+ break;
+ }
+
+ q_id += max_ctxts;
+ }
+
+ hinic3_free_cmd_buf(hwdev, cmd_buf);
+
+ return err;
+}
+
+static int init_qp_ctxts(struct hinic3_nic_dev *nic_dev)
+{
+ int err;
+
+ err = init_sq_ctxts(nic_dev);
+ if (err)
+ return err;
+
+ err = init_rq_ctxts(nic_dev);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int clean_queue_offload_ctxt(struct hinic3_nic_dev *nic_dev,
+ enum hinic3_qp_ctxt_type ctxt_type)
+{
+ struct hinic3_nic_io *nic_io = nic_dev->nic_io;
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic3_clean_queue_ctxt *ctxt_block;
+ struct hinic3_cmd_buf *cmd_buf;
+ __le64 out_param;
+ int err;
+
+ cmd_buf = hinic3_alloc_cmd_buf(hwdev);
+ if (!cmd_buf) {
+ dev_err(hwdev->dev, "Failed to allocate cmd buf\n");
+ return -ENOMEM;
+ }
+
+ ctxt_block = cmd_buf->buf;
+ ctxt_block->cmdq_hdr.num_queues = cpu_to_le16(nic_io->max_qps);
+ ctxt_block->cmdq_hdr.queue_type = cpu_to_le16(ctxt_type);
+ ctxt_block->cmdq_hdr.start_qid = 0;
+ ctxt_block->cmdq_hdr.rsvd = 0;
+ ctxt_block->rsvd = 0;
+
+ hinic3_cmdq_buf_swab32(ctxt_block, sizeof(*ctxt_block));
+
+ cmd_buf->size = cpu_to_le16(sizeof(*ctxt_block));
+
+ err = hinic3_cmdq_direct_resp(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_UCODE_CMD_CLEAN_QUEUE_CTX,
+ cmd_buf, &out_param);
+ if (err || out_param) {
+ dev_err(hwdev->dev, "Failed to clean queue offload ctxts, err: %d, out_param: 0x%llx\n",
+ err, out_param);
+
+ err = -EFAULT;
+ }
+
+ hinic3_free_cmd_buf(hwdev, cmd_buf);
+
+ return err;
+}
+
+static int clean_qp_offload_ctxt(struct hinic3_nic_dev *nic_dev)
+{
+ /* clean LRO/TSO context space */
+ return clean_queue_offload_ctxt(nic_dev, HINIC3_QP_CTXT_TYPE_SQ) ||
+ clean_queue_offload_ctxt(nic_dev, HINIC3_QP_CTXT_TYPE_RQ);
+}
+
+/* Init QP ctxts, set the SQ CI attributes and arm all SQs */
+int hinic3_init_qp_ctxts(struct hinic3_nic_dev *nic_dev)
+{
+ struct hinic3_nic_io *nic_io = nic_dev->nic_io;
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic3_sq_attr sq_attr;
+ u32 rq_depth;
+ u16 q_id;
+ int err;
+
+ err = init_qp_ctxts(nic_dev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init QP ctxts\n");
+ return err;
+ }
+
+ /* clean LRO/TSO context space */
+ err = clean_qp_offload_ctxt(nic_dev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to clean qp offload ctxts\n");
+ return err;
+ }
+
+ rq_depth = nic_io->rq[0].wq.q_depth << HINIC3_NORMAL_RQ_WQE;
+
+ err = hinic3_set_root_ctxt(hwdev, rq_depth, nic_io->sq[0].wq.q_depth,
+ nic_io->rx_buf_len);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to set root context\n");
+ return err;
+ }
+
+ for (q_id = 0; q_id < nic_io->num_qps; q_id++) {
+ sq_attr.ci_dma_base =
+ HINIC3_CI_PADDR(nic_io->ci_dma_base, q_id) >> 0x2;
+ sq_attr.pending_limit = HINIC3_DEFAULT_TX_CI_PENDING_LIMIT;
+ sq_attr.coalescing_time = HINIC3_DEFAULT_TX_CI_COALESCING_TIME;
+ sq_attr.intr_en = 1;
+ sq_attr.intr_idx = nic_io->sq[q_id].msix_entry_idx;
+ sq_attr.l2nic_sqn = q_id;
+ sq_attr.dma_attr_off = 0;
+ err = hinic3_set_ci_table(hwdev, &sq_attr);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to set ci table\n");
+ goto err_clean_root_ctxt;
+ }
+ }
+
+ return 0;
+
+err_clean_root_ctxt:
+ hinic3_clean_root_ctxt(hwdev);
+
+ return err;
+}
+
+void hinic3_free_qp_ctxts(struct hinic3_nic_dev *nic_dev)
+{
+ hinic3_clean_root_ctxt(nic_dev->hwdev);
}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h
index 865ba6878c48..12eefabcf1db 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h
@@ -75,8 +75,8 @@ static inline u16 hinic3_get_sq_hw_ci(const struct hinic3_io_queue *sq)
#define DB_CFLAG_DP_RQ 1
struct hinic3_nic_db {
- u32 db_info;
- u32 pi_hi;
+ __le32 db_info;
+ __le32 pi_hi;
};
static inline void hinic3_write_db(struct hinic3_io_queue *queue, int cos,
@@ -84,15 +84,25 @@ static inline void hinic3_write_db(struct hinic3_io_queue *queue, int cos,
{
struct hinic3_nic_db db;
- db.db_info = DB_INFO_SET(DB_SRC_TYPE, TYPE) |
- DB_INFO_SET(cflag, CFLAG) |
- DB_INFO_SET(cos, COS) |
- DB_INFO_SET(queue->q_id, QID);
- db.pi_hi = DB_PI_HIGH(pi);
+ db.db_info =
+ cpu_to_le32(DB_INFO_SET(DB_SRC_TYPE, TYPE) |
+ DB_INFO_SET(cflag, CFLAG) |
+ DB_INFO_SET(cos, COS) |
+ DB_INFO_SET(queue->q_id, QID));
+ db.pi_hi = cpu_to_le32(DB_PI_HIGH(pi));
writeq(*((u64 *)&db), DB_ADDR(queue, pi));
}
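Usage note (hedged): a transmit path would ring this doorbell after publishing new WQEBBs, roughly as below. The cos value is assumed for the example, and DB_CFLAG_DP_SQ is assumed to be the SQ counterpart of DB_CFLAG_DP_RQ above.

	u16 pi = hinic3_get_sq_local_pi(sq);
	hinic3_write_db(sq, 0 /* cos, assumed */, DB_CFLAG_DP_SQ, pi);

Writing both halves with a single writeq publishes db_info and pi_hi to the doorbell page in one posted write, and the cpu_to_le32 conversions keep the device view little-endian on any host.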
+struct hinic3_dyna_qp_params {
+ u16 num_qps;
+ u32 sq_depth;
+ u32 rq_depth;
+
+ struct hinic3_io_queue *sqs;
+ struct hinic3_io_queue *rqs;
+};
+
struct hinic3_nic_io {
struct hinic3_io_queue *sq;
struct hinic3_io_queue *rq;
@@ -117,4 +127,19 @@ struct hinic3_nic_io {
int hinic3_init_nic_io(struct hinic3_nic_dev *nic_dev);
void hinic3_free_nic_io(struct hinic3_nic_dev *nic_dev);
+int hinic3_init_nicio_res(struct hinic3_nic_dev *nic_dev);
+void hinic3_free_nicio_res(struct hinic3_nic_dev *nic_dev);
+
+int hinic3_alloc_qps(struct hinic3_nic_dev *nic_dev,
+ struct hinic3_dyna_qp_params *qp_params);
+void hinic3_free_qps(struct hinic3_nic_dev *nic_dev,
+ struct hinic3_dyna_qp_params *qp_params);
+void hinic3_init_qps(struct hinic3_nic_dev *nic_dev,
+ struct hinic3_dyna_qp_params *qp_params);
+void hinic3_uninit_qps(struct hinic3_nic_dev *nic_dev,
+ struct hinic3_dyna_qp_params *qp_params);
+
+int hinic3_init_qp_ctxts(struct hinic3_nic_dev *nic_dev);
+void hinic3_free_qp_ctxts(struct hinic3_nic_dev *nic_dev);
+
#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_pci_id_tbl.h b/drivers/net/ethernet/huawei/hinic3/hinic3_pci_id_tbl.h
new file mode 100644
index 000000000000..86c88d0bb4bd
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_pci_id_tbl.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_PCI_ID_TBL_H_
+#define _HINIC3_PCI_ID_TBL_H_
+
+#define PCI_DEV_ID_HINIC3_VF 0x375F
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_rss.c b/drivers/net/ethernet/huawei/hinic3/hinic3_rss.c
new file mode 100644
index 000000000000..4ff1b2f79838
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_rss.c
@@ -0,0 +1,336 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/ethtool.h>
+
+#include "hinic3_cmdq.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
+#include "hinic3_mbox.h"
+#include "hinic3_nic_cfg.h"
+#include "hinic3_nic_dev.h"
+#include "hinic3_rss.h"
+
+static void hinic3_fillout_indir_tbl(struct net_device *netdev, u16 *indir)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ u16 i, num_qps;
+
+ num_qps = nic_dev->q_params.num_qps;
+ for (i = 0; i < L2NIC_RSS_INDIR_SIZE; i++)
+ indir[i] = ethtool_rxfh_indir_default(i, num_qps);
+}
+
+static int hinic3_rss_cfg(struct hinic3_hwdev *hwdev, u8 rss_en, u16 num_qps)
+{
+ struct mgmt_msg_params msg_params = {};
+ struct l2nic_cmd_cfg_rss rss_cfg = {};
+ int err;
+
+ rss_cfg.func_id = hinic3_global_func_id(hwdev);
+ rss_cfg.rss_en = rss_en;
+ rss_cfg.rq_priority_number = 0;
+ rss_cfg.num_qps = num_qps;
+
+ mgmt_msg_params_init_default(&msg_params, &rss_cfg, sizeof(rss_cfg));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_CFG_RSS, &msg_params);
+ if (err || rss_cfg.msg_head.status) {
+ dev_err(hwdev->dev, "Failed to set rss cfg, err: %d, status: 0x%x\n",
+ err, rss_cfg.msg_head.status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void hinic3_init_rss_parameters(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ nic_dev->rss_hash_type = HINIC3_RSS_HASH_ENGINE_TYPE_XOR;
+ nic_dev->rss_type.tcp_ipv6_ext = 1;
+ nic_dev->rss_type.ipv6_ext = 1;
+ nic_dev->rss_type.tcp_ipv6 = 1;
+ nic_dev->rss_type.ipv6 = 1;
+ nic_dev->rss_type.tcp_ipv4 = 1;
+ nic_dev->rss_type.ipv4 = 1;
+ nic_dev->rss_type.udp_ipv6 = 1;
+ nic_dev->rss_type.udp_ipv4 = 1;
+}
+
+static void decide_num_qps(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ unsigned int dev_cpus;
+
+ dev_cpus = netif_get_num_default_rss_queues();
+ nic_dev->q_params.num_qps = min(dev_cpus, nic_dev->max_qps);
+}
+
+static int alloc_rss_resource(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ nic_dev->rss_hkey = kmalloc_array(L2NIC_RSS_KEY_SIZE,
+ sizeof(nic_dev->rss_hkey[0]),
+ GFP_KERNEL);
+ if (!nic_dev->rss_hkey)
+ return -ENOMEM;
+
+ netdev_rss_key_fill(nic_dev->rss_hkey, L2NIC_RSS_KEY_SIZE);
+
+ nic_dev->rss_indir = kcalloc(L2NIC_RSS_INDIR_SIZE, sizeof(u16),
+ GFP_KERNEL);
+ if (!nic_dev->rss_indir) {
+ kfree(nic_dev->rss_hkey);
+ nic_dev->rss_hkey = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int hinic3_rss_set_indir_tbl(struct hinic3_hwdev *hwdev,
+ const u16 *indir_table)
+{
+ struct l2nic_cmd_rss_set_indir_tbl *indir_tbl;
+ struct hinic3_cmd_buf *cmd_buf;
+ __le64 out_param;
+ int err;
+ u32 i;
+
+ cmd_buf = hinic3_alloc_cmd_buf(hwdev);
+ if (!cmd_buf) {
+ dev_err(hwdev->dev, "Failed to allocate cmd buf\n");
+ return -ENOMEM;
+ }
+
+ cmd_buf->size = cpu_to_le16(sizeof(struct l2nic_cmd_rss_set_indir_tbl));
+ indir_tbl = cmd_buf->buf;
+ memset(indir_tbl, 0, sizeof(*indir_tbl));
+
+ for (i = 0; i < L2NIC_RSS_INDIR_SIZE; i++)
+ indir_tbl->entry[i] = cpu_to_le16(indir_table[i]);
+
+ hinic3_cmdq_buf_swab32(indir_tbl, sizeof(*indir_tbl));
+
+ err = hinic3_cmdq_direct_resp(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_UCODE_CMD_SET_RSS_INDIR_TBL,
+ cmd_buf, &out_param);
+ if (err || out_param) {
+ dev_err(hwdev->dev, "Failed to set rss indir table\n");
+ err = -EFAULT;
+ }
+
+ hinic3_free_cmd_buf(hwdev, cmd_buf);
+
+ return err;
+}
+
+static int hinic3_set_rss_type(struct hinic3_hwdev *hwdev,
+ struct hinic3_rss_type rss_type)
+{
+ struct l2nic_cmd_set_rss_ctx_tbl ctx_tbl = {};
+ struct mgmt_msg_params msg_params = {};
+ u32 ctx;
+ int err;
+
+ ctx_tbl.func_id = hinic3_global_func_id(hwdev);
+ ctx = L2NIC_RSS_TYPE_SET(1, VALID) |
+ L2NIC_RSS_TYPE_SET(rss_type.ipv4, IPV4) |
+ L2NIC_RSS_TYPE_SET(rss_type.ipv6, IPV6) |
+ L2NIC_RSS_TYPE_SET(rss_type.ipv6_ext, IPV6_EXT) |
+ L2NIC_RSS_TYPE_SET(rss_type.tcp_ipv4, TCP_IPV4) |
+ L2NIC_RSS_TYPE_SET(rss_type.tcp_ipv6, TCP_IPV6) |
+ L2NIC_RSS_TYPE_SET(rss_type.tcp_ipv6_ext, TCP_IPV6_EXT) |
+ L2NIC_RSS_TYPE_SET(rss_type.udp_ipv4, UDP_IPV4) |
+ L2NIC_RSS_TYPE_SET(rss_type.udp_ipv6, UDP_IPV6);
+ ctx_tbl.context = ctx;
+
+ mgmt_msg_params_init_default(&msg_params, &ctx_tbl, sizeof(ctx_tbl));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_SET_RSS_CTX_TBL, &msg_params);
+
+ if (ctx_tbl.msg_head.status == MGMT_STATUS_CMD_UNSUPPORTED) {
+ return MGMT_STATUS_CMD_UNSUPPORTED;
+ } else if (err || ctx_tbl.msg_head.status) {
+ dev_err(hwdev->dev, "mgmt Failed to set rss context offload, err: %d, status: 0x%x\n",
+ err, ctx_tbl.msg_head.status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hinic3_rss_cfg_hash_type(struct hinic3_hwdev *hwdev, u8 opcode,
+ enum hinic3_rss_hash_type *type)
+{
+ struct l2nic_cmd_cfg_rss_engine hash_type_cmd = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ hash_type_cmd.func_id = hinic3_global_func_id(hwdev);
+ hash_type_cmd.opcode = opcode;
+
+ if (opcode == MGMT_MSG_CMD_OP_SET)
+ hash_type_cmd.hash_engine = *type;
+
+ mgmt_msg_params_init_default(&msg_params, &hash_type_cmd,
+ sizeof(hash_type_cmd));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_CFG_RSS_HASH_ENGINE,
+ &msg_params);
+ if (err || hash_type_cmd.msg_head.status) {
+ dev_err(hwdev->dev, "Failed to %s hash engine, err: %d, status: 0x%x\n",
+ opcode == MGMT_MSG_CMD_OP_SET ? "set" : "get",
+ err, hash_type_cmd.msg_head.status);
+ return -EIO;
+ }
+
+ if (opcode == MGMT_MSG_CMD_OP_GET)
+ *type = hash_type_cmd.hash_engine;
+
+ return 0;
+}
+
+static int hinic3_rss_set_hash_type(struct hinic3_hwdev *hwdev,
+ enum hinic3_rss_hash_type type)
+{
+ return hinic3_rss_cfg_hash_type(hwdev, MGMT_MSG_CMD_OP_SET, &type);
+}
+
+static int hinic3_config_rss_hw_resource(struct net_device *netdev,
+ u16 *indir_tbl)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ int err;
+
+ err = hinic3_rss_set_indir_tbl(nic_dev->hwdev, indir_tbl);
+ if (err)
+ return err;
+
+ err = hinic3_set_rss_type(nic_dev->hwdev, nic_dev->rss_type);
+ if (err)
+ return err;
+
+ return hinic3_rss_set_hash_type(nic_dev->hwdev, nic_dev->rss_hash_type);
+}
+
+static int hinic3_rss_cfg_hash_key(struct hinic3_hwdev *hwdev, u8 opcode,
+ u8 *key)
+{
+ struct l2nic_cmd_cfg_rss_hash_key hash_key = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ hash_key.func_id = hinic3_global_func_id(hwdev);
+ hash_key.opcode = opcode;
+
+ if (opcode == MGMT_MSG_CMD_OP_SET)
+ memcpy(hash_key.key, key, L2NIC_RSS_KEY_SIZE);
+
+ mgmt_msg_params_init_default(&msg_params, &hash_key, sizeof(hash_key));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_CFG_RSS_HASH_KEY, &msg_params);
+ if (err || hash_key.msg_head.status) {
+ dev_err(hwdev->dev, "Failed to %s hash key, err: %d, status: 0x%x\n",
+ opcode == MGMT_MSG_CMD_OP_SET ? "set" : "get",
+ err, hash_key.msg_head.status);
+ return -EINVAL;
+ }
+
+ if (opcode == MGMT_MSG_CMD_OP_GET)
+ memcpy(key, hash_key.key, L2NIC_RSS_KEY_SIZE);
+
+ return 0;
+}
+
+static int hinic3_rss_set_hash_key(struct hinic3_hwdev *hwdev, u8 *key)
+{
+ return hinic3_rss_cfg_hash_key(hwdev, MGMT_MSG_CMD_OP_SET, key);
+}
+
+static int hinic3_set_hw_rss_parameters(struct net_device *netdev, u8 rss_en)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ int err;
+
+ err = hinic3_rss_set_hash_key(nic_dev->hwdev, nic_dev->rss_hkey);
+ if (err)
+ return err;
+
+ hinic3_fillout_indir_tbl(netdev, nic_dev->rss_indir);
+
+ err = hinic3_config_rss_hw_resource(netdev, nic_dev->rss_indir);
+ if (err)
+ return err;
+
+ err = hinic3_rss_cfg(nic_dev->hwdev, rss_en, nic_dev->q_params.num_qps);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int hinic3_rss_init(struct net_device *netdev)
+{
+ return hinic3_set_hw_rss_parameters(netdev, 1);
+}
+
+void hinic3_rss_uninit(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ hinic3_rss_cfg(nic_dev->hwdev, 0, 0);
+}
+
+void hinic3_clear_rss_config(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ kfree(nic_dev->rss_hkey);
+ nic_dev->rss_hkey = NULL;
+
+ kfree(nic_dev->rss_indir);
+ nic_dev->rss_indir = NULL;
+}
+
+void hinic3_try_to_enable_rss(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ int err;
+
+ nic_dev->max_qps = hinic3_func_max_qnum(hwdev);
+ if (nic_dev->max_qps <= 1 ||
+ !hinic3_test_support(nic_dev, HINIC3_NIC_F_RSS))
+ goto err_reset_q_params;
+
+ err = alloc_rss_resource(netdev);
+ if (err) {
+ nic_dev->max_qps = 1;
+ goto err_reset_q_params;
+ }
+
+ set_bit(HINIC3_RSS_ENABLE, &nic_dev->flags);
+ decide_num_qps(netdev);
+ hinic3_init_rss_parameters(netdev);
+ err = hinic3_set_hw_rss_parameters(netdev, 0);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to set hardware rss parameters\n");
+ hinic3_clear_rss_config(netdev);
+ nic_dev->max_qps = 1;
+ goto err_reset_q_params;
+ }
+
+ return;
+
+err_reset_q_params:
+ clear_bit(HINIC3_RSS_ENABLE, &nic_dev->flags);
+ nic_dev->q_params.num_qps = nic_dev->max_qps;
+}
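
[Editor's note: the default indirection table that hinic3_fillout_indir_tbl() builds above just spreads entries round-robin across the active queues; ethtool_rxfh_indir_default(i, n) reduces to i % n. A minimal standalone sketch of the resulting distribution — the table size of 256 is an assumption for illustration, not taken from the driver headers:

	#include <stdio.h>

	#define RSS_INDIR_SIZE 256 /* assumed table size for illustration */

	/* mirrors ethtool_rxfh_indir_default(): entry i maps to queue i % num_qps */
	static unsigned int indir_default(unsigned int i, unsigned int num_qps)
	{
		return i % num_qps;
	}

	int main(void)
	{
		unsigned int hits[8] = { 0 };
		unsigned int num_qps = 6, i;

		for (i = 0; i < RSS_INDIR_SIZE; i++)
			hits[indir_default(i, num_qps)]++;

		/* 256 entries over 6 queues: queues 0-3 get 43, queues 4-5 get 42 */
		for (i = 0; i < num_qps; i++)
			printf("queue %u: %u entries\n", i, hits[i]);
		return 0;
	}

With a power-of-two table and a non-power-of-two queue count the load differs by at most one entry per queue, which is why the driver can refill the table with the stock helper instead of carrying its own balancing logic.]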
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_rss.h b/drivers/net/ethernet/huawei/hinic3/hinic3_rss.h
new file mode 100644
index 000000000000..78d82c2aca06
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_rss.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_RSS_H_
+#define _HINIC3_RSS_H_
+
+#include <linux/netdevice.h>
+
+int hinic3_rss_init(struct net_device *netdev);
+void hinic3_rss_uninit(struct net_device *netdev);
+void hinic3_try_to_enable_rss(struct net_device *netdev);
+void hinic3_clear_rss_config(struct net_device *netdev);
+
+#endif
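
[Editor's note: on hinic3_set_rss_type() above — the L2NIC_RSS_TYPE_SET() helpers pack one enable bit per traffic class into a single 32-bit context word, FIELD_PREP-style. A standalone sketch of that packing; the bit positions here are hypothetical, the real masks live in the driver's command headers:

	#include <stdint.h>
	#include <stdio.h>

	/* hypothetical bit positions, for illustration only */
	#define RSS_TYPE_VALID_BIT    23
	#define RSS_TYPE_TCP_IPV4_BIT  2
	#define RSS_TYPE_IPV4_BIT      0

	#define RSS_TYPE_SET(val, bit) (((uint32_t)(val) & 0x1u) << (bit))
	#define RSS_TYPE_GET(ctx, bit) (((ctx) >> (bit)) & 0x1u)

	int main(void)
	{
		uint32_t ctx = RSS_TYPE_SET(1, RSS_TYPE_VALID_BIT) |
			       RSS_TYPE_SET(1, RSS_TYPE_TCP_IPV4_BIT) |
			       RSS_TYPE_SET(1, RSS_TYPE_IPV4_BIT);

		printf("context word: 0x%08x, tcp_ipv4 enabled: %u\n",
		       ctx, RSS_TYPE_GET(ctx, RSS_TYPE_TCP_IPV4_BIT));
		return 0;
	}
]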
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
index 860163e9d66c..16c00c3bb1ed 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
@@ -35,13 +35,35 @@
int hinic3_alloc_rxqs(struct net_device *netdev)
{
- /* Completed by later submission due to LoC limit. */
- return -EFAULT;
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct pci_dev *pdev = nic_dev->pdev;
+ u16 num_rxqs = nic_dev->max_qps;
+ struct hinic3_rxq *rxq;
+ u16 q_id;
+
+ nic_dev->rxqs = kcalloc(num_rxqs, sizeof(*nic_dev->rxqs), GFP_KERNEL);
+ if (!nic_dev->rxqs)
+ return -ENOMEM;
+
+ for (q_id = 0; q_id < num_rxqs; q_id++) {
+ rxq = &nic_dev->rxqs[q_id];
+ rxq->netdev = netdev;
+ rxq->dev = &pdev->dev;
+ rxq->q_id = q_id;
+ rxq->buf_len = nic_dev->rx_buf_len;
+ rxq->buf_len_shift = ilog2(nic_dev->rx_buf_len);
+ rxq->q_depth = nic_dev->q_params.rq_depth;
+ rxq->q_mask = nic_dev->q_params.rq_depth - 1;
+ }
+
+ return 0;
}
void hinic3_free_rxqs(struct net_device *netdev)
{
- /* Completed by later submission due to LoC limit. */
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ kfree(nic_dev->rxqs);
}
static int rx_alloc_mapped_page(struct page_pool *page_pool,
@@ -50,6 +72,9 @@ static int rx_alloc_mapped_page(struct page_pool *page_pool,
struct page *page;
u32 page_offset;
+ if (likely(rx_info->page))
+ return 0;
+
page = page_pool_dev_alloc_frag(page_pool, &page_offset, buf_len);
if (unlikely(!page))
return -ENOMEM;
@@ -60,14 +85,35 @@ static int rx_alloc_mapped_page(struct page_pool *page_pool,
return 0;
}
+/* Associate a fixed completion element with every wqe in the rq. Every rq wqe
+ * will always post its completion to the same place.
+ */
+static void rq_associate_cqes(struct hinic3_rxq *rxq)
+{
+ struct hinic3_queue_pages *qpages;
+ struct hinic3_rq_wqe *rq_wqe;
+ dma_addr_t cqe_dma;
+ u32 i;
+
+ qpages = &rxq->rq->wq.qpages;
+
+ for (i = 0; i < rxq->q_depth; i++) {
+ rq_wqe = get_q_element(qpages, i, NULL);
+ cqe_dma = rxq->cqe_start_paddr +
+ i * sizeof(struct hinic3_rq_cqe);
+ rq_wqe->cqe_hi_addr = cpu_to_le32(upper_32_bits(cqe_dma));
+ rq_wqe->cqe_lo_addr = cpu_to_le32(lower_32_bits(cqe_dma));
+ }
+}
+
static void rq_wqe_buf_set(struct hinic3_io_queue *rq, uint32_t wqe_idx,
dma_addr_t dma_addr, u16 len)
{
struct hinic3_rq_wqe *rq_wqe;
rq_wqe = get_q_element(&rq->wq.qpages, wqe_idx, NULL);
- rq_wqe->buf_hi_addr = upper_32_bits(dma_addr);
- rq_wqe->buf_lo_addr = lower_32_bits(dma_addr);
+ rq_wqe->buf_hi_addr = cpu_to_le32(upper_32_bits(dma_addr));
+ rq_wqe->buf_lo_addr = cpu_to_le32(lower_32_bits(dma_addr));
}
static u32 hinic3_rx_fill_buffers(struct hinic3_rxq *rxq)
@@ -102,6 +148,41 @@ static u32 hinic3_rx_fill_buffers(struct hinic3_rxq *rxq)
return i;
}
+static u32 hinic3_alloc_rx_buffers(struct hinic3_dyna_rxq_res *rqres,
+ u32 rq_depth, u16 buf_len)
+{
+ u32 free_wqebbs = rq_depth - 1;
+ u32 idx;
+ int err;
+
+ for (idx = 0; idx < free_wqebbs; idx++) {
+ err = rx_alloc_mapped_page(rqres->page_pool,
+ &rqres->rx_info[idx], buf_len);
+ if (err)
+ break;
+ }
+
+ return idx;
+}
+
+static void hinic3_free_rx_buffers(struct hinic3_dyna_rxq_res *rqres,
+ u32 q_depth)
+{
+ struct hinic3_rx_info *rx_info;
+ u32 i;
+
+ /* Return all the Rx ring buffer pages to the page pool */
+ for (i = 0; i < q_depth; i++) {
+ rx_info = &rqres->rx_info[i];
+
+ if (rx_info->page) {
+ page_pool_put_full_page(rqres->page_pool,
+ rx_info->page, false);
+ rx_info->page = NULL;
+ }
+ }
+}
+
static void hinic3_add_rx_frag(struct hinic3_rxq *rxq,
struct hinic3_rx_info *rx_info,
struct sk_buff *skb, u32 size)
@@ -279,7 +360,7 @@ static int recv_one_pkt(struct hinic3_rxq *rxq, struct hinic3_rq_cqe *rx_cqe,
if (skb_is_nonlinear(skb))
hinic3_pull_tail(skb);
- offload_type = rx_cqe->offload_type;
+ offload_type = le32_to_cpu(rx_cqe->offload_type);
hinic3_rx_csum(rxq, offload_type, status, skb);
num_lro = RQ_CQE_STATUS_GET(status, NUM_LRO);
@@ -299,6 +380,135 @@ static int recv_one_pkt(struct hinic3_rxq *rxq, struct hinic3_rq_cqe *rx_cqe,
return 0;
}
+int hinic3_alloc_rxqs_res(struct net_device *netdev, u16 num_rq,
+ u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res)
+{
+ u64 cqe_mem_size = sizeof(struct hinic3_rq_cqe) * rq_depth;
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct page_pool_params pp_params = {};
+ struct hinic3_dyna_rxq_res *rqres;
+ u32 pkt_idx;
+ int idx;
+
+ for (idx = 0; idx < num_rq; idx++) {
+ rqres = &rxqs_res[idx];
+ rqres->rx_info = kcalloc(rq_depth, sizeof(*rqres->rx_info),
+ GFP_KERNEL);
+ if (!rqres->rx_info)
+ goto err_free_rqres;
+
+ rqres->cqe_start_vaddr =
+ dma_alloc_coherent(&nic_dev->pdev->dev, cqe_mem_size,
+ &rqres->cqe_start_paddr, GFP_KERNEL);
+ if (!rqres->cqe_start_vaddr) {
+ netdev_err(netdev, "Failed to alloc rxq%d rx cqe\n",
+ idx);
+ goto err_free_rx_info;
+ }
+
+ pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+ pp_params.pool_size = rq_depth * nic_dev->rx_buf_len /
+ PAGE_SIZE;
+ pp_params.nid = dev_to_node(&nic_dev->pdev->dev);
+ pp_params.dev = &nic_dev->pdev->dev;
+ pp_params.dma_dir = DMA_FROM_DEVICE;
+ pp_params.max_len = PAGE_SIZE;
+ rqres->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(rqres->page_pool)) {
+ netdev_err(netdev, "Failed to create rxq%d page pool\n",
+ idx);
+ goto err_free_cqe;
+ }
+
+ pkt_idx = hinic3_alloc_rx_buffers(rqres, rq_depth,
+ nic_dev->rx_buf_len);
+ if (!pkt_idx) {
+ netdev_err(netdev, "Failed to alloc rxq%d rx buffers\n",
+ idx);
+ goto err_destroy_page_pool;
+ }
+ rqres->next_to_alloc = pkt_idx;
+ }
+
+ return 0;
+
+err_destroy_page_pool:
+ page_pool_destroy(rqres->page_pool);
+err_free_cqe:
+ dma_free_coherent(&nic_dev->pdev->dev, cqe_mem_size,
+ rqres->cqe_start_vaddr,
+ rqres->cqe_start_paddr);
+err_free_rx_info:
+ kfree(rqres->rx_info);
+err_free_rqres:
+ hinic3_free_rxqs_res(netdev, idx, rq_depth, rxqs_res);
+
+ return -ENOMEM;
+}
+
+void hinic3_free_rxqs_res(struct net_device *netdev, u16 num_rq,
+ u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res)
+{
+ u64 cqe_mem_size = sizeof(struct hinic3_rq_cqe) * rq_depth;
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_dyna_rxq_res *rqres;
+ int idx;
+
+ for (idx = 0; idx < num_rq; idx++) {
+ rqres = &rxqs_res[idx];
+
+ hinic3_free_rx_buffers(rqres, rq_depth);
+ page_pool_destroy(rqres->page_pool);
+ dma_free_coherent(&nic_dev->pdev->dev, cqe_mem_size,
+ rqres->cqe_start_vaddr,
+ rqres->cqe_start_paddr);
+ kfree(rqres->rx_info);
+ }
+}
+
+int hinic3_configure_rxqs(struct net_device *netdev, u16 num_rq,
+ u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_dyna_rxq_res *rqres;
+ struct msix_entry *msix_entry;
+ struct hinic3_rxq *rxq;
+ u16 q_id;
+ u32 pkts;
+
+ for (q_id = 0; q_id < num_rq; q_id++) {
+ rxq = &nic_dev->rxqs[q_id];
+ rqres = &rxqs_res[q_id];
+ msix_entry = &nic_dev->qps_msix_entries[q_id];
+
+ rxq->irq_id = msix_entry->vector;
+ rxq->msix_entry_idx = msix_entry->entry;
+ rxq->next_to_update = 0;
+ rxq->next_to_alloc = rqres->next_to_alloc;
+ rxq->q_depth = rq_depth;
+ rxq->delta = rxq->q_depth;
+ rxq->q_mask = rxq->q_depth - 1;
+ rxq->cons_idx = 0;
+
+ rxq->cqe_arr = rqres->cqe_start_vaddr;
+ rxq->cqe_start_paddr = rqres->cqe_start_paddr;
+ rxq->rx_info = rqres->rx_info;
+ rxq->page_pool = rqres->page_pool;
+
+ rxq->rq = &nic_dev->nic_io->rq[rxq->q_id];
+
+ rq_associate_cqes(rxq);
+
+ pkts = hinic3_rx_fill_buffers(rxq);
+ if (!pkts) {
+ netdev_err(netdev, "Failed to fill Rx buffer\n");
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
int hinic3_rx_poll(struct hinic3_rxq *rxq, int budget)
{
struct hinic3_nic_dev *nic_dev = netdev_priv(rxq->netdev);
@@ -311,14 +521,14 @@ int hinic3_rx_poll(struct hinic3_rxq *rxq, int budget)
while (likely(nr_pkts < budget)) {
sw_ci = rxq->cons_idx & rxq->q_mask;
rx_cqe = rxq->cqe_arr + sw_ci;
- status = rx_cqe->status;
+ status = le32_to_cpu(rx_cqe->status);
if (!RQ_CQE_STATUS_GET(status, RXDONE))
break;
/* make sure we read rx_done before packet length */
rmb();
- vlan_len = rx_cqe->vlan_len;
+ vlan_len = le32_to_cpu(rx_cqe->vlan_len);
pkt_len = RQ_CQE_SGE_GET(vlan_len, LEN);
if (recv_one_pkt(rxq, rx_cqe, pkt_len, vlan_len, status))
break;
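
[Editor's note: the hunks above convert the CQE fields to __le32 and read them through le32_to_cpu() before extracting bit fields. A portable sketch of that pattern — the bit layout of vlan_len is hypothetical here; the real masks are the RQ_CQE_SGE_* definitions in the driver:

	#include <stdint.h>
	#include <stdio.h>

	/* read a little-endian 32-bit value portably, on any host endianness */
	static uint32_t le32_load(const uint8_t *p)
	{
		return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
		       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
	}

	/* hypothetical layout: low 14 bits of vlan_len carry the packet length */
	#define SGE_LEN_MASK 0x3fffu

	int main(void)
	{
		uint8_t cqe_vlan_len[4] = { 0x2e, 0x05, 0x00, 0x00 }; /* 0x52e */
		uint32_t vlan_len = le32_load(cqe_vlan_len);

		printf("pkt_len = %u\n", vlan_len & SGE_LEN_MASK); /* 1326 */
		return 0;
	}

Doing the conversion once per field and then masking keeps the hot path free of per-bit byte swaps, which is the point of annotating the structs as __le32.]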
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h
index 1cca21858d40..44ae841a3648 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h
@@ -27,21 +27,21 @@
/* RX Completion information that is provided by HW for a specific RX WQE */
struct hinic3_rq_cqe {
- u32 status;
- u32 vlan_len;
- u32 offload_type;
- u32 rsvd3;
- u32 rsvd4;
- u32 rsvd5;
- u32 rsvd6;
- u32 pkt_info;
+ __le32 status;
+ __le32 vlan_len;
+ __le32 offload_type;
+ __le32 rsvd3;
+ __le32 rsvd4;
+ __le32 rsvd5;
+ __le32 rsvd6;
+ __le32 pkt_info;
};
struct hinic3_rq_wqe {
- u32 buf_hi_addr;
- u32 buf_lo_addr;
- u32 cqe_hi_addr;
- u32 cqe_lo_addr;
+ __le32 buf_hi_addr;
+ __le32 buf_lo_addr;
+ __le32 cqe_hi_addr;
+ __le32 cqe_lo_addr;
};
struct hinic3_rx_info {
@@ -82,9 +82,23 @@ struct hinic3_rxq {
dma_addr_t cqe_start_paddr;
} ____cacheline_aligned;
+struct hinic3_dyna_rxq_res {
+ u16 next_to_alloc;
+ struct hinic3_rx_info *rx_info;
+ dma_addr_t cqe_start_paddr;
+ void *cqe_start_vaddr;
+ struct page_pool *page_pool;
+};
+
int hinic3_alloc_rxqs(struct net_device *netdev);
void hinic3_free_rxqs(struct net_device *netdev);
+int hinic3_alloc_rxqs_res(struct net_device *netdev, u16 num_rq,
+ u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res);
+void hinic3_free_rxqs_res(struct net_device *netdev, u16 num_rq,
+ u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res);
+int hinic3_configure_rxqs(struct net_device *netdev, u16 num_rq,
+ u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res);
int hinic3_rx_poll(struct hinic3_rxq *rxq, int budget);
#endif
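
[Editor's note: hinic3_rx_poll() above indexes the ring with `cons_idx & q_mask`, where q_mask is q_depth - 1. This only works because the depth is a power of two: the consumer index can run freely and the mask wraps it for free. A minimal sketch:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t q_depth = 1024;         /* must be a power of two */
		uint32_t q_mask = q_depth - 1;
		uint32_t cons_idx = 1022;        /* free-running, never reset */

		for (int i = 0; i < 4; i++, cons_idx++)
			printf("cons_idx=%u -> slot %u\n",
			       cons_idx, cons_idx & q_mask);
		/* prints slots 1022, 1023, 0, 1: masking wraps automatically */
		return 0;
	}

The same invariant is enforced at queue creation, which is why hinic3_wq_create() later in this patch rejects depths that are not powers of two.]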
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
index 3f7f73430be4..92c43c05e3f2 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
@@ -55,9 +55,9 @@ void hinic3_free_txqs(struct net_device *netdev)
static void hinic3_set_buf_desc(struct hinic3_sq_bufdesc *buf_descs,
dma_addr_t addr, u32 len)
{
- buf_descs->hi_addr = upper_32_bits(addr);
- buf_descs->lo_addr = lower_32_bits(addr);
- buf_descs->len = len;
+ buf_descs->hi_addr = cpu_to_le32(upper_32_bits(addr));
+ buf_descs->lo_addr = cpu_to_le32(lower_32_bits(addr));
+ buf_descs->len = cpu_to_le32(len);
}
static int hinic3_tx_map_skb(struct net_device *netdev, struct sk_buff *skb,
@@ -81,10 +81,10 @@ static int hinic3_tx_map_skb(struct net_device *netdev, struct sk_buff *skb,
dma_info[0].len = skb_headlen(skb);
- wqe_desc->hi_addr = upper_32_bits(dma_info[0].dma);
- wqe_desc->lo_addr = lower_32_bits(dma_info[0].dma);
+ wqe_desc->hi_addr = cpu_to_le32(upper_32_bits(dma_info[0].dma));
+ wqe_desc->lo_addr = cpu_to_le32(lower_32_bits(dma_info[0].dma));
- wqe_desc->ctrl_len = dma_info[0].len;
+ wqe_desc->ctrl_len = cpu_to_le32(dma_info[0].len);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
frag = &(skb_shinfo(skb)->frags[i]);
@@ -116,6 +116,7 @@ err_unmap_page:
}
dma_unmap_single(&pdev->dev, dma_info[0].dma, dma_info[0].len,
DMA_TO_DEVICE);
+
return err;
}
@@ -138,6 +139,23 @@ static void hinic3_tx_unmap_skb(struct net_device *netdev,
dma_info[0].len, DMA_TO_DEVICE);
}
+static void free_all_tx_skbs(struct net_device *netdev, u32 sq_depth,
+ struct hinic3_tx_info *tx_info_arr)
+{
+ struct hinic3_tx_info *tx_info;
+ u32 idx;
+
+ for (idx = 0; idx < sq_depth; idx++) {
+ tx_info = &tx_info_arr[idx];
+ if (tx_info->skb) {
+ hinic3_tx_unmap_skb(netdev, tx_info->skb,
+ tx_info->dma_info);
+ dev_kfree_skb_any(tx_info->skb);
+ tx_info->skb = NULL;
+ }
+ }
+}
+
union hinic3_ip {
struct iphdr *v4;
struct ipv6hdr *v6;
@@ -197,7 +215,8 @@ static int hinic3_tx_csum(struct hinic3_txq *txq, struct hinic3_sq_task *task,
union hinic3_ip ip;
u8 l4_proto;
- task->pkt_info0 |= SQ_TASK_INFO0_SET(1, TUNNEL_FLAG);
+ task->pkt_info0 |= cpu_to_le32(SQ_TASK_INFO0_SET(1,
+ TUNNEL_FLAG));
ip.hdr = skb_network_header(skb);
if (ip.v4->version == 4) {
@@ -226,7 +245,7 @@ static int hinic3_tx_csum(struct hinic3_txq *txq, struct hinic3_sq_task *task,
}
}
- task->pkt_info0 |= SQ_TASK_INFO0_SET(1, INNER_L4_EN);
+ task->pkt_info0 |= cpu_to_le32(SQ_TASK_INFO0_SET(1, INNER_L4_EN));
return 1;
}
@@ -255,26 +274,28 @@ static void get_inner_l3_l4_type(struct sk_buff *skb, union hinic3_ip *ip,
}
}
-static void hinic3_set_tso_info(struct hinic3_sq_task *task, u32 *queue_info,
+static void hinic3_set_tso_info(struct hinic3_sq_task *task, __le32 *queue_info,
enum hinic3_l4_offload_type l4_offload,
u32 offset, u32 mss)
{
if (l4_offload == HINIC3_L4_OFFLOAD_TCP) {
- *queue_info |= SQ_CTRL_QUEUE_INFO_SET(1, TSO);
- task->pkt_info0 |= SQ_TASK_INFO0_SET(1, INNER_L4_EN);
+ *queue_info |= cpu_to_le32(SQ_CTRL_QUEUE_INFO_SET(1, TSO));
+ task->pkt_info0 |= cpu_to_le32(SQ_TASK_INFO0_SET(1,
+ INNER_L4_EN));
} else if (l4_offload == HINIC3_L4_OFFLOAD_UDP) {
- *queue_info |= SQ_CTRL_QUEUE_INFO_SET(1, UFO);
- task->pkt_info0 |= SQ_TASK_INFO0_SET(1, INNER_L4_EN);
+ *queue_info |= cpu_to_le32(SQ_CTRL_QUEUE_INFO_SET(1, UFO));
+ task->pkt_info0 |= cpu_to_le32(SQ_TASK_INFO0_SET(1,
+ INNER_L4_EN));
}
/* enable L3 calculation */
- task->pkt_info0 |= SQ_TASK_INFO0_SET(1, INNER_L3_EN);
+ task->pkt_info0 |= cpu_to_le32(SQ_TASK_INFO0_SET(1, INNER_L3_EN));
- *queue_info |= SQ_CTRL_QUEUE_INFO_SET(offset >> 1, PLDOFF);
+ *queue_info |= cpu_to_le32(SQ_CTRL_QUEUE_INFO_SET(offset >> 1, PLDOFF));
/* set MSS value */
- *queue_info &= ~SQ_CTRL_QUEUE_INFO_MSS_MASK;
- *queue_info |= SQ_CTRL_QUEUE_INFO_SET(mss, MSS);
+ *queue_info &= cpu_to_le32(~SQ_CTRL_QUEUE_INFO_MSS_MASK);
+ *queue_info |= cpu_to_le32(SQ_CTRL_QUEUE_INFO_SET(mss, MSS));
}
static __sum16 csum_magic(union hinic3_ip *ip, unsigned short proto)
@@ -284,7 +305,7 @@ static __sum16 csum_magic(union hinic3_ip *ip, unsigned short proto)
csum_ipv6_magic(&ip->v6->saddr, &ip->v6->daddr, 0, proto, 0);
}
-static int hinic3_tso(struct hinic3_sq_task *task, u32 *queue_info,
+static int hinic3_tso(struct hinic3_sq_task *task, __le32 *queue_info,
struct sk_buff *skb)
{
enum hinic3_l4_offload_type l4_offload;
@@ -305,15 +326,17 @@ static int hinic3_tso(struct hinic3_sq_task *task, u32 *queue_info,
if (skb->encapsulation) {
u32 gso_type = skb_shinfo(skb)->gso_type;
/* L3 checksum is always enabled */
- task->pkt_info0 |= SQ_TASK_INFO0_SET(1, OUT_L3_EN);
- task->pkt_info0 |= SQ_TASK_INFO0_SET(1, TUNNEL_FLAG);
+ task->pkt_info0 |= cpu_to_le32(SQ_TASK_INFO0_SET(1, OUT_L3_EN));
+ task->pkt_info0 |= cpu_to_le32(SQ_TASK_INFO0_SET(1,
+ TUNNEL_FLAG));
l4.hdr = skb_transport_header(skb);
ip.hdr = skb_network_header(skb);
if (gso_type & SKB_GSO_UDP_TUNNEL_CSUM) {
l4.udp->check = ~csum_magic(&ip, IPPROTO_UDP);
- task->pkt_info0 |= SQ_TASK_INFO0_SET(1, OUT_L4_EN);
+ task->pkt_info0 |=
+ cpu_to_le32(SQ_TASK_INFO0_SET(1, OUT_L4_EN));
}
ip.hdr = skb_inner_network_header(skb);
@@ -343,13 +366,14 @@ static void hinic3_set_vlan_tx_offload(struct hinic3_sq_task *task,
* 2=select TPID2 in IPSU, 3=select TPID3 in IPSU,
* 4=select TPID4 in IPSU
*/
- task->vlan_offload = SQ_TASK_INFO3_SET(vlan_tag, VLAN_TAG) |
- SQ_TASK_INFO3_SET(vlan_tpid, VLAN_TPID) |
- SQ_TASK_INFO3_SET(1, VLAN_TAG_VALID);
+ task->vlan_offload =
+ cpu_to_le32(SQ_TASK_INFO3_SET(vlan_tag, VLAN_TAG) |
+ SQ_TASK_INFO3_SET(vlan_tpid, VLAN_TPID) |
+ SQ_TASK_INFO3_SET(1, VLAN_TAG_VALID));
}
static u32 hinic3_tx_offload(struct sk_buff *skb, struct hinic3_sq_task *task,
- u32 *queue_info, struct hinic3_txq *txq)
+ __le32 *queue_info, struct hinic3_txq *txq)
{
u32 offload = 0;
int tso_cs_en;
@@ -440,39 +464,41 @@ static u16 hinic3_set_wqe_combo(struct hinic3_txq *txq,
}
static void hinic3_prepare_sq_ctrl(struct hinic3_sq_wqe_combo *wqe_combo,
- u32 queue_info, int nr_descs, u16 owner)
+ __le32 queue_info, int nr_descs, u16 owner)
{
struct hinic3_sq_wqe_desc *wqe_desc = wqe_combo->ctrl_bd0;
if (wqe_combo->wqe_type == SQ_WQE_COMPACT_TYPE) {
wqe_desc->ctrl_len |=
- SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) |
- SQ_CTRL_SET(wqe_combo->wqe_type, EXTENDED) |
- SQ_CTRL_SET(owner, OWNER);
+ cpu_to_le32(SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) |
+ SQ_CTRL_SET(wqe_combo->wqe_type, EXTENDED) |
+ SQ_CTRL_SET(owner, OWNER));
/* compact wqe queue_info will transfer to chip */
wqe_desc->queue_info = 0;
return;
}
- wqe_desc->ctrl_len |= SQ_CTRL_SET(nr_descs, BUFDESC_NUM) |
- SQ_CTRL_SET(wqe_combo->task_type, TASKSECT_LEN) |
- SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) |
- SQ_CTRL_SET(wqe_combo->wqe_type, EXTENDED) |
- SQ_CTRL_SET(owner, OWNER);
+ wqe_desc->ctrl_len |=
+ cpu_to_le32(SQ_CTRL_SET(nr_descs, BUFDESC_NUM) |
+ SQ_CTRL_SET(wqe_combo->task_type, TASKSECT_LEN) |
+ SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) |
+ SQ_CTRL_SET(wqe_combo->wqe_type, EXTENDED) |
+ SQ_CTRL_SET(owner, OWNER));
wqe_desc->queue_info = queue_info;
- wqe_desc->queue_info |= SQ_CTRL_QUEUE_INFO_SET(1, UC);
+ wqe_desc->queue_info |= cpu_to_le32(SQ_CTRL_QUEUE_INFO_SET(1, UC));
if (!SQ_CTRL_QUEUE_INFO_GET(wqe_desc->queue_info, MSS)) {
wqe_desc->queue_info |=
- SQ_CTRL_QUEUE_INFO_SET(HINIC3_TX_MSS_DEFAULT, MSS);
+ cpu_to_le32(SQ_CTRL_QUEUE_INFO_SET(HINIC3_TX_MSS_DEFAULT, MSS));
} else if (SQ_CTRL_QUEUE_INFO_GET(wqe_desc->queue_info, MSS) <
HINIC3_TX_MSS_MIN) {
/* mss should not be less than 80 */
- wqe_desc->queue_info &= ~SQ_CTRL_QUEUE_INFO_MSS_MASK;
+ wqe_desc->queue_info &=
+ cpu_to_le32(~SQ_CTRL_QUEUE_INFO_MSS_MASK);
wqe_desc->queue_info |=
- SQ_CTRL_QUEUE_INFO_SET(HINIC3_TX_MSS_MIN, MSS);
+ cpu_to_le32(SQ_CTRL_QUEUE_INFO_SET(HINIC3_TX_MSS_MIN, MSS));
}
}
@@ -482,12 +508,13 @@ static netdev_tx_t hinic3_send_one_skb(struct sk_buff *skb,
{
struct hinic3_sq_wqe_combo wqe_combo = {};
struct hinic3_tx_info *tx_info;
- u32 offload, queue_info = 0;
struct hinic3_sq_task task;
u16 wqebb_cnt, num_sge;
+ __le32 queue_info = 0;
u16 saved_wq_prod_idx;
u16 owner, pi = 0;
u8 saved_sq_owner;
+ u32 offload;
int err;
if (unlikely(skb->len < MIN_SKB_LEN)) {
@@ -575,6 +602,7 @@ netdev_tx_t hinic3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
err_drop_pkt:
dev_kfree_skb_any(skb);
+
return NETDEV_TX_OK;
}
@@ -624,6 +652,90 @@ void hinic3_flush_txqs(struct net_device *netdev)
#define HINIC3_BDS_PER_SQ_WQEBB \
(HINIC3_SQ_WQEBB_SIZE / sizeof(struct hinic3_sq_bufdesc))
+int hinic3_alloc_txqs_res(struct net_device *netdev, u16 num_sq,
+ u32 sq_depth, struct hinic3_dyna_txq_res *txqs_res)
+{
+ struct hinic3_dyna_txq_res *tqres;
+ int idx;
+
+ for (idx = 0; idx < num_sq; idx++) {
+ tqres = &txqs_res[idx];
+
+ tqres->tx_info = kcalloc(sq_depth, sizeof(*tqres->tx_info),
+ GFP_KERNEL);
+ if (!tqres->tx_info)
+ goto err_free_tqres;
+
+ tqres->bds = kcalloc(sq_depth * HINIC3_BDS_PER_SQ_WQEBB +
+ HINIC3_MAX_SQ_SGE, sizeof(*tqres->bds),
+ GFP_KERNEL);
+ if (!tqres->bds) {
+ kfree(tqres->tx_info);
+ goto err_free_tqres;
+ }
+ }
+
+ return 0;
+
+err_free_tqres:
+ while (idx > 0) {
+ idx--;
+ tqres = &txqs_res[idx];
+
+ kfree(tqres->bds);
+ kfree(tqres->tx_info);
+ }
+
+ return -ENOMEM;
+}
+
+void hinic3_free_txqs_res(struct net_device *netdev, u16 num_sq,
+ u32 sq_depth, struct hinic3_dyna_txq_res *txqs_res)
+{
+ struct hinic3_dyna_txq_res *tqres;
+ int idx;
+
+ for (idx = 0; idx < num_sq; idx++) {
+ tqres = &txqs_res[idx];
+
+ free_all_tx_skbs(netdev, sq_depth, tqres->tx_info);
+ kfree(tqres->bds);
+ kfree(tqres->tx_info);
+ }
+}
+
+int hinic3_configure_txqs(struct net_device *netdev, u16 num_sq,
+ u32 sq_depth, struct hinic3_dyna_txq_res *txqs_res)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_dyna_txq_res *tqres;
+ struct hinic3_txq *txq;
+ u16 q_id;
+ u32 idx;
+
+ for (q_id = 0; q_id < num_sq; q_id++) {
+ txq = &nic_dev->txqs[q_id];
+ tqres = &txqs_res[q_id];
+
+ txq->q_depth = sq_depth;
+ txq->q_mask = sq_depth - 1;
+
+ txq->tx_stop_thrs = min(HINIC3_DEFAULT_STOP_THRS,
+ sq_depth / 20);
+ txq->tx_start_thrs = min(HINIC3_DEFAULT_START_THRS,
+ sq_depth / 10);
+
+ txq->tx_info = tqres->tx_info;
+ for (idx = 0; idx < sq_depth; idx++)
+ txq->tx_info[idx].dma_info =
+ &tqres->bds[idx * HINIC3_BDS_PER_SQ_WQEBB];
+
+ txq->sq = &nic_dev->nic_io->sq[q_id];
+ }
+
+ return 0;
+}
+
bool hinic3_tx_poll(struct hinic3_txq *txq, int budget)
{
struct net_device *netdev = txq->netdev;
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_tx.h b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.h
index 9e505cc19dd5..7e1b872ba752 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_tx.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.h
@@ -58,7 +58,7 @@ enum hinic3_tx_offload_type {
#define SQ_CTRL_QUEUE_INFO_SET(val, member) \
FIELD_PREP(SQ_CTRL_QUEUE_INFO_##member##_MASK, val)
#define SQ_CTRL_QUEUE_INFO_GET(val, member) \
- FIELD_GET(SQ_CTRL_QUEUE_INFO_##member##_MASK, val)
+ FIELD_GET(SQ_CTRL_QUEUE_INFO_##member##_MASK, le32_to_cpu(val))
#define SQ_CTRL_MAX_PLDOFF 221
@@ -77,17 +77,17 @@ enum hinic3_tx_offload_type {
FIELD_PREP(SQ_TASK_INFO3_##member##_MASK, val)
struct hinic3_sq_wqe_desc {
- u32 ctrl_len;
- u32 queue_info;
- u32 hi_addr;
- u32 lo_addr;
+ __le32 ctrl_len;
+ __le32 queue_info;
+ __le32 hi_addr;
+ __le32 lo_addr;
};
struct hinic3_sq_task {
- u32 pkt_info0;
- u32 ip_identify;
- u32 rsvd;
- u32 vlan_offload;
+ __le32 pkt_info0;
+ __le32 ip_identify;
+ __le32 rsvd;
+ __le32 vlan_offload;
};
struct hinic3_sq_wqe_combo {
@@ -125,9 +125,21 @@ struct hinic3_txq {
struct hinic3_io_queue *sq;
} ____cacheline_aligned;
+struct hinic3_dyna_txq_res {
+ struct hinic3_tx_info *tx_info;
+ struct hinic3_dma_info *bds;
+};
+
int hinic3_alloc_txqs(struct net_device *netdev);
void hinic3_free_txqs(struct net_device *netdev);
+int hinic3_alloc_txqs_res(struct net_device *netdev, u16 num_sq,
+ u32 sq_depth, struct hinic3_dyna_txq_res *txqs_res);
+void hinic3_free_txqs_res(struct net_device *netdev, u16 num_sq,
+ u32 sq_depth, struct hinic3_dyna_txq_res *txqs_res);
+int hinic3_configure_txqs(struct net_device *netdev, u16 num_sq,
+ u32 sq_depth, struct hinic3_dyna_txq_res *txqs_res);
+
netdev_tx_t hinic3_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
bool hinic3_tx_poll(struct hinic3_txq *txq, int budget);
void hinic3_flush_txqs(struct net_device *netdev);
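
[Editor's note: hinic3_prepare_sq_ctrl() above clamps the MSS field inside the le32 queue_info word: an unset MSS falls back to the default and anything below the minimum is raised to 80. A standalone sketch of that clamp over a packed field — the shift, width, and default value here are hypothetical stand-ins for SQ_CTRL_QUEUE_INFO_MSS_MASK and HINIC3_TX_MSS_DEFAULT:

	#include <stdint.h>
	#include <stdio.h>

	#define MSS_SHIFT   13                      /* hypothetical placement */
	#define MSS_MASK    (0x3fffu << MSS_SHIFT)
	#define MSS_MIN     80
	#define MSS_DEFAULT 0x3e00                   /* hypothetical default */

	static uint32_t mss_get(uint32_t qi) { return (qi & MSS_MASK) >> MSS_SHIFT; }
	static uint32_t mss_set(uint32_t v)  { return (v << MSS_SHIFT) & MSS_MASK; }

	int main(void)
	{
		uint32_t queue_info = mss_set(40); /* TSO asked for MSS below 80 */

		if (!mss_get(queue_info)) {
			queue_info |= mss_set(MSS_DEFAULT);
		} else if (mss_get(queue_info) < MSS_MIN) {
			queue_info &= ~MSS_MASK; /* mss must not be less than 80 */
			queue_info |= mss_set(MSS_MIN);
		}
		printf("effective MSS: %u\n", mss_get(queue_info)); /* 80 */
		return 0;
	}

In the driver itself every read goes through le32_to_cpu() and every write through cpu_to_le32(), as the hunks above show; the clamp logic is otherwise identical.]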
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_wq.c b/drivers/net/ethernet/huawei/hinic3/hinic3_wq.c
index 2ac7efcd1365..bc3ffdc25cf6 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_wq.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_wq.c
@@ -6,6 +6,110 @@
#include "hinic3_hwdev.h"
#include "hinic3_wq.h"
+#define WQ_MIN_DEPTH 64
+#define WQ_MAX_DEPTH 65536
+#define WQ_PAGE_ADDR_SIZE sizeof(u64)
+#define WQ_MAX_NUM_PAGES (HINIC3_MIN_PAGE_SIZE / WQ_PAGE_ADDR_SIZE)
+
+static int wq_init_wq_block(struct hinic3_hwdev *hwdev, struct hinic3_wq *wq)
+{
+ struct hinic3_queue_pages *qpages = &wq->qpages;
+ int i;
+
+ if (hinic3_wq_is_0_level_cla(wq)) {
+ wq->wq_block_paddr = qpages->pages[0].align_paddr;
+ wq->wq_block_vaddr = qpages->pages[0].align_vaddr;
+
+ return 0;
+ }
+
+ if (wq->qpages.num_pages > WQ_MAX_NUM_PAGES) {
+ dev_err(hwdev->dev, "wq num_pages exceed limit: %lu\n",
+ WQ_MAX_NUM_PAGES);
+ return -EFAULT;
+ }
+
+ wq->wq_block_vaddr = dma_alloc_coherent(hwdev->dev,
+ HINIC3_MIN_PAGE_SIZE,
+ &wq->wq_block_paddr,
+ GFP_KERNEL);
+ if (!wq->wq_block_vaddr)
+ return -ENOMEM;
+
+ for (i = 0; i < qpages->num_pages; i++)
+ wq->wq_block_vaddr[i] = cpu_to_be64(qpages->pages[i].align_paddr);
+
+ return 0;
+}
+
+static int wq_alloc_pages(struct hinic3_hwdev *hwdev, struct hinic3_wq *wq)
+{
+ int err;
+
+ err = hinic3_queue_pages_alloc(hwdev, &wq->qpages, 0);
+ if (err)
+ return err;
+
+ err = wq_init_wq_block(hwdev, wq);
+ if (err) {
+ hinic3_queue_pages_free(hwdev, &wq->qpages);
+ return err;
+ }
+
+ return 0;
+}
+
+static void wq_free_pages(struct hinic3_hwdev *hwdev, struct hinic3_wq *wq)
+{
+ if (!hinic3_wq_is_0_level_cla(wq))
+ dma_free_coherent(hwdev->dev,
+ HINIC3_MIN_PAGE_SIZE,
+ wq->wq_block_vaddr,
+ wq->wq_block_paddr);
+
+ hinic3_queue_pages_free(hwdev, &wq->qpages);
+}
+
+int hinic3_wq_create(struct hinic3_hwdev *hwdev, struct hinic3_wq *wq,
+ u32 q_depth, u16 wqebb_size)
+{
+ u32 wq_page_size;
+
+ if (q_depth < WQ_MIN_DEPTH || q_depth > WQ_MAX_DEPTH ||
+ !is_power_of_2(q_depth) || !is_power_of_2(wqebb_size)) {
+ dev_err(hwdev->dev, "Invalid WQ: q_depth %u, wqebb_size %u\n",
+ q_depth, wqebb_size);
+ return -EINVAL;
+ }
+
+ wq_page_size = ALIGN(hwdev->wq_page_size, HINIC3_MIN_PAGE_SIZE);
+
+ memset(wq, 0, sizeof(*wq));
+ wq->q_depth = q_depth;
+ wq->idx_mask = q_depth - 1;
+
+ hinic3_queue_pages_init(&wq->qpages, q_depth, wq_page_size, wqebb_size);
+
+ return wq_alloc_pages(hwdev, wq);
+}
+
+void hinic3_wq_destroy(struct hinic3_hwdev *hwdev, struct hinic3_wq *wq)
+{
+ wq_free_pages(hwdev, wq);
+}
+
+void hinic3_wq_reset(struct hinic3_wq *wq)
+{
+ struct hinic3_queue_pages *qpages = &wq->qpages;
+ u16 pg_idx;
+
+ wq->cons_idx = 0;
+ wq->prod_idx = 0;
+
+ for (pg_idx = 0; pg_idx < qpages->num_pages; pg_idx++)
+ memset(qpages->pages[pg_idx].align_vaddr, 0, qpages->page_size);
+}
+
void hinic3_wq_get_multi_wqebbs(struct hinic3_wq *wq,
u16 num_wqebbs, u16 *prod_idx,
struct hinic3_sq_bufdesc **first_part_wqebbs,
@@ -27,3 +131,8 @@ void hinic3_wq_get_multi_wqebbs(struct hinic3_wq *wq,
*second_part_wqebbs = get_q_element(&wq->qpages, idx, NULL);
}
}
+
+bool hinic3_wq_is_0_level_cla(const struct hinic3_wq *wq)
+{
+ return wq->qpages.num_pages == 1;
+}
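
[Editor's note: wq_init_wq_block() above distinguishes a 0-level CLA, where the single queue page is addressed directly, from a 1-level CLA, where one block page holds the big-endian physical addresses of all queue pages. A toy sketch of that indirection table; the byte reversal shown is what cpu_to_be64() performs on a little-endian host:

	#include <stdint.h>
	#include <stdio.h>

	/* byte-reverse a 64-bit value (cpu_to_be64() on a little-endian host) */
	static uint64_t be64_store(uint64_t v)
	{
		uint64_t r = 0;
		for (int i = 0; i < 8; i++)
			r = (r << 8) | ((v >> (8 * i)) & 0xff);
		return r;
	}

	int main(void)
	{
		uint64_t page_paddrs[3] = { 0x1000, 0x2000, 0x3000 }; /* toy */
		uint64_t wq_block[3];
		int num_pages = 3;

		if (num_pages == 1) {
			/* 0-level CLA: hardware walks the one page directly */
			printf("0-level: block = page 0\n");
		} else {
			/* 1-level CLA: block page holds be64 page addresses */
			for (int i = 0; i < num_pages; i++)
				wq_block[i] = be64_store(page_paddrs[i]);
			printf("1-level: %d page pointers in block\n", num_pages);
		}
		return 0;
	}
]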
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_wq.h b/drivers/net/ethernet/huawei/hinic3/hinic3_wq.h
index ab37893efd7e..9b3f012bec80 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_wq.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_wq.h
@@ -10,10 +10,10 @@
struct hinic3_sq_bufdesc {
/* 31-bits Length, L2NIC only uses length[17:0] */
- u32 len;
- u32 rsvd;
- u32 hi_addr;
- u32 lo_addr;
+ __le32 len;
+ __le32 rsvd;
+ __le32 hi_addr;
+ __le32 lo_addr;
};
/* Work queue is used to submit elements (tx, rx, cmd) to hw.
@@ -59,6 +59,7 @@ static inline void *hinic3_wq_get_one_wqebb(struct hinic3_wq *wq, u16 *pi)
{
*pi = wq->prod_idx & wq->idx_mask;
wq->prod_idx++;
+
return get_q_element(&wq->qpages, *pi, NULL);
}
@@ -67,10 +68,20 @@ static inline void hinic3_wq_put_wqebbs(struct hinic3_wq *wq, u16 num_wqebbs)
wq->cons_idx += num_wqebbs;
}
+static inline u64 hinic3_wq_get_first_wqe_page_addr(const struct hinic3_wq *wq)
+{
+ return wq->qpages.pages[0].align_paddr;
+}
+
+int hinic3_wq_create(struct hinic3_hwdev *hwdev, struct hinic3_wq *wq,
+ u32 q_depth, u16 wqebb_size);
+void hinic3_wq_destroy(struct hinic3_hwdev *hwdev, struct hinic3_wq *wq);
+void hinic3_wq_reset(struct hinic3_wq *wq);
void hinic3_wq_get_multi_wqebbs(struct hinic3_wq *wq,
u16 num_wqebbs, u16 *prod_idx,
struct hinic3_sq_bufdesc **first_part_wqebbs,
struct hinic3_sq_bufdesc **second_part_wqebbs,
u16 *first_part_wqebbs_num);
+bool hinic3_wq_is_0_level_cla(const struct hinic3_wq *wq);
#endif
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index eec971567aac..3808148c1fc7 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -756,6 +756,17 @@ static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
adapter->rx_pool[i].active = 0;
}
+static void ibmvnic_set_safe_max_ind_descs(struct ibmvnic_adapter *adapter)
+{
+ if (adapter->cur_max_ind_descs > IBMVNIC_SAFE_IND_DESC) {
+ netdev_info(adapter->netdev,
+ "set max ind descs from %u to safe limit %u\n",
+ adapter->cur_max_ind_descs,
+ IBMVNIC_SAFE_IND_DESC);
+ adapter->cur_max_ind_descs = IBMVNIC_SAFE_IND_DESC;
+ }
+}
+
static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
struct ibmvnic_rx_pool *pool)
{
@@ -843,7 +854,7 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
sub_crq->rx_add.len = cpu_to_be32(pool->buff_size << shift);
/* if send_subcrq_indirect queue is full, flush to VIOS */
- if (ind_bufp->index == IBMVNIC_MAX_IND_DESCS ||
+ if (ind_bufp->index == adapter->cur_max_ind_descs ||
i == count - 1) {
lpar_rc =
send_subcrq_indirect(adapter, handle,
@@ -862,6 +873,14 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
failure:
if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
+
+ /* Detect platform limit H_PARAMETER */
+ if (lpar_rc == H_PARAMETER)
+ ibmvnic_set_safe_max_ind_descs(adapter);
+
+ /* For all error cases, temporarily drop only this batch.
+ * Rely on TCP/IP retransmissions to retry and recover.
+ */
for (i = ind_bufp->index - 1; i >= 0; --i) {
struct ibmvnic_rx_buff *rx_buff;
@@ -2381,16 +2400,28 @@ static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter,
rc = send_subcrq_direct(adapter, handle,
(u64 *)ind_bufp->indir_arr);
- if (rc)
+ if (rc) {
+ dev_err_ratelimited(&adapter->vdev->dev,
+ "tx_flush failed, rc=%u (%llu entries dma=%pad handle=%llx)\n",
+ rc, entries, &dma_addr, handle);
+ /* Detect platform limit H_PARAMETER */
+ if (rc == H_PARAMETER)
+ ibmvnic_set_safe_max_ind_descs(adapter);
+
+ /* For all error cases, temporarily drop only this batch.
+ * Rely on TCP/IP retransmissions to retry and recover.
+ */
ibmvnic_tx_scrq_clean_buffer(adapter, tx_scrq);
- else
+ } else {
ind_bufp->index = 0;
+ }
return rc;
}
static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+ u32 cur_max_ind_descs = adapter->cur_max_ind_descs;
int queue_num = skb_get_queue_mapping(skb);
u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
struct device *dev = &adapter->vdev->dev;
@@ -2590,7 +2621,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_crq.v1.n_crq_elem = num_entries;
tx_buff->num_entries = num_entries;
/* flush buffer if current entry can not fit */
- if (num_entries + ind_bufp->index > IBMVNIC_MAX_IND_DESCS) {
+ if (num_entries + ind_bufp->index > cur_max_ind_descs) {
lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, true);
if (lpar_rc != H_SUCCESS)
goto tx_flush_err;
@@ -2603,7 +2634,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
ind_bufp->index += num_entries;
if (__netdev_tx_sent_queue(txq, skb->len,
netdev_xmit_more() &&
- ind_bufp->index < IBMVNIC_MAX_IND_DESCS)) {
+ ind_bufp->index < cur_max_ind_descs)) {
lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, true);
if (lpar_rc != H_SUCCESS)
goto tx_err;
@@ -4006,7 +4037,7 @@ static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
}
dma_free_coherent(dev,
- IBMVNIC_IND_ARR_SZ,
+ IBMVNIC_IND_MAX_ARR_SZ,
scrq->ind_buf.indir_arr,
scrq->ind_buf.indir_dma);
@@ -4063,7 +4094,7 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
scrq->ind_buf.indir_arr =
dma_alloc_coherent(dev,
- IBMVNIC_IND_ARR_SZ,
+ IBMVNIC_IND_MAX_ARR_SZ,
&scrq->ind_buf.indir_dma,
GFP_KERNEL);
@@ -6369,6 +6400,19 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
rc = reset_sub_crq_queues(adapter);
}
} else {
+ if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
+ /* After an LPM, reset the max number of indirect
+ * subcrq descriptors per H_SEND_SUB_CRQ_INDIRECT
+ * hcall to the default max (e.g. POWER8 -> POWER10).
+ *
+ * If the new destination platform does not support
+ * the higher maximum (e.g. POWER10 -> POWER8 LPM),
+ * H_PARAMETER will trigger automatic fallback to the
+ * safe minimum limit.
+ */
+ adapter->cur_max_ind_descs = IBMVNIC_MAX_IND_DESCS;
+ }
+
rc = init_sub_crqs(adapter);
}
@@ -6520,6 +6564,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
adapter->wait_for_reset = false;
adapter->last_reset_time = jiffies;
+ adapter->cur_max_ind_descs = IBMVNIC_MAX_IND_DESCS;
rc = register_netdev(netdev);
if (rc) {
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 246ddce753f9..480dc587078f 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -29,8 +29,9 @@
#define IBMVNIC_BUFFS_PER_POOL 100
#define IBMVNIC_MAX_QUEUES 16
#define IBMVNIC_MAX_QUEUE_SZ 4096
-#define IBMVNIC_MAX_IND_DESCS 16
-#define IBMVNIC_IND_ARR_SZ (IBMVNIC_MAX_IND_DESCS * 32)
+#define IBMVNIC_MAX_IND_DESCS 128
+#define IBMVNIC_SAFE_IND_DESC 16
+#define IBMVNIC_IND_MAX_ARR_SZ (IBMVNIC_MAX_IND_DESCS * 32)
#define IBMVNIC_TSO_BUF_SZ 65536
#define IBMVNIC_TSO_BUFS 64
@@ -930,6 +931,7 @@ struct ibmvnic_adapter {
struct ibmvnic_control_ip_offload_buffer ip_offload_ctrl;
dma_addr_t ip_offload_ctrl_tok;
u32 msg_enable;
+ u32 cur_max_ind_descs;
/* Vital Product Data (VPD) */
struct ibmvnic_vpd *vpd;
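
[Editor's note: the ibmvnic change above raises the indirect-descriptor batch limit to 128 and falls back to the safe limit of 16 when the hypervisor rejects a batch with H_PARAMETER; the rejected batch is dropped and TCP retransmission recovers the payload. A toy harness showing the fallback state machine — fake_hcall() and its return codes are stand-ins, not the real hcall interface:

	#include <stdio.h>

	#define MAX_IND_DESCS  128 /* IBMVNIC_MAX_IND_DESCS after this patch */
	#define SAFE_IND_DESCS  16 /* IBMVNIC_SAFE_IND_DESC */
	#define H_SUCCESS        0
	#define H_PARAMETER     -4 /* hypervisor "bad parameter" code */

	/* toy hypervisor that only accepts batches of <= 16 descriptors */
	static long fake_hcall(unsigned int n_descs)
	{
		return n_descs <= SAFE_IND_DESCS ? H_SUCCESS : H_PARAMETER;
	}

	int main(void)
	{
		unsigned int cur_max = MAX_IND_DESCS;
		long rc = fake_hcall(cur_max);

		if (rc == H_PARAMETER) {
			/* this batch is dropped; TCP retransmit recovers it */
			printf("fallback: %u -> %u descriptors\n",
			       cur_max, SAFE_IND_DESCS);
			cur_max = SAFE_IND_DESCS;
		}
		printf("next flush uses %u descriptors, rc=%ld\n",
		       cur_max, fake_hcall(cur_max));
		return 0;
	}

Note the DMA buffer is always sized for the 128-descriptor maximum (IBMVNIC_IND_MAX_ARR_SZ), so shrinking cur_max_ind_descs at runtime never requires a reallocation, and a partition migration back to newer hardware can simply restore the default.]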
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index b05cc0d7a15d..a563a94e2780 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -146,6 +146,7 @@ config IXGBE
tristate "Intel(R) 10GbE PCI Express adapters support"
depends on PCI
depends on PTP_1588_CLOCK_OPTIONAL
+ select LIBIE_FWLOG
select MDIO
select NET_DEVLINK
select PLDMFW
@@ -297,6 +298,7 @@ config ICE
select DIMLIB
select LIBIE
select LIBIE_ADMINQ
+ select LIBIE_FWLOG
select NET_DEVLINK
select PACKING
select PLDMFW
diff --git a/drivers/net/ethernet/intel/Makefile b/drivers/net/ethernet/intel/Makefile
index 04c844ef4964..9a37dc76aef0 100644
--- a/drivers/net/ethernet/intel/Makefile
+++ b/drivers/net/ethernet/intel/Makefile
@@ -4,7 +4,7 @@
#
obj-$(CONFIG_LIBETH) += libeth/
-obj-$(CONFIG_LIBIE) += libie/
+obj-y += libie/
obj-$(CONFIG_E100) += e100.o
obj-$(CONFIG_E1000) += e1000/
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
index 75f3fd1d8d6e..ea6ccf4b728b 100644
--- a/drivers/net/ethernet/intel/e1000/e1000.h
+++ b/drivers/net/ethernet/intel/e1000/e1000.h
@@ -116,7 +116,7 @@ struct e1000_adapter;
#define E1000_MASTER_SLAVE e1000_ms_hw_default
#endif
-#define E1000_MNG_VLAN_NONE (-1)
+#define E1000_MNG_VLAN_NONE 0xFFFF
/* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer
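
[Editor's note: the E1000_MNG_VLAN_NONE change from (-1) to 0xFFFF looks cosmetic but removes a real comparison hazard: mng_vlan_id is a u16, and comparing it against an int -1 promotes the u16 to int 65535, which never equals -1 unless a (u16) cast is sprinkled at every site, as the hunks below remove. A minimal demonstration:

	#include <stdint.h>
	#include <stdio.h>

	#define MNG_VLAN_NONE_OLD (-1)   /* old definition: an int */
	#define MNG_VLAN_NONE_NEW 0xFFFF /* new definition: fits in u16 */

	int main(void)
	{
		uint16_t vid = 0xFFFF; /* "no VLAN" as stored in the u16 field */

		/* u16 promotes to int 65535, never equal to int -1 */
		printf("vid == OLD      : %d\n", vid == MNG_VLAN_NONE_OLD);
		/* the (u16) cast was needed at every comparison site */
		printf("vid == (u16)OLD : %d\n",
		       vid == (uint16_t)MNG_VLAN_NONE_OLD);
		/* with 0xFFFF no cast is required */
		printf("vid == NEW      : %d\n", vid == MNG_VLAN_NONE_NEW);
		return 0;
	}
]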
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index d06d29c6c037..726365c567ef 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -806,7 +806,7 @@ static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data)
}
/* If Checksum is not Correct return error else test passed */
- if ((checksum != (u16)EEPROM_SUM) && !(*data))
+ if (checksum != EEPROM_SUM && !(*data))
*data = 2;
return *data;
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index f9328f2e669f..0e5de52b1067 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -3970,7 +3970,7 @@ s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw)
return E1000_SUCCESS;
#endif
- if (checksum == (u16)EEPROM_SUM)
+ if (checksum == EEPROM_SUM)
return E1000_SUCCESS;
else {
e_dbg("EEPROM Checksum Invalid\n");
@@ -3997,7 +3997,7 @@ s32 e1000_update_eeprom_checksum(struct e1000_hw *hw)
}
checksum += eeprom_data;
}
- checksum = (u16)EEPROM_SUM - checksum;
+ checksum = EEPROM_SUM - checksum;
if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) {
e_dbg("EEPROM Write Error\n");
return -E1000_ERR_EEPROM;
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index d8595e84326d..292389aceb2d 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -313,8 +313,7 @@ static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
} else {
adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
}
- if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
- (vid != old_vid) &&
+ if (old_vid != E1000_MNG_VLAN_NONE && vid != old_vid &&
!test_bit(old_vid, adapter->active_vlans))
e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
old_vid);
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 952898151565..018e61aea787 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -64,7 +64,7 @@ struct e1000_info;
#define AUTO_ALL_MODES 0
#define E1000_EEPROM_APME 0x0400
-#define E1000_MNG_VLAN_NONE (-1)
+#define E1000_MNG_VLAN_NONE 0xFFFF
#define DEFAULT_JUMBO 9234
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index cf01a108a5bb..8e40bb50a01e 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -963,7 +963,7 @@ static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data)
}
/* If Checksum is not Correct return error else test passed */
- if ((checksum != (u16)NVM_SUM) && !(*data))
+ if (checksum != NVM_SUM && !(*data))
*data = 2;
return *data;
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index b27a61fab371..201322dac233 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -2761,7 +2761,7 @@ static void e1000e_vlan_filter_disable(struct e1000_adapter *adapter)
rctl &= ~(E1000_RCTL_VFE | E1000_RCTL_CFIEN);
ew32(RCTL, rctl);
- if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) {
+ if (adapter->mng_vlan_id != E1000_MNG_VLAN_NONE) {
e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
adapter->mng_vlan_id);
adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
@@ -2828,7 +2828,7 @@ static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
adapter->mng_vlan_id = vid;
}
- if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && (vid != old_vid))
+ if (old_vid != E1000_MNG_VLAN_NONE && vid != old_vid)
e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), old_vid);
}
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c
index 16369e6d245a..4bde1c9de1b9 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.c
+++ b/drivers/net/ethernet/intel/e1000e/nvm.c
@@ -564,7 +564,7 @@ s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw)
return 0;
}
- if (checksum != (u16)NVM_SUM) {
+ if (checksum != NVM_SUM) {
e_dbg("NVM Checksum Invalid\n");
return -E1000_ERR_NVM;
}
@@ -594,7 +594,7 @@ s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw)
}
checksum += nvm_data;
}
- checksum = (u16)NVM_SUM - checksum;
+ checksum = NVM_SUM - checksum;
ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum);
if (ret_val)
e_dbg("NVM Write Error while updating checksum.\n");
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.c b/drivers/net/ethernet/intel/fm10k/fm10k_common.c
index f51a63fca513..1f919a50c765 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_common.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.c
@@ -447,17 +447,16 @@ void fm10k_update_hw_stats_q(struct fm10k_hw *hw, struct fm10k_hw_stats_q *q,
/**
* fm10k_unbind_hw_stats_q - Unbind the queue counters from their queues
* @q: pointer to the ring of hardware statistics queue
- * @idx: index pointing to the start of the ring iteration
* @count: number of queues to iterate over
*
* Function invalidates the index values for the queues so any updates that
* may have happened are ignored and the base for the queue stats is reset.
**/
-void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 idx, u32 count)
+void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 count)
{
u32 i;
- for (i = 0; i < count; i++, idx++, q++) {
+ for (i = 0; i < count; i++, q++) {
q->rx_stats_idx = 0;
q->tx_stats_idx = 0;
}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.h b/drivers/net/ethernet/intel/fm10k/fm10k_common.h
index 4c48fb73b3e7..13fca6a91a01 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_common.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.h
@@ -43,6 +43,6 @@ u32 fm10k_read_hw_stats_32b(struct fm10k_hw *hw, u32 addr,
void fm10k_update_hw_stats_q(struct fm10k_hw *hw, struct fm10k_hw_stats_q *q,
u32 idx, u32 count);
#define fm10k_unbind_hw_stats_32b(s) ((s)->base_h = 0)
-void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 idx, u32 count);
+void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 count);
s32 fm10k_get_host_state_generic(struct fm10k_hw *hw, bool *host_ready);
#endif /* _FM10K_COMMON_H_ */
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index 1954a04460d1..bf2029144c1d 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -560,7 +560,7 @@ static int fm10k_set_ringparam(struct net_device *netdev,
/* allocate temporary buffer to store rings in */
i = max_t(int, interface->num_tx_queues, interface->num_rx_queues);
- temp_ring = vmalloc(array_size(i, sizeof(struct fm10k_ring)));
+ temp_ring = vmalloc_array(i, sizeof(struct fm10k_ring));
if (!temp_ring) {
err = -ENOMEM;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 142f07ca8bc0..b8c15b837fda 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -37,7 +37,7 @@ static int __init fm10k_init_module(void)
pr_info("%s\n", fm10k_copyright);
/* create driver workqueue */
- fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
+ fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM | WQ_PERCPU, 0,
fm10k_driver_name);
if (!fm10k_workqueue)
return -ENOMEM;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index b9dd7b719832..3394645a18fe 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -1389,7 +1389,7 @@ static void fm10k_rebind_hw_stats_pf(struct fm10k_hw *hw,
fm10k_unbind_hw_stats_32b(&stats->nodesc_drop);
/* Unbind Queue Statistics */
- fm10k_unbind_hw_stats_q(stats->q, 0, hw->mac.max_queues);
+ fm10k_unbind_hw_stats_q(stats->q, hw->mac.max_queues);
/* Reinitialize bases for all stats */
fm10k_update_hw_stats_pf(hw, stats);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
index 7fb1961f2921..6861a0bdc14e 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
@@ -465,7 +465,7 @@ static void fm10k_rebind_hw_stats_vf(struct fm10k_hw *hw,
struct fm10k_hw_stats *stats)
{
/* Unbind Queue Statistics */
- fm10k_unbind_hw_stats_q(stats->q, 0, hw->mac.max_queues);
+ fm10k_unbind_hw_stats_q(stats->q, hw->mac.max_queues);
/* Reinitialize bases for all stats */
fm10k_update_hw_stats_vf(hw, stats);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 529d5501baac..50be0a60ae13 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -16635,7 +16635,7 @@ static int __init i40e_init_module(void)
* since we need to be able to guarantee forward progress even under
* memory pressure.
*/
- i40e_wq = alloc_workqueue("%s", 0, 0, i40e_driver_name);
+ i40e_wq = alloc_workqueue("%s", WQ_PERCPU, 0, i40e_driver_name);
if (!i40e_wq) {
pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
return -ENOMEM;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index b194eae03208..cc0b9efc2637 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2148,10 +2148,10 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
memcpy(&skinfo->frags[skinfo->nr_frags], &sinfo->frags[0],
sizeof(skb_frag_t) * nr_frags);
- xdp_update_skb_shared_info(skb, skinfo->nr_frags + nr_frags,
- sinfo->xdp_frags_size,
- nr_frags * xdp->frame_sz,
- xdp_buff_is_frag_pfmemalloc(xdp));
+ xdp_update_skb_frags_info(skb, skinfo->nr_frags + nr_frags,
+ sinfo->xdp_frags_size,
+ nr_frags * xdp->frame_sz,
+ xdp_buff_get_skb_flags(xdp));
/* First buffer has already been processed, so bump ntc */
if (++rx_ring->next_to_clean == rx_ring->count)
@@ -2203,10 +2203,9 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
skb_metadata_set(skb, metasize);
if (unlikely(xdp_buff_has_frags(xdp))) {
- xdp_update_skb_shared_info(skb, nr_frags,
- sinfo->xdp_frags_size,
- nr_frags * xdp->frame_sz,
- xdp_buff_is_frag_pfmemalloc(xdp));
+ xdp_update_skb_frags_info(skb, nr_frags, sinfo->xdp_frags_size,
+ nr_frags * xdp->frame_sz,
+ xdp_buff_get_skb_flags(xdp));
i40e_process_rx_buffs(rx_ring, I40E_XDP_PASS, xdp);
} else {
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 69054af4689a..c2fbe443ef85 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -5491,7 +5491,7 @@ static int iavf_resume(struct device *dev_d)
{
struct pci_dev *pdev = to_pci_dev(dev_d);
struct iavf_adapter *adapter;
- u32 err;
+ int err;
adapter = iavf_pdev_to_adapter(pdev);
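
[Editor's note: the iavf one-liner above changes err from u32 to int. Kernel error codes are negative, so storing one in an unsigned variable makes every `err < 0` check vacuously false and turns error prints into large positive numbers. A minimal demonstration of the hazard:

	#include <stdio.h>

	#define MY_ENOMEM 12 /* stand-in for the errno constant */

	int main(void)
	{
		unsigned int err_u = -MY_ENOMEM; /* the old 'u32 err' */
		int err_s = -MY_ENOMEM;          /* the fixed 'int err' */

		/* an unsigned value is never negative: check silently passes */
		printf("u32 err < 0 ? %d (prints as %u)\n",
		       err_u < 0 ? 1 : 0, err_u);
		printf("int err < 0 ? %d (prints as %d)\n",
		       err_s < 0 ? 1 : 0, err_s);
		return 0;
	}
]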
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index d0f9c9492363..5b2c666496e7 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -42,14 +42,15 @@ ice-y := ice_main.o \
ice_ethtool.o \
ice_repr.o \
ice_tc_lib.o \
- ice_fwlog.o \
ice_debugfs.o \
ice_adapter.o
ice-$(CONFIG_PCI_IOV) += \
ice_sriov.o \
- ice_virtchnl.o \
- ice_virtchnl_allowlist.o \
- ice_virtchnl_fdir.o \
+ virt/allowlist.o \
+ virt/fdir.o \
+ virt/queues.o \
+ virt/virtchnl.o \
+ virt/rss.o \
ice_vf_mbx.o \
ice_vf_vsi_vlan_ops.o \
ice_vf_lib.o
diff --git a/drivers/net/ethernet/intel/ice/devlink/health.c b/drivers/net/ethernet/intel/ice/devlink/health.c
index ab519c0f28bf..8e9a8a8178d4 100644
--- a/drivers/net/ethernet/intel/ice/devlink/health.c
+++ b/drivers/net/ethernet/intel/ice/devlink/health.c
@@ -450,9 +450,8 @@ ice_init_devlink_rep(struct ice_pf *pf,
{
struct devlink *devlink = priv_to_devlink(pf);
struct devlink_health_reporter *rep;
- const u64 graceful_period = 0;
- rep = devl_health_reporter_create(devlink, ops, graceful_period, pf);
+ rep = devl_health_reporter_create(devlink, ops, pf);
if (IS_ERR(rep)) {
struct device *dev = ice_pf_to_dev(pf);
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 8a8a01a4bb40..22b8323ff0d0 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -84,7 +84,11 @@
#define ICE_BAR0 0
#define ICE_REQ_DESC_MULTIPLE 32
#define ICE_MIN_NUM_DESC 64
-#define ICE_MAX_NUM_DESC 8160
+#define ICE_MAX_NUM_DESC_E810 8160
+#define ICE_MAX_NUM_DESC_E830 8096
+#define ICE_MAX_NUM_DESC_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? \
+ ICE_MAX_NUM_DESC_E830 : \
+ ICE_MAX_NUM_DESC_E810)
#define ICE_DFLT_MIN_RX_DESC 512
#define ICE_DFLT_NUM_TX_DESC 256
#define ICE_DFLT_NUM_RX_DESC 2048
@@ -200,9 +204,11 @@ enum ice_feature {
ICE_F_SMA_CTRL,
ICE_F_CGU,
ICE_F_GNSS,
+ ICE_F_TXTIME,
ICE_F_GCS,
ICE_F_ROCE_LAG,
ICE_F_SRIOV_LAG,
+ ICE_F_SRIOV_AA_LAG,
ICE_F_MBX_LIMIT,
ICE_F_MAX
};
@@ -567,9 +573,6 @@ struct ice_pf {
struct ice_sw *first_sw; /* first switch created by firmware */
u16 eswitch_mode; /* current mode of eswitch */
struct dentry *ice_debugfs_pf;
- struct dentry *ice_debugfs_pf_fwlog;
- /* keep track of all the dentrys for FW log modules */
- struct dentry **ice_debugfs_pf_fwlog_modules;
struct ice_vfs vfs;
DECLARE_BITMAP(features, ICE_F_MAX);
DECLARE_BITMAP(state, ICE_STATE_NBITS);
@@ -577,6 +580,7 @@ struct ice_pf {
DECLARE_BITMAP(misc_thread, ICE_MISC_THREAD_NBITS);
unsigned long *avail_txqs; /* bitmap to track PF Tx queue usage */
unsigned long *avail_rxqs; /* bitmap to track PF Rx queue usage */
+ unsigned long *txtime_txqs; /* bitmap to track PF Tx Time queue */
unsigned long serv_tmr_period;
unsigned long serv_tmr_prev;
struct timer_list serv_tmr;
@@ -751,6 +755,31 @@ static inline void ice_set_ring_xdp(struct ice_tx_ring *ring)
}
/**
+ * ice_is_txtime_ena - check if Tx Time is enabled on the Tx ring
+ * @ring: pointer to Tx ring
+ *
+ * Return: true if the Tx ring has Tx Time enabled, false otherwise.
+ */
+static inline bool ice_is_txtime_ena(const struct ice_tx_ring *ring)
+{
+ struct ice_vsi *vsi = ring->vsi;
+ struct ice_pf *pf = vsi->back;
+
+ return test_bit(ring->q_index, pf->txtime_txqs);
+}
+
+/**
+ * ice_is_txtime_cfg - check if Tx Time is configured on the Tx ring
+ * @ring: pointer to Tx ring
+ *
+ * Return: true if the Tx ring is configured for Tx Time, false otherwise.

+ */
+static inline bool ice_is_txtime_cfg(const struct ice_tx_ring *ring)
+{
+ return !!(ring->flags & ICE_TX_FLAGS_TXTIME);
+}
+
+/**
* ice_get_xp_from_qid - get ZC XSK buffer pool bound to a queue ID
* @vsi: pointer to VSI
* @qid: index of a queue to look at XSK buff pool presence
@@ -907,11 +936,10 @@ static inline bool ice_is_adq_active(struct ice_pf *pf)
return false;
}
-void ice_debugfs_fwlog_init(struct ice_pf *pf);
+int ice_debugfs_pf_init(struct ice_pf *pf);
void ice_debugfs_pf_deinit(struct ice_pf *pf);
void ice_debugfs_init(void);
void ice_debugfs_exit(void);
-void ice_pf_fwlog_update_module(struct ice_pf *pf, int log_level, int module);
bool netif_is_ice(const struct net_device *dev);
int ice_vsi_setup_tx_rings(struct ice_vsi *vsi);
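
[Editor's note: the new ice_is_txtime_ena() helper above consults a PF-wide txtime_txqs bitmap indexed by queue, the same unsigned-long-array pattern the PF already uses for avail_txqs/avail_rxqs. A portable sketch of that bitmap convention; set_bit_ul/test_bit_ul here are simplified, non-atomic stand-ins for the kernel's set_bit()/test_bit():

	#include <stdio.h>

	#define BITS_PER_LONG   (8 * sizeof(unsigned long))
	#define BITMAP_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

	static void set_bit_ul(unsigned long *map, unsigned int n)
	{
		map[n / BITS_PER_LONG] |= 1UL << (n % BITS_PER_LONG);
	}

	static int test_bit_ul(const unsigned long *map, unsigned int n)
	{
		return (map[n / BITS_PER_LONG] >> (n % BITS_PER_LONG)) & 1;
	}

	int main(void)
	{
		unsigned long txtime_txqs[BITMAP_LONGS(256)] = { 0 };

		set_bit_ul(txtime_txqs, 70); /* enable Tx Time on queue 70 */
		printf("q70 txtime: %d, q71 txtime: %d\n",
		       test_bit_ul(txtime_txqs, 70),
		       test_bit_ul(txtime_txqs, 71));
		return 0;
	}

Keeping the enablement bitmap on the PF rather than a flag on each ring lets the helper answer the question before the ring itself is configured, which is why the patch also has the separate ice_is_txtime_cfg() check on ring->flags.]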
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 3bd3ea3af888..859e9c66f3e7 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -33,6 +33,10 @@ typedef struct __packed { u8 buf[ICE_TXQ_CTX_SZ]; } ice_txq_ctx_buf_t;
typedef struct __packed { u8 buf[ICE_TXQ_CTX_FULL_SZ]; } ice_txq_ctx_buf_full_t;
+#define ICE_TXTIME_CTX_SZ 25
+
+typedef struct __packed { u8 buf[ICE_TXTIME_CTX_SZ]; } ice_txtime_ctx_buf_t;
+
/* Queue Shutdown (direct 0x0003) */
struct ice_aqc_q_shutdown {
u8 driver_unloading;
@@ -2060,6 +2064,10 @@ struct ice_aqc_cfg_txqs {
#define ICE_AQC_Q_CFG_SRC_PRT_M 0x7
#define ICE_AQC_Q_CFG_DST_PRT_S 3
#define ICE_AQC_Q_CFG_DST_PRT_M (0x7 << ICE_AQC_Q_CFG_DST_PRT_S)
+#define ICE_AQC_Q_CFG_MODE_M GENMASK(7, 6)
+#define ICE_AQC_Q_CFG_MODE_SAME_PF 0x0
+#define ICE_AQC_Q_CFG_MODE_GIVE_OWN 0x1
+#define ICE_AQC_Q_CFG_MODE_KEEP_OWN 0x2
u8 time_out;
#define ICE_AQC_Q_CFG_TIMEOUT_S 2
#define ICE_AQC_Q_CFG_TIMEOUT_M (0x1F << ICE_AQC_Q_CFG_TIMEOUT_S)
@@ -2113,6 +2121,34 @@ struct ice_aqc_add_rdma_qset_data {
struct ice_aqc_add_tx_rdma_qset_entry rdma_qsets[];
};
+/* Set Tx Time LAN Queue (indirect 0x0C35) */
+struct ice_aqc_set_txtimeqs {
+ __le16 q_id;
+ __le16 q_amount;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* This is the descriptor of each queue entry for the Set Tx Time Queue
+ * command (0x0C35). Only used within struct ice_aqc_set_txtime_qgrp.
+ */
+struct ice_aqc_set_txtimeqs_perq {
+ u8 reserved[4];
+ ice_txtime_ctx_buf_t txtime_ctx;
+ u8 reserved1[3];
+};
+
+/* The format of the command buffer for Set Tx Time Queue (0x0C35)
+ * is an array of the following structs. Please note that the length of
+ * each struct ice_aqc_set_txtime_qgrp is variable due to the variable
+ * number of queues in each group!
+ */
+struct ice_aqc_set_txtime_qgrp {
+ u8 reserved[8];
+ struct ice_aqc_set_txtimeqs_perq txtimeqs[];
+};
+
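Since struct ice_aqc_set_txtime_qgrp ends in a flexible array, the buffer for command 0x0C35 must be sized with struct_size() rather than sizeof(). A sketch for n queues, assuming the usual <linux/overflow.h> semantics:

/* Sketch: size the variable-length 0x0C35 payload for n queues.
 * struct_size() computes sizeof(*qgrp) + n * sizeof(txtimeqs[0])
 * with overflow checking; it only needs the pointer's type.
 */
struct ice_aqc_set_txtime_qgrp *qgrp;
u16 buf_len = struct_size(qgrp, txtimeqs, n);

qgrp = kzalloc(buf_len, GFP_KERNEL);
if (!qgrp)
	return -ENOMEM;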
/* Download Package (indirect 0x0C40) */
/* Also used for Update Package (indirect 0x0C41 and 0x0C42) */
struct ice_aqc_download_pkg {
@@ -2395,42 +2431,6 @@ struct ice_aqc_event_lan_overflow {
u8 reserved[8];
};
-enum ice_aqc_fw_logging_mod {
- ICE_AQC_FW_LOG_ID_GENERAL = 0,
- ICE_AQC_FW_LOG_ID_CTRL,
- ICE_AQC_FW_LOG_ID_LINK,
- ICE_AQC_FW_LOG_ID_LINK_TOPO,
- ICE_AQC_FW_LOG_ID_DNL,
- ICE_AQC_FW_LOG_ID_I2C,
- ICE_AQC_FW_LOG_ID_SDP,
- ICE_AQC_FW_LOG_ID_MDIO,
- ICE_AQC_FW_LOG_ID_ADMINQ,
- ICE_AQC_FW_LOG_ID_HDMA,
- ICE_AQC_FW_LOG_ID_LLDP,
- ICE_AQC_FW_LOG_ID_DCBX,
- ICE_AQC_FW_LOG_ID_DCB,
- ICE_AQC_FW_LOG_ID_XLR,
- ICE_AQC_FW_LOG_ID_NVM,
- ICE_AQC_FW_LOG_ID_AUTH,
- ICE_AQC_FW_LOG_ID_VPD,
- ICE_AQC_FW_LOG_ID_IOSF,
- ICE_AQC_FW_LOG_ID_PARSER,
- ICE_AQC_FW_LOG_ID_SW,
- ICE_AQC_FW_LOG_ID_SCHEDULER,
- ICE_AQC_FW_LOG_ID_TXQ,
- ICE_AQC_FW_LOG_ID_RSVD,
- ICE_AQC_FW_LOG_ID_POST,
- ICE_AQC_FW_LOG_ID_WATCHDOG,
- ICE_AQC_FW_LOG_ID_TASK_DISPATCH,
- ICE_AQC_FW_LOG_ID_MNG,
- ICE_AQC_FW_LOG_ID_SYNCE,
- ICE_AQC_FW_LOG_ID_HEALTH,
- ICE_AQC_FW_LOG_ID_TSDRV,
- ICE_AQC_FW_LOG_ID_PFREG,
- ICE_AQC_FW_LOG_ID_MDLVER,
- ICE_AQC_FW_LOG_ID_MAX,
-};
-
enum ice_aqc_health_status_mask {
ICE_AQC_HEALTH_STATUS_SET_PF_SPECIFIC_MASK = BIT(0),
ICE_AQC_HEALTH_STATUS_SET_ALL_PF_MASK = BIT(1),
@@ -2512,48 +2512,6 @@ struct ice_aqc_health_status_elem {
__le32 internal_data2;
};
-/* Set FW Logging configuration (indirect 0xFF30)
- * Register for FW Logging (indirect 0xFF31)
- * Query FW Logging (indirect 0xFF32)
- * FW Log Event (indirect 0xFF33)
- */
-struct ice_aqc_fw_log {
- u8 cmd_flags;
-#define ICE_AQC_FW_LOG_CONF_UART_EN BIT(0)
-#define ICE_AQC_FW_LOG_CONF_AQ_EN BIT(1)
-#define ICE_AQC_FW_LOG_QUERY_REGISTERED BIT(2)
-#define ICE_AQC_FW_LOG_CONF_SET_VALID BIT(3)
-#define ICE_AQC_FW_LOG_AQ_REGISTER BIT(0)
-#define ICE_AQC_FW_LOG_AQ_QUERY BIT(2)
-
- u8 rsp_flag;
- __le16 fw_rt_msb;
- union {
- struct {
- __le32 fw_rt_lsb;
- } sync;
- struct {
- __le16 log_resolution;
-#define ICE_AQC_FW_LOG_MIN_RESOLUTION (1)
-#define ICE_AQC_FW_LOG_MAX_RESOLUTION (128)
-
- __le16 mdl_cnt;
- } cfg;
- } ops;
- __le32 addr_high;
- __le32 addr_low;
-};
-
-/* Response Buffer for:
- * Set Firmware Logging Configuration (0xFF30)
- * Query FW Logging (0xFF32)
- */
-struct ice_aqc_fw_log_cfg_resp {
- __le16 module_identifier;
- u8 log_level;
- u8 rsvd0;
-};
-
/* Admin Queue command opcodes */
enum ice_adminq_opc {
/* AQ commands */
@@ -2688,6 +2646,9 @@ enum ice_adminq_opc {
ice_aqc_opc_cfg_txqs = 0x0C32,
ice_aqc_opc_add_rdma_qset = 0x0C33,
+ /* Tx Time queue commands */
+ ice_aqc_opc_set_txtimeqs = 0x0C35,
+
/* package commands */
ice_aqc_opc_download_pkg = 0x0C40,
ice_aqc_opc_upload_section = 0x0C41,
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index c5da8e9cc0a0..2d35a278c555 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -242,7 +242,8 @@ static void ice_cfg_itr_gran(struct ice_hw *hw)
* @ring: ring to get the absolute queue index
* @tc: traffic class number
*/
-static u16 ice_calc_txq_handle(struct ice_vsi *vsi, struct ice_tx_ring *ring, u8 tc)
+static u16
+ice_calc_txq_handle(const struct ice_vsi *vsi, struct ice_tx_ring *ring, u8 tc)
{
WARN_ONCE(ice_ring_is_xdp(ring) && tc, "XDP ring can't belong to TC other than 0\n");
@@ -278,30 +279,20 @@ static void ice_cfg_xps_tx_ring(struct ice_tx_ring *ring)
}
/**
- * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
- * @ring: The Tx ring to configure
- * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized
- * @pf_q: queue index in the PF space
+ * ice_set_txq_ctx_vmvf - set queue context VM/VF type and number by VSI type
+ * @ring: the Tx ring to configure
+ * @vmvf_type: VM/VF type
+ * @vmvf_num: VM/VF number
*
- * Configure the Tx descriptor ring in TLAN context.
+ * Return: 0 on success and a negative value on error.
*/
-static void
-ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
+static int
+ice_set_txq_ctx_vmvf(struct ice_tx_ring *ring, u8 *vmvf_type, u16 *vmvf_num)
{
struct ice_vsi *vsi = ring->vsi;
- struct ice_hw *hw = &vsi->back->hw;
-
- tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;
-
- tlan_ctx->port_num = vsi->port_info->lport;
-
- /* Transmit Queue Length */
- tlan_ctx->qlen = ring->count;
-
- ice_set_cgd_num(tlan_ctx, ring->dcb_tc);
+ struct ice_hw *hw;
- /* PF number */
- tlan_ctx->pf_num = hw->pf_id;
+ hw = &vsi->back->hw;
/* queue belongs to a specific VSI type
* VF / VM index should be programmed per vmvf_type setting:
@@ -314,21 +305,60 @@ ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf
case ICE_VSI_CTRL:
case ICE_VSI_PF:
if (ring->ch)
- tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
+ *vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
else
- tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
+ *vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
break;
case ICE_VSI_VF:
/* Firmware expects vmvf_num to be absolute VF ID */
- tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf->vf_id;
- tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
+ *vmvf_num = hw->func_caps.vf_base_id + vsi->vf->vf_id;
+ *vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
break;
case ICE_VSI_SF:
- tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
+ *vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
break;
default:
- return;
+ dev_info(ice_pf_to_dev(vsi->back),
+ "Unable to set VMVF type for VSI type %d\n",
+ vsi->type);
+ return -EINVAL;
}
+ return 0;
+}
+
+/**
+ * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
+ * @ring: the Tx ring to configure
+ * @tlan_ctx: pointer to the Tx LAN queue context structure to be initialized
+ * @pf_q: queue index in the PF space
+ *
+ * Configure the Tx descriptor ring in TLAN context.
+ *
+ * Return: 0 on success and a negative value on error.
+ */
+static int
+ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
+{
+ struct ice_vsi *vsi = ring->vsi;
+ struct ice_hw *hw;
+ int err;
+
+ hw = &vsi->back->hw;
+ tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;
+ tlan_ctx->port_num = vsi->port_info->lport;
+
+ /* Transmit Queue Length */
+ tlan_ctx->qlen = ring->count;
+
+ ice_set_cgd_num(tlan_ctx, ring->dcb_tc);
+
+ /* PF number */
+ tlan_ctx->pf_num = hw->pf_id;
+
+ err = ice_set_txq_ctx_vmvf(ring, &tlan_ctx->vmvf_type,
+ &tlan_ctx->vmvf_num);
+ if (err)
+ return err;
/* make sure the context is associated with the right VSI */
if (ring->ch)
@@ -355,6 +385,80 @@ ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf
* 1: Legacy Host Interface
*/
tlan_ctx->legacy_int = ICE_TX_LEGACY;
+
+ return 0;
+}
+
+/**
+ * ice_setup_txtime_ctx - setup a struct ice_txtime_ctx instance
+ * @ring: the tstamp ring to configure
+ * @txtime_ctx: pointer to the Tx time queue context structure to be initialized
+ *
+ * Return: 0 on success and a negative value on error.
+ */
+static int
+ice_setup_txtime_ctx(const struct ice_tstamp_ring *ring,
+ struct ice_txtime_ctx *txtime_ctx)
+{
+ struct ice_tx_ring *tx_ring = ring->tx_ring;
+ struct ice_vsi *vsi = tx_ring->vsi;
+ struct ice_hw *hw = &vsi->back->hw;
+ int err;
+
+ txtime_ctx->base = ring->dma >> ICE_TXTIME_CTX_BASE_S;
+
+ /* Tx time Queue Length */
+ txtime_ctx->qlen = ring->count;
+ txtime_ctx->txtime_ena_q = 1;
+
+ /* PF number */
+ txtime_ctx->pf_num = hw->pf_id;
+
+ err = ice_set_txq_ctx_vmvf(tx_ring, &txtime_ctx->vmvf_type,
+ &txtime_ctx->vmvf_num);
+ if (err)
+ return err;
+
+ /* make sure the context is associated with the right VSI */
+ if (tx_ring->ch)
+ txtime_ctx->src_vsi = tx_ring->ch->vsi_num;
+ else
+ txtime_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);
+
+ txtime_ctx->ts_res = ICE_TXTIME_CTX_RESOLUTION_128NS;
+ txtime_ctx->drbell_mode_32 = ICE_TXTIME_CTX_DRBELL_MODE_32;
+ txtime_ctx->ts_fetch_prof_id = ICE_TXTIME_CTX_FETCH_PROF_ID_0;
+
+ return 0;
+}
+
+/**
+ * ice_calc_ts_ring_count - calculate the number of Tx time stamp descriptors
+ * @tx_ring: Tx ring to calculate the count for
+ *
+ * Return: the number of Tx time stamp descriptors.
+ */
+u16 ice_calc_ts_ring_count(struct ice_tx_ring *tx_ring)
+{
+ u16 prof = ICE_TXTIME_CTX_FETCH_PROF_ID_0;
+ struct ice_vsi *vsi = tx_ring->vsi;
+ struct ice_hw *hw = &vsi->back->hw;
+ u16 max_fetch_desc = 0, fetch, i;
+ u32 reg;
+
+ for (i = 0; i < ICE_TXTIME_FETCH_PROFILE_CNT; i++) {
+ reg = rd32(hw, E830_GLTXTIME_FETCH_PROFILE(prof, 0));
+ fetch = FIELD_GET(E830_GLTXTIME_FETCH_PROFILE_FETCH_TS_DESC_M,
+ reg);
+ max_fetch_desc = max(fetch, max_fetch_desc);
+ }
+
+ if (!max_fetch_desc)
+ max_fetch_desc = ICE_TXTIME_FETCH_TS_DESC_DFLT;
+
+ max_fetch_desc = ALIGN(max_fetch_desc, ICE_REQ_DESC_MULTIPLE);
+
+ return tx_ring->count + max_fetch_desc;
}
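The timestamp ring is sized to absorb the deepest descriptor prefetch any fetch profile may issue on top of the Tx ring itself, rounded up to the ring-length granularity. A worked example with hypothetical values:

/* Worked example (hypothetical numbers):
 *   tx_ring->count                 = 256
 *   deepest FETCH_TS_DESC profile  = 34
 *   ICE_REQ_DESC_MULTIPLE          = 32 (assumed)
 *
 *   ALIGN(34, 32)      = 64
 *   timestamp ring len = 256 + 64  = 320 descriptors
 */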
/**
@@ -882,13 +986,49 @@ void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
}
/**
+ * ice_cfg_tstamp - Configure Tx time stamp queue
+ * @tx_ring: Tx ring to be configured with timestamping
+ *
+ * Return: 0 on success and a negative value on error.
+ */
+static int
+ice_cfg_tstamp(struct ice_tx_ring *tx_ring)
+{
+ DEFINE_RAW_FLEX(struct ice_aqc_set_txtime_qgrp, txtime_qg_buf,
+ txtimeqs, 1);
+ u8 txtime_buf_len = struct_size(txtime_qg_buf, txtimeqs, 1);
+ struct ice_tstamp_ring *tstamp_ring = tx_ring->tstamp_ring;
+ struct ice_txtime_ctx txtime_ctx = {};
+ struct ice_vsi *vsi = tx_ring->vsi;
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ u16 pf_q = tx_ring->reg_idx;
+ int err;
+
+ err = ice_setup_txtime_ctx(tstamp_ring, &txtime_ctx);
+ if (err) {
+ dev_err(ice_pf_to_dev(pf), "Failed to setup Tx time queue context for queue %d, error: %d\n",
+ pf_q, err);
+ return err;
+ }
+ ice_pack_txtime_ctx(&txtime_ctx,
+ &txtime_qg_buf->txtimeqs[0].txtime_ctx);
+
+ tstamp_ring->tail = hw->hw_addr + E830_GLQTX_TXTIME_DBELL_LSB(pf_q);
+ return ice_aq_set_txtimeq(hw, pf_q, 1, txtime_qg_buf,
+ txtime_buf_len, NULL);
+}
+
+/**
* ice_vsi_cfg_txq - Configure single Tx queue
* @vsi: the VSI that queue belongs to
* @ring: Tx ring to be configured
* @qg_buf: queue group buffer
+ *
+ * Return: 0 on success and a negative value on error.
*/
static int
-ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
+ice_vsi_cfg_txq(const struct ice_vsi *vsi, struct ice_tx_ring *ring,
struct ice_aqc_add_tx_qgrp *qg_buf)
{
u8 buf_len = struct_size(qg_buf, txqs, 1);
@@ -897,15 +1037,20 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
struct ice_channel *ch = ring->ch;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
+ u32 pf_q, vsi_idx;
int status;
- u16 pf_q;
u8 tc;
/* Configure XPS */
ice_cfg_xps_tx_ring(ring);
pf_q = ring->reg_idx;
- ice_setup_tx_ctx(ring, &tlan_ctx, pf_q);
+ status = ice_setup_tx_ctx(ring, &tlan_ctx, pf_q);
+ if (status) {
+ dev_err(ice_pf_to_dev(pf), "Failed to setup Tx context for queue %d, error: %d\n",
+ pf_q, status);
+ return status;
+ }
/* copy context contents into the qg_buf */
qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
ice_pack_txq_ctx(&tlan_ctx, &qg_buf->txqs[0].txq_ctx);
@@ -925,14 +1070,15 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
*/
ring->q_handle = ice_calc_txq_handle(vsi, ring, tc);
- if (ch)
- status = ice_ena_vsi_txq(vsi->port_info, ch->ch_vsi->idx, 0,
- ring->q_handle, 1, qg_buf, buf_len,
- NULL);
- else
- status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
- ring->q_handle, 1, qg_buf, buf_len,
- NULL);
+ if (ch) {
+ tc = 0;
+ vsi_idx = ch->ch_vsi->idx;
+ } else {
+ vsi_idx = vsi->idx;
+ }
+
+ status = ice_ena_vsi_txq(vsi->port_info, vsi_idx, tc, ring->q_handle,
+ 1, qg_buf, buf_len, NULL);
if (status) {
dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %d\n",
status);
@@ -947,7 +1093,32 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
if (pf_q == le16_to_cpu(txq->txq_id))
ring->txq_teid = le32_to_cpu(txq->q_teid);
+ if (ice_is_txtime_ena(ring)) {
+ status = ice_alloc_setup_tstamp_ring(ring);
+ if (status) {
+ dev_err(ice_pf_to_dev(pf),
+ "Failed to allocate Tx timestamp ring, error: %d\n",
+ status);
+ goto err_setup_tstamp;
+ }
+
+ status = ice_cfg_tstamp(ring);
+ if (status) {
+ dev_err(ice_pf_to_dev(pf), "Failed to set Tx Time queue context, error: %d\n",
+ status);
+ goto err_cfg_tstamp;
+ }
+ }
return 0;
+
+err_cfg_tstamp:
+ ice_free_tx_tstamp_ring(ring);
+err_setup_tstamp:
+ ice_dis_vsi_txq(vsi->port_info, vsi_idx, tc, 1, &ring->q_handle,
+ &ring->reg_idx, &ring->txq_teid, ICE_NO_RESET,
+ tlan_ctx.vmvf_num, NULL);
+
+ return status;
}
int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings,
@@ -1206,3 +1377,148 @@ ice_fill_txq_meta(const struct ice_vsi *vsi, struct ice_tx_ring *ring,
txq_meta->tc = tc;
}
}
+
+/**
+ * ice_qp_reset_stats - Resets all stats for rings of a given index
+ * @vsi: VSI that contains rings of interest
+ * @q_idx: ring index in array
+ */
+static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
+{
+ struct ice_vsi_stats *vsi_stat;
+ struct ice_pf *pf;
+
+ pf = vsi->back;
+ if (!pf->vsi_stats)
+ return;
+
+ vsi_stat = pf->vsi_stats[vsi->idx];
+ if (!vsi_stat)
+ return;
+
+ memset(&vsi_stat->rx_ring_stats[q_idx]->rx_stats, 0,
+ sizeof(vsi_stat->rx_ring_stats[q_idx]->rx_stats));
+ memset(&vsi_stat->tx_ring_stats[q_idx]->stats, 0,
+ sizeof(vsi_stat->tx_ring_stats[q_idx]->stats));
+ if (vsi->xdp_rings)
+ memset(&vsi->xdp_rings[q_idx]->ring_stats->stats, 0,
+ sizeof(vsi->xdp_rings[q_idx]->ring_stats->stats));
+}
+
+/**
+ * ice_qp_clean_rings - Cleans all the rings of a given index
+ * @vsi: VSI that contains rings of interest
+ * @q_idx: ring index in array
+ */
+static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
+{
+ ice_clean_tx_ring(vsi->tx_rings[q_idx]);
+ if (vsi->xdp_rings)
+ ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
+ ice_clean_rx_ring(vsi->rx_rings[q_idx]);
+}
+
+/**
+ * ice_qp_dis - Disables a queue pair
+ * @vsi: VSI of interest
+ * @q_idx: ring index in array
+ *
+ * Return: 0 on success, negative on failure.
+ */
+int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
+{
+ struct ice_txq_meta txq_meta = { };
+ struct ice_q_vector *q_vector;
+ struct ice_tx_ring *tx_ring;
+ struct ice_rx_ring *rx_ring;
+ int fail = 0;
+ int err;
+
+ if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
+ return -EINVAL;
+
+ tx_ring = vsi->tx_rings[q_idx];
+ rx_ring = vsi->rx_rings[q_idx];
+ q_vector = rx_ring->q_vector;
+
+ synchronize_net();
+ netif_carrier_off(vsi->netdev);
+ netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
+
+ ice_qvec_dis_irq(vsi, rx_ring, q_vector);
+ ice_qvec_toggle_napi(vsi, q_vector, false);
+
+ ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
+ err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
+ if (!fail)
+ fail = err;
+ if (vsi->xdp_rings) {
+ struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
+
+ memset(&txq_meta, 0, sizeof(txq_meta));
+ ice_fill_txq_meta(vsi, xdp_ring, &txq_meta);
+ err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, xdp_ring,
+ &txq_meta);
+ if (!fail)
+ fail = err;
+ }
+
+ ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, false);
+ ice_qp_clean_rings(vsi, q_idx);
+ ice_qp_reset_stats(vsi, q_idx);
+
+ return fail;
+}
+
+/**
+ * ice_qp_ena - Enables a queue pair
+ * @vsi: VSI of interest
+ * @q_idx: ring index in array
+ *
+ * Return: 0 on success, negative on failure.
+ */
+int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
+{
+ struct ice_q_vector *q_vector;
+ int fail = 0;
+ bool link_up;
+ int err;
+
+ err = ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx);
+ if (!fail)
+ fail = err;
+
+ if (ice_is_xdp_ena_vsi(vsi)) {
+ struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
+
+ err = ice_vsi_cfg_single_txq(vsi, vsi->xdp_rings, q_idx);
+ if (!fail)
+ fail = err;
+ ice_set_ring_xdp(xdp_ring);
+ ice_tx_xsk_pool(vsi, q_idx);
+ }
+
+ err = ice_vsi_cfg_single_rxq(vsi, q_idx);
+ if (!fail)
+ fail = err;
+
+ q_vector = vsi->rx_rings[q_idx]->q_vector;
+ ice_qvec_cfg_msix(vsi, q_vector, q_idx);
+
+ err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true);
+ if (!fail)
+ fail = err;
+
+ ice_qvec_toggle_napi(vsi, q_vector, true);
+ ice_qvec_ena_irq(vsi, q_vector);
+
+ /* make sure NAPI sees updated ice_{t,x}_ring::xsk_pool */
+ synchronize_net();
+ ice_get_link_status(vsi->port_info, &link_up);
+ if (link_up) {
+ netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
+ netif_carrier_on(vsi->netdev);
+ }
+
+ return fail;
+}
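ice_qp_dis()/ice_qp_ena() are exported via ice_base.h so callers that must quiesce a single queue pair (the XSK pool attach/detach path, for instance) can bounce one queue without a full VSI rebuild. A hedged sketch of the expected calling pattern; change_queue_cfg() is a stand-in, not driver API:

/* Sketch: restart one queue pair around a configuration change. */
static int ice_bounce_queue_sketch(struct ice_vsi *vsi, u16 q_idx)
{
	int err;

	err = ice_qp_dis(vsi, q_idx);
	if (err)
		return err;

	change_queue_cfg(vsi, q_idx);	/* hypothetical caller work */

	return ice_qp_ena(vsi, q_idx);
}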
diff --git a/drivers/net/ethernet/intel/ice/ice_base.h b/drivers/net/ethernet/intel/ice/ice_base.h
index b711bc921928..d28294247599 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.h
+++ b/drivers/net/ethernet/intel/ice/ice_base.h
@@ -32,4 +32,7 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
void
ice_fill_txq_meta(const struct ice_vsi *vsi, struct ice_tx_ring *ring,
struct ice_txq_meta *txq_meta);
+int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx);
+int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx);
+u16 ice_calc_ts_ring_count(struct ice_tx_ring *tx_ring);
#endif /* _ICE_BASE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 003d60a4db21..2250426ec91b 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -984,6 +984,37 @@ static int ice_wait_for_fw(struct ice_hw *hw, u32 timeout)
return -ETIMEDOUT;
}
+static int __fwlog_send_cmd(void *priv, struct libie_aq_desc *desc, void *buf,
+ u16 size)
+{
+ struct ice_hw *hw = priv;
+
+ return ice_aq_send_cmd(hw, desc, buf, size, NULL);
+}
+
+static int __fwlog_init(struct ice_hw *hw)
+{
+ struct ice_pf *pf = hw->back;
+ struct libie_fwlog_api api = {
+ .pdev = pf->pdev,
+ .send_cmd = __fwlog_send_cmd,
+ .priv = hw,
+ };
+ int err;
+
+ /* only support fw log commands on PF 0 */
+ if (hw->bus.func)
+ return -EINVAL;
+
+ err = ice_debugfs_pf_init(pf);
+ if (err)
+ return err;
+
+ api.debugfs_root = pf->ice_debugfs_pf;
+
+ return libie_fwlog_init(&hw->fwlog, &api);
+}
+
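With firmware logging moved into the shared libie module, the driver now supplies only transport: a send_cmd callback that forwards descriptors to its own admin queue, plus a debugfs root for the fwlog files. A sketch of the same hookup from a hypothetical libie consumer "foo" (all foo_* names are invented):

/* Sketch: minimal libie fwlog hookup; mirrors __fwlog_init() above. */
static int foo_fwlog_send(void *priv, struct libie_aq_desc *desc,
			  void *buf, u16 size)
{
	return foo_aq_send(priv, desc, buf, size);	/* hypothetical */
}

static int foo_fwlog_init(struct foo_hw *hw)
{
	struct libie_fwlog_api api = {
		.pdev		= hw->pdev,
		.send_cmd	= foo_fwlog_send,
		.priv		= hw,
		.debugfs_root	= hw->debugfs_dir,
	};

	return libie_fwlog_init(&hw->fwlog, &api);
}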
/**
* ice_init_hw - main hardware initialization routine
* @hw: pointer to the hardware structure
@@ -1012,7 +1043,7 @@ int ice_init_hw(struct ice_hw *hw)
if (status)
goto err_unroll_cqinit;
- status = ice_fwlog_init(hw);
+ status = __fwlog_init(hw);
if (status)
ice_debug(hw, ICE_DBG_FW_LOG, "Error initializing FW logging: %d\n",
status);
@@ -1159,6 +1190,16 @@ err_unroll_cqinit:
return status;
}
+static void __fwlog_deinit(struct ice_hw *hw)
+{
+ /* only support fw log commands on PF 0 */
+ if (hw->bus.func)
+ return;
+
+ ice_debugfs_pf_deinit(hw->back);
+ libie_fwlog_deinit(&hw->fwlog);
+}
+
/**
* ice_deinit_hw - unroll initialization operations done by ice_init_hw
* @hw: pointer to the hardware structure
@@ -1177,8 +1218,7 @@ void ice_deinit_hw(struct ice_hw *hw)
ice_free_seg(hw);
ice_free_hw_tbls(hw);
mutex_destroy(&hw->tnl_lock);
-
- ice_fwlog_deinit(hw);
+ __fwlog_deinit(hw);
ice_destroy_all_ctrlq(hw);
/* Clear VSI contexts if not already cleared */
@@ -1693,6 +1733,44 @@ int ice_write_txq_ctx(struct ice_hw *hw, struct ice_tlan_ctx *tlan_ctx,
return 0;
}
+/* Tx time Queue Context */
+static const struct packed_field_u8 ice_txtime_ctx_fields[] = {
+ /* Field Width LSB */
+ ICE_CTX_STORE(ice_txtime_ctx, base, 57, 0),
+ ICE_CTX_STORE(ice_txtime_ctx, pf_num, 3, 57),
+ ICE_CTX_STORE(ice_txtime_ctx, vmvf_num, 10, 60),
+ ICE_CTX_STORE(ice_txtime_ctx, vmvf_type, 2, 70),
+ ICE_CTX_STORE(ice_txtime_ctx, src_vsi, 10, 72),
+ ICE_CTX_STORE(ice_txtime_ctx, cpuid, 8, 82),
+ ICE_CTX_STORE(ice_txtime_ctx, tphrd_desc, 1, 90),
+ ICE_CTX_STORE(ice_txtime_ctx, qlen, 13, 91),
+ ICE_CTX_STORE(ice_txtime_ctx, timer_num, 1, 104),
+ ICE_CTX_STORE(ice_txtime_ctx, txtime_ena_q, 1, 105),
+ ICE_CTX_STORE(ice_txtime_ctx, drbell_mode_32, 1, 106),
+ ICE_CTX_STORE(ice_txtime_ctx, ts_res, 4, 107),
+ ICE_CTX_STORE(ice_txtime_ctx, ts_round_type, 2, 111),
+ ICE_CTX_STORE(ice_txtime_ctx, ts_pacing_slot, 3, 113),
+ ICE_CTX_STORE(ice_txtime_ctx, merging_ena, 1, 116),
+ ICE_CTX_STORE(ice_txtime_ctx, ts_fetch_prof_id, 4, 117),
+ ICE_CTX_STORE(ice_txtime_ctx, ts_fetch_cache_line_aln_thld, 4, 121),
+ ICE_CTX_STORE(ice_txtime_ctx, tx_pipe_delay_mode, 1, 125),
+};
+
+/**
+ * ice_pack_txtime_ctx - pack Tx time queue context into a HW buffer
+ * @ctx: the Tx time queue context to pack
+ * @buf: the HW buffer to pack into
+ *
+ * Pack the Tx time queue context from the CPU-friendly unpacked buffer into
+ * its bit-packed HW layout.
+ */
+void ice_pack_txtime_ctx(const struct ice_txtime_ctx *ctx,
+ ice_txtime_ctx_buf_t *buf)
+{
+ pack_fields(buf, sizeof(*buf), ctx, ice_txtime_ctx_fields,
+ QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
+}
+
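The Tx time context reuses the generic <linux/packing.h> engine: each ICE_CTX_STORE() entry records a struct member with its width and LSB in the hardware layout, and pack_fields() serializes the whole table in one pass. A hedged sketch of the pattern on a toy two-field context, assuming the PACKED_FIELD(msb, lsb, struct, field) helper from <linux/packing.h>:

/* Sketch: packed_field table + pack_fields() on a toy layout. */
struct toy_ctx {
	u64 base;
	u8 ena;
};

static const struct packed_field_u8 toy_fields[] = {
	PACKED_FIELD(56, 0, struct toy_ctx, base),	/* 57 bits @ 0 */
	PACKED_FIELD(57, 57, struct toy_ctx, ena),	/*  1 bit @ 57 */
};

static void toy_pack(const struct toy_ctx *ctx, u8 *buf)
{
	pack_fields(buf, 8, ctx, toy_fields,
		    QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
}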
/* Sideband Queue command wrappers */
/**
@@ -2418,12 +2496,15 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
caps->reset_restrict_support);
break;
case LIBIE_AQC_CAPS_FW_LAG_SUPPORT:
- caps->roce_lag = !!(number & LIBIE_AQC_BIT_ROCEV2_LAG);
+ caps->roce_lag = number & LIBIE_AQC_BIT_ROCEV2_LAG;
ice_debug(hw, ICE_DBG_INIT, "%s: roce_lag = %u\n",
prefix, caps->roce_lag);
- caps->sriov_lag = !!(number & LIBIE_AQC_BIT_SRIOV_LAG);
+ caps->sriov_lag = number & LIBIE_AQC_BIT_SRIOV_LAG;
ice_debug(hw, ICE_DBG_INIT, "%s: sriov_lag = %u\n",
prefix, caps->sriov_lag);
+ caps->sriov_aa_lag = number & LIBIE_AQC_BIT_SRIOV_AA_LAG;
+ ice_debug(hw, ICE_DBG_INIT, "%s: sriov_aa_lag = %u\n",
+ prefix, caps->sriov_aa_lag);
break;
case LIBIE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE:
caps->tx_sched_topo_comp_mode_en = (number == 1);
@@ -4712,24 +4793,24 @@ do_aq:
}
/**
- * ice_aq_cfg_lan_txq
+ * ice_aq_cfg_lan_txq - send AQ command 0x0C32 to FW
* @hw: pointer to the hardware structure
* @buf: buffer for command
* @buf_size: size of buffer in bytes
* @num_qs: number of queues being configured
* @oldport: origination lport
* @newport: destination lport
+ * @mode: cmd_type for move to use
* @cd: pointer to command details structure or NULL
*
* Move/Configure LAN Tx queue (0x0C32)
*
- * There is a better AQ command to use for moving nodes, so only coding
- * this one for configuring the node.
+ * Return: 0 on success, associated error code on failure.
*/
int
ice_aq_cfg_lan_txq(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *buf,
u16 buf_size, u16 num_qs, u8 oldport, u8 newport,
- struct ice_sq_cd *cd)
+ u8 mode, struct ice_sq_cd *cd)
{
struct ice_aqc_cfg_txqs *cmd;
struct libie_aq_desc desc;
@@ -4742,10 +4823,12 @@ ice_aq_cfg_lan_txq(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *buf,
if (!buf)
return -EINVAL;
- cmd->cmd_type = ICE_AQC_Q_CFG_TC_CHNG;
+ cmd->cmd_type = mode;
cmd->num_qs = num_qs;
cmd->port_num_chng = (oldport & ICE_AQC_Q_CFG_SRC_PRT_M);
cmd->port_num_chng |= FIELD_PREP(ICE_AQC_Q_CFG_DST_PRT_M, newport);
+ cmd->port_num_chng |= FIELD_PREP(ICE_AQC_Q_CFG_MODE_M,
+ ICE_AQC_Q_CFG_MODE_KEEP_OWN);
cmd->time_out = FIELD_PREP(ICE_AQC_Q_CFG_TIMEOUT_M, 5);
cmd->blocked_cgds = 0;
@@ -4801,6 +4884,46 @@ ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
return ice_aq_send_cmd(hw, &desc, qset_list, buf_size, cd);
}
+/**
+ * ice_aq_set_txtimeq - set Tx time queues
+ * @hw: pointer to the hardware structure
+ * @txtimeq: first Tx time queue id to configure
+ * @q_count: number of queues to configure
+ * @txtime_qg: queue group to be set
+ * @buf_size: size of buffer for indirect command
+ * @cd: pointer to command details structure or NULL
+ *
+ * Set Tx Time queue (0x0C35)
+ * Return: 0 on success or negative value on failure.
+ */
+int
+ice_aq_set_txtimeq(struct ice_hw *hw, u16 txtimeq, u8 q_count,
+ struct ice_aqc_set_txtime_qgrp *txtime_qg, u16 buf_size,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_set_txtimeqs *cmd;
+ struct libie_aq_desc desc;
+ u16 size;
+
+ if (!txtime_qg || txtimeq > ICE_TXTIME_MAX_QUEUE ||
+ q_count < 1 || q_count > ICE_SET_TXTIME_MAX_Q_AMOUNT)
+ return -EINVAL;
+
+ size = struct_size(txtime_qg, txtimeqs, q_count);
+ if (buf_size != size)
+ return -EINVAL;
+
+ cmd = libie_aq_raw(&desc);
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_txtimeqs);
+
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
+
+ cmd->q_id = cpu_to_le16(txtimeq);
+ cmd->q_amount = cpu_to_le16(q_count);
+ return ice_aq_send_cmd(hw, &desc, txtime_qg, buf_size, cd);
+}
+
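ice_aq_set_txtimeq() requires buf_size to exactly equal struct_size() for q_count entries, so batching queues means sizing one buffer for the whole group up front. A hedged sketch for a two-queue batch (context packing and error paths trimmed; hw and first_q are assumed to be in scope):

/* Sketch: program two consecutive Tx Time queues in one command. */
struct ice_aqc_set_txtime_qgrp *qg;
u16 len = struct_size(qg, txtimeqs, 2);
int err;

qg = kzalloc(len, GFP_KERNEL);
if (!qg)
	return -ENOMEM;

/* ice_pack_txtime_ctx() each context into qg->txtimeqs[0..1] here */

err = ice_aq_set_txtimeq(hw, first_q, 2, qg, len, NULL);
kfree(qg);
return err;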
/* End of FW Admin Queue command wrappers */
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 60320cdf7804..e700ac0dc347 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -270,11 +270,17 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
int
ice_aq_cfg_lan_txq(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *buf,
u16 buf_size, u16 num_qs, u8 oldport, u8 newport,
- struct ice_sq_cd *cd);
+ u8 mode, struct ice_sq_cd *cd);
int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle);
void ice_replay_post(struct ice_hw *hw);
struct ice_q_ctx *
ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle);
+int
+ice_aq_set_txtimeq(struct ice_hw *hw, u16 txtimeq, u8 q_count,
+ struct ice_aqc_set_txtime_qgrp *txtime_qg,
+ u16 buf_size, struct ice_sq_cd *cd);
+void ice_pack_txtime_ctx(const struct ice_txtime_ctx *ctx,
+ ice_txtime_ctx_buf_t *buf);
int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in, u16 flag);
int ice_aq_get_cgu_input_pin_measure(struct ice_hw *hw, u8 dpll_idx,
struct ice_cgu_input_measure *meas,
diff --git a/drivers/net/ethernet/intel/ice/ice_debugfs.c b/drivers/net/ethernet/intel/ice/ice_debugfs.c
index cb71eca6a85b..f450250fc827 100644
--- a/drivers/net/ethernet/intel/ice/ice_debugfs.c
+++ b/drivers/net/ethernet/intel/ice/ice_debugfs.c
@@ -1,647 +1,20 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022, Intel Corporation. */
-#include <linux/fs.h>
#include <linux/debugfs.h>
-#include <linux/random.h>
-#include <linux/vmalloc.h>
#include "ice.h"
static struct dentry *ice_debugfs_root;
-/* create a define that has an extra module that doesn't really exist. this
- * is so we can add a module 'all' to easily enable/disable all the modules
- */
-#define ICE_NR_FW_LOG_MODULES (ICE_AQC_FW_LOG_ID_MAX + 1)
-
-/* the ordering in this array is important. it matches the ordering of the
- * values in the FW so the index is the same value as in ice_aqc_fw_logging_mod
- */
-static const char * const ice_fwlog_module_string[] = {
- "general",
- "ctrl",
- "link",
- "link_topo",
- "dnl",
- "i2c",
- "sdp",
- "mdio",
- "adminq",
- "hdma",
- "lldp",
- "dcbx",
- "dcb",
- "xlr",
- "nvm",
- "auth",
- "vpd",
- "iosf",
- "parser",
- "sw",
- "scheduler",
- "txq",
- "rsvd",
- "post",
- "watchdog",
- "task_dispatch",
- "mng",
- "synce",
- "health",
- "tsdrv",
- "pfreg",
- "mdlver",
- "all",
-};
-
-/* the ordering in this array is important. it matches the ordering of the
- * values in the FW so the index is the same value as in ice_fwlog_level
- */
-static const char * const ice_fwlog_level_string[] = {
- "none",
- "error",
- "warning",
- "normal",
- "verbose",
-};
-
-static const char * const ice_fwlog_log_size[] = {
- "128K",
- "256K",
- "512K",
- "1M",
- "2M",
-};
-
-/**
- * ice_fwlog_print_module_cfg - print current FW logging module configuration
- * @hw: pointer to the HW structure
- * @module: module to print
- * @s: the seq file to put data into
- */
-static void
-ice_fwlog_print_module_cfg(struct ice_hw *hw, int module, struct seq_file *s)
-{
- struct ice_fwlog_cfg *cfg = &hw->fwlog_cfg;
- struct ice_fwlog_module_entry *entry;
-
- if (module != ICE_AQC_FW_LOG_ID_MAX) {
- entry = &cfg->module_entries[module];
-
- seq_printf(s, "\tModule: %s, Log Level: %s\n",
- ice_fwlog_module_string[entry->module_id],
- ice_fwlog_level_string[entry->log_level]);
- } else {
- int i;
-
- for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
- entry = &cfg->module_entries[i];
-
- seq_printf(s, "\tModule: %s, Log Level: %s\n",
- ice_fwlog_module_string[entry->module_id],
- ice_fwlog_level_string[entry->log_level]);
- }
- }
-}
-
-static int ice_find_module_by_dentry(struct ice_pf *pf, struct dentry *d)
-{
- int i, module;
-
- module = -1;
- /* find the module based on the dentry */
- for (i = 0; i < ICE_NR_FW_LOG_MODULES; i++) {
- if (d == pf->ice_debugfs_pf_fwlog_modules[i]) {
- module = i;
- break;
- }
- }
-
- return module;
-}
-
-/**
- * ice_debugfs_module_show - read from 'module' file
- * @s: the opened file
- * @v: pointer to the offset
- */
-static int ice_debugfs_module_show(struct seq_file *s, void *v)
-{
- const struct file *filp = s->file;
- struct dentry *dentry;
- struct ice_pf *pf;
- int module;
-
- dentry = file_dentry(filp);
- pf = s->private;
-
- module = ice_find_module_by_dentry(pf, dentry);
- if (module < 0) {
- dev_info(ice_pf_to_dev(pf), "unknown module\n");
- return -EINVAL;
- }
-
- ice_fwlog_print_module_cfg(&pf->hw, module, s);
-
- return 0;
-}
-
-static int ice_debugfs_module_open(struct inode *inode, struct file *filp)
-{
- return single_open(filp, ice_debugfs_module_show, inode->i_private);
-}
-
-/**
- * ice_debugfs_module_write - write into 'module' file
- * @filp: the opened file
- * @buf: where to find the user's data
- * @count: the length of the user's data
- * @ppos: file position offset
- */
-static ssize_t
-ice_debugfs_module_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct ice_pf *pf = file_inode(filp)->i_private;
- struct dentry *dentry = file_dentry(filp);
- struct device *dev = ice_pf_to_dev(pf);
- char user_val[16], *cmd_buf;
- int module, log_level, cnt;
-
- /* don't allow partial writes or invalid input */
- if (*ppos != 0 || count > 8)
- return -EINVAL;
-
- cmd_buf = memdup_user_nul(buf, count);
- if (IS_ERR(cmd_buf))
- return PTR_ERR(cmd_buf);
-
- module = ice_find_module_by_dentry(pf, dentry);
- if (module < 0) {
- dev_info(dev, "unknown module\n");
- return -EINVAL;
- }
-
- cnt = sscanf(cmd_buf, "%s", user_val);
- if (cnt != 1)
- return -EINVAL;
-
- log_level = sysfs_match_string(ice_fwlog_level_string, user_val);
- if (log_level < 0) {
- dev_info(dev, "unknown log level '%s'\n", user_val);
- return -EINVAL;
- }
-
- if (module != ICE_AQC_FW_LOG_ID_MAX) {
- ice_pf_fwlog_update_module(pf, log_level, module);
- } else {
- /* the module 'all' is a shortcut so that we can set
- * all of the modules to the same level quickly
- */
- int i;
-
- for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++)
- ice_pf_fwlog_update_module(pf, log_level, i);
- }
-
- return count;
-}
-
-static const struct file_operations ice_debugfs_module_fops = {
- .owner = THIS_MODULE,
- .open = ice_debugfs_module_open,
- .read = seq_read,
- .release = single_release,
- .write = ice_debugfs_module_write,
-};
-
-/**
- * ice_debugfs_nr_messages_read - read from 'nr_messages' file
- * @filp: the opened file
- * @buffer: where to write the data for the user to read
- * @count: the size of the user's buffer
- * @ppos: file position offset
- */
-static ssize_t ice_debugfs_nr_messages_read(struct file *filp,
- char __user *buffer, size_t count,
- loff_t *ppos)
-{
- struct ice_pf *pf = filp->private_data;
- struct ice_hw *hw = &pf->hw;
- char buff[32] = {};
-
- snprintf(buff, sizeof(buff), "%d\n",
- hw->fwlog_cfg.log_resolution);
-
- return simple_read_from_buffer(buffer, count, ppos, buff, strlen(buff));
-}
-
-/**
- * ice_debugfs_nr_messages_write - write into 'nr_messages' file
- * @filp: the opened file
- * @buf: where to find the user's data
- * @count: the length of the user's data
- * @ppos: file position offset
- */
-static ssize_t
-ice_debugfs_nr_messages_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct ice_pf *pf = filp->private_data;
- struct device *dev = ice_pf_to_dev(pf);
- struct ice_hw *hw = &pf->hw;
- char user_val[8], *cmd_buf;
- s16 nr_messages;
- ssize_t ret;
-
- /* don't allow partial writes or invalid input */
- if (*ppos != 0 || count > 4)
- return -EINVAL;
-
- cmd_buf = memdup_user_nul(buf, count);
- if (IS_ERR(cmd_buf))
- return PTR_ERR(cmd_buf);
-
- ret = sscanf(cmd_buf, "%s", user_val);
- if (ret != 1)
- return -EINVAL;
-
- ret = kstrtos16(user_val, 0, &nr_messages);
- if (ret)
- return ret;
-
- if (nr_messages < ICE_AQC_FW_LOG_MIN_RESOLUTION ||
- nr_messages > ICE_AQC_FW_LOG_MAX_RESOLUTION) {
- dev_err(dev, "Invalid FW log number of messages %d, value must be between %d - %d\n",
- nr_messages, ICE_AQC_FW_LOG_MIN_RESOLUTION,
- ICE_AQC_FW_LOG_MAX_RESOLUTION);
- return -EINVAL;
- }
-
- hw->fwlog_cfg.log_resolution = nr_messages;
-
- return count;
-}
-
-static const struct file_operations ice_debugfs_nr_messages_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = ice_debugfs_nr_messages_read,
- .write = ice_debugfs_nr_messages_write,
-};
-
-/**
- * ice_debugfs_enable_read - read from 'enable' file
- * @filp: the opened file
- * @buffer: where to write the data for the user to read
- * @count: the size of the user's buffer
- * @ppos: file position offset
- */
-static ssize_t ice_debugfs_enable_read(struct file *filp,
- char __user *buffer, size_t count,
- loff_t *ppos)
-{
- struct ice_pf *pf = filp->private_data;
- struct ice_hw *hw = &pf->hw;
- char buff[32] = {};
-
- snprintf(buff, sizeof(buff), "%u\n",
- (u16)(hw->fwlog_cfg.options &
- ICE_FWLOG_OPTION_IS_REGISTERED) >> 3);
-
- return simple_read_from_buffer(buffer, count, ppos, buff, strlen(buff));
-}
-
-/**
- * ice_debugfs_enable_write - write into 'enable' file
- * @filp: the opened file
- * @buf: where to find the user's data
- * @count: the length of the user's data
- * @ppos: file position offset
- */
-static ssize_t
-ice_debugfs_enable_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct ice_pf *pf = filp->private_data;
- struct ice_hw *hw = &pf->hw;
- char user_val[8], *cmd_buf;
- bool enable;
- ssize_t ret;
-
- /* don't allow partial writes or invalid input */
- if (*ppos != 0 || count > 2)
- return -EINVAL;
-
- cmd_buf = memdup_user_nul(buf, count);
- if (IS_ERR(cmd_buf))
- return PTR_ERR(cmd_buf);
-
- ret = sscanf(cmd_buf, "%s", user_val);
- if (ret != 1)
- return -EINVAL;
-
- ret = kstrtobool(user_val, &enable);
- if (ret)
- goto enable_write_error;
-
- if (enable)
- hw->fwlog_cfg.options |= ICE_FWLOG_OPTION_ARQ_ENA;
- else
- hw->fwlog_cfg.options &= ~ICE_FWLOG_OPTION_ARQ_ENA;
-
- ret = ice_fwlog_set(hw, &hw->fwlog_cfg);
- if (ret)
- goto enable_write_error;
-
- if (enable)
- ret = ice_fwlog_register(hw);
- else
- ret = ice_fwlog_unregister(hw);
-
- if (ret)
- goto enable_write_error;
-
- /* if we get here, nothing went wrong; return count since we didn't
- * really write anything
- */
- ret = (ssize_t)count;
-
-enable_write_error:
- /* This function always consumes all of the written input, or produces
- * an error. Check and enforce this. Otherwise, the write operation
- * won't complete properly.
- */
- if (WARN_ON(ret != (ssize_t)count && ret >= 0))
- ret = -EIO;
-
- return ret;
-}
-
-static const struct file_operations ice_debugfs_enable_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = ice_debugfs_enable_read,
- .write = ice_debugfs_enable_write,
-};
-
-/**
- * ice_debugfs_log_size_read - read from 'log_size' file
- * @filp: the opened file
- * @buffer: where to write the data for the user to read
- * @count: the size of the user's buffer
- * @ppos: file position offset
- */
-static ssize_t ice_debugfs_log_size_read(struct file *filp,
- char __user *buffer, size_t count,
- loff_t *ppos)
-{
- struct ice_pf *pf = filp->private_data;
- struct ice_hw *hw = &pf->hw;
- char buff[32] = {};
- int index;
-
- index = hw->fwlog_ring.index;
- snprintf(buff, sizeof(buff), "%s\n", ice_fwlog_log_size[index]);
-
- return simple_read_from_buffer(buffer, count, ppos, buff, strlen(buff));
-}
-
-/**
- * ice_debugfs_log_size_write - write into 'log_size' file
- * @filp: the opened file
- * @buf: where to find the user's data
- * @count: the length of the user's data
- * @ppos: file position offset
- */
-static ssize_t
-ice_debugfs_log_size_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct ice_pf *pf = filp->private_data;
- struct device *dev = ice_pf_to_dev(pf);
- struct ice_hw *hw = &pf->hw;
- char user_val[8], *cmd_buf;
- ssize_t ret;
- int index;
-
- /* don't allow partial writes or invalid input */
- if (*ppos != 0 || count > 5)
- return -EINVAL;
-
- cmd_buf = memdup_user_nul(buf, count);
- if (IS_ERR(cmd_buf))
- return PTR_ERR(cmd_buf);
-
- ret = sscanf(cmd_buf, "%s", user_val);
- if (ret != 1)
- return -EINVAL;
-
- index = sysfs_match_string(ice_fwlog_log_size, user_val);
- if (index < 0) {
- dev_info(dev, "Invalid log size '%s'. The value must be one of 128K, 256K, 512K, 1M, 2M\n",
- user_val);
- ret = -EINVAL;
- goto log_size_write_error;
- } else if (hw->fwlog_cfg.options & ICE_FWLOG_OPTION_IS_REGISTERED) {
- dev_info(dev, "FW logging is currently running. Please disable FW logging to change log_size\n");
- ret = -EINVAL;
- goto log_size_write_error;
- }
-
- /* free all the buffers and the tracking info and resize */
- ice_fwlog_realloc_rings(hw, index);
-
- /* if we get here, nothing went wrong; return count since we didn't
- * really write anything
- */
- ret = (ssize_t)count;
-
-log_size_write_error:
- /* This function always consumes all of the written input, or produces
- * an error. Check and enforce this. Otherwise, the write operation
- * won't complete properly.
- */
- if (WARN_ON(ret != (ssize_t)count && ret >= 0))
- ret = -EIO;
-
- return ret;
-}
-
-static const struct file_operations ice_debugfs_log_size_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = ice_debugfs_log_size_read,
- .write = ice_debugfs_log_size_write,
-};
-
-/**
- * ice_debugfs_data_read - read from 'data' file
- * @filp: the opened file
- * @buffer: where to write the data for the user to read
- * @count: the size of the user's buffer
- * @ppos: file position offset
- */
-static ssize_t ice_debugfs_data_read(struct file *filp, char __user *buffer,
- size_t count, loff_t *ppos)
-{
- struct ice_pf *pf = filp->private_data;
- struct ice_hw *hw = &pf->hw;
- int data_copied = 0;
- bool done = false;
-
- if (ice_fwlog_ring_empty(&hw->fwlog_ring))
- return 0;
-
- while (!ice_fwlog_ring_empty(&hw->fwlog_ring) && !done) {
- struct ice_fwlog_data *log;
- u16 cur_buf_len;
-
- log = &hw->fwlog_ring.rings[hw->fwlog_ring.head];
- cur_buf_len = log->data_size;
- if (cur_buf_len >= count) {
- done = true;
- continue;
- }
-
- if (copy_to_user(buffer, log->data, cur_buf_len)) {
- /* if there is an error then bail and return whatever
- * the driver has copied so far
- */
- done = true;
- continue;
- }
-
- data_copied += cur_buf_len;
- buffer += cur_buf_len;
- count -= cur_buf_len;
- *ppos += cur_buf_len;
- ice_fwlog_ring_increment(&hw->fwlog_ring.head,
- hw->fwlog_ring.size);
- }
-
- return data_copied;
-}
-
-/**
- * ice_debugfs_data_write - write into 'data' file
- * @filp: the opened file
- * @buf: where to find the user's data
- * @count: the length of the user's data
- * @ppos: file position offset
- */
-static ssize_t
-ice_debugfs_data_write(struct file *filp, const char __user *buf, size_t count,
- loff_t *ppos)
-{
- struct ice_pf *pf = filp->private_data;
- struct device *dev = ice_pf_to_dev(pf);
- struct ice_hw *hw = &pf->hw;
- ssize_t ret;
-
- /* don't allow partial writes */
- if (*ppos != 0)
- return 0;
-
- /* any value is allowed to clear the buffer so no need to even look at
- * what the value is
- */
- if (!(hw->fwlog_cfg.options & ICE_FWLOG_OPTION_IS_REGISTERED)) {
- hw->fwlog_ring.head = 0;
- hw->fwlog_ring.tail = 0;
- } else {
- dev_info(dev, "Can't clear FW log data while FW log running\n");
- ret = -EINVAL;
- goto nr_buffs_write_error;
- }
-
- /* if we get here, nothing went wrong; return count since we didn't
- * really write anything
- */
- ret = (ssize_t)count;
-
-nr_buffs_write_error:
- /* This function always consumes all of the written input, or produces
- * an error. Check and enforce this. Otherwise, the write operation
- * won't complete properly.
- */
- if (WARN_ON(ret != (ssize_t)count && ret >= 0))
- ret = -EIO;
-
- return ret;
-}
-
-static const struct file_operations ice_debugfs_data_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = ice_debugfs_data_read,
- .write = ice_debugfs_data_write,
-};
-
-/**
- * ice_debugfs_fwlog_init - setup the debugfs directory
- * @pf: the ice that is starting up
- */
-void ice_debugfs_fwlog_init(struct ice_pf *pf)
+int ice_debugfs_pf_init(struct ice_pf *pf)
{
const char *name = pci_name(pf->pdev);
- struct dentry *fw_modules_dir;
- struct dentry **fw_modules;
- int i;
-
- /* only support fw log commands on PF 0 */
- if (pf->hw.bus.func)
- return;
-
- /* allocate space for this first because if it fails then we don't
- * need to unwind
- */
- fw_modules = kcalloc(ICE_NR_FW_LOG_MODULES, sizeof(*fw_modules),
- GFP_KERNEL);
- if (!fw_modules)
- return;
pf->ice_debugfs_pf = debugfs_create_dir(name, ice_debugfs_root);
if (IS_ERR(pf->ice_debugfs_pf))
- goto err_create_module_files;
-
- pf->ice_debugfs_pf_fwlog = debugfs_create_dir("fwlog",
- pf->ice_debugfs_pf);
- if (IS_ERR(pf->ice_debugfs_pf_fwlog))
- goto err_create_module_files;
+ return PTR_ERR(pf->ice_debugfs_pf);
- fw_modules_dir = debugfs_create_dir("modules",
- pf->ice_debugfs_pf_fwlog);
- if (IS_ERR(fw_modules_dir))
- goto err_create_module_files;
-
- for (i = 0; i < ICE_NR_FW_LOG_MODULES; i++) {
- fw_modules[i] = debugfs_create_file(ice_fwlog_module_string[i],
- 0600, fw_modules_dir, pf,
- &ice_debugfs_module_fops);
- if (IS_ERR(fw_modules[i]))
- goto err_create_module_files;
- }
-
- debugfs_create_file("nr_messages", 0600,
- pf->ice_debugfs_pf_fwlog, pf,
- &ice_debugfs_nr_messages_fops);
-
- pf->ice_debugfs_pf_fwlog_modules = fw_modules;
-
- debugfs_create_file("enable", 0600, pf->ice_debugfs_pf_fwlog,
- pf, &ice_debugfs_enable_fops);
-
- debugfs_create_file("log_size", 0600, pf->ice_debugfs_pf_fwlog,
- pf, &ice_debugfs_log_size_fops);
-
- debugfs_create_file("data", 0600, pf->ice_debugfs_pf_fwlog,
- pf, &ice_debugfs_data_fops);
-
- return;
-
-err_create_module_files:
- debugfs_remove_recursive(pf->ice_debugfs_pf_fwlog);
- kfree(fw_modules);
+ return 0;
}
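Note the error convention used here: debugfs_create_dir() never returns NULL; it returns a valid dentry or an ERR_PTR, so IS_ERR()/PTR_ERR() is the right check and a failure leaves nothing to unwind. The same idiom in isolation:

/* Sketch: the debugfs error-pointer idiom. */
struct dentry *dir;

dir = debugfs_create_dir("example", parent);
if (IS_ERR(dir))
	return PTR_ERR(dir);	/* no partial state to clean up */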
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 55e0f2c6af9e..dc131779d426 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -3147,9 +3147,11 @@ ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
+ struct ice_hw *hw;
- ring->rx_max_pending = ICE_MAX_NUM_DESC;
- ring->tx_max_pending = ICE_MAX_NUM_DESC;
+ hw = &vsi->back->hw;
+ ring->rx_max_pending = ICE_MAX_NUM_DESC_BY_MAC(hw);
+ ring->tx_max_pending = ICE_MAX_NUM_DESC_BY_MAC(hw);
if (vsi->tx_rings && vsi->rx_rings) {
ring->rx_pending = vsi->rx_rings[0]->count;
ring->tx_pending = vsi->tx_rings[0]->count;
@@ -3177,15 +3179,16 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
int i, timeout = 50, err = 0;
+ struct ice_hw *hw = &pf->hw;
u16 new_rx_cnt, new_tx_cnt;
- if (ring->tx_pending > ICE_MAX_NUM_DESC ||
+ if (ring->tx_pending > ICE_MAX_NUM_DESC_BY_MAC(hw) ||
ring->tx_pending < ICE_MIN_NUM_DESC ||
- ring->rx_pending > ICE_MAX_NUM_DESC ||
+ ring->rx_pending > ICE_MAX_NUM_DESC_BY_MAC(hw) ||
ring->rx_pending < ICE_MIN_NUM_DESC) {
netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d] (increment %d)\n",
ring->tx_pending, ring->rx_pending,
- ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC,
+ ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC_BY_MAC(hw),
ICE_REQ_DESC_MULTIPLE);
return -EINVAL;
}
@@ -3258,6 +3261,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
tx_rings[i].count = new_tx_cnt;
tx_rings[i].desc = NULL;
tx_rings[i].tx_buf = NULL;
+ tx_rings[i].tstamp_ring = NULL;
tx_rings[i].tx_tstamps = &pf->ptp.port.tx;
err = ice_setup_tx_ring(&tx_rings[i]);
if (err) {
@@ -4620,10 +4624,12 @@ static int ice_get_port_fec_stats(struct ice_hw *hw, u16 pcs_quad, u16 pcs_port,
* ice_get_fec_stats - returns FEC correctable, uncorrectable stats per netdev
* @netdev: network interface device structure
* @fec_stats: buffer to hold FEC statistics for given port
+ * @hist: buffer to put FEC histogram statistics for given port
*
*/
static void ice_get_fec_stats(struct net_device *netdev,
- struct ethtool_fec_stats *fec_stats)
+ struct ethtool_fec_stats *fec_stats,
+ struct ethtool_fec_hist *hist)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_port_topology port_topology;
diff --git a/drivers/net/ethernet/intel/ice/ice_fwlog.c b/drivers/net/ethernet/intel/ice/ice_fwlog.c
deleted file mode 100644
index a31bb026ad34..000000000000
--- a/drivers/net/ethernet/intel/ice/ice_fwlog.c
+++ /dev/null
@@ -1,474 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2022, Intel Corporation. */
-
-#include <linux/vmalloc.h>
-#include "ice.h"
-#include "ice_common.h"
-#include "ice_fwlog.h"
-
-bool ice_fwlog_ring_full(struct ice_fwlog_ring *rings)
-{
- u16 head, tail;
-
- head = rings->head;
- tail = rings->tail;
-
- if (head < tail && (tail - head == (rings->size - 1)))
- return true;
- else if (head > tail && (tail == (head - 1)))
- return true;
-
- return false;
-}
-
-bool ice_fwlog_ring_empty(struct ice_fwlog_ring *rings)
-{
- return rings->head == rings->tail;
-}
-
-void ice_fwlog_ring_increment(u16 *item, u16 size)
-{
- *item = (*item + 1) & (size - 1);
-}
-
-static int ice_fwlog_alloc_ring_buffs(struct ice_fwlog_ring *rings)
-{
- int i, nr_bytes;
- u8 *mem;
-
- nr_bytes = rings->size * ICE_AQ_MAX_BUF_LEN;
- mem = vzalloc(nr_bytes);
- if (!mem)
- return -ENOMEM;
-
- for (i = 0; i < rings->size; i++) {
- struct ice_fwlog_data *ring = &rings->rings[i];
-
- ring->data_size = ICE_AQ_MAX_BUF_LEN;
- ring->data = mem;
- mem += ICE_AQ_MAX_BUF_LEN;
- }
-
- return 0;
-}
-
-static void ice_fwlog_free_ring_buffs(struct ice_fwlog_ring *rings)
-{
- int i;
-
- for (i = 0; i < rings->size; i++) {
- struct ice_fwlog_data *ring = &rings->rings[i];
-
- /* the first ring is the base memory for the whole range so
- * free it
- */
- if (!i)
- vfree(ring->data);
-
- ring->data = NULL;
- ring->data_size = 0;
- }
-}
-
-#define ICE_FWLOG_INDEX_TO_BYTES(n) ((128 * 1024) << (n))
-/**
- * ice_fwlog_realloc_rings - reallocate the FW log rings
- * @hw: pointer to the HW structure
- * @index: the new index to use to allocate memory for the log data
- *
- */
-void ice_fwlog_realloc_rings(struct ice_hw *hw, int index)
-{
- struct ice_fwlog_ring ring;
- int status, ring_size;
-
- /* convert the number of bytes into a number of 4K buffers. externally
- * the driver presents the interface to the FW log data as a number of
- * bytes because that's easy for users to understand. internally the
- * driver uses a ring of buffers because the driver doesn't know where
- * the beginning and end of any line of log data is so the driver has
- * to overwrite data as complete blocks. when the data is returned to
- * the user the driver knows that the data is correct and the FW log
- * can be correctly parsed by the tools
- */
- ring_size = ICE_FWLOG_INDEX_TO_BYTES(index) / ICE_AQ_MAX_BUF_LEN;
- if (ring_size == hw->fwlog_ring.size)
- return;
-
- /* allocate space for the new rings and buffers then release the
- * old rings and buffers. that way if we don't have enough
- * memory then we at least have what we had before
- */
- ring.rings = kcalloc(ring_size, sizeof(*ring.rings), GFP_KERNEL);
- if (!ring.rings)
- return;
-
- ring.size = ring_size;
-
- status = ice_fwlog_alloc_ring_buffs(&ring);
- if (status) {
- dev_warn(ice_hw_to_dev(hw), "Unable to allocate memory for FW log ring data buffers\n");
- ice_fwlog_free_ring_buffs(&ring);
- kfree(ring.rings);
- return;
- }
-
- ice_fwlog_free_ring_buffs(&hw->fwlog_ring);
- kfree(hw->fwlog_ring.rings);
-
- hw->fwlog_ring.rings = ring.rings;
- hw->fwlog_ring.size = ring.size;
- hw->fwlog_ring.index = index;
- hw->fwlog_ring.head = 0;
- hw->fwlog_ring.tail = 0;
-}
-
-/**
- * ice_fwlog_init - Initialize FW logging configuration
- * @hw: pointer to the HW structure
- *
- * This function should be called on driver initialization during
- * ice_init_hw().
- */
-int ice_fwlog_init(struct ice_hw *hw)
-{
- /* only support fw log commands on PF 0 */
- if (hw->bus.func)
- return -EINVAL;
-
- ice_fwlog_set_supported(hw);
-
- if (ice_fwlog_supported(hw)) {
- int status;
-
- /* read the current config from the FW and store it */
- status = ice_fwlog_get(hw, &hw->fwlog_cfg);
- if (status)
- return status;
-
- hw->fwlog_ring.rings = kcalloc(ICE_FWLOG_RING_SIZE_DFLT,
- sizeof(*hw->fwlog_ring.rings),
- GFP_KERNEL);
- if (!hw->fwlog_ring.rings) {
- dev_warn(ice_hw_to_dev(hw), "Unable to allocate memory for FW log rings\n");
- return -ENOMEM;
- }
-
- hw->fwlog_ring.size = ICE_FWLOG_RING_SIZE_DFLT;
- hw->fwlog_ring.index = ICE_FWLOG_RING_SIZE_INDEX_DFLT;
-
- status = ice_fwlog_alloc_ring_buffs(&hw->fwlog_ring);
- if (status) {
- dev_warn(ice_hw_to_dev(hw), "Unable to allocate memory for FW log ring data buffers\n");
- ice_fwlog_free_ring_buffs(&hw->fwlog_ring);
- kfree(hw->fwlog_ring.rings);
- return status;
- }
-
- ice_debugfs_fwlog_init(hw->back);
- } else {
- dev_warn(ice_hw_to_dev(hw), "FW logging is not supported in this NVM image. Please update the NVM to get FW log support\n");
- }
-
- return 0;
-}
-
-/**
- * ice_fwlog_deinit - unroll FW logging configuration
- * @hw: pointer to the HW structure
- *
- * This function should be called in ice_deinit_hw().
- */
-void ice_fwlog_deinit(struct ice_hw *hw)
-{
- struct ice_pf *pf = hw->back;
- int status;
-
- /* only support fw log commands on PF 0 */
- if (hw->bus.func)
- return;
-
- ice_debugfs_pf_deinit(hw->back);
-
- /* make sure FW logging is disabled to not put the FW in a weird state
- * for the next driver load
- */
- hw->fwlog_cfg.options &= ~ICE_FWLOG_OPTION_ARQ_ENA;
- status = ice_fwlog_set(hw, &hw->fwlog_cfg);
- if (status)
- dev_warn(ice_hw_to_dev(hw), "Unable to turn off FW logging, status: %d\n",
- status);
-
- kfree(pf->ice_debugfs_pf_fwlog_modules);
-
- pf->ice_debugfs_pf_fwlog_modules = NULL;
-
- status = ice_fwlog_unregister(hw);
- if (status)
- dev_warn(ice_hw_to_dev(hw), "Unable to unregister FW logging, status: %d\n",
- status);
-
- if (hw->fwlog_ring.rings) {
- ice_fwlog_free_ring_buffs(&hw->fwlog_ring);
- kfree(hw->fwlog_ring.rings);
- }
-}
-
-/**
- * ice_fwlog_supported - Cached for whether FW supports FW logging or not
- * @hw: pointer to the HW structure
- *
- * This will always return false if called before ice_init_hw(), so it must be
- * called after ice_init_hw().
- */
-bool ice_fwlog_supported(struct ice_hw *hw)
-{
- return hw->fwlog_supported;
-}
-
-/**
- * ice_aq_fwlog_set - Set FW logging configuration AQ command (0xFF30)
- * @hw: pointer to the HW structure
- * @entries: entries to configure
- * @num_entries: number of @entries
- * @options: options from ice_fwlog_cfg->options structure
- * @log_resolution: logging resolution
- */
-static int
-ice_aq_fwlog_set(struct ice_hw *hw, struct ice_fwlog_module_entry *entries,
- u16 num_entries, u16 options, u16 log_resolution)
-{
- struct ice_aqc_fw_log_cfg_resp *fw_modules;
- struct ice_aqc_fw_log *cmd;
- struct libie_aq_desc desc;
- int status;
- int i;
-
- fw_modules = kcalloc(num_entries, sizeof(*fw_modules), GFP_KERNEL);
- if (!fw_modules)
- return -ENOMEM;
-
- for (i = 0; i < num_entries; i++) {
- fw_modules[i].module_identifier =
- cpu_to_le16(entries[i].module_id);
- fw_modules[i].log_level = entries[i].log_level;
- }
-
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logs_config);
- desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
-
- cmd = libie_aq_raw(&desc);
-
- cmd->cmd_flags = ICE_AQC_FW_LOG_CONF_SET_VALID;
- cmd->ops.cfg.log_resolution = cpu_to_le16(log_resolution);
- cmd->ops.cfg.mdl_cnt = cpu_to_le16(num_entries);
-
- if (options & ICE_FWLOG_OPTION_ARQ_ENA)
- cmd->cmd_flags |= ICE_AQC_FW_LOG_CONF_AQ_EN;
- if (options & ICE_FWLOG_OPTION_UART_ENA)
- cmd->cmd_flags |= ICE_AQC_FW_LOG_CONF_UART_EN;
-
- status = ice_aq_send_cmd(hw, &desc, fw_modules,
- sizeof(*fw_modules) * num_entries,
- NULL);
-
- kfree(fw_modules);
-
- return status;
-}
-
-/**
- * ice_fwlog_set - Set the firmware logging settings
- * @hw: pointer to the HW structure
- * @cfg: config used to set firmware logging
- *
- * This function should be called whenever the driver needs to set the firmware
- * logging configuration. It can be called on initialization, reset, or during
- * runtime.
- *
- * If the PF wishes to receive FW logging then it must register via
- * ice_fwlog_register. Note, that ice_fwlog_register does not need to be called
- * for init.
- */
-int ice_fwlog_set(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
-{
- if (!ice_fwlog_supported(hw))
- return -EOPNOTSUPP;
-
- return ice_aq_fwlog_set(hw, cfg->module_entries,
- ICE_AQC_FW_LOG_ID_MAX, cfg->options,
- cfg->log_resolution);
-}
-
-/**
- * ice_aq_fwlog_get - Get the current firmware logging configuration (0xFF32)
- * @hw: pointer to the HW structure
- * @cfg: firmware logging configuration to populate
- */
-static int ice_aq_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
-{
- struct ice_aqc_fw_log_cfg_resp *fw_modules;
- struct ice_aqc_fw_log *cmd;
- struct libie_aq_desc desc;
- u16 module_id_cnt;
- int status;
- void *buf;
- int i;
-
- memset(cfg, 0, sizeof(*cfg));
-
- buf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logs_query);
- cmd = libie_aq_raw(&desc);
-
- cmd->cmd_flags = ICE_AQC_FW_LOG_AQ_QUERY;
-
- status = ice_aq_send_cmd(hw, &desc, buf, ICE_AQ_MAX_BUF_LEN, NULL);
- if (status) {
- ice_debug(hw, ICE_DBG_FW_LOG, "Failed to get FW log configuration\n");
- goto status_out;
- }
-
- module_id_cnt = le16_to_cpu(cmd->ops.cfg.mdl_cnt);
- if (module_id_cnt < ICE_AQC_FW_LOG_ID_MAX) {
- ice_debug(hw, ICE_DBG_FW_LOG, "FW returned less than the expected number of FW log module IDs\n");
- } else if (module_id_cnt > ICE_AQC_FW_LOG_ID_MAX) {
- ice_debug(hw, ICE_DBG_FW_LOG, "FW returned more than expected number of FW log module IDs, setting module_id_cnt to software expected max %u\n",
- ICE_AQC_FW_LOG_ID_MAX);
- module_id_cnt = ICE_AQC_FW_LOG_ID_MAX;
- }
-
- cfg->log_resolution = le16_to_cpu(cmd->ops.cfg.log_resolution);
- if (cmd->cmd_flags & ICE_AQC_FW_LOG_CONF_AQ_EN)
- cfg->options |= ICE_FWLOG_OPTION_ARQ_ENA;
- if (cmd->cmd_flags & ICE_AQC_FW_LOG_CONF_UART_EN)
- cfg->options |= ICE_FWLOG_OPTION_UART_ENA;
- if (cmd->cmd_flags & ICE_AQC_FW_LOG_QUERY_REGISTERED)
- cfg->options |= ICE_FWLOG_OPTION_IS_REGISTERED;
-
- fw_modules = (struct ice_aqc_fw_log_cfg_resp *)buf;
-
- for (i = 0; i < module_id_cnt; i++) {
- struct ice_aqc_fw_log_cfg_resp *fw_module = &fw_modules[i];
-
- cfg->module_entries[i].module_id =
- le16_to_cpu(fw_module->module_identifier);
- cfg->module_entries[i].log_level = fw_module->log_level;
- }
-
-status_out:
- kfree(buf);
- return status;
-}
-
-/**
- * ice_fwlog_get - Get the firmware logging settings
- * @hw: pointer to the HW structure
- * @cfg: config to populate based on current firmware logging settings
- */
-int ice_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
-{
- if (!ice_fwlog_supported(hw))
- return -EOPNOTSUPP;
-
- return ice_aq_fwlog_get(hw, cfg);
-}
-
-/**
- * ice_aq_fwlog_register - Register PF for firmware logging events (0xFF31)
- * @hw: pointer to the HW structure
- * @reg: true to register and false to unregister
- */
-static int ice_aq_fwlog_register(struct ice_hw *hw, bool reg)
-{
- struct ice_aqc_fw_log *cmd;
- struct libie_aq_desc desc;
-
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logs_register);
- cmd = libie_aq_raw(&desc);
-
- if (reg)
- cmd->cmd_flags = ICE_AQC_FW_LOG_AQ_REGISTER;
-
- return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
-}
-
-/**
- * ice_fwlog_register - Register the PF for firmware logging
- * @hw: pointer to the HW structure
- *
- * After this call the PF will start to receive firmware logging based on the
- * configuration set in ice_fwlog_set.
- */
-int ice_fwlog_register(struct ice_hw *hw)
-{
- int status;
-
- if (!ice_fwlog_supported(hw))
- return -EOPNOTSUPP;
-
- status = ice_aq_fwlog_register(hw, true);
- if (status)
- ice_debug(hw, ICE_DBG_FW_LOG, "Failed to register for firmware logging events over ARQ\n");
- else
- hw->fwlog_cfg.options |= ICE_FWLOG_OPTION_IS_REGISTERED;
-
- return status;
-}
-
-/**
- * ice_fwlog_unregister - Unregister the PF from firmware logging
- * @hw: pointer to the HW structure
- */
-int ice_fwlog_unregister(struct ice_hw *hw)
-{
- int status;
-
- if (!ice_fwlog_supported(hw))
- return -EOPNOTSUPP;
-
- status = ice_aq_fwlog_register(hw, false);
- if (status)
- ice_debug(hw, ICE_DBG_FW_LOG, "Failed to unregister from firmware logging events over ARQ\n");
- else
- hw->fwlog_cfg.options &= ~ICE_FWLOG_OPTION_IS_REGISTERED;
-
- return status;
-}
-
-/**
- * ice_fwlog_set_supported - Set if FW logging is supported by FW
- * @hw: pointer to the HW struct
- *
- * If FW returns success to the ice_aq_fwlog_get call then it supports FW
- * logging, else it doesn't. Set the fwlog_supported flag accordingly.
- *
- * This function is only meant to be called during driver init to determine if
- * the FW support FW logging.
- */
-void ice_fwlog_set_supported(struct ice_hw *hw)
-{
- struct ice_fwlog_cfg *cfg;
- int status;
-
- hw->fwlog_supported = false;
-
- cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
- if (!cfg)
- return;
-
- /* don't call ice_fwlog_get() because that would check to see if FW
- * logging is supported which is what the driver is determining now
- */
- status = ice_aq_fwlog_get(hw, cfg);
- if (status)
- ice_debug(hw, ICE_DBG_FW_LOG, "ice_aq_fwlog_get failed, FW logging is not supported on this version of FW, status %d\n",
- status);
- else
- hw->fwlog_supported = true;
-
- kfree(cfg);
-}
diff --git a/drivers/net/ethernet/intel/ice/ice_fwlog.h b/drivers/net/ethernet/intel/ice/ice_fwlog.h
deleted file mode 100644
index 287e71fa4b86..000000000000
--- a/drivers/net/ethernet/intel/ice/ice_fwlog.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2022, Intel Corporation. */
-
-#ifndef _ICE_FWLOG_H_
-#define _ICE_FWLOG_H_
-#include "ice_adminq_cmd.h"
-
-struct ice_hw;
-
-/* Only a single log level should be set and all log levels under the set value
- * are enabled, e.g. if log level is set to ICE_FW_LOG_LEVEL_VERBOSE, then all
- * other log levels are included (except ICE_FW_LOG_LEVEL_NONE)
- */
-enum ice_fwlog_level {
- ICE_FWLOG_LEVEL_NONE = 0,
- ICE_FWLOG_LEVEL_ERROR = 1,
- ICE_FWLOG_LEVEL_WARNING = 2,
- ICE_FWLOG_LEVEL_NORMAL = 3,
- ICE_FWLOG_LEVEL_VERBOSE = 4,
- ICE_FWLOG_LEVEL_INVALID, /* all values >= this entry are invalid */
-};
-
-struct ice_fwlog_module_entry {
- /* module ID for the corresponding firmware logging event */
- u16 module_id;
- /* verbosity level for the module_id */
- u8 log_level;
-};
-
-struct ice_fwlog_cfg {
- /* list of modules for configuring log level */
- struct ice_fwlog_module_entry module_entries[ICE_AQC_FW_LOG_ID_MAX];
- /* options used to configure firmware logging */
- u16 options;
-#define ICE_FWLOG_OPTION_ARQ_ENA BIT(0)
-#define ICE_FWLOG_OPTION_UART_ENA BIT(1)
- /* set before calling ice_fwlog_init() so the PF registers for firmware
- * logging on initialization
- */
-#define ICE_FWLOG_OPTION_REGISTER_ON_INIT BIT(2)
- /* set in the ice_fwlog_get() response if the PF is registered for FW
- * logging events over ARQ
- */
-#define ICE_FWLOG_OPTION_IS_REGISTERED BIT(3)
-
- /* minimum number of log events sent per Admin Receive Queue event */
- u16 log_resolution;
-};
-
-struct ice_fwlog_data {
- u16 data_size;
- u8 *data;
-};
-
-struct ice_fwlog_ring {
- struct ice_fwlog_data *rings;
- u16 index;
- u16 size;
- u16 head;
- u16 tail;
-};
-
-#define ICE_FWLOG_RING_SIZE_INDEX_DFLT 3
-#define ICE_FWLOG_RING_SIZE_DFLT 256
-#define ICE_FWLOG_RING_SIZE_MAX 512
-
-bool ice_fwlog_ring_full(struct ice_fwlog_ring *rings);
-bool ice_fwlog_ring_empty(struct ice_fwlog_ring *rings);
-void ice_fwlog_ring_increment(u16 *item, u16 size);
-void ice_fwlog_set_supported(struct ice_hw *hw);
-bool ice_fwlog_supported(struct ice_hw *hw);
-int ice_fwlog_init(struct ice_hw *hw);
-void ice_fwlog_deinit(struct ice_hw *hw);
-int ice_fwlog_set(struct ice_hw *hw, struct ice_fwlog_cfg *cfg);
-int ice_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg);
-int ice_fwlog_register(struct ice_hw *hw);
-int ice_fwlog_unregister(struct ice_hw *hw);
-void ice_fwlog_realloc_rings(struct ice_hw *hw, int index);
-#endif /* _ICE_FWLOG_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index dd520aa4d1d6..082ad33c53dc 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -19,6 +19,7 @@
#define QTX_COMM_HEAD_MAX_INDEX 16383
#define QTX_COMM_HEAD_HEAD_S 0
#define QTX_COMM_HEAD_HEAD_M ICE_M(0x1FFF, 0)
+#define E830_GLQTX_TXTIME_DBELL_LSB(_DBQM) (0x002E0000 + ((_DBQM) * 8))
#define PF_FW_ARQBAH 0x00080180
#define PF_FW_ARQBAL 0x00080080
#define PF_FW_ARQH 0x00080380
@@ -571,6 +572,8 @@
#define E830_PFPTM_SEM_BUSY_M BIT(0)
#define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4))
#define VFINT_DYN_CTLN_CLEARPBA_M BIT(1)
+#define E830_GLTXTIME_FETCH_PROFILE(_i, _j) (0x002D3500 + ((_i) * 4 + (_j) * 64))
+#define E830_GLTXTIME_FETCH_PROFILE_FETCH_TS_DESC_M ICE_M(0x1FF, 0)
#define E830_MBX_PF_IN_FLIGHT_VF_MSGS_THRESH 0x00234000
#define E830_MBX_VF_DEC_TRIG(_VF) (0x00233800 + (_VF) * 4)
#define E830_MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT(_VF) (0x00233000 + (_VF) * 4)
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index b1129da72139..aebf8e08a297 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -10,12 +10,17 @@
#define ICE_LAG_RES_SHARED BIT(14)
#define ICE_LAG_RES_VALID BIT(15)
-#define LACP_TRAIN_PKT_LEN 16
-static const u8 lacp_train_pkt[LACP_TRAIN_PKT_LEN] = { 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0,
- 0x88, 0x09, 0, 0 };
+#define ICE_TRAIN_PKT_LEN 16
+static const u8 lacp_train_pkt[ICE_TRAIN_PKT_LEN] = { 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0x88, 0x09, 0, 0 };
+static const u8 act_act_train_pkt[ICE_TRAIN_PKT_LEN] = { 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0 };
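Note: bytes 12 and 13 of lacp_train_pkt (0x88, 0x09) are the Slow Protocols EtherType 0x8809 that LACP frames carry; the active/active training packet leaves the EtherType zeroed.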
#define ICE_RECIPE_LEN 64
+#define ICE_LAG_SRIOV_CP_RECIPE 10
+
static const u8 ice_dflt_vsi_rcp[ICE_RECIPE_LEN] = {
0x05, 0, 0, 0, 0x20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0x85, 0, 0x01, 0, 0, 0, 0xff, 0xff, 0x08, 0, 0, 0, 0, 0, 0, 0,
@@ -46,10 +51,10 @@ static void ice_lag_set_primary(struct ice_lag *lag)
}
/**
- * ice_lag_set_backup - set PF LAG state to Backup
+ * ice_lag_set_bkup - set PF LAG state to Backup
* @lag: LAG info struct
*/
-static void ice_lag_set_backup(struct ice_lag *lag)
+static void ice_lag_set_bkup(struct ice_lag *lag)
{
struct ice_pf *pf = lag->pf;
@@ -99,6 +104,28 @@ static bool netif_is_same_ice(struct ice_pf *pf, struct net_device *netdev)
}
/**
+ * ice_lag_config_eswitch - configure eswitch to work with LAG
+ * @lag: lag info struct
+ * @netdev: active network interface device struct
+ *
+ * Updates all port representors in eswitch to use @netdev for Tx.
+ *
+ * Configures the netdev to keep dst metadata (also used in representor Tx).
+ * This is required for an uplink without switchdev mode configured.
+ */
+static void ice_lag_config_eswitch(struct ice_lag *lag,
+ struct net_device *netdev)
+{
+ struct ice_repr *repr;
+ unsigned long id;
+
+ xa_for_each(&lag->pf->eswitch.reprs, id, repr)
+ repr->dst->u.port_info.lower_dev = netdev;
+
+ netif_keep_dst(netdev);
+}
+
+/**
* ice_netdev_to_lag - return pointer to associated lag struct from netdev
* @netdev: pointer to net_device struct to query
*/
@@ -210,13 +237,12 @@ ice_lag_cfg_fltr(struct ice_lag *lag, u32 act, u16 recipe_id, u16 *rule_idx,
u8 direction, bool add)
{
struct ice_sw_rule_lkup_rx_tx *s_rule;
+ struct ice_hw *hw = &lag->pf->hw;
u16 s_rule_sz, vsi_num;
- struct ice_hw *hw;
u8 *eth_hdr;
u32 opc;
int err;
- hw = &lag->pf->hw;
vsi_num = ice_get_hw_vsi_num(hw, 0);
s_rule_sz = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule);
@@ -314,26 +340,15 @@ ice_lag_cfg_drop_fltr(struct ice_lag *lag, bool add)
}
/**
- * ice_lag_cfg_pf_fltrs - set filters up for new active port
+ * ice_lag_cfg_pf_fltrs_act_bkup - set filters up for new active port
* @lag: local interfaces lag struct
- * @ptr: opaque data containing notifier event
+ * @bonding_info: netdev event bonding info
*/
static void
-ice_lag_cfg_pf_fltrs(struct ice_lag *lag, void *ptr)
+ice_lag_cfg_pf_fltrs_act_bkup(struct ice_lag *lag,
+ struct netdev_bonding_info *bonding_info)
{
- struct netdev_notifier_bonding_info *info;
- struct netdev_bonding_info *bonding_info;
- struct net_device *event_netdev;
- struct device *dev;
-
- event_netdev = netdev_notifier_info_to_dev(ptr);
- /* not for this netdev */
- if (event_netdev != lag->netdev)
- return;
-
- info = (struct netdev_notifier_bonding_info *)ptr;
- bonding_info = &info->bonding_info;
- dev = ice_pf_to_dev(lag->pf);
+ struct device *dev = ice_pf_to_dev(lag->pf);
/* interface not active - remove old default VSI rule */
if (bonding_info->slave.state && lag->pf_rx_rule_id) {
@@ -354,6 +369,105 @@ ice_lag_cfg_pf_fltrs(struct ice_lag *lag, void *ptr)
}
/**
+ * ice_lag_cfg_lp_fltr - configure lport filters
+ * @lag: local interface's lag struct
+ * @add: add or remove rule
+ * @cp: control packet only or general PF lport rule
+ */
+static void
+ice_lag_cfg_lp_fltr(struct ice_lag *lag, bool add, bool cp)
+{
+ struct ice_sw_rule_lkup_rx_tx *s_rule;
+ struct ice_vsi *vsi = lag->pf->vsi[0];
+ u16 buf_len, opc;
+
+ buf_len = ICE_SW_RULE_RX_TX_HDR_SIZE(s_rule, ICE_TRAIN_PKT_LEN);
+ s_rule = kzalloc(buf_len, GFP_KERNEL);
+ if (!s_rule) {
+ netdev_warn(lag->netdev, "-ENOMEM error configuring CP filter\n");
+ return;
+ }
+
+ if (add) {
+ if (cp) {
+ s_rule->recipe_id =
+ cpu_to_le16(ICE_LAG_SRIOV_CP_RECIPE);
+ memcpy(s_rule->hdr_data, lacp_train_pkt,
+ ICE_TRAIN_PKT_LEN);
+ } else {
+ s_rule->recipe_id = cpu_to_le16(lag->act_act_recipe);
+ memcpy(s_rule->hdr_data, act_act_train_pkt,
+ ICE_TRAIN_PKT_LEN);
+ }
+
+ s_rule->src = cpu_to_le16(vsi->port_info->lport);
+ s_rule->act = cpu_to_le32(ICE_FWD_TO_VSI |
+ ICE_SINGLE_ACT_LAN_ENABLE |
+ ICE_SINGLE_ACT_VALID_BIT |
+ FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M,
+ vsi->vsi_num));
+ s_rule->hdr_len = cpu_to_le16(ICE_TRAIN_PKT_LEN);
+ s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
+ opc = ice_aqc_opc_add_sw_rules;
+ } else {
+ opc = ice_aqc_opc_remove_sw_rules;
+ if (cp)
+ s_rule->index = cpu_to_le16(lag->cp_rule_idx);
+ else
+ s_rule->index = cpu_to_le16(lag->act_act_rule_idx);
+ }
+ if (ice_aq_sw_rules(&lag->pf->hw, s_rule, buf_len, 1, opc, NULL)) {
+ netdev_warn(lag->netdev, "Error %s %s rule for aggregate\n",
+ add ? "ADDING" : "REMOVING",
+ cp ? "CONTROL PACKET" : "LPORT");
+ goto err_cp_free;
+ }
+
+ if (add) {
+ if (cp)
+ lag->cp_rule_idx = le16_to_cpu(s_rule->index);
+ else
+ lag->act_act_rule_idx = le16_to_cpu(s_rule->index);
+ } else {
+ if (cp)
+ lag->cp_rule_idx = 0;
+ else
+ lag->act_act_rule_idx = 0;
+ }
+
+err_cp_free:
+ kfree(s_rule);
+}
+
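Rough usage sketch (not part of the patch itself): @cp selects which of the two rules the helper manages, so call sites later in this change look like:

	/* A/B bond: add the LACP control-packet rule on recipe 10 */
	ice_lag_cfg_lp_fltr(lag, true, true);

	/* A/A bond: add the general lport training rule instead */
	ice_lag_cfg_lp_fltr(lag, true, false);

Both forms appear below in ice_lag_cfg_pf_fltrs and ice_lag_changeupper_event.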
+/**
+ * ice_lag_cfg_pf_fltrs - set filters up for PF traffic
+ * @lag: local interfaces lag struct
+ * @ptr: opaque data containing notifier event
+ */
+static void
+ice_lag_cfg_pf_fltrs(struct ice_lag *lag, void *ptr)
+{
+ struct netdev_notifier_bonding_info *info = ptr;
+ struct netdev_bonding_info *bonding_info;
+ struct net_device *event_netdev;
+
+ event_netdev = netdev_notifier_info_to_dev(ptr);
+ if (event_netdev != lag->netdev)
+ return;
+
+ bonding_info = &info->bonding_info;
+
+ if (lag->bond_aa) {
+ if (lag->need_fltr_cfg) {
+ ice_lag_cfg_lp_fltr(lag, true, false);
+ lag->need_fltr_cfg = false;
+ }
+ } else {
+ ice_lag_cfg_pf_fltrs_act_bkup(lag, bonding_info);
+ }
+}
+
+/**
* ice_display_lag_info - print LAG info
* @lag: LAG info struct
*/
@@ -402,12 +516,11 @@ static u16
ice_lag_qbuf_recfg(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *qbuf,
u16 vsi_num, u16 numq, u8 tc)
{
+ struct ice_pf *pf = hw->back;
struct ice_q_ctx *q_ctx;
u16 qid, count = 0;
- struct ice_pf *pf;
int i;
- pf = hw->back;
for (i = 0; i < numq; i++) {
q_ctx = ice_get_lan_q_ctx(hw, vsi_num, tc, i);
if (!q_ctx) {
@@ -577,7 +690,7 @@ ice_lag_move_vf_node_tc(struct ice_lag *lag, u8 oldport, u8 newport,
}
if (ice_aq_cfg_lan_txq(&lag->pf->hw, qbuf, qbuf_size, valq, oldport,
- newport, NULL)) {
+ newport, ICE_AQC_Q_CFG_TC_CHNG, NULL)) {
dev_warn(dev, "Failure to configure queues for LAG failover\n");
goto qbuf_err;
}
@@ -677,54 +790,6 @@ ice_lag_move_single_vf_nodes(struct ice_lag *lag, u8 oldport, u8 newport,
}
/**
- * ice_lag_move_new_vf_nodes - Move Tx scheduling nodes for a VF if required
- * @vf: the VF to move Tx nodes for
- *
- * Called just after configuring new VF queues. Check whether the VF Tx
- * scheduling nodes need to be updated to fail over to the active port. If so,
- * move them now.
- */
-void ice_lag_move_new_vf_nodes(struct ice_vf *vf)
-{
- struct ice_lag_netdev_list ndlist;
- u8 pri_port, act_port;
- struct ice_lag *lag;
- struct ice_vsi *vsi;
- struct ice_pf *pf;
-
- vsi = ice_get_vf_vsi(vf);
-
- if (WARN_ON(!vsi))
- return;
-
- if (WARN_ON(vsi->type != ICE_VSI_VF))
- return;
-
- pf = vf->pf;
- lag = pf->lag;
-
- mutex_lock(&pf->lag_mutex);
- if (!lag->bonded)
- goto new_vf_unlock;
-
- pri_port = pf->hw.port_info->lport;
- act_port = lag->active_port;
-
- if (lag->upper_netdev)
- ice_lag_build_netdev_list(lag, &ndlist);
-
- if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG) &&
- lag->bonded && lag->primary && pri_port != act_port &&
- !list_empty(lag->netdev_head))
- ice_lag_move_single_vf_nodes(lag, pri_port, act_port, vsi->idx);
-
- ice_lag_destroy_netdev_list(lag, &ndlist);
-
-new_vf_unlock:
- mutex_unlock(&pf->lag_mutex);
-}
-
-/**
* ice_lag_move_vf_nodes - move Tx scheduling nodes for all VFs to new port
* @lag: lag info struct
* @oldport: lport of previous interface
@@ -767,61 +832,6 @@ void ice_lag_move_vf_nodes_cfg(struct ice_lag *lag, u8 src_prt, u8 dst_prt)
ice_lag_destroy_netdev_list(lag, &ndlist);
}
-#define ICE_LAG_SRIOV_CP_RECIPE 10
-#define ICE_LAG_SRIOV_TRAIN_PKT_LEN 16
-
-/**
- * ice_lag_cfg_cp_fltr - configure filter for control packets
- * @lag: local interface's lag struct
- * @add: add or remove rule
- */
-static void
-ice_lag_cfg_cp_fltr(struct ice_lag *lag, bool add)
-{
- struct ice_sw_rule_lkup_rx_tx *s_rule = NULL;
- struct ice_vsi *vsi;
- u16 buf_len, opc;
-
- vsi = lag->pf->vsi[0];
-
- buf_len = ICE_SW_RULE_RX_TX_HDR_SIZE(s_rule,
- ICE_LAG_SRIOV_TRAIN_PKT_LEN);
- s_rule = kzalloc(buf_len, GFP_KERNEL);
- if (!s_rule) {
- netdev_warn(lag->netdev, "-ENOMEM error configuring CP filter\n");
- return;
- }
-
- if (add) {
- s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
- s_rule->recipe_id = cpu_to_le16(ICE_LAG_SRIOV_CP_RECIPE);
- s_rule->src = cpu_to_le16(vsi->port_info->lport);
- s_rule->act = cpu_to_le32(ICE_FWD_TO_VSI |
- ICE_SINGLE_ACT_LAN_ENABLE |
- ICE_SINGLE_ACT_VALID_BIT |
- FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M, vsi->vsi_num));
- s_rule->hdr_len = cpu_to_le16(ICE_LAG_SRIOV_TRAIN_PKT_LEN);
- memcpy(s_rule->hdr_data, lacp_train_pkt, LACP_TRAIN_PKT_LEN);
- opc = ice_aqc_opc_add_sw_rules;
- } else {
- opc = ice_aqc_opc_remove_sw_rules;
- s_rule->index = cpu_to_le16(lag->cp_rule_idx);
- }
- if (ice_aq_sw_rules(&lag->pf->hw, s_rule, buf_len, 1, opc, NULL)) {
- netdev_warn(lag->netdev, "Error %s CP rule for fail-over\n",
- add ? "ADDING" : "REMOVING");
- goto cp_free;
- }
-
- if (add)
- lag->cp_rule_idx = le16_to_cpu(s_rule->index);
- else
- lag->cp_rule_idx = 0;
-
-cp_free:
- kfree(s_rule);
-}
-
/**
* ice_lag_prepare_vf_reset - helper to adjust vf lag for reset
* @lag: lag struct for interface that owns VF
@@ -835,11 +845,20 @@ u8 ice_lag_prepare_vf_reset(struct ice_lag *lag)
u8 pri_prt, act_prt;
if (lag && lag->bonded && lag->primary && lag->upper_netdev) {
- pri_prt = lag->pf->hw.port_info->lport;
- act_prt = lag->active_port;
- if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT) {
- ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);
- return act_prt;
+ if (!lag->bond_aa) {
+ pri_prt = lag->pf->hw.port_info->lport;
+ act_prt = lag->active_port;
+ if (act_prt != pri_prt &&
+ act_prt != ICE_LAG_INVALID_PORT) {
+ ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);
+ return act_prt;
+ }
+ } else {
+ if (lag->port_bitmap & ICE_LAGS_M) {
+ lag->port_bitmap &= ~ICE_LAGS_M;
+ ice_lag_aa_failover(lag, ICE_LAGP_IDX, NULL);
+ lag->port_bitmap |= ICE_LAGS_M;
+ }
}
}
@@ -857,10 +876,15 @@ void ice_lag_complete_vf_reset(struct ice_lag *lag, u8 act_prt)
{
u8 pri_prt;
- if (lag && lag->bonded && lag->primary &&
- act_prt != ICE_LAG_INVALID_PORT) {
- pri_prt = lag->pf->hw.port_info->lport;
- ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
+ if (lag && lag->bonded && lag->primary) {
+ if (!lag->bond_aa) {
+ pri_prt = lag->pf->hw.port_info->lport;
+ if (act_prt != ICE_LAG_INVALID_PORT)
+ ice_lag_move_vf_nodes_cfg(lag, pri_prt,
+ act_prt);
+ } else {
+ ice_lag_aa_failover(lag, ICE_LAGS_IDX, NULL);
+ }
}
}
@@ -873,13 +897,12 @@ void ice_lag_complete_vf_reset(struct ice_lag *lag, u8 act_prt)
*/
static void ice_lag_info_event(struct ice_lag *lag, void *ptr)
{
- struct netdev_notifier_bonding_info *info;
+ struct netdev_notifier_bonding_info *info = ptr;
struct netdev_bonding_info *bonding_info;
struct net_device *event_netdev;
const char *lag_netdev_name;
event_netdev = netdev_notifier_info_to_dev(ptr);
- info = ptr;
lag_netdev_name = netdev_name(lag->netdev);
bonding_info = &info->bonding_info;
@@ -897,7 +920,7 @@ static void ice_lag_info_event(struct ice_lag *lag, void *ptr)
}
if (bonding_info->slave.state)
- ice_lag_set_backup(lag);
+ ice_lag_set_bkup(lag);
else
ice_lag_set_primary(lag);
@@ -906,6 +929,295 @@ lag_out:
}
/**
+ * ice_lag_aa_qbuf_recfg - fill a single queue buffer for recfg cmd
+ * @hw: HW struct that contains the queue context
+ * @qbuf: pointer to single queue buffer
+ * @vsi_num: index of the VF VSI in PF space
+ * @qnum: queue index
+ *
+ * Return: Zero on success, error code on failure.
+ */
+static int
+ice_lag_aa_qbuf_recfg(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *qbuf,
+ u16 vsi_num, int qnum)
+{
+ struct ice_pf *pf = hw->back;
+ struct ice_q_ctx *q_ctx;
+ u16 q_id;
+
+ q_ctx = ice_get_lan_q_ctx(hw, vsi_num, 0, qnum);
+ if (!q_ctx) {
+ dev_dbg(ice_hw_to_dev(hw), "LAG queue %d no Q context\n", qnum);
+ return -ENOENT;
+ }
+
+ if (q_ctx->q_teid == ICE_INVAL_TEID) {
+ dev_dbg(ice_hw_to_dev(hw), "LAG queue %d INVAL TEID\n", qnum);
+ return -EINVAL;
+ }
+
+ if (q_ctx->q_handle == ICE_INVAL_Q_HANDLE) {
+ dev_dbg(ice_hw_to_dev(hw), "LAG queue %d INVAL Q HANDLE\n", qnum);
+ return -EINVAL;
+ }
+
+ q_id = pf->vsi[vsi_num]->txq_map[q_ctx->q_handle];
+ qbuf->queue_info[0].q_handle = cpu_to_le16(q_id);
+ qbuf->queue_info[0].tc = 0;
+ qbuf->queue_info[0].q_teid = cpu_to_le32(q_ctx->q_teid);
+
+ return 0;
+}
+
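Sketch of the intended call pattern (names as used elsewhere in this patch; src_lport/dst_lport are illustrative): the A/A path moves queues one at a time, so the caller declares a single-entry buffer and resubmits it per queue:

	DEFINE_RAW_FLEX(struct ice_aqc_cfg_txqs_buf, qbuf, queue_info, 1);

	qbuf->src_parent_teid = q_node->info.parent_teid;
	qbuf->dst_parent_teid = n_prt->info.node_teid;
	if (!ice_lag_aa_qbuf_recfg(pri_hw, qbuf, vsi_num, i))
		ice_aq_cfg_lan_txq(pri_hw, qbuf, __struct_size(qbuf), 1,
				   src_lport, dst_lport,
				   ICE_AQC_Q_CFG_MOVE_TC_CHNG, NULL);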
+/**
+ * ice_lag_aa_move_vf_qs - Move some/all VF queues to destination
+ * @lag: primary interface's lag struct
+ * @dest: index of destination port
+ * @vsi_num: index of VF VSI in PF space
+ * @all: if true move all queues to destination
+ * @odd: VF-wide queue indicator toggling between odd and even
+ * @e_pf: PF struct for the event interface
+ *
+ * The @all parameter controls whether we are splitting the queues between
+ * the two interfaces or moving them all to the destination interface.
+ */
+static void ice_lag_aa_move_vf_qs(struct ice_lag *lag, u8 dest, u16 vsi_num,
+ bool all, bool *odd, struct ice_pf *e_pf)
+{
+ DEFINE_RAW_FLEX(struct ice_aqc_cfg_txqs_buf, qbuf, queue_info, 1);
+ struct ice_hw *old_hw, *new_hw, *pri_hw, *sec_hw;
+ struct device *dev = ice_pf_to_dev(lag->pf);
+ struct ice_vsi_ctx *pv_ctx, *sv_ctx;
+ struct ice_lag_netdev_list ndlist;
+ u16 num_q, qbuf_size, sec_vsi_num;
+ u8 pri_lport, sec_lport;
+ u32 pvf_teid, svf_teid;
+ u16 vf_id;
+
+ vf_id = lag->pf->vsi[vsi_num]->vf->vf_id;
+ /* If sec_vf[] not defined, then no second interface to share with */
+ if (lag->sec_vf[vf_id])
+ sec_vsi_num = lag->sec_vf[vf_id]->idx;
+ else
+ return;
+
+ pri_lport = lag->bond_lport_pri;
+ sec_lport = lag->bond_lport_sec;
+
+ if (pri_lport == ICE_LAG_INVALID_PORT ||
+ sec_lport == ICE_LAG_INVALID_PORT)
+ return;
+
+ if (!e_pf)
+ ice_lag_build_netdev_list(lag, &ndlist);
+
+ pri_hw = &lag->pf->hw;
+ if (e_pf && lag->pf != e_pf)
+ sec_hw = &e_pf->hw;
+ else
+ sec_hw = ice_lag_find_hw_by_lport(lag, sec_lport);
+
+ if (!pri_hw || !sec_hw)
+ return;
+
+ if (dest == ICE_LAGP_IDX) {
+ struct ice_vsi *vsi;
+
+ vsi = ice_get_main_vsi(lag->pf);
+ if (!vsi)
+ return;
+
+ old_hw = sec_hw;
+ new_hw = pri_hw;
+ ice_lag_config_eswitch(lag, vsi->netdev);
+ } else {
+ struct ice_pf *sec_pf = sec_hw->back;
+ struct ice_vsi *vsi;
+
+ vsi = ice_get_main_vsi(sec_pf);
+ if (!vsi)
+ return;
+
+ old_hw = pri_hw;
+ new_hw = sec_hw;
+ ice_lag_config_eswitch(lag, vsi->netdev);
+ }
+
+ pv_ctx = ice_get_vsi_ctx(pri_hw, vsi_num);
+ if (!pv_ctx) {
+ dev_warn(dev, "Unable to locate primary VSI %d context for LAG failover\n",
+ vsi_num);
+ return;
+ }
+
+ sv_ctx = ice_get_vsi_ctx(sec_hw, sec_vsi_num);
+ if (!sv_ctx) {
+ dev_warn(dev, "Unable to locate secondary VSI %d context for LAG failover\n",
+ vsi_num);
+ return;
+ }
+
+ num_q = pv_ctx->num_lan_q_entries[0];
+ qbuf_size = __struct_size(qbuf);
+
+ /* Suspend traffic for primary VSI VF */
+ pvf_teid = le32_to_cpu(pv_ctx->sched.vsi_node[0]->info.node_teid);
+ ice_sched_suspend_resume_elems(pri_hw, 1, &pvf_teid, true);
+
+ /* Suspend traffic for secondary VSI VF */
+ svf_teid = le32_to_cpu(sv_ctx->sched.vsi_node[0]->info.node_teid);
+ ice_sched_suspend_resume_elems(sec_hw, 1, &svf_teid, true);
+
+ for (int i = 0; i < num_q; i++) {
+ struct ice_sched_node *n_prt, *q_node, *parent;
+ struct ice_port_info *pi, *new_pi;
+ struct ice_vsi_ctx *src_ctx;
+ struct ice_sched_node *p;
+ struct ice_q_ctx *q_ctx;
+ u16 dst_vsi_num;
+
+ pi = old_hw->port_info;
+ new_pi = new_hw->port_info;
+
+ *odd = !(*odd);
+ if ((dest == ICE_LAGP_IDX && *odd && !all) ||
+ (dest == ICE_LAGS_IDX && !(*odd) && !all) ||
+ lag->q_home[vf_id][i] == dest)
+ continue;
+
+ if (dest == ICE_LAGP_IDX)
+ dst_vsi_num = vsi_num;
+ else
+ dst_vsi_num = sec_vsi_num;
+
+ n_prt = ice_sched_get_free_qparent(new_hw->port_info,
+ dst_vsi_num, 0,
+ ICE_SCHED_NODE_OWNER_LAN);
+ if (!n_prt)
+ continue;
+
+ q_ctx = ice_get_lan_q_ctx(pri_hw, vsi_num, 0, i);
+ if (!q_ctx)
+ continue;
+
+ if (dest == ICE_LAGP_IDX)
+ src_ctx = sv_ctx;
+ else
+ src_ctx = pv_ctx;
+
+ q_node = ice_sched_find_node_by_teid(src_ctx->sched.vsi_node[0],
+ q_ctx->q_teid);
+ if (!q_node)
+ continue;
+
+ qbuf->src_parent_teid = q_node->info.parent_teid;
+ qbuf->dst_parent_teid = n_prt->info.node_teid;
+
+ /* Move the node in the HW/FW */
+ if (ice_lag_aa_qbuf_recfg(pri_hw, qbuf, vsi_num, i))
+ continue;
+
+ if (dest == ICE_LAGP_IDX)
+ ice_aq_cfg_lan_txq(pri_hw, qbuf, qbuf_size, 1,
+ sec_lport, pri_lport,
+ ICE_AQC_Q_CFG_MOVE_TC_CHNG,
+ NULL);
+ else
+ ice_aq_cfg_lan_txq(pri_hw, qbuf, qbuf_size, 1,
+ pri_lport, sec_lport,
+ ICE_AQC_Q_CFG_MOVE_TC_CHNG,
+ NULL);
+
+ /* Move the node in the SW */
+ parent = q_node->parent;
+ if (!parent)
+ continue;
+
+ for (int n = 0; n < parent->num_children; n++) {
+ int j;
+
+ if (parent->children[n] != q_node)
+ continue;
+
+ for (j = n + 1; j < parent->num_children;
+ j++) {
+ parent->children[j - 1] =
+ parent->children[j];
+ }
+ parent->children[j] = NULL;
+ parent->num_children--;
+ break;
+ }
+
+ p = pi->sib_head[0][q_node->tx_sched_layer];
+ while (p) {
+ if (p->sibling == q_node) {
+ p->sibling = q_node->sibling;
+ break;
+ }
+ p = p->sibling;
+ }
+
+ if (pi->sib_head[0][q_node->tx_sched_layer] == q_node)
+ pi->sib_head[0][q_node->tx_sched_layer] =
+ q_node->sibling;
+
+ q_node->parent = n_prt;
+ q_node->info.parent_teid = n_prt->info.node_teid;
+ q_node->sibling = NULL;
+ p = new_pi->sib_head[0][q_node->tx_sched_layer];
+ if (p) {
+ while (p) {
+ if (!p->sibling) {
+ p->sibling = q_node;
+ break;
+ }
+ p = p->sibling;
+ }
+ } else {
+ new_pi->sib_head[0][q_node->tx_sched_layer] =
+ q_node;
+ }
+
+ n_prt->children[n_prt->num_children++] = q_node;
+ lag->q_home[vf_id][i] = dest;
+ }
+
+ ice_sched_suspend_resume_elems(pri_hw, 1, &pvf_teid, false);
+ ice_sched_suspend_resume_elems(sec_hw, 1, &svf_teid, false);
+
+ if (!e_pf)
+ ice_lag_destroy_netdev_list(lag, &ndlist);
+}
+
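The split condition in the loop above is dense; restated as a standalone predicate (a sketch only, where odd is the post-toggle value and cur_home is q_home[vf_id][i]):

	static bool ice_lag_aa_queue_moves(u8 dest, bool odd, bool all,
					   u8 cur_home)
	{
		if (cur_home == dest)
			return false;	/* queue already lives on dest */
		if (all)
			return true;	/* collapse everything onto dest */
		/* split mode: even-toggle queues land on the primary,
		 * odd-toggle queues on the secondary
		 */
		return dest == ICE_LAGP_IDX ? !odd : odd;
	}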
+/**
+ * ice_lag_aa_failover - move VF queues in A/A mode
+ * @lag: primary lag struct
+ * @dest: index of destination port
+ * @e_pf: PF struct for event port
+ */
+void ice_lag_aa_failover(struct ice_lag *lag, u8 dest, struct ice_pf *e_pf)
+{
+ bool odd = true, all = false;
+ int i;
+
+ /* Primary can be a target if down (cleanup), but secondary can't */
+ if (dest == ICE_LAGS_IDX && !(lag->port_bitmap & ICE_LAGS_M))
+ return;
+
+ /* Move all queues to a destination if only one port is active,
+ * or no ports are active and dest is primary.
+ */
+ if ((lag->port_bitmap ^ (ICE_LAGP_M | ICE_LAGS_M)) ||
+ (!lag->port_bitmap && dest == ICE_LAGP_IDX))
+ all = true;
+
+ ice_for_each_vsi(lag->pf, i)
+ if (lag->pf->vsi[i] && lag->pf->vsi[i]->type == ICE_VSI_VF)
+ ice_lag_aa_move_vf_qs(lag, dest, i, all, &odd, e_pf);
+}
+
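Worked example of the @all decision (masks from ice_lag.h):

	port_bitmap          dest           outcome
	0x3 (both ports up)  either         all = false, odd/even queue split
	0x1 or 0x2           ICE_LAGP_IDX   all = true, everything to primary
	0x2                  ICE_LAGS_IDX   all = true, everything to secondary
	0x0                  ICE_LAGP_IDX   all = true, cleanup onto primary
	0x0 or 0x1           ICE_LAGS_IDX   early return, secondary not active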
+/**
* ice_lag_reclaim_vf_tc - move scheduling nodes back to primary interface
* @lag: primary interface lag struct
* @src_hw: HW struct current node location
@@ -921,13 +1233,12 @@ ice_lag_reclaim_vf_tc(struct ice_lag *lag, struct ice_hw *src_hw, u16 vsi_num,
u16 numq, valq, num_moved, qbuf_size;
u16 buf_size = __struct_size(buf);
struct ice_aqc_cfg_txqs_buf *qbuf;
+ struct ice_hw *hw = &lag->pf->hw;
struct ice_sched_node *n_prt;
__le32 teid, parent_teid;
struct ice_vsi_ctx *ctx;
- struct ice_hw *hw;
u32 tmp_teid;
- hw = &lag->pf->hw;
ctx = ice_get_vsi_ctx(hw, vsi_num);
if (!ctx) {
dev_warn(dev, "Unable to locate VSI context for LAG reclaim\n");
@@ -968,7 +1279,7 @@ ice_lag_reclaim_vf_tc(struct ice_lag *lag, struct ice_hw *src_hw, u16 vsi_num,
if (ice_aq_cfg_lan_txq(hw, qbuf, qbuf_size, numq,
src_hw->port_info->lport, hw->port_info->lport,
- NULL)) {
+ ICE_AQC_Q_CFG_TC_CHNG, NULL)) {
dev_warn(dev, "Failure to configure queues for LAG failover\n");
goto reclaim_qerr;
}
@@ -1039,36 +1350,15 @@ static void ice_lag_link(struct ice_lag *lag)
lag->bonded = true;
lag->role = ICE_LAG_UNSET;
+ lag->need_fltr_cfg = true;
netdev_info(lag->netdev, "Shared SR-IOV resources in bond are active\n");
}
/**
- * ice_lag_config_eswitch - configure eswitch to work with LAG
- * @lag: lag info struct
- * @netdev: active network interface device struct
- *
- * Updates all port representors in eswitch to use @netdev for Tx.
- *
- * Configures the netdev to keep dst metadata (also used in representor Tx).
- * This is required for an uplink without switchdev mode configured.
- */
-static void ice_lag_config_eswitch(struct ice_lag *lag,
- struct net_device *netdev)
-{
- struct ice_repr *repr;
- unsigned long id;
-
- xa_for_each(&lag->pf->eswitch.reprs, id, repr)
- repr->dst->u.port_info.lower_dev = netdev;
-
- netif_keep_dst(netdev);
-}
-
-/**
- * ice_lag_unlink - handle unlink event
+ * ice_lag_act_bkup_unlink - handle unlink event for A/B bond
* @lag: LAG info struct
*/
-static void ice_lag_unlink(struct ice_lag *lag)
+static void ice_lag_act_bkup_unlink(struct ice_lag *lag)
{
u8 pri_port, act_port, loc_port;
struct ice_pf *pf = lag->pf;
@@ -1104,10 +1394,32 @@ static void ice_lag_unlink(struct ice_lag *lag)
}
}
}
+}
- lag->bonded = false;
- lag->role = ICE_LAG_NONE;
- lag->upper_netdev = NULL;
+/**
+ * ice_lag_aa_unlink - handle unlink event for Active-Active bond
+ * @lag: LAG info struct
+ */
+static void ice_lag_aa_unlink(struct ice_lag *lag)
+{
+ struct ice_lag *pri_lag;
+
+ if (lag->primary) {
+ pri_lag = lag;
+ lag->port_bitmap &= ~ICE_LAGP_M;
+ } else {
+ pri_lag = ice_lag_find_primary(lag);
+ if (pri_lag)
+ pri_lag->port_bitmap &= ~ICE_LAGS_M;
+ }
+
+ if (pri_lag) {
+ ice_lag_aa_failover(pri_lag, ICE_LAGP_IDX, lag->pf);
+ if (lag->primary)
+ pri_lag->bond_lport_pri = ICE_LAG_INVALID_PORT;
+ else
+ pri_lag->bond_lport_sec = ICE_LAG_INVALID_PORT;
+ }
}
/**
@@ -1123,10 +1435,20 @@ static void ice_lag_link_unlink(struct ice_lag *lag, void *ptr)
if (netdev != lag->netdev)
return;
- if (info->linking)
+ if (info->linking) {
ice_lag_link(lag);
- else
- ice_lag_unlink(lag);
+ } else {
+ if (lag->bond_aa)
+ ice_lag_aa_unlink(lag);
+ else
+ ice_lag_act_bkup_unlink(lag);
+
+ lag->bonded = false;
+ lag->role = ICE_LAG_NONE;
+ lag->upper_netdev = NULL;
+ lag->bond_aa = false;
+ lag->need_fltr_cfg = false;
+ }
}
/**
@@ -1224,11 +1546,8 @@ ice_lag_set_swid(u16 primary_swid, struct ice_lag *local_lag,
*/
static void ice_lag_primary_swid(struct ice_lag *lag, bool link)
{
- struct ice_hw *hw;
- u16 swid;
-
- hw = &lag->pf->hw;
- swid = hw->port_info->sw_id;
+ struct ice_hw *hw = &lag->pf->hw;
+ u16 swid = hw->port_info->sw_id;
if (ice_share_res(hw, ICE_AQC_RES_TYPE_SWID, link, swid))
dev_warn(ice_pf_to_dev(lag->pf), "Failure to set primary interface shared status\n");
@@ -1241,12 +1560,10 @@ static void ice_lag_primary_swid(struct ice_lag *lag, bool link)
*/
static void ice_lag_add_prune_list(struct ice_lag *lag, struct ice_pf *event_pf)
{
- u16 num_vsi, rule_buf_sz, vsi_list_id, event_vsi_num, prim_vsi_idx;
- struct ice_sw_rule_vsi_list *s_rule = NULL;
+ u16 rule_buf_sz, vsi_list_id, event_vsi_num, prim_vsi_idx, num_vsi = 1;
+ struct ice_sw_rule_vsi_list *s_rule;
struct device *dev;
- num_vsi = 1;
-
dev = ice_pf_to_dev(lag->pf);
event_vsi_num = event_pf->vsi[0]->vsi_num;
prim_vsi_idx = lag->pf->vsi[0]->idx;
@@ -1282,12 +1599,10 @@ static void ice_lag_add_prune_list(struct ice_lag *lag, struct ice_pf *event_pf)
*/
static void ice_lag_del_prune_list(struct ice_lag *lag, struct ice_pf *event_pf)
{
- u16 num_vsi, vsi_num, vsi_idx, rule_buf_sz, vsi_list_id;
- struct ice_sw_rule_vsi_list *s_rule = NULL;
+ u16 vsi_num, vsi_idx, rule_buf_sz, vsi_list_id, num_vsi = 1;
+ struct ice_sw_rule_vsi_list *s_rule;
struct device *dev;
- num_vsi = 1;
-
dev = ice_pf_to_dev(lag->pf);
vsi_num = event_pf->vsi[0]->vsi_num;
vsi_idx = lag->pf->vsi[0]->idx;
@@ -1335,6 +1650,11 @@ static void ice_lag_init_feature_support_flag(struct ice_pf *pf)
ice_set_feature_support(pf, ICE_F_SRIOV_LAG);
else
ice_clear_feature_support(pf, ICE_F_SRIOV_LAG);
+
+ if (caps->sriov_aa_lag && ice_pkg_has_lport_extract(&pf->hw))
+ ice_set_feature_support(pf, ICE_F_SRIOV_AA_LAG);
+ else
+ ice_clear_feature_support(pf, ICE_F_SRIOV_AA_LAG);
}
/**
@@ -1344,11 +1664,10 @@ static void ice_lag_init_feature_support_flag(struct ice_pf *pf)
*/
static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr)
{
- struct netdev_notifier_changeupper_info *info;
+ struct netdev_notifier_changeupper_info *info = ptr;
struct ice_lag *primary_lag;
struct net_device *netdev;
- info = ptr;
netdev = netdev_notifier_info_to_dev(ptr);
/* not for this netdev */
@@ -1369,6 +1688,9 @@ static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr)
/* Configure primary's SWID to be shared */
ice_lag_primary_swid(lag, true);
primary_lag = lag;
+ lag->bond_lport_pri = lag->pf->hw.port_info->lport;
+ lag->bond_lport_sec = ICE_LAG_INVALID_PORT;
+ lag->port_bitmap = 0;
} else {
u16 swid;
@@ -1378,16 +1700,29 @@ static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr)
swid = primary_lag->pf->hw.port_info->sw_id;
ice_lag_set_swid(swid, lag, true);
ice_lag_add_prune_list(primary_lag, lag->pf);
- ice_lag_cfg_drop_fltr(lag, true);
+ primary_lag->bond_lport_sec =
+ lag->pf->hw.port_info->lport;
}
/* add filter for primary control packets */
- ice_lag_cfg_cp_fltr(lag, true);
+ ice_lag_cfg_lp_fltr(lag, true, true);
} else {
if (!primary_lag && lag->primary)
primary_lag = lag;
+ if (primary_lag) {
+ for (int i = 0; i < ICE_MAX_SRIOV_VFS; i++) {
+ if (primary_lag->sec_vf[i]) {
+ ice_vsi_release(primary_lag->sec_vf[i]);
+ primary_lag->sec_vf[i] = NULL;
+ }
+ }
+ }
+
if (!lag->primary) {
ice_lag_set_swid(0, lag, false);
+ if (primary_lag)
+ primary_lag->bond_lport_sec =
+ ICE_LAG_INVALID_PORT;
} else {
if (primary_lag && lag->primary) {
ice_lag_primary_swid(lag, false);
@@ -1395,7 +1730,7 @@ static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr)
}
}
/* remove filter for control packets */
- ice_lag_cfg_cp_fltr(lag, false);
+ ice_lag_cfg_lp_fltr(lag, false, !lag->bond_aa);
}
}
@@ -1408,7 +1743,7 @@ static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr)
*/
static void ice_lag_monitor_link(struct ice_lag *lag, void *ptr)
{
- struct netdev_notifier_changeupper_info *info;
+ struct netdev_notifier_changeupper_info *info = ptr;
struct ice_hw *prim_hw, *active_hw;
struct net_device *event_netdev;
struct ice_pf *pf;
@@ -1421,19 +1756,34 @@ static void ice_lag_monitor_link(struct ice_lag *lag, void *ptr)
if (!netif_is_same_ice(lag->pf, event_netdev))
return;
+ if (info->upper_dev != lag->upper_netdev)
+ return;
+
+ if (info->linking)
+ return;
+
pf = lag->pf;
prim_hw = &pf->hw;
prim_port = prim_hw->port_info->lport;
- info = (struct netdev_notifier_changeupper_info *)ptr;
- if (info->upper_dev != lag->upper_netdev)
- return;
-
- if (!info->linking) {
- /* Since there are only two interfaces allowed in SRIOV+LAG, if
- * one port is leaving, then nodes need to be on primary
- * interface.
- */
+ /* Since there are only two interfaces allowed in SRIOV+LAG, if
+ * one port is leaving, then nodes need to be on primary
+ * interface.
+ */
+ if (lag->bond_aa) {
+ struct ice_netdev_priv *e_ndp;
+ struct ice_pf *e_pf;
+
+ e_ndp = netdev_priv(event_netdev);
+ e_pf = e_ndp->vsi->back;
+
+ if (lag->bond_lport_pri != ICE_LAG_INVALID_PORT &&
+ lag->port_bitmap & ICE_LAGS_M) {
+ lag->port_bitmap &= ~ICE_LAGS_M;
+ ice_lag_aa_failover(lag, ICE_LAGP_IDX, e_pf);
+ lag->bond_lport_sec = ICE_LAG_INVALID_PORT;
+ }
+ } else {
if (prim_port != lag->active_port &&
lag->active_port != ICE_LAG_INVALID_PORT) {
active_hw = ice_lag_find_hw_by_lport(lag,
@@ -1445,45 +1795,32 @@ static void ice_lag_monitor_link(struct ice_lag *lag, void *ptr)
}
/**
- * ice_lag_monitor_active - main PF keep track of which port is active
+ * ice_lag_monitor_act_bkup - keep track of which port is active in A/B LAG
* @lag: lag info struct
- * @ptr: opaque data containing notifier event
+ * @b_info: bonding info
+ * @event_netdev: net_device got target netdev
*
* This function is for the primary PF to monitor changes in which port is
* active and handle changes for SRIOV VF functionality
*/
-static void ice_lag_monitor_active(struct ice_lag *lag, void *ptr)
+static void ice_lag_monitor_act_bkup(struct ice_lag *lag,
+ struct netdev_bonding_info *b_info,
+ struct net_device *event_netdev)
{
- struct net_device *event_netdev, *event_upper;
- struct netdev_notifier_bonding_info *info;
- struct netdev_bonding_info *bonding_info;
struct ice_netdev_priv *event_np;
struct ice_pf *pf, *event_pf;
u8 prim_port, event_port;
- if (!lag->primary)
- return;
-
pf = lag->pf;
if (!pf)
return;
- event_netdev = netdev_notifier_info_to_dev(ptr);
- rcu_read_lock();
- event_upper = netdev_master_upper_dev_get_rcu(event_netdev);
- rcu_read_unlock();
- if (!netif_is_ice(event_netdev) || event_upper != lag->upper_netdev)
- return;
-
event_np = netdev_priv(event_netdev);
event_pf = event_np->vsi->back;
event_port = event_pf->hw.port_info->lport;
prim_port = pf->hw.port_info->lport;
- info = (struct netdev_notifier_bonding_info *)ptr;
- bonding_info = &info->bonding_info;
-
- if (!bonding_info->slave.state) {
+ if (!b_info->slave.state) {
/* if no port is currently active, then nodes and filters exist
* on primary port, check if we need to move them
*/
@@ -1520,6 +1857,128 @@ static void ice_lag_monitor_active(struct ice_lag *lag, void *ptr)
}
/**
+ * ice_lag_aa_clear_spoof - adjust the placeholder VSI spoofing for A/A LAG
+ * @vsi: placeholder VSI to adjust
+ */
+static void ice_lag_aa_clear_spoof(struct ice_vsi *vsi)
+{
+ ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
+}
+
+/**
+ * ice_lag_monitor_act_act - Keep track of active ports in A/A LAG
+ * @lag: lag struct for primary interface
+ * @b_info: bonding_info for event
+ * @event_netdev: net_device for target netdev
+ */
+static void ice_lag_monitor_act_act(struct ice_lag *lag,
+ struct netdev_bonding_info *b_info,
+ struct net_device *event_netdev)
+{
+ struct ice_netdev_priv *event_np;
+ u8 prim_port, event_port;
+ struct ice_pf *event_pf;
+
+ event_np = netdev_priv(event_netdev);
+ event_pf = event_np->vsi->back;
+ event_port = event_pf->hw.port_info->lport;
+ prim_port = lag->pf->hw.port_info->lport;
+
+ if (b_info->slave.link == BOND_LINK_UP) {
+ /* Port is coming up */
+ if (prim_port == event_port) {
+ /* Processing event for primary interface */
+ if (lag->bond_lport_pri == ICE_LAG_INVALID_PORT)
+ return;
+
+ if (!(lag->port_bitmap & ICE_LAGP_M)) {
+ /* Primary port was not marked up before, move
+ * some|all VF queues to it and mark as up
+ */
+ lag->port_bitmap |= ICE_LAGP_M;
+ ice_lag_aa_failover(lag, ICE_LAGP_IDX, event_pf);
+ }
+ } else {
+ if (lag->bond_lport_sec == ICE_LAG_INVALID_PORT)
+ return;
+
+ /* Create placeholder VSIs on secondary PF.
+ * The placeholder is necessary so that we have
+ * an element that represents the VF on the secondary
+ * interface's scheduling tree. This will be a tree
+ * root for scheduling nodes when they are moved to
+ * the secondary interface.
+ */
+ if (!lag->sec_vf[0]) {
+ struct ice_vsi_cfg_params params = {};
+ struct ice_vsi *nvsi;
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ params.type = ICE_VSI_VF;
+ params.port_info = event_pf->hw.port_info;
+ params.flags = ICE_VSI_FLAG_INIT;
+
+ ice_for_each_vf(lag->pf, bkt, vf) {
+ params.vf = vf;
+ nvsi = ice_vsi_setup(event_pf,
+ &params);
+ ice_lag_aa_clear_spoof(nvsi);
+ lag->sec_vf[vf->vf_id] = nvsi;
+ }
+ }
+
+ if (!(lag->port_bitmap & ICE_LAGS_M)) {
+ /* Secondary port was not marked up before,
+ * move some|all VF queues to it and mark as up
+ */
+ lag->port_bitmap |= ICE_LAGS_M;
+ ice_lag_aa_failover(lag, ICE_LAGS_IDX, event_pf);
+ }
+ }
+ } else {
+ /* Port is going down */
+ if (prim_port == event_port) {
+ lag->port_bitmap &= ~ICE_LAGP_M;
+ ice_lag_aa_failover(lag, ICE_LAGS_IDX, event_pf);
+ } else {
+ lag->port_bitmap &= ~ICE_LAGS_M;
+ ice_lag_aa_failover(lag, ICE_LAGP_IDX, event_pf);
+ }
+ }
+}
+
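In summary, the handler implements these transitions, keyed on which port raised the event and b_info->slave.link:

	event port   link             action
	primary      BOND_LINK_UP     set ICE_LAGP_M, fail over toward primary
	secondary    BOND_LINK_UP     create sec_vf[] placeholder VSIs if
	                              missing, set ICE_LAGS_M, fail over
	                              toward secondary
	primary      going down       clear ICE_LAGP_M, fail over toward
	                              secondary
	secondary    going down       clear ICE_LAGS_M, fail over toward
	                              primary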
+/**
+ * ice_lag_monitor_info - Calls relevant A/A or A/B monitoring function
+ * @lag: lag info struct
+ * @ptr: opaque data containing notifier event
+ *
+ * This function is for the primary PF to monitor changes in which port is
+ * active and handle changes for SRIOV VF functionality
+ */
+static void ice_lag_monitor_info(struct ice_lag *lag, void *ptr)
+{
+ struct netdev_notifier_bonding_info *info = ptr;
+ struct net_device *event_netdev, *event_upper;
+ struct netdev_bonding_info *bonding_info;
+
+ if (!lag->primary)
+ return;
+
+ event_netdev = netdev_notifier_info_to_dev(ptr);
+ bonding_info = &info->bonding_info;
+ rcu_read_lock();
+ event_upper = netdev_master_upper_dev_get_rcu(event_netdev);
+ rcu_read_unlock();
+ if (!netif_is_ice(event_netdev) || event_upper != lag->upper_netdev)
+ return;
+
+ if (lag->bond_aa)
+ ice_lag_monitor_act_act(lag, bonding_info, event_netdev);
+ else
+ ice_lag_monitor_act_bkup(lag, bonding_info, event_netdev);
+}
+
+/**
* ice_lag_chk_comp - evaluate bonded interface for feature support
* @lag: lag info struct
* @ptr: opaque data for netdev event info
@@ -1527,13 +1986,21 @@ static void ice_lag_monitor_active(struct ice_lag *lag, void *ptr)
static bool
ice_lag_chk_comp(struct ice_lag *lag, void *ptr)
{
+ struct netdev_notifier_bonding_info *info = ptr;
struct net_device *event_netdev, *event_upper;
- struct netdev_notifier_bonding_info *info;
struct netdev_bonding_info *bonding_info;
struct list_head *tmp;
struct device *dev;
int count = 0;
+ /* All members need to know if bond A/A or A/B */
+ bonding_info = &info->bonding_info;
+ lag->bond_mode = bonding_info->master.bond_mode;
+ if (lag->bond_mode != BOND_MODE_ACTIVEBACKUP)
+ lag->bond_aa = true;
+ else
+ lag->bond_aa = false;
+
if (!lag->primary)
return true;
@@ -1554,13 +2021,9 @@ ice_lag_chk_comp(struct ice_lag *lag, void *ptr)
return false;
}
- info = (struct netdev_notifier_bonding_info *)ptr;
- bonding_info = &info->bonding_info;
- lag->bond_mode = bonding_info->master.bond_mode;
- if (lag->bond_mode != BOND_MODE_ACTIVEBACKUP) {
- dev_info(dev, "Bond Mode not ACTIVE-BACKUP - VF LAG disabled\n");
+ if (lag->bond_aa && !ice_is_feature_supported(lag->pf,
+ ICE_F_SRIOV_AA_LAG))
return false;
- }
list_for_each(tmp, lag->netdev_head) {
struct ice_dcbx_cfg *dcb_cfg, *peer_dcb_cfg;
@@ -1664,10 +2127,9 @@ ice_lag_unregister(struct ice_lag *lag, struct net_device *event_netdev)
static void
ice_lag_monitor_rdma(struct ice_lag *lag, void *ptr)
{
- struct netdev_notifier_changeupper_info *info;
+ struct netdev_notifier_changeupper_info *info = ptr;
struct net_device *netdev;
- info = ptr;
netdev = netdev_notifier_info_to_dev(ptr);
if (netdev != lag->netdev)
@@ -1715,12 +2177,30 @@ static void ice_lag_chk_disabled_bond(struct ice_lag *lag, void *ptr)
*/
static void ice_lag_disable_sriov_bond(struct ice_lag *lag)
{
- struct ice_netdev_priv *np;
- struct ice_pf *pf;
+ struct ice_netdev_priv *np = netdev_priv(lag->netdev);
+ struct ice_pf *pf = np->vsi->back;
- np = netdev_priv(lag->netdev);
- pf = np->vsi->back;
ice_clear_feature_support(pf, ICE_F_SRIOV_LAG);
+ ice_clear_feature_support(pf, ICE_F_SRIOV_AA_LAG);
+}
+
+/**
+ * ice_lag_preset_drop_fltr - preset drop filter for A/B bonds
+ * @lag: local lag struct
+ * @ptr: opaque data containing event
+ *
+ * Sets the initial drop filter for secondary interface in an
+ * active-backup bond
+ */
+static void ice_lag_preset_drop_fltr(struct ice_lag *lag, void *ptr)
+{
+ struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+
+ if (netdev != lag->netdev || lag->primary || !lag->need_fltr_cfg)
+ return;
+
+ ice_lag_cfg_drop_fltr(lag, true);
+ lag->need_fltr_cfg = false;
}
/**
@@ -1761,10 +2241,12 @@ static void ice_lag_process_event(struct work_struct *work)
ice_lag_unregister(lag_work->lag, netdev);
goto lag_cleanup;
}
- ice_lag_monitor_active(lag_work->lag,
- &lag_work->info.bonding_info);
ice_lag_cfg_pf_fltrs(lag_work->lag,
&lag_work->info.bonding_info);
+ ice_lag_preset_drop_fltr(lag_work->lag,
+ &lag_work->info.bonding_info);
+ ice_lag_monitor_info(lag_work->lag,
+ &lag_work->info.bonding_info);
}
ice_lag_info_event(lag_work->lag, &lag_work->info.bonding_info);
break;
@@ -1837,9 +2319,8 @@ ice_lag_event_handler(struct notifier_block *notif_blk, unsigned long event,
lag_work->lag = lag;
lag_work->event = event;
if (event == NETDEV_CHANGEUPPER) {
- struct netdev_notifier_changeupper_info *info;
+ struct netdev_notifier_changeupper_info *info = ptr;
- info = ptr;
upper_netdev = info->upper_dev;
} else {
upper_netdev = netdev_master_upper_dev_get(netdev);
@@ -1889,10 +2370,8 @@ ice_lag_event_handler(struct notifier_block *notif_blk, unsigned long event,
*/
static int ice_register_lag_handler(struct ice_lag *lag)
{
+ struct notifier_block *notif_blk = &lag->notif_block;
struct device *dev = ice_pf_to_dev(lag->pf);
- struct notifier_block *notif_blk;
-
- notif_blk = &lag->notif_block;
if (!notif_blk->notifier_call) {
notif_blk->notifier_call = ice_lag_event_handler;
@@ -1912,10 +2391,9 @@ static int ice_register_lag_handler(struct ice_lag *lag)
*/
static void ice_unregister_lag_handler(struct ice_lag *lag)
{
+ struct notifier_block *notif_blk = &lag->notif_block;
struct device *dev = ice_pf_to_dev(lag->pf);
- struct notifier_block *notif_blk;
- notif_blk = &lag->notif_block;
if (notif_blk->notifier_call) {
unregister_netdevice_notifier(notif_blk);
dev_dbg(dev, "LAG event handler unregistered\n");
@@ -1977,13 +2455,12 @@ ice_lag_move_vf_nodes_tc_sync(struct ice_lag *lag, struct ice_hw *dest_hw,
u16 numq, valq, num_moved, qbuf_size;
u16 buf_size = __struct_size(buf);
struct ice_aqc_cfg_txqs_buf *qbuf;
+ struct ice_hw *hw = &lag->pf->hw;
struct ice_sched_node *n_prt;
__le32 teid, parent_teid;
struct ice_vsi_ctx *ctx;
- struct ice_hw *hw;
u32 tmp_teid;
- hw = &lag->pf->hw;
ctx = ice_get_vsi_ctx(hw, vsi_num);
if (!ctx) {
dev_warn(dev, "LAG rebuild failed after reset due to VSI Context failure\n");
@@ -2020,7 +2497,8 @@ ice_lag_move_vf_nodes_tc_sync(struct ice_lag *lag, struct ice_hw *dest_hw,
}
if (ice_aq_cfg_lan_txq(hw, qbuf, qbuf_size, numq, hw->port_info->lport,
- dest_hw->port_info->lport, NULL)) {
+ dest_hw->port_info->lport,
+ ICE_AQC_Q_CFG_TC_CHNG, NULL)) {
dev_warn(dev, "Failure to configure queues for LAG reset rebuild\n");
goto sync_qerr;
}
@@ -2116,9 +2594,13 @@ int ice_init_lag(struct ice_pf *pf)
lag->netdev = vsi->netdev;
lag->role = ICE_LAG_NONE;
lag->active_port = ICE_LAG_INVALID_PORT;
+ lag->port_bitmap = 0x0;
lag->bonded = false;
+ lag->bond_aa = false;
+ lag->need_fltr_cfg = false;
lag->upper_netdev = NULL;
lag->notif_block.notifier_call = NULL;
+ memset(lag->sec_vf, 0, sizeof(lag->sec_vf));
err = ice_register_lag_handler(lag);
if (err) {
@@ -2136,6 +2618,11 @@ int ice_init_lag(struct ice_pf *pf)
if (err)
goto free_rcp_res;
+ err = ice_create_lag_recipe(&pf->hw, &lag->act_act_recipe,
+ ice_lport_rcp, 1);
+ if (err)
+ goto free_lport_res;
+
/* associate recipes to profiles */
for (n = 0; n < ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER; n++) {
err = ice_aq_get_recipe_to_profile(&pf->hw, n,
@@ -2145,7 +2632,8 @@ int ice_init_lag(struct ice_pf *pf)
if (recipe_bits & BIT(ICE_SW_LKUP_DFLT)) {
recipe_bits |= BIT(lag->pf_recipe) |
- BIT(lag->lport_recipe);
+ BIT(lag->lport_recipe) |
+ BIT(lag->act_act_recipe);
ice_aq_map_recipe_to_profile(&pf->hw, n,
recipe_bits, NULL);
}
@@ -2156,9 +2644,13 @@ int ice_init_lag(struct ice_pf *pf)
dev_dbg(dev, "INIT LAG complete\n");
return 0;
+free_lport_res:
+ ice_free_hw_res(&pf->hw, ICE_AQC_RES_TYPE_RECIPE, 1,
+ &lag->lport_recipe);
+
free_rcp_res:
ice_free_hw_res(&pf->hw, ICE_AQC_RES_TYPE_RECIPE, 1,
- &pf->lag->pf_recipe);
+ &lag->pf_recipe);
lag_error:
kfree(lag);
pf->lag = NULL;
@@ -2174,9 +2666,7 @@ lag_error:
*/
void ice_deinit_lag(struct ice_pf *pf)
{
- struct ice_lag *lag;
-
- lag = pf->lag;
+ struct ice_lag *lag = pf->lag;
if (!lag)
return;
@@ -2245,11 +2735,15 @@ void ice_lag_rebuild(struct ice_pf *pf)
ice_lag_move_vf_nodes_sync(prim_lag, &pf->hw);
}
- ice_lag_cfg_cp_fltr(lag, true);
+ if (!lag->bond_aa) {
+ ice_lag_cfg_lp_fltr(lag, true, true);
+ if (lag->pf_rx_rule_id)
+ if (ice_lag_cfg_dflt_fltr(lag, true))
+ dev_err(ice_pf_to_dev(pf), "Error adding default VSI rule in rebuild\n");
+ } else {
+ ice_lag_cfg_lp_fltr(lag, true, false);
+ }
- if (lag->pf_rx_rule_id)
- if (ice_lag_cfg_dflt_fltr(lag, true))
- dev_err(ice_pf_to_dev(pf), "Error adding default VSI rule in rebuild\n");
ice_clear_rdma_cap(pf);
lag_rebuild_out:
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.h b/drivers/net/ethernet/intel/ice/ice_lag.h
index 69347d9f986b..f77ebcd61042 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.h
+++ b/drivers/net/ethernet/intel/ice/ice_lag.h
@@ -14,7 +14,11 @@ enum ice_lag_role {
ICE_LAG_UNSET
};
-#define ICE_LAG_INVALID_PORT 0xFF
+#define ICE_LAG_INVALID_PORT 0xFF
+#define ICE_LAGP_IDX 0
+#define ICE_LAGS_IDX 1
+#define ICE_LAGP_M 0x1
+#define ICE_LAGS_M 0x2
#define ICE_LAG_RESET_RETRIES 5
#define ICE_SW_DEFAULT_PROFILE 0
@@ -41,12 +45,26 @@ struct ice_lag {
u8 active_port; /* lport value for the current active port */
u8 bonded:1; /* currently bonded */
u8 primary:1; /* this is primary */
+ u8 bond_aa:1; /* is this bond active-active */
+ u8 need_fltr_cfg:1; /* filters for A/A bond still need to be made */
+ u8 port_bitmap:2; /* bitmap of active ports */
+ u8 bond_lport_pri; /* lport value for primary PF */
+ u8 bond_lport_sec; /* lport value for secondary PF */
+
+ /* q_home keeps track of which interface the q is currently on */
+ u8 q_home[ICE_MAX_SRIOV_VFS][ICE_MAX_RSS_QS_PER_VF];
+
+ /* placeholder VSI for hanging VF queues from on secondary interface */
+ struct ice_vsi *sec_vf[ICE_MAX_SRIOV_VFS];
+
u16 pf_recipe;
u16 lport_recipe;
+ u16 act_act_recipe;
u16 pf_rx_rule_id;
u16 pf_tx_rule_id;
u16 cp_rule_idx;
u16 lport_rule_idx;
+ u16 act_act_rule_idx;
u8 role;
};
@@ -64,7 +82,7 @@ struct ice_lag_work {
} info;
};
-void ice_lag_move_new_vf_nodes(struct ice_vf *vf);
+void ice_lag_aa_failover(struct ice_lag *lag, u8 dest, struct ice_pf *e_pf);
int ice_init_lag(struct ice_pf *pf);
void ice_deinit_lag(struct ice_pf *pf);
void ice_lag_rebuild(struct ice_pf *pf);
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index 77ba26538b07..10c312d49e05 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -569,4 +569,45 @@ struct ice_tlan_ctx {
u8 pkt_shaper_prof_idx;
};
+#define ICE_TXTIME_TX_DESC_IDX_M GENMASK(12, 0)
+#define ICE_TXTIME_STAMP_M GENMASK(31, 13)
+
+/* Tx time stamp descriptor */
+struct ice_ts_desc {
+ __le32 tx_desc_idx_tstamp;
+};
+
+#define ICE_TS_DESC(R, i) (&(((struct ice_ts_desc *)((R)->desc))[i]))
+
+#define ICE_TXTIME_MAX_QUEUE 2047
+#define ICE_SET_TXTIME_MAX_Q_AMOUNT 127
+#define ICE_TXTIME_FETCH_TS_DESC_DFLT 8
+#define ICE_TXTIME_FETCH_PROFILE_CNT 16
+
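A minimal sketch of packing one time stamp descriptor with these masks (tx_idx and tstamp are illustrative locals; FIELD_PREP comes from <linux/bitfield.h>):

	struct ice_ts_desc *ts = ICE_TS_DESC(tstamp_ring, i);

	ts->tx_desc_idx_tstamp =
		cpu_to_le32(FIELD_PREP(ICE_TXTIME_TX_DESC_IDX_M, tx_idx) |
			    FIELD_PREP(ICE_TXTIME_STAMP_M, tstamp));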
+/* Tx Time queue context data */
+struct ice_txtime_ctx {
+#define ICE_TXTIME_CTX_BASE_S 7
+ u64 base; /* base is defined in 128-byte units */
+ u8 pf_num;
+ u16 vmvf_num;
+ u8 vmvf_type;
+ u16 src_vsi;
+ u8 cpuid;
+ u8 tphrd_desc;
+ u16 qlen;
+ u8 timer_num;
+ u8 txtime_ena_q;
+ u8 drbell_mode_32;
+#define ICE_TXTIME_CTX_DRBELL_MODE_32 1
+ u8 ts_res;
+#define ICE_TXTIME_CTX_RESOLUTION_128NS 7
+ u8 ts_round_type;
+ u8 ts_pacing_slot;
+#define ICE_TXTIME_CTX_FETCH_PROF_ID_0 0
+ u8 merging_ena;
+ u8 ts_fetch_prof_id;
+ u8 ts_fetch_cache_line_aln_thld;
+ u8 tx_pipe_delay_mode;
+};
+
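A hedged sketch of filling this context; ring_dma and ring_count are assumed locals, and programming base in 128-byte units via the shift is inferred from the comment on the field:

	struct ice_txtime_ctx ctx = {
		.base = ring_dma >> ICE_TXTIME_CTX_BASE_S, /* 128-byte units */
		.qlen = ring_count,
		.txtime_ena_q = 1,
		.drbell_mode_32 = ICE_TXTIME_CTX_DRBELL_MODE_32,
		.ts_res = ICE_TXTIME_CTX_RESOLUTION_128NS,
		.ts_fetch_prof_id = ICE_TXTIME_CTX_FETCH_PROF_ID_0,
	};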
#endif /* _ICE_LAN_TX_RX_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index a439b5a61a56..4479c824561e 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -3950,6 +3950,7 @@ void ice_init_feature_support(struct ice_pf *pf)
if (pf->hw.mac_type == ICE_MAC_E830) {
ice_set_feature_support(pf, ICE_F_MBX_LIMIT);
ice_set_feature_support(pf, ICE_F_GCS);
+ ice_set_feature_support(pf, ICE_F_TXTIME);
}
}
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 77781277aa8e..86f5859e88ef 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -39,6 +39,7 @@ static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_IMPORT_NS("LIBIE");
MODULE_IMPORT_NS("LIBIE_ADMINQ");
+MODULE_IMPORT_NS("LIBIE_FWLOG");
MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE(ICE_DDP_PKG_FILE);
@@ -1251,32 +1252,6 @@ ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
}
/**
- * ice_get_fwlog_data - copy the FW log data from ARQ event
- * @pf: PF that the FW log event is associated with
- * @event: event structure containing FW log data
- */
-static void
-ice_get_fwlog_data(struct ice_pf *pf, struct ice_rq_event_info *event)
-{
- struct ice_fwlog_data *fwlog;
- struct ice_hw *hw = &pf->hw;
-
- fwlog = &hw->fwlog_ring.rings[hw->fwlog_ring.tail];
-
- memset(fwlog->data, 0, PAGE_SIZE);
- fwlog->data_size = le16_to_cpu(event->desc.datalen);
-
- memcpy(fwlog->data, event->msg_buf, fwlog->data_size);
- ice_fwlog_ring_increment(&hw->fwlog_ring.tail, hw->fwlog_ring.size);
-
- if (ice_fwlog_ring_full(&hw->fwlog_ring)) {
- /* the rings are full so bump the head to create room */
- ice_fwlog_ring_increment(&hw->fwlog_ring.head,
- hw->fwlog_ring.size);
- }
-}
-
-/**
* ice_aq_prep_for_event - Prepare to wait for an AdminQ event from firmware
* @pf: pointer to the PF private structure
* @task: intermediate helper storage and identifier for waiting
@@ -1566,7 +1541,8 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
}
break;
case ice_aqc_opc_fw_logs_event:
- ice_get_fwlog_data(pf, &event);
+ libie_get_fwlog_data(&hw->fwlog, event.msg_buf,
+ le16_to_cpu(event.desc.datalen));
break;
case ice_aqc_opc_lldp_set_mib_change:
ice_dcb_process_lldp_set_mib_change(pf, &event);
@@ -3993,6 +3969,11 @@ static void ice_deinit_pf(struct ice_pf *pf)
pf->avail_rxqs = NULL;
}
+ if (pf->txtime_txqs) {
+ bitmap_free(pf->txtime_txqs);
+ pf->txtime_txqs = NULL;
+ }
+
if (pf->ptp.clock)
ptp_clock_unregister(pf->ptp.clock);
@@ -4086,6 +4067,15 @@ static int ice_init_pf(struct ice_pf *pf)
return -ENOMEM;
}
+ pf->txtime_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
+ if (!pf->txtime_txqs) {
+ bitmap_free(pf->avail_txqs);
+ pf->avail_txqs = NULL;
+ bitmap_free(pf->avail_rxqs);
+ pf->avail_rxqs = NULL;
+ return -ENOMEM;
+ }
+
mutex_init(&pf->vfs.table_lock);
hash_init(pf->vfs.table);
if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
@@ -4654,19 +4644,6 @@ static void ice_print_wake_reason(struct ice_pf *pf)
}
/**
- * ice_pf_fwlog_update_module - update 1 module
- * @pf: pointer to the PF struct
- * @log_level: log_level to use for the @module
- * @module: module to update
- */
-void ice_pf_fwlog_update_module(struct ice_pf *pf, int log_level, int module)
-{
- struct ice_hw *hw = &pf->hw;
-
- hw->fwlog_cfg.module_entries[module].log_level = log_level;
-}
-
-/**
* ice_register_netdev - register netdev
* @vsi: pointer to the VSI struct
*/
@@ -7521,7 +7498,8 @@ int ice_vsi_open(struct ice_vsi *vsi)
if (err)
goto err_setup_rx;
- ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
+ if (bitmap_empty(pf->txtime_txqs, pf->max_pf_txqs))
+ ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_SF) {
/* Notify the stack of the actual queue counts. */
@@ -9125,7 +9103,7 @@ static int ice_create_q_channels(struct ice_vsi *vsi)
list_add_tail(&ch->list, &vsi->ch_list);
vsi->tc_map_vsi[i] = ch->ch_vsi;
dev_dbg(ice_pf_to_dev(pf),
- "successfully created channel: VSI %pK\n", ch->ch_vsi);
+ "successfully created channel: VSI %p\n", ch->ch_vsi);
}
return 0;
@@ -9310,6 +9288,96 @@ exit:
return ret;
}
+/**
+ * ice_cfg_txtime - configure Tx Time for the Tx ring
+ * @tx_ring: pointer to the Tx ring structure
+ *
+ * Return: 0 on success, negative value on failure.
+ */
+static int ice_cfg_txtime(struct ice_tx_ring *tx_ring)
+{
+ int err, timeout = 50;
+ struct ice_vsi *vsi;
+ struct device *dev;
+ struct ice_pf *pf;
+ u32 queue;
+
+ if (!tx_ring)
+ return -EINVAL;
+
+ vsi = tx_ring->vsi;
+ pf = vsi->back;
+ while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) {
+ timeout--;
+ if (!timeout)
+ return -EBUSY;
+ usleep_range(1000, 2000);
+ }
+
+ queue = tx_ring->q_index;
+ dev = ice_pf_to_dev(pf);
+
+ /* Ignore return value, and always attempt to enable queue. */
+ ice_qp_dis(vsi, queue);
+
+ err = ice_qp_ena(vsi, queue);
+ if (err)
+ dev_err(dev, "Failed to enable Tx queue %d for TxTime configuration\n",
+ queue);
+
+ clear_bit(ICE_CFG_BUSY, pf->state);
+ return err;
+}
+
+/**
+ * ice_offload_txtime - set earliest TxTime first
+ * @netdev: network interface device structure
+ * @qopt_off: etf queue option offload from the skb to set
+ *
+ * Return: 0 on success, negative value on failure.
+ */
+static int ice_offload_txtime(struct net_device *netdev,
+ void *qopt_off)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_pf *pf = np->vsi->back;
+ struct tc_etf_qopt_offload *qopt;
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_tx_ring *tx_ring;
+ int ret = 0;
+
+ if (!ice_is_feature_supported(pf, ICE_F_TXTIME))
+ return -EOPNOTSUPP;
+
+ qopt = qopt_off;
+ if (!qopt_off || qopt->queue < 0 || qopt->queue >= vsi->num_txq)
+ return -EINVAL;
+
+ if (qopt->enable)
+ set_bit(qopt->queue, pf->txtime_txqs);
+ else
+ clear_bit(qopt->queue, pf->txtime_txqs);
+
+ if (netif_running(vsi->netdev)) {
+ tx_ring = vsi->tx_rings[qopt->queue];
+ ret = ice_cfg_txtime(tx_ring);
+ if (ret)
+ goto err;
+ }
+
+ netdev_info(netdev, "%s TxTime on queue: %i\n",
+ str_enable_disable(qopt->enable), qopt->queue);
+ return 0;
+
+err:
+ netdev_err(netdev, "Failed to %s TxTime on queue: %i\n",
+ str_enable_disable(qopt->enable), qopt->queue);
+
+ if (qopt->enable)
+ clear_bit(qopt->queue, pf->txtime_txqs);
+ return ret;
+}
+
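For reference, user space arms this path by installing an etf qdisc with its offload option on a Tx queue; the stack then hands the driver a struct tc_etf_qopt_offload (from include/net/pkt_sched.h) as the opaque type_data, roughly:

	struct tc_etf_qopt_offload qopt = {
		.enable = 1,	/* qdisc installed (0 on teardown) */
		.queue	= 3,	/* illustrative Tx queue index */
	};
	/* delivered through ndo_setup_tc(netdev, TC_SETUP_QDISC_ETF, &qopt) */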
static LIST_HEAD(ice_block_cb_list);
static int
@@ -9373,6 +9441,8 @@ adev_unlock:
mutex_unlock(&pf->adev_mutex);
}
return err;
+ case TC_SETUP_QDISC_ETF:
+ return ice_offload_txtime(netdev, type_data);
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index 9ce4c4db400e..843e82fd3bf9 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -9,7 +9,7 @@
#include "ice_dcb_lib.h"
#include "ice_flow.h"
#include "ice_eswitch.h"
-#include "ice_virtchnl_allowlist.h"
+#include "virt/allowlist.h"
#include "ice_flex_pipe.h"
#include "ice_vf_vsi_vlan_ops.h"
#include "ice_vlan.h"
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.h b/drivers/net/ethernet/intel/ice/ice_sriov.h
index d1a998a4bef6..6c4fad09a527 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.h
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.h
@@ -3,9 +3,9 @@
#ifndef _ICE_SRIOV_H_
#define _ICE_SRIOV_H_
-#include "ice_virtchnl_fdir.h"
+#include "virt/fdir.h"
#include "ice_vf_lib.h"
-#include "ice_virtchnl.h"
+#include "virt/virtchnl.h"
/* Static VF transaction/status register def */
#define VF_DEVICE_STATUS 0xAA
diff --git a/drivers/net/ethernet/intel/ice/ice_trace.h b/drivers/net/ethernet/intel/ice/ice_trace.h
index 07aab6e130cd..4f35ef8d6b29 100644
--- a/drivers/net/ethernet/intel/ice/ice_trace.h
+++ b/drivers/net/ethernet/intel/ice/ice_trace.h
@@ -130,7 +130,7 @@ DECLARE_EVENT_CLASS(ice_tx_template,
__entry->buf = buf;
__assign_str(devname);),
- TP_printk("netdev: %s ring: %pK desc: %pK buf %pK", __get_str(devname),
+ TP_printk("netdev: %s ring: %p desc: %p buf %p", __get_str(devname),
__entry->ring, __entry->desc, __entry->buf)
);
@@ -158,7 +158,7 @@ DECLARE_EVENT_CLASS(ice_rx_template,
__entry->desc = desc;
__assign_str(devname);),
- TP_printk("netdev: %s ring: %pK desc: %pK", __get_str(devname),
+ TP_printk("netdev: %s ring: %p desc: %p", __get_str(devname),
__entry->ring, __entry->desc)
);
DEFINE_EVENT(ice_rx_template, ice_clean_rx_irq,
@@ -182,7 +182,7 @@ DECLARE_EVENT_CLASS(ice_rx_indicate_template,
__entry->skb = skb;
__assign_str(devname);),
- TP_printk("netdev: %s ring: %pK desc: %pK skb %pK", __get_str(devname),
+ TP_printk("netdev: %s ring: %p desc: %p skb %p", __get_str(devname),
__entry->ring, __entry->desc, __entry->skb)
);
@@ -205,7 +205,7 @@ DECLARE_EVENT_CLASS(ice_xmit_template,
__entry->skb = skb;
__assign_str(devname);),
- TP_printk("netdev: %s skb: %pK ring: %pK", __get_str(devname),
+ TP_printk("netdev: %s skb: %p ring: %p", __get_str(devname),
__entry->skb, __entry->ring)
);
@@ -228,7 +228,7 @@ DECLARE_EVENT_CLASS(ice_tx_tstamp_template,
TP_fast_assign(__entry->skb = skb;
__entry->idx = idx;),
- TP_printk("skb %pK idx %d",
+ TP_printk("skb %p idx %d",
__entry->skb, __entry->idx)
);
#define DEFINE_TX_TSTAMP_OP_EVENT(name) \
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 41e7e29879a3..73f08d02f9c7 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -144,6 +144,56 @@ static struct netdev_queue *txring_txq(const struct ice_tx_ring *ring)
}
/**
+ * ice_clean_tstamp_ring - clean time stamp ring
+ * @tx_ring: Tx ring to clean the Time Stamp ring for
+ */
+static void ice_clean_tstamp_ring(struct ice_tx_ring *tx_ring)
+{
+ struct ice_tstamp_ring *tstamp_ring = tx_ring->tstamp_ring;
+ u32 size;
+
+ if (!tstamp_ring->desc)
+ return;
+
+ size = ALIGN(tstamp_ring->count * sizeof(struct ice_ts_desc),
+ PAGE_SIZE);
+ memset(tstamp_ring->desc, 0, size);
+ tstamp_ring->next_to_use = 0;
+}
+
+/**
+ * ice_free_tstamp_ring - free time stamp resources per queue
+ * @tx_ring: Tx ring to free the Time Stamp ring for
+ */
+void ice_free_tstamp_ring(struct ice_tx_ring *tx_ring)
+{
+ struct ice_tstamp_ring *tstamp_ring = tx_ring->tstamp_ring;
+ u32 size;
+
+ if (!tstamp_ring->desc)
+ return;
+
+ ice_clean_tstamp_ring(tx_ring);
+ size = ALIGN(tstamp_ring->count * sizeof(struct ice_ts_desc),
+ PAGE_SIZE);
+ dmam_free_coherent(tx_ring->dev, size, tstamp_ring->desc,
+ tstamp_ring->dma);
+ tstamp_ring->desc = NULL;
+}
+
+/**
+ * ice_free_tx_tstamp_ring - free time stamp resources per Tx ring
+ * @tx_ring: Tx ring to free the Time Stamp ring for
+ */
+void ice_free_tx_tstamp_ring(struct ice_tx_ring *tx_ring)
+{
+ ice_free_tstamp_ring(tx_ring);
+ kfree_rcu(tx_ring->tstamp_ring, rcu);
+ tx_ring->tstamp_ring = NULL;
+ tx_ring->flags &= ~ICE_TX_FLAGS_TXTIME;
+}
+
+/**
* ice_clean_tx_ring - Free any empty Tx buffers
* @tx_ring: ring to be cleaned
*/
@@ -181,6 +231,9 @@ tx_skip_free:
/* cleanup Tx queue statistics */
netdev_tx_reset_queue(txring_txq(tx_ring));
+
+ if (ice_is_txtime_cfg(tx_ring))
+ ice_free_tx_tstamp_ring(tx_ring);
}
/**
@@ -332,6 +385,84 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
}
/**
+ * ice_alloc_tstamp_ring - allocate the Time Stamp ring
+ * @tx_ring: Tx ring to allocate the Time Stamp ring for
+ *
+ * Return: 0 on success, negative on error
+ */
+static int ice_alloc_tstamp_ring(struct ice_tx_ring *tx_ring)
+{
+ struct ice_tstamp_ring *tstamp_ring;
+
+ /* allocate with kzalloc(), free with kfree_rcu() */
+ tstamp_ring = kzalloc(sizeof(*tstamp_ring), GFP_KERNEL);
+ if (!tstamp_ring)
+ return -ENOMEM;
+
+ tstamp_ring->tx_ring = tx_ring;
+ tx_ring->tstamp_ring = tstamp_ring;
+ tstamp_ring->desc = NULL;
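+	/* must be at least tx_ring->count; ice_tx_map() uses the difference
+	 * to pad the wrap-around with duplicate TS descriptors
+	 */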
+ tstamp_ring->count = ice_calc_ts_ring_count(tx_ring);
+ tx_ring->flags |= ICE_TX_FLAGS_TXTIME;
+ return 0;
+}
+
+/**
+ * ice_setup_tstamp_ring - set up the Time Stamp ring memory
+ * @tx_ring: Tx ring to set up the Time Stamp ring for
+ *
+ * Return: 0 on success, negative on error
+ */
+static int ice_setup_tstamp_ring(struct ice_tx_ring *tx_ring)
+{
+ struct ice_tstamp_ring *tstamp_ring = tx_ring->tstamp_ring;
+ struct device *dev = tx_ring->dev;
+ u32 size;
+
+ /* round up to nearest page */
+ size = ALIGN(tstamp_ring->count * sizeof(struct ice_ts_desc),
+ PAGE_SIZE);
+ tstamp_ring->desc = dmam_alloc_coherent(dev, size, &tstamp_ring->dma,
+ GFP_KERNEL);
+ if (!tstamp_ring->desc) {
+		dev_err(dev, "Unable to allocate memory for Time Stamp ring, size=%d\n",
+ size);
+ return -ENOMEM;
+ }
+
+ tstamp_ring->next_to_use = 0;
+ return 0;
+}
+
+/**
+ * ice_alloc_setup_tstamp_ring - Allocate and setup the Time Stamp ring
+ * @tx_ring: Tx ring to allocate and setup the Time Stamp ring for
+ *
+ * Return: 0 on success, negative on error
+ */
+int ice_alloc_setup_tstamp_ring(struct ice_tx_ring *tx_ring)
+{
+ struct device *dev = tx_ring->dev;
+ int err;
+
+ err = ice_alloc_tstamp_ring(tx_ring);
+ if (err) {
+ dev_err(dev, "Unable to allocate Time stamp ring for Tx ring %d\n",
+ tx_ring->q_index);
+ return err;
+ }
+
+ err = ice_setup_tstamp_ring(tx_ring);
+ if (err) {
+ dev_err(dev, "Unable to setup Time stamp ring for Tx ring %d\n",
+ tx_ring->q_index);
+ ice_free_tx_tstamp_ring(tx_ring);
+ return err;
+ }
+ return 0;
+}
+
+/**
* ice_setup_tx_ring - Allocate the Tx descriptors
* @tx_ring: the Tx ring to set up
*
@@ -1031,10 +1162,9 @@ ice_build_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
skb_metadata_set(skb, metasize);
if (unlikely(xdp_buff_has_frags(xdp)))
- xdp_update_skb_shared_info(skb, nr_frags,
- sinfo->xdp_frags_size,
- nr_frags * xdp->frame_sz,
- xdp_buff_is_frag_pfmemalloc(xdp));
+ xdp_update_skb_frags_info(skb, nr_frags, sinfo->xdp_frags_size,
+ nr_frags * xdp->frame_sz,
+ xdp_buff_get_skb_flags(xdp));
return skb;
}
@@ -1111,10 +1241,10 @@ ice_construct_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
memcpy(&skinfo->frags[skinfo->nr_frags], &sinfo->frags[0],
sizeof(skb_frag_t) * nr_frags);
- xdp_update_skb_shared_info(skb, skinfo->nr_frags + nr_frags,
- sinfo->xdp_frags_size,
- nr_frags * xdp->frame_sz,
- xdp_buff_is_frag_pfmemalloc(xdp));
+ xdp_update_skb_frags_info(skb, skinfo->nr_frags + nr_frags,
+ sinfo->xdp_frags_size,
+ nr_frags * xdp->frame_sz,
+ xdp_buff_get_skb_flags(xdp));
}
return skb;
@@ -1823,10 +1953,46 @@ ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,
/* notify HW of packet */
kick = __netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount,
netdev_xmit_more());
- if (kick)
- /* notify HW of packet */
- writel(i, tx_ring->tail);
+ if (!kick)
+		return;
+
+ if (ice_is_txtime_cfg(tx_ring)) {
+ struct ice_tstamp_ring *tstamp_ring = tx_ring->tstamp_ring;
+ u32 tstamp_count = tstamp_ring->count;
+ u32 j = tstamp_ring->next_to_use;
+ struct ice_ts_desc *ts_desc;
+ struct timespec64 ts;
+ u32 tstamp;
+
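+		/* HW consumes the launch time in 128 ns units, so drop the
+		 * low-order bits of the nanosecond part of the timestamp.
+		 */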
+ ts = ktime_to_timespec64(first->skb->tstamp);
+ tstamp = ts.tv_nsec >> ICE_TXTIME_CTX_RESOLUTION_128NS;
+
+ ts_desc = ICE_TS_DESC(tstamp_ring, j);
+ ts_desc->tx_desc_idx_tstamp = ice_build_tstamp_desc(i, tstamp);
+
+ j++;
+ if (j == tstamp_count) {
+ u32 fetch = tstamp_count - tx_ring->count;
+
+ j = 0;
+
+ /* To prevent an MDD, when wrapping the tstamp ring
+ * create additional TS descriptors equal to the number
+ * of the fetch TS descriptors value. HW will merge the
+ * TS descriptors with the same timestamp value into a
+ * single descriptor.
+ */
+ for (; j < fetch; j++) {
+ ts_desc = ICE_TS_DESC(tstamp_ring, j);
+ ts_desc->tx_desc_idx_tstamp =
+ ice_build_tstamp_desc(i, tstamp);
+ }
+ }
+ tstamp_ring->next_to_use = j;
+ writel_relaxed(j, tstamp_ring->tail);
+ } else {
+ writel_relaxed(i, tx_ring->tail);
+ }
return;
dma_error:
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index 2fd8e78178a2..841a07bfba54 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -310,6 +310,16 @@ enum ice_dynamic_itr {
#define ICE_TX_LEGACY 1
/* descriptor ring, associated with a VSI */
+struct ice_tstamp_ring {
+ struct ice_tx_ring *tx_ring; /* Backreference to associated Tx ring */
+ dma_addr_t dma; /* physical address of ring */
+ struct rcu_head rcu; /* to avoid race on free */
+	u8 __iomem *tail;		/* tstamp ring tail register */
+	void *desc;			/* descriptor ring memory */
+	u16 next_to_use;
+	u16 count;			/* number of descriptors */
+} ____cacheline_internodealigned_in_smp;
+
struct ice_rx_ring {
/* CL1 - 1st cacheline starts here */
void *desc; /* Descriptor ring memory */
@@ -402,9 +412,11 @@ struct ice_tx_ring {
spinlock_t tx_lock;
u32 txq_teid; /* Added Tx queue TEID */
/* CL4 - 4th cacheline starts here */
+ struct ice_tstamp_ring *tstamp_ring;
#define ICE_TX_FLAGS_RING_XDP BIT(0)
#define ICE_TX_FLAGS_RING_VLAN_L2TAG1 BIT(1)
#define ICE_TX_FLAGS_RING_VLAN_L2TAG2 BIT(2)
+#define ICE_TX_FLAGS_TXTIME BIT(3)
u8 flags;
u8 dcb_tc; /* Traffic class of ring */
u16 quanta_prof_id;
@@ -500,6 +512,7 @@ void ice_clean_tx_ring(struct ice_tx_ring *tx_ring);
void ice_clean_rx_ring(struct ice_rx_ring *rx_ring);
int ice_setup_tx_ring(struct ice_tx_ring *tx_ring);
int ice_setup_rx_ring(struct ice_rx_ring *rx_ring);
+int ice_alloc_setup_tstamp_ring(struct ice_tx_ring *tx_ring);
void ice_free_tx_ring(struct ice_tx_ring *tx_ring);
void ice_free_rx_ring(struct ice_rx_ring *rx_ring);
int ice_napi_poll(struct napi_struct *napi, int budget);
@@ -508,4 +521,6 @@ ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
u8 *raw_packet);
void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring);
void ice_clean_ctrl_rx_irq(struct ice_rx_ring *rx_ring);
+void ice_free_tx_tstamp_ring(struct ice_tx_ring *tx_ring);
+void ice_free_tstamp_ring(struct ice_tx_ring *tx_ring);
#endif /* _ICE_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
index 6cf32b404127..99717730f21a 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
@@ -54,6 +54,20 @@ ice_build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
}
/**
+ * ice_build_tstamp_desc - build Tx time stamp descriptor
+ * @tx_desc: Tx LAN descriptor index
+ * @tstamp: time stamp
+ *
+ * Return: Tx time stamp descriptor
+ */
+static inline __le32
+ice_build_tstamp_desc(u16 tx_desc, u32 tstamp)
+{
+ return cpu_to_le32(FIELD_PREP(ICE_TXTIME_TX_DESC_IDX_M, tx_desc) |
+ FIELD_PREP(ICE_TXTIME_STAMP_M, tstamp));
+}
+
+/**
* ice_get_vlan_tci - get VLAN TCI from Rx flex descriptor
* @rx_desc: Rx 32b flex descriptor with RXDID=2
*
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 03c6c271865d..b0a1b67071c5 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -17,7 +17,7 @@
#include "ice_protocol_type.h"
#include "ice_sbq_cmd.h"
#include "ice_vlan_mode.h"
-#include "ice_fwlog.h"
+#include <linux/net/intel/libie/fwlog.h>
#include <linux/wait.h>
#include <net/dscp.h>
@@ -293,8 +293,10 @@ struct ice_hw_common_caps {
u8 dcb;
u8 ieee_1588;
u8 rdma;
- u8 roce_lag;
- u8 sriov_lag;
+
+ bool roce_lag;
+ bool sriov_lag;
+ bool sriov_aa_lag;
bool nvm_update_pending_nvm;
bool nvm_update_pending_orom;
@@ -946,9 +948,7 @@ struct ice_hw {
u8 fw_patch; /* firmware patch version */
u32 fw_build; /* firmware build number */
- struct ice_fwlog_cfg fwlog_cfg;
- bool fwlog_supported; /* does hardware support FW logging? */
- struct ice_fwlog_ring fwlog_ring;
+ struct libie_fwlog fwlog;
/* Device max aggregate bandwidths corresponding to the GL_PWR_MODE_CTL
* register. Used for determining the ITR/INTRL granularity during
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index 5ee74f3e82dc..de9e81ccee66 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -5,7 +5,7 @@
#include "ice.h"
#include "ice_lib.h"
#include "ice_fltr.h"
-#include "ice_virtchnl_allowlist.h"
+#include "virt/allowlist.h"
/* Public functions which may be accessed by all driver files */
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
index ffe1f9f830ea..b00708907176 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
@@ -13,7 +13,7 @@
#include <linux/avf/virtchnl.h>
#include "ice_type.h"
#include "ice_flow.h"
-#include "ice_virtchnl_fdir.h"
+#include "virt/fdir.h"
#include "ice_vsi_vlan_ops.h"
#define ICE_MAX_SRIOV_VFS 256
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index a3a4eaa17739..575fd48f485f 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -19,52 +19,12 @@ static struct xdp_buff **ice_xdp_buf(struct ice_rx_ring *rx_ring, u32 idx)
}
/**
- * ice_qp_reset_stats - Resets all stats for rings of given index
- * @vsi: VSI that contains rings of interest
- * @q_idx: ring index in array
- */
-static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
-{
- struct ice_vsi_stats *vsi_stat;
- struct ice_pf *pf;
-
- pf = vsi->back;
- if (!pf->vsi_stats)
- return;
-
- vsi_stat = pf->vsi_stats[vsi->idx];
- if (!vsi_stat)
- return;
-
- memset(&vsi_stat->rx_ring_stats[q_idx]->rx_stats, 0,
- sizeof(vsi_stat->rx_ring_stats[q_idx]->rx_stats));
- memset(&vsi_stat->tx_ring_stats[q_idx]->stats, 0,
- sizeof(vsi_stat->tx_ring_stats[q_idx]->stats));
- if (vsi->xdp_rings)
- memset(&vsi->xdp_rings[q_idx]->ring_stats->stats, 0,
- sizeof(vsi->xdp_rings[q_idx]->ring_stats->stats));
-}
-
-/**
- * ice_qp_clean_rings - Cleans all the rings of a given index
- * @vsi: VSI that contains rings of interest
- * @q_idx: ring index in array
- */
-static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
-{
- ice_clean_tx_ring(vsi->tx_rings[q_idx]);
- if (vsi->xdp_rings)
- ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
- ice_clean_rx_ring(vsi->rx_rings[q_idx]);
-}
-
-/**
* ice_qvec_toggle_napi - Enables/disables NAPI for a given q_vector
* @vsi: VSI that has netdev
* @q_vector: q_vector that has NAPI context
* @enable: true for enable, false for disable
*/
-static void
+void
ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
bool enable)
{
@@ -83,7 +43,7 @@ ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
* @rx_ring: Rx ring that will have its IRQ disabled
* @q_vector: queue vector
*/
-static void
+void
ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring,
struct ice_q_vector *q_vector)
{
@@ -113,7 +73,7 @@ ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring,
* @q_vector: queue vector
* @qid: queue index
*/
-static void
+void
ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector, u16 qid)
{
u16 reg_idx = q_vector->reg_idx;
@@ -143,7 +103,7 @@ ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector, u16 qid)
* @vsi: the VSI that contains queue vector
* @q_vector: queue vector
*/
-static void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
+void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
{
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
@@ -154,111 +114,6 @@ static void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
}
/**
- * ice_qp_dis - Disables a queue pair
- * @vsi: VSI of interest
- * @q_idx: ring index in array
- *
- * Returns 0 on success, negative on failure.
- */
-static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
-{
- struct ice_txq_meta txq_meta = { };
- struct ice_q_vector *q_vector;
- struct ice_tx_ring *tx_ring;
- struct ice_rx_ring *rx_ring;
- int fail = 0;
- int err;
-
- if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
- return -EINVAL;
-
- tx_ring = vsi->tx_rings[q_idx];
- rx_ring = vsi->rx_rings[q_idx];
- q_vector = rx_ring->q_vector;
-
- synchronize_net();
- netif_carrier_off(vsi->netdev);
- netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
-
- ice_qvec_dis_irq(vsi, rx_ring, q_vector);
- ice_qvec_toggle_napi(vsi, q_vector, false);
-
- ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
- err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
- if (!fail)
- fail = err;
- if (vsi->xdp_rings) {
- struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
-
- memset(&txq_meta, 0, sizeof(txq_meta));
- ice_fill_txq_meta(vsi, xdp_ring, &txq_meta);
- err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, xdp_ring,
- &txq_meta);
- if (!fail)
- fail = err;
- }
-
- ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, false);
- ice_qp_clean_rings(vsi, q_idx);
- ice_qp_reset_stats(vsi, q_idx);
-
- return fail;
-}
-
-/**
- * ice_qp_ena - Enables a queue pair
- * @vsi: VSI of interest
- * @q_idx: ring index in array
- *
- * Returns 0 on success, negative on failure.
- */
-static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
-{
- struct ice_q_vector *q_vector;
- int fail = 0;
- bool link_up;
- int err;
-
- err = ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx);
- if (!fail)
- fail = err;
-
- if (ice_is_xdp_ena_vsi(vsi)) {
- struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
-
- err = ice_vsi_cfg_single_txq(vsi, vsi->xdp_rings, q_idx);
- if (!fail)
- fail = err;
- ice_set_ring_xdp(xdp_ring);
- ice_tx_xsk_pool(vsi, q_idx);
- }
-
- err = ice_vsi_cfg_single_rxq(vsi, q_idx);
- if (!fail)
- fail = err;
-
- q_vector = vsi->rx_rings[q_idx]->q_vector;
- ice_qvec_cfg_msix(vsi, q_vector, q_idx);
-
- err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true);
- if (!fail)
- fail = err;
-
- ice_qvec_toggle_napi(vsi, q_vector, true);
- ice_qvec_ena_irq(vsi, q_vector);
-
- /* make sure NAPI sees updated ice_{t,x}_ring::xsk_pool */
- synchronize_net();
- ice_get_link_status(vsi->port_info, &link_up);
- if (link_up) {
- netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
- netif_carrier_on(vsi->netdev);
- }
-
- return fail;
-}
-
-/**
* ice_xsk_pool_disable - disable a buffer pool region
* @vsi: Current VSI
* @qid: queue ID
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h
index 8dc5d55e26c5..600cbeeaa203 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.h
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.h
@@ -23,6 +23,13 @@ void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring);
void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring);
bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, struct xsk_buff_pool *xsk_pool);
int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc);
+void ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
+ u16 qid);
+void ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
+ bool enable);
+void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector);
+void ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring,
+ struct ice_q_vector *q_vector);
#else
static inline bool ice_xmit_zc(struct ice_tx_ring __always_unused *xdp_ring,
struct xsk_buff_pool __always_unused *xsk_pool)
@@ -75,5 +82,20 @@ ice_realloc_zc_buf(struct ice_vsi __always_unused *vsi,
{
return 0;
}
+
+static inline void
+ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
+ u16 qid) { }
+
+static inline void
+ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
+ bool enable) { }
+
+static inline void
+ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector) { }
+
+static inline void
+ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring,
+ struct ice_q_vector *q_vector) { }
#endif /* CONFIG_XDP_SOCKETS */
#endif /* !_ICE_XSK_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c b/drivers/net/ethernet/intel/ice/virt/allowlist.c
index 4c2ec2337b38..a07efec19c45 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
+++ b/drivers/net/ethernet/intel/ice/virt/allowlist.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021, Intel Corporation. */
-#include "ice_virtchnl_allowlist.h"
+#include "allowlist.h"
/* Purpose of this file is to share functionality to allowlist or denylist
* opcodes used in PF <-> VF communication. Group of opcodes:
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.h b/drivers/net/ethernet/intel/ice/virt/allowlist.h
index d3ae86ded219..d3ae86ded219 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.h
+++ b/drivers/net/ethernet/intel/ice/virt/allowlist.h
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/virt/fdir.c
index ae83c3914e29..ae83c3914e29 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+++ b/drivers/net/ethernet/intel/ice/virt/fdir.c
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h b/drivers/net/ethernet/intel/ice/virt/fdir.h
index ac6dcab454b4..ac6dcab454b4 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h
+++ b/drivers/net/ethernet/intel/ice/virt/fdir.h
diff --git a/drivers/net/ethernet/intel/ice/virt/queues.c b/drivers/net/ethernet/intel/ice/virt/queues.c
new file mode 100644
index 000000000000..370f6ec2a374
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/virt/queues.c
@@ -0,0 +1,973 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2022, Intel Corporation. */
+
+#include "virtchnl.h"
+#include "queues.h"
+#include "ice_vf_lib_private.h"
+#include "ice.h"
+#include "ice_base.h"
+#include "ice_lib.h"
+
+/**
+ * ice_vc_get_max_frame_size - get max frame size allowed for VF
+ * @vf: VF used to determine max frame size
+ *
+ * Max frame size is determined based on the current port's max frame size and
+ * whether a port VLAN is configured on this VF. The VF is not aware whether
+ * it's in a port VLAN, so the PF needs to account for this both when checking
+ * the max frame size and when reporting the max frame size to the VF.
+ *
+ * Return: the maximum frame size allowed for the VF, in bytes.
+ */
+u16 ice_vc_get_max_frame_size(struct ice_vf *vf)
+{
+ struct ice_port_info *pi = ice_vf_get_port_info(vf);
+ u16 max_frame_size;
+
+ max_frame_size = pi->phy.link_info.max_frame_size;
+
+ if (ice_vf_is_port_vlan_ena(vf))
+ max_frame_size -= VLAN_HLEN;
+
+ return max_frame_size;
+}
+
+/**
+ * ice_vc_isvalid_q_id - check for a valid queue ID
+ * @vsi: VSI to check queue ID against
+ * @qid: VSI relative queue ID
+ *
+ * Return: true if the queue ID is valid, false otherwise.
+ */
+static bool ice_vc_isvalid_q_id(struct ice_vsi *vsi, u16 qid)
+{
+ /* allocated Tx and Rx queues should be always equal for VF VSI */
+ return qid < vsi->alloc_txq;
+}
+
+/**
+ * ice_vc_isvalid_ring_len - check for a valid ring length
+ * @ring_len: length of ring
+ *
+ * Return: true if the ring length is zero or a multiple of
+ * ICE_REQ_DESC_MULTIPLE within the supported descriptor range.
+ */
+static bool ice_vc_isvalid_ring_len(u16 ring_len)
+{
+ return ring_len == 0 ||
+ (ring_len >= ICE_MIN_NUM_DESC &&
+ ring_len <= ICE_MAX_NUM_DESC_E810 &&
+ !(ring_len % ICE_REQ_DESC_MULTIPLE));
+}
+
+/**
+ * ice_vf_cfg_qs_bw - Configure per queue bandwidth
+ * @vf: pointer to the VF info
+ * @num_queues: number of queues to be configured
+ *
+ * Program the per-queue peak and committed bandwidth limits from vf->qs_bw.
+ *
+ * Return: 0 on success or negative error value.
+ */
+static int ice_vf_cfg_qs_bw(struct ice_vf *vf, u16 num_queues)
+{
+ struct ice_hw *hw = &vf->pf->hw;
+ struct ice_vsi *vsi;
+ int ret;
+ u16 i;
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi)
+ return -EINVAL;
+
+ for (i = 0; i < num_queues; i++) {
+ u32 p_rate, min_rate;
+ u8 tc;
+
+ p_rate = vf->qs_bw[i].peak;
+ min_rate = vf->qs_bw[i].committed;
+ tc = vf->qs_bw[i].tc;
+ if (p_rate)
+ ret = ice_cfg_q_bw_lmt(hw->port_info, vsi->idx, tc,
+ vf->qs_bw[i].queue_id,
+ ICE_MAX_BW, p_rate);
+ else
+ ret = ice_cfg_q_bw_dflt_lmt(hw->port_info, vsi->idx, tc,
+ vf->qs_bw[i].queue_id,
+ ICE_MAX_BW);
+ if (ret)
+ return ret;
+
+ if (min_rate)
+ ret = ice_cfg_q_bw_lmt(hw->port_info, vsi->idx, tc,
+ vf->qs_bw[i].queue_id,
+ ICE_MIN_BW, min_rate);
+ else
+ ret = ice_cfg_q_bw_dflt_lmt(hw->port_info, vsi->idx, tc,
+ vf->qs_bw[i].queue_id,
+ ICE_MIN_BW);
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vf_cfg_q_quanta_profile - Configure quanta profile
+ * @vf: pointer to the VF info
+ * @quanta_prof_idx: pointer to the quanta profile index
+ * @quanta_size: quanta size to be set
+ *
+ * This function chooses an available quanta profile and configures the
+ * register. The quanta profile table is evenly divided among the device
+ * ports and then made available to the specific PF and its VFs. The first
+ * profile of each PF's slice is a reserved default profile; only the quanta
+ * size of the remaining unused profiles can be modified.
+ *
+ * Return: 0 on success or negative error value.
+ */
+static int ice_vf_cfg_q_quanta_profile(struct ice_vf *vf, u16 quanta_size,
+ u16 *quanta_prof_idx)
+{
+ const u16 n_desc = calc_quanta_desc(quanta_size);
+ struct ice_hw *hw = &vf->pf->hw;
+ const u16 n_cmd = 2 * n_desc;
+ struct ice_pf *pf = vf->pf;
+ u16 per_pf, begin_id;
+ u8 n_used;
+ u32 reg;
+
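+	/* the quanta profile table is split into equal, contiguous slices,
+	 * one per function; begin_id is the first profile in this PF's slice
+	 */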
+ begin_id = (GLCOMM_QUANTA_PROF_MAX_INDEX + 1) / hw->dev_caps.num_funcs *
+ hw->logical_pf_id;
+
+ if (quanta_size == ICE_DFLT_QUANTA) {
+ *quanta_prof_idx = begin_id;
+ } else {
+ per_pf = (GLCOMM_QUANTA_PROF_MAX_INDEX + 1) /
+ hw->dev_caps.num_funcs;
+ n_used = pf->num_quanta_prof_used;
+ if (n_used < per_pf) {
+ *quanta_prof_idx = begin_id + 1 + n_used;
+ pf->num_quanta_prof_used++;
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ reg = FIELD_PREP(GLCOMM_QUANTA_PROF_QUANTA_SIZE_M, quanta_size) |
+ FIELD_PREP(GLCOMM_QUANTA_PROF_MAX_CMD_M, n_cmd) |
+ FIELD_PREP(GLCOMM_QUANTA_PROF_MAX_DESC_M, n_desc);
+ wr32(hw, GLCOMM_QUANTA_PROF(*quanta_prof_idx), reg);
+
+ return 0;
+}
+
+/**
+ * ice_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
+ * @vqs: virtchnl_queue_select structure containing bitmaps to validate
+ *
+ * Return true on successful validation, else false
+ */
+static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
+{
+ if ((!vqs->rx_queues && !vqs->tx_queues) ||
+ vqs->rx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF) ||
+ vqs->tx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF))
+ return false;
+
+ return true;
+}
+
+/**
+ * ice_vf_ena_txq_interrupt - enable Tx queue interrupt via QINT_TQCTL
+ * @vsi: VSI of the VF to configure
+ * @q_idx: VF queue index used to determine the queue in the PF's space
+ */
+void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ u32 pfq = vsi->txq_map[q_idx];
+ u32 reg;
+
+ reg = rd32(hw, QINT_TQCTL(pfq));
+
+ /* MSI-X index 0 in the VF's space is always for the OICR, which means
+ * this is most likely a poll mode VF driver, so don't enable an
+ * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
+ */
+ if (!(reg & QINT_TQCTL_MSIX_INDX_M))
+ return;
+
+ wr32(hw, QINT_TQCTL(pfq), reg | QINT_TQCTL_CAUSE_ENA_M);
+}
+
+/**
+ * ice_vf_ena_rxq_interrupt - enable Rx queue interrupt via QINT_RQCTL
+ * @vsi: VSI of the VF to configure
+ * @q_idx: VF queue index used to determine the queue in the PF's space
+ */
+void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ u32 pfq = vsi->rxq_map[q_idx];
+ u32 reg;
+
+ reg = rd32(hw, QINT_RQCTL(pfq));
+
+ /* MSI-X index 0 in the VF's space is always for the OICR, which means
+ * this is most likely a poll mode VF driver, so don't enable an
+ * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
+ */
+ if (!(reg & QINT_RQCTL_MSIX_INDX_M))
+ return;
+
+ wr32(hw, QINT_RQCTL(pfq), reg | QINT_RQCTL_CAUSE_ENA_M);
+}
+
+/**
+ * ice_vc_ena_qs_msg - enable queues for a VF
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Called in response to a VF request to enable all or specific queue(s).
+ */
+int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_queue_select *vqs =
+ (struct virtchnl_queue_select *)msg;
+ struct ice_vsi *vsi;
+ unsigned long q_map;
+ u16 vf_q_id;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_validate_vqs_bitmaps(vqs)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+	/* Enable only Rx rings; Tx rings were enabled by the FW when the
+ * Tx queue group list was configured and the context bits were
+ * programmed using ice_vsi_cfg_txqs
+ */
+ q_map = vqs->rx_queues;
+ for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
+ if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* Skip queue if enabled */
+ if (test_bit(vf_q_id, vf->rxq_ena))
+ continue;
+
+ if (ice_vsi_ctrl_one_rx_ring(vsi, true, vf_q_id, true)) {
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n",
+ vf_q_id, vsi->vsi_num);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ ice_vf_ena_rxq_interrupt(vsi, vf_q_id);
+ set_bit(vf_q_id, vf->rxq_ena);
+ }
+
+ q_map = vqs->tx_queues;
+ for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
+ if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* Skip queue if enabled */
+ if (test_bit(vf_q_id, vf->txq_ena))
+ continue;
+
+ ice_vf_ena_txq_interrupt(vsi, vf_q_id);
+ set_bit(vf_q_id, vf->txq_ena);
+ }
+
+ /* Set flag to indicate that queues are enabled */
+ if (v_ret == VIRTCHNL_STATUS_SUCCESS)
+ set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
+
+error_param:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_vf_vsi_dis_single_txq - disable a single Tx queue
+ * @vf: VF to disable queue for
+ * @vsi: VSI for the VF
+ * @q_id: VF relative (0-based) queue ID
+ *
+ * Attempt to disable the Tx queue passed in. If the Tx queue was successfully
+ * disabled then clear q_id bit in the enabled queues bitmap and return
+ * success. Otherwise return error.
+ */
+int ice_vf_vsi_dis_single_txq(struct ice_vf *vf, struct ice_vsi *vsi, u16 q_id)
+{
+ struct ice_txq_meta txq_meta = { 0 };
+ struct ice_tx_ring *ring;
+ int err;
+
+ if (!test_bit(q_id, vf->txq_ena))
+ dev_dbg(ice_pf_to_dev(vsi->back), "Queue %u on VSI %u is not enabled, but stopping it anyway\n",
+ q_id, vsi->vsi_num);
+
+ ring = vsi->tx_rings[q_id];
+ if (!ring)
+ return -EINVAL;
+
+ ice_fill_txq_meta(vsi, ring, &txq_meta);
+
+ err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id, ring, &txq_meta);
+ if (err) {
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
+ q_id, vsi->vsi_num);
+ return err;
+ }
+
+ /* Clear enabled queues flag */
+ clear_bit(q_id, vf->txq_ena);
+
+ return 0;
+}
+
+/**
+ * ice_vc_dis_qs_msg - disable queues for a VF
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Called in response to a VF request to disable all or specific queue(s).
+ */
+int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_queue_select *vqs =
+ (struct virtchnl_queue_select *)msg;
+ struct ice_vsi *vsi;
+ unsigned long q_map;
+ u16 vf_q_id;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
+ !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_validate_vqs_bitmaps(vqs)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (vqs->tx_queues) {
+ q_map = vqs->tx_queues;
+
+ for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
+ if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (ice_vf_vsi_dis_single_txq(vf, vsi, vf_q_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+ }
+ }
+
+ q_map = vqs->rx_queues;
+ /* speed up Rx queue disable by batching them if possible */
+ if (q_map &&
+ bitmap_equal(&q_map, vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF)) {
+ if (ice_vsi_stop_all_rx_rings(vsi)) {
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to stop all Rx rings on VSI %d\n",
+ vsi->vsi_num);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
+ } else if (q_map) {
+ for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
+ if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* Skip queue if not enabled */
+ if (!test_bit(vf_q_id, vf->rxq_ena))
+ continue;
+
+ if (ice_vsi_ctrl_one_rx_ring(vsi, false, vf_q_id,
+ true)) {
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n",
+ vf_q_id, vsi->vsi_num);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* Clear enabled queues flag */
+ clear_bit(vf_q_id, vf->rxq_ena);
+ }
+ }
+
+ /* Clear enabled queues flag */
+ if (v_ret == VIRTCHNL_STATUS_SUCCESS && ice_vf_has_no_qs_ena(vf))
+ clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
+
+error_param:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_cfg_interrupt - configure the IRQ-to-queue map for a vector
+ * @vf: pointer to the VF info
+ * @vsi: the VSI being configured
+ * @map: vector map for mapping vectors to queues
+ * @q_vector: structure for interrupt vector
+ *
+ * Return: VIRTCHNL_STATUS_SUCCESS on success, VIRTCHNL_STATUS_ERR_PARAM if a
+ * queue ID in the map is invalid.
+ */
+static enum virtchnl_status_code
+ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi,
+ struct virtchnl_vector_map *map,
+ struct ice_q_vector *q_vector)
+{
+ u16 vsi_q_id, vsi_q_id_idx;
+ unsigned long qmap;
+
+ q_vector->num_ring_rx = 0;
+ q_vector->num_ring_tx = 0;
+
+ qmap = map->rxq_map;
+ for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
+ vsi_q_id = vsi_q_id_idx;
+
+ if (!ice_vc_isvalid_q_id(vsi, vsi_q_id))
+ return VIRTCHNL_STATUS_ERR_PARAM;
+
+ q_vector->num_ring_rx++;
+ q_vector->rx.itr_idx = map->rxitr_idx;
+ vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
+ ice_cfg_rxq_interrupt(vsi, vsi_q_id,
+ q_vector->vf_reg_idx,
+ q_vector->rx.itr_idx);
+ }
+
+ qmap = map->txq_map;
+ for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
+ vsi_q_id = vsi_q_id_idx;
+
+ if (!ice_vc_isvalid_q_id(vsi, vsi_q_id))
+ return VIRTCHNL_STATUS_ERR_PARAM;
+
+ q_vector->num_ring_tx++;
+ q_vector->tx.itr_idx = map->txitr_idx;
+ vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
+ ice_cfg_txq_interrupt(vsi, vsi_q_id,
+ q_vector->vf_reg_idx,
+ q_vector->tx.itr_idx);
+ }
+
+ return VIRTCHNL_STATUS_SUCCESS;
+}
+
+/**
+ * ice_vc_cfg_irq_map_msg - configure the IRQ-to-queue map for a VF
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Called in response to a VF request to configure the IRQ-to-queue map.
+ */
+int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ u16 num_q_vectors_mapped, vsi_id, vector_id;
+ struct virtchnl_irq_map_info *irqmap_info;
+ struct virtchnl_vector_map *map;
+ struct ice_vsi *vsi;
+ int i;
+
+ irqmap_info = (struct virtchnl_irq_map_info *)msg;
+ num_q_vectors_mapped = irqmap_info->num_vectors;
+
+ /* Check to make sure number of VF vectors mapped is not greater than
+ * number of VF vectors originally allocated, and check that
+ * there is actually at least a single VF queue vector mapped
+ */
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
+ vf->num_msix < num_q_vectors_mapped ||
+ !num_q_vectors_mapped) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ for (i = 0; i < num_q_vectors_mapped; i++) {
+ struct ice_q_vector *q_vector;
+
+ map = &irqmap_info->vecmap[i];
+
+ vector_id = map->vector_id;
+ vsi_id = map->vsi_id;
+ /* vector_id is always 0-based for each VF, and can never be
+ * larger than or equal to the max allowed interrupts per VF
+ */
+ if (!(vector_id < vf->num_msix) ||
+ !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
+ (!vector_id && (map->rxq_map || map->txq_map))) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* No need to map VF miscellaneous or rogue vector */
+ if (!vector_id)
+ continue;
+
+		/* Subtract the non-queue vector from the vector_id passed by
+		 * the VF to get the actual VSI queue vector array index
+ */
+ q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF];
+ if (!q_vector) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+		/* look out for invalid queue indexes in the map */
+ v_ret = ice_cfg_interrupt(vf, vsi, map, q_vector);
+ if (v_ret)
+ goto error_param;
+ }
+
+error_param:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_vc_cfg_q_bw - Configure per queue bandwidth
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer which holds the command descriptor
+ *
+ * Configure the bandwidth of the VF's queues.
+ *
+ * Return: 0 on success or negative error value.
+ */
+int ice_vc_cfg_q_bw(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_queues_bw_cfg *qbw =
+ (struct virtchnl_queues_bw_cfg *)msg;
+ struct ice_vsi *vsi;
+ u16 i;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
+ !ice_vc_isvalid_vsi_id(vf, qbw->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ if (qbw->num_queues > ICE_MAX_RSS_QS_PER_VF ||
+ qbw->num_queues > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
+ dev_err(ice_pf_to_dev(vf->pf), "VF-%d trying to configure more than allocated number of queues: %d\n",
+ vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ for (i = 0; i < qbw->num_queues; i++) {
+ if (qbw->cfg[i].shaper.peak != 0 && vf->max_tx_rate != 0 &&
+ qbw->cfg[i].shaper.peak > vf->max_tx_rate) {
+ dev_warn(ice_pf_to_dev(vf->pf), "The maximum queue %d rate limit configuration may not take effect because the maximum TX rate for VF-%d is %d\n",
+ qbw->cfg[i].queue_id, vf->vf_id,
+ vf->max_tx_rate);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+ if (qbw->cfg[i].shaper.committed != 0 && vf->min_tx_rate != 0 &&
+ qbw->cfg[i].shaper.committed < vf->min_tx_rate) {
+ dev_warn(ice_pf_to_dev(vf->pf), "The minimum queue %d rate limit configuration may not take effect because the minimum TX rate for VF-%d is %d\n",
+ qbw->cfg[i].queue_id, vf->vf_id,
+ vf->min_tx_rate);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+ if (qbw->cfg[i].queue_id > vf->num_vf_qs) {
+ dev_warn(ice_pf_to_dev(vf->pf), "VF-%d trying to configure invalid queue_id\n",
+ vf->vf_id);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+ if (qbw->cfg[i].tc >= ICE_MAX_TRAFFIC_CLASS) {
+ dev_warn(ice_pf_to_dev(vf->pf), "VF-%d trying to configure a traffic class higher than allowed\n",
+ vf->vf_id);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+ }
+
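+	/* all entries validated; commit them to the VF's shadow config
+	 * before programming the scheduler below
+	 */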
+ for (i = 0; i < qbw->num_queues; i++) {
+ vf->qs_bw[i].queue_id = qbw->cfg[i].queue_id;
+ vf->qs_bw[i].peak = qbw->cfg[i].shaper.peak;
+ vf->qs_bw[i].committed = qbw->cfg[i].shaper.committed;
+ vf->qs_bw[i].tc = qbw->cfg[i].tc;
+ }
+
+ if (ice_vf_cfg_qs_bw(vf, qbw->num_queues))
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+
+err:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_QUEUE_BW,
+ v_ret, NULL, 0);
+}
+
+/**
+ * ice_vc_cfg_q_quanta - Configure per queue quanta
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer which holds the command descriptor
+ *
+ * Configure the quanta size for the VF's queues.
+ *
+ * Return: 0 on success or negative error value.
+ */
+int ice_vc_cfg_q_quanta(struct ice_vf *vf, u8 *msg)
+{
+ u16 quanta_prof_id, quanta_size, start_qid, num_queues, end_qid, i;
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_quanta_cfg *qquanta =
+ (struct virtchnl_quanta_cfg *)msg;
+ struct ice_vsi *vsi;
+ int ret;
+
+ start_qid = qquanta->queue_select.start_queue_id;
+ num_queues = qquanta->queue_select.num_queues;
+
+ if (check_add_overflow(start_qid, num_queues, &end_qid)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ if (end_qid > ICE_MAX_RSS_QS_PER_VF ||
+ end_qid > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
+ dev_err(ice_pf_to_dev(vf->pf), "VF-%d trying to configure more than allocated number of queues: %d\n",
+ vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ quanta_size = qquanta->quanta_size;
+ if (quanta_size > ICE_MAX_QUANTA_SIZE ||
+ quanta_size < ICE_MIN_QUANTA_SIZE) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ if (quanta_size % 64) {
+ dev_err(ice_pf_to_dev(vf->pf), "quanta size should be the product of 64\n");
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ ret = ice_vf_cfg_q_quanta_profile(vf, quanta_size,
+ &quanta_prof_id);
+ if (ret) {
+ v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
+ goto err;
+ }
+
+ for (i = start_qid; i < end_qid; i++)
+ vsi->tx_rings[i]->quanta_prof_id = quanta_prof_id;
+
+err:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_QUANTA,
+ v_ret, NULL, 0);
+}
+
+/**
+ * ice_vc_cfg_qs_msg - configure Rx/Tx queues for a VF
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Called in response to a VF request to configure the Rx/Tx queues.
+ */
+int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_vsi_queue_config_info *qci =
+ (struct virtchnl_vsi_queue_config_info *)msg;
+ struct virtchnl_queue_pair_info *qpi;
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+ int i = -1, q_idx;
+ bool ena_ts;
+ u8 act_prt;
+
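+	/* hold off LAG port changes while the VF queues are reprogrammed;
+	 * the active port is restored on completion
+	 */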
+ mutex_lock(&pf->lag_mutex);
+ act_prt = ice_lag_prepare_vf_reset(pf->lag);
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+ goto error_param;
+
+ if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id))
+ goto error_param;
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi)
+ goto error_param;
+
+ if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF ||
+ qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
+ dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
+ vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
+ goto error_param;
+ }
+
+ for (i = 0; i < qci->num_queue_pairs; i++) {
+ if (!qci->qpair[i].rxq.crc_disable)
+ continue;
+
+ if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_CRC) ||
+ vf->vlan_strip_ena)
+ goto error_param;
+ }
+
+ for (i = 0; i < qci->num_queue_pairs; i++) {
+ qpi = &qci->qpair[i];
+ if (qpi->txq.vsi_id != qci->vsi_id ||
+ qpi->rxq.vsi_id != qci->vsi_id ||
+ qpi->rxq.queue_id != qpi->txq.queue_id ||
+ qpi->txq.headwb_enabled ||
+ !ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
+ !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
+ !ice_vc_isvalid_q_id(vsi, qpi->txq.queue_id)) {
+ goto error_param;
+ }
+
+ q_idx = qpi->rxq.queue_id;
+
+ /* make sure selected "q_idx" is in valid range of queues
+ * for selected "vsi"
+ */
+ if (q_idx >= vsi->alloc_txq || q_idx >= vsi->alloc_rxq) {
+ goto error_param;
+ }
+
+ /* copy Tx queue info from VF into VSI */
+ if (qpi->txq.ring_len > 0) {
+ vsi->tx_rings[q_idx]->dma = qpi->txq.dma_ring_addr;
+ vsi->tx_rings[q_idx]->count = qpi->txq.ring_len;
+
+ /* Disable any existing queue first */
+ if (ice_vf_vsi_dis_single_txq(vf, vsi, q_idx))
+ goto error_param;
+
+ /* Configure a queue with the requested settings */
+ if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) {
+ dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure TX queue %d\n",
+ vf->vf_id, q_idx);
+ goto error_param;
+ }
+ }
+
+ /* copy Rx queue info from VF into VSI */
+ if (qpi->rxq.ring_len > 0) {
+ u16 max_frame_size = ice_vc_get_max_frame_size(vf);
+ struct ice_rx_ring *ring = vsi->rx_rings[q_idx];
+ u32 rxdid;
+
+ ring->dma = qpi->rxq.dma_ring_addr;
+ ring->count = qpi->rxq.ring_len;
+
+ if (qpi->rxq.crc_disable)
+ ring->flags |= ICE_RX_FLAGS_CRC_STRIP_DIS;
+ else
+ ring->flags &= ~ICE_RX_FLAGS_CRC_STRIP_DIS;
+
+ if (qpi->rxq.databuffer_size != 0 &&
+ (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
+ qpi->rxq.databuffer_size < 1024))
+ goto error_param;
+ ring->rx_buf_len = qpi->rxq.databuffer_size;
+ if (qpi->rxq.max_pkt_size > max_frame_size ||
+ qpi->rxq.max_pkt_size < 64)
+ goto error_param;
+
+ ring->max_frame = qpi->rxq.max_pkt_size;
+ /* add space for the port VLAN since the VF driver is
+ * not expected to account for it in the MTU
+ * calculation
+ */
+ if (ice_vf_is_port_vlan_ena(vf))
+ ring->max_frame += VLAN_HLEN;
+
+ if (ice_vsi_cfg_single_rxq(vsi, q_idx)) {
+ dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure RX queue %d\n",
+ vf->vf_id, q_idx);
+ goto error_param;
+ }
+
+ /* If Rx flex desc is supported, select RXDID for Rx
+ * queues. Otherwise, use legacy 32byte descriptor
+ * format. Legacy 16byte descriptor is not supported.
+ * If this RXDID is selected, return error.
+ */
+ if (vf->driver_caps &
+ VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
+ rxdid = qpi->rxq.rxdid;
+ if (!(BIT(rxdid) & pf->supported_rxdids))
+ goto error_param;
+ } else {
+ rxdid = ICE_RXDID_LEGACY_1;
+ }
+
+ ena_ts = ((vf->driver_caps &
+ VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) &&
+ (vf->driver_caps & VIRTCHNL_VF_CAP_PTP) &&
+ (qpi->rxq.flags & VIRTCHNL_PTP_RX_TSTAMP));
+
+ ice_write_qrxflxp_cntxt(&vsi->back->hw,
+ vsi->rxq_map[q_idx], rxdid,
+ ICE_RXDID_PRIO, ena_ts);
+ }
+ }
+
+ ice_lag_complete_vf_reset(pf->lag, act_prt);
+ mutex_unlock(&pf->lag_mutex);
+
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ VIRTCHNL_STATUS_SUCCESS, NULL, 0);
+error_param:
+ /* disable whatever we can */
+ for (; i >= 0; i--) {
+ if (ice_vsi_ctrl_one_rx_ring(vsi, false, i, true))
+ dev_err(ice_pf_to_dev(pf), "VF-%d could not disable RX queue %d\n",
+ vf->vf_id, i);
+ if (ice_vf_vsi_dis_single_txq(vf, vsi, i))
+ dev_err(ice_pf_to_dev(pf), "VF-%d could not disable TX queue %d\n",
+ vf->vf_id, i);
+ }
+
+ ice_lag_complete_vf_reset(pf->lag, act_prt);
+ mutex_unlock(&pf->lag_mutex);
+
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ VIRTCHNL_STATUS_ERR_PARAM, NULL, 0);
+}
+
+/**
+ * ice_vc_request_qs_msg - handle a VF queue-pair count request
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * VFs get a default number of queues but can use this message to request a
+ * different number. If the request is valid, the PF resets the VF and
+ * returns 0. Otherwise, the PF sends a virtchnl response informing the VF of
+ * the number of available queue pairs.
+ */
+int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_vf_res_request *vfres =
+ (struct virtchnl_vf_res_request *)msg;
+ u16 req_queues = vfres->num_queue_pairs;
+ struct ice_pf *pf = vf->pf;
+ u16 max_allowed_vf_queues;
+ u16 tx_rx_queue_left;
+ struct device *dev;
+ u16 cur_queues;
+
+ dev = ice_pf_to_dev(pf);
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ cur_queues = vf->num_vf_qs;
+ tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf),
+ ice_get_avail_rxq_count(pf));
+ max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
+ if (!req_queues) {
+ dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n",
+ vf->vf_id);
+ } else if (req_queues > ICE_MAX_RSS_QS_PER_VF) {
+ dev_err(dev, "VF %d tried to request more than %d queues.\n",
+ vf->vf_id, ICE_MAX_RSS_QS_PER_VF);
+ vfres->num_queue_pairs = ICE_MAX_RSS_QS_PER_VF;
+ } else if (req_queues > cur_queues &&
+ req_queues - cur_queues > tx_rx_queue_left) {
+ dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n",
+ vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
+ vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
+ ICE_MAX_RSS_QS_PER_VF);
+ } else {
+ /* request is successful, then reset VF */
+ vf->num_req_qs = req_queues;
+ ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
+ dev_info(dev, "VF %d granted request of %u queues.\n",
+ vf->vf_id, req_queues);
+ return 0;
+ }
+
+error_param:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
+ v_ret, (u8 *)vfres, sizeof(*vfres));
+}
diff --git a/drivers/net/ethernet/intel/ice/virt/queues.h b/drivers/net/ethernet/intel/ice/virt/queues.h
new file mode 100644
index 000000000000..c4a792cecea1
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/virt/queues.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2022, Intel Corporation. */
+
+#ifndef _ICE_VIRT_QUEUES_H_
+#define _ICE_VIRT_QUEUES_H_
+
+#include <linux/types.h>
+
+struct ice_vf;
+
+u16 ice_vc_get_max_frame_size(struct ice_vf *vf);
+int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg);
+int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg);
+int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg);
+int ice_vc_cfg_q_bw(struct ice_vf *vf, u8 *msg);
+int ice_vc_cfg_q_quanta(struct ice_vf *vf, u8 *msg);
+int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg);
+int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg);
+
+#endif /* _ICE_VIRT_QUEUES_H_ */
diff --git a/drivers/net/ethernet/intel/ice/virt/rss.c b/drivers/net/ethernet/intel/ice/virt/rss.c
new file mode 100644
index 000000000000..cbdbb32d512b
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/virt/rss.c
@@ -0,0 +1,719 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2022, Intel Corporation. */
+
+#include "rss.h"
+#include "ice_vf_lib_private.h"
+#include "ice.h"
+
+#define FIELD_SELECTOR(proto_hdr_field) \
+ BIT((proto_hdr_field) & PROTO_HDR_FIELD_MASK)
+
+struct ice_vc_hdr_match_type {
+ u32 vc_hdr; /* virtchnl headers (VIRTCHNL_PROTO_HDR_XXX) */
+ u32 ice_hdr; /* ice headers (ICE_FLOW_SEG_HDR_XXX) */
+};
+
+static const struct ice_vc_hdr_match_type ice_vc_hdr_list[] = {
+ {VIRTCHNL_PROTO_HDR_NONE, ICE_FLOW_SEG_HDR_NONE},
+ {VIRTCHNL_PROTO_HDR_ETH, ICE_FLOW_SEG_HDR_ETH},
+ {VIRTCHNL_PROTO_HDR_S_VLAN, ICE_FLOW_SEG_HDR_VLAN},
+ {VIRTCHNL_PROTO_HDR_C_VLAN, ICE_FLOW_SEG_HDR_VLAN},
+ {VIRTCHNL_PROTO_HDR_IPV4, ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER},
+ {VIRTCHNL_PROTO_HDR_IPV6, ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER},
+ {VIRTCHNL_PROTO_HDR_TCP, ICE_FLOW_SEG_HDR_TCP},
+ {VIRTCHNL_PROTO_HDR_UDP, ICE_FLOW_SEG_HDR_UDP},
+ {VIRTCHNL_PROTO_HDR_SCTP, ICE_FLOW_SEG_HDR_SCTP},
+ {VIRTCHNL_PROTO_HDR_PPPOE, ICE_FLOW_SEG_HDR_PPPOE},
+ {VIRTCHNL_PROTO_HDR_GTPU_IP, ICE_FLOW_SEG_HDR_GTPU_IP},
+ {VIRTCHNL_PROTO_HDR_GTPU_EH, ICE_FLOW_SEG_HDR_GTPU_EH},
+ {VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN,
+ ICE_FLOW_SEG_HDR_GTPU_DWN},
+ {VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP,
+ ICE_FLOW_SEG_HDR_GTPU_UP},
+ {VIRTCHNL_PROTO_HDR_L2TPV3, ICE_FLOW_SEG_HDR_L2TPV3},
+ {VIRTCHNL_PROTO_HDR_ESP, ICE_FLOW_SEG_HDR_ESP},
+ {VIRTCHNL_PROTO_HDR_AH, ICE_FLOW_SEG_HDR_AH},
+ {VIRTCHNL_PROTO_HDR_PFCP, ICE_FLOW_SEG_HDR_PFCP_SESSION},
+};
+
+struct ice_vc_hash_field_match_type {
+ u32 vc_hdr; /* virtchnl headers
+ * (VIRTCHNL_PROTO_HDR_XXX)
+ */
+ u32 vc_hash_field; /* virtchnl hash fields selector
+ * FIELD_SELECTOR((VIRTCHNL_PROTO_HDR_ETH_XXX))
+ */
+ u64 ice_hash_field; /* ice hash fields
+ * (BIT_ULL(ICE_FLOW_FIELD_IDX_XXX))
+ */
+};
+
+static const struct
+ice_vc_hash_field_match_type ice_vc_hash_field_list[] = {
+ {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA)},
+ {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_DA)},
+ {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
+ ICE_FLOW_HASH_ETH},
+ {VIRTCHNL_PROTO_HDR_ETH,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_TYPE)},
+ {VIRTCHNL_PROTO_HDR_S_VLAN,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_S_VLAN_ID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_S_VLAN)},
+ {VIRTCHNL_PROTO_HDR_C_VLAN,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_C_VLAN_ID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_C_VLAN)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
+ ICE_FLOW_HASH_IPV4},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
+ ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
+ ICE_FLOW_HASH_IPV6},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
+ ICE_FLOW_HASH_IPV6 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
+ {VIRTCHNL_PROTO_HDR_TCP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)},
+ {VIRTCHNL_PROTO_HDR_TCP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)},
+ {VIRTCHNL_PROTO_HDR_TCP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
+ ICE_FLOW_HASH_TCP_PORT},
+ {VIRTCHNL_PROTO_HDR_UDP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)},
+ {VIRTCHNL_PROTO_HDR_UDP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)},
+ {VIRTCHNL_PROTO_HDR_UDP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
+ ICE_FLOW_HASH_UDP_PORT},
+ {VIRTCHNL_PROTO_HDR_SCTP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)},
+ {VIRTCHNL_PROTO_HDR_SCTP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)},
+ {VIRTCHNL_PROTO_HDR_SCTP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
+ ICE_FLOW_HASH_SCTP_PORT},
+ {VIRTCHNL_PROTO_HDR_PPPOE,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID)},
+ {VIRTCHNL_PROTO_HDR_GTPU_IP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_GTPU_IP_TEID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID)},
+ {VIRTCHNL_PROTO_HDR_L2TPV3,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID)},
+ {VIRTCHNL_PROTO_HDR_ESP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ESP_SPI),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_ESP_SPI)},
+ {VIRTCHNL_PROTO_HDR_AH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_AH_SPI),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_AH_SPI)},
+ {VIRTCHNL_PROTO_HDR_PFCP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PFCP_SEID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_PFCP_SEID)},
+};
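
Each row in the table above is keyed on the exact OR of FIELD_SELECTOR() bits, so a VF requesting SRC|DST matches only the combined row and never the single-field rows. A minimal sketch of the selector math, with made-up enum values standing in for the virtchnl.h definitions:

#define DEMO_FIELD_MASK		0x1F		/* low bits index the field */
#define DEMO_SELECTOR(f)	(1U << ((f) & DEMO_FIELD_MASK))

enum { DEMO_IPV4_SRC = 32, DEMO_IPV4_DST = 33 };	/* hypothetical values */

/* 32 & 0x1F == 0 and 33 & 0x1F == 1, so: */
unsigned int src_only = DEMO_SELECTOR(DEMO_IPV4_SRC);	/* 0x1 */
unsigned int src_dst  = DEMO_SELECTOR(DEMO_IPV4_SRC) |
			DEMO_SELECTOR(DEMO_IPV4_DST);	/* 0x3 */

/* proto_hdr->field_selector is compared for equality against these keys,
 * so a SRC|DST request matches the combined row only.
 */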
+
+/**
+ * ice_vc_validate_pattern
+ * @vf: pointer to the VF info
+ * @proto: virtchnl protocol headers
+ *
+ * Validate whether the given protocol header pattern is supported.
+ *
+ * Return: true on success, false on error.
+ */
+bool
+ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto)
+{
+ bool is_ipv4 = false;
+ bool is_ipv6 = false;
+ bool is_udp = false;
+ u16 ptype = -1;
+ int i = 0;
+
+ while (i < proto->count &&
+ proto->proto_hdr[i].type != VIRTCHNL_PROTO_HDR_NONE) {
+ switch (proto->proto_hdr[i].type) {
+ case VIRTCHNL_PROTO_HDR_ETH:
+ ptype = ICE_PTYPE_MAC_PAY;
+ break;
+ case VIRTCHNL_PROTO_HDR_IPV4:
+ ptype = ICE_PTYPE_IPV4_PAY;
+ is_ipv4 = true;
+ break;
+ case VIRTCHNL_PROTO_HDR_IPV6:
+ ptype = ICE_PTYPE_IPV6_PAY;
+ is_ipv6 = true;
+ break;
+ case VIRTCHNL_PROTO_HDR_UDP:
+ if (is_ipv4)
+ ptype = ICE_PTYPE_IPV4_UDP_PAY;
+ else if (is_ipv6)
+ ptype = ICE_PTYPE_IPV6_UDP_PAY;
+ is_udp = true;
+ break;
+ case VIRTCHNL_PROTO_HDR_TCP:
+ if (is_ipv4)
+ ptype = ICE_PTYPE_IPV4_TCP_PAY;
+ else if (is_ipv6)
+ ptype = ICE_PTYPE_IPV6_TCP_PAY;
+ break;
+ case VIRTCHNL_PROTO_HDR_SCTP:
+ if (is_ipv4)
+ ptype = ICE_PTYPE_IPV4_SCTP_PAY;
+ else if (is_ipv6)
+ ptype = ICE_PTYPE_IPV6_SCTP_PAY;
+ break;
+ case VIRTCHNL_PROTO_HDR_GTPU_IP:
+ case VIRTCHNL_PROTO_HDR_GTPU_EH:
+ if (is_ipv4)
+ ptype = ICE_MAC_IPV4_GTPU;
+ else if (is_ipv6)
+ ptype = ICE_MAC_IPV6_GTPU;
+ goto out;
+ case VIRTCHNL_PROTO_HDR_L2TPV3:
+ if (is_ipv4)
+ ptype = ICE_MAC_IPV4_L2TPV3;
+ else if (is_ipv6)
+ ptype = ICE_MAC_IPV6_L2TPV3;
+ goto out;
+ case VIRTCHNL_PROTO_HDR_ESP:
+ if (is_ipv4)
+ ptype = is_udp ? ICE_MAC_IPV4_NAT_T_ESP :
+ ICE_MAC_IPV4_ESP;
+ else if (is_ipv6)
+ ptype = is_udp ? ICE_MAC_IPV6_NAT_T_ESP :
+ ICE_MAC_IPV6_ESP;
+ goto out;
+ case VIRTCHNL_PROTO_HDR_AH:
+ if (is_ipv4)
+ ptype = ICE_MAC_IPV4_AH;
+ else if (is_ipv6)
+ ptype = ICE_MAC_IPV6_AH;
+ goto out;
+ case VIRTCHNL_PROTO_HDR_PFCP:
+ if (is_ipv4)
+ ptype = ICE_MAC_IPV4_PFCP_SESSION;
+ else if (is_ipv6)
+ ptype = ICE_MAC_IPV6_PFCP_SESSION;
+ goto out;
+ default:
+ break;
+ }
+ i++;
+ }
+
+out:
+ return ice_hw_ptype_ena(&vf->pf->hw, ptype);
+}
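
A hedged usage sketch of the walk above; the pattern is illustrative and the struct field names follow virtchnl.h:

struct virtchnl_proto_hdrs hdrs = { .count = 4 };

hdrs.proto_hdr[0].type = VIRTCHNL_PROTO_HDR_ETH;  /* ptype = MAC_PAY */
hdrs.proto_hdr[1].type = VIRTCHNL_PROTO_HDR_IPV4; /* ptype = IPV4_PAY, is_ipv4 */
hdrs.proto_hdr[2].type = VIRTCHNL_PROTO_HDR_UDP;  /* ptype = IPV4_UDP_PAY, is_udp */
hdrs.proto_hdr[3].type = VIRTCHNL_PROTO_HDR_ESP;  /* UDP was seen first, so the
						   * walk resolves NAT-T ESP and
						   * jumps to "out"
						   */

/* true only if ICE_MAC_IPV4_NAT_T_ESP is enabled in the device's
 * ptype bitmap:
 */
bool ok = ice_vc_validate_pattern(vf, &hdrs);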
+
+/**
+ * ice_vc_parse_rss_cfg - parses hash fields and headers from
+ * a specific virtchnl RSS cfg
+ * @hw: pointer to the hardware
+ * @rss_cfg: pointer to the virtchnl RSS cfg
+ * @hash_cfg: pointer to the HW hash configuration
+ *
+ * Return true if all the protocol headers and hash fields in the RSS cfg
+ * could be parsed, else return false.
+ *
+ * This function parses the virtchnl RSS cfg into the intended hash fields
+ * and the intended headers for RSS configuration.
+ */
+static bool ice_vc_parse_rss_cfg(struct ice_hw *hw,
+ struct virtchnl_rss_cfg *rss_cfg,
+ struct ice_rss_hash_cfg *hash_cfg)
+{
+ const struct ice_vc_hash_field_match_type *hf_list;
+ const struct ice_vc_hdr_match_type *hdr_list;
+ int i, hf_list_len, hdr_list_len;
+ u32 *addl_hdrs = &hash_cfg->addl_hdrs;
+ u64 *hash_flds = &hash_cfg->hash_flds;
+
+ /* set outer layer RSS as default */
+ hash_cfg->hdr_type = ICE_RSS_OUTER_HEADERS;
+
+ if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC)
+ hash_cfg->symm = true;
+ else
+ hash_cfg->symm = false;
+
+ hf_list = ice_vc_hash_field_list;
+ hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list);
+ hdr_list = ice_vc_hdr_list;
+ hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list);
+
+ for (i = 0; i < rss_cfg->proto_hdrs.count; i++) {
+ struct virtchnl_proto_hdr *proto_hdr =
+ &rss_cfg->proto_hdrs.proto_hdr[i];
+ bool hdr_found = false;
+ int j;
+
+ /* Find matched ice headers according to virtchnl headers. */
+ for (j = 0; j < hdr_list_len; j++) {
+ struct ice_vc_hdr_match_type hdr_map = hdr_list[j];
+
+ if (proto_hdr->type == hdr_map.vc_hdr) {
+ *addl_hdrs |= hdr_map.ice_hdr;
+ hdr_found = true;
+ }
+ }
+
+ if (!hdr_found)
+ return false;
+
+ /* Find matched ice hash fields according to
+ * virtchnl hash fields.
+ */
+ for (j = 0; j < hf_list_len; j++) {
+ struct ice_vc_hash_field_match_type hf_map = hf_list[j];
+
+ if (proto_hdr->type == hf_map.vc_hdr &&
+ proto_hdr->field_selector == hf_map.vc_hash_field) {
+ *hash_flds |= hf_map.ice_hash_field;
+ break;
+ }
+ }
+ }
+
+ return true;
+}
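
For concreteness, a sketch of a cfg that the parser above resolves cleanly, assuming the virtchnl.h layouts; the resulting masks follow the two match tables at the top of this file:

struct virtchnl_rss_cfg cfg = {
	.rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC,
	.proto_hdrs.count = 2,
};

cfg.proto_hdrs.proto_hdr[0].type = VIRTCHNL_PROTO_HDR_IPV4;
cfg.proto_hdrs.proto_hdr[0].field_selector =
	FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
	FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST);
cfg.proto_hdrs.proto_hdr[1].type = VIRTCHNL_PROTO_HDR_TCP;
cfg.proto_hdrs.proto_hdr[1].field_selector =
	FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) |
	FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT);

/* ice_vc_parse_rss_cfg() then produces:
 *   addl_hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER |
 *               ICE_FLOW_SEG_HDR_TCP
 *   hash_flds = ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_TCP_PORT
 *   symm      = true (symmetric Toeplitz was requested)
 */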
+
+/**
+ * ice_vf_adv_rss_offload_ena - determine if capabilities support advanced
+ * RSS offloads
+ * @caps: VF driver negotiated capabilities
+ *
+ * Return true if VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF capability is set,
+ * else return false
+ */
+static bool ice_vf_adv_rss_offload_ena(u32 caps)
+{
+ return !!(caps & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF);
+}
+
+/**
+ * ice_vc_handle_rss_cfg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the message buffer
+ * @add: add an RSS config if true, otherwise delete an RSS config
+ *
+ * This function adds/deletes an RSS config.
+ */
+int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
+{
+ u32 v_opcode = add ? VIRTCHNL_OP_ADD_RSS_CFG : VIRTCHNL_OP_DEL_RSS_CFG;
+ struct virtchnl_rss_cfg *rss_cfg = (struct virtchnl_rss_cfg *)msg;
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_hw *hw = &vf->pf->hw;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
+ dev_dbg(dev, "VF %d attempting to configure RSS, but RSS is not supported by the PF\n",
+ vf->vf_id);
+ v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
+ goto error_param;
+ }
+
+ if (!ice_vf_adv_rss_offload_ena(vf->driver_caps)) {
+ dev_dbg(dev, "VF %d attempting to configure RSS, but Advanced RSS offload is not supported\n",
+ vf->vf_id);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (rss_cfg->proto_hdrs.count > VIRTCHNL_MAX_NUM_PROTO_HDRS ||
+ rss_cfg->rss_algorithm < VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC ||
+ rss_cfg->rss_algorithm > VIRTCHNL_RSS_ALG_XOR_SYMMETRIC) {
+ dev_dbg(dev, "VF %d attempting to configure RSS, but RSS configuration is not valid\n",
+ vf->vf_id);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_validate_pattern(vf, &rss_cfg->proto_hdrs)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_R_ASYMMETRIC) {
+ struct ice_vsi_ctx *ctx;
+ u8 lut_type, hash_type;
+ int status;
+
+ lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
+ hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_HASH_XOR :
+ ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ goto error_param;
+ }
+
+ ctx->info.q_opt_rss =
+ FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_LUT_M, lut_type) |
+ FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hash_type);
+
+ /* Preserve existing queueing option setting */
+ ctx->info.q_opt_rss |= (vsi->info.q_opt_rss &
+ ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M);
+ ctx->info.q_opt_tc = vsi->info.q_opt_tc;
+ ctx->info.q_opt_flags = vsi->info.q_opt_rss;
+
+ ctx->info.valid_sections =
+ cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
+
+ status = ice_update_vsi(hw, vsi->idx, ctx, NULL);
+ if (status) {
+ dev_err(dev, "update VSI for RSS failed, err %d aq_err %s\n",
+ status, libie_aq_str(hw->adminq.sq_last_status));
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ } else {
+ vsi->info.q_opt_rss = ctx->info.q_opt_rss;
+ }
+
+ kfree(ctx);
+ } else {
+ struct ice_rss_hash_cfg cfg;
+
+ /* Only check for the non-raw pattern case */
+ if (!ice_vc_validate_pattern(vf, &rss_cfg->proto_hdrs)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+ cfg.addl_hdrs = ICE_FLOW_SEG_HDR_NONE;
+ cfg.hash_flds = ICE_HASH_INVALID;
+ cfg.hdr_type = ICE_RSS_ANY_HEADERS;
+
+ if (!ice_vc_parse_rss_cfg(hw, rss_cfg, &cfg)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (add) {
+ if (ice_add_rss_cfg(hw, vsi, &cfg)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ dev_err(dev, "ice_add_rss_cfg failed for vsi = %d, v_ret = %d\n",
+ vsi->vsi_num, v_ret);
+ }
+ } else {
+ int status;
+
+ status = ice_rem_rss_cfg(hw, vsi->idx, &cfg);
+ /* We just ignore -ENOENT, because if two configurations
+ * share the same profile, removing one of them actually
+ * removes both, since the profile is deleted.
+ */
+ if (status && status != -ENOENT) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ dev_err(dev, "ice_rem_rss_cfg failed for VF ID:%d, error:%d\n",
+ vf->vf_id, status);
+ }
+ }
+ }
+
+error_param:
+ return ice_vc_send_msg_to_vf(vf, v_opcode, v_ret, NULL, 0);
+}
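
Seen from the VF side, the same payload drives both opcodes; a hedged sketch (demo_send_pf_msg() is a hypothetical transport stand-in, not the iavf driver's API):

/* For VIRTCHNL_RSS_ALG_R_ASYMMETRIC the PF switches the whole VSI to XOR
 * hashing instead of programming a flow profile; deleting restores Toeplitz.
 */
static int demo_rss_cfg(void *ctx, struct virtchnl_rss_cfg *cfg, bool add)
{
	u32 op = add ? VIRTCHNL_OP_ADD_RSS_CFG : VIRTCHNL_OP_DEL_RSS_CFG;

	return demo_send_pf_msg(ctx, op, (u8 *)cfg, sizeof(*cfg)); /* hypothetical */
}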
+
+/**
+ * ice_vc_config_rss_key
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Configure the VF's RSS key
+ */
+int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_rss_key *vrk =
+ (struct virtchnl_rss_key *)msg;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (ice_set_rss_key(vsi, vrk->key))
+ v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
+error_param:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_vc_config_rss_lut
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Configure the VF's RSS LUT
+ */
+int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (vrl->lut_entries != ICE_LUT_VSI_SIZE) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (ice_set_rss_lut(vsi, vrl->lut, ICE_LUT_VSI_SIZE))
+ v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
+error_param:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
+ NULL, 0);
+}
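
Both handlers above insist on exact payload sizes; a hedged sketch of well-formed requests, assuming the flexible-array layouts in virtchnl.h (vsi_id and num_rxq are placeholders):

static int demo_build_rss_msgs(u16 vsi_id, u16 num_rxq)
{
	struct virtchnl_rss_key *vrk;
	struct virtchnl_rss_lut *vrl;
	int i;

	vrk = kzalloc(struct_size(vrk, key, ICE_VSIQF_HKEY_ARRAY_SIZE),
		      GFP_KERNEL);
	vrl = kzalloc(struct_size(vrl, lut, ICE_LUT_VSI_SIZE), GFP_KERNEL);
	if (!vrk || !vrl)
		goto out;

	vrk->vsi_id = vsi_id;		/* must be a VSI this VF owns */
	vrk->key_len = ICE_VSIQF_HKEY_ARRAY_SIZE; /* any other size is rejected */
	get_random_bytes(vrk->key, vrk->key_len);

	vrl->vsi_id = vsi_id;
	vrl->lut_entries = ICE_LUT_VSI_SIZE;	  /* any other size is rejected */
	for (i = 0; i < ICE_LUT_VSI_SIZE; i++)
		vrl->lut[i] = i % num_rxq;	/* spread flows round-robin */

	/* ...send via VIRTCHNL_OP_CONFIG_RSS_KEY / VIRTCHNL_OP_CONFIG_RSS_LUT... */
out:
	kfree(vrk);
	kfree(vrl);
	return 0;
}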
+
+/**
+ * ice_vc_config_rss_hfunc
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Configure the VF's RSS Hash function
+ */
+int ice_vc_config_rss_hfunc(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_rss_hfunc *vrh = (struct virtchnl_rss_hfunc *)msg;
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ u8 hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vrh->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (vrh->rss_algorithm == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC)
+ hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ;
+
+ if (ice_set_rss_hfunc(vsi, hfunc))
+ v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
+error_param:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_HFUNC, v_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_vc_get_rss_hashcfg - return the RSS Hash configuration
+ * @vf: pointer to the VF info
+ */
+int ice_vc_get_rss_hashcfg(struct ice_vf *vf)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_rss_hashcfg *vrh = NULL;
+ int len = 0, ret;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
+ dev_err(ice_pf_to_dev(vf->pf), "RSS not supported by PF\n");
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ len = sizeof(struct virtchnl_rss_hashcfg);
+ vrh = kzalloc(len, GFP_KERNEL);
+ if (!vrh) {
+ v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ len = 0;
+ goto err;
+ }
+
+ vrh->hashcfg = ICE_DEFAULT_RSS_HASHCFG;
+err:
+ /* send the response back to the VF */
+ ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS, v_ret,
+ (u8 *)vrh, len);
+ kfree(vrh);
+ return ret;
+}
+
+/**
+ * ice_vc_set_rss_hashcfg - set RSS Hash configuration bits for the VF
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ */
+int ice_vc_set_rss_hashcfg(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_rss_hashcfg *vrh = (struct virtchnl_rss_hashcfg *)msg;
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+ struct device *dev;
+ int status;
+
+ dev = ice_pf_to_dev(pf);
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
+ dev_err(dev, "RSS not supported by PF\n");
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ /* Clear all previously programmed RSS configuration to allow VF drivers
+ * to customize the RSS configuration and/or completely disable RSS
+ */
+ status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx);
+ if (status && !vrh->hashcfg) {
+ /* only report failure to clear the current RSS configuration if
+ * that was clearly the VF's intention (i.e. vrh->hashcfg = 0)
+ */
+ v_ret = ice_err_to_virt_err(status);
+ goto err;
+ } else if (status) {
+ /* allow the VF to update the RSS configuration even on failure
+ * to clear the current RSS configuration in an attempt to keep
+ * RSS in a working state
+ */
+ dev_warn(dev, "Failed to clear the RSS configuration for VF %u\n",
+ vf->vf_id);
+ }
+
+ if (vrh->hashcfg) {
+ status = ice_add_avf_rss_cfg(&pf->hw, vsi, vrh->hashcfg);
+ v_ret = ice_err_to_virt_err(status);
+ }
+
+ /* save the requested VF configuration */
+ if (!v_ret)
+ vf->rss_hashcfg = vrh->hashcfg;
+
+ /* send the response to the VF */
+err:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_SET_RSS_HASHCFG, v_ret,
+ NULL, 0);
+}
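
Restating the clear-then-set contract above compactly; the flow-field bit names in the example are assumptions taken from the driver's AVF flow definitions:

/* hashcfg == 0: disable RSS entirely; a failed clear is reported back.
 * hashcfg != 0: clear everything, then program exactly the requested
 *               flow types; on success the mask is cached in
 *               vf->rss_hashcfg and survives until the next request.
 *
 * Example request hashing IPv4/TCP and IPv4/UDP flows (bit names are
 * assumptions):
 */
u64 hashcfg = BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP) |
	      BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP);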
+
diff --git a/drivers/net/ethernet/intel/ice/virt/rss.h b/drivers/net/ethernet/intel/ice/virt/rss.h
new file mode 100644
index 000000000000..784d4c43ce8b
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/virt/rss.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2022, Intel Corporation. */
+
+#ifndef _ICE_VIRT_RSS_H_
+#define _ICE_VIRT_RSS_H_
+
+#include <linux/types.h>
+
+struct ice_vf;
+
+int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add);
+int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg);
+int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg);
+int ice_vc_config_rss_hfunc(struct ice_vf *vf, u8 *msg);
+int ice_vc_get_rss_hashcfg(struct ice_vf *vf);
+int ice_vc_set_rss_hashcfg(struct ice_vf *vf, u8 *msg);
+
+#endif /* _ICE_VIRT_RSS_H_ */
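
These prototypes feed the virtchnl dispatcher; a hedged sketch of the expected routing (the real dispatch in ice_vc_process_vf_msg() goes through an ops table rather than a bare switch):

switch (v_opcode) {
case VIRTCHNL_OP_ADD_RSS_CFG:
	err = ice_vc_handle_rss_cfg(vf, msg, true);
	break;
case VIRTCHNL_OP_DEL_RSS_CFG:
	err = ice_vc_handle_rss_cfg(vf, msg, false);
	break;
case VIRTCHNL_OP_CONFIG_RSS_KEY:
	err = ice_vc_config_rss_key(vf, msg);
	break;
case VIRTCHNL_OP_CONFIG_RSS_LUT:
	err = ice_vc_config_rss_lut(vf, msg);
	break;
case VIRTCHNL_OP_CONFIG_RSS_HFUNC:
	err = ice_vc_config_rss_hfunc(vf, msg);
	break;
case VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS:
	err = ice_vc_get_rss_hashcfg(vf);
	break;
case VIRTCHNL_OP_SET_RSS_HASHCFG:
	err = ice_vc_set_rss_hashcfg(vf, msg);
	break;
}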
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/virt/virtchnl.c
index 257967273079..f3f921134379 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/virt/virtchnl.c
@@ -1,170 +1,20 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2022, Intel Corporation. */
-#include "ice_virtchnl.h"
+#include "virtchnl.h"
+#include "queues.h"
+#include "rss.h"
#include "ice_vf_lib_private.h"
#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"
-#include "ice_virtchnl_allowlist.h"
+#include "allowlist.h"
#include "ice_vf_vsi_vlan_ops.h"
#include "ice_vlan.h"
#include "ice_flex_pipe.h"
#include "ice_dcb_lib.h"
-#define FIELD_SELECTOR(proto_hdr_field) \
- BIT((proto_hdr_field) & PROTO_HDR_FIELD_MASK)
-
-struct ice_vc_hdr_match_type {
- u32 vc_hdr; /* virtchnl headers (VIRTCHNL_PROTO_HDR_XXX) */
- u32 ice_hdr; /* ice headers (ICE_FLOW_SEG_HDR_XXX) */
-};
-
-static const struct ice_vc_hdr_match_type ice_vc_hdr_list[] = {
- {VIRTCHNL_PROTO_HDR_NONE, ICE_FLOW_SEG_HDR_NONE},
- {VIRTCHNL_PROTO_HDR_ETH, ICE_FLOW_SEG_HDR_ETH},
- {VIRTCHNL_PROTO_HDR_S_VLAN, ICE_FLOW_SEG_HDR_VLAN},
- {VIRTCHNL_PROTO_HDR_C_VLAN, ICE_FLOW_SEG_HDR_VLAN},
- {VIRTCHNL_PROTO_HDR_IPV4, ICE_FLOW_SEG_HDR_IPV4 |
- ICE_FLOW_SEG_HDR_IPV_OTHER},
- {VIRTCHNL_PROTO_HDR_IPV6, ICE_FLOW_SEG_HDR_IPV6 |
- ICE_FLOW_SEG_HDR_IPV_OTHER},
- {VIRTCHNL_PROTO_HDR_TCP, ICE_FLOW_SEG_HDR_TCP},
- {VIRTCHNL_PROTO_HDR_UDP, ICE_FLOW_SEG_HDR_UDP},
- {VIRTCHNL_PROTO_HDR_SCTP, ICE_FLOW_SEG_HDR_SCTP},
- {VIRTCHNL_PROTO_HDR_PPPOE, ICE_FLOW_SEG_HDR_PPPOE},
- {VIRTCHNL_PROTO_HDR_GTPU_IP, ICE_FLOW_SEG_HDR_GTPU_IP},
- {VIRTCHNL_PROTO_HDR_GTPU_EH, ICE_FLOW_SEG_HDR_GTPU_EH},
- {VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN,
- ICE_FLOW_SEG_HDR_GTPU_DWN},
- {VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP,
- ICE_FLOW_SEG_HDR_GTPU_UP},
- {VIRTCHNL_PROTO_HDR_L2TPV3, ICE_FLOW_SEG_HDR_L2TPV3},
- {VIRTCHNL_PROTO_HDR_ESP, ICE_FLOW_SEG_HDR_ESP},
- {VIRTCHNL_PROTO_HDR_AH, ICE_FLOW_SEG_HDR_AH},
- {VIRTCHNL_PROTO_HDR_PFCP, ICE_FLOW_SEG_HDR_PFCP_SESSION},
-};
-
-struct ice_vc_hash_field_match_type {
- u32 vc_hdr; /* virtchnl headers
- * (VIRTCHNL_PROTO_HDR_XXX)
- */
- u32 vc_hash_field; /* virtchnl hash fields selector
- * FIELD_SELECTOR((VIRTCHNL_PROTO_HDR_ETH_XXX))
- */
- u64 ice_hash_field; /* ice hash fields
- * (BIT_ULL(ICE_FLOW_FIELD_IDX_XXX))
- */
-};
-
-static const struct
-ice_vc_hash_field_match_type ice_vc_hash_field_list[] = {
- {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC),
- BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA)},
- {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
- BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_DA)},
- {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
- ICE_FLOW_HASH_ETH},
- {VIRTCHNL_PROTO_HDR_ETH,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE),
- BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_TYPE)},
- {VIRTCHNL_PROTO_HDR_S_VLAN,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_S_VLAN_ID),
- BIT_ULL(ICE_FLOW_FIELD_IDX_S_VLAN)},
- {VIRTCHNL_PROTO_HDR_C_VLAN,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_C_VLAN_ID),
- BIT_ULL(ICE_FLOW_FIELD_IDX_C_VLAN)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
- ICE_FLOW_HASH_IPV4},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
- ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
- ICE_FLOW_HASH_IPV6},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) |
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) |
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
- ICE_FLOW_HASH_IPV6 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
- {VIRTCHNL_PROTO_HDR_TCP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)},
- {VIRTCHNL_PROTO_HDR_TCP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)},
- {VIRTCHNL_PROTO_HDR_TCP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
- ICE_FLOW_HASH_TCP_PORT},
- {VIRTCHNL_PROTO_HDR_UDP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)},
- {VIRTCHNL_PROTO_HDR_UDP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)},
- {VIRTCHNL_PROTO_HDR_UDP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
- ICE_FLOW_HASH_UDP_PORT},
- {VIRTCHNL_PROTO_HDR_SCTP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)},
- {VIRTCHNL_PROTO_HDR_SCTP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)},
- {VIRTCHNL_PROTO_HDR_SCTP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
- ICE_FLOW_HASH_SCTP_PORT},
- {VIRTCHNL_PROTO_HDR_PPPOE,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID),
- BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID)},
- {VIRTCHNL_PROTO_HDR_GTPU_IP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_GTPU_IP_TEID),
- BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID)},
- {VIRTCHNL_PROTO_HDR_L2TPV3,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID),
- BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID)},
- {VIRTCHNL_PROTO_HDR_ESP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ESP_SPI),
- BIT_ULL(ICE_FLOW_FIELD_IDX_ESP_SPI)},
- {VIRTCHNL_PROTO_HDR_AH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_AH_SPI),
- BIT_ULL(ICE_FLOW_FIELD_IDX_AH_SPI)},
- {VIRTCHNL_PROTO_HDR_PFCP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PFCP_SEID),
- BIT_ULL(ICE_FLOW_FIELD_IDX_PFCP_SEID)},
-};
-
/**
* ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
* @pf: pointer to the PF structure
@@ -338,28 +188,6 @@ static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
}
/**
- * ice_vc_get_max_frame_size - get max frame size allowed for VF
- * @vf: VF used to determine max frame size
- *
- * Max frame size is determined based on the current port's max frame size and
- * whether a port VLAN is configured on this VF. The VF is not aware whether
- * it's in a port VLAN so the PF needs to account for this in max frame size
- * checks and sending the max frame size to the VF.
- */
-static u16 ice_vc_get_max_frame_size(struct ice_vf *vf)
-{
- struct ice_port_info *pi = ice_vf_get_port_info(vf);
- u16 max_frame_size;
-
- max_frame_size = pi->phy.link_info.max_frame_size;
-
- if (ice_vf_is_port_vlan_ena(vf))
- max_frame_size -= VLAN_HLEN;
-
- return max_frame_size;
-}
-
-/**
* ice_vc_get_vlan_caps
* @hw: pointer to the hw
* @vf: pointer to the VF info
@@ -559,488 +387,6 @@ bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
}
/**
- * ice_vc_isvalid_q_id
- * @vsi: VSI to check queue ID against
- * @qid: VSI relative queue ID
- *
- * check for the valid queue ID
- */
-static bool ice_vc_isvalid_q_id(struct ice_vsi *vsi, u16 qid)
-{
- /* allocated Tx and Rx queues should be always equal for VF VSI */
- return qid < vsi->alloc_txq;
-}
-
-/**
- * ice_vc_isvalid_ring_len
- * @ring_len: length of ring
- *
- * check for the valid ring count, should be multiple of ICE_REQ_DESC_MULTIPLE
- * or zero
- */
-static bool ice_vc_isvalid_ring_len(u16 ring_len)
-{
- return ring_len == 0 ||
- (ring_len >= ICE_MIN_NUM_DESC &&
- ring_len <= ICE_MAX_NUM_DESC &&
- !(ring_len % ICE_REQ_DESC_MULTIPLE));
-}
-
-/**
- * ice_vc_validate_pattern
- * @vf: pointer to the VF info
- * @proto: virtchnl protocol headers
- *
- * validate the pattern is supported or not.
- *
- * Return: true on success, false on error.
- */
-bool
-ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto)
-{
- bool is_ipv4 = false;
- bool is_ipv6 = false;
- bool is_udp = false;
- u16 ptype = -1;
- int i = 0;
-
- while (i < proto->count &&
- proto->proto_hdr[i].type != VIRTCHNL_PROTO_HDR_NONE) {
- switch (proto->proto_hdr[i].type) {
- case VIRTCHNL_PROTO_HDR_ETH:
- ptype = ICE_PTYPE_MAC_PAY;
- break;
- case VIRTCHNL_PROTO_HDR_IPV4:
- ptype = ICE_PTYPE_IPV4_PAY;
- is_ipv4 = true;
- break;
- case VIRTCHNL_PROTO_HDR_IPV6:
- ptype = ICE_PTYPE_IPV6_PAY;
- is_ipv6 = true;
- break;
- case VIRTCHNL_PROTO_HDR_UDP:
- if (is_ipv4)
- ptype = ICE_PTYPE_IPV4_UDP_PAY;
- else if (is_ipv6)
- ptype = ICE_PTYPE_IPV6_UDP_PAY;
- is_udp = true;
- break;
- case VIRTCHNL_PROTO_HDR_TCP:
- if (is_ipv4)
- ptype = ICE_PTYPE_IPV4_TCP_PAY;
- else if (is_ipv6)
- ptype = ICE_PTYPE_IPV6_TCP_PAY;
- break;
- case VIRTCHNL_PROTO_HDR_SCTP:
- if (is_ipv4)
- ptype = ICE_PTYPE_IPV4_SCTP_PAY;
- else if (is_ipv6)
- ptype = ICE_PTYPE_IPV6_SCTP_PAY;
- break;
- case VIRTCHNL_PROTO_HDR_GTPU_IP:
- case VIRTCHNL_PROTO_HDR_GTPU_EH:
- if (is_ipv4)
- ptype = ICE_MAC_IPV4_GTPU;
- else if (is_ipv6)
- ptype = ICE_MAC_IPV6_GTPU;
- goto out;
- case VIRTCHNL_PROTO_HDR_L2TPV3:
- if (is_ipv4)
- ptype = ICE_MAC_IPV4_L2TPV3;
- else if (is_ipv6)
- ptype = ICE_MAC_IPV6_L2TPV3;
- goto out;
- case VIRTCHNL_PROTO_HDR_ESP:
- if (is_ipv4)
- ptype = is_udp ? ICE_MAC_IPV4_NAT_T_ESP :
- ICE_MAC_IPV4_ESP;
- else if (is_ipv6)
- ptype = is_udp ? ICE_MAC_IPV6_NAT_T_ESP :
- ICE_MAC_IPV6_ESP;
- goto out;
- case VIRTCHNL_PROTO_HDR_AH:
- if (is_ipv4)
- ptype = ICE_MAC_IPV4_AH;
- else if (is_ipv6)
- ptype = ICE_MAC_IPV6_AH;
- goto out;
- case VIRTCHNL_PROTO_HDR_PFCP:
- if (is_ipv4)
- ptype = ICE_MAC_IPV4_PFCP_SESSION;
- else if (is_ipv6)
- ptype = ICE_MAC_IPV6_PFCP_SESSION;
- goto out;
- default:
- break;
- }
- i++;
- }
-
-out:
- return ice_hw_ptype_ena(&vf->pf->hw, ptype);
-}
-
-/**
- * ice_vc_parse_rss_cfg - parses hash fields and headers from
- * a specific virtchnl RSS cfg
- * @hw: pointer to the hardware
- * @rss_cfg: pointer to the virtchnl RSS cfg
- * @hash_cfg: pointer to the HW hash configuration
- *
- * Return true if all the protocol header and hash fields in the RSS cfg could
- * be parsed, else return false
- *
- * This function parses the virtchnl RSS cfg to be the intended
- * hash fields and the intended header for RSS configuration
- */
-static bool ice_vc_parse_rss_cfg(struct ice_hw *hw,
- struct virtchnl_rss_cfg *rss_cfg,
- struct ice_rss_hash_cfg *hash_cfg)
-{
- const struct ice_vc_hash_field_match_type *hf_list;
- const struct ice_vc_hdr_match_type *hdr_list;
- int i, hf_list_len, hdr_list_len;
- u32 *addl_hdrs = &hash_cfg->addl_hdrs;
- u64 *hash_flds = &hash_cfg->hash_flds;
-
- /* set outer layer RSS as default */
- hash_cfg->hdr_type = ICE_RSS_OUTER_HEADERS;
-
- if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC)
- hash_cfg->symm = true;
- else
- hash_cfg->symm = false;
-
- hf_list = ice_vc_hash_field_list;
- hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list);
- hdr_list = ice_vc_hdr_list;
- hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list);
-
- for (i = 0; i < rss_cfg->proto_hdrs.count; i++) {
- struct virtchnl_proto_hdr *proto_hdr =
- &rss_cfg->proto_hdrs.proto_hdr[i];
- bool hdr_found = false;
- int j;
-
- /* Find matched ice headers according to virtchnl headers. */
- for (j = 0; j < hdr_list_len; j++) {
- struct ice_vc_hdr_match_type hdr_map = hdr_list[j];
-
- if (proto_hdr->type == hdr_map.vc_hdr) {
- *addl_hdrs |= hdr_map.ice_hdr;
- hdr_found = true;
- }
- }
-
- if (!hdr_found)
- return false;
-
- /* Find matched ice hash fields according to
- * virtchnl hash fields.
- */
- for (j = 0; j < hf_list_len; j++) {
- struct ice_vc_hash_field_match_type hf_map = hf_list[j];
-
- if (proto_hdr->type == hf_map.vc_hdr &&
- proto_hdr->field_selector == hf_map.vc_hash_field) {
- *hash_flds |= hf_map.ice_hash_field;
- break;
- }
- }
- }
-
- return true;
-}
-
-/**
- * ice_vf_adv_rss_offload_ena - determine if capabilities support advanced
- * RSS offloads
- * @caps: VF driver negotiated capabilities
- *
- * Return true if VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF capability is set,
- * else return false
- */
-static bool ice_vf_adv_rss_offload_ena(u32 caps)
-{
- return !!(caps & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF);
-}
-
-/**
- * ice_vc_handle_rss_cfg
- * @vf: pointer to the VF info
- * @msg: pointer to the message buffer
- * @add: add a RSS config if true, otherwise delete a RSS config
- *
- * This function adds/deletes a RSS config
- */
-static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
-{
- u32 v_opcode = add ? VIRTCHNL_OP_ADD_RSS_CFG : VIRTCHNL_OP_DEL_RSS_CFG;
- struct virtchnl_rss_cfg *rss_cfg = (struct virtchnl_rss_cfg *)msg;
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct device *dev = ice_pf_to_dev(vf->pf);
- struct ice_hw *hw = &vf->pf->hw;
- struct ice_vsi *vsi;
-
- if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
- dev_dbg(dev, "VF %d attempting to configure RSS, but RSS is not supported by the PF\n",
- vf->vf_id);
- v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
- goto error_param;
- }
-
- if (!ice_vf_adv_rss_offload_ena(vf->driver_caps)) {
- dev_dbg(dev, "VF %d attempting to configure RSS, but Advanced RSS offload is not supported\n",
- vf->vf_id);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (rss_cfg->proto_hdrs.count > VIRTCHNL_MAX_NUM_PROTO_HDRS ||
- rss_cfg->rss_algorithm < VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC ||
- rss_cfg->rss_algorithm > VIRTCHNL_RSS_ALG_XOR_SYMMETRIC) {
- dev_dbg(dev, "VF %d attempting to configure RSS, but RSS configuration is not valid\n",
- vf->vf_id);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_validate_pattern(vf, &rss_cfg->proto_hdrs)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_R_ASYMMETRIC) {
- struct ice_vsi_ctx *ctx;
- u8 lut_type, hash_type;
- int status;
-
- lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
- hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_HASH_XOR :
- ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
-
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (!ctx) {
- v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
- goto error_param;
- }
-
- ctx->info.q_opt_rss =
- FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_LUT_M, lut_type) |
- FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hash_type);
-
- /* Preserve existing queueing option setting */
- ctx->info.q_opt_rss |= (vsi->info.q_opt_rss &
- ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M);
- ctx->info.q_opt_tc = vsi->info.q_opt_tc;
- ctx->info.q_opt_flags = vsi->info.q_opt_rss;
-
- ctx->info.valid_sections =
- cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
-
- status = ice_update_vsi(hw, vsi->idx, ctx, NULL);
- if (status) {
- dev_err(dev, "update VSI for RSS failed, err %d aq_err %s\n",
- status, libie_aq_str(hw->adminq.sq_last_status));
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- } else {
- vsi->info.q_opt_rss = ctx->info.q_opt_rss;
- }
-
- kfree(ctx);
- } else {
- struct ice_rss_hash_cfg cfg;
-
- /* Only check for none raw pattern case */
- if (!ice_vc_validate_pattern(vf, &rss_cfg->proto_hdrs)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
- cfg.addl_hdrs = ICE_FLOW_SEG_HDR_NONE;
- cfg.hash_flds = ICE_HASH_INVALID;
- cfg.hdr_type = ICE_RSS_ANY_HEADERS;
-
- if (!ice_vc_parse_rss_cfg(hw, rss_cfg, &cfg)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (add) {
- if (ice_add_rss_cfg(hw, vsi, &cfg)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- dev_err(dev, "ice_add_rss_cfg failed for vsi = %d, v_ret = %d\n",
- vsi->vsi_num, v_ret);
- }
- } else {
- int status;
-
- status = ice_rem_rss_cfg(hw, vsi->idx, &cfg);
- /* We just ignore -ENOENT, because if two configurations
- * share the same profile remove one of them actually
- * removes both, since the profile is deleted.
- */
- if (status && status != -ENOENT) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- dev_err(dev, "ice_rem_rss_cfg failed for VF ID:%d, error:%d\n",
- vf->vf_id, status);
- }
- }
- }
-
-error_param:
- return ice_vc_send_msg_to_vf(vf, v_opcode, v_ret, NULL, 0);
-}
-
-/**
- * ice_vc_config_rss_key
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * Configure the VF's RSS key
- */
-static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_rss_key *vrk =
- (struct virtchnl_rss_key *)msg;
- struct ice_vsi *vsi;
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (ice_set_rss_key(vsi, vrk->key))
- v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
-error_param:
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret,
- NULL, 0);
-}
-
-/**
- * ice_vc_config_rss_lut
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * Configure the VF's RSS LUT
- */
-static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
-{
- struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct ice_vsi *vsi;
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (vrl->lut_entries != ICE_LUT_VSI_SIZE) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (ice_set_rss_lut(vsi, vrl->lut, ICE_LUT_VSI_SIZE))
- v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
-error_param:
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
- NULL, 0);
-}
-
-/**
- * ice_vc_config_rss_hfunc
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * Configure the VF's RSS Hash function
- */
-static int ice_vc_config_rss_hfunc(struct ice_vf *vf, u8 *msg)
-{
- struct virtchnl_rss_hfunc *vrh = (struct virtchnl_rss_hfunc *)msg;
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- u8 hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
- struct ice_vsi *vsi;
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_isvalid_vsi_id(vf, vrh->vsi_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (vrh->rss_algorithm == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC)
- hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ;
-
- if (ice_set_rss_hfunc(vsi, hfunc))
- v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
-error_param:
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_HFUNC, v_ret,
- NULL, 0);
-}
-
-/**
* ice_vc_get_qos_caps - Get current QoS caps from PF
* @vf: pointer to the VF info
*
@@ -1122,110 +468,6 @@ err:
}
/**
- * ice_vf_cfg_qs_bw - Configure per queue bandwidth
- * @vf: pointer to the VF info
- * @num_queues: number of queues to be configured
- *
- * Configure per queue bandwidth.
- *
- * Return: 0 on success or negative error value.
- */
-static int ice_vf_cfg_qs_bw(struct ice_vf *vf, u16 num_queues)
-{
- struct ice_hw *hw = &vf->pf->hw;
- struct ice_vsi *vsi;
- int ret;
- u16 i;
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi)
- return -EINVAL;
-
- for (i = 0; i < num_queues; i++) {
- u32 p_rate, min_rate;
- u8 tc;
-
- p_rate = vf->qs_bw[i].peak;
- min_rate = vf->qs_bw[i].committed;
- tc = vf->qs_bw[i].tc;
- if (p_rate)
- ret = ice_cfg_q_bw_lmt(hw->port_info, vsi->idx, tc,
- vf->qs_bw[i].queue_id,
- ICE_MAX_BW, p_rate);
- else
- ret = ice_cfg_q_bw_dflt_lmt(hw->port_info, vsi->idx, tc,
- vf->qs_bw[i].queue_id,
- ICE_MAX_BW);
- if (ret)
- return ret;
-
- if (min_rate)
- ret = ice_cfg_q_bw_lmt(hw->port_info, vsi->idx, tc,
- vf->qs_bw[i].queue_id,
- ICE_MIN_BW, min_rate);
- else
- ret = ice_cfg_q_bw_dflt_lmt(hw->port_info, vsi->idx, tc,
- vf->qs_bw[i].queue_id,
- ICE_MIN_BW);
-
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-/**
- * ice_vf_cfg_q_quanta_profile - Configure quanta profile
- * @vf: pointer to the VF info
- * @quanta_prof_idx: pointer to the quanta profile index
- * @quanta_size: quanta size to be set
- *
- * This function chooses an available quanta profile and configures the
- * register. The quanta profiles are divided evenly among the device ports and
- * are then available to the specific PF and its VFs. The first profile for
- * each PF is a reserved default profile; only the quanta size of the
- * remaining unused profiles can be modified.
- *
- * Return: 0 on success or negative error value.
- */
-static int ice_vf_cfg_q_quanta_profile(struct ice_vf *vf, u16 quanta_size,
- u16 *quanta_prof_idx)
-{
- const u16 n_desc = calc_quanta_desc(quanta_size);
- struct ice_hw *hw = &vf->pf->hw;
- const u16 n_cmd = 2 * n_desc;
- struct ice_pf *pf = vf->pf;
- u16 per_pf, begin_id;
- u8 n_used;
- u32 reg;
-
- begin_id = (GLCOMM_QUANTA_PROF_MAX_INDEX + 1) / hw->dev_caps.num_funcs *
- hw->logical_pf_id;
-
- if (quanta_size == ICE_DFLT_QUANTA) {
- *quanta_prof_idx = begin_id;
- } else {
- per_pf = (GLCOMM_QUANTA_PROF_MAX_INDEX + 1) /
- hw->dev_caps.num_funcs;
- n_used = pf->num_quanta_prof_used;
- if (n_used < per_pf) {
- *quanta_prof_idx = begin_id + 1 + n_used;
- pf->num_quanta_prof_used++;
- } else {
- return -EINVAL;
- }
- }
-
- reg = FIELD_PREP(GLCOMM_QUANTA_PROF_QUANTA_SIZE_M, quanta_size) |
- FIELD_PREP(GLCOMM_QUANTA_PROF_MAX_CMD_M, n_cmd) |
- FIELD_PREP(GLCOMM_QUANTA_PROF_MAX_DESC_M, n_desc);
- wr32(hw, GLCOMM_QUANTA_PROF(*quanta_prof_idx), reg);
-
- return 0;
-}
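
The index math in the helper being moved out above is easier to follow with concrete numbers; the counts in this worked example are illustrative, not the hardware's:

/* Assume GLCOMM_QUANTA_PROF_MAX_INDEX + 1 == 16 profiles and
 * hw->dev_caps.num_funcs == 4 PFs:
 *
 *   per_pf   = 16 / 4 = 4 profiles per PF
 *   begin_id = 4 * hw->logical_pf_id   (PF 1 owns profiles 4..7)
 *
 * Profile begin_id is the reserved default used for ICE_DFLT_QUANTA;
 * the remaining three are handed out as begin_id + 1 + n_used, and the
 * function returns -EINVAL once all per-PF profiles are in use.
 */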
-
-/**
* ice_vc_cfg_promiscuous_mode_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -1407,757 +649,6 @@ error_param:
}
/**
- * ice_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
- * @vqs: virtchnl_queue_select structure containing bitmaps to validate
- *
- * Return true on successful validation, else false
- */
-static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
-{
- if ((!vqs->rx_queues && !vqs->tx_queues) ||
- vqs->rx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF) ||
- vqs->tx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF))
- return false;
-
- return true;
-}
-
-/**
- * ice_vf_ena_txq_interrupt - enable Tx queue interrupt via QINT_TQCTL
- * @vsi: VSI of the VF to configure
- * @q_idx: VF queue index used to determine the queue in the PF's space
- */
-void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx)
-{
- struct ice_hw *hw = &vsi->back->hw;
- u32 pfq = vsi->txq_map[q_idx];
- u32 reg;
-
- reg = rd32(hw, QINT_TQCTL(pfq));
-
- /* MSI-X index 0 in the VF's space is always for the OICR, which means
- * this is most likely a poll mode VF driver, so don't enable an
- * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
- */
- if (!(reg & QINT_TQCTL_MSIX_INDX_M))
- return;
-
- wr32(hw, QINT_TQCTL(pfq), reg | QINT_TQCTL_CAUSE_ENA_M);
-}
-
-/**
- * ice_vf_ena_rxq_interrupt - enable Rx queue interrupt via QINT_RQCTL
- * @vsi: VSI of the VF to configure
- * @q_idx: VF queue index used to determine the queue in the PF's space
- */
-void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx)
-{
- struct ice_hw *hw = &vsi->back->hw;
- u32 pfq = vsi->rxq_map[q_idx];
- u32 reg;
-
- reg = rd32(hw, QINT_RQCTL(pfq));
-
- /* MSI-X index 0 in the VF's space is always for the OICR, which means
- * this is most likely a poll mode VF driver, so don't enable an
- * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
- */
- if (!(reg & QINT_RQCTL_MSIX_INDX_M))
- return;
-
- wr32(hw, QINT_RQCTL(pfq), reg | QINT_RQCTL_CAUSE_ENA_M);
-}
-
-/**
- * ice_vc_ena_qs_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * called from the VF to enable all or specific queue(s)
- */
-static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_queue_select *vqs =
- (struct virtchnl_queue_select *)msg;
- struct ice_vsi *vsi;
- unsigned long q_map;
- u16 vf_q_id;
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_validate_vqs_bitmaps(vqs)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- /* Enable only Rx rings, Tx rings were enabled by the FW when the
- * Tx queue group list was configured and the context bits were
- * programmed using ice_vsi_cfg_txqs
- */
- q_map = vqs->rx_queues;
- for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
- if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- /* Skip queue if enabled */
- if (test_bit(vf_q_id, vf->rxq_ena))
- continue;
-
- if (ice_vsi_ctrl_one_rx_ring(vsi, true, vf_q_id, true)) {
- dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n",
- vf_q_id, vsi->vsi_num);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- ice_vf_ena_rxq_interrupt(vsi, vf_q_id);
- set_bit(vf_q_id, vf->rxq_ena);
- }
-
- q_map = vqs->tx_queues;
- for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
- if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- /* Skip queue if enabled */
- if (test_bit(vf_q_id, vf->txq_ena))
- continue;
-
- ice_vf_ena_txq_interrupt(vsi, vf_q_id);
- set_bit(vf_q_id, vf->txq_ena);
- }
-
- /* Set flag to indicate that queues are enabled */
- if (v_ret == VIRTCHNL_STATUS_SUCCESS)
- set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
-
-error_param:
- /* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
- NULL, 0);
-}
-
-/**
- * ice_vf_vsi_dis_single_txq - disable a single Tx queue
- * @vf: VF to disable queue for
- * @vsi: VSI for the VF
- * @q_id: VF relative (0-based) queue ID
- *
- * Attempt to disable the Tx queue passed in. If the Tx queue was successfully
- * disabled then clear q_id bit in the enabled queues bitmap and return
- * success. Otherwise return error.
- */
-int ice_vf_vsi_dis_single_txq(struct ice_vf *vf, struct ice_vsi *vsi, u16 q_id)
-{
- struct ice_txq_meta txq_meta = { 0 };
- struct ice_tx_ring *ring;
- int err;
-
- if (!test_bit(q_id, vf->txq_ena))
- dev_dbg(ice_pf_to_dev(vsi->back), "Queue %u on VSI %u is not enabled, but stopping it anyway\n",
- q_id, vsi->vsi_num);
-
- ring = vsi->tx_rings[q_id];
- if (!ring)
- return -EINVAL;
-
- ice_fill_txq_meta(vsi, ring, &txq_meta);
-
- err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id, ring, &txq_meta);
- if (err) {
- dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
- q_id, vsi->vsi_num);
- return err;
- }
-
- /* Clear enabled queues flag */
- clear_bit(q_id, vf->txq_ena);
-
- return 0;
-}
-
-/**
- * ice_vc_dis_qs_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * called from the VF to disable all or specific queue(s)
- */
-static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_queue_select *vqs =
- (struct virtchnl_queue_select *)msg;
- struct ice_vsi *vsi;
- unsigned long q_map;
- u16 vf_q_id;
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
- !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_validate_vqs_bitmaps(vqs)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (vqs->tx_queues) {
- q_map = vqs->tx_queues;
-
- for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
- if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (ice_vf_vsi_dis_single_txq(vf, vsi, vf_q_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
- }
- }
-
- q_map = vqs->rx_queues;
- /* speed up Rx queue disable by batching them if possible */
- if (q_map &&
- bitmap_equal(&q_map, vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF)) {
- if (ice_vsi_stop_all_rx_rings(vsi)) {
- dev_err(ice_pf_to_dev(vsi->back), "Failed to stop all Rx rings on VSI %d\n",
- vsi->vsi_num);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
- } else if (q_map) {
- for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
- if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- /* Skip queue if not enabled */
- if (!test_bit(vf_q_id, vf->rxq_ena))
- continue;
-
- if (ice_vsi_ctrl_one_rx_ring(vsi, false, vf_q_id,
- true)) {
- dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n",
- vf_q_id, vsi->vsi_num);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- /* Clear enabled queues flag */
- clear_bit(vf_q_id, vf->rxq_ena);
- }
- }
-
- /* Clear enabled queues flag */
- if (v_ret == VIRTCHNL_STATUS_SUCCESS && ice_vf_has_no_qs_ena(vf))
- clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
-
-error_param:
- /* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
- NULL, 0);
-}
-
-/**
- * ice_cfg_interrupt
- * @vf: pointer to the VF info
- * @vsi: the VSI being configured
- * @map: vector map for mapping vectors to queues
- * @q_vector: structure for interrupt vector
- * configure the IRQ to queue map
- */
-static enum virtchnl_status_code
-ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi,
- struct virtchnl_vector_map *map,
- struct ice_q_vector *q_vector)
-{
- u16 vsi_q_id, vsi_q_id_idx;
- unsigned long qmap;
-
- q_vector->num_ring_rx = 0;
- q_vector->num_ring_tx = 0;
-
- qmap = map->rxq_map;
- for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
- vsi_q_id = vsi_q_id_idx;
-
- if (!ice_vc_isvalid_q_id(vsi, vsi_q_id))
- return VIRTCHNL_STATUS_ERR_PARAM;
-
- q_vector->num_ring_rx++;
- q_vector->rx.itr_idx = map->rxitr_idx;
- vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
- ice_cfg_rxq_interrupt(vsi, vsi_q_id,
- q_vector->vf_reg_idx,
- q_vector->rx.itr_idx);
- }
-
- qmap = map->txq_map;
- for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
- vsi_q_id = vsi_q_id_idx;
-
- if (!ice_vc_isvalid_q_id(vsi, vsi_q_id))
- return VIRTCHNL_STATUS_ERR_PARAM;
-
- q_vector->num_ring_tx++;
- q_vector->tx.itr_idx = map->txitr_idx;
- vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
- ice_cfg_txq_interrupt(vsi, vsi_q_id,
- q_vector->vf_reg_idx,
- q_vector->tx.itr_idx);
- }
-
- return VIRTCHNL_STATUS_SUCCESS;
-}
-
-/**
- * ice_vc_cfg_irq_map_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * called from the VF to configure the IRQ to queue map
- */
-static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- u16 num_q_vectors_mapped, vsi_id, vector_id;
- struct virtchnl_irq_map_info *irqmap_info;
- struct virtchnl_vector_map *map;
- struct ice_vsi *vsi;
- int i;
-
- irqmap_info = (struct virtchnl_irq_map_info *)msg;
- num_q_vectors_mapped = irqmap_info->num_vectors;
-
- /* Check to make sure number of VF vectors mapped is not greater than
- * number of VF vectors originally allocated, and check that
- * there is actually at least a single VF queue vector mapped
- */
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
- vf->num_msix < num_q_vectors_mapped ||
- !num_q_vectors_mapped) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- for (i = 0; i < num_q_vectors_mapped; i++) {
- struct ice_q_vector *q_vector;
-
- map = &irqmap_info->vecmap[i];
-
- vector_id = map->vector_id;
- vsi_id = map->vsi_id;
- /* vector_id is always 0-based for each VF, and can never be
- * larger than or equal to the max allowed interrupts per VF
- */
- if (!(vector_id < vf->num_msix) ||
- !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
- (!vector_id && (map->rxq_map || map->txq_map))) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- /* No need to map VF miscellaneous or rogue vector */
- if (!vector_id)
- continue;
-
- /* Subtract non queue vector from vector_id passed by VF
- * to get actual number of VSI queue vector array index
- */
- q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF];
- if (!q_vector) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- /* lookout for the invalid queue index */
- v_ret = ice_cfg_interrupt(vf, vsi, map, q_vector);
- if (v_ret)
- goto error_param;
- }
-
-error_param:
- /* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
- NULL, 0);
-}
-
-/**
- * ice_vc_cfg_q_bw - Configure per queue bandwidth
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer which holds the command descriptor
- *
- * Configure VF queues bandwidth.
- *
- * Return: 0 on success or negative error value.
- */
-static int ice_vc_cfg_q_bw(struct ice_vf *vf, u8 *msg)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_queues_bw_cfg *qbw =
- (struct virtchnl_queues_bw_cfg *)msg;
- struct ice_vsi *vsi;
- u16 i;
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
- !ice_vc_isvalid_vsi_id(vf, qbw->vsi_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- if (qbw->num_queues > ICE_MAX_RSS_QS_PER_VF ||
- qbw->num_queues > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
- dev_err(ice_pf_to_dev(vf->pf), "VF-%d trying to configure more than allocated number of queues: %d\n",
- vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- for (i = 0; i < qbw->num_queues; i++) {
- if (qbw->cfg[i].shaper.peak != 0 && vf->max_tx_rate != 0 &&
- qbw->cfg[i].shaper.peak > vf->max_tx_rate) {
- dev_warn(ice_pf_to_dev(vf->pf), "The maximum queue %d rate limit configuration may not take effect because the maximum TX rate for VF-%d is %d\n",
- qbw->cfg[i].queue_id, vf->vf_id,
- vf->max_tx_rate);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
- if (qbw->cfg[i].shaper.committed != 0 && vf->min_tx_rate != 0 &&
- qbw->cfg[i].shaper.committed < vf->min_tx_rate) {
- dev_warn(ice_pf_to_dev(vf->pf), "The minimum queue %d rate limit configuration may not take effect because the minimum TX rate for VF-%d is %d\n",
- qbw->cfg[i].queue_id, vf->vf_id,
- vf->min_tx_rate);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
- if (qbw->cfg[i].queue_id > vf->num_vf_qs) {
- dev_warn(ice_pf_to_dev(vf->pf), "VF-%d trying to configure invalid queue_id\n",
- vf->vf_id);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
- if (qbw->cfg[i].tc >= ICE_MAX_TRAFFIC_CLASS) {
- dev_warn(ice_pf_to_dev(vf->pf), "VF-%d trying to configure a traffic class higher than allowed\n",
- vf->vf_id);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
- }
-
- for (i = 0; i < qbw->num_queues; i++) {
- vf->qs_bw[i].queue_id = qbw->cfg[i].queue_id;
- vf->qs_bw[i].peak = qbw->cfg[i].shaper.peak;
- vf->qs_bw[i].committed = qbw->cfg[i].shaper.committed;
- vf->qs_bw[i].tc = qbw->cfg[i].tc;
- }
-
- if (ice_vf_cfg_qs_bw(vf, qbw->num_queues))
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-
-err:
- /* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_QUEUE_BW,
- v_ret, NULL, 0);
-}
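
The shaper bounds enforced in the loop above reduce to two comparisons; a minimal standalone restatement (demo_qbw_ok() is hypothetical):

/* 0 means "unset" for every rate involved. */
static bool demo_qbw_ok(u32 peak, u32 committed, u32 vf_max, u32 vf_min)
{
	if (peak && vf_max && peak > vf_max)
		return false;	/* queue peak exceeds the VF-wide max Tx rate */
	if (committed && vf_min && committed < vf_min)
		return false;	/* queue floor is below the VF-wide min Tx rate */
	return true;
}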
-
-/**
- * ice_vc_cfg_q_quanta - Configure per queue quanta
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer which holds the command descriptor
- *
- * Configure VF queues quanta.
- *
- * Return: 0 on success or negative error value.
- */
-static int ice_vc_cfg_q_quanta(struct ice_vf *vf, u8 *msg)
-{
- u16 quanta_prof_id, quanta_size, start_qid, num_queues, end_qid, i;
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_quanta_cfg *qquanta =
- (struct virtchnl_quanta_cfg *)msg;
- struct ice_vsi *vsi;
- int ret;
-
- start_qid = qquanta->queue_select.start_queue_id;
- num_queues = qquanta->queue_select.num_queues;
-
- if (check_add_overflow(start_qid, num_queues, &end_qid)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- if (end_qid > ICE_MAX_RSS_QS_PER_VF ||
- end_qid > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
- dev_err(ice_pf_to_dev(vf->pf), "VF-%d trying to configure more than allocated number of queues: %d\n",
- vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- quanta_size = qquanta->quanta_size;
- if (quanta_size > ICE_MAX_QUANTA_SIZE ||
- quanta_size < ICE_MIN_QUANTA_SIZE) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- if (quanta_size % 64) {
- dev_err(ice_pf_to_dev(vf->pf), "quanta size should be the product of 64\n");
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- ret = ice_vf_cfg_q_quanta_profile(vf, quanta_size,
- &quanta_prof_id);
- if (ret) {
- v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
- goto err;
- }
-
- for (i = start_qid; i < end_qid; i++)
- vsi->tx_rings[i]->quanta_prof_id = quanta_prof_id;
-
-err:
- /* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_QUANTA,
- v_ret, NULL, 0);
-}
-
-/**
- * ice_vc_cfg_qs_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * called from the VF to configure the Rx/Tx queues
- */
-static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
-{
- struct virtchnl_vsi_queue_config_info *qci =
- (struct virtchnl_vsi_queue_config_info *)msg;
- struct virtchnl_queue_pair_info *qpi;
- struct ice_pf *pf = vf->pf;
- struct ice_vsi *vsi;
- int i = -1, q_idx;
- bool ena_ts;
- u8 act_prt;
-
- mutex_lock(&pf->lag_mutex);
- act_prt = ice_lag_prepare_vf_reset(pf->lag);
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
- goto error_param;
-
- if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id))
- goto error_param;
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi)
- goto error_param;
-
- if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF ||
- qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
- dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
- vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
- goto error_param;
- }
-
- for (i = 0; i < qci->num_queue_pairs; i++) {
- if (!qci->qpair[i].rxq.crc_disable)
- continue;
-
- if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_CRC) ||
- vf->vlan_strip_ena)
- goto error_param;
- }
-
- for (i = 0; i < qci->num_queue_pairs; i++) {
- qpi = &qci->qpair[i];
- if (qpi->txq.vsi_id != qci->vsi_id ||
- qpi->rxq.vsi_id != qci->vsi_id ||
- qpi->rxq.queue_id != qpi->txq.queue_id ||
- qpi->txq.headwb_enabled ||
- !ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
- !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
- !ice_vc_isvalid_q_id(vsi, qpi->txq.queue_id)) {
- goto error_param;
- }
-
- q_idx = qpi->rxq.queue_id;
-
- /* make sure selected "q_idx" is in valid range of queues
- * for selected "vsi"
- */
- if (q_idx >= vsi->alloc_txq || q_idx >= vsi->alloc_rxq) {
- goto error_param;
- }
-
- /* copy Tx queue info from VF into VSI */
- if (qpi->txq.ring_len > 0) {
- vsi->tx_rings[q_idx]->dma = qpi->txq.dma_ring_addr;
- vsi->tx_rings[q_idx]->count = qpi->txq.ring_len;
-
- /* Disable any existing queue first */
- if (ice_vf_vsi_dis_single_txq(vf, vsi, q_idx))
- goto error_param;
-
- /* Configure a queue with the requested settings */
- if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) {
- dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure TX queue %d\n",
- vf->vf_id, q_idx);
- goto error_param;
- }
- }
-
- /* copy Rx queue info from VF into VSI */
- if (qpi->rxq.ring_len > 0) {
- u16 max_frame_size = ice_vc_get_max_frame_size(vf);
- struct ice_rx_ring *ring = vsi->rx_rings[q_idx];
- u32 rxdid;
-
- ring->dma = qpi->rxq.dma_ring_addr;
- ring->count = qpi->rxq.ring_len;
-
- if (qpi->rxq.crc_disable)
- ring->flags |= ICE_RX_FLAGS_CRC_STRIP_DIS;
- else
- ring->flags &= ~ICE_RX_FLAGS_CRC_STRIP_DIS;
-
- if (qpi->rxq.databuffer_size != 0 &&
- (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
- qpi->rxq.databuffer_size < 1024))
- goto error_param;
- ring->rx_buf_len = qpi->rxq.databuffer_size;
- if (qpi->rxq.max_pkt_size > max_frame_size ||
- qpi->rxq.max_pkt_size < 64)
- goto error_param;
-
- ring->max_frame = qpi->rxq.max_pkt_size;
- /* add space for the port VLAN since the VF driver is
- * not expected to account for it in the MTU
- * calculation
- */
- if (ice_vf_is_port_vlan_ena(vf))
- ring->max_frame += VLAN_HLEN;
-
- if (ice_vsi_cfg_single_rxq(vsi, q_idx)) {
- dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure RX queue %d\n",
- vf->vf_id, q_idx);
- goto error_param;
- }
-
- /* If Rx flex desc is supported, select RXDID for Rx
- * queues. Otherwise, use legacy 32byte descriptor
- * format. The legacy 16byte descriptor is not
- * supported; if that RXDID is selected, return an error.
- */
- if (vf->driver_caps &
- VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
- rxdid = qpi->rxq.rxdid;
- if (!(BIT(rxdid) & pf->supported_rxdids))
- goto error_param;
- } else {
- rxdid = ICE_RXDID_LEGACY_1;
- }
-
- ena_ts = ((vf->driver_caps &
- VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) &&
- (vf->driver_caps & VIRTCHNL_VF_CAP_PTP) &&
- (qpi->rxq.flags & VIRTCHNL_PTP_RX_TSTAMP));
-
- ice_write_qrxflxp_cntxt(&vsi->back->hw,
- vsi->rxq_map[q_idx], rxdid,
- ICE_RXDID_PRIO, ena_ts);
- }
- }
-
- ice_lag_complete_vf_reset(pf->lag, act_prt);
- mutex_unlock(&pf->lag_mutex);
-
- /* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
- VIRTCHNL_STATUS_SUCCESS, NULL, 0);
-error_param:
- /* disable whatever we can */
- for (; i >= 0; i--) {
- if (ice_vsi_ctrl_one_rx_ring(vsi, false, i, true))
- dev_err(ice_pf_to_dev(pf), "VF-%d could not disable RX queue %d\n",
- vf->vf_id, i);
- if (ice_vf_vsi_dis_single_txq(vf, vsi, i))
- dev_err(ice_pf_to_dev(pf), "VF-%d could not disable TX queue %d\n",
- vf->vf_id, i);
- }
-
- ice_lag_complete_vf_reset(pf->lag, act_prt);
- mutex_unlock(&pf->lag_mutex);
-
- ice_lag_move_new_vf_nodes(vf);
-
- /* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
- VIRTCHNL_STATUS_ERR_PARAM, NULL, 0);
-}
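Note the rollback idiom in ice_vc_cfg_qs_msg() above: i starts at -1 and only advances once queue pairs are being configured, so the error path's for (; i >= 0; i--) loop disables exactly the queues already touched (including the half-configured one) and is a no-op when validation fails before the loop. A standalone sketch of the pattern, with setup_one()/undo_one() as hypothetical stand-ins for the per-queue configure/disable calls:

#include <stdio.h>

static int setup_one(int i) { return i == 3 ? -1 : 0; } /* fail at 3 */
static void undo_one(int i) { printf("undo %d\n", i); }

static int configure_all(int count)
{
	int i = -1; /* -1: unwind loop is a no-op if we fail pre-loop */

	for (i = 0; i < count; i++) {
		if (setup_one(i))
			goto err;
	}

	return 0;

err:
	/* unwind queue i (possibly half-configured) and all before it */
	for (; i >= 0; i--)
		undo_one(i);

	return -1;
}

int main(void)
{
	return configure_all(5) ? 1 : 0;
}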
-
-/**
* ice_can_vf_change_mac
* @vf: pointer to the VF info
*
@@ -2531,66 +1022,6 @@ static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
}
/**
- * ice_vc_request_qs_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * VFs get a default number of queues but can use this message to request a
- * different number. If the request is successful, the PF will reset the VF
- * and return 0. If unsuccessful, the PF will send a virtchnl response
- * informing the VF of the number of available queue pairs.
- */
-static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_vf_res_request *vfres =
- (struct virtchnl_vf_res_request *)msg;
- u16 req_queues = vfres->num_queue_pairs;
- struct ice_pf *pf = vf->pf;
- u16 max_allowed_vf_queues;
- u16 tx_rx_queue_left;
- struct device *dev;
- u16 cur_queues;
-
- dev = ice_pf_to_dev(pf);
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- cur_queues = vf->num_vf_qs;
- tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf),
- ice_get_avail_rxq_count(pf));
- max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
- if (!req_queues) {
- dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n",
- vf->vf_id);
- } else if (req_queues > ICE_MAX_RSS_QS_PER_VF) {
- dev_err(dev, "VF %d tried to request more than %d queues.\n",
- vf->vf_id, ICE_MAX_RSS_QS_PER_VF);
- vfres->num_queue_pairs = ICE_MAX_RSS_QS_PER_VF;
- } else if (req_queues > cur_queues &&
- req_queues - cur_queues > tx_rx_queue_left) {
- dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n",
- vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
- vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
- ICE_MAX_RSS_QS_PER_VF);
- } else {
- /* request is successful, then reset VF */
- vf->num_req_qs = req_queues;
- ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
- dev_info(dev, "VF %d granted request of %u queues.\n",
- vf->vf_id, req_queues);
- return 0;
- }
-
-error_param:
- /* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
- v_ret, (u8 *)vfres, sizeof(*vfres));
-}
-
-/**
* ice_vf_vlan_offload_ena - determine if capabilities support VLAN offloads
* @caps: VF driver negotiated capabilities
*
@@ -2983,112 +1414,6 @@ error_param:
}
/**
- * ice_vc_get_rss_hashcfg - return the RSS Hash configuration
- * @vf: pointer to the VF info
- */
-static int ice_vc_get_rss_hashcfg(struct ice_vf *vf)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_rss_hashcfg *vrh = NULL;
- int len = 0, ret;
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
- dev_err(ice_pf_to_dev(vf->pf), "RSS not supported by PF\n");
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- len = sizeof(struct virtchnl_rss_hashcfg);
- vrh = kzalloc(len, GFP_KERNEL);
- if (!vrh) {
- v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
- len = 0;
- goto err;
- }
-
- vrh->hashcfg = ICE_DEFAULT_RSS_HASHCFG;
-err:
- /* send the response back to the VF */
- ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS, v_ret,
- (u8 *)vrh, len);
- kfree(vrh);
- return ret;
-}
-
-/**
- * ice_vc_set_rss_hashcfg - set RSS Hash configuration bits for the VF
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- */
-static int ice_vc_set_rss_hashcfg(struct ice_vf *vf, u8 *msg)
-{
- struct virtchnl_rss_hashcfg *vrh = (struct virtchnl_rss_hashcfg *)msg;
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct ice_pf *pf = vf->pf;
- struct ice_vsi *vsi;
- struct device *dev;
- int status;
-
- dev = ice_pf_to_dev(pf);
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
- dev_err(dev, "RSS not supported by PF\n");
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- /* clear all previously programmed RSS configuration to allow VF drivers
- * the ability to customize the RSS configuration and/or completely
- * disable RSS
- */
- status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx);
- if (status && !vrh->hashcfg) {
- /* only report failure to clear the current RSS configuration if
- * that was clearly the VF's intention (i.e. vrh->hashcfg = 0)
- */
- v_ret = ice_err_to_virt_err(status);
- goto err;
- } else if (status) {
- /* allow the VF to update the RSS configuration even on failure
- * to clear the current RSS configuration in an attempt to keep
- * RSS in a working state
- */
- dev_warn(dev, "Failed to clear the RSS configuration for VF %u\n",
- vf->vf_id);
- }
-
- if (vrh->hashcfg) {
- status = ice_add_avf_rss_cfg(&pf->hw, vsi, vrh->hashcfg);
- v_ret = ice_err_to_virt_err(status);
- }
-
- /* save the requested VF configuration */
- if (!v_ret)
- vf->rss_hashcfg = vrh->hashcfg;
-
- /* send the response to the VF */
-err:
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_SET_RSS_HASHCFG, v_ret,
- NULL, 0);
-}
-
-/**
* ice_vc_query_rxdid - query RXDID supported by DDP package
* @vf: pointer to VF info
*
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.h b/drivers/net/ethernet/intel/ice/virt/virtchnl.h
index 71bb456e2d71..71bb456e2d71 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.h
+++ b/drivers/net/ethernet/intel/ice/virt/virtchnl.h
diff --git a/drivers/net/ethernet/intel/idpf/Kconfig b/drivers/net/ethernet/intel/idpf/Kconfig
index 2c359a8551c7..adab2154125b 100644
--- a/drivers/net/ethernet/intel/idpf/Kconfig
+++ b/drivers/net/ethernet/intel/idpf/Kconfig
@@ -6,7 +6,7 @@ config IDPF
depends on PCI_MSI
depends on PTP_1588_CLOCK_OPTIONAL
select DIMLIB
- select LIBETH
+ select LIBETH_XDP
help
This driver supports Intel(R) Infrastructure Data Path Function
devices.
diff --git a/drivers/net/ethernet/intel/idpf/Makefile b/drivers/net/ethernet/intel/idpf/Makefile
index 4ef4b2b5e37a..651ddee942bd 100644
--- a/drivers/net/ethernet/intel/idpf/Makefile
+++ b/drivers/net/ethernet/intel/idpf/Makefile
@@ -21,3 +21,6 @@ idpf-$(CONFIG_IDPF_SINGLEQ) += idpf_singleq_txrx.o
idpf-$(CONFIG_PTP_1588_CLOCK) += idpf_ptp.o
idpf-$(CONFIG_PTP_1588_CLOCK) += idpf_virtchnl_ptp.o
+
+idpf-y += xdp.o
+idpf-y += xsk.o
diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h
index f4c0eaf9bde3..ca4da0c89979 100644
--- a/drivers/net/ethernet/intel/idpf/idpf.h
+++ b/drivers/net/ethernet/intel/idpf/idpf.h
@@ -40,6 +40,7 @@ struct idpf_vport_max_q;
#define IDPF_NUM_CHUNKS_PER_MSG(struct_sz, chunk_sz) \
((IDPF_CTLQ_MAX_BUF_LEN - (struct_sz)) / (chunk_sz))
+#define IDPF_WAIT_FOR_MARKER_TIMEO 500
#define IDPF_MAX_WAIT 500
/* available message levels */
@@ -148,6 +149,7 @@ enum idpf_vport_state {
* @link_speed_mbps: Link speed in mbps
* @vport_idx: Relative vport index
* @max_tx_hdr_size: Max header length hardware can support
+ * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather
* @state: See enum idpf_vport_state
* @netstats: Packet and byte stats
* @stats_lock: Lock to protect stats update
@@ -159,6 +161,7 @@ struct idpf_netdev_priv {
u32 link_speed_mbps;
u16 vport_idx;
u16 max_tx_hdr_size;
+ u16 tx_max_bufs;
enum idpf_vport_state state;
struct rtnl_link_stats64 netstats;
spinlock_t stats_lock;
@@ -246,16 +249,28 @@ enum idpf_vport_reset_cause {
/**
* enum idpf_vport_flags - Vport flags
* @IDPF_VPORT_DEL_QUEUES: To send delete queues message
- * @IDPF_VPORT_SW_MARKER: Indicate TX pipe drain software marker packets
- * processing is done
* @IDPF_VPORT_FLAGS_NBITS: Must be last
*/
enum idpf_vport_flags {
IDPF_VPORT_DEL_QUEUES,
- IDPF_VPORT_SW_MARKER,
IDPF_VPORT_FLAGS_NBITS,
};
+/**
+ * struct idpf_tstamp_stats - Tx timestamp statistics
+ * @stats_sync: See struct u64_stats_sync
+ * @packets: Number of packets successfully timestamped by the hardware
+ * @discarded: Number of Tx skbs discarded because the cached PHC time
+ * was too old to correctly extend the timestamp
+ * @flushed: Number of Tx skbs flushed because the interface was closed
+ */
+struct idpf_tstamp_stats {
+ struct u64_stats_sync stats_sync;
+ u64_stats_t packets;
+ u64_stats_t discarded;
+ u64_stats_t flushed;
+};
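The stats_sync seqcount is what makes these counters safe on 32-bit kernels, where a 64-bit load can tear; on 64-bit builds the begin/end calls compile away. Writers bracket every update, as the PTP hunks later in this diff do:

u64_stats_update_begin(&vport->tstamp_stats.stats_sync);
u64_stats_inc(&vport->tstamp_stats.discarded);
u64_stats_update_end(&vport->tstamp_stats.stats_sync);

and readers retry the whole snapshot if a writer raced with them, as idpf_get_ts_stats() below shows.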
+
struct idpf_port_stats {
struct u64_stats_sync stats_sync;
u64_stats_t rx_hw_csum_err;
@@ -287,6 +302,10 @@ struct idpf_fsteer_fltr {
* @txq_model: Split queue or single queue queuing model
* @txqs: Used only in hotpath to get to the right queue very fast
* @crc_enable: Enable CRC insertion offload
+ * @xdpsq_share: whether XDPSQ sharing is enabled
+ * @num_xdp_txq: number of XDPSQs
+ * @xdp_txq_offset: index of the first XDPSQ (== number of regular SQs)
+ * @xdp_prog: installed XDP program
* @num_rxq: Number of allocated RX queues
* @num_bufq: Number of allocated buffer queues
* @rxq_desc_count: RX queue descriptor count. *MUST* have enough descriptors
@@ -312,16 +331,19 @@ struct idpf_fsteer_fltr {
* @num_q_vectors: Number of IRQ vectors allocated
* @q_vectors: Array of queue vectors
* @q_vector_idxs: Starting index of queue vectors
+ * @noirq_dyn_ctl: register to enable/disable the vector for NOIRQ queues
+ * @noirq_dyn_ctl_ena: value to write to the above to enable it
+ * @noirq_v_idx: ID of the NOIRQ vector
* @max_mtu: device given max possible MTU
* @default_mac_addr: device will give a default MAC to use
* @rx_itr_profile: RX profiles for Dynamic Interrupt Moderation
* @tx_itr_profile: TX profiles for Dynamic Interrupt Moderation
* @port_stats: per port csum, header split, and other offload stats
* @link_up: True if link is up
- * @sw_marker_wq: workqueue for marker packets
* @tx_tstamp_caps: Capabilities negotiated for Tx timestamping
* @tstamp_config: The Tx tstamp config
* @tstamp_task: Tx timestamping task
+ * @tstamp_stats: Tx timestamping statistics
*/
struct idpf_vport {
u16 num_txq;
@@ -335,6 +357,11 @@ struct idpf_vport {
struct idpf_tx_queue **txqs;
bool crc_enable;
+ bool xdpsq_share;
+ u16 num_xdp_txq;
+ u16 xdp_txq_offset;
+ struct bpf_prog *xdp_prog;
+
u16 num_rxq;
u16 num_bufq;
u32 rxq_desc_count;
@@ -359,6 +386,11 @@ struct idpf_vport {
u16 num_q_vectors;
struct idpf_q_vector *q_vectors;
u16 *q_vector_idxs;
+
+ void __iomem *noirq_dyn_ctl;
+ u32 noirq_dyn_ctl_ena;
+ u16 noirq_v_idx;
+
u16 max_mtu;
u8 default_mac_addr[ETH_ALEN];
u16 rx_itr_profile[IDPF_DIM_PROFILE_SLOTS];
@@ -367,11 +399,10 @@ struct idpf_vport {
bool link_up;
- wait_queue_head_t sw_marker_wq;
-
struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps;
struct kernel_hwtstamp_config tstamp_config;
struct work_struct tstamp_task;
+ struct idpf_tstamp_stats tstamp_stats;
};
/**
@@ -433,6 +464,7 @@ struct idpf_q_coalesce {
* ethtool
* @num_req_rxq_desc: Number of user requested RX queue descriptors through
* ethtool
+ * @xdp_prog: requested XDP program to install
* @user_flags: User toggled config flags
* @mac_filter_list: List of MAC filters
* @num_fsteer_fltrs: number of flow steering filters
@@ -447,6 +479,7 @@ struct idpf_vport_user_config_data {
u16 num_req_rx_qs;
u32 num_req_txq_desc;
u32 num_req_rxq_desc;
+ struct bpf_prog *xdp_prog;
DECLARE_BITMAP(user_flags, __IDPF_USER_FLAGS_NBITS);
struct list_head mac_filter_list;
u32 num_fsteer_fltrs;
@@ -676,6 +709,11 @@ static inline int idpf_is_queue_model_split(u16 q_model)
q_model == VIRTCHNL2_QUEUE_MODEL_SPLIT;
}
+static inline bool idpf_xdp_enabled(const struct idpf_vport *vport)
+{
+ return vport->adapter && vport->xdp_prog;
+}
+
#define idpf_is_cap_ena(adapter, field, flag) \
idpf_is_capability_ena(adapter, false, field, flag)
#define idpf_is_cap_ena_all(adapter, field, flag) \
@@ -957,6 +995,13 @@ static inline void idpf_vport_ctrl_unlock(struct net_device *netdev)
mutex_unlock(&np->adapter->vport_ctrl_lock);
}
+static inline bool idpf_vport_ctrl_is_locked(struct net_device *netdev)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+
+ return mutex_is_locked(&np->adapter->vport_ctrl_lock);
+}
+
void idpf_statistics_task(struct work_struct *work);
void idpf_init_task(struct work_struct *work);
void idpf_service_task(struct work_struct *work);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_dev.c
index bfa60f7d43de..3a04a6bd0d7c 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_dev.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_dev.c
@@ -77,7 +77,7 @@ static int idpf_intr_reg_init(struct idpf_vport *vport)
int num_vecs = vport->num_q_vectors;
struct idpf_vec_regs *reg_vals;
int num_regs, i, err = 0;
- u32 rx_itr, tx_itr;
+ u32 rx_itr, tx_itr, val;
u16 total_vecs;
total_vecs = idpf_get_reserved_vecs(vport->adapter);
@@ -121,6 +121,15 @@ static int idpf_intr_reg_init(struct idpf_vport *vport)
intr->tx_itr = idpf_get_reg_addr(adapter, tx_itr);
}
+ /* Data vector for NOIRQ queues */
+
+ val = reg_vals[vport->q_vector_idxs[i] - IDPF_MBX_Q_VEC].dyn_ctl_reg;
+ vport->noirq_dyn_ctl = idpf_get_reg_addr(adapter, val);
+
+ val = PF_GLINT_DYN_CTL_WB_ON_ITR_M | PF_GLINT_DYN_CTL_INTENA_MSK_M |
+ FIELD_PREP(PF_GLINT_DYN_CTL_ITR_INDX_M, IDPF_NO_ITR_UPDATE_IDX);
+ vport->noirq_dyn_ctl_ena = val;
+
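The value composed above combines WB_ON_ITR (let the hardware write back completed descriptors while the interrupt stays masked) with INTENA_MSK and the no-update ITR index, so queues bound to the NOIRQ vector are serviced purely by polling. Presumably arming it is a single register write of the precomputed value, along the lines of:

	writel(vport->noirq_dyn_ctl_ena, vport->noirq_dyn_ctl);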
free_reg_vals:
kfree(reg_vals);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
index 0eb812ac19c2..a5a1eec9ade8 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
@@ -1245,8 +1245,8 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
*
* returns pointer to rx vector
*/
-static struct idpf_q_vector *idpf_find_rxq_vec(const struct idpf_vport *vport,
- int q_num)
+struct idpf_q_vector *idpf_find_rxq_vec(const struct idpf_vport *vport,
+ u32 q_num)
{
int q_grp, q_idx;
@@ -1266,8 +1266,8 @@ static struct idpf_q_vector *idpf_find_rxq_vec(const struct idpf_vport *vport,
*
* returns pointer to tx vector
*/
-static struct idpf_q_vector *idpf_find_txq_vec(const struct idpf_vport *vport,
- int q_num)
+struct idpf_q_vector *idpf_find_txq_vec(const struct idpf_vport *vport,
+ u32 q_num)
{
int q_grp;
@@ -1685,6 +1685,61 @@ unlock:
return err;
}
+/**
+ * idpf_get_ts_stats - Collect HW tstamping statistics
+ * @netdev: network interface device structure
+ * @ts_stats: HW timestamping stats structure
+ *
+ * Collect HW timestamping statistics: packets successfully timestamped,
+ * packets discarded due to illegal values, packets flushed while releasing
+ * PTP, and packets skipped due to the lack of a free index.
+ */
+static void idpf_get_ts_stats(struct net_device *netdev,
+ struct ethtool_ts_stats *ts_stats)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_vport *vport;
+ unsigned int start;
+
+ idpf_vport_ctrl_lock(netdev);
+ vport = idpf_netdev_to_vport(netdev);
+ do {
+ start = u64_stats_fetch_begin(&vport->tstamp_stats.stats_sync);
+ ts_stats->pkts = u64_stats_read(&vport->tstamp_stats.packets);
+ ts_stats->lost = u64_stats_read(&vport->tstamp_stats.flushed);
+ ts_stats->err = u64_stats_read(&vport->tstamp_stats.discarded);
+ } while (u64_stats_fetch_retry(&vport->tstamp_stats.stats_sync, start));
+
+ if (np->state != __IDPF_VPORT_UP)
+ goto exit;
+
+ for (u16 i = 0; i < vport->num_txq_grp; i++) {
+ struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
+
+ for (u16 j = 0; j < txq_grp->num_txq; j++) {
+ struct idpf_tx_queue *txq = txq_grp->txqs[j];
+ struct idpf_tx_queue_stats *stats;
+ u64 ts;
+
+ if (!txq)
+ continue;
+
+ stats = &txq->q_stats;
+ do {
+ start = u64_stats_fetch_begin(&txq->stats_sync);
+
+ ts = u64_stats_read(&stats->tstamp_skipped);
+ } while (u64_stats_fetch_retry(&txq->stats_sync,
+ start));
+
+ ts_stats->lost += ts;
+ }
+ }
+
+exit:
+ idpf_vport_ctrl_unlock(netdev);
+}
+
static const struct ethtool_ops idpf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USE_ADAPTIVE,
@@ -1711,6 +1766,7 @@ static const struct ethtool_ops idpf_ethtool_ops = {
.set_ringparam = idpf_set_ringparam,
.get_link_ksettings = idpf_get_link_ksettings,
.get_ts_info = idpf_get_ts_info,
+ .get_ts_stats = idpf_get_ts_stats,
};
/**
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h
index 7492d1713243..20d5af64e750 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h
@@ -186,13 +186,17 @@ struct idpf_base_tx_desc {
__le64 qw1; /* type_cmd_offset_bsz_l2tag1 */
}; /* read used with buffer queues */
-struct idpf_splitq_tx_compl_desc {
+struct idpf_splitq_4b_tx_compl_desc {
/* qid=[10:0] comptype=[13:11] rsvd=[14] gen=[15] */
__le16 qid_comptype_gen;
union {
__le16 q_head; /* Queue head */
__le16 compl_tag; /* Completion tag */
} q_head_compl_tag;
+}; /* writeback used with completion queues */
+
+struct idpf_splitq_tx_compl_desc {
+ struct idpf_splitq_4b_tx_compl_desc common;
u8 ts[3];
u8 rsvd; /* Reserved */
}; /* writeback used with completion queues */
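Splitting out the 4-byte common prefix lets the completion ring be sized for either format: idpf_compl_desc_alloc() later in this diff picks sizeof(*complq->comp) or sizeof(*complq->comp_4b) depending on FLOW_SCH_EN, halving the ring footprint when the timestamp bytes are not needed. A standalone sketch of the layout invariant, with a plain uint16_t standing in for __le16 and renamed structs:

#include <assert.h>
#include <stdint.h>

typedef uint16_t le16; /* stand-in for the kernel __le16 */

struct compl_4b {
	le16 qid_comptype_gen;
	union {
		le16 q_head;
		le16 compl_tag;
	} q_head_compl_tag;
};

struct compl_full {
	struct compl_4b common;
	uint8_t ts[3];
	uint8_t rsvd;
};

static_assert(sizeof(struct compl_4b) == 4, "4-byte writeback");
static_assert(sizeof(struct compl_full) == 8, "8-byte writeback");

int main(void) { return 0; }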
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index 513032cb5f08..8a941f0fb048 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -4,6 +4,8 @@
#include "idpf.h"
#include "idpf_virtchnl.h"
#include "idpf_ptp.h"
+#include "xdp.h"
+#include "xsk.h"
static const struct net_device_ops idpf_netdev_ops;
@@ -776,6 +778,7 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
np->vport_idx = vport->idx;
np->vport_id = vport->vport_id;
np->max_tx_hdr_size = idpf_get_max_tx_hdr_size(adapter);
+ np->tx_max_bufs = idpf_get_max_tx_bufs(adapter);
spin_lock_init(&np->stats_lock);
@@ -834,6 +837,8 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
netdev->hw_features |= netdev->features | other_offloads;
netdev->vlan_features |= netdev->features | other_offloads;
netdev->hw_enc_features |= dflt_features | other_offloads;
+ idpf_xdp_set_features(vport);
+
idpf_set_ethtool_ops(netdev);
netif_set_affinity_auto(netdev);
SET_NETDEV_DEV(netdev, &adapter->pdev->dev);
@@ -883,14 +888,18 @@ static void idpf_remove_features(struct idpf_vport *vport)
/**
* idpf_vport_stop - Disable a vport
* @vport: vport to disable
+ * @rtnl: whether to take RTNL lock
*/
-static void idpf_vport_stop(struct idpf_vport *vport)
+static void idpf_vport_stop(struct idpf_vport *vport, bool rtnl)
{
struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
if (np->state <= __IDPF_VPORT_DOWN)
return;
+ if (rtnl)
+ rtnl_lock();
+
netif_carrier_off(vport->netdev);
netif_tx_disable(vport->netdev);
@@ -909,9 +918,13 @@ static void idpf_vport_stop(struct idpf_vport *vport)
vport->link_up = false;
idpf_vport_intr_deinit(vport);
+ idpf_xdp_rxq_info_deinit_all(vport);
idpf_vport_queues_rel(vport);
idpf_vport_intr_rel(vport);
np->state = __IDPF_VPORT_DOWN;
+
+ if (rtnl)
+ rtnl_unlock();
}
/**
@@ -935,7 +948,7 @@ static int idpf_stop(struct net_device *netdev)
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
- idpf_vport_stop(vport);
+ idpf_vport_stop(vport, false);
idpf_vport_ctrl_unlock(netdev);
@@ -1028,7 +1041,7 @@ static void idpf_vport_dealloc(struct idpf_vport *vport)
idpf_idc_deinit_vport_aux_device(vport->vdev_info);
idpf_deinit_mac_addr(vport);
- idpf_vport_stop(vport);
+ idpf_vport_stop(vport, true);
if (!test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags))
idpf_decfg_netdev(vport);
@@ -1134,7 +1147,7 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
if (!vport)
return vport;
- num_max_q = max(max_q->max_txq, max_q->max_rxq);
+ num_max_q = max(max_q->max_txq, max_q->max_rxq) + IDPF_RESERVED_VECS;
if (!adapter->vport_config[idx]) {
struct idpf_vport_config *vport_config;
struct idpf_q_coalesce *q_coal;
@@ -1308,13 +1321,13 @@ static void idpf_restore_features(struct idpf_vport *vport)
*/
static int idpf_set_real_num_queues(struct idpf_vport *vport)
{
- int err;
+ int err, txq = vport->num_txq - vport->num_xdp_txq;
err = netif_set_real_num_rx_queues(vport->netdev, vport->num_rxq);
if (err)
return err;
- return netif_set_real_num_tx_queues(vport->netdev, vport->num_txq);
+ return netif_set_real_num_tx_queues(vport->netdev, txq);
}
/**
@@ -1369,8 +1382,9 @@ static void idpf_rx_init_buf_tail(struct idpf_vport *vport)
/**
* idpf_vport_open - Bring up a vport
* @vport: vport to bring up
+ * @rtnl: whether to take RTNL lock
*/
-static int idpf_vport_open(struct idpf_vport *vport)
+static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
{
struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
struct idpf_adapter *adapter = vport->adapter;
@@ -1380,6 +1394,9 @@ static int idpf_vport_open(struct idpf_vport *vport)
if (np->state != __IDPF_VPORT_DOWN)
return -EBUSY;
+ if (rtnl)
+ rtnl_lock();
+
/* we do not allow interface up just yet */
netif_carrier_off(vport->netdev);
@@ -1387,7 +1404,7 @@ static int idpf_vport_open(struct idpf_vport *vport)
if (err) {
dev_err(&adapter->pdev->dev, "Failed to allocate interrupts for vport %u: %d\n",
vport->vport_id, err);
- return err;
+ goto err_rtnl_unlock;
}
err = idpf_vport_queues_alloc(vport);
@@ -1408,35 +1425,44 @@ static int idpf_vport_open(struct idpf_vport *vport)
goto queues_rel;
}
- err = idpf_rx_bufs_init_all(vport);
+ err = idpf_queue_reg_init(vport);
if (err) {
- dev_err(&adapter->pdev->dev, "Failed to initialize RX buffers for vport %u: %d\n",
+ dev_err(&adapter->pdev->dev, "Failed to initialize queue registers for vport %u: %d\n",
vport->vport_id, err);
goto queues_rel;
}
- err = idpf_queue_reg_init(vport);
+ err = idpf_rx_bufs_init_all(vport);
if (err) {
- dev_err(&adapter->pdev->dev, "Failed to initialize queue registers for vport %u: %d\n",
+ dev_err(&adapter->pdev->dev, "Failed to initialize RX buffers for vport %u: %d\n",
vport->vport_id, err);
goto queues_rel;
}
idpf_rx_init_buf_tail(vport);
+
+ err = idpf_xdp_rxq_info_init_all(vport);
+ if (err) {
+ netdev_err(vport->netdev,
+ "Failed to initialize XDP RxQ info for vport %u: %pe\n",
+ vport->vport_id, ERR_PTR(err));
+ goto intr_deinit;
+ }
+
idpf_vport_intr_ena(vport);
err = idpf_send_config_queues_msg(vport);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to configure queues for vport %u, %d\n",
vport->vport_id, err);
- goto intr_deinit;
+ goto rxq_deinit;
}
err = idpf_send_map_unmap_queue_vector_msg(vport, true);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to map queue vectors for vport %u: %d\n",
vport->vport_id, err);
- goto intr_deinit;
+ goto rxq_deinit;
}
err = idpf_send_enable_queues_msg(vport);
@@ -1474,6 +1500,9 @@ static int idpf_vport_open(struct idpf_vport *vport)
goto deinit_rss;
}
+ if (rtnl)
+ rtnl_unlock();
+
return 0;
deinit_rss:
@@ -1484,6 +1513,8 @@ disable_queues:
idpf_send_disable_queues_msg(vport);
unmap_queue_vectors:
idpf_send_map_unmap_queue_vector_msg(vport, false);
+rxq_deinit:
+ idpf_xdp_rxq_info_deinit_all(vport);
intr_deinit:
idpf_vport_intr_deinit(vport);
queues_rel:
@@ -1491,6 +1522,10 @@ queues_rel:
intr_rel:
idpf_vport_intr_rel(vport);
+err_rtnl_unlock:
+ if (rtnl)
+ rtnl_unlock();
+
return err;
}
@@ -1547,8 +1582,6 @@ void idpf_init_task(struct work_struct *work)
index = vport->idx;
vport_config = adapter->vport_config[index];
- init_waitqueue_head(&vport->sw_marker_wq);
-
spin_lock_init(&vport_config->mac_filter_list_lock);
INIT_LIST_HEAD(&vport_config->user_config.mac_filter_list);
@@ -1571,7 +1604,7 @@ void idpf_init_task(struct work_struct *work)
np = netdev_priv(vport->netdev);
np->state = __IDPF_VPORT_DOWN;
if (test_and_clear_bit(IDPF_VPORT_UP_REQUESTED, vport_config->flags))
- idpf_vport_open(vport);
+ idpf_vport_open(vport, true);
/* Spawn and return 'idpf_init_task' work queue until all the
* default vports are created
@@ -1961,7 +1994,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
idpf_send_delete_queues_msg(vport);
} else {
set_bit(IDPF_VPORT_DEL_QUEUES, vport->flags);
- idpf_vport_stop(vport);
+ idpf_vport_stop(vport, false);
}
idpf_deinit_rss(vport);
@@ -1991,7 +2024,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
goto err_open;
if (current_state == __IDPF_VPORT_UP)
- err = idpf_vport_open(vport);
+ err = idpf_vport_open(vport, false);
goto free_vport;
@@ -2001,7 +2034,7 @@ err_reset:
err_open:
if (current_state == __IDPF_VPORT_UP)
- idpf_vport_open(vport);
+ idpf_vport_open(vport, false);
free_vport:
kfree(new_vport);
@@ -2239,7 +2272,7 @@ static int idpf_open(struct net_device *netdev)
if (err)
goto unlock;
- err = idpf_vport_open(vport);
+ err = idpf_vport_open(vport, false);
unlock:
idpf_vport_ctrl_unlock(netdev);
@@ -2272,6 +2305,92 @@ static int idpf_change_mtu(struct net_device *netdev, int new_mtu)
}
/**
+ * idpf_chk_tso_segment - Check skb is not using too many buffers
+ * @skb: send buffer
+ * @max_bufs: maximum number of buffers
+ *
+ * For TSO we need to count the TSO header and segment payload separately. As
+ * such we need to check cases where we have max_bufs-1 fragments or more as we
+ * can potentially require max_bufs+1 DMA transactions, 1 for the TSO header, 1
+ * for the segment payload in the first descriptor, and another max_bufs-1 for
+ * the fragments.
+ *
+ * Returns true if the packet needs to be software segmented by core stack.
+ */
+static bool idpf_chk_tso_segment(const struct sk_buff *skb,
+ unsigned int max_bufs)
+{
+ const struct skb_shared_info *shinfo = skb_shinfo(skb);
+ const skb_frag_t *frag, *stale;
+ int nr_frags, sum;
+
+ /* no need to check if number of frags is less than max_bufs - 1 */
+ nr_frags = shinfo->nr_frags;
+ if (nr_frags < (max_bufs - 1))
+ return false;
+
+ /* We need to walk through the list and validate that each group
+ * of max_bufs-2 fragments totals at least gso_size.
+ */
+ nr_frags -= max_bufs - 2;
+ frag = &shinfo->frags[0];
+
+ /* Initialize size to the negative value of gso_size minus 1. We use
+ * this as the worst case scenario in which the frag ahead of us only
+ * provides one byte which is why we are limited to max_bufs-2
+ * descriptors for a single transmit as the header and previous
+ * fragment are already consuming 2 descriptors.
+ */
+ sum = 1 - shinfo->gso_size;
+
+ /* Add size of frags 0 through 4 to create our initial sum */
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
+
+ /* Walk through fragments adding latest fragment, testing it, and
+ * then removing stale fragments from the sum.
+ */
+ for (stale = &shinfo->frags[0];; stale++) {
+ int stale_size = skb_frag_size(stale);
+
+ sum += skb_frag_size(frag++);
+
+ /* The stale fragment may present us with a smaller
+ * descriptor than the actual fragment size. To account
+ * for that we need to remove all the data on the front and
+ * figure out what the remainder would be in the last
+ * descriptor associated with the fragment.
+ */
+ if (stale_size > IDPF_TX_MAX_DESC_DATA) {
+ int align_pad = -(skb_frag_off(stale)) &
+ (IDPF_TX_MAX_READ_REQ_SIZE - 1);
+
+ sum -= align_pad;
+ stale_size -= align_pad;
+
+ do {
+ sum -= IDPF_TX_MAX_DESC_DATA_ALIGNED;
+ stale_size -= IDPF_TX_MAX_DESC_DATA_ALIGNED;
+ } while (stale_size > IDPF_TX_MAX_DESC_DATA);
+ }
+
+ /* if sum is negative we failed to make sufficient progress */
+ if (sum < 0)
+ return true;
+
+ if (!nr_frags--)
+ break;
+
+ sum -= stale_size;
+ }
+
+ return false;
+}
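A quick worked pass through the sliding window, assuming max_bufs = 8 (five seeded frags plus one added per step, i.e. a six-frag window): with gso_size = 9000 and seven 1500-byte frags, every window sums to 9000, sum bottoms out at +1, and the skb passes; with seven 100-byte frags the first test already yields 1 - 9000 + 600 = -8399, so the skb is handed back for software GSO. A standalone model of the core loop, ignoring the IDPF_TX_MAX_DESC_DATA re-alignment of oversized frags:

#include <stdbool.h>
#include <stdio.h>

/* simplified idpf_chk_tso_segment(): true when no window of
 * max_bufs - 2 consecutive frags can cover gso_size
 */
static bool needs_sw_gso(const int *frag, int nr_frags, int max_bufs,
			 int gso_size)
{
	if (nr_frags < max_bufs - 1)
		return false;

	int left = nr_frags - (max_bufs - 2);
	int sum = 1 - gso_size;
	int head = 0, tail = 0;

	/* seed the window with the first max_bufs - 3 frags */
	for (int i = 0; i < max_bufs - 3; i++)
		sum += frag[head++];

	for (;;) {
		sum += frag[head++];	/* newest frag enters the window */
		if (sum < 0)
			return true;
		if (!left--)
			break;
		sum -= frag[tail++];	/* stale frag leaves the window */
	}

	return false;
}

int main(void)
{
	int big[7]  = { 1500, 1500, 1500, 1500, 1500, 1500, 1500 };
	int tiny[7] = { 100, 100, 100, 100, 100, 100, 100 };

	printf("%d %d\n", needs_sw_gso(big, 7, 8, 9000),	/* 0 */
	       needs_sw_gso(tiny, 7, 8, 9000));			/* 1 */
	return 0;
}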
+
+/**
* idpf_features_check - Validate packet conforms to limits
* @skb: skb buffer
* @netdev: This port's netdev
@@ -2292,12 +2411,15 @@ static netdev_features_t idpf_features_check(struct sk_buff *skb,
if (skb->ip_summed != CHECKSUM_PARTIAL)
return features;
- /* We cannot support GSO if the MSS is going to be less than
- * 88 bytes. If it is then we need to drop support for GSO.
- */
- if (skb_is_gso(skb) &&
- (skb_shinfo(skb)->gso_size < IDPF_TX_TSO_MIN_MSS))
- features &= ~NETIF_F_GSO_MASK;
+ if (skb_is_gso(skb)) {
+ /* We cannot support GSO if the MSS is going to be less than
+ * 88 bytes. If it is then we need to drop support for GSO.
+ */
+ if (skb_shinfo(skb)->gso_size < IDPF_TX_TSO_MIN_MSS)
+ features &= ~NETIF_F_GSO_MASK;
+ else if (idpf_chk_tso_segment(skb, np->tx_max_bufs))
+ features &= ~NETIF_F_GSO_MASK;
+ }
/* Ensure MACLEN is <= 126 bytes (63 words) and not an odd size */
len = skb_network_offset(skb);
@@ -2495,4 +2617,7 @@ static const struct net_device_ops idpf_netdev_ops = {
.ndo_tx_timeout = idpf_tx_timeout,
.ndo_hwtstamp_get = idpf_hwtstamp_get,
.ndo_hwtstamp_set = idpf_hwtstamp_set,
+ .ndo_bpf = idpf_xdp,
+ .ndo_xdp_xmit = idpf_xdp_xmit,
+ .ndo_xsk_wakeup = idpf_xsk_wakeup,
};
diff --git a/drivers/net/ethernet/intel/idpf/idpf_main.c b/drivers/net/ethernet/intel/idpf/idpf_main.c
index dfe9126f1f4a..8c46481d2e1f 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_main.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_main.c
@@ -9,6 +9,7 @@
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_IMPORT_NS("LIBETH");
+MODULE_IMPORT_NS("LIBETH_XDP");
MODULE_LICENSE("GPL");
/**
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ptp.c b/drivers/net/ethernet/intel/idpf/idpf_ptp.c
index ee21f2ff0cad..142823af1f9e 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_ptp.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_ptp.c
@@ -618,8 +618,13 @@ u64 idpf_ptp_extend_ts(struct idpf_vport *vport, u64 in_tstamp)
discard_time = ptp->cached_phc_jiffies + 2 * HZ;
- if (time_is_before_jiffies(discard_time))
+ if (time_is_before_jiffies(discard_time)) {
+ u64_stats_update_begin(&vport->tstamp_stats.stats_sync);
+ u64_stats_inc(&vport->tstamp_stats.discarded);
+ u64_stats_update_end(&vport->tstamp_stats.stats_sync);
+
return 0;
+ }
return idpf_ptp_tstamp_extend_32b_to_64b(ptp->cached_phc_time,
lower_32_bits(in_tstamp));
@@ -853,10 +858,14 @@ static void idpf_ptp_release_vport_tstamp(struct idpf_vport *vport)
/* Remove list with latches in use */
head = &vport->tx_tstamp_caps->latches_in_use;
+ u64_stats_update_begin(&vport->tstamp_stats.stats_sync);
list_for_each_entry_safe(ptp_tx_tstamp, tmp, head, list_member) {
+ u64_stats_inc(&vport->tstamp_stats.flushed);
+
list_del(&ptp_tx_tstamp->list_member);
kfree(ptp_tx_tstamp);
}
+ u64_stats_update_end(&vport->tstamp_stats.stats_sync);
spin_unlock_bh(&vport->tx_tstamp_caps->latches_lock);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
index b19b462e0bb6..61e613066140 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
@@ -1,8 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2023 Intel Corporation */
-#include <net/libeth/rx.h>
-#include <net/libeth/tx.h>
+#include <net/libeth/xdp.h>
#include "idpf.h"
@@ -655,7 +654,7 @@ static void idpf_rx_singleq_csum(struct idpf_rx_queue *rxq,
bool ipv4, ipv6;
/* check if Rx checksum is enabled */
- if (!libeth_rx_pt_has_checksum(rxq->netdev, decoded))
+ if (!libeth_rx_pt_has_checksum(rxq->xdp_rxq.dev, decoded))
return;
/* check if HW has decoded the packet and checksum */
@@ -794,7 +793,7 @@ static void idpf_rx_singleq_base_hash(struct idpf_rx_queue *rx_q,
{
u64 mask, qw1;
- if (!libeth_rx_pt_has_hash(rx_q->netdev, decoded))
+ if (!libeth_rx_pt_has_hash(rx_q->xdp_rxq.dev, decoded))
return;
mask = VIRTCHNL2_RX_BASE_DESC_FLTSTAT_RSS_HASH_M;
@@ -822,7 +821,7 @@ static void idpf_rx_singleq_flex_hash(struct idpf_rx_queue *rx_q,
const union virtchnl2_rx_desc *rx_desc,
struct libeth_rx_pt decoded)
{
- if (!libeth_rx_pt_has_hash(rx_q->netdev, decoded))
+ if (!libeth_rx_pt_has_hash(rx_q->xdp_rxq.dev, decoded))
return;
if (FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_STATUS0_RSS_VALID_M,
@@ -834,7 +833,7 @@ static void idpf_rx_singleq_flex_hash(struct idpf_rx_queue *rx_q,
}
/**
- * idpf_rx_singleq_process_skb_fields - Populate skb header fields from Rx
+ * __idpf_rx_singleq_process_skb_fields - Populate skb header fields from Rx
* descriptor
* @rx_q: Rx ring being processed
* @skb: pointer to current skb being populated
@@ -846,17 +845,14 @@ static void idpf_rx_singleq_flex_hash(struct idpf_rx_queue *rx_q,
* other fields within the skb.
*/
static void
-idpf_rx_singleq_process_skb_fields(struct idpf_rx_queue *rx_q,
- struct sk_buff *skb,
- const union virtchnl2_rx_desc *rx_desc,
- u16 ptype)
+__idpf_rx_singleq_process_skb_fields(struct idpf_rx_queue *rx_q,
+ struct sk_buff *skb,
+ const union virtchnl2_rx_desc *rx_desc,
+ u16 ptype)
{
struct libeth_rx_pt decoded = rx_q->rx_ptype_lkup[ptype];
struct libeth_rx_csum csum_bits;
- /* modifies the skb - consumes the enet header */
- skb->protocol = eth_type_trans(skb, rx_q->netdev);
-
/* Check if we're using base mode descriptor IDs */
if (rx_q->rxdids == VIRTCHNL2_RXDID_1_32B_BASE_M) {
idpf_rx_singleq_base_hash(rx_q, skb, rx_desc, decoded);
@@ -867,7 +863,6 @@ idpf_rx_singleq_process_skb_fields(struct idpf_rx_queue *rx_q,
}
idpf_rx_singleq_csum(rx_q, skb, csum_bits, decoded);
- skb_record_rx_queue(skb, rx_q->idx);
}
/**
@@ -1003,6 +998,32 @@ idpf_rx_singleq_extract_fields(const struct idpf_rx_queue *rx_q,
idpf_rx_singleq_extract_flex_fields(rx_desc, fields);
}
+static bool
+idpf_rx_singleq_process_skb_fields(struct sk_buff *skb,
+ const struct libeth_xdp_buff *xdp,
+ struct libeth_rq_napi_stats *rs)
+{
+ struct libeth_rqe_info fields;
+ struct idpf_rx_queue *rxq;
+
+ rxq = libeth_xdp_buff_to_rq(xdp, typeof(*rxq), xdp_rxq);
+
+ idpf_rx_singleq_extract_fields(rxq, xdp->desc, &fields);
+ __idpf_rx_singleq_process_skb_fields(rxq, skb, xdp->desc,
+ fields.ptype);
+
+ return true;
+}
+
+static void idpf_xdp_run_pass(struct libeth_xdp_buff *xdp,
+ struct napi_struct *napi,
+ struct libeth_rq_napi_stats *rs,
+ const union virtchnl2_rx_desc *desc)
+{
+ libeth_xdp_run_pass(xdp, NULL, napi, rs, desc, NULL,
+ idpf_rx_singleq_process_skb_fields);
+}
+
/**
* idpf_rx_singleq_clean - Reclaim resources after receive completes
* @rx_q: rx queue to clean
@@ -1012,14 +1033,15 @@ idpf_rx_singleq_extract_fields(const struct idpf_rx_queue *rx_q,
*/
static int idpf_rx_singleq_clean(struct idpf_rx_queue *rx_q, int budget)
{
- unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
- struct sk_buff *skb = rx_q->skb;
+ struct libeth_rq_napi_stats rs = { };
u16 ntc = rx_q->next_to_clean;
+ LIBETH_XDP_ONSTACK_BUFF(xdp);
u16 cleaned_count = 0;
- bool failure = false;
+
+ libeth_xdp_init_buff(xdp, &rx_q->xdp, &rx_q->xdp_rxq);
/* Process Rx packets bounded by budget */
- while (likely(total_rx_pkts < (unsigned int)budget)) {
+ while (likely(rs.packets < budget)) {
struct libeth_rqe_info fields = { };
union virtchnl2_rx_desc *rx_desc;
struct idpf_rx_buf *rx_buf;
@@ -1046,73 +1068,41 @@ static int idpf_rx_singleq_clean(struct idpf_rx_queue *rx_q, int budget)
idpf_rx_singleq_extract_fields(rx_q, rx_desc, &fields);
rx_buf = &rx_q->rx_buf[ntc];
- if (!libeth_rx_sync_for_cpu(rx_buf, fields.len))
- goto skip_data;
-
- if (skb)
- idpf_rx_add_frag(rx_buf, skb, fields.len);
- else
- skb = idpf_rx_build_skb(rx_buf, fields.len);
-
- /* exit if we failed to retrieve a buffer */
- if (!skb)
- break;
-
-skip_data:
+ libeth_xdp_process_buff(xdp, rx_buf, fields.len);
rx_buf->netmem = 0;
IDPF_SINGLEQ_BUMP_RING_IDX(rx_q, ntc);
cleaned_count++;
/* skip if it is non EOP desc */
- if (idpf_rx_singleq_is_non_eop(rx_desc) || unlikely(!skb))
+ if (idpf_rx_singleq_is_non_eop(rx_desc) ||
+ unlikely(!xdp->data))
continue;
#define IDPF_RXD_ERR_S FIELD_PREP(VIRTCHNL2_RX_BASE_DESC_QW1_ERROR_M, \
VIRTCHNL2_RX_BASE_DESC_ERROR_RXE_M)
if (unlikely(idpf_rx_singleq_test_staterr(rx_desc,
IDPF_RXD_ERR_S))) {
- dev_kfree_skb_any(skb);
- skb = NULL;
- continue;
- }
-
- /* pad skb if needed (to make valid ethernet frame) */
- if (eth_skb_pad(skb)) {
- skb = NULL;
+ libeth_xdp_return_buff_slow(xdp);
continue;
}
- /* probably a little skewed due to removing CRC */
- total_rx_bytes += skb->len;
-
- /* protocol */
- idpf_rx_singleq_process_skb_fields(rx_q, skb, rx_desc,
- fields.ptype);
-
- /* send completed skb up the stack */
- napi_gro_receive(rx_q->pp->p.napi, skb);
- skb = NULL;
-
- /* update budget accounting */
- total_rx_pkts++;
+ idpf_xdp_run_pass(xdp, rx_q->pp->p.napi, &rs, rx_desc);
}
- rx_q->skb = skb;
-
rx_q->next_to_clean = ntc;
+ libeth_xdp_save_buff(&rx_q->xdp, xdp);
page_pool_nid_changed(rx_q->pp, numa_mem_id());
if (cleaned_count)
- failure = idpf_rx_singleq_buf_hw_alloc_all(rx_q, cleaned_count);
+ idpf_rx_singleq_buf_hw_alloc_all(rx_q, cleaned_count);
u64_stats_update_begin(&rx_q->stats_sync);
- u64_stats_add(&rx_q->q_stats.packets, total_rx_pkts);
- u64_stats_add(&rx_q->q_stats.bytes, total_rx_bytes);
+ u64_stats_add(&rx_q->q_stats.packets, rs.packets);
+ u64_stats_add(&rx_q->q_stats.bytes, rs.bytes);
u64_stats_update_end(&rx_q->stats_sync);
- /* guarantee a trip back through this routine if there was a failure */
- return failure ? budget : (int)total_rx_pkts;
+ return rs.packets;
}
/**
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index eaad52a83b04..828f7c444d30 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -1,18 +1,37 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2023 Intel Corporation */
-#include <net/libeth/rx.h>
-#include <net/libeth/tx.h>
-
#include "idpf.h"
#include "idpf_ptp.h"
#include "idpf_virtchnl.h"
+#include "xdp.h"
+#include "xsk.h"
#define idpf_tx_buf_next(buf) (*(u32 *)&(buf)->priv)
LIBETH_SQE_CHECK_PRIV(u32);
-static bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
- unsigned int count);
+/**
+ * idpf_chk_linearize - Check if skb exceeds max descriptors per packet
+ * @skb: send buffer
+ * @max_bufs: maximum scatter gather buffers for single packet
+ * @count: number of buffers this packet needs
+ *
+ * Make sure we don't exceed maximum scatter gather buffers for a single
+ * packet.
+ * The TSO case has already been handled in idpf_features_check().
+ */
+static bool idpf_chk_linearize(const struct sk_buff *skb,
+ unsigned int max_bufs,
+ unsigned int count)
+{
+ if (likely(count <= max_bufs))
+ return false;
+
+ if (skb_is_gso(skb))
+ return false;
+
+ return true;
+}
/**
* idpf_tx_timeout - Respond to a Tx Hang
@@ -35,26 +54,39 @@ void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
}
}
-/**
- * idpf_tx_buf_rel_all - Free any empty Tx buffers
- * @txq: queue to be cleaned
- */
-static void idpf_tx_buf_rel_all(struct idpf_tx_queue *txq)
+static void idpf_tx_buf_clean(struct idpf_tx_queue *txq)
{
struct libeth_sq_napi_stats ss = { };
+ struct xdp_frame_bulk bq;
struct libeth_cq_pp cp = {
.dev = txq->dev,
+ .bq = &bq,
.ss = &ss,
};
- u32 i;
+ xdp_frame_bulk_init(&bq);
+
+ /* Free all the Tx buffer sk_buffs */
+ for (u32 i = 0; i < txq->buf_pool_size; i++)
+ libeth_tx_complete_any(&txq->tx_buf[i], &cp);
+
+ xdp_flush_frame_bulk(&bq);
+}
+
+/**
+ * idpf_tx_buf_rel_all - Free any empty Tx buffers
+ * @txq: queue to be cleaned
+ */
+static void idpf_tx_buf_rel_all(struct idpf_tx_queue *txq)
+{
/* Buffers already cleared, nothing to do */
if (!txq->tx_buf)
return;
- /* Free all the Tx buffer sk_buffs */
- for (i = 0; i < txq->buf_pool_size; i++)
- libeth_tx_complete(&txq->tx_buf[i], &cp);
+ if (idpf_queue_has(XSK, txq))
+ idpf_xsksq_clean(txq);
+ else
+ idpf_tx_buf_clean(txq);
kfree(txq->tx_buf);
txq->tx_buf = NULL;
@@ -68,13 +100,22 @@ static void idpf_tx_buf_rel_all(struct idpf_tx_queue *txq)
*/
static void idpf_tx_desc_rel(struct idpf_tx_queue *txq)
{
+ bool xdp = idpf_queue_has(XDP, txq);
+
+ if (xdp)
+ libeth_xdpsq_deinit_timer(txq->timer);
+
idpf_tx_buf_rel_all(txq);
- netdev_tx_reset_subqueue(txq->netdev, txq->idx);
+
+ if (!xdp)
+ netdev_tx_reset_subqueue(txq->netdev, txq->idx);
+
+ idpf_xsk_clear_queue(txq, VIRTCHNL2_QUEUE_TYPE_TX);
if (!txq->desc_ring)
return;
- if (txq->refillq)
+ if (!xdp && txq->refillq)
kfree(txq->refillq->ring);
dmam_free_coherent(txq->dev, txq->size, txq->desc_ring, txq->dma);
@@ -91,12 +132,14 @@ static void idpf_tx_desc_rel(struct idpf_tx_queue *txq)
*/
static void idpf_compl_desc_rel(struct idpf_compl_queue *complq)
{
+ idpf_xsk_clear_queue(complq, VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION);
+
if (!complq->comp)
return;
dma_free_coherent(complq->netdev->dev.parent, complq->size,
- complq->comp, complq->dma);
- complq->comp = NULL;
+ complq->desc_ring, complq->dma);
+ complq->desc_ring = NULL;
complq->next_to_use = 0;
complq->next_to_clean = 0;
}
@@ -183,6 +226,8 @@ static int idpf_tx_desc_alloc(const struct idpf_vport *vport,
tx_q->next_to_clean = 0;
idpf_queue_set(GEN_CHK, tx_q);
+ idpf_xsk_setup_queue(vport, tx_q, VIRTCHNL2_QUEUE_TYPE_TX);
+
if (!idpf_queue_has(FLOW_SCH_EN, tx_q))
return 0;
@@ -226,18 +271,25 @@ err_alloc:
static int idpf_compl_desc_alloc(const struct idpf_vport *vport,
struct idpf_compl_queue *complq)
{
- complq->size = array_size(complq->desc_count, sizeof(*complq->comp));
+ u32 desc_size;
- complq->comp = dma_alloc_coherent(complq->netdev->dev.parent,
- complq->size, &complq->dma,
- GFP_KERNEL);
- if (!complq->comp)
+ desc_size = idpf_queue_has(FLOW_SCH_EN, complq) ?
+ sizeof(*complq->comp) : sizeof(*complq->comp_4b);
+ complq->size = array_size(complq->desc_count, desc_size);
+
+ complq->desc_ring = dma_alloc_coherent(complq->netdev->dev.parent,
+ complq->size, &complq->dma,
+ GFP_KERNEL);
+ if (!complq->desc_ring)
return -ENOMEM;
complq->next_to_use = 0;
complq->next_to_clean = 0;
idpf_queue_set(GEN_CHK, complq);
+ idpf_xsk_setup_queue(vport, complq,
+ VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION);
+
return 0;
}
@@ -337,6 +389,11 @@ static void idpf_rx_buf_rel_bufq(struct idpf_buf_queue *bufq)
if (!bufq->buf)
return;
+ if (idpf_queue_has(XSK, bufq)) {
+ idpf_xskfq_rel(bufq);
+ return;
+ }
+
/* Free all the bufs allocated and given to hw on Rx queue */
for (u32 i = 0; i < bufq->desc_count; i++)
idpf_rx_page_rel(&bufq->buf[i]);
@@ -385,14 +442,14 @@ static void idpf_rx_desc_rel(struct idpf_rx_queue *rxq, struct device *dev,
if (!rxq)
return;
- if (rxq->skb) {
- dev_kfree_skb_any(rxq->skb);
- rxq->skb = NULL;
- }
+ if (!idpf_queue_has(XSK, rxq))
+ libeth_xdp_return_stash(&rxq->xdp);
if (!idpf_is_queue_model_split(model))
idpf_rx_buf_rel_all(rxq);
+ idpf_xsk_clear_queue(rxq, VIRTCHNL2_QUEUE_TYPE_RX);
+
rxq->next_to_alloc = 0;
rxq->next_to_clean = 0;
rxq->next_to_use = 0;
@@ -415,6 +472,7 @@ static void idpf_rx_desc_rel_bufq(struct idpf_buf_queue *bufq,
return;
idpf_rx_buf_rel_bufq(bufq);
+ idpf_xsk_clear_queue(bufq, VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
bufq->next_to_alloc = 0;
bufq->next_to_clean = 0;
@@ -497,6 +555,7 @@ static int idpf_rx_hdr_buf_alloc_all(struct idpf_buf_queue *bufq)
struct libeth_fq fq = {
.count = bufq->desc_count,
.type = LIBETH_FQE_HDR,
+ .xdp = idpf_xdp_enabled(bufq->q_vector->vport),
.nid = idpf_q_vector_to_mem(bufq->q_vector),
};
int ret;
@@ -696,10 +755,14 @@ static int idpf_rx_bufs_init(struct idpf_buf_queue *bufq,
.count = bufq->desc_count,
.type = type,
.hsplit = idpf_queue_has(HSPLIT_EN, bufq),
+ .xdp = idpf_xdp_enabled(bufq->q_vector->vport),
.nid = idpf_q_vector_to_mem(bufq->q_vector),
};
int ret;
+ if (idpf_queue_has(XSK, bufq))
+ return idpf_xskfq_init(bufq);
+
ret = libeth_rx_fq_create(&fq, &bufq->q_vector->napi);
if (ret)
return ret;
@@ -723,6 +786,8 @@ int idpf_rx_bufs_init_all(struct idpf_vport *vport)
bool split = idpf_is_queue_model_split(vport->rxq_model);
int i, j, err;
+ idpf_xdp_copy_prog_to_rqs(vport, vport->xdp_prog);
+
for (i = 0; i < vport->num_rxq_grp; i++) {
struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
u32 truesize = 0;
@@ -793,6 +858,8 @@ static int idpf_rx_desc_alloc(const struct idpf_vport *vport,
rxq->next_to_use = 0;
idpf_queue_set(GEN_CHK, rxq);
+ idpf_xsk_setup_queue(vport, rxq, VIRTCHNL2_QUEUE_TYPE_RX);
+
return 0;
}
@@ -818,9 +885,10 @@ static int idpf_bufq_desc_alloc(const struct idpf_vport *vport,
bufq->next_to_alloc = 0;
bufq->next_to_clean = 0;
bufq->next_to_use = 0;
-
idpf_queue_set(GEN_CHK, bufq);
+ idpf_xsk_setup_queue(vport, bufq, VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
+
return 0;
}
@@ -886,6 +954,341 @@ err_out:
return err;
}
+static int idpf_init_queue_set(const struct idpf_queue_set *qs)
+{
+ const struct idpf_vport *vport = qs->vport;
+ bool splitq;
+ int err;
+
+ splitq = idpf_is_queue_model_split(vport->rxq_model);
+
+ for (u32 i = 0; i < qs->num; i++) {
+ const struct idpf_queue_ptr *q = &qs->qs[i];
+ struct idpf_buf_queue *bufq;
+
+ switch (q->type) {
+ case VIRTCHNL2_QUEUE_TYPE_RX:
+ err = idpf_rx_desc_alloc(vport, q->rxq);
+ if (err)
+ break;
+
+ err = idpf_xdp_rxq_info_init(q->rxq);
+ if (err)
+ break;
+
+ if (!splitq)
+ err = idpf_rx_bufs_init_singleq(q->rxq);
+
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
+ bufq = q->bufq;
+
+ err = idpf_bufq_desc_alloc(vport, bufq);
+ if (err)
+ break;
+
+ for (u32 j = 0; j < bufq->q_vector->num_bufq; j++) {
+ struct idpf_buf_queue * const *bufqs;
+ enum libeth_fqe_type type;
+ u32 ts;
+
+ bufqs = bufq->q_vector->bufq;
+ if (bufqs[j] != bufq)
+ continue;
+
+ if (j) {
+ type = LIBETH_FQE_SHORT;
+ ts = bufqs[j - 1]->truesize >> 1;
+ } else {
+ type = LIBETH_FQE_MTU;
+ ts = 0;
+ }
+
+ bufq->truesize = ts;
+
+ err = idpf_rx_bufs_init(bufq, type);
+ break;
+ }
+
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_TX:
+ err = idpf_tx_desc_alloc(vport, q->txq);
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
+ err = idpf_compl_desc_alloc(vport, q->complq);
+ break;
+ default:
+ continue;
+ }
+
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void idpf_clean_queue_set(const struct idpf_queue_set *qs)
+{
+ const struct idpf_vport *vport = qs->vport;
+ struct device *dev = vport->netdev->dev.parent;
+
+ for (u32 i = 0; i < qs->num; i++) {
+ const struct idpf_queue_ptr *q = &qs->qs[i];
+
+ switch (q->type) {
+ case VIRTCHNL2_QUEUE_TYPE_RX:
+ idpf_xdp_rxq_info_deinit(q->rxq, vport->rxq_model);
+ idpf_rx_desc_rel(q->rxq, dev, vport->rxq_model);
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
+ idpf_rx_desc_rel_bufq(q->bufq, dev);
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_TX:
+ idpf_tx_desc_rel(q->txq);
+
+ if (idpf_queue_has(XDP, q->txq)) {
+ q->txq->pending = 0;
+ q->txq->xdp_tx = 0;
+ } else {
+ q->txq->txq_grp->num_completions_pending = 0;
+ }
+
+ writel(q->txq->next_to_use, q->txq->tail);
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
+ idpf_compl_desc_rel(q->complq);
+ q->complq->num_completions = 0;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static void idpf_qvec_ena_irq(struct idpf_q_vector *qv)
+{
+ if (qv->num_txq) {
+ u32 itr;
+
+ if (IDPF_ITR_IS_DYNAMIC(qv->tx_intr_mode))
+ itr = qv->vport->tx_itr_profile[qv->tx_dim.profile_ix];
+ else
+ itr = qv->tx_itr_value;
+
+ idpf_vport_intr_write_itr(qv, itr, true);
+ }
+
+ if (qv->num_rxq) {
+ u32 itr;
+
+ if (IDPF_ITR_IS_DYNAMIC(qv->rx_intr_mode))
+ itr = qv->vport->rx_itr_profile[qv->rx_dim.profile_ix];
+ else
+ itr = qv->rx_itr_value;
+
+ idpf_vport_intr_write_itr(qv, itr, false);
+ }
+
+ if (qv->num_txq || qv->num_rxq)
+ idpf_vport_intr_update_itr_ena_irq(qv);
+}
+
+/**
+ * idpf_vector_to_queue_set - create a queue set associated with the given
+ * queue vector
+ * @qv: queue vector corresponding to the queue pair
+ *
+ * Returns a pointer to a dynamically allocated array of pointers to all
+ * queues associated with a given queue vector (@qv).
+ * Note that the caller is responsible for freeing the memory allocated
+ * by this function with kfree().
+ *
+ * Return: &idpf_queue_set on success, %NULL in case of error.
+ */
+static struct idpf_queue_set *
+idpf_vector_to_queue_set(struct idpf_q_vector *qv)
+{
+ bool xdp = qv->vport->xdp_txq_offset && !qv->num_xsksq;
+ struct idpf_vport *vport = qv->vport;
+ struct idpf_queue_set *qs;
+ u32 num;
+
+ num = qv->num_rxq + qv->num_bufq + qv->num_txq + qv->num_complq;
+ num += xdp ? qv->num_rxq * 2 : qv->num_xsksq * 2;
+ if (!num)
+ return NULL;
+
+ qs = idpf_alloc_queue_set(vport, num);
+ if (!qs)
+ return NULL;
+
+ num = 0;
+
+ for (u32 i = 0; i < qv->num_bufq; i++) {
+ qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+ qs->qs[num++].bufq = qv->bufq[i];
+ }
+
+ for (u32 i = 0; i < qv->num_rxq; i++) {
+ qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_RX;
+ qs->qs[num++].rxq = qv->rx[i];
+ }
+
+ for (u32 i = 0; i < qv->num_txq; i++) {
+ qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_TX;
+ qs->qs[num++].txq = qv->tx[i];
+ }
+
+ for (u32 i = 0; i < qv->num_complq; i++) {
+ qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+ qs->qs[num++].complq = qv->complq[i];
+ }
+
+ if (!vport->xdp_txq_offset)
+ goto finalize;
+
+ if (xdp) {
+ for (u32 i = 0; i < qv->num_rxq; i++) {
+ u32 idx = vport->xdp_txq_offset + qv->rx[i]->idx;
+
+ qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_TX;
+ qs->qs[num++].txq = vport->txqs[idx];
+
+ qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+ qs->qs[num++].complq = vport->txqs[idx]->complq;
+ }
+ } else {
+ for (u32 i = 0; i < qv->num_xsksq; i++) {
+ qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_TX;
+ qs->qs[num++].txq = qv->xsksq[i];
+
+ qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+ qs->qs[num++].complq = qv->xsksq[i]->complq;
+ }
+ }
+
+finalize:
+ if (num != qs->num) {
+ kfree(qs);
+ return NULL;
+ }
+
+ return qs;
+}
+
+static int idpf_qp_enable(const struct idpf_queue_set *qs, u32 qid)
+{
+ struct idpf_vport *vport = qs->vport;
+ struct idpf_q_vector *q_vector;
+ int err;
+
+ q_vector = idpf_find_rxq_vec(vport, qid);
+
+ err = idpf_init_queue_set(qs);
+ if (err) {
+ netdev_err(vport->netdev, "Could not initialize queues in pair %u: %pe\n",
+ qid, ERR_PTR(err));
+ return err;
+ }
+
+ if (!vport->xdp_txq_offset)
+ goto config;
+
+ q_vector->xsksq = kcalloc(DIV_ROUND_UP(vport->num_rxq_grp,
+ vport->num_q_vectors),
+ sizeof(*q_vector->xsksq), GFP_KERNEL);
+ if (!q_vector->xsksq)
+ return -ENOMEM;
+
+ for (u32 i = 0; i < qs->num; i++) {
+ const struct idpf_queue_ptr *q = &qs->qs[i];
+
+ if (q->type != VIRTCHNL2_QUEUE_TYPE_TX)
+ continue;
+
+ if (!idpf_queue_has(XSK, q->txq))
+ continue;
+
+ idpf_xsk_init_wakeup(q_vector);
+
+ q->txq->q_vector = q_vector;
+ q_vector->xsksq[q_vector->num_xsksq++] = q->txq;
+ }
+
+config:
+ err = idpf_send_config_queue_set_msg(qs);
+ if (err) {
+ netdev_err(vport->netdev, "Could not configure queues in pair %u: %pe\n",
+ qid, ERR_PTR(err));
+ return err;
+ }
+
+ err = idpf_send_enable_queue_set_msg(qs);
+ if (err) {
+ netdev_err(vport->netdev, "Could not enable queues in pair %u: %pe\n",
+ qid, ERR_PTR(err));
+ return err;
+ }
+
+ napi_enable(&q_vector->napi);
+ idpf_qvec_ena_irq(q_vector);
+
+ netif_start_subqueue(vport->netdev, qid);
+
+ return 0;
+}
+
+static int idpf_qp_disable(const struct idpf_queue_set *qs, u32 qid)
+{
+ struct idpf_vport *vport = qs->vport;
+ struct idpf_q_vector *q_vector;
+ int err;
+
+ q_vector = idpf_find_rxq_vec(vport, qid);
+ netif_stop_subqueue(vport->netdev, qid);
+
+ writel(0, q_vector->intr_reg.dyn_ctl);
+ napi_disable(&q_vector->napi);
+
+ err = idpf_send_disable_queue_set_msg(qs);
+ if (err) {
+ netdev_err(vport->netdev, "Could not disable queues in pair %u: %pe\n",
+ qid, ERR_PTR(err));
+ return err;
+ }
+
+ idpf_clean_queue_set(qs);
+
+ kfree(q_vector->xsksq);
+ q_vector->num_xsksq = 0;
+
+ return 0;
+}
+
+/**
+ * idpf_qp_switch - enable or disable queues associated with queue pair
+ * @vport: vport to switch the pair for
+ * @qid: index of the queue pair to switch
+ * @en: whether to enable or disable the pair
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+int idpf_qp_switch(struct idpf_vport *vport, u32 qid, bool en)
+{
+ struct idpf_q_vector *q_vector = idpf_find_rxq_vec(vport, qid);
+ struct idpf_queue_set *qs __free(kfree) = NULL;
+
+ if (idpf_find_txq_vec(vport, qid) != q_vector)
+ return -EINVAL;
+
+ qs = idpf_vector_to_queue_set(q_vector);
+ if (!qs)
+ return -ENOMEM;
+
+ return en ? idpf_qp_enable(qs, qid) : idpf_qp_disable(qs, qid);
+}
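idpf_qp_switch() gives XSK a way to bounce a single queue pair instead of the whole vport. A hypothetical sketch of how a pool-setup path might use it (idpf_xsk_pool_swap() and the attach step are assumed names, not part of this series):

/* hypothetical: swap an XSK pool on queue 'qid' by bouncing the pair */
static int idpf_xsk_pool_swap(struct idpf_vport *vport, u32 qid,
			      struct xsk_buff_pool *pool)
{
	int err;

	err = idpf_qp_switch(vport, qid, false);	/* quiesce pair */
	if (err)
		return err;

	/* ... attach or detach 'pool' while the pair is down ... */

	return idpf_qp_switch(vport, qid, true);	/* re-enable */
}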
+
/**
* idpf_txq_group_rel - Release all resources for txq groups
* @vport: vport to release txq groups on
@@ -1001,8 +1404,12 @@ static void idpf_vport_queue_grp_rel_all(struct idpf_vport *vport)
*/
void idpf_vport_queues_rel(struct idpf_vport *vport)
{
+ idpf_xdp_copy_prog_to_rqs(vport, NULL);
+
idpf_tx_desc_rel_all(vport);
idpf_rx_desc_rel_all(vport);
+
+ idpf_xdpsqs_put(vport);
idpf_vport_queue_grp_rel_all(vport);
kfree(vport->txqs);
@@ -1076,6 +1483,18 @@ void idpf_vport_init_num_qs(struct idpf_vport *vport,
if (idpf_is_queue_model_split(vport->rxq_model))
vport->num_bufq = le16_to_cpu(vport_msg->num_rx_bufq);
+ vport->xdp_prog = config_data->xdp_prog;
+ if (idpf_xdp_enabled(vport)) {
+ vport->xdp_txq_offset = config_data->num_req_tx_qs;
+ vport->num_xdp_txq = le16_to_cpu(vport_msg->num_tx_q) -
+ vport->xdp_txq_offset;
+ vport->xdpsq_share = libeth_xdpsq_shared(vport->num_xdp_txq);
+ } else {
+ vport->xdp_txq_offset = 0;
+ vport->num_xdp_txq = 0;
+ vport->xdpsq_share = false;
+ }
+
/* Adjust number of buffer queues per Rx queue group. */
if (!idpf_is_queue_model_split(vport->rxq_model)) {
vport->num_bufqs_per_qgrp = 0;
@@ -1147,22 +1566,17 @@ int idpf_vport_calc_total_qs(struct idpf_adapter *adapter, u16 vport_idx,
int dflt_splitq_txq_grps = 0, dflt_singleq_txqs = 0;
int dflt_splitq_rxq_grps = 0, dflt_singleq_rxqs = 0;
u16 num_req_tx_qs = 0, num_req_rx_qs = 0;
+ struct idpf_vport_user_config_data *user;
struct idpf_vport_config *vport_config;
u16 num_txq_grps, num_rxq_grps;
- u32 num_qs;
+ u32 num_qs, num_xdpsq;
vport_config = adapter->vport_config[vport_idx];
if (vport_config) {
num_req_tx_qs = vport_config->user_config.num_req_tx_qs;
num_req_rx_qs = vport_config->user_config.num_req_rx_qs;
} else {
- int num_cpus;
-
- /* Restrict num of queues to cpus online as a default
- * configuration to give best performance. User can always
- * override to a max number of queues via ethtool.
- */
- num_cpus = num_online_cpus();
+ u32 num_cpus = netif_get_num_default_rss_queues();
dflt_splitq_txq_grps = min_t(int, max_q->max_txq, num_cpus);
dflt_singleq_txqs = min_t(int, max_q->max_txq, num_cpus);
@@ -1197,6 +1611,24 @@ int idpf_vport_calc_total_qs(struct idpf_adapter *adapter, u16 vport_idx,
vport_msg->num_rx_bufq = 0;
}
+ if (!vport_config)
+ return 0;
+
+ user = &vport_config->user_config;
+ user->num_req_rx_qs = le16_to_cpu(vport_msg->num_rx_q);
+ user->num_req_tx_qs = le16_to_cpu(vport_msg->num_tx_q);
+
+ if (vport_config->user_config.xdp_prog)
+ num_xdpsq = libeth_xdpsq_num(user->num_req_rx_qs,
+ user->num_req_tx_qs,
+ vport_config->max_q.max_txq);
+ else
+ num_xdpsq = 0;
+
+ vport_msg->num_tx_q = cpu_to_le16(user->num_req_tx_qs + num_xdpsq);
+ if (idpf_is_queue_model_split(le16_to_cpu(vport_msg->txq_model)))
+ vport_msg->num_tx_complq = vport_msg->num_tx_q;
+
return 0;
}
@@ -1246,14 +1678,13 @@ static void idpf_vport_calc_numq_per_grp(struct idpf_vport *vport,
static void idpf_rxq_set_descids(const struct idpf_vport *vport,
struct idpf_rx_queue *q)
{
- if (idpf_is_queue_model_split(vport->rxq_model)) {
- q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
- } else {
- if (vport->base_rxd)
- q->rxdids = VIRTCHNL2_RXDID_1_32B_BASE_M;
- else
- q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
- }
+ if (idpf_is_queue_model_split(vport->rxq_model))
+ return;
+
+ if (vport->base_rxd)
+ q->rxdids = VIRTCHNL2_RXDID_1_32B_BASE_M;
+ else
+ q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
}
/**
@@ -1301,6 +1732,7 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
q->tx_min_pkt_len = idpf_get_min_tx_pkt_len(adapter);
q->netdev = vport->netdev;
q->txq_grp = tx_qgrp;
+ q->rel_q_id = j;
if (!split) {
q->clean_budget = vport->compln_clean_budget;
@@ -1461,7 +1893,6 @@ skip_splitq_rx_init:
setup_rxq:
q->desc_count = vport->rxq_desc_count;
q->rx_ptype_lkup = vport->rx_ptype_lkup;
- q->netdev = vport->netdev;
q->bufq_sets = rx_qgrp->splitq.bufq_sets;
q->idx = (i * num_rxq) + j;
q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK;
@@ -1522,15 +1953,19 @@ int idpf_vport_queues_alloc(struct idpf_vport *vport)
if (err)
goto err_out;
- err = idpf_tx_desc_alloc_all(vport);
+ err = idpf_vport_init_fast_path_txqs(vport);
if (err)
goto err_out;
- err = idpf_rx_desc_alloc_all(vport);
+ err = idpf_xdpsqs_get(vport);
if (err)
goto err_out;
- err = idpf_vport_init_fast_path_txqs(vport);
+ err = idpf_tx_desc_alloc_all(vport);
+ if (err)
+ goto err_out;
+
+ err = idpf_rx_desc_alloc_all(vport);
if (err)
goto err_out;
@@ -1543,32 +1978,6 @@ err_out:
}
/**
- * idpf_tx_handle_sw_marker - Handle queue marker packet
- * @tx_q: tx queue to handle software marker
- */
-static void idpf_tx_handle_sw_marker(struct idpf_tx_queue *tx_q)
-{
- struct idpf_netdev_priv *priv = netdev_priv(tx_q->netdev);
- struct idpf_vport *vport = priv->vport;
- int i;
-
- idpf_queue_clear(SW_MARKER, tx_q);
- /* Hardware must write marker packets to all queues associated with
- * completion queues. So check if all queues received marker packets
- */
- for (i = 0; i < vport->num_txq; i++)
- /* If we're still waiting on any other TXQ marker completions,
- * just return now since we cannot wake up the marker_wq yet.
- */
- if (idpf_queue_has(SW_MARKER, vport->txqs[i]))
- return;
-
- /* Drain complete */
- set_bit(IDPF_VPORT_SW_MARKER, vport->flags);
- wake_up(&vport->sw_marker_wq);
-}
-
-/**
* idpf_tx_read_tstamp - schedule a work to read Tx timestamp value
* @txq: queue to read the timestamp from
* @skb: socket buffer to provide Tx timestamp value
@@ -1745,7 +2154,7 @@ static void idpf_tx_handle_rs_completion(struct idpf_tx_queue *txq,
/* RS completion contains queue head for queue based scheduling or
* completion tag for flow based scheduling.
*/
- u16 rs_compl_val = le16_to_cpu(desc->q_head_compl_tag.q_head);
+ u16 rs_compl_val = le16_to_cpu(desc->common.q_head_compl_tag.q_head);
if (!idpf_queue_has(FLOW_SCH_EN, txq)) {
idpf_tx_splitq_clean(txq, rs_compl_val, budget, cleaned, false);
@@ -1780,19 +2189,19 @@ static bool idpf_tx_clean_complq(struct idpf_compl_queue *complq, int budget,
do {
struct libeth_sq_napi_stats cleaned_stats = { };
struct idpf_tx_queue *tx_q;
+ __le16 hw_head;
int rel_tx_qid;
- u16 hw_head;
u8 ctype; /* completion type */
u16 gen;
/* if the descriptor isn't done, no work yet to do */
- gen = le16_get_bits(tx_desc->qid_comptype_gen,
+ gen = le16_get_bits(tx_desc->common.qid_comptype_gen,
IDPF_TXD_COMPLQ_GEN_M);
if (idpf_queue_has(GEN_CHK, complq) != gen)
break;
/* Find necessary info of TX queue to clean buffers */
- rel_tx_qid = le16_get_bits(tx_desc->qid_comptype_gen,
+ rel_tx_qid = le16_get_bits(tx_desc->common.qid_comptype_gen,
IDPF_TXD_COMPLQ_QID_M);
if (rel_tx_qid >= complq->txq_grp->num_txq ||
!complq->txq_grp->txqs[rel_tx_qid]) {
@@ -1802,22 +2211,19 @@ static bool idpf_tx_clean_complq(struct idpf_compl_queue *complq, int budget,
tx_q = complq->txq_grp->txqs[rel_tx_qid];
/* Determine completion type */
- ctype = le16_get_bits(tx_desc->qid_comptype_gen,
+ ctype = le16_get_bits(tx_desc->common.qid_comptype_gen,
IDPF_TXD_COMPLQ_COMPL_TYPE_M);
switch (ctype) {
case IDPF_TXD_COMPLT_RE:
- hw_head = le16_to_cpu(tx_desc->q_head_compl_tag.q_head);
+ hw_head = tx_desc->common.q_head_compl_tag.q_head;
- idpf_tx_splitq_clean(tx_q, hw_head, budget,
- &cleaned_stats, true);
+ idpf_tx_splitq_clean(tx_q, le16_to_cpu(hw_head),
+ budget, &cleaned_stats, true);
break;
case IDPF_TXD_COMPLT_RS:
idpf_tx_handle_rs_completion(tx_q, tx_desc,
&cleaned_stats, budget);
break;
- case IDPF_TXD_COMPLT_SW_MARKER:
- idpf_tx_handle_sw_marker(tx_q);
- break;
default:
netdev_err(tx_q->netdev,
"Unknown TX completion type: %d\n", ctype);
@@ -1890,6 +2296,69 @@ fetch_next_desc:
}
/**
+ * idpf_wait_for_sw_marker_completion - wait for SW marker of disabled Tx queue
+ * @txq: disabled Tx queue
+ *
+ * When a Tx queue is requested to be disabled, the CP sends a special
+ * completion descriptor called "SW marker", meaning the queue is ready to
+ * be destroyed. If, for some reason, the marker is not received within
+ * 500 ms, stop polling to avoid hanging the driver.
+ */
+void idpf_wait_for_sw_marker_completion(const struct idpf_tx_queue *txq)
+{
+ struct idpf_compl_queue *complq;
+ unsigned long timeout;
+ bool flow, gen_flag;
+ u32 ntc;
+
+ if (!idpf_queue_has(SW_MARKER, txq))
+ return;
+
+ complq = idpf_queue_has(XDP, txq) ? txq->complq : txq->txq_grp->complq;
+ ntc = complq->next_to_clean;
+
+ flow = idpf_queue_has(FLOW_SCH_EN, complq);
+ gen_flag = idpf_queue_has(GEN_CHK, complq);
+
+ timeout = jiffies + msecs_to_jiffies(IDPF_WAIT_FOR_MARKER_TIMEO);
+
+ do {
+ struct idpf_splitq_4b_tx_compl_desc *tx_desc;
+ struct idpf_tx_queue *target;
+ u32 ctype_gen, id;
+
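+ /* Flow scheduling uses 8B completion descriptors, queue-based 4B */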
+ tx_desc = flow ? &complq->comp[ntc].common :
+ &complq->comp_4b[ntc];
+ ctype_gen = le16_to_cpu(tx_desc->qid_comptype_gen);
+
+ if (!!(ctype_gen & IDPF_TXD_COMPLQ_GEN_M) != gen_flag) {
+ usleep_range(500, 1000);
+ continue;
+ }
+
+ if (FIELD_GET(IDPF_TXD_COMPLQ_COMPL_TYPE_M, ctype_gen) !=
+ IDPF_TXD_COMPLT_SW_MARKER)
+ goto next;
+
+ id = FIELD_GET(IDPF_TXD_COMPLQ_QID_M, ctype_gen);
+ target = complq->txq_grp->txqs[id];
+
+ idpf_queue_clear(SW_MARKER, target);
+ if (target == txq)
+ break;
+
+next:
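+ /* Flip the GEN bit on every ring wraparound to keep matching HW */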
+ if (unlikely(++ntc == complq->desc_count)) {
+ ntc = 0;
+ gen_flag = !gen_flag;
+ }
+ } while (time_before(jiffies, timeout));
+
+ idpf_queue_assign(GEN_CHK, complq, gen_flag);
+ complq->next_to_clean = ntc;
+}
+
+/**
* idpf_tx_splitq_build_ctb - populate command tag and size for queue
* based scheduling descriptors
* @desc: descriptor to populate
@@ -2397,111 +2866,6 @@ int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off)
return 1;
}
-/**
- * __idpf_chk_linearize - Check skb is not using too many buffers
- * @skb: send buffer
- * @max_bufs: maximum number of buffers
- *
- * For TSO we need to count the TSO header and segment payload separately. As
- * such we need to check cases where we have max_bufs-1 fragments or more as we
- * can potentially require max_bufs+1 DMA transactions, 1 for the TSO header, 1
- * for the segment payload in the first descriptor, and another max_buf-1 for
- * the fragments.
- */
-static bool __idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs)
-{
- const struct skb_shared_info *shinfo = skb_shinfo(skb);
- const skb_frag_t *frag, *stale;
- int nr_frags, sum;
-
- /* no need to check if number of frags is less than max_bufs - 1 */
- nr_frags = shinfo->nr_frags;
- if (nr_frags < (max_bufs - 1))
- return false;
-
- /* We need to walk through the list and validate that each group
- * of max_bufs-2 fragments totals at least gso_size.
- */
- nr_frags -= max_bufs - 2;
- frag = &shinfo->frags[0];
-
- /* Initialize size to the negative value of gso_size minus 1. We use
- * this as the worst case scenario in which the frag ahead of us only
- * provides one byte which is why we are limited to max_bufs-2
- * descriptors for a single transmit as the header and previous
- * fragment are already consuming 2 descriptors.
- */
- sum = 1 - shinfo->gso_size;
-
- /* Add size of frags 0 through 4 to create our initial sum */
- sum += skb_frag_size(frag++);
- sum += skb_frag_size(frag++);
- sum += skb_frag_size(frag++);
- sum += skb_frag_size(frag++);
- sum += skb_frag_size(frag++);
-
- /* Walk through fragments adding latest fragment, testing it, and
- * then removing stale fragments from the sum.
- */
- for (stale = &shinfo->frags[0];; stale++) {
- int stale_size = skb_frag_size(stale);
-
- sum += skb_frag_size(frag++);
-
- /* The stale fragment may present us with a smaller
- * descriptor than the actual fragment size. To account
- * for that we need to remove all the data on the front and
- * figure out what the remainder would be in the last
- * descriptor associated with the fragment.
- */
- if (stale_size > IDPF_TX_MAX_DESC_DATA) {
- int align_pad = -(skb_frag_off(stale)) &
- (IDPF_TX_MAX_READ_REQ_SIZE - 1);
-
- sum -= align_pad;
- stale_size -= align_pad;
-
- do {
- sum -= IDPF_TX_MAX_DESC_DATA_ALIGNED;
- stale_size -= IDPF_TX_MAX_DESC_DATA_ALIGNED;
- } while (stale_size > IDPF_TX_MAX_DESC_DATA);
- }
-
- /* if sum is negative we failed to make sufficient progress */
- if (sum < 0)
- return true;
-
- if (!nr_frags--)
- break;
-
- sum -= stale_size;
- }
-
- return false;
-}
-
-/**
- * idpf_chk_linearize - Check if skb exceeds max descriptors per packet
- * @skb: send buffer
- * @max_bufs: maximum scatter gather buffers for single packet
- * @count: number of buffers this packet needs
- *
- * Make sure we don't exceed maximum scatter gather buffers for a single
- * packet. We have to do some special checking around the boundary (max_bufs-1)
- * if TSO is on since we need count the TSO header and payload separately.
- * E.g.: a packet with 7 fragments can require 9 DMA transactions; 1 for TSO
- * header, 1 for segment payload, and then 7 for the fragments.
- */
-static bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
- unsigned int count)
-{
- if (likely(count < max_bufs))
- return false;
- if (skb_is_gso(skb))
- return __idpf_chk_linearize(skb, max_bufs);
-
- return count > max_bufs;
-}
/**
* idpf_tx_splitq_get_ctx_desc - grab next desc and update buffer ring
@@ -2758,10 +3122,11 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
*/
netdev_tx_t idpf_tx_start(struct sk_buff *skb, struct net_device *netdev)
{
- struct idpf_vport *vport = idpf_netdev_to_vport(netdev);
+ const struct idpf_vport *vport = idpf_netdev_to_vport(netdev);
struct idpf_tx_queue *tx_q;
- if (unlikely(skb_get_queue_mapping(skb) >= vport->num_txq)) {
+ if (unlikely(skb_get_queue_mapping(skb) >=
+ vport->num_txq - vport->num_xdp_txq)) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
@@ -2798,7 +3163,7 @@ idpf_rx_hash(const struct idpf_rx_queue *rxq, struct sk_buff *skb,
{
u32 hash;
- if (!libeth_rx_pt_has_hash(rxq->netdev, decoded))
+ if (!libeth_rx_pt_has_hash(rxq->xdp_rxq.dev, decoded))
return;
hash = le16_to_cpu(rx_desc->hash1) |
@@ -2824,7 +3189,7 @@ static void idpf_rx_csum(struct idpf_rx_queue *rxq, struct sk_buff *skb,
bool ipv4, ipv6;
/* check if Rx checksum is enabled */
- if (!libeth_rx_pt_has_checksum(rxq->netdev, decoded))
+ if (!libeth_rx_pt_has_checksum(rxq->xdp_rxq.dev, decoded))
return;
/* check if HW has decoded the packet and checksum */
@@ -2996,7 +3361,7 @@ idpf_rx_hwtstamp(const struct idpf_rx_queue *rxq,
}
/**
- * idpf_rx_process_skb_fields - Populate skb header fields from Rx descriptor
+ * __idpf_rx_process_skb_fields - Populate skb header fields from Rx descriptor
* @rxq: Rx descriptor ring packet is being transacted on
* @skb: pointer to current skb being populated
* @rx_desc: Receive descriptor
@@ -3006,8 +3371,8 @@ idpf_rx_hwtstamp(const struct idpf_rx_queue *rxq,
* other fields within the skb.
*/
static int
-idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb,
- const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
+__idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb,
+ const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
{
struct libeth_rx_csum csum_bits;
struct libeth_rx_pt decoded;
@@ -3023,9 +3388,6 @@ idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb,
if (idpf_queue_has(PTP, rxq))
idpf_rx_hwtstamp(rxq, rx_desc, skb);
- skb->protocol = eth_type_trans(skb, rxq->netdev);
- skb_record_rx_queue(skb, rxq->idx);
-
if (le16_get_bits(rx_desc->hdrlen_flags,
VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_M))
return idpf_rx_rsc(rxq, skb, rx_desc, decoded);
@@ -3036,25 +3398,24 @@ idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb,
return 0;
}
-/**
- * idpf_rx_add_frag - Add contents of Rx buffer to sk_buff as a frag
- * @rx_buf: buffer containing page to add
- * @skb: sk_buff to place the data into
- * @size: packet length from rx_desc
- *
- * This function will add the data contained in rx_buf->page to the skb.
- * It will just attach the page as a frag to the skb.
- * The function will then update the page offset.
- */
-void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb,
- unsigned int size)
+bool idpf_rx_process_skb_fields(struct sk_buff *skb,
+ const struct libeth_xdp_buff *xdp,
+ struct libeth_rq_napi_stats *rs)
{
- u32 hr = netmem_get_pp(rx_buf->netmem)->p.offset;
+ struct idpf_rx_queue *rxq;
+
+ rxq = libeth_xdp_buff_to_rq(xdp, typeof(*rxq), xdp_rxq);
- skb_add_rx_frag_netmem(skb, skb_shinfo(skb)->nr_frags, rx_buf->netmem,
- rx_buf->offset + hr, size, rx_buf->truesize);
+ return !__idpf_rx_process_skb_fields(rxq, skb, xdp->desc);
}
+LIBETH_XDP_DEFINE_START();
+LIBETH_XDP_DEFINE_RUN(static idpf_xdp_run_pass, idpf_xdp_run_prog,
+ idpf_xdp_tx_flush_bulk, idpf_rx_process_skb_fields);
+LIBETH_XDP_DEFINE_FINALIZE(static idpf_xdp_finalize_rx, idpf_xdp_tx_flush_bulk,
+ idpf_xdp_tx_finalize);
+LIBETH_XDP_DEFINE_END();
+
/**
* idpf_rx_hsplit_wa - handle header buffer overflows and split errors
* @hdr: Rx buffer for the headers
@@ -3097,36 +3458,6 @@ static u32 idpf_rx_hsplit_wa(const struct libeth_fqe *hdr,
}
/**
- * idpf_rx_build_skb - Allocate skb and populate it from header buffer
- * @buf: Rx buffer to pull data from
- * @size: the length of the packet
- *
- * This function allocates an skb. It then populates it with the page data from
- * the current receive descriptor, taking care to set up the skb correctly.
- */
-struct sk_buff *idpf_rx_build_skb(const struct libeth_fqe *buf, u32 size)
-{
- struct page *buf_page = __netmem_to_page(buf->netmem);
- u32 hr = pp_page_to_nmdesc(buf_page)->pp->p.offset;
- struct sk_buff *skb;
- void *va;
-
- va = page_address(buf_page) + buf->offset;
- prefetch(va + hr);
-
- skb = napi_build_skb(va, buf->truesize);
- if (unlikely(!skb))
- return NULL;
-
- skb_mark_for_recycle(skb);
-
- skb_reserve(skb, hr);
- __skb_put(skb, size);
-
- return skb;
-}
-
-/**
* idpf_rx_splitq_test_staterr - tests bits in Rx descriptor
* status and error fields
* @stat_err_field: field from descriptor to test bits in
@@ -3167,13 +3498,18 @@ static bool idpf_rx_splitq_is_eop(struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_de
*/
static int idpf_rx_splitq_clean(struct idpf_rx_queue *rxq, int budget)
{
- int total_rx_bytes = 0, total_rx_pkts = 0;
struct idpf_buf_queue *rx_bufq = NULL;
- struct sk_buff *skb = rxq->skb;
+ struct libeth_rq_napi_stats rs = { };
u16 ntc = rxq->next_to_clean;
+ LIBETH_XDP_ONSTACK_BUFF(xdp);
+ LIBETH_XDP_ONSTACK_BULK(bq);
+
+ libeth_xdp_tx_init_bulk(&bq, rxq->xdp_prog, rxq->xdp_rxq.dev,
+ rxq->xdpsqs, rxq->num_xdp_txq);
+ libeth_xdp_init_buff(xdp, &rxq->xdp, &rxq->xdp_rxq);
/* Process Rx packets bounded by budget */
- while (likely(total_rx_pkts < budget)) {
+ while (likely(rs.packets < budget)) {
struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc;
struct libeth_fqe *hdr, *rx_buf = NULL;
struct idpf_sw_queue *refillq = NULL;
@@ -3187,18 +3523,14 @@ static int idpf_rx_splitq_clean(struct idpf_rx_queue *rxq, int budget)
/* get the Rx desc from Rx queue based on 'next_to_clean' */
rx_desc = &rxq->rx[ntc].flex_adv_nic_3_wb;
- /* This memory barrier is needed to keep us from reading
- * any other fields out of the rx_desc
- */
- dma_rmb();
-
/* if the descriptor isn't done, no work yet to do */
gen_id = le16_get_bits(rx_desc->pktlen_gen_bufq_id,
VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M);
-
if (idpf_queue_has(GEN_CHK, rxq) != gen_id)
break;
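+ /* Read the rest of the descriptor fields only after the GEN check */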
+ dma_rmb();
+
rxdid = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_RXDID_M,
rx_desc->rxdid_ucast);
if (rxdid != VIRTCHNL2_RXDID_2_FLEX_SPLITQ) {
@@ -3243,7 +3575,7 @@ static int idpf_rx_splitq_clean(struct idpf_rx_queue *rxq, int budget)
hdr = &rx_bufq->hdr_buf[buf_id];
- if (unlikely(!hdr_len && !skb)) {
+ if (unlikely(!hdr_len && !xdp->data)) {
hdr_len = idpf_rx_hsplit_wa(hdr, rx_buf, pkt_len);
/* If failed, drop both buffers by setting len to 0 */
pkt_len -= hdr_len ? : pkt_len;
@@ -3253,75 +3585,37 @@ static int idpf_rx_splitq_clean(struct idpf_rx_queue *rxq, int budget)
u64_stats_update_end(&rxq->stats_sync);
}
- if (libeth_rx_sync_for_cpu(hdr, hdr_len)) {
- skb = idpf_rx_build_skb(hdr, hdr_len);
- if (!skb)
- break;
-
- u64_stats_update_begin(&rxq->stats_sync);
- u64_stats_inc(&rxq->q_stats.hsplit_pkts);
- u64_stats_update_end(&rxq->stats_sync);
- }
+ if (libeth_xdp_process_buff(xdp, hdr, hdr_len))
+ rs.hsplit++;
hdr->netmem = 0;
payload:
- if (!libeth_rx_sync_for_cpu(rx_buf, pkt_len))
- goto skip_data;
-
- if (skb)
- idpf_rx_add_frag(rx_buf, skb, pkt_len);
- else
- skb = idpf_rx_build_skb(rx_buf, pkt_len);
-
- /* exit if we failed to retrieve a buffer */
- if (!skb)
- break;
-
-skip_data:
+ libeth_xdp_process_buff(xdp, rx_buf, pkt_len);
rx_buf->netmem = 0;
idpf_post_buf_refill(refillq, buf_id);
IDPF_RX_BUMP_NTC(rxq, ntc);
/* skip if it is non EOP desc */
- if (!idpf_rx_splitq_is_eop(rx_desc) || unlikely(!skb))
+ if (!idpf_rx_splitq_is_eop(rx_desc) || unlikely(!xdp->data))
continue;
- /* pad skb if needed (to make valid ethernet frame) */
- if (eth_skb_pad(skb)) {
- skb = NULL;
- continue;
- }
-
- /* probably a little skewed due to removing CRC */
- total_rx_bytes += skb->len;
-
- /* protocol */
- if (unlikely(idpf_rx_process_skb_fields(rxq, skb, rx_desc))) {
- dev_kfree_skb_any(skb);
- skb = NULL;
- continue;
- }
-
- /* send completed skb up the stack */
- napi_gro_receive(rxq->napi, skb);
- skb = NULL;
-
- /* update budget accounting */
- total_rx_pkts++;
+ idpf_xdp_run_pass(xdp, &bq, rxq->napi, &rs, rx_desc);
}
+ idpf_xdp_finalize_rx(&bq);
+
rxq->next_to_clean = ntc;
+ libeth_xdp_save_buff(&rxq->xdp, xdp);
- rxq->skb = skb;
u64_stats_update_begin(&rxq->stats_sync);
- u64_stats_add(&rxq->q_stats.packets, total_rx_pkts);
- u64_stats_add(&rxq->q_stats.bytes, total_rx_bytes);
+ u64_stats_add(&rxq->q_stats.packets, rs.packets);
+ u64_stats_add(&rxq->q_stats.bytes, rs.bytes);
+ u64_stats_add(&rxq->q_stats.hsplit_pkts, rs.hsplit);
u64_stats_update_end(&rxq->stats_sync);
- /* guarantee a trip back through this routine if there was a failure */
- return total_rx_pkts;
+ return rs.packets;
}
/**
@@ -3464,7 +3758,7 @@ static irqreturn_t idpf_vport_intr_clean_queues(int __always_unused irq,
struct idpf_q_vector *q_vector = (struct idpf_q_vector *)data;
q_vector->total_events++;
- napi_schedule(&q_vector->napi);
+ napi_schedule_irqoff(&q_vector->napi);
return IRQ_HANDLED;
}
@@ -3505,6 +3799,8 @@ void idpf_vport_intr_rel(struct idpf_vport *vport)
for (u32 v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx];
+ kfree(q_vector->xsksq);
+ q_vector->xsksq = NULL;
kfree(q_vector->complq);
q_vector->complq = NULL;
kfree(q_vector->bufq);
@@ -3519,6 +3815,20 @@ void idpf_vport_intr_rel(struct idpf_vport *vport)
vport->q_vectors = NULL;
}
+static void idpf_q_vector_set_napi(struct idpf_q_vector *q_vector, bool link)
+{
+ struct napi_struct *napi = link ? &q_vector->napi : NULL;
+ struct net_device *dev = q_vector->vport->netdev;
+
+ for (u32 i = 0; i < q_vector->num_rxq; i++)
+ netif_queue_set_napi(dev, q_vector->rx[i]->idx,
+ NETDEV_QUEUE_TYPE_RX, napi);
+
+ for (u32 i = 0; i < q_vector->num_txq; i++)
+ netif_queue_set_napi(dev, q_vector->tx[i]->idx,
+ NETDEV_QUEUE_TYPE_TX, napi);
+}
+
/**
* idpf_vport_intr_rel_irq - Free the IRQ association with the OS
* @vport: main vport structure
@@ -3539,6 +3849,7 @@ static void idpf_vport_intr_rel_irq(struct idpf_vport *vport)
vidx = vport->q_vector_idxs[vector];
irq_num = adapter->msix_entries[vidx].vector;
+ idpf_q_vector_set_napi(q_vector, false);
kfree(free_irq(irq_num, q_vector));
}
}
@@ -3552,6 +3863,8 @@ static void idpf_vport_intr_dis_irq_all(struct idpf_vport *vport)
struct idpf_q_vector *q_vector = vport->q_vectors;
int q_idx;
+ writel(0, vport->noirq_dyn_ctl);
+
for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++)
writel(0, q_vector[q_idx].intr_reg.dyn_ctl);
}
@@ -3726,6 +4039,8 @@ static int idpf_vport_intr_req_irq(struct idpf_vport *vport)
"Request_irq failed, error: %d\n", err);
goto free_q_irqs;
}
+
+ idpf_q_vector_set_napi(q_vector, true);
}
return 0;
@@ -3793,6 +4108,8 @@ static void idpf_vport_intr_ena_irq_all(struct idpf_vport *vport)
if (qv->num_txq || qv->num_rxq)
idpf_vport_intr_update_itr_ena_irq(qv);
}
+
+ writel(vport->noirq_dyn_ctl_ena, vport->noirq_dyn_ctl);
}
/**
@@ -3942,7 +4259,9 @@ static bool idpf_rx_splitq_clean_all(struct idpf_q_vector *q_vec, int budget,
struct idpf_rx_queue *rxq = q_vec->rx[i];
int pkts_cleaned_per_q;
- pkts_cleaned_per_q = idpf_rx_splitq_clean(rxq, budget_per_q);
+ pkts_cleaned_per_q = idpf_queue_has(XSK, rxq) ?
+ idpf_xskrq_poll(rxq, budget_per_q) :
+ idpf_rx_splitq_clean(rxq, budget_per_q);
/* if we clean as many as budgeted, we must not be done */
if (pkts_cleaned_per_q >= budget_per_q)
clean_complete = false;
@@ -3952,8 +4271,10 @@ static bool idpf_rx_splitq_clean_all(struct idpf_q_vector *q_vec, int budget,
nid = numa_mem_id();
- for (i = 0; i < q_vec->num_bufq; i++)
- idpf_rx_clean_refillq_all(q_vec->bufq[i], nid);
+ for (i = 0; i < q_vec->num_bufq; i++) {
+ if (!idpf_queue_has(XSK, q_vec->bufq[i]))
+ idpf_rx_clean_refillq_all(q_vec->bufq[i], nid);
+ }
return clean_complete;
}
@@ -3967,7 +4288,7 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
{
struct idpf_q_vector *q_vector =
container_of(napi, struct idpf_q_vector, napi);
- bool clean_complete;
+ bool clean_complete = true;
int work_done = 0;
/* Handle case where we are called by netpoll with a budget of 0 */
@@ -3977,8 +4298,13 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
return 0;
}
- clean_complete = idpf_rx_splitq_clean_all(q_vector, budget, &work_done);
- clean_complete &= idpf_tx_splitq_clean_all(q_vector, budget, &work_done);
+ for (u32 i = 0; i < q_vector->num_xsksq; i++)
+ clean_complete &= idpf_xsk_xmit(q_vector->xsksq[i]);
+
+ clean_complete &= idpf_tx_splitq_clean_all(q_vector, budget,
+ &work_done);
+ clean_complete &= idpf_rx_splitq_clean_all(q_vector, budget,
+ &work_done);
/* If work not completed, return budget and polling will return */
if (!clean_complete) {
@@ -3986,20 +4312,12 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
return budget;
}
- /* Switch to poll mode in the tear-down path after sending disable
- * queues virtchnl message, as the interrupts will be disabled after
- * that.
- */
- if (unlikely(q_vector->num_txq && idpf_queue_has(POLL_MODE,
- q_vector->tx[0])))
- return budget;
-
work_done = min_t(int, work_done, budget - 1);
/* Exit the polling mode, but don't re-enable interrupts if stack might
* poll us due to busy-polling
*/
- if (likely(napi_complete_done(napi, work_done)))
+ if (napi_complete_done(napi, work_done))
idpf_vport_intr_update_itr_ena_irq(q_vector);
else
idpf_vport_intr_set_wb_on_itr(q_vector);
@@ -4015,8 +4333,8 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
*/
static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport)
{
+ u16 num_txq_grp = vport->num_txq_grp - vport->num_xdp_txq;
bool split = idpf_is_queue_model_split(vport->rxq_model);
- u16 num_txq_grp = vport->num_txq_grp;
struct idpf_rxq_group *rx_qgrp;
struct idpf_txq_group *tx_qgrp;
u32 i, qv_idx, q_index;
@@ -4092,6 +4410,21 @@ static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport)
qv_idx++;
}
+
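+ /* Pair each XSk-enabled XDP Tx queue with its Rx queue's vector */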
+ for (i = 0; i < vport->num_xdp_txq; i++) {
+ struct idpf_tx_queue *xdpsq;
+ struct idpf_q_vector *qv;
+
+ xdpsq = vport->txqs[vport->xdp_txq_offset + i];
+ if (!idpf_queue_has(XSK, xdpsq))
+ continue;
+
+ qv = idpf_find_rxq_vec(vport, i);
+ idpf_xsk_init_wakeup(qv);
+
+ xdpsq->q_vector = qv;
+ qv->xsksq[qv->num_xsksq++] = xdpsq;
+ }
}
/**
@@ -4112,6 +4445,8 @@ static int idpf_vport_intr_init_vec_idx(struct idpf_vport *vport)
for (i = 0; i < vport->num_q_vectors; i++)
vport->q_vectors[i].v_idx = vport->q_vector_idxs[i];
+ vport->noirq_v_idx = vport->q_vector_idxs[i];
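+ /* After the loop, i points at the vector reserved for NOIRQ queues */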
+
return 0;
}
@@ -4125,6 +4460,8 @@ static int idpf_vport_intr_init_vec_idx(struct idpf_vport *vport)
for (i = 0; i < vport->num_q_vectors; i++)
vport->q_vectors[i].v_idx = vecids[vport->q_vector_idxs[i]];
+ vport->noirq_v_idx = vecids[vport->q_vector_idxs[i]];
+
kfree(vecids);
return 0;
@@ -4225,6 +4562,15 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport)
GFP_KERNEL);
if (!q_vector->complq)
goto error;
+
+ if (!vport->xdp_txq_offset)
+ continue;
+
+ q_vector->xsksq = kcalloc(rxqs_per_vector,
+ sizeof(*q_vector->xsksq),
+ GFP_KERNEL);
+ if (!q_vector->xsksq)
+ goto error;
}
return 0;
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
index 52753dff381c..75b977094741 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -7,8 +7,10 @@
#include <linux/dim.h>
#include <net/libeth/cache.h>
-#include <net/tcp.h>
+#include <net/libeth/types.h>
#include <net/netdev_queues.h>
+#include <net/tcp.h>
+#include <net/xdp.h>
#include "idpf_lan_txrx.h"
#include "virtchnl2_lan_desc.h"
@@ -58,6 +60,8 @@
#define IDPF_MBX_Q_VEC 1
#define IDPF_MIN_Q_VEC 1
#define IDPF_MIN_RDMA_VEC 2
+/* Data vector for NOIRQ queues */
+#define IDPF_RESERVED_VECS 1
#define IDPF_DFLT_TX_Q_DESC_COUNT 512
#define IDPF_DFLT_TX_COMPLQ_DESC_COUNT 512
@@ -137,6 +141,8 @@ do { \
#define IDPF_TX_FLAGS_TUNNEL BIT(3)
#define IDPF_TX_FLAGS_TSYN BIT(4)
+struct libeth_rq_napi_stats;
+
union idpf_tx_flex_desc {
struct idpf_flex_tx_desc q; /* queue based scheduling */
struct idpf_flex_tx_sched_desc flow; /* flow based scheduling */
@@ -275,11 +281,13 @@ struct idpf_ptype_state {
* bit and Q_RFL_GEN is the SW bit.
* @__IDPF_Q_FLOW_SCH_EN: Enable flow scheduling
* @__IDPF_Q_SW_MARKER: Used to indicate TX queue marker completions
- * @__IDPF_Q_POLL_MODE: Enable poll mode
* @__IDPF_Q_CRC_EN: enable CRC offload in singleq mode
* @__IDPF_Q_HSPLIT_EN: enable header split on Rx (splitq)
* @__IDPF_Q_PTP: indicates whether the Rx timestamping is enabled for the
* queue
+ * @__IDPF_Q_NOIRQ: queue is polling-driven and has no interrupt
+ * @__IDPF_Q_XDP: this is an XDP queue
+ * @__IDPF_Q_XSK: the queue has an XSk pool installed
* @__IDPF_Q_FLAGS_NBITS: Must be last
*/
enum idpf_queue_flags_t {
@@ -287,10 +295,12 @@ enum idpf_queue_flags_t {
__IDPF_Q_RFL_GEN_CHK,
__IDPF_Q_FLOW_SCH_EN,
__IDPF_Q_SW_MARKER,
- __IDPF_Q_POLL_MODE,
__IDPF_Q_CRC_EN,
__IDPF_Q_HSPLIT_EN,
__IDPF_Q_PTP,
+ __IDPF_Q_NOIRQ,
+ __IDPF_Q_XDP,
+ __IDPF_Q_XSK,
__IDPF_Q_FLAGS_NBITS,
};
@@ -357,14 +367,17 @@ struct idpf_intr_reg {
* @num_txq: Number of TX queues
* @num_bufq: Number of buffer queues
* @num_complq: number of completion queues
+ * @num_xsksq: number of XSk send queues
* @rx: Array of RX queues to service
* @tx: Array of TX queues to service
* @bufq: Array of buffer queues to service
* @complq: array of completion queues
+ * @xsksq: array of XSk send queues
* @intr_reg: See struct idpf_intr_reg
- * @napi: napi handler
+ * @csd: XSk wakeup CSD
* @total_events: Number of interrupts processed
* @wb_on_itr: whether WB on ITR is enabled
+ * @napi: napi handler
* @tx_dim: Data for TX net_dim algorithm
* @tx_itr_value: TX interrupt throttling rate
* @tx_intr_mode: Dynamic ITR or not
@@ -383,19 +396,24 @@ struct idpf_q_vector {
u16 num_txq;
u16 num_bufq;
u16 num_complq;
+ u16 num_xsksq;
struct idpf_rx_queue **rx;
struct idpf_tx_queue **tx;
struct idpf_buf_queue **bufq;
struct idpf_compl_queue **complq;
+ struct idpf_tx_queue **xsksq;
struct idpf_intr_reg intr_reg;
__cacheline_group_end_aligned(read_mostly);
__cacheline_group_begin_aligned(read_write);
- struct napi_struct napi;
+ call_single_data_t csd;
+
u16 total_events;
bool wb_on_itr;
+ struct napi_struct napi;
+
struct dim tx_dim;
u16 tx_itr_value;
bool tx_intr_mode;
@@ -412,8 +430,8 @@ struct idpf_q_vector {
__cacheline_group_end_aligned(cold);
};
-libeth_cacheline_set_assert(struct idpf_q_vector, 120,
- 24 + sizeof(struct napi_struct) +
+libeth_cacheline_set_assert(struct idpf_q_vector, 136,
+ 56 + sizeof(struct napi_struct) +
2 * sizeof(struct dim),
8);
@@ -461,21 +479,26 @@ struct idpf_tx_queue_stats {
* @desc_ring: virtual descriptor ring address
* @bufq_sets: Pointer to the array of buffer queues in splitq mode
* @napi: NAPI instance corresponding to this queue (splitq)
+ * @xdp_prog: attached XDP program
* @rx_buf: See struct &libeth_fqe
* @pp: Page pool pointer in singleq mode
- * @netdev: &net_device corresponding to this queue
* @tail: Tail offset. Used for both queue models single and split.
* @flags: See enum idpf_queue_flags_t
* @idx: For RX queue, it is used to index to total RX queue across groups and
* used for skb reporting.
* @desc_count: Number of descriptors
+ * @num_xdp_txq: total number of XDP Tx queues
+ * @xdpsqs: shortcut for XDP Tx queues array
* @rxdids: Supported RX descriptor ids
+ * @truesize: data buffer truesize in singleq
* @rx_ptype_lkup: LUT of Rx ptypes
+ * @xdp_rxq: XDP queue info
* @next_to_use: Next descriptor to use
* @next_to_clean: Next descriptor to clean
* @next_to_alloc: RX buffer to allocate at
- * @skb: Pointer to the skb
- * @truesize: data buffer truesize in singleq
+ * @xdp: XDP buffer with the current frame
+ * @xsk: current XDP buffer in XSk mode
+ * @pool: XSk pool if installed
* @cached_phc_time: Cached PHC time for the Rx queue
* @stats_sync: See struct u64_stats_sync
* @q_stats: See union idpf_rx_queue_stats
@@ -500,30 +523,44 @@ struct idpf_rx_queue {
struct {
struct idpf_bufq_set *bufq_sets;
struct napi_struct *napi;
+ struct bpf_prog __rcu *xdp_prog;
};
struct {
struct libeth_fqe *rx_buf;
struct page_pool *pp;
+ void __iomem *tail;
};
};
- struct net_device *netdev;
- void __iomem *tail;
DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
u16 idx;
u16 desc_count;
- u32 rxdids;
+ u32 num_xdp_txq;
+ union {
+ struct idpf_tx_queue **xdpsqs;
+ struct {
+ u32 rxdids;
+ u32 truesize;
+ };
+ };
const struct libeth_rx_pt *rx_ptype_lkup;
+
+ struct xdp_rxq_info xdp_rxq;
__cacheline_group_end_aligned(read_mostly);
__cacheline_group_begin_aligned(read_write);
- u16 next_to_use;
- u16 next_to_clean;
- u16 next_to_alloc;
+ u32 next_to_use;
+ u32 next_to_clean;
+ u32 next_to_alloc;
- struct sk_buff *skb;
- u32 truesize;
+ union {
+ struct libeth_xdp_buff_stash xdp;
+ struct {
+ struct libeth_xdp_buff *xsk;
+ struct xsk_buff_pool *pool;
+ };
+ };
u64 cached_phc_time;
struct u64_stats_sync stats_sync;
@@ -543,8 +580,11 @@ struct idpf_rx_queue {
u16 rx_max_pkt_size;
__cacheline_group_end_aligned(cold);
};
-libeth_cacheline_set_assert(struct idpf_rx_queue, 64,
- 88 + sizeof(struct u64_stats_sync),
+libeth_cacheline_set_assert(struct idpf_rx_queue,
+ ALIGN(64, __alignof(struct xdp_rxq_info)) +
+ sizeof(struct xdp_rxq_info),
+ 96 + offsetof(struct idpf_rx_queue, q_stats) -
+ offsetofend(struct idpf_rx_queue, cached_phc_time),
32);
/**
@@ -556,33 +596,16 @@ libeth_cacheline_set_assert(struct idpf_rx_queue, 64,
* @desc_ring: virtual descriptor ring address
* @tx_buf: See struct idpf_tx_buf
* @txq_grp: See struct idpf_txq_group
+ * @complq: corresponding completion queue in XDP mode
* @dev: Device back pointer for DMA mapping
+ * @pool: corresponding XSk pool if installed
* @tail: Tail offset. Used for both queue models single and split
* @flags: See enum idpf_queue_flags_t
* @idx: For TX queue, it is used as index to map between TX queue group and
* hot path TX pointers stored in vport. Used in both singleq/splitq.
* @desc_count: Number of descriptors
* @tx_min_pkt_len: Min supported packet length
- * @compl_tag_gen_s: Completion tag generation bit
- * The format of the completion tag will change based on the TXQ
- * descriptor ring size so that we can maintain roughly the same level
- * of "uniqueness" across all descriptor sizes. For example, if the
- * TXQ descriptor ring size is 64 (the minimum size supported), the
- * completion tag will be formatted as below:
- * 15 6 5 0
- * --------------------------------
- * | GEN=0-1023 |IDX = 0-63|
- * --------------------------------
- *
- * This gives us 64*1024 = 65536 possible unique values. Similarly, if
- * the TXQ descriptor ring size is 8160 (the maximum size supported),
- * the completion tag will be formatted as below:
- * 15 13 12 0
- * --------------------------------
- * |GEN | IDX = 0-8159 |
- * --------------------------------
- *
- * This gives us 8*8160 = 65280 possible unique values.
+ * @thresh: XDP queue cleaning threshold
* @netdev: &net_device corresponding to this queue
* @next_to_use: Next descriptor to use
* @next_to_clean: Next descriptor to clean
@@ -599,6 +622,10 @@ libeth_cacheline_set_assert(struct idpf_rx_queue, 64,
* @clean_budget: singleq only, queue cleaning budget
* @cleaned_pkts: Number of packets cleaned for the above said case
* @refillq: Pointer to refill queue
+ * @pending: number of pending descriptors to send in QB
+ * @xdp_tx: number of pending &xdp_buff or &xdp_frame buffers
+ * @timer: timer for XDP Tx queue cleanup
+ * @xdp_lock: lock for XDP Tx queues sharing
* @cached_tstamp_caps: Tx timestamp capabilities negotiated with the CP
* @tstamp_task: Work that handles Tx timestamp read
* @stats_sync: See struct u64_stats_sync
@@ -608,6 +635,7 @@ libeth_cacheline_set_assert(struct idpf_rx_queue, 64,
* @dma: Physical address of ring
* @q_vector: Backreference to associated vector
* @buf_pool_size: Total number of idpf_tx_buf
+ * @rel_q_id: relative virtchnl queue index
*/
struct idpf_tx_queue {
__cacheline_group_begin_aligned(read_mostly);
@@ -620,32 +648,53 @@ struct idpf_tx_queue {
void *desc_ring;
};
struct libeth_sqe *tx_buf;
- struct idpf_txq_group *txq_grp;
- struct device *dev;
+ union {
+ struct idpf_txq_group *txq_grp;
+ struct idpf_compl_queue *complq;
+ };
+ union {
+ struct device *dev;
+ struct xsk_buff_pool *pool;
+ };
void __iomem *tail;
DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
u16 idx;
u16 desc_count;
- u16 tx_min_pkt_len;
+ union {
+ u16 tx_min_pkt_len;
+ u32 thresh;
+ };
struct net_device *netdev;
__cacheline_group_end_aligned(read_mostly);
__cacheline_group_begin_aligned(read_write);
- u16 next_to_use;
- u16 next_to_clean;
- u16 last_re;
- u16 tx_max_bufs;
+ u32 next_to_use;
+ u32 next_to_clean;
union {
- u32 cleaned_bytes;
- u32 clean_budget;
- };
- u16 cleaned_pkts;
+ struct {
+ u16 last_re;
+ u16 tx_max_bufs;
+
+ union {
+ u32 cleaned_bytes;
+ u32 clean_budget;
+ };
+ u16 cleaned_pkts;
- struct idpf_sw_queue *refillq;
+ struct idpf_sw_queue *refillq;
+ };
+ struct {
+ u32 pending;
+ u32 xdp_tx;
+
+ struct libeth_xdpsq_timer *timer;
+ struct libeth_xdpsq_lock xdp_lock;
+ };
+ };
struct idpf_ptp_vport_tx_tstamp_caps *cached_tstamp_caps;
struct work_struct *tstamp_task;
@@ -660,26 +709,36 @@ struct idpf_tx_queue {
dma_addr_t dma;
struct idpf_q_vector *q_vector;
+
u32 buf_pool_size;
+ u32 rel_q_id;
__cacheline_group_end_aligned(cold);
};
libeth_cacheline_set_assert(struct idpf_tx_queue, 64,
- 104 + sizeof(struct u64_stats_sync),
+ 104 +
+ offsetof(struct idpf_tx_queue, cached_tstamp_caps) -
+ offsetofend(struct idpf_tx_queue, timer) +
+ offsetof(struct idpf_tx_queue, q_stats) -
+ offsetofend(struct idpf_tx_queue, tstamp_task),
32);
/**
* struct idpf_buf_queue - software structure representing a buffer queue
* @split_buf: buffer descriptor array
- * @hdr_buf: &libeth_fqe for header buffers
- * @hdr_pp: &page_pool for header buffers
* @buf: &libeth_fqe for data buffers
* @pp: &page_pool for data buffers
+ * @xsk_buf: &xdp_buff for XSk Rx buffers
+ * @pool: &xsk_buff_pool on XSk queues
+ * @hdr_buf: &libeth_fqe for header buffers
+ * @hdr_pp: &page_pool for header buffers
* @tail: Tail offset
* @flags: See enum idpf_queue_flags_t
* @desc_count: Number of descriptors
+ * @thresh: refill threshold in XSk
* @next_to_use: Next descriptor to use
* @next_to_clean: Next descriptor to clean
* @next_to_alloc: RX buffer to allocate at
+ * @pending: number of buffers to refill (XSk)
* @hdr_truesize: truesize for buffer headers
* @truesize: truesize for data buffers
* @q_id: Queue id
@@ -693,14 +752,24 @@ libeth_cacheline_set_assert(struct idpf_tx_queue, 64,
struct idpf_buf_queue {
__cacheline_group_begin_aligned(read_mostly);
struct virtchnl2_splitq_rx_buf_desc *split_buf;
+ union {
+ struct {
+ struct libeth_fqe *buf;
+ struct page_pool *pp;
+ };
+ struct {
+ struct libeth_xdp_buff **xsk_buf;
+ struct xsk_buff_pool *pool;
+ };
+ };
struct libeth_fqe *hdr_buf;
struct page_pool *hdr_pp;
- struct libeth_fqe *buf;
- struct page_pool *pp;
void __iomem *tail;
DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
u32 desc_count;
+
+ u32 thresh;
__cacheline_group_end_aligned(read_mostly);
__cacheline_group_begin_aligned(read_write);
@@ -708,6 +777,7 @@ struct idpf_buf_queue {
u32 next_to_clean;
u32 next_to_alloc;
+ u32 pending;
u32 hdr_truesize;
u32 truesize;
__cacheline_group_end_aligned(read_write);
@@ -728,7 +798,9 @@ libeth_cacheline_set_assert(struct idpf_buf_queue, 64, 24, 32);
/**
* struct idpf_compl_queue - software structure representing a completion queue
- * @comp: completion descriptor array
+ * @comp: 8-byte completion descriptor array
+ * @comp_4b: 4-byte completion descriptor array
+ * @desc_ring: virtual descriptor ring address
* @txq_grp: See struct idpf_txq_group
* @flags: See enum idpf_queue_flags_t
* @desc_count: Number of descriptors
@@ -748,7 +820,12 @@ libeth_cacheline_set_assert(struct idpf_buf_queue, 64, 24, 32);
*/
struct idpf_compl_queue {
__cacheline_group_begin_aligned(read_mostly);
- struct idpf_splitq_tx_compl_desc *comp;
+ union {
+ struct idpf_splitq_tx_compl_desc *comp;
+ struct idpf_splitq_4b_tx_compl_desc *comp_4b;
+
+ void *desc_ring;
+ };
struct idpf_txq_group *txq_grp;
DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
@@ -1012,9 +1089,13 @@ int idpf_config_rss(struct idpf_vport *vport);
int idpf_init_rss(struct idpf_vport *vport);
void idpf_deinit_rss(struct idpf_vport *vport);
int idpf_rx_bufs_init_all(struct idpf_vport *vport);
-void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb,
- unsigned int size);
-struct sk_buff *idpf_rx_build_skb(const struct libeth_fqe *buf, u32 size);
+
+struct idpf_q_vector *idpf_find_rxq_vec(const struct idpf_vport *vport,
+ u32 q_num);
+struct idpf_q_vector *idpf_find_txq_vec(const struct idpf_vport *vport,
+ u32 q_num);
+int idpf_qp_switch(struct idpf_vport *vport, u32 qid, bool en);
+
void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val,
bool xmit_more);
unsigned int idpf_size_to_txd_count(unsigned int size);
@@ -1027,6 +1108,11 @@ netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
netdev_tx_t idpf_tx_start(struct sk_buff *skb, struct net_device *netdev);
bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rxq,
u16 cleaned_count);
+bool idpf_rx_process_skb_fields(struct sk_buff *skb,
+ const struct libeth_xdp_buff *xdp,
+ struct libeth_rq_napi_stats *rs);
int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off);
+void idpf_wait_for_sw_marker_completion(const struct idpf_tx_queue *txq);
+
#endif /* !_IDPF_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
index 259d50fded67..4cc58c83688c 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
@@ -76,7 +76,7 @@ static int idpf_vf_intr_reg_init(struct idpf_vport *vport)
int num_vecs = vport->num_q_vectors;
struct idpf_vec_regs *reg_vals;
int num_regs, i, err = 0;
- u32 rx_itr, tx_itr;
+ u32 rx_itr, tx_itr, val;
u16 total_vecs;
total_vecs = idpf_get_reserved_vecs(vport->adapter);
@@ -120,6 +120,15 @@ static int idpf_vf_intr_reg_init(struct idpf_vport *vport)
intr->tx_itr = idpf_get_reg_addr(adapter, tx_itr);
}
+ /* Data vector for NOIRQ queues */
+
+ val = reg_vals[vport->q_vector_idxs[i] - IDPF_MBX_Q_VEC].dyn_ctl_reg;
+ vport->noirq_dyn_ctl = idpf_get_reg_addr(adapter, val);
+
+ val = VF_INT_DYN_CTLN_WB_ON_ITR_M | VF_INT_DYN_CTLN_INTENA_MSK_M |
+ FIELD_PREP(VF_INT_DYN_CTLN_ITR_INDX_M, IDPF_NO_ITR_UPDATE_IDX);
+ vport->noirq_dyn_ctl_ena = val;
+
free_reg_vals:
kfree(reg_vals);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
index 6330d4a0ae07..cbb5fa30f5a0 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
@@ -702,9 +702,9 @@ int idpf_recv_mb_msg(struct idpf_adapter *adapter)
/* If post failed clear the only buffer we supplied */
if (post_err) {
if (dma_mem)
- dmam_free_coherent(&adapter->pdev->dev,
- dma_mem->size, dma_mem->va,
- dma_mem->pa);
+ dma_free_coherent(&adapter->pdev->dev,
+ dma_mem->size, dma_mem->va,
+ dma_mem->pa);
break;
}
@@ -716,34 +716,145 @@ int idpf_recv_mb_msg(struct idpf_adapter *adapter)
return err;
}
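+/**
+ * struct idpf_chunked_msg_params - parameters for sending a chunked message
+ * @prepare_msg: function that fills one message buffer and returns its size
+ * @chunks: array of chunks to send
+ * @num_chunks: number of chunks in the @chunks array
+ * @chunk_sz: size of a single chunk in bytes
+ * @config_sz: size of the fixed per-message header in bytes
+ * @vc_op: virtchnl op code of the message
+ */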
+struct idpf_chunked_msg_params {
+ u32 (*prepare_msg)(const struct idpf_vport *vport,
+ void *buf, const void *pos,
+ u32 num);
+
+ const void *chunks;
+ u32 num_chunks;
+
+ u32 chunk_sz;
+ u32 config_sz;
+
+ u32 vc_op;
+};
+
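+/**
+ * idpf_alloc_queue_set - allocate a set of queue pointers
+ * @vport: vport the queues belong to
+ * @num: number of queue pointers the set must hold
+ *
+ * Return: pointer to the allocated set or %NULL on allocation failure.
+ */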
+struct idpf_queue_set *idpf_alloc_queue_set(struct idpf_vport *vport, u32 num)
+{
+ struct idpf_queue_set *qp;
+
+ qp = kzalloc(struct_size(qp, qs, num), GFP_KERNEL);
+ if (!qp)
+ return NULL;
+
+ qp->vport = vport;
+ qp->num = num;
+
+ return qp;
+}
+
/**
- * idpf_wait_for_marker_event - wait for software marker response
+ * idpf_send_chunked_msg - send VC message consisting of chunks
* @vport: virtual port data structure
+ * @params: message params
*
- * Returns 0 success, negative on failure.
- **/
-static int idpf_wait_for_marker_event(struct idpf_vport *vport)
+ * Helper function that splits the passed chunks into a series of messages
+ * when they do not all fit into a single control queue buffer and sends
+ * each of them to the CP.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+static int idpf_send_chunked_msg(struct idpf_vport *vport,
+ const struct idpf_chunked_msg_params *params)
{
- int event;
- int i;
+ struct idpf_vc_xn_params xn_params = {
+ .vc_op = params->vc_op,
+ .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
+ };
+ const void *pos = params->chunks;
+ u32 num_chunks, num_msgs, buf_sz;
+ void *buf __free(kfree) = NULL;
+ u32 totqs = params->num_chunks;
+
+ num_chunks = min(IDPF_NUM_CHUNKS_PER_MSG(params->config_sz,
+ params->chunk_sz), totqs);
+ num_msgs = DIV_ROUND_UP(totqs, num_chunks);
- for (i = 0; i < vport->num_txq; i++)
- idpf_queue_set(SW_MARKER, vport->txqs[i]);
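+ /* Allocate once for the largest message; smaller ones reuse the buffer */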
+ buf_sz = params->config_sz + num_chunks * params->chunk_sz;
+ buf = kzalloc(buf_sz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
- event = wait_event_timeout(vport->sw_marker_wq,
- test_and_clear_bit(IDPF_VPORT_SW_MARKER,
- vport->flags),
- msecs_to_jiffies(500));
+ xn_params.send_buf.iov_base = buf;
- for (i = 0; i < vport->num_txq; i++)
- idpf_queue_clear(POLL_MODE, vport->txqs[i]);
+ for (u32 i = 0; i < num_msgs; i++) {
+ ssize_t reply_sz;
- if (event)
- return 0;
+ memset(buf, 0, buf_sz);
+ xn_params.send_buf.iov_len = buf_sz;
+
+ if (params->prepare_msg(vport, buf, pos, num_chunks) != buf_sz)
+ return -EINVAL;
+
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+
+ pos += num_chunks * params->chunk_sz;
+ totqs -= num_chunks;
+
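+ /* The final message may carry fewer chunks than the previous ones */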
+ num_chunks = min(num_chunks, totqs);
+ buf_sz = params->config_sz + num_chunks * params->chunk_sz;
+ }
+
+ return 0;
+}
+
+/**
+ * idpf_wait_for_marker_event_set - wait for software marker response for
+ * selected Tx queues
+ * @qs: set of the Tx queues
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+static int idpf_wait_for_marker_event_set(const struct idpf_queue_set *qs)
+{
+ struct idpf_tx_queue *txq;
+ bool markers_rcvd = true;
+
+ for (u32 i = 0; i < qs->num; i++) {
+ switch (qs->qs[i].type) {
+ case VIRTCHNL2_QUEUE_TYPE_TX:
+ txq = qs->qs[i].txq;
+
+ idpf_queue_set(SW_MARKER, txq);
+ idpf_wait_for_sw_marker_completion(txq);
+ markers_rcvd &= !idpf_queue_has(SW_MARKER, txq);
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (!markers_rcvd) {
+ netdev_warn(qs->vport->netdev,
+ "Failed to receive marker packets\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/**
+ * idpf_wait_for_marker_event - wait for software marker response
+ * @vport: virtual port data structure
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+static int idpf_wait_for_marker_event(struct idpf_vport *vport)
+{
+ struct idpf_queue_set *qs __free(kfree) = NULL;
+
+ qs = idpf_alloc_queue_set(vport, vport->num_txq);
+ if (!qs)
+ return -ENOMEM;
- dev_warn(&vport->adapter->pdev->dev, "Failed to receive marker packets\n");
+ for (u32 i = 0; i < qs->num; i++) {
+ qs->qs[i].type = VIRTCHNL2_QUEUE_TYPE_TX;
+ qs->qs[i].txq = vport->txqs[i];
+ }
- return -ETIMEDOUT;
+ return idpf_wait_for_marker_event_set(qs);
}
/**
@@ -1061,21 +1172,35 @@ int idpf_vport_alloc_max_qs(struct idpf_adapter *adapter,
struct idpf_avail_queue_info *avail_queues = &adapter->avail_queues;
struct virtchnl2_get_capabilities *caps = &adapter->caps;
u16 default_vports = idpf_get_default_vports(adapter);
- int max_rx_q, max_tx_q;
+ u32 max_rx_q, max_tx_q, max_buf_q, max_compl_q;
mutex_lock(&adapter->queue_lock);
+ /* Caps are device-wide. Give each vport an equal piece */
max_rx_q = le16_to_cpu(caps->max_rx_q) / default_vports;
max_tx_q = le16_to_cpu(caps->max_tx_q) / default_vports;
- if (adapter->num_alloc_vports < default_vports) {
- max_q->max_rxq = min_t(u16, max_rx_q, IDPF_MAX_Q);
- max_q->max_txq = min_t(u16, max_tx_q, IDPF_MAX_Q);
- } else {
- max_q->max_rxq = IDPF_MIN_Q;
- max_q->max_txq = IDPF_MIN_Q;
+ max_buf_q = le16_to_cpu(caps->max_rx_bufq) / default_vports;
+ max_compl_q = le16_to_cpu(caps->max_tx_complq) / default_vports;
+
+ if (adapter->num_alloc_vports >= default_vports) {
+ max_rx_q = IDPF_MIN_Q;
+ max_tx_q = IDPF_MIN_Q;
}
- max_q->max_bufq = max_q->max_rxq * IDPF_MAX_BUFQS_PER_RXQ_GRP;
- max_q->max_complq = max_q->max_txq;
+
+ /*
+ * Harmonize the numbers. The current implementation always creates
+ * `IDPF_MAX_BUFQS_PER_RXQ_GRP` buffer queues for each Rx queue and
+ * one completion queue for each Tx queue for best performance.
+ * If fewer buffer or completion queues are available, cap the number
+ * of the corresponding Rx/Tx queues.
+ */
+ max_rx_q = min(max_rx_q, max_buf_q / IDPF_MAX_BUFQS_PER_RXQ_GRP);
+ max_tx_q = min(max_tx_q, max_compl_q);
+
+ max_q->max_rxq = max_rx_q;
+ max_q->max_txq = max_tx_q;
+ max_q->max_bufq = max_rx_q * IDPF_MAX_BUFQS_PER_RXQ_GRP;
+ max_q->max_complq = max_tx_q;
if (avail_queues->avail_rxq < max_q->max_rxq ||
avail_queues->avail_txq < max_q->max_txq ||
@@ -1506,7 +1631,7 @@ int idpf_send_destroy_vport_msg(struct idpf_vport *vport)
xn_params.vc_op = VIRTCHNL2_OP_DESTROY_VPORT;
xn_params.send_buf.iov_base = &v_id;
xn_params.send_buf.iov_len = sizeof(v_id);
- xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
return reply_sz < 0 ? reply_sz : 0;
@@ -1554,236 +1679,368 @@ int idpf_send_disable_vport_msg(struct idpf_vport *vport)
xn_params.vc_op = VIRTCHNL2_OP_DISABLE_VPORT;
xn_params.send_buf.iov_base = &v_id;
xn_params.send_buf.iov_len = sizeof(v_id);
- xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
return reply_sz < 0 ? reply_sz : 0;
}
/**
- * idpf_send_config_tx_queues_msg - Send virtchnl config tx queues message
+ * idpf_fill_txq_config_chunk - fill chunk describing the Tx queue
+ * @vport: virtual port data structure
+ * @q: Tx queue to be inserted into VC chunk
+ * @qi: pointer to the buffer containing the VC chunk
+ */
+static void idpf_fill_txq_config_chunk(const struct idpf_vport *vport,
+ const struct idpf_tx_queue *q,
+ struct virtchnl2_txq_info *qi)
+{
+ u32 val;
+
+ qi->queue_id = cpu_to_le32(q->q_id);
+ qi->model = cpu_to_le16(vport->txq_model);
+ qi->type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX);
+ qi->ring_len = cpu_to_le16(q->desc_count);
+ qi->dma_ring_addr = cpu_to_le64(q->dma);
+ qi->relative_queue_id = cpu_to_le16(q->rel_q_id);
+
+ if (!idpf_is_queue_model_split(vport->txq_model)) {
+ qi->sched_mode = cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_QUEUE);
+ return;
+ }
+
+ if (idpf_queue_has(XDP, q))
+ val = q->complq->q_id;
+ else
+ val = q->txq_grp->complq->q_id;
+
+ qi->tx_compl_queue_id = cpu_to_le16(val);
+
+ if (idpf_queue_has(FLOW_SCH_EN, q))
+ val = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
+ else
+ val = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
+
+ qi->sched_mode = cpu_to_le16(val);
+}
+
+/**
+ * idpf_fill_complq_config_chunk - fill chunk describing the completion queue
+ * @vport: virtual port data structure
+ * @q: completion queue to be inserted into VC chunk
+ * @qi: pointer to the buffer containing the VC chunk
+ */
+static void idpf_fill_complq_config_chunk(const struct idpf_vport *vport,
+ const struct idpf_compl_queue *q,
+ struct virtchnl2_txq_info *qi)
+{
+ u32 val;
+
+ qi->queue_id = cpu_to_le32(q->q_id);
+ qi->model = cpu_to_le16(vport->txq_model);
+ qi->type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION);
+ qi->ring_len = cpu_to_le16(q->desc_count);
+ qi->dma_ring_addr = cpu_to_le64(q->dma);
+
+ if (idpf_queue_has(FLOW_SCH_EN, q))
+ val = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
+ else
+ val = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
+
+ qi->sched_mode = cpu_to_le16(val);
+}
+
+/**
+ * idpf_prepare_cfg_txqs_msg - prepare message to configure selected Tx queues
* @vport: virtual port data structure
+ * @buf: buffer containing the message
+ * @pos: pointer to the first chunk describing the Tx queue
+ * @num_chunks: number of chunks in the message
*
- * Send config tx queues virtchnl message. Returns 0 on success, negative on
- * failure.
+ * Helper function for preparing the message describing configuration of
+ * Tx queues.
+ *
+ * Return: the total size of the prepared message.
*/
-static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
+static u32 idpf_prepare_cfg_txqs_msg(const struct idpf_vport *vport,
+ void *buf, const void *pos,
+ u32 num_chunks)
+{
+ struct virtchnl2_config_tx_queues *ctq = buf;
+
+ ctq->vport_id = cpu_to_le32(vport->vport_id);
+ ctq->num_qinfo = cpu_to_le16(num_chunks);
+ memcpy(ctq->qinfo, pos, num_chunks * sizeof(*ctq->qinfo));
+
+ return struct_size(ctq, qinfo, num_chunks);
+}
+
+/**
+ * idpf_send_config_tx_queue_set_msg - send virtchnl config Tx queues
+ * message for selected queues
+ * @qs: set of the Tx queues to configure
+ *
+ * Send config queues virtchnl message for queues contained in the @qs array.
+ * The @qs array may contain only Tx and completion queues.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+static int idpf_send_config_tx_queue_set_msg(const struct idpf_queue_set *qs)
{
- struct virtchnl2_config_tx_queues *ctq __free(kfree) = NULL;
struct virtchnl2_txq_info *qi __free(kfree) = NULL;
- struct idpf_vc_xn_params xn_params = {};
- u32 config_sz, chunk_sz, buf_sz;
- int totqs, num_msgs, num_chunks;
- ssize_t reply_sz;
- int i, k = 0;
+ struct idpf_chunked_msg_params params = {
+ .vc_op = VIRTCHNL2_OP_CONFIG_TX_QUEUES,
+ .prepare_msg = idpf_prepare_cfg_txqs_msg,
+ .config_sz = sizeof(struct virtchnl2_config_tx_queues),
+ .chunk_sz = sizeof(*qi),
+ };
- totqs = vport->num_txq + vport->num_complq;
- qi = kcalloc(totqs, sizeof(struct virtchnl2_txq_info), GFP_KERNEL);
+ qi = kcalloc(qs->num, sizeof(*qi), GFP_KERNEL);
if (!qi)
return -ENOMEM;
- /* Populate the queue info buffer with all queue context info */
- for (i = 0; i < vport->num_txq_grp; i++) {
- struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
- int j, sched_mode;
-
- for (j = 0; j < tx_qgrp->num_txq; j++, k++) {
- qi[k].queue_id =
- cpu_to_le32(tx_qgrp->txqs[j]->q_id);
- qi[k].model =
- cpu_to_le16(vport->txq_model);
- qi[k].type =
- cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX);
- qi[k].ring_len =
- cpu_to_le16(tx_qgrp->txqs[j]->desc_count);
- qi[k].dma_ring_addr =
- cpu_to_le64(tx_qgrp->txqs[j]->dma);
- if (idpf_is_queue_model_split(vport->txq_model)) {
- struct idpf_tx_queue *q = tx_qgrp->txqs[j];
-
- qi[k].tx_compl_queue_id =
- cpu_to_le16(tx_qgrp->complq->q_id);
- qi[k].relative_queue_id = cpu_to_le16(j);
-
- if (idpf_queue_has(FLOW_SCH_EN, q))
- qi[k].sched_mode =
- cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_FLOW);
- else
- qi[k].sched_mode =
- cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_QUEUE);
- } else {
- qi[k].sched_mode =
- cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_QUEUE);
- }
- }
+ params.chunks = qi;
- if (!idpf_is_queue_model_split(vport->txq_model))
- continue;
+ for (u32 i = 0; i < qs->num; i++) {
+ if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_TX)
+ idpf_fill_txq_config_chunk(qs->vport, qs->qs[i].txq,
+ &qi[params.num_chunks++]);
+ else if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION)
+ idpf_fill_complq_config_chunk(qs->vport,
+ qs->qs[i].complq,
+ &qi[params.num_chunks++]);
+ }
- qi[k].queue_id = cpu_to_le32(tx_qgrp->complq->q_id);
- qi[k].model = cpu_to_le16(vport->txq_model);
- qi[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION);
- qi[k].ring_len = cpu_to_le16(tx_qgrp->complq->desc_count);
- qi[k].dma_ring_addr = cpu_to_le64(tx_qgrp->complq->dma);
+ return idpf_send_chunked_msg(qs->vport, &params);
+}
- if (idpf_queue_has(FLOW_SCH_EN, tx_qgrp->complq))
- sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
- else
- sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
- qi[k].sched_mode = cpu_to_le16(sched_mode);
+/**
+ * idpf_send_config_tx_queues_msg - send virtchnl config Tx queues message
+ * @vport: virtual port data structure
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
+{
+ struct idpf_queue_set *qs __free(kfree) = NULL;
+ u32 totqs = vport->num_txq + vport->num_complq;
+ u32 k = 0;
- k++;
+ qs = idpf_alloc_queue_set(vport, totqs);
+ if (!qs)
+ return -ENOMEM;
+
+ /* Populate the queue info buffer with all queue context info */
+ for (u32 i = 0; i < vport->num_txq_grp; i++) {
+ const struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+
+ for (u32 j = 0; j < tx_qgrp->num_txq; j++) {
+ qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX;
+ qs->qs[k++].txq = tx_qgrp->txqs[j];
+ }
+
+ if (idpf_is_queue_model_split(vport->txq_model)) {
+ qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+ qs->qs[k++].complq = tx_qgrp->complq;
+ }
}
/* Make sure accounting agrees */
if (k != totqs)
return -EINVAL;
- /* Chunk up the queue contexts into multiple messages to avoid
- * sending a control queue message buffer that is too large
- */
- config_sz = sizeof(struct virtchnl2_config_tx_queues);
- chunk_sz = sizeof(struct virtchnl2_txq_info);
+ return idpf_send_config_tx_queue_set_msg(qs);
+}
- num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz),
- totqs);
- num_msgs = DIV_ROUND_UP(totqs, num_chunks);
+/**
+ * idpf_fill_rxq_config_chunk - fill chunk describing the Rx queue
+ * @vport: virtual port data structure
+ * @q: Rx queue to be inserted into VC chunk
+ * @qi: pointer to the buffer containing the VC chunk
+ */
+static void idpf_fill_rxq_config_chunk(const struct idpf_vport *vport,
+ struct idpf_rx_queue *q,
+ struct virtchnl2_rxq_info *qi)
+{
+ const struct idpf_bufq_set *sets;
+
+ qi->queue_id = cpu_to_le32(q->q_id);
+ qi->model = cpu_to_le16(vport->rxq_model);
+ qi->type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
+ qi->ring_len = cpu_to_le16(q->desc_count);
+ qi->dma_ring_addr = cpu_to_le64(q->dma);
+ qi->max_pkt_size = cpu_to_le32(q->rx_max_pkt_size);
+ qi->rx_buffer_low_watermark = cpu_to_le16(q->rx_buffer_low_watermark);
+ qi->qflags = cpu_to_le16(VIRTCHNL2_RX_DESC_SIZE_32BYTE);
+ if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW))
+ qi->qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC);
+
+ if (!idpf_is_queue_model_split(vport->rxq_model)) {
+ qi->data_buffer_size = cpu_to_le32(q->rx_buf_size);
+ qi->desc_ids = cpu_to_le64(q->rxdids);
- buf_sz = struct_size(ctq, qinfo, num_chunks);
- ctq = kzalloc(buf_sz, GFP_KERNEL);
- if (!ctq)
- return -ENOMEM;
+ return;
+ }
- xn_params.vc_op = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
- xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ sets = q->bufq_sets;
- for (i = 0, k = 0; i < num_msgs; i++) {
- memset(ctq, 0, buf_sz);
- ctq->vport_id = cpu_to_le32(vport->vport_id);
- ctq->num_qinfo = cpu_to_le16(num_chunks);
- memcpy(ctq->qinfo, &qi[k], chunk_sz * num_chunks);
+ /*
+ * In splitq mode, RxQ buffer size should be set to that of the first
+ * buffer queue associated with this RxQ.
+ */
+ q->rx_buf_size = sets[0].bufq.rx_buf_size;
+ qi->data_buffer_size = cpu_to_le32(q->rx_buf_size);
- xn_params.send_buf.iov_base = ctq;
- xn_params.send_buf.iov_len = buf_sz;
- reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
- if (reply_sz < 0)
- return reply_sz;
+ qi->rx_bufq1_id = cpu_to_le16(sets[0].bufq.q_id);
+ if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) {
+ qi->bufq2_ena = IDPF_BUFQ2_ENA;
+ qi->rx_bufq2_id = cpu_to_le16(sets[1].bufq.q_id);
+ }
- k += num_chunks;
- totqs -= num_chunks;
- num_chunks = min(num_chunks, totqs);
- /* Recalculate buffer size */
- buf_sz = struct_size(ctq, qinfo, num_chunks);
+ q->rx_hbuf_size = sets[0].bufq.rx_hbuf_size;
+
+ if (idpf_queue_has(HSPLIT_EN, q)) {
+ qi->qflags |= cpu_to_le16(VIRTCHNL2_RXQ_HDR_SPLIT);
+ qi->hdr_buffer_size = cpu_to_le16(q->rx_hbuf_size);
}
- return 0;
+ qi->desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
}
/**
- * idpf_send_config_rx_queues_msg - Send virtchnl config rx queues message
+ * idpf_fill_bufq_config_chunk - fill chunk describing the buffer queue
* @vport: virtual port data structure
+ * @q: buffer queue to be inserted into VC chunk
+ * @qi: pointer to the buffer containing the VC chunk
+ */
+static void idpf_fill_bufq_config_chunk(const struct idpf_vport *vport,
+ const struct idpf_buf_queue *q,
+ struct virtchnl2_rxq_info *qi)
+{
+ qi->queue_id = cpu_to_le32(q->q_id);
+ qi->model = cpu_to_le16(vport->rxq_model);
+ qi->type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
+ qi->ring_len = cpu_to_le16(q->desc_count);
+ qi->dma_ring_addr = cpu_to_le64(q->dma);
+ qi->data_buffer_size = cpu_to_le32(q->rx_buf_size);
+ qi->rx_buffer_low_watermark = cpu_to_le16(q->rx_buffer_low_watermark);
+ qi->desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
+ qi->buffer_notif_stride = IDPF_RX_BUF_STRIDE;
+ if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW))
+ qi->qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC);
+
+ if (idpf_queue_has(HSPLIT_EN, q)) {
+ qi->qflags |= cpu_to_le16(VIRTCHNL2_RXQ_HDR_SPLIT);
+ qi->hdr_buffer_size = cpu_to_le16(q->rx_hbuf_size);
+ }
+}
+
+/**
+ * idpf_prepare_cfg_rxqs_msg - prepare message to configure selected Rx queues
+ * @vport: virtual port data structure
+ * @buf: buffer containing the message
+ * @pos: pointer to the first chunk describing the rx queue
+ * @num_chunks: number of chunks in the message
*
- * Send config rx queues virtchnl message. Returns 0 on success, negative on
- * failure.
+ * Helper function for preparing the message describing configuration of
+ * Rx queues.
+ *
+ * Return: the total size of the prepared message.
*/
-static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport)
+static u32 idpf_prepare_cfg_rxqs_msg(const struct idpf_vport *vport,
+ void *buf, const void *pos,
+ u32 num_chunks)
+{
+ struct virtchnl2_config_rx_queues *crq = buf;
+
+ crq->vport_id = cpu_to_le32(vport->vport_id);
+ crq->num_qinfo = cpu_to_le16(num_chunks);
+ memcpy(crq->qinfo, pos, num_chunks * sizeof(*crq->qinfo));
+
+ return struct_size(crq, qinfo, num_chunks);
+}
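
The prepare callbacks such as idpf_prepare_cfg_rxqs_msg() serialize exactly one message; splitting a chunk array into mailbox-sized messages, which the removed code open-coded for every opcode, is now centralized in idpf_send_chunked_msg(), whose body is outside this hunk. A minimal userspace model of that loop, assuming a fixed mailbox buffer cap; MBOX_BUF_SZ, send_chunked() and xmit() are illustrative names, not driver symbols:

#include <stddef.h>
#include <string.h>

#define MBOX_BUF_SZ	4096	/* assumed control-queue buffer size */

/* Mirrors the .prepare_msg contract: serialize num_chunks chunks starting
 * at pos into buf and return the total message size. */
typedef size_t (*prepare_cb)(void *buf, const void *pos, size_t num_chunks);

static int send_chunked(void *buf, const void *chunks, size_t chunk_sz,
			size_t config_sz, size_t total, prepare_cb prepare,
			int (*xmit)(const void *buf, size_t len))
{
	/* same per-message bound the removed IDPF_NUM_CHUNKS_PER_MSG() gave */
	size_t per_msg = (MBOX_BUF_SZ - config_sz) / chunk_sz;
	const char *pos = chunks;

	while (total) {
		size_t n = total < per_msg ? total : per_msg;
		int err;

		memset(buf, 0, MBOX_BUF_SZ);
		err = xmit(buf, prepare(buf, pos, n));
		if (err)
			return err;

		pos += n * chunk_sz;
		total -= n;
	}

	return 0;
}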
+
+/**
+ * idpf_send_config_rx_queue_set_msg - send virtchnl config Rx queues message
+ * for selected queues
+ * @qs: set of the Rx queues to configure
+ *
+ * Send config queues virtchnl message for queues contained in the @qs array.
+ * The @qs array may contain only Rx queues and their buffer queues.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+static int idpf_send_config_rx_queue_set_msg(const struct idpf_queue_set *qs)
{
- struct virtchnl2_config_rx_queues *crq __free(kfree) = NULL;
struct virtchnl2_rxq_info *qi __free(kfree) = NULL;
- struct idpf_vc_xn_params xn_params = {};
- u32 config_sz, chunk_sz, buf_sz;
- int totqs, num_msgs, num_chunks;
- ssize_t reply_sz;
- int i, k = 0;
+ struct idpf_chunked_msg_params params = {
+ .vc_op = VIRTCHNL2_OP_CONFIG_RX_QUEUES,
+ .prepare_msg = idpf_prepare_cfg_rxqs_msg,
+ .config_sz = sizeof(struct virtchnl2_config_rx_queues),
+ .chunk_sz = sizeof(*qi),
+ };
- totqs = vport->num_rxq + vport->num_bufq;
- qi = kcalloc(totqs, sizeof(struct virtchnl2_rxq_info), GFP_KERNEL);
+ qi = kcalloc(qs->num, sizeof(*qi), GFP_KERNEL);
if (!qi)
return -ENOMEM;
- /* Populate the queue info buffer with all queue context info */
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
- u16 num_rxq;
- int j;
-
- if (!idpf_is_queue_model_split(vport->rxq_model))
- goto setup_rxqs;
-
- for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) {
- struct idpf_buf_queue *bufq =
- &rx_qgrp->splitq.bufq_sets[j].bufq;
-
- qi[k].queue_id = cpu_to_le32(bufq->q_id);
- qi[k].model = cpu_to_le16(vport->rxq_model);
- qi[k].type =
- cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
- qi[k].desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
- qi[k].ring_len = cpu_to_le16(bufq->desc_count);
- qi[k].dma_ring_addr = cpu_to_le64(bufq->dma);
- qi[k].data_buffer_size = cpu_to_le32(bufq->rx_buf_size);
- qi[k].buffer_notif_stride = IDPF_RX_BUF_STRIDE;
- qi[k].rx_buffer_low_watermark =
- cpu_to_le16(bufq->rx_buffer_low_watermark);
- if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW))
- qi[k].qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC);
- }
+ params.chunks = qi;
-setup_rxqs:
- if (idpf_is_queue_model_split(vport->rxq_model))
- num_rxq = rx_qgrp->splitq.num_rxq_sets;
- else
- num_rxq = rx_qgrp->singleq.num_rxq;
+ for (u32 i = 0; i < qs->num; i++) {
+ if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_RX)
+ idpf_fill_rxq_config_chunk(qs->vport, qs->qs[i].rxq,
+ &qi[params.num_chunks++]);
+ else if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_RX_BUFFER)
+ idpf_fill_bufq_config_chunk(qs->vport, qs->qs[i].bufq,
+ &qi[params.num_chunks++]);
+ }
- for (j = 0; j < num_rxq; j++, k++) {
- const struct idpf_bufq_set *sets;
- struct idpf_rx_queue *rxq;
+ return idpf_send_chunked_msg(qs->vport, &params);
+}
- if (!idpf_is_queue_model_split(vport->rxq_model)) {
- rxq = rx_qgrp->singleq.rxqs[j];
- goto common_qi_fields;
- }
+/**
+ * idpf_send_config_rx_queues_msg - send virtchnl config Rx queues message
+ * @vport: virtual port data structure
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport)
+{
+ bool splitq = idpf_is_queue_model_split(vport->rxq_model);
+ struct idpf_queue_set *qs __free(kfree) = NULL;
+ u32 totqs = vport->num_rxq + vport->num_bufq;
+ u32 k = 0;
- rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq;
- sets = rxq->bufq_sets;
+ qs = idpf_alloc_queue_set(vport, totqs);
+ if (!qs)
+ return -ENOMEM;
- /* In splitq mode, RXQ buffer size should be
- * set to that of the first buffer queue
- * associated with this RXQ.
- */
- rxq->rx_buf_size = sets[0].bufq.rx_buf_size;
+ /* Populate the queue info buffer with all queue context info */
+ for (u32 i = 0; i < vport->num_rxq_grp; i++) {
+ const struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ u32 num_rxq;
- qi[k].rx_bufq1_id = cpu_to_le16(sets[0].bufq.q_id);
- if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) {
- qi[k].bufq2_ena = IDPF_BUFQ2_ENA;
- qi[k].rx_bufq2_id =
- cpu_to_le16(sets[1].bufq.q_id);
- }
- qi[k].rx_buffer_low_watermark =
- cpu_to_le16(rxq->rx_buffer_low_watermark);
- if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW))
- qi[k].qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC);
-
- rxq->rx_hbuf_size = sets[0].bufq.rx_hbuf_size;
-
- if (idpf_queue_has(HSPLIT_EN, rxq)) {
- qi[k].qflags |=
- cpu_to_le16(VIRTCHNL2_RXQ_HDR_SPLIT);
- qi[k].hdr_buffer_size =
- cpu_to_le16(rxq->rx_hbuf_size);
- }
+ if (!splitq) {
+ num_rxq = rx_qgrp->singleq.num_rxq;
+ goto rxq;
+ }
+
+ for (u32 j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+ qs->qs[k++].bufq = &rx_qgrp->splitq.bufq_sets[j].bufq;
+ }
+
+ num_rxq = rx_qgrp->splitq.num_rxq_sets;
+
+rxq:
+ for (u32 j = 0; j < num_rxq; j++) {
+ qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX;
-common_qi_fields:
- qi[k].queue_id = cpu_to_le32(rxq->q_id);
- qi[k].model = cpu_to_le16(vport->rxq_model);
- qi[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
- qi[k].ring_len = cpu_to_le16(rxq->desc_count);
- qi[k].dma_ring_addr = cpu_to_le64(rxq->dma);
- qi[k].max_pkt_size = cpu_to_le32(rxq->rx_max_pkt_size);
- qi[k].data_buffer_size = cpu_to_le32(rxq->rx_buf_size);
- qi[k].qflags |=
- cpu_to_le16(VIRTCHNL2_RX_DESC_SIZE_32BYTE);
- qi[k].desc_ids = cpu_to_le64(rxq->rxdids);
+ if (splitq)
+ qs->qs[k++].rxq =
+ &rx_qgrp->splitq.rxq_sets[j]->rxq;
+ else
+ qs->qs[k++].rxq = rx_qgrp->singleq.rxqs[j];
}
}
@@ -1791,317 +2048,395 @@ common_qi_fields:
if (k != totqs)
return -EINVAL;
- /* Chunk up the queue contexts into multiple messages to avoid
- * sending a control queue message buffer that is too large
- */
- config_sz = sizeof(struct virtchnl2_config_rx_queues);
- chunk_sz = sizeof(struct virtchnl2_rxq_info);
+ return idpf_send_config_rx_queue_set_msg(qs);
+}
- num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz),
- totqs);
- num_msgs = DIV_ROUND_UP(totqs, num_chunks);
+/**
+ * idpf_prepare_ena_dis_qs_msg - prepare message to enable/disable selected
+ * queues
+ * @vport: virtual port data structure
+ * @buf: buffer containing the message
+ * @pos: pointer to the first chunk describing the queue
+ * @num_chunks: number of chunks in the message
+ *
+ * Helper function for preparing the message describing queues to be enabled
+ * or disabled.
+ *
+ * Return: the total size of the prepared message.
+ */
+static u32 idpf_prepare_ena_dis_qs_msg(const struct idpf_vport *vport,
+ void *buf, const void *pos,
+ u32 num_chunks)
+{
+ struct virtchnl2_del_ena_dis_queues *eq = buf;
+
+ eq->vport_id = cpu_to_le32(vport->vport_id);
+ eq->chunks.num_chunks = cpu_to_le16(num_chunks);
+ memcpy(eq->chunks.chunks, pos,
+ num_chunks * sizeof(*eq->chunks.chunks));
+
+ return struct_size(eq, chunks.chunks, num_chunks);
+}
- buf_sz = struct_size(crq, qinfo, num_chunks);
- crq = kzalloc(buf_sz, GFP_KERNEL);
- if (!crq)
+/**
+ * idpf_send_ena_dis_queue_set_msg - send virtchnl enable or disable queues
+ * message for selected queues
+ * @qs: set of the queues to enable or disable
+ * @en: whether to enable or disable queues
+ *
+ * Send enable or disable queues virtchnl message for queues contained
+ * in the @qs array.
+ * The @qs array can contain pointers to both Rx and Tx queues.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+static int idpf_send_ena_dis_queue_set_msg(const struct idpf_queue_set *qs,
+ bool en)
+{
+ struct virtchnl2_queue_chunk *qc __free(kfree) = NULL;
+ struct idpf_chunked_msg_params params = {
+ .vc_op = en ? VIRTCHNL2_OP_ENABLE_QUEUES :
+ VIRTCHNL2_OP_DISABLE_QUEUES,
+ .prepare_msg = idpf_prepare_ena_dis_qs_msg,
+ .config_sz = sizeof(struct virtchnl2_del_ena_dis_queues),
+ .chunk_sz = sizeof(*qc),
+ .num_chunks = qs->num,
+ };
+
+ qc = kcalloc(qs->num, sizeof(*qc), GFP_KERNEL);
+ if (!qc)
return -ENOMEM;
- xn_params.vc_op = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
- xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ params.chunks = qc;
- for (i = 0, k = 0; i < num_msgs; i++) {
- memset(crq, 0, buf_sz);
- crq->vport_id = cpu_to_le32(vport->vport_id);
- crq->num_qinfo = cpu_to_le16(num_chunks);
- memcpy(crq->qinfo, &qi[k], chunk_sz * num_chunks);
+ for (u32 i = 0; i < qs->num; i++) {
+ const struct idpf_queue_ptr *q = &qs->qs[i];
+ u32 qid;
- xn_params.send_buf.iov_base = crq;
- xn_params.send_buf.iov_len = buf_sz;
- reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
- if (reply_sz < 0)
- return reply_sz;
+ qc[i].type = cpu_to_le32(q->type);
+ qc[i].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
- k += num_chunks;
- totqs -= num_chunks;
- num_chunks = min(num_chunks, totqs);
- /* Recalculate buffer size */
- buf_sz = struct_size(crq, qinfo, num_chunks);
+ switch (q->type) {
+ case VIRTCHNL2_QUEUE_TYPE_RX:
+ qid = q->rxq->q_id;
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_TX:
+ qid = q->txq->q_id;
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
+ qid = q->bufq->q_id;
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
+ qid = q->complq->q_id;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ qc[i].start_queue_id = cpu_to_le32(qid);
}
- return 0;
+ return idpf_send_chunked_msg(qs->vport, &params);
}
/**
- * idpf_send_ena_dis_queues_msg - Send virtchnl enable or disable
- * queues message
+ * idpf_send_ena_dis_queues_msg - send virtchnl enable or disable queues
+ * message
* @vport: virtual port data structure
- * @ena: if true enable, false disable
+ * @en: whether to enable or disable queues
*
- * Send enable or disable queues virtchnl message. Returns 0 on success,
- * negative on failure.
+ * Return: 0 on success, -errno on failure.
*/
-static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool ena)
+static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool en)
{
- struct virtchnl2_del_ena_dis_queues *eq __free(kfree) = NULL;
- struct virtchnl2_queue_chunk *qc __free(kfree) = NULL;
- u32 num_msgs, num_chunks, num_txq, num_rxq, num_q;
- struct idpf_vc_xn_params xn_params = {};
- struct virtchnl2_queue_chunks *qcs;
- u32 config_sz, chunk_sz, buf_sz;
- ssize_t reply_sz;
- int i, j, k = 0;
+ struct idpf_queue_set *qs __free(kfree) = NULL;
+ u32 num_txq, num_q, k = 0;
+ bool split;
num_txq = vport->num_txq + vport->num_complq;
- num_rxq = vport->num_rxq + vport->num_bufq;
- num_q = num_txq + num_rxq;
- buf_sz = sizeof(struct virtchnl2_queue_chunk) * num_q;
- qc = kzalloc(buf_sz, GFP_KERNEL);
- if (!qc)
+ num_q = num_txq + vport->num_rxq + vport->num_bufq;
+
+ qs = idpf_alloc_queue_set(vport, num_q);
+ if (!qs)
return -ENOMEM;
- for (i = 0; i < vport->num_txq_grp; i++) {
- struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+ split = idpf_is_queue_model_split(vport->txq_model);
- for (j = 0; j < tx_qgrp->num_txq; j++, k++) {
- qc[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX);
- qc[k].start_queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id);
- qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
- }
- }
- if (vport->num_txq != k)
- return -EINVAL;
+ for (u32 i = 0; i < vport->num_txq_grp; i++) {
+ const struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
- if (!idpf_is_queue_model_split(vport->txq_model))
- goto setup_rx;
+ for (u32 j = 0; j < tx_qgrp->num_txq; j++) {
+ qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX;
+ qs->qs[k++].txq = tx_qgrp->txqs[j];
+ }
- for (i = 0; i < vport->num_txq_grp; i++, k++) {
- struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+ if (!split)
+ continue;
- qc[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION);
- qc[k].start_queue_id = cpu_to_le32(tx_qgrp->complq->q_id);
- qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
+ qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+ qs->qs[k++].complq = tx_qgrp->complq;
}
- if (vport->num_complq != (k - vport->num_txq))
+
+ if (k != num_txq)
return -EINVAL;
-setup_rx:
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ split = idpf_is_queue_model_split(vport->rxq_model);
- if (idpf_is_queue_model_split(vport->rxq_model))
+ for (u32 i = 0; i < vport->num_rxq_grp; i++) {
+ const struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ u32 num_rxq;
+
+ if (split)
num_rxq = rx_qgrp->splitq.num_rxq_sets;
else
num_rxq = rx_qgrp->singleq.num_rxq;
- for (j = 0; j < num_rxq; j++, k++) {
- if (idpf_is_queue_model_split(vport->rxq_model)) {
- qc[k].start_queue_id =
- cpu_to_le32(rx_qgrp->splitq.rxq_sets[j]->rxq.q_id);
- qc[k].type =
- cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
- } else {
- qc[k].start_queue_id =
- cpu_to_le32(rx_qgrp->singleq.rxqs[j]->q_id);
- qc[k].type =
- cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
- }
- qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
- }
- }
- if (vport->num_rxq != k - (vport->num_txq + vport->num_complq))
- return -EINVAL;
-
- if (!idpf_is_queue_model_split(vport->rxq_model))
- goto send_msg;
+ for (u32 j = 0; j < num_rxq; j++) {
+ qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX;
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ if (split)
+ qs->qs[k++].rxq =
+ &rx_qgrp->splitq.rxq_sets[j]->rxq;
+ else
+ qs->qs[k++].rxq = rx_qgrp->singleq.rxqs[j];
+ }
- for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) {
- const struct idpf_buf_queue *q;
+ if (!split)
+ continue;
- q = &rx_qgrp->splitq.bufq_sets[j].bufq;
- qc[k].type =
- cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
- qc[k].start_queue_id = cpu_to_le32(q->q_id);
- qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
+ for (u32 j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+ qs->qs[k++].bufq = &rx_qgrp->splitq.bufq_sets[j].bufq;
}
}
- if (vport->num_bufq != k - (vport->num_txq +
- vport->num_complq +
- vport->num_rxq))
+
+ if (k != num_q)
return -EINVAL;
-send_msg:
- /* Chunk up the queue info into multiple messages */
- config_sz = sizeof(struct virtchnl2_del_ena_dis_queues);
- chunk_sz = sizeof(struct virtchnl2_queue_chunk);
+ return idpf_send_ena_dis_queue_set_msg(qs, en);
+}
+
+/**
+ * idpf_prep_map_unmap_queue_set_vector_msg - prepare message to map or unmap
+ * a queue set to/from interrupt vectors
+ * @vport: virtual port data structure
+ * @buf: buffer containing the message
+ * @pos: pointer to the first chunk describing the vector mapping
+ * @num_chunks: number of chunks in the message
+ *
+ * Helper function for preparing the message describing mapping queues to
+ * q_vectors.
+ *
+ * Return: the total size of the prepared message.
+ */
+static u32
+idpf_prep_map_unmap_queue_set_vector_msg(const struct idpf_vport *vport,
+ void *buf, const void *pos,
+ u32 num_chunks)
+{
+ struct virtchnl2_queue_vector_maps *vqvm = buf;
+
+ vqvm->vport_id = cpu_to_le32(vport->vport_id);
+ vqvm->num_qv_maps = cpu_to_le16(num_chunks);
+ memcpy(vqvm->qv_maps, pos, num_chunks * sizeof(*vqvm->qv_maps));
- num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz),
- num_q);
- num_msgs = DIV_ROUND_UP(num_q, num_chunks);
+ return struct_size(vqvm, qv_maps, num_chunks);
+}
- buf_sz = struct_size(eq, chunks.chunks, num_chunks);
- eq = kzalloc(buf_sz, GFP_KERNEL);
- if (!eq)
+/**
+ * idpf_send_map_unmap_queue_set_vector_msg - send virtchnl map or unmap
+ * queue set vector message
+ * @qs: set of the queues to map or unmap
+ * @map: true for map and false for unmap
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+static int
+idpf_send_map_unmap_queue_set_vector_msg(const struct idpf_queue_set *qs,
+ bool map)
+{
+ struct virtchnl2_queue_vector *vqv __free(kfree) = NULL;
+ struct idpf_chunked_msg_params params = {
+ .vc_op = map ? VIRTCHNL2_OP_MAP_QUEUE_VECTOR :
+ VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR,
+ .prepare_msg = idpf_prep_map_unmap_queue_set_vector_msg,
+ .config_sz = sizeof(struct virtchnl2_queue_vector_maps),
+ .chunk_sz = sizeof(*vqv),
+ .num_chunks = qs->num,
+ };
+ bool split;
+
+ vqv = kcalloc(qs->num, sizeof(*vqv), GFP_KERNEL);
+ if (!vqv)
return -ENOMEM;
- if (ena) {
- xn_params.vc_op = VIRTCHNL2_OP_ENABLE_QUEUES;
- xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
- } else {
- xn_params.vc_op = VIRTCHNL2_OP_DISABLE_QUEUES;
- xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
- }
+ params.chunks = vqv;
- for (i = 0, k = 0; i < num_msgs; i++) {
- memset(eq, 0, buf_sz);
- eq->vport_id = cpu_to_le32(vport->vport_id);
- eq->chunks.num_chunks = cpu_to_le16(num_chunks);
- qcs = &eq->chunks;
- memcpy(qcs->chunks, &qc[k], chunk_sz * num_chunks);
+ split = idpf_is_queue_model_split(qs->vport->txq_model);
- xn_params.send_buf.iov_base = eq;
- xn_params.send_buf.iov_len = buf_sz;
- reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
- if (reply_sz < 0)
- return reply_sz;
+ for (u32 i = 0; i < qs->num; i++) {
+ const struct idpf_queue_ptr *q = &qs->qs[i];
+ const struct idpf_q_vector *vec;
+ u32 qid, v_idx, itr_idx;
- k += num_chunks;
- num_q -= num_chunks;
- num_chunks = min(num_chunks, num_q);
- /* Recalculate buffer size */
- buf_sz = struct_size(eq, chunks.chunks, num_chunks);
+ vqv[i].queue_type = cpu_to_le32(q->type);
+
+ switch (q->type) {
+ case VIRTCHNL2_QUEUE_TYPE_RX:
+ qid = q->rxq->q_id;
+
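+ /* NOIRQ queues are serviced by polling: bind them to the reserved
+ * no-interrupt vector instead of a real q_vector. */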
+ if (idpf_queue_has(NOIRQ, q->rxq))
+ vec = NULL;
+ else
+ vec = q->rxq->q_vector;
+
+ if (vec) {
+ v_idx = vec->v_idx;
+ itr_idx = vec->rx_itr_idx;
+ } else {
+ v_idx = qs->vport->noirq_v_idx;
+ itr_idx = VIRTCHNL2_ITR_IDX_0;
+ }
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_TX:
+ qid = q->txq->q_id;
+
+ if (idpf_queue_has(NOIRQ, q->txq))
+ vec = NULL;
+ else if (idpf_queue_has(XDP, q->txq))
+ vec = q->txq->complq->q_vector;
+ else if (split)
+ vec = q->txq->txq_grp->complq->q_vector;
+ else
+ vec = q->txq->q_vector;
+
+ if (vec) {
+ v_idx = vec->v_idx;
+ itr_idx = vec->tx_itr_idx;
+ } else {
+ v_idx = qs->vport->noirq_v_idx;
+ itr_idx = VIRTCHNL2_ITR_IDX_1;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ vqv[i].queue_id = cpu_to_le32(qid);
+ vqv[i].vector_id = cpu_to_le16(v_idx);
+ vqv[i].itr_idx = cpu_to_le32(itr_idx);
}
- return 0;
+ return idpf_send_chunked_msg(qs->vport, &params);
}
/**
- * idpf_send_map_unmap_queue_vector_msg - Send virtchnl map or unmap queue
- * vector message
+ * idpf_send_map_unmap_queue_vector_msg - send virtchnl map or unmap queue
+ * vector message
* @vport: virtual port data structure
* @map: true for map and false for unmap
*
- * Send map or unmap queue vector virtchnl message. Returns 0 on success,
- * negative on failure.
+ * Return: 0 on success, -errno on failure.
*/
int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
{
- struct virtchnl2_queue_vector_maps *vqvm __free(kfree) = NULL;
- struct virtchnl2_queue_vector *vqv __free(kfree) = NULL;
- struct idpf_vc_xn_params xn_params = {};
- u32 config_sz, chunk_sz, buf_sz;
- u32 num_msgs, num_chunks, num_q;
- ssize_t reply_sz;
- int i, j, k = 0;
+ struct idpf_queue_set *qs __free(kfree) = NULL;
+ u32 num_q = vport->num_txq + vport->num_rxq;
+ u32 k = 0;
- num_q = vport->num_txq + vport->num_rxq;
-
- buf_sz = sizeof(struct virtchnl2_queue_vector) * num_q;
- vqv = kzalloc(buf_sz, GFP_KERNEL);
- if (!vqv)
+ qs = idpf_alloc_queue_set(vport, num_q);
+ if (!qs)
return -ENOMEM;
- for (i = 0; i < vport->num_txq_grp; i++) {
- struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
-
- for (j = 0; j < tx_qgrp->num_txq; j++, k++) {
- vqv[k].queue_type =
- cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX);
- vqv[k].queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id);
+ for (u32 i = 0; i < vport->num_txq_grp; i++) {
+ const struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
- if (idpf_is_queue_model_split(vport->txq_model)) {
- vqv[k].vector_id =
- cpu_to_le16(tx_qgrp->complq->q_vector->v_idx);
- vqv[k].itr_idx =
- cpu_to_le32(tx_qgrp->complq->q_vector->tx_itr_idx);
- } else {
- vqv[k].vector_id =
- cpu_to_le16(tx_qgrp->txqs[j]->q_vector->v_idx);
- vqv[k].itr_idx =
- cpu_to_le32(tx_qgrp->txqs[j]->q_vector->tx_itr_idx);
- }
+ for (u32 j = 0; j < tx_qgrp->num_txq; j++) {
+ qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX;
+ qs->qs[k++].txq = tx_qgrp->txqs[j];
}
}
- if (vport->num_txq != k)
+ if (k != vport->num_txq)
return -EINVAL;
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
- u16 num_rxq;
+ for (u32 i = 0; i < vport->num_rxq_grp; i++) {
+ const struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ u32 num_rxq;
if (idpf_is_queue_model_split(vport->rxq_model))
num_rxq = rx_qgrp->splitq.num_rxq_sets;
else
num_rxq = rx_qgrp->singleq.num_rxq;
- for (j = 0; j < num_rxq; j++, k++) {
- struct idpf_rx_queue *rxq;
+ for (u32 j = 0; j < num_rxq; j++) {
+ qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX;
if (idpf_is_queue_model_split(vport->rxq_model))
- rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq;
+ qs->qs[k++].rxq =
+ &rx_qgrp->splitq.rxq_sets[j]->rxq;
else
- rxq = rx_qgrp->singleq.rxqs[j];
-
- vqv[k].queue_type =
- cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
- vqv[k].queue_id = cpu_to_le32(rxq->q_id);
- vqv[k].vector_id = cpu_to_le16(rxq->q_vector->v_idx);
- vqv[k].itr_idx = cpu_to_le32(rxq->q_vector->rx_itr_idx);
+ qs->qs[k++].rxq = rx_qgrp->singleq.rxqs[j];
}
}
- if (idpf_is_queue_model_split(vport->txq_model)) {
- if (vport->num_rxq != k - vport->num_complq)
- return -EINVAL;
- } else {
- if (vport->num_rxq != k - vport->num_txq)
- return -EINVAL;
- }
+ if (k != num_q)
+ return -EINVAL;
- /* Chunk up the vector info into multiple messages */
- config_sz = sizeof(struct virtchnl2_queue_vector_maps);
- chunk_sz = sizeof(struct virtchnl2_queue_vector);
+ return idpf_send_map_unmap_queue_set_vector_msg(qs, map);
+}
- num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz),
- num_q);
- num_msgs = DIV_ROUND_UP(num_q, num_chunks);
+/**
+ * idpf_send_enable_queue_set_msg - send enable queues virtchnl message for
+ * selected queues
+ * @qs: set of the queues
+ *
+ * Send enable queues virtchnl message for queues contained in the @qs array.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+int idpf_send_enable_queue_set_msg(const struct idpf_queue_set *qs)
+{
+ return idpf_send_ena_dis_queue_set_msg(qs, true);
+}
- buf_sz = struct_size(vqvm, qv_maps, num_chunks);
- vqvm = kzalloc(buf_sz, GFP_KERNEL);
- if (!vqvm)
- return -ENOMEM;
+/**
+ * idpf_send_disable_queue_set_msg - send disable queues virtchnl message for
+ * selected queues
+ * @qs: set of the queues
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+int idpf_send_disable_queue_set_msg(const struct idpf_queue_set *qs)
+{
+ int err;
- if (map) {
- xn_params.vc_op = VIRTCHNL2_OP_MAP_QUEUE_VECTOR;
- xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
- } else {
- xn_params.vc_op = VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR;
- xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
- }
+ err = idpf_send_ena_dis_queue_set_msg(qs, false);
+ if (err)
+ return err;
- for (i = 0, k = 0; i < num_msgs; i++) {
- memset(vqvm, 0, buf_sz);
- xn_params.send_buf.iov_base = vqvm;
- xn_params.send_buf.iov_len = buf_sz;
- vqvm->vport_id = cpu_to_le32(vport->vport_id);
- vqvm->num_qv_maps = cpu_to_le16(num_chunks);
- memcpy(vqvm->qv_maps, &vqv[k], chunk_sz * num_chunks);
+ return idpf_wait_for_marker_event_set(qs);
+}
- reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
- if (reply_sz < 0)
- return reply_sz;
+/**
+ * idpf_send_config_queue_set_msg - send virtchnl config queues message for
+ * selected queues
+ * @qs: set of the queues
+ *
+ * Send config queues virtchnl message for queues contained in the @qs array.
+ * The @qs array can contain both Rx and Tx queues.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+int idpf_send_config_queue_set_msg(const struct idpf_queue_set *qs)
+{
+ int err;
- k += num_chunks;
- num_q -= num_chunks;
- num_chunks = min(num_chunks, num_q);
- /* Recalculate buffer size */
- buf_sz = struct_size(vqvm, qv_maps, num_chunks);
- }
+ err = idpf_send_config_tx_queue_set_msg(qs);
+ if (err)
+ return err;
- return 0;
+ return idpf_send_config_rx_queue_set_msg(qs);
}
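
With configuration, enable and disable now exposed per queue set, callers can touch individual queues without a full vport rebuild. A hypothetical caller composed only of the helpers declared in idpf_virtchnl.h below; the Rx queue/buffer queue pairing is illustrative:

/* Sketch: reconfigure and re-enable one Rx queue together with its buffer
 * queue. Locking and prior teardown are assumed to happen elsewhere. */
static int idpf_cfg_enable_rx_pair(struct idpf_vport *vport,
				   struct idpf_rx_queue *rxq,
				   struct idpf_buf_queue *bufq)
{
	struct idpf_queue_set *qs __free(kfree) = NULL;
	int err;

	qs = idpf_alloc_queue_set(vport, 2);
	if (!qs)
		return -ENOMEM;

	qs->qs[0].type = VIRTCHNL2_QUEUE_TYPE_RX;
	qs->qs[0].rxq = rxq;
	qs->qs[1].type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
	qs->qs[1].bufq = bufq;

	err = idpf_send_config_queue_set_msg(qs);
	if (err)
		return err;

	return idpf_send_enable_queue_set_msg(qs);
}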
/**
@@ -2125,24 +2460,12 @@ int idpf_send_enable_queues_msg(struct idpf_vport *vport)
*/
int idpf_send_disable_queues_msg(struct idpf_vport *vport)
{
- int err, i;
+ int err;
err = idpf_send_ena_dis_queues_msg(vport, false);
if (err)
return err;
- /* switch to poll mode as interrupts will be disabled after disable
- * queues virtchnl message is sent
- */
- for (i = 0; i < vport->num_txq; i++)
- idpf_queue_set(POLL_MODE, vport->txqs[i]);
-
- /* schedule the napi to receive all the marker packets */
- local_bh_disable();
- for (i = 0; i < vport->num_q_vectors; i++)
- napi_schedule(&vport->q_vectors[i].napi);
- local_bh_enable();
-
return idpf_wait_for_marker_event(vport);
}
@@ -2207,7 +2530,7 @@ int idpf_send_delete_queues_msg(struct idpf_vport *vport)
num_chunks);
xn_params.vc_op = VIRTCHNL2_OP_DEL_QUEUES;
- xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
xn_params.send_buf.iov_base = eq;
xn_params.send_buf.iov_len = buf_size;
reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
@@ -2371,7 +2694,7 @@ int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter)
xn_params.vc_op = VIRTCHNL2_OP_DEALLOC_VECTORS;
xn_params.send_buf.iov_base = vcs;
xn_params.send_buf.iov_len = buf_size;
- xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
if (reply_sz < 0)
return reply_sz;
@@ -3285,9 +3608,17 @@ int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport)
{
struct idpf_vector_info vec_info;
int num_alloc_vecs;
+ u32 req;
vec_info.num_curr_vecs = vport->num_q_vectors;
- vec_info.num_req_vecs = max(vport->num_txq, vport->num_rxq);
+ if (vec_info.num_curr_vecs)
+ vec_info.num_curr_vecs += IDPF_RESERVED_VECS;
+
+ /* XDPSQs are all bound to the NOIRQ vector from IDPF_RESERVED_VECS */
+ req = max(vport->num_txq - vport->num_xdp_txq, vport->num_rxq) +
+ IDPF_RESERVED_VECS;
+ vec_info.num_req_vecs = req;
+
vec_info.default_vport = vport->default_vport;
vec_info.index = vport->idx;
@@ -3300,7 +3631,7 @@ int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport)
return -EINVAL;
}
- vport->num_q_vectors = num_alloc_vecs;
+ vport->num_q_vectors = num_alloc_vecs - IDPF_RESERVED_VECS;
return 0;
}
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
index 86f30f0db07a..eac3d15daa42 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
@@ -4,7 +4,8 @@
#ifndef _IDPF_VIRTCHNL_H_
#define _IDPF_VIRTCHNL_H_
-#define IDPF_VC_XN_MIN_TIMEOUT_MSEC 2000
+#include "virtchnl2.h"
+
#define IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC (60 * 1000)
#define IDPF_VC_XN_IDX_M GENMASK(7, 0)
#define IDPF_VC_XN_SALT_M GENMASK(15, 8)
@@ -115,6 +116,33 @@ int idpf_recv_mb_msg(struct idpf_adapter *adapter);
int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
u16 msg_size, u8 *msg, u16 cookie);
+struct idpf_queue_ptr {
+ enum virtchnl2_queue_type type;
+ union {
+ struct idpf_rx_queue *rxq;
+ struct idpf_tx_queue *txq;
+ struct idpf_buf_queue *bufq;
+ struct idpf_compl_queue *complq;
+ };
+};
+
+struct idpf_queue_set {
+ struct idpf_vport *vport;
+
+ u32 num;
+ struct idpf_queue_ptr qs[] __counted_by(num);
+};
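
__counted_by(num) lets FORTIFY_SOURCE and UBSAN bounds-check accesses to qs[] against @num, which is why any allocator must set @num before the array is filled. idpf_alloc_queue_set() is only declared here; a plausible sketch of its body, assuming the usual struct_size()-based flexible-array allocation:

struct idpf_queue_set *idpf_alloc_queue_set(struct idpf_vport *vport, u32 num)
{
	struct idpf_queue_set *qs;

	/* overflow-checked sizeof(*qs) + num * sizeof(qs->qs[0]) */
	qs = kzalloc(struct_size(qs, qs, num), GFP_KERNEL);
	if (!qs)
		return NULL;

	qs->vport = vport;
	qs->num = num;

	return qs;
}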
+
+struct idpf_queue_set *idpf_alloc_queue_set(struct idpf_vport *vport, u32 num);
+
+int idpf_send_enable_queue_set_msg(const struct idpf_queue_set *qs);
+int idpf_send_disable_queue_set_msg(const struct idpf_queue_set *qs);
+int idpf_send_config_queue_set_msg(const struct idpf_queue_set *qs);
+
+int idpf_send_disable_queues_msg(struct idpf_vport *vport);
+int idpf_send_config_queues_msg(struct idpf_vport *vport);
+int idpf_send_enable_queues_msg(struct idpf_vport *vport);
+
void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q);
u32 idpf_get_vport_id(struct idpf_vport *vport);
int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
@@ -131,9 +159,6 @@ void idpf_vport_dealloc_max_qs(struct idpf_adapter *adapter,
int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
u16 num_complq, u16 num_rx_q, u16 num_rx_bufq);
int idpf_send_delete_queues_msg(struct idpf_vport *vport);
-int idpf_send_enable_queues_msg(struct idpf_vport *vport);
-int idpf_send_disable_queues_msg(struct idpf_vport *vport);
-int idpf_send_config_queues_msg(struct idpf_vport *vport);
int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport);
int idpf_get_vec_ids(struct idpf_adapter *adapter,
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c
index 4f1fb0cefe51..8a2e0f8c5e36 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c
@@ -521,6 +521,10 @@ idpf_ptp_get_tstamp_value(struct idpf_vport *vport,
list_add(&ptp_tx_tstamp->list_member,
&tx_tstamp_caps->latches_free);
+ u64_stats_update_begin(&vport->tstamp_stats.stats_sync);
+ u64_stats_inc(&vport->tstamp_stats.packets);
+ u64_stats_update_end(&vport->tstamp_stats.stats_sync);
+
return 0;
}
diff --git a/drivers/net/ethernet/intel/idpf/xdp.c b/drivers/net/ethernet/intel/idpf/xdp.c
new file mode 100644
index 000000000000..21ce25b0567f
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/xdp.c
@@ -0,0 +1,486 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2025 Intel Corporation */
+
+#include "idpf.h"
+#include "idpf_virtchnl.h"
+#include "xdp.h"
+#include "xsk.h"
+
+static int idpf_rxq_for_each(const struct idpf_vport *vport,
+ int (*fn)(struct idpf_rx_queue *rxq, void *arg),
+ void *arg)
+{
+ bool splitq = idpf_is_queue_model_split(vport->rxq_model);
+
+ if (!vport->rxq_grps)
+ return -ENETDOWN;
+
+ for (u32 i = 0; i < vport->num_rxq_grp; i++) {
+ const struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ u32 num_rxq;
+
+ if (splitq)
+ num_rxq = rx_qgrp->splitq.num_rxq_sets;
+ else
+ num_rxq = rx_qgrp->singleq.num_rxq;
+
+ for (u32 j = 0; j < num_rxq; j++) {
+ struct idpf_rx_queue *q;
+ int err;
+
+ if (splitq)
+ q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
+ else
+ q = rx_qgrp->singleq.rxqs[j];
+
+ err = fn(q, arg);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
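
Every per-Rx-queue operation in this file funnels through this iterator, so the callbacks never see the splitq/singleq group layout. A hypothetical callback showing the contract; idpf_count_rxq() is invented for illustration:

/* Count the vport's Rx queues; @arg carries the accumulator. */
static int idpf_count_rxq(struct idpf_rx_queue *rxq, void *arg)
{
	(*(u32 *)arg)++;

	return 0;	/* a non-zero return would abort the walk */
}

/* usage: u32 n = 0; int err = idpf_rxq_for_each(vport, idpf_count_rxq, &n); */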
+
+static int __idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq, void *arg)
+{
+ const struct idpf_vport *vport = rxq->q_vector->vport;
+ bool split = idpf_is_queue_model_split(vport->rxq_model);
+ int err;
+
+ err = __xdp_rxq_info_reg(&rxq->xdp_rxq, vport->netdev, rxq->idx,
+ rxq->q_vector->napi.napi_id,
+ rxq->rx_buf_size);
+ if (err)
+ return err;
+
+ if (idpf_queue_has(XSK, rxq)) {
+ err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq,
+ MEM_TYPE_XSK_BUFF_POOL,
+ rxq->pool);
+ if (err)
+ goto unreg;
+ } else {
+ const struct page_pool *pp;
+
+ pp = split ? rxq->bufq_sets[0].bufq.pp : rxq->pp;
+ xdp_rxq_info_attach_page_pool(&rxq->xdp_rxq, pp);
+ }
+
+ if (!split)
+ return 0;
+
+ rxq->xdpsqs = &vport->txqs[vport->xdp_txq_offset];
+ rxq->num_xdp_txq = vport->num_xdp_txq;
+
+ return 0;
+
+unreg:
+ xdp_rxq_info_unreg(&rxq->xdp_rxq);
+
+ return err;
+}
+
+int idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq)
+{
+ return __idpf_xdp_rxq_info_init(rxq, NULL);
+}
+
+int idpf_xdp_rxq_info_init_all(const struct idpf_vport *vport)
+{
+ return idpf_rxq_for_each(vport, __idpf_xdp_rxq_info_init, NULL);
+}
+
+static int __idpf_xdp_rxq_info_deinit(struct idpf_rx_queue *rxq, void *arg)
+{
+ if (idpf_is_queue_model_split((size_t)arg)) {
+ rxq->xdpsqs = NULL;
+ rxq->num_xdp_txq = 0;
+ }
+
+ if (!idpf_queue_has(XSK, rxq))
+ xdp_rxq_info_detach_mem_model(&rxq->xdp_rxq);
+
+ xdp_rxq_info_unreg(&rxq->xdp_rxq);
+
+ return 0;
+}
+
+void idpf_xdp_rxq_info_deinit(struct idpf_rx_queue *rxq, u32 model)
+{
+ __idpf_xdp_rxq_info_deinit(rxq, (void *)(size_t)model);
+}
+
+void idpf_xdp_rxq_info_deinit_all(const struct idpf_vport *vport)
+{
+ idpf_rxq_for_each(vport, __idpf_xdp_rxq_info_deinit,
+ (void *)(size_t)vport->rxq_model);
+}
+
+static int idpf_xdp_rxq_assign_prog(struct idpf_rx_queue *rxq, void *arg)
+{
+ struct bpf_prog *prog = arg;
+ struct bpf_prog *old;
+
+ if (prog)
+ bpf_prog_inc(prog);
+
+ old = rcu_replace_pointer(rxq->xdp_prog, prog, lockdep_rtnl_is_held());
+ if (old)
+ bpf_prog_put(old);
+
+ return 0;
+}
+
+void idpf_xdp_copy_prog_to_rqs(const struct idpf_vport *vport,
+ struct bpf_prog *xdp_prog)
+{
+ idpf_rxq_for_each(vport, idpf_xdp_rxq_assign_prog, xdp_prog);
+}
+
+static void idpf_xdp_tx_timer(struct work_struct *work);
+
+int idpf_xdpsqs_get(const struct idpf_vport *vport)
+{
+ struct libeth_xdpsq_timer **timers __free(kvfree) = NULL;
+ struct net_device *dev;
+ u32 sqs;
+
+ if (!idpf_xdp_enabled(vport))
+ return 0;
+
+ timers = kvcalloc(vport->num_xdp_txq, sizeof(*timers), GFP_KERNEL);
+ if (!timers)
+ return -ENOMEM;
+
+ for (u32 i = 0; i < vport->num_xdp_txq; i++) {
+ timers[i] = kzalloc_node(sizeof(*timers[i]), GFP_KERNEL,
+ cpu_to_mem(i));
+ if (!timers[i]) {
+ for (int j = i - 1; j >= 0; j--)
+ kfree(timers[j]);
+
+ return -ENOMEM;
+ }
+ }
+
+ dev = vport->netdev;
+ sqs = vport->xdp_txq_offset;
+
+ for (u32 i = sqs; i < vport->num_txq; i++) {
+ struct idpf_tx_queue *xdpsq = vport->txqs[i];
+
+ xdpsq->complq = xdpsq->txq_grp->complq;
+ kfree(xdpsq->refillq);
+ xdpsq->refillq = NULL;
+
+ idpf_queue_clear(FLOW_SCH_EN, xdpsq);
+ idpf_queue_clear(FLOW_SCH_EN, xdpsq->complq);
+ idpf_queue_set(NOIRQ, xdpsq);
+ idpf_queue_set(XDP, xdpsq);
+ idpf_queue_set(XDP, xdpsq->complq);
+
+ xdpsq->timer = timers[i - sqs];
+ libeth_xdpsq_get(&xdpsq->xdp_lock, dev, vport->xdpsq_share);
+ libeth_xdpsq_init_timer(xdpsq->timer, xdpsq, &xdpsq->xdp_lock,
+ idpf_xdp_tx_timer);
+
+ xdpsq->pending = 0;
+ xdpsq->xdp_tx = 0;
+ xdpsq->thresh = libeth_xdp_queue_threshold(xdpsq->desc_count);
+ }
+
+ return 0;
+}
+
+void idpf_xdpsqs_put(const struct idpf_vport *vport)
+{
+ struct net_device *dev;
+ u32 sqs;
+
+ if (!idpf_xdp_enabled(vport))
+ return;
+
+ dev = vport->netdev;
+ sqs = vport->xdp_txq_offset;
+
+ for (u32 i = sqs; i < vport->num_txq; i++) {
+ struct idpf_tx_queue *xdpsq = vport->txqs[i];
+
+ if (!idpf_queue_has_clear(XDP, xdpsq))
+ continue;
+
+ libeth_xdpsq_deinit_timer(xdpsq->timer);
+ libeth_xdpsq_put(&xdpsq->xdp_lock, dev);
+
+ kfree(xdpsq->timer);
+ xdpsq->refillq = NULL;
+ idpf_queue_clear(NOIRQ, xdpsq);
+ }
+}
+
+static int idpf_xdp_parse_cqe(const struct idpf_splitq_4b_tx_compl_desc *desc,
+ bool gen)
+{
+ u32 val;
+
+#ifdef __LIBETH_WORD_ACCESS
+ val = *(const u32 *)desc;
+#else
+ val = ((u32)le16_to_cpu(desc->q_head_compl_tag.q_head) << 16) |
+ le16_to_cpu(desc->qid_comptype_gen);
+#endif
+ if (!!(val & IDPF_TXD_COMPLQ_GEN_M) != gen)
+ return -ENODATA;
+
+ if (unlikely((val & GENMASK(IDPF_TXD_COMPLQ_GEN_S - 1, 0)) !=
+ FIELD_PREP(IDPF_TXD_COMPLQ_COMPL_TYPE_M,
+ IDPF_TXD_COMPLT_RS)))
+ return -EINVAL;
+
+ return upper_16_bits(val);
+}
+
+u32 idpf_xdpsq_poll(struct idpf_tx_queue *xdpsq, u32 budget)
+{
+ struct idpf_compl_queue *cq = xdpsq->complq;
+ u32 tx_ntc = xdpsq->next_to_clean;
+ u32 tx_cnt = xdpsq->desc_count;
+ u32 ntc = cq->next_to_clean;
+ u32 cnt = cq->desc_count;
+ u32 done_frames;
+ bool gen;
+
+ gen = idpf_queue_has(GEN_CHK, cq);
+
+ for (done_frames = 0; done_frames < budget; ) {
+ int ret;
+
+ ret = idpf_xdp_parse_cqe(&cq->comp_4b[ntc], gen);
+ if (ret >= 0) {
+ done_frames = ret > tx_ntc ? ret - tx_ntc :
+ ret + tx_cnt - tx_ntc;
+ goto next;
+ }
+
+ switch (ret) {
+ case -ENODATA:
+ goto out;
+ case -EINVAL:
+ break;
+ }
+
+next:
+ if (unlikely(++ntc == cnt)) {
+ ntc = 0;
+ gen = !gen;
+ idpf_queue_change(GEN_CHK, cq);
+ }
+ }
+
+out:
+ cq->next_to_clean = ntc;
+
+ return done_frames;
+}
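
idpf_xdp_parse_cqe() hands back the SQ head carried in the completion descriptor, and the ternary above converts it into a count of finished descriptors modulo the ring size. The same arithmetic in isolation; ring_done_frames() is an illustrative name, not a driver symbol:

/* Descriptors completed between the last cleaned index ntc and the head
 * reported by a completion, on a ring of cnt entries. E.g. cnt = 256,
 * ntc = 250, head = 4 -> 10. head == ntc reads as a full ring, matching
 * the branch above. */
static inline u32 ring_done_frames(u32 head, u32 ntc, u32 cnt)
{
	return head > ntc ? head - ntc : head + cnt - ntc;
}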
+
+static u32 idpf_xdpsq_complete(void *_xdpsq, u32 budget)
+{
+ struct libeth_xdpsq_napi_stats ss = { };
+ struct idpf_tx_queue *xdpsq = _xdpsq;
+ u32 tx_ntc = xdpsq->next_to_clean;
+ u32 tx_cnt = xdpsq->desc_count;
+ struct xdp_frame_bulk bq;
+ struct libeth_cq_pp cp = {
+ .dev = xdpsq->dev,
+ .bq = &bq,
+ .xss = &ss,
+ .napi = true,
+ };
+ u32 done_frames;
+
+ done_frames = idpf_xdpsq_poll(xdpsq, budget);
+ if (unlikely(!done_frames))
+ return 0;
+
+ xdp_frame_bulk_init(&bq);
+
+ for (u32 i = 0; likely(i < done_frames); i++) {
+ libeth_xdp_complete_tx(&xdpsq->tx_buf[tx_ntc], &cp);
+
+ if (unlikely(++tx_ntc == tx_cnt))
+ tx_ntc = 0;
+ }
+
+ xdp_flush_frame_bulk(&bq);
+
+ xdpsq->next_to_clean = tx_ntc;
+ xdpsq->pending -= done_frames;
+ xdpsq->xdp_tx -= cp.xdp_tx;
+
+ return done_frames;
+}
+
+static u32 idpf_xdp_tx_prep(void *_xdpsq, struct libeth_xdpsq *sq)
+{
+ struct idpf_tx_queue *xdpsq = _xdpsq;
+ u32 free;
+
+ libeth_xdpsq_lock(&xdpsq->xdp_lock);
+
+ free = xdpsq->desc_count - xdpsq->pending;
+ if (free < xdpsq->thresh)
+ free += idpf_xdpsq_complete(xdpsq, xdpsq->thresh);
+
+ *sq = (struct libeth_xdpsq){
+ .sqes = xdpsq->tx_buf,
+ .descs = xdpsq->desc_ring,
+ .count = xdpsq->desc_count,
+ .lock = &xdpsq->xdp_lock,
+ .ntu = &xdpsq->next_to_use,
+ .pending = &xdpsq->pending,
+ .xdp_tx = &xdpsq->xdp_tx,
+ };
+
+ return free;
+}
+
+LIBETH_XDP_DEFINE_START();
+LIBETH_XDP_DEFINE_TIMER(static idpf_xdp_tx_timer, idpf_xdpsq_complete);
+LIBETH_XDP_DEFINE_FLUSH_TX(idpf_xdp_tx_flush_bulk, idpf_xdp_tx_prep,
+ idpf_xdp_tx_xmit);
+LIBETH_XDP_DEFINE_FLUSH_XMIT(static idpf_xdp_xmit_flush_bulk, idpf_xdp_tx_prep,
+ idpf_xdp_tx_xmit);
+LIBETH_XDP_DEFINE_END();
+
+int idpf_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags)
+{
+ const struct idpf_netdev_priv *np = netdev_priv(dev);
+ const struct idpf_vport *vport = np->vport;
+
+ if (unlikely(!netif_carrier_ok(dev) || !vport->link_up))
+ return -ENETDOWN;
+
+ return libeth_xdp_xmit_do_bulk(dev, n, frames, flags,
+ &vport->txqs[vport->xdp_txq_offset],
+ vport->num_xdp_txq,
+ idpf_xdp_xmit_flush_bulk,
+ idpf_xdp_tx_finalize);
+}
+
+static int idpf_xdpmo_rx_hash(const struct xdp_md *ctx, u32 *hash,
+ enum xdp_rss_hash_type *rss_type)
+{
+ const struct libeth_xdp_buff *xdp = (typeof(xdp))ctx;
+ struct idpf_xdp_rx_desc desc __uninitialized;
+ const struct idpf_rx_queue *rxq;
+ struct libeth_rx_pt pt;
+
+ rxq = libeth_xdp_buff_to_rq(xdp, typeof(*rxq), xdp_rxq);
+
+ idpf_xdp_get_qw0(&desc, xdp->desc);
+
+ pt = rxq->rx_ptype_lkup[idpf_xdp_rx_pt(&desc)];
+ if (!libeth_rx_pt_has_hash(rxq->xdp_rxq.dev, pt))
+ return -ENODATA;
+
+ idpf_xdp_get_qw2(&desc, xdp->desc);
+
+ return libeth_xdpmo_rx_hash(hash, rss_type, idpf_xdp_rx_hash(&desc),
+ pt);
+}
+
+static const struct xdp_metadata_ops idpf_xdpmo = {
+ .xmo_rx_hash = idpf_xdpmo_rx_hash,
+};
+
+void idpf_xdp_set_features(const struct idpf_vport *vport)
+{
+ if (!idpf_is_queue_model_split(vport->rxq_model))
+ return;
+
+ libeth_xdp_set_features_noredir(vport->netdev, &idpf_xdpmo,
+ idpf_get_max_tx_bufs(vport->adapter),
+ libeth_xsktmo);
+}
+
+static int idpf_xdp_setup_prog(struct idpf_vport *vport,
+ const struct netdev_bpf *xdp)
+{
+ const struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
+ struct bpf_prog *old, *prog = xdp->prog;
+ struct idpf_vport_config *cfg;
+ int ret;
+
+ cfg = vport->adapter->vport_config[vport->idx];
+
+ if (test_bit(IDPF_REMOVE_IN_PROG, vport->adapter->flags) ||
+ !test_bit(IDPF_VPORT_REG_NETDEV, cfg->flags) ||
+ !!vport->xdp_prog == !!prog) {
+ if (np->state == __IDPF_VPORT_UP)
+ idpf_xdp_copy_prog_to_rqs(vport, prog);
+
+ old = xchg(&vport->xdp_prog, prog);
+ if (old)
+ bpf_prog_put(old);
+
+ cfg->user_config.xdp_prog = prog;
+
+ return 0;
+ }
+
+ if (!vport->num_xdp_txq && vport->num_txq == cfg->max_q.max_txq) {
+ NL_SET_ERR_MSG_MOD(xdp->extack,
+ "No Tx queues available for XDP, please decrease the number of regular SQs");
+ return -ENOSPC;
+ }
+
+ old = cfg->user_config.xdp_prog;
+ cfg->user_config.xdp_prog = prog;
+
+ ret = idpf_initiate_soft_reset(vport, IDPF_SR_Q_CHANGE);
+ if (ret) {
+ NL_SET_ERR_MSG_MOD(xdp->extack,
+ "Could not reopen the vport after XDP setup");
+
+ cfg->user_config.xdp_prog = old;
+ old = prog;
+ }
+
+ if (old)
+ bpf_prog_put(old);
+
+ libeth_xdp_set_redirect(vport->netdev, vport->xdp_prog);
+
+ return ret;
+}
+
+int idpf_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ struct idpf_vport *vport;
+ int ret;
+
+ idpf_vport_ctrl_lock(dev);
+ vport = idpf_netdev_to_vport(dev);
+
+ if (!idpf_is_queue_model_split(vport->txq_model))
+ goto notsupp;
+
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ ret = idpf_xdp_setup_prog(vport, xdp);
+ break;
+ case XDP_SETUP_XSK_POOL:
+ ret = idpf_xsk_pool_setup(vport, xdp);
+ break;
+ default:
+notsupp:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ idpf_vport_ctrl_unlock(dev);
+
+ return ret;
+}
diff --git a/drivers/net/ethernet/intel/idpf/xdp.h b/drivers/net/ethernet/intel/idpf/xdp.h
new file mode 100644
index 000000000000..479f5ef3c604
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/xdp.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2025 Intel Corporation */
+
+#ifndef _IDPF_XDP_H_
+#define _IDPF_XDP_H_
+
+#include <net/libeth/xdp.h>
+
+#include "idpf_txrx.h"
+
+int idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq);
+int idpf_xdp_rxq_info_init_all(const struct idpf_vport *vport);
+void idpf_xdp_rxq_info_deinit(struct idpf_rx_queue *rxq, u32 model);
+void idpf_xdp_rxq_info_deinit_all(const struct idpf_vport *vport);
+void idpf_xdp_copy_prog_to_rqs(const struct idpf_vport *vport,
+ struct bpf_prog *xdp_prog);
+
+int idpf_xdpsqs_get(const struct idpf_vport *vport);
+void idpf_xdpsqs_put(const struct idpf_vport *vport);
+
+u32 idpf_xdpsq_poll(struct idpf_tx_queue *xdpsq, u32 budget);
+bool idpf_xdp_tx_flush_bulk(struct libeth_xdp_tx_bulk *bq, u32 flags);
+
+/**
+ * idpf_xdp_tx_xmit - produce a single HW Tx descriptor out of XDP desc
+ * @desc: XDP descriptor to pull the DMA address and length from
+ * @i: descriptor index on the queue to fill
+ * @sq: XDP queue to produce the HW Tx descriptor on
+ * @priv: &xsk_tx_metadata_ops on XSk xmit or %NULL
+ */
+static inline void idpf_xdp_tx_xmit(struct libeth_xdp_tx_desc desc, u32 i,
+ const struct libeth_xdpsq *sq, u64 priv)
+{
+ struct idpf_flex_tx_desc *tx_desc = sq->descs;
+ u32 cmd;
+
+ cmd = FIELD_PREP(IDPF_FLEX_TXD_QW1_DTYPE_M,
+ IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2);
+ if (desc.flags & LIBETH_XDP_TX_LAST)
+ cmd |= FIELD_PREP(IDPF_FLEX_TXD_QW1_CMD_M,
+ IDPF_TX_DESC_CMD_EOP);
+ if (priv && (desc.flags & LIBETH_XDP_TX_CSUM))
+ cmd |= FIELD_PREP(IDPF_FLEX_TXD_QW1_CMD_M,
+ IDPF_TX_FLEX_DESC_CMD_CS_EN);
+
+ tx_desc = &tx_desc[i];
+ tx_desc->buf_addr = cpu_to_le64(desc.addr);
+#ifdef __LIBETH_WORD_ACCESS
+ *(u64 *)&tx_desc->qw1 = ((u64)desc.len << 48) | cmd;
+#else
+ tx_desc->qw1.buf_size = cpu_to_le16(desc.len);
+ tx_desc->qw1.cmd_dtype = cpu_to_le16(cmd);
+#endif
+}
+
+static inline void idpf_xdpsq_set_rs(const struct idpf_tx_queue *xdpsq)
+{
+ u32 ntu, cmd;
+
+ ntu = xdpsq->next_to_use;
+ if (unlikely(!ntu))
+ ntu = xdpsq->desc_count;
+
+ cmd = FIELD_PREP(IDPF_FLEX_TXD_QW1_CMD_M, IDPF_TX_DESC_CMD_RS);
+#ifdef __LIBETH_WORD_ACCESS
+ *(u64 *)&xdpsq->flex_tx[ntu - 1].q.qw1 |= cmd;
+#else
+ xdpsq->flex_tx[ntu - 1].q.qw1.cmd_dtype |= cpu_to_le16(cmd);
+#endif
+}
+
+static inline void idpf_xdpsq_update_tail(const struct idpf_tx_queue *xdpsq)
+{
+ dma_wmb();
+ writel_relaxed(xdpsq->next_to_use, xdpsq->tail);
+}
+
+/**
+ * idpf_xdp_tx_finalize - finalize sending over XDPSQ
+ * @_xdpsq: XDP Tx queue
+ * @sent: whether any frames were sent
+ * @flush: whether to update RS bit and the tail register
+ *
+ * Set the RS bit ("end of batch"), bump the tail, and queue the cleanup timer.
+ * To be called after a NAPI polling loop, at the end of .ndo_xdp_xmit() etc.
+ */
+static inline void idpf_xdp_tx_finalize(void *_xdpsq, bool sent, bool flush)
+{
+ struct idpf_tx_queue *xdpsq = _xdpsq;
+
+ if ((!flush || unlikely(!sent)) &&
+ likely(xdpsq->desc_count - 1 != xdpsq->pending))
+ return;
+
+ libeth_xdpsq_lock(&xdpsq->xdp_lock);
+
+ idpf_xdpsq_set_rs(xdpsq);
+ idpf_xdpsq_update_tail(xdpsq);
+
+ libeth_xdpsq_queue_timer(xdpsq->timer);
+
+ libeth_xdpsq_unlock(&xdpsq->xdp_lock);
+}
+
+struct idpf_xdp_rx_desc {
+ aligned_u64 qw0;
+#define IDPF_XDP_RX_BUFQ BIT_ULL(47)
+#define IDPF_XDP_RX_GEN BIT_ULL(46)
+#define IDPF_XDP_RX_LEN GENMASK_ULL(45, 32)
+#define IDPF_XDP_RX_PT GENMASK_ULL(25, 16)
+
+ aligned_u64 qw1;
+#define IDPF_XDP_RX_BUF GENMASK_ULL(47, 32)
+#define IDPF_XDP_RX_EOP BIT_ULL(1)
+
+ aligned_u64 qw2;
+#define IDPF_XDP_RX_HASH GENMASK_ULL(31, 0)
+
+ aligned_u64 qw3;
+} __aligned(4 * sizeof(u64));
+static_assert(sizeof(struct idpf_xdp_rx_desc) ==
+ sizeof(struct virtchnl2_rx_flex_desc_adv_nic_3));
+
+#define idpf_xdp_rx_bufq(desc) !!((desc)->qw0 & IDPF_XDP_RX_BUFQ)
+#define idpf_xdp_rx_gen(desc) !!((desc)->qw0 & IDPF_XDP_RX_GEN)
+#define idpf_xdp_rx_len(desc) FIELD_GET(IDPF_XDP_RX_LEN, (desc)->qw0)
+#define idpf_xdp_rx_pt(desc) FIELD_GET(IDPF_XDP_RX_PT, (desc)->qw0)
+#define idpf_xdp_rx_buf(desc) FIELD_GET(IDPF_XDP_RX_BUF, (desc)->qw1)
+#define idpf_xdp_rx_eop(desc) !!((desc)->qw1 & IDPF_XDP_RX_EOP)
+#define idpf_xdp_rx_hash(desc) FIELD_GET(IDPF_XDP_RX_HASH, (desc)->qw2)
+
+static inline void
+idpf_xdp_get_qw0(struct idpf_xdp_rx_desc *desc,
+ const struct virtchnl2_rx_flex_desc_adv_nic_3 *rxd)
+{
+#ifdef __LIBETH_WORD_ACCESS
+ desc->qw0 = ((const typeof(desc))rxd)->qw0;
+#else
+ desc->qw0 = ((u64)le16_to_cpu(rxd->pktlen_gen_bufq_id) << 32) |
+ ((u64)le16_to_cpu(rxd->ptype_err_fflags0) << 16);
+#endif
+}
+
+static inline void
+idpf_xdp_get_qw1(struct idpf_xdp_rx_desc *desc,
+ const struct virtchnl2_rx_flex_desc_adv_nic_3 *rxd)
+{
+#ifdef __LIBETH_WORD_ACCESS
+ desc->qw1 = ((const typeof(desc))rxd)->qw1;
+#else
+ desc->qw1 = ((u64)le16_to_cpu(rxd->buf_id) << 32) |
+ rxd->status_err0_qw1;
+#endif
+}
+
+static inline void
+idpf_xdp_get_qw2(struct idpf_xdp_rx_desc *desc,
+ const struct virtchnl2_rx_flex_desc_adv_nic_3 *rxd)
+{
+#ifdef __LIBETH_WORD_ACCESS
+ desc->qw2 = ((const typeof(desc))rxd)->qw2;
+#else
+ desc->qw2 = ((u64)rxd->hash3 << 24) |
+ ((u64)rxd->ff2_mirrid_hash2.hash2 << 16) |
+ le16_to_cpu(rxd->hash1);
+#endif
+}
+
+void idpf_xdp_set_features(const struct idpf_vport *vport);
+
+int idpf_xdp(struct net_device *dev, struct netdev_bpf *xdp);
+int idpf_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags);
+
+#endif /* _IDPF_XDP_H_ */
diff --git a/drivers/net/ethernet/intel/idpf/xsk.c b/drivers/net/ethernet/intel/idpf/xsk.c
new file mode 100644
index 000000000000..fd2cc43ab43c
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/xsk.c
@@ -0,0 +1,633 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2025 Intel Corporation */
+
+#include <net/libeth/xsk.h>
+
+#include "idpf.h"
+#include "xdp.h"
+#include "xsk.h"
+
+static void idpf_xsk_tx_timer(struct work_struct *work);
+
+static void idpf_xsk_setup_rxq(const struct idpf_vport *vport,
+ struct idpf_rx_queue *rxq)
+{
+ struct xsk_buff_pool *pool;
+
+ pool = xsk_get_pool_from_qid(vport->netdev, rxq->idx);
+ if (!pool || !pool->dev || !xsk_buff_can_alloc(pool, 1))
+ return;
+
+ rxq->pool = pool;
+
+ idpf_queue_set(XSK, rxq);
+}
+
+static void idpf_xsk_setup_bufq(const struct idpf_vport *vport,
+ struct idpf_buf_queue *bufq)
+{
+ struct xsk_buff_pool *pool;
+ u32 qid = U32_MAX;
+
+ for (u32 i = 0; i < vport->num_rxq_grp; i++) {
+ const struct idpf_rxq_group *grp = &vport->rxq_grps[i];
+
+ for (u32 j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ if (&grp->splitq.bufq_sets[j].bufq == bufq) {
+ qid = grp->splitq.rxq_sets[0]->rxq.idx;
+ goto setup;
+ }
+ }
+ }
+
+setup:
+ pool = xsk_get_pool_from_qid(vport->netdev, qid);
+ if (!pool || !pool->dev || !xsk_buff_can_alloc(pool, 1))
+ return;
+
+ bufq->pool = pool;
+
+ idpf_queue_set(XSK, bufq);
+}
+
+static void idpf_xsk_setup_txq(const struct idpf_vport *vport,
+ struct idpf_tx_queue *txq)
+{
+ struct xsk_buff_pool *pool;
+ u32 qid;
+
+ idpf_queue_clear(XSK, txq);
+
+ if (!idpf_queue_has(XDP, txq))
+ return;
+
+ qid = txq->idx - vport->xdp_txq_offset;
+
+ pool = xsk_get_pool_from_qid(vport->netdev, qid);
+ if (!pool || !pool->dev)
+ return;
+
+ txq->pool = pool;
+ libeth_xdpsq_init_timer(txq->timer, txq, &txq->xdp_lock,
+ idpf_xsk_tx_timer);
+
+ idpf_queue_assign(NOIRQ, txq, xsk_uses_need_wakeup(pool));
+ idpf_queue_set(XSK, txq);
+}
+
+static void idpf_xsk_setup_complq(const struct idpf_vport *vport,
+ struct idpf_compl_queue *complq)
+{
+ const struct xsk_buff_pool *pool;
+ u32 qid;
+
+ idpf_queue_clear(XSK, complq);
+
+ if (!idpf_queue_has(XDP, complq))
+ return;
+
+ qid = complq->txq_grp->txqs[0]->idx - vport->xdp_txq_offset;
+
+ pool = xsk_get_pool_from_qid(vport->netdev, qid);
+ if (!pool || !pool->dev)
+ return;
+
+ idpf_queue_set(XSK, complq);
+}
+
+void idpf_xsk_setup_queue(const struct idpf_vport *vport, void *q,
+ enum virtchnl2_queue_type type)
+{
+ if (!idpf_xdp_enabled(vport))
+ return;
+
+ switch (type) {
+ case VIRTCHNL2_QUEUE_TYPE_RX:
+ idpf_xsk_setup_rxq(vport, q);
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
+ idpf_xsk_setup_bufq(vport, q);
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_TX:
+ idpf_xsk_setup_txq(vport, q);
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
+ idpf_xsk_setup_complq(vport, q);
+ break;
+ default:
+ break;
+ }
+}
+
+void idpf_xsk_clear_queue(void *q, enum virtchnl2_queue_type type)
+{
+ struct idpf_compl_queue *complq;
+ struct idpf_buf_queue *bufq;
+ struct idpf_rx_queue *rxq;
+ struct idpf_tx_queue *txq;
+
+ switch (type) {
+ case VIRTCHNL2_QUEUE_TYPE_RX:
+ rxq = q;
+ if (!idpf_queue_has_clear(XSK, rxq))
+ return;
+
+ rxq->pool = NULL;
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
+ bufq = q;
+ if (!idpf_queue_has_clear(XSK, bufq))
+ return;
+
+ bufq->pool = NULL;
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_TX:
+ txq = q;
+ if (!idpf_queue_has_clear(XSK, txq))
+ return;
+
+ idpf_queue_set(NOIRQ, txq);
+ txq->dev = txq->netdev->dev.parent;
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
+ complq = q;
+ idpf_queue_clear(XSK, complq);
+ break;
+ default:
+ break;
+ }
+}
+
+void idpf_xsk_init_wakeup(struct idpf_q_vector *qv)
+{
+ libeth_xsk_init_wakeup(&qv->csd, &qv->napi);
+}
+
+void idpf_xsksq_clean(struct idpf_tx_queue *xdpsq)
+{
+ struct libeth_xdpsq_napi_stats ss = { };
+ u32 ntc = xdpsq->next_to_clean;
+ struct xdp_frame_bulk bq;
+ struct libeth_cq_pp cp = {
+ .dev = xdpsq->pool->dev,
+ .bq = &bq,
+ .xss = &ss,
+ };
+ u32 xsk_frames = 0;
+
+ xdp_frame_bulk_init(&bq);
+
+ while (ntc != xdpsq->next_to_use) {
+ struct libeth_sqe *sqe = &xdpsq->tx_buf[ntc];
+
+ if (sqe->type)
+ libeth_xdp_complete_tx(sqe, &cp);
+ else
+ xsk_frames++;
+
+ if (unlikely(++ntc == xdpsq->desc_count))
+ ntc = 0;
+ }
+
+ xdp_flush_frame_bulk(&bq);
+
+ if (xsk_frames)
+ xsk_tx_completed(xdpsq->pool, xsk_frames);
+}
+
+static noinline u32 idpf_xsksq_complete_slow(struct idpf_tx_queue *xdpsq,
+ u32 done)
+{
+ struct libeth_xdpsq_napi_stats ss = { };
+ u32 ntc = xdpsq->next_to_clean;
+ u32 cnt = xdpsq->desc_count;
+ struct xdp_frame_bulk bq;
+ struct libeth_cq_pp cp = {
+ .dev = xdpsq->pool->dev,
+ .bq = &bq,
+ .xss = &ss,
+ .napi = true,
+ };
+ u32 xsk_frames = 0;
+
+ xdp_frame_bulk_init(&bq);
+
+ for (u32 i = 0; likely(i < done); i++) {
+ struct libeth_sqe *sqe = &xdpsq->tx_buf[ntc];
+
+ if (sqe->type)
+ libeth_xdp_complete_tx(sqe, &cp);
+ else
+ xsk_frames++;
+
+ if (unlikely(++ntc == cnt))
+ ntc = 0;
+ }
+
+ xdp_flush_frame_bulk(&bq);
+
+ xdpsq->next_to_clean = ntc;
+ xdpsq->xdp_tx -= cp.xdp_tx;
+
+ return xsk_frames;
+}
+
+static __always_inline u32 idpf_xsksq_complete(void *_xdpsq, u32 budget)
+{
+ struct idpf_tx_queue *xdpsq = _xdpsq;
+ u32 tx_ntc = xdpsq->next_to_clean;
+ u32 tx_cnt = xdpsq->desc_count;
+ u32 done_frames;
+ u32 xsk_frames;
+
+ done_frames = idpf_xdpsq_poll(xdpsq, budget);
+ if (unlikely(!done_frames))
+ return 0;
+
+ if (likely(!xdpsq->xdp_tx)) {
+ tx_ntc += done_frames;
+ if (tx_ntc >= tx_cnt)
+ tx_ntc -= tx_cnt;
+
+ xdpsq->next_to_clean = tx_ntc;
+ xsk_frames = done_frames;
+
+ goto finalize;
+ }
+
+ xsk_frames = idpf_xsksq_complete_slow(xdpsq, done_frames);
+ if (xsk_frames)
+finalize:
+ xsk_tx_completed(xdpsq->pool, xsk_frames);
+
+ xdpsq->pending -= done_frames;
+
+ return done_frames;
+}
+
+static u32 idpf_xsk_tx_prep(void *_xdpsq, struct libeth_xdpsq *sq)
+{
+ struct idpf_tx_queue *xdpsq = _xdpsq;
+ u32 free;
+
+ libeth_xdpsq_lock(&xdpsq->xdp_lock);
+
+ free = xdpsq->desc_count - xdpsq->pending;
+ if (free < xdpsq->thresh)
+ free += idpf_xsksq_complete(xdpsq, xdpsq->thresh);
+
+ *sq = (struct libeth_xdpsq){
+ .pool = xdpsq->pool,
+ .sqes = xdpsq->tx_buf,
+ .descs = xdpsq->desc_ring,
+ .count = xdpsq->desc_count,
+ .lock = &xdpsq->xdp_lock,
+ .ntu = &xdpsq->next_to_use,
+ .pending = &xdpsq->pending,
+ .xdp_tx = &xdpsq->xdp_tx,
+ };
+
+ return free;
+}
+
+static u32 idpf_xsk_xmit_prep(void *_xdpsq, struct libeth_xdpsq *sq)
+{
+ struct idpf_tx_queue *xdpsq = _xdpsq;
+
+ *sq = (struct libeth_xdpsq){
+ .pool = xdpsq->pool,
+ .sqes = xdpsq->tx_buf,
+ .descs = xdpsq->desc_ring,
+ .count = xdpsq->desc_count,
+ .lock = &xdpsq->xdp_lock,
+ .ntu = &xdpsq->next_to_use,
+ .pending = &xdpsq->pending,
+ };
+
+ /*
+ * The queue is already cleaned and the budget is known; optimize out
+ * the second min() by passing the type's limit.
+ */
+ return U32_MAX;
+}
+
+bool idpf_xsk_xmit(struct idpf_tx_queue *xsksq)
+{
+ u32 free;
+
+ libeth_xdpsq_lock(&xsksq->xdp_lock);
+
+ free = xsksq->desc_count - xsksq->pending;
+ if (free < xsksq->thresh)
+ free += idpf_xsksq_complete(xsksq, xsksq->thresh);
+
+ return libeth_xsk_xmit_do_bulk(xsksq->pool, xsksq,
+ min(free - 1, xsksq->thresh),
+ libeth_xsktmo, idpf_xsk_xmit_prep,
+ idpf_xdp_tx_xmit, idpf_xdp_tx_finalize);
+}
+
+LIBETH_XDP_DEFINE_START();
+LIBETH_XDP_DEFINE_TIMER(static idpf_xsk_tx_timer, idpf_xsksq_complete);
+LIBETH_XSK_DEFINE_FLUSH_TX(static idpf_xsk_tx_flush_bulk, idpf_xsk_tx_prep,
+ idpf_xdp_tx_xmit);
+LIBETH_XSK_DEFINE_RUN(static idpf_xsk_run_pass, idpf_xsk_run_prog,
+ idpf_xsk_tx_flush_bulk, idpf_rx_process_skb_fields);
+LIBETH_XSK_DEFINE_FINALIZE(static idpf_xsk_finalize_rx, idpf_xsk_tx_flush_bulk,
+ idpf_xdp_tx_finalize);
+LIBETH_XDP_DEFINE_END();
+
+static void idpf_xskfqe_init(const struct libeth_xskfq_fp *fq, u32 i)
+{
+ struct virtchnl2_splitq_rx_buf_desc *desc = fq->descs;
+
+ desc = &desc[i];
+#ifdef __LIBETH_WORD_ACCESS
+ *(u64 *)&desc->qword0 = i;
+#else
+ desc->qword0.buf_id = cpu_to_le16(i);
+#endif
+ desc->pkt_addr = cpu_to_le64(libeth_xsk_buff_xdp_get_dma(fq->fqes[i]));
+}
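+
+/*
+ * With __LIBETH_WORD_ACCESS, the single 64-bit store above is assumed
+ * to initialize all of qword0 at once (buf_id in the low 16 bits, the
+ * remaining fields zeroed), matching the cpu_to_le16() fallback on
+ * little-endian systems while avoiding a read-modify-write.
+ */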
+
+static bool idpf_xskfq_refill_thresh(struct idpf_buf_queue *bufq, u32 count)
+{
+ struct libeth_xskfq_fp fq = {
+ .pool = bufq->pool,
+ .fqes = bufq->xsk_buf,
+ .descs = bufq->split_buf,
+ .ntu = bufq->next_to_use,
+ .count = bufq->desc_count,
+ };
+ u32 done;
+
+ done = libeth_xskfqe_alloc(&fq, count, idpf_xskfqe_init);
+ writel(fq.ntu, bufq->tail);
+
+ bufq->next_to_use = fq.ntu;
+ bufq->pending -= done;
+
+ return done == count;
+}
+
+static bool idpf_xskfq_refill(struct idpf_buf_queue *bufq)
+{
+ u32 count, rx_thresh = bufq->thresh;
+
+ count = ALIGN_DOWN(bufq->pending - 1, rx_thresh);
+
+ for (u32 i = 0; i < count; i += rx_thresh) {
+ if (unlikely(!idpf_xskfq_refill_thresh(bufq, rx_thresh)))
+ return false;
+ }
+
+ return true;
+}
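+
+/*
+ * Refill batching example (illustrative): with 513 pending buffers and
+ * a threshold of 64, ALIGN_DOWN(512, 64) yields eight batches of 64;
+ * the remainder stays pending until a later poll pushes it past the
+ * threshold again.
+ */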
+
+int idpf_xskfq_init(struct idpf_buf_queue *bufq)
+{
+ struct libeth_xskfq fq = {
+ .pool = bufq->pool,
+ .count = bufq->desc_count,
+ .nid = idpf_q_vector_to_mem(bufq->q_vector),
+ };
+ int ret;
+
+ ret = libeth_xskfq_create(&fq);
+ if (ret)
+ return ret;
+
+ bufq->xsk_buf = fq.fqes;
+ bufq->pending = fq.pending;
+ bufq->thresh = fq.thresh;
+ bufq->rx_buf_size = fq.buf_len;
+
+ if (!idpf_xskfq_refill(bufq))
+ netdev_err(bufq->pool->netdev,
+ "failed to allocate XSk buffers for qid %d\n",
+ bufq->pool->queue_id);
+
+ bufq->next_to_alloc = bufq->next_to_use;
+
+ idpf_queue_clear(HSPLIT_EN, bufq);
+ bufq->rx_hbuf_size = 0;
+
+ return 0;
+}
+
+void idpf_xskfq_rel(struct idpf_buf_queue *bufq)
+{
+ struct libeth_xskfq fq = {
+ .fqes = bufq->xsk_buf,
+ };
+
+ libeth_xskfq_destroy(&fq);
+
+ bufq->rx_buf_size = fq.buf_len;
+ bufq->thresh = fq.thresh;
+ bufq->pending = fq.pending;
+}
+
+struct idpf_xskfq_refill_set {
+ struct {
+ struct idpf_buf_queue *q;
+ u32 buf_id;
+ u32 pending;
+ } bufqs[IDPF_MAX_BUFQS_PER_RXQ_GRP];
+};
+
+static bool idpf_xskfq_refill_set(const struct idpf_xskfq_refill_set *set)
+{
+ bool ret = true;
+
+ for (u32 i = 0; i < ARRAY_SIZE(set->bufqs); i++) {
+ struct idpf_buf_queue *bufq = set->bufqs[i].q;
+ u32 ntc;
+
+ if (!bufq)
+ continue;
+
+ ntc = set->bufqs[i].buf_id;
+ if (unlikely(++ntc == bufq->desc_count))
+ ntc = 0;
+
+ bufq->next_to_clean = ntc;
+ bufq->pending += set->bufqs[i].pending;
+
+ if (bufq->pending > bufq->thresh)
+ ret &= idpf_xskfq_refill(bufq);
+ }
+
+ return ret;
+}
+
+int idpf_xskrq_poll(struct idpf_rx_queue *rxq, u32 budget)
+{
+ struct idpf_xskfq_refill_set set = { };
+ struct libeth_rq_napi_stats rs = { };
+ bool wake, gen, fail = false;
+ u32 ntc = rxq->next_to_clean;
+ struct libeth_xdp_buff *xdp;
+ LIBETH_XDP_ONSTACK_BULK(bq);
+ u32 cnt = rxq->desc_count;
+
+ wake = xsk_uses_need_wakeup(rxq->pool);
+ if (wake)
+ xsk_clear_rx_need_wakeup(rxq->pool);
+
+ gen = idpf_queue_has(GEN_CHK, rxq);
+
+ libeth_xsk_tx_init_bulk(&bq, rxq->xdp_prog, rxq->xdp_rxq.dev,
+ rxq->xdpsqs, rxq->num_xdp_txq);
+ xdp = rxq->xsk;
+
+ while (likely(rs.packets < budget)) {
+ const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc;
+ struct idpf_xdp_rx_desc desc __uninitialized;
+ struct idpf_buf_queue *bufq;
+ u32 bufq_id, buf_id;
+
+ rx_desc = &rxq->rx[ntc].flex_adv_nic_3_wb;
+
+ idpf_xdp_get_qw0(&desc, rx_desc);
+ if (idpf_xdp_rx_gen(&desc) != gen)
+ break;
+
+ dma_rmb();
+
+ bufq_id = idpf_xdp_rx_bufq(&desc);
+ bufq = set.bufqs[bufq_id].q;
+ if (!bufq) {
+ bufq = &rxq->bufq_sets[bufq_id].bufq;
+ set.bufqs[bufq_id].q = bufq;
+ }
+
+ idpf_xdp_get_qw1(&desc, rx_desc);
+ buf_id = idpf_xdp_rx_buf(&desc);
+
+ set.bufqs[bufq_id].buf_id = buf_id;
+ set.bufqs[bufq_id].pending++;
+
+ xdp = libeth_xsk_process_buff(xdp, bufq->xsk_buf[buf_id],
+ idpf_xdp_rx_len(&desc));
+
+ if (unlikely(++ntc == cnt)) {
+ ntc = 0;
+ gen = !gen;
+ idpf_queue_change(GEN_CHK, rxq);
+ }
+
+ if (!idpf_xdp_rx_eop(&desc) || unlikely(!xdp))
+ continue;
+
+ fail = !idpf_xsk_run_pass(xdp, &bq, rxq->napi, &rs, rx_desc);
+ xdp = NULL;
+
+ if (fail)
+ break;
+ }
+
+ idpf_xsk_finalize_rx(&bq);
+
+ rxq->next_to_clean = ntc;
+ rxq->xsk = xdp;
+
+ fail |= !idpf_xskfq_refill_set(&set);
+
+ u64_stats_update_begin(&rxq->stats_sync);
+ u64_stats_add(&rxq->q_stats.packets, rs.packets);
+ u64_stats_add(&rxq->q_stats.bytes, rs.bytes);
+ u64_stats_update_end(&rxq->stats_sync);
+
+ if (!wake)
+ return unlikely(fail) ? budget : rs.packets;
+
+ if (unlikely(fail))
+ xsk_set_rx_need_wakeup(rxq->pool);
+
+ return rs.packets;
+}
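+
+/*
+ * need_wakeup flow, in short: the flag is cleared on entry so userspace
+ * knows the kernel is actively polling; it is set again only on failure
+ * (e.g. the Rx pass or the refill ran out of resources), signalling
+ * userspace to kick the queue, typically via poll() or sendto() on the
+ * XSK socket.
+ */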
+
+int idpf_xsk_pool_setup(struct idpf_vport *vport, struct netdev_bpf *bpf)
+{
+ struct xsk_buff_pool *pool = bpf->xsk.pool;
+ u32 qid = bpf->xsk.queue_id;
+ bool restart;
+ int ret;
+
+ if (pool && !IS_ALIGNED(xsk_pool_get_rx_frame_size(pool),
+ LIBETH_RX_BUF_STRIDE)) {
+ NL_SET_ERR_MSG_FMT_MOD(bpf->extack,
+ "%s: HW doesn't support frames sizes not aligned to %u (qid %u: %u)",
+ netdev_name(vport->netdev),
+ LIBETH_RX_BUF_STRIDE, qid,
+ xsk_pool_get_rx_frame_size(pool));
+ return -EINVAL;
+ }
+
+ restart = idpf_xdp_enabled(vport) && netif_running(vport->netdev);
+ if (!restart)
+ goto pool;
+
+ ret = idpf_qp_switch(vport, qid, false);
+ if (ret) {
+ NL_SET_ERR_MSG_FMT_MOD(bpf->extack,
+ "%s: failed to disable queue pair %u: %pe",
+ netdev_name(vport->netdev), qid,
+ ERR_PTR(ret));
+ return ret;
+ }
+
+pool:
+ ret = libeth_xsk_setup_pool(vport->netdev, qid, pool);
+ if (ret) {
+ NL_SET_ERR_MSG_FMT_MOD(bpf->extack,
+ "%s: failed to configure XSk pool for pair %u: %pe",
+ netdev_name(vport->netdev), qid,
+ ERR_PTR(ret));
+ return ret;
+ }
+
+ if (!restart)
+ return 0;
+
+ ret = idpf_qp_switch(vport, qid, true);
+ if (ret) {
+ NL_SET_ERR_MSG_FMT_MOD(bpf->extack,
+ "%s: failed to enable queue pair %u: %pe",
+ netdev_name(vport->netdev), qid,
+ ERR_PTR(ret));
+ goto err_dis;
+ }
+
+ return 0;
+
+err_dis:
+ libeth_xsk_setup_pool(vport->netdev, qid, false);
+
+ return ret;
+}
+
+int idpf_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
+{
+ const struct idpf_netdev_priv *np = netdev_priv(dev);
+ const struct idpf_vport *vport = np->vport;
+ struct idpf_q_vector *q_vector;
+
+ if (unlikely(idpf_vport_ctrl_is_locked(dev)))
+ return -EBUSY;
+
+ if (unlikely(!vport->link_up))
+ return -ENETDOWN;
+
+ if (unlikely(!vport->num_xdp_txq))
+ return -ENXIO;
+
+ q_vector = idpf_find_rxq_vec(vport, qid);
+ if (unlikely(!q_vector->xsksq))
+ return -ENXIO;
+
+ libeth_xsk_wakeup(&q_vector->csd, qid);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/intel/idpf/xsk.h b/drivers/net/ethernet/intel/idpf/xsk.h
new file mode 100644
index 000000000000..b622d08c03e8
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/xsk.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2025 Intel Corporation */
+
+#ifndef _IDPF_XSK_H_
+#define _IDPF_XSK_H_
+
+#include <linux/types.h>
+
+enum virtchnl2_queue_type;
+struct idpf_buf_queue;
+struct idpf_q_vector;
+struct idpf_rx_queue;
+struct idpf_tx_queue;
+struct idpf_vport;
+struct net_device;
+struct netdev_bpf;
+
+void idpf_xsk_setup_queue(const struct idpf_vport *vport, void *q,
+ enum virtchnl2_queue_type type);
+void idpf_xsk_clear_queue(void *q, enum virtchnl2_queue_type type);
+void idpf_xsk_init_wakeup(struct idpf_q_vector *qv);
+
+int idpf_xskfq_init(struct idpf_buf_queue *bufq);
+void idpf_xskfq_rel(struct idpf_buf_queue *bufq);
+void idpf_xsksq_clean(struct idpf_tx_queue *xdpq);
+
+int idpf_xskrq_poll(struct idpf_rx_queue *rxq, u32 budget);
+bool idpf_xsk_xmit(struct idpf_tx_queue *xsksq);
+
+int idpf_xsk_pool_setup(struct idpf_vport *vport, struct netdev_bpf *xdp);
+int idpf_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags);
+
+#endif /* !_IDPF_XSK_H_ */
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 64dfc362d1dc..44a85ad749a4 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -2372,7 +2372,7 @@ static s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw,
checksum += nvm_data;
}
- if (checksum != (u16) NVM_SUM) {
+ if (checksum != NVM_SUM) {
hw_dbg("NVM Checksum Invalid\n");
ret_val = -E1000_ERR_NVM;
goto out;
@@ -2406,7 +2406,7 @@ static s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
}
checksum += nvm_data;
}
- checksum = (u16) NVM_SUM - checksum;
+ checksum = NVM_SUM - checksum;
ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1,
&checksum);
if (ret_val)
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index 503b239868e8..9db29b231d6a 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -602,7 +602,7 @@ static s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
}
checksum += nvm_data;
}
- checksum = (u16) NVM_SUM - checksum;
+ checksum = NVM_SUM - checksum;
ret_val = igb_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
&checksum);
if (ret_val) {
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c
index 2dcd64d6dec3..c8638502c2be 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.c
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c
@@ -636,7 +636,7 @@ s32 igb_validate_nvm_checksum(struct e1000_hw *hw)
checksum += nvm_data;
}
- if (checksum != (u16) NVM_SUM) {
+ if (checksum != NVM_SUM) {
hw_dbg("NVM Checksum Invalid\n");
ret_val = -E1000_ERR_NVM;
goto out;
@@ -668,7 +668,7 @@ s32 igb_update_nvm_checksum(struct e1000_hw *hw)
}
checksum += nvm_data;
}
- checksum = (u16) NVM_SUM - checksum;
+ checksum = NVM_SUM - checksum;
ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum);
if (ret_val)
hw_dbg("NVM Write Error while updating checksum.\n");
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index c3f4f7cd264e..0fff1df81b7b 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -217,7 +217,7 @@ static inline int igb_skb_pad(void)
#define IGB_MASTER_SLAVE e1000_ms_hw_default
#endif
-#define IGB_MNG_VLAN_NONE -1
+#define IGB_MNG_VLAN_NONE 0xFFFF
enum igb_tx_flags {
/* cmd_type flags */
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 7b8f32c5169a..f8a208c84f15 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -920,11 +920,11 @@ static int igb_set_ringparam(struct net_device *netdev,
}
if (adapter->num_tx_queues > adapter->num_rx_queues)
- temp_ring = vmalloc(array_size(sizeof(struct igb_ring),
- adapter->num_tx_queues));
+ temp_ring = vmalloc_array(adapter->num_tx_queues,
+ sizeof(struct igb_ring));
else
- temp_ring = vmalloc(array_size(sizeof(struct igb_ring),
- adapter->num_rx_queues));
+ temp_ring = vmalloc_array(adapter->num_rx_queues,
+ sizeof(struct igb_ring));
if (!temp_ring) {
err = -ENOMEM;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 453deb6d14b3..85f9589cc568 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1531,8 +1531,7 @@ static void igb_update_mng_vlan(struct igb_adapter *adapter)
adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
}
- if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
- (vid != old_vid) &&
+ if (old_vid != IGB_MNG_VLAN_NONE && vid != old_vid &&
!test_bit(old_vid, adapter->active_vlans)) {
/* remove VID from filter table */
igb_vfta_set(hw, vid, pf_id, false, true);
diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c
index 773895c663fd..9c08ebfad804 100644
--- a/drivers/net/ethernet/intel/igbvf/ethtool.c
+++ b/drivers/net/ethernet/intel/igbvf/ethtool.c
@@ -30,11 +30,12 @@ static const struct igbvf_stats igbvf_gstrings_stats[] = {
{ "rx_bytes", IGBVF_STAT(stats.gorc, stats.base_gorc) },
{ "tx_bytes", IGBVF_STAT(stats.gotc, stats.base_gotc) },
{ "multicast", IGBVF_STAT(stats.mprc, stats.base_mprc) },
- { "lbrx_bytes", IGBVF_STAT(stats.gorlbc, stats.base_gorlbc) },
{ "lbrx_packets", IGBVF_STAT(stats.gprlbc, stats.base_gprlbc) },
+ { "lbtx_packets", IGBVF_STAT(stats.gptlbc, stats.base_gptlbc) },
+ { "lbrx_bytes", IGBVF_STAT(stats.gorlbc, stats.base_gorlbc) },
+ { "lbtx_bytes", IGBVF_STAT(stats.gotlbc, stats.base_gotlbc) },
{ "tx_restart_queue", IGBVF_STAT(restart_queue, zero_base) },
{ "tx_timeout_count", IGBVF_STAT(tx_timeout_count, zero_base) },
- { "rx_long_byte_count", IGBVF_STAT(stats.gorc, stats.base_gorc) },
{ "rx_csum_offload_good", IGBVF_STAT(hw_csum_good, zero_base) },
{ "rx_csum_offload_errors", IGBVF_STAT(hw_csum_err, zero_base) },
{ "rx_header_split", IGBVF_STAT(rx_hdr_split, zero_base) },
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index ecb35b693ce5..f3e7218ba6f3 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -627,11 +627,11 @@ igc_ethtool_set_ringparam(struct net_device *netdev,
}
if (adapter->num_tx_queues > adapter->num_rx_queues)
- temp_ring = vmalloc(array_size(sizeof(struct igc_ring),
- adapter->num_tx_queues));
+ temp_ring = vmalloc_array(adapter->num_tx_queues,
+ sizeof(struct igc_ring));
else
- temp_ring = vmalloc(array_size(sizeof(struct igc_ring),
- adapter->num_rx_queues));
+ temp_ring = vmalloc_array(adapter->num_rx_queues,
+ sizeof(struct igc_ring));
if (!temp_ring) {
err = -ENOMEM;
diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c
index 0dd61719f1ed..5226d10cc95b 100644
--- a/drivers/net/ethernet/intel/igc/igc_i225.c
+++ b/drivers/net/ethernet/intel/igc/igc_i225.c
@@ -435,7 +435,7 @@ static s32 igc_update_nvm_checksum_i225(struct igc_hw *hw)
}
checksum += nvm_data;
}
- checksum = (u16)NVM_SUM - checksum;
+ checksum = NVM_SUM - checksum;
ret_val = igc_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
&checksum);
if (ret_val) {
diff --git a/drivers/net/ethernet/intel/igc/igc_nvm.c b/drivers/net/ethernet/intel/igc/igc_nvm.c
index efd121c03967..a47b8d39238c 100644
--- a/drivers/net/ethernet/intel/igc/igc_nvm.c
+++ b/drivers/net/ethernet/intel/igc/igc_nvm.c
@@ -123,7 +123,7 @@ s32 igc_validate_nvm_checksum(struct igc_hw *hw)
checksum += nvm_data;
}
- if (checksum != (u16)NVM_SUM) {
+ if (checksum != NVM_SUM) {
hw_dbg("NVM Checksum Invalid\n");
ret_val = -IGC_ERR_NVM;
goto out;
@@ -155,7 +155,7 @@ s32 igc_update_nvm_checksum(struct igc_hw *hw)
}
checksum += nvm_data;
}
- checksum = (u16)NVM_SUM - checksum;
+ checksum = NVM_SUM - checksum;
ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum);
if (ret_val)
hw_dbg("NVM Write Error while updating checksum.\n");
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 4ff19426ab74..3ea6765f9c5d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -1739,9 +1739,9 @@ int ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
}
}
- checksum = (u16)IXGBE_EEPROM_SUM - checksum;
+ checksum = IXGBE_EEPROM_SUM - checksum;
- return (int)checksum;
+ return checksum;
}
/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c
index bfeef5b0b99d..c2f8189a0738 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c
@@ -774,7 +774,7 @@ static void ixgbe_parse_vf_func_caps(struct ixgbe_hw *hw,
* from parsing capabilities and use this to calculate the number of resources
* per PF based on the max value passed in.
*
- * Return: the number of resources per PF or 0, if no PH are available.
+ * Return: the number of resources per PF or 0, if no PFs are available.
*/
static u32 ixgbe_get_num_per_func(struct ixgbe_hw *hw, u32 max)
{
@@ -1953,6 +1953,16 @@ int ixgbe_identify_phy_e610(struct ixgbe_hw *hw)
phy_type_low & IXGBE_PHY_TYPE_LOW_1G_SGMII ||
phy_type_high & IXGBE_PHY_TYPE_HIGH_1G_USXGMII)
hw->phy.speeds_supported |= IXGBE_LINK_SPEED_1GB_FULL;
+ if (phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_T ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_X ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_KX ||
+ phy_type_high & IXGBE_PHY_TYPE_HIGH_2500M_SGMII ||
+ phy_type_high & IXGBE_PHY_TYPE_HIGH_2500M_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL;
+ if (phy_type_low & IXGBE_PHY_TYPE_LOW_5GBASE_T ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_5GBASE_KR ||
+ phy_type_high & IXGBE_PHY_TYPE_HIGH_5G_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL;
if (phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_T ||
phy_type_low & IXGBE_PHY_TYPE_LOW_10G_SFI_DA ||
phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_SR ||
@@ -1963,31 +1973,10 @@ int ixgbe_identify_phy_e610(struct ixgbe_hw *hw)
phy_type_high & IXGBE_PHY_TYPE_HIGH_10G_USXGMII)
hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10GB_FULL;
- /* 2.5 and 5 Gbps link speeds must be excluded from the
- * auto-negotiation set used during driver initialization due to
- * compatibility issues with certain switches. Those issues do not
- * exist in case of E610 2.5G SKU device (0x57b1).
- */
- if (!hw->phy.autoneg_advertised &&
- hw->device_id != IXGBE_DEV_ID_E610_2_5G_T)
- hw->phy.autoneg_advertised = hw->phy.speeds_supported;
-
- if (phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_T ||
- phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_X ||
- phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_KX ||
- phy_type_high & IXGBE_PHY_TYPE_HIGH_2500M_SGMII ||
- phy_type_high & IXGBE_PHY_TYPE_HIGH_2500M_USXGMII)
- hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL;
-
- if (!hw->phy.autoneg_advertised &&
- hw->device_id == IXGBE_DEV_ID_E610_2_5G_T)
+ /* Initialize autoneg speeds */
+ if (!hw->phy.autoneg_advertised)
hw->phy.autoneg_advertised = hw->phy.speeds_supported;
- if (phy_type_low & IXGBE_PHY_TYPE_LOW_5GBASE_T ||
- phy_type_low & IXGBE_PHY_TYPE_LOW_5GBASE_KR ||
- phy_type_high & IXGBE_PHY_TYPE_HIGH_5G_USXGMII)
- hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL;
-
/* Set PHY ID */
memcpy(&hw->phy.id, pcaps.phy_id_oui, sizeof(u32));
@@ -3008,50 +2997,71 @@ static int ixgbe_get_nvm_srev(struct ixgbe_hw *hw,
* Searches through the Option ROM flash contents to locate the CIVD data for
* the image.
*
- * Return: the exit code of the operation.
+ * Return: -ENOMEM when memory cannot be allocated, -EDOM on a checksum
+ * violation, -ENODATA when the CIVD data cannot be found, -EIO on a
+ * faulty read, or 0 on success.
+ *
+ * On success, @civd holds the collected data.
*/
static int
ixgbe_get_orom_civd_data(struct ixgbe_hw *hw, enum ixgbe_bank_select bank,
struct ixgbe_orom_civd_info *civd)
{
- struct ixgbe_orom_civd_info tmp;
+ u32 orom_size = hw->flash.banks.orom_size;
+ u8 *orom_data;
u32 offset;
int err;
+ orom_data = kzalloc(orom_size, GFP_KERNEL);
+ if (!orom_data)
+ return -ENOMEM;
+
+ err = ixgbe_read_flash_module(hw, bank,
+ IXGBE_E610_SR_1ST_OROM_BANK_PTR, 0,
+ orom_data, orom_size);
+ if (err) {
+ err = -EIO;
+ goto cleanup;
+ }
+
/* The CIVD section is located in the Option ROM aligned to 512 bytes.
* The first 4 bytes must contain the ASCII characters "$CIV".
* A simple modulo 256 sum of all of the bytes of the structure must
* equal 0.
*/
- for (offset = 0; (offset + SZ_512) <= hw->flash.banks.orom_size;
- offset += SZ_512) {
+ for (offset = 0; offset + SZ_512 <= orom_size; offset += SZ_512) {
+ struct ixgbe_orom_civd_info *tmp;
u8 sum = 0;
u32 i;
- err = ixgbe_read_flash_module(hw, bank,
- IXGBE_E610_SR_1ST_OROM_BANK_PTR,
- offset,
- (u8 *)&tmp, sizeof(tmp));
- if (err)
- return err;
+ BUILD_BUG_ON(sizeof(*tmp) > SZ_512);
+
+ tmp = (struct ixgbe_orom_civd_info *)&orom_data[offset];
/* Skip forward until we find a matching signature */
- if (memcmp(IXGBE_OROM_CIV_SIGNATURE, tmp.signature,
- sizeof(tmp.signature)))
+ if (memcmp(IXGBE_OROM_CIV_SIGNATURE, tmp->signature,
+ sizeof(tmp->signature)))
continue;
/* Verify that the simple checksum is zero */
- for (i = 0; i < sizeof(tmp); i++)
- sum += ((u8 *)&tmp)[i];
+ for (i = 0; i < sizeof(*tmp); i++)
+ sum += ((u8 *)tmp)[i];
+
+ if (sum) {
+ err = -EDOM;
+ goto cleanup;
+ }
- if (sum)
- return -EDOM;
+ *civd = *tmp;
+ err = 0;
- *civd = tmp;
- return 0;
+ goto cleanup;
}
- return -ENODATA;
+ err = -ENODATA;
+cleanup:
+ kfree(orom_data);
+ return err;
}
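+
+/*
+ * Checksum example (illustrative): if the CIVD payload bytes sum to
+ * 0x7F modulo 256, the stored checksum byte must be 0x81 so the total
+ * wraps to 0x00; any other remainder fails the scan above with -EDOM.
+ */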
/**
@@ -3911,6 +3921,38 @@ static int ixgbe_read_pba_string_e610(struct ixgbe_hw *hw, u8 *pba_num,
return err;
}
+static int __fwlog_send_cmd(void *priv, struct libie_aq_desc *desc, void *buf,
+ u16 size)
+{
+ struct ixgbe_hw *hw = priv;
+
+ return ixgbe_aci_send_cmd(hw, desc, buf, size);
+}
+
+int ixgbe_fwlog_init(struct ixgbe_hw *hw)
+{
+ struct ixgbe_adapter *adapter = hw->back;
+ struct libie_fwlog_api api = {
+ .pdev = adapter->pdev,
+ .send_cmd = __fwlog_send_cmd,
+ .debugfs_root = adapter->ixgbe_dbg_adapter,
+ .priv = hw,
+ };
+
+ if (hw->mac.type != ixgbe_mac_e610)
+ return -EOPNOTSUPP;
+
+ return libie_fwlog_init(&hw->fwlog, &api);
+}
+
+void ixgbe_fwlog_deinit(struct ixgbe_hw *hw)
+{
+ if (hw->mac.type != ixgbe_mac_e610)
+ return;
+
+ libie_fwlog_deinit(&hw->fwlog);
+}
+
static const struct ixgbe_mac_operations mac_ops_e610 = {
.init_hw = ixgbe_init_hw_generic,
.start_hw = ixgbe_start_hw_e610,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h
index 782c489b0fa7..11916b979d28 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h
@@ -96,5 +96,7 @@ int ixgbe_aci_update_nvm(struct ixgbe_hw *hw, u16 module_typeid,
bool last_command, u8 command_flags);
int ixgbe_nvm_write_activate(struct ixgbe_hw *hw, u16 cmd_flags,
u8 *response_flags);
+int ixgbe_fwlog_init(struct ixgbe_hw *hw);
+void ixgbe_fwlog_deinit(struct ixgbe_hw *hw);
#endif /* _IXGBE_E610_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 1a2f1bdb91aa..2d660e9edb80 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1278,7 +1278,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
/* allocate temporary buffer to store rings in */
i = max_t(int, adapter->num_tx_queues + adapter->num_xdp_queues,
adapter->num_rx_queues);
- temp_ring = vmalloc(array_size(i, sizeof(struct ixgbe_ring)));
+ temp_ring = vmalloc_array(i, sizeof(struct ixgbe_ring));
if (!temp_ring) {
err = -ENOMEM;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 6218bdb7f941..90d4e57b1c93 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -172,6 +172,7 @@ static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+MODULE_IMPORT_NS("LIBIE_FWLOG");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL v2");
@@ -3355,6 +3356,10 @@ static void ixgbe_handle_fw_event(struct ixgbe_adapter *adapter)
e_crit(drv, "%s\n", ixgbe_overheat_msg);
ixgbe_down(adapter);
break;
+ case libie_aqc_opc_fw_logs_event:
+ libie_get_fwlog_data(&hw->fwlog, event.msg_buf,
+ le16_to_cpu(event.desc.datalen));
+ break;
default:
e_warn(hw, "unknown FW async event captured\n");
break;
@@ -11999,6 +12004,10 @@ skip_sriov:
ixgbe_devlink_init_regions(adapter);
devl_register(adapter->devlink);
devl_unlock(adapter->devlink);
+
+ if (ixgbe_fwlog_init(hw))
+ e_dev_info("Firmware logging not supported\n");
+
return 0;
err_netdev:
@@ -12056,6 +12065,7 @@ static void ixgbe_remove(struct pci_dev *pdev)
devl_lock(adapter->devlink);
devl_unregister(adapter->devlink);
ixgbe_devlink_destroy_regions(adapter);
+ ixgbe_fwlog_deinit(&adapter->hw);
ixgbe_dbg_adapter_exit(adapter);
set_bit(__IXGBE_REMOVING, &adapter->state);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 36577091cd9e..b1bfeb21537a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -7,6 +7,7 @@
#include <linux/types.h>
#include <linux/mdio.h>
#include <linux/netdevice.h>
+#include <linux/net/intel/libie/fwlog.h>
#include "ixgbe_type_e610.h"
/* Device IDs */
@@ -3752,6 +3753,7 @@ struct ixgbe_hw {
struct ixgbe_flash_info flash;
struct ixgbe_hw_dev_caps dev_caps;
struct ixgbe_hw_func_caps func_caps;
+ struct libie_fwlog fwlog;
};
struct ixgbe_info {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index c2353aed0120..e67e2feb045b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -373,9 +373,9 @@ static int ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
}
}
- checksum = (u16)IXGBE_EEPROM_SUM - checksum;
+ checksum = IXGBE_EEPROM_SUM - checksum;
- return (int)checksum;
+ return checksum;
}
/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index bfa647086c70..76d2fa3ef518 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -1060,9 +1060,9 @@ static int ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer,
return status;
}
- checksum = (u16)IXGBE_EEPROM_SUM - checksum;
+ checksum = IXGBE_EEPROM_SUM - checksum;
- return (int)checksum;
+ return checksum;
}
/** ixgbe_calc_eeprom_checksum_X550 - Calculates and returns the checksum
@@ -1163,7 +1163,7 @@ static int ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw,
return status;
}
-/** ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif
+/** ixgbe_write_ee_hostif_data_X550 - Write EEPROM word using hostif
* @hw: pointer to hardware structure
* @offset: offset of word in the EEPROM to write
* @data: word write to the EEPROM
@@ -2318,7 +2318,7 @@ static int ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
}
/**
- * ixgbe_get_lasi_ext_t_x550em - Determime external Base T PHY interrupt cause
+ * ixgbe_get_lasi_ext_t_x550em - Determine external Base T PHY interrupt cause
* @hw: pointer to hardware structure
* @lsc: pointer to boolean flag which indicates whether external Base T
* PHY interrupt is lsc
@@ -2628,7 +2628,7 @@ static int ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up)
}
/** ixgbe_setup_internal_phy_t_x550em - Configure KR PHY to X557 link
- * @hw: point to hardware structure
+ * @hw: pointer to hardware structure
*
* Configures the link between the integrated KR PHY and the external X557 PHY
* The driver will call this function when it gets a link status change
@@ -2745,7 +2745,7 @@ static int ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
return -EINVAL;
- /* To turn on the LED, set mode to ON. */
+ /* To turn off the LED, set mode to OFF. */
hw->phy.ops.read_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
MDIO_MMD_VEND1, &phy_data);
phy_data &= ~IXGBE_X557_LED_MANUAL_SET_MASK;
@@ -2812,7 +2812,7 @@ int ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
return ret_val;
}
-/** ixgbe_get_lcd_x550em - Determine lowest common denominator
+/** ixgbe_get_lcd_t_x550em - Determine lowest common denominator
* @hw: pointer to hardware structure
* @lcd_speed: pointer to lowest common link speed
*
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index 7ac53171b041..bebad564188e 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -276,9 +276,9 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
}
if (new_tx_count != adapter->tx_ring_count) {
- tx_ring = vmalloc(array_size(sizeof(*tx_ring),
- adapter->num_tx_queues +
- adapter->num_xdp_queues));
+ tx_ring = vmalloc_array(adapter->num_tx_queues +
+ adapter->num_xdp_queues,
+ sizeof(*tx_ring));
if (!tx_ring) {
err = -ENOMEM;
goto clear_reset;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 535d0f71f521..28e25641b167 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -4323,7 +4323,7 @@ static int ixgbevf_resume(struct device *dev_d)
struct pci_dev *pdev = to_pci_dev(dev_d);
struct net_device *netdev = pci_get_drvdata(pdev);
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
- u32 err;
+ int err;
adapter->hw.hw_addr = adapter->io_addr;
smp_mb__before_atomic();
diff --git a/drivers/net/ethernet/intel/libie/Kconfig b/drivers/net/ethernet/intel/libie/Kconfig
index e6072758e3d8..70831c7e336e 100644
--- a/drivers/net/ethernet/intel/libie/Kconfig
+++ b/drivers/net/ethernet/intel/libie/Kconfig
@@ -14,3 +14,12 @@ config LIBIE_ADMINQ
help
Helper functions used by Intel Ethernet drivers for administration
queue command interface (aka adminq).
+
+config LIBIE_FWLOG
+ tristate
+ select LIBIE_ADMINQ
+ help
+ Library to support firmware logging on devices that support it.
+ Firmware logging uses the admin queue interface to communicate
+ with the device. Debugfs is the user interface used to configure
+ logging and dump all collected logs.
diff --git a/drivers/net/ethernet/intel/libie/Makefile b/drivers/net/ethernet/intel/libie/Makefile
index e98f00b865d3..db57fc6780ea 100644
--- a/drivers/net/ethernet/intel/libie/Makefile
+++ b/drivers/net/ethernet/intel/libie/Makefile
@@ -8,3 +8,7 @@ libie-y := rx.o
obj-$(CONFIG_LIBIE_ADMINQ) += libie_adminq.o
libie_adminq-y := adminq.o
+
+obj-$(CONFIG_LIBIE_FWLOG) += libie_fwlog.o
+
+libie_fwlog-y := fwlog.o
diff --git a/drivers/net/ethernet/intel/libie/fwlog.c b/drivers/net/ethernet/intel/libie/fwlog.c
new file mode 100644
index 000000000000..f39cc11cb7c5
--- /dev/null
+++ b/drivers/net/ethernet/intel/libie/fwlog.c
@@ -0,0 +1,1115 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022, Intel Corporation. */
+
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/net/intel/libie/fwlog.h>
+#include <linux/pci.h>
+#include <linux/random.h>
+#include <linux/vmalloc.h>
+
+#define DEFAULT_SYMBOL_NAMESPACE "LIBIE_FWLOG"
+
+/* create a define that has an extra module that doesn't really exist. this
+ * is so we can add a module 'all' to easily enable/disable all the modules
+ */
+#define LIBIE_NR_FW_LOG_MODULES (LIBIE_AQC_FW_LOG_ID_MAX + 1)
+
+/* the ordering in this array is important. it matches the ordering of the
+ * values in the FW so the index is the same value as in
+ * libie_aqc_fw_logging_mod
+ */
+static const char * const libie_fwlog_module_string[] = {
+ "general",
+ "ctrl",
+ "link",
+ "link_topo",
+ "dnl",
+ "i2c",
+ "sdp",
+ "mdio",
+ "adminq",
+ "hdma",
+ "lldp",
+ "dcbx",
+ "dcb",
+ "xlr",
+ "nvm",
+ "auth",
+ "vpd",
+ "iosf",
+ "parser",
+ "sw",
+ "scheduler",
+ "txq",
+ "rsvd",
+ "post",
+ "watchdog",
+ "task_dispatch",
+ "mng",
+ "synce",
+ "health",
+ "tsdrv",
+ "pfreg",
+ "mdlver",
+ "all",
+};
+
+/* the ordering in this array is important. it matches the ordering of the
+ * values in the FW so the index is the same value as in libie_fwlog_level
+ */
+static const char * const libie_fwlog_level_string[] = {
+ "none",
+ "error",
+ "warning",
+ "normal",
+ "verbose",
+};
+
+static const char * const libie_fwlog_log_size[] = {
+ "128K",
+ "256K",
+ "512K",
+ "1M",
+ "2M",
+};
+
+static bool libie_fwlog_ring_empty(struct libie_fwlog_ring *rings)
+{
+ return rings->head == rings->tail;
+}
+
+static void libie_fwlog_ring_increment(u16 *item, u16 size)
+{
+ *item = (*item + 1) & (size - 1);
+}
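+
+/*
+ * The mask-based wrap above relies on the ring size being a power of
+ * two: e.g. with size 32, (31 + 1) & 31 == 0, while a size of 24 would
+ * wrap incorrectly. The default and reallocated sizes satisfy this,
+ * assuming LIBIE_AQ_MAX_BUF_LEN is itself a power of two.
+ */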
+
+static int libie_fwlog_alloc_ring_buffs(struct libie_fwlog_ring *rings)
+{
+ int i, nr_bytes;
+ u8 *mem;
+
+ nr_bytes = rings->size * LIBIE_AQ_MAX_BUF_LEN;
+ mem = vzalloc(nr_bytes);
+ if (!mem)
+ return -ENOMEM;
+
+ for (i = 0; i < rings->size; i++) {
+ struct libie_fwlog_data *ring = &rings->rings[i];
+
+ ring->data_size = LIBIE_AQ_MAX_BUF_LEN;
+ ring->data = mem;
+ mem += LIBIE_AQ_MAX_BUF_LEN;
+ }
+
+ return 0;
+}
+
+static void libie_fwlog_free_ring_buffs(struct libie_fwlog_ring *rings)
+{
+ int i;
+
+ for (i = 0; i < rings->size; i++) {
+ struct libie_fwlog_data *ring = &rings->rings[i];
+
+ /* the first ring is the base memory for the whole range so
+ * free it
+ */
+ if (!i)
+ vfree(ring->data);
+
+ ring->data = NULL;
+ ring->data_size = 0;
+ }
+}
+
+#define LIBIE_FWLOG_INDEX_TO_BYTES(n) ((128 * 1024) << (n))
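+
+/*
+ * Illustrative mapping, matching libie_fwlog_log_size[] above: index
+ * 0 -> 128K, 1 -> 256K, 2 -> 512K, 3 -> 1M, 4 -> 2M. Assuming 4K admin
+ * queue buffers, that is 32 to 512 ring entries.
+ */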
+/**
+ * libie_fwlog_realloc_rings - reallocate the FW log rings
+ * @fwlog: pointer to the fwlog structure
+ * @index: the new index to use to allocate memory for the log data
+ */
+static void libie_fwlog_realloc_rings(struct libie_fwlog *fwlog, int index)
+{
+ struct libie_fwlog_ring ring;
+ int status, ring_size;
+
+ /* convert the number of bytes into a number of 4K buffers. externally
+ * the driver presents the interface to the FW log data as a number of
+ * bytes because that's easy for users to understand. internally the
+ * driver uses a ring of buffers because the driver doesn't know where
+ * the beginning and end of any line of log data is so the driver has
+ * to overwrite data as complete blocks. when the data is returned to
+ * the user the driver knows that the data is correct and the FW log
+ * can be correctly parsed by the tools
+ */
+ ring_size = LIBIE_FWLOG_INDEX_TO_BYTES(index) / LIBIE_AQ_MAX_BUF_LEN;
+ if (ring_size == fwlog->ring.size)
+ return;
+
+ /* allocate space for the new rings and buffers then release the
+ * old rings and buffers. that way if we don't have enough
+ * memory then we at least have what we had before
+ */
+ ring.rings = kcalloc(ring_size, sizeof(*ring.rings), GFP_KERNEL);
+ if (!ring.rings)
+ return;
+
+ ring.size = ring_size;
+
+ status = libie_fwlog_alloc_ring_buffs(&ring);
+ if (status) {
+ dev_warn(&fwlog->pdev->dev, "Unable to allocate memory for FW log ring data buffers\n");
+ libie_fwlog_free_ring_buffs(&ring);
+ kfree(ring.rings);
+ return;
+ }
+
+ libie_fwlog_free_ring_buffs(&fwlog->ring);
+ kfree(fwlog->ring.rings);
+
+ fwlog->ring.rings = ring.rings;
+ fwlog->ring.size = ring.size;
+ fwlog->ring.index = index;
+ fwlog->ring.head = 0;
+ fwlog->ring.tail = 0;
+}
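+
+/*
+ * E.g. growing from index 0 (128K) to index 2 (512K) allocates the 128
+ * new buffers (assuming 4K each) before the old 32 are freed, so a
+ * failed allocation leaves the previous log data intact.
+ */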
+
+/**
+ * libie_fwlog_supported - Return the cached FW logging support flag
+ * @fwlog: pointer to the fwlog structure
+ *
+ * This always returns false if called before libie_init_hw(), so it must only
+ * be called afterwards.
+ */
+static bool libie_fwlog_supported(struct libie_fwlog *fwlog)
+{
+ return fwlog->supported;
+}
+
+/**
+ * libie_aq_fwlog_set - Set FW logging configuration AQ command (0xFF30)
+ * @fwlog: pointer to the fwlog structure
+ * @entries: entries to configure
+ * @num_entries: number of @entries
+ * @options: options from libie_fwlog_cfg->options structure
+ * @log_resolution: logging resolution
+ */
+static int
+libie_aq_fwlog_set(struct libie_fwlog *fwlog,
+ struct libie_fwlog_module_entry *entries, u16 num_entries,
+ u16 options, u16 log_resolution)
+{
+ struct libie_aqc_fw_log_cfg_resp *fw_modules;
+ struct libie_aq_desc desc = {0};
+ struct libie_aqc_fw_log *cmd;
+ int status;
+ int i;
+
+ fw_modules = kcalloc(num_entries, sizeof(*fw_modules), GFP_KERNEL);
+ if (!fw_modules)
+ return -ENOMEM;
+
+ for (i = 0; i < num_entries; i++) {
+ fw_modules[i].module_identifier =
+ cpu_to_le16(entries[i].module_id);
+ fw_modules[i].log_level = entries[i].log_level;
+ }
+
+ desc.opcode = cpu_to_le16(libie_aqc_opc_fw_logs_config);
+ desc.flags = cpu_to_le16(LIBIE_AQ_FLAG_SI) |
+ cpu_to_le16(LIBIE_AQ_FLAG_RD);
+
+ cmd = libie_aq_raw(&desc);
+
+ cmd->cmd_flags = LIBIE_AQC_FW_LOG_CONF_SET_VALID;
+ cmd->ops.cfg.log_resolution = cpu_to_le16(log_resolution);
+ cmd->ops.cfg.mdl_cnt = cpu_to_le16(num_entries);
+
+ if (options & LIBIE_FWLOG_OPTION_ARQ_ENA)
+ cmd->cmd_flags |= LIBIE_AQC_FW_LOG_CONF_AQ_EN;
+ if (options & LIBIE_FWLOG_OPTION_UART_ENA)
+ cmd->cmd_flags |= LIBIE_AQC_FW_LOG_CONF_UART_EN;
+
+ status = fwlog->send_cmd(fwlog->priv, &desc, fw_modules,
+ sizeof(*fw_modules) * num_entries);
+
+ kfree(fw_modules);
+
+ return status;
+}
+
+/**
+ * libie_fwlog_set - Set the firmware logging settings
+ * @fwlog: pointer to the fwlog structure
+ * @cfg: config used to set firmware logging
+ *
+ * This function should be called whenever the driver needs to set the firmware
+ * logging configuration. It can be called on initialization, reset, or during
+ * runtime.
+ *
+ * If the PF wishes to receive FW logging then it must register via
+ * libie_fwlog_register. Note that libie_fwlog_register does not need to be
+ * called for init.
+ */
+static int libie_fwlog_set(struct libie_fwlog *fwlog,
+ struct libie_fwlog_cfg *cfg)
+{
+ if (!libie_fwlog_supported(fwlog))
+ return -EOPNOTSUPP;
+
+ return libie_aq_fwlog_set(fwlog, cfg->module_entries,
+ LIBIE_AQC_FW_LOG_ID_MAX, cfg->options,
+ cfg->log_resolution);
+}
+
+/**
+ * libie_aq_fwlog_register - Register PF for firmware logging events (0xFF31)
+ * @fwlog: pointer to the fwlog structure
+ * @reg: true to register and false to unregister
+ */
+static int libie_aq_fwlog_register(struct libie_fwlog *fwlog, bool reg)
+{
+ struct libie_aq_desc desc = {0};
+ struct libie_aqc_fw_log *cmd;
+
+ desc.opcode = cpu_to_le16(libie_aqc_opc_fw_logs_register);
+ desc.flags = cpu_to_le16(LIBIE_AQ_FLAG_SI);
+ cmd = libie_aq_raw(&desc);
+
+ if (reg)
+ cmd->cmd_flags = LIBIE_AQC_FW_LOG_AQ_REGISTER;
+
+ return fwlog->send_cmd(fwlog->priv, &desc, NULL, 0);
+}
+
+/**
+ * libie_fwlog_register - Register the PF for firmware logging
+ * @fwlog: pointer to the fwlog structure
+ *
+ * After this call the PF will start to receive firmware logging based on the
+ * configuration set in libie_fwlog_set.
+ */
+static int libie_fwlog_register(struct libie_fwlog *fwlog)
+{
+ int status;
+
+ if (!libie_fwlog_supported(fwlog))
+ return -EOPNOTSUPP;
+
+ status = libie_aq_fwlog_register(fwlog, true);
+ if (status)
+ dev_dbg(&fwlog->pdev->dev, "Failed to register for firmware logging events over ARQ\n");
+ else
+ fwlog->cfg.options |= LIBIE_FWLOG_OPTION_IS_REGISTERED;
+
+ return status;
+}
+
+/**
+ * libie_fwlog_unregister - Unregister the PF from firmware logging
+ * @fwlog: pointer to the fwlog structure
+ */
+static int libie_fwlog_unregister(struct libie_fwlog *fwlog)
+{
+ int status;
+
+ if (!libie_fwlog_supported(fwlog))
+ return -EOPNOTSUPP;
+
+ status = libie_aq_fwlog_register(fwlog, false);
+ if (status)
+ dev_dbg(&fwlog->pdev->dev, "Failed to unregister from firmware logging events over ARQ\n");
+ else
+ fwlog->cfg.options &= ~LIBIE_FWLOG_OPTION_IS_REGISTERED;
+
+ return status;
+}
+
+/**
+ * libie_fwlog_print_module_cfg - print current FW logging module configuration
+ * @cfg: pointer to the fwlog cfg structure
+ * @module: module to print
+ * @s: the seq file to put data into
+ */
+static void
+libie_fwlog_print_module_cfg(struct libie_fwlog_cfg *cfg, int module,
+ struct seq_file *s)
+{
+ struct libie_fwlog_module_entry *entry;
+
+ if (module != LIBIE_AQC_FW_LOG_ID_MAX) {
+ entry = &cfg->module_entries[module];
+
+ seq_printf(s, "\tModule: %s, Log Level: %s\n",
+ libie_fwlog_module_string[entry->module_id],
+ libie_fwlog_level_string[entry->log_level]);
+ } else {
+ int i;
+
+ for (i = 0; i < LIBIE_AQC_FW_LOG_ID_MAX; i++) {
+ entry = &cfg->module_entries[i];
+
+ seq_printf(s, "\tModule: %s, Log Level: %s\n",
+ libie_fwlog_module_string[entry->module_id],
+ libie_fwlog_level_string[entry->log_level]);
+ }
+ }
+}
+
+static int libie_find_module_by_dentry(struct dentry **modules, struct dentry *d)
+{
+ int i, module;
+
+ module = -1;
+ /* find the module based on the dentry */
+ for (i = 0; i < LIBIE_NR_FW_LOG_MODULES; i++) {
+ if (d == modules[i]) {
+ module = i;
+ break;
+ }
+ }
+
+ return module;
+}
+
+/**
+ * libie_debugfs_module_show - read from 'module' file
+ * @s: the opened file
+ * @v: pointer to the offset
+ */
+static int libie_debugfs_module_show(struct seq_file *s, void *v)
+{
+ struct libie_fwlog *fwlog = s->private;
+ const struct file *filp = s->file;
+ struct dentry *dentry;
+ int module;
+
+ dentry = file_dentry(filp);
+
+ module = libie_find_module_by_dentry(fwlog->debugfs_modules, dentry);
+ if (module < 0) {
+ dev_info(&fwlog->pdev->dev, "unknown module\n");
+ return -EINVAL;
+ }
+
+ libie_fwlog_print_module_cfg(&fwlog->cfg, module, s);
+
+ return 0;
+}
+
+static int libie_debugfs_module_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, libie_debugfs_module_show, inode->i_private);
+}
+
+/**
+ * libie_debugfs_module_write - write into 'module' file
+ * @filp: the opened file
+ * @buf: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ */
+static ssize_t
+libie_debugfs_module_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct libie_fwlog *fwlog = file_inode(filp)->i_private;
+ struct dentry *dentry = file_dentry(filp);
+ struct device *dev = &fwlog->pdev->dev;
+ char user_val[16], *cmd_buf;
+ int module, log_level, cnt;
+
+ /* don't allow partial writes or invalid input */
+ if (*ppos != 0 || count > 8)
+ return -EINVAL;
+
+ cmd_buf = memdup_user_nul(buf, count);
+ if (IS_ERR(cmd_buf))
+ return PTR_ERR(cmd_buf);
+
+ /* parse and free the copy immediately so no error path can leak it */
+ cnt = sscanf(cmd_buf, "%s", user_val);
+ kfree(cmd_buf);
+ if (cnt != 1)
+ return -EINVAL;
+
+ module = libie_find_module_by_dentry(fwlog->debugfs_modules, dentry);
+ if (module < 0) {
+ dev_info(dev, "unknown module\n");
+ return -EINVAL;
+ }
+
+ log_level = sysfs_match_string(libie_fwlog_level_string, user_val);
+ if (log_level < 0) {
+ dev_info(dev, "unknown log level '%s'\n", user_val);
+ return -EINVAL;
+ }
+
+ if (module != LIBIE_AQC_FW_LOG_ID_MAX) {
+ fwlog->cfg.module_entries[module].log_level = log_level;
+ } else {
+ /* the module 'all' is a shortcut so that we can set
+ * all of the modules to the same level quickly
+ */
+ int i;
+
+ for (i = 0; i < LIBIE_AQC_FW_LOG_ID_MAX; i++)
+ fwlog->cfg.module_entries[i].log_level = log_level;
+ }
+
+ return count;
+}
+
+static const struct file_operations libie_debugfs_module_fops = {
+ .owner = THIS_MODULE,
+ .open = libie_debugfs_module_open,
+ .read = seq_read,
+ .release = single_release,
+ .write = libie_debugfs_module_write,
+};
+
+/**
+ * libie_debugfs_nr_messages_read - read from 'nr_messages' file
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ */
+static ssize_t libie_debugfs_nr_messages_read(struct file *filp,
+ char __user *buffer, size_t count,
+ loff_t *ppos)
+{
+ struct libie_fwlog *fwlog = filp->private_data;
+ char buff[32] = {};
+
+ snprintf(buff, sizeof(buff), "%d\n",
+ fwlog->cfg.log_resolution);
+
+ return simple_read_from_buffer(buffer, count, ppos, buff, strlen(buff));
+}
+
+/**
+ * libie_debugfs_nr_messages_write - write into 'nr_messages' file
+ * @filp: the opened file
+ * @buf: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ */
+static ssize_t
+libie_debugfs_nr_messages_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct libie_fwlog *fwlog = filp->private_data;
+ struct device *dev = &fwlog->pdev->dev;
+ char user_val[8], *cmd_buf;
+ s16 nr_messages;
+ ssize_t ret;
+
+ /* don't allow partial writes or invalid input */
+ if (*ppos != 0 || count > 4)
+ return -EINVAL;
+
+ cmd_buf = memdup_user_nul(buf, count);
+ if (IS_ERR(cmd_buf))
+ return PTR_ERR(cmd_buf);
+
+ ret = sscanf(cmd_buf, "%s", user_val);
+ kfree(cmd_buf);
+ if (ret != 1)
+ return -EINVAL;
+
+ ret = kstrtos16(user_val, 0, &nr_messages);
+ if (ret)
+ return ret;
+
+ if (nr_messages < LIBIE_AQC_FW_LOG_MIN_RESOLUTION ||
+ nr_messages > LIBIE_AQC_FW_LOG_MAX_RESOLUTION) {
+ dev_err(dev, "Invalid FW log number of messages %d, value must be between %d - %d\n",
+ nr_messages, LIBIE_AQC_FW_LOG_MIN_RESOLUTION,
+ LIBIE_AQC_FW_LOG_MAX_RESOLUTION);
+ return -EINVAL;
+ }
+
+ fwlog->cfg.log_resolution = nr_messages;
+
+ return count;
+}
+
+static const struct file_operations libie_debugfs_nr_messages_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = libie_debugfs_nr_messages_read,
+ .write = libie_debugfs_nr_messages_write,
+};
+
+/**
+ * libie_debugfs_enable_read - read from 'enable' file
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ */
+static ssize_t libie_debugfs_enable_read(struct file *filp,
+ char __user *buffer, size_t count,
+ loff_t *ppos)
+{
+ struct libie_fwlog *fwlog = filp->private_data;
+ char buff[32] = {};
+
+ snprintf(buff, sizeof(buff), "%u\n",
+ (u16)(fwlog->cfg.options &
+ LIBIE_FWLOG_OPTION_IS_REGISTERED) >> 3);
+
+ return simple_read_from_buffer(buffer, count, ppos, buff, strlen(buff));
+}
+
+/**
+ * libie_debugfs_enable_write - write into 'enable' file
+ * @filp: the opened file
+ * @buf: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ */
+static ssize_t
+libie_debugfs_enable_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct libie_fwlog *fwlog = filp->private_data;
+ char user_val[8], *cmd_buf;
+ bool enable;
+ ssize_t ret;
+
+ /* don't allow partial writes or invalid input */
+ if (*ppos != 0 || count > 2)
+ return -EINVAL;
+
+ cmd_buf = memdup_user_nul(buf, count);
+ if (IS_ERR(cmd_buf))
+ return PTR_ERR(cmd_buf);
+
+ ret = sscanf(cmd_buf, "%s", user_val);
+ kfree(cmd_buf);
+ if (ret != 1)
+ return -EINVAL;
+
+ ret = kstrtobool(user_val, &enable);
+ if (ret)
+ goto enable_write_error;
+
+ if (enable)
+ fwlog->cfg.options |= LIBIE_FWLOG_OPTION_ARQ_ENA;
+ else
+ fwlog->cfg.options &= ~LIBIE_FWLOG_OPTION_ARQ_ENA;
+
+ ret = libie_fwlog_set(fwlog, &fwlog->cfg);
+ if (ret)
+ goto enable_write_error;
+
+ if (enable)
+ ret = libie_fwlog_register(fwlog);
+ else
+ ret = libie_fwlog_unregister(fwlog);
+
+ if (ret)
+ goto enable_write_error;
+
+ /* if we get here, nothing went wrong; return count since we didn't
+ * really write anything
+ */
+ ret = (ssize_t)count;
+
+enable_write_error:
+ /* This function always consumes all of the written input, or produces
+ * an error. Check and enforce this. Otherwise, the write operation
+ * won't complete properly.
+ */
+ if (WARN_ON(ret != (ssize_t)count && ret >= 0))
+ ret = -EIO;
+
+ return ret;
+}
+
+static const struct file_operations libie_debugfs_enable_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = libie_debugfs_enable_read,
+ .write = libie_debugfs_enable_write,
+};
+
+/**
+ * libie_debugfs_log_size_read - read from 'log_size' file
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ */
+static ssize_t libie_debugfs_log_size_read(struct file *filp,
+ char __user *buffer, size_t count,
+ loff_t *ppos)
+{
+ struct libie_fwlog *fwlog = filp->private_data;
+ char buff[32] = {};
+ int index;
+
+ index = fwlog->ring.index;
+ snprintf(buff, sizeof(buff), "%s\n", libie_fwlog_log_size[index]);
+
+ return simple_read_from_buffer(buffer, count, ppos, buff, strlen(buff));
+}
+
+/**
+ * libie_debugfs_log_size_write - write into 'log_size' file
+ * @filp: the opened file
+ * @buf: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ */
+static ssize_t
+libie_debugfs_log_size_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct libie_fwlog *fwlog = filp->private_data;
+ struct device *dev = &fwlog->pdev->dev;
+ char user_val[8], *cmd_buf;
+ ssize_t ret;
+ int index;
+
+ /* don't allow partial writes or invalid input */
+ if (*ppos != 0 || count > 5)
+ return -EINVAL;
+
+ cmd_buf = memdup_user_nul(buf, count);
+ if (IS_ERR(cmd_buf))
+ return PTR_ERR(cmd_buf);
+
+ ret = sscanf(cmd_buf, "%s", user_val);
+ kfree(cmd_buf);
+ if (ret != 1)
+ return -EINVAL;
+
+ index = sysfs_match_string(libie_fwlog_log_size, user_val);
+ if (index < 0) {
+ dev_info(dev, "Invalid log size '%s'. The value must be one of 128K, 256K, 512K, 1M, 2M\n",
+ user_val);
+ ret = -EINVAL;
+ goto log_size_write_error;
+ } else if (fwlog->cfg.options & LIBIE_FWLOG_OPTION_IS_REGISTERED) {
+ dev_info(dev, "FW logging is currently running. Please disable FW logging to change log_size\n");
+ ret = -EINVAL;
+ goto log_size_write_error;
+ }
+
+ /* free all the buffers and the tracking info and resize */
+ libie_fwlog_realloc_rings(fwlog, index);
+
+ /* if we get here, nothing went wrong; return count since we didn't
+ * really write anything
+ */
+ ret = (ssize_t)count;
+
+log_size_write_error:
+ /* This function always consumes all of the written input, or produces
+ * an error. Check and enforce this. Otherwise, the write operation
+ * won't complete properly.
+ */
+ if (WARN_ON(ret != (ssize_t)count && ret >= 0))
+ ret = -EIO;
+
+ return ret;
+}
+
+static const struct file_operations libie_debugfs_log_size_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = libie_debugfs_log_size_read,
+ .write = libie_debugfs_log_size_write,
+};
+
+/**
+ * libie_debugfs_data_read - read from 'data' file
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ */
+static ssize_t libie_debugfs_data_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct libie_fwlog *fwlog = filp->private_data;
+ int data_copied = 0;
+ bool done = false;
+
+ if (libie_fwlog_ring_empty(&fwlog->ring))
+ return 0;
+
+ while (!libie_fwlog_ring_empty(&fwlog->ring) && !done) {
+ struct libie_fwlog_data *log;
+ u16 cur_buf_len;
+
+ log = &fwlog->ring.rings[fwlog->ring.head];
+ cur_buf_len = log->data_size;
+ if (cur_buf_len >= count) {
+ done = true;
+ continue;
+ }
+
+ if (copy_to_user(buffer, log->data, cur_buf_len)) {
+ /* if there is an error then bail and return whatever
+ * the driver has copied so far
+ */
+ done = true;
+ continue;
+ }
+
+ data_copied += cur_buf_len;
+ buffer += cur_buf_len;
+ count -= cur_buf_len;
+ *ppos += cur_buf_len;
+ libie_fwlog_ring_increment(&fwlog->ring.head, fwlog->ring.size);
+ }
+
+ return data_copied;
+}
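+
+/*
+ * Reads drain whole ring entries only: a read smaller than the next
+ * entry returns what has been copied so far (possibly 0). E.g. a 64K
+ * user buffer drains up to 16 entries per read, assuming 4K entries.
+ */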
+
+/**
+ * libie_debugfs_data_write - write into 'data' file
+ * @filp: the opened file
+ * @buf: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ */
+static ssize_t
+libie_debugfs_data_write(struct file *filp, const char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ struct libie_fwlog *fwlog = filp->private_data;
+ struct device *dev = &fwlog->pdev->dev;
+ ssize_t ret;
+
+ /* don't allow partial writes */
+ if (*ppos != 0)
+ return 0;
+
+ /* any value is allowed to clear the buffer so no need to even look at
+ * what the value is
+ */
+ if (!(fwlog->cfg.options & LIBIE_FWLOG_OPTION_IS_REGISTERED)) {
+ fwlog->ring.head = 0;
+ fwlog->ring.tail = 0;
+ } else {
+ dev_info(dev, "Can't clear FW log data while FW log running\n");
+ ret = -EINVAL;
+ goto nr_buffs_write_error;
+ }
+
+ /* if we get here, nothing went wrong; return count since we didn't
+ * really write anything
+ */
+ ret = (ssize_t)count;
+
+nr_buffs_write_error:
+ /* This function always consumes all of the written input, or produces
+ * an error. Check and enforce this. Otherwise, the write operation
+ * won't complete properly.
+ */
+ if (WARN_ON(ret != (ssize_t)count && ret >= 0))
+ ret = -EIO;
+
+ return ret;
+}
+
+static const struct file_operations libie_debugfs_data_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = libie_debugfs_data_read,
+ .write = libie_debugfs_data_write,
+};
+
+/**
+ * libie_debugfs_fwlog_init - setup the debugfs directory
+ * @fwlog: pointer to the fwlog structure
+ * @root: debugfs root entry on which the fwlog directory will be registered
+ */
+static void libie_debugfs_fwlog_init(struct libie_fwlog *fwlog,
+ struct dentry *root)
+{
+ struct dentry *fw_modules_dir;
+ struct dentry **fw_modules;
+ int i;
+
+ /* allocate space for this first because if it fails then we don't
+ * need to unwind
+ */
+ fw_modules = kcalloc(LIBIE_NR_FW_LOG_MODULES, sizeof(*fw_modules),
+ GFP_KERNEL);
+ if (!fw_modules)
+ return;
+
+ fwlog->debugfs = debugfs_create_dir("fwlog", root);
+ if (IS_ERR(fwlog->debugfs))
+ goto err_create_module_files;
+
+ fw_modules_dir = debugfs_create_dir("modules", fwlog->debugfs);
+ if (IS_ERR(fw_modules_dir))
+ goto err_create_module_files;
+
+ for (i = 0; i < LIBIE_NR_FW_LOG_MODULES; i++) {
+ fw_modules[i] = debugfs_create_file(libie_fwlog_module_string[i],
+ 0600, fw_modules_dir, fwlog,
+ &libie_debugfs_module_fops);
+ if (IS_ERR(fw_modules[i]))
+ goto err_create_module_files;
+ }
+
+ debugfs_create_file("nr_messages", 0600, fwlog->debugfs, fwlog,
+ &libie_debugfs_nr_messages_fops);
+
+ fwlog->debugfs_modules = fw_modules;
+
+ debugfs_create_file("enable", 0600, fwlog->debugfs, fwlog,
+ &libie_debugfs_enable_fops);
+
+ debugfs_create_file("log_size", 0600, fwlog->debugfs, fwlog,
+ &libie_debugfs_log_size_fops);
+
+ debugfs_create_file("data", 0600, fwlog->debugfs, fwlog,
+ &libie_debugfs_data_fops);
+
+ return;
+
+err_create_module_files:
+ debugfs_remove_recursive(fwlog->debugfs);
+ kfree(fw_modules);
+}
+
+static bool libie_fwlog_ring_full(struct libie_fwlog_ring *rings)
+{
+ u16 head, tail;
+
+ head = rings->head;
+ tail = rings->tail;
+
+ if (head < tail && (tail - head == (rings->size - 1)))
+ return true;
+ else if (head > tail && (tail == (head - 1)))
+ return true;
+
+ return false;
+}
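+
+/*
+ * Full-ring examples: with size 4, head 0 and tail 3 is full
+ * (tail - head == size - 1), as is head 2 and tail 1 (tail == head - 1);
+ * one slot is always left unused to distinguish full from empty.
+ */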
+
+/**
+ * libie_aq_fwlog_get - Get the current firmware logging configuration (0xFF32)
+ * @fwlog: pointer to the fwlog structure
+ * @cfg: firmware logging configuration to populate
+ */
+static int libie_aq_fwlog_get(struct libie_fwlog *fwlog,
+ struct libie_fwlog_cfg *cfg)
+{
+ struct libie_aqc_fw_log_cfg_resp *fw_modules;
+ struct libie_aq_desc desc = {0};
+ struct libie_aqc_fw_log *cmd;
+ u16 module_id_cnt;
+ int status;
+ void *buf;
+ int i;
+
+ memset(cfg, 0, sizeof(*cfg));
+
+ buf = kzalloc(LIBIE_AQ_MAX_BUF_LEN, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ desc.opcode = cpu_to_le16(libie_aqc_opc_fw_logs_query);
+ desc.flags = cpu_to_le16(LIBIE_AQ_FLAG_SI);
+ cmd = libie_aq_raw(&desc);
+
+ cmd->cmd_flags = LIBIE_AQC_FW_LOG_AQ_QUERY;
+
+ status = fwlog->send_cmd(fwlog->priv, &desc, buf, LIBIE_AQ_MAX_BUF_LEN);
+ if (status) {
+ dev_dbg(&fwlog->pdev->dev, "Failed to get FW log configuration\n");
+ goto status_out;
+ }
+
+ module_id_cnt = le16_to_cpu(cmd->ops.cfg.mdl_cnt);
+ if (module_id_cnt < LIBIE_AQC_FW_LOG_ID_MAX) {
+ dev_dbg(&fwlog->pdev->dev, "FW returned less than the expected number of FW log module IDs\n");
+ } else if (module_id_cnt > LIBIE_AQC_FW_LOG_ID_MAX) {
+ dev_dbg(&fwlog->pdev->dev, "FW returned more than the expected number of FW log module IDs, setting module_id_cnt to the software expected max %u\n",
+ LIBIE_AQC_FW_LOG_ID_MAX);
+ module_id_cnt = LIBIE_AQC_FW_LOG_ID_MAX;
+ }
+
+ cfg->log_resolution = le16_to_cpu(cmd->ops.cfg.log_resolution);
+ if (cmd->cmd_flags & LIBIE_AQC_FW_LOG_CONF_AQ_EN)
+ cfg->options |= LIBIE_FWLOG_OPTION_ARQ_ENA;
+ if (cmd->cmd_flags & LIBIE_AQC_FW_LOG_CONF_UART_EN)
+ cfg->options |= LIBIE_FWLOG_OPTION_UART_ENA;
+ if (cmd->cmd_flags & LIBIE_AQC_FW_LOG_QUERY_REGISTERED)
+ cfg->options |= LIBIE_FWLOG_OPTION_IS_REGISTERED;
+
+ fw_modules = (struct libie_aqc_fw_log_cfg_resp *)buf;
+
+ for (i = 0; i < module_id_cnt; i++) {
+ struct libie_aqc_fw_log_cfg_resp *fw_module = &fw_modules[i];
+
+ cfg->module_entries[i].module_id =
+ le16_to_cpu(fw_module->module_identifier);
+ cfg->module_entries[i].log_level = fw_module->log_level;
+ }
+
+status_out:
+ kfree(buf);
+ return status;
+}
+
+/**
+ * libie_fwlog_set_supported - Set if FW logging is supported by FW
+ * @fwlog: pointer to the fwlog structure
+ *
+ * If FW returns success to the libie_aq_fwlog_get call then it supports FW
+ * logging, else it doesn't. Set the fwlog_supported flag accordingly.
+ *
+ * This function is only meant to be called during driver init to determine if
+ * the FW supports FW logging.
+ */
+static void libie_fwlog_set_supported(struct libie_fwlog *fwlog)
+{
+ struct libie_fwlog_cfg *cfg;
+ int status;
+
+ fwlog->supported = false;
+
+ cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+ if (!cfg)
+ return;
+
+ status = libie_aq_fwlog_get(fwlog, cfg);
+ if (status)
+ dev_dbg(&fwlog->pdev->dev, "libie_aq_fwlog_get failed, FW logging is not supported on this version of FW, status %d\n",
+ status);
+ else
+ fwlog->supported = true;
+
+ kfree(cfg);
+}
+
+/**
+ * libie_fwlog_init - Initialize FW logging configuration
+ * @fwlog: pointer to the fwlog structure
+ * @api: API structure used to initialize fwlog
+ *
+ * This function should be called on driver initialization during
+ * libie_init_hw().
+ */
+int libie_fwlog_init(struct libie_fwlog *fwlog, struct libie_fwlog_api *api)
+{
+ fwlog->api = *api;
+ libie_fwlog_set_supported(fwlog);
+
+ if (libie_fwlog_supported(fwlog)) {
+ int status;
+
+ /* read the current config from the FW and store it */
+ status = libie_aq_fwlog_get(fwlog, &fwlog->cfg);
+ if (status)
+ return status;
+
+ fwlog->ring.rings = kcalloc(LIBIE_FWLOG_RING_SIZE_DFLT,
+ sizeof(*fwlog->ring.rings),
+ GFP_KERNEL);
+ if (!fwlog->ring.rings) {
+ dev_warn(&fwlog->pdev->dev, "Unable to allocate memory for FW log rings\n");
+ return -ENOMEM;
+ }
+
+ fwlog->ring.size = LIBIE_FWLOG_RING_SIZE_DFLT;
+ fwlog->ring.index = LIBIE_FWLOG_RING_SIZE_INDEX_DFLT;
+
+ status = libie_fwlog_alloc_ring_buffs(&fwlog->ring);
+ if (status) {
+ dev_warn(&fwlog->pdev->dev, "Unable to allocate memory for FW log ring data buffers\n");
+ libie_fwlog_free_ring_buffs(&fwlog->ring);
+ kfree(fwlog->ring.rings);
+ return status;
+ }
+
+ libie_debugfs_fwlog_init(fwlog, api->debugfs_root);
+ } else {
+ dev_warn(&fwlog->pdev->dev, "FW logging is not supported in this NVM image. Please update the NVM to get FW log support\n");
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(libie_fwlog_init);
+
+/**
+ * libie_fwlog_deinit - unroll FW logging configuration
+ * @fwlog: pointer to the fwlog structure
+ *
+ * This function should be called in libie_deinit_hw().
+ */
+void libie_fwlog_deinit(struct libie_fwlog *fwlog)
+{
+ int status;
+
+ /* make sure FW logging is disabled to not put the FW in a weird state
+ * for the next driver load
+ */
+ fwlog->cfg.options &= ~LIBIE_FWLOG_OPTION_ARQ_ENA;
+ status = libie_fwlog_set(fwlog, &fwlog->cfg);
+ if (status)
+ dev_warn(&fwlog->pdev->dev, "Unable to turn off FW logging, status: %d\n",
+ status);
+
+ kfree(fwlog->debugfs_modules);
+
+ fwlog->debugfs_modules = NULL;
+
+ status = libie_fwlog_unregister(fwlog);
+ if (status)
+ dev_warn(&fwlog->pdev->dev, "Unable to unregister FW logging, status: %d\n",
+ status);
+
+ if (fwlog->ring.rings) {
+ libie_fwlog_free_ring_buffs(&fwlog->ring);
+ kfree(fwlog->ring.rings);
+ }
+}
+EXPORT_SYMBOL_GPL(libie_fwlog_deinit);
+
+/**
+ * libie_get_fwlog_data - copy the FW log data from ARQ event
+ * @fwlog: fwlog that the FW log event is associated with
+ * @buf: event buffer pointer
+ * @len: length of the event data
+ */
+void libie_get_fwlog_data(struct libie_fwlog *fwlog, u8 *buf, u16 len)
+{
+ struct libie_fwlog_data *log;
+
+ log = &fwlog->ring.rings[fwlog->ring.tail];
+
+ memset(log->data, 0, PAGE_SIZE);
+ log->data_size = len;
+
+ memcpy(log->data, buf, log->data_size);
+ libie_fwlog_ring_increment(&fwlog->ring.tail, fwlog->ring.size);
+
+ if (libie_fwlog_ring_full(&fwlog->ring)) {
+ /* the rings are full so bump the head to create room */
+ libie_fwlog_ring_increment(&fwlog->ring.head, fwlog->ring.size);
+ }
+}
+EXPORT_SYMBOL_GPL(libie_get_fwlog_data);
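
[Editor's note, not part of the patch: libie_get_fwlog_data() implements a drop-oldest policy — the new event is always stored at tail, and if that fills the ring, head is advanced over the oldest entry. A self-contained model of that policy, under the assumption that libie_fwlog_ring_increment() is a simple modular advance:

	static void ring_advance(u16 *idx, u16 size)
	{
		*idx = (*idx + 1) % size;	/* assumed increment-helper behavior */
	}

	static void ring_produce(u16 *head, u16 *tail, u16 size)
	{
		ring_advance(tail, size);		/* publish the new entry */
		if (((*tail + 1) % size) == *head)	/* ring now full? */
			ring_advance(head, size);	/* drop the oldest entry */
	}
]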
+
+void libie_fwlog_reregister(struct libie_fwlog *fwlog)
+{
+ if (!(fwlog->cfg.options & LIBIE_FWLOG_OPTION_IS_REGISTERED))
+ return;
+
+ if (libie_fwlog_register(fwlog))
+ fwlog->cfg.options &= ~LIBIE_FWLOG_OPTION_IS_REGISTERED;
+}
+EXPORT_SYMBOL_GPL(libie_fwlog_reregister);
+
+MODULE_DESCRIPTION("Intel(R) Ethernet common library");
+MODULE_LICENSE("GPL");
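
[Editor's note, not part of the patch: for context, a consumer of this library wires fwlog up once at probe time and tears it down on remove. Only libie_fwlog_init()/libie_fwlog_deinit() and the debugfs_root field appear in the code above; the struct, fields, and helper names below are hypothetical:

	struct example_priv {
		struct libie_fwlog fwlog;
		struct dentry *debugfs_root;
	};

	static int example_probe_fwlog(struct example_priv *priv)
	{
		struct libie_fwlog_api api = {
			.debugfs_root = priv->debugfs_root,
			/* plus the pdev/send_cmd plumbing the library calls back through */
		};

		return libie_fwlog_init(&priv->fwlog, &api);
	}

	static void example_remove_fwlog(struct example_priv *priv)
	{
		libie_fwlog_deinit(&priv->fwlog);
	}
]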
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 476e73e502fe..89ccb8eb82c7 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2416,10 +2416,9 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
skb->ip_summed = mvneta_rx_csum(pp, desc_status);
if (unlikely(xdp_buff_has_frags(xdp)))
- xdp_update_skb_shared_info(skb, num_frags,
- sinfo->xdp_frags_size,
- num_frags * xdp->frame_sz,
- xdp_buff_is_frag_pfmemalloc(xdp));
+ xdp_update_skb_frags_info(skb, num_frags, sinfo->xdp_frags_size,
+ num_frags * xdp->frame_sz,
+ xdp_buff_get_skb_flags(xdp));
return skb;
}
@@ -2985,6 +2984,13 @@ out:
if (txq->count >= txq->tx_stop_threshold)
netif_tx_stop_queue(nq);
+ /* This is not really the true transmit point, since we batch
+ * up several packets before hitting the hardware, but it is the
+ * best we can do without more complexity to walk the packets in
+ * the pending section of the transmit queue.
+ */
+ skb_tx_timestamp(skb);
+
if (!netdev_xmit_more() || netif_xmit_stopped(nq) ||
txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK)
mvneta_txq_pend_desc_add(pp, txq, frags);
@@ -5357,6 +5363,7 @@ static const struct ethtool_ops mvneta_eth_tool_ops = {
.set_link_ksettings = mvneta_ethtool_set_link_ksettings,
.get_wol = mvneta_ethtool_get_wol,
.set_wol = mvneta_ethtool_set_wol,
+ .get_ts_info = ethtool_op_get_ts_info,
.get_eee = mvneta_ethtool_get_eee,
.set_eee = mvneta_ethtool_set_eee,
};
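
[Editor's note, not part of the patch: the timestamp hunks above (and the matching mvpp2 hunk below) follow the standard software TX timestamping recipe — call skb_tx_timestamp() after the descriptors are queued but before the doorbell, and advertise the capability through ethtool. A generic sketch of the transmit-side half:

	static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		/* ...map buffers and queue TX descriptors for skb... */

		skb_tx_timestamp(skb);	/* software (and PHY) TX timestamp hook */

		/* ...ring the hardware doorbell... */
		return NETDEV_TX_OK;
	}
]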
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 8ebb985d2573..ab0c99aa9f9a 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -4439,6 +4439,8 @@ out:
txq_pcpu->count += frags;
aggr_txq->count += frags;
+ skb_tx_timestamp(skb);
+
/* Enable transmit */
wmb();
mvpp2_aggr_txq_pend_desc_add(port, frags);
@@ -5252,14 +5254,14 @@ static int mvpp2_ethtool_get_ts_info(struct net_device *dev,
{
struct mvpp2_port *port = netdev_priv(dev);
+ ethtool_op_get_ts_info(dev, info);
if (!port->hwtstamp)
- return -EOPNOTSUPP;
+ return 0;
info->phc_index = mvpp22_tai_ptp_clock_index(port->priv->tai);
- info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_TX_HARDWARE |
- SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
info->tx_types = BIT(HWTSTAMP_TX_OFF) |
BIT(HWTSTAMP_TX_ON);
info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
@@ -6222,6 +6224,12 @@ static struct mvpp2_port *mvpp2_pcs_gmac_to_port(struct phylink_pcs *pcs)
return container_of(pcs, struct mvpp2_port, pcs_gmac);
}
+static unsigned int mvpp2_xlg_pcs_inband_caps(struct phylink_pcs *pcs,
+ phy_interface_t interface)
+{
+ return LINK_INBAND_DISABLE;
+}
+
static void mvpp2_xlg_pcs_get_state(struct phylink_pcs *pcs,
unsigned int neg_mode,
struct phylink_link_state *state)
@@ -6256,6 +6264,7 @@ static int mvpp2_xlg_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
}
static const struct phylink_pcs_ops mvpp2_phylink_xlg_pcs_ops = {
+ .pcs_inband_caps = mvpp2_xlg_pcs_inband_caps,
.pcs_get_state = mvpp2_xlg_pcs_get_state,
.pcs_config = mvpp2_xlg_pcs_config,
};
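
[Editor's note, not part of the patch: the mvpp2 get_ts_info change shows the merge pattern — seed the response with the generic software capabilities via ethtool_op_get_ts_info(), then OR in hardware bits only when a PHC is actually present. A sketch; example_has_phc() and example_phc_index() are hypothetical helpers, and the info struct is named kernel_ethtool_ts_info in recent kernels:

	static int example_get_ts_info(struct net_device *dev,
				       struct kernel_ethtool_ts_info *info)
	{
		ethtool_op_get_ts_info(dev, info);	/* software TX/RX timestamping */

		if (!example_has_phc(dev))
			return 0;

		info->phc_index = example_phc_index(dev);	/* hypothetical */
		info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
					 SOF_TIMESTAMPING_RX_HARDWARE |
					 SOF_TIMESTAMPING_RAW_HARDWARE;
		return 0;
	}
]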
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
index a88c006ea65b..01c6c0a2f283 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
@@ -437,6 +437,15 @@ static int octep_set_link_ksettings(struct net_device *netdev,
return 0;
}
+static void octep_get_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ struct octep_device *oct = netdev_priv(dev);
+
+ channel->max_combined = CFG_GET_PORTS_MAX_IO_RINGS(oct->conf);
+ channel->combined_count = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+}
+
static const struct ethtool_ops octep_ethtool_ops = {
.get_drvinfo = octep_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -445,6 +454,7 @@ static const struct ethtool_ops octep_ethtool_ops = {
.get_ethtool_stats = octep_get_ethtool_stats,
.get_link_ksettings = octep_get_link_ksettings,
.set_link_ksettings = octep_set_link_ksettings,
+ .get_channels = octep_get_channels,
};
void octep_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c
index d60441928ba9..241a7e7c7ad2 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c
@@ -244,6 +244,15 @@ static int octep_vf_get_link_ksettings(struct net_device *netdev,
return 0;
}
+static void octep_vf_get_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ struct octep_vf_device *oct = netdev_priv(dev);
+
+ channel->max_combined = CFG_GET_PORTS_MAX_IO_RINGS(oct->conf);
+ channel->combined_count = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+}
+
static const struct ethtool_ops octep_vf_ethtool_ops = {
.get_drvinfo = octep_vf_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -251,6 +260,7 @@ static const struct ethtool_ops octep_vf_ethtool_ops = {
.get_sset_count = octep_vf_get_sset_count,
.get_ethtool_stats = octep_vf_get_ethtool_stats,
.get_link_ksettings = octep_vf_get_link_ksettings,
+ .get_channels = octep_vf_get_channels,
};
void octep_vf_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 69324ae09397..d374a4454836 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -2004,7 +2004,7 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* init wq for processing linkup requests */
INIT_WORK(&cgx->cgx_cmd_work, cgx_lmac_linkup_work);
- cgx->cgx_cmd_workq = alloc_workqueue("cgx_cmd_workq", 0, 0);
+ cgx->cgx_cmd_workq = alloc_workqueue("cgx_cmd_workq", WQ_PERCPU, 0);
if (!cgx->cgx_cmd_workq) {
dev_err(dev, "alloc workqueue failed for cgx cmd");
err = -ENOMEM;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
index 950231e7ea71..92ccf343dfe0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -161,10 +161,6 @@ int cgx_get_link_info(void *cgxd, int lmac_id,
struct cgx_link_user_info *linfo);
int cgx_lmac_linkup_start(void *cgxd);
int cgx_get_fwdata_base(u64 *base);
-int cgx_lmac_get_pause_frm(void *cgxd, int lmac_id,
- u8 *tx_pause, u8 *rx_pause);
-int cgx_lmac_set_pause_frm(void *cgxd, int lmac_id,
- u8 tx_pause, u8 rx_pause);
void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable);
u8 cgx_lmac_get_p2x(int cgx_id, int lmac_id);
int cgx_set_fec(u64 fec, int cgx_id, int lmac_id);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
index d7030dfa5dad..a80c8e7c94f2 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
@@ -913,7 +913,7 @@ int rvu_mcs_init(struct rvu *rvu)
/* Initialize the wq for handling mcs interrupts */
INIT_LIST_HEAD(&rvu->mcs_intrq_head);
INIT_WORK(&rvu->mcs_intr_work, mcs_intr_handler_task);
- rvu->mcs_intr_wq = alloc_workqueue("mcs_intr_wq", 0, 0);
+ rvu->mcs_intr_wq = alloc_workqueue("mcs_intr_wq", WQ_PERCPU, 0);
if (!rvu->mcs_intr_wq) {
dev_err(rvu->dev, "mcs alloc workqueue failed\n");
return -ENOMEM;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index c6bb3aaa8e0d..2d78e08f985f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -1164,6 +1164,9 @@ cpt:
rvu_program_channels(rvu);
cgx_start_linkup(rvu);
+ rvu_block_bcast_xon(rvu, BLKADDR_NIX0);
+ rvu_block_bcast_xon(rvu, BLKADDR_NIX1);
+
err = rvu_mcs_init(rvu);
if (err) {
dev_err(rvu->dev, "%s: Failed to initialize mcs\n", __func__);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 18c7bb39dbc7..b58283341923 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -1031,6 +1031,7 @@ int rvu_nix_mcast_update_mcam_entry(struct rvu *rvu, u16 pcifunc,
void rvu_nix_flr_free_bpids(struct rvu *rvu, u16 pcifunc);
int rvu_alloc_cint_qint_mem(struct rvu *rvu, struct rvu_pfvf *pfvf,
int blkaddr, int nixlf);
+void rvu_block_bcast_xon(struct rvu *rvu, int blkaddr);
/* NPC APIs */
void rvu_npc_freemem(struct rvu *rvu);
int rvu_npc_get_pkind(struct rvu *rvu, u16 pf);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 3303c475414a..3abd750a4bd7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -315,7 +315,7 @@ static int cgx_lmac_event_handler_init(struct rvu *rvu)
spin_lock_init(&rvu->cgx_evq_lock);
INIT_LIST_HEAD(&rvu->cgx_evq_head);
INIT_WORK(&rvu->cgx_evh_work, cgx_evhandler_task);
- rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", 0, 0);
+ rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", WQ_PERCPU, 0);
if (!rvu->cgx_evh_wq) {
dev_err(rvu->dev, "alloc workqueue failed");
return -ENOMEM;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
index 27c3a2daaaa9..3735372539bd 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
@@ -505,7 +505,9 @@ static int rvu_nix_register_reporters(struct rvu_devlink *rvu_dl)
rvu_reporters->nix_event_ctx = nix_event_context;
rvu_reporters->rvu_hw_nix_intr_reporter =
- devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_intr_reporter_ops, 0, rvu);
+ devlink_health_reporter_create(rvu_dl->dl,
+ &rvu_hw_nix_intr_reporter_ops,
+ rvu);
if (IS_ERR(rvu_reporters->rvu_hw_nix_intr_reporter)) {
dev_warn(rvu->dev, "Failed to create hw_nix_intr reporter, err=%ld\n",
PTR_ERR(rvu_reporters->rvu_hw_nix_intr_reporter));
@@ -513,7 +515,9 @@ static int rvu_nix_register_reporters(struct rvu_devlink *rvu_dl)
}
rvu_reporters->rvu_hw_nix_gen_reporter =
- devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_gen_reporter_ops, 0, rvu);
+ devlink_health_reporter_create(rvu_dl->dl,
+ &rvu_hw_nix_gen_reporter_ops,
+ rvu);
if (IS_ERR(rvu_reporters->rvu_hw_nix_gen_reporter)) {
dev_warn(rvu->dev, "Failed to create hw_nix_gen reporter, err=%ld\n",
PTR_ERR(rvu_reporters->rvu_hw_nix_gen_reporter));
@@ -521,7 +525,9 @@ static int rvu_nix_register_reporters(struct rvu_devlink *rvu_dl)
}
rvu_reporters->rvu_hw_nix_err_reporter =
- devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_err_reporter_ops, 0, rvu);
+ devlink_health_reporter_create(rvu_dl->dl,
+ &rvu_hw_nix_err_reporter_ops,
+ rvu);
if (IS_ERR(rvu_reporters->rvu_hw_nix_err_reporter)) {
dev_warn(rvu->dev, "Failed to create hw_nix_err reporter, err=%ld\n",
PTR_ERR(rvu_reporters->rvu_hw_nix_err_reporter));
@@ -529,7 +535,9 @@ static int rvu_nix_register_reporters(struct rvu_devlink *rvu_dl)
}
rvu_reporters->rvu_hw_nix_ras_reporter =
- devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_ras_reporter_ops, 0, rvu);
+ devlink_health_reporter_create(rvu_dl->dl,
+ &rvu_hw_nix_ras_reporter_ops,
+ rvu);
if (IS_ERR(rvu_reporters->rvu_hw_nix_ras_reporter)) {
dev_warn(rvu->dev, "Failed to create hw_nix_ras reporter, err=%ld\n",
PTR_ERR(rvu_reporters->rvu_hw_nix_ras_reporter));
@@ -1051,7 +1059,9 @@ static int rvu_npa_register_reporters(struct rvu_devlink *rvu_dl)
rvu_reporters->npa_event_ctx = npa_event_context;
rvu_reporters->rvu_hw_npa_intr_reporter =
- devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_intr_reporter_ops, 0, rvu);
+ devlink_health_reporter_create(rvu_dl->dl,
+ &rvu_hw_npa_intr_reporter_ops,
+ rvu);
if (IS_ERR(rvu_reporters->rvu_hw_npa_intr_reporter)) {
dev_warn(rvu->dev, "Failed to create hw_npa_intr reporter, err=%ld\n",
PTR_ERR(rvu_reporters->rvu_hw_npa_intr_reporter));
@@ -1059,7 +1069,9 @@ static int rvu_npa_register_reporters(struct rvu_devlink *rvu_dl)
}
rvu_reporters->rvu_hw_npa_gen_reporter =
- devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_gen_reporter_ops, 0, rvu);
+ devlink_health_reporter_create(rvu_dl->dl,
+ &rvu_hw_npa_gen_reporter_ops,
+ rvu);
if (IS_ERR(rvu_reporters->rvu_hw_npa_gen_reporter)) {
dev_warn(rvu->dev, "Failed to create hw_npa_gen reporter, err=%ld\n",
PTR_ERR(rvu_reporters->rvu_hw_npa_gen_reporter));
@@ -1067,7 +1079,9 @@ static int rvu_npa_register_reporters(struct rvu_devlink *rvu_dl)
}
rvu_reporters->rvu_hw_npa_err_reporter =
- devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_err_reporter_ops, 0, rvu);
+ devlink_health_reporter_create(rvu_dl->dl,
+ &rvu_hw_npa_err_reporter_ops,
+ rvu);
if (IS_ERR(rvu_reporters->rvu_hw_npa_err_reporter)) {
dev_warn(rvu->dev, "Failed to create hw_npa_err reporter, err=%ld\n",
PTR_ERR(rvu_reporters->rvu_hw_npa_err_reporter));
@@ -1075,7 +1089,9 @@ static int rvu_npa_register_reporters(struct rvu_devlink *rvu_dl)
}
rvu_reporters->rvu_hw_npa_ras_reporter =
- devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_ras_reporter_ops, 0, rvu);
+ devlink_health_reporter_create(rvu_dl->dl,
+ &rvu_hw_npa_ras_reporter_ops,
+ rvu);
if (IS_ERR(rvu_reporters->rvu_hw_npa_ras_reporter)) {
dev_warn(rvu->dev, "Failed to create hw_npa_ras reporter, err=%ld\n",
PTR_ERR(rvu_reporters->rvu_hw_npa_ras_reporter));
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 60db1f616cc8..828316211b24 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -6616,3 +6616,19 @@ unlock_grp:
return ret;
}
+
+/* On CN10K and older silicon, hardware may incorrectly assert XOFF
+ * on certain channels. Issue a write on NIX_AF_RX_CHANX_CFG to
+ * broadcast XON on those channels.
+ */
+void rvu_block_bcast_xon(struct rvu *rvu, int blkaddr)
+{
+ struct rvu_block *block = &rvu->hw->block[blkaddr];
+ u64 cfg;
+
+ if (!block->implemented || is_cn20k(rvu->pdev))
+ return;
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(0));
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(0), cfg);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c
index 03099bc570bd..4415d0ce9aef 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c
@@ -376,7 +376,7 @@ int rvu_rep_install_mcam_rules(struct rvu *rvu)
spin_lock_init(&rvu->rep_evtq_lock);
INIT_LIST_HEAD(&rvu->rep_evtq_head);
INIT_WORK(&rvu->rep_evt_work, rvu_rep_wq_handler);
- rvu->rep_evt_wq = alloc_workqueue("rep_evt_wq", 0, 0);
+ rvu->rep_evt_wq = alloc_workqueue("rep_evt_wq", WQ_PERCPU, 0);
if (!rvu->rep_evt_wq) {
dev_err(rvu->dev, "REP workqueue allocation failed\n");
return -ENOMEM;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
index c691f0722154..77543d472345 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
@@ -798,7 +798,8 @@ int cn10k_ipsec_init(struct net_device *netdev)
pf->ipsec.sa_size = sa_size;
INIT_WORK(&pf->ipsec.sa_work, cn10k_ipsec_sa_wq_handler);
- pf->ipsec.sa_workq = alloc_workqueue("cn10k_ipsec_sa_workq", 0, 0);
+ pf->ipsec.sa_workq = alloc_workqueue("cn10k_ipsec_sa_workq",
+ WQ_PERCPU, 0);
if (!pf->ipsec.sa_workq) {
netdev_err(pf->netdev, "SA alloc workqueue failed\n");
return -ENOMEM;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index 998c734ff839..b90e23dc49de 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -1283,7 +1283,8 @@ end:
}
static void otx2_get_fec_stats(struct net_device *netdev,
- struct ethtool_fec_stats *fec_stats)
+ struct ethtool_fec_stats *fec_stats,
+ struct ethtool_fec_hist *hist)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
struct cgx_fw_data *rsp;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 5027fae0aa77..e808995703cf 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -3542,6 +3542,7 @@ static void otx2_remove(struct pci_dev *pdev)
otx2_disable_mbox_intr(pf);
otx2_pfaf_mbox_destroy(pf);
pci_free_irq_vectors(pf->pdev);
+ bitmap_free(pf->af_xdp_zc_qidx);
pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 7ebb6e656884..25381f079b97 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -854,6 +854,7 @@ static void otx2vf_remove(struct pci_dev *pdev)
qmem_free(vf->dev, vf->dync_lmt);
otx2vf_vfaf_mbox_destroy(vf);
pci_free_irq_vectors(vf->pdev);
+ bitmap_free(vf->af_xdp_zc_qidx);
pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
index 71ffb55d1fc4..65e7ef033bde 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_main.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
@@ -1500,7 +1500,7 @@ EXPORT_SYMBOL(prestera_device_unregister);
static int __init prestera_module_init(void)
{
- prestera_wq = alloc_workqueue("prestera", 0, 0);
+ prestera_wq = alloc_workqueue("prestera", WQ_PERCPU, 0);
if (!prestera_wq)
return -ENOMEM;
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_pci.c b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
index c45d108b2f6d..3e13322470da 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_pci.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
@@ -898,7 +898,7 @@ static int prestera_pci_probe(struct pci_dev *pdev,
dev_info(fw->dev.dev, "Prestera FW is ready\n");
- fw->wq = alloc_workqueue("prestera_fw_wq", WQ_HIGHPRI, 1);
+ fw->wq = alloc_workqueue("prestera_fw_wq", WQ_HIGHPRI | WQ_PERCPU, 1);
if (!fw->wq) {
err = -ENOMEM;
goto err_wq_alloc;
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
index 0a80d8f8cff7..3dbb113b792c 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -59,7 +59,9 @@ struct mtk_wed_flow_block_priv {
static const struct mtk_wed_soc_data mt7622_data = {
.regmap = {
.tx_bm_tkid = 0x088,
- .wpdma_rx_ring0 = 0x770,
+ .wpdma_rx_ring = {
+ 0x770,
+ },
.reset_idx_tx_mask = GENMASK(3, 0),
.reset_idx_rx_mask = GENMASK(17, 16),
},
@@ -70,7 +72,9 @@ static const struct mtk_wed_soc_data mt7622_data = {
static const struct mtk_wed_soc_data mt7986_data = {
.regmap = {
.tx_bm_tkid = 0x0c8,
- .wpdma_rx_ring0 = 0x770,
+ .wpdma_rx_ring = {
+ 0x770,
+ },
.reset_idx_tx_mask = GENMASK(1, 0),
.reset_idx_rx_mask = GENMASK(7, 6),
},
@@ -81,7 +85,10 @@ static const struct mtk_wed_soc_data mt7986_data = {
static const struct mtk_wed_soc_data mt7988_data = {
.regmap = {
.tx_bm_tkid = 0x0c8,
- .wpdma_rx_ring0 = 0x7d0,
+ .wpdma_rx_ring = {
+ 0x7d0,
+ 0x7d8,
+ },
.reset_idx_tx_mask = GENMASK(1, 0),
.reset_idx_rx_mask = GENMASK(7, 6),
},
@@ -621,8 +628,8 @@ mtk_wed_amsdu_init(struct mtk_wed_device *dev)
return ret;
}
- /* eagle E1 PCIE1 tx ring 22 flow control issue */
- if (dev->wlan.id == 0x7991)
+ /* Kite and Eagle E1 PCIE1 tx ring 22 flow control issue */
+ if (dev->wlan.id == 0x7991 || dev->wlan.id == 0x7992)
wed_clr(dev, MTK_WED_AMSDU_FIFO, MTK_WED_AMSDU_IS_PRIOR0_RING);
wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_AMSDU_EN);
@@ -1239,7 +1246,11 @@ mtk_wed_set_wpdma(struct mtk_wed_device *dev)
return;
wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo);
- wed_w32(dev, dev->hw->soc->regmap.wpdma_rx_ring0, dev->wlan.wpdma_rx);
+ wed_w32(dev, dev->hw->soc->regmap.wpdma_rx_ring[0],
+ dev->wlan.wpdma_rx[0]);
+ if (mtk_wed_is_v3_or_greater(dev->hw))
+ wed_w32(dev, dev->hw->soc->regmap.wpdma_rx_ring[1],
+ dev->wlan.wpdma_rx[1]);
if (!dev->wlan.hw_rro)
return;
@@ -2323,6 +2334,16 @@ mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
if (!dev->rx_wdma[i].desc)
mtk_wed_wdma_rx_ring_setup(dev, i, 16, false);
+ if (dev->wlan.hw_rro) {
+ for (i = 0; i < MTK_WED_RX_PAGE_QUEUES; i++) {
+ u32 addr = MTK_WED_RRO_MSDU_PG_CTRL0(i) +
+ MTK_WED_RING_OFS_COUNT;
+
+ if (!wed_r32(dev, addr))
+ wed_w32(dev, addr, 1);
+ }
+ }
+
mtk_wed_hw_init(dev);
mtk_wed_configure_irq(dev, irq_mask);
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.h b/drivers/net/ethernet/mediatek/mtk_wed.h
index c1f0479d7a71..b49aee9a8b65 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.h
+++ b/drivers/net/ethernet/mediatek/mtk_wed.h
@@ -17,7 +17,7 @@ struct mtk_wed_wo;
struct mtk_wed_soc_data {
struct {
u32 tx_bm_tkid;
- u32 wpdma_rx_ring0;
+ u32 wpdma_rx_ring[MTK_WED_RX_QUEUES];
u32 reset_idx_tx_mask;
u32 reset_idx_rx_mask;
} regmap;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 6ec7d6e0181d..3c3e84100d5a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -8,7 +8,6 @@ config MLX5_CORE
depends on PCI
select AUXILIARY_BUS
select NET_DEVLINK
- depends on VXLAN || !VXLAN
depends on MLXFW || !MLXFW
depends on PTP_1588_CLOCK_OPTIONAL
depends on PCI_HYPERV_INTERFACE || !PCI_HYPERV_INTERFACE
@@ -208,3 +207,14 @@ config MLX5_DPLL
help
DPLL support in Mellanox Technologies ConnectX NICs.
+config MLX5_EN_PSP
+ bool "Mellanox Technologies support for PSP cryptography-offload acceleration"
+ depends on INET_PSP
+ depends on MLX5_CORE_EN
+ default y
+ help
+ Enable mlx5 device offload support for the Google PSP security
+ protocol. This adds PSP encryption offload along with the SPI and
+ key generation interfaces consumed by the PSP stack.
+
+ If unsure, say Y.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index a253c73db9e5..8ffa286a18f5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -17,7 +17,7 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
fs_counters.o fs_ft_pool.o rl.o lag/debugfs.o lag/lag.o dev.o events.o wq.o lib/gid.o \
lib/devcom.o lib/pci_vsc.o lib/dm.o lib/fs_ttc.o diag/fs_tracepoint.o \
diag/fw_tracer.o diag/crdump.o devlink.o diag/rsc_dump.o diag/reporter_vnic.o \
- fw_reset.o qos.o lib/tout.o lib/aso.o wc.o fs_pool.o
+ fw_reset.o qos.o lib/tout.o lib/aso.o wc.o fs_pool.o lib/nv_param.o
#
# Netdev basic
@@ -69,7 +69,7 @@ mlx5_core-$(CONFIG_MLX5_TC_SAMPLE) += en/tc/sample.o
# Core extra
#
mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o eswitch_offloads_termtbl.o \
- ecpf.o rdma.o esw/legacy.o \
+ ecpf.o rdma.o esw/legacy.o esw/adj_vport.o \
esw/devlink_port.o esw/vporttbl.o esw/qos.o esw/ipsec.o
mlx5_core-$(CONFIG_MLX5_ESWITCH) += esw/acl/helper.o \
@@ -85,7 +85,9 @@ mlx5_core-$(CONFIG_MLX5_BRIDGE) += esw/bridge.o esw/bridge_mcast.o esw/bridge
mlx5_core-$(CONFIG_HWMON) += hwmon.o
mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
-mlx5_core-$(CONFIG_VXLAN) += lib/vxlan.o
+ifneq ($(CONFIG_VXLAN),)
+ mlx5_core-y += lib/vxlan.o
+endif
mlx5_core-$(CONFIG_PTP_1588_CLOCK) += lib/clock.o
mlx5_core-$(CONFIG_PCI_HYPERV_INTERFACE) += lib/hv.o lib/hv_vhca.o
@@ -110,6 +112,8 @@ mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/ktls_stats.o \
en_accel/fs_tcp.o en_accel/ktls.o en_accel/ktls_txrx.o \
en_accel/ktls_tx.o en_accel/ktls_rx.o
+mlx5_core-$(CONFIG_MLX5_EN_PSP) += en_accel/psp.o en_accel/psp_rxtx.o
+
#
# SW Steering
#
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index e395ef5f356e..722282cebce9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -294,6 +294,10 @@ static void poll_timeout(struct mlx5_cmd_work_ent *ent)
return;
}
cond_resched();
+ if (mlx5_cmd_is_down(dev)) {
+ ent->ret = -ENXIO;
+ return;
+ }
} while (time_before(jiffies, poll_end));
ent->ret = -ETIMEDOUT;
@@ -1070,7 +1074,7 @@ static void cmd_work_handler(struct work_struct *work)
poll_timeout(ent);
/* make sure we read the descriptor after ownership is SW */
rmb();
- mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, (ent->ret == -ETIMEDOUT));
+ mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, !!ent->ret);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index 1fd403713baf..e9f319a9bdd6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -145,7 +145,6 @@ int mlx5_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
mlx5_core_dbg(dev, "failed adding CP 0x%x to debug file system\n",
cq->cqn);
- cq->uar = dev->priv.uar;
cq->irqn = eq->core.irqn;
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index 2c0e0c16ca90..fceea83abbd7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -10,6 +10,7 @@
#include "esw/qos.h"
#include "sf/dev/dev.h"
#include "sf/sf.h"
+#include "lib/nv_param.h"
static int mlx5_devlink_flash_update(struct devlink *devlink,
struct devlink_flash_update_params *params,
@@ -203,11 +204,6 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
return 0;
}
- if (mlx5_lag_is_active(dev)) {
- NL_SET_ERR_MSG_MOD(extack, "reload is unsupported in Lag mode");
- return -EOPNOTSUPP;
- }
-
if (mlx5_core_is_mp_slave(dev)) {
NL_SET_ERR_MSG_MOD(extack, "reload is unsupported for multi port slave");
return -EOPNOTSUPP;
@@ -534,6 +530,25 @@ mlx5_devlink_hairpin_queue_size_validate(struct devlink *devlink, u32 id,
return 0;
}
+static int mlx5_devlink_num_doorbells_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *mdev = devlink_priv(devlink);
+ u32 val32 = val.vu32;
+ u32 max_num_channels;
+
+ max_num_channels = mlx5e_get_max_num_channels(mdev);
+ if (val32 > max_num_channels) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Requested num_doorbells (%u) exceeds maximum number of channels (%u)",
+ val32, max_num_channels);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static void mlx5_devlink_hairpin_params_init_values(struct devlink *devlink)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
@@ -613,6 +628,9 @@ static const struct devlink_param mlx5_devlink_eth_params[] = {
"hairpin_queue_size", DEVLINK_PARAM_TYPE_U32,
BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), NULL, NULL,
mlx5_devlink_hairpin_queue_size_validate),
+ DEVLINK_PARAM_GENERIC(NUM_DOORBELLS,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), NULL, NULL,
+ mlx5_devlink_num_doorbells_validate),
};
static int mlx5_devlink_eth_params_register(struct devlink *devlink)
@@ -636,6 +654,10 @@ static int mlx5_devlink_eth_params_register(struct devlink *devlink)
mlx5_devlink_hairpin_params_init_values(devlink);
+ value.vu32 = MLX5_DEFAULT_NUM_DOORBELLS;
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_NUM_DOORBELLS,
+ value);
return 0;
}
@@ -650,6 +672,105 @@ static void mlx5_devlink_eth_params_unregister(struct devlink *devlink)
ARRAY_SIZE(mlx5_devlink_eth_params));
}
+#define MLX5_PCIE_CONG_THRESH_MAX 10000
+#define MLX5_PCIE_CONG_THRESH_DEF_LOW 7500
+#define MLX5_PCIE_CONG_THRESH_DEF_HIGH 9000
+
+static int
+mlx5_devlink_pcie_cong_thresh_validate(struct devlink *devl, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ if (val.vu16 > MLX5_PCIE_CONG_THRESH_MAX) {
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Value %u > max supported (%u)",
+ val.vu16, MLX5_PCIE_CONG_THRESH_MAX);
+
+ return -EINVAL;
+ }
+
+ switch (id) {
+ case MLX5_DEVLINK_PARAM_ID_PCIE_CONG_IN_LOW:
+ case MLX5_DEVLINK_PARAM_ID_PCIE_CONG_IN_HIGH:
+ case MLX5_DEVLINK_PARAM_ID_PCIE_CONG_OUT_LOW:
+ case MLX5_DEVLINK_PARAM_ID_PCIE_CONG_OUT_HIGH:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static void mlx5_devlink_pcie_cong_init_values(struct devlink *devlink)
+{
+ union devlink_param_value value;
+ u32 id;
+
+ value.vu16 = MLX5_PCIE_CONG_THRESH_DEF_LOW;
+ id = MLX5_DEVLINK_PARAM_ID_PCIE_CONG_IN_LOW;
+ devl_param_driverinit_value_set(devlink, id, value);
+
+ value.vu16 = MLX5_PCIE_CONG_THRESH_DEF_HIGH;
+ id = MLX5_DEVLINK_PARAM_ID_PCIE_CONG_IN_HIGH;
+ devl_param_driverinit_value_set(devlink, id, value);
+
+ value.vu16 = MLX5_PCIE_CONG_THRESH_DEF_LOW;
+ id = MLX5_DEVLINK_PARAM_ID_PCIE_CONG_OUT_LOW;
+ devl_param_driverinit_value_set(devlink, id, value);
+
+ value.vu16 = MLX5_PCIE_CONG_THRESH_DEF_HIGH;
+ id = MLX5_DEVLINK_PARAM_ID_PCIE_CONG_OUT_HIGH;
+ devl_param_driverinit_value_set(devlink, id, value);
+}
+
+static const struct devlink_param mlx5_devlink_pcie_cong_params[] = {
+ DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_PCIE_CONG_IN_LOW,
+ "pcie_cong_inbound_low", DEVLINK_PARAM_TYPE_U16,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), NULL, NULL,
+ mlx5_devlink_pcie_cong_thresh_validate),
+ DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_PCIE_CONG_IN_HIGH,
+ "pcie_cong_inbound_high", DEVLINK_PARAM_TYPE_U16,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), NULL, NULL,
+ mlx5_devlink_pcie_cong_thresh_validate),
+ DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_PCIE_CONG_OUT_LOW,
+ "pcie_cong_outbound_low", DEVLINK_PARAM_TYPE_U16,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), NULL, NULL,
+ mlx5_devlink_pcie_cong_thresh_validate),
+ DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_PCIE_CONG_OUT_HIGH,
+ "pcie_cong_outbound_high", DEVLINK_PARAM_TYPE_U16,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), NULL, NULL,
+ mlx5_devlink_pcie_cong_thresh_validate),
+};
+
+static int mlx5_devlink_pcie_cong_params_register(struct devlink *devlink)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ int err;
+
+ if (!mlx5_pcie_cong_event_supported(dev))
+ return 0;
+
+ err = devl_params_register(devlink, mlx5_devlink_pcie_cong_params,
+ ARRAY_SIZE(mlx5_devlink_pcie_cong_params));
+ if (err)
+ return err;
+
+ mlx5_devlink_pcie_cong_init_values(devlink);
+
+ return 0;
+}
+
+static void mlx5_devlink_pcie_cong_params_unregister(struct devlink *devlink)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+
+ if (!mlx5_pcie_cong_event_supported(dev))
+ return;
+
+ devl_params_unregister(devlink, mlx5_devlink_pcie_cong_params,
+ ARRAY_SIZE(mlx5_devlink_pcie_cong_params));
+}
+
static int mlx5_devlink_enable_rdma_validate(struct devlink *devlink, u32 id,
union devlink_param_value val,
struct netlink_ext_ack *extack)
@@ -895,8 +1016,20 @@ int mlx5_devlink_params_register(struct devlink *devlink)
if (err)
goto max_uc_list_err;
+ err = mlx5_devlink_pcie_cong_params_register(devlink);
+ if (err)
+ goto pcie_cong_err;
+
+ err = mlx5_nv_param_register_dl_params(devlink);
+ if (err)
+ goto nv_param_err;
+
return 0;
+nv_param_err:
+ mlx5_devlink_pcie_cong_params_unregister(devlink);
+pcie_cong_err:
+ mlx5_devlink_max_uc_list_params_unregister(devlink);
max_uc_list_err:
mlx5_devlink_auxdev_params_unregister(devlink);
auxdev_reg_err:
@@ -907,6 +1040,8 @@ auxdev_reg_err:
void mlx5_devlink_params_unregister(struct devlink *devlink)
{
+ mlx5_nv_param_unregister_dl_params(devlink);
+ mlx5_devlink_pcie_cong_params_unregister(devlink);
mlx5_devlink_max_uc_list_params_unregister(devlink);
mlx5_devlink_auxdev_params_unregister(devlink);
devl_params_unregister(devlink, mlx5_devlink_params,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
index 961f75da6227..c9555119a661 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
@@ -22,6 +22,11 @@ enum mlx5_devlink_param_id {
MLX5_DEVLINK_PARAM_ID_ESW_MULTIPORT,
MLX5_DEVLINK_PARAM_ID_HAIRPIN_NUM_QUEUES,
MLX5_DEVLINK_PARAM_ID_HAIRPIN_QUEUE_SIZE,
+ MLX5_DEVLINK_PARAM_ID_PCIE_CONG_IN_LOW,
+ MLX5_DEVLINK_PARAM_ID_PCIE_CONG_IN_HIGH,
+ MLX5_DEVLINK_PARAM_ID_PCIE_CONG_OUT_LOW,
+ MLX5_DEVLINK_PARAM_ID_PCIE_CONG_OUT_HIGH,
+ MLX5_DEVLINK_PARAM_ID_CQE_COMPRESSION_TYPE
};
struct mlx5_trap_ctx {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c
index 86253a89c24c..7cae0c6e5e8a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. */
+#include <linux/mlx5/vport.h>
+
#include "reporter_vnic.h"
#include "en_stats.h"
#include "devlink.h"
@@ -105,6 +107,15 @@ void mlx5_reporter_vnic_diagnose_counters(struct mlx5_core_dev *dev,
}
if (MLX5_CAP_GEN(dev, nic_cap_reg))
mlx5_reporter_vnic_diagnose_counter_icm(dev, fmsg, vport_num, other_vport);
+ if (MLX5_CAP_GEN(dev, vnic_env_cnt_bar_uar_access))
+ devlink_fmsg_u32_pair_put(fmsg, "bar_uar_access",
+ VNIC_ENV_GET(&vnic, bar_uar_access));
+ if (MLX5_CAP_GEN(dev, vnic_env_cnt_odp_page_fault)) {
+ devlink_fmsg_u32_pair_put(fmsg, "odp_local_triggered_page_fault",
+ VNIC_ENV_GET(&vnic, odp_local_triggered_page_fault));
+ devlink_fmsg_u32_pair_put(fmsg, "odp_remote_triggered_page_fault",
+ VNIC_ENV_GET(&vnic, odp_remote_triggered_page_fault));
+ }
devlink_fmsg_obj_nest_end(fmsg);
devlink_fmsg_pair_nest_end(fmsg);
@@ -133,11 +144,11 @@ void mlx5_reporter_vnic_create(struct mlx5_core_dev *dev)
health->vnic_reporter =
devlink_health_reporter_create(devlink,
&mlx5_reporter_vnic_ops,
- 0, dev);
+ dev);
if (IS_ERR(health->vnic_reporter))
mlx5_core_warn(dev,
- "Failed to create vnic reporter, err = %ld\n",
- PTR_ERR(health->vnic_reporter));
+ "Failed to create vnic reporter, err = %pe\n",
+ health->vnic_reporter);
}
void mlx5_reporter_vnic_destroy(struct mlx5_core_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 0dd3bc0f4caa..14e3207b14e7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -47,6 +47,7 @@
#include <linux/rhashtable.h>
#include <net/udp_tunnel.h>
#include <net/switchdev.h>
+#include <net/psp/types.h>
#include <net/xdp.h>
#include <linux/dim.h>
#include <linux/bits.h>
@@ -68,7 +69,7 @@ struct page_pool;
#define MLX5E_METADATA_ETHER_TYPE (0x8CE4)
#define MLX5E_METADATA_ETHER_LEN 8
-#define MLX5E_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
+#define MLX5E_ETH_HARD_MTU (ETH_HLEN + PSP_ENCAP_HLEN + PSP_TRL_SIZE + VLAN_HLEN + ETH_FCS_LEN)
#define MLX5E_HW2SW_MTU(params, hwmtu) ((hwmtu) - ((params)->hard_mtu))
#define MLX5E_SW2HW_MTU(params, swmtu) ((swmtu) + ((params)->hard_mtu))
@@ -344,6 +345,7 @@ struct mlx5e_cq {
/* data path - accessed per napi poll */
u16 event_ctr;
struct napi_struct *napi;
+ struct mlx5_uars_page *uar;
struct mlx5_core_cq mcq;
struct mlx5e_ch_stats *ch_stats;
@@ -788,6 +790,7 @@ struct mlx5e_channel {
int vec_ix;
int sd_ix;
int cpu;
+ struct mlx5_sq_bfreg *bfreg;
/* Sync between icosq recovery and XSK enable/disable. */
struct mutex icosq_recovery_lock;
@@ -936,6 +939,9 @@ struct mlx5e_priv {
#ifdef CONFIG_MLX5_EN_IPSEC
struct mlx5e_ipsec *ipsec;
#endif
+#ifdef CONFIG_MLX5_EN_PSP
+ struct mlx5e_psp *psp;
+#endif
#ifdef CONFIG_MLX5_EN_TLS
struct mlx5e_tls *tls;
#endif
@@ -950,6 +956,7 @@ struct mlx5e_priv {
struct mlx5e_mqprio_rl *mqprio_rl;
struct dentry *dfs_root;
struct mlx5_devcom_comp_dev *devcom;
+ struct ethtool_fec_hist_range *fec_ranges;
};
struct mlx5e_dev {
@@ -1060,6 +1067,7 @@ struct mlx5e_create_cq_param {
struct mlx5e_ch_stats *ch_stats;
int node;
int ix;
+ struct mlx5_uars_page *uar;
};
struct mlx5e_cq_param;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index ac65e3191480..c3408b3f7010 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -57,7 +57,7 @@ struct mlx5e_l2_table {
bool promisc_enabled;
};
-#define MLX5E_NUM_INDIR_TIRS (MLX5_NUM_TT - 1)
+#define MLX5E_NUM_INDIR_TIRS (MLX5_NUM_INDIR_TIRS)
#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
MLX5_HASH_FIELD_SEL_DST_IP)
@@ -88,7 +88,7 @@ enum {
#ifdef CONFIG_MLX5_EN_ARFS
MLX5E_ARFS_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
#endif
-#ifdef CONFIG_MLX5_EN_IPSEC
+#if defined(CONFIG_MLX5_EN_IPSEC) || defined(CONFIG_MLX5_EN_PSP)
MLX5E_ACCEL_FS_ESP_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL,
MLX5E_ACCEL_FS_POL_FT_LEVEL,
@@ -132,7 +132,8 @@ struct mlx5e_ptp_fs;
void mlx5e_set_ttc_params(struct mlx5e_flow_steering *fs,
struct mlx5e_rx_res *rx_res,
- struct ttc_params *ttc_params, bool tunnel);
+ struct ttc_params *ttc_params, bool tunnel,
+ bool ipsec_rss);
void mlx5e_destroy_ttc_table(struct mlx5e_flow_steering *fs);
int mlx5e_create_ttc_table(struct mlx5e_flow_steering *fs,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c
index b4f3bd7d346e..195863b2c013 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c
@@ -138,8 +138,8 @@ void mlx5e_hv_vhca_stats_create(struct mlx5e_priv *priv)
if (IS_ERR_OR_NULL(agent)) {
if (IS_ERR(agent))
netdev_warn(priv->netdev,
- "Failed to create hv vhca stats agent, err = %ld\n",
- PTR_ERR(agent));
+ "Failed to create hv vhca stats agent, err = %pe\n",
+ agent);
kvfree(priv->stats_agent.buf);
return;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 3cca06a74cf9..3692298e10f2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -6,6 +6,7 @@
#include "en/port.h"
#include "en_accel/en_accel.h"
#include "en_accel/ipsec.h"
+#include "en_accel/psp.h"
#include <linux/dim.h>
#include <net/page_pool/types.h>
#include <net/xdp_sock_drv.h>
@@ -611,6 +612,7 @@ void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e
.ch_stats = c->stats,
.node = cpu_to_node(c->cpu),
.ix = c->vec_ix,
+ .uar = c->bfreg->up,
};
}
@@ -810,7 +812,7 @@ static void mlx5e_build_common_cq_param(struct mlx5_core_dev *mdev,
{
void *cqc = param->cqc;
- MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
+ MLX5_SET(cqc, cqc, uar_page, mdev->priv.bfreg.up->index);
if (MLX5_CAP_GEN(mdev, cqe_128_always) && cache_line_size() >= 128)
MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD);
}
@@ -1003,7 +1005,8 @@ void mlx5e_build_sq_param(struct mlx5_core_dev *mdev,
bool allow_swp;
allow_swp = mlx5_geneve_tx_allowed(mdev) ||
- (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_CRYPTO);
+ (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_CRYPTO) ||
+ mlx5_is_psp_device(mdev);
mlx5e_build_sq_param_common(mdev, param);
MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
MLX5_SET(sqc, sqc, allow_swp, allow_swp);
@@ -1229,7 +1232,6 @@ static void mlx5e_build_async_icosq_param(struct mlx5_core_dev *mdev,
void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk,
struct mlx5e_sq_param *param)
{
void *sqc = param->sqc;
@@ -1256,7 +1258,7 @@ int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
async_icosq_log_wq_sz = mlx5e_build_async_icosq_log_wq_sz(mdev);
mlx5e_build_sq_param(mdev, params, &cparam->txq_sq);
- mlx5e_build_xdpsq_param(mdev, params, NULL, &cparam->xdp_sq);
+ mlx5e_build_xdpsq_param(mdev, params, &cparam->xdp_sq);
mlx5e_build_icosq_param(mdev, icosq_log_wq_sz, &cparam->icosq);
mlx5e_build_async_icosq_param(mdev, async_icosq_log_wq_sz, &cparam->async_icosq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
index 488ccdbc1e2c..00617c65fe3c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
@@ -51,6 +51,7 @@ struct mlx5e_create_sq_param {
u32 tisn;
u8 tis_lst_sz;
u8 min_inline_mode;
+ u32 uar_page;
};
/* Striding RQ dynamic parameters */
@@ -132,7 +133,6 @@ void mlx5e_build_tx_cq_param(struct mlx5_core_dev *mdev,
struct mlx5e_cq_param *param);
void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk,
struct mlx5e_sq_param *param);
int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/pcie_cong_event.c b/drivers/net/ethernet/mellanox/mlx5/core/en/pcie_cong_event.c
index 0ed017569a19..2eb666a46f39 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/pcie_cong_event.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/pcie_cong_event.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES.
+#include "../devlink.h"
#include "en.h"
#include "pcie_cong_event.h"
@@ -23,6 +24,7 @@ struct mlx5e_pcie_cong_stats {
u32 pci_bw_inbound_low;
u32 pci_bw_outbound_high;
u32 pci_bw_outbound_low;
+ u32 pci_bw_stale_event;
};
struct mlx5e_pcie_cong_event {
@@ -41,13 +43,6 @@ struct mlx5e_pcie_cong_event {
struct mlx5e_pcie_cong_stats stats;
};
-/* In units of 0.01 % */
-static const struct mlx5e_pcie_cong_thresh default_thresh_config = {
- .inbound_high = 9000,
- .inbound_low = 7500,
- .outbound_high = 9000,
- .outbound_low = 7500,
-};
static const struct counter_desc mlx5e_pcie_cong_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_pcie_cong_stats,
@@ -58,6 +53,8 @@ static const struct counter_desc mlx5e_pcie_cong_stats_desc[] = {
pci_bw_outbound_high) },
{ MLX5E_DECLARE_STAT(struct mlx5e_pcie_cong_stats,
pci_bw_outbound_low) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_pcie_cong_stats,
+ pci_bw_stale_event) },
};
#define NUM_PCIE_CONG_COUNTERS ARRAY_SIZE(mlx5e_pcie_cong_stats_desc)
@@ -218,8 +215,10 @@ static void mlx5e_pcie_cong_event_work(struct work_struct *work)
}
changes = cong_event->state ^ new_cong_state;
- if (!changes)
+ if (!changes) {
+ cong_event->stats.pci_bw_stale_event++;
return;
+ }
cong_event->state = new_cong_state;
@@ -249,8 +248,60 @@ static int mlx5e_pcie_cong_event_handler(struct notifier_block *nb,
return NOTIFY_OK;
}
+static int
+mlx5e_pcie_cong_get_thresh_config(struct mlx5_core_dev *dev,
+ struct mlx5e_pcie_cong_thresh *config)
+{
+ u32 ids[4] = {
+ MLX5_DEVLINK_PARAM_ID_PCIE_CONG_IN_LOW,
+ MLX5_DEVLINK_PARAM_ID_PCIE_CONG_IN_HIGH,
+ MLX5_DEVLINK_PARAM_ID_PCIE_CONG_OUT_LOW,
+ MLX5_DEVLINK_PARAM_ID_PCIE_CONG_OUT_HIGH,
+ };
+ struct devlink *devlink = priv_to_devlink(dev);
+ union devlink_param_value val[4];
+
+ for (int i = 0; i < 4; i++) {
+ u32 id = ids[i];
+ int err;
+
+ err = devl_param_driverinit_value_get(devlink, id, &val[i]);
+ if (err)
+ return err;
+ }
+
+ config->inbound_low = val[0].vu16;
+ config->inbound_high = val[1].vu16;
+ config->outbound_low = val[2].vu16;
+ config->outbound_high = val[3].vu16;
+
+ return 0;
+}
+
+static int
+mlx5e_thresh_config_validate(struct mlx5_core_dev *mdev,
+ const struct mlx5e_pcie_cong_thresh *config)
+{
+ int err = 0;
+
+ if (config->inbound_low >= config->inbound_high) {
+ err = -EINVAL;
+ mlx5_core_err(mdev, "PCIe inbound congestion threshold configuration invalid: low (%u) >= high (%u).\n",
+ config->inbound_low, config->inbound_high);
+ }
+
+ if (config->outbound_low >= config->outbound_high) {
+ err = -EINVAL;
+ mlx5_core_err(mdev, "PCIe outbound congestion threshold configuration invalid: low (%u) >= high (%u).\n",
+ config->outbound_low, config->outbound_high);
+ }
+
+ return err;
+}
+
int mlx5e_pcie_cong_event_init(struct mlx5e_priv *priv)
{
+ struct mlx5e_pcie_cong_thresh thresh_config = {};
struct mlx5e_pcie_cong_event *cong_event;
struct mlx5_core_dev *mdev = priv->mdev;
int err;
@@ -258,6 +309,16 @@ int mlx5e_pcie_cong_event_init(struct mlx5e_priv *priv)
if (!mlx5_pcie_cong_event_supported(mdev))
return 0;
+ err = mlx5e_pcie_cong_get_thresh_config(mdev, &thresh_config);
+ if (WARN_ON(err))
+ return err;
+
+ err = mlx5e_thresh_config_validate(mdev, &thresh_config);
+ if (err) {
+ mlx5_core_err(mdev, "PCIe congestion event feature disabled\n");
+ return err;
+ }
+
cong_event = kvzalloc_node(sizeof(*cong_event), GFP_KERNEL,
mdev->priv.numa_node);
if (!cong_event)
@@ -269,7 +330,7 @@ int mlx5e_pcie_cong_event_init(struct mlx5e_priv *priv)
cong_event->priv = priv;
- err = mlx5_cmd_pcie_cong_event_set(mdev, &default_thresh_config,
+ err = mlx5_cmd_pcie_cong_event_set(mdev, &thresh_config,
&cong_event->obj_id);
if (err) {
mlx5_core_warn(mdev, "Error creating a PCIe congestion event object\n");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h
index 66d276a1be83..f4a19ffbb641 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h
@@ -66,23 +66,11 @@ struct mlx5e_port_buffer {
struct mlx5e_bufferx_reg buffer[MLX5E_MAX_NETWORK_BUFFER];
};
-#ifdef CONFIG_MLX5_CORE_EN_DCB
int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
u32 change, unsigned int mtu,
struct ieee_pfc *pfc,
u32 *buffer_size,
u8 *prio2buffer);
-#else
-static inline int
-mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
- u32 change, unsigned int mtu,
- void *pfc,
- u32 *buffer_size,
- u8 *prio2buffer)
-{
- return 0;
-}
-#endif
int mlx5e_port_query_buffer(struct mlx5e_priv *priv,
struct mlx5e_port_buffer *port_buffer);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index 391b4e9c9dc4..c93ee969ea64 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -334,7 +334,7 @@ static int mlx5e_ptp_alloc_txqsq(struct mlx5e_ptp *c, int txq_ix,
sq->mdev = mdev;
sq->ch_ix = MLX5E_PTP_CHANNEL_IX;
sq->txq_ix = txq_ix;
- sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
+ sq->uar_map = c->bfreg->map;
sq->min_inline_mode = params->tx_min_inline_mode;
sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
sq->stats = &c->priv->ptp_stats.sq[tc];
@@ -486,6 +486,7 @@ static int mlx5e_ptp_open_txqsq(struct mlx5e_ptp *c, u32 tisn,
csp.wq_ctrl = &txqsq->wq_ctrl;
csp.min_inline_mode = txqsq->min_inline_mode;
csp.ts_cqe_to_dest_cqn = ptpsq->ts_cq.mcq.cqn;
+ csp.uar_page = c->bfreg->index;
err = mlx5e_create_sq_rdy(c->mdev, sqp, &csp, 0, &txqsq->sqn);
if (err)
@@ -577,6 +578,7 @@ static int mlx5e_ptp_open_tx_cqs(struct mlx5e_ptp *c,
ccp.ch_stats = c->stats;
ccp.napi = &c->napi;
ccp.ix = MLX5E_PTP_CHANNEL_IX;
+ ccp.uar = c->bfreg->up;
cq_param = &cparams->txq_sq_param.cqp;
@@ -626,6 +628,7 @@ static int mlx5e_ptp_open_rx_cq(struct mlx5e_ptp *c,
ccp.ch_stats = c->stats;
ccp.napi = &c->napi;
ccp.ix = MLX5E_PTP_CHANNEL_IX;
+ ccp.uar = c->bfreg->up;
cq_param = &cparams->rq_param.cqp;
@@ -900,6 +903,7 @@ int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
c->num_tc = mlx5e_get_dcb_num_tc(params);
c->stats = &priv->ptp_stats.ch;
c->lag_port = lag_port;
+ c->bfreg = &mdev->priv.bfreg;
err = mlx5e_ptp_set_state(c, params);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
index 883c044852f1..1b3c9648220b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
@@ -66,6 +66,7 @@ struct mlx5e_ptp {
struct mlx5_core_dev *mdev;
struct hwtstamp_config *tstamp;
DECLARE_BITMAP(state, MLX5E_PTP_STATE_NUM_STATES);
+ struct mlx5_sq_bfreg *bfreg;
};
static inline bool mlx5e_use_ptpsq(struct sk_buff *skb)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
index 0f5d7ea8956f..9d1c677814e0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
@@ -488,8 +488,8 @@ static int mlx5_esw_bridge_switchdev_event(struct notifier_block *nb,
fdb_info,
br_offloads);
if (IS_ERR(work)) {
- WARN_ONCE(1, "Failed to init switchdev work, err=%ld",
- PTR_ERR(work));
+ WARN_ONCE(1, "Failed to init switchdev work, err=%pe",
+ work);
return notifier_from_errno(PTR_ERR(work));
}
@@ -527,7 +527,8 @@ void mlx5e_rep_bridge_init(struct mlx5e_priv *priv)
br_offloads = mlx5_esw_bridge_init(esw);
rtnl_unlock();
if (IS_ERR(br_offloads)) {
- esw_warn(mdev, "Failed to init esw bridge (err=%ld)\n", PTR_ERR(br_offloads));
+ esw_warn(mdev, "Failed to init esw bridge (err=%pe)\n",
+ br_offloads);
return;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
index 16c44d628eda..b1415992ffa2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
@@ -651,25 +651,29 @@ void mlx5e_reporter_icosq_resume_recovery(struct mlx5e_channel *c)
mutex_unlock(&c->icosq_recovery_lock);
}
+#define MLX5E_REPORTER_RX_GRACEFUL_PERIOD 500
+#define MLX5E_REPORTER_RX_BURST_PERIOD 500
+
static const struct devlink_health_reporter_ops mlx5_rx_reporter_ops = {
.name = "rx",
.recover = mlx5e_rx_reporter_recover,
.diagnose = mlx5e_rx_reporter_diagnose,
.dump = mlx5e_rx_reporter_dump,
+ .default_graceful_period = MLX5E_REPORTER_RX_GRACEFUL_PERIOD,
+ .default_burst_period = MLX5E_REPORTER_RX_BURST_PERIOD,
};
-#define MLX5E_REPORTER_RX_GRACEFUL_PERIOD 500
-
void mlx5e_reporter_rx_create(struct mlx5e_priv *priv)
{
+ struct devlink_port *port = priv->netdev->devlink_port;
struct devlink_health_reporter *reporter;
- reporter = devlink_port_health_reporter_create(priv->netdev->devlink_port,
+ reporter = devlink_port_health_reporter_create(port,
&mlx5_rx_reporter_ops,
- MLX5E_REPORTER_RX_GRACEFUL_PERIOD, priv);
+ priv);
if (IS_ERR(reporter)) {
- netdev_warn(priv->netdev, "Failed to create rx reporter, err = %ld\n",
- PTR_ERR(reporter));
+ netdev_warn(priv->netdev, "Failed to create rx reporter, err = %pe\n",
+ reporter);
return;
}
priv->rx_reporter = reporter;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index 85d5cb39b107..9e2cf191ed30 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -539,26 +539,30 @@ void mlx5e_reporter_tx_ptpsq_unhealthy(struct mlx5e_ptpsq *ptpsq)
mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx);
}
+#define MLX5E_REPORTER_TX_GRACEFUL_PERIOD 500
+#define MLX5E_REPORTER_TX_BURST_PERIOD 500
+
static const struct devlink_health_reporter_ops mlx5_tx_reporter_ops = {
.name = "tx",
.recover = mlx5e_tx_reporter_recover,
.diagnose = mlx5e_tx_reporter_diagnose,
.dump = mlx5e_tx_reporter_dump,
+ .default_graceful_period = MLX5E_REPORTER_TX_GRACEFUL_PERIOD,
+ .default_burst_period = MLX5E_REPORTER_TX_BURST_PERIOD,
};
-#define MLX5_REPORTER_TX_GRACEFUL_PERIOD 500
-
void mlx5e_reporter_tx_create(struct mlx5e_priv *priv)
{
+ struct devlink_port *port = priv->netdev->devlink_port;
struct devlink_health_reporter *reporter;
- reporter = devlink_port_health_reporter_create(priv->netdev->devlink_port,
+ reporter = devlink_port_health_reporter_create(port,
&mlx5_tx_reporter_ops,
- MLX5_REPORTER_TX_GRACEFUL_PERIOD, priv);
+ priv);
if (IS_ERR(reporter)) {
netdev_warn(priv->netdev,
- "Failed to create tx reporter, err = %ld\n",
- PTR_ERR(reporter));
+ "Failed to create tx reporter, err = %pe\n",
+ reporter);
return;
}
priv->tx_reporter = reporter;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
index c68ba0e58fa6..c96cbc4b0dbf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
@@ -75,15 +75,14 @@ struct mlx5e_rss {
struct mlx5e_tir *inner_tir[MLX5E_NUM_INDIR_TIRS];
struct mlx5e_rqt rqt;
struct mlx5_core_dev *mdev; /* primary */
- u32 drop_rqn;
- bool inner_ft_support;
+ struct mlx5e_rss_params params;
bool enabled;
refcount_t refcnt;
};
bool mlx5e_rss_get_inner_ft_support(struct mlx5e_rss *rss)
{
- return rss->inner_ft_support;
+ return rss->params.inner_ft_support;
}
void mlx5e_rss_params_indir_modify_actual_size(struct mlx5e_rss *rss, u32 num_channels)
@@ -91,7 +90,7 @@ void mlx5e_rss_params_indir_modify_actual_size(struct mlx5e_rss *rss, u32 num_ch
rss->indir.actual_table_size = mlx5e_rqt_size(rss->mdev, num_channels);
}
-int mlx5e_rss_params_indir_init(struct mlx5e_rss_params_indir *indir, struct mlx5_core_dev *mdev,
+int mlx5e_rss_params_indir_init(struct mlx5e_rss_params_indir *indir,
u32 actual_table_size, u32 max_table_size)
{
indir->table = kvmalloc_array(max_table_size, sizeof(*indir->table), GFP_KERNEL);
@@ -139,7 +138,8 @@ static struct mlx5e_rss *mlx5e_rss_init_copy(const struct mlx5e_rss *from)
if (!rss)
return ERR_PTR(-ENOMEM);
- err = mlx5e_rss_params_indir_init(&rss->indir, from->mdev, from->indir.actual_table_size,
+ err = mlx5e_rss_params_indir_init(&rss->indir,
+ from->indir.actual_table_size,
from->indir.max_table_size);
if (err)
goto err_free_rss;
@@ -192,11 +192,12 @@ mlx5e_rss_get_tt_config(struct mlx5e_rss *rss, enum mlx5_traffic_types tt)
return rss_tt;
}
-static int mlx5e_rss_create_tir(struct mlx5e_rss *rss,
- enum mlx5_traffic_types tt,
- const struct mlx5e_packet_merge_param *init_pkt_merge_param,
- bool inner)
+static int
+mlx5e_rss_create_tir(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
+ const struct mlx5e_packet_merge_param *pkt_merge_param,
+ bool inner)
{
+ bool rss_inner = rss->params.inner_ft_support;
struct mlx5e_rss_params_traffic_type rss_tt;
struct mlx5e_tir_builder *builder;
struct mlx5e_tir **tir_p;
@@ -204,7 +205,7 @@ static int mlx5e_rss_create_tir(struct mlx5e_rss *rss,
u32 rqtn;
int err;
- if (inner && !rss->inner_ft_support) {
+ if (inner && !rss_inner) {
mlx5e_rss_warn(rss->mdev,
"Cannot create inner indirect TIR[%d], RSS inner FT is not supported.\n",
tt);
@@ -227,8 +228,8 @@ static int mlx5e_rss_create_tir(struct mlx5e_rss *rss,
rqtn = mlx5e_rqt_get_rqtn(&rss->rqt);
mlx5e_tir_builder_build_rqt(builder, rss->mdev->mlx5e_res.hw_objs.td.tdn,
- rqtn, rss->inner_ft_support);
- mlx5e_tir_builder_build_packet_merge(builder, init_pkt_merge_param);
+ rqtn, rss_inner);
+ mlx5e_tir_builder_build_packet_merge(builder, pkt_merge_param);
rss_tt = mlx5e_rss_get_tt_config(rss, tt);
mlx5e_tir_builder_build_rss(builder, &rss->hash, &rss_tt, inner);
@@ -264,15 +265,16 @@ static void mlx5e_rss_destroy_tir(struct mlx5e_rss *rss, enum mlx5_traffic_types
*tir_p = NULL;
}
-static int mlx5e_rss_create_tirs(struct mlx5e_rss *rss,
- const struct mlx5e_packet_merge_param *init_pkt_merge_param,
- bool inner)
+static int
+mlx5e_rss_create_tirs(struct mlx5e_rss *rss,
+ const struct mlx5e_packet_merge_param *pkt_merge_param,
+ bool inner)
{
enum mlx5_traffic_types tt, max_tt;
int err;
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
- err = mlx5e_rss_create_tir(rss, tt, init_pkt_merge_param, inner);
+ err = mlx5e_rss_create_tir(rss, tt, pkt_merge_param, inner);
if (err)
goto err_destroy_tirs;
}
@@ -335,7 +337,7 @@ static int mlx5e_rss_update_tirs(struct mlx5e_rss *rss)
tt, err);
}
- if (!rss->inner_ft_support)
+ if (!rss->params.inner_ft_support)
continue;
err = mlx5e_rss_update_tir(rss, tt, true);
@@ -355,14 +357,16 @@ static int mlx5e_rss_init_no_tirs(struct mlx5e_rss *rss)
refcount_set(&rss->refcnt, 1);
return mlx5e_rqt_init_direct(&rss->rqt, rss->mdev, true,
- rss->drop_rqn, rss->indir.max_table_size);
+ rss->params.drop_rqn,
+ rss->indir.max_table_size);
}
-struct mlx5e_rss *mlx5e_rss_init(struct mlx5_core_dev *mdev, bool inner_ft_support, u32 drop_rqn,
- const struct mlx5e_packet_merge_param *init_pkt_merge_param,
- enum mlx5e_rss_init_type type, unsigned int nch,
- unsigned int max_nch)
+struct mlx5e_rss *
+mlx5e_rss_init(struct mlx5_core_dev *mdev,
+ const struct mlx5e_rss_params *params,
+ const struct mlx5e_rss_init_params *init_params)
{
+ u32 rqt_max_size, rqt_size;
struct mlx5e_rss *rss;
int err;
@@ -370,29 +374,31 @@ struct mlx5e_rss *mlx5e_rss_init(struct mlx5_core_dev *mdev, bool inner_ft_suppo
if (!rss)
return ERR_PTR(-ENOMEM);
- err = mlx5e_rss_params_indir_init(&rss->indir, mdev,
- mlx5e_rqt_size(mdev, nch),
- mlx5e_rqt_size(mdev, max_nch));
+ rqt_size = mlx5e_rqt_size(mdev, init_params->nch);
+ rqt_max_size = mlx5e_rqt_size(mdev, init_params->max_nch);
+ err = mlx5e_rss_params_indir_init(&rss->indir, rqt_size, rqt_max_size);
if (err)
goto err_free_rss;
rss->mdev = mdev;
- rss->inner_ft_support = inner_ft_support;
- rss->drop_rqn = drop_rqn;
+ rss->params = *params;
err = mlx5e_rss_init_no_tirs(rss);
if (err)
goto err_free_indir;
- if (type == MLX5E_RSS_INIT_NO_TIRS)
+ if (init_params->type == MLX5E_RSS_INIT_NO_TIRS)
goto out;
- err = mlx5e_rss_create_tirs(rss, init_pkt_merge_param, false);
+ err = mlx5e_rss_create_tirs(rss, init_params->pkt_merge_param,
+ false);
if (err)
goto err_destroy_rqt;
- if (inner_ft_support) {
- err = mlx5e_rss_create_tirs(rss, init_pkt_merge_param, true);
+ if (params->inner_ft_support) {
+ err = mlx5e_rss_create_tirs(rss,
+ init_params->pkt_merge_param,
+ true);
if (err)
goto err_destroy_tirs;
}
@@ -418,7 +424,7 @@ int mlx5e_rss_cleanup(struct mlx5e_rss *rss)
mlx5e_rss_destroy_tirs(rss, false);
- if (rss->inner_ft_support)
+ if (rss->params.inner_ft_support)
mlx5e_rss_destroy_tirs(rss, true);
mlx5e_rqt_destroy(&rss->rqt);
@@ -448,7 +454,7 @@ u32 mlx5e_rss_get_tirn(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
{
struct mlx5e_tir *tir;
- WARN_ON(inner && !rss->inner_ft_support);
+ WARN_ON(inner && !rss->params.inner_ft_support);
tir = rss_get_tir(rss, tt, inner);
WARN_ON(!tir);
@@ -468,10 +474,10 @@ bool mlx5e_rss_valid_tir(struct mlx5e_rss *rss, enum mlx5_traffic_types tt, bool
/* Fill the "tirn" output parameter.
* Create the requested TIR if it's its first usage.
*/
-int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss,
- enum mlx5_traffic_types tt,
- const struct mlx5e_packet_merge_param *init_pkt_merge_param,
- bool inner, u32 *tirn)
+int
+mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
+ const struct mlx5e_packet_merge_param *pkt_merge_param,
+ bool inner, u32 *tirn)
{
struct mlx5e_tir *tir;
@@ -479,7 +485,7 @@ int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss,
if (!tir) { /* TIR doesn't exist, create one */
int err;
- err = mlx5e_rss_create_tir(rss, tt, init_pkt_merge_param, inner);
+ err = mlx5e_rss_create_tir(rss, tt, pkt_merge_param, inner);
if (err)
return err;
tir = rss_get_tir(rss, tt, inner);
@@ -512,10 +518,11 @@ void mlx5e_rss_disable(struct mlx5e_rss *rss)
int err;
rss->enabled = false;
- err = mlx5e_rqt_redirect_direct(&rss->rqt, rss->drop_rqn, NULL);
+ err = mlx5e_rqt_redirect_direct(&rss->rqt, rss->params.drop_rqn, NULL);
if (err)
mlx5e_rss_warn(rss->mdev, "Failed to redirect RQT %#x to drop RQ %#x: err = %d\n",
- mlx5e_rqt_get_rqtn(&rss->rqt), rss->drop_rqn, err);
+ mlx5e_rqt_get_rqtn(&rss->rqt),
+ rss->params.drop_rqn, err);
}
int mlx5e_rss_packet_merge_set_param(struct mlx5e_rss *rss,
@@ -548,7 +555,7 @@ int mlx5e_rss_packet_merge_set_param(struct mlx5e_rss *rss,
}
inner_tir:
- if (!rss->inner_ft_support)
+ if (!rss->params.inner_ft_support)
continue;
tir = rss_get_tir(rss, tt, true);
@@ -681,7 +688,7 @@ int mlx5e_rss_set_hash_fields(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
return err;
}
- if (!(rss->inner_ft_support))
+ if (!(rss->params.inner_ft_support))
return 0;
err = mlx5e_rss_update_tir(rss, tt, true);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h
index c6c1b2847cf5..5fb03cd0a411 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h
@@ -13,19 +13,31 @@ enum mlx5e_rss_init_type {
MLX5E_RSS_INIT_TIRS
};
+struct mlx5e_rss_init_params {
+ enum mlx5e_rss_init_type type;
+ const struct mlx5e_packet_merge_param *pkt_merge_param;
+ unsigned int nch;
+ unsigned int max_nch;
+};
+
+struct mlx5e_rss_params {
+ bool inner_ft_support;
+ u32 drop_rqn;
+};
+
struct mlx5e_rss_params_traffic_type
mlx5e_rss_get_default_tt_config(enum mlx5_traffic_types tt);
struct mlx5e_rss;
-int mlx5e_rss_params_indir_init(struct mlx5e_rss_params_indir *indir, struct mlx5_core_dev *mdev,
+int mlx5e_rss_params_indir_init(struct mlx5e_rss_params_indir *indir,
u32 actual_table_size, u32 max_table_size);
void mlx5e_rss_params_indir_cleanup(struct mlx5e_rss_params_indir *indir);
void mlx5e_rss_params_indir_modify_actual_size(struct mlx5e_rss *rss, u32 num_channels);
-struct mlx5e_rss *mlx5e_rss_init(struct mlx5_core_dev *mdev, bool inner_ft_support, u32 drop_rqn,
- const struct mlx5e_packet_merge_param *init_pkt_merge_param,
- enum mlx5e_rss_init_type type, unsigned int nch,
- unsigned int max_nch);
+struct mlx5e_rss *
+mlx5e_rss_init(struct mlx5_core_dev *mdev,
+ const struct mlx5e_rss_params *params,
+ const struct mlx5e_rss_init_params *init_params);
int mlx5e_rss_cleanup(struct mlx5e_rss *rss);
void mlx5e_rss_refcnt_inc(struct mlx5e_rss *rss);
@@ -37,10 +49,10 @@ u32 mlx5e_rss_get_tirn(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
bool inner);
bool mlx5e_rss_valid_tir(struct mlx5e_rss *rss, enum mlx5_traffic_types tt, bool inner);
u32 mlx5e_rss_get_rqtn(struct mlx5e_rss *rss);
-int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss,
- enum mlx5_traffic_types tt,
- const struct mlx5e_packet_merge_param *init_pkt_merge_param,
- bool inner, u32 *tirn);
+int
+mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
+ const struct mlx5e_packet_merge_param *pkt_merge_param,
+ bool inner, u32 *tirn);
void mlx5e_rss_enable(struct mlx5e_rss *rss, u32 *rqns, u32 *vhca_ids, unsigned int num_rqns);
void mlx5e_rss_disable(struct mlx5e_rss *rss);
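
The rss.h change collapses five positional arguments of mlx5e_rss_init() into two parameter structs. Callers (see the rx_res.c hunks below) fill them with compound literals; a minimal sketch of the new call shape, using only the fields declared above:

	struct mlx5e_rss_init_params init_params = {
		.type = MLX5E_RSS_INIT_TIRS,
		.pkt_merge_param = &pkt_merge_param,
		.nch = nch,
		.max_nch = max_nch,
	};
	struct mlx5e_rss_params rss_params = {
		.inner_ft_support = true,
		.drop_rqn = drop_rqn,
	};
	struct mlx5e_rss *rss = mlx5e_rss_init(mdev, &rss_params, &init_params);
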
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
index a2acbfee2b77..ac26a32845d0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
@@ -54,17 +54,30 @@ static int mlx5e_rx_res_rss_init_def(struct mlx5e_rx_res *res,
unsigned int init_nch)
{
bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
+ struct mlx5e_rss_init_params init_params;
+ struct mlx5e_rss_params rss_params;
struct mlx5e_rss *rss;
if (WARN_ON(res->rss[0]))
return -EINVAL;
- rss = mlx5e_rss_init(res->mdev, inner_ft_support, res->drop_rqn,
- &res->pkt_merge_param, MLX5E_RSS_INIT_TIRS, init_nch, res->max_nch);
+ init_params = (struct mlx5e_rss_init_params) {
+ .type = MLX5E_RSS_INIT_TIRS,
+ .pkt_merge_param = &res->pkt_merge_param,
+ .nch = init_nch,
+ .max_nch = res->max_nch,
+ };
+
+ rss_params = (struct mlx5e_rss_params) {
+ .inner_ft_support = inner_ft_support,
+ .drop_rqn = res->drop_rqn,
+ };
+
+ rss = mlx5e_rss_init(res->mdev, &rss_params, &init_params);
if (IS_ERR(rss))
return PTR_ERR(rss);
- mlx5e_rss_set_indir_uniform(rss, init_nch);
+ mlx5e_rss_set_indir_uniform(rss, init_params.nch);
res->rss[0] = rss;
@@ -74,18 +87,30 @@ static int mlx5e_rx_res_rss_init_def(struct mlx5e_rx_res *res,
int mlx5e_rx_res_rss_init(struct mlx5e_rx_res *res, u32 rss_idx, unsigned int init_nch)
{
bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
+ struct mlx5e_rss_init_params init_params;
+ struct mlx5e_rss_params rss_params;
struct mlx5e_rss *rss;
if (WARN_ON_ONCE(res->rss[rss_idx]))
return -ENOSPC;
- rss = mlx5e_rss_init(res->mdev, inner_ft_support, res->drop_rqn,
- &res->pkt_merge_param, MLX5E_RSS_INIT_NO_TIRS, init_nch,
- res->max_nch);
+ init_params = (struct mlx5e_rss_init_params) {
+ .type = MLX5E_RSS_INIT_NO_TIRS,
+ .pkt_merge_param = &res->pkt_merge_param,
+ .nch = init_nch,
+ .max_nch = res->max_nch,
+ };
+
+ rss_params = (struct mlx5e_rss_params) {
+ .inner_ft_support = inner_ft_support,
+ .drop_rqn = res->drop_rqn,
+ };
+
+ rss = mlx5e_rss_init(res->mdev, &rss_params, &init_params);
if (IS_ERR(rss))
return PTR_ERR(rss);
- mlx5e_rss_set_indir_uniform(rss, init_nch);
+ mlx5e_rss_set_indir_uniform(rss, init_params.nch);
if (res->rss_active) {
u32 *vhca_ids = get_vhca_ids(res, 0);
@@ -438,7 +463,7 @@ static void mlx5e_rx_res_ptp_destroy(struct mlx5e_rx_res *res)
struct mlx5e_rx_res *
mlx5e_rx_res_create(struct mlx5_core_dev *mdev, enum mlx5e_rx_res_features features,
unsigned int max_nch, u32 drop_rqn,
- const struct mlx5e_packet_merge_param *init_pkt_merge_param,
+ const struct mlx5e_packet_merge_param *pkt_merge_param,
unsigned int init_nch)
{
bool multi_vhca = features & MLX5E_RX_RES_FEATURE_MULTI_VHCA;
@@ -454,7 +479,7 @@ mlx5e_rx_res_create(struct mlx5_core_dev *mdev, enum mlx5e_rx_res_features featu
res->max_nch = max_nch;
res->drop_rqn = drop_rqn;
- res->pkt_merge_param = *init_pkt_merge_param;
+ res->pkt_merge_param = *pkt_merge_param;
init_rwsem(&res->pkt_merge_param_sem);
err = mlx5e_rx_res_rss_init_def(res, init_nch);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
index 1d049e2aa264..65a857c215e1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
@@ -27,7 +27,7 @@ enum mlx5e_rx_res_features {
struct mlx5e_rx_res *
mlx5e_rx_res_create(struct mlx5_core_dev *mdev, enum mlx5e_rx_res_features features,
unsigned int max_nch, u32 drop_rqn,
- const struct mlx5e_packet_merge_param *init_pkt_merge_param,
+ const struct mlx5e_packet_merge_param *pkt_merge_param,
unsigned int init_nch);
void mlx5e_rx_res_destroy(struct mlx5e_rx_res *res);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c
index 01d522b02947..d3db6146fcad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c
@@ -136,8 +136,8 @@ mlx5_ct_fs_hmfs_matcher_get(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
hws_bwc_matcher = mlx5_ct_fs_hmfs_matcher_create(fs, tbl, spec, ipv4, tcp, gre);
if (IS_ERR(hws_bwc_matcher)) {
netdev_warn(fs->netdev,
- "ct_fs_hmfs: failed to create bwc matcher (nat %d, ipv4 %d, tcp %d, gre %d), err: %ld\n",
- nat, ipv4, tcp, gre, PTR_ERR(hws_bwc_matcher));
+ "ct_fs_hmfs: failed to create bwc matcher (nat %d, ipv4 %d, tcp %d, gre %d), err: %pe\n",
+ nat, ipv4, tcp, gre, hws_bwc_matcher);
hmfs_matcher = ERR_CAST(hws_bwc_matcher);
goto out_unlock;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c
index 0c97c5899904..4d6924b644c9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c
@@ -148,8 +148,8 @@ mlx5_ct_fs_smfs_matcher_get(struct mlx5_ct_fs *fs, bool nat, bool ipv4, bool tcp
dr_matcher = mlx5_ct_fs_smfs_matcher_create(fs, tbl, ipv4, tcp, gre, prio);
if (IS_ERR(dr_matcher)) {
netdev_warn(fs->netdev,
- "ct_fs_smfs: failed to create matcher (nat %d, ipv4 %d, tcp %d, gre %d), err: %ld\n",
- nat, ipv4, tcp, gre, PTR_ERR(dr_matcher));
+ "ct_fs_smfs: failed to create matcher (nat %d, ipv4 %d, tcp %d, gre %d), err: %pe\n",
+ nat, ipv4, tcp, gre, dr_matcher);
smfs_matcher = ERR_CAST(dr_matcher);
goto out_unlock;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.c
index 8afcec0c5d3c..896f718483c3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.c
@@ -93,8 +93,8 @@ mlx5e_int_port_create_rx_rule(struct mlx5_eswitch *esw,
flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
&flow_act, dest, 1);
if (IS_ERR(flow_rule))
- mlx5_core_warn(esw->dev, "ft offloads: Failed to add internal vport rx rule err %ld\n",
- PTR_ERR(flow_rule));
+ mlx5_core_warn(esw->dev, "ft offloads: Failed to add internal vport rx rule err %pe\n",
+ flow_rule);
kvfree(spec);
@@ -322,8 +322,8 @@ mlx5e_tc_int_port_init(struct mlx5e_priv *priv)
sizeof(u32) * 2,
(1 << ESW_VPORT_BITS) - 1, true);
if (IS_ERR(int_port_priv->metadata_mapping)) {
- mlx5_core_warn(priv->mdev, "Can't allocate metadata mapping of int port offload, err=%ld\n",
- PTR_ERR(int_port_priv->metadata_mapping));
+ mlx5_core_warn(priv->mdev, "Can't allocate metadata mapping of int port offload, err=%pe\n",
+ int_port_priv->metadata_mapping);
goto err_mapping;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index 2162d776fe35..a14f216048cd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -1,7 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2018 Mellanox Technologies. */
-#include <net/inet_ecn.h>
+#include <net/flow.h>
+#include <net/inet_dscp.h>
#include <net/vxlan.h>
#include <net/gre.h>
#include <net/geneve.h>
@@ -233,7 +234,7 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
int err;
/* add the IP fields */
- attr.fl.fl4.flowi4_tos = tun_key->tos & ~INET_ECN_MASK;
+ attr.fl.fl4.flowi4_dscp = inet_dsfield_to_dscp(tun_key->tos);
attr.fl.fl4.daddr = tun_key->u.ipv4.dst;
attr.fl.fl4.saddr = tun_key->u.ipv4.src;
attr.ttl = tun_key->ttl;
@@ -349,7 +350,7 @@ int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv,
int err;
/* add the IP fields */
- attr.fl.fl4.flowi4_tos = tun_key->tos & ~INET_ECN_MASK;
+ attr.fl.fl4.flowi4_dscp = inet_dsfield_to_dscp(tun_key->tos);
attr.fl.fl4.daddr = tun_key->u.ipv4.dst;
attr.fl.fl4.saddr = tun_key->u.ipv4.src;
attr.ttl = tun_key->ttl;
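
Both tc_tun.c hunks replace the open-coded "tun_key->tos & ~INET_ECN_MASK" with inet_dsfield_to_dscp(), which masks the two ECN bits off the DS field and returns the type-safe dscp_t now carried in flowi4_dscp. The two forms select the same six DSCP bits:

	#include <net/inet_dscp.h>

	u8 tos = tun_key->tos;				/* DS field: DSCP[7:2] | ECN[1:0] */
	dscp_t dscp = inet_dsfield_to_dscp(tos);	/* == (tos & INET_DSCP_MASK) */
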
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
index a0fc76a1bc08..0735d10f2bac 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
@@ -172,8 +172,8 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
&reformat_params,
MLX5_FLOW_NAMESPACE_FDB);
if (IS_ERR(e->pkt_reformat)) {
- mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %lu\n",
- PTR_ERR(e->pkt_reformat));
+ mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %pe\n",
+ e->pkt_reformat);
return;
}
e->flags |= MLX5_ENCAP_ENTRY_VALID;
@@ -1845,8 +1845,8 @@ static int mlx5e_tc_tun_fib_event(struct notifier_block *nb, unsigned long event
queue_work(priv->wq, &fib_work->work);
} else if (IS_ERR(fib_work)) {
NL_SET_ERR_MSG_MOD(info->extack, "Failed to init fib work");
- mlx5_core_warn(priv->mdev, "Failed to init fib work, %ld\n",
- PTR_ERR(fib_work));
+ mlx5_core_warn(priv->mdev, "Failed to init fib work, %pe\n",
+ fib_work);
}
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
index b5c19396e096..996fcdb5a29d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
@@ -76,6 +76,7 @@ static int mlx5e_open_trap_rq(struct mlx5e_priv *priv, struct mlx5e_trap *t)
ccp.ch_stats = t->stats;
ccp.napi = &t->napi;
ccp.ix = 0;
+ ccp.uar = mdev->priv.bfreg.up;
err = mlx5e_open_cq(priv->mdev, trap_moder, &rq_param->cqp, &ccp, &rq->cq);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index 5dc04bbfc71b..6760bb0336df 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -309,10 +309,7 @@ mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc, void __iomem *uar_map,
static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
{
- struct mlx5_core_cq *mcq;
-
- mcq = &cq->mcq;
- mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, cq->wq.cc);
+ mlx5_cq_arm(&cq->mcq, MLX5_CQ_DB_REQ_NOT, cq->uar->map, cq->wq.cc);
}
static inline struct mlx5e_sq_dma *
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index d743e823362a..dbd88eb5c082 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -54,7 +54,7 @@ static void mlx5e_build_xsk_cparam(struct mlx5_core_dev *mdev,
struct mlx5e_channel_param *cparam)
{
mlx5e_build_rq_param(mdev, params, xsk, &cparam->rq);
- mlx5e_build_xdpsq_param(mdev, params, xsk, &cparam->xdp_sq);
+ mlx5e_build_xdpsq_param(mdev, params, &cparam->xdp_sq);
}
static int mlx5e_init_xsk_rq(struct mlx5e_channel *c,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index 33e32584b07f..8bef99e8367e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -42,6 +42,8 @@
#include <en_accel/macsec.h>
#include "en.h"
#include "en/txrx.h"
+#include "en_accel/psp.h"
+#include "en_accel/psp_rxtx.h"
#if IS_ENABLED(CONFIG_GENEVE)
#include <net/geneve.h>
@@ -119,6 +121,9 @@ struct mlx5e_accel_tx_state {
#ifdef CONFIG_MLX5_EN_IPSEC
struct mlx5e_accel_tx_ipsec_state ipsec;
#endif
+#ifdef CONFIG_MLX5_EN_PSP
+ struct mlx5e_accel_tx_psp_state psp_st;
+#endif
};
static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
@@ -137,6 +142,13 @@ static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
return false;
#endif
+#ifdef CONFIG_MLX5_EN_PSP
+ if (mlx5e_psp_is_offload(skb, dev)) {
+ if (unlikely(!mlx5e_psp_handle_tx_skb(dev, skb, &state->psp_st)))
+ return false;
+ }
+#endif
+
#ifdef CONFIG_MLX5_EN_IPSEC
if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state) && xfrm_offload(skb)) {
if (unlikely(!mlx5e_ipsec_handle_tx_skb(dev, skb, &state->ipsec)))
@@ -157,8 +169,14 @@ static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
}
static inline unsigned int mlx5e_accel_tx_ids_len(struct mlx5e_txqsq *sq,
+ struct sk_buff *skb,
struct mlx5e_accel_tx_state *state)
{
+#ifdef CONFIG_MLX5_EN_PSP
+ if (mlx5e_psp_is_offload_state(&state->psp_st))
+ return mlx5e_psp_tx_ids_len(&state->psp_st);
+#endif
+
#ifdef CONFIG_MLX5_EN_IPSEC
if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state))
return mlx5e_ipsec_tx_ids_len(&state->ipsec);
@@ -172,8 +190,14 @@ static inline unsigned int mlx5e_accel_tx_ids_len(struct mlx5e_txqsq *sq,
static inline void mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
struct sk_buff *skb,
+ struct mlx5e_accel_tx_state *accel,
struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
+#ifdef CONFIG_MLX5_EN_PSP
+ if (mlx5e_psp_is_offload_state(&accel->psp_st))
+ mlx5e_psp_tx_build_eseg(priv, skb, &accel->psp_st, eseg);
+#endif
+
#ifdef CONFIG_MLX5_EN_IPSEC
if (xfrm_offload(skb))
mlx5e_ipsec_tx_build_eseg(priv, skb, eseg);
@@ -199,6 +223,11 @@ static inline void mlx5e_accel_tx_finish(struct mlx5e_txqsq *sq,
mlx5e_ktls_handle_tx_wqe(&wqe->ctrl, &state->tls);
#endif
+#ifdef CONFIG_MLX5_EN_PSP
+ if (mlx5e_psp_is_offload_state(&state->psp_st))
+ mlx5e_psp_handle_tx_wqe(wqe, &state->psp_st, inlseg);
+#endif
+
#ifdef CONFIG_MLX5_EN_IPSEC
if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state) &&
state->ipsec.xo && state->ipsec.tailen)
@@ -208,21 +237,40 @@ static inline void mlx5e_accel_tx_finish(struct mlx5e_txqsq *sq,
static inline int mlx5e_accel_init_rx(struct mlx5e_priv *priv)
{
- return mlx5e_ktls_init_rx(priv);
+ int err;
+
+ err = mlx5_accel_psp_fs_init_rx_tables(priv);
+ if (err)
+ goto out;
+
+ err = mlx5e_ktls_init_rx(priv);
+ if (err)
+ mlx5_accel_psp_fs_cleanup_rx_tables(priv);
+
+out:
+ return err;
}
static inline void mlx5e_accel_cleanup_rx(struct mlx5e_priv *priv)
{
mlx5e_ktls_cleanup_rx(priv);
+ mlx5_accel_psp_fs_cleanup_rx_tables(priv);
}
static inline int mlx5e_accel_init_tx(struct mlx5e_priv *priv)
{
+ int err;
+
+ err = mlx5_accel_psp_fs_init_tx_tables(priv);
+ if (err)
+ return err;
+
return mlx5e_ktls_init_tx(priv);
}
static inline void mlx5e_accel_cleanup_tx(struct mlx5e_priv *priv)
{
mlx5e_ktls_cleanup_tx(priv);
+ mlx5_accel_psp_fs_cleanup_tx_tables(priv);
}
#endif /* __MLX5E_EN_ACCEL_H__ */
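
The mlx5e_accel_init_rx()/mlx5e_accel_cleanup_rx() pair above follows the kernel's usual mirror-order convention: teardown runs in the reverse order of creation, and a failure mid-init unwinds only the steps that already succeeded. Distilled to a skeleton with placeholder step names:

	static int init(void)
	{
		int err;

		err = step_a_init();
		if (err)
			return err;

		err = step_b_init();
		if (err)
			step_a_cleanup();	/* unwind the step that succeeded */

		return err;
	}

	static void cleanup(void)
	{
		step_b_cleanup();		/* reverse order of init() */
		step_a_cleanup();
	}
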
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
index 4f83e3172767..1febdc5b81f9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
@@ -138,7 +138,7 @@ struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_flow_steering *fs,
flow = mlx5_add_flow_rules(ft->t, spec, &flow_act, &dest, 1);
if (IS_ERR(flow))
- fs_err(fs, "mlx5_add_flow_rules() failed, flow is %ld\n", PTR_ERR(flow));
+ fs_err(fs, "mlx5_add_flow_rules() failed, flow is %pe\n", flow);
out:
kvfree(spec);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index 65dc3529283b..6ccfc2af07b7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -61,6 +61,7 @@ struct mlx5e_ipsec_rx {
struct mlx5_flow_table *pol_miss_ft;
struct mlx5_flow_handle *pol_miss_rule;
u8 allow_tunnel_mode : 1;
+ u8 ttc_rules_added : 1;
};
/* IPsec RX flow steering */
@@ -585,6 +586,20 @@ out:
return err;
}
+static struct mlx5_flow_destination
+ipsec_rx_decrypted_pkt_def_dest(struct mlx5_ttc_table *ttc, u32 family)
+{
+ struct mlx5_flow_destination dest;
+
+ if (!mlx5_ttc_has_esp_flow_group(ttc))
+ return mlx5_ttc_get_default_dest(ttc, family2tt(family));
+
+ dest.ft = mlx5_get_ttc_flow_table(ttc);
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+
+ return dest;
+}
+
static void ipsec_rx_update_default_dest(struct mlx5e_ipsec_rx *rx,
struct mlx5_flow_destination *old_dest,
struct mlx5_flow_destination *new_dest)
@@ -598,10 +613,10 @@ static void handle_ipsec_rx_bringup(struct mlx5e_ipsec *ipsec, u32 family)
{
struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, XFRM_DEV_OFFLOAD_PACKET);
struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(ipsec->fs, false);
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
struct mlx5_flow_destination old_dest, new_dest;
- old_dest = mlx5_ttc_get_default_dest(mlx5e_fs_get_ttc(ipsec->fs, false),
- family2tt(family));
+ old_dest = ipsec_rx_decrypted_pkt_def_dest(ttc, family);
mlx5_ipsec_fs_roce_rx_create(ipsec->mdev, ipsec->roce, ns, &old_dest, family,
MLX5E_ACCEL_FS_ESP_FT_ROCE_LEVEL, MLX5E_NIC_PRIO);
@@ -614,12 +629,12 @@ static void handle_ipsec_rx_bringup(struct mlx5e_ipsec *ipsec, u32 family)
static void handle_ipsec_rx_cleanup(struct mlx5e_ipsec *ipsec, u32 family)
{
struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, XFRM_DEV_OFFLOAD_PACKET);
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
struct mlx5_flow_destination old_dest, new_dest;
old_dest.ft = mlx5_ipsec_fs_roce_ft_get(ipsec->roce, family);
old_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- new_dest = mlx5_ttc_get_default_dest(mlx5e_fs_get_ttc(ipsec->fs, false),
- family2tt(family));
+ new_dest = ipsec_rx_decrypted_pkt_def_dest(ttc, family);
ipsec_rx_update_default_dest(rx, &old_dest, &new_dest);
mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, ipsec->mdev);
@@ -669,10 +684,13 @@ static void ipsec_mpv_work_handler(struct work_struct *_work)
complete(&work->master_priv->ipsec->comp);
}
-static void ipsec_rx_ft_disconnect(struct mlx5e_ipsec *ipsec, u32 family)
+static void ipsec_rx_ft_disconnect(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx, u32 family)
{
struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
+ if (rx->ttc_rules_added)
+ mlx5_ttc_destroy_ipsec_rules(ttc);
mlx5_ttc_fwd_default_dest(ttc, family2tt(family));
}
@@ -707,7 +725,7 @@ static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
{
/* disconnect */
if (rx != ipsec->rx_esw)
- ipsec_rx_ft_disconnect(ipsec, family);
+ ipsec_rx_ft_disconnect(ipsec, rx, family);
mlx5_del_flow_rules(rx->sa.rule);
mlx5_destroy_flow_group(rx->sa.group);
@@ -764,7 +782,7 @@ static int ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec,
if (rx == ipsec->rx_esw)
return mlx5_esw_ipsec_rx_status_pass_dest_get(ipsec, dest);
- *dest = mlx5_ttc_get_default_dest(attr->ttc, family2tt(attr->family));
+ *dest = ipsec_rx_decrypted_pkt_def_dest(attr->ttc, attr->family);
err = mlx5_ipsec_fs_roce_rx_create(ipsec->mdev, ipsec->roce, attr->ns, dest,
attr->family, MLX5E_ACCEL_FS_ESP_FT_ROCE_LEVEL,
attr->prio);
@@ -807,10 +825,16 @@ static void ipsec_rx_ft_connect(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx_create_attr *attr)
{
struct mlx5_flow_destination dest = {};
+ struct mlx5_ttc_table *ttc, *inner_ttc;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest.ft = rx->ft.sa;
- mlx5_ttc_fwd_dest(attr->ttc, family2tt(attr->family), &dest);
+ if (mlx5_ttc_fwd_dest(attr->ttc, family2tt(attr->family), &dest))
+ return;
+
+ ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
+ inner_ttc = mlx5e_fs_get_ttc(ipsec->fs, true);
+ rx->ttc_rules_added = !mlx5_ttc_create_ipsec_rules(ttc, inner_ttc);
}
static int ipsec_rx_chains_create_miss(struct mlx5e_ipsec *ipsec,
@@ -1705,8 +1729,8 @@ static int setup_modify_header(struct mlx5e_ipsec *ipsec, int type, u32 val, u8
modify_hdr = mlx5_modify_header_alloc(mdev, ns_type, num_of_actions, action);
if (IS_ERR(modify_hdr)) {
- mlx5_core_err(mdev, "Failed to allocate modify_header %ld\n",
- PTR_ERR(modify_hdr));
+ mlx5_core_err(mdev, "Failed to allocate modify_header %pe\n",
+ modify_hdr);
return PTR_ERR(modify_hdr);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
index 3cc640669247..45b0d19e735c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -40,7 +40,7 @@
#include "en/txrx.h"
/* Bit31: IPsec marker, Bit30: reserved, Bit29-24: IPsec syndrome, Bit23-0: IPsec obj id */
-#define MLX5_IPSEC_METADATA_MARKER(metadata) (((metadata) >> 31) & 0x1)
+#define MLX5_IPSEC_METADATA_MARKER(metadata) ((((metadata) >> 30) & 0x3) == 0x2)
#define MLX5_IPSEC_METADATA_SYNDROM(metadata) (((metadata) >> 24) & GENMASK(5, 0))
#define MLX5_IPSEC_METADATA_HANDLE(metadata) ((metadata) & GENMASK(23, 0))
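
This macro change is needed because the new PSP datapath (psp.c below) stamps decrypted packets with both metadata bits 31 and 30 (MLX5E_PSP_MARKER_BIT), while IPsec sets bit 31 with bit 30 clear. Testing bit 31 alone would misclassify PSP traffic as IPsec, so the updated macro compares the full two-bit pattern. A sketch of the resulting dispatch, with hypothetical handler names:

	u32 marker = (metadata >> 30) & 0x3;

	if (marker == 0x2)		/* 0b10: IPsec-marked CQE */
		handle_ipsec_metadata(metadata);
	else if (marker == 0x3)		/* 0b11: PSP-marked CQE */
		handle_psp_metadata(metadata);
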
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
index 65ccb33edafb..d7a11ff9bbdb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
@@ -498,9 +498,9 @@ static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb)
depth += sizeof(struct iphdr);
th = (void *)iph + sizeof(struct iphdr);
- sk = inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
- iph->saddr, th->source, iph->daddr,
- th->dest, netdev->ifindex);
+ sk = inet_lookup_established(net, iph->saddr, th->source,
+ iph->daddr, th->dest,
+ netdev->ifindex);
#if IS_ENABLED(CONFIG_IPV6)
} else {
struct ipv6hdr *ipv6h = (struct ipv6hdr *)iph;
@@ -508,8 +508,7 @@ static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb)
depth += sizeof(struct ipv6hdr);
th = (void *)ipv6h + sizeof(struct ipv6hdr);
- sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
- &ipv6h->saddr, th->source,
+ sk = __inet6_lookup_established(net, &ipv6h->saddr, th->source,
&ipv6h->daddr, ntohs(th->dest),
netdev->ifindex, 0);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
index 6ab02f3fc291..528b04d4de41 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
@@ -1676,7 +1676,7 @@ void mlx5e_macsec_tx_build_eseg(struct mlx5e_macsec *macsec,
if (!fs_id)
return;
- eseg->flow_table_metadata = cpu_to_be32(MLX5_ETH_WQE_FT_META_MACSEC | fs_id << 2);
+ eseg->flow_table_metadata = cpu_to_be32(MLX5_MACSEC_TX_METADATA(fs_id));
}
void mlx5e_macsec_offload_handle_rx_skb(struct net_device *netdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.c
new file mode 100644
index 000000000000..b4cb131c5f81
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.c
@@ -0,0 +1,952 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+#include <linux/mlx5/device.h>
+#include <net/psp.h>
+#include <linux/psp.h>
+#include "mlx5_core.h"
+#include "psp.h"
+#include "lib/crypto.h"
+#include "en_accel/psp.h"
+#include "fs_core.h"
+
+enum accel_fs_psp_type {
+ ACCEL_FS_PSP4,
+ ACCEL_FS_PSP6,
+ ACCEL_FS_PSP_NUM_TYPES,
+};
+
+enum accel_psp_syndrome {
+ PSP_OK = 0,
+ PSP_ICV_FAIL,
+ PSP_BAD_TRAILER,
+};
+
+struct mlx5e_psp_tx {
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_group *fg;
+ struct mlx5_flow_handle *rule;
+ struct mutex mutex; /* Protect PSP TX steering */
+ u32 refcnt;
+};
+
+struct mlx5e_psp_rx_err {
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_handle *drop_rule;
+ struct mlx5_modify_hdr *copy_modify_hdr;
+};
+
+struct mlx5e_accel_fs_psp_prot {
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_group *miss_group;
+ struct mlx5_flow_handle *miss_rule;
+ struct mlx5_flow_destination default_dest;
+ struct mlx5e_psp_rx_err rx_err;
+ u32 refcnt;
+ struct mutex prot_mutex; /* protect ESP4/ESP6 protocol */
+ struct mlx5_flow_handle *def_rule;
+};
+
+struct mlx5e_accel_fs_psp {
+ struct mlx5e_accel_fs_psp_prot fs_prot[ACCEL_FS_PSP_NUM_TYPES];
+};
+
+struct mlx5e_psp_fs {
+ struct mlx5_core_dev *mdev;
+ struct mlx5e_psp_tx *tx_fs;
+ /* Rx manage */
+ struct mlx5e_flow_steering *fs;
+ struct mlx5e_accel_fs_psp *rx_fs;
+};
+
+/* PSP RX flow steering */
+static enum mlx5_traffic_types fs_psp2tt(enum accel_fs_psp_type i)
+{
+ if (i == ACCEL_FS_PSP4)
+ return MLX5_TT_IPV4_UDP;
+
+ return MLX5_TT_IPV6_UDP;
+}
+
+static void accel_psp_fs_rx_err_del_rules(struct mlx5e_psp_fs *fs,
+ struct mlx5e_psp_rx_err *rx_err)
+{
+ if (rx_err->drop_rule) {
+ mlx5_del_flow_rules(rx_err->drop_rule);
+ rx_err->drop_rule = NULL;
+ }
+
+ if (rx_err->rule) {
+ mlx5_del_flow_rules(rx_err->rule);
+ rx_err->rule = NULL;
+ }
+
+ if (rx_err->copy_modify_hdr) {
+ mlx5_modify_header_dealloc(fs->mdev, rx_err->copy_modify_hdr);
+ rx_err->copy_modify_hdr = NULL;
+ }
+}
+
+static void accel_psp_fs_rx_err_destroy_ft(struct mlx5e_psp_fs *fs,
+ struct mlx5e_psp_rx_err *rx_err)
+{
+ accel_psp_fs_rx_err_del_rules(fs, rx_err);
+
+ if (rx_err->ft) {
+ mlx5_destroy_flow_table(rx_err->ft);
+ rx_err->ft = NULL;
+ }
+}
+
+static void accel_psp_setup_syndrome_match(struct mlx5_flow_spec *spec,
+ enum accel_psp_syndrome syndrome)
+{
+ void *misc_params_2;
+
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
+ misc_params_2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
+ MLX5_SET_TO_ONES(fte_match_set_misc2, misc_params_2, psp_syndrome);
+ misc_params_2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
+ MLX5_SET(fte_match_set_misc2, misc_params_2, psp_syndrome, syndrome);
+}
+
+static int accel_psp_fs_rx_err_add_rule(struct mlx5e_psp_fs *fs,
+ struct mlx5e_accel_fs_psp_prot *fs_prot,
+ struct mlx5e_psp_rx_err *rx_err)
+{
+ u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
+ struct mlx5_core_dev *mdev = fs->mdev;
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_modify_hdr *modify_hdr;
+ struct mlx5_flow_handle *fte;
+ struct mlx5_flow_spec *spec;
+ int err = 0;
+
+ spec = kzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ /* Action to copy the 7-bit psp_syndrome into regB[23:29] */
+ MLX5_SET(copy_action_in, action, action_type, MLX5_ACTION_TYPE_COPY);
+ MLX5_SET(copy_action_in, action, src_field, MLX5_ACTION_IN_FIELD_PSP_SYNDROME);
+ MLX5_SET(copy_action_in, action, src_offset, 0);
+ MLX5_SET(copy_action_in, action, length, 7);
+ MLX5_SET(copy_action_in, action, dst_field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
+ MLX5_SET(copy_action_in, action, dst_offset, 23);
+
+ modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_KERNEL,
+ 1, action);
+ if (IS_ERR(modify_hdr)) {
+ err = PTR_ERR(modify_hdr);
+ mlx5_core_err(mdev,
+ "fail to alloc psp copy modify_header_id err=%d\n", err);
+ goto out_spec;
+ }
+
+ accel_psp_setup_syndrome_match(spec, PSP_OK);
+ /* create fte */
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ flow_act.modify_hdr = modify_hdr;
+ fte = mlx5_add_flow_rules(rx_err->ft, spec, &flow_act,
+ &fs_prot->default_dest, 1);
+ if (IS_ERR(fte)) {
+ err = PTR_ERR(fte);
+ mlx5_core_err(mdev, "fail to add psp rx err copy rule err=%d\n", err);
+ goto out;
+ }
+ rx_err->rule = fte;
+
+ /* add default drop rule */
+ memset(spec, 0, sizeof(*spec));
+ memset(&flow_act, 0, sizeof(flow_act));
+ /* create fte */
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+ fte = mlx5_add_flow_rules(rx_err->ft, spec, &flow_act, NULL, 0);
+ if (IS_ERR(fte)) {
+ err = PTR_ERR(fte);
+ mlx5_core_err(mdev, "fail to add psp rx err drop rule err=%d\n", err);
+ goto out_drop_rule;
+ }
+ rx_err->drop_rule = fte;
+ rx_err->copy_modify_hdr = modify_hdr;
+
+ goto out_spec;
+
+out_drop_rule:
+ mlx5_del_flow_rules(rx_err->rule);
+ rx_err->rule = NULL;
+out:
+ mlx5_modify_header_dealloc(mdev, modify_hdr);
+out_spec:
+ kfree(spec);
+ return err;
+}
+
+static int accel_psp_fs_rx_err_create_ft(struct mlx5e_psp_fs *fs,
+ struct mlx5e_accel_fs_psp_prot *fs_prot,
+ struct mlx5e_psp_rx_err *rx_err)
+{
+ struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(fs->fs, false);
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_table *ft;
+ int err;
+
+ ft_attr.max_fte = 2;
+ ft_attr.autogroup.max_num_groups = 2;
+ ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL; /* MLX5E_ACCEL_FS_TCP_FT_LEVEL */
+ ft_attr.prio = MLX5E_NIC_PRIO;
+ ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ mlx5_core_err(fs->mdev, "fail to create psp rx inline ft err=%d\n", err);
+ return err;
+ }
+
+ rx_err->ft = ft;
+ err = accel_psp_fs_rx_err_add_rule(fs, fs_prot, rx_err);
+ if (err)
+ goto out_err;
+
+ return 0;
+
+out_err:
+ mlx5_destroy_flow_table(ft);
+ rx_err->ft = NULL;
+ return err;
+}
+
+static void accel_psp_fs_rx_fs_destroy(struct mlx5e_accel_fs_psp_prot *fs_prot)
+{
+ if (fs_prot->def_rule) {
+ mlx5_del_flow_rules(fs_prot->def_rule);
+ fs_prot->def_rule = NULL;
+ }
+
+ if (fs_prot->miss_rule) {
+ mlx5_del_flow_rules(fs_prot->miss_rule);
+ fs_prot->miss_rule = NULL;
+ }
+
+ if (fs_prot->miss_group) {
+ mlx5_destroy_flow_group(fs_prot->miss_group);
+ fs_prot->miss_group = NULL;
+ }
+
+ if (fs_prot->ft) {
+ mlx5_destroy_flow_table(fs_prot->ft);
+ fs_prot->ft = NULL;
+ }
+}
+
+static void setup_fte_udp_psp(struct mlx5_flow_spec *spec, u16 udp_port)
+{
+ spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria, udp_dport, 0xffff);
+ MLX5_SET(fte_match_set_lyr_2_4, spec->match_value, udp_dport, udp_port);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, spec->match_criteria, ip_protocol);
+ MLX5_SET(fte_match_set_lyr_2_4, spec->match_value, ip_protocol, IPPROTO_UDP);
+}
+
+static int accel_psp_fs_rx_create_ft(struct mlx5e_psp_fs *fs,
+ struct mlx5e_accel_fs_psp_prot *fs_prot)
+{
+ struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(fs->fs, false);
+ u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_modify_hdr *modify_hdr = NULL;
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_core_dev *mdev = fs->mdev;
+ struct mlx5_flow_group *miss_group;
+ MLX5_DECLARE_FLOW_ACT(flow_act);
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ struct mlx5_flow_table *ft;
+ u32 *flow_group_in;
+ int err = 0;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!flow_group_in || !spec) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /* Create FT */
+ ft_attr.max_fte = 2;
+ ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_LEVEL;
+ ft_attr.prio = MLX5E_NIC_PRIO;
+ ft_attr.autogroup.num_reserved_entries = 1;
+ ft_attr.autogroup.max_num_groups = 1;
+ ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ mlx5_core_err(mdev, "fail to create psp rx ft err=%d\n", err);
+ goto out_err;
+ }
+ fs_prot->ft = ft;
+
+ /* Create miss_group */
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
+ miss_group = mlx5_create_flow_group(ft, flow_group_in);
+ if (IS_ERR(miss_group)) {
+ err = PTR_ERR(miss_group);
+ mlx5_core_err(mdev, "fail to create psp rx miss_group err=%d\n", err);
+ goto out_err;
+ }
+ fs_prot->miss_group = miss_group;
+
+ /* Create miss rule */
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, &fs_prot->default_dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "fail to create psp rx miss_rule err=%d\n", err);
+ goto out_err;
+ }
+ fs_prot->miss_rule = rule;
+
+ /* Add default Rx psp rule */
+ setup_fte_udp_psp(spec, PSP_DEFAULT_UDP_PORT);
+ flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_PSP;
+ /* Set bits [31:30] as the PSP marker. */
+ /* Bits [29:23] (psp_syndrome) are filled later, in the error FT. */
+#define MLX5E_PSP_MARKER_BIT (BIT(30) | BIT(31))
+ MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
+ MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
+ MLX5_SET(set_action_in, action, data, MLX5E_PSP_MARKER_BIT);
+ MLX5_SET(set_action_in, action, offset, 0);
+ MLX5_SET(set_action_in, action, length, 32);
+
+ modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_KERNEL, 1, action);
+ if (IS_ERR(modify_hdr)) {
+ err = PTR_ERR(modify_hdr);
+ mlx5_core_err(mdev, "fail to alloc psp set modify_header_id err=%d\n", err);
+ modify_hdr = NULL;
+ goto out_err;
+ }
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
+ MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ flow_act.modify_hdr = modify_hdr;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = fs_prot->rx_err.ft;
+ rule = mlx5_add_flow_rules(fs_prot->ft, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev,
+ "fail to add psp rule Rx decryption, err=%d, flow_act.action = %#04X\n",
+ err, flow_act.action);
+ goto out_err;
+ }
+
+ fs_prot->def_rule = rule;
+ goto out;
+
+out_err:
+ accel_psp_fs_rx_fs_destroy(fs_prot);
+out:
+ kvfree(flow_group_in);
+ kvfree(spec);
+ return err;
+}
+
+static int accel_psp_fs_rx_destroy(struct mlx5e_psp_fs *fs, enum accel_fs_psp_type type)
+{
+ struct mlx5e_accel_fs_psp_prot *fs_prot;
+ struct mlx5e_accel_fs_psp *accel_psp;
+
+ accel_psp = fs->rx_fs;
+
+ /* The netdev unregister already happened, so all offloaded rules are already removed */
+ fs_prot = &accel_psp->fs_prot[type];
+
+ accel_psp_fs_rx_fs_destroy(fs_prot);
+
+ accel_psp_fs_rx_err_destroy_ft(fs, &fs_prot->rx_err);
+
+ return 0;
+}
+
+static int accel_psp_fs_rx_create(struct mlx5e_psp_fs *fs, enum accel_fs_psp_type type)
+{
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs->fs, false);
+ struct mlx5e_accel_fs_psp_prot *fs_prot;
+ struct mlx5e_accel_fs_psp *accel_psp;
+ int err;
+
+ accel_psp = fs->rx_fs;
+ fs_prot = &accel_psp->fs_prot[type];
+
+ fs_prot->default_dest = mlx5_ttc_get_default_dest(ttc, fs_psp2tt(type));
+
+ err = accel_psp_fs_rx_err_create_ft(fs, fs_prot, &fs_prot->rx_err);
+ if (err)
+ return err;
+
+ err = accel_psp_fs_rx_create_ft(fs, fs_prot);
+ if (err)
+ accel_psp_fs_rx_err_destroy_ft(fs, &fs_prot->rx_err);
+
+ return err;
+}
+
+static int accel_psp_fs_rx_ft_get(struct mlx5e_psp_fs *fs, enum accel_fs_psp_type type)
+{
+ struct mlx5e_accel_fs_psp_prot *fs_prot;
+ struct mlx5_flow_destination dest = {};
+ struct mlx5e_accel_fs_psp *accel_psp;
+ struct mlx5_ttc_table *ttc;
+ int err = 0;
+
+ if (!fs || !fs->rx_fs)
+ return -EINVAL;
+
+ ttc = mlx5e_fs_get_ttc(fs->fs, false);
+ accel_psp = fs->rx_fs;
+ fs_prot = &accel_psp->fs_prot[type];
+ mutex_lock(&fs_prot->prot_mutex);
+ if (fs_prot->refcnt++)
+ goto out;
+
+ /* create FT */
+ err = accel_psp_fs_rx_create(fs, type);
+ if (err) {
+ fs_prot->refcnt--;
+ goto out;
+ }
+
+ /* connect */
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = fs_prot->ft;
+ mlx5_ttc_fwd_dest(ttc, fs_psp2tt(type), &dest);
+
+out:
+ mutex_unlock(&fs_prot->prot_mutex);
+ return err;
+}
+
+static void accel_psp_fs_rx_ft_put(struct mlx5e_psp_fs *fs, enum accel_fs_psp_type type)
+{
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs->fs, false);
+ struct mlx5e_accel_fs_psp_prot *fs_prot;
+ struct mlx5e_accel_fs_psp *accel_psp;
+
+ accel_psp = fs->rx_fs;
+ fs_prot = &accel_psp->fs_prot[type];
+ mutex_lock(&fs_prot->prot_mutex);
+ if (--fs_prot->refcnt)
+ goto out;
+
+ /* disconnect */
+ mlx5_ttc_fwd_default_dest(ttc, fs_psp2tt(type));
+
+ /* remove FT */
+ accel_psp_fs_rx_destroy(fs, type);
+
+out:
+ mutex_unlock(&fs_prot->prot_mutex);
+}
+
+static void accel_psp_fs_cleanup_rx(struct mlx5e_psp_fs *fs)
+{
+ struct mlx5e_accel_fs_psp_prot *fs_prot;
+ struct mlx5e_accel_fs_psp *accel_psp;
+ enum accel_fs_psp_type i;
+
+ if (!fs->rx_fs)
+ return;
+
+ accel_psp = fs->rx_fs;
+ for (i = 0; i < ACCEL_FS_PSP_NUM_TYPES; i++) {
+ fs_prot = &accel_psp->fs_prot[i];
+ mutex_destroy(&fs_prot->prot_mutex);
+ WARN_ON(fs_prot->refcnt);
+ }
+ kfree(fs->rx_fs);
+ fs->rx_fs = NULL;
+}
+
+static int accel_psp_fs_init_rx(struct mlx5e_psp_fs *fs)
+{
+ struct mlx5e_accel_fs_psp_prot *fs_prot;
+ struct mlx5e_accel_fs_psp *accel_psp;
+ enum accel_fs_psp_type i;
+
+ accel_psp = kzalloc(sizeof(*accel_psp), GFP_KERNEL);
+ if (!accel_psp)
+ return -ENOMEM;
+
+ for (i = 0; i < ACCEL_FS_PSP_NUM_TYPES; i++) {
+ fs_prot = &accel_psp->fs_prot[i];
+ mutex_init(&fs_prot->prot_mutex);
+ }
+
+ fs->rx_fs = accel_psp;
+
+ return 0;
+}
+
+void mlx5_accel_psp_fs_cleanup_rx_tables(struct mlx5e_priv *priv)
+{
+ int i;
+
+ if (!priv->psp)
+ return;
+
+ for (i = 0; i < ACCEL_FS_PSP_NUM_TYPES; i++)
+ accel_psp_fs_rx_ft_put(priv->psp->fs, i);
+}
+
+int mlx5_accel_psp_fs_init_rx_tables(struct mlx5e_priv *priv)
+{
+ struct mlx5e_psp_fs *fs;
+ int err, i;
+
+ if (!priv->psp)
+ return 0;
+
+ fs = priv->psp->fs;
+ for (i = 0; i < ACCEL_FS_PSP_NUM_TYPES; i++) {
+ err = accel_psp_fs_rx_ft_get(fs, i);
+ if (err)
+ goto out_err;
+ }
+
+ return 0;
+
+out_err:
+ while (i--)
+ accel_psp_fs_rx_ft_put(fs, i);
+
+ return err;
+}
+
+static int accel_psp_fs_tx_create_ft_table(struct mlx5e_psp_fs *fs)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_core_dev *mdev = fs->mdev;
+ struct mlx5_flow_act flow_act = {};
+ u32 *in, *mc, *outer_headers_c;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ struct mlx5e_psp_tx *tx_fs;
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_group *fg;
+ int err = 0;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!spec || !in) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ ft_attr.max_fte = 1;
+#define MLX5E_PSP_PRIO 0
+ ft_attr.prio = MLX5E_PSP_PRIO;
+#define MLX5E_PSP_LEVEL 0
+ ft_attr.level = MLX5E_PSP_LEVEL;
+ ft_attr.autogroup.max_num_groups = 1;
+
+ tx_fs = fs->tx_fs;
+ ft = mlx5_create_flow_table(tx_fs->ns, &ft_attr);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ mlx5_core_err(mdev, "PSP: fail to add psp tx flow table, err = %d\n", err);
+ goto out;
+ }
+
+ mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+ outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc, outer_headers);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_dport);
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ fg = mlx5_create_flow_group(ft, in);
+ if (IS_ERR(fg)) {
+ err = PTR_ERR(fg);
+ mlx5_core_err(mdev, "PSP: fail to add psp tx flow group, err = %d\n", err);
+ goto err_create_fg;
+ }
+
+ setup_fte_udp_psp(spec, PSP_DEFAULT_UDP_PORT);
+ flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_PSP;
+ flow_act.flags |= FLOW_ACT_NO_APPEND;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW |
+ MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT;
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, NULL, 0);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "PSP: fail to add psp tx flow rule, err = %d\n", err);
+ goto err_add_flow_rule;
+ }
+
+ tx_fs->ft = ft;
+ tx_fs->fg = fg;
+ tx_fs->rule = rule;
+ goto out;
+
+err_add_flow_rule:
+ mlx5_destroy_flow_group(fg);
+err_create_fg:
+ mlx5_destroy_flow_table(ft);
+out:
+ kvfree(in);
+ kvfree(spec);
+ return err;
+}
+
+static void accel_psp_fs_tx_destroy(struct mlx5e_psp_tx *tx_fs)
+{
+ if (!tx_fs->ft)
+ return;
+
+ mlx5_del_flow_rules(tx_fs->rule);
+ mlx5_destroy_flow_group(tx_fs->fg);
+ mlx5_destroy_flow_table(tx_fs->ft);
+}
+
+static int accel_psp_fs_tx_ft_get(struct mlx5e_psp_fs *fs)
+{
+ struct mlx5e_psp_tx *tx_fs = fs->tx_fs;
+ int err = 0;
+
+ mutex_lock(&tx_fs->mutex);
+ if (tx_fs->refcnt++)
+ goto out;
+
+ err = accel_psp_fs_tx_create_ft_table(fs);
+ if (err)
+ tx_fs->refcnt--;
+out:
+ mutex_unlock(&tx_fs->mutex);
+ return err;
+}
+
+static void accel_psp_fs_tx_ft_put(struct mlx5e_psp_fs *fs)
+{
+ struct mlx5e_psp_tx *tx_fs = fs->tx_fs;
+
+ mutex_lock(&tx_fs->mutex);
+ if (--tx_fs->refcnt)
+ goto out;
+
+ accel_psp_fs_tx_destroy(tx_fs);
+out:
+ mutex_unlock(&tx_fs->mutex);
+}
+
+static void accel_psp_fs_cleanup_tx(struct mlx5e_psp_fs *fs)
+{
+ struct mlx5e_psp_tx *tx_fs = fs->tx_fs;
+
+ if (!tx_fs)
+ return;
+
+ mutex_destroy(&tx_fs->mutex);
+ WARN_ON(tx_fs->refcnt);
+ kfree(tx_fs);
+ fs->tx_fs = NULL;
+}
+
+static int accel_psp_fs_init_tx(struct mlx5e_psp_fs *fs)
+{
+ struct mlx5_flow_namespace *ns;
+ struct mlx5e_psp_tx *tx_fs;
+
+ ns = mlx5_get_flow_namespace(fs->mdev, MLX5_FLOW_NAMESPACE_EGRESS_IPSEC);
+ if (!ns)
+ return -EOPNOTSUPP;
+
+ tx_fs = kzalloc(sizeof(*tx_fs), GFP_KERNEL);
+ if (!tx_fs)
+ return -ENOMEM;
+
+ mutex_init(&tx_fs->mutex);
+ tx_fs->ns = ns;
+ fs->tx_fs = tx_fs;
+ return 0;
+}
+
+void mlx5_accel_psp_fs_cleanup_tx_tables(struct mlx5e_priv *priv)
+{
+ if (!priv->psp)
+ return;
+
+ accel_psp_fs_tx_ft_put(priv->psp->fs);
+}
+
+int mlx5_accel_psp_fs_init_tx_tables(struct mlx5e_priv *priv)
+{
+ if (!priv->psp)
+ return 0;
+
+ return accel_psp_fs_tx_ft_get(priv->psp->fs);
+}
+
+static void mlx5e_accel_psp_fs_cleanup(struct mlx5e_psp_fs *fs)
+{
+ accel_psp_fs_cleanup_rx(fs);
+ accel_psp_fs_cleanup_tx(fs);
+ kfree(fs);
+}
+
+static struct mlx5e_psp_fs *mlx5e_accel_psp_fs_init(struct mlx5e_priv *priv)
+{
+ struct mlx5e_psp_fs *fs;
+ int err = 0;
+
+ fs = kzalloc(sizeof(*fs), GFP_KERNEL);
+ if (!fs)
+ return ERR_PTR(-ENOMEM);
+
+ fs->mdev = priv->mdev;
+ err = accel_psp_fs_init_tx(fs);
+ if (err)
+ goto err_tx;
+
+ fs->fs = priv->fs;
+ err = accel_psp_fs_init_rx(fs);
+ if (err)
+ goto err_rx;
+
+ return fs;
+
+err_rx:
+ accel_psp_fs_cleanup_tx(fs);
+err_tx:
+ kfree(fs);
+ return ERR_PTR(err);
+}
+
+static int
+mlx5e_psp_set_config(struct psp_dev *psd, struct psp_dev_config *conf,
+ struct netlink_ext_ack *extack)
+{
+ return 0; /* TODO: this should actually do things to the device */
+}
+
+static int
+mlx5e_psp_generate_key_spi(struct mlx5_core_dev *mdev,
+ enum mlx5_psp_gen_spi_in_key_size keysz,
+ unsigned int keysz_bytes,
+ struct psp_key_parsed *key)
+{
+ u32 out[MLX5_ST_SZ_DW(psp_gen_spi_out) + MLX5_ST_SZ_DW(key_spi)] = {};
+ u32 in[MLX5_ST_SZ_DW(psp_gen_spi_in)] = {};
+ void *outkey;
+ int err;
+
+ WARN_ON_ONCE(keysz_bytes > PSP_MAX_KEY);
+
+ MLX5_SET(psp_gen_spi_in, in, opcode, MLX5_CMD_OP_PSP_GEN_SPI);
+ MLX5_SET(psp_gen_spi_in, in, key_size, keysz);
+ MLX5_SET(psp_gen_spi_in, in, num_of_spi, 1);
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ outkey = MLX5_ADDR_OF(psp_gen_spi_out, out, key_spi);
+ key->spi = cpu_to_be32(MLX5_GET(key_spi, outkey, spi));
+ memcpy(key->key, MLX5_ADDR_OF(key_spi, outkey, key) + 32 - keysz_bytes,
+ keysz_bytes);
+
+ return 0;
+}
+
+static int
+mlx5e_psp_rx_spi_alloc(struct psp_dev *psd, u32 version,
+ struct psp_key_parsed *assoc,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_priv *priv = netdev_priv(psd->main_netdev);
+ enum mlx5_psp_gen_spi_in_key_size keysz;
+ u8 keysz_bytes;
+
+ switch (version) {
+ case PSP_VERSION_HDR0_AES_GCM_128:
+ keysz = MLX5_PSP_GEN_SPI_IN_KEY_SIZE_128;
+ keysz_bytes = 16;
+ break;
+ case PSP_VERSION_HDR0_AES_GCM_256:
+ keysz = MLX5_PSP_GEN_SPI_IN_KEY_SIZE_256;
+ keysz_bytes = 32;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return mlx5e_psp_generate_key_spi(priv->mdev, keysz, keysz_bytes, assoc);
+}
+
+struct psp_key {
+ u32 id;
+};
+
+static int mlx5e_psp_assoc_add(struct psp_dev *psd, struct psp_assoc *pas,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_priv *priv = netdev_priv(psd->main_netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct psp_key_parsed *tx = &pas->tx;
+ struct mlx5e_psp *psp = priv->psp;
+ struct psp_key *nkey;
+ int err;
+
+ nkey = (struct psp_key *)pas->drv_data;
+
+ err = mlx5_create_encryption_key(mdev, tx->key,
+ psp_key_size(pas->version),
+ MLX5_ACCEL_OBJ_PSP_KEY,
+ &nkey->id);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to create encryption key (err = %d)\n", err);
+ return err;
+ }
+
+ atomic_inc(&psp->tx_key_cnt);
+ return 0;
+}
+
+static void mlx5e_psp_assoc_del(struct psp_dev *psd, struct psp_assoc *pas)
+{
+ struct mlx5e_priv *priv = netdev_priv(psd->main_netdev);
+ struct mlx5e_psp *psp = priv->psp;
+ struct psp_key *nkey;
+
+ nkey = (struct psp_key *)pas->drv_data;
+ mlx5_destroy_encryption_key(priv->mdev, nkey->id);
+ atomic_dec(&psp->tx_key_cnt);
+}
+
+static int mlx5e_psp_rotate_key(struct mlx5_core_dev *mdev)
+{
+ u32 in[MLX5_ST_SZ_DW(psp_rotate_key_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(psp_rotate_key_out)];
+
+ MLX5_SET(psp_rotate_key_in, in, opcode,
+ MLX5_CMD_OP_PSP_ROTATE_KEY);
+
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+static int
+mlx5e_psp_key_rotate(struct psp_dev *psd, struct netlink_ext_ack *exack)
+{
+ struct mlx5e_priv *priv = netdev_priv(psd->main_netdev);
+
+ /* no support for protecting against external rotations */
+ psd->generation = 0;
+
+ return mlx5e_psp_rotate_key(priv->mdev);
+}
+
+static struct psp_dev_ops mlx5_psp_ops = {
+ .set_config = mlx5e_psp_set_config,
+ .rx_spi_alloc = mlx5e_psp_rx_spi_alloc,
+ .tx_key_add = mlx5e_psp_assoc_add,
+ .tx_key_del = mlx5e_psp_assoc_del,
+ .key_rotate = mlx5e_psp_key_rotate,
+};
+
+void mlx5e_psp_unregister(struct mlx5e_priv *priv)
+{
+ if (!priv->psp || !priv->psp->psp)
+ return;
+
+ psp_dev_unregister(priv->psp->psp);
+}
+
+void mlx5e_psp_register(struct mlx5e_priv *priv)
+{
+ /* priv->psp is NULL when the FW PSP capabilities are missing */
+ if (!priv->psp)
+ return;
+
+ priv->psp->caps.assoc_drv_spc = sizeof(u32);
+ priv->psp->caps.versions = 1 << PSP_VERSION_HDR0_AES_GCM_128;
+ if (MLX5_CAP_PSP(priv->mdev, psp_crypto_esp_aes_gcm_256_encrypt) &&
+ MLX5_CAP_PSP(priv->mdev, psp_crypto_esp_aes_gcm_256_decrypt))
+ priv->psp->caps.versions |= 1 << PSP_VERSION_HDR0_AES_GCM_256;
+
+ priv->psp->psp = psp_dev_create(priv->netdev, &mlx5_psp_ops,
+ &priv->psp->caps, NULL);
+ if (IS_ERR(priv->psp->psp))
+ mlx5_core_err(priv->mdev, "PSP failed to register due to %pe\n",
+ priv->psp->psp);
+}
+
+int mlx5e_psp_init(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_psp_fs *fs;
+ struct mlx5e_psp *psp;
+ int err;
+
+ if (!mlx5_is_psp_device(mdev)) {
+ mlx5_core_dbg(mdev, "PSP offload not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (!MLX5_CAP_ETH(mdev, swp)) {
+ mlx5_core_dbg(mdev, "SWP not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (!MLX5_CAP_ETH(mdev, swp_csum)) {
+ mlx5_core_dbg(mdev, "SWP checksum not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (!MLX5_CAP_ETH(mdev, swp_csum_l4_partial)) {
+ mlx5_core_dbg(mdev, "SWP L4 partial checksum not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (!MLX5_CAP_ETH(mdev, swp_lso)) {
+ mlx5_core_dbg(mdev, "PSP LSO not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ psp = kzalloc(sizeof(*psp), GFP_KERNEL);
+ if (!psp)
+ return -ENOMEM;
+
+ priv->psp = psp;
+ fs = mlx5e_accel_psp_fs_init(priv);
+ if (IS_ERR(fs)) {
+ err = PTR_ERR(fs);
+ goto out_err;
+ }
+
+ psp->fs = fs;
+
+ mlx5_core_dbg(priv->mdev, "PSP attached to netdevice\n");
+ return 0;
+
+out_err:
+ priv->psp = NULL;
+ kfree(psp);
+ return err;
+}
+
+void mlx5e_psp_cleanup(struct mlx5e_priv *priv)
+{
+ struct mlx5e_psp *psp = priv->psp;
+
+ if (!psp)
+ return;
+
+ WARN_ON(atomic_read(&psp->tx_key_cnt));
+ mlx5e_accel_psp_fs_cleanup(psp->fs);
+ priv->psp = NULL;
+ kfree(psp);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.h
new file mode 100644
index 000000000000..42bb671fb2cb
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5E_ACCEL_PSP_H__
+#define __MLX5E_ACCEL_PSP_H__
+#if IS_ENABLED(CONFIG_MLX5_EN_PSP)
+#include <net/psp/types.h>
+#include "en.h"
+
+struct mlx5e_psp {
+ struct psp_dev *psp;
+ struct psp_dev_caps caps;
+ struct mlx5e_psp_fs *fs;
+ atomic_t tx_key_cnt;
+};
+
+static inline bool mlx5_is_psp_device(struct mlx5_core_dev *mdev)
+{
+ if (!MLX5_CAP_GEN(mdev, psp))
+ return false;
+
+ if (!MLX5_CAP_PSP(mdev, psp_crypto_offload) ||
+ !MLX5_CAP_PSP(mdev, psp_crypto_esp_aes_gcm_128_encrypt) ||
+ !MLX5_CAP_PSP(mdev, psp_crypto_esp_aes_gcm_128_decrypt))
+ return false;
+
+ return true;
+}
+
+int mlx5_accel_psp_fs_init_rx_tables(struct mlx5e_priv *priv);
+void mlx5_accel_psp_fs_cleanup_rx_tables(struct mlx5e_priv *priv);
+int mlx5_accel_psp_fs_init_tx_tables(struct mlx5e_priv *priv);
+void mlx5_accel_psp_fs_cleanup_tx_tables(struct mlx5e_priv *priv);
+void mlx5e_psp_register(struct mlx5e_priv *priv);
+void mlx5e_psp_unregister(struct mlx5e_priv *priv);
+int mlx5e_psp_init(struct mlx5e_priv *priv);
+void mlx5e_psp_cleanup(struct mlx5e_priv *priv);
+#else
+static inline int mlx5_accel_psp_fs_init_rx_tables(struct mlx5e_priv *priv)
+{
+ return 0;
+}
+
+static inline void mlx5_accel_psp_fs_cleanup_rx_tables(struct mlx5e_priv *priv) { }
+static inline int mlx5_accel_psp_fs_init_tx_tables(struct mlx5e_priv *priv)
+{
+ return 0;
+}
+
+static inline void mlx5_accel_psp_fs_cleanup_tx_tables(struct mlx5e_priv *priv) { }
+static inline bool mlx5_is_psp_device(struct mlx5_core_dev *mdev)
+{
+ return false;
+}
+
+static inline void mlx5e_psp_register(struct mlx5e_priv *priv) { }
+static inline void mlx5e_psp_unregister(struct mlx5e_priv *priv) { }
+static inline int mlx5e_psp_init(struct mlx5e_priv *priv) { return 0; }
+static inline void mlx5e_psp_cleanup(struct mlx5e_priv *priv) { }
+#endif /* CONFIG_MLX5_EN_PSP */
+#endif /* __MLX5E_ACCEL_PSP_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.c
new file mode 100644
index 000000000000..828bff1137af
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.c
@@ -0,0 +1,200 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <net/protocol.h>
+#include <net/udp.h>
+#include <net/ip6_checksum.h>
+#include <net/psp/types.h>
+
+#include "en.h"
+#include "psp.h"
+#include "en_accel/psp_rxtx.h"
+#include "en_accel/psp.h"
+
+enum {
+ MLX5E_PSP_OFFLOAD_RX_SYNDROME_DECRYPTED,
+ MLX5E_PSP_OFFLOAD_RX_SYNDROME_AUTH_FAILED,
+ MLX5E_PSP_OFFLOAD_RX_SYNDROME_BAD_TRAILER,
+};
+
+static void mlx5e_psp_set_swp(struct sk_buff *skb,
+ struct mlx5e_accel_tx_psp_state *psp_st,
+ struct mlx5_wqe_eth_seg *eseg)
+{
+ /* Tunnel Mode:
+ * SWP: OutL3 InL3 InL4
+ * Pkt: MAC IP ESP IP L4
+ *
+ * Transport Mode:
+ * SWP: OutL3 OutL4
+ * Pkt: MAC IP ESP L4
+ *
+ * Tunnel(VXLAN TCP/UDP) over Transport Mode
+ * SWP: OutL3 InL3 InL4
+ * Pkt: MAC IP ESP UDP VXLAN IP L4
+ */
+ u8 inner_ipproto = 0;
+ struct ethhdr *eth;
+
+ /* Shared settings */
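+ /* SWP offsets are expressed in units of 2 bytes, hence the "/ 2" */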
+ eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
+ if (skb->protocol == htons(ETH_P_IPV6))
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;
+
+ if (skb->inner_protocol_type == ENCAP_TYPE_IPPROTO) {
+ inner_ipproto = skb->inner_ipproto;
+ /* Set SWP additional flags for packet of type IP|UDP|PSP|[ TCP | UDP ] */
+ switch (inner_ipproto) {
+ case IPPROTO_UDP:
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
+ fallthrough;
+ case IPPROTO_TCP:
+ eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
+ break;
+ default:
+ break;
+ }
+ } else {
+ /* Ethernet tunneling, e.g. VXLAN */
+ if (skb->inner_protocol_type != ENCAP_TYPE_ETHER)
+ return;
+
+ eth = (struct ethhdr *)skb_inner_mac_header(skb);
+ switch (ntohs(eth->h_proto)) {
+ case ETH_P_IP:
+ inner_ipproto = ((struct iphdr *)((char *)skb->data +
+ skb_inner_network_offset(skb)))->protocol;
+ break;
+ case ETH_P_IPV6:
+ inner_ipproto = ((struct ipv6hdr *)((char *)skb->data +
+ skb_inner_network_offset(skb)))->nexthdr;
+ break;
+ default:
+ break;
+ }
+
+ /* Tunnel(VXLAN TCP/UDP) over Transport Mode PSP i.e. PSP payload is vxlan tunnel */
+ switch (inner_ipproto) {
+ case IPPROTO_UDP:
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
+ fallthrough;
+ case IPPROTO_TCP:
+ eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
+ eseg->swp_inner_l4_offset =
+ (skb->csum_start + skb->head - skb->data) / 2;
+ if (skb->protocol == htons(ETH_P_IPV6))
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
+ break;
+ default:
+ break;
+ }
+
+ psp_st->inner_ipproto = inner_ipproto;
+ }
+}
+
+static bool mlx5e_psp_set_state(struct mlx5e_priv *priv,
+ struct sk_buff *skb,
+ struct mlx5e_accel_tx_psp_state *psp_st)
+{
+ struct psp_assoc *pas;
+ bool ret = false;
+
+ rcu_read_lock();
+ pas = psp_skb_get_assoc_rcu(skb);
+ if (!pas)
+ goto out;
+
+ ret = true;
+ psp_st->tailen = PSP_TRL_SIZE;
+ psp_st->spi = pas->tx.spi;
+ psp_st->ver = pas->version;
+ psp_st->keyid = *(u32 *)pas->drv_data;
+
+out:
+ rcu_read_unlock();
+ return ret;
+}
+
+bool mlx5e_psp_offload_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb,
+ struct mlx5_cqe64 *cqe)
+{
+ u32 psp_meta_data = be32_to_cpu(cqe->ft_metadata);
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ u16 dev_id = priv->psp->psp->id;
+ bool strip_icv = true;
+ u8 generation = 0;
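+ /* The device does not track key generations, always report 0 */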
+
+ /* TBD: report errors as SW counters to ethtool; any further handling? */
+ if (MLX5_PSP_METADATA_SYNDROME(psp_meta_data) != MLX5E_PSP_OFFLOAD_RX_SYNDROME_DECRYPTED)
+ goto drop;
+
+ if (psp_dev_rcv(skb, dev_id, generation, strip_icv))
+ goto drop;
+
+ skb->decrypted = 1;
+ return false;
+
+drop:
+ kfree_skb(skb);
+ return true;
+}
+
+void mlx5e_psp_tx_build_eseg(struct mlx5e_priv *priv, struct sk_buff *skb,
+ struct mlx5e_accel_tx_psp_state *psp_st,
+ struct mlx5_wqe_eth_seg *eseg)
+{
+ if (!mlx5_is_psp_device(priv->mdev))
+ return;
+
+ if (unlikely(skb->protocol != htons(ETH_P_IP) &&
+ skb->protocol != htons(ETH_P_IPV6)))
+ return;
+
+ mlx5e_psp_set_swp(skb, psp_st, eseg);
+ /* Special workaround for PSP LSO on ConnectX-7 */
+ eseg->swp_outer_l3_offset = 0;
+ eseg->swp_inner_l3_offset = 0;
+
+ eseg->flow_table_metadata |= cpu_to_be32(psp_st->keyid);
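+ /* Request HW insertion of the PSP trailer, associated with the outer L4 */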
+ eseg->trailer |= cpu_to_be32(MLX5_ETH_WQE_INSERT_TRAILER) |
+ cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_OUTER_L4_ASSOC);
+}
+
+void mlx5e_psp_handle_tx_wqe(struct mlx5e_tx_wqe *wqe,
+ struct mlx5e_accel_tx_psp_state *psp_st,
+ struct mlx5_wqe_inline_seg *inlseg)
+{
+ inlseg->byte_count = cpu_to_be32(psp_st->tailen | MLX5_INLINE_SEG);
+}
+
+bool mlx5e_psp_handle_tx_skb(struct net_device *netdev,
+ struct sk_buff *skb,
+ struct mlx5e_accel_tx_psp_state *psp_st)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct net *net = sock_net(skb->sk);
+ const struct ipv6hdr *ip6;
+ struct tcphdr *th;
+
+ if (!mlx5e_psp_set_state(priv, skb, psp_st))
+ return true;
+
+ /* PSP-encapsulate the packet */
+ if (!psp_dev_encapsulate(net, skb, psp_st->spi, psp_st->ver, 0)) {
+ kfree_skb_reason(skb, SKB_DROP_REASON_PSP_OUTPUT);
+ return false;
+ }
+ if (skb_is_gso(skb)) {
+ ip6 = ipv6_hdr(skb);
+ th = inner_tcp_hdr(skb);
+
+ th->check = ~tcp_v6_check(skb_shinfo(skb)->gso_size + inner_tcp_hdrlen(skb),
+ &ip6->saddr, &ip6->daddr, 0);
+ }
+
+ return true;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.h
new file mode 100644
index 000000000000..70289c921bd6
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5E_PSP_RXTX_H__
+#define __MLX5E_PSP_RXTX_H__
+
+#include <linux/skbuff.h>
+#include <net/xfrm.h>
+#include <net/psp.h>
+#include "en.h"
+#include "en/txrx.h"
+
+/* Bit31-30: PSP marker, Bit29-23: PSP syndrome, Bit22-0: PSP obj id */
+#define MLX5_PSP_METADATA_MARKER(metadata) ((((metadata) >> 30) & 0x3) == 0x3)
+#define MLX5_PSP_METADATA_SYNDROME(metadata) (((metadata) >> 23) & GENMASK(6, 0))
+#define MLX5_PSP_METADATA_HANDLE(metadata) ((metadata) & GENMASK(22, 0))
+
+struct mlx5e_accel_tx_psp_state {
+ u32 tailen;
+ u32 keyid;
+ __be32 spi;
+ u8 inner_ipproto;
+ u8 ver;
+};
+
+#ifdef CONFIG_MLX5_EN_PSP
+static inline bool mlx5e_psp_is_offload_state(struct mlx5e_accel_tx_psp_state *psp_state)
+{
+ return (psp_state->tailen != 0);
+}
+
+static inline bool mlx5e_psp_is_offload(struct sk_buff *skb, struct net_device *netdev)
+{
+ bool ret;
+
+ rcu_read_lock();
+ ret = !!psp_skb_get_assoc_rcu(skb);
+ rcu_read_unlock();
+ return ret;
+}
+
+bool mlx5e_psp_handle_tx_skb(struct net_device *netdev,
+ struct sk_buff *skb,
+ struct mlx5e_accel_tx_psp_state *psp_st);
+
+void mlx5e_psp_tx_build_eseg(struct mlx5e_priv *priv, struct sk_buff *skb,
+ struct mlx5e_accel_tx_psp_state *psp_st,
+ struct mlx5_wqe_eth_seg *eseg);
+
+void mlx5e_psp_handle_tx_wqe(struct mlx5e_tx_wqe *wqe,
+ struct mlx5e_accel_tx_psp_state *psp_st,
+ struct mlx5_wqe_inline_seg *inlseg);
+
+static inline bool mlx5e_psp_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5e_accel_tx_psp_state *psp_st,
+ struct mlx5_wqe_eth_seg *eseg)
+{
+ u8 inner_ipproto;
+
+ if (!mlx5e_psp_is_offload_state(psp_st))
+ return false;
+
+ inner_ipproto = psp_st->inner_ipproto;
+ eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
+ if (inner_ipproto) {
+ eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM;
+ if (inner_ipproto == IPPROTO_TCP || inner_ipproto == IPPROTO_UDP)
+ eseg->cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM;
+ if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
+ sq->stats->csum_partial_inner++;
+ } else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+ eseg->cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM;
+ sq->stats->csum_partial_inner++;
+ }
+
+ return true;
+}
+
+static inline unsigned int mlx5e_psp_tx_ids_len(struct mlx5e_accel_tx_psp_state *psp_st)
+{
+ return psp_st->tailen;
+}
+
+static inline bool mlx5e_psp_is_rx_flow(struct mlx5_cqe64 *cqe)
+{
+ return MLX5_PSP_METADATA_MARKER(be32_to_cpu(cqe->ft_metadata));
+}
+
+bool mlx5e_psp_offload_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb,
+ struct mlx5_cqe64 *cqe);
+#else
+static inline bool mlx5e_psp_is_offload_state(struct mlx5e_accel_tx_psp_state *psp_state)
+{
+ return false;
+}
+
+static inline bool mlx5e_psp_is_offload(struct sk_buff *skb, struct net_device *netdev)
+{
+ return false;
+}
+
+static inline bool mlx5e_psp_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5e_accel_tx_psp_state *psp_st,
+ struct mlx5_wqe_eth_seg *eseg)
+{
+ return false;
+}
+
+static inline bool mlx5e_psp_is_rx_flow(struct mlx5_cqe64 *cqe)
+{
+ return false;
+}
+
+static inline bool mlx5e_psp_offload_handle_rx_skb(struct net_device *netdev,
+ struct sk_buff *skb,
+ struct mlx5_cqe64 *cqe)
+{
+ return false;
+}
+#endif /* CONFIG_MLX5_EN_PSP */
+#endif /* __MLX5E_PSP_RXTX_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index 6ed3a32b7e22..30424ccad584 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -30,6 +30,7 @@
* SOFTWARE.
*/
+#include "devlink.h"
#include "en.h"
#include "lib/crypto.h"
@@ -140,9 +141,22 @@ err_close_tises:
return err;
}
+static unsigned int
+mlx5e_get_devlink_param_num_doorbells(struct mlx5_core_dev *dev)
+{
+ const u32 param_id = DEVLINK_PARAM_GENERIC_ID_NUM_DOORBELLS;
+ struct devlink *devlink = priv_to_devlink(dev);
+ union devlink_param_value val;
+ int err;
+
+ err = devl_param_driverinit_value_get(devlink, param_id, &val);
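+ /* Fall back to the default doorbell count when the param is not set */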
+ return err ? MLX5_DEFAULT_NUM_DOORBELLS : val.vu32;
+}
+
int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev, bool create_tises)
{
struct mlx5e_hw_objs *res = &mdev->mlx5e_res.hw_objs;
+ unsigned int num_doorbells, i;
int err;
err = mlx5_core_alloc_pd(mdev, &res->pdn);
@@ -163,17 +177,30 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev, bool create_tises)
goto err_dealloc_transport_domain;
}
- err = mlx5_alloc_bfreg(mdev, &res->bfreg, false, false);
- if (err) {
- mlx5_core_err(mdev, "alloc bfreg failed, %d\n", err);
+ num_doorbells = min(mlx5e_get_devlink_param_num_doorbells(mdev),
+ mlx5e_get_max_num_channels(mdev));
+ res->bfregs = kcalloc(num_doorbells, sizeof(*res->bfregs), GFP_KERNEL);
+ if (!res->bfregs) {
+ err = -ENOMEM;
goto err_destroy_mkey;
}
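+ /* Best effort: keep whatever doorbells were allocated before a failure */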
+ for (i = 0; i < num_doorbells; i++) {
+ err = mlx5_alloc_bfreg(mdev, res->bfregs + i, false, false);
+ if (err) {
+ mlx5_core_warn(mdev,
+ "could only allocate %d/%d doorbells, err %d.\n",
+ i, num_doorbells, err);
+ break;
+ }
+ }
+ res->num_bfregs = i;
+
if (create_tises) {
err = mlx5e_create_tises(mdev, res->tisn);
if (err) {
mlx5_core_err(mdev, "alloc tises failed, %d\n", err);
- goto err_destroy_bfreg;
+ goto err_destroy_bfregs;
}
res->tisn_valid = true;
}
@@ -183,15 +210,17 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev, bool create_tises)
mdev->mlx5e_res.dek_priv = mlx5_crypto_dek_init(mdev);
if (IS_ERR(mdev->mlx5e_res.dek_priv)) {
- mlx5_core_err(mdev, "crypto dek init failed, %ld\n",
- PTR_ERR(mdev->mlx5e_res.dek_priv));
+ mlx5_core_err(mdev, "crypto dek init failed, %pe\n",
+ mdev->mlx5e_res.dek_priv);
mdev->mlx5e_res.dek_priv = NULL;
}
return 0;
-err_destroy_bfreg:
- mlx5_free_bfreg(mdev, &res->bfreg);
+err_destroy_bfregs:
+ for (i = 0; i < res->num_bfregs; i++)
+ mlx5_free_bfreg(mdev, res->bfregs + i);
+ kfree(res->bfregs);
err_destroy_mkey:
mlx5_core_destroy_mkey(mdev, res->mkey);
err_dealloc_transport_domain:
@@ -209,7 +238,9 @@ void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev)
mdev->mlx5e_res.dek_priv = NULL;
if (res->tisn_valid)
mlx5e_destroy_tises(mdev, res->tisn);
- mlx5_free_bfreg(mdev, &res->bfreg);
+ for (unsigned int i = 0; i < res->num_bfregs; i++)
+ mlx5_free_bfreg(mdev, res->bfregs + i);
+ kfree(res->bfregs);
mlx5_core_destroy_mkey(mdev, res->mkey);
mlx5_core_dealloc_transport_domain(mdev, res->td.tdn);
mlx5_core_dealloc_pd(mdev, res->pdn);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index d507366d773e..53e5ae252eac 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1494,7 +1494,8 @@ static int mlx5e_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *
}
static int mlx5e_rxfh_hfunc_check(struct mlx5e_priv *priv,
- const struct ethtool_rxfh_param *rxfh)
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
unsigned int count;
@@ -1504,8 +1505,10 @@ static int mlx5e_rxfh_hfunc_check(struct mlx5e_priv *priv,
unsigned int xor8_max_channels = mlx5e_rqt_max_num_channels_allowed_for_xor8();
if (count > xor8_max_channels) {
- netdev_err(priv->netdev, "%s: Cannot set RSS hash function to XOR, current number of channels (%d) exceeds the maximum allowed for XOR8 RSS hfunc (%d)\n",
- __func__, count, xor8_max_channels);
+ NL_SET_ERR_MSG_FMT_MOD(
+ extack,
+ "Number of channels (%u) exceeds the max for XOR8 RSS (%u)",
+ count, xor8_max_channels);
return -EINVAL;
}
}
@@ -1524,7 +1527,7 @@ static int mlx5e_set_rxfh(struct net_device *dev,
mutex_lock(&priv->state_lock);
- err = mlx5e_rxfh_hfunc_check(priv, rxfh);
+ err = mlx5e_rxfh_hfunc_check(priv, rxfh, extack);
if (err)
goto unlock;
@@ -1550,7 +1553,7 @@ static int mlx5e_create_rxfh_context(struct net_device *dev,
mutex_lock(&priv->state_lock);
- err = mlx5e_rxfh_hfunc_check(priv, rxfh);
+ err = mlx5e_rxfh_hfunc_check(priv, rxfh, extack);
if (err)
goto unlock;
@@ -1590,7 +1593,7 @@ static int mlx5e_modify_rxfh_context(struct net_device *dev,
mutex_lock(&priv->state_lock);
- err = mlx5e_rxfh_hfunc_check(priv, rxfh);
+ err = mlx5e_rxfh_hfunc_check(priv, rxfh, extack);
if (err)
goto unlock;
@@ -1927,11 +1930,12 @@ static int mlx5e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
}
static void mlx5e_get_fec_stats(struct net_device *netdev,
- struct ethtool_fec_stats *fec_stats)
+ struct ethtool_fec_stats *fec_stats,
+ struct ethtool_fec_hist *hist)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- mlx5e_stats_fec_get(priv, fec_stats);
+ mlx5e_stats_fec_get(priv, fec_stats, hist);
}
static int mlx5e_get_fecparam(struct net_device *netdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 265c4ca85f7d..8928d2dcd43f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -905,6 +905,9 @@ static void mlx5e_set_inner_ttc_params(struct mlx5e_flow_steering *fs,
ft_attr->prio = MLX5E_NIC_PRIO;
for (tt = 0; tt < MLX5_NUM_TT; tt++) {
+ if (mlx5_ttc_is_decrypted_esp_tt(tt))
+ continue;
+
ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
ttc_params->dests[tt].tir_num =
tt == MLX5_TT_ANY ?
@@ -914,9 +917,17 @@ static void mlx5e_set_inner_ttc_params(struct mlx5e_flow_steering *fs,
}
}
+static bool mlx5e_ipsec_rss_supported(struct mlx5_core_dev *mdev)
+{
+ return MLX5_CAP_NIC_RX_FT_FIELD_SUPPORT_2(mdev, ipsec_next_header) &&
+ MLX5_CAP_NIC_RX_FT_FIELD_SUPPORT_2(mdev, outer_l4_type_ext) &&
+ MLX5_CAP_NIC_RX_FT_FIELD_SUPPORT_2(mdev, inner_l4_type_ext);
+}
+
void mlx5e_set_ttc_params(struct mlx5e_flow_steering *fs,
struct mlx5e_rx_res *rx_res,
- struct ttc_params *ttc_params, bool tunnel)
+ struct ttc_params *ttc_params, bool tunnel,
+ bool ipsec_rss)
{
struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
@@ -927,7 +938,13 @@ void mlx5e_set_ttc_params(struct mlx5e_flow_steering *fs,
ft_attr->level = MLX5E_TTC_FT_LEVEL;
ft_attr->prio = MLX5E_NIC_PRIO;
+ ttc_params->ipsec_rss = ipsec_rss &&
+ mlx5e_ipsec_rss_supported(fs->mdev);
+
for (tt = 0; tt < MLX5_NUM_TT; tt++) {
+ if (mlx5_ttc_is_decrypted_esp_tt(tt))
+ continue;
+
ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
ttc_params->dests[tt].tir_num =
tt == MLX5_TT_ANY ?
@@ -1293,7 +1310,7 @@ int mlx5e_create_ttc_table(struct mlx5e_flow_steering *fs,
{
struct ttc_params ttc_params = {};
- mlx5e_set_ttc_params(fs, rx_res, &ttc_params, true);
+ mlx5e_set_ttc_params(fs, rx_res, &ttc_params, true, true);
fs->ttc = mlx5_create_ttc_table(fs->mdev, &ttc_params);
return PTR_ERR_OR_ZERO(fs->ttc);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 15eded36b872..a56825921c23 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -49,10 +49,10 @@
#include "en.h"
#include "en/dim.h"
#include "en/txrx.h"
-#include "en/port_buffer.h"
#include "en_tc.h"
#include "en_rep.h"
#include "en_accel/ipsec.h"
+#include "en_accel/psp.h"
#include "en_accel/macsec.h"
#include "en_accel/en_accel.h"
#include "en_accel/ktls.h"
@@ -233,9 +233,13 @@ static int mlx5e_devcom_event_mpv(int event, void *my_data, void *event_data)
static int mlx5e_devcom_init_mpv(struct mlx5e_priv *priv, u64 *data)
{
+ struct mlx5_devcom_match_attr attr = {
+ .key.val = *data,
+ };
+
priv->devcom = mlx5_devcom_register_component(priv->mdev->priv.devc,
MLX5_DEVCOM_MPV,
- *data,
+ &attr,
mlx5e_devcom_event_mpv,
priv);
if (IS_ERR(priv->devcom))
@@ -778,13 +782,6 @@ static void mlx5e_rq_shampo_hd_info_free(struct mlx5e_rq *rq)
bitmap_free(rq->mpwqe.shampo->bitmap);
}
-static bool mlx5_rq_needs_separate_hd_pool(struct mlx5e_rq *rq)
-{
- struct netdev_rx_queue *rxq = __netif_get_rx_queue(rq->netdev, rq->ix);
-
- return !!rxq->mp_params.mp_ops;
-}
-
static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_rq_param *rqp,
@@ -823,7 +820,7 @@ static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev,
hd_pool_size = (rq->mpwqe.shampo->hd_per_wqe * wq_size) /
MLX5E_SHAMPO_WQ_HEADER_PER_PAGE;
- if (mlx5_rq_needs_separate_hd_pool(rq)) {
+ if (netif_rxq_has_unreadable_mp(rq->netdev, rq->ix)) {
/* Separate page pool for shampo headers */
struct page_pool_params pp_params = { };
@@ -1537,7 +1534,7 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
sq->pdev = c->pdev;
sq->mkey_be = c->mkey_be;
sq->channel = c;
- sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
+ sq->uar_map = c->bfreg->map;
sq->min_inline_mode = params->tx_min_inline_mode;
sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu) - ETH_FCS_LEN;
sq->xsk_pool = xsk_pool;
@@ -1622,7 +1619,7 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
int err;
sq->channel = c;
- sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
+ sq->uar_map = c->bfreg->map;
sq->reserved_room = param->stop_room;
param->wq.db_numa_node = cpu_to_node(c->cpu);
@@ -1707,7 +1704,7 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
sq->priv = c->priv;
sq->ch_ix = c->ix;
sq->txq_ix = txq_ix;
- sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
+ sq->uar_map = c->bfreg->map;
sq->min_inline_mode = params->tx_min_inline_mode;
sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
sq->max_sq_mpw_wqebbs = mlx5e_get_max_sq_aligned_wqebbs(mdev);
@@ -1783,7 +1780,7 @@ static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
MLX5_SET(sqc, sqc, flush_in_error_en, 1);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
- MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.hw_objs.bfreg.index);
+ MLX5_SET(wq, wq, uar_page, csp->uar_page);
MLX5_SET(wq, wq, log_wq_pg_sz, csp->wq_ctrl->buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(wq, wq, dbr_addr, csp->wq_ctrl->db.dma);
@@ -1887,6 +1884,7 @@ int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix,
csp.cqn = sq->cq.mcq.cqn;
csp.wq_ctrl = &sq->wq_ctrl;
csp.min_inline_mode = sq->min_inline_mode;
+ csp.uar_page = c->bfreg->index;
err = mlx5e_create_sq_rdy(c->mdev, param, &csp, qos_queue_group_id, &sq->sqn);
if (err)
goto err_free_txqsq;
@@ -2057,6 +2055,7 @@ static int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params
csp.cqn = sq->cq.mcq.cqn;
csp.wq_ctrl = &sq->wq_ctrl;
csp.min_inline_mode = params->tx_min_inline_mode;
+ csp.uar_page = c->bfreg->index;
err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn);
if (err)
goto err_free_icosq;
@@ -2117,6 +2116,7 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
csp.cqn = sq->cq.mcq.cqn;
csp.wq_ctrl = &sq->wq_ctrl;
csp.min_inline_mode = sq->min_inline_mode;
+ csp.uar_page = c->bfreg->index;
set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn);
@@ -2187,6 +2187,7 @@ static void mlx5e_close_xdpredirect_sq(struct mlx5e_xdpsq *xdpsq)
static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
struct net_device *netdev,
struct workqueue_struct *workqueue,
+ struct mlx5_uars_page *uar,
struct mlx5e_cq_param *param,
struct mlx5e_cq *cq)
{
@@ -2218,6 +2219,7 @@ static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
cq->mdev = mdev;
cq->netdev = netdev;
cq->workqueue = workqueue;
+ cq->uar = uar;
return 0;
}
@@ -2233,7 +2235,8 @@ static int mlx5e_alloc_cq(struct mlx5_core_dev *mdev,
param->wq.db_numa_node = ccp->node;
param->eq_ix = ccp->ix;
- err = mlx5e_alloc_cq_common(mdev, ccp->netdev, ccp->wq, param, cq);
+ err = mlx5e_alloc_cq_common(mdev, ccp->netdev, ccp->wq,
+ ccp->uar, param, cq);
cq->napi = ccp->napi;
cq->ch_stats = ccp->ch_stats;
@@ -2278,7 +2281,7 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
MLX5_SET(cqc, cqc, cq_period_mode, mlx5e_cq_period_mode(param->cq_period_mode));
MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
- MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
+ MLX5_SET(cqc, cqc, uar_page, cq->uar->index);
MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
@@ -2745,6 +2748,20 @@ void mlx5e_trigger_napi_sched(struct napi_struct *napi)
local_bh_enable();
}
+static void mlx5e_channel_pick_doorbell(struct mlx5e_channel *c)
+{
+ struct mlx5e_hw_objs *hw_objs = &c->mdev->mlx5e_res.hw_objs;
+
+ /* No dedicated Ethernet doorbells, use the global one. */
+ if (hw_objs->num_bfregs == 0) {
+ c->bfreg = &c->mdev->priv.bfreg;
+ return;
+ }
+
+ /* Round-robin between doorbells. */
+ c->bfreg = hw_objs->bfregs + c->vec_ix % hw_objs->num_bfregs;
+}
+
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
struct mlx5e_params *params,
struct xsk_buff_pool *xsk_pool,
@@ -2799,6 +2816,8 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
c->aff_mask = irq_get_effective_affinity_mask(irq);
c->lag_port = mlx5e_enumerate_lag_port(mdev, ix);
+ mlx5e_channel_pick_doorbell(c);
+
netif_napi_add_config_locked(netdev, &c->napi, mlx5e_napi_poll, ix);
netif_napi_set_irq_locked(&c->napi, irq);
@@ -3041,11 +3060,9 @@ int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
struct mlx5e_params *params = &priv->channels.params;
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
- u16 mtu, prev_mtu;
+ u16 mtu;
int err;
- mlx5e_query_mtu(mdev, params, &prev_mtu);
-
err = mlx5e_set_mtu(mdev, params, params->sw_mtu);
if (err)
return err;
@@ -3055,18 +3072,6 @@ int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n",
__func__, mtu, params->sw_mtu);
- if (mtu != prev_mtu && MLX5_BUFFER_SUPPORTED(mdev)) {
- err = mlx5e_port_manual_buffer_config(priv, 0, mtu,
- NULL, NULL, NULL);
- if (err) {
- netdev_warn(netdev, "%s: Failed to set Xon/Xoff values with MTU %d (err %d), setting back to previous MTU %d\n",
- __func__, mtu, err, prev_mtu);
-
- mlx5e_set_mtu(mdev, params, prev_mtu);
- return err;
- }
- }
-
params->sw_mtu = mtu;
return 0;
}
@@ -3584,7 +3589,8 @@ static int mlx5e_alloc_drop_cq(struct mlx5e_priv *priv,
param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
param->wq.db_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
- return mlx5e_alloc_cq_common(priv->mdev, priv->netdev, priv->wq, param, cq);
+ return mlx5e_alloc_cq_common(priv->mdev, priv->netdev, priv->wq,
+ mdev->priv.bfreg.up, param, cq);
}
int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
@@ -5640,12 +5646,36 @@ static int mlx5e_queue_start(struct net_device *dev, void *newq,
return 0;
}
+static struct device *mlx5e_queue_get_dma_dev(struct net_device *dev,
+ int queue_index)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_channels *channels;
+ struct device *pdev = NULL;
+ struct mlx5e_channel *ch;
+
+ channels = &priv->channels;
+
+ mutex_lock(&priv->state_lock);
+
+ if (queue_index >= channels->num)
+ goto out;
+
+ ch = channels->c[queue_index];
+ pdev = ch->pdev;
+out:
+ mutex_unlock(&priv->state_lock);
+
+ return pdev;
+}
+
static const struct netdev_queue_mgmt_ops mlx5e_queue_mgmt_ops = {
.ndo_queue_mem_size = sizeof(struct mlx5_qmgmt_data),
.ndo_queue_mem_alloc = mlx5e_queue_mem_alloc,
.ndo_queue_mem_free = mlx5e_queue_mem_free,
.ndo_queue_start = mlx5e_queue_start,
.ndo_queue_stop = mlx5e_queue_stop,
+ .ndo_queue_get_dma_dev = mlx5e_queue_get_dma_dev,
};
static void mlx5e_build_nic_netdev(struct net_device *netdev)
@@ -5873,6 +5903,10 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
}
priv->fs = fs;
+ err = mlx5e_psp_init(priv);
+ if (err)
+ mlx5_core_err(mdev, "PSP initialization failed, %d\n", err);
+
err = mlx5e_ktls_init(priv);
if (err)
mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
@@ -5885,6 +5919,7 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
if (take_rtnl)
rtnl_lock();
+ mlx5e_psp_register(priv);
/* update XDP supported features */
mlx5e_set_xdp_feature(netdev);
@@ -5897,7 +5932,9 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
mlx5e_health_destroy_reporters(priv);
+ mlx5e_psp_unregister(priv);
mlx5e_ktls_cleanup(priv);
+ mlx5e_psp_cleanup(priv);
mlx5e_fs_cleanup(priv->fs);
debugfs_remove_recursive(priv->dfs_root);
priv->fs = NULL;
@@ -6227,8 +6264,15 @@ int mlx5e_priv_init(struct mlx5e_priv *priv,
if (!priv->channel_stats)
goto err_free_tx_rates;
+ priv->fec_ranges = kcalloc(ETHTOOL_FEC_HIST_MAX,
+ sizeof(*priv->fec_ranges), GFP_KERNEL);
+ if (!priv->fec_ranges)
+ goto err_free_channel_stats;
+
return 0;
+err_free_channel_stats:
+ kfree(priv->channel_stats);
err_free_tx_rates:
kfree(priv->tx_rates);
err_free_txq2sq_stats:
@@ -6252,6 +6296,7 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
if (!priv->mdev)
return;
+ kfree(priv->fec_ranges);
for (i = 0; i < priv->stats_nch; i++)
kvfree(priv->channel_stats[i]);
kfree(priv->channel_stats);
@@ -6745,6 +6790,7 @@ static void _mlx5e_remove(struct auxiliary_device *adev)
* is already unregistered before changing to NIC profile.
*/
if (priv->netdev->reg_state == NETREG_REGISTERED) {
+ mlx5e_psp_unregister(priv);
unregister_netdev(priv->netdev);
_mlx5e_suspend(adev, false);
} else {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index cd0242eb008c..0335ca8277ef 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -974,7 +974,7 @@ static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
MLX5_FLOW_NAMESPACE_KERNEL), false);
/* The inner_ttc in the ttc params is intentionally not set */
- mlx5e_set_ttc_params(priv->fs, priv->rx_res, &ttc_params, false);
+ mlx5e_set_ttc_params(priv->fs, priv->rx_res, &ttc_params, false, false);
if (rep->vport != MLX5_VPORT_UPLINK)
 /* To give uplink rep TTC a lower level for chaining from root ft */
@@ -1447,11 +1447,11 @@ static void mlx5e_rep_vnic_reporter_create(struct mlx5e_priv *priv,
reporter = devl_port_health_reporter_create(dl_port,
&mlx5_rep_vnic_reporter_ops,
- 0, rpriv);
+ rpriv);
if (IS_ERR(reporter)) {
mlx5_core_err(priv->mdev,
- "Failed to create representor vnic reporter, err = %ld\n",
- PTR_ERR(reporter));
+ "Failed to create representor vnic reporter, err = %pe\n",
+ reporter);
return;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index b8c609d91d11..263d5628ee44 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -51,6 +51,7 @@
#include "ipoib/ipoib.h"
#include "en_accel/ipsec.h"
#include "en_accel/macsec.h"
+#include "en_accel/psp_rxtx.h"
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/ktls_txrx.h"
#include "en/xdp.h"
@@ -1289,8 +1290,12 @@ static void mlx5e_shampo_update_ipv4_tcp_hdr(struct mlx5e_rq *rq, struct iphdr *
tcp->check = ~tcp_v4_check(skb->len - tcp_off, ipv4->saddr,
ipv4->daddr, 0);
skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
- if (ntohs(ipv4->id) == rq->hw_gro_data->second_ip_id)
- skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;
+ if (ntohs(ipv4->id) == rq->hw_gro_data->second_ip_id) {
+ bool encap = rq->hw_gro_data->fk.control.flags & FLOW_DIS_ENCAPSULATION;
+
+ skb_shinfo(skb)->gso_type |= encap ? SKB_GSO_TCP_FIXEDID_INNER :
+ SKB_GSO_TCP_FIXEDID;
+ }
skb->csum_start = (unsigned char *)tcp - skb->head;
skb->csum_offset = offsetof(struct tcphdr, check);
@@ -1521,6 +1526,11 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
+ if (unlikely(mlx5e_psp_is_rx_flow(cqe))) {
+ /* TBD: PSP csum-complete corrections; for now take the csum_unnecessary path */
+ goto csum_unnecessary;
+ }
+
if (test_bit(MLX5E_RQ_STATE_CSUM_FULL, &rq->state))
return; /* CQE csum covers all received bytes */
@@ -1549,7 +1559,7 @@ csum_none:
#define MLX5E_CE_BIT_MASK 0x80
-static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
+static inline bool mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
u32 cqe_bcnt,
struct mlx5e_rq *rq,
struct sk_buff *skb)
@@ -1563,6 +1573,11 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
if (unlikely(get_cqe_tls_offload(cqe)))
mlx5e_ktls_handle_rx_skb(rq, skb, cqe, &cqe_bcnt);
+ if (unlikely(mlx5e_psp_is_rx_flow(cqe))) {
+ if (mlx5e_psp_offload_handle_rx_skb(netdev, skb, cqe))
+ return true;
+ }
+
if (unlikely(mlx5_ipsec_is_rx_flow(cqe)))
mlx5e_ipsec_offload_handle_rx_skb(netdev, skb,
be32_to_cpu(cqe->ft_metadata));
@@ -1608,9 +1623,11 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
if (unlikely(mlx5e_skb_is_multicast(skb)))
stats->mcast_packets++;
+
+ return false;
}
-static void mlx5e_shampo_complete_rx_cqe(struct mlx5e_rq *rq,
+static bool mlx5e_shampo_complete_rx_cqe(struct mlx5e_rq *rq,
struct mlx5_cqe64 *cqe,
u32 cqe_bcnt,
struct sk_buff *skb)
@@ -1620,16 +1637,20 @@ static void mlx5e_shampo_complete_rx_cqe(struct mlx5e_rq *rq,
stats->packets++;
stats->bytes += cqe_bcnt;
if (NAPI_GRO_CB(skb)->count != 1)
- return;
- mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
+ return false;
+
+ if (mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb))
+ return true;
+
skb_reset_network_header(skb);
if (!skb_flow_dissect_flow_keys(skb, &rq->hw_gro_data->fk, 0)) {
napi_gro_receive(rq->cq.napi, skb);
rq->hw_gro_data->skb = NULL;
}
+ return false;
}
-static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
+static inline bool mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
struct mlx5_cqe64 *cqe,
u32 cqe_bcnt,
struct sk_buff *skb)
@@ -1638,7 +1659,7 @@ static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
stats->packets++;
stats->bytes += cqe_bcnt;
- mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
+ return mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
}
static inline
@@ -1796,10 +1817,9 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
if (xdp_buff_has_frags(&mxbuf->xdp)) {
/* sinfo->nr_frags is reset by build_skb, calculate again. */
- xdp_update_skb_shared_info(skb, wi - head_wi - 1,
- sinfo->xdp_frags_size, truesize,
- xdp_buff_is_frag_pfmemalloc(
- &mxbuf->xdp));
+ xdp_update_skb_frags_info(skb, wi - head_wi - 1,
+ sinfo->xdp_frags_size, truesize,
+ xdp_buff_get_skb_flags(&mxbuf->xdp));
for (struct mlx5e_wqe_frag_info *pwi = head_wi + 1; pwi < wi; pwi++)
pwi->frag_page->frags++;
@@ -1855,7 +1875,8 @@ static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
goto wq_cyc_pop;
}
- mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ if (mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb))
+ goto wq_cyc_pop;
if (mlx5e_cqe_regb_chain(cqe))
if (!mlx5e_tc_update_skb_nic(cqe, skb)) {
@@ -1902,7 +1923,8 @@ static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
goto wq_cyc_pop;
}
- mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ if (mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb))
+ goto wq_cyc_pop;
if (rep->vlan && skb_vlan_tag_present(skb))
skb_vlan_pop(skb);
@@ -1951,7 +1973,8 @@ static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64
if (!skb)
goto mpwrq_cqe_out;
- mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ if (mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb))
+ goto mpwrq_cqe_out;
mlx5e_rep_tc_receive(cqe, rq, skb);
@@ -2105,10 +2128,10 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
struct mlx5e_frag_page *pagep;
/* sinfo->nr_frags is reset by build_skb, calculate again. */
- xdp_update_skb_shared_info(skb, frag_page - head_page,
- sinfo->xdp_frags_size, truesize,
- xdp_buff_is_frag_pfmemalloc(
- &mxbuf->xdp));
+ xdp_update_skb_frags_info(skb, frag_page - head_page,
+ sinfo->xdp_frags_size,
+ truesize,
+ xdp_buff_get_skb_flags(&mxbuf->xdp));
pagep = head_page;
do
@@ -2122,10 +2145,10 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
if (xdp_buff_has_frags(&mxbuf->xdp)) {
struct mlx5e_frag_page *pagep;
- xdp_update_skb_shared_info(skb, sinfo->nr_frags,
- sinfo->xdp_frags_size, truesize,
- xdp_buff_is_frag_pfmemalloc(
- &mxbuf->xdp));
+ xdp_update_skb_frags_info(skb, sinfo->nr_frags,
+ sinfo->xdp_frags_size,
+ truesize,
+ xdp_buff_get_skb_flags(&mxbuf->xdp));
pagep = frag_page - sinfo->nr_frags;
do
@@ -2388,7 +2411,10 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
stats->hds_nosplit_bytes += data_bcnt;
}
- mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb);
+ if (mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb)) {
+ *skb = NULL;
+ goto free_hd_entry;
+ }
if (flush && rq->hw_gro_data->skb)
mlx5e_shampo_flush_skb(rq, cqe, match);
free_hd_entry:
@@ -2446,7 +2472,8 @@ static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cq
if (!skb)
goto mpwrq_cqe_out;
- mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ if (mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb))
+ goto mpwrq_cqe_out;
if (mlx5e_cqe_regb_chain(cqe))
if (!mlx5e_tc_update_skb_nic(cqe, skb)) {
@@ -2779,7 +2806,8 @@ static void mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe
if (!skb)
goto wq_cyc_pop;
- mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ if (mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb))
+ goto wq_cyc_pop;
skb_push(skb, ETH_HLEN);
mlx5_devlink_trap_report(rq->mdev, trap_id, skb,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index c6185ddba04b..7c029a7d0fd7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -1446,16 +1446,13 @@ static void fec_set_rs_stats(struct ethtool_fec_stats *fec_stats, u32 *ppcnt)
}
static void fec_set_block_stats(struct mlx5e_priv *priv,
+ int mode,
struct ethtool_fec_stats *fec_stats)
{
struct mlx5_core_dev *mdev = priv->mdev;
u32 out[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
- int mode = fec_active_mode(mdev);
-
- if (mode == MLX5E_FEC_NOFEC)
- return;
MLX5_SET(ppcnt_reg, in, local_port, 1);
MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
@@ -1494,14 +1491,130 @@ static void fec_set_corrected_bits_total(struct mlx5e_priv *priv,
phy_corrected_bits);
}
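+/* Number of bins in the PPCNT rs_histogram_cntrs counter group */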
+#define MLX5_RS_HISTOGRAM_ENTRIES \
+ (MLX5_FLD_SZ_BYTES(rs_histogram_cntrs, hist) / \
+ MLX5_FLD_SZ_BYTES(rs_histogram_cntrs, hist[0]))
+
+enum {
+ MLX5E_HISTOGRAM_FEC_RS_544_514 = 1,
+ MLX5E_HISTOGRAM_FEC_LLRS = 2,
+ MLX5E_HISTOGRAM_FEC_RS_528_514 = 3,
+};
+
+static bool fec_rs_validate_hist_type(int mode, int hist_type)
+{
+ switch (mode) {
+ case MLX5E_FEC_RS_528_514:
+ return hist_type == MLX5E_HISTOGRAM_FEC_RS_528_514;
+ case MLX5E_FEC_RS_544_514_INTERLEAVED_QUAD:
+ case MLX5E_FEC_RS_544_514:
+ return hist_type == MLX5E_HISTOGRAM_FEC_RS_544_514;
+ case MLX5E_FEC_LLRS_272_257_1:
+ return hist_type == MLX5E_HISTOGRAM_FEC_LLRS;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+static u8
+fec_rs_histogram_fill_ranges(struct mlx5e_priv *priv, int mode,
+ const struct ethtool_fec_hist_range **ranges)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 out[MLX5_ST_SZ_DW(pphcr_reg)] = {0};
+ u32 in[MLX5_ST_SZ_DW(pphcr_reg)] = {0};
+ int sz = MLX5_ST_SZ_BYTES(pphcr_reg);
+ u8 hist_type, num_of_bins;
+
+ memset(priv->fec_ranges, 0,
+ ETHTOOL_FEC_HIST_MAX * sizeof(*priv->fec_ranges));
+ MLX5_SET(pphcr_reg, in, local_port, 1);
+ if (mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPHCR, 0, 0))
+ return 0;
+
+ hist_type = MLX5_GET(pphcr_reg, out, active_hist_type);
+ if (!fec_rs_validate_hist_type(mode, hist_type))
+ return 0;
+
+ num_of_bins = MLX5_GET(pphcr_reg, out, num_of_bins);
+ if (WARN_ON_ONCE(num_of_bins > MLX5_RS_HISTOGRAM_ENTRIES))
+ return 0;
+
+ for (int i = 0; i < num_of_bins; i++) {
+ void *bin_range = MLX5_ADDR_OF(pphcr_reg, out, bin_range[i]);
+
+ priv->fec_ranges[i].high = MLX5_GET(bin_range_layout, bin_range,
+ high_val);
+ priv->fec_ranges[i].low = MLX5_GET(bin_range_layout, bin_range,
+ low_val);
+ }
+ *ranges = priv->fec_ranges;
+
+ return num_of_bins;
+}
+
+static void fec_rs_histogram_fill_stats(struct mlx5e_priv *priv,
+ u8 num_of_bins,
+ struct ethtool_fec_hist *hist)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 out[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
+ u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+ void *rs_histogram_cntrs;
+
+ MLX5_SET(ppcnt_reg, in, local_port, 1);
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_RS_FEC_HISTOGRAM_GROUP);
+ if (mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0))
+ return;
+
+ rs_histogram_cntrs = MLX5_ADDR_OF(ppcnt_reg, out,
+ counter_set.rs_histogram_cntrs);
+ /* fec_rs_histogram_fill_ranges() guarantees that num_of_bins does not
+ * exceed MLX5_RS_HISTOGRAM_ENTRIES.
+ */
+ for (int i = 0; i < num_of_bins; i++)
+ hist->values[i].sum = MLX5_GET64(rs_histogram_cntrs,
+ rs_histogram_cntrs,
+ hist[i]);
+}
+
+static void fec_set_histograms_stats(struct mlx5e_priv *priv, int mode,
+ struct ethtool_fec_hist *hist)
+{
+ u8 num_of_bins;
+
+ switch (mode) {
+ case MLX5E_FEC_RS_528_514:
+ case MLX5E_FEC_RS_544_514:
+ case MLX5E_FEC_LLRS_272_257_1:
+ case MLX5E_FEC_RS_544_514_INTERLEAVED_QUAD:
+ num_of_bins =
+ fec_rs_histogram_fill_ranges(priv, mode, &hist->ranges);
+ if (num_of_bins)
+ return fec_rs_histogram_fill_stats(priv, num_of_bins,
+ hist);
+ break;
+ default:
+ return;
+ }
+}
+
void mlx5e_stats_fec_get(struct mlx5e_priv *priv,
- struct ethtool_fec_stats *fec_stats)
+ struct ethtool_fec_stats *fec_stats,
+ struct ethtool_fec_hist *hist)
{
- if (!MLX5_CAP_PCAM_FEATURE(priv->mdev, ppcnt_statistical_group))
+ int mode = fec_active_mode(priv->mdev);
+
+ if (mode == MLX5E_FEC_NOFEC ||
+ !MLX5_CAP_PCAM_FEATURE(priv->mdev, ppcnt_statistical_group))
return;
fec_set_corrected_bits_total(priv, fec_stats);
- fec_set_block_stats(priv, fec_stats);
+ fec_set_block_stats(priv, mode, fec_stats);
+ fec_set_histograms_stats(priv, mode, hist);
}
#define PPORT_ETH_EXT_OFF(c) \
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 72dbcc1928ef..09f155acb461 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -117,7 +117,8 @@ void mlx5e_stats_update_ndo_stats(struct mlx5e_priv *priv);
void mlx5e_stats_pause_get(struct mlx5e_priv *priv,
struct ethtool_pause_stats *pause_stats);
void mlx5e_stats_fec_get(struct mlx5e_priv *priv,
- struct ethtool_fec_stats *fec_stats);
+ struct ethtool_fec_stats *fec_stats,
+ struct ethtool_fec_hist *hist);
void mlx5e_stats_eth_phy_get(struct mlx5e_priv *priv,
struct ethtool_eth_phy_stats *phy_stats);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 32c07a8b03d1..00c2763e57ca 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -66,6 +66,7 @@
#include "lib/devcom.h"
#include "lib/geneve.h"
#include "lib/fs_chains.h"
+#include "lib/mlx5.h"
#include "diag/en_tc_tracepoint.h"
#include <asm/div64.h>
#include "lag/lag.h"
@@ -757,11 +758,11 @@ static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
struct mlx5e_priv *priv = hp->func_priv;
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_rss_params_indir indir;
+ u32 rqt_size;
int err;
- err = mlx5e_rss_params_indir_init(&indir, mdev,
- mlx5e_rqt_size(mdev, hp->num_channels),
- mlx5e_rqt_size(mdev, hp->num_channels));
+ rqt_size = mlx5e_rqt_size(mdev, hp->num_channels);
+ err = mlx5e_rss_params_indir_init(&indir, rqt_size, rqt_size);
if (err)
return err;
@@ -837,6 +838,9 @@ static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
ttc_params->ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
for (tt = 0; tt < MLX5_NUM_TT; tt++) {
+ if (mlx5_ttc_is_decrypted_esp_tt(tt))
+ continue;
+
ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
ttc_params->dests[tt].tir_num =
tt == MLX5_TT_ANY ?
@@ -5387,12 +5391,13 @@ void mlx5e_tc_ht_cleanup(struct rhashtable *tc_ht)
int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv)
{
const size_t sz_enc_opts = sizeof(struct tunnel_match_enc_opts);
+ struct mlx5_devcom_match_attr attr = {};
struct netdev_phys_item_id ppid;
struct mlx5e_rep_priv *rpriv;
struct mapping_ctx *mapping;
struct mlx5_eswitch *esw;
struct mlx5e_priv *priv;
- u64 mapping_id, key;
+ u64 mapping_id;
int err = 0;
rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);
@@ -5448,8 +5453,10 @@ int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv)
err = netif_get_port_parent_id(priv->netdev, &ppid, false);
if (!err) {
- memcpy(&key, &ppid.id, sizeof(key));
- mlx5_esw_offloads_devcom_init(esw, key);
+ memcpy(&attr.key.val, &ppid.id, sizeof(attr.key.val));
+ attr.flags = MLX5_DEVCOM_MATCH_FLAGS_NS;
+ attr.net = mlx5_core_net(esw->dev);
+ mlx5_esw_offloads_devcom_init(esw, &attr);
}
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 319061d31602..b7227afcb51d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -39,6 +39,7 @@
#include "ipoib/ipoib.h"
#include "en_accel/en_accel.h"
#include "en_accel/ipsec_rxtx.h"
+#include "en_accel/psp_rxtx.h"
#include "en_accel/macsec.h"
#include "en/ptp.h"
#include <net/ipv6.h>
@@ -120,6 +121,11 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_accel_tx_state *accel,
struct mlx5_wqe_eth_seg *eseg)
{
+#ifdef CONFIG_MLX5_EN_PSP
+ if (unlikely(mlx5e_psp_txwqe_build_eseg_csum(sq, skb, &accel->psp_st, eseg)))
+ return;
+#endif
+
if (unlikely(mlx5e_ipsec_txwqe_build_eseg_csum(sq, skb, eseg)))
return;
@@ -297,7 +303,7 @@ static void mlx5e_sq_xmit_prepare(struct mlx5e_txqsq *sq, struct sk_buff *skb,
stats->packets++;
}
- attr->insz = mlx5e_accel_tx_ids_len(sq, accel);
+ attr->insz = mlx5e_accel_tx_ids_len(sq, skb, accel);
stats->bytes += attr->num_bytes;
}
@@ -653,7 +659,7 @@ static void mlx5e_cqe_ts_id_eseg(struct mlx5e_ptpsq *ptpsq, struct sk_buff *skb,
struct mlx5_wqe_eth_seg *eseg)
{
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
- eseg->flow_table_metadata =
+ eseg->flow_table_metadata |=
cpu_to_be32(mlx5e_ptp_metadata_fifo_peek(&ptpsq->metadata_freelist));
}
@@ -661,7 +667,7 @@ static void mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *
struct sk_buff *skb, struct mlx5e_accel_tx_state *accel,
struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
- mlx5e_accel_tx_eseg(priv, skb, eseg, ihs);
+ mlx5e_accel_tx_eseg(priv, skb, accel, eseg, ihs);
mlx5e_txwqe_build_eseg_csum(sq, skb, accel, eseg);
if (unlikely(sq->ptpsq))
mlx5e_cqe_ts_id_eseg(sq->ptpsq, skb, eseg);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 1ab77159409d..25499da177bc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -32,9 +32,7 @@ enum {
MLX5_EQ_STATE_ALWAYS_ARMED = 0xb,
};
-enum {
- MLX5_EQ_DOORBEL_OFFSET = 0x40,
-};
+#define MLX5_EQ_DOORBELL_OFFSET 0x40
/* budget must be smaller than MLX5_NUM_SPARE_EQE to guarantee that we update
* the ci before we polled all the entries in the EQ. MLX5_NUM_SPARE_EQE is
@@ -309,7 +307,7 @@ create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
MLX5_SET(eqc, eqc, log_eq_size, eq->fbc.log_sz);
- MLX5_SET(eqc, eqc, uar_page, priv->uar->index);
+ MLX5_SET(eqc, eqc, uar_page, priv->bfreg.up->index);
MLX5_SET(eqc, eqc, intr, vecidx);
MLX5_SET(eqc, eqc, log_page_size,
eq->frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
@@ -322,7 +320,7 @@ create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
eq->eqn = MLX5_GET(create_eq_out, out, eq_number);
eq->irqn = pci_irq_vector(dev->pdev, vecidx);
eq->dev = dev;
- eq->doorbell = priv->uar->map + MLX5_EQ_DOORBEL_OFFSET;
+ eq->doorbell = priv->bfreg.up->map + MLX5_EQ_DOORBELL_OFFSET;
err = mlx5_debug_eq_add(dev, eq);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
index 7dd1dc3f77c7..c9a1654d83a2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
@@ -87,8 +87,8 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
drop_counter = mlx5_fc_create(esw->dev, false);
if (IS_ERR(drop_counter)) {
esw_warn(esw->dev,
- "vport[%d] configure egress drop rule counter err(%ld)\n",
- vport->vport, PTR_ERR(drop_counter));
+ "vport[%d] configure egress drop rule counter err(%pe)\n",
+ vport->vport, drop_counter);
drop_counter = NULL;
}
vport->egress.legacy.drop_counter = drop_counter;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/adj_vport.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/adj_vport.c
new file mode 100644
index 000000000000..0091ba697bae
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/adj_vport.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "fs_core.h"
+#include "eswitch.h"
+
+enum {
+ MLX5_ADJ_VPORT_DISCONNECT = 0x0,
+ MLX5_ADJ_VPORT_CONNECT = 0x1,
+};
+
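+/* Connect or disconnect an adjacent vport's ingress and egress to the eswitch */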
+static int mlx5_esw_adj_vport_modify(struct mlx5_core_dev *dev,
+ u16 vport, bool connect)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)] = {};
+
+ MLX5_SET(modify_vport_state_in, in, opcode,
+ MLX5_CMD_OP_MODIFY_VPORT_STATE);
+ MLX5_SET(modify_vport_state_in, in, op_mod,
+ MLX5_VPORT_STATE_OP_MOD_ESW_VPORT);
+ MLX5_SET(modify_vport_state_in, in, other_vport, 1);
+ MLX5_SET(modify_vport_state_in, in, vport_number, vport);
+ MLX5_SET(modify_vport_state_in, in, ingress_connect_valid, 1);
+ MLX5_SET(modify_vport_state_in, in, egress_connect_valid, 1);
+ MLX5_SET(modify_vport_state_in, in, ingress_connect, connect);
+ MLX5_SET(modify_vport_state_in, in, egress_connect, connect);
+
+ return mlx5_cmd_exec_in(dev, modify_vport_state, in);
+}
+
+static void mlx5_esw_destroy_esw_vport(struct mlx5_core_dev *dev, u16 vport)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_esw_vport_in)] = {};
+
+ MLX5_SET(destroy_esw_vport_in, in, opcode,
+ MLX5_CMD_OPCODE_DESTROY_ESW_VPORT);
+ MLX5_SET(destroy_esw_vport_in, in, vport_num, vport);
+
+ mlx5_cmd_exec_in(dev, destroy_esw_vport, in);
+}
+
+static int mlx5_esw_create_esw_vport(struct mlx5_core_dev *dev, u16 vhca_id,
+ u16 *vport_num)
+{
+ u32 out[MLX5_ST_SZ_DW(create_esw_vport_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(create_esw_vport_in)] = {};
+ int err;
+
+ MLX5_SET(create_esw_vport_in, in, opcode,
+ MLX5_CMD_OPCODE_CREATE_ESW_VPORT);
+ MLX5_SET(create_esw_vport_in, in, managed_vhca_id, vhca_id);
+
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (!err)
+ *vport_num = MLX5_GET(create_esw_vport_out, out, vport_num);
+
+ return err;
+}
+
+static int mlx5_esw_adj_vport_create(struct mlx5_eswitch *esw, u16 vhca_id,
+ const void *rid_info_reg)
+{
+ struct mlx5_vport *vport;
+ u16 vport_num;
+ int err;
+
+ err = mlx5_esw_create_esw_vport(esw->dev, vhca_id, &vport_num);
+ if (err) {
+ esw_warn(esw->dev,
+ "Failed to create adjacent vport for vhca_id %d, err %d\n",
+ vhca_id, err);
+ return err;
+ }
+
+ esw_debug(esw->dev, "Created adjacent vport[%d] %d for vhca_id 0x%x\n",
+ esw->last_vport_idx, vport_num, vhca_id);
+
+ err = mlx5_esw_vport_alloc(esw, esw->last_vport_idx++, vport_num);
+ if (err)
+ goto destroy_esw_vport;
+
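+ /* Mark as a VF vport so the VF vport iterators cover it */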
+ xa_set_mark(&esw->vports, vport_num, MLX5_ESW_VPT_VF);
+ vport = mlx5_eswitch_get_vport(esw, vport_num);
+ vport->adjacent = true;
+ vport->vhca_id = vhca_id;
+
+ vport->adj_info.parent_pci_devfn =
+ MLX5_GET(function_vhca_rid_info_reg, rid_info_reg,
+ parent_pci_device_function);
+ vport->adj_info.function_id =
+ MLX5_GET(function_vhca_rid_info_reg, rid_info_reg, function_id);
+
+ mlx5_fs_vport_egress_acl_ns_add(esw->dev->priv.steering, vport->index);
+ mlx5_fs_vport_ingress_acl_ns_add(esw->dev->priv.steering, vport->index);
+ err = mlx5_esw_offloads_rep_add(esw, vport);
+ if (err)
+ goto acl_ns_remove;
+
+ mlx5_esw_adj_vport_modify(esw->dev, vport_num, MLX5_ADJ_VPORT_CONNECT);
+ return 0;
+
+acl_ns_remove:
+ mlx5_fs_vport_ingress_acl_ns_remove(esw->dev->priv.steering,
+ vport->index);
+ mlx5_fs_vport_egress_acl_ns_remove(esw->dev->priv.steering,
+ vport->index);
+ mlx5_esw_vport_free(esw, vport);
+destroy_esw_vport:
+ mlx5_esw_destroy_esw_vport(esw->dev, vport_num);
+ return err;
+}
+
+static void mlx5_esw_adj_vport_destroy(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ u16 vport_num = vport->vport;
+
+ esw_debug(esw->dev, "Destroying adjacent vport %d for vhca_id 0x%x\n",
+ vport_num, vport->vhca_id);
+ mlx5_esw_adj_vport_modify(esw->dev, vport_num,
+ MLX5_ADJ_VPORT_DISCONNECT);
+ mlx5_esw_offloads_rep_remove(esw, vport);
+ mlx5_fs_vport_egress_acl_ns_remove(esw->dev->priv.steering,
+ vport->index);
+ mlx5_fs_vport_ingress_acl_ns_remove(esw->dev->priv.steering,
+ vport->index);
+ mlx5_esw_vport_free(esw, vport);
+ /* Reset the vport index so new adjacent vports can reuse it. This
+ * needs rework once the adjacent vport count can change incrementally.
+ */
+ esw->last_vport_idx--;
+ mlx5_esw_destroy_esw_vport(esw->dev, vport_num);
+}
+
+void mlx5_esw_adjacent_vhcas_cleanup(struct mlx5_eswitch *esw)
+{
+ struct mlx5_vport *vport;
+ unsigned long i;
+
+ if (!MLX5_CAP_GEN_2(esw->dev, delegated_vhca_max))
+ return;
+
+ mlx5_esw_for_each_vf_vport(esw, i, vport, U16_MAX) {
+ if (!vport->adjacent)
+ continue;
+ mlx5_esw_adj_vport_destroy(esw, vport);
+ }
+}
+
+void mlx5_esw_adjacent_vhcas_setup(struct mlx5_eswitch *esw)
+{
+ u32 delegated_vhca_max = MLX5_CAP_GEN_2(esw->dev, delegated_vhca_max);
+ u32 in[MLX5_ST_SZ_DW(query_delegated_vhca_in)] = {};
+ int outlen, err, i = 0;
+ u8 *out;
+ u32 count;
+
+ if (!delegated_vhca_max)
+ return;
+
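+ /* Response carries a fixed header plus one RID info entry per delegated VHCA */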
+ outlen = MLX5_ST_SZ_BYTES(query_delegated_vhca_out) +
+ delegated_vhca_max *
+ MLX5_ST_SZ_BYTES(delegated_function_vhca_rid_info);
+
+ esw_debug(esw->dev, "delegated_vhca_max=%d\n", delegated_vhca_max);
+
+ out = kvzalloc(outlen, GFP_KERNEL);
+ if (!out)
+ return;
+
+ MLX5_SET(query_delegated_vhca_in, in, opcode,
+ MLX5_CMD_OPCODE_QUERY_DELEGATED_VHCA);
+
+ err = mlx5_cmd_exec(esw->dev, in, sizeof(in), out, outlen);
+ if (err) {
+ kvfree(out);
+ esw_warn(esw->dev, "Failed to query delegated vhca, err %d\n",
+ err);
+ return;
+ }
+
+ count = MLX5_GET(query_delegated_vhca_out, out, functions_count);
+ esw_debug(esw->dev, "Delegated vhca functions count %d\n", count);
+
+ for (i = 0; i < count; i++) {
+ const void *rid_info, *rid_info_reg;
+ u16 vhca_id;
+
+ rid_info = MLX5_ADDR_OF(query_delegated_vhca_out, out,
+ delegated_function_vhca_rid_info[i]);
+
+ rid_info_reg = MLX5_ADDR_OF(delegated_function_vhca_rid_info,
+ rid_info, function_vhca_rid_info);
+
+ vhca_id = MLX5_GET(function_vhca_rid_info_reg, rid_info_reg,
+ vhca_id);
+ esw_debug(esw->dev, "Delegating vhca_id 0x%x\n", vhca_id);
+
+ err = mlx5_esw_adj_vport_create(esw, vhca_id, rid_info_reg);
+ if (err) {
+ esw_warn(esw->dev,
+ "Failed to init adjacent vhca 0x%x, err %d\n",
+ vhca_id, err);
+ break;
+ }
+ }
+
+ kvfree(out);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
index 76e35c827da0..60e10047770f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
@@ -81,7 +81,8 @@ mlx5_esw_bridge_table_create(int max_fte, u32 level, struct mlx5_eswitch *esw)
ft_attr.prio = FDB_BR_OFFLOAD;
fdb = mlx5_create_flow_table(ns, &ft_attr);
if (IS_ERR(fdb))
- esw_warn(dev, "Failed to create bridge FDB Table (err=%ld)\n", PTR_ERR(fdb));
+ esw_warn(dev, "Failed to create bridge FDB Table (err=%pe)\n",
+ fdb);
return fdb;
}
@@ -121,8 +122,8 @@ mlx5_esw_bridge_ingress_vlan_proto_fg_create(unsigned int from, unsigned int to,
kvfree(in);
if (IS_ERR(fg))
esw_warn(esw->dev,
- "Failed to create VLAN(proto=%x) flow group for bridge ingress table (err=%ld)\n",
- vlan_proto, PTR_ERR(fg));
+ "Failed to create VLAN(proto=%x) flow group for bridge ingress table (err=%pe)\n",
+ vlan_proto, fg);
return fg;
}
@@ -180,8 +181,8 @@ mlx5_esw_bridge_ingress_vlan_proto_filter_fg_create(unsigned int from, unsigned
fg = mlx5_create_flow_group(ingress_ft, in);
if (IS_ERR(fg))
esw_warn(esw->dev,
- "Failed to create bridge ingress table VLAN filter flow group (err=%ld)\n",
- PTR_ERR(fg));
+ "Failed to create bridge ingress table VLAN filter flow group (err=%pe)\n",
+ fg);
kvfree(in);
return fg;
}
@@ -237,8 +238,8 @@ mlx5_esw_bridge_ingress_mac_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow
fg = mlx5_create_flow_group(ingress_ft, in);
if (IS_ERR(fg))
esw_warn(esw->dev,
- "Failed to create MAC flow group for bridge ingress table (err=%ld)\n",
- PTR_ERR(fg));
+ "Failed to create MAC flow group for bridge ingress table (err=%pe)\n",
+ fg);
kvfree(in);
return fg;
@@ -274,8 +275,8 @@ mlx5_esw_bridge_egress_vlan_proto_fg_create(unsigned int from, unsigned int to,
fg = mlx5_create_flow_group(egress_ft, in);
if (IS_ERR(fg))
esw_warn(esw->dev,
- "Failed to create VLAN flow group for bridge egress table (err=%ld)\n",
- PTR_ERR(fg));
+ "Failed to create VLAN flow group for bridge egress table (err=%pe)\n",
+ fg);
kvfree(in);
return fg;
}
@@ -324,8 +325,8 @@ mlx5_esw_bridge_egress_mac_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_
fg = mlx5_create_flow_group(egress_ft, in);
if (IS_ERR(fg))
esw_warn(esw->dev,
- "Failed to create bridge egress table MAC flow group (err=%ld)\n",
- PTR_ERR(fg));
+ "Failed to create bridge egress table MAC flow group (err=%pe)\n",
+ fg);
kvfree(in);
return fg;
}
@@ -354,8 +355,8 @@ mlx5_esw_bridge_egress_miss_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow
fg = mlx5_create_flow_group(egress_ft, in);
if (IS_ERR(fg))
esw_warn(esw->dev,
- "Failed to create bridge egress table miss flow group (err=%ld)\n",
- PTR_ERR(fg));
+ "Failed to create bridge egress table miss flow group (err=%pe)\n",
+ fg);
kvfree(in);
return fg;
}
@@ -501,8 +502,8 @@ mlx5_esw_bridge_egress_table_init(struct mlx5_esw_bridge_offloads *br_offloads,
if (mlx5_esw_bridge_pkt_reformat_vlan_pop_supported(esw)) {
miss_fg = mlx5_esw_bridge_egress_miss_fg_create(esw, egress_ft);
if (IS_ERR(miss_fg)) {
- esw_warn(esw->dev, "Failed to create miss flow group (err=%ld)\n",
- PTR_ERR(miss_fg));
+ esw_warn(esw->dev, "Failed to create miss flow group (err=%pe)\n",
+ miss_fg);
miss_fg = NULL;
goto skip_miss_flow;
}
@@ -510,8 +511,8 @@ mlx5_esw_bridge_egress_table_init(struct mlx5_esw_bridge_offloads *br_offloads,
miss_pkt_reformat = mlx5_esw_bridge_pkt_reformat_vlan_pop_create(esw);
if (IS_ERR(miss_pkt_reformat)) {
esw_warn(esw->dev,
- "Failed to alloc packet reformat REMOVE_HEADER (err=%ld)\n",
- PTR_ERR(miss_pkt_reformat));
+ "Failed to alloc packet reformat REMOVE_HEADER (err=%pe)\n",
+ miss_pkt_reformat);
miss_pkt_reformat = NULL;
mlx5_destroy_flow_group(miss_fg);
miss_fg = NULL;
@@ -522,8 +523,8 @@ mlx5_esw_bridge_egress_table_init(struct mlx5_esw_bridge_offloads *br_offloads,
br_offloads->skip_ft,
miss_pkt_reformat);
if (IS_ERR(miss_handle)) {
- esw_warn(esw->dev, "Failed to create miss flow (err=%ld)\n",
- PTR_ERR(miss_handle));
+ esw_warn(esw->dev, "Failed to create miss flow (err=%pe)\n",
+ miss_handle);
miss_handle = NULL;
mlx5_packet_reformat_dealloc(esw->dev, miss_pkt_reformat);
miss_pkt_reformat = NULL;
@@ -1048,8 +1049,8 @@ mlx5_esw_bridge_vlan_push_create(u16 vlan_proto, struct mlx5_esw_bridge_vlan *vl
&reformat_params,
MLX5_FLOW_NAMESPACE_FDB);
if (IS_ERR(pkt_reformat)) {
- esw_warn(esw->dev, "Failed to alloc packet reformat INSERT_HEADER (err=%ld)\n",
- PTR_ERR(pkt_reformat));
+ esw_warn(esw->dev, "Failed to alloc packet reformat INSERT_HEADER (err=%pe)\n",
+ pkt_reformat);
return PTR_ERR(pkt_reformat);
}
@@ -1076,8 +1077,8 @@ mlx5_esw_bridge_vlan_pop_create(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_e
pkt_reformat = mlx5_esw_bridge_pkt_reformat_vlan_pop_create(esw);
if (IS_ERR(pkt_reformat)) {
- esw_warn(esw->dev, "Failed to alloc packet reformat REMOVE_HEADER (err=%ld)\n",
- PTR_ERR(pkt_reformat));
+ esw_warn(esw->dev, "Failed to alloc packet reformat REMOVE_HEADER (err=%pe)\n",
+ pkt_reformat);
return PTR_ERR(pkt_reformat);
}
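
The bridge.c hunks above replace PTR_ERR() plus %ld with the %pe printk specifier, which takes the ERR_PTR-encoded pointer directly and, with CONFIG_SYMBOLIC_ERRNAME=y, prints the symbolic error name instead of a raw number. A minimal sketch contrasting the two forms:

#include <linux/err.h>
#include <linux/printk.h>

static void demo_pe(void)
{
	void *ft = ERR_PTR(-ENOMEM);	/* stand-in for a failed create */

	if (IS_ERR(ft)) {
		/* old style: numeric errno only */
		pr_warn("create failed (err=%ld)\n", PTR_ERR(ft));
		/* new style: "-ENOMEM" when symbolic names are enabled */
		pr_warn("create failed (err=%pe)\n", ft);
	}
}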
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
index c33accadae0f..cf88a106d80d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
@@ -27,6 +27,7 @@ static void mlx5_esw_offloads_pf_vf_devlink_port_attrs_set(struct mlx5_eswitch *
{
struct mlx5_core_dev *dev = esw->dev;
struct netdev_phys_item_id ppid = {};
+ struct mlx5_vport *vport;
u32 controller_num = 0;
bool external;
u16 pfnum;
@@ -42,10 +43,18 @@ static void mlx5_esw_offloads_pf_vf_devlink_port_attrs_set(struct mlx5_eswitch *
dl_port->attrs.switch_id.id_len = ppid.id_len;
devlink_port_attrs_pci_pf_set(dl_port, controller_num, pfnum, external);
} else if (mlx5_eswitch_is_vf_vport(esw, vport_num)) {
+ u16 func_id = vport_num - 1;
+
+ vport = mlx5_eswitch_get_vport(esw, vport_num);
memcpy(dl_port->attrs.switch_id.id, ppid.id, ppid.id_len);
dl_port->attrs.switch_id.id_len = ppid.id_len;
+ if (vport->adjacent) {
+ func_id = vport->adj_info.function_id;
+ pfnum = vport->adj_info.parent_pci_devfn;
+ }
+
devlink_port_attrs_pci_vf_set(dl_port, controller_num, pfnum,
- vport_num - 1, external);
+ func_id, external);
} else if (mlx5_core_is_ec_vf_vport(esw->dev, vport_num)) {
u16 base_vport = mlx5_core_ec_vf_vport_base(dev);
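
For adjacent (delegated) VFs the PCI VF attributes no longer derive the function number from the vport number; they are taken from the delegation info instead. A hedged sketch of the selection logic feeding devlink_port_attrs_pci_vf_set() (the parameter names are illustrative):

#include <net/devlink.h>

static void demo_set_vf_attrs(struct devlink_port *dl_port, u32 controller,
			      u16 pfnum, u16 vport_num, bool adjacent,
			      u16 adj_function_id, u16 adj_parent_devfn,
			      bool external)
{
	u16 func_id = vport_num - 1;	/* classic derivation */

	if (adjacent) {
		/* delegated vports report their parent's PCI devfn */
		func_id = adj_function_id;
		pfnum = adj_parent_devfn;
	}
	devlink_port_attrs_pci_vf_set(dl_port, controller, pfnum, func_id,
				      external);
}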
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
index 5f2d6c35f1ad..56e6f54b1e2e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
@@ -971,8 +971,9 @@ esw_qos_vport_tc_enable(struct mlx5_vport *vport, enum sched_node_type type,
max_level = 1 << MLX5_CAP_QOS(vport_node->esw->dev,
log_esw_max_sched_depth);
if (new_level > max_level) {
- NL_SET_ERR_MSG_MOD(extack,
- "TC arbitration on leafs is not supported beyond max scheduling depth");
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "TC arbitration on leafs is not supported beyond max depth %d",
+ max_level);
return -EOPNOTSUPP;
}
}
@@ -1444,8 +1445,9 @@ static int esw_qos_node_enable_tc_arbitration(struct mlx5_esw_sched_node *node,
new_level = node->level + 1;
max_level = 1 << MLX5_CAP_QOS(node->esw->dev, log_esw_max_sched_depth);
if (new_level > max_level) {
- NL_SET_ERR_MSG_MOD(extack,
- "TC arbitration on nodes is not supported beyond max scheduling depth");
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "TC arbitration on nodes is not supported beyond max depth %d",
+ max_level);
return -EOPNOTSUPP;
}
@@ -1997,8 +1999,9 @@ mlx5_esw_qos_node_validate_set_parent(struct mlx5_esw_sched_node *node,
max_level = 1 << MLX5_CAP_QOS(node->esw->dev, log_esw_max_sched_depth);
if (new_level > max_level) {
- NL_SET_ERR_MSG_MOD(extack,
- "Node hierarchy depth exceeds the maximum supported level");
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Node hierarchy depth %d exceeds the maximum supported level %d",
+ new_level, max_level);
return -EOPNOTSUPP;
}
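
The qos.c extack messages now embed the computed limit: with log_esw_max_sched_depth = 3, for instance, max_level = 1 << 3 = 8, so a node that would land at level 9 is rejected with the bound spelled out. A minimal sketch of the check (the helper name is hypothetical):

#include <linux/netlink.h>

static int demo_check_sched_depth(int new_level, u8 log_max_depth,
				  struct netlink_ext_ack *extack)
{
	int max_level = 1 << log_max_depth;

	if (new_level > max_level) {
		NL_SET_ERR_MSG_FMT_MOD(extack,
				       "Node hierarchy depth %d exceeds the maximum supported level %d",
				       new_level, max_level);
		return -EOPNOTSUPP;
	}
	return 0;
}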
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/vporttbl.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/vporttbl.c
index 749c3957a128..407062096a82 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/vporttbl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/vporttbl.c
@@ -45,8 +45,8 @@ esw_vport_tbl_create(struct mlx5_eswitch *esw, struct mlx5_flow_namespace *ns,
ft_attr.flags = vport_ns->flags;
fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
if (IS_ERR(fdb)) {
- esw_warn(esw->dev, "Failed to create per vport FDB Table err %ld\n",
- PTR_ERR(fdb));
+ esw_warn(esw->dev, "Failed to create per vport FDB Table err %pe\n",
+ fdb);
}
return fdb;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 4917d185d0c3..e2ffb87b94cb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -257,8 +257,8 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u16 vport, bool rx_rule,
&flow_act, &dest, 1);
if (IS_ERR(flow_rule)) {
esw_warn(esw->dev,
- "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
- dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
+ "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%pe)\n",
+ dmac_v, dmac_c, vport, flow_rule);
flow_rule = NULL;
}
@@ -820,6 +820,7 @@ static int mlx5_esw_vport_caps_get(struct mlx5_eswitch *esw, struct mlx5_vport *
hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
vport->info.roce_enabled = MLX5_GET(cmd_hca_cap, hca_caps, roce);
+ vport->vhca_id = MLX5_GET(cmd_hca_cap, hca_caps, vhca_id);
if (!MLX5_CAP_GEN_MAX(esw->dev, hca_cap_2))
goto out_free;
@@ -839,6 +840,18 @@ out_free:
return err;
}
+bool mlx5_esw_vport_vhca_id(struct mlx5_eswitch *esw, u16 vportn, u16 *vhca_id)
+{
+ struct mlx5_vport *vport;
+
+ vport = mlx5_eswitch_get_vport(esw, vportn);
+ if (IS_ERR(vport) || MLX5_VPORT_INVAL_VHCA_ID(vport))
+ return false;
+
+ *vhca_id = vport->vhca_id;
+ return true;
+}
+
static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
bool vst_mode_steering = esw_vst_mode_is_steering(esw);
@@ -929,7 +942,7 @@ int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
if (!mlx5_esw_is_manager_vport(esw, vport_num) &&
MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
- ret = mlx5_esw_vport_vhca_id_set(esw, vport_num);
+ ret = mlx5_esw_vport_vhca_id_map(esw, vport);
if (ret)
goto err_vhca_mapping;
}
@@ -973,7 +986,7 @@ void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
if (!mlx5_esw_is_manager_vport(esw, vport_num) &&
MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
- mlx5_esw_vport_vhca_id_clear(esw, vport_num);
+ mlx5_esw_vport_vhca_id_unmap(esw, vport);
if (vport->vport != MLX5_VPORT_PF &&
(vport->info.ipsec_crypto_enabled || vport->info.ipsec_packet_enabled))
@@ -1038,6 +1051,25 @@ const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
return ERR_PTR(err);
}
+static int mlx5_esw_host_functions_enabled_query(struct mlx5_eswitch *esw)
+{
+ const u32 *query_host_out;
+
+ if (!mlx5_core_is_ecpf_esw_manager(esw->dev))
+ return 0;
+
+ query_host_out = mlx5_esw_query_functions(esw->dev);
+ if (IS_ERR(query_host_out))
+ return PTR_ERR(query_host_out);
+
+ esw->esw_funcs.host_funcs_disabled =
+ MLX5_GET(query_esw_functions_out, query_host_out,
+ host_params_context.host_pf_not_exist);
+
+ kvfree(query_host_out);
+ return 0;
+}
+
static void mlx5_eswitch_event_handler_register(struct mlx5_eswitch *esw)
{
if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev)) {
@@ -1185,7 +1217,8 @@ void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs)
unsigned long i;
mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) {
- if (!vport->enabled)
+ /* Adjacent VFs are unloaded separately */
+ if (!vport->enabled || vport->adjacent)
continue;
mlx5_eswitch_unload_pf_vf_vport(esw, vport->vport);
}
@@ -1204,6 +1237,42 @@ static void mlx5_eswitch_unload_ec_vf_vports(struct mlx5_eswitch *esw,
}
}
+static void mlx5_eswitch_unload_adj_vf_vports(struct mlx5_eswitch *esw)
+{
+ struct mlx5_vport *vport;
+ unsigned long i;
+
+ mlx5_esw_for_each_vf_vport(esw, i, vport, U16_MAX) {
+ if (!vport->enabled || !vport->adjacent)
+ continue;
+ mlx5_eswitch_unload_pf_vf_vport(esw, vport->vport);
+ }
+}
+
+static int
+mlx5_eswitch_load_adj_vf_vports(struct mlx5_eswitch *esw,
+ enum mlx5_eswitch_vport_event enabled_events)
+{
+ struct mlx5_vport *vport;
+ unsigned long i;
+ int err;
+
+ mlx5_esw_for_each_vf_vport(esw, i, vport, U16_MAX) {
+ if (!vport->adjacent)
+ continue;
+ err = mlx5_eswitch_load_pf_vf_vport(esw, vport->vport,
+ enabled_events);
+ if (err)
+ goto unload_adj_vf_vport;
+ }
+
+ return 0;
+
+unload_adj_vf_vport:
+ mlx5_eswitch_unload_adj_vf_vports(esw);
+ return err;
+}
+
int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
enum mlx5_eswitch_vport_event enabled_events)
{
@@ -1278,17 +1347,19 @@ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
esw->mode == MLX5_ESWITCH_LEGACY;
/* Enable PF vport */
- if (pf_needed) {
+ if (pf_needed && mlx5_esw_host_functions_enabled(esw->dev)) {
ret = mlx5_eswitch_load_pf_vf_vport(esw, MLX5_VPORT_PF,
enabled_events);
if (ret)
return ret;
}
- /* Enable external host PF HCA */
- ret = host_pf_enable_hca(esw->dev);
- if (ret)
- goto pf_hca_err;
+ if (mlx5_esw_host_functions_enabled(esw->dev)) {
+ /* Enable external host PF HCA */
+ ret = host_pf_enable_hca(esw->dev);
+ if (ret)
+ goto pf_hca_err;
+ }
/* Enable ECPF vport */
if (mlx5_ecpf_vport_exists(esw->dev)) {
@@ -1311,8 +1382,16 @@ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
enabled_events);
if (ret)
goto vf_err;
+
+ /* Enable adjacent VF vports */
+ ret = mlx5_eswitch_load_adj_vf_vports(esw, enabled_events);
+ if (ret)
+ goto unload_vf_vports;
+
return 0;
+unload_vf_vports:
+ mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
vf_err:
if (mlx5_core_ec_sriov_enabled(esw->dev))
mlx5_eswitch_unload_ec_vf_vports(esw, esw->esw_funcs.num_ec_vfs);
@@ -1320,9 +1399,10 @@ ec_vf_err:
if (mlx5_ecpf_vport_exists(esw->dev))
mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_ECPF);
ecpf_err:
- host_pf_disable_hca(esw->dev);
+ if (mlx5_esw_host_functions_enabled(esw->dev))
+ host_pf_disable_hca(esw->dev);
pf_hca_err:
- if (pf_needed)
+ if (pf_needed && mlx5_esw_host_functions_enabled(esw->dev))
mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_PF);
return ret;
}
@@ -1332,6 +1412,8 @@ pf_hca_err:
*/
void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
{
+ mlx5_eswitch_unload_adj_vf_vports(esw);
+
mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
if (mlx5_core_ec_sriov_enabled(esw->dev))
@@ -1342,10 +1424,12 @@ void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_ECPF);
}
- host_pf_disable_hca(esw->dev);
+ if (mlx5_esw_host_functions_enabled(esw->dev))
+ host_pf_disable_hca(esw->dev);
- if (mlx5_core_is_ecpf_esw_manager(esw->dev) ||
- esw->mode == MLX5_ESWITCH_LEGACY)
+ if ((mlx5_core_is_ecpf_esw_manager(esw->dev) ||
+ esw->mode == MLX5_ESWITCH_LEGACY) &&
+ mlx5_esw_host_functions_enabled(esw->dev))
mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_PF);
}
@@ -1402,19 +1486,76 @@ static void mlx5_esw_mode_change_notify(struct mlx5_eswitch *esw, u16 mode)
blocking_notifier_call_chain(&esw->n_head, 0, &info);
}
+static int mlx5_esw_egress_acls_init(struct mlx5_core_dev *dev)
+{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+ int total_vports = mlx5_eswitch_get_total_vports(dev);
+ int err;
+ int i;
+
+ for (i = 0; i < total_vports; i++) {
+ err = mlx5_fs_vport_egress_acl_ns_add(steering, i);
+ if (err)
+ goto acl_ns_remove;
+ }
+ return 0;
+
+acl_ns_remove:
+ while (i--)
+ mlx5_fs_vport_egress_acl_ns_remove(steering, i);
+ return err;
+}
+
+static void mlx5_esw_egress_acls_cleanup(struct mlx5_core_dev *dev)
+{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+ int total_vports = mlx5_eswitch_get_total_vports(dev);
+ int i;
+
+ for (i = total_vports - 1; i >= 0; i--)
+ mlx5_fs_vport_egress_acl_ns_remove(steering, i);
+}
+
+static int mlx5_esw_ingress_acls_init(struct mlx5_core_dev *dev)
+{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+ int total_vports = mlx5_eswitch_get_total_vports(dev);
+ int err;
+ int i;
+
+ for (i = 0; i < total_vports; i++) {
+ err = mlx5_fs_vport_ingress_acl_ns_add(steering, i);
+ if (err)
+ goto acl_ns_remove;
+ }
+ return 0;
+
+acl_ns_remove:
+ while (i--)
+ mlx5_fs_vport_ingress_acl_ns_remove(steering, i);
+ return err;
+}
+
+static void mlx5_esw_ingress_acls_cleanup(struct mlx5_core_dev *dev)
+{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+ int total_vports = mlx5_eswitch_get_total_vports(dev);
+ int i;
+
+ for (i = total_vports - 1; i >= 0; i--)
+ mlx5_fs_vport_ingress_acl_ns_remove(steering, i);
+}
+
static int mlx5_esw_acls_ns_init(struct mlx5_eswitch *esw)
{
struct mlx5_core_dev *dev = esw->dev;
- int total_vports;
int err;
if (esw->flags & MLX5_ESWITCH_VPORT_ACL_NS_CREATED)
return 0;
- total_vports = mlx5_eswitch_get_total_vports(dev);
-
if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
- err = mlx5_fs_egress_acls_init(dev, total_vports);
+ err = mlx5_esw_egress_acls_init(dev);
if (err)
return err;
} else {
@@ -1422,7 +1563,7 @@ static int mlx5_esw_acls_ns_init(struct mlx5_eswitch *esw)
}
if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
- err = mlx5_fs_ingress_acls_init(dev, total_vports);
+ err = mlx5_esw_ingress_acls_init(dev);
if (err)
goto err;
} else {
@@ -1433,7 +1574,7 @@ static int mlx5_esw_acls_ns_init(struct mlx5_eswitch *esw)
err:
if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
- mlx5_fs_egress_acls_cleanup(dev);
+ mlx5_esw_egress_acls_cleanup(dev);
return err;
}
@@ -1443,9 +1584,9 @@ static void mlx5_esw_acls_ns_cleanup(struct mlx5_eswitch *esw)
esw->flags &= ~MLX5_ESWITCH_VPORT_ACL_NS_CREATED;
if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
- mlx5_fs_ingress_acls_cleanup(dev);
+ mlx5_esw_ingress_acls_cleanup(dev);
if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
- mlx5_fs_egress_acls_cleanup(dev);
+ mlx5_esw_egress_acls_cleanup(dev);
}
/**
@@ -1674,7 +1815,8 @@ int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *
void *hca_caps;
int err;
- if (!mlx5_core_is_ecpf(dev)) {
+ if (!mlx5_core_is_ecpf(dev) ||
+ !mlx5_esw_host_functions_enabled(dev)) {
*max_sfs = 0;
return 0;
}
@@ -1696,8 +1838,7 @@ out_free:
return err;
}
-static int mlx5_esw_vport_alloc(struct mlx5_eswitch *esw,
- int index, u16 vport_num)
+int mlx5_esw_vport_alloc(struct mlx5_eswitch *esw, int index, u16 vport_num)
{
struct mlx5_vport *vport;
int err;
@@ -1710,6 +1851,7 @@ static int mlx5_esw_vport_alloc(struct mlx5_eswitch *esw,
vport->vport = vport_num;
vport->index = index;
vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
+ vport->vhca_id = MLX5_VHCA_ID_INVALID;
INIT_WORK(&vport->vport_change_handler, esw_vport_change_handler);
err = xa_insert(&esw->vports, vport_num, vport, GFP_KERNEL);
if (err)
@@ -1723,8 +1865,9 @@ insert_err:
return err;
}
-static void mlx5_esw_vport_free(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
+void mlx5_esw_vport_free(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
+ esw->total_vports--;
xa_erase(&esw->vports, vport->vport);
kfree(vport);
}
@@ -1750,21 +1893,23 @@ static int mlx5_esw_vports_init(struct mlx5_eswitch *esw)
xa_init(&esw->vports);
- err = mlx5_esw_vport_alloc(esw, idx, MLX5_VPORT_PF);
- if (err)
- goto err;
- if (esw->first_host_vport == MLX5_VPORT_PF)
- xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN);
- idx++;
-
- for (i = 0; i < mlx5_core_max_vfs(dev); i++) {
- err = mlx5_esw_vport_alloc(esw, idx, idx);
+ if (mlx5_esw_host_functions_enabled(dev)) {
+ err = mlx5_esw_vport_alloc(esw, idx, MLX5_VPORT_PF);
if (err)
goto err;
- xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_VF);
- xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN);
+ if (esw->first_host_vport == MLX5_VPORT_PF)
+ xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN);
idx++;
+ for (i = 0; i < mlx5_core_max_vfs(dev); i++) {
+ err = mlx5_esw_vport_alloc(esw, idx, idx);
+ if (err)
+ goto err;
+ xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_VF);
+ xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN);
+ idx++;
+ }
}
+
base_sf_num = mlx5_sf_start_function_id(dev);
for (i = 0; i < mlx5_sf_max_functions(dev); i++) {
err = mlx5_esw_vport_alloc(esw, idx, base_sf_num + i);
@@ -1806,6 +1951,9 @@ static int mlx5_esw_vports_init(struct mlx5_eswitch *esw)
err = mlx5_esw_vport_alloc(esw, idx, MLX5_VPORT_UPLINK);
if (err)
goto err;
+
+ /* Adjacent vports and other dynamically created vports will use this */
+ esw->last_vport_idx = ++idx;
return 0;
err:
@@ -1864,6 +2012,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
goto free_esw;
esw->dev = dev;
+ dev->priv.eswitch = esw;
esw->manager_vport = mlx5_eswitch_manager_vport(dev);
esw->first_host_vport = mlx5_eswitch_first_host_vport_num(dev);
@@ -1874,11 +2023,14 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
goto abort;
}
+ err = mlx5_esw_host_functions_enabled_query(esw);
+ if (err)
+ goto abort;
+
err = mlx5_esw_vports_init(esw);
if (err)
goto abort;
- dev->priv.eswitch = esw;
err = esw_offloads_init(esw);
if (err)
goto reps_err;
@@ -2410,3 +2562,11 @@ void mlx5_eswitch_unblock_ipsec(struct mlx5_core_dev *dev)
dev->num_ipsec_offloads--;
mutex_unlock(&esw->state_lock);
}
+
+bool mlx5_esw_host_functions_enabled(const struct mlx5_core_dev *dev)
+{
+ if (!dev->priv.eswitch)
+ return true;
+
+ return !dev->priv.eswitch->esw_funcs.host_funcs_disabled;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 45506ad56847..df3756d7e52e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -197,6 +197,11 @@ static inline struct mlx5_vport *mlx5_devlink_port_vport_get(struct devlink_port
return mlx5_devlink_port_get(dl_port)->vport;
}
+#define MLX5_VHCA_ID_INVALID (-1)
+
+#define MLX5_VPORT_INVAL_VHCA_ID(vport) \
+ ((vport)->vhca_id == MLX5_VHCA_ID_INVALID)
+
struct mlx5_vport {
struct mlx5_core_dev *dev;
struct hlist_head uc_list[MLX5_L2_ADDR_HASH_SIZE];
@@ -209,6 +214,13 @@ struct mlx5_vport {
struct vport_egress egress;
u32 default_metadata;
u32 metadata;
+ int vhca_id;
+
+ bool adjacent; /* delegated vhca from adjacent function */
+ struct {
+ u16 parent_pci_devfn; /* Adjacent parent PCI device function */
+ u16 function_id; /* Function ID of the delegated VPort */
+ } adj_info;
struct mlx5_vport_info info;
@@ -323,6 +335,7 @@ struct mlx5_host_work {
struct mlx5_esw_functions {
struct mlx5_nb nb;
+ bool host_funcs_disabled;
u16 num_vfs;
u16 num_ec_vfs;
};
@@ -377,6 +390,7 @@ struct mlx5_eswitch {
struct mlx5_esw_bridge_offloads *br_offloads;
struct mlx5_esw_offload offloads;
+ u32 last_vport_idx;
int mode;
u16 manager_vport;
u16 first_host_vport;
@@ -410,6 +424,8 @@ int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32
/* E-Switch API */
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);
+int mlx5_esw_vport_alloc(struct mlx5_eswitch *esw, int index, u16 vport_num);
+void mlx5_esw_vport_free(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
#define MLX5_ESWITCH_IGNORE_NUM_VFS (-1)
int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs);
@@ -417,7 +433,8 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs);
void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf);
void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw);
void mlx5_eswitch_disable(struct mlx5_eswitch *esw);
-void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw, u64 key);
+void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw,
+ const struct mlx5_devcom_match_attr *attr);
void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw);
bool mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch *esw);
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
@@ -615,6 +632,9 @@ bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev);
+void mlx5_esw_adjacent_vhcas_setup(struct mlx5_eswitch *esw);
+void mlx5_esw_adjacent_vhcas_cleanup(struct mlx5_eswitch *esw);
+
#define MLX5_DEBUG_ESWITCH_MASK BIT(3)
#define esw_info(__dev, format, ...) \
@@ -817,9 +837,17 @@ struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u1
int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *sf_base_id);
-int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num);
-void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num);
+int mlx5_esw_vport_vhca_id_map(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport);
+void mlx5_esw_vport_vhca_id_unmap(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport);
int mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch *esw, u16 vhca_id, u16 *vport_num);
+bool mlx5_esw_vport_vhca_id(struct mlx5_eswitch *esw, u16 vportn, u16 *vhca_id);
+
+void mlx5_esw_offloads_rep_remove(struct mlx5_eswitch *esw,
+ const struct mlx5_vport *vport);
+int mlx5_esw_offloads_rep_add(struct mlx5_eswitch *esw,
+ const struct mlx5_vport *vport);
/**
* struct mlx5_esw_event_info - Indicates eswitch mode changed/changing.
@@ -893,6 +921,7 @@ int mlx5_esw_ipsec_vf_packet_offload_set(struct mlx5_eswitch *esw, struct mlx5_v
bool enable);
int mlx5_esw_ipsec_vf_packet_offload_supported(struct mlx5_core_dev *dev,
u16 vport_num);
+bool mlx5_esw_host_functions_enabled(const struct mlx5_core_dev *dev);
#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
@@ -900,7 +929,9 @@ static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { return 0; }
static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf) {}
static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw) {}
-static inline void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw, u64 key) {}
+static inline void
+mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw,
+ const struct mlx5_devcom_match_attr *attr) {}
static inline void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) {}
static inline bool mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch *esw) { return false; }
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
@@ -960,6 +991,19 @@ static inline bool mlx5_eswitch_block_ipsec(struct mlx5_core_dev *dev)
}
static inline void mlx5_eswitch_unblock_ipsec(struct mlx5_core_dev *dev) {}
+
+static inline bool
+mlx5_esw_host_functions_enabled(const struct mlx5_core_dev *dev)
+{
+ return true;
+}
+
+static inline bool
+mlx5_esw_vport_vhca_id(struct mlx5_eswitch *esw, u16 vportn, u16 *vhca_id)
+{
+ return false;
+}
+
#endif /* CONFIG_MLX5_ESWITCH */
#endif /* __MLX5_ESWITCH_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index bee906661282..52c3de24bea3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -1016,8 +1016,8 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(on_esw),
spec, &flow_act, &dest, 1);
if (IS_ERR(flow_rule))
- esw_warn(on_esw->dev, "FDB: Failed to add send to vport rule err %ld\n",
- PTR_ERR(flow_rule));
+ esw_warn(on_esw->dev, "FDB: Failed to add send to vport rule err %pe\n",
+ flow_rule);
out:
kvfree(spec);
return flow_rule;
@@ -1065,8 +1065,8 @@ mlx5_eswitch_add_send_to_vport_meta_rule(struct mlx5_eswitch *esw, u16 vport_num
flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
spec, &flow_act, &dest, 1);
if (IS_ERR(flow_rule))
- esw_warn(esw->dev, "FDB: Failed to add send to vport meta rule vport %d, err %ld\n",
- vport_num, PTR_ERR(flow_rule));
+ esw_warn(esw->dev, "FDB: Failed to add send to vport meta rule vport %d, err %pe\n",
+ vport_num, flow_rule);
kvfree(spec);
return flow_rule;
@@ -1213,7 +1213,8 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
misc_parameters);
- if (mlx5_core_is_ecpf_esw_manager(peer_dev)) {
+ if (mlx5_core_is_ecpf_esw_manager(peer_dev) &&
+ mlx5_esw_host_functions_enabled(peer_dev)) {
peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_PF);
esw_set_peer_miss_rule_source_port(esw, peer_esw, spec,
MLX5_VPORT_PF);
@@ -1239,19 +1240,21 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
flows[peer_vport->index] = flow;
}
- mlx5_esw_for_each_vf_vport(peer_esw, i, peer_vport,
- mlx5_core_max_vfs(peer_dev)) {
- esw_set_peer_miss_rule_source_port(esw,
- peer_esw,
- spec, peer_vport->vport);
+ if (mlx5_esw_host_functions_enabled(esw->dev)) {
+ mlx5_esw_for_each_vf_vport(peer_esw, i, peer_vport,
+ mlx5_core_max_vfs(peer_dev)) {
+ esw_set_peer_miss_rule_source_port(esw, peer_esw,
+ spec,
+ peer_vport->vport);
- flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
- spec, &flow_act, &dest, 1);
- if (IS_ERR(flow)) {
- err = PTR_ERR(flow);
- goto add_vf_flow_err;
+ flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
+ spec, &flow_act, &dest, 1);
+ if (IS_ERR(flow)) {
+ err = PTR_ERR(flow);
+ goto add_vf_flow_err;
+ }
+ flows[peer_vport->index] = flow;
}
- flows[peer_vport->index] = flow;
}
if (mlx5_core_ec_sriov_enabled(peer_dev)) {
@@ -1301,7 +1304,9 @@ add_vf_flow_err:
mlx5_del_flow_rules(flows[peer_vport->index]);
}
add_ecpf_flow_err:
- if (mlx5_core_is_ecpf_esw_manager(peer_dev)) {
+
+ if (mlx5_core_is_ecpf_esw_manager(peer_dev) &&
+ mlx5_esw_host_functions_enabled(peer_dev)) {
peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_PF);
mlx5_del_flow_rules(flows[peer_vport->index]);
}
@@ -2154,7 +2159,9 @@ mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
&flow_act, dest, 1);
if (IS_ERR(flow_rule)) {
- esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
+ esw_warn(esw->dev,
+ "fs offloads: Failed to add vport rx rule err %pe\n",
+ flow_rule);
goto out;
}
@@ -2173,8 +2180,8 @@ static int esw_create_vport_rx_drop_rule(struct mlx5_eswitch *esw)
&flow_act, NULL, 0);
if (IS_ERR(flow_rule)) {
esw_warn(esw->dev,
- "fs offloads: Failed to add vport rx drop rule err %ld\n",
- PTR_ERR(flow_rule));
+ "fs offloads: Failed to add vport rx drop rule err %pe\n",
+ flow_rule);
return PTR_ERR(flow_rule);
}
@@ -2373,7 +2380,20 @@ static int esw_offloads_start(struct mlx5_eswitch *esw,
return 0;
}
-static int mlx5_esw_offloads_rep_init(struct mlx5_eswitch *esw, const struct mlx5_vport *vport)
+void mlx5_esw_offloads_rep_remove(struct mlx5_eswitch *esw,
+ const struct mlx5_vport *vport)
+{
+ struct mlx5_eswitch_rep *rep = xa_load(&esw->offloads.vport_reps,
+ vport->vport);
+
+ if (!rep)
+ return;
+ xa_erase(&esw->offloads.vport_reps, vport->vport);
+ kfree(rep);
+}
+
+int mlx5_esw_offloads_rep_add(struct mlx5_eswitch *esw,
+ const struct mlx5_vport *vport)
{
struct mlx5_eswitch_rep *rep;
int rep_type;
@@ -2385,9 +2405,19 @@ static int mlx5_esw_offloads_rep_init(struct mlx5_eswitch *esw, const struct mlx
rep->vport = vport->vport;
rep->vport_index = vport->index;
- for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
- atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
-
+ for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
+ if (!esw->offloads.rep_ops[rep_type]) {
+ atomic_set(&rep->rep_data[rep_type].state,
+ REP_UNREGISTERED);
+ continue;
+ }
+ /* Dynamic/delegated vports add their representors after
+ * mlx5_eswitch_register_vport_reps, so mark them as registered
+ * here to have them loaded later along with the others.
+ */
+ rep->esw = esw;
+ atomic_set(&rep->rep_data[rep_type].state, REP_REGISTERED);
+ }
err = xa_insert(&esw->offloads.vport_reps, rep->vport, rep, GFP_KERNEL);
if (err)
goto insert_err;
@@ -2425,7 +2455,7 @@ static int esw_offloads_init_reps(struct mlx5_eswitch *esw)
xa_init(&esw->offloads.vport_reps);
mlx5_esw_for_each_vport(esw, i, vport) {
- err = mlx5_esw_offloads_rep_init(esw, vport);
+ err = mlx5_esw_offloads_rep_add(esw, vport);
if (err)
goto err;
}
@@ -3076,7 +3106,8 @@ err_out:
return err;
}
-void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw, u64 key)
+void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw,
+ const struct mlx5_devcom_match_attr *attr)
{
int i;
@@ -3095,7 +3126,7 @@ void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw, u64 key)
esw->num_peers = 0;
esw->devcom = mlx5_devcom_register_component(esw->dev->priv.devc,
MLX5_DEVCOM_ESW_OFFLOADS,
- key,
+ attr,
mlx5_esw_offloads_devcom_event,
esw);
if (IS_ERR(esw->devcom))
@@ -3533,6 +3564,8 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
int err;
mutex_init(&esw->offloads.termtbl_mutex);
+ mlx5_esw_adjacent_vhcas_setup(esw);
+
err = mlx5_rdma_enable_roce(esw->dev);
if (err)
goto err_roce;
@@ -3597,6 +3630,7 @@ err_vport_metadata:
err_metadata:
mlx5_rdma_disable_roce(esw->dev);
err_roce:
+ mlx5_esw_adjacent_vhcas_cleanup(esw);
mutex_destroy(&esw->offloads.termtbl_mutex);
return err;
}
@@ -3630,6 +3664,7 @@ void esw_offloads_disable(struct mlx5_eswitch *esw)
mapping_destroy(esw->offloads.reg_c0_obj_pool);
esw_offloads_metadata_uninit(esw);
mlx5_rdma_disable_roce(esw->dev);
+ mlx5_esw_adjacent_vhcas_cleanup(esw);
mutex_destroy(&esw->offloads.termtbl_mutex);
}
@@ -3739,6 +3774,29 @@ void mlx5_eswitch_unblock_mode(struct mlx5_core_dev *dev)
up_write(&esw->mode_lock);
}
+/* Returns false only when an uplink netdev exists and its netns differs from
+ * the devlink's netns. Returns true in all other cases, so entering switchdev
+ * mode is allowed.
+ */
+static bool mlx5_devlink_netdev_netns_immutable_set(struct devlink *devlink,
+ bool immutable)
+{
+ struct mlx5_core_dev *mdev = devlink_priv(devlink);
+ struct net_device *netdev;
+ bool ret;
+
+ netdev = mlx5_uplink_netdev_get(mdev);
+ if (!netdev)
+ return true;
+
+ rtnl_lock();
+ netdev->netns_immutable = immutable;
+ ret = net_eq(dev_net(netdev), devlink_net(devlink));
+ rtnl_unlock();
+
+ mlx5_uplink_netdev_put(mdev, netdev);
+ return ret;
+}
+
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
struct netlink_ext_ack *extack)
{
@@ -3781,6 +3839,14 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
esw->eswitch_operation_in_progress = true;
up_write(&esw->mode_lock);
+ if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV &&
+ !mlx5_devlink_netdev_netns_immutable_set(devlink, true)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can't change E-Switch mode to switchdev when netdev net namespace has diverged from the devlink's.");
+ err = -EINVAL;
+ goto skip;
+ }
+
if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
esw->dev->priv.flags |= MLX5_PRIV_FLAGS_SWITCH_LEGACY;
mlx5_eswitch_disable_locked(esw);
@@ -3799,6 +3865,8 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
}
skip:
+ if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV && err)
+ mlx5_devlink_netdev_netns_immutable_set(devlink, false);
down_write(&esw->mode_lock);
esw->eswitch_operation_in_progress = false;
unlock:
@@ -4059,7 +4127,8 @@ mlx5_eswitch_vport_has_rep(const struct mlx5_eswitch *esw, u16 vport_num)
{
/* Currently, only ECPF based device has representor for host PF. */
if (vport_num == MLX5_VPORT_PF &&
- !mlx5_core_is_ecpf_esw_manager(esw->dev))
+ (!mlx5_core_is_ecpf_esw_manager(esw->dev) ||
+ !mlx5_esw_host_functions_enabled(esw->dev)))
return false;
if (vport_num == MLX5_VPORT_ECPF &&
@@ -4161,23 +4230,28 @@ u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
}
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);
-int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num)
+int mlx5_esw_vport_vhca_id_map(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
u16 *old_entry, *vhca_map_entry, vhca_id;
- int err;
- err = mlx5_vport_get_vhca_id(esw->dev, vport_num, &vhca_id);
- if (err) {
- esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%u,err=%d)\n",
- vport_num, err);
- return err;
+ if (WARN_ONCE(MLX5_VPORT_INVAL_VHCA_ID(vport),
+ "vport %d vhca_id is not set", vport->vport)) {
+ int err;
+
+ err = mlx5_vport_get_vhca_id(vport->dev, vport->vport,
+ &vhca_id);
+ if (err)
+ return err;
+ vport->vhca_id = vhca_id;
}
+ vhca_id = vport->vhca_id;
vhca_map_entry = kmalloc(sizeof(*vhca_map_entry), GFP_KERNEL);
if (!vhca_map_entry)
return -ENOMEM;
- *vhca_map_entry = vport_num;
+ *vhca_map_entry = vport->vport;
old_entry = xa_store(&esw->offloads.vhca_map, vhca_id, vhca_map_entry, GFP_KERNEL);
if (xa_is_err(old_entry)) {
kfree(vhca_map_entry);
@@ -4187,17 +4261,12 @@ int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num)
return 0;
}
-void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num)
+void mlx5_esw_vport_vhca_id_unmap(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
- u16 *vhca_map_entry, vhca_id;
- int err;
-
- err = mlx5_vport_get_vhca_id(esw->dev, vport_num, &vhca_id);
- if (err)
- esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%hu,err=%d)\n",
- vport_num, err);
+ u16 *vhca_map_entry;
- vhca_map_entry = xa_erase(&esw->offloads.vhca_map, vhca_id);
+ vhca_map_entry = xa_erase(&esw->offloads.vhca_map, vport->vhca_id);
kfree(vhca_map_entry);
}
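
The vhca map above keys heap-allocated u16 vport numbers by vhca_id in an xarray: mapping stores an entry and unmapping erases and frees it. A minimal sketch of that lifecycle on a caller-owned xarray (names are illustrative; the kfree of a replaced entry is an assumption of these replace semantics, not the driver's exact flow):

#include <linux/slab.h>
#include <linux/xarray.h>

static int demo_vhca_map_add(struct xarray *map, u16 vhca_id, u16 vport_num)
{
	u16 *entry, *old;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	*entry = vport_num;
	old = xa_store(map, vhca_id, entry, GFP_KERNEL);
	if (xa_is_err(old)) {
		kfree(entry);
		return xa_err(old);
	}
	kfree(old);	/* xa_store() replaces: drop any previous entry */
	return 0;
}

static void demo_vhca_map_del(struct xarray *map, u16 vhca_id)
{
	kfree(xa_erase(map, vhca_id));	/* NULL-safe */
}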
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
index c4de6bf8d1b6..cb1319974f83 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
@@ -475,7 +475,6 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
*conn->cq.mcq.arm_db = 0;
conn->cq.mcq.vector = 0;
conn->cq.mcq.comp = mlx5_fpga_conn_cq_complete;
- conn->cq.mcq.uar = fdev->conn_res.uar;
tasklet_setup(&conn->cq.tasklet, mlx5_fpga_conn_cq_tasklet);
mlx5_fpga_dbg(fdev, "Created CQ #0x%x\n", conn->cq.mcq.cqn);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 80245c38dbad..2db3ffb0a2b2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -2793,30 +2793,32 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
}
EXPORT_SYMBOL(mlx5_get_flow_namespace);
+struct mlx5_vport_acl_root_ns {
+ u16 vport_idx;
+ struct mlx5_flow_root_namespace *root_ns;
+};
+
struct mlx5_flow_namespace *
mlx5_get_flow_vport_namespace(struct mlx5_core_dev *dev,
enum mlx5_flow_namespace_type type, int vport_idx)
{
struct mlx5_flow_steering *steering = dev->priv.steering;
+ struct mlx5_vport_acl_root_ns *vport_ns;
if (!steering)
return NULL;
switch (type) {
case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
- if (vport_idx >= steering->esw_egress_acl_vports)
- return NULL;
- if (steering->esw_egress_root_ns &&
- steering->esw_egress_root_ns[vport_idx])
- return &steering->esw_egress_root_ns[vport_idx]->ns;
+ vport_ns = xa_load(&steering->esw_egress_root_ns, vport_idx);
+ if (vport_ns)
+ return &vport_ns->root_ns->ns;
else
return NULL;
case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
- if (vport_idx >= steering->esw_ingress_acl_vports)
- return NULL;
- if (steering->esw_ingress_root_ns &&
- steering->esw_ingress_root_ns[vport_idx])
- return &steering->esw_ingress_root_ns[vport_idx]->ns;
+ vport_ns = xa_load(&steering->esw_ingress_root_ns, vport_idx);
+ if (vport_ns)
+ return &vport_ns->root_ns->ns;
else
return NULL;
case MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_RX:
@@ -3575,118 +3577,102 @@ out_err:
return err;
}
-static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
+static void
+mlx5_fs_remove_vport_acl_root_ns(struct xarray *esw_acl_root_ns, u16 vport_idx)
{
- struct fs_prio *prio;
-
- steering->esw_egress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
- if (!steering->esw_egress_root_ns[vport])
- return -ENOMEM;
+ struct mlx5_vport_acl_root_ns *vport_ns;
- /* create 1 prio*/
- prio = fs_create_prio(&steering->esw_egress_root_ns[vport]->ns, 0, 1);
- return PTR_ERR_OR_ZERO(prio);
+ vport_ns = xa_erase(esw_acl_root_ns, vport_idx);
+ if (vport_ns) {
+ cleanup_root_ns(vport_ns->root_ns);
+ kfree(vport_ns);
+ }
}
-static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
+static int
+mlx5_fs_add_vport_acl_root_ns(struct mlx5_flow_steering *steering,
+ struct xarray *esw_acl_root_ns,
+ enum fs_flow_table_type table_type,
+ u16 vport_idx)
{
+ struct mlx5_vport_acl_root_ns *vport_ns;
struct fs_prio *prio;
+ int err;
- steering->esw_ingress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
- if (!steering->esw_ingress_root_ns[vport])
- return -ENOMEM;
+ /* Sanity check: only the intended xarrays are accepted */
+ if (WARN_ON(esw_acl_root_ns != &steering->esw_egress_root_ns &&
+ esw_acl_root_ns != &steering->esw_ingress_root_ns))
+ return -EINVAL;
- /* create 1 prio*/
- prio = fs_create_prio(&steering->esw_ingress_root_ns[vport]->ns, 0, 1);
- return PTR_ERR_OR_ZERO(prio);
-}
+ if (table_type != FS_FT_ESW_EGRESS_ACL &&
+ table_type != FS_FT_ESW_INGRESS_ACL) {
+ mlx5_core_err(steering->dev,
+ "Invalid table type %d for egress/ingress ACLs\n",
+ table_type);
+ return -EINVAL;
+ }
-int mlx5_fs_egress_acls_init(struct mlx5_core_dev *dev, int total_vports)
-{
- struct mlx5_flow_steering *steering = dev->priv.steering;
- int err;
- int i;
+ if (xa_load(esw_acl_root_ns, vport_idx))
+ return -EEXIST;
- steering->esw_egress_root_ns =
- kcalloc(total_vports,
- sizeof(*steering->esw_egress_root_ns),
- GFP_KERNEL);
- if (!steering->esw_egress_root_ns)
+ vport_ns = kzalloc(sizeof(*vport_ns), GFP_KERNEL);
+ if (!vport_ns)
return -ENOMEM;
- for (i = 0; i < total_vports; i++) {
- err = init_egress_acl_root_ns(steering, i);
- if (err)
- goto cleanup_root_ns;
+ vport_ns->root_ns = create_root_ns(steering, table_type);
+ if (!vport_ns->root_ns) {
+ err = -ENOMEM;
+ goto kfree_vport_ns;
+ }
+
+ /* create 1 prio */
+ prio = fs_create_prio(&vport_ns->root_ns->ns, 0, 1);
+ if (IS_ERR(prio)) {
+ err = PTR_ERR(prio);
+ goto cleanup_root_ns;
}
- steering->esw_egress_acl_vports = total_vports;
+
+ vport_ns->vport_idx = vport_idx;
+ err = xa_insert(esw_acl_root_ns, vport_idx, vport_ns, GFP_KERNEL);
+ if (err)
+ goto cleanup_root_ns;
return 0;
cleanup_root_ns:
- for (i--; i >= 0; i--)
- cleanup_root_ns(steering->esw_egress_root_ns[i]);
- kfree(steering->esw_egress_root_ns);
- steering->esw_egress_root_ns = NULL;
+ cleanup_root_ns(vport_ns->root_ns);
+kfree_vport_ns:
+ kfree(vport_ns);
return err;
}
-void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev)
+int mlx5_fs_vport_egress_acl_ns_add(struct mlx5_flow_steering *steering,
+ u16 vport_idx)
{
- struct mlx5_flow_steering *steering = dev->priv.steering;
- int i;
-
- if (!steering->esw_egress_root_ns)
- return;
-
- for (i = 0; i < steering->esw_egress_acl_vports; i++)
- cleanup_root_ns(steering->esw_egress_root_ns[i]);
-
- kfree(steering->esw_egress_root_ns);
- steering->esw_egress_root_ns = NULL;
+ return mlx5_fs_add_vport_acl_root_ns(steering,
+ &steering->esw_egress_root_ns,
+ FS_FT_ESW_EGRESS_ACL, vport_idx);
}
-int mlx5_fs_ingress_acls_init(struct mlx5_core_dev *dev, int total_vports)
+int mlx5_fs_vport_ingress_acl_ns_add(struct mlx5_flow_steering *steering,
+ u16 vport_idx)
{
- struct mlx5_flow_steering *steering = dev->priv.steering;
- int err;
- int i;
-
- steering->esw_ingress_root_ns =
- kcalloc(total_vports,
- sizeof(*steering->esw_ingress_root_ns),
- GFP_KERNEL);
- if (!steering->esw_ingress_root_ns)
- return -ENOMEM;
-
- for (i = 0; i < total_vports; i++) {
- err = init_ingress_acl_root_ns(steering, i);
- if (err)
- goto cleanup_root_ns;
- }
- steering->esw_ingress_acl_vports = total_vports;
- return 0;
-
-cleanup_root_ns:
- for (i--; i >= 0; i--)
- cleanup_root_ns(steering->esw_ingress_root_ns[i]);
- kfree(steering->esw_ingress_root_ns);
- steering->esw_ingress_root_ns = NULL;
- return err;
+ return mlx5_fs_add_vport_acl_root_ns(steering,
+ &steering->esw_ingress_root_ns,
+ FS_FT_ESW_INGRESS_ACL, vport_idx);
}
-void mlx5_fs_ingress_acls_cleanup(struct mlx5_core_dev *dev)
+void mlx5_fs_vport_egress_acl_ns_remove(struct mlx5_flow_steering *steering,
+ int vport_idx)
{
- struct mlx5_flow_steering *steering = dev->priv.steering;
- int i;
-
- if (!steering->esw_ingress_root_ns)
- return;
-
- for (i = 0; i < steering->esw_ingress_acl_vports; i++)
- cleanup_root_ns(steering->esw_ingress_root_ns[i]);
+ mlx5_fs_remove_vport_acl_root_ns(&steering->esw_egress_root_ns,
+ vport_idx);
+}
- kfree(steering->esw_ingress_root_ns);
- steering->esw_ingress_root_ns = NULL;
+void mlx5_fs_vport_ingress_acl_ns_remove(struct mlx5_flow_steering *steering,
+ int vport_idx)
+{
+ mlx5_fs_remove_vport_acl_root_ns(&steering->esw_ingress_root_ns,
+ vport_idx);
}
u32 mlx5_fs_get_capabilities(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type type)
@@ -3818,6 +3804,11 @@ void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev)
{
struct mlx5_flow_steering *steering = dev->priv.steering;
+ WARN_ON(!xa_empty(&steering->esw_egress_root_ns));
+ WARN_ON(!xa_empty(&steering->esw_ingress_root_ns));
+ xa_destroy(&steering->esw_egress_root_ns);
+ xa_destroy(&steering->esw_ingress_root_ns);
+
cleanup_root_ns(steering->root_ns);
cleanup_fdb_root_ns(steering);
cleanup_root_ns(steering->port_sel_root_ns);
@@ -3908,6 +3899,8 @@ int mlx5_fs_core_init(struct mlx5_core_dev *dev)
goto err;
}
+ xa_init(&steering->esw_egress_root_ns);
+ xa_init(&steering->esw_ingress_root_ns);
return 0;
err:
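
The per-vport ACL namespaces move from fixed arrays sized by total_vports to xarrays keyed by vport index, so entries can come and go as dynamic vports appear. xa_insert() is used rather than xa_store() because it refuses to overwrite an existing index; a sketch of the pattern, with the emptiness check mirroring the WARN_ON(!xa_empty(...)) at cleanup (struct and names illustrative):

#include <linux/slab.h>
#include <linux/xarray.h>

struct demo_ns {
	u16 vport_idx;
};

static int demo_ns_add(struct xarray *xa, u16 vport_idx)
{
	struct demo_ns *ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	int err;

	if (!ns)
		return -ENOMEM;

	ns->vport_idx = vport_idx;
	/* unlike xa_store(), xa_insert() fails with -EBUSY if occupied */
	err = xa_insert(xa, vport_idx, ns, GFP_KERNEL);
	if (err)
		kfree(ns);
	return err;
}

static void demo_ns_cleanup(struct xarray *xa)
{
	WARN_ON(!xa_empty(xa));	/* all entries should already be gone */
	xa_destroy(xa);
}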
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index e6a95b310b55..8458ce203dac 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -151,16 +151,14 @@ struct mlx5_flow_steering {
struct mlx5_flow_root_namespace *root_ns;
struct mlx5_flow_root_namespace *fdb_root_ns;
struct mlx5_flow_namespace **fdb_sub_ns;
- struct mlx5_flow_root_namespace **esw_egress_root_ns;
- struct mlx5_flow_root_namespace **esw_ingress_root_ns;
+ struct xarray esw_egress_root_ns;
+ struct xarray esw_ingress_root_ns;
struct mlx5_flow_root_namespace *sniffer_tx_root_ns;
struct mlx5_flow_root_namespace *sniffer_rx_root_ns;
struct mlx5_flow_root_namespace *rdma_rx_root_ns;
struct mlx5_flow_root_namespace *rdma_tx_root_ns;
struct mlx5_flow_root_namespace *egress_root_ns;
struct mlx5_flow_root_namespace *port_sel_root_ns;
- int esw_egress_acl_vports;
- int esw_ingress_acl_vports;
struct mlx5_flow_root_namespace **rdma_transport_rx_root_ns;
struct mlx5_flow_root_namespace **rdma_transport_tx_root_ns;
int rdma_transport_rx_vports;
@@ -379,10 +377,14 @@ void mlx5_fs_core_free(struct mlx5_core_dev *dev);
int mlx5_fs_core_init(struct mlx5_core_dev *dev);
void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev);
-int mlx5_fs_egress_acls_init(struct mlx5_core_dev *dev, int total_vports);
-void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev);
-int mlx5_fs_ingress_acls_init(struct mlx5_core_dev *dev, int total_vports);
-void mlx5_fs_ingress_acls_cleanup(struct mlx5_core_dev *dev);
+int mlx5_fs_vport_egress_acl_ns_add(struct mlx5_flow_steering *steering,
+ u16 vport_idx);
+int mlx5_fs_vport_ingress_acl_ns_add(struct mlx5_flow_steering *steering,
+ u16 vport_idx);
+void mlx5_fs_vport_egress_acl_ns_remove(struct mlx5_flow_steering *steering,
+ int vport_idx);
+void mlx5_fs_vport_ingress_acl_ns_remove(struct mlx5_flow_steering *steering,
+ int vport_idx);
u32 mlx5_fs_get_capabilities(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type type);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 57476487e31f..eeb4437975f2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -294,6 +294,12 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return err;
}
+ if (MLX5_CAP_GEN(dev, psp)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_PSP);
+ if (err)
+ return err;
+ }
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index 22995131824a..89e399606877 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -27,6 +27,7 @@ struct mlx5_fw_reset {
struct work_struct reset_reload_work;
struct work_struct reset_now_work;
struct work_struct reset_abort_work;
+ struct delayed_work reset_timeout_work;
unsigned long reset_flags;
u8 reset_method;
struct timer_list timer;
@@ -259,6 +260,8 @@ static int mlx5_sync_reset_clear_reset_requested(struct mlx5_core_dev *dev, bool
return -EALREADY;
}
+ if (current_work() != &fw_reset->reset_timeout_work.work)
+ cancel_delayed_work(&fw_reset->reset_timeout_work);
mlx5_stop_sync_reset_poll(dev);
if (poll_health)
mlx5_start_health_poll(dev);
@@ -330,6 +333,11 @@ static int mlx5_sync_reset_set_reset_requested(struct mlx5_core_dev *dev)
}
mlx5_stop_health_poll(dev, true);
mlx5_start_sync_reset_poll(dev);
+
+ if (!test_bit(MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS,
+ &fw_reset->reset_flags))
+ schedule_delayed_work(&fw_reset->reset_timeout_work,
+ msecs_to_jiffies(mlx5_tout_ms(dev, PCI_SYNC_UPDATE)));
return 0;
}
@@ -739,6 +747,19 @@ static void mlx5_sync_reset_events_handle(struct mlx5_fw_reset *fw_reset, struct
}
}
+static void mlx5_sync_reset_timeout_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = container_of(work, struct delayed_work,
+ work);
+ struct mlx5_fw_reset *fw_reset =
+ container_of(dwork, struct mlx5_fw_reset, reset_timeout_work);
+ struct mlx5_core_dev *dev = fw_reset->dev;
+
+ if (mlx5_sync_reset_clear_reset_requested(dev, true))
+ return;
+ mlx5_core_warn(dev, "PCI Sync FW Update Reset Timeout.\n");
+}
+
static int fw_reset_event_notifier(struct notifier_block *nb, unsigned long action, void *data)
{
struct mlx5_fw_reset *fw_reset = mlx5_nb_cof(nb, struct mlx5_fw_reset, nb);
@@ -822,6 +843,7 @@ void mlx5_drain_fw_reset(struct mlx5_core_dev *dev)
cancel_work_sync(&fw_reset->reset_reload_work);
cancel_work_sync(&fw_reset->reset_now_work);
cancel_work_sync(&fw_reset->reset_abort_work);
+ cancel_delayed_work(&fw_reset->reset_timeout_work);
}
static const struct devlink_param mlx5_fw_reset_devlink_params[] = {
@@ -865,6 +887,8 @@ int mlx5_fw_reset_init(struct mlx5_core_dev *dev)
INIT_WORK(&fw_reset->reset_reload_work, mlx5_sync_reset_reload_work);
INIT_WORK(&fw_reset->reset_now_work, mlx5_sync_reset_now_event);
INIT_WORK(&fw_reset->reset_abort_work, mlx5_sync_reset_abort_event);
+ INIT_DELAYED_WORK(&fw_reset->reset_timeout_work,
+ mlx5_sync_reset_timeout_work);
init_completion(&fw_reset->done);
return 0;
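
The reset flow above arms a delayed work when a reset request is latched and cancels it once the request is cleared; the current_work() test keeps the timeout handler from trying to cancel itself. A minimal sketch of the pattern, assuming a hypothetical context struct:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct demo_reset {
	struct delayed_work timeout_work;	/* INIT_DELAYED_WORK() at setup */
};

static void demo_reset_timeout(struct work_struct *work)
{
	struct demo_reset *ctx = container_of(to_delayed_work(work),
					      struct demo_reset, timeout_work);

	/* the request never completed in time: clear it here */
	(void)ctx;
}

static void demo_arm_timeout(struct demo_reset *ctx, unsigned int timeout_ms)
{
	schedule_delayed_work(&ctx->timeout_work, msecs_to_jiffies(timeout_ms));
}

static void demo_clear_request(struct demo_reset *ctx)
{
	/* skip self-cancel when called from the handler itself */
	if (current_work() != &ctx->timeout_work.work)
		cancel_delayed_work(&ctx->timeout_work);
}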
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index cf7a1edd0530..aeeb136f5ebc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -669,57 +669,64 @@ static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work)
}
}
+#define MLX5_FW_REPORTER_ECPF_GRACEFUL_PERIOD 180000
+#define MLX5_FW_REPORTER_PF_GRACEFUL_PERIOD 60000
+#define MLX5_FW_REPORTER_VF_GRACEFUL_PERIOD 30000
+#define MLX5_FW_REPORTER_DEFAULT_GRACEFUL_PERIOD \
+ MLX5_FW_REPORTER_VF_GRACEFUL_PERIOD
+
+static
+const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_ecpf_ops = {
+ .name = "fw_fatal",
+ .recover = mlx5_fw_fatal_reporter_recover,
+ .dump = mlx5_fw_fatal_reporter_dump,
+ .default_graceful_period =
+ MLX5_FW_REPORTER_ECPF_GRACEFUL_PERIOD,
+};
+
static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_pf_ops = {
.name = "fw_fatal",
.recover = mlx5_fw_fatal_reporter_recover,
.dump = mlx5_fw_fatal_reporter_dump,
+ .default_graceful_period = MLX5_FW_REPORTER_PF_GRACEFUL_PERIOD,
};
static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_ops = {
.name = "fw_fatal",
.recover = mlx5_fw_fatal_reporter_recover,
+ .default_graceful_period =
+ MLX5_FW_REPORTER_DEFAULT_GRACEFUL_PERIOD,
};
-#define MLX5_FW_REPORTER_ECPF_GRACEFUL_PERIOD 180000
-#define MLX5_FW_REPORTER_PF_GRACEFUL_PERIOD 60000
-#define MLX5_FW_REPORTER_VF_GRACEFUL_PERIOD 30000
-#define MLX5_FW_REPORTER_DEFAULT_GRACEFUL_PERIOD MLX5_FW_REPORTER_VF_GRACEFUL_PERIOD
-
void mlx5_fw_reporters_create(struct mlx5_core_dev *dev)
{
const struct devlink_health_reporter_ops *fw_fatal_ops;
struct mlx5_core_health *health = &dev->priv.health;
const struct devlink_health_reporter_ops *fw_ops;
struct devlink *devlink = priv_to_devlink(dev);
- u64 grace_period;
- fw_fatal_ops = &mlx5_fw_fatal_reporter_pf_ops;
fw_ops = &mlx5_fw_reporter_pf_ops;
if (mlx5_core_is_ecpf(dev)) {
- grace_period = MLX5_FW_REPORTER_ECPF_GRACEFUL_PERIOD;
+ fw_fatal_ops = &mlx5_fw_fatal_reporter_ecpf_ops;
} else if (mlx5_core_is_pf(dev)) {
- grace_period = MLX5_FW_REPORTER_PF_GRACEFUL_PERIOD;
+ fw_fatal_ops = &mlx5_fw_fatal_reporter_pf_ops;
} else {
/* VF or SF */
- grace_period = MLX5_FW_REPORTER_DEFAULT_GRACEFUL_PERIOD;
fw_fatal_ops = &mlx5_fw_fatal_reporter_ops;
fw_ops = &mlx5_fw_reporter_ops;
}
- health->fw_reporter =
- devl_health_reporter_create(devlink, fw_ops, 0, dev);
+ health->fw_reporter = devl_health_reporter_create(devlink, fw_ops, dev);
if (IS_ERR(health->fw_reporter))
- mlx5_core_warn(dev, "Failed to create fw reporter, err = %ld\n",
- PTR_ERR(health->fw_reporter));
-
- health->fw_fatal_reporter =
- devl_health_reporter_create(devlink,
- fw_fatal_ops,
- grace_period,
- dev);
+ mlx5_core_warn(dev, "Failed to create fw reporter, err = %pe\n",
+ health->fw_reporter);
+
+ health->fw_fatal_reporter = devl_health_reporter_create(devlink,
+ fw_fatal_ops,
+ dev);
if (IS_ERR(health->fw_fatal_reporter))
- mlx5_core_warn(dev, "Failed to create fw fatal reporter, err = %ld\n",
- PTR_ERR(health->fw_fatal_reporter));
+ mlx5_core_warn(dev, "Failed to create fw fatal reporter, err = %pe\n",
+ health->fw_fatal_reporter);
}
static void mlx5_fw_reporters_destroy(struct mlx5_core_dev *dev)
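
With the graceful period folded into devlink_health_reporter_ops as default_graceful_period, each device flavor gets its own const ops table and devl_health_reporter_create() loses the runtime argument, as the hunk above shows. A minimal sketch of the new shape (callback body and values hypothetical):

#include <linux/err.h>
#include <linux/printk.h>
#include <net/devlink.h>

static int demo_recover(struct devlink_health_reporter *reporter,
			void *priv_ctx, struct netlink_ext_ack *extack)
{
	return 0;	/* hypothetical: nothing to recover in the demo */
}

static const struct devlink_health_reporter_ops demo_fatal_ops = {
	.name = "fw_fatal",
	.recover = demo_recover,
	.default_graceful_period = 60000,	/* ms, as for a PF */
};

static void demo_create_reporter(struct devlink *devlink, void *priv)
{
	struct devlink_health_reporter *r;

	r = devl_health_reporter_create(devlink, &demo_fatal_ops, priv);
	if (IS_ERR(r))
		pr_warn("reporter create failed (err=%pe)\n", r);
}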
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
index 82d3c2568244..14d339eceb92 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
@@ -150,8 +150,8 @@ mlx5_irq_affinity_request(struct mlx5_core_dev *dev, struct mlx5_irq_pool *pool,
if (IS_ERR(new_irq)) {
if (!least_loaded_irq) {
/* We failed to create an IRQ and we didn't find an IRQ */
- mlx5_core_err(pool->dev, "Didn't find a matching IRQ. err = %ld\n",
- PTR_ERR(new_irq));
+ mlx5_core_err(pool->dev, "Didn't find a matching IRQ. err = %pe\n",
+ new_irq);
mutex_unlock(&pool->lock);
return new_irq;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index d058cbb4a00c..59c00c911275 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -35,6 +35,7 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/vport.h>
+#include "lib/mlx5.h"
#include "lib/devcom.h"
#include "mlx5_core.h"
#include "eswitch.h"
@@ -231,9 +232,13 @@ static void mlx5_do_bond_work(struct work_struct *work);
static void mlx5_ldev_free(struct kref *ref)
{
struct mlx5_lag *ldev = container_of(ref, struct mlx5_lag, ref);
+ struct net *net;
+
+ if (ldev->nb.notifier_call) {
+ net = read_pnet(&ldev->net);
+ unregister_netdevice_notifier_net(net, &ldev->nb);
+ }
- if (ldev->nb.notifier_call)
- unregister_netdevice_notifier_net(&init_net, &ldev->nb);
mlx5_lag_mp_cleanup(ldev);
cancel_delayed_work_sync(&ldev->bond_work);
destroy_workqueue(ldev->wq);
@@ -271,7 +276,8 @@ static struct mlx5_lag *mlx5_lag_dev_alloc(struct mlx5_core_dev *dev)
INIT_DELAYED_WORK(&ldev->bond_work, mlx5_do_bond_work);
ldev->nb.notifier_call = mlx5_lag_netdev_event;
- if (register_netdevice_notifier_net(&init_net, &ldev->nb)) {
+ write_pnet(&ldev->net, mlx5_core_net(dev));
+ if (register_netdevice_notifier_net(read_pnet(&ldev->net), &ldev->nb)) {
ldev->nb.notifier_call = NULL;
mlx5_core_err(dev, "Failed to register LAG netdev notifier\n");
}
@@ -1404,6 +1410,36 @@ static int __mlx5_lag_dev_add_mdev(struct mlx5_core_dev *dev)
return 0;
}
+static void mlx5_lag_unregister_hca_devcom_comp(struct mlx5_core_dev *dev)
+{
+ mlx5_devcom_unregister_component(dev->priv.hca_devcom_comp);
+}
+
+static int mlx5_lag_register_hca_devcom_comp(struct mlx5_core_dev *dev)
+{
+ struct mlx5_devcom_match_attr attr = {
+ .key.val = mlx5_query_nic_system_image_guid(dev),
+ .flags = MLX5_DEVCOM_MATCH_FLAGS_NS,
+ .net = mlx5_core_net(dev),
+ };
+
+ /* This component is used to sync adding core_dev to lag_dev and to sync
+ * changes of mlx5_adev_devices between the LAG layer and other layers.
+ */
+ dev->priv.hca_devcom_comp =
+ mlx5_devcom_register_component(dev->priv.devc,
+ MLX5_DEVCOM_HCA_PORTS,
+ &attr, NULL, dev);
+ if (IS_ERR(dev->priv.hca_devcom_comp)) {
+ mlx5_core_err(dev,
+ "Failed to register devcom HCA component, err: %ld\n",
+ PTR_ERR(dev->priv.hca_devcom_comp));
+ return PTR_ERR(dev->priv.hca_devcom_comp);
+ }
+
+ return 0;
+}
+
void mlx5_lag_remove_mdev(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
@@ -1425,6 +1461,7 @@ recheck:
}
mlx5_ldev_remove_mdev(ldev, dev);
mutex_unlock(&ldev->lock);
+ mlx5_lag_unregister_hca_devcom_comp(dev);
mlx5_ldev_put(ldev);
}
@@ -1435,7 +1472,7 @@ void mlx5_lag_add_mdev(struct mlx5_core_dev *dev)
if (!mlx5_lag_is_supported(dev))
return;
- if (IS_ERR_OR_NULL(dev->priv.hca_devcom_comp))
+ if (mlx5_lag_register_hca_devcom_comp(dev))
return;
recheck:
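Recording the namespace with write_pnet() at registration and reading it back with read_pnet() at teardown is what lets the notifier move off &init_net safely: unregistration must target the same namespace the notifier was registered in. A minimal sketch of the pairing, assuming a structure with a possible_net_t member like the one added to struct mlx5_lag below:

	struct example_dev {
		possible_net_t net;
		struct notifier_block nb;
	};

	static int example_register(struct example_dev *e, struct net *net)
	{
		write_pnet(&e->net, net); /* remember the namespace */
		return register_netdevice_notifier_net(read_pnet(&e->net),
						       &e->nb);
	}

	static void example_unregister(struct example_dev *e)
	{
		/* unregister from the namespace recorded at registration */
		unregister_netdevice_notifier_net(read_pnet(&e->net), &e->nb);
	}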
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
index c2f256bb2bc2..4918eee2b3da 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
@@ -67,6 +67,7 @@ struct mlx5_lag {
struct workqueue_struct *wq;
struct delayed_work bond_work;
struct notifier_block nb;
+ possible_net_t net;
struct lag_mp lag_mp;
struct mlx5_lag_port_sel port_sel;
/* Protect lag fields/state changes */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
index 58bd749b5e4d..129725159a93 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
@@ -100,7 +100,7 @@ static int create_aso_cq(struct mlx5_aso_cq *cq, void *cqc_data)
MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
- MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
+ MLX5_SET(cqc, cqc, uar_page, mdev->priv.bfreg.up->index);
MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
@@ -129,7 +129,7 @@ static int mlx5_aso_create_cq(struct mlx5_core_dev *mdev, int numa_node,
return -ENOMEM;
MLX5_SET(cqc, cqc_data, log_cq_size, 1);
- MLX5_SET(cqc, cqc_data, uar_page, mdev->priv.uar->index);
+ MLX5_SET(cqc, cqc_data, uar_page, mdev->priv.bfreg.up->index);
if (MLX5_CAP_GEN(mdev, cqe_128_always) && cache_line_size() >= 128)
MLX5_SET(cqc, cqc_data, cqe_sz, CQE_STRIDE_128_PAD);
@@ -163,7 +163,7 @@ static int mlx5_aso_alloc_sq(struct mlx5_core_dev *mdev, int numa_node,
struct mlx5_wq_param param;
int err;
- sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
+ sq->uar_map = mdev->priv.bfreg.map;
param.db_numa_node = numa_node;
param.buf_numa_node = numa_node;
@@ -203,7 +203,7 @@ static int create_aso_sq(struct mlx5_core_dev *mdev, int pdn,
MLX5_SET(sqc, sqc, ts_format, ts_format);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
- MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.hw_objs.bfreg.index);
+ MLX5_SET(wq, wq, uar_page, mdev->priv.bfreg.index);
MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index 214d732d18e9..d0ba83d77cd1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -247,27 +247,24 @@ static bool mlx5_is_ptm_source_time_available(struct mlx5_core_dev *dev)
return !!MLX5_GET(mtptm_reg, out, psta);
}
-static int mlx5_mtctr_syncdevicetime(ktime_t *device_time,
- struct system_counterval_t *sys_counterval,
- void *ctx)
+static int mlx5_mtctr_read(struct mlx5_core_dev *mdev,
+ bool real_time_mode,
+ struct system_counterval_t *sys_counterval,
+ u64 *device)
{
u32 out[MLX5_ST_SZ_DW(mtctr_reg)] = {0};
u32 in[MLX5_ST_SZ_DW(mtctr_reg)] = {0};
- struct mlx5_core_dev *mdev = ctx;
- bool real_time_mode;
- u64 host, device;
+ u64 host;
int err;
- real_time_mode = mlx5_real_time_mode(mdev);
-
MLX5_SET(mtctr_reg, in, first_clock_timestamp_request,
MLX5_MTCTR_REQUEST_PTM_ROOT_CLOCK);
MLX5_SET(mtctr_reg, in, second_clock_timestamp_request,
real_time_mode ? MLX5_MTCTR_REQUEST_REAL_TIME_CLOCK :
- MLX5_MTCTR_REQUEST_FREE_RUNNING_COUNTER);
+ MLX5_MTCTR_REQUEST_FREE_RUNNING_COUNTER);
- err = mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out), MLX5_REG_MTCTR,
- 0, 0);
+ err = mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
+ MLX5_REG_MTCTR, 0, 0);
if (err)
return err;
@@ -281,8 +278,26 @@ static int mlx5_mtctr_syncdevicetime(ktime_t *device_time,
.cs_id = CSID_X86_ART,
.use_nsecs = true,
};
+ *device = MLX5_GET64(mtctr_reg, out, second_clock_timestamp);
+
+ return 0;
+}
+
+static int mlx5_mtctr_syncdevicetime(ktime_t *device_time,
+ struct system_counterval_t *sys_counterval,
+ void *ctx)
+{
+ struct mlx5_core_dev *mdev = ctx;
+ bool real_time_mode;
+ u64 device;
+ int err;
+
+ real_time_mode = mlx5_real_time_mode(mdev);
+
+ err = mlx5_mtctr_read(mdev, real_time_mode, sys_counterval, &device);
+ if (err)
+ return err;
- device = MLX5_GET64(mtctr_reg, out, second_clock_timestamp);
if (real_time_mode)
*device_time = ns_to_ktime(REAL_TIME_TO_NS(device >> 32, device & U32_MAX));
else
@@ -291,6 +306,23 @@ static int mlx5_mtctr_syncdevicetime(ktime_t *device_time,
return 0;
}
+static int
+mlx5_mtctr_syncdevicecyclestime(ktime_t *device_time,
+ struct system_counterval_t *sys_counterval,
+ void *ctx)
+{
+ struct mlx5_core_dev *mdev = ctx;
+ u64 device;
+ int err;
+
+ err = mlx5_mtctr_read(mdev, false, sys_counterval, &device);
+ if (err)
+ return err;
+ *device_time = ns_to_ktime(device);
+
+ return 0;
+}
+
static int mlx5_ptp_getcrosststamp(struct ptp_clock_info *ptp,
struct system_device_crosststamp *cts)
{
@@ -315,6 +347,32 @@ unlock:
mlx5_clock_unlock(clock);
return err;
}
+
+static int mlx5_ptp_getcrosscycles(struct ptp_clock_info *ptp,
+ struct system_device_crosststamp *cts)
+{
+ struct mlx5_clock *clock =
+ container_of(ptp, struct mlx5_clock, ptp_info);
+ struct system_time_snapshot history_begin = {0};
+ struct mlx5_core_dev *mdev;
+ int err;
+
+ mlx5_clock_lock(clock);
+ mdev = mlx5_clock_mdev_get(clock);
+
+ if (!mlx5_is_ptm_source_time_available(mdev)) {
+ err = -EBUSY;
+ goto unlock;
+ }
+
+ ktime_get_snapshot(&history_begin);
+
+ err = get_device_system_crosststamp(mlx5_mtctr_syncdevicecyclestime,
+ mdev, &history_begin, cts);
+unlock:
+ mlx5_clock_unlock(clock);
+ return err;
+}
#endif /* CONFIG_X86 */
static u64 mlx5_read_time(struct mlx5_core_dev *dev,
@@ -513,6 +571,24 @@ out:
return 0;
}
+static int mlx5_ptp_getcyclesx(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
+ ptp_info);
+ struct mlx5_core_dev *mdev;
+ u64 cycles;
+
+ mlx5_clock_lock(clock);
+ mdev = mlx5_clock_mdev_get(clock);
+
+ cycles = mlx5_read_time(mdev, sts, false);
+ *ts = ns_to_timespec64(cycles);
+ mlx5_clock_unlock(clock);
+ return 0;
+}
+
static int mlx5_ptp_adjtime_real_time(struct mlx5_core_dev *mdev, s64 delta)
{
u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};
@@ -1229,6 +1305,7 @@ static void mlx5_init_timer_max_freq_adjustment(struct mlx5_core_dev *mdev)
static void mlx5_init_timer_clock(struct mlx5_core_dev *mdev)
{
struct mlx5_clock *clock = mdev->clock;
+ bool expose_cycles;
/* Configure the PHC */
clock->ptp_info = mlx5_ptp_clock_info;
@@ -1236,12 +1313,22 @@ static void mlx5_init_timer_clock(struct mlx5_core_dev *mdev)
if (MLX5_CAP_MCAM_REG(mdev, mtutc))
mlx5_init_timer_max_freq_adjustment(mdev);
+ expose_cycles = !MLX5_CAP_GEN(mdev, disciplined_fr_counter) ||
+ !mlx5_real_time_mode(mdev);
+
#ifdef CONFIG_X86
if (MLX5_CAP_MCAM_REG3(mdev, mtptm) &&
- MLX5_CAP_MCAM_REG3(mdev, mtctr) && boot_cpu_has(X86_FEATURE_ART))
+ MLX5_CAP_MCAM_REG3(mdev, mtctr) && boot_cpu_has(X86_FEATURE_ART)) {
clock->ptp_info.getcrosststamp = mlx5_ptp_getcrosststamp;
+ if (expose_cycles)
+ clock->ptp_info.getcrosscycles =
+ mlx5_ptp_getcrosscycles;
+ }
#endif /* CONFIG_X86 */
+ if (expose_cycles)
+ clock->ptp_info.getcyclesx64 = mlx5_ptp_getcyclesx;
+
mlx5_timecounter_init(mdev);
mlx5_init_clock_info(mdev);
mlx5_init_overflow_period(mdev);
@@ -1278,9 +1365,9 @@ static void mlx5_init_clock_dev(struct mlx5_core_dev *mdev)
clock->ptp = ptp_clock_register(&clock->ptp_info,
clock->shared ? NULL : &mdev->pdev->dev);
if (IS_ERR(clock->ptp)) {
- mlx5_core_warn(mdev, "%sptp_clock_register failed %ld\n",
+ mlx5_core_warn(mdev, "%sptp_clock_register failed %pe\n",
clock->shared ? "shared clock " : "",
- PTR_ERR(clock->ptp));
+ clock->ptp);
clock->ptp = NULL;
}
@@ -1348,14 +1435,20 @@ static int mlx5_clock_alloc(struct mlx5_core_dev *mdev, bool shared)
static void mlx5_shared_clock_register(struct mlx5_core_dev *mdev, u64 key)
{
struct mlx5_core_dev *peer_dev, *next = NULL;
+ struct mlx5_devcom_match_attr attr = {
+ .key.val = key,
+ };
+ struct mlx5_devcom_comp_dev *compd;
struct mlx5_devcom_comp_dev *pos;
- mdev->clock_state->compdev = mlx5_devcom_register_component(mdev->priv.devc,
- MLX5_DEVCOM_SHARED_CLOCK,
- key, NULL, mdev);
- if (IS_ERR(mdev->clock_state->compdev))
+ compd = mlx5_devcom_register_component(mdev->priv.devc,
+ MLX5_DEVCOM_SHARED_CLOCK,
+ &attr, NULL, mdev);
+ if (IS_ERR(compd))
return;
+ mdev->clock_state->compdev = compd;
+
mlx5_devcom_comp_lock(mdev->clock_state->compdev);
mlx5_devcom_for_each_peer_entry(mdev->clock_state->compdev, peer_dev, pos) {
if (peer_dev->clock) {
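ptp_clock_info gains two optional callbacks here: getcyclesx64 (raw free-running cycles) and getcrosscycles (free-running cycles cross-timestamped against ART). The expose_cycles gate keeps both hidden when the free-running counter is itself disciplined to real time, where raw cycles add nothing. A condensed sketch of that gating decision, reusing the two capability helpers visible above:

	/* Sketch: publish raw-cycle callbacks only when the free-running
	 * counter is not already disciplined, or real-time mode is off. */
	static bool example_expose_cycles(struct mlx5_core_dev *mdev)
	{
		return !MLX5_CAP_GEN(mdev, disciplined_fr_counter) ||
		       !mlx5_real_time_mode(mdev);
	}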
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.h
index c819c047bb9c..4821163a547f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.h
@@ -8,6 +8,7 @@ enum {
MLX5_ACCEL_OBJ_TLS_KEY = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_TLS,
MLX5_ACCEL_OBJ_IPSEC_KEY = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_IPSEC,
MLX5_ACCEL_OBJ_MACSEC_KEY = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_MACSEC,
+ MLX5_ACCEL_OBJ_PSP_KEY = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_PSP,
MLX5_ACCEL_OBJ_TYPE_KEY_NUM,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
index 7b0766c89f4c..faa2833602c8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
@@ -4,6 +4,7 @@
#include <linux/mlx5/vport.h>
#include <linux/list.h>
#include "lib/devcom.h"
+#include "lib/mlx5.h"
#include "mlx5_core.h"
static LIST_HEAD(devcom_dev_list);
@@ -22,11 +23,17 @@ struct mlx5_devcom_dev {
struct kref ref;
};
+struct mlx5_devcom_key {
+ u32 flags;
+ union mlx5_devcom_match_key key;
+ possible_net_t net;
+};
+
struct mlx5_devcom_comp {
struct list_head comp_list;
enum mlx5_devcom_component id;
- u64 key;
struct list_head comp_dev_list_head;
+ struct mlx5_devcom_key key;
mlx5_devcom_event_handler_t handler;
struct kref ref;
bool ready;
@@ -108,7 +115,8 @@ void mlx5_devcom_unregister_device(struct mlx5_devcom_dev *devc)
}
static struct mlx5_devcom_comp *
-mlx5_devcom_comp_alloc(u64 id, u64 key, mlx5_devcom_event_handler_t handler)
+mlx5_devcom_comp_alloc(u64 id, const struct mlx5_devcom_match_attr *attr,
+ mlx5_devcom_event_handler_t handler)
{
struct mlx5_devcom_comp *comp;
@@ -117,7 +125,10 @@ mlx5_devcom_comp_alloc(u64 id, u64 key, mlx5_devcom_event_handler_t handler)
return ERR_PTR(-ENOMEM);
comp->id = id;
- comp->key = key;
+ comp->key.key = attr->key;
+ comp->key.flags = attr->flags;
+ if (attr->flags & MLX5_DEVCOM_MATCH_FLAGS_NS)
+ write_pnet(&comp->key.net, attr->net);
comp->handler = handler;
init_rwsem(&comp->sem);
lockdep_register_key(&comp->lock_key);
@@ -180,21 +191,34 @@ devcom_free_comp_dev(struct mlx5_devcom_comp_dev *devcom)
static bool
devcom_component_equal(struct mlx5_devcom_comp *devcom,
enum mlx5_devcom_component id,
- u64 key)
+ const struct mlx5_devcom_match_attr *attr)
{
- return devcom->id == id && devcom->key == key;
+ if (devcom->id != id)
+ return false;
+
+ if (devcom->key.flags != attr->flags)
+ return false;
+
+ if (memcmp(&devcom->key.key, &attr->key, sizeof(devcom->key.key)))
+ return false;
+
+ if (devcom->key.flags & MLX5_DEVCOM_MATCH_FLAGS_NS &&
+ !net_eq(read_pnet(&devcom->key.net), attr->net))
+ return false;
+
+ return true;
}
static struct mlx5_devcom_comp *
devcom_component_get(struct mlx5_devcom_dev *devc,
enum mlx5_devcom_component id,
- u64 key,
+ const struct mlx5_devcom_match_attr *attr,
mlx5_devcom_event_handler_t handler)
{
struct mlx5_devcom_comp *comp;
devcom_for_each_component(comp) {
- if (devcom_component_equal(comp, id, key)) {
+ if (devcom_component_equal(comp, id, attr)) {
if (handler == comp->handler) {
kref_get(&comp->ref);
return comp;
@@ -212,7 +236,7 @@ devcom_component_get(struct mlx5_devcom_dev *devc,
struct mlx5_devcom_comp_dev *
mlx5_devcom_register_component(struct mlx5_devcom_dev *devc,
enum mlx5_devcom_component id,
- u64 key,
+ const struct mlx5_devcom_match_attr *attr,
mlx5_devcom_event_handler_t handler,
void *data)
{
@@ -223,14 +247,14 @@ mlx5_devcom_register_component(struct mlx5_devcom_dev *devc,
return ERR_PTR(-EINVAL);
mutex_lock(&comp_list_lock);
- comp = devcom_component_get(devc, id, key, handler);
+ comp = devcom_component_get(devc, id, attr, handler);
if (IS_ERR(comp)) {
devcom = ERR_PTR(-EINVAL);
goto out_unlock;
}
if (!comp) {
- comp = mlx5_devcom_comp_alloc(id, key, handler);
+ comp = mlx5_devcom_comp_alloc(id, attr, handler);
if (IS_ERR(comp)) {
devcom = ERR_CAST(comp);
goto out_unlock;
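devcom registration now matches on the full (id, flags, key, namespace) tuple rather than a bare u64 key. A usage sketch with a hypothetical 64-bit identity value; when MLX5_DEVCOM_MATCH_FLAGS_NS is set, two registrations pair up only if their namespaces also compare equal under net_eq():

	struct mlx5_devcom_match_attr attr = {
		.key.val = guid, /* hypothetical identity, e.g. a system image GUID */
		.flags = MLX5_DEVCOM_MATCH_FLAGS_NS,
		.net = mlx5_core_net(dev),
	};
	struct mlx5_devcom_comp_dev *comp;

	comp = mlx5_devcom_register_component(dev->priv.devc,
					      MLX5_DEVCOM_HCA_PORTS,
					      &attr, NULL, dev);
	if (IS_ERR(comp))
		return PTR_ERR(comp);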
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
index c79699b94a02..609c85f47917 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
@@ -6,6 +6,20 @@
#include <linux/mlx5/driver.h>
+enum mlx5_devcom_match_flags {
+ MLX5_DEVCOM_MATCH_FLAGS_NS = BIT(0),
+};
+
+union mlx5_devcom_match_key {
+ u64 val;
+};
+
+struct mlx5_devcom_match_attr {
+ u32 flags;
+ union mlx5_devcom_match_key key;
+ struct net *net;
+};
+
enum mlx5_devcom_component {
MLX5_DEVCOM_ESW_OFFLOADS,
MLX5_DEVCOM_MPV,
@@ -25,7 +39,7 @@ void mlx5_devcom_unregister_device(struct mlx5_devcom_dev *devc);
struct mlx5_devcom_comp_dev *
mlx5_devcom_register_component(struct mlx5_devcom_dev *devc,
enum mlx5_devcom_component id,
- u64 key,
+ const struct mlx5_devcom_match_attr *attr,
mlx5_devcom_event_handler_t handler,
void *data);
void mlx5_devcom_unregister_component(struct mlx5_devcom_comp_dev *devcom);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
index ca9ecec358b2..7adad784ad46 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
@@ -9,7 +9,7 @@
#include "mlx5_core.h"
#include "lib/fs_ttc.h"
-#define MLX5_TTC_MAX_NUM_GROUPS 4
+#define MLX5_TTC_MAX_NUM_GROUPS 7
#define MLX5_TTC_GROUP_TCPUDP_SIZE (MLX5_TT_IPV6_UDP + 1)
struct mlx5_fs_ttc_groups {
@@ -31,10 +31,14 @@ static int mlx5_fs_ttc_table_size(const struct mlx5_fs_ttc_groups *groups)
/* L3/L4 traffic type classifier */
struct mlx5_ttc_table {
int num_groups;
+ const struct mlx5_fs_ttc_groups *groups;
+ struct mlx5_core_dev *mdev;
struct mlx5_flow_table *t;
struct mlx5_flow_group **g;
struct mlx5_ttc_rule rules[MLX5_NUM_TT];
struct mlx5_flow_handle *tunnel_rules[MLX5_NUM_TUNNEL_TT];
+ u32 refcnt;
+ struct mutex mutex; /* Protect adding rules for ipsec crypto offload */
};
struct mlx5_flow_table *mlx5_get_ttc_flow_table(struct mlx5_ttc_table *ttc)
@@ -163,6 +167,8 @@ static struct mlx5_etype_proto ttc_tunnel_rules[] = {
enum TTC_GROUP_TYPE {
TTC_GROUPS_DEFAULT = 0,
TTC_GROUPS_USE_L4_TYPE = 1,
+ TTC_GROUPS_DEFAULT_ESP = 2,
+ TTC_GROUPS_USE_L4_TYPE_ESP = 3,
};
static const struct mlx5_fs_ttc_groups ttc_groups[] = {
@@ -184,6 +190,31 @@ static const struct mlx5_fs_ttc_groups ttc_groups[] = {
BIT(0),
},
},
+ [TTC_GROUPS_DEFAULT_ESP] = {
+ .num_groups = 6,
+ .group_size = {
+ MLX5_TTC_GROUP_TCPUDP_SIZE + BIT(1) +
+ MLX5_NUM_TUNNEL_TT,
+ BIT(2), /* decrypted outer L4 */
+ BIT(2), /* decrypted inner L4 */
+ BIT(1), /* ESP */
+ BIT(1),
+ BIT(0),
+ },
+ },
+ [TTC_GROUPS_USE_L4_TYPE_ESP] = {
+ .use_l4_type = true,
+ .num_groups = 7,
+ .group_size = {
+ MLX5_TTC_GROUP_TCPUDP_SIZE,
+ BIT(1) + MLX5_NUM_TUNNEL_TT,
+ BIT(2), /* decrypted outer L4 */
+ BIT(2), /* decrypted inner L4 */
+ BIT(1), /* ESP */
+ BIT(1),
+ BIT(0),
+ },
+ },
};
static const struct mlx5_fs_ttc_groups inner_ttc_groups[] = {
@@ -207,6 +238,23 @@ static const struct mlx5_fs_ttc_groups inner_ttc_groups[] = {
},
};
+static const struct mlx5_fs_ttc_groups *
+mlx5_ttc_get_fs_groups(bool use_l4_type, bool ipsec_rss)
+{
+ if (!ipsec_rss)
+ return use_l4_type ? &ttc_groups[TTC_GROUPS_USE_L4_TYPE] :
+ &ttc_groups[TTC_GROUPS_DEFAULT];
+
+ return use_l4_type ? &ttc_groups[TTC_GROUPS_USE_L4_TYPE_ESP] :
+ &ttc_groups[TTC_GROUPS_DEFAULT_ESP];
+}
+
+bool mlx5_ttc_has_esp_flow_group(struct mlx5_ttc_table *ttc)
+{
+ return ttc->groups == &ttc_groups[TTC_GROUPS_DEFAULT_ESP] ||
+ ttc->groups == &ttc_groups[TTC_GROUPS_USE_L4_TYPE_ESP];
+}
+
u8 mlx5_get_proto_by_tunnel_type(enum mlx5_tunnel_types tt)
{
return ttc_tunnel_rules[tt].proto;
@@ -257,6 +305,31 @@ static u8 mlx5_etype_to_ipv(u16 ethertype)
return 0;
}
+static void mlx5_fs_ttc_set_match_ipv_outer(struct mlx5_core_dev *mdev,
+ struct mlx5_flow_spec *spec,
+ u16 etype)
+{
+ int match_ipv_outer =
+ MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
+ ft_field_support.outer_ip_version);
+ u8 ipv;
+
+ ipv = mlx5_etype_to_ipv(etype);
+ if (match_ipv_outer && ipv) {
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.ip_version);
+ MLX5_SET(fte_match_param, spec->match_value,
+ outer_headers.ip_version, ipv);
+ } else {
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.ethertype);
+ MLX5_SET(fte_match_param, spec->match_value,
+ outer_headers.ethertype, etype);
+ }
+
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+}
+
static void mlx5_fs_ttc_set_match_proto(void *headers_c, void *headers_v,
u8 proto, bool use_l4_type)
{
@@ -279,16 +352,12 @@ static void mlx5_fs_ttc_set_match_proto(void *headers_c, void *headers_v,
static struct mlx5_flow_handle *
mlx5_generate_ttc_rule(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
struct mlx5_flow_destination *dest, u16 etype, u8 proto,
- bool use_l4_type)
+ bool use_l4_type, bool ipsec_rss)
{
- int match_ipv_outer =
- MLX5_CAP_FLOWTABLE_NIC_RX(dev,
- ft_field_support.outer_ip_version);
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
int err = 0;
- u8 ipv;
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
@@ -305,15 +374,15 @@ mlx5_generate_ttc_rule(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
proto, use_l4_type);
}
- ipv = mlx5_etype_to_ipv(etype);
- if (match_ipv_outer && ipv) {
- spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
- MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, ipv);
- } else if (etype) {
- spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
- MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, etype);
+ if (etype)
+ mlx5_fs_ttc_set_match_ipv_outer(dev, spec, etype);
+
+ if (ipsec_rss && proto == IPPROTO_ESP) {
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters_2.ipsec_next_header);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters_2.ipsec_next_header, 0);
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
}
rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
@@ -342,12 +411,16 @@ static int mlx5_generate_ttc_table_rules(struct mlx5_core_dev *dev,
for (tt = 0; tt < MLX5_NUM_TT; tt++) {
struct mlx5_ttc_rule *rule = &rules[tt];
+ if (mlx5_ttc_is_decrypted_esp_tt(tt))
+ continue;
+
if (test_bit(tt, params->ignore_dests))
continue;
rule->rule = mlx5_generate_ttc_rule(dev, ft, &params->dests[tt],
ttc_rules[tt].etype,
ttc_rules[tt].proto,
- use_l4_type);
+ use_l4_type,
+ params->ipsec_rss);
if (IS_ERR(rule->rule)) {
err = PTR_ERR(rule->rule);
rule->rule = NULL;
@@ -370,7 +443,7 @@ static int mlx5_generate_ttc_table_rules(struct mlx5_core_dev *dev,
&params->tunnel_dests[tt],
ttc_tunnel_rules[tt].etype,
ttc_tunnel_rules[tt].proto,
- use_l4_type);
+ use_l4_type, false);
if (IS_ERR(trules[tt])) {
err = PTR_ERR(trules[tt]);
trules[tt] = NULL;
@@ -385,10 +458,78 @@ del_rules:
return err;
}
+static int mlx5_create_ttc_table_ipsec_groups(struct mlx5_ttc_table *ttc,
+ bool use_ipv,
+ u32 *in, int *next_ix)
+{
+ u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+ const struct mlx5_fs_ttc_groups *groups = ttc->groups;
+ int ix = *next_ix;
+
+ MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0);
+
+ /* decrypted ESP outer group */
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.l4_type_ext);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += groups->group_size[ttc->num_groups];
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
+ if (IS_ERR(ttc->g[ttc->num_groups]))
+ goto err;
+ ttc->num_groups++;
+
+ MLX5_SET(fte_match_param, mc, outer_headers.l4_type_ext, 0);
+
+ /* decrypted ESP inner group */
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
+ if (use_ipv)
+ MLX5_SET(fte_match_param, mc, outer_headers.ip_version, 0);
+ else
+ MLX5_SET(fte_match_param, mc, outer_headers.ethertype, 0);
+ MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_version);
+ MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.l4_type_ext);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += groups->group_size[ttc->num_groups];
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
+ if (IS_ERR(ttc->g[ttc->num_groups]))
+ goto err;
+ ttc->num_groups++;
+
+ MLX5_SET(fte_match_param, mc, inner_headers.ip_version, 0);
+ MLX5_SET(fte_match_param, mc, inner_headers.l4_type_ext, 0);
+
+ /* undecrypted ESP group */
+ MLX5_SET_CFG(in, match_criteria_enable,
+ MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2);
+ if (use_ipv)
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_version);
+ else
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
+ MLX5_SET_TO_ONES(fte_match_param, mc,
+ misc_parameters_2.ipsec_next_header);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += groups->group_size[ttc->num_groups];
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
+ if (IS_ERR(ttc->g[ttc->num_groups]))
+ goto err;
+ ttc->num_groups++;
+
+ *next_ix = ix;
+
+ return 0;
+
+err:
+ return PTR_ERR(ttc->g[ttc->num_groups]);
+}
+
static int mlx5_create_ttc_table_groups(struct mlx5_ttc_table *ttc,
- bool use_ipv,
- const struct mlx5_fs_ttc_groups *groups)
+ bool use_ipv)
{
+ const struct mlx5_fs_ttc_groups *groups = ttc->groups;
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
int ix = 0;
u32 *in;
@@ -436,8 +577,18 @@ static int mlx5_create_ttc_table_groups(struct mlx5_ttc_table *ttc,
goto err;
ttc->num_groups++;
+ if (mlx5_ttc_has_esp_flow_group(ttc)) {
+ err = mlx5_create_ttc_table_ipsec_groups(ttc, use_ipv, in, &ix);
+ if (err)
+ goto err;
+
+ MLX5_SET(fte_match_param, mc,
+ misc_parameters_2.ipsec_next_header, 0);
+ }
+
/* L3 Group */
MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0);
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
MLX5_SET_CFG(in, start_flow_index, ix);
ix += groups->group_size[ttc->num_groups];
MLX5_SET_CFG(in, end_flow_index, ix - 1);
@@ -527,6 +678,9 @@ static int mlx5_generate_inner_ttc_table_rules(struct mlx5_core_dev *dev,
for (tt = 0; tt < MLX5_NUM_TT; tt++) {
struct mlx5_ttc_rule *rule = &rules[tt];
+ if (mlx5_ttc_is_decrypted_esp_tt(tt))
+ continue;
+
if (test_bit(tt, params->ignore_dests))
continue;
rule->rule = mlx5_generate_inner_ttc_rule(dev, ft,
@@ -700,6 +854,7 @@ void mlx5_destroy_ttc_table(struct mlx5_ttc_table *ttc)
kfree(ttc->g);
mlx5_destroy_flow_table(ttc->t);
+ mutex_destroy(&ttc->mutex);
kvfree(ttc);
}
@@ -709,7 +864,6 @@ struct mlx5_ttc_table *mlx5_create_ttc_table(struct mlx5_core_dev *dev,
bool match_ipv_outer =
MLX5_CAP_FLOWTABLE_NIC_RX(dev,
ft_field_support.outer_ip_version);
- const struct mlx5_fs_ttc_groups *groups;
struct mlx5_flow_namespace *ns;
struct mlx5_ttc_table *ttc;
bool use_l4_type;
@@ -738,11 +892,10 @@ struct mlx5_ttc_table *mlx5_create_ttc_table(struct mlx5_core_dev *dev,
return ERR_PTR(-EOPNOTSUPP);
}
- groups = use_l4_type ? &ttc_groups[TTC_GROUPS_USE_L4_TYPE] :
- &ttc_groups[TTC_GROUPS_DEFAULT];
+ ttc->groups = mlx5_ttc_get_fs_groups(use_l4_type, params->ipsec_rss);
WARN_ON_ONCE(params->ft_attr.max_fte);
- params->ft_attr.max_fte = mlx5_fs_ttc_table_size(groups);
+ params->ft_attr.max_fte = mlx5_fs_ttc_table_size(ttc->groups);
ttc->t = mlx5_create_flow_table(ns, &params->ft_attr);
if (IS_ERR(ttc->t)) {
err = PTR_ERR(ttc->t);
@@ -750,7 +903,7 @@ struct mlx5_ttc_table *mlx5_create_ttc_table(struct mlx5_core_dev *dev,
return ERR_PTR(err);
}
- err = mlx5_create_ttc_table_groups(ttc, match_ipv_outer, groups);
+ err = mlx5_create_ttc_table_groups(ttc, match_ipv_outer);
if (err)
goto destroy_ft;
@@ -758,6 +911,9 @@ struct mlx5_ttc_table *mlx5_create_ttc_table(struct mlx5_core_dev *dev,
if (err)
goto destroy_ft;
+ ttc->mdev = dev;
+ mutex_init(&ttc->mutex);
+
return ttc;
destroy_ft:
@@ -791,3 +947,194 @@ int mlx5_ttc_fwd_default_dest(struct mlx5_ttc_table *ttc,
return mlx5_ttc_fwd_dest(ttc, type, &dest);
}
+
+static void _mlx5_ttc_destroy_ipsec_rules(struct mlx5_ttc_table *ttc)
+{
+ enum mlx5_traffic_types i;
+
+ for (i = MLX5_TT_DECRYPTED_ESP_OUTER_IPV4_TCP;
+ i <= MLX5_TT_DECRYPTED_ESP_INNER_IPV6_UDP; i++) {
+ if (!ttc->rules[i].rule)
+ continue;
+
+ mlx5_del_flow_rules(ttc->rules[i].rule);
+ ttc->rules[i].rule = NULL;
+ }
+}
+
+void mlx5_ttc_destroy_ipsec_rules(struct mlx5_ttc_table *ttc)
+{
+ if (!mlx5_ttc_has_esp_flow_group(ttc))
+ return;
+
+ mutex_lock(&ttc->mutex);
+ if (--ttc->refcnt)
+ goto unlock;
+
+ _mlx5_ttc_destroy_ipsec_rules(ttc);
+unlock:
+ mutex_unlock(&ttc->mutex);
+}
+
+static int mlx5_ttc_get_tt_attrs(enum mlx5_traffic_types type,
+ u16 *etype, int *l4_type_ext,
+ enum mlx5_traffic_types *tir_tt)
+{
+ switch (type) {
+ case MLX5_TT_DECRYPTED_ESP_OUTER_IPV4_TCP:
+ case MLX5_TT_DECRYPTED_ESP_INNER_IPV4_TCP:
+ *etype = ETH_P_IP;
+ *l4_type_ext = MLX5_PACKET_L4_TYPE_EXT_TCP;
+ *tir_tt = MLX5_TT_IPV4_TCP;
+ break;
+ case MLX5_TT_DECRYPTED_ESP_OUTER_IPV6_TCP:
+ case MLX5_TT_DECRYPTED_ESP_INNER_IPV6_TCP:
+ *etype = ETH_P_IPV6;
+ *l4_type_ext = MLX5_PACKET_L4_TYPE_EXT_TCP;
+ *tir_tt = MLX5_TT_IPV6_TCP;
+ break;
+ case MLX5_TT_DECRYPTED_ESP_OUTER_IPV4_UDP:
+ case MLX5_TT_DECRYPTED_ESP_INNER_IPV4_UDP:
+ *etype = ETH_P_IP;
+ *l4_type_ext = MLX5_PACKET_L4_TYPE_EXT_UDP;
+ *tir_tt = MLX5_TT_IPV4_UDP;
+ break;
+ case MLX5_TT_DECRYPTED_ESP_OUTER_IPV6_UDP:
+ case MLX5_TT_DECRYPTED_ESP_INNER_IPV6_UDP:
+ *etype = ETH_P_IPV6;
+ *l4_type_ext = MLX5_PACKET_L4_TYPE_EXT_UDP;
+ *tir_tt = MLX5_TT_IPV6_UDP;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct mlx5_flow_handle *
+mlx5_ttc_create_ipsec_outer_rule(struct mlx5_ttc_table *ttc,
+ enum mlx5_traffic_types type)
+{
+ struct mlx5_flow_destination dest;
+ MLX5_DECLARE_FLOW_ACT(flow_act);
+ enum mlx5_traffic_types tir_tt;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ int l4_type_ext;
+ u16 etype;
+ int err;
+
+ err = mlx5_ttc_get_tt_attrs(type, &etype, &l4_type_ext, &tir_tt);
+ if (err)
+ return ERR_PTR(err);
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return ERR_PTR(-ENOMEM);
+
+ mlx5_fs_ttc_set_match_ipv_outer(ttc->mdev, spec, etype);
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.l4_type_ext);
+ MLX5_SET(fte_match_param, spec->match_value,
+ outer_headers.l4_type_ext, l4_type_ext);
+
+ dest = mlx5_ttc_get_default_dest(ttc, tir_tt);
+
+ rule = mlx5_add_flow_rules(ttc->t, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(ttc->mdev, "%s: add rule failed\n", __func__);
+ }
+
+ kvfree(spec);
+ return err ? ERR_PTR(err) : rule;
+}
+
+static struct mlx5_flow_handle *
+mlx5_ttc_create_ipsec_inner_rule(struct mlx5_ttc_table *ttc,
+ struct mlx5_ttc_table *inner_ttc,
+ enum mlx5_traffic_types type)
+{
+ struct mlx5_flow_destination dest;
+ MLX5_DECLARE_FLOW_ACT(flow_act);
+ enum mlx5_traffic_types tir_tt;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ int l4_type_ext;
+ u16 etype;
+ int err;
+
+ err = mlx5_ttc_get_tt_attrs(type, &etype, &l4_type_ext, &tir_tt);
+ if (err)
+ return ERR_PTR(err);
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return ERR_PTR(-ENOMEM);
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ inner_headers.ip_version);
+ MLX5_SET(fte_match_param, spec->match_value,
+ inner_headers.ip_version, mlx5_etype_to_ipv(etype));
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ inner_headers.l4_type_ext);
+ MLX5_SET(fte_match_param, spec->match_value,
+ inner_headers.l4_type_ext, l4_type_ext);
+
+ dest = mlx5_ttc_get_default_dest(inner_ttc, tir_tt);
+
+ spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
+
+ rule = mlx5_add_flow_rules(ttc->t, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(ttc->mdev, "%s: add rule failed\n", __func__);
+ }
+
+ kvfree(spec);
+ return err ? ERR_PTR(err) : rule;
+}
+
+int mlx5_ttc_create_ipsec_rules(struct mlx5_ttc_table *ttc,
+ struct mlx5_ttc_table *inner_ttc)
+{
+ struct mlx5_flow_handle *rule;
+ enum mlx5_traffic_types i;
+
+ if (!mlx5_ttc_has_esp_flow_group(ttc))
+ return 0;
+
+ mutex_lock(&ttc->mutex);
+ if (ttc->refcnt)
+ goto skip;
+
+ for (i = MLX5_TT_DECRYPTED_ESP_OUTER_IPV4_TCP;
+ i <= MLX5_TT_DECRYPTED_ESP_OUTER_IPV6_UDP; i++) {
+ rule = mlx5_ttc_create_ipsec_outer_rule(ttc, i);
+ if (IS_ERR(rule))
+ goto err_out;
+
+ ttc->rules[i].rule = rule;
+ }
+
+ for (i = MLX5_TT_DECRYPTED_ESP_INNER_IPV4_TCP;
+ i <= MLX5_TT_DECRYPTED_ESP_INNER_IPV6_UDP; i++) {
+ rule = mlx5_ttc_create_ipsec_inner_rule(ttc, inner_ttc, i);
+ if (IS_ERR(rule))
+ goto err_out;
+
+ ttc->rules[i].rule = rule;
+ }
+
+skip:
+ ttc->refcnt++;
+ mutex_unlock(&ttc->mutex);
+ return 0;
+
+err_out:
+ _mlx5_ttc_destroy_ipsec_rules(ttc);
+ mutex_unlock(&ttc->mutex);
+ return PTR_ERR(rule);
+}
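The create/destroy pair for the decrypted-ESP rules is refcounted under ttc->mutex, so several IPsec consumers can share one rule set: the first create installs all eight rules (four outer, four inner), later creates only bump the count, and the final destroy tears them down. A usage sketch, with the ttc/inner_ttc handles assumed to come from the caller's context:

	int err;

	err = mlx5_ttc_create_ipsec_rules(ttc, inner_ttc);
	if (err)
		return err;

	/* ... decrypted ESP traffic is now spread via the outer/inner rules ... */

	mlx5_ttc_destroy_ipsec_rules(ttc);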
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h
index ab9434fe3ae6..95f6e56724a2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h
@@ -18,6 +18,14 @@ enum mlx5_traffic_types {
MLX5_TT_IPV4,
MLX5_TT_IPV6,
MLX5_TT_ANY,
+ MLX5_TT_DECRYPTED_ESP_OUTER_IPV4_TCP,
+ MLX5_TT_DECRYPTED_ESP_OUTER_IPV6_TCP,
+ MLX5_TT_DECRYPTED_ESP_OUTER_IPV4_UDP,
+ MLX5_TT_DECRYPTED_ESP_OUTER_IPV6_UDP,
+ MLX5_TT_DECRYPTED_ESP_INNER_IPV4_TCP,
+ MLX5_TT_DECRYPTED_ESP_INNER_IPV6_TCP,
+ MLX5_TT_DECRYPTED_ESP_INNER_IPV4_UDP,
+ MLX5_TT_DECRYPTED_ESP_INNER_IPV6_UDP,
MLX5_NUM_TT,
MLX5_NUM_INDIR_TIRS = MLX5_TT_ANY,
};
@@ -47,6 +55,7 @@ struct ttc_params {
bool inner_ttc;
DECLARE_BITMAP(ignore_tunnel_dests, MLX5_NUM_TUNNEL_TT);
struct mlx5_flow_destination tunnel_dests[MLX5_NUM_TUNNEL_TT];
+ bool ipsec_rss;
};
const char *mlx5_ttc_get_name(enum mlx5_traffic_types tt);
@@ -70,4 +79,14 @@ int mlx5_ttc_fwd_default_dest(struct mlx5_ttc_table *ttc,
bool mlx5_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev);
u8 mlx5_get_proto_by_tunnel_type(enum mlx5_tunnel_types tt);
+bool mlx5_ttc_has_esp_flow_group(struct mlx5_ttc_table *ttc);
+int mlx5_ttc_create_ipsec_rules(struct mlx5_ttc_table *ttc,
+ struct mlx5_ttc_table *inner_ttc);
+void mlx5_ttc_destroy_ipsec_rules(struct mlx5_ttc_table *ttc);
+static inline bool mlx5_ttc_is_decrypted_esp_tt(enum mlx5_traffic_types tt)
+{
+ return tt >= MLX5_TT_DECRYPTED_ESP_OUTER_IPV4_TCP &&
+ tt <= MLX5_TT_DECRYPTED_ESP_INNER_IPV6_UDP;
+}
+
#endif /* __MLX5_FS_TTC_H__ */
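The eight decrypted-ESP traffic types are appended after MLX5_TT_ANY, which leaves MLX5_NUM_INDIR_TIRS (pinned to MLX5_TT_ANY) unchanged; generic iteration over all traffic types is expected to skip them with the new helper, as both rule-generation loops above now do. A small sketch of that iteration pattern:

	enum mlx5_traffic_types tt;

	for (tt = 0; tt < MLX5_NUM_TT; tt++) {
		if (mlx5_ttc_is_decrypted_esp_tt(tt))
			continue; /* handled by the dedicated IPsec rules */
		/* ... build the regular rule for tt ... */
	}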
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c
index b7d4b1a2baf2..d524f0220513 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c
@@ -164,6 +164,8 @@ ipsec_fs_roce_rx_rule_setup(struct mlx5_core_dev *mdev,
roce->rule = rule;
memset(spec, 0, sizeof(*spec));
+ if (default_dst->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
+ flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
rule = mlx5_add_flow_rules(roce->ft, spec, &flow_act, default_dst, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -178,6 +180,8 @@ ipsec_fs_roce_rx_rule_setup(struct mlx5_core_dev *mdev,
goto out;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ if (default_dst->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
+ flow_act.flags &= ~FLOW_ACT_IGNORE_FLOW_LEVEL;
dst.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE;
dst.ft = roce->ft_rdma;
rule = mlx5_add_flow_rules(roce->nic_master_ft, NULL, &flow_act, &dst,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c
index 762d55ba9e51..e6be2f01daf4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c
@@ -45,11 +45,7 @@
#define MLX5_SECTAG_HEADER_SIZE_WITHOUT_SCI 0x8
#define MLX5_SECTAG_HEADER_SIZE_WITH_SCI (MLX5_SECTAG_HEADER_SIZE_WITHOUT_SCI + MACSEC_SCI_LEN)
-/* MACsec RX flow steering */
-#define MLX5_ETH_WQE_FT_META_MACSEC_MASK 0x3E
-
/* MACsec fs_id handling for steering */
-#define macsec_fs_set_tx_fs_id(fs_id) (MLX5_ETH_WQE_FT_META_MACSEC | (fs_id) << 2)
#define macsec_fs_set_rx_fs_id(fs_id) ((fs_id) | BIT(30))
struct mlx5_sectag_header {
@@ -597,7 +593,7 @@ static int macsec_fs_tx_setup_fte(struct mlx5_macsec_fs *macsec_fs,
MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_a,
MLX5_ETH_WQE_FT_META_MACSEC_MASK);
MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_a,
- macsec_fs_set_tx_fs_id(id));
+ MLX5_MACSEC_TX_METADATA(id));
*fs_id = id;
flow_act->crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_MACSEC;
@@ -2219,9 +2215,11 @@ static int mlx5_macsec_fs_add_roce_rule_tx(struct mlx5_macsec_fs *macsec_fs, u32
MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_A);
- MLX5_SET(set_action_in, action, data, macsec_fs_set_tx_fs_id(fs_id));
- MLX5_SET(set_action_in, action, offset, 0);
- MLX5_SET(set_action_in, action, length, 32);
+ MLX5_SET(set_action_in, action, data,
+ mlx5_macsec_fs_set_tx_fs_id(fs_id));
+ MLX5_SET(set_action_in, action, offset,
+ MLX5_ETH_WQE_FT_META_MACSEC_SHIFT);
+ MLX5_SET(set_action_in, action, length, 8);
modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_RDMA_TX_MACSEC,
1, action);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.h
index 34b80c3ef6a5..15acaff43641 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.h
@@ -12,6 +12,21 @@
#define MLX5_MACSEC_METADATA_MARKER(metadata) ((((metadata) >> 30) & 0x3) == 0x1)
#define MLX5_MACSEC_RX_METADAT_HANDLE(metadata) ((metadata) & MLX5_MACSEC_RX_FS_ID_MASK)
+/* MACsec TX flow steering */
+#define MLX5_ETH_WQE_FT_META_MACSEC_MASK \
+ (MLX5_ETH_WQE_FT_META_MACSEC | MLX5_ETH_WQE_FT_META_MACSEC_FS_ID_MASK)
+#define MLX5_ETH_WQE_FT_META_MACSEC_SHIFT MLX5_ETH_WQE_FT_META_SHIFT
+
+/* MACsec fs_id handling for steering */
+#define mlx5_macsec_fs_set_tx_fs_id(fs_id) \
+ (((MLX5_ETH_WQE_FT_META_MACSEC) >> MLX5_ETH_WQE_FT_META_MACSEC_SHIFT) \
+ | ((fs_id) << 2))
+
+#define MLX5_MACSEC_TX_METADATA(fs_id) \
+ (mlx5_macsec_fs_set_tx_fs_id(fs_id) << \
+ MLX5_ETH_WQE_FT_META_MACSEC_SHIFT)
+
+/* MACsec fs_id uses 4 bits, supports up to 16 interfaces */
#define MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES 16
struct mlx5_macsec_fs;
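The TX metadata handling is split into two named macros: mlx5_macsec_fs_set_tx_fs_id() composes the MACsec marker and the 4-bit fs_id in the shifted-down domain that the 8-bit set_action_in write above operates on, and MLX5_MACSEC_TX_METADATA() shifts that same value up to metadata_reg_a scale for FTE matching. A symbolic sketch of the invariant the two call sites rely on (the fs_id value is hypothetical; no constant values are assumed):

	u32 id = 3; /* hypothetical fs_id */
	u32 wqe_val = mlx5_macsec_fs_set_tx_fs_id(id); /* modify-header domain */
	u32 reg_a_val = MLX5_MACSEC_TX_METADATA(id); /* FTE match domain */

	/* by construction, the match value is the written value, re-shifted */
	WARN_ON(reg_a_val !=
		(wqe_val << MLX5_ETH_WQE_FT_META_MACSEC_SHIFT));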
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.c
new file mode 100644
index 000000000000..459a0b4d08e6
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.c
@@ -0,0 +1,567 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include "nv_param.h"
+#include "mlx5_core.h"
+
+enum {
+ MLX5_CLASS_0_CTRL_ID_NV_GLOBAL_PCI_CONF = 0x80,
+ MLX5_CLASS_0_CTRL_ID_NV_GLOBAL_PCI_CAP = 0x81,
+ MLX5_CLASS_0_CTRL_ID_NV_SW_OFFLOAD_CONFIG = 0x10a,
+
+ MLX5_CLASS_3_CTRL_ID_NV_PF_PCI_CONF = 0x80,
+};
+
+struct mlx5_ifc_configuration_item_type_class_global_bits {
+ u8 type_class[0x8];
+ u8 parameter_index[0x18];
+};
+
+struct mlx5_ifc_configuration_item_type_class_per_host_pf_bits {
+ u8 type_class[0x8];
+ u8 pf_index[0x6];
+ u8 pci_bus_index[0x8];
+ u8 parameter_index[0xa];
+};
+
+union mlx5_ifc_config_item_type_auto_bits {
+ struct mlx5_ifc_configuration_item_type_class_global_bits
+ configuration_item_type_class_global;
+ struct mlx5_ifc_configuration_item_type_class_per_host_pf_bits
+ configuration_item_type_class_per_host_pf;
+ u8 reserved_at_0[0x20];
+};
+
+struct mlx5_ifc_config_item_bits {
+ u8 valid[0x2];
+ u8 priority[0x2];
+ u8 header_type[0x2];
+ u8 ovr_en[0x1];
+ u8 rd_en[0x1];
+ u8 access_mode[0x2];
+ u8 reserved_at_a[0x1];
+ u8 writer_id[0x5];
+ u8 version[0x4];
+ u8 reserved_at_14[0x2];
+ u8 host_id_valid[0x1];
+ u8 length[0x9];
+
+ union mlx5_ifc_config_item_type_auto_bits type;
+
+ u8 reserved_at_40[0x10];
+ u8 crc16[0x10];
+};
+
+struct mlx5_ifc_mnvda_reg_bits {
+ struct mlx5_ifc_config_item_bits configuration_item_header;
+
+ u8 configuration_item_data[64][0x20];
+};
+
+struct mlx5_ifc_nv_global_pci_conf_bits {
+ u8 sriov_valid[0x1];
+ u8 reserved_at_1[0x10];
+ u8 per_pf_total_vf[0x1];
+ u8 reserved_at_12[0xe];
+
+ u8 sriov_en[0x1];
+ u8 reserved_at_21[0xf];
+ u8 total_vfs[0x10];
+
+ u8 reserved_at_40[0x20];
+};
+
+struct mlx5_ifc_nv_global_pci_cap_bits {
+ u8 max_vfs_per_pf_valid[0x1];
+ u8 reserved_at_1[0x13];
+ u8 per_pf_total_vf_supported[0x1];
+ u8 reserved_at_15[0xb];
+
+ u8 sriov_support[0x1];
+ u8 reserved_at_21[0xf];
+ u8 max_vfs_per_pf[0x10];
+
+ u8 reserved_at_40[0x60];
+};
+
+struct mlx5_ifc_nv_pf_pci_conf_bits {
+ u8 reserved_at_0[0x9];
+ u8 pf_total_vf_en[0x1];
+ u8 reserved_at_a[0x16];
+
+ u8 reserved_at_20[0x20];
+
+ u8 reserved_at_40[0x10];
+ u8 total_vf[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_nv_sw_offload_conf_bits {
+ u8 ip_over_vxlan_port[0x10];
+ u8 tunnel_ecn_copy_offload_disable[0x1];
+ u8 pci_atomic_mode[0x3];
+ u8 sr_enable[0x1];
+ u8 ptp_cyc2realtime[0x1];
+ u8 vector_calc_disable[0x1];
+ u8 uctx_en[0x1];
+ u8 prio_tag_required_en[0x1];
+ u8 esw_fdb_ipv4_ttl_modify_enable[0x1];
+ u8 mkey_by_name[0x1];
+ u8 ip_over_vxlan_en[0x1];
+ u8 one_qp_per_recovery[0x1];
+ u8 cqe_compression[0x3];
+ u8 tunnel_udp_entropy_proto_disable[0x1];
+ u8 reserved_at_21[0x1];
+ u8 ar_enable[0x1];
+ u8 log_max_outstanding_wqe[0x5];
+ u8 vf_migration[0x2];
+ u8 log_tx_psn_win[0x6];
+ u8 lro_log_timeout3[0x4];
+ u8 lro_log_timeout2[0x4];
+ u8 lro_log_timeout1[0x4];
+ u8 lro_log_timeout0[0x4];
+};
+
+#define MNVDA_HDR_SZ \
+ (MLX5_ST_SZ_BYTES(mnvda_reg) - \
+ MLX5_BYTE_OFF(mnvda_reg, configuration_item_data))
+
+#define MLX5_SET_CFG_ITEM_TYPE(_cls_name, _mnvda_ptr, _field, _val) \
+ MLX5_SET(mnvda_reg, _mnvda_ptr, \
+ configuration_item_header.type.configuration_item_type_class_##_cls_name._field, \
+ _val)
+
+#define MLX5_SET_CFG_HDR_LEN(_mnvda_ptr, _cls_name) \
+ MLX5_SET(mnvda_reg, _mnvda_ptr, configuration_item_header.length, \
+ MLX5_ST_SZ_BYTES(_cls_name))
+
+#define MLX5_GET_CFG_HDR_LEN(_mnvda_ptr) \
+ MLX5_GET(mnvda_reg, _mnvda_ptr, configuration_item_header.length)
+
+static int mlx5_nv_param_read(struct mlx5_core_dev *dev, void *mnvda,
+ size_t len)
+{
+ u32 param_idx, type_class;
+ u32 header_len;
+ void *cls_ptr;
+ int err;
+
+ if (WARN_ON(len > MLX5_ST_SZ_BYTES(mnvda_reg)) || len < MNVDA_HDR_SZ)
+ return -EINVAL; /* A caller bug */
+
+ err = mlx5_core_access_reg(dev, mnvda, len, mnvda, len, MLX5_REG_MNVDA,
+ 0, 0);
+ if (!err)
+ return 0;
+
+ cls_ptr = MLX5_ADDR_OF(mnvda_reg, mnvda,
+ configuration_item_header.type.configuration_item_type_class_global);
+
+ type_class = MLX5_GET(configuration_item_type_class_global, cls_ptr,
+ type_class);
+ param_idx = MLX5_GET(configuration_item_type_class_global, cls_ptr,
+ parameter_index);
+ header_len = MLX5_GET_CFG_HDR_LEN(mnvda);
+
+ mlx5_core_warn(dev, "Failed to read mnvda reg: type_class 0x%x, param_idx 0x%x, header_len %u, err %d\n",
+ type_class, param_idx, header_len, err);
+
+ return -EOPNOTSUPP;
+}
+
+static int mlx5_nv_param_write(struct mlx5_core_dev *dev, void *mnvda,
+ size_t len)
+{
+ if (WARN_ON(len > MLX5_ST_SZ_BYTES(mnvda_reg)) || len < MNVDA_HDR_SZ)
+ return -EINVAL;
+
+ if (WARN_ON(MLX5_GET_CFG_HDR_LEN(mnvda) == 0))
+ return -EINVAL;
+
+ return mlx5_core_access_reg(dev, mnvda, len, mnvda, len, MLX5_REG_MNVDA,
+ 0, 1);
+}
+
+static int
+mlx5_nv_param_read_sw_offload_conf(struct mlx5_core_dev *dev, void *mnvda,
+ size_t len)
+{
+ MLX5_SET_CFG_ITEM_TYPE(global, mnvda, type_class, 0);
+ MLX5_SET_CFG_ITEM_TYPE(global, mnvda, parameter_index,
+ MLX5_CLASS_0_CTRL_ID_NV_SW_OFFLOAD_CONFIG);
+ MLX5_SET_CFG_HDR_LEN(mnvda, nv_sw_offload_conf);
+
+ return mlx5_nv_param_read(dev, mnvda, len);
+}
+
+static const char *const
+ cqe_compress_str[] = { "balanced", "aggressive" };
+
+static int
+mlx5_nv_param_devlink_cqe_compress_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ u32 mnvda[MLX5_ST_SZ_DW(mnvda_reg)] = {};
+ u8 value = U8_MAX;
+ void *data;
+ int err;
+
+ err = mlx5_nv_param_read_sw_offload_conf(dev, mnvda, sizeof(mnvda));
+ if (err)
+ return err;
+
+ data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
+ value = MLX5_GET(nv_sw_offload_conf, data, cqe_compression);
+
+ if (value >= ARRAY_SIZE(cqe_compress_str))
+ return -EOPNOTSUPP;
+
+ strscpy(ctx->val.vstr, cqe_compress_str[value], sizeof(ctx->val.vstr));
+ return 0;
+}
+
+static int
+mlx5_nv_param_devlink_cqe_compress_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cqe_compress_str); i++) {
+ if (!strcmp(val.vstr, cqe_compress_str[i]))
+ return 0;
+ }
+
+ NL_SET_ERR_MSG_MOD(extack,
+ "Invalid value, supported values are balanced/aggressive");
+ return -EOPNOTSUPP;
+}
+
+static int
+mlx5_nv_param_devlink_cqe_compress_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ u32 mnvda[MLX5_ST_SZ_DW(mnvda_reg)] = {};
+ int err = 0;
+ void *data;
+ u8 value;
+
+ if (!strcmp(ctx->val.vstr, "aggressive"))
+ value = 1;
+ else /* "balanced": the only other value accepted by validate() */
+ value = 0;
+
+ err = mlx5_nv_param_read_sw_offload_conf(dev, mnvda, sizeof(mnvda));
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to read sw_offload_conf mnvda reg");
+ return err;
+ }
+
+ data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
+ MLX5_SET(nv_sw_offload_conf, data, cqe_compression, value);
+
+ return mlx5_nv_param_write(dev, mnvda, sizeof(mnvda));
+}
+
+static int mlx5_nv_param_read_global_pci_conf(struct mlx5_core_dev *dev,
+ void *mnvda, size_t len)
+{
+ MLX5_SET_CFG_ITEM_TYPE(global, mnvda, type_class, 0);
+ MLX5_SET_CFG_ITEM_TYPE(global, mnvda, parameter_index,
+ MLX5_CLASS_0_CTRL_ID_NV_GLOBAL_PCI_CONF);
+ MLX5_SET_CFG_HDR_LEN(mnvda, nv_global_pci_conf);
+
+ return mlx5_nv_param_read(dev, mnvda, len);
+}
+
+static int mlx5_nv_param_read_global_pci_cap(struct mlx5_core_dev *dev,
+ void *mnvda, size_t len)
+{
+ MLX5_SET_CFG_ITEM_TYPE(global, mnvda, type_class, 0);
+ MLX5_SET_CFG_ITEM_TYPE(global, mnvda, parameter_index,
+ MLX5_CLASS_0_CTRL_ID_NV_GLOBAL_PCI_CAP);
+ MLX5_SET_CFG_HDR_LEN(mnvda, nv_global_pci_cap);
+
+ return mlx5_nv_param_read(dev, mnvda, len);
+}
+
+static int mlx5_nv_param_read_per_host_pf_conf(struct mlx5_core_dev *dev,
+ void *mnvda, size_t len)
+{
+ MLX5_SET_CFG_ITEM_TYPE(per_host_pf, mnvda, type_class, 3);
+ MLX5_SET_CFG_ITEM_TYPE(per_host_pf, mnvda, parameter_index,
+ MLX5_CLASS_3_CTRL_ID_NV_PF_PCI_CONF);
+ MLX5_SET_CFG_HDR_LEN(mnvda, nv_pf_pci_conf);
+
+ return mlx5_nv_param_read(dev, mnvda, len);
+}
+
+static int mlx5_devlink_enable_sriov_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ u32 mnvda[MLX5_ST_SZ_DW(mnvda_reg)] = {};
+ bool sriov_en = false;
+ void *data;
+ int err;
+
+ err = mlx5_nv_param_read_global_pci_cap(dev, mnvda, sizeof(mnvda));
+ if (err)
+ return err;
+
+ data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
+ if (!MLX5_GET(nv_global_pci_cap, data, sriov_support)) {
+ ctx->val.vbool = false;
+ return 0;
+ }
+
+ memset(mnvda, 0, sizeof(mnvda));
+ err = mlx5_nv_param_read_global_pci_conf(dev, mnvda, sizeof(mnvda));
+ if (err)
+ return err;
+
+ data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
+ sriov_en = MLX5_GET(nv_global_pci_conf, data, sriov_en);
+ if (!MLX5_GET(nv_global_pci_conf, data, per_pf_total_vf)) {
+ ctx->val.vbool = sriov_en;
+ return 0;
+ }
+
+ /* SRIOV is per PF */
+ memset(mnvda, 0, sizeof(mnvda));
+ err = mlx5_nv_param_read_per_host_pf_conf(dev, mnvda, sizeof(mnvda));
+ if (err)
+ return err;
+
+ data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
+ ctx->val.vbool = sriov_en &&
+ MLX5_GET(nv_pf_pci_conf, data, pf_total_vf_en);
+ return 0;
+}
+
+static int mlx5_devlink_enable_sriov_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ u32 mnvda[MLX5_ST_SZ_DW(mnvda_reg)] = {};
+ bool per_pf_support;
+ void *cap, *data;
+ int err;
+
+ err = mlx5_nv_param_read_global_pci_cap(dev, mnvda, sizeof(mnvda));
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to read global PCI capability");
+ return err;
+ }
+
+ cap = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
+ per_pf_support = MLX5_GET(nv_global_pci_cap, cap,
+ per_pf_total_vf_supported);
+
+ if (!MLX5_GET(nv_global_pci_cap, cap, sriov_support)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "SRIOV is not supported on this device");
+ return -EOPNOTSUPP;
+ }
+
+ if (!per_pf_support) {
+ /* We don't allow global SRIOV setting on per PF devlink */
+ NL_SET_ERR_MSG_MOD(extack,
+ "SRIOV is not per PF on this device");
+ return -EOPNOTSUPP;
+ }
+
+ memset(mnvda, 0, sizeof(mnvda));
+ err = mlx5_nv_param_read_global_pci_conf(dev, mnvda, sizeof(mnvda));
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unable to read global PCI configuration");
+ return err;
+ }
+
+ data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
+
+ /* setup per PF sriov mode */
+ MLX5_SET(nv_global_pci_conf, data, sriov_valid, 1);
+ MLX5_SET(nv_global_pci_conf, data, sriov_en, 1);
+ MLX5_SET(nv_global_pci_conf, data, per_pf_total_vf, 1);
+
+ err = mlx5_nv_param_write(dev, mnvda, sizeof(mnvda));
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unable to write global PCI configuration");
+ return err;
+ }
+
+ /* enable/disable sriov on this PF */
+ memset(mnvda, 0, sizeof(mnvda));
+ err = mlx5_nv_param_read_per_host_pf_conf(dev, mnvda, sizeof(mnvda));
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unable to read per host PF configuration");
+ return err;
+ }
+ MLX5_SET(nv_pf_pci_conf, data, pf_total_vf_en, ctx->val.vbool);
+ return mlx5_nv_param_write(dev, mnvda, sizeof(mnvda));
+}
+
+static int mlx5_devlink_total_vfs_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ u32 mnvda[MLX5_ST_SZ_DW(mnvda_reg)] = {};
+ void *data;
+ int err;
+
+ data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
+
+ err = mlx5_nv_param_read_global_pci_cap(dev, mnvda, sizeof(mnvda));
+ if (err)
+ return err;
+
+ if (!MLX5_GET(nv_global_pci_cap, data, sriov_support)) {
+ ctx->val.vu32 = 0;
+ return 0;
+ }
+
+ memset(mnvda, 0, sizeof(mnvda));
+ err = mlx5_nv_param_read_global_pci_conf(dev, mnvda, sizeof(mnvda));
+ if (err)
+ return err;
+
+ if (!MLX5_GET(nv_global_pci_conf, data, per_pf_total_vf)) {
+ ctx->val.vu32 = MLX5_GET(nv_global_pci_conf, data, total_vfs);
+ return 0;
+ }
+
+ /* SRIOV is per PF */
+ memset(mnvda, 0, sizeof(mnvda));
+ err = mlx5_nv_param_read_per_host_pf_conf(dev, mnvda, sizeof(mnvda));
+ if (err)
+ return err;
+
+ ctx->val.vu32 = MLX5_GET(nv_pf_pci_conf, data, total_vf);
+
+ return 0;
+}
+
+static int mlx5_devlink_total_vfs_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ u32 mnvda[MLX5_ST_SZ_DW(mnvda_reg)] = {};
+ void *data;
+ int err;
+
+ err = mlx5_nv_param_read_global_pci_cap(dev, mnvda, sizeof(mnvda));
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to read global pci cap");
+ return err;
+ }
+
+ data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
+ if (!MLX5_GET(nv_global_pci_cap, data, sriov_support)) {
+ NL_SET_ERR_MSG_MOD(extack, "Not configurable on this device");
+ return -EOPNOTSUPP;
+ }
+
+ if (!MLX5_GET(nv_global_pci_cap, data, per_pf_total_vf_supported)) {
+ /* We don't allow global SRIOV setting on per PF devlink */
+ NL_SET_ERR_MSG_MOD(extack,
+ "SRIOV is not per PF on this device");
+ return -EOPNOTSUPP;
+ }
+
+ memset(mnvda, 0, sizeof(mnvda));
+ err = mlx5_nv_param_read_global_pci_conf(dev, mnvda, sizeof(mnvda));
+ if (err)
+ return err;
+
+ MLX5_SET(nv_global_pci_conf, data, sriov_valid, 1);
+ MLX5_SET(nv_global_pci_conf, data, per_pf_total_vf, 1);
+
+ err = mlx5_nv_param_write(dev, mnvda, sizeof(mnvda));
+ if (err)
+ return err;
+
+ memset(mnvda, 0, sizeof(mnvda));
+ err = mlx5_nv_param_read_per_host_pf_conf(dev, mnvda, sizeof(mnvda));
+ if (err)
+ return err;
+
+ data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
+ MLX5_SET(nv_pf_pci_conf, data, total_vf, ctx->val.vu32);
+ return mlx5_nv_param_write(dev, mnvda, sizeof(mnvda));
+}
+
+static int mlx5_devlink_total_vfs_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ u32 cap[MLX5_ST_SZ_DW(mnvda_reg)] = {};
+ void *data;
+ u16 max;
+ int err;
+
+ data = MLX5_ADDR_OF(mnvda_reg, cap, configuration_item_data);
+
+ err = mlx5_nv_param_read_global_pci_cap(dev, cap, sizeof(cap));
+ if (err)
+ return err;
+
+ if (!MLX5_GET(nv_global_pci_cap, data, max_vfs_per_pf_valid))
+ return 0; /* optimistic, but set might fail later */
+
+ max = MLX5_GET(nv_global_pci_cap, data, max_vfs_per_pf);
+ if (val.vu16 > max) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Max allowed by device is %u", max);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct devlink_param mlx5_nv_param_devlink_params[] = {
+ DEVLINK_PARAM_GENERIC(ENABLE_SRIOV, BIT(DEVLINK_PARAM_CMODE_PERMANENT),
+ mlx5_devlink_enable_sriov_get,
+ mlx5_devlink_enable_sriov_set, NULL),
+ DEVLINK_PARAM_GENERIC(TOTAL_VFS, BIT(DEVLINK_PARAM_CMODE_PERMANENT),
+ mlx5_devlink_total_vfs_get,
+ mlx5_devlink_total_vfs_set,
+ mlx5_devlink_total_vfs_validate),
+ DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_CQE_COMPRESSION_TYPE,
+ "cqe_compress_type", DEVLINK_PARAM_TYPE_STRING,
+ BIT(DEVLINK_PARAM_CMODE_PERMANENT),
+ mlx5_nv_param_devlink_cqe_compress_get,
+ mlx5_nv_param_devlink_cqe_compress_set,
+ mlx5_nv_param_devlink_cqe_compress_validate),
+};
+
+int mlx5_nv_param_register_dl_params(struct devlink *devlink)
+{
+ if (!mlx5_core_is_pf(devlink_priv(devlink)))
+ return 0;
+
+ return devl_params_register(devlink, mlx5_nv_param_devlink_params,
+ ARRAY_SIZE(mlx5_nv_param_devlink_params));
+}
+
+void mlx5_nv_param_unregister_dl_params(struct devlink *devlink)
+{
+ if (!mlx5_core_is_pf(devlink_priv(devlink)))
+ return;
+
+ devl_params_unregister(devlink, mlx5_nv_param_devlink_params,
+ ARRAY_SIZE(mlx5_nv_param_devlink_params));
+}
+
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.h
new file mode 100644
index 000000000000..9f4922ff7745
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_NV_PARAM_H
+#define __MLX5_NV_PARAM_H
+
+#include <linux/mlx5/driver.h>
+#include "devlink.h"
+
+int mlx5_nv_param_register_dl_params(struct devlink *devlink);
+void mlx5_nv_param_unregister_dl_params(struct devlink *devlink);
+
+#endif
+
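Both entry points are PF-only guards around devl_params_register()/devl_params_unregister(), so callers can invoke them unconditionally from devlink setup and teardown. A hedged sketch of the intended call sites (the example_* wrappers are hypothetical); since all three params use DEVLINK_PARAM_CMODE_PERMANENT, new values land in NV config and typically take effect only after the next firmware reinitialization:

	static int example_devlink_params_init(struct devlink *devlink)
	{
		return mlx5_nv_param_register_dl_params(devlink);
	}

	static void example_devlink_params_cleanup(struct devlink *devlink)
	{
		mlx5_nv_param_unregister_dl_params(devlink);
	}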
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
index eeb0b7ea05f1..f5c2701f6e87 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
@@ -210,13 +210,17 @@ static void sd_cleanup(struct mlx5_core_dev *dev)
static int sd_register(struct mlx5_core_dev *dev)
{
struct mlx5_devcom_comp_dev *devcom, *pos;
+ struct mlx5_devcom_match_attr attr = {};
struct mlx5_core_dev *peer, *primary;
struct mlx5_sd *sd, *primary_sd;
int err, i;
sd = mlx5_get_sd(dev);
+ attr.key.val = sd->group_id;
+ attr.flags = MLX5_DEVCOM_MATCH_FLAGS_NS;
+ attr.net = mlx5_core_net(dev);
devcom = mlx5_devcom_register_component(dev->priv.devc, MLX5_DEVCOM_SD_GROUP,
- sd->group_id, NULL, dev);
+ &attr, NULL, dev);
if (IS_ERR(devcom))
return PTR_ERR(devcom);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 8517d4e5d5ef..df93625c9dfa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -973,36 +973,14 @@ static void mlx5_pci_close(struct mlx5_core_dev *dev)
mlx5_pci_disable_device(dev);
}
-static void mlx5_register_hca_devcom_comp(struct mlx5_core_dev *dev)
-{
- /* This component is use to sync adding core_dev to lag_dev and to sync
- * changes of mlx5_adev_devices between LAG layer and other layers.
- */
- if (!mlx5_lag_is_supported(dev))
- return;
-
- dev->priv.hca_devcom_comp =
- mlx5_devcom_register_component(dev->priv.devc, MLX5_DEVCOM_HCA_PORTS,
- mlx5_query_nic_system_image_guid(dev),
- NULL, dev);
- if (IS_ERR(dev->priv.hca_devcom_comp))
- mlx5_core_err(dev, "Failed to register devcom HCA component\n");
-}
-
-static void mlx5_unregister_hca_devcom_comp(struct mlx5_core_dev *dev)
-{
- mlx5_devcom_unregister_component(dev->priv.hca_devcom_comp);
-}
-
static int mlx5_init_once(struct mlx5_core_dev *dev)
{
int err;
dev->priv.devc = mlx5_devcom_register_device(dev);
if (IS_ERR(dev->priv.devc))
- mlx5_core_warn(dev, "failed to register devcom device %ld\n",
- PTR_ERR(dev->priv.devc));
- mlx5_register_hca_devcom_comp(dev);
+ mlx5_core_warn(dev, "failed to register devcom device %pe\n",
+ dev->priv.devc);
err = mlx5_query_board_id(dev);
if (err) {
@@ -1140,7 +1118,6 @@ err_eq_cleanup:
err_irq_cleanup:
mlx5_irq_table_cleanup(dev);
err_devcom:
- mlx5_unregister_hca_devcom_comp(dev);
mlx5_devcom_unregister_device(dev->priv.devc);
return err;
@@ -1171,7 +1148,6 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
mlx5_events_cleanup(dev);
mlx5_eq_table_cleanup(dev);
mlx5_irq_table_cleanup(dev);
- mlx5_unregister_hca_devcom_comp(dev);
mlx5_devcom_unregister_device(dev->priv.devc);
}
@@ -1340,10 +1316,9 @@ static int mlx5_load(struct mlx5_core_dev *dev)
{
int err;
- dev->priv.uar = mlx5_get_uars_page(dev);
- if (IS_ERR(dev->priv.uar)) {
- mlx5_core_err(dev, "Failed allocating uar, aborting\n");
- err = PTR_ERR(dev->priv.uar);
+ err = mlx5_alloc_bfreg(dev, &dev->priv.bfreg, false, false);
+ if (err) {
+ mlx5_core_err(dev, "Failed allocating bfreg, %d\n", err);
return err;
}
@@ -1454,7 +1429,7 @@ err_eq_table:
err_irq_table:
mlx5_pagealloc_stop(dev);
mlx5_events_stop(dev);
- mlx5_put_uars_page(dev, dev->priv.uar);
+ mlx5_free_bfreg(dev, &dev->priv.bfreg);
return err;
}
@@ -1479,7 +1454,7 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
mlx5_irq_table_destroy(dev);
mlx5_pagealloc_stop(dev);
mlx5_events_stop(dev);
- mlx5_put_uars_page(dev, dev->priv.uar);
+ mlx5_free_bfreg(dev, &dev->priv.bfreg);
}
int mlx5_init_one_devl_locked(struct mlx5_core_dev *dev)
@@ -1798,6 +1773,7 @@ static const int types[] = {
MLX5_CAP_VDPA_EMULATION,
MLX5_CAP_IPSEC,
MLX5_CAP_PORT_SELECTION,
+ MLX5_CAP_PSP,
MLX5_CAP_MACSEC,
MLX5_CAP_ADV_VIRTUALIZATION,
MLX5_CAP_CRYPTO,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 9d3504f5abfa..082259b56816 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -449,8 +449,6 @@ int mlx5_vport_set_other_func_cap(struct mlx5_core_dev *dev, const void *hca_cap
#define mlx5_vport_get_other_func_general_cap(dev, vport, out) \
mlx5_vport_get_other_func_cap(dev, vport, out, MLX5_CAP_GENERAL)
-int mlx5_vport_get_vhca_id(struct mlx5_core_dev *dev, u16 vport, u16 *vhca_id);
-
static inline u32 mlx5_sriov_get_vf_total_msix(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 9bc9bd83c232..cd68c4b2c0bf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -489,9 +489,12 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
u32 func_id;
u32 npages;
u32 i = 0;
+ int err;
- if (!mlx5_cmd_is_down(dev))
- return mlx5_cmd_do(dev, in, in_size, out, out_size);
+ err = mlx5_cmd_do(dev, in, in_size, out, out_size);
+ /* If FW is gone (-ENXIO), proceed to forceful reclaim */
+ if (err != -ENXIO)
+ return err;
/* No hard feelings, we want our pages back! */
npages = MLX5_GET(manage_pages_in, in, input_num_entries);
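
The shape of the new reclaim flow, as a hedged sketch: the FW command is always
attempted first, and only a dead-FW error falls through to manual reclaim.
sw_force_reclaim() below is a hypothetical stand-in for the forceful path that
follows in reclaim_pages_cmd():

	err = mlx5_cmd_do(dev, in, in_size, out, out_size);
	if (err != -ENXIO)
		return err;	/* success, or a real command failure */
	/* FW is gone: walk the page tree and release the pages ourselves. */
	return sw_force_reclaim(dev, in, out);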
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index 692ef9c2f729..e18a850c615c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -54,7 +54,7 @@ static int mlx5_core_func_to_vport(const struct mlx5_core_dev *dev,
/**
* mlx5_get_default_msix_vec_count - Get the default number of MSI-X vectors
- * to be ssigned to each VF.
+ * to be assigned to each VF.
* @dev: PF to work on
* @num_vfs: Number of enabled VFs
*/
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/diag/dev_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/diag/dev_tracepoint.h
index 0537de86f981..9b0f44253f33 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/diag/dev_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/diag/dev_tracepoint.h
@@ -28,7 +28,7 @@ DECLARE_EVENT_CLASS(mlx5_sf_dev_template,
__entry->hw_fn_id = sfdev->fn_id;
__entry->sfnum = sfdev->sfnum;
),
- TP_printk("(%s) sfdev=%pK aux_id=%d hw_id=0x%x sfnum=%u\n",
+ TP_printk("(%s) sfdev=%p aux_id=%d hw_id=0x%x sfnum=%u\n",
__get_str(devname), __entry->sfdev,
__entry->aux_id, __entry->hw_fn_id,
__entry->sfnum)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
index adeccc588e5d..6ef0c4be27e1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
@@ -51,9 +51,6 @@ static void hws_bwc_matcher_init_attr(struct mlx5hws_bwc_matcher *bwc_matcher,
u8 size_log_rx, u8 size_log_tx,
struct mlx5hws_matcher_attr *attr)
{
- struct mlx5hws_bwc_matcher *first_matcher =
- bwc_matcher->complex_first_bwc_matcher;
-
memset(attr, 0, sizeof(*attr));
attr->priority = priority;
@@ -66,9 +63,6 @@ static void hws_bwc_matcher_init_attr(struct mlx5hws_bwc_matcher *bwc_matcher,
attr->size[MLX5HWS_MATCHER_SIZE_TYPE_TX].rule.num_log = size_log_tx;
attr->resizable = true;
attr->max_num_of_at_attach = MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM;
-
- attr->isolated_matcher_end_ft_id =
- first_matcher ? first_matcher->matcher->end_ft_id : 0;
}
static int
@@ -171,10 +165,16 @@ hws_bwc_matcher_move_all_simple(struct mlx5hws_bwc_matcher *bwc_matcher)
static int hws_bwc_matcher_move_all(struct mlx5hws_bwc_matcher *bwc_matcher)
{
- if (!bwc_matcher->complex)
+ switch (bwc_matcher->matcher_type) {
+ case MLX5HWS_BWC_MATCHER_SIMPLE:
return hws_bwc_matcher_move_all_simple(bwc_matcher);
-
- return mlx5hws_bwc_matcher_move_all_complex(bwc_matcher);
+ case MLX5HWS_BWC_MATCHER_COMPLEX_FIRST:
+ return mlx5hws_bwc_matcher_complex_move_first(bwc_matcher);
+ case MLX5HWS_BWC_MATCHER_COMPLEX_SUBMATCHER:
+ return mlx5hws_bwc_matcher_complex_move(bwc_matcher);
+ default:
+ return -EINVAL;
+ }
}
static int hws_bwc_matcher_move(struct mlx5hws_bwc_matcher *bwc_matcher)
@@ -249,6 +249,7 @@ int mlx5hws_bwc_matcher_create_simple(struct mlx5hws_bwc_matcher *bwc_matcher,
bwc_matcher->tx_size.size_log,
&attr);
+ bwc_matcher->matcher_type = MLX5HWS_BWC_MATCHER_SIMPLE;
bwc_matcher->priority = priority;
bwc_matcher->size_of_at_array = MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM;
@@ -393,7 +394,7 @@ int mlx5hws_bwc_matcher_destroy(struct mlx5hws_bwc_matcher *bwc_matcher)
"BWC matcher destroy: matcher still has %u RX and %u TX rules\n",
rx_rules, tx_rules);
- if (bwc_matcher->complex)
+ if (bwc_matcher->matcher_type == MLX5HWS_BWC_MATCHER_COMPLEX_FIRST)
mlx5hws_bwc_matcher_destroy_complex(bwc_matcher);
else
mlx5hws_bwc_matcher_destroy_simple(bwc_matcher);
@@ -651,7 +652,8 @@ int mlx5hws_bwc_rule_destroy_simple(struct mlx5hws_bwc_rule *bwc_rule)
int mlx5hws_bwc_rule_destroy(struct mlx5hws_bwc_rule *bwc_rule)
{
- bool is_complex = !!bwc_rule->bwc_matcher->complex;
+ bool is_complex = bwc_rule->bwc_matcher->matcher_type ==
+ MLX5HWS_BWC_MATCHER_COMPLEX_FIRST;
int ret = 0;
if (is_complex)
@@ -1147,7 +1149,7 @@ mlx5hws_bwc_rule_create(struct mlx5hws_bwc_matcher *bwc_matcher,
bwc_queue_idx = hws_bwc_gen_queue_idx(ctx);
- if (bwc_matcher->complex)
+ if (bwc_matcher->matcher_type == MLX5HWS_BWC_MATCHER_COMPLEX_FIRST)
ret = mlx5hws_bwc_rule_create_complex(bwc_rule,
params,
flow_source,
@@ -1216,10 +1218,9 @@ int mlx5hws_bwc_rule_action_update(struct mlx5hws_bwc_rule *bwc_rule,
return -EINVAL;
}
- /* For complex rule, the update should happen on the second matcher */
- if (bwc_rule->isolated_bwc_rule)
- return hws_bwc_rule_action_update(bwc_rule->isolated_bwc_rule,
- rule_actions);
- else
- return hws_bwc_rule_action_update(bwc_rule, rule_actions);
+ /* For complex rules, the update should happen on the last subrule. */
+ while (bwc_rule->next_subrule)
+ bwc_rule = bwc_rule->next_subrule;
+
+ return hws_bwc_rule_action_update(bwc_rule, rule_actions);
}
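
A hedged note on the walk above: subrules form a singly linked chain via
next_subrule, and only the last subrule in the chain carries the user-visible
actions (earlier subrules only set register C6 and jump onward), so action
updates must land there. As a standalone sketch, with a hypothetical helper
name:

	static struct mlx5hws_bwc_rule *
	hws_last_subrule(struct mlx5hws_bwc_rule *rule)
	{
		/* Follow the chain until the subrule with the user actions. */
		while (rule->next_subrule)
			rule = rule->next_subrule;
		return rule;
	}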
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h
index af391d70c14f..b905511f5c53 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h
@@ -18,6 +18,21 @@
#define MLX5HWS_BWC_POLLING_TIMEOUT 60
+enum mlx5hws_bwc_matcher_type {
+ /* Standalone bwc matcher. */
+ MLX5HWS_BWC_MATCHER_SIMPLE,
+ /* The first matcher of a complex matcher. When rules are inserted into
+ * a matcher of this type, they are split into subrules and inserted
+ * into their corresponding submatchers.
+ */
+ MLX5HWS_BWC_MATCHER_COMPLEX_FIRST,
+ /* A submatcher that is part of a complex matcher. For most purposes
+ * these are treated as simple matchers, except when it comes to moving
+ * rules during resize.
+ */
+ MLX5HWS_BWC_MATCHER_COMPLEX_SUBMATCHER,
+};
+
struct mlx5hws_bwc_matcher_complex_data;
struct mlx5hws_bwc_matcher_size {
@@ -31,9 +46,9 @@ struct mlx5hws_bwc_matcher {
struct mlx5hws_match_template *mt;
struct mlx5hws_action_template **at;
struct mlx5hws_bwc_matcher_complex_data *complex;
- struct mlx5hws_bwc_matcher *complex_first_bwc_matcher;
u8 num_of_at;
u8 size_of_at_array;
+ enum mlx5hws_bwc_matcher_type matcher_type;
u32 priority;
struct mlx5hws_bwc_matcher_size rx_size;
struct mlx5hws_bwc_matcher_size tx_size;
@@ -43,8 +58,8 @@ struct mlx5hws_bwc_matcher {
struct mlx5hws_bwc_rule {
struct mlx5hws_bwc_matcher *bwc_matcher;
struct mlx5hws_rule *rule;
- struct mlx5hws_bwc_rule *isolated_bwc_rule;
- struct mlx5hws_bwc_complex_rule_hash_node *complex_hash_node;
+ struct mlx5hws_bwc_rule *next_subrule;
+ struct mlx5hws_bwc_complex_subrule_data *subrule_data;
u32 flow_source;
u16 bwc_queue_idx;
bool skip_rx;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c
index 14e79579c719..660630f18ce9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c
@@ -3,25 +3,27 @@
#include "internal.h"
-#define HWS_CLEAR_MATCH_PARAM(mask, field) \
- MLX5_SET(fte_match_param, (mask)->match_buf, field, 0)
-
-#define HWS_SZ_MATCH_PARAM (MLX5_ST_SZ_DW_MATCH_PARAM * 4)
-
-static const struct rhashtable_params hws_refcount_hash = {
- .key_len = sizeof_field(struct mlx5hws_bwc_complex_rule_hash_node,
- match_buf),
- .key_offset = offsetof(struct mlx5hws_bwc_complex_rule_hash_node,
- match_buf),
- .head_offset = offsetof(struct mlx5hws_bwc_complex_rule_hash_node,
- hash_node),
- .automatic_shrinking = true,
- .min_size = 1,
+/* We chain submatchers by applying three actions on a subrule: modify header
+ * (to set register C6), jump to table (to the next submatcher) and the
+ * mandatory last action.
+ */
+#define HWS_NUM_CHAIN_ACTIONS 3
+
+static const struct rhashtable_params hws_rules_hash_params = {
+ .key_len = sizeof_field(struct mlx5hws_bwc_complex_subrule_data,
+ match_tag),
+ .key_offset =
+ offsetof(struct mlx5hws_bwc_complex_subrule_data, match_tag),
+ .head_offset =
+ offsetof(struct mlx5hws_bwc_complex_subrule_data, hash_node),
+	.automatic_shrinking = true,
+	.min_size = 1,
};
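
To make the chaining comment above concrete, here is a hedged sketch of how a
complex rule's submatchers relate (the layout is illustrative, not the
driver's exact structures):

	/*
	 * submatcher 0 (original table):
	 *     match:   mask subset 0
	 *     actions: set REG_C_6 = chain_id; goto submatcher 1 table; last
	 * submatchers 1 .. n-2 (isolated tables):
	 *     match:   mask subset i, REG_C_6 == chain_id
	 *     actions: set REG_C_6 = chain_id; goto next table; last
	 * submatcher n-1 (isolated table):
	 *     match:   last mask subset, REG_C_6 == chain_id
	 *     actions: the user's rule actions
	 */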
-bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx,
- u8 match_criteria_enable,
- struct mlx5hws_match_parameters *mask)
+static bool
+hws_match_params_exceeds_definer(struct mlx5hws_context *ctx,
+ u8 match_criteria_enable,
+ struct mlx5hws_match_parameters *mask,
+ bool allow_jumbo)
{
struct mlx5hws_definer match_layout = {0};
struct mlx5hws_match_template *mt;
@@ -36,11 +38,11 @@ bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx,
mask->match_sz,
match_criteria_enable);
if (!mt) {
- mlx5hws_err(ctx, "BWC: failed creating match template\n");
+ mlx5hws_err(ctx, "Complex matcher: failed creating match template\n");
return false;
}
- ret = mlx5hws_definer_calc_layout(ctx, mt, &match_layout);
+ ret = mlx5hws_definer_calc_layout(ctx, mt, &match_layout, allow_jumbo);
if (ret) {
/* The only case that we're interested in is E2BIG,
* which means that the match parameters need to be
@@ -64,825 +66,481 @@ bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx,
return is_complex;
}
-static void
-hws_bwc_matcher_complex_params_clear_fld(struct mlx5hws_context *ctx,
- enum mlx5hws_definer_fname fname,
+bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx,
+ u8 match_criteria_enable,
struct mlx5hws_match_parameters *mask)
{
- struct mlx5hws_cmd_query_caps *caps = ctx->caps;
-
- switch (fname) {
- case MLX5HWS_DEFINER_FNAME_ETH_TYPE_O:
- case MLX5HWS_DEFINER_FNAME_ETH_TYPE_I:
- case MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_O:
- case MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_I:
- case MLX5HWS_DEFINER_FNAME_IP_VERSION_O:
- case MLX5HWS_DEFINER_FNAME_IP_VERSION_I:
- /* Because of the strict requirements for IP address matching
- * that require ethtype/ip_version matching as well, don't clear
- * these fields - have them in both parts of the complex matcher
- */
- break;
- case MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_O:
- HWS_CLEAR_MATCH_PARAM(mask, outer_headers.smac_47_16);
- break;
- case MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_I:
- HWS_CLEAR_MATCH_PARAM(mask, inner_headers.smac_47_16);
- break;
- case MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_O:
- HWS_CLEAR_MATCH_PARAM(mask, outer_headers.smac_15_0);
- break;
- case MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_I:
- HWS_CLEAR_MATCH_PARAM(mask, inner_headers.smac_15_0);
- break;
- case MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_O:
- HWS_CLEAR_MATCH_PARAM(mask, outer_headers.dmac_47_16);
- break;
- case MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_I:
- HWS_CLEAR_MATCH_PARAM(mask, inner_headers.dmac_47_16);
- break;
- case MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_O:
- HWS_CLEAR_MATCH_PARAM(mask, outer_headers.dmac_15_0);
- break;
- case MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_I:
- HWS_CLEAR_MATCH_PARAM(mask, inner_headers.dmac_15_0);
- break;
- case MLX5HWS_DEFINER_FNAME_VLAN_TYPE_O:
- HWS_CLEAR_MATCH_PARAM(mask, outer_headers.cvlan_tag);
- HWS_CLEAR_MATCH_PARAM(mask, outer_headers.svlan_tag);
- break;
- case MLX5HWS_DEFINER_FNAME_VLAN_TYPE_I:
- HWS_CLEAR_MATCH_PARAM(mask, inner_headers.cvlan_tag);
- HWS_CLEAR_MATCH_PARAM(mask, inner_headers.svlan_tag);
- break;
- case MLX5HWS_DEFINER_FNAME_VLAN_FIRST_PRIO_O:
- HWS_CLEAR_MATCH_PARAM(mask, outer_headers.first_prio);
- break;
- case MLX5HWS_DEFINER_FNAME_VLAN_FIRST_PRIO_I:
- HWS_CLEAR_MATCH_PARAM(mask, inner_headers.first_prio);
- break;
- case MLX5HWS_DEFINER_FNAME_VLAN_CFI_O:
- HWS_CLEAR_MATCH_PARAM(mask, outer_headers.first_cfi);
- break;
- case MLX5HWS_DEFINER_FNAME_VLAN_CFI_I:
- HWS_CLEAR_MATCH_PARAM(mask, inner_headers.first_cfi);
- break;
- case MLX5HWS_DEFINER_FNAME_VLAN_ID_O:
- HWS_CLEAR_MATCH_PARAM(mask, outer_headers.first_vid);
- break;
- case MLX5HWS_DEFINER_FNAME_VLAN_ID_I:
- HWS_CLEAR_MATCH_PARAM(mask, inner_headers.first_vid);
- break;
- case MLX5HWS_DEFINER_FNAME_VLAN_SECOND_TYPE_O:
- HWS_CLEAR_MATCH_PARAM(mask,
- misc_parameters.outer_second_cvlan_tag);
- HWS_CLEAR_MATCH_PARAM(mask,
- misc_parameters.outer_second_svlan_tag);
- break;
- case MLX5HWS_DEFINER_FNAME_VLAN_SECOND_TYPE_I:
- HWS_CLEAR_MATCH_PARAM(mask,
- misc_parameters.inner_second_cvlan_tag);
- HWS_CLEAR_MATCH_PARAM(mask,
- misc_parameters.inner_second_svlan_tag);
- break;
- case MLX5HWS_DEFINER_FNAME_VLAN_SECOND_PRIO_O:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.outer_second_prio);
- break;
- case MLX5HWS_DEFINER_FNAME_VLAN_SECOND_PRIO_I:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.inner_second_prio);
- break;
- case MLX5HWS_DEFINER_FNAME_VLAN_SECOND_CFI_O:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.outer_second_cfi);
- break;
- case MLX5HWS_DEFINER_FNAME_VLAN_SECOND_CFI_I:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.inner_second_cfi);
- break;
- case MLX5HWS_DEFINER_FNAME_VLAN_SECOND_ID_O:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.outer_second_vid);
- break;
- case MLX5HWS_DEFINER_FNAME_VLAN_SECOND_ID_I:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.inner_second_vid);
- break;
- case MLX5HWS_DEFINER_FNAME_IPV4_IHL_O:
- HWS_CLEAR_MATCH_PARAM(mask, outer_headers.ipv4_ihl);
- break;
- case MLX5HWS_DEFINER_FNAME_IPV4_IHL_I:
- HWS_CLEAR_MATCH_PARAM(mask, inner_headers.ipv4_ihl);
- break;
- case MLX5HWS_DEFINER_FNAME_IP_DSCP_O:
- HWS_CLEAR_MATCH_PARAM(mask, outer_headers.ip_dscp);
- break;
- case MLX5HWS_DEFINER_FNAME_IP_DSCP_I:
- HWS_CLEAR_MATCH_PARAM(mask, inner_headers.ip_dscp);
- break;
- case MLX5HWS_DEFINER_FNAME_IP_ECN_O:
- HWS_CLEAR_MATCH_PARAM(mask, outer_headers.ip_ecn);
- break;
- case MLX5HWS_DEFINER_FNAME_IP_ECN_I:
- HWS_CLEAR_MATCH_PARAM(mask, inner_headers.ip_ecn);
- break;
- case MLX5HWS_DEFINER_FNAME_IP_TTL_O:
- HWS_CLEAR_MATCH_PARAM(mask, outer_headers.ttl_hoplimit);
- break;
- case MLX5HWS_DEFINER_FNAME_IP_TTL_I:
- HWS_CLEAR_MATCH_PARAM(mask, inner_headers.ttl_hoplimit);
- break;
- case MLX5HWS_DEFINER_FNAME_IPV4_DST_O:
- HWS_CLEAR_MATCH_PARAM(mask,
- outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0);
- break;
- case MLX5HWS_DEFINER_FNAME_IPV4_SRC_O:
- HWS_CLEAR_MATCH_PARAM(mask,
- outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0);
- break;
- case MLX5HWS_DEFINER_FNAME_IPV4_DST_I:
- HWS_CLEAR_MATCH_PARAM(mask,
- inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0);
- break;
- case MLX5HWS_DEFINER_FNAME_IPV4_SRC_I:
- HWS_CLEAR_MATCH_PARAM(mask,
- inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0);
- break;
- case MLX5HWS_DEFINER_FNAME_IP_FRAG_O:
- HWS_CLEAR_MATCH_PARAM(mask, outer_headers.frag);
- break;
- case MLX5HWS_DEFINER_FNAME_IP_FRAG_I:
- HWS_CLEAR_MATCH_PARAM(mask, inner_headers.frag);
- break;
- case MLX5HWS_DEFINER_FNAME_IPV6_FLOW_LABEL_O:
- HWS_CLEAR_MATCH_PARAM(mask,
- misc_parameters.outer_ipv6_flow_label);
- break;
- case MLX5HWS_DEFINER_FNAME_IPV6_FLOW_LABEL_I:
- HWS_CLEAR_MATCH_PARAM(mask,
- misc_parameters.inner_ipv6_flow_label);
- break;
- case MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_O:
- case MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_O:
- case MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_O:
- case MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_O:
- HWS_CLEAR_MATCH_PARAM(mask,
- outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_127_96);
- HWS_CLEAR_MATCH_PARAM(mask,
- outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_95_64);
- HWS_CLEAR_MATCH_PARAM(mask,
- outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_63_32);
- HWS_CLEAR_MATCH_PARAM(mask,
- outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0);
- break;
- case MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_O:
- case MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_O:
- case MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_O:
- case MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_O:
- HWS_CLEAR_MATCH_PARAM(mask,
- outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_127_96);
- HWS_CLEAR_MATCH_PARAM(mask,
- outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_95_64);
- HWS_CLEAR_MATCH_PARAM(mask,
- outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_63_32);
- HWS_CLEAR_MATCH_PARAM(mask,
- outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0);
- break;
- case MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_I:
- case MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_I:
- case MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_I:
- case MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_I:
- HWS_CLEAR_MATCH_PARAM(mask,
- inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_127_96);
- HWS_CLEAR_MATCH_PARAM(mask,
- inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_95_64);
- HWS_CLEAR_MATCH_PARAM(mask,
- inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_63_32);
- HWS_CLEAR_MATCH_PARAM(mask,
- inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0);
- break;
- case MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_I:
- case MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_I:
- case MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_I:
- case MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_I:
- HWS_CLEAR_MATCH_PARAM(mask,
- inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_127_96);
- HWS_CLEAR_MATCH_PARAM(mask,
- inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_95_64);
- HWS_CLEAR_MATCH_PARAM(mask,
- inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_63_32);
- HWS_CLEAR_MATCH_PARAM(mask,
- inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0);
- break;
- case MLX5HWS_DEFINER_FNAME_IP_PROTOCOL_O:
- HWS_CLEAR_MATCH_PARAM(mask, outer_headers.ip_protocol);
- break;
- case MLX5HWS_DEFINER_FNAME_IP_PROTOCOL_I:
- HWS_CLEAR_MATCH_PARAM(mask, inner_headers.ip_protocol);
- break;
- case MLX5HWS_DEFINER_FNAME_L4_SPORT_O:
- HWS_CLEAR_MATCH_PARAM(mask, outer_headers.tcp_sport);
- HWS_CLEAR_MATCH_PARAM(mask, outer_headers.udp_sport);
- break;
- case MLX5HWS_DEFINER_FNAME_L4_SPORT_I:
- HWS_CLEAR_MATCH_PARAM(mask, inner_headers.tcp_dport);
- HWS_CLEAR_MATCH_PARAM(mask, inner_headers.udp_dport);
- break;
- case MLX5HWS_DEFINER_FNAME_L4_DPORT_O:
- HWS_CLEAR_MATCH_PARAM(mask, outer_headers.tcp_dport);
- HWS_CLEAR_MATCH_PARAM(mask, outer_headers.udp_dport);
- break;
- case MLX5HWS_DEFINER_FNAME_L4_DPORT_I:
- HWS_CLEAR_MATCH_PARAM(mask, inner_headers.tcp_dport);
- HWS_CLEAR_MATCH_PARAM(mask, inner_headers.udp_dport);
- break;
- case MLX5HWS_DEFINER_FNAME_TCP_FLAGS_O:
- HWS_CLEAR_MATCH_PARAM(mask, outer_headers.tcp_flags);
- break;
- case MLX5HWS_DEFINER_FNAME_TCP_ACK_NUM:
- case MLX5HWS_DEFINER_FNAME_TCP_SEQ_NUM:
- HWS_CLEAR_MATCH_PARAM(mask,
- misc_parameters_3.outer_tcp_seq_num);
- HWS_CLEAR_MATCH_PARAM(mask,
- misc_parameters_3.outer_tcp_ack_num);
- HWS_CLEAR_MATCH_PARAM(mask,
- misc_parameters_3.inner_tcp_seq_num);
- HWS_CLEAR_MATCH_PARAM(mask,
- misc_parameters_3.inner_tcp_ack_num);
- break;
- case MLX5HWS_DEFINER_FNAME_GTP_TEID:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.gtpu_teid);
- break;
- case MLX5HWS_DEFINER_FNAME_GTP_MSG_TYPE:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.gtpu_msg_type);
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.gtpu_msg_flags);
- break;
- case MLX5HWS_DEFINER_FNAME_GTPU_FIRST_EXT_DW0:
- HWS_CLEAR_MATCH_PARAM(mask,
- misc_parameters_3.gtpu_first_ext_dw_0);
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.gtpu_dw_0);
- break;
- case MLX5HWS_DEFINER_FNAME_GTPU_DW2:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.gtpu_dw_2);
- break;
- case MLX5HWS_DEFINER_FNAME_FLEX_PARSER_0:
- case MLX5HWS_DEFINER_FNAME_FLEX_PARSER_1:
- case MLX5HWS_DEFINER_FNAME_FLEX_PARSER_2:
- case MLX5HWS_DEFINER_FNAME_FLEX_PARSER_3:
- case MLX5HWS_DEFINER_FNAME_FLEX_PARSER_4:
- case MLX5HWS_DEFINER_FNAME_FLEX_PARSER_5:
- case MLX5HWS_DEFINER_FNAME_FLEX_PARSER_6:
- case MLX5HWS_DEFINER_FNAME_FLEX_PARSER_7:
- HWS_CLEAR_MATCH_PARAM(mask,
- misc_parameters_2.outer_first_mpls_over_gre);
- HWS_CLEAR_MATCH_PARAM(mask,
- misc_parameters_2.outer_first_mpls_over_udp);
- HWS_CLEAR_MATCH_PARAM(mask,
- misc_parameters_3.geneve_tlv_option_0_data);
- HWS_CLEAR_MATCH_PARAM(mask,
- misc_parameters_4.prog_sample_field_id_0);
- HWS_CLEAR_MATCH_PARAM(mask,
- misc_parameters_4.prog_sample_field_value_0);
- HWS_CLEAR_MATCH_PARAM(mask,
- misc_parameters_4.prog_sample_field_value_1);
- HWS_CLEAR_MATCH_PARAM(mask,
- misc_parameters_4.prog_sample_field_id_2);
- HWS_CLEAR_MATCH_PARAM(mask,
- misc_parameters_4.prog_sample_field_value_2);
- HWS_CLEAR_MATCH_PARAM(mask,
- misc_parameters_4.prog_sample_field_id_3);
- HWS_CLEAR_MATCH_PARAM(mask,
- misc_parameters_4.prog_sample_field_value_3);
- break;
- case MLX5HWS_DEFINER_FNAME_VXLAN_VNI:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.vxlan_vni);
- break;
- case MLX5HWS_DEFINER_FNAME_VXLAN_GPE_FLAGS:
- HWS_CLEAR_MATCH_PARAM(mask,
- misc_parameters_3.outer_vxlan_gpe_flags);
- break;
- case MLX5HWS_DEFINER_FNAME_VXLAN_GPE_RSVD0:
- break;
- case MLX5HWS_DEFINER_FNAME_VXLAN_GPE_PROTO:
- HWS_CLEAR_MATCH_PARAM(mask,
- misc_parameters_3.outer_vxlan_gpe_next_protocol);
- break;
- case MLX5HWS_DEFINER_FNAME_VXLAN_GPE_VNI:
- HWS_CLEAR_MATCH_PARAM(mask,
- misc_parameters_3.outer_vxlan_gpe_vni);
- break;
- case MLX5HWS_DEFINER_FNAME_GENEVE_OPT_LEN:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.geneve_opt_len);
- break;
- case MLX5HWS_DEFINER_FNAME_GENEVE_OAM:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.geneve_oam);
- break;
- case MLX5HWS_DEFINER_FNAME_GENEVE_PROTO:
- HWS_CLEAR_MATCH_PARAM(mask,
- misc_parameters.geneve_protocol_type);
- break;
- case MLX5HWS_DEFINER_FNAME_GENEVE_VNI:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.geneve_vni);
- break;
- case MLX5HWS_DEFINER_FNAME_SOURCE_QP:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.source_sqn);
- break;
- case MLX5HWS_DEFINER_FNAME_SOURCE_GVMI:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.source_port);
- HWS_CLEAR_MATCH_PARAM(mask,
- misc_parameters.source_eswitch_owner_vhca_id);
- break;
- case MLX5HWS_DEFINER_FNAME_REG_0:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.metadata_reg_c_0);
- break;
- case MLX5HWS_DEFINER_FNAME_REG_1:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.metadata_reg_c_1);
- break;
- case MLX5HWS_DEFINER_FNAME_REG_2:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.metadata_reg_c_2);
- break;
- case MLX5HWS_DEFINER_FNAME_REG_3:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.metadata_reg_c_3);
- break;
- case MLX5HWS_DEFINER_FNAME_REG_4:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.metadata_reg_c_4);
- break;
- case MLX5HWS_DEFINER_FNAME_REG_5:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.metadata_reg_c_5);
- break;
- case MLX5HWS_DEFINER_FNAME_REG_7:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.metadata_reg_c_7);
- break;
- case MLX5HWS_DEFINER_FNAME_REG_A:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.metadata_reg_a);
- break;
- case MLX5HWS_DEFINER_FNAME_GRE_C:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.gre_c_present);
- break;
- case MLX5HWS_DEFINER_FNAME_GRE_K:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.gre_k_present);
- break;
- case MLX5HWS_DEFINER_FNAME_GRE_S:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.gre_s_present);
- break;
- case MLX5HWS_DEFINER_FNAME_GRE_PROTOCOL:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.gre_protocol);
- break;
- case MLX5HWS_DEFINER_FNAME_GRE_OPT_KEY:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.gre_key.key);
- break;
- case MLX5HWS_DEFINER_FNAME_ICMP_DW1:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.icmp_header_data);
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.icmp_type);
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.icmp_code);
- HWS_CLEAR_MATCH_PARAM(mask,
- misc_parameters_3.icmpv6_header_data);
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.icmpv6_type);
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.icmpv6_code);
- break;
- case MLX5HWS_DEFINER_FNAME_MPLS0_O:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.outer_first_mpls);
- break;
- case MLX5HWS_DEFINER_FNAME_MPLS0_I:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.inner_first_mpls);
- break;
- case MLX5HWS_DEFINER_FNAME_TNL_HDR_0:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_5.tunnel_header_0);
- break;
- case MLX5HWS_DEFINER_FNAME_TNL_HDR_1:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_5.tunnel_header_1);
- break;
- case MLX5HWS_DEFINER_FNAME_TNL_HDR_2:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_5.tunnel_header_2);
- break;
- case MLX5HWS_DEFINER_FNAME_TNL_HDR_3:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_5.tunnel_header_3);
- break;
- case MLX5HWS_DEFINER_FNAME_FLEX_PARSER0_OK:
- case MLX5HWS_DEFINER_FNAME_FLEX_PARSER1_OK:
- case MLX5HWS_DEFINER_FNAME_FLEX_PARSER2_OK:
- case MLX5HWS_DEFINER_FNAME_FLEX_PARSER3_OK:
- case MLX5HWS_DEFINER_FNAME_FLEX_PARSER4_OK:
- case MLX5HWS_DEFINER_FNAME_FLEX_PARSER5_OK:
- case MLX5HWS_DEFINER_FNAME_FLEX_PARSER6_OK:
- case MLX5HWS_DEFINER_FNAME_FLEX_PARSER7_OK:
- /* assuming this is flex parser for geneve option */
- if ((fname == MLX5HWS_DEFINER_FNAME_FLEX_PARSER0_OK &&
- ctx->caps->flex_parser_id_geneve_tlv_option_0 != 0) ||
- (fname == MLX5HWS_DEFINER_FNAME_FLEX_PARSER1_OK &&
- ctx->caps->flex_parser_id_geneve_tlv_option_0 != 1) ||
- (fname == MLX5HWS_DEFINER_FNAME_FLEX_PARSER2_OK &&
- ctx->caps->flex_parser_id_geneve_tlv_option_0 != 2) ||
- (fname == MLX5HWS_DEFINER_FNAME_FLEX_PARSER3_OK &&
- ctx->caps->flex_parser_id_geneve_tlv_option_0 != 3) ||
- (fname == MLX5HWS_DEFINER_FNAME_FLEX_PARSER4_OK &&
- ctx->caps->flex_parser_id_geneve_tlv_option_0 != 4) ||
- (fname == MLX5HWS_DEFINER_FNAME_FLEX_PARSER5_OK &&
- ctx->caps->flex_parser_id_geneve_tlv_option_0 != 5) ||
- (fname == MLX5HWS_DEFINER_FNAME_FLEX_PARSER6_OK &&
- ctx->caps->flex_parser_id_geneve_tlv_option_0 != 6) ||
- (fname == MLX5HWS_DEFINER_FNAME_FLEX_PARSER7_OK &&
- ctx->caps->flex_parser_id_geneve_tlv_option_0 != 7)) {
- mlx5hws_err(ctx,
- "Complex params: unsupported field %s (%d), flex parser ID for geneve is %d\n",
- mlx5hws_definer_fname_to_str(fname), fname,
- caps->flex_parser_id_geneve_tlv_option_0);
- break;
- }
- HWS_CLEAR_MATCH_PARAM(mask,
- misc_parameters.geneve_tlv_option_0_exist);
- break;
- case MLX5HWS_DEFINER_FNAME_REG_6:
- default:
- mlx5hws_err(ctx, "Complex params: unsupported field %s (%d)\n",
- mlx5hws_definer_fname_to_str(fname), fname);
- break;
- }
+ return hws_match_params_exceeds_definer(ctx, match_criteria_enable,
+ mask, true);
}
-static bool
-hws_bwc_matcher_complex_params_comb_is_valid(struct mlx5hws_definer_fc *fc,
- int fc_sz,
- u32 combination_num)
+static int
+hws_get_last_set_dword_idx(const struct mlx5hws_match_parameters *mask)
{
- bool m1[MLX5HWS_DEFINER_FNAME_MAX] = {0};
- bool m2[MLX5HWS_DEFINER_FNAME_MAX] = {0};
- bool is_first_matcher;
int i;
- for (i = 0; i < fc_sz; i++) {
- is_first_matcher = !(combination_num & BIT(i));
- if (is_first_matcher)
- m1[fc[i].fname] = true;
- else
- m2[fc[i].fname] = true;
- }
-
- /* Not all the fields can be split into separate matchers.
- * Some should be together on the same matcher.
- * For example, IPv6 parts - the whole IPv6 address should be on the
- * same matcher in order for us to deduce if it's IPv6 or IPv4 address.
- */
- if (m1[MLX5HWS_DEFINER_FNAME_IP_FRAG_O] &&
- (m2[MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_O] ||
- m2[MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_O] ||
- m2[MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_O] ||
- m2[MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_O]))
- return false;
-
- if (m2[MLX5HWS_DEFINER_FNAME_IP_FRAG_O] &&
- (m1[MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_O] ||
- m1[MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_O] ||
- m1[MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_O] ||
- m1[MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_O]))
- return false;
+ for (i = mask->match_sz / 4 - 1; i >= 0; i--)
+ if (mask->match_buf[i])
+ return i;
- if (m1[MLX5HWS_DEFINER_FNAME_IP_FRAG_I] &&
- (m2[MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_I] ||
- m2[MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_I] ||
- m2[MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_I] ||
- m2[MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_I]))
- return false;
+ return -1;
+}
- if (m2[MLX5HWS_DEFINER_FNAME_IP_FRAG_I] &&
- (m1[MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_I] ||
- m1[MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_I] ||
- m1[MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_I] ||
- m1[MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_I]))
- return false;
+static bool hws_match_mask_is_empty(const struct mlx5hws_match_parameters *mask)
+{
+ return hws_get_last_set_dword_idx(mask) == -1;
+}
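
A brief worked example for the two helpers above (values illustrative): for a
mask whose buffer holds { 0x0, 0xffff0000, 0x0, 0x0 }, the backward scan in
hws_get_last_set_dword_idx() returns index 1; for an all-zero buffer it
returns -1, which is exactly the condition hws_match_mask_is_empty() tests.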
- /* Don't split outer IPv6 dest address. */
- if ((m1[MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_O] ||
- m1[MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_O] ||
- m1[MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_O] ||
- m1[MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_O]) &&
- (m2[MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_O] ||
- m2[MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_O] ||
- m2[MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_O] ||
- m2[MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_O]))
- return false;
+static bool hws_dword_is_inner_ipaddr_off(int dword_off)
+{
+ /* IPv4 and IPv6 addresses share the same entry via a union, and the
+ * source and dest addresses are contiguous in the fte_match_param. So
+	 * we need to check 8 dwords (two 4-dword addresses).
+ */
+ static const int inner_ip_dword_off =
+ __mlx5_dw_off(fte_match_param, inner_headers.src_ipv4_src_ipv6);
- /* Don't split outer IPv6 source address. */
- if ((m1[MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_O] ||
- m1[MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_O] ||
- m1[MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_O] ||
- m1[MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_O]) &&
- (m2[MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_O] ||
- m2[MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_O] ||
- m2[MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_O] ||
- m2[MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_O]))
- return false;
+ return dword_off >= inner_ip_dword_off &&
+ dword_off < inner_ip_dword_off + 8;
+}
- /* Don't split inner IPv6 dest address. */
- if ((m1[MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_I] ||
- m1[MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_I] ||
- m1[MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_I] ||
- m1[MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_I]) &&
- (m2[MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_I] ||
- m2[MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_I] ||
- m2[MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_I] ||
- m2[MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_I]))
- return false;
+static bool hws_dword_is_outer_ipaddr_off(int dword_off)
+{
+ static const int outer_ip_dword_off =
+ __mlx5_dw_off(fte_match_param, outer_headers.src_ipv4_src_ipv6);
- /* Don't split inner IPv6 source address. */
- if ((m1[MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_I] ||
- m1[MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_I] ||
- m1[MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_I] ||
- m1[MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_I]) &&
- (m2[MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_I] ||
- m2[MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_I] ||
- m2[MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_I] ||
- m2[MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_I]))
- return false;
+ return dword_off >= outer_ip_dword_off &&
+ dword_off < outer_ip_dword_off + 8;
+}
- /* Don't split GRE parameters. */
- if ((m1[MLX5HWS_DEFINER_FNAME_GRE_C] ||
- m1[MLX5HWS_DEFINER_FNAME_GRE_K] ||
- m1[MLX5HWS_DEFINER_FNAME_GRE_S] ||
- m1[MLX5HWS_DEFINER_FNAME_GRE_PROTOCOL]) &&
- (m2[MLX5HWS_DEFINER_FNAME_GRE_C] ||
- m2[MLX5HWS_DEFINER_FNAME_GRE_K] ||
- m2[MLX5HWS_DEFINER_FNAME_GRE_S] ||
- m2[MLX5HWS_DEFINER_FNAME_GRE_PROTOCOL]))
- return false;
+static void hws_add_dword_to_mask(struct mlx5hws_match_parameters *mask,
+ const struct mlx5hws_match_parameters *orig,
+ int dword_idx, bool *added_inner_ipv,
+ bool *added_outer_ipv)
+{
+ mask->match_buf[dword_idx] |= orig->match_buf[dword_idx];
- /* Don't split TCP ack/seq numbers. */
- if ((m1[MLX5HWS_DEFINER_FNAME_TCP_ACK_NUM] ||
- m1[MLX5HWS_DEFINER_FNAME_TCP_SEQ_NUM]) &&
- (m2[MLX5HWS_DEFINER_FNAME_TCP_ACK_NUM] ||
- m2[MLX5HWS_DEFINER_FNAME_TCP_SEQ_NUM]))
- return false;
+ *added_inner_ipv = false;
+ *added_outer_ipv = false;
- /* Don't split flex parser. */
- if ((m1[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_0] ||
- m1[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_1] ||
- m1[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_2] ||
- m1[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_3] ||
- m1[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_4] ||
- m1[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_5] ||
- m1[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_6] ||
- m1[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_7]) &&
- (m2[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_0] ||
- m2[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_1] ||
- m2[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_2] ||
- m2[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_3] ||
- m2[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_4] ||
- m2[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_5] ||
- m2[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_6] ||
- m2[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_7]))
- return false;
+ /* Any IP address fragment must be accompanied by a match on IP version.
+	 * Use the `added_ipv` variables to keep track of whether we added IP
+	 * versions specifically for this dword, so that we can roll them back
+	 * if the match params become too large to fit into a definer.
+ */
+ if (hws_dword_is_inner_ipaddr_off(dword_idx) &&
+ !MLX5_GET(fte_match_param, mask->match_buf,
+ inner_headers.ip_version)) {
+ MLX5_SET(fte_match_param, mask->match_buf,
+ inner_headers.ip_version, 0xf);
+ *added_inner_ipv = true;
+ }
+ if (hws_dword_is_outer_ipaddr_off(dword_idx) &&
+ !MLX5_GET(fte_match_param, mask->match_buf,
+ outer_headers.ip_version)) {
+ MLX5_SET(fte_match_param, mask->match_buf,
+ outer_headers.ip_version, 0xf);
+ *added_outer_ipv = true;
+ }
+}
- return true;
+static void hws_remove_dword_from_mask(struct mlx5hws_match_parameters *mask,
+ int dword_idx, bool added_inner_ipv,
+ bool added_outer_ipv)
+{
+ mask->match_buf[dword_idx] = 0;
+ if (added_inner_ipv)
+ MLX5_SET(fte_match_param, mask->match_buf,
+ inner_headers.ip_version, 0);
+ if (added_outer_ipv)
+ MLX5_SET(fte_match_param, mask->match_buf,
+ outer_headers.ip_version, 0);
}
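
A hedged worked example of the add/remove pair above: if the dword holding an
outer destination address is pulled into the submask while
outer_headers.ip_version is not yet matched, hws_add_dword_to_mask() also sets
ip_version to 0xf and reports added_outer_ipv = true; should the enlarged
submask then overflow the definer, hws_remove_dword_from_mask() clears both
the address dword and the auto-added version match, restoring the submask
exactly.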
-static void
-hws_bwc_matcher_complex_params_comb_create(struct mlx5hws_context *ctx,
- struct mlx5hws_match_parameters *m,
- struct mlx5hws_match_parameters *m1,
- struct mlx5hws_match_parameters *m2,
- struct mlx5hws_definer_fc *fc,
- int fc_sz,
- u32 combination_num)
+/* Avoid leaving a single lower dword in `mask` if there are others present in
+ * `orig`. Splitting IPv6 addresses like this causes them to be interpreted as
+ * IPv4.
+ */
+static void hws_avoid_ipv6_split_of(struct mlx5hws_match_parameters *orig,
+ struct mlx5hws_match_parameters *mask,
+ int off)
{
- bool is_first_matcher;
- int i;
+	/* Masks are allocated at the full fte_match_param size, but it can't
+	 * hurt to double-check.
+ */
+ if (orig->match_sz <= off + 3 || mask->match_sz <= off + 3)
+ return;
- memcpy(m1->match_buf, m->match_buf, m->match_sz);
- memcpy(m2->match_buf, m->match_buf, m->match_sz);
+ /* Lower dword is not set, nothing to do. */
+ if (!mask->match_buf[off + 3])
+ return;
- for (i = 0; i < fc_sz; i++) {
- is_first_matcher = !(combination_num & BIT(i));
- hws_bwc_matcher_complex_params_clear_fld(ctx,
- fc[i].fname,
- is_first_matcher ?
- m2 : m1);
- }
+ /* Higher dwords also present in `mask`, no ambiguity. */
+ if (mask->match_buf[off] || mask->match_buf[off + 1] ||
+ mask->match_buf[off + 2])
+ return;
+
+ /* There are no higher dwords in `orig`, i.e. we match on IPv4. */
+ if (!orig->match_buf[off] && !orig->match_buf[off + 1] &&
+ !orig->match_buf[off + 2])
+ return;
- MLX5_SET(fte_match_param, m2->match_buf,
- misc_parameters_2.metadata_reg_c_6, -1);
+ /* Put the lower dword back in `orig`. It is always safe to do this, the
+ * dword will just be picked up in the next submask.
+ */
+ orig->match_buf[off + 3] = mask->match_buf[off + 3];
+ mask->match_buf[off + 3] = 0;
}
-static void
-hws_bwc_matcher_complex_params_destroy(struct mlx5hws_match_parameters *mask_1,
- struct mlx5hws_match_parameters *mask_2)
+static void hws_avoid_ipv6_split(struct mlx5hws_match_parameters *orig,
+ struct mlx5hws_match_parameters *mask)
{
- kfree(mask_1->match_buf);
- kfree(mask_2->match_buf);
+ hws_avoid_ipv6_split_of(orig, mask,
+ __mlx5_dw_off(fte_match_param,
+ outer_headers.src_ipv4_src_ipv6));
+ hws_avoid_ipv6_split_of(orig, mask,
+ __mlx5_dw_off(fte_match_param,
+ outer_headers.dst_ipv4_dst_ipv6));
+ hws_avoid_ipv6_split_of(orig, mask,
+ __mlx5_dw_off(fte_match_param,
+ inner_headers.src_ipv4_src_ipv6));
+ hws_avoid_ipv6_split_of(orig, mask,
+ __mlx5_dw_off(fte_match_param,
+ inner_headers.dst_ipv4_dst_ipv6));
}
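
A hedged worked example of the rollback above, with d as the dword offset of
an outer IPv6 destination address (values illustrative):

	/* Before: the packer took only the lowest dword of the address. */
	orig: [d]=0xffffffff [d+1]=0xffffffff [d+2]=0xffffffff [d+3]=0
	mask: [d]=0          [d+1]=0          [d+2]=0          [d+3]=0xffffffff

	/* After hws_avoid_ipv6_split_of(): the fragment returns to orig and
	 * the whole address is retried in the next submatcher, so no
	 * submatcher ever sees a lone lower dword (which FW would read as an
	 * IPv4 address).
	 */
	orig: [d]=0xffffffff [d+1]=0xffffffff [d+2]=0xffffffff [d+3]=0xffffffff
	mask: [d]=0          [d+1]=0          [d+2]=0          [d+3]=0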
-static int
-hws_bwc_matcher_complex_params_create(struct mlx5hws_context *ctx,
- u8 match_criteria,
- struct mlx5hws_match_parameters *mask,
- struct mlx5hws_match_parameters *mask_1,
- struct mlx5hws_match_parameters *mask_2)
+/* Build a subset of the `orig` match parameters into `mask`. This subset is
+ * guaranteed to fit in a single definer and as such is a candidate for being a
+ * part of a complex matcher. Upon successful execution, the match params that
+ * go into `mask` are cleared from `orig`.
+ */
+static int hws_get_simple_params(struct mlx5hws_context *ctx, u8 match_criteria,
+ struct mlx5hws_match_parameters *orig,
+ struct mlx5hws_match_parameters *mask)
{
- struct mlx5hws_definer_fc *fc;
- u32 num_of_combinations;
- int fc_sz = 0;
- int res = 0;
- u32 i;
-
- if (MLX5_GET(fte_match_param, mask->match_buf,
- misc_parameters_2.metadata_reg_c_6)) {
- mlx5hws_err(ctx, "Complex matcher: REG_C_6 matching is reserved\n");
- res = -EINVAL;
- goto out;
- }
+ bool added_inner_ipv, added_outer_ipv;
+ int dword_idx;
+ u32 *backup;
+ int ret;
- mask_1->match_buf = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param),
- GFP_KERNEL);
- mask_2->match_buf = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param),
- GFP_KERNEL);
- if (!mask_1->match_buf || !mask_2->match_buf) {
- mlx5hws_err(ctx, "Complex matcher: failed to allocate match_param\n");
- res = -ENOMEM;
- goto free_params;
- }
+ dword_idx = hws_get_last_set_dword_idx(orig);
+ /* Nothing to do, we consumed all of the match params before. */
+ if (dword_idx == -1)
+ return 0;
- mask_1->match_sz = mask->match_sz;
- mask_2->match_sz = mask->match_sz;
+ backup = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
+ if (!backup)
+ return -ENOMEM;
- fc = mlx5hws_definer_conv_match_params_to_compressed_fc(ctx,
- match_criteria,
- mask->match_buf,
- &fc_sz);
- if (!fc) {
- res = -ENOMEM;
- goto free_params;
- }
+ while (1) {
+ dword_idx = hws_get_last_set_dword_idx(orig);
+ /* Nothing to do, we consumed all of the original match params
+ * into this subset, which still fits into a single matcher.
+ */
+ if (dword_idx == -1) {
+ ret = 0;
+ goto free_backup;
+ }
- if (fc_sz >= sizeof(num_of_combinations) * BITS_PER_BYTE) {
- mlx5hws_err(ctx,
- "Complex matcher: too many match parameters (%d)\n",
- fc_sz);
- res = -EINVAL;
- goto free_fc;
+ memcpy(backup, mask->match_buf, mask->match_sz);
+
+ /* Try to add this dword to the current subset. */
+ hws_add_dword_to_mask(mask, orig, dword_idx, &added_inner_ipv,
+ &added_outer_ipv);
+
+ if (hws_match_params_exceeds_definer(ctx, match_criteria, mask,
+ false)) {
+ /* We just added a match param that makes the definer
+ * too large. Revert and return what we had before.
+ * Note that we can't just zero out the affected fields,
+ * because it's possible that the dword we're looking at
+			 * matches on IP version). This is why we employ the
+ * matches in IP version. This is why we employ the
+ * rather cumbersome memcpy for backing up.
+ */
+ memcpy(mask->match_buf, backup, mask->match_sz);
+ /* Possible future improvement: We can't add any more
+ * dwords, but it may be possible to squeeze in
+ * individual bytes, as definers have special slots for
+ * those.
+ *
+ * For now, keep the code simple. This results in an
+ * extra submatcher in some cases, but it's good enough.
+ */
+ ret = 0;
+ break;
+ }
+
+ /* The current subset of match params still fits in a single
+ * definer. Remove the dword from the original mask.
+ *
+ * Also remove any explicit match on IP version if we just
+ * included one here. We will still automatically add it to
+ * accompany any IP address fragment, but do not need to
+ * consider it by itself.
+ */
+ hws_remove_dword_from_mask(orig, dword_idx, added_inner_ipv,
+ added_outer_ipv);
}
- /* We have list of all the match fields from the match parameter.
- * Now try all the possibilities of splitting them into two match
- * buffers and look for the supported combination.
+ /* Make sure we have not picked up a single lower dword of an IPv6
+ * address, as the firmware will erroneously treat it as an IPv4
+ * address.
*/
- num_of_combinations = 1 << fc_sz;
+ hws_avoid_ipv6_split(orig, mask);
- /* Start from combination at index 1 - we know that 0 is unsupported */
- for (i = 1; i < num_of_combinations; i++) {
- if (!hws_bwc_matcher_complex_params_comb_is_valid(fc, fc_sz, i))
- continue;
+free_backup:
+ kfree(backup);
- hws_bwc_matcher_complex_params_comb_create(ctx,
- mask, mask_1, mask_2,
- fc, fc_sz, i);
- /* We now have two separate sets of match params.
- * Check if each of them can be used in its own matcher.
+ return ret;
+}
+
+static int
+hws_bwc_matcher_split_mask(struct mlx5hws_context *ctx, u8 match_criteria,
+ const struct mlx5hws_match_parameters *mask,
+ struct mlx5hws_match_parameters *submasks,
+ int *num_submasks)
+{
+ struct mlx5hws_match_parameters mask_copy;
+ int ret, i = 0;
+
+ mask_copy.match_sz = MLX5_ST_SZ_BYTES(fte_match_param);
+ mask_copy.match_buf = kzalloc(mask_copy.match_sz, GFP_KERNEL);
+ if (!mask_copy.match_buf)
+ return -ENOMEM;
+
+ memcpy(mask_copy.match_buf, mask->match_buf, mask->match_sz);
+
+ while (!hws_match_mask_is_empty(&mask_copy)) {
+ if (i >= MLX5HWS_BWC_COMPLEX_MAX_SUBMATCHERS) {
+ mlx5hws_err(ctx,
+ "Complex matcher: mask too large for %d matchers\n",
+ MLX5HWS_BWC_COMPLEX_MAX_SUBMATCHERS);
+ ret = -E2BIG;
+ goto free_copy;
+ }
+ /* All but the first matcher need to match on register C6 to
+ * connect pieces of the complex rule together.
*/
- if (!mlx5hws_bwc_match_params_is_complex(ctx,
- match_criteria,
- mask_1) &&
- !mlx5hws_bwc_match_params_is_complex(ctx,
- match_criteria,
- mask_2))
- break;
+ if (i > 0) {
+ MLX5_SET(fte_match_param, submasks[i].match_buf,
+ misc_parameters_2.metadata_reg_c_6, -1);
+ match_criteria |= MLX5HWS_DEFINER_MATCH_CRITERIA_MISC2;
+ }
+ ret = hws_get_simple_params(ctx, match_criteria, &mask_copy,
+ &submasks[i]);
+ if (ret < 0)
+ goto free_copy;
+ i++;
}
- if (i == num_of_combinations) {
- /* We've scanned all the combinations, but to no avail */
- mlx5hws_err(ctx, "Complex matcher: couldn't find match params combination\n");
- res = -EINVAL;
- goto free_fc;
- }
+ *num_submasks = i;
+ ret = 0;
- kfree(fc);
- return 0;
+free_copy:
+ kfree(mask_copy.match_buf);
-free_fc:
- kfree(fc);
-free_params:
- hws_bwc_matcher_complex_params_destroy(mask_1, mask_2);
-out:
- return res;
+ return ret;
}
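
The splitting machinery above reduces to a simple greedy loop. A hedged,
self-contained sketch over plain u32 arrays; fits() is a hypothetical stand-in
for the inverted hws_match_params_exceeds_definer() check, and the helper
names are illustrative:

	static int last_set(const u32 *buf, int ndw)
	{
		while (ndw--)
			if (buf[ndw])
				return ndw;
		return -1;	/* empty: nothing left to pack */
	}

	static void greedy_pack(u32 *orig, u32 *mask, u32 *backup, int ndw,
				bool (*fits)(const u32 *, int))
	{
		int dw;

		while ((dw = last_set(orig, ndw)) >= 0) {
			memcpy(backup, mask, ndw * sizeof(*mask));
			mask[dw] |= orig[dw];	/* try to take one more dword */
			if (!fits(mask, ndw)) {
				/* Too big for one definer: revert the attempt
				 * and close this submask; the dword stays in
				 * orig for the next submatcher.
				 */
				memcpy(mask, backup, ndw * sizeof(*mask));
				return;
			}
			orig[dw] = 0;		/* accepted: consume it */
		}
	}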
-static int
-hws_bwc_isolated_table_create(struct mlx5hws_bwc_matcher *bwc_matcher,
- struct mlx5hws_table *table)
+static struct mlx5hws_table *
+hws_isolated_table_create(const struct mlx5hws_bwc_matcher *cmatcher)
{
+ struct mlx5hws_bwc_complex_submatcher *first_subm;
struct mlx5hws_cmd_ft_modify_attr ft_attr = {0};
- struct mlx5hws_context *ctx = table->ctx;
struct mlx5hws_table_attr tbl_attr = {0};
- struct mlx5hws_table *isolated_tbl;
- int ret = 0;
+ struct mlx5hws_table *orig_tbl;
+ struct mlx5hws_context *ctx;
+ struct mlx5hws_table *tbl;
+ int ret;
- tbl_attr.type = table->type;
- tbl_attr.level = table->level;
+ first_subm = &cmatcher->complex->submatchers[0];
+ orig_tbl = first_subm->tbl;
+ ctx = orig_tbl->ctx;
- bwc_matcher->complex->isolated_tbl =
- mlx5hws_table_create(ctx, &tbl_attr);
- isolated_tbl = bwc_matcher->complex->isolated_tbl;
- if (!isolated_tbl)
- return -EINVAL;
+ tbl_attr.type = orig_tbl->type;
+ tbl_attr.level = orig_tbl->level;
+ tbl = mlx5hws_table_create(ctx, &tbl_attr);
+ if (!tbl)
+ return ERR_PTR(-EINVAL);
- /* Set the default miss of the isolated table to
- * point to the end anchor of the original matcher.
+ /* Set the default miss of the isolated table to point
+ * to the end anchor of the original matcher.
*/
- mlx5hws_cmd_set_attr_connect_miss_tbl(ctx,
- isolated_tbl->fw_ft_type,
- isolated_tbl->type,
- &ft_attr);
- ft_attr.table_miss_id = bwc_matcher->matcher->end_ft_id;
-
- ret = mlx5hws_cmd_flow_table_modify(ctx->mdev,
- &ft_attr,
- isolated_tbl->ft_id);
+ mlx5hws_cmd_set_attr_connect_miss_tbl(ctx, tbl->fw_ft_type,
+ tbl->type, &ft_attr);
+ ft_attr.table_miss_id = first_subm->bwc_matcher->matcher->end_ft_id;
+
+ ret = mlx5hws_cmd_flow_table_modify(ctx->mdev, &ft_attr, tbl->ft_id);
if (ret) {
- mlx5hws_err(ctx, "Failed setting isolated tbl default miss\n");
+ mlx5hws_err(ctx, "Complex matcher: failed to set isolated tbl default miss\n");
goto destroy_tbl;
}
- return 0;
+ return tbl;
destroy_tbl:
- mlx5hws_table_destroy(isolated_tbl);
- return ret;
+ mlx5hws_table_destroy(tbl);
+
+ return ERR_PTR(ret);
}
-static void hws_bwc_isolated_table_destroy(struct mlx5hws_table *isolated_tbl)
+static int hws_submatcher_init_first(struct mlx5hws_bwc_matcher *cmatcher,
+ struct mlx5hws_table *table, u32 priority,
+ u8 match_criteria,
+ struct mlx5hws_match_parameters *mask)
{
- /* This table is isolated - no table is pointing to it, no need to
- * disconnect it from anywhere, it won't affect any other table's miss.
+ enum mlx5hws_action_type action_types[HWS_NUM_CHAIN_ACTIONS];
+ struct mlx5hws_bwc_complex_submatcher *subm;
+ int ret;
+
+ subm = &cmatcher->complex->submatchers[0];
+
+ /* The first submatcher lives in the original table and does not have an
+ * associated jump to table action. It also points to the outer complex
+ * matcher.
*/
- mlx5hws_table_destroy(isolated_tbl);
+ subm->tbl = table;
+ subm->action_tbl = NULL;
+ subm->bwc_matcher = cmatcher;
+
+ action_types[0] = MLX5HWS_ACTION_TYP_MODIFY_HDR;
+ action_types[1] = MLX5HWS_ACTION_TYP_TBL;
+ action_types[2] = MLX5HWS_ACTION_TYP_LAST;
+
+ ret = mlx5hws_bwc_matcher_create_simple(subm->bwc_matcher, subm->tbl,
+ priority, match_criteria, mask,
+ action_types);
+ if (ret)
+ return ret;
+
+ subm->bwc_matcher->matcher_type = MLX5HWS_BWC_MATCHER_COMPLEX_FIRST;
+
+ ret = rhashtable_init(&subm->rules_hash, &hws_rules_hash_params);
+ if (ret)
+ goto destroy_matcher;
+ mutex_init(&subm->hash_lock);
+ ida_init(&subm->chain_ida);
+
+ return 0;
+
+destroy_matcher:
+ mlx5hws_bwc_matcher_destroy_simple(subm->bwc_matcher);
+
+ return ret;
}
-static int
-hws_bwc_isolated_matcher_create(struct mlx5hws_bwc_matcher *bwc_matcher,
- struct mlx5hws_table *table,
- u8 match_criteria_enable,
- struct mlx5hws_match_parameters *mask)
+static int hws_submatcher_init(struct mlx5hws_bwc_matcher *cmatcher, int idx,
+ struct mlx5hws_table *table, u32 priority,
+ u8 match_criteria,
+ struct mlx5hws_match_parameters *mask)
{
- struct mlx5hws_table *isolated_tbl = bwc_matcher->complex->isolated_tbl;
- struct mlx5hws_bwc_matcher *isolated_bwc_matcher;
- struct mlx5hws_context *ctx = table->ctx;
+ enum mlx5hws_action_type action_types[HWS_NUM_CHAIN_ACTIONS];
+ struct mlx5hws_bwc_complex_submatcher *subm;
+ bool is_last;
int ret;
- isolated_bwc_matcher = kzalloc(sizeof(*bwc_matcher), GFP_KERNEL);
- if (!isolated_bwc_matcher)
- return -ENOMEM;
+ if (!idx)
+ return hws_submatcher_init_first(cmatcher, table, priority,
+ match_criteria, mask);
+
+ subm = &cmatcher->complex->submatchers[idx];
+ is_last = idx == cmatcher->complex->num_submatchers - 1;
+
+ subm->tbl = hws_isolated_table_create(cmatcher);
+ if (IS_ERR(subm->tbl))
+ return PTR_ERR(subm->tbl);
+
+ subm->action_tbl =
+ mlx5hws_action_create_dest_table(subm->tbl->ctx, subm->tbl,
+ MLX5HWS_ACTION_FLAG_HWS_FDB);
+ if (!subm->action_tbl) {
+ ret = -EINVAL;
+ goto destroy_tbl;
+ }
+
+ subm->bwc_matcher = kzalloc(sizeof(*subm->bwc_matcher), GFP_KERNEL);
+ if (!subm->bwc_matcher) {
+ ret = -ENOMEM;
+ goto destroy_action;
+ }
- bwc_matcher->complex->isolated_bwc_matcher = isolated_bwc_matcher;
+	/* Every matcher other than the first also matches on register C6 to
+ * bind subrules together in the complex rule using the chain ids.
+ */
+ match_criteria |= MLX5HWS_DEFINER_MATCH_CRITERIA_MISC2;
- /* Isolated BWC matcher needs access to the first BWC matcher */
- isolated_bwc_matcher->complex_first_bwc_matcher = bwc_matcher;
+ action_types[0] = MLX5HWS_ACTION_TYP_MODIFY_HDR;
+ action_types[1] = MLX5HWS_ACTION_TYP_TBL;
+ action_types[2] = MLX5HWS_ACTION_TYP_LAST;
- /* Isolated matcher needs to match on REG_C_6,
- * so make sure its criteria bit is on.
+ /* Every matcher other than the last sets register C6 and jumps to the
+ * next submatcher's table. The final submatcher will use the
+ * user-supplied actions and will attach an action template at rule
+ * insertion time.
*/
- match_criteria_enable |= MLX5HWS_DEFINER_MATCH_CRITERIA_MISC2;
-
- ret = mlx5hws_bwc_matcher_create_simple(isolated_bwc_matcher,
- isolated_tbl,
- 0,
- match_criteria_enable,
- mask,
- NULL);
- if (ret) {
- mlx5hws_err(ctx, "Complex matcher: failed creating isolated BWC matcher\n");
+ ret = mlx5hws_bwc_matcher_create_simple(subm->bwc_matcher, subm->tbl,
+ priority, match_criteria, mask,
+ is_last ? NULL : action_types);
+ if (ret)
goto free_matcher;
- }
+
+ subm->bwc_matcher->matcher_type =
+ MLX5HWS_BWC_MATCHER_COMPLEX_SUBMATCHER;
+
+ ret = rhashtable_init(&subm->rules_hash, &hws_rules_hash_params);
+ if (ret)
+ goto destroy_matcher;
+ mutex_init(&subm->hash_lock);
+ ida_init(&subm->chain_ida);
return 0;
+destroy_matcher:
+ mlx5hws_bwc_matcher_destroy_simple(subm->bwc_matcher);
free_matcher:
- kfree(bwc_matcher->complex->isolated_bwc_matcher);
+ kfree(subm->bwc_matcher);
+destroy_action:
+ mlx5hws_action_destroy(subm->action_tbl);
+destroy_tbl:
+ mlx5hws_table_destroy(subm->tbl);
+
return ret;
}
-static void
-hws_bwc_isolated_matcher_destroy(struct mlx5hws_bwc_matcher *bwc_matcher)
+static void hws_submatcher_destroy(struct mlx5hws_bwc_matcher *cmatcher,
+ int idx)
{
- mlx5hws_bwc_matcher_destroy_simple(bwc_matcher);
- kfree(bwc_matcher);
+ struct mlx5hws_bwc_complex_submatcher *subm;
+
+ subm = &cmatcher->complex->submatchers[idx];
+
+ ida_destroy(&subm->chain_ida);
+ mutex_destroy(&subm->hash_lock);
+ rhashtable_destroy(&subm->rules_hash);
+
+ if (subm->bwc_matcher) {
+ mlx5hws_bwc_matcher_destroy_simple(subm->bwc_matcher);
+ if (idx)
+ kfree(subm->bwc_matcher);
+ }
+
+ /* We own all of the isolated tables, but not the original one. */
+ if (idx) {
+ mlx5hws_action_destroy(subm->action_tbl);
+ mlx5hws_table_destroy(subm->tbl);
+ }
}
static int
-hws_bwc_isolated_actions_create(struct mlx5hws_bwc_matcher *bwc_matcher,
- struct mlx5hws_table *table)
+hws_complex_data_actions_init(struct mlx5hws_bwc_matcher_complex_data *cdata)
{
- struct mlx5hws_table *isolated_tbl = bwc_matcher->complex->isolated_tbl;
+ struct mlx5hws_context *ctx = cdata->submatchers[0].tbl->ctx;
u8 modify_hdr_action[MLX5_ST_SZ_BYTES(set_action_in)] = {0};
- struct mlx5hws_context *ctx = table->ctx;
struct mlx5hws_action_mh_pattern ptrn;
int ret = 0;
- /* Create action to jump to isolated table */
-
- bwc_matcher->complex->action_go_to_tbl =
- mlx5hws_action_create_dest_table(ctx,
- isolated_tbl,
- MLX5HWS_ACTION_FLAG_HWS_FDB);
- if (!bwc_matcher->complex->action_go_to_tbl) {
- mlx5hws_err(ctx, "Complex matcher: failed to create go-to-tbl action\n");
- return -EINVAL;
- }
-
/* Create modify header action to set REG_C_6 */
-
MLX5_SET(set_action_in, modify_hdr_action,
action_type, MLX5_MODIFICATION_TYPE_SET);
MLX5_SET(set_action_in, modify_hdr_action,
@@ -895,19 +553,18 @@ hws_bwc_isolated_actions_create(struct mlx5hws_bwc_matcher *bwc_matcher,
ptrn.data = (void *)modify_hdr_action;
ptrn.sz = MLX5HWS_ACTION_DOUBLE_SIZE;
- bwc_matcher->complex->action_metadata =
+ cdata->action_metadata =
mlx5hws_action_create_modify_header(ctx, 1, &ptrn, 0,
MLX5HWS_ACTION_FLAG_HWS_FDB);
- if (!bwc_matcher->complex->action_metadata) {
- ret = -EINVAL;
- goto destroy_action_go_to_tbl;
+ if (!cdata->action_metadata) {
+ mlx5hws_err(ctx, "Complex matcher: failed to create set reg C6 action\n");
+ return -EINVAL;
}
/* Create last action */
-
- bwc_matcher->complex->action_last =
+ cdata->action_last =
mlx5hws_action_create_last(ctx, MLX5HWS_ACTION_FLAG_HWS_FDB);
- if (!bwc_matcher->complex->action_last) {
+ if (!cdata->action_last) {
mlx5hws_err(ctx, "Complex matcher: failed to create last action\n");
ret = -EINVAL;
goto destroy_action_metadata;
@@ -916,196 +573,130 @@ hws_bwc_isolated_actions_create(struct mlx5hws_bwc_matcher *bwc_matcher,
return 0;
destroy_action_metadata:
- mlx5hws_action_destroy(bwc_matcher->complex->action_metadata);
-destroy_action_go_to_tbl:
- mlx5hws_action_destroy(bwc_matcher->complex->action_go_to_tbl);
+ mlx5hws_action_destroy(cdata->action_metadata);
+
return ret;
}
static void
-hws_bwc_isolated_actions_destroy(struct mlx5hws_bwc_matcher *bwc_matcher)
+hws_complex_data_actions_destroy(struct mlx5hws_bwc_matcher_complex_data *cdata)
{
- mlx5hws_action_destroy(bwc_matcher->complex->action_last);
- mlx5hws_action_destroy(bwc_matcher->complex->action_metadata);
- mlx5hws_action_destroy(bwc_matcher->complex->action_go_to_tbl);
+ mlx5hws_action_destroy(cdata->action_last);
+ mlx5hws_action_destroy(cdata->action_metadata);
}
int mlx5hws_bwc_matcher_create_complex(struct mlx5hws_bwc_matcher *bwc_matcher,
struct mlx5hws_table *table,
- u32 priority,
- u8 match_criteria_enable,
+ u32 priority, u8 match_criteria_enable,
struct mlx5hws_match_parameters *mask)
{
- enum mlx5hws_action_type complex_init_action_types[3];
- struct mlx5hws_bwc_matcher *isolated_bwc_matcher;
- struct mlx5hws_match_parameters mask_1 = {0};
- struct mlx5hws_match_parameters mask_2 = {0};
+ struct mlx5hws_match_parameters
+ submasks[MLX5HWS_BWC_COMPLEX_MAX_SUBMATCHERS] = {0};
+ struct mlx5hws_bwc_matcher_complex_data *cdata;
struct mlx5hws_context *ctx = table->ctx;
- int ret;
-
- ret = hws_bwc_matcher_complex_params_create(table->ctx,
- match_criteria_enable,
- mask, &mask_1, &mask_2);
- if (ret)
- goto err;
-
- bwc_matcher->complex =
- kzalloc(sizeof(*bwc_matcher->complex), GFP_KERNEL);
- if (!bwc_matcher->complex) {
- ret = -ENOMEM;
- goto free_masks;
- }
+ int num_submatchers;
+ int i, ret;
- ret = rhashtable_init(&bwc_matcher->complex->refcount_hash,
- &hws_refcount_hash);
- if (ret) {
- mlx5hws_err(ctx, "Complex matcher: failed to initialize rhashtable\n");
- goto free_complex;
+ for (i = 0; i < ARRAY_SIZE(submasks); i++) {
+ submasks[i].match_sz = MLX5_ST_SZ_BYTES(fte_match_param);
+ submasks[i].match_buf = kzalloc(submasks[i].match_sz,
+ GFP_KERNEL);
+ if (!submasks[i].match_buf) {
+ ret = -ENOMEM;
+ goto free_submasks;
+ }
}
- mutex_init(&bwc_matcher->complex->hash_lock);
- ida_init(&bwc_matcher->complex->metadata_ida);
-
- /* Create initial action template for the first matcher.
- * Usually the initial AT is just dummy, but in case of complex
- * matcher we know exactly which actions should it have.
- */
-
- complex_init_action_types[0] = MLX5HWS_ACTION_TYP_MODIFY_HDR;
- complex_init_action_types[1] = MLX5HWS_ACTION_TYP_TBL;
- complex_init_action_types[2] = MLX5HWS_ACTION_TYP_LAST;
-
- /* Create the first matcher */
-
- ret = mlx5hws_bwc_matcher_create_simple(bwc_matcher,
- table,
- priority,
- match_criteria_enable,
- &mask_1,
- complex_init_action_types);
+ ret = hws_bwc_matcher_split_mask(ctx, match_criteria_enable, mask,
+ submasks, &num_submatchers);
if (ret)
- goto destroy_ida;
-
- /* Create isolated table to hold the second isolated matcher */
+ goto free_submasks;
- ret = hws_bwc_isolated_table_create(bwc_matcher, table);
- if (ret) {
- mlx5hws_err(ctx, "Complex matcher: failed creating isolated table\n");
- goto destroy_first_matcher;
+ cdata = kzalloc(sizeof(*cdata), GFP_KERNEL);
+ if (!cdata) {
+ ret = -ENOMEM;
+ goto free_submasks;
}
- /* Now create the second BWC matcher - the isolated one */
+ bwc_matcher->complex = cdata;
+ cdata->num_submatchers = num_submatchers;
- ret = hws_bwc_isolated_matcher_create(bwc_matcher, table,
- match_criteria_enable, &mask_2);
- if (ret) {
- mlx5hws_err(ctx, "Complex matcher: failed creating isolated matcher\n");
- goto destroy_isolated_tbl;
+ for (i = 0; i < num_submatchers; i++) {
+ ret = hws_submatcher_init(bwc_matcher, i, table, priority,
+ match_criteria_enable, &submasks[i]);
+ if (ret)
+ goto destroy_submatchers;
}
- /* Create action for isolated matcher's rules */
-
- ret = hws_bwc_isolated_actions_create(bwc_matcher, table);
- if (ret) {
- mlx5hws_err(ctx, "Complex matcher: failed creating isolated actions\n");
- goto destroy_isolated_matcher;
- }
+ ret = hws_complex_data_actions_init(cdata);
+ if (ret)
+ goto destroy_submatchers;
- hws_bwc_matcher_complex_params_destroy(&mask_1, &mask_2);
- return 0;
+ ret = 0;
+ goto free_submasks;
-destroy_isolated_matcher:
- isolated_bwc_matcher = bwc_matcher->complex->isolated_bwc_matcher;
- hws_bwc_isolated_matcher_destroy(isolated_bwc_matcher);
-destroy_isolated_tbl:
- hws_bwc_isolated_table_destroy(bwc_matcher->complex->isolated_tbl);
-destroy_first_matcher:
- mlx5hws_bwc_matcher_destroy_simple(bwc_matcher);
-destroy_ida:
- ida_destroy(&bwc_matcher->complex->metadata_ida);
- mutex_destroy(&bwc_matcher->complex->hash_lock);
- rhashtable_destroy(&bwc_matcher->complex->refcount_hash);
-free_complex:
- kfree(bwc_matcher->complex);
+destroy_submatchers:
+ while (i--)
+ hws_submatcher_destroy(bwc_matcher, i);
+ kfree(cdata);
bwc_matcher->complex = NULL;
-free_masks:
- hws_bwc_matcher_complex_params_destroy(&mask_1, &mask_2);
-err:
+
+free_submasks:
+ for (i = 0; i < ARRAY_SIZE(submasks); i++)
+ kfree(submasks[i].match_buf);
+
return ret;
}
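
/*
 * Aside: a minimal sketch of the partial-init unwind idiom used in the
 * error path above, with hypothetical item_init()/item_exit() helpers.
 * If item_init() fails at index i, `while (i--)` tears down exactly the
 * elements 0..i-1 that were already initialized, in reverse order.
 */
static int init_all(struct item *items, int n)
{
	int i, ret;

	for (i = 0; i < n; i++) {
		ret = item_init(&items[i]);
		if (ret)
			goto unwind;
	}

	return 0;

unwind:
	while (i--)
		item_exit(&items[i]);

	return ret;
}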
void
mlx5hws_bwc_matcher_destroy_complex(struct mlx5hws_bwc_matcher *bwc_matcher)
{
- struct mlx5hws_bwc_matcher *isolated_bwc_matcher =
- bwc_matcher->complex->isolated_bwc_matcher;
-
- hws_bwc_isolated_actions_destroy(bwc_matcher);
- hws_bwc_isolated_matcher_destroy(isolated_bwc_matcher);
- hws_bwc_isolated_table_destroy(bwc_matcher->complex->isolated_tbl);
- mlx5hws_bwc_matcher_destroy_simple(bwc_matcher);
- ida_destroy(&bwc_matcher->complex->metadata_ida);
- mutex_destroy(&bwc_matcher->complex->hash_lock);
- rhashtable_destroy(&bwc_matcher->complex->refcount_hash);
+ int i;
+
+ hws_complex_data_actions_destroy(bwc_matcher->complex);
+ for (i = 0; i < bwc_matcher->complex->num_submatchers; i++)
+ hws_submatcher_destroy(bwc_matcher, i);
kfree(bwc_matcher->complex);
bwc_matcher->complex = NULL;
}
-static void
-hws_bwc_matcher_complex_hash_lock(struct mlx5hws_bwc_matcher *bwc_matcher)
-{
- mutex_lock(&bwc_matcher->complex->hash_lock);
-}
-
-static void
-hws_bwc_matcher_complex_hash_unlock(struct mlx5hws_bwc_matcher *bwc_matcher)
-{
- mutex_unlock(&bwc_matcher->complex->hash_lock);
-}
-
static int
-hws_bwc_rule_complex_hash_node_get(struct mlx5hws_bwc_rule *bwc_rule,
- struct mlx5hws_match_parameters *params)
+hws_complex_get_subrule_data(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_bwc_complex_submatcher *subm,
+ u32 *match_params)
+__must_hold(&subm->hash_lock)
{
- struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
- struct mlx5hws_bwc_complex_rule_hash_node *node, *old_node;
- struct rhashtable *refcount_hash;
- int ret, i;
-
- bwc_rule->complex_hash_node = NULL;
+ struct mlx5hws_bwc_matcher *bwc_matcher = subm->bwc_matcher;
+ struct mlx5hws_bwc_complex_subrule_data *sr_data, *old_data;
+ struct mlx5hws_match_template *mt;
+ int ret;
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (unlikely(!node))
+ sr_data = kzalloc(sizeof(*sr_data), GFP_KERNEL);
+ if (!sr_data)
return -ENOMEM;
- ret = ida_alloc(&bwc_matcher->complex->metadata_ida, GFP_KERNEL);
+ ret = ida_alloc(&subm->chain_ida, GFP_KERNEL);
if (ret < 0)
- goto err_free_node;
- node->tag = ret;
+ goto free_sr_data;
+ sr_data->chain_id = ret;
- refcount_set(&node->refcount, 1);
+ refcount_set(&sr_data->refcount, 1);
- /* Clear match buffer - turn off all the unrelated fields
- * in accordance with the match params mask for the first
- * matcher out of the two parts of the complex matcher.
- * The resulting mask is the key for the hash.
- */
- for (i = 0; i < MLX5_ST_SZ_DW_MATCH_PARAM; i++)
- node->match_buf[i] = params->match_buf[i] &
- bwc_matcher->mt->match_param[i];
-
- refcount_hash = &bwc_matcher->complex->refcount_hash;
- old_node = rhashtable_lookup_get_insert_fast(refcount_hash,
- &node->hash_node,
- hws_refcount_hash);
- if (IS_ERR(old_node)) {
- ret = PTR_ERR(old_node);
- goto err_free_ida;
+ mt = bwc_matcher->matcher->mt;
+ mlx5hws_definer_create_tag(match_params, mt->fc, mt->fc_sz,
+ (u8 *)&sr_data->match_tag);
+
+ old_data = rhashtable_lookup_get_insert_fast(&subm->rules_hash,
+ &sr_data->hash_node,
+ hws_rules_hash_params);
+ if (IS_ERR(old_data)) {
+ ret = PTR_ERR(old_data);
+ goto free_ida;
}
- if (old_node) {
+ if (old_data) {
/* Rule with the same tag already exists - update refcount */
- refcount_inc(&old_node->refcount);
+ refcount_inc(&old_data->refcount);
/* Let the new rule use the same tag as the existing rule.
* Note that we don't have any indication for the rule creation
* process that a rule with similar matching params already
@@ -1114,247 +705,281 @@ hws_bwc_rule_complex_hash_node_get(struct mlx5hws_bwc_rule *bwc_rule,
* There's some performance advantage in skipping such cases,
* so this is left for future optimizations.
*/
- ida_free(&bwc_matcher->complex->metadata_ida, node->tag);
- kfree(node);
- node = old_node;
+ bwc_rule->subrule_data = old_data;
+ ret = 0;
+ goto free_ida;
}
- bwc_rule->complex_hash_node = node;
+ bwc_rule->subrule_data = sr_data;
return 0;
-err_free_ida:
- ida_free(&bwc_matcher->complex->metadata_ida, node->tag);
-err_free_node:
- kfree(node);
+free_ida:
+ ida_free(&subm->chain_ida, sr_data->chain_id);
+free_sr_data:
+ kfree(sr_data);
+
return ret;
}
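
/*
 * Aside: a hedged sketch of the get-or-insert idiom above, with a
 * hypothetical `struct entry`. rhashtable_lookup_get_insert_fast()
 * returns NULL when the new node was inserted, the existing node when
 * one with the same key is already present, or an ERR_PTR() on failure,
 * so a single call serves as both lookup and insert under the lock.
 */
struct entry {
	u32 key;
	refcount_t refcount;
	struct rhash_head hash_node;
};

static const struct rhashtable_params entry_hash_params = {
	.key_len = sizeof_field(struct entry, key),
	.key_offset = offsetof(struct entry, key),
	.head_offset = offsetof(struct entry, hash_node),
};

static struct entry *entry_get(struct rhashtable *ht, struct entry *new)
{
	struct entry *old;

	refcount_set(&new->refcount, 1);
	old = rhashtable_lookup_get_insert_fast(ht, &new->hash_node,
						entry_hash_params);
	if (IS_ERR(old))
		return old;			/* insertion failed */
	if (old) {
		refcount_inc(&old->refcount);	/* key exists: share it */
		kfree(new);
		return old;
	}

	return new;				/* new entry inserted */
}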
static void
-hws_bwc_rule_complex_hash_node_put(struct mlx5hws_bwc_rule *bwc_rule,
- bool *is_last_rule)
+hws_complex_put_subrule_data(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_bwc_complex_submatcher *subm,
+ bool *is_last_rule)
+__must_hold(&subm->hash_lock)
{
- struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
- struct mlx5hws_bwc_complex_rule_hash_node *node;
+ struct mlx5hws_bwc_complex_subrule_data *sr_data;
if (is_last_rule)
*is_last_rule = false;
- node = bwc_rule->complex_hash_node;
- if (refcount_dec_and_test(&node->refcount)) {
- rhashtable_remove_fast(&bwc_matcher->complex->refcount_hash,
- &node->hash_node,
- hws_refcount_hash);
- ida_free(&bwc_matcher->complex->metadata_ida, node->tag);
- kfree(node);
+ sr_data = bwc_rule->subrule_data;
+ if (refcount_dec_and_test(&sr_data->refcount)) {
+ rhashtable_remove_fast(&subm->rules_hash,
+ &sr_data->hash_node,
+ hws_rules_hash_params);
+ ida_free(&subm->chain_ida, sr_data->chain_id);
+ kfree(sr_data);
if (is_last_rule)
*is_last_rule = true;
}
- bwc_rule->complex_hash_node = NULL;
+ bwc_rule->subrule_data = NULL;
}
-int mlx5hws_bwc_rule_create_complex(struct mlx5hws_bwc_rule *bwc_rule,
- struct mlx5hws_match_parameters *params,
- u32 flow_source,
- struct mlx5hws_rule_action rule_actions[],
- u16 bwc_queue_idx)
+static int hws_complex_subrule_create(struct mlx5hws_bwc_matcher *cmatcher,
+ struct mlx5hws_bwc_rule *subrule,
+ u32 *match_params, u32 flow_source,
+ int bwc_queue_idx, int subm_idx,
+ struct mlx5hws_rule_action *actions,
+ u32 *chain_id)
{
- struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
- struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ struct mlx5hws_rule_action chain_actions[HWS_NUM_CHAIN_ACTIONS] = {0};
u8 modify_hdr_action[MLX5_ST_SZ_BYTES(set_action_in)] = {0};
- struct mlx5hws_rule_action rule_actions_1[3] = {0};
- struct mlx5hws_bwc_matcher *isolated_bwc_matcher;
- u32 *match_buf_2;
- u32 metadata_val;
- int ret = 0;
+ struct mlx5hws_bwc_matcher_complex_data *cdata;
+ struct mlx5hws_bwc_complex_submatcher *subm;
+ int ret;
- isolated_bwc_matcher = bwc_matcher->complex->isolated_bwc_matcher;
- bwc_rule->isolated_bwc_rule =
- mlx5hws_bwc_rule_alloc(isolated_bwc_matcher);
- if (unlikely(!bwc_rule->isolated_bwc_rule))
- return -ENOMEM;
+ cdata = cmatcher->complex;
+ subm = &cdata->submatchers[subm_idx];
- hws_bwc_matcher_complex_hash_lock(bwc_matcher);
+ mutex_lock(&subm->hash_lock);
- /* Get a new hash node for this complex rule.
- * If this is a unique set of match params for the first matcher,
- * we will get a new hash node with newly allocated IDA.
- * Otherwise we will get an existing node with IDA and updated refcount.
- */
- ret = hws_bwc_rule_complex_hash_node_get(bwc_rule, params);
- if (unlikely(ret)) {
- mlx5hws_err(ctx, "Complex rule: failed getting RHT node for this rule\n");
- goto free_isolated_rule;
+ ret = hws_complex_get_subrule_data(subrule, subm, match_params);
+ if (ret)
+ goto unlock;
+
+ *chain_id = subrule->subrule_data->chain_id;
+
+ if (!actions) {
+ MLX5_SET(set_action_in, modify_hdr_action, data, *chain_id);
+ chain_actions[0].action = cdata->action_metadata;
+ chain_actions[0].modify_header.data = modify_hdr_action;
+ chain_actions[1].action =
+ cdata->submatchers[subm_idx + 1].action_tbl;
+ chain_actions[2].action = cdata->action_last;
+ actions = chain_actions;
}
- /* No need to clear match buffer's fields in accordance to what
- * will actually be matched on first and second matchers.
- * Both matchers were created with the appropriate masks
- * and each of them holds the appropriate field copy array,
- * so rule creation will use only the fields that will be copied
- * in accordance with setters in field copy array.
- * We do, however, need to temporary allocate match buffer
- * for the second (isolated) rule in order to not modify
- * user's match params buffer.
- */
-
- match_buf_2 = kmemdup(params->match_buf,
- MLX5_ST_SZ_BYTES(fte_match_param),
- GFP_KERNEL);
- if (unlikely(!match_buf_2)) {
- mlx5hws_err(ctx, "Complex rule: failed allocating match_buf\n");
- ret = -ENOMEM;
- goto hash_node_put;
- }
+ ret = mlx5hws_bwc_rule_create_simple(subrule, match_params, actions,
+ flow_source, bwc_queue_idx);
+ if (ret)
+ goto put_subrule_data;
- /* On 2nd matcher, use unique 32-bit ID as a matching tag */
- metadata_val = bwc_rule->complex_hash_node->tag;
- MLX5_SET(fte_match_param, match_buf_2,
- misc_parameters_2.metadata_reg_c_6, metadata_val);
-
- /* Isolated rule's rule_actions contain all the original actions */
- ret = mlx5hws_bwc_rule_create_simple(bwc_rule->isolated_bwc_rule,
- match_buf_2,
- rule_actions,
- flow_source,
- bwc_queue_idx);
- kfree(match_buf_2);
- if (unlikely(ret)) {
- mlx5hws_err(ctx,
- "Complex rule: failed creating isolated BWC rule (%d)\n",
- ret);
- goto hash_node_put;
- }
+ ret = 0;
+ goto unlock;
- /* First rule's rule_actions contain setting metadata and
- * jump to isolated table that contains the second matcher.
- * Set metadata value to a unique value for this rule.
- */
+put_subrule_data:
+ hws_complex_put_subrule_data(subrule, subm, NULL);
+unlock:
+ mutex_unlock(&subm->hash_lock);
- MLX5_SET(set_action_in, modify_hdr_action,
- action_type, MLX5_MODIFICATION_TYPE_SET);
- MLX5_SET(set_action_in, modify_hdr_action,
- field, MLX5_MODI_META_REG_C_6);
- MLX5_SET(set_action_in, modify_hdr_action,
- length, 0); /* zero means length of 32 */
- MLX5_SET(set_action_in, modify_hdr_action,
- offset, 0);
- MLX5_SET(set_action_in, modify_hdr_action,
- data, metadata_val);
+ return ret;
+}
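
/*
 * Aside: the per-chain modify-header that writes the chain_id is
 * programmed as in this hedged sketch; the field layout is taken from
 * the pre-rewrite code, with only `data` still set per rule above (the
 * static fields are presumably preset when the action is created).
 */
static void chain_set_reg_c6(u8 *modify_hdr_action, u32 chain_id)
{
	MLX5_SET(set_action_in, modify_hdr_action,
		 action_type, MLX5_MODIFICATION_TYPE_SET);
	MLX5_SET(set_action_in, modify_hdr_action,
		 field, MLX5_MODI_META_REG_C_6);
	MLX5_SET(set_action_in, modify_hdr_action,
		 length, 0); /* zero means length of 32 */
	MLX5_SET(set_action_in, modify_hdr_action, offset, 0);
	MLX5_SET(set_action_in, modify_hdr_action, data, chain_id);
}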
- rule_actions_1[0].action = bwc_matcher->complex->action_metadata;
- rule_actions_1[0].modify_header.offset = 0;
- rule_actions_1[0].modify_header.data = modify_hdr_action;
+static int hws_complex_subrule_destroy(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_bwc_matcher *cmatcher,
+ int subm_idx)
+{
+ struct mlx5hws_bwc_matcher_complex_data *cdata;
+ struct mlx5hws_bwc_complex_submatcher *subm;
+ struct mlx5hws_context *ctx;
+ bool is_last_rule;
+ int ret = 0;
- rule_actions_1[1].action = bwc_matcher->complex->action_go_to_tbl;
- rule_actions_1[2].action = bwc_matcher->complex->action_last;
+ cdata = cmatcher->complex;
+ subm = &cdata->submatchers[subm_idx];
+ ctx = subm->tbl->ctx;
- ret = mlx5hws_bwc_rule_create_simple(bwc_rule,
- params->match_buf,
- rule_actions_1,
- flow_source,
- bwc_queue_idx);
+ mutex_lock(&subm->hash_lock);
- if (unlikely(ret)) {
+ hws_complex_put_subrule_data(bwc_rule, subm, &is_last_rule);
+ bwc_rule->rule->skip_delete = !is_last_rule;
+ ret = mlx5hws_bwc_rule_destroy_simple(bwc_rule);
+ if (unlikely(ret))
mlx5hws_err(ctx,
- "Complex rule: failed creating first BWC rule (%d)\n",
- ret);
- goto destroy_isolated_rule;
- }
+ "Complex rule: failed to delete subrule %d (%d)\n",
+ subm_idx, ret);
- hws_bwc_matcher_complex_hash_unlock(bwc_matcher);
+ if (subm_idx)
+ mlx5hws_bwc_rule_free(bwc_rule);
- return 0;
+ mutex_unlock(&subm->hash_lock);
-destroy_isolated_rule:
- mlx5hws_bwc_rule_destroy_simple(bwc_rule->isolated_bwc_rule);
-hash_node_put:
- hws_bwc_rule_complex_hash_node_put(bwc_rule, NULL);
-free_isolated_rule:
- hws_bwc_matcher_complex_hash_unlock(bwc_matcher);
- mlx5hws_bwc_rule_free(bwc_rule->isolated_bwc_rule);
return ret;
}
-int mlx5hws_bwc_rule_destroy_complex(struct mlx5hws_bwc_rule *bwc_rule)
+int mlx5hws_bwc_rule_create_complex(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_match_parameters *params,
+ u32 flow_source,
+ struct mlx5hws_rule_action rule_actions[],
+ u16 bwc_queue_idx)
{
- struct mlx5hws_context *ctx = bwc_rule->bwc_matcher->matcher->tbl->ctx;
- struct mlx5hws_bwc_rule *isolated_bwc_rule;
- int ret_isolated, ret;
- bool is_last_rule;
+ struct mlx5hws_bwc_rule
+ *subrules[MLX5HWS_BWC_COMPLEX_MAX_SUBMATCHERS] = {0};
+ struct mlx5hws_bwc_matcher *cmatcher = bwc_rule->bwc_matcher;
+ struct mlx5hws_bwc_matcher_complex_data *cdata;
+ struct mlx5hws_rule_action *subrule_actions;
+ struct mlx5hws_bwc_complex_submatcher *subm;
+ struct mlx5hws_bwc_rule *subrule;
+ u32 *match_params;
+ u32 chain_id;
+ int i, ret;
- hws_bwc_matcher_complex_hash_lock(bwc_rule->bwc_matcher);
+ cdata = cmatcher->complex;
+ if (!cdata)
+ return -EINVAL;
- hws_bwc_rule_complex_hash_node_put(bwc_rule, &is_last_rule);
- bwc_rule->rule->skip_delete = !is_last_rule;
+ /* Duplicate user data because we will modify it to set register C6
+ * values. For the same reason, make sure that we allocate a full
+ * match_param even if the user gave us fewer bytes. We need to ensure
+ * there is space for the match on C6.
+ */
+ match_params = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
+ if (!match_params)
+ return -ENOMEM;
- ret = mlx5hws_bwc_rule_destroy_simple(bwc_rule);
- if (unlikely(ret))
- mlx5hws_err(ctx, "BWC complex rule: failed destroying first rule\n");
+ memcpy(match_params, params->match_buf, params->match_sz);
+
+ ret = hws_complex_subrule_create(cmatcher, bwc_rule, match_params,
+ flow_source, bwc_queue_idx, 0,
+ NULL, &chain_id);
+ if (ret)
+ goto free_match_params;
+ subrules[0] = bwc_rule;
+
+ for (i = 1; i < cdata->num_submatchers; i++) {
+ subm = &cdata->submatchers[i];
+ subrule = mlx5hws_bwc_rule_alloc(subm->bwc_matcher);
+ if (!subrule) {
+ ret = -ENOMEM;
+ goto destroy_subrules;
+ }
+
+ /* Match on the previous subrule's chain_id. This is how
+ * subrules are connected in steering.
+ */
+ MLX5_SET(fte_match_param, match_params,
+ misc_parameters_2.metadata_reg_c_6, chain_id);
+
+ /* The last subrule uses the complex rule's user-specified
+		 * actions. Everything else uses the chaining actions built
+		 * from the next table and chain_id.
+ */
+ subrule_actions =
+ i == cdata->num_submatchers - 1 ? rule_actions : NULL;
+
+ ret = hws_complex_subrule_create(cmatcher, subrule,
+ match_params, flow_source,
+ bwc_queue_idx, i,
+ subrule_actions, &chain_id);
+ if (ret) {
+ mlx5hws_bwc_rule_free(subrule);
+ goto destroy_subrules;
+ }
+
+ subrules[i] = subrule;
+ }
+
+ for (i = 0; i < cdata->num_submatchers - 1; i++)
+ subrules[i]->next_subrule = subrules[i + 1];
- isolated_bwc_rule = bwc_rule->isolated_bwc_rule;
- ret_isolated = mlx5hws_bwc_rule_destroy_simple(isolated_bwc_rule);
- if (unlikely(ret_isolated))
- mlx5hws_err(ctx, "BWC complex rule: failed destroying second (isolated) rule\n");
+ kfree(match_params);
- hws_bwc_matcher_complex_hash_unlock(bwc_rule->bwc_matcher);
+ return 0;
- mlx5hws_bwc_rule_free(isolated_bwc_rule);
+destroy_subrules:
+ while (i--)
+ hws_complex_subrule_destroy(subrules[i], cmatcher, i);
+free_match_params:
+ kfree(match_params);
- return ret || ret_isolated;
+ return ret;
}
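
/*
 * Aside: hedged sketch of the buffer-widening pattern above, as a
 * hypothetical helper: always allocate a full fte_match_param and copy
 * in whatever the caller provided, so the later MLX5_SET() of
 * metadata_reg_c_6 cannot touch memory past a shorter user buffer.
 */
static u32 *dup_full_match_param(struct mlx5hws_match_parameters *params)
{
	u32 *buf = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);

	if (buf)
		memcpy(buf, params->match_buf, params->match_sz);

	return buf; /* tail beyond params->match_sz stays zeroed */
}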
-static void
-hws_bwc_matcher_clear_hash_rtcs(struct mlx5hws_bwc_matcher *bwc_matcher)
+int mlx5hws_bwc_rule_destroy_complex(struct mlx5hws_bwc_rule *bwc_rule)
{
- struct mlx5hws_bwc_complex_rule_hash_node *node;
- struct rhashtable_iter iter;
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+ struct mlx5hws_bwc_rule
+ *subrules[MLX5HWS_BWC_COMPLEX_MAX_SUBMATCHERS] = {0};
+ struct mlx5hws_bwc_matcher_complex_data *cdata;
+ int i, err, ret_val;
+
+ cdata = bwc_matcher->complex;
+
+ /* Construct a list of all the subrules we need to destroy. */
+ subrules[0] = bwc_rule;
+ for (i = 1; i < cdata->num_submatchers; i++)
+ subrules[i] = subrules[i - 1]->next_subrule;
+
+ ret_val = 0;
+ for (i = 0; i < cdata->num_submatchers; i++) {
+ err = hws_complex_subrule_destroy(subrules[i], bwc_matcher, i);
+		/* If something goes wrong, keep going and destroy all of the
+		 * subrules, but return an error up the stack.
+ */
+ if (unlikely(err))
+ ret_val = err;
+ }
- rhashtable_walk_enter(&bwc_matcher->complex->refcount_hash, &iter);
- rhashtable_walk_start(&iter);
+ return ret_val;
+}
- while ((node = rhashtable_walk_next(&iter)) != NULL) {
- if (IS_ERR(node))
+static void
+hws_bwc_matcher_init_move(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ u16 bwc_queues = mlx5hws_bwc_queues(ctx);
+ struct mlx5hws_bwc_rule *bwc_rule;
+ struct list_head *rules_list;
+ int i;
+
+ for (i = 0; i < bwc_queues; i++) {
+ rules_list = &bwc_matcher->rules[i];
+ if (list_empty(rules_list))
continue;
- node->rtc_valid = false;
- }
- rhashtable_walk_stop(&iter);
- rhashtable_walk_exit(&iter);
+ list_for_each_entry(bwc_rule, rules_list, list_node) {
+ if (!bwc_rule->subrule_data)
+ continue;
+ bwc_rule->subrule_data->was_moved = false;
+ }
+ }
}
-int
-mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher)
+int mlx5hws_bwc_matcher_complex_move(struct mlx5hws_bwc_matcher *bwc_matcher)
{
struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
struct mlx5hws_matcher *matcher = bwc_matcher->matcher;
u16 bwc_queues = mlx5hws_bwc_queues(ctx);
struct mlx5hws_bwc_rule *tmp_bwc_rule;
struct mlx5hws_rule_attr rule_attr;
- struct mlx5hws_table *isolated_tbl;
int move_error = 0, poll_error = 0;
struct mlx5hws_rule *tmp_rule;
struct list_head *rules_list;
u32 expected_completions = 1;
- u32 end_ft_id;
- int i, ret;
+ int i, ret = 0;
- /* We are rehashing the matcher that is the first part of the complex
- * matcher. Need to update the isolated matcher to point to the end_ft
- * of this new matcher. This needs to be done before moving any rules
- * to prevent possible steering loops.
- */
- isolated_tbl = bwc_matcher->complex->isolated_tbl;
- end_ft_id = bwc_matcher->matcher->resize_dst->end_ft_id;
- ret = mlx5hws_matcher_update_end_ft_isolated(isolated_tbl, end_ft_id);
- if (ret) {
- mlx5hws_err(ctx,
- "Failed updating end_ft of isolated matcher (%d)\n",
- ret);
- return ret;
- }
-
- hws_bwc_matcher_clear_hash_rtcs(bwc_matcher);
+ hws_bwc_matcher_init_move(bwc_matcher);
mlx5hws_bwc_rule_fill_attr(bwc_matcher, 0, 0, &rule_attr);
@@ -1369,15 +994,15 @@ mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher)
/* Check if a rule with similar tag has already
* been moved.
*/
- if (tmp_bwc_rule->complex_hash_node->rtc_valid) {
- /* This rule is a duplicate of rule with similar
- * tag that has already been moved earlier.
- * Just update this rule's RTCs.
+ if (tmp_bwc_rule->subrule_data->was_moved) {
+			/* This rule is a duplicate of a rule with an
+			 * identical tag that has already been moved
+ * earlier. Just update this rule's RTCs.
*/
tmp_bwc_rule->rule->rtc_0 =
- tmp_bwc_rule->complex_hash_node->rtc_0;
+ tmp_bwc_rule->subrule_data->rtc_0;
tmp_bwc_rule->rule->rtc_1 =
- tmp_bwc_rule->complex_hash_node->rtc_1;
+ tmp_bwc_rule->subrule_data->rtc_1;
tmp_bwc_rule->rule->matcher =
tmp_bwc_rule->rule->matcher->resize_dst;
continue;
@@ -1425,12 +1050,12 @@ mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher)
/* Done moving the rule to the new matcher,
* now update RTCs for all the duplicated rules.
*/
- tmp_bwc_rule->complex_hash_node->rtc_0 =
+ tmp_bwc_rule->subrule_data->rtc_0 =
tmp_bwc_rule->rule->rtc_0;
- tmp_bwc_rule->complex_hash_node->rtc_1 =
+ tmp_bwc_rule->subrule_data->rtc_1 =
tmp_bwc_rule->rule->rtc_1;
- tmp_bwc_rule->complex_hash_node->rtc_valid = true;
+ tmp_bwc_rule->subrule_data->was_moved = true;
}
}
@@ -1442,3 +1067,35 @@ mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher)
return ret;
}
+
+int
+mlx5hws_bwc_matcher_complex_move_first(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ struct mlx5hws_bwc_matcher_complex_data *cdata;
+ struct mlx5hws_table *isolated_tbl;
+ u32 end_ft_id;
+ int i, ret;
+
+ cdata = bwc_matcher->complex;
+
+ /* We are rehashing the first submatcher. We need to update the
+ * subsequent submatchers to point to the end_ft of this new matcher.
+ * This needs to be done before moving any rules to prevent possible
+ * steering loops.
+ */
+ end_ft_id = bwc_matcher->matcher->resize_dst->end_ft_id;
+ for (i = 1; i < cdata->num_submatchers; i++) {
+ isolated_tbl = cdata->submatchers[i].tbl;
+ ret = mlx5hws_matcher_update_end_ft_isolated(isolated_tbl,
+ end_ft_id);
+ if (ret) {
+ mlx5hws_err(ctx,
+ "Complex matcher: failed updating end_ft of isolated matcher (%d)\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return mlx5hws_bwc_matcher_complex_move(bwc_matcher);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.h
index a6887c7e39d5..d07de631ce9f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.h
@@ -4,25 +4,60 @@
#ifndef HWS_BWC_COMPLEX_H_
#define HWS_BWC_COMPLEX_H_
-struct mlx5hws_bwc_complex_rule_hash_node {
- u32 match_buf[MLX5_ST_SZ_DW_MATCH_PARAM];
- u32 tag;
+#define MLX5HWS_BWC_COMPLEX_MAX_SUBMATCHERS 4
+
+/* A matcher can't contain two rules with the same match tag, but it is possible
+ * that two different complex rules' subrules have the same match tag. In that
+ * case, those subrules correspond to a single rule, and we need to refcount.
+ */
+struct mlx5hws_bwc_complex_subrule_data {
+ struct mlx5hws_rule_match_tag match_tag;
refcount_t refcount;
- bool rtc_valid;
+ /* The chain_id is what glues individual subrules into larger complex
+ * rules. It is the value that this subrule writes to register C6, and
+ * that the next subrule matches against.
+ */
+ u32 chain_id;
u32 rtc_0;
u32 rtc_1;
+ /* During rehash we iterate through all the subrules to move them. But
+ * two or more subrules can share the same physical rule in the
+	 * submatcher, so we use `was_moved` to keep track of whether a given
+	 * rule was already moved.
+ */
+ bool was_moved;
struct rhash_head hash_node;
};
+struct mlx5hws_bwc_complex_submatcher {
+ /* Isolated table that the matcher lives in. Not set for the first
+ * matcher, which lives in the original table.
+ */
+ struct mlx5hws_table *tbl;
+ /* Match a rule with this action to go to `tbl`. This is set in all
+ * submatchers but the first.
+ */
+ struct mlx5hws_action *action_tbl;
+ /* This submatcher's simple matcher. The first submatcher points to the
+ * outer (complex) matcher.
+ */
+ struct mlx5hws_bwc_matcher *bwc_matcher;
+ struct rhashtable rules_hash;
+ struct ida chain_ida;
+ struct mutex hash_lock; /* Protect the hash and ida. */
+};
+
struct mlx5hws_bwc_matcher_complex_data {
- struct mlx5hws_table *isolated_tbl;
- struct mlx5hws_bwc_matcher *isolated_bwc_matcher;
+ struct mlx5hws_bwc_complex_submatcher
+ submatchers[MLX5HWS_BWC_COMPLEX_MAX_SUBMATCHERS];
+ int num_submatchers;
+ /* Actions used by all but the last submatcher to point to the next
+ * submatcher in the chain. The last submatcher uses the action template
+	 * from the complex matcher to perform the actions that the user
+ * originally requested.
+ */
struct mlx5hws_action *action_metadata;
- struct mlx5hws_action *action_go_to_tbl;
struct mlx5hws_action *action_last;
- struct rhashtable refcount_hash;
- struct mutex hash_lock; /* Protect the refcount rhashtable */
- struct ida metadata_ida;
};
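
/*
 * Aside: a hedged picture of how a packet traverses the submatcher
 * chain described by the structures above (n == num_submatchers):
 *
 *   submatchers[0]      - lives in the user's table; matches the first
 *                         slice of the mask, sets REG_C_6 = chain_id
 *                         and jumps to submatchers[1].tbl
 *   submatchers[1..n-2] - isolated tables; match the next slice plus
 *                         REG_C_6 from the previous stage, set a new
 *                         chain_id and jump onward
 *   submatchers[n-1]    - matches the last slice plus REG_C_6, then
 *                         executes the user's original actions
 */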
bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx,
@@ -37,7 +72,10 @@ int mlx5hws_bwc_matcher_create_complex(struct mlx5hws_bwc_matcher *bwc_matcher,
void mlx5hws_bwc_matcher_destroy_complex(struct mlx5hws_bwc_matcher *bwc_matcher);
-int mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher);
+int mlx5hws_bwc_matcher_complex_move(struct mlx5hws_bwc_matcher *bwc_matcher);
+
+int
+mlx5hws_bwc_matcher_complex_move_first(struct mlx5hws_bwc_matcher *bwc_matcher);
int mlx5hws_bwc_rule_create_complex(struct mlx5hws_bwc_rule *bwc_rule,
struct mlx5hws_match_parameters *params,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c
index 0bdcab2e5cf3..f22eaf506d28 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c
@@ -1200,34 +1200,20 @@ out:
int mlx5hws_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_function,
u16 vport_number, u16 *gvmi)
{
- bool ec_vf_func = other_function ? mlx5_core_is_ec_vf_vport(mdev, vport_number) : false;
- u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
- int out_size;
- void *out;
int err;
- out_size = MLX5_ST_SZ_BYTES(query_hca_cap_out);
- out = kzalloc(out_size, GFP_KERNEL);
- if (!out)
- return -ENOMEM;
-
- MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
- MLX5_SET(query_hca_cap_in, in, other_function, other_function);
- MLX5_SET(query_hca_cap_in, in, function_id,
- mlx5_vport_to_func_id(mdev, vport_number, ec_vf_func));
- MLX5_SET(query_hca_cap_in, in, ec_vf_function, ec_vf_func);
- MLX5_SET(query_hca_cap_in, in, op_mod,
- MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1 | HCA_CAP_OPMOD_GET_CUR);
+ if (!other_function) {
+ /* self vhca_id */
+ *gvmi = MLX5_CAP_GEN(mdev, vhca_id);
+ return 0;
+ }
- err = mlx5_cmd_exec_inout(mdev, query_hca_cap, in, out);
+ err = mlx5_vport_get_vhca_id(mdev, vport_number, gvmi);
if (err) {
- kfree(out);
+ mlx5_core_err(mdev, "Failed to get vport vhca id for vport %d\n",
+ vport_number);
return err;
}
- *gvmi = MLX5_GET(query_hca_cap_out, out, capability.cmd_hca_cap.vhca_id);
-
- kfree(out);
-
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c
index c6436c3a7a83..82fd122d4284 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c
@@ -1280,7 +1280,7 @@ hws_definer_conv_misc2(struct mlx5hws_definer_conv_data *cd,
struct mlx5hws_definer_fc *fc = cd->fc;
struct mlx5hws_definer_fc *curr_fc;
- if (HWS_IS_FLD_SET_SZ(match_param, misc_parameters_2.reserved_at_1a0, 0x8) ||
+ if (HWS_IS_FLD_SET_SZ(match_param, misc_parameters_2.psp_syndrome, 0x8) ||
HWS_IS_FLD_SET_SZ(match_param,
misc_parameters_2.ipsec_next_header, 0x8) ||
HWS_IS_FLD_SET_SZ(match_param, misc_parameters_2.reserved_at_1c0, 0x40) ||
@@ -1831,80 +1831,6 @@ err_free_fc:
return ret;
}
-struct mlx5hws_definer_fc *
-mlx5hws_definer_conv_match_params_to_compressed_fc(struct mlx5hws_context *ctx,
- u8 match_criteria_enable,
- u32 *match_param,
- int *fc_sz)
-{
- struct mlx5hws_definer_fc *compressed_fc = NULL;
- struct mlx5hws_definer_conv_data cd = {0};
- struct mlx5hws_definer_fc *fc;
- int ret;
-
- fc = hws_definer_alloc_fc(ctx, MLX5HWS_DEFINER_FNAME_MAX);
- if (!fc)
- return NULL;
-
- cd.fc = fc;
- cd.ctx = ctx;
-
- if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_OUTER) {
- ret = hws_definer_conv_outer(&cd, match_param);
- if (ret)
- goto err_free_fc;
- }
-
- if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_INNER) {
- ret = hws_definer_conv_inner(&cd, match_param);
- if (ret)
- goto err_free_fc;
- }
-
- if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC) {
- ret = hws_definer_conv_misc(&cd, match_param);
- if (ret)
- goto err_free_fc;
- }
-
- if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC2) {
- ret = hws_definer_conv_misc2(&cd, match_param);
- if (ret)
- goto err_free_fc;
- }
-
- if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC3) {
- ret = hws_definer_conv_misc3(&cd, match_param);
- if (ret)
- goto err_free_fc;
- }
-
- if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC4) {
- ret = hws_definer_conv_misc4(&cd, match_param);
- if (ret)
- goto err_free_fc;
- }
-
- if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC5) {
- ret = hws_definer_conv_misc5(&cd, match_param);
- if (ret)
- goto err_free_fc;
- }
-
- /* Allocate fc array on mt */
- compressed_fc = hws_definer_alloc_compressed_fc(fc);
- if (!compressed_fc) {
- mlx5hws_err(ctx,
- "Convert to compressed fc: failed to set field copy to match template\n");
- goto err_free_fc;
- }
- *fc_sz = hws_definer_get_fc_size(fc);
-
-err_free_fc:
- kfree(fc);
- return compressed_fc;
-}
-
static int
hws_definer_find_byte_in_tag(struct mlx5hws_definer *definer,
u32 hl_byte_off,
@@ -2067,7 +1993,7 @@ hws_definer_copy_sel_ctrl(struct mlx5hws_definer_sel_ctrl *ctrl,
static int
hws_definer_find_best_match_fit(struct mlx5hws_context *ctx,
struct mlx5hws_definer *definer,
- u8 *hl)
+ u8 *hl, bool allow_jumbo)
{
struct mlx5hws_definer_sel_ctrl ctrl = {0};
bool found;
@@ -2084,6 +2010,9 @@ hws_definer_find_best_match_fit(struct mlx5hws_context *ctx,
return 0;
}
+ if (!allow_jumbo)
+ return -E2BIG;
+
/* Try to create a full/limited jumbo definer */
ctrl.allowed_full_dw = ctx->caps->full_dw_jumbo_support ? DW_SELECTORS :
DW_SELECTORS_MATCH;
@@ -2160,7 +2089,8 @@ int mlx5hws_definer_compare(struct mlx5hws_definer *definer_a,
int
mlx5hws_definer_calc_layout(struct mlx5hws_context *ctx,
struct mlx5hws_match_template *mt,
- struct mlx5hws_definer *match_definer)
+ struct mlx5hws_definer *match_definer,
+ bool allow_jumbo)
{
u8 *match_hl;
int ret;
@@ -2182,7 +2112,8 @@ mlx5hws_definer_calc_layout(struct mlx5hws_context *ctx,
}
/* Find the match definer layout for header layout match union */
- ret = hws_definer_find_best_match_fit(ctx, match_definer, match_hl);
+ ret = hws_definer_find_best_match_fit(ctx, match_definer, match_hl,
+ allow_jumbo);
if (ret) {
if (ret == -E2BIG)
mlx5hws_dbg(ctx,
@@ -2370,7 +2301,7 @@ int mlx5hws_definer_mt_init(struct mlx5hws_context *ctx,
struct mlx5hws_definer match_layout = {0};
int ret;
- ret = mlx5hws_definer_calc_layout(ctx, mt, &match_layout);
+ ret = mlx5hws_definer_calc_layout(ctx, mt, &match_layout, true);
if (ret) {
mlx5hws_err(ctx, "Failed to calculate matcher definer layout\n");
return ret;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h
index 62da55389331..141f3eb2e307 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h
@@ -823,13 +823,8 @@ void mlx5hws_definer_free(struct mlx5hws_context *ctx,
int mlx5hws_definer_calc_layout(struct mlx5hws_context *ctx,
struct mlx5hws_match_template *mt,
- struct mlx5hws_definer *match_definer);
-
-struct mlx5hws_definer_fc *
-mlx5hws_definer_conv_match_params_to_compressed_fc(struct mlx5hws_context *ctx,
- u8 match_criteria_enable,
- u32 *match_param,
- int *fc_sz);
+ struct mlx5hws_definer *match_definer,
+ bool allow_jumbo);
const char *mlx5hws_definer_fname_to_str(enum mlx5hws_definer_fname fname);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c
index b0595c9b09e4..24ef7d66fa8a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c
@@ -690,7 +690,7 @@ static int hws_send_ring_alloc_sq(struct mlx5_core_dev *mdev,
size_t buf_sz;
int err;
- sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
+ sq->uar_map = mdev->priv.bfreg.map;
sq->mdev = mdev;
param.db_numa_node = numa_node;
@@ -764,7 +764,7 @@ static int hws_send_ring_create_sq(struct mlx5_core_dev *mdev, u32 pdn,
MLX5_SET(sqc, sqc, ts_format, ts_format);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
- MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.hw_objs.bfreg.index);
+ MLX5_SET(wq, wq, uar_page, mdev->priv.bfreg.index);
MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);
@@ -940,7 +940,7 @@ static int hws_send_ring_create_cq(struct mlx5_core_dev *mdev,
(__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
- MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
+ MLX5_SET(cqc, cqc, uar_page, mdev->priv.bfreg.up->index);
MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
@@ -963,7 +963,7 @@ static int hws_send_ring_open_cq(struct mlx5_core_dev *mdev,
if (!cqc_data)
return -ENOMEM;
- MLX5_SET(cqc, cqc_data, uar_page, mdev->priv.uar->index);
+ MLX5_SET(cqc, cqc_data, uar_page, mdev->priv.bfreg.up->index);
MLX5_SET(cqc, cqc_data, log_cq_size, ilog2(queue->num_entries));
err = hws_send_ring_alloc_cq(mdev, numa_node, queue, cqc_data, cq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_cmd.c
index baefb9a3fa05..1ebb2b15c080 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_cmd.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2019 Mellanox Technologies. */
#include "dr_types.h"
+#include "eswitch.h"
int mlx5dr_cmd_query_esw_vport_context(struct mlx5_core_dev *mdev,
bool other_vport,
@@ -34,34 +35,21 @@ int mlx5dr_cmd_query_esw_vport_context(struct mlx5_core_dev *mdev,
int mlx5dr_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_vport,
u16 vport_number, u16 *gvmi)
{
- bool ec_vf_func = other_vport ? mlx5_core_is_ec_vf_vport(mdev, vport_number) : false;
- u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
- int out_size;
- void *out;
int err;
- out_size = MLX5_ST_SZ_BYTES(query_hca_cap_out);
- out = kzalloc(out_size, GFP_KERNEL);
- if (!out)
- return -ENOMEM;
-
- MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
- MLX5_SET(query_hca_cap_in, in, other_function, other_vport);
- MLX5_SET(query_hca_cap_in, in, function_id, mlx5_vport_to_func_id(mdev, vport_number, ec_vf_func));
- MLX5_SET(query_hca_cap_in, in, ec_vf_function, ec_vf_func);
- MLX5_SET(query_hca_cap_in, in, op_mod,
- MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1 |
- HCA_CAP_OPMOD_GET_CUR);
+ if (!other_vport) {
+ /* self vhca_id */
+ *gvmi = MLX5_CAP_GEN(mdev, vhca_id);
+ return 0;
+ }
- err = mlx5_cmd_exec_inout(mdev, query_hca_cap, in, out);
+ err = mlx5_vport_get_vhca_id(mdev, vport_number, gvmi);
if (err) {
- kfree(out);
+ mlx5_core_err(mdev, "Failed to get vport vhca id for vport %d\n",
+ vport_number);
return err;
}
- *gvmi = MLX5_GET(query_hca_cap_out, out, capability.cmd_hca_cap.vhca_id);
-
- kfree(out);
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c
index 4fd4e8483382..077a77fde670 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c
@@ -1131,7 +1131,6 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
*cq->mcq.arm_db = cpu_to_be32(2 << 28);
cq->mcq.vector = 0;
- cq->mcq.uar = uar;
cq->mdev = mdev;
return cq;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index da5c24fc7b30..2ed2e530b07d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -36,6 +36,7 @@
#include <linux/mlx5/vport.h>
#include <linux/mlx5/eswitch.h>
#include "mlx5_core.h"
+#include "eswitch.h"
#include "sf/sf.h"
/* Mutex to hold while enabling or disabling RoCE */
@@ -1189,18 +1190,44 @@ u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev)
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_system_image_guid);
+static bool mlx5_vport_use_vhca_id_as_func_id(struct mlx5_core_dev *dev,
+ u16 vport_num, u16 *vhca_id)
+{
+ if (!MLX5_CAP_GEN_2(dev, function_id_type_vhca_id))
+ return false;
+
+ return mlx5_esw_vport_vhca_id(dev->priv.eswitch, vport_num, vhca_id);
+}
+
int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 vport, void *out,
u16 opmod)
{
- bool ec_vf_func = mlx5_core_is_ec_vf_vport(dev, vport);
u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)] = {};
+ u16 vhca_id = 0, function_id = 0;
+ bool ec_vf_func = false;
+
+	/* If this vport refers to a vport on the EC PF (embedded CPU),
+	 * let the FW know which domain we are querying, since vport numbers or
+ * function_ids are not unique across the different PF domains,
+ * unless we use vhca_id as the function_id below.
+ */
+ ec_vf_func = mlx5_core_is_ec_vf_vport(dev, vport);
+ function_id = mlx5_vport_to_func_id(dev, vport, ec_vf_func);
+
+ if (mlx5_vport_use_vhca_id_as_func_id(dev, vport, &vhca_id)) {
+ MLX5_SET(query_hca_cap_in, in, function_id_type, 1);
+ function_id = vhca_id;
+ ec_vf_func = false;
+ mlx5_core_dbg(dev, "%s using vhca_id as function_id for vport %d vhca_id 0x%x\n",
+ __func__, vport, vhca_id);
+ }
opmod = (opmod << 1) | (HCA_CAP_OPMOD_GET_MAX & 0x01);
MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
- MLX5_SET(query_hca_cap_in, in, function_id, mlx5_vport_to_func_id(dev, vport, ec_vf_func));
MLX5_SET(query_hca_cap_in, in, other_function, true);
MLX5_SET(query_hca_cap_in, in, ec_vf_function, ec_vf_func);
+ MLX5_SET(query_hca_cap_in, in, function_id, function_id);
return mlx5_cmd_exec_inout(dev, query_hca_cap, in, out);
}
EXPORT_SYMBOL_GPL(mlx5_vport_get_other_func_cap);
@@ -1212,7 +1239,9 @@ int mlx5_vport_get_vhca_id(struct mlx5_core_dev *dev, u16 vport, u16 *vhca_id)
void *hca_caps;
int err;
- *vhca_id = 0;
+ /* try get vhca_id via eswitch */
+ if (mlx5_esw_vport_vhca_id(dev->priv.eswitch, vport, vhca_id))
+ return 0;
query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
if (!query_ctx)
@@ -1229,12 +1258,14 @@ out_free:
kfree(query_ctx);
return err;
}
+EXPORT_SYMBOL_GPL(mlx5_vport_get_vhca_id);
int mlx5_vport_set_other_func_cap(struct mlx5_core_dev *dev, const void *hca_cap,
u16 vport, u16 opmod)
{
- bool ec_vf_func = mlx5_core_is_ec_vf_vport(dev, vport);
int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
+ u16 vhca_id = 0, function_id = 0;
+ bool ec_vf_func = false;
void *set_hca_cap;
void *set_ctx;
int ret;
@@ -1243,14 +1274,29 @@ int mlx5_vport_set_other_func_cap(struct mlx5_core_dev *dev, const void *hca_cap
if (!set_ctx)
return -ENOMEM;
+	/* If this vport refers to a vport on the EC PF (embedded CPU),
+	 * let the FW know which domain we are accessing, since vport numbers or
+ * function_ids are not unique across the different PF domains,
+ * unless we use vhca_id as the function_id below.
+ */
+ ec_vf_func = mlx5_core_is_ec_vf_vport(dev, vport);
+ function_id = mlx5_vport_to_func_id(dev, vport, ec_vf_func);
+
+ if (mlx5_vport_use_vhca_id_as_func_id(dev, vport, &vhca_id)) {
+ MLX5_SET(set_hca_cap_in, set_ctx, function_id_type, 1);
+ function_id = vhca_id;
+ ec_vf_func = false;
+ mlx5_core_dbg(dev, "%s using vhca_id as function_id for vport %d vhca_id 0x%x\n",
+ __func__, vport, vhca_id);
+ }
+
MLX5_SET(set_hca_cap_in, set_ctx, opcode, MLX5_CMD_OP_SET_HCA_CAP);
MLX5_SET(set_hca_cap_in, set_ctx, op_mod, opmod << 1);
set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);
memcpy(set_hca_cap, hca_cap, MLX5_ST_SZ_BYTES(cmd_hca_cap));
- MLX5_SET(set_hca_cap_in, set_ctx, function_id,
- mlx5_vport_to_func_id(dev, vport, ec_vf_func));
MLX5_SET(set_hca_cap_in, set_ctx, other_function, true);
MLX5_SET(set_hca_cap_in, set_ctx, ec_vf_function, ec_vf_func);
+ MLX5_SET(set_hca_cap_in, set_ctx, function_id, function_id);
ret = mlx5_cmd_exec_in(dev, set_hca_cap, set_ctx);
kfree(set_ctx);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wc.c b/drivers/net/ethernet/mellanox/mlx5/core/wc.c
index 2f0316616fa4..c281153bd411 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wc.c
@@ -7,6 +7,10 @@
#include "mlx5_core.h"
#include "wq.h"
+#if IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && IS_ENABLED(CONFIG_ARM64)
+#include <asm/neon.h>
+#endif
+
#define TEST_WC_NUM_WQES 255
#define TEST_WC_LOG_CQ_SZ (order_base_2(TEST_WC_NUM_WQES))
#define TEST_WC_SQ_LOG_WQ_SZ TEST_WC_LOG_CQ_SZ
@@ -94,7 +98,7 @@ static int create_wc_cq(struct mlx5_wc_cq *cq, void *cqc_data)
MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
- MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
+ MLX5_SET(cqc, cqc, uar_page, mdev->priv.bfreg.up->index);
MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
@@ -116,7 +120,7 @@ static int mlx5_wc_create_cq(struct mlx5_core_dev *mdev, struct mlx5_wc_cq *cq)
return -ENOMEM;
MLX5_SET(cqc, cqc, log_cq_size, TEST_WC_LOG_CQ_SZ);
- MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
+ MLX5_SET(cqc, cqc, uar_page, mdev->priv.bfreg.up->index);
if (MLX5_CAP_GEN(mdev, cqe_128_always) && cache_line_size() >= 128)
MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD);
@@ -255,7 +259,29 @@ static void mlx5_wc_destroy_sq(struct mlx5_wc_sq *sq)
mlx5_wq_destroy(&sq->wq_ctrl);
}
-static void mlx5_wc_post_nop(struct mlx5_wc_sq *sq, bool signaled)
+static void mlx5_iowrite64_copy(struct mlx5_wc_sq *sq, __be32 mmio_wqe[16],
+ size_t mmio_wqe_size, unsigned int offset)
+{
+#if IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && IS_ENABLED(CONFIG_ARM64)
+ if (cpu_has_neon()) {
+ kernel_neon_begin();
+ asm volatile
+ (".arch_extension simd;\n\t"
+ "ld1 {v0.16b, v1.16b, v2.16b, v3.16b}, [%0]\n\t"
+ "st1 {v0.16b, v1.16b, v2.16b, v3.16b}, [%1]"
+ :
+ : "r"(mmio_wqe), "r"(sq->bfreg.map + offset)
+ : "memory", "v0", "v1", "v2", "v3");
+ kernel_neon_end();
+ return;
+ }
+#endif
+ __iowrite64_copy(sq->bfreg.map + offset, mmio_wqe,
+ mmio_wqe_size / 8);
+}
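
/*
 * Aside (hedged): __iowrite64_copy() takes its count in 64-bit words,
 * hence the `mmio_wqe_size / 8` above. The NEON path instead loads the
 * whole 64-byte WQE into four 16-byte vector registers and stores them
 * back to back, presumably to improve the odds that the write-combining
 * buffer drains as a single burst rather than eight 8-byte MMIO writes.
 */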
+
+static void mlx5_wc_post_nop(struct mlx5_wc_sq *sq, unsigned int *offset,
+ bool signaled)
{
int buf_size = (1 << MLX5_CAP_GEN(sq->cq.mdev, log_bf_reg_size)) / 2;
struct mlx5_wqe_ctrl_seg *ctrl;
@@ -288,10 +314,9 @@ static void mlx5_wc_post_nop(struct mlx5_wc_sq *sq, bool signaled)
*/
wmb();
- __iowrite64_copy(sq->bfreg.map + sq->bfreg.offset, mmio_wqe,
- sizeof(mmio_wqe) / 8);
+ mlx5_iowrite64_copy(sq, mmio_wqe, sizeof(mmio_wqe), *offset);
- sq->bfreg.offset ^= buf_size;
+ *offset ^= buf_size;
}
static int mlx5_wc_poll_cq(struct mlx5_wc_sq *sq)
@@ -332,6 +357,7 @@ static int mlx5_wc_poll_cq(struct mlx5_wc_sq *sq)
static void mlx5_core_test_wc(struct mlx5_core_dev *mdev)
{
+ unsigned int offset = 0;
unsigned long expires;
struct mlx5_wc_sq *sq;
int i, err;
@@ -358,9 +384,9 @@ static void mlx5_core_test_wc(struct mlx5_core_dev *mdev)
goto err_create_sq;
for (i = 0; i < TEST_WC_NUM_WQES - 1; i++)
- mlx5_wc_post_nop(sq, false);
+ mlx5_wc_post_nop(sq, &offset, false);
- mlx5_wc_post_nop(sq, true);
+ mlx5_wc_post_nop(sq, &offset, true);
expires = jiffies + TEST_WC_POLLING_MAX_TIME_JIFFIES;
do {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 2bb2b77351bd..83c7cf3bbea3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -886,7 +886,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
return 0;
- emad_wq = alloc_workqueue("mlxsw_core_emad", 0, 0);
+ emad_wq = alloc_workqueue("mlxsw_core_emad", WQ_PERCPU, 0);
if (!emad_wq)
return -ENOMEM;
mlxsw_core->emad_wq = emad_wq;
@@ -2043,7 +2043,7 @@ static int mlxsw_core_health_init(struct mlxsw_core *mlxsw_core)
return 0;
fw_fatal = devl_health_reporter_create(devlink, &mlxsw_core_health_fw_fatal_ops,
- 0, mlxsw_core);
+ mlxsw_core);
if (IS_ERR(fw_fatal)) {
dev_err(mlxsw_core->bus_info->dev, "Failed to create fw fatal reporter");
return PTR_ERR(fw_fatal);
@@ -3381,7 +3381,7 @@ static int __init mlxsw_core_module_init(void)
if (err)
return err;
- mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, 0, 0);
+ mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, WQ_PERCPU, 0);
if (!mlxsw_wq) {
err = -ENOMEM;
goto err_alloc_workqueue;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
index 50e591420bd9..b1094aaffa5f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
@@ -170,8 +170,7 @@ void mlxsw_sp_counter_pool_fini(struct mlxsw_sp *mlxsw_sp)
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
mlxsw_sp_counter_sub_pools_fini(mlxsw_sp);
- WARN_ON(find_first_bit(pool->usage, pool->pool_size) !=
- pool->pool_size);
+ WARN_ON(!bitmap_empty(pool->usage, pool->pool_size));
WARN_ON(atomic_read(&pool->active_entries_count));
bitmap_free(pool->usage);
devl_resource_occ_get_unregister(devlink,
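
/*
 * Aside: the old and new assertions are equivalent; bitmap_empty()
 * simply states the intent directly. Hedged demo on a hypothetical
 * 64-bit map:
 */
static void bitmap_empty_equiv_demo(void)
{
	DECLARE_BITMAP(usage, 64);

	bitmap_zero(usage, 64);
	/* find_first_bit() returns the size when no bit is set... */
	WARN_ON(find_first_bit(usage, 64) != 64);
	/* ...which is exactly the condition bitmap_empty() tests. */
	WARN_ON(!bitmap_empty(usage, 64));
}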
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic.h b/drivers/net/ethernet/meta/fbnic/fbnic.h
index c376e06880c9..b03e5a3d5144 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic.h
@@ -27,6 +27,8 @@ struct fbnic_dev {
struct net_device *netdev;
struct dentry *dbg_fbd;
struct device *hwmon;
+ struct devlink_health_reporter *fw_reporter;
+ struct devlink_health_reporter *otp_reporter;
u32 __iomem *uc_addr0;
u32 __iomem *uc_addr4;
@@ -84,8 +86,9 @@ struct fbnic_dev {
/* Local copy of hardware statistics */
struct fbnic_hw_stats hw_stats;
- /* Lock protecting access to hw_stats */
- spinlock_t hw_stats_lock;
+ /* Firmware time since boot in milliseconds */
+ u64 firmware_time;
+ u64 prev_firmware_time;
struct fbnic_fw_log fw_log;
};
@@ -158,8 +161,13 @@ extern char fbnic_driver_name[];
void fbnic_devlink_free(struct fbnic_dev *fbd);
struct fbnic_dev *fbnic_devlink_alloc(struct pci_dev *pdev);
+int fbnic_devlink_health_create(struct fbnic_dev *fbd);
+void fbnic_devlink_health_destroy(struct fbnic_dev *fbd);
void fbnic_devlink_register(struct fbnic_dev *fbd);
void fbnic_devlink_unregister(struct fbnic_dev *fbd);
+void __printf(2, 3)
+fbnic_devlink_fw_report(struct fbnic_dev *fbd, const char *format, ...);
+void fbnic_devlink_otp_check(struct fbnic_dev *fbd, const char *msg);
int fbnic_fw_request_mbx(struct fbnic_dev *fbd);
void fbnic_fw_free_mbx(struct fbnic_dev *fbd);
@@ -190,6 +198,8 @@ void fbnic_dbg_fbd_exit(struct fbnic_dev *fbd);
void fbnic_dbg_init(void);
void fbnic_dbg_exit(void);
+void fbnic_rpc_reset_valid_entries(struct fbnic_dev *fbd);
+
void fbnic_csr_get_regs(struct fbnic_dev *fbd, u32 *data, u32 *regs_version);
int fbnic_csr_regs_len(struct fbnic_dev *fbd);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
index a81db842aa53..d3a7ad921f18 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
@@ -790,6 +790,21 @@ enum {
#define FBNIC_CSR_END_PCS 0x10668 /* CSR section delimiter */
#define FBNIC_CSR_START_RSFEC 0x10800 /* CSR section delimiter */
+
+/* We have 4 RSFEC engines present in our part; however, we are only using 1.
+ * As such, only CCW(0) and NCCW(0) will ever be non-zero, and the other
+ * registers can be ignored.
+ */
+#define FBNIC_RSFEC_CCW_LO(n) (0x10802 + 8 * (n)) /* 0x42008 + 32*n */
+#define FBNIC_RSFEC_CCW_HI(n) (0x10803 + 8 * (n)) /* 0x4200c + 32*n */
+#define FBNIC_RSFEC_NCCW_LO(n) (0x10804 + 8 * (n)) /* 0x42010 + 32*n */
+#define FBNIC_RSFEC_NCCW_HI(n) (0x10805 + 8 * (n)) /* 0x42014 + 32*n */
+
+#define FBNIC_PCS_MAX_LANES 4
+#define FBNIC_PCS_SYMBLERR_LO(n) \
+ (0x10880 + 2 * (n)) /* 0x42200 + 8*n */
+#define FBNIC_PCS_SYMBLERR_HI(n) \
+ (0x10881 + 2 * (n)) /* 0x42204 + 8*n */
#define FBNIC_CSR_END_RSFEC 0x108c8 /* CSR section delimiter */
/* MAC MAC registers (ASIC only) */
@@ -829,6 +844,10 @@ enum {
#define FBNIC_CSR_END_SIG 0x1184e /* CSR section delimiter */
#define FBNIC_CSR_START_MAC_STAT 0x11a00
+#define FBNIC_MAC_STAT_RX_XOFF_STB_L 0x11a00 /* 0x46800 */
+#define FBNIC_MAC_STAT_RX_XOFF_STB_H 0x11a01 /* 0x46804 */
+#define FBNIC_MAC_STAT_TX_XOFF_STB_L 0x11a04 /* 0x46810 */
+#define FBNIC_MAC_STAT_TX_XOFF_STB_H 0x11a05 /* 0x46814 */
#define FBNIC_MAC_STAT_RX_BYTE_COUNT_L 0x11a08 /* 0x46820 */
#define FBNIC_MAC_STAT_RX_BYTE_COUNT_H 0x11a09 /* 0x46824 */
#define FBNIC_MAC_STAT_RX_ALIGN_ERROR_L 0x11a0a /* 0x46828 */
@@ -1159,4 +1178,22 @@ enum {
#define FBNIC_IPC_MBX_DESC_FW_CMPL DESC_BIT(1)
#define FBNIC_IPC_MBX_DESC_HOST_CMPL DESC_BIT(0)
+/* OTP Registers
+ * These registers are accessible via a BAR4 offset and are written by CMRT
+ * on boot. For the write status, the register is broken up in half with OTP
+ * Write Data Status occupying the top 16 bits and the ECC status occupying the
+ * bottom 16 bits.
+ */
+#define FBNIC_NS_OTP_STATUS 0x0021d
+#define FBNIC_NS_OTP_WRITE_STATUS 0x0021e
+
+#define FBNIC_NS_OTP_WRITE_DATA_STATUS_MASK CSR_GENMASK(31, 16)
+#define FBNIC_NS_OTP_WRITE_ECC_STATUS_MASK CSR_GENMASK(15, 0)
+
+#define FBNIC_REGS_VERSION CSR_GENMASK(31, 16)
+#define FBNIC_REGS_HW_TYPE CSR_GENMASK(15, 8)
+enum {
+ FBNIC_CSR_VERSION_V1_0_ASIC = 1,
+};
+
#endif /* _FBNIC_CSR_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c b/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c
index c5f81f139e7e..b62b1d5b1453 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c
@@ -8,6 +8,7 @@
#include <net/devlink.h>
#include "fbnic.h"
+#include "fbnic_fw.h"
#include "fbnic_tlv.h"
#define FBNIC_SN_STR_LEN 24
@@ -369,6 +370,254 @@ static const struct devlink_ops fbnic_devlink_ops = {
.flash_update = fbnic_devlink_flash_update,
};
+static int fbnic_fw_reporter_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *priv_ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_dev *fbd = devlink_health_reporter_priv(reporter);
+ u32 offset, index, index_count, length, size;
+ struct fbnic_fw_completion *fw_cmpl;
+ u8 *dump_data, **data;
+ int err;
+
+ fw_cmpl = fbnic_fw_alloc_cmpl(FBNIC_TLV_MSG_ID_COREDUMP_GET_INFO_RESP);
+ if (!fw_cmpl)
+ return -ENOMEM;
+
+ err = fbnic_fw_xmit_coredump_info_msg(fbd, fw_cmpl, true);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to transmit core dump info msg");
+ goto cmpl_free;
+ }
+ if (!wait_for_completion_timeout(&fw_cmpl->done, 2 * HZ)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Timed out waiting on core dump info");
+ err = -ETIMEDOUT;
+ goto cmpl_cleanup;
+ }
+
+ size = fw_cmpl->u.coredump_info.size;
+ err = fw_cmpl->result;
+
+ fbnic_mbx_clear_cmpl(fbd, fw_cmpl);
+ fbnic_fw_put_cmpl(fw_cmpl);
+
+ /* Handle error returned by firmware */
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware core dump returned error");
+ return err;
+ }
+ if (!size) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Firmware core dump returned size 0");
+ return -EIO;
+ }
+
+	/* Read the dump; we can only transfer TLV_MAX_DATA at a time */
+ index_count = DIV_ROUND_UP(size, TLV_MAX_DATA);
+
+ fw_cmpl = __fbnic_fw_alloc_cmpl(FBNIC_TLV_MSG_ID_COREDUMP_READ_RESP,
+ sizeof(void *) * index_count + size);
+ if (!fw_cmpl)
+ return -ENOMEM;
+
+	/* Populate the pointer table with pointer offsets */
+ dump_data = (void *)&fw_cmpl->u.coredump.data[index_count];
+ data = fw_cmpl->u.coredump.data;
+ fw_cmpl->u.coredump.size = size;
+ fw_cmpl->u.coredump.stride = TLV_MAX_DATA;
+
+ for (index = 0; index < index_count; index++) {
+ /* First iteration installs completion */
+ struct fbnic_fw_completion *cmpl_arg = index ? NULL : fw_cmpl;
+
+ offset = index * TLV_MAX_DATA;
+ length = min(size - offset, TLV_MAX_DATA);
+
+ data[index] = dump_data + offset;
+ err = fbnic_fw_xmit_coredump_read_msg(fbd, cmpl_arg,
+ offset, length);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to transmit core dump msg");
+ if (cmpl_arg)
+ goto cmpl_free;
+ else
+ goto cmpl_cleanup;
+ }
+
+ if (wait_for_completion_timeout(&fw_cmpl->done, 2 * HZ)) {
+ reinit_completion(&fw_cmpl->done);
+ } else {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Timed out waiting on core dump (%d/%d)",
+ index + 1, index_count);
+ err = -ETIMEDOUT;
+ goto cmpl_cleanup;
+ }
+
+		/* If we didn't see the reply, record it as incomplete */
+ if (fw_cmpl->u.coredump.data[index]) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "No data for core dump chunk (%d/%d)",
+ index + 1, index_count);
+ err = -EIO;
+ goto cmpl_cleanup;
+ }
+ }
+
+ devlink_fmsg_binary_pair_nest_start(fmsg, "FW coredump");
+
+ for (offset = 0; offset < size; offset += length) {
+ length = min_t(u32, size - offset, TLV_MAX_DATA);
+
+ devlink_fmsg_binary_put(fmsg, dump_data + offset, length);
+ }
+
+ devlink_fmsg_binary_pair_nest_end(fmsg);
+
+cmpl_cleanup:
+ fbnic_mbx_clear_cmpl(fbd, fw_cmpl);
+cmpl_free:
+ fbnic_fw_put_cmpl(fw_cmpl);
+
+ return err;
+}
+
+static int
+fbnic_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_dev *fbd = devlink_health_reporter_priv(reporter);
+ u32 sec, msec;
+
+	/* Device is most likely down; we're not exchanging heartbeats */
+ if (!fbd->prev_firmware_time)
+ return 0;
+
+ sec = div_u64_rem(fbd->firmware_time, MSEC_PER_SEC, &msec);
+
+ devlink_fmsg_pair_nest_start(fmsg, "last_heartbeat");
+ devlink_fmsg_obj_nest_start(fmsg);
+ devlink_fmsg_pair_nest_start(fmsg, "fw_uptime");
+ devlink_fmsg_obj_nest_start(fmsg);
+ devlink_fmsg_u32_pair_put(fmsg, "sec", sec);
+ devlink_fmsg_u32_pair_put(fmsg, "msec", msec);
+ devlink_fmsg_obj_nest_end(fmsg);
+ devlink_fmsg_pair_nest_end(fmsg);
+ devlink_fmsg_obj_nest_end(fmsg);
+ devlink_fmsg_pair_nest_end(fmsg);
+
+ return 0;
+}
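
/*
 * Aside: hedged arithmetic check of the uptime split above:
 * div_u64_rem() returns the quotient and stores the remainder, so a
 * millisecond counter maps to (sec, msec) in a single call.
 */
static void uptime_split_demo(void)
{
	u32 sec, msec;

	sec = div_u64_rem(123456, MSEC_PER_SEC, &msec);
	/* 123456 ms -> sec == 123, msec == 456 */
	WARN_ON(sec != 123 || msec != 456);
}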
+
+void __printf(2, 3)
+fbnic_devlink_fw_report(struct fbnic_dev *fbd, const char *format, ...)
+{
+ char msg[FBNIC_FW_LOG_MAX_SIZE];
+ va_list args;
+
+ va_start(args, format);
+ vsnprintf(msg, FBNIC_FW_LOG_MAX_SIZE, format, args);
+ va_end(args);
+
+ devlink_health_report(fbd->fw_reporter, msg, fbd);
+ if (fbnic_fw_log_ready(fbd))
+ fbnic_fw_log_write(fbd, 0, fbd->firmware_time, msg);
+}
+
+static const struct devlink_health_reporter_ops fbnic_fw_ops = {
+ .name = "fw",
+ .dump = fbnic_fw_reporter_dump,
+ .diagnose = fbnic_fw_reporter_diagnose,
+};
+
+static u32 fbnic_read_otp_status(struct fbnic_dev *fbd)
+{
+ return fbnic_fw_rd32(fbd, FBNIC_NS_OTP_STATUS);
+}
+
+static int
+fbnic_otp_reporter_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *priv_ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_dev *fbd = devlink_health_reporter_priv(reporter);
+ u32 otp_status, otp_write_status, m;
+
+ otp_status = fbnic_read_otp_status(fbd);
+ otp_write_status = fbnic_fw_rd32(fbd, FBNIC_NS_OTP_WRITE_STATUS);
+
+ /* Dump OTP status */
+ devlink_fmsg_pair_nest_start(fmsg, "OTP");
+ devlink_fmsg_obj_nest_start(fmsg);
+
+ devlink_fmsg_u32_pair_put(fmsg, "Status", otp_status);
+
+ /* Extract OTP Write Data status */
+ m = FBNIC_NS_OTP_WRITE_DATA_STATUS_MASK;
+ devlink_fmsg_u32_pair_put(fmsg, "Data",
+ FIELD_GET(m, otp_write_status));
+
+ /* Extract OTP Write ECC status */
+ m = FBNIC_NS_OTP_WRITE_ECC_STATUS_MASK;
+ devlink_fmsg_u32_pair_put(fmsg, "ECC",
+ FIELD_GET(m, otp_write_status));
+
+ devlink_fmsg_obj_nest_end(fmsg);
+ devlink_fmsg_pair_nest_end(fmsg);
+
+ return 0;
+}
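
/*
 * Aside: hedged check of the 16/16 register split documented in
 * fbnic_csr.h: FIELD_GET() both masks and shifts, so the two halves of
 * FBNIC_NS_OTP_WRITE_STATUS come out as plain 16-bit values.
 */
static void otp_split_demo(void)
{
	u32 write_status = 0xabcd1234;

	/* Data status is bits 31:16, ECC status is bits 15:0. */
	WARN_ON(FIELD_GET(FBNIC_NS_OTP_WRITE_DATA_STATUS_MASK,
			  write_status) != 0xabcd);
	WARN_ON(FIELD_GET(FBNIC_NS_OTP_WRITE_ECC_STATUS_MASK,
			  write_status) != 0x1234);
}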
+
+void fbnic_devlink_otp_check(struct fbnic_dev *fbd, const char *msg)
+{
+ /* Check if there is anything to report */
+ if (!fbnic_read_otp_status(fbd))
+ return;
+
+ devlink_health_report(fbd->otp_reporter, msg, fbd);
+ if (fbnic_fw_log_ready(fbd))
+ fbnic_fw_log_write(fbd, 0, fbd->firmware_time, msg);
+}
+
+static const struct devlink_health_reporter_ops fbnic_otp_ops = {
+ .name = "otp",
+ .dump = fbnic_otp_reporter_dump,
+};
+
+int fbnic_devlink_health_create(struct fbnic_dev *fbd)
+{
+ fbd->fw_reporter = devlink_health_reporter_create(priv_to_devlink(fbd),
+ &fbnic_fw_ops, fbd);
+ if (IS_ERR(fbd->fw_reporter)) {
+ dev_warn(fbd->dev,
+ "Failed to create FW fault reporter: %pe\n",
+ fbd->fw_reporter);
+ return PTR_ERR(fbd->fw_reporter);
+ }
+
+ fbd->otp_reporter = devlink_health_reporter_create(priv_to_devlink(fbd),
+ &fbnic_otp_ops, fbd);
+ if (IS_ERR(fbd->otp_reporter)) {
+ devlink_health_reporter_destroy(fbd->fw_reporter);
+ dev_warn(fbd->dev,
+ "Failed to create OTP fault reporter: %pe\n",
+ fbd->otp_reporter);
+ return PTR_ERR(fbd->otp_reporter);
+ }
+
+ return 0;
+}
+
+void fbnic_devlink_health_destroy(struct fbnic_dev *fbd)
+{
+ devlink_health_reporter_destroy(fbd->otp_reporter);
+ devlink_health_reporter_destroy(fbd->fw_reporter);
+}
+
void fbnic_devlink_free(struct fbnic_dev *fbd)
{
struct devlink *devlink = priv_to_devlink(fbd);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
index dc7ba8d5fc43..a1c2db69b198 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
@@ -2,6 +2,7 @@
/* Copyright (c) Meta Platforms, Inc. and affiliates. */
#include <linux/ethtool.h>
+#include <linux/ethtool_netlink.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <net/ipv6.h>
@@ -111,6 +112,20 @@ static const struct fbnic_stat fbnic_gstrings_hw_q_stats[] = {
FBNIC_HW_RXB_DEQUEUE_STATS_LEN * FBNIC_RXB_DEQUEUE_INDICES + \
FBNIC_HW_Q_STATS_LEN * FBNIC_MAX_QUEUES)
+#define FBNIC_QUEUE_STAT(name, stat) \
+ FBNIC_STAT_FIELDS(fbnic_ring, name, stat)
+
+static const struct fbnic_stat fbnic_gstrings_xdp_stats[] = {
+ FBNIC_QUEUE_STAT("xdp_tx_queue_%u_packets", stats.packets),
+ FBNIC_QUEUE_STAT("xdp_tx_queue_%u_bytes", stats.bytes),
+ FBNIC_QUEUE_STAT("xdp_tx_queue_%u_dropped", stats.dropped),
+};
+
+#define FBNIC_XDP_STATS_LEN ARRAY_SIZE(fbnic_gstrings_xdp_stats)
+
+#define FBNIC_STATS_LEN \
+ (FBNIC_HW_STATS_LEN + FBNIC_XDP_STATS_LEN * FBNIC_MAX_XDPQS)
+
static void
fbnic_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
@@ -160,6 +175,7 @@ static void fbnic_clone_swap_cfg(struct fbnic_net *orig,
swap(clone->num_rx_queues, orig->num_rx_queues);
swap(clone->num_tx_queues, orig->num_tx_queues);
swap(clone->num_napi, orig->num_napi);
+ swap(clone->hds_thresh, orig->hds_thresh);
}
static void fbnic_aggregate_vector_counters(struct fbnic_net *fbn,
@@ -277,15 +293,21 @@ fbnic_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
ring->rx_mini_pending = fbn->hpq_size;
ring->rx_jumbo_pending = fbn->ppq_size;
ring->tx_pending = fbn->txq_size;
+
+ kernel_ring->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_ENABLED;
+ kernel_ring->hds_thresh_max = FBNIC_HDS_THRESH_MAX;
+ kernel_ring->hds_thresh = fbn->hds_thresh;
}
static void fbnic_set_rings(struct fbnic_net *fbn,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring)
{
fbn->rcq_size = ring->rx_pending;
fbn->hpq_size = ring->rx_mini_pending;
fbn->ppq_size = ring->rx_jumbo_pending;
fbn->txq_size = ring->tx_pending;
+ fbn->hds_thresh = kernel_ring->hds_thresh;
}
static int
@@ -316,8 +338,24 @@ fbnic_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
return -EINVAL;
}
+ if (kernel_ring->tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_DISABLED) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot disable TCP data split");
+ return -EINVAL;
+ }
+
+	/* If an XDP program is attached, we should check for potential frame
+	 * splitting. If the new HDS threshold can cause splitting, we should
+	 * only allow it if the attached XDP program can handle frags.
+	 */
+ if (fbnic_check_split_frames(fbn->xdp_prog, netdev->mtu,
+ kernel_ring->hds_thresh)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Use higher HDS threshold or multi-buf capable program");
+ return -EINVAL;
+ }
+
if (!netif_running(netdev)) {
- fbnic_set_rings(fbn, ring);
+ fbnic_set_rings(fbn, ring, kernel_ring);
return 0;
}
@@ -325,7 +363,7 @@ fbnic_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
if (!clone)
return -ENOMEM;
- fbnic_set_rings(clone, ring);
+ fbnic_set_rings(clone, ring, kernel_ring);
err = fbnic_alloc_napi_vectors(clone);
if (err)
@@ -398,6 +436,16 @@ static void fbnic_get_rxb_dequeue_strings(u8 **data, unsigned int idx)
ethtool_sprintf(data, stat->string, idx);
}
+static void fbnic_get_xdp_queue_strings(u8 **data, unsigned int idx)
+{
+ const struct fbnic_stat *stat;
+ int i;
+
+ stat = fbnic_gstrings_xdp_stats;
+ for (i = 0; i < FBNIC_XDP_STATS_LEN; i++, stat++)
+ ethtool_sprintf(data, stat->string, idx);
+}
+
static void fbnic_get_strings(struct net_device *dev, u32 sset, u8 *data)
{
const struct fbnic_stat *stat;
@@ -423,6 +471,9 @@ static void fbnic_get_strings(struct net_device *dev, u32 sset, u8 *data)
for (i = 0; i < FBNIC_HW_Q_STATS_LEN; i++, stat++)
ethtool_sprintf(&data, stat->string, idx);
}
+
+ for (i = 0; i < FBNIC_MAX_XDPQS; i++)
+ fbnic_get_xdp_queue_strings(&data, i);
break;
}
}
@@ -440,6 +491,24 @@ static void fbnic_report_hw_stats(const struct fbnic_stat *stat,
}
}
+static void fbnic_get_xdp_queue_stats(struct fbnic_ring *ring, u64 **data)
+{
+ const struct fbnic_stat *stat;
+ int i;
+
+ if (!ring) {
+ *data += FBNIC_XDP_STATS_LEN;
+ return;
+ }
+
+ stat = fbnic_gstrings_xdp_stats;
+ for (i = 0; i < FBNIC_XDP_STATS_LEN; i++, stat++, (*data)++) {
+ u8 *p = (u8 *)ring + stat->offset;
+
+ **data = *(u64 *)p;
+ }
+}
+
static void fbnic_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
@@ -449,7 +518,7 @@ static void fbnic_get_ethtool_stats(struct net_device *dev,
fbnic_get_hw_stats(fbn->fbd);
- spin_lock(&fbd->hw_stats_lock);
+ spin_lock(&fbd->hw_stats.lock);
fbnic_report_hw_stats(fbnic_gstrings_hw_stats, &fbd->hw_stats,
FBNIC_HW_FIXED_STATS_LEN, &data);
@@ -486,14 +555,17 @@ static void fbnic_get_ethtool_stats(struct net_device *dev,
fbnic_report_hw_stats(fbnic_gstrings_hw_q_stats, hw_q,
FBNIC_HW_Q_STATS_LEN, &data);
}
- spin_unlock(&fbd->hw_stats_lock);
+ spin_unlock(&fbd->hw_stats.lock);
+
+ for (i = 0; i < FBNIC_MAX_XDPQS; i++)
+ fbnic_get_xdp_queue_stats(fbn->tx[i + FBNIC_MAX_TXQS], &data);
}
static int fbnic_get_sset_count(struct net_device *dev, int sset)
{
switch (sset) {
case ETH_SS_STATS:
- return FBNIC_HW_STATS_LEN;
+ return FBNIC_STATS_LEN;
default:
return -EOPNOTSUPP;
}
@@ -1310,7 +1382,7 @@ fbnic_get_rss_hash_opts(struct net_device *netdev,
#define FBNIC_L2_HASH_OPTIONS \
(RXH_L2DA | RXH_DISCARD)
#define FBNIC_L3_HASH_OPTIONS \
- (FBNIC_L2_HASH_OPTIONS | RXH_IP_SRC | RXH_IP_DST)
+ (FBNIC_L2_HASH_OPTIONS | RXH_IP_SRC | RXH_IP_DST | RXH_IP6_FL)
#define FBNIC_L4_HASH_OPTIONS \
(FBNIC_L3_HASH_OPTIONS | RXH_L4_B_0_1 | RXH_L4_B_2_3)
@@ -1563,6 +1635,65 @@ static void fbnic_get_ts_stats(struct net_device *netdev,
}
}
+static int
+fbnic_get_module_eeprom_by_page(struct net_device *netdev,
+ const struct ethtool_module_eeprom *page_data,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_fw_completion *fw_cmpl;
+ struct fbnic_dev *fbd = fbn->fbd;
+ int err;
+
+ if (page_data->i2c_address != 0x50) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Invalid i2c address. Only 0x50 is supported");
+ return -EINVAL;
+ }
+
+ fw_cmpl = __fbnic_fw_alloc_cmpl(FBNIC_TLV_MSG_ID_QSFP_READ_RESP,
+ page_data->length);
+ if (!fw_cmpl)
+ return -ENOMEM;
+
+ /* Initialize completion and queue it for FW to process */
+ fw_cmpl->u.qsfp.length = page_data->length;
+ fw_cmpl->u.qsfp.offset = page_data->offset;
+ fw_cmpl->u.qsfp.page = page_data->page;
+ fw_cmpl->u.qsfp.bank = page_data->bank;
+
+ err = fbnic_fw_xmit_qsfp_read_msg(fbd, fw_cmpl, page_data->page,
+ page_data->bank, page_data->offset,
+ page_data->length);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to transmit EEPROM read request");
+ goto exit_free;
+ }
+
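+	/* Give the FW up to 2 seconds to service the read before giving up */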
+ if (!wait_for_completion_timeout(&fw_cmpl->done, 2 * HZ)) {
+ err = -ETIMEDOUT;
+ NL_SET_ERR_MSG_MOD(extack,
+ "Timed out waiting for firmware response");
+ goto exit_cleanup;
+ }
+
+ if (fw_cmpl->result) {
+ err = fw_cmpl->result;
+ NL_SET_ERR_MSG_MOD(extack, "Failed to read EEPROM");
+ goto exit_cleanup;
+ }
+
+ memcpy(page_data->data, fw_cmpl->u.qsfp.data, page_data->length);
+
+exit_cleanup:
+ fbnic_mbx_clear_cmpl(fbd, fw_cmpl);
+exit_free:
+ fbnic_fw_put_cmpl(fw_cmpl);
+
+ return err ? : page_data->length;
+}
+
static void fbnic_set_counter(u64 *stat, struct fbnic_stat_counter *counter)
{
if (counter->reported)
@@ -1570,6 +1701,63 @@ static void fbnic_set_counter(u64 *stat, struct fbnic_stat_counter *counter)
}
static void
+fbnic_get_pause_stats(struct net_device *netdev,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_mac_stats *mac_stats;
+ struct fbnic_dev *fbd = fbn->fbd;
+
+ mac_stats = &fbd->hw_stats.mac;
+
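+	/* Read the pause counters without resetting them in hardware */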
+ fbd->mac->get_pause_stats(fbd, false, &mac_stats->pause);
+
+ pause_stats->tx_pause_frames = mac_stats->pause.tx_pause_frames.value;
+ pause_stats->rx_pause_frames = mac_stats->pause.rx_pause_frames.value;
+}
+
+static void
+fbnic_get_fec_stats(struct net_device *netdev,
+ struct ethtool_fec_stats *fec_stats,
+ struct ethtool_fec_hist *hist)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_phy_stats *phy_stats;
+ struct fbnic_dev *fbd = fbn->fbd;
+
+ fbnic_get_hw_stats32(fbd);
+ phy_stats = &fbd->hw_stats.phy;
+
+ spin_lock(&fbd->hw_stats.lock);
+ fec_stats->corrected_blocks.total =
+ phy_stats->fec.corrected_blocks.value;
+ fec_stats->uncorrectable_blocks.total =
+ phy_stats->fec.uncorrectable_blocks.value;
+ spin_unlock(&fbd->hw_stats.lock);
+}
+
+static void
+fbnic_get_eth_phy_stats(struct net_device *netdev,
+ struct ethtool_eth_phy_stats *eth_phy_stats)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_phy_stats *phy_stats;
+ struct fbnic_dev *fbd = fbn->fbd;
+ u64 total = 0;
+ int i;
+
+ fbnic_get_hw_stats32(fbd);
+ phy_stats = &fbd->hw_stats.phy;
+
+ spin_lock(&fbd->hw_stats.lock);
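+	/* Fold the per-lane symbol error counters into a single total */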
+ for (i = 0; i < FBNIC_PCS_MAX_LANES; i++)
+ total += phy_stats->pcs.SymbolErrorDuringCarrier.lanes[i].value;
+
+ eth_phy_stats->SymbolErrorDuringCarrier = total;
+ spin_unlock(&fbd->hw_stats.lock);
+}
+
+static void
fbnic_get_eth_mac_stats(struct net_device *netdev,
struct ethtool_eth_mac_stats *eth_mac_stats)
{
@@ -1676,8 +1864,11 @@ fbnic_get_rmon_stats(struct net_device *netdev,
}
static const struct ethtool_ops fbnic_ethtool_ops = {
+ .cap_link_lanes_supported = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_RX_MAX_FRAMES,
+ .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT |
+ ETHTOOL_RING_USE_HDS_THRS,
.rxfh_max_num_contexts = FBNIC_RPC_RSS_TBL_COUNT,
.get_drvinfo = fbnic_get_drvinfo,
.get_regs_len = fbnic_get_regs_len,
@@ -1687,6 +1878,7 @@ static const struct ethtool_ops fbnic_ethtool_ops = {
.set_coalesce = fbnic_set_coalesce,
.get_ringparam = fbnic_get_ringparam,
.set_ringparam = fbnic_set_ringparam,
+ .get_pause_stats = fbnic_get_pause_stats,
.get_pauseparam = fbnic_phylink_get_pauseparam,
.set_pauseparam = fbnic_phylink_set_pauseparam,
.get_strings = fbnic_get_strings,
@@ -1708,7 +1900,10 @@ static const struct ethtool_ops fbnic_ethtool_ops = {
.get_ts_info = fbnic_get_ts_info,
.get_ts_stats = fbnic_get_ts_stats,
.get_link_ksettings = fbnic_phylink_ethtool_ksettings_get,
+ .get_fec_stats = fbnic_get_fec_stats,
.get_fecparam = fbnic_phylink_get_fecparam,
+ .get_module_eeprom_by_page = fbnic_get_module_eeprom_by_page,
+ .get_eth_phy_stats = fbnic_get_eth_phy_stats,
.get_eth_mac_stats = fbnic_get_eth_mac_stats,
.get_eth_ctrl_stats = fbnic_get_eth_ctrl_stats,
.get_rmon_stats = fbnic_get_rmon_stats,
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
index 0c55be7d2547..c87cb9ed09e7 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
@@ -495,6 +495,11 @@ int fbnic_fw_xmit_ownership_msg(struct fbnic_dev *fbd, bool take_ownership)
fbd->last_heartbeat_request = req_time;
+ /* Set prev_firmware_time to 0 to avoid triggering firmware crash
+ * detection until we receive the second uptime in a heartbeat resp.
+ */
+ fbd->prev_firmware_time = 0;
+
/* Set heartbeat detection based on if we are taking ownership */
fbd->fw_heartbeat_enabled = take_ownership;
@@ -653,10 +658,14 @@ static int fbnic_fw_parse_cap_resp(void *opaque, struct fbnic_tlv_msg **results)
fbd->fw_cap.anti_rollback_version =
fta_get_uint(results, FBNIC_FW_CAP_RESP_ANTI_ROLLBACK_VERSION);
+ /* Always assume we need a BMC reinit */
+ fbd->fw_cap.need_bmc_tcam_reinit = true;
+
return 0;
}
static const struct fbnic_tlv_index fbnic_ownership_resp_index[] = {
+ FBNIC_TLV_ATTR_U64(FBNIC_FW_OWNERSHIP_TIME),
FBNIC_TLV_ATTR_LAST
};
@@ -668,10 +677,14 @@ static int fbnic_fw_parse_ownership_resp(void *opaque,
/* Count the ownership response as a heartbeat reply */
fbd->last_heartbeat_response = jiffies;
+ /* Capture firmware time for logging and firmware crash check */
+ fbd->firmware_time = fta_get_uint(results, FBNIC_FW_OWNERSHIP_TIME);
+
return 0;
}
static const struct fbnic_tlv_index fbnic_heartbeat_resp_index[] = {
+ FBNIC_TLV_ATTR_U64(FBNIC_FW_HEARTBEAT_UPTIME),
FBNIC_TLV_ATTR_LAST
};
@@ -682,6 +695,9 @@ static int fbnic_fw_parse_heartbeat_resp(void *opaque,
fbd->last_heartbeat_response = jiffies;
+ /* Capture firmware time for logging and firmware crash check */
+ fbd->firmware_time = fta_get_uint(results, FBNIC_FW_HEARTBEAT_UPTIME);
+
return 0;
}
@@ -703,6 +719,7 @@ static int fbnic_fw_xmit_heartbeat_message(struct fbnic_dev *fbd)
goto free_message;
fbd->last_heartbeat_request = req_time;
+ fbd->prev_firmware_time = fbd->firmware_time;
return err;
@@ -763,7 +780,8 @@ void fbnic_fw_check_heartbeat(struct fbnic_dev *fbd)
return;
/* Was the last heartbeat response long time ago? */
- if (!fbnic_fw_heartbeat_current(fbd)) {
+ if (!fbnic_fw_heartbeat_current(fbd) ||
+ fbd->firmware_time < fbd->prev_firmware_time) {
dev_warn(fbd->dev,
"Firmware did not respond to heartbeat message\n");
fbd->fw_heartbeat_enabled = false;
@@ -775,6 +793,215 @@ void fbnic_fw_check_heartbeat(struct fbnic_dev *fbd)
dev_warn(fbd->dev, "Failed to send heartbeat message\n");
}
+/**
+ * fbnic_fw_xmit_coredump_info_msg - Create and transmit a coredump info message
+ * @fbd: FBNIC device structure
+ * @cmpl_data: Structure to store info in
+ * @force: Force coredump event if one hasn't already occurred
+ *
+ * Return: zero on success, negative errno on failure
+ *
+ * Asks the FW for info related to the coredump. If a coredump doesn't exist,
+ * one can optionally be created by passing force as true.
+ */
+int fbnic_fw_xmit_coredump_info_msg(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data,
+ bool force)
+{
+ struct fbnic_tlv_msg *msg;
+ int err = 0;
+
+ msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_COREDUMP_GET_INFO_REQ);
+ if (!msg)
+ return -ENOMEM;
+
+ if (force) {
+ err = fbnic_tlv_attr_put_flag(msg, FBNIC_FW_COREDUMP_REQ_INFO_CREATE);
+ if (err)
+ goto free_msg;
+ }
+
+ err = fbnic_mbx_map_req_w_cmpl(fbd, msg, cmpl_data);
+ if (err)
+ goto free_msg;
+
+ return 0;
+
+free_msg:
+ free_page((unsigned long)msg);
+ return err;
+}
+
+static const struct fbnic_tlv_index fbnic_coredump_info_resp_index[] = {
+ FBNIC_TLV_ATTR_FLAG(FBNIC_FW_COREDUMP_INFO_AVAILABLE),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_COREDUMP_INFO_SIZE),
+ FBNIC_TLV_ATTR_S32(FBNIC_FW_COREDUMP_INFO_ERROR),
+ FBNIC_TLV_ATTR_LAST
+};
+
+static int
+fbnic_fw_parse_coredump_info_resp(void *opaque, struct fbnic_tlv_msg **results)
+{
+ struct fbnic_fw_completion *cmpl_data;
+ struct fbnic_dev *fbd = opaque;
+ u32 msg_type;
+ s32 err;
+
+ /* Verify we have a completion pointer to provide with data */
+ msg_type = FBNIC_TLV_MSG_ID_COREDUMP_GET_INFO_RESP;
+ cmpl_data = fbnic_fw_get_cmpl_by_type(fbd, msg_type);
+ if (!cmpl_data)
+ return -ENOSPC;
+
+ err = fta_get_sint(results, FBNIC_FW_COREDUMP_INFO_ERROR);
+ if (err)
+ goto msg_err;
+
+ if (!results[FBNIC_FW_COREDUMP_INFO_AVAILABLE]) {
+ err = -ENOENT;
+ goto msg_err;
+ }
+
+ cmpl_data->u.coredump_info.size =
+ fta_get_uint(results, FBNIC_FW_COREDUMP_INFO_SIZE);
+
+msg_err:
+ cmpl_data->result = err;
+ complete(&cmpl_data->done);
+ fbnic_fw_put_cmpl(cmpl_data);
+
+ return err;
+}
+
+/**
+ * fbnic_fw_xmit_coredump_read_msg - Create and transmit a coredump read request
+ * @fbd: FBNIC device structure
+ * @cmpl_data: Completion struct to store coredump
+ * @offset: Offset into coredump requested
+ * @length: Length of section of coredump to fetch
+ *
+ * Return: zero on success, negative errno on failure
+ *
+ * Asks the firmware to provide a section of the coredump back in a message.
+ * The response will have an offset and size matching the values provided.
+ */
+int fbnic_fw_xmit_coredump_read_msg(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data,
+ u32 offset, u32 length)
+{
+ struct fbnic_tlv_msg *msg;
+ int err = 0;
+
+ msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_COREDUMP_READ_REQ);
+ if (!msg)
+ return -ENOMEM;
+
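+	/* Omit zero offset/length; the FW presumably treats absent attrs as 0 */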
+ if (offset) {
+ err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_COREDUMP_READ_OFFSET,
+ offset);
+ if (err)
+ goto free_message;
+ }
+
+ if (length) {
+ err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_COREDUMP_READ_LENGTH,
+ length);
+ if (err)
+ goto free_message;
+ }
+
+ err = fbnic_mbx_map_req_w_cmpl(fbd, msg, cmpl_data);
+ if (err)
+ goto free_message;
+
+ return 0;
+
+free_message:
+ free_page((unsigned long)msg);
+ return err;
+}
+
+static const struct fbnic_tlv_index fbnic_coredump_resp_index[] = {
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_COREDUMP_READ_OFFSET),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_COREDUMP_READ_LENGTH),
+ FBNIC_TLV_ATTR_RAW_DATA(FBNIC_FW_COREDUMP_READ_DATA),
+ FBNIC_TLV_ATTR_S32(FBNIC_FW_COREDUMP_READ_ERROR),
+ FBNIC_TLV_ATTR_LAST
+};
+
+static int fbnic_fw_parse_coredump_resp(void *opaque,
+ struct fbnic_tlv_msg **results)
+{
+ struct fbnic_fw_completion *cmpl_data;
+ u32 index, last_offset, last_length;
+ struct fbnic_dev *fbd = opaque;
+ struct fbnic_tlv_msg *data_hdr;
+ u32 length, offset;
+ u32 msg_type;
+ s32 err;
+
+ /* Verify we have a completion pointer to provide with data */
+ msg_type = FBNIC_TLV_MSG_ID_COREDUMP_READ_RESP;
+ cmpl_data = fbnic_fw_get_cmpl_by_type(fbd, msg_type);
+ if (!cmpl_data)
+ return -ENOSPC;
+
+ err = fta_get_sint(results, FBNIC_FW_COREDUMP_READ_ERROR);
+ if (err)
+ goto msg_err;
+
+ data_hdr = results[FBNIC_FW_COREDUMP_READ_DATA];
+ if (!data_hdr) {
+ err = -ENODATA;
+ goto msg_err;
+ }
+
+ offset = fta_get_uint(results, FBNIC_FW_COREDUMP_READ_OFFSET);
+ length = fta_get_uint(results, FBNIC_FW_COREDUMP_READ_LENGTH);
+
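+	/* The reported length must fit in the attribute payload (hdr is one u32) */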
+ if (length > le16_to_cpu(data_hdr->hdr.len) - sizeof(u32)) {
+ dev_err(fbd->dev, "length greater than size of message\n");
+ err = -EINVAL;
+ goto msg_err;
+ }
+
+ /* Only the last offset can have a length != stride */
+ last_length =
+ (cmpl_data->u.coredump.size % cmpl_data->u.coredump.stride) ? :
+ cmpl_data->u.coredump.stride;
+ last_offset = cmpl_data->u.coredump.size - last_length;
+
+ /* Verify offset and length */
+ if (offset % cmpl_data->u.coredump.stride || offset > last_offset) {
+ dev_err(fbd->dev, "offset %d out of range\n", offset);
+ err = -EINVAL;
+ } else if (length != ((offset == last_offset) ?
+ last_length : cmpl_data->u.coredump.stride)) {
+ dev_err(fbd->dev, "length %d out of range for offset %d\n",
+ length, offset);
+ err = -EINVAL;
+ }
+ if (err)
+ goto msg_err;
+
+	/* If the data pointer is NULL the chunk is already filled, skip the copy */
+ index = offset / cmpl_data->u.coredump.stride;
+ if (!cmpl_data->u.coredump.data[index])
+ goto msg_err;
+
+ /* Copy data and mark index filled by setting pointer to NULL */
+ memcpy(cmpl_data->u.coredump.data[index],
+ fbnic_tlv_attr_get_value_ptr(data_hdr), length);
+ cmpl_data->u.coredump.data[index] = NULL;
+
+msg_err:
+ cmpl_data->result = err;
+ complete(&cmpl_data->done);
+ fbnic_fw_put_cmpl(cmpl_data);
+
+ return err;
+}
+
int fbnic_fw_xmit_fw_start_upgrade(struct fbnic_dev *fbd,
struct fbnic_fw_completion *cmpl_data,
unsigned int id, unsigned int len)
@@ -958,6 +1185,138 @@ static int fbnic_fw_parse_fw_finish_upgrade_req(void *opaque,
}
/**
+ * fbnic_fw_xmit_qsfp_read_msg - Transmit a QSFP read request
+ * @fbd: FBNIC device structure
+ * @cmpl_data: Structure to store EEPROM response in
+ * @page: Refers to page number on page-enabled QSFP modules
+ * @bank: Refers to a collection of pages
+ * @offset: Offset into QSFP EEPROM requested
+ * @length: Length of section of QSFP EEPROM to fetch
+ *
+ * Return: zero on success, negative errno on failure
+ *
+ * Asks the firmware to provide a section of the QSFP EEPROM back in a
+ * message. The response will have an offset and size matching the values
+ * provided.
+ */
+int fbnic_fw_xmit_qsfp_read_msg(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data,
+ u32 page, u32 bank, u32 offset, u32 length)
+{
+ struct fbnic_tlv_msg *msg;
+ int err = 0;
+
+ if (!length || length > TLV_MAX_DATA)
+ return -EINVAL;
+
+ msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_QSFP_READ_REQ);
+ if (!msg)
+ return -ENOMEM;
+
+ err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_QSFP_BANK, bank);
+ if (err)
+ goto free_message;
+
+ err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_QSFP_PAGE, page);
+ if (err)
+ goto free_message;
+
+ err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_QSFP_OFFSET, offset);
+ if (err)
+ goto free_message;
+
+ err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_QSFP_LENGTH, length);
+ if (err)
+ goto free_message;
+
+ err = fbnic_mbx_map_req_w_cmpl(fbd, msg, cmpl_data);
+ if (err)
+ goto free_message;
+
+ return 0;
+
+free_message:
+ free_page((unsigned long)msg);
+ return err;
+}
+
+static const struct fbnic_tlv_index fbnic_qsfp_read_resp_index[] = {
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_QSFP_BANK),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_QSFP_PAGE),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_QSFP_OFFSET),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_QSFP_LENGTH),
+ FBNIC_TLV_ATTR_RAW_DATA(FBNIC_FW_QSFP_DATA),
+ FBNIC_TLV_ATTR_S32(FBNIC_FW_QSFP_ERROR),
+ FBNIC_TLV_ATTR_LAST
+};
+
+static int fbnic_fw_parse_qsfp_read_resp(void *opaque,
+ struct fbnic_tlv_msg **results)
+{
+ struct fbnic_fw_completion *cmpl_data;
+ struct fbnic_dev *fbd = opaque;
+ struct fbnic_tlv_msg *data_hdr;
+ u32 length, offset, page, bank;
+ u8 *data;
+ s32 err;
+
+ /* Verify we have a completion pointer to provide with data */
+ cmpl_data = fbnic_fw_get_cmpl_by_type(fbd,
+ FBNIC_TLV_MSG_ID_QSFP_READ_RESP);
+ if (!cmpl_data)
+ return -ENOSPC;
+
+ bank = fta_get_uint(results, FBNIC_FW_QSFP_BANK);
+ if (bank != cmpl_data->u.qsfp.bank) {
+ dev_warn(fbd->dev, "bank not equal to bank requested: %d vs %d\n",
+ bank, cmpl_data->u.qsfp.bank);
+ err = -EINVAL;
+ goto msg_err;
+ }
+
+ page = fta_get_uint(results, FBNIC_FW_QSFP_PAGE);
+ if (page != cmpl_data->u.qsfp.page) {
+ dev_warn(fbd->dev, "page not equal to page requested: %d vs %d\n",
+ page, cmpl_data->u.qsfp.page);
+ err = -EINVAL;
+ goto msg_err;
+ }
+
+ offset = fta_get_uint(results, FBNIC_FW_QSFP_OFFSET);
+ length = fta_get_uint(results, FBNIC_FW_QSFP_LENGTH);
+
+ if (length != cmpl_data->u.qsfp.length ||
+ offset != cmpl_data->u.qsfp.offset) {
+ dev_warn(fbd->dev,
+ "offset/length not equal to size requested: %d/%d vs %d/%d\n",
+ offset, length,
+ cmpl_data->u.qsfp.offset, cmpl_data->u.qsfp.length);
+ err = -EINVAL;
+ goto msg_err;
+ }
+
+ err = fta_get_sint(results, FBNIC_FW_QSFP_ERROR);
+ if (err)
+ goto msg_err;
+
+ data_hdr = results[FBNIC_FW_QSFP_DATA];
+ if (!data_hdr) {
+ err = -ENODATA;
+ goto msg_err;
+ }
+
+ /* Copy data */
+ data = fbnic_tlv_attr_get_value_ptr(data_hdr);
+ memcpy(cmpl_data->u.qsfp.data, data, length);
+msg_err:
+ cmpl_data->result = err;
+ complete(&cmpl_data->done);
+ fbnic_fw_put_cmpl(cmpl_data);
+
+ return err;
+}
+
+/**
* fbnic_fw_xmit_tsene_read_msg - Create and transmit a sensor read request
* @fbd: FBNIC device structure
* @cmpl_data: Completion data structure to store sensor response
@@ -1204,6 +1563,11 @@ static const struct fbnic_tlv_parser fbnic_fw_tlv_parser[] = {
fbnic_fw_parse_ownership_resp),
FBNIC_TLV_PARSER(HEARTBEAT_RESP, fbnic_heartbeat_resp_index,
fbnic_fw_parse_heartbeat_resp),
+ FBNIC_TLV_PARSER(COREDUMP_GET_INFO_RESP,
+ fbnic_coredump_info_resp_index,
+ fbnic_fw_parse_coredump_info_resp),
+ FBNIC_TLV_PARSER(COREDUMP_READ_RESP, fbnic_coredump_resp_index,
+ fbnic_fw_parse_coredump_resp),
FBNIC_TLV_PARSER(FW_START_UPGRADE_RESP,
fbnic_fw_start_upgrade_resp_index,
fbnic_fw_parse_fw_start_upgrade_resp),
@@ -1213,6 +1577,9 @@ static const struct fbnic_tlv_parser fbnic_fw_tlv_parser[] = {
FBNIC_TLV_PARSER(FW_FINISH_UPGRADE_REQ,
fbnic_fw_finish_upgrade_req_index,
fbnic_fw_parse_fw_finish_upgrade_req),
+ FBNIC_TLV_PARSER(QSFP_READ_RESP,
+ fbnic_qsfp_read_resp_index,
+ fbnic_fw_parse_qsfp_read_resp),
FBNIC_TLV_PARSER(TSENE_READ_RESP,
fbnic_tsene_read_resp_index,
fbnic_fw_parse_tsene_read_resp),
@@ -1410,6 +1777,109 @@ void fbnic_mbx_flush_tx(struct fbnic_dev *fbd)
} while (time_is_after_jiffies(timeout));
}
+int fbnic_fw_xmit_rpc_macda_sync(struct fbnic_dev *fbd)
+{
+ struct fbnic_tlv_msg *mac_array;
+ int i, addr_count = 0, err;
+ struct fbnic_tlv_msg *msg;
+ u32 rx_flags = 0;
+
+ /* Nothing to do if there is no FW to sync with */
+ if (!fbd->mbx[FBNIC_IPC_MBX_TX_IDX].ready)
+ return 0;
+
+ msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_RPC_MAC_SYNC_REQ);
+ if (!msg)
+ return -ENOMEM;
+
+ mac_array = fbnic_tlv_attr_nest_start(msg,
+ FBNIC_FW_RPC_MAC_SYNC_UC_ARRAY);
+ if (!mac_array)
+ goto free_message_nospc;
+
+ /* Populate the unicast MAC addrs and capture PROMISC/ALLMULTI flags */
+ for (addr_count = 0, i = FBNIC_RPC_TCAM_MACDA_PROMISC_IDX;
+ i >= fbd->mac_addr_boundary; i--) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];
+
+ if (mac_addr->state != FBNIC_TCAM_S_VALID)
+ continue;
+ if (test_bit(FBNIC_MAC_ADDR_T_ALLMULTI, mac_addr->act_tcam))
+ rx_flags |= FW_RPC_MAC_SYNC_RX_FLAGS_ALLMULTI;
+ if (test_bit(FBNIC_MAC_ADDR_T_PROMISC, mac_addr->act_tcam))
+ rx_flags |= FW_RPC_MAC_SYNC_RX_FLAGS_PROMISC;
+ if (!test_bit(FBNIC_MAC_ADDR_T_UNICAST, mac_addr->act_tcam))
+ continue;
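+		/* Too many addrs for the FW's UC array, fall back to promiscuous */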
+ if (addr_count == FW_RPC_MAC_SYNC_UC_ARRAY_SIZE) {
+ rx_flags |= FW_RPC_MAC_SYNC_RX_FLAGS_PROMISC;
+ continue;
+ }
+
+ err = fbnic_tlv_attr_put_value(mac_array,
+ FBNIC_FW_RPC_MAC_SYNC_MAC_ADDR,
+ mac_addr->value.addr8,
+ ETH_ALEN);
+ if (err)
+ goto free_message;
+ addr_count++;
+ }
+
+ /* Close array */
+ fbnic_tlv_attr_nest_stop(msg);
+
+ mac_array = fbnic_tlv_attr_nest_start(msg,
+ FBNIC_FW_RPC_MAC_SYNC_MC_ARRAY);
+ if (!mac_array)
+ goto free_message_nospc;
+
+ /* Repeat for multicast addrs, record BROADCAST/ALLMULTI flags */
+ for (addr_count = 0, i = FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX;
+ i < fbd->mac_addr_boundary; i++) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];
+
+ if (mac_addr->state != FBNIC_TCAM_S_VALID)
+ continue;
+ if (test_bit(FBNIC_MAC_ADDR_T_BROADCAST, mac_addr->act_tcam))
+ rx_flags |= FW_RPC_MAC_SYNC_RX_FLAGS_BROADCAST;
+ if (test_bit(FBNIC_MAC_ADDR_T_ALLMULTI, mac_addr->act_tcam))
+ rx_flags |= FW_RPC_MAC_SYNC_RX_FLAGS_ALLMULTI;
+ if (!test_bit(FBNIC_MAC_ADDR_T_MULTICAST, mac_addr->act_tcam))
+ continue;
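+		/* Too many addrs for the FW's MC array, fall back to all-multi */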
+ if (addr_count == FW_RPC_MAC_SYNC_MC_ARRAY_SIZE) {
+ rx_flags |= FW_RPC_MAC_SYNC_RX_FLAGS_ALLMULTI;
+ continue;
+ }
+
+ err = fbnic_tlv_attr_put_value(mac_array,
+ FBNIC_FW_RPC_MAC_SYNC_MAC_ADDR,
+ mac_addr->value.addr8,
+ ETH_ALEN);
+ if (err)
+ goto free_message;
+ addr_count++;
+ }
+
+ /* Close array */
+ fbnic_tlv_attr_nest_stop(msg);
+
+ /* Report flags at end of list */
+ err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_RPC_MAC_SYNC_RX_FLAGS,
+ rx_flags);
+ if (err)
+ goto free_message;
+
+	/* Send message off to FW notifying it of the current RPC config */
+ err = fbnic_mbx_map_tlv_msg(fbd, msg);
+ if (err)
+ goto free_message;
+ return 0;
+free_message_nospc:
+ err = -ENOSPC;
+free_message:
+ free_page((unsigned long)msg);
+ return err;
+}
+
void fbnic_get_fw_ver_commit_str(struct fbnic_dev *fbd, char *fw_version,
const size_t str_sz)
{
@@ -1423,11 +1893,12 @@ void fbnic_get_fw_ver_commit_str(struct fbnic_dev *fbd, char *fw_version,
fw_version, str_sz);
}
-struct fbnic_fw_completion *fbnic_fw_alloc_cmpl(u32 msg_type)
+struct fbnic_fw_completion *__fbnic_fw_alloc_cmpl(u32 msg_type,
+ size_t priv_size)
{
struct fbnic_fw_completion *cmpl;
- cmpl = kzalloc(sizeof(*cmpl), GFP_KERNEL);
+ cmpl = kzalloc(sizeof(*cmpl) + priv_size, GFP_KERNEL);
if (!cmpl)
return NULL;
@@ -1438,6 +1909,11 @@ struct fbnic_fw_completion *fbnic_fw_alloc_cmpl(u32 msg_type)
return cmpl;
}
+struct fbnic_fw_completion *fbnic_fw_alloc_cmpl(u32 msg_type)
+{
+ return __fbnic_fw_alloc_cmpl(msg_type, 0);
+}
+
void fbnic_fw_put_cmpl(struct fbnic_fw_completion *fw_cmpl)
{
kref_put(&fw_cmpl->ref_count, fbnic_fw_release_cmpl_data);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
index fde331696fdd..1ecd777aaada 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
@@ -51,8 +51,10 @@ struct fbnic_fw_cap {
} stored;
u8 active_slot;
u8 bmc_mac_addr[4][ETH_ALEN];
- u8 bmc_present : 1;
- u8 all_multi : 1;
+ u8 bmc_present : 1;
+ u8 need_bmc_tcam_reinit : 1;
+ u8 need_bmc_macda_sync : 1;
+ u8 all_multi : 1;
u8 link_speed;
u8 link_fec;
u32 anti_rollback_version;
@@ -65,10 +67,25 @@ struct fbnic_fw_completion {
int result;
union {
struct {
+ u32 size;
+ } coredump_info;
+ struct {
+ u32 size;
+ u16 stride;
+ u8 *data[];
+ } coredump;
+ struct {
u32 offset;
u32 length;
} fw_update;
struct {
+ u16 length;
+ u8 offset;
+ u8 page;
+ u8 bank;
+ u8 data[] __aligned(sizeof(u32)) __counted_by(length);
+ } qsfp;
+ struct {
s32 millivolts;
s32 millidegrees;
} tsene;
@@ -87,16 +104,28 @@ void fbnic_mbx_flush_tx(struct fbnic_dev *fbd);
int fbnic_fw_xmit_ownership_msg(struct fbnic_dev *fbd, bool take_ownership);
int fbnic_fw_init_heartbeat(struct fbnic_dev *fbd, bool poll);
void fbnic_fw_check_heartbeat(struct fbnic_dev *fbd);
+int fbnic_fw_xmit_coredump_info_msg(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data,
+ bool force);
+int fbnic_fw_xmit_coredump_read_msg(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data,
+ u32 offset, u32 length);
int fbnic_fw_xmit_fw_start_upgrade(struct fbnic_dev *fbd,
struct fbnic_fw_completion *cmpl_data,
unsigned int id, unsigned int len);
int fbnic_fw_xmit_fw_write_chunk(struct fbnic_dev *fbd,
const u8 *data, u32 offset, u16 length,
int cancel_error);
+int fbnic_fw_xmit_qsfp_read_msg(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data,
+ u32 page, u32 bank, u32 offset, u32 length);
int fbnic_fw_xmit_tsene_read_msg(struct fbnic_dev *fbd,
struct fbnic_fw_completion *cmpl_data);
int fbnic_fw_xmit_send_logs(struct fbnic_dev *fbd, bool enable,
bool send_log_history);
+int fbnic_fw_xmit_rpc_macda_sync(struct fbnic_dev *fbd);
+struct fbnic_fw_completion *__fbnic_fw_alloc_cmpl(u32 msg_type,
+ size_t priv_size);
struct fbnic_fw_completion *fbnic_fw_alloc_cmpl(u32 msg_type);
void fbnic_fw_put_cmpl(struct fbnic_fw_completion *cmpl_data);
@@ -132,17 +161,24 @@ enum {
FBNIC_TLV_MSG_ID_OWNERSHIP_RESP = 0x13,
FBNIC_TLV_MSG_ID_HEARTBEAT_REQ = 0x14,
FBNIC_TLV_MSG_ID_HEARTBEAT_RESP = 0x15,
+ FBNIC_TLV_MSG_ID_COREDUMP_GET_INFO_REQ = 0x18,
+ FBNIC_TLV_MSG_ID_COREDUMP_GET_INFO_RESP = 0x19,
+ FBNIC_TLV_MSG_ID_COREDUMP_READ_REQ = 0x20,
+ FBNIC_TLV_MSG_ID_COREDUMP_READ_RESP = 0x21,
FBNIC_TLV_MSG_ID_FW_START_UPGRADE_REQ = 0x22,
FBNIC_TLV_MSG_ID_FW_START_UPGRADE_RESP = 0x23,
FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_REQ = 0x24,
FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_RESP = 0x25,
FBNIC_TLV_MSG_ID_FW_FINISH_UPGRADE_REQ = 0x28,
FBNIC_TLV_MSG_ID_FW_FINISH_UPGRADE_RESP = 0x29,
+ FBNIC_TLV_MSG_ID_QSFP_READ_REQ = 0x38,
+ FBNIC_TLV_MSG_ID_QSFP_READ_RESP = 0x39,
FBNIC_TLV_MSG_ID_TSENE_READ_REQ = 0x3C,
FBNIC_TLV_MSG_ID_TSENE_READ_RESP = 0x3D,
FBNIC_TLV_MSG_ID_LOG_SEND_LOGS_REQ = 0x43,
FBNIC_TLV_MSG_ID_LOG_MSG_REQ = 0x44,
FBNIC_TLV_MSG_ID_LOG_MSG_RESP = 0x45,
+ FBNIC_TLV_MSG_ID_RPC_MAC_SYNC_REQ = 0x46,
};
#define FBNIC_FW_CAP_RESP_VERSION_MAJOR CSR_GENMASK(31, 24)
@@ -186,6 +222,16 @@ enum {
};
enum {
+ FBNIC_FW_QSFP_BANK = 0x0,
+ FBNIC_FW_QSFP_PAGE = 0x1,
+ FBNIC_FW_QSFP_OFFSET = 0x2,
+ FBNIC_FW_QSFP_LENGTH = 0x3,
+ FBNIC_FW_QSFP_ERROR = 0x4,
+ FBNIC_FW_QSFP_DATA = 0x5,
+ FBNIC_FW_QSFP_MSG_MAX
+};
+
+enum {
FBNIC_FW_TSENE_THERM = 0x0,
FBNIC_FW_TSENE_VOLT = 0x1,
FBNIC_FW_TSENE_ERROR = 0x2,
@@ -194,10 +240,37 @@ enum {
enum {
FBNIC_FW_OWNERSHIP_FLAG = 0x0,
+ FBNIC_FW_OWNERSHIP_TIME = 0x1,
FBNIC_FW_OWNERSHIP_MSG_MAX
};
enum {
+ FBNIC_FW_HEARTBEAT_UPTIME = 0x0,
+ FBNIC_FW_HEARTBEAT_NUMBER_OF_MESSAGES = 0x1,
+ FBNIC_FW_HEARTBEAT_MSG_MAX
+};
+
+enum {
+ FBNIC_FW_COREDUMP_REQ_INFO_CREATE = 0x0,
+ FBNIC_FW_COREDUMP_REQ_INFO_MSG_MAX
+};
+
+enum {
+ FBNIC_FW_COREDUMP_INFO_AVAILABLE = 0x0,
+ FBNIC_FW_COREDUMP_INFO_SIZE = 0x1,
+ FBNIC_FW_COREDUMP_INFO_ERROR = 0x2,
+ FBNIC_FW_COREDUMP_INFO_MSG_MAX
+};
+
+enum {
+ FBNIC_FW_COREDUMP_READ_OFFSET = 0x0,
+ FBNIC_FW_COREDUMP_READ_LENGTH = 0x1,
+ FBNIC_FW_COREDUMP_READ_DATA = 0x2,
+ FBNIC_FW_COREDUMP_READ_ERROR = 0x3,
+ FBNIC_FW_COREDUMP_READ_MSG_MAX
+};
+
+enum {
FBNIC_FW_START_UPGRADE_ERROR = 0x0,
FBNIC_FW_START_UPGRADE_SECTION = 0x1,
FBNIC_FW_START_UPGRADE_IMAGE_LENGTH = 0x2,
@@ -235,4 +308,19 @@ enum {
FBNIC_FW_LOG_MSG_MAX
};
+enum {
+ FBNIC_FW_RPC_MAC_SYNC_RX_FLAGS = 0x0,
+ FBNIC_FW_RPC_MAC_SYNC_UC_ARRAY = 0x1,
+ FBNIC_FW_RPC_MAC_SYNC_MC_ARRAY = 0x2,
+ FBNIC_FW_RPC_MAC_SYNC_MAC_ADDR = 0x3,
+ FBNIC_FW_RPC_MAC_SYNC_MSG_MAX
+};
+
+#define FW_RPC_MAC_SYNC_RX_FLAGS_PROMISC 1
+#define FW_RPC_MAC_SYNC_RX_FLAGS_ALLMULTI 2
+#define FW_RPC_MAC_SYNC_RX_FLAGS_BROADCAST 4
+
+#define FW_RPC_MAC_SYNC_UC_ARRAY_SIZE 8
+#define FW_RPC_MAC_SYNC_MC_ARRAY_SIZE 8
+
#endif /* _FBNIC_FW_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw_log.c b/drivers/net/ethernet/meta/fbnic/fbnic_fw_log.c
index c1663f042245..85a883dba385 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_fw_log.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw_log.c
@@ -72,7 +72,7 @@ void fbnic_fw_log_free(struct fbnic_dev *fbd)
}
int fbnic_fw_log_write(struct fbnic_dev *fbd, u64 index, u32 timestamp,
- char *msg)
+ const char *msg)
{
struct fbnic_fw_log_entry *entry, *head, *tail, *next;
struct fbnic_fw_log *log = &fbd->fw_log;
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw_log.h b/drivers/net/ethernet/meta/fbnic/fbnic_fw_log.h
index cb6555f40a24..50ec79003108 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_fw_log.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw_log.h
@@ -41,5 +41,5 @@ void fbnic_fw_log_disable(struct fbnic_dev *fbd);
int fbnic_fw_log_init(struct fbnic_dev *fbd);
void fbnic_fw_log_free(struct fbnic_dev *fbd);
int fbnic_fw_log_write(struct fbnic_dev *fbd, u64 index, u32 timestamp,
- char *msg);
+ const char *msg);
#endif /* _FBNIC_FW_LOG_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c b/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c
index 4223d8100e64..8b9b2076beec 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+#include <linux/rtnetlink.h>
+
#include "fbnic.h"
static void fbnic_hw_stat_rst32(struct fbnic_dev *fbd, u32 reg,
@@ -421,9 +423,9 @@ static void fbnic_get_hw_rxq_stats32(struct fbnic_dev *fbd,
void fbnic_get_hw_q_stats(struct fbnic_dev *fbd,
struct fbnic_hw_q_stats *hw_q)
{
- spin_lock(&fbd->hw_stats_lock);
+ spin_lock(&fbd->hw_stats.lock);
fbnic_get_hw_rxq_stats32(fbd, hw_q);
- spin_unlock(&fbd->hw_stats_lock);
+ spin_unlock(&fbd->hw_stats.lock);
}
static void fbnic_reset_pcie_stats_asic(struct fbnic_dev *fbd,
@@ -510,20 +512,68 @@ static void fbnic_get_pcie_stats_asic64(struct fbnic_dev *fbd,
&pcie->ob_rd_no_np_cred);
}
+static void fbnic_reset_phy_stats(struct fbnic_dev *fbd,
+ struct fbnic_phy_stats *phy_stats)
+{
+ const struct fbnic_mac *mac = fbd->mac;
+
+ mac->get_fec_stats(fbd, true, &phy_stats->fec);
+ mac->get_pcs_stats(fbd, true, &phy_stats->pcs);
+}
+
+static void fbnic_get_phy_stats32(struct fbnic_dev *fbd,
+ struct fbnic_phy_stats *phy_stats)
+{
+ const struct fbnic_mac *mac = fbd->mac;
+
+ mac->get_fec_stats(fbd, false, &phy_stats->fec);
+ mac->get_pcs_stats(fbd, false, &phy_stats->pcs);
+}
+
+static void fbnic_reset_hw_mac_stats(struct fbnic_dev *fbd,
+ struct fbnic_mac_stats *mac_stats)
+{
+ const struct fbnic_mac *mac = fbd->mac;
+
+ mac->get_eth_mac_stats(fbd, true, &mac_stats->eth_mac);
+ mac->get_pause_stats(fbd, true, &mac_stats->pause);
+ mac->get_eth_ctrl_stats(fbd, true, &mac_stats->eth_ctrl);
+ mac->get_rmon_stats(fbd, true, &mac_stats->rmon);
+}
+
void fbnic_reset_hw_stats(struct fbnic_dev *fbd)
{
- spin_lock(&fbd->hw_stats_lock);
+ spin_lock(&fbd->hw_stats.lock);
+ fbnic_reset_phy_stats(fbd, &fbd->hw_stats.phy);
fbnic_reset_tmi_stats(fbd, &fbd->hw_stats.tmi);
fbnic_reset_tti_stats(fbd, &fbd->hw_stats.tti);
fbnic_reset_rpc_stats(fbd, &fbd->hw_stats.rpc);
fbnic_reset_rxb_stats(fbd, &fbd->hw_stats.rxb);
fbnic_reset_hw_rxq_stats(fbd, fbd->hw_stats.hw_q);
fbnic_reset_pcie_stats_asic(fbd, &fbd->hw_stats.pcie);
- spin_unlock(&fbd->hw_stats_lock);
+ spin_unlock(&fbd->hw_stats.lock);
+
+	/* Once registered, the only other access to MAC stats is via the
+	 * ethtool API which is protected by the rtnl_lock. The call to
+	 * fbnic_reset_hw_stats() during PCI recovery is also protected
+	 * by the rtnl_lock, hence we don't need the spinlock to access
+	 * the MAC stats.
+	 */
+ if (fbd->netdev)
+ ASSERT_RTNL();
+ fbnic_reset_hw_mac_stats(fbd, &fbd->hw_stats.mac);
+}
+
+void fbnic_init_hw_stats(struct fbnic_dev *fbd)
+{
+ spin_lock_init(&fbd->hw_stats.lock);
+
+ fbnic_reset_hw_stats(fbd);
}
static void __fbnic_get_hw_stats32(struct fbnic_dev *fbd)
{
+ fbnic_get_phy_stats32(fbd, &fbd->hw_stats.phy);
fbnic_get_tmi_stats32(fbd, &fbd->hw_stats.tmi);
fbnic_get_tti_stats32(fbd, &fbd->hw_stats.tti);
fbnic_get_rpc_stats32(fbd, &fbd->hw_stats.rpc);
@@ -533,19 +583,19 @@ static void __fbnic_get_hw_stats32(struct fbnic_dev *fbd)
void fbnic_get_hw_stats32(struct fbnic_dev *fbd)
{
- spin_lock(&fbd->hw_stats_lock);
+ spin_lock(&fbd->hw_stats.lock);
__fbnic_get_hw_stats32(fbd);
- spin_unlock(&fbd->hw_stats_lock);
+ spin_unlock(&fbd->hw_stats.lock);
}
void fbnic_get_hw_stats(struct fbnic_dev *fbd)
{
- spin_lock(&fbd->hw_stats_lock);
+ spin_lock(&fbd->hw_stats.lock);
__fbnic_get_hw_stats32(fbd);
fbnic_get_tmi_stats(fbd, &fbd->hw_stats.tmi);
fbnic_get_tti_stats(fbd, &fbd->hw_stats.tti);
fbnic_get_rxb_stats(fbd, &fbd->hw_stats.rxb);
fbnic_get_pcie_stats_asic64(fbd, &fbd->hw_stats.pcie);
- spin_unlock(&fbd->hw_stats_lock);
+ spin_unlock(&fbd->hw_stats.lock);
}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h b/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h
index 4fe239717497..aa3f429a9aed 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h
@@ -5,6 +5,7 @@
#define _FBNIC_HW_STATS_H_
#include <linux/ethtool.h>
+#include <linux/spinlock.h>
#include "fbnic_csr.h"
@@ -22,6 +23,16 @@ struct fbnic_hw_stat {
struct fbnic_stat_counter bytes;
};
+struct fbnic_fec_stats {
+ struct fbnic_stat_counter corrected_blocks, uncorrectable_blocks;
+};
+
+struct fbnic_pcs_stats {
+ struct {
+ struct fbnic_stat_counter lanes[FBNIC_PCS_MAX_LANES];
+ } SymbolErrorDuringCarrier;
+};
+
/* Note: not updated by fbnic_get_hw_stats() */
struct fbnic_eth_ctrl_stats {
struct fbnic_stat_counter MACControlFramesTransmitted;
@@ -39,6 +50,12 @@ struct fbnic_rmon_stats {
struct fbnic_stat_counter hist_tx[ETHTOOL_RMON_HIST_MAX];
};
+/* Note: not updated by fbnic_get_hw_stats() */
+struct fbnic_pause_stats {
+ struct fbnic_stat_counter tx_pause_frames;
+ struct fbnic_stat_counter rx_pause_frames;
+};
+
struct fbnic_eth_mac_stats {
struct fbnic_stat_counter FramesTransmittedOK;
struct fbnic_stat_counter FramesReceivedOK;
@@ -55,8 +72,14 @@ struct fbnic_eth_mac_stats {
struct fbnic_stat_counter FrameTooLongErrors;
};
+struct fbnic_phy_stats {
+ struct fbnic_fec_stats fec;
+ struct fbnic_pcs_stats pcs;
+};
+
struct fbnic_mac_stats {
struct fbnic_eth_mac_stats eth_mac;
+ struct fbnic_pause_stats pause;
struct fbnic_eth_ctrl_stats eth_ctrl;
struct fbnic_rmon_stats rmon;
};
@@ -115,6 +138,7 @@ struct fbnic_pcie_stats {
};
struct fbnic_hw_stats {
+ struct fbnic_phy_stats phy;
struct fbnic_mac_stats mac;
struct fbnic_tmi_stats tmi;
struct fbnic_tti_stats tti;
@@ -122,11 +146,15 @@ struct fbnic_hw_stats {
struct fbnic_rxb_stats rxb;
struct fbnic_hw_q_stats hw_q[FBNIC_MAX_QUEUES];
struct fbnic_pcie_stats pcie;
+
+ /* Lock protecting the access to hw stats */
+ spinlock_t lock;
};
u64 fbnic_stat_rd64(struct fbnic_dev *fbd, u32 reg, u32 offset);
void fbnic_reset_hw_stats(struct fbnic_dev *fbd);
+void fbnic_init_hw_stats(struct fbnic_dev *fbd);
void fbnic_get_hw_q_stats(struct fbnic_dev *fbd,
struct fbnic_hw_q_stats *hw_q);
void fbnic_get_hw_stats32(struct fbnic_dev *fbd);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
index fd8d67f9048e..8f998d26b9a3 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
@@ -632,6 +632,50 @@ static void fbnic_mac_link_up_asic(struct fbnic_dev *fbd,
}
static void
+fbnic_pcs_rsfec_stat_rd32(struct fbnic_dev *fbd, u32 reg, bool reset,
+ struct fbnic_stat_counter *stat)
+{
+ u32 pcs_rsfec_stat;
+
+	/* The PCS/RSFEC registers are only 16b wide each, so each 32b read
+	 * yields a value of the form 0x0000xxxx. To build a usable 32b stat
+	 * we mask the lower register to 16b and shift the upper register's
+	 * bits into the high half.
+	 *
+	 * Read ordering must be lower reg followed by upper reg.
+	 */
+ pcs_rsfec_stat = rd32(fbd, reg) & 0xffff;
+ pcs_rsfec_stat |= rd32(fbd, reg + 1) << 16;
+
+	/* RSFEC registers clear themselves upon being read, so there is no
+	 * need to store the old_reg_value.
+	 */
+ if (!reset)
+ stat->value += pcs_rsfec_stat;
+}
+
+static void
+fbnic_mac_get_fec_stats(struct fbnic_dev *fbd, bool reset,
+ struct fbnic_fec_stats *s)
+{
+ fbnic_pcs_rsfec_stat_rd32(fbd, FBNIC_RSFEC_CCW_LO(0), reset,
+ &s->corrected_blocks);
+ fbnic_pcs_rsfec_stat_rd32(fbd, FBNIC_RSFEC_NCCW_LO(0), reset,
+ &s->uncorrectable_blocks);
+}
+
+static void
+fbnic_mac_get_pcs_stats(struct fbnic_dev *fbd, bool reset,
+ struct fbnic_pcs_stats *s)
+{
+ int i;
+
+ for (i = 0; i < FBNIC_PCS_MAX_LANES; i++)
+ fbnic_pcs_rsfec_stat_rd32(fbd, FBNIC_PCS_SYMBLERR_LO(i), reset,
+ &s->SymbolErrorDuringCarrier.lanes[i]);
+}
+
+static void
fbnic_mac_get_eth_mac_stats(struct fbnic_dev *fbd, bool reset,
struct fbnic_eth_mac_stats *mac_stats)
{
@@ -666,6 +710,16 @@ fbnic_mac_get_eth_mac_stats(struct fbnic_dev *fbd, bool reset,
}
static void
+fbnic_mac_get_pause_stats(struct fbnic_dev *fbd, bool reset,
+ struct fbnic_pause_stats *pause_stats)
+{
+ fbnic_mac_stat_rd64(fbd, reset, pause_stats->tx_pause_frames,
+ MAC_STAT_TX_XOFF_STB);
+ fbnic_mac_stat_rd64(fbd, reset, pause_stats->rx_pause_frames,
+ MAC_STAT_RX_XOFF_STB);
+}
+
+static void
fbnic_mac_get_eth_ctrl_stats(struct fbnic_dev *fbd, bool reset,
struct fbnic_eth_ctrl_stats *ctrl_stats)
{
@@ -809,7 +863,10 @@ static const struct fbnic_mac fbnic_mac_asic = {
.pcs_disable = fbnic_pcs_disable_asic,
.pcs_get_link = fbnic_pcs_get_link_asic,
.pcs_get_link_event = fbnic_pcs_get_link_event_asic,
+ .get_fec_stats = fbnic_mac_get_fec_stats,
+ .get_pcs_stats = fbnic_mac_get_pcs_stats,
.get_eth_mac_stats = fbnic_mac_get_eth_mac_stats,
+ .get_pause_stats = fbnic_mac_get_pause_stats,
.get_eth_ctrl_stats = fbnic_mac_get_eth_ctrl_stats,
.get_rmon_stats = fbnic_mac_get_rmon_stats,
.link_down = fbnic_mac_link_down_asic,
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.h b/drivers/net/ethernet/meta/fbnic/fbnic_mac.h
index 86fa06da2b3e..ede5ff0dae22 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_mac.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.h
@@ -79,8 +79,14 @@ struct fbnic_mac {
bool (*pcs_get_link)(struct fbnic_dev *fbd);
int (*pcs_get_link_event)(struct fbnic_dev *fbd);
+ void (*get_fec_stats)(struct fbnic_dev *fbd, bool reset,
+ struct fbnic_fec_stats *fec_stats);
+ void (*get_pcs_stats)(struct fbnic_dev *fbd, bool reset,
+ struct fbnic_pcs_stats *pcs_stats);
void (*get_eth_mac_stats)(struct fbnic_dev *fbd, bool reset,
struct fbnic_eth_mac_stats *mac_stats);
+ void (*get_pause_stats)(struct fbnic_dev *fbd, bool reset,
+ struct fbnic_pause_stats *pause_stats);
void (*get_eth_ctrl_stats)(struct fbnic_dev *fbd, bool reset,
struct fbnic_eth_ctrl_stats *ctrl_stats);
void (*get_rmon_stats)(struct fbnic_dev *fbd, bool reset,
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
index 40581550da1a..d12b4cad84a5 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
@@ -183,11 +183,10 @@ static int fbnic_mc_unsync(struct net_device *netdev, const unsigned char *addr)
return ret;
}
-void __fbnic_set_rx_mode(struct net_device *netdev)
+void __fbnic_set_rx_mode(struct fbnic_dev *fbd)
{
- struct fbnic_net *fbn = netdev_priv(netdev);
bool uc_promisc = false, mc_promisc = false;
- struct fbnic_dev *fbd = fbn->fbd;
+ struct net_device *netdev = fbd->netdev;
struct fbnic_mac_addr *mac_addr;
int err;
@@ -224,49 +223,8 @@ void __fbnic_set_rx_mode(struct net_device *netdev)
uc_promisc |= !!(netdev->flags & IFF_PROMISC);
mc_promisc |= !!(netdev->flags & IFF_ALLMULTI) || uc_promisc;
- /* Populate last TCAM entry with promiscuous entry and 0/1 bit mask */
- mac_addr = &fbd->mac_addr[FBNIC_RPC_TCAM_MACDA_PROMISC_IDX];
- if (uc_promisc) {
- if (!is_zero_ether_addr(mac_addr->value.addr8) ||
- mac_addr->state != FBNIC_TCAM_S_VALID) {
- eth_zero_addr(mac_addr->value.addr8);
- eth_broadcast_addr(mac_addr->mask.addr8);
- clear_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
- mac_addr->act_tcam);
- set_bit(FBNIC_MAC_ADDR_T_PROMISC,
- mac_addr->act_tcam);
- mac_addr->state = FBNIC_TCAM_S_ADD;
- }
- } else if (mc_promisc &&
- (!fbnic_bmc_present(fbd) || !fbd->fw_cap.all_multi)) {
- /* We have to add a special handler for multicast as the
- * BMC may have an all-multi rule already in place. As such
- * adding a rule ourselves won't do any good so we will have
- * to modify the rules for the ALL MULTI below if the BMC
- * already has the rule in place.
- */
- if (!is_multicast_ether_addr(mac_addr->value.addr8) ||
- mac_addr->state != FBNIC_TCAM_S_VALID) {
- eth_zero_addr(mac_addr->value.addr8);
- eth_broadcast_addr(mac_addr->mask.addr8);
- mac_addr->value.addr8[0] ^= 1;
- mac_addr->mask.addr8[0] ^= 1;
- set_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
- mac_addr->act_tcam);
- clear_bit(FBNIC_MAC_ADDR_T_PROMISC,
- mac_addr->act_tcam);
- mac_addr->state = FBNIC_TCAM_S_ADD;
- }
- } else if (mac_addr->state == FBNIC_TCAM_S_VALID) {
- if (test_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam)) {
- clear_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
- mac_addr->act_tcam);
- clear_bit(FBNIC_MAC_ADDR_T_PROMISC,
- mac_addr->act_tcam);
- } else {
- mac_addr->state = FBNIC_TCAM_S_DELETE;
- }
- }
+ /* Update the promiscuous rules */
+ fbnic_promisc_sync(fbd, uc_promisc, mc_promisc);
/* Add rules for BMC all multicast if it is enabled */
fbnic_bmc_rpc_all_multi_config(fbd, mc_promisc);
@@ -282,9 +240,12 @@ void __fbnic_set_rx_mode(struct net_device *netdev)
static void fbnic_set_rx_mode(struct net_device *netdev)
{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_dev *fbd = fbn->fbd;
+
/* No need to update the hardware if we are not running */
if (netif_running(netdev))
- __fbnic_set_rx_mode(netdev);
+ __fbnic_set_rx_mode(fbd);
}
static int fbnic_set_mac(struct net_device *netdev, void *p)
@@ -301,10 +262,9 @@ static int fbnic_set_mac(struct net_device *netdev, void *p)
return 0;
}
-void fbnic_clear_rx_mode(struct net_device *netdev)
+void fbnic_clear_rx_mode(struct fbnic_dev *fbd)
{
- struct fbnic_net *fbn = netdev_priv(netdev);
- struct fbnic_dev *fbd = fbn->fbd;
+ struct net_device *netdev = fbd->netdev;
int idx;
for (idx = ARRAY_SIZE(fbd->mac_addr); idx--;) {
@@ -411,11 +371,12 @@ static void fbnic_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats64)
{
u64 rx_bytes, rx_packets, rx_dropped = 0, rx_errors = 0;
+ u64 rx_over = 0, rx_missed = 0, rx_length = 0;
u64 tx_bytes, tx_packets, tx_dropped = 0;
struct fbnic_net *fbn = netdev_priv(dev);
struct fbnic_dev *fbd = fbn->fbd;
struct fbnic_queue_stats *stats;
- u64 rx_over = 0, rx_missed = 0;
unsigned int start, i;
fbnic_get_hw_stats(fbd);
@@ -427,12 +388,12 @@ static void fbnic_get_stats64(struct net_device *dev,
tx_dropped = stats->dropped;
/* Record drops from Tx HW Datapath */
- spin_lock(&fbd->hw_stats_lock);
+ spin_lock(&fbd->hw_stats.lock);
tx_dropped += fbd->hw_stats.tmi.drop.frames.value +
fbd->hw_stats.tti.cm_drop.frames.value +
fbd->hw_stats.tti.frame_drop.frames.value +
fbd->hw_stats.tti.tbi_drop.frames.value;
- spin_unlock(&fbd->hw_stats_lock);
+ spin_unlock(&fbd->hw_stats.lock);
stats64->tx_bytes = tx_bytes;
stats64->tx_packets = tx_packets;
@@ -463,7 +424,7 @@ static void fbnic_get_stats64(struct net_device *dev,
rx_packets = stats->packets;
rx_dropped = stats->dropped;
- spin_lock(&fbd->hw_stats_lock);
+ spin_lock(&fbd->hw_stats.lock);
/* Record drops for the host FIFOs.
* 4: network to Host, 6: BMC to Host
* Exclude the BMC and MC FIFOs as those stats may contain drops
@@ -483,7 +444,7 @@ static void fbnic_get_stats64(struct net_device *dev,
/* Report packets with errors */
rx_errors += fbd->hw_stats.hw_q[i].rde_pkt_err.value;
}
- spin_unlock(&fbd->hw_stats_lock);
+ spin_unlock(&fbd->hw_stats.lock);
stats64->rx_bytes = rx_bytes;
stats64->rx_packets = rx_packets;
@@ -493,6 +454,7 @@ static void fbnic_get_stats64(struct net_device *dev,
stats64->rx_missed_errors = rx_missed;
for (i = 0; i < fbn->num_rx_queues; i++) {
+ struct fbnic_ring *xdpr = fbn->tx[FBNIC_MAX_TXQS + i];
struct fbnic_ring *rxr = fbn->rx[i];
if (!rxr)
@@ -504,14 +466,66 @@ static void fbnic_get_stats64(struct net_device *dev,
rx_bytes = stats->bytes;
rx_packets = stats->packets;
rx_dropped = stats->dropped;
+ rx_length = stats->rx.length_errors;
} while (u64_stats_fetch_retry(&stats->syncp, start));
stats64->rx_bytes += rx_bytes;
stats64->rx_packets += rx_packets;
stats64->rx_dropped += rx_dropped;
+ stats64->rx_errors += rx_length;
+ stats64->rx_length_errors += rx_length;
+
+ if (!xdpr)
+ continue;
+
+ stats = &xdpr->stats;
+ do {
+ start = u64_stats_fetch_begin(&stats->syncp);
+ tx_bytes = stats->bytes;
+ tx_packets = stats->packets;
+ tx_dropped = stats->dropped;
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
+
+ stats64->tx_bytes += tx_bytes;
+ stats64->tx_packets += tx_packets;
+ stats64->tx_dropped += tx_dropped;
}
}
+bool fbnic_check_split_frames(struct bpf_prog *prog, unsigned int mtu,
+ u32 hds_thresh)
+{
+ if (!prog)
+ return false;
+
+ if (prog->aux->xdp_has_frags)
+ return false;
+
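+	/* A single-buffer program can't handle frames split at the HDS threshold */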
+ return mtu + ETH_HLEN > hds_thresh;
+}
+
+static int fbnic_bpf(struct net_device *netdev, struct netdev_bpf *bpf)
+{
+ struct bpf_prog *prog = bpf->prog, *prev_prog;
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ if (bpf->command != XDP_SETUP_PROG)
+ return -EINVAL;
+
+ if (fbnic_check_split_frames(prog, netdev->mtu,
+ fbn->hds_thresh)) {
+ NL_SET_ERR_MSG_MOD(bpf->extack,
+ "MTU too high, or HDS threshold is too low for single buffer XDP");
+ return -EOPNOTSUPP;
+ }
+
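+	/* Swap in the new program and release our reference to the old one */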
+ prev_prog = xchg(&fbn->xdp_prog, prog);
+ if (prev_prog)
+ bpf_prog_put(prev_prog);
+
+ return 0;
+}
+
static const struct net_device_ops fbnic_netdev_ops = {
.ndo_open = fbnic_open,
.ndo_stop = fbnic_stop,
@@ -521,6 +535,7 @@ static const struct net_device_ops fbnic_netdev_ops = {
.ndo_set_mac_address = fbnic_set_mac,
.ndo_set_rx_mode = fbnic_set_rx_mode,
.ndo_get_stats64 = fbnic_get_stats64,
+ .ndo_bpf = fbnic_bpf,
.ndo_hwtstamp_get = fbnic_hwtstamp_get,
.ndo_hwtstamp_set = fbnic_hwtstamp_set,
};
@@ -557,12 +572,12 @@ static void fbnic_get_queue_stats_rx(struct net_device *dev, int idx,
fbnic_get_hw_q_stats(fbd, fbd->hw_stats.hw_q);
- spin_lock(&fbd->hw_stats_lock);
+ spin_lock(&fbd->hw_stats.lock);
rx->hw_drop_overruns = fbd->hw_stats.hw_q[idx].rde_pkt_cq_drop.value +
fbd->hw_stats.hw_q[idx].rde_pkt_bdq_drop.value;
rx->hw_drops = fbd->hw_stats.hw_q[idx].rde_pkt_err.value +
rx->hw_drop_overruns;
- spin_unlock(&fbd->hw_stats_lock);
+ spin_unlock(&fbd->hw_stats.lock);
}
static void fbnic_get_queue_stats_tx(struct net_device *dev, int idx,
@@ -572,6 +587,7 @@ static void fbnic_get_queue_stats_tx(struct net_device *dev, int idx,
struct fbnic_ring *txr = fbn->tx[idx];
struct fbnic_queue_stats *stats;
u64 stop, wake, csum, lso;
+ struct fbnic_ring *xdpr;
unsigned int start;
u64 bytes, packets;
@@ -595,6 +611,19 @@ static void fbnic_get_queue_stats_tx(struct net_device *dev, int idx,
tx->hw_gso_wire_packets = lso;
tx->stop = stop;
tx->wake = wake;
+
+ xdpr = fbn->tx[FBNIC_MAX_TXQS + idx];
+ if (xdpr) {
+ stats = &xdpr->stats;
+ do {
+ start = u64_stats_fetch_begin(&stats->syncp);
+ bytes = stats->bytes;
+ packets = stats->packets;
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
+
+ tx->bytes += bytes;
+ tx->packets += packets;
+ }
}
static void fbnic_get_base_stats(struct net_device *dev,
@@ -682,6 +711,8 @@ struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd)
netdev->netdev_ops = &fbnic_netdev_ops;
netdev->stat_ops = &fbnic_stat_ops;
+ netdev->queue_mgmt_ops = &fbnic_queue_mgmt_ops;
+ netdev->netmem_tx = true;
fbnic_set_ethtool_ops(netdev);
@@ -699,6 +730,10 @@ struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd)
fbn->rx_usecs = FBNIC_RX_USECS_DEFAULT;
fbn->rx_max_frames = FBNIC_RX_FRAMES_DEFAULT;
+ /* Initialize the hds_thresh */
+ netdev->cfg->hds_thresh = FBNIC_HDS_THRESH_DEFAULT;
+ fbn->hds_thresh = FBNIC_HDS_THRESH_DEFAULT;
+
default_queues = netif_get_num_default_rss_queues();
if (default_queues > fbd->max_num_queues)
default_queues = fbd->max_num_queues;
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
index 86576ae04262..e84e0527c3a9 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
@@ -18,7 +18,9 @@
#define FBNIC_TUN_GSO_FEATURES NETIF_F_GSO_IPXIP6
struct fbnic_net {
- struct fbnic_ring *tx[FBNIC_MAX_TXQS];
+ struct bpf_prog *xdp_prog;
+
+ struct fbnic_ring *tx[FBNIC_MAX_TXQS + FBNIC_MAX_XDPQS];
struct fbnic_ring *rx[FBNIC_MAX_RXQS];
struct fbnic_napi_vector *napi[FBNIC_MAX_NAPI_VECTORS];
@@ -31,6 +33,8 @@ struct fbnic_net {
u32 ppq_size;
u32 rcq_size;
+ u32 hds_thresh;
+
u16 rx_usecs;
u16 tx_usecs;
@@ -90,8 +94,8 @@ void fbnic_time_init(struct fbnic_net *fbn);
int fbnic_time_start(struct fbnic_net *fbn);
void fbnic_time_stop(struct fbnic_net *fbn);
-void __fbnic_set_rx_mode(struct net_device *netdev);
-void fbnic_clear_rx_mode(struct net_device *netdev);
+void __fbnic_set_rx_mode(struct fbnic_dev *fbd);
+void fbnic_clear_rx_mode(struct fbnic_dev *fbd);
void fbnic_phylink_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause);
@@ -102,4 +106,7 @@ int fbnic_phylink_ethtool_ksettings_get(struct net_device *netdev,
int fbnic_phylink_get_fecparam(struct net_device *netdev,
struct ethtool_fecparam *fecparam);
int fbnic_phylink_init(struct net_device *netdev);
+
+bool fbnic_check_split_frames(struct bpf_prog *prog,
+ unsigned int mtu, u32 hds_threshold);
#endif /* _FBNIC_NETDEV_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
index 28e23e3ffca8..a7a6b4db8016 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
@@ -135,7 +135,7 @@ void fbnic_up(struct fbnic_net *fbn)
fbnic_rss_reinit_hw(fbn->fbd, fbn);
- __fbnic_set_rx_mode(fbn->netdev);
+ __fbnic_set_rx_mode(fbn->fbd);
/* Enable Tx/Rx processing */
fbnic_napi_enable(fbn);
@@ -152,7 +152,7 @@ void fbnic_down_noidle(struct fbnic_net *fbn)
fbnic_napi_disable(fbn);
netif_tx_disable(fbn->netdev);
- fbnic_clear_rx_mode(fbn->netdev);
+ fbnic_clear_rx_mode(fbn->fbd);
fbnic_clear_rules(fbn->fbd);
fbnic_rss_disable_hw(fbn->fbd);
fbnic_disable(fbn);
@@ -167,6 +167,20 @@ void fbnic_down(struct fbnic_net *fbn)
fbnic_flush(fbn);
}
+static int fbnic_fw_config_after_crash(struct fbnic_dev *fbd)
+{
+ if (fbnic_fw_xmit_ownership_msg(fbd, true)) {
+ dev_err(fbd->dev, "NIC failed to take ownership\n");
+
+ return -1;
+ }
+
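+	/* Restore the RPC TCAM/rx-mode state the FW lost when it crashed */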
+ fbnic_rpc_reset_valid_entries(fbd);
+ __fbnic_set_rx_mode(fbd);
+
+ return 0;
+}
+
static void fbnic_health_check(struct fbnic_dev *fbd)
{
struct fbnic_fw_mbx *tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
@@ -182,13 +196,11 @@ static void fbnic_health_check(struct fbnic_dev *fbd)
if (tx_mbx->head != tx_mbx->tail)
return;
- /* TBD: Need to add a more thorough recovery here.
- * Specifically I need to verify what all the firmware will have
- * changed since we had setup and it rebooted. May just need to
- * perform a down/up. For now we will just reclaim ownership so
- * the heartbeat can catch the next fault.
- */
- fbnic_fw_xmit_ownership_msg(fbd, true);
+ fbnic_devlink_fw_report(fbd, "Firmware crash detected!");
+ fbnic_devlink_otp_check(fbd, "error detected after firmware recovery");
+
+ if (fbnic_fw_config_after_crash(fbd))
+ dev_err(fbd->dev, "Firmware recovery failed after crash\n");
}
static void fbnic_service_task(struct work_struct *work)
@@ -204,8 +216,13 @@ static void fbnic_service_task(struct work_struct *work)
fbnic_health_check(fbd);
- if (netif_carrier_ok(fbd->netdev))
+ fbnic_bmc_rpc_check(fbd);
+
+ if (netif_carrier_ok(fbd->netdev)) {
+ netdev_lock(fbd->netdev);
fbnic_napi_depletion_check(fbd->netdev);
+ netdev_unlock(fbd->netdev);
+ }
if (netif_running(fbd->netdev))
schedule_delayed_work(&fbd->service_task, HZ);
@@ -264,6 +281,10 @@ static int fbnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENOMEM;
}
+ err = fbnic_devlink_health_create(fbd);
+ if (err)
+ goto free_fbd;
+
/* Populate driver with hardware-specific info and handlers */
fbd->max_num_queues = info->max_num_queues;
@@ -274,7 +295,7 @@ static int fbnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = fbnic_alloc_irqs(fbd);
if (err)
- goto free_fbd;
+ goto err_destroy_health;
err = fbnic_mac_init(fbd);
if (err) {
@@ -301,11 +322,11 @@ static int fbnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err);
fbnic_devlink_register(fbd);
+ fbnic_devlink_otp_check(fbd, "error detected during probe");
fbnic_dbg_fbd_init(fbd);
- spin_lock_init(&fbd->hw_stats_lock);
/* Capture snapshot of hardware stats so netdev can calculate delta */
- fbnic_reset_hw_stats(fbd);
+ fbnic_init_hw_stats(fbd);
fbnic_hwmon_register(fbd);
@@ -344,6 +365,8 @@ init_failure_mode:
return 0;
free_irqs:
fbnic_free_irqs(fbd);
+err_destroy_health:
+ fbnic_devlink_health_destroy(fbd);
free_fbd:
fbnic_devlink_free(fbd);
@@ -378,6 +401,7 @@ static void fbnic_remove(struct pci_dev *pdev)
fbnic_fw_free_mbx(fbd);
fbnic_free_irqs(fbd);
+ fbnic_devlink_health_destroy(fbd);
fbnic_devlink_free(fbd);
}
@@ -390,12 +414,14 @@ static int fbnic_pm_suspend(struct device *dev)
goto null_uc_addr;
rtnl_lock();
+ netdev_lock(netdev);
netif_device_detach(netdev);
if (netif_running(netdev))
netdev->netdev_ops->ndo_stop(netdev);
+ netdev_unlock(netdev);
rtnl_unlock();
null_uc_addr:
@@ -450,6 +476,9 @@ static int __fbnic_pm_resume(struct device *dev)
*/
fbnic_fw_log_enable(fbd, list_empty(&fbd->fw_log.entries));
+ /* Since the FW should be up, check if it reported OTP errors */
+ fbnic_devlink_otp_check(fbd, "error detected after PM resume");
+
/* No netdev means there isn't a network interface to bring up */
if (fbnic_init_failure(fbd))
return 0;
@@ -460,10 +489,12 @@ static int __fbnic_pm_resume(struct device *dev)
fbnic_reset_queues(fbn, fbn->num_tx_queues, fbn->num_rx_queues);
rtnl_lock();
+ netdev_lock(netdev);
if (netif_running(netdev))
err = __fbnic_open(fbn);
+ netdev_unlock(netdev);
rtnl_unlock();
if (err)
goto err_free_mbx;
@@ -489,6 +520,10 @@ static void __fbnic_pm_attach(struct device *dev)
struct net_device *netdev = fbd->netdev;
struct fbnic_net *fbn;
+ rtnl_lock();
+ fbnic_reset_hw_stats(fbd);
+ rtnl_unlock();
+
if (fbnic_init_failure(fbd))
return;
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c
index 8ff07b5562e3..7f31e890031c 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c
@@ -6,6 +6,7 @@
#include <net/ipv6.h>
#include "fbnic.h"
+#include "fbnic_fw.h"
#include "fbnic_netdev.h"
#include "fbnic_rpc.h"
@@ -71,6 +72,8 @@ u16 fbnic_flow_hash_2_rss_en_mask(struct fbnic_net *fbn, int flow_type)
rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(IP_DST, IP_DST, flow_hash);
rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(L4_B_0_1, L4_SRC, flow_hash);
rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(L4_B_2_3, L4_DST, flow_hash);
+ rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(IP6_FL, OV6_FL_LBL, flow_hash);
+ rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(IP6_FL, IV6_FL_LBL, flow_hash);
return rss_en_mask;
}
@@ -129,12 +132,9 @@ void fbnic_bmc_rpc_all_multi_config(struct fbnic_dev *fbd,
else
clear_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
mac_addr->act_tcam);
- } else if (!test_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam) &&
- !is_zero_ether_addr(mac_addr->mask.addr8) &&
- mac_addr->state == FBNIC_TCAM_S_VALID) {
- clear_bit(FBNIC_MAC_ADDR_T_ALLMULTI, mac_addr->act_tcam);
- clear_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam);
- mac_addr->state = FBNIC_TCAM_S_DELETE;
+ } else {
+ __fbnic_xc_unsync(mac_addr, FBNIC_MAC_ADDR_T_BMC);
+ __fbnic_xc_unsync(mac_addr, FBNIC_MAC_ADDR_T_ALLMULTI);
}
/* We have to add a special handler for multicast as the
@@ -236,8 +236,25 @@ void fbnic_bmc_rpc_init(struct fbnic_dev *fbd)
act_tcam->mask.tcam[j] = 0xffff;
act_tcam->state = FBNIC_TCAM_S_UPDATE;
+}
+
+void fbnic_bmc_rpc_check(struct fbnic_dev *fbd)
+{
+ int err;
+
+ if (fbd->fw_cap.need_bmc_tcam_reinit) {
+ fbnic_bmc_rpc_init(fbd);
+ __fbnic_set_rx_mode(fbd);
+ fbd->fw_cap.need_bmc_tcam_reinit = false;
+ }
- fbnic_bmc_rpc_all_multi_config(fbd, false);
+ if (fbd->fw_cap.need_bmc_macda_sync) {
+ err = fbnic_fw_xmit_rpc_macda_sync(fbd);
+ if (err)
+ dev_warn(fbd->dev,
+ "Writing MACDA table to FW failed, err: %d\n", err);
+ fbd->fw_cap.need_bmc_macda_sync = false;
+ }
}
#define FBNIC_ACT1_INIT(_l4, _udp, _ip, _v6) \
@@ -452,6 +469,50 @@ int __fbnic_xc_unsync(struct fbnic_mac_addr *mac_addr, unsigned int tcam_idx)
return 0;
}
+void fbnic_promisc_sync(struct fbnic_dev *fbd,
+ bool uc_promisc, bool mc_promisc)
+{
+ struct fbnic_mac_addr *mac_addr;
+
+ /* Populate last TCAM entry with promiscuous entry and 0/1 bit mask */
+ mac_addr = &fbd->mac_addr[FBNIC_RPC_TCAM_MACDA_PROMISC_IDX];
+ if (uc_promisc) {
+ if (!is_zero_ether_addr(mac_addr->value.addr8) ||
+ mac_addr->state != FBNIC_TCAM_S_VALID) {
+ eth_zero_addr(mac_addr->value.addr8);
+ eth_broadcast_addr(mac_addr->mask.addr8);
+ clear_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
+ mac_addr->act_tcam);
+ set_bit(FBNIC_MAC_ADDR_T_PROMISC,
+ mac_addr->act_tcam);
+ mac_addr->state = FBNIC_TCAM_S_ADD;
+ }
+ } else if (mc_promisc &&
+ (!fbnic_bmc_present(fbd) || !fbd->fw_cap.all_multi)) {
+ /* Multicast needs special handling, as the BMC may already
+ * have an all-multi rule in place. In that case adding a rule
+ * of our own would do no good, so we instead modify the
+ * existing ALL MULTI rule below.
+ */
+ if (!is_multicast_ether_addr(mac_addr->value.addr8) ||
+ mac_addr->state != FBNIC_TCAM_S_VALID) {
+ eth_zero_addr(mac_addr->value.addr8);
+ eth_broadcast_addr(mac_addr->mask.addr8);
+ mac_addr->value.addr8[0] ^= 1;
+ mac_addr->mask.addr8[0] ^= 1;
+ set_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
+ mac_addr->act_tcam);
+ clear_bit(FBNIC_MAC_ADDR_T_PROMISC,
+ mac_addr->act_tcam);
+ mac_addr->state = FBNIC_TCAM_S_ADD;
+ }
+ } else if (mac_addr->state == FBNIC_TCAM_S_VALID) {
+ __fbnic_xc_unsync(mac_addr, FBNIC_MAC_ADDR_T_ALLMULTI);
+ __fbnic_xc_unsync(mac_addr, FBNIC_MAC_ADDR_T_PROMISC);
+ }
+}
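To see why the XOR on byte 0 yields an "any multicast" entry, here is a minimal standalone sketch. The match predicate (a set mask bit meaning "don't care") is an assumption about the TCAM semantics for illustration, not something stated in this patch:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Assumed TCAM semantics: a set mask bit means "don't care". */
	static bool tcam_match(const uint8_t *addr, const uint8_t *val,
			       const uint8_t *mask)
	{
		for (int i = 0; i < 6; i++)
			if ((addr[i] ^ val[i]) & ~mask[i])
				return false;
		return true;
	}

	int main(void)
	{
		uint8_t val[6] = {0}, mask[6];

		memset(mask, 0xff, sizeof(mask));	/* match everything... */
		val[0] ^= 1;	/* ...but require the I/G (multicast) bit */
		mask[0] ^= 1;	/* by removing it from the don't-care set */

		uint8_t mc[6] = {0x01, 0x00, 0x5e, 0x00, 0x00, 0x01};
		uint8_t uc[6] = {0x02, 0xaa, 0xbb, 0xcc, 0xdd, 0xee};

		printf("mc=%d uc=%d\n", tcam_match(mc, val, mask),
		       tcam_match(uc, val, mask));	/* prints mc=1 uc=0 */
		return 0;
	}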
+
void fbnic_sift_macda(struct fbnic_dev *fbd)
{
int dest, src;
@@ -535,6 +596,21 @@ static void fbnic_clear_macda(struct fbnic_dev *fbd)
}
}
+static void fbnic_clear_valid_macda(struct fbnic_dev *fbd)
+{
+ int idx;
+
+ for (idx = ARRAY_SIZE(fbd->mac_addr); idx--;) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[idx];
+
+ if (mac_addr->state == FBNIC_TCAM_S_VALID) {
+ fbnic_clear_macda_entry(fbd, idx);
+
+ mac_addr->state = FBNIC_TCAM_S_UPDATE;
+ }
+ }
+}
+
static void fbnic_write_macda_entry(struct fbnic_dev *fbd, unsigned int idx,
struct fbnic_mac_addr *mac_addr)
{
@@ -556,7 +632,7 @@ static void fbnic_write_macda_entry(struct fbnic_dev *fbd, unsigned int idx,
void fbnic_write_macda(struct fbnic_dev *fbd)
{
- int idx;
+ int idx, updates = 0;
for (idx = ARRAY_SIZE(fbd->mac_addr); idx--;) {
struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[idx];
@@ -565,6 +641,9 @@ void fbnic_write_macda(struct fbnic_dev *fbd)
if (!(mac_addr->state & FBNIC_TCAM_S_UPDATE))
continue;
+ /* Record update count */
+ updates++;
+
/* Clear by writing 0s. */
if (mac_addr->state == FBNIC_TCAM_S_DELETE) {
/* Invalidate entry and clear addr state info */
@@ -578,6 +657,14 @@ void fbnic_write_macda(struct fbnic_dev *fbd)
mac_addr->state = FBNIC_TCAM_S_VALID;
}
+
+ /* If reinitializing the BMC TCAM we are doing an initial update */
+ if (fbd->fw_cap.need_bmc_tcam_reinit)
+ updates++;
+
+ /* If needed notify firmware of changes to MACDA TCAM */
+ if (updates != 0 && fbnic_bmc_present(fbd))
+ fbd->fw_cap.need_bmc_macda_sync = true;
}
static void fbnic_clear_act_tcam(struct fbnic_dev *fbd, unsigned int idx)
@@ -1052,13 +1139,25 @@ void fbnic_write_ip_addr(struct fbnic_dev *fbd)
}
}
-void fbnic_clear_rules(struct fbnic_dev *fbd)
+static void fbnic_clear_valid_act_tcam(struct fbnic_dev *fbd)
{
- u32 dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK,
- FBNIC_RPC_ACT_TBL0_DEST_BMC);
int i = FBNIC_RPC_TCAM_ACT_NUM_ENTRIES - 1;
struct fbnic_act_tcam *act_tcam;
+ /* Work from the bottom up deleting all other rules from hardware */
+ do {
+ act_tcam = &fbd->act_tcam[i];
+
+ if (act_tcam->state != FBNIC_TCAM_S_VALID)
+ continue;
+
+ fbnic_clear_act_tcam(fbd, i);
+ act_tcam->state = FBNIC_TCAM_S_UPDATE;
+ } while (i--);
+}
+
+void fbnic_clear_rules(struct fbnic_dev *fbd)
+{
/* Clear MAC rules */
fbnic_clear_macda(fbd);
@@ -1073,6 +1172,11 @@ void fbnic_clear_rules(struct fbnic_dev *fbd)
* the interface back up.
*/
if (fbnic_bmc_present(fbd)) {
+ u32 dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK,
+ FBNIC_RPC_ACT_TBL0_DEST_BMC);
+ int i = FBNIC_RPC_TCAM_ACT_NUM_ENTRIES - 1;
+ struct fbnic_act_tcam *act_tcam;
+
act_tcam = &fbd->act_tcam[i];
if (act_tcam->state == FBNIC_TCAM_S_VALID &&
@@ -1081,21 +1185,10 @@ void fbnic_clear_rules(struct fbnic_dev *fbd)
wr32(fbd, FBNIC_RPC_ACT_TBL1(i), 0);
act_tcam->state = FBNIC_TCAM_S_UPDATE;
-
- i--;
}
}
- /* Work from the bottom up deleting all other rules from hardware */
- do {
- act_tcam = &fbd->act_tcam[i];
-
- if (act_tcam->state != FBNIC_TCAM_S_VALID)
- continue;
-
- fbnic_clear_act_tcam(fbd, i);
- act_tcam->state = FBNIC_TCAM_S_UPDATE;
- } while (i--);
+ fbnic_clear_valid_act_tcam(fbd);
}
static void fbnic_delete_act_tcam(struct fbnic_dev *fbd, unsigned int idx)
@@ -1145,3 +1238,9 @@ void fbnic_write_rules(struct fbnic_dev *fbd)
fbnic_update_act_tcam(fbd, i);
}
}
+
+void fbnic_rpc_reset_valid_entries(struct fbnic_dev *fbd)
+{
+ fbnic_clear_valid_act_tcam(fbd);
+ fbnic_clear_valid_macda(fbd);
+}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.h b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.h
index 6892414195c3..3d4925b2ac75 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.h
@@ -184,6 +184,7 @@ struct fbnic_net;
void fbnic_bmc_rpc_init(struct fbnic_dev *fbd);
void fbnic_bmc_rpc_all_multi_config(struct fbnic_dev *fbd, bool enable_host);
+void fbnic_bmc_rpc_check(struct fbnic_dev *fbd);
void fbnic_reset_indir_tbl(struct fbnic_net *fbn);
void fbnic_rss_key_fill(u32 *buffer);
@@ -201,6 +202,9 @@ struct fbnic_mac_addr *__fbnic_mc_sync(struct fbnic_dev *fbd,
void fbnic_sift_macda(struct fbnic_dev *fbd);
void fbnic_write_macda(struct fbnic_dev *fbd);
+void fbnic_promisc_sync(struct fbnic_dev *fbd,
+ bool uc_promisc, bool mc_promisc);
+
struct fbnic_ip_addr *__fbnic_ip4_sync(struct fbnic_dev *fbd,
struct fbnic_ip_addr *ip_addr,
const struct in_addr *addr,
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
index f9543d03485f..cf773cc78e40 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
@@ -2,11 +2,14 @@
/* Copyright (c) Meta Platforms, Inc. and affiliates. */
#include <linux/bitfield.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
#include <linux/iopoll.h>
#include <linux/pci.h>
#include <net/netdev_queues.h>
#include <net/page_pool/helpers.h>
#include <net/tcp.h>
+#include <net/xdp.h>
#include "fbnic.h"
#include "fbnic_csr.h"
@@ -14,6 +17,13 @@
#include "fbnic_txrx.h"
enum {
+ FBNIC_XDP_PASS = 0,
+ FBNIC_XDP_CONSUME,
+ FBNIC_XDP_TX,
+ FBNIC_XDP_LEN_ERR,
+};
+
+enum {
FBNIC_XMIT_CB_TS = 0x01,
};
@@ -27,6 +37,8 @@ struct fbnic_xmit_cb {
#define FBNIC_XMIT_CB(__skb) ((struct fbnic_xmit_cb *)((__skb)->cb))
+#define FBNIC_XMIT_NOUNMAP ((void *)1)
+
static u32 __iomem *fbnic_ring_csr_base(const struct fbnic_ring *ring)
{
unsigned long csr_base = (unsigned long)ring->doorbell;
@@ -305,6 +317,7 @@ fbnic_tx_map(struct fbnic_ring *ring, struct sk_buff *skb, __le64 *meta)
unsigned int tail = ring->tail, first;
unsigned int size, data_len;
skb_frag_t *frag;
+ bool is_net_iov;
dma_addr_t dma;
__le64 *twd;
@@ -320,6 +333,7 @@ fbnic_tx_map(struct fbnic_ring *ring, struct sk_buff *skb, __le64 *meta)
if (size > FIELD_MAX(FBNIC_TWD_LEN_MASK))
goto dma_error;
+ is_net_iov = false;
dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
@@ -332,6 +346,8 @@ fbnic_tx_map(struct fbnic_ring *ring, struct sk_buff *skb, __le64 *meta)
FIELD_PREP(FBNIC_TWD_LEN_MASK, size) |
FIELD_PREP(FBNIC_TWD_TYPE_MASK,
FBNIC_TWD_TYPE_AL));
+ if (is_net_iov)
+ ring->tx_buf[tail] = FBNIC_XMIT_NOUNMAP;
tail++;
tail &= ring->size_mask;
@@ -345,6 +361,7 @@ fbnic_tx_map(struct fbnic_ring *ring, struct sk_buff *skb, __le64 *meta)
if (size > FIELD_MAX(FBNIC_TWD_LEN_MASK))
goto dma_error;
+ is_net_iov = skb_frag_is_net_iov(frag);
dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
}
@@ -380,6 +397,8 @@ dma_error:
twd = &ring->desc[tail];
if (tail == first)
fbnic_unmap_single_twd(dev, twd);
+ else if (ring->tx_buf[tail] == FBNIC_XMIT_NOUNMAP)
+ ring->tx_buf[tail] = NULL;
else
fbnic_unmap_page_twd(dev, twd);
}
@@ -564,7 +583,11 @@ static void fbnic_clean_twq0(struct fbnic_napi_vector *nv, int napi_budget,
desc_cnt--;
while (desc_cnt--) {
- fbnic_unmap_page_twd(nv->dev, &ring->desc[head]);
+ if (ring->tx_buf[head] != FBNIC_XMIT_NOUNMAP)
+ fbnic_unmap_page_twd(nv->dev,
+ &ring->desc[head]);
+ else
+ ring->tx_buf[head] = NULL;
head++;
head &= ring->size_mask;
}
@@ -606,6 +629,54 @@ static void fbnic_clean_twq0(struct fbnic_napi_vector *nv, int napi_budget,
}
}
+static void fbnic_clean_twq1(struct fbnic_napi_vector *nv, bool pp_allow_direct,
+ struct fbnic_ring *ring, bool discard,
+ unsigned int hw_head)
+{
+ u64 total_bytes = 0, total_packets = 0;
+ unsigned int head = ring->head;
+
+ while (hw_head != head) {
+ struct page *page;
+ u64 twd;
+
+ if (unlikely(!(ring->desc[head] & FBNIC_TWD_TYPE(AL))))
+ goto next_desc;
+
+ twd = le64_to_cpu(ring->desc[head]);
+ page = ring->tx_buf[head];
+
+ /* TYPE_AL is 2, TYPE_LAST_AL is 3. So this trick gives
+ * us one increment per packet, with no branches.
+ */
+ total_packets += FIELD_GET(FBNIC_TWD_TYPE_MASK, twd) -
+ FBNIC_TWD_TYPE_AL;
+ total_bytes += FIELD_GET(FBNIC_TWD_LEN_MASK, twd);
+
+ page_pool_put_page(page->pp, page, -1, pp_allow_direct);
+next_desc:
+ head++;
+ head &= ring->size_mask;
+ }
+
+ if (!total_bytes)
+ return;
+
+ ring->head = head;
+
+ if (discard) {
+ u64_stats_update_begin(&ring->stats.syncp);
+ ring->stats.dropped += total_packets;
+ u64_stats_update_end(&ring->stats.syncp);
+ return;
+ }
+
+ u64_stats_update_begin(&ring->stats.syncp);
+ ring->stats.bytes += total_bytes;
+ ring->stats.packets += total_packets;
+ u64_stats_update_end(&ring->stats.syncp);
+}
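A quick worked example of the branch-free counting trick described in the comment above; a self-contained sketch assuming only the TYPE_AL/TYPE_LAST_AL encodings (2 and 3) that the comment names:

	#include <stdio.h>

	#define TYPE_AL      2	/* intermediate fragment descriptor */
	#define TYPE_LAST_AL 3	/* final fragment of a packet */

	int main(void)
	{
		/* Two packets: one with two fragments, one with a single one */
		int types[] = { TYPE_AL, TYPE_LAST_AL, TYPE_LAST_AL };
		int packets = 0;

		for (int i = 0; i < 3; i++)
			packets += types[i] - TYPE_AL;	/* adds 0 or 1, no branch */

		printf("%d packets\n", packets);	/* prints 2 */
		return 0;
	}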
+
static void fbnic_clean_tsq(struct fbnic_napi_vector *nv,
struct fbnic_ring *ring,
u64 tcd, int *ts_head, int *head0)
@@ -657,44 +728,65 @@ static void fbnic_clean_tsq(struct fbnic_napi_vector *nv,
}
static void fbnic_page_pool_init(struct fbnic_ring *ring, unsigned int idx,
- struct page *page)
+ netmem_ref netmem)
{
struct fbnic_rx_buf *rx_buf = &ring->rx_buf[idx];
- page_pool_fragment_page(page, FBNIC_PAGECNT_BIAS_MAX);
+ page_pool_fragment_netmem(netmem, FBNIC_PAGECNT_BIAS_MAX);
rx_buf->pagecnt_bias = FBNIC_PAGECNT_BIAS_MAX;
- rx_buf->page = page;
+ rx_buf->netmem = netmem;
}
-static struct page *fbnic_page_pool_get(struct fbnic_ring *ring,
- unsigned int idx)
+static struct page *
+fbnic_page_pool_get_head(struct fbnic_q_triad *qt, unsigned int idx)
{
- struct fbnic_rx_buf *rx_buf = &ring->rx_buf[idx];
+ struct fbnic_rx_buf *rx_buf = &qt->sub0.rx_buf[idx];
rx_buf->pagecnt_bias--;
- return rx_buf->page;
+ /* sub0 is always fed system pages, from the NAPI-level page_pool */
+ return netmem_to_page(rx_buf->netmem);
+}
+
+static netmem_ref
+fbnic_page_pool_get_data(struct fbnic_q_triad *qt, unsigned int idx)
+{
+ struct fbnic_rx_buf *rx_buf = &qt->sub1.rx_buf[idx];
+
+ rx_buf->pagecnt_bias--;
+
+ return rx_buf->netmem;
}
static void fbnic_page_pool_drain(struct fbnic_ring *ring, unsigned int idx,
- struct fbnic_napi_vector *nv, int budget)
+ int budget)
{
struct fbnic_rx_buf *rx_buf = &ring->rx_buf[idx];
- struct page *page = rx_buf->page;
+ netmem_ref netmem = rx_buf->netmem;
- if (!page_pool_unref_page(page, rx_buf->pagecnt_bias))
- page_pool_put_unrefed_page(nv->page_pool, page, -1, !!budget);
+ if (!page_pool_unref_netmem(netmem, rx_buf->pagecnt_bias))
+ page_pool_put_unrefed_netmem(ring->page_pool, netmem, -1,
+ !!budget);
- rx_buf->page = NULL;
+ rx_buf->netmem = 0;
}
static void fbnic_clean_twq(struct fbnic_napi_vector *nv, int napi_budget,
- struct fbnic_q_triad *qt, s32 ts_head, s32 head0)
+ struct fbnic_q_triad *qt, s32 ts_head, s32 head0,
+ s32 head1)
{
if (head0 >= 0)
fbnic_clean_twq0(nv, napi_budget, &qt->sub0, false, head0);
else if (ts_head >= 0)
fbnic_clean_twq0(nv, napi_budget, &qt->sub0, false, ts_head);
+
+ if (head1 >= 0) {
+ qt->cmpl.deferred_head = -1;
+ if (napi_budget)
+ fbnic_clean_twq1(nv, true, &qt->sub1, false, head1);
+ else
+ qt->cmpl.deferred_head = head1;
+ }
}
static void
@@ -702,6 +794,7 @@ fbnic_clean_tcq(struct fbnic_napi_vector *nv, struct fbnic_q_triad *qt,
int napi_budget)
{
struct fbnic_ring *cmpl = &qt->cmpl;
+ s32 head1 = cmpl->deferred_head;
s32 head0 = -1, ts_head = -1;
__le64 *raw_tcd, done;
u32 head = cmpl->head;
@@ -719,7 +812,10 @@ fbnic_clean_tcq(struct fbnic_napi_vector *nv, struct fbnic_q_triad *qt,
switch (FIELD_GET(FBNIC_TCD_TYPE_MASK, tcd)) {
case FBNIC_TCD_TYPE_0:
- if (!(tcd & FBNIC_TCD_TWQ1))
+ if (tcd & FBNIC_TCD_TWQ1)
+ head1 = FIELD_GET(FBNIC_TCD_TYPE0_HEAD1_MASK,
+ tcd);
+ else
head0 = FIELD_GET(FBNIC_TCD_TYPE0_HEAD0_MASK,
tcd);
/* Currently all err status bits are related to
@@ -752,11 +848,11 @@ fbnic_clean_tcq(struct fbnic_napi_vector *nv, struct fbnic_q_triad *qt,
}
/* Unmap and free processed buffers */
- fbnic_clean_twq(nv, napi_budget, qt, ts_head, head0);
+ fbnic_clean_twq(nv, napi_budget, qt, ts_head, head0, head1);
}
-static void fbnic_clean_bdq(struct fbnic_napi_vector *nv, int napi_budget,
- struct fbnic_ring *ring, unsigned int hw_head)
+static void fbnic_clean_bdq(struct fbnic_ring *ring, unsigned int hw_head,
+ int napi_budget)
{
unsigned int head = ring->head;
@@ -764,7 +860,7 @@ static void fbnic_clean_bdq(struct fbnic_napi_vector *nv, int napi_budget,
return;
do {
- fbnic_page_pool_drain(ring, head, nv, napi_budget);
+ fbnic_page_pool_drain(ring, head, napi_budget);
head++;
head &= ring->size_mask;
@@ -773,10 +869,10 @@ static void fbnic_clean_bdq(struct fbnic_napi_vector *nv, int napi_budget,
ring->head = head;
}
-static void fbnic_bd_prep(struct fbnic_ring *bdq, u16 id, struct page *page)
+static void fbnic_bd_prep(struct fbnic_ring *bdq, u16 id, netmem_ref netmem)
{
__le64 *bdq_desc = &bdq->desc[id * FBNIC_BD_FRAG_COUNT];
- dma_addr_t dma = page_pool_get_dma_addr(page);
+ dma_addr_t dma = page_pool_get_dma_addr_netmem(netmem);
u64 bd, i = FBNIC_BD_FRAG_COUNT;
bd = (FBNIC_BD_PAGE_ADDR_MASK & dma) |
@@ -794,7 +890,7 @@ static void fbnic_bd_prep(struct fbnic_ring *bdq, u16 id, struct page *page)
} while (--i);
}
-static void fbnic_fill_bdq(struct fbnic_napi_vector *nv, struct fbnic_ring *bdq)
+static void fbnic_fill_bdq(struct fbnic_ring *bdq)
{
unsigned int count = fbnic_desc_unused(bdq);
unsigned int i = bdq->tail;
@@ -803,10 +899,10 @@ static void fbnic_fill_bdq(struct fbnic_napi_vector *nv, struct fbnic_ring *bdq)
return;
do {
- struct page *page;
+ netmem_ref netmem;
- page = page_pool_dev_alloc_pages(nv->page_pool);
- if (!page) {
+ netmem = page_pool_dev_alloc_netmems(bdq->page_pool);
+ if (!netmem) {
u64_stats_update_begin(&bdq->stats.syncp);
bdq->stats.rx.alloc_failed++;
u64_stats_update_end(&bdq->stats.syncp);
@@ -814,8 +910,8 @@ static void fbnic_fill_bdq(struct fbnic_napi_vector *nv, struct fbnic_ring *bdq)
break;
}
- fbnic_page_pool_init(bdq, i, page);
- fbnic_bd_prep(bdq, i, page);
+ fbnic_page_pool_init(bdq, i, netmem);
+ fbnic_bd_prep(bdq, i, netmem);
i++;
i &= bdq->size_mask;
@@ -862,7 +958,7 @@ static void fbnic_pkt_prepare(struct fbnic_napi_vector *nv, u64 rcd,
{
unsigned int hdr_pg_idx = FIELD_GET(FBNIC_RCD_AL_BUFF_PAGE_MASK, rcd);
unsigned int hdr_pg_off = FIELD_GET(FBNIC_RCD_AL_BUFF_OFF_MASK, rcd);
- struct page *page = fbnic_page_pool_get(&qt->sub0, hdr_pg_idx);
+ struct page *page = fbnic_page_pool_get_head(qt, hdr_pg_idx);
unsigned int len = FIELD_GET(FBNIC_RCD_AL_BUFF_LEN_MASK, rcd);
unsigned int frame_sz, hdr_pg_start, hdr_pg_end, headroom;
unsigned char *hdr_start;
@@ -877,7 +973,7 @@ static void fbnic_pkt_prepare(struct fbnic_napi_vector *nv, u64 rcd,
headroom = hdr_pg_off - hdr_pg_start + FBNIC_RX_PAD;
frame_sz = hdr_pg_end - hdr_pg_start;
- xdp_init_buff(&pkt->buff, frame_sz, NULL);
+ xdp_init_buff(&pkt->buff, frame_sz, &qt->xdp_rxq);
hdr_pg_start += (FBNIC_RCD_AL_BUFF_FRAG_MASK & rcd) *
FBNIC_BD_FRAG_SIZE;
@@ -888,13 +984,12 @@ static void fbnic_pkt_prepare(struct fbnic_napi_vector *nv, u64 rcd,
/* Build frame around buffer */
hdr_start = page_address(page) + hdr_pg_start;
-
+ net_prefetch(pkt->buff.data);
xdp_prepare_buff(&pkt->buff, hdr_start, headroom,
len - FBNIC_RX_PAD, true);
- pkt->data_truesize = 0;
- pkt->data_len = 0;
- pkt->nr_frags = 0;
+ pkt->hwtstamp = 0;
+ pkt->add_frag_failed = false;
}
static void fbnic_add_rx_frag(struct fbnic_napi_vector *nv, u64 rcd,
@@ -904,9 +999,9 @@ static void fbnic_add_rx_frag(struct fbnic_napi_vector *nv, u64 rcd,
unsigned int pg_idx = FIELD_GET(FBNIC_RCD_AL_BUFF_PAGE_MASK, rcd);
unsigned int pg_off = FIELD_GET(FBNIC_RCD_AL_BUFF_OFF_MASK, rcd);
unsigned int len = FIELD_GET(FBNIC_RCD_AL_BUFF_LEN_MASK, rcd);
- struct page *page = fbnic_page_pool_get(&qt->sub1, pg_idx);
- struct skb_shared_info *shinfo;
+ netmem_ref netmem = fbnic_page_pool_get_data(qt, pg_idx);
unsigned int truesize;
+ bool added;
truesize = FIELD_GET(FBNIC_RCD_AL_PAGE_FIN, rcd) ?
FBNIC_BD_FRAG_SIZE - pg_off : ALIGN(len, 128);
@@ -915,88 +1010,171 @@ static void fbnic_add_rx_frag(struct fbnic_napi_vector *nv, u64 rcd,
FBNIC_BD_FRAG_SIZE;
/* Sync DMA buffer */
- dma_sync_single_range_for_cpu(nv->dev, page_pool_get_dma_addr(page),
- pg_off, truesize, DMA_BIDIRECTIONAL);
-
- /* Add page to xdp shared info */
- shinfo = xdp_get_shared_info_from_buff(&pkt->buff);
-
- /* We use gso_segs to store truesize */
- pkt->data_truesize += truesize;
-
- __skb_fill_page_desc_noacc(shinfo, pkt->nr_frags++, page, pg_off, len);
-
- /* Store data_len in gso_size */
- pkt->data_len += len;
+ page_pool_dma_sync_netmem_for_cpu(qt->sub1.page_pool, netmem,
+ pg_off, truesize);
+
+ added = xdp_buff_add_frag(&pkt->buff, netmem, pg_off, len, truesize);
+ if (unlikely(!added)) {
+ pkt->add_frag_failed = true;
+ netdev_err_once(nv->napi.dev,
+ "Failed to add fragment to xdp_buff\n");
+ }
}
-static void fbnic_put_pkt_buff(struct fbnic_napi_vector *nv,
+static void fbnic_put_pkt_buff(struct fbnic_q_triad *qt,
struct fbnic_pkt_buff *pkt, int budget)
{
- struct skb_shared_info *shinfo;
struct page *page;
- int nr_frags;
if (!pkt->buff.data_hard_start)
return;
- shinfo = xdp_get_shared_info_from_buff(&pkt->buff);
- nr_frags = pkt->nr_frags;
+ if (xdp_buff_has_frags(&pkt->buff)) {
+ struct skb_shared_info *shinfo;
+ netmem_ref netmem;
+ int nr_frags;
+
+ shinfo = xdp_get_shared_info_from_buff(&pkt->buff);
+ nr_frags = shinfo->nr_frags;
- while (nr_frags--) {
- page = skb_frag_page(&shinfo->frags[nr_frags]);
- page_pool_put_full_page(nv->page_pool, page, !!budget);
+ while (nr_frags--) {
+ netmem = skb_frag_netmem(&shinfo->frags[nr_frags]);
+ page_pool_put_full_netmem(qt->sub1.page_pool, netmem,
+ !!budget);
+ }
}
page = virt_to_page(pkt->buff.data_hard_start);
- page_pool_put_full_page(nv->page_pool, page, !!budget);
+ page_pool_put_full_page(qt->sub0.page_pool, page, !!budget);
}
static struct sk_buff *fbnic_build_skb(struct fbnic_napi_vector *nv,
struct fbnic_pkt_buff *pkt)
{
- unsigned int nr_frags = pkt->nr_frags;
- struct skb_shared_info *shinfo;
- unsigned int truesize;
struct sk_buff *skb;
- truesize = xdp_data_hard_end(&pkt->buff) + FBNIC_RX_TROOM -
- pkt->buff.data_hard_start;
-
- /* Build frame around buffer */
- skb = napi_build_skb(pkt->buff.data_hard_start, truesize);
- if (unlikely(!skb))
+ skb = xdp_build_skb_from_buff(&pkt->buff);
+ if (!skb)
return NULL;
- /* Push data pointer to start of data, put tail to end of data */
- skb_reserve(skb, pkt->buff.data - pkt->buff.data_hard_start);
- __skb_put(skb, pkt->buff.data_end - pkt->buff.data);
+ /* Add timestamp if present */
+ if (pkt->hwtstamp)
+ skb_hwtstamps(skb)->hwtstamp = pkt->hwtstamp;
+
+ return skb;
+}
- /* Add tracking for metadata at the start of the frame */
- skb_metadata_set(skb, pkt->buff.data - pkt->buff.data_meta);
+static long fbnic_pkt_tx(struct fbnic_napi_vector *nv,
+ struct fbnic_pkt_buff *pkt)
+{
+ struct fbnic_ring *ring = &nv->qt[0].sub1;
+ int size, offset, nsegs = 1, data_len = 0;
+ unsigned int tail = ring->tail;
+ struct skb_shared_info *shinfo;
+ skb_frag_t *frag = NULL;
+ struct page *page;
+ dma_addr_t dma;
+ __le64 *twd;
- /* Add Rx frags */
- if (nr_frags) {
- /* Verify that shared info didn't move */
+ if (unlikely(xdp_buff_has_frags(&pkt->buff))) {
shinfo = xdp_get_shared_info_from_buff(&pkt->buff);
- WARN_ON(skb_shinfo(skb) != shinfo);
+ nsegs += shinfo->nr_frags;
+ data_len = shinfo->xdp_frags_size;
+ frag = &shinfo->frags[0];
+ }
- skb->truesize += pkt->data_truesize;
- skb->data_len += pkt->data_len;
- shinfo->nr_frags = nr_frags;
- skb->len += pkt->data_len;
+ if (fbnic_desc_unused(ring) < nsegs) {
+ u64_stats_update_begin(&ring->stats.syncp);
+ ring->stats.dropped++;
+ u64_stats_update_end(&ring->stats.syncp);
+ return -FBNIC_XDP_CONSUME;
}
- skb_mark_for_recycle(skb);
+ page = virt_to_page(pkt->buff.data_hard_start);
+ offset = offset_in_page(pkt->buff.data);
+ dma = page_pool_get_dma_addr(page);
- /* Set MAC header specific fields */
- skb->protocol = eth_type_trans(skb, nv->napi.dev);
+ size = pkt->buff.data_end - pkt->buff.data;
- /* Add timestamp if present */
- if (pkt->hwtstamp)
- skb_hwtstamps(skb)->hwtstamp = pkt->hwtstamp;
+ while (nsegs--) {
+ dma_sync_single_range_for_device(nv->dev, dma, offset, size,
+ DMA_BIDIRECTIONAL);
+ dma += offset;
- return skb;
+ ring->tx_buf[tail] = page;
+
+ twd = &ring->desc[tail];
+ *twd = cpu_to_le64(FIELD_PREP(FBNIC_TWD_ADDR_MASK, dma) |
+ FIELD_PREP(FBNIC_TWD_LEN_MASK, size) |
+ FIELD_PREP(FBNIC_TWD_TYPE_MASK,
+ FBNIC_TWD_TYPE_AL));
+
+ tail++;
+ tail &= ring->size_mask;
+
+ if (!data_len)
+ break;
+
+ offset = skb_frag_off(frag);
+ page = skb_frag_page(frag);
+ dma = page_pool_get_dma_addr(page);
+
+ size = skb_frag_size(frag);
+ data_len -= size;
+ frag++;
+ }
+
+ *twd |= FBNIC_TWD_TYPE(LAST_AL);
+
+ ring->tail = tail;
+
+ return -FBNIC_XDP_TX;
+}
+
+static void fbnic_pkt_commit_tail(struct fbnic_napi_vector *nv,
+ unsigned int pkt_tail)
+{
+ struct fbnic_ring *ring = &nv->qt[0].sub1;
+
+ /* Force DMA writes to flush before writing to tail */
+ dma_wmb();
+
+ writel(pkt_tail, ring->doorbell);
+}
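The dma_wmb() here is the classic producer-side ordering step: descriptor stores must be visible to the device before the MMIO tail write that lets it fetch them. In sketch form (illustrative restatement of the pattern above, not new driver code):

	ring->desc[tail] = cpu_to_le64(twd);	/* 1: publish descriptor */
	dma_wmb();				/* 2: order stores vs. doorbell */
	writel(tail, ring->doorbell);		/* 3: device may now fetch */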
+
+static struct sk_buff *fbnic_run_xdp(struct fbnic_napi_vector *nv,
+ struct fbnic_pkt_buff *pkt)
+{
+ struct fbnic_net *fbn = netdev_priv(nv->napi.dev);
+ struct bpf_prog *xdp_prog;
+ int act;
+
+ xdp_prog = READ_ONCE(fbn->xdp_prog);
+ if (!xdp_prog)
+ goto xdp_pass;
+
+ /* Should never happen, config paths enforce HDS threshold > MTU */
+ if (xdp_buff_has_frags(&pkt->buff) && !xdp_prog->aux->xdp_has_frags)
+ return ERR_PTR(-FBNIC_XDP_LEN_ERR);
+
+ act = bpf_prog_run_xdp(xdp_prog, &pkt->buff);
+ switch (act) {
+ case XDP_PASS:
+xdp_pass:
+ return fbnic_build_skb(nv, pkt);
+ case XDP_TX:
+ return ERR_PTR(fbnic_pkt_tx(nv, pkt));
+ default:
+ bpf_warn_invalid_xdp_action(nv->napi.dev, xdp_prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(nv->napi.dev, xdp_prog, act);
+ fallthrough;
+ case XDP_DROP:
+ break;
+ }
+
+ return ERR_PTR(-FBNIC_XDP_CONSUME);
}
static enum pkt_hash_types fbnic_skb_hash_type(u64 rcd)
@@ -1050,10 +1228,10 @@ static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
struct fbnic_q_triad *qt, int budget)
{
unsigned int packets = 0, bytes = 0, dropped = 0, alloc_failed = 0;
- u64 csum_complete = 0, csum_none = 0;
+ u64 csum_complete = 0, csum_none = 0, length_errors = 0;
+ s32 head0 = -1, head1 = -1, pkt_tail = -1;
struct fbnic_ring *rcq = &qt->cmpl;
struct fbnic_pkt_buff *pkt;
- s32 head0 = -1, head1 = -1;
__le64 *raw_rcd, done;
u32 head = rcq->head;
@@ -1094,8 +1272,10 @@ static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
/* We currently ignore the action table index */
break;
case FBNIC_RCD_TYPE_META:
- if (likely(!fbnic_rcd_metadata_err(rcd)))
- skb = fbnic_build_skb(nv, pkt);
+ if (unlikely(pkt->add_frag_failed))
+ skb = NULL;
+ else if (likely(!fbnic_rcd_metadata_err(rcd)))
+ skb = fbnic_run_xdp(nv, pkt);
/* Populate skb and invalidate XDP */
if (!IS_ERR_OR_NULL(skb)) {
@@ -1107,15 +1287,20 @@ static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
bytes += skb->len;
napi_gro_receive(&nv->napi, skb);
+ } else if (skb == ERR_PTR(-FBNIC_XDP_TX)) {
+ pkt_tail = nv->qt[0].sub1.tail;
+ bytes += xdp_get_buff_len(&pkt->buff);
} else {
if (!skb) {
alloc_failed++;
dropped++;
+ } else if (skb == ERR_PTR(-FBNIC_XDP_LEN_ERR)) {
+ length_errors++;
} else {
dropped++;
}
- fbnic_put_pkt_buff(nv, pkt, 1);
+ fbnic_put_pkt_buff(qt, pkt, 1);
}
pkt->buff.data_hard_start = NULL;
@@ -1140,16 +1325,20 @@ static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
rcq->stats.rx.alloc_failed += alloc_failed;
rcq->stats.rx.csum_complete += csum_complete;
rcq->stats.rx.csum_none += csum_none;
+ rcq->stats.rx.length_errors += length_errors;
u64_stats_update_end(&rcq->stats.syncp);
+ if (pkt_tail >= 0)
+ fbnic_pkt_commit_tail(nv, pkt_tail);
+
/* Unmap and free processed buffers */
if (head0 >= 0)
- fbnic_clean_bdq(nv, budget, &qt->sub0, head0);
- fbnic_fill_bdq(nv, &qt->sub0);
+ fbnic_clean_bdq(&qt->sub0, head0, budget);
+ fbnic_fill_bdq(&qt->sub0);
if (head1 >= 0)
- fbnic_clean_bdq(nv, budget, &qt->sub1, head1);
- fbnic_fill_bdq(nv, &qt->sub1);
+ fbnic_clean_bdq(&qt->sub1, head1, budget);
+ fbnic_fill_bdq(&qt->sub1);
/* Record the current head/tail of the queue */
if (rcq->head != head) {
@@ -1220,8 +1409,9 @@ void fbnic_aggregate_ring_rx_counters(struct fbnic_net *fbn,
fbn->rx_stats.rx.alloc_failed += stats->rx.alloc_failed;
fbn->rx_stats.rx.csum_complete += stats->rx.csum_complete;
fbn->rx_stats.rx.csum_none += stats->rx.csum_none;
+ fbn->rx_stats.rx.length_errors += stats->rx.length_errors;
/* Remember to add new stats here */
- BUILD_BUG_ON(sizeof(fbn->rx_stats.rx) / 8 != 3);
+ BUILD_BUG_ON(sizeof(fbn->rx_stats.rx) / 8 != 4);
}
void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn,
@@ -1243,6 +1433,22 @@ void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn,
BUILD_BUG_ON(sizeof(fbn->tx_stats.twq) / 8 != 6);
}
+static void fbnic_aggregate_ring_xdp_counters(struct fbnic_net *fbn,
+ struct fbnic_ring *xdpr)
+{
+ struct fbnic_queue_stats *stats = &xdpr->stats;
+
+ if (!(xdpr->flags & FBNIC_RING_F_STATS))
+ return;
+
+ /* Capture stats from queues before disassociating them */
+ fbn->rx_stats.bytes += stats->bytes;
+ fbn->rx_stats.packets += stats->packets;
+ fbn->rx_stats.dropped += stats->dropped;
+ fbn->tx_stats.bytes += stats->bytes;
+ fbn->tx_stats.packets += stats->packets;
+}
+
static void fbnic_remove_tx_ring(struct fbnic_net *fbn,
struct fbnic_ring *txr)
{
@@ -1256,6 +1462,19 @@ static void fbnic_remove_tx_ring(struct fbnic_net *fbn,
fbn->tx[txr->q_idx] = NULL;
}
+static void fbnic_remove_xdp_ring(struct fbnic_net *fbn,
+ struct fbnic_ring *xdpr)
+{
+ if (!(xdpr->flags & FBNIC_RING_F_STATS))
+ return;
+
+ fbnic_aggregate_ring_xdp_counters(fbn, xdpr);
+
+ /* Remove pointer to the Tx ring */
+ WARN_ON(fbn->tx[xdpr->q_idx] && fbn->tx[xdpr->q_idx] != xdpr);
+ fbn->tx[xdpr->q_idx] = NULL;
+}
+
static void fbnic_remove_rx_ring(struct fbnic_net *fbn,
struct fbnic_ring *rxr)
{
@@ -1269,6 +1488,12 @@ static void fbnic_remove_rx_ring(struct fbnic_net *fbn,
fbn->rx[rxr->q_idx] = NULL;
}
+static void fbnic_free_qt_page_pools(struct fbnic_q_triad *qt)
+{
+ page_pool_destroy(qt->sub0.page_pool);
+ page_pool_destroy(qt->sub1.page_pool);
+}
+
static void fbnic_free_napi_vector(struct fbnic_net *fbn,
struct fbnic_napi_vector *nv)
{
@@ -1277,6 +1502,7 @@ static void fbnic_free_napi_vector(struct fbnic_net *fbn,
for (i = 0; i < nv->txt_count; i++) {
fbnic_remove_tx_ring(fbn, &nv->qt[i].sub0);
+ fbnic_remove_xdp_ring(fbn, &nv->qt[i].sub1);
fbnic_remove_tx_ring(fbn, &nv->qt[i].cmpl);
}
@@ -1287,8 +1513,7 @@ static void fbnic_free_napi_vector(struct fbnic_net *fbn,
}
fbnic_napi_free_irq(fbd, nv);
- page_pool_destroy(nv->page_pool);
- netif_napi_del(&nv->napi);
+ netif_napi_del_locked(&nv->napi);
fbn->napi[fbnic_napi_idx(nv)] = NULL;
kfree(nv);
}
@@ -1302,23 +1527,22 @@ void fbnic_free_napi_vectors(struct fbnic_net *fbn)
fbnic_free_napi_vector(fbn, fbn->napi[i]);
}
-#define FBNIC_PAGE_POOL_FLAGS \
- (PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV)
-
-static int fbnic_alloc_nv_page_pool(struct fbnic_net *fbn,
- struct fbnic_napi_vector *nv)
+static int
+fbnic_alloc_qt_page_pools(struct fbnic_net *fbn, struct fbnic_q_triad *qt,
+ unsigned int rxq_idx)
{
struct page_pool_params pp_params = {
.order = 0,
- .flags = FBNIC_PAGE_POOL_FLAGS,
- .pool_size = (fbn->hpq_size + fbn->ppq_size) * nv->rxt_count,
+ .flags = PP_FLAG_DMA_MAP |
+ PP_FLAG_DMA_SYNC_DEV,
+ .pool_size = fbn->hpq_size + fbn->ppq_size,
.nid = NUMA_NO_NODE,
- .dev = nv->dev,
+ .dev = fbn->netdev->dev.parent,
.dma_dir = DMA_BIDIRECTIONAL,
.offset = 0,
.max_len = PAGE_SIZE,
- .napi = &nv->napi,
.netdev = fbn->netdev,
+ .queue_idx = rxq_idx,
};
struct page_pool *pp;
@@ -1338,9 +1562,24 @@ static int fbnic_alloc_nv_page_pool(struct fbnic_net *fbn,
if (IS_ERR(pp))
return PTR_ERR(pp);
- nv->page_pool = pp;
+ qt->sub0.page_pool = pp;
+ if (netif_rxq_has_unreadable_mp(fbn->netdev, rxq_idx)) {
+ pp_params.flags |= PP_FLAG_ALLOW_UNREADABLE_NETMEM;
+ pp_params.dma_dir = DMA_FROM_DEVICE;
+
+ pp = page_pool_create(&pp_params);
+ if (IS_ERR(pp))
+ goto err_destroy_sub0;
+ } else {
+ page_pool_get(pp);
+ }
+ qt->sub1.page_pool = pp;
return 0;
+
+err_destroy_sub0:
+ page_pool_destroy(pp);
+ return PTR_ERR(pp);
}
static void fbnic_ring_init(struct fbnic_ring *ring, u32 __iomem *doorbell,
@@ -1350,6 +1589,7 @@ static void fbnic_ring_init(struct fbnic_ring *ring, u32 __iomem *doorbell,
ring->doorbell = doorbell;
ring->q_idx = q_idx;
ring->flags = flags;
+ ring->deferred_head = -1;
}
static int fbnic_alloc_napi_vector(struct fbnic_dev *fbd, struct fbnic_net *fbn,
@@ -1359,11 +1599,18 @@ static int fbnic_alloc_napi_vector(struct fbnic_dev *fbd, struct fbnic_net *fbn,
{
int txt_count = txq_count, rxt_count = rxq_count;
u32 __iomem *uc_addr = fbd->uc_addr0;
+ int xdp_count = 0, qt_count, err;
struct fbnic_napi_vector *nv;
struct fbnic_q_triad *qt;
- int qt_count, err;
u32 __iomem *db;
+ /* We need to reserve at least one Tx Queue Triad for an XDP ring */
+ if (rxq_count) {
+ xdp_count = 1;
+ if (!txt_count)
+ txt_count = 1;
+ }
+
qt_count = txt_count + rxq_count;
if (!qt_count)
return -EINVAL;
@@ -1387,37 +1634,33 @@ static int fbnic_alloc_napi_vector(struct fbnic_dev *fbd, struct fbnic_net *fbn,
/* Tie napi to netdev */
fbn->napi[fbnic_napi_idx(nv)] = nv;
- netif_napi_add(fbn->netdev, &nv->napi, fbnic_poll);
+ netif_napi_add_config_locked(fbn->netdev, &nv->napi, fbnic_poll,
+ fbnic_napi_idx(nv));
/* Record IRQ to NAPI struct */
- netif_napi_set_irq(&nv->napi,
- pci_irq_vector(to_pci_dev(fbd->dev), nv->v_idx));
+ netif_napi_set_irq_locked(&nv->napi,
+ pci_irq_vector(to_pci_dev(fbd->dev),
+ nv->v_idx));
/* Tie nv back to PCIe dev */
nv->dev = fbd->dev;
- /* Allocate page pool */
- if (rxq_count) {
- err = fbnic_alloc_nv_page_pool(fbn, nv);
- if (err)
- goto napi_del;
- }
-
/* Request the IRQ for napi vector */
err = fbnic_napi_request_irq(fbd, nv);
if (err)
- goto pp_destroy;
+ goto napi_del;
/* Initialize queue triads */
qt = nv->qt;
while (txt_count) {
+ u8 flags = FBNIC_RING_F_CTX | FBNIC_RING_F_STATS;
+
/* Configure Tx queue */
db = &uc_addr[FBNIC_QUEUE(txq_idx) + FBNIC_QUEUE_TWQ0_TAIL];
/* Assign Tx queue to netdev if applicable */
if (txq_count > 0) {
- u8 flags = FBNIC_RING_F_CTX | FBNIC_RING_F_STATS;
fbnic_ring_init(&qt->sub0, db, txq_idx, flags);
fbn->tx[txq_idx] = &qt->sub0;
@@ -1427,6 +1670,28 @@ static int fbnic_alloc_napi_vector(struct fbnic_dev *fbd, struct fbnic_net *fbn,
FBNIC_RING_F_DISABLED);
}
+ /* Configure XDP queue */
+ db = &uc_addr[FBNIC_QUEUE(txq_idx) + FBNIC_QUEUE_TWQ1_TAIL];
+
+ /* Assign XDP queue to netdev if applicable
+ *
+ * The setup here differs from a normal Tx queue:
+ * 1. We only need one XDP Tx queue per NAPI vector.
+ * 2. We associate it with the first Rx queue index.
+ * 3. The hardware side is associated based on the Tx queue.
+ * 4. The netdev queue index is offset by FBNIC_MAX_TXQS.
+ */
+ if (xdp_count > 0) {
+ unsigned int xdp_idx = FBNIC_MAX_TXQS + rxq_idx;
+
+ fbnic_ring_init(&qt->sub1, db, xdp_idx, flags);
+ fbn->tx[xdp_idx] = &qt->sub1;
+ xdp_count--;
+ } else {
+ fbnic_ring_init(&qt->sub1, db, 0,
+ FBNIC_RING_F_DISABLED);
+ }
+
/* Configure Tx completion queue */
db = &uc_addr[FBNIC_QUEUE(txq_idx) + FBNIC_QUEUE_TCQ_HEAD];
fbnic_ring_init(&qt->cmpl, db, 0, 0);
@@ -1463,10 +1728,8 @@ static int fbnic_alloc_napi_vector(struct fbnic_dev *fbd, struct fbnic_net *fbn,
return 0;
-pp_destroy:
- page_pool_destroy(nv->page_pool);
napi_del:
- netif_napi_del(&nv->napi);
+ netif_napi_del_locked(&nv->napi);
fbn->napi[fbnic_napi_idx(nv)] = NULL;
kfree(nv);
return err;
@@ -1680,6 +1943,12 @@ static void fbnic_free_qt_resources(struct fbnic_net *fbn,
fbnic_free_ring_resources(dev, &qt->cmpl);
fbnic_free_ring_resources(dev, &qt->sub1);
fbnic_free_ring_resources(dev, &qt->sub0);
+
+ if (xdp_rxq_info_is_reg(&qt->xdp_rxq)) {
+ xdp_rxq_info_unreg_mem_model(&qt->xdp_rxq);
+ xdp_rxq_info_unreg(&qt->xdp_rxq);
+ fbnic_free_qt_page_pools(qt);
+ }
}
static int fbnic_alloc_tx_qt_resources(struct fbnic_net *fbn,
@@ -1692,6 +1961,10 @@ static int fbnic_alloc_tx_qt_resources(struct fbnic_net *fbn,
if (err)
return err;
+ err = fbnic_alloc_tx_ring_resources(fbn, &qt->sub1);
+ if (err)
+ goto free_sub0;
+
err = fbnic_alloc_tx_ring_resources(fbn, &qt->cmpl);
if (err)
goto free_sub1;
@@ -1699,20 +1972,37 @@ static int fbnic_alloc_tx_qt_resources(struct fbnic_net *fbn,
return 0;
free_sub1:
+ fbnic_free_ring_resources(dev, &qt->sub1);
+free_sub0:
fbnic_free_ring_resources(dev, &qt->sub0);
return err;
}
static int fbnic_alloc_rx_qt_resources(struct fbnic_net *fbn,
+ struct fbnic_napi_vector *nv,
struct fbnic_q_triad *qt)
{
struct device *dev = fbn->netdev->dev.parent;
int err;
- err = fbnic_alloc_rx_ring_resources(fbn, &qt->sub0);
+ err = fbnic_alloc_qt_page_pools(fbn, qt, qt->cmpl.q_idx);
if (err)
return err;
+ err = xdp_rxq_info_reg(&qt->xdp_rxq, fbn->netdev, qt->sub0.q_idx,
+ nv->napi.napi_id);
+ if (err)
+ goto free_page_pools;
+
+ err = xdp_rxq_info_reg_mem_model(&qt->xdp_rxq, MEM_TYPE_PAGE_POOL,
+ qt->sub0.page_pool);
+ if (err)
+ goto unreg_rxq;
+
+ err = fbnic_alloc_rx_ring_resources(fbn, &qt->sub0);
+ if (err)
+ goto unreg_mm;
+
err = fbnic_alloc_rx_ring_resources(fbn, &qt->sub1);
if (err)
goto free_sub0;
@@ -1727,19 +2017,21 @@ free_sub1:
fbnic_free_ring_resources(dev, &qt->sub1);
free_sub0:
fbnic_free_ring_resources(dev, &qt->sub0);
+unreg_mm:
+ xdp_rxq_info_unreg_mem_model(&qt->xdp_rxq);
+unreg_rxq:
+ xdp_rxq_info_unreg(&qt->xdp_rxq);
+free_page_pools:
+ fbnic_free_qt_page_pools(qt);
return err;
}
static void fbnic_free_nv_resources(struct fbnic_net *fbn,
struct fbnic_napi_vector *nv)
{
- int i, j;
-
- /* Free Tx Resources */
- for (i = 0; i < nv->txt_count; i++)
- fbnic_free_qt_resources(fbn, &nv->qt[i]);
+ int i;
- for (j = 0; j < nv->rxt_count; j++, i++)
+ for (i = 0; i < nv->txt_count + nv->rxt_count; i++)
fbnic_free_qt_resources(fbn, &nv->qt[i]);
}
@@ -1752,19 +2044,19 @@ static int fbnic_alloc_nv_resources(struct fbnic_net *fbn,
for (i = 0; i < nv->txt_count; i++) {
err = fbnic_alloc_tx_qt_resources(fbn, &nv->qt[i]);
if (err)
- goto free_resources;
+ goto free_qt_resources;
}
/* Allocate Rx Resources */
for (j = 0; j < nv->rxt_count; j++, i++) {
- err = fbnic_alloc_rx_qt_resources(fbn, &nv->qt[i]);
+ err = fbnic_alloc_rx_qt_resources(fbn, nv, &nv->qt[i]);
if (err)
- goto free_resources;
+ goto free_qt_resources;
}
return 0;
-free_resources:
+free_qt_resources:
while (i--)
fbnic_free_qt_resources(fbn, &nv->qt[i]);
return err;
@@ -1871,6 +2163,15 @@ static void fbnic_disable_twq0(struct fbnic_ring *txr)
fbnic_ring_wr32(txr, FBNIC_QUEUE_TWQ0_CTL, twq_ctl);
}
+static void fbnic_disable_twq1(struct fbnic_ring *txr)
+{
+ u32 twq_ctl = fbnic_ring_rd32(txr, FBNIC_QUEUE_TWQ1_CTL);
+
+ twq_ctl &= ~FBNIC_QUEUE_TWQ_CTL_ENABLE;
+
+ fbnic_ring_wr32(txr, FBNIC_QUEUE_TWQ1_CTL, twq_ctl);
+}
+
static void fbnic_disable_tcq(struct fbnic_ring *txr)
{
fbnic_ring_wr32(txr, FBNIC_QUEUE_TCQ_CTL, 0);
@@ -1897,36 +2198,48 @@ void fbnic_napi_disable(struct fbnic_net *fbn)
int i;
for (i = 0; i < fbn->num_napi; i++) {
- napi_disable(&fbn->napi[i]->napi);
+ napi_disable_locked(&fbn->napi[i]->napi);
fbnic_nv_irq_disable(fbn->napi[i]);
}
}
-void fbnic_disable(struct fbnic_net *fbn)
+static void __fbnic_nv_disable(struct fbnic_napi_vector *nv)
{
- struct fbnic_dev *fbd = fbn->fbd;
- int i, j, t;
-
- for (i = 0; i < fbn->num_napi; i++) {
- struct fbnic_napi_vector *nv = fbn->napi[i];
+ int i, t;
- /* Disable Tx queue triads */
- for (t = 0; t < nv->txt_count; t++) {
- struct fbnic_q_triad *qt = &nv->qt[t];
+ /* Disable Tx queue triads */
+ for (t = 0; t < nv->txt_count; t++) {
+ struct fbnic_q_triad *qt = &nv->qt[t];
- fbnic_disable_twq0(&qt->sub0);
- fbnic_disable_tcq(&qt->cmpl);
- }
+ fbnic_disable_twq0(&qt->sub0);
+ fbnic_disable_twq1(&qt->sub1);
+ fbnic_disable_tcq(&qt->cmpl);
+ }
- /* Disable Rx queue triads */
- for (j = 0; j < nv->rxt_count; j++, t++) {
- struct fbnic_q_triad *qt = &nv->qt[t];
+ /* Disable Rx queue triads */
+ for (i = 0; i < nv->rxt_count; i++, t++) {
+ struct fbnic_q_triad *qt = &nv->qt[t];
- fbnic_disable_bdq(&qt->sub0, &qt->sub1);
- fbnic_disable_rcq(&qt->cmpl);
- }
+ fbnic_disable_bdq(&qt->sub0, &qt->sub1);
+ fbnic_disable_rcq(&qt->cmpl);
}
+}
+
+static void
+fbnic_nv_disable(struct fbnic_net *fbn, struct fbnic_napi_vector *nv)
+{
+ __fbnic_nv_disable(nv);
+ fbnic_wrfl(fbn->fbd);
+}
+
+void fbnic_disable(struct fbnic_net *fbn)
+{
+ struct fbnic_dev *fbd = fbn->fbd;
+ int i;
+
+ for (i = 0; i < fbn->num_napi; i++)
+ __fbnic_nv_disable(fbn->napi[i]);
fbnic_wrfl(fbd);
}
@@ -2015,73 +2328,119 @@ int fbnic_wait_all_queues_idle(struct fbnic_dev *fbd, bool may_fail)
return err;
}
-void fbnic_flush(struct fbnic_net *fbn)
+static int
+fbnic_wait_queue_idle(struct fbnic_net *fbn, bool rx, unsigned int idx)
{
- int i;
+ static const unsigned int tx_regs[] = {
+ FBNIC_QM_TWQ_IDLE(0), FBNIC_QM_TQS_IDLE(0),
+ FBNIC_QM_TDE_IDLE(0), FBNIC_QM_TCQ_IDLE(0),
+ }, rx_regs[] = {
+ FBNIC_QM_HPQ_IDLE(0), FBNIC_QM_PPQ_IDLE(0),
+ FBNIC_QM_RCQ_IDLE(0),
+ };
+ struct fbnic_dev *fbd = fbn->fbd;
+ unsigned int val, mask, off;
+ const unsigned int *regs;
+ unsigned int reg_cnt;
+ int i, err;
- for (i = 0; i < fbn->num_napi; i++) {
- struct fbnic_napi_vector *nv = fbn->napi[i];
- int j, t;
+ regs = rx ? rx_regs : tx_regs;
+ reg_cnt = rx ? ARRAY_SIZE(rx_regs) : ARRAY_SIZE(tx_regs);
- /* Flush any processed Tx Queue Triads and drop the rest */
- for (t = 0; t < nv->txt_count; t++) {
- struct fbnic_q_triad *qt = &nv->qt[t];
- struct netdev_queue *tx_queue;
+ off = idx / 32;
+ mask = BIT(idx % 32);
- /* Clean the work queues of unprocessed work */
- fbnic_clean_twq0(nv, 0, &qt->sub0, true, qt->sub0.tail);
+ for (i = 0; i < reg_cnt; i++) {
+ err = read_poll_timeout_atomic(fbnic_rd32, val, val & mask,
+ 2, 500000, false,
+ fbd, regs[i] + off);
+ if (err) {
+ netdev_err(fbd->netdev,
+ "wait for queue %s%d idle failed 0x%04x(%d): %08x (mask: %08x)\n",
+ rx ? "Rx" : "Tx", idx, regs[i] + off, i,
+ val, mask);
+ return err;
+ }
+ }
- /* Reset completion queue descriptor ring */
- memset(qt->cmpl.desc, 0, qt->cmpl.size);
+ return 0;
+}
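The off/mask computation above packs one idle bit per queue into 32-bit register words; a small sketch of the indexing, assuming 32 queues per word as the code implies:

	#include <stdio.h>

	#define BIT(n) (1u << (n))

	int main(void)
	{
		unsigned int idx = 70;			/* hypothetical queue index */
		unsigned int off = idx / 32;		/* register word: 2 */
		unsigned int mask = BIT(idx % 32);	/* bit 6: 0x00000040 */

		printf("word %u, mask 0x%08x\n", off, mask);
		return 0;
	}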
- /* Nothing else to do if Tx queue is disabled */
- if (qt->sub0.flags & FBNIC_RING_F_DISABLED)
- continue;
+static void fbnic_nv_flush(struct fbnic_napi_vector *nv)
+{
+ int j, t;
- /* Reset BQL associated with Tx queue */
- tx_queue = netdev_get_tx_queue(nv->napi.dev,
- qt->sub0.q_idx);
- netdev_tx_reset_queue(tx_queue);
- }
+ /* Flush any processed Tx Queue Triads and drop the rest */
+ for (t = 0; t < nv->txt_count; t++) {
+ struct fbnic_q_triad *qt = &nv->qt[t];
+ struct netdev_queue *tx_queue;
- /* Flush any processed Rx Queue Triads and drop the rest */
- for (j = 0; j < nv->rxt_count; j++, t++) {
- struct fbnic_q_triad *qt = &nv->qt[t];
+ /* Clean the work queues of unprocessed work */
+ fbnic_clean_twq0(nv, 0, &qt->sub0, true, qt->sub0.tail);
+ fbnic_clean_twq1(nv, false, &qt->sub1, true,
+ qt->sub1.tail);
- /* Clean the work queues of unprocessed work */
- fbnic_clean_bdq(nv, 0, &qt->sub0, qt->sub0.tail);
- fbnic_clean_bdq(nv, 0, &qt->sub1, qt->sub1.tail);
+ /* Reset completion queue descriptor ring */
+ memset(qt->cmpl.desc, 0, qt->cmpl.size);
- /* Reset completion queue descriptor ring */
- memset(qt->cmpl.desc, 0, qt->cmpl.size);
+ /* Nothing else to do if Tx queue is disabled */
+ if (qt->sub0.flags & FBNIC_RING_F_DISABLED)
+ continue;
- fbnic_put_pkt_buff(nv, qt->cmpl.pkt, 0);
- qt->cmpl.pkt->buff.data_hard_start = NULL;
- }
+ /* Reset BQL associated with Tx queue */
+ tx_queue = netdev_get_tx_queue(nv->napi.dev,
+ qt->sub0.q_idx);
+ netdev_tx_reset_queue(tx_queue);
+ }
+
+ /* Flush any processed Rx Queue Triads and drop the rest */
+ for (j = 0; j < nv->rxt_count; j++, t++) {
+ struct fbnic_q_triad *qt = &nv->qt[t];
+
+ /* Clean the work queues of unprocessed work */
+ fbnic_clean_bdq(&qt->sub0, qt->sub0.tail, 0);
+ fbnic_clean_bdq(&qt->sub1, qt->sub1.tail, 0);
+
+ /* Reset completion queue descriptor ring */
+ memset(qt->cmpl.desc, 0, qt->cmpl.size);
+
+ fbnic_put_pkt_buff(qt, qt->cmpl.pkt, 0);
+ memset(qt->cmpl.pkt, 0, sizeof(struct fbnic_pkt_buff));
}
}
-void fbnic_fill(struct fbnic_net *fbn)
+void fbnic_flush(struct fbnic_net *fbn)
{
int i;
- for (i = 0; i < fbn->num_napi; i++) {
- struct fbnic_napi_vector *nv = fbn->napi[i];
- int j, t;
+ for (i = 0; i < fbn->num_napi; i++)
+ fbnic_nv_flush(fbn->napi[i]);
+}
- /* Configure NAPI mapping and populate pages
- * in the BDQ rings to use for Rx
- */
- for (j = 0, t = nv->txt_count; j < nv->rxt_count; j++, t++) {
- struct fbnic_q_triad *qt = &nv->qt[t];
+static void fbnic_nv_fill(struct fbnic_napi_vector *nv)
+{
+ int j, t;
- /* Populate the header and payload BDQs */
- fbnic_fill_bdq(nv, &qt->sub0);
- fbnic_fill_bdq(nv, &qt->sub1);
- }
+ /* Configure NAPI mapping and populate pages
+ * in the BDQ rings to use for Rx
+ */
+ for (j = 0, t = nv->txt_count; j < nv->rxt_count; j++, t++) {
+ struct fbnic_q_triad *qt = &nv->qt[t];
+
+ /* Populate the header and payload BDQs */
+ fbnic_fill_bdq(&qt->sub0);
+ fbnic_fill_bdq(&qt->sub1);
}
}
+void fbnic_fill(struct fbnic_net *fbn)
+{
+ int i;
+
+ for (i = 0; i < fbn->num_napi; i++)
+ fbnic_nv_fill(fbn->napi[i]);
+}
+
static void fbnic_enable_twq0(struct fbnic_ring *twq)
{
u32 log_size = fls(twq->size_mask);
@@ -2104,6 +2463,28 @@ static void fbnic_enable_twq0(struct fbnic_ring *twq)
fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ0_CTL, FBNIC_QUEUE_TWQ_CTL_ENABLE);
}
+static void fbnic_enable_twq1(struct fbnic_ring *twq)
+{
+ u32 log_size = fls(twq->size_mask);
+
+ if (!twq->size_mask)
+ return;
+
+ /* Reset head/tail */
+ fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ1_CTL, FBNIC_QUEUE_TWQ_CTL_RESET);
+ twq->tail = 0;
+ twq->head = 0;
+
+ /* Store descriptor ring address and size */
+ fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ1_BAL, lower_32_bits(twq->dma));
+ fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ1_BAH, upper_32_bits(twq->dma));
+
+ /* Write lower 4 bits of log size as 64K ring size is 0 */
+ fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ1_SIZE, log_size & 0xf);
+
+ fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ1_CTL, FBNIC_QUEUE_TWQ_CTL_ENABLE);
+}
+
static void fbnic_enable_tcq(struct fbnic_napi_vector *nv,
struct fbnic_ring *tcq)
{
@@ -2232,13 +2613,22 @@ static void fbnic_enable_rcq(struct fbnic_napi_vector *nv,
{
struct fbnic_net *fbn = netdev_priv(nv->napi.dev);
u32 log_size = fls(rcq->size_mask);
- u32 rcq_ctl;
+ u32 hds_thresh = fbn->hds_thresh;
+ u32 rcq_ctl = 0;
fbnic_config_drop_mode_rcq(nv, rcq);
- rcq_ctl = FIELD_PREP(FBNIC_QUEUE_RDE_CTL1_PADLEN_MASK, FBNIC_RX_PAD) |
- FIELD_PREP(FBNIC_QUEUE_RDE_CTL1_MAX_HDR_MASK,
- FBNIC_RX_MAX_HDR) |
+ /* Enforce a lower bound on MAX_HEADER_BYTES. Below this threshold
+ * frames would no longer be split at L4, and could instead be split
+ * at L2/L3 depending on the frame size.
+ */
+ if (fbn->hds_thresh < FBNIC_HDR_BYTES_MIN) {
+ rcq_ctl = FBNIC_QUEUE_RDE_CTL0_EN_HDR_SPLIT;
+ hds_thresh = FBNIC_HDR_BYTES_MIN;
+ }
+
+ rcq_ctl |= FIELD_PREP(FBNIC_QUEUE_RDE_CTL1_PADLEN_MASK, FBNIC_RX_PAD) |
+ FIELD_PREP(FBNIC_QUEUE_RDE_CTL1_MAX_HDR_MASK, hds_thresh) |
FIELD_PREP(FBNIC_QUEUE_RDE_CTL1_PAYLD_OFF_MASK,
FBNIC_RX_PAYLD_OFFSET) |
FIELD_PREP(FBNIC_QUEUE_RDE_CTL1_PAYLD_PG_CL_MASK,
@@ -2266,32 +2656,47 @@ static void fbnic_enable_rcq(struct fbnic_napi_vector *nv,
fbnic_ring_wr32(rcq, FBNIC_QUEUE_RCQ_CTL, FBNIC_QUEUE_RCQ_CTL_ENABLE);
}
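The clamp above in effect floors the programmed threshold while forcing header split on; an equivalent helper, illustrative only and not part of the driver, assuming just the constants introduced in this patch:

	/* Value written to RDE_CTL1: max(fbn->hds_thresh, FBNIC_HDR_BYTES_MIN),
	 * with EN_HDR_SPLIT set whenever the clamp takes effect.
	 */
	static u32 fbnic_hw_hds_thresh(u32 cfg)
	{
		return cfg < FBNIC_HDR_BYTES_MIN ? FBNIC_HDR_BYTES_MIN : cfg;
	}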
-void fbnic_enable(struct fbnic_net *fbn)
+static void __fbnic_nv_enable(struct fbnic_napi_vector *nv)
{
- struct fbnic_dev *fbd = fbn->fbd;
- int i;
+ int j, t;
- for (i = 0; i < fbn->num_napi; i++) {
- struct fbnic_napi_vector *nv = fbn->napi[i];
- int j, t;
+ /* Setup Tx Queue Triads */
+ for (t = 0; t < nv->txt_count; t++) {
+ struct fbnic_q_triad *qt = &nv->qt[t];
- /* Setup Tx Queue Triads */
- for (t = 0; t < nv->txt_count; t++) {
- struct fbnic_q_triad *qt = &nv->qt[t];
+ fbnic_enable_twq0(&qt->sub0);
+ fbnic_enable_twq1(&qt->sub1);
+ fbnic_enable_tcq(nv, &qt->cmpl);
+ }
- fbnic_enable_twq0(&qt->sub0);
- fbnic_enable_tcq(nv, &qt->cmpl);
- }
+ /* Setup Rx Queue Triads */
+ for (j = 0; j < nv->rxt_count; j++, t++) {
+ struct fbnic_q_triad *qt = &nv->qt[t];
- /* Setup Rx Queue Triads */
- for (j = 0; j < nv->rxt_count; j++, t++) {
- struct fbnic_q_triad *qt = &nv->qt[t];
+ page_pool_enable_direct_recycling(qt->sub0.page_pool,
+ &nv->napi);
+ page_pool_enable_direct_recycling(qt->sub1.page_pool,
+ &nv->napi);
- fbnic_enable_bdq(&qt->sub0, &qt->sub1);
- fbnic_config_drop_mode_rcq(nv, &qt->cmpl);
- fbnic_enable_rcq(nv, &qt->cmpl);
- }
+ fbnic_enable_bdq(&qt->sub0, &qt->sub1);
+ fbnic_config_drop_mode_rcq(nv, &qt->cmpl);
+ fbnic_enable_rcq(nv, &qt->cmpl);
}
+}
+
+static void fbnic_nv_enable(struct fbnic_net *fbn, struct fbnic_napi_vector *nv)
+{
+ __fbnic_nv_enable(nv);
+ fbnic_wrfl(fbn->fbd);
+}
+
+void fbnic_enable(struct fbnic_net *fbn)
+{
+ struct fbnic_dev *fbd = fbn->fbd;
+ int i;
+
+ for (i = 0; i < fbn->num_napi; i++)
+ __fbnic_nv_enable(fbn->napi[i]);
fbnic_wrfl(fbd);
}
@@ -2310,7 +2715,7 @@ void fbnic_napi_enable(struct fbnic_net *fbn)
for (i = 0; i < fbn->num_napi; i++) {
struct fbnic_napi_vector *nv = fbn->napi[i];
- napi_enable(&nv->napi);
+ napi_enable_locked(&nv->napi);
fbnic_nv_irq_enable(nv);
@@ -2363,3 +2768,123 @@ void fbnic_napi_depletion_check(struct net_device *netdev)
fbnic_wrfl(fbd);
}
+
+static int fbnic_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
+{
+ struct fbnic_net *fbn = netdev_priv(dev);
+ const struct fbnic_q_triad *real;
+ struct fbnic_q_triad *qt = qmem;
+ struct fbnic_napi_vector *nv;
+
+ if (!netif_running(dev))
+ return fbnic_alloc_qt_page_pools(fbn, qt, idx);
+
+ real = container_of(fbn->rx[idx], struct fbnic_q_triad, cmpl);
+ nv = fbn->napi[idx % fbn->num_napi];
+
+ fbnic_ring_init(&qt->sub0, real->sub0.doorbell, real->sub0.q_idx,
+ real->sub0.flags);
+ fbnic_ring_init(&qt->sub1, real->sub1.doorbell, real->sub1.q_idx,
+ real->sub1.flags);
+ fbnic_ring_init(&qt->cmpl, real->cmpl.doorbell, real->cmpl.q_idx,
+ real->cmpl.flags);
+
+ return fbnic_alloc_rx_qt_resources(fbn, nv, qt);
+}
+
+static void fbnic_queue_mem_free(struct net_device *dev, void *qmem)
+{
+ struct fbnic_net *fbn = netdev_priv(dev);
+ struct fbnic_q_triad *qt = qmem;
+
+ if (!netif_running(dev))
+ fbnic_free_qt_page_pools(qt);
+ else
+ fbnic_free_qt_resources(fbn, qt);
+}
+
+static void __fbnic_nv_restart(struct fbnic_net *fbn,
+ struct fbnic_napi_vector *nv)
+{
+ struct fbnic_dev *fbd = fbn->fbd;
+ int i;
+
+ fbnic_nv_enable(fbn, nv);
+ fbnic_nv_fill(nv);
+
+ napi_enable_locked(&nv->napi);
+ fbnic_nv_irq_enable(nv);
+ fbnic_wr32(fbd, FBNIC_INTR_SET(nv->v_idx / 32), BIT(nv->v_idx % 32));
+ fbnic_wrfl(fbd);
+
+ for (i = 0; i < nv->txt_count; i++)
+ netif_wake_subqueue(fbn->netdev, nv->qt[i].sub0.q_idx);
+}
+
+static int fbnic_queue_start(struct net_device *dev, void *qmem, int idx)
+{
+ struct fbnic_net *fbn = netdev_priv(dev);
+ struct fbnic_napi_vector *nv;
+ struct fbnic_q_triad *real;
+
+ real = container_of(fbn->rx[idx], struct fbnic_q_triad, cmpl);
+ nv = fbn->napi[idx % fbn->num_napi];
+
+ fbnic_aggregate_ring_rx_counters(fbn, &real->sub0);
+ fbnic_aggregate_ring_rx_counters(fbn, &real->sub1);
+ fbnic_aggregate_ring_rx_counters(fbn, &real->cmpl);
+
+ memcpy(real, qmem, sizeof(*real));
+
+ __fbnic_nv_restart(fbn, nv);
+
+ return 0;
+}
+
+static int fbnic_queue_stop(struct net_device *dev, void *qmem, int idx)
+{
+ struct fbnic_net *fbn = netdev_priv(dev);
+ const struct fbnic_q_triad *real;
+ struct fbnic_napi_vector *nv;
+ int i, t;
+ int err;
+
+ real = container_of(fbn->rx[idx], struct fbnic_q_triad, cmpl);
+ nv = fbn->napi[idx % fbn->num_napi];
+
+ napi_disable_locked(&nv->napi);
+ fbnic_nv_irq_disable(nv);
+
+ for (i = 0; i < nv->txt_count; i++)
+ netif_stop_subqueue(dev, nv->qt[i].sub0.q_idx);
+ fbnic_nv_disable(fbn, nv);
+
+ for (t = 0; t < nv->txt_count + nv->rxt_count; t++) {
+ err = fbnic_wait_queue_idle(fbn, t >= nv->txt_count,
+ nv->qt[t].sub0.q_idx);
+ if (err)
+ goto err_restart;
+ }
+
+ fbnic_synchronize_irq(fbn->fbd, nv->v_idx);
+ fbnic_nv_flush(nv);
+
+ page_pool_disable_direct_recycling(real->sub0.page_pool);
+ page_pool_disable_direct_recycling(real->sub1.page_pool);
+
+ memcpy(qmem, real, sizeof(*real));
+
+ return 0;
+
+err_restart:
+ __fbnic_nv_restart(fbn, nv);
+ return err;
+}
+
+const struct netdev_queue_mgmt_ops fbnic_queue_mgmt_ops = {
+ .ndo_queue_mem_size = sizeof(struct fbnic_q_triad),
+ .ndo_queue_mem_alloc = fbnic_queue_mem_alloc,
+ .ndo_queue_mem_free = fbnic_queue_mem_free,
+ .ndo_queue_start = fbnic_queue_start,
+ .ndo_queue_stop = fbnic_queue_stop,
+};
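For readers unfamiliar with the queue management API, a sketch of the order in which the core is expected to invoke these callbacks when restarting Rx queue idx; simplified from net/core/netdev_rx_queue.c with error handling elided, so treat it as an approximation rather than the exact core flow:

	new_mem = kvzalloc(ops->ndo_queue_mem_size, GFP_KERNEL);
	err = ops->ndo_queue_mem_alloc(dev, new_mem, idx);	/* prealloc new triad */
	err = ops->ndo_queue_stop(dev, old_mem, idx);		/* snapshot old triad */
	err = ops->ndo_queue_start(dev, new_mem, idx);		/* swap in new triad */
	ops->ndo_queue_mem_free(dev, old_mem);			/* release old resources */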
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
index 34693596e5eb..31fac0ba0902 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
@@ -35,6 +35,7 @@ struct fbnic_net;
#define FBNIC_MAX_TXQS 128u
#define FBNIC_MAX_RXQS 128u
+#define FBNIC_MAX_XDPQS 128u
/* These apply to TWQs, TCQ, RCQ */
#define FBNIC_QUEUE_SIZE_MIN 16u
@@ -50,10 +51,10 @@ struct fbnic_net;
#define FBNIC_RX_TROOM \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
+#define FBNIC_RX_HROOM_PAD 128
#define FBNIC_RX_HROOM \
- (ALIGN(FBNIC_RX_TROOM + NET_SKB_PAD, 128) - FBNIC_RX_TROOM)
+ (ALIGN(FBNIC_RX_TROOM + FBNIC_RX_HROOM_PAD, 128) - FBNIC_RX_TROOM)
#define FBNIC_RX_PAD 0
-#define FBNIC_RX_MAX_HDR (1536 - FBNIC_RX_PAD)
#define FBNIC_RX_PAYLD_OFFSET 0
#define FBNIC_RX_PAYLD_PG_CL 0
@@ -61,12 +62,16 @@ struct fbnic_net;
#define FBNIC_RING_F_CTX BIT(1)
#define FBNIC_RING_F_STATS BIT(2) /* Ring's stats may be used */
+#define FBNIC_HDS_THRESH_MAX \
+ (4096 - FBNIC_RX_HROOM - FBNIC_RX_TROOM - FBNIC_RX_PAD)
+#define FBNIC_HDS_THRESH_DEFAULT \
+ (1536 - FBNIC_RX_PAD)
+#define FBNIC_HDR_BYTES_MIN 128
+
struct fbnic_pkt_buff {
struct xdp_buff buff;
ktime_t hwtstamp;
- u32 data_truesize;
- u16 data_len;
- u16 nr_frags;
+ bool add_frag_failed;
};
struct fbnic_queue_stats {
@@ -85,6 +90,7 @@ struct fbnic_queue_stats {
u64 alloc_failed;
u64 csum_complete;
u64 csum_none;
+ u64 length_errors;
} rx;
};
u64 dropped;
@@ -94,7 +100,7 @@ struct fbnic_queue_stats {
#define FBNIC_PAGECNT_BIAS_MAX PAGE_SIZE
struct fbnic_rx_buf {
- struct page *page;
+ netmem_ref netmem;
long pagecnt_bias;
};
@@ -115,6 +121,17 @@ struct fbnic_ring {
u32 head, tail; /* Head/Tail of ring */
+ union {
+ /* Rx BDQs only */
+ struct page_pool *page_pool;
+
+ /* Deferred_head is used to cache the head for TWQ1 if
+ * an attempt is made to clean TWQ1 with zero napi_budget.
+ * We do not use it for any other ring.
+ */
+ s32 deferred_head;
+ };
+
struct fbnic_queue_stats stats;
/* Slow path fields follow */
@@ -124,12 +141,12 @@ struct fbnic_ring {
struct fbnic_q_triad {
struct fbnic_ring sub0, sub1, cmpl;
+ struct xdp_rxq_info xdp_rxq;
};
struct fbnic_napi_vector {
struct napi_struct napi;
struct device *dev; /* Device for DMA unmapping */
- struct page_pool *page_pool;
struct fbnic_dev *fbd;
u16 v_idx;
@@ -139,6 +156,8 @@ struct fbnic_napi_vector {
struct fbnic_q_triad qt[];
};
+extern const struct netdev_queue_mgmt_ops fbnic_queue_mgmt_ops;
+
netdev_tx_t fbnic_xmit_frame(struct sk_buff *skb, struct net_device *dev);
netdev_features_t
fbnic_features_check(struct sk_buff *skb, struct net_device *dev,
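
To make the sizing above concrete, a worked example assuming SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) evaluates to 320 bytes (typical on a 64-bit build; the exact value is config-dependent):

	FBNIC_RX_TROOM           = 320
	FBNIC_RX_HROOM           = ALIGN(320 + 128, 128) - 320 = 448 - 320 = 128
	FBNIC_HDS_THRESH_MAX     = 4096 - 128 - 320 - 0 = 3648
	FBNIC_HDS_THRESH_DEFAULT = 1536 - 0 = 1536

So the default header-split threshold keeps the old 1536-byte FBNIC_RX_MAX_HDR value, while the maximum allows headers up to 3648 bytes within a 4 KiB buffer.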
diff --git a/drivers/net/ethernet/microchip/lan865x/lan865x.c b/drivers/net/ethernet/microchip/lan865x/lan865x.c
index 79b800d2b72c..0277d9737369 100644
--- a/drivers/net/ethernet/microchip/lan865x/lan865x.c
+++ b/drivers/net/ethernet/microchip/lan865x/lan865x.c
@@ -326,6 +326,8 @@ static const struct net_device_ops lan865x_netdev_ops = {
.ndo_start_xmit = lan865x_send_packet,
.ndo_set_rx_mode = lan865x_set_multicast_list,
.ndo_set_mac_address = lan865x_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
};
static int lan865x_probe(struct spi_device *spi)
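
Both additions are stock kernel helpers rather than new driver code: eth_validate_addr() rejects invalid Ethernet addresses when userspace changes the MAC, and phy_do_ioctl_running() forwards the standard MII ioctls (e.g. SIOCGMIIPHY, SIOCGMIIREG) to the attached PHY, but only while the interface is up.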
diff --git a/drivers/net/ethernet/microchip/sparx5/Kconfig b/drivers/net/ethernet/microchip/sparx5/Kconfig
index 35e1c0cf345e..a4d6706590d2 100644
--- a/drivers/net/ethernet/microchip/sparx5/Kconfig
+++ b/drivers/net/ethernet/microchip/sparx5/Kconfig
@@ -3,7 +3,7 @@ config SPARX5_SWITCH
depends on NET_SWITCHDEV
depends on HAS_IOMEM
depends on OF
- depends on ARCH_SPARX5 || COMPILE_TEST
+ depends on ARCH_SPARX5 || ARCH_LAN969X || COMPILE_TEST
depends on PTP_1588_CLOCK_OPTIONAL
depends on BRIDGE || BRIDGE=n
select PHYLINK
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
index 832f4ae57c83..049541eeaae0 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
@@ -1212,6 +1212,22 @@ static int sparx5_get_ts_info(struct net_device *dev,
return 0;
}
+static void sparx5_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct sparx5_port *port = netdev_priv(dev);
+
+ phylink_ethtool_get_pauseparam(port->phylink, pause);
+}
+
+static int sparx5_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct sparx5_port *port = netdev_priv(dev);
+
+ return phylink_ethtool_set_pauseparam(port->phylink, pause);
+}
+
const struct ethtool_ops sparx5_ethtool_ops = {
.get_sset_count = sparx5_get_sset_count,
.get_strings = sparx5_get_sset_strings,
@@ -1224,6 +1240,8 @@ const struct ethtool_ops sparx5_ethtool_ops = {
.get_eth_ctrl_stats = sparx5_get_eth_mac_ctrl_stats,
.get_rmon_stats = sparx5_get_eth_rmon_stats,
.get_ts_info = sparx5_get_ts_info,
+ .get_pauseparam = sparx5_get_pauseparam,
+ .set_pauseparam = sparx5_set_pauseparam,
};
int sparx_stats_init(struct sparx5 *sparx5)
diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c
index ef072e24c46d..ada6c78a2bef 100644
--- a/drivers/net/ethernet/microsoft/mana/hw_channel.c
+++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c
@@ -881,7 +881,12 @@ int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
if (!wait_for_completion_timeout(&ctx->comp_event,
(msecs_to_jiffies(hwc->hwc_timeout)))) {
if (hwc->hwc_timeout != 0)
- dev_err(hwc->dev, "HWC: Request timed out!\n");
+ dev_err(hwc->dev, "HWC: Request timed out: %u ms\n",
+ hwc->hwc_timeout);
+
+ /* Reduce further waiting if the HWC is not responding */
+ if (hwc->hwc_timeout > 1)
+ hwc->hwc_timeout = 1;
err = -ETIMEDOUT;
goto out;
diff --git a/drivers/net/ethernet/microsoft/mana/mana_bpf.c b/drivers/net/ethernet/microsoft/mana/mana_bpf.c
index d30721d4516f..7697c9b52ed3 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_bpf.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_bpf.c
@@ -174,6 +174,7 @@ static int mana_xdp_set(struct net_device *ndev, struct bpf_prog *prog,
struct mana_port_context *apc = netdev_priv(ndev);
struct bpf_prog *old_prog;
struct gdma_context *gc;
+ int err;
gc = apc->ac->gdma_dev->gdma_context;
@@ -195,11 +196,45 @@ static int mana_xdp_set(struct net_device *ndev, struct bpf_prog *prog,
*/
apc->bpf_prog = prog;
- if (old_prog)
- bpf_prog_put(old_prog);
+ if (apc->port_is_up) {
+ /* Re-create the rxqs after an XDP prog is loaded or unloaded,
+ * e.g. to switch from full pages to smaller page fragments when
+ * the prog is unloaded, and vice versa.
+ */
+
+ /* Pre-allocate buffers to prevent failure in mana_attach */
+ err = mana_pre_alloc_rxbufs(apc, ndev->mtu, apc->num_queues);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "XDP: Insufficient memory for tx/rx re-config");
+ return err;
+ }
+
+ err = mana_detach(ndev, false);
+ if (err) {
+ netdev_err(ndev,
+ "mana_detach failed at xdp set: %d\n", err);
+ NL_SET_ERR_MSG_MOD(extack,
+ "XDP: Re-config failed at detach");
+ goto err_dealloc_rxbuffs;
+ }
+
+ err = mana_attach(ndev);
+ if (err) {
+ netdev_err(ndev,
+ "mana_attach failed at xdp set: %d\n", err);
+ NL_SET_ERR_MSG_MOD(extack,
+ "XDP: Re-config failed at attach");
+ goto err_dealloc_rxbuffs;
+ }
- if (apc->port_is_up)
mana_chn_setxdp(apc, prog);
+ mana_pre_dealloc_rxbufs(apc);
+ }
+
+ if (old_prog)
+ bpf_prog_put(old_prog);
if (prog)
ndev->max_mtu = MANA_XDP_MTU_MAX;
@@ -207,6 +242,11 @@ static int mana_xdp_set(struct net_device *ndev, struct bpf_prog *prog,
ndev->max_mtu = gc->adapter_mtu - ETH_HLEN;
return 0;
+
+err_dealloc_rxbuffs:
+ apc->bpf_prog = old_prog;
+ mana_pre_dealloc_rxbufs(apc);
+ return err;
}
int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
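
The hunk above amounts to a pre-allocate / tear-down / bring-up / roll-back sequence; condensed to its control flow (a summary of the code above, not additional driver logic):

	if (apc->port_is_up) {
		/* 1. reserve rx buffers so mana_attach() cannot fail on memory */
		err = mana_pre_alloc_rxbufs(apc, ndev->mtu, apc->num_queues);
		/* 2. rebuild the queues with the new rx buffer layout */
		err = err ?: mana_detach(ndev, false);
		err = err ?: mana_attach(ndev);
		if (err) {
			apc->bpf_prog = old_prog;	/* roll back on failure */
			mana_pre_dealloc_rxbufs(apc);
			return err;
		}
		mana_chn_setxdp(apc, prog);		/* 3. attach prog to channels */
		mana_pre_dealloc_rxbufs(apc);		/* drop the reservation */
	}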
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 550843e2164b..0142fd98392c 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -57,6 +57,15 @@ static bool mana_en_need_log(struct mana_port_context *apc, int err)
return true;
}
+static void mana_put_rx_page(struct mana_rxq *rxq, struct page *page,
+ bool from_pool)
+{
+ if (from_pool)
+ page_pool_put_full_page(rxq->page_pool, page, false);
+ else
+ put_page(page);
+}
+
/* Microsoft Azure Network Adapter (MANA) functions */
static int mana_open(struct net_device *ndev)
@@ -630,21 +639,40 @@ static void *mana_get_rxbuf_pre(struct mana_rxq *rxq, dma_addr_t *da)
}
/* Get RX buffer's data size, alloc size, XDP headroom based on MTU */
-static void mana_get_rxbuf_cfg(int mtu, u32 *datasize, u32 *alloc_size,
- u32 *headroom)
+static void mana_get_rxbuf_cfg(struct mana_port_context *apc,
+ int mtu, u32 *datasize, u32 *alloc_size,
+ u32 *headroom, u32 *frag_count)
{
- if (mtu > MANA_XDP_MTU_MAX)
- *headroom = 0; /* no support for XDP */
- else
- *headroom = XDP_PACKET_HEADROOM;
+ u32 len, buf_size;
- *alloc_size = SKB_DATA_ALIGN(mtu + MANA_RXBUF_PAD + *headroom);
+ /* Calculate datasize first (consistent across all cases) */
+ *datasize = mtu + ETH_HLEN;
- /* Using page pool in this case, so alloc_size is PAGE_SIZE */
- if (*alloc_size < PAGE_SIZE)
- *alloc_size = PAGE_SIZE;
+ /* For xdp and jumbo frames make sure only one packet fits per page */
+ if (mtu + MANA_RXBUF_PAD > PAGE_SIZE / 2 || mana_xdp_get(apc)) {
+ if (mana_xdp_get(apc)) {
+ *headroom = XDP_PACKET_HEADROOM;
+ *alloc_size = PAGE_SIZE;
+ } else {
+ *headroom = 0; /* no support for XDP */
+ *alloc_size = SKB_DATA_ALIGN(mtu + MANA_RXBUF_PAD +
+ *headroom);
+ }
- *datasize = mtu + ETH_HLEN;
+ *frag_count = 1;
+ return;
+ }
+
+ /* Standard MTU case - optimize for multiple packets per page */
+ *headroom = 0;
+
+ /* Calculate base buffer size needed */
+ len = SKB_DATA_ALIGN(mtu + MANA_RXBUF_PAD + *headroom);
+ buf_size = ALIGN(len, MANA_RX_FRAG_ALIGNMENT);
+
+ /* Calculate how many packets can fit in a page */
+ *frag_count = PAGE_SIZE / buf_size;
+ *alloc_size = buf_size;
}
int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu, int num_queues)
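
A worked example of the new sizing for a standard 1500-byte MTU on 4 KiB pages, assuming MANA_RXBUF_PAD is SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + ETH_HLEN ≈ 334 bytes and 64-byte fragment alignment (illustrative values):

	datasize   = 1500 + 14 = 1514
	len        = SKB_DATA_ALIGN(1500 + 334) = 1856
	buf_size   = ALIGN(1856, 64) = 1856
	frag_count = 4096 / 1856 = 2

Two standard-MTU packets now share one page, where the old code consumed a full page per packet; XDP and jumbo frames keep frag_count = 1.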
@@ -656,8 +684,9 @@ int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu, int num_qu
void *va;
int i;
- mana_get_rxbuf_cfg(new_mtu, &mpc->rxbpre_datasize,
- &mpc->rxbpre_alloc_size, &mpc->rxbpre_headroom);
+ mana_get_rxbuf_cfg(mpc, new_mtu, &mpc->rxbpre_datasize,
+ &mpc->rxbpre_alloc_size, &mpc->rxbpre_headroom,
+ &mpc->rxbpre_frag_count);
dev = mpc->ac->gdma_dev->gdma_context->dev;
@@ -1842,8 +1871,11 @@ drop_xdp:
drop:
if (from_pool) {
- page_pool_recycle_direct(rxq->page_pool,
- virt_to_head_page(buf_va));
+ if (rxq->frag_count == 1)
+ page_pool_recycle_direct(rxq->page_pool,
+ virt_to_head_page(buf_va));
+ else
+ page_pool_free_va(rxq->page_pool, buf_va, true);
} else {
WARN_ON_ONCE(rxq->xdp_save_va);
/* Save for reuse */
@@ -1859,33 +1891,46 @@ static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
dma_addr_t *da, bool *from_pool)
{
struct page *page;
+ u32 offset;
void *va;
-
*from_pool = false;
- /* Reuse XDP dropped page if available */
- if (rxq->xdp_save_va) {
- va = rxq->xdp_save_va;
- rxq->xdp_save_va = NULL;
- } else {
- page = page_pool_dev_alloc_pages(rxq->page_pool);
- if (!page)
+ /* Don't use fragments for jumbo frames or XDP, where a single
+ * buffer occupies the whole page.
+ */
+ if (rxq->frag_count == 1) {
+ /* Reuse XDP dropped page if available */
+ if (rxq->xdp_save_va) {
+ va = rxq->xdp_save_va;
+ page = virt_to_head_page(va);
+ rxq->xdp_save_va = NULL;
+ } else {
+ page = page_pool_dev_alloc_pages(rxq->page_pool);
+ if (!page)
+ return NULL;
+
+ *from_pool = true;
+ va = page_to_virt(page);
+ }
+
+ *da = dma_map_single(dev, va + rxq->headroom, rxq->datasize,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, *da)) {
+ mana_put_rx_page(rxq, page, *from_pool);
return NULL;
+ }
- *from_pool = true;
- va = page_to_virt(page);
+ return va;
}
- *da = dma_map_single(dev, va + rxq->headroom, rxq->datasize,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(dev, *da)) {
- if (*from_pool)
- page_pool_put_full_page(rxq->page_pool, page, false);
- else
- put_page(virt_to_head_page(va));
-
+ page = page_pool_dev_alloc_frag(rxq->page_pool, &offset,
+ rxq->alloc_size);
+ if (!page)
return NULL;
- }
+
+ va = page_to_virt(page) + offset;
+ *da = page_pool_get_dma_addr(page) + offset + rxq->headroom;
+ *from_pool = true;
return va;
}
@@ -1902,9 +1947,9 @@ static void mana_refill_rx_oob(struct device *dev, struct mana_rxq *rxq,
va = mana_get_rxfrag(rxq, dev, &da, &from_pool);
if (!va)
return;
-
- dma_unmap_single(dev, rxoob->sgl[0].address, rxq->datasize,
- DMA_FROM_DEVICE);
+ if (!rxoob->from_pool || rxq->frag_count == 1)
+ dma_unmap_single(dev, rxoob->sgl[0].address, rxq->datasize,
+ DMA_FROM_DEVICE);
*old_buf = rxoob->buf_va;
*old_fp = rxoob->from_pool;
@@ -2100,10 +2145,8 @@ static void mana_destroy_txq(struct mana_port_context *apc)
napi = &apc->tx_qp[i].tx_cq.napi;
if (apc->tx_qp[i].txq.napi_initialized) {
napi_synchronize(napi);
- netdev_lock_ops_to_full(napi->dev);
napi_disable_locked(napi);
netif_napi_del_locked(napi);
- netdev_unlock_full_to_ops(napi->dev);
apc->tx_qp[i].txq.napi_initialized = false;
}
mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object);
@@ -2256,10 +2299,8 @@ static int mana_create_txq(struct mana_port_context *apc,
mana_create_txq_debugfs(apc, i);
set_bit(NAPI_STATE_NO_BUSY_POLL, &cq->napi.state);
- netdev_lock_ops_to_full(net);
netif_napi_add_locked(net, &cq->napi, mana_poll);
napi_enable_locked(&cq->napi);
- netdev_unlock_full_to_ops(net);
txq->napi_initialized = true;
mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
@@ -2295,10 +2336,8 @@ static void mana_destroy_rxq(struct mana_port_context *apc,
if (napi_initialized) {
napi_synchronize(napi);
- netdev_lock_ops_to_full(napi->dev);
napi_disable_locked(napi);
netif_napi_del_locked(napi);
- netdev_unlock_full_to_ops(napi->dev);
}
xdp_rxq_info_unreg(&rxq->xdp_rxq);
@@ -2315,15 +2354,15 @@ static void mana_destroy_rxq(struct mana_port_context *apc,
if (!rx_oob->buf_va)
continue;
- dma_unmap_single(dev, rx_oob->sgl[0].address,
- rx_oob->sgl[0].size, DMA_FROM_DEVICE);
-
page = virt_to_head_page(rx_oob->buf_va);
- if (rx_oob->from_pool)
- page_pool_put_full_page(rxq->page_pool, page, false);
- else
- put_page(page);
+ if (rxq->frag_count == 1 || !rx_oob->from_pool) {
+ dma_unmap_single(dev, rx_oob->sgl[0].address,
+ rx_oob->sgl[0].size, DMA_FROM_DEVICE);
+ mana_put_rx_page(rxq, page, rx_oob->from_pool);
+ } else {
+ page_pool_free_va(rxq->page_pool, rx_oob->buf_va, true);
+ }
rx_oob->buf_va = NULL;
}
@@ -2429,11 +2468,22 @@ static int mana_create_page_pool(struct mana_rxq *rxq, struct gdma_context *gc)
struct page_pool_params pprm = {};
int ret;
- pprm.pool_size = mpc->rx_queue_size;
+ pprm.pool_size = mpc->rx_queue_size / rxq->frag_count + 1;
pprm.nid = gc->numa_node;
pprm.napi = &rxq->rx_cq.napi;
pprm.netdev = rxq->ndev;
pprm.order = get_order(rxq->alloc_size);
+ pprm.queue_idx = rxq->rxq_idx;
+ pprm.dev = gc->dev;
+
+ /* Let the page pool do the dma map when page sharing with multiple
+ * fragments enabled for rx buffers.
+ */
+ if (rxq->frag_count > 1) {
+ pprm.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+ pprm.max_len = PAGE_SIZE;
+ pprm.dma_dir = DMA_FROM_DEVICE;
+ }
rxq->page_pool = page_pool_create(&pprm);
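
With PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV the pool owns the DMA mapping, which is why the fragment path in mana_get_rxfrag() above can skip dma_map_single(). A minimal sketch of that allocation pattern (error handling elided):

	u32 offset;
	struct page *page;
	void *va;
	dma_addr_t da;

	page = page_pool_dev_alloc_frag(rxq->page_pool, &offset, rxq->alloc_size);
	if (page) {
		va = page_to_virt(page) + offset;
		da = page_pool_get_dma_addr(page) + offset + rxq->headroom;
		/* post (va, da) to the hardware receive queue */
	}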
@@ -2472,9 +2522,8 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
rxq->rxq_idx = rxq_idx;
rxq->rxobj = INVALID_MANA_HANDLE;
- mana_get_rxbuf_cfg(ndev->mtu, &rxq->datasize, &rxq->alloc_size,
- &rxq->headroom);
-
+ mana_get_rxbuf_cfg(apc, ndev->mtu, &rxq->datasize, &rxq->alloc_size,
+ &rxq->headroom, &rxq->frag_count);
/* Create page pool for RX queue */
err = mana_create_page_pool(rxq, gc);
if (err) {
@@ -2549,18 +2598,14 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
gc->cq_table[cq->gdma_id] = cq->gdma_cq;
- netdev_lock_ops_to_full(ndev);
netif_napi_add_weight_locked(ndev, &cq->napi, mana_poll, 1);
- netdev_unlock_full_to_ops(ndev);
WARN_ON(xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq_idx,
cq->napi.napi_id));
WARN_ON(xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
rxq->page_pool));
- netdev_lock_ops_to_full(ndev);
napi_enable_locked(&cq->napi);
- netdev_unlock_full_to_ops(ndev);
mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
out:
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/tls.c b/drivers/net/ethernet/netronome/nfp/crypto/tls.c
index f80f1a6953fa..f252ecdcd2cd 100644
--- a/drivers/net/ethernet/netronome/nfp/crypto/tls.c
+++ b/drivers/net/ethernet/netronome/nfp/crypto/tls.c
@@ -495,14 +495,13 @@ int nfp_net_tls_rx_resync_req(struct net_device *netdev,
switch (ipv6h->version) {
case 4:
- sk = inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
- iph->saddr, th->source, iph->daddr,
- th->dest, netdev->ifindex);
+ sk = inet_lookup_established(net, iph->saddr, th->source,
+ iph->daddr, th->dest,
+ netdev->ifindex);
break;
#if IS_ENABLED(CONFIG_IPV6)
case 6:
- sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
- &ipv6h->saddr, th->source,
+ sk = __inet6_lookup_established(net, &ipv6h->saddr, th->source,
&ipv6h->daddr, ntohs(th->dest),
netdev->ifindex, 0);
break;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
index 80e4675582bf..dde60c4572fa 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
@@ -564,8 +564,8 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
/* Init ring buffer and unallocated stats_ids. */
priv->stats_ids.free_list.buf =
- vmalloc(array_size(NFP_FL_STATS_ELEM_RS,
- priv->stats_ring_size));
+ vmalloc_array(priv->stats_ring_size,
+ NFP_FL_STATS_ELEM_RS);
if (!priv->stats_ids.free_list.buf)
goto err_free_last_used;
diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/dp.c b/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
index 08086eb76996..91a227929a5f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
@@ -1169,14 +1169,10 @@ int nfp_nfd3_poll(struct napi_struct *napi, int budget)
if (r_vec->nfp_net->rx_coalesce_adapt_on && r_vec->rx_ring) {
struct dim_sample dim_sample = {};
- unsigned int start;
u64 pkts, bytes;
- do {
- start = u64_stats_fetch_begin(&r_vec->rx_sync);
- pkts = r_vec->rx_pkts;
- bytes = r_vec->rx_bytes;
- } while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
+ pkts = r_vec->rx_pkts;
+ bytes = r_vec->rx_bytes;
dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
net_dim(&r_vec->rx_dim, &dim_sample);
@@ -1184,14 +1180,10 @@ int nfp_nfd3_poll(struct napi_struct *napi, int budget)
if (r_vec->nfp_net->tx_coalesce_adapt_on && r_vec->tx_ring) {
struct dim_sample dim_sample = {};
- unsigned int start;
u64 pkts, bytes;
- do {
- start = u64_stats_fetch_begin(&r_vec->tx_sync);
- pkts = r_vec->tx_pkts;
- bytes = r_vec->tx_bytes;
- } while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
+ pkts = r_vec->tx_pkts;
+ bytes = r_vec->tx_bytes;
dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
net_dim(&r_vec->tx_dim, &dim_sample);
diff --git a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
index ab3cd06ed63e..ee0db3d5fd66 100644
--- a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
@@ -1279,14 +1279,10 @@ int nfp_nfdk_poll(struct napi_struct *napi, int budget)
if (r_vec->nfp_net->rx_coalesce_adapt_on && r_vec->rx_ring) {
struct dim_sample dim_sample = {};
- unsigned int start;
u64 pkts, bytes;
- do {
- start = u64_stats_fetch_begin(&r_vec->rx_sync);
- pkts = r_vec->rx_pkts;
- bytes = r_vec->rx_bytes;
- } while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
+ pkts = r_vec->rx_pkts;
+ bytes = r_vec->rx_bytes;
dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
net_dim(&r_vec->rx_dim, &dim_sample);
@@ -1294,14 +1290,10 @@ int nfp_nfdk_poll(struct napi_struct *napi, int budget)
if (r_vec->nfp_net->tx_coalesce_adapt_on && r_vec->tx_ring) {
struct dim_sample dim_sample = {};
- unsigned int start;
u64 pkts, bytes;
- do {
- start = u64_stats_fetch_begin(&r_vec->tx_sync);
- pkts = r_vec->tx_pkts;
- bytes = r_vec->tx_bytes;
- } while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
+ pkts = r_vec->tx_pkts;
+ bytes = r_vec->tx_bytes;
dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
net_dim(&r_vec->tx_dim, &dim_sample);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index 71301dbd8fb5..48390b2fd44d 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -797,7 +797,7 @@ static int nfp_pci_probe(struct pci_dev *pdev,
pf->pdev = pdev;
pf->dev_info = dev_info;
- pf->wq = alloc_workqueue("nfp-%s", 0, 2, pci_name(pdev));
+ pf->wq = alloc_workqueue("nfp-%s", WQ_PERCPU, 2, pci_name(pdev));
if (!pf->wq) {
err = -ENOMEM;
goto err_pci_priv_unset;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index a36215195923..16c828dd5c1a 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -1788,7 +1788,7 @@ static u32 nfp_net_get_rxfh_key_size(struct net_device *netdev)
struct nfp_net *nn = netdev_priv(netdev);
if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
- return -EOPNOTSUPP;
+ return 0;
return nfp_net_rss_key_sz(nn);
}
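
Note the return type here is u32: the old -EOPNOTSUPP was implicitly converted to a huge positive key size (0xffffffa1) and reported to ethtool as such; returning 0 is the conventional way for get_rxfh_key_size() to report that no RSS key is available.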
diff --git a/drivers/net/ethernet/pensando/Kconfig b/drivers/net/ethernet/pensando/Kconfig
index 01fe76786f77..c99758adf3ad 100644
--- a/drivers/net/ethernet/pensando/Kconfig
+++ b/drivers/net/ethernet/pensando/Kconfig
@@ -24,6 +24,7 @@ config IONIC
select NET_DEVLINK
select DIMLIB
select PAGE_POOL
+ select AUXILIARY_BUS
help
This enables the support for the Pensando family of Ethernet
adapters. More specific information on this driver can be
diff --git a/drivers/net/ethernet/pensando/ionic/Makefile b/drivers/net/ethernet/pensando/ionic/Makefile
index 4e7642a2d25f..a598972fef41 100644
--- a/drivers/net/ethernet/pensando/ionic/Makefile
+++ b/drivers/net/ethernet/pensando/ionic/Makefile
@@ -5,5 +5,5 @@ obj-$(CONFIG_IONIC) := ionic.o
ionic-y := ionic_main.o ionic_bus_pci.o ionic_devlink.o ionic_dev.o \
ionic_debugfs.o ionic_lif.o ionic_rx_filter.o ionic_ethtool.o \
- ionic_txrx.o ionic_stats.o ionic_fw.o
+ ionic_txrx.o ionic_stats.o ionic_fw.o ionic_aux.o
ionic-$(CONFIG_PTP_1588_CLOCK) += ionic_phc.o
diff --git a/drivers/net/ethernet/pensando/ionic/ionic.h b/drivers/net/ethernet/pensando/ionic/ionic.h
index 04f00ea94230..85198e6a806e 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic.h
@@ -65,16 +65,9 @@ struct ionic {
int watchdog_period;
};
-struct ionic_admin_ctx {
- struct completion work;
- union ionic_adminq_cmd cmd;
- union ionic_adminq_comp comp;
-};
-
int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx);
int ionic_adminq_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx,
const int err, const bool do_msg);
-int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx);
int ionic_adminq_post_wait_nomsg(struct ionic_lif *lif, struct ionic_admin_ctx *ctx);
void ionic_adminq_netdev_err_print(struct ionic_lif *lif, u8 opcode,
u8 status, int err);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_api.h b/drivers/net/ethernet/pensando/ionic/ionic_api.h
new file mode 100644
index 000000000000..bd88666836b8
--- /dev/null
+++ b/drivers/net/ethernet/pensando/ionic/ionic_api.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#ifndef _IONIC_API_H_
+#define _IONIC_API_H_
+
+#include <linux/auxiliary_bus.h>
+#include "ionic_if.h"
+#include "ionic_regs.h"
+
+/**
+ * struct ionic_aux_dev - Auxiliary device information
+ * @lif: Logical interface
+ * @idx: Index identifier
+ * @adev: Auxiliary device
+ */
+struct ionic_aux_dev {
+ struct ionic_lif *lif;
+ int idx;
+ struct auxiliary_device adev;
+};
+
+/**
+ * struct ionic_admin_ctx - Admin command context
+ * @work: Work completion wait queue element
+ * @cmd: Admin command (64B) to be copied to the queue
+ * @comp: Admin completion (16B) copied from the queue
+ */
+struct ionic_admin_ctx {
+ struct completion work;
+ union ionic_adminq_cmd cmd;
+ union ionic_adminq_comp comp;
+};
+
+#define IONIC_INTR_INDEX_NOT_ASSIGNED -1
+#define IONIC_INTR_NAME_MAX_SZ 32
+
+/**
+ * struct ionic_intr_info - Interrupt information
+ * @name: Name identifier
+ * @rearm_count: Interrupt rearm count
+ * @index: Interrupt index position
+ * @vector: Interrupt number
+ * @dim_coal_hw: Interrupt coalesce value in hardware units
+ * @affinity_mask: CPU affinity mask
+ * @aff_notify: context for notification of IRQ affinity changes
+ */
+struct ionic_intr_info {
+ char name[IONIC_INTR_NAME_MAX_SZ];
+ u64 rearm_count;
+ unsigned int index;
+ unsigned int vector;
+ u32 dim_coal_hw;
+ cpumask_var_t *affinity_mask;
+ struct irq_affinity_notify aff_notify;
+};
+
+/**
+ * ionic_adminq_post_wait - Post an admin command and wait for response
+ * @lif: Logical interface
+ * @ctx: API admin command context
+ *
+ * Post the command to an admin queue in the ethernet driver and wait for the
+ * device to complete it. A zero return means the command was both posted and
+ * completed; the device's completion status is copied into @ctx->comp.
+ *
+ * Return: zero or negative error status
+ */
+int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx);
+
+/**
+ * ionic_error_to_errno - Transform ionic_if error codes to OS errno values
+ * @code: Ionic error number
+ *
+ * Return: Negative OS error number or zero
+ */
+int ionic_error_to_errno(enum ionic_status_code code);
+
+/**
+ * ionic_request_rdma_reset - request reset or disable the device or lif
+ * @lif: Logical interface
+ *
+ * The reset itself runs asynchronously; this call waits only for the device
+ * command that requests the reset to complete or time out.
+ */
+void ionic_request_rdma_reset(struct ionic_lif *lif);
+
+/**
+ * ionic_intr_alloc - Reserve a device interrupt
+ * @lif: Logical interface
+ * @intr: Reserved ionic interrupt structure
+ *
+ * Reserve an interrupt index and look up the IRQ number for that index.
+ *
+ * Return: zero or negative error status
+ */
+int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr);
+
+/**
+ * ionic_intr_free - Release a device interrupt index
+ * @lif: Logical interface
+ * @intr: Interrupt index
+ *
+ * Mark the interrupt index unused so that it can be reserved again.
+ */
+void ionic_intr_free(struct ionic_lif *lif, int intr);
+
+/**
+ * ionic_get_cmb - Reserve cmb pages
+ * @lif: Logical interface
+ * @pgid: First page index
+ * @pgaddr: First page bus addr (contiguous)
+ * @order: Log base two of the number of PAGE_SIZE pages
+ * @stride_log2: Log2 of the WQE stride, used to select the CMB pool
+ * @expdb: Will be set to true if this CMB region has expdb enabled
+ *
+ * Return: zero or negative error status
+ */
+int ionic_get_cmb(struct ionic_lif *lif, u32 *pgid, phys_addr_t *pgaddr,
+ int order, u8 stride_log2, bool *expdb);
+
+/**
+ * ionic_put_cmb - Release cmb pages
+ * @lif: Logical interface
+ * @pgid: First page index
+ * @order: Log base two of the number of PAGE_SIZE pages
+ */
+void ionic_put_cmb(struct ionic_lif *lif, u32 pgid, int order);
+
+#endif /* _IONIC_API_H_ */
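
For orientation, the intended consumer of this header is an RDMA driver bound through the auxiliary bus. A hypothetical skeleton (the "ionic.rdma" device name follows from the registering module's name plus the "rdma" string used in ionic_aux.c below; everything else here is illustrative):

	#include <linux/auxiliary_bus.h>
	#include <linux/module.h>
	#include "ionic_api.h"

	static int ionic_rdma_probe(struct auxiliary_device *adev,
				    const struct auxiliary_device_id *id)
	{
		struct ionic_aux_dev *ionic_adev =
			container_of(adev, struct ionic_aux_dev, adev);

		/* ionic_adev->lif is now usable with ionic_adminq_post_wait(),
		 * ionic_intr_alloc(), ionic_get_cmb(), etc.
		 */
		auxiliary_set_drvdata(adev, ionic_adev->lif);
		return 0;
	}

	static const struct auxiliary_device_id ionic_rdma_id_table[] = {
		{ .name = "ionic.rdma" },
		{ }
	};
	MODULE_DEVICE_TABLE(auxiliary, ionic_rdma_id_table);

	static struct auxiliary_driver ionic_rdma_driver = {
		.probe    = ionic_rdma_probe,
		.id_table = ionic_rdma_id_table,
	};
	module_auxiliary_driver(ionic_rdma_driver);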
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_aux.c b/drivers/net/ethernet/pensando/ionic/ionic_aux.c
new file mode 100644
index 000000000000..a2be338eb3e5
--- /dev/null
+++ b/drivers/net/ethernet/pensando/ionic/ionic_aux.c
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#include <linux/kernel.h>
+#include "ionic.h"
+#include "ionic_lif.h"
+#include "ionic_aux.h"
+
+static DEFINE_IDA(aux_ida);
+
+static void ionic_auxbus_release(struct device *dev)
+{
+ struct ionic_aux_dev *ionic_adev;
+
+ ionic_adev = container_of(dev, struct ionic_aux_dev, adev.dev);
+ ida_free(&aux_ida, ionic_adev->adev.id);
+ kfree(ionic_adev);
+}
+
+int ionic_auxbus_register(struct ionic_lif *lif)
+{
+ struct ionic_aux_dev *ionic_adev;
+ struct auxiliary_device *aux_dev;
+ int err, id;
+
+ if (!(le64_to_cpu(lif->ionic->ident.lif.capabilities) & IONIC_LIF_CAP_RDMA))
+ return 0;
+
+ ionic_adev = kzalloc(sizeof(*ionic_adev), GFP_KERNEL);
+ if (!ionic_adev)
+ return -ENOMEM;
+
+ aux_dev = &ionic_adev->adev;
+
+ id = ida_alloc(&aux_ida, GFP_KERNEL);
+ if (id < 0) {
+ dev_err(lif->ionic->dev, "Failed to allocate aux id: %d\n", id);
+ kfree(ionic_adev);
+ return id;
+ }
+
+ aux_dev->id = id;
+ aux_dev->name = "rdma";
+ aux_dev->dev.parent = &lif->ionic->pdev->dev;
+ aux_dev->dev.release = ionic_auxbus_release;
+ ionic_adev->lif = lif;
+ err = auxiliary_device_init(aux_dev);
+ if (err) {
+ dev_err(lif->ionic->dev, "Failed to initialize %s aux device: %d\n",
+ aux_dev->name, err);
+ ida_free(&aux_ida, id);
+ kfree(ionic_adev);
+ return err;
+ }
+
+ err = auxiliary_device_add(aux_dev);
+ if (err) {
+ dev_err(lif->ionic->dev, "Failed to add %s aux device: %d\n",
+ aux_dev->name, err);
+ auxiliary_device_uninit(aux_dev);
+ return err;
+ }
+
+ lif->ionic_adev = ionic_adev;
+ return 0;
+}
+
+void ionic_auxbus_unregister(struct ionic_lif *lif)
+{
+ mutex_lock(&lif->adev_lock);
+ if (!lif->ionic_adev)
+ goto out;
+
+ auxiliary_device_delete(&lif->ionic_adev->adev);
+ auxiliary_device_uninit(&lif->ionic_adev->adev);
+
+ lif->ionic_adev = NULL;
+out:
+ mutex_unlock(&lif->adev_lock);
+}
+
+void ionic_request_rdma_reset(struct ionic_lif *lif)
+{
+ struct ionic *ionic = lif->ionic;
+ int err;
+
+ union ionic_dev_cmd cmd = {
+ .cmd.opcode = IONIC_CMD_RDMA_RESET_LIF,
+ .cmd.lif_index = cpu_to_le16(lif->index),
+ };
+
+ mutex_lock(&ionic->dev_cmd_lock);
+
+ ionic_dev_cmd_go(&ionic->idev, &cmd);
+ err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
+
+ mutex_unlock(&ionic->dev_cmd_lock);
+
+ if (err)
+ pr_warn("%s request_reset: error %d\n", __func__, err);
+}
+EXPORT_SYMBOL_NS(ionic_request_rdma_reset, "NET_IONIC");
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_aux.h b/drivers/net/ethernet/pensando/ionic/ionic_aux.h
new file mode 100644
index 000000000000..f5528a9f187d
--- /dev/null
+++ b/drivers/net/ethernet/pensando/ionic/ionic_aux.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#ifndef _IONIC_AUX_H_
+#define _IONIC_AUX_H_
+
+int ionic_auxbus_register(struct ionic_lif *lif);
+void ionic_auxbus_unregister(struct ionic_lif *lif);
+
+#endif /* _IONIC_AUX_H_ */
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
index 136bfa3516d0..70d86c5f52fb 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
@@ -9,6 +9,7 @@
#include "ionic.h"
#include "ionic_bus.h"
#include "ionic_lif.h"
+#include "ionic_aux.h"
#include "ionic_debugfs.h"
/* Supported devices */
@@ -271,6 +272,8 @@ static int ionic_setup_one(struct ionic *ionic)
}
ionic_debugfs_add_ident(ionic);
+ ionic_map_cmb(ionic);
+
err = ionic_init(ionic);
if (err) {
dev_err(dev, "Cannot init device: %d, aborting\n", err);
@@ -375,6 +378,8 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_deregister_devlink;
}
+ ionic_auxbus_register(ionic->lif);
+
mod_timer(&ionic->watchdog_timer,
round_jiffies(jiffies + ionic->watchdog_period));
ionic_queue_doorbell_check(ionic, IONIC_NAPI_DEADLINE);
@@ -416,6 +421,7 @@ static void ionic_remove(struct pci_dev *pdev)
if (ionic->lif->doorbell_wa)
cancel_delayed_work_sync(&ionic->doorbell_check_dwork);
+ ionic_auxbus_unregister(ionic->lif);
ionic_lif_unregister(ionic->lif);
ionic_devlink_unregister(ionic);
ionic_lif_deinit(ionic->lif);
@@ -445,6 +451,7 @@ static void ionic_reset_prepare(struct pci_dev *pdev)
timer_delete_sync(&ionic->watchdog_timer);
cancel_work_sync(&lif->deferred.work);
+ ionic_auxbus_unregister(ionic->lif);
mutex_lock(&lif->queue_lock);
ionic_stop_queues_reconfig(lif);
ionic_txrx_free(lif);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
index 093c5358b6e8..ab27e9225c1e 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
@@ -199,13 +199,201 @@ void ionic_init_devinfo(struct ionic *ionic)
dev_dbg(ionic->dev, "fw_version %s\n", idev->dev_info.fw_version);
}
+static void ionic_map_disc_cmb(struct ionic *ionic)
+{
+ struct ionic_identity *ident = &ionic->ident;
+ u32 length_reg0, length, offset, num_regions;
+ struct ionic_dev_bar *bar = ionic->bars;
+ struct ionic_dev *idev = &ionic->idev;
+ struct device *dev = ionic->dev;
+ int err, sz, i;
+ u64 end;
+
+ mutex_lock(&ionic->dev_cmd_lock);
+
+ ionic_dev_cmd_discover_cmb(idev);
+ err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
+ if (!err) {
+ sz = min(sizeof(ident->cmb_layout),
+ sizeof(idev->dev_cmd_regs->data));
+ memcpy_fromio(&ident->cmb_layout,
+ &idev->dev_cmd_regs->data, sz);
+ }
+ mutex_unlock(&ionic->dev_cmd_lock);
+
+ if (err) {
+ dev_warn(dev, "Cannot discover CMB layout, disabling CMB\n");
+ return;
+ }
+
+ bar += 2;
+
+ num_regions = le32_to_cpu(ident->cmb_layout.num_regions);
+ if (!num_regions || num_regions > IONIC_MAX_CMB_REGIONS) {
+ dev_warn(dev, "Invalid number of CMB entries (%d)\n",
+ num_regions);
+ return;
+ }
+
+ dev_dbg(dev, "ionic_cmb_layout_identity num_regions %d flags %x:\n",
+ num_regions, ident->cmb_layout.flags);
+
+ for (i = 0; i < num_regions; i++) {
+ offset = le32_to_cpu(ident->cmb_layout.region[i].offset);
+ length = le32_to_cpu(ident->cmb_layout.region[i].length);
+ end = offset + length;
+
+ dev_dbg(dev, "CMB entry %d: bar_num %u cmb_type %u offset %x length %u\n",
+ i, ident->cmb_layout.region[i].bar_num,
+ ident->cmb_layout.region[i].cmb_type,
+ offset, length);
+
+ if (end > (bar->len >> IONIC_CMB_SHIFT_64K)) {
+ dev_warn(dev, "Out of bounds CMB region %d offset %x length %u\n",
+ i, offset, length);
+ return;
+ }
+ }
+
+ /* if first entry matches PCI config, expdb is not supported */
+ if (ident->cmb_layout.region[0].bar_num == bar->res_index &&
+ le32_to_cpu(ident->cmb_layout.region[0].length) == bar->len &&
+ !ident->cmb_layout.region[0].offset) {
+ dev_warn(dev, "No CMB mapping discovered\n");
+ return;
+ }
+
+ /* process first entry for regular mapping */
+ length_reg0 = le32_to_cpu(ident->cmb_layout.region[0].length);
+ if (!length_reg0) {
+ dev_warn(dev, "region len = 0. No CMB mapping discovered\n");
+ return;
+ }
+
+ /* Verify first entry size matches expected 8MB size (in 64KB pages) */
+ if (length_reg0 != IONIC_BAR2_CMB_ENTRY_SIZE >> IONIC_CMB_SHIFT_64K) {
+ dev_warn(dev, "Unexpected CMB size in entry 0: %u pages\n",
+ length_reg0);
+ return;
+ }
+
+ sz = BITS_TO_LONGS((length_reg0 << IONIC_CMB_SHIFT_64K) /
+ PAGE_SIZE) * sizeof(long);
+ idev->cmb_inuse = kzalloc(sz, GFP_KERNEL);
+ if (!idev->cmb_inuse) {
+ dev_warn(dev, "No memory for CMB, disabling\n");
+ idev->phy_cmb_pages = 0;
+ idev->phy_cmb_expdb64_pages = 0;
+ idev->phy_cmb_expdb128_pages = 0;
+ idev->phy_cmb_expdb256_pages = 0;
+ idev->phy_cmb_expdb512_pages = 0;
+ idev->cmb_npages = 0;
+ return;
+ }
+
+ for (i = 0; i < num_regions; i++) {
+ /* Only handle regions whose length matches the first
+ * region's, to keep the implementation simple.
+ */
+ if (le32_to_cpu(ident->cmb_layout.region[i].length) !=
+ length_reg0)
+ continue;
+
+ offset = le32_to_cpu(ident->cmb_layout.region[i].offset);
+
+ switch (ident->cmb_layout.region[i].cmb_type) {
+ case IONIC_CMB_TYPE_DEVMEM:
+ idev->phy_cmb_pages = bar->bus_addr + offset;
+ idev->cmb_npages =
+ (length_reg0 << IONIC_CMB_SHIFT_64K) / PAGE_SIZE;
+ dev_dbg(dev, "regular cmb mapping: bar->bus_addr %pa region[%d].length %u\n",
+ &bar->bus_addr, i, length);
+ dev_dbg(dev, "idev->phy_cmb_pages %pad, idev->cmb_npages %u\n",
+ &idev->phy_cmb_pages, idev->cmb_npages);
+ break;
+
+ case IONIC_CMB_TYPE_EXPDB64:
+ idev->phy_cmb_expdb64_pages =
+ bar->bus_addr + (offset << IONIC_CMB_SHIFT_64K);
+ dev_dbg(dev, "idev->phy_cmb_expdb64_pages %pad\n",
+ &idev->phy_cmb_expdb64_pages);
+ break;
+
+ case IONIC_CMB_TYPE_EXPDB128:
+ idev->phy_cmb_expdb128_pages =
+ bar->bus_addr + (offset << IONIC_CMB_SHIFT_64K);
+ dev_dbg(dev, "idev->phy_cmb_expdb128_pages %pad\n",
+ &idev->phy_cmb_expdb128_pages);
+ break;
+
+ case IONIC_CMB_TYPE_EXPDB256:
+ idev->phy_cmb_expdb256_pages =
+ bar->bus_addr + (offset << IONIC_CMB_SHIFT_64K);
+ dev_dbg(dev, "idev->phy_cmb_expdb256_pages %pad\n",
+ &idev->phy_cmb_expdb256_pages);
+ break;
+
+ case IONIC_CMB_TYPE_EXPDB512:
+ idev->phy_cmb_expdb512_pages =
+ bar->bus_addr + (offset << IONIC_CMB_SHIFT_64K);
+ dev_dbg(dev, "idev->phy_cmb_expdb512_pages %pad\n",
+ &idev->phy_cmb_expdb512_pages);
+ break;
+
+ default:
+ dev_warn(dev, "[%d] Invalid cmb_type (%d)\n",
+ i, ident->cmb_layout.region[i].cmb_type);
+ break;
+ }
+ }
+}
+
+static void ionic_map_classic_cmb(struct ionic *ionic)
+{
+ struct ionic_dev_bar *bar = ionic->bars;
+ struct ionic_dev *idev = &ionic->idev;
+ struct device *dev = ionic->dev;
+ int sz;
+
+ bar += 2;
+ /* classic CMB mapping */
+ idev->phy_cmb_pages = bar->bus_addr;
+ idev->cmb_npages = bar->len / PAGE_SIZE;
+ dev_dbg(dev, "classic cmb mapping: bar->bus_addr %pa bar->len %lu\n",
+ &bar->bus_addr, bar->len);
+ dev_dbg(dev, "idev->phy_cmb_pages %pad, idev->cmb_npages %u\n",
+ &idev->phy_cmb_pages, idev->cmb_npages);
+
+ sz = BITS_TO_LONGS(idev->cmb_npages) * sizeof(long);
+ idev->cmb_inuse = kzalloc(sz, GFP_KERNEL);
+ if (!idev->cmb_inuse) {
+ idev->phy_cmb_pages = 0;
+ idev->cmb_npages = 0;
+ }
+}
+
+void ionic_map_cmb(struct ionic *ionic)
+{
+ struct pci_dev *pdev = ionic->pdev;
+ struct device *dev = ionic->dev;
+
+ if (!(pci_resource_flags(pdev, 4) & IORESOURCE_MEM)) {
+ dev_dbg(dev, "No CMB, disabling\n");
+ return;
+ }
+
+ if (ionic->ident.dev.capabilities & cpu_to_le64(IONIC_DEV_CAP_DISC_CMB))
+ ionic_map_disc_cmb(ionic);
+ else
+ ionic_map_classic_cmb(ionic);
+}
+
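The units here are 64 KiB pages (IONIC_CMB_SHIFT_64K = 16): region offsets and lengths from the device are compared against bar->len >> 16, and the expected entry-0 size works out as

	IONIC_BAR2_CMB_ENTRY_SIZE >> IONIC_CMB_SHIFT_64K = 0x800000 >> 16 = 128

i.e. an 8 MiB region expressed as 128 pages of 64 KiB.
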
int ionic_dev_setup(struct ionic *ionic)
{
struct ionic_dev_bar *bar = ionic->bars;
unsigned int num_bars = ionic->num_bars;
struct ionic_dev *idev = &ionic->idev;
struct device *dev = ionic->dev;
- int size;
u32 sig;
int err;
@@ -255,16 +443,11 @@ int ionic_dev_setup(struct ionic *ionic)
mutex_init(&idev->cmb_inuse_lock);
if (num_bars < 3 || !ionic->bars[IONIC_PCI_BAR_CMB].len) {
idev->cmb_inuse = NULL;
+ idev->phy_cmb_pages = 0;
+ idev->cmb_npages = 0;
return 0;
}
- idev->phy_cmb_pages = bar->bus_addr;
- idev->cmb_npages = bar->len / PAGE_SIZE;
- size = BITS_TO_LONGS(idev->cmb_npages) * sizeof(long);
- idev->cmb_inuse = kzalloc(size, GFP_KERNEL);
- if (!idev->cmb_inuse)
- dev_warn(dev, "No memory for CMB, disabling\n");
-
return 0;
}
@@ -277,6 +460,11 @@ void ionic_dev_teardown(struct ionic *ionic)
idev->phy_cmb_pages = 0;
idev->cmb_npages = 0;
+ idev->phy_cmb_expdb64_pages = 0;
+ idev->phy_cmb_expdb128_pages = 0;
+ idev->phy_cmb_expdb256_pages = 0;
+ idev->phy_cmb_expdb512_pages = 0;
+
if (ionic->wq) {
destroy_workqueue(ionic->wq);
ionic->wq = NULL;
@@ -698,28 +886,79 @@ void ionic_dev_cmd_adminq_init(struct ionic_dev *idev, struct ionic_qcq *qcq,
ionic_dev_cmd_go(idev, &cmd);
}
+void ionic_dev_cmd_discover_cmb(struct ionic_dev *idev)
+{
+ union ionic_dev_cmd cmd = {
+ .discover_cmb.opcode = IONIC_CMD_DISCOVER_CMB,
+ };
+
+ ionic_dev_cmd_go(idev, &cmd);
+}
+
int ionic_db_page_num(struct ionic_lif *lif, int pid)
{
return (lif->hw_index * lif->dbid_count) + pid;
}
-int ionic_get_cmb(struct ionic_lif *lif, u32 *pgid, phys_addr_t *pgaddr, int order)
+int ionic_get_cmb(struct ionic_lif *lif, u32 *pgid, phys_addr_t *pgaddr,
+ int order, u8 stride_log2, bool *expdb)
{
struct ionic_dev *idev = &lif->ionic->idev;
- int ret;
+ void __iomem *nonexpdb_pgptr;
+ phys_addr_t nonexpdb_pgaddr;
+ int i, idx;
mutex_lock(&idev->cmb_inuse_lock);
- ret = bitmap_find_free_region(idev->cmb_inuse, idev->cmb_npages, order);
+ idx = bitmap_find_free_region(idev->cmb_inuse, idev->cmb_npages, order);
mutex_unlock(&idev->cmb_inuse_lock);
- if (ret < 0)
- return ret;
+ if (idx < 0)
+ return idx;
+
+ *pgid = (u32)idx;
+
+ if (idev->phy_cmb_expdb64_pages &&
+ stride_log2 == IONIC_EXPDB_64B_WQE_LG2) {
+ *pgaddr = idev->phy_cmb_expdb64_pages + idx * PAGE_SIZE;
+ if (expdb)
+ *expdb = true;
+ } else if (idev->phy_cmb_expdb128_pages &&
+ stride_log2 == IONIC_EXPDB_128B_WQE_LG2) {
+ *pgaddr = idev->phy_cmb_expdb128_pages + idx * PAGE_SIZE;
+ if (expdb)
+ *expdb = true;
+ } else if (idev->phy_cmb_expdb256_pages &&
+ stride_log2 == IONIC_EXPDB_256B_WQE_LG2) {
+ *pgaddr = idev->phy_cmb_expdb256_pages + idx * PAGE_SIZE;
+ if (expdb)
+ *expdb = true;
+ } else if (idev->phy_cmb_expdb512_pages &&
+ stride_log2 == IONIC_EXPDB_512B_WQE_LG2) {
+ *pgaddr = idev->phy_cmb_expdb512_pages + idx * PAGE_SIZE;
+ if (expdb)
+ *expdb = true;
+ } else {
+ *pgaddr = idev->phy_cmb_pages + idx * PAGE_SIZE;
+ if (expdb)
+ *expdb = false;
+ }
- *pgid = ret;
- *pgaddr = idev->phy_cmb_pages + ret * PAGE_SIZE;
+ /* clear the requested CMB region, 1 PAGE_SIZE ioremap at a time */
+ nonexpdb_pgaddr = idev->phy_cmb_pages + idx * PAGE_SIZE;
+ for (i = 0; i < (1 << order); i++) {
+ nonexpdb_pgptr =
+ ioremap_wc(nonexpdb_pgaddr + i * PAGE_SIZE, PAGE_SIZE);
+ if (!nonexpdb_pgptr) {
+ ionic_put_cmb(lif, *pgid, order);
+ return -ENOMEM;
+ }
+ memset_io(nonexpdb_pgptr, 0, PAGE_SIZE);
+ iounmap(nonexpdb_pgptr);
+ }
return 0;
}
+EXPORT_SYMBOL_NS(ionic_get_cmb, "NET_IONIC");
void ionic_put_cmb(struct ionic_lif *lif, u32 pgid, int order)
{
@@ -729,6 +968,7 @@ void ionic_put_cmb(struct ionic_lif *lif, u32 pgid, int order)
bitmap_release_region(idev->cmb_inuse, pgid, order);
mutex_unlock(&idev->cmb_inuse_lock);
}
+EXPORT_SYMBOL_NS(ionic_put_cmb, "NET_IONIC");
int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq,
struct ionic_intr_info *intr,
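
A hedged usage sketch of the extended allocator, requesting four pages backed by the 64-byte-WQE express-doorbell pool when available (values illustrative):

	u32 pgid;
	phys_addr_t pgaddr;
	bool expdb;
	int err;

	/* order 2 -> 1 << 2 = 4 PAGE_SIZE pages; stride 2^6 = 64-byte WQEs */
	err = ionic_get_cmb(lif, &pgid, &pgaddr, 2, IONIC_EXPDB_64B_WQE_LG2, &expdb);
	if (!err) {
		/* expdb reports whether pgaddr landed in an express-doorbell
		 * region; callers that don't care pass stride_log2 = 0 and
		 * expdb = NULL, as ionic_qcq_alloc() does above.
		 */
		ionic_put_cmb(lif, pgid, 2);
	}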
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index c8c710cfe70c..35566f97eaea 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -12,6 +12,7 @@
#include "ionic_if.h"
#include "ionic_regs.h"
+#include "ionic_api.h"
#define IONIC_MAX_TX_DESC 8192
#define IONIC_MAX_RX_DESC 16384
@@ -34,6 +35,11 @@
#define IONIC_RX_MIN_DOORBELL_DEADLINE (HZ / 100) /* 10ms */
#define IONIC_RX_MAX_DOORBELL_DEADLINE (HZ * 4) /* 4s */
+#define IONIC_EXPDB_64B_WQE_LG2 6
+#define IONIC_EXPDB_128B_WQE_LG2 7
+#define IONIC_EXPDB_256B_WQE_LG2 8
+#define IONIC_EXPDB_512B_WQE_LG2 9
+
struct ionic_dev_bar {
void __iomem *vaddr;
phys_addr_t bus_addr;
@@ -170,6 +176,11 @@ struct ionic_dev {
dma_addr_t phy_cmb_pages;
u32 cmb_npages;
+ dma_addr_t phy_cmb_expdb64_pages;
+ dma_addr_t phy_cmb_expdb128_pages;
+ dma_addr_t phy_cmb_expdb256_pages;
+ dma_addr_t phy_cmb_expdb512_pages;
+
u32 port_info_sz;
struct ionic_port_info *port_info;
dma_addr_t port_info_pa;
@@ -273,19 +284,6 @@ struct ionic_queue {
char name[IONIC_QUEUE_NAME_MAX_SZ];
} ____cacheline_aligned_in_smp;
-#define IONIC_INTR_INDEX_NOT_ASSIGNED -1
-#define IONIC_INTR_NAME_MAX_SZ 32
-
-struct ionic_intr_info {
- char name[IONIC_INTR_NAME_MAX_SZ];
- u64 rearm_count;
- unsigned int index;
- unsigned int vector;
- u32 dim_coal_hw;
- cpumask_var_t *affinity_mask;
- struct irq_affinity_notify aff_notify;
-};
-
struct ionic_cq {
struct ionic_lif *lif;
struct ionic_queue *bound_q;
@@ -363,8 +361,8 @@ void ionic_dev_cmd_adminq_init(struct ionic_dev *idev, struct ionic_qcq *qcq,
int ionic_db_page_num(struct ionic_lif *lif, int pid);
-int ionic_get_cmb(struct ionic_lif *lif, u32 *pgid, phys_addr_t *pgaddr, int order);
-void ionic_put_cmb(struct ionic_lif *lif, u32 pgid, int order);
+void ionic_dev_cmd_discover_cmb(struct ionic_dev *idev);
+void ionic_map_cmb(struct ionic *ionic);
int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq,
struct ionic_intr_info *intr,
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index 92f30ff2d631..2d9efadb5d2a 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -978,7 +978,7 @@ static int ionic_get_module_eeprom_by_page(struct net_device *netdev,
{
struct ionic_lif *lif = netdev_priv(netdev);
struct ionic_dev *idev = &lif->ionic->idev;
- u32 err = -EINVAL;
+ int err;
u8 *src;
if (!page_data->length)
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h
index 9886cd66ce68..47559c909c8b 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_if.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h
@@ -56,6 +56,9 @@ enum ionic_cmd_opcode {
IONIC_CMD_VF_SETATTR = 61,
IONIC_CMD_VF_CTRL = 62,
+ /* CMB command */
+ IONIC_CMD_DISCOVER_CMB = 80,
+
/* QoS commands */
IONIC_CMD_QOS_CLASS_IDENTIFY = 240,
IONIC_CMD_QOS_CLASS_INIT = 241,
@@ -269,9 +272,11 @@ union ionic_drv_identity {
/**
* enum ionic_dev_capability - Device capabilities
* @IONIC_DEV_CAP_VF_CTRL: Device supports VF ctrl operations
+ * @IONIC_DEV_CAP_DISC_CMB: Device supports CMB discovery operations
*/
enum ionic_dev_capability {
IONIC_DEV_CAP_VF_CTRL = BIT(0),
+ IONIC_DEV_CAP_DISC_CMB = BIT(1),
};
/**
@@ -395,6 +400,7 @@ enum ionic_logical_qtype {
* @IONIC_Q_F_4X_DESC: Quadruple main descriptor size
* @IONIC_Q_F_4X_CQ_DESC: Quadruple cq descriptor size
* @IONIC_Q_F_4X_SG_DESC: Quadruple sg descriptor size
+ * @IONIC_QIDENT_F_EXPDB: Queue supports express doorbell
*/
enum ionic_q_feature {
IONIC_QIDENT_F_CQ = BIT_ULL(0),
@@ -407,6 +413,7 @@ enum ionic_q_feature {
IONIC_Q_F_4X_DESC = BIT_ULL(7),
IONIC_Q_F_4X_CQ_DESC = BIT_ULL(8),
IONIC_Q_F_4X_SG_DESC = BIT_ULL(9),
+ IONIC_QIDENT_F_EXPDB = BIT_ULL(10),
};
/**
@@ -495,6 +502,16 @@ union ionic_lif_config {
};
/**
+ * enum ionic_lif_rdma_cap_stats - LIF stat type
+ * @IONIC_LIF_RDMA_STAT_GLOBAL: Global stats
+ * @IONIC_LIF_RDMA_STAT_QP: Queue pair stats
+ */
+enum ionic_lif_rdma_cap_stats {
+ IONIC_LIF_RDMA_STAT_GLOBAL = BIT(0),
+ IONIC_LIF_RDMA_STAT_QP = BIT(1),
+};
+
+/**
* struct ionic_lif_identity - LIF identity information (type-specific)
*
* @capabilities: LIF capabilities
@@ -513,10 +530,10 @@ union ionic_lif_config {
* @eth.config: LIF config struct with features, mtu, mac, q counts
*
* @rdma: RDMA identify structure
- * @rdma.version: RDMA version of opcodes and queue descriptors
+ * @rdma.version: RDMA capability version
* @rdma.qp_opcodes: Number of RDMA queue pair opcodes supported
* @rdma.admin_opcodes: Number of RDMA admin opcodes supported
- * @rdma.rsvd: reserved byte(s)
+ * @rdma.minor_version: RDMA capability minor version
* @rdma.npts_per_lif: Page table size per LIF
* @rdma.nmrs_per_lif: Number of memory regions per LIF
* @rdma.nahs_per_lif: Number of address handles per LIF
@@ -526,12 +543,17 @@ union ionic_lif_config {
* @rdma.rrq_stride: Remote RQ work request stride
* @rdma.rsq_stride: Remote SQ work request stride
* @rdma.dcqcn_profiles: Number of DCQCN profiles
- * @rdma.rsvd_dimensions: reserved byte(s)
+ * @rdma.udma_shift: Log2 number of queues per queue group
+ * @rdma.rsvd_dimensions: Reserved byte
+ * @rdma.page_size_cap: Supported page sizes
* @rdma.aq_qtype: RDMA Admin Qtype
* @rdma.sq_qtype: RDMA Send Qtype
* @rdma.rq_qtype: RDMA Receive Qtype
* @rdma.cq_qtype: RDMA Completion Qtype
* @rdma.eq_qtype: RDMA Event Qtype
+ * @rdma.stats_type: Supported statistics type
+ * (enum ionic_lif_rdma_cap_stats)
+ * @rdma.rsvd1: Reserved byte(s)
* @words: word access to struct contents
*/
union ionic_lif_identity {
@@ -557,7 +579,7 @@ union ionic_lif_identity {
u8 version;
u8 qp_opcodes;
u8 admin_opcodes;
- u8 rsvd;
+ u8 minor_version;
__le32 npts_per_lif;
__le32 nmrs_per_lif;
__le32 nahs_per_lif;
@@ -567,12 +589,16 @@ union ionic_lif_identity {
u8 rrq_stride;
u8 rsq_stride;
u8 dcqcn_profiles;
- u8 rsvd_dimensions[10];
+ u8 udma_shift;
+ u8 rsvd_dimensions;
+ __le64 page_size_cap;
struct ionic_lif_logical_qtype aq_qtype;
struct ionic_lif_logical_qtype sq_qtype;
struct ionic_lif_logical_qtype rq_qtype;
struct ionic_lif_logical_qtype cq_qtype;
struct ionic_lif_logical_qtype eq_qtype;
+ __le16 stats_type;
+ u8 rsvd1[162];
} __packed rdma;
} __packed;
__le32 words[478];
@@ -2195,6 +2221,80 @@ struct ionic_vf_ctrl_comp {
};
/**
+ * struct ionic_discover_cmb_cmd - CMB discovery command
+ * @opcode: Opcode for the command
+ * @rsvd: Reserved bytes
+ */
+struct ionic_discover_cmb_cmd {
+ u8 opcode;
+ u8 rsvd[63];
+};
+
+/**
+ * struct ionic_discover_cmb_comp - CMB discover command completion.
+ * @status: Status of the command (enum ionic_status_code)
+ * @rsvd: Reserved bytes
+ */
+struct ionic_discover_cmb_comp {
+ u8 status;
+ u8 rsvd[15];
+};
+
+#define IONIC_MAX_CMB_REGIONS 16
+#define IONIC_CMB_SHIFT_64K 16
+
+enum ionic_cmb_type {
+ IONIC_CMB_TYPE_DEVMEM = 0,
+ IONIC_CMB_TYPE_EXPDB64 = 1,
+ IONIC_CMB_TYPE_EXPDB128 = 2,
+ IONIC_CMB_TYPE_EXPDB256 = 3,
+ IONIC_CMB_TYPE_EXPDB512 = 4,
+};
+
+/**
+ * union ionic_cmb_region - Configuration for CMB region
+ * @bar_num: CMB mapping number from FW
+ * @cmb_type: Type of CMB this region describes (enum ionic_cmb_type)
+ * @rsvd: Reserved
+ * @offset: Offset within BAR in 64KB pages
+ * @length: Length of the CMB region in 64KB pages
+ * @words: 32-bit words for direct access to the entire region
+ */
+union ionic_cmb_region {
+ struct {
+ u8 bar_num;
+ u8 cmb_type;
+ u8 rsvd[6];
+ __le32 offset;
+ __le32 length;
+ } __packed;
+ __le32 words[4];
+};
+
+/**
+ * union ionic_discover_cmb_identity - CMB layout identity structure
+ * @num_regions: Number of CMB regions, up to 16
+ * @flags: Feature and capability bits (bit 0: express
+ * doorbell, bit 1: 4 KB alignment indicator,
+ * bits 31-24: version information)
+ * @region: CMB mappings region, entry 0 for regular
+ * mapping, entries 1-7 for WQE sizes 64,
+ * 128, 256, 512, 1024, 2048 and 4096 bytes
+ * @words: Word access to the union contents
+ */
+union ionic_discover_cmb_identity {
+ struct {
+ __le32 num_regions;
+#define IONIC_CMB_FLAG_EXPDB BIT(0)
+#define IONIC_CMB_FLAG_4KALIGN BIT(1)
+#define IONIC_CMB_FLAG_VERSION 0xff000000
+ __le32 flags;
+ union ionic_cmb_region region[IONIC_MAX_CMB_REGIONS];
+ };
+ __le32 words[478];
+};
+
+/**
* struct ionic_qos_identify_cmd - QoS identify command
* @opcode: opcode
* @ver: Highest version of identify supported by driver
@@ -3054,6 +3154,8 @@ union ionic_dev_cmd {
struct ionic_vf_getattr_cmd vf_getattr;
struct ionic_vf_ctrl_cmd vf_ctrl;
+ struct ionic_discover_cmb_cmd discover_cmb;
+
struct ionic_lif_identify_cmd lif_identify;
struct ionic_lif_init_cmd lif_init;
struct ionic_lif_reset_cmd lif_reset;
@@ -3093,6 +3195,8 @@ union ionic_dev_cmd_comp {
struct ionic_vf_getattr_comp vf_getattr;
struct ionic_vf_ctrl_comp vf_ctrl;
+ struct ionic_discover_cmb_comp discover_cmb;
+
struct ionic_lif_identify_comp lif_identify;
struct ionic_lif_init_comp lif_init;
ionic_lif_reset_comp lif_reset;
@@ -3234,6 +3338,9 @@ union ionic_adminq_comp {
#define IONIC_BAR0_DEV_CMD_DATA_REGS_OFFSET 0x0c00
#define IONIC_BAR0_INTR_STATUS_OFFSET 0x1000
#define IONIC_BAR0_INTR_CTRL_OFFSET 0x2000
+
+/* BAR2 */
+#define IONIC_BAR2_CMB_ENTRY_SIZE 0x800000
#define IONIC_DEV_CMD_DONE 0x00000001
#define IONIC_ASIC_TYPE_NONE 0
@@ -3287,6 +3394,7 @@ struct ionic_identity {
union ionic_port_identity port;
union ionic_qos_identity qos;
union ionic_q_identity txq;
+ union ionic_discover_cmb_identity cmb_layout;
};
#endif /* _IONIC_IF_H_ */
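
A short sketch of decoding the discovery identity defined above (field names as in this header):

	u32 flags = le32_to_cpu(ident->cmb_layout.flags);
	u32 nregions = le32_to_cpu(ident->cmb_layout.num_regions);
	bool has_expdb = flags & IONIC_CMB_FLAG_EXPDB;
	bool aligned_4k = flags & IONIC_CMB_FLAG_4KALIGN;
	u8 version = (flags & IONIC_CMB_FLAG_VERSION) >> 24;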
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 48cb5d30b5f6..b28966ae50c2 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -19,6 +19,7 @@
#include "ionic_bus.h"
#include "ionic_dev.h"
#include "ionic_lif.h"
+#include "ionic_aux.h"
#include "ionic_txrx.h"
#include "ionic_ethtool.h"
#include "ionic_debugfs.h"
@@ -243,29 +244,36 @@ static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq)
0, intr->name, &qcq->napi);
}
-static int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr)
+int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr)
{
struct ionic *ionic = lif->ionic;
- int index;
+ int index, err;
index = find_first_zero_bit(ionic->intrs, ionic->nintrs);
- if (index == ionic->nintrs) {
- netdev_warn(lif->netdev, "%s: no intr, index=%d nintrs=%d\n",
- __func__, index, ionic->nintrs);
+ if (index == ionic->nintrs)
return -ENOSPC;
- }
set_bit(index, ionic->intrs);
ionic_intr_init(&ionic->idev, intr, index);
+ err = ionic_bus_get_irq(ionic, intr->index);
+ if (err < 0) {
+ clear_bit(index, ionic->intrs);
+ return err;
+ }
+
+ intr->vector = err;
+
return 0;
}
+EXPORT_SYMBOL_NS(ionic_intr_alloc, "NET_IONIC");
-static void ionic_intr_free(struct ionic *ionic, int index)
+void ionic_intr_free(struct ionic_lif *lif, int index)
{
- if (index != IONIC_INTR_INDEX_NOT_ASSIGNED && index < ionic->nintrs)
- clear_bit(index, ionic->intrs);
+ if (index != IONIC_INTR_INDEX_NOT_ASSIGNED && index < lif->ionic->nintrs)
+ clear_bit(index, lif->ionic->intrs);
}
+EXPORT_SYMBOL_NS(ionic_intr_free, "NET_IONIC");
static void ionic_irq_aff_notify(struct irq_affinity_notify *notify,
const cpumask_t *mask)
@@ -400,7 +408,7 @@ static void ionic_qcq_intr_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
irq_set_affinity_hint(qcq->intr.vector, NULL);
devm_free_irq(lif->ionic->dev, qcq->intr.vector, &qcq->napi);
qcq->intr.vector = 0;
- ionic_intr_free(lif->ionic, qcq->intr.index);
+ ionic_intr_free(lif, qcq->intr.index);
qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
}
@@ -510,13 +518,6 @@ static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qc
goto err_out;
}
- err = ionic_bus_get_irq(lif->ionic, qcq->intr.index);
- if (err < 0) {
- netdev_warn(lif->netdev, "no vector for %s: %d\n",
- qcq->q.name, err);
- goto err_out_free_intr;
- }
- qcq->intr.vector = err;
ionic_intr_mask_assert(lif->ionic->idev.intr_ctrl, qcq->intr.index,
IONIC_INTR_MASK_SET);
@@ -545,7 +546,7 @@ static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qc
return 0;
err_out_free_intr:
- ionic_intr_free(lif->ionic, qcq->intr.index);
+ ionic_intr_free(lif, qcq->intr.index);
err_out:
return err;
}
@@ -672,7 +673,7 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
new->cmb_order = order_base_2(new->cmb_q_size / PAGE_SIZE);
err = ionic_get_cmb(lif, &new->cmb_pgid, &new->cmb_q_base_pa,
- new->cmb_order);
+ new->cmb_order, 0, NULL);
if (err) {
netdev_err(lif->netdev,
"Cannot allocate queue order %d from cmb: err %d\n",
@@ -740,7 +741,7 @@ err_out_free_q:
err_out_free_irq:
if (flags & IONIC_QCQ_F_INTR) {
devm_free_irq(dev, new->intr.vector, &new->napi);
- ionic_intr_free(lif->ionic, new->intr.index);
+ ionic_intr_free(lif, new->intr.index);
}
err_out_free_page_pool:
page_pool_destroy(new->q.page_pool);
@@ -3293,6 +3294,7 @@ int ionic_lif_alloc(struct ionic *ionic)
mutex_init(&lif->queue_lock);
mutex_init(&lif->config_lock);
+ mutex_init(&lif->adev_lock);
spin_lock_init(&lif->adminq_lock);
@@ -3349,6 +3351,7 @@ err_out_free_lif_info:
lif->info = NULL;
lif->info_pa = 0;
err_out_free_mutex:
+ mutex_destroy(&lif->adev_lock);
mutex_destroy(&lif->config_lock);
mutex_destroy(&lif->queue_lock);
err_out_free_netdev:
@@ -3384,6 +3387,7 @@ static void ionic_lif_handle_fw_down(struct ionic_lif *lif)
netif_device_detach(lif->netdev);
+ ionic_auxbus_unregister(ionic->lif);
mutex_lock(&lif->queue_lock);
if (test_bit(IONIC_LIF_F_UP, lif->state)) {
dev_info(ionic->dev, "Surprise FW stop, stopping queues\n");
@@ -3446,6 +3450,8 @@ int ionic_restart_lif(struct ionic_lif *lif)
netif_device_attach(lif->netdev);
ionic_queue_doorbell_check(ionic, IONIC_NAPI_DEADLINE);
+ ionic_auxbus_register(ionic->lif);
+
return 0;
err_txrx_free:
@@ -3528,6 +3534,7 @@ void ionic_lif_free(struct ionic_lif *lif)
mutex_destroy(&lif->config_lock);
mutex_destroy(&lif->queue_lock);
+ mutex_destroy(&lif->adev_lock);
/* free netdev & lif */
ionic_debugfs_del_lif(lif);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index e01756fb7fdd..43bdd0fb8733 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -10,6 +10,7 @@
#include <linux/dim.h>
#include <linux/pci.h>
#include "ionic_rx_filter.h"
+#include "ionic_api.h"
#define IONIC_ADMINQ_LENGTH 16 /* must be a power of two */
#define IONIC_NOTIFYQ_LENGTH 64 /* must be a power of two */
@@ -225,6 +226,8 @@ struct ionic_lif {
dma_addr_t info_pa;
u32 info_sz;
struct ionic_qtype_info qtype_info[IONIC_QTYPE_MAX];
+ struct ionic_aux_dev *ionic_adev;
+ struct mutex adev_lock; /* lock for aux_dev actions */
u8 rss_hash_key[IONIC_RSS_HASH_KEY_SIZE];
u8 *rss_ind_tbl;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index 0e60a6bef99a..14dc055be3e9 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -72,7 +72,7 @@ static const char *ionic_error_to_str(enum ionic_status_code code)
}
}
-static int ionic_error_to_errno(enum ionic_status_code code)
+int ionic_error_to_errno(enum ionic_status_code code)
{
switch (code) {
case IONIC_RC_SUCCESS:
@@ -114,6 +114,7 @@ static int ionic_error_to_errno(enum ionic_status_code code)
return -EIO;
}
}
+EXPORT_SYMBOL_NS(ionic_error_to_errno, "NET_IONIC");
static const char *ionic_opcode_to_str(enum ionic_cmd_opcode opcode)
{
@@ -480,6 +481,7 @@ int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
{
return __ionic_adminq_post_wait(lif, ctx, true);
}
+EXPORT_SYMBOL_NS(ionic_adminq_post_wait, "NET_IONIC");
int ionic_adminq_post_wait_nomsg(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
{
diff --git a/drivers/net/ethernet/qlogic/qed/qed_devlink.c b/drivers/net/ethernet/qlogic/qed/qed_devlink.c
index 1adc7fbb3f2f..94c5689b5abd 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_devlink.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_devlink.c
@@ -87,20 +87,21 @@ qed_fw_fatal_reporter_recover(struct devlink_health_reporter *reporter,
return 0;
}
+#define QED_REPORTER_FW_GRACEFUL_PERIOD 0
+
static const struct devlink_health_reporter_ops qed_fw_fatal_reporter_ops = {
.name = "fw_fatal",
.recover = qed_fw_fatal_reporter_recover,
.dump = qed_fw_fatal_reporter_dump,
+ .default_graceful_period = QED_REPORTER_FW_GRACEFUL_PERIOD,
};
-#define QED_REPORTER_FW_GRACEFUL_PERIOD 0
-
void qed_fw_reporters_create(struct devlink *devlink)
{
struct qed_devlink *dl = devlink_priv(devlink);
- dl->fw_reporter = devlink_health_reporter_create(devlink, &qed_fw_fatal_reporter_ops,
- QED_REPORTER_FW_GRACEFUL_PERIOD, dl);
+ dl->fw_reporter = devlink_health_reporter_create(devlink,
+ &qed_fw_fatal_reporter_ops, dl);
if (IS_ERR(dl->fw_reporter)) {
DP_NOTICE(dl->cdev, "Failed to create fw reporter, err = %ld\n",
PTR_ERR(dl->fw_reporter));
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 886061d7351a..d4685ad4b169 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -1214,7 +1214,8 @@ static int qed_slowpath_wq_start(struct qed_dev *cdev)
hwfn = &cdev->hwfns[i];
hwfn->slowpath_wq = alloc_workqueue("slowpath-%02x:%02x.%02x",
- 0, 0, cdev->pdev->bus->number,
+ WQ_PERCPU, 0,
+ cdev->pdev->bus->number,
PCI_SLOT(cdev->pdev->devfn),
hwfn->abs_pf_id);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.c b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
index 5d725f59db24..8be567a6ad44 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ooo.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
@@ -183,9 +183,6 @@ void qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn,
struct qed_ooo_buffer,
list_entry);
- if (!p_buffer)
- break;
-
list_move_tail(&p_buffer->list_entry,
&p_ooo_info->free_buffers_list);
}
@@ -218,9 +215,6 @@ void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn,
struct qed_ooo_buffer,
list_entry);
- if (!p_buffer)
- break;
-
list_move_tail(&p_buffer->list_entry,
&p_ooo_info->free_buffers_list);
}
@@ -255,9 +249,6 @@ void qed_ooo_free(struct qed_hwfn *p_hwfn)
p_buffer = list_first_entry(&p_ooo_info->free_buffers_list,
struct qed_ooo_buffer, list_entry);
- if (!p_buffer)
- break;
-
list_del(&p_buffer->list_entry);
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
p_buffer->rx_buffer_size,
diff --git a/drivers/net/ethernet/qualcomm/Kconfig b/drivers/net/ethernet/qualcomm/Kconfig
index a4434eb38950..ba7efb108637 100644
--- a/drivers/net/ethernet/qualcomm/Kconfig
+++ b/drivers/net/ethernet/qualcomm/Kconfig
@@ -60,6 +60,21 @@ config QCOM_EMAC
low power, Receive-Side Scaling (RSS), and IEEE 1588-2008
Precision Clock Synchronization Protocol.
+config QCOM_PPE
+ tristate "Qualcomm Technologies, Inc. PPE Ethernet support"
+ depends on COMMON_CLK && HAS_IOMEM && OF
+ depends on ARCH_QCOM || COMPILE_TEST
+ select REGMAP_MMIO
+ help
+	  This driver supports the Qualcomm Technologies, Inc. packet
+	  process engine (PPE) available on IPQ SoCs. The PPE includes
+	  the Ethernet MACs, Ethernet DMA (EDMA) and a switch core that
+	  supports L3 flow offload, L2 switching, RSS and tunnel
+	  offload.
+
+ To compile this driver as a module, choose M here. The module
+ will be called qcom-ppe.
+
source "drivers/net/ethernet/qualcomm/rmnet/Kconfig"
endif # NET_VENDOR_QUALCOMM
diff --git a/drivers/net/ethernet/qualcomm/Makefile b/drivers/net/ethernet/qualcomm/Makefile
index 9250976dd884..166a59aea363 100644
--- a/drivers/net/ethernet/qualcomm/Makefile
+++ b/drivers/net/ethernet/qualcomm/Makefile
@@ -11,4 +11,5 @@ qcauart-objs := qca_uart.o
obj-y += emac/
+obj-$(CONFIG_QCOM_PPE) += ppe/
obj-$(CONFIG_RMNET) += rmnet/
diff --git a/drivers/net/ethernet/qualcomm/ppe/Makefile b/drivers/net/ethernet/qualcomm/ppe/Makefile
new file mode 100644
index 000000000000..9e60b2400c16
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/ppe/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the device driver of PPE (Packet Process Engine) in IPQ SoC
+#
+
+obj-$(CONFIG_QCOM_PPE) += qcom-ppe.o
+qcom-ppe-objs := ppe.o ppe_config.o ppe_debugfs.o
diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe.c b/drivers/net/ethernet/qualcomm/ppe/ppe.c
new file mode 100644
index 000000000000..be747510d947
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+/* PPE platform device probe, DTSI parser and PPE clock initializations. */
+
+#include <linux/clk.h>
+#include <linux/interconnect.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#include "ppe.h"
+#include "ppe_config.h"
+#include "ppe_debugfs.h"
+
+#define PPE_PORT_MAX 8
+#define PPE_CLK_RATE 353000000
+
+/* Interconnect (ICC) paths for enabling the PPE device. Entries whose
+ * avg_bw and peak_bw are 0 are updated at runtime from the PPE clock rate.
+ */
+static const struct icc_bulk_data ppe_icc_data[] = {
+ {
+ .name = "ppe",
+ .avg_bw = 0,
+ .peak_bw = 0,
+ },
+ {
+ .name = "ppe_cfg",
+ .avg_bw = 0,
+ .peak_bw = 0,
+ },
+ {
+ .name = "qos_gen",
+ .avg_bw = 6000,
+ .peak_bw = 6000,
+ },
+ {
+ .name = "timeout_ref",
+ .avg_bw = 6000,
+ .peak_bw = 6000,
+ },
+ {
+ .name = "nssnoc_memnoc",
+ .avg_bw = 533333,
+ .peak_bw = 533333,
+ },
+ {
+ .name = "memnoc_nssnoc",
+ .avg_bw = 533333,
+ .peak_bw = 533333,
+ },
+ {
+ .name = "memnoc_nssnoc_1",
+ .avg_bw = 533333,
+ .peak_bw = 533333,
+ },
+};
+
+static const struct regmap_range ppe_readable_ranges[] = {
+ regmap_reg_range(0x0, 0x1ff), /* Global */
+ regmap_reg_range(0x400, 0x5ff), /* LPI CSR */
+ regmap_reg_range(0x1000, 0x11ff), /* GMAC0 */
+ regmap_reg_range(0x1200, 0x13ff), /* GMAC1 */
+ regmap_reg_range(0x1400, 0x15ff), /* GMAC2 */
+ regmap_reg_range(0x1600, 0x17ff), /* GMAC3 */
+ regmap_reg_range(0x1800, 0x19ff), /* GMAC4 */
+ regmap_reg_range(0x1a00, 0x1bff), /* GMAC5 */
+ regmap_reg_range(0xb000, 0xefff), /* PRX CSR */
+ regmap_reg_range(0xf000, 0x1efff), /* IPE */
+ regmap_reg_range(0x20000, 0x5ffff), /* PTX CSR */
+ regmap_reg_range(0x60000, 0x9ffff), /* IPE L2 CSR */
+ regmap_reg_range(0xb0000, 0xeffff), /* IPO CSR */
+ regmap_reg_range(0x100000, 0x17ffff), /* IPE PC */
+ regmap_reg_range(0x180000, 0x1bffff), /* PRE IPO CSR */
+ regmap_reg_range(0x1d0000, 0x1dffff), /* Tunnel parser */
+ regmap_reg_range(0x1e0000, 0x1effff), /* Ingress parse */
+ regmap_reg_range(0x200000, 0x2fffff), /* IPE L3 */
+ regmap_reg_range(0x300000, 0x3fffff), /* IPE tunnel */
+ regmap_reg_range(0x400000, 0x4fffff), /* Scheduler */
+ regmap_reg_range(0x500000, 0x503fff), /* XGMAC0 */
+ regmap_reg_range(0x504000, 0x507fff), /* XGMAC1 */
+ regmap_reg_range(0x508000, 0x50bfff), /* XGMAC2 */
+ regmap_reg_range(0x50c000, 0x50ffff), /* XGMAC3 */
+ regmap_reg_range(0x510000, 0x513fff), /* XGMAC4 */
+ regmap_reg_range(0x514000, 0x517fff), /* XGMAC5 */
+ regmap_reg_range(0x600000, 0x6fffff), /* BM */
+ regmap_reg_range(0x800000, 0x9fffff), /* QM */
+ regmap_reg_range(0xb00000, 0xbef800), /* EDMA */
+};
+
+static const struct regmap_access_table ppe_reg_table = {
+ .yes_ranges = ppe_readable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(ppe_readable_ranges),
+};
+
+static const struct regmap_config regmap_config_ipq9574 = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .rd_table = &ppe_reg_table,
+ .wr_table = &ppe_reg_table,
+ .max_register = 0xbef800,
+ .fast_io = true,
+};
+
+static int ppe_clock_init_and_reset(struct ppe_device *ppe_dev)
+{
+ unsigned long ppe_rate = ppe_dev->clk_rate;
+ struct device *dev = ppe_dev->dev;
+ struct reset_control *rstc;
+ struct clk_bulk_data *clks;
+ struct clk *clk;
+ int ret, i;
+
+ for (i = 0; i < ppe_dev->num_icc_paths; i++) {
+ ppe_dev->icc_paths[i].name = ppe_icc_data[i].name;
+ ppe_dev->icc_paths[i].avg_bw = ppe_icc_data[i].avg_bw ? :
+ Bps_to_icc(ppe_rate);
+
+ /* PPE does not have an explicit peak bandwidth requirement,
+ * so set the peak bandwidth to be equal to the average
+ * bandwidth.
+ */
+ ppe_dev->icc_paths[i].peak_bw = ppe_icc_data[i].peak_bw ? :
+ Bps_to_icc(ppe_rate);
+ }
+
+ ret = devm_of_icc_bulk_get(dev, ppe_dev->num_icc_paths,
+ ppe_dev->icc_paths);
+ if (ret)
+ return ret;
+
+ ret = icc_bulk_set_bw(ppe_dev->num_icc_paths, ppe_dev->icc_paths);
+ if (ret)
+ return ret;
+
+ /* The PPE clocks have a common parent clock. Setting the clock
+ * rate of "ppe" ensures the clock rate of all PPE clocks is
+ * configured to the same rate.
+ */
+ clk = devm_clk_get(dev, "ppe");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ ret = clk_set_rate(clk, ppe_rate);
+ if (ret)
+ return ret;
+
+ ret = devm_clk_bulk_get_all_enabled(dev, &clks);
+ if (ret < 0)
+ return ret;
+
+ /* Reset the PPE. */
+ rstc = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(rstc))
+ return PTR_ERR(rstc);
+
+ ret = reset_control_assert(rstc);
+ if (ret)
+ return ret;
+
+	/* A 10 ms delay with the reset asserted is required to reset the PPE. */
+ usleep_range(10000, 11000);
+
+ return reset_control_deassert(rstc);
+}
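
The "?:" fallback above relies on the GNU Elvis extension: a zero table
entry falls back to a bandwidth derived from the PPE clock rate. A minimal
user-space sketch of that selection, assuming Bps_to_icc() divides
bytes/s by 1000 as in include/linux/interconnect.h:

#include <stdio.h>
#include <stdint.h>

/* Stand-in for the kernel's Bps_to_icc() (bytes/s -> kB/s). */
#define Bps_to_icc(x) ((x) / 1000)

int main(void)
{
	const uint32_t table_avg_bw[] = { 0, 6000, 533333 };	/* sample entries */
	const unsigned long ppe_rate = 353000000;		/* PPE_CLK_RATE */

	for (int i = 0; i < 3; i++) {
		/* Keep a non-zero table value, else derive from the clock. */
		uint32_t bw = table_avg_bw[i] ? table_avg_bw[i]
					      : Bps_to_icc(ppe_rate);
		printf("path %d: avg_bw = %u kB/s\n", i, bw);
	}
	return 0;
}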
+
+static int qcom_ppe_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ppe_device *ppe_dev;
+ void __iomem *base;
+ int ret, num_icc;
+
+ num_icc = ARRAY_SIZE(ppe_icc_data);
+ ppe_dev = devm_kzalloc(dev, struct_size(ppe_dev, icc_paths, num_icc),
+ GFP_KERNEL);
+ if (!ppe_dev)
+ return -ENOMEM;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return dev_err_probe(dev, PTR_ERR(base), "PPE ioremap failed\n");
+
+ ppe_dev->regmap = devm_regmap_init_mmio(dev, base, &regmap_config_ipq9574);
+ if (IS_ERR(ppe_dev->regmap))
+ return dev_err_probe(dev, PTR_ERR(ppe_dev->regmap),
+ "PPE initialize regmap failed\n");
+ ppe_dev->dev = dev;
+ ppe_dev->clk_rate = PPE_CLK_RATE;
+ ppe_dev->num_ports = PPE_PORT_MAX;
+ ppe_dev->num_icc_paths = num_icc;
+
+ ret = ppe_clock_init_and_reset(ppe_dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "PPE clock config failed\n");
+
+ ret = ppe_hw_config(ppe_dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "PPE HW config failed\n");
+
+ ppe_debugfs_setup(ppe_dev);
+ platform_set_drvdata(pdev, ppe_dev);
+
+ return 0;
+}
+
+static void qcom_ppe_remove(struct platform_device *pdev)
+{
+ struct ppe_device *ppe_dev;
+
+ ppe_dev = platform_get_drvdata(pdev);
+ ppe_debugfs_teardown(ppe_dev);
+}
+
+static const struct of_device_id qcom_ppe_of_match[] = {
+ { .compatible = "qcom,ipq9574-ppe" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_ppe_of_match);
+
+static struct platform_driver qcom_ppe_driver = {
+ .driver = {
+ .name = "qcom_ppe",
+ .of_match_table = qcom_ppe_of_match,
+ },
+ .probe = qcom_ppe_probe,
+ .remove = qcom_ppe_remove,
+};
+module_platform_driver(qcom_ppe_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. IPQ PPE driver");
diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe.h b/drivers/net/ethernet/qualcomm/ppe/ppe.h
new file mode 100644
index 000000000000..27458f0bc206
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef __PPE_H__
+#define __PPE_H__
+
+#include <linux/compiler.h>
+#include <linux/interconnect.h>
+
+struct device;
+struct regmap;
+struct dentry;
+
+/**
+ * struct ppe_device - PPE device private data.
+ * @dev: PPE device structure.
+ * @regmap: PPE register map.
+ * @clk_rate: PPE clock rate.
+ * @num_ports: Number of PPE ports.
+ * @debugfs_root: Debugfs root entry.
+ * @num_icc_paths: Number of interconnect paths.
+ * @icc_paths: Interconnect path array.
+ *
+ * PPE device is the instance of PPE hardware, which is used to
+ * configure PPE packet process modules such as BM (buffer management),
+ * QM (queue management), and scheduler.
+ */
+struct ppe_device {
+ struct device *dev;
+ struct regmap *regmap;
+ unsigned long clk_rate;
+ unsigned int num_ports;
+ struct dentry *debugfs_root;
+ unsigned int num_icc_paths;
+ struct icc_bulk_data icc_paths[] __counted_by(num_icc_paths);
+};
+#endif
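
The icc_paths flexible array above is annotated with __counted_by(), and
the probe routine sizes the allocation with struct_size(). A user-space
sketch of the equivalent size arithmetic, with simplified stand-in field
types (struct_size() additionally guards against overflow):

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-in mirroring the flexible-array layout. */
struct icc_path_stub {
	const char *name;
	unsigned int avg_bw, peak_bw;
};

struct ppe_device_stub {
	void *dev, *regmap;
	unsigned long clk_rate;
	unsigned int num_ports, num_icc_paths;
	struct icc_path_stub icc_paths[];	/* __counted_by(num_icc_paths) */
};

int main(void)
{
	size_t n = 7;	/* ARRAY_SIZE(ppe_icc_data) in this patch */

	/* Equivalent of struct_size(ppe_dev, icc_paths, n), as passed
	 * to devm_kzalloc() in qcom_ppe_probe().
	 */
	size_t bytes = sizeof(struct ppe_device_stub) +
		       n * sizeof(struct icc_path_stub);
	printf("allocation size: %zu bytes\n", bytes);
	return 0;
}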
diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_config.c b/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
new file mode 100644
index 000000000000..e9a0e22907a6
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
@@ -0,0 +1,2034 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+/* PPE HW initialization configs such as BM (buffer management),
+ * QM (queue management) and scheduler configs.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/regmap.h>
+
+#include "ppe.h"
+#include "ppe_config.h"
+#include "ppe_regs.h"
+
+#define PPE_QUEUE_SCH_PRI_NUM 8
+
+/**
+ * struct ppe_bm_port_config - PPE BM port configuration.
+ * @port_id_start: The first BM port ID to configure.
+ * @port_id_end: The last BM port ID to configure.
+ * @pre_alloc: BM port dedicated buffer number.
+ * @in_fly_buf: Number of buffers for receiving packets after a pause frame is sent.
+ * @ceil: Ceiling at which back pressure is generated.
+ * @weight: Weight value.
+ * @resume_offset: Resume offset from the threshold value.
+ * @resume_ceil: Ceiling at which to resume from the back pressure state.
+ * @dynamic: Whether the dynamic threshold is used.
+ *
+ * This configures the thresholds that govern the port flow control.
+ */
+struct ppe_bm_port_config {
+ unsigned int port_id_start;
+ unsigned int port_id_end;
+ unsigned int pre_alloc;
+ unsigned int in_fly_buf;
+ unsigned int ceil;
+ unsigned int weight;
+ unsigned int resume_offset;
+ unsigned int resume_ceil;
+ bool dynamic;
+};
+
+/**
+ * struct ppe_qm_queue_config - PPE queue config.
+ * @queue_start: PPE start of queue ID.
+ * @queue_end: PPE end of queue ID.
+ * @prealloc_buf: Queue dedicated buffer number.
+ * @ceil: Ceiling at which packets start to be dropped from the queue.
+ * @weight: Weight value.
+ * @resume_offset: Resume offset from the threshold.
+ * @dynamic: Threshold value is decided dynamically or statically.
+ *
+ * The queue configuration decides the threshold for dropping packets
+ * from the PPE hardware queue.
+ */
+struct ppe_qm_queue_config {
+ unsigned int queue_start;
+ unsigned int queue_end;
+ unsigned int prealloc_buf;
+ unsigned int ceil;
+ unsigned int weight;
+ unsigned int resume_offset;
+ bool dynamic;
+};
+
+/**
+ * enum ppe_scheduler_direction - PPE scheduler direction for packet.
+ * @PPE_SCH_INGRESS: Scheduler for the packet on ingress.
+ * @PPE_SCH_EGRESS: Scheduler for the packet on egress.
+ */
+enum ppe_scheduler_direction {
+ PPE_SCH_INGRESS = 0,
+ PPE_SCH_EGRESS = 1,
+};
+
+/**
+ * struct ppe_scheduler_bm_config - PPE arbitration for buffer config.
+ * @valid: Arbitration entry valid or not.
+ * @dir: Arbitration entry for egress or ingress.
+ * @port: Port ID to use arbitration entry.
+ * @backup_port_valid: Backup port valid or not.
+ * @backup_port: Backup port ID to use.
+ *
+ * Configure the scheduler settings for accessing and releasing the PPE buffers.
+ */
+struct ppe_scheduler_bm_config {
+ bool valid;
+ enum ppe_scheduler_direction dir;
+ unsigned int port;
+ bool backup_port_valid;
+ unsigned int backup_port;
+};
+
+/**
+ * struct ppe_scheduler_qm_config - PPE arbitration for scheduler config.
+ * @ensch_port_bmp: Port bit map for enqueue scheduler.
+ * @ensch_port: Port ID to enqueue scheduler.
+ * @desch_port: Port ID to dequeue scheduler.
+ * @desch_backup_port_valid: Dequeue for the backup port valid or not.
+ * @desch_backup_port: Backup port ID to dequeue scheduler.
+ *
+ * Configure the scheduler settings for enqueuing and dequeuing packets on
+ * the PPE port.
+ */
+struct ppe_scheduler_qm_config {
+ unsigned int ensch_port_bmp;
+ unsigned int ensch_port;
+ unsigned int desch_port;
+ bool desch_backup_port_valid;
+ unsigned int desch_backup_port;
+};
+
+/**
+ * struct ppe_scheduler_port_config - PPE port scheduler config.
+ * @port: Port ID to be scheduled.
+ * @flow_level: Scheduler flow level or not.
+ * @node_id: Node ID; for level 0, the queue ID is used.
+ * @loop_num: Loop number of scheduler config.
+ * @pri_max: Max priority configured.
+ * @flow_id: Strict priority ID.
+ * @drr_node_id: Node ID for scheduler.
+ *
+ * PPE port scheduler configuration which decides the priority in the
+ * packet scheduler for the egress port.
+ */
+struct ppe_scheduler_port_config {
+ unsigned int port;
+ bool flow_level;
+ unsigned int node_id;
+ unsigned int loop_num;
+ unsigned int pri_max;
+ unsigned int flow_id;
+ unsigned int drr_node_id;
+};
+
+/**
+ * struct ppe_port_schedule_resource - PPE port scheduler resource.
+ * @ucastq_start: Unicast queue start ID.
+ * @ucastq_end: Unicast queue end ID.
+ * @mcastq_start: Multicast queue start ID.
+ * @mcastq_end: Multicast queue end ID.
+ * @flow_id_start: Flow start ID.
+ * @flow_id_end: Flow end ID.
+ * @l0node_start: Scheduler node start ID for queue level.
+ * @l0node_end: Scheduler node end ID for queue level.
+ * @l1node_start: Scheduler node start ID for flow level.
+ * @l1node_end: Scheduler node end ID for flow level.
+ *
+ * PPE scheduler resource allocated among the PPE ports.
+ */
+struct ppe_port_schedule_resource {
+ unsigned int ucastq_start;
+ unsigned int ucastq_end;
+ unsigned int mcastq_start;
+ unsigned int mcastq_end;
+ unsigned int flow_id_start;
+ unsigned int flow_id_end;
+ unsigned int l0node_start;
+ unsigned int l0node_end;
+ unsigned int l1node_start;
+ unsigned int l1node_end;
+};
+
+/* There are a total of 2048 buffers available in the PPE, some of which
+ * are reserved for specific purposes per PPE port. The remaining pool of
+ * 1550 buffers is assigned to the general 'group0', which is shared
+ * among all ports of the PPE.
+ */
+static const int ipq9574_ppe_bm_group_config = 1550;
+
+/* The buffer configurations per PPE port. There are 15 BM ports and
+ * 4 BM groups supported by the PPE. BM ports 0-7 are for EDMA port 0,
+ * BM ports 8-13 are for PPE physical ports 1-6 and BM port 14 is for
+ * the EIP port.
+ */
+static const struct ppe_bm_port_config ipq9574_ppe_bm_port_config[] = {
+ {
+ /* Buffer configuration for the BM port ID 0 of EDMA. */
+ .port_id_start = 0,
+ .port_id_end = 0,
+ .pre_alloc = 0,
+ .in_fly_buf = 100,
+ .ceil = 1146,
+ .weight = 7,
+ .resume_offset = 8,
+ .resume_ceil = 0,
+ .dynamic = true,
+ },
+ {
+ /* Buffer configuration for the BM port ID 1-7 of EDMA. */
+ .port_id_start = 1,
+ .port_id_end = 7,
+ .pre_alloc = 0,
+ .in_fly_buf = 100,
+ .ceil = 250,
+ .weight = 4,
+ .resume_offset = 36,
+ .resume_ceil = 0,
+ .dynamic = true,
+ },
+ {
+ /* Buffer configuration for the BM port ID 8-13 of PPE ports. */
+ .port_id_start = 8,
+ .port_id_end = 13,
+ .pre_alloc = 0,
+ .in_fly_buf = 128,
+ .ceil = 250,
+ .weight = 4,
+ .resume_offset = 36,
+ .resume_ceil = 0,
+ .dynamic = true,
+ },
+ {
+ /* Buffer configuration for the BM port ID 14 of EIP. */
+ .port_id_start = 14,
+ .port_id_end = 14,
+ .pre_alloc = 0,
+ .in_fly_buf = 40,
+ .ceil = 250,
+ .weight = 4,
+ .resume_offset = 36,
+ .resume_ceil = 0,
+ .dynamic = true,
+ },
+};
+
+/* QM fetches packets from the PPE buffer management for transmission.
+ * The QM group configuration limits the total number of buffers
+ * enqueued by all PPE hardware queues.
+ * There are a total of 2048 buffers available, out of which some are
+ * dedicated to hardware exception handlers. The remaining buffers are
+ * assigned to the general 'group0', which is the group assigned to all
+ * queues by default.
+ */
+static const int ipq9574_ppe_qm_group_config = 2000;
+
+/* Default QM settings for unicast and multicast queues for IPQ9574. */
+static const struct ppe_qm_queue_config ipq9574_ppe_qm_queue_config[] = {
+ {
+ /* QM settings for unicast queues 0 to 255. */
+ .queue_start = 0,
+ .queue_end = 255,
+ .prealloc_buf = 0,
+ .ceil = 1200,
+ .weight = 7,
+ .resume_offset = 36,
+ .dynamic = true,
+ },
+ {
+ /* QM settings for multicast queues 256 to 299. */
+ .queue_start = 256,
+ .queue_end = 299,
+ .prealloc_buf = 0,
+ .ceil = 250,
+ .weight = 0,
+ .resume_offset = 36,
+ .dynamic = false,
+ },
+};
+
+/* The PPE scheduler configuration for BM includes multiple entries. Each
+ * entry indicates the primary port to which buffers are assigned on
+ * ingress, or from which buffers are released on egress. The backup port
+ * ID is used when the primary port is down.
+ */
+static const struct ppe_scheduler_bm_config ipq9574_ppe_sch_bm_config[] = {
+ {true, PPE_SCH_INGRESS, 0, false, 0},
+ {true, PPE_SCH_EGRESS, 0, false, 0},
+ {true, PPE_SCH_INGRESS, 5, false, 0},
+ {true, PPE_SCH_EGRESS, 5, false, 0},
+ {true, PPE_SCH_INGRESS, 6, false, 0},
+ {true, PPE_SCH_EGRESS, 6, false, 0},
+ {true, PPE_SCH_INGRESS, 1, false, 0},
+ {true, PPE_SCH_EGRESS, 1, false, 0},
+ {true, PPE_SCH_INGRESS, 0, false, 0},
+ {true, PPE_SCH_EGRESS, 0, false, 0},
+ {true, PPE_SCH_INGRESS, 5, false, 0},
+ {true, PPE_SCH_EGRESS, 5, false, 0},
+ {true, PPE_SCH_INGRESS, 6, false, 0},
+ {true, PPE_SCH_EGRESS, 6, false, 0},
+ {true, PPE_SCH_INGRESS, 7, false, 0},
+ {true, PPE_SCH_EGRESS, 7, false, 0},
+ {true, PPE_SCH_INGRESS, 0, false, 0},
+ {true, PPE_SCH_EGRESS, 0, false, 0},
+ {true, PPE_SCH_INGRESS, 1, false, 0},
+ {true, PPE_SCH_EGRESS, 1, false, 0},
+ {true, PPE_SCH_INGRESS, 5, false, 0},
+ {true, PPE_SCH_EGRESS, 5, false, 0},
+ {true, PPE_SCH_INGRESS, 6, false, 0},
+ {true, PPE_SCH_EGRESS, 6, false, 0},
+ {true, PPE_SCH_INGRESS, 2, false, 0},
+ {true, PPE_SCH_EGRESS, 2, false, 0},
+ {true, PPE_SCH_INGRESS, 0, false, 0},
+ {true, PPE_SCH_EGRESS, 0, false, 0},
+ {true, PPE_SCH_INGRESS, 5, false, 0},
+ {true, PPE_SCH_EGRESS, 5, false, 0},
+ {true, PPE_SCH_INGRESS, 6, false, 0},
+ {true, PPE_SCH_EGRESS, 6, false, 0},
+ {true, PPE_SCH_INGRESS, 1, false, 0},
+ {true, PPE_SCH_EGRESS, 1, false, 0},
+ {true, PPE_SCH_INGRESS, 3, false, 0},
+ {true, PPE_SCH_EGRESS, 3, false, 0},
+ {true, PPE_SCH_INGRESS, 0, false, 0},
+ {true, PPE_SCH_EGRESS, 0, false, 0},
+ {true, PPE_SCH_INGRESS, 5, false, 0},
+ {true, PPE_SCH_EGRESS, 5, false, 0},
+ {true, PPE_SCH_INGRESS, 6, false, 0},
+ {true, PPE_SCH_EGRESS, 6, false, 0},
+ {true, PPE_SCH_INGRESS, 7, false, 0},
+ {true, PPE_SCH_EGRESS, 7, false, 0},
+ {true, PPE_SCH_INGRESS, 0, false, 0},
+ {true, PPE_SCH_EGRESS, 0, false, 0},
+ {true, PPE_SCH_INGRESS, 1, false, 0},
+ {true, PPE_SCH_EGRESS, 1, false, 0},
+ {true, PPE_SCH_INGRESS, 5, false, 0},
+ {true, PPE_SCH_EGRESS, 5, false, 0},
+ {true, PPE_SCH_INGRESS, 6, false, 0},
+ {true, PPE_SCH_EGRESS, 6, false, 0},
+ {true, PPE_SCH_INGRESS, 4, false, 0},
+ {true, PPE_SCH_EGRESS, 4, false, 0},
+ {true, PPE_SCH_INGRESS, 0, false, 0},
+ {true, PPE_SCH_EGRESS, 0, false, 0},
+ {true, PPE_SCH_INGRESS, 5, false, 0},
+ {true, PPE_SCH_EGRESS, 5, false, 0},
+ {true, PPE_SCH_INGRESS, 6, false, 0},
+ {true, PPE_SCH_EGRESS, 6, false, 0},
+ {true, PPE_SCH_INGRESS, 1, false, 0},
+ {true, PPE_SCH_EGRESS, 1, false, 0},
+ {true, PPE_SCH_INGRESS, 0, false, 0},
+ {true, PPE_SCH_EGRESS, 0, false, 0},
+ {true, PPE_SCH_INGRESS, 5, false, 0},
+ {true, PPE_SCH_EGRESS, 5, false, 0},
+ {true, PPE_SCH_INGRESS, 6, false, 0},
+ {true, PPE_SCH_EGRESS, 6, false, 0},
+ {true, PPE_SCH_INGRESS, 2, false, 0},
+ {true, PPE_SCH_EGRESS, 2, false, 0},
+ {true, PPE_SCH_INGRESS, 0, false, 0},
+ {true, PPE_SCH_EGRESS, 0, false, 0},
+ {true, PPE_SCH_INGRESS, 7, false, 0},
+ {true, PPE_SCH_EGRESS, 7, false, 0},
+ {true, PPE_SCH_INGRESS, 5, false, 0},
+ {true, PPE_SCH_EGRESS, 5, false, 0},
+ {true, PPE_SCH_INGRESS, 6, false, 0},
+ {true, PPE_SCH_EGRESS, 6, false, 0},
+ {true, PPE_SCH_INGRESS, 1, false, 0},
+ {true, PPE_SCH_EGRESS, 1, false, 0},
+ {true, PPE_SCH_INGRESS, 0, false, 0},
+ {true, PPE_SCH_EGRESS, 0, false, 0},
+ {true, PPE_SCH_INGRESS, 5, false, 0},
+ {true, PPE_SCH_EGRESS, 5, false, 0},
+ {true, PPE_SCH_INGRESS, 6, false, 0},
+ {true, PPE_SCH_EGRESS, 6, false, 0},
+ {true, PPE_SCH_INGRESS, 3, false, 0},
+ {true, PPE_SCH_EGRESS, 3, false, 0},
+ {true, PPE_SCH_INGRESS, 1, false, 0},
+ {true, PPE_SCH_EGRESS, 1, false, 0},
+ {true, PPE_SCH_INGRESS, 0, false, 0},
+ {true, PPE_SCH_EGRESS, 0, false, 0},
+ {true, PPE_SCH_INGRESS, 5, false, 0},
+ {true, PPE_SCH_EGRESS, 5, false, 0},
+ {true, PPE_SCH_INGRESS, 6, false, 0},
+ {true, PPE_SCH_EGRESS, 6, false, 0},
+ {true, PPE_SCH_INGRESS, 4, false, 0},
+ {true, PPE_SCH_EGRESS, 4, false, 0},
+ {true, PPE_SCH_INGRESS, 7, false, 0},
+ {true, PPE_SCH_EGRESS, 7, false, 0},
+};
+
+/* The PPE scheduler configuration for QM includes multiple entries. Each
+ * entry contains the ports dispatched for enqueueing and dequeueing. The
+ * backup dequeue port is used when the primary dequeue port is down.
+ */
+static const struct ppe_scheduler_qm_config ipq9574_ppe_sch_qm_config[] = {
+ {0x98, 6, 0, true, 1},
+ {0x94, 5, 6, true, 3},
+ {0x86, 0, 5, true, 4},
+ {0x8C, 1, 6, true, 0},
+ {0x1C, 7, 5, true, 1},
+ {0x98, 2, 6, true, 0},
+ {0x1C, 5, 7, true, 1},
+ {0x34, 3, 6, true, 0},
+ {0x8C, 4, 5, true, 1},
+ {0x98, 2, 6, true, 0},
+ {0x8C, 5, 4, true, 1},
+ {0xA8, 0, 6, true, 2},
+ {0x98, 5, 1, true, 0},
+ {0x98, 6, 5, true, 2},
+ {0x89, 1, 6, true, 4},
+ {0xA4, 3, 0, true, 1},
+ {0x8C, 5, 6, true, 4},
+ {0xA8, 0, 2, true, 1},
+ {0x98, 6, 5, true, 0},
+ {0xC4, 4, 3, true, 1},
+ {0x94, 6, 5, true, 0},
+ {0x1C, 7, 6, true, 1},
+ {0x98, 2, 5, true, 0},
+ {0x1C, 6, 7, true, 1},
+ {0x1C, 5, 6, true, 0},
+ {0x94, 3, 5, true, 1},
+ {0x8C, 4, 6, true, 0},
+ {0x94, 1, 5, true, 3},
+ {0x94, 6, 1, true, 0},
+ {0xD0, 3, 5, true, 2},
+ {0x98, 6, 0, true, 1},
+ {0x94, 5, 6, true, 3},
+ {0x94, 1, 5, true, 0},
+ {0x98, 2, 6, true, 1},
+ {0x8C, 4, 5, true, 0},
+ {0x1C, 7, 6, true, 1},
+ {0x8C, 0, 5, true, 4},
+ {0x89, 1, 6, true, 2},
+ {0x98, 5, 0, true, 1},
+ {0x94, 6, 5, true, 3},
+ {0x92, 0, 6, true, 2},
+ {0x98, 1, 5, true, 0},
+ {0x98, 6, 2, true, 1},
+ {0xD0, 0, 5, true, 3},
+ {0x94, 6, 0, true, 1},
+ {0x8C, 5, 6, true, 4},
+ {0x8C, 1, 5, true, 0},
+ {0x1C, 6, 7, true, 1},
+ {0x1C, 5, 6, true, 0},
+ {0xB0, 2, 3, true, 1},
+ {0xC4, 4, 5, true, 0},
+ {0x8C, 6, 4, true, 1},
+ {0xA4, 3, 6, true, 0},
+ {0x1C, 5, 7, true, 1},
+ {0x4C, 0, 5, true, 4},
+ {0x8C, 6, 0, true, 1},
+ {0x34, 7, 6, true, 3},
+ {0x94, 5, 0, true, 1},
+ {0x98, 6, 5, true, 2},
+};
+
+static const struct ppe_scheduler_port_config ppe_port_sch_config[] = {
+ {
+ .port = 0,
+ .flow_level = true,
+ .node_id = 0,
+ .loop_num = 1,
+ .pri_max = 1,
+ .flow_id = 0,
+ .drr_node_id = 0,
+ },
+ {
+ .port = 0,
+ .flow_level = false,
+ .node_id = 0,
+ .loop_num = 8,
+ .pri_max = 8,
+ .flow_id = 0,
+ .drr_node_id = 0,
+ },
+ {
+ .port = 0,
+ .flow_level = false,
+ .node_id = 8,
+ .loop_num = 8,
+ .pri_max = 8,
+ .flow_id = 0,
+ .drr_node_id = 0,
+ },
+ {
+ .port = 0,
+ .flow_level = false,
+ .node_id = 16,
+ .loop_num = 8,
+ .pri_max = 8,
+ .flow_id = 0,
+ .drr_node_id = 0,
+ },
+ {
+ .port = 0,
+ .flow_level = false,
+ .node_id = 24,
+ .loop_num = 8,
+ .pri_max = 8,
+ .flow_id = 0,
+ .drr_node_id = 0,
+ },
+ {
+ .port = 0,
+ .flow_level = false,
+ .node_id = 32,
+ .loop_num = 8,
+ .pri_max = 8,
+ .flow_id = 0,
+ .drr_node_id = 0,
+ },
+ {
+ .port = 0,
+ .flow_level = false,
+ .node_id = 40,
+ .loop_num = 8,
+ .pri_max = 8,
+ .flow_id = 0,
+ .drr_node_id = 0,
+ },
+ {
+ .port = 0,
+ .flow_level = false,
+ .node_id = 48,
+ .loop_num = 8,
+ .pri_max = 8,
+ .flow_id = 0,
+ .drr_node_id = 0,
+ },
+ {
+ .port = 0,
+ .flow_level = false,
+ .node_id = 56,
+ .loop_num = 8,
+ .pri_max = 8,
+ .flow_id = 0,
+ .drr_node_id = 0,
+ },
+ {
+ .port = 0,
+ .flow_level = false,
+ .node_id = 256,
+ .loop_num = 8,
+ .pri_max = 8,
+ .flow_id = 0,
+ .drr_node_id = 0,
+ },
+ {
+ .port = 0,
+ .flow_level = false,
+ .node_id = 264,
+ .loop_num = 8,
+ .pri_max = 8,
+ .flow_id = 0,
+ .drr_node_id = 0,
+ },
+ {
+ .port = 1,
+ .flow_level = true,
+ .node_id = 36,
+ .loop_num = 2,
+ .pri_max = 0,
+ .flow_id = 1,
+ .drr_node_id = 8,
+ },
+ {
+ .port = 1,
+ .flow_level = false,
+ .node_id = 144,
+ .loop_num = 16,
+ .pri_max = 8,
+ .flow_id = 36,
+ .drr_node_id = 48,
+ },
+ {
+ .port = 1,
+ .flow_level = false,
+ .node_id = 272,
+ .loop_num = 4,
+ .pri_max = 4,
+ .flow_id = 36,
+ .drr_node_id = 48,
+ },
+ {
+ .port = 2,
+ .flow_level = true,
+ .node_id = 40,
+ .loop_num = 2,
+ .pri_max = 0,
+ .flow_id = 2,
+ .drr_node_id = 12,
+ },
+ {
+ .port = 2,
+ .flow_level = false,
+ .node_id = 160,
+ .loop_num = 16,
+ .pri_max = 8,
+ .flow_id = 40,
+ .drr_node_id = 64,
+ },
+ {
+ .port = 2,
+ .flow_level = false,
+ .node_id = 276,
+ .loop_num = 4,
+ .pri_max = 4,
+ .flow_id = 40,
+ .drr_node_id = 64,
+ },
+ {
+ .port = 3,
+ .flow_level = true,
+ .node_id = 44,
+ .loop_num = 2,
+ .pri_max = 0,
+ .flow_id = 3,
+ .drr_node_id = 16,
+ },
+ {
+ .port = 3,
+ .flow_level = false,
+ .node_id = 176,
+ .loop_num = 16,
+ .pri_max = 8,
+ .flow_id = 44,
+ .drr_node_id = 80,
+ },
+ {
+ .port = 3,
+ .flow_level = false,
+ .node_id = 280,
+ .loop_num = 4,
+ .pri_max = 4,
+ .flow_id = 44,
+ .drr_node_id = 80,
+ },
+ {
+ .port = 4,
+ .flow_level = true,
+ .node_id = 48,
+ .loop_num = 2,
+ .pri_max = 0,
+ .flow_id = 4,
+ .drr_node_id = 20,
+ },
+ {
+ .port = 4,
+ .flow_level = false,
+ .node_id = 192,
+ .loop_num = 16,
+ .pri_max = 8,
+ .flow_id = 48,
+ .drr_node_id = 96,
+ },
+ {
+ .port = 4,
+ .flow_level = false,
+ .node_id = 284,
+ .loop_num = 4,
+ .pri_max = 4,
+ .flow_id = 48,
+ .drr_node_id = 96,
+ },
+ {
+ .port = 5,
+ .flow_level = true,
+ .node_id = 52,
+ .loop_num = 2,
+ .pri_max = 0,
+ .flow_id = 5,
+ .drr_node_id = 24,
+ },
+ {
+ .port = 5,
+ .flow_level = false,
+ .node_id = 208,
+ .loop_num = 16,
+ .pri_max = 8,
+ .flow_id = 52,
+ .drr_node_id = 112,
+ },
+ {
+ .port = 5,
+ .flow_level = false,
+ .node_id = 288,
+ .loop_num = 4,
+ .pri_max = 4,
+ .flow_id = 52,
+ .drr_node_id = 112,
+ },
+ {
+ .port = 6,
+ .flow_level = true,
+ .node_id = 56,
+ .loop_num = 2,
+ .pri_max = 0,
+ .flow_id = 6,
+ .drr_node_id = 28,
+ },
+ {
+ .port = 6,
+ .flow_level = false,
+ .node_id = 224,
+ .loop_num = 16,
+ .pri_max = 8,
+ .flow_id = 56,
+ .drr_node_id = 128,
+ },
+ {
+ .port = 6,
+ .flow_level = false,
+ .node_id = 292,
+ .loop_num = 4,
+ .pri_max = 4,
+ .flow_id = 56,
+ .drr_node_id = 128,
+ },
+ {
+ .port = 7,
+ .flow_level = true,
+ .node_id = 60,
+ .loop_num = 2,
+ .pri_max = 0,
+ .flow_id = 7,
+ .drr_node_id = 32,
+ },
+ {
+ .port = 7,
+ .flow_level = false,
+ .node_id = 240,
+ .loop_num = 16,
+ .pri_max = 8,
+ .flow_id = 60,
+ .drr_node_id = 144,
+ },
+ {
+ .port = 7,
+ .flow_level = false,
+ .node_id = 296,
+ .loop_num = 4,
+ .pri_max = 4,
+ .flow_id = 60,
+ .drr_node_id = 144,
+ },
+};
+
+/* Scheduler resources are assigned per PPE port. The resources
+ * include the unicast & multicast queues, flow nodes and DRR nodes.
+ */
+static const struct ppe_port_schedule_resource ppe_scheduler_res[] = {
+ { .ucastq_start = 0,
+ .ucastq_end = 63,
+ .mcastq_start = 256,
+ .mcastq_end = 271,
+ .flow_id_start = 0,
+ .flow_id_end = 0,
+ .l0node_start = 0,
+ .l0node_end = 7,
+ .l1node_start = 0,
+ .l1node_end = 0,
+ },
+ { .ucastq_start = 144,
+ .ucastq_end = 159,
+ .mcastq_start = 272,
+ .mcastq_end = 275,
+ .flow_id_start = 36,
+ .flow_id_end = 39,
+ .l0node_start = 48,
+ .l0node_end = 63,
+ .l1node_start = 8,
+ .l1node_end = 11,
+ },
+ { .ucastq_start = 160,
+ .ucastq_end = 175,
+ .mcastq_start = 276,
+ .mcastq_end = 279,
+ .flow_id_start = 40,
+ .flow_id_end = 43,
+ .l0node_start = 64,
+ .l0node_end = 79,
+ .l1node_start = 12,
+ .l1node_end = 15,
+ },
+ { .ucastq_start = 176,
+ .ucastq_end = 191,
+ .mcastq_start = 280,
+ .mcastq_end = 283,
+ .flow_id_start = 44,
+ .flow_id_end = 47,
+ .l0node_start = 80,
+ .l0node_end = 95,
+ .l1node_start = 16,
+ .l1node_end = 19,
+ },
+ { .ucastq_start = 192,
+ .ucastq_end = 207,
+ .mcastq_start = 284,
+ .mcastq_end = 287,
+ .flow_id_start = 48,
+ .flow_id_end = 51,
+ .l0node_start = 96,
+ .l0node_end = 111,
+ .l1node_start = 20,
+ .l1node_end = 23,
+ },
+ { .ucastq_start = 208,
+ .ucastq_end = 223,
+ .mcastq_start = 288,
+ .mcastq_end = 291,
+ .flow_id_start = 52,
+ .flow_id_end = 55,
+ .l0node_start = 112,
+ .l0node_end = 127,
+ .l1node_start = 24,
+ .l1node_end = 27,
+ },
+ { .ucastq_start = 224,
+ .ucastq_end = 239,
+ .mcastq_start = 292,
+ .mcastq_end = 295,
+ .flow_id_start = 56,
+ .flow_id_end = 59,
+ .l0node_start = 128,
+ .l0node_end = 143,
+ .l1node_start = 28,
+ .l1node_end = 31,
+ },
+ { .ucastq_start = 240,
+ .ucastq_end = 255,
+ .mcastq_start = 296,
+ .mcastq_end = 299,
+ .flow_id_start = 60,
+ .flow_id_end = 63,
+ .l0node_start = 144,
+ .l0node_end = 159,
+ .l1node_start = 32,
+ .l1node_end = 35,
+ },
+ { .ucastq_start = 64,
+ .ucastq_end = 143,
+ .mcastq_start = 0,
+ .mcastq_end = 0,
+ .flow_id_start = 1,
+ .flow_id_end = 35,
+ .l0node_start = 8,
+ .l0node_end = 47,
+ .l1node_start = 1,
+ .l1node_end = 7,
+ },
+};
+
+/* Set the PPE queue level scheduler configuration. */
+static int ppe_scheduler_l0_queue_map_set(struct ppe_device *ppe_dev,
+ int node_id, int port,
+ struct ppe_scheduler_cfg scheduler_cfg)
+{
+ u32 val, reg;
+ int ret;
+
+ reg = PPE_L0_FLOW_MAP_TBL_ADDR + node_id * PPE_L0_FLOW_MAP_TBL_INC;
+ val = FIELD_PREP(PPE_L0_FLOW_MAP_TBL_FLOW_ID, scheduler_cfg.flow_id);
+ val |= FIELD_PREP(PPE_L0_FLOW_MAP_TBL_C_PRI, scheduler_cfg.pri);
+ val |= FIELD_PREP(PPE_L0_FLOW_MAP_TBL_E_PRI, scheduler_cfg.pri);
+ val |= FIELD_PREP(PPE_L0_FLOW_MAP_TBL_C_NODE_WT, scheduler_cfg.drr_node_wt);
+ val |= FIELD_PREP(PPE_L0_FLOW_MAP_TBL_E_NODE_WT, scheduler_cfg.drr_node_wt);
+
+ ret = regmap_write(ppe_dev->regmap, reg, val);
+ if (ret)
+ return ret;
+
+ reg = PPE_L0_C_FLOW_CFG_TBL_ADDR +
+ (scheduler_cfg.flow_id * PPE_QUEUE_SCH_PRI_NUM + scheduler_cfg.pri) *
+ PPE_L0_C_FLOW_CFG_TBL_INC;
+ val = FIELD_PREP(PPE_L0_C_FLOW_CFG_TBL_NODE_ID, scheduler_cfg.drr_node_id);
+ val |= FIELD_PREP(PPE_L0_C_FLOW_CFG_TBL_NODE_CREDIT_UNIT, scheduler_cfg.unit_is_packet);
+
+ ret = regmap_write(ppe_dev->regmap, reg, val);
+ if (ret)
+ return ret;
+
+ reg = PPE_L0_E_FLOW_CFG_TBL_ADDR +
+ (scheduler_cfg.flow_id * PPE_QUEUE_SCH_PRI_NUM + scheduler_cfg.pri) *
+ PPE_L0_E_FLOW_CFG_TBL_INC;
+ val = FIELD_PREP(PPE_L0_E_FLOW_CFG_TBL_NODE_ID, scheduler_cfg.drr_node_id);
+ val |= FIELD_PREP(PPE_L0_E_FLOW_CFG_TBL_NODE_CREDIT_UNIT, scheduler_cfg.unit_is_packet);
+
+ ret = regmap_write(ppe_dev->regmap, reg, val);
+ if (ret)
+ return ret;
+
+ reg = PPE_L0_FLOW_PORT_MAP_TBL_ADDR + node_id * PPE_L0_FLOW_PORT_MAP_TBL_INC;
+ val = FIELD_PREP(PPE_L0_FLOW_PORT_MAP_TBL_PORT_NUM, port);
+
+ ret = regmap_write(ppe_dev->regmap, reg, val);
+ if (ret)
+ return ret;
+
+ reg = PPE_L0_COMP_CFG_TBL_ADDR + node_id * PPE_L0_COMP_CFG_TBL_INC;
+ val = FIELD_PREP(PPE_L0_COMP_CFG_TBL_NODE_METER_LEN, scheduler_cfg.frame_mode);
+
+ return regmap_update_bits(ppe_dev->regmap, reg,
+ PPE_L0_COMP_CFG_TBL_NODE_METER_LEN,
+ val);
+}
+
+/* Set the PPE flow level scheduler configuration. */
+static int ppe_scheduler_l1_queue_map_set(struct ppe_device *ppe_dev,
+ int node_id, int port,
+ struct ppe_scheduler_cfg scheduler_cfg)
+{
+ u32 val, reg;
+ int ret;
+
+ val = FIELD_PREP(PPE_L1_FLOW_MAP_TBL_FLOW_ID, scheduler_cfg.flow_id);
+ val |= FIELD_PREP(PPE_L1_FLOW_MAP_TBL_C_PRI, scheduler_cfg.pri);
+ val |= FIELD_PREP(PPE_L1_FLOW_MAP_TBL_E_PRI, scheduler_cfg.pri);
+ val |= FIELD_PREP(PPE_L1_FLOW_MAP_TBL_C_NODE_WT, scheduler_cfg.drr_node_wt);
+ val |= FIELD_PREP(PPE_L1_FLOW_MAP_TBL_E_NODE_WT, scheduler_cfg.drr_node_wt);
+ reg = PPE_L1_FLOW_MAP_TBL_ADDR + node_id * PPE_L1_FLOW_MAP_TBL_INC;
+
+ ret = regmap_write(ppe_dev->regmap, reg, val);
+ if (ret)
+ return ret;
+
+ val = FIELD_PREP(PPE_L1_C_FLOW_CFG_TBL_NODE_ID, scheduler_cfg.drr_node_id);
+ val |= FIELD_PREP(PPE_L1_C_FLOW_CFG_TBL_NODE_CREDIT_UNIT, scheduler_cfg.unit_is_packet);
+ reg = PPE_L1_C_FLOW_CFG_TBL_ADDR +
+ (scheduler_cfg.flow_id * PPE_QUEUE_SCH_PRI_NUM + scheduler_cfg.pri) *
+ PPE_L1_C_FLOW_CFG_TBL_INC;
+
+ ret = regmap_write(ppe_dev->regmap, reg, val);
+ if (ret)
+ return ret;
+
+ val = FIELD_PREP(PPE_L1_E_FLOW_CFG_TBL_NODE_ID, scheduler_cfg.drr_node_id);
+ val |= FIELD_PREP(PPE_L1_E_FLOW_CFG_TBL_NODE_CREDIT_UNIT, scheduler_cfg.unit_is_packet);
+ reg = PPE_L1_E_FLOW_CFG_TBL_ADDR +
+ (scheduler_cfg.flow_id * PPE_QUEUE_SCH_PRI_NUM + scheduler_cfg.pri) *
+ PPE_L1_E_FLOW_CFG_TBL_INC;
+
+ ret = regmap_write(ppe_dev->regmap, reg, val);
+ if (ret)
+ return ret;
+
+ val = FIELD_PREP(PPE_L1_FLOW_PORT_MAP_TBL_PORT_NUM, port);
+ reg = PPE_L1_FLOW_PORT_MAP_TBL_ADDR + node_id * PPE_L1_FLOW_PORT_MAP_TBL_INC;
+
+ ret = regmap_write(ppe_dev->regmap, reg, val);
+ if (ret)
+ return ret;
+
+ reg = PPE_L1_COMP_CFG_TBL_ADDR + node_id * PPE_L1_COMP_CFG_TBL_INC;
+ val = FIELD_PREP(PPE_L1_COMP_CFG_TBL_NODE_METER_LEN, scheduler_cfg.frame_mode);
+
+ return regmap_update_bits(ppe_dev->regmap, reg, PPE_L1_COMP_CFG_TBL_NODE_METER_LEN, val);
+}
+
+/**
+ * ppe_queue_scheduler_set - Configure scheduler for PPE hardware queue
+ * @ppe_dev: PPE device
+ * @node_id: PPE queue ID or flow ID
+ * @flow_level: Flow level scheduler or queue level scheduler
+ * @port: PPE port ID for which the scheduler is configured
+ * @scheduler_cfg: PPE scheduler configuration
+ *
+ * PPE scheduler configuration supports queue level and flow level on
+ * the PPE egress port.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int ppe_queue_scheduler_set(struct ppe_device *ppe_dev,
+ int node_id, bool flow_level, int port,
+ struct ppe_scheduler_cfg scheduler_cfg)
+{
+ if (flow_level)
+ return ppe_scheduler_l1_queue_map_set(ppe_dev, node_id,
+ port, scheduler_cfg);
+
+ return ppe_scheduler_l0_queue_map_set(ppe_dev, node_id,
+ port, scheduler_cfg);
+}
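
Both flow-config tables above are indexed by
(flow_id * PPE_QUEUE_SCH_PRI_NUM + pri). A small user-space sketch of that
indexing, reusing the PPE_QUEUE_SCH_PRI_NUM value of 8 defined earlier in
this file; the table base address and stride are hypothetical placeholders
for the ppe_regs.h constants:

#include <stdio.h>

#define PPE_QUEUE_SCH_PRI_NUM	8

int main(void)
{
	/* Placeholder base/stride; the real values live in ppe_regs.h. */
	const unsigned int tbl_addr = 0x0, tbl_inc = 0x10;
	const int flow_id = 36, pri = 3;

	unsigned int entry = flow_id * PPE_QUEUE_SCH_PRI_NUM + pri;
	unsigned int reg = tbl_addr + entry * tbl_inc;

	printf("flow %d pri %d -> entry %u, reg offset %#x\n",
	       flow_id, pri, entry, reg);
	return 0;
}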
+
+/**
+ * ppe_queue_ucast_base_set - Set PPE unicast queue base ID and profile ID
+ * @ppe_dev: PPE device
+ * @queue_dst: PPE queue destination configuration
+ * @queue_base: PPE queue base ID
+ * @profile_id: Profile ID
+ *
+ * The PPE unicast queue base ID and profile ID are configured based on the
+ * destination information, which can be a service code, a CPU code or the
+ * destination port.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int ppe_queue_ucast_base_set(struct ppe_device *ppe_dev,
+ struct ppe_queue_ucast_dest queue_dst,
+ int queue_base, int profile_id)
+{
+ int index, profile_size;
+ u32 val, reg;
+
+ profile_size = queue_dst.src_profile << 8;
+ if (queue_dst.service_code_en)
+ index = PPE_QUEUE_BASE_SERVICE_CODE + profile_size +
+ queue_dst.service_code;
+ else if (queue_dst.cpu_code_en)
+ index = PPE_QUEUE_BASE_CPU_CODE + profile_size +
+ queue_dst.cpu_code;
+ else
+ index = profile_size + queue_dst.dest_port;
+
+ val = FIELD_PREP(PPE_UCAST_QUEUE_MAP_TBL_PROFILE_ID, profile_id);
+ val |= FIELD_PREP(PPE_UCAST_QUEUE_MAP_TBL_QUEUE_ID, queue_base);
+ reg = PPE_UCAST_QUEUE_MAP_TBL_ADDR + index * PPE_UCAST_QUEUE_MAP_TBL_INC;
+
+ return regmap_write(ppe_dev->regmap, reg, val);
+}
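
The three-way index selection above can be sketched in isolation. The
PPE_QUEUE_BASE_* constants live in ppe_config.h and are not part of this
hunk, so the base values below are hypothetical placeholders:

#include <stdio.h>
#include <stdbool.h>

#define QUEUE_BASE_SERVICE_CODE	2048	/* placeholder */
#define QUEUE_BASE_CPU_CODE	1024	/* placeholder */

static int map_index(bool sc_en, bool cpu_en, int src_profile,
		     int sc, int cpu, int dest_port)
{
	int profile_size = src_profile << 8;	/* 256 entries per profile */

	if (sc_en)
		return QUEUE_BASE_SERVICE_CODE + profile_size + sc;
	if (cpu_en)
		return QUEUE_BASE_CPU_CODE + profile_size + cpu;
	return profile_size + dest_port;
}

int main(void)
{
	printf("service code 5, profile 1 -> index %d\n",
	       map_index(true, false, 1, 5, 0, 0));
	printf("dest port 3, profile 0 -> index %d\n",
	       map_index(false, false, 0, 0, 0, 3));
	return 0;
}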
+
+/**
+ * ppe_queue_ucast_offset_pri_set - Set PPE unicast queue offset based on priority
+ * @ppe_dev: PPE device
+ * @profile_id: Profile ID
+ * @priority: PPE internal priority to be used to set queue offset
+ * @queue_offset: Queue offset used for calculating the destination queue ID
+ *
+ * The PPE unicast queue offset is configured based on the PPE
+ * internal priority.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int ppe_queue_ucast_offset_pri_set(struct ppe_device *ppe_dev,
+ int profile_id,
+ int priority,
+ int queue_offset)
+{
+ u32 val, reg;
+ int index;
+
+ index = (profile_id << 4) + priority;
+ val = FIELD_PREP(PPE_UCAST_PRIORITY_MAP_TBL_CLASS, queue_offset);
+ reg = PPE_UCAST_PRIORITY_MAP_TBL_ADDR + index * PPE_UCAST_PRIORITY_MAP_TBL_INC;
+
+ return regmap_write(ppe_dev->regmap, reg, val);
+}
+
+/**
+ * ppe_queue_ucast_offset_hash_set - Set PPE unicast queue offset based on hash
+ * @ppe_dev: PPE device
+ * @profile_id: Profile ID
+ * @rss_hash: Packet hash value to be used to set queue offset
+ * @queue_offset: Queue offset used for calculating the destination queue ID
+ *
+ * The PPE unicast queue offset is configured based on the RSS hash value.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int ppe_queue_ucast_offset_hash_set(struct ppe_device *ppe_dev,
+ int profile_id,
+ int rss_hash,
+ int queue_offset)
+{
+ u32 val, reg;
+ int index;
+
+ index = (profile_id << 8) + rss_hash;
+ val = FIELD_PREP(PPE_UCAST_HASH_MAP_TBL_HASH, queue_offset);
+ reg = PPE_UCAST_HASH_MAP_TBL_ADDR + index * PPE_UCAST_HASH_MAP_TBL_INC;
+
+ return regmap_write(ppe_dev->regmap, reg, val);
+}
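
The two offset tables use differently sized per-profile windows: 16
priority slots (profile_id << 4) versus 256 hash slots (profile_id << 8).
A quick sketch of both index computations:

#include <stdio.h>

int main(void)
{
	const int profile_id = 2;

	/* Priority-based offsets: 16 slots per profile. */
	for (int pri = 0; pri < 3; pri++)
		printf("pri %d  -> index %d\n", pri, (profile_id << 4) + pri);

	/* Hash-based offsets: 256 slots per profile. */
	for (int hash = 0; hash < 3; hash++)
		printf("hash %d -> index %d\n", hash, (profile_id << 8) + hash);

	return 0;
}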
+
+/**
+ * ppe_port_resource_get - Get PPE resource per port
+ * @ppe_dev: PPE device
+ * @port: PPE port
+ * @type: Resource type
+ * @res_start: Resource start ID returned
+ * @res_end: Resource end ID returned
+ *
+ * PPE resources are assigned per PPE port and are acquired for the QoS scheduler.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int ppe_port_resource_get(struct ppe_device *ppe_dev, int port,
+ enum ppe_resource_type type,
+ int *res_start, int *res_end)
+{
+ struct ppe_port_schedule_resource res;
+
+	/* The reserved resource entry, indexed one past the last PPE
+	 * port ID, is also allowed to be acquired.
+	 */
+ if (port > ppe_dev->num_ports)
+ return -EINVAL;
+
+ res = ppe_scheduler_res[port];
+ switch (type) {
+ case PPE_RES_UCAST:
+ *res_start = res.ucastq_start;
+ *res_end = res.ucastq_end;
+ break;
+ case PPE_RES_MCAST:
+ *res_start = res.mcastq_start;
+ *res_end = res.mcastq_end;
+ break;
+ case PPE_RES_FLOW_ID:
+ *res_start = res.flow_id_start;
+ *res_end = res.flow_id_end;
+ break;
+ case PPE_RES_L0_NODE:
+ *res_start = res.l0node_start;
+ *res_end = res.l0node_end;
+ break;
+ case PPE_RES_L1_NODE:
+ *res_start = res.l1node_start;
+ *res_end = res.l1node_end;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
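
A user-space sketch of the unicast lookup this function performs, using
the ucastq ranges copied from ppe_scheduler_res[] defined above; note
that port == num_ports selects the reserved trailing entry:

#include <stdio.h>

struct res_range { int start, end; };

/* Unicast queue ranges for ports 0-7 plus the reserved entry. */
static const struct res_range ucast[] = {
	{ 0, 63 },    { 144, 159 }, { 160, 175 }, { 176, 191 },
	{ 192, 207 }, { 208, 223 }, { 224, 239 }, { 240, 255 },
	{ 64, 143 },	/* reserved, acquired with port == num_ports */
};

int main(void)
{
	const int num_ports = 8;

	for (int port = 0; port <= num_ports; port++)	/* note: <= */
		printf("port %d: unicast queues %d-%d\n", port,
		       ucast[port].start, ucast[port].end);
	return 0;
}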
+
+/**
+ * ppe_sc_config_set - Set PPE service code configuration
+ * @ppe_dev: PPE device
+ * @sc: Service ID, 0-255 supported by PPE
+ * @cfg: Service code configuration
+ *
+ * A PPE service code is used by the PPE during its packet processing
+ * stages to perform or bypass certain selected operations on the packet.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int ppe_sc_config_set(struct ppe_device *ppe_dev, int sc, struct ppe_sc_cfg cfg)
+{
+ u32 val, reg, servcode_val[2] = {};
+ unsigned long bitmap_value;
+ int ret;
+
+ val = FIELD_PREP(PPE_IN_L2_SERVICE_TBL_DST_PORT_ID_VALID, cfg.dest_port_valid);
+ val |= FIELD_PREP(PPE_IN_L2_SERVICE_TBL_DST_PORT_ID, cfg.dest_port);
+ val |= FIELD_PREP(PPE_IN_L2_SERVICE_TBL_DST_DIRECTION, cfg.is_src);
+
+ bitmap_value = bitmap_read(cfg.bitmaps.egress, 0, PPE_SC_BYPASS_EGRESS_SIZE);
+ val |= FIELD_PREP(PPE_IN_L2_SERVICE_TBL_DST_BYPASS_BITMAP, bitmap_value);
+ val |= FIELD_PREP(PPE_IN_L2_SERVICE_TBL_RX_CNT_EN,
+ test_bit(PPE_SC_BYPASS_COUNTER_RX, cfg.bitmaps.counter));
+ val |= FIELD_PREP(PPE_IN_L2_SERVICE_TBL_TX_CNT_EN,
+ test_bit(PPE_SC_BYPASS_COUNTER_TX, cfg.bitmaps.counter));
+ reg = PPE_IN_L2_SERVICE_TBL_ADDR + PPE_IN_L2_SERVICE_TBL_INC * sc;
+
+ ret = regmap_write(ppe_dev->regmap, reg, val);
+ if (ret)
+ return ret;
+
+ bitmap_value = bitmap_read(cfg.bitmaps.ingress, 0, PPE_SC_BYPASS_INGRESS_SIZE);
+ PPE_SERVICE_SET_BYPASS_BITMAP(servcode_val, bitmap_value);
+ PPE_SERVICE_SET_RX_CNT_EN(servcode_val,
+ test_bit(PPE_SC_BYPASS_COUNTER_RX_VLAN, cfg.bitmaps.counter));
+ reg = PPE_SERVICE_TBL_ADDR + PPE_SERVICE_TBL_INC * sc;
+
+ ret = regmap_bulk_write(ppe_dev->regmap, reg,
+ servcode_val, ARRAY_SIZE(servcode_val));
+ if (ret)
+ return ret;
+
+ reg = PPE_EG_SERVICE_TBL_ADDR + PPE_EG_SERVICE_TBL_INC * sc;
+ ret = regmap_bulk_read(ppe_dev->regmap, reg,
+ servcode_val, ARRAY_SIZE(servcode_val));
+ if (ret)
+ return ret;
+
+ PPE_EG_SERVICE_SET_NEXT_SERVCODE(servcode_val, cfg.next_service_code);
+ PPE_EG_SERVICE_SET_UPDATE_ACTION(servcode_val, cfg.eip_field_update_bitmap);
+ PPE_EG_SERVICE_SET_HW_SERVICE(servcode_val, cfg.eip_hw_service);
+ PPE_EG_SERVICE_SET_OFFSET_SEL(servcode_val, cfg.eip_offset_sel);
+ PPE_EG_SERVICE_SET_TX_CNT_EN(servcode_val,
+ test_bit(PPE_SC_BYPASS_COUNTER_TX_VLAN, cfg.bitmaps.counter));
+
+ ret = regmap_bulk_write(ppe_dev->regmap, reg,
+ servcode_val, ARRAY_SIZE(servcode_val));
+ if (ret)
+ return ret;
+
+ bitmap_value = bitmap_read(cfg.bitmaps.tunnel, 0, PPE_SC_BYPASS_TUNNEL_SIZE);
+ val = FIELD_PREP(PPE_TL_SERVICE_TBL_BYPASS_BITMAP, bitmap_value);
+ reg = PPE_TL_SERVICE_TBL_ADDR + PPE_TL_SERVICE_TBL_INC * sc;
+
+ return regmap_write(ppe_dev->regmap, reg, val);
+}
+
+/**
+ * ppe_counter_enable_set - Set PPE port counter enabled
+ * @ppe_dev: PPE device
+ * @port: PPE port ID
+ *
+ * Enable PPE counters on the given port for unicast, multicast and VLAN
+ * packets received and transmitted by the PPE.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int ppe_counter_enable_set(struct ppe_device *ppe_dev, int port)
+{
+ u32 reg, mru_mtu_val[3];
+ int ret;
+
+ reg = PPE_MRU_MTU_CTRL_TBL_ADDR + PPE_MRU_MTU_CTRL_TBL_INC * port;
+ ret = regmap_bulk_read(ppe_dev->regmap, reg,
+ mru_mtu_val, ARRAY_SIZE(mru_mtu_val));
+ if (ret)
+ return ret;
+
+ PPE_MRU_MTU_CTRL_SET_RX_CNT_EN(mru_mtu_val, true);
+ PPE_MRU_MTU_CTRL_SET_TX_CNT_EN(mru_mtu_val, true);
+ ret = regmap_bulk_write(ppe_dev->regmap, reg,
+ mru_mtu_val, ARRAY_SIZE(mru_mtu_val));
+ if (ret)
+ return ret;
+
+ reg = PPE_MC_MTU_CTRL_TBL_ADDR + PPE_MC_MTU_CTRL_TBL_INC * port;
+ ret = regmap_set_bits(ppe_dev->regmap, reg, PPE_MC_MTU_CTRL_TBL_TX_CNT_EN);
+ if (ret)
+ return ret;
+
+ reg = PPE_PORT_EG_VLAN_TBL_ADDR + PPE_PORT_EG_VLAN_TBL_INC * port;
+
+ return regmap_set_bits(ppe_dev->regmap, reg, PPE_PORT_EG_VLAN_TBL_TX_COUNTING_EN);
+}
+
+static int ppe_rss_hash_ipv4_config(struct ppe_device *ppe_dev, int index,
+ struct ppe_rss_hash_cfg cfg)
+{
+ u32 reg, val;
+
+ switch (index) {
+ case 0:
+ val = cfg.hash_sip_mix[0];
+ break;
+ case 1:
+ val = cfg.hash_dip_mix[0];
+ break;
+ case 2:
+ val = cfg.hash_protocol_mix;
+ break;
+ case 3:
+ val = cfg.hash_dport_mix;
+ break;
+ case 4:
+ val = cfg.hash_sport_mix;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ reg = PPE_RSS_HASH_MIX_IPV4_ADDR + index * PPE_RSS_HASH_MIX_IPV4_INC;
+
+ return regmap_update_bits(ppe_dev->regmap, reg,
+ PPE_RSS_HASH_MIX_IPV4_VAL,
+ FIELD_PREP(PPE_RSS_HASH_MIX_IPV4_VAL, val));
+}
+
+static int ppe_rss_hash_ipv6_config(struct ppe_device *ppe_dev, int index,
+ struct ppe_rss_hash_cfg cfg)
+{
+ u32 reg, val;
+
+ switch (index) {
+ case 0 ... 3:
+ val = cfg.hash_sip_mix[index];
+ break;
+ case 4 ... 7:
+ val = cfg.hash_dip_mix[index - 4];
+ break;
+ case 8:
+ val = cfg.hash_protocol_mix;
+ break;
+ case 9:
+ val = cfg.hash_dport_mix;
+ break;
+ case 10:
+ val = cfg.hash_sport_mix;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ reg = PPE_RSS_HASH_MIX_ADDR + index * PPE_RSS_HASH_MIX_INC;
+
+ return regmap_update_bits(ppe_dev->regmap, reg,
+ PPE_RSS_HASH_MIX_VAL,
+ FIELD_PREP(PPE_RSS_HASH_MIX_VAL, val));
+}
+
+/**
+ * ppe_rss_hash_config_set - Configure the PPE hash settings for received packets.
+ * @ppe_dev: PPE device.
+ * @mode: Packet types (IPv4 and/or IPv6) for which to configure the RSS hash.
+ * @cfg: RSS hash configuration.
+ *
+ * PPE RSS hash settings are configured for the IPv4 and IPv6 packet types.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int ppe_rss_hash_config_set(struct ppe_device *ppe_dev, int mode,
+ struct ppe_rss_hash_cfg cfg)
+{
+ u32 val, reg;
+ int i, ret;
+
+ if (mode & PPE_RSS_HASH_MODE_IPV4) {
+ val = FIELD_PREP(PPE_RSS_HASH_MASK_IPV4_HASH_MASK, cfg.hash_mask);
+ val |= FIELD_PREP(PPE_RSS_HASH_MASK_IPV4_FRAGMENT, cfg.hash_fragment_mode);
+ ret = regmap_write(ppe_dev->regmap, PPE_RSS_HASH_MASK_IPV4_ADDR, val);
+ if (ret)
+ return ret;
+
+ val = FIELD_PREP(PPE_RSS_HASH_SEED_IPV4_VAL, cfg.hash_seed);
+ ret = regmap_write(ppe_dev->regmap, PPE_RSS_HASH_SEED_IPV4_ADDR, val);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < PPE_RSS_HASH_MIX_IPV4_ENTRIES; i++) {
+ ret = ppe_rss_hash_ipv4_config(ppe_dev, i, cfg);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < PPE_RSS_HASH_FIN_IPV4_ENTRIES; i++) {
+ val = FIELD_PREP(PPE_RSS_HASH_FIN_IPV4_INNER, cfg.hash_fin_inner[i]);
+ val |= FIELD_PREP(PPE_RSS_HASH_FIN_IPV4_OUTER, cfg.hash_fin_outer[i]);
+ reg = PPE_RSS_HASH_FIN_IPV4_ADDR + i * PPE_RSS_HASH_FIN_IPV4_INC;
+
+ ret = regmap_write(ppe_dev->regmap, reg, val);
+ if (ret)
+ return ret;
+ }
+ }
+
+ if (mode & PPE_RSS_HASH_MODE_IPV6) {
+ val = FIELD_PREP(PPE_RSS_HASH_MASK_HASH_MASK, cfg.hash_mask);
+ val |= FIELD_PREP(PPE_RSS_HASH_MASK_FRAGMENT, cfg.hash_fragment_mode);
+ ret = regmap_write(ppe_dev->regmap, PPE_RSS_HASH_MASK_ADDR, val);
+ if (ret)
+ return ret;
+
+ val = FIELD_PREP(PPE_RSS_HASH_SEED_VAL, cfg.hash_seed);
+ ret = regmap_write(ppe_dev->regmap, PPE_RSS_HASH_SEED_ADDR, val);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < PPE_RSS_HASH_MIX_ENTRIES; i++) {
+ ret = ppe_rss_hash_ipv6_config(ppe_dev, i, cfg);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < PPE_RSS_HASH_FIN_ENTRIES; i++) {
+ val = FIELD_PREP(PPE_RSS_HASH_FIN_INNER, cfg.hash_fin_inner[i]);
+ val |= FIELD_PREP(PPE_RSS_HASH_FIN_OUTER, cfg.hash_fin_outer[i]);
+ reg = PPE_RSS_HASH_FIN_ADDR + i * PPE_RSS_HASH_FIN_INC;
+
+ ret = regmap_write(ppe_dev->regmap, reg, val);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
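
A hypothetical caller enabling RSS for both packet types might fill the
configuration roughly as in the fragment below; the field values are
illustrative only and error handling is trimmed:

	struct ppe_rss_hash_cfg cfg = {};
	int ret;

	cfg.hash_mask = 0xfff;		/* illustrative mask */
	cfg.hash_seed = 0x6d5a6d5a;	/* illustrative seed */
	/* hash_*_mix and hash_fin_* entries omitted for brevity. */

	ret = ppe_rss_hash_config_set(ppe_dev,
				      PPE_RSS_HASH_MODE_IPV4 |
				      PPE_RSS_HASH_MODE_IPV6, cfg);
	if (ret)
		dev_err(ppe_dev->dev, "RSS hash config failed %d\n", ret);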
+
+/**
+ * ppe_ring_queue_map_set - Set the PPE queue to Ethernet DMA ring mapping
+ * @ppe_dev: PPE device
+ * @ring_id: Ethernet DMA ring ID
+ * @queue_map: Bitmap of the queue IDs mapped to the given Ethernet DMA ring
+ *
+ * Configure the mapping from a set of PPE queues to a given Ethernet DMA ring.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int ppe_ring_queue_map_set(struct ppe_device *ppe_dev, int ring_id, u32 *queue_map)
+{
+ u32 reg, queue_bitmap_val[PPE_RING_TO_QUEUE_BITMAP_WORD_CNT];
+
+ memcpy(queue_bitmap_val, queue_map, sizeof(queue_bitmap_val));
+ reg = PPE_RING_Q_MAP_TBL_ADDR + PPE_RING_Q_MAP_TBL_INC * ring_id;
+
+ return regmap_bulk_write(ppe_dev->regmap, reg,
+ queue_bitmap_val,
+ ARRAY_SIZE(queue_bitmap_val));
+}
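
The queue bitmap passed in is a set of packed 32-bit words covering the
PPE queues. A user-space sketch of building such a map; the word count of
10 is an assumption standing in for PPE_RING_TO_QUEUE_BITMAP_WORD_CNT,
which is defined elsewhere in the driver:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t queue_map[10] = { 0 };	/* assumed word count */
	const int queues[] = { 0, 8, 35, 256 };

	for (int i = 0; i < 4; i++) {
		int q = queues[i];

		queue_map[q / 32] |= 1u << (q % 32);	/* map queue to ring */
	}

	printf("word 0: %#010x, word 1: %#010x, word 8: %#010x\n",
	       queue_map[0], queue_map[1], queue_map[8]);
	return 0;
}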
+
+static int ppe_config_bm_threshold(struct ppe_device *ppe_dev, int bm_port_id,
+ const struct ppe_bm_port_config port_cfg)
+{
+ u32 reg, val, bm_fc_val[2];
+ int ret;
+
+ reg = PPE_BM_PORT_FC_CFG_TBL_ADDR + PPE_BM_PORT_FC_CFG_TBL_INC * bm_port_id;
+ ret = regmap_bulk_read(ppe_dev->regmap, reg,
+ bm_fc_val, ARRAY_SIZE(bm_fc_val));
+ if (ret)
+ return ret;
+
+ /* Configure BM flow control related threshold. */
+ PPE_BM_PORT_FC_SET_WEIGHT(bm_fc_val, port_cfg.weight);
+ PPE_BM_PORT_FC_SET_RESUME_OFFSET(bm_fc_val, port_cfg.resume_offset);
+ PPE_BM_PORT_FC_SET_RESUME_THRESHOLD(bm_fc_val, port_cfg.resume_ceil);
+ PPE_BM_PORT_FC_SET_DYNAMIC(bm_fc_val, port_cfg.dynamic);
+ PPE_BM_PORT_FC_SET_REACT_LIMIT(bm_fc_val, port_cfg.in_fly_buf);
+ PPE_BM_PORT_FC_SET_PRE_ALLOC(bm_fc_val, port_cfg.pre_alloc);
+
+ /* Configure low/high bits of the ceiling for the BM port. */
+ val = FIELD_GET(GENMASK(2, 0), port_cfg.ceil);
+ PPE_BM_PORT_FC_SET_CEILING_LOW(bm_fc_val, val);
+ val = FIELD_GET(GENMASK(10, 3), port_cfg.ceil);
+ PPE_BM_PORT_FC_SET_CEILING_HIGH(bm_fc_val, val);
+
+ ret = regmap_bulk_write(ppe_dev->regmap, reg,
+ bm_fc_val, ARRAY_SIZE(bm_fc_val));
+ if (ret)
+ return ret;
+
+ /* Assign the default group ID 0 to the BM port. */
+ val = FIELD_PREP(PPE_BM_PORT_GROUP_ID_SHARED_GROUP_ID, 0);
+ reg = PPE_BM_PORT_GROUP_ID_ADDR + PPE_BM_PORT_GROUP_ID_INC * bm_port_id;
+ ret = regmap_update_bits(ppe_dev->regmap, reg,
+ PPE_BM_PORT_GROUP_ID_SHARED_GROUP_ID,
+ val);
+ if (ret)
+ return ret;
+
+ /* Enable BM port flow control. */
+ reg = PPE_BM_PORT_FC_MODE_ADDR + PPE_BM_PORT_FC_MODE_INC * bm_port_id;
+
+ return regmap_set_bits(ppe_dev->regmap, reg, PPE_BM_PORT_FC_MODE_EN);
+}
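
The ceiling split above stores an 11-bit value in two register fields,
bits 2:0 and bits 10:3. A worked user-space sketch using the BM port 0
ceiling of 1146 from the table earlier in this file:

#include <stdio.h>

int main(void)
{
	const unsigned int ceil = 1146;	/* BM port 0 ceiling */

	/* FIELD_GET(GENMASK(2, 0), ceil) and FIELD_GET(GENMASK(10, 3), ceil)
	 * reduce to a mask and a shift-plus-mask respectively.
	 */
	unsigned int low = ceil & 0x7;		/* bits 2:0 -> 2 */
	unsigned int high = (ceil >> 3) & 0xff;	/* bits 10:3 -> 143 */

	printf("ceil %u -> low %u, high %u (recombined %u)\n",
	       ceil, low, high, (high << 3) | low);
	return 0;
}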
+
+/* Configure the buffer threshold for the port flow control function. */
+static int ppe_config_bm(struct ppe_device *ppe_dev)
+{
+ const struct ppe_bm_port_config *port_cfg;
+ unsigned int i, bm_port_id, port_cfg_cnt;
+ u32 reg, val;
+ int ret;
+
+	/* Configure the allocated buffer number only for group 0.
+	 * The buffer numbers of groups 1-3 are already cleared to 0
+	 * by the PPE reset performed during driver probe.
+	 */
+ reg = PPE_BM_SHARED_GROUP_CFG_ADDR;
+ val = FIELD_PREP(PPE_BM_SHARED_GROUP_CFG_SHARED_LIMIT,
+ ipq9574_ppe_bm_group_config);
+ ret = regmap_update_bits(ppe_dev->regmap, reg,
+ PPE_BM_SHARED_GROUP_CFG_SHARED_LIMIT,
+ val);
+ if (ret)
+ goto bm_config_fail;
+
+ /* Configure buffer thresholds for the BM ports. */
+ port_cfg = ipq9574_ppe_bm_port_config;
+ port_cfg_cnt = ARRAY_SIZE(ipq9574_ppe_bm_port_config);
+ for (i = 0; i < port_cfg_cnt; i++) {
+ for (bm_port_id = port_cfg[i].port_id_start;
+ bm_port_id <= port_cfg[i].port_id_end; bm_port_id++) {
+ ret = ppe_config_bm_threshold(ppe_dev, bm_port_id,
+ port_cfg[i]);
+ if (ret)
+ goto bm_config_fail;
+ }
+ }
+
+ return 0;
+
+bm_config_fail:
+ dev_err(ppe_dev->dev, "PPE BM config error %d\n", ret);
+ return ret;
+}
+
+/* Configure the PPE hardware queue depth, which is decided by the
+ * threshold of the queue.
+ */
+static int ppe_config_qm(struct ppe_device *ppe_dev)
+{
+ const struct ppe_qm_queue_config *queue_cfg;
+ int ret, i, queue_id, queue_cfg_count;
+ u32 reg, multicast_queue_cfg[5];
+ u32 unicast_queue_cfg[4];
+ u32 group_cfg[3];
+
+ /* Assign the buffer number to the group 0 by default. */
+ reg = PPE_AC_GRP_CFG_TBL_ADDR;
+ ret = regmap_bulk_read(ppe_dev->regmap, reg,
+ group_cfg, ARRAY_SIZE(group_cfg));
+ if (ret)
+ goto qm_config_fail;
+
+ PPE_AC_GRP_SET_BUF_LIMIT(group_cfg, ipq9574_ppe_qm_group_config);
+
+ ret = regmap_bulk_write(ppe_dev->regmap, reg,
+ group_cfg, ARRAY_SIZE(group_cfg));
+ if (ret)
+ goto qm_config_fail;
+
+ queue_cfg = ipq9574_ppe_qm_queue_config;
+ queue_cfg_count = ARRAY_SIZE(ipq9574_ppe_qm_queue_config);
+ for (i = 0; i < queue_cfg_count; i++) {
+ queue_id = queue_cfg[i].queue_start;
+
+ /* Configure threshold for dropping packets separately for
+ * unicast and multicast PPE queues.
+ */
+ while (queue_id <= queue_cfg[i].queue_end) {
+ if (queue_id < PPE_AC_UNICAST_QUEUE_CFG_TBL_ENTRIES) {
+ reg = PPE_AC_UNICAST_QUEUE_CFG_TBL_ADDR +
+ PPE_AC_UNICAST_QUEUE_CFG_TBL_INC * queue_id;
+
+ ret = regmap_bulk_read(ppe_dev->regmap, reg,
+ unicast_queue_cfg,
+ ARRAY_SIZE(unicast_queue_cfg));
+ if (ret)
+ goto qm_config_fail;
+
+ PPE_AC_UNICAST_QUEUE_SET_EN(unicast_queue_cfg, true);
+ PPE_AC_UNICAST_QUEUE_SET_GRP_ID(unicast_queue_cfg, 0);
+ PPE_AC_UNICAST_QUEUE_SET_PRE_LIMIT(unicast_queue_cfg,
+ queue_cfg[i].prealloc_buf);
+ PPE_AC_UNICAST_QUEUE_SET_DYNAMIC(unicast_queue_cfg,
+ queue_cfg[i].dynamic);
+ PPE_AC_UNICAST_QUEUE_SET_WEIGHT(unicast_queue_cfg,
+ queue_cfg[i].weight);
+ PPE_AC_UNICAST_QUEUE_SET_THRESHOLD(unicast_queue_cfg,
+ queue_cfg[i].ceil);
+ PPE_AC_UNICAST_QUEUE_SET_GRN_RESUME(unicast_queue_cfg,
+ queue_cfg[i].resume_offset);
+
+ ret = regmap_bulk_write(ppe_dev->regmap, reg,
+ unicast_queue_cfg,
+ ARRAY_SIZE(unicast_queue_cfg));
+ if (ret)
+ goto qm_config_fail;
+ } else {
+ reg = PPE_AC_MULTICAST_QUEUE_CFG_TBL_ADDR +
+ PPE_AC_MULTICAST_QUEUE_CFG_TBL_INC * queue_id;
+
+ ret = regmap_bulk_read(ppe_dev->regmap, reg,
+ multicast_queue_cfg,
+ ARRAY_SIZE(multicast_queue_cfg));
+ if (ret)
+ goto qm_config_fail;
+
+ PPE_AC_MULTICAST_QUEUE_SET_EN(multicast_queue_cfg, true);
+ PPE_AC_MULTICAST_QUEUE_SET_GRN_GRP_ID(multicast_queue_cfg, 0);
+ PPE_AC_MULTICAST_QUEUE_SET_GRN_PRE_LIMIT(multicast_queue_cfg,
+ queue_cfg[i].prealloc_buf);
+ PPE_AC_MULTICAST_QUEUE_SET_GRN_THRESHOLD(multicast_queue_cfg,
+ queue_cfg[i].ceil);
+ PPE_AC_MULTICAST_QUEUE_SET_GRN_RESUME(multicast_queue_cfg,
+ queue_cfg[i].resume_offset);
+
+ ret = regmap_bulk_write(ppe_dev->regmap, reg,
+ multicast_queue_cfg,
+ ARRAY_SIZE(multicast_queue_cfg));
+ if (ret)
+ goto qm_config_fail;
+ }
+
+ /* Enable enqueue. */
+ reg = PPE_ENQ_OPR_TBL_ADDR + PPE_ENQ_OPR_TBL_INC * queue_id;
+ ret = regmap_clear_bits(ppe_dev->regmap, reg,
+ PPE_ENQ_OPR_TBL_ENQ_DISABLE);
+ if (ret)
+ goto qm_config_fail;
+
+ /* Enable dequeue. */
+ reg = PPE_DEQ_OPR_TBL_ADDR + PPE_DEQ_OPR_TBL_INC * queue_id;
+ ret = regmap_clear_bits(ppe_dev->regmap, reg,
+ PPE_DEQ_OPR_TBL_DEQ_DISABLE);
+ if (ret)
+ goto qm_config_fail;
+
+ queue_id++;
+ }
+ }
+
+ /* Enable queue counter for all PPE hardware queues. */
+ ret = regmap_set_bits(ppe_dev->regmap, PPE_EG_BRIDGE_CONFIG_ADDR,
+ PPE_EG_BRIDGE_CONFIG_QUEUE_CNT_EN);
+ if (ret)
+ goto qm_config_fail;
+
+ return 0;
+
+qm_config_fail:
+ dev_err(ppe_dev->dev, "PPE QM config error %d\n", ret);
+ return ret;
+}
+
+static int ppe_node_scheduler_config(struct ppe_device *ppe_dev,
+ const struct ppe_scheduler_port_config config)
+{
+ struct ppe_scheduler_cfg sch_cfg;
+ int ret, i;
+
+ for (i = 0; i < config.loop_num; i++) {
+ if (!config.pri_max) {
+ /* Round robin scheduler without priority. */
+ sch_cfg.flow_id = config.flow_id;
+ sch_cfg.pri = 0;
+ sch_cfg.drr_node_id = config.drr_node_id;
+ } else {
+ sch_cfg.flow_id = config.flow_id + (i / config.pri_max);
+ sch_cfg.pri = i % config.pri_max;
+ sch_cfg.drr_node_id = config.drr_node_id + i;
+ }
+
+		/* The scheduler weight must be greater than 0. */
+		sch_cfg.drr_node_wt = 1;
+		/* Byte based scheduling. */
+		sch_cfg.unit_is_packet = false;
+		/* Frame + CRC included in the scheduled length. */
+		sch_cfg.frame_mode = PPE_SCH_WITH_FRAME_CRC;
+
+ ret = ppe_queue_scheduler_set(ppe_dev, config.node_id + i,
+ config.flow_level,
+ config.port,
+ sch_cfg);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
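+
+/* Illustrative example (the values are hypothetical, not taken from the
+ * port configuration tables): with loop_num = 16, pri_max = 8, flow_id = 4
+ * and drr_node_id = 48, the loop above programs two flows with 8
+ * priorities each:
+ *
+ *	i = 0..7:  flow_id = 4, pri = 0..7, drr_node_id = 48..55
+ *	i = 8..15: flow_id = 5, pri = 0..7, drr_node_id = 56..63
+ */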
+
+/* Initialize scheduler settings for PPE buffer utilization and for
+ * dispatching packets on the PPE queues.
+ */
+static int ppe_config_scheduler(struct ppe_device *ppe_dev)
+{
+ const struct ppe_scheduler_port_config *port_cfg;
+ const struct ppe_scheduler_qm_config *qm_cfg;
+ const struct ppe_scheduler_bm_config *bm_cfg;
+ int ret, i, count;
+ u32 val, reg;
+
+ count = ARRAY_SIZE(ipq9574_ppe_sch_bm_config);
+ bm_cfg = ipq9574_ppe_sch_bm_config;
+
+ /* Configure the depth of BM scheduler entries. */
+ val = FIELD_PREP(PPE_BM_SCH_CTRL_SCH_DEPTH, count);
+ val |= FIELD_PREP(PPE_BM_SCH_CTRL_SCH_OFFSET, 0);
+ val |= FIELD_PREP(PPE_BM_SCH_CTRL_SCH_EN, 1);
+
+ ret = regmap_write(ppe_dev->regmap, PPE_BM_SCH_CTRL_ADDR, val);
+ if (ret)
+ goto sch_config_fail;
+
+	/* Configure each BM scheduler entry with the valid ingress port and
+	 * egress port; the second port takes effect when the specified port
+	 * is in the inactive state.
+	 */
+ for (i = 0; i < count; i++) {
+ val = FIELD_PREP(PPE_BM_SCH_CFG_TBL_VALID, bm_cfg[i].valid);
+ val |= FIELD_PREP(PPE_BM_SCH_CFG_TBL_DIR, bm_cfg[i].dir);
+ val |= FIELD_PREP(PPE_BM_SCH_CFG_TBL_PORT_NUM, bm_cfg[i].port);
+ val |= FIELD_PREP(PPE_BM_SCH_CFG_TBL_SECOND_PORT_VALID,
+ bm_cfg[i].backup_port_valid);
+ val |= FIELD_PREP(PPE_BM_SCH_CFG_TBL_SECOND_PORT,
+ bm_cfg[i].backup_port);
+
+ reg = PPE_BM_SCH_CFG_TBL_ADDR + i * PPE_BM_SCH_CFG_TBL_INC;
+ ret = regmap_write(ppe_dev->regmap, reg, val);
+ if (ret)
+ goto sch_config_fail;
+ }
+
+ count = ARRAY_SIZE(ipq9574_ppe_sch_qm_config);
+ qm_cfg = ipq9574_ppe_sch_qm_config;
+
+ /* Configure the depth of QM scheduler entries. */
+ val = FIELD_PREP(PPE_PSCH_SCH_DEPTH_CFG_SCH_DEPTH, count);
+ ret = regmap_write(ppe_dev->regmap, PPE_PSCH_SCH_DEPTH_CFG_ADDR, val);
+ if (ret)
+ goto sch_config_fail;
+
+	/* Configure each QM scheduler entry with the enqueue port and dequeue
+	 * port; the second port takes effect when the specified dequeue
+	 * port is in the inactive state.
+	 */
+ for (i = 0; i < count; i++) {
+ val = FIELD_PREP(PPE_PSCH_SCH_CFG_TBL_ENS_PORT_BITMAP,
+ qm_cfg[i].ensch_port_bmp);
+ val |= FIELD_PREP(PPE_PSCH_SCH_CFG_TBL_ENS_PORT,
+ qm_cfg[i].ensch_port);
+ val |= FIELD_PREP(PPE_PSCH_SCH_CFG_TBL_DES_PORT,
+ qm_cfg[i].desch_port);
+ val |= FIELD_PREP(PPE_PSCH_SCH_CFG_TBL_DES_SECOND_PORT_EN,
+ qm_cfg[i].desch_backup_port_valid);
+ val |= FIELD_PREP(PPE_PSCH_SCH_CFG_TBL_DES_SECOND_PORT,
+ qm_cfg[i].desch_backup_port);
+
+ reg = PPE_PSCH_SCH_CFG_TBL_ADDR + i * PPE_PSCH_SCH_CFG_TBL_INC;
+ ret = regmap_write(ppe_dev->regmap, reg, val);
+ if (ret)
+ goto sch_config_fail;
+ }
+
+ count = ARRAY_SIZE(ppe_port_sch_config);
+ port_cfg = ppe_port_sch_config;
+
+ /* Configure scheduler per PPE queue or flow. */
+ for (i = 0; i < count; i++) {
+ if (port_cfg[i].port >= ppe_dev->num_ports)
+ break;
+
+ ret = ppe_node_scheduler_config(ppe_dev, port_cfg[i]);
+ if (ret)
+ goto sch_config_fail;
+ }
+
+ return 0;
+
+sch_config_fail:
+ dev_err(ppe_dev->dev, "PPE scheduler arbitration config error %d\n", ret);
+ return ret;
+}
+
+/* Configure PPE queue destination of each PPE port. */
+static int ppe_queue_dest_init(struct ppe_device *ppe_dev)
+{
+ int ret, port_id, index, q_base, q_offset, res_start, res_end, pri_max;
+ struct ppe_queue_ucast_dest queue_dst;
+
+ for (port_id = 0; port_id < ppe_dev->num_ports; port_id++) {
+ memset(&queue_dst, 0, sizeof(queue_dst));
+
+ ret = ppe_port_resource_get(ppe_dev, port_id, PPE_RES_UCAST,
+ &res_start, &res_end);
+ if (ret)
+ return ret;
+
+ q_base = res_start;
+ queue_dst.dest_port = port_id;
+
+		/* Configure the queue base ID, with the profile ID set the
+		 * same as the physical port ID.
+		 */
+ ret = ppe_queue_ucast_base_set(ppe_dev, queue_dst,
+ q_base, port_id);
+ if (ret)
+ return ret;
+
+		/* Queue priority range supported by each PPE port. */
+ ret = ppe_port_resource_get(ppe_dev, port_id, PPE_RES_L0_NODE,
+ &res_start, &res_end);
+ if (ret)
+ return ret;
+
+ pri_max = res_end - res_start;
+
+		/* Redirect the ARP reply packet with the maximum priority to
+		 * the CPU port, which keeps the ARP reply directed to the CPU
+		 * (CPU code is 101) on the highest priority queue of the EDMA.
+		 */
+ if (port_id == 0) {
+ memset(&queue_dst, 0, sizeof(queue_dst));
+
+ queue_dst.cpu_code_en = true;
+ queue_dst.cpu_code = 101;
+ ret = ppe_queue_ucast_base_set(ppe_dev, queue_dst,
+ q_base + pri_max,
+ 0);
+ if (ret)
+ return ret;
+ }
+
+ /* Initialize the queue offset of internal priority. */
+ for (index = 0; index < PPE_QUEUE_INTER_PRI_NUM; index++) {
+ q_offset = index > pri_max ? pri_max : index;
+
+ ret = ppe_queue_ucast_offset_pri_set(ppe_dev, port_id,
+ index, q_offset);
+ if (ret)
+ return ret;
+ }
+
+		/* Initialize the queue offset of the RSS hash to 0 to avoid
+		 * a random hardware value leading to an unexpected
+		 * destination queue being generated.
+		 */
+ for (index = 0; index < PPE_QUEUE_HASH_NUM; index++) {
+ ret = ppe_queue_ucast_offset_hash_set(ppe_dev, port_id,
+ index, 0);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/* Initialize service code 1, which is used by the CPU port. */
+static int ppe_servcode_init(struct ppe_device *ppe_dev)
+{
+ struct ppe_sc_cfg sc_cfg = {};
+
+ bitmap_zero(sc_cfg.bitmaps.counter, PPE_SC_BYPASS_COUNTER_SIZE);
+ bitmap_zero(sc_cfg.bitmaps.tunnel, PPE_SC_BYPASS_TUNNEL_SIZE);
+
+ bitmap_fill(sc_cfg.bitmaps.ingress, PPE_SC_BYPASS_INGRESS_SIZE);
+ clear_bit(PPE_SC_BYPASS_INGRESS_FAKE_MAC_HEADER, sc_cfg.bitmaps.ingress);
+ clear_bit(PPE_SC_BYPASS_INGRESS_SERVICE_CODE, sc_cfg.bitmaps.ingress);
+ clear_bit(PPE_SC_BYPASS_INGRESS_FAKE_L2_PROTO, sc_cfg.bitmaps.ingress);
+
+ bitmap_fill(sc_cfg.bitmaps.egress, PPE_SC_BYPASS_EGRESS_SIZE);
+ clear_bit(PPE_SC_BYPASS_EGRESS_ACL_POST_ROUTING_CHECK, sc_cfg.bitmaps.egress);
+
+ return ppe_sc_config_set(ppe_dev, PPE_EDMA_SC_BYPASS_ID, sc_cfg);
+}
+
+/* Initialize PPE port configurations. */
+static int ppe_port_config_init(struct ppe_device *ppe_dev)
+{
+ u32 reg, val, mru_mtu_val[3];
+ int i, ret;
+
+ /* MTU and MRU settings are not required for CPU port 0. */
+ for (i = 1; i < ppe_dev->num_ports; i++) {
+		/* Enable the Ethernet port counter. */
+ ret = ppe_counter_enable_set(ppe_dev, i);
+ if (ret)
+ return ret;
+
+ reg = PPE_MRU_MTU_CTRL_TBL_ADDR + PPE_MRU_MTU_CTRL_TBL_INC * i;
+ ret = regmap_bulk_read(ppe_dev->regmap, reg,
+ mru_mtu_val, ARRAY_SIZE(mru_mtu_val));
+ if (ret)
+ return ret;
+
+ /* Drop the packet when the packet size is more than the MTU
+ * and redirect the packet to the CPU port when the received
+ * packet size is more than the MRU of the physical interface.
+ */
+ PPE_MRU_MTU_CTRL_SET_MRU_CMD(mru_mtu_val, PPE_ACTION_REDIRECT_TO_CPU);
+ PPE_MRU_MTU_CTRL_SET_MTU_CMD(mru_mtu_val, PPE_ACTION_DROP);
+ ret = regmap_bulk_write(ppe_dev->regmap, reg,
+ mru_mtu_val, ARRAY_SIZE(mru_mtu_val));
+ if (ret)
+ return ret;
+
+ reg = PPE_MC_MTU_CTRL_TBL_ADDR + PPE_MC_MTU_CTRL_TBL_INC * i;
+ val = FIELD_PREP(PPE_MC_MTU_CTRL_TBL_MTU_CMD, PPE_ACTION_DROP);
+ ret = regmap_update_bits(ppe_dev->regmap, reg,
+ PPE_MC_MTU_CTRL_TBL_MTU_CMD,
+ val);
+ if (ret)
+ return ret;
+ }
+
+ /* Enable CPU port counters. */
+ return ppe_counter_enable_set(ppe_dev, 0);
+}
+
+/* Initialize the PPE RSS configuration for IPv4 and IPv6 packet receive.
+ * The RSS settings control how the RSS hash value is calculated for the
+ * received packet. This hash is then used to generate the queue offset
+ * that determines the destination queue of the packet.
+ */
+static int ppe_rss_hash_init(struct ppe_device *ppe_dev)
+{
+ u16 fins[PPE_RSS_HASH_TUPLES] = { 0x205, 0x264, 0x227, 0x245, 0x201 };
+ u8 ips[PPE_RSS_HASH_IP_LENGTH] = { 0x13, 0xb, 0x13, 0xb };
+ struct ppe_rss_hash_cfg hash_cfg;
+ int i, ret;
+
+ hash_cfg.hash_seed = get_random_u32();
+ hash_cfg.hash_mask = 0xfff;
+
+ /* Use 5 tuple as RSS hash key for the first fragment of TCP, UDP
+ * and UDP-Lite packets.
+ */
+ hash_cfg.hash_fragment_mode = false;
+
+	/* The final common seed configs used to calculate the RSS hash value,
+	 * which are available for both IPv4 and IPv6 packets.
+	 */
+ for (i = 0; i < ARRAY_SIZE(fins); i++) {
+ hash_cfg.hash_fin_inner[i] = fins[i] & 0x1f;
+ hash_cfg.hash_fin_outer[i] = fins[i] >> 5;
+ }
+
+ /* RSS seeds for IP protocol, L4 destination & source port and
+ * destination & source IP used to calculate the RSS hash value.
+ */
+ hash_cfg.hash_protocol_mix = 0x13;
+ hash_cfg.hash_dport_mix = 0xb;
+ hash_cfg.hash_sport_mix = 0x13;
+ hash_cfg.hash_dip_mix[0] = 0xb;
+ hash_cfg.hash_sip_mix[0] = 0x13;
+
+ /* Configure RSS seed configs for IPv4 packet. */
+ ret = ppe_rss_hash_config_set(ppe_dev, PPE_RSS_HASH_MODE_IPV4, hash_cfg);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(ips); i++) {
+ hash_cfg.hash_sip_mix[i] = ips[i];
+ hash_cfg.hash_dip_mix[i] = ips[i];
+ }
+
+ /* Configure RSS seed configs for IPv6 packet. */
+ return ppe_rss_hash_config_set(ppe_dev, PPE_RSS_HASH_MODE_IPV6, hash_cfg);
+}
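+
+/* Worked example for the fin split above: each 10-bit fin value packs the
+ * inner selection in bits [4:0] and the outer selection in bits [9:5], so
+ * fins[0] = 0x205 yields hash_fin_inner[0] = 0x205 & 0x1f = 0x5 and
+ * hash_fin_outer[0] = 0x205 >> 5 = 0x10.
+ */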
+
+/* Initialize the mapping of the PPE queues assigned to CPU port 0
+ * to Ethernet DMA ring 0.
+ */
+static int ppe_queues_to_ring_init(struct ppe_device *ppe_dev)
+{
+ u32 queue_bmap[PPE_RING_TO_QUEUE_BITMAP_WORD_CNT] = {};
+ int ret, queue_id, queue_max;
+
+ ret = ppe_port_resource_get(ppe_dev, 0, PPE_RES_UCAST,
+ &queue_id, &queue_max);
+ if (ret)
+ return ret;
+
+ for (; queue_id <= queue_max; queue_id++)
+ queue_bmap[queue_id / 32] |= BIT_MASK(queue_id % 32);
+
+ return ppe_ring_queue_map_set(ppe_dev, 0, queue_bmap);
+}
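+
+/* Worked example for the bitmap fill above (the queue range is
+ * hypothetical): if the unicast queue resource of CPU port 0 is queues
+ * 0..15, only queue_bmap[0] is set, to GENMASK(15, 0); a queue ID of 32
+ * would instead set bit 0 of queue_bmap[1].
+ */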
+
+/* Initialize PPE bridge settings to enable L2 frame receive and transmit
+ * only between the CPU port and the PPE Ethernet ports.
+ */
+static int ppe_bridge_init(struct ppe_device *ppe_dev)
+{
+ u32 reg, mask, port_cfg[4], vsi_cfg[2];
+ int ret, i;
+
+ /* Configure the following settings for CPU port0:
+ * a.) Enable Bridge TX
+ * b.) Disable FDB new address learning
+ * c.) Disable station move address learning
+ */
+ mask = PPE_PORT_BRIDGE_TXMAC_EN;
+ mask |= PPE_PORT_BRIDGE_NEW_LRN_EN;
+ mask |= PPE_PORT_BRIDGE_STA_MOVE_LRN_EN;
+ ret = regmap_update_bits(ppe_dev->regmap,
+ PPE_PORT_BRIDGE_CTRL_ADDR,
+ mask,
+ PPE_PORT_BRIDGE_TXMAC_EN);
+ if (ret)
+ return ret;
+
+ for (i = 1; i < ppe_dev->num_ports; i++) {
+ /* Enable invalid VSI forwarding for all the physical ports
+ * to CPU port0, in case no VSI is assigned to the physical
+ * port.
+ */
+ reg = PPE_L2_VP_PORT_TBL_ADDR + PPE_L2_VP_PORT_TBL_INC * i;
+ ret = regmap_bulk_read(ppe_dev->regmap, reg,
+ port_cfg, ARRAY_SIZE(port_cfg));
+ if (ret)
+ return ret;
+
+ PPE_L2_PORT_SET_INVALID_VSI_FWD_EN(port_cfg, true);
+ PPE_L2_PORT_SET_DST_INFO(port_cfg, 0);
+
+ ret = regmap_bulk_write(ppe_dev->regmap, reg,
+ port_cfg, ARRAY_SIZE(port_cfg));
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < PPE_VSI_TBL_ENTRIES; i++) {
+		/* Set the VSI forward membership to include only CPU port0.
+		 * FDB learning and forwarding take effect only after switchdev
+		 * support is added later to create the VSI and join the
+		 * physical ports to the VSI as port members.
+		 */
+ reg = PPE_VSI_TBL_ADDR + PPE_VSI_TBL_INC * i;
+ ret = regmap_bulk_read(ppe_dev->regmap, reg,
+ vsi_cfg, ARRAY_SIZE(vsi_cfg));
+ if (ret)
+ return ret;
+
+ PPE_VSI_SET_MEMBER_PORT_BITMAP(vsi_cfg, BIT(0));
+ PPE_VSI_SET_UUC_BITMAP(vsi_cfg, BIT(0));
+ PPE_VSI_SET_UMC_BITMAP(vsi_cfg, BIT(0));
+ PPE_VSI_SET_BC_BITMAP(vsi_cfg, BIT(0));
+ PPE_VSI_SET_NEW_ADDR_LRN_EN(vsi_cfg, true);
+ PPE_VSI_SET_NEW_ADDR_FWD_CMD(vsi_cfg, PPE_ACTION_FORWARD);
+ PPE_VSI_SET_STATION_MOVE_LRN_EN(vsi_cfg, true);
+ PPE_VSI_SET_STATION_MOVE_FWD_CMD(vsi_cfg, PPE_ACTION_FORWARD);
+
+ ret = regmap_bulk_write(ppe_dev->regmap, reg,
+ vsi_cfg, ARRAY_SIZE(vsi_cfg));
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int ppe_hw_config(struct ppe_device *ppe_dev)
+{
+ int ret;
+
+ ret = ppe_config_bm(ppe_dev);
+ if (ret)
+ return ret;
+
+ ret = ppe_config_qm(ppe_dev);
+ if (ret)
+ return ret;
+
+ ret = ppe_config_scheduler(ppe_dev);
+ if (ret)
+ return ret;
+
+ ret = ppe_queue_dest_init(ppe_dev);
+ if (ret)
+ return ret;
+
+ ret = ppe_servcode_init(ppe_dev);
+ if (ret)
+ return ret;
+
+ ret = ppe_port_config_init(ppe_dev);
+ if (ret)
+ return ret;
+
+ ret = ppe_rss_hash_init(ppe_dev);
+ if (ret)
+ return ret;
+
+ ret = ppe_queues_to_ring_init(ppe_dev);
+ if (ret)
+ return ret;
+
+ return ppe_bridge_init(ppe_dev);
+}
diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_config.h b/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
new file mode 100644
index 000000000000..4bb45ca40144
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
@@ -0,0 +1,317 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef __PPE_CONFIG_H__
+#define __PPE_CONFIG_H__
+
+#include <linux/types.h>
+
+#include "ppe.h"
+
+/* There are different table index ranges for configuring the queue base ID
+ * of the destination port, CPU code and service code.
+ */
+#define PPE_QUEUE_BASE_DEST_PORT 0
+#define PPE_QUEUE_BASE_CPU_CODE 1024
+#define PPE_QUEUE_BASE_SERVICE_CODE 2048
+
+#define PPE_QUEUE_INTER_PRI_NUM 16
+#define PPE_QUEUE_HASH_NUM 256
+
+/* The service code is used by EDMA port to transmit packet to PPE. */
+#define PPE_EDMA_SC_BYPASS_ID 1
+
+/* The PPE RSS hash is configured separately for IPv4 and IPv6 packets. */
+#define PPE_RSS_HASH_MODE_IPV4 BIT(0)
+#define PPE_RSS_HASH_MODE_IPV6 BIT(1)
+#define PPE_RSS_HASH_IP_LENGTH 4
+#define PPE_RSS_HASH_TUPLES 5
+
+/* PPE supports 300 queues, and each bit represents one queue, so
+ * DIV_ROUND_UP(300, 32) = 10 32-bit words are needed.
+ */
+#define PPE_RING_TO_QUEUE_BITMAP_WORD_CNT 10
+
+/**
+ * enum ppe_scheduler_frame_mode - PPE scheduler frame mode.
+ * @PPE_SCH_WITH_IPG_PREAMBLE_FRAME_CRC: The scheduled frame includes IPG,
+ * preamble, Ethernet packet and CRC.
+ * @PPE_SCH_WITH_FRAME_CRC: The scheduled frame includes Ethernet frame and CRC
+ * excluding IPG and preamble.
+ * @PPE_SCH_WITH_L3_PAYLOAD: The scheduled frame includes layer 3 packet data.
+ */
+enum ppe_scheduler_frame_mode {
+ PPE_SCH_WITH_IPG_PREAMBLE_FRAME_CRC = 0,
+ PPE_SCH_WITH_FRAME_CRC = 1,
+ PPE_SCH_WITH_L3_PAYLOAD = 2,
+};
+
+/**
+ * struct ppe_scheduler_cfg - PPE scheduler configuration.
+ * @flow_id: PPE flow ID.
+ * @pri: Scheduler priority.
+ * @drr_node_id: Node ID for scheduled traffic.
+ * @drr_node_wt: Weight for scheduled traffic.
+ * @unit_is_packet: Packet based or byte based unit for scheduled traffic.
+ * @frame_mode: Packet mode to be scheduled.
+ *
+ * PPE scheduler supports commit rate and exceed rate configurations.
+ */
+struct ppe_scheduler_cfg {
+ int flow_id;
+ int pri;
+ int drr_node_id;
+ int drr_node_wt;
+ bool unit_is_packet;
+ enum ppe_scheduler_frame_mode frame_mode;
+};
+
+/**
+ * enum ppe_resource_type - PPE resource type.
+ * @PPE_RES_UCAST: Unicast queue resource.
+ * @PPE_RES_MCAST: Multicast queue resource.
+ * @PPE_RES_L0_NODE: Level 0 for queue based node resource.
+ * @PPE_RES_L1_NODE: Level 1 for flow based node resource.
+ * @PPE_RES_FLOW_ID: Flow based node resource.
+ */
+enum ppe_resource_type {
+ PPE_RES_UCAST,
+ PPE_RES_MCAST,
+ PPE_RES_L0_NODE,
+ PPE_RES_L1_NODE,
+ PPE_RES_FLOW_ID,
+};
+
+/**
+ * struct ppe_queue_ucast_dest - PPE unicast queue destination.
+ * @src_profile: Source profile.
+ * @service_code_en: Enable service code to map the queue base ID.
+ * @service_code: Service code.
+ * @cpu_code_en: Enable CPU code to map the queue base ID.
+ * @cpu_code: CPU code.
+ * @dest_port: Destination port.
+ *
+ * The PPE egress queue ID is decided by the service code if enabled,
+ * otherwise by the CPU code if enabled, or by the destination port if both
+ * the service code and CPU code are disabled.
+ */
+struct ppe_queue_ucast_dest {
+ int src_profile;
+ bool service_code_en;
+ int service_code;
+ bool cpu_code_en;
+ int cpu_code;
+ int dest_port;
+};
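+
+/* Illustrative sketch (a hypothetical helper, not part of the driver API):
+ * deriving the queue base table index from this structure, following the
+ * precedence documented above and the PPE_QUEUE_BASE_* index ranges:
+ *
+ *	static int example_queue_base_index(struct ppe_queue_ucast_dest d)
+ *	{
+ *		if (d.service_code_en)
+ *			return PPE_QUEUE_BASE_SERVICE_CODE + d.service_code;
+ *		if (d.cpu_code_en)
+ *			return PPE_QUEUE_BASE_CPU_CODE + d.cpu_code;
+ *		return PPE_QUEUE_BASE_DEST_PORT + d.dest_port;
+ *	}
+ */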
+
+/* Hardware bitmaps for bypassing features of the ingress packet. */
+enum ppe_sc_ingress_type {
+ PPE_SC_BYPASS_INGRESS_VLAN_TAG_FMT_CHECK = 0,
+ PPE_SC_BYPASS_INGRESS_VLAN_MEMBER_CHECK = 1,
+ PPE_SC_BYPASS_INGRESS_VLAN_TRANSLATE = 2,
+ PPE_SC_BYPASS_INGRESS_MY_MAC_CHECK = 3,
+ PPE_SC_BYPASS_INGRESS_DIP_LOOKUP = 4,
+ PPE_SC_BYPASS_INGRESS_FLOW_LOOKUP = 5,
+ PPE_SC_BYPASS_INGRESS_FLOW_ACTION = 6,
+ PPE_SC_BYPASS_INGRESS_ACL = 7,
+ PPE_SC_BYPASS_INGRESS_FAKE_MAC_HEADER = 8,
+ PPE_SC_BYPASS_INGRESS_SERVICE_CODE = 9,
+ PPE_SC_BYPASS_INGRESS_WRONG_PKT_FMT_L2 = 10,
+ PPE_SC_BYPASS_INGRESS_WRONG_PKT_FMT_L3_IPV4 = 11,
+ PPE_SC_BYPASS_INGRESS_WRONG_PKT_FMT_L3_IPV6 = 12,
+ PPE_SC_BYPASS_INGRESS_WRONG_PKT_FMT_L4 = 13,
+ PPE_SC_BYPASS_INGRESS_FLOW_SERVICE_CODE = 14,
+ PPE_SC_BYPASS_INGRESS_ACL_SERVICE_CODE = 15,
+ PPE_SC_BYPASS_INGRESS_FAKE_L2_PROTO = 16,
+ PPE_SC_BYPASS_INGRESS_PPPOE_TERMINATION = 17,
+ PPE_SC_BYPASS_INGRESS_DEFAULT_VLAN = 18,
+ PPE_SC_BYPASS_INGRESS_DEFAULT_PCP = 19,
+ PPE_SC_BYPASS_INGRESS_VSI_ASSIGN = 20,
+ /* Values 21-23 are not specified by hardware. */
+ PPE_SC_BYPASS_INGRESS_VLAN_ASSIGN_FAIL = 24,
+ PPE_SC_BYPASS_INGRESS_SOURCE_GUARD = 25,
+ PPE_SC_BYPASS_INGRESS_MRU_MTU_CHECK = 26,
+ PPE_SC_BYPASS_INGRESS_FLOW_SRC_CHECK = 27,
+ PPE_SC_BYPASS_INGRESS_FLOW_QOS = 28,
+ /* This must be last as it determines the size of the BITMAP. */
+ PPE_SC_BYPASS_INGRESS_SIZE,
+};
+
+/* Hardware bitmaps for bypassing features of the egress packet. */
+enum ppe_sc_egress_type {
+ PPE_SC_BYPASS_EGRESS_VLAN_MEMBER_CHECK = 0,
+ PPE_SC_BYPASS_EGRESS_VLAN_TRANSLATE = 1,
+ PPE_SC_BYPASS_EGRESS_VLAN_TAG_FMT_CTRL = 2,
+ PPE_SC_BYPASS_EGRESS_FDB_LEARN = 3,
+ PPE_SC_BYPASS_EGRESS_FDB_REFRESH = 4,
+ PPE_SC_BYPASS_EGRESS_L2_SOURCE_SECURITY = 5,
+ PPE_SC_BYPASS_EGRESS_MANAGEMENT_FWD = 6,
+ PPE_SC_BYPASS_EGRESS_BRIDGING_FWD = 7,
+ PPE_SC_BYPASS_EGRESS_IN_STP_FLTR = 8,
+ PPE_SC_BYPASS_EGRESS_EG_STP_FLTR = 9,
+ PPE_SC_BYPASS_EGRESS_SOURCE_FLTR = 10,
+ PPE_SC_BYPASS_EGRESS_POLICER = 11,
+ PPE_SC_BYPASS_EGRESS_L2_PKT_EDIT = 12,
+ PPE_SC_BYPASS_EGRESS_L3_PKT_EDIT = 13,
+ PPE_SC_BYPASS_EGRESS_ACL_POST_ROUTING_CHECK = 14,
+ PPE_SC_BYPASS_EGRESS_PORT_ISOLATION = 15,
+ PPE_SC_BYPASS_EGRESS_PRE_ACL_QOS = 16,
+ PPE_SC_BYPASS_EGRESS_POST_ACL_QOS = 17,
+ PPE_SC_BYPASS_EGRESS_DSCP_QOS = 18,
+ PPE_SC_BYPASS_EGRESS_PCP_QOS = 19,
+ PPE_SC_BYPASS_EGRESS_PREHEADER_QOS = 20,
+ PPE_SC_BYPASS_EGRESS_FAKE_MAC_DROP = 21,
+ PPE_SC_BYPASS_EGRESS_TUNL_CONTEXT = 22,
+ PPE_SC_BYPASS_EGRESS_FLOW_POLICER = 23,
+ /* This must be last as it determines the size of the BITMAP. */
+ PPE_SC_BYPASS_EGRESS_SIZE,
+};
+
+/* Hardware bitmaps for bypassing the packet counters. */
+enum ppe_sc_counter_type {
+ PPE_SC_BYPASS_COUNTER_RX_VLAN = 0,
+ PPE_SC_BYPASS_COUNTER_RX = 1,
+ PPE_SC_BYPASS_COUNTER_TX_VLAN = 2,
+ PPE_SC_BYPASS_COUNTER_TX = 3,
+ /* This must be last as it determines the size of the BITMAP. */
+ PPE_SC_BYPASS_COUNTER_SIZE,
+};
+
+/* Hardware bitmaps for bypassing features of the tunnel packet. */
+enum ppe_sc_tunnel_type {
+ PPE_SC_BYPASS_TUNNEL_SERVICE_CODE = 0,
+ PPE_SC_BYPASS_TUNNEL_TUNNEL_HANDLE = 1,
+ PPE_SC_BYPASS_TUNNEL_L3_IF_CHECK = 2,
+ PPE_SC_BYPASS_TUNNEL_VLAN_CHECK = 3,
+ PPE_SC_BYPASS_TUNNEL_DMAC_CHECK = 4,
+ PPE_SC_BYPASS_TUNNEL_UDP_CSUM_0_CHECK = 5,
+ PPE_SC_BYPASS_TUNNEL_TBL_DE_ACCE_CHECK = 6,
+ PPE_SC_BYPASS_TUNNEL_PPPOE_MC_TERM_CHECK = 7,
+ PPE_SC_BYPASS_TUNNEL_TTL_EXCEED_CHECK = 8,
+ PPE_SC_BYPASS_TUNNEL_MAP_SRC_CHECK = 9,
+ PPE_SC_BYPASS_TUNNEL_MAP_DST_CHECK = 10,
+ PPE_SC_BYPASS_TUNNEL_LPM_DST_LOOKUP = 11,
+ PPE_SC_BYPASS_TUNNEL_LPM_LOOKUP = 12,
+ PPE_SC_BYPASS_TUNNEL_WRONG_PKT_FMT_L2 = 13,
+ PPE_SC_BYPASS_TUNNEL_WRONG_PKT_FMT_L3_IPV4 = 14,
+ PPE_SC_BYPASS_TUNNEL_WRONG_PKT_FMT_L3_IPV6 = 15,
+ PPE_SC_BYPASS_TUNNEL_WRONG_PKT_FMT_L4 = 16,
+ PPE_SC_BYPASS_TUNNEL_WRONG_PKT_FMT_TUNNEL = 17,
+ /* Values 18-19 are not specified by hardware. */
+ PPE_SC_BYPASS_TUNNEL_PRE_IPO = 20,
+ /* This must be last as it determines the size of the BITMAP. */
+ PPE_SC_BYPASS_TUNNEL_SIZE,
+};
+
+/**
+ * struct ppe_sc_bypass - PPE service bypass bitmaps.
+ * @ingress: Bitmap of features that can be bypassed on the ingress packet.
+ * @egress: Bitmap of features that can be bypassed on the egress packet.
+ * @counter: Bitmap of features that can be bypassed on the counter type.
+ * @tunnel: Bitmap of features that can be bypassed on the tunnel packet.
+ */
+struct ppe_sc_bypass {
+ DECLARE_BITMAP(ingress, PPE_SC_BYPASS_INGRESS_SIZE);
+ DECLARE_BITMAP(egress, PPE_SC_BYPASS_EGRESS_SIZE);
+ DECLARE_BITMAP(counter, PPE_SC_BYPASS_COUNTER_SIZE);
+ DECLARE_BITMAP(tunnel, PPE_SC_BYPASS_TUNNEL_SIZE);
+};
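+
+/* These bitmaps are sized by the *_SIZE terminator of the matching enum
+ * above and manipulated with the regular bitmap helpers, as done by
+ * ppe_servcode_init():
+ *
+ *	struct ppe_sc_cfg sc_cfg = {};
+ *
+ *	bitmap_fill(sc_cfg.bitmaps.ingress, PPE_SC_BYPASS_INGRESS_SIZE);
+ *	clear_bit(PPE_SC_BYPASS_INGRESS_SERVICE_CODE, sc_cfg.bitmaps.ingress);
+ */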
+
+/**
+ * struct ppe_sc_cfg - PPE service code configuration.
+ * @dest_port_valid: Generate destination port or not.
+ * @dest_port: Destination port ID.
+ * @bitmaps: Bitmap of bypass features.
+ * @is_src: Destination port acts as source port, packet sent to CPU.
+ * @next_service_code: New service code generated.
+ * @eip_field_update_bitmap: Fields updated as actions taken for EIP.
+ * @eip_hw_service: Selected hardware functions for EIP.
+ * @eip_offset_sel: Packet offset selection, using packet's layer 4 offset
+ * or using packet's layer 3 offset for EIP.
+ *
+ * The service code is generated as the packet passes through the PPE.
+ */
+struct ppe_sc_cfg {
+ bool dest_port_valid;
+ int dest_port;
+ struct ppe_sc_bypass bitmaps;
+ bool is_src;
+ int next_service_code;
+ int eip_field_update_bitmap;
+ int eip_hw_service;
+ int eip_offset_sel;
+};
+
+/**
+ * enum ppe_action_type - PPE action of the received packet.
+ * @PPE_ACTION_FORWARD: Packet forwarded per L2/L3 process.
+ * @PPE_ACTION_DROP: Packet dropped by PPE.
+ * @PPE_ACTION_COPY_TO_CPU: Packet copied to CPU port per multicast queue.
+ * @PPE_ACTION_REDIRECT_TO_CPU: Packet redirected to CPU port per unicast queue.
+ */
+enum ppe_action_type {
+ PPE_ACTION_FORWARD = 0,
+ PPE_ACTION_DROP = 1,
+ PPE_ACTION_COPY_TO_CPU = 2,
+ PPE_ACTION_REDIRECT_TO_CPU = 3,
+};
+
+/**
+ * struct ppe_rss_hash_cfg - PPE RSS hash configuration.
+ * @hash_mask: Mask of the generated hash value.
+ * @hash_fragment_mode: Hash generation mode for the first fragment of TCP,
+ * UDP and UDP-Lite packets, to use either 3 tuple or 5 tuple for RSS hash
+ * key computation.
+ * @hash_seed: Seed to generate RSS hash.
+ * @hash_sip_mix: Source IP selection.
+ * @hash_dip_mix: Destination IP selection.
+ * @hash_protocol_mix: Protocol selection.
+ * @hash_sport_mix: Source L4 port selection.
+ * @hash_dport_mix: Destination L4 port selection.
+ * @hash_fin_inner: RSS hash value first selection.
+ * @hash_fin_outer: RSS hash value second selection.
+ *
+ * The PPE RSS hash value is generated for the packet based on the RSS hash
+ * configuration.
+ */
+struct ppe_rss_hash_cfg {
+ u32 hash_mask;
+ bool hash_fragment_mode;
+ u32 hash_seed;
+ u8 hash_sip_mix[PPE_RSS_HASH_IP_LENGTH];
+ u8 hash_dip_mix[PPE_RSS_HASH_IP_LENGTH];
+ u8 hash_protocol_mix;
+ u8 hash_sport_mix;
+ u8 hash_dport_mix;
+ u8 hash_fin_inner[PPE_RSS_HASH_TUPLES];
+ u8 hash_fin_outer[PPE_RSS_HASH_TUPLES];
+};
+
+int ppe_hw_config(struct ppe_device *ppe_dev);
+int ppe_queue_scheduler_set(struct ppe_device *ppe_dev,
+ int node_id, bool flow_level, int port,
+ struct ppe_scheduler_cfg scheduler_cfg);
+int ppe_queue_ucast_base_set(struct ppe_device *ppe_dev,
+ struct ppe_queue_ucast_dest queue_dst,
+ int queue_base,
+ int profile_id);
+int ppe_queue_ucast_offset_pri_set(struct ppe_device *ppe_dev,
+ int profile_id,
+ int priority,
+ int queue_offset);
+int ppe_queue_ucast_offset_hash_set(struct ppe_device *ppe_dev,
+ int profile_id,
+ int rss_hash,
+ int queue_offset);
+int ppe_port_resource_get(struct ppe_device *ppe_dev, int port,
+ enum ppe_resource_type type,
+ int *res_start, int *res_end);
+int ppe_sc_config_set(struct ppe_device *ppe_dev, int sc,
+ struct ppe_sc_cfg cfg);
+int ppe_counter_enable_set(struct ppe_device *ppe_dev, int port);
+int ppe_rss_hash_config_set(struct ppe_device *ppe_dev, int mode,
+ struct ppe_rss_hash_cfg hash_cfg);
+int ppe_ring_queue_map_set(struct ppe_device *ppe_dev,
+ int ring_id,
+ u32 *queue_map);
+#endif
diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.c b/drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.c
new file mode 100644
index 000000000000..fd959a76ff43
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.c
@@ -0,0 +1,847 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+/* PPE debugfs routines to display the PPE counters useful for debugging. */
+
+#include <linux/bitfield.h>
+#include <linux/debugfs.h>
+#include <linux/dev_printk.h>
+#include <linux/device.h>
+#include <linux/regmap.h>
+#include <linux/seq_file.h>
+
+#include "ppe.h"
+#include "ppe_config.h"
+#include "ppe_debugfs.h"
+#include "ppe_regs.h"
+
+#define PPE_PKT_CNT_TBL_SIZE 3
+#define PPE_DROP_PKT_CNT_TBL_SIZE 5
+
+#define PPE_W0_PKT_CNT GENMASK(31, 0)
+#define PPE_W2_DROP_PKT_CNT_LOW GENMASK(31, 8)
+#define PPE_W3_DROP_PKT_CNT_HIGH GENMASK(7, 0)
+
+#define PPE_GET_PKT_CNT(tbl_cnt) \
+ FIELD_GET(PPE_W0_PKT_CNT, *(tbl_cnt))
+#define PPE_GET_DROP_PKT_CNT_LOW(tbl_cnt) \
+ FIELD_GET(PPE_W2_DROP_PKT_CNT_LOW, *((tbl_cnt) + 0x2))
+#define PPE_GET_DROP_PKT_CNT_HIGH(tbl_cnt) \
+ FIELD_GET(PPE_W3_DROP_PKT_CNT_HIGH, *((tbl_cnt) + 0x3))
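+
+/* The 32-bit drop counter of a 5-word counter table spans two words: bits
+ * [31:8] of word 2 hold the low 24 bits and bits [7:0] of word 3 hold the
+ * high 8 bits, so the full value is reassembled as
+ *
+ *	drop_cnt = PPE_GET_DROP_PKT_CNT_LOW(tbl_cnt) |
+ *		   (PPE_GET_DROP_PKT_CNT_HIGH(tbl_cnt) << 24);
+ */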
+
+/**
+ * enum ppe_cnt_size_type - PPE counter size type.
+ * @PPE_PKT_CNT_SIZE_1WORD: Counter size with a single register.
+ * @PPE_PKT_CNT_SIZE_3WORD: Counter size with a table of 3 words.
+ * @PPE_PKT_CNT_SIZE_5WORD: Counter size with a table of 5 words.
+ *
+ * PPE uses registers of different sizes to record the packet counters:
+ * either a single register, or a register table of 3 words or 5 words.
+ * The counter with a table size of 5 words also records the drop counter.
+ * There are also some other counter types occupying sizes less than 32
+ * bits, which are not covered by this enumeration type.
+ */
+enum ppe_cnt_size_type {
+ PPE_PKT_CNT_SIZE_1WORD,
+ PPE_PKT_CNT_SIZE_3WORD,
+ PPE_PKT_CNT_SIZE_5WORD,
+};
+
+/**
+ * enum ppe_cnt_type - PPE counter type.
+ * @PPE_CNT_BM: Packet counter processed by BM.
+ * @PPE_CNT_PARSE: Packet counter parsed on ingress.
+ * @PPE_CNT_PORT_RX: Packet counter on the ingress port.
+ * @PPE_CNT_VLAN_RX: VLAN packet counter received.
+ * @PPE_CNT_L2_FWD: Packet counter processed by L2 forwarding.
+ * @PPE_CNT_CPU_CODE: Packet counter marked with various CPU codes.
+ * @PPE_CNT_VLAN_TX: VLAN packet counter transmitted.
+ * @PPE_CNT_PORT_TX: Packet counter on the egress port.
+ * @PPE_CNT_QM: Packet counter processed by QM.
+ */
+enum ppe_cnt_type {
+ PPE_CNT_BM,
+ PPE_CNT_PARSE,
+ PPE_CNT_PORT_RX,
+ PPE_CNT_VLAN_RX,
+ PPE_CNT_L2_FWD,
+ PPE_CNT_CPU_CODE,
+ PPE_CNT_VLAN_TX,
+ PPE_CNT_PORT_TX,
+ PPE_CNT_QM,
+};
+
+/**
+ * struct ppe_debugfs_entry - PPE debugfs entry.
+ * @name: Debugfs file name.
+ * @counter_type: PPE packet counter type.
+ * @ppe: PPE device.
+ *
+ * The PPE debugfs entry is used to create the debugfs file and passed
+ * to debugfs_create_file() as private data.
+ */
+struct ppe_debugfs_entry {
+ const char *name;
+ enum ppe_cnt_type counter_type;
+ struct ppe_device *ppe;
+};
+
+static const struct ppe_debugfs_entry debugfs_files[] = {
+ {
+ .name = "bm",
+ .counter_type = PPE_CNT_BM,
+ },
+ {
+ .name = "parse",
+ .counter_type = PPE_CNT_PARSE,
+ },
+ {
+ .name = "port_rx",
+ .counter_type = PPE_CNT_PORT_RX,
+ },
+ {
+ .name = "vlan_rx",
+ .counter_type = PPE_CNT_VLAN_RX,
+ },
+ {
+ .name = "l2_forward",
+ .counter_type = PPE_CNT_L2_FWD,
+ },
+ {
+ .name = "cpu_code",
+ .counter_type = PPE_CNT_CPU_CODE,
+ },
+ {
+ .name = "vlan_tx",
+ .counter_type = PPE_CNT_VLAN_TX,
+ },
+ {
+ .name = "port_tx",
+ .counter_type = PPE_CNT_PORT_TX,
+ },
+ {
+ .name = "qm",
+ .counter_type = PPE_CNT_QM,
+ },
+};
+
+static int ppe_pkt_cnt_get(struct ppe_device *ppe_dev, u32 reg,
+ enum ppe_cnt_size_type cnt_type,
+ u32 *cnt, u32 *drop_cnt)
+{
+ u32 drop_pkt_cnt[PPE_DROP_PKT_CNT_TBL_SIZE];
+ u32 pkt_cnt[PPE_PKT_CNT_TBL_SIZE];
+ u32 value;
+ int ret;
+
+ switch (cnt_type) {
+ case PPE_PKT_CNT_SIZE_1WORD:
+ ret = regmap_read(ppe_dev->regmap, reg, &value);
+ if (ret)
+ return ret;
+
+ *cnt = value;
+ break;
+ case PPE_PKT_CNT_SIZE_3WORD:
+ ret = regmap_bulk_read(ppe_dev->regmap, reg,
+ pkt_cnt, ARRAY_SIZE(pkt_cnt));
+ if (ret)
+ return ret;
+
+ *cnt = PPE_GET_PKT_CNT(pkt_cnt);
+ break;
+ case PPE_PKT_CNT_SIZE_5WORD:
+ ret = regmap_bulk_read(ppe_dev->regmap, reg,
+ drop_pkt_cnt, ARRAY_SIZE(drop_pkt_cnt));
+ if (ret)
+ return ret;
+
+ *cnt = PPE_GET_PKT_CNT(drop_pkt_cnt);
+
+ /* Drop counter with low 24 bits. */
+ value = PPE_GET_DROP_PKT_CNT_LOW(drop_pkt_cnt);
+ *drop_cnt = FIELD_PREP(GENMASK(23, 0), value);
+
+ /* Drop counter with high 8 bits. */
+ value = PPE_GET_DROP_PKT_CNT_HIGH(drop_pkt_cnt);
+ *drop_cnt |= FIELD_PREP(GENMASK(31, 24), value);
+ break;
+ }
+
+ return 0;
+}
+
+static void ppe_tbl_pkt_cnt_clear(struct ppe_device *ppe_dev, u32 reg,
+ enum ppe_cnt_size_type cnt_type)
+{
+ u32 drop_pkt_cnt[PPE_DROP_PKT_CNT_TBL_SIZE] = {};
+ u32 pkt_cnt[PPE_PKT_CNT_TBL_SIZE] = {};
+
+ switch (cnt_type) {
+ case PPE_PKT_CNT_SIZE_1WORD:
+ regmap_write(ppe_dev->regmap, reg, 0);
+ break;
+ case PPE_PKT_CNT_SIZE_3WORD:
+ regmap_bulk_write(ppe_dev->regmap, reg,
+ pkt_cnt, ARRAY_SIZE(pkt_cnt));
+ break;
+ case PPE_PKT_CNT_SIZE_5WORD:
+ regmap_bulk_write(ppe_dev->regmap, reg,
+ drop_pkt_cnt, ARRAY_SIZE(drop_pkt_cnt));
+ break;
+ }
+}
+
+static int ppe_bm_counter_get(struct ppe_device *ppe_dev, struct seq_file *seq)
+{
+ u32 reg, val, pkt_cnt, pkt_cnt1;
+ int ret, i, tag;
+
+ seq_printf(seq, "%-24s", "BM SILENT_DROP:");
+ tag = 0;
+ for (i = 0; i < PPE_DROP_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_DROP_CNT_TBL_ADDR + i * PPE_DROP_CNT_TBL_INC;
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_1WORD,
+ &pkt_cnt, NULL);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ if (pkt_cnt > 0) {
+ if (!((++tag) % 4))
+ seq_printf(seq, "\n%-24s", "");
+
+ seq_printf(seq, "%10u(%s=%04d)", pkt_cnt, "port", i);
+ }
+ }
+
+ seq_putc(seq, '\n');
+
+	/* The number of packets dropped because the hardware buffers were
+	 * only partially available for the packet.
+	 */
+ seq_printf(seq, "%-24s", "BM OVERFLOW_DROP:");
+ tag = 0;
+ for (i = 0; i < PPE_DROP_STAT_TBL_ENTRIES; i++) {
+ reg = PPE_DROP_STAT_TBL_ADDR + PPE_DROP_STAT_TBL_INC * i;
+
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
+ &pkt_cnt, NULL);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ if (pkt_cnt > 0) {
+ if (!((++tag) % 4))
+ seq_printf(seq, "\n%-24s", "");
+
+ seq_printf(seq, "%10u(%s=%04d)", pkt_cnt, "port", i);
+ }
+ }
+
+ seq_putc(seq, '\n');
+
+	/* The number of currently occupied buffers, which cannot be flushed. */
+ seq_printf(seq, "%-24s", "BM USED/REACT:");
+ tag = 0;
+ for (i = 0; i < PPE_BM_USED_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_BM_USED_CNT_TBL_ADDR + i * PPE_BM_USED_CNT_TBL_INC;
+ ret = regmap_read(ppe_dev->regmap, reg, &val);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+		/* The number of PPE buffers used for caching the received
+		 * packets before the pause frame is sent.
+		 */
+ pkt_cnt = FIELD_GET(PPE_BM_USED_CNT_VAL, val);
+
+ reg = PPE_BM_REACT_CNT_TBL_ADDR + i * PPE_BM_REACT_CNT_TBL_INC;
+ ret = regmap_read(ppe_dev->regmap, reg, &val);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+		/* The number of PPE buffers used for caching the received
+		 * packets after the pause frame is sent out.
+		 */
+ pkt_cnt1 = FIELD_GET(PPE_BM_REACT_CNT_VAL, val);
+
+ if (pkt_cnt > 0 || pkt_cnt1 > 0) {
+ if (!((++tag) % 4))
+ seq_printf(seq, "\n%-24s", "");
+
+ seq_printf(seq, "%10u/%u(%s=%04d)", pkt_cnt, pkt_cnt1,
+ "port", i);
+ }
+ }
+
+ seq_putc(seq, '\n');
+
+ return 0;
+}
+
+/* The number of packets processed by the ingress parser module of PPE. */
+static int ppe_parse_pkt_counter_get(struct ppe_device *ppe_dev,
+ struct seq_file *seq)
+{
+ u32 reg, cnt = 0, tunnel_cnt = 0;
+ int i, ret, tag = 0;
+
+ seq_printf(seq, "%-24s", "PARSE TPRX/IPRX:");
+ for (i = 0; i < PPE_IPR_PKT_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_TPR_PKT_CNT_TBL_ADDR + i * PPE_TPR_PKT_CNT_TBL_INC;
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_1WORD,
+ &tunnel_cnt, NULL);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ reg = PPE_IPR_PKT_CNT_TBL_ADDR + i * PPE_IPR_PKT_CNT_TBL_INC;
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_1WORD,
+ &cnt, NULL);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ if (tunnel_cnt > 0 || cnt > 0) {
+ if (!((++tag) % 4))
+ seq_printf(seq, "\n%-24s", "");
+
+ seq_printf(seq, "%10u/%u(%s=%04d)", tunnel_cnt, cnt,
+ "port", i);
+ }
+ }
+
+ seq_putc(seq, '\n');
+
+ return 0;
+}
+
+/* The number of packets received or dropped on the ingress port. */
+static int ppe_port_rx_counter_get(struct ppe_device *ppe_dev,
+ struct seq_file *seq)
+{
+ u32 reg, pkt_cnt = 0, drop_cnt = 0;
+ int ret, i, tag;
+
+ seq_printf(seq, "%-24s", "PORT RX/RX_DROP:");
+ tag = 0;
+ for (i = 0; i < PPE_PHY_PORT_RX_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_PHY_PORT_RX_CNT_TBL_ADDR + PPE_PHY_PORT_RX_CNT_TBL_INC * i;
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_5WORD,
+ &pkt_cnt, &drop_cnt);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ if (pkt_cnt > 0) {
+ if (!((++tag) % 4))
+ seq_printf(seq, "\n%-24s", "");
+
+ seq_printf(seq, "%10u/%u(%s=%04d)", pkt_cnt, drop_cnt,
+ "port", i);
+ }
+ }
+
+ seq_putc(seq, '\n');
+
+ seq_printf(seq, "%-24s", "VPORT RX/RX_DROP:");
+ tag = 0;
+ for (i = 0; i < PPE_PORT_RX_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_PORT_RX_CNT_TBL_ADDR + PPE_PORT_RX_CNT_TBL_INC * i;
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_5WORD,
+ &pkt_cnt, &drop_cnt);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ if (pkt_cnt > 0) {
+ if (!((++tag) % 4))
+ seq_printf(seq, "\n%-24s", "");
+
+ seq_printf(seq, "%10u/%u(%s=%04d)", pkt_cnt, drop_cnt,
+ "port", i);
+ }
+ }
+
+ seq_putc(seq, '\n');
+
+ return 0;
+}
+
+/* The number of packets received or dropped by layer 2 processing. */
+static int ppe_l2_counter_get(struct ppe_device *ppe_dev,
+ struct seq_file *seq)
+{
+ u32 reg, pkt_cnt = 0, drop_cnt = 0;
+ int ret, i, tag = 0;
+
+ seq_printf(seq, "%-24s", "L2 RX/RX_DROP:");
+ for (i = 0; i < PPE_PRE_L2_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_PRE_L2_CNT_TBL_ADDR + PPE_PRE_L2_CNT_TBL_INC * i;
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_5WORD,
+ &pkt_cnt, &drop_cnt);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ if (pkt_cnt > 0) {
+ if (!((++tag) % 4))
+ seq_printf(seq, "\n%-24s", "");
+
+ seq_printf(seq, "%10u/%u(%s=%04d)", pkt_cnt, drop_cnt,
+ "vsi", i);
+ }
+ }
+
+ seq_putc(seq, '\n');
+
+ return 0;
+}
+
+/* The number of VLAN packets received by PPE. */
+static int ppe_vlan_rx_counter_get(struct ppe_device *ppe_dev,
+ struct seq_file *seq)
+{
+ u32 reg, pkt_cnt = 0;
+ int ret, i, tag = 0;
+
+ seq_printf(seq, "%-24s", "VLAN RX:");
+ for (i = 0; i < PPE_VLAN_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_VLAN_CNT_TBL_ADDR + PPE_VLAN_CNT_TBL_INC * i;
+
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
+ &pkt_cnt, NULL);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ if (pkt_cnt > 0) {
+ if (!((++tag) % 4))
+ seq_printf(seq, "\n%-24s", "");
+
+ seq_printf(seq, "%10u(%s=%04d)", pkt_cnt, "vsi", i);
+ }
+ }
+
+ seq_putc(seq, '\n');
+
+ return 0;
+}
+
+/* The number of packets handed to the CPU by the PPE. */
+static int ppe_cpu_code_counter_get(struct ppe_device *ppe_dev,
+ struct seq_file *seq)
+{
+ u32 reg, pkt_cnt = 0;
+ int ret, i;
+
+ seq_printf(seq, "%-24s", "CPU CODE:");
+ for (i = 0; i < PPE_DROP_CPU_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_DROP_CPU_CNT_TBL_ADDR + PPE_DROP_CPU_CNT_TBL_INC * i;
+
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
+ &pkt_cnt, NULL);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ if (!pkt_cnt)
+ continue;
+
+			/* There are 256 CPU codes saved in the first 256 entries
+			 * of the register table, and 128 drop codes for each PPE
+			 * port (0-7); the total number of entries is
+			 * 256 + 8 * 128.
+			 */
+ if (i < 256)
+ seq_printf(seq, "%10u(cpucode:%d)", pkt_cnt, i);
+ else
+ seq_printf(seq, "%10u(port=%04d),dropcode:%d", pkt_cnt,
+ (i - 256) % 8, (i - 256) / 8);
+ seq_putc(seq, '\n');
+ seq_printf(seq, "%-24s", "");
+ }
+
+ seq_putc(seq, '\n');
+
+ return 0;
+}
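+
+/* Worked example for the table layout described above: entry i = 300 falls
+ * in the drop code region, so it maps to port (300 - 256) % 8 = 4 with
+ * drop code (300 - 256) / 8 = 5.
+ */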
+
+/* The number of packets forwarded by VLAN on the egress direction. */
+static int ppe_vlan_tx_counter_get(struct ppe_device *ppe_dev,
+ struct seq_file *seq)
+{
+ u32 reg, pkt_cnt = 0;
+ int ret, i, tag = 0;
+
+ seq_printf(seq, "%-24s", "VLAN TX:");
+ for (i = 0; i < PPE_EG_VSI_COUNTER_TBL_ENTRIES; i++) {
+ reg = PPE_EG_VSI_COUNTER_TBL_ADDR + PPE_EG_VSI_COUNTER_TBL_INC * i;
+
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
+ &pkt_cnt, NULL);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ if (pkt_cnt > 0) {
+ if (!((++tag) % 4))
+ seq_printf(seq, "\n%-24s", "");
+
+ seq_printf(seq, "%10u(%s=%04d)", pkt_cnt, "vsi", i);
+ }
+ }
+
+ seq_putc(seq, '\n');
+
+ return 0;
+}
+
+/* The number of packets transmitted or dropped on the egress port. */
+static int ppe_port_tx_counter_get(struct ppe_device *ppe_dev,
+ struct seq_file *seq)
+{
+ u32 reg, pkt_cnt = 0, drop_cnt = 0;
+ int ret, i, tag;
+
+ seq_printf(seq, "%-24s", "VPORT TX/TX_DROP:");
+ tag = 0;
+ for (i = 0; i < PPE_VPORT_TX_COUNTER_TBL_ENTRIES; i++) {
+ reg = PPE_VPORT_TX_COUNTER_TBL_ADDR + PPE_VPORT_TX_COUNTER_TBL_INC * i;
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
+ &pkt_cnt, NULL);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ reg = PPE_VPORT_TX_DROP_CNT_TBL_ADDR + PPE_VPORT_TX_DROP_CNT_TBL_INC * i;
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
+ &drop_cnt, NULL);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ if (pkt_cnt > 0 || drop_cnt > 0) {
+ if (!((++tag) % 4))
+ seq_printf(seq, "\n%-24s", "");
+
+ seq_printf(seq, "%10u/%u(%s=%04d)", pkt_cnt, drop_cnt,
+ "port", i);
+ }
+ }
+
+ seq_putc(seq, '\n');
+
+ seq_printf(seq, "%-24s", "PORT TX/TX_DROP:");
+ tag = 0;
+ for (i = 0; i < PPE_PORT_TX_COUNTER_TBL_ENTRIES; i++) {
+ reg = PPE_PORT_TX_COUNTER_TBL_ADDR + PPE_PORT_TX_COUNTER_TBL_INC * i;
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
+ &pkt_cnt, NULL);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ reg = PPE_PORT_TX_DROP_CNT_TBL_ADDR + PPE_PORT_TX_DROP_CNT_TBL_INC * i;
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
+ &drop_cnt, NULL);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ if (pkt_cnt > 0 || drop_cnt > 0) {
+ if (!((++tag) % 4))
+ seq_printf(seq, "\n%-24s", "");
+
+ seq_printf(seq, "%10u/%u(%s=%04d)", pkt_cnt, drop_cnt,
+ "port", i);
+ }
+ }
+
+ seq_putc(seq, '\n');
+
+ return 0;
+}
+
+/* The number of packets transmitted or pending on the PPE queue. */
+static int ppe_queue_counter_get(struct ppe_device *ppe_dev,
+ struct seq_file *seq)
+{
+ u32 reg, val, pkt_cnt = 0, pend_cnt = 0, drop_cnt = 0;
+ int ret, i, tag = 0;
+
+ seq_printf(seq, "%-24s", "QUEUE TX/PEND/DROP:");
+ for (i = 0; i < PPE_QUEUE_TX_COUNTER_TBL_ENTRIES; i++) {
+ reg = PPE_QUEUE_TX_COUNTER_TBL_ADDR + PPE_QUEUE_TX_COUNTER_TBL_INC * i;
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
+ &pkt_cnt, NULL);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ if (i < PPE_AC_UNICAST_QUEUE_CFG_TBL_ENTRIES) {
+ reg = PPE_AC_UNICAST_QUEUE_CNT_TBL_ADDR +
+ PPE_AC_UNICAST_QUEUE_CNT_TBL_INC * i;
+ ret = regmap_read(ppe_dev->regmap, reg, &val);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ pend_cnt = FIELD_GET(PPE_AC_UNICAST_QUEUE_CNT_TBL_PEND_CNT, val);
+
+ reg = PPE_UNICAST_DROP_CNT_TBL_ADDR +
+ PPE_AC_UNICAST_QUEUE_CNT_TBL_INC *
+ (i * PPE_UNICAST_DROP_TYPES + PPE_UNICAST_DROP_FORCE_OFFSET);
+
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
+ &drop_cnt, NULL);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+ } else {
+ int mq_offset = i - PPE_AC_UNICAST_QUEUE_CFG_TBL_ENTRIES;
+
+ reg = PPE_AC_MULTICAST_QUEUE_CNT_TBL_ADDR +
+ PPE_AC_MULTICAST_QUEUE_CNT_TBL_INC * mq_offset;
+ ret = regmap_read(ppe_dev->regmap, reg, &val);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ pend_cnt = FIELD_GET(PPE_AC_MULTICAST_QUEUE_CNT_TBL_PEND_CNT, val);
+
+ if (mq_offset < PPE_P0_MULTICAST_QUEUE_NUM) {
+ reg = PPE_CPU_PORT_MULTICAST_FORCE_DROP_CNT_TBL_ADDR(mq_offset);
+ } else {
+ mq_offset -= PPE_P0_MULTICAST_QUEUE_NUM;
+
+ reg = PPE_P1_MULTICAST_DROP_CNT_TBL_ADDR;
+ reg += (mq_offset / PPE_MULTICAST_QUEUE_NUM) *
+ PPE_MULTICAST_QUEUE_PORT_ADDR_INC;
+ reg += (mq_offset % PPE_MULTICAST_QUEUE_NUM) *
+ PPE_MULTICAST_DROP_CNT_TBL_INC *
+ PPE_MULTICAST_DROP_TYPES;
+ }
+
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
+ &drop_cnt, NULL);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (pkt_cnt > 0 || pend_cnt > 0 || drop_cnt > 0) {
+ if (!((++tag) % 4))
+ seq_printf(seq, "\n%-24s", "");
+
+ seq_printf(seq, "%10u/%u/%u(%s=%04d)",
+ pkt_cnt, pend_cnt, drop_cnt, "queue", i);
+ }
+ }
+
+ seq_putc(seq, '\n');
+
+ return 0;
+}
+
+/* Display the various packet counters of PPE. */
+static int ppe_packet_counter_show(struct seq_file *seq, void *v)
+{
+ struct ppe_debugfs_entry *entry = seq->private;
+ struct ppe_device *ppe_dev = entry->ppe;
+ int ret;
+
+ switch (entry->counter_type) {
+ case PPE_CNT_BM:
+ ret = ppe_bm_counter_get(ppe_dev, seq);
+ break;
+ case PPE_CNT_PARSE:
+ ret = ppe_parse_pkt_counter_get(ppe_dev, seq);
+ break;
+ case PPE_CNT_PORT_RX:
+ ret = ppe_port_rx_counter_get(ppe_dev, seq);
+ break;
+ case PPE_CNT_VLAN_RX:
+ ret = ppe_vlan_rx_counter_get(ppe_dev, seq);
+ break;
+ case PPE_CNT_L2_FWD:
+ ret = ppe_l2_counter_get(ppe_dev, seq);
+ break;
+ case PPE_CNT_CPU_CODE:
+ ret = ppe_cpu_code_counter_get(ppe_dev, seq);
+ break;
+ case PPE_CNT_VLAN_TX:
+ ret = ppe_vlan_tx_counter_get(ppe_dev, seq);
+ break;
+ case PPE_CNT_PORT_TX:
+ ret = ppe_port_tx_counter_get(ppe_dev, seq);
+ break;
+ case PPE_CNT_QM:
+ ret = ppe_queue_counter_get(ppe_dev, seq);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/* Flush the various packet counters of PPE. */
+static ssize_t ppe_packet_counter_write(struct file *file,
+ const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct ppe_debugfs_entry *entry = file_inode(file)->i_private;
+ struct ppe_device *ppe_dev = entry->ppe;
+ u32 reg;
+ int i;
+
+ switch (entry->counter_type) {
+ case PPE_CNT_BM:
+ for (i = 0; i < PPE_DROP_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_DROP_CNT_TBL_ADDR + i * PPE_DROP_CNT_TBL_INC;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_1WORD);
+ }
+
+ for (i = 0; i < PPE_DROP_STAT_TBL_ENTRIES; i++) {
+ reg = PPE_DROP_STAT_TBL_ADDR + PPE_DROP_STAT_TBL_INC * i;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
+ }
+
+ break;
+ case PPE_CNT_PARSE:
+ for (i = 0; i < PPE_IPR_PKT_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_IPR_PKT_CNT_TBL_ADDR + i * PPE_IPR_PKT_CNT_TBL_INC;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_1WORD);
+
+ reg = PPE_TPR_PKT_CNT_TBL_ADDR + i * PPE_TPR_PKT_CNT_TBL_INC;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_1WORD);
+ }
+
+ break;
+ case PPE_CNT_PORT_RX:
+ for (i = 0; i < PPE_PORT_RX_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_PORT_RX_CNT_TBL_ADDR + PPE_PORT_RX_CNT_TBL_INC * i;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_5WORD);
+ }
+
+ for (i = 0; i < PPE_PHY_PORT_RX_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_PHY_PORT_RX_CNT_TBL_ADDR + PPE_PHY_PORT_RX_CNT_TBL_INC * i;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_5WORD);
+ }
+
+ break;
+ case PPE_CNT_VLAN_RX:
+ for (i = 0; i < PPE_VLAN_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_VLAN_CNT_TBL_ADDR + PPE_VLAN_CNT_TBL_INC * i;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
+ }
+
+ break;
+ case PPE_CNT_L2_FWD:
+ for (i = 0; i < PPE_PRE_L2_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_PRE_L2_CNT_TBL_ADDR + PPE_PRE_L2_CNT_TBL_INC * i;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_5WORD);
+ }
+
+ break;
+ case PPE_CNT_CPU_CODE:
+ for (i = 0; i < PPE_DROP_CPU_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_DROP_CPU_CNT_TBL_ADDR + PPE_DROP_CPU_CNT_TBL_INC * i;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
+ }
+
+ break;
+ case PPE_CNT_VLAN_TX:
+ for (i = 0; i < PPE_EG_VSI_COUNTER_TBL_ENTRIES; i++) {
+ reg = PPE_EG_VSI_COUNTER_TBL_ADDR + PPE_EG_VSI_COUNTER_TBL_INC * i;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
+ }
+
+ break;
+ case PPE_CNT_PORT_TX:
+ for (i = 0; i < PPE_PORT_TX_COUNTER_TBL_ENTRIES; i++) {
+ reg = PPE_PORT_TX_DROP_CNT_TBL_ADDR + PPE_PORT_TX_DROP_CNT_TBL_INC * i;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
+
+ reg = PPE_PORT_TX_COUNTER_TBL_ADDR + PPE_PORT_TX_COUNTER_TBL_INC * i;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
+ }
+
+ for (i = 0; i < PPE_VPORT_TX_COUNTER_TBL_ENTRIES; i++) {
+ reg = PPE_VPORT_TX_COUNTER_TBL_ADDR + PPE_VPORT_TX_COUNTER_TBL_INC * i;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
+
+ reg = PPE_VPORT_TX_DROP_CNT_TBL_ADDR + PPE_VPORT_TX_DROP_CNT_TBL_INC * i;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
+ }
+
+ break;
+ case PPE_CNT_QM:
+ for (i = 0; i < PPE_QUEUE_TX_COUNTER_TBL_ENTRIES; i++) {
+ reg = PPE_QUEUE_TX_COUNTER_TBL_ADDR + PPE_QUEUE_TX_COUNTER_TBL_INC * i;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
+ }
+
+ break;
+ default:
+ break;
+ }
+
+ return count;
+}
+DEFINE_SHOW_STORE_ATTRIBUTE(ppe_packet_counter);
+
+void ppe_debugfs_setup(struct ppe_device *ppe_dev)
+{
+ struct ppe_debugfs_entry *entry;
+ int i;
+
+ ppe_dev->debugfs_root = debugfs_create_dir("ppe", NULL);
+ if (IS_ERR(ppe_dev->debugfs_root))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_files); i++) {
+ entry = devm_kzalloc(ppe_dev->dev, sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return;
+
+ entry->ppe = ppe_dev;
+ entry->counter_type = debugfs_files[i].counter_type;
+
+ debugfs_create_file(debugfs_files[i].name, 0444,
+ ppe_dev->debugfs_root, entry,
+ &ppe_packet_counter_fops);
+ }
+}
+
+void ppe_debugfs_teardown(struct ppe_device *ppe_dev)
+{
+ debugfs_remove_recursive(ppe_dev->debugfs_root);
+ ppe_dev->debugfs_root = NULL;
+}
diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.h b/drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.h
new file mode 100644
index 000000000000..81f49a709123
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+/* PPE debugfs counters setup. */
+
+#ifndef __PPE_DEBUGFS_H__
+#define __PPE_DEBUGFS_H__
+
+#include "ppe.h"
+
+void ppe_debugfs_setup(struct ppe_device *ppe_dev);
+void ppe_debugfs_teardown(struct ppe_device *ppe_dev);
+
+#endif
diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
new file mode 100644
index 000000000000..746dfbb5a682
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
@@ -0,0 +1,591 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+/* PPE hardware register and table declarations. */
+#ifndef __PPE_REGS_H__
+#define __PPE_REGS_H__
+
+#include <linux/bitfield.h>
+
+/* PPE scheduler configurations for buffer manager block. */
+#define PPE_BM_SCH_CTRL_ADDR 0xb000
+#define PPE_BM_SCH_CTRL_INC 4
+#define PPE_BM_SCH_CTRL_SCH_DEPTH GENMASK(7, 0)
+#define PPE_BM_SCH_CTRL_SCH_OFFSET GENMASK(14, 8)
+#define PPE_BM_SCH_CTRL_SCH_EN BIT(31)
+
+/* PPE drop counters. */
+#define PPE_DROP_CNT_TBL_ADDR 0xb024
+#define PPE_DROP_CNT_TBL_ENTRIES 8
+#define PPE_DROP_CNT_TBL_INC 4
+
+/* BM port drop counters. */
+#define PPE_DROP_STAT_TBL_ADDR 0xe000
+#define PPE_DROP_STAT_TBL_ENTRIES 30
+#define PPE_DROP_STAT_TBL_INC 0x10
+
+/* Egress VLAN counters. */
+#define PPE_EG_VSI_COUNTER_TBL_ADDR 0x41000
+#define PPE_EG_VSI_COUNTER_TBL_ENTRIES 64
+#define PPE_EG_VSI_COUNTER_TBL_INC 0x10
+
+/* Port TX counters. */
+#define PPE_PORT_TX_COUNTER_TBL_ADDR 0x45000
+#define PPE_PORT_TX_COUNTER_TBL_ENTRIES 8
+#define PPE_PORT_TX_COUNTER_TBL_INC 0x10
+
+/* Virtual port TX counters. */
+#define PPE_VPORT_TX_COUNTER_TBL_ADDR 0x47000
+#define PPE_VPORT_TX_COUNTER_TBL_ENTRIES 256
+#define PPE_VPORT_TX_COUNTER_TBL_INC 0x10
+
+/* Queue counters. */
+#define PPE_QUEUE_TX_COUNTER_TBL_ADDR 0x4a000
+#define PPE_QUEUE_TX_COUNTER_TBL_ENTRIES 300
+#define PPE_QUEUE_TX_COUNTER_TBL_INC 0x10
+
+/* The RSS settings control how the RSS hash value is calculated for the
+ * packets received and destined for the ARM cores. This hash is then used to
+ * generate the queue offset that determines the queue used to deliver the
+ * packet to the ARM cores.
+ */
+#define PPE_RSS_HASH_MASK_ADDR 0xb4318
+#define PPE_RSS_HASH_MASK_HASH_MASK GENMASK(20, 0)
+#define PPE_RSS_HASH_MASK_FRAGMENT BIT(28)
+
+#define PPE_RSS_HASH_SEED_ADDR 0xb431c
+#define PPE_RSS_HASH_SEED_VAL GENMASK(31, 0)
+
+#define PPE_RSS_HASH_MIX_ADDR 0xb4320
+#define PPE_RSS_HASH_MIX_ENTRIES 11
+#define PPE_RSS_HASH_MIX_INC 4
+#define PPE_RSS_HASH_MIX_VAL GENMASK(4, 0)
+
+#define PPE_RSS_HASH_FIN_ADDR 0xb4350
+#define PPE_RSS_HASH_FIN_ENTRIES 5
+#define PPE_RSS_HASH_FIN_INC 4
+#define PPE_RSS_HASH_FIN_INNER GENMASK(4, 0)
+#define PPE_RSS_HASH_FIN_OUTER GENMASK(9, 5)
+
+#define PPE_RSS_HASH_MASK_IPV4_ADDR 0xb4380
+#define PPE_RSS_HASH_MASK_IPV4_HASH_MASK GENMASK(20, 0)
+#define PPE_RSS_HASH_MASK_IPV4_FRAGMENT BIT(28)
+
+#define PPE_RSS_HASH_SEED_IPV4_ADDR 0xb4384
+#define PPE_RSS_HASH_SEED_IPV4_VAL GENMASK(31, 0)
+
+#define PPE_RSS_HASH_MIX_IPV4_ADDR 0xb4390
+#define PPE_RSS_HASH_MIX_IPV4_ENTRIES 5
+#define PPE_RSS_HASH_MIX_IPV4_INC 4
+#define PPE_RSS_HASH_MIX_IPV4_VAL GENMASK(4, 0)
+
+#define PPE_RSS_HASH_FIN_IPV4_ADDR 0xb43b0
+#define PPE_RSS_HASH_FIN_IPV4_ENTRIES 5
+#define PPE_RSS_HASH_FIN_IPV4_INC 4
+#define PPE_RSS_HASH_FIN_IPV4_INNER GENMASK(4, 0)
+#define PPE_RSS_HASH_FIN_IPV4_OUTER GENMASK(9, 5)
+
+#define PPE_BM_SCH_CFG_TBL_ADDR 0xc000
+#define PPE_BM_SCH_CFG_TBL_ENTRIES 128
+#define PPE_BM_SCH_CFG_TBL_INC 0x10
+#define PPE_BM_SCH_CFG_TBL_PORT_NUM GENMASK(3, 0)
+#define PPE_BM_SCH_CFG_TBL_DIR BIT(4)
+#define PPE_BM_SCH_CFG_TBL_VALID BIT(5)
+#define PPE_BM_SCH_CFG_TBL_SECOND_PORT_VALID BIT(6)
+#define PPE_BM_SCH_CFG_TBL_SECOND_PORT GENMASK(11, 8)
+
+/* PPE service code configuration for the ingress direction functions,
+ * including bypass configuration for relevant PPE switch core functions
+ * such as flow entry lookup bypass.
+ */
+#define PPE_SERVICE_TBL_ADDR 0x15000
+#define PPE_SERVICE_TBL_ENTRIES 256
+#define PPE_SERVICE_TBL_INC 0x10
+#define PPE_SERVICE_W0_BYPASS_BITMAP GENMASK(31, 0)
+#define PPE_SERVICE_W1_RX_CNT_EN BIT(0)
+
+#define PPE_SERVICE_SET_BYPASS_BITMAP(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_SERVICE_W0_BYPASS_BITMAP, tbl_cfg, value)
+#define PPE_SERVICE_SET_RX_CNT_EN(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_SERVICE_W1_RX_CNT_EN, (tbl_cfg) + 0x1, value)
+
+/* PPE port egress VLAN configurations. */
+#define PPE_PORT_EG_VLAN_TBL_ADDR 0x20020
+#define PPE_PORT_EG_VLAN_TBL_ENTRIES 8
+#define PPE_PORT_EG_VLAN_TBL_INC 4
+#define PPE_PORT_EG_VLAN_TBL_VLAN_TYPE BIT(0)
+#define PPE_PORT_EG_VLAN_TBL_CTAG_MODE GENMASK(2, 1)
+#define PPE_PORT_EG_VLAN_TBL_STAG_MODE GENMASK(4, 3)
+#define PPE_PORT_EG_VLAN_TBL_VSI_TAG_MODE_EN BIT(5)
+#define PPE_PORT_EG_VLAN_TBL_PCP_PROP_CMD BIT(6)
+#define PPE_PORT_EG_VLAN_TBL_DEI_PROP_CMD BIT(7)
+#define PPE_PORT_EG_VLAN_TBL_TX_COUNTING_EN BIT(8)
+
+/* PPE queue counters enable/disable control. */
+#define PPE_EG_BRIDGE_CONFIG_ADDR 0x20044
+#define PPE_EG_BRIDGE_CONFIG_QUEUE_CNT_EN BIT(2)
+
+/* PPE service code configuration on the egress direction. */
+#define PPE_EG_SERVICE_TBL_ADDR 0x43000
+#define PPE_EG_SERVICE_TBL_ENTRIES 256
+#define PPE_EG_SERVICE_TBL_INC 0x10
+#define PPE_EG_SERVICE_W0_UPDATE_ACTION GENMASK(31, 0)
+#define PPE_EG_SERVICE_W1_NEXT_SERVCODE GENMASK(7, 0)
+#define PPE_EG_SERVICE_W1_HW_SERVICE GENMASK(13, 8)
+#define PPE_EG_SERVICE_W1_OFFSET_SEL BIT(14)
+#define PPE_EG_SERVICE_W1_TX_CNT_EN BIT(15)
+
+#define PPE_EG_SERVICE_SET_UPDATE_ACTION(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_EG_SERVICE_W0_UPDATE_ACTION, tbl_cfg, value)
+#define PPE_EG_SERVICE_SET_NEXT_SERVCODE(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_EG_SERVICE_W1_NEXT_SERVCODE, (tbl_cfg) + 0x1, value)
+#define PPE_EG_SERVICE_SET_HW_SERVICE(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_EG_SERVICE_W1_HW_SERVICE, (tbl_cfg) + 0x1, value)
+#define PPE_EG_SERVICE_SET_OFFSET_SEL(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_EG_SERVICE_W1_OFFSET_SEL, (tbl_cfg) + 0x1, value)
+#define PPE_EG_SERVICE_SET_TX_CNT_EN(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_EG_SERVICE_W1_TX_CNT_EN, (tbl_cfg) + 0x1, value)
+
+/* PPE port bridge configuration. */
+#define PPE_PORT_BRIDGE_CTRL_ADDR 0x60300
+#define PPE_PORT_BRIDGE_CTRL_ENTRIES 8
+#define PPE_PORT_BRIDGE_CTRL_INC 4
+#define PPE_PORT_BRIDGE_NEW_LRN_EN BIT(0)
+#define PPE_PORT_BRIDGE_STA_MOVE_LRN_EN BIT(3)
+#define PPE_PORT_BRIDGE_TXMAC_EN BIT(16)
+
+/* PPE port control configurations for the traffic to the multicast queues. */
+#define PPE_MC_MTU_CTRL_TBL_ADDR 0x60a00
+#define PPE_MC_MTU_CTRL_TBL_ENTRIES 8
+#define PPE_MC_MTU_CTRL_TBL_INC 4
+#define PPE_MC_MTU_CTRL_TBL_MTU GENMASK(13, 0)
+#define PPE_MC_MTU_CTRL_TBL_MTU_CMD GENMASK(15, 14)
+#define PPE_MC_MTU_CTRL_TBL_TX_CNT_EN BIT(16)
+
+/* PPE VSI configurations. */
+#define PPE_VSI_TBL_ADDR 0x63800
+#define PPE_VSI_TBL_ENTRIES 64
+#define PPE_VSI_TBL_INC 0x10
+#define PPE_VSI_W0_MEMBER_PORT_BITMAP GENMASK(7, 0)
+#define PPE_VSI_W0_UUC_BITMAP GENMASK(15, 8)
+#define PPE_VSI_W0_UMC_BITMAP GENMASK(23, 16)
+#define PPE_VSI_W0_BC_BITMAP GENMASK(31, 24)
+#define PPE_VSI_W1_NEW_ADDR_LRN_EN BIT(0)
+#define PPE_VSI_W1_NEW_ADDR_FWD_CMD GENMASK(2, 1)
+#define PPE_VSI_W1_STATION_MOVE_LRN_EN BIT(3)
+#define PPE_VSI_W1_STATION_MOVE_FWD_CMD GENMASK(5, 4)
+
+#define PPE_VSI_SET_MEMBER_PORT_BITMAP(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_VSI_W0_MEMBER_PORT_BITMAP, tbl_cfg, value)
+#define PPE_VSI_SET_UUC_BITMAP(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_VSI_W0_UUC_BITMAP, tbl_cfg, value)
+#define PPE_VSI_SET_UMC_BITMAP(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_VSI_W0_UMC_BITMAP, tbl_cfg, value)
+#define PPE_VSI_SET_BC_BITMAP(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_VSI_W0_BC_BITMAP, tbl_cfg, value)
+#define PPE_VSI_SET_NEW_ADDR_LRN_EN(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_VSI_W1_NEW_ADDR_LRN_EN, (tbl_cfg) + 0x1, value)
+#define PPE_VSI_SET_NEW_ADDR_FWD_CMD(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_VSI_W1_NEW_ADDR_FWD_CMD, (tbl_cfg) + 0x1, value)
+#define PPE_VSI_SET_STATION_MOVE_LRN_EN(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_VSI_W1_STATION_MOVE_LRN_EN, (tbl_cfg) + 0x1, value)
+#define PPE_VSI_SET_STATION_MOVE_FWD_CMD(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_VSI_W1_STATION_MOVE_FWD_CMD, (tbl_cfg) + 0x1, value)
+
+/* PPE port control configurations for the traffic to the unicast queues. */
+#define PPE_MRU_MTU_CTRL_TBL_ADDR 0x65000
+#define PPE_MRU_MTU_CTRL_TBL_ENTRIES 256
+#define PPE_MRU_MTU_CTRL_TBL_INC 0x10
+#define PPE_MRU_MTU_CTRL_W0_MRU GENMASK(13, 0)
+#define PPE_MRU_MTU_CTRL_W0_MRU_CMD GENMASK(15, 14)
+#define PPE_MRU_MTU_CTRL_W0_MTU GENMASK(29, 16)
+#define PPE_MRU_MTU_CTRL_W0_MTU_CMD GENMASK(31, 30)
+#define PPE_MRU_MTU_CTRL_W1_RX_CNT_EN BIT(0)
+#define PPE_MRU_MTU_CTRL_W1_TX_CNT_EN BIT(1)
+#define PPE_MRU_MTU_CTRL_W1_SRC_PROFILE GENMASK(3, 2)
+#define PPE_MRU_MTU_CTRL_W1_INNER_PREC_LOW BIT(31)
+#define PPE_MRU_MTU_CTRL_W2_INNER_PREC_HIGH GENMASK(1, 0)
+
+#define PPE_MRU_MTU_CTRL_SET_MRU(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_MRU_MTU_CTRL_W0_MRU, tbl_cfg, value)
+#define PPE_MRU_MTU_CTRL_SET_MRU_CMD(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_MRU_MTU_CTRL_W0_MRU_CMD, tbl_cfg, value)
+#define PPE_MRU_MTU_CTRL_SET_MTU(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_MRU_MTU_CTRL_W0_MTU, tbl_cfg, value)
+#define PPE_MRU_MTU_CTRL_SET_MTU_CMD(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_MRU_MTU_CTRL_W0_MTU_CMD, tbl_cfg, value)
+#define PPE_MRU_MTU_CTRL_SET_RX_CNT_EN(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_MRU_MTU_CTRL_W1_RX_CNT_EN, (tbl_cfg) + 0x1, value)
+#define PPE_MRU_MTU_CTRL_SET_TX_CNT_EN(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_MRU_MTU_CTRL_W1_TX_CNT_EN, (tbl_cfg) + 0x1, value)
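+
+/* Usage sketch: these macros modify a table entry that has been bulk-read
+ * into a u32 buffer, with the word 1 fields addressed via (tbl_cfg) + 0x1.
+ * For example, in ppe_port_config_init() (error handling elided):
+ *
+ *	u32 mru_mtu_val[3];
+ *
+ *	regmap_bulk_read(regmap, reg, mru_mtu_val, ARRAY_SIZE(mru_mtu_val));
+ *	PPE_MRU_MTU_CTRL_SET_MTU_CMD(mru_mtu_val, PPE_ACTION_DROP);
+ *	regmap_bulk_write(regmap, reg, mru_mtu_val, ARRAY_SIZE(mru_mtu_val));
+ */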
+
+/* PPE service code configuration for destination port and counter. */
+#define PPE_IN_L2_SERVICE_TBL_ADDR 0x66000
+#define PPE_IN_L2_SERVICE_TBL_ENTRIES 256
+#define PPE_IN_L2_SERVICE_TBL_INC 0x10
+#define PPE_IN_L2_SERVICE_TBL_DST_PORT_ID_VALID BIT(0)
+#define PPE_IN_L2_SERVICE_TBL_DST_PORT_ID GENMASK(4, 1)
+#define PPE_IN_L2_SERVICE_TBL_DST_DIRECTION BIT(5)
+#define PPE_IN_L2_SERVICE_TBL_DST_BYPASS_BITMAP GENMASK(29, 6)
+#define PPE_IN_L2_SERVICE_TBL_RX_CNT_EN BIT(30)
+#define PPE_IN_L2_SERVICE_TBL_TX_CNT_EN BIT(31)
+
+/* L2 Port configurations */
+#define PPE_L2_VP_PORT_TBL_ADDR 0x98000
+#define PPE_L2_VP_PORT_TBL_ENTRIES 256
+#define PPE_L2_VP_PORT_TBL_INC 0x10
+#define PPE_L2_VP_PORT_W0_INVALID_VSI_FWD_EN BIT(0)
+#define PPE_L2_VP_PORT_W0_DST_INFO GENMASK(9, 2)
+
+#define PPE_L2_PORT_SET_INVALID_VSI_FWD_EN(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_L2_VP_PORT_W0_INVALID_VSI_FWD_EN, tbl_cfg, value)
+#define PPE_L2_PORT_SET_DST_INFO(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_L2_VP_PORT_W0_DST_INFO, tbl_cfg, value)
+
+/* Port RX and RX drop counters. */
+#define PPE_PORT_RX_CNT_TBL_ADDR 0x150000
+#define PPE_PORT_RX_CNT_TBL_ENTRIES 256
+#define PPE_PORT_RX_CNT_TBL_INC 0x20
+
+/* Physical port RX and RX drop counters. */
+#define PPE_PHY_PORT_RX_CNT_TBL_ADDR 0x156000
+#define PPE_PHY_PORT_RX_CNT_TBL_ENTRIES 8
+#define PPE_PHY_PORT_RX_CNT_TBL_INC 0x20
+
+/* Counters for packets sent to the CPU port. */
+#define PPE_DROP_CPU_CNT_TBL_ADDR 0x160000
+#define PPE_DROP_CPU_CNT_TBL_ENTRIES 1280
+#define PPE_DROP_CPU_CNT_TBL_INC 0x10
+
+/* VLAN counters. */
+#define PPE_VLAN_CNT_TBL_ADDR 0x178000
+#define PPE_VLAN_CNT_TBL_ENTRIES 64
+#define PPE_VLAN_CNT_TBL_INC 0x10
+
+/* PPE L2 counters. */
+#define PPE_PRE_L2_CNT_TBL_ADDR 0x17c000
+#define PPE_PRE_L2_CNT_TBL_ENTRIES 64
+#define PPE_PRE_L2_CNT_TBL_INC 0x20
+
+/* Port TX drop counters. */
+#define PPE_PORT_TX_DROP_CNT_TBL_ADDR 0x17d000
+#define PPE_PORT_TX_DROP_CNT_TBL_ENTRIES 8
+#define PPE_PORT_TX_DROP_CNT_TBL_INC 0x10
+
+/* Virtual port TX counters. */
+#define PPE_VPORT_TX_DROP_CNT_TBL_ADDR 0x17e000
+#define PPE_VPORT_TX_DROP_CNT_TBL_ENTRIES 256
+#define PPE_VPORT_TX_DROP_CNT_TBL_INC 0x10
+
+/* Counters for tunnel packets. */
+#define PPE_TPR_PKT_CNT_TBL_ADDR 0x1d0080
+#define PPE_TPR_PKT_CNT_TBL_ENTRIES 8
+#define PPE_TPR_PKT_CNT_TBL_INC 4
+
+/* Counters for all packets received. */
+#define PPE_IPR_PKT_CNT_TBL_ADDR 0x1e0080
+#define PPE_IPR_PKT_CNT_TBL_ENTRIES 8
+#define PPE_IPR_PKT_CNT_TBL_INC 4
+
+/* PPE service code configuration for tunnel packets. */
+#define PPE_TL_SERVICE_TBL_ADDR 0x306000
+#define PPE_TL_SERVICE_TBL_ENTRIES 256
+#define PPE_TL_SERVICE_TBL_INC 4
+#define PPE_TL_SERVICE_TBL_BYPASS_BITMAP GENMASK(31, 0)
+
+/* Port scheduler global config. */
+#define PPE_PSCH_SCH_DEPTH_CFG_ADDR 0x400000
+#define PPE_PSCH_SCH_DEPTH_CFG_INC 4
+#define PPE_PSCH_SCH_DEPTH_CFG_SCH_DEPTH GENMASK(7, 0)
+
+/* PPE queue level scheduler configurations. */
+#define PPE_L0_FLOW_MAP_TBL_ADDR 0x402000
+#define PPE_L0_FLOW_MAP_TBL_ENTRIES 300
+#define PPE_L0_FLOW_MAP_TBL_INC 0x10
+#define PPE_L0_FLOW_MAP_TBL_FLOW_ID GENMASK(5, 0)
+#define PPE_L0_FLOW_MAP_TBL_C_PRI GENMASK(8, 6)
+#define PPE_L0_FLOW_MAP_TBL_E_PRI GENMASK(11, 9)
+#define PPE_L0_FLOW_MAP_TBL_C_NODE_WT GENMASK(21, 12)
+#define PPE_L0_FLOW_MAP_TBL_E_NODE_WT GENMASK(31, 22)
+
+#define PPE_L0_C_FLOW_CFG_TBL_ADDR 0x404000
+#define PPE_L0_C_FLOW_CFG_TBL_ENTRIES 512
+#define PPE_L0_C_FLOW_CFG_TBL_INC 0x10
+#define PPE_L0_C_FLOW_CFG_TBL_NODE_ID GENMASK(7, 0)
+#define PPE_L0_C_FLOW_CFG_TBL_NODE_CREDIT_UNIT BIT(8)
+
+#define PPE_L0_E_FLOW_CFG_TBL_ADDR 0x406000
+#define PPE_L0_E_FLOW_CFG_TBL_ENTRIES 512
+#define PPE_L0_E_FLOW_CFG_TBL_INC 0x10
+#define PPE_L0_E_FLOW_CFG_TBL_NODE_ID GENMASK(7, 0)
+#define PPE_L0_E_FLOW_CFG_TBL_NODE_CREDIT_UNIT BIT(8)
+
+#define PPE_L0_FLOW_PORT_MAP_TBL_ADDR 0x408000
+#define PPE_L0_FLOW_PORT_MAP_TBL_ENTRIES 300
+#define PPE_L0_FLOW_PORT_MAP_TBL_INC 0x10
+#define PPE_L0_FLOW_PORT_MAP_TBL_PORT_NUM GENMASK(3, 0)
+
+#define PPE_L0_COMP_CFG_TBL_ADDR 0x428000
+#define PPE_L0_COMP_CFG_TBL_ENTRIES 300
+#define PPE_L0_COMP_CFG_TBL_INC 0x10
+#define PPE_L0_COMP_CFG_TBL_SHAPER_METER_LEN GENMASK(1, 0)
+#define PPE_L0_COMP_CFG_TBL_NODE_METER_LEN GENMASK(3, 2)
+
+/* PPE queue to Ethernet DMA ring mapping table. */
+#define PPE_RING_Q_MAP_TBL_ADDR 0x42a000
+#define PPE_RING_Q_MAP_TBL_ENTRIES 24
+#define PPE_RING_Q_MAP_TBL_INC 0x40
+
+/* Table addresses for per-queue dequeue setting. */
+#define PPE_DEQ_OPR_TBL_ADDR 0x430000
+#define PPE_DEQ_OPR_TBL_ENTRIES 300
+#define PPE_DEQ_OPR_TBL_INC 0x10
+#define PPE_DEQ_OPR_TBL_DEQ_DISABLE BIT(0)
+
+/* PPE flow level scheduler configurations. */
+#define PPE_L1_FLOW_MAP_TBL_ADDR 0x440000
+#define PPE_L1_FLOW_MAP_TBL_ENTRIES 64
+#define PPE_L1_FLOW_MAP_TBL_INC 0x10
+#define PPE_L1_FLOW_MAP_TBL_FLOW_ID GENMASK(3, 0)
+#define PPE_L1_FLOW_MAP_TBL_C_PRI GENMASK(6, 4)
+#define PPE_L1_FLOW_MAP_TBL_E_PRI GENMASK(9, 7)
+#define PPE_L1_FLOW_MAP_TBL_C_NODE_WT GENMASK(19, 10)
+#define PPE_L1_FLOW_MAP_TBL_E_NODE_WT GENMASK(29, 20)
+
+#define PPE_L1_C_FLOW_CFG_TBL_ADDR 0x442000
+#define PPE_L1_C_FLOW_CFG_TBL_ENTRIES 64
+#define PPE_L1_C_FLOW_CFG_TBL_INC 0x10
+#define PPE_L1_C_FLOW_CFG_TBL_NODE_ID GENMASK(5, 0)
+#define PPE_L1_C_FLOW_CFG_TBL_NODE_CREDIT_UNIT BIT(6)
+
+#define PPE_L1_E_FLOW_CFG_TBL_ADDR 0x444000
+#define PPE_L1_E_FLOW_CFG_TBL_ENTRIES 64
+#define PPE_L1_E_FLOW_CFG_TBL_INC 0x10
+#define PPE_L1_E_FLOW_CFG_TBL_NODE_ID GENMASK(5, 0)
+#define PPE_L1_E_FLOW_CFG_TBL_NODE_CREDIT_UNIT BIT(6)
+
+#define PPE_L1_FLOW_PORT_MAP_TBL_ADDR 0x446000
+#define PPE_L1_FLOW_PORT_MAP_TBL_ENTRIES 64
+#define PPE_L1_FLOW_PORT_MAP_TBL_INC 0x10
+#define PPE_L1_FLOW_PORT_MAP_TBL_PORT_NUM GENMASK(3, 0)
+
+#define PPE_L1_COMP_CFG_TBL_ADDR 0x46a000
+#define PPE_L1_COMP_CFG_TBL_ENTRIES 64
+#define PPE_L1_COMP_CFG_TBL_INC 0x10
+#define PPE_L1_COMP_CFG_TBL_SHAPER_METER_LEN GENMASK(1, 0)
+#define PPE_L1_COMP_CFG_TBL_NODE_METER_LEN GENMASK(3, 2)
+
+/* PPE port scheduler configurations for egress. */
+#define PPE_PSCH_SCH_CFG_TBL_ADDR 0x47a000
+#define PPE_PSCH_SCH_CFG_TBL_ENTRIES 128
+#define PPE_PSCH_SCH_CFG_TBL_INC 0x10
+#define PPE_PSCH_SCH_CFG_TBL_DES_PORT GENMASK(3, 0)
+#define PPE_PSCH_SCH_CFG_TBL_ENS_PORT GENMASK(7, 4)
+#define PPE_PSCH_SCH_CFG_TBL_ENS_PORT_BITMAP GENMASK(15, 8)
+#define PPE_PSCH_SCH_CFG_TBL_DES_SECOND_PORT_EN BIT(16)
+#define PPE_PSCH_SCH_CFG_TBL_DES_SECOND_PORT GENMASK(20, 17)
+
+/* The PPE supports 15 BM ports and 4 BM groups.
+ * BM ports 0-7 are for EDMA port 0, BM ports 8-13 are for
+ * PPE physical ports 1-6 and BM port 14 is for the EIP port.
+ */
+#define PPE_BM_PORT_FC_MODE_ADDR 0x600100
+#define PPE_BM_PORT_FC_MODE_ENTRIES 15
+#define PPE_BM_PORT_FC_MODE_INC 0x4
+#define PPE_BM_PORT_FC_MODE_EN BIT(0)
+
+#define PPE_BM_PORT_GROUP_ID_ADDR 0x600180
+#define PPE_BM_PORT_GROUP_ID_ENTRIES 15
+#define PPE_BM_PORT_GROUP_ID_INC 0x4
+#define PPE_BM_PORT_GROUP_ID_SHARED_GROUP_ID GENMASK(1, 0)
+
+/* Counters for PPE buffers used to cache packets. */
+#define PPE_BM_USED_CNT_TBL_ADDR 0x6001c0
+#define PPE_BM_USED_CNT_TBL_ENTRIES 15
+#define PPE_BM_USED_CNT_TBL_INC 0x4
+#define PPE_BM_USED_CNT_VAL GENMASK(10, 0)
+
+/* Counters for PPE buffers used for packets received after a pause frame is sent. */
+#define PPE_BM_REACT_CNT_TBL_ADDR 0x600240
+#define PPE_BM_REACT_CNT_TBL_ENTRIES 15
+#define PPE_BM_REACT_CNT_TBL_INC 0x4
+#define PPE_BM_REACT_CNT_VAL GENMASK(8, 0)
+
+#define PPE_BM_SHARED_GROUP_CFG_ADDR 0x600290
+#define PPE_BM_SHARED_GROUP_CFG_ENTRIES 4
+#define PPE_BM_SHARED_GROUP_CFG_INC 0x4
+#define PPE_BM_SHARED_GROUP_CFG_SHARED_LIMIT GENMASK(10, 0)
+
+#define PPE_BM_PORT_FC_CFG_TBL_ADDR 0x601000
+#define PPE_BM_PORT_FC_CFG_TBL_ENTRIES 15
+#define PPE_BM_PORT_FC_CFG_TBL_INC 0x10
+#define PPE_BM_PORT_FC_W0_REACT_LIMIT GENMASK(8, 0)
+#define PPE_BM_PORT_FC_W0_RESUME_THRESHOLD GENMASK(17, 9)
+#define PPE_BM_PORT_FC_W0_RESUME_OFFSET GENMASK(28, 18)
+#define PPE_BM_PORT_FC_W0_CEILING_LOW GENMASK(31, 29)
+#define PPE_BM_PORT_FC_W1_CEILING_HIGH GENMASK(7, 0)
+#define PPE_BM_PORT_FC_W1_WEIGHT GENMASK(10, 8)
+#define PPE_BM_PORT_FC_W1_DYNAMIC BIT(11)
+#define PPE_BM_PORT_FC_W1_PRE_ALLOC GENMASK(22, 12)
+
+#define PPE_BM_PORT_FC_SET_REACT_LIMIT(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_BM_PORT_FC_W0_REACT_LIMIT, tbl_cfg, value)
+#define PPE_BM_PORT_FC_SET_RESUME_THRESHOLD(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_BM_PORT_FC_W0_RESUME_THRESHOLD, tbl_cfg, value)
+#define PPE_BM_PORT_FC_SET_RESUME_OFFSET(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_BM_PORT_FC_W0_RESUME_OFFSET, tbl_cfg, value)
+#define PPE_BM_PORT_FC_SET_CEILING_LOW(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_BM_PORT_FC_W0_CEILING_LOW, tbl_cfg, value)
+#define PPE_BM_PORT_FC_SET_CEILING_HIGH(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_BM_PORT_FC_W1_CEILING_HIGH, (tbl_cfg) + 0x1, value)
+#define PPE_BM_PORT_FC_SET_WEIGHT(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_BM_PORT_FC_W1_WEIGHT, (tbl_cfg) + 0x1, value)
+#define PPE_BM_PORT_FC_SET_DYNAMIC(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_BM_PORT_FC_W1_DYNAMIC, (tbl_cfg) + 0x1, value)
+#define PPE_BM_PORT_FC_SET_PRE_ALLOC(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_BM_PORT_FC_W1_PRE_ALLOC, (tbl_cfg) + 0x1, value)
+
+/* The queue base configurations, selected by destination port,
+ * service code or CPU code.
+ */
+#define PPE_UCAST_QUEUE_MAP_TBL_ADDR 0x810000
+#define PPE_UCAST_QUEUE_MAP_TBL_ENTRIES 3072
+#define PPE_UCAST_QUEUE_MAP_TBL_INC 0x10
+#define PPE_UCAST_QUEUE_MAP_TBL_PROFILE_ID GENMASK(3, 0)
+#define PPE_UCAST_QUEUE_MAP_TBL_QUEUE_ID GENMASK(11, 4)
+
+/* The queue offset configurations, selected by RSS hash value. */
+#define PPE_UCAST_HASH_MAP_TBL_ADDR 0x830000
+#define PPE_UCAST_HASH_MAP_TBL_ENTRIES 4096
+#define PPE_UCAST_HASH_MAP_TBL_INC 0x10
+#define PPE_UCAST_HASH_MAP_TBL_HASH GENMASK(7, 0)
+
+/* The queue offset configurations, selected by PPE internal priority. */
+#define PPE_UCAST_PRIORITY_MAP_TBL_ADDR 0x842000
+#define PPE_UCAST_PRIORITY_MAP_TBL_ENTRIES 256
+#define PPE_UCAST_PRIORITY_MAP_TBL_INC 0x10
+#define PPE_UCAST_PRIORITY_MAP_TBL_CLASS GENMASK(3, 0)
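
A note on how the three tables above fit together (inferred from the definitions here, so treat it as an assumption): the queue map supplies a per-profile queue base, and the hash and priority maps supply an offset added to that base. Assuming a profile-major layout (4096 entries = 16 profiles * 256 hash values), the per-(profile, hash) entry address would be:

	u32 addr = PPE_UCAST_HASH_MAP_TBL_ADDR +
		   (profile * 256 + hash) * PPE_UCAST_HASH_MAP_TBL_INC;
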
+
+/* PPE unicast queue (0-255) configurations. */
+#define PPE_AC_UNICAST_QUEUE_CFG_TBL_ADDR 0x848000
+#define PPE_AC_UNICAST_QUEUE_CFG_TBL_ENTRIES 256
+#define PPE_AC_UNICAST_QUEUE_CFG_TBL_INC 0x10
+#define PPE_AC_UNICAST_QUEUE_CFG_W0_EN BIT(0)
+#define PPE_AC_UNICAST_QUEUE_CFG_W0_WRED_EN BIT(1)
+#define PPE_AC_UNICAST_QUEUE_CFG_W0_FC_EN BIT(2)
+#define PPE_AC_UNICAST_QUEUE_CFG_W0_CLR_AWARE BIT(3)
+#define PPE_AC_UNICAST_QUEUE_CFG_W0_GRP_ID GENMASK(5, 4)
+#define PPE_AC_UNICAST_QUEUE_CFG_W0_PRE_LIMIT GENMASK(16, 6)
+#define PPE_AC_UNICAST_QUEUE_CFG_W0_DYNAMIC BIT(17)
+#define PPE_AC_UNICAST_QUEUE_CFG_W0_WEIGHT GENMASK(20, 18)
+#define PPE_AC_UNICAST_QUEUE_CFG_W0_THRESHOLD GENMASK(31, 21)
+#define PPE_AC_UNICAST_QUEUE_CFG_W3_GRN_RESUME GENMASK(23, 13)
+
+#define PPE_AC_UNICAST_QUEUE_SET_EN(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_AC_UNICAST_QUEUE_CFG_W0_EN, tbl_cfg, value)
+#define PPE_AC_UNICAST_QUEUE_SET_GRP_ID(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_AC_UNICAST_QUEUE_CFG_W0_GRP_ID, tbl_cfg, value)
+#define PPE_AC_UNICAST_QUEUE_SET_PRE_LIMIT(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_AC_UNICAST_QUEUE_CFG_W0_PRE_LIMIT, tbl_cfg, value)
+#define PPE_AC_UNICAST_QUEUE_SET_DYNAMIC(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_AC_UNICAST_QUEUE_CFG_W0_DYNAMIC, tbl_cfg, value)
+#define PPE_AC_UNICAST_QUEUE_SET_WEIGHT(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_AC_UNICAST_QUEUE_CFG_W0_WEIGHT, tbl_cfg, value)
+#define PPE_AC_UNICAST_QUEUE_SET_THRESHOLD(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_AC_UNICAST_QUEUE_CFG_W0_THRESHOLD, tbl_cfg, value)
+#define PPE_AC_UNICAST_QUEUE_SET_GRN_RESUME(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_AC_UNICAST_QUEUE_CFG_W3_GRN_RESUME, (tbl_cfg) + 0x3, value)
+
+/* PPE multicast queue (256-299) configurations. */
+#define PPE_AC_MULTICAST_QUEUE_CFG_TBL_ADDR 0x84a000
+#define PPE_AC_MULTICAST_QUEUE_CFG_TBL_ENTRIES 44
+#define PPE_AC_MULTICAST_QUEUE_CFG_TBL_INC 0x10
+#define PPE_AC_MULTICAST_QUEUE_CFG_W0_EN BIT(0)
+#define PPE_AC_MULTICAST_QUEUE_CFG_W0_FC_EN BIT(1)
+#define PPE_AC_MULTICAST_QUEUE_CFG_W0_CLR_AWARE BIT(2)
+#define PPE_AC_MULTICAST_QUEUE_CFG_W0_GRP_ID GENMASK(4, 3)
+#define PPE_AC_MULTICAST_QUEUE_CFG_W0_PRE_LIMIT GENMASK(15, 5)
+#define PPE_AC_MULTICAST_QUEUE_CFG_W0_THRESHOLD GENMASK(26, 16)
+#define PPE_AC_MULTICAST_QUEUE_CFG_W2_RESUME GENMASK(17, 7)
+
+#define PPE_AC_MULTICAST_QUEUE_SET_EN(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_AC_MULTICAST_QUEUE_CFG_W0_EN, tbl_cfg, value)
+#define PPE_AC_MULTICAST_QUEUE_SET_GRN_GRP_ID(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_AC_MULTICAST_QUEUE_CFG_W0_GRP_ID, tbl_cfg, value)
+#define PPE_AC_MULTICAST_QUEUE_SET_GRN_PRE_LIMIT(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_AC_MULTICAST_QUEUE_CFG_W0_PRE_LIMIT, tbl_cfg, value)
+#define PPE_AC_MULTICAST_QUEUE_SET_GRN_THRESHOLD(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_AC_MULTICAST_QUEUE_CFG_W0_THRESHOLD, tbl_cfg, value)
+#define PPE_AC_MULTICAST_QUEUE_SET_GRN_RESUME(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_AC_MULTICAST_QUEUE_CFG_W2_RESUME, (tbl_cfg) + 0x2, value)
+
+/* PPE admission control group (0-3) configurations */
+#define PPE_AC_GRP_CFG_TBL_ADDR 0x84c000
+#define PPE_AC_GRP_CFG_TBL_ENTRIES 0x4
+#define PPE_AC_GRP_CFG_TBL_INC 0x10
+#define PPE_AC_GRP_W0_AC_EN BIT(0)
+#define PPE_AC_GRP_W0_AC_FC_EN BIT(1)
+#define PPE_AC_GRP_W0_CLR_AWARE BIT(2)
+#define PPE_AC_GRP_W0_THRESHOLD_LOW GENMASK(31, 25)
+#define PPE_AC_GRP_W1_THRESHOLD_HIGH GENMASK(3, 0)
+#define PPE_AC_GRP_W1_BUF_LIMIT GENMASK(14, 4)
+#define PPE_AC_GRP_W2_RESUME_GRN GENMASK(15, 5)
+#define PPE_AC_GRP_W2_PRE_ALLOC GENMASK(26, 16)
+
+#define PPE_AC_GRP_SET_BUF_LIMIT(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_AC_GRP_W1_BUF_LIMIT, (tbl_cfg) + 0x1, value)
+
+/* Counters for packets handled by unicast queues (0-255). */
+#define PPE_AC_UNICAST_QUEUE_CNT_TBL_ADDR 0x84e000
+#define PPE_AC_UNICAST_QUEUE_CNT_TBL_ENTRIES 256
+#define PPE_AC_UNICAST_QUEUE_CNT_TBL_INC 0x10
+#define PPE_AC_UNICAST_QUEUE_CNT_TBL_PEND_CNT GENMASK(12, 0)
+
+/* Counters for packets handled by multicast queues (256-299). */
+#define PPE_AC_MULTICAST_QUEUE_CNT_TBL_ADDR 0x852000
+#define PPE_AC_MULTICAST_QUEUE_CNT_TBL_ENTRIES 44
+#define PPE_AC_MULTICAST_QUEUE_CNT_TBL_INC 0x10
+#define PPE_AC_MULTICAST_QUEUE_CNT_TBL_PEND_CNT GENMASK(12, 0)
+
+/* Table addresses for per-queue enqueue setting. */
+#define PPE_ENQ_OPR_TBL_ADDR 0x85c000
+#define PPE_ENQ_OPR_TBL_ENTRIES 300
+#define PPE_ENQ_OPR_TBL_INC 0x10
+#define PPE_ENQ_OPR_TBL_ENQ_DISABLE BIT(0)
+
+/* The unicast drop count includes possible WRED drops for the green,
+ * yellow and red categories.
+ */
+#define PPE_UNICAST_DROP_CNT_TBL_ADDR 0x9e0000
+#define PPE_UNICAST_DROP_CNT_TBL_ENTRIES 1536
+#define PPE_UNICAST_DROP_CNT_TBL_INC 0x10
+#define PPE_UNICAST_DROP_TYPES 6
+#define PPE_UNICAST_DROP_FORCE_OFFSET 3
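
The 1536 entries cover 256 unicast queues with PPE_UNICAST_DROP_TYPES counters each, and PPE_UNICAST_DROP_FORCE_OFFSET selects the forced-drop counter inside a queue's block. A sketch of the implied address computation (the layout is inferred from the sizes above; 'queue' is a hypothetical unicast queue index, 0-255):

	u32 addr = PPE_UNICAST_DROP_CNT_TBL_ADDR +
		   (queue * PPE_UNICAST_DROP_TYPES +
		    PPE_UNICAST_DROP_FORCE_OFFSET) *
		   PPE_UNICAST_DROP_CNT_TBL_INC;
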
+
+/* There are 16 multicast queues dedicated to CPU port 0. The multicast drop
+ * count includes forced drops for green, yellow and red category packets.
+ */
+#define PPE_P0_MULTICAST_DROP_CNT_TBL_ADDR 0x9f0000
+#define PPE_P0_MULTICAST_DROP_CNT_TBL_ENTRIES 48
+#define PPE_P0_MULTICAST_DROP_CNT_TBL_INC 0x10
+#define PPE_P0_MULTICAST_QUEUE_NUM 16
+
+/* Each PPE physical port has four dedicated multicast queues; with three
+ * drop types per queue, this gives a total of 12 counter entries per port.
+ * The multicast drop count includes forced drops for green, yellow and red
+ * category packets.
+ */
+#define PPE_MULTICAST_QUEUE_PORT_ADDR_INC 0x1000
+#define PPE_MULTICAST_DROP_CNT_TBL_INC 0x10
+#define PPE_MULTICAST_DROP_TYPES 3
+#define PPE_MULTICAST_QUEUE_NUM 4
+#define PPE_MULTICAST_DROP_CNT_TBL_ENTRIES 12
+
+#define PPE_CPU_PORT_MULTICAST_FORCE_DROP_CNT_TBL_ADDR(mq_offset) \
+ (PPE_P0_MULTICAST_DROP_CNT_TBL_ADDR + \
+ (mq_offset) * PPE_P0_MULTICAST_DROP_CNT_TBL_INC * \
+ PPE_MULTICAST_DROP_TYPES)
+
+#define PPE_P1_MULTICAST_DROP_CNT_TBL_ADDR \
+ (PPE_P0_MULTICAST_DROP_CNT_TBL_ADDR + PPE_MULTICAST_QUEUE_PORT_ADDR_INC)
+#endif
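
To illustrate the multicast drop counter layout, a sketch that sums the per-color forced-drop counters of CPU port 0's 16 multicast queues (ppe_base and the raw readl() access are assumptions; a real driver would go through its own table read helpers):

	unsigned int q, t;
	u64 drops = 0;

	for (q = 0; q < PPE_P0_MULTICAST_QUEUE_NUM; q++) {
		u32 addr = PPE_CPU_PORT_MULTICAST_FORCE_DROP_CNT_TBL_ADDR(q);

		for (t = 0; t < PPE_MULTICAST_DROP_TYPES; t++)
			drops += readl(ppe_base + addr +
				       t * PPE_P0_MULTICAST_DROP_CNT_TBL_INC);
	}
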
diff --git a/drivers/net/ethernet/realtek/Kconfig b/drivers/net/ethernet/realtek/Kconfig
index fe136f61586f..272c83bfdc6c 100644
--- a/drivers/net/ethernet/realtek/Kconfig
+++ b/drivers/net/ethernet/realtek/Kconfig
@@ -58,7 +58,7 @@ config 8139TOO
config 8139TOO_PIO
bool "Use PIO instead of MMIO"
default y
- depends on 8139TOO
+ depends on 8139TOO && !NO_IOPORT_MAP
help
This instructs the driver to use programmed I/O ports (PIO) instead
of PCI shared memory (MMIO). This can possibly solve some problems
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 9c601f271c02..8903ae90afcb 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -3409,7 +3409,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
r8168_mac_ocp_modify(tp, 0xd412, 0x0fff, sw_cnt_1ms_ini);
}
- r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0070);
+ r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0000);
r8168_mac_ocp_modify(tp, 0xe052, 0x6000, 0x8008);
r8168_mac_ocp_modify(tp, 0xe0d6, 0x01ff, 0x017f);
r8168_mac_ocp_modify(tp, 0xd420, 0x0fff, 0x047f);
@@ -3514,7 +3514,7 @@ static void rtl_hw_start_8117(struct rtl8169_private *tp)
r8168_mac_ocp_modify(tp, 0xd412, 0x0fff, sw_cnt_1ms_ini);
}
- r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0070);
+ r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0000);
r8168_mac_ocp_write(tp, 0xea80, 0x0003);
r8168_mac_ocp_modify(tp, 0xe052, 0x0000, 0x0009);
r8168_mac_ocp_modify(tp, 0xd420, 0x0fff, 0x047f);
@@ -3715,7 +3715,7 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp)
r8168_mac_ocp_modify(tp, 0xc0b4, 0x0000, 0x000c);
r8168_mac_ocp_modify(tp, 0xeb6a, 0x00ff, 0x0033);
r8168_mac_ocp_modify(tp, 0xeb50, 0x03e0, 0x0040);
- r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0030);
+ r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0000);
r8168_mac_ocp_modify(tp, 0xe040, 0x1000, 0x0000);
r8168_mac_ocp_modify(tp, 0xea1c, 0x0003, 0x0001);
if (tp->mac_version == RTL_GIGA_MAC_VER_70 ||
@@ -5441,10 +5441,12 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
	/* Disable ASPM L1 as it causes random devices to stop working and
	 * full system hangs for some PCIe device users.
	 */
- if (rtl_aspm_is_safe(tp))
+ if (rtl_aspm_is_safe(tp)) {
+ dev_info(&pdev->dev, "System vendor flags ASPM as safe\n");
rc = 0;
- else
+ } else {
rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1);
+ }
tp->aspm_manageable = !rc;
tp->dash_type = rtl_get_dash_type(tp);
diff --git a/drivers/net/ethernet/renesas/Makefile b/drivers/net/ethernet/renesas/Makefile
index f65fc76f8b4d..d63e0c61bb68 100644
--- a/drivers/net/ethernet/renesas/Makefile
+++ b/drivers/net/ethernet/renesas/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_SH_ETH) += sh_eth.o
ravb-objs := ravb_main.o ravb_ptp.o
obj-$(CONFIG_RAVB) += ravb.o
+rswitch-objs := rswitch_main.o rswitch_l2.o
obj-$(CONFIG_RENESAS_ETHER_SWITCH) += rswitch.o
obj-$(CONFIG_RENESAS_GEN4_PTP) += rcar_gen4_ptp.o
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 94b6fb94f8f1..9d3bd65b85ff 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -802,7 +802,6 @@ static int ravb_rx_gbeth(struct net_device *ndev, int budget, int q)
const struct ravb_hw_info *info = priv->info;
struct net_device_stats *stats;
struct ravb_rx_desc *desc;
- struct sk_buff *skb;
int rx_packets = 0;
u8 desc_status;
u16 desc_len;
@@ -815,6 +814,8 @@ static int ravb_rx_gbeth(struct net_device *ndev, int budget, int q)
stats = &priv->stats[q];
for (i = 0; i < limit; i++, priv->cur_rx[q]++) {
+ struct sk_buff *skb = NULL;
+
entry = priv->cur_rx[q] % priv->num_rx_ring[q];
desc = &priv->rx_ring[q].desc[entry];
if (rx_packets == budget || desc->die_dt == DT_FEMPTY)
diff --git a/drivers/net/ethernet/renesas/rcar_gen4_ptp.c b/drivers/net/ethernet/renesas/rcar_gen4_ptp.c
index 4c3e8cc5046f..d0979abd36de 100644
--- a/drivers/net/ethernet/renesas/rcar_gen4_ptp.c
+++ b/drivers/net/ethernet/renesas/rcar_gen4_ptp.c
@@ -12,19 +12,18 @@
#include <linux/slab.h>
#include "rcar_gen4_ptp.h"
-#define ptp_to_priv(ptp) container_of(ptp, struct rcar_gen4_ptp_private, info)
-static const struct rcar_gen4_ptp_reg_offset gen4_offs = {
- .enable = PTPTMEC,
- .disable = PTPTMDC,
- .increment = PTPTIVC0,
- .config_t0 = PTPTOVC00,
- .config_t1 = PTPTOVC10,
- .config_t2 = PTPTOVC20,
- .monitor_t0 = PTPGPTPTM00,
- .monitor_t1 = PTPGPTPTM10,
- .monitor_t2 = PTPGPTPTM20,
-};
+#define PTPTMEC_REG 0x0010
+#define PTPTMDC_REG 0x0014
+#define PTPTIVC0_REG 0x0020
+#define PTPTOVC00_REG 0x0030
+#define PTPTOVC10_REG 0x0034
+#define PTPTOVC20_REG 0x0038
+#define PTPGPTPTM00_REG 0x0050
+#define PTPGPTPTM10_REG 0x0054
+#define PTPGPTPTM20_REG 0x0058
+
+#define ptp_to_priv(ptp) container_of(ptp, struct rcar_gen4_ptp_private, info)
static int rcar_gen4_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
@@ -38,20 +37,21 @@ static int rcar_gen4_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
diff = div_s64(addend * scaled_ppm_to_ppb(scaled_ppm), NSEC_PER_SEC);
addend = neg_adj ? addend - diff : addend + diff;
- iowrite32(addend, ptp_priv->addr + ptp_priv->offs->increment);
+ iowrite32(addend, ptp_priv->addr + PTPTIVC0_REG);
return 0;
}
-/* Caller must hold the lock */
static void _rcar_gen4_ptp_gettime(struct ptp_clock_info *ptp,
struct timespec64 *ts)
{
struct rcar_gen4_ptp_private *ptp_priv = ptp_to_priv(ptp);
- ts->tv_nsec = ioread32(ptp_priv->addr + ptp_priv->offs->monitor_t0);
- ts->tv_sec = ioread32(ptp_priv->addr + ptp_priv->offs->monitor_t1) |
- ((s64)ioread32(ptp_priv->addr + ptp_priv->offs->monitor_t2) << 32);
+ lockdep_assert_held(&ptp_priv->lock);
+
+ ts->tv_nsec = ioread32(ptp_priv->addr + PTPGPTPTM00_REG);
+ ts->tv_sec = ioread32(ptp_priv->addr + PTPGPTPTM10_REG) |
+ ((s64)ioread32(ptp_priv->addr + PTPGPTPTM20_REG) << 32);
}
static int rcar_gen4_ptp_gettime(struct ptp_clock_info *ptp,
@@ -73,14 +73,14 @@ static void _rcar_gen4_ptp_settime(struct ptp_clock_info *ptp,
{
struct rcar_gen4_ptp_private *ptp_priv = ptp_to_priv(ptp);
- iowrite32(1, ptp_priv->addr + ptp_priv->offs->disable);
- iowrite32(0, ptp_priv->addr + ptp_priv->offs->config_t2);
- iowrite32(0, ptp_priv->addr + ptp_priv->offs->config_t1);
- iowrite32(0, ptp_priv->addr + ptp_priv->offs->config_t0);
- iowrite32(1, ptp_priv->addr + ptp_priv->offs->enable);
- iowrite32(ts->tv_sec >> 32, ptp_priv->addr + ptp_priv->offs->config_t2);
- iowrite32(ts->tv_sec, ptp_priv->addr + ptp_priv->offs->config_t1);
- iowrite32(ts->tv_nsec, ptp_priv->addr + ptp_priv->offs->config_t0);
+ iowrite32(1, ptp_priv->addr + PTPTMDC_REG);
+ iowrite32(0, ptp_priv->addr + PTPTOVC20_REG);
+ iowrite32(0, ptp_priv->addr + PTPTOVC10_REG);
+ iowrite32(0, ptp_priv->addr + PTPTOVC00_REG);
+ iowrite32(1, ptp_priv->addr + PTPTMEC_REG);
+ iowrite32(ts->tv_sec >> 32, ptp_priv->addr + PTPTOVC20_REG);
+ iowrite32(ts->tv_sec, ptp_priv->addr + PTPTOVC10_REG);
+ iowrite32(ts->tv_nsec, ptp_priv->addr + PTPTOVC00_REG);
}
static int rcar_gen4_ptp_settime(struct ptp_clock_info *ptp,
@@ -130,17 +130,6 @@ static struct ptp_clock_info rcar_gen4_ptp_info = {
.enable = rcar_gen4_ptp_enable,
};
-static int rcar_gen4_ptp_set_offs(struct rcar_gen4_ptp_private *ptp_priv,
- enum rcar_gen4_ptp_reg_layout layout)
-{
- if (layout != RCAR_GEN4_PTP_REG_LAYOUT)
- return -EINVAL;
-
- ptp_priv->offs = &gen4_offs;
-
- return 0;
-}
-
static s64 rcar_gen4_ptp_rate_to_increment(u32 rate)
{
/* Timer increment in ns.
@@ -151,27 +140,20 @@ static s64 rcar_gen4_ptp_rate_to_increment(u32 rate)
return div_s64(1000000000LL << 27, rate);
}
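
The value written to PTPTIVC0 is the timer increment: the clock period in nanoseconds with 27 fractional bits. A worked example (the 320 MHz rate is hypothetical):

	(1000000000 << 27) / 320000000 = 3.125 * 2^27 = 419430400 = 0x19000000
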
-int rcar_gen4_ptp_register(struct rcar_gen4_ptp_private *ptp_priv,
- enum rcar_gen4_ptp_reg_layout layout, u32 rate)
+int rcar_gen4_ptp_register(struct rcar_gen4_ptp_private *ptp_priv, u32 rate)
{
- int ret;
-
if (ptp_priv->initialized)
return 0;
spin_lock_init(&ptp_priv->lock);
- ret = rcar_gen4_ptp_set_offs(ptp_priv, layout);
- if (ret)
- return ret;
-
ptp_priv->default_addend = rcar_gen4_ptp_rate_to_increment(rate);
- iowrite32(ptp_priv->default_addend, ptp_priv->addr + ptp_priv->offs->increment);
+ iowrite32(ptp_priv->default_addend, ptp_priv->addr + PTPTIVC0_REG);
ptp_priv->clock = ptp_clock_register(&ptp_priv->info, NULL);
if (IS_ERR(ptp_priv->clock))
return PTR_ERR(ptp_priv->clock);
- iowrite32(0x01, ptp_priv->addr + ptp_priv->offs->enable);
+ iowrite32(0x01, ptp_priv->addr + PTPTMEC_REG);
ptp_priv->initialized = true;
return 0;
@@ -180,7 +162,7 @@ EXPORT_SYMBOL_GPL(rcar_gen4_ptp_register);
int rcar_gen4_ptp_unregister(struct rcar_gen4_ptp_private *ptp_priv)
{
- iowrite32(1, ptp_priv->addr + ptp_priv->offs->disable);
+ iowrite32(1, ptp_priv->addr + PTPTMDC_REG);
return ptp_clock_unregister(ptp_priv->clock);
}
diff --git a/drivers/net/ethernet/renesas/rcar_gen4_ptp.h b/drivers/net/ethernet/renesas/rcar_gen4_ptp.h
index e22da5acd53d..f77e79e47357 100644
--- a/drivers/net/ethernet/renesas/rcar_gen4_ptp.h
+++ b/drivers/net/ethernet/renesas/rcar_gen4_ptp.h
@@ -11,10 +11,6 @@
#define RCAR_GEN4_GPTP_OFFSET_S4 0x00018000
-enum rcar_gen4_ptp_reg_layout {
- RCAR_GEN4_PTP_REG_LAYOUT
-};
-
/* driver's definitions */
#define RCAR_GEN4_RXTSTAMP_ENABLED BIT(0)
#define RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT BIT(1)
@@ -23,37 +19,11 @@ enum rcar_gen4_ptp_reg_layout {
#define RCAR_GEN4_TXTSTAMP_ENABLED BIT(0)
-#define PTPRO 0
-
-enum rcar_gen4_ptp_reg {
- PTPTMEC = PTPRO + 0x0010,
- PTPTMDC = PTPRO + 0x0014,
- PTPTIVC0 = PTPRO + 0x0020,
- PTPTOVC00 = PTPRO + 0x0030,
- PTPTOVC10 = PTPRO + 0x0034,
- PTPTOVC20 = PTPRO + 0x0038,
- PTPGPTPTM00 = PTPRO + 0x0050,
- PTPGPTPTM10 = PTPRO + 0x0054,
- PTPGPTPTM20 = PTPRO + 0x0058,
-};
-
-struct rcar_gen4_ptp_reg_offset {
- u16 enable;
- u16 disable;
- u16 increment;
- u16 config_t0;
- u16 config_t1;
- u16 config_t2;
- u16 monitor_t0;
- u16 monitor_t1;
- u16 monitor_t2;
-};
struct rcar_gen4_ptp_private {
void __iomem *addr;
struct ptp_clock *clock;
struct ptp_clock_info info;
- const struct rcar_gen4_ptp_reg_offset *offs;
spinlock_t lock; /* For multiple registers access */
u32 tstamp_tx_ctrl;
u32 tstamp_rx_ctrl;
@@ -61,8 +31,7 @@ struct rcar_gen4_ptp_private {
bool initialized;
};
-int rcar_gen4_ptp_register(struct rcar_gen4_ptp_private *ptp_priv,
- enum rcar_gen4_ptp_reg_layout layout, u32 rate);
+int rcar_gen4_ptp_register(struct rcar_gen4_ptp_private *ptp_priv, u32 rate);
int rcar_gen4_ptp_unregister(struct rcar_gen4_ptp_private *ptp_priv);
struct rcar_gen4_ptp_private *rcar_gen4_ptp_alloc(struct platform_device *pdev);
diff --git a/drivers/net/ethernet/renesas/rswitch.h b/drivers/net/ethernet/renesas/rswitch.h
index 532192cbca4b..a1d4a877e5bd 100644
--- a/drivers/net/ethernet/renesas/rswitch.h
+++ b/drivers/net/ethernet/renesas/rswitch.h
@@ -1,19 +1,25 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Renesas Ethernet Switch device driver
*
- * Copyright (C) 2022 Renesas Electronics Corporation
+ * Copyright (C) 2022-2025 Renesas Electronics Corporation
*/
#ifndef __RSWITCH_H__
#define __RSWITCH_H__
#include <linux/platform_device.h>
+#include <linux/phy.h>
+
#include "rcar_gen4_ptp.h"
#define RSWITCH_MAX_NUM_QUEUES 128
#define RSWITCH_NUM_AGENTS 5
#define RSWITCH_NUM_PORTS 3
+
+#define rswitch_for_all_ports(_priv, _rdev) \
+ list_for_each_entry(_rdev, &_priv->port_list, list)
+
#define rswitch_for_each_enabled_port(priv, i) \
for (i = 0; i < RSWITCH_NUM_PORTS; i++) \
if (priv->rdev[i]->disabled) \
@@ -809,7 +815,8 @@ enum rswitch_gwca_mode {
#define FWPC0_IP4EA BIT(10)
#define FWPC0_IPDSA BIT(12)
#define FWPC0_IPHLA BIT(18)
-#define FWPC0_MACSDA BIT(20)
+#define FWPC0_MACDSA BIT(20)
+#define FWPC0_MACSSA BIT(23)
#define FWPC0_MACHLA BIT(26)
#define FWPC0_MACHMA BIT(27)
#define FWPC0_VLANSA BIT(28)
@@ -820,12 +827,30 @@ enum rswitch_gwca_mode {
#define FWPC2(i) (FWPC20 + (i) * 0x10)
#define FWCP2_LTWFW GENMASK(16 + (RSWITCH_NUM_AGENTS - 1), 16)
+#define FWCP2_LTWFW_MASK GENMASK(16 + (RSWITCH_NUM_AGENTS - 1), 16)
#define FWPBFC(i) (FWPBFC0 + (i) * 0x10)
#define FWPBFC_PBDV GENMASK(RSWITCH_NUM_AGENTS - 1, 0)
#define FWPBFCSDC(j, i) (FWPBFCSDC00 + (i) * 0x10 + (j) * 0x04)
+#define FWMACHEC_MACHMUE_MASK GENMASK(26, 16)
+
+#define FWMACTIM_MACTIOG BIT(0)
+#define FWMACTIM_MACTR BIT(1)
+
+#define FWMACAGUSPC_MACAGUSP GENMASK(9, 0)
+#define FWMACAGC_MACAGT GENMASK(15, 0)
+#define FWMACAGC_MACAGE BIT(16)
+#define FWMACAGC_MACAGSL BIT(17)
+#define FWMACAGC_MACAGPM BIT(18)
+#define FWMACAGC_MACDES BIT(24)
+#define FWMACAGC_MACAGOG BIT(28)
+#define FWMACAGC_MACDESOG BIT(29)
+
+#define RSW_AGEING_CLK_PER_US 0x140
+#define RSW_AGEING_TIME 300
+
/* TOP */
#define TPEMIMC7(queue) (TPEMIMC70 + (queue) * 4)
@@ -994,10 +1019,18 @@ struct rswitch_device {
DECLARE_BITMAP(ts_skb_used, TS_TAGS_PER_PORT);
bool disabled;
+ struct list_head list;
+
int port;
struct rswitch_etha *etha;
struct device_node *np_port;
struct phy *serdes;
+
+ struct net_device *brdev; /* master bridge device */
+ unsigned int learning_requested : 1;
+ unsigned int learning_offloaded : 1;
+ unsigned int forwarding_requested : 1;
+ unsigned int forwarding_offloaded : 1;
};
struct rswitch_mfwd_mac_table_entry {
@@ -1022,11 +1055,17 @@ struct rswitch_private {
struct rswitch_etha etha[RSWITCH_NUM_PORTS];
struct rswitch_mfwd mfwd;
+ struct list_head port_list;
+
spinlock_t lock; /* lock interrupt registers' control */
struct clk *clk;
bool etha_no_runtime_change;
bool gwca_halt;
+ struct net_device *offload_brdev;
};
+bool is_rdev(const struct net_device *ndev);
+void rswitch_modify(void __iomem *addr, enum rswitch_reg reg, u32 clear, u32 set);
+
#endif /* #ifndef __RSWITCH_H__ */
diff --git a/drivers/net/ethernet/renesas/rswitch_l2.c b/drivers/net/ethernet/renesas/rswitch_l2.c
new file mode 100644
index 000000000000..4a69ec77d69c
--- /dev/null
+++ b/drivers/net/ethernet/renesas/rswitch_l2.c
@@ -0,0 +1,316 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Renesas Ethernet Switch device driver
+ *
+ * Copyright (C) 2025 Renesas Electronics Corporation
+ */
+
+#include <linux/err.h>
+#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
+#include <linux/kernel.h>
+#include <net/switchdev.h>
+
+#include "rswitch.h"
+#include "rswitch_l2.h"
+
+static bool rdev_for_l2_offload(struct rswitch_device *rdev)
+{
+ return rdev->priv->offload_brdev &&
+ rdev->brdev == rdev->priv->offload_brdev &&
+ (test_bit(rdev->port, rdev->priv->opened_ports));
+}
+
+static void rswitch_change_l2_hw_offloading(struct rswitch_device *rdev,
+ bool start, bool learning)
+{
+ u32 bits = learning ? FWPC0_MACSSA | FWPC0_MACHLA | FWPC0_MACHMA : FWPC0_MACDSA;
+ u32 clear = start ? 0 : bits;
+ u32 set = start ? bits : 0;
+
+ if ((learning && rdev->learning_offloaded == start) ||
+ (!learning && rdev->forwarding_offloaded == start))
+ return;
+
+ rswitch_modify(rdev->priv->addr, FWPC0(rdev->port), clear, set);
+
+ if (learning)
+ rdev->learning_offloaded = start;
+ else
+ rdev->forwarding_offloaded = start;
+
+ netdev_info(rdev->ndev, "%s hw %s\n", start ? "starting" : "stopping",
+ learning ? "learning" : "forwarding");
+}
+
+static void rswitch_update_l2_hw_learning(struct rswitch_private *priv)
+{
+ struct rswitch_device *rdev;
+ bool learning_needed;
+
+ rswitch_for_all_ports(priv, rdev) {
+ if (rdev_for_l2_offload(rdev))
+ learning_needed = rdev->learning_requested;
+ else
+ learning_needed = false;
+
+ rswitch_change_l2_hw_offloading(rdev, learning_needed, true);
+ }
+}
+
+static void rswitch_update_l2_hw_forwarding(struct rswitch_private *priv)
+{
+ struct rswitch_device *rdev;
+ unsigned int fwd_mask;
+
+ /* calculate fwd_mask with zeroes in bits corresponding to ports that
+ * shall participate in hardware forwarding
+ */
+ fwd_mask = GENMASK(RSWITCH_NUM_AGENTS - 1, 0);
+
+ rswitch_for_all_ports(priv, rdev) {
+ if (rdev_for_l2_offload(rdev) && rdev->forwarding_requested)
+ fwd_mask &= ~BIT(rdev->port);
+ }
+
+ rswitch_for_all_ports(priv, rdev) {
+ if ((rdev_for_l2_offload(rdev) && rdev->forwarding_requested) ||
+ rdev->forwarding_offloaded) {
+ /* Update allowed offload destinations even for ports
+ * with L2 offload enabled earlier.
+ *
+ * Do not allow L2 forwarding to self for hw port.
+ */
+ iowrite32(FIELD_PREP(FWCP2_LTWFW_MASK, fwd_mask | BIT(rdev->port)),
+ priv->addr + FWPC2(rdev->port));
+ }
+
+ if (rdev_for_l2_offload(rdev) &&
+ rdev->forwarding_requested &&
+ !rdev->forwarding_offloaded) {
+ rswitch_change_l2_hw_offloading(rdev, true, false);
+ } else if (rdev->forwarding_offloaded) {
+ rswitch_change_l2_hw_offloading(rdev, false, false);
+ }
+ }
+}
+
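A worked example of the mask arithmetic above (port numbers are hypothetical): with five agents and ports 0 and 2 offloaded, fwd_mask = GENMASK(4, 0) & ~(BIT(0) | BIT(2)) = 0x1a. Port 0's FWPC2 field is then written with 0x1a | BIT(0) = 0x1b, leaving only bit 2 clear: the hardware may forward from port 0 to port 2, but never back to port 0 itself.
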
+void rswitch_update_l2_offload(struct rswitch_private *priv)
+{
+ rswitch_update_l2_hw_learning(priv);
+ rswitch_update_l2_hw_forwarding(priv);
+}
+
+static void rswitch_update_offload_brdev(struct rswitch_private *priv)
+{
+ struct net_device *offload_brdev = NULL;
+ struct rswitch_device *rdev, *rdev2;
+
+ rswitch_for_all_ports(priv, rdev) {
+ if (!rdev->brdev)
+ continue;
+ rswitch_for_all_ports(priv, rdev2) {
+ if (rdev2 == rdev)
+ break;
+ if (rdev2->brdev == rdev->brdev) {
+ offload_brdev = rdev->brdev;
+ break;
+ }
+ }
+ if (offload_brdev)
+ break;
+ }
+
+	if (offload_brdev == priv->offload_brdev)
+		; /* unchanged, nothing to log */
+	else if (offload_brdev && priv->offload_brdev)
+		dev_dbg(&priv->pdev->dev,
+			"changing l2 offload from %s to %s\n",
+			netdev_name(priv->offload_brdev),
+			netdev_name(offload_brdev));
+	else if (offload_brdev)
+		dev_dbg(&priv->pdev->dev, "starting l2 offload for %s\n",
+			netdev_name(offload_brdev));
+	else
+		dev_dbg(&priv->pdev->dev, "stopping l2 offload for %s\n",
+			netdev_name(priv->offload_brdev));
+
+ priv->offload_brdev = offload_brdev;
+
+ rswitch_update_l2_offload(priv);
+}
+
+static bool rswitch_port_check(const struct net_device *ndev)
+{
+ return is_rdev(ndev);
+}
+
+static void rswitch_port_update_brdev(struct net_device *ndev,
+ struct net_device *brdev)
+{
+ struct rswitch_device *rdev;
+
+ if (!is_rdev(ndev))
+ return;
+
+ rdev = netdev_priv(ndev);
+ rdev->brdev = brdev;
+ rswitch_update_offload_brdev(rdev->priv);
+}
+
+static int rswitch_port_update_stp_state(struct net_device *ndev, u8 stp_state)
+{
+ struct rswitch_device *rdev;
+
+ if (!is_rdev(ndev))
+ return -ENODEV;
+
+ rdev = netdev_priv(ndev);
+ rdev->learning_requested = (stp_state == BR_STATE_LEARNING ||
+ stp_state == BR_STATE_FORWARDING);
+ rdev->forwarding_requested = (stp_state == BR_STATE_FORWARDING);
+ rswitch_update_l2_offload(rdev->priv);
+
+ return 0;
+}
+
+static int rswitch_netdevice_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+ struct netdev_notifier_changeupper_info *info;
+ struct net_device *brdev;
+
+ if (!rswitch_port_check(ndev))
+ return NOTIFY_DONE;
+ if (event != NETDEV_CHANGEUPPER)
+ return NOTIFY_DONE;
+
+ info = ptr;
+
+ if (netif_is_bridge_master(info->upper_dev)) {
+ brdev = info->linking ? info->upper_dev : NULL;
+ rswitch_port_update_brdev(ndev, brdev);
+ }
+
+ return NOTIFY_OK;
+}
+
+static int rswitch_update_ageing_time(struct net_device *ndev, clock_t time)
+{
+ struct rswitch_device *rdev = netdev_priv(ndev);
+ u32 reg_val;
+
+ if (!is_rdev(ndev))
+ return -ENODEV;
+
+ if (!FIELD_FIT(FWMACAGC_MACAGT, time))
+ return -EINVAL;
+
+ reg_val = FIELD_PREP(FWMACAGC_MACAGT, time);
+ reg_val |= FWMACAGC_MACAGE | FWMACAGC_MACAGSL;
+ iowrite32(reg_val, rdev->priv->addr + FWMACAGC);
+
+ return 0;
+}
+
+static int rswitch_port_attr_set(struct net_device *ndev, const void *ctx,
+ const struct switchdev_attr *attr,
+ struct netlink_ext_ack *extack)
+{
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+ return rswitch_port_update_stp_state(ndev, attr->u.stp_state);
+ case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
+ return rswitch_update_ageing_time(ndev, attr->u.ageing_time);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int rswitch_switchdev_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *ndev = switchdev_notifier_info_to_dev(ptr);
+ int ret;
+
+ if (event == SWITCHDEV_PORT_ATTR_SET) {
+ ret = switchdev_handle_port_attr_set(ndev, ptr,
+ rswitch_port_check,
+ rswitch_port_attr_set);
+ return notifier_from_errno(ret);
+ }
+
+ if (!rswitch_port_check(ndev))
+ return NOTIFY_DONE;
+
+ return notifier_from_errno(-EOPNOTSUPP);
+}
+
+static int rswitch_switchdev_blocking_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *ndev = switchdev_notifier_info_to_dev(ptr);
+ int ret;
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD:
+ return -EOPNOTSUPP;
+ case SWITCHDEV_PORT_OBJ_DEL:
+ return -EOPNOTSUPP;
+ case SWITCHDEV_PORT_ATTR_SET:
+ ret = switchdev_handle_port_attr_set(ndev, ptr,
+ rswitch_port_check,
+ rswitch_port_attr_set);
+ break;
+ default:
+ if (!rswitch_port_check(ndev))
+ return NOTIFY_DONE;
+ ret = -EOPNOTSUPP;
+ }
+
+ return notifier_from_errno(ret);
+}
+
+static struct notifier_block rswitch_netdevice_nb = {
+ .notifier_call = rswitch_netdevice_event,
+};
+
+static struct notifier_block rswitch_switchdev_nb = {
+ .notifier_call = rswitch_switchdev_event,
+};
+
+static struct notifier_block rswitch_switchdev_blocking_nb = {
+ .notifier_call = rswitch_switchdev_blocking_event,
+};
+
+int rswitch_register_notifiers(void)
+{
+ int ret;
+
+ ret = register_netdevice_notifier(&rswitch_netdevice_nb);
+ if (ret)
+ goto register_netdevice_notifier_failed;
+
+ ret = register_switchdev_notifier(&rswitch_switchdev_nb);
+ if (ret)
+ goto register_switchdev_notifier_failed;
+
+ ret = register_switchdev_blocking_notifier(&rswitch_switchdev_blocking_nb);
+ if (ret)
+ goto register_switchdev_blocking_notifier_failed;
+
+ return 0;
+
+register_switchdev_blocking_notifier_failed:
+ unregister_switchdev_notifier(&rswitch_switchdev_nb);
+register_switchdev_notifier_failed:
+ unregister_netdevice_notifier(&rswitch_netdevice_nb);
+register_netdevice_notifier_failed:
+
+ return ret;
+}
+
+void rswitch_unregister_notifiers(void)
+{
+ unregister_switchdev_blocking_notifier(&rswitch_switchdev_blocking_nb);
+ unregister_switchdev_notifier(&rswitch_switchdev_nb);
+ unregister_netdevice_notifier(&rswitch_netdevice_nb);
+}
diff --git a/drivers/net/ethernet/renesas/rswitch_l2.h b/drivers/net/ethernet/renesas/rswitch_l2.h
new file mode 100644
index 000000000000..57050ede8f31
--- /dev/null
+++ b/drivers/net/ethernet/renesas/rswitch_l2.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Renesas Ethernet Switch device driver
+ *
+ * Copyright (C) 2025 Renesas Electronics Corporation
+ */
+
+#ifndef __RSWITCH_L2_H__
+#define __RSWITCH_L2_H__
+
+void rswitch_update_l2_offload(struct rswitch_private *priv);
+
+int rswitch_register_notifiers(void);
+void rswitch_unregister_notifiers(void);
+
+#endif /* #ifndef __RSWITCH_L2_H__ */
diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch_main.c
index aba772e14555..8d8acc2124b8 100644
--- a/drivers/net/ethernet/renesas/rswitch.c
+++ b/drivers/net/ethernet/renesas/rswitch_main.c
@@ -1,15 +1,18 @@
// SPDX-License-Identifier: GPL-2.0
/* Renesas Ethernet Switch device driver
*
- * Copyright (C) 2022 Renesas Electronics Corporation
+ * Copyright (C) 2022-2025 Renesas Electronics Corporation
*/
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/ip.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
+#include <linux/list.h>
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/of.h>
@@ -25,6 +28,7 @@
#include <linux/sys_soc.h>
#include "rswitch.h"
+#include "rswitch_l2.h"
static int rswitch_reg_wait(void __iomem *addr, u32 offs, u32 mask, u32 expected)
{
@@ -34,7 +38,7 @@ static int rswitch_reg_wait(void __iomem *addr, u32 offs, u32 mask, u32 expected
1, RSWITCH_TIMEOUT_US);
}
-static void rswitch_modify(void __iomem *addr, enum rswitch_reg reg, u32 clear, u32 set)
+void rswitch_modify(void __iomem *addr, enum rswitch_reg reg, u32 clear, u32 set)
{
iowrite32((ioread32(addr + reg) & ~clear) | set, addr + reg);
}
@@ -109,10 +113,11 @@ static void rswitch_top_init(struct rswitch_private *priv)
}
/* Forwarding engine block (MFWD) */
-static void rswitch_fwd_init(struct rswitch_private *priv)
+static int rswitch_fwd_init(struct rswitch_private *priv)
{
u32 all_ports_mask = GENMASK(RSWITCH_NUM_AGENTS - 1, 0);
unsigned int i;
+ u32 reg_val;
/* Start with empty configuration */
for (i = 0; i < RSWITCH_NUM_AGENTS; i++) {
@@ -128,6 +133,14 @@ static void rswitch_fwd_init(struct rswitch_private *priv)
iowrite32(0, priv->addr + FWPBFC(i));
}
+ /* Configure MAC table aging */
+ rswitch_modify(priv->addr, FWMACAGUSPC, FWMACAGUSPC_MACAGUSP,
+ FIELD_PREP(FWMACAGUSPC_MACAGUSP, RSW_AGEING_CLK_PER_US));
+
+ reg_val = FIELD_PREP(FWMACAGC_MACAGT, RSW_AGEING_TIME);
+ reg_val |= FWMACAGC_MACAGE | FWMACAGC_MACAGSL;
+ iowrite32(reg_val, priv->addr + FWMACAGC);
+
/* For enabled ETHA ports, setup port based forwarding */
rswitch_for_each_enabled_port(priv, i) {
/* Port based forwarding from port i to GWCA port */
@@ -140,6 +153,16 @@ static void rswitch_fwd_init(struct rswitch_private *priv)
/* For GWCA port, allow direct descriptor forwarding */
rswitch_modify(priv->addr, FWPC1(priv->gwca.index), FWPC1_DDE, FWPC1_DDE);
+
+ /* Initialize hardware L2 forwarding table */
+
+ /* Allow entire table to be used for "unsecure" entries */
+ rswitch_modify(priv->addr, FWMACHEC, 0, FWMACHEC_MACHMUE_MASK);
+
+ /* Initialize MAC hash table */
+ iowrite32(FWMACTIM_MACTIOG, priv->addr + FWMACTIM);
+
+ return rswitch_reg_wait(priv->addr, FWMACTIM, FWMACTIM_MACTIOG, 0);
}
/* Gateway CPU agent block (GWCA) */
@@ -1602,8 +1625,11 @@ static int rswitch_open(struct net_device *ndev)
netif_start_queue(ndev);
+ if (rdev->brdev)
+ rswitch_update_l2_offload(rdev->priv);
+
return 0;
-};
+}
static int rswitch_stop(struct net_device *ndev)
{
@@ -1624,12 +1650,13 @@ static int rswitch_stop(struct net_device *ndev)
napi_disable(&rdev->napi);
+ if (rdev->brdev)
+ rswitch_update_l2_offload(rdev->priv);
+
if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDID);
- for (tag = find_first_bit(rdev->ts_skb_used, TS_TAGS_PER_PORT);
- tag < TS_TAGS_PER_PORT;
- tag = find_next_bit(rdev->ts_skb_used, TS_TAGS_PER_PORT, tag + 1)) {
+ for_each_set_bit(tag, rdev->ts_skb_used, TS_TAGS_PER_PORT) {
ts_skb = xchg(&rdev->ts_skb[tag], NULL);
clear_bit(tag, rdev->ts_skb_used);
if (ts_skb)
@@ -1637,7 +1664,7 @@ static int rswitch_stop(struct net_device *ndev)
}
return 0;
-};
+}
static bool rswitch_ext_desc_set_info1(struct rswitch_device *rdev,
struct sk_buff *skb,
@@ -1850,16 +1877,46 @@ static int rswitch_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd
}
}
+static int rswitch_get_port_parent_id(struct net_device *ndev,
+ struct netdev_phys_item_id *ppid)
+{
+ struct rswitch_device *rdev = netdev_priv(ndev);
+ const char *name;
+
+ name = dev_name(&rdev->priv->pdev->dev);
+ ppid->id_len = min_t(size_t, strlen(name), sizeof(ppid->id));
+ memcpy(ppid->id, name, ppid->id_len);
+
+ return 0;
+}
+
+static int rswitch_get_phys_port_name(struct net_device *ndev,
+ char *name, size_t len)
+{
+ struct rswitch_device *rdev = netdev_priv(ndev);
+
+ snprintf(name, len, "tsn%d", rdev->port);
+
+ return 0;
+}
+
static const struct net_device_ops rswitch_netdev_ops = {
.ndo_open = rswitch_open,
.ndo_stop = rswitch_stop,
.ndo_start_xmit = rswitch_start_xmit,
.ndo_get_stats = rswitch_get_stats,
.ndo_eth_ioctl = rswitch_eth_ioctl,
+ .ndo_get_port_parent_id = rswitch_get_port_parent_id,
+ .ndo_get_phys_port_name = rswitch_get_phys_port_name,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
};
+bool is_rdev(const struct net_device *ndev)
+{
+ return (ndev->netdev_ops == &rswitch_netdev_ops);
+}
+
static int rswitch_get_ts_info(struct net_device *ndev, struct kernel_ethtool_ts_info *info)
{
struct rswitch_device *rdev = netdev_priv(ndev);
@@ -1959,6 +2016,8 @@ static int rswitch_device_alloc(struct rswitch_private *priv, unsigned int index
if (err < 0)
goto out_txdmac;
+ list_add_tail(&rdev->list, &priv->port_list);
+
return 0;
out_txdmac:
@@ -1978,6 +2037,7 @@ static void rswitch_device_free(struct rswitch_private *priv, unsigned int index
struct rswitch_device *rdev = priv->rdev[index];
struct net_device *ndev = rdev->ndev;
+ list_del(&rdev->list);
rswitch_txdmac_free(ndev);
rswitch_rxdmac_free(ndev);
of_node_put(rdev->np_port);
@@ -2024,10 +2084,11 @@ static int rswitch_init(struct rswitch_private *priv)
}
}
- rswitch_fwd_init(priv);
+ err = rswitch_fwd_init(priv);
+ if (err < 0)
+ goto err_fwd_init;
- err = rcar_gen4_ptp_register(priv->ptp_priv, RCAR_GEN4_PTP_REG_LAYOUT,
- clk_get_rate(priv->clk));
+ err = rcar_gen4_ptp_register(priv->ptp_priv, clk_get_rate(priv->clk));
if (err < 0)
goto err_ptp_register;
@@ -2073,6 +2134,7 @@ err_gwca_ts_request_irq:
err_gwca_request_irq:
rcar_gen4_ptp_unregister(priv->ptp_priv);
+err_fwd_init:
err_ptp_register:
for (i = 0; i < RSWITCH_NUM_PORTS; i++)
rswitch_device_free(priv, i);
@@ -2107,6 +2169,7 @@ static int renesas_eth_sw_probe(struct platform_device *pdev)
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
+
spin_lock_init(&priv->lock);
priv->clk = devm_clk_get(&pdev->dev, NULL);
@@ -2144,6 +2207,8 @@ static int renesas_eth_sw_probe(struct platform_device *pdev)
if (!priv->gwca.queues)
return -ENOMEM;
+ INIT_LIST_HEAD(&priv->port_list);
+
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
@@ -2154,6 +2219,15 @@ static int renesas_eth_sw_probe(struct platform_device *pdev)
return ret;
}
+ if (list_empty(&priv->port_list))
+ dev_warn(&pdev->dev, "could not initialize any ports\n");
+
+ ret = rswitch_register_notifiers();
+ if (ret) {
+ dev_err(&pdev->dev, "could not register notifiers\n");
+ return ret;
+ }
+
device_set_wakeup_capable(&pdev->dev, 1);
return ret;
@@ -2187,6 +2261,7 @@ static void renesas_eth_sw_remove(struct platform_device *pdev)
{
struct rswitch_private *priv = platform_get_drvdata(pdev);
+ rswitch_unregister_notifiers();
rswitch_deinit(priv);
pm_runtime_put(&pdev->dev);
diff --git a/drivers/net/ethernet/renesas/rtsn.c b/drivers/net/ethernet/renesas/rtsn.c
index 05c4b6c8c9c3..15a043e85431 100644
--- a/drivers/net/ethernet/renesas/rtsn.c
+++ b/drivers/net/ethernet/renesas/rtsn.c
@@ -1330,8 +1330,7 @@ static int rtsn_probe(struct platform_device *pdev)
device_set_wakeup_capable(&pdev->dev, 1);
- ret = rcar_gen4_ptp_register(priv->ptp_priv, RCAR_GEN4_PTP_REG_LAYOUT,
- clk_get_rate(priv->clk));
+ ret = rcar_gen4_ptp_register(priv->ptp_priv, clk_get_rate(priv->clk));
if (ret)
goto error_pm;
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 5fc8027c92c7..6fb0ffc1c844 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2233,7 +2233,7 @@ static void sh_eth_get_regs(struct net_device *ndev, struct ethtool_regs *regs,
pm_runtime_get_sync(&mdp->pdev->dev);
__sh_eth_get_regs(ndev, buf);
- pm_runtime_put_sync(&mdp->pdev->dev);
+ pm_runtime_put(&mdp->pdev->dev);
}
static u32 sh_eth_get_msglevel(struct net_device *ndev)
@@ -2360,6 +2360,7 @@ static int sh_eth_set_ringparam(struct net_device *ndev,
return 0;
}
+#ifdef CONFIG_PM_SLEEP
static void sh_eth_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -2386,6 +2387,7 @@ static int sh_eth_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
return 0;
}
+#endif
static const struct ethtool_ops sh_eth_ethtool_ops = {
.get_regs_len = sh_eth_get_regs_len,
@@ -2401,8 +2403,10 @@ static const struct ethtool_ops sh_eth_ethtool_ops = {
.set_ringparam = sh_eth_set_ringparam,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
+#ifdef CONFIG_PM_SLEEP
.get_wol = sh_eth_get_wol,
.set_wol = sh_eth_set_wol,
+#endif
};
/* network device open function */
@@ -2447,7 +2451,7 @@ out_free_irq:
free_irq(ndev->irq, ndev);
out_napi_off:
napi_disable(&mdp->napi);
- pm_runtime_put_sync(&mdp->pdev->dev);
+ pm_runtime_put(&mdp->pdev->dev);
return ret;
}
@@ -3443,8 +3447,6 @@ static void sh_eth_drv_remove(struct platform_device *pdev)
free_netdev(ndev);
}
-#ifdef CONFIG_PM
-#ifdef CONFIG_PM_SLEEP
static int sh_eth_wol_setup(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -3527,28 +3529,8 @@ static int sh_eth_resume(struct device *dev)
return ret;
}
-#endif
-
-static int sh_eth_runtime_nop(struct device *dev)
-{
- /* Runtime PM callback shared between ->runtime_suspend()
- * and ->runtime_resume(). Simply returns success.
- *
- * This driver re-initializes all registers after
- * pm_runtime_get_sync() anyway so there is no need
- * to save and restore registers here.
- */
- return 0;
-}
-static const struct dev_pm_ops sh_eth_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(sh_eth_suspend, sh_eth_resume)
- SET_RUNTIME_PM_OPS(sh_eth_runtime_nop, sh_eth_runtime_nop, NULL)
-};
-#define SH_ETH_PM_OPS (&sh_eth_dev_pm_ops)
-#else
-#define SH_ETH_PM_OPS NULL
-#endif
+static DEFINE_SIMPLE_DEV_PM_OPS(sh_eth_dev_pm_ops, sh_eth_suspend, sh_eth_resume);
static const struct platform_device_id sh_eth_id_table[] = {
{ "sh7619-ether", (kernel_ulong_t)&sh7619_data },
@@ -3568,7 +3550,7 @@ static struct platform_driver sh_eth_driver = {
.id_table = sh_eth_id_table,
.driver = {
.name = CARDNAME,
- .pm = SH_ETH_PM_OPS,
+ .pm = pm_sleep_ptr(&sh_eth_dev_pm_ops),
.of_match_table = of_match_ptr(sh_eth_match_table),
},
};
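
DEFINE_SIMPLE_DEV_PM_OPS() plus pm_sleep_ptr() is the modern replacement for the removed #ifdef CONFIG_PM blocks: the ops structure and callbacks are always compiled (preserving build coverage), while pm_sleep_ptr() evaluates to NULL when CONFIG_PM_SLEEP is disabled, so the unreferenced code can be discarded. The bare pattern, as a generic sketch (driver names are placeholders):

	static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

	static struct platform_driver foo_driver = {
		.driver = {
			.name	= "foo",
			.pm	= pm_sleep_ptr(&foo_pm_ops),
		},
	};
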
diff --git a/drivers/net/ethernet/sfc/ef100_tx.c b/drivers/net/ethernet/sfc/ef100_tx.c
index e6b6be549581..03005757c060 100644
--- a/drivers/net/ethernet/sfc/ef100_tx.c
+++ b/drivers/net/ethernet/sfc/ef100_tx.c
@@ -189,6 +189,7 @@ static void ef100_make_tso_desc(struct efx_nic *efx,
{
bool gso_partial = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL;
unsigned int len, ip_offset, tcp_offset, payload_segs;
+ u32 mangleid_outer = ESE_GZ_TX_DESC_IP4_ID_INC_MOD16;
u32 mangleid = ESE_GZ_TX_DESC_IP4_ID_INC_MOD16;
unsigned int outer_ip_offset, outer_l4_offset;
u16 vlan_tci = skb_vlan_tag_get(skb);
@@ -200,8 +201,17 @@ static void ef100_make_tso_desc(struct efx_nic *efx,
bool outer_csum;
u32 paylen;
- if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID)
- mangleid = ESE_GZ_TX_DESC_IP4_ID_NO_OP;
+ if (encap) {
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID_INNER)
+ mangleid = ESE_GZ_TX_DESC_IP4_ID_NO_OP;
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID)
+ mangleid_outer = ESE_GZ_TX_DESC_IP4_ID_NO_OP;
+ } else {
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID)
+ mangleid = ESE_GZ_TX_DESC_IP4_ID_NO_OP;
+ mangleid_outer = ESE_GZ_TX_DESC_IP4_ID_NO_OP;
+ }
+
if (efx->net_dev->features & NETIF_F_HW_VLAN_CTAG_TX)
vlan_enable = skb_vlan_tag_present(skb);
@@ -245,8 +255,7 @@ static void ef100_make_tso_desc(struct efx_nic *efx,
ESF_GZ_TX_TSO_OUTER_L4_OFF_W, outer_l4_offset >> 1,
ESF_GZ_TX_TSO_ED_OUTER_UDP_LEN, udp_encap && !gso_partial,
ESF_GZ_TX_TSO_ED_OUTER_IP_LEN, encap && !gso_partial,
- ESF_GZ_TX_TSO_ED_OUTER_IP4_ID, encap ? mangleid :
- ESE_GZ_TX_DESC_IP4_ID_NO_OP,
+ ESF_GZ_TX_TSO_ED_OUTER_IP4_ID, mangleid_outer,
ESF_GZ_TX_TSO_VLAN_INSERT_EN, vlan_enable,
ESF_GZ_TX_TSO_VLAN_INSERT_TCI, vlan_tci
);
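
Spelled out, the ID-mangling matrix the new branch encodes:

- tunnel packets: SKB_GSO_TCP_FIXEDID_INNER keeps the inner IPv4 IDs fixed (NO_OP), otherwise they increment mod 16; SKB_GSO_TCP_FIXEDID does the same for the outer header.
- non-tunnel packets: SKB_GSO_TCP_FIXEDID controls the single (inner) header, and the outer field is forced to NO_OP since there is no outer header.
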
diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
index 06b4f52713ef..ed3a96ebc7f3 100644
--- a/drivers/net/ethernet/sfc/efx_channels.c
+++ b/drivers/net/ethernet/sfc/efx_channels.c
@@ -216,8 +216,8 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
if (efx_separate_tx_channels) {
efx->n_tx_channels =
- min(max(n_channels / 2, 1U),
- efx->max_tx_channels);
+ clamp(n_channels / 2, 1U,
+ efx->max_tx_channels);
efx->tx_channel_offset =
n_channels - efx->n_tx_channels;
efx->n_rx_channels =
@@ -1281,7 +1281,7 @@ static int efx_poll(struct napi_struct *napi, int budget)
time = jiffies - channel->rfs_last_expiry;
/* Would our quota be >= 20? */
if (channel->rfs_filter_count * time >= 600 * HZ)
- mod_delayed_work(system_wq, &channel->filter_work, 0);
+ mod_delayed_work(system_percpu_wq, &channel->filter_work, 0);
#endif
/* There is no race here; although napi_disable() will
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 23c6a7df78d0..18fe5850a978 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -217,7 +217,8 @@ static int efx_ethtool_set_wol(struct net_device *net_dev,
}
static void efx_ethtool_get_fec_stats(struct net_device *net_dev,
- struct ethtool_fec_stats *fec_stats)
+ struct ethtool_fec_stats *fec_stats,
+ struct ethtool_fec_hist *hist)
{
struct efx_nic *efx = efx_netdev_priv(net_dev);
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index b07f7e4e2877..d19fbf8732ff 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -1394,9 +1394,8 @@ static int ef4_probe_interrupts(struct ef4_nic *efx)
if (n_channels > extra_channels)
n_channels -= extra_channels;
if (ef4_separate_tx_channels) {
- efx->n_tx_channels = min(max(n_channels / 2,
- 1U),
- efx->max_tx_channels);
+ efx->n_tx_channels = clamp(n_channels / 2, 1U,
+ efx->max_tx_channels);
efx->n_rx_channels = max(n_channels -
efx->n_tx_channels,
1U);
diff --git a/drivers/net/ethernet/sfc/siena/efx_channels.c b/drivers/net/ethernet/sfc/siena/efx_channels.c
index d120b3c83ac0..fc075ab6b7b5 100644
--- a/drivers/net/ethernet/sfc/siena/efx_channels.c
+++ b/drivers/net/ethernet/sfc/siena/efx_channels.c
@@ -217,8 +217,8 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
if (efx_siena_separate_tx_channels) {
efx->n_tx_channels =
- min(max(n_channels / 2, 1U),
- efx->max_tx_channels);
+ clamp(n_channels / 2, 1U,
+ efx->max_tx_channels);
efx->tx_channel_offset =
n_channels - efx->n_tx_channels;
efx->n_rx_channels =
@@ -1300,7 +1300,7 @@ static int efx_poll(struct napi_struct *napi, int budget)
time = jiffies - channel->rfs_last_expiry;
/* Would our quota be >= 20? */
if (channel->rfs_filter_count * time >= 600 * HZ)
- mod_delayed_work(system_wq, &channel->filter_work, 0);
+ mod_delayed_work(system_percpu_wq, &channel->filter_work, 0);
#endif
/* There is no race here; although napi_disable() will
diff --git a/drivers/net/ethernet/sfc/siena/ethtool.c b/drivers/net/ethernet/sfc/siena/ethtool.c
index 994909789bfe..8c3ebd0617fb 100644
--- a/drivers/net/ethernet/sfc/siena/ethtool.c
+++ b/drivers/net/ethernet/sfc/siena/ethtool.c
@@ -217,7 +217,8 @@ static int efx_ethtool_set_wol(struct net_device *net_dev,
}
static void efx_ethtool_get_fec_stats(struct net_device *net_dev,
- struct ethtool_fec_stats *fec_stats)
+ struct ethtool_fec_stats *fec_stats,
+ struct ethtool_fec_hist *hist)
{
struct efx_nic *efx = netdev_priv(net_dev);
diff --git a/drivers/net/ethernet/sfc/tc_encap_actions.c b/drivers/net/ethernet/sfc/tc_encap_actions.c
index e872f926e438..eef06e48185d 100644
--- a/drivers/net/ethernet/sfc/tc_encap_actions.c
+++ b/drivers/net/ethernet/sfc/tc_encap_actions.c
@@ -11,6 +11,8 @@
#include "tc_encap_actions.h"
#include "tc.h"
#include "mae.h"
+#include <net/flow.h>
+#include <net/inet_dscp.h>
#include <net/vxlan.h>
#include <net/geneve.h>
#include <net/netevent.h>
@@ -99,7 +101,7 @@ static int efx_bind_neigh(struct efx_nic *efx,
case EFX_ENCAP_TYPE_GENEVE:
flow4.flowi4_proto = IPPROTO_UDP;
flow4.fl4_dport = encap->key.tp_dst;
- flow4.flowi4_tos = encap->key.tos;
+ flow4.flowi4_dscp = inet_dsfield_to_dscp(encap->key.tos);
flow4.daddr = encap->key.u.ipv4.dst;
flow4.saddr = encap->key.u.ipv4.src;
break;
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 6ca290f7c0df..3ebd0664c697 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2162,10 +2162,20 @@ static const struct net_device_ops smsc911x_netdev_ops = {
static void smsc911x_read_mac_address(struct net_device *dev)
{
struct smsc911x_data *pdata = netdev_priv(dev);
- u32 mac_high16 = smsc911x_mac_read(pdata, ADDRH);
- u32 mac_low32 = smsc911x_mac_read(pdata, ADDRL);
+ u32 mac_high16, mac_low32;
u8 addr[ETH_ALEN];
+ mac_high16 = smsc911x_mac_read(pdata, ADDRH);
+ mac_low32 = smsc911x_mac_read(pdata, ADDRL);
+
+ /* The first mac_read in some setups can incorrectly read 0. Re-read it
+ * to get the full MAC if this is observed.
+ */
+ if (mac_high16 == 0) {
+ SMSC_TRACE(pdata, probe, "Re-read MAC ADDRH\n");
+ mac_high16 = smsc911x_mac_read(pdata, ADDRH);
+ }
+
addr[0] = (u8)(mac_low32);
addr[1] = (u8)(mac_low32 >> 8);
addr[2] = (u8)(mac_low32 >> 16);
diff --git a/drivers/net/ethernet/spacemit/Kconfig b/drivers/net/ethernet/spacemit/Kconfig
new file mode 100644
index 000000000000..85ef61a9b4ef
--- /dev/null
+++ b/drivers/net/ethernet/spacemit/Kconfig
@@ -0,0 +1,29 @@
+config NET_VENDOR_SPACEMIT
+ bool "SpacemiT devices"
+ default y
+ depends on ARCH_SPACEMIT || COMPILE_TEST
+ help
+ If you have a network (Ethernet) device belonging to this class,
+ say Y.
+
+ Note that the answer to this question does not directly affect
+ the kernel: saying N will just cause the configurator to skip all
+ the questions regarding SpacemiT devices. If you say Y, you will
+ be asked for your specific chipset/driver in the following questions.
+
+if NET_VENDOR_SPACEMIT
+
+config SPACEMIT_K1_EMAC
+ tristate "SpacemiT K1 Ethernet MAC driver"
+ depends on ARCH_SPACEMIT || COMPILE_TEST
+ depends on MFD_SYSCON
+ depends on OF
+ default m if ARCH_SPACEMIT
+ select PHYLIB
+ help
+ This driver supports the Ethernet MAC in the SpacemiT K1 SoC.
+
+ To compile this driver as a module, choose M here: the module
+ will be called k1_emac.
+
+endif # NET_VENDOR_SPACEMIT
diff --git a/drivers/net/ethernet/spacemit/Makefile b/drivers/net/ethernet/spacemit/Makefile
new file mode 100644
index 000000000000..d29efd997a4f
--- /dev/null
+++ b/drivers/net/ethernet/spacemit/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the SpacemiT network device drivers.
+#
+
+obj-$(CONFIG_SPACEMIT_K1_EMAC) += k1_emac.o
diff --git a/drivers/net/ethernet/spacemit/k1_emac.c b/drivers/net/ethernet/spacemit/k1_emac.c
new file mode 100644
index 000000000000..e1c5faff3b71
--- /dev/null
+++ b/drivers/net/ethernet/spacemit/k1_emac.c
@@ -0,0 +1,2159 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SpacemiT K1 Ethernet driver
+ *
+ * Copyright (C) 2023-2025 SpacemiT (Hangzhou) Technology Co. Ltd
+ * Copyright (C) 2025 Vivian Wang <wangruikang@iscas.ac.cn>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/rtnetlink.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+
+#include "k1_emac.h"
+
+#define DRIVER_NAME "k1_emac"
+
+#define EMAC_DEFAULT_BUFSIZE 1536
+#define EMAC_RX_BUF_2K 2048
+#define EMAC_RX_BUF_4K 4096
+
+/* Tuning parameters from SpacemiT */
+#define EMAC_TX_FRAMES 64
+#define EMAC_TX_COAL_TIMEOUT 40000
+#define EMAC_RX_FRAMES 64
+#define EMAC_RX_COAL_TIMEOUT (600 * 312)
+
+#define DEFAULT_FC_PAUSE_TIME 0xffff
+#define DEFAULT_FC_FIFO_HIGH 1600
+#define DEFAULT_TX_ALMOST_FULL 0x1f8
+#define DEFAULT_TX_THRESHOLD 1518
+#define DEFAULT_RX_THRESHOLD 12
+#define DEFAULT_TX_RING_NUM 1024
+#define DEFAULT_RX_RING_NUM 1024
+#define DEFAULT_DMA_BURST MREGBIT_BURST_16WORD
+#define HASH_TABLE_SIZE 64
+
+struct desc_buf {
+ u64 dma_addr;
+ void *buff_addr;
+ u16 dma_len;
+ u8 map_as_page;
+};
+
+struct emac_tx_desc_buffer {
+ struct sk_buff *skb;
+ struct desc_buf buf[2];
+};
+
+struct emac_rx_desc_buffer {
+ struct sk_buff *skb;
+ u64 dma_addr;
+ void *buff_addr;
+ u16 dma_len;
+ u8 map_as_page;
+};
+
+/**
+ * struct emac_desc_ring - Software-side information for one descriptor ring
+ * The same structure is used for both RX and TX rings.
+ * @desc_addr: Virtual address to the descriptor ring memory
+ * @desc_dma_addr: DMA address of the descriptor ring
+ * @total_size: Size of ring in bytes
+ * @total_cnt: Number of descriptors
+ * @head: Next descriptor to associate a buffer with
+ * @tail: Next descriptor to check status bit
+ * @rx_desc_buf: Per-descriptor buffer bookkeeping for RX
+ * @tx_desc_buf: Per-descriptor buffer bookkeeping for TX, at most two buffers each
+ */
+struct emac_desc_ring {
+ void *desc_addr;
+ dma_addr_t desc_dma_addr;
+ u32 total_size;
+ u32 total_cnt;
+ u32 head;
+ u32 tail;
+ union {
+ struct emac_rx_desc_buffer *rx_desc_buf;
+ struct emac_tx_desc_buffer *tx_desc_buf;
+ };
+};
+
+struct emac_priv {
+ void __iomem *iobase;
+ u32 dma_buf_sz;
+ struct emac_desc_ring tx_ring;
+ struct emac_desc_ring rx_ring;
+
+ struct net_device *ndev;
+ struct napi_struct napi;
+ struct platform_device *pdev;
+ struct clk *bus_clk;
+ struct clk *ref_clk;
+ struct regmap *regmap_apmu;
+ u32 regmap_apmu_offset;
+ int irq;
+
+ phy_interface_t phy_interface;
+
+ union emac_hw_tx_stats tx_stats, tx_stats_off;
+ union emac_hw_rx_stats rx_stats, rx_stats_off;
+
+ u32 tx_count_frames;
+ u32 tx_coal_frames;
+ u32 tx_coal_timeout;
+ struct work_struct tx_timeout_task;
+
+ struct timer_list txtimer;
+ struct timer_list stats_timer;
+
+ u32 tx_delay;
+ u32 rx_delay;
+
+ bool flow_control_autoneg;
+ u8 flow_control;
+
+ /* Softirq-safe, hold while touching hardware statistics */
+ spinlock_t stats_lock;
+};
+
+static void emac_wr(struct emac_priv *priv, u32 reg, u32 val)
+{
+ writel(val, priv->iobase + reg);
+}
+
+static u32 emac_rd(struct emac_priv *priv, u32 reg)
+{
+ return readl(priv->iobase + reg);
+}
+
+static int emac_phy_interface_config(struct emac_priv *priv)
+{
+ u32 val = 0, mask = REF_CLK_SEL | RGMII_TX_CLK_SEL | PHY_INTF_RGMII;
+
+ if (phy_interface_mode_is_rgmii(priv->phy_interface))
+ val |= PHY_INTF_RGMII;
+
+ regmap_update_bits(priv->regmap_apmu,
+ priv->regmap_apmu_offset + APMU_EMAC_CTRL_REG,
+ mask, val);
+
+ return 0;
+}
+
+/*
+ * Where the hardware expects a MAC address, it is laid out across three
+ * consecutive registers in high/med/low order, two bytes per register,
+ * least-significant byte first, as below.
+ */
+
+static void emac_set_mac_addr_reg(struct emac_priv *priv,
+ const unsigned char *addr,
+ u32 reg)
+{
+ emac_wr(priv, reg + sizeof(u32) * 0, addr[1] << 8 | addr[0]);
+ emac_wr(priv, reg + sizeof(u32) * 1, addr[3] << 8 | addr[2]);
+ emac_wr(priv, reg + sizeof(u32) * 2, addr[5] << 8 | addr[4]);
+}
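+
+/*
+ * Worked example (illustrative address only): writing 00:11:22:33:44:55
+ * with reg = MAC_ADDRESS1_HIGH stores
+ *
+ *   MAC_ADDRESS1_HIGH (+0x0) = 0x1100   (addr[1] << 8 | addr[0])
+ *   MAC_ADDRESS1_MED  (+0x4) = 0x3322   (addr[3] << 8 | addr[2])
+ *   MAC_ADDRESS1_LOW  (+0x8) = 0x5544   (addr[5] << 8 | addr[4])
+ */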
+
+static void emac_set_mac_addr(struct emac_priv *priv, const unsigned char *addr)
+{
+ /* We use only one address, so set the same for flow control as well */
+ emac_set_mac_addr_reg(priv, addr, MAC_ADDRESS1_HIGH);
+ emac_set_mac_addr_reg(priv, addr, MAC_FC_SOURCE_ADDRESS_HIGH);
+}
+
+static void emac_reset_hw(struct emac_priv *priv)
+{
+ /* Disable all interrupts */
+ emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0);
+ emac_wr(priv, DMA_INTERRUPT_ENABLE, 0x0);
+
+ /* Disable transmit and receive units */
+ emac_wr(priv, MAC_RECEIVE_CONTROL, 0x0);
+ emac_wr(priv, MAC_TRANSMIT_CONTROL, 0x0);
+
+ /* Disable DMA */
+ emac_wr(priv, DMA_CONTROL, 0x0);
+}
+
+static void emac_init_hw(struct emac_priv *priv)
+{
+ /* Destination address for 802.3x Ethernet flow control */
+ u8 fc_dest_addr[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x01 };
+
+ u32 rxirq = 0, dma = 0;
+
+ regmap_set_bits(priv->regmap_apmu,
+ priv->regmap_apmu_offset + APMU_EMAC_CTRL_REG,
+ AXI_SINGLE_ID);
+
+ /* Disable transmit and receive units */
+ emac_wr(priv, MAC_RECEIVE_CONTROL, 0x0);
+ emac_wr(priv, MAC_TRANSMIT_CONTROL, 0x0);
+
+ /* Enable MAC address 1 filtering */
+ emac_wr(priv, MAC_ADDRESS_CONTROL, MREGBIT_MAC_ADDRESS1_ENABLE);
+
+ /* Zero initialize the multicast hash table */
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, 0x0);
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, 0x0);
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, 0x0);
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, 0x0);
+
+ /* Configure thresholds */
+ emac_wr(priv, MAC_TRANSMIT_FIFO_ALMOST_FULL, DEFAULT_TX_ALMOST_FULL);
+ emac_wr(priv, MAC_TRANSMIT_PACKET_START_THRESHOLD,
+ DEFAULT_TX_THRESHOLD);
+ emac_wr(priv, MAC_RECEIVE_PACKET_START_THRESHOLD, DEFAULT_RX_THRESHOLD);
+
+ /* Configure flow control (enabled in emac_adjust_link() later) */
+ emac_set_mac_addr_reg(priv, fc_dest_addr, MAC_FC_SOURCE_ADDRESS_HIGH);
+ emac_wr(priv, MAC_FC_PAUSE_HIGH_THRESHOLD, DEFAULT_FC_FIFO_HIGH);
+ emac_wr(priv, MAC_FC_HIGH_PAUSE_TIME, DEFAULT_FC_PAUSE_TIME);
+ emac_wr(priv, MAC_FC_PAUSE_LOW_THRESHOLD, 0);
+
+ /* RX IRQ mitigation */
+ rxirq = FIELD_PREP(MREGBIT_RECEIVE_IRQ_FRAME_COUNTER_MASK,
+ EMAC_RX_FRAMES);
+ rxirq |= FIELD_PREP(MREGBIT_RECEIVE_IRQ_TIMEOUT_COUNTER_MASK,
+ EMAC_RX_COAL_TIMEOUT);
+ rxirq |= MREGBIT_RECEIVE_IRQ_MITIGATION_ENABLE;
+ emac_wr(priv, DMA_RECEIVE_IRQ_MITIGATION_CTRL, rxirq);
+
+ /* Disable and set DMA config */
+ emac_wr(priv, DMA_CONTROL, 0x0);
+
+ emac_wr(priv, DMA_CONFIGURATION, MREGBIT_SOFTWARE_RESET);
+ usleep_range(9000, 10000);
+ emac_wr(priv, DMA_CONFIGURATION, 0x0);
+ usleep_range(9000, 10000);
+
+ dma |= MREGBIT_STRICT_BURST;
+ dma |= MREGBIT_DMA_64BIT_MODE;
+ dma |= DEFAULT_DMA_BURST;
+
+ emac_wr(priv, DMA_CONFIGURATION, dma);
+}
+
+static void emac_dma_start_transmit(struct emac_priv *priv)
+{
+ /* The actual value written does not matter */
+ emac_wr(priv, DMA_TRANSMIT_POLL_DEMAND, 1);
+}
+
+static void emac_enable_interrupt(struct emac_priv *priv)
+{
+ u32 val;
+
+ val = emac_rd(priv, DMA_INTERRUPT_ENABLE);
+ val |= MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE;
+ val |= MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE;
+ emac_wr(priv, DMA_INTERRUPT_ENABLE, val);
+}
+
+static void emac_disable_interrupt(struct emac_priv *priv)
+{
+ u32 val;
+
+ val = emac_rd(priv, DMA_INTERRUPT_ENABLE);
+ val &= ~MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE;
+ val &= ~MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE;
+ emac_wr(priv, DMA_INTERRUPT_ENABLE, val);
+}
+
+static u32 emac_tx_avail(struct emac_priv *priv)
+{
+ struct emac_desc_ring *tx_ring = &priv->tx_ring;
+ u32 avail;
+
+ if (tx_ring->tail > tx_ring->head)
+ avail = tx_ring->tail - tx_ring->head - 1;
+ else
+ avail = tx_ring->total_cnt - tx_ring->head + tx_ring->tail - 1;
+
+ return avail;
+}
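+
+/*
+ * Example (hypothetical ring state): with total_cnt = 8, head = 6 and
+ * tail = 2, avail = 8 - 6 + 2 - 1 = 3. One slot is always kept free so
+ * that a full ring (head just behind tail) is distinguishable from an
+ * empty one (head == tail).
+ */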
+
+static void emac_tx_coal_timer_resched(struct emac_priv *priv)
+{
+ mod_timer(&priv->txtimer,
+ jiffies + usecs_to_jiffies(priv->tx_coal_timeout));
+}
+
+static void emac_tx_coal_timer(struct timer_list *t)
+{
+ struct emac_priv *priv = timer_container_of(priv, t, txtimer);
+
+ napi_schedule(&priv->napi);
+}
+
+static bool emac_tx_should_interrupt(struct emac_priv *priv, u32 pkt_num)
+{
+ priv->tx_count_frames += pkt_num;
+ if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
+ emac_tx_coal_timer_resched(priv);
+ return false;
+ }
+
+ priv->tx_count_frames = 0;
+ return true;
+}
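+
+/*
+ * Example of the resulting coalescing behaviour, assuming the defaults
+ * set in emac_sw_init(): with tx_coal_frames = 64, only every 64th
+ * packet requests an interrupt on completion; lighter traffic relies on
+ * the txtimer rearmed above, which fires after tx_coal_timeout us.
+ */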
+
+static void emac_free_tx_buf(struct emac_priv *priv, int i)
+{
+ struct emac_tx_desc_buffer *tx_buf;
+ struct emac_desc_ring *tx_ring;
+ struct desc_buf *buf;
+ int j;
+
+ tx_ring = &priv->tx_ring;
+ tx_buf = &tx_ring->tx_desc_buf[i];
+
+ for (j = 0; j < 2; j++) {
+ buf = &tx_buf->buf[j];
+ if (!buf->dma_addr)
+ continue;
+
+ if (buf->map_as_page)
+ dma_unmap_page(&priv->pdev->dev, buf->dma_addr,
+ buf->dma_len, DMA_TO_DEVICE);
+ else
+ dma_unmap_single(&priv->pdev->dev,
+ buf->dma_addr, buf->dma_len,
+ DMA_TO_DEVICE);
+
+ buf->dma_addr = 0;
+ buf->map_as_page = false;
+ buf->buff_addr = NULL;
+ }
+
+ if (tx_buf->skb) {
+ dev_kfree_skb_any(tx_buf->skb);
+ tx_buf->skb = NULL;
+ }
+}
+
+static void emac_clean_tx_desc_ring(struct emac_priv *priv)
+{
+ struct emac_desc_ring *tx_ring = &priv->tx_ring;
+ u32 i;
+
+ for (i = 0; i < tx_ring->total_cnt; i++)
+ emac_free_tx_buf(priv, i);
+
+ tx_ring->head = 0;
+ tx_ring->tail = 0;
+}
+
+static void emac_clean_rx_desc_ring(struct emac_priv *priv)
+{
+ struct emac_rx_desc_buffer *rx_buf;
+ struct emac_desc_ring *rx_ring;
+ u32 i;
+
+ rx_ring = &priv->rx_ring;
+
+ for (i = 0; i < rx_ring->total_cnt; i++) {
+ rx_buf = &rx_ring->rx_desc_buf[i];
+
+ if (!rx_buf->skb)
+ continue;
+
+ dma_unmap_single(&priv->pdev->dev, rx_buf->dma_addr,
+ rx_buf->dma_len, DMA_FROM_DEVICE);
+
+ dev_kfree_skb(rx_buf->skb);
+ rx_buf->skb = NULL;
+ }
+
+ rx_ring->tail = 0;
+ rx_ring->head = 0;
+}
+
+static int emac_alloc_tx_resources(struct emac_priv *priv)
+{
+ struct emac_desc_ring *tx_ring = &priv->tx_ring;
+ struct platform_device *pdev = priv->pdev;
+
+ tx_ring->tx_desc_buf = kcalloc(tx_ring->total_cnt,
+ sizeof(*tx_ring->tx_desc_buf),
+ GFP_KERNEL);
+
+ if (!tx_ring->tx_desc_buf)
+ return -ENOMEM;
+
+ tx_ring->total_size = tx_ring->total_cnt * sizeof(struct emac_desc);
+ tx_ring->total_size = ALIGN(tx_ring->total_size, PAGE_SIZE);
+
+ tx_ring->desc_addr = dma_alloc_coherent(&pdev->dev, tx_ring->total_size,
+ &tx_ring->desc_dma_addr,
+ GFP_KERNEL);
+ if (!tx_ring->desc_addr) {
+ kfree(tx_ring->tx_desc_buf);
+ return -ENOMEM;
+ }
+
+ tx_ring->head = 0;
+ tx_ring->tail = 0;
+
+ return 0;
+}
+
+static int emac_alloc_rx_resources(struct emac_priv *priv)
+{
+ struct emac_desc_ring *rx_ring = &priv->rx_ring;
+ struct platform_device *pdev = priv->pdev;
+
+ rx_ring->rx_desc_buf = kcalloc(rx_ring->total_cnt,
+ sizeof(*rx_ring->rx_desc_buf),
+ GFP_KERNEL);
+ if (!rx_ring->rx_desc_buf)
+ return -ENOMEM;
+
+ rx_ring->total_size = rx_ring->total_cnt * sizeof(struct emac_desc);
+
+ rx_ring->total_size = ALIGN(rx_ring->total_size, PAGE_SIZE);
+
+ rx_ring->desc_addr = dma_alloc_coherent(&pdev->dev, rx_ring->total_size,
+ &rx_ring->desc_dma_addr,
+ GFP_KERNEL);
+ if (!rx_ring->desc_addr) {
+ kfree(rx_ring->rx_desc_buf);
+ return -ENOMEM;
+ }
+
+ rx_ring->head = 0;
+ rx_ring->tail = 0;
+
+ return 0;
+}
+
+static void emac_free_tx_resources(struct emac_priv *priv)
+{
+ struct emac_desc_ring *tr = &priv->tx_ring;
+ struct device *dev = &priv->pdev->dev;
+
+ emac_clean_tx_desc_ring(priv);
+
+ kfree(tr->tx_desc_buf);
+ tr->tx_desc_buf = NULL;
+
+ dma_free_coherent(dev, tr->total_size, tr->desc_addr,
+ tr->desc_dma_addr);
+ tr->desc_addr = NULL;
+}
+
+static void emac_free_rx_resources(struct emac_priv *priv)
+{
+ struct emac_desc_ring *rr = &priv->rx_ring;
+ struct device *dev = &priv->pdev->dev;
+
+ emac_clean_rx_desc_ring(priv);
+
+ kfree(rr->rx_desc_buf);
+ rr->rx_desc_buf = NULL;
+
+ dma_free_coherent(dev, rr->total_size, rr->desc_addr,
+ rr->desc_dma_addr);
+ rr->desc_addr = NULL;
+}
+
+static int emac_tx_clean_desc(struct emac_priv *priv)
+{
+ struct net_device *ndev = priv->ndev;
+ struct emac_desc_ring *tx_ring;
+ struct emac_desc *tx_desc;
+ u32 i;
+
+ netif_tx_lock(ndev);
+
+ tx_ring = &priv->tx_ring;
+
+ i = tx_ring->tail;
+
+ while (i != tx_ring->head) {
+ tx_desc = &((struct emac_desc *)tx_ring->desc_addr)[i];
+
+		/* Stop checking if desc is still owned by DMA */
+ if (READ_ONCE(tx_desc->desc0) & TX_DESC_0_OWN)
+ break;
+
+ emac_free_tx_buf(priv, i);
+ memset(tx_desc, 0, sizeof(struct emac_desc));
+
+ if (++i == tx_ring->total_cnt)
+ i = 0;
+ }
+
+ tx_ring->tail = i;
+
+ if (unlikely(netif_queue_stopped(ndev) &&
+ emac_tx_avail(priv) > tx_ring->total_cnt / 4))
+ netif_wake_queue(ndev);
+
+ netif_tx_unlock(ndev);
+
+ return 0;
+}
+
+static bool emac_rx_frame_good(struct emac_priv *priv, struct emac_desc *desc)
+{
+ const char *msg;
+ u32 len;
+
+ len = FIELD_GET(RX_DESC_0_FRAME_PACKET_LENGTH_MASK, desc->desc0);
+
+ if (WARN_ON_ONCE(!(desc->desc0 & RX_DESC_0_LAST_DESCRIPTOR)))
+ msg = "Not last descriptor"; /* This would be a bug */
+ else if (desc->desc0 & RX_DESC_0_FRAME_RUNT)
+ msg = "Runt frame";
+ else if (desc->desc0 & RX_DESC_0_FRAME_CRC_ERR)
+ msg = "Frame CRC error";
+ else if (desc->desc0 & RX_DESC_0_FRAME_MAX_LEN_ERR)
+ msg = "Frame exceeds max length";
+ else if (desc->desc0 & RX_DESC_0_FRAME_JABBER_ERR)
+ msg = "Frame jabber error";
+ else if (desc->desc0 & RX_DESC_0_FRAME_LENGTH_ERR)
+ msg = "Frame length error";
+ else if (len <= ETH_FCS_LEN || len > priv->dma_buf_sz)
+ msg = "Frame length unacceptable";
+ else
+ return true; /* All good */
+
+	dev_dbg_ratelimited(&priv->ndev->dev, "RX error: %s\n", msg);
+
+ return false;
+}
+
+static void emac_alloc_rx_desc_buffers(struct emac_priv *priv)
+{
+ struct emac_desc_ring *rx_ring = &priv->rx_ring;
+ struct emac_desc rx_desc, *rx_desc_addr;
+ struct net_device *ndev = priv->ndev;
+ struct emac_rx_desc_buffer *rx_buf;
+ struct sk_buff *skb;
+ u32 i;
+
+ i = rx_ring->head;
+ rx_buf = &rx_ring->rx_desc_buf[i];
+
+ while (!rx_buf->skb) {
+ skb = netdev_alloc_skb_ip_align(ndev, priv->dma_buf_sz);
+ if (!skb)
+ break;
+
+ skb->dev = ndev;
+
+ rx_buf->skb = skb;
+ rx_buf->dma_len = priv->dma_buf_sz;
+ rx_buf->dma_addr = dma_map_single(&priv->pdev->dev, skb->data,
+ priv->dma_buf_sz,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&priv->pdev->dev, rx_buf->dma_addr)) {
+ dev_err_ratelimited(&ndev->dev, "Mapping skb failed\n");
+ goto err_free_skb;
+ }
+
+ rx_desc_addr = &((struct emac_desc *)rx_ring->desc_addr)[i];
+
+ memset(&rx_desc, 0, sizeof(rx_desc));
+
+ rx_desc.buffer_addr_1 = rx_buf->dma_addr;
+ rx_desc.desc1 = FIELD_PREP(RX_DESC_1_BUFFER_SIZE_1_MASK,
+ rx_buf->dma_len);
+
+ if (++i == rx_ring->total_cnt) {
+ rx_desc.desc1 |= RX_DESC_1_END_RING;
+ i = 0;
+ }
+
+ *rx_desc_addr = rx_desc;
+ dma_wmb();
+ WRITE_ONCE(rx_desc_addr->desc0, rx_desc.desc0 | RX_DESC_0_OWN);
+
+ rx_buf = &rx_ring->rx_desc_buf[i];
+ }
+
+ rx_ring->head = i;
+ return;
+
+err_free_skb:
+ dev_kfree_skb_any(skb);
+ rx_buf->skb = NULL;
+}
+
+/* Returns number of packets received */
+static int emac_rx_clean_desc(struct emac_priv *priv, int budget)
+{
+ struct net_device *ndev = priv->ndev;
+ struct emac_rx_desc_buffer *rx_buf;
+ struct emac_desc_ring *rx_ring;
+ struct sk_buff *skb = NULL;
+ struct emac_desc *rx_desc;
+ u32 got = 0, skb_len, i;
+
+ rx_ring = &priv->rx_ring;
+
+ i = rx_ring->tail;
+
+ while (budget--) {
+ rx_desc = &((struct emac_desc *)rx_ring->desc_addr)[i];
+
+		/* Stop checking if rx_desc is still owned by DMA */
+ if (READ_ONCE(rx_desc->desc0) & RX_DESC_0_OWN)
+ break;
+
+ dma_rmb();
+
+ rx_buf = &rx_ring->rx_desc_buf[i];
+
+ if (!rx_buf->skb)
+ break;
+
+ got++;
+
+ dma_unmap_single(&priv->pdev->dev, rx_buf->dma_addr,
+ rx_buf->dma_len, DMA_FROM_DEVICE);
+
+ if (likely(emac_rx_frame_good(priv, rx_desc))) {
+ skb = rx_buf->skb;
+
+ skb_len = FIELD_GET(RX_DESC_0_FRAME_PACKET_LENGTH_MASK,
+ rx_desc->desc0);
+ skb_len -= ETH_FCS_LEN;
+
+ skb_put(skb, skb_len);
+ skb->dev = ndev;
+ ndev->hard_header_len = ETH_HLEN;
+
+ skb->protocol = eth_type_trans(skb, ndev);
+
+ skb->ip_summed = CHECKSUM_NONE;
+
+ napi_gro_receive(&priv->napi, skb);
+
+ memset(rx_desc, 0, sizeof(struct emac_desc));
+ rx_buf->skb = NULL;
+ } else {
+ dev_kfree_skb_irq(rx_buf->skb);
+ rx_buf->skb = NULL;
+ }
+
+ if (++i == rx_ring->total_cnt)
+ i = 0;
+ }
+
+ rx_ring->tail = i;
+
+ emac_alloc_rx_desc_buffers(priv);
+
+ return got;
+}
+
+static int emac_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct emac_priv *priv = container_of(napi, struct emac_priv, napi);
+ int work_done;
+
+ emac_tx_clean_desc(priv);
+
+ work_done = emac_rx_clean_desc(priv, budget);
+ if (work_done < budget && napi_complete_done(napi, work_done))
+ emac_enable_interrupt(priv);
+
+ return work_done;
+}
+
+/*
+ * For convenience, skb->data is fragment 0, frags[0] is fragment 1, etc.
+ *
+ * Each descriptor can hold up to two fragments, called buffer 1 and 2. For each
+ * fragment f, if f % 2 == 0, it uses buffer 1, otherwise it uses buffer 2.
+ */
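+
+/*
+ * Worked example (hypothetical skb with nr_frags = 3, i.e. fragments
+ * 0..3): the first descriptor carries fragment 0 in buffer 1 and
+ * fragment 1 in buffer 2; the second carries fragments 2 and 3. Only
+ * the first descriptor gets TX_DESC_1_FIRST_SEGMENT and only the
+ * second gets TX_DESC_1_LAST_SEGMENT.
+ */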
+
+static int emac_tx_map_frag(struct device *dev, struct emac_desc *tx_desc,
+ struct emac_tx_desc_buffer *tx_buf,
+ struct sk_buff *skb, u32 frag_idx)
+{
+ bool map_as_page, buf_idx;
+ const skb_frag_t *frag;
+ phys_addr_t addr;
+ u32 len;
+ int ret;
+
+ buf_idx = frag_idx % 2;
+
+ if (frag_idx == 0) {
+ /* Non-fragmented part */
+ len = skb_headlen(skb);
+ addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
+ map_as_page = false;
+ } else {
+ /* Fragment */
+ frag = &skb_shinfo(skb)->frags[frag_idx - 1];
+ len = skb_frag_size(frag);
+ addr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
+ map_as_page = true;
+ }
+
+ ret = dma_mapping_error(dev, addr);
+ if (ret)
+ return ret;
+
+ tx_buf->buf[buf_idx].dma_addr = addr;
+ tx_buf->buf[buf_idx].dma_len = len;
+ tx_buf->buf[buf_idx].map_as_page = map_as_page;
+
+ if (buf_idx == 0) {
+ tx_desc->buffer_addr_1 = addr;
+ tx_desc->desc1 |= FIELD_PREP(TX_DESC_1_BUFFER_SIZE_1_MASK, len);
+ } else {
+ tx_desc->buffer_addr_2 = addr;
+ tx_desc->desc1 |= FIELD_PREP(TX_DESC_1_BUFFER_SIZE_2_MASK, len);
+ }
+
+ return 0;
+}
+
+static void emac_tx_mem_map(struct emac_priv *priv, struct sk_buff *skb)
+{
+ struct emac_desc_ring *tx_ring = &priv->tx_ring;
+ struct emac_desc tx_desc, *tx_desc_addr;
+ struct device *dev = &priv->pdev->dev;
+ struct emac_tx_desc_buffer *tx_buf;
+ u32 head, old_head, frag_num, f;
+ bool buf_idx;
+
+ frag_num = skb_shinfo(skb)->nr_frags;
+ head = tx_ring->head;
+ old_head = head;
+
+ for (f = 0; f < frag_num + 1; f++) {
+ buf_idx = f % 2;
+
+ /*
+ * If using buffer 1, initialize a new desc. Otherwise, use
+		 * buffer 2 of the previous fragment's desc.
+ */
+ if (!buf_idx) {
+ tx_buf = &tx_ring->tx_desc_buf[head];
+ tx_desc_addr =
+ &((struct emac_desc *)tx_ring->desc_addr)[head];
+ memset(&tx_desc, 0, sizeof(tx_desc));
+
+ /*
+			 * Give ownership of all but the first desc initially. The
+			 * first desc is handed over at the end so DMA cannot start
+			 * reading uninitialized descs.
+ */
+ if (head != old_head)
+ tx_desc.desc0 |= TX_DESC_0_OWN;
+
+ if (++head == tx_ring->total_cnt) {
+ /* Just used last desc in ring */
+ tx_desc.desc1 |= TX_DESC_1_END_RING;
+ head = 0;
+ }
+ }
+
+ if (emac_tx_map_frag(dev, &tx_desc, tx_buf, skb, f)) {
+ dev_err_ratelimited(&priv->ndev->dev,
+ "Map TX frag %d failed\n", f);
+ goto err_free_skb;
+ }
+
+ if (f == 0)
+ tx_desc.desc1 |= TX_DESC_1_FIRST_SEGMENT;
+
+ if (f == frag_num) {
+ tx_desc.desc1 |= TX_DESC_1_LAST_SEGMENT;
+ tx_buf->skb = skb;
+ if (emac_tx_should_interrupt(priv, frag_num + 1))
+ tx_desc.desc1 |=
+ TX_DESC_1_INTERRUPT_ON_COMPLETION;
+ }
+
+ *tx_desc_addr = tx_desc;
+ }
+
+	/* All descriptors are ready, give ownership of the first desc */
+ tx_desc_addr = &((struct emac_desc *)tx_ring->desc_addr)[old_head];
+ dma_wmb();
+ WRITE_ONCE(tx_desc_addr->desc0, tx_desc_addr->desc0 | TX_DESC_0_OWN);
+
+ emac_dma_start_transmit(priv);
+
+ tx_ring->head = head;
+
+ return;
+
+err_free_skb:
+ dev_dstats_tx_dropped(priv->ndev);
+ dev_kfree_skb_any(skb);
+}
+
+static netdev_tx_t emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+ int nfrags = skb_shinfo(skb)->nr_frags;
+ struct device *dev = &priv->pdev->dev;
+
+ if (unlikely(emac_tx_avail(priv) < nfrags + 1)) {
+ if (!netif_queue_stopped(ndev)) {
+ netif_stop_queue(ndev);
+			dev_err_ratelimited(dev, "TX ring full, stopping TX queue\n");
+ }
+ return NETDEV_TX_BUSY;
+ }
+
+ emac_tx_mem_map(priv, skb);
+
+ /* Make sure there is space in the ring for the next TX. */
+ if (unlikely(emac_tx_avail(priv) <= MAX_SKB_FRAGS + 2))
+ netif_stop_queue(ndev);
+
+ return NETDEV_TX_OK;
+}
+
+static int emac_set_mac_address(struct net_device *ndev, void *addr)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+ int ret = eth_mac_addr(ndev, addr);
+
+ if (ret)
+ return ret;
+
+	/* If running, set now; if not, it will be set in emac_up() */
+ if (netif_running(ndev))
+ emac_set_mac_addr(priv, ndev->dev_addr);
+
+ return 0;
+}
+
+static void emac_mac_multicast_filter_clear(struct emac_priv *priv)
+{
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, 0x0);
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, 0x0);
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, 0x0);
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, 0x0);
+}
+
+/*
+ * The upper 6 bits of the Ethernet CRC of the MAC address are used as the hash
+ * when matching multicast addresses.
+ */
+static u32 emac_ether_addr_hash(u8 addr[ETH_ALEN])
+{
+ u32 crc32 = ether_crc(ETH_ALEN, addr);
+
+ return crc32 >> 26;
+}
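+
+/*
+ * Example (hypothetical hash value): a hash of 37 selects
+ * mc_filter[37 / 16] = mc_filter[2], bit 37 % 16 = 5, which is then
+ * written to MAC_MULTICAST_HASH_TABLE3 in emac_set_rx_mode() below.
+ */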
+
+/* Configure Multicast and Promiscuous modes */
+static void emac_set_rx_mode(struct net_device *ndev)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+ struct netdev_hw_addr *ha;
+ u32 mc_filter[4] = { 0 };
+ u32 hash, reg, bit, val;
+
+ val = emac_rd(priv, MAC_ADDRESS_CONTROL);
+
+ val &= ~MREGBIT_PROMISCUOUS_MODE;
+
+ if (ndev->flags & IFF_PROMISC) {
+ /* Enable promisc mode */
+ val |= MREGBIT_PROMISCUOUS_MODE;
+ } else if ((ndev->flags & IFF_ALLMULTI) ||
+ (netdev_mc_count(ndev) > HASH_TABLE_SIZE)) {
+ /* Accept all multicast frames by setting every bit */
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, 0xffff);
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, 0xffff);
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, 0xffff);
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, 0xffff);
+ } else if (!netdev_mc_empty(ndev)) {
+ emac_mac_multicast_filter_clear(priv);
+ netdev_for_each_mc_addr(ha, ndev) {
+ /*
+ * The hash table is an array of 4 16-bit registers. It
+ * is treated like an array of 64 bits (bits[hash]).
+ */
+ hash = emac_ether_addr_hash(ha->addr);
+ reg = hash / 16;
+ bit = hash % 16;
+ mc_filter[reg] |= BIT(bit);
+ }
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, mc_filter[0]);
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, mc_filter[1]);
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, mc_filter[2]);
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, mc_filter[3]);
+ }
+
+ emac_wr(priv, MAC_ADDRESS_CONTROL, val);
+}
+
+static int emac_change_mtu(struct net_device *ndev, int mtu)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+ u32 frame_len;
+
+ if (netif_running(ndev)) {
+ netdev_err(ndev, "must be stopped to change MTU\n");
+ return -EBUSY;
+ }
+
+ frame_len = mtu + ETH_HLEN + ETH_FCS_LEN;
+
+ if (frame_len <= EMAC_DEFAULT_BUFSIZE)
+ priv->dma_buf_sz = EMAC_DEFAULT_BUFSIZE;
+ else if (frame_len <= EMAC_RX_BUF_2K)
+ priv->dma_buf_sz = EMAC_RX_BUF_2K;
+ else
+ priv->dma_buf_sz = EMAC_RX_BUF_4K;
+
+ ndev->mtu = mtu;
+
+ return 0;
+}
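+
+/*
+ * Example: an MTU of 1500 gives frame_len = 1500 + 14 + 4 = 1518, which
+ * fits in EMAC_DEFAULT_BUFSIZE (1536); a hypothetical MTU of 3000 gives
+ * frame_len = 3018 and therefore selects the 4 KiB receive buffer.
+ */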
+
+static void emac_tx_timeout(struct net_device *ndev, unsigned int txqueue)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+
+ schedule_work(&priv->tx_timeout_task);
+}
+
+static int emac_mii_read(struct mii_bus *bus, int phy_addr, int regnum)
+{
+ struct emac_priv *priv = bus->priv;
+ u32 cmd = 0, val;
+ int ret;
+
+ cmd |= FIELD_PREP(MREGBIT_PHY_ADDRESS, phy_addr);
+ cmd |= FIELD_PREP(MREGBIT_REGISTER_ADDRESS, regnum);
+ cmd |= MREGBIT_START_MDIO_TRANS | MREGBIT_MDIO_READ_WRITE;
+
+ emac_wr(priv, MAC_MDIO_DATA, 0x0);
+ emac_wr(priv, MAC_MDIO_CONTROL, cmd);
+
+ ret = readl_poll_timeout(priv->iobase + MAC_MDIO_CONTROL, val,
+ !(val & MREGBIT_START_MDIO_TRANS), 100, 10000);
+
+ if (ret)
+ return ret;
+
+ val = emac_rd(priv, MAC_MDIO_DATA);
+ return FIELD_GET(MREGBIT_MDIO_DATA, val);
+}
+
+static int emac_mii_write(struct mii_bus *bus, int phy_addr, int regnum,
+ u16 value)
+{
+ struct emac_priv *priv = bus->priv;
+ u32 cmd = 0, val;
+ int ret;
+
+ emac_wr(priv, MAC_MDIO_DATA, value);
+
+ cmd |= FIELD_PREP(MREGBIT_PHY_ADDRESS, phy_addr);
+ cmd |= FIELD_PREP(MREGBIT_REGISTER_ADDRESS, regnum);
+ cmd |= MREGBIT_START_MDIO_TRANS;
+
+ emac_wr(priv, MAC_MDIO_CONTROL, cmd);
+
+ ret = readl_poll_timeout(priv->iobase + MAC_MDIO_CONTROL, val,
+ !(val & MREGBIT_START_MDIO_TRANS), 100, 10000);
+
+ return ret;
+}
+
+static int emac_mdio_init(struct emac_priv *priv)
+{
+ struct device *dev = &priv->pdev->dev;
+ struct device_node *mii_np;
+ struct mii_bus *mii;
+ int ret;
+
+ mii = devm_mdiobus_alloc(dev);
+ if (!mii)
+ return -ENOMEM;
+
+ mii->priv = priv;
+ mii->name = "k1_emac_mii";
+ mii->read = emac_mii_read;
+ mii->write = emac_mii_write;
+ mii->parent = dev;
+ mii->phy_mask = ~0;
+ snprintf(mii->id, MII_BUS_ID_SIZE, "%s", priv->pdev->name);
+
+ mii_np = of_get_available_child_by_name(dev->of_node, "mdio-bus");
+
+ ret = devm_of_mdiobus_register(dev, mii, mii_np);
+ if (ret)
+ dev_err_probe(dev, ret, "Failed to register mdio bus\n");
+
+ of_node_put(mii_np);
+ return ret;
+}
+
+static void emac_set_tx_fc(struct emac_priv *priv, bool enable)
+{
+ u32 val;
+
+ val = emac_rd(priv, MAC_FC_CONTROL);
+
+ FIELD_MODIFY(MREGBIT_FC_GENERATION_ENABLE, &val, enable);
+ FIELD_MODIFY(MREGBIT_AUTO_FC_GENERATION_ENABLE, &val, enable);
+
+ emac_wr(priv, MAC_FC_CONTROL, val);
+}
+
+static void emac_set_rx_fc(struct emac_priv *priv, bool enable)
+{
+ u32 val = emac_rd(priv, MAC_FC_CONTROL);
+
+ FIELD_MODIFY(MREGBIT_FC_DECODE_ENABLE, &val, enable);
+
+ emac_wr(priv, MAC_FC_CONTROL, val);
+}
+
+static void emac_set_fc(struct emac_priv *priv, u8 fc)
+{
+ emac_set_tx_fc(priv, fc & FLOW_CTRL_TX);
+ emac_set_rx_fc(priv, fc & FLOW_CTRL_RX);
+ priv->flow_control = fc;
+}
+
+static void emac_set_fc_autoneg(struct emac_priv *priv)
+{
+ struct phy_device *phydev = priv->ndev->phydev;
+ u32 local_adv, remote_adv;
+ u8 fc;
+
+ local_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising);
+
+ remote_adv = 0;
+
+ if (phydev->pause)
+ remote_adv |= LPA_PAUSE_CAP;
+
+ if (phydev->asym_pause)
+ remote_adv |= LPA_PAUSE_ASYM;
+
+ fc = mii_resolve_flowctrl_fdx(local_adv, remote_adv);
+
+ priv->flow_control_autoneg = true;
+
+ emac_set_fc(priv, fc);
+}
+
+/*
+ * Even though this MAC supports gigabit operation, it only provides 32-bit
+ * statistics counters. The most overflow-prone counters are the "bytes"
+ * ones: at gigabit line rate (~125 MB/s) a 32-bit byte counter wraps every
+ * 2^32 / 125e6 ~= 34 seconds, i.e. about twice a minute.
+ *
+ * Therefore, we maintain the high 32 bits of counters ourselves, incrementing
+ * every time statistics seem to go backwards. Also, update periodically to
+ * catch overflows when we are not otherwise checking the statistics often
+ * enough.
+ */
+
+#define EMAC_STATS_TIMER_PERIOD 20
+
+static int emac_read_stat_cnt(struct emac_priv *priv, u8 cnt, u32 *res,
+ u32 control_reg, u32 high_reg, u32 low_reg)
+{
+ u32 val, high, low;
+ int ret;
+
+ /* The "read" bit is the same for TX and RX */
+
+ val = MREGBIT_START_TX_COUNTER_READ | cnt;
+ emac_wr(priv, control_reg, val);
+ val = emac_rd(priv, control_reg);
+
+ ret = readl_poll_timeout_atomic(priv->iobase + control_reg, val,
+ !(val & MREGBIT_START_TX_COUNTER_READ),
+ 100, 10000);
+
+ if (ret) {
+ netdev_err(priv->ndev, "Read stat timeout\n");
+ return ret;
+ }
+
+ high = emac_rd(priv, high_reg);
+ low = emac_rd(priv, low_reg);
+ *res = high << 16 | lower_16_bits(low);
+
+ return 0;
+}
+
+static int emac_tx_read_stat_cnt(struct emac_priv *priv, u8 cnt, u32 *res)
+{
+ return emac_read_stat_cnt(priv, cnt, res, MAC_TX_STATCTR_CONTROL,
+ MAC_TX_STATCTR_DATA_HIGH,
+ MAC_TX_STATCTR_DATA_LOW);
+}
+
+static int emac_rx_read_stat_cnt(struct emac_priv *priv, u8 cnt, u32 *res)
+{
+ return emac_read_stat_cnt(priv, cnt, res, MAC_RX_STATCTR_CONTROL,
+ MAC_RX_STATCTR_DATA_HIGH,
+ MAC_RX_STATCTR_DATA_LOW);
+}
+
+static void emac_update_counter(u64 *counter, u32 new_low)
+{
+ u32 old_low = lower_32_bits(*counter);
+ u64 high = upper_32_bits(*counter);
+
+ if (old_low > new_low) {
+ /* Overflowed, increment high 32 bits */
+ high++;
+ }
+
+ *counter = (high << 32) | new_low;
+}
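+
+/*
+ * Example (hypothetical values): if *counter was 0x1_ffff_fff0 and the
+ * hardware now reports new_low = 0x10, then old_low (0xfffffff0) >
+ * new_low, so high is bumped to 2 and the result is 0x2_0000_0010.
+ */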
+
+static void emac_stats_update(struct emac_priv *priv)
+{
+ u64 *tx_stats_off = priv->tx_stats_off.array;
+ u64 *rx_stats_off = priv->rx_stats_off.array;
+ u64 *tx_stats = priv->tx_stats.array;
+ u64 *rx_stats = priv->rx_stats.array;
+ u32 i, res, offset;
+
+ assert_spin_locked(&priv->stats_lock);
+
+ if (!netif_running(priv->ndev) || !netif_device_present(priv->ndev)) {
+ /* Not up, don't try to update */
+ return;
+ }
+
+ for (i = 0; i < sizeof(priv->tx_stats) / sizeof(*tx_stats); i++) {
+ /*
+ * If reading stats times out, everything is broken and there's
+		 * nothing we can do. The stats callbacks also have no way to
+		 * propagate an error, so just return without updating and without
+ * rescheduling.
+ */
+ if (emac_tx_read_stat_cnt(priv, i, &res))
+ return;
+
+ /*
+ * Re-initializing while bringing interface up resets counters
+ * to zero, so to provide continuity, we add the values saved
+ * last time we did emac_down() to the new hardware-provided
+ * value.
+ */
+ offset = lower_32_bits(tx_stats_off[i]);
+ emac_update_counter(&tx_stats[i], res + offset);
+ }
+
+	/* The same remarks as for the TX stats apply */
+ for (i = 0; i < sizeof(priv->rx_stats) / sizeof(*rx_stats); i++) {
+ if (emac_rx_read_stat_cnt(priv, i, &res))
+ return;
+ offset = lower_32_bits(rx_stats_off[i]);
+ emac_update_counter(&rx_stats[i], res + offset);
+ }
+
+ mod_timer(&priv->stats_timer, jiffies + EMAC_STATS_TIMER_PERIOD * HZ);
+}
+
+static void emac_stats_timer(struct timer_list *t)
+{
+ struct emac_priv *priv = timer_container_of(priv, t, stats_timer);
+
+ spin_lock(&priv->stats_lock);
+
+ emac_stats_update(priv);
+
+ spin_unlock(&priv->stats_lock);
+}
+
+static const struct ethtool_rmon_hist_range emac_rmon_hist_ranges[] = {
+ { 64, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, 4096 },
+ { /* sentinel */ },
+};
+
+/* Like dev_fetch_dstats(), but we only use tx_drops */
+static u64 emac_get_stat_tx_drops(struct emac_priv *priv)
+{
+ const struct pcpu_dstats *stats;
+ u64 tx_drops, total = 0;
+ unsigned int start;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ stats = per_cpu_ptr(priv->ndev->dstats, cpu);
+ do {
+ start = u64_stats_fetch_begin(&stats->syncp);
+ tx_drops = u64_stats_read(&stats->tx_drops);
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
+
+ total += tx_drops;
+ }
+
+ return total;
+}
+
+static void emac_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *storage)
+{
+ struct emac_priv *priv = netdev_priv(dev);
+ union emac_hw_tx_stats *tx_stats;
+ union emac_hw_rx_stats *rx_stats;
+
+ tx_stats = &priv->tx_stats;
+ rx_stats = &priv->rx_stats;
+
+ /* This is the only software counter */
+ storage->tx_dropped = emac_get_stat_tx_drops(priv);
+
+ spin_lock_bh(&priv->stats_lock);
+
+ emac_stats_update(priv);
+
+ storage->tx_packets = tx_stats->stats.tx_ok_pkts;
+ storage->tx_bytes = tx_stats->stats.tx_ok_bytes;
+ storage->tx_errors = tx_stats->stats.tx_err_pkts;
+
+ storage->rx_packets = rx_stats->stats.rx_ok_pkts;
+ storage->rx_bytes = rx_stats->stats.rx_ok_bytes;
+ storage->rx_errors = rx_stats->stats.rx_err_total_pkts;
+ storage->rx_crc_errors = rx_stats->stats.rx_crc_err_pkts;
+ storage->rx_frame_errors = rx_stats->stats.rx_align_err_pkts;
+ storage->rx_length_errors = rx_stats->stats.rx_len_err_pkts;
+
+ storage->collisions = tx_stats->stats.tx_singleclsn_pkts;
+ storage->collisions += tx_stats->stats.tx_multiclsn_pkts;
+ storage->collisions += tx_stats->stats.tx_excessclsn_pkts;
+
+ storage->rx_missed_errors = rx_stats->stats.rx_drp_fifo_full_pkts;
+ storage->rx_missed_errors += rx_stats->stats.rx_truncate_fifo_full_pkts;
+
+ spin_unlock_bh(&priv->stats_lock);
+}
+
+static void emac_get_rmon_stats(struct net_device *dev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct emac_priv *priv = netdev_priv(dev);
+ union emac_hw_rx_stats *rx_stats;
+
+ rx_stats = &priv->rx_stats;
+
+ *ranges = emac_rmon_hist_ranges;
+
+ spin_lock_bh(&priv->stats_lock);
+
+ emac_stats_update(priv);
+
+ rmon_stats->undersize_pkts = rx_stats->stats.rx_len_undersize_pkts;
+ rmon_stats->oversize_pkts = rx_stats->stats.rx_len_oversize_pkts;
+ rmon_stats->fragments = rx_stats->stats.rx_len_fragment_pkts;
+ rmon_stats->jabbers = rx_stats->stats.rx_len_jabber_pkts;
+
+ /* Only RX has histogram stats */
+
+ rmon_stats->hist[0] = rx_stats->stats.rx_64_pkts;
+ rmon_stats->hist[1] = rx_stats->stats.rx_65_127_pkts;
+ rmon_stats->hist[2] = rx_stats->stats.rx_128_255_pkts;
+ rmon_stats->hist[3] = rx_stats->stats.rx_256_511_pkts;
+ rmon_stats->hist[4] = rx_stats->stats.rx_512_1023_pkts;
+ rmon_stats->hist[5] = rx_stats->stats.rx_1024_1518_pkts;
+ rmon_stats->hist[6] = rx_stats->stats.rx_1519_plus_pkts;
+
+ spin_unlock_bh(&priv->stats_lock);
+}
+
+static void emac_get_eth_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct emac_priv *priv = netdev_priv(dev);
+ union emac_hw_tx_stats *tx_stats;
+ union emac_hw_rx_stats *rx_stats;
+
+ tx_stats = &priv->tx_stats;
+ rx_stats = &priv->rx_stats;
+
+ spin_lock_bh(&priv->stats_lock);
+
+ emac_stats_update(priv);
+
+ mac_stats->MulticastFramesXmittedOK = tx_stats->stats.tx_multicast_pkts;
+ mac_stats->BroadcastFramesXmittedOK = tx_stats->stats.tx_broadcast_pkts;
+
+ mac_stats->MulticastFramesReceivedOK =
+ rx_stats->stats.rx_multicast_pkts;
+ mac_stats->BroadcastFramesReceivedOK =
+ rx_stats->stats.rx_broadcast_pkts;
+
+ mac_stats->SingleCollisionFrames = tx_stats->stats.tx_singleclsn_pkts;
+ mac_stats->MultipleCollisionFrames = tx_stats->stats.tx_multiclsn_pkts;
+ mac_stats->LateCollisions = tx_stats->stats.tx_lateclsn_pkts;
+ mac_stats->FramesAbortedDueToXSColls =
+ tx_stats->stats.tx_excessclsn_pkts;
+
+ spin_unlock_bh(&priv->stats_lock);
+}
+
+static void emac_get_pause_stats(struct net_device *dev,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct emac_priv *priv = netdev_priv(dev);
+ union emac_hw_tx_stats *tx_stats;
+ union emac_hw_rx_stats *rx_stats;
+
+ tx_stats = &priv->tx_stats;
+ rx_stats = &priv->rx_stats;
+
+ spin_lock_bh(&priv->stats_lock);
+
+ emac_stats_update(priv);
+
+ pause_stats->tx_pause_frames = tx_stats->stats.tx_pause_pkts;
+ pause_stats->rx_pause_frames = rx_stats->stats.rx_pause_pkts;
+
+ spin_unlock_bh(&priv->stats_lock);
+}
+
+/* Other statistics that are not derivable from standard statistics */
+
+#define EMAC_ETHTOOL_STAT(type, name) \
+ { offsetof(type, stats.name) / sizeof(u64), #name }
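+
+/*
+ * For example, EMAC_ETHTOOL_STAT(union emac_hw_rx_stats,
+ * rx_drp_fifo_full_pkts) expands to the u64-array index of that counter
+ * within the union, paired with the string "rx_drp_fifo_full_pkts";
+ * emac_get_ethtool_stats() then indexes priv->rx_stats with it.
+ */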
+
+static const struct emac_ethtool_stats {
+ size_t offset;
+ char str[ETH_GSTRING_LEN];
+} emac_ethtool_rx_stats[] = {
+ EMAC_ETHTOOL_STAT(union emac_hw_rx_stats, rx_drp_fifo_full_pkts),
+ EMAC_ETHTOOL_STAT(union emac_hw_rx_stats, rx_truncate_fifo_full_pkts),
+};
+
+static int emac_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(emac_ethtool_rx_stats);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void emac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < ARRAY_SIZE(emac_ethtool_rx_stats); i++) {
+ memcpy(data, emac_ethtool_rx_stats[i].str,
+ ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+}
+
+static void emac_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct emac_priv *priv = netdev_priv(dev);
+ u64 *rx_stats = (u64 *)&priv->rx_stats;
+ int i;
+
+ spin_lock_bh(&priv->stats_lock);
+
+ emac_stats_update(priv);
+
+ for (i = 0; i < ARRAY_SIZE(emac_ethtool_rx_stats); i++)
+ data[i] = rx_stats[emac_ethtool_rx_stats[i].offset];
+
+ spin_unlock_bh(&priv->stats_lock);
+}
+
+static int emac_ethtool_get_regs_len(struct net_device *dev)
+{
+ return (EMAC_DMA_REG_CNT + EMAC_MAC_REG_CNT) * sizeof(u32);
+}
+
+static void emac_ethtool_get_regs(struct net_device *dev,
+ struct ethtool_regs *regs, void *space)
+{
+ struct emac_priv *priv = netdev_priv(dev);
+ u32 *reg_space = space;
+ int i;
+
+ regs->version = 1;
+
+ for (i = 0; i < EMAC_DMA_REG_CNT; i++)
+ reg_space[i] = emac_rd(priv, DMA_CONFIGURATION + i * 4);
+
+ for (i = 0; i < EMAC_MAC_REG_CNT; i++)
+ reg_space[i + EMAC_DMA_REG_CNT] =
+ emac_rd(priv, MAC_GLOBAL_CONTROL + i * 4);
+}
+
+static void emac_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct emac_priv *priv = netdev_priv(dev);
+
+ pause->autoneg = priv->flow_control_autoneg;
+ pause->tx_pause = !!(priv->flow_control & FLOW_CTRL_TX);
+ pause->rx_pause = !!(priv->flow_control & FLOW_CTRL_RX);
+}
+
+static int emac_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct emac_priv *priv = netdev_priv(dev);
+ u8 fc = 0;
+
+ priv->flow_control_autoneg = pause->autoneg;
+
+ if (pause->autoneg) {
+ emac_set_fc_autoneg(priv);
+ } else {
+ if (pause->tx_pause)
+ fc |= FLOW_CTRL_TX;
+
+ if (pause->rx_pause)
+ fc |= FLOW_CTRL_RX;
+
+ emac_set_fc(priv, fc);
+ }
+
+ return 0;
+}
+
+static void emac_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+ info->n_stats = ARRAY_SIZE(emac_ethtool_rx_stats);
+}
+
+static void emac_tx_timeout_task(struct work_struct *work)
+{
+ struct net_device *ndev;
+ struct emac_priv *priv;
+
+ priv = container_of(work, struct emac_priv, tx_timeout_task);
+ ndev = priv->ndev;
+
+ rtnl_lock();
+
+ /* No need to reset if already down */
+ if (!netif_running(ndev)) {
+ rtnl_unlock();
+ return;
+ }
+
+ netdev_err(ndev, "MAC reset due to TX timeout\n");
+
+ netif_trans_update(ndev); /* prevent tx timeout */
+ dev_close(ndev);
+ dev_open(ndev, NULL);
+
+ rtnl_unlock();
+}
+
+static void emac_sw_init(struct emac_priv *priv)
+{
+ priv->dma_buf_sz = EMAC_DEFAULT_BUFSIZE;
+
+ priv->tx_ring.total_cnt = DEFAULT_TX_RING_NUM;
+ priv->rx_ring.total_cnt = DEFAULT_RX_RING_NUM;
+
+ spin_lock_init(&priv->stats_lock);
+
+ INIT_WORK(&priv->tx_timeout_task, emac_tx_timeout_task);
+
+ priv->tx_coal_frames = EMAC_TX_FRAMES;
+ priv->tx_coal_timeout = EMAC_TX_COAL_TIMEOUT;
+
+ timer_setup(&priv->txtimer, emac_tx_coal_timer, 0);
+ timer_setup(&priv->stats_timer, emac_stats_timer, 0);
+}
+
+static irqreturn_t emac_interrupt_handler(int irq, void *dev_id)
+{
+ struct net_device *ndev = (struct net_device *)dev_id;
+ struct emac_priv *priv = netdev_priv(ndev);
+ bool should_schedule = false;
+ u32 clr = 0;
+ u32 status;
+
+ status = emac_rd(priv, DMA_STATUS_IRQ);
+
+ if (status & MREGBIT_TRANSMIT_TRANSFER_DONE_IRQ) {
+ clr |= MREGBIT_TRANSMIT_TRANSFER_DONE_IRQ;
+ should_schedule = true;
+ }
+
+ if (status & MREGBIT_TRANSMIT_DES_UNAVAILABLE_IRQ)
+ clr |= MREGBIT_TRANSMIT_DES_UNAVAILABLE_IRQ;
+
+ if (status & MREGBIT_TRANSMIT_DMA_STOPPED_IRQ)
+ clr |= MREGBIT_TRANSMIT_DMA_STOPPED_IRQ;
+
+ if (status & MREGBIT_RECEIVE_TRANSFER_DONE_IRQ) {
+ clr |= MREGBIT_RECEIVE_TRANSFER_DONE_IRQ;
+ should_schedule = true;
+ }
+
+ if (status & MREGBIT_RECEIVE_DES_UNAVAILABLE_IRQ)
+ clr |= MREGBIT_RECEIVE_DES_UNAVAILABLE_IRQ;
+
+ if (status & MREGBIT_RECEIVE_DMA_STOPPED_IRQ)
+ clr |= MREGBIT_RECEIVE_DMA_STOPPED_IRQ;
+
+ if (status & MREGBIT_RECEIVE_MISSED_FRAME_IRQ)
+ clr |= MREGBIT_RECEIVE_MISSED_FRAME_IRQ;
+
+ if (should_schedule) {
+ if (napi_schedule_prep(&priv->napi)) {
+ emac_disable_interrupt(priv);
+ __napi_schedule_irqoff(&priv->napi);
+ }
+ }
+
+ emac_wr(priv, DMA_STATUS_IRQ, clr);
+
+ return IRQ_HANDLED;
+}
+
+static void emac_configure_tx(struct emac_priv *priv)
+{
+ u32 val;
+
+ /* Set base address */
+ val = (u32)priv->tx_ring.desc_dma_addr;
+ emac_wr(priv, DMA_TRANSMIT_BASE_ADDRESS, val);
+
+ /* Set TX inter-frame gap value, enable transmit */
+ val = emac_rd(priv, MAC_TRANSMIT_CONTROL);
+ val &= ~MREGBIT_IFG_LEN;
+ val |= MREGBIT_TRANSMIT_ENABLE;
+ val |= MREGBIT_TRANSMIT_AUTO_RETRY;
+ emac_wr(priv, MAC_TRANSMIT_CONTROL, val);
+
+ emac_wr(priv, DMA_TRANSMIT_AUTO_POLL_COUNTER, 0x0);
+
+ /* Start TX DMA */
+ val = emac_rd(priv, DMA_CONTROL);
+ val |= MREGBIT_START_STOP_TRANSMIT_DMA;
+ emac_wr(priv, DMA_CONTROL, val);
+}
+
+static void emac_configure_rx(struct emac_priv *priv)
+{
+ u32 val;
+
+ /* Set base address */
+ val = (u32)priv->rx_ring.desc_dma_addr;
+ emac_wr(priv, DMA_RECEIVE_BASE_ADDRESS, val);
+
+ /* Enable receive */
+ val = emac_rd(priv, MAC_RECEIVE_CONTROL);
+ val |= MREGBIT_RECEIVE_ENABLE;
+ val |= MREGBIT_STORE_FORWARD;
+ emac_wr(priv, MAC_RECEIVE_CONTROL, val);
+
+ /* Start RX DMA */
+ val = emac_rd(priv, DMA_CONTROL);
+ val |= MREGBIT_START_STOP_RECEIVE_DMA;
+ emac_wr(priv, DMA_CONTROL, val);
+}
+
+static void emac_adjust_link(struct net_device *dev)
+{
+ struct emac_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev = dev->phydev;
+ u32 ctrl;
+
+ if (phydev->link) {
+ ctrl = emac_rd(priv, MAC_GLOBAL_CONTROL);
+
+ /* Update duplex and speed from PHY */
+
+ FIELD_MODIFY(MREGBIT_FULL_DUPLEX_MODE, &ctrl,
+ phydev->duplex == DUPLEX_FULL);
+
+ ctrl &= ~MREGBIT_SPEED;
+
+ switch (phydev->speed) {
+ case SPEED_1000:
+ ctrl |= MREGBIT_SPEED_1000M;
+ break;
+ case SPEED_100:
+ ctrl |= MREGBIT_SPEED_100M;
+ break;
+ case SPEED_10:
+ ctrl |= MREGBIT_SPEED_10M;
+ break;
+ default:
+ netdev_err(dev, "Unknown speed: %d\n", phydev->speed);
+ phydev->speed = SPEED_UNKNOWN;
+ break;
+ }
+
+ emac_wr(priv, MAC_GLOBAL_CONTROL, ctrl);
+
+ emac_set_fc_autoneg(priv);
+ }
+
+ phy_print_status(phydev);
+}
+
+static void emac_update_delay_line(struct emac_priv *priv)
+{
+ u32 mask = 0, val = 0;
+
+ mask |= EMAC_RX_DLINE_EN;
+ mask |= EMAC_RX_DLINE_STEP_MASK | EMAC_RX_DLINE_CODE_MASK;
+ mask |= EMAC_TX_DLINE_EN;
+ mask |= EMAC_TX_DLINE_STEP_MASK | EMAC_TX_DLINE_CODE_MASK;
+
+ if (phy_interface_mode_is_rgmii(priv->phy_interface)) {
+ val |= EMAC_RX_DLINE_EN;
+ val |= FIELD_PREP(EMAC_RX_DLINE_STEP_MASK,
+ EMAC_DLINE_STEP_15P6);
+ val |= FIELD_PREP(EMAC_RX_DLINE_CODE_MASK, priv->rx_delay);
+
+ val |= EMAC_TX_DLINE_EN;
+ val |= FIELD_PREP(EMAC_TX_DLINE_STEP_MASK,
+ EMAC_DLINE_STEP_15P6);
+ val |= FIELD_PREP(EMAC_TX_DLINE_CODE_MASK, priv->tx_delay);
+ }
+
+ regmap_update_bits(priv->regmap_apmu,
+ priv->regmap_apmu_offset + APMU_EMAC_DLINE_REG,
+ mask, val);
+}
+
+static int emac_phy_connect(struct net_device *ndev)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+ struct device *dev = &priv->pdev->dev;
+ struct phy_device *phydev;
+ struct device_node *np;
+ int ret;
+
+ ret = of_get_phy_mode(dev->of_node, &priv->phy_interface);
+ if (ret) {
+		netdev_err(ndev, "No phy-mode found\n");
+ return ret;
+ }
+
+ switch (priv->phy_interface) {
+ case PHY_INTERFACE_MODE_RMII:
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ break;
+ default:
+		netdev_err(ndev, "Unsupported PHY interface %s\n",
+ phy_modes(priv->phy_interface));
+ return -EINVAL;
+ }
+
+ np = of_parse_phandle(dev->of_node, "phy-handle", 0);
+ if (!np && of_phy_is_fixed_link(dev->of_node))
+ np = of_node_get(dev->of_node);
+
+ if (!np) {
+		netdev_err(ndev, "No PHY specified\n");
+ return -ENODEV;
+ }
+
+ ret = emac_phy_interface_config(priv);
+ if (ret)
+ goto err_node_put;
+
+ phydev = of_phy_connect(ndev, np, &emac_adjust_link, 0,
+ priv->phy_interface);
+ if (!phydev) {
+ netdev_err(ndev, "Could not attach to PHY\n");
+ ret = -ENODEV;
+ goto err_node_put;
+ }
+
+ phy_support_asym_pause(phydev);
+
+ phydev->mac_managed_pm = true;
+
+ emac_update_delay_line(priv);
+
+err_node_put:
+ of_node_put(np);
+ return ret;
+}
+
+static int emac_up(struct emac_priv *priv)
+{
+ struct platform_device *pdev = priv->pdev;
+ struct net_device *ndev = priv->ndev;
+ int ret;
+
+ pm_runtime_get_sync(&pdev->dev);
+
+ ret = emac_phy_connect(ndev);
+ if (ret) {
+ dev_err(&pdev->dev, "emac_phy_connect failed\n");
+ goto err_pm_put;
+ }
+
+ emac_init_hw(priv);
+
+ emac_set_mac_addr(priv, ndev->dev_addr);
+ emac_configure_tx(priv);
+ emac_configure_rx(priv);
+
+ emac_alloc_rx_desc_buffers(priv);
+
+ phy_start(ndev->phydev);
+
+ ret = request_irq(priv->irq, emac_interrupt_handler, IRQF_SHARED,
+ ndev->name, ndev);
+ if (ret) {
+ dev_err(&pdev->dev, "request_irq failed\n");
+ goto err_reset_disconnect_phy;
+ }
+
+ /* Don't enable MAC interrupts */
+ emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0);
+
+ /* Enable DMA interrupts */
+ emac_wr(priv, DMA_INTERRUPT_ENABLE,
+ MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE |
+ MREGBIT_TRANSMIT_DMA_STOPPED_INTR_ENABLE |
+ MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE |
+ MREGBIT_RECEIVE_DMA_STOPPED_INTR_ENABLE |
+ MREGBIT_RECEIVE_MISSED_FRAME_INTR_ENABLE);
+
+ napi_enable(&priv->napi);
+
+ netif_start_queue(ndev);
+
+ mod_timer(&priv->stats_timer, jiffies);
+
+ return 0;
+
+err_reset_disconnect_phy:
+ emac_reset_hw(priv);
+ phy_disconnect(ndev->phydev);
+
+err_pm_put:
+ pm_runtime_put_sync(&pdev->dev);
+ return ret;
+}
+
+static int emac_down(struct emac_priv *priv)
+{
+ struct platform_device *pdev = priv->pdev;
+ struct net_device *ndev = priv->ndev;
+
+ netif_stop_queue(ndev);
+
+ phy_disconnect(ndev->phydev);
+
+ emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0);
+ emac_wr(priv, DMA_INTERRUPT_ENABLE, 0x0);
+
+ free_irq(priv->irq, ndev);
+
+ napi_disable(&priv->napi);
+
+ timer_delete_sync(&priv->txtimer);
+ cancel_work_sync(&priv->tx_timeout_task);
+
+ timer_delete_sync(&priv->stats_timer);
+
+ emac_reset_hw(priv);
+
+ /* Update and save current stats, see emac_stats_update() for usage */
+
+ spin_lock_bh(&priv->stats_lock);
+
+ emac_stats_update(priv);
+
+ priv->tx_stats_off = priv->tx_stats;
+ priv->rx_stats_off = priv->rx_stats;
+
+ spin_unlock_bh(&priv->stats_lock);
+
+ pm_runtime_put_sync(&pdev->dev);
+ return 0;
+}
+
+/* Called when net interface is brought up. */
+static int emac_open(struct net_device *ndev)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+ struct device *dev = &priv->pdev->dev;
+ int ret;
+
+ ret = emac_alloc_tx_resources(priv);
+ if (ret) {
+ dev_err(dev, "Cannot allocate TX resources\n");
+ return ret;
+ }
+
+ ret = emac_alloc_rx_resources(priv);
+ if (ret) {
+ dev_err(dev, "Cannot allocate RX resources\n");
+ goto err_free_tx;
+ }
+
+ ret = emac_up(priv);
+ if (ret) {
+ dev_err(dev, "Error when bringing interface up\n");
+ goto err_free_rx;
+ }
+ return 0;
+
+err_free_rx:
+ emac_free_rx_resources(priv);
+err_free_tx:
+ emac_free_tx_resources(priv);
+
+ return ret;
+}
+
+/* Called when interface is brought down. */
+static int emac_stop(struct net_device *ndev)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+
+ emac_down(priv);
+ emac_free_tx_resources(priv);
+ emac_free_rx_resources(priv);
+
+ return 0;
+}
+
+static const struct ethtool_ops emac_ethtool_ops = {
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .nway_reset = phy_ethtool_nway_reset,
+ .get_drvinfo = emac_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+
+ .get_regs = emac_ethtool_get_regs,
+ .get_regs_len = emac_ethtool_get_regs_len,
+
+ .get_rmon_stats = emac_get_rmon_stats,
+ .get_pause_stats = emac_get_pause_stats,
+ .get_eth_mac_stats = emac_get_eth_mac_stats,
+
+ .get_sset_count = emac_get_sset_count,
+ .get_strings = emac_get_strings,
+ .get_ethtool_stats = emac_get_ethtool_stats,
+
+ .get_pauseparam = emac_get_pauseparam,
+ .set_pauseparam = emac_set_pauseparam,
+};
+
+static const struct net_device_ops emac_netdev_ops = {
+ .ndo_open = emac_open,
+ .ndo_stop = emac_stop,
+ .ndo_start_xmit = emac_start_xmit,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = emac_set_mac_address,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
+ .ndo_change_mtu = emac_change_mtu,
+ .ndo_tx_timeout = emac_tx_timeout,
+ .ndo_set_rx_mode = emac_set_rx_mode,
+ .ndo_get_stats64 = emac_get_stats64,
+};
+
+/* Currently we always use 15.6 ps/step for the delay line */
+
+static u32 delay_ps_to_unit(u32 ps)
+{
+ return DIV_ROUND_CLOSEST(ps * 10, 156);
+}
+
+static u32 delay_unit_to_ps(u32 unit)
+{
+ return DIV_ROUND_CLOSEST(unit * 156, 10);
+}
+
+#define EMAC_MAX_DELAY_UNIT FIELD_MAX(EMAC_TX_DLINE_CODE_MASK)
+
+/* Minus one just to be safe from rounding errors */
+#define EMAC_MAX_DELAY_PS (delay_unit_to_ps(EMAC_MAX_DELAY_UNIT - 1))
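+
+/*
+ * Example (hypothetical DT value): rx-internal-delay-ps = 1500 maps to
+ * DIV_ROUND_CLOSEST(1500 * 10, 156) = 96 delay-line units, which rounds
+ * back to about 1498 ps. With the 8-bit code field, EMAC_MAX_DELAY_PS
+ * works out to roughly 3962 ps.
+ */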
+
+static int emac_config_dt(struct platform_device *pdev, struct emac_priv *priv)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ u8 mac_addr[ETH_ALEN] = { 0 };
+ int ret;
+
+ priv->iobase = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->iobase))
+ return dev_err_probe(dev, PTR_ERR(priv->iobase),
+ "ioremap failed\n");
+
+ priv->regmap_apmu =
+ syscon_regmap_lookup_by_phandle_args(np, "spacemit,apmu", 1,
+ &priv->regmap_apmu_offset);
+
+ if (IS_ERR(priv->regmap_apmu))
+ return dev_err_probe(dev, PTR_ERR(priv->regmap_apmu),
+ "failed to get syscon\n");
+
+ priv->irq = platform_get_irq(pdev, 0);
+ if (priv->irq < 0)
+ return priv->irq;
+
+ ret = of_get_mac_address(np, mac_addr);
+ if (ret) {
+ if (ret == -EPROBE_DEFER)
+ return dev_err_probe(dev, ret,
+ "Can't get MAC address\n");
+
+ dev_info(&pdev->dev, "Using random MAC address\n");
+ eth_hw_addr_random(priv->ndev);
+ } else {
+ eth_hw_addr_set(priv->ndev, mac_addr);
+ }
+
+ priv->tx_delay = 0;
+ priv->rx_delay = 0;
+
+ of_property_read_u32(np, "tx-internal-delay-ps", &priv->tx_delay);
+ of_property_read_u32(np, "rx-internal-delay-ps", &priv->rx_delay);
+
+ if (priv->tx_delay > EMAC_MAX_DELAY_PS) {
+		dev_err(&pdev->dev,
+			"tx-internal-delay-ps too large: max %d, got %d\n",
+ EMAC_MAX_DELAY_PS, priv->tx_delay);
+ return -EINVAL;
+ }
+
+ if (priv->rx_delay > EMAC_MAX_DELAY_PS) {
+		dev_err(&pdev->dev,
+			"rx-internal-delay-ps too large: max %d, got %d\n",
+ EMAC_MAX_DELAY_PS, priv->rx_delay);
+ return -EINVAL;
+ }
+
+ priv->tx_delay = delay_ps_to_unit(priv->tx_delay);
+ priv->rx_delay = delay_ps_to_unit(priv->rx_delay);
+
+ return 0;
+}
+
+static void emac_phy_deregister_fixed_link(void *data)
+{
+ struct device_node *of_node = data;
+
+ of_phy_deregister_fixed_link(of_node);
+}
+
+static int emac_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct reset_control *reset;
+ struct net_device *ndev;
+ struct emac_priv *priv;
+ int ret;
+
+ ndev = devm_alloc_etherdev(dev, sizeof(struct emac_priv));
+ if (!ndev)
+ return -ENOMEM;
+
+ ndev->hw_features = NETIF_F_SG;
+ ndev->features |= ndev->hw_features;
+
+ ndev->max_mtu = EMAC_RX_BUF_4K - (ETH_HLEN + ETH_FCS_LEN);
+ ndev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;
+
+ priv = netdev_priv(ndev);
+ priv->ndev = ndev;
+ priv->pdev = pdev;
+ platform_set_drvdata(pdev, priv);
+
+ ret = emac_config_dt(pdev, priv);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Configuration failed\n");
+
+ ndev->watchdog_timeo = 5 * HZ;
+ ndev->base_addr = (unsigned long)priv->iobase;
+ ndev->irq = priv->irq;
+
+ ndev->ethtool_ops = &emac_ethtool_ops;
+ ndev->netdev_ops = &emac_netdev_ops;
+
+ devm_pm_runtime_enable(&pdev->dev);
+
+ priv->bus_clk = devm_clk_get_enabled(&pdev->dev, NULL);
+ if (IS_ERR(priv->bus_clk))
+ return dev_err_probe(dev, PTR_ERR(priv->bus_clk),
+ "Failed to get clock\n");
+
+ reset = devm_reset_control_get_optional_exclusive_deasserted(&pdev->dev,
+ NULL);
+ if (IS_ERR(reset))
+ return dev_err_probe(dev, PTR_ERR(reset),
+ "Failed to get reset\n");
+
+ if (of_phy_is_fixed_link(dev->of_node)) {
+ ret = of_phy_register_fixed_link(dev->of_node);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to register fixed-link\n");
+
+ ret = devm_add_action_or_reset(dev,
+ emac_phy_deregister_fixed_link,
+ dev->of_node);
+
+ if (ret) {
+ dev_err(dev, "devm_add_action_or_reset failed\n");
+ return ret;
+ }
+ }
+
+ emac_sw_init(priv);
+
+ ret = emac_mdio_init(priv);
+ if (ret)
+ goto err_timer_delete;
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ ret = devm_register_netdev(dev, ndev);
+ if (ret) {
+ dev_err(dev, "devm_register_netdev failed\n");
+ goto err_timer_delete;
+ }
+
+ netif_napi_add(ndev, &priv->napi, emac_rx_poll);
+ netif_carrier_off(ndev);
+
+ return 0;
+
+err_timer_delete:
+ timer_delete_sync(&priv->txtimer);
+ timer_delete_sync(&priv->stats_timer);
+
+ return ret;
+}
+
+static void emac_remove(struct platform_device *pdev)
+{
+ struct emac_priv *priv = platform_get_drvdata(pdev);
+
+ timer_shutdown_sync(&priv->txtimer);
+ cancel_work_sync(&priv->tx_timeout_task);
+
+ timer_shutdown_sync(&priv->stats_timer);
+
+ emac_reset_hw(priv);
+}
+
+static int emac_resume(struct device *dev)
+{
+ struct emac_priv *priv = dev_get_drvdata(dev);
+ struct net_device *ndev = priv->ndev;
+ int ret;
+
+ ret = clk_prepare_enable(priv->bus_clk);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable bus clock: %d\n", ret);
+ return ret;
+ }
+
+ if (!netif_running(ndev))
+ return 0;
+
+ ret = emac_open(ndev);
+ if (ret) {
+ clk_disable_unprepare(priv->bus_clk);
+ return ret;
+ }
+
+ netif_device_attach(ndev);
+
+ mod_timer(&priv->stats_timer, jiffies);
+
+ return 0;
+}
+
+static int emac_suspend(struct device *dev)
+{
+ struct emac_priv *priv = dev_get_drvdata(dev);
+ struct net_device *ndev = priv->ndev;
+
+ if (!ndev || !netif_running(ndev)) {
+ clk_disable_unprepare(priv->bus_clk);
+ return 0;
+ }
+
+ emac_stop(ndev);
+
+ clk_disable_unprepare(priv->bus_clk);
+ netif_device_detach(ndev);
+ return 0;
+}
+
+static const struct dev_pm_ops emac_pm_ops = {
+ SYSTEM_SLEEP_PM_OPS(emac_suspend, emac_resume)
+};
+
+static const struct of_device_id emac_of_match[] = {
+ { .compatible = "spacemit,k1-emac" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, emac_of_match);
+
+static struct platform_driver emac_driver = {
+ .probe = emac_probe,
+ .remove = emac_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = of_match_ptr(emac_of_match),
+ .pm = &emac_pm_ops,
+ },
+};
+module_platform_driver(emac_driver);
+
+MODULE_DESCRIPTION("SpacemiT K1 Ethernet driver");
+MODULE_AUTHOR("Vivian Wang <wangruikang@iscas.ac.cn>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/spacemit/k1_emac.h b/drivers/net/ethernet/spacemit/k1_emac.h
new file mode 100644
index 000000000000..5a09e946a276
--- /dev/null
+++ b/drivers/net/ethernet/spacemit/k1_emac.h
@@ -0,0 +1,416 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * SpacemiT K1 Ethernet hardware definitions
+ *
+ * Copyright (C) 2023-2025 SpacemiT (Hangzhou) Technology Co. Ltd
+ * Copyright (C) 2025 Vivian Wang <wangruikang@iscas.ac.cn>
+ */
+
+#ifndef _K1_EMAC_H_
+#define _K1_EMAC_H_
+
+#include <linux/stddef.h>
+
+/* APMU syscon registers */
+
+#define APMU_EMAC_CTRL_REG 0x0
+
+#define PHY_INTF_RGMII BIT(2)
+
+/*
+ * Only valid for RMII mode
+ * 0: Ref clock from External PHY
+ * 1: Ref clock from SoC
+ */
+#define REF_CLK_SEL BIT(3)
+
+/*
+ * Function clock select
+ * 0: 208 MHz
+ * 1: 312 MHz
+ */
+#define FUNC_CLK_SEL BIT(4)
+
+/* Only valid for RMII, invert TX clk */
+#define RMII_TX_CLK_SEL BIT(6)
+
+/* Only valid for RMII, invert RX clk */
+#define RMII_RX_CLK_SEL BIT(7)
+
+/*
+ * Only valid for RGMII
+ * 0: TX clk from RX clk
+ * 1: TX clk from SoC
+ */
+#define RGMII_TX_CLK_SEL BIT(8)
+
+#define PHY_IRQ_EN BIT(12)
+#define AXI_SINGLE_ID BIT(13)
+
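Note: the CTRL bits above combine interface selection with clock routing. A minimal sketch of selecting RGMII with the TX clock sourced from the SoC; the helper name and direct MMIO access are assumptions, since the driver may reach this register through a syscon regmap instead:

static void emac_select_rgmii_sketch(void __iomem *apmu_base)
{
	u32 val = readl(apmu_base + APMU_EMAC_CTRL_REG);

	/* RGMII interface, TX clock generated on the SoC side */
	val |= PHY_INTF_RGMII | RGMII_TX_CLK_SEL;
	writel(val, apmu_base + APMU_EMAC_CTRL_REG);
}
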
+#define APMU_EMAC_DLINE_REG 0x4
+
+#define EMAC_RX_DLINE_EN BIT(0)
+#define EMAC_RX_DLINE_STEP_MASK GENMASK(5, 4)
+#define EMAC_RX_DLINE_CODE_MASK GENMASK(15, 8)
+
+#define EMAC_TX_DLINE_EN BIT(16)
+#define EMAC_TX_DLINE_STEP_MASK GENMASK(21, 20)
+#define EMAC_TX_DLINE_CODE_MASK GENMASK(31, 24)
+
+#define EMAC_DLINE_STEP_15P6 0 /* 15.6 ps/step */
+#define EMAC_DLINE_STEP_24P4 1 /* 24.4 ps/step */
+#define EMAC_DLINE_STEP_29P7 2 /* 29.7 ps/step */
+#define EMAC_DLINE_STEP_35P1 3 /* 35.1 ps/step */
+
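Note: each delay line is programmed as an enable bit, a step-size select and an 8-bit code, so the resulting delay is roughly code x step (e.g. code 41 at 24.4 ps/step is about 1 ns). A hedged sketch, with FIELD_PREP() from <linux/bitfield.h> and the access path assumed:

static void emac_set_tx_dline_sketch(void __iomem *apmu_base, u32 step, u32 code)
{
	u32 val = readl(apmu_base + APMU_EMAC_DLINE_REG);

	val &= ~(EMAC_TX_DLINE_STEP_MASK | EMAC_TX_DLINE_CODE_MASK);
	val |= FIELD_PREP(EMAC_TX_DLINE_STEP_MASK, step) |
	       FIELD_PREP(EMAC_TX_DLINE_CODE_MASK, code) |
	       EMAC_TX_DLINE_EN;
	writel(val, apmu_base + APMU_EMAC_DLINE_REG);
}
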
+/* DMA register set */
+#define DMA_CONFIGURATION 0x0000
+#define DMA_CONTROL 0x0004
+#define DMA_STATUS_IRQ 0x0008
+#define DMA_INTERRUPT_ENABLE 0x000c
+
+#define DMA_TRANSMIT_AUTO_POLL_COUNTER 0x0010
+#define DMA_TRANSMIT_POLL_DEMAND 0x0014
+#define DMA_RECEIVE_POLL_DEMAND 0x0018
+
+#define DMA_TRANSMIT_BASE_ADDRESS 0x001c
+#define DMA_RECEIVE_BASE_ADDRESS 0x0020
+#define DMA_MISSED_FRAME_COUNTER 0x0024
+#define DMA_STOP_FLUSH_COUNTER 0x0028
+
+#define DMA_RECEIVE_IRQ_MITIGATION_CTRL 0x002c
+
+#define DMA_CURRENT_TRANSMIT_DESCRIPTOR_POINTER 0x0030
+#define DMA_CURRENT_TRANSMIT_BUFFER_POINTER 0x0034
+#define DMA_CURRENT_RECEIVE_DESCRIPTOR_POINTER 0x0038
+#define DMA_CURRENT_RECEIVE_BUFFER_POINTER 0x003c
+
+/* MAC Register set */
+#define MAC_GLOBAL_CONTROL 0x0100
+#define MAC_TRANSMIT_CONTROL 0x0104
+#define MAC_RECEIVE_CONTROL 0x0108
+#define MAC_MAXIMUM_FRAME_SIZE 0x010c
+#define MAC_TRANSMIT_JABBER_SIZE 0x0110
+#define MAC_RECEIVE_JABBER_SIZE 0x0114
+#define MAC_ADDRESS_CONTROL 0x0118
+#define MAC_MDIO_CLK_DIV 0x011c
+#define MAC_ADDRESS1_HIGH 0x0120
+#define MAC_ADDRESS1_MED 0x0124
+#define MAC_ADDRESS1_LOW 0x0128
+#define MAC_ADDRESS2_HIGH 0x012c
+#define MAC_ADDRESS2_MED 0x0130
+#define MAC_ADDRESS2_LOW 0x0134
+#define MAC_ADDRESS3_HIGH 0x0138
+#define MAC_ADDRESS3_MED 0x013c
+#define MAC_ADDRESS3_LOW 0x0140
+#define MAC_ADDRESS4_HIGH 0x0144
+#define MAC_ADDRESS4_MED 0x0148
+#define MAC_ADDRESS4_LOW 0x014c
+#define MAC_MULTICAST_HASH_TABLE1 0x0150
+#define MAC_MULTICAST_HASH_TABLE2 0x0154
+#define MAC_MULTICAST_HASH_TABLE3 0x0158
+#define MAC_MULTICAST_HASH_TABLE4 0x015c
+#define MAC_FC_CONTROL 0x0160
+#define MAC_FC_PAUSE_FRAME_GENERATE 0x0164
+#define MAC_FC_SOURCE_ADDRESS_HIGH 0x0168
+#define MAC_FC_SOURCE_ADDRESS_MED 0x016c
+#define MAC_FC_SOURCE_ADDRESS_LOW 0x0170
+#define MAC_FC_DESTINATION_ADDRESS_HIGH 0x0174
+#define MAC_FC_DESTINATION_ADDRESS_MED 0x0178
+#define MAC_FC_DESTINATION_ADDRESS_LOW 0x017c
+#define MAC_FC_PAUSE_TIME_VALUE 0x0180
+#define MAC_FC_HIGH_PAUSE_TIME 0x0184
+#define MAC_FC_LOW_PAUSE_TIME 0x0188
+#define MAC_FC_PAUSE_HIGH_THRESHOLD 0x018c
+#define MAC_FC_PAUSE_LOW_THRESHOLD 0x0190
+#define MAC_MDIO_CONTROL 0x01a0
+#define MAC_MDIO_DATA 0x01a4
+#define MAC_RX_STATCTR_CONTROL 0x01a8
+#define MAC_RX_STATCTR_DATA_HIGH 0x01ac
+#define MAC_RX_STATCTR_DATA_LOW 0x01b0
+#define MAC_TX_STATCTR_CONTROL 0x01b4
+#define MAC_TX_STATCTR_DATA_HIGH 0x01b8
+#define MAC_TX_STATCTR_DATA_LOW 0x01bc
+#define MAC_TRANSMIT_FIFO_ALMOST_FULL 0x01c0
+#define MAC_TRANSMIT_PACKET_START_THRESHOLD 0x01c4
+#define MAC_RECEIVE_PACKET_START_THRESHOLD 0x01c8
+#define MAC_STATUS_IRQ 0x01e0
+#define MAC_INTERRUPT_ENABLE 0x01e4
+
+/* Used for register dump */
+#define EMAC_DMA_REG_CNT 16
+#define EMAC_MAC_REG_CNT 124
+
+/* DMA_CONFIGURATION (0x0000) */
+
+/*
+ * 0: DMA controller in normal operation mode
+ * 1: DMA controller reset to default state,
+ *    clearing all internal state information
+ */
+#define MREGBIT_SOFTWARE_RESET BIT(0)
+
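Note: per the comment above, bit 0 resets the whole DMA block. A minimal reset sequence could look like the sketch below; that the bit self-clears once the reset completes is an assumption, and readl_poll_timeout() comes from <linux/iopoll.h>:

static int emac_dma_soft_reset_sketch(void __iomem *base)
{
	u32 val = readl(base + DMA_CONFIGURATION);

	writel(val | MREGBIT_SOFTWARE_RESET, base + DMA_CONFIGURATION);

	/* wait for the controller to leave the reset state */
	return readl_poll_timeout(base + DMA_CONFIGURATION, val,
				  !(val & MREGBIT_SOFTWARE_RESET),
				  10, 10000);
}
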
+#define MREGBIT_BURST_1WORD BIT(1)
+#define MREGBIT_BURST_2WORD BIT(2)
+#define MREGBIT_BURST_4WORD BIT(3)
+#define MREGBIT_BURST_8WORD BIT(4)
+#define MREGBIT_BURST_16WORD BIT(5)
+#define MREGBIT_BURST_32WORD BIT(6)
+#define MREGBIT_BURST_64WORD BIT(7)
+#define MREGBIT_BURST_LENGTH GENMASK(7, 1)
+#define MREGBIT_DESCRIPTOR_SKIP_LENGTH GENMASK(12, 8)
+
+/* Selects big-endian mode for receive and transmit DMA descriptors */
+#define MREGBIT_DESCRIPTOR_BYTE_ORDERING BIT(13)
+
+#define MREGBIT_BIG_LITLE_ENDIAN BIT(14)
+#define MREGBIT_TX_RX_ARBITRATION BIT(15)
+#define MREGBIT_WAIT_FOR_DONE BIT(16)
+#define MREGBIT_STRICT_BURST BIT(17)
+#define MREGBIT_DMA_64BIT_MODE BIT(18)
+
+/* DMA_CONTROL (0x0004) */
+#define MREGBIT_START_STOP_TRANSMIT_DMA BIT(0)
+#define MREGBIT_START_STOP_RECEIVE_DMA BIT(1)
+
+/* DMA_STATUS_IRQ (0x0008) */
+#define MREGBIT_TRANSMIT_TRANSFER_DONE_IRQ BIT(0)
+#define MREGBIT_TRANSMIT_DES_UNAVAILABLE_IRQ BIT(1)
+#define MREGBIT_TRANSMIT_DMA_STOPPED_IRQ BIT(2)
+#define MREGBIT_RECEIVE_TRANSFER_DONE_IRQ BIT(4)
+#define MREGBIT_RECEIVE_DES_UNAVAILABLE_IRQ BIT(5)
+#define MREGBIT_RECEIVE_DMA_STOPPED_IRQ BIT(6)
+#define MREGBIT_RECEIVE_MISSED_FRAME_IRQ BIT(7)
+#define MREGBIT_MAC_IRQ BIT(8)
+#define MREGBIT_TRANSMIT_DMA_STATE GENMASK(18, 16)
+#define MREGBIT_RECEIVE_DMA_STATE GENMASK(23, 20)
+
+/* DMA_INTERRUPT_ENABLE (0x000c) */
+#define MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE BIT(0)
+#define MREGBIT_TRANSMIT_DES_UNAVAILABLE_INTR_ENABLE BIT(1)
+#define MREGBIT_TRANSMIT_DMA_STOPPED_INTR_ENABLE BIT(2)
+#define MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE BIT(4)
+#define MREGBIT_RECEIVE_DES_UNAVAILABLE_INTR_ENABLE BIT(5)
+#define MREGBIT_RECEIVE_DMA_STOPPED_INTR_ENABLE BIT(6)
+#define MREGBIT_RECEIVE_MISSED_FRAME_INTR_ENABLE BIT(7)
+#define MREGBIT_MAC_INTR_ENABLE BIT(8)
+
+/* DMA_RECEIVE_IRQ_MITIGATION_CTRL (0x002c) */
+#define MREGBIT_RECEIVE_IRQ_FRAME_COUNTER_MASK GENMASK(7, 0)
+#define MREGBIT_RECEIVE_IRQ_TIMEOUT_COUNTER_MASK GENMASK(27, 8)
+#define MREGBIT_RECEIVE_IRQ_FRAME_COUNTER_MODE BIT(30)
+#define MREGBIT_RECEIVE_IRQ_MITIGATION_ENABLE BIT(31)
+
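Note: these fields implement classic RX interrupt coalescing, raising the interrupt once either a frame count or a timeout is reached. A sketch, assuming both counters may be armed together (the FRAME_COUNTER_MODE bit is left clear since its semantics are not documented here):

static void emac_set_rx_coal_sketch(void __iomem *base, u32 frames, u32 timeout)
{
	u32 val = FIELD_PREP(MREGBIT_RECEIVE_IRQ_FRAME_COUNTER_MASK, frames) |
		  FIELD_PREP(MREGBIT_RECEIVE_IRQ_TIMEOUT_COUNTER_MASK, timeout) |
		  MREGBIT_RECEIVE_IRQ_MITIGATION_ENABLE;

	writel(val, base + DMA_RECEIVE_IRQ_MITIGATION_CTRL);
}
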
+/* MAC_GLOBAL_CONTROL (0x0100) */
+#define MREGBIT_SPEED GENMASK(1, 0)
+#define MREGBIT_SPEED_10M 0x0
+#define MREGBIT_SPEED_100M BIT(0)
+#define MREGBIT_SPEED_1000M BIT(1)
+#define MREGBIT_FULL_DUPLEX_MODE BIT(2)
+#define MREGBIT_RESET_RX_STAT_COUNTERS BIT(3)
+#define MREGBIT_RESET_TX_STAT_COUNTERS BIT(4)
+#define MREGBIT_UNICAST_WAKEUP_MODE BIT(8)
+#define MREGBIT_MAGIC_PACKET_WAKEUP_MODE BIT(9)
+
+/* MAC_TRANSMIT_CONTROL (0x0104) */
+#define MREGBIT_TRANSMIT_ENABLE BIT(0)
+#define MREGBIT_INVERT_FCS BIT(1)
+#define MREGBIT_DISABLE_FCS_INSERT BIT(2)
+#define MREGBIT_TRANSMIT_AUTO_RETRY BIT(3)
+#define MREGBIT_IFG_LEN GENMASK(6, 4)
+#define MREGBIT_PREAMBLE_LENGTH GENMASK(9, 7)
+
+/* MAC_RECEIVE_CONTROL (0x0108) */
+#define MREGBIT_RECEIVE_ENABLE BIT(0)
+#define MREGBIT_DISABLE_FCS_CHECK BIT(1)
+#define MREGBIT_STRIP_FCS BIT(2)
+#define MREGBIT_STORE_FORWARD BIT(3)
+#define MREGBIT_STATUS_FIRST BIT(4)
+#define MREGBIT_PASS_BAD_FRAMES BIT(5)
+#define MREGBIT_ACOOUNT_VLAN BIT(6)
+
+/* MAC_MAXIMUM_FRAME_SIZE (0x010c) */
+#define MREGBIT_MAX_FRAME_SIZE GENMASK(13, 0)
+
+/* MAC_TRANSMIT_JABBER_SIZE (0x0110) */
+#define MREGBIT_TRANSMIT_JABBER_SIZE GENMASK(15, 0)
+
+/* MAC_RECEIVE_JABBER_SIZE (0x0114) */
+#define MREGBIT_RECEIVE_JABBER_SIZE GENMASK(15, 0)
+
+/* MAC_ADDRESS_CONTROL (0x0118) */
+#define MREGBIT_MAC_ADDRESS1_ENABLE BIT(0)
+#define MREGBIT_MAC_ADDRESS2_ENABLE BIT(1)
+#define MREGBIT_MAC_ADDRESS3_ENABLE BIT(2)
+#define MREGBIT_MAC_ADDRESS4_ENABLE BIT(3)
+#define MREGBIT_INVERSE_MAC_ADDRESS1_ENABLE BIT(4)
+#define MREGBIT_INVERSE_MAC_ADDRESS2_ENABLE BIT(5)
+#define MREGBIT_INVERSE_MAC_ADDRESS3_ENABLE BIT(6)
+#define MREGBIT_INVERSE_MAC_ADDRESS4_ENABLE BIT(7)
+#define MREGBIT_PROMISCUOUS_MODE BIT(8)
+
+/* MAC_FC_CONTROL (0x0160) */
+#define MREGBIT_FC_DECODE_ENABLE BIT(0)
+#define MREGBIT_FC_GENERATION_ENABLE BIT(1)
+#define MREGBIT_AUTO_FC_GENERATION_ENABLE BIT(2)
+#define MREGBIT_MULTICAST_MODE BIT(3)
+#define MREGBIT_BLOCK_PAUSE_FRAMES BIT(4)
+
+/* MAC_FC_PAUSE_FRAME_GENERATE (0x0164) */
+#define MREGBIT_GENERATE_PAUSE_FRAME BIT(0)
+
+/* MAC_FC_PAUSE_TIME_VALUE (0x0180) */
+#define MREGBIT_MAC_FC_PAUSE_TIME GENMASK(15, 0)
+
+/* MAC_MDIO_CONTROL (0x01a0) */
+#define MREGBIT_PHY_ADDRESS GENMASK(4, 0)
+#define MREGBIT_REGISTER_ADDRESS GENMASK(9, 5)
+#define MREGBIT_MDIO_READ_WRITE BIT(10)
+#define MREGBIT_START_MDIO_TRANS BIT(15)
+
+/* MAC_MDIO_DATA (0x01a4) */
+#define MREGBIT_MDIO_DATA GENMASK(15, 0)
+
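Note: an MDIO cycle is built from the PHY and register addresses plus a start bit. A hedged read sketch; the polarity of the read/write bit and the start bit self-clearing on completion are assumptions:

static int emac_mdio_read_sketch(void __iomem *base, int phy, int reg)
{
	u32 val;
	int ret;

	writel(FIELD_PREP(MREGBIT_PHY_ADDRESS, phy) |
	       FIELD_PREP(MREGBIT_REGISTER_ADDRESS, reg) |
	       MREGBIT_MDIO_READ_WRITE | MREGBIT_START_MDIO_TRANS,
	       base + MAC_MDIO_CONTROL);

	ret = readl_poll_timeout(base + MAC_MDIO_CONTROL, val,
				 !(val & MREGBIT_START_MDIO_TRANS), 10, 10000);
	if (ret)
		return ret;

	return readl(base + MAC_MDIO_DATA) & MREGBIT_MDIO_DATA;
}
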
+/* MAC_RX_STATCTR_CONTROL (0x01a8) */
+#define MREGBIT_RX_COUNTER_NUMBER GENMASK(4, 0)
+#define MREGBIT_START_RX_COUNTER_READ BIT(15)
+
+/* MAC_RX_STATCTR_DATA_HIGH (0x01ac) */
+#define MREGBIT_RX_STATCTR_DATA_HIGH GENMASK(15, 0)
+/* MAC_RX_STATCTR_DATA_LOW (0x01b0) */
+#define MREGBIT_RX_STATCTR_DATA_LOW GENMASK(15, 0)
+
+/* MAC_TX_STATCTR_CONTROL (0x01b4) */
+#define MREGBIT_TX_COUNTER_NUMBER GENMASK(4, 0)
+#define MREGBIT_START_TX_COUNTER_READ BIT(15)
+
+/* MAC_TX_STATCTR_DATA_HIGH (0x01b8) */
+#define MREGBIT_TX_STATCTR_DATA_HIGH GENMASK(15, 0)
+/* MAC_TX_STATCTR_DATA_LOW (0x01bc) */
+#define MREGBIT_TX_STATCTR_DATA_LOW GENMASK(15, 0)
+
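Note: each statistics counter is read indirectly: write the counter number together with the start bit, then collect the 32-bit value from the two 16-bit data registers. A sketch under the assumption that the start bit self-clears once the value is latched:

static u32 emac_read_rx_stat_sketch(void __iomem *base, u32 counter)
{
	u32 val, lo, hi;

	writel(FIELD_PREP(MREGBIT_RX_COUNTER_NUMBER, counter) |
	       MREGBIT_START_RX_COUNTER_READ,
	       base + MAC_RX_STATCTR_CONTROL);

	/* timeout handling elided in this sketch */
	readl_poll_timeout(base + MAC_RX_STATCTR_CONTROL, val,
			   !(val & MREGBIT_START_RX_COUNTER_READ), 10, 10000);

	lo = readl(base + MAC_RX_STATCTR_DATA_LOW) & MREGBIT_RX_STATCTR_DATA_LOW;
	hi = readl(base + MAC_RX_STATCTR_DATA_HIGH) & MREGBIT_RX_STATCTR_DATA_HIGH;

	return (hi << 16) | lo;
}
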
+/* MAC_TRANSMIT_FIFO_ALMOST_FULL (0x01c0) */
+#define MREGBIT_TX_FIFO_AF GENMASK(13, 0)
+
+/* MAC_TRANSMIT_PACKET_START_THRESHOLD (0x01c4) */
+#define MREGBIT_TX_PACKET_START_THRESHOLD GENMASK(13, 0)
+
+/* MAC_RECEIVE_PACKET_START_THRESHOLD (0x01c8) */
+#define MREGBIT_RX_PACKET_START_THRESHOLD GENMASK(13, 0)
+
+/* MAC_STATUS_IRQ (0x01e0) */
+#define MREGBIT_MAC_UNDERRUN_IRQ BIT(0)
+#define MREGBIT_MAC_JABBER_IRQ BIT(1)
+
+/* MAC_INTERRUPT_ENABLE (0x01e4) */
+#define MREGBIT_MAC_UNDERRUN_INTERRUPT_ENABLE BIT(0)
+#define MREGBIT_JABBER_INTERRUPT_ENABLE BIT(1)
+
+/* RX DMA descriptor */
+
+#define RX_DESC_0_FRAME_PACKET_LENGTH_MASK GENMASK(13, 0)
+#define RX_DESC_0_FRAME_ALIGN_ERR BIT(14)
+#define RX_DESC_0_FRAME_RUNT BIT(15)
+#define RX_DESC_0_FRAME_ETHERNET_TYPE BIT(16)
+#define RX_DESC_0_FRAME_VLAN BIT(17)
+#define RX_DESC_0_FRAME_MULTICAST BIT(18)
+#define RX_DESC_0_FRAME_BROADCAST BIT(19)
+#define RX_DESC_0_FRAME_CRC_ERR BIT(20)
+#define RX_DESC_0_FRAME_MAX_LEN_ERR BIT(21)
+#define RX_DESC_0_FRAME_JABBER_ERR BIT(22)
+#define RX_DESC_0_FRAME_LENGTH_ERR BIT(23)
+#define RX_DESC_0_FRAME_MAC_ADDR1_MATCH BIT(24)
+#define RX_DESC_0_FRAME_MAC_ADDR2_MATCH BIT(25)
+#define RX_DESC_0_FRAME_MAC_ADDR3_MATCH BIT(26)
+#define RX_DESC_0_FRAME_MAC_ADDR4_MATCH BIT(27)
+#define RX_DESC_0_FRAME_PAUSE_CTRL BIT(28)
+#define RX_DESC_0_LAST_DESCRIPTOR BIT(29)
+#define RX_DESC_0_FIRST_DESCRIPTOR BIT(30)
+#define RX_DESC_0_OWN BIT(31)
+
+#define RX_DESC_1_BUFFER_SIZE_1_MASK GENMASK(11, 0)
+#define RX_DESC_1_BUFFER_SIZE_2_MASK GENMASK(23, 12)
+ /* [24] reserved */
+#define RX_DESC_1_SECOND_ADDRESS_CHAINED BIT(25)
+#define RX_DESC_1_END_RING BIT(26)
+ /* [29:27] reserved */
+#define RX_DESC_1_RX_TIMESTAMP BIT(30)
+#define RX_DESC_1_PTP_PKT BIT(31)
+
+/* TX DMA descriptor */
+
+ /* [29:0] unused */
+#define TX_DESC_0_TX_TIMESTAMP BIT(30)
+#define TX_DESC_0_OWN BIT(31)
+
+#define TX_DESC_1_BUFFER_SIZE_1_MASK GENMASK(11, 0)
+#define TX_DESC_1_BUFFER_SIZE_2_MASK GENMASK(23, 12)
+#define TX_DESC_1_FORCE_EOP_ERROR BIT(24)
+#define TX_DESC_1_SECOND_ADDRESS_CHAINED BIT(25)
+#define TX_DESC_1_END_RING BIT(26)
+#define TX_DESC_1_DISABLE_PADDING BIT(27)
+#define TX_DESC_1_ADD_CRC_DISABLE BIT(28)
+#define TX_DESC_1_FIRST_SEGMENT BIT(29)
+#define TX_DESC_1_LAST_SEGMENT BIT(30)
+#define TX_DESC_1_INTERRUPT_ON_COMPLETION BIT(31)
+
+struct emac_desc {
+ u32 desc0;
+ u32 desc1;
+ u32 buffer_addr_1;
+ u32 buffer_addr_2;
+};
+
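Note: the descriptor is shared with hardware through the OWN bit in desc0. A sketch of arming one RX descriptor, to show the ordering requirement; field usage beyond the masks above is assumed, and dma_wmb() makes the descriptor body visible before ownership is handed to the device:

static void emac_arm_rx_desc_sketch(struct emac_desc *desc,
				    dma_addr_t addr, size_t len, bool last)
{
	desc->buffer_addr_1 = lower_32_bits(addr);
	desc->desc1 = FIELD_PREP(RX_DESC_1_BUFFER_SIZE_1_MASK, len) |
		      (last ? RX_DESC_1_END_RING : 0);

	dma_wmb();	/* descriptor body before OWN handoff */
	WRITE_ONCE(desc->desc0, RX_DESC_0_OWN);
}
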
+/* Keep stats in this order; the array index is used to address the hardware counters */
+
+union emac_hw_tx_stats {
+ struct {
+ u64 tx_ok_pkts;
+ u64 tx_total_pkts;
+ u64 tx_ok_bytes;
+ u64 tx_err_pkts;
+ u64 tx_singleclsn_pkts;
+ u64 tx_multiclsn_pkts;
+ u64 tx_lateclsn_pkts;
+ u64 tx_excessclsn_pkts;
+ u64 tx_unicast_pkts;
+ u64 tx_multicast_pkts;
+ u64 tx_broadcast_pkts;
+ u64 tx_pause_pkts;
+ } stats;
+
+ DECLARE_FLEX_ARRAY(u64, array);
+};
+
+union emac_hw_rx_stats {
+ struct {
+ u64 rx_ok_pkts;
+ u64 rx_total_pkts;
+ u64 rx_crc_err_pkts;
+ u64 rx_align_err_pkts;
+ u64 rx_err_total_pkts;
+ u64 rx_ok_bytes;
+ u64 rx_total_bytes;
+ u64 rx_unicast_pkts;
+ u64 rx_multicast_pkts;
+ u64 rx_broadcast_pkts;
+ u64 rx_pause_pkts;
+ u64 rx_len_err_pkts;
+ u64 rx_len_undersize_pkts;
+ u64 rx_len_oversize_pkts;
+ u64 rx_len_fragment_pkts;
+ u64 rx_len_jabber_pkts;
+ u64 rx_64_pkts;
+ u64 rx_65_127_pkts;
+ u64 rx_128_255_pkts;
+ u64 rx_256_511_pkts;
+ u64 rx_512_1023_pkts;
+ u64 rx_1024_1518_pkts;
+ u64 rx_1519_plus_pkts;
+ u64 rx_drp_fifo_full_pkts;
+ u64 rx_truncate_fifo_full_pkts;
+ } stats;
+
+ DECLARE_FLEX_ARRAY(u64, array);
+};
+
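Note: the union-with-flex-array layout is what the ordering comment above relies on: the named fields and array[] alias the same storage, so counters can be walked generically in hardware order. A sketch reusing the indirect-read helper imagined earlier:

static void emac_update_rx_stats_sketch(union emac_hw_rx_stats *s,
					void __iomem *base)
{
	size_t i, n = sizeof(s->stats) / sizeof(u64);

	for (i = 0; i < n; i++)
		s->array[i] += emac_read_rx_stat_sketch(base, i);
}
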
+#endif /* _K1_EMAC_H_ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 67fa879b1e52..9507131875b2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -133,15 +133,17 @@ config DWMAC_QCOM_ETHQOS
stmmac device driver.
config DWMAC_RENESAS_GBETH
- tristate "Renesas RZ/V2H(P) GBETH support"
+ tristate "Renesas RZ/V2H(P) GBETH and RZ/T2H, RZ/N2H GMAC support"
default ARCH_RENESAS
depends on OF && (ARCH_RENESAS || COMPILE_TEST)
+ select PCS_RZN1_MIIC
help
- Support for Gigabit Ethernet Interface (GBETH) on Renesas
- RZ/V2H(P) SoCs.
+ Support for Gigabit Ethernet Interface (GBETH) / Ethernet MAC (GMAC)
+ on Renesas SoCs.
- This selects the Renesas RZ/V2H(P) Soc specific glue layer support
- for the stmmac device driver.
+ This selects Renesas SoC glue layer support for the stmmac device
+ driver. This driver is used for the RZ/V2H(P) family, RZ/T2H and
+ RZ/N2H SoCs.
config DWMAC_ROCKCHIP
tristate "Rockchip dwmac support"
@@ -263,6 +265,18 @@ config DWMAC_SUN8I
stmmac device driver. This driver is used for H3/A83T/A64
EMAC ethernet controller.
+config DWMAC_SUN55I
+ tristate "Allwinner sun55i GMAC200 support"
+ default ARCH_SUNXI
+ depends on OF && (ARCH_SUNXI || COMPILE_TEST)
+ select MDIO_BUS_MUX
+ help
+ Support for Allwinner A523/T527 GMAC200 ethernet controllers.
+
+ This selects Allwinner SoC glue layer support for the
+ stmmac device driver. This driver is used for A523/T527
+ GMAC200 ethernet controller.
+
config DWMAC_THEAD
tristate "T-HEAD dwmac support"
depends on OF && (ARCH_THEAD || COMPILE_TEST)
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index b591d93f8503..51e068e26ce4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_DWMAC_STI) += dwmac-sti.o
obj-$(CONFIG_DWMAC_STM32) += dwmac-stm32.o
obj-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o
obj-$(CONFIG_DWMAC_SUN8I) += dwmac-sun8i.o
+obj-$(CONFIG_DWMAC_SUN55I) += dwmac-sun55i.o
obj-$(CONFIG_DWMAC_THEAD) += dwmac-thead.o
obj-$(CONFIG_DWMAC_DWC_QOS_ETH) += dwmac-dwc-qos-eth.o
obj-$(CONFIG_DWMAC_INTEL_PLAT) += dwmac-intel-plat.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index cbffccb3b9af..8f34c9ad457f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -228,6 +228,7 @@ struct stmmac_extra_stats {
unsigned long mtl_est_btrlm;
unsigned long max_sdu_txq_drop[MTL_MAX_TX_QUEUES];
unsigned long mtl_est_txq_hlbf[MTL_MAX_TX_QUEUES];
+ unsigned long mtl_est_txq_hlbs[MTL_MAX_TX_QUEUES];
/* per queue statistics */
struct stmmac_txq_stats txq_stats[MTL_MAX_TX_QUEUES];
struct stmmac_rxq_stats rxq_stats[MTL_MAX_RX_QUEUES];
@@ -602,7 +603,6 @@ struct mac_device_info {
unsigned int mcast_bits_log2;
unsigned int rx_csum;
unsigned int pcs;
- unsigned int pmt;
unsigned int ps;
unsigned int xlgmac;
unsigned int num_vlan;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
index 6c363f9b0ce2..e8539cad4602 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
@@ -261,7 +261,8 @@ bypass_clk_reset_gpio:
plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
plat_dat->bsp_priv = eqos;
plat_dat->flags |= STMMAC_FLAG_SPH_DISABLE |
- STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP;
+ STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP |
+ STMMAC_FLAG_USE_PHY_WOL;
return 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
index 889e2bb6f7f5..4268b9987237 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
@@ -49,7 +49,7 @@ struct imx_dwmac_ops {
u32 flags;
bool mac_rgmii_txclk_auto_adj;
- int (*fix_soc_reset)(void *priv, void __iomem *ioaddr);
+ int (*fix_soc_reset)(struct stmmac_priv *priv, void __iomem *ioaddr);
int (*set_intf_mode)(struct plat_stmmacenet_data *plat_dat);
void (*fix_mac_speed)(void *priv, int speed, unsigned int mode);
};
@@ -72,7 +72,7 @@ static int imx8mp_set_intf_mode(struct plat_stmmacenet_data *plat_dat)
struct imx_priv_data *dwmac = plat_dat->bsp_priv;
int val;
- switch (plat_dat->mac_interface) {
+ switch (plat_dat->phy_interface) {
case PHY_INTERFACE_MODE_MII:
val = GPR_ENET_QOS_INTF_SEL_MII;
break;
@@ -88,8 +88,8 @@ static int imx8mp_set_intf_mode(struct plat_stmmacenet_data *plat_dat)
GPR_ENET_QOS_RGMII_EN;
break;
default:
- pr_debug("imx dwmac doesn't support %d interface\n",
- plat_dat->mac_interface);
+ pr_debug("imx dwmac doesn't support %s interface\n",
+ phy_modes(plat_dat->phy_interface));
return -EINVAL;
}
@@ -112,7 +112,7 @@ static int imx93_set_intf_mode(struct plat_stmmacenet_data *plat_dat)
struct imx_priv_data *dwmac = plat_dat->bsp_priv;
int val, ret;
- switch (plat_dat->mac_interface) {
+ switch (plat_dat->phy_interface) {
case PHY_INTERFACE_MODE_MII:
val = MX93_GPR_ENET_QOS_INTF_SEL_MII;
break;
@@ -134,8 +134,8 @@ static int imx93_set_intf_mode(struct plat_stmmacenet_data *plat_dat)
val = MX93_GPR_ENET_QOS_INTF_SEL_RGMII;
break;
default:
- dev_dbg(dwmac->dev, "imx dwmac doesn't support %d interface\n",
- plat_dat->mac_interface);
+ dev_dbg(dwmac->dev, "imx dwmac doesn't support %s interface\n",
+ phy_modes(plat_dat->phy_interface));
return -EINVAL;
}
@@ -197,7 +197,7 @@ static int imx_dwmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
{
struct imx_priv_data *dwmac = bsp_priv;
- interface = dwmac->plat_dat->mac_interface;
+ interface = dwmac->plat_dat->phy_interface;
if (interface == PHY_INTERFACE_MODE_RMII ||
interface == PHY_INTERFACE_MODE_MII)
return 0;
@@ -215,8 +215,8 @@ static void imx_dwmac_fix_speed(void *priv, int speed, unsigned int mode)
plat_dat = dwmac->plat_dat;
if (dwmac->ops->mac_rgmii_txclk_auto_adj ||
- (plat_dat->mac_interface == PHY_INTERFACE_MODE_RMII) ||
- (plat_dat->mac_interface == PHY_INTERFACE_MODE_MII))
+ (plat_dat->phy_interface == PHY_INTERFACE_MODE_RMII) ||
+ (plat_dat->phy_interface == PHY_INTERFACE_MODE_MII))
return;
rate = rgmii_clock(speed);
@@ -265,16 +265,16 @@ static void imx93_dwmac_fix_speed(void *priv, int speed, unsigned int mode)
writel(old_ctrl, dwmac->base_addr + MAC_CTRL_REG);
}
-static int imx_dwmac_mx93_reset(void *priv, void __iomem *ioaddr)
+static int imx_dwmac_mx93_reset(struct stmmac_priv *priv, void __iomem *ioaddr)
{
- struct plat_stmmacenet_data *plat_dat = priv;
+ struct plat_stmmacenet_data *plat_dat = priv->plat;
u32 value = readl(ioaddr + DMA_BUS_MODE);
/* DMA SW reset */
value |= DMA_BUS_MODE_SFT_RESET;
writel(value, ioaddr + DMA_BUS_MODE);
- if (plat_dat->mac_interface == PHY_INTERFACE_MODE_RMII) {
+ if (plat_dat->phy_interface == PHY_INTERFACE_MODE_RMII) {
usleep_range(100, 200);
writel(RMII_RESET_SPEED, ioaddr + MAC_CTRL_REG);
}
@@ -301,6 +301,7 @@ imx_dwmac_parse_dt(struct imx_priv_data *dwmac, struct device *dev)
dwmac->clk_mem = NULL;
if (of_machine_is_compatible("fsl,imx8dxl") ||
+ of_machine_is_compatible("fsl,imx91") ||
of_machine_is_compatible("fsl,imx93")) {
dwmac->clk_mem = devm_clk_get(dev, "mem");
if (IS_ERR(dwmac->clk_mem)) {
@@ -310,9 +311,10 @@ imx_dwmac_parse_dt(struct imx_priv_data *dwmac, struct device *dev)
}
if (of_machine_is_compatible("fsl,imx8mp") ||
+ of_machine_is_compatible("fsl,imx91") ||
of_machine_is_compatible("fsl,imx93")) {
/* Binding doc describes the property:
- * is required by i.MX8MP, i.MX93.
+ * is required by i.MX8MP, i.MX91, i.MX93.
* is optional for i.MX8DXL.
*/
dwmac->intf_regmap =
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c
index 15abe214131f..c1670f6bae14 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c
@@ -90,7 +90,7 @@ static int jz4775_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
struct ingenic_mac *mac = plat_dat->bsp_priv;
unsigned int val;
- switch (plat_dat->mac_interface) {
+ switch (plat_dat->phy_interface) {
case PHY_INTERFACE_MODE_MII:
val = FIELD_PREP(MACPHYC_TXCLK_SEL_MASK, MACPHYC_TXCLK_SEL_INPUT) |
FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_MII);
@@ -119,7 +119,8 @@ static int jz4775_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
break;
default:
- dev_err(mac->dev, "Unsupported interface %d", plat_dat->mac_interface);
+ dev_err(mac->dev, "Unsupported interface %s\n",
+ phy_modes(plat_dat->phy_interface));
return -EINVAL;
}
@@ -131,13 +132,14 @@ static int x1000_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
{
struct ingenic_mac *mac = plat_dat->bsp_priv;
- switch (plat_dat->mac_interface) {
+ switch (plat_dat->phy_interface) {
case PHY_INTERFACE_MODE_RMII:
dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RMII\n");
break;
default:
- dev_err(mac->dev, "Unsupported interface %d", plat_dat->mac_interface);
+ dev_err(mac->dev, "Unsupported interface %s\n",
+ phy_modes(plat_dat->phy_interface));
return -EINVAL;
}
@@ -150,14 +152,15 @@ static int x1600_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
struct ingenic_mac *mac = plat_dat->bsp_priv;
unsigned int val;
- switch (plat_dat->mac_interface) {
+ switch (plat_dat->phy_interface) {
case PHY_INTERFACE_MODE_RMII:
val = FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_RMII);
dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RMII\n");
break;
default:
- dev_err(mac->dev, "Unsupported interface %d", plat_dat->mac_interface);
+ dev_err(mac->dev, "Unsupported interface %s\n",
+ phy_modes(plat_dat->phy_interface));
return -EINVAL;
}
@@ -170,7 +173,7 @@ static int x1830_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
struct ingenic_mac *mac = plat_dat->bsp_priv;
unsigned int val;
- switch (plat_dat->mac_interface) {
+ switch (plat_dat->phy_interface) {
case PHY_INTERFACE_MODE_RMII:
val = FIELD_PREP(MACPHYC_MODE_SEL_MASK, MACPHYC_MODE_SEL_RMII) |
FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_RMII);
@@ -178,7 +181,8 @@ static int x1830_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
break;
default:
- dev_err(mac->dev, "Unsupported interface %d", plat_dat->mac_interface);
+ dev_err(mac->dev, "Unsupported interface %s\n",
+ phy_modes(plat_dat->phy_interface));
return -EINVAL;
}
@@ -191,7 +195,7 @@ static int x2000_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
struct ingenic_mac *mac = plat_dat->bsp_priv;
unsigned int val;
- switch (plat_dat->mac_interface) {
+ switch (plat_dat->phy_interface) {
case PHY_INTERFACE_MODE_RMII:
val = FIELD_PREP(MACPHYC_TX_SEL_MASK, MACPHYC_TX_SEL_ORIGIN) |
FIELD_PREP(MACPHYC_RX_SEL_MASK, MACPHYC_RX_SEL_ORIGIN) |
@@ -221,7 +225,8 @@ static int x2000_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
break;
default:
- dev_err(mac->dev, "Unsupported interface %d", plat_dat->mac_interface);
+ dev_err(mac->dev, "Unsupported interface %s\n",
+ phy_modes(plat_dat->phy_interface));
return -EINVAL;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index ea33ae39be6b..e74d00984b88 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -371,9 +371,6 @@ static int intel_crosststamp(ktime_t *device,
u32 acr_value;
int i;
- if (!boot_cpu_has(X86_FEATURE_ART))
- return -EOPNOTSUPP;
-
intel_priv = priv->plat->bsp_priv;
/* Both internal crosstimestamping and external triggered event
@@ -566,7 +563,8 @@ static int intel_mac_finish(struct net_device *ndev,
static void common_default_data(struct plat_stmmacenet_data *plat)
{
- plat->clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
+ /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
+ plat->clk_csr = STMMAC_CSR_20_35M;
plat->has_gmac = 1;
plat->force_sf_dma_mode = 1;
@@ -613,7 +611,7 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
plat->pdev = pdev;
plat->phy_addr = -1;
- plat->clk_csr = 5;
+ plat->clk_csr = STMMAC_CSR_250_300M;
plat->has_gmac = 0;
plat->has_gmac4 = 1;
plat->force_sf_dma_mode = 0;
@@ -755,7 +753,9 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
plat->int_snapshot_num = AUX_SNAPSHOT1;
- plat->crosststamp = intel_crosststamp;
+ if (boot_cpu_has(X86_FEATURE_ART))
+ plat->crosststamp = intel_crosststamp;
+
plat->flags &= ~STMMAC_FLAG_INT_SNAPSHOT_EN;
/* Setup MSI vector offset specific to Intel mGbE controller */
@@ -1231,6 +1231,37 @@ static int stmmac_config_multi_msi(struct pci_dev *pdev,
return 0;
}
+static int intel_eth_pci_suspend(struct device *dev, void *bsp_priv)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+
+ ret = pci_save_state(pdev);
+ if (ret)
+ return ret;
+
+ pci_wake_from_d3(pdev, true);
+ pci_set_power_state(pdev, PCI_D3hot);
+ return 0;
+}
+
+static int intel_eth_pci_resume(struct device *dev, void *bsp_priv)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+
+ pci_restore_state(pdev);
+ pci_set_power_state(pdev, PCI_D0);
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ pci_set_master(pdev);
+
+ return 0;
+}
+
/**
* intel_eth_pci_probe
*
@@ -1292,6 +1323,9 @@ static int intel_eth_pci_probe(struct pci_dev *pdev,
pci_set_master(pdev);
plat->bsp_priv = intel_priv;
+ plat->suspend = intel_eth_pci_suspend;
+ plat->resume = intel_eth_pci_resume;
+
intel_priv->mdio_adhoc_addr = INTEL_MGBE_ADHOC_ADDR;
intel_priv->crossts_adj = 1;
@@ -1355,44 +1389,6 @@ static void intel_eth_pci_remove(struct pci_dev *pdev)
clk_unregister_fixed_rate(priv->plat->stmmac_clk);
}
-static int __maybe_unused intel_eth_pci_suspend(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- int ret;
-
- ret = stmmac_suspend(dev);
- if (ret)
- return ret;
-
- ret = pci_save_state(pdev);
- if (ret)
- return ret;
-
- pci_wake_from_d3(pdev, true);
- pci_set_power_state(pdev, PCI_D3hot);
- return 0;
-}
-
-static int __maybe_unused intel_eth_pci_resume(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- int ret;
-
- pci_restore_state(pdev);
- pci_set_power_state(pdev, PCI_D0);
-
- ret = pcim_enable_device(pdev);
- if (ret)
- return ret;
-
- pci_set_master(pdev);
-
- return stmmac_resume(dev);
-}
-
-static SIMPLE_DEV_PM_OPS(intel_eth_pm_ops, intel_eth_pci_suspend,
- intel_eth_pci_resume);
-
#define PCI_DEVICE_ID_INTEL_QUARK 0x0937
#define PCI_DEVICE_ID_INTEL_EHL_RGMII1G 0x4b30
#define PCI_DEVICE_ID_INTEL_EHL_SGMII1G 0x4b31
@@ -1442,7 +1438,7 @@ static struct pci_driver intel_eth_pci_driver = {
.probe = intel_eth_pci_probe,
.remove = intel_eth_pci_remove,
.driver = {
- .pm = &intel_eth_pm_ops,
+ .pm = &stmmac_simple_pm_ops,
},
};
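Note on the pattern in this and the following glue drivers: the bus-specific suspend/resume bodies move into the new plat->suspend/plat->resume callbacks, and each driver's private dev_pm_ops is replaced by the shared stmmac_simple_pm_ops. The core-side wrapper is not part of this diff; a hedged guess at its shape, matching the call order the removed intel callbacks used (generic stmmac_suspend() first, then the bus work):

/* Hypothetical core-side wrapper, not the actual stmmac implementation */
static int stmmac_simple_suspend_sketch(struct device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev_get_drvdata(dev));
	int ret = stmmac_suspend(dev);

	if (ret)
		return ret;

	return priv->plat->suspend ?
	       priv->plat->suspend(dev, priv->plat->bsp_priv) : 0;
}
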
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
index e1591e6217d4..592aa9d636e5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
@@ -90,15 +90,14 @@ static void loongson_default_data(struct pci_dev *pdev,
/* Get bus_id, this can be overwritten later */
plat->bus_id = pci_dev_id(pdev);
- plat->clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
+ /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
+ plat->clk_csr = STMMAC_CSR_20_35M;
plat->has_gmac = 1;
plat->force_sf_dma_mode = 1;
/* Set default value for multicast hash bins */
plat->multicast_filter_bins = 256;
- plat->mac_interface = PHY_INTERFACE_MODE_NA;
-
/* Set default value for unicast filter entries */
plat->unicast_filter_entries = 1;
@@ -509,10 +508,15 @@ static int loongson_dwmac_acpi_config(struct pci_dev *pdev,
}
/* Loongson's DWMAC device may take nearly two seconds to complete DMA reset */
-static int loongson_dwmac_fix_reset(void *priv, void __iomem *ioaddr)
+static int loongson_dwmac_fix_reset(struct stmmac_priv *priv, void __iomem *ioaddr)
{
u32 value = readl(ioaddr + DMA_BUS_MODE);
+ if (value & DMA_BUS_MODE_SFT_RESET) {
+ netdev_err(priv->dev, "the PHY clock is missing\n");
+ return -EINVAL;
+ }
+
value |= DMA_BUS_MODE_SFT_RESET;
writel(value, ioaddr + DMA_BUS_MODE);
@@ -521,6 +525,37 @@ static int loongson_dwmac_fix_reset(void *priv, void __iomem *ioaddr)
10000, 2000000);
}
+static int loongson_dwmac_suspend(struct device *dev, void *bsp_priv)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+
+ ret = pci_save_state(pdev);
+ if (ret)
+ return ret;
+
+ pci_disable_device(pdev);
+ pci_wake_from_d3(pdev, true);
+ return 0;
+}
+
+static int loongson_dwmac_resume(struct device *dev, void *bsp_priv)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+
+ pci_restore_state(pdev);
+ pci_set_power_state(pdev, PCI_D0);
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ pci_set_master(pdev);
+
+ return 0;
+}
+
static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct plat_stmmacenet_data *plat;
@@ -565,6 +600,8 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
plat->bsp_priv = ld;
plat->setup = loongson_dwmac_setup;
plat->fix_soc_reset = loongson_dwmac_fix_reset;
+ plat->suspend = loongson_dwmac_suspend;
+ plat->resume = loongson_dwmac_resume;
ld->dev = &pdev->dev;
ld->loongson_id = readl(res.addr + GMAC_VERSION) & 0xff;
@@ -621,44 +658,6 @@ static void loongson_dwmac_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static int __maybe_unused loongson_dwmac_suspend(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- int ret;
-
- ret = stmmac_suspend(dev);
- if (ret)
- return ret;
-
- ret = pci_save_state(pdev);
- if (ret)
- return ret;
-
- pci_disable_device(pdev);
- pci_wake_from_d3(pdev, true);
- return 0;
-}
-
-static int __maybe_unused loongson_dwmac_resume(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- int ret;
-
- pci_restore_state(pdev);
- pci_set_power_state(pdev, PCI_D0);
-
- ret = pci_enable_device(pdev);
- if (ret)
- return ret;
-
- pci_set_master(pdev);
-
- return stmmac_resume(dev);
-}
-
-static SIMPLE_DEV_PM_OPS(loongson_dwmac_pm_ops, loongson_dwmac_suspend,
- loongson_dwmac_resume);
-
static const struct pci_device_id loongson_dwmac_id_table[] = {
{ PCI_DEVICE_DATA(LOONGSON, GMAC1, &loongson_gmac_pci_info) },
{ PCI_DEVICE_DATA(LOONGSON, GMAC2, &loongson_gmac_pci_info) },
@@ -673,7 +672,7 @@ static struct pci_driver loongson_dwmac_driver = {
.probe = loongson_dwmac_probe,
.remove = loongson_dwmac_remove,
.driver = {
- .pm = &loongson_dwmac_pm_ops,
+ .pm = &stmmac_simple_pm_ops,
},
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
index c0c44916f849..2562a6d036a2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
@@ -41,7 +41,6 @@ static int lpc18xx_dwmac_probe(struct platform_device *pdev)
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
- plat_dat->mac_interface = PHY_INTERFACE_MODE_NA;
plat_dat->has_gmac = true;
reg = syscon_regmap_lookup_by_compatible("nxp,lpc1850-creg");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
index 39421d6a34e4..f1b36f0a401d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
@@ -523,7 +523,7 @@ static int mediatek_dwmac_clk_init(struct mediatek_dwmac_plat_data *plat)
return ret;
}
-static int mediatek_dwmac_init(struct platform_device *pdev, void *priv)
+static int mediatek_dwmac_init(struct device *dev, void *priv)
{
struct mediatek_dwmac_plat_data *plat = priv;
const struct mediatek_dwmac_variant *variant = plat->variant;
@@ -532,7 +532,7 @@ static int mediatek_dwmac_init(struct platform_device *pdev, void *priv)
if (variant->dwmac_set_phy_interface) {
ret = variant->dwmac_set_phy_interface(plat);
if (ret) {
- dev_err(plat->dev, "failed to set phy interface, err = %d\n", ret);
+ dev_err(dev, "failed to set phy interface, err = %d\n", ret);
return ret;
}
}
@@ -540,7 +540,7 @@ static int mediatek_dwmac_init(struct platform_device *pdev, void *priv)
if (variant->dwmac_set_delay) {
ret = variant->dwmac_set_delay(plat);
if (ret) {
- dev_err(plat->dev, "failed to set delay value, err = %d\n", ret);
+ dev_err(dev, "failed to set delay value, err = %d\n", ret);
return ret;
}
}
@@ -589,7 +589,7 @@ static int mediatek_dwmac_common_data(struct platform_device *pdev,
plat->maxmtu = ETH_DATA_LEN;
plat->host_dma_width = priv_plat->variant->dma_bit_mask;
plat->bsp_priv = priv_plat;
- plat->init = mediatek_dwmac_init;
+ plat->resume = mediatek_dwmac_init;
plat->clks_config = mediatek_dwmac_clks_config;
plat->safety_feat_cfg = devm_kzalloc(&pdev->dev,
@@ -654,7 +654,7 @@ static int mediatek_dwmac_probe(struct platform_device *pdev)
return PTR_ERR(plat_dat);
mediatek_dwmac_common_data(pdev, plat_dat, priv_plat);
- mediatek_dwmac_init(pdev, priv_plat);
+ mediatek_dwmac_init(&pdev->dev, priv_plat);
ret = mediatek_dwmac_clks_config(priv_plat, true);
if (ret)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c
index df4ca897a60c..bc7bb975803c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c
@@ -16,12 +16,37 @@
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pcs-rzn1-miic.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
+#include <linux/types.h>
#include "stmmac_platform.h"
+/**
+ * struct renesas_gbeth_of_data - OF data for Renesas GBETH
+ *
+ * @clks: Array of clock names
+ * @num_clks: Number of clocks
+ * @stmmac_flags: Flags for the stmmac driver
+ * @handle_reset: Flag to indicate if reset control is
+ * handled by the glue driver or core driver.
+ * @set_clk_tx_rate: Flag to indicate if Tx clock is fixed or
+ * set_clk_tx_rate is needed.
+ * @has_pcs: Flag to indicate if the MAC has a PCS
+ */
+struct renesas_gbeth_of_data {
+ const char * const *clks;
+ u8 num_clks;
+ u32 stmmac_flags;
+ bool handle_reset;
+ bool set_clk_tx_rate;
+ bool has_pcs;
+};
+
struct renesas_gbeth {
+ const struct renesas_gbeth_of_data *of_data;
struct plat_stmmacenet_data *plat_dat;
struct reset_control *rstc;
struct device *dev;
@@ -31,6 +56,41 @@ static const char *const renesas_gbeth_clks[] = {
"tx", "tx-180", "rx", "rx-180",
};
+static const char *const renesas_gmac_clks[] = {
+ "tx",
+};
+
+static int renesas_gmac_pcs_init(struct stmmac_priv *priv)
+{
+ struct device_node *np = priv->device->of_node;
+ struct device_node *pcs_node;
+ struct phylink_pcs *pcs;
+
+ pcs_node = of_parse_phandle(np, "pcs-handle", 0);
+ if (pcs_node) {
+ pcs = miic_create(priv->device, pcs_node);
+ of_node_put(pcs_node);
+ if (IS_ERR(pcs))
+ return PTR_ERR(pcs);
+
+ priv->hw->phylink_pcs = pcs;
+ }
+
+ return 0;
+}
+
+static void renesas_gmac_pcs_exit(struct stmmac_priv *priv)
+{
+ if (priv->hw->phylink_pcs)
+ miic_destroy(priv->hw->phylink_pcs);
+}
+
+static struct phylink_pcs *renesas_gmac_select_pcs(struct stmmac_priv *priv,
+ phy_interface_t interface)
+{
+ return priv->hw->phylink_pcs;
+}
+
static int renesas_gbeth_init(struct platform_device *pdev, void *priv)
{
struct plat_stmmacenet_data *plat_dat;
@@ -70,6 +130,7 @@ static void renesas_gbeth_exit(struct platform_device *pdev, void *priv)
static int renesas_gbeth_probe(struct platform_device *pdev)
{
+ const struct renesas_gbeth_of_data *of_data;
struct plat_stmmacenet_data *plat_dat;
struct stmmac_resources stmmac_res;
struct device *dev = &pdev->dev;
@@ -91,14 +152,17 @@ static int renesas_gbeth_probe(struct platform_device *pdev)
if (!gbeth)
return -ENOMEM;
- plat_dat->num_clks = ARRAY_SIZE(renesas_gbeth_clks);
+ of_data = of_device_get_match_data(&pdev->dev);
+ gbeth->of_data = of_data;
+
+ plat_dat->num_clks = of_data->num_clks;
plat_dat->clks = devm_kcalloc(dev, plat_dat->num_clks,
sizeof(*plat_dat->clks), GFP_KERNEL);
if (!plat_dat->clks)
return -ENOMEM;
for (i = 0; i < plat_dat->num_clks; i++)
- plat_dat->clks[i].id = renesas_gbeth_clks[i];
+ plat_dat->clks[i].id = of_data->clks[i];
err = devm_clk_bulk_get(dev, plat_dat->num_clks, plat_dat->clks);
if (err < 0)
@@ -109,25 +173,49 @@ static int renesas_gbeth_probe(struct platform_device *pdev)
return dev_err_probe(dev, -EINVAL,
"error finding tx clock\n");
- gbeth->rstc = devm_reset_control_get_exclusive(dev, NULL);
- if (IS_ERR(gbeth->rstc))
- return PTR_ERR(gbeth->rstc);
+ if (of_data->handle_reset) {
+ gbeth->rstc = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(gbeth->rstc))
+ return PTR_ERR(gbeth->rstc);
+ }
gbeth->dev = dev;
gbeth->plat_dat = plat_dat;
plat_dat->bsp_priv = gbeth;
- plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
+ if (of_data->set_clk_tx_rate)
+ plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
plat_dat->init = renesas_gbeth_init;
plat_dat->exit = renesas_gbeth_exit;
- plat_dat->flags |= STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY |
- STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP |
- STMMAC_FLAG_SPH_DISABLE;
+ plat_dat->flags |= STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP |
+ gbeth->of_data->stmmac_flags;
+ if (of_data->has_pcs) {
+ plat_dat->pcs_init = renesas_gmac_pcs_init;
+ plat_dat->pcs_exit = renesas_gmac_pcs_exit;
+ plat_dat->select_pcs = renesas_gmac_select_pcs;
+ }
return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
}
+static const struct renesas_gbeth_of_data renesas_gbeth_of_data = {
+ .clks = renesas_gbeth_clks,
+ .num_clks = ARRAY_SIZE(renesas_gbeth_clks),
+ .handle_reset = true,
+ .set_clk_tx_rate = true,
+ .stmmac_flags = STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY |
+ STMMAC_FLAG_SPH_DISABLE,
+};
+
+static const struct renesas_gbeth_of_data renesas_gmac_of_data = {
+ .clks = renesas_gmac_clks,
+ .num_clks = ARRAY_SIZE(renesas_gmac_clks),
+ .stmmac_flags = STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY,
+ .has_pcs = true,
+};
+
static const struct of_device_id renesas_gbeth_match[] = {
- { .compatible = "renesas,rzv2h-gbeth", },
+ { .compatible = "renesas,r9a09g077-gbeth", .data = &renesas_gmac_of_data },
+ { .compatible = "renesas,rzv2h-gbeth", .data = &renesas_gbeth_of_data },
{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, renesas_gbeth_match);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index f6687c2f30f6..51ea0caf16c1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -8,6 +8,7 @@
*/
#include <linux/stmmac.h>
+#include <linux/hw_bitfield.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/phy.h>
@@ -71,7 +72,6 @@ struct rk_priv_data {
phy_interface_t phy_iface;
int id;
struct regulator *regulator;
- bool suspended;
const struct rk_gmac_ops *ops;
bool clk_enabled;
@@ -150,7 +150,7 @@ static int rk_set_clk_mac_speed(struct rk_priv_data *bsp_priv,
}
#define HIWORD_UPDATE(val, mask, shift) \
- ((val) << (shift) | (mask) << ((shift) + 16))
+ (FIELD_PREP_WM16((mask) << (shift), (val)))
#define GRF_BIT(nr) (BIT(nr) | BIT(nr+16))
#define GRF_CLR_BIT(nr) (BIT(nr+16))
@@ -557,9 +557,7 @@ static const struct rk_gmac_ops rk3308_ops = {
#define RK3328_GMAC_RMII_MODE GRF_BIT(9)
#define RK3328_GMAC_RMII_MODE_CLR GRF_CLR_BIT(9)
#define RK3328_GMAC_TXCLK_DLY_ENABLE GRF_BIT(0)
-#define RK3328_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(0)
#define RK3328_GMAC_RXCLK_DLY_ENABLE GRF_BIT(1)
-#define RK3328_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(0)
/* RK3328_GRF_MACPHY_CON1 */
#define RK3328_MACPHY_RMII_MODE GRF_BIT(9)
@@ -1706,6 +1704,28 @@ static int rk_set_clk_tx_rate(void *bsp_priv_, struct clk *clk_tx_i,
return -EINVAL;
}
+static int rk_gmac_suspend(struct device *dev, void *bsp_priv_)
+{
+ struct rk_priv_data *bsp_priv = bsp_priv_;
+
+ /* Keep the PHY up if we use Wake-on-Lan. */
+ if (!device_may_wakeup(dev))
+ rk_gmac_powerdown(bsp_priv);
+
+ return 0;
+}
+
+static int rk_gmac_resume(struct device *dev, void *bsp_priv_)
+{
+ struct rk_priv_data *bsp_priv = bsp_priv_;
+
+ /* The PHY was up for Wake-on-Lan. */
+ if (!device_may_wakeup(dev))
+ rk_gmac_powerup(bsp_priv);
+
+ return 0;
+}
+
static int rk_gmac_probe(struct platform_device *pdev)
{
struct plat_stmmacenet_data *plat_dat;
@@ -1738,6 +1758,8 @@ static int rk_gmac_probe(struct platform_device *pdev)
plat_dat->get_interfaces = rk_get_interfaces;
plat_dat->set_clk_tx_rate = rk_set_clk_tx_rate;
+ plat_dat->suspend = rk_gmac_suspend;
+ plat_dat->resume = rk_gmac_resume;
plat_dat->bsp_priv = rk_gmac_setup(pdev, plat_dat, data);
if (IS_ERR(plat_dat->bsp_priv))
@@ -1776,37 +1798,6 @@ static void rk_gmac_remove(struct platform_device *pdev)
clk_put(bsp_priv->clk_phy);
}
-#ifdef CONFIG_PM_SLEEP
-static int rk_gmac_suspend(struct device *dev)
-{
- struct rk_priv_data *bsp_priv = get_stmmac_bsp_priv(dev);
- int ret = stmmac_suspend(dev);
-
- /* Keep the PHY up if we use Wake-on-Lan. */
- if (!device_may_wakeup(dev)) {
- rk_gmac_powerdown(bsp_priv);
- bsp_priv->suspended = true;
- }
-
- return ret;
-}
-
-static int rk_gmac_resume(struct device *dev)
-{
- struct rk_priv_data *bsp_priv = get_stmmac_bsp_priv(dev);
-
- /* The PHY was up for Wake-on-Lan. */
- if (bsp_priv->suspended) {
- rk_gmac_powerup(bsp_priv);
- bsp_priv->suspended = false;
- }
-
- return stmmac_resume(dev);
-}
-#endif /* CONFIG_PM_SLEEP */
-
-static SIMPLE_DEV_PM_OPS(rk_gmac_pm_ops, rk_gmac_suspend, rk_gmac_resume);
-
static const struct of_device_id rk_gmac_dwmac_match[] = {
{ .compatible = "rockchip,px30-gmac", .data = &px30_ops },
{ .compatible = "rockchip,rk3128-gmac", .data = &rk3128_ops },
@@ -1832,7 +1823,7 @@ static struct platform_driver rk_gmac_dwmac_driver = {
.remove = rk_gmac_remove,
.driver = {
.name = "rk_gmac-dwmac",
- .pm = &rk_gmac_pm_ops,
+ .pm = &stmmac_simple_pm_ops,
.of_match_table = rk_gmac_dwmac_match,
},
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index 01dd0cf0923c..354f01184e6c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -234,7 +234,7 @@ err_node_put:
static int socfpga_get_plat_phymode(struct socfpga_dwmac *dwmac)
{
- return dwmac->plat_dat->mac_interface;
+ return dwmac->plat_dat->phy_interface;
}
static void socfpga_sgmii_config(struct socfpga_dwmac *dwmac, bool enable)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
index 2013d7477eb7..6938dd2a79b7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
@@ -38,7 +38,7 @@ static int starfive_dwmac_set_mode(struct plat_stmmacenet_data *plat_dat)
unsigned int mode;
int err;
- switch (plat_dat->mac_interface) {
+ switch (plat_dat->phy_interface) {
case PHY_INTERFACE_MODE_RMII:
mode = STARFIVE_DWMAC_PHY_INFT_RMII;
break;
@@ -51,8 +51,8 @@ static int starfive_dwmac_set_mode(struct plat_stmmacenet_data *plat_dat)
break;
default:
- dev_err(dwmac->dev, "unsupported interface %d\n",
- plat_dat->mac_interface);
+ dev_err(dwmac->dev, "unsupported interface %s\n",
+ phy_modes(plat_dat->phy_interface));
return -EINVAL;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
index 1eb16eec9c0d..6c179911ef3f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
@@ -171,7 +171,7 @@ static int stm32mp1_select_ethck_external(struct plat_stmmacenet_data *plat_dat)
{
struct stm32_dwmac *dwmac = plat_dat->bsp_priv;
- switch (plat_dat->mac_interface) {
+ switch (plat_dat->phy_interface) {
case PHY_INTERFACE_MODE_MII:
dwmac->enable_eth_ck = dwmac->ext_phyclk;
return 0;
@@ -193,7 +193,7 @@ static int stm32mp1_select_ethck_external(struct plat_stmmacenet_data *plat_dat)
default:
dwmac->enable_eth_ck = false;
dev_err(dwmac->dev, "Mode %s not supported",
- phy_modes(plat_dat->mac_interface));
+ phy_modes(plat_dat->phy_interface));
return -EINVAL;
}
}
@@ -206,7 +206,7 @@ static int stm32mp1_validate_ethck_rate(struct plat_stmmacenet_data *plat_dat)
if (!dwmac->enable_eth_ck)
return 0;
- switch (plat_dat->mac_interface) {
+ switch (plat_dat->phy_interface) {
case PHY_INTERFACE_MODE_MII:
case PHY_INTERFACE_MODE_GMII:
if (clk_rate == ETH_CK_F_25M)
@@ -228,7 +228,7 @@ static int stm32mp1_validate_ethck_rate(struct plat_stmmacenet_data *plat_dat)
}
dev_err(dwmac->dev, "Mode %s does not match eth-ck frequency %d Hz",
- phy_modes(plat_dat->mac_interface), clk_rate);
+ phy_modes(plat_dat->phy_interface), clk_rate);
return -EINVAL;
}
@@ -238,7 +238,7 @@ static int stm32mp1_configure_pmcr(struct plat_stmmacenet_data *plat_dat)
u32 reg = dwmac->mode_reg;
int val = 0;
- switch (plat_dat->mac_interface) {
+ switch (plat_dat->phy_interface) {
case PHY_INTERFACE_MODE_MII:
/*
* STM32MP15xx supports both MII and GMII, STM32MP13xx MII only.
@@ -269,12 +269,12 @@ static int stm32mp1_configure_pmcr(struct plat_stmmacenet_data *plat_dat)
break;
default:
dev_err(dwmac->dev, "Mode %s not supported",
- phy_modes(plat_dat->mac_interface));
+ phy_modes(plat_dat->phy_interface));
/* Do not manage other interfaces */
return -EINVAL;
}
- dev_dbg(dwmac->dev, "Mode %s", phy_modes(plat_dat->mac_interface));
+ dev_dbg(dwmac->dev, "Mode %s", phy_modes(plat_dat->phy_interface));
/* Shift value at correct ethernet MAC offset in SYSCFG_PMCSETR */
val <<= ffs(dwmac->mode_mask) - ffs(SYSCFG_MP1_ETH_MASK);
@@ -294,7 +294,7 @@ static int stm32mp2_configure_syscfg(struct plat_stmmacenet_data *plat_dat)
u32 reg = dwmac->mode_reg;
int val = 0;
- switch (plat_dat->mac_interface) {
+ switch (plat_dat->phy_interface) {
case PHY_INTERFACE_MODE_MII:
/* ETH_REF_CLK_SEL bit in SYSCFG register is not applicable in MII mode */
break;
@@ -319,12 +319,12 @@ static int stm32mp2_configure_syscfg(struct plat_stmmacenet_data *plat_dat)
break;
default:
dev_err(dwmac->dev, "Mode %s not supported",
- phy_modes(plat_dat->mac_interface));
+ phy_modes(plat_dat->phy_interface));
/* Do not manage other interfaces */
return -EINVAL;
}
- dev_dbg(dwmac->dev, "Mode %s", phy_modes(plat_dat->mac_interface));
+ dev_dbg(dwmac->dev, "Mode %s", phy_modes(plat_dat->phy_interface));
/* Select PTP (IEEE1588) clock selection from RCC (ck_ker_ethxptp) */
val |= SYSCFG_ETHCR_ETH_PTP_CLK_SEL;
@@ -359,7 +359,7 @@ static int stm32mcu_set_mode(struct plat_stmmacenet_data *plat_dat)
u32 reg = dwmac->mode_reg;
int val;
- switch (plat_dat->mac_interface) {
+ switch (plat_dat->phy_interface) {
case PHY_INTERFACE_MODE_MII:
val = SYSCFG_MCU_ETH_SEL_MII;
break;
@@ -368,12 +368,12 @@ static int stm32mcu_set_mode(struct plat_stmmacenet_data *plat_dat)
break;
default:
dev_err(dwmac->dev, "Mode %s not supported",
- phy_modes(plat_dat->mac_interface));
+ phy_modes(plat_dat->phy_interface));
/* Do not manage other interfaces */
return -EINVAL;
}
- dev_dbg(dwmac->dev, "Mode %s", phy_modes(plat_dat->mac_interface));
+ dev_dbg(dwmac->dev, "Mode %s", phy_modes(plat_dat->phy_interface));
return regmap_update_bits(dwmac->regmap, reg,
SYSCFG_MCU_ETH_MASK, val << 23);
@@ -498,6 +498,26 @@ static int stm32mp1_parse_data(struct stm32_dwmac *dwmac,
return err;
}
+static int stm32_dwmac_suspend(struct device *dev, void *bsp_priv)
+{
+ struct stm32_dwmac *dwmac = bsp_priv;
+
+ stm32_dwmac_clk_disable(dwmac);
+
+ return dwmac->ops->suspend ? dwmac->ops->suspend(dwmac) : 0;
+}
+
+static int stm32_dwmac_resume(struct device *dev, void *bsp_priv)
+{
+ struct stmmac_priv *priv = netdev_priv(dev_get_drvdata(dev));
+ struct stm32_dwmac *dwmac = bsp_priv;
+
+ if (dwmac->ops->resume)
+ dwmac->ops->resume(dwmac);
+
+ return stm32_dwmac_init(priv->plat);
+}
+
static int stm32_dwmac_probe(struct platform_device *pdev)
{
struct plat_stmmacenet_data *plat_dat;
@@ -535,6 +555,8 @@ static int stm32_dwmac_probe(struct platform_device *pdev)
plat_dat->flags |= STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP;
plat_dat->bsp_priv = dwmac;
+ plat_dat->suspend = stm32_dwmac_suspend;
+ plat_dat->resume = stm32_dwmac_resume;
ret = stm32_dwmac_init(plat_dat);
if (ret)
@@ -600,50 +622,6 @@ static void stm32mp1_resume(struct stm32_dwmac *dwmac)
clk_disable_unprepare(dwmac->clk_ethstp);
}
-#ifdef CONFIG_PM_SLEEP
-static int stm32_dwmac_suspend(struct device *dev)
-{
- struct net_device *ndev = dev_get_drvdata(dev);
- struct stmmac_priv *priv = netdev_priv(ndev);
- struct stm32_dwmac *dwmac = priv->plat->bsp_priv;
-
- int ret;
-
- ret = stmmac_suspend(dev);
- if (ret)
- return ret;
-
- stm32_dwmac_clk_disable(dwmac);
-
- if (dwmac->ops->suspend)
- ret = dwmac->ops->suspend(dwmac);
-
- return ret;
-}
-
-static int stm32_dwmac_resume(struct device *dev)
-{
- struct net_device *ndev = dev_get_drvdata(dev);
- struct stmmac_priv *priv = netdev_priv(ndev);
- struct stm32_dwmac *dwmac = priv->plat->bsp_priv;
- int ret;
-
- if (dwmac->ops->resume)
- dwmac->ops->resume(dwmac);
-
- ret = stm32_dwmac_init(priv->plat);
- if (ret)
- return ret;
-
- ret = stmmac_resume(dev);
-
- return ret;
-}
-#endif /* CONFIG_PM_SLEEP */
-
-static SIMPLE_DEV_PM_OPS(stm32_dwmac_pm_ops,
- stm32_dwmac_suspend, stm32_dwmac_resume);
-
static struct stm32_ops stm32mcu_dwmac_data = {
.set_mode = stm32mcu_set_mode
};
@@ -691,7 +669,7 @@ static struct platform_driver stm32_dwmac_driver = {
.remove = stm32_dwmac_remove,
.driver = {
.name = "stm32-dwmac",
- .pm = &stm32_dwmac_pm_ops,
+ .pm = &stmmac_simple_pm_ops,
.of_match_table = stm32_dwmac_match,
},
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun55i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun55i.c
new file mode 100644
index 000000000000..862df173d963
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun55i.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * dwmac-sun55i.c - Allwinner sun55i GMAC200 specific glue layer
+ *
+ * Copyright (C) 2025 Chen-Yu Tsai <wens@csie.org>
+ *
+ * syscon parts taken from dwmac-sun8i.c, which is
+ *
+ * Copyright (C) 2017 Corentin Labbe <clabbe.montjoie@gmail.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/stmmac.h>
+
+#include "stmmac.h"
+#include "stmmac_platform.h"
+
+#define SYSCON_REG 0x34
+
+/* RMII specific bits */
+#define SYSCON_RMII_EN BIT(13) /* 1: enable RMII (overrides EPIT) */
+/* Generic system control EMAC_CLK bits */
+#define SYSCON_ETXDC_MASK GENMASK(12, 10)
+#define SYSCON_ERXDC_MASK GENMASK(9, 5)
+/* EMAC PHY Interface Type */
+#define SYSCON_EPIT BIT(2) /* 1: RGMII, 0: MII */
+#define SYSCON_ETCS_MASK GENMASK(1, 0)
+#define SYSCON_ETCS_MII 0x0
+#define SYSCON_ETCS_EXT_GMII 0x1
+#define SYSCON_ETCS_INT_GMII 0x2
+
+static int sun55i_gmac200_set_syscon(struct device *dev,
+ struct plat_stmmacenet_data *plat)
+{
+ struct device_node *node = dev->of_node;
+ struct regmap *regmap;
+ u32 val, reg = 0;
+ int ret;
+
+ regmap = syscon_regmap_lookup_by_phandle(node, "syscon");
+ if (IS_ERR(regmap))
+ return dev_err_probe(dev, PTR_ERR(regmap), "Unable to map syscon\n");
+
+ if (!of_property_read_u32(node, "tx-internal-delay-ps", &val)) {
+ if (val % 100)
+ return dev_err_probe(dev, -EINVAL,
+ "tx-delay must be a multiple of 100ps\n");
+ val /= 100;
+ dev_dbg(dev, "set tx-delay to %x\n", val);
+ if (!FIELD_FIT(SYSCON_ETXDC_MASK, val))
+ return dev_err_probe(dev, -EINVAL,
+ "TX clock delay exceeds maximum (%u00ps > %lu00ps)\n",
+ val, FIELD_MAX(SYSCON_ETXDC_MASK));
+
+ reg |= FIELD_PREP(SYSCON_ETXDC_MASK, val);
+ }
+
+ if (!of_property_read_u32(node, "rx-internal-delay-ps", &val)) {
+ if (val % 100)
+ return dev_err_probe(dev, -EINVAL,
+ "rx-delay must be a multiple of 100ps\n");
+ val /= 100;
+ dev_dbg(dev, "set rx-delay to %x\n", val);
+ if (!FIELD_FIT(SYSCON_ERXDC_MASK, val))
+ return dev_err_probe(dev, -EINVAL,
+ "RX clock delay exceeds maximum (%u00ps > %lu00ps)\n",
+ val, FIELD_MAX(SYSCON_ERXDC_MASK));
+
+ reg |= FIELD_PREP(SYSCON_ERXDC_MASK, val);
+ }
+
+ switch (plat->phy_interface) {
+ case PHY_INTERFACE_MODE_MII:
+ /* default */
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ reg |= SYSCON_EPIT | SYSCON_ETCS_INT_GMII;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ reg |= SYSCON_RMII_EN;
+ break;
+ default:
+ return dev_err_probe(dev, -EINVAL, "Unsupported interface mode: %s",
+ phy_modes(plat->phy_interface));
+ }
+
+ ret = regmap_write(regmap, SYSCON_REG, reg);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to write to syscon\n");
+
+ return 0;
+}
+
+static int sun55i_gmac200_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ struct device *dev = &pdev->dev;
+ struct clk *clk;
+ int ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
+
+ /* Split header (SPH) is disabled in the vendor BSP driver */
+ plat_dat->flags |= STMMAC_FLAG_SPH_DISABLE;
+ plat_dat->host_dma_width = 32;
+
+ ret = sun55i_gmac200_set_syscon(dev, plat_dat);
+ if (ret)
+ return ret;
+
+ clk = devm_clk_get_enabled(dev, "mbus");
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk),
+ "Failed to get or enable MBUS clock\n");
+
+ ret = devm_regulator_get_enable_optional(dev, "phy");
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get or enable PHY supply\n");
+
+ return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
+}
+
+static const struct of_device_id sun55i_gmac200_match[] = {
+ { .compatible = "allwinner,sun55i-a523-gmac200" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sun55i_gmac200_match);
+
+static struct platform_driver sun55i_gmac200_driver = {
+ .probe = sun55i_gmac200_probe,
+ .driver = {
+ .name = "dwmac-sun55i",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = sun55i_gmac200_match,
+ },
+};
+module_platform_driver(sun55i_gmac200_driver);
+
+MODULE_AUTHOR("Chen-Yu Tsai <wens@csie.org>");
+MODULE_DESCRIPTION("Allwinner sun55i GMAC200 specific glue layer");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 2796dc426943..5d871b2cd111 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -31,10 +31,6 @@
*/
/* struct emac_variant - Describe dwmac-sun8i hardware variant
- * @default_syscon_value: The default value of the EMAC register in syscon
- * This value is used for disabling properly EMAC
- * and used as a good starting value in case of the
- * boot process(uboot) leave some stuff.
* @syscon_field reg_field for the syscon's gmac register
* @soc_has_internal_phy: Does the MAC embed an internal PHY
* @support_mii: Does the MAC handle MII
@@ -48,7 +44,6 @@
* value of zero indicates this is not supported.
*/
struct emac_variant {
- u32 default_syscon_value;
const struct reg_field *syscon_field;
bool soc_has_internal_phy;
bool support_mii;
@@ -94,7 +89,6 @@ static const struct reg_field sun8i_ccu_reg_field = {
};
static const struct emac_variant emac_variant_h3 = {
- .default_syscon_value = 0x58000,
.syscon_field = &sun8i_syscon_reg_field,
.soc_has_internal_phy = true,
.support_mii = true,
@@ -105,14 +99,12 @@ static const struct emac_variant emac_variant_h3 = {
};
static const struct emac_variant emac_variant_v3s = {
- .default_syscon_value = 0x38000,
.syscon_field = &sun8i_syscon_reg_field,
.soc_has_internal_phy = true,
.support_mii = true
};
static const struct emac_variant emac_variant_a83t = {
- .default_syscon_value = 0,
.syscon_field = &sun8i_syscon_reg_field,
.soc_has_internal_phy = false,
.support_mii = true,
@@ -122,7 +114,6 @@ static const struct emac_variant emac_variant_a83t = {
};
static const struct emac_variant emac_variant_r40 = {
- .default_syscon_value = 0,
.syscon_field = &sun8i_ccu_reg_field,
.support_mii = true,
.support_rgmii = true,
@@ -130,7 +121,6 @@ static const struct emac_variant emac_variant_r40 = {
};
static const struct emac_variant emac_variant_a64 = {
- .default_syscon_value = 0,
.syscon_field = &sun8i_syscon_reg_field,
.soc_has_internal_phy = false,
.support_mii = true,
@@ -141,7 +131,6 @@ static const struct emac_variant emac_variant_a64 = {
};
static const struct emac_variant emac_variant_h6 = {
- .default_syscon_value = 0x50000,
.syscon_field = &sun8i_syscon_reg_field,
/* The "Internal PHY" of H6 is not on the die. It's on the
* co-packaged AC200 chip instead.
@@ -933,25 +922,11 @@ static int sun8i_dwmac_set_syscon(struct device *dev,
struct sunxi_priv_data *gmac = plat->bsp_priv;
struct device_node *node = dev->of_node;
int ret;
- u32 reg, val;
-
- ret = regmap_field_read(gmac->regmap_field, &val);
- if (ret) {
- dev_err(dev, "Fail to read from regmap field.\n");
- return ret;
- }
-
- reg = gmac->variant->default_syscon_value;
- if (reg != val)
- dev_warn(dev,
- "Current syscon value is not the default %x (expect %x)\n",
- val, reg);
+ u32 reg = 0, val;
if (gmac->variant->soc_has_internal_phy) {
if (of_property_read_bool(node, "allwinner,leds-active-low"))
reg |= H3_EPHY_LED_POL;
- else
- reg &= ~H3_EPHY_LED_POL;
/* Force EPHY xtal frequency to 24MHz. */
reg |= H3_EPHY_CLK_SEL;
@@ -965,11 +940,6 @@ static int sun8i_dwmac_set_syscon(struct device *dev,
* address. No need to mask it again.
*/
reg |= ret << H3_EPHY_ADDR_SHIFT;
- } else {
- /* For SoCs without internal PHY the PHY selection bit should be
- * set to 0 (external PHY).
- */
- reg &= ~H3_EPHY_SELECT;
}
if (!of_property_read_u32(node, "allwinner,tx-delay-ps", &val)) {
@@ -980,8 +950,6 @@ static int sun8i_dwmac_set_syscon(struct device *dev,
val /= 100;
dev_dbg(dev, "set tx-delay to %x\n", val);
if (val <= gmac->variant->tx_delay_max) {
- reg &= ~(gmac->variant->tx_delay_max <<
- SYSCON_ETXDC_SHIFT);
reg |= (val << SYSCON_ETXDC_SHIFT);
} else {
dev_err(dev, "Invalid TX clock delay: %d\n",
@@ -998,8 +966,6 @@ static int sun8i_dwmac_set_syscon(struct device *dev,
val /= 100;
dev_dbg(dev, "set rx-delay to %x\n", val);
if (val <= gmac->variant->rx_delay_max) {
- reg &= ~(gmac->variant->rx_delay_max <<
- SYSCON_ERXDC_SHIFT);
reg |= (val << SYSCON_ERXDC_SHIFT);
} else {
dev_err(dev, "Invalid RX clock delay: %d\n",
@@ -1008,12 +974,7 @@ static int sun8i_dwmac_set_syscon(struct device *dev,
}
}
- /* Clear interface mode bits */
- reg &= ~(SYSCON_ETCS_MASK | SYSCON_EPIT);
- if (gmac->variant->support_rmii)
- reg &= ~SYSCON_RMII_EN;
-
- switch (plat->mac_interface) {
+ switch (plat->phy_interface) {
case PHY_INTERFACE_MODE_MII:
/* default */
break;
@@ -1028,7 +989,7 @@ static int sun8i_dwmac_set_syscon(struct device *dev,
break;
default:
dev_err(dev, "Unsupported interface mode: %s",
- phy_modes(plat->mac_interface));
+ phy_modes(plat->phy_interface));
return -EINVAL;
}
@@ -1039,9 +1000,9 @@ static int sun8i_dwmac_set_syscon(struct device *dev,
static void sun8i_dwmac_unset_syscon(struct sunxi_priv_data *gmac)
{
- u32 reg = gmac->variant->default_syscon_value;
-
- regmap_field_write(gmac->regmap_field, reg);
+ if (gmac->variant->soc_has_internal_phy)
+ regmap_field_write(gmac->regmap_field,
+ (H3_EPHY_SHUTDOWN | H3_EPHY_SELECT));
}
static void sun8i_dwmac_exit(struct platform_device *pdev, void *priv)
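
The net effect of the sun8i hunks above: sun8i_dwmac_set_syscon() now composes the syscon value from zero rather than patching whatever value the bootloader left behind, which is why every reg &= ~... clearing step disappears. A condensed sketch of the resulting flow, where tx_delay is a hypothetical stand-in for the parsed "allwinner,tx-delay-ps" value:

	u32 reg = 0;	/* compose from scratch; the old value is never read back */

	if (gmac->variant->soc_has_internal_phy)
		reg |= H3_EPHY_CLK_SEL;			/* force 24MHz EPHY xtal */
	if (tx_delay <= gmac->variant->tx_delay_max)
		reg |= tx_delay << SYSCON_ETXDC_SHIFT;	/* no prior mask-out needed */

	regmap_field_write(gmac->regmap_field, reg);	/* whole field rewritten */
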
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c
index 6c6c49e4b66f..a3378046b061 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c
@@ -56,7 +56,7 @@ static int thead_dwmac_set_phy_if(struct plat_stmmacenet_data *plat)
struct thead_dwmac *dwmac = plat->bsp_priv;
u32 phyif;
- switch (plat->mac_interface) {
+ switch (plat->phy_interface) {
case PHY_INTERFACE_MODE_MII:
phyif = PHY_INTF_MII_GMII;
break;
@@ -67,8 +67,8 @@ static int thead_dwmac_set_phy_if(struct plat_stmmacenet_data *plat)
phyif = PHY_INTF_RGMII;
break;
default:
- dev_err(dwmac->dev, "unsupported phy interface %d\n",
- plat->mac_interface);
+ dev_err(dwmac->dev, "unsupported phy interface %s\n",
+ phy_modes(plat->phy_interface));
return -EINVAL;
}
@@ -81,7 +81,7 @@ static int thead_dwmac_set_txclk_dir(struct plat_stmmacenet_data *plat)
struct thead_dwmac *dwmac = plat->bsp_priv;
u32 txclk_dir;
- switch (plat->mac_interface) {
+ switch (plat->phy_interface) {
case PHY_INTERFACE_MODE_MII:
txclk_dir = TXCLK_DIR_INPUT;
break;
@@ -92,8 +92,8 @@ static int thead_dwmac_set_txclk_dir(struct plat_stmmacenet_data *plat)
txclk_dir = TXCLK_DIR_OUTPUT;
break;
default:
- dev_err(dwmac->dev, "unsupported phy interface %d\n",
- plat->mac_interface);
+ dev_err(dwmac->dev, "unsupported phy interface %s\n",
+ phy_modes(plat->phy_interface));
return -EINVAL;
}
@@ -112,7 +112,7 @@ static int thead_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
plat = dwmac->plat;
- switch (plat->mac_interface) {
+ switch (plat->phy_interface) {
/* For MII, rxc/txc is provided by phy */
case PHY_INTERFACE_MODE_MII:
return 0;
@@ -143,8 +143,8 @@ static int thead_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
return 0;
default:
- dev_err(dwmac->dev, "unsupported phy interface %d\n",
- plat->mac_interface);
+ dev_err(dwmac->dev, "unsupported phy interface %s\n",
+ phy_modes(plat->phy_interface));
return -EINVAL;
}
}
@@ -154,7 +154,7 @@ static int thead_dwmac_enable_clk(struct plat_stmmacenet_data *plat)
struct thead_dwmac *dwmac = plat->bsp_priv;
u32 reg, div;
- switch (plat->mac_interface) {
+ switch (plat->phy_interface) {
case PHY_INTERFACE_MODE_MII:
reg = GMAC_RX_CLK_EN | GMAC_TX_CLK_EN;
break;
@@ -177,8 +177,8 @@ static int thead_dwmac_enable_clk(struct plat_stmmacenet_data *plat)
break;
default:
- dev_err(dwmac->dev, "unsupported phy interface %d\n",
- plat->mac_interface);
+ dev_err(dwmac->dev, "unsupported phy interface %s\n",
+ phy_modes(plat->phy_interface));
return -EINVAL;
}
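
All of the %d to %s conversions in this file rely on phy_modes() from <linux/phy.h>, which maps a phy_interface_t to its canonical devicetree string, for example:

	pr_info("%s\n", phy_modes(PHY_INTERFACE_MODE_RGMII_ID));	/* "rgmii-id" */
	pr_info("%s\n", phy_modes(PHY_INTERFACE_MODE_MII));		/* "mii" */
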
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index f4694fd576f5..3dec1a264cf6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -341,6 +341,7 @@ static inline u32 mtl_chanx_base_addr(const struct dwmac4_addrs *addrs,
#define MTL_OP_MODE_RFA_SHIFT 8
#define MTL_OP_MODE_EHFC BIT(7)
+#define MTL_OP_MODE_DIS_TCP_EF BIT(6)
#define MTL_OP_MODE_RTC_MASK GENMASK(1, 0)
#define MTL_OP_MODE_RTC_SHIFT 0
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index a5fb31eb0192..aac68dc28dc1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -110,16 +110,20 @@ static int dwmac4_wrback_get_rx_status(struct stmmac_extra_stats *x,
message_type = (rdes1 & ERDES4_MSG_TYPE_MASK) >> 8;
- if (rdes1 & RDES1_IP_HDR_ERROR)
+ if (rdes1 & RDES1_IP_HDR_ERROR) {
x->ip_hdr_err++;
+ ret |= csum_none;
+ }
if (rdes1 & RDES1_IP_CSUM_BYPASSED)
x->ip_csum_bypassed++;
if (rdes1 & RDES1_IPV4_HEADER)
x->ipv4_pkt_rcvd++;
if (rdes1 & RDES1_IPV6_HEADER)
x->ipv6_pkt_rcvd++;
- if (rdes1 & RDES1_IP_PAYLOAD_ERROR)
+ if (rdes1 & RDES1_IP_PAYLOAD_ERROR) {
x->ip_payload_err++;
+ ret |= csum_none;
+ }
if (message_type == RDES_EXT_NO_PTP)
x->no_ptp_rx_msg_type_ext++;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
index 0cb84a0041a4..d87a8b595e6a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
@@ -268,6 +268,8 @@ static void dwmac4_dma_rx_chan_op_mode(struct stmmac_priv *priv,
mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(dwmac4_addrs, channel));
+ mtl_rx_op |= MTL_OP_MODE_DIS_TCP_EF;
+
if (mode == SF_DMA_MODE) {
pr_debug("GMAC: enable RX store and forward mode\n");
mtl_rx_op |= MTL_OP_MODE_RSF;
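
Judging by the bit name, MTL_OP_MODE_DIS_TCP_EF stops the MTL from silently dropping frames whose TCP/IP checksum failed; together with the dwmac4_descs.c hunk above, such frames now reach the stack tagged csum_none instead of being lost or mislabelled. Consumer-side sketch (the matching stmmac_main.c hunk appears later in this patch):

	/* in stmmac_rx(); status carries the OR of descriptor flags */
	if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb) ||
	    (status & csum_none))
		skb_checksum_none_assert(skb);		/* let the stack verify */
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;	/* hardware verified */
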
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index 4846bf49c576..467f1a05747e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -251,7 +251,7 @@ void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr)
void stmmac_set_mac_addr(void __iomem *ioaddr, const u8 addr[6],
unsigned int high, unsigned int low)
{
- unsigned long data;
+ u32 data;
data = (addr[5] << 8) | addr[4];
/* For MAC Addr registers we have to set the Address Enable (AE)
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c
index 99635b37044a..3f7c765dcb79 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.c
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c
@@ -100,7 +100,7 @@ int stmmac_reset(struct stmmac_priv *priv, void __iomem *ioaddr)
return -EINVAL;
if (plat && plat->fix_soc_reset)
- return plat->fix_soc_reset(plat, ioaddr);
+ return plat->fix_soc_reset(priv, ioaddr);
return stmmac_do_callback(priv, dma, reset, ioaddr);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index cda09cf5dcca..7ca5477be390 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -289,8 +289,7 @@ struct stmmac_priv {
u32 msg_enable;
int wolopts;
int wol_irq;
- bool wol_irq_disabled;
- int clk_csr;
+ u32 gmii_address_bus_config;
struct timer_list eee_ctrl_timer;
int lpi_irq;
u32 tx_lpi_timer;
@@ -374,6 +373,18 @@ enum stmmac_state {
STMMAC_SERVICE_SCHED,
};
+extern const struct dev_pm_ops stmmac_simple_pm_ops;
+
+static inline bool stmmac_wol_enabled_mac(struct stmmac_priv *priv)
+{
+ return priv->plat->pmt && device_may_wakeup(priv->device);
+}
+
+static inline bool stmmac_wol_enabled_phy(struct stmmac_priv *priv)
+{
+ return !priv->plat->pmt && device_may_wakeup(priv->device);
+}
+
int stmmac_mdio_unregister(struct net_device *ndev);
int stmmac_mdio_register(struct net_device *ndev);
int stmmac_mdio_reset(struct mii_bus *mii);
@@ -381,7 +392,6 @@ int stmmac_pcs_setup(struct net_device *ndev);
void stmmac_pcs_clean(struct net_device *ndev);
void stmmac_set_ethtool_ops(struct net_device *netdev);
-int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags);
void stmmac_ptp_register(struct stmmac_priv *priv);
void stmmac_ptp_unregister(struct stmmac_priv *priv);
int stmmac_xdp_open(struct net_device *dev);
@@ -394,7 +404,6 @@ int stmmac_dvr_probe(struct device *device,
struct stmmac_resources *res);
int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt);
int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size);
-int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled);
int stmmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
phy_interface_t interface, int speed);
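
The two new helpers put the "who wakes the system" decision in one place; this is how the suspend path later in this patch uses them (condensed sketch):

	if (stmmac_wol_enabled_mac(priv)) {
		/* MAC-level (PMT) Wake-on-LAN: arm the magic/ucast filters */
		stmmac_pmt(priv, priv->hw, priv->wolopts);
		priv->irq_wake = 1;
	} else if (stmmac_wol_enabled_phy(priv)) {
		/* the PHY wakes us: keep the link but drop to lowest speed */
		phylink_speed_down(priv->phylink, false);
	}
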
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c
index ac6f2e3a3fcd..4b513d27a988 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c
@@ -63,7 +63,7 @@ static int est_configure(struct stmmac_priv *priv, struct stmmac_est *cfg,
EST_GMAC5_PTOV_SHIFT;
}
if (cfg->enable)
- ctrl |= EST_EEST | EST_SSWL;
+ ctrl |= EST_EEST | EST_SSWL | EST_DFBS;
else
ctrl &= ~EST_EEST;
@@ -109,6 +109,10 @@ static void est_irq_status(struct stmmac_priv *priv, struct net_device *dev,
x->mtl_est_hlbs++;
+ for (i = 0; i < txqcnt; i++)
+ if (value & BIT(i))
+ x->mtl_est_txq_hlbs[i]++;
+
/* Clear Interrupt */
writel(value, est_addr + EST_SCH_ERR);
@@ -131,10 +135,9 @@ static void est_irq_status(struct stmmac_priv *priv, struct net_device *dev,
x->mtl_est_hlbf++;
- for (i = 0; i < txqcnt; i++) {
+ for (i = 0; i < txqcnt; i++)
if (feqn & BIT(i))
x->mtl_est_txq_hlbf[i]++;
- }
/* Clear Interrupt */
writel(feqn, est_addr + EST_FRM_SZ_ERR);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.h
index d247fa383a6e..f70221c9c84a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.h
@@ -16,6 +16,7 @@
#define EST_XGMAC_PTOV_MUL 9
#define EST_SSWL BIT(1)
#define EST_EEST BIT(0)
+#define EST_DFBS BIT(5)
#define EST_STATUS 0x00000008
#define EST_GMAC5_BTRL GENMASK(11, 8)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 77758a7299b4..39fa1ec92f82 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -803,7 +803,6 @@ static void stmmac_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct stmmac_priv *priv = netdev_priv(dev);
- u32 support = WAKE_MAGIC | WAKE_UCAST;
if (!device_can_wakeup(priv->device))
return -EOPNOTSUPP;
@@ -816,29 +815,7 @@ static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
return ret;
}
- /* By default almost all GMAC devices support the WoL via
- * magic frame but we can disable it if the HW capability
- * register shows no support for pmt_magic_frame. */
- if ((priv->hw_cap_support) && (!priv->dma_cap.pmt_magic_frame))
- wol->wolopts &= ~WAKE_MAGIC;
-
- if (wol->wolopts & ~support)
- return -EINVAL;
-
- if (wol->wolopts) {
- pr_info("stmmac: wakeup enable\n");
- device_set_wakeup_enable(priv->device, 1);
- /* Avoid unbalanced enable_irq_wake calls */
- if (priv->wol_irq_disabled)
- enable_irq_wake(priv->wol_irq);
- priv->wol_irq_disabled = false;
- } else {
- device_set_wakeup_enable(priv->device, 0);
- /* Avoid unbalanced disable_irq_wake calls */
- if (!priv->wol_irq_disabled)
- disable_irq_wake(priv->wol_irq);
- priv->wol_irq_disabled = true;
- }
+ device_set_wakeup_enable(priv->device, !!wol->wolopts);
mutex_lock(&priv->lock);
priv->wolopts = wol->wolopts;
@@ -852,9 +829,6 @@ static int stmmac_ethtool_op_get_eee(struct net_device *dev,
{
struct stmmac_priv *priv = netdev_priv(dev);
- if (!priv->dma_cap.eee)
- return -EOPNOTSUPP;
-
return phylink_ethtool_get_eee(priv->phylink, edata);
}
@@ -863,9 +837,6 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev,
{
struct stmmac_priv *priv = netdev_priv(dev);
- if (!priv->dma_cap.eee)
- return -EOPNOTSUPP;
-
return phylink_ethtool_set_eee(priv->phylink, edata);
}
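
This simplification leans on devm_pm_set_wake_irq(), registered in the stmmac_main.c hunk below: once the PM core knows the wake IRQ, it toggles enable_irq_wake()/disable_irq_wake() from the device wakeup state itself, so the manual wol_irq_disabled balancing can go. Sketch:

	/* once, at probe time (see the stmmac_hw_init() hunk below) */
	devm_pm_set_wake_irq(priv->device, priv->wol_irq);

	/* ethtool set_wol then reduces to flipping the wakeup state */
	device_set_wakeup_enable(priv->device, !!wol->wolopts);
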
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
index e2840fa241f2..bb110124f21e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -135,7 +135,6 @@ static int init_systime(void __iomem *ioaddr, u32 sec, u32 nsec)
static int config_addend(void __iomem *ioaddr, u32 addend)
{
u32 value;
- int limit;
writel(addend, ioaddr + PTP_TAR);
/* issue command to update the addend value */
@@ -144,23 +143,15 @@ static int config_addend(void __iomem *ioaddr, u32 addend)
writel(value, ioaddr + PTP_TCR);
/* wait for present addend update to complete */
- limit = 10;
- while (limit--) {
- if (!(readl(ioaddr + PTP_TCR) & PTP_TCR_TSADDREG))
- break;
- mdelay(10);
- }
- if (limit < 0)
- return -EBUSY;
-
- return 0;
+ return readl_poll_timeout_atomic(ioaddr + PTP_TCR, value,
+ !(value & PTP_TCR_TSADDREG),
+ 10, 100000);
}
static int adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec,
int add_sub, int gmac4)
{
u32 value;
- int limit;
if (add_sub) {
/* If the new sec value needs to be subtracted with
@@ -187,16 +178,9 @@ static int adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec,
writel(value, ioaddr + PTP_TCR);
/* wait for present system time adjust/update to complete */
- limit = 10;
- while (limit--) {
- if (!(readl(ioaddr + PTP_TCR) & PTP_TCR_TSUPDT))
- break;
- mdelay(10);
- }
- if (limit < 0)
- return -EBUSY;
-
- return 0;
+ return readl_poll_timeout_atomic(ioaddr + PTP_TCR, value,
+ !(value & PTP_TCR_TSUPDT),
+ 10, 100000);
}
static void get_systime(void __iomem *ioaddr, u64 *systime)
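
readl_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) from <linux/iopoll.h> replaces the hand-rolled polling loops; a roughly equivalent open-coded form of the addend wait (a sketch; the helper also performs one final read after the deadline):

	long timeout_us = 100000;

	while (timeout_us > 0) {
		value = readl(ioaddr + PTP_TCR);
		if (!(value & PTP_TCR_TSADDREG))
			return 0;		/* addend update latched */
		udelay(10);
		timeout_us -= 10;
	}
	return -ETIMEDOUT;	/* note: the replaced loop returned -EBUSY */
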
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 7b16d1207b80..650d75b73e0b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -29,6 +29,7 @@
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
+#include <linux/pm_wakeirq.h>
#include <linux/prefetch.h>
#include <linux/pinctrl/consumer.h>
#ifdef CONFIG_DEBUG_FS
@@ -146,38 +147,6 @@ static void stmmac_exit_fs(struct net_device *dev);
#define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
-int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
-{
- int ret = 0;
-
- if (enabled) {
- ret = clk_prepare_enable(priv->plat->stmmac_clk);
- if (ret)
- return ret;
- ret = clk_prepare_enable(priv->plat->pclk);
- if (ret) {
- clk_disable_unprepare(priv->plat->stmmac_clk);
- return ret;
- }
- if (priv->plat->clks_config) {
- ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
- if (ret) {
- clk_disable_unprepare(priv->plat->stmmac_clk);
- clk_disable_unprepare(priv->plat->pclk);
- return ret;
- }
- }
- } else {
- clk_disable_unprepare(priv->plat->stmmac_clk);
- clk_disable_unprepare(priv->plat->pclk);
- if (priv->plat->clks_config)
- priv->plat->clks_config(priv->plat->bsp_priv, enabled);
- }
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
-
/**
* stmmac_set_clk_tx_rate() - set the clock rate for the MAC transmit clock
* @bsp_priv: BSP private data structure (unused)
@@ -312,77 +281,6 @@ static void stmmac_global_err(struct stmmac_priv *priv)
stmmac_service_event_schedule(priv);
}
-/**
- * stmmac_clk_csr_set - dynamically set the MDC clock
- * @priv: driver private structure
- * Description: this is to dynamically set the MDC clock according to the csr
- * clock input.
- * Note:
- * If a specific clk_csr value is passed from the platform
- * this means that the CSR Clock Range selection cannot be
- * changed at run-time and it is fixed (as reported in the driver
- * documentation). Viceversa the driver will try to set the MDC
- * clock dynamically according to the actual clock input.
- */
-static void stmmac_clk_csr_set(struct stmmac_priv *priv)
-{
- unsigned long clk_rate;
-
- clk_rate = clk_get_rate(priv->plat->stmmac_clk);
-
- /* Platform provided default clk_csr would be assumed valid
- * for all other cases except for the below mentioned ones.
- * For values higher than the IEEE 802.3 specified frequency
- * we can not estimate the proper divider as it is not known
- * the frequency of clk_csr_i. So we do not change the default
- * divider.
- */
- if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
- if (clk_rate < CSR_F_35M)
- priv->clk_csr = STMMAC_CSR_20_35M;
- else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
- priv->clk_csr = STMMAC_CSR_35_60M;
- else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
- priv->clk_csr = STMMAC_CSR_60_100M;
- else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
- priv->clk_csr = STMMAC_CSR_100_150M;
- else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
- priv->clk_csr = STMMAC_CSR_150_250M;
- else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
- priv->clk_csr = STMMAC_CSR_250_300M;
- else if ((clk_rate >= CSR_F_300M) && (clk_rate < CSR_F_500M))
- priv->clk_csr = STMMAC_CSR_300_500M;
- else if ((clk_rate >= CSR_F_500M) && (clk_rate < CSR_F_800M))
- priv->clk_csr = STMMAC_CSR_500_800M;
- }
-
- if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
- if (clk_rate > 160000000)
- priv->clk_csr = 0x03;
- else if (clk_rate > 80000000)
- priv->clk_csr = 0x02;
- else if (clk_rate > 40000000)
- priv->clk_csr = 0x01;
- else
- priv->clk_csr = 0;
- }
-
- if (priv->plat->has_xgmac) {
- if (clk_rate > 400000000)
- priv->clk_csr = 0x5;
- else if (clk_rate > 350000000)
- priv->clk_csr = 0x4;
- else if (clk_rate > 300000000)
- priv->clk_csr = 0x3;
- else if (clk_rate > 250000000)
- priv->clk_csr = 0x2;
- else if (clk_rate > 150000000)
- priv->clk_csr = 0x1;
- else
- priv->clk_csr = 0x0;
- }
-}
-
static void print_pkt(unsigned char *buf, int len)
{
pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
@@ -795,16 +693,14 @@ static int stmmac_hwtstamp_get(struct net_device *dev,
* Will be rerun after resuming from suspend, case in which the timestamping
* flags updated by stmmac_hwtstamp_set() also need to be restored.
*/
-int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
+static int stmmac_init_tstamp_counter(struct stmmac_priv *priv,
+ u32 systime_flags)
{
bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
struct timespec64 now;
u32 sec_inc = 0;
u64 temp = 0;
- if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
- return -EOPNOTSUPP;
-
if (!priv->plat->clk_ptp_rate) {
netdev_err(priv->dev, "Invalid PTP clock rate");
return -EINVAL;
@@ -839,16 +735,15 @@ int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
return 0;
}
-EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
/**
- * stmmac_init_ptp - init PTP
+ * stmmac_init_timestamping - initialise timestamping
* @priv: driver private structure
* Description: this is to verify if the HW supports the PTPv1 or PTPv2.
* This is done by looking at the HW cap. register.
* This function also registers the ptp driver.
*/
-static int stmmac_init_ptp(struct stmmac_priv *priv)
+static int stmmac_init_timestamping(struct stmmac_priv *priv)
{
bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
int ret;
@@ -856,9 +751,16 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
if (priv->plat->ptp_clk_freq_config)
priv->plat->ptp_clk_freq_config(priv);
+ if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) {
+ netdev_info(priv->dev, "PTP not supported by HW\n");
+ return -EOPNOTSUPP;
+ }
+
ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
- if (ret)
+ if (ret) {
+ netdev_warn(priv->dev, "PTP init failed\n");
return ret;
+ }
priv->adv_ts = 0;
/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
@@ -884,10 +786,24 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
return 0;
}
+static void stmmac_setup_ptp(struct stmmac_priv *priv)
+{
+ int ret;
+
+ ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
+ if (ret < 0)
+ netdev_warn(priv->dev,
+ "failed to enable PTP reference clock: %pe\n",
+ ERR_PTR(ret));
+
+ if (stmmac_init_timestamping(priv) == 0)
+ stmmac_ptp_register(priv);
+}
+
static void stmmac_release_ptp(struct stmmac_priv *priv)
{
- clk_disable_unprepare(priv->plat->clk_ptp_ref);
stmmac_ptp_unregister(priv);
+ clk_disable_unprepare(priv->plat->clk_ptp_ref);
}
/**
@@ -1169,7 +1085,7 @@ static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
*/
static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
{
- int interface = priv->plat->mac_interface;
+ int interface = priv->plat->phy_interface;
if (priv->dma_cap.pcs) {
if ((interface == PHY_INTERFACE_MODE_RGMII) ||
@@ -1196,13 +1112,19 @@ static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
static int stmmac_init_phy(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
+ int mode = priv->plat->phy_interface;
struct fwnode_handle *phy_fwnode;
struct fwnode_handle *fwnode;
+ struct ethtool_keee eee;
int ret;
if (!phylink_expects_phy(priv->phylink))
return 0;
+ if (priv->hw->xpcs &&
+ xpcs_get_an_mode(priv->hw->xpcs, mode) == DW_AN_C73)
+ return 0;
+
fwnode = priv->plat->port_node;
if (!fwnode)
fwnode = dev_fwnode(priv->device);
@@ -1236,19 +1158,20 @@ static int stmmac_init_phy(struct net_device *dev)
ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
}
- if (ret == 0) {
- struct ethtool_keee eee;
+ if (ret) {
+ netdev_err(priv->dev, "cannot attach to PHY (error: %pe)\n",
+ ERR_PTR(ret));
+ return ret;
+ }
- /* Configure phylib's copy of the LPI timer. Normally,
- * phylink_config.lpi_timer_default would do this, but there is
- * a chance that userspace could change the eee_timer setting
- * via sysfs before the first open. Thus, preserve existing
- * behaviour.
- */
- if (!phylink_ethtool_get_eee(priv->phylink, &eee)) {
- eee.tx_lpi_timer = priv->tx_lpi_timer;
- phylink_ethtool_set_eee(priv->phylink, &eee);
- }
+ /* Configure phylib's copy of the LPI timer. Normally,
+ * phylink_config.lpi_timer_default would do this, but there is a
+ * chance that userspace could change the eee_timer setting via sysfs
+ * before the first open. Thus, preserve existing behaviour.
+ */
+ if (!phylink_ethtool_get_eee(priv->phylink, &eee)) {
+ eee.tx_lpi_timer = priv->tx_lpi_timer;
+ phylink_ethtool_set_eee(priv->phylink, &eee);
}
if (!priv->plat->pmt) {
@@ -1259,7 +1182,7 @@ static int stmmac_init_phy(struct net_device *dev)
device_set_wakeup_enable(priv->device, !!wol.wolopts);
}
- return ret;
+ return 0;
}
static int stmmac_phy_setup(struct stmmac_priv *priv)
@@ -3477,7 +3400,6 @@ static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
/**
* stmmac_hw_setup - setup mac in a usable state.
* @dev : pointer to the device structure.
- * @ptp_register: register PTP if set
* Description:
* this is the main function to setup the HW in a usable state because the
* dma engine is reset, the core registers are configured (e.g. AXI,
@@ -3487,7 +3409,7 @@ static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
* 0 on success and an appropriate (-)ve integer as defined in errno.h
* file on failure.
*/
-static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
+static int stmmac_hw_setup(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
u32 rx_cnt = priv->plat->rx_queues_to_use;
@@ -3558,22 +3480,6 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
stmmac_mmc_setup(priv);
- if (ptp_register) {
- ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
- if (ret < 0)
- netdev_warn(priv->dev,
- "failed to enable PTP reference clock: %pe\n",
- ERR_PTR(ret));
- }
-
- ret = stmmac_init_ptp(priv);
- if (ret == -EOPNOTSUPP)
- netdev_info(priv->dev, "PTP not supported by HW\n");
- else if (ret)
- netdev_warn(priv->dev, "PTP init failed\n");
- else if (ptp_register)
- stmmac_ptp_register(priv);
-
if (priv->use_riwt) {
u32 queue;
@@ -3637,13 +3543,6 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
return 0;
}
-static void stmmac_hw_teardown(struct net_device *dev)
-{
- struct stmmac_priv *priv = netdev_priv(dev);
-
- clk_disable_unprepare(priv->plat->clk_ptp_ref);
-}
-
static void stmmac_free_irq(struct net_device *dev,
enum request_irq_err irq_err, int irq_idx)
{
@@ -3725,7 +3624,6 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
/* Request the Wake IRQ in case of another line
* is used for WoL
*/
- priv->wol_irq_disabled = true;
if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
int_name = priv->int_name_wol;
sprintf(int_name, "%s:%s", dev->name, "wol");
@@ -3886,7 +3784,6 @@ static int stmmac_request_irq_single(struct net_device *dev)
/* Request the Wake IRQ in case of another line
* is used for WoL
*/
- priv->wol_irq_disabled = true;
if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
ret = request_irq(priv->wol_irq, stmmac_interrupt,
IRQF_SHARED, dev->name, dev);
@@ -4035,29 +3932,9 @@ static int __stmmac_open(struct net_device *dev,
struct stmmac_dma_conf *dma_conf)
{
struct stmmac_priv *priv = netdev_priv(dev);
- int mode = priv->plat->phy_interface;
u32 chan;
int ret;
- /* Initialise the tx lpi timer, converting from msec to usec */
- if (!priv->tx_lpi_timer)
- priv->tx_lpi_timer = eee_timer * 1000;
-
- ret = pm_runtime_resume_and_get(priv->device);
- if (ret < 0)
- return ret;
-
- if ((!priv->hw->xpcs ||
- xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
- ret = stmmac_init_phy(dev);
- if (ret) {
- netdev_err(priv->dev,
- "%s: Cannot attach to PHY (error: %d)\n",
- __func__, ret);
- goto init_phy_error;
- }
- }
-
for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
@@ -4075,12 +3952,14 @@ static int __stmmac_open(struct net_device *dev,
}
}
- ret = stmmac_hw_setup(dev, true);
+ ret = stmmac_hw_setup(dev);
if (ret < 0) {
netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
goto init_error;
}
+ stmmac_setup_ptp(priv);
+
stmmac_init_coalesce(priv);
phylink_start(priv->phylink);
@@ -4103,11 +3982,8 @@ irq_error:
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
- stmmac_hw_teardown(dev);
+ stmmac_release_ptp(priv);
init_error:
- phylink_disconnect_phy(priv->phylink);
-init_phy_error:
- pm_runtime_put(priv->device);
return ret;
}
@@ -4117,34 +3993,54 @@ static int stmmac_open(struct net_device *dev)
struct stmmac_dma_conf *dma_conf;
int ret;
+ /* Initialise the tx lpi timer, converting from msec to usec */
+ if (!priv->tx_lpi_timer)
+ priv->tx_lpi_timer = eee_timer * 1000;
+
dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
if (IS_ERR(dma_conf))
return PTR_ERR(dma_conf);
+ ret = pm_runtime_resume_and_get(priv->device);
+ if (ret < 0)
+ goto err_dma_resources;
+
+ ret = stmmac_init_phy(dev);
+ if (ret)
+ goto err_runtime_pm;
+
ret = __stmmac_open(dev, dma_conf);
if (ret)
- free_dma_desc_resources(priv, dma_conf);
+ goto err_disconnect_phy;
kfree(dma_conf);
+
+ return ret;
+
+err_disconnect_phy:
+ phylink_disconnect_phy(priv->phylink);
+err_runtime_pm:
+ pm_runtime_put(priv->device);
+err_dma_resources:
+ free_dma_desc_resources(priv, dma_conf);
+ kfree(dma_conf);
return ret;
}
-/**
- * stmmac_release - close entry point of the driver
- * @dev : device pointer.
- * Description:
- * This is the stop entry point of the driver.
- */
-static int stmmac_release(struct net_device *dev)
+static void __stmmac_release(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
u32 chan;
+ /* If the PHY or MAC has WoL enabled, then the PHY will not be
+ * suspended when phylink_stop() is called below. Set the PHY
+ * to its slowest speed to save power.
+ */
if (device_may_wakeup(priv->device))
phylink_speed_down(priv->phylink, false);
+
/* Stop and disconnect the PHY */
phylink_stop(priv->phylink);
- phylink_disconnect_phy(priv->phylink);
stmmac_disable_all_queues(priv);
@@ -4170,7 +4066,21 @@ static int stmmac_release(struct net_device *dev)
if (stmmac_fpe_supported(priv))
ethtool_mmsv_stop(&priv->fpe_cfg.mmsv);
+}
+
+/**
+ * stmmac_release - close entry point of the driver
+ * @dev : device pointer.
+ * Description:
+ * This is the stop entry point of the driver.
+ */
+static int stmmac_release(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ __stmmac_release(dev);
+ phylink_disconnect_phy(priv->phylink);
pm_runtime_put(priv->device);
return 0;
@@ -5735,7 +5645,8 @@ drain_data:
skb->protocol = eth_type_trans(skb, priv->dev);
- if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
+ if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb) ||
+ (status & csum_none))
skb_checksum_none_assert(skb);
else
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -5967,7 +5878,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
return PTR_ERR(dma_conf);
}
- stmmac_release(dev);
+ __stmmac_release(dev);
ret = __stmmac_open(dev, dma_conf);
if (ret) {
@@ -7057,7 +6968,6 @@ irq_error:
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
- stmmac_hw_teardown(dev);
init_error:
free_dma_desc_resources(priv, &priv->dma_conf);
dma_desc_error:
@@ -7242,7 +7152,6 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
priv->plat->enh_desc = priv->dma_cap.enh_desc;
priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
- priv->hw->pmt = priv->plat->pmt;
if (priv->dma_cap.hash_tb_sz) {
priv->hw->multicast_filter_bins =
(BIT(priv->dma_cap.hash_tb_sz) << 5);
@@ -7280,6 +7189,7 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
if (priv->plat->pmt) {
dev_info(priv->device, "Wake-Up On Lan supported\n");
device_set_wakeup_capable(priv->device, 1);
+ devm_pm_set_wake_irq(priv->device, priv->wol_irq);
}
if (priv->dma_cap.tsoen)
@@ -7712,17 +7622,6 @@ int stmmac_dvr_probe(struct device *device,
stmmac_fpe_init(priv);
- /* If a specific clk_csr value is passed from the platform
- * this means that the CSR Clock Range selection cannot be
- * changed at run-time and it is fixed. Viceversa the driver'll try to
- * set the MDC clock dynamically according to the csr actual
- * clock input.
- */
- if (priv->plat->clk_csr >= 0)
- priv->clk_csr = priv->plat->clk_csr;
- else
- stmmac_clk_csr_set(priv);
-
stmmac_check_pcs_mode(priv);
pm_runtime_get_noresume(device);
@@ -7860,7 +7759,7 @@ int stmmac_suspend(struct device *dev)
priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
/* Enable Power down mode by programming the PMT regs */
- if (device_may_wakeup(priv->device) && priv->plat->pmt) {
+ if (stmmac_wol_enabled_mac(priv)) {
stmmac_pmt(priv, priv->hw, priv->wolopts);
priv->irq_wake = 1;
} else {
@@ -7871,16 +7770,18 @@ int stmmac_suspend(struct device *dev)
mutex_unlock(&priv->lock);
rtnl_lock();
- if (device_may_wakeup(priv->device) && !priv->plat->pmt)
+ if (stmmac_wol_enabled_phy(priv))
phylink_speed_down(priv->phylink, false);
- phylink_suspend(priv->phylink,
- device_may_wakeup(priv->device) && priv->plat->pmt);
+ phylink_suspend(priv->phylink, stmmac_wol_enabled_mac(priv));
rtnl_unlock();
if (stmmac_fpe_supported(priv))
ethtool_mmsv_stop(&priv->fpe_cfg.mmsv);
+ if (priv->plat->suspend)
+ return priv->plat->suspend(dev, priv->plat->bsp_priv);
+
return 0;
}
EXPORT_SYMBOL_GPL(stmmac_suspend);
@@ -7933,6 +7834,12 @@ int stmmac_resume(struct device *dev)
struct stmmac_priv *priv = netdev_priv(ndev);
int ret;
+ if (priv->plat->resume) {
+ ret = priv->plat->resume(dev, priv->plat->bsp_priv);
+ if (ret)
+ return ret;
+ }
+
if (!netif_running(ndev))
return 0;
@@ -7942,7 +7849,7 @@ int stmmac_resume(struct device *dev)
* this bit because it can generate problems while resuming
* from another devices (e.g. serial console).
*/
- if (device_may_wakeup(priv->device) && priv->plat->pmt) {
+ if (stmmac_wol_enabled_mac(priv)) {
mutex_lock(&priv->lock);
stmmac_pmt(priv, priv->hw, 0);
mutex_unlock(&priv->lock);
@@ -7977,7 +7884,16 @@ int stmmac_resume(struct device *dev)
stmmac_free_tx_skbufs(priv);
stmmac_clear_descriptors(priv, &priv->dma_conf);
- stmmac_hw_setup(ndev, false);
+ ret = stmmac_hw_setup(ndev);
+ if (ret < 0) {
+ netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
+ mutex_unlock(&priv->lock);
+ rtnl_unlock();
+ return ret;
+ }
+
+ stmmac_init_timestamping(priv);
+
stmmac_init_coalesce(priv);
phylink_rx_clk_stop_block(priv->phylink);
stmmac_set_rx_mode(ndev);
@@ -7995,7 +7911,7 @@ int stmmac_resume(struct device *dev)
* workqueue thread, which will race with initialisation.
*/
phylink_resume(priv->phylink);
- if (device_may_wakeup(priv->device) && !priv->plat->pmt)
+ if (stmmac_wol_enabled_phy(priv))
phylink_speed_up(priv->phylink);
rtnl_unlock();
@@ -8006,6 +7922,10 @@ int stmmac_resume(struct device *dev)
}
EXPORT_SYMBOL_GPL(stmmac_resume);
+/* Not EXPORT_GPL_SIMPLE_DEV_PM_OPS(): that macro drops the export when CONFIG_PM=n */
+DEFINE_SIMPLE_DEV_PM_OPS(stmmac_simple_pm_ops, stmmac_suspend, stmmac_resume);
+EXPORT_SYMBOL_GPL(stmmac_simple_pm_ops);
+
#ifndef MODULE
static int __init stmmac_cmdline_opt(char *str)
{
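
Judging by the PCI and platform implementations later in this patch, the new plat->suspend/resume hooks have the signature int (*)(struct device *dev, void *bsp_priv) and run after stmmac_suspend() and before stmmac_resume() respectively; hypothetical glue wiring (all foo_* names are illustrative):

	struct foo_priv {			/* hypothetical glue state */
		struct clk *glue_clk;
	};

	static int foo_dwmac_suspend(struct device *dev, void *bsp_priv)
	{
		struct foo_priv *foo = bsp_priv;

		clk_disable_unprepare(foo->glue_clk);	/* glue-only power-down */
		return 0;
	}

	/* in the glue's probe, before handing plat_dat to the core: */
	plat_dat->suspend = foo_dwmac_suspend;
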
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 836f2848dfeb..f408737f6fc7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -23,9 +23,9 @@
#include "dwxgmac2.h"
#include "stmmac.h"
-#define MII_BUSY 0x00000001
-#define MII_WRITE 0x00000002
-#define MII_DATA_MASK GENMASK(15, 0)
+#define MII_ADDR_GBUSY BIT(0)
+#define MII_ADDR_GWRITE BIT(1)
+#define MII_DATA_GD_MASK GENMASK(15, 0)
/* GMAC4 defines */
#define MII_GMAC4_GOC_SHIFT 2
@@ -45,6 +45,16 @@
#define MII_XGMAC_PA_SHIFT 16
#define MII_XGMAC_DA_SHIFT 21
+static int stmmac_mdio_wait(void __iomem *reg, u32 mask)
+{
+ u32 v;
+
+ if (readl_poll_timeout(reg, v, !(v & mask), 100, 10000))
+ return -EBUSY;
+
+ return 0;
+}
+
static void stmmac_xgmac2_c45_format(struct stmmac_priv *priv, int phyaddr,
int devad, int phyreg, u32 *hw_addr)
{
@@ -83,7 +93,6 @@ static int stmmac_xgmac2_mdio_read(struct stmmac_priv *priv, u32 addr,
{
unsigned int mii_address = priv->hw->mii.addr;
unsigned int mii_data = priv->hw->mii.data;
- u32 tmp;
int ret;
ret = pm_runtime_resume_and_get(priv->device);
@@ -91,33 +100,25 @@ static int stmmac_xgmac2_mdio_read(struct stmmac_priv *priv, u32 addr,
return ret;
/* Wait until any existing MII operation is complete */
- if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
- !(tmp & MII_XGMAC_BUSY), 100, 10000)) {
- ret = -EBUSY;
+ ret = stmmac_mdio_wait(priv->ioaddr + mii_data, MII_XGMAC_BUSY);
+ if (ret)
goto err_disable_clks;
- }
- value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
- & priv->hw->mii.clk_csr_mask;
- value |= MII_XGMAC_READ;
+ value |= priv->gmii_address_bus_config | MII_XGMAC_READ;
/* Wait until any existing MII operation is complete */
- if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
- !(tmp & MII_XGMAC_BUSY), 100, 10000)) {
- ret = -EBUSY;
+ ret = stmmac_mdio_wait(priv->ioaddr + mii_data, MII_XGMAC_BUSY);
+ if (ret)
goto err_disable_clks;
- }
/* Set the MII address register to read */
writel(addr, priv->ioaddr + mii_address);
writel(value, priv->ioaddr + mii_data);
/* Wait until any existing MII operation is complete */
- if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
- !(tmp & MII_XGMAC_BUSY), 100, 10000)) {
- ret = -EBUSY;
+ ret = stmmac_mdio_wait(priv->ioaddr + mii_data, MII_XGMAC_BUSY);
+ if (ret)
goto err_disable_clks;
- }
/* Read the data from the MII data register */
ret = (int)readl(priv->ioaddr + mii_data) & GENMASK(15, 0);
@@ -131,12 +132,9 @@ err_disable_clks:
static int stmmac_xgmac2_mdio_read_c22(struct mii_bus *bus, int phyaddr,
int phyreg)
{
- struct net_device *ndev = bus->priv;
- struct stmmac_priv *priv;
+ struct stmmac_priv *priv = netdev_priv(bus->priv);
u32 addr;
- priv = netdev_priv(ndev);
-
/* Until ver 2.20 XGMAC does not support C22 addr >= 4 */
if (priv->synopsys_id < DWXGMAC_CORE_2_20 &&
phyaddr > MII_XGMAC_MAX_C22ADDR)
@@ -150,12 +148,9 @@ static int stmmac_xgmac2_mdio_read_c22(struct mii_bus *bus, int phyaddr,
static int stmmac_xgmac2_mdio_read_c45(struct mii_bus *bus, int phyaddr,
int devad, int phyreg)
{
- struct net_device *ndev = bus->priv;
- struct stmmac_priv *priv;
+ struct stmmac_priv *priv = netdev_priv(bus->priv);
u32 addr;
- priv = netdev_priv(ndev);
-
stmmac_xgmac2_c45_format(priv, phyaddr, devad, phyreg, &addr);
return stmmac_xgmac2_mdio_read(priv, addr, MII_XGMAC_BUSY);
@@ -166,7 +161,6 @@ static int stmmac_xgmac2_mdio_write(struct stmmac_priv *priv, u32 addr,
{
unsigned int mii_address = priv->hw->mii.addr;
unsigned int mii_data = priv->hw->mii.data;
- u32 tmp;
int ret;
ret = pm_runtime_resume_and_get(priv->device);
@@ -174,31 +168,23 @@ static int stmmac_xgmac2_mdio_write(struct stmmac_priv *priv, u32 addr,
return ret;
/* Wait until any existing MII operation is complete */
- if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
- !(tmp & MII_XGMAC_BUSY), 100, 10000)) {
- ret = -EBUSY;
+ ret = stmmac_mdio_wait(priv->ioaddr + mii_data, MII_XGMAC_BUSY);
+ if (ret)
goto err_disable_clks;
- }
- value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
- & priv->hw->mii.clk_csr_mask;
- value |= phydata;
- value |= MII_XGMAC_WRITE;
+ value |= priv->gmii_address_bus_config | phydata | MII_XGMAC_WRITE;
/* Wait until any existing MII operation is complete */
- if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
- !(tmp & MII_XGMAC_BUSY), 100, 10000)) {
- ret = -EBUSY;
+ ret = stmmac_mdio_wait(priv->ioaddr + mii_data, MII_XGMAC_BUSY);
+ if (ret)
goto err_disable_clks;
- }
/* Set the MII address register to write */
writel(addr, priv->ioaddr + mii_address);
writel(value, priv->ioaddr + mii_data);
/* Wait until any existing MII operation is complete */
- ret = readl_poll_timeout(priv->ioaddr + mii_data, tmp,
- !(tmp & MII_XGMAC_BUSY), 100, 10000);
+ ret = stmmac_mdio_wait(priv->ioaddr + mii_data, MII_XGMAC_BUSY);
err_disable_clks:
pm_runtime_put(priv->device);
@@ -209,12 +195,9 @@ err_disable_clks:
static int stmmac_xgmac2_mdio_write_c22(struct mii_bus *bus, int phyaddr,
int phyreg, u16 phydata)
{
- struct net_device *ndev = bus->priv;
- struct stmmac_priv *priv;
+ struct stmmac_priv *priv = netdev_priv(bus->priv);
u32 addr;
- priv = netdev_priv(ndev);
-
/* Until ver 2.20 XGMAC does not support C22 addr >= 4 */
if (priv->synopsys_id < DWXGMAC_CORE_2_20 &&
phyaddr > MII_XGMAC_MAX_C22ADDR)
@@ -229,37 +212,78 @@ static int stmmac_xgmac2_mdio_write_c22(struct mii_bus *bus, int phyaddr,
static int stmmac_xgmac2_mdio_write_c45(struct mii_bus *bus, int phyaddr,
int devad, int phyreg, u16 phydata)
{
- struct net_device *ndev = bus->priv;
- struct stmmac_priv *priv;
+ struct stmmac_priv *priv = netdev_priv(bus->priv);
u32 addr;
- priv = netdev_priv(ndev);
-
stmmac_xgmac2_c45_format(priv, phyaddr, devad, phyreg, &addr);
return stmmac_xgmac2_mdio_write(priv, addr, MII_XGMAC_BUSY,
phydata);
}
-static int stmmac_mdio_read(struct stmmac_priv *priv, int data, u32 value)
+/**
+ * stmmac_mdio_format_addr() - format the address register
+ * @priv: struct stmmac_priv pointer
+ * @pa: 5-bit MDIO package address
+ * @gr: 5-bit MDIO register address (C22) or MDIO device address (C45)
+ *
+ * Return: formatted address register
+ */
+static u32 stmmac_mdio_format_addr(struct stmmac_priv *priv,
+ unsigned int pa, unsigned int gr)
{
- unsigned int mii_address = priv->hw->mii.addr;
- unsigned int mii_data = priv->hw->mii.data;
- u32 v;
+ const struct mii_regs *mii_regs = &priv->hw->mii;
- if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
- 100, 10000))
- return -EBUSY;
+ return ((pa << mii_regs->addr_shift) & mii_regs->addr_mask) |
+ ((gr << mii_regs->reg_shift) & mii_regs->reg_mask) |
+ priv->gmii_address_bus_config |
+ MII_ADDR_GBUSY;
+}
- writel(data, priv->ioaddr + mii_data);
- writel(value, priv->ioaddr + mii_address);
+static int stmmac_mdio_access(struct stmmac_priv *priv, unsigned int pa,
+ unsigned int gr, u32 cmd, u32 data, bool read)
+{
+ void __iomem *mii_address = priv->ioaddr + priv->hw->mii.addr;
+ void __iomem *mii_data = priv->ioaddr + priv->hw->mii.data;
+ u32 addr;
+ int ret;
- if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
- 100, 10000))
- return -EBUSY;
+ ret = pm_runtime_resume_and_get(priv->device);
+ if (ret < 0)
+ return ret;
- /* Read the data from the MII data register */
- return readl(priv->ioaddr + mii_data) & MII_DATA_MASK;
+ ret = stmmac_mdio_wait(mii_address, MII_ADDR_GBUSY);
+ if (ret)
+ goto out;
+
+ addr = stmmac_mdio_format_addr(priv, pa, gr) | cmd;
+
+ writel(data, mii_data);
+ writel(addr, mii_address);
+
+ ret = stmmac_mdio_wait(mii_address, MII_ADDR_GBUSY);
+ if (ret)
+ goto out;
+
+ /* Read the data from the MII data register if in read mode */
+ ret = read ? readl(mii_data) & MII_DATA_GD_MASK : 0;
+
+out:
+ pm_runtime_put(priv->device);
+
+ return ret;
+}
+
+static int stmmac_mdio_read(struct stmmac_priv *priv, unsigned int pa,
+ unsigned int gr, u32 cmd, int data)
+{
+ return stmmac_mdio_access(priv, pa, gr, cmd, data, true);
+}
+
+static int stmmac_mdio_write(struct stmmac_priv *priv, unsigned int pa,
+ unsigned int gr, u32 cmd, int data)
+{
+ return stmmac_mdio_access(priv, pa, gr, cmd, data, false);
}
/**
@@ -274,28 +298,15 @@ static int stmmac_mdio_read(struct stmmac_priv *priv, int data, u32 value)
*/
static int stmmac_mdio_read_c22(struct mii_bus *bus, int phyaddr, int phyreg)
{
- struct net_device *ndev = bus->priv;
- struct stmmac_priv *priv = netdev_priv(ndev);
- u32 value = MII_BUSY;
- int data = 0;
-
- data = pm_runtime_resume_and_get(priv->device);
- if (data < 0)
- return data;
-
- value |= (phyaddr << priv->hw->mii.addr_shift)
- & priv->hw->mii.addr_mask;
- value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask;
- value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
- & priv->hw->mii.clk_csr_mask;
- if (priv->plat->has_gmac4)
- value |= MII_GMAC4_READ;
+ struct stmmac_priv *priv = netdev_priv(bus->priv);
+ u32 cmd;
- data = stmmac_mdio_read(priv, data, value);
-
- pm_runtime_put(priv->device);
+ if (priv->plat->has_gmac4)
+ cmd = MII_GMAC4_READ;
+ else
+ cmd = 0;
- return data;
+ return stmmac_mdio_read(priv, phyaddr, phyreg, cmd, 0);
}
/**
@@ -312,54 +323,11 @@ static int stmmac_mdio_read_c22(struct mii_bus *bus, int phyaddr, int phyreg)
static int stmmac_mdio_read_c45(struct mii_bus *bus, int phyaddr, int devad,
int phyreg)
{
- struct net_device *ndev = bus->priv;
- struct stmmac_priv *priv = netdev_priv(ndev);
- u32 value = MII_BUSY;
- int data = 0;
-
- data = pm_runtime_get_sync(priv->device);
- if (data < 0) {
- pm_runtime_put_noidle(priv->device);
- return data;
- }
-
- value |= (phyaddr << priv->hw->mii.addr_shift)
- & priv->hw->mii.addr_mask;
- value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask;
- value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
- & priv->hw->mii.clk_csr_mask;
- value |= MII_GMAC4_READ;
- value |= MII_GMAC4_C45E;
- value &= ~priv->hw->mii.reg_mask;
- value |= (devad << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask;
+ struct stmmac_priv *priv = netdev_priv(bus->priv);
+ int data = phyreg << MII_GMAC4_REG_ADDR_SHIFT;
+ u32 cmd = MII_GMAC4_READ | MII_GMAC4_C45E;
- data |= phyreg << MII_GMAC4_REG_ADDR_SHIFT;
-
- data = stmmac_mdio_read(priv, data, value);
-
- pm_runtime_put(priv->device);
-
- return data;
-}
-
-static int stmmac_mdio_write(struct stmmac_priv *priv, int data, u32 value)
-{
- unsigned int mii_address = priv->hw->mii.addr;
- unsigned int mii_data = priv->hw->mii.data;
- u32 v;
-
- /* Wait until any existing MII operation is complete */
- if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
- 100, 10000))
- return -EBUSY;
-
- /* Set the MII address register to write */
- writel(data, priv->ioaddr + mii_data);
- writel(value, priv->ioaddr + mii_address);
-
- /* Wait until any existing MII operation is complete */
- return readl_poll_timeout(priv->ioaddr + mii_address, v,
- !(v & MII_BUSY), 100, 10000);
+ return stmmac_mdio_read(priv, phyaddr, devad, cmd, data);
}
/**
@@ -373,31 +341,15 @@ static int stmmac_mdio_write(struct stmmac_priv *priv, int data, u32 value)
static int stmmac_mdio_write_c22(struct mii_bus *bus, int phyaddr, int phyreg,
u16 phydata)
{
- struct net_device *ndev = bus->priv;
- struct stmmac_priv *priv = netdev_priv(ndev);
- int ret, data = phydata;
- u32 value = MII_BUSY;
-
- ret = pm_runtime_resume_and_get(priv->device);
- if (ret < 0)
- return ret;
-
- value |= (phyaddr << priv->hw->mii.addr_shift)
- & priv->hw->mii.addr_mask;
- value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask;
+ struct stmmac_priv *priv = netdev_priv(bus->priv);
+ u32 cmd;
- value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
- & priv->hw->mii.clk_csr_mask;
if (priv->plat->has_gmac4)
- value |= MII_GMAC4_WRITE;
+ cmd = MII_GMAC4_WRITE;
else
- value |= MII_WRITE;
+ cmd = MII_ADDR_GWRITE;
- ret = stmmac_mdio_write(priv, data, value);
-
- pm_runtime_put(priv->device);
-
- return ret;
+ return stmmac_mdio_write(priv, phyaddr, phyreg, cmd, phydata);
}
/**
@@ -412,36 +364,13 @@ static int stmmac_mdio_write_c22(struct mii_bus *bus, int phyaddr, int phyreg,
static int stmmac_mdio_write_c45(struct mii_bus *bus, int phyaddr,
int devad, int phyreg, u16 phydata)
{
- struct net_device *ndev = bus->priv;
- struct stmmac_priv *priv = netdev_priv(ndev);
- int ret, data = phydata;
- u32 value = MII_BUSY;
-
- ret = pm_runtime_get_sync(priv->device);
- if (ret < 0) {
- pm_runtime_put_noidle(priv->device);
- return ret;
- }
-
- value |= (phyaddr << priv->hw->mii.addr_shift)
- & priv->hw->mii.addr_mask;
- value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask;
-
- value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
- & priv->hw->mii.clk_csr_mask;
-
- value |= MII_GMAC4_WRITE;
- value |= MII_GMAC4_C45E;
- value &= ~priv->hw->mii.reg_mask;
- value |= (devad << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask;
+ struct stmmac_priv *priv = netdev_priv(bus->priv);
+ u32 cmd = MII_GMAC4_WRITE | MII_GMAC4_C45E;
+ int data = phydata;
data |= phyreg << MII_GMAC4_REG_ADDR_SHIFT;
- ret = stmmac_mdio_write(priv, data, value);
-
- pm_runtime_put(priv->device);
-
- return ret;
+ return stmmac_mdio_write(priv, phyaddr, devad, cmd, data);
}
/**
@@ -452,8 +381,7 @@ static int stmmac_mdio_write_c45(struct mii_bus *bus, int phyaddr,
int stmmac_mdio_reset(struct mii_bus *bus)
{
#if IS_ENABLED(CONFIG_STMMAC_PLATFORM)
- struct net_device *ndev = bus->priv;
- struct stmmac_priv *priv = netdev_priv(ndev);
+ struct stmmac_priv *priv = netdev_priv(bus->priv);
unsigned int mii_address = priv->hw->mii.addr;
#ifdef CONFIG_OF
@@ -497,12 +425,11 @@ int stmmac_mdio_reset(struct mii_bus *bus)
int stmmac_pcs_setup(struct net_device *ndev)
{
+ struct stmmac_priv *priv = netdev_priv(ndev);
struct fwnode_handle *devnode, *pcsnode;
struct dw_xpcs *xpcs = NULL;
- struct stmmac_priv *priv;
int addr, ret;
- priv = netdev_priv(ndev);
devnode = priv->plat->port_node;
if (priv->plat->pcs_init) {
@@ -547,6 +474,102 @@ void stmmac_pcs_clean(struct net_device *ndev)
}
/**
+ * stmmac_clk_csr_set - dynamically set the MDC clock
+ * @priv: driver private structure
+ * Description: dynamically set the MDC clock according to the CSR
+ * clock input.
+ * Return: MII register CR field value
+ * Note:
+ * If a specific clk_csr value is passed from the platform, the
+ * CSR Clock Range selection cannot be changed at run time and is
+ * fixed (as reported in the driver documentation). Otherwise the
+ * driver will try to set the MDC clock dynamically according to
+ * the actual clock input.
+ */
+static u32 stmmac_clk_csr_set(struct stmmac_priv *priv)
+{
+ unsigned long clk_rate;
+ u32 value = ~0;
+
+ clk_rate = clk_get_rate(priv->plat->stmmac_clk);
+
+	/* A platform-provided default clk_csr is assumed valid for all
+	 * cases except the ones handled below. For rates above the
+	 * IEEE 802.3 specified frequencies we cannot estimate a proper
+	 * divider, since the clk_csr_i frequency is unknown, so the
+	 * default divider is left unchanged.
+	 */
+ if (clk_rate < CSR_F_35M)
+ value = STMMAC_CSR_20_35M;
+ else if (clk_rate < CSR_F_60M)
+ value = STMMAC_CSR_35_60M;
+ else if (clk_rate < CSR_F_100M)
+ value = STMMAC_CSR_60_100M;
+ else if (clk_rate < CSR_F_150M)
+ value = STMMAC_CSR_100_150M;
+ else if (clk_rate < CSR_F_250M)
+ value = STMMAC_CSR_150_250M;
+ else if (clk_rate <= CSR_F_300M)
+ value = STMMAC_CSR_250_300M;
+ else if (clk_rate < CSR_F_500M)
+ value = STMMAC_CSR_300_500M;
+ else if (clk_rate < CSR_F_800M)
+ value = STMMAC_CSR_500_800M;
+
+ if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
+ if (clk_rate > 160000000)
+ value = 0x03;
+ else if (clk_rate > 80000000)
+ value = 0x02;
+ else if (clk_rate > 40000000)
+ value = 0x01;
+ else
+ value = 0;
+ }
+
+ if (priv->plat->has_xgmac) {
+ if (clk_rate > 400000000)
+ value = 0x5;
+ else if (clk_rate > 350000000)
+ value = 0x4;
+ else if (clk_rate > 300000000)
+ value = 0x3;
+ else if (clk_rate > 250000000)
+ value = 0x2;
+ else if (clk_rate > 150000000)
+ value = 0x1;
+ else
+ value = 0x0;
+ }
+
+ return value;
+}
+
+static void stmmac_mdio_bus_config(struct stmmac_priv *priv)
+{
+ u32 value;
+
+ /* If a specific clk_csr value is passed from the platform, this means
+ * that the CSR Clock Range value should not be computed from the CSR
+ * clock.
+ */
+ if (priv->plat->clk_csr >= 0)
+ value = priv->plat->clk_csr;
+ else
+ value = stmmac_clk_csr_set(priv);
+
+ value <<= priv->hw->mii.clk_csr_shift;
+
+ if (value & ~priv->hw->mii.clk_csr_mask)
+ dev_warn(priv->device,
+ "clk_csr value out of range (0x%08x exceeds mask 0x%08x), truncating\n",
+ value, priv->hw->mii.clk_csr_mask);
+
+ priv->gmii_address_bus_config = value & priv->hw->mii.clk_csr_mask;
+}
+
+/**
* stmmac_mdio_register
* @ndev: net device structure
* Description: it registers the MII bus
@@ -566,6 +589,8 @@ int stmmac_mdio_register(struct net_device *ndev)
if (!mdio_bus_data)
return 0;
+ stmmac_mdio_bus_config(priv);
+
new_bus = mdiobus_alloc();
if (!new_bus)
return -ENOMEM;
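
A worked example of the precomputed bus config, assuming clk_get_rate() returns 125 MHz on a GMAC whose CR field sits at bits 5:2 (so clk_csr_shift == 2; both values are assumptions here): 125 MHz falls in the 100-150 MHz bracket, giving STMMAC_CSR_100_150M:

	value = STMMAC_CSR_100_150M;		/* CR = 0x1, MDC = clk_csr_i / 62 */
	value <<= priv->hw->mii.clk_csr_shift;
	priv->gmii_address_bus_config = value & priv->hw->mii.clk_csr_mask;
	/* 125 MHz / 62 ~= 2.0 MHz, under the 2.5 MHz IEEE 802.3 MDC ceiling */
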
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 9c1b54b701f7..4e3aa611fda8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -21,7 +21,8 @@ struct stmmac_pci_info {
static void common_default_data(struct plat_stmmacenet_data *plat)
{
- plat->clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
+ /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
+ plat->clk_csr = STMMAC_CSR_20_35M;
plat->has_gmac = 1;
plat->force_sf_dma_mode = 1;
@@ -74,7 +75,7 @@ static int snps_gmac5_default_data(struct pci_dev *pdev,
{
int i;
- plat->clk_csr = 5;
+ plat->clk_csr = STMMAC_CSR_250_300M;
plat->has_gmac4 = 1;
plat->force_sf_dma_mode = 1;
plat->flags |= STMMAC_FLAG_TSO_EN;
@@ -138,6 +139,37 @@ static const struct stmmac_pci_info snps_gmac5_pci_info = {
.setup = snps_gmac5_default_data,
};
+static int stmmac_pci_suspend(struct device *dev, void *bsp_priv)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+
+ ret = pci_save_state(pdev);
+ if (ret)
+ return ret;
+
+ pci_disable_device(pdev);
+ pci_wake_from_d3(pdev, true);
+ return 0;
+}
+
+static int stmmac_pci_resume(struct device *dev, void *bsp_priv)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+
+ pci_restore_state(pdev);
+ pci_set_power_state(pdev, PCI_D0);
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ pci_set_master(pdev);
+
+ return 0;
+}
+
/**
* stmmac_pci_probe
*
@@ -217,6 +249,9 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
plat->safety_feat_cfg->prtyen = 1;
plat->safety_feat_cfg->tmouten = 1;
+ plat->suspend = stmmac_pci_suspend;
+ plat->resume = stmmac_pci_resume;
+
return stmmac_dvr_probe(&pdev->dev, plat, &res);
}
@@ -231,43 +266,6 @@ static void stmmac_pci_remove(struct pci_dev *pdev)
stmmac_dvr_remove(&pdev->dev);
}
-static int __maybe_unused stmmac_pci_suspend(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- int ret;
-
- ret = stmmac_suspend(dev);
- if (ret)
- return ret;
-
- ret = pci_save_state(pdev);
- if (ret)
- return ret;
-
- pci_disable_device(pdev);
- pci_wake_from_d3(pdev, true);
- return 0;
-}
-
-static int __maybe_unused stmmac_pci_resume(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- int ret;
-
- pci_restore_state(pdev);
- pci_set_power_state(pdev, PCI_D0);
-
- ret = pci_enable_device(pdev);
- if (ret)
- return ret;
-
- pci_set_master(pdev);
-
- return stmmac_resume(dev);
-}
-
-static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_pci_suspend, stmmac_pci_resume);
-
/* synthetic ID, no official vendor */
#define PCI_VENDOR_ID_STMMAC 0x0700
@@ -289,7 +287,7 @@ static struct pci_driver stmmac_pci_driver = {
.probe = stmmac_pci_probe,
.remove = stmmac_pci_remove,
.driver = {
- .pm = &stmmac_pm_ops,
+ .pm = &stmmac_simple_pm_ops,
},
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 030fcf1b5993..27bcaae07a7f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -453,8 +453,12 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
return ERR_PTR(phy_mode);
plat->phy_interface = phy_mode;
+
rc = stmmac_of_get_mac_mode(np);
- plat->mac_interface = rc < 0 ? plat->phy_interface : rc;
+ if (rc >= 0 && rc != phy_mode)
+ dev_warn(&pdev->dev,
+ "\"mac-mode\" property used for %s but differs to \"phy-mode\" of %s, and will be ignored. Please report.\n",
+ phy_modes(rc), phy_modes(phy_mode));
/* Some wrapper drivers still rely on phy_node. Let's save it while
* they are not converted to phylink. */
@@ -811,6 +815,22 @@ static void stmmac_pltfr_exit(struct platform_device *pdev,
plat->exit(pdev, plat->bsp_priv);
}
+static int stmmac_plat_suspend(struct device *dev, void *bsp_priv)
+{
+ struct stmmac_priv *priv = netdev_priv(dev_get_drvdata(dev));
+
+ stmmac_pltfr_exit(to_platform_device(dev), priv->plat);
+
+ return 0;
+}
+
+static int stmmac_plat_resume(struct device *dev, void *bsp_priv)
+{
+ struct stmmac_priv *priv = netdev_priv(dev_get_drvdata(dev));
+
+ return stmmac_pltfr_init(to_platform_device(dev), priv->plat);
+}
+
/**
* stmmac_pltfr_probe
* @pdev: platform device pointer
@@ -825,6 +845,11 @@ int stmmac_pltfr_probe(struct platform_device *pdev,
{
int ret;
+ if (!plat->suspend && plat->exit)
+ plat->suspend = stmmac_plat_suspend;
+ if (!plat->resume && plat->init)
+ plat->resume = stmmac_plat_resume;
+
ret = stmmac_pltfr_init(pdev, plat);
if (ret)
return ret;
@@ -886,45 +911,36 @@ void stmmac_pltfr_remove(struct platform_device *pdev)
}
EXPORT_SYMBOL_GPL(stmmac_pltfr_remove);
-/**
- * stmmac_pltfr_suspend
- * @dev: device pointer
- * Description: this function is invoked when suspend the driver and it direcly
- * call the main suspend function and then, if required, on some platform, it
- * can call an exit helper.
- */
-static int __maybe_unused stmmac_pltfr_suspend(struct device *dev)
-{
- int ret;
- struct net_device *ndev = dev_get_drvdata(dev);
- struct stmmac_priv *priv = netdev_priv(ndev);
- struct platform_device *pdev = to_platform_device(dev);
-
- ret = stmmac_suspend(dev);
- stmmac_pltfr_exit(pdev, priv->plat);
-
- return ret;
-}
-
-/**
- * stmmac_pltfr_resume
- * @dev: device pointer
- * Description: this function is invoked when resume the driver before calling
- * the main resume function, on some platforms, it can call own init helper
- * if required.
- */
-static int __maybe_unused stmmac_pltfr_resume(struct device *dev)
+static int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
{
- struct net_device *ndev = dev_get_drvdata(dev);
- struct stmmac_priv *priv = netdev_priv(ndev);
- struct platform_device *pdev = to_platform_device(dev);
+ struct plat_stmmacenet_data *plat_dat = priv->plat;
int ret;
- ret = stmmac_pltfr_init(pdev, priv->plat);
- if (ret)
- return ret;
+ if (enabled) {
+ ret = clk_prepare_enable(plat_dat->stmmac_clk);
+ if (ret)
+ return ret;
+ ret = clk_prepare_enable(plat_dat->pclk);
+ if (ret) {
+ clk_disable_unprepare(plat_dat->stmmac_clk);
+ return ret;
+ }
+ if (plat_dat->clks_config) {
+ ret = plat_dat->clks_config(plat_dat->bsp_priv, enabled);
+ if (ret) {
+ clk_disable_unprepare(plat_dat->stmmac_clk);
+ clk_disable_unprepare(plat_dat->pclk);
+ return ret;
+ }
+ }
+ } else {
+ clk_disable_unprepare(plat_dat->stmmac_clk);
+ clk_disable_unprepare(plat_dat->pclk);
+ if (plat_dat->clks_config)
+ plat_dat->clks_config(plat_dat->bsp_priv, enabled);
+ }
- return stmmac_resume(dev);
+ return 0;
}
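For context, the natural callers of this helper are the runtime-PM hooks that follow; a sketch of that pairing, assuming they only need to gate the bus clocks (hypothetical names, not the actual handlers):

static int stmmac_runtime_suspend_sketch(struct device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev_get_drvdata(dev));

	/* Gate stmmac_clk/pclk (plus any BSP clocks via clks_config)
	 * while the device is runtime-suspended.
	 */
	return stmmac_bus_clks_config(priv, false);
}

static int stmmac_runtime_resume_sketch(struct device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev_get_drvdata(dev));

	return stmmac_bus_clks_config(priv, true);
}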
static int __maybe_unused stmmac_runtime_suspend(struct device *dev)
@@ -954,7 +970,7 @@ static int __maybe_unused stmmac_pltfr_noirq_suspend(struct device *dev)
if (!netif_running(ndev))
return 0;
- if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
+ if (!stmmac_wol_enabled_mac(priv)) {
/* Disable clock in case of PWM is off */
clk_disable_unprepare(priv->plat->clk_ptp_ref);
@@ -975,7 +991,7 @@ static int __maybe_unused stmmac_pltfr_noirq_resume(struct device *dev)
if (!netif_running(ndev))
return 0;
- if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
+ if (!stmmac_wol_enabled_mac(priv)) {
/* enable the clk previously disabled */
ret = pm_runtime_force_resume(dev);
if (ret)
@@ -994,7 +1010,7 @@ static int __maybe_unused stmmac_pltfr_noirq_resume(struct device *dev)
}
const struct dev_pm_ops stmmac_pltfr_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(stmmac_pltfr_suspend, stmmac_pltfr_resume)
+ SET_SYSTEM_SLEEP_PM_OPS(stmmac_suspend, stmmac_resume)
SET_RUNTIME_PM_OPS(stmmac_runtime_suspend, stmmac_runtime_resume, NULL)
SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(stmmac_pltfr_noirq_suspend, stmmac_pltfr_noirq_resume)
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index 3767ba495e78..993ff4e87e55 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -10,6 +10,8 @@
#include "stmmac.h"
#include "stmmac_ptp.h"
+#define PTP_SAFE_TIME_OFFSET_NS 500000
+
/**
* stmmac_adjust_freq
*
@@ -171,7 +173,11 @@ static int stmmac_enable(struct ptp_clock_info *ptp,
u32 acr_value;
switch (rq->type) {
- case PTP_CLK_REQ_PEROUT:
+ case PTP_CLK_REQ_PEROUT: {
+ struct timespec64 curr_time;
+ u64 target_ns = 0;
+ u64 ns = 0;
+
/* Reject requests with unsupported flags */
if (rq->perout.flags)
return -EOPNOTSUPP;
@@ -180,6 +186,31 @@ static int stmmac_enable(struct ptp_clock_info *ptp,
cfg->start.tv_sec = rq->perout.start.sec;
cfg->start.tv_nsec = rq->perout.start.nsec;
+
+ /* A time set in the past won't trigger the start of the flexible PPS generation for
+ * the GMAC5. For some reason it does for the GMAC4, but setting a time in the past
+ * should be addressed anyway. Therefore, any value set in the past is treated as
+ * an offset from the current MAC system time.
+ * Be aware that an offset that is too small may not trigger flexible PPS generation
+ * if the time spent in this configuration makes the targeted time already outdated.
+ * To address this, add a safe time offset.
+ */
+ if (!cfg->start.tv_sec && cfg->start.tv_nsec < PTP_SAFE_TIME_OFFSET_NS)
+ cfg->start.tv_nsec += PTP_SAFE_TIME_OFFSET_NS;
+
+ target_ns = cfg->start.tv_nsec + ((u64)cfg->start.tv_sec * NSEC_PER_SEC);
+
+ stmmac_get_systime(priv, priv->ptpaddr, &ns);
+ if (ns > TIME64_MAX - PTP_SAFE_TIME_OFFSET_NS)
+ return -EINVAL;
+
+ curr_time = ns_to_timespec64(ns);
+ if (target_ns < ns + PTP_SAFE_TIME_OFFSET_NS) {
+ cfg->start = timespec64_add_safe(cfg->start, curr_time);
+ if (cfg->start.tv_sec == TIME64_MAX)
+ return -EINVAL;
+ }
+
cfg->period.tv_sec = rq->perout.period.sec;
cfg->period.tv_nsec = rq->perout.period.nsec;
@@ -190,6 +221,7 @@ static int stmmac_enable(struct ptp_clock_info *ptp,
priv->systime_flags);
write_unlock_irqrestore(&priv->ptp_lock, flags);
break;
+ }
case PTP_CLK_REQ_EXTTS: {
u8 channel;
@@ -247,10 +279,7 @@ static int stmmac_get_syncdevicetime(ktime_t *device,
{
struct stmmac_priv *priv = (struct stmmac_priv *)ctx;
- if (priv->plat->crosststamp)
- return priv->plat->crosststamp(device, system, ctx);
- else
- return -EOPNOTSUPP;
+ return priv->plat->crosststamp(device, system, ctx);
}
static int stmmac_getcrosststamp(struct ptp_clock_info *ptp,
@@ -278,7 +307,6 @@ const struct ptp_clock_info stmmac_ptp_clock_ops = {
.gettime64 = stmmac_get_time,
.settime64 = stmmac_set_time,
.enable = stmmac_enable,
- .getcrosststamp = stmmac_getcrosststamp,
};
/* structure describing a PTP hardware clock */
@@ -296,7 +324,6 @@ const struct ptp_clock_info dwmac1000_ptp_clock_ops = {
.gettime64 = stmmac_get_time,
.settime64 = stmmac_set_time,
.enable = dwmac1000_ptp_enable,
- .getcrosststamp = stmmac_getcrosststamp,
};
/**
@@ -332,6 +359,9 @@ void stmmac_ptp_register(struct stmmac_priv *priv)
if (priv->plat->ptp_max_adj)
priv->ptp_clock_ops.max_adj = priv->plat->ptp_max_adj;
+ if (priv->plat->crosststamp)
+ priv->ptp_clock_ops.getcrosststamp = stmmac_getcrosststamp;
+
rwlock_init(&priv->ptp_lock);
mutex_init(&priv->aux_ts_lock);
@@ -340,8 +370,12 @@ void stmmac_ptp_register(struct stmmac_priv *priv)
if (IS_ERR(priv->ptp_clock)) {
netdev_err(priv->dev, "ptp_clock_register failed\n");
priv->ptp_clock = NULL;
- } else if (priv->ptp_clock)
+ }
+
+ if (priv->ptp_clock)
netdev_info(priv->dev, "registered PTP clock\n");
+ else
+ mutex_destroy(&priv->aux_ts_lock);
}
/**
@@ -357,7 +391,7 @@ void stmmac_ptp_unregister(struct stmmac_priv *priv)
priv->ptp_clock = NULL;
pr_debug("Removed PTP HW clock successfully on %s\n",
priv->dev->name);
- }
- mutex_destroy(&priv->aux_ts_lock);
+ mutex_destroy(&priv->aux_ts_lock);
+ }
}
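To make the perout start-time clamping above concrete, a worked example with illustrative values only:

/* Userspace requests a perout start of 0 s / 100000 ns while the MAC
 * clock already reads 100 s. tv_sec == 0 and tv_nsec is below
 * PTP_SAFE_TIME_OFFSET_NS, so the safe offset is added first, then the
 * whole value is treated as an offset from "now".
 */
u64 target_ns = 100000;			/* requested start */
u64 now_ns = 100ULL * NSEC_PER_SEC;	/* current MAC system time */

target_ns += PTP_SAFE_TIME_OFFSET_NS;	/* 600000 ns */
if (target_ns < now_ns + PTP_SAFE_TIME_OFFSET_NS)
	target_ns += now_ns;		/* ~100.0006 s: safely in the future */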
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 694d6ee14381..97e89a604abd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -1080,6 +1080,7 @@ disable:
for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
priv->xstats.max_sdu_txq_drop[i] = 0;
priv->xstats.mtl_est_txq_hlbf[i] = 0;
+ priv->xstats.mtl_est_txq_hlbs[i] = 0;
}
mutex_unlock(&priv->est_lock);
}
@@ -1097,7 +1098,8 @@ static void tc_taprio_stats(struct stmmac_priv *priv,
for (i = 0; i < priv->plat->tx_queues_to_use; i++)
window_drops += priv->xstats.max_sdu_txq_drop[i] +
- priv->xstats.mtl_est_txq_hlbf[i];
+ priv->xstats.mtl_est_txq_hlbf[i] +
+ priv->xstats.mtl_est_txq_hlbs[i];
qopt->stats.window_drops = window_drops;
/* Transmission overrun doesn't happen for stmmac, hence always 0 */
@@ -1111,7 +1113,8 @@ static void tc_taprio_queue_stats(struct stmmac_priv *priv,
int queue = qopt->queue_stats.queue;
q_stats->stats.window_drops = priv->xstats.max_sdu_txq_drop[queue] +
- priv->xstats.mtl_est_txq_hlbf[queue];
+ priv->xstats.mtl_est_txq_hlbf[queue] +
+ priv->xstats.mtl_est_txq_hlbs[queue];
/* Transmission overrun doesn't happen for stmmac, hence always 0 */
q_stats->stats.tx_overruns = 0;
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index a07c910c497a..a54d71155263 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -229,4 +229,16 @@ config TI_ICSS_IEP
To compile this driver as a module, choose M here. The module
will be called icss_iep.
+config TI_PRUETH
+ tristate "TI PRU Ethernet EMAC driver"
+ depends on PRU_REMOTEPROC
+ depends on NET_SWITCHDEV
+ select TI_ICSS_IEP
+ imply PTP_1588_CLOCK
+ help
+ Some TI SoCs have Programmable Real-Time Unit (PRU) cores which can
+ support single or dual Ethernet ports with the help of firmware code
+ running on the PRU cores. This driver uses remoteproc-based communication
+ with the PRU firmware to expose an Ethernet interface to Linux.
+
endif # NET_VENDOR_TI
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
index cbcf44806924..93c0a4d0e33a 100644
--- a/drivers/net/ethernet/ti/Makefile
+++ b/drivers/net/ethernet/ti/Makefile
@@ -3,6 +3,9 @@
# Makefile for the TI network device drivers.
#
+obj-$(CONFIG_TI_PRUETH) += icssm-prueth.o
+icssm-prueth-y := icssm/icssm_prueth.o
+
obj-$(CONFIG_TI_CPSW) += cpsw-common.o
obj-$(CONFIG_TI_DAVINCI_EMAC) += cpsw-common.o
obj-$(CONFIG_TI_CPSW_SWITCHDEV) += cpsw-common.o
diff --git a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
index 9032444435e9..c57497074ae6 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
@@ -694,17 +694,20 @@ static int am65_cpsw_get_ethtool_ts_info(struct net_device *ndev,
struct kernel_ethtool_ts_info *info)
{
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
- unsigned int ptp_v2_filter;
-
- ptp_v2_filter = BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
- BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
- BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
- BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
- BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
- BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
- BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
- BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) |
- BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ);
+ unsigned int ptp_filter;
+
+ ptp_filter = BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ);
if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
return ethtool_op_get_ts_info(ndev, info);
@@ -716,7 +719,7 @@ static int am65_cpsw_get_ethtool_ts_info(struct net_device *ndev,
SOF_TIMESTAMPING_RAW_HARDWARE;
info->phc_index = am65_cpts_phc_index(common->cpts);
info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
- info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | ptp_v2_filter;
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | ptp_filter;
return 0;
}
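rx_filters is a bitmap keyed by the HWTSTAMP_FILTER_* enum values, hence the BIT() wrapping above; a consumer-side check might look like this (sketch, hypothetical helper name):

/* Sketch: test whether the reported ts_info advertises PTPv1 L4 SYNC
 * timestamping, one of the filters this patch adds.
 */
static bool supports_ptp_v1_sync(const struct kernel_ethtool_ts_info *info)
{
	return info->rx_filters & BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC);
}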
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 8b2364f5f731..110eb2da8dbc 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -1813,6 +1813,9 @@ static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev,
case HWTSTAMP_FILTER_NONE:
port->rx_ts_enabled = false;
break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
@@ -1823,7 +1826,7 @@ static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev,
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
port->rx_ts_enabled = true;
- cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT | HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
break;
case HWTSTAMP_FILTER_ALL:
case HWTSTAMP_FILTER_SOME:
@@ -1884,8 +1887,8 @@ static int am65_cpsw_nuss_hwtstamp_get(struct net_device *ndev,
cfg.flags = 0;
cfg.tx_type = port->tx_ts_enabled ?
HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
- cfg.rx_filter = port->rx_ts_enabled ?
- HWTSTAMP_FILTER_PTP_V2_EVENT : HWTSTAMP_FILTER_NONE;
+ cfg.rx_filter = port->rx_ts_enabled ? HWTSTAMP_FILTER_PTP_V2_EVENT |
+ HWTSTAMP_FILTER_PTP_V1_L4_EVENT : HWTSTAMP_FILTER_NONE;
return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.c b/drivers/net/ethernet/ti/icssg/icss_iep.c
index d8c9fe1d98c4..ec085897edf0 100644
--- a/drivers/net/ethernet/ti/icssg/icss_iep.c
+++ b/drivers/net/ethernet/ti/icssg/icss_iep.c
@@ -982,11 +982,112 @@ static const struct icss_iep_plat_data am654_icss_iep_plat_data = {
.config = &am654_icss_iep_regmap_config,
};
+static const struct icss_iep_plat_data am57xx_icss_iep_plat_data = {
+ .flags = ICSS_IEP_64BIT_COUNTER_SUPPORT |
+ ICSS_IEP_SLOW_COMPEN_REG_SUPPORT,
+ .reg_offs = {
+ [ICSS_IEP_GLOBAL_CFG_REG] = 0x00,
+ [ICSS_IEP_COMPEN_REG] = 0x08,
+ [ICSS_IEP_SLOW_COMPEN_REG] = 0x0c,
+ [ICSS_IEP_COUNT_REG0] = 0x10,
+ [ICSS_IEP_COUNT_REG1] = 0x14,
+ [ICSS_IEP_CAPTURE_CFG_REG] = 0x18,
+ [ICSS_IEP_CAPTURE_STAT_REG] = 0x1c,
+
+ [ICSS_IEP_CAP6_RISE_REG0] = 0x50,
+ [ICSS_IEP_CAP6_RISE_REG1] = 0x54,
+
+ [ICSS_IEP_CAP7_RISE_REG0] = 0x60,
+ [ICSS_IEP_CAP7_RISE_REG1] = 0x64,
+
+ [ICSS_IEP_CMP_CFG_REG] = 0x70,
+ [ICSS_IEP_CMP_STAT_REG] = 0x74,
+ [ICSS_IEP_CMP0_REG0] = 0x78,
+ [ICSS_IEP_CMP0_REG1] = 0x7c,
+ [ICSS_IEP_CMP1_REG0] = 0x80,
+ [ICSS_IEP_CMP1_REG1] = 0x84,
+
+ [ICSS_IEP_CMP8_REG0] = 0xc0,
+ [ICSS_IEP_CMP8_REG1] = 0xc4,
+ [ICSS_IEP_SYNC_CTRL_REG] = 0x180,
+ [ICSS_IEP_SYNC0_STAT_REG] = 0x188,
+ [ICSS_IEP_SYNC1_STAT_REG] = 0x18c,
+ [ICSS_IEP_SYNC_PWIDTH_REG] = 0x190,
+ [ICSS_IEP_SYNC0_PERIOD_REG] = 0x194,
+ [ICSS_IEP_SYNC1_DELAY_REG] = 0x198,
+ [ICSS_IEP_SYNC_START_REG] = 0x19c,
+ },
+ .config = &am654_icss_iep_regmap_config,
+};
+
+static bool am335x_icss_iep_valid_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case ICSS_IEP_GLOBAL_CFG_REG ... ICSS_IEP_CAPTURE_STAT_REG:
+ case ICSS_IEP_CAP6_RISE_REG0:
+ case ICSS_IEP_CMP_CFG_REG ... ICSS_IEP_CMP0_REG0:
+ case ICSS_IEP_CMP8_REG0 ... ICSS_IEP_SYNC_START_REG:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_config am335x_icss_iep_regmap_config = {
+ .name = "icss iep",
+ .reg_stride = 1,
+ .reg_write = icss_iep_regmap_write,
+ .reg_read = icss_iep_regmap_read,
+ .writeable_reg = am335x_icss_iep_valid_reg,
+ .readable_reg = am335x_icss_iep_valid_reg,
+};
+
+static const struct icss_iep_plat_data am335x_icss_iep_plat_data = {
+ .flags = 0,
+ .reg_offs = {
+ [ICSS_IEP_GLOBAL_CFG_REG] = 0x00,
+ [ICSS_IEP_COMPEN_REG] = 0x08,
+ [ICSS_IEP_COUNT_REG0] = 0x0c,
+ [ICSS_IEP_CAPTURE_CFG_REG] = 0x10,
+ [ICSS_IEP_CAPTURE_STAT_REG] = 0x14,
+
+ [ICSS_IEP_CAP6_RISE_REG0] = 0x30,
+
+ [ICSS_IEP_CAP7_RISE_REG0] = 0x38,
+
+ [ICSS_IEP_CMP_CFG_REG] = 0x40,
+ [ICSS_IEP_CMP_STAT_REG] = 0x44,
+ [ICSS_IEP_CMP0_REG0] = 0x48,
+
+ [ICSS_IEP_CMP8_REG0] = 0x88,
+ [ICSS_IEP_SYNC_CTRL_REG] = 0x100,
+ [ICSS_IEP_SYNC0_STAT_REG] = 0x108,
+ [ICSS_IEP_SYNC1_STAT_REG] = 0x10c,
+ [ICSS_IEP_SYNC_PWIDTH_REG] = 0x110,
+ [ICSS_IEP_SYNC0_PERIOD_REG] = 0x114,
+ [ICSS_IEP_SYNC1_DELAY_REG] = 0x118,
+ [ICSS_IEP_SYNC_START_REG] = 0x11c,
+ },
+ .config = &am335x_icss_iep_regmap_config,
+};
+
static const struct of_device_id icss_iep_of_match[] = {
{
.compatible = "ti,am654-icss-iep",
.data = &am654_icss_iep_plat_data,
},
+ {
+ .compatible = "ti,am5728-icss-iep",
+ .data = &am57xx_icss_iep_plat_data,
+ },
+ {
+ .compatible = "ti,am4376-icss-iep",
+ .data = &am335x_icss_iep_plat_data,
+ },
+ {
+ .compatible = "ti,am3356-icss-iep",
+ .data = &am335x_icss_iep_plat_data,
+ },
{},
};
MODULE_DEVICE_TABLE(of, icss_iep_of_match);
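Each new compatible simply points at the plat_data describing that SoC generation's register layout; at probe time the driver would resolve it in the usual way, e.g. (sketch, hypothetical function name):

static int icss_iep_probe_sketch(struct platform_device *pdev)
{
	const struct icss_iep_plat_data *plat;

	/* Resolves through icss_iep_of_match via the DT compatible string */
	plat = device_get_match_data(&pdev->dev);
	if (!plat)
		return -EINVAL;

	/* plat->reg_offs and plat->config now describe this IEP revision */
	return 0;
}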
diff --git a/drivers/net/ethernet/ti/icssm/icssm_prueth.c b/drivers/net/ethernet/ti/icssm/icssm_prueth.c
new file mode 100644
index 000000000000..293b7af04263
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssm/icssm_prueth.c
@@ -0,0 +1,1746 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Texas Instruments ICSSM Ethernet Driver
+ *
+ * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
+ *
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/genalloc.h>
+#include <linux/if_bridge.h>
+#include <linux/if_hsr.h>
+#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/net_tstamp.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+#include <linux/remoteproc/pruss.h>
+#include <linux/ptp_classify.h>
+#include <linux/regmap.h>
+#include <linux/remoteproc.h>
+#include <net/pkt_cls.h>
+
+#include "icssm_prueth.h"
+#include "../icssg/icssg_mii_rt.h"
+#include "../icssg/icss_iep.h"
+
+#define OCMC_RAM_SIZE (SZ_64K)
+
+#define TX_START_DELAY 0x40
+#define TX_CLK_DELAY_100M 0x6
+#define HR_TIMER_TX_DELAY_US 100
+
+static void icssm_prueth_write_reg(struct prueth *prueth,
+ enum prueth_mem region,
+ unsigned int reg, u32 val)
+{
+ writel_relaxed(val, prueth->mem[region].va + reg);
+}
+
+/* The macro below is for 1528-byte frame support, to allow room for the
+ * redundancy tag
+ */
+#define PRUSS_MII_RT_RX_FRMS_MAX_SUPPORT_EMAC (VLAN_ETH_FRAME_LEN + \
+ ETH_FCS_LEN + \
+ ICSSM_LRE_TAG_SIZE)
+
+/* ensure that the order of the PRUSS mem regions is the same as enum prueth_mem */
+static enum pruss_mem pruss_mem_ids[] = { PRUSS_MEM_DRAM0, PRUSS_MEM_DRAM1,
+ PRUSS_MEM_SHRD_RAM2 };
+
+static const struct prueth_queue_info queue_infos[][NUM_QUEUES] = {
+ [PRUETH_PORT_QUEUE_HOST] = {
+ [PRUETH_QUEUE1] = {
+ P0_Q1_BUFFER_OFFSET,
+ HOST_QUEUE_DESC_OFFSET,
+ P0_Q1_BD_OFFSET,
+ P0_Q1_BD_OFFSET + ((HOST_QUEUE_1_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE2] = {
+ P0_Q2_BUFFER_OFFSET,
+ HOST_QUEUE_DESC_OFFSET + 8,
+ P0_Q2_BD_OFFSET,
+ P0_Q2_BD_OFFSET + ((HOST_QUEUE_2_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE3] = {
+ P0_Q3_BUFFER_OFFSET,
+ HOST_QUEUE_DESC_OFFSET + 16,
+ P0_Q3_BD_OFFSET,
+ P0_Q3_BD_OFFSET + ((HOST_QUEUE_3_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE4] = {
+ P0_Q4_BUFFER_OFFSET,
+ HOST_QUEUE_DESC_OFFSET + 24,
+ P0_Q4_BD_OFFSET,
+ P0_Q4_BD_OFFSET + ((HOST_QUEUE_4_SIZE - 1) * BD_SIZE),
+ },
+ },
+ [PRUETH_PORT_QUEUE_MII0] = {
+ [PRUETH_QUEUE1] = {
+ P1_Q1_BUFFER_OFFSET,
+ P1_Q1_BUFFER_OFFSET + ((QUEUE_1_SIZE - 1) *
+ ICSS_BLOCK_SIZE),
+ P1_Q1_BD_OFFSET,
+ P1_Q1_BD_OFFSET + ((QUEUE_1_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE2] = {
+ P1_Q2_BUFFER_OFFSET,
+ P1_Q2_BUFFER_OFFSET + ((QUEUE_2_SIZE - 1) *
+ ICSS_BLOCK_SIZE),
+ P1_Q2_BD_OFFSET,
+ P1_Q2_BD_OFFSET + ((QUEUE_2_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE3] = {
+ P1_Q3_BUFFER_OFFSET,
+ P1_Q3_BUFFER_OFFSET + ((QUEUE_3_SIZE - 1) *
+ ICSS_BLOCK_SIZE),
+ P1_Q3_BD_OFFSET,
+ P1_Q3_BD_OFFSET + ((QUEUE_3_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE4] = {
+ P1_Q4_BUFFER_OFFSET,
+ P1_Q4_BUFFER_OFFSET + ((QUEUE_4_SIZE - 1) *
+ ICSS_BLOCK_SIZE),
+ P1_Q4_BD_OFFSET,
+ P1_Q4_BD_OFFSET + ((QUEUE_4_SIZE - 1) * BD_SIZE),
+ },
+ },
+ [PRUETH_PORT_QUEUE_MII1] = {
+ [PRUETH_QUEUE1] = {
+ P2_Q1_BUFFER_OFFSET,
+ P2_Q1_BUFFER_OFFSET + ((QUEUE_1_SIZE - 1) *
+ ICSS_BLOCK_SIZE),
+ P2_Q1_BD_OFFSET,
+ P2_Q1_BD_OFFSET + ((QUEUE_1_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE2] = {
+ P2_Q2_BUFFER_OFFSET,
+ P2_Q2_BUFFER_OFFSET + ((QUEUE_2_SIZE - 1) *
+ ICSS_BLOCK_SIZE),
+ P2_Q2_BD_OFFSET,
+ P2_Q2_BD_OFFSET + ((QUEUE_2_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE3] = {
+ P2_Q3_BUFFER_OFFSET,
+ P2_Q3_BUFFER_OFFSET + ((QUEUE_3_SIZE - 1) *
+ ICSS_BLOCK_SIZE),
+ P2_Q3_BD_OFFSET,
+ P2_Q3_BD_OFFSET + ((QUEUE_3_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE4] = {
+ P2_Q4_BUFFER_OFFSET,
+ P2_Q4_BUFFER_OFFSET + ((QUEUE_4_SIZE - 1) *
+ ICSS_BLOCK_SIZE),
+ P2_Q4_BD_OFFSET,
+ P2_Q4_BD_OFFSET + ((QUEUE_4_SIZE - 1) * BD_SIZE),
+ },
+ },
+};
+
+static const struct prueth_queue_desc queue_descs[][NUM_QUEUES] = {
+ [PRUETH_PORT_QUEUE_HOST] = {
+ { .rd_ptr = P0_Q1_BD_OFFSET, .wr_ptr = P0_Q1_BD_OFFSET, },
+ { .rd_ptr = P0_Q2_BD_OFFSET, .wr_ptr = P0_Q2_BD_OFFSET, },
+ { .rd_ptr = P0_Q3_BD_OFFSET, .wr_ptr = P0_Q3_BD_OFFSET, },
+ { .rd_ptr = P0_Q4_BD_OFFSET, .wr_ptr = P0_Q4_BD_OFFSET, },
+ },
+ [PRUETH_PORT_QUEUE_MII0] = {
+ { .rd_ptr = P1_Q1_BD_OFFSET, .wr_ptr = P1_Q1_BD_OFFSET, },
+ { .rd_ptr = P1_Q2_BD_OFFSET, .wr_ptr = P1_Q2_BD_OFFSET, },
+ { .rd_ptr = P1_Q3_BD_OFFSET, .wr_ptr = P1_Q3_BD_OFFSET, },
+ { .rd_ptr = P1_Q4_BD_OFFSET, .wr_ptr = P1_Q4_BD_OFFSET, },
+ },
+ [PRUETH_PORT_QUEUE_MII1] = {
+ { .rd_ptr = P2_Q1_BD_OFFSET, .wr_ptr = P2_Q1_BD_OFFSET, },
+ { .rd_ptr = P2_Q2_BD_OFFSET, .wr_ptr = P2_Q2_BD_OFFSET, },
+ { .rd_ptr = P2_Q3_BD_OFFSET, .wr_ptr = P2_Q3_BD_OFFSET, },
+ { .rd_ptr = P2_Q4_BD_OFFSET, .wr_ptr = P2_Q4_BD_OFFSET, },
+ }
+};
+
+static void icssm_prueth_hostconfig(struct prueth *prueth)
+{
+ void __iomem *sram_base = prueth->mem[PRUETH_MEM_SHARED_RAM].va;
+ void __iomem *sram;
+
+ /* queue size lookup table */
+ sram = sram_base + HOST_QUEUE_SIZE_ADDR;
+ writew(HOST_QUEUE_1_SIZE, sram);
+ writew(HOST_QUEUE_2_SIZE, sram + 2);
+ writew(HOST_QUEUE_3_SIZE, sram + 4);
+ writew(HOST_QUEUE_4_SIZE, sram + 6);
+
+ /* queue information table */
+ sram = sram_base + HOST_Q1_RX_CONTEXT_OFFSET;
+ memcpy_toio(sram, queue_infos[PRUETH_PORT_QUEUE_HOST],
+ sizeof(queue_infos[PRUETH_PORT_QUEUE_HOST]));
+
+ /* buffer offset table */
+ sram = sram_base + HOST_QUEUE_OFFSET_ADDR;
+ writew(P0_Q1_BUFFER_OFFSET, sram);
+ writew(P0_Q2_BUFFER_OFFSET, sram + 2);
+ writew(P0_Q3_BUFFER_OFFSET, sram + 4);
+ writew(P0_Q4_BUFFER_OFFSET, sram + 6);
+
+ /* buffer descriptor offset table */
+ sram = sram_base + HOST_QUEUE_DESCRIPTOR_OFFSET_ADDR;
+ writew(P0_Q1_BD_OFFSET, sram);
+ writew(P0_Q2_BD_OFFSET, sram + 2);
+ writew(P0_Q3_BD_OFFSET, sram + 4);
+ writew(P0_Q4_BD_OFFSET, sram + 6);
+
+ /* queue table */
+ sram = sram_base + HOST_QUEUE_DESC_OFFSET;
+ memcpy_toio(sram, queue_descs[PRUETH_PORT_QUEUE_HOST],
+ sizeof(queue_descs[PRUETH_PORT_QUEUE_HOST]));
+}
+
+static void icssm_prueth_mii_init(struct prueth *prueth)
+{
+ struct regmap *mii_rt;
+ u32 rxcfg_reg, rxcfg;
+ u32 txcfg_reg, txcfg;
+
+ mii_rt = prueth->mii_rt;
+
+ rxcfg = PRUSS_MII_RT_RXCFG_RX_ENABLE |
+ PRUSS_MII_RT_RXCFG_RX_DATA_RDY_MODE_DIS |
+ PRUSS_MII_RT_RXCFG_RX_L2_EN |
+ PRUSS_MII_RT_RXCFG_RX_CUT_PREAMBLE |
+ PRUSS_MII_RT_RXCFG_RX_L2_EOF_SCLR_DIS;
+
+ /* Configuration of Port 0 Rx */
+ rxcfg_reg = PRUSS_MII_RT_RXCFG0;
+
+ regmap_write(mii_rt, rxcfg_reg, rxcfg);
+
+ /* Configuration of Port 1 Rx */
+ rxcfg_reg = PRUSS_MII_RT_RXCFG1;
+
+ rxcfg |= PRUSS_MII_RT_RXCFG_RX_MUX_SEL;
+
+ regmap_write(mii_rt, rxcfg_reg, rxcfg);
+
+ txcfg = PRUSS_MII_RT_TXCFG_TX_ENABLE |
+ PRUSS_MII_RT_TXCFG_TX_AUTO_PREAMBLE |
+ PRUSS_MII_RT_TXCFG_TX_32_MODE_EN |
+ (TX_START_DELAY << PRUSS_MII_RT_TXCFG_TX_START_DELAY_SHIFT) |
+ (TX_CLK_DELAY_100M << PRUSS_MII_RT_TXCFG_TX_CLK_DELAY_SHIFT);
+
+ /* Configuration of Port 0 Tx */
+ txcfg_reg = PRUSS_MII_RT_TXCFG0;
+
+ regmap_write(mii_rt, txcfg_reg, txcfg);
+
+ txcfg |= PRUSS_MII_RT_TXCFG_TX_MUX_SEL;
+
+ /* Configuration of Port 1 Tx */
+ txcfg_reg = PRUSS_MII_RT_TXCFG1;
+
+ regmap_write(mii_rt, txcfg_reg, txcfg);
+
+ txcfg_reg = PRUSS_MII_RT_RX_FRMS0;
+
+ /* The min frame length should be set to 64 to allow reception of standard
+ * Ethernet frames such as PTP and LLDP that will not have the tag/rct.
+ * The actual size written to the register is size - 1 per the TRM. This
+ * also includes the CRC/FCS.
+ */
+ txcfg = FIELD_PREP(PRUSS_MII_RT_RX_FRMS_MIN_FRM_MASK,
+ (PRUSS_MII_RT_RX_FRMS_MIN_FRM - 1));
+
+ /* For EMAC, set the max frame size to 1528, i.e. the size including the
+ * VLAN tag. The actual size written to the register is size - 1 as per
+ * the TRM. Since the driver supports run-time protocol changes, it must
+ * overwrite these values based on the Ethernet type.
+ */
+ txcfg |= FIELD_PREP(PRUSS_MII_RT_RX_FRMS_MAX_FRM_MASK,
+ (PRUSS_MII_RT_RX_FRMS_MAX_SUPPORT_EMAC - 1));
+
+ regmap_write(mii_rt, txcfg_reg, txcfg);
+
+ txcfg_reg = PRUSS_MII_RT_RX_FRMS1;
+
+ regmap_write(mii_rt, txcfg_reg, txcfg);
+}
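Both frame-size limits share one RX_FRMS register, so FIELD_PREP() shifts each (size - 1) value into its field mask; in isolation the packing looks like this (sketch, assuming the 64-byte minimum the comment above describes):

u32 frms = FIELD_PREP(PRUSS_MII_RT_RX_FRMS_MIN_FRM_MASK, 64 - 1) |
	   FIELD_PREP(PRUSS_MII_RT_RX_FRMS_MAX_FRM_MASK,
		      PRUSS_MII_RT_RX_FRMS_MAX_SUPPORT_EMAC - 1);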
+
+static void icssm_prueth_clearmem(struct prueth *prueth, enum prueth_mem region)
+{
+ memset_io(prueth->mem[region].va, 0, prueth->mem[region].size);
+}
+
+static void icssm_prueth_hostinit(struct prueth *prueth)
+{
+ /* Clear shared RAM */
+ icssm_prueth_clearmem(prueth, PRUETH_MEM_SHARED_RAM);
+
+ /* Clear OCMC RAM */
+ icssm_prueth_clearmem(prueth, PRUETH_MEM_OCMC);
+
+ /* Clear data RAMs */
+ if (prueth->eth_node[PRUETH_MAC0])
+ icssm_prueth_clearmem(prueth, PRUETH_MEM_DRAM0);
+ if (prueth->eth_node[PRUETH_MAC1])
+ icssm_prueth_clearmem(prueth, PRUETH_MEM_DRAM1);
+
+ /* Initialize host queues in shared RAM */
+ icssm_prueth_hostconfig(prueth);
+
+ /* Configure MII_RT */
+ icssm_prueth_mii_init(prueth);
+}
+
+/* This function initializes the driver in EMAC mode,
+ * based on eth_type
+ */
+static void icssm_prueth_init_ethernet_mode(struct prueth *prueth)
+{
+ icssm_prueth_hostinit(prueth);
+}
+
+static void icssm_prueth_port_enable(struct prueth_emac *emac, bool enable)
+{
+ struct prueth *prueth = emac->prueth;
+ void __iomem *port_ctrl;
+ void __iomem *ram;
+
+ ram = prueth->mem[emac->dram].va;
+ port_ctrl = ram + PORT_CONTROL_ADDR;
+ writeb(!!enable, port_ctrl);
+}
+
+static int icssm_prueth_emac_config(struct prueth_emac *emac)
+{
+ struct prueth *prueth = emac->prueth;
+ u32 sharedramaddr, ocmcaddr;
+ void __iomem *dram_base;
+ void __iomem *mac_addr;
+ void __iomem *dram;
+ void __iomem *sram;
+
+ /* PRU needs local shared RAM address for C28 */
+ sharedramaddr = ICSS_LOCAL_SHARED_RAM;
+ /* PRU needs the real global OCMC address for C30 */
+ ocmcaddr = (u32)prueth->mem[PRUETH_MEM_OCMC].pa;
+ sram = prueth->mem[PRUETH_MEM_SHARED_RAM].va;
+
+ /* Clear data RAM */
+ icssm_prueth_clearmem(prueth, emac->dram);
+
+ dram_base = prueth->mem[emac->dram].va;
+
+ /* setup mac address */
+ mac_addr = dram_base + PORT_MAC_ADDR;
+ memcpy_toio(mac_addr, emac->mac_addr, 6);
+
+ /* queue information table */
+ dram = dram_base + TX_CONTEXT_Q1_OFFSET_ADDR;
+ memcpy_toio(dram, queue_infos[emac->port_id],
+ sizeof(queue_infos[emac->port_id]));
+
+ /* queue table */
+ dram = dram_base + PORT_QUEUE_DESC_OFFSET;
+ memcpy_toio(dram, queue_descs[emac->port_id],
+ sizeof(queue_descs[emac->port_id]));
+
+ emac->rx_queue_descs = sram + HOST_QUEUE_DESC_OFFSET;
+ emac->tx_queue_descs = dram;
+
+ /* Set in constant table C28 of PRU0 to ICSS Shared memory */
+ pru_rproc_set_ctable(emac->pru, PRU_C28, sharedramaddr);
+
+ /* Set in constant table C30 of PRU0 to OCMC memory */
+ pru_rproc_set_ctable(emac->pru, PRU_C30, ocmcaddr);
+
+ return 0;
+}
+
+/* called back by the PHY layer when the link state of the hw port changes */
+static void icssm_emac_adjust_link(struct net_device *ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct phy_device *phydev = emac->phydev;
+ struct prueth *prueth = emac->prueth;
+ bool new_state = false;
+ enum prueth_mem region;
+ unsigned long flags;
+ u32 port_status = 0;
+ u32 txcfg, mask;
+ u32 delay;
+
+ spin_lock_irqsave(&emac->lock, flags);
+
+ if (phydev->link) {
+ /* check the mode of operation */
+ if (phydev->duplex != emac->duplex) {
+ new_state = true;
+ emac->duplex = phydev->duplex;
+ }
+ if (phydev->speed != emac->speed) {
+ new_state = true;
+ emac->speed = phydev->speed;
+ }
+ if (!emac->link) {
+ new_state = true;
+ emac->link = 1;
+ }
+ } else if (emac->link) {
+ new_state = true;
+ emac->link = 0;
+ }
+
+ if (new_state) {
+ phy_print_status(phydev);
+ region = emac->dram;
+
+ /* update phy/port status information based on PHY values */
+ if (emac->link) {
+ port_status |= PORT_LINK_MASK;
+
+ icssm_prueth_write_reg(prueth, region, PHY_SPEED_OFFSET,
+ emac->speed);
+
+ delay = TX_CLK_DELAY_100M;
+ delay = delay << PRUSS_MII_RT_TXCFG_TX_CLK_DELAY_SHIFT;
+ mask = PRUSS_MII_RT_TXCFG_TX_CLK_DELAY_MASK;
+
+ if (emac->port_id)
+ txcfg = PRUSS_MII_RT_TXCFG1;
+ else
+ txcfg = PRUSS_MII_RT_TXCFG0;
+
+ regmap_update_bits(prueth->mii_rt, txcfg, mask, delay);
+ }
+
+ writeb(port_status, prueth->mem[region].va +
+ PORT_STATUS_OFFSET);
+ }
+
+ if (emac->link) {
+ /* reactivate the transmit queue if it is stopped */
+ if (netif_running(ndev) && netif_queue_stopped(ndev))
+ netif_wake_queue(ndev);
+ } else {
+ if (!netif_queue_stopped(ndev))
+ netif_stop_queue(ndev);
+ }
+
+ spin_unlock_irqrestore(&emac->lock, flags);
+}
+
+static unsigned int
+icssm_get_buff_desc_count(const struct prueth_queue_info *queue)
+{
+ unsigned int buffer_desc_count;
+
+ buffer_desc_count = queue->buffer_desc_end -
+ queue->buffer_desc_offset;
+ buffer_desc_count /= BD_SIZE;
+ buffer_desc_count++;
+
+ return buffer_desc_count;
+}
+
+static void icssm_get_block(struct prueth_queue_desc __iomem *queue_desc,
+ const struct prueth_queue_info *queue,
+ int *write_block, int *read_block)
+{
+ *write_block = (readw(&queue_desc->wr_ptr) -
+ queue->buffer_desc_offset) / BD_SIZE;
+ *read_block = (readw(&queue_desc->rd_ptr) -
+ queue->buffer_desc_offset) / BD_SIZE;
+}
+
+/**
+ * icssm_emac_rx_irq - EMAC Rx interrupt handler
+ * @irq: interrupt number
+ * @dev_id: pointer to net_device
+ *
+ * EMAC interrupt handler - we only schedule NAPI here and do not process
+ * any packets.
+ *
+ * Return: IRQ_HANDLED if the interrupt was handled
+ */
+static irqreturn_t icssm_emac_rx_irq(int irq, void *dev_id)
+{
+ struct net_device *ndev = (struct net_device *)dev_id;
+ struct prueth_emac *emac = netdev_priv(ndev);
+
+ if (likely(netif_running(ndev))) {
+ /* disable Rx system event */
+ disable_irq_nosync(emac->rx_irq);
+ napi_schedule(&emac->napi);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * icssm_prueth_tx_enqueue - queue a packet to firmware for transmission
+ *
+ * @emac: EMAC data structure
+ * @skb: packet data buffer
+ * @queue_id: priority queue id
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+static int icssm_prueth_tx_enqueue(struct prueth_emac *emac,
+ struct sk_buff *skb,
+ enum prueth_queue_id queue_id)
+{
+ struct prueth_queue_desc __iomem *queue_desc;
+ const struct prueth_queue_info *txqueue;
+ struct net_device *ndev = emac->ndev;
+ unsigned int buffer_desc_count;
+ int free_blocks, update_block;
+ bool buffer_wrapped = false;
+ int write_block, read_block;
+ void *src_addr, *dst_addr;
+ int pkt_block_size;
+ void __iomem *dram;
+ int txport, pktlen;
+ u16 update_wr_ptr;
+ u32 wr_buf_desc;
+ void *ocmc_ram;
+
+ dram = emac->prueth->mem[emac->dram].va;
+ if (eth_skb_pad(skb)) {
+ if (netif_msg_tx_err(emac) && net_ratelimit())
+ netdev_err(ndev, "packet pad failed\n");
+ return -ENOMEM;
+ }
+
+ /* which port to tx: MII0 or MII1 */
+ txport = emac->tx_port_queue;
+ src_addr = skb->data;
+ pktlen = skb->len;
+ /* Get the tx queue */
+ queue_desc = emac->tx_queue_descs + queue_id;
+ txqueue = &queue_infos[txport][queue_id];
+
+ buffer_desc_count = icssm_get_buff_desc_count(txqueue);
+
+ /* The PRU firmware deals mostly in pointers already offset into RAM.
+ * For code simplicity we would rather deal in indexes within the queue
+ * we are working with, so calculate them here.
+ */
+ icssm_get_block(queue_desc, txqueue, &write_block, &read_block);
+
+ if (write_block > read_block) {
+ free_blocks = buffer_desc_count - write_block;
+ free_blocks += read_block;
+ } else if (write_block < read_block) {
+ free_blocks = read_block - write_block;
+ } else { /* they are all free */
+ free_blocks = buffer_desc_count;
+ }
+
+ pkt_block_size = DIV_ROUND_UP(pktlen, ICSS_BLOCK_SIZE);
+ if (pkt_block_size > free_blocks) /* out of queue space */
+ return -ENOBUFS;
+
+ /* calculate end BD address post write */
+ update_block = write_block + pkt_block_size;
+
+ /* Check for wrap around */
+ if (update_block >= buffer_desc_count) {
+ update_block %= buffer_desc_count;
+ buffer_wrapped = true;
+ }
+
+ /* OCMC RAM is not cached and write order is not important */
+ ocmc_ram = (__force void *)emac->prueth->mem[PRUETH_MEM_OCMC].va;
+ dst_addr = ocmc_ram + txqueue->buffer_offset +
+ (write_block * ICSS_BLOCK_SIZE);
+
+ /* Copy the data from socket buffer(DRAM) to PRU buffers(OCMC) */
+ if (buffer_wrapped) { /* wrapped around buffer */
+ int bytes = (buffer_desc_count - write_block) * ICSS_BLOCK_SIZE;
+ int remaining;
+
+ /* bytes is an integral multiple of ICSS_BLOCK_SIZE, but the
+ * entire packet may have fit within the last BD if the packet
+ * length is not an integral multiple of ICSS_BLOCK_SIZE
+ */
+ if (pktlen < bytes)
+ bytes = pktlen;
+
+ /* copy non-wrapped part */
+ memcpy(dst_addr, src_addr, bytes);
+
+ /* copy wrapped part */
+ src_addr += bytes;
+ remaining = pktlen - bytes;
+ dst_addr = ocmc_ram + txqueue->buffer_offset;
+ memcpy(dst_addr, src_addr, remaining);
+ } else {
+ memcpy(dst_addr, src_addr, pktlen);
+ }
+
+ /* update first buffer descriptor */
+ wr_buf_desc = (pktlen << PRUETH_BD_LENGTH_SHIFT) &
+ PRUETH_BD_LENGTH_MASK;
+ writel(wr_buf_desc, dram + readw(&queue_desc->wr_ptr));
+
+ /* update the write pointer in this queue descriptor, the firmware
+ * polls for this change so this will signal the start of transmission
+ */
+ update_wr_ptr = txqueue->buffer_desc_offset + (update_block * BD_SIZE);
+ writew(update_wr_ptr, &queue_desc->wr_ptr);
+
+ return 0;
+}
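The free-space computation in icssm_prueth_tx_enqueue() is standard circular-buffer arithmetic; a worked example (sketch, illustrative values):

/* 10 descriptors, writer at index 7, reader at index 2: the free region
 * wraps around the end of the ring.
 */
unsigned int buffer_desc_count = 10, write_block = 7, read_block = 2;
unsigned int free_blocks;

if (write_block > read_block)
	free_blocks = (buffer_desc_count - write_block) + read_block; /* 3 + 2 = 5 */
else if (write_block < read_block)
	free_blocks = read_block - write_block;
else
	free_blocks = buffer_desc_count;	/* rd == wr means the ring is empty */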
+
+void icssm_parse_packet_info(struct prueth *prueth, u32 buffer_descriptor,
+ struct prueth_packet_info *pkt_info)
+{
+ pkt_info->shadow = !!(buffer_descriptor & PRUETH_BD_SHADOW_MASK);
+ pkt_info->port = (buffer_descriptor & PRUETH_BD_PORT_MASK) >>
+ PRUETH_BD_PORT_SHIFT;
+ pkt_info->length = (buffer_descriptor & PRUETH_BD_LENGTH_MASK) >>
+ PRUETH_BD_LENGTH_SHIFT;
+ pkt_info->broadcast = !!(buffer_descriptor & PRUETH_BD_BROADCAST_MASK);
+ pkt_info->error = !!(buffer_descriptor & PRUETH_BD_ERROR_MASK);
+ pkt_info->lookup_success = !!(buffer_descriptor &
+ PRUETH_BD_LOOKUP_SUCCESS_MASK);
+ pkt_info->flood = !!(buffer_descriptor & PRUETH_BD_SW_FLOOD_MASK);
+ pkt_info->timestamp = !!(buffer_descriptor & PRUETH_BD_TIMESTAMP_MASK);
+}
+
+/**
+ * icssm_emac_rx_packet - EMAC Receive function
+ *
+ * @emac: EMAC data structure
+ * @bd_rd_ptr: Buffer descriptor read pointer
+ * @pkt_info: packet information structure
+ * @rxqueue: Receive queue information structure
+ *
+ * Get a packet from receive queue
+ *
+ * Return: 0 on success, or -ENOMEM if the skb allocation fails
+ */
+int icssm_emac_rx_packet(struct prueth_emac *emac, u16 *bd_rd_ptr,
+ struct prueth_packet_info *pkt_info,
+ const struct prueth_queue_info *rxqueue)
+{
+ struct net_device *ndev = emac->ndev;
+ unsigned int buffer_desc_count;
+ int read_block, update_block;
+ unsigned int actual_pkt_len;
+ bool buffer_wrapped = false;
+ void *src_addr, *dst_addr;
+ struct sk_buff *skb;
+ int pkt_block_size;
+ void *ocmc_ram;
+
+ /* The PRU firmware deals mostly in pointers already offset into RAM.
+ * For code simplicity we would rather deal in indexes within the queue
+ * we are working with, so calculate them here.
+ */
+ buffer_desc_count = icssm_get_buff_desc_count(rxqueue);
+ read_block = (*bd_rd_ptr - rxqueue->buffer_desc_offset) / BD_SIZE;
+ pkt_block_size = DIV_ROUND_UP(pkt_info->length, ICSS_BLOCK_SIZE);
+
+ /* calculate end BD address post read */
+ update_block = read_block + pkt_block_size;
+
+ /* Check for wrap around */
+ if (update_block >= buffer_desc_count) {
+ update_block %= buffer_desc_count;
+ if (update_block)
+ buffer_wrapped = true;
+ }
+
+ /* calculate new pointer in ram */
+ *bd_rd_ptr = rxqueue->buffer_desc_offset + (update_block * BD_SIZE);
+
+ actual_pkt_len = pkt_info->length;
+
+ /* Allocate a socket buffer for this packet */
+ skb = netdev_alloc_skb_ip_align(ndev, actual_pkt_len);
+ if (!skb) {
+ if (netif_msg_rx_err(emac) && net_ratelimit())
+ netdev_err(ndev, "failed rx buffer alloc\n");
+ return -ENOMEM;
+ }
+
+ dst_addr = skb->data;
+
+ /* OCMC RAM is not cached and read order is not important */
+ ocmc_ram = (__force void *)emac->prueth->mem[PRUETH_MEM_OCMC].va;
+
+ /* Get the start address of the first buffer from
+ * the read buffer description
+ */
+ src_addr = ocmc_ram + rxqueue->buffer_offset +
+ (read_block * ICSS_BLOCK_SIZE);
+
+ /* Copy the data from PRU buffers(OCMC) to socket buffer(DRAM) */
+ if (buffer_wrapped) { /* wrapped around buffer */
+ int bytes = (buffer_desc_count - read_block) * ICSS_BLOCK_SIZE;
+ int remaining;
+ /* bytes is an integral multiple of ICSS_BLOCK_SIZE, but the
+ * entire packet may have fit within the last BD if
+ * pkt_info->length is not an integral multiple of
+ * ICSS_BLOCK_SIZE
+ */
+ if (pkt_info->length < bytes)
+ bytes = pkt_info->length;
+
+ /* copy non-wrapped part */
+ memcpy(dst_addr, src_addr, bytes);
+
+ /* copy wrapped part */
+ dst_addr += bytes;
+ remaining = actual_pkt_len - bytes;
+
+ src_addr = ocmc_ram + rxqueue->buffer_offset;
+ memcpy(dst_addr, src_addr, remaining);
+ src_addr += remaining;
+ } else {
+ memcpy(dst_addr, src_addr, actual_pkt_len);
+ src_addr += actual_pkt_len;
+ }
+
+ skb_put(skb, actual_pkt_len);
+
+ /* send packet up the stack */
+ skb->protocol = eth_type_trans(skb, ndev);
+ netif_receive_skb(skb);
+
+ /* update stats */
+ emac->stats.rx_bytes += actual_pkt_len;
+ emac->stats.rx_packets++;
+
+ return 0;
+}
+
+static int icssm_emac_rx_packets(struct prueth_emac *emac, int budget)
+{
+ struct prueth_queue_desc __iomem *queue_desc;
+ const struct prueth_queue_info *rxqueue;
+ struct prueth *prueth = emac->prueth;
+ struct prueth_packet_info pkt_info;
+ int start_queue, end_queue;
+ void __iomem *shared_ram;
+ u16 bd_rd_ptr, bd_wr_ptr;
+ u16 update_rd_ptr;
+ u8 overflow_cnt;
+ u32 rd_buf_desc;
+ int used = 0;
+ int i, ret;
+
+ shared_ram = emac->prueth->mem[PRUETH_MEM_SHARED_RAM].va;
+
+ start_queue = emac->rx_queue_start;
+ end_queue = emac->rx_queue_end;
+
+ /* skip Rx if budget is 0 */
+ if (!budget)
+ return 0;
+
+ /* search host queues for packets */
+ for (i = start_queue; i <= end_queue; i++) {
+ queue_desc = emac->rx_queue_descs + i;
+ rxqueue = &queue_infos[PRUETH_PORT_HOST][i];
+
+ overflow_cnt = readb(&queue_desc->overflow_cnt);
+ if (overflow_cnt > 0) {
+ emac->stats.rx_over_errors += overflow_cnt;
+ /* reset to zero */
+ writeb(0, &queue_desc->overflow_cnt);
+ }
+
+ bd_rd_ptr = readw(&queue_desc->rd_ptr);
+ bd_wr_ptr = readw(&queue_desc->wr_ptr);
+
+ /* while packets are available in this queue */
+ while (bd_rd_ptr != bd_wr_ptr) {
+ /* get packet info from the read buffer descriptor */
+ rd_buf_desc = readl(shared_ram + bd_rd_ptr);
+ icssm_parse_packet_info(prueth, rd_buf_desc, &pkt_info);
+
+ if (pkt_info.length <= 0) {
+ /* a packet length of zero would prevent us from
+ * ever moving the read pointer ahead, locking up
+ * the driver, so we manually move it to the
+ * write pointer, discarding all remaining
+ * packets in this queue. This should never
+ * happen.
+ */
+ update_rd_ptr = bd_wr_ptr;
+ emac->stats.rx_length_errors++;
+ } else if (pkt_info.length > EMAC_MAX_FRM_SUPPORT) {
+ /* if the packet is too large we skip it but we
+ * still need to move the read pointer ahead
+ * and assume something is wrong with the read
+ * pointer as the firmware should be filtering
+ * these packets
+ */
+ update_rd_ptr = bd_wr_ptr;
+ emac->stats.rx_length_errors++;
+ } else {
+ update_rd_ptr = bd_rd_ptr;
+ ret = icssm_emac_rx_packet(emac, &update_rd_ptr,
+ &pkt_info, rxqueue);
+ if (ret)
+ return used;
+ used++;
+ }
+
+ /* after reading the buffer descriptor we clear it
+ * to prevent improperly moved read pointer errors
+ * from simply looking like old packets.
+ */
+ writel(0, shared_ram + bd_rd_ptr);
+
+ /* update read pointer in queue descriptor */
+ writew(update_rd_ptr, &queue_desc->rd_ptr);
+ bd_rd_ptr = update_rd_ptr;
+
+ /* all we have room for? */
+ if (used >= budget)
+ return used;
+ }
+ }
+
+ return used;
+}
+
+static int icssm_emac_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct prueth_emac *emac = container_of(napi, struct prueth_emac, napi);
+ int num_rx;
+
+ num_rx = icssm_emac_rx_packets(emac, budget);
+
+ if (num_rx < budget && napi_complete_done(napi, num_rx))
+ enable_irq(emac->rx_irq);
+
+ return num_rx;
+}
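Taken together, icssm_emac_rx_irq() and icssm_emac_napi_poll() implement the usual IRQ/NAPI handshake; in outline:

/* hard IRQ:  disable_irq_nosync(rx_irq); napi_schedule(&napi);
 * NAPI poll: n = icssm_emac_rx_packets(emac, budget);
 *            if (n < budget && napi_complete_done(napi, n))
 *                    enable_irq(rx_irq);	// rearm only once drained
 *
 * Keeping the line masked while the poller runs avoids an interrupt storm
 * while packets are still being pulled from the PRU host queues.
 */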
+
+static int icssm_emac_set_boot_pru(struct prueth_emac *emac,
+ struct net_device *ndev)
+{
+ const struct prueth_firmware *pru_firmwares;
+ struct prueth *prueth = emac->prueth;
+ const char *fw_name;
+ int ret;
+
+ pru_firmwares = &prueth->fw_data->fw_pru[emac->port_id - 1];
+ fw_name = pru_firmwares->fw_name[prueth->eth_type];
+ if (!fw_name) {
+ netdev_err(ndev, "eth_type %d not supported\n",
+ prueth->eth_type);
+ return -ENODEV;
+ }
+
+ ret = rproc_set_firmware(emac->pru, fw_name);
+ if (ret) {
+ netdev_err(ndev, "failed to set %s firmware: %d\n",
+ fw_name, ret);
+ return ret;
+ }
+
+ ret = rproc_boot(emac->pru);
+ if (ret) {
+ netdev_err(ndev, "failed to boot %s firmware: %d\n",
+ fw_name, ret);
+ return ret;
+ }
+ return ret;
+}
+
+static int icssm_emac_request_irqs(struct prueth_emac *emac)
+{
+ struct net_device *ndev = emac->ndev;
+ int ret;
+
+ ret = request_irq(emac->rx_irq, icssm_emac_rx_irq,
+ IRQF_TRIGGER_HIGH,
+ ndev->name, ndev);
+ if (ret) {
+ netdev_err(ndev, "unable to request RX IRQ\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+static void icssm_ptp_dram_init(struct prueth_emac *emac)
+{
+ void __iomem *sram = emac->prueth->mem[PRUETH_MEM_SHARED_RAM].va;
+ u64 temp64;
+
+ writew(0, sram + MII_RX_CORRECTION_OFFSET);
+ writew(0, sram + MII_TX_CORRECTION_OFFSET);
+
+ /* Initialize RCF to 1 (Linux N/A) */
+ writel(1 * 1024, sram + TIMESYNC_TC_RCF_OFFSET);
+
+ /* This flag will be set and cleared by firmware */
+ /* Write Sync0 period for sync signal generation in PTP
+ * memory in shared RAM
+ */
+ writel(200000000 / 50, sram + TIMESYNC_SYNC0_WIDTH_OFFSET);
+
+ /* Write CMP1 period for sync signal generation in PTP
+ * memory in shared RAM
+ */
+ temp64 = 1000000;
+ memcpy_toio(sram + TIMESYNC_CMP1_CMP_OFFSET, &temp64, sizeof(temp64));
+
+ /* Write Sync0 period for sync signal generation in PTP
+ * memory in shared RAM
+ */
+ writel(1000000, sram + TIMESYNC_CMP1_PERIOD_OFFSET);
+
+ /* Configures domainNumber list. Firmware supports 2 domains */
+ writeb(0, sram + TIMESYNC_DOMAIN_NUMBER_LIST);
+ writeb(0, sram + TIMESYNC_DOMAIN_NUMBER_LIST + 1);
+
+ /* Configure 1-step/2-step */
+ writeb(1, sram + DISABLE_SWITCH_SYNC_RELAY_OFFSET);
+
+ /* Configures the setting to Link local frame without HSR tag */
+ writeb(0, sram + LINK_LOCAL_FRAME_HAS_HSR_TAG);
+
+ /* Enable E2E/UDP PTP message timestamping */
+ writeb(1, sram + PTP_IPV4_UDP_E2E_ENABLE);
+}
+
+/**
+ * icssm_emac_ndo_open - EMAC device open
+ * @ndev: network adapter device
+ *
+ * Called when the system wants to start the interface.
+ *
+ * Return: 0 for a successful open, or appropriate error code
+ */
+static int icssm_emac_ndo_open(struct net_device *ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+ int ret;
+
+ /* set h/w MAC as user might have re-configured */
+ ether_addr_copy(emac->mac_addr, ndev->dev_addr);
+
+ if (!prueth->emac_configured)
+ icssm_prueth_init_ethernet_mode(prueth);
+
+ icssm_prueth_emac_config(emac);
+
+ if (!prueth->emac_configured) {
+ icssm_ptp_dram_init(emac);
+ ret = icss_iep_init(prueth->iep, NULL, NULL, 0);
+ if (ret) {
+ netdev_err(ndev, "Failed to initialize iep: %d\n", ret);
+ goto iep_exit;
+ }
+ }
+
+ ret = icssm_emac_set_boot_pru(emac, ndev);
+ if (ret)
+ goto iep_exit;
+
+ ret = icssm_emac_request_irqs(emac);
+ if (ret)
+ goto rproc_shutdown;
+
+ napi_enable(&emac->napi);
+
+ /* start PHY */
+ phy_start(emac->phydev);
+
+ /* enable the port and vlan */
+ icssm_prueth_port_enable(emac, true);
+
+ prueth->emac_configured |= BIT(emac->port_id);
+
+ if (netif_msg_drv(emac))
+ dev_notice(&ndev->dev, "started\n");
+
+ return 0;
+
+rproc_shutdown:
+ rproc_shutdown(emac->pru);
+
+iep_exit:
+ if (!prueth->emac_configured)
+ icss_iep_exit(prueth->iep);
+
+ return ret;
+}
+
+/**
+ * icssm_emac_ndo_stop - EMAC device stop
+ * @ndev: network adapter device
+ *
+ * Called when the system wants to stop or bring down the interface.
+ *
+ * Return: Always 0 (Success)
+ */
+static int icssm_emac_ndo_stop(struct net_device *ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+
+ prueth->emac_configured &= ~BIT(emac->port_id);
+
+ /* disable the mac port */
+ icssm_prueth_port_enable(emac, false);
+
+ /* stop PHY */
+ phy_stop(emac->phydev);
+
+ napi_disable(&emac->napi);
+ hrtimer_cancel(&emac->tx_hrtimer);
+
+ /* stop the PRU */
+ rproc_shutdown(emac->pru);
+
+ /* free rx interrupts */
+ free_irq(emac->rx_irq, ndev);
+
+ if (netif_msg_drv(emac))
+ dev_notice(&ndev->dev, "stopped\n");
+
+ return 0;
+}
+
+/* VLAN-tag PCP to priority queue map for EMAC/Switch/HSR/PRP used by the driver.
+ * Index is PCP value / 2.
+ * low - pcp 0..3 maps to Q4 for Host
+ * high - pcp 4..7 maps to Q3 for Host
+ * low - pcp 0..3 maps to Q2 (FWD Queue) for PRU-x
+ * where x = 1 for PRUETH_PORT_MII0
+ * 0 for PRUETH_PORT_MII1
+ * high - pcp 4..7 maps to Q1 (FWD Queue) for PRU-x
+ */
+static const unsigned short emac_pcp_tx_priority_queue_map[] = {
+ PRUETH_QUEUE4, PRUETH_QUEUE4,
+ PRUETH_QUEUE3, PRUETH_QUEUE3,
+ PRUETH_QUEUE2, PRUETH_QUEUE2,
+ PRUETH_QUEUE1, PRUETH_QUEUE1,
+};
+
+static u16 icssm_prueth_get_tx_queue_id(struct prueth *prueth,
+ struct sk_buff *skb)
+{
+ u16 vlan_tci, pcp;
+ int err;
+
+ err = vlan_get_tag(skb, &vlan_tci);
+ if (likely(err))
+ pcp = 0;
+ else
+ pcp = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+
+ /* The code below (pcp >>= 1) is common to all
+ * protocols (i.e., EMAC, RSTP, HSR and PRP):
+ * pcp values 0,1 are reduced to 0, mapped to QUEUE4
+ * pcp values 2,3 are reduced to 1, mapped to QUEUE4
+ * pcp values 4,5 are reduced to 2, mapped to QUEUE3
+ * pcp values 6,7 are reduced to 3, mapped to QUEUE3
+ */
+ pcp >>= 1;
+
+ return emac_pcp_tx_priority_queue_map[pcp];
+}
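A worked example of the mapping above (illustrative vlan_tci value): for vlan_tci = 0xA00A, pcp = (0xA00A & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT = 5; pcp >>= 1 gives 2, and emac_pcp_tx_priority_queue_map[2] is PRUETH_QUEUE3, so PCP 4..7 traffic lands in the higher-priority host queue exactly as the table comment describes.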
+
+/**
+ * icssm_emac_ndo_start_xmit - EMAC Transmit function
+ * @skb: SKB pointer
+ * @ndev: EMAC network adapter
+ *
+ * Called by the system to transmit a packet - we queue the packet in the
+ * EMAC hardware transmit queue
+ *
+ * Return: enum netdev_tx
+ */
+static enum netdev_tx icssm_emac_ndo_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ int ret;
+ u16 qid;
+
+ qid = icssm_prueth_get_tx_queue_id(emac->prueth, skb);
+ ret = icssm_prueth_tx_enqueue(emac, skb, qid);
+ if (ret) {
+ if (ret != -ENOBUFS && netif_msg_tx_err(emac) &&
+ net_ratelimit())
+ netdev_err(ndev, "packet queue failed: %d\n", ret);
+ goto fail_tx;
+ }
+
+ emac->stats.tx_packets++;
+ emac->stats.tx_bytes += skb->len;
+ dev_kfree_skb_any(skb);
+
+ return NETDEV_TX_OK;
+
+fail_tx:
+ if (ret == -ENOBUFS) {
+ netif_stop_queue(ndev);
+ hrtimer_start(&emac->tx_hrtimer,
+ us_to_ktime(HR_TIMER_TX_DELAY_US),
+ HRTIMER_MODE_REL_PINNED);
+ ret = NETDEV_TX_BUSY;
+ } else {
+ /* error */
+ emac->stats.tx_dropped++;
+ ret = NET_XMIT_DROP;
+ }
+
+ return ret;
+}
+
+/**
+ * icssm_emac_ndo_get_stats64 - EMAC get statistics function
+ * @ndev: The EMAC network adapter
+ * @stats: rtnl_link_stats64 structure
+ *
+ * Called when the system wants to get statistics from the device.
+ *
+ */
+static void icssm_emac_ndo_get_stats64(struct net_device *ndev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+
+ stats->rx_packets = emac->stats.rx_packets;
+ stats->rx_bytes = emac->stats.rx_bytes;
+ stats->tx_packets = emac->stats.tx_packets;
+ stats->tx_bytes = emac->stats.tx_bytes;
+ stats->tx_dropped = emac->stats.tx_dropped;
+ stats->rx_over_errors = emac->stats.rx_over_errors;
+ stats->rx_length_errors = emac->stats.rx_length_errors;
+}
+
+static const struct net_device_ops emac_netdev_ops = {
+ .ndo_open = icssm_emac_ndo_open,
+ .ndo_stop = icssm_emac_ndo_stop,
+ .ndo_start_xmit = icssm_emac_ndo_start_xmit,
+ .ndo_get_stats64 = icssm_emac_ndo_get_stats64,
+};
+
+/* get emac_port corresponding to eth_node name */
+static int icssm_prueth_node_port(struct device_node *eth_node)
+{
+ u32 port_id;
+ int ret;
+
+ ret = of_property_read_u32(eth_node, "reg", &port_id);
+ if (ret)
+ return ret;
+
+ if (port_id == 0)
+ return PRUETH_PORT_MII0;
+ else if (port_id == 1)
+ return PRUETH_PORT_MII1;
+ else
+ return PRUETH_PORT_INVALID;
+}
+
+/* get MAC instance corresponding to eth_node name */
+static int icssm_prueth_node_mac(struct device_node *eth_node)
+{
+ u32 port_id;
+ int ret;
+
+ ret = of_property_read_u32(eth_node, "reg", &port_id);
+ if (ret)
+ return ret;
+
+ if (port_id == 0)
+ return PRUETH_MAC0;
+ else if (port_id == 1)
+ return PRUETH_MAC1;
+ else
+ return PRUETH_MAC_INVALID;
+}
+
+static enum hrtimer_restart icssm_emac_tx_timer_callback(struct hrtimer *timer)
+{
+ struct prueth_emac *emac =
+ container_of(timer, struct prueth_emac, tx_hrtimer);
+
+ if (netif_queue_stopped(emac->ndev))
+ netif_wake_queue(emac->ndev);
+
+ return HRTIMER_NORESTART;
+}
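This callback closes the loop on the -ENOBUFS path in icssm_emac_ndo_start_xmit() above; the backpressure cycle, in outline:

/* xmit:  enqueue fails with -ENOBUFS -> netif_stop_queue(),
 *        hrtimer_start(tx_hrtimer, 100 us, PINNED), return NETDEV_TX_BUSY
 *        so the core holds on to the skb;
 * timer: netif_wake_queue() -> the core resubmits the held skb.
 *
 * Rather than waking the queue from a TX-completion event, the driver
 * simply retries after a fixed HR_TIMER_TX_DELAY_US delay.
 */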
+
+static int icssm_prueth_netdev_init(struct prueth *prueth,
+ struct device_node *eth_node)
+{
+ struct prueth_emac *emac;
+ struct net_device *ndev;
+ enum prueth_port port;
+ enum prueth_mac mac;
+ int ret;
+
+ port = icssm_prueth_node_port(eth_node);
+ if (port == PRUETH_PORT_INVALID)
+ return -EINVAL;
+
+ mac = icssm_prueth_node_mac(eth_node);
+ if (mac == PRUETH_MAC_INVALID)
+ return -EINVAL;
+
+ ndev = devm_alloc_etherdev(prueth->dev, sizeof(*emac));
+ if (!ndev)
+ return -ENOMEM;
+
+ SET_NETDEV_DEV(ndev, prueth->dev);
+ emac = netdev_priv(ndev);
+ prueth->emac[mac] = emac;
+ emac->prueth = prueth;
+ emac->ndev = ndev;
+ emac->port_id = port;
+
+ /* by default eth_type is EMAC */
+ switch (port) {
+ case PRUETH_PORT_MII0:
+ emac->tx_port_queue = PRUETH_PORT_QUEUE_MII0;
+
+ /* packets from MII0 are on queues 1 through 2 */
+ emac->rx_queue_start = PRUETH_QUEUE1;
+ emac->rx_queue_end = PRUETH_QUEUE2;
+
+ emac->dram = PRUETH_MEM_DRAM0;
+ emac->pru = prueth->pru0;
+ break;
+ case PRUETH_PORT_MII1:
+ emac->tx_port_queue = PRUETH_PORT_QUEUE_MII1;
+
+ /* packets from MII1 are on queues 3 through 4 */
+ emac->rx_queue_start = PRUETH_QUEUE3;
+ emac->rx_queue_end = PRUETH_QUEUE4;
+
+ emac->dram = PRUETH_MEM_DRAM1;
+ emac->pru = prueth->pru1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ emac->rx_irq = of_irq_get_byname(eth_node, "rx");
+ if (emac->rx_irq < 0) {
+ ret = emac->rx_irq;
+ if (ret != -EPROBE_DEFER)
+ dev_err(prueth->dev, "could not get rx irq\n");
+ goto free;
+ }
+
+ /* get mac address from DT and set private and netdev addr */
+ ret = of_get_ethdev_address(eth_node, ndev);
+ if (!is_valid_ether_addr(ndev->dev_addr)) {
+ eth_hw_addr_random(ndev);
+ dev_warn(prueth->dev, "port %d: using random MAC addr: %pM\n",
+ port, ndev->dev_addr);
+ }
+ ether_addr_copy(emac->mac_addr, ndev->dev_addr);
+
+ /* connect PHY */
+ emac->phydev = of_phy_get_and_connect(ndev, eth_node,
+ icssm_emac_adjust_link);
+ if (!emac->phydev) {
+ dev_dbg(prueth->dev, "PHY connection failed\n");
+ ret = -ENODEV;
+ goto free;
+ }
+
+ /* remove unsupported modes */
+ phy_remove_link_mode(emac->phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
+
+ phy_remove_link_mode(emac->phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
+ phy_remove_link_mode(emac->phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
+
+ phy_remove_link_mode(emac->phydev, ETHTOOL_LINK_MODE_Pause_BIT);
+ phy_remove_link_mode(emac->phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);
+
+ ndev->dev.of_node = eth_node;
+ ndev->netdev_ops = &emac_netdev_ops;
+
+ netif_napi_add(ndev, &emac->napi, icssm_emac_napi_poll);
+
+ hrtimer_setup(&emac->tx_hrtimer, &icssm_emac_tx_timer_callback,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
+
+ return 0;
+free:
+ emac->ndev = NULL;
+ prueth->emac[mac] = NULL;
+
+ return ret;
+}
+
+static void icssm_prueth_netdev_exit(struct prueth *prueth,
+ struct device_node *eth_node)
+{
+ struct prueth_emac *emac;
+ enum prueth_mac mac;
+
+ mac = icssm_prueth_node_mac(eth_node);
+ if (mac == PRUETH_MAC_INVALID)
+ return;
+
+ emac = prueth->emac[mac];
+ if (!emac)
+ return;
+
+ phy_disconnect(emac->phydev);
+
+ netif_napi_del(&emac->napi);
+ prueth->emac[mac] = NULL;
+}
+
+static int icssm_prueth_probe(struct platform_device *pdev)
+{
+ struct device_node *eth0_node = NULL, *eth1_node = NULL;
+ struct device_node *eth_node, *eth_ports_node;
+ enum pruss_pru_id pruss_id0, pruss_id1;
+ struct device *dev = &pdev->dev;
+ struct device_node *np;
+ struct prueth *prueth;
+ struct pruss *pruss;
+ int i, ret;
+
+ np = dev->of_node;
+ if (!np)
+ return -ENODEV; /* we don't support non DT */
+
+ prueth = devm_kzalloc(dev, sizeof(*prueth), GFP_KERNEL);
+ if (!prueth)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, prueth);
+ prueth->dev = dev;
+ prueth->fw_data = device_get_match_data(dev);
+
+ eth_ports_node = of_get_child_by_name(np, "ethernet-ports");
+ if (!eth_ports_node)
+ return -ENOENT;
+
+ for_each_child_of_node(eth_ports_node, eth_node) {
+ u32 reg;
+
+ if (strcmp(eth_node->name, "ethernet-port"))
+ continue;
+ ret = of_property_read_u32(eth_node, "reg", &reg);
+ if (ret < 0) {
+ dev_err(dev, "%pOF error reading port_id %d\n",
+ eth_node, ret);
+ of_node_put(eth_node);
+ return ret;
+ }
+
+ of_node_get(eth_node);
+
+ if (reg == 0 && !eth0_node) {
+ eth0_node = eth_node;
+ if (!of_device_is_available(eth0_node)) {
+ of_node_put(eth0_node);
+ eth0_node = NULL;
+ }
+ } else if (reg == 1 && !eth1_node) {
+ eth1_node = eth_node;
+ if (!of_device_is_available(eth1_node)) {
+ of_node_put(eth1_node);
+ eth1_node = NULL;
+ }
+ } else {
+ if (reg == 0 || reg == 1)
+ dev_err(dev, "duplicate port reg value: %d\n",
+ reg);
+ else
+ dev_err(dev, "invalid port reg value: %d\n",
+ reg);
+
+ of_node_put(eth_node);
+ }
+ }
+
+ of_node_put(eth_ports_node);
+
+ /* At least one node must be present and available else we fail */
+ if (!eth0_node && !eth1_node) {
+ dev_err(dev, "neither port0 nor port1 node available\n");
+ return -ENODEV;
+ }
+
+ prueth->eth_node[PRUETH_MAC0] = eth0_node;
+ prueth->eth_node[PRUETH_MAC1] = eth1_node;
+
+ prueth->mii_rt = syscon_regmap_lookup_by_phandle(np, "ti,mii-rt");
+ if (IS_ERR(prueth->mii_rt)) {
+ dev_err(dev, "couldn't get mii-rt syscon regmap\n");
+ ret = PTR_ERR(prueth->mii_rt);
+ goto put_eth;
+ }
+
+ if (eth0_node) {
+ prueth->pru0 = pru_rproc_get(np, 0, &pruss_id0);
+ if (IS_ERR(prueth->pru0)) {
+ ret = PTR_ERR(prueth->pru0);
+ dev_err_probe(dev, ret, "unable to get PRU0");
+ goto put_eth;
+ }
+ }
+
+ if (eth1_node) {
+ prueth->pru1 = pru_rproc_get(np, 1, &pruss_id1);
+ if (IS_ERR(prueth->pru1)) {
+ ret = PTR_ERR(prueth->pru1);
+ dev_err_probe(dev, ret, "unable to get PRU1");
+ goto put_pru0;
+ }
+ }
+
+ pruss = pruss_get(prueth->pru0 ? prueth->pru0 : prueth->pru1);
+ if (IS_ERR(pruss)) {
+ ret = PTR_ERR(pruss);
+ dev_err(dev, "unable to get pruss handle\n");
+ goto put_pru1;
+ }
+ prueth->pruss = pruss;
+
+ /* Configure PRUSS */
+ if (eth0_node)
+ pruss_cfg_gpimode(pruss, pruss_id0, PRUSS_GPI_MODE_MII);
+ if (eth1_node)
+ pruss_cfg_gpimode(pruss, pruss_id1, PRUSS_GPI_MODE_MII);
+ pruss_cfg_miirt_enable(pruss, true);
+ pruss_cfg_xfr_enable(pruss, PRU_TYPE_PRU, true);
+
+ /* Get PRUSS mem resources */
+ /* OCMC is system resource which we get separately */
+ for (i = 0; i < ARRAY_SIZE(pruss_mem_ids); i++) {
+ /* skip appropriate DRAM if not required */
+ if (!eth0_node && i == PRUETH_MEM_DRAM0)
+ continue;
+
+ if (!eth1_node && i == PRUETH_MEM_DRAM1)
+ continue;
+
+ ret = pruss_request_mem_region(pruss, pruss_mem_ids[i],
+ &prueth->mem[i]);
+ if (ret) {
+ dev_err(dev, "unable to get PRUSS resource %d: %d\n",
+ i, ret);
+ goto put_mem;
+ }
+ }
+
+ prueth->sram_pool = of_gen_pool_get(np, "sram", 0);
+ if (!prueth->sram_pool) {
+ dev_err(dev, "unable to get SRAM pool\n");
+ ret = -ENODEV;
+ goto put_mem;
+ }
+
+ prueth->ocmc_ram_size = OCMC_RAM_SIZE;
+ /* Decreased by 8KB to address the reserved region for AM33x */
+ if (prueth->fw_data->driver_data == PRUSS_AM33XX)
+ prueth->ocmc_ram_size = (SZ_64K - SZ_8K);
+
+ prueth->mem[PRUETH_MEM_OCMC].va =
+ (void __iomem *)gen_pool_alloc(prueth->sram_pool,
+ prueth->ocmc_ram_size);
+ if (!prueth->mem[PRUETH_MEM_OCMC].va) {
+ dev_err(dev, "unable to allocate OCMC resource\n");
+ ret = -ENOMEM;
+ goto put_mem;
+ }
+ prueth->mem[PRUETH_MEM_OCMC].pa = gen_pool_virt_to_phys
+ (prueth->sram_pool, (unsigned long)
+ prueth->mem[PRUETH_MEM_OCMC].va);
+ prueth->mem[PRUETH_MEM_OCMC].size = prueth->ocmc_ram_size;
+ dev_dbg(dev, "ocmc: pa %pa va %p size %#zx\n",
+ &prueth->mem[PRUETH_MEM_OCMC].pa,
+ prueth->mem[PRUETH_MEM_OCMC].va,
+ prueth->mem[PRUETH_MEM_OCMC].size);
+
+ /* setup netdev interfaces */
+ if (eth0_node) {
+ ret = icssm_prueth_netdev_init(prueth, eth0_node);
+ if (ret) {
+ if (ret != -EPROBE_DEFER) {
+ dev_err(dev, "netdev init %s failed: %d\n",
+ eth0_node->name, ret);
+ }
+ goto free_pool;
+ }
+ }
+
+ if (eth1_node) {
+ ret = icssm_prueth_netdev_init(prueth, eth1_node);
+ if (ret) {
+ if (ret != -EPROBE_DEFER) {
+ dev_err(dev, "netdev init %s failed: %d\n",
+ eth1_node->name, ret);
+ }
+ goto netdev_exit;
+ }
+ }
+
+ prueth->iep = icss_iep_get(np);
+ if (IS_ERR(prueth->iep)) {
+ ret = PTR_ERR(prueth->iep);
+ dev_err(dev, "unable to get IEP\n");
+ goto netdev_exit;
+ }
+
+ /* register the network devices */
+ if (eth0_node) {
+ ret = register_netdev(prueth->emac[PRUETH_MAC0]->ndev);
+ if (ret) {
+ dev_err(dev, "can't register netdev for port MII0");
+ goto iep_put;
+ }
+
+ prueth->registered_netdevs[PRUETH_MAC0] =
+ prueth->emac[PRUETH_MAC0]->ndev;
+ }
+
+ if (eth1_node) {
+ ret = register_netdev(prueth->emac[PRUETH_MAC1]->ndev);
+ if (ret) {
+ dev_err(dev, "can't register netdev for port MII1");
+ goto netdev_unregister;
+ }
+
+ prueth->registered_netdevs[PRUETH_MAC1] =
+ prueth->emac[PRUETH_MAC1]->ndev;
+ }
+
+ dev_info(dev, "TI PRU ethernet driver initialized: %s EMAC mode\n",
+ (!eth0_node || !eth1_node) ? "single" : "dual");
+
+ if (eth1_node)
+ of_node_put(eth1_node);
+ if (eth0_node)
+ of_node_put(eth0_node);
+ return 0;
+
+netdev_unregister:
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ if (!prueth->registered_netdevs[i])
+ continue;
+ unregister_netdev(prueth->registered_netdevs[i]);
+ }
+
+iep_put:
+ icss_iep_put(prueth->iep);
+ prueth->iep = NULL;
+
+netdev_exit:
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ eth_node = prueth->eth_node[i];
+ if (!eth_node)
+ continue;
+
+ icssm_prueth_netdev_exit(prueth, eth_node);
+ }
+
+free_pool:
+ gen_pool_free(prueth->sram_pool,
+ (unsigned long)prueth->mem[PRUETH_MEM_OCMC].va,
+ prueth->ocmc_ram_size);
+
+put_mem:
+ for (i = PRUETH_MEM_DRAM0; i < PRUETH_MEM_OCMC; i++) {
+ if (prueth->mem[i].va)
+ pruss_release_mem_region(pruss, &prueth->mem[i]);
+ }
+ pruss_put(prueth->pruss);
+
+put_pru1:
+ if (eth1_node)
+ pru_rproc_put(prueth->pru1);
+put_pru0:
+ if (eth0_node)
+ pru_rproc_put(prueth->pru0);
+put_eth:
+ of_node_put(eth1_node);
+ of_node_put(eth0_node);
+
+ return ret;
+}
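
The unwind ladder above follows the standard kernel probe pattern: each goto label releases exactly what was acquired before the failing step, in reverse acquisition order. A minimal sketch of the shape, with hypothetical acquire_*/release_* helpers standing in for pru_rproc_get(), pruss_get(), pruss_request_mem_region() and friends:

/* Illustrative only -- acquire_a/b/c and release_a/b are placeholders. */
int acquire_a(void);
int acquire_b(void);
int acquire_c(void);
void release_a(void);
void release_b(void);

int example_probe(void)
{
	int ret;

	ret = acquire_a();		/* e.g. pru_rproc_get() */
	if (ret)
		return ret;		/* nothing to undo yet */

	ret = acquire_b();		/* e.g. pruss_get() */
	if (ret)
		goto err_a;

	ret = acquire_c();		/* e.g. pruss_request_mem_region() */
	if (ret)
		goto err_b;

	return 0;

err_b:
	release_b();			/* undo acquire_b() */
err_a:
	release_a();			/* undo acquire_a() */
	return ret;
}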
+
+static void icssm_prueth_remove(struct platform_device *pdev)
+{
+ struct prueth *prueth = platform_get_drvdata(pdev);
+ struct device_node *eth_node;
+ int i;
+
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ if (!prueth->registered_netdevs[i])
+ continue;
+ unregister_netdev(prueth->registered_netdevs[i]);
+ }
+
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ eth_node = prueth->eth_node[i];
+ if (!eth_node)
+ continue;
+
+ icssm_prueth_netdev_exit(prueth, eth_node);
+ of_node_put(eth_node);
+ }
+
+ gen_pool_free(prueth->sram_pool,
+ (unsigned long)prueth->mem[PRUETH_MEM_OCMC].va,
+ prueth->ocmc_ram_size);
+
+ for (i = PRUETH_MEM_DRAM0; i < PRUETH_MEM_OCMC; i++) {
+ if (prueth->mem[i].va)
+ pruss_release_mem_region(prueth->pruss,
+ &prueth->mem[i]);
+ }
+
+ icss_iep_put(prueth->iep);
+ prueth->iep = NULL;
+
+ pruss_put(prueth->pruss);
+
+ if (prueth->eth_node[PRUETH_MAC0])
+ pru_rproc_put(prueth->pru0);
+ if (prueth->eth_node[PRUETH_MAC1])
+ pru_rproc_put(prueth->pru1);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int icssm_prueth_suspend(struct device *dev)
+{
+ struct prueth *prueth = dev_get_drvdata(dev);
+ struct net_device *ndev;
+ int i, ret;
+
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ ndev = prueth->registered_netdevs[i];
+
+ if (!ndev)
+ continue;
+
+ if (netif_running(ndev)) {
+ netif_device_detach(ndev);
+ ret = icssm_emac_ndo_stop(ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "failed to stop: %d", ret);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int icssm_prueth_resume(struct device *dev)
+{
+ struct prueth *prueth = dev_get_drvdata(dev);
+ struct net_device *ndev;
+ int i, ret;
+
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ ndev = prueth->registered_netdevs[i];
+
+ if (!ndev)
+ continue;
+
+ if (netif_running(ndev)) {
+ ret = icssm_emac_ndo_open(ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "failed to start: %d", ret);
+ return ret;
+ }
+ netif_device_attach(ndev);
+ }
+ }
+
+ return 0;
+}
+
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops prueth_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(icssm_prueth_suspend, icssm_prueth_resume)
+};
+
+/* AM335x SoC-specific firmware data */
+static struct prueth_private_data am335x_prueth_pdata = {
+ .driver_data = PRUSS_AM33XX,
+ .fw_pru[PRUSS_PRU0] = {
+ .fw_name[PRUSS_ETHTYPE_EMAC] =
+ "ti-pruss/am335x-pru0-prueth-fw.elf",
+ },
+ .fw_pru[PRUSS_PRU1] = {
+ .fw_name[PRUSS_ETHTYPE_EMAC] =
+ "ti-pruss/am335x-pru1-prueth-fw.elf",
+ },
+};
+
+/* AM437x SoC-specific firmware data */
+static struct prueth_private_data am437x_prueth_pdata = {
+ .driver_data = PRUSS_AM43XX,
+ .fw_pru[PRUSS_PRU0] = {
+ .fw_name[PRUSS_ETHTYPE_EMAC] =
+ "ti-pruss/am437x-pru0-prueth-fw.elf",
+ },
+ .fw_pru[PRUSS_PRU1] = {
+ .fw_name[PRUSS_ETHTYPE_EMAC] =
+ "ti-pruss/am437x-pru1-prueth-fw.elf",
+ },
+};
+
+/* AM57xx SoC-specific firmware data */
+static struct prueth_private_data am57xx_prueth_pdata = {
+ .driver_data = PRUSS_AM57XX,
+ .fw_pru[PRUSS_PRU0] = {
+ .fw_name[PRUSS_ETHTYPE_EMAC] =
+ "ti-pruss/am57xx-pru0-prueth-fw.elf",
+ },
+ .fw_pru[PRUSS_PRU1] = {
+ .fw_name[PRUSS_ETHTYPE_EMAC] =
+ "ti-pruss/am57xx-pru1-prueth-fw.elf",
+ },
+};
+
+static const struct of_device_id prueth_dt_match[] = {
+ { .compatible = "ti,am57-prueth", .data = &am57xx_prueth_pdata, },
+ { .compatible = "ti,am4376-prueth", .data = &am437x_prueth_pdata, },
+ { .compatible = "ti,am3359-prueth", .data = &am335x_prueth_pdata, },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, prueth_dt_match);
+
+static struct platform_driver prueth_driver = {
+ .probe = icssm_prueth_probe,
+ .remove = icssm_prueth_remove,
+ .driver = {
+ .name = "prueth",
+ .of_match_table = prueth_dt_match,
+ .pm = &prueth_dev_pm_ops,
+ },
+};
+module_platform_driver(prueth_driver);
+
+MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
+MODULE_DESCRIPTION("PRUSS ICSSM Ethernet Driver");
+MODULE_LICENSE("GPL");
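
For reference, a minimal sketch (not part of this patch) of how the per-SoC .data attached to prueth_dt_match is typically fetched at probe time; device_get_match_data() is the generic helper for OF-matched platform devices, and the pr_info() is purely illustrative:

#include <linux/platform_device.h>
#include <linux/property.h>

static int example_get_pdata(struct platform_device *pdev)
{
	const struct prueth_private_data *fw_data;

	fw_data = device_get_match_data(&pdev->dev);
	if (!fw_data)
		return -ENODEV;

	/* e.g. the EMAC firmware image name for PRU0 */
	pr_info("PRU0 firmware: %s\n",
		fw_data->fw_pru[PRUSS_PRU0].fw_name[PRUSS_ETHTYPE_EMAC]);
	return 0;
}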
diff --git a/drivers/net/ethernet/ti/icssm/icssm_prueth.h b/drivers/net/ethernet/ti/icssm/icssm_prueth.h
new file mode 100644
index 000000000000..8e7e0af08144
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssm/icssm_prueth.h
@@ -0,0 +1,262 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Texas Instruments ICSSM Ethernet driver
+ *
+ * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
+ *
+ */
+
+#ifndef __NET_TI_PRUETH_H
+#define __NET_TI_PRUETH_H
+
+#include <linux/phy.h>
+#include <linux/types.h>
+#include <linux/pruss_driver.h>
+#include <linux/remoteproc/pruss.h>
+
+#include "icssm_switch.h"
+#include "icssm_prueth_ptp.h"
+
+/* ICSSM size of redundancy tag */
+#define ICSSM_LRE_TAG_SIZE 6
+
+/* PRUSS local memory map */
+#define ICSS_LOCAL_SHARED_RAM 0x00010000
+#define EMAC_MAX_PKTLEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
+/* The macro below provides 1528-byte frame support, so that full-sized
+ * frames are still allowed when the redundancy tag is present
+ */
+#define EMAC_MAX_FRM_SUPPORT (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN + \
+ ICSSM_LRE_TAG_SIZE)
+
+/* PRU Ethernet Type - Ethernet functionality (protocol
+ * implemented) provided by the PRU firmware being loaded.
+ */
+enum pruss_ethtype {
+ PRUSS_ETHTYPE_EMAC = 0,
+ PRUSS_ETHTYPE_HSR,
+ PRUSS_ETHTYPE_PRP,
+ PRUSS_ETHTYPE_SWITCH,
+ PRUSS_ETHTYPE_MAX,
+};
+
+#define PRUETH_IS_EMAC(p) ((p)->eth_type == PRUSS_ETHTYPE_EMAC)
+#define PRUETH_IS_SWITCH(p) ((p)->eth_type == PRUSS_ETHTYPE_SWITCH)
+
+/**
+ * struct prueth_queue_desc - Queue descriptor
+ * @rd_ptr: Read pointer, points to a buffer descriptor in Shared PRU RAM.
+ * @wr_ptr: Write pointer, points to a buffer descriptor in Shared PRU RAM.
+ * @busy_s: Slave queue busy flag, set by the slave (us) to request
+ *          access from the master (PRU).
+ * @status: Bit field status register, Bits:
+ * 0: Master queue busy flag.
+ * 1: Packet has been placed in collision queue.
+ * 2: Packet has been discarded due to overflow.
+ * @max_fill_level: Maximum queue usage seen.
+ * @overflow_cnt: Count of queue overflows.
+ *
+ * Each port has up to 4 queues of variable length. A queue is processed
+ * as a ring buffer with read and write pointers. Both pointers are
+ * address pointers and increment by 4 for each buffer descriptor
+ * position. Each queue has a length defined in constants and a status.
+ */
+struct prueth_queue_desc {
+ u16 rd_ptr;
+ u16 wr_ptr;
+ u8 busy_s;
+ u8 status;
+ u8 max_fill_level;
+ u8 overflow_cnt;
+};
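
A sketch of the ring walk the kernel-doc above implies, using the prueth_queue_info layout defined just below. Whether buffer_desc_end is inclusive or one-past-the-end is a firmware convention, so the wrap comparison here is an assumption:

static u16 example_advance_rd_ptr(u16 rd_ptr,
				  const struct prueth_queue_info *q)
{
	rd_ptr += 4;				/* one BD position */
	if (rd_ptr >= q->buffer_desc_end)	/* assumed exclusive end */
		rd_ptr = q->buffer_desc_offset;	/* wrap to ring start */
	return rd_ptr;
}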
+
+/**
+ * struct prueth_queue_info - Information about a queue in memory
+ * @buffer_offset: buffer offset in OCMC RAM
+ * @queue_desc_offset: queue descriptor offset in Shared RAM
+ * @buffer_desc_offset: buffer descriptors offset in Shared RAM
+ * @buffer_desc_end: end address of buffer descriptors in Shared RAM
+ */
+struct prueth_queue_info {
+ u16 buffer_offset;
+ u16 queue_desc_offset;
+ u16 buffer_desc_offset;
+ u16 buffer_desc_end;
+};
+
+/**
+ * struct prueth_packet_info - Info about a packet in buffer
+ * @shadow: this packet is stored in the collision queue
+ * @port: port packet is on
+ * @length: length of packet
+ * @broadcast: this packet is a broadcast packet
+ * @error: this packet has an error
+ * @lookup_success: src mac found in FDB
+ * @flood: packet is to be flooded
+ * @timestamp: Specifies if timestamp is appended to the packet
+ */
+struct prueth_packet_info {
+ bool shadow;
+ unsigned int port;
+ unsigned int length;
+ bool broadcast;
+ bool error;
+ bool lookup_success;
+ bool flood;
+ bool timestamp;
+};
+
+/* In switch mode there are 3 real ports, i.e. 3 MAC addresses.
+ * However, Linux sees only the host side port; the other 2 ports
+ * are the switch ports.
+ * In EMAC mode there are 2 real ports, i.e. 2 MAC addresses, and
+ * Linux sees both ports.
+ */
+enum prueth_port {
+ PRUETH_PORT_HOST = 0, /* host side port */
+ PRUETH_PORT_MII0, /* physical port MII 0 */
+ PRUETH_PORT_MII1, /* physical port MII 1 */
+ PRUETH_PORT_INVALID, /* Invalid prueth port */
+};
+
+enum prueth_mac {
+ PRUETH_MAC0 = 0,
+ PRUETH_MAC1,
+ PRUETH_NUM_MACS,
+ PRUETH_MAC_INVALID,
+};
+
+/* In both switch & emac modes there are 3 port queues
+ * EMAC mode:
+ * RX packets for both MII0 & MII1 ports come on
+ * QUEUE_HOST.
+ * TX packets for MII0 go on QUEUE_MII0, TX packets
+ * for MII1 go on QUEUE_MII1.
+ * Switch mode:
+ * Host port RX packets come on QUEUE_HOST
+ * TX packets might have to go on MII0 or MII1 or both.
+ * MII0 TX queue is QUEUE_MII0 and MII1 TX queue is
+ * QUEUE_MII1.
+ */
+enum prueth_port_queue_id {
+ PRUETH_PORT_QUEUE_HOST = 0,
+ PRUETH_PORT_QUEUE_MII0,
+ PRUETH_PORT_QUEUE_MII1,
+ PRUETH_PORT_QUEUE_MAX,
+};
+
+/* Each port has 4 queues and 1 collision queue */
+enum prueth_queue_id {
+ PRUETH_QUEUE1 = 0,
+ PRUETH_QUEUE2,
+ PRUETH_QUEUE3,
+ PRUETH_QUEUE4,
+ PRUETH_COLQUEUE, /* collision queue */
+};
+
+/**
+ * struct prueth_firmware - PRU Ethernet FW data
+ * @fw_name: names of the firmware images to run on the PRU
+ */
+struct prueth_firmware {
+ const char *fw_name[PRUSS_ETHTYPE_MAX];
+};
+
+/* PRUeth memory range identifiers */
+enum prueth_mem {
+ PRUETH_MEM_DRAM0 = 0,
+ PRUETH_MEM_DRAM1,
+ PRUETH_MEM_SHARED_RAM,
+ PRUETH_MEM_OCMC,
+ PRUETH_MEM_MAX,
+};
+
+enum pruss_device {
+ PRUSS_AM57XX = 0,
+ PRUSS_AM43XX,
+ PRUSS_AM33XX,
+ PRUSS_K2G
+};
+
+/**
+ * struct prueth_private_data - PRU Ethernet private data
+ * @driver_data: PRU Ethernet device type
+ * @fw_pru: firmware names to be used for the PRUSS Ethernet use cases
+ */
+struct prueth_private_data {
+ enum pruss_device driver_data;
+ const struct prueth_firmware fw_pru[PRUSS_NUM_PRUS];
+};
+
+struct prueth_emac_stats {
+ u64 tx_packets;
+ u64 tx_dropped;
+ u64 tx_bytes;
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_length_errors;
+ u64 rx_over_errors;
+};
+
+/* data for each emac port */
+struct prueth_emac {
+ struct prueth *prueth;
+ struct net_device *ndev;
+ struct napi_struct napi;
+
+ struct rproc *pru;
+ struct phy_device *phydev;
+ struct prueth_queue_desc __iomem *rx_queue_descs;
+ struct prueth_queue_desc __iomem *tx_queue_descs;
+
+ int link;
+ int speed;
+ int duplex;
+ int rx_irq;
+
+ enum prueth_port_queue_id tx_port_queue;
+ enum prueth_queue_id rx_queue_start;
+ enum prueth_queue_id rx_queue_end;
+ enum prueth_port port_id;
+ enum prueth_mem dram;
+ const char *phy_id;
+ u32 msg_enable;
+ u8 mac_addr[6];
+ phy_interface_t phy_if;
+
+ /* spin lock used to protect
+ * during link configuration
+ */
+ spinlock_t lock;
+
+ struct hrtimer tx_hrtimer;
+ struct prueth_emac_stats stats;
+};
+
+struct prueth {
+ struct device *dev;
+ struct pruss *pruss;
+ struct rproc *pru0, *pru1;
+ struct pruss_mem_region mem[PRUETH_MEM_MAX];
+ struct gen_pool *sram_pool;
+ struct regmap *mii_rt;
+ struct icss_iep *iep;
+
+ const struct prueth_private_data *fw_data;
+ struct prueth_fw_offsets *fw_offsets;
+
+ struct device_node *eth_node[PRUETH_NUM_MACS];
+ struct prueth_emac *emac[PRUETH_NUM_MACS];
+ struct net_device *registered_netdevs[PRUETH_NUM_MACS];
+
+ unsigned int eth_type;
+ size_t ocmc_ram_size;
+ u8 emac_configured;
+};
+
+void icssm_parse_packet_info(struct prueth *prueth, u32 buffer_descriptor,
+ struct prueth_packet_info *pkt_info);
+int icssm_emac_rx_packet(struct prueth_emac *emac, u16 *bd_rd_ptr,
+ struct prueth_packet_info *pkt_info,
+ const struct prueth_queue_info *rxqueue);
+
+#endif /* __NET_TI_PRUETH_H */
diff --git a/drivers/net/ethernet/ti/icssm/icssm_prueth_ptp.h b/drivers/net/ethernet/ti/icssm/icssm_prueth_ptp.h
new file mode 100644
index 000000000000..e0bf692beda1
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssm/icssm_prueth_ptp.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2021 Texas Instruments Incorporated - https://www.ti.com
+ */
+#ifndef PRUETH_PTP_H
+#define PRUETH_PTP_H
+
+#define RX_SYNC_TIMESTAMP_OFFSET_P1 0x8 /* 8 bytes */
+#define RX_PDELAY_REQ_TIMESTAMP_OFFSET_P1 0x14 /* 12 bytes */
+
+#define DISABLE_PTP_FRAME_FORWARDING_CTRL_OFFSET 0x14 /* 1 byte */
+
+#define RX_PDELAY_RESP_TIMESTAMP_OFFSET_P1 0x20 /* 12 bytes */
+#define RX_SYNC_TIMESTAMP_OFFSET_P2 0x2c /* 12 bytes */
+#define RX_PDELAY_REQ_TIMESTAMP_OFFSET_P2 0x38 /* 12 bytes */
+#define RX_PDELAY_RESP_TIMESTAMP_OFFSET_P2 0x44 /* 12 bytes */
+#define TIMESYNC_DOMAIN_NUMBER_LIST 0x50 /* 2 bytes */
+#define P1_SMA_LINE_DELAY_OFFSET 0x52 /* 4 bytes */
+#define P2_SMA_LINE_DELAY_OFFSET 0x56 /* 4 bytes */
+#define TIMESYNC_SECONDS_COUNT_OFFSET 0x5a /* 6 bytes */
+#define TIMESYNC_TC_RCF_OFFSET 0x60 /* 4 bytes */
+#define DUT_IS_MASTER_OFFSET 0x64 /* 1 byte */
+#define MASTER_PORT_NUM_OFFSET 0x65 /* 1 byte */
+#define SYNC_MASTER_MAC_OFFSET 0x66 /* 6 bytes */
+#define TX_TS_NOTIFICATION_OFFSET_SYNC_P1 0x6c /* 1 byte */
+#define TX_TS_NOTIFICATION_OFFSET_PDEL_REQ_P1 0x6d /* 1 byte */
+#define TX_TS_NOTIFICATION_OFFSET_PDEL_RES_P1 0x6e /* 1 byte */
+#define TX_TS_NOTIFICATION_OFFSET_SYNC_P2 0x6f /* 1 byte */
+#define TX_TS_NOTIFICATION_OFFSET_PDEL_REQ_P2 0x70 /* 1 byte */
+#define TX_TS_NOTIFICATION_OFFSET_PDEL_RES_P2 0x71 /* 1 byte */
+#define TX_SYNC_TIMESTAMP_OFFSET_P1 0x72 /* 12 bytes */
+#define TX_PDELAY_REQ_TIMESTAMP_OFFSET_P1 0x7e /* 12 bytes */
+#define TX_PDELAY_RESP_TIMESTAMP_OFFSET_P1 0x8a /* 12 bytes */
+#define TX_SYNC_TIMESTAMP_OFFSET_P2 0x96 /* 12 bytes */
+#define TX_PDELAY_REQ_TIMESTAMP_OFFSET_P2 0xa2 /* 12 bytes */
+#define TX_PDELAY_RESP_TIMESTAMP_OFFSET_P2 0xae /* 12 bytes */
+#define TIMESYNC_CTRL_VAR_OFFSET 0xba /* 1 byte */
+#define DISABLE_SWITCH_SYNC_RELAY_OFFSET 0xbb /* 1 byte */
+#define MII_RX_CORRECTION_OFFSET 0xbc /* 2 bytes */
+#define MII_TX_CORRECTION_OFFSET 0xbe /* 2 bytes */
+#define TIMESYNC_CMP1_CMP_OFFSET 0xc0 /* 8 bytes */
+#define TIMESYNC_SYNC0_CMP_OFFSET 0xc8 /* 8 bytes */
+#define TIMESYNC_CMP1_PERIOD_OFFSET 0xd0 /* 4 bytes */
+#define TIMESYNC_SYNC0_WIDTH_OFFSET 0xd4 /* 4 bytes */
+#define SINGLE_STEP_IEP_OFFSET_P1 0xd8 /* 8 bytes */
+#define SINGLE_STEP_SECONDS_OFFSET_P1 0xe0 /* 8 bytes */
+#define SINGLE_STEP_IEP_OFFSET_P2 0xe8 /* 8 bytes */
+#define SINGLE_STEP_SECONDS_OFFSET_P2 0xf0 /* 8 bytes */
+#define LINK_LOCAL_FRAME_HAS_HSR_TAG 0xf8 /* 1 byte */
+#define PTP_PREV_TX_TIMESTAMP_P1 0xf9 /* 8 bytes */
+#define PTP_PREV_TX_TIMESTAMP_P2 0x101 /* 8 bytes */
+#define PTP_CLK_IDENTITY_OFFSET 0x109 /* 8 bytes */
+#define PTP_SCRATCH_MEM 0x111 /* 16 bytes */
+#define PTP_IPV4_UDP_E2E_ENABLE 0x121 /* 1 byte */
+
+enum {
+ PRUETH_PTP_SYNC,
+ PRUETH_PTP_DLY_REQ,
+ PRUETH_PTP_DLY_RESP,
+ PRUETH_PTP_TS_EVENTS,
+};
+
+#define PRUETH_PTP_TS_SIZE 12
+#define PRUETH_PTP_TS_NOTIFY_SIZE 1
+#define PRUETH_PTP_TS_NOTIFY_MASK 0xff
+
+/* Bit definitions for TIMESYNC_CTRL */
+#define TIMESYNC_CTRL_BG_ENABLE BIT(0)
+#define TIMESYNC_CTRL_FORCED_2STEP BIT(1)
+
+static inline u32 icssm_prueth_tx_ts_offs_get(u8 port, u8 event)
+{
+ return TX_SYNC_TIMESTAMP_OFFSET_P1 + port *
+ PRUETH_PTP_TS_EVENTS * PRUETH_PTP_TS_SIZE +
+ event * PRUETH_PTP_TS_SIZE;
+}
+
+static inline u32 icssm_prueth_tx_ts_notify_offs_get(u8 port, u8 event)
+{
+ return TX_TS_NOTIFICATION_OFFSET_SYNC_P1 +
+ PRUETH_PTP_TS_EVENTS * PRUETH_PTP_TS_NOTIFY_SIZE * port +
+ event * PRUETH_PTP_TS_NOTIFY_SIZE;
+}
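
Usage sketch for the two helpers above: with PRUETH_PTP_TS_EVENTS (3) timestamp slots of PRUETH_PTP_TS_SIZE (12) bytes each, port 1 (the second port) and event PRUETH_PTP_SYNC resolve back to the P2 macros:

static void example_ptp_offsets(void)
{
	/* 0x72 + 1 * 3 * 12 + 0 * 12 = 0x96 == TX_SYNC_TIMESTAMP_OFFSET_P2 */
	u32 ts_off = icssm_prueth_tx_ts_offs_get(1, PRUETH_PTP_SYNC);

	/* 0x6c + 3 * 1 * 1 + 0 = 0x6f == TX_TS_NOTIFICATION_OFFSET_SYNC_P2 */
	u32 notify_off = icssm_prueth_tx_ts_notify_offs_get(1, PRUETH_PTP_SYNC);
}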
+
+#endif /* PRUETH_PTP_H */
diff --git a/drivers/net/ethernet/ti/icssm/icssm_switch.h b/drivers/net/ethernet/ti/icssm/icssm_switch.h
new file mode 100644
index 000000000000..8b494ffdcde7
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssm/icssm_switch.h
@@ -0,0 +1,257 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (C) 2015-2021 Texas Instruments Incorporated - https://www.ti.com
+ */
+
+#ifndef __ICSS_SWITCH_H
+#define __ICSS_SWITCH_H
+
+/* Basic Switch Parameters
+ * Used to auto compute offset addresses on L3 OCMC RAM. Do not modify these
+ * without changing firmware accordingly
+ */
+#define SWITCH_BUFFER_SIZE (64 * 1024) /* L3 buffer */
+#define ICSS_BLOCK_SIZE 32 /* data bytes per BD */
+#define BD_SIZE 4 /* byte buffer descriptor */
+#define NUM_QUEUES 4 /* Queues on Port 0/1/2 */
+
+#define PORT_LINK_MASK 0x1
+#define PORT_IS_HD_MASK 0x2
+
+/* Physical Port queue size (number of BDs). Same for both ports */
+#define QUEUE_1_SIZE 97 /* Network Management high */
+#define QUEUE_2_SIZE 97 /* Network Management low */
+#define QUEUE_3_SIZE 97 /* Protocol specific */
+#define QUEUE_4_SIZE 97 /* NRT (IP,ARP, ICMP) */
+
+/* Host queue size (number of BDs). Each BD points to a data buffer of
+ * 32 bytes. Host port queues can buffer up to 4 full-sized frames per
+ * queue.
+ */
+#define HOST_QUEUE_1_SIZE 194 /* Protocol and VLAN priority 7 & 6 */
+#define HOST_QUEUE_2_SIZE 194 /* Protocol mid */
+#define HOST_QUEUE_3_SIZE 194 /* Protocol low */
+#define HOST_QUEUE_4_SIZE 194 /* NRT (IP, ARP, ICMP) */
+
+#define COL_QUEUE_SIZE 0
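
A quick sanity check (illustrative, not in the patch; assumes <linux/build_bug.h>) of the "up to 4 full-sized frames" claim above: 194 BDs of 32 data bytes give 6208 bytes per host queue, while four 1528-byte frames need 6112:

static_assert(HOST_QUEUE_1_SIZE * ICSS_BLOCK_SIZE >= 4 * 1528,
	      "a host queue must hold four full-sized (1528 byte) frames");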
+
+/* NRT Buffer descriptor definition
+ * Each buffer descriptor points to a block of at most 32 bytes and is
+ * 32 bits in size so it can be updated atomically.
+ * PRU can address bytewise into memory.
+ * Definition of 32 bit descriptor is as follows
+ *
+ * Bits Name Meaning
+ * =============================================================================
+ * 0..7 Index points to index in buffer queue, max 256 x 32
+ * byte blocks can be addressed
+ * 6 LookupSuccess For switch, FDB lookup was successful (source
+ * MAC address found in FDB).
+ * For RED, NodeTable lookup was successful.
+ * 7 Flood Packet should be flooded (destination MAC
+ * address found in FDB). For switch only.
+ * 8..12 Block_length number of valid bytes in this specific block.
+ * Will be <=32 bytes on last block of packet
+ * 13 More "More" bit indicating that there are more blocks
+ * 14 Shadow indicates that "index" is pointing into shadow
+ * buffer
+ * 15 TimeStamp indicates that this packet has time stamp in
+ * separate buffer - only needed if PTP runs on
+ * host
+ * 16..17 Port different meaning for ingress and egress,
+ * Ingress: Port = 0 indicates phy port 1 and
+ * Port = 1 indicates phy port 2.
+ * Egress: 0 sends on phy port 1 and 1 sends on
+ * phy port 2. Port = 2 goes over MAC table
+ * look-up
+ * 18..28 Length 11 bit of total packet length which is put into
+ * first BD only so that host access only one BD
+ * 29 VlanTag indicates that packet has Length/Type field of
+ * 0x8100 with VLAN tag in the following bytes
+ * 30 Broadcast indicates that packet goes out on both physical
+ * ports, there will be two bd but only one buffer
+ * 31 Error indicates there was an error in the packet
+ */
+#define PRUETH_BD_START_FLAG_MASK BIT(0)
+#define PRUETH_BD_START_FLAG_SHIFT 0
+
+#define PRUETH_BD_HSR_FRAME_MASK BIT(4)
+#define PRUETH_BD_HSR_FRAME_SHIFT 4
+
+#define PRUETH_BD_SUP_HSR_FRAME_MASK BIT(5)
+#define PRUETH_BD_SUP_HSR_FRAME_SHIFT 5
+
+#define PRUETH_BD_LOOKUP_SUCCESS_MASK BIT(6)
+#define PRUETH_BD_LOOKUP_SUCCESS_SHIFT 6
+
+#define PRUETH_BD_SW_FLOOD_MASK BIT(7)
+#define PRUETH_BD_SW_FLOOD_SHIFT 7
+
+#define PRUETH_BD_SHADOW_MASK BIT(14)
+#define PRUETH_BD_SHADOW_SHIFT 14
+
+#define PRUETH_BD_TIMESTAMP_MASK BIT(15)
+#define PRUETH_BD_TIMESTAMP_SHIFT 15
+
+#define PRUETH_BD_PORT_MASK GENMASK(17, 16)
+#define PRUETH_BD_PORT_SHIFT 16
+
+#define PRUETH_BD_LENGTH_MASK GENMASK(28, 18)
+#define PRUETH_BD_LENGTH_SHIFT 18
+
+#define PRUETH_BD_BROADCAST_MASK BIT(30)
+#define PRUETH_BD_BROADCAST_SHIFT 30
+
+#define PRUETH_BD_ERROR_MASK BIT(31)
+#define PRUETH_BD_ERROR_SHIFT 31
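
The driver's real decoder is icssm_parse_packet_info(), declared in icssm_prueth.h; purely to illustrate the masks above, a FIELD_GET-based decode into struct prueth_packet_info could look like this sketch:

#include <linux/bitfield.h>

static void example_decode_bd(u32 bd, struct prueth_packet_info *info)
{
	info->shadow         = !!(bd & PRUETH_BD_SHADOW_MASK);
	info->port           = FIELD_GET(PRUETH_BD_PORT_MASK, bd);
	info->length         = FIELD_GET(PRUETH_BD_LENGTH_MASK, bd);
	info->broadcast      = !!(bd & PRUETH_BD_BROADCAST_MASK);
	info->error          = !!(bd & PRUETH_BD_ERROR_MASK);
	info->lookup_success = !!(bd & PRUETH_BD_LOOKUP_SUCCESS_MASK);
	info->flood          = !!(bd & PRUETH_BD_SW_FLOOD_MASK);
	info->timestamp      = !!(bd & PRUETH_BD_TIMESTAMP_MASK);
}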
+
+/* The following offsets indicate which sections of the memory are used
+ * for EMAC internal tasks
+ */
+#define DRAM_START_OFFSET 0x1E98
+#define SRAM_START_OFFSET 0x400
+
+/* General Purpose Statistics
+ * These are present on both PRU0 and PRU1 DRAM
+ */
+/* base statistics offset */
+#define STATISTICS_OFFSET 0x1F00
+#define STAT_SIZE 0x98
+
+/* Offset for storing
+ * 1. Storm Prevention Params
+ * 2. PHY Speed Offset
+ * 3. Port Status Offset
+ * These are present on both PRU0 and PRU1
+ */
+/* 4 bytes */
+#define STORM_PREVENTION_OFFSET_BC (STATISTICS_OFFSET + STAT_SIZE)
+/* 4 bytes */
+#define PHY_SPEED_OFFSET (STATISTICS_OFFSET + STAT_SIZE + 4)
+/* 1 byte */
+#define PORT_STATUS_OFFSET (STATISTICS_OFFSET + STAT_SIZE + 8)
+/* 1 byte */
+#define COLLISION_COUNTER (STATISTICS_OFFSET + STAT_SIZE + 9)
+/* 4 bytes */
+#define RX_PKT_SIZE_OFFSET (STATISTICS_OFFSET + STAT_SIZE + 10)
+/* 4 bytes */
+#define PORT_CONTROL_ADDR (STATISTICS_OFFSET + STAT_SIZE + 14)
+/* 6 bytes */
+#define PORT_MAC_ADDR (STATISTICS_OFFSET + STAT_SIZE + 18)
+/* 1 byte */
+#define RX_INT_STATUS_OFFSET (STATISTICS_OFFSET + STAT_SIZE + 24)
+/* 4 bytes */
+#define STORM_PREVENTION_OFFSET_MC (STATISTICS_OFFSET + STAT_SIZE + 25)
+/* 4 bytes */
+#define STORM_PREVENTION_OFFSET_UC (STATISTICS_OFFSET + STAT_SIZE + 29)
+/* 4 bytes ? */
+#define STP_INVALID_STATE_OFFSET (STATISTICS_OFFSET + STAT_SIZE + 33)
+
+/* DRAM Offsets for EMAC
+ * Present on Both DRAM0 and DRAM1
+ */
+
+/* 4 queue descriptors for port tx = 32 bytes */
+#define TX_CONTEXT_Q1_OFFSET_ADDR (PORT_QUEUE_DESC_OFFSET + 32)
+#define PORT_QUEUE_DESC_OFFSET (ICSS_EMAC_TTS_CYC_TX_SOF + 8)
+
+/* EMAC Time Triggered Send Offsets */
+#define ICSS_EMAC_TTS_CYC_TX_SOF (ICSS_EMAC_TTS_PREV_TX_SOF + 8)
+#define ICSS_EMAC_TTS_PREV_TX_SOF \
+ (ICSS_EMAC_TTS_MISSED_CYCLE_CNT_OFFSET + 4)
+#define ICSS_EMAC_TTS_MISSED_CYCLE_CNT_OFFSET (ICSS_EMAC_TTS_STATUS_OFFSET \
+ + 4)
+#define ICSS_EMAC_TTS_STATUS_OFFSET (ICSS_EMAC_TTS_CFG_TIME_OFFSET + 4)
+#define ICSS_EMAC_TTS_CFG_TIME_OFFSET (ICSS_EMAC_TTS_CYCLE_PERIOD_OFFSET + 4)
+#define ICSS_EMAC_TTS_CYCLE_PERIOD_OFFSET \
+ (ICSS_EMAC_TTS_CYCLE_START_OFFSET + 8)
+#define ICSS_EMAC_TTS_CYCLE_START_OFFSET ICSS_EMAC_TTS_BASE_OFFSET
+#define ICSS_EMAC_TTS_BASE_OFFSET DRAM_START_OFFSET
+
+/* Shared RAM offsets for EMAC */
+
+/* Queue Descriptors */
+
+/* 4 queue descriptors for port 0 (host receive). 32 bytes */
+#define HOST_QUEUE_DESC_OFFSET (HOST_QUEUE_SIZE_ADDR + 16)
+
+/* table offset for queue size:
+ * 3 ports * 4 Queues * 1 byte offset = 12 bytes
+ */
+#define HOST_QUEUE_SIZE_ADDR (HOST_QUEUE_OFFSET_ADDR + 8)
+/* table offset for queue:
+ * 4 Queues * 2 byte offset = 8 bytes
+ */
+#define HOST_QUEUE_OFFSET_ADDR (HOST_QUEUE_DESCRIPTOR_OFFSET_ADDR + 8)
+/* table offset for Host queue descriptors:
+ * 1 ports * 4 Queues * 2 byte offset = 8 bytes
+ */
+#define HOST_QUEUE_DESCRIPTOR_OFFSET_ADDR (HOST_Q4_RX_CONTEXT_OFFSET + 8)
+
+/* Host Port Rx Context */
+#define HOST_Q4_RX_CONTEXT_OFFSET (HOST_Q3_RX_CONTEXT_OFFSET + 8)
+#define HOST_Q3_RX_CONTEXT_OFFSET (HOST_Q2_RX_CONTEXT_OFFSET + 8)
+#define HOST_Q2_RX_CONTEXT_OFFSET (HOST_Q1_RX_CONTEXT_OFFSET + 8)
+#define HOST_Q1_RX_CONTEXT_OFFSET (EMAC_PROMISCUOUS_MODE_OFFSET + 4)
+
+/* Promiscuous mode control */
+#define EMAC_P1_PROMISCUOUS_BIT BIT(0)
+#define EMAC_P2_PROMISCUOUS_BIT BIT(1)
+#define EMAC_PROMISCUOUS_MODE_OFFSET (EMAC_RESERVED + 4)
+#define EMAC_RESERVED EOF_48K_BUFFER_BD
+
+/* allow for a max 48k buffer, which spans the descriptors up to 0x1800 (6kB) */
+#define EOF_48K_BUFFER_BD (P0_BUFFER_DESC_OFFSET + HOST_BD_SIZE + \
+ PORT_BD_SIZE)
+
+#define HOST_BD_SIZE ((HOST_QUEUE_1_SIZE + \
+ HOST_QUEUE_2_SIZE + HOST_QUEUE_3_SIZE + \
+ HOST_QUEUE_4_SIZE) * BD_SIZE)
+#define PORT_BD_SIZE ((QUEUE_1_SIZE + QUEUE_2_SIZE + \
+ QUEUE_3_SIZE + QUEUE_4_SIZE) * 2 * BD_SIZE)
+
+#define END_OF_BD_POOL (P2_Q4_BD_OFFSET + QUEUE_4_SIZE * BD_SIZE)
+#define P2_Q4_BD_OFFSET (P2_Q3_BD_OFFSET + QUEUE_3_SIZE * BD_SIZE)
+#define P2_Q3_BD_OFFSET (P2_Q2_BD_OFFSET + QUEUE_2_SIZE * BD_SIZE)
+#define P2_Q2_BD_OFFSET (P2_Q1_BD_OFFSET + QUEUE_1_SIZE * BD_SIZE)
+#define P2_Q1_BD_OFFSET (P1_Q4_BD_OFFSET + QUEUE_4_SIZE * BD_SIZE)
+#define P1_Q4_BD_OFFSET (P1_Q3_BD_OFFSET + QUEUE_3_SIZE * BD_SIZE)
+#define P1_Q3_BD_OFFSET (P1_Q2_BD_OFFSET + QUEUE_2_SIZE * BD_SIZE)
+#define P1_Q2_BD_OFFSET (P1_Q1_BD_OFFSET + QUEUE_1_SIZE * BD_SIZE)
+#define P1_Q1_BD_OFFSET (P0_Q4_BD_OFFSET + HOST_QUEUE_4_SIZE * BD_SIZE)
+#define P0_Q4_BD_OFFSET (P0_Q3_BD_OFFSET + HOST_QUEUE_3_SIZE * BD_SIZE)
+#define P0_Q3_BD_OFFSET (P0_Q2_BD_OFFSET + HOST_QUEUE_2_SIZE * BD_SIZE)
+#define P0_Q2_BD_OFFSET (P0_Q1_BD_OFFSET + HOST_QUEUE_1_SIZE * BD_SIZE)
+#define P0_Q1_BD_OFFSET P0_BUFFER_DESC_OFFSET
+#define P0_BUFFER_DESC_OFFSET SRAM_START_OFFSET
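
Worked example of the macro chain above (assumes <linux/build_bug.h>): the BD pools are laid out back to back from SRAM_START_OFFSET, so the first few offsets resolve to:

static_assert(P0_Q1_BD_OFFSET == 0x400);
static_assert(P0_Q2_BD_OFFSET == 0x400 + 194 * 4);	/* 0x708 */
static_assert(P0_Q3_BD_OFFSET == 0x708 + 194 * 4);	/* 0xa10 */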
+
+/* Memory Usage of L3 OCMC RAM */
+
+/* L3 64KB Memory - mainly buffer Pool */
+#define END_OF_BUFFER_POOL (P2_Q4_BUFFER_OFFSET + QUEUE_4_SIZE * \
+ ICSS_BLOCK_SIZE)
+#define P2_Q4_BUFFER_OFFSET (P2_Q3_BUFFER_OFFSET + QUEUE_3_SIZE * \
+ ICSS_BLOCK_SIZE)
+#define P2_Q3_BUFFER_OFFSET (P2_Q2_BUFFER_OFFSET + QUEUE_2_SIZE * \
+ ICSS_BLOCK_SIZE)
+#define P2_Q2_BUFFER_OFFSET (P2_Q1_BUFFER_OFFSET + QUEUE_1_SIZE * \
+ ICSS_BLOCK_SIZE)
+#define P2_Q1_BUFFER_OFFSET (P1_Q4_BUFFER_OFFSET + QUEUE_4_SIZE * \
+ ICSS_BLOCK_SIZE)
+#define P1_Q4_BUFFER_OFFSET (P1_Q3_BUFFER_OFFSET + QUEUE_3_SIZE * \
+ ICSS_BLOCK_SIZE)
+#define P1_Q3_BUFFER_OFFSET (P1_Q2_BUFFER_OFFSET + QUEUE_2_SIZE * \
+ ICSS_BLOCK_SIZE)
+#define P1_Q2_BUFFER_OFFSET (P1_Q1_BUFFER_OFFSET + QUEUE_1_SIZE * \
+ ICSS_BLOCK_SIZE)
+#define P1_Q1_BUFFER_OFFSET (P0_Q4_BUFFER_OFFSET + HOST_QUEUE_4_SIZE * \
+ ICSS_BLOCK_SIZE)
+#define P0_Q4_BUFFER_OFFSET (P0_Q3_BUFFER_OFFSET + HOST_QUEUE_3_SIZE * \
+ ICSS_BLOCK_SIZE)
+#define P0_Q3_BUFFER_OFFSET (P0_Q2_BUFFER_OFFSET + HOST_QUEUE_2_SIZE * \
+ ICSS_BLOCK_SIZE)
+#define P0_Q2_BUFFER_OFFSET (P0_Q1_BUFFER_OFFSET + HOST_QUEUE_1_SIZE * \
+ ICSS_BLOCK_SIZE)
+#define P0_COL_BUFFER_OFFSET 0xEE00
+#define P0_Q1_BUFFER_OFFSET 0x0000
+
+#endif /* __ICSS_SWITCH_H */
diff --git a/drivers/net/ethernet/wangxun/Kconfig b/drivers/net/ethernet/wangxun/Kconfig
index 424ec3212128..d138dea7d208 100644
--- a/drivers/net/ethernet/wangxun/Kconfig
+++ b/drivers/net/ethernet/wangxun/Kconfig
@@ -20,6 +20,7 @@ config LIBWX
tristate
depends on PTP_1588_CLOCK_OPTIONAL
select PAGE_POOL
+ select DIMLIB
help
Common library for Wangxun(R) Ethernet drivers.
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c
index c12a4cb951f6..06f401bd975c 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c
@@ -303,6 +303,11 @@ int wx_get_coalesce(struct net_device *netdev,
else
ec->rx_coalesce_usecs = wx->rx_itr_setting >> 2;
+ if (wx->adaptive_itr) {
+ ec->use_adaptive_rx_coalesce = 1;
+ ec->use_adaptive_tx_coalesce = 1;
+ }
+
/* if in mixed tx/rx queues per vector mode, report only rx settings */
if (wx->q_vector[0]->tx.count && wx->q_vector[0]->rx.count)
return 0;
@@ -334,19 +339,28 @@ int wx_set_coalesce(struct net_device *netdev,
return -EOPNOTSUPP;
}
- if (ec->tx_max_coalesced_frames_irq)
- wx->tx_work_limit = ec->tx_max_coalesced_frames_irq;
+ if (ec->tx_max_coalesced_frames_irq > U16_MAX ||
+ !ec->tx_max_coalesced_frames_irq)
+ return -EINVAL;
+
+ wx->tx_work_limit = ec->tx_max_coalesced_frames_irq;
switch (wx->mac.type) {
case wx_mac_sp:
max_eitr = WX_SP_MAX_EITR;
+ rx_itr_param = WX_20K_ITR;
+ tx_itr_param = WX_12K_ITR;
break;
case wx_mac_aml:
case wx_mac_aml40:
max_eitr = WX_AML_MAX_EITR;
+ rx_itr_param = WX_20K_ITR;
+ tx_itr_param = WX_12K_ITR;
break;
default:
max_eitr = WX_EM_MAX_EITR;
+ rx_itr_param = WX_7K_ITR;
+ tx_itr_param = WX_7K_ITR;
break;
}
@@ -354,36 +368,37 @@ int wx_set_coalesce(struct net_device *netdev,
(ec->tx_coalesce_usecs > (max_eitr >> 2)))
return -EINVAL;
+ if (ec->use_adaptive_rx_coalesce) {
+ wx->adaptive_itr = true;
+ wx->rx_itr_setting = 1;
+ wx->tx_itr_setting = 1;
+ return 0;
+ }
+
if (ec->rx_coalesce_usecs > 1)
wx->rx_itr_setting = ec->rx_coalesce_usecs << 2;
else
wx->rx_itr_setting = ec->rx_coalesce_usecs;
- if (wx->rx_itr_setting == 1)
- rx_itr_param = WX_20K_ITR;
- else
- rx_itr_param = wx->rx_itr_setting;
-
if (ec->tx_coalesce_usecs > 1)
wx->tx_itr_setting = ec->tx_coalesce_usecs << 2;
else
wx->tx_itr_setting = ec->tx_coalesce_usecs;
- if (wx->tx_itr_setting == 1) {
- switch (wx->mac.type) {
- case wx_mac_sp:
- case wx_mac_aml:
- case wx_mac_aml40:
- tx_itr_param = WX_12K_ITR;
- break;
- default:
- tx_itr_param = WX_20K_ITR;
- break;
- }
- } else {
- tx_itr_param = wx->tx_itr_setting;
+ if (wx->adaptive_itr) {
+ wx->adaptive_itr = false;
+ wx->rx_itr_setting = rx_itr_param;
+ wx->tx_itr_setting = tx_itr_param;
+ } else if (wx->rx_itr_setting == 1 || wx->tx_itr_setting == 1) {
+ wx->adaptive_itr = true;
}
+ if (wx->rx_itr_setting != 1)
+ rx_itr_param = wx->rx_itr_setting;
+
+ if (wx->tx_itr_setting != 1)
+ tx_itr_param = wx->tx_itr_setting;
+
/* mixed Rx/Tx */
if (wx->q_vector[0]->tx.count && wx->q_vector[0]->rx.count)
wx->tx_itr_setting = wx->rx_itr_setting;
@@ -466,6 +481,142 @@ int wx_set_channels(struct net_device *dev,
}
EXPORT_SYMBOL(wx_set_channels);
+u32 wx_rss_indir_size(struct net_device *netdev)
+{
+ struct wx *wx = netdev_priv(netdev);
+
+ return wx_rss_indir_tbl_entries(wx);
+}
+EXPORT_SYMBOL(wx_rss_indir_size);
+
+u32 wx_get_rxfh_key_size(struct net_device *netdev)
+{
+ return WX_RSS_KEY_SIZE;
+}
+EXPORT_SYMBOL(wx_get_rxfh_key_size);
+
+static void wx_get_reta(struct wx *wx, u32 *indir)
+{
+ u32 reta_size = wx_rss_indir_tbl_entries(wx);
+ u16 rss_m = wx->ring_feature[RING_F_RSS].mask;
+
+ if (test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags))
+ rss_m = wx->ring_feature[RING_F_RSS].indices - 1;
+
+ for (u32 i = 0; i < reta_size; i++)
+ indir[i] = wx->rss_indir_tbl[i] & rss_m;
+}
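
Conceptually (a sketch, not driver code), the table wx_get_reta() reports is the hardware's hash-to-queue map: the low bits of the RSS hash index the redirection table and the stored entry picks the RX queue:

static u32 example_rss_queue(const u32 *indir, u32 reta_entries, u32 hash)
{
	/* reta_entries is 64 (SR-IOV) or 128, see wx_rss_indir_tbl_entries() */
	return indir[hash % reta_entries];
}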
+
+int wx_get_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh)
+{
+ struct wx *wx = netdev_priv(netdev);
+
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
+
+ if (rxfh->indir)
+ wx_get_reta(wx, rxfh->indir);
+
+ if (rxfh->key)
+ memcpy(rxfh->key, wx->rss_key, WX_RSS_KEY_SIZE);
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_get_rxfh);
+
+int wx_set_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ struct wx *wx = netdev_priv(netdev);
+ u32 reta_entries, i;
+
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
+ reta_entries = wx_rss_indir_tbl_entries(wx);
+ /* Fill out the redirection table */
+ if (rxfh->indir) {
+ for (i = 0; i < reta_entries; i++)
+ wx->rss_indir_tbl[i] = rxfh->indir[i];
+
+ wx_store_reta(wx);
+ }
+
+ /* Fill out the rss hash key */
+ if (rxfh->key) {
+ memcpy(wx->rss_key, rxfh->key, WX_RSS_KEY_SIZE);
+ wx_store_rsskey(wx);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_set_rxfh);
+
+static const struct wx_rss_flow_map rss_flow_table[] = {
+ { TCP_V4_FLOW, RXH_L4_B_0_1 | RXH_L4_B_2_3, WX_RSS_FIELD_IPV4_TCP },
+ { TCP_V6_FLOW, RXH_L4_B_0_1 | RXH_L4_B_2_3, WX_RSS_FIELD_IPV6_TCP },
+ { UDP_V4_FLOW, RXH_L4_B_0_1 | RXH_L4_B_2_3, WX_RSS_FIELD_IPV4_UDP },
+ { UDP_V6_FLOW, RXH_L4_B_0_1 | RXH_L4_B_2_3, WX_RSS_FIELD_IPV6_UDP },
+ { SCTP_V4_FLOW, RXH_L4_B_0_1 | RXH_L4_B_2_3, WX_RSS_FIELD_IPV4_SCTP },
+ { SCTP_V6_FLOW, RXH_L4_B_0_1 | RXH_L4_B_2_3, WX_RSS_FIELD_IPV6_SCTP },
+};
+
+int wx_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *nfc)
+{
+ struct wx *wx = netdev_priv(dev);
+
+ nfc->data = RXH_IP_SRC | RXH_IP_DST;
+
+ for (u32 i = 0; i < ARRAY_SIZE(rss_flow_table); i++) {
+ const struct wx_rss_flow_map *entry = &rss_flow_table[i];
+
+ if (entry->flow_type == nfc->flow_type) {
+ if (wx->rss_flags & entry->flag)
+ nfc->data |= entry->data;
+ break;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_get_rxfh_fields);
+
+int wx_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack)
+{
+ struct wx *wx = netdev_priv(dev);
+ u8 flags = wx->rss_flags;
+
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST))
+ return -EINVAL;
+
+ for (u32 i = 0; i < ARRAY_SIZE(rss_flow_table); i++) {
+ const struct wx_rss_flow_map *entry = &rss_flow_table[i];
+
+ if (entry->flow_type == nfc->flow_type) {
+ if (nfc->data & entry->data)
+ flags |= entry->flag;
+ else
+ flags &= ~entry->flag;
+
+ if (flags != wx->rss_flags) {
+ wx->rss_flags = flags;
+ wx_config_rss_field(wx);
+ }
+
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(wx_set_rxfh_fields);
+
u32 wx_get_msglevel(struct net_device *netdev)
{
struct wx *wx = netdev_priv(netdev);
@@ -531,3 +682,36 @@ void wx_get_ptp_stats(struct net_device *dev,
}
}
EXPORT_SYMBOL(wx_get_ptp_stats);
+
+static int wx_get_link_ksettings_vf(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct wx *wx = netdev_priv(netdev);
+
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ cmd->base.port = PORT_NONE;
+ cmd->base.duplex = DUPLEX_FULL;
+ cmd->base.speed = wx->speed;
+
+ return 0;
+}
+
+static const struct ethtool_ops wx_ethtool_ops_vf = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ |
+ ETHTOOL_COALESCE_USE_ADAPTIVE,
+ .get_drvinfo = wx_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_ringparam = wx_get_ringparam,
+ .get_msglevel = wx_get_msglevel,
+ .get_coalesce = wx_get_coalesce,
+ .get_ts_info = ethtool_op_get_ts_info,
+ .get_link_ksettings = wx_get_link_ksettings_vf,
+};
+
+void wx_set_ethtool_ops_vf(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &wx_ethtool_ops_vf;
+}
+EXPORT_SYMBOL(wx_set_ethtool_ops_vf);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h
index 9e002e699eca..727093970462 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h
@@ -38,10 +38,23 @@ void wx_get_channels(struct net_device *dev,
struct ethtool_channels *ch);
int wx_set_channels(struct net_device *dev,
struct ethtool_channels *ch);
+u32 wx_rss_indir_size(struct net_device *netdev);
+u32 wx_get_rxfh_key_size(struct net_device *netdev);
+int wx_get_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh);
+int wx_set_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack);
+int wx_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *cmd);
+int wx_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack);
u32 wx_get_msglevel(struct net_device *netdev);
void wx_set_msglevel(struct net_device *netdev, u32 data);
int wx_get_ts_info(struct net_device *dev,
struct kernel_ethtool_ts_info *info);
void wx_get_ptp_stats(struct net_device *dev,
struct ethtool_ts_stats *ts_stats);
+void wx_set_ethtool_ops_vf(struct net_device *netdev);
#endif /* _WX_ETHTOOL_H_ */
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
index 5cb353a97d6d..1e2713f0c921 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
@@ -1998,8 +1998,17 @@ static void wx_restore_vlan(struct wx *wx)
wx_vlan_rx_add_vid(wx->netdev, htons(ETH_P_8021Q), vid);
}
-static void wx_store_reta(struct wx *wx)
+u32 wx_rss_indir_tbl_entries(struct wx *wx)
{
+ if (test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags))
+ return 64;
+ else
+ return 128;
+}
+
+void wx_store_reta(struct wx *wx)
+{
+ u32 reta_entries = wx_rss_indir_tbl_entries(wx);
u8 *indir_tbl = wx->rss_indir_tbl;
u32 reta = 0;
u32 i;
@@ -2007,45 +2016,110 @@ static void wx_store_reta(struct wx *wx)
/* Fill out the redirection table as follows:
* - 8 bit wide entries containing 4 bit RSS index
*/
- for (i = 0; i < WX_MAX_RETA_ENTRIES; i++) {
+ for (i = 0; i < reta_entries; i++) {
reta |= indir_tbl[i] << (i & 0x3) * 8;
if ((i & 3) == 3) {
- wr32(wx, WX_RDB_RSSTBL(i >> 2), reta);
+ if (test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags) &&
+ test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags))
+ wr32(wx, WX_RDB_VMRSSTBL(i >> 2, wx->num_vfs), reta);
+ else
+ wr32(wx, WX_RDB_RSSTBL(i >> 2), reta);
reta = 0;
}
}
}
-static void wx_setup_reta(struct wx *wx)
+void wx_store_rsskey(struct wx *wx)
{
- u16 rss_i = wx->ring_feature[RING_F_RSS].indices;
- u32 random_key_size = WX_RSS_KEY_SIZE / 4;
- u32 i, j;
+ u32 key_size = WX_RSS_KEY_SIZE / 4;
+ u32 i;
- if (test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags)) {
- if (wx->mac.type == wx_mac_em)
- rss_i = 1;
- else
- rss_i = rss_i < 4 ? 4 : rss_i;
+ if (test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags) &&
+ test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
+ for (i = 0; i < key_size; i++)
+ wr32(wx, WX_RDB_VMRSSRK(i, wx->num_vfs),
+ wx->rss_key[i]);
+ } else {
+ for (i = 0; i < key_size; i++)
+ wr32(wx, WX_RDB_RSSRK(i), wx->rss_key[i]);
}
+}
+static void wx_setup_reta(struct wx *wx)
+{
/* Fill out hash function seeds */
- for (i = 0; i < random_key_size; i++)
- wr32(wx, WX_RDB_RSSRK(i), wx->rss_key[i]);
+ wx_store_rsskey(wx);
/* Fill out redirection table */
- memset(wx->rss_indir_tbl, 0, sizeof(wx->rss_indir_tbl));
+ if (!netif_is_rxfh_configured(wx->netdev)) {
+ u16 rss_i = wx->ring_feature[RING_F_RSS].indices;
+ u32 reta_entries = wx_rss_indir_tbl_entries(wx);
+ u32 i, j;
- for (i = 0, j = 0; i < WX_MAX_RETA_ENTRIES; i++, j++) {
- if (j == rss_i)
- j = 0;
+ memset(wx->rss_indir_tbl, 0, sizeof(wx->rss_indir_tbl));
- wx->rss_indir_tbl[i] = j;
+ if (test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags)) {
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags))
+ rss_i = rss_i < 2 ? 2 : rss_i;
+ else
+ rss_i = 1;
+ }
+
+ for (i = 0, j = 0; i < reta_entries; i++, j++) {
+ if (j == rss_i)
+ j = 0;
+
+ wx->rss_indir_tbl[i] = j;
+ }
}
wx_store_reta(wx);
}
+void wx_config_rss_field(struct wx *wx)
+{
+ u32 rss_field;
+
+ if (test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags) &&
+ test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
+ rss_field = rd32(wx, WX_RDB_PL_CFG(wx->num_vfs));
+ rss_field &= ~WX_RDB_PL_CFG_RSS_MASK;
+ rss_field |= FIELD_PREP(WX_RDB_PL_CFG_RSS_MASK, wx->rss_flags);
+ wr32(wx, WX_RDB_PL_CFG(wx->num_vfs), rss_field);
+
+ /* Enable global RSS and multiple RSS to make the RSS
+ * field of each pool take effect.
+ */
+ wr32m(wx, WX_RDB_RA_CTL,
+ WX_RDB_RA_CTL_MULTI_RSS | WX_RDB_RA_CTL_RSS_EN,
+ WX_RDB_RA_CTL_MULTI_RSS | WX_RDB_RA_CTL_RSS_EN);
+ } else {
+ rss_field = rd32(wx, WX_RDB_RA_CTL);
+ rss_field &= ~WX_RDB_RA_CTL_RSS_MASK;
+ rss_field |= FIELD_PREP(WX_RDB_RA_CTL_RSS_MASK, wx->rss_flags);
+ wr32(wx, WX_RDB_RA_CTL, rss_field);
+ }
+}
+
+void wx_enable_rss(struct wx *wx, bool enable)
+{
+ if (test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags) &&
+ test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
+ if (enable)
+ wr32m(wx, WX_RDB_PL_CFG(wx->num_vfs),
+ WX_RDB_PL_CFG_RSS_EN, WX_RDB_PL_CFG_RSS_EN);
+ else
+ wr32m(wx, WX_RDB_PL_CFG(wx->num_vfs),
+ WX_RDB_PL_CFG_RSS_EN, 0);
+ } else {
+ if (enable)
+ wr32m(wx, WX_RDB_RA_CTL, WX_RDB_RA_CTL_RSS_EN,
+ WX_RDB_RA_CTL_RSS_EN);
+ else
+ wr32m(wx, WX_RDB_RA_CTL, WX_RDB_RA_CTL_RSS_EN, 0);
+ }
+}
+
#define WX_RDB_RSS_PL_2 FIELD_PREP(GENMASK(31, 29), 1)
#define WX_RDB_RSS_PL_4 FIELD_PREP(GENMASK(31, 29), 2)
static void wx_setup_psrtype(struct wx *wx)
@@ -2076,27 +2150,12 @@ static void wx_setup_psrtype(struct wx *wx)
static void wx_setup_mrqc(struct wx *wx)
{
- u32 rss_field = 0;
-
/* Disable indicating checksum in descriptor, enables RSS hash */
wr32m(wx, WX_PSR_CTL, WX_PSR_CTL_PCSD, WX_PSR_CTL_PCSD);
- /* Perform hash on these packet types */
- rss_field = WX_RDB_RA_CTL_RSS_IPV4 |
- WX_RDB_RA_CTL_RSS_IPV4_TCP |
- WX_RDB_RA_CTL_RSS_IPV4_UDP |
- WX_RDB_RA_CTL_RSS_IPV6 |
- WX_RDB_RA_CTL_RSS_IPV6_TCP |
- WX_RDB_RA_CTL_RSS_IPV6_UDP;
-
- netdev_rss_key_fill(wx->rss_key, sizeof(wx->rss_key));
-
+ wx_config_rss_field(wx);
+ wx_enable_rss(wx, wx->rss_enabled);
wx_setup_reta(wx);
-
- if (wx->rss_enabled)
- rss_field |= WX_RDB_RA_CTL_RSS_EN;
-
- wr32(wx, WX_RDB_RA_CTL, rss_field);
}
/**
@@ -2389,6 +2448,8 @@ int wx_sw_init(struct wx *wx)
wx_err(wx, "rss key allocation failed\n");
return err;
}
+ wx->rss_flags = WX_RSS_FIELD_IPV4 | WX_RSS_FIELD_IPV4_TCP |
+ WX_RSS_FIELD_IPV6 | WX_RSS_FIELD_IPV6_TCP;
wx->mac_table = kcalloc(wx->mac.num_rar_entries,
sizeof(struct wx_mac_addr),
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.h b/drivers/net/ethernet/wangxun/libwx/wx_hw.h
index 2393a743b564..13857376bbad 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.h
@@ -39,6 +39,11 @@ void wx_set_rx_mode(struct net_device *netdev);
int wx_change_mtu(struct net_device *netdev, int new_mtu);
void wx_disable_rx_queue(struct wx *wx, struct wx_ring *ring);
void wx_enable_rx_queue(struct wx *wx, struct wx_ring *ring);
+u32 wx_rss_indir_tbl_entries(struct wx *wx);
+void wx_store_reta(struct wx *wx);
+void wx_store_rsskey(struct wx *wx);
+void wx_config_rss_field(struct wx *wx);
+void wx_enable_rss(struct wx *wx, bool enable);
void wx_configure_rx(struct wx *wx);
void wx_configure(struct wx *wx);
void wx_start_hw(struct wx *wx);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
index 723785ef87bb..3adf7048320a 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
@@ -16,6 +16,7 @@
#include "wx_lib.h"
#include "wx_ptp.h"
#include "wx_hw.h"
+#include "wx_vf_lib.h"
/* Lookup table mapping the HW PTYPE to the bit field for decoding */
static struct wx_dec_ptype wx_ptype_lookup[256] = {
@@ -832,6 +833,36 @@ static bool wx_clean_tx_irq(struct wx_q_vector *q_vector,
return !!budget;
}
+static void wx_update_rx_dim_sample(struct wx_q_vector *q_vector)
+{
+ struct dim_sample sample = {};
+
+ dim_update_sample(q_vector->total_events,
+ q_vector->rx.total_packets,
+ q_vector->rx.total_bytes,
+ &sample);
+
+ net_dim(&q_vector->rx.dim, &sample);
+}
+
+static void wx_update_tx_dim_sample(struct wx_q_vector *q_vector)
+{
+ struct dim_sample sample = {};
+
+ dim_update_sample(q_vector->total_events,
+ q_vector->tx.total_packets,
+ q_vector->tx.total_bytes,
+ &sample);
+
+ net_dim(&q_vector->tx.dim, &sample);
+}
+
+static void wx_update_dim_sample(struct wx_q_vector *q_vector)
+{
+ wx_update_rx_dim_sample(q_vector);
+ wx_update_tx_dim_sample(q_vector);
+}
+
/**
* wx_poll - NAPI polling RX/TX cleanup routine
* @napi: napi struct with our devices info in it
@@ -878,6 +909,8 @@ static int wx_poll(struct napi_struct *napi, int budget)
/* all work done, exit the polling mode */
if (likely(napi_complete_done(napi, work_done))) {
+ if (wx->adaptive_itr)
+ wx_update_dim_sample(q_vector);
if (netif_running(wx->netdev))
wx_intr_enable(wx, WX_INTR_Q(q_vector->v_idx));
}
@@ -1591,6 +1624,65 @@ netdev_tx_t wx_xmit_frame(struct sk_buff *skb,
}
EXPORT_SYMBOL(wx_xmit_frame);
+static void wx_set_itr(struct wx_q_vector *q_vector)
+{
+ struct wx *wx = q_vector->wx;
+ u32 new_itr;
+
+ if (!wx->adaptive_itr)
+ return;
+
+ /* use the smallest value of new ITR delay calculations */
+ new_itr = min(q_vector->rx.itr, q_vector->tx.itr);
+ new_itr <<= 2;
+
+ if (new_itr != q_vector->itr) {
+ /* save the algorithm value here */
+ q_vector->itr = new_itr;
+
+ if (wx->pdev->is_virtfn)
+ wx_write_eitr_vf(q_vector);
+ else
+ wx_write_eitr(q_vector);
+ }
+}
+
+static void wx_rx_dim_work(struct work_struct *work)
+{
+ struct dim *dim = container_of(work, struct dim, work);
+ struct dim_cq_moder rx_moder;
+ struct wx_ring_container *rx;
+ struct wx_q_vector *q_vector;
+
+ rx = container_of(dim, struct wx_ring_container, dim);
+
+ rx_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
+ rx->itr = rx_moder.usec;
+
+ q_vector = container_of(rx, struct wx_q_vector, rx);
+ wx_set_itr(q_vector);
+
+ dim->state = DIM_START_MEASURE;
+}
+
+static void wx_tx_dim_work(struct work_struct *work)
+{
+ struct dim *dim = container_of(work, struct dim, work);
+ struct dim_cq_moder tx_moder;
+ struct wx_ring_container *tx;
+ struct wx_q_vector *q_vector;
+
+ tx = container_of(dim, struct wx_ring_container, dim);
+
+ tx_moder = net_dim_get_tx_moderation(dim->mode, dim->profile_ix);
+ tx->itr = tx_moder.usec;
+
+ q_vector = container_of(tx, struct wx_q_vector, tx);
+ wx_set_itr(q_vector);
+
+ dim->state = DIM_START_MEASURE;
+}
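
Taken together, the additions in this patch close the adaptive-ITR feedback loop. A sketch of the flow, using only functions from this diff:

/*
 * wx_msix_clean_rings():  q_vector->total_events++, schedule NAPI
 * wx_poll():              wx_update_dim_sample() -> net_dim()
 * net_dim():              queues dim->work when the profile changes
 * wx_rx_dim_work() /
 * wx_tx_dim_work():       take usec from the DIM profile -> wx_set_itr()
 * wx_set_itr():           min(rx, tx) itr -> wx_write_eitr() or
 *                         wx_write_eitr_vf()
 */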
+
void wx_napi_enable_all(struct wx *wx)
{
struct wx_q_vector *q_vector;
@@ -1598,6 +1690,11 @@ void wx_napi_enable_all(struct wx *wx)
for (q_idx = 0; q_idx < wx->num_q_vectors; q_idx++) {
q_vector = wx->q_vector[q_idx];
+
+ INIT_WORK(&q_vector->rx.dim.work, wx_rx_dim_work);
+ INIT_WORK(&q_vector->tx.dim.work, wx_tx_dim_work);
+ q_vector->rx.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
+ q_vector->tx.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
napi_enable(&q_vector->napi);
}
}
@@ -1611,6 +1708,8 @@ void wx_napi_disable_all(struct wx *wx)
for (q_idx = 0; q_idx < wx->num_q_vectors; q_idx++) {
q_vector = wx->q_vector[q_idx];
napi_disable(&q_vector->napi);
+ disable_work_sync(&q_vector->rx.dim.work);
+ disable_work_sync(&q_vector->tx.dim.work);
}
}
EXPORT_SYMBOL(wx_napi_disable_all);
@@ -2197,8 +2296,10 @@ irqreturn_t wx_msix_clean_rings(int __always_unused irq, void *data)
struct wx_q_vector *q_vector = data;
/* EIAM disabled interrupts (on this vector) for us */
- if (q_vector->rx.ring || q_vector->tx.ring)
+ if (q_vector->rx.ring || q_vector->tx.ring) {
napi_schedule_irqoff(&q_vector->napi);
+ q_vector->total_events++;
+ }
return IRQ_HANDLED;
}
@@ -2915,14 +3016,8 @@ int wx_set_features(struct net_device *netdev, netdev_features_t features)
struct wx *wx = netdev_priv(netdev);
bool need_reset = false;
- if (features & NETIF_F_RXHASH) {
- wr32m(wx, WX_RDB_RA_CTL, WX_RDB_RA_CTL_RSS_EN,
- WX_RDB_RA_CTL_RSS_EN);
- wx->rss_enabled = true;
- } else {
- wr32m(wx, WX_RDB_RA_CTL, WX_RDB_RA_CTL_RSS_EN, 0);
- wx->rss_enabled = false;
- }
+ wx->rss_enabled = !!(features & NETIF_F_RXHASH);
+ wx_enable_rss(wx, wx->rss_enabled);
netdev->features = features;
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_sriov.c b/drivers/net/ethernet/wangxun/libwx/wx_sriov.c
index c82ae137756c..c6d158cd70da 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_sriov.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_sriov.c
@@ -150,6 +150,12 @@ static int wx_pci_sriov_enable(struct pci_dev *dev,
struct wx *wx = pci_get_drvdata(dev);
int err = 0, i;
+ if (netif_is_rxfh_configured(wx->netdev)) {
+ wx_err(wx, "Cannot enable SR-IOV while RXFH is configured\n");
+ wx_err(wx, "Run 'ethtool -X <if> default' to reset RSS table\n");
+ return -EBUSY;
+ }
+
err = __wx_enable_sriov(wx, num_vfs);
if (err)
return err;
@@ -173,12 +179,20 @@ err_out:
return err;
}
-static void wx_pci_sriov_disable(struct pci_dev *dev)
+static int wx_pci_sriov_disable(struct pci_dev *dev)
{
struct wx *wx = pci_get_drvdata(dev);
+ if (netif_is_rxfh_configured(wx->netdev)) {
+ wx_err(wx, "Cannot disable SR-IOV while RXFH is configured\n");
+ wx_err(wx, "Run 'ethtool -X <if> default' to reset RSS table\n");
+ return -EBUSY;
+ }
+
wx_disable_sriov(wx);
wx_sriov_reinit(wx);
+
+ return 0;
}
int wx_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
@@ -187,10 +201,8 @@ int wx_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
int err;
if (!num_vfs) {
- if (!pci_vfs_assigned(pdev)) {
- wx_pci_sriov_disable(pdev);
- return 0;
- }
+ if (!pci_vfs_assigned(pdev))
+ return wx_pci_sriov_disable(pdev);
wx_err(wx, "can't free VFs because some are assigned to VMs.\n");
return -EBUSY;
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
index 9d5d10f9e410..d89b9b8a0a2c 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
@@ -10,6 +10,7 @@
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/phylink.h>
+#include <linux/dim.h>
#include <net/ip.h>
#define WX_NCSI_SUP 0x8000
@@ -167,9 +168,12 @@
#define WX_RDB_PL_CFG_L2HDR BIT(3)
#define WX_RDB_PL_CFG_TUN_TUNHDR BIT(4)
#define WX_RDB_PL_CFG_TUN_OUTL2HDR BIT(5)
+#define WX_RDB_PL_CFG_RSS_EN BIT(24)
+#define WX_RDB_PL_CFG_RSS_MASK GENMASK(23, 16)
#define WX_RDB_RSSTBL(_i) (0x19400 + ((_i) * 4))
#define WX_RDB_RSSRK(_i) (0x19480 + ((_i) * 4))
#define WX_RDB_RA_CTL 0x194F4
+#define WX_RDB_RA_CTL_MULTI_RSS BIT(0)
#define WX_RDB_RA_CTL_RSS_EN BIT(2) /* RSS Enable */
#define WX_RDB_RA_CTL_RSS_IPV4_TCP BIT(16)
#define WX_RDB_RA_CTL_RSS_IPV4 BIT(17)
@@ -177,8 +181,12 @@
#define WX_RDB_RA_CTL_RSS_IPV6_TCP BIT(21)
#define WX_RDB_RA_CTL_RSS_IPV4_UDP BIT(22)
#define WX_RDB_RA_CTL_RSS_IPV6_UDP BIT(23)
+#define WX_RDB_RA_CTL_RSS_MASK GENMASK(23, 16)
#define WX_RDB_FDIR_MATCH 0x19558
#define WX_RDB_FDIR_MISS 0x1955C
+/* VM RSS */
+#define WX_RDB_VMRSSRK(_i, _p) (0x1A000 + ((_i) * 4) + ((_p) * 0x40))
+#define WX_RDB_VMRSSTBL(_i, _p) (0x1B000 + ((_i) * 4) + ((_p) * 0x40))
/******************************* PSR Registers *******************************/
/* psr control */
@@ -1033,6 +1041,7 @@ struct wx_ring_container {
unsigned int total_packets; /* total packets processed this int */
u8 count; /* total number of rings in vector */
u8 itr; /* current ITR setting for ring */
+ struct dim dim; /* data for net_dim algorithm */
};
struct wx_ring {
struct wx_ring *next; /* pointer to next ring in q_vector */
@@ -1089,6 +1098,8 @@ struct wx_q_vector {
struct napi_struct napi;
struct rcu_head rcu; /* to avoid race with update stats on free */
+ u16 total_events; /* number of interrupts processed */
+
char name[IFNAMSIZ + 17];
/* for dynamic allocation of rings associated with this q_vector */
@@ -1188,6 +1199,21 @@ struct vf_macvlans {
u8 vf_macvlan[ETH_ALEN];
};
+#define WX_RSS_FIELD_IPV4_TCP BIT(0)
+#define WX_RSS_FIELD_IPV4 BIT(1)
+#define WX_RSS_FIELD_IPV4_SCTP BIT(2)
+#define WX_RSS_FIELD_IPV6_SCTP BIT(3)
+#define WX_RSS_FIELD_IPV6_TCP BIT(4)
+#define WX_RSS_FIELD_IPV6 BIT(5)
+#define WX_RSS_FIELD_IPV4_UDP BIT(6)
+#define WX_RSS_FIELD_IPV6_UDP BIT(7)
+
+struct wx_rss_flow_map {
+ u8 flow_type;
+ u32 data;
+ u8 flag;
+};
+
enum wx_pf_flags {
WX_FLAG_MULTI_64_FUNC,
WX_FLAG_SWFW_RING,
@@ -1268,6 +1294,7 @@ struct wx {
int num_rx_queues;
u16 rx_itr_setting;
u16 rx_work_limit;
+ bool adaptive_itr;
int num_q_vectors; /* current number of q_vectors for device */
int max_q_vectors; /* upper limit of q_vectors for device */
@@ -1297,6 +1324,7 @@ struct wx {
#define WX_MAX_RETA_ENTRIES 128
#define WX_RSS_INDIR_TBL_MAX 64
u8 rss_indir_tbl[WX_MAX_RETA_ENTRIES];
+ u8 rss_flags;
bool rss_enabled;
#define WX_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */
u32 *rss_key;
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_vf.h b/drivers/net/ethernet/wangxun/libwx/wx_vf.h
index fec1126703e3..3f16de0fa427 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_vf.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_vf.h
@@ -4,6 +4,7 @@
#ifndef _WX_VF_H_
#define _WX_VF_H_
+/* Control registers */
#define WX_VF_MAX_RING_NUMS 8
#define WX_VX_PF_BME 0x4B8
#define WX_VF_BME_ENABLE BIT(0)
@@ -12,16 +13,32 @@
#define WX_VXCTRL_RST BIT(0)
#define WX_VXMRQC 0x78
+#define WX_VXMRQC_PSR_L4HDR BIT(0)
+#define WX_VXMRQC_PSR_L3HDR BIT(1)
+#define WX_VXMRQC_PSR_L2HDR BIT(2)
+#define WX_VXMRQC_PSR_TUNHDR BIT(3)
+#define WX_VXMRQC_PSR_TUNMAC BIT(4)
+#define WX_VXMRQC_PSR_MASK GENMASK(5, 1)
+#define WX_VXMRQC_PSR(f) FIELD_PREP(GENMASK(5, 1), f)
+#define WX_VXMRQC_RSS_HASH(f) FIELD_PREP(GENMASK(15, 13), f)
+#define WX_VXMRQC_RSS_MASK GENMASK(31, 16)
+#define WX_VXMRQC_RSS(f) FIELD_PREP(GENMASK(31, 16), f)
+#define WX_VXMRQC_RSS_ALG_IPV4_TCP BIT(0)
+#define WX_VXMRQC_RSS_ALG_IPV4 BIT(1)
+#define WX_VXMRQC_RSS_ALG_IPV6 BIT(4)
+#define WX_VXMRQC_RSS_ALG_IPV6_TCP BIT(5)
+#define WX_VXMRQC_RSS_EN BIT(8)
+
+#define WX_VXRSSRK(i) (0x80 + ((i) * 4)) /* i=[0,9] */
+#define WX_VXRETA(i) (0xC0 + ((i) * 4)) /* i=[0,15] */
+
+/* Interrupt registers */
#define WX_VXICR 0x100
#define WX_VXIMS 0x108
#define WX_VXIMC 0x10C
#define WX_VF_IRQ_CLEAR_MASK 7
#define WX_VF_MAX_TX_QUEUES 4
#define WX_VF_MAX_RX_QUEUES 4
-#define WX_VXTXDCTL(r) (0x3010 + (0x40 * (r)))
-#define WX_VXRXDCTL(r) (0x1010 + (0x40 * (r)))
-#define WX_VXRXDCTL_ENABLE BIT(0)
-#define WX_VXTXDCTL_FLUSH BIT(26)
#define WX_VXITR(i) (0x200 + (4 * (i))) /* i=[0,1] */
#define WX_VXITR_MASK GENMASK(8, 0)
@@ -29,16 +46,6 @@
#define WX_VXIVAR_MISC 0x260
#define WX_VXIVAR(i) (0x240 + (4 * (i))) /* i=[0,3] */
-#define WX_VXRXDCTL_RSCMAX(f) FIELD_PREP(GENMASK(24, 23), f)
-#define WX_VXRXDCTL_BUFLEN(f) FIELD_PREP(GENMASK(6, 1), f)
-#define WX_VXRXDCTL_BUFSZ(f) FIELD_PREP(GENMASK(11, 8), f)
-#define WX_VXRXDCTL_HDRSZ(f) FIELD_PREP(GENMASK(15, 12), f)
-
-#define WX_VXRXDCTL_RSCMAX_MASK GENMASK(24, 23)
-#define WX_VXRXDCTL_BUFLEN_MASK GENMASK(6, 1)
-#define WX_VXRXDCTL_BUFSZ_MASK GENMASK(11, 8)
-#define WX_VXRXDCTL_HDRSZ_MASK GENMASK(15, 12)
-
#define wx_conf_size(v, mwidth, uwidth) ({ \
typeof(v) _v = (v); \
(_v == 2 << (mwidth) ? 0 : _v >> (uwidth)); \
@@ -59,44 +66,35 @@
#define WX_VXRDBAH(r) (0x1004 + (0x40 * (r)))
#define WX_VXRDT(r) (0x1008 + (0x40 * (r)))
#define WX_VXRDH(r) (0x100C + (0x40 * (r)))
-
+#define WX_VXRXDCTL(r) (0x1010 + (0x40 * (r)))
+#define WX_VXRXDCTL_ENABLE BIT(0)
+#define WX_VXRXDCTL_BUFLEN_MASK GENMASK(6, 1)
+#define WX_VXRXDCTL_BUFLEN(f) FIELD_PREP(GENMASK(6, 1), f)
+#define WX_VXRXDCTL_BUFSZ_MASK GENMASK(11, 8)
+#define WX_VXRXDCTL_BUFSZ(f) FIELD_PREP(GENMASK(11, 8), f)
+#define WX_VXRXDCTL_HDRSZ_MASK GENMASK(15, 12)
+#define WX_VXRXDCTL_HDRSZ(f) FIELD_PREP(GENMASK(15, 12), f)
+#define WX_VXRXDCTL_RSCMAX_MASK GENMASK(24, 23)
+#define WX_VXRXDCTL_RSCMAX(f) FIELD_PREP(GENMASK(24, 23), f)
#define WX_VXRXDCTL_RSCEN BIT(29)
#define WX_VXRXDCTL_DROP BIT(30)
#define WX_VXRXDCTL_VLAN BIT(31)
+/* Transmit Path */
#define WX_VXTDBAL(r) (0x3000 + (0x40 * (r)))
#define WX_VXTDBAH(r) (0x3004 + (0x40 * (r)))
#define WX_VXTDT(r) (0x3008 + (0x40 * (r)))
#define WX_VXTDH(r) (0x300C + (0x40 * (r)))
-
+#define WX_VXTXDCTL(r) (0x3010 + (0x40 * (r)))
#define WX_VXTXDCTL_ENABLE BIT(0)
#define WX_VXTXDCTL_BUFLEN(f) FIELD_PREP(GENMASK(6, 1), f)
#define WX_VXTXDCTL_PTHRESH(f) FIELD_PREP(GENMASK(11, 8), f)
#define WX_VXTXDCTL_WTHRESH(f) FIELD_PREP(GENMASK(22, 16), f)
-
-#define WX_VXMRQC_PSR(f) FIELD_PREP(GENMASK(5, 1), f)
-#define WX_VXMRQC_PSR_MASK GENMASK(5, 1)
-#define WX_VXMRQC_PSR_L4HDR BIT(0)
-#define WX_VXMRQC_PSR_L3HDR BIT(1)
-#define WX_VXMRQC_PSR_L2HDR BIT(2)
-#define WX_VXMRQC_PSR_TUNHDR BIT(3)
-#define WX_VXMRQC_PSR_TUNMAC BIT(4)
-
-#define WX_VXRSSRK(i) (0x80 + ((i) * 4)) /* i=[0,9] */
-#define WX_VXRETA(i) (0xC0 + ((i) * 4)) /* i=[0,15] */
-
-#define WX_VXMRQC_RSS(f) FIELD_PREP(GENMASK(31, 16), f)
-#define WX_VXMRQC_RSS_MASK GENMASK(31, 16)
-#define WX_VXMRQC_RSS_ALG_IPV4_TCP BIT(0)
-#define WX_VXMRQC_RSS_ALG_IPV4 BIT(1)
-#define WX_VXMRQC_RSS_ALG_IPV6 BIT(4)
-#define WX_VXMRQC_RSS_ALG_IPV6_TCP BIT(5)
-#define WX_VXMRQC_RSS_EN BIT(8)
-#define WX_VXMRQC_RSS_HASH(f) FIELD_PREP(GENMASK(15, 13), f)
+#define WX_VXTXDCTL_FLUSH BIT(26)
#define WX_PFLINK_STATUS(g) FIELD_GET(BIT(0), g)
#define WX_PFLINK_SPEED(g) FIELD_GET(GENMASK(31, 1), g)
-#define WX_VXSTATUS_SPEED(g) FIELD_GET(GENMASK(4, 1), g)
+#define WX_VXSTATUS_SPEED(g) FIELD_GET(GENMASK(4, 1), g)
struct wx_link_reg_fields {
u32 mac_type;
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_vf_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_vf_lib.c
index 3023ea2732ef..a87887b9f8ee 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_vf_lib.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_vf_lib.c
@@ -10,7 +10,7 @@
#include "wx_vf.h"
#include "wx_vf_lib.h"
-static void wx_write_eitr_vf(struct wx_q_vector *q_vector)
+void wx_write_eitr_vf(struct wx_q_vector *q_vector)
{
struct wx *wx = q_vector->wx;
int v_idx = q_vector->v_idx;
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_vf_lib.h b/drivers/net/ethernet/wangxun/libwx/wx_vf_lib.h
index 43ea126b79eb..a4bd23c92800 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_vf_lib.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_vf_lib.h
@@ -4,6 +4,7 @@
#ifndef _WX_VF_LIB_H_
#define _WX_VF_LIB_H_
+void wx_write_eitr_vf(struct wx_q_vector *q_vector);
void wx_configure_msix_vf(struct wx *wx);
int wx_write_uc_addr_list_vf(struct net_device *netdev);
void wx_setup_psrtype_vf(struct wx *wx);
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
index 7e2d9ec38a30..662f28bdde8a 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
@@ -115,7 +115,8 @@ static int ngbe_set_channels(struct net_device *dev,
static const struct ethtool_ops ngbe_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
- ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ,
+ ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ |
+ ETHTOOL_COALESCE_USE_ADAPTIVE,
.get_drvinfo = wx_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_link_ksettings = wx_get_link_ksettings,
@@ -136,6 +137,12 @@ static const struct ethtool_ops ngbe_ethtool_ops = {
.set_coalesce = wx_set_coalesce,
.get_channels = wx_get_channels,
.set_channels = ngbe_set_channels,
+ .get_rxfh_fields = wx_get_rxfh_fields,
+ .set_rxfh_fields = wx_set_rxfh_fields,
+ .get_rxfh_indir_size = wx_rss_indir_size,
+ .get_rxfh_key_size = wx_get_rxfh_key_size,
+ .get_rxfh = wx_get_rxfh,
+ .set_rxfh = wx_set_rxfh,
.get_msglevel = wx_get_msglevel,
.set_msglevel = wx_set_msglevel,
.get_ts_info = wx_get_ts_info,
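Advertising ETHTOOL_COALESCE_USE_ADAPTIVE in supported_coalesce_params lets the ethtool core accept `ethtool -C ... adaptive-rx on|off` and reject any coalescing knob the driver did not list, before .set_coalesce is even called. A sketch of how a handler might then consume the flag; my_set_coalesce() and the priv fields are illustrative, not the wx implementation:

static int my_set_coalesce(struct net_device *dev,
			   struct ethtool_coalesce *ec,
			   struct kernel_ethtool_coalesce *kec,
			   struct netlink_ext_ack *extack)
{
	struct my_priv *priv = netdev_priv(dev);

	/* core has already filtered out unadvertised parameters */
	priv->adaptive_itr = ec->use_adaptive_rx_coalesce;
	if (!priv->adaptive_itr)
		priv->rx_itr = ec->rx_coalesce_usecs;
	return 0;
}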
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
index e0fc897b0a58..58488e138beb 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
@@ -119,9 +119,9 @@ static int ngbe_sw_init(struct wx *wx)
num_online_cpus());
wx->rss_enabled = true;
- /* enable itr by default in dynamic mode */
- wx->rx_itr_setting = 1;
- wx->tx_itr_setting = 1;
+ wx->adaptive_itr = false;
+ wx->rx_itr_setting = WX_7K_ITR;
+ wx->tx_itr_setting = WX_7K_ITR;
/* set default ring sizes */
wx->tx_ring_count = NGBE_DEFAULT_TXD;
diff --git a/drivers/net/ethernet/wangxun/ngbevf/ngbevf_main.c b/drivers/net/ethernet/wangxun/ngbevf/ngbevf_main.c
index c1246ab5239c..6ef43adcc425 100644
--- a/drivers/net/ethernet/wangxun/ngbevf/ngbevf_main.c
+++ b/drivers/net/ethernet/wangxun/ngbevf/ngbevf_main.c
@@ -14,6 +14,7 @@
#include "../libwx/wx_mbx.h"
#include "../libwx/wx_vf.h"
#include "../libwx/wx_vf_common.h"
+#include "../libwx/wx_ethtool.h"
#include "ngbevf_type.h"
/* ngbevf_pci_tbl - PCI Device ID Table
@@ -100,6 +101,7 @@ static int ngbevf_sw_init(struct wx *wx)
wx->mac.max_tx_queues = NGBEVF_MAX_TX_QUEUES;
wx->mac.max_rx_queues = NGBEVF_MAX_RX_QUEUES;
/* Enable dynamic interrupt throttling rates */
+ wx->adaptive_itr = true;
wx->rx_itr_setting = 1;
wx->tx_itr_setting = 1;
/* set default ring sizes */
@@ -185,6 +187,8 @@ static int ngbevf_probe(struct pci_dev *pdev,
goto err_pci_release_regions;
}
+ wx->driver_name = KBUILD_MODNAME;
+ wx_set_ethtool_ops_vf(netdev);
netdev->netdev_ops = &ngbevf_netdev_ops;
/* setup the private structure */
@@ -202,6 +206,7 @@ static int ngbevf_probe(struct pci_dev *pdev,
if (err)
goto err_free_sw_init;
+ wx_get_fw_version_vf(wx);
err = register_netdev(netdev);
if (err)
goto err_register;
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
index a4753402660e..e285b088c7b2 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
@@ -538,7 +538,8 @@ static int txgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
static const struct ethtool_ops txgbe_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
- ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ,
+ ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ |
+ ETHTOOL_COALESCE_USE_ADAPTIVE,
.get_drvinfo = wx_get_drvinfo,
.nway_reset = wx_nway_reset,
.get_link = ethtool_op_get_link,
@@ -559,6 +560,12 @@ static const struct ethtool_ops txgbe_ethtool_ops = {
.set_channels = txgbe_set_channels,
.get_rxnfc = txgbe_get_rxnfc,
.set_rxnfc = txgbe_set_rxnfc,
+ .get_rxfh_fields = wx_get_rxfh_fields,
+ .set_rxfh_fields = wx_set_rxfh_fields,
+ .get_rxfh_indir_size = wx_rss_indir_size,
+ .get_rxfh_key_size = wx_get_rxfh_key_size,
+ .get_rxfh = wx_get_rxfh,
+ .set_rxfh = wx_set_rxfh,
.get_msglevel = wx_get_msglevel,
.set_msglevel = wx_set_msglevel,
.get_ts_info = wx_get_ts_info,
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
index a5867f3c93fc..c4c4d70d8466 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -401,6 +401,7 @@ static int txgbe_sw_init(struct wx *wx)
set_bit(WX_FLAG_MULTI_64_FUNC, wx->flags);
/* enable itr by default in dynamic mode */
+ wx->adaptive_itr = true;
wx->rx_itr_setting = 1;
wx->tx_itr_setting = 1;
diff --git a/drivers/net/ethernet/wangxun/txgbevf/txgbevf_main.c b/drivers/net/ethernet/wangxun/txgbevf/txgbevf_main.c
index ebfce3cf753e..72663e3c4205 100644
--- a/drivers/net/ethernet/wangxun/txgbevf/txgbevf_main.c
+++ b/drivers/net/ethernet/wangxun/txgbevf/txgbevf_main.c
@@ -14,6 +14,7 @@
#include "../libwx/wx_mbx.h"
#include "../libwx/wx_vf.h"
#include "../libwx/wx_vf_common.h"
+#include "../libwx/wx_ethtool.h"
#include "txgbevf_type.h"
/* txgbevf_pci_tbl - PCI Device ID Table
@@ -144,6 +145,7 @@ static int txgbevf_sw_init(struct wx *wx)
wx->mac.max_tx_queues = TXGBEVF_MAX_TX_QUEUES;
wx->mac.max_rx_queues = TXGBEVF_MAX_RX_QUEUES;
/* Enable dynamic interrupt throttling rates */
+ wx->adaptive_itr = true;
wx->rx_itr_setting = 1;
wx->tx_itr_setting = 1;
/* set default ring sizes */
@@ -238,6 +240,8 @@ static int txgbevf_probe(struct pci_dev *pdev,
goto err_pci_release_regions;
}
+ wx->driver_name = KBUILD_MODNAME;
+ wx_set_ethtool_ops_vf(netdev);
netdev->netdev_ops = &txgbevf_netdev_ops;
/* setup the private structure */
@@ -255,6 +259,7 @@ static int txgbevf_probe(struct pci_dev *pdev,
if (err)
goto err_free_sw_init;
+ wx_get_fw_version_vf(wx);
err = register_netdev(netdev);
if (err)
goto err_register;
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index b77f096eaf99..c5424d882135 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -1142,7 +1142,7 @@ int w5100_probe(struct device *dev, const struct w5100_ops *ops,
if (err < 0)
goto err_register;
- priv->xfer_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
+ priv->xfer_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM | WQ_PERCPU, 0,
netdev_name(ndev));
if (!priv->xfer_wq) {
err = -ENOMEM;
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index ec6d47dc984a..284031fb2e2c 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -238,6 +238,8 @@ static u64 axienet_dma_rate(struct axienet_local *lp)
*
* Calculate a control register value based on the coalescing settings. The
* run/stop bit is not set.
+ *
+ * Return: Control register value with coalescing settings configured.
*/
static u32 axienet_calc_cr(struct axienet_local *lp, u32 count, u32 usec)
{
@@ -702,7 +704,8 @@ static void axienet_dma_stop(struct axienet_local *lp)
* are connected to Axi Ethernet reset lines, this in turn resets the Axi
* Ethernet core. No separate hardware reset is done for the Axi Ethernet
* core.
- * Returns 0 on success or a negative error number otherwise.
+ *
+ * Return: 0 on success or a negative error number otherwise.
*/
static int axienet_device_reset(struct net_device *ndev)
{
@@ -773,7 +776,8 @@ static int axienet_device_reset(struct net_device *ndev)
*
* Would either be called after a successful transmit operation, or after
* there was an error when setting up the chain.
- * Returns the number of packets handled.
+ *
+ * Return: The number of packets handled.
*/
static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
int nr_bds, bool force, u32 *sizep, int budget)
@@ -2112,6 +2116,8 @@ static void axienet_update_coalesce_rx(struct axienet_local *lp, u32 cr,
/**
* axienet_dim_coalesce_count_rx() - RX coalesce count for DIM
* @lp: Device private data
+ *
+ * Return: RX coalescing frame count value for DIM.
*/
static u32 axienet_dim_coalesce_count_rx(struct axienet_local *lp)
{
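The axienet hunks above only touch kernel-doc: non-void functions gain the dedicated "Return:" section that scripts/kernel-doc expects, instead of a free-form "Returns ..." sentence. The shape of the fix, on a made-up helper:

/**
 * my_double() - illustrative kernel-doc with a proper Return: section
 * @x: input value
 *
 * Return: @x doubled.
 */
static inline u32 my_double(u32 x)
{
	return 2 * x;
}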
diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
index 4a4ed2ccf72f..b63965d9a1ba 100644
--- a/drivers/net/fjes/fjes_main.c
+++ b/drivers/net/fjes/fjes_main.c
@@ -1364,14 +1364,15 @@ static int fjes_probe(struct platform_device *plat_dev)
adapter->force_reset = false;
adapter->open_guard = false;
- adapter->txrx_wq = alloc_workqueue(DRV_NAME "/txrx", WQ_MEM_RECLAIM, 0);
+ adapter->txrx_wq = alloc_workqueue(DRV_NAME "/txrx",
+ WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (unlikely(!adapter->txrx_wq)) {
err = -ENOMEM;
goto err_free_netdev;
}
adapter->control_wq = alloc_workqueue(DRV_NAME "/control",
- WQ_MEM_RECLAIM, 0);
+ WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (unlikely(!adapter->control_wq)) {
err = -ENOMEM;
goto err_free_txrx_wq;
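The w5100 and fjes hunks both add WQ_PERCPU, keeping the queues' current per-CPU placement explicit while the workqueue API moves toward unbound-by-default; WQ_MEM_RECLAIM still guarantees forward progress under memory pressure. A minimal sketch, assuming a hypothetical adapter struct:

static int my_create_wq(struct my_adapter *ad)
{
	/* explicitly per-CPU and usable on the memory-reclaim path */
	ad->wq = alloc_workqueue("my-wq", WQ_MEM_RECLAIM | WQ_PERCPU, 0);
	return ad->wq ? 0 : -ENOMEM;
}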
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 54384f9b3872..77b0c3d52041 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -53,7 +53,6 @@ struct geneve_dev_node {
};
struct geneve_config {
- struct ip_tunnel_info info;
bool collect_md;
bool use_udp6_rx_checksums;
bool ttl_inherit;
@@ -61,6 +60,9 @@ struct geneve_config {
bool inner_proto_inherit;
u16 port_min;
u16 port_max;
+
+ /* Must be last -- ends in a flexible-array member. */
+ struct ip_tunnel_info info;
};
/* Pseudo network device */
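The geneve change is purely positional: struct ip_tunnel_info ends in a flexible-array member (the tunnel options), and any struct embedding it must place it last, or the members that follow would be laid out inside the variable-length tail. A generic sketch of the rule and the matching allocation (names are illustrative):

struct my_cfg {
	bool collect_md;
	/* ends in a flexible array; must stay the final member */
	struct ip_tunnel_info info;
};

static struct my_cfg *my_cfg_alloc(size_t opts_len)
{
	/* fold the variable-length option area into the allocation */
	return kzalloc(sizeof(struct my_cfg) + opts_len, GFP_KERNEL);
}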
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 4b668ebaa0f7..5cb59d72bc82 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -21,9 +21,10 @@
#include <linux/file.h>
#include <linux/gtp.h>
+#include <net/flow.h>
+#include <net/inet_dscp.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
-#include <net/inet_dscp.h>
#include <net/inet_sock.h>
#include <net/ip.h>
#include <net/ipv6.h>
@@ -352,7 +353,7 @@ static struct rtable *ip4_route_output_gtp(struct flowi4 *fl4,
fl4->flowi4_oif = sk->sk_bound_dev_if;
fl4->daddr = daddr;
fl4->saddr = saddr;
- fl4->flowi4_tos = inet_dscp_to_dsfield(inet_sk_dscp(inet_sk(sk)));
+ fl4->flowi4_dscp = inet_sk_dscp(inet_sk(sk));
fl4->flowi4_scope = ip_sock_rt_scope(sk);
fl4->flowi4_proto = sk->sk_protocol;
@@ -2401,7 +2402,7 @@ static int gtp_genl_send_echo_req(struct sk_buff *skb, struct genl_info *info)
udp_tunnel_xmit_skb(rt, sk, skb_to_send,
fl4.saddr, fl4.daddr,
- fl4.flowi4_tos,
+ inet_dscp_to_dsfield(fl4.flowi4_dscp),
ip4_dst_hoplimit(&rt->dst),
0,
port, port,
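The gtp hunks above (and the ipvlan hunk further down) are part of the flowi4_tos to flowi4_dscp conversion: the flow key now carries a typed dscp_t with the ECN bits excluded, and a raw ToS byte is produced only at the few call sites that still want one. In sketch form, with hypothetical wrappers:

#include <net/flow.h>
#include <net/inet_dscp.h>
#include <net/ip.h>

/* fill the typed field when building the flow key */
static void my_fill_flow(struct flowi4 *fl4, const struct iphdr *iph)
{
	fl4->flowi4_dscp = ip4h_dscp(iph);	/* dscp_t, no ECN bits */
}

/* convert back to a dsfield byte only where an API requires it */
static u8 my_wire_tos(const struct flowi4 *fl4)
{
	return inet_dscp_to_dsfield(fl4->flowi4_dscp);
}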
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index c5e5423e1863..885992951e8a 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -115,8 +115,6 @@ struct sixpack {
struct timer_list tx_t;
struct timer_list resync_t;
- refcount_t refcnt;
- struct completion dead;
spinlock_t lock;
};
@@ -354,41 +352,12 @@ out_mem:
/* ----------------------------------------------------------------------- */
/*
- * We have a potential race on dereferencing tty->disc_data, because the tty
- * layer provides no locking at all - thus one cpu could be running
- * sixpack_receive_buf while another calls sixpack_close, which zeroes
- * tty->disc_data and frees the memory that sixpack_receive_buf is using. The
- * best way to fix this is to use a rwlock in the tty struct, but for now we
- * use a single global rwlock for all ttys in ppp line discipline.
- */
-static DEFINE_RWLOCK(disc_data_lock);
-
-static struct sixpack *sp_get(struct tty_struct *tty)
-{
- struct sixpack *sp;
-
- read_lock(&disc_data_lock);
- sp = tty->disc_data;
- if (sp)
- refcount_inc(&sp->refcnt);
- read_unlock(&disc_data_lock);
-
- return sp;
-}
-
-static void sp_put(struct sixpack *sp)
-{
- if (refcount_dec_and_test(&sp->refcnt))
- complete(&sp->dead);
-}
-
-/*
* Called by the TTY driver when there's room for more data. If we have
* more packets to send, we send them here.
*/
static void sixpack_write_wakeup(struct tty_struct *tty)
{
- struct sixpack *sp = sp_get(tty);
+ struct sixpack *sp = tty->disc_data;
int actual;
if (!sp)
@@ -400,7 +369,7 @@ static void sixpack_write_wakeup(struct tty_struct *tty)
clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
sp->tx_enable = 0;
netif_wake_queue(sp->dev);
- goto out;
+ return;
}
if (sp->tx_enable) {
@@ -408,9 +377,6 @@ static void sixpack_write_wakeup(struct tty_struct *tty)
sp->xleft -= actual;
sp->xhead += actual;
}
-
-out:
- sp_put(sp);
}
/* ----------------------------------------------------------------------- */
@@ -430,7 +396,7 @@ static void sixpack_receive_buf(struct tty_struct *tty, const u8 *cp,
if (!count)
return;
- sp = sp_get(tty);
+ sp = tty->disc_data;
if (!sp)
return;
@@ -446,7 +412,6 @@ static void sixpack_receive_buf(struct tty_struct *tty, const u8 *cp,
}
sixpack_decode(sp, cp, count1);
- sp_put(sp);
tty_unthrottle(tty);
}
@@ -561,8 +526,6 @@ static int sixpack_open(struct tty_struct *tty)
spin_lock_init(&sp->lock);
spin_lock_init(&sp->rxlock);
- refcount_set(&sp->refcnt, 1);
- init_completion(&sp->dead);
/* !!! length of the buffers. MTU is IP MTU, not PACLEN! */
@@ -638,19 +601,11 @@ static void sixpack_close(struct tty_struct *tty)
{
struct sixpack *sp;
- write_lock_irq(&disc_data_lock);
sp = tty->disc_data;
- tty->disc_data = NULL;
- write_unlock_irq(&disc_data_lock);
if (!sp)
return;
- /*
- * We have now ensured that nobody can start using ap from now on, but
- * we have to wait for all existing users to finish.
- */
- if (!refcount_dec_and_test(&sp->refcnt))
- wait_for_completion(&sp->dead);
+ tty->disc_data = NULL;
/* We must stop the queue to avoid potentially scribbling
* on the free buffers. The sp->dead completion is not sufficient
@@ -673,7 +628,7 @@ static void sixpack_close(struct tty_struct *tty)
static int sixpack_ioctl(struct tty_struct *tty, unsigned int cmd,
unsigned long arg)
{
- struct sixpack *sp = sp_get(tty);
+ struct sixpack *sp = tty->disc_data;
struct net_device *dev;
unsigned int tmp, err;
@@ -725,8 +680,6 @@ static int sixpack_ioctl(struct tty_struct *tty, unsigned int cmd,
err = tty_mode_ioctl(tty, cmd, arg);
}
- sp_put(sp);
-
return err;
}
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index e3e65772c599..d7e3ddbcab6f 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -2,7 +2,7 @@
/* Copyright (c) 2014 Mahesh Bandewar <maheshb@google.com>
*/
-#include <net/inet_dscp.h>
+#include <net/flow.h>
#include <net/ip.h>
#include "ipvlan.h"
@@ -433,7 +433,7 @@ static noinline_for_stack int ipvlan_process_v4_outbound(struct sk_buff *skb)
ip4h = ip_hdr(skb);
fl4.daddr = ip4h->daddr;
fl4.saddr = ip4h->saddr;
- fl4.flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(ip4h));
+ fl4.flowi4_dscp = ip4h_dscp(ip4h);
rt = ip_route_output_flow(net, &fl4, NULL);
if (IS_ERR(rt))
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 0eca96eeed58..5200fd5a10e5 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -1583,9 +1583,6 @@ static struct macsec_tx_sa *get_txsa_from_nl(struct net *net,
if (IS_ERR(dev))
return ERR_CAST(dev);
- if (*assoc_num >= MACSEC_NUM_AN)
- return ERR_PTR(-EINVAL);
-
secy = &macsec_priv(dev)->secy;
tx_sc = &secy->tx_sc;
@@ -1646,8 +1643,6 @@ static struct macsec_rx_sa *get_rxsa_from_nl(struct net *net,
return ERR_PTR(-EINVAL);
*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
- if (*assoc_num >= MACSEC_NUM_AN)
- return ERR_PTR(-EINVAL);
rx_sc = get_rxsc_from_nl(net, attrs, tb_rxsc, devp, secyp);
if (IS_ERR(rx_sc))
@@ -1670,24 +1665,21 @@ static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = {
static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
[MACSEC_RXSC_ATTR_SCI] = { .type = NLA_U64 },
- [MACSEC_RXSC_ATTR_ACTIVE] = { .type = NLA_U8 },
+ [MACSEC_RXSC_ATTR_ACTIVE] = NLA_POLICY_MAX(NLA_U8, 1),
};
static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
- [MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
- [MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
- [MACSEC_SA_ATTR_PN] = NLA_POLICY_MIN_LEN(4),
- [MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
- .len = MACSEC_KEYID_LEN, },
- [MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
- .len = MACSEC_MAX_KEY_LEN, },
+ [MACSEC_SA_ATTR_AN] = NLA_POLICY_MAX(NLA_U8, MACSEC_NUM_AN - 1),
+ [MACSEC_SA_ATTR_ACTIVE] = NLA_POLICY_MAX(NLA_U8, 1),
+ [MACSEC_SA_ATTR_PN] = NLA_POLICY_MIN(NLA_UINT, 1),
+ [MACSEC_SA_ATTR_KEYID] = NLA_POLICY_EXACT_LEN(MACSEC_KEYID_LEN),
+ [MACSEC_SA_ATTR_KEY] = NLA_POLICY_MAX_LEN(MACSEC_MAX_KEY_LEN),
[MACSEC_SA_ATTR_SSCI] = { .type = NLA_U32 },
- [MACSEC_SA_ATTR_SALT] = { .type = NLA_BINARY,
- .len = MACSEC_SALT_LEN, },
+ [MACSEC_SA_ATTR_SALT] = NLA_POLICY_EXACT_LEN(MACSEC_SALT_LEN),
};
static const struct nla_policy macsec_genl_offload_policy[NUM_MACSEC_OFFLOAD_ATTR] = {
- [MACSEC_OFFLOAD_ATTR_TYPE] = { .type = NLA_U8 },
+ [MACSEC_OFFLOAD_ATTR_TYPE] = NLA_POLICY_MAX(NLA_U8, MACSEC_OFFLOAD_MAX),
};
/* Offloads an operation to a device driver */
@@ -1739,21 +1731,6 @@ static bool validate_add_rxsa(struct nlattr **attrs)
!attrs[MACSEC_SA_ATTR_KEYID])
return false;
- if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
- return false;
-
- if (attrs[MACSEC_SA_ATTR_PN] &&
- nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
- return false;
-
- if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
- if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
- return false;
- }
-
- if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
- return false;
-
return true;
}
@@ -1812,14 +1789,6 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
rtnl_unlock();
return -EINVAL;
}
-
- if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
- pr_notice("macsec: nl: add_rxsa: bad salt length: %d != %d\n",
- nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
- MACSEC_SALT_LEN);
- rtnl_unlock();
- return -EINVAL;
- }
}
rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]);
@@ -1895,19 +1864,6 @@ cleanup:
return err;
}
-static bool validate_add_rxsc(struct nlattr **attrs)
-{
- if (!attrs[MACSEC_RXSC_ATTR_SCI])
- return false;
-
- if (attrs[MACSEC_RXSC_ATTR_ACTIVE]) {
- if (nla_get_u8(attrs[MACSEC_RXSC_ATTR_ACTIVE]) > 1)
- return false;
- }
-
- return true;
-}
-
static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
{
struct net_device *dev;
@@ -1925,7 +1881,7 @@ static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
if (parse_rxsc_config(attrs, tb_rxsc))
return -EINVAL;
- if (!validate_add_rxsc(tb_rxsc))
+ if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
return -EINVAL;
rtnl_lock();
@@ -1984,20 +1940,6 @@ static bool validate_add_txsa(struct nlattr **attrs)
!attrs[MACSEC_SA_ATTR_KEYID])
return false;
- if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
- return false;
-
- if (nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
- return false;
-
- if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
- if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
- return false;
- }
-
- if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
- return false;
-
return true;
}
@@ -2055,14 +1997,6 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
rtnl_unlock();
return -EINVAL;
}
-
- if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
- pr_notice("macsec: nl: add_txsa: bad salt length: %d != %d\n",
- nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
- MACSEC_SALT_LEN);
- rtnl_unlock();
- return -EINVAL;
- }
}
tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]);
@@ -2339,17 +2273,6 @@ static bool validate_upd_sa(struct nlattr **attrs)
attrs[MACSEC_SA_ATTR_SALT])
return false;
- if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
- return false;
-
- if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
- return false;
-
- if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
- if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
- return false;
- }
-
return true;
}
@@ -2556,7 +2479,7 @@ static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
if (parse_rxsc_config(attrs, tb_rxsc))
return -EINVAL;
- if (!validate_add_rxsc(tb_rxsc))
+ if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
return -EINVAL;
rtnl_lock();
@@ -3834,21 +3757,23 @@ static const struct device_type macsec_type = {
.name = "macsec",
};
+static int validate_cipher_suite(const struct nlattr *attr,
+ struct netlink_ext_ack *extack);
static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
[IFLA_MACSEC_SCI] = { .type = NLA_U64 },
[IFLA_MACSEC_PORT] = { .type = NLA_U16 },
- [IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 },
- [IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 },
+ [IFLA_MACSEC_ICV_LEN] = NLA_POLICY_RANGE(NLA_U8, MACSEC_MIN_ICV_LEN, MACSEC_STD_ICV_LEN),
+ [IFLA_MACSEC_CIPHER_SUITE] = NLA_POLICY_VALIDATE_FN(NLA_U64, validate_cipher_suite),
[IFLA_MACSEC_WINDOW] = { .type = NLA_U32 },
- [IFLA_MACSEC_ENCODING_SA] = { .type = NLA_U8 },
- [IFLA_MACSEC_ENCRYPT] = { .type = NLA_U8 },
- [IFLA_MACSEC_PROTECT] = { .type = NLA_U8 },
- [IFLA_MACSEC_INC_SCI] = { .type = NLA_U8 },
- [IFLA_MACSEC_ES] = { .type = NLA_U8 },
- [IFLA_MACSEC_SCB] = { .type = NLA_U8 },
- [IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 },
- [IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 },
- [IFLA_MACSEC_OFFLOAD] = { .type = NLA_U8 },
+ [IFLA_MACSEC_ENCODING_SA] = NLA_POLICY_MAX(NLA_U8, MACSEC_NUM_AN - 1),
+ [IFLA_MACSEC_ENCRYPT] = NLA_POLICY_MAX(NLA_U8, 1),
+ [IFLA_MACSEC_PROTECT] = NLA_POLICY_MAX(NLA_U8, 1),
+ [IFLA_MACSEC_INC_SCI] = NLA_POLICY_MAX(NLA_U8, 1),
+ [IFLA_MACSEC_ES] = NLA_POLICY_MAX(NLA_U8, 1),
+ [IFLA_MACSEC_SCB] = NLA_POLICY_MAX(NLA_U8, 1),
+ [IFLA_MACSEC_REPLAY_PROTECT] = NLA_POLICY_MAX(NLA_U8, 1),
+ [IFLA_MACSEC_VALIDATION] = NLA_POLICY_MAX(NLA_U8, MACSEC_VALIDATE_MAX),
+ [IFLA_MACSEC_OFFLOAD] = NLA_POLICY_MAX(NLA_U8, MACSEC_OFFLOAD_MAX),
};
static void macsec_free_netdev(struct net_device *dev)
@@ -4303,20 +4228,30 @@ unregister:
return err;
}
+static int validate_cipher_suite(const struct nlattr *attr,
+ struct netlink_ext_ack *extack)
+{
+ switch (nla_get_u64(attr)) {
+ case MACSEC_CIPHER_ID_GCM_AES_128:
+ case MACSEC_CIPHER_ID_GCM_AES_256:
+ case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
+ case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
+ case MACSEC_DEFAULT_CIPHER_ID:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
- u64 csid = MACSEC_DEFAULT_CIPHER_ID;
u8 icv_len = MACSEC_DEFAULT_ICV_LEN;
- int flag;
bool es, scb, sci;
if (!data)
return 0;
- if (data[IFLA_MACSEC_CIPHER_SUITE])
- csid = nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE]);
-
if (data[IFLA_MACSEC_ICV_LEN]) {
icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
if (icv_len != MACSEC_DEFAULT_ICV_LEN) {
@@ -4332,34 +4267,6 @@ static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[],
}
}
- switch (csid) {
- case MACSEC_CIPHER_ID_GCM_AES_128:
- case MACSEC_CIPHER_ID_GCM_AES_256:
- case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
- case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
- case MACSEC_DEFAULT_CIPHER_ID:
- if (icv_len < MACSEC_MIN_ICV_LEN ||
- icv_len > MACSEC_STD_ICV_LEN)
- return -EINVAL;
- break;
- default:
- return -EINVAL;
- }
-
- if (data[IFLA_MACSEC_ENCODING_SA]) {
- if (nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]) >= MACSEC_NUM_AN)
- return -EINVAL;
- }
-
- for (flag = IFLA_MACSEC_ENCODING_SA + 1;
- flag < IFLA_MACSEC_VALIDATION;
- flag++) {
- if (data[flag]) {
- if (nla_get_u8(data[flag]) > 1)
- return -EINVAL;
- }
- }
-
es = nla_get_u8_default(data[IFLA_MACSEC_ES], false);
sci = nla_get_u8_default(data[IFLA_MACSEC_INC_SCI], false);
scb = nla_get_u8_default(data[IFLA_MACSEC_SCB], false);
@@ -4367,10 +4274,6 @@ static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[],
if ((sci && (scb || es)) || (scb && es))
return -EINVAL;
- if (data[IFLA_MACSEC_VALIDATION] &&
- nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX)
- return -EINVAL;
-
if ((data[IFLA_MACSEC_REPLAY_PROTECT] &&
nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) &&
!data[IFLA_MACSEC_WINDOW])
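The macsec rework moves ad-hoc range and length checks out of the handlers and into the nla_policy tables, where the netlink core enforces them up front and reports failures through extack; only cross-attribute constraints (such as the ES/SCB/SCI exclusivity) remain in code. The declarative style, on hypothetical MY_ATTR_* attributes:

static int my_validate_csid(const struct nlattr *attr,
			    struct netlink_ext_ack *extack);

static const struct nla_policy my_sa_policy[MY_ATTR_MAX + 1] = {
	[MY_ATTR_AN]     = NLA_POLICY_MAX(NLA_U8, 3),	/* 0..3 */
	[MY_ATTR_ACTIVE] = NLA_POLICY_MAX(NLA_U8, 1),	/* boolean */
	[MY_ATTR_PN]     = NLA_POLICY_MIN(NLA_UINT, 1),	/* nonzero */
	[MY_ATTR_KEYID]  = NLA_POLICY_EXACT_LEN(16),
	[MY_ATTR_CSID]   = NLA_POLICY_VALIDATE_FN(NLA_U64,
						  my_validate_csid),
};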
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 4df991e494bd..7966545512cf 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -369,7 +369,7 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port,
}
spin_unlock(&port->bc_queue.lock);
- queue_work(system_unbound_wq, &port->bc_work);
+ queue_work(system_dfl_wq, &port->bc_work);
if (err)
goto free_nskb;
diff --git a/drivers/net/mdio/Kconfig b/drivers/net/mdio/Kconfig
index e1e32b687068..44380378911b 100644
--- a/drivers/net/mdio/Kconfig
+++ b/drivers/net/mdio/Kconfig
@@ -3,11 +3,6 @@
# MDIO Layer Configuration
#
-config MDIO_BUS
- tristate "MDIO bus consumer layer"
- help
- MDIO bus consumer layer
-
if PHYLIB
config FWNODE_MDIO
diff --git a/drivers/net/mdio/mdio-bcm-unimac.c b/drivers/net/mdio/mdio-bcm-unimac.c
index 7baab230008a..37e35f282d9a 100644
--- a/drivers/net/mdio/mdio-bcm-unimac.c
+++ b/drivers/net/mdio/mdio-bcm-unimac.c
@@ -215,7 +215,9 @@ static int unimac_mdio_clk_set(struct unimac_mdio_priv *priv)
div = (rate / (2 * priv->clk_freq)) - 1;
if (div & ~MDIO_CLK_DIV_MASK) {
- pr_warn("Incorrect MDIO clock frequency, ignoring\n");
+ dev_warn(priv->mii_bus->parent,
+ "Ignoring MDIO clock frequency request: %d vs. rate: %ld\n",
+ priv->clk_freq, rate);
ret = 0;
goto out;
}
diff --git a/drivers/net/mdio/of_mdio.c b/drivers/net/mdio/of_mdio.c
index 98f667b121f7..1357348e01d5 100644
--- a/drivers/net/mdio/of_mdio.c
+++ b/drivers/net/mdio/of_mdio.c
@@ -447,6 +447,8 @@ int of_phy_register_fixed_link(struct device_node *np)
/* Old binding */
if (of_property_read_u32_array(np, "fixed-link", fixed_link_prop,
ARRAY_SIZE(fixed_link_prop)) == 0) {
+ pr_warn_once("%pOF uses deprecated array-style fixed-link binding!\n",
+ np);
status.link = 1;
status.duplex = fixed_link_prop[1];
status.speed = fixed_link_prop[2];
@@ -473,6 +475,5 @@ void of_phy_deregister_fixed_link(struct device_node *np)
fixed_phy_unregister(phydev);
put_device(&phydev->mdio.dev); /* of_phy_find_device() */
- phy_device_free(phydev); /* fixed_phy_register() */
}
EXPORT_SYMBOL(of_phy_deregister_fixed_link);
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index e3722de08ea9..194570443493 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -300,6 +300,33 @@ static void netconsole_print_banner(struct netpoll *np)
np_info(np, "remote ethernet address %pM\n", np->remote_mac);
}
+/* Parse the string and populate the `inet_addr` union. Return 0 if IPv4 is
+ * populated, 1 if IPv6 is populated, and -1 upon failure.
+ */
+static int netpoll_parse_ip_addr(const char *str, union inet_addr *addr)
+{
+ const char *end = NULL;
+ int len;
+
+ len = strlen(str);
+ if (!len)
+ return -1;
+
+ if (str[len - 1] == '\n')
+ len -= 1;
+
+ if (in4_pton(str, len, (void *)addr, -1, &end) > 0 &&
+ (!end || *end == 0 || *end == '\n'))
+ return 0;
+
+ if (IS_ENABLED(CONFIG_IPV6) &&
+ in6_pton(str, len, (void *)addr, -1, &end) > 0 &&
+ (!end || *end == 0 || *end == '\n'))
+ return 1;
+
+ return -1;
+}
+
#ifdef CONFIG_NETCONSOLE_DYNAMIC
/*
@@ -730,6 +757,7 @@ static ssize_t local_ip_store(struct config_item *item, const char *buf,
{
struct netconsole_target *nt = to_target(item);
ssize_t ret = -EINVAL;
+ int ipv6;
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
@@ -738,23 +766,10 @@ static ssize_t local_ip_store(struct config_item *item, const char *buf,
goto out_unlock;
}
- if (strnchr(buf, count, ':')) {
- const char *end;
-
- if (in6_pton(buf, count, nt->np.local_ip.in6.s6_addr, -1, &end) > 0) {
- if (*end && *end != '\n') {
- pr_err("invalid IPv6 address at: <%c>\n", *end);
- goto out_unlock;
- }
- nt->np.ipv6 = true;
- } else
- goto out_unlock;
- } else {
- if (!nt->np.ipv6)
- nt->np.local_ip.ip = in_aton(buf);
- else
- goto out_unlock;
- }
+ ipv6 = netpoll_parse_ip_addr(buf, &nt->np.local_ip);
+ if (ipv6 == -1)
+ goto out_unlock;
+ nt->np.ipv6 = !!ipv6;
ret = strnlen(buf, count);
out_unlock:
@@ -767,6 +782,7 @@ static ssize_t remote_ip_store(struct config_item *item, const char *buf,
{
struct netconsole_target *nt = to_target(item);
ssize_t ret = -EINVAL;
+ int ipv6;
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
@@ -775,23 +791,10 @@ static ssize_t remote_ip_store(struct config_item *item, const char *buf,
goto out_unlock;
}
- if (strnchr(buf, count, ':')) {
- const char *end;
-
- if (in6_pton(buf, count, nt->np.remote_ip.in6.s6_addr, -1, &end) > 0) {
- if (*end && *end != '\n') {
- pr_err("invalid IPv6 address at: <%c>\n", *end);
- goto out_unlock;
- }
- nt->np.ipv6 = true;
- } else
- goto out_unlock;
- } else {
- if (!nt->np.ipv6)
- nt->np.remote_ip.ip = in_aton(buf);
- else
- goto out_unlock;
- }
+ ipv6 = netpoll_parse_ip_addr(buf, &nt->np.remote_ip);
+ if (ipv6 == -1)
+ goto out_unlock;
+ nt->np.ipv6 = !!ipv6;
ret = strnlen(buf, count);
out_unlock:
@@ -1742,26 +1745,6 @@ static void write_msg(struct console *con, const char *msg, unsigned int len)
spin_unlock_irqrestore(&target_list_lock, flags);
}
-static int netpoll_parse_ip_addr(const char *str, union inet_addr *addr)
-{
- const char *end;
-
- if (!strchr(str, ':') &&
- in4_pton(str, -1, (void *)addr, -1, &end) > 0) {
- if (!*end)
- return 0;
- }
- if (in6_pton(str, -1, addr->in6.s6_addr, -1, &end) > 0) {
-#if IS_ENABLED(CONFIG_IPV6)
- if (!*end)
- return 1;
-#else
- return -1;
-#endif
- }
- return -1;
-}
-
static int netconsole_parser_cmdline(struct netpoll *np, char *opt)
{
bool ipversion_set = false;
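With the parser shared, both configfs stores reduce to one call: in4_pton()/in6_pton() do the bounded parsing, a trailing newline is tolerated, and the 0/1 return value doubles as the np->ipv6 flag. Roughly how a caller uses it; my_store_ip() is illustrative:

static int my_store_ip(struct netpoll *np, const char *buf)
{
	union inet_addr addr;
	int ipv6;

	ipv6 = netpoll_parse_ip_addr(buf, &addr);
	if (ipv6 < 0)
		return -EINVAL;	/* neither family parsed cleanly */

	np->local_ip = addr;
	np->ipv6 = !!ipv6;	/* 1 => IPv6, 0 => IPv4 */
	return 0;
}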
diff --git a/drivers/net/netdevsim/Makefile b/drivers/net/netdevsim/Makefile
index f8de93bc5f5b..14a553e000ec 100644
--- a/drivers/net/netdevsim/Makefile
+++ b/drivers/net/netdevsim/Makefile
@@ -18,6 +18,10 @@ ifneq ($(CONFIG_PSAMPLE),)
netdevsim-objs += psample.o
endif
+ifneq ($(CONFIG_INET_PSP),)
+netdevsim-objs += psp.o
+endif
+
ifneq ($(CONFIG_MACSEC),)
netdevsim-objs += macsec.o
endif
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index 2672d071b325..95f66c1f59db 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -851,7 +851,7 @@ static void nsim_dev_trap_report_work(struct work_struct *work)
nsim_dev = nsim_trap_data->nsim_dev;
if (!devl_trylock(priv_to_devlink(nsim_dev))) {
- queue_delayed_work(system_unbound_wq,
+ queue_delayed_work(system_dfl_wq,
&nsim_dev->trap_data->trap_report_dw, 1);
return;
}
@@ -867,7 +867,7 @@ static void nsim_dev_trap_report_work(struct work_struct *work)
cond_resched();
}
devl_unlock(priv_to_devlink(nsim_dev));
- queue_delayed_work(system_unbound_wq,
+ queue_delayed_work(system_dfl_wq,
&nsim_dev->trap_data->trap_report_dw,
msecs_to_jiffies(NSIM_TRAP_REPORT_INTERVAL_MS));
}
@@ -924,7 +924,7 @@ static int nsim_dev_traps_init(struct devlink *devlink)
INIT_DELAYED_WORK(&nsim_dev->trap_data->trap_report_dw,
nsim_dev_trap_report_work);
- queue_delayed_work(system_unbound_wq,
+ queue_delayed_work(system_dfl_wq,
&nsim_dev->trap_data->trap_report_dw,
msecs_to_jiffies(NSIM_TRAP_REPORT_INTERVAL_MS));
diff --git a/drivers/net/netdevsim/ethtool.c b/drivers/net/netdevsim/ethtool.c
index f631d90c428a..36a201533aae 100644
--- a/drivers/net/netdevsim/ethtool.c
+++ b/drivers/net/netdevsim/ethtool.c
@@ -165,11 +165,34 @@ nsim_set_fecparam(struct net_device *dev, struct ethtool_fecparam *fecparam)
return 0;
}
+static const struct ethtool_fec_hist_range netdevsim_fec_ranges[] = {
+ { 0, 0},
+ { 1, 3},
+ { 4, 7},
+ { 0, 0}
+};
+
static void
-nsim_get_fec_stats(struct net_device *dev, struct ethtool_fec_stats *fec_stats)
+nsim_get_fec_stats(struct net_device *dev, struct ethtool_fec_stats *fec_stats,
+ struct ethtool_fec_hist *hist)
{
+ struct ethtool_fec_hist_value *values = hist->values;
+
+ hist->ranges = netdevsim_fec_ranges;
+
fec_stats->corrected_blocks.total = 123;
fec_stats->uncorrectable_blocks.total = 4;
+
+ values[0].per_lane[0] = 125;
+ values[0].per_lane[1] = 120;
+ values[0].per_lane[2] = 100;
+ values[0].per_lane[3] = 100;
+ values[1].sum = 12;
+ values[2].sum = 2;
+ values[2].per_lane[0] = 2;
+ values[2].per_lane[1] = 0;
+ values[2].per_lane[2] = 0;
+ values[2].per_lane[3] = 0;
}
static int nsim_get_ts_info(struct net_device *dev,
diff --git a/drivers/net/netdevsim/health.c b/drivers/net/netdevsim/health.c
index 688f05316b5e..3bd0e7a489c3 100644
--- a/drivers/net/netdevsim/health.c
+++ b/drivers/net/netdevsim/health.c
@@ -183,14 +183,14 @@ int nsim_dev_health_init(struct nsim_dev *nsim_dev, struct devlink *devlink)
health->empty_reporter =
devl_health_reporter_create(devlink,
&nsim_dev_empty_reporter_ops,
- 0, health);
+ health);
if (IS_ERR(health->empty_reporter))
return PTR_ERR(health->empty_reporter);
health->dummy_reporter =
devl_health_reporter_create(devlink,
&nsim_dev_dummy_reporter_ops,
- 0, health);
+ health);
if (IS_ERR(health->dummy_reporter)) {
err = PTR_ERR(health->dummy_reporter);
goto err_empty_reporter_destroy;
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index 0178219f0db5..ebc3833e95b4 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -103,28 +103,42 @@ static int nsim_napi_rx(struct net_device *tx_dev, struct net_device *rx_dev,
static int nsim_forward_skb(struct net_device *tx_dev,
struct net_device *rx_dev,
struct sk_buff *skb,
- struct nsim_rq *rq)
+ struct nsim_rq *rq,
+ struct skb_ext *psp_ext)
{
- return __dev_forward_skb(rx_dev, skb) ?:
- nsim_napi_rx(tx_dev, rx_dev, rq, skb);
+ int ret;
+
+ ret = __dev_forward_skb(rx_dev, skb);
+ if (ret)
+ return ret;
+
+ nsim_psp_handle_ext(skb, psp_ext);
+
+ return nsim_napi_rx(tx_dev, rx_dev, rq, skb);
}
static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct netdevsim *ns = netdev_priv(dev);
+ struct skb_ext *psp_ext = NULL;
struct net_device *peer_dev;
unsigned int len = skb->len;
struct netdevsim *peer_ns;
struct netdev_config *cfg;
struct nsim_rq *rq;
int rxq;
+ int dr;
rcu_read_lock();
if (!nsim_ipsec_tx(ns, skb))
- goto out_drop_free;
+ goto out_drop_any;
peer_ns = rcu_dereference(ns->peer);
if (!peer_ns)
+ goto out_drop_any;
+
+ dr = nsim_do_psp(skb, ns, peer_ns, &psp_ext);
+ if (dr)
goto out_drop_free;
peer_dev = peer_ns->netdev;
@@ -141,7 +155,8 @@ static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb_linearize(skb);
skb_tx_timestamp(skb);
- if (unlikely(nsim_forward_skb(dev, peer_dev, skb, rq) == NET_RX_DROP))
+ if (unlikely(nsim_forward_skb(dev, peer_dev,
+ skb, rq, psp_ext) == NET_RX_DROP))
goto out_drop_cnt;
if (!hrtimer_active(&rq->napi_timer))
@@ -151,8 +166,10 @@ static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev)
dev_dstats_tx_add(dev, len);
return NETDEV_TX_OK;
+out_drop_any:
+ dr = SKB_DROP_REASON_NOT_SPECIFIED;
out_drop_free:
- dev_kfree_skb(skb);
+ kfree_skb_reason(skb, dr);
out_drop_cnt:
rcu_read_unlock();
dev_dstats_tx_dropped(dev);
@@ -1002,6 +1019,7 @@ static void nsim_queue_uninit(struct netdevsim *ns)
static int nsim_init_netdevsim(struct netdevsim *ns)
{
+ struct netdevsim *peer;
struct mock_phc *phc;
int err;
@@ -1036,6 +1054,10 @@ static int nsim_init_netdevsim(struct netdevsim *ns)
goto err_ipsec_teardown;
rtnl_unlock();
+ err = nsim_psp_init(ns);
+ if (err)
+ goto err_unregister_netdev;
+
if (IS_ENABLED(CONFIG_DEBUG_NET)) {
ns->nb.notifier_call = netdev_debug_event;
if (register_netdevice_notifier_dev_net(ns->netdev, &ns->nb,
@@ -1045,6 +1067,13 @@ static int nsim_init_netdevsim(struct netdevsim *ns)
return 0;
+err_unregister_netdev:
+ rtnl_lock();
+ peer = rtnl_dereference(ns->peer);
+ if (peer)
+ RCU_INIT_POINTER(peer->peer, NULL);
+ RCU_INIT_POINTER(ns->peer, NULL);
+ unregister_netdevice(ns->netdev);
err_ipsec_teardown:
nsim_ipsec_teardown(ns);
nsim_macsec_teardown(ns);
@@ -1132,6 +1161,8 @@ void nsim_destroy(struct netdevsim *ns)
unregister_netdevice_notifier_dev_net(ns->netdev, &ns->nb,
&ns->nn);
+ nsim_psp_uninit(ns);
+
rtnl_lock();
peer = rtnl_dereference(ns->peer);
if (peer)
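The xmit rework above replaces dev_kfree_skb() with kfree_skb_reason() and funnels every error path through a label carrying an skb_drop_reason, so the kfree_skb tracepoint and drop monitor can attribute each loss. The pattern in isolation, reduced to the drop plumbing of a hypothetical handler:

static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
{
	enum skb_drop_reason dr = SKB_DROP_REASON_NOT_SPECIFIED;

	if (!netif_running(dev))
		goto drop;

	dev_dstats_tx_add(dev, skb->len);
	return NETDEV_TX_OK;

drop:
	/* the reason is visible in the kfree_skb tracepoint */
	kfree_skb_reason(skb, dr);
	dev_dstats_tx_dropped(dev);
	return NETDEV_TX_OK;
}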
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index bddd24c1389d..02c1c97b7008 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -108,6 +108,12 @@ struct netdevsim {
int rq_reset_mode;
+ struct {
+ struct psp_dev *dev;
+ u32 spi;
+ u32 assoc_cnt;
+ } psp;
+
struct nsim_bus_dev *nsim_bus_dev;
struct bpf_prog *bpf_offloaded;
@@ -421,6 +427,27 @@ static inline void nsim_macsec_teardown(struct netdevsim *ns)
}
#endif
+#if IS_ENABLED(CONFIG_INET_PSP)
+int nsim_psp_init(struct netdevsim *ns);
+void nsim_psp_uninit(struct netdevsim *ns);
+void nsim_psp_handle_ext(struct sk_buff *skb, struct skb_ext *psp_ext);
+enum skb_drop_reason
+nsim_do_psp(struct sk_buff *skb, struct netdevsim *ns,
+ struct netdevsim *peer_ns, struct skb_ext **psp_ext);
+#else
+static inline int nsim_psp_init(struct netdevsim *ns) { return 0; }
+static inline void nsim_psp_uninit(struct netdevsim *ns) {}
+static inline enum skb_drop_reason
+nsim_do_psp(struct sk_buff *skb, struct netdevsim *ns,
+ struct netdevsim *peer_ns, struct skb_ext **psp_ext)
+{
+ return 0;
+}
+
+static inline void
+nsim_psp_handle_ext(struct sk_buff *skb, struct skb_ext *psp_ext) {}
+#endif
+
struct nsim_bus_dev {
struct device dev;
struct list_head list;
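The header additions follow the usual optional-feature idiom: real prototypes under IS_ENABLED(CONFIG_INET_PSP), static inline no-ops otherwise, so call sites compile unchanged and the dead branches fold away. Generic shape, with made-up names:

#if IS_ENABLED(CONFIG_MY_FEATURE)
int my_feature_init(struct my_dev *d);
void my_feature_uninit(struct my_dev *d);
#else
static inline int my_feature_init(struct my_dev *d) { return 0; }
static inline void my_feature_uninit(struct my_dev *d) { }
#endif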
diff --git a/drivers/net/netdevsim/psp.c b/drivers/net/netdevsim/psp.c
new file mode 100644
index 000000000000..332b5b744f01
--- /dev/null
+++ b/drivers/net/netdevsim/psp.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <net/ip6_checksum.h>
+#include <net/psp.h>
+#include <net/sock.h>
+
+#include "netdevsim.h"
+
+void nsim_psp_handle_ext(struct sk_buff *skb, struct skb_ext *psp_ext)
+{
+ if (psp_ext)
+ __skb_ext_set(skb, SKB_EXT_PSP, psp_ext);
+}
+
+enum skb_drop_reason
+nsim_do_psp(struct sk_buff *skb, struct netdevsim *ns,
+ struct netdevsim *peer_ns, struct skb_ext **psp_ext)
+{
+ enum skb_drop_reason rc = 0;
+ struct psp_assoc *pas;
+ struct net *net;
+ void **ptr;
+
+ rcu_read_lock();
+ pas = psp_skb_get_assoc_rcu(skb);
+ if (!pas) {
+ rc = SKB_NOT_DROPPED_YET;
+ goto out_unlock;
+ }
+
+ if (!skb_transport_header_was_set(skb)) {
+ rc = SKB_DROP_REASON_PSP_OUTPUT;
+ goto out_unlock;
+ }
+
+ ptr = psp_assoc_drv_data(pas);
+ if (*ptr != ns) {
+ rc = SKB_DROP_REASON_PSP_OUTPUT;
+ goto out_unlock;
+ }
+
+ net = sock_net(skb->sk);
+ if (!psp_dev_encapsulate(net, skb, pas->tx.spi, pas->version, 0)) {
+ rc = SKB_DROP_REASON_PSP_OUTPUT;
+ goto out_unlock;
+ }
+
+ /* Now pretend we just received this frame */
+ if (peer_ns->psp.dev->config.versions & (1 << pas->version)) {
+ bool strip_icv = false;
+ u8 generation;
+
+ /* We cheat a bit and put the generation in the key.
+ * In real life if generation was too old, then decryption would
+ * fail. Here, we just make it so a bad key causes a bad
+ * generation too, and psp_sk_rx_policy_check() will fail.
+ */
+ generation = pas->tx.key[0];
+
+ skb_ext_reset(skb);
+ skb->mac_len = ETH_HLEN;
+ if (psp_dev_rcv(skb, peer_ns->psp.dev->id, generation,
+ strip_icv)) {
+ rc = SKB_DROP_REASON_PSP_OUTPUT;
+ goto out_unlock;
+ }
+
+ *psp_ext = skb->extensions;
+ refcount_inc(&(*psp_ext)->refcnt);
+ skb->decrypted = 1;
+ } else {
+ struct ipv6hdr *ip6h __maybe_unused;
+ struct iphdr *iph;
+ struct udphdr *uh;
+ __wsum csum;
+
+ /* Do not decapsulate. Receive the skb with the udp and psp
+ * headers still there as if this is a normal udp packet.
+ * psp_dev_encapsulate() sets udp checksum to 0, so we need to
+ * provide a valid checksum here, so the skb isn't dropped.
+ */
+ uh = udp_hdr(skb);
+ csum = skb_checksum(skb, skb_transport_offset(skb),
+ ntohs(uh->len), 0);
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ iph = ip_hdr(skb);
+ uh->check = udp_v4_check(ntohs(uh->len), iph->saddr,
+ iph->daddr, csum);
+ break;
+#if IS_ENABLED(CONFIG_IPV6)
+ case htons(ETH_P_IPV6):
+ ip6h = ipv6_hdr(skb);
+ uh->check = udp_v6_check(ntohs(uh->len), &ip6h->saddr,
+ &ip6h->daddr, csum);
+ break;
+#endif
+ }
+
+ uh->check = uh->check ?: CSUM_MANGLED_0;
+ skb->ip_summed = CHECKSUM_NONE;
+ }
+
+out_unlock:
+ rcu_read_unlock();
+ return rc;
+}
+
+static int
+nsim_psp_set_config(struct psp_dev *psd, struct psp_dev_config *conf,
+ struct netlink_ext_ack *extack)
+{
+ return 0;
+}
+
+static int
+nsim_rx_spi_alloc(struct psp_dev *psd, u32 version,
+ struct psp_key_parsed *assoc,
+ struct netlink_ext_ack *extack)
+{
+ struct netdevsim *ns = psd->drv_priv;
+ unsigned int new;
+ int i;
+
+ new = ++ns->psp.spi & PSP_SPI_KEY_ID;
+ if (psd->generation & 1)
+ new |= PSP_SPI_KEY_PHASE;
+
+ assoc->spi = cpu_to_be32(new);
+ assoc->key[0] = psd->generation;
+ for (i = 1; i < PSP_MAX_KEY; i++)
+ assoc->key[i] = ns->psp.spi + i;
+
+ return 0;
+}
+
+static int nsim_assoc_add(struct psp_dev *psd, struct psp_assoc *pas,
+ struct netlink_ext_ack *extack)
+{
+ struct netdevsim *ns = psd->drv_priv;
+ void **ptr = psp_assoc_drv_data(pas);
+
+ /* Copy drv_priv from psd to assoc */
+ *ptr = psd->drv_priv;
+ ns->psp.assoc_cnt++;
+
+ return 0;
+}
+
+static int nsim_key_rotate(struct psp_dev *psd, struct netlink_ext_ack *extack)
+{
+ return 0;
+}
+
+static void nsim_assoc_del(struct psp_dev *psd, struct psp_assoc *pas)
+{
+ struct netdevsim *ns = psd->drv_priv;
+ void **ptr = psp_assoc_drv_data(pas);
+
+ *ptr = NULL;
+ ns->psp.assoc_cnt--;
+}
+
+static struct psp_dev_ops nsim_psp_ops = {
+ .set_config = nsim_psp_set_config,
+ .rx_spi_alloc = nsim_rx_spi_alloc,
+ .tx_key_add = nsim_assoc_add,
+ .tx_key_del = nsim_assoc_del,
+ .key_rotate = nsim_key_rotate,
+};
+
+static struct psp_dev_caps nsim_psp_caps = {
+ .versions = 1 << PSP_VERSION_HDR0_AES_GCM_128 |
+ 1 << PSP_VERSION_HDR0_AES_GMAC_128 |
+ 1 << PSP_VERSION_HDR0_AES_GCM_256 |
+ 1 << PSP_VERSION_HDR0_AES_GMAC_256,
+ .assoc_drv_spc = sizeof(void *),
+};
+
+void nsim_psp_uninit(struct netdevsim *ns)
+{
+ if (!IS_ERR(ns->psp.dev))
+ psp_dev_unregister(ns->psp.dev);
+ WARN_ON(ns->psp.assoc_cnt);
+}
+
+static ssize_t
+nsim_psp_rereg_write(struct file *file, const char __user *data, size_t count,
+ loff_t *ppos)
+{
+ struct netdevsim *ns = file->private_data;
+ int err;
+
+ nsim_psp_uninit(ns);
+
+ ns->psp.dev = psp_dev_create(ns->netdev, &nsim_psp_ops,
+ &nsim_psp_caps, ns);
+ err = PTR_ERR_OR_ZERO(ns->psp.dev);
+ return err ?: count;
+}
+
+static const struct file_operations nsim_psp_rereg_fops = {
+ .open = simple_open,
+ .write = nsim_psp_rereg_write,
+ .llseek = generic_file_llseek,
+ .owner = THIS_MODULE,
+};
+
+int nsim_psp_init(struct netdevsim *ns)
+{
+ struct dentry *ddir = ns->nsim_dev_port->ddir;
+ int err;
+
+ ns->psp.dev = psp_dev_create(ns->netdev, &nsim_psp_ops,
+ &nsim_psp_caps, ns);
+ err = PTR_ERR_OR_ZERO(ns->psp.dev);
+ if (err)
+ return err;
+
+ debugfs_create_file("psp_rereg", 0200, ddir, ns, &nsim_psp_rereg_fops);
+ return 0;
+}
diff --git a/drivers/net/pcs/Kconfig b/drivers/net/pcs/Kconfig
index f6aa437473de..ecbc3530e780 100644
--- a/drivers/net/pcs/Kconfig
+++ b/drivers/net/pcs/Kconfig
@@ -26,11 +26,12 @@ config PCS_MTK_LYNXI
which is part of MediaTek's SoC and Ethernet switch ICs.
config PCS_RZN1_MIIC
- tristate "Renesas RZ/N1 MII converter"
- depends on OF && (ARCH_RZN1 || COMPILE_TEST)
+ tristate "Renesas RZ/N1, RZ/N2H, RZ/T2H MII converter"
+ depends on OF
+ depends on ARCH_RENESAS || COMPILE_TEST
help
- This module provides a driver for the MII converter that is available
- on RZ/N1 SoCs. This PCS converts MII to RMII/RGMII or can be set in
- pass-through mode for MII.
+ This module provides a driver for the MII converter available on
+ Renesas RZ/N1, RZ/N2H, and RZ/T2H SoCs. This PCS converts MII to
+ RMII/RGMII, or can be set in pass-through mode for MII.
endmenu
diff --git a/drivers/net/pcs/pcs-lynx.c b/drivers/net/pcs/pcs-lynx.c
index 23b40e9eacbb..677f92883976 100644
--- a/drivers/net/pcs/pcs-lynx.c
+++ b/drivers/net/pcs/pcs-lynx.c
@@ -49,6 +49,7 @@ static unsigned int lynx_pcs_inband_caps(struct phylink_pcs *pcs,
return LINK_INBAND_DISABLE;
case PHY_INTERFACE_MODE_USXGMII:
+ case PHY_INTERFACE_MODE_10G_QXGMII:
return LINK_INBAND_ENABLE;
default:
@@ -115,6 +116,7 @@ static void lynx_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
lynx_pcs_get_state_2500basex(lynx->mdio, state);
break;
case PHY_INTERFACE_MODE_USXGMII:
+ case PHY_INTERFACE_MODE_10G_QXGMII:
lynx_pcs_get_state_usxgmii(lynx->mdio, state);
break;
case PHY_INTERFACE_MODE_10GBASER:
@@ -170,6 +172,7 @@ static int lynx_pcs_config_giga(struct mdio_device *pcs,
}
static int lynx_pcs_config_usxgmii(struct mdio_device *pcs,
+ phy_interface_t interface,
const unsigned long *advertising,
unsigned int neg_mode)
{
@@ -177,7 +180,8 @@ static int lynx_pcs_config_usxgmii(struct mdio_device *pcs,
int addr = pcs->addr;
if (neg_mode != PHYLINK_PCS_NEG_INBAND_ENABLED) {
- dev_err(&pcs->dev, "USXGMII only supports in-band AN for now\n");
+ dev_err(&pcs->dev, "%s only supports in-band AN for now\n",
+ phy_modes(interface));
return -EOPNOTSUPP;
}
@@ -208,7 +212,8 @@ static int lynx_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
}
break;
case PHY_INTERFACE_MODE_USXGMII:
- return lynx_pcs_config_usxgmii(lynx->mdio, advertising,
+ case PHY_INTERFACE_MODE_10G_QXGMII:
+ return lynx_pcs_config_usxgmii(lynx->mdio, ifmode, advertising,
neg_mode);
case PHY_INTERFACE_MODE_10GBASER:
/* Nothing to do here for 10GBASER */
@@ -317,6 +322,7 @@ static void lynx_pcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode,
lynx_pcs_link_up_2500basex(lynx->mdio, neg_mode, speed, duplex);
break;
case PHY_INTERFACE_MODE_USXGMII:
+ case PHY_INTERFACE_MODE_10G_QXGMII:
/* At the moment, only in-band AN is supported for USXGMII
* so nothing to do in link_up
*/
@@ -341,6 +347,7 @@ static const phy_interface_t lynx_interfaces[] = {
PHY_INTERFACE_MODE_2500BASEX,
PHY_INTERFACE_MODE_10GBASER,
PHY_INTERFACE_MODE_USXGMII,
+ PHY_INTERFACE_MODE_10G_QXGMII,
};
static struct phylink_pcs *lynx_pcs_create(struct mdio_device *mdio)
diff --git a/drivers/net/pcs/pcs-rzn1-miic.c b/drivers/net/pcs/pcs-rzn1-miic.c
index ce73d9474d5b..885f17c32643 100644
--- a/drivers/net/pcs/pcs-rzn1-miic.c
+++ b/drivers/net/pcs/pcs-rzn1-miic.c
@@ -5,8 +5,12 @@
* Clément Léger <clement.leger@bootlin.com>
*/
+#include <linux/array_size.h>
+#include <linux/bits.h>
+#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/device.h>
+#include <linux/device/devres.h>
#include <linux/mdio.h>
#include <linux/of.h>
#include <linux/of_platform.h>
@@ -14,13 +18,15 @@
#include <linux/phylink.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
#include <dt-bindings/net/pcs-rzn1-miic.h>
+#include <dt-bindings/net/renesas,r9a09g077-pcs-miic.h>
#define MIIC_PRCMD 0x0
#define MIIC_ESID_CODE 0x4
#define MIIC_MODCTRL 0x8
-#define MIIC_MODCTRL_SW_MODE GENMASK(4, 0)
#define MIIC_CONVCTRL(port) (0x100 + (port) * 4)
@@ -46,21 +52,23 @@
#define MIIC_SWCTRL 0x304
#define MIIC_SWDUPC 0x308
-#define MIIC_MAX_NR_PORTS 5
-
-#define MIIC_MODCTRL_CONF_CONV_NUM 6
+#define MIIC_MODCTRL_CONF_CONV_MAX 6
#define MIIC_MODCTRL_CONF_NONE -1
+#define MIIC_MAX_NUM_RSTS 2
+
/**
* struct modctrl_match - Matching table entry for convctrl configuration
* See section 8.2.1 of manual.
* @mode_cfg: Configuration value for convctrl
* @conv: Configuration of ethernet port muxes. First index is SWITCH_PORTIN,
- * then index 1 - 5 are CONV1 - CONV5.
+ * then index 1 - 5 are CONV1 - CONV5 for RZ/N1 SoCs. In case
+ * of RZ/T2H and RZ/N2H SoCs, the first index is SWITCH_PORTIN and
+ * index 1 - 4 are CONV0 - CONV3.
*/
struct modctrl_match {
u32 mode_cfg;
- u8 conv[MIIC_MODCTRL_CONF_CONV_NUM];
+ u8 conv[MIIC_MODCTRL_CONF_CONV_MAX];
};
static struct modctrl_match modctrl_match_table[] = {
@@ -109,7 +117,7 @@ static const char * const conf_to_string[] = {
[MIIC_HSR_PORTB] = "HSR_PORTB",
};
-static const char *index_to_string[MIIC_MODCTRL_CONF_CONV_NUM] = {
+static const char * const index_to_string[] = {
"SWITCH_PORTIN",
"CONV1",
"CONV2",
@@ -118,16 +126,106 @@ static const char *index_to_string[MIIC_MODCTRL_CONF_CONV_NUM] = {
"CONV5",
};
+static struct modctrl_match rzt2h_modctrl_match_table[] = {
+ {0x0, {ETHSS_GMAC0_PORT, ETHSS_ETHSW_PORT0, ETHSS_ETHSW_PORT1,
+ ETHSS_ETHSW_PORT2, ETHSS_GMAC1_PORT}},
+
+ {0x1, {MIIC_MODCTRL_CONF_NONE, ETHSS_ESC_PORT0, ETHSS_ESC_PORT1,
+ ETHSS_GMAC2_PORT, ETHSS_GMAC1_PORT}},
+
+ {0x2, {ETHSS_GMAC0_PORT, ETHSS_ESC_PORT0, ETHSS_ESC_PORT1,
+ ETHSS_ETHSW_PORT2, ETHSS_GMAC1_PORT}},
+
+ {0x3, {MIIC_MODCTRL_CONF_NONE, ETHSS_ESC_PORT0, ETHSS_ESC_PORT1,
+ ETHSS_ESC_PORT2, ETHSS_GMAC1_PORT}},
+
+ {0x4, {ETHSS_GMAC0_PORT, ETHSS_ETHSW_PORT0, ETHSS_ESC_PORT1,
+ ETHSS_ESC_PORT2, ETHSS_GMAC1_PORT}},
+
+ {0x5, {ETHSS_GMAC0_PORT, ETHSS_ETHSW_PORT0, ETHSS_ESC_PORT1,
+ ETHSS_ETHSW_PORT2, ETHSS_GMAC1_PORT}},
+
+ {0x6, {ETHSS_GMAC0_PORT, ETHSS_ETHSW_PORT0, ETHSS_ETHSW_PORT1,
+ ETHSS_GMAC2_PORT, ETHSS_GMAC1_PORT}},
+
+ {0x7, {MIIC_MODCTRL_CONF_NONE, ETHSS_GMAC0_PORT, ETHSS_GMAC1_PORT,
+ ETHSS_GMAC2_PORT, MIIC_MODCTRL_CONF_NONE}}
+};
+
+static const char * const rzt2h_conf_to_string[] = {
+ [ETHSS_GMAC0_PORT] = "GMAC0_PORT",
+ [ETHSS_GMAC1_PORT] = "GMAC1_PORT",
+ [ETHSS_GMAC2_PORT] = "GMAC2_PORT",
+ [ETHSS_ESC_PORT0] = "ETHERCAT_PORT0",
+ [ETHSS_ESC_PORT1] = "ETHERCAT_PORT1",
+ [ETHSS_ESC_PORT2] = "ETHERCAT_PORT2",
+ [ETHSS_ETHSW_PORT0] = "SWITCH_PORT0",
+ [ETHSS_ETHSW_PORT1] = "SWITCH_PORT1",
+ [ETHSS_ETHSW_PORT2] = "SWITCH_PORT2",
+};
+
+static const char * const rzt2h_index_to_string[] = {
+ "SWITCH_PORTIN",
+ "CONV0",
+ "CONV1",
+ "CONV2",
+ "CONV3",
+};
+
+static const char * const rzt2h_reset_ids[] = {
+ "rst",
+ "crst",
+};
+
/**
* struct miic - MII converter structure
* @base: base address of the MII converter
* @dev: Device associated to the MII converter
* @lock: Lock used for read-modify-write access
+ * @rsts: Reset controls for the MII converter
+ * @of_data: Pointer to OF data
*/
struct miic {
void __iomem *base;
struct device *dev;
spinlock_t lock;
+ struct reset_control_bulk_data rsts[MIIC_MAX_NUM_RSTS];
+ const struct miic_of_data *of_data;
+};
+
+/**
+ * struct miic_of_data - OF data for MII converter
+ * @match_table: Matching table for convctrl configuration
+ * @match_table_count: Number of entries in the matching table
+ * @conf_conv_count: Number of entries in the conf_conv array
+ * @conf_to_string: String representations of the configuration values
+ * @conf_to_string_count: Number of entries in the conf_to_string array
+ * @index_to_string: String representations of the index values
+ * @index_to_string_count: Number of entries in the index_to_string array
+ * @miic_port_start: MIIC port start number
+ * @miic_port_max: Maximum MIIC supported
+ * @sw_mode_mask: Switch mode mask
+ * @reset_ids: Reset names array
+ * @reset_count: Number of entries in the reset_ids array
+ * @init_unlock_lock_regs: Flag to indicate if registers need to be unlocked
+ * before access.
+ * @miic_write: Function pointer to write a value to a MIIC register
+ */
+struct miic_of_data {
+ struct modctrl_match *match_table;
+ u8 match_table_count;
+ u8 conf_conv_count;
+ const char * const *conf_to_string;
+ u8 conf_to_string_count;
+ const char * const *index_to_string;
+ u8 index_to_string_count;
+ u8 miic_port_start;
+ u8 miic_port_max;
+ u8 sw_mode_mask;
+ const char * const *reset_ids;
+ u8 reset_count;
+ bool init_unlock_lock_regs;
+ void (*miic_write)(struct miic *miic, int offset, u32 value);
};
/**
@@ -149,9 +247,36 @@ static struct miic_port *phylink_pcs_to_miic_port(struct phylink_pcs *pcs)
return container_of(pcs, struct miic_port, pcs);
}
-static void miic_reg_writel(struct miic *miic, int offset, u32 value)
+static void miic_unlock_regs(struct miic *miic)
+{
+ /* Unprotect register writes */
+ writel(0x00A5, miic->base + MIIC_PRCMD);
+ writel(0x0001, miic->base + MIIC_PRCMD);
+ writel(0xFFFE, miic->base + MIIC_PRCMD);
+ writel(0x0001, miic->base + MIIC_PRCMD);
+}
+
+static void miic_lock_regs(struct miic *miic)
+{
+ /* Protect register writes */
+ writel(0x0000, miic->base + MIIC_PRCMD);
+}
+
+static void miic_reg_writel_unlocked(struct miic *miic, int offset, u32 value)
+{
+ writel(value, miic->base + offset);
+}
+
+static void miic_reg_writel_locked(struct miic *miic, int offset, u32 value)
{
+ miic_unlock_regs(miic);
writel(value, miic->base + offset);
+ miic_lock_regs(miic);
+}
+
+static void miic_reg_writel(struct miic *miic, int offset, u32 value)
+{
+ miic->of_data->miic_write(miic, offset, value);
}
static u32 miic_reg_readl(struct miic *miic, int offset)
@@ -303,6 +428,7 @@ static const struct phylink_pcs_ops miic_phylink_ops = {
struct phylink_pcs *miic_create(struct device *dev, struct device_node *np)
{
+ const struct miic_of_data *of_data;
struct platform_device *pdev;
struct miic_port *miic_port;
struct device_node *pcs_np;
@@ -315,9 +441,6 @@ struct phylink_pcs *miic_create(struct device *dev, struct device_node *np)
if (of_property_read_u32(np, "reg", &port))
return ERR_PTR(-EINVAL);
- if (port > MIIC_MAX_NR_PORTS || port < 1)
- return ERR_PTR(-EINVAL);
-
/* The PCS pdev is attached to the parent node */
pcs_np = of_get_parent(np);
if (!pcs_np)
@@ -336,18 +459,24 @@ struct phylink_pcs *miic_create(struct device *dev, struct device_node *np)
return ERR_PTR(-EPROBE_DEFER);
}
+ miic = platform_get_drvdata(pdev);
+ of_data = miic->of_data;
+ if (port > of_data->miic_port_max || port < of_data->miic_port_start) {
+ put_device(&pdev->dev);
+ return ERR_PTR(-EINVAL);
+ }
+
miic_port = kzalloc(sizeof(*miic_port), GFP_KERNEL);
if (!miic_port) {
put_device(&pdev->dev);
return ERR_PTR(-ENOMEM);
}
- miic = platform_get_drvdata(pdev);
device_link_add(dev, miic->dev, DL_FLAG_AUTOREMOVE_CONSUMER);
put_device(&pdev->dev);
miic_port->miic = miic;
- miic_port->port = port - 1;
+ miic_port->port = port - of_data->miic_port_start;
miic_port->pcs.ops = &miic_phylink_ops;
phy_interface_set_rgmii(miic_port->pcs.supported_interfaces);
@@ -369,21 +498,23 @@ EXPORT_SYMBOL(miic_destroy);
static int miic_init_hw(struct miic *miic, u32 cfg_mode)
{
+ u8 sw_mode_mask = miic->of_data->sw_mode_mask;
int port;
/* Unlock write access to accessory registers (cf datasheet). If this
* is going to be used in conjunction with the Cortex-M3, this sequence
* will have to be moved in register write
*/
- miic_reg_writel(miic, MIIC_PRCMD, 0x00A5);
- miic_reg_writel(miic, MIIC_PRCMD, 0x0001);
- miic_reg_writel(miic, MIIC_PRCMD, 0xFFFE);
- miic_reg_writel(miic, MIIC_PRCMD, 0x0001);
+ if (miic->of_data->init_unlock_lock_regs)
+ miic_unlock_regs(miic);
+ /* TODO: Replace with FIELD_PREP() when compile-time constant
+ * restriction is lifted. Currently __ffs() returns 0 for sw_mode_mask.
+ */
miic_reg_writel(miic, MIIC_MODCTRL,
- FIELD_PREP(MIIC_MODCTRL_SW_MODE, cfg_mode));
+ ((cfg_mode << __ffs(sw_mode_mask)) & sw_mode_mask));
- for (port = 0; port < MIIC_MAX_NR_PORTS; port++) {
+ for (port = 0; port < miic->of_data->miic_port_max; port++) {
miic_converter_enable(miic, port, 0);
/* Disable speed/duplex control from these registers, datasheet
* says switch registers should be used to setup switch port
@@ -396,12 +527,11 @@ static int miic_init_hw(struct miic *miic, u32 cfg_mode)
return 0;
}
-static bool miic_modctrl_match(s8 table_val[MIIC_MODCTRL_CONF_CONV_NUM],
- s8 dt_val[MIIC_MODCTRL_CONF_CONV_NUM])
+static bool miic_modctrl_match(s8 *table_val, s8 *dt_val, u8 count)
{
int i;
- for (i = 0; i < MIIC_MODCTRL_CONF_CONV_NUM; i++) {
+ for (i = 0; i < count; i++) {
if (dt_val[i] == MIIC_MODCTRL_CONF_NONE)
continue;
@@ -412,53 +542,59 @@ static bool miic_modctrl_match(s8 table_val[MIIC_MODCTRL_CONF_CONV_NUM],
return true;
}
-static void miic_dump_conf(struct device *dev,
- s8 conf[MIIC_MODCTRL_CONF_CONV_NUM])
+static void miic_dump_conf(struct miic *miic, s8 *conf)
{
+ const struct miic_of_data *of_data = miic->of_data;
const char *conf_name;
int i;
- for (i = 0; i < MIIC_MODCTRL_CONF_CONV_NUM; i++) {
+ for (i = 0; i < of_data->conf_conv_count; i++) {
if (conf[i] != MIIC_MODCTRL_CONF_NONE)
- conf_name = conf_to_string[conf[i]];
+ conf_name = of_data->conf_to_string[conf[i]];
else
conf_name = "NONE";
- dev_err(dev, "%s: %s\n", index_to_string[i], conf_name);
+ dev_err(miic->dev, "%s: %s\n",
+ of_data->index_to_string[i], conf_name);
}
}
-static int miic_match_dt_conf(struct device *dev,
- s8 dt_val[MIIC_MODCTRL_CONF_CONV_NUM],
- u32 *mode_cfg)
+static int miic_match_dt_conf(struct miic *miic, s8 *dt_val, u32 *mode_cfg)
{
+ const struct miic_of_data *of_data = miic->of_data;
struct modctrl_match *table_entry;
int i;
- for (i = 0; i < ARRAY_SIZE(modctrl_match_table); i++) {
- table_entry = &modctrl_match_table[i];
+ for (i = 0; i < of_data->match_table_count; i++) {
+ table_entry = &of_data->match_table[i];
- if (miic_modctrl_match(table_entry->conv, dt_val)) {
+ if (miic_modctrl_match(table_entry->conv, dt_val,
+ miic->of_data->conf_conv_count)) {
*mode_cfg = table_entry->mode_cfg;
return 0;
}
}
- dev_err(dev, "Failed to apply requested configuration\n");
- miic_dump_conf(dev, dt_val);
+ dev_err(miic->dev, "Failed to apply requested configuration\n");
+ miic_dump_conf(miic, dt_val);
return -EINVAL;
}
-static int miic_parse_dt(struct device *dev, u32 *mode_cfg)
+static int miic_parse_dt(struct miic *miic, u32 *mode_cfg)
{
- s8 dt_val[MIIC_MODCTRL_CONF_CONV_NUM];
- struct device_node *np = dev->of_node;
+ struct device_node *np = miic->dev->of_node;
struct device_node *conv;
+ int port, ret;
+ s8 *dt_val;
u32 conf;
- int port;
- memset(dt_val, MIIC_MODCTRL_CONF_NONE, sizeof(dt_val));
+ dt_val = kmalloc_array(miic->of_data->conf_conv_count,
+ sizeof(*dt_val), GFP_KERNEL);
+ if (!dt_val)
+ return -ENOMEM;
+
+	memset(dt_val, MIIC_MODCTRL_CONF_NONE,
+	       miic->of_data->conf_conv_count * sizeof(*dt_val));
if (of_property_read_u32(np, "renesas,miic-switch-portin", &conf) == 0)
dt_val[0] = conf;
@@ -467,11 +603,58 @@ static int miic_parse_dt(struct device *dev, u32 *mode_cfg)
if (of_property_read_u32(conv, "reg", &port))
continue;
+		/* Ports are numbered from 0 on some variants
+		 * (miic_port_start == 0); shift by one so dt_val[0] stays
+		 * reserved for the switch port-in entry.
+		 */
+ port += !miic->of_data->miic_port_start;
if (of_property_read_u32(conv, "renesas,miic-input", &conf) == 0)
dt_val[port] = conf;
}
- return miic_match_dt_conf(dev, dt_val, mode_cfg);
+ ret = miic_match_dt_conf(miic, dt_val, mode_cfg);
+ kfree(dt_val);
+
+ return ret;
+}
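
Note that with dt_val now heap-allocated, the whole buffer must be initialized explicitly; sizeof(*dt_val) is a single byte, so the length has to be scaled by the element count. A standalone sketch of the pattern (names hypothetical):

/* Sketch of the allocate/initialize/free pattern used above. */
s8 *buf;

buf = kmalloc_array(count, sizeof(*buf), GFP_KERNEL);
if (!buf)
	return -ENOMEM;

/* Fill every element, not just the first byte. */
memset(buf, MIIC_MODCTRL_CONF_NONE, count * sizeof(*buf));

/* ... use buf ... */
kfree(buf);
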
+
+static void miic_reset_control_bulk_assert(void *data)
+{
+ struct miic *miic = data;
+ int ret;
+
+ ret = reset_control_bulk_assert(miic->of_data->reset_count, miic->rsts);
+ if (ret)
+ dev_err(miic->dev, "failed to assert reset lines\n");
+}
+
+static int miic_reset_control_init(struct miic *miic)
+{
+ const struct miic_of_data *of_data = miic->of_data;
+ struct device *dev = miic->dev;
+ int ret;
+ u8 i;
+
+ if (!of_data->reset_count)
+ return 0;
+
+ for (i = 0; i < of_data->reset_count; i++)
+ miic->rsts[i].id = of_data->reset_ids[i];
+
+ ret = devm_reset_control_bulk_get_exclusive(dev, of_data->reset_count,
+ miic->rsts);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to get bulk reset lines\n");
+
+ ret = reset_control_bulk_deassert(of_data->reset_count, miic->rsts);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to deassert reset lines\n");
+
+ ret = devm_add_action_or_reset(dev, miic_reset_control_bulk_assert,
+ miic);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to add reset action\n");
+
+ return 0;
}
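
devm_add_action_or_reset() makes the reset assert part of devres: the callback runs automatically on unbind (or on a later probe failure), in reverse registration order, and runs immediately if the registration itself fails. A condensed usage sketch, with the teardown name hypothetical:

/* teardown() runs on driver unbind, or at once if registration fails. */
static void teardown(void *data)
{
	struct miic *miic = data;

	/* hypothetical cleanup, mirroring the assert callback above */
	reset_control_bulk_assert(miic->of_data->reset_count, miic->rsts);
}

ret = devm_add_action_or_reset(dev, teardown, miic);
if (ret)
	return ret;	/* teardown(miic) has already been called */
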
static int miic_probe(struct platform_device *pdev)
@@ -481,20 +664,26 @@ static int miic_probe(struct platform_device *pdev)
u32 mode_cfg;
int ret;
- ret = miic_parse_dt(dev, &mode_cfg);
- if (ret < 0)
- return ret;
-
miic = devm_kzalloc(dev, sizeof(*miic), GFP_KERNEL);
if (!miic)
return -ENOMEM;
- spin_lock_init(&miic->lock);
+ miic->of_data = of_device_get_match_data(dev);
miic->dev = dev;
+
+ ret = miic_parse_dt(miic, &mode_cfg);
+ if (ret < 0)
+ return ret;
+
+ spin_lock_init(&miic->lock);
miic->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(miic->base))
return PTR_ERR(miic->base);
+ ret = miic_reset_control_init(miic);
+ if (ret)
+ return ret;
+
ret = devm_pm_runtime_enable(dev);
if (ret < 0)
return ret;
@@ -527,9 +716,41 @@ static void miic_remove(struct platform_device *pdev)
pm_runtime_put(&pdev->dev);
}
+static struct miic_of_data rzn1_miic_of_data = {
+ .match_table = modctrl_match_table,
+ .match_table_count = ARRAY_SIZE(modctrl_match_table),
+ .conf_conv_count = MIIC_MODCTRL_CONF_CONV_MAX,
+ .conf_to_string = conf_to_string,
+ .conf_to_string_count = ARRAY_SIZE(conf_to_string),
+ .index_to_string = index_to_string,
+ .index_to_string_count = ARRAY_SIZE(index_to_string),
+ .miic_port_start = 1,
+ .miic_port_max = 5,
+ .sw_mode_mask = GENMASK(4, 0),
+ .init_unlock_lock_regs = true,
+ .miic_write = miic_reg_writel_unlocked,
+};
+
+static struct miic_of_data rzt2h_miic_of_data = {
+ .match_table = rzt2h_modctrl_match_table,
+ .match_table_count = ARRAY_SIZE(rzt2h_modctrl_match_table),
+ .conf_conv_count = 5,
+ .conf_to_string = rzt2h_conf_to_string,
+ .conf_to_string_count = ARRAY_SIZE(rzt2h_conf_to_string),
+ .index_to_string = rzt2h_index_to_string,
+ .index_to_string_count = ARRAY_SIZE(rzt2h_index_to_string),
+ .miic_port_start = 0,
+ .miic_port_max = 4,
+ .sw_mode_mask = GENMASK(2, 0),
+ .reset_ids = rzt2h_reset_ids,
+ .reset_count = ARRAY_SIZE(rzt2h_reset_ids),
+ .miic_write = miic_reg_writel_locked,
+};
+
static const struct of_device_id miic_of_mtable[] = {
- { .compatible = "renesas,rzn1-miic" },
- { /* sentinel */ },
+ { .compatible = "renesas,r9a09g077-miic", .data = &rzt2h_miic_of_data },
+ { .compatible = "renesas,rzn1-miic", .data = &rzn1_miic_of_data },
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, miic_of_mtable);
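
Attaching per-SoC data to the compatible entries keeps the probe path generic: of_device_get_match_data() hands back the .data pointer of whichever entry matched. A sketch of the lookup, mirroring the miic_probe() change above (example_probe is a hypothetical name):

/* Sketch: retrieving the matched per-SoC data in probe. */
static int example_probe(struct platform_device *pdev)
{
	const struct miic_of_data *of_data;

	/* rzn1_miic_of_data or rzt2h_miic_of_data, depending on the
	 * matched compatible; NULL if the entry carried no data.
	 */
	of_data = of_device_get_match_data(&pdev->dev);
	if (!of_data)
		return -EINVAL;

	return 0;
}
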
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 392749aae54d..98700d069191 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -3,6 +3,11 @@
# PHY Layer Configuration
#
+config MDIO_BUS
+ tristate "MDIO bus consumer layer"
+ help
+ MDIO bus consumer layer
+
config PHYLINK
tristate
select PHYLIB
@@ -298,7 +303,7 @@ config MICREL_PHY
depends on PTP_1588_CLOCK_OPTIONAL
select PHY_PACKAGE
help
- Supports the KSZ9021, VSC8201, KS8001 PHYs.
+ Supports the KSZ8xxx, KSZ9xxx, and LAN88xx families of Micrel/Microchip PHYs.
config MICROCHIP_T1S_PHY
tristate "Microchip 10BASE-T1S Ethernet PHYs"
@@ -465,7 +470,3 @@ config XILINX_GMII2RGMII
Ethernet physical media devices and the Gigabit Ethernet controller.
endif # PHYLIB
-
-config MICREL_KS8995MA
- tristate "Micrel KS8995MA 5-ports 10/100 managed Ethernet switch"
- depends on SPI
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index b4795aaf9c1c..76e0db40f879 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -8,7 +8,7 @@ mdio-bus-y += mdio_bus.o mdio_device.o
ifdef CONFIG_PHYLIB
# built-in whenever PHYLIB is built-in or module
-obj-y += stubs.o mdio-boardinfo.o
+obj-y += stubs.o
endif
libphy-$(CONFIG_SWPHY) += swphy.o
@@ -72,7 +72,6 @@ obj-$(CONFIG_MAXLINEAR_GPHY) += mxl-gpy.o
obj-$(CONFIG_MAXLINEAR_86110_PHY) += mxl-86110.o
obj-y += mediatek/
obj-$(CONFIG_MESON_GXL_PHY) += meson-gxl.o
-obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o
obj-$(CONFIG_MICREL_PHY) += micrel.o
obj-$(CONFIG_MICROCHIP_PHY) += microchip.o
obj-$(CONFIG_MICROCHIP_PHY_RDS_PTP) += microchip_rds_ptp.o
diff --git a/drivers/net/phy/aquantia/aquantia.h b/drivers/net/phy/aquantia/aquantia.h
index 0c78bfabace5..31427ee343e3 100644
--- a/drivers/net/phy/aquantia/aquantia.h
+++ b/drivers/net/phy/aquantia/aquantia.h
@@ -55,6 +55,7 @@
#define VEND1_GLOBAL_CFG_SERDES_MODE_SGMII 3
#define VEND1_GLOBAL_CFG_SERDES_MODE_OCSGMII 4
#define VEND1_GLOBAL_CFG_SERDES_MODE_XFI5G 6
+#define VEND1_GLOBAL_CFG_AUTONEG_ENA BIT(3)
#define VEND1_GLOBAL_CFG_RATE_ADAPT GENMASK(8, 7)
#define VEND1_GLOBAL_CFG_RATE_ADAPT_NONE 0
#define VEND1_GLOBAL_CFG_RATE_ADAPT_USX 1
@@ -152,6 +153,28 @@
#define AQR_MAX_LEDS 3
+/* Custom driver definitions for constructing a single variable out of
+ * aggregate firmware build information. These do not represent hardware
+ * fields.
+ */
+#define AQR_FW_FINGERPRINT_MAJOR GENMASK_ULL(63, 56)
+#define AQR_FW_FINGERPRINT_MINOR GENMASK_ULL(55, 48)
+#define AQR_FW_FINGERPRINT_BUILD_ID GENMASK_ULL(47, 40)
+#define AQR_FW_FINGERPRINT_PROV_ID GENMASK_ULL(39, 32)
+#define AQR_FW_FINGERPRINT_MISC_ID GENMASK_ULL(31, 16)
+#define AQR_FW_FINGERPRINT_MISC_VER GENMASK_ULL(15, 0)
+#define AQR_FW_FINGERPRINT(major, minor, build_id, prov_id, misc_id, misc_ver) \
+ (FIELD_PREP(AQR_FW_FINGERPRINT_MAJOR, major) | \
+ FIELD_PREP(AQR_FW_FINGERPRINT_MINOR, minor) | \
+ FIELD_PREP(AQR_FW_FINGERPRINT_BUILD_ID, build_id) | \
+ FIELD_PREP(AQR_FW_FINGERPRINT_PROV_ID, prov_id) | \
+ FIELD_PREP(AQR_FW_FINGERPRINT_MISC_ID, misc_id) | \
+ FIELD_PREP(AQR_FW_FINGERPRINT_MISC_VER, misc_ver))
+
+/* 10G-QXGMII firmware for NXP SPF-30841 riser board (AQR412C) */
+#define AQR_G3_V4_3_C_AQR_NXP_SPF_30841_MUSX_ID40019_VER1198 \
+ AQR_FW_FINGERPRINT(4, 3, 0xc, 1, 40019, 1198)
+
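
Worked example of the packing, using the NXP SPF-30841 define above; the arithmetic follows directly from the field layout:

/* AQR_FW_FINGERPRINT(4, 3, 0xc, 1, 40019, 1198) evaluates to:
 *   MAJOR    = 4      -> 0x04   << 56
 *   MINOR    = 3      -> 0x03   << 48
 *   BUILD_ID = 0xc    -> 0x0c   << 40
 *   PROV_ID  = 1      -> 0x01   << 32
 *   MISC_ID  = 40019  -> 0x9c53 << 16
 *   MISC_VER = 1198   -> 0x04ae
 * i.e. a single u64 of 0x04030c019c5304ae, comparable in one operation.
 */
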
struct aqr107_hw_stat {
const char *name;
int reg;
@@ -174,10 +197,39 @@ static const struct aqr107_hw_stat aqr107_hw_stats[] = {
#define AQR107_SGMII_STAT_SZ ARRAY_SIZE(aqr107_hw_stats)
+static const struct {
+ int speed;
+ u16 reg;
+} aqr_global_cfg_regs[] = {
+ { SPEED_10, VEND1_GLOBAL_CFG_10M, },
+ { SPEED_100, VEND1_GLOBAL_CFG_100M, },
+ { SPEED_1000, VEND1_GLOBAL_CFG_1G, },
+ { SPEED_2500, VEND1_GLOBAL_CFG_2_5G, },
+ { SPEED_5000, VEND1_GLOBAL_CFG_5G, },
+ { SPEED_10000, VEND1_GLOBAL_CFG_10G, },
+};
+
+#define AQR_NUM_GLOBAL_CFG ARRAY_SIZE(aqr_global_cfg_regs)
+
+enum aqr_rate_adaptation {
+ AQR_RATE_ADAPT_NONE,
+ AQR_RATE_ADAPT_USX,
+ AQR_RATE_ADAPT_PAUSE,
+};
+
+struct aqr_global_syscfg {
+ int speed;
+ phy_interface_t interface;
+ enum aqr_rate_adaptation rate_adapt;
+};
+
struct aqr107_priv {
u64 sgmii_stats[AQR107_SGMII_STAT_SZ];
+ u64 fingerprint;
unsigned long leds_active_low;
unsigned long leds_active_high;
+ bool wait_on_global_cfg;
+ struct aqr_global_syscfg global_cfg[AQR_NUM_GLOBAL_CFG];
};
#if IS_REACHABLE(CONFIG_HWMON)
diff --git a/drivers/net/phy/aquantia/aquantia_main.c b/drivers/net/phy/aquantia/aquantia_main.c
index 77a48635d7bf..41f3676c7f1e 100644
--- a/drivers/net/phy/aquantia/aquantia_main.c
+++ b/drivers/net/phy/aquantia/aquantia_main.c
@@ -26,13 +26,18 @@
#define PHY_ID_AQR111 0x03a1b610
#define PHY_ID_AQR111B0 0x03a1b612
#define PHY_ID_AQR112 0x03a1b662
-#define PHY_ID_AQR412 0x03a1b712
+#define PHY_ID_AQR412 0x03a1b6f2
+#define PHY_ID_AQR412C 0x03a1b712
#define PHY_ID_AQR113 0x31c31c40
#define PHY_ID_AQR113C 0x31c31c12
#define PHY_ID_AQR114C 0x31c31c22
+#define PHY_ID_AQR115 0x31c31c63
#define PHY_ID_AQR115C 0x31c31c33
#define PHY_ID_AQR813 0x31c31cb2
+#define MDIO_PHYXS_VEND_PROV2 0xc441
+#define MDIO_PHYXS_VEND_PROV2_USX_AN BIT(3)
+
#define MDIO_PHYXS_VEND_IF_STATUS 0xe812
#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_MASK GENMASK(7, 3)
#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_KR 0
@@ -83,6 +88,9 @@
#define MDIO_AN_TX_VEND_INT_MASK2 0xd401
#define MDIO_AN_TX_VEND_INT_MASK2_LINK BIT(0)
+#define PMAPMD_FW_MISC_ID 0xc41d
+#define PMAPMD_FW_MISC_VER 0xc41e
+
#define PMAPMD_RSVD_VEND_PROV 0xe400
#define PMAPMD_RSVD_VEND_PROV_MDI_CONF GENMASK(1, 0)
#define PMAPMD_RSVD_VEND_PROV_MDI_REVERSE BIT(0)
@@ -465,7 +473,7 @@ static int aqr105_config_aneg(struct phy_device *phydev)
return genphy_c45_check_and_restart_aneg(phydev, changed);
}
-static int aqr105_read_rate(struct phy_device *phydev)
+static int aqr_gen1_read_rate(struct phy_device *phydev)
{
int val;
@@ -504,8 +512,31 @@ static int aqr105_read_rate(struct phy_device *phydev)
return 0;
}
-static int aqr105_read_status(struct phy_device *phydev)
+/* Quad port PHYs like AQR412(C) have 4 system interfaces, but they can also be
+ * used with a single system interface over which all 4 ports are multiplexed
+ * (10G-QXGMII). To the MDIO registers, this mode is indistinguishable from
+ * USXGMII (which implies a single 10G port).
+ *
+ * To not rely solely on the device tree, we allow the regular system interface
+ * detection to work as usual, but we replace USXGMII with 10G-QXGMII based on
+ * the specific fingerprint of firmware images that are known to be for MUSX.
+ */
+static phy_interface_t aqr_translate_interface(struct phy_device *phydev,
+ phy_interface_t interface)
{
+ struct aqr107_priv *priv = phydev->priv;
+
+ if (phy_id_compare(phydev->drv->phy_id, PHY_ID_AQR412C, phydev->drv->phy_id_mask) &&
+ priv->fingerprint == AQR_G3_V4_3_C_AQR_NXP_SPF_30841_MUSX_ID40019_VER1198 &&
+ interface == PHY_INTERFACE_MODE_USXGMII)
+ return PHY_INTERFACE_MODE_10G_QXGMII;
+
+ return interface;
+}
+
+static int aqr_gen1_read_status(struct phy_device *phydev)
+{
+ phy_interface_t interface;
int ret;
int val;
@@ -531,155 +562,65 @@ static int aqr105_read_status(struct phy_device *phydev)
switch (FIELD_GET(MDIO_PHYXS_VEND_IF_STATUS_TYPE_MASK, val)) {
case MDIO_PHYXS_VEND_IF_STATUS_TYPE_KR:
- phydev->interface = PHY_INTERFACE_MODE_10GKR;
+ interface = PHY_INTERFACE_MODE_10GKR;
break;
case MDIO_PHYXS_VEND_IF_STATUS_TYPE_KX:
- phydev->interface = PHY_INTERFACE_MODE_1000BASEKX;
+ interface = PHY_INTERFACE_MODE_1000BASEKX;
break;
case MDIO_PHYXS_VEND_IF_STATUS_TYPE_XFI:
- phydev->interface = PHY_INTERFACE_MODE_10GBASER;
+ interface = PHY_INTERFACE_MODE_10GBASER;
break;
case MDIO_PHYXS_VEND_IF_STATUS_TYPE_USXGMII:
- phydev->interface = PHY_INTERFACE_MODE_USXGMII;
+ interface = PHY_INTERFACE_MODE_USXGMII;
break;
case MDIO_PHYXS_VEND_IF_STATUS_TYPE_XAUI:
- phydev->interface = PHY_INTERFACE_MODE_XAUI;
+ interface = PHY_INTERFACE_MODE_XAUI;
break;
case MDIO_PHYXS_VEND_IF_STATUS_TYPE_SGMII:
- phydev->interface = PHY_INTERFACE_MODE_SGMII;
+ interface = PHY_INTERFACE_MODE_SGMII;
break;
case MDIO_PHYXS_VEND_IF_STATUS_TYPE_RXAUI:
- phydev->interface = PHY_INTERFACE_MODE_RXAUI;
+ interface = PHY_INTERFACE_MODE_RXAUI;
break;
case MDIO_PHYXS_VEND_IF_STATUS_TYPE_OCSGMII:
- phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
+ interface = PHY_INTERFACE_MODE_2500BASEX;
break;
case MDIO_PHYXS_VEND_IF_STATUS_TYPE_OFF:
default:
phydev->link = false;
- phydev->interface = PHY_INTERFACE_MODE_NA;
+ interface = PHY_INTERFACE_MODE_NA;
break;
}
- /* Read rate from vendor register */
- return aqr105_read_rate(phydev);
-}
-
-static int aqr107_read_rate(struct phy_device *phydev)
-{
- u32 config_reg;
- int val;
+ phydev->interface = aqr_translate_interface(phydev, interface);
- val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_TX_VEND_STATUS1);
- if (val < 0)
- return val;
-
- if (val & MDIO_AN_TX_VEND_STATUS1_FULL_DUPLEX)
- phydev->duplex = DUPLEX_FULL;
- else
- phydev->duplex = DUPLEX_HALF;
-
- switch (FIELD_GET(MDIO_AN_TX_VEND_STATUS1_RATE_MASK, val)) {
- case MDIO_AN_TX_VEND_STATUS1_10BASET:
- phydev->speed = SPEED_10;
- config_reg = VEND1_GLOBAL_CFG_10M;
- break;
- case MDIO_AN_TX_VEND_STATUS1_100BASETX:
- phydev->speed = SPEED_100;
- config_reg = VEND1_GLOBAL_CFG_100M;
- break;
- case MDIO_AN_TX_VEND_STATUS1_1000BASET:
- phydev->speed = SPEED_1000;
- config_reg = VEND1_GLOBAL_CFG_1G;
- break;
- case MDIO_AN_TX_VEND_STATUS1_2500BASET:
- phydev->speed = SPEED_2500;
- config_reg = VEND1_GLOBAL_CFG_2_5G;
- break;
- case MDIO_AN_TX_VEND_STATUS1_5000BASET:
- phydev->speed = SPEED_5000;
- config_reg = VEND1_GLOBAL_CFG_5G;
- break;
- case MDIO_AN_TX_VEND_STATUS1_10GBASET:
- phydev->speed = SPEED_10000;
- config_reg = VEND1_GLOBAL_CFG_10G;
- break;
- default:
- phydev->speed = SPEED_UNKNOWN;
- return 0;
- }
-
- val = phy_read_mmd(phydev, MDIO_MMD_VEND1, config_reg);
- if (val < 0)
- return val;
-
- if (FIELD_GET(VEND1_GLOBAL_CFG_RATE_ADAPT, val) ==
- VEND1_GLOBAL_CFG_RATE_ADAPT_PAUSE)
- phydev->rate_matching = RATE_MATCH_PAUSE;
- else
- phydev->rate_matching = RATE_MATCH_NONE;
-
- return 0;
+ /* Read rate from vendor register */
+ return aqr_gen1_read_rate(phydev);
}
-static int aqr107_read_status(struct phy_device *phydev)
+static int aqr_gen2_read_status(struct phy_device *phydev)
{
- int val, ret;
+ struct aqr107_priv *priv = phydev->priv;
+ int i, ret;
- ret = aqr_read_status(phydev);
+ ret = aqr_gen1_read_status(phydev);
if (ret)
return ret;
- if (!phydev->link || phydev->autoneg == AUTONEG_DISABLE)
- return 0;
+ for (i = 0; i < AQR_NUM_GLOBAL_CFG; i++) {
+ struct aqr_global_syscfg *syscfg = &priv->global_cfg[i];
- /* The status register is not immediately correct on line side link up.
- * Poll periodically until it reflects the correct ON state.
- * Only return fail for read error, timeout defaults to OFF state.
- */
- ret = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_PHYXS,
- MDIO_PHYXS_VEND_IF_STATUS, val,
- (FIELD_GET(MDIO_PHYXS_VEND_IF_STATUS_TYPE_MASK, val) !=
- MDIO_PHYXS_VEND_IF_STATUS_TYPE_OFF),
- AQR107_OP_IN_PROG_SLEEP,
- AQR107_OP_IN_PROG_TIMEOUT, false);
- if (ret && ret != -ETIMEDOUT)
- return ret;
+ if (syscfg->speed != phydev->speed)
+ continue;
- switch (FIELD_GET(MDIO_PHYXS_VEND_IF_STATUS_TYPE_MASK, val)) {
- case MDIO_PHYXS_VEND_IF_STATUS_TYPE_KR:
- phydev->interface = PHY_INTERFACE_MODE_10GKR;
- break;
- case MDIO_PHYXS_VEND_IF_STATUS_TYPE_KX:
- phydev->interface = PHY_INTERFACE_MODE_1000BASEKX;
- break;
- case MDIO_PHYXS_VEND_IF_STATUS_TYPE_XFI:
- phydev->interface = PHY_INTERFACE_MODE_10GBASER;
- break;
- case MDIO_PHYXS_VEND_IF_STATUS_TYPE_USXGMII:
- phydev->interface = PHY_INTERFACE_MODE_USXGMII;
- break;
- case MDIO_PHYXS_VEND_IF_STATUS_TYPE_XAUI:
- phydev->interface = PHY_INTERFACE_MODE_XAUI;
- break;
- case MDIO_PHYXS_VEND_IF_STATUS_TYPE_SGMII:
- phydev->interface = PHY_INTERFACE_MODE_SGMII;
- break;
- case MDIO_PHYXS_VEND_IF_STATUS_TYPE_RXAUI:
- phydev->interface = PHY_INTERFACE_MODE_RXAUI;
- break;
- case MDIO_PHYXS_VEND_IF_STATUS_TYPE_OCSGMII:
- phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
- break;
- case MDIO_PHYXS_VEND_IF_STATUS_TYPE_OFF:
- default:
- phydev->link = false;
- phydev->interface = PHY_INTERFACE_MODE_NA;
+ if (syscfg->rate_adapt == AQR_RATE_ADAPT_PAUSE)
+ phydev->rate_matching = RATE_MATCH_PAUSE;
+ else
+ phydev->rate_matching = RATE_MATCH_NONE;
break;
}
- /* Read possibly downshifted rate from vendor register */
- return aqr107_read_rate(phydev);
+ return 0;
}
static int aqr107_get_downshift(struct phy_device *phydev, u8 *data)
@@ -764,27 +705,46 @@ int aqr_wait_reset_complete(struct phy_device *phydev)
return ret;
}
-static void aqr107_chip_info(struct phy_device *phydev)
+static int aqr_build_fingerprint(struct phy_device *phydev)
{
u8 fw_major, fw_minor, build_id, prov_id;
+ struct aqr107_priv *priv = phydev->priv;
+ u16 misc_id, misc_ver;
int val;
val = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_FW_ID);
if (val < 0)
- return;
+ return val;
fw_major = FIELD_GET(VEND1_GLOBAL_FW_ID_MAJOR, val);
fw_minor = FIELD_GET(VEND1_GLOBAL_FW_ID_MINOR, val);
val = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_RSVD_STAT1);
if (val < 0)
- return;
+ return val;
build_id = FIELD_GET(VEND1_GLOBAL_RSVD_STAT1_FW_BUILD_ID, val);
prov_id = FIELD_GET(VEND1_GLOBAL_RSVD_STAT1_PROV_ID, val);
- phydev_dbg(phydev, "FW %u.%u, Build %u, Provisioning %u\n",
- fw_major, fw_minor, build_id, prov_id);
+ val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, PMAPMD_FW_MISC_ID);
+ if (val < 0)
+ return val;
+
+ misc_id = val;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, PMAPMD_FW_MISC_VER);
+ if (val < 0)
+ return val;
+
+ misc_ver = val;
+
+ priv->fingerprint = AQR_FW_FINGERPRINT(fw_major, fw_minor, build_id,
+ prov_id, misc_id, misc_ver);
+
+ phydev_dbg(phydev, "FW %u.%u, Build %u, Provisioning %u, Misc ID %u, Version %u\n",
+ fw_major, fw_minor, build_id, prov_id, misc_id, misc_ver);
+
+ return 0;
}
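
The inverse operation uses the same masks with FIELD_GET(); a sketch of a debugging helper (hypothetical, not part of the patch) that unpacks a stored fingerprint:

/* Sketch: unpack a value built by AQR_FW_FINGERPRINT(). */
static void aqr_dump_fingerprint(struct phy_device *phydev, u64 fp)
{
	phydev_dbg(phydev,
		   "FW %llu.%llu, Build %llu, Provisioning %llu, Misc ID %llu, Version %llu\n",
		   FIELD_GET(AQR_FW_FINGERPRINT_MAJOR, fp),
		   FIELD_GET(AQR_FW_FINGERPRINT_MINOR, fp),
		   FIELD_GET(AQR_FW_FINGERPRINT_BUILD_ID, fp),
		   FIELD_GET(AQR_FW_FINGERPRINT_PROV_ID, fp),
		   FIELD_GET(AQR_FW_FINGERPRINT_MISC_ID, fp),
		   FIELD_GET(AQR_FW_FINGERPRINT_MISC_VER, fp));
}
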
static int aqr107_config_mdi(struct phy_device *phydev)
@@ -810,7 +770,7 @@ static int aqr107_config_mdi(struct phy_device *phydev)
mdi_conf | PMAPMD_RSVD_VEND_PROV_MDI_FORCE);
}
-static int aqr107_config_init(struct phy_device *phydev)
+static int aqr_gen1_config_init(struct phy_device *phydev)
{
struct aqr107_priv *priv = phydev->priv;
u32 led_idx;
@@ -822,6 +782,7 @@ static int aqr107_config_init(struct phy_device *phydev)
phydev->interface != PHY_INTERFACE_MODE_2500BASEX &&
phydev->interface != PHY_INTERFACE_MODE_XGMII &&
phydev->interface != PHY_INTERFACE_MODE_USXGMII &&
+ phydev->interface != PHY_INTERFACE_MODE_10G_QXGMII &&
phydev->interface != PHY_INTERFACE_MODE_10GKR &&
phydev->interface != PHY_INTERFACE_MODE_10GBASER &&
phydev->interface != PHY_INTERFACE_MODE_XAUI &&
@@ -832,8 +793,14 @@ static int aqr107_config_init(struct phy_device *phydev)
"Your devicetree is out of date, please update it. The AQR107 family doesn't support XGMII, maybe you mean USXGMII.\n");
ret = aqr_wait_reset_complete(phydev);
- if (!ret)
- aqr107_chip_info(phydev);
+ if (!ret) {
+ /* The PHY might work without a firmware image, so only build a
+ * fingerprint if the firmware was initialized.
+ */
+ ret = aqr_build_fingerprint(phydev);
+ if (ret)
+ return ret;
+ }
ret = aqr107_set_downshift(phydev, MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT);
if (ret)
@@ -859,20 +826,145 @@ static int aqr107_config_init(struct phy_device *phydev)
return 0;
}
-static int aqcs109_config_init(struct phy_device *phydev)
+/* Walk the media-speed configuration registers to determine which
+ * host-side serdes modes may be used by the PHY depending on the
+ * negotiated media speed.
+ */
+static int aqr_gen2_read_global_syscfg(struct phy_device *phydev)
+{
+ struct aqr107_priv *priv = phydev->priv;
+ unsigned int serdes_mode, rate_adapt;
+ phy_interface_t interface;
+ int i, val;
+
+ for (i = 0; i < AQR_NUM_GLOBAL_CFG; i++) {
+ struct aqr_global_syscfg *syscfg = &priv->global_cfg[i];
+
+ syscfg->speed = aqr_global_cfg_regs[i].speed;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ aqr_global_cfg_regs[i].reg);
+ if (val < 0)
+ return val;
+
+ serdes_mode = FIELD_GET(VEND1_GLOBAL_CFG_SERDES_MODE, val);
+ rate_adapt = FIELD_GET(VEND1_GLOBAL_CFG_RATE_ADAPT, val);
+
+ switch (serdes_mode) {
+ case VEND1_GLOBAL_CFG_SERDES_MODE_XFI:
+ if (rate_adapt == VEND1_GLOBAL_CFG_RATE_ADAPT_USX)
+ interface = PHY_INTERFACE_MODE_USXGMII;
+ else
+ interface = PHY_INTERFACE_MODE_10GBASER;
+ break;
+
+ case VEND1_GLOBAL_CFG_SERDES_MODE_XFI5G:
+ interface = PHY_INTERFACE_MODE_5GBASER;
+ break;
+
+ case VEND1_GLOBAL_CFG_SERDES_MODE_OCSGMII:
+ interface = PHY_INTERFACE_MODE_2500BASEX;
+ break;
+
+ case VEND1_GLOBAL_CFG_SERDES_MODE_SGMII:
+ interface = PHY_INTERFACE_MODE_SGMII;
+ break;
+
+ default:
+ phydev_warn(phydev, "unrecognised serdes mode %u\n",
+ serdes_mode);
+ interface = PHY_INTERFACE_MODE_NA;
+ break;
+ }
+
+ syscfg->interface = aqr_translate_interface(phydev, interface);
+
+ switch (rate_adapt) {
+ case VEND1_GLOBAL_CFG_RATE_ADAPT_NONE:
+ syscfg->rate_adapt = AQR_RATE_ADAPT_NONE;
+ break;
+ case VEND1_GLOBAL_CFG_RATE_ADAPT_USX:
+ syscfg->rate_adapt = AQR_RATE_ADAPT_USX;
+ break;
+ case VEND1_GLOBAL_CFG_RATE_ADAPT_PAUSE:
+ syscfg->rate_adapt = AQR_RATE_ADAPT_PAUSE;
+ break;
+ default:
+ phydev_warn(phydev, "unrecognized rate adapt mode %u\n",
+ rate_adapt);
+ break;
+ }
+
+ phydev_dbg(phydev,
+ "Media speed %d uses host interface %s with %s\n",
+ syscfg->speed, phy_modes(syscfg->interface),
+ syscfg->rate_adapt == AQR_RATE_ADAPT_NONE ? "no rate adaptation" :
+ syscfg->rate_adapt == AQR_RATE_ADAPT_PAUSE ? "rate adaptation through flow control" :
+ syscfg->rate_adapt == AQR_RATE_ADAPT_USX ? "rate adaptation through symbol replication" :
+ "unrecognized rate adaptation type");
+ }
+
+ return 0;
+}
+
+static int aqr_gen2_fill_interface_modes(struct phy_device *phydev)
+{
+ unsigned long *possible = phydev->possible_interfaces;
+ struct aqr107_priv *priv = phydev->priv;
+ phy_interface_t interface;
+ int i, val, ret;
+
+	/* It's been observed on some models that, when coming out of
+	 * suspend, the FW signals that the PHY is ready but the GLOBAL_CFG
+	 * registers keep returning zeroes for some time. Poll the 100M
+	 * register until it returns a real value, as both 113c and 115c
+	 * support this mode.
+	 */
+ if (priv->wait_on_global_cfg) {
+ ret = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
+ VEND1_GLOBAL_CFG_100M, val,
+ val != 0, 1000, 100000, false);
+ if (ret)
+ return ret;
+ }
+
+ ret = aqr_gen2_read_global_syscfg(phydev);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < AQR_NUM_GLOBAL_CFG; i++) {
+ interface = priv->global_cfg[i].interface;
+ if (interface != PHY_INTERFACE_MODE_NA)
+ __set_bit(interface, possible);
+ }
+
+ return 0;
+}
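
phydev->possible_interfaces is an ordinary interface-mode bitmap, so the result can be inspected with the usual bitmap helpers. A small sketch listing what was just filled in:

/* Sketch: enumerate the interfaces recorded above. */
unsigned int mode;

for_each_set_bit(mode, phydev->possible_interfaces,
		 PHY_INTERFACE_MODE_MAX)
	phydev_dbg(phydev, "possible host interface: %s\n",
		   phy_modes(mode));
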
+
+static int aqr_gen2_config_init(struct phy_device *phydev)
{
int ret;
+ ret = aqr_gen1_config_init(phydev);
+ if (ret)
+ return ret;
+
+ return aqr_gen2_fill_interface_modes(phydev);
+}
+
+static int aqr_gen3_config_init(struct phy_device *phydev)
+{
+ return aqr_gen2_config_init(phydev);
+}
+
+static int aqcs109_config_init(struct phy_device *phydev)
+{
/* Check that the PHY interface type is compatible */
if (phydev->interface != PHY_INTERFACE_MODE_SGMII &&
phydev->interface != PHY_INTERFACE_MODE_2500BASEX)
return -ENODEV;
- ret = aqr_wait_reset_complete(phydev);
- if (!ret)
- aqr107_chip_info(phydev);
-
- return aqr107_set_downshift(phydev, MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT);
+ return aqr_gen2_config_init(phydev);
}
static void aqr107_link_change_notify(struct phy_device *phydev)
@@ -920,7 +1012,7 @@ static void aqr107_link_change_notify(struct phy_device *phydev)
phydev_info(phydev, "Aquantia 1000Base-T2 mode active\n");
}
-static int aqr107_wait_processor_intensive_op(struct phy_device *phydev)
+static int aqr_gen1_wait_processor_intensive_op(struct phy_device *phydev)
{
int val, err;
@@ -944,17 +1036,16 @@ static int aqr107_wait_processor_intensive_op(struct phy_device *phydev)
return 0;
}
-static int aqr107_get_rate_matching(struct phy_device *phydev,
- phy_interface_t iface)
+static int aqr_gen2_get_rate_matching(struct phy_device *phydev,
+ phy_interface_t iface)
{
if (iface == PHY_INTERFACE_MODE_10GBASER ||
- iface == PHY_INTERFACE_MODE_2500BASEX ||
- iface == PHY_INTERFACE_MODE_NA)
+ iface == PHY_INTERFACE_MODE_2500BASEX)
return RATE_MATCH_PAUSE;
return RATE_MATCH_NONE;
}
-static int aqr107_suspend(struct phy_device *phydev)
+static int aqr_gen1_suspend(struct phy_device *phydev)
{
int err;
@@ -963,10 +1054,10 @@ static int aqr107_suspend(struct phy_device *phydev)
if (err)
return err;
- return aqr107_wait_processor_intensive_op(phydev);
+ return aqr_gen1_wait_processor_intensive_op(phydev);
}
-static int aqr107_resume(struct phy_device *phydev)
+static int aqr_gen1_resume(struct phy_device *phydev)
{
int err;
@@ -975,89 +1066,7 @@ static int aqr107_resume(struct phy_device *phydev)
if (err)
return err;
- return aqr107_wait_processor_intensive_op(phydev);
-}
-
-static const u16 aqr_global_cfg_regs[] = {
- VEND1_GLOBAL_CFG_10M,
- VEND1_GLOBAL_CFG_100M,
- VEND1_GLOBAL_CFG_1G,
- VEND1_GLOBAL_CFG_2_5G,
- VEND1_GLOBAL_CFG_5G,
- VEND1_GLOBAL_CFG_10G
-};
-
-static int aqr107_fill_interface_modes(struct phy_device *phydev)
-{
- unsigned long *possible = phydev->possible_interfaces;
- unsigned int serdes_mode, rate_adapt;
- phy_interface_t interface;
- int i, val;
-
- /* Walk the media-speed configuration registers to determine which
- * host-side serdes modes may be used by the PHY depending on the
- * negotiated media speed.
- */
- for (i = 0; i < ARRAY_SIZE(aqr_global_cfg_regs); i++) {
- val = phy_read_mmd(phydev, MDIO_MMD_VEND1,
- aqr_global_cfg_regs[i]);
- if (val < 0)
- return val;
-
- serdes_mode = FIELD_GET(VEND1_GLOBAL_CFG_SERDES_MODE, val);
- rate_adapt = FIELD_GET(VEND1_GLOBAL_CFG_RATE_ADAPT, val);
-
- switch (serdes_mode) {
- case VEND1_GLOBAL_CFG_SERDES_MODE_XFI:
- if (rate_adapt == VEND1_GLOBAL_CFG_RATE_ADAPT_USX)
- interface = PHY_INTERFACE_MODE_USXGMII;
- else
- interface = PHY_INTERFACE_MODE_10GBASER;
- break;
-
- case VEND1_GLOBAL_CFG_SERDES_MODE_XFI5G:
- interface = PHY_INTERFACE_MODE_5GBASER;
- break;
-
- case VEND1_GLOBAL_CFG_SERDES_MODE_OCSGMII:
- interface = PHY_INTERFACE_MODE_2500BASEX;
- break;
-
- case VEND1_GLOBAL_CFG_SERDES_MODE_SGMII:
- interface = PHY_INTERFACE_MODE_SGMII;
- break;
-
- default:
- phydev_warn(phydev, "unrecognised serdes mode %u\n",
- serdes_mode);
- interface = PHY_INTERFACE_MODE_NA;
- break;
- }
-
- if (interface != PHY_INTERFACE_MODE_NA)
- __set_bit(interface, possible);
- }
-
- return 0;
-}
-
-static int aqr113c_fill_interface_modes(struct phy_device *phydev)
-{
- int val, ret;
-
- /* It's been observed on some models that - when coming out of suspend
- * - the FW signals that the PHY is ready but the GLOBAL_CFG registers
- * continue on returning zeroes for some time. Let's poll the 100M
- * register until it returns a real value as both 113c and 115c support
- * this mode.
- */
- ret = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
- VEND1_GLOBAL_CFG_100M, val, val != 0,
- 1000, 100000, false);
- if (ret)
- return ret;
-
- return aqr107_fill_interface_modes(phydev);
+ return aqr_gen1_wait_processor_intensive_op(phydev);
}
static int aqr115c_get_features(struct phy_device *phydev)
@@ -1085,11 +1094,14 @@ static int aqr111_get_features(struct phy_device *phydev)
return 0;
}
-static int aqr113c_config_init(struct phy_device *phydev)
+static int aqr_gen4_config_init(struct phy_device *phydev)
{
+ struct aqr107_priv *priv = phydev->priv;
int ret;
- ret = aqr107_config_init(phydev);
+ priv->wait_on_global_cfg = true;
+
+ ret = aqr_gen3_config_init(phydev);
if (ret < 0)
return ret;
@@ -1098,11 +1110,55 @@ static int aqr113c_config_init(struct phy_device *phydev)
if (ret)
return ret;
- ret = aqr107_wait_processor_intensive_op(phydev);
- if (ret)
- return ret;
+ return aqr_gen1_wait_processor_intensive_op(phydev);
+}
+
+static unsigned int aqr_gen2_inband_caps(struct phy_device *phydev,
+ phy_interface_t interface)
+{
+ if (interface == PHY_INTERFACE_MODE_SGMII ||
+ interface == PHY_INTERFACE_MODE_USXGMII ||
+ interface == PHY_INTERFACE_MODE_10G_QXGMII)
+ return LINK_INBAND_ENABLE | LINK_INBAND_DISABLE;
+
+ return 0;
+}
+
+static int aqr_gen2_config_inband(struct phy_device *phydev, unsigned int modes)
+{
+ struct aqr107_priv *priv = phydev->priv;
+
+ if (phydev->interface == PHY_INTERFACE_MODE_USXGMII ||
+ phydev->interface == PHY_INTERFACE_MODE_10G_QXGMII) {
+ u16 set = 0;
+
+ if (modes == LINK_INBAND_ENABLE)
+ set = MDIO_PHYXS_VEND_PROV2_USX_AN;
+
+ return phy_modify_mmd(phydev, MDIO_MMD_PHYXS,
+ MDIO_PHYXS_VEND_PROV2,
+ MDIO_PHYXS_VEND_PROV2_USX_AN, set);
+ }
+
+ for (int i = 0; i < AQR_NUM_GLOBAL_CFG; i++) {
+ struct aqr_global_syscfg *syscfg = &priv->global_cfg[i];
+ u16 set = 0;
+ int err;
+
+ if (syscfg->interface != phydev->interface)
+ continue;
+
+ if (modes == LINK_INBAND_ENABLE)
+ set = VEND1_GLOBAL_CFG_AUTONEG_ENA;
+
+ err = phy_modify_mmd(phydev, MDIO_MMD_VEND1,
+ aqr_global_cfg_regs[i].reg,
+ VEND1_GLOBAL_CFG_AUTONEG_ENA, set);
+ if (err)
+ return err;
+ }
- return aqr113c_fill_interface_modes(phydev);
+ return 0;
}
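
Together these two ops let phylink discover and program in-band autoneg: inband_caps() reports whether it can be enabled or disabled for a given interface, and config_inband() applies the choice, either via the single PHYXS provisioning bit (USXGMII/10G-QXGMII) or via the AUTONEG_ENA bit of every GLOBAL_CFG register whose interface matches. A consumer-side sketch; phylink normally drives these through its own wrappers, so the direct calls are illustrative only:

/* Illustrative only: roughly what the phylink side amounts to. */
unsigned int caps;
int err;

caps = phydev->drv->inband_caps(phydev, phydev->interface);
if (caps & LINK_INBAND_ENABLE)
	err = phydev->drv->config_inband(phydev, LINK_INBAND_ENABLE);
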
static int aqr107_probe(struct phy_device *phydev)
@@ -1144,13 +1200,13 @@ static struct phy_driver aqr_driver[] = {
.name = "Aquantia AQR105",
.get_features = aqr105_get_features,
.probe = aqr107_probe,
- .config_init = aqr107_config_init,
+ .config_init = aqr_gen1_config_init,
.config_aneg = aqr105_config_aneg,
.config_intr = aqr_config_intr,
.handle_interrupt = aqr_handle_interrupt,
- .read_status = aqr105_read_status,
- .suspend = aqr107_suspend,
- .resume = aqr107_resume,
+ .read_status = aqr_gen1_read_status,
+ .suspend = aqr_gen1_suspend,
+ .resume = aqr_gen1_resume,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR106),
@@ -1164,16 +1220,16 @@ static struct phy_driver aqr_driver[] = {
PHY_ID_MATCH_MODEL(PHY_ID_AQR107),
.name = "Aquantia AQR107",
.probe = aqr107_probe,
- .get_rate_matching = aqr107_get_rate_matching,
- .config_init = aqr107_config_init,
+ .get_rate_matching = aqr_gen2_get_rate_matching,
+ .config_init = aqr_gen2_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
.handle_interrupt = aqr_handle_interrupt,
- .read_status = aqr107_read_status,
+ .read_status = aqr_gen2_read_status,
.get_tunable = aqr107_get_tunable,
.set_tunable = aqr107_set_tunable,
- .suspend = aqr107_suspend,
- .resume = aqr107_resume,
+ .suspend = aqr_gen1_suspend,
+ .resume = aqr_gen1_resume,
.get_sset_count = aqr107_get_sset_count,
.get_strings = aqr107_get_strings,
.get_stats = aqr107_get_stats,
@@ -1183,21 +1239,23 @@ static struct phy_driver aqr_driver[] = {
.led_hw_control_set = aqr_phy_led_hw_control_set,
.led_hw_control_get = aqr_phy_led_hw_control_get,
.led_polarity_set = aqr_phy_led_polarity_set,
+ .inband_caps = aqr_gen2_inband_caps,
+ .config_inband = aqr_gen2_config_inband,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQCS109),
.name = "Aquantia AQCS109",
.probe = aqr107_probe,
- .get_rate_matching = aqr107_get_rate_matching,
+ .get_rate_matching = aqr_gen2_get_rate_matching,
.config_init = aqcs109_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
.handle_interrupt = aqr_handle_interrupt,
- .read_status = aqr107_read_status,
+ .read_status = aqr_gen2_read_status,
.get_tunable = aqr107_get_tunable,
.set_tunable = aqr107_set_tunable,
- .suspend = aqr107_suspend,
- .resume = aqr107_resume,
+ .suspend = aqr_gen1_suspend,
+ .resume = aqr_gen1_resume,
.get_sset_count = aqr107_get_sset_count,
.get_strings = aqr107_get_strings,
.get_stats = aqr107_get_stats,
@@ -1208,21 +1266,23 @@ static struct phy_driver aqr_driver[] = {
.led_hw_control_set = aqr_phy_led_hw_control_set,
.led_hw_control_get = aqr_phy_led_hw_control_get,
.led_polarity_set = aqr_phy_led_polarity_set,
+ .inband_caps = aqr_gen2_inband_caps,
+ .config_inband = aqr_gen2_config_inband,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR111),
.name = "Aquantia AQR111",
.probe = aqr107_probe,
- .get_rate_matching = aqr107_get_rate_matching,
- .config_init = aqr107_config_init,
+ .get_rate_matching = aqr_gen2_get_rate_matching,
+ .config_init = aqr_gen3_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
.handle_interrupt = aqr_handle_interrupt,
- .read_status = aqr107_read_status,
+ .read_status = aqr_gen2_read_status,
.get_tunable = aqr107_get_tunable,
.set_tunable = aqr107_set_tunable,
- .suspend = aqr107_suspend,
- .resume = aqr107_resume,
+ .suspend = aqr_gen1_suspend,
+ .resume = aqr_gen1_resume,
.get_sset_count = aqr107_get_sset_count,
.get_strings = aqr107_get_strings,
.get_stats = aqr107_get_stats,
@@ -1233,21 +1293,23 @@ static struct phy_driver aqr_driver[] = {
.led_hw_control_set = aqr_phy_led_hw_control_set,
.led_hw_control_get = aqr_phy_led_hw_control_get,
.led_polarity_set = aqr_phy_led_polarity_set,
+ .inband_caps = aqr_gen2_inband_caps,
+ .config_inband = aqr_gen2_config_inband,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR111B0),
.name = "Aquantia AQR111B0",
.probe = aqr107_probe,
- .get_rate_matching = aqr107_get_rate_matching,
- .config_init = aqr107_config_init,
+ .get_rate_matching = aqr_gen2_get_rate_matching,
+ .config_init = aqr_gen3_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
.handle_interrupt = aqr_handle_interrupt,
- .read_status = aqr107_read_status,
+ .read_status = aqr_gen2_read_status,
.get_tunable = aqr107_get_tunable,
.set_tunable = aqr107_set_tunable,
- .suspend = aqr107_suspend,
- .resume = aqr107_resume,
+ .suspend = aqr_gen1_suspend,
+ .resume = aqr_gen1_resume,
.get_sset_count = aqr107_get_sset_count,
.get_strings = aqr107_get_strings,
.get_stats = aqr107_get_stats,
@@ -1258,6 +1320,8 @@ static struct phy_driver aqr_driver[] = {
.led_hw_control_set = aqr_phy_led_hw_control_set,
.led_hw_control_get = aqr_phy_led_hw_control_get,
.led_polarity_set = aqr_phy_led_polarity_set,
+ .inband_caps = aqr_gen2_inband_caps,
+ .config_inband = aqr_gen2_config_inband,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR405),
@@ -1266,20 +1330,23 @@ static struct phy_driver aqr_driver[] = {
.config_intr = aqr_config_intr,
.handle_interrupt = aqr_handle_interrupt,
.read_status = aqr_read_status,
+ .inband_caps = aqr_gen2_inband_caps,
+ .config_inband = aqr_gen2_config_inband,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR112),
.name = "Aquantia AQR112",
.probe = aqr107_probe,
+ .config_init = aqr_gen3_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
.handle_interrupt = aqr_handle_interrupt,
.get_tunable = aqr107_get_tunable,
.set_tunable = aqr107_set_tunable,
- .suspend = aqr107_suspend,
- .resume = aqr107_resume,
- .read_status = aqr107_read_status,
- .get_rate_matching = aqr107_get_rate_matching,
+ .suspend = aqr_gen1_suspend,
+ .resume = aqr_gen1_resume,
+ .read_status = aqr_gen2_read_status,
+ .get_rate_matching = aqr_gen2_get_rate_matching,
.get_sset_count = aqr107_get_sset_count,
.get_strings = aqr107_get_strings,
.get_stats = aqr107_get_stats,
@@ -1289,39 +1356,65 @@ static struct phy_driver aqr_driver[] = {
.led_hw_control_set = aqr_phy_led_hw_control_set,
.led_hw_control_get = aqr_phy_led_hw_control_get,
.led_polarity_set = aqr_phy_led_polarity_set,
+ .inband_caps = aqr_gen2_inband_caps,
+ .config_inband = aqr_gen2_config_inband,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR412),
.name = "Aquantia AQR412",
.probe = aqr107_probe,
+ .config_init = aqr_gen3_config_init,
+ .config_aneg = aqr_config_aneg,
+ .config_intr = aqr_config_intr,
+ .handle_interrupt = aqr_handle_interrupt,
+ .get_tunable = aqr107_get_tunable,
+ .set_tunable = aqr107_set_tunable,
+ .suspend = aqr_gen1_suspend,
+ .resume = aqr_gen1_resume,
+ .read_status = aqr_gen2_read_status,
+ .get_rate_matching = aqr_gen2_get_rate_matching,
+ .get_sset_count = aqr107_get_sset_count,
+ .get_strings = aqr107_get_strings,
+ .get_stats = aqr107_get_stats,
+ .link_change_notify = aqr107_link_change_notify,
+ .inband_caps = aqr_gen2_inband_caps,
+ .config_inband = aqr_gen2_config_inband,
+},
+{
+ PHY_ID_MATCH_MODEL(PHY_ID_AQR412C),
+ .name = "Aquantia AQR412C",
+ .probe = aqr107_probe,
+ .config_init = aqr_gen3_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
.handle_interrupt = aqr_handle_interrupt,
.get_tunable = aqr107_get_tunable,
.set_tunable = aqr107_set_tunable,
- .suspend = aqr107_suspend,
- .resume = aqr107_resume,
- .read_status = aqr107_read_status,
- .get_rate_matching = aqr107_get_rate_matching,
+ .suspend = aqr_gen1_suspend,
+ .resume = aqr_gen1_resume,
+ .read_status = aqr_gen2_read_status,
+ .get_rate_matching = aqr_gen2_get_rate_matching,
.get_sset_count = aqr107_get_sset_count,
.get_strings = aqr107_get_strings,
.get_stats = aqr107_get_stats,
.link_change_notify = aqr107_link_change_notify,
+ .inband_caps = aqr_gen2_inband_caps,
+ .config_inband = aqr_gen2_config_inband,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR113),
.name = "Aquantia AQR113",
.probe = aqr107_probe,
- .get_rate_matching = aqr107_get_rate_matching,
- .config_init = aqr113c_config_init,
+ .get_rate_matching = aqr_gen2_get_rate_matching,
+ .config_init = aqr_gen4_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
.handle_interrupt = aqr_handle_interrupt,
- .read_status = aqr107_read_status,
+ .read_status = aqr_gen2_read_status,
.get_tunable = aqr107_get_tunable,
.set_tunable = aqr107_set_tunable,
- .suspend = aqr107_suspend,
- .resume = aqr107_resume,
+ .suspend = aqr_gen1_suspend,
+ .resume = aqr_gen1_resume,
.get_sset_count = aqr107_get_sset_count,
.get_strings = aqr107_get_strings,
.get_stats = aqr107_get_stats,
@@ -1331,21 +1424,23 @@ static struct phy_driver aqr_driver[] = {
.led_hw_control_set = aqr_phy_led_hw_control_set,
.led_hw_control_get = aqr_phy_led_hw_control_get,
.led_polarity_set = aqr_phy_led_polarity_set,
+ .inband_caps = aqr_gen2_inband_caps,
+ .config_inband = aqr_gen2_config_inband,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR113C),
.name = "Aquantia AQR113C",
.probe = aqr107_probe,
- .get_rate_matching = aqr107_get_rate_matching,
- .config_init = aqr113c_config_init,
+ .get_rate_matching = aqr_gen2_get_rate_matching,
+ .config_init = aqr_gen4_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
.handle_interrupt = aqr_handle_interrupt,
- .read_status = aqr107_read_status,
+ .read_status = aqr_gen2_read_status,
.get_tunable = aqr107_get_tunable,
.set_tunable = aqr107_set_tunable,
- .suspend = aqr107_suspend,
- .resume = aqr107_resume,
+ .suspend = aqr_gen1_suspend,
+ .resume = aqr_gen1_resume,
.get_sset_count = aqr107_get_sset_count,
.get_strings = aqr107_get_strings,
.get_stats = aqr107_get_stats,
@@ -1355,21 +1450,23 @@ static struct phy_driver aqr_driver[] = {
.led_hw_control_set = aqr_phy_led_hw_control_set,
.led_hw_control_get = aqr_phy_led_hw_control_get,
.led_polarity_set = aqr_phy_led_polarity_set,
+ .inband_caps = aqr_gen2_inband_caps,
+ .config_inband = aqr_gen2_config_inband,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR114C),
.name = "Aquantia AQR114C",
.probe = aqr107_probe,
- .get_rate_matching = aqr107_get_rate_matching,
- .config_init = aqr107_config_init,
+ .get_rate_matching = aqr_gen2_get_rate_matching,
+ .config_init = aqr_gen4_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
.handle_interrupt = aqr_handle_interrupt,
- .read_status = aqr107_read_status,
+ .read_status = aqr_gen2_read_status,
.get_tunable = aqr107_get_tunable,
.set_tunable = aqr107_set_tunable,
- .suspend = aqr107_suspend,
- .resume = aqr107_resume,
+ .suspend = aqr_gen1_suspend,
+ .resume = aqr_gen1_resume,
.get_sset_count = aqr107_get_sset_count,
.get_strings = aqr107_get_strings,
.get_stats = aqr107_get_stats,
@@ -1380,21 +1477,50 @@ static struct phy_driver aqr_driver[] = {
.led_hw_control_set = aqr_phy_led_hw_control_set,
.led_hw_control_get = aqr_phy_led_hw_control_get,
.led_polarity_set = aqr_phy_led_polarity_set,
+ .inband_caps = aqr_gen2_inband_caps,
+ .config_inband = aqr_gen2_config_inband,
+},
+{
+ PHY_ID_MATCH_MODEL(PHY_ID_AQR115),
+ .name = "Aquantia AQR115",
+ .probe = aqr107_probe,
+ .get_rate_matching = aqr_gen2_get_rate_matching,
+ .config_init = aqr_gen4_config_init,
+ .config_aneg = aqr_config_aneg,
+ .config_intr = aqr_config_intr,
+ .handle_interrupt = aqr_handle_interrupt,
+ .read_status = aqr_gen2_read_status,
+ .get_tunable = aqr107_get_tunable,
+ .set_tunable = aqr107_set_tunable,
+ .suspend = aqr_gen1_suspend,
+ .resume = aqr_gen1_resume,
+ .get_sset_count = aqr107_get_sset_count,
+ .get_strings = aqr107_get_strings,
+ .get_stats = aqr107_get_stats,
+ .get_features = aqr115c_get_features,
+ .link_change_notify = aqr107_link_change_notify,
+ .led_brightness_set = aqr_phy_led_brightness_set,
+ .led_hw_is_supported = aqr_phy_led_hw_is_supported,
+ .led_hw_control_set = aqr_phy_led_hw_control_set,
+ .led_hw_control_get = aqr_phy_led_hw_control_get,
+ .led_polarity_set = aqr_phy_led_polarity_set,
+ .inband_caps = aqr_gen2_inband_caps,
+ .config_inband = aqr_gen2_config_inband,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR115C),
.name = "Aquantia AQR115C",
.probe = aqr107_probe,
- .get_rate_matching = aqr107_get_rate_matching,
- .config_init = aqr113c_config_init,
+ .get_rate_matching = aqr_gen2_get_rate_matching,
+ .config_init = aqr_gen4_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
.handle_interrupt = aqr_handle_interrupt,
- .read_status = aqr107_read_status,
+ .read_status = aqr_gen2_read_status,
.get_tunable = aqr107_get_tunable,
.set_tunable = aqr107_set_tunable,
- .suspend = aqr107_suspend,
- .resume = aqr107_resume,
+ .suspend = aqr_gen1_suspend,
+ .resume = aqr_gen1_resume,
.get_sset_count = aqr107_get_sset_count,
.get_strings = aqr107_get_strings,
.get_stats = aqr107_get_stats,
@@ -1405,21 +1531,23 @@ static struct phy_driver aqr_driver[] = {
.led_hw_control_set = aqr_phy_led_hw_control_set,
.led_hw_control_get = aqr_phy_led_hw_control_get,
.led_polarity_set = aqr_phy_led_polarity_set,
+ .inband_caps = aqr_gen2_inband_caps,
+ .config_inband = aqr_gen2_config_inband,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR813),
.name = "Aquantia AQR813",
.probe = aqr107_probe,
- .get_rate_matching = aqr107_get_rate_matching,
- .config_init = aqr107_config_init,
+ .get_rate_matching = aqr_gen2_get_rate_matching,
+ .config_init = aqr_gen4_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
.handle_interrupt = aqr_handle_interrupt,
- .read_status = aqr107_read_status,
+ .read_status = aqr_gen2_read_status,
.get_tunable = aqr107_get_tunable,
.set_tunable = aqr107_set_tunable,
- .suspend = aqr107_suspend,
- .resume = aqr107_resume,
+ .suspend = aqr_gen1_suspend,
+ .resume = aqr_gen1_resume,
.get_sset_count = aqr107_get_sset_count,
.get_strings = aqr107_get_strings,
.get_stats = aqr107_get_stats,
@@ -1429,6 +1557,8 @@ static struct phy_driver aqr_driver[] = {
.led_hw_control_set = aqr_phy_led_hw_control_set,
.led_hw_control_get = aqr_phy_led_hw_control_get,
.led_polarity_set = aqr_phy_led_polarity_set,
+ .inband_caps = aqr_gen2_inband_caps,
+ .config_inband = aqr_gen2_config_inband,
},
};
@@ -1446,9 +1576,11 @@ static const struct mdio_device_id __maybe_unused aqr_tbl[] = {
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR111B0) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR112) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR412) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_AQR412C) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR113) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR113C) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR114C) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_AQR115) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR115C) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR813) },
{ }
diff --git a/drivers/net/phy/as21xxx.c b/drivers/net/phy/as21xxx.c
index 92697f43087d..005277360656 100644
--- a/drivers/net/phy/as21xxx.c
+++ b/drivers/net/phy/as21xxx.c
@@ -884,11 +884,12 @@ static int as21xxx_match_phy_device(struct phy_device *phydev,
u32 phy_id;
int ret;
- /* Skip PHY that are not AS21xxx or already have firmware loaded */
- if (phydev->c45_ids.device_ids[MDIO_MMD_PCS] != PHY_ID_AS21XXX)
+	/* Skip PHYs that are not AS21xxx */
+ if (!phy_id_compare_vendor(phydev->c45_ids.device_ids[MDIO_MMD_PCS],
+ PHY_VENDOR_AEONSEMI))
return genphy_match_phy_device(phydev, phydrv);
- /* Read PHY ID to handle firmware just loaded */
+	/* Re-read the PHY ID to handle just-loaded firmware or a HW reset */
ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MII_PHYSID1);
if (ret < 0)
return ret;
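
Matching on the vendor portion of the ID lets one entry cover every AS21xxx part, including devices whose model bits change once firmware is loaded. A sketch of the helper's effect, assuming the PHY_ID_MATCH_VENDOR() bit layout from linux/phy.h:

/* Assumed semantics of the vendor compare: only the OUI bits take
 * part in the match, roughly
 *   phy_id_compare_vendor(id, vendor) == !((id ^ vendor) & GENMASK(31, 10))
 */
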
diff --git a/drivers/net/phy/ax88796b.c b/drivers/net/phy/ax88796b.c
index 694df1401aa2..f20ddf649149 100644
--- a/drivers/net/phy/ax88796b.c
+++ b/drivers/net/phy/ax88796b.c
@@ -112,9 +112,8 @@ static struct phy_driver asix_driver[] = {
.resume = genphy_resume,
.soft_reset = asix_soft_reset,
}, {
- .phy_id = PHY_ID_ASIX_AX88796B,
+ PHY_ID_MATCH_MODEL(PHY_ID_ASIX_AX88796B),
.name = "Asix Electronics AX88796B",
- .phy_id_mask = 0xfffffff0,
/* PHY_BASIC_FEATURES */
.soft_reset = asix_soft_reset,
} };
@@ -124,7 +123,7 @@ module_phy_driver(asix_driver);
static const struct mdio_device_id __maybe_unused asix_tbl[] = {
{ PHY_ID_MATCH_EXACT(PHY_ID_ASIX_AX88772A) },
{ PHY_ID_MATCH_EXACT(PHY_ID_ASIX_AX88772C) },
- { PHY_ID_ASIX_AX88796B, 0xfffffff0 },
+ { PHY_ID_MATCH_MODEL(PHY_ID_ASIX_AX88796B) },
{ }
};
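
The table conversion is purely mechanical; per linux/phy.h the match macros expand to the same id/mask pairs the literals spelled out:

/* Expansions from <linux/phy.h>:
 *   PHY_ID_MATCH_EXACT(id) -> .phy_id = (id), .phy_id_mask = GENMASK(31, 0)
 *   PHY_ID_MATCH_MODEL(id) -> .phy_id = (id), .phy_id_mask = GENMASK(31, 4)
 * GENMASK(31, 4) == 0xfffffff0, matching the literal mask removed above.
 */
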
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index a60e58ef90c4..3459a0e9d8b9 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -23,9 +23,6 @@
#include <linux/irq.h>
#include <linux/gpio/consumer.h>
-#define BRCM_PHY_MODEL(phydev) \
- ((phydev)->drv->phy_id & (phydev)->drv->phy_id_mask)
-
#define BRCM_PHY_REV(phydev) \
((phydev)->drv->phy_id & ~((phydev)->drv->phy_id_mask))
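
The BRCM_PHY_MODEL() macro being removed open-coded a masked ID compare; the generic helpers it is replaced with below do the same thing, so the conversion is behaviour-preserving:

/* From <linux/phy.h> (shown for reference):
 *   phy_id_compare(id1, id2, mask)  == !((id1 ^ id2) & mask)
 *   phy_id_compare_model(id, model) ==
 *       phy_id_compare(id, model, PHY_ID_MATCH_MODEL_MASK)
 * i.e. the same compare BRCM_PHY_MODEL() performed with the driver's
 * 0xfffffff0 phy_id_mask.
 */
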
@@ -249,8 +246,8 @@ static int bcm54xx_phydsp_config(struct phy_device *phydev)
if (err < 0)
return err;
- if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610 ||
- BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610M) {
+ if (phy_id_compare_model(phydev->drv->phy_id, PHY_ID_BCM50610) ||
+ phy_id_compare_model(phydev->drv->phy_id, PHY_ID_BCM50610M)) {
/* Clear bit 9 to fix a phy interop issue. */
err = bcm_phy_write_exp(phydev, MII_BCM54XX_EXP_EXP08,
MII_BCM54XX_EXP_EXP08_RJCT_2MHZ);
@@ -264,7 +261,7 @@ static int bcm54xx_phydsp_config(struct phy_device *phydev)
}
}
- if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM57780) {
+ if (phy_id_compare_model(phydev->drv->phy_id, PHY_ID_BCM57780)) {
int val;
val = bcm_phy_read_exp(phydev, MII_BCM54XX_EXP_EXP75);
@@ -292,12 +289,12 @@ static void bcm54xx_adjust_rxrefclk(struct phy_device *phydev)
bool clk125en = true;
/* Abort if we are using an untested phy. */
- if (BRCM_PHY_MODEL(phydev) != PHY_ID_BCM57780 &&
- BRCM_PHY_MODEL(phydev) != PHY_ID_BCM50610 &&
- BRCM_PHY_MODEL(phydev) != PHY_ID_BCM50610M &&
- BRCM_PHY_MODEL(phydev) != PHY_ID_BCM54210E &&
- BRCM_PHY_MODEL(phydev) != PHY_ID_BCM54810 &&
- BRCM_PHY_MODEL(phydev) != PHY_ID_BCM54811)
+ if (!(phy_id_compare_model(phydev->drv->phy_id, PHY_ID_BCM57780) ||
+ phy_id_compare_model(phydev->drv->phy_id, PHY_ID_BCM50610) ||
+ phy_id_compare_model(phydev->drv->phy_id, PHY_ID_BCM50610M) ||
+ phy_id_compare_model(phydev->drv->phy_id, PHY_ID_BCM54210E) ||
+ phy_id_compare_model(phydev->drv->phy_id, PHY_ID_BCM54810) ||
+ phy_id_compare_model(phydev->drv->phy_id, PHY_ID_BCM54811)))
return;
val = bcm_phy_read_shadow(phydev, BCM54XX_SHD_SCR3);
@@ -306,8 +303,8 @@ static void bcm54xx_adjust_rxrefclk(struct phy_device *phydev)
orig = val;
- if ((BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610 ||
- BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610M) &&
+ if ((phy_id_compare_model(phydev->drv->phy_id, PHY_ID_BCM50610) ||
+ phy_id_compare_model(phydev->drv->phy_id, PHY_ID_BCM50610M)) &&
BRCM_PHY_REV(phydev) >= 0x3) {
/*
* Here, bit 0 _disables_ CLK125 when set.
@@ -316,7 +313,8 @@ static void bcm54xx_adjust_rxrefclk(struct phy_device *phydev)
clk125en = false;
} else {
if (phydev->dev_flags & PHY_BRCM_RX_REFCLK_UNUSED) {
- if (BRCM_PHY_MODEL(phydev) != PHY_ID_BCM54811) {
+ if (!phy_id_compare_model(phydev->drv->phy_id,
+ PHY_ID_BCM54811)) {
/* Here, bit 0 _enables_ CLK125 when set */
val &= ~BCM54XX_SHD_SCR3_DEF_CLK125;
}
@@ -330,9 +328,9 @@ static void bcm54xx_adjust_rxrefclk(struct phy_device *phydev)
val |= BCM54XX_SHD_SCR3_DLLAPD_DIS;
if (phydev->dev_flags & PHY_BRCM_DIS_TXCRXC_NOENRGY) {
- if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54210E ||
- BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54810 ||
- BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54811)
+ if (phy_id_compare_model(phydev->drv->phy_id, PHY_ID_BCM54210E) ||
+ phy_id_compare_model(phydev->drv->phy_id, PHY_ID_BCM54810) ||
+ phy_id_compare_model(phydev->drv->phy_id, PHY_ID_BCM54811))
val |= BCM54XX_SHD_SCR3_RXCTXC_DIS;
else
val |= BCM54XX_SHD_SCR3_TRDDAPD;
@@ -461,14 +459,14 @@ static int bcm54xx_config_init(struct phy_device *phydev)
if (err < 0)
return err;
- if ((BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610 ||
- BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610M) &&
+ if ((phy_id_compare_model(phydev->drv->phy_id, PHY_ID_BCM50610) ||
+ phy_id_compare_model(phydev->drv->phy_id, PHY_ID_BCM50610M)) &&
(phydev->dev_flags & PHY_BRCM_CLEAR_RGMII_MODE))
bcm_phy_write_shadow(phydev, BCM54XX_SHD_RGMII_MODE, 0);
bcm54xx_adjust_rxrefclk(phydev);
- switch (BRCM_PHY_MODEL(phydev)) {
+ switch (phydev->drv->phy_id & PHY_ID_MATCH_MODEL_MASK) {
case PHY_ID_BCM50610:
case PHY_ID_BCM50610M:
err = bcm54xx_config_clock_delay(phydev);
@@ -693,7 +691,7 @@ static int bcm5481x_read_abilities(struct phy_device *phydev)
* So we must read the bcm54811 as unable to auto-negotiate
* in BroadR-Reach mode.
*/
- if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54811)
+ if (phy_id_compare_model(phydev->drv->phy_id, PHY_ID_BCM54811))
aneg = 0;
else
aneg = val & LRESR_LDSABILITY;
@@ -1438,8 +1436,7 @@ static int bcm54811_read_status(struct phy_device *phydev)
static struct phy_driver broadcom_drivers[] = {
{
- .phy_id = PHY_ID_BCM5411,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM5411),
.name = "Broadcom BCM5411",
/* PHY_GBIT_FEATURES */
.get_sset_count = bcm_phy_get_sset_count,
@@ -1451,8 +1448,7 @@ static struct phy_driver broadcom_drivers[] = {
.handle_interrupt = bcm_phy_handle_interrupt,
.link_change_notify = bcm54xx_link_change_notify,
}, {
- .phy_id = PHY_ID_BCM5421,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM5421),
.name = "Broadcom BCM5421",
/* PHY_GBIT_FEATURES */
.get_sset_count = bcm_phy_get_sset_count,
@@ -1464,8 +1460,7 @@ static struct phy_driver broadcom_drivers[] = {
.handle_interrupt = bcm_phy_handle_interrupt,
.link_change_notify = bcm54xx_link_change_notify,
}, {
- .phy_id = PHY_ID_BCM54210E,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM54210E),
.name = "Broadcom BCM54210E",
/* PHY_GBIT_FEATURES */
.flags = PHY_ALWAYS_CALL_SUSPEND,
@@ -1483,8 +1478,7 @@ static struct phy_driver broadcom_drivers[] = {
.set_wol = bcm54xx_phy_set_wol,
.led_brightness_set = bcm_phy_led_brightness_set,
}, {
- .phy_id = PHY_ID_BCM5461,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM5461),
.name = "Broadcom BCM5461",
/* PHY_GBIT_FEATURES */
.get_sset_count = bcm_phy_get_sset_count,
@@ -1497,8 +1491,7 @@ static struct phy_driver broadcom_drivers[] = {
.link_change_notify = bcm54xx_link_change_notify,
.led_brightness_set = bcm_phy_led_brightness_set,
}, {
- .phy_id = PHY_ID_BCM54612E,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM54612E),
.name = "Broadcom BCM54612E",
/* PHY_GBIT_FEATURES */
.get_sset_count = bcm_phy_get_sset_count,
@@ -1513,8 +1506,7 @@ static struct phy_driver broadcom_drivers[] = {
.suspend = bcm54xx_suspend,
.resume = bcm54xx_resume,
}, {
- .phy_id = PHY_ID_BCM54616S,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM54616S),
.name = "Broadcom BCM54616S",
/* PHY_GBIT_FEATURES */
.soft_reset = genphy_soft_reset,
@@ -1527,8 +1519,7 @@ static struct phy_driver broadcom_drivers[] = {
.link_change_notify = bcm54xx_link_change_notify,
.led_brightness_set = bcm_phy_led_brightness_set,
}, {
- .phy_id = PHY_ID_BCM5464,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM5464),
.name = "Broadcom BCM5464",
/* PHY_GBIT_FEATURES */
.get_sset_count = bcm_phy_get_sset_count,
@@ -1543,8 +1534,7 @@ static struct phy_driver broadcom_drivers[] = {
.link_change_notify = bcm54xx_link_change_notify,
.led_brightness_set = bcm_phy_led_brightness_set,
}, {
- .phy_id = PHY_ID_BCM5481,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM5481),
.name = "Broadcom BCM5481",
/* PHY_GBIT_FEATURES */
.get_sset_count = bcm_phy_get_sset_count,
@@ -1558,8 +1548,7 @@ static struct phy_driver broadcom_drivers[] = {
.link_change_notify = bcm54xx_link_change_notify,
.led_brightness_set = bcm_phy_led_brightness_set,
}, {
- .phy_id = PHY_ID_BCM54810,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM54810),
.name = "Broadcom BCM54810",
/* PHY_GBIT_FEATURES */
.get_sset_count = bcm_phy_get_sset_count,
@@ -1577,8 +1566,7 @@ static struct phy_driver broadcom_drivers[] = {
.link_change_notify = bcm54xx_link_change_notify,
.led_brightness_set = bcm_phy_led_brightness_set,
}, {
- .phy_id = PHY_ID_BCM54811,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM54811),
.name = "Broadcom BCM54811",
/* PHY_GBIT_FEATURES */
.get_sset_count = bcm_phy_get_sset_count,
@@ -1596,8 +1584,7 @@ static struct phy_driver broadcom_drivers[] = {
.link_change_notify = bcm54xx_link_change_notify,
.led_brightness_set = bcm_phy_led_brightness_set,
}, {
- .phy_id = PHY_ID_BCM5482,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM5482),
.name = "Broadcom BCM5482",
/* PHY_GBIT_FEATURES */
.get_sset_count = bcm_phy_get_sset_count,
@@ -1610,8 +1597,7 @@ static struct phy_driver broadcom_drivers[] = {
.link_change_notify = bcm54xx_link_change_notify,
.led_brightness_set = bcm_phy_led_brightness_set,
}, {
- .phy_id = PHY_ID_BCM50610,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM50610),
.name = "Broadcom BCM50610",
/* PHY_GBIT_FEATURES */
.get_sset_count = bcm_phy_get_sset_count,
@@ -1626,8 +1612,7 @@ static struct phy_driver broadcom_drivers[] = {
.resume = bcm54xx_resume,
.led_brightness_set = bcm_phy_led_brightness_set,
}, {
- .phy_id = PHY_ID_BCM50610M,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM50610M),
.name = "Broadcom BCM50610M",
/* PHY_GBIT_FEATURES */
.get_sset_count = bcm_phy_get_sset_count,
@@ -1642,8 +1627,7 @@ static struct phy_driver broadcom_drivers[] = {
.resume = bcm54xx_resume,
.led_brightness_set = bcm_phy_led_brightness_set,
}, {
- .phy_id = PHY_ID_BCM57780,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM57780),
.name = "Broadcom BCM57780",
/* PHY_GBIT_FEATURES */
.get_sset_count = bcm_phy_get_sset_count,
@@ -1656,8 +1640,7 @@ static struct phy_driver broadcom_drivers[] = {
.link_change_notify = bcm54xx_link_change_notify,
.led_brightness_set = bcm_phy_led_brightness_set,
}, {
- .phy_id = PHY_ID_BCMAC131,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCMAC131),
.name = "Broadcom BCMAC131",
/* PHY_BASIC_FEATURES */
.config_init = brcm_fet_config_init,
@@ -1666,8 +1649,7 @@ static struct phy_driver broadcom_drivers[] = {
.suspend = brcm_fet_suspend,
.resume = brcm_fet_config_init,
}, {
- .phy_id = PHY_ID_BCM5241,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM5241),
.name = "Broadcom BCM5241",
/* PHY_BASIC_FEATURES */
.config_init = brcm_fet_config_init,
@@ -1676,8 +1658,7 @@ static struct phy_driver broadcom_drivers[] = {
.suspend = brcm_fet_suspend,
.resume = brcm_fet_config_init,
}, {
- .phy_id = PHY_ID_BCM5221,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM5221),
.name = "Broadcom BCM5221",
/* PHY_BASIC_FEATURES */
.config_init = brcm_fet_config_init,
@@ -1688,8 +1669,7 @@ static struct phy_driver broadcom_drivers[] = {
.config_aneg = bcm5221_config_aneg,
.read_status = bcm5221_read_status,
}, {
- .phy_id = PHY_ID_BCM5395,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM5395),
.name = "Broadcom BCM5395",
.flags = PHY_IS_INTERNAL,
/* PHY_GBIT_FEATURES */
@@ -1700,8 +1680,7 @@ static struct phy_driver broadcom_drivers[] = {
.link_change_notify = bcm54xx_link_change_notify,
.led_brightness_set = bcm_phy_led_brightness_set,
}, {
- .phy_id = PHY_ID_BCM53125,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM53125),
.name = "Broadcom BCM53125",
.flags = PHY_IS_INTERNAL,
/* PHY_GBIT_FEATURES */
@@ -1715,8 +1694,7 @@ static struct phy_driver broadcom_drivers[] = {
.link_change_notify = bcm54xx_link_change_notify,
.led_brightness_set = bcm_phy_led_brightness_set,
}, {
- .phy_id = PHY_ID_BCM53128,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM53128),
.name = "Broadcom BCM53128",
.flags = PHY_IS_INTERNAL,
/* PHY_GBIT_FEATURES */
@@ -1730,8 +1708,7 @@ static struct phy_driver broadcom_drivers[] = {
.link_change_notify = bcm54xx_link_change_notify,
.led_brightness_set = bcm_phy_led_brightness_set,
}, {
- .phy_id = PHY_ID_BCM89610,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM89610),
.name = "Broadcom BCM89610",
/* PHY_GBIT_FEATURES */
.get_sset_count = bcm_phy_get_sset_count,
@@ -1747,27 +1724,27 @@ static struct phy_driver broadcom_drivers[] = {
module_phy_driver(broadcom_drivers);
static const struct mdio_device_id __maybe_unused broadcom_tbl[] = {
- { PHY_ID_BCM5411, 0xfffffff0 },
- { PHY_ID_BCM5421, 0xfffffff0 },
- { PHY_ID_BCM54210E, 0xfffffff0 },
- { PHY_ID_BCM5461, 0xfffffff0 },
- { PHY_ID_BCM54612E, 0xfffffff0 },
- { PHY_ID_BCM54616S, 0xfffffff0 },
- { PHY_ID_BCM5464, 0xfffffff0 },
- { PHY_ID_BCM5481, 0xfffffff0 },
- { PHY_ID_BCM54810, 0xfffffff0 },
- { PHY_ID_BCM54811, 0xfffffff0 },
- { PHY_ID_BCM5482, 0xfffffff0 },
- { PHY_ID_BCM50610, 0xfffffff0 },
- { PHY_ID_BCM50610M, 0xfffffff0 },
- { PHY_ID_BCM57780, 0xfffffff0 },
- { PHY_ID_BCMAC131, 0xfffffff0 },
- { PHY_ID_BCM5221, 0xfffffff0 },
- { PHY_ID_BCM5241, 0xfffffff0 },
- { PHY_ID_BCM5395, 0xfffffff0 },
- { PHY_ID_BCM53125, 0xfffffff0 },
- { PHY_ID_BCM53128, 0xfffffff0 },
- { PHY_ID_BCM89610, 0xfffffff0 },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM5411) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM5421) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM54210E) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM5461) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM54612E) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM54616S) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM5464) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM5481) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM54810) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM54811) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM5482) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM50610) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM50610M) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM57780) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCMAC131) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM5221) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM5241) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM5395) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM53125) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM53128) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM89610) },
{ }
};
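The conversions above are behavior-preserving: PHY_ID_MATCH_MODEL() from
<linux/phy.h> expands to the same id/mask pair that was previously
open-coded, with GENMASK(31, 4) == 0xfffffff0 masking off the four
revision bits. A minimal sketch of the equivalence (table name
illustrative only):

/* From include/linux/phy.h: match the model, ignore the revision nibble */
#define PHY_ID_MATCH_MODEL(id) .phy_id = (id), .phy_id_mask = GENMASK(31, 4)

static const struct mdio_device_id example_tbl[] = {
	/* identical to the old { PHY_ID_BCM5411, 0xfffffff0 } entry */
	{ PHY_ID_MATCH_MODEL(PHY_ID_BCM5411) },
	{ }
};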
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index daab555721df..74396453f5bb 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -953,30 +953,6 @@ static void decode_status_frame(struct dp83640_private *dp83640,
}
}
-static void dp83640_free_clocks(void)
-{
- struct dp83640_clock *clock;
- struct list_head *this, *next;
-
- mutex_lock(&phyter_clocks_lock);
-
- list_for_each_safe(this, next, &phyter_clocks) {
- clock = list_entry(this, struct dp83640_clock, list);
- if (!list_empty(&clock->phylist)) {
- pr_warn("phy list non-empty while unloading\n");
- BUG();
- }
- list_del(&clock->list);
- mutex_destroy(&clock->extreg_lock);
- mutex_destroy(&clock->clock_lock);
- put_device(&clock->bus->dev);
- kfree(clock->caps.pin_config);
- kfree(clock);
- }
-
- mutex_unlock(&phyter_clocks_lock);
-}
-
static void dp83640_clock_init(struct dp83640_clock *clock, struct mii_bus *bus)
{
INIT_LIST_HEAD(&clock->list);
@@ -1479,6 +1455,7 @@ static void dp83640_remove(struct phy_device *phydev)
struct dp83640_clock *clock;
struct list_head *this, *next;
struct dp83640_private *tmp, *dp83640 = phydev->priv;
+ bool remove_clock = false;
if (phydev->mdio.addr == BROADCAST_ADDR)
return;
@@ -1506,11 +1483,27 @@ static void dp83640_remove(struct phy_device *phydev)
}
}
+ if (!clock->chosen && list_empty(&clock->phylist))
+ remove_clock = true;
+
dp83640_clock_put(clock);
kfree(dp83640);
+
+ if (remove_clock) {
+ mutex_lock(&phyter_clocks_lock);
+ list_del(&clock->list);
+ mutex_unlock(&phyter_clocks_lock);
+
+ mutex_destroy(&clock->extreg_lock);
+ mutex_destroy(&clock->clock_lock);
+ put_device(&clock->bus->dev);
+ kfree(clock->caps.pin_config);
+ kfree(clock);
+ }
}
-static struct phy_driver dp83640_driver = {
+static struct phy_driver dp83640_driver[] = {
+{
.phy_id = DP83640_PHY_ID,
.phy_id_mask = 0xfffffff0,
.name = "NatSemi DP83640",
@@ -1521,26 +1514,15 @@ static struct phy_driver dp83640_driver = {
.config_init = dp83640_config_init,
.config_intr = dp83640_config_intr,
.handle_interrupt = dp83640_handle_interrupt,
+},
};
-static int __init dp83640_init(void)
-{
- return phy_driver_register(&dp83640_driver, THIS_MODULE);
-}
-
-static void __exit dp83640_exit(void)
-{
- dp83640_free_clocks();
- phy_driver_unregister(&dp83640_driver);
-}
+module_phy_driver(dp83640_driver);
MODULE_DESCRIPTION("National Semiconductor DP83640 PHY driver");
MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>");
MODULE_LICENSE("GPL");
-module_init(dp83640_init);
-module_exit(dp83640_exit);
-
static const struct mdio_device_id __maybe_unused dp83640_tbl[] = {
{ DP83640_PHY_ID, 0xfffffff0 },
{ }
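The removed init/exit boilerplate is what module_phy_driver() generates;
roughly (simplified from <linux/phy.h>):

#define module_phy_driver(__phy_drivers)				\
static int __init phy_module_init(void)				\
{									\
	return phy_drivers_register(__phy_drivers,			\
				    ARRAY_SIZE(__phy_drivers),		\
				    THIS_MODULE);			\
}									\
module_init(phy_module_init);						\
static void __exit phy_module_exit(void)				\
{									\
	phy_drivers_unregister(__phy_drivers,				\
			       ARRAY_SIZE(__phy_drivers));		\
}									\
module_exit(phy_module_exit)

This is also why dp83640_driver becomes a one-element array above:
ARRAY_SIZE() needs an array, and phy_drivers_register() takes one. The
clock teardown formerly done at module exit now happens in
dp83640_remove() once the last PHY using a clock goes away.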
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index 033656d574b8..0e1b28f06f18 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -10,7 +10,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/device/faux.h>
#include <linux/list.h>
#include <linux/mii.h>
#include <linux/phy.h>
@@ -18,83 +17,65 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/of.h>
-#include <linux/gpio/consumer.h>
#include <linux/idr.h>
#include <linux/netdevice.h>
#include <linux/linkmode.h>
#include "swphy.h"
-struct fixed_mdio_bus {
- struct mii_bus *mii_bus;
- struct list_head phys;
-};
-
struct fixed_phy {
int addr;
struct phy_device *phydev;
struct fixed_phy_status status;
- bool no_carrier;
int (*link_update)(struct net_device *, struct fixed_phy_status *);
struct list_head node;
- struct gpio_desc *link_gpiod;
};
-static struct faux_device *fdev;
-static struct fixed_mdio_bus platform_fmb = {
- .phys = LIST_HEAD_INIT(platform_fmb.phys),
-};
+static struct mii_bus *fmb_mii_bus;
+static LIST_HEAD(fmb_phys);
+
+static struct fixed_phy *fixed_phy_find(int addr)
+{
+ struct fixed_phy *fp;
+
+ list_for_each_entry(fp, &fmb_phys, node) {
+ if (fp->addr == addr)
+ return fp;
+ }
+
+ return NULL;
+}
int fixed_phy_change_carrier(struct net_device *dev, bool new_carrier)
{
- struct fixed_mdio_bus *fmb = &platform_fmb;
struct phy_device *phydev = dev->phydev;
struct fixed_phy *fp;
if (!phydev || !phydev->mdio.bus)
return -EINVAL;
- list_for_each_entry(fp, &fmb->phys, node) {
- if (fp->addr == phydev->mdio.addr) {
- fp->no_carrier = !new_carrier;
- return 0;
- }
- }
- return -EINVAL;
-}
-EXPORT_SYMBOL_GPL(fixed_phy_change_carrier);
+ fp = fixed_phy_find(phydev->mdio.addr);
+ if (!fp)
+ return -EINVAL;
-static void fixed_phy_update(struct fixed_phy *fp)
-{
- if (!fp->no_carrier && fp->link_gpiod)
- fp->status.link = !!gpiod_get_value_cansleep(fp->link_gpiod);
+ fp->status.link = new_carrier;
+
+ return 0;
}
+EXPORT_SYMBOL_GPL(fixed_phy_change_carrier);
static int fixed_mdio_read(struct mii_bus *bus, int phy_addr, int reg_num)
{
- struct fixed_mdio_bus *fmb = bus->priv;
struct fixed_phy *fp;
- list_for_each_entry(fp, &fmb->phys, node) {
- if (fp->addr == phy_addr) {
- struct fixed_phy_status state;
-
- fp->status.link = !fp->no_carrier;
-
- /* Issue callback if user registered it. */
- if (fp->link_update)
- fp->link_update(fp->phydev->attached_dev,
- &fp->status);
-
- /* Check the GPIO for change in status */
- fixed_phy_update(fp);
- state = fp->status;
+ fp = fixed_phy_find(phy_addr);
+ if (!fp)
+ return 0xffff;
- return swphy_read_reg(reg_num, &state);
- }
- }
+ if (fp->link_update)
+ fp->link_update(fp->phydev->attached_dev, &fp->status);
- return 0xFFFF;
+ return swphy_read_reg(reg_num, &fp->status);
}
static int fixed_mdio_write(struct mii_bus *bus, int phy_addr, int reg_num,
@@ -112,31 +93,27 @@ int fixed_phy_set_link_update(struct phy_device *phydev,
int (*link_update)(struct net_device *,
struct fixed_phy_status *))
{
- struct fixed_mdio_bus *fmb = &platform_fmb;
struct fixed_phy *fp;
if (!phydev || !phydev->mdio.bus)
return -EINVAL;
- list_for_each_entry(fp, &fmb->phys, node) {
- if (fp->addr == phydev->mdio.addr) {
- fp->link_update = link_update;
- fp->phydev = phydev;
- return 0;
- }
- }
+ fp = fixed_phy_find(phydev->mdio.addr);
+ if (!fp)
+ return -ENOENT;
- return -ENOENT;
+ fp->link_update = link_update;
+ fp->phydev = phydev;
+
+ return 0;
}
EXPORT_SYMBOL_GPL(fixed_phy_set_link_update);
-static int fixed_phy_add_gpiod(unsigned int irq, int phy_addr,
- const struct fixed_phy_status *status,
- struct gpio_desc *gpiod)
+static int __fixed_phy_add(int phy_addr,
+ const struct fixed_phy_status *status)
{
- int ret;
- struct fixed_mdio_bus *fmb = &platform_fmb;
struct fixed_phy *fp;
+ int ret;
ret = swphy_validate_state(status);
if (ret < 0)
@@ -146,23 +123,17 @@ static int fixed_phy_add_gpiod(unsigned int irq, int phy_addr,
if (!fp)
return -ENOMEM;
- if (irq != PHY_POLL)
- fmb->mii_bus->irq[phy_addr] = irq;
-
fp->addr = phy_addr;
fp->status = *status;
- fp->link_gpiod = gpiod;
-
- fixed_phy_update(fp);
- list_add_tail(&fp->node, &fmb->phys);
+ list_add_tail(&fp->node, &fmb_phys);
return 0;
}
-int fixed_phy_add(int phy_addr, const struct fixed_phy_status *status)
+void fixed_phy_add(const struct fixed_phy_status *status)
{
- return fixed_phy_add_gpiod(PHY_POLL, phy_addr, status, NULL);
+ __fixed_phy_add(0, status);
}
EXPORT_SYMBOL_GPL(fixed_phy_add);
@@ -170,87 +141,39 @@ static DEFINE_IDA(phy_fixed_ida);
static void fixed_phy_del(int phy_addr)
{
- struct fixed_mdio_bus *fmb = &platform_fmb;
- struct fixed_phy *fp, *tmp;
-
- list_for_each_entry_safe(fp, tmp, &fmb->phys, node) {
- if (fp->addr == phy_addr) {
- list_del(&fp->node);
- if (fp->link_gpiod)
- gpiod_put(fp->link_gpiod);
- kfree(fp);
- ida_free(&phy_fixed_ida, phy_addr);
- return;
- }
- }
-}
+ struct fixed_phy *fp;
-#ifdef CONFIG_OF_GPIO
-static struct gpio_desc *fixed_phy_get_gpiod(struct device_node *np)
-{
- struct device_node *fixed_link_node;
- struct gpio_desc *gpiod;
-
- if (!np)
- return NULL;
-
- fixed_link_node = of_get_child_by_name(np, "fixed-link");
- if (!fixed_link_node)
- return NULL;
-
- /*
- * As the fixed link is just a device tree node without any
- * Linux device associated with it, we simply have obtain
- * the GPIO descriptor from the device tree like this.
- */
- gpiod = fwnode_gpiod_get_index(of_fwnode_handle(fixed_link_node),
- "link", 0, GPIOD_IN, "mdio");
- if (IS_ERR(gpiod) && PTR_ERR(gpiod) != -EPROBE_DEFER) {
- if (PTR_ERR(gpiod) != -ENOENT)
- pr_err("error getting GPIO for fixed link %pOF, proceed without\n",
- fixed_link_node);
- gpiod = NULL;
- }
- of_node_put(fixed_link_node);
+ fp = fixed_phy_find(phy_addr);
+ if (!fp)
+ return;
- return gpiod;
-}
-#else
-static struct gpio_desc *fixed_phy_get_gpiod(struct device_node *np)
-{
- return NULL;
+ list_del(&fp->node);
+ kfree(fp);
+ ida_free(&phy_fixed_ida, phy_addr);
}
-#endif
struct phy_device *fixed_phy_register(const struct fixed_phy_status *status,
struct device_node *np)
{
- struct fixed_mdio_bus *fmb = &platform_fmb;
- struct gpio_desc *gpiod;
struct phy_device *phy;
int phy_addr;
int ret;
- if (!fmb->mii_bus || fmb->mii_bus->state != MDIOBUS_REGISTERED)
+ if (!fmb_mii_bus || fmb_mii_bus->state != MDIOBUS_REGISTERED)
return ERR_PTR(-EPROBE_DEFER);
- /* Check if we have a GPIO associated with this fixed phy */
- gpiod = fixed_phy_get_gpiod(np);
- if (IS_ERR(gpiod))
- return ERR_CAST(gpiod);
-
/* Get the next available PHY address, up to PHY_MAX_ADDR */
phy_addr = ida_alloc_max(&phy_fixed_ida, PHY_MAX_ADDR - 1, GFP_KERNEL);
if (phy_addr < 0)
return ERR_PTR(phy_addr);
- ret = fixed_phy_add_gpiod(PHY_POLL, phy_addr, status, gpiod);
+ ret = __fixed_phy_add(phy_addr, status);
if (ret < 0) {
ida_free(&phy_fixed_ida, phy_addr);
return ERR_PTR(ret);
}
- phy = get_phy_device(fmb->mii_bus, phy_addr, false);
+ phy = get_phy_device(fmb_mii_bus, phy_addr, false);
if (IS_ERR(phy)) {
fixed_phy_del(phy_addr);
return ERR_PTR(-EINVAL);
@@ -309,56 +232,44 @@ void fixed_phy_unregister(struct phy_device *phy)
phy_device_remove(phy);
of_node_put(phy->mdio.dev.of_node);
fixed_phy_del(phy->mdio.addr);
+ phy_device_free(phy);
}
EXPORT_SYMBOL_GPL(fixed_phy_unregister);
static int __init fixed_mdio_bus_init(void)
{
- struct fixed_mdio_bus *fmb = &platform_fmb;
int ret;
- fdev = faux_device_create("Fixed MDIO bus", NULL, NULL);
- if (!fdev)
- return -ENODEV;
-
- fmb->mii_bus = mdiobus_alloc();
- if (fmb->mii_bus == NULL) {
- ret = -ENOMEM;
- goto err_mdiobus_reg;
- }
+ fmb_mii_bus = mdiobus_alloc();
+ if (!fmb_mii_bus)
+ return -ENOMEM;
- snprintf(fmb->mii_bus->id, MII_BUS_ID_SIZE, "fixed-0");
- fmb->mii_bus->name = "Fixed MDIO Bus";
- fmb->mii_bus->priv = fmb;
- fmb->mii_bus->parent = &fdev->dev;
- fmb->mii_bus->read = &fixed_mdio_read;
- fmb->mii_bus->write = &fixed_mdio_write;
- fmb->mii_bus->phy_mask = ~0;
+ snprintf(fmb_mii_bus->id, MII_BUS_ID_SIZE, "fixed-0");
+ fmb_mii_bus->name = "Fixed MDIO Bus";
+ fmb_mii_bus->read = &fixed_mdio_read;
+ fmb_mii_bus->write = &fixed_mdio_write;
+ fmb_mii_bus->phy_mask = ~0;
- ret = mdiobus_register(fmb->mii_bus);
+ ret = mdiobus_register(fmb_mii_bus);
if (ret)
goto err_mdiobus_alloc;
return 0;
err_mdiobus_alloc:
- mdiobus_free(fmb->mii_bus);
-err_mdiobus_reg:
- faux_device_destroy(fdev);
+ mdiobus_free(fmb_mii_bus);
return ret;
}
module_init(fixed_mdio_bus_init);
static void __exit fixed_mdio_bus_exit(void)
{
- struct fixed_mdio_bus *fmb = &platform_fmb;
struct fixed_phy *fp, *tmp;
- mdiobus_unregister(fmb->mii_bus);
- mdiobus_free(fmb->mii_bus);
- faux_device_destroy(fdev);
+ mdiobus_unregister(fmb_mii_bus);
+ mdiobus_free(fmb_mii_bus);
- list_for_each_entry_safe(fp, tmp, &fmb->phys, node) {
+ list_for_each_entry_safe(fp, tmp, &fmb_phys, node) {
list_del(&fp->node);
kfree(fp);
}
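For orientation, a consumer of this API registers a fixed PHY by
describing the link it should emulate; a minimal sketch against the
signatures as modified above (error handling omitted, function name
illustrative):

#include <linux/phy_fixed.h>

static struct phy_device *example_register_fixed_phy(void)
{
	/* Emulate an always-up 1G full-duplex link */
	struct fixed_phy_status status = {
		.link = 1,
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
	};

	/* Allocates the next free address on the "fixed-0" bus */
	return fixed_phy_register(&status, NULL);
}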
diff --git a/drivers/net/phy/marvell-88x2222.c b/drivers/net/phy/marvell-88x2222.c
index fad2f54c1eac..894bcee61e65 100644
--- a/drivers/net/phy/marvell-88x2222.c
+++ b/drivers/net/phy/marvell-88x2222.c
@@ -475,21 +475,20 @@ static int mv2222_config_init(struct phy_device *phydev)
static int mv2222_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
{
- DECLARE_PHY_INTERFACE_MASK(interfaces);
struct phy_device *phydev = upstream;
+ const struct sfp_module_caps *caps;
phy_interface_t sfp_interface;
struct mv2222_data *priv;
struct device *dev;
int ret;
- __ETHTOOL_DECLARE_LINK_MODE_MASK(sfp_supported) = { 0, };
-
priv = phydev->priv;
dev = &phydev->mdio.dev;
- sfp_parse_support(phydev->sfp_bus, id, sfp_supported, interfaces);
- phydev->port = sfp_parse_port(phydev->sfp_bus, id, sfp_supported);
- sfp_interface = sfp_select_interface(phydev->sfp_bus, sfp_supported);
+ caps = sfp_get_module_caps(phydev->sfp_bus);
+
+ phydev->port = caps->port;
+ sfp_interface = sfp_select_interface(phydev->sfp_bus, caps->link_modes);
dev_info(dev, "%s SFP module inserted\n", phy_modes(sfp_interface));
@@ -502,7 +501,7 @@ static int mv2222_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
}
priv->line_interface = sfp_interface;
- linkmode_and(priv->supported, phydev->supported, sfp_supported);
+ linkmode_and(priv->supported, phydev->supported, caps->link_modes);
ret = mv2222_config_line(phydev);
if (ret < 0)
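Here and in the Marvell drivers below, per-driver EEPROM parsing is
replaced by sfp_get_module_caps(), which returns capabilities the SFP
core has already parsed once at module-insert time. A sketch of the
assumed shape of the structure (field set inferred from its usage in
this diff; see include/linux/sfp.h for the authoritative definition):

struct sfp_module_caps {
	DECLARE_PHY_INTERFACE_MASK(interfaces);
	__ETHTOOL_DECLARE_LINK_MODE_MASK(link_modes);
	unsigned int port;	/* PORT_* value, e.g. PORT_FIBRE */
};

const struct sfp_module_caps *sfp_get_module_caps(struct sfp_bus *bus);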
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 623292948fa7..c248c90510ae 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -1902,6 +1902,43 @@ error:
return err;
}
+/* m88e1510_resume
+ *
+ * The 88e1510 PHY has an erratum where the PHY downshift counter is not
+ * cleared after the PHY is suspended (BMCR_PDOWN set) and later resumed
+ * (BMCR_PDOWN cleared). This can cause the link to intermittently downshift
+ * to a lower speed.
+ *
+ * Disabling and re-enabling the downshift feature clears the counter,
+ * allowing the PHY to retry gigabit link negotiation up to the programmed
+ * retry count before downshifting. This behavior has been observed on
+ * copper links.
+ */
+static int m88e1510_resume(struct phy_device *phydev)
+{
+ int err;
+ u8 cnt = 0;
+
+ err = marvell_resume(phydev);
+ if (err < 0)
+ return err;
+
+ /* read downshift counter value */
+ err = m88e1011_get_downshift(phydev, &cnt);
+ if (err < 0)
+ return err;
+
+ if (cnt) {
+ /* disable downshift to clear the counter */
+ err = m88e1011_set_downshift(phydev, 0);
+ if (err < 0)
+ return err;
+
+ /* re-enable downshift with the previous counter value */
+ err = m88e1011_set_downshift(phydev, cnt);
+ }
+
+ return err;
+}
+
static int marvell_aneg_done(struct phy_device *phydev)
{
int retval = phy_read(phydev, MII_M1011_PHY_STATUS);
@@ -3563,20 +3600,18 @@ static int marvell_probe(struct phy_device *phydev)
static int m88e1510_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
{
- DECLARE_PHY_INTERFACE_MASK(interfaces);
struct phy_device *phydev = upstream;
+ const struct sfp_module_caps *caps;
phy_interface_t interface;
struct device *dev;
int oldpage;
int ret = 0;
u16 mode;
- __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
-
dev = &phydev->mdio.dev;
- sfp_parse_support(phydev->sfp_bus, id, supported, interfaces);
- interface = sfp_select_interface(phydev->sfp_bus, supported);
+ caps = sfp_get_module_caps(phydev->sfp_bus);
+ interface = sfp_select_interface(phydev->sfp_bus, caps->link_modes);
dev_info(dev, "%s SFP module inserted\n", phy_modes(interface));
@@ -3923,7 +3958,7 @@ static struct phy_driver marvell_drivers[] = {
.handle_interrupt = marvell_handle_interrupt,
.get_wol = m88e1318_get_wol,
.set_wol = m88e1318_set_wol,
- .resume = marvell_resume,
+ .resume = m88e1510_resume,
.suspend = marvell_suspend,
.read_page = marvell_read_page,
.write_page = marvell_write_page,
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index 13e81dff42c1..8fd42131cdbf 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -466,12 +466,11 @@ static int mv3310_set_edpd(struct phy_device *phydev, u16 edpd)
static int mv3310_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
{
struct phy_device *phydev = upstream;
- __ETHTOOL_DECLARE_LINK_MODE_MASK(support) = { 0, };
- DECLARE_PHY_INTERFACE_MASK(interfaces);
+ const struct sfp_module_caps *caps;
phy_interface_t iface;
- sfp_parse_support(phydev->sfp_bus, id, support, interfaces);
- iface = sfp_select_interface(phydev->sfp_bus, support);
+ caps = sfp_get_module_caps(phydev->sfp_bus);
+ iface = sfp_select_interface(phydev->sfp_bus, caps->link_modes);
if (iface != PHY_INTERFACE_MODE_10GBASER) {
dev_err(&phydev->mdio.dev, "incompatible SFP module inserted\n");
diff --git a/drivers/net/phy/mdio-boardinfo.c b/drivers/net/phy/mdio-boardinfo.c
deleted file mode 100644
index d3184e8f12ec..000000000000
--- a/drivers/net/phy/mdio-boardinfo.c
+++ /dev/null
@@ -1,79 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * mdio-boardinfo - Collect pre-declarations for MDIO devices
- */
-
-#include <linux/export.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/mutex.h>
-#include <linux/phy.h>
-#include <linux/slab.h>
-
-#include "mdio-boardinfo.h"
-
-static LIST_HEAD(mdio_board_list);
-static DEFINE_MUTEX(mdio_board_lock);
-
-struct mdio_board_entry {
- struct list_head list;
- struct mdio_board_info board_info;
-};
-
-/**
- * mdiobus_setup_mdiodev_from_board_info - create and setup MDIO devices
- * from pre-collected board specific MDIO information
- * @bus: Bus the board_info belongs to
- * @cb: Callback to create device on bus
- * Context: can sleep
- */
-void mdiobus_setup_mdiodev_from_board_info(struct mii_bus *bus,
- int (*cb)
- (struct mii_bus *bus,
- struct mdio_board_info *bi))
-{
- struct mdio_board_entry *be, *tmp;
-
- mutex_lock(&mdio_board_lock);
- list_for_each_entry_safe(be, tmp, &mdio_board_list, list) {
- struct mdio_board_info *bi = &be->board_info;
-
- if (strcmp(bus->id, bi->bus_id))
- continue;
-
- mutex_unlock(&mdio_board_lock);
- cb(bus, bi);
- mutex_lock(&mdio_board_lock);
- }
- mutex_unlock(&mdio_board_lock);
-}
-EXPORT_SYMBOL(mdiobus_setup_mdiodev_from_board_info);
-
-/**
- * mdiobus_register_board_info - register MDIO devices for a given board
- * @info: array of devices descriptors
- * @n: number of descriptors provided
- * Context: can sleep
- *
- * The board info passed can be marked with __initdata but be pointers
- * such as platform_data etc. are copied as-is
- */
-int mdiobus_register_board_info(const struct mdio_board_info *info,
- unsigned int n)
-{
- struct mdio_board_entry *be;
-
- be = kcalloc(n, sizeof(*be), GFP_KERNEL);
- if (!be)
- return -ENOMEM;
-
- for (int i = 0; i < n; i++, be++) {
- be->board_info = info[i];
- mutex_lock(&mdio_board_lock);
- list_add_tail(&be->list, &mdio_board_list);
- mutex_unlock(&mdio_board_lock);
- }
-
- return 0;
-}
-EXPORT_SYMBOL(mdiobus_register_board_info);
diff --git a/drivers/net/phy/mdio-boardinfo.h b/drivers/net/phy/mdio-boardinfo.h
deleted file mode 100644
index 0878b77878d4..000000000000
--- a/drivers/net/phy/mdio-boardinfo.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * mdio-boardinfo.h - board info interface internal to the mdio_bus
- * component
- */
-
-#ifndef __MDIO_BOARD_INFO_H
-#define __MDIO_BOARD_INFO_H
-
-struct mii_bus;
-struct mdio_board_info;
-
-void mdiobus_setup_mdiodev_from_board_info(struct mii_bus *bus,
- int (*cb)
- (struct mii_bus *bus,
- struct mdio_board_info *bi));
-
-#endif /* __MDIO_BOARD_INFO_H */
diff --git a/drivers/net/phy/mdio_bus_provider.c b/drivers/net/phy/mdio_bus_provider.c
index f43973e73ea3..a2391d4b7e5c 100644
--- a/drivers/net/phy/mdio_bus_provider.c
+++ b/drivers/net/phy/mdio_bus_provider.c
@@ -29,8 +29,6 @@
#include <linux/uaccess.h>
#include <linux/unistd.h>
-#include "mdio-boardinfo.h"
-
/**
* mdiobus_alloc_size - allocate a mii_bus structure
* @size: extra amount of memory to allocate for private storage.
@@ -132,35 +130,6 @@ static void of_mdiobus_link_mdiodev(struct mii_bus *bus,
}
#endif
-/**
- * mdiobus_create_device - create a full MDIO device given
- * a mdio_board_info structure
- * @bus: MDIO bus to create the devices on
- * @bi: mdio_board_info structure describing the devices
- *
- * Returns 0 on success or < 0 on error.
- */
-static int mdiobus_create_device(struct mii_bus *bus,
- struct mdio_board_info *bi)
-{
- struct mdio_device *mdiodev;
- int ret = 0;
-
- mdiodev = mdio_device_create(bus, bi->mdio_addr);
- if (IS_ERR(mdiodev))
- return -ENODEV;
-
- strscpy(mdiodev->modalias, bi->modalias,
- sizeof(mdiodev->modalias));
- mdiodev->dev.platform_data = (void *)bi->platform_data;
-
- ret = mdio_device_register(mdiodev);
- if (ret)
- mdio_device_free(mdiodev);
-
- return ret;
-}
-
static struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr, bool c45)
{
struct phy_device *phydev = ERR_PTR(-ENODEV);
@@ -404,8 +373,6 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
goto error;
}
- mdiobus_setup_mdiodev_from_board_info(bus, mdiobus_create_device);
-
bus->state = MDIOBUS_REGISTERED;
dev_dbg(&bus->dev, "probed\n");
return 0;
diff --git a/drivers/net/phy/mediatek/mtk-2p5ge.c b/drivers/net/phy/mediatek/mtk-2p5ge.c
index e147eab523ef..de8a41a1841d 100644
--- a/drivers/net/phy/mediatek/mtk-2p5ge.c
+++ b/drivers/net/phy/mediatek/mtk-2p5ge.c
@@ -249,8 +249,80 @@ static int mt798x_2p5ge_phy_get_rate_matching(struct phy_device *phydev,
return RATE_MATCH_PAUSE;
}
+static const unsigned long supported_triggers =
+ BIT(TRIGGER_NETDEV_FULL_DUPLEX) |
+ BIT(TRIGGER_NETDEV_LINK) |
+ BIT(TRIGGER_NETDEV_LINK_10) |
+ BIT(TRIGGER_NETDEV_LINK_100) |
+ BIT(TRIGGER_NETDEV_LINK_1000) |
+ BIT(TRIGGER_NETDEV_LINK_2500) |
+ BIT(TRIGGER_NETDEV_RX) |
+ BIT(TRIGGER_NETDEV_TX);
+
+static int mt798x_2p5ge_phy_led_blink_set(struct phy_device *phydev, u8 index,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ bool blinking = false;
+ int err = 0;
+
+ err = mtk_phy_led_num_dly_cfg(index, delay_on, delay_off, &blinking);
+ if (err < 0)
+ return err;
+
+ err = mtk_phy_hw_led_blink_set(phydev, index, blinking);
+ if (err)
+ return err;
+
+ if (blinking)
+ mtk_phy_hw_led_on_set(phydev, index, MTK_2P5GPHY_LED_ON_MASK,
+ false);
+
+ return 0;
+}
+
+static int mt798x_2p5ge_phy_led_brightness_set(struct phy_device *phydev,
+ u8 index,
+ enum led_brightness value)
+{
+ int err;
+
+ err = mtk_phy_hw_led_blink_set(phydev, index, false);
+ if (err)
+ return err;
+
+ return mtk_phy_hw_led_on_set(phydev, index, MTK_2P5GPHY_LED_ON_MASK,
+ (value != LED_OFF));
+}
+
+static int mt798x_2p5ge_phy_led_hw_is_supported(struct phy_device *phydev,
+ u8 index, unsigned long rules)
+{
+ return mtk_phy_led_hw_is_supported(phydev, index, rules,
+ supported_triggers);
+}
+
+static int mt798x_2p5ge_phy_led_hw_control_get(struct phy_device *phydev,
+ u8 index, unsigned long *rules)
+{
+ return mtk_phy_led_hw_ctrl_get(phydev, index, rules,
+ MTK_2P5GPHY_LED_ON_SET,
+ MTK_2P5GPHY_LED_RX_BLINK_SET,
+ MTK_2P5GPHY_LED_TX_BLINK_SET);
+}
+
+static int mt798x_2p5ge_phy_led_hw_control_set(struct phy_device *phydev,
+ u8 index, unsigned long rules)
+{
+ return mtk_phy_led_hw_ctrl_set(phydev, index, rules,
+ MTK_2P5GPHY_LED_ON_SET,
+ MTK_2P5GPHY_LED_RX_BLINK_SET,
+ MTK_2P5GPHY_LED_TX_BLINK_SET);
+}
+
static int mt798x_2p5ge_phy_probe(struct phy_device *phydev)
{
+ struct mtk_socphy_priv *priv;
struct pinctrl *pinctrl;
int ret;
@@ -273,19 +345,34 @@ static int mt798x_2p5ge_phy_probe(struct phy_device *phydev)
if (ret < 0)
return ret;
- /* Setup LED */
+ /* Set up the LEDs. By default, LED0 is on/off when the link is up/down,
+ * and LED1 blinks while tx/rx transmission takes place.
+ */
phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, MTK_PHY_LED0_ON_CTRL,
- MTK_PHY_LED_ON_POLARITY | MTK_PHY_LED_ON_LINK10 |
- MTK_PHY_LED_ON_LINK100 | MTK_PHY_LED_ON_LINK1000 |
- MTK_PHY_LED_ON_LINK2500);
- phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, MTK_PHY_LED1_ON_CTRL,
- MTK_PHY_LED_ON_FDX | MTK_PHY_LED_ON_HDX);
+ MTK_PHY_LED_ON_POLARITY | MTK_2P5GPHY_LED_ON_SET);
+ phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, MTK_PHY_LED0_BLINK_CTRL,
+ MTK_2P5GPHY_LED_TX_BLINK_SET |
+ MTK_2P5GPHY_LED_RX_BLINK_SET);
+ phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, MTK_PHY_LED1_ON_CTRL,
+ MTK_PHY_LED_ON_FDX | MTK_PHY_LED_ON_HDX |
+ MTK_2P5GPHY_LED_ON_SET);
+ phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, MTK_PHY_LED1_BLINK_CTRL,
+ MTK_2P5GPHY_LED_TX_BLINK_SET |
+ MTK_2P5GPHY_LED_RX_BLINK_SET);
/* Switch pinctrl after setting polarity to avoid bogus blinking */
pinctrl = devm_pinctrl_get_select(&phydev->mdio.dev, "i2p5gbe-led");
if (IS_ERR(pinctrl))
dev_err(&phydev->mdio.dev, "Fail to set LED pins!\n");
+ priv = devm_kzalloc(&phydev->mdio.dev, sizeof(struct mtk_socphy_priv),
+ GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ phydev->priv = priv;
+
+ mtk_phy_leds_state_init(phydev);
+
return 0;
}
@@ -303,6 +390,11 @@ static struct phy_driver mtk_2p5gephy_driver[] = {
.resume = genphy_resume,
.read_page = mtk_phy_read_page,
.write_page = mtk_phy_write_page,
+ .led_blink_set = mt798x_2p5ge_phy_led_blink_set,
+ .led_brightness_set = mt798x_2p5ge_phy_led_brightness_set,
+ .led_hw_is_supported = mt798x_2p5ge_phy_led_hw_is_supported,
+ .led_hw_control_get = mt798x_2p5ge_phy_led_hw_control_get,
+ .led_hw_control_set = mt798x_2p5ge_phy_led_hw_control_set,
},
};
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 605b0315b4cb..79ce3eb6752b 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -107,6 +107,7 @@
#define LAN8814_INTC 0x18
#define LAN8814_INTS 0x1B
+#define LAN8814_INT_FLF BIT(15)
#define LAN8814_INT_LINK_DOWN BIT(2)
#define LAN8814_INT_LINK_UP BIT(0)
#define LAN8814_INT_LINK (LAN8814_INT_LINK_UP |\
@@ -266,6 +267,8 @@
#define LAN8814_LED_CTRL_1 0x0
#define LAN8814_LED_CTRL_1_KSZ9031_LED_MODE_ BIT(6)
+#define LAN8814_LED_CTRL_2 0x1
+#define LAN8814_LED_CTRL_2_LED1_COM_DIS BIT(8)
/* PHY Control 1 */
#define MII_KSZPHY_CTRL_1 0x1e
@@ -362,6 +365,8 @@
/* Delay used to get the seconds part from the LTC */
#define LAN8841_GET_SEC_LTC_DELAY (500 * NSEC_PER_MSEC)
+#define LAN8842_REV_8832 0x8832
+
struct kszphy_hw_stat {
const char *string;
u8 reg;
@@ -448,6 +453,19 @@ struct kszphy_priv {
struct kszphy_phy_stats phy_stats;
};
+struct lan8842_phy_stats {
+ u64 rx_packets;
+ u64 rx_errors;
+ u64 tx_packets;
+ u64 tx_errors;
+};
+
+struct lan8842_priv {
+ struct lan8842_phy_stats phy_stats;
+ struct kszphy_ptp_priv ptp_priv;
+ u16 rev;
+};
+
static const struct kszphy_type lan8814_type = {
.led_mode_reg = ~LAN8814_LED_CTRL_1,
.cable_diag_reg = LAN8814_CABLE_DIAG,
@@ -2790,6 +2808,60 @@ static int ksz886x_cable_test_get_status(struct phy_device *phydev,
return ret;
}
+/**
+ * LAN8814_PAGE_PCS - Selects Extended Page 0.
+ *
+ * This page contains timers used for auto-negotiation, debug registers, and
+ * a register to configure fast link failure.
+ */
+#define LAN8814_PAGE_PCS 0
+
+/**
+ * LAN8814_PAGE_AFE_PMA - Selects Extended Page 1.
+ *
+ * This page appears to control the Analog Front-End (AFE) and Physical
+ * Medium Attachment (PMA) layers. It is used to access registers like
+ * LAN8814_PD_CONTROLS and LAN8814_LINK_QUALITY.
+ */
+#define LAN8814_PAGE_AFE_PMA 1
+
+/**
+ * LAN8814_PAGE_PCS_DIGITAL - Selects Extended Page 2.
+ *
+ * This page seems dedicated to the Physical Coding Sublayer (PCS) and other
+ * digital logic. It is used for MDI-X alignment (LAN8814_ALIGN_SWAP) and EEE
+ * state (LAN8814_EEE_STATE) in the LAN8814, and is repurposed for statistics
+ * and self-test counters in the LAN8842.
+ */
+#define LAN8814_PAGE_PCS_DIGITAL 2
+
+/**
+ * LAN8814_PAGE_COMMON_REGS - Selects Extended Page 4.
+ *
+ * This page contains device-common registers that affect the entire chip.
+ * It includes controls for chip-level resets, strap status, GPIO,
+ * QSGMII, the shared 1588 PTP block, and the PVT monitor.
+ */
+#define LAN8814_PAGE_COMMON_REGS 4
+
+/**
+ * LAN8814_PAGE_PORT_REGS - Selects Extended Page 5.
+ *
+ * This page contains port-specific registers that must be accessed
+ * on a per-port basis. It includes controls for port LEDs, QSGMII PCS,
+ * rate adaptation FIFOs, and the per-port 1588 TSU block.
+ */
+#define LAN8814_PAGE_PORT_REGS 5
+
+/**
+ * LAN8814_PAGE_SYSTEM_CTRL - Selects Extended Page 31.
+ *
+ * This page appears to hold fundamental system or global controls. In the
+ * driver, it is used by the related LAN8804 to access the
+ * LAN8814_CLOCK_MANAGEMENT register.
+ */
+#define LAN8814_PAGE_SYSTEM_CTRL 31
+
#define LAN_EXT_PAGE_ACCESS_CONTROL 0x16
#define LAN_EXT_PAGE_ACCESS_ADDRESS_DATA 0x17
#define LAN_EXT_PAGE_ACCESS_CTRL_EP_FUNC 0x4000
@@ -2840,6 +2912,27 @@ static int lanphy_write_page_reg(struct phy_device *phydev, int page, u16 addr,
return val;
}
+static int lanphy_modify_page_reg(struct phy_device *phydev, int page, u16 addr,
+ u16 mask, u16 set)
+{
+ int ret;
+
+ phy_lock_mdio_bus(phydev);
+ __phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL, page);
+ __phy_write(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA, addr);
+ __phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL,
+ (page | LAN_EXT_PAGE_ACCESS_CTRL_EP_FUNC));
+ ret = __phy_modify_changed(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA,
+ mask, set);
+ phy_unlock_mdio_bus(phydev);
+
+ if (ret < 0)
+ phydev_err(phydev, "__phy_modify_changed() failed: %pe\n",
+ ERR_PTR(ret));
+
+ return ret;
+}
+
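The helper above performs the whole extended-page sequence (select the
page, latch the register address, switch the control register to data
mode, then modify) under one phy_lock_mdio_bus() critical section, so
the read-modify-write can no longer race with another accessor between
the read and the write. Ignoring page selection and locking, it is
semantically this sketch (simplified; __phy_modify_changed() also
returns 1 when the value actually changed):

static int example_modify(struct phy_device *phydev, u32 reg, u16 mask, u16 set)
{
	int val = __phy_read(phydev, reg);

	if (val < 0)
		return val;

	/* Clear the masked bits, then OR in the new ones */
	return __phy_write(phydev, reg, (val & ~mask) | set);
}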
static int lan8814_config_ts_intr(struct phy_device *phydev, bool enable)
{
u16 val = 0;
@@ -2850,35 +2943,46 @@ static int lan8814_config_ts_intr(struct phy_device *phydev, bool enable)
PTP_TSU_INT_EN_PTP_RX_TS_EN_ |
PTP_TSU_INT_EN_PTP_RX_TS_OVRFL_EN_;
- return lanphy_write_page_reg(phydev, 5, PTP_TSU_INT_EN, val);
+ return lanphy_write_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_TSU_INT_EN, val);
}
static void lan8814_ptp_rx_ts_get(struct phy_device *phydev,
u32 *seconds, u32 *nano_seconds, u16 *seq_id)
{
- *seconds = lanphy_read_page_reg(phydev, 5, PTP_RX_INGRESS_SEC_HI);
+ *seconds = lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_RX_INGRESS_SEC_HI);
*seconds = (*seconds << 16) |
- lanphy_read_page_reg(phydev, 5, PTP_RX_INGRESS_SEC_LO);
+ lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_RX_INGRESS_SEC_LO);
- *nano_seconds = lanphy_read_page_reg(phydev, 5, PTP_RX_INGRESS_NS_HI);
+ *nano_seconds = lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_RX_INGRESS_NS_HI);
*nano_seconds = ((*nano_seconds & 0x3fff) << 16) |
- lanphy_read_page_reg(phydev, 5, PTP_RX_INGRESS_NS_LO);
+ lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_RX_INGRESS_NS_LO);
- *seq_id = lanphy_read_page_reg(phydev, 5, PTP_RX_MSG_HEADER2);
+ *seq_id = lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_RX_MSG_HEADER2);
}
static void lan8814_ptp_tx_ts_get(struct phy_device *phydev,
u32 *seconds, u32 *nano_seconds, u16 *seq_id)
{
- *seconds = lanphy_read_page_reg(phydev, 5, PTP_TX_EGRESS_SEC_HI);
+ *seconds = lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_TX_EGRESS_SEC_HI);
*seconds = *seconds << 16 |
- lanphy_read_page_reg(phydev, 5, PTP_TX_EGRESS_SEC_LO);
+ lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_TX_EGRESS_SEC_LO);
- *nano_seconds = lanphy_read_page_reg(phydev, 5, PTP_TX_EGRESS_NS_HI);
+ *nano_seconds = lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_TX_EGRESS_NS_HI);
*nano_seconds = ((*nano_seconds & 0x3fff) << 16) |
- lanphy_read_page_reg(phydev, 5, PTP_TX_EGRESS_NS_LO);
+ lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_TX_EGRESS_NS_LO);
- *seq_id = lanphy_read_page_reg(phydev, 5, PTP_TX_MSG_HEADER2);
+ *seq_id = lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_TX_MSG_HEADER2);
}
static int lan8814_ts_info(struct mii_timestamper *mii_ts, struct kernel_ethtool_ts_info *info)
@@ -2912,11 +3016,11 @@ static void lan8814_flush_fifo(struct phy_device *phydev, bool egress)
int i;
for (i = 0; i < FIFO_SIZE; ++i)
- lanphy_read_page_reg(phydev, 5,
+ lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
egress ? PTP_TX_MSG_HEADER2 : PTP_RX_MSG_HEADER2);
/* Read to clear overflow status bit */
- lanphy_read_page_reg(phydev, 5, PTP_TSU_INT_STS);
+ lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS, PTP_TSU_INT_STS);
}
static int lan8814_hwtstamp(struct mii_timestamper *mii_ts,
@@ -2928,7 +3032,6 @@ static int lan8814_hwtstamp(struct mii_timestamper *mii_ts,
struct lan8814_ptp_rx_ts *rx_ts, *tmp;
int txcfg = 0, rxcfg = 0;
int pkt_ts_enable;
- int tx_mod;
ptp_priv->hwts_tx_type = config->tx_type;
ptp_priv->rx_filter = config->rx_filter;
@@ -2967,21 +3070,28 @@ static int lan8814_hwtstamp(struct mii_timestamper *mii_ts,
rxcfg |= PTP_RX_PARSE_CONFIG_IPV4_EN_ | PTP_RX_PARSE_CONFIG_IPV6_EN_;
txcfg |= PTP_TX_PARSE_CONFIG_IPV4_EN_ | PTP_TX_PARSE_CONFIG_IPV6_EN_;
}
- lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_RX_PARSE_CONFIG, rxcfg);
- lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_TX_PARSE_CONFIG, txcfg);
+ lanphy_write_page_reg(ptp_priv->phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_RX_PARSE_CONFIG, rxcfg);
+ lanphy_write_page_reg(ptp_priv->phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_TX_PARSE_CONFIG, txcfg);
pkt_ts_enable = PTP_TIMESTAMP_EN_SYNC_ | PTP_TIMESTAMP_EN_DREQ_ |
PTP_TIMESTAMP_EN_PDREQ_ | PTP_TIMESTAMP_EN_PDRES_;
- lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_RX_TIMESTAMP_EN, pkt_ts_enable);
- lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_TX_TIMESTAMP_EN, pkt_ts_enable);
+ lanphy_write_page_reg(ptp_priv->phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_RX_TIMESTAMP_EN, pkt_ts_enable);
+ lanphy_write_page_reg(ptp_priv->phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_TX_TIMESTAMP_EN, pkt_ts_enable);
- tx_mod = lanphy_read_page_reg(ptp_priv->phydev, 5, PTP_TX_MOD);
if (ptp_priv->hwts_tx_type == HWTSTAMP_TX_ONESTEP_SYNC) {
- lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_TX_MOD,
- tx_mod | PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_);
+ lanphy_modify_page_reg(ptp_priv->phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_TX_MOD,
+ PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_,
+ PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_);
} else if (ptp_priv->hwts_tx_type == HWTSTAMP_TX_ON) {
- lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_TX_MOD,
- tx_mod & ~PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_);
+ lanphy_modify_page_reg(ptp_priv->phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_TX_MOD,
+ PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_,
+ 0);
}
if (config->rx_filter != HWTSTAMP_FILTER_NONE)
@@ -3103,29 +3213,41 @@ static bool lan8814_rxtstamp(struct mii_timestamper *mii_ts, struct sk_buff *skb
static void lan8814_ptp_clock_set(struct phy_device *phydev,
time64_t sec, u32 nsec)
{
- lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_SEC_LO, lower_16_bits(sec));
- lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_SEC_MID, upper_16_bits(sec));
- lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_SEC_HI, upper_32_bits(sec));
- lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_NS_LO, lower_16_bits(nsec));
- lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_NS_HI, upper_16_bits(nsec));
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_CLOCK_SET_SEC_LO, lower_16_bits(sec));
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_CLOCK_SET_SEC_MID, upper_16_bits(sec));
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_CLOCK_SET_SEC_HI, upper_32_bits(sec));
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_CLOCK_SET_NS_LO, lower_16_bits(nsec));
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_CLOCK_SET_NS_HI, upper_16_bits(nsec));
- lanphy_write_page_reg(phydev, 4, PTP_CMD_CTL, PTP_CMD_CTL_PTP_CLOCK_LOAD_);
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, PTP_CMD_CTL,
+ PTP_CMD_CTL_PTP_CLOCK_LOAD_);
}
static void lan8814_ptp_clock_get(struct phy_device *phydev,
time64_t *sec, u32 *nsec)
{
- lanphy_write_page_reg(phydev, 4, PTP_CMD_CTL, PTP_CMD_CTL_PTP_CLOCK_READ_);
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, PTP_CMD_CTL,
+ PTP_CMD_CTL_PTP_CLOCK_READ_);
- *sec = lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_SEC_HI);
+ *sec = lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_CLOCK_READ_SEC_HI);
*sec <<= 16;
- *sec |= lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_SEC_MID);
+ *sec |= lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_CLOCK_READ_SEC_MID);
*sec <<= 16;
- *sec |= lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_SEC_LO);
+ *sec |= lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_CLOCK_READ_SEC_LO);
- *nsec = lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_NS_HI);
+ *nsec = lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_CLOCK_READ_NS_HI);
*nsec <<= 16;
- *nsec |= lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_NS_LO);
+ *nsec |= lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_CLOCK_READ_NS_LO);
}
static int lan8814_ptpci_gettime64(struct ptp_clock_info *ptpci,
@@ -3164,14 +3286,18 @@ static void lan8814_ptp_set_target(struct phy_device *phydev, int event,
s64 start_sec, u32 start_nsec)
{
/* Set the start time */
- lanphy_write_page_reg(phydev, 4, LAN8814_PTP_CLOCK_TARGET_SEC_LO(event),
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8814_PTP_CLOCK_TARGET_SEC_LO(event),
lower_16_bits(start_sec));
- lanphy_write_page_reg(phydev, 4, LAN8814_PTP_CLOCK_TARGET_SEC_HI(event),
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8814_PTP_CLOCK_TARGET_SEC_HI(event),
upper_16_bits(start_sec));
- lanphy_write_page_reg(phydev, 4, LAN8814_PTP_CLOCK_TARGET_NS_LO(event),
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8814_PTP_CLOCK_TARGET_NS_LO(event),
lower_16_bits(start_nsec));
- lanphy_write_page_reg(phydev, 4, LAN8814_PTP_CLOCK_TARGET_NS_HI(event),
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8814_PTP_CLOCK_TARGET_NS_HI(event),
upper_16_bits(start_nsec) & 0x3fff);
}
@@ -3269,9 +3395,11 @@ static void lan8814_ptp_clock_step(struct phy_device *phydev,
adjustment_value_lo = adjustment_value & 0xffff;
adjustment_value_hi = (adjustment_value >> 16) & 0x3fff;
- lanphy_write_page_reg(phydev, 4, PTP_LTC_STEP_ADJ_LO,
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_LTC_STEP_ADJ_LO,
adjustment_value_lo);
- lanphy_write_page_reg(phydev, 4, PTP_LTC_STEP_ADJ_HI,
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_LTC_STEP_ADJ_HI,
PTP_LTC_STEP_ADJ_DIR_ |
adjustment_value_hi);
seconds -= ((s32)adjustment_value);
@@ -3289,9 +3417,11 @@ static void lan8814_ptp_clock_step(struct phy_device *phydev,
adjustment_value_lo = adjustment_value & 0xffff;
adjustment_value_hi = (adjustment_value >> 16) & 0x3fff;
- lanphy_write_page_reg(phydev, 4, PTP_LTC_STEP_ADJ_LO,
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_LTC_STEP_ADJ_LO,
adjustment_value_lo);
- lanphy_write_page_reg(phydev, 4, PTP_LTC_STEP_ADJ_HI,
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_LTC_STEP_ADJ_HI,
adjustment_value_hi);
seconds += ((s32)adjustment_value);
@@ -3299,8 +3429,8 @@ static void lan8814_ptp_clock_step(struct phy_device *phydev,
set_seconds += adjustment_value;
lan8814_ptp_update_target(phydev, set_seconds);
}
- lanphy_write_page_reg(phydev, 4, PTP_CMD_CTL,
- PTP_CMD_CTL_PTP_LTC_STEP_SEC_);
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_CMD_CTL, PTP_CMD_CTL_PTP_LTC_STEP_SEC_);
}
if (nano_seconds) {
u16 nano_seconds_lo;
@@ -3309,12 +3439,14 @@ static void lan8814_ptp_clock_step(struct phy_device *phydev,
nano_seconds_lo = nano_seconds & 0xffff;
nano_seconds_hi = (nano_seconds >> 16) & 0x3fff;
- lanphy_write_page_reg(phydev, 4, PTP_LTC_STEP_ADJ_LO,
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_LTC_STEP_ADJ_LO,
nano_seconds_lo);
- lanphy_write_page_reg(phydev, 4, PTP_LTC_STEP_ADJ_HI,
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_LTC_STEP_ADJ_HI,
PTP_LTC_STEP_ADJ_DIR_ |
nano_seconds_hi);
- lanphy_write_page_reg(phydev, 4, PTP_CMD_CTL,
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, PTP_CMD_CTL,
PTP_CMD_CTL_PTP_LTC_STEP_NSEC_);
}
}
@@ -3356,8 +3488,10 @@ static int lan8814_ptpci_adjfine(struct ptp_clock_info *ptpci, long scaled_ppm)
kszphy_rate_adj_hi |= PTP_CLOCK_RATE_ADJ_DIR_;
mutex_lock(&shared->shared_lock);
- lanphy_write_page_reg(phydev, 4, PTP_CLOCK_RATE_ADJ_HI, kszphy_rate_adj_hi);
- lanphy_write_page_reg(phydev, 4, PTP_CLOCK_RATE_ADJ_LO, kszphy_rate_adj_lo);
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, PTP_CLOCK_RATE_ADJ_HI,
+ kszphy_rate_adj_hi);
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, PTP_CLOCK_RATE_ADJ_LO,
+ kszphy_rate_adj_lo);
mutex_unlock(&shared->shared_lock);
return 0;
@@ -3366,17 +3500,17 @@ static int lan8814_ptpci_adjfine(struct ptp_clock_info *ptpci, long scaled_ppm)
static void lan8814_ptp_set_reload(struct phy_device *phydev, int event,
s64 period_sec, u32 period_nsec)
{
- lanphy_write_page_reg(phydev, 4,
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
LAN8814_PTP_CLOCK_TARGET_RELOAD_SEC_LO(event),
lower_16_bits(period_sec));
- lanphy_write_page_reg(phydev, 4,
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
LAN8814_PTP_CLOCK_TARGET_RELOAD_SEC_HI(event),
upper_16_bits(period_sec));
- lanphy_write_page_reg(phydev, 4,
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
LAN8814_PTP_CLOCK_TARGET_RELOAD_NS_LO(event),
lower_16_bits(period_nsec));
- lanphy_write_page_reg(phydev, 4,
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
LAN8814_PTP_CLOCK_TARGET_RELOAD_NS_HI(event),
upper_16_bits(period_nsec) & 0x3fff);
}
@@ -3384,73 +3518,72 @@ static void lan8814_ptp_set_reload(struct phy_device *phydev, int event,
static void lan8814_ptp_enable_event(struct phy_device *phydev, int event,
int pulse_width)
{
- u16 val;
-
- val = lanphy_read_page_reg(phydev, 4, LAN8814_PTP_GENERAL_CONFIG);
- /* Set the pulse width of the event */
- val &= ~(LAN8814_PTP_GENERAL_CONFIG_LTC_EVENT_MASK(event));
- /* Make sure that the target clock will be incremented each time when
+ /* Set the pulse width of the event,
+ * make sure that the target clock will be incremented each time the
* local time reaches or passes it,
+ * and set the polarity high.
*/
- val |= LAN8814_PTP_GENERAL_CONFIG_LTC_EVENT_SET(event, pulse_width);
- val &= ~(LAN8814_PTP_GENERAL_CONFIG_RELOAD_ADD_X(event));
- /* Set the polarity high */
- val |= LAN8814_PTP_GENERAL_CONFIG_POLARITY_X(event);
- lanphy_write_page_reg(phydev, 4, LAN8814_PTP_GENERAL_CONFIG, val);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, LAN8814_PTP_GENERAL_CONFIG,
+ LAN8814_PTP_GENERAL_CONFIG_LTC_EVENT_MASK(event) |
+ LAN8814_PTP_GENERAL_CONFIG_LTC_EVENT_SET(event, pulse_width) |
+ LAN8814_PTP_GENERAL_CONFIG_RELOAD_ADD_X(event) |
+ LAN8814_PTP_GENERAL_CONFIG_POLARITY_X(event),
+ LAN8814_PTP_GENERAL_CONFIG_LTC_EVENT_SET(event, pulse_width) |
+ LAN8814_PTP_GENERAL_CONFIG_POLARITY_X(event));
}
static void lan8814_ptp_disable_event(struct phy_device *phydev, int event)
{
- u16 val;
-
/* Set the target far enough in the future to effectively disable it */
lan8814_ptp_set_target(phydev, event, 0xFFFFFFFF, 0);
/* And then reload once it reaches the target */
- val = lanphy_read_page_reg(phydev, 4, LAN8814_PTP_GENERAL_CONFIG);
- val |= LAN8814_PTP_GENERAL_CONFIG_RELOAD_ADD_X(event);
- lanphy_write_page_reg(phydev, 4, LAN8814_PTP_GENERAL_CONFIG, val);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, LAN8814_PTP_GENERAL_CONFIG,
+ LAN8814_PTP_GENERAL_CONFIG_RELOAD_ADD_X(event),
+ LAN8814_PTP_GENERAL_CONFIG_RELOAD_ADD_X(event));
}
static void lan8814_ptp_perout_off(struct phy_device *phydev, int pin)
{
- u16 val;
-
/* Disable gpio alternate function,
* 1: select as gpio,
* 0: select alt func
*/
- val = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_EN_ADDR(pin));
- val |= LAN8814_GPIO_EN_BIT(pin);
- lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_EN_ADDR(pin), val);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8814_GPIO_EN_ADDR(pin),
+ LAN8814_GPIO_EN_BIT(pin),
+ LAN8814_GPIO_EN_BIT(pin));
- val = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin));
- val &= ~LAN8814_GPIO_DIR_BIT(pin);
- lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin), val);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8814_GPIO_DIR_ADDR(pin),
+ LAN8814_GPIO_DIR_BIT(pin),
+ 0);
- val = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_BUF_ADDR(pin));
- val &= ~LAN8814_GPIO_BUF_BIT(pin);
- lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_BUF_ADDR(pin), val);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8814_GPIO_BUF_ADDR(pin),
+ LAN8814_GPIO_BUF_BIT(pin),
+ 0);
}
static void lan8814_ptp_perout_on(struct phy_device *phydev, int pin)
{
- int val;
-
/* Set as gpio output */
- val = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin));
- val |= LAN8814_GPIO_DIR_BIT(pin);
- lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin), val);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8814_GPIO_DIR_ADDR(pin),
+ LAN8814_GPIO_DIR_BIT(pin),
+ LAN8814_GPIO_DIR_BIT(pin));
/* Enable gpio 0:for alternate function, 1:gpio */
- val = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_EN_ADDR(pin));
- val &= ~LAN8814_GPIO_EN_BIT(pin);
- lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_EN_ADDR(pin), val);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8814_GPIO_EN_ADDR(pin),
+ LAN8814_GPIO_EN_BIT(pin),
+ 0);
/* Set buffer type to push pull */
- val = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_BUF_ADDR(pin));
- val |= LAN8814_GPIO_BUF_BIT(pin);
- lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_BUF_ADDR(pin), val);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8814_GPIO_BUF_ADDR(pin),
+ LAN8814_GPIO_BUF_BIT(pin),
+ LAN8814_GPIO_BUF_BIT(pin));
}
static int lan8814_ptp_perout(struct ptp_clock_info *ptpci,
@@ -3565,61 +3698,64 @@ static int lan8814_ptp_perout(struct ptp_clock_info *ptpci,
static void lan8814_ptp_extts_on(struct phy_device *phydev, int pin, u32 flags)
{
- u16 tmp;
-
/* Set as gpio input */
- tmp = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin));
- tmp &= ~LAN8814_GPIO_DIR_BIT(pin);
- lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin), tmp);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8814_GPIO_DIR_ADDR(pin),
+ LAN8814_GPIO_DIR_BIT(pin),
+ 0);
/* Map the pin to ltc pin 0 of the capture map registers */
- tmp = lanphy_read_page_reg(phydev, 4, PTP_GPIO_CAP_MAP_LO);
- tmp |= pin;
- lanphy_write_page_reg(phydev, 4, PTP_GPIO_CAP_MAP_LO, tmp);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_GPIO_CAP_MAP_LO, pin, pin);
/* Enable capture on the edges of the ltc pin */
- tmp = lanphy_read_page_reg(phydev, 4, PTP_GPIO_CAP_EN);
if (flags & PTP_RISING_EDGE)
- tmp |= PTP_GPIO_CAP_EN_GPIO_RE_CAPTURE_ENABLE(0);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_GPIO_CAP_EN,
+ PTP_GPIO_CAP_EN_GPIO_RE_CAPTURE_ENABLE(0),
+ PTP_GPIO_CAP_EN_GPIO_RE_CAPTURE_ENABLE(0));
if (flags & PTP_FALLING_EDGE)
- tmp |= PTP_GPIO_CAP_EN_GPIO_FE_CAPTURE_ENABLE(0);
- lanphy_write_page_reg(phydev, 4, PTP_GPIO_CAP_EN, tmp);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_GPIO_CAP_EN,
+ PTP_GPIO_CAP_EN_GPIO_FE_CAPTURE_ENABLE(0),
+ PTP_GPIO_CAP_EN_GPIO_FE_CAPTURE_ENABLE(0));
/* Enable interrupt top interrupt */
- tmp = lanphy_read_page_reg(phydev, 4, PTP_COMMON_INT_ENA);
- tmp |= PTP_COMMON_INT_ENA_GPIO_CAP_EN;
- lanphy_write_page_reg(phydev, 4, PTP_COMMON_INT_ENA, tmp);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, PTP_COMMON_INT_ENA,
+ PTP_COMMON_INT_ENA_GPIO_CAP_EN,
+ PTP_COMMON_INT_ENA_GPIO_CAP_EN);
}
static void lan8814_ptp_extts_off(struct phy_device *phydev, int pin)
{
- u16 tmp;
-
/* Set as gpio out */
- tmp = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin));
- tmp |= LAN8814_GPIO_DIR_BIT(pin);
- lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin), tmp);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8814_GPIO_DIR_ADDR(pin),
+ LAN8814_GPIO_DIR_BIT(pin),
+ LAN8814_GPIO_DIR_BIT(pin));
/* Enable alternate, 0:for alternate function, 1:gpio */
- tmp = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_EN_ADDR(pin));
- tmp &= ~LAN8814_GPIO_EN_BIT(pin);
- lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_EN_ADDR(pin), tmp);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8814_GPIO_EN_ADDR(pin),
+ LAN8814_GPIO_EN_BIT(pin),
+ 0);
/* Clear the mapping of pin to registers 0 of the capture registers */
- tmp = lanphy_read_page_reg(phydev, 4, PTP_GPIO_CAP_MAP_LO);
- tmp &= ~GENMASK(3, 0);
- lanphy_write_page_reg(phydev, 4, PTP_GPIO_CAP_MAP_LO, tmp);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_GPIO_CAP_MAP_LO,
+ GENMASK(3, 0),
+ 0);
/* Disable capture on both of the edges */
- tmp = lanphy_read_page_reg(phydev, 4, PTP_GPIO_CAP_EN);
- tmp &= ~PTP_GPIO_CAP_EN_GPIO_RE_CAPTURE_ENABLE(pin);
- tmp &= ~PTP_GPIO_CAP_EN_GPIO_FE_CAPTURE_ENABLE(pin);
- lanphy_write_page_reg(phydev, 4, PTP_GPIO_CAP_EN, tmp);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, PTP_GPIO_CAP_EN,
+ PTP_GPIO_CAP_EN_GPIO_RE_CAPTURE_ENABLE(pin) |
+ PTP_GPIO_CAP_EN_GPIO_FE_CAPTURE_ENABLE(pin),
+ 0);
/* Disable interrupt top interrupt */
- tmp = lanphy_read_page_reg(phydev, 4, PTP_COMMON_INT_ENA);
- tmp &= ~PTP_COMMON_INT_ENA_GPIO_CAP_EN;
- lanphy_write_page_reg(phydev, 4, PTP_COMMON_INT_ENA, tmp);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, PTP_COMMON_INT_ENA,
+ PTP_COMMON_INT_ENA_GPIO_CAP_EN,
+ 0);
}
static int lan8814_ptp_extts(struct ptp_clock_info *ptpci,
@@ -3749,7 +3885,8 @@ static void lan8814_get_tx_ts(struct kszphy_ptp_priv *ptp_priv)
/* If other timestamps are available in the FIFO,
* process them.
*/
- reg = lanphy_read_page_reg(phydev, 5, PTP_CAP_INFO);
+ reg = lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_CAP_INFO);
} while (PTP_CAP_INFO_TX_TS_CNT_GET_(reg) > 0);
}
@@ -3822,7 +3959,8 @@ static void lan8814_get_rx_ts(struct kszphy_ptp_priv *ptp_priv)
/* If other timestamps are available in the FIFO,
* process them.
*/
- reg = lanphy_read_page_reg(phydev, 5, PTP_CAP_INFO);
+ reg = lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_CAP_INFO);
} while (PTP_CAP_INFO_RX_TS_CNT_GET_(reg) > 0);
}
@@ -3859,31 +3997,40 @@ static int lan8814_gpio_process_cap(struct lan8814_shared_priv *shared)
/* This is 0 because whatever the input pin was, it was mapped to
* ltc gpio pin 0
*/
- tmp = lanphy_read_page_reg(phydev, 4, PTP_GPIO_SEL);
- tmp |= PTP_GPIO_SEL_GPIO_SEL(0);
- lanphy_write_page_reg(phydev, 4, PTP_GPIO_SEL, tmp);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, PTP_GPIO_SEL,
+ PTP_GPIO_SEL_GPIO_SEL(0),
+ PTP_GPIO_SEL_GPIO_SEL(0));
- tmp = lanphy_read_page_reg(phydev, 4, PTP_GPIO_CAP_STS);
+ tmp = lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_GPIO_CAP_STS);
if (!(tmp & PTP_GPIO_CAP_STS_PTP_GPIO_RE_STS(0)) &&
!(tmp & PTP_GPIO_CAP_STS_PTP_GPIO_FE_STS(0)))
return -1;
if (tmp & BIT(0)) {
- sec = lanphy_read_page_reg(phydev, 4, PTP_GPIO_RE_LTC_SEC_HI_CAP);
+ sec = lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_GPIO_RE_LTC_SEC_HI_CAP);
sec <<= 16;
- sec |= lanphy_read_page_reg(phydev, 4, PTP_GPIO_RE_LTC_SEC_LO_CAP);
+ sec |= lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_GPIO_RE_LTC_SEC_LO_CAP);
- nsec = lanphy_read_page_reg(phydev, 4, PTP_GPIO_RE_LTC_NS_HI_CAP) & 0x3fff;
+ nsec = lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_GPIO_RE_LTC_NS_HI_CAP) & 0x3fff;
nsec <<= 16;
- nsec |= lanphy_read_page_reg(phydev, 4, PTP_GPIO_RE_LTC_NS_LO_CAP);
+ nsec |= lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_GPIO_RE_LTC_NS_LO_CAP);
} else {
- sec = lanphy_read_page_reg(phydev, 4, PTP_GPIO_FE_LTC_SEC_HI_CAP);
+ sec = lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_GPIO_FE_LTC_SEC_HI_CAP);
sec <<= 16;
- sec |= lanphy_read_page_reg(phydev, 4, PTP_GPIO_FE_LTC_SEC_LO_CAP);
+ sec |= lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_GPIO_FE_LTC_SEC_LO_CAP);
- nsec = lanphy_read_page_reg(phydev, 4, PTP_GPIO_FE_LTC_NS_HI_CAP) & 0x3fff;
+ nsec = lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_GPIO_FE_LTC_NS_HI_CAP) & 0x3fff;
nsec <<= 16;
- nsec |= lanphy_read_page_reg(phydev, 4, PTP_GPIO_RE_LTC_NS_LO_CAP);
+ nsec |= lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_GPIO_RE_LTC_NS_LO_CAP);
}
ptp_event.index = 0;
@@ -3908,19 +4055,17 @@ static int lan8814_handle_gpio_interrupt(struct phy_device *phydev, u16 status)
static int lan8804_config_init(struct phy_device *phydev)
{
- int val;
-
/* MDI-X setting for swap A,B transmit */
- val = lanphy_read_page_reg(phydev, 2, LAN8804_ALIGN_SWAP);
- val &= ~LAN8804_ALIGN_TX_A_B_SWAP_MASK;
- val |= LAN8804_ALIGN_TX_A_B_SWAP;
- lanphy_write_page_reg(phydev, 2, LAN8804_ALIGN_SWAP, val);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_PCS_DIGITAL, LAN8804_ALIGN_SWAP,
+ LAN8804_ALIGN_TX_A_B_SWAP_MASK,
+ LAN8804_ALIGN_TX_A_B_SWAP);
/* Make sure that the PHY will not stop generating the clock when the
* link partner goes down
*/
- lanphy_write_page_reg(phydev, 31, LAN8814_CLOCK_MANAGEMENT, 0x27e);
- lanphy_read_page_reg(phydev, 1, LAN8814_LINK_QUALITY);
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_SYSTEM_CTRL,
+ LAN8814_CLOCK_MANAGEMENT, 0x27e);
+ lanphy_read_page_reg(phydev, LAN8814_PAGE_AFE_PMA, LAN8814_LINK_QUALITY);
return 0;
}
@@ -4002,7 +4147,8 @@ static irqreturn_t lan8814_handle_interrupt(struct phy_device *phydev)
}
while (true) {
- irq_status = lanphy_read_page_reg(phydev, 5, PTP_TSU_INT_STS);
+ irq_status = lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_TSU_INT_STS);
if (!irq_status)
break;
@@ -4030,7 +4176,7 @@ static int lan8814_config_intr(struct phy_device *phydev)
{
int err;
- lanphy_write_page_reg(phydev, 4, LAN8814_INTR_CTRL_REG,
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, LAN8814_INTR_CTRL_REG,
LAN8814_INTR_CTRL_REG_POLARITY |
LAN8814_INTR_CTRL_REG_INTR_ENABLE);
@@ -4056,35 +4202,41 @@ static void lan8814_ptp_init(struct phy_device *phydev)
{
struct kszphy_priv *priv = phydev->priv;
struct kszphy_ptp_priv *ptp_priv = &priv->ptp_priv;
- u32 temp;
if (!IS_ENABLED(CONFIG_PTP_1588_CLOCK) ||
!IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING))
return;
- lanphy_write_page_reg(phydev, 5, TSU_HARD_RESET, TSU_HARD_RESET_);
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ TSU_HARD_RESET, TSU_HARD_RESET_);
- temp = lanphy_read_page_reg(phydev, 5, PTP_TX_MOD);
- temp |= PTP_TX_MOD_BAD_UDPV4_CHKSUM_FORCE_FCS_DIS_;
- lanphy_write_page_reg(phydev, 5, PTP_TX_MOD, temp);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_PORT_REGS, PTP_TX_MOD,
+ PTP_TX_MOD_BAD_UDPV4_CHKSUM_FORCE_FCS_DIS_,
+ PTP_TX_MOD_BAD_UDPV4_CHKSUM_FORCE_FCS_DIS_);
- temp = lanphy_read_page_reg(phydev, 5, PTP_RX_MOD);
- temp |= PTP_RX_MOD_BAD_UDPV4_CHKSUM_FORCE_FCS_DIS_;
- lanphy_write_page_reg(phydev, 5, PTP_RX_MOD, temp);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_PORT_REGS, PTP_RX_MOD,
+ PTP_RX_MOD_BAD_UDPV4_CHKSUM_FORCE_FCS_DIS_,
+ PTP_RX_MOD_BAD_UDPV4_CHKSUM_FORCE_FCS_DIS_);
- lanphy_write_page_reg(phydev, 5, PTP_RX_PARSE_CONFIG, 0);
- lanphy_write_page_reg(phydev, 5, PTP_TX_PARSE_CONFIG, 0);
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_RX_PARSE_CONFIG, 0);
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_TX_PARSE_CONFIG, 0);
/* Remove the default register configs related to L2 and IP */
- lanphy_write_page_reg(phydev, 5, PTP_TX_PARSE_L2_ADDR_EN, 0);
- lanphy_write_page_reg(phydev, 5, PTP_RX_PARSE_L2_ADDR_EN, 0);
- lanphy_write_page_reg(phydev, 5, PTP_TX_PARSE_IP_ADDR_EN, 0);
- lanphy_write_page_reg(phydev, 5, PTP_RX_PARSE_IP_ADDR_EN, 0);
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_TX_PARSE_L2_ADDR_EN, 0);
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_RX_PARSE_L2_ADDR_EN, 0);
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_TX_PARSE_IP_ADDR_EN, 0);
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_RX_PARSE_IP_ADDR_EN, 0);
/* Disable checking for minorVersionPTP field */
- lanphy_write_page_reg(phydev, 5, PTP_RX_VERSION,
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_PORT_REGS, PTP_RX_VERSION,
PTP_MAX_VERSION(0xff) | PTP_MIN_VERSION(0x0));
- lanphy_write_page_reg(phydev, 5, PTP_TX_VERSION,
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_PORT_REGS, PTP_TX_VERSION,
PTP_MAX_VERSION(0xff) | PTP_MIN_VERSION(0x0));
skb_queue_head_init(&ptp_priv->tx_queue);
@@ -4105,7 +4257,8 @@ static void lan8814_ptp_init(struct phy_device *phydev)
phydev->default_timestamp = true;
}
-static int lan8814_ptp_probe_once(struct phy_device *phydev)
+static int __lan8814_ptp_probe_once(struct phy_device *phydev, char *pin_name,
+ int gpios)
{
struct lan8814_shared_priv *shared = phy_package_get_priv(phydev);
@@ -4113,18 +4266,18 @@ static int lan8814_ptp_probe_once(struct phy_device *phydev)
mutex_init(&shared->shared_lock);
shared->pin_config = devm_kmalloc_array(&phydev->mdio.dev,
- LAN8814_PTP_GPIO_NUM,
+ gpios,
sizeof(*shared->pin_config),
GFP_KERNEL);
if (!shared->pin_config)
return -ENOMEM;
- for (int i = 0; i < LAN8814_PTP_GPIO_NUM; i++) {
+ for (int i = 0; i < gpios; i++) {
struct ptp_pin_desc *ptp_pin = &shared->pin_config[i];
memset(ptp_pin, 0, sizeof(*ptp_pin));
snprintf(ptp_pin->name,
- sizeof(ptp_pin->name), "lan8814_ptp_pin_%02d", i);
+ sizeof(ptp_pin->name), "%s_%02d", pin_name, i);
ptp_pin->index = i;
ptp_pin->func = PTP_PF_NONE;
}
@@ -4134,7 +4287,7 @@ static int lan8814_ptp_probe_once(struct phy_device *phydev)
shared->ptp_clock_info.max_adj = 31249999;
shared->ptp_clock_info.n_alarm = 0;
shared->ptp_clock_info.n_ext_ts = LAN8814_PTP_EXTTS_NUM;
- shared->ptp_clock_info.n_pins = LAN8814_PTP_GPIO_NUM;
+ shared->ptp_clock_info.n_pins = gpios;
shared->ptp_clock_info.pps = 0;
shared->ptp_clock_info.supported_extts_flags = PTP_RISING_EDGE |
PTP_FALLING_EDGE |
@@ -4153,8 +4306,8 @@ static int lan8814_ptp_probe_once(struct phy_device *phydev)
shared->ptp_clock = ptp_clock_register(&shared->ptp_clock_info,
&phydev->mdio.dev);
if (IS_ERR(shared->ptp_clock)) {
- phydev_err(phydev, "ptp_clock_register failed %lu\n",
- PTR_ERR(shared->ptp_clock));
+ phydev_err(phydev, "ptp_clock_register failed %pe\n",
+ shared->ptp_clock);
return -EINVAL;
}
@@ -4169,50 +4322,60 @@ static int lan8814_ptp_probe_once(struct phy_device *phydev)
/* The EP.4 is shared between all the PHYs in the package and also it
* can be accessed by any of the PHYs
*/
- lanphy_write_page_reg(phydev, 4, LTC_HARD_RESET, LTC_HARD_RESET_);
- lanphy_write_page_reg(phydev, 4, PTP_OPERATING_MODE,
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LTC_HARD_RESET, LTC_HARD_RESET_);
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, PTP_OPERATING_MODE,
PTP_OPERATING_MODE_STANDALONE_);
/* Enable ptp to run LTC clock for ptp and gpio 1PPS operation */
- lanphy_write_page_reg(phydev, 4, PTP_CMD_CTL, PTP_CMD_CTL_PTP_ENABLE_);
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, PTP_CMD_CTL,
+ PTP_CMD_CTL_PTP_ENABLE_);
return 0;
}
+static int lan8814_ptp_probe_once(struct phy_device *phydev)
+{
+ return __lan8814_ptp_probe_once(phydev, "lan8814_ptp_pin",
+ LAN8814_PTP_GPIO_NUM);
+}
+
static void lan8814_setup_led(struct phy_device *phydev, int val)
{
int temp;
- temp = lanphy_read_page_reg(phydev, 5, LAN8814_LED_CTRL_1);
+ temp = lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ LAN8814_LED_CTRL_1);
if (val)
temp |= LAN8814_LED_CTRL_1_KSZ9031_LED_MODE_;
else
temp &= ~LAN8814_LED_CTRL_1_KSZ9031_LED_MODE_;
- lanphy_write_page_reg(phydev, 5, LAN8814_LED_CTRL_1, temp);
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ LAN8814_LED_CTRL_1, temp);
}
static int lan8814_config_init(struct phy_device *phydev)
{
struct kszphy_priv *lan8814 = phydev->priv;
- int val;
/* Reset the PHY */
- val = lanphy_read_page_reg(phydev, 4, LAN8814_QSGMII_SOFT_RESET);
- val |= LAN8814_QSGMII_SOFT_RESET_BIT;
- lanphy_write_page_reg(phydev, 4, LAN8814_QSGMII_SOFT_RESET, val);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8814_QSGMII_SOFT_RESET,
+ LAN8814_QSGMII_SOFT_RESET_BIT,
+ LAN8814_QSGMII_SOFT_RESET_BIT);
/* Disable ANEG with QSGMII PCS Host side */
- val = lanphy_read_page_reg(phydev, 5, LAN8814_QSGMII_PCS1G_ANEG_CONFIG);
- val &= ~LAN8814_QSGMII_PCS1G_ANEG_CONFIG_ANEG_ENA;
- lanphy_write_page_reg(phydev, 5, LAN8814_QSGMII_PCS1G_ANEG_CONFIG, val);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ LAN8814_QSGMII_PCS1G_ANEG_CONFIG,
+ LAN8814_QSGMII_PCS1G_ANEG_CONFIG_ANEG_ENA,
+ 0);
/* MDI-X setting for swap A,B transmit */
- val = lanphy_read_page_reg(phydev, 2, LAN8814_ALIGN_SWAP);
- val &= ~LAN8814_ALIGN_TX_A_B_SWAP_MASK;
- val |= LAN8814_ALIGN_TX_A_B_SWAP;
- lanphy_write_page_reg(phydev, 2, LAN8814_ALIGN_SWAP, val);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_PCS_DIGITAL, LAN8814_ALIGN_SWAP,
+ LAN8814_ALIGN_TX_A_B_SWAP_MASK,
+ LAN8814_ALIGN_TX_A_B_SWAP);
if (lan8814->led_mode >= 0)
lan8814_setup_led(phydev, lan8814->led_mode);
@@ -4243,29 +4406,24 @@ static int lan8814_release_coma_mode(struct phy_device *phydev)
static void lan8814_clear_2psp_bit(struct phy_device *phydev)
{
- u16 val;
-
/* It was noticed that when traffic is passing through the PHY and the
* cable is removed then the LED was still on even though there is no
* link
*/
- val = lanphy_read_page_reg(phydev, 2, LAN8814_EEE_STATE);
- val &= ~LAN8814_EEE_STATE_MASK2P5P;
- lanphy_write_page_reg(phydev, 2, LAN8814_EEE_STATE, val);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_PCS_DIGITAL, LAN8814_EEE_STATE,
+ LAN8814_EEE_STATE_MASK2P5P,
+ 0);
}
static void lan8814_update_meas_time(struct phy_device *phydev)
{
- u16 val;
-
/* Setting the measure time to a value of 0xb allows cables longer
* than 100m to be used. This configuration can be used
* regardless of the mode of operation of the PHY
*/
- val = lanphy_read_page_reg(phydev, 1, LAN8814_PD_CONTROLS);
- val &= ~LAN8814_PD_CONTROLS_PD_MEAS_TIME_MASK;
- val |= LAN8814_PD_CONTROLS_PD_MEAS_TIME_VAL;
- lanphy_write_page_reg(phydev, 1, LAN8814_PD_CONTROLS, val);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_AFE_PMA, LAN8814_PD_CONTROLS,
+ LAN8814_PD_CONTROLS_PD_MEAS_TIME_MASK,
+ LAN8814_PD_CONTROLS_PD_MEAS_TIME_VAL);
}
static int lan8814_probe(struct phy_device *phydev)
@@ -4288,7 +4446,7 @@ static int lan8814_probe(struct phy_device *phydev)
/* Strap-in value for PHY address; the register read below gives the
* starting PHY address value
*/
- addr = lanphy_read_page_reg(phydev, 4, 0) & 0x1F;
+ addr = lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, 0) & 0x1F;
devm_phy_package_join(&phydev->mdio.dev, phydev,
addr, sizeof(struct lan8814_shared_priv));
@@ -5582,8 +5740,8 @@ static int lan8841_probe(struct phy_device *phydev)
ptp_priv->ptp_clock = ptp_clock_register(&ptp_priv->ptp_clock_info,
&phydev->mdio.dev);
if (IS_ERR(ptp_priv->ptp_clock)) {
- phydev_err(phydev, "ptp_clock_register failed: %lu\n",
- PTR_ERR(ptp_priv->ptp_clock));
+ phydev_err(phydev, "ptp_clock_register failed: %pe\n",
+ ptp_priv->ptp_clock);
return -EINVAL;
}
@@ -5643,10 +5801,367 @@ static int ksz9131_resume(struct phy_device *phydev)
return kszphy_resume(phydev);
}
+#define LAN8842_PTP_GPIO_NUM 16
+
+static int lan8842_ptp_probe_once(struct phy_device *phydev)
+{
+ return __lan8814_ptp_probe_once(phydev, "lan8842_ptp_pin",
+ LAN8842_PTP_GPIO_NUM);
+}
+
+#define LAN8842_STRAP_REG 0 /* 0x0 */
+#define LAN8842_STRAP_REG_PHYADDR_MASK GENMASK(4, 0)
+#define LAN8842_SKU_REG 11 /* 0x0b */
+#define LAN8842_SELF_TEST 14 /* 0x0e */
+#define LAN8842_SELF_TEST_RX_CNT_ENA BIT(8)
+#define LAN8842_SELF_TEST_TX_CNT_ENA BIT(4)
+
+static int lan8842_probe(struct phy_device *phydev)
+{
+ struct lan8842_priv *priv;
+ int addr;
+ int ret;
+
+ priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ phydev->priv = priv;
+
+ /* Similar to the lan8814, this PHY has a pin which needs to be pulled
+ * down to allow any traffic to pass through it. Therefore use the same
+ * function as the lan8814.
+ */
+ ret = lan8814_release_coma_mode(phydev);
+ if (ret)
+ return ret;
+
+ /* Enable to count the RX and TX packets */
+ ret = lanphy_write_page_reg(phydev, LAN8814_PAGE_PCS_DIGITAL,
+ LAN8842_SELF_TEST,
+ LAN8842_SELF_TEST_RX_CNT_ENA |
+ LAN8842_SELF_TEST_TX_CNT_ENA);
+ if (ret < 0)
+ return ret;
+
+ /* The lan8832 revision doesn't support PTP, therefore don't add any
+ * PTP clocks.
+ */
+ ret = lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8842_SKU_REG);
+ if (ret < 0)
+ return ret;
+
+ priv->rev = ret;
+ if (priv->rev == LAN8842_REV_8832)
+ return 0;
+
+ /* As the lan8814 and lan8842 have the same IP for the PTP block (the
+ * only difference being the number of GPIOs), make sure the lan8842
+ * also initializes the shared data pointer, as it is used in all the
+ * lan8814 PTP functions. The lan8842 doesn't have multiple PHYs in
+ * the same package.
+ */
+ addr = lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8842_STRAP_REG);
+ if (addr < 0)
+ return addr;
+ addr &= LAN8842_STRAP_REG_PHYADDR_MASK;
+
+ ret = devm_phy_package_join(&phydev->mdio.dev, phydev, addr,
+ sizeof(struct lan8814_shared_priv));
+ if (ret)
+ return ret;
+
+ if (phy_package_init_once(phydev)) {
+ ret = lan8842_ptp_probe_once(phydev);
+ if (ret)
+ return ret;
+ }
+
+ lan8814_ptp_init(phydev);
+
+ return 0;
+}
+
+static int lan8842_config_init(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Reset the PHY */
+ ret = lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8814_QSGMII_SOFT_RESET,
+ LAN8814_QSGMII_SOFT_RESET_BIT,
+ LAN8814_QSGMII_SOFT_RESET_BIT);
+ if (ret < 0)
+ return ret;
+
+ /* Even if the GPIOs are set to control the LEDs, the LED behaviour is
+ * wrong: they do not blink when there is traffic. To fix this, the
+ * extended LED mode needs to be set.
+ */
+ ret = lanphy_modify_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ LAN8814_LED_CTRL_1,
+ LAN8814_LED_CTRL_1_KSZ9031_LED_MODE_, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = lanphy_modify_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ LAN8814_LED_CTRL_2,
+ LAN8814_LED_CTRL_2_LED1_COM_DIS,
+ LAN8814_LED_CTRL_2_LED1_COM_DIS);
+ if (ret < 0)
+ return ret;
+
+ /* To allow the PHY to control the LEDs, the PHY's GPIOs must be set to
+ * function mode and not GPIO mode. Apparently the default value is
+ * GPIO and not function, even though the datasheet says it is
+ * function. Therefore set this value.
+ */
+ return lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8814_GPIO_EN2, 0);
+}
+
+#define LAN8842_INTR_CTRL_REG 52 /* 0x34 */
+
+static int lan8842_config_intr(struct phy_device *phydev)
+{
+ int err;
+
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8842_INTR_CTRL_REG,
+ LAN8814_INTR_CTRL_REG_INTR_ENABLE);
+
+ /* enable / disable interrupts */
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = lan8814_ack_interrupt(phydev);
+ if (err)
+ return err;
+
+ err = phy_write(phydev, LAN8814_INTC,
+ LAN8814_INT_LINK | LAN8814_INT_FLF);
+ } else {
+ err = phy_write(phydev, LAN8814_INTC, 0);
+ if (err)
+ return err;
+
+ err = lan8814_ack_interrupt(phydev);
+ }
+
+ return err;
+}
+
+static unsigned int lan8842_inband_caps(struct phy_device *phydev,
+ phy_interface_t interface)
+{
+ /* Inband configuration can be enabled or disabled using the register
+ * PCS1G_ANEG_CONFIG.
+ */
+ return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE;
+}
+
+static int lan8842_config_inband(struct phy_device *phydev, unsigned int modes)
+{
+ bool enable;
+
+ if (modes == LINK_INBAND_DISABLE)
+ enable = false;
+ else
+ enable = true;
+
+ /* Disable or enable in-band autoneg with the PCS host side. The
+ * register has the same address as on the lan8814.
+ */
+ return lanphy_modify_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ LAN8814_QSGMII_PCS1G_ANEG_CONFIG,
+ LAN8814_QSGMII_PCS1G_ANEG_CONFIG_ANEG_ENA,
+ enable ? LAN8814_QSGMII_PCS1G_ANEG_CONFIG_ANEG_ENA : 0);
+}
+
+static void lan8842_handle_ptp_interrupt(struct phy_device *phydev, u16 status)
+{
+ struct kszphy_ptp_priv *ptp_priv;
+ struct lan8842_priv *priv;
+
+ priv = phydev->priv;
+ ptp_priv = &priv->ptp_priv;
+
+ if (status & PTP_TSU_INT_STS_PTP_TX_TS_EN_)
+ lan8814_get_tx_ts(ptp_priv);
+
+ if (status & PTP_TSU_INT_STS_PTP_RX_TS_EN_)
+ lan8814_get_rx_ts(ptp_priv);
+
+ if (status & PTP_TSU_INT_STS_PTP_TX_TS_OVRFL_INT_) {
+ lan8814_flush_fifo(phydev, true);
+ skb_queue_purge(&ptp_priv->tx_queue);
+ }
+
+ if (status & PTP_TSU_INT_STS_PTP_RX_TS_OVRFL_INT_) {
+ lan8814_flush_fifo(phydev, false);
+ skb_queue_purge(&ptp_priv->rx_queue);
+ }
+}
+
+static irqreturn_t lan8842_handle_interrupt(struct phy_device *phydev)
+{
+ struct lan8842_priv *priv = phydev->priv;
+ int ret = IRQ_NONE;
+ int irq_status;
+
+ irq_status = phy_read(phydev, LAN8814_INTS);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (irq_status & (LAN8814_INT_LINK | LAN8814_INT_FLF)) {
+ phy_trigger_machine(phydev);
+ ret = IRQ_HANDLED;
+ }
+
+ /* The lan8832 revision doesn't support PTP, therefore there is no
+ * need to check the PTP and GPIO interrupts.
+ */
+ if (priv->rev == LAN8842_REV_8832)
+ goto out;
+
+ while (true) {
+ irq_status = lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_TSU_INT_STS);
+ if (!irq_status)
+ break;
+
+ lan8842_handle_ptp_interrupt(phydev, irq_status);
+ ret = IRQ_HANDLED;
+ }
+
+ if (!lan8814_handle_gpio_interrupt(phydev, irq_status))
+ ret = IRQ_HANDLED;
+
+out:
+ return ret;
+}
+
+static u64 lan8842_get_stat(struct phy_device *phydev, int count, int *regs)
+{
+ u64 ret = 0;
+ int val;
+
+ for (int j = 0; j < count; ++j) {
+ val = lanphy_read_page_reg(phydev, LAN8814_PAGE_PCS_DIGITAL,
+ regs[j]);
+ if (val < 0)
+ return U64_MAX;
+
+ ret <<= 16;
+ ret += val;
+ }
+ return ret;
+}
+
+static int lan8842_update_stats(struct phy_device *phydev)
+{
+ struct lan8842_priv *priv = phydev->priv;
+ int rx_packets_regs[] = {88, 61, 60};
+ int rx_errors_regs[] = {63, 62};
+ int tx_packets_regs[] = {89, 85, 84};
+ int tx_errors_regs[] = {87, 86};
+
+ priv->phy_stats.rx_packets = lan8842_get_stat(phydev,
+ ARRAY_SIZE(rx_packets_regs),
+ rx_packets_regs);
+ priv->phy_stats.rx_errors = lan8842_get_stat(phydev,
+ ARRAY_SIZE(rx_errors_regs),
+ rx_errors_regs);
+ priv->phy_stats.tx_packets = lan8842_get_stat(phydev,
+ ARRAY_SIZE(tx_packets_regs),
+ tx_packets_regs);
+ priv->phy_stats.tx_errors = lan8842_get_stat(phydev,
+ ARRAY_SIZE(tx_errors_regs),
+ tx_errors_regs);
+
+ return 0;
+}
+
+#define LAN8842_FLF 15 /* 0x0f */
+#define LAN8842_FLF_ENA BIT(1)
+#define LAN8842_FLF_ENA_LINK_DOWN BIT(0)
+
+static int lan8842_get_fast_down(struct phy_device *phydev, u8 *msecs)
+{
+ int ret;
+
+ ret = lanphy_read_page_reg(phydev, LAN8814_PAGE_PCS, LAN8842_FLF);
+ if (ret < 0)
+ return ret;
+
+ if (ret & LAN8842_FLF_ENA)
+ *msecs = ETHTOOL_PHY_FAST_LINK_DOWN_ON;
+ else
+ *msecs = ETHTOOL_PHY_FAST_LINK_DOWN_OFF;
+
+ return 0;
+}
+
+static int lan8842_set_fast_down(struct phy_device *phydev, const u8 *msecs)
+{
+ u16 flf;
+
+ switch (*msecs) {
+ case ETHTOOL_PHY_FAST_LINK_DOWN_OFF:
+ flf = 0;
+ break;
+ case ETHTOOL_PHY_FAST_LINK_DOWN_ON:
+ flf = LAN8842_FLF_ENA | LAN8842_FLF_ENA_LINK_DOWN;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return lanphy_modify_page_reg(phydev, LAN8814_PAGE_PCS,
+ LAN8842_FLF,
+ LAN8842_FLF_ENA |
+ LAN8842_FLF_ENA_LINK_DOWN, flf);
+}
+
+static int lan8842_get_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_FAST_LINK_DOWN:
+ return lan8842_get_fast_down(phydev, data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int lan8842_set_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, const void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_FAST_LINK_DOWN:
+ return lan8842_set_fast_down(phydev, data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void lan8842_get_phy_stats(struct phy_device *phydev,
+ struct ethtool_eth_phy_stats *eth_stats,
+ struct ethtool_phy_stats *stats)
+{
+ struct lan8842_priv *priv = phydev->priv;
+
+ stats->rx_packets = priv->phy_stats.rx_packets;
+ stats->rx_errors = priv->phy_stats.rx_errors;
+ stats->tx_packets = priv->phy_stats.tx_packets;
+ stats->tx_errors = priv->phy_stats.tx_errors;
+}
+
static struct phy_driver ksphy_driver[] = {
{
- .phy_id = PHY_ID_KS8737,
- .phy_id_mask = MICREL_PHY_ID_MASK,
+ PHY_ID_MATCH_MODEL(PHY_ID_KS8737),
.name = "Micrel KS8737",
/* PHY_BASIC_FEATURES */
.driver_data = &ks8737_type,
@@ -5687,8 +6202,7 @@ static struct phy_driver ksphy_driver[] = {
.suspend = kszphy_suspend,
.resume = kszphy_resume,
}, {
- .phy_id = PHY_ID_KSZ8041,
- .phy_id_mask = MICREL_PHY_ID_MASK,
+ PHY_ID_MATCH_MODEL(PHY_ID_KSZ8041),
.name = "Micrel KSZ8041",
/* PHY_BASIC_FEATURES */
.driver_data = &ksz8041_type,
@@ -5703,8 +6217,7 @@ static struct phy_driver ksphy_driver[] = {
.suspend = ksz8041_suspend,
.resume = ksz8041_resume,
}, {
- .phy_id = PHY_ID_KSZ8041RNLI,
- .phy_id_mask = MICREL_PHY_ID_MASK,
+ PHY_ID_MATCH_MODEL(PHY_ID_KSZ8041RNLI),
.name = "Micrel KSZ8041RNLI",
/* PHY_BASIC_FEATURES */
.driver_data = &ksz8041_type,
@@ -5747,9 +6260,8 @@ static struct phy_driver ksphy_driver[] = {
.suspend = kszphy_suspend,
.resume = kszphy_resume,
}, {
- .phy_id = PHY_ID_KSZ8081,
+ PHY_ID_MATCH_MODEL(PHY_ID_KSZ8081),
.name = "Micrel KSZ8081 or KSZ8091",
- .phy_id_mask = MICREL_PHY_ID_MASK,
.flags = PHY_POLL_CABLE_TEST,
/* PHY_BASIC_FEATURES */
.driver_data = &ksz8081_type,
@@ -5768,9 +6280,8 @@ static struct phy_driver ksphy_driver[] = {
.cable_test_start = ksz886x_cable_test_start,
.cable_test_get_status = ksz886x_cable_test_get_status,
}, {
- .phy_id = PHY_ID_KSZ8061,
+ PHY_ID_MATCH_MODEL(PHY_ID_KSZ8061),
.name = "Micrel KSZ8061",
- .phy_id_mask = MICREL_PHY_ID_MASK,
/* PHY_BASIC_FEATURES */
.probe = kszphy_probe,
.config_init = ksz8061_config_init,
@@ -5798,8 +6309,7 @@ static struct phy_driver ksphy_driver[] = {
.read_mmd = genphy_read_mmd_unsupported,
.write_mmd = genphy_write_mmd_unsupported,
}, {
- .phy_id = PHY_ID_KSZ9031,
- .phy_id_mask = MICREL_PHY_ID_MASK,
+ PHY_ID_MATCH_MODEL(PHY_ID_KSZ9031),
.name = "Micrel KSZ9031 Gigabit PHY",
.flags = PHY_POLL_CABLE_TEST,
.driver_data = &ksz9021_type,
@@ -5819,8 +6329,7 @@ static struct phy_driver ksphy_driver[] = {
.cable_test_get_status = ksz9x31_cable_test_get_status,
.set_loopback = ksz9031_set_loopback,
}, {
- .phy_id = PHY_ID_LAN8814,
- .phy_id_mask = MICREL_PHY_ID_MASK,
+ PHY_ID_MATCH_MODEL(PHY_ID_LAN8814),
.name = "Microchip INDY Gigabit Quad PHY",
.flags = PHY_POLL_CABLE_TEST,
.config_init = lan8814_config_init,
@@ -5838,8 +6347,7 @@ static struct phy_driver ksphy_driver[] = {
.cable_test_start = lan8814_cable_test_start,
.cable_test_get_status = ksz886x_cable_test_get_status,
}, {
- .phy_id = PHY_ID_LAN8804,
- .phy_id_mask = MICREL_PHY_ID_MASK,
+ PHY_ID_MATCH_MODEL(PHY_ID_LAN8804),
.name = "Microchip LAN966X Gigabit PHY",
.config_init = lan8804_config_init,
.driver_data = &ksz9021_type,
@@ -5854,8 +6362,7 @@ static struct phy_driver ksphy_driver[] = {
.config_intr = lan8804_config_intr,
.handle_interrupt = lan8804_handle_interrupt,
}, {
- .phy_id = PHY_ID_LAN8841,
- .phy_id_mask = MICREL_PHY_ID_MASK,
+ PHY_ID_MATCH_MODEL(PHY_ID_LAN8841),
.name = "Microchip LAN8841 Gigabit PHY",
.flags = PHY_POLL_CABLE_TEST,
.driver_data = &lan8841_type,
@@ -5872,8 +6379,24 @@ static struct phy_driver ksphy_driver[] = {
.cable_test_start = lan8814_cable_test_start,
.cable_test_get_status = ksz886x_cable_test_get_status,
}, {
- .phy_id = PHY_ID_KSZ9131,
- .phy_id_mask = MICREL_PHY_ID_MASK,
+ PHY_ID_MATCH_MODEL(PHY_ID_LAN8842),
+ .name = "Microchip LAN8842 Gigabit PHY",
+ .flags = PHY_POLL_CABLE_TEST,
+ .driver_data = &lan8814_type,
+ .probe = lan8842_probe,
+ .config_init = lan8842_config_init,
+ .config_intr = lan8842_config_intr,
+ .inband_caps = lan8842_inband_caps,
+ .config_inband = lan8842_config_inband,
+ .handle_interrupt = lan8842_handle_interrupt,
+ .get_phy_stats = lan8842_get_phy_stats,
+ .update_stats = lan8842_update_stats,
+ .get_tunable = lan8842_get_tunable,
+ .set_tunable = lan8842_set_tunable,
+ .cable_test_start = lan8814_cable_test_start,
+ .cable_test_get_status = ksz886x_cable_test_get_status,
+}, {
+ PHY_ID_MATCH_MODEL(PHY_ID_KSZ9131),
.name = "Microchip KSZ9131 Gigabit PHY",
/* PHY_GBIT_FEATURES */
.flags = PHY_POLL_CABLE_TEST,
@@ -5894,8 +6417,7 @@ static struct phy_driver ksphy_driver[] = {
.cable_test_get_status = ksz9x31_cable_test_get_status,
.get_features = ksz9477_get_features,
}, {
- .phy_id = PHY_ID_KSZ8873MLL,
- .phy_id_mask = MICREL_PHY_ID_MASK,
+ PHY_ID_MATCH_MODEL(PHY_ID_KSZ8873MLL),
.name = "Micrel KSZ8873MLL Switch",
/* PHY_BASIC_FEATURES */
.config_init = kszphy_config_init,
@@ -5904,8 +6426,7 @@ static struct phy_driver ksphy_driver[] = {
.suspend = genphy_suspend,
.resume = genphy_resume,
}, {
- .phy_id = PHY_ID_KSZ886X,
- .phy_id_mask = MICREL_PHY_ID_MASK,
+ PHY_ID_MATCH_MODEL(PHY_ID_KSZ886X),
.name = "Micrel KSZ8851 Ethernet MAC or KSZ886X Switch",
.driver_data = &ksz886x_type,
/* PHY_BASIC_FEATURES */
@@ -5925,8 +6446,7 @@ static struct phy_driver ksphy_driver[] = {
.suspend = genphy_suspend,
.resume = genphy_resume,
}, {
- .phy_id = PHY_ID_KSZ9477,
- .phy_id_mask = MICREL_PHY_ID_MASK,
+ PHY_ID_MATCH_MODEL(PHY_ID_KSZ9477),
.name = "Microchip KSZ9477",
.probe = kszphy_probe,
/* PHY_GBIT_FEATURES */
@@ -5953,22 +6473,24 @@ MODULE_LICENSE("GPL");
static const struct mdio_device_id __maybe_unused micrel_tbl[] = {
{ PHY_ID_KSZ9021, 0x000ffffe },
- { PHY_ID_KSZ9031, MICREL_PHY_ID_MASK },
- { PHY_ID_KSZ9131, MICREL_PHY_ID_MASK },
+ { PHY_ID_MATCH_MODEL(PHY_ID_KSZ9031) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_KSZ9131) },
{ PHY_ID_KSZ8001, 0x00fffffc },
- { PHY_ID_KS8737, MICREL_PHY_ID_MASK },
+ { PHY_ID_MATCH_MODEL(PHY_ID_KS8737) },
{ PHY_ID_KSZ8021, 0x00ffffff },
{ PHY_ID_KSZ8031, 0x00ffffff },
- { PHY_ID_KSZ8041, MICREL_PHY_ID_MASK },
- { PHY_ID_KSZ8051, MICREL_PHY_ID_MASK },
- { PHY_ID_KSZ8061, MICREL_PHY_ID_MASK },
- { PHY_ID_KSZ8081, MICREL_PHY_ID_MASK },
- { PHY_ID_KSZ8873MLL, MICREL_PHY_ID_MASK },
- { PHY_ID_KSZ886X, MICREL_PHY_ID_MASK },
- { PHY_ID_KSZ9477, MICREL_PHY_ID_MASK },
- { PHY_ID_LAN8814, MICREL_PHY_ID_MASK },
- { PHY_ID_LAN8804, MICREL_PHY_ID_MASK },
- { PHY_ID_LAN8841, MICREL_PHY_ID_MASK },
+ { PHY_ID_MATCH_MODEL(PHY_ID_KSZ8041) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_KSZ8041RNLI) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_KSZ8051) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_KSZ8061) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_KSZ8081) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_KSZ8873MLL) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_KSZ886X) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_KSZ9477) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_LAN8814) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_LAN8804) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_LAN8841) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_LAN8842) },
{ }
};
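
The LAN8842 packet counters read by lan8842_update_stats() above are wider
than a single 16-bit register, so lan8842_get_stat() walks each register
list most-significant word first and concatenates the reads (returning
U64_MAX if any read fails). A minimal standalone sketch of that
word-concatenation arithmetic (illustrative only; the values are made up):

	#include <stdint.h>
	#include <stdio.h>

	/* Concatenate 16-bit register words into one wide counter,
	 * most-significant word first, as lan8842_get_stat() does.
	 */
	static uint64_t concat_words(const uint16_t *words, int count)
	{
		uint64_t ret = 0;

		for (int j = 0; j < count; j++) {
			ret <<= 16;
			ret += words[j];
		}

		return ret;
	}

	int main(void)
	{
		/* three reads 0x0001, 0x2345, 0x6789 -> 0x123456789 */
		uint16_t words[] = { 0x0001, 0x2345, 0x6789 };

		printf("0x%llx\n", (unsigned long long)concat_words(words, 3));
		return 0;
	}
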
diff --git a/drivers/net/phy/motorcomm.c b/drivers/net/phy/motorcomm.c
index 0e91f5d1a4fd..a3593e663059 100644
--- a/drivers/net/phy/motorcomm.c
+++ b/drivers/net/phy/motorcomm.c
@@ -213,6 +213,20 @@
#define YT8521_RC1R_RGMII_2_100_NS 14
#define YT8521_RC1R_RGMII_2_250_NS 15
+/* LED CONFIG */
+#define YT8521_MAX_LEDS 3
+#define YT8521_LED0_CFG_REG 0xA00C
+#define YT8521_LED1_CFG_REG 0xA00D
+#define YT8521_LED2_CFG_REG 0xA00E
+#define YT8521_LED_ACT_BLK_IND BIT(13)
+#define YT8521_LED_FDX_ON_EN BIT(12)
+#define YT8521_LED_HDX_ON_EN BIT(11)
+#define YT8521_LED_TXACT_BLK_EN BIT(10)
+#define YT8521_LED_RXACT_BLK_EN BIT(9)
+#define YT8521_LED_1000_ON_EN BIT(6)
+#define YT8521_LED_100_ON_EN BIT(5)
+#define YT8521_LED_10_ON_EN BIT(4)
+
#define YTPHY_MISC_CONFIG_REG 0xA006
#define YTPHY_MCR_FIBER_SPEED_MASK BIT(0)
#define YTPHY_MCR_FIBER_1000BX (0x1 << 0)
@@ -1681,6 +1695,106 @@ err_restore_page:
return phy_restore_page(phydev, old_page, ret);
}
+static const unsigned long supported_trgs = (BIT(TRIGGER_NETDEV_FULL_DUPLEX) |
+ BIT(TRIGGER_NETDEV_HALF_DUPLEX) |
+ BIT(TRIGGER_NETDEV_LINK) |
+ BIT(TRIGGER_NETDEV_LINK_10) |
+ BIT(TRIGGER_NETDEV_LINK_100) |
+ BIT(TRIGGER_NETDEV_LINK_1000) |
+ BIT(TRIGGER_NETDEV_RX) |
+ BIT(TRIGGER_NETDEV_TX));
+
+static int yt8521_led_hw_is_supported(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ if (index >= YT8521_MAX_LEDS)
+ return -EINVAL;
+
+ /* All combinations of the supported triggers are allowed */
+ if (rules & ~supported_trgs)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int yt8521_led_hw_control_set(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ u16 val = 0;
+
+ if (index >= YT8521_MAX_LEDS)
+ return -EINVAL;
+
+ if (test_bit(TRIGGER_NETDEV_LINK, &rules)) {
+ val |= YT8521_LED_10_ON_EN;
+ val |= YT8521_LED_100_ON_EN;
+ val |= YT8521_LED_1000_ON_EN;
+ }
+
+ if (test_bit(TRIGGER_NETDEV_LINK_10, &rules))
+ val |= YT8521_LED_10_ON_EN;
+
+ if (test_bit(TRIGGER_NETDEV_LINK_100, &rules))
+ val |= YT8521_LED_100_ON_EN;
+
+ if (test_bit(TRIGGER_NETDEV_LINK_1000, &rules))
+ val |= YT8521_LED_1000_ON_EN;
+
+ if (test_bit(TRIGGER_NETDEV_FULL_DUPLEX, &rules))
+ val |= YT8521_LED_FDX_ON_EN;
+
+ if (test_bit(TRIGGER_NETDEV_HALF_DUPLEX, &rules))
+ val |= YT8521_LED_HDX_ON_EN;
+
+ if (test_bit(TRIGGER_NETDEV_TX, &rules) ||
+ test_bit(TRIGGER_NETDEV_RX, &rules))
+ val |= YT8521_LED_ACT_BLK_IND;
+
+ if (test_bit(TRIGGER_NETDEV_TX, &rules))
+ val |= YT8521_LED_TXACT_BLK_EN;
+
+ if (test_bit(TRIGGER_NETDEV_RX, &rules))
+ val |= YT8521_LED_RXACT_BLK_EN;
+
+ return ytphy_write_ext(phydev, YT8521_LED0_CFG_REG + index, val);
+}
+
+static int yt8521_led_hw_control_get(struct phy_device *phydev, u8 index,
+ unsigned long *rules)
+{
+ int val;
+
+ if (index >= YT8521_MAX_LEDS)
+ return -EINVAL;
+
+ val = ytphy_read_ext(phydev, YT8521_LED0_CFG_REG + index);
+ if (val < 0)
+ return val;
+
+ if (val & YT8521_LED_TXACT_BLK_EN || val & YT8521_LED_ACT_BLK_IND)
+ __set_bit(TRIGGER_NETDEV_TX, rules);
+
+ if (val & YT8521_LED_RXACT_BLK_EN || val & YT8521_LED_ACT_BLK_IND)
+ __set_bit(TRIGGER_NETDEV_RX, rules);
+
+ if (val & YT8521_LED_FDX_ON_EN)
+ __set_bit(TRIGGER_NETDEV_FULL_DUPLEX, rules);
+
+ if (val & YT8521_LED_HDX_ON_EN)
+ __set_bit(TRIGGER_NETDEV_HALF_DUPLEX, rules);
+
+ if (val & YT8521_LED_1000_ON_EN)
+ __set_bit(TRIGGER_NETDEV_LINK_1000, rules);
+
+ if (val & YT8521_LED_100_ON_EN)
+ __set_bit(TRIGGER_NETDEV_LINK_100, rules);
+
+ if (val & YT8521_LED_10_ON_EN)
+ __set_bit(TRIGGER_NETDEV_LINK_10, rules);
+
+ return 0;
+}
+
static int yt8531_config_init(struct phy_device *phydev)
{
struct device_node *node = phydev->mdio.dev.of_node;
@@ -2920,6 +3034,9 @@ static struct phy_driver motorcomm_phy_drvs[] = {
.soft_reset = yt8521_soft_reset,
.suspend = yt8521_suspend,
.resume = yt8521_resume,
+ .led_hw_is_supported = yt8521_led_hw_is_supported,
+ .led_hw_control_set = yt8521_led_hw_control_set,
+ .led_hw_control_get = yt8521_led_hw_control_get,
},
{
PHY_ID_MATCH_EXACT(PHY_ID_YT8531),
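
The yt8521 LED hooks above translate netdev trigger rules into per-LED
config bits, and the mapping must be symmetric so that
yt8521_led_hw_control_get() reports exactly what
yt8521_led_hw_control_set() programmed. A small host-side sketch of the
forward mapping, with the bit values copied from the defines above
(illustrative only, not kernel code):

	#include <stdint.h>
	#include <stdio.h>

	#define LED_ACT_BLK_IND		(1u << 13)
	#define LED_FDX_ON_EN		(1u << 12)
	#define LED_HDX_ON_EN		(1u << 11)
	#define LED_TXACT_BLK_EN	(1u << 10)
	#define LED_RXACT_BLK_EN	(1u << 9)
	#define LED_1000_ON_EN		(1u << 6)
	#define LED_100_ON_EN		(1u << 5)
	#define LED_10_ON_EN		(1u << 4)

	struct rules { int link, fdx, hdx, tx, rx; };

	static uint16_t rules_to_reg(const struct rules *r)
	{
		uint16_t val = 0;

		if (r->link)	/* LINK = LED on at any speed */
			val |= LED_10_ON_EN | LED_100_ON_EN | LED_1000_ON_EN;
		if (r->fdx)
			val |= LED_FDX_ON_EN;
		if (r->hdx)
			val |= LED_HDX_ON_EN;
		if (r->tx || r->rx)
			val |= LED_ACT_BLK_IND;
		if (r->tx)
			val |= LED_TXACT_BLK_EN;
		if (r->rx)
			val |= LED_RXACT_BLK_EN;

		return val;
	}

	int main(void)
	{
		struct rules r = { .link = 1, .tx = 1, .rx = 1 };

		/* link + tx/rx activity -> 0x2670 */
		printf("0x%04x\n", rules_to_reg(&r));
		return 0;
	}
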
diff --git a/drivers/net/phy/mscc/mscc.h b/drivers/net/phy/mscc/mscc.h
index 2bfe314ef881..2d8eca54c40a 100644
--- a/drivers/net/phy/mscc/mscc.h
+++ b/drivers/net/phy/mscc/mscc.h
@@ -196,6 +196,9 @@ enum rgmii_clock_delay {
#define MSCC_PHY_EXTENDED_INT_MS_EGR BIT(9)
/* Extended Page 3 Registers */
+#define MSCC_PHY_SERDES_PCS_CTRL 16
+#define MSCC_PHY_SERDES_ANEG BIT(7)
+
#define MSCC_PHY_SERDES_TX_VALID_CNT 21
#define MSCC_PHY_SERDES_TX_CRC_ERR_CNT 22
#define MSCC_PHY_SERDES_RX_VALID_CNT 28
diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c
index 24c75903f535..ef0ef1570d39 100644
--- a/drivers/net/phy/mscc/mscc_main.c
+++ b/drivers/net/phy/mscc/mscc_main.c
@@ -2202,6 +2202,28 @@ static int vsc85xx_read_status(struct phy_device *phydev)
return genphy_read_status(phydev);
}
+static unsigned int vsc85xx_inband_caps(struct phy_device *phydev,
+ phy_interface_t interface)
+{
+ if (interface != PHY_INTERFACE_MODE_SGMII &&
+ interface != PHY_INTERFACE_MODE_QSGMII)
+ return 0;
+
+ return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE;
+}
+
+static int vsc85xx_config_inband(struct phy_device *phydev, unsigned int modes)
+{
+ u16 reg_val = 0;
+
+ if (modes == LINK_INBAND_ENABLE)
+ reg_val = MSCC_PHY_SERDES_ANEG;
+
+ return phy_modify_paged(phydev, MSCC_PHY_PAGE_EXTENDED_3,
+ MSCC_PHY_SERDES_PCS_CTRL, MSCC_PHY_SERDES_ANEG,
+ reg_val);
+}
+
static int vsc8514_probe(struct phy_device *phydev)
{
struct vsc8531_private *vsc8531;
@@ -2414,6 +2436,8 @@ static struct phy_driver vsc85xx_driver[] = {
.get_sset_count = &vsc85xx_get_sset_count,
.get_strings = &vsc85xx_get_strings,
.get_stats = &vsc85xx_get_stats,
+ .inband_caps = vsc85xx_inband_caps,
+ .config_inband = vsc85xx_config_inband,
},
{
.phy_id = PHY_ID_VSC8514,
@@ -2437,6 +2461,8 @@ static struct phy_driver vsc85xx_driver[] = {
.get_sset_count = &vsc85xx_get_sset_count,
.get_strings = &vsc85xx_get_strings,
.get_stats = &vsc85xx_get_stats,
+ .inband_caps = vsc85xx_inband_caps,
+ .config_inband = vsc85xx_config_inband,
},
{
.phy_id = PHY_ID_VSC8530,
@@ -2557,6 +2583,8 @@ static struct phy_driver vsc85xx_driver[] = {
.get_sset_count = &vsc85xx_get_sset_count,
.get_strings = &vsc85xx_get_strings,
.get_stats = &vsc85xx_get_stats,
+ .inband_caps = vsc85xx_inband_caps,
+ .config_inband = vsc85xx_config_inband,
},
{
.phy_id = PHY_ID_VSC856X,
@@ -2579,6 +2607,8 @@ static struct phy_driver vsc85xx_driver[] = {
.get_sset_count = &vsc85xx_get_sset_count,
.get_strings = &vsc85xx_get_strings,
.get_stats = &vsc85xx_get_stats,
+ .inband_caps = vsc85xx_inband_caps,
+ .config_inband = vsc85xx_config_inband,
},
{
.phy_id = PHY_ID_VSC8572,
@@ -2605,6 +2635,8 @@ static struct phy_driver vsc85xx_driver[] = {
.get_sset_count = &vsc85xx_get_sset_count,
.get_strings = &vsc85xx_get_strings,
.get_stats = &vsc85xx_get_stats,
+ .inband_caps = vsc85xx_inband_caps,
+ .config_inband = vsc85xx_config_inband,
},
{
.phy_id = PHY_ID_VSC8574,
@@ -2631,6 +2663,8 @@ static struct phy_driver vsc85xx_driver[] = {
.get_sset_count = &vsc85xx_get_sset_count,
.get_strings = &vsc85xx_get_strings,
.get_stats = &vsc85xx_get_stats,
+ .inband_caps = vsc85xx_inband_caps,
+ .config_inband = vsc85xx_config_inband,
},
{
.phy_id = PHY_ID_VSC8575,
@@ -2655,6 +2689,8 @@ static struct phy_driver vsc85xx_driver[] = {
.get_sset_count = &vsc85xx_get_sset_count,
.get_strings = &vsc85xx_get_strings,
.get_stats = &vsc85xx_get_stats,
+ .inband_caps = vsc85xx_inband_caps,
+ .config_inband = vsc85xx_config_inband,
},
{
.phy_id = PHY_ID_VSC8582,
@@ -2679,6 +2715,8 @@ static struct phy_driver vsc85xx_driver[] = {
.get_sset_count = &vsc85xx_get_sset_count,
.get_strings = &vsc85xx_get_strings,
.get_stats = &vsc85xx_get_stats,
+ .inband_caps = vsc85xx_inband_caps,
+ .config_inband = vsc85xx_config_inband,
},
{
.phy_id = PHY_ID_VSC8584,
@@ -2704,6 +2742,8 @@ static struct phy_driver vsc85xx_driver[] = {
.get_strings = &vsc85xx_get_strings,
.get_stats = &vsc85xx_get_stats,
.link_change_notify = &vsc85xx_link_change_notify,
+ .inband_caps = vsc85xx_inband_caps,
+ .config_inband = vsc85xx_config_inband,
}
};
diff --git a/drivers/net/phy/mxl-86110.c b/drivers/net/phy/mxl-86110.c
index ff2a3a22bd5b..e5d137a37a1d 100644
--- a/drivers/net/phy/mxl-86110.c
+++ b/drivers/net/phy/mxl-86110.c
@@ -15,6 +15,7 @@
/* PHY ID */
#define PHY_ID_MXL86110 0xc1335580
+#define PHY_ID_MXL86111 0xc1335588
/* required to access extended registers */
#define MXL86110_EXTD_REG_ADDR_OFFSET 0x1E
@@ -22,7 +23,15 @@
#define PHY_IRQ_ENABLE_REG 0x12
#define PHY_IRQ_ENABLE_REG_WOL BIT(6)
-/* SyncE Configuration Register - COM_EXT SYNCE_CFG */
+/* different pages for EXTD access for MXL86111 */
+/* SerDes/PHY Control Access Register - COM_EXT_SMI_SDS_PHY */
+#define MXL86111_EXT_SMI_SDS_PHY_REG 0xA000
+#define MXL86111_EXT_SMI_SDS_PHYSPACE_MASK BIT(1)
+#define MXL86111_EXT_SMI_SDS_PHYFIBER_SPACE (0x1 << 1)
+#define MXL86111_EXT_SMI_SDS_PHYUTP_SPACE (0x0 << 1)
+#define MXL86111_EXT_SMI_SDS_PHY_AUTO 0xff
+
+/* SyncE Configuration Register - COM_EXT_SYNCE_CFG */
#define MXL86110_EXT_SYNCE_CFG_REG 0xA012
#define MXL86110_EXT_SYNCE_CFG_CLK_FRE_SEL BIT(4)
#define MXL86110_EXT_SYNCE_CFG_EN_SYNC_E_DURING_LNKDN BIT(5)
@@ -71,6 +80,11 @@
#define MXL86110_MAX_LEDS 3
/* LED registers and defines */
+#define MXL86110_COM_EXT_LED_GEN_CFG 0xA00B
+# define MXL86110_COM_EXT_LED_GEN_CFG_LFM(x) ((BIT(0) | BIT(1)) << (3 * (x)))
+# define MXL86110_COM_EXT_LED_GEN_CFG_LFME(x) (BIT(0) << (3 * (x)))
+# define MXL86110_COM_EXT_LED_GEN_CFG_LFE(x) (BIT(2) << (3 * (x)))
+
#define MXL86110_LED0_CFG_REG 0xA00C
#define MXL86110_LED1_CFG_REG 0xA00D
#define MXL86110_LED2_CFG_REG 0xA00E
@@ -110,9 +124,67 @@
/* Chip Configuration Register - COM_EXT_CHIP_CFG */
#define MXL86110_EXT_CHIP_CFG_REG 0xA001
+#define MXL86111_EXT_CHIP_CFG_MODE_SEL_MASK GENMASK(2, 0)
+#define MXL86111_EXT_CHIP_CFG_MODE_UTP_TO_RGMII 0
+#define MXL86111_EXT_CHIP_CFG_MODE_FIBER_TO_RGMII 1
+#define MXL86111_EXT_CHIP_CFG_MODE_UTP_FIBER_TO_RGMII 2
+#define MXL86111_EXT_CHIP_CFG_MODE_UTP_TO_SGMII 3
+#define MXL86111_EXT_CHIP_CFG_MODE_SGPHY_TO_RGMAC 4
+#define MXL86111_EXT_CHIP_CFG_MODE_SGMAC_TO_RGPHY 5
+#define MXL86111_EXT_CHIP_CFG_MODE_UTP_TO_FIBER_AUTO 6
+#define MXL86111_EXT_CHIP_CFG_MODE_UTP_TO_FIBER_FORCE 7
+
+#define MXL86111_EXT_CHIP_CFG_CLDO_MASK GENMASK(5, 4)
+#define MXL86111_EXT_CHIP_CFG_CLDO_3V3 0
+#define MXL86111_EXT_CHIP_CFG_CLDO_2V5 1
+#define MXL86111_EXT_CHIP_CFG_CLDO_1V8_2 2
+#define MXL86111_EXT_CHIP_CFG_CLDO_1V8_3 3
+#define MXL86111_EXT_CHIP_CFG_CLDO_SHIFT 4
+#define MXL86111_EXT_CHIP_CFG_ELDO BIT(6)
#define MXL86110_EXT_CHIP_CFG_RXDLY_ENABLE BIT(8)
#define MXL86110_EXT_CHIP_CFG_SW_RST_N_MODE BIT(15)
+/* Specific Status Register - PHY_STAT */
+#define MXL86111_PHY_STAT_REG 0x11
+#define MXL86111_PHY_STAT_SPEED_MASK GENMASK(15, 14)
+#define MXL86111_PHY_STAT_SPEED_OFFSET 14
+#define MXL86111_PHY_STAT_SPEED_10M 0x0
+#define MXL86111_PHY_STAT_SPEED_100M 0x1
+#define MXL86111_PHY_STAT_SPEED_1000M 0x2
+#define MXL86111_PHY_STAT_DPX_OFFSET 13
+#define MXL86111_PHY_STAT_DPX BIT(13)
+#define MXL86111_PHY_STAT_LSRT BIT(10)
+
+/* 3 phy reg page modes; auto mode combines UTP and fiber mode */
+#define MXL86111_MODE_FIBER 0x1
+#define MXL86111_MODE_UTP 0x2
+#define MXL86111_MODE_AUTO 0x3
+
+/* FIBER Auto-Negotiation link partner ability - SDS_AN_LPA */
+#define MXL86111_SDS_AN_LPA_PAUSE (0x3 << 7)
+#define MXL86111_SDS_AN_LPA_ASYM_PAUSE (0x2 << 7)
+
+/* Miscellaneous Control Register - COM_EXT_MISC_CFG */
+#define MXL86111_EXT_MISC_CONFIG_REG 0xa006
+#define MXL86111_EXT_MISC_CONFIG_FIB_SPEED_SEL BIT(0)
+#define MXL86111_EXT_MISC_CONFIG_FIB_SPEED_SEL_1000BX (0x1 << 0)
+#define MXL86111_EXT_MISC_CONFIG_FIB_SPEED_SEL_100BX (0x0 << 0)
+
+/* Phy fiber Link timer cfg2 Register - EXT_SDS_LINK_TIMER_CFG2 */
+#define MXL86111_EXT_SDS_LINK_TIMER_CFG2_REG 0xA5
+#define MXL86111_EXT_SDS_LINK_TIMER_CFG2_EN_AUTOSEN BIT(15)
+
+/* default values of PHY register, required for Dual Media mode */
+#define MII_BMSR_DEFAULT_VAL 0x7949
+#define MII_ESTATUS_DEFAULT_VAL 0x2000
+
+/* Timeout in ms for PHY SW reset check in STD_CTRL/SDS_CTRL */
+#define BMCR_RESET_TIMEOUT 500
+
+/* PL P1 requires optimized RGMII timing for 1.8V RGMII voltage */
+#define MXL86111_PL_P1 0x500
+
/**
* __mxl86110_write_extended_reg() - write to a PHY's extended register
* @phydev: pointer to the PHY device structure
@@ -236,6 +308,29 @@ static int mxl86110_read_extended_reg(struct phy_device *phydev, u16 regnum)
}
/**
+ * mxl86110_modify_extended_reg() - modify bits of a PHY's extended register
+ * @phydev: pointer to the PHY device structure
+ * @regnum: register number to modify
+ * @mask: bit mask of bits to clear
+ * @set: bit mask of bits to set
+ *
+ * Note: register value = (old register value & ~mask) | set.
+ *
+ * Return: 0 or negative error code
+ */
+static int mxl86110_modify_extended_reg(struct phy_device *phydev,
+ u16 regnum, u16 mask, u16 set)
+{
+ int ret;
+
+ phy_lock_mdio_bus(phydev);
+ ret = __mxl86110_modify_extended_reg(phydev, regnum, mask, set);
+ phy_unlock_mdio_bus(phydev);
+
+ return ret;
+}
+
+/**
* mxl86110_get_wol() - report if wake-on-lan is enabled
* @phydev: pointer to the phy_device
* @wol: a pointer to a &struct ethtool_wolinfo
@@ -394,6 +489,7 @@ static int mxl86110_led_hw_control_set(struct phy_device *phydev, u8 index,
unsigned long rules)
{
u16 val = 0;
+ int ret;
if (index >= MXL86110_MAX_LEDS)
return -EINVAL;
@@ -423,8 +519,43 @@ static int mxl86110_led_hw_control_set(struct phy_device *phydev, u8 index,
rules & BIT(TRIGGER_NETDEV_RX))
val |= MXL86110_LEDX_CFG_BLINK;
- return mxl86110_write_extended_reg(phydev,
+ ret = mxl86110_write_extended_reg(phydev,
MXL86110_LED0_CFG_REG + index, val);
+ if (ret)
+ return ret;
+
+ /* clear manual control bit */
+ ret = mxl86110_modify_extended_reg(phydev,
+ MXL86110_COM_EXT_LED_GEN_CFG,
+ MXL86110_COM_EXT_LED_GEN_CFG_LFE(index),
+ 0);
+
+ return ret;
+}
+
+static int mxl86110_led_brightness_set(struct phy_device *phydev,
+ u8 index, enum led_brightness value)
+{
+ u16 mask, set;
+ int ret;
+
+ if (index >= MXL86110_MAX_LEDS)
+ return -EINVAL;
+
+ /* force manual control */
+ set = MXL86110_COM_EXT_LED_GEN_CFG_LFE(index);
+ /* clear previous force mode */
+ mask = MXL86110_COM_EXT_LED_GEN_CFG_LFM(index);
+
+ /* force LED to be permanently on */
+ if (value != LED_OFF)
+ set |= MXL86110_COM_EXT_LED_GEN_CFG_LFME(index);
+
+ ret = mxl86110_modify_extended_reg(phydev,
+ MXL86110_COM_EXT_LED_GEN_CFG,
+ mask, set);
+
+ return ret;
}
/**
@@ -521,22 +652,15 @@ static int mxl86110_enable_led_activity_blink(struct phy_device *phydev)
}
/**
- * mxl86110_config_init() - initialize the PHY
+ * mxl86110_config_rgmii_delay() - configure RGMII delays
* @phydev: pointer to the phy_device
*
* Return: 0 or negative errno code
*/
-static int mxl86110_config_init(struct phy_device *phydev)
+static int mxl86110_config_rgmii_delay(struct phy_device *phydev)
{
- u16 val = 0;
int ret;
-
- phy_lock_mdio_bus(phydev);
-
- /* configure syncE / clk output */
- ret = mxl86110_synce_clk_cfg(phydev);
- if (ret < 0)
- goto out;
+ u16 val;
switch (phydev->interface) {
case PHY_INTERFACE_MODE_RGMII:
@@ -578,6 +702,31 @@ static int mxl86110_config_init(struct phy_device *phydev)
if (ret < 0)
goto out;
+out:
+ return ret;
+}
+
+/**
+ * mxl86110_config_init() - initialize the MXL86110 PHY
+ * @phydev: pointer to the phy_device
+ *
+ * Return: 0 or negative errno code
+ */
+static int mxl86110_config_init(struct phy_device *phydev)
+{
+ int ret;
+
+ phy_lock_mdio_bus(phydev);
+
+ /* configure syncE / clk output */
+ ret = mxl86110_synce_clk_cfg(phydev);
+ if (ret < 0)
+ goto out;
+
+ ret = mxl86110_config_rgmii_delay(phydev);
+ if (ret < 0)
+ goto out;
+
ret = mxl86110_enable_led_activity_blink(phydev);
if (ret < 0)
goto out;
@@ -589,6 +738,201 @@ out:
return ret;
}
+/**
+ * mxl86111_probe() - validate bootstrap chip config and set UTP page
+ * @phydev: pointer to the phy_device
+ *
+ * Return: 0 or negative errno code
+ */
+static int mxl86111_probe(struct phy_device *phydev)
+{
+ int chip_config;
+ u16 reg_page;
+ int ret;
+
+ chip_config = mxl86110_read_extended_reg(phydev, MXL86110_EXT_CHIP_CFG_REG);
+ if (chip_config < 0)
+ return chip_config;
+
+ switch (chip_config & MXL86111_EXT_CHIP_CFG_MODE_SEL_MASK) {
+ case MXL86111_EXT_CHIP_CFG_MODE_UTP_TO_SGMII:
+ case MXL86111_EXT_CHIP_CFG_MODE_UTP_TO_RGMII:
+ phydev->port = PORT_TP;
+ reg_page = MXL86111_EXT_SMI_SDS_PHYUTP_SPACE;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ ret = mxl86110_write_extended_reg(phydev,
+ MXL86111_EXT_SMI_SDS_PHY_REG,
+ reg_page);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * mxl86111_config_init() - initialize the MXL86111 PHY
+ * @phydev: pointer to the phy_device
+ *
+ * Return: 0 or negative errno code
+ */
+static int mxl86111_config_init(struct phy_device *phydev)
+{
+ int ret;
+
+ phy_lock_mdio_bus(phydev);
+
+ /* configure syncE / clk output */
+ ret = mxl86110_synce_clk_cfg(phydev);
+ if (ret < 0)
+ goto out;
+
+ switch (phydev->interface) {
+ case PHY_INTERFACE_MODE_100BASEX:
+ ret = __mxl86110_modify_extended_reg(phydev,
+ MXL86111_EXT_MISC_CONFIG_REG,
+ MXL86111_EXT_MISC_CONFIG_FIB_SPEED_SEL,
+ MXL86111_EXT_MISC_CONFIG_FIB_SPEED_SEL_100BX);
+ if (ret < 0)
+ goto out;
+ break;
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_SGMII:
+ ret = __mxl86110_modify_extended_reg(phydev,
+ MXL86111_EXT_MISC_CONFIG_REG,
+ MXL86111_EXT_MISC_CONFIG_FIB_SPEED_SEL,
+ MXL86111_EXT_MISC_CONFIG_FIB_SPEED_SEL_1000BX);
+ if (ret < 0)
+ goto out;
+ break;
+ default:
+ /* RGMII modes */
+ ret = mxl86110_config_rgmii_delay(phydev);
+ if (ret < 0)
+ goto out;
+ ret = __mxl86110_modify_extended_reg(phydev, MXL86110_EXT_RGMII_CFG1_REG,
+ MXL86110_EXT_RGMII_CFG1_FULL_MASK, ret);
+
+ /* PL P1 requires optimized RGMII timing for 1.8V RGMII voltage */
+ ret = __mxl86110_read_extended_reg(phydev, 0xf);
+ if (ret < 0)
+ goto out;
+
+ if (ret == MXL86111_PL_P1) {
+ ret = __mxl86110_read_extended_reg(phydev, MXL86110_EXT_CHIP_CFG_REG);
+ if (ret < 0)
+ goto out;
+
+ /* check if LDO is in 1.8V mode */
+ switch (FIELD_GET(MXL86111_EXT_CHIP_CFG_CLDO_MASK, ret)) {
+ case MXL86111_EXT_CHIP_CFG_CLDO_1V8_3:
+ case MXL86111_EXT_CHIP_CFG_CLDO_1V8_2:
+ ret = __mxl86110_write_extended_reg(phydev, 0xa010, 0xabff);
+ if (ret < 0)
+ goto out;
+ break;
+ default:
+ break;
+ }
+ }
+ break;
+ }
+
+ ret = mxl86110_enable_led_activity_blink(phydev);
+ if (ret < 0)
+ goto out;
+
+ ret = mxl86110_broadcast_cfg(phydev);
+out:
+ phy_unlock_mdio_bus(phydev);
+
+ return ret;
+}
+
+/**
+ * mxl86111_read_page() - read reg page
+ * @phydev: pointer to the phy_device
+ *
+ * Return: current reg space of mxl86111 or negative errno code
+ */
+static int mxl86111_read_page(struct phy_device *phydev)
+{
+ int page;
+
+ page = __mxl86110_read_extended_reg(phydev, MXL86111_EXT_SMI_SDS_PHY_REG);
+ if (page < 0)
+ return page;
+
+ return page & MXL86111_EXT_SMI_SDS_PHYSPACE_MASK;
+}
+
+/**
+ * mxl86111_write_page() - Set reg page
+ * @phydev: pointer to the phy_device
+ * @page: The reg page to set
+ *
+ * Return: 0 or negative errno code
+ */
+static int mxl86111_write_page(struct phy_device *phydev, int page)
+{
+ return __mxl86110_modify_extended_reg(phydev, MXL86111_EXT_SMI_SDS_PHY_REG,
+ MXL86111_EXT_SMI_SDS_PHYSPACE_MASK, page);
+}
+
+static int mxl86111_config_inband(struct phy_device *phydev, unsigned int modes)
+{
+ int ret;
+
+ ret = phy_modify_paged(phydev, MXL86111_EXT_SMI_SDS_PHYFIBER_SPACE,
+ MII_BMCR, BMCR_ANENABLE,
+ (modes == LINK_INBAND_DISABLE) ? 0 : BMCR_ANENABLE);
+ if (ret < 0)
+ return ret;
+
+ phy_lock_mdio_bus(phydev);
+
+ ret = __mxl86110_modify_extended_reg(phydev, MXL86111_EXT_SDS_LINK_TIMER_CFG2_REG,
+ MXL86111_EXT_SDS_LINK_TIMER_CFG2_EN_AUTOSEN,
+ (modes == LINK_INBAND_DISABLE) ? 0 :
+ MXL86111_EXT_SDS_LINK_TIMER_CFG2_EN_AUTOSEN);
+ if (ret < 0)
+ goto out;
+
+ ret = __mxl86110_modify_extended_reg(phydev, MXL86110_EXT_CHIP_CFG_REG,
+ MXL86110_EXT_CHIP_CFG_SW_RST_N_MODE, 0);
+ if (ret < 0)
+ goto out;
+
+ /* For fiber forced mode, power down/up to re-aneg */
+ if (modes != LINK_INBAND_DISABLE) {
+ __phy_modify(phydev, MII_BMCR, 0, BMCR_PDOWN);
+ usleep_range(1000, 1050);
+ __phy_modify(phydev, MII_BMCR, BMCR_PDOWN, 0);
+ }
+
+out:
+ phy_unlock_mdio_bus(phydev);
+
+ return ret;
+}
+
+static unsigned int mxl86111_inband_caps(struct phy_device *phydev,
+ phy_interface_t interface)
+{
+ switch (interface) {
+ case PHY_INTERFACE_MODE_100BASEX:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_SGMII:
+ return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE;
+ default:
+ return 0;
+ }
+}
+
static struct phy_driver mxl_phy_drvs[] = {
{
PHY_ID_MATCH_EXACT(PHY_ID_MXL86110),
@@ -596,9 +940,26 @@ static struct phy_driver mxl_phy_drvs[] = {
.config_init = mxl86110_config_init,
.get_wol = mxl86110_get_wol,
.set_wol = mxl86110_set_wol,
+ .led_brightness_set = mxl86110_led_brightness_set,
+ .led_hw_is_supported = mxl86110_led_hw_is_supported,
+ .led_hw_control_get = mxl86110_led_hw_control_get,
+ .led_hw_control_set = mxl86110_led_hw_control_set,
+ },
+ {
+ PHY_ID_MATCH_EXACT(PHY_ID_MXL86111),
+ .name = "MXL86111 Gigabit Ethernet",
+ .probe = mxl86111_probe,
+ .config_init = mxl86111_config_init,
+ .get_wol = mxl86110_get_wol,
+ .set_wol = mxl86110_set_wol,
+ .inband_caps = mxl86111_inband_caps,
+ .config_inband = mxl86111_config_inband,
+ .read_page = mxl86111_read_page,
+ .write_page = mxl86111_write_page,
+ .led_brightness_set = mxl86110_led_brightness_set,
.led_hw_is_supported = mxl86110_led_hw_is_supported,
- .led_hw_control_get = mxl86110_led_hw_control_get,
- .led_hw_control_set = mxl86110_led_hw_control_set,
+ .led_hw_control_get = mxl86110_led_hw_control_get,
+ .led_hw_control_set = mxl86110_led_hw_control_set,
},
};
@@ -606,11 +967,12 @@ module_phy_driver(mxl_phy_drvs);
static const struct mdio_device_id __maybe_unused mxl_tbl[] = {
{ PHY_ID_MATCH_EXACT(PHY_ID_MXL86110) },
+ { PHY_ID_MATCH_EXACT(PHY_ID_MXL86111) },
{ }
};
MODULE_DEVICE_TABLE(mdio, mxl_tbl);
-MODULE_DESCRIPTION("MaxLinear MXL86110 PHY driver");
+MODULE_DESCRIPTION("MaxLinear MXL86110/MXL86111 PHY driver");
MODULE_AUTHOR("Stefano Radaelli");
MODULE_LICENSE("GPL");
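
The mxl86111 read_page/write_page callbacks above plug into phylib's
generic paged-access helpers (phy_read_paged(), phy_modify_paged(), ...),
which save the current page, switch to the requested one, perform the
access and restore the old page under the MDIO bus lock. A simplified
user-space model of that pattern (locking and error paths elided; not the
actual phylib implementation):

	#include <stdio.h>

	struct phy_dev {
		int page;
		int regs[4][32];	/* toy register file: [page][regnum] */
	};

	/* driver-supplied callbacks, like mxl86111_read_page/_write_page */
	static int read_page(struct phy_dev *p) { return p->page; }
	static void write_page(struct phy_dev *p, int page) { p->page = page; }

	/* core helper: save page, select, access, restore */
	static int phy_read_paged(struct phy_dev *p, int page, int reg)
	{
		int oldpage = read_page(p);
		int val;

		write_page(p, page);
		val = p->regs[page][reg];
		write_page(p, oldpage);

		return val;
	}

	int main(void)
	{
		struct phy_dev p = { .page = 0 };

		p.regs[2][16] = 0xabcd;
		printf("0x%x, page restored to %d\n",
		       phy_read_paged(&p, 2, 16), p.page);
		return 0;
	}
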
diff --git a/drivers/net/phy/nxp-c45-tja11xx-macsec.c b/drivers/net/phy/nxp-c45-tja11xx-macsec.c
index 550ef08970f4..fc897ba79b03 100644
--- a/drivers/net/phy/nxp-c45-tja11xx-macsec.c
+++ b/drivers/net/phy/nxp-c45-tja11xx-macsec.c
@@ -926,7 +926,6 @@ static int nxp_c45_mdo_dev_open(struct macsec_context *ctx)
struct phy_device *phydev = ctx->phydev;
struct nxp_c45_phy *priv = phydev->priv;
struct nxp_c45_secy *phy_secy;
- int any_bit_set;
phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
if (IS_ERR(phy_secy))
@@ -939,8 +938,7 @@ static int nxp_c45_mdo_dev_open(struct macsec_context *ctx)
if (phy_secy->rx_sc)
nxp_c45_rx_sc_en(phydev, phy_secy->rx_sc, true);
- any_bit_set = find_first_bit(priv->macsec->secy_bitmap, TX_SC_MAX);
- if (any_bit_set == TX_SC_MAX)
+ if (bitmap_empty(priv->macsec->secy_bitmap, TX_SC_MAX))
nxp_c45_macsec_en(phydev, true);
set_bit(phy_secy->secy_id, priv->macsec->secy_bitmap);
@@ -953,7 +951,6 @@ static int nxp_c45_mdo_dev_stop(struct macsec_context *ctx)
struct phy_device *phydev = ctx->phydev;
struct nxp_c45_phy *priv = phydev->priv;
struct nxp_c45_secy *phy_secy;
- int any_bit_set;
phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
if (IS_ERR(phy_secy))
@@ -967,8 +964,7 @@ static int nxp_c45_mdo_dev_stop(struct macsec_context *ctx)
nxp_c45_set_rx_sc0_impl(phydev, false);
clear_bit(phy_secy->secy_id, priv->macsec->secy_bitmap);
- any_bit_set = find_first_bit(priv->macsec->secy_bitmap, TX_SC_MAX);
- if (any_bit_set == TX_SC_MAX)
+ if (bitmap_empty(priv->macsec->secy_bitmap, TX_SC_MAX))
nxp_c45_macsec_en(phydev, false);
return 0;
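
The nxp-c45 hunk above replaces the "find_first_bit() == size" idiom with
bitmap_empty(), which states the intent (no SecY enabled) directly; the two
tests are equivalent because find_first_bit() returns the bitmap size when
no bit is set. A tiny single-word sketch of that equivalence (the kernel
helpers handle arbitrary bitmap lengths):

	#include <assert.h>
	#include <stdio.h>

	#define TX_SC_MAX 16

	static int find_first_bit(unsigned long map, int size)
	{
		for (int i = 0; i < size; i++)
			if (map & (1UL << i))
				return i;
		return size;	/* no bit set: returns the size */
	}

	static int bitmap_empty(unsigned long map, int size)
	{
		return find_first_bit(map, size) == size;
	}

	int main(void)
	{
		unsigned long secy_bitmap = 0;

		assert(bitmap_empty(secy_bitmap, TX_SC_MAX));
		secy_bitmap |= 1UL << 3;	/* set_bit(3, secy_bitmap) */
		assert(!bitmap_empty(secy_bitmap, TX_SC_MAX));
		printf("equivalent\n");
		return 0;
	}
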
diff --git a/drivers/net/phy/phy-caps.h b/drivers/net/phy/phy-caps.h
index 157759966650..b7f0c6a3037a 100644
--- a/drivers/net/phy/phy-caps.h
+++ b/drivers/net/phy/phy-caps.h
@@ -41,7 +41,7 @@ struct link_capabilities {
__ETHTOOL_DECLARE_LINK_MODE_MASK(linkmodes);
};
-int phy_caps_init(void);
+int __init phy_caps_init(void);
size_t phy_caps_speeds(unsigned int *speeds, size_t size,
unsigned long *linkmodes);
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index c02da57a4da5..02da4a203ddd 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -1548,9 +1548,24 @@ static enum phy_state_work _phy_state_machine(struct phy_device *phydev)
}
break;
case PHY_HALTED:
+ if (phydev->link) {
+ if (phydev->autoneg == AUTONEG_ENABLE) {
+ phydev->speed = SPEED_UNKNOWN;
+ phydev->duplex = DUPLEX_UNKNOWN;
+ }
+ if (phydev->master_slave_state !=
+ MASTER_SLAVE_STATE_UNSUPPORTED)
+ phydev->master_slave_state =
+ MASTER_SLAVE_STATE_UNKNOWN;
+ phydev->mdix = ETH_TP_MDI_INVALID;
+ linkmode_zero(phydev->lp_advertising);
+ }
+ fallthrough;
case PHY_ERROR:
if (phydev->link) {
phydev->link = 0;
+ phydev->eee_active = false;
+ phydev->enable_tx_lpi = false;
phy_link_down(phydev);
}
state_work = PHY_STATE_WORK_SUSPEND;
diff --git a/drivers/net/phy/phy_caps.c b/drivers/net/phy/phy_caps.c
index 2cc9ee97e867..23c808b59b6f 100644
--- a/drivers/net/phy/phy_caps.c
+++ b/drivers/net/phy/phy_caps.c
@@ -70,7 +70,7 @@ static int speed_duplex_to_capa(int speed, unsigned int duplex)
* unexpected linkmode setting that requires LINK_CAPS update.
*
*/
-int phy_caps_init(void)
+int __init phy_caps_init(void)
{
const struct link_mode_info *linkmode;
int i, capa;
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index c82c1997147b..7a67c900e79a 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -91,7 +91,7 @@ const int phy_basic_ports_array[3] = {
};
EXPORT_SYMBOL_GPL(phy_basic_ports_array);
-static const int phy_all_ports_features_array[7] = {
+static const int phy_all_ports_features_array[7] __initconst = {
ETHTOOL_LINK_MODE_Autoneg_BIT,
ETHTOOL_LINK_MODE_TP_BIT,
ETHTOOL_LINK_MODE_MII_BIT,
@@ -101,30 +101,30 @@ static const int phy_all_ports_features_array[7] = {
ETHTOOL_LINK_MODE_Backplane_BIT,
};
-static const int phy_10_100_features_array[4] = {
+static const int phy_10_100_features_array[4] __initconst = {
ETHTOOL_LINK_MODE_10baseT_Half_BIT,
ETHTOOL_LINK_MODE_10baseT_Full_BIT,
ETHTOOL_LINK_MODE_100baseT_Half_BIT,
ETHTOOL_LINK_MODE_100baseT_Full_BIT,
};
-static const int phy_basic_t1_features_array[3] = {
+static const int phy_basic_t1_features_array[3] __initconst = {
ETHTOOL_LINK_MODE_TP_BIT,
ETHTOOL_LINK_MODE_10baseT1L_Full_BIT,
ETHTOOL_LINK_MODE_100baseT1_Full_BIT,
};
-static const int phy_basic_t1s_p2mp_features_array[2] = {
+static const int phy_basic_t1s_p2mp_features_array[2] __initconst = {
ETHTOOL_LINK_MODE_TP_BIT,
ETHTOOL_LINK_MODE_10baseT1S_P2MP_Half_BIT,
};
-static const int phy_gbit_features_array[2] = {
+static const int phy_gbit_features_array[2] __initconst = {
ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
};
-static const int phy_eee_cap1_features_array[] = {
+static const int phy_eee_cap1_features_array[] __initconst = {
ETHTOOL_LINK_MODE_100baseT_Full_BIT,
ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
@@ -136,7 +136,7 @@ static const int phy_eee_cap1_features_array[] = {
__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_eee_cap1_features) __ro_after_init;
EXPORT_SYMBOL_GPL(phy_eee_cap1_features);
-static const int phy_eee_cap2_features_array[] = {
+static const int phy_eee_cap2_features_array[] __initconst = {
ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
};
@@ -144,7 +144,7 @@ static const int phy_eee_cap2_features_array[] = {
__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_eee_cap2_features) __ro_after_init;
EXPORT_SYMBOL_GPL(phy_eee_cap2_features);
-static void features_init(void)
+static void __init features_init(void)
{
/* 10/100 half/full*/
linkmode_set_bit_array(phy_basic_ports_array,
@@ -3544,7 +3544,8 @@ static int phy_remove(struct device *dev)
* @new_driver: new phy_driver to register
* @owner: module owning this PHY
*/
-int phy_driver_register(struct phy_driver *new_driver, struct module *owner)
+static int phy_driver_register(struct phy_driver *new_driver,
+ struct module *owner)
{
int retval;
@@ -3587,7 +3588,11 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner)
return 0;
}
-EXPORT_SYMBOL(phy_driver_register);
+
+static void phy_driver_unregister(struct phy_driver *drv)
+{
+ driver_unregister(&drv->mdiodrv.driver);
+}
int phy_drivers_register(struct phy_driver *new_driver, int n,
struct module *owner)
@@ -3606,12 +3611,6 @@ int phy_drivers_register(struct phy_driver *new_driver, int n,
}
EXPORT_SYMBOL(phy_drivers_register);
-void phy_driver_unregister(struct phy_driver *drv)
-{
- driver_unregister(&drv->mdiodrv.driver);
-}
-EXPORT_SYMBOL(phy_driver_unregister);
-
void phy_drivers_unregister(struct phy_driver *drv, int n)
{
int i;
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 1988b7d2089a..9d7799ea1c17 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -702,6 +702,9 @@ static int phylink_parse_fixedlink(struct phylink *pl,
return -EINVAL;
}
+ phylink_warn(pl, "%pfw uses deprecated array-style fixed-link binding!\n",
+ fwnode);
+
ret = fwnode_property_read_u32_array(fwnode, "fixed-link",
prop, ARRAY_SIZE(prop));
if (!ret) {
@@ -3747,17 +3750,18 @@ static int phylink_sfp_config_optical(struct phylink *pl)
static int phylink_sfp_module_insert(void *upstream,
const struct sfp_eeprom_id *id)
{
+ const struct sfp_module_caps *caps;
struct phylink *pl = upstream;
ASSERT_RTNL();
- linkmode_zero(pl->sfp_support);
- phy_interface_zero(pl->sfp_interfaces);
- sfp_parse_support(pl->sfp_bus, id, pl->sfp_support, pl->sfp_interfaces);
- pl->sfp_port = sfp_parse_port(pl->sfp_bus, id, pl->sfp_support);
+ caps = sfp_get_module_caps(pl->sfp_bus);
+ phy_interface_copy(pl->sfp_interfaces, caps->interfaces);
+ linkmode_copy(pl->sfp_support, caps->link_modes);
+ pl->sfp_may_have_phy = caps->may_have_phy;
+ pl->sfp_port = caps->port;
/* If this module may have a PHY connecting later, defer until later */
- pl->sfp_may_have_phy = sfp_may_have_phy(pl->sfp_bus, id);
if (pl->sfp_may_have_phy)
return 0;
diff --git a/drivers/net/phy/qcom/at803x.c b/drivers/net/phy/qcom/at803x.c
index 51a132242462..338acd11a9b6 100644
--- a/drivers/net/phy/qcom/at803x.c
+++ b/drivers/net/phy/qcom/at803x.c
@@ -771,10 +771,10 @@ static int at8031_register_regulators(struct phy_device *phydev)
static int at8031_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
{
- struct phy_device *phydev = upstream;
__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_support);
__ETHTOOL_DECLARE_LINK_MODE_MASK(sfp_support);
- DECLARE_PHY_INTERFACE_MASK(interfaces);
+ struct phy_device *phydev = upstream;
+ const struct sfp_module_caps *caps;
phy_interface_t iface;
linkmode_zero(phy_support);
@@ -784,12 +784,11 @@ static int at8031_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
phylink_set(phy_support, Pause);
phylink_set(phy_support, Asym_Pause);
- linkmode_zero(sfp_support);
- sfp_parse_support(phydev->sfp_bus, id, sfp_support, interfaces);
+ caps = sfp_get_module_caps(phydev->sfp_bus);
/* Some modules support 10G modes as well as others we support.
* Mask out non-supported modes so the correct interface is picked.
*/
- linkmode_and(sfp_support, phy_support, sfp_support);
+ linkmode_and(sfp_support, phy_support, caps->link_modes);
if (linkmode_empty(sfp_support)) {
dev_err(&phydev->mdio.dev, "incompatible SFP module inserted\n");
diff --git a/drivers/net/phy/qcom/qca807x.c b/drivers/net/phy/qcom/qca807x.c
index 070dc8c00835..1be8295a95cb 100644
--- a/drivers/net/phy/qcom/qca807x.c
+++ b/drivers/net/phy/qcom/qca807x.c
@@ -646,13 +646,12 @@ exit:
static int qca807x_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
{
struct phy_device *phydev = upstream;
- __ETHTOOL_DECLARE_LINK_MODE_MASK(support) = { 0, };
+ const struct sfp_module_caps *caps;
phy_interface_t iface;
int ret;
- DECLARE_PHY_INTERFACE_MASK(interfaces);
- sfp_parse_support(phydev->sfp_bus, id, support, interfaces);
- iface = sfp_select_interface(phydev->sfp_bus, support);
+ caps = sfp_get_module_caps(phydev->sfp_bus);
+ iface = sfp_select_interface(phydev->sfp_bus, caps->link_modes);
dev_info(&phydev->mdio.dev, "%s SFP module inserted\n", phy_modes(iface));
diff --git a/drivers/net/phy/realtek/realtek_main.c b/drivers/net/phy/realtek/realtek_main.c
index dd0d675149ad..82d8e1335215 100644
--- a/drivers/net/phy/realtek/realtek_main.c
+++ b/drivers/net/phy/realtek/realtek_main.c
@@ -10,6 +10,7 @@
#include <linux/bitops.h>
#include <linux/of.h>
#include <linux/phy.h>
+#include <linux/pm_wakeirq.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/delay.h>
@@ -31,6 +32,7 @@
#define RTL821x_INER 0x12
#define RTL8211B_INER_INIT 0x6400
#define RTL8211E_INER_LINK_STATUS BIT(10)
+#define RTL8211F_INER_PME BIT(7)
#define RTL8211F_INER_LINK_STATUS BIT(4)
#define RTL821x_INSR 0x13
@@ -96,17 +98,13 @@
#define RTL8211F_RXCR 0x15
#define RTL8211F_RX_DELAY BIT(3)
-/* RTL8211F WOL interrupt configuration */
-#define RTL8211F_INTBCR_PAGE 0xd40
-#define RTL8211F_INTBCR 0x16
-#define RTL8211F_INTBCR_INTB_PMEB BIT(5)
-
/* RTL8211F WOL settings */
-#define RTL8211F_WOL_SETTINGS_PAGE 0xd8a
+#define RTL8211F_WOL_PAGE 0xd8a
#define RTL8211F_WOL_SETTINGS_EVENTS 16
#define RTL8211F_WOL_EVENT_MAGIC BIT(12)
-#define RTL8211F_WOL_SETTINGS_STATUS 17
-#define RTL8211F_WOL_STATUS_RESET (BIT(15) | 0x1fff)
+#define RTL8211F_WOL_RST_RMSQ 17
+#define RTL8211F_WOL_RG_RSTB BIT(15)
+#define RTL8211F_WOL_RMSQ 0x1fff
/* RTL8211F Unique physical and multicast address (WOL) */
#define RTL8211F_PHYSICAL_ADDR_PAGE 0xd8c
@@ -172,7 +170,8 @@ struct rtl821x_priv {
u16 phycr2;
bool has_phycr2;
struct clk *clk;
- u32 saved_wolopts;
+ /* rtl8211f */
+ u16 iner;
};
static int rtl821x_read_page(struct phy_device *phydev)
@@ -255,6 +254,34 @@ static int rtl821x_probe(struct phy_device *phydev)
return 0;
}
+static int rtl8211f_probe(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ int ret;
+
+ ret = rtl821x_probe(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* Disable all PME events */
+ ret = phy_write_paged(phydev, RTL8211F_WOL_PAGE,
+ RTL8211F_WOL_SETTINGS_EVENTS, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Mark this PHY as wakeup capable and register the interrupt as a
+ * wakeup IRQ if the PHY is marked as a wakeup source in firmware,
+ * and the interrupt is valid.
+ */
+ if (device_property_read_bool(dev, "wakeup-source") &&
+ phy_interrupt_is_valid(phydev)) {
+ device_set_wakeup_capable(dev, true);
+ devm_pm_set_wake_irq(dev, phydev->irq);
+ }
+
+ return ret;
+}
+
static int rtl8201_ack_interrupt(struct phy_device *phydev)
{
int err;
@@ -352,6 +379,7 @@ static int rtl8211e_config_intr(struct phy_device *phydev)
static int rtl8211f_config_intr(struct phy_device *phydev)
{
+ struct rtl821x_priv *priv = phydev->priv;
u16 val;
int err;
@@ -362,8 +390,10 @@ static int rtl8211f_config_intr(struct phy_device *phydev)
val = RTL8211F_INER_LINK_STATUS;
err = phy_write_paged(phydev, 0xa42, RTL821x_INER, val);
+ if (err == 0)
+ priv->iner = val;
} else {
- val = 0;
+ priv->iner = val = 0;
err = phy_write_paged(phydev, 0xa42, RTL821x_INER, val);
if (err)
return err;
@@ -426,21 +456,34 @@ static irqreturn_t rtl8211f_handle_interrupt(struct phy_device *phydev)
return IRQ_NONE;
}
- if (!(irq_status & RTL8211F_INER_LINK_STATUS))
- return IRQ_NONE;
+ if (irq_status & RTL8211F_INER_LINK_STATUS) {
+ phy_trigger_machine(phydev);
+ return IRQ_HANDLED;
+ }
- phy_trigger_machine(phydev);
+ if (irq_status & RTL8211F_INER_PME) {
+ pm_wakeup_event(&phydev->mdio.dev, 0);
+ return IRQ_HANDLED;
+ }
- return IRQ_HANDLED;
+ return IRQ_NONE;
}
static void rtl8211f_get_wol(struct phy_device *dev, struct ethtool_wolinfo *wol)
{
int wol_events;
+ /* If the PHY is not capable of waking the system, then WoL cannot
+ * be supported.
+ */
+ if (!device_can_wakeup(&dev->mdio.dev)) {
+ wol->supported = 0;
+ return;
+ }
+
wol->supported = WAKE_MAGIC;
- wol_events = phy_read_paged(dev, RTL8211F_WOL_SETTINGS_PAGE, RTL8211F_WOL_SETTINGS_EVENTS);
+ wol_events = phy_read_paged(dev, RTL8211F_WOL_PAGE, RTL8211F_WOL_SETTINGS_EVENTS);
if (wol_events < 0)
return;
@@ -453,6 +496,9 @@ static int rtl8211f_set_wol(struct phy_device *dev, struct ethtool_wolinfo *wol)
const u8 *mac_addr = dev->attached_dev->dev_addr;
int oldpage;
+ if (!device_can_wakeup(&dev->mdio.dev))
+ return -EOPNOTSUPP;
+
oldpage = phy_save_page(dev);
if (oldpage < 0)
goto err;
@@ -464,25 +510,23 @@ static int rtl8211f_set_wol(struct phy_device *dev, struct ethtool_wolinfo *wol)
__phy_write(dev, RTL8211F_PHYSICAL_ADDR_WORD1, mac_addr[3] << 8 | (mac_addr[2]));
__phy_write(dev, RTL8211F_PHYSICAL_ADDR_WORD2, mac_addr[5] << 8 | (mac_addr[4]));
- /* Enable magic packet matching and reset WOL status */
- rtl821x_write_page(dev, RTL8211F_WOL_SETTINGS_PAGE);
+ /* Enable magic packet matching */
+ rtl821x_write_page(dev, RTL8211F_WOL_PAGE);
__phy_write(dev, RTL8211F_WOL_SETTINGS_EVENTS, RTL8211F_WOL_EVENT_MAGIC);
- __phy_write(dev, RTL8211F_WOL_SETTINGS_STATUS, RTL8211F_WOL_STATUS_RESET);
-
- /* Enable the WOL interrupt */
- rtl821x_write_page(dev, RTL8211F_INTBCR_PAGE);
- __phy_set_bits(dev, RTL8211F_INTBCR, RTL8211F_INTBCR_INTB_PMEB);
+ /* Set the maximum packet size, and assert WoL reset */
+ __phy_write(dev, RTL8211F_WOL_RST_RMSQ, RTL8211F_WOL_RMSQ);
} else {
- /* Disable the WOL interrupt */
- rtl821x_write_page(dev, RTL8211F_INTBCR_PAGE);
- __phy_clear_bits(dev, RTL8211F_INTBCR, RTL8211F_INTBCR_INTB_PMEB);
-
- /* Disable magic packet matching and reset WOL status */
- rtl821x_write_page(dev, RTL8211F_WOL_SETTINGS_PAGE);
+ /* Disable magic packet matching */
+ rtl821x_write_page(dev, RTL8211F_WOL_PAGE);
__phy_write(dev, RTL8211F_WOL_SETTINGS_EVENTS, 0);
- __phy_write(dev, RTL8211F_WOL_SETTINGS_STATUS, RTL8211F_WOL_STATUS_RESET);
+
+ /* Place WoL in reset */
+ __phy_clear_bits(dev, RTL8211F_WOL_RST_RMSQ,
+ RTL8211F_WOL_RG_RSTB);
}
+ device_set_wakeup_enable(&dev->mdio.dev, !!(wol->wolopts & WAKE_MAGIC));
+
err:
return phy_restore_page(dev, oldpage, 0);
}
@@ -628,6 +672,52 @@ static int rtl821x_suspend(struct phy_device *phydev)
return ret;
}
+static int rtl8211f_suspend(struct phy_device *phydev)
+{
+ u16 wol_rst;
+ int ret;
+
+ ret = rtl821x_suspend(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* If a PME event is enabled, then configure the interrupt for
+ * PME events only, disabling link interrupt. We avoid switching
+ * to PMEB mode as we don't have a status bit for that.
+ */
+ if (device_may_wakeup(&phydev->mdio.dev)) {
+ ret = phy_write_paged(phydev, 0xa42, RTL821x_INER,
+ RTL8211F_INER_PME);
+ if (ret < 0)
+ goto err;
+
+ /* Read the INSR to clear any pending interrupt */
+ phy_read_paged(phydev, RTL8211F_INSR_PAGE, RTL8211F_INSR);
+
+ /* Reset the WoL to ensure that an event is picked up.
+ * Unless we do this, even if we receive another packet,
+ * we may not have a PME interrupt raised.
+ */
+ ret = phy_read_paged(phydev, RTL8211F_WOL_PAGE,
+ RTL8211F_WOL_RST_RMSQ);
+ if (ret < 0)
+ goto err;
+
+ wol_rst = ret & ~RTL8211F_WOL_RG_RSTB;
+ ret = phy_write_paged(phydev, RTL8211F_WOL_PAGE,
+ RTL8211F_WOL_RST_RMSQ, wol_rst);
+ if (ret < 0)
+ goto err;
+
+ wol_rst |= RTL8211F_WOL_RG_RSTB;
+ ret = phy_write_paged(phydev, RTL8211F_WOL_PAGE,
+ RTL8211F_WOL_RST_RMSQ, wol_rst);
+ }
+
+err:
+ return ret;
+}
+
static int rtl821x_resume(struct phy_device *phydev)
{
struct rtl821x_priv *priv = phydev->priv;
@@ -645,10 +735,29 @@ static int rtl821x_resume(struct phy_device *phydev)
return 0;
}
+static int rtl8211f_resume(struct phy_device *phydev)
+{
+ struct rtl821x_priv *priv = phydev->priv;
+ int ret;
+
+ ret = rtl821x_resume(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* If the device was programmed for a PME event, restore the interrupt
+ * enable so phylib can receive link state interrupts.
+ */
+ if (device_may_wakeup(&phydev->mdio.dev))
+ ret = phy_write_paged(phydev, 0xa42, RTL821x_INER, priv->iner);
+
+ return ret;
+}
+
static int rtl8211x_led_hw_is_supported(struct phy_device *phydev, u8 index,
unsigned long rules)
{
- const unsigned long mask = BIT(TRIGGER_NETDEV_LINK_10) |
+ const unsigned long mask = BIT(TRIGGER_NETDEV_LINK) |
+ BIT(TRIGGER_NETDEV_LINK_10) |
BIT(TRIGGER_NETDEV_LINK_100) |
BIT(TRIGGER_NETDEV_LINK_1000) |
BIT(TRIGGER_NETDEV_RX) |
@@ -706,6 +815,12 @@ static int rtl8211f_led_hw_control_get(struct phy_device *phydev, u8 index,
if (val & RTL8211F_LEDCR_LINK_1000)
__set_bit(TRIGGER_NETDEV_LINK_1000, rules);
+ if ((val & RTL8211F_LEDCR_LINK_10) &&
+ (val & RTL8211F_LEDCR_LINK_100) &&
+ (val & RTL8211F_LEDCR_LINK_1000)) {
+ __set_bit(TRIGGER_NETDEV_LINK, rules);
+ }
+
if (val & RTL8211F_LEDCR_ACT_TXRX) {
__set_bit(TRIGGER_NETDEV_RX, rules);
__set_bit(TRIGGER_NETDEV_TX, rules);
@@ -723,14 +838,20 @@ static int rtl8211f_led_hw_control_set(struct phy_device *phydev, u8 index,
if (index >= RTL8211x_LED_COUNT)
return -EINVAL;
- if (test_bit(TRIGGER_NETDEV_LINK_10, &rules))
+ if (test_bit(TRIGGER_NETDEV_LINK, &rules) ||
+ test_bit(TRIGGER_NETDEV_LINK_10, &rules)) {
reg |= RTL8211F_LEDCR_LINK_10;
+ }
- if (test_bit(TRIGGER_NETDEV_LINK_100, &rules))
+ if (test_bit(TRIGGER_NETDEV_LINK, &rules) ||
+ test_bit(TRIGGER_NETDEV_LINK_100, &rules)) {
reg |= RTL8211F_LEDCR_LINK_100;
+ }
- if (test_bit(TRIGGER_NETDEV_LINK_1000, &rules))
+ if (test_bit(TRIGGER_NETDEV_LINK, &rules) ||
+ test_bit(TRIGGER_NETDEV_LINK_1000, &rules)) {
reg |= RTL8211F_LEDCR_LINK_1000;
+ }
if (test_bit(TRIGGER_NETDEV_RX, &rules) ||
test_bit(TRIGGER_NETDEV_TX, &rules)) {
@@ -778,6 +899,12 @@ static int rtl8211e_led_hw_control_get(struct phy_device *phydev, u8 index,
if (cr2 & RTL8211E_LEDCR2_LINK_1000)
__set_bit(TRIGGER_NETDEV_LINK_1000, rules);
+ if ((cr2 & RTL8211E_LEDCR2_LINK_10) &&
+ (cr2 & RTL8211E_LEDCR2_LINK_100) &&
+ (cr2 & RTL8211E_LEDCR2_LINK_1000)) {
+ __set_bit(TRIGGER_NETDEV_LINK, rules);
+ }
+
return ret;
}
@@ -805,14 +932,20 @@ static int rtl8211e_led_hw_control_set(struct phy_device *phydev, u8 index,
if (ret < 0)
return ret;
- if (test_bit(TRIGGER_NETDEV_LINK_10, &rules))
+ if (test_bit(TRIGGER_NETDEV_LINK, &rules) ||
+ test_bit(TRIGGER_NETDEV_LINK_10, &rules)) {
cr2 |= RTL8211E_LEDCR2_LINK_10;
+ }
- if (test_bit(TRIGGER_NETDEV_LINK_100, &rules))
+ if (test_bit(TRIGGER_NETDEV_LINK, &rules) ||
+ test_bit(TRIGGER_NETDEV_LINK_100, &rules)) {
cr2 |= RTL8211E_LEDCR2_LINK_100;
+ }
- if (test_bit(TRIGGER_NETDEV_LINK_1000, &rules))
+ if (test_bit(TRIGGER_NETDEV_LINK, &rules) ||
+ test_bit(TRIGGER_NETDEV_LINK_1000, &rules)) {
cr2 |= RTL8211E_LEDCR2_LINK_1000;
+ }
cr2 <<= RTL8211E_LEDCR2_SHIFT * index;
ret = rtl821x_modify_ext_page(phydev, RTL8211E_LEDCR_EXT_PAGE,
@@ -1038,7 +1171,7 @@ static int rtl822x_probe(struct phy_device *phydev)
return 0;
}
-static int rtl822xb_config_init(struct phy_device *phydev)
+static int rtl822x_set_serdes_option_mode(struct phy_device *phydev, bool gen1)
{
bool has_2500, has_sgmii;
u16 mode;
@@ -1073,15 +1206,18 @@ static int rtl822xb_config_init(struct phy_device *phydev)
/* the following sequence with magic numbers sets up the SerDes
* option mode
*/
- ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x75f3, 0);
- if (ret < 0)
- return ret;
+
+ if (!gen1) {
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x75f3, 0);
+ if (ret < 0)
+ return ret;
+ }
ret = phy_modify_mmd_changed(phydev, MDIO_MMD_VEND1,
RTL822X_VND1_SERDES_OPTION,
RTL822X_VND1_SERDES_OPTION_MODE_MASK,
mode);
- if (ret < 0)
+ if (gen1 || ret < 0)
return ret;
ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x6a04, 0x0503);
@@ -1095,6 +1231,16 @@ static int rtl822xb_config_init(struct phy_device *phydev)
return phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x6f11, 0x8020);
}
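+/* First-generation parts such as the RTL8226-CG only need the SerDes
+ * option mode itself; the extra magic-number writes performed in
+ * rtl822x_set_serdes_option_mode() apply to the later "B" generation.
+ */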
+static int rtl822x_config_init(struct phy_device *phydev)
+{
+ return rtl822x_set_serdes_option_mode(phydev, true);
+}
+
+static int rtl822xb_config_init(struct phy_device *phydev)
+{
+ return rtl822x_set_serdes_option_mode(phydev, false);
+}
+
static int rtl822xb_get_rate_matching(struct phy_device *phydev,
phy_interface_t iface)
{
@@ -1280,6 +1426,21 @@ static int rtl822x_c45_read_status(struct phy_device *phydev)
return 0;
}
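+/* Issue a reset via the PMA/PMD control register, then poll the
+ * self-clearing reset bit every 5 ms for up to 100 ms.
+ */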
+static int rtl822x_c45_soft_reset(struct phy_device *phydev)
+{
+ int ret, val;
+
+ ret = phy_modify_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1,
+ MDIO_CTRL1_RESET, MDIO_CTRL1_RESET);
+ if (ret < 0)
+ return ret;
+
+ return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_PMAPMD,
+ MDIO_CTRL1, val,
+ !(val & MDIO_CTRL1_RESET),
+ 5000, 100000, true);
+}
+
static int rtl822xb_c45_read_status(struct phy_device *phydev)
{
int ret;
@@ -1612,15 +1773,15 @@ static struct phy_driver realtek_drvs[] = {
}, {
PHY_ID_MATCH_EXACT(0x001cc916),
.name = "RTL8211F Gigabit Ethernet",
- .probe = rtl821x_probe,
+ .probe = rtl8211f_probe,
.config_init = &rtl8211f_config_init,
.read_status = rtlgen_read_status,
.config_intr = &rtl8211f_config_intr,
.handle_interrupt = rtl8211f_handle_interrupt,
.set_wol = rtl8211f_set_wol,
.get_wol = rtl8211f_get_wol,
- .suspend = rtl821x_suspend,
- .resume = rtl821x_resume,
+ .suspend = rtl8211f_suspend,
+ .resume = rtl8211f_resume,
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
.flags = PHY_ALWAYS_CALL_SUSPEND,
@@ -1675,13 +1836,13 @@ static struct phy_driver realtek_drvs[] = {
}, {
PHY_ID_MATCH_EXACT(0x001cc838),
.name = "RTL8226-CG 2.5Gbps PHY",
- .get_features = rtl822x_get_features,
- .config_aneg = rtl822x_config_aneg,
- .read_status = rtl822x_read_status,
- .suspend = genphy_suspend,
- .resume = rtlgen_resume,
- .read_page = rtl821x_read_page,
- .write_page = rtl821x_write_page,
+ .soft_reset = rtl822x_c45_soft_reset,
+ .get_features = rtl822x_c45_get_features,
+ .config_aneg = rtl822x_c45_config_aneg,
+ .config_init = rtl822x_config_init,
+ .read_status = rtl822xb_c45_read_status,
+ .suspend = genphy_c45_pma_suspend,
+ .resume = rtlgen_c45_resume,
}, {
PHY_ID_MATCH_EXACT(0x001cc848),
.name = "RTL8226B-CG_RTL8221B-CG 2.5Gbps PHY",
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index f13c00b5b449..b945d75966d5 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -22,7 +22,6 @@ struct sfp_bus {
const struct sfp_socket_ops *socket_ops;
struct device *sfp_dev;
struct sfp *sfp;
- const struct sfp_quirk *sfp_quirk;
const struct sfp_upstream_ops *upstream_ops;
void *upstream;
@@ -30,24 +29,18 @@ struct sfp_bus {
bool registered;
bool started;
+
+ struct sfp_module_caps caps;
};
-/**
- * sfp_parse_port() - Parse the EEPROM base ID, setting the port type
- * @bus: a pointer to the &struct sfp_bus structure for the sfp module
- * @id: a pointer to the module's &struct sfp_eeprom_id
- * @support: optional pointer to an array of unsigned long for the
- * ethtool support mask
- *
- * Parse the EEPROM identification given in @id, and return one of
- * %PORT_TP, %PORT_FIBRE or %PORT_OTHER. If @support is non-%NULL,
- * also set the ethtool %ETHTOOL_LINK_MODE_xxx_BIT corresponding with
- * the connector type.
- *
- * If the port type is not known, returns %PORT_OTHER.
- */
-int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
- unsigned long *support)
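+/**
+ * sfp_get_module_caps() - Get the capabilities of the inserted module
+ * @bus: a pointer to the &struct sfp_bus structure for the sfp module
+ *
+ * Return: a pointer to the &struct sfp_module_caps populated when the
+ * module was inserted, with any vendor quirks already applied.
+ */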
+const struct sfp_module_caps *sfp_get_module_caps(struct sfp_bus *bus)
+{
+ return &bus->caps;
+}
+EXPORT_SYMBOL_GPL(sfp_get_module_caps);
+
+static void sfp_module_parse_port(struct sfp_bus *bus,
+ const struct sfp_eeprom_id *id)
{
int port;
@@ -91,34 +84,26 @@ int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
break;
}
- if (support) {
- switch (port) {
- case PORT_FIBRE:
- phylink_set(support, FIBRE);
- break;
+ switch (port) {
+ case PORT_FIBRE:
+ phylink_set(bus->caps.link_modes, FIBRE);
+ break;
- case PORT_TP:
- phylink_set(support, TP);
- break;
- }
+ case PORT_TP:
+ phylink_set(bus->caps.link_modes, TP);
+ break;
}
- return port;
+ bus->caps.port = port;
}
-EXPORT_SYMBOL_GPL(sfp_parse_port);
-/**
- * sfp_may_have_phy() - indicate whether the module may have a PHY
- * @bus: a pointer to the &struct sfp_bus structure for the sfp module
- * @id: a pointer to the module's &struct sfp_eeprom_id
- *
- * Parse the EEPROM identification given in @id, and return whether
- * this module may have a PHY.
- */
-bool sfp_may_have_phy(struct sfp_bus *bus, const struct sfp_eeprom_id *id)
+static void sfp_module_parse_may_have_phy(struct sfp_bus *bus,
+ const struct sfp_eeprom_id *id)
{
- if (id->base.e1000_base_t)
- return true;
+ if (id->base.e1000_base_t) {
+ bus->caps.may_have_phy = true;
+ return;
+ }
if (id->base.phys_id != SFF8024_ID_DWDM_SFP) {
switch (id->base.extended_cc) {
@@ -126,30 +111,20 @@ bool sfp_may_have_phy(struct sfp_bus *bus, const struct sfp_eeprom_id *id)
case SFF8024_ECC_10GBASE_T_SR:
case SFF8024_ECC_5GBASE_T:
case SFF8024_ECC_2_5GBASE_T:
- return true;
+ bus->caps.may_have_phy = true;
+ return;
}
}
- return false;
+ bus->caps.may_have_phy = false;
}
-EXPORT_SYMBOL_GPL(sfp_may_have_phy);
-/**
- * sfp_parse_support() - Parse the eeprom id for supported link modes
- * @bus: a pointer to the &struct sfp_bus structure for the sfp module
- * @id: a pointer to the module's &struct sfp_eeprom_id
- * @support: pointer to an array of unsigned long for the ethtool support mask
- * @interfaces: pointer to an array of unsigned long for phy interface modes
- * mask
- *
- * Parse the EEPROM identification information and derive the supported
- * ethtool link modes for the module.
- */
-void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
- unsigned long *support, unsigned long *interfaces)
+static void sfp_module_parse_support(struct sfp_bus *bus,
+ const struct sfp_eeprom_id *id)
{
+ unsigned long *interfaces = bus->caps.interfaces;
+ unsigned long *modes = bus->caps.link_modes;
unsigned int br_min, br_nom, br_max;
- __ETHTOOL_DECLARE_LINK_MODE_MASK(modes) = { 0, };
/* Decode the bitrate information to MBd */
br_min = br_nom = br_max = 0;
@@ -338,13 +313,21 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
phylink_set(modes, Autoneg);
phylink_set(modes, Pause);
phylink_set(modes, Asym_Pause);
+}
- if (bus->sfp_quirk && bus->sfp_quirk->modes)
- bus->sfp_quirk->modes(id, modes, interfaces);
+static void sfp_init_module(struct sfp_bus *bus,
+ const struct sfp_eeprom_id *id,
+ const struct sfp_quirk *quirk)
+{
+ memset(&bus->caps, 0, sizeof(bus->caps));
+
+ sfp_module_parse_support(bus, id);
+ sfp_module_parse_port(bus, id);
+ sfp_module_parse_may_have_phy(bus, id);
- linkmode_or(support, support, modes);
+ if (quirk && quirk->support)
+ quirk->support(id, &bus->caps);
}
-EXPORT_SYMBOL_GPL(sfp_parse_support);
/**
* sfp_select_interface() - Select appropriate phy_interface_t mode
@@ -794,7 +777,7 @@ int sfp_module_insert(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
const struct sfp_upstream_ops *ops = sfp_get_upstream_ops(bus);
int ret = 0;
- bus->sfp_quirk = quirk;
+ sfp_init_module(bus, id, quirk);
if (ops && ops->module_insert)
ret = ops->module_insert(bus->upstream, id);
@@ -809,8 +792,6 @@ void sfp_module_remove(struct sfp_bus *bus)
if (ops && ops->module_remove)
ops->module_remove(bus->upstream);
-
- bus->sfp_quirk = NULL;
}
EXPORT_SYMBOL_GPL(sfp_module_remove);
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 4cd1d6c51dc2..0401fa6b24d2 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -17,7 +17,6 @@
#include <linux/workqueue.h>
#include "sfp.h"
-#include "swphy.h"
enum {
GPIO_MODDEF0,
@@ -221,6 +220,8 @@ static const enum gpiod_flags gpio_flags[] = {
*/
#define SFP_EEPROM_BLOCK_SIZE 16
+#define SFP_POLL_INTERVAL msecs_to_jiffies(100)
+
struct sff_data {
unsigned int gpios;
bool (*module_supported)(const struct sfp_eeprom_id *id);
@@ -299,6 +300,11 @@ struct sfp {
#endif
};
+static void sfp_schedule_poll(struct sfp *sfp)
+{
+ mod_delayed_work(system_percpu_wq, &sfp->poll, SFP_POLL_INTERVAL);
+}
+
static bool sff_module_supported(const struct sfp_eeprom_id *id)
{
return id->base.phys_id == SFF8024_ID_SFF_8472 &&
@@ -440,45 +446,44 @@ static void sfp_fixup_rollball_cc(struct sfp *sfp)
}
static void sfp_quirk_2500basex(const struct sfp_eeprom_id *id,
- unsigned long *modes,
- unsigned long *interfaces)
+ struct sfp_module_caps *caps)
{
- linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseX_Full_BIT, modes);
- __set_bit(PHY_INTERFACE_MODE_2500BASEX, interfaces);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
+ caps->link_modes);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, caps->interfaces);
}
static void sfp_quirk_disable_autoneg(const struct sfp_eeprom_id *id,
- unsigned long *modes,
- unsigned long *interfaces)
+ struct sfp_module_caps *caps)
{
- linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, modes);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, caps->link_modes);
}
static void sfp_quirk_oem_2_5g(const struct sfp_eeprom_id *id,
- unsigned long *modes,
- unsigned long *interfaces)
+ struct sfp_module_caps *caps)
{
/* Copper 2.5G SFP */
- linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, modes);
- __set_bit(PHY_INTERFACE_MODE_2500BASEX, interfaces);
- sfp_quirk_disable_autoneg(id, modes, interfaces);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ caps->link_modes);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, caps->interfaces);
+ sfp_quirk_disable_autoneg(id, caps);
}
static void sfp_quirk_ubnt_uf_instant(const struct sfp_eeprom_id *id,
- unsigned long *modes,
- unsigned long *interfaces)
+ struct sfp_module_caps *caps)
{
/* The Ubiquiti U-Fiber Instant module claims to support all transceiver
* types including 10G Ethernet, which is not true. So clear all claimed
* modes and set only the one mode the module supports: 1000baseX_Full.
*/
- linkmode_zero(modes);
- linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, modes);
+ linkmode_zero(caps->link_modes);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ caps->link_modes);
}
-#define SFP_QUIRK(_v, _p, _m, _f) \
- { .vendor = _v, .part = _p, .modes = _m, .fixup = _f, }
-#define SFP_QUIRK_M(_v, _p, _m) SFP_QUIRK(_v, _p, _m, NULL)
+#define SFP_QUIRK(_v, _p, _s, _f) \
+ { .vendor = _v, .part = _p, .support = _s, .fixup = _f, }
+#define SFP_QUIRK_S(_v, _p, _s) SFP_QUIRK(_v, _p, _s, NULL)
#define SFP_QUIRK_F(_v, _p, _f) SFP_QUIRK(_v, _p, NULL, _f)
static const struct sfp_quirk sfp_quirks[] = {
@@ -514,7 +519,7 @@ static const struct sfp_quirk sfp_quirks[] = {
// HG MXPD-483II-F 2.5G supports 2500Base-X, but incorrectly reports
// 2600MBd in its EEPROM
- SFP_QUIRK_M("HG GENUINE", "MXPD-483II", sfp_quirk_2500basex),
+ SFP_QUIRK_S("HG GENUINE", "MXPD-483II", sfp_quirk_2500basex),
// Huawei MA5671A can operate at 2500base-X, but reports 1.2GBd NRZ in
// its EEPROM
@@ -523,9 +528,9 @@ static const struct sfp_quirk sfp_quirks[] = {
// Lantech 8330-262D-E can operate at 2500base-X, but incorrectly reports
// 2500MBd NRZ in its EEPROM
- SFP_QUIRK_M("Lantech", "8330-262D-E", sfp_quirk_2500basex),
+ SFP_QUIRK_S("Lantech", "8330-262D-E", sfp_quirk_2500basex),
- SFP_QUIRK_M("UBNT", "UF-INSTANT", sfp_quirk_ubnt_uf_instant),
+ SFP_QUIRK_S("UBNT", "UF-INSTANT", sfp_quirk_ubnt_uf_instant),
// Walsun HXSX-ATR[CI]-1 don't identify as copper, and use the
// Rollball protocol to talk to the PHY.
@@ -538,9 +543,9 @@ static const struct sfp_quirk sfp_quirks[] = {
SFP_QUIRK_F("OEM", "SFP-GE-T", sfp_fixup_ignore_tx_fault),
SFP_QUIRK_F("OEM", "SFP-10G-T", sfp_fixup_rollball_cc),
- SFP_QUIRK_M("OEM", "SFP-2.5G-T", sfp_quirk_oem_2_5g),
- SFP_QUIRK_M("OEM", "SFP-2.5G-BX10-D", sfp_quirk_2500basex),
- SFP_QUIRK_M("OEM", "SFP-2.5G-BX10-U", sfp_quirk_2500basex),
+ SFP_QUIRK_S("OEM", "SFP-2.5G-T", sfp_quirk_oem_2_5g),
+ SFP_QUIRK_S("OEM", "SFP-2.5G-BX10-D", sfp_quirk_2500basex),
+ SFP_QUIRK_S("OEM", "SFP-2.5G-BX10-U", sfp_quirk_2500basex),
SFP_QUIRK_F("OEM", "RTSFP-10", sfp_fixup_rollball_cc),
SFP_QUIRK_F("OEM", "RTSFP-10G", sfp_fixup_rollball_cc),
SFP_QUIRK_F("Turris", "RTSFP-2.5G", sfp_fixup_rollball),
@@ -588,8 +593,6 @@ static const struct sfp_quirk *sfp_lookup_quirk(const struct sfp_eeprom_id *id)
return NULL;
}
-static unsigned long poll_jiffies;
-
static unsigned int sfp_gpio_get_state(struct sfp *sfp)
{
unsigned int i, state, v;
@@ -912,7 +915,7 @@ static void sfp_soft_start_poll(struct sfp *sfp)
if (sfp->state_soft_mask & (SFP_F_LOS | SFP_F_TX_FAULT) &&
!sfp->need_poll)
- mod_delayed_work(system_wq, &sfp->poll, poll_jiffies);
+ sfp_schedule_poll(sfp);
mutex_unlock(&sfp->st_mutex);
}
@@ -1683,7 +1686,7 @@ static void sfp_hwmon_probe(struct work_struct *work)
err = sfp_read(sfp, true, 0, &sfp->diag, sizeof(sfp->diag));
if (err < 0) {
if (sfp->hwmon_tries--) {
- mod_delayed_work(system_wq, &sfp->hwmon_probe,
+ mod_delayed_work(system_percpu_wq, &sfp->hwmon_probe,
T_PROBE_RETRY_SLOW);
} else {
dev_warn(sfp->dev, "hwmon probe failed: %pe\n",
@@ -1710,7 +1713,7 @@ static void sfp_hwmon_probe(struct work_struct *work)
static int sfp_hwmon_insert(struct sfp *sfp)
{
if (sfp->have_a2 && sfp->id.ext.diagmon & SFP_DIAGMON_DDM) {
- mod_delayed_work(system_wq, &sfp->hwmon_probe, 1);
+ mod_delayed_work(system_percpu_wq, &sfp->hwmon_probe, 1);
sfp->hwmon_tries = R_PROBE_RETRY_SLOW;
}
@@ -2564,7 +2567,7 @@ static void sfp_sm_module(struct sfp *sfp, unsigned int event)
/* Force a poll to re-read the hardware signal state after
* sfp_sm_mod_probe() changed state_hw_mask.
*/
- mod_delayed_work(system_wq, &sfp->poll, 1);
+ mod_delayed_work(system_percpu_wq, &sfp->poll, 1);
err = sfp_hwmon_insert(sfp);
if (err)
@@ -3009,7 +3012,7 @@ static void sfp_poll(struct work_struct *work)
// it's unimportant if we race while reading this.
if (sfp->state_soft_mask & (SFP_F_LOS | SFP_F_TX_FAULT) ||
sfp->need_poll)
- mod_delayed_work(system_wq, &sfp->poll, poll_jiffies);
+ sfp_schedule_poll(sfp);
}
static struct sfp *sfp_alloc(struct device *dev)
@@ -3179,7 +3182,7 @@ static int sfp_probe(struct platform_device *pdev)
}
if (sfp->need_poll)
- mod_delayed_work(system_wq, &sfp->poll, poll_jiffies);
+ sfp_schedule_poll(sfp);
/* We could have an issue in cases no Tx disable pin is available or
* wired as modules using a laser as their light source will continue to
@@ -3246,19 +3249,7 @@ static struct platform_driver sfp_driver = {
},
};
-static int sfp_init(void)
-{
- poll_jiffies = msecs_to_jiffies(100);
-
- return platform_driver_register(&sfp_driver);
-}
-module_init(sfp_init);
-
-static void sfp_exit(void)
-{
- platform_driver_unregister(&sfp_driver);
-}
-module_exit(sfp_exit);
+module_platform_driver(sfp_driver);
MODULE_ALIAS("platform:sfp");
MODULE_AUTHOR("Russell King");
diff --git a/drivers/net/phy/sfp.h b/drivers/net/phy/sfp.h
index 1fd097dccb9f..879dff7afe6a 100644
--- a/drivers/net/phy/sfp.h
+++ b/drivers/net/phy/sfp.h
@@ -9,8 +9,8 @@ struct sfp;
struct sfp_quirk {
const char *vendor;
const char *part;
- void (*modes)(const struct sfp_eeprom_id *id, unsigned long *modes,
- unsigned long *interfaces);
+ void (*support)(const struct sfp_eeprom_id *id,
+ struct sfp_module_caps *caps);
void (*fixup)(struct sfp *sfp);
};
diff --git a/drivers/net/ppp/Kconfig b/drivers/net/ppp/Kconfig
index 8c9ed1889d1a..a1806b4b84be 100644
--- a/drivers/net/ppp/Kconfig
+++ b/drivers/net/ppp/Kconfig
@@ -85,9 +85,8 @@ config PPP_FILTER
config PPP_MPPE
tristate "PPP MPPE compression (encryption)"
depends on PPP
- select CRYPTO
- select CRYPTO_SHA1
select CRYPTO_LIB_ARC4
+ select CRYPTO_LIB_SHA1
help
Support for the MPPE Encryption protocol, as employed by the
Microsoft Point-to-Point Tunneling Protocol.
diff --git a/drivers/net/ppp/bsd_comp.c b/drivers/net/ppp/bsd_comp.c
index 55954594e157..f385b759d5cf 100644
--- a/drivers/net/ppp/bsd_comp.c
+++ b/drivers/net/ppp/bsd_comp.c
@@ -406,7 +406,7 @@ static void *bsd_alloc (unsigned char *options, int opt_len, int decomp)
* Allocate space for the dictionary. This may be more than one page in
* length.
*/
- db->dict = vmalloc(array_size(hsize, sizeof(struct bsd_dict)));
+ db->dict = vmalloc_array(hsize, sizeof(struct bsd_dict));
if (!db->dict)
{
bsd_free (db);
@@ -425,7 +425,7 @@ static void *bsd_alloc (unsigned char *options, int opt_len, int decomp)
*/
else
{
- db->lens = vmalloc(array_size(sizeof(db->lens[0]), (maxmaxcode + 1)));
+ db->lens = vmalloc_array(maxmaxcode + 1, sizeof(db->lens[0]));
if (!db->lens)
{
bsd_free (db);
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 702a7f7183ce..f9f0f16c41d1 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -179,11 +179,11 @@ struct channel {
struct ppp_channel *chan; /* public channel data structure */
struct rw_semaphore chan_sem; /* protects `chan' during chan ioctl */
spinlock_t downl; /* protects `chan', file.xq dequeue */
- struct ppp *ppp; /* ppp unit we're connected to */
+ struct ppp __rcu *ppp; /* ppp unit we're connected to */
struct net *chan_net; /* the net channel belongs to */
netns_tracker ns_tracker;
struct list_head clist; /* link in list of channels per unit */
- rwlock_t upl; /* protects `ppp' and 'bridge' */
+ spinlock_t upl; /* protects `ppp' and 'bridge' */
struct channel __rcu *bridge; /* "bridged" ppp channel */
#ifdef CONFIG_PPP_MULTILINK
u8 avail; /* flag used in multilink stuff */
@@ -645,34 +645,34 @@ static struct bpf_prog *compat_ppp_get_filter(struct sock_fprog32 __user *p)
*/
static int ppp_bridge_channels(struct channel *pch, struct channel *pchb)
{
- write_lock_bh(&pch->upl);
- if (pch->ppp ||
+ spin_lock(&pch->upl);
+ if (rcu_dereference_protected(pch->ppp, lockdep_is_held(&pch->upl)) ||
rcu_dereference_protected(pch->bridge, lockdep_is_held(&pch->upl))) {
- write_unlock_bh(&pch->upl);
+ spin_unlock(&pch->upl);
return -EALREADY;
}
refcount_inc(&pchb->file.refcnt);
rcu_assign_pointer(pch->bridge, pchb);
- write_unlock_bh(&pch->upl);
+ spin_unlock(&pch->upl);
- write_lock_bh(&pchb->upl);
- if (pchb->ppp ||
+ spin_lock(&pchb->upl);
+ if (rcu_dereference_protected(pchb->ppp, lockdep_is_held(&pchb->upl)) ||
rcu_dereference_protected(pchb->bridge, lockdep_is_held(&pchb->upl))) {
- write_unlock_bh(&pchb->upl);
+ spin_unlock(&pchb->upl);
goto err_unset;
}
refcount_inc(&pch->file.refcnt);
rcu_assign_pointer(pchb->bridge, pch);
- write_unlock_bh(&pchb->upl);
+ spin_unlock(&pchb->upl);
return 0;
err_unset:
- write_lock_bh(&pch->upl);
+ spin_lock(&pch->upl);
/* Re-read pch->bridge with upl held in case it was modified concurrently */
pchb = rcu_dereference_protected(pch->bridge, lockdep_is_held(&pch->upl));
RCU_INIT_POINTER(pch->bridge, NULL);
- write_unlock_bh(&pch->upl);
+ spin_unlock(&pch->upl);
synchronize_rcu();
if (pchb)
@@ -686,25 +686,25 @@ static int ppp_unbridge_channels(struct channel *pch)
{
struct channel *pchb, *pchbb;
- write_lock_bh(&pch->upl);
+ spin_lock(&pch->upl);
pchb = rcu_dereference_protected(pch->bridge, lockdep_is_held(&pch->upl));
if (!pchb) {
- write_unlock_bh(&pch->upl);
+ spin_unlock(&pch->upl);
return -EINVAL;
}
RCU_INIT_POINTER(pch->bridge, NULL);
- write_unlock_bh(&pch->upl);
+ spin_unlock(&pch->upl);
/* Only modify pchb if pchb->bridge points back to pch.
* If not, it implies that there has been a race unbridging (and possibly
* even rebridging) pchb. We should leave pchb alone to avoid either a
* refcount underflow, or breaking another established bridge instance.
*/
- write_lock_bh(&pchb->upl);
+ spin_lock(&pchb->upl);
pchbb = rcu_dereference_protected(pchb->bridge, lockdep_is_held(&pchb->upl));
if (pchbb == pch)
RCU_INIT_POINTER(pchb->bridge, NULL);
- write_unlock_bh(&pchb->upl);
+ spin_unlock(&pchb->upl);
synchronize_rcu();
@@ -2158,10 +2158,9 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
#endif /* CONFIG_PPP_MULTILINK */
/* Try to send data out on a channel */
-static void __ppp_channel_push(struct channel *pch)
+static void __ppp_channel_push(struct channel *pch, struct ppp *ppp)
{
struct sk_buff *skb;
- struct ppp *ppp;
spin_lock(&pch->downl);
if (pch->chan) {
@@ -2180,7 +2179,6 @@ static void __ppp_channel_push(struct channel *pch)
spin_unlock(&pch->downl);
/* see if there is anything from the attached unit to be sent */
if (skb_queue_empty(&pch->file.xq)) {
- ppp = pch->ppp;
if (ppp)
__ppp_xmit_process(ppp, NULL);
}
@@ -2189,19 +2187,21 @@ static void __ppp_channel_push(struct channel *pch)
static void ppp_channel_push(struct channel *pch)
{
struct ppp_xmit_recursion *xmit_recursion;
+ struct ppp *ppp;
- read_lock_bh(&pch->upl);
- if (pch->ppp) {
- xmit_recursion = this_cpu_ptr(pch->ppp->xmit_recursion);
- local_lock_nested_bh(&pch->ppp->xmit_recursion->bh_lock);
+ rcu_read_lock_bh();
+ ppp = rcu_dereference_bh(pch->ppp);
+ if (ppp) {
+ xmit_recursion = this_cpu_ptr(ppp->xmit_recursion);
+ local_lock_nested_bh(&ppp->xmit_recursion->bh_lock);
xmit_recursion->owner = current;
- __ppp_channel_push(pch);
+ __ppp_channel_push(pch, ppp);
xmit_recursion->owner = NULL;
- local_unlock_nested_bh(&pch->ppp->xmit_recursion->bh_lock);
+ local_unlock_nested_bh(&ppp->xmit_recursion->bh_lock);
} else {
- __ppp_channel_push(pch);
+ __ppp_channel_push(pch, NULL);
}
- read_unlock_bh(&pch->upl);
+ rcu_read_unlock_bh();
}
/*
@@ -2303,6 +2303,7 @@ void
ppp_input(struct ppp_channel *chan, struct sk_buff *skb)
{
struct channel *pch = chan->ppp;
+ struct ppp *ppp;
int proto;
if (!pch) {
@@ -2314,18 +2315,19 @@ ppp_input(struct ppp_channel *chan, struct sk_buff *skb)
if (ppp_channel_bridge_input(pch, skb))
return;
- read_lock_bh(&pch->upl);
+ rcu_read_lock_bh();
+ ppp = rcu_dereference_bh(pch->ppp);
if (!ppp_decompress_proto(skb)) {
kfree_skb(skb);
- if (pch->ppp) {
- ++pch->ppp->dev->stats.rx_length_errors;
- ppp_receive_error(pch->ppp);
+ if (ppp) {
+ ++ppp->dev->stats.rx_length_errors;
+ ppp_receive_error(ppp);
}
goto done;
}
proto = PPP_PROTO(skb);
- if (!pch->ppp || proto >= 0xc000 || proto == PPP_CCPFRAG) {
+ if (!ppp || proto >= 0xc000 || proto == PPP_CCPFRAG) {
/* put it on the channel queue */
skb_queue_tail(&pch->file.rq, skb);
/* drop old frames if queue too long */
@@ -2334,11 +2336,11 @@ ppp_input(struct ppp_channel *chan, struct sk_buff *skb)
kfree_skb(skb);
wake_up_interruptible(&pch->file.rwait);
} else {
- ppp_do_recv(pch->ppp, skb, pch);
+ ppp_do_recv(ppp, skb, pch);
}
done:
- read_unlock_bh(&pch->upl);
+ rcu_read_unlock_bh();
}
/* Put a 0-length skb in the receive queue as an error indication */
@@ -2347,20 +2349,22 @@ ppp_input_error(struct ppp_channel *chan, int code)
{
struct channel *pch = chan->ppp;
struct sk_buff *skb;
+ struct ppp *ppp;
if (!pch)
return;
- read_lock_bh(&pch->upl);
- if (pch->ppp) {
+ rcu_read_lock_bh();
+ ppp = rcu_dereference_bh(pch->ppp);
+ if (ppp) {
skb = alloc_skb(0, GFP_ATOMIC);
if (skb) {
skb->len = 0; /* probably unnecessary */
skb->cb[0] = code;
- ppp_do_recv(pch->ppp, skb, pch);
+ ppp_do_recv(ppp, skb, pch);
}
}
- read_unlock_bh(&pch->upl);
+ rcu_read_unlock_bh();
}
/*
@@ -2908,7 +2912,6 @@ int ppp_register_net_channel(struct net *net, struct ppp_channel *chan)
pn = ppp_pernet(net);
- pch->ppp = NULL;
pch->chan = chan;
pch->chan_net = get_net_track(net, &pch->ns_tracker, GFP_KERNEL);
chan->ppp = pch;
@@ -2919,7 +2922,7 @@ int ppp_register_net_channel(struct net *net, struct ppp_channel *chan)
#endif /* CONFIG_PPP_MULTILINK */
init_rwsem(&pch->chan_sem);
spin_lock_init(&pch->downl);
- rwlock_init(&pch->upl);
+ spin_lock_init(&pch->upl);
spin_lock_bh(&pn->all_channels_lock);
pch->file.index = ++pn->last_channel_index;
@@ -2948,13 +2951,15 @@ int ppp_channel_index(struct ppp_channel *chan)
int ppp_unit_number(struct ppp_channel *chan)
{
struct channel *pch = chan->ppp;
+ struct ppp *ppp;
int unit = -1;
if (pch) {
- read_lock_bh(&pch->upl);
- if (pch->ppp)
- unit = pch->ppp->file.index;
- read_unlock_bh(&pch->upl);
+ rcu_read_lock();
+ ppp = rcu_dereference(pch->ppp);
+ if (ppp)
+ unit = ppp->file.index;
+ rcu_read_unlock();
}
return unit;
}
@@ -2966,12 +2971,14 @@ char *ppp_dev_name(struct ppp_channel *chan)
{
struct channel *pch = chan->ppp;
char *name = NULL;
+ struct ppp *ppp;
if (pch) {
- read_lock_bh(&pch->upl);
- if (pch->ppp && pch->ppp->dev)
- name = pch->ppp->dev->name;
- read_unlock_bh(&pch->upl);
+ rcu_read_lock();
+ ppp = rcu_dereference(pch->ppp);
+ if (ppp && ppp->dev)
+ name = ppp->dev->name;
+ rcu_read_unlock();
}
return name;
}
@@ -3494,9 +3501,9 @@ ppp_connect_channel(struct channel *pch, int unit)
ppp = ppp_find_unit(pn, unit);
if (!ppp)
goto out;
- write_lock_bh(&pch->upl);
+ spin_lock(&pch->upl);
ret = -EINVAL;
- if (pch->ppp ||
+ if (rcu_dereference_protected(pch->ppp, lockdep_is_held(&pch->upl)) ||
rcu_dereference_protected(pch->bridge, lockdep_is_held(&pch->upl)))
goto outl;
@@ -3521,13 +3528,13 @@ ppp_connect_channel(struct channel *pch, int unit)
ppp->dev->hard_header_len = hdrlen;
list_add_tail_rcu(&pch->clist, &ppp->channels);
++ppp->n_channels;
- pch->ppp = ppp;
+ rcu_assign_pointer(pch->ppp, ppp);
refcount_inc(&ppp->file.refcnt);
ppp_unlock(ppp);
ret = 0;
outl:
- write_unlock_bh(&pch->upl);
+ spin_unlock(&pch->upl);
out:
mutex_unlock(&pn->all_ppp_mutex);
return ret;
@@ -3542,10 +3549,9 @@ ppp_disconnect_channel(struct channel *pch)
struct ppp *ppp;
int err = -EINVAL;
- write_lock_bh(&pch->upl);
- ppp = pch->ppp;
- pch->ppp = NULL;
- write_unlock_bh(&pch->upl);
+ spin_lock(&pch->upl);
+ ppp = rcu_replace_pointer(pch->ppp, NULL, lockdep_is_held(&pch->upl));
+ spin_unlock(&pch->upl);
if (ppp) {
/* remove it from the ppp unit's list */
ppp_lock(ppp);
diff --git a/drivers/net/ppp/ppp_mppe.c b/drivers/net/ppp/ppp_mppe.c
index bcc1eaedf58f..630cbf71c147 100644
--- a/drivers/net/ppp/ppp_mppe.c
+++ b/drivers/net/ppp/ppp_mppe.c
@@ -43,7 +43,7 @@
*/
#include <crypto/arc4.h>
-#include <crypto/hash.h>
+#include <crypto/sha1.h>
#include <linux/err.h>
#include <linux/fips.h>
#include <linux/module.h>
@@ -55,7 +55,6 @@
#include <linux/mm.h>
#include <linux/ppp_defs.h>
#include <linux/ppp-comp.h>
-#include <linux/scatterlist.h>
#include <linux/unaligned.h>
#include "ppp_mppe.h"
@@ -67,31 +66,15 @@ MODULE_ALIAS("ppp-compress-" __stringify(CI_MPPE));
MODULE_VERSION("1.0.2");
#define SHA1_PAD_SIZE 40
-
-/*
- * kernel crypto API needs its arguments to be in kmalloc'd memory, not in the module
- * static data area. That means sha_pad needs to be kmalloc'd.
- */
-
-struct sha_pad {
- unsigned char sha_pad1[SHA1_PAD_SIZE];
- unsigned char sha_pad2[SHA1_PAD_SIZE];
-};
-static struct sha_pad *sha_pad;
-
-static inline void sha_pad_init(struct sha_pad *shapad)
-{
- memset(shapad->sha_pad1, 0x00, sizeof(shapad->sha_pad1));
- memset(shapad->sha_pad2, 0xF2, sizeof(shapad->sha_pad2));
-}
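+/* The "[first ... last] =" range initializer below is a GNU C extension,
+ * used widely in the kernel, that fills every element with 0xF2 at
+ * compile time.
+ */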
+static const u8 sha_pad1[SHA1_PAD_SIZE] = { 0 };
+static const u8 sha_pad2[SHA1_PAD_SIZE] = { [0 ... SHA1_PAD_SIZE - 1] = 0xF2 };
/*
* State for an MPPE (de)compressor.
*/
struct ppp_mppe_state {
struct arc4_ctx arc4;
- struct shash_desc *sha1;
- unsigned char *sha1_digest;
+ unsigned char sha1_digest[SHA1_DIGEST_SIZE];
unsigned char master_key[MPPE_MAX_KEY_LEN];
unsigned char session_key[MPPE_MAX_KEY_LEN];
unsigned keylen; /* key length in bytes */
@@ -130,16 +113,14 @@ struct ppp_mppe_state {
*/
static void get_new_key_from_sha(struct ppp_mppe_state * state)
{
- crypto_shash_init(state->sha1);
- crypto_shash_update(state->sha1, state->master_key,
- state->keylen);
- crypto_shash_update(state->sha1, sha_pad->sha_pad1,
- sizeof(sha_pad->sha_pad1));
- crypto_shash_update(state->sha1, state->session_key,
- state->keylen);
- crypto_shash_update(state->sha1, sha_pad->sha_pad2,
- sizeof(sha_pad->sha_pad2));
- crypto_shash_final(state->sha1, state->sha1_digest);
+ struct sha1_ctx ctx;
+
+ sha1_init(&ctx);
+ sha1_update(&ctx, state->master_key, state->keylen);
+ sha1_update(&ctx, sha_pad1, sizeof(sha_pad1));
+ sha1_update(&ctx, state->session_key, state->keylen);
+ sha1_update(&ctx, sha_pad2, sizeof(sha_pad2));
+ sha1_final(&ctx, state->sha1_digest);
}
/*
@@ -171,39 +152,15 @@ static void mppe_rekey(struct ppp_mppe_state * state, int initial_key)
static void *mppe_alloc(unsigned char *options, int optlen)
{
struct ppp_mppe_state *state;
- struct crypto_shash *shash;
- unsigned int digestsize;
if (optlen != CILEN_MPPE + sizeof(state->master_key) ||
options[0] != CI_MPPE || options[1] != CILEN_MPPE ||
fips_enabled)
- goto out;
+ return NULL;
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (state == NULL)
- goto out;
-
-
- shash = crypto_alloc_shash("sha1", 0, 0);
- if (IS_ERR(shash))
- goto out_free;
-
- state->sha1 = kmalloc(sizeof(*state->sha1) +
- crypto_shash_descsize(shash),
- GFP_KERNEL);
- if (!state->sha1) {
- crypto_free_shash(shash);
- goto out_free;
- }
- state->sha1->tfm = shash;
-
- digestsize = crypto_shash_digestsize(shash);
- if (digestsize < MPPE_MAX_KEY_LEN)
- goto out_free;
-
- state->sha1_digest = kmalloc(digestsize, GFP_KERNEL);
- if (!state->sha1_digest)
- goto out_free;
+ return NULL;
/* Save keys. */
memcpy(state->master_key, &options[CILEN_MPPE],
@@ -217,16 +174,6 @@ static void *mppe_alloc(unsigned char *options, int optlen)
*/
return (void *)state;
-
-out_free:
- kfree(state->sha1_digest);
- if (state->sha1) {
- crypto_free_shash(state->sha1->tfm);
- kfree_sensitive(state->sha1);
- }
- kfree(state);
-out:
- return NULL;
}
/*
@@ -235,12 +182,8 @@ out:
static void mppe_free(void *arg)
{
struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
- if (state) {
- kfree(state->sha1_digest);
- crypto_free_shash(state->sha1->tfm);
- kfree_sensitive(state->sha1);
- kfree_sensitive(state);
- }
+
+ kfree_sensitive(state);
}
/*
@@ -649,31 +592,17 @@ static struct compressor ppp_mppe = {
.comp_extra = MPPE_PAD,
};
-/*
- * ppp_mppe_init()
- *
- * Prior to allowing load, try to load the arc4 and sha1 crypto
- * libraries. The actual use will be allocated later, but
- * this way the module will fail to insmod if they aren't available.
- */
-
static int __init ppp_mppe_init(void)
{
int answer;
- if (fips_enabled || !crypto_has_ahash("sha1", 0, CRYPTO_ALG_ASYNC))
- return -ENODEV;
- sha_pad = kmalloc(sizeof(struct sha_pad), GFP_KERNEL);
- if (!sha_pad)
- return -ENOMEM;
- sha_pad_init(sha_pad);
+ if (fips_enabled)
+ return -ENODEV;
answer = ppp_register_compressor(&ppp_mppe);
if (answer == 0)
printk(KERN_INFO "PPP MPPE Compression module registered\n");
- else
- kfree(sha_pad);
return answer;
}
@@ -681,7 +610,6 @@ static int __init ppp_mppe_init(void)
static void __exit ppp_mppe_cleanup(void)
{
ppp_unregister_compressor(&ppp_mppe);
- kfree(sha_pad);
}
module_init(ppp_mppe_init);
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 410effa42ade..4ac6afce267b 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -100,8 +100,8 @@ struct pppoe_net {
* as well, moreover in case of SMP less locking
* controversy here
*/
- struct pppox_sock *hash_table[PPPOE_HASH_SIZE];
- rwlock_t hash_lock;
+ struct pppox_sock __rcu *hash_table[PPPOE_HASH_SIZE];
+ spinlock_t hash_lock;
};
/*
@@ -162,13 +162,13 @@ static struct pppox_sock *__get_item(struct pppoe_net *pn, __be16 sid,
int hash = hash_item(sid, addr);
struct pppox_sock *ret;
- ret = pn->hash_table[hash];
+ ret = rcu_dereference(pn->hash_table[hash]);
while (ret) {
if (cmp_addr(&ret->pppoe_pa, sid, addr) &&
ret->pppoe_ifindex == ifindex)
return ret;
- ret = ret->next;
+ ret = rcu_dereference(ret->next);
}
return NULL;
@@ -177,19 +177,20 @@ static struct pppox_sock *__get_item(struct pppoe_net *pn, __be16 sid,
static int __set_item(struct pppoe_net *pn, struct pppox_sock *po)
{
int hash = hash_item(po->pppoe_pa.sid, po->pppoe_pa.remote);
- struct pppox_sock *ret;
+ struct pppox_sock *ret, *first;
- ret = pn->hash_table[hash];
+ first = rcu_dereference_protected(pn->hash_table[hash], lockdep_is_held(&pn->hash_lock));
+ ret = first;
while (ret) {
if (cmp_2_addr(&ret->pppoe_pa, &po->pppoe_pa) &&
ret->pppoe_ifindex == po->pppoe_ifindex)
return -EALREADY;
- ret = ret->next;
+ ret = rcu_dereference_protected(ret->next, lockdep_is_held(&pn->hash_lock));
}
- po->next = pn->hash_table[hash];
- pn->hash_table[hash] = po;
+ RCU_INIT_POINTER(po->next, first);
+ rcu_assign_pointer(pn->hash_table[hash], po);
return 0;
}
@@ -198,20 +199,24 @@ static void __delete_item(struct pppoe_net *pn, __be16 sid,
char *addr, int ifindex)
{
int hash = hash_item(sid, addr);
- struct pppox_sock *ret, **src;
+ struct pppox_sock *ret, __rcu **src;
- ret = pn->hash_table[hash];
+ ret = rcu_dereference_protected(pn->hash_table[hash], lockdep_is_held(&pn->hash_lock));
src = &pn->hash_table[hash];
while (ret) {
if (cmp_addr(&ret->pppoe_pa, sid, addr) &&
ret->pppoe_ifindex == ifindex) {
- *src = ret->next;
+ struct pppox_sock *next;
+
+ next = rcu_dereference_protected(ret->next,
+ lockdep_is_held(&pn->hash_lock));
+ rcu_assign_pointer(*src, next);
break;
}
src = &ret->next;
- ret = ret->next;
+ ret = rcu_dereference_protected(ret->next, lockdep_is_held(&pn->hash_lock));
}
}
@@ -225,17 +230,15 @@ static inline struct pppox_sock *get_item(struct pppoe_net *pn, __be16 sid,
{
struct pppox_sock *po;
- read_lock_bh(&pn->hash_lock);
po = __get_item(pn, sid, addr, ifindex);
- if (po)
- sock_hold(sk_pppox(po));
- read_unlock_bh(&pn->hash_lock);
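+ /* Lookups run under RCU protection and sockets are freed via
+ * SOCK_RCU_FREE, so refcount_inc_not_zero() safely fails for a
+ * socket whose last reference has already been dropped.
+ */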
+ if (po && !refcount_inc_not_zero(&sk_pppox(po)->sk_refcnt))
+ po = NULL;
return po;
}
-static inline struct pppox_sock *get_item_by_addr(struct net *net,
- struct sockaddr_pppox *sp)
+static inline struct pppox_sock *__get_item_by_addr(struct net *net,
+ struct sockaddr_pppox *sp)
{
struct net_device *dev;
struct pppoe_net *pn;
@@ -243,24 +246,22 @@ static inline struct pppox_sock *get_item_by_addr(struct net *net,
int ifindex;
- rcu_read_lock();
dev = dev_get_by_name_rcu(net, sp->sa_addr.pppoe.dev);
if (dev) {
ifindex = dev->ifindex;
pn = pppoe_pernet(net);
- pppox_sock = get_item(pn, sp->sa_addr.pppoe.sid,
- sp->sa_addr.pppoe.remote, ifindex);
+ pppox_sock = __get_item(pn, sp->sa_addr.pppoe.sid,
+ sp->sa_addr.pppoe.remote, ifindex);
}
- rcu_read_unlock();
return pppox_sock;
}
static inline void delete_item(struct pppoe_net *pn, __be16 sid,
char *addr, int ifindex)
{
- write_lock_bh(&pn->hash_lock);
+ spin_lock(&pn->hash_lock);
__delete_item(pn, sid, addr, ifindex);
- write_unlock_bh(&pn->hash_lock);
+ spin_unlock(&pn->hash_lock);
}
/***************************************************************************
@@ -276,14 +277,16 @@ static void pppoe_flush_dev(struct net_device *dev)
int i;
pn = pppoe_pernet(dev_net(dev));
- write_lock_bh(&pn->hash_lock);
+ spin_lock(&pn->hash_lock);
for (i = 0; i < PPPOE_HASH_SIZE; i++) {
- struct pppox_sock *po = pn->hash_table[i];
+ struct pppox_sock *po = rcu_dereference_protected(pn->hash_table[i],
+ lockdep_is_held(&pn->hash_lock));
struct sock *sk;
while (po) {
while (po && po->pppoe_dev != dev) {
- po = po->next;
+ po = rcu_dereference_protected(po->next,
+ lockdep_is_held(&pn->hash_lock));
}
if (!po)
@@ -300,7 +303,7 @@ static void pppoe_flush_dev(struct net_device *dev)
*/
sock_hold(sk);
- write_unlock_bh(&pn->hash_lock);
+ spin_unlock(&pn->hash_lock);
lock_sock(sk);
if (po->pppoe_dev == dev &&
@@ -320,11 +323,12 @@ static void pppoe_flush_dev(struct net_device *dev)
*/
BUG_ON(pppoe_pernet(dev_net(dev)) == NULL);
- write_lock_bh(&pn->hash_lock);
- po = pn->hash_table[i];
+ spin_lock(&pn->hash_lock);
+ po = rcu_dereference_protected(pn->hash_table[i],
+ lockdep_is_held(&pn->hash_lock));
}
}
- write_unlock_bh(&pn->hash_lock);
+ spin_unlock(&pn->hash_lock);
}
static int pppoe_device_event(struct notifier_block *this,
@@ -375,18 +379,16 @@ static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb)
if (sk->sk_state & PPPOX_BOUND) {
ppp_input(&po->chan, skb);
} else if (sk->sk_state & PPPOX_RELAY) {
- relay_po = get_item_by_addr(sock_net(sk),
- &po->pppoe_relay);
+ relay_po = __get_item_by_addr(sock_net(sk),
+ &po->pppoe_relay);
if (relay_po == NULL)
goto abort_kfree;
if ((sk_pppox(relay_po)->sk_state & PPPOX_CONNECTED) == 0)
- goto abort_put;
+ goto abort_kfree;
if (!__pppoe_xmit(sk_pppox(relay_po), skb))
- goto abort_put;
-
- sock_put(sk_pppox(relay_po));
+ goto abort_kfree;
} else {
if (sock_queue_rcv_skb(sk, skb))
goto abort_kfree;
@@ -394,9 +396,6 @@ static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb)
return NET_RX_SUCCESS;
-abort_put:
- sock_put(sk_pppox(relay_po));
-
abort_kfree:
kfree_skb(skb);
return NET_RX_DROP;
@@ -441,14 +440,11 @@ static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev,
ph = pppoe_hdr(skb);
pn = pppoe_pernet(dev_net(dev));
- /* Note that get_item does a sock_hold(), so sk_pppox(po)
- * is known to be safe.
- */
- po = get_item(pn, ph->sid, eth_hdr(skb)->h_source, dev->ifindex);
+ po = __get_item(pn, ph->sid, eth_hdr(skb)->h_source, dev->ifindex);
if (!po)
goto drop;
- return sk_receive_skb(sk_pppox(po), skb, 0);
+ return __sk_receive_skb(sk_pppox(po), skb, 0, 1, false);
drop:
kfree_skb(skb);
@@ -528,6 +524,11 @@ static struct proto pppoe_sk_proto __read_mostly = {
.obj_size = sizeof(struct pppox_sock),
};
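+/* With SOCK_RCU_FREE set on pppoe sockets, lockless readers may still
+ * reference the socket until a grace period has elapsed, so the receive
+ * queue is purged here rather than in pppoe_release().
+ */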
+static void pppoe_destruct(struct sock *sk)
+{
+ skb_queue_purge(&sk->sk_receive_queue);
+}
+
/***********************************************************************
*
* Initialize a new struct sock.
@@ -542,11 +543,13 @@ static int pppoe_create(struct net *net, struct socket *sock, int kern)
return -ENOMEM;
sock_init_data(sock, sk);
+ sock_set_flag(sk, SOCK_RCU_FREE);
sock->state = SS_UNCONNECTED;
sock->ops = &pppoe_ops;
sk->sk_backlog_rcv = pppoe_rcv_core;
+ sk->sk_destruct = pppoe_destruct;
sk->sk_state = PPPOX_NONE;
sk->sk_type = SOCK_STREAM;
sk->sk_family = PF_PPPOX;
@@ -599,7 +602,6 @@ static int pppoe_release(struct socket *sock)
sock_orphan(sk);
sock->sk = NULL;
- skb_queue_purge(&sk->sk_receive_queue);
release_sock(sk);
sock_put(sk);
@@ -681,9 +683,9 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
&sp->sa_addr.pppoe,
sizeof(struct pppoe_addr));
- write_lock_bh(&pn->hash_lock);
+ spin_lock(&pn->hash_lock);
error = __set_item(pn, po);
- write_unlock_bh(&pn->hash_lock);
+ spin_unlock(&pn->hash_lock);
if (error < 0)
goto err_put;
@@ -808,11 +810,12 @@ static int pppoe_ioctl(struct socket *sock, unsigned int cmd,
/* Check that the socket referenced by the address
actually exists. */
- relay_po = get_item_by_addr(sock_net(sk), &po->pppoe_relay);
+ rcu_read_lock();
+ relay_po = __get_item_by_addr(sock_net(sk), &po->pppoe_relay);
+ rcu_read_unlock();
if (!relay_po)
break;
- sock_put(sk_pppox(relay_po));
sk->sk_state |= PPPOX_RELAY;
err = 0;
break;
@@ -1052,11 +1055,11 @@ static inline struct pppox_sock *pppoe_get_idx(struct pppoe_net *pn, loff_t pos)
int i;
for (i = 0; i < PPPOE_HASH_SIZE; i++) {
- po = pn->hash_table[i];
+ po = rcu_dereference(pn->hash_table[i]);
while (po) {
if (!pos--)
goto out;
- po = po->next;
+ po = rcu_dereference(po->next);
}
}
@@ -1065,19 +1068,19 @@ out:
}
static void *pppoe_seq_start(struct seq_file *seq, loff_t *pos)
- __acquires(pn->hash_lock)
+ __acquires(RCU)
{
struct pppoe_net *pn = pppoe_pernet(seq_file_net(seq));
loff_t l = *pos;
- read_lock_bh(&pn->hash_lock);
+ rcu_read_lock();
return l ? pppoe_get_idx(pn, --l) : SEQ_START_TOKEN;
}
static void *pppoe_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct pppoe_net *pn = pppoe_pernet(seq_file_net(seq));
- struct pppox_sock *po;
+ struct pppox_sock *po, *next;
++*pos;
if (v == SEQ_START_TOKEN) {
@@ -1085,14 +1088,15 @@ static void *pppoe_seq_next(struct seq_file *seq, void *v, loff_t *pos)
goto out;
}
po = v;
- if (po->next)
- po = po->next;
+ next = rcu_dereference(po->next);
+ if (next)
+ po = next;
else {
int hash = hash_item(po->pppoe_pa.sid, po->pppoe_pa.remote);
po = NULL;
while (++hash < PPPOE_HASH_SIZE) {
- po = pn->hash_table[hash];
+ po = rcu_dereference(pn->hash_table[hash]);
if (po)
break;
}
@@ -1103,10 +1107,9 @@ out:
}
static void pppoe_seq_stop(struct seq_file *seq, void *v)
- __releases(pn->hash_lock)
+ __releases(RCU)
{
- struct pppoe_net *pn = pppoe_pernet(seq_file_net(seq));
- read_unlock_bh(&pn->hash_lock);
+ rcu_read_unlock();
}
static const struct seq_operations pppoe_seq_ops = {
@@ -1149,7 +1152,7 @@ static __net_init int pppoe_init_net(struct net *net)
struct pppoe_net *pn = pppoe_pernet(net);
struct proc_dir_entry *pde;
- rwlock_init(&pn->hash_lock);
+ spin_lock_init(&pn->hash_lock);
pde = proc_create_net("pppoe", 0444, net->proc_net,
&pppoe_seq_ops, sizeof(struct seq_net_private));
diff --git a/drivers/net/pse-pd/Kconfig b/drivers/net/pse-pd/Kconfig
index 7fab916a7f46..7ef29657ee5d 100644
--- a/drivers/net/pse-pd/Kconfig
+++ b/drivers/net/pse-pd/Kconfig
@@ -32,6 +32,17 @@ config PSE_PD692X0
To compile this driver as a module, choose M here: the
module will be called pd692x0.
+config PSE_SI3474
+ tristate "Si3474 PSE controller"
+ depends on I2C
+ help
+ This module provides support for the Si3474 regulator-based Ethernet
+ Power Sourcing Equipment.
+ Only 4-pair PSE configurations are supported.
+
+ To compile this driver as a module, choose M here: the
+ module will be called si3474.
+
config PSE_TPS23881
tristate "TPS23881 PSE controller"
depends on I2C
diff --git a/drivers/net/pse-pd/Makefile b/drivers/net/pse-pd/Makefile
index 9d2898b36737..cc78f7ea7f5f 100644
--- a/drivers/net/pse-pd/Makefile
+++ b/drivers/net/pse-pd/Makefile
@@ -5,4 +5,5 @@ obj-$(CONFIG_PSE_CONTROLLER) += pse_core.o
obj-$(CONFIG_PSE_REGULATOR) += pse_regulator.o
obj-$(CONFIG_PSE_PD692X0) += pd692x0.o
+obj-$(CONFIG_PSE_SI3474) += si3474.o
obj-$(CONFIG_PSE_TPS23881) += tps23881.o
diff --git a/drivers/net/pse-pd/si3474.c b/drivers/net/pse-pd/si3474.c
new file mode 100644
index 000000000000..aa07ffbce54d
--- /dev/null
+++ b/drivers/net/pse-pd/si3474.c
@@ -0,0 +1,578 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Driver for the Skyworks Si3474 PoE PSE Controller
+ *
+ * Chip Architecture & Terminology:
+ *
+ * The Si3474 is a single-chip PoE PSE controller managing 8 physical power
+ * delivery channels. Internally, it's structured into two logical "Quads".
+ *
+ * Quad 0: Manages physical channels ('ports' in datasheet) 0, 1, 2, 3
+ * Quad 1: Manages physical channels ('ports' in datasheet) 4, 5, 6, 7
+ *
+ * Each Quad is accessed via a separate I2C address. The base address range is
+ * set by hardware pins A1-A4, and the specific address selects Quad 0 (usually
+ * the lower/even address) or Quad 1 (usually the higher/odd address).
+ * See datasheet Table 2.2 for the address mapping.
+ *
+ * While the Quads manage channel-specific operations, the Si3474 package has
+ * several resources shared across the entire chip:
+ * - Single RESETb input pin.
+ * - Single INTb output pin (signals interrupts from *either* Quad).
+ * - Single OSS input pin (Emergency Shutdown).
+ * - Global I2C Address (0x7F) used for firmware updates.
+ * - Global status monitoring (Temperature, VDD/VPWR Undervoltage Lockout).
+ *
+ * Driver Architecture:
+ *
+ * To handle the mix of per-Quad access and shared resources correctly, this
+ * driver treats the entire Si3474 package as one logical device. The driver
+ * instance associated with the primary I2C address (Quad 0) takes ownership.
+ * It discovers and manages the I2C client for the secondary address (Quad 1).
+ * This primary instance handles shared resources like IRQ management and
+ * registers a single PSE controller device representing all logical PIs.
+ * Internal functions route I2C commands to the appropriate Quad's i2c_client
+ * based on the target channel or PI.
+ *
+ * Terminology Mapping:
+ *
+ * - "PI" (Power Interface): Refers to the logical PSE port as defined by
+ * IEEE 802.3 (typically corresponds to an RJ45 connector). This is the
+ * `id` (0-7) used in the pse_controller_ops.
+ * - "Channel": Refers to one of the 8 physical power control paths within
+ * the Si3474 chip itself (hardware channels 0-7). This terminology is
+ * used internally within the driver to avoid confusion with 'ports'.
+ * - "Quad": One of the two internal 4-channel management units within the
+ * Si3474, each accessed via its own I2C address.
+ *
+ * Relationship:
+ * - A 2-Pair PoE PI uses 1 Channel.
+ * - A 4-Pair PoE PI uses 2 Channels.
+ *
+ * ASCII Schematic:
+ *
+ * +-----------------------------------------------------+
+ * | Si3474 Chip |
+ * | |
+ * | +---------------------+ +---------------------+ |
+ * | | Quad 0 | | Quad 1 | |
+ * | | Channels 0, 1, 2, 3 | | Channels 4, 5, 6, 7 | |
+ * | +----------^----------+ +-------^-------------+ |
+ * | I2C Addr 0 | | I2C Addr 1 |
+ * | +------------------------+ |
+ * | (Primary Driver Instance) (Managed by Primary) |
+ * | |
+ * | Shared Resources (affect whole chip): |
+ * | - Single INTb Output -> Handled by Primary |
+ * | - Single RESETb Input |
+ * | - Single OSS Input -> Handled by Primary |
+ * | - Global I2C Addr (0x7F) for Firmware Update |
+ * | - Global Status (Temp, VDD/VPWR UVLO) |
+ * +-----------------------------------------------------+
+ * | | | | | | | |
+ * Ch0 Ch1 Ch2 Ch3 Ch4 Ch5 Ch6 Ch7 (Physical Channels)
+ *
+ * Example Mapping (Logical PI to Physical Channel(s)):
+ * * 2-Pair Mode (8 PIs):
+ * PI 0 -> Ch 0
+ * PI 1 -> Ch 1
+ * ...
+ * PI 7 -> Ch 7
+ * * 4-Pair Mode (4 PIs):
+ * PI 0 -> Ch 0 + Ch 1 (Managed via Quad 0 Addr)
+ * PI 1 -> Ch 2 + Ch 3 (Managed via Quad 0 Addr)
+ * PI 2 -> Ch 4 + Ch 5 (Managed via Quad 1 Addr)
+ * PI 3 -> Ch 6 + Ch 7 (Managed via Quad 1 Addr)
+ * (Note: Actual mapping depends on Device Tree and PORT_REMAP config)
+ */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pse-pd/pse.h>
+
+#define SI3474_MAX_CHANS 8
+
+#define MANUFACTURER_ID 0x08
+#define IC_ID 0x05
+#define SI3474_DEVICE_ID (MANUFACTURER_ID << 3 | IC_ID)
+
+/* Misc registers */
+#define VENDOR_IC_ID_REG 0x1B
+#define TEMPERATURE_REG 0x2C
+#define FIRMWARE_REVISION_REG 0x41
+#define CHIP_REVISION_REG 0x43
+
+/* Main status registers */
+#define POWER_STATUS_REG 0x10
+#define PORT_MODE_REG 0x12
+#define DETECT_CLASS_ENABLE_REG 0x14
+
+/* PORTn Current */
+#define PORT1_CURRENT_LSB_REG 0x30
+
+/* PORTn Current [mA], return in [nA] */
+/* 1000 * ((PORTn_CURRENT_MSB << 8) + PORTn_CURRENT_LSB) / 16384 */
+#define SI3474_NA_STEP (1000 * 1000 * 1000 / 16384)
+
+/* VPWR Voltage */
+#define VPWR_LSB_REG 0x2E
+#define VPWR_MSB_REG 0x2F
+
+/* PORTn Voltage */
+#define PORT1_VOLTAGE_LSB_REG 0x32
+
+/* VPWR Voltage [V], return in [uV] */
+/* 60 * (( VPWR_MSB << 8) + VPWR_LSB) / 16384 */
+#define SI3474_UV_STEP (1000 * 1000 * 60 / 16384)
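+
+/* Conversion sketch (illustrative): with raw = (MSB << 8) | LSB, the
+ * register readings scale linearly with the steps above:
+ *
+ * current_na = (u64)raw * SI3474_NA_STEP;
+ * vpwr_uv = (u64)raw * SI3474_UV_STEP;
+ */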
+
+/* Helper macros */
+#define CHAN_IDX(chan) ((chan) % 4)
+#define CHAN_BIT(chan) BIT(CHAN_IDX(chan))
+#define CHAN_UPPER_BIT(chan) BIT(CHAN_IDX(chan) + 4)
+
+#define CHAN_MASK(chan) (0x03U << (2 * CHAN_IDX(chan)))
+#define CHAN_REG(base, chan) ((base) + (CHAN_IDX(chan) * 4))
+
+struct si3474_pi_desc {
+ u8 chan[2];
+ bool is_4p;
+};
+
+struct si3474_priv {
+ struct i2c_client *client[2];
+ struct pse_controller_dev pcdev;
+ struct device_node *np;
+ struct si3474_pi_desc pi[SI3474_MAX_CHANS];
+};
+
+static struct si3474_priv *to_si3474_priv(struct pse_controller_dev *pcdev)
+{
+ return container_of(pcdev, struct si3474_priv, pcdev);
+}
+
+static void si3474_get_channels(struct si3474_priv *priv, int id,
+ u8 *chan0, u8 *chan1)
+{
+ *chan0 = priv->pi[id].chan[0];
+ *chan1 = priv->pi[id].chan[1];
+}
+
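+/* Channels 0-3 sit behind the primary client (Quad 0), channels 4-7 behind
+ * the ancillary client (Quad 1).
+ */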
+static struct i2c_client *si3474_get_chan_client(struct si3474_priv *priv,
+ u8 chan)
+{
+ return (chan < 4) ? priv->client[0] : priv->client[1];
+}
+
+static int si3474_pi_get_admin_state(struct pse_controller_dev *pcdev, int id,
+ struct pse_admin_state *admin_state)
+{
+ struct si3474_priv *priv = to_si3474_priv(pcdev);
+ struct i2c_client *client;
+ bool is_enabled;
+ u8 chan0, chan1;
+ s32 ret;
+
+ si3474_get_channels(priv, id, &chan0, &chan1);
+ client = si3474_get_chan_client(priv, chan0);
+
+ ret = i2c_smbus_read_byte_data(client, PORT_MODE_REG);
+ if (ret < 0) {
+ admin_state->c33_admin_state =
+ ETHTOOL_C33_PSE_ADMIN_STATE_UNKNOWN;
+ return ret;
+ }
+
+ is_enabled = ret & (CHAN_MASK(chan0) | CHAN_MASK(chan1));
+
+ if (is_enabled)
+ admin_state->c33_admin_state =
+ ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED;
+ else
+ admin_state->c33_admin_state =
+ ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED;
+
+ return 0;
+}
+
+static int si3474_pi_get_pw_status(struct pse_controller_dev *pcdev, int id,
+ struct pse_pw_status *pw_status)
+{
+ struct si3474_priv *priv = to_si3474_priv(pcdev);
+ struct i2c_client *client;
+ bool delivering;
+ u8 chan0, chan1;
+ s32 ret;
+
+ si3474_get_channels(priv, id, &chan0, &chan1);
+ client = si3474_get_chan_client(priv, chan0);
+
+ ret = i2c_smbus_read_byte_data(client, POWER_STATUS_REG);
+ if (ret < 0) {
+ pw_status->c33_pw_status = ETHTOOL_C33_PSE_PW_D_STATUS_UNKNOWN;
+ return ret;
+ }
+
+ delivering = ret & (CHAN_UPPER_BIT(chan0) | CHAN_UPPER_BIT(chan1));
+
+ if (delivering)
+ pw_status->c33_pw_status =
+ ETHTOOL_C33_PSE_PW_D_STATUS_DELIVERING;
+ else
+ pw_status->c33_pw_status = ETHTOOL_C33_PSE_PW_D_STATUS_DISABLED;
+
+ return 0;
+}
+
+static int si3474_get_of_channels(struct si3474_priv *priv)
+{
+ struct pse_pi *pi;
+ u32 chan_id;
+ u8 pi_no;
+ s32 ret;
+
+ for (pi_no = 0; pi_no < SI3474_MAX_CHANS; pi_no++) {
+ bool pairset_found = false;
+ u8 pairset_no;
+
+ pi = &priv->pcdev.pi[pi_no];
+
+ for (pairset_no = 0; pairset_no < 2; pairset_no++) {
+ if (!pi->pairset[pairset_no].np)
+ continue;
+
+ pairset_found = true;
+
+ ret = of_property_read_u32(pi->pairset[pairset_no].np,
+ "reg", &chan_id);
+ if (ret) {
+ dev_err(&priv->client[0]->dev,
+ "Failed to read channel reg property\n");
+ return ret;
+ }
+ if (chan_id >= SI3474_MAX_CHANS) {
+ dev_err(&priv->client[0]->dev,
+ "Incorrect channel number: %u\n", chan_id);
+ return -EINVAL;
+ }
+
+ priv->pi[pi_no].chan[pairset_no] = chan_id;
+ /* Mark as 4-pair if second pairset is present */
+ priv->pi[pi_no].is_4p = (pairset_no == 1);
+ }
+
+ if (pairset_found && !priv->pi[pi_no].is_4p) {
+ dev_err(&priv->client[0]->dev,
+ "Second pairset is missing for PI %pOF, only 4p configs are supported\n",
+ pi->np);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int si3474_setup_pi_matrix(struct pse_controller_dev *pcdev)
+{
+ struct si3474_priv *priv = to_si3474_priv(pcdev);
+ s32 ret;
+
+ ret = si3474_get_of_channels(priv);
+ if (ret < 0)
+ dev_warn(&priv->client[0]->dev,
+ "Unable to parse DT PSE power interface matrix\n");
+
+ return ret;
+}
+
+static int si3474_pi_enable(struct pse_controller_dev *pcdev, int id)
+{
+ struct si3474_priv *priv = to_si3474_priv(pcdev);
+ struct i2c_client *client;
+ u8 chan0, chan1;
+ s32 ret;
+ u8 val;
+
+ si3474_get_channels(priv, id, &chan0, &chan1);
+ client = si3474_get_chan_client(priv, chan0);
+
+ /* Release PI from shutdown */
+ ret = i2c_smbus_read_byte_data(client, PORT_MODE_REG);
+ if (ret < 0)
+ return ret;
+
+ val = (u8)ret;
+ val |= CHAN_MASK(chan0);
+ val |= CHAN_MASK(chan1);
+
+ ret = i2c_smbus_write_byte_data(client, PORT_MODE_REG, val);
+ if (ret)
+ return ret;
+
+ /* DETECT_CLASS_ENABLE must be set when using AUTO mode,
+ * otherwise the PI does not power up - datasheet section 2.10.2
+ */
+ val = CHAN_BIT(chan0) | CHAN_UPPER_BIT(chan0) |
+ CHAN_BIT(chan1) | CHAN_UPPER_BIT(chan1);
+
+ ret = i2c_smbus_write_byte_data(client, DETECT_CLASS_ENABLE_REG, val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int si3474_pi_disable(struct pse_controller_dev *pcdev, int id)
+{
+ struct si3474_priv *priv = to_si3474_priv(pcdev);
+ struct i2c_client *client;
+ u8 chan0, chan1;
+ s32 ret;
+ u8 val;
+
+ si3474_get_channels(priv, id, &chan0, &chan1);
+ client = si3474_get_chan_client(priv, chan0);
+
+ /* Set PI in shutdown mode */
+ ret = i2c_smbus_read_byte_data(client, PORT_MODE_REG);
+ if (ret < 0)
+ return ret;
+
+ val = (u8)ret;
+ val &= ~CHAN_MASK(chan0);
+ val &= ~CHAN_MASK(chan1);
+
+ ret = i2c_smbus_write_byte_data(client, PORT_MODE_REG, val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int si3474_pi_get_chan_current(struct si3474_priv *priv, u8 chan)
+{
+ struct i2c_client *client;
+ u64 tmp_64;
+ s32 ret;
+ u8 reg;
+
+ client = si3474_get_chan_client(priv, chan);
+
+ /* Registers 0x30 to 0x3d */
+ reg = CHAN_REG(PORT1_CURRENT_LSB_REG, chan);
+
+ ret = i2c_smbus_read_word_data(client, reg);
+ if (ret < 0)
+ return ret;
+
+ tmp_64 = (u64)ret * SI3474_NA_STEP;
+
+ /* uA = nA / 1000 */
+ tmp_64 = DIV_ROUND_CLOSEST_ULL(tmp_64, 1000);
+ return (int)tmp_64;
+}
+
+static int si3474_pi_get_chan_voltage(struct si3474_priv *priv, u8 chan)
+{
+ struct i2c_client *client;
+ s32 ret;
+ u32 val;
+ u8 reg;
+
+ client = si3474_get_chan_client(priv, chan);
+
+ /* Registers 0x32 to 0x3f */
+ reg = CHAN_REG(PORT1_VOLTAGE_LSB_REG, chan);
+
+ ret = i2c_smbus_read_word_data(client, reg);
+ if (ret < 0)
+ return ret;
+
+ val = ret * SI3474_UV_STEP;
+
+ return (int)val;
+}
+
+static int si3474_pi_get_voltage(struct pse_controller_dev *pcdev, int id)
+{
+ struct si3474_priv *priv = to_si3474_priv(pcdev);
+ struct i2c_client *client;
+ u8 chan0, chan1;
+ s32 ret;
+
+ si3474_get_channels(priv, id, &chan0, &chan1);
+ client = si3474_get_chan_client(priv, chan0);
+
+ /* Check which channels are enabled */
+ ret = i2c_smbus_read_byte_data(client, POWER_STATUS_REG);
+ if (ret < 0)
+ return ret;
+
+ /* Take voltage from the first enabled channel */
+ if (ret & CHAN_BIT(chan0))
+ ret = si3474_pi_get_chan_voltage(priv, chan0);
+ else if (ret & CHAN_BIT(chan1))
+ ret = si3474_pi_get_chan_voltage(priv, chan1);
+ else
+ /* Neither channel is powered, so there should be no voltage to report */
+ return 0;
+
+ return ret;
+}
+
+static int si3474_pi_get_actual_pw(struct pse_controller_dev *pcdev, int id)
+{
+ struct si3474_priv *priv = to_si3474_priv(pcdev);
+ u8 chan0, chan1;
+ u32 uV, uA;
+ u64 tmp_64;
+ s32 ret;
+
+ ret = si3474_pi_get_voltage(&priv->pcdev, id);
+
+ /* Do not read currents if voltage is 0 */
+ if (ret <= 0)
+ return ret;
+ uV = ret;
+
+ si3474_get_channels(priv, id, &chan0, &chan1);
+
+ ret = si3474_pi_get_chan_current(priv, chan0);
+ if (ret < 0)
+ return ret;
+ uA = ret;
+
+ ret = si3474_pi_get_chan_current(priv, chan1);
+ if (ret < 0)
+ return ret;
+ uA += ret;
+
+ tmp_64 = uV;
+ tmp_64 *= uA;
+ /* mW = uV * uA / 1000000000 */
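+ /* e.g. 54 V (54000000 uV) at 100 mA (100000 uA) -> 5400 mW */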
+ return DIV_ROUND_CLOSEST_ULL(tmp_64, 1000000000);
+}
+
+static const struct pse_controller_ops si3474_ops = {
+ .setup_pi_matrix = si3474_setup_pi_matrix,
+ .pi_enable = si3474_pi_enable,
+ .pi_disable = si3474_pi_disable,
+ .pi_get_actual_pw = si3474_pi_get_actual_pw,
+ .pi_get_voltage = si3474_pi_get_voltage,
+ .pi_get_admin_state = si3474_pi_get_admin_state,
+ .pi_get_pw_status = si3474_pi_get_pw_status,
+};
+
+static void si3474_ancillary_i2c_remove(void *data)
+{
+ struct i2c_client *client = data;
+
+ i2c_unregister_device(client);
+}
+
+static int si3474_i2c_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct si3474_priv *priv;
+ u8 fw_version;
+ s32 ret;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(dev, "i2c check functionality failed\n");
+ return -ENXIO;
+ }
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ ret = i2c_smbus_read_byte_data(client, VENDOR_IC_ID_REG);
+ if (ret < 0)
+ return ret;
+
+ if (ret != SI3474_DEVICE_ID) {
+ dev_err(dev, "Wrong device ID: 0x%x\n", ret);
+ return -ENXIO;
+ }
+
+ ret = i2c_smbus_read_byte_data(client, FIRMWARE_REVISION_REG);
+ if (ret < 0)
+ return ret;
+ fw_version = ret;
+
+ ret = i2c_smbus_read_byte_data(client, CHIP_REVISION_REG);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(dev, "Chip revision: 0x%x, firmware version: 0x%x\n",
+ ret, fw_version);
+
+ priv->client[0] = client;
+ i2c_set_clientdata(client, priv);
+
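+ /* Quad 1 defaults to the next consecutive I2C address; DT may override it */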
+ priv->client[1] = i2c_new_ancillary_device(priv->client[0], "secondary",
+ priv->client[0]->addr + 1);
+ if (IS_ERR(priv->client[1]))
+ return PTR_ERR(priv->client[1]);
+
+ ret = devm_add_action_or_reset(dev, si3474_ancillary_i2c_remove, priv->client[1]);
+ if (ret < 0) {
+ dev_err(&priv->client[1]->dev, "Cannot register remove callback\n");
+ return ret;
+ }
+
+ ret = i2c_smbus_read_byte_data(priv->client[1], VENDOR_IC_ID_REG);
+ if (ret < 0) {
+ dev_err(&priv->client[1]->dev, "Cannot access secondary PSE controller\n");
+ return ret;
+ }
+
+ if (ret != SI3474_DEVICE_ID) {
+ dev_err(&priv->client[1]->dev,
+ "Wrong device ID for secondary PSE controller: 0x%x\n", ret);
+ return -ENXIO;
+ }
+
+ priv->np = dev->of_node;
+ priv->pcdev.owner = THIS_MODULE;
+ priv->pcdev.ops = &si3474_ops;
+ priv->pcdev.dev = dev;
+ priv->pcdev.types = ETHTOOL_PSE_C33;
+ priv->pcdev.nr_lines = SI3474_MAX_CHANS;
+
+ ret = devm_pse_controller_register(dev, &priv->pcdev);
+ if (ret) {
+ dev_err(dev, "Failed to register PSE controller: 0x%x\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct i2c_device_id si3474_id[] = {
+ { "si3474" },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, si3474_id);
+
+static const struct of_device_id si3474_of_match[] = {
+ {
+ .compatible = "skyworks,si3474",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, si3474_of_match);
+
+static struct i2c_driver si3474_driver = {
+ .probe = si3474_i2c_probe,
+ .id_table = si3474_id,
+ .driver = {
+ .name = "si3474",
+ .of_match_table = si3474_of_match,
+ },
+};
+module_i2c_driver(si3474_driver);
+
+MODULE_AUTHOR("Piotr Kubik <piotr.kubik@adtran.com>");
+MODULE_DESCRIPTION("Skyworks Si3474 PoE PSE Controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 47ddcb4b9a78..8192740357a0 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -2826,13 +2826,13 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
if (netif_running(tun->dev))
netif_tx_wake_all_queues(tun->dev);
- strcpy(ifr->ifr_name, tun->dev->name);
+ strscpy(ifr->ifr_name, tun->dev->name);
return 0;
}
static void tun_get_iff(struct tun_struct *tun, struct ifreq *ifr)
{
- strcpy(ifr->ifr_name, tun->dev->name);
+ strscpy(ifr->ifr_name, tun->dev->name);
ifr->ifr_flags = tun_flags(tun);
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 0a678e31cfaa..856e648d804e 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -116,6 +116,7 @@ config USB_LAN78XX
select PHYLINK
select MICROCHIP_PHY
select CRC32
+ imply NET_SELFTESTS
help
This option adds support for Microchip LAN78XX based USB 2
& USB 3 10/100/1000 Ethernet adapters.
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 1ff25f57329a..b56e2459ee3c 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -20,6 +20,7 @@
#include <linux/mdio.h>
#include <linux/phy.h>
#include <net/ip6_checksum.h>
+#include <net/selftests.h>
#include <net/vxlan.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
@@ -1702,12 +1703,16 @@ static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
{
if (stringset == ETH_SS_STATS)
memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
+ else if (stringset == ETH_SS_TEST)
+ net_selftest_get_strings(data);
}
static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
{
if (sset == ETH_SS_STATS)
return ARRAY_SIZE(lan78xx_gstrings);
+ else if (sset == ETH_SS_TEST)
+ return net_selftest_get_count();
else
return -EOPNOTSUPP;
}
@@ -1894,6 +1899,7 @@ static const struct ethtool_ops lan78xx_ethtool_ops = {
.set_eeprom = lan78xx_ethtool_set_eeprom,
.get_ethtool_stats = lan78xx_get_stats,
.get_sset_count = lan78xx_get_sset_count,
+ .self_test = net_selftest,
.get_strings = lan78xx_get_strings,
.get_wol = lan78xx_get_wol,
.set_wol = lan78xx_set_wol,
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index ddff6f19ff98..92add3daadbb 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -664,7 +664,6 @@ static void rtl8150_set_multicast(struct net_device *netdev)
rtl8150_t *dev = netdev_priv(netdev);
u16 rx_creg = 0x9e;
- netif_stop_queue(netdev);
if (netdev->flags & IFF_PROMISC) {
rx_creg |= 0x0001;
dev_info(&netdev->dev, "%s: promiscuous mode\n", netdev->name);
@@ -678,7 +677,6 @@ static void rtl8150_set_multicast(struct net_device *netdev)
rx_creg &= 0x00fc;
}
async_set_registers(dev, RCR, sizeof(rx_creg), rx_creg);
- netif_wake_queue(netdev);
}
static netdev_tx_t rtl8150_start_xmit(struct sk_buff *skb,
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 975bdc5dab84..7da5a37917e9 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -2185,10 +2185,9 @@ static struct sk_buff *build_skb_from_xdp_buff(struct net_device *dev,
skb_metadata_set(skb, metasize);
if (unlikely(xdp_buff_has_frags(xdp)))
- xdp_update_skb_shared_info(skb, nr_frags,
- sinfo->xdp_frags_size,
- xdp_frags_truesz,
- xdp_buff_is_frag_pfmemalloc(xdp));
+ xdp_update_skb_frags_info(skb, nr_frags, sinfo->xdp_frags_size,
+ xdp_frags_truesz,
+ xdp_buff_get_skb_flags(xdp));
return skb;
}
@@ -5610,20 +5609,11 @@ static int virtnet_set_rxfh(struct net_device *dev,
return 0;
}
-static int virtnet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs)
+static u32 virtnet_get_rx_ring_count(struct net_device *dev)
{
struct virtnet_info *vi = netdev_priv(dev);
- int rc = 0;
- switch (info->cmd) {
- case ETHTOOL_GRXRINGS:
- info->data = vi->curr_queue_pairs;
- break;
- default:
- rc = -EOPNOTSUPP;
- }
-
- return rc;
+ return vi->curr_queue_pairs;
}
static const struct ethtool_ops virtnet_ethtool_ops = {
@@ -5651,7 +5641,7 @@ static const struct ethtool_ops virtnet_ethtool_ops = {
.set_rxfh = virtnet_set_rxfh,
.get_rxfh_fields = virtnet_get_hashflow,
.set_rxfh_fields = virtnet_set_hashflow,
- .get_rxnfc = virtnet_get_rxnfc,
+ .get_rx_ring_count = virtnet_get_rx_ring_count,
};
static void virtnet_get_queue_stats_rx(struct net_device *dev, int i,
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 3ccd649913b5..571847a7f86d 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -26,6 +26,7 @@
#include <linux/inetdevice.h>
#include <net/arp.h>
+#include <net/flow.h>
#include <net/ip.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
@@ -38,7 +39,6 @@
#include <net/sch_generic.h>
#include <net/netns/generic.h>
#include <net/netfilter/nf_conntrack.h>
-#include <net/inet_dscp.h>
#define DRV_NAME "vrf"
#define DRV_VERSION "1.1"
@@ -505,7 +505,7 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
/* needed to match OIF rule */
fl4.flowi4_l3mdev = vrf_dev->ifindex;
fl4.flowi4_iif = LOOPBACK_IFINDEX;
- fl4.flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(ip4h));
+ fl4.flowi4_dscp = ip4h_dscp(ip4h);
fl4.flowi4_flags = FLOWI_FLAG_ANYSRC;
fl4.flowi4_proto = ip4h->protocol;
fl4.daddr = ip4h->daddr;
diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index dab864bc733c..a5c55e7e4d79 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -446,7 +446,7 @@ int vxlan_fdb_find_uc(struct net_device *dev, const u8 *mac, __be32 vni,
{
struct vxlan_dev *vxlan = netdev_priv(dev);
u8 eth_addr[ETH_ALEN + 2] = { 0 };
- struct vxlan_rdst *rdst;
+ struct vxlan_rdst *rdst = NULL;
struct vxlan_fdb *f;
int rc = 0;
@@ -459,12 +459,13 @@ int vxlan_fdb_find_uc(struct net_device *dev, const u8 *mac, __be32 vni,
rcu_read_lock();
f = vxlan_find_mac_rcu(vxlan, eth_addr, vni);
- if (!f) {
+ if (f)
+ rdst = first_remote_rcu(f);
+ if (!rdst) {
rc = -ENOENT;
goto out;
}
- rdst = first_remote_rcu(f);
vxlan_fdb_switchdev_notifier_info(vxlan, f, rdst, NULL, fdb_info);
out:
diff --git a/drivers/net/wan/framer/pef2256/pef2256.c b/drivers/net/wan/framer/pef2256/pef2256.c
index 1e4c8e85d598..c5501826db1e 100644
--- a/drivers/net/wan/framer/pef2256/pef2256.c
+++ b/drivers/net/wan/framer/pef2256/pef2256.c
@@ -37,6 +37,7 @@ struct pef2256 {
struct device *dev;
struct regmap *regmap;
enum pef2256_version version;
+ const char *version_txt;
struct clk *mclk;
struct clk *sclkr;
struct clk *sclkx;
@@ -114,6 +115,16 @@ enum pef2256_version pef2256_get_version(struct pef2256 *pef2256)
}
EXPORT_SYMBOL_GPL(pef2256_get_version);
+static ssize_t version_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct pef2256 *pef2256 = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%s\n", pef2256->version_txt);
+}
+
+static DEVICE_ATTR_RO(version);
+
enum pef2256_gcm_config_item {
PEF2256_GCM_CONFIG_1544000 = 0,
PEF2256_GCM_CONFIG_2048000,
@@ -697,7 +708,6 @@ static int pef2256_probe(struct platform_device *pdev)
unsigned long sclkr_rate, sclkx_rate;
struct framer_provider *framer_provider;
struct pef2256 *pef2256;
- const char *version_txt;
void __iomem *iomem;
int ret;
int irq;
@@ -719,8 +729,8 @@ static int pef2256_probe(struct platform_device *pdev)
pef2256->regmap = devm_regmap_init_mmio(&pdev->dev, iomem,
&pef2256_regmap_config);
if (IS_ERR(pef2256->regmap)) {
- dev_err(&pdev->dev, "Failed to initialise Regmap (%ld)\n",
- PTR_ERR(pef2256->regmap));
+ dev_err(&pdev->dev, "Failed to initialise Regmap (%pe)\n",
+ pef2256->regmap);
return PTR_ERR(pef2256->regmap);
}
@@ -763,18 +773,18 @@ static int pef2256_probe(struct platform_device *pdev)
pef2256->version = pef2256_get_version(pef2256);
switch (pef2256->version) {
case PEF2256_VERSION_1_2:
- version_txt = "1.2";
+ pef2256->version_txt = "1.2";
break;
case PEF2256_VERSION_2_1:
- version_txt = "2.1";
+ pef2256->version_txt = "2.1";
break;
case PEF2256_VERSION_2_2:
- version_txt = "2.2";
+ pef2256->version_txt = "2.2";
break;
default:
return -ENODEV;
}
- dev_info(pef2256->dev, "Version %s detected\n", version_txt);
+ dev_info(pef2256->dev, "Version %s detected\n", pef2256->version_txt);
ret = pef2556_of_parse(pef2256, np);
if (ret)
@@ -835,6 +845,8 @@ static int pef2256_probe(struct platform_device *pdev)
return ret;
}
+ device_create_file(pef2256->dev, &dev_attr_version);
+
return 0;
}
@@ -849,6 +861,8 @@ static void pef2256_remove(struct platform_device *pdev)
pef2256_write8(pef2256, PEF2256_IMR3, 0xff);
pef2256_write8(pef2256, PEF2256_IMR4, 0xff);
pef2256_write8(pef2256, PEF2256_IMR5, 0xff);
+
+ device_remove_file(pef2256->dev, &dev_attr_version);
}
static const struct of_device_id pef2256_id_table[] = {
diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c
index 813bd10d3dc7..46a71ec36af8 100644
--- a/drivers/net/wireguard/device.c
+++ b/drivers/net/wireguard/device.c
@@ -333,7 +333,8 @@ static int wg_newlink(struct net_device *dev,
goto err_free_peer_hashtable;
wg->handshake_receive_wq = alloc_workqueue("wg-kex-%s",
- WQ_CPU_INTENSIVE | WQ_FREEZABLE, 0, dev->name);
+ WQ_CPU_INTENSIVE | WQ_FREEZABLE | WQ_PERCPU, 0,
+ dev->name);
if (!wg->handshake_receive_wq)
goto err_free_index_hashtable;
@@ -343,7 +344,8 @@ static int wg_newlink(struct net_device *dev,
goto err_destroy_handshake_receive;
wg->packet_crypt_wq = alloc_workqueue("wg-crypt-%s",
- WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 0, dev->name);
+ WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_PERCPU, 0,
+ dev->name);
if (!wg->packet_crypt_wq)
goto err_destroy_handshake_send;
diff --git a/drivers/net/wireguard/queueing.h b/drivers/net/wireguard/queueing.h
index 7eb76724b3ed..79b6d70de236 100644
--- a/drivers/net/wireguard/queueing.h
+++ b/drivers/net/wireguard/queueing.h
@@ -104,16 +104,11 @@ static inline void wg_reset_packet(struct sk_buff *skb, bool encapsulating)
static inline int wg_cpumask_choose_online(int *stored_cpu, unsigned int id)
{
- unsigned int cpu = *stored_cpu, cpu_index, i;
+ unsigned int cpu = *stored_cpu;
+
+ while (unlikely(cpu >= nr_cpu_ids || !cpu_online(cpu)))
+ cpu = *stored_cpu = cpumask_nth(id % num_online_cpus(), cpu_online_mask);
- if (unlikely(cpu >= nr_cpu_ids ||
- !cpumask_test_cpu(cpu, cpu_online_mask))) {
- cpu_index = id % cpumask_weight(cpu_online_mask);
- cpu = cpumask_first(cpu_online_mask);
- for (i = 0; i < cpu_index; ++i)
- cpu = cpumask_next(cpu, cpu_online_mask);
- *stored_cpu = cpu;
- }
return cpu;
}
diff --git a/drivers/net/wireless/ath/ath10k/leds.c b/drivers/net/wireless/ath/ath10k/leds.c
index 9b1d04eb4265..3a6c8111e7c6 100644
--- a/drivers/net/wireless/ath/ath10k/leds.c
+++ b/drivers/net/wireless/ath/ath10k/leds.c
@@ -27,7 +27,7 @@ static int ath10k_leds_set_brightness_blocking(struct led_classdev *led_cdev,
goto out;
ar->leds.gpio_state_pin = (brightness != LED_OFF) ^ led->active_low;
- ath10k_wmi_gpio_output(ar, led->gpio, ar->leds.gpio_state_pin);
+ ath10k_wmi_gpio_output(ar, ar->hw_params.led_pin, ar->leds.gpio_state_pin);
out:
mutex_unlock(&ar->conf_mutex);
@@ -64,7 +64,6 @@ int ath10k_leds_register(struct ath10k *ar)
snprintf(ar->leds.label, sizeof(ar->leds.label), "ath10k-%s",
wiphy_name(ar->hw->wiphy));
ar->leds.wifi_led.active_low = 1;
- ar->leds.wifi_led.gpio = ar->hw_params.led_pin;
ar->leds.wifi_led.name = ar->leds.label;
ar->leds.wifi_led.default_state = LEDS_GPIO_DEFSTATE_KEEP;
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 24dd794e31ea..154ac7a70982 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -16,6 +16,7 @@
#include <linux/acpi.h>
#include <linux/of.h>
#include <linux/bitfield.h>
+#include <linux/random.h>
#include "hif.h"
#include "core.h"
@@ -290,8 +291,15 @@ static int ath10k_send_key(struct ath10k_vif *arvif,
key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
if (cmd == DISABLE_KEY) {
- arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_NONE];
- arg.key_data = NULL;
+ if (flags & WMI_KEY_GROUP) {
+ /* Not all hardware handles group-key deletion operation
+ * correctly. Replace the key with a junk value to invalidate it.
+ */
+ get_random_bytes(key->key, key->keylen);
+ } else {
+ arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_NONE];
+ arg.key_data = NULL;
+ }
}
return ath10k_wmi_vdev_install_key(arvif->ar, &arg);
diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
index f0713bd36173..b3f6424c17d3 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.c
+++ b/drivers/net/wireless/ath/ath10k/snoc.c
@@ -13,7 +13,7 @@
#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <linux/remoteproc/qcom_rproc.h>
-#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
#include <linux/iommu.h>
#include "ce.h"
@@ -1559,19 +1559,11 @@ static void ath10k_modem_deinit(struct ath10k *ar)
static int ath10k_setup_msa_resources(struct ath10k *ar, u32 msa_size)
{
struct device *dev = ar->dev;
- struct device_node *node;
struct resource r;
int ret;
- node = of_parse_phandle(dev->of_node, "memory-region", 0);
- if (node) {
- ret = of_address_to_resource(node, 0, &r);
- of_node_put(node);
- if (ret) {
- dev_err(dev, "failed to resolve msa fixed region\n");
- return ret;
- }
-
+ ret = of_reserved_mem_region_to_resource(dev->of_node, 0, &r);
+ if (!ret) {
ar->msa.paddr = r.start;
ar->msa.mem_size = resource_size(&r);
ar->msa.vaddr = devm_memremap(dev, ar->msa.paddr,
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index cb8ae751eb31..e595b0979a56 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -1764,33 +1764,32 @@ void ath10k_wmi_put_wmi_channel(struct ath10k *ar, struct wmi_channel *ch,
int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
{
+ unsigned long timeout = jiffies + WMI_SERVICE_READY_TIMEOUT_HZ;
unsigned long time_left, i;
- time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
- WMI_SERVICE_READY_TIMEOUT_HZ);
- if (!time_left) {
- /* Sometimes the PCI HIF doesn't receive interrupt
- * for the service ready message even if the buffer
- * was completed. PCIe sniffer shows that it's
- * because the corresponding CE ring doesn't fires
- * it. Workaround here by polling CE rings once.
- */
- ath10k_warn(ar, "failed to receive service ready completion, polling..\n");
-
+ /* Sometimes the PCI HIF doesn't receive an interrupt
+ * for the service ready message even if the buffer
+ * was completed. A PCIe sniffer shows that it's
+ * because the corresponding CE ring doesn't fire
+ * it. Work around this by polling the CE rings. Since
+ * the message could arrive at any time, continue
+ * polling until timeout.
+ */
+ do {
for (i = 0; i < CE_COUNT; i++)
ath10k_hif_send_complete_check(ar, i, 1);
+ /* The 100 ms granularity is a tradeoff considering scheduler
+ * overhead and response latency
+ */
time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
- WMI_SERVICE_READY_TIMEOUT_HZ);
- if (!time_left) {
- ath10k_warn(ar, "polling timed out\n");
- return -ETIMEDOUT;
- }
-
- ath10k_warn(ar, "service ready completion received, continuing normally\n");
- }
+ msecs_to_jiffies(100));
+ if (time_left)
+ return 0;
+ } while (time_before(jiffies, timeout));
- return 0;
+ ath10k_warn(ar, "failed to receive service ready completion\n");
+ return -ETIMEDOUT;
}
int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar)
diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c
index 50809cc1dad4..8dfe9b40c126 100644
--- a/drivers/net/wireless/ath/ath11k/ahb.c
+++ b/drivers/net/wireless/ath/ath11k/ahb.c
@@ -9,8 +9,8 @@
#include <linux/property.h>
#include <linux/of_device.h>
#include <linux/of.h>
+#include <linux/of_reserved_mem.h>
#include <linux/dma-mapping.h>
-#include <linux/of_address.h>
#include <linux/iommu.h>
#include "ahb.h"
#include "debug.h"
@@ -919,16 +919,10 @@ static int ath11k_ahb_setup_msa_resources(struct ath11k_base *ab)
{
struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
struct device *dev = ab->dev;
- struct device_node *node;
struct resource r;
int ret;
- node = of_parse_phandle(dev->of_node, "memory-region", 0);
- if (!node)
- return -ENOENT;
-
- ret = of_address_to_resource(node, 0, &r);
- of_node_put(node);
+ ret = of_reserved_mem_region_to_resource(dev->of_node, 0, &r);
if (ret) {
dev_err(dev, "failed to resolve msa fixed region\n");
return ret;
@@ -937,12 +931,7 @@ static int ath11k_ahb_setup_msa_resources(struct ath11k_base *ab)
ab_ahb->fw.msa_paddr = r.start;
ab_ahb->fw.msa_size = resource_size(&r);
- node = of_parse_phandle(dev->of_node, "memory-region", 1);
- if (!node)
- return -ENOENT;
-
- ret = of_address_to_resource(node, 0, &r);
- of_node_put(node);
+ ret = of_reserved_mem_region_to_resource(dev->of_node, 1, &r);
if (ret) {
dev_err(dev, "failed to resolve ce fixed region\n");
return ret;
diff --git a/drivers/net/wireless/ath/ath11k/ce.c b/drivers/net/wireless/ath/ath11k/ce.c
index c65fc9fb539e..a7a163621b21 100644
--- a/drivers/net/wireless/ath/ath11k/ce.c
+++ b/drivers/net/wireless/ath/ath11k/ce.c
@@ -354,7 +354,8 @@ static int ath11k_ce_rx_post_pipe(struct ath11k_ce_pipe *pipe)
ret = ath11k_ce_rx_buf_enqueue_pipe(pipe, skb, paddr);
if (ret) {
- ath11k_warn(ab, "failed to enqueue rx buf: %d\n", ret);
+ ath11k_dbg(ab, ATH11K_DBG_CE, "failed to enqueue rx buf: %d\n",
+ ret);
dma_unmap_single(ab->dev, paddr,
skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index d49353b6b2e7..2810752260f2 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -2215,14 +2215,10 @@ static int ath11k_core_reconfigure_on_crash(struct ath11k_base *ab)
mutex_unlock(&ab->core_lock);
ath11k_dp_free(ab);
- ath11k_hal_srng_deinit(ab);
+ ath11k_hal_srng_clear(ab);
ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS(ab))) - 1;
- ret = ath11k_hal_srng_init(ab);
- if (ret)
- return ret;
-
clear_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags);
ret = ath11k_core_qmi_firmware_ready(ab);
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
index ffc7482c77b6..b9e976ddcbbf 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
@@ -4615,7 +4615,6 @@ static void ath11k_hal_rx_msdu_list_get(struct ath11k *ar,
msdu_details[i].buf_addr_info.info0) == 0) {
msdu_desc_info = &msdu_details[i - 1].rx_msdu_info;
msdu_desc_info->info0 |= last;
- ;
break;
}
msdu_desc_info = &msdu_details[i].rx_msdu_info;
diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c
index 0c3ce7509ab8..0c797b8d0a27 100644
--- a/drivers/net/wireless/ath/ath11k/hal.c
+++ b/drivers/net/wireless/ath/ath11k/hal.c
@@ -1386,6 +1386,22 @@ void ath11k_hal_srng_deinit(struct ath11k_base *ab)
}
EXPORT_SYMBOL(ath11k_hal_srng_deinit);
+void ath11k_hal_srng_clear(struct ath11k_base *ab)
+{
+ /* No need to memset rdp and wrp memory since each individual
+ * segment would get cleared in ath11k_hal_srng_src_hw_init()
+ * and ath11k_hal_srng_dst_hw_init().
+ */
+ memset(ab->hal.srng_list, 0,
+ sizeof(ab->hal.srng_list));
+ memset(ab->hal.shadow_reg_addr, 0,
+ sizeof(ab->hal.shadow_reg_addr));
+ ab->hal.avail_blk_resource = 0;
+ ab->hal.current_blk_index = 0;
+ ab->hal.num_shadow_reg_configured = 0;
+}
+EXPORT_SYMBOL(ath11k_hal_srng_clear);
+
void ath11k_hal_dump_srng_stats(struct ath11k_base *ab)
{
struct hal_srng *srng;
diff --git a/drivers/net/wireless/ath/ath11k/hal.h b/drivers/net/wireless/ath/ath11k/hal.h
index 601542410c75..839095af9267 100644
--- a/drivers/net/wireless/ath/ath11k/hal.h
+++ b/drivers/net/wireless/ath/ath11k/hal.h
@@ -965,6 +965,7 @@ int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type,
struct hal_srng_params *params);
int ath11k_hal_srng_init(struct ath11k_base *ath11k);
void ath11k_hal_srng_deinit(struct ath11k_base *ath11k);
+void ath11k_hal_srng_clear(struct ath11k_base *ab);
void ath11k_hal_dump_srng_stats(struct ath11k_base *ab);
void ath11k_hal_srng_get_shadow_config(struct ath11k_base *ab,
u32 **cfg, u32 *len);
diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
index 378ac96b861b..aea56c38bf8f 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.c
+++ b/drivers/net/wireless/ath/ath11k/qmi.c
@@ -13,7 +13,7 @@
#include "debug.h"
#include "hif.h"
#include <linux/of.h>
-#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
#include <linux/ioport.h>
#include <linux/firmware.h>
#include <linux/of_irq.h>
@@ -2040,23 +2040,14 @@ static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab)
static int ath11k_qmi_assign_target_mem_chunk(struct ath11k_base *ab)
{
struct device *dev = ab->dev;
- struct device_node *hremote_node = NULL;
- struct resource res;
+ struct resource res = {};
u32 host_ddr_sz;
int i, idx, ret;
for (i = 0, idx = 0; i < ab->qmi.mem_seg_count; i++) {
switch (ab->qmi.target_mem[i].type) {
case HOST_DDR_REGION_TYPE:
- hremote_node = of_parse_phandle(dev->of_node, "memory-region", 0);
- if (!hremote_node) {
- ath11k_dbg(ab, ATH11K_DBG_QMI,
- "fail to get hremote_node\n");
- return -ENODEV;
- }
-
- ret = of_address_to_resource(hremote_node, 0, &res);
- of_node_put(hremote_node);
+ ret = of_reserved_mem_region_to_resource(dev->of_node, 0, &res);
if (ret) {
ath11k_dbg(ab, ATH11K_DBG_QMI,
"fail to get reg from hremote\n");
@@ -2095,7 +2086,7 @@ static int ath11k_qmi_assign_target_mem_chunk(struct ath11k_base *ab)
}
if (ath11k_core_coldboot_cal_support(ab)) {
- if (hremote_node) {
+ if (resource_size(&res)) {
ab->qmi.target_mem[idx].paddr =
res.start + host_ddr_sz;
ab->qmi.target_mem[idx].iaddr =
@@ -2557,7 +2548,7 @@ static int ath11k_qmi_m3_load(struct ath11k_base *ab)
GFP_KERNEL);
if (!m3_mem->vaddr) {
ath11k_err(ab, "failed to allocate memory for M3 with size %zu\n",
- fw->size);
+ m3_len);
ret = -ENOMEM;
goto out;
}
diff --git a/drivers/net/wireless/ath/ath12k/ahb.c b/drivers/net/wireless/ath/ath12k/ahb.c
index 3b983f4e3268..b30527c402f6 100644
--- a/drivers/net/wireless/ath/ath12k/ahb.c
+++ b/drivers/net/wireless/ath/ath12k/ahb.c
@@ -414,7 +414,7 @@ static int ath12k_ahb_power_up(struct ath12k_base *ab)
goto err_fw2;
}
- ret = qcom_mdt_load_no_init(dev, fw2, fw2_name, pasid, mem_region, mem_phys,
+ ret = qcom_mdt_load_no_init(dev, fw2, fw2_name, mem_region, mem_phys,
mem_size, &mem_phys);
if (ret) {
ath12k_err(ab, "Failed to load MDT segments: %d\n", ret);
diff --git a/drivers/net/wireless/ath/ath12k/ce.c b/drivers/net/wireless/ath/ath12k/ce.c
index f93a419abf65..9a63608838ac 100644
--- a/drivers/net/wireless/ath/ath12k/ce.c
+++ b/drivers/net/wireless/ath/ath12k/ce.c
@@ -392,7 +392,8 @@ static int ath12k_ce_rx_post_pipe(struct ath12k_ce_pipe *pipe)
ret = ath12k_ce_rx_buf_enqueue_pipe(pipe, skb, paddr);
if (ret) {
- ath12k_warn(ab, "failed to enqueue rx buf: %d\n", ret);
+ ath12k_dbg(ab, ATH12K_DBG_CE, "failed to enqueue rx buf: %d\n",
+ ret);
dma_unmap_single(ab->dev, paddr,
skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
@@ -478,7 +479,7 @@ static void ath12k_ce_recv_process_cb(struct ath12k_ce_pipe *pipe)
}
while ((skb = __skb_dequeue(&list))) {
- ath12k_dbg(ab, ATH12K_DBG_AHB, "rx ce pipe %d len %d\n",
+ ath12k_dbg(ab, ATH12K_DBG_CE, "rx ce pipe %d len %d\n",
pipe->pipe_num, skb->len);
pipe->recv_cb(ab, skb);
}
diff --git a/drivers/net/wireless/ath/ath12k/core.h b/drivers/net/wireless/ath/ath12k/core.h
index 519f826f56c8..3d1956966a48 100644
--- a/drivers/net/wireless/ath/ath12k/core.h
+++ b/drivers/net/wireless/ath/ath12k/core.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#ifndef ATH12K_CORE_H
@@ -72,6 +72,9 @@
#define ATH12K_MAX_MLO_PEERS 256
#define ATH12K_MLO_PEER_ID_INVALID 0xFFFF
+#define ATH12K_INVALID_RSSI_FULL -1
+#define ATH12K_INVALID_RSSI_EMPTY -128
+
enum ath12k_bdf_search {
ATH12K_BDF_SEARCH_DEFAULT,
ATH12K_BDF_SEARCH_BUS_AND_BOARD,
@@ -560,6 +563,7 @@ struct ath12k_link_sta {
u32 bw_prev;
u32 peer_nss;
s8 rssi_beacon;
+ s8 chain_signal[IEEE80211_MAX_CHAINS];
/* For now the assoc link will be considered primary */
bool is_assoc_link;
@@ -730,6 +734,7 @@ struct ath12k {
u32 txpower_scale;
u32 power_scale;
u32 chan_tx_pwr;
+ u32 rts_threshold;
u32 num_stations;
u32 max_num_stations;
diff --git a/drivers/net/wireless/ath/ath12k/debug.h b/drivers/net/wireless/ath/ath12k/debug.h
index 48916e4e1f60..bf254e43a68d 100644
--- a/drivers/net/wireless/ath/ath12k/debug.h
+++ b/drivers/net/wireless/ath/ath12k/debug.h
@@ -26,6 +26,7 @@ enum ath12k_debug_mask {
ATH12K_DBG_DP_TX = 0x00002000,
ATH12K_DBG_DP_RX = 0x00004000,
ATH12K_DBG_WOW = 0x00008000,
+ ATH12K_DBG_CE = 0x00010000,
ATH12K_DBG_ANY = 0xffffffff,
};
diff --git a/drivers/net/wireless/ath/ath12k/dp.c b/drivers/net/wireless/ath/ath12k/dp.c
index f893fce6d9bd..4a54b8c35311 100644
--- a/drivers/net/wireless/ath/ath12k/dp.c
+++ b/drivers/net/wireless/ath/ath12k/dp.c
@@ -1745,7 +1745,9 @@ int ath12k_dp_alloc(struct ath12k_base *ab)
INIT_LIST_HEAD(&dp->reo_cmd_list);
INIT_LIST_HEAD(&dp->reo_cmd_cache_flush_list);
+ INIT_LIST_HEAD(&dp->reo_cmd_update_rx_queue_list);
spin_lock_init(&dp->reo_cmd_lock);
+ spin_lock_init(&dp->reo_rxq_flush_lock);
dp->reo_cmd_cache_flush_count = 0;
dp->idle_link_rbm = ath12k_dp_get_idle_link_rbm(ab);
diff --git a/drivers/net/wireless/ath/ath12k/dp.h b/drivers/net/wireless/ath/ath12k/dp.h
index 7baa48b86f7a..4ffec6ad7d8d 100644
--- a/drivers/net/wireless/ath/ath12k/dp.h
+++ b/drivers/net/wireless/ath/ath12k/dp.h
@@ -184,7 +184,7 @@ struct ath12k_pdev_dp {
#define DP_REO_REINJECT_RING_SIZE 32
#define DP_RX_RELEASE_RING_SIZE 1024
#define DP_REO_EXCEPTION_RING_SIZE 128
-#define DP_REO_CMD_RING_SIZE 128
+#define DP_REO_CMD_RING_SIZE 256
#define DP_REO_STATUS_RING_SIZE 2048
#define DP_RXDMA_BUF_RING_SIZE 4096
#define DP_RX_MAC_BUF_RING_SIZE 2048
@@ -389,15 +389,19 @@ struct ath12k_dp {
struct dp_srng reo_dst_ring[DP_REO_DST_RING_MAX];
struct dp_tx_ring tx_ring[DP_TCL_NUM_RING_MAX];
struct hal_wbm_idle_scatter_list scatter_list[DP_IDLE_SCATTER_BUFS_MAX];
- struct list_head reo_cmd_list;
+ struct list_head reo_cmd_update_rx_queue_list;
struct list_head reo_cmd_cache_flush_list;
u32 reo_cmd_cache_flush_count;
-
/* protects access to below fields,
- * - reo_cmd_list
+ * - reo_cmd_update_rx_queue_list
* - reo_cmd_cache_flush_list
* - reo_cmd_cache_flush_count
*/
+ spinlock_t reo_rxq_flush_lock;
+ struct list_head reo_cmd_list;
+ /* protects access to below fields,
+ * - reo_cmd_list
+ */
spinlock_t reo_cmd_lock;
struct ath12k_hp_update_timer reo_cmd_timer;
struct ath12k_hp_update_timer tx_ring_timer[DP_TCL_NUM_RING_MAX];
diff --git a/drivers/net/wireless/ath/ath12k/dp_mon.c b/drivers/net/wireless/ath/ath12k/dp_mon.c
index 8189e52ed007..009c49502148 100644
--- a/drivers/net/wireless/ath/ath12k/dp_mon.c
+++ b/drivers/net/wireless/ath/ath12k/dp_mon.c
@@ -1441,6 +1441,34 @@ static void ath12k_dp_mon_parse_rx_msdu_end_err(u32 info, u32 *errmap)
}
static void
+ath12k_parse_cmn_usr_info(const struct hal_phyrx_common_user_info *cmn_usr_info,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ struct hal_rx_radiotap_eht *eht = &ppdu_info->eht_info.eht;
+ u32 known, data, cp_setting, ltf_size;
+
+ known = __le32_to_cpu(eht->known);
+ known |= IEEE80211_RADIOTAP_EHT_KNOWN_GI |
+ IEEE80211_RADIOTAP_EHT_KNOWN_EHT_LTF;
+ eht->known = cpu_to_le32(known);
+
+ cp_setting = le32_get_bits(cmn_usr_info->info0,
+ HAL_RX_CMN_USR_INFO0_CP_SETTING);
+ ltf_size = le32_get_bits(cmn_usr_info->info0,
+ HAL_RX_CMN_USR_INFO0_LTF_SIZE);
+
+ data = __le32_to_cpu(eht->data[0]);
+ data |= u32_encode_bits(cp_setting, IEEE80211_RADIOTAP_EHT_DATA0_GI);
+ data |= u32_encode_bits(ltf_size, IEEE80211_RADIOTAP_EHT_DATA0_LTF);
+ eht->data[0] = cpu_to_le32(data);
+
+ if (!ppdu_info->ltf_size)
+ ppdu_info->ltf_size = ltf_size;
+ if (!ppdu_info->gi)
+ ppdu_info->gi = cp_setting;
+}
+
+static void
ath12k_dp_mon_parse_status_msdu_end(struct ath12k_mon_data *pmon,
const struct hal_rx_msdu_end *msdu_end)
{
@@ -1627,25 +1655,22 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k *ar,
const struct hal_rx_phyrx_rssi_legacy_info *rssi = tlv_data;
info[0] = __le32_to_cpu(rssi->info0);
- info[1] = __le32_to_cpu(rssi->info1);
+ info[2] = __le32_to_cpu(rssi->info2);
/* TODO: Please note that the combined rssi will not be accurate
* in MU case. Rssi in MU needs to be retrieved from
* PHYRX_OTHER_RECEIVE_INFO TLV.
*/
ppdu_info->rssi_comb =
- u32_get_bits(info[1],
- HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO1_RSSI_COMB);
+ u32_get_bits(info[2],
+ HAL_RX_RSSI_LEGACY_INFO_INFO2_RSSI_COMB_PPDU);
ppdu_info->bw = u32_get_bits(info[0],
- HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RX_BW);
+ HAL_RX_RSSI_LEGACY_INFO_INFO0_RX_BW);
break;
}
- case HAL_PHYRX_OTHER_RECEIVE_INFO: {
- const struct hal_phyrx_common_user_info *cmn_usr_info = tlv_data;
-
- ppdu_info->gi = le32_get_bits(cmn_usr_info->info0,
- HAL_RX_PHY_CMN_USER_INFO0_GI);
+ case HAL_PHYRX_COMMON_USER_INFO: {
+ ath12k_parse_cmn_usr_info(tlv_data, ppdu_info);
break;
}
case HAL_RX_PPDU_START_USER_INFO:
@@ -2154,8 +2179,12 @@ static void ath12k_dp_mon_update_radiotap(struct ath12k *ar,
spin_unlock_bh(&ar->data_lock);
rxs->flag |= RX_FLAG_MACTIME_START;
- rxs->signal = ppduinfo->rssi_comb + noise_floor;
rxs->nss = ppduinfo->nss + 1;
+ if (test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
+ ar->ab->wmi_ab.svc_map))
+ rxs->signal = ppduinfo->rssi_comb;
+ else
+ rxs->signal = ppduinfo->rssi_comb + noise_floor;
if (ppduinfo->userstats[ppduinfo->userid].ampdu_present) {
rxs->flag |= RX_FLAG_AMPDU_DETAILS;
@@ -2244,6 +2273,7 @@ static void ath12k_dp_mon_update_radiotap(struct ath12k *ar,
static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *napi,
struct sk_buff *msdu,
+ const struct hal_rx_mon_ppdu_info *ppduinfo,
struct ieee80211_rx_status *status,
u8 decap)
{
@@ -2257,7 +2287,6 @@ static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct
struct ieee80211_sta *pubsta = NULL;
struct ath12k_peer *peer;
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
- struct ath12k_dp_rx_info rx_info;
bool is_mcbc = rxcb->is_mcbc;
bool is_eapol_tkip = rxcb->is_eapol;
@@ -2271,8 +2300,7 @@ static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct
}
spin_lock_bh(&ar->ab->base_lock);
- rx_info.addr2_present = false;
- peer = ath12k_dp_rx_h_find_peer(ar->ab, msdu, &rx_info);
+ peer = ath12k_peer_find_by_id(ar->ab, ppduinfo->peer_id);
if (peer && peer->sta) {
pubsta = peer->sta;
if (pubsta->valid_links) {
@@ -2365,7 +2393,7 @@ static int ath12k_dp_mon_rx_deliver(struct ath12k *ar,
decap = mon_mpdu->decap_format;
ath12k_dp_mon_update_radiotap(ar, ppduinfo, mon_skb, rxs);
- ath12k_dp_mon_rx_deliver_msdu(ar, napi, mon_skb, rxs, decap);
+ ath12k_dp_mon_rx_deliver_msdu(ar, napi, mon_skb, ppduinfo, rxs, decap);
mon_skb = skb_next;
} while (mon_skb);
rxs->flag = 0;
diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c
index 8ab91273592c..5e5c14a70316 100644
--- a/drivers/net/wireless/ath/ath12k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath12k/dp_rx.c
@@ -21,6 +21,9 @@
#define ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)
+static int ath12k_dp_rx_tid_delete_handler(struct ath12k_base *ab,
+ struct ath12k_dp_rx_tid_rxq *rx_tid);
+
static enum hal_encrypt_type ath12k_dp_rx_h_enctype(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
@@ -581,49 +584,71 @@ static int ath12k_dp_rx_pdev_srng_alloc(struct ath12k *ar)
return 0;
}
+static void ath12k_dp_init_rx_tid_rxq(struct ath12k_dp_rx_tid_rxq *rx_tid_rxq,
+ struct ath12k_dp_rx_tid *rx_tid)
+{
+ rx_tid_rxq->tid = rx_tid->tid;
+ rx_tid_rxq->active = rx_tid->active;
+ rx_tid_rxq->qbuf = rx_tid->qbuf;
+}
+
+static void ath12k_dp_rx_tid_cleanup(struct ath12k_base *ab,
+ struct ath12k_reoq_buf *tid_qbuf)
+{
+ if (tid_qbuf->vaddr) {
+ dma_unmap_single(ab->dev, tid_qbuf->paddr_aligned,
+ tid_qbuf->size, DMA_BIDIRECTIONAL);
+ kfree(tid_qbuf->vaddr);
+ tid_qbuf->vaddr = NULL;
+ }
+}
+
void ath12k_dp_rx_reo_cmd_list_cleanup(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
struct ath12k_dp_rx_reo_cmd *cmd, *tmp;
struct ath12k_dp_rx_reo_cache_flush_elem *cmd_cache, *tmp_cache;
+ struct dp_reo_update_rx_queue_elem *cmd_queue, *tmp_queue;
- spin_lock_bh(&dp->reo_cmd_lock);
- list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
- list_del(&cmd->list);
- dma_unmap_single(ab->dev, cmd->data.qbuf.paddr_aligned,
- cmd->data.qbuf.size, DMA_BIDIRECTIONAL);
- kfree(cmd->data.qbuf.vaddr);
- kfree(cmd);
+ spin_lock_bh(&dp->reo_rxq_flush_lock);
+ list_for_each_entry_safe(cmd_queue, tmp_queue, &dp->reo_cmd_update_rx_queue_list,
+ list) {
+ list_del(&cmd_queue->list);
+ ath12k_dp_rx_tid_cleanup(ab, &cmd_queue->rx_tid.qbuf);
+ kfree(cmd_queue);
}
-
list_for_each_entry_safe(cmd_cache, tmp_cache,
&dp->reo_cmd_cache_flush_list, list) {
list_del(&cmd_cache->list);
dp->reo_cmd_cache_flush_count--;
- dma_unmap_single(ab->dev, cmd_cache->data.qbuf.paddr_aligned,
- cmd_cache->data.qbuf.size, DMA_BIDIRECTIONAL);
- kfree(cmd_cache->data.qbuf.vaddr);
+ ath12k_dp_rx_tid_cleanup(ab, &cmd_cache->data.qbuf);
kfree(cmd_cache);
}
+ spin_unlock_bh(&dp->reo_rxq_flush_lock);
+
+ spin_lock_bh(&dp->reo_cmd_lock);
+ list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
+ list_del(&cmd->list);
+ ath12k_dp_rx_tid_cleanup(ab, &cmd->data.qbuf);
+ kfree(cmd);
+ }
spin_unlock_bh(&dp->reo_cmd_lock);
}
static void ath12k_dp_reo_cmd_free(struct ath12k_dp *dp, void *ctx,
enum hal_reo_cmd_status status)
{
- struct ath12k_dp_rx_tid *rx_tid = ctx;
+ struct ath12k_dp_rx_tid_rxq *rx_tid = ctx;
if (status != HAL_REO_CMD_SUCCESS)
ath12k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
rx_tid->tid, status);
- dma_unmap_single(dp->ab->dev, rx_tid->qbuf.paddr_aligned, rx_tid->qbuf.size,
- DMA_BIDIRECTIONAL);
- kfree(rx_tid->qbuf.vaddr);
- rx_tid->qbuf.vaddr = NULL;
+ ath12k_dp_rx_tid_cleanup(dp->ab, &rx_tid->qbuf);
}
-static int ath12k_dp_reo_cmd_send(struct ath12k_base *ab, struct ath12k_dp_rx_tid *rx_tid,
+static int ath12k_dp_reo_cmd_send(struct ath12k_base *ab,
+ struct ath12k_dp_rx_tid_rxq *rx_tid,
enum hal_reo_cmd_type type,
struct ath12k_hal_reo_cmd *cmd,
void (*cb)(struct ath12k_dp *dp, void *ctx,
@@ -668,51 +693,95 @@ static int ath12k_dp_reo_cmd_send(struct ath12k_base *ab, struct ath12k_dp_rx_ti
return 0;
}
-static void ath12k_dp_reo_cache_flush(struct ath12k_base *ab,
- struct ath12k_dp_rx_tid *rx_tid)
+static int ath12k_dp_reo_cache_flush(struct ath12k_base *ab,
+ struct ath12k_dp_rx_tid_rxq *rx_tid)
{
struct ath12k_hal_reo_cmd cmd = {};
- unsigned long tot_desc_sz, desc_sz;
int ret;
- tot_desc_sz = rx_tid->qbuf.size;
- desc_sz = ath12k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);
-
- while (tot_desc_sz > desc_sz) {
- tot_desc_sz -= desc_sz;
- cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned + tot_desc_sz);
- cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
- ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
- HAL_REO_CMD_FLUSH_CACHE, &cmd,
- NULL);
- if (ret)
- ath12k_warn(ab,
- "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n",
- rx_tid->tid, ret);
- }
-
- memset(&cmd, 0, sizeof(cmd));
cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned);
cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
- cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
+ /* HAL_REO_CMD_FLG_FLUSH_FWD_ALL_MPDUS - all pending MPDUs
+ * in the bitmap will be forwarded/flushed to REO output rings
+ */
+ cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS |
+ HAL_REO_CMD_FLG_FLUSH_FWD_ALL_MPDUS;
+
+ /* For all QoS TIDs (except NON_QOS), the driver allocates a maximum
+ * window size of 1024. In such cases, the driver can issue a single
+ * 1KB descriptor flush command instead of sending multiple 128-byte
+ * flush commands for each QoS TID, improving efficiency.
+ */
+
+ if (rx_tid->tid != HAL_DESC_REO_NON_QOS_TID)
+ cmd.flag |= HAL_REO_CMD_FLG_FLUSH_QUEUE_1K_DESC;
+
ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
HAL_REO_CMD_FLUSH_CACHE,
&cmd, ath12k_dp_reo_cmd_free);
- if (ret) {
- ath12k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
- rx_tid->tid, ret);
- dma_unmap_single(ab->dev, rx_tid->qbuf.paddr_aligned, rx_tid->qbuf.size,
- DMA_BIDIRECTIONAL);
- kfree(rx_tid->qbuf.vaddr);
- rx_tid->qbuf.vaddr = NULL;
+ return ret;
+}
+
+static void ath12k_peer_rx_tid_qref_reset(struct ath12k_base *ab, u16 peer_id, u16 tid)
+{
+ struct ath12k_reo_queue_ref *qref;
+ struct ath12k_dp *dp = &ab->dp;
+ bool ml_peer = false;
+
+ if (!ab->hw_params->reoq_lut_support)
+ return;
+
+ if (peer_id & ATH12K_PEER_ML_ID_VALID) {
+ peer_id &= ~ATH12K_PEER_ML_ID_VALID;
+ ml_peer = true;
+ }
+
+ if (ml_peer)
+ qref = (struct ath12k_reo_queue_ref *)dp->ml_reoq_lut.vaddr +
+ (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
+ else
+ qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
+ (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
+
+ qref->info0 = u32_encode_bits(0, BUFFER_ADDR_INFO0_ADDR);
+ qref->info1 = u32_encode_bits(0, BUFFER_ADDR_INFO1_ADDR) |
+ u32_encode_bits(tid, DP_REO_QREF_NUM);
+}
+
+static void ath12k_dp_rx_process_reo_cmd_update_rx_queue_list(struct ath12k_dp *dp)
+{
+ struct ath12k_base *ab = dp->ab;
+ struct dp_reo_update_rx_queue_elem *elem, *tmp;
+
+ spin_lock_bh(&dp->reo_rxq_flush_lock);
+
+ list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_update_rx_queue_list, list) {
+ if (elem->rx_tid.active)
+ continue;
+
+ if (ath12k_dp_rx_tid_delete_handler(ab, &elem->rx_tid))
+ break;
+
+ ath12k_peer_rx_tid_qref_reset(ab,
+ elem->is_ml_peer ? elem->ml_peer_id :
+ elem->peer_id,
+ elem->rx_tid.tid);
+
+ if (ab->hw_params->reoq_lut_support)
+ ath12k_hal_reo_shared_qaddr_cache_clear(ab);
+
+ list_del(&elem->list);
+ kfree(elem);
}
+
+ spin_unlock_bh(&dp->reo_rxq_flush_lock);
}
static void ath12k_dp_rx_tid_del_func(struct ath12k_dp *dp, void *ctx,
enum hal_reo_cmd_status status)
{
struct ath12k_base *ab = dp->ab;
- struct ath12k_dp_rx_tid *rx_tid = ctx;
+ struct ath12k_dp_rx_tid_rxq *rx_tid = ctx;
struct ath12k_dp_rx_reo_cache_flush_elem *elem, *tmp;
if (status == HAL_REO_CMD_DRAIN) {
@@ -724,6 +793,13 @@ static void ath12k_dp_rx_tid_del_func(struct ath12k_dp *dp, void *ctx,
return;
}
+ /* Retry the HAL_REO_CMD_UPDATE_RX_QUEUE command for entries
+ * in the pending queue list whose TID is marked inactive
+ */
+ spin_lock_bh(&dp->ab->base_lock);
+ ath12k_dp_rx_process_reo_cmd_update_rx_queue_list(dp);
+ spin_unlock_bh(&dp->ab->base_lock);
+
elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
if (!elem)
goto free_desc;
@@ -731,7 +807,7 @@ static void ath12k_dp_rx_tid_del_func(struct ath12k_dp *dp, void *ctx,
elem->ts = jiffies;
memcpy(&elem->data, rx_tid, sizeof(*rx_tid));
- spin_lock_bh(&dp->reo_cmd_lock);
+ spin_lock_bh(&dp->reo_rxq_flush_lock);
list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
dp->reo_cmd_cache_flush_count++;
@@ -741,32 +817,44 @@ static void ath12k_dp_rx_tid_del_func(struct ath12k_dp *dp, void *ctx,
if (dp->reo_cmd_cache_flush_count > ATH12K_DP_RX_REO_DESC_FREE_THRES ||
time_after(jiffies, elem->ts +
msecs_to_jiffies(ATH12K_DP_RX_REO_DESC_FREE_TIMEOUT_MS))) {
- list_del(&elem->list);
- dp->reo_cmd_cache_flush_count--;
-
- /* Unlock the reo_cmd_lock before using ath12k_dp_reo_cmd_send()
- * within ath12k_dp_reo_cache_flush. The reo_cmd_cache_flush_list
- * is used in only two contexts, one is in this function called
- * from napi and the other in ath12k_dp_free during core destroy.
- * Before dp_free, the irqs would be disabled and would wait to
- * synchronize. Hence there wouldn’t be any race against add or
- * delete to this list. Hence unlock-lock is safe here.
+ /* The reo_cmd_cache_flush_list is used in only two contexts,
+ * one is in this function called from napi and the
+ * other in ath12k_dp_free during core destroy.
+ * If the cache flush command succeeds, delete the element from
+ * the cache list; ath12k_dp_rx_reo_cmd_list_cleanup
+ * will be called during core destroy.
*/
- spin_unlock_bh(&dp->reo_cmd_lock);
- ath12k_dp_reo_cache_flush(ab, &elem->data);
+ if (ath12k_dp_reo_cache_flush(ab, &elem->data))
+ break;
+
+ list_del(&elem->list);
+ dp->reo_cmd_cache_flush_count--;
kfree(elem);
- spin_lock_bh(&dp->reo_cmd_lock);
}
}
- spin_unlock_bh(&dp->reo_cmd_lock);
+ spin_unlock_bh(&dp->reo_rxq_flush_lock);
return;
free_desc:
- dma_unmap_single(ab->dev, rx_tid->qbuf.paddr_aligned, rx_tid->qbuf.size,
- DMA_BIDIRECTIONAL);
- kfree(rx_tid->qbuf.vaddr);
- rx_tid->qbuf.vaddr = NULL;
+ ath12k_dp_rx_tid_cleanup(ab, &rx_tid->qbuf);
+}
+
+static int ath12k_dp_rx_tid_delete_handler(struct ath12k_base *ab,
+ struct ath12k_dp_rx_tid_rxq *rx_tid)
+{
+ struct ath12k_hal_reo_cmd cmd = {};
+
+ cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
+ cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned);
+ cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
+ cmd.upd0 |= HAL_REO_CMD_UPD0_VLD;
+ /* Flush cache failures have been observed; set the VLD bit during delete to avoid them */
+ cmd.upd1 |= HAL_REO_CMD_UPD1_VLD;
+
+ return ath12k_dp_reo_cmd_send(ab, rx_tid,
+ HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
+ ath12k_dp_rx_tid_del_func);
}
static void ath12k_peer_rx_tid_qref_setup(struct ath12k_base *ab, u16 peer_id, u16 tid,
@@ -799,64 +887,38 @@ static void ath12k_peer_rx_tid_qref_setup(struct ath12k_base *ab, u16 peer_id, u
ath12k_hal_reo_shared_qaddr_cache_clear(ab);
}
-static void ath12k_peer_rx_tid_qref_reset(struct ath12k_base *ab, u16 peer_id, u16 tid)
+static void ath12k_dp_mark_tid_as_inactive(struct ath12k_dp *dp, int peer_id, u8 tid)
{
- struct ath12k_reo_queue_ref *qref;
- struct ath12k_dp *dp = &ab->dp;
- bool ml_peer = false;
-
- if (!ab->hw_params->reoq_lut_support)
- return;
+ struct dp_reo_update_rx_queue_elem *elem;
+ struct ath12k_dp_rx_tid_rxq *rx_tid;
- if (peer_id & ATH12K_PEER_ML_ID_VALID) {
- peer_id &= ~ATH12K_PEER_ML_ID_VALID;
- ml_peer = true;
+ spin_lock_bh(&dp->reo_rxq_flush_lock);
+ list_for_each_entry(elem, &dp->reo_cmd_update_rx_queue_list, list) {
+ if (elem->peer_id == peer_id) {
+ rx_tid = &elem->rx_tid;
+ if (rx_tid->tid == tid) {
+ rx_tid->active = false;
+ break;
+ }
+ }
}
-
- if (ml_peer)
- qref = (struct ath12k_reo_queue_ref *)dp->ml_reoq_lut.vaddr +
- (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
- else
- qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
- (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
-
- qref->info0 = u32_encode_bits(0, BUFFER_ADDR_INFO0_ADDR);
- qref->info1 = u32_encode_bits(0, BUFFER_ADDR_INFO1_ADDR) |
- u32_encode_bits(tid, DP_REO_QREF_NUM);
+ spin_unlock_bh(&dp->reo_rxq_flush_lock);
}
void ath12k_dp_rx_peer_tid_delete(struct ath12k *ar,
struct ath12k_peer *peer, u8 tid)
{
- struct ath12k_hal_reo_cmd cmd = {};
struct ath12k_dp_rx_tid *rx_tid = &peer->rx_tid[tid];
- int ret;
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_dp *dp = &ab->dp;
if (!rx_tid->active)
return;
- cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
- cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned);
- cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
- cmd.upd0 = HAL_REO_CMD_UPD0_VLD;
- ret = ath12k_dp_reo_cmd_send(ar->ab, rx_tid,
- HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
- ath12k_dp_rx_tid_del_func);
- if (ret) {
- ath12k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
- tid, ret);
- dma_unmap_single(ar->ab->dev, rx_tid->qbuf.paddr_aligned,
- rx_tid->qbuf.size, DMA_BIDIRECTIONAL);
- kfree(rx_tid->qbuf.vaddr);
- rx_tid->qbuf.vaddr = NULL;
- }
-
- if (peer->mlo)
- ath12k_peer_rx_tid_qref_reset(ar->ab, peer->ml_id, tid);
- else
- ath12k_peer_rx_tid_qref_reset(ar->ab, peer->peer_id, tid);
-
rx_tid->active = false;
+
+ ath12k_dp_mark_tid_as_inactive(dp, peer->peer_id, tid);
+ ath12k_dp_rx_process_reo_cmd_update_rx_queue_list(dp);
}
int ath12k_dp_rx_link_desc_return(struct ath12k_base *ab,
@@ -941,9 +1003,12 @@ static int ath12k_peer_rx_tid_reo_update(struct ath12k *ar,
{
struct ath12k_hal_reo_cmd cmd = {};
int ret;
+ struct ath12k_dp_rx_tid_rxq rx_tid_rxq;
- cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned);
- cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
+ ath12k_dp_init_rx_tid_rxq(&rx_tid_rxq, rx_tid);
+
+ cmd.addr_lo = lower_32_bits(rx_tid_rxq.qbuf.paddr_aligned);
+ cmd.addr_hi = upper_32_bits(rx_tid_rxq.qbuf.paddr_aligned);
cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
cmd.ba_window_size = ba_win_sz;
@@ -953,12 +1018,12 @@ static int ath12k_peer_rx_tid_reo_update(struct ath12k *ar,
cmd.upd2 = u32_encode_bits(ssn, HAL_REO_CMD_UPD2_SSN);
}
- ret = ath12k_dp_reo_cmd_send(ar->ab, rx_tid,
+ ret = ath12k_dp_reo_cmd_send(ar->ab, &rx_tid_rxq,
HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
NULL);
if (ret) {
ath12k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n",
- rx_tid->tid, ret);
+ rx_tid_rxq.tid, ret);
return ret;
}
@@ -1018,6 +1083,29 @@ static int ath12k_dp_rx_assign_reoq(struct ath12k_base *ab,
return 0;
}
+static int ath12k_dp_prepare_reo_update_elem(struct ath12k_dp *dp,
+ struct ath12k_peer *peer,
+ struct ath12k_dp_rx_tid *rx_tid)
+{
+ struct dp_reo_update_rx_queue_elem *elem;
+
+ elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
+ if (!elem)
+ return -ENOMEM;
+
+ elem->peer_id = peer->peer_id;
+ elem->is_ml_peer = peer->mlo;
+ elem->ml_peer_id = peer->ml_id;
+
+ ath12k_dp_init_rx_tid_rxq(&elem->rx_tid, rx_tid);
+
+ spin_lock_bh(&dp->reo_rxq_flush_lock);
+ list_add_tail(&elem->list, &dp->reo_cmd_update_rx_queue_list);
+ spin_unlock_bh(&dp->reo_rxq_flush_lock);
+
+ return 0;
+}
+
int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id,
u8 tid, u32 ba_win_sz, u16 ssn,
enum hal_pn_type pn_type)
@@ -1098,6 +1186,19 @@ int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_
return ret;
}
+ /* Pre-allocate the update_rxq_list entry for the corresponding tid.
+ * It is consumed during tid delete. The reason for not allocating at
+ * delete time is that an allocation failure there would leave us unable
+ * to free the tid vaddr/paddr, leaking the queue buffer.
+ */
+ ret = ath12k_dp_prepare_reo_update_elem(dp, peer, rx_tid);
+ if (ret) {
+ ath12k_warn(ab, "failed to alloc update_rxq_list for rx tid %u\n", tid);
+ ath12k_dp_rx_tid_cleanup(ab, &rx_tid->qbuf);
+ spin_unlock_bh(&ab->base_lock);
+ return ret;
+ }
+
paddr_aligned = rx_tid->qbuf.paddr_aligned;
if (ab->hw_params->reoq_lut_support) {
/* Update the REO queue LUT at the corresponding peer id
@@ -1207,6 +1308,7 @@ int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_link_vif *arvif,
struct ath12k_hal_reo_cmd cmd = {};
struct ath12k_peer *peer;
struct ath12k_dp_rx_tid *rx_tid;
+ struct ath12k_dp_rx_tid_rxq rx_tid_rxq;
u8 tid;
int ret = 0;
@@ -1253,9 +1355,11 @@ int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_link_vif *arvif,
rx_tid = &peer->rx_tid[tid];
if (!rx_tid->active)
continue;
- cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned);
- cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
- ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
+
+ ath12k_dp_init_rx_tid_rxq(&rx_tid_rxq, rx_tid);
+ cmd.addr_lo = lower_32_bits(rx_tid_rxq.qbuf.paddr_aligned);
+ cmd.addr_hi = upper_32_bits(rx_tid_rxq.qbuf.paddr_aligned);
+ ret = ath12k_dp_reo_cmd_send(ab, &rx_tid_rxq,
HAL_REO_CMD_UPDATE_RX_QUEUE,
&cmd, NULL);
if (ret) {
@@ -2533,6 +2637,8 @@ void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct ath12k_dp_rx_info *rx_info)
channel_num = meta_data;
center_freq = meta_data >> 16;
+ rx_status->band = NUM_NL80211_BANDS;
+
if (center_freq >= ATH12K_MIN_6GHZ_FREQ &&
center_freq <= ATH12K_MAX_6GHZ_FREQ) {
rx_status->band = NL80211_BAND_6GHZ;
@@ -2541,21 +2647,33 @@ void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct ath12k_dp_rx_info *rx_info)
rx_status->band = NL80211_BAND_2GHZ;
} else if (channel_num >= 36 && channel_num <= 173) {
rx_status->band = NL80211_BAND_5GHZ;
- } else {
+ }
+
+ if (unlikely(rx_status->band == NUM_NL80211_BANDS ||
+ !ath12k_ar_to_hw(ar)->wiphy->bands[rx_status->band])) {
+ ath12k_warn(ar->ab, "sband is NULL for status band %d channel_num %d center_freq %d pdev_id %d\n",
+ rx_status->band, channel_num, center_freq, ar->pdev_idx);
+
spin_lock_bh(&ar->data_lock);
channel = ar->rx_channel;
if (channel) {
rx_status->band = channel->band;
channel_num =
ieee80211_frequency_to_channel(channel->center_freq);
+ rx_status->freq = ieee80211_channel_to_frequency(channel_num,
+ rx_status->band);
+ } else {
+ ath12k_err(ar->ab, "unable to determine channel, band for rx packet");
}
spin_unlock_bh(&ar->data_lock);
+ goto h_rate;
}
if (rx_status->band != NL80211_BAND_6GHZ)
rx_status->freq = ieee80211_channel_to_frequency(channel_num,
rx_status->band);
+h_rate:
ath12k_dp_rx_h_rate(ar, rx_info);
}
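The rewritten flow seeds rx_status->band with NUM_NL80211_BANDS as a "not yet known" sentinel and only falls back to the cached rx_channel when neither the center frequency nor the channel number resolves a band. A compact sketch of that sentinel-and-fallback logic (frequency bounds illustrative, not the ATH12K_*_FREQ limits):

#include <stdio.h>

enum band { BAND_2GHZ, BAND_5GHZ, BAND_6GHZ, NUM_BANDS /* sentinel */ };

/* Illustrative bounds only; the driver uses its own frequency limits. */
static enum band band_from_freq(int center_freq, int channel_num)
{
	enum band band = NUM_BANDS;	/* sentinel: "not yet known" */

	if (center_freq >= 5935 && center_freq <= 7115)
		band = BAND_6GHZ;
	else if (channel_num >= 1 && channel_num <= 14)
		band = BAND_2GHZ;
	else if (channel_num >= 36 && channel_num <= 173)
		band = BAND_5GHZ;

	return band;
}

int main(void)
{
	enum band b = band_from_freq(0, 200);	/* resolves nothing */

	if (b == NUM_BANDS)
		puts("fall back to the cached rx channel");
	else
		printf("band %d\n", b);
	return 0;
}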
diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.h b/drivers/net/wireless/ath/ath12k/dp_rx.h
index e971a314bd2d..69d0a36a91d8 100644
--- a/drivers/net/wireless/ath/ath12k/dp_rx.h
+++ b/drivers/net/wireless/ath/ath12k/dp_rx.h
@@ -31,15 +31,29 @@ struct ath12k_dp_rx_tid {
struct ath12k_base *ab;
};
+struct ath12k_dp_rx_tid_rxq {
+ u8 tid;
+ bool active;
+ struct ath12k_reoq_buf qbuf;
+};
+
struct ath12k_dp_rx_reo_cache_flush_elem {
struct list_head list;
- struct ath12k_dp_rx_tid data;
+ struct ath12k_dp_rx_tid_rxq data;
unsigned long ts;
};
+struct dp_reo_update_rx_queue_elem {
+ struct list_head list;
+ struct ath12k_dp_rx_tid_rxq rx_tid;
+ int peer_id;
+ bool is_ml_peer;
+ u16 ml_peer_id;
+};
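struct ath12k_dp_rx_tid_rxq above is a trimmed copy of struct ath12k_dp_rx_tid, so queued REO commands and flush elements carry their own snapshot instead of pointing into the peer's tid array. A tiny sketch of the by-value snapshot idea (types hypothetical):

#include <stdio.h>

struct tid_snapshot {
	unsigned char tid;
	unsigned long long paddr;
};

struct queued_cmd {
	struct tid_snapshot data;	/* a copy, not a pointer */
};

int main(void)
{
	struct tid_snapshot live = { .tid = 3, .paddr = 0x1000 };
	struct queued_cmd cmd = { .data = live };

	live.paddr = 0;	/* the peer-side state may change or be freed... */
	printf("tid %u paddr %#llx\n", cmd.data.tid, cmd.data.paddr);
	return 0;	/* ...but the queued snapshot stays valid */
}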
+
struct ath12k_dp_rx_reo_cmd {
struct list_head list;
- struct ath12k_dp_rx_tid data;
+ struct ath12k_dp_rx_tid_rxq data;
int cmd_num;
void (*handler)(struct ath12k_dp *dp, void *ctx,
enum hal_reo_cmd_status status);
diff --git a/drivers/net/wireless/ath/ath12k/hal.h b/drivers/net/wireless/ath/ath12k/hal.h
index c1750b5dc03c..efe00e167998 100644
--- a/drivers/net/wireless/ath/ath12k/hal.h
+++ b/drivers/net/wireless/ath/ath12k/hal.h
@@ -832,6 +832,7 @@ enum hal_rx_buf_return_buf_manager {
#define HAL_REO_CMD_FLG_FLUSH_ALL BIT(6)
#define HAL_REO_CMD_FLG_UNBLK_RESOURCE BIT(7)
#define HAL_REO_CMD_FLG_UNBLK_CACHE BIT(8)
+#define HAL_REO_CMD_FLG_FLUSH_QUEUE_1K_DESC BIT(9)
/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO0_UPD_* fields */
#define HAL_REO_CMD_UPD0_RX_QUEUE_NUM BIT(8)
diff --git a/drivers/net/wireless/ath/ath12k/hal_desc.h b/drivers/net/wireless/ath/ath12k/hal_desc.h
index 0173f731bfef..13ddac4a9412 100644
--- a/drivers/net/wireless/ath/ath12k/hal_desc.h
+++ b/drivers/net/wireless/ath/ath12k/hal_desc.h
@@ -1225,6 +1225,7 @@ struct hal_reo_flush_queue {
#define HAL_REO_FLUSH_CACHE_INFO0_FLUSH_WO_INVALIDATE BIT(12)
#define HAL_REO_FLUSH_CACHE_INFO0_BLOCK_CACHE_USAGE BIT(13)
#define HAL_REO_FLUSH_CACHE_INFO0_FLUSH_ALL BIT(14)
+#define HAL_REO_FLUSH_CACHE_INFO0_FLUSH_QUEUE_1K_DESC BIT(15)
struct hal_reo_flush_cache {
struct hal_reo_cmd_hdr cmd;
diff --git a/drivers/net/wireless/ath/ath12k/hal_rx.c b/drivers/net/wireless/ath/ath12k/hal_rx.c
index 48aa48c48606..669096278fdd 100644
--- a/drivers/net/wireless/ath/ath12k/hal_rx.c
+++ b/drivers/net/wireless/ath/ath12k/hal_rx.c
@@ -89,6 +89,9 @@ static int ath12k_hal_reo_cmd_flush_cache(struct ath12k_hal *hal,
if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_ALL)
desc->info0 |= cpu_to_le32(HAL_REO_FLUSH_CACHE_INFO0_FLUSH_ALL);
+ if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_QUEUE_1K_DESC)
+ desc->info0 |= cpu_to_le32(HAL_REO_FLUSH_CACHE_INFO0_FLUSH_QUEUE_1K_DESC);
+
return le32_get_bits(desc->cmd.info0, HAL_REO_CMD_HDR_INFO0_CMD_NUMBER);
}
diff --git a/drivers/net/wireless/ath/ath12k/hal_rx.h b/drivers/net/wireless/ath/ath12k/hal_rx.h
index a3ab588aae19..d1ad7747b82c 100644
--- a/drivers/net/wireless/ath/ath12k/hal_rx.h
+++ b/drivers/net/wireless/ath/ath12k/hal_rx.h
@@ -483,15 +483,16 @@ enum hal_rx_ul_reception_type {
HAL_RECEPTION_TYPE_FRAMELESS
};
-#define HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RECEPTION GENMASK(3, 0)
-#define HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RX_BW GENMASK(7, 5)
-#define HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO1_RSSI_COMB GENMASK(15, 8)
+#define HAL_RX_RSSI_LEGACY_INFO_INFO0_RECEPTION GENMASK(3, 0)
+#define HAL_RX_RSSI_LEGACY_INFO_INFO0_RX_BW GENMASK(7, 5)
+#define HAL_RX_RSSI_LEGACY_INFO_INFO1_RSSI_COMB GENMASK(15, 8)
+#define HAL_RX_RSSI_LEGACY_INFO_INFO2_RSSI_COMB_PPDU GENMASK(7, 0)
struct hal_rx_phyrx_rssi_legacy_info {
__le32 info0;
__le32 rsvd0[39];
__le32 info1;
- __le32 rsvd1;
+ __le32 info2;
} __packed;
#define HAL_RX_MPDU_START_INFO0_PPDU_ID GENMASK(31, 16)
@@ -695,7 +696,8 @@ struct hal_rx_resp_req_info {
#define HAL_RX_MPDU_ERR_MPDU_LEN BIT(6)
#define HAL_RX_MPDU_ERR_UNENCRYPTED_FRAME BIT(7)
-#define HAL_RX_PHY_CMN_USER_INFO0_GI GENMASK(17, 16)
+#define HAL_RX_CMN_USR_INFO0_CP_SETTING GENMASK(17, 16)
+#define HAL_RX_CMN_USR_INFO0_LTF_SIZE GENMASK(19, 18)
struct hal_phyrx_common_user_info {
__le32 rsvd[2];
diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
index 3a3965b79942..1d7b60aa5cb0 100644
--- a/drivers/net/wireless/ath/ath12k/mac.c
+++ b/drivers/net/wireless/ath/ath12k/mac.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include <net/mac80211.h>
@@ -1822,22 +1822,16 @@ void ath12k_mac_handle_beacon(struct ath12k *ar, struct sk_buff *skb)
skb);
}
-static void ath12k_mac_handle_beacon_miss_iter(void *data, u8 *mac,
- struct ieee80211_vif *vif)
+void ath12k_mac_handle_beacon_miss(struct ath12k *ar,
+ struct ath12k_link_vif *arvif)
{
- u32 *vdev_id = data;
- struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
- struct ath12k_link_vif *arvif = &ahvif->deflink;
- struct ieee80211_hw *hw;
-
- if (!arvif->is_created || arvif->vdev_id != *vdev_id)
- return;
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
+ struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
- if (!arvif->is_up)
+ if (!(arvif->is_created && arvif->is_up))
return;
ieee80211_beacon_loss(vif);
- hw = ath12k_ar_to_hw(arvif->ar);
/* Firmware doesn't report beacon loss events repeatedly. If AP probe
* (done by mac80211) succeeds but beacons do not resume then it
@@ -1848,14 +1842,6 @@ static void ath12k_mac_handle_beacon_miss_iter(void *data, u8 *mac,
ATH12K_CONNECTION_LOSS_HZ);
}
-void ath12k_mac_handle_beacon_miss(struct ath12k *ar, u32 vdev_id)
-{
- ieee80211_iterate_active_interfaces_atomic(ath12k_ar_to_hw(ar),
- IEEE80211_IFACE_ITER_NORMAL,
- ath12k_mac_handle_beacon_miss_iter,
- &vdev_id);
-}
-
static void ath12k_mac_vif_sta_connection_loss_work(struct work_struct *work)
{
struct ath12k_link_vif *arvif = container_of(work, struct ath12k_link_vif,
@@ -9860,6 +9846,7 @@ int ath12k_mac_vdev_create(struct ath12k *ar, struct ath12k_link_vif *arvif)
param_id = WMI_VDEV_PARAM_RTS_THRESHOLD;
param_value = hw->wiphy->rts_threshold;
+ ar->rts_threshold = param_value;
ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
param_id, param_value);
if (ret) {
@@ -11240,8 +11227,8 @@ void ath12k_mac_fill_reg_tpc_info(struct ath12k *ar,
struct ieee80211_channel *chan, *temp_chan;
u8 pwr_lvl_idx, num_pwr_levels, pwr_reduction;
bool is_psd_power = false, is_tpe_present = false;
- s8 max_tx_power[ATH12K_NUM_PWR_LEVELS],
- psd_power, tx_power, eirp_power;
+ s8 max_tx_power[ATH12K_NUM_PWR_LEVELS], psd_power, tx_power;
+ s8 eirp_power = 0;
struct ath12k_vif *ahvif = arvif->ahvif;
u16 start_freq, center_freq;
u8 reg_6ghz_power_mode;
@@ -11447,8 +11434,10 @@ static void ath12k_mac_parse_tx_pwr_env(struct ath12k *ar,
tpc_info->num_pwr_levels = max(local_psd->count,
reg_psd->count);
- if (tpc_info->num_pwr_levels > ATH12K_NUM_PWR_LEVELS)
- tpc_info->num_pwr_levels = ATH12K_NUM_PWR_LEVELS;
+ tpc_info->num_pwr_levels =
+ min3(tpc_info->num_pwr_levels,
+ IEEE80211_TPE_PSD_ENTRIES_320MHZ,
+ ATH12K_NUM_PWR_LEVELS);
for (i = 0; i < tpc_info->num_pwr_levels; i++) {
tpc_info->tpe[i] = min(local_psd->power[i],
@@ -11463,8 +11452,10 @@ static void ath12k_mac_parse_tx_pwr_env(struct ath12k *ar,
tpc_info->num_pwr_levels = max(local_non_psd->count,
reg_non_psd->count);
- if (tpc_info->num_pwr_levels > ATH12K_NUM_PWR_LEVELS)
- tpc_info->num_pwr_levels = ATH12K_NUM_PWR_LEVELS;
+ tpc_info->num_pwr_levels =
+ min3(tpc_info->num_pwr_levels,
+ IEEE80211_TPE_EIRP_ENTRIES_320MHZ,
+ ATH12K_NUM_PWR_LEVELS);
for (i = 0; i < tpc_info->num_pwr_levels; i++) {
tpc_info->tpe[i] = min(local_non_psd->power[i],
@@ -11687,16 +11678,32 @@ static int ath12k_mac_op_set_rts_threshold(struct ieee80211_hw *hw,
int radio_idx, u32 value)
{
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct wiphy *wiphy = hw->wiphy;
struct ath12k *ar;
- int param_id = WMI_VDEV_PARAM_RTS_THRESHOLD, ret = 0, i;
+ int param_id = WMI_VDEV_PARAM_RTS_THRESHOLD;
+ int ret = 0, ret_err, i;
lockdep_assert_wiphy(hw->wiphy);
- /* Currently we set the rts threshold value to all the vifs across
- * all radios of the single wiphy.
- * TODO Once support for vif specific RTS threshold in mac80211 is
- * available, ath12k can make use of it.
- */
+ if (radio_idx >= wiphy->n_radio || radio_idx < -1)
+ return -EINVAL;
+
+ if (radio_idx != -1) {
+ /* Update RTS threshold in specified radio */
+ ar = ath12k_ah_to_ar(ah, radio_idx);
+ ret = ath12k_set_vdev_param_to_all_vifs(ar, param_id, value);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to set RTS config for all vdevs of pdev %d",
+ ar->pdev->pdev_id);
+ return ret;
+ }
+
+ ar->rts_threshold = value;
+ return 0;
+ }
+
+ /* radio_idx of -1 means set the RTS threshold on all radios. */
for_each_ar(ah, ar, i) {
ret = ath12k_set_vdev_param_to_all_vifs(ar, param_id, value);
if (ret) {
@@ -11705,6 +11712,25 @@ static int ath12k_mac_op_set_rts_threshold(struct ieee80211_hw *hw,
break;
}
}
+ if (!ret) {
+ /* The new RTS threshold was applied to the vdevs of every radio,
+ * so cache the value for all radios.
+ */
+ for_each_ar(ah, ar, i)
+ ar->rts_threshold = value;
+ return 0;
+ }
+
+ /* RTS threshold config failed, revert to the previous RTS threshold */
+ for (i = i - 1; i >= 0; i--) {
+ ar = ath12k_ah_to_ar(ah, i);
+ ret_err = ath12k_set_vdev_param_to_all_vifs(ar, param_id,
+ ar->rts_threshold);
+ if (ret_err)
+ ath12k_warn(ar->ab,
+ "failed to restore RTS threshold for all vdevs of pdev %d",
+ ar->pdev->pdev_id);
+ }
return ret;
}
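On partial failure the rollback loop above walks the already-updated radios in reverse and restores their cached thresholds. A self-contained sketch of the same best-effort rollback shape (radio count and failure point invented for the demo):

#include <stdio.h>

#define N 3

static int set_param(int idx, int value)
{
	/* Pretend radio 2 rejects the new value. */
	return (idx == 2 && value == 500) ? -1 : 0;
}

int main(void)
{
	int old[N] = { 2347, 2347, 2347 };
	int value = 500, i, ret = 0;

	for (i = 0; i < N; i++) {
		ret = set_param(i, value);
		if (ret)
			break;
	}

	if (!ret) {
		for (i = 0; i < N; i++)
			old[i] = value;	/* commit the cached values */
		return 0;
	}

	/* Best-effort rollback of radios updated before the failure. */
	for (i = i - 1; i >= 0; i--)
		if (set_param(i, old[i]))
			fprintf(stderr, "restore failed on %d\n", i);
	return 1;
}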
@@ -12610,6 +12636,27 @@ static int ath12k_mac_op_get_survey(struct ieee80211_hw *hw, int idx,
return 0;
}
+static void ath12k_mac_put_chain_rssi(struct station_info *sinfo,
+ struct ath12k_link_sta *arsta)
+{
+ s8 rssi;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sinfo->chain_signal); i++) {
+ sinfo->chains &= ~BIT(i);
+ rssi = arsta->chain_signal[i];
+
+ if (rssi != ATH12K_DEFAULT_NOISE_FLOOR &&
+ rssi != ATH12K_INVALID_RSSI_FULL &&
+ rssi != ATH12K_INVALID_RSSI_EMPTY &&
+ rssi != 0) {
+ sinfo->chain_signal[i] = rssi;
+ sinfo->chains |= BIT(i);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL);
+ }
+ }
+}
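ath12k_mac_put_chain_rssi() above publishes a chain's signal only when it is none of the invalid sentinels. A compact sketch of that validity filter; the sentinel values here are placeholders, not the driver's ATH12K_* constants:

#include <stdio.h>

#define DEFAULT_NOISE_FLOOR	(-95)	/* illustrative sentinels */
#define INVALID_RSSI_FULL	(-1)
#define INVALID_RSSI_EMPTY	(-128)

static int chain_rssi_valid(signed char rssi)
{
	return rssi != DEFAULT_NOISE_FLOOR && rssi != INVALID_RSSI_FULL &&
	       rssi != INVALID_RSSI_EMPTY && rssi != 0;
}

int main(void)
{
	signed char chain[4] = { -52, 0, -128, -60 };
	unsigned int mask = 0;
	int i;

	for (i = 0; i < 4; i++)
		if (chain_rssi_valid(chain[i]))
			mask |= 1u << i;

	printf("valid chain mask 0x%x\n", mask);	/* prints 0x9 */
	return 0;
}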
+
static void ath12k_mac_op_sta_statistics(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -12667,6 +12714,12 @@ static void ath12k_mac_op_sta_statistics(struct ieee80211_hw *hw,
!(ath12k_mac_get_fw_stats(ar, &params)))
signal = arsta->rssi_beacon;
+ params.stats_id = WMI_REQUEST_RSSI_PER_CHAIN_STAT;
+ if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL)) &&
+ ahsta->ahvif->vdev_type == WMI_VDEV_TYPE_STA &&
+ !(ath12k_mac_get_fw_stats(ar, &params)))
+ ath12k_mac_put_chain_rssi(sinfo, arsta);
+
spin_lock_bh(&ar->data_lock);
noise_floor = ath12k_pdev_get_noise_floor(ar);
spin_unlock_bh(&ar->data_lock);
diff --git a/drivers/net/wireless/ath/ath12k/mac.h b/drivers/net/wireless/ath/ath12k/mac.h
index 18c79d4002cb..c05af40bd7a2 100644
--- a/drivers/net/wireless/ath/ath12k/mac.h
+++ b/drivers/net/wireless/ath/ath12k/mac.h
@@ -168,7 +168,8 @@ int ath12k_mac_rfkill_enable_radio(struct ath12k *ar, bool enable);
int ath12k_mac_rfkill_config(struct ath12k *ar);
int ath12k_mac_wait_tx_complete(struct ath12k *ar);
void ath12k_mac_handle_beacon(struct ath12k *ar, struct sk_buff *skb);
-void ath12k_mac_handle_beacon_miss(struct ath12k *ar, u32 vdev_id);
+void ath12k_mac_handle_beacon_miss(struct ath12k *ar,
+ struct ath12k_link_vif *arvif);
int ath12k_mac_vif_set_keepalive(struct ath12k_link_vif *arvif,
enum wmi_sta_keepalive_method method,
u32 interval);
diff --git a/drivers/net/wireless/ath/ath12k/qmi.c b/drivers/net/wireless/ath/ath12k/qmi.c
index 7c611a1fd6d0..36325e62aa24 100644
--- a/drivers/net/wireless/ath/ath12k/qmi.c
+++ b/drivers/net/wireless/ath/ath12k/qmi.c
@@ -3307,20 +3307,28 @@ static int ath12k_qmi_wlanfw_wlan_cfg_send(struct ath12k_base *ab)
/* This is number of CE configs */
req->tgt_cfg_len = ab->qmi.ce_cfg.tgt_ce_len;
for (pipe_num = 0; pipe_num < req->tgt_cfg_len ; pipe_num++) {
- req->tgt_cfg[pipe_num].pipe_num = ce_cfg[pipe_num].pipenum;
- req->tgt_cfg[pipe_num].pipe_dir = ce_cfg[pipe_num].pipedir;
- req->tgt_cfg[pipe_num].nentries = ce_cfg[pipe_num].nentries;
- req->tgt_cfg[pipe_num].nbytes_max = ce_cfg[pipe_num].nbytes_max;
- req->tgt_cfg[pipe_num].flags = ce_cfg[pipe_num].flags;
+ req->tgt_cfg[pipe_num].pipe_num =
+ __le32_to_cpu(ce_cfg[pipe_num].pipenum);
+ req->tgt_cfg[pipe_num].pipe_dir =
+ __le32_to_cpu(ce_cfg[pipe_num].pipedir);
+ req->tgt_cfg[pipe_num].nentries =
+ __le32_to_cpu(ce_cfg[pipe_num].nentries);
+ req->tgt_cfg[pipe_num].nbytes_max =
+ __le32_to_cpu(ce_cfg[pipe_num].nbytes_max);
+ req->tgt_cfg[pipe_num].flags =
+ __le32_to_cpu(ce_cfg[pipe_num].flags);
}
req->svc_cfg_valid = 1;
/* This is number of Service/CE configs */
req->svc_cfg_len = ab->qmi.ce_cfg.svc_to_ce_map_len;
for (pipe_num = 0; pipe_num < req->svc_cfg_len; pipe_num++) {
- req->svc_cfg[pipe_num].service_id = svc_cfg[pipe_num].service_id;
- req->svc_cfg[pipe_num].pipe_dir = svc_cfg[pipe_num].pipedir;
- req->svc_cfg[pipe_num].pipe_num = svc_cfg[pipe_num].pipenum;
+ req->svc_cfg[pipe_num].service_id =
+ __le32_to_cpu(svc_cfg[pipe_num].service_id);
+ req->svc_cfg[pipe_num].pipe_dir =
+ __le32_to_cpu(svc_cfg[pipe_num].pipedir);
+ req->svc_cfg[pipe_num].pipe_num =
+ __le32_to_cpu(svc_cfg[pipe_num].pipenum);
}
/* set shadow v3 configuration */
diff --git a/drivers/net/wireless/ath/ath12k/qmi.h b/drivers/net/wireless/ath/ath12k/qmi.h
index abdaade3b542..4767d9a2e309 100644
--- a/drivers/net/wireless/ath/ath12k/qmi.h
+++ b/drivers/net/wireless/ath/ath12k/qmi.h
@@ -392,17 +392,17 @@ enum qmi_wlanfw_pipedir_enum_v01 {
};
struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01 {
- __le32 pipe_num;
- __le32 pipe_dir;
- __le32 nentries;
- __le32 nbytes_max;
- __le32 flags;
+ u32 pipe_num;
+ u32 pipe_dir;
+ u32 nentries;
+ u32 nbytes_max;
+ u32 flags;
};
struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01 {
- __le32 service_id;
- __le32 pipe_dir;
- __le32 pipe_num;
+ u32 service_id;
+ u32 pipe_dir;
+ u32 pipe_num;
};
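This header change works together with the qmi.c hunk above: the CE tables keep their little-endian wire layout, while the QMI request structs become host-order u32 values that the QMI encoder marshals itself, hence the __le32_to_cpu() conversions at the copy. A sketch of that conversion boundary, using a stand-in for the kernel's le32_to_cpu():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for the kernel's le32_to_cpu(): interpret 4 bytes as LE. */
static uint32_t my_le32_to_cpu(uint32_t le)
{
	uint8_t b[4];

	memcpy(b, &le, sizeof(b));
	return (uint32_t)b[0] | (uint32_t)b[1] << 8 |
	       (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
}

int main(void)
{
	uint8_t wire[4] = { 0x03, 0x00, 0x00, 0x00 };	/* LE-encoded 3 */
	uint32_t le_val, pipe_num;

	memcpy(&le_val, wire, sizeof(le_val));
	pipe_num = my_le32_to_cpu(le_val);	/* host order on any CPU */
	printf("pipe %u\n", pipe_num);
	return 0;
}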
struct qmi_wlanfw_shadow_reg_cfg_s_v01 {
diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
index 29dadedefdd2..ff6b3d4ea820 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.c
+++ b/drivers/net/wireless/ath/ath12k/wmi.c
@@ -30,6 +30,9 @@ struct ath12k_wmi_svc_ready_parse {
struct wmi_tlv_fw_stats_parse {
const struct wmi_stats_event *ev;
struct ath12k_fw_stats *stats;
+ const struct wmi_per_chain_rssi_stat_params *rssi;
+ int rssi_num;
+ bool chain_rssi_done;
};
struct ath12k_wmi_dma_ring_caps_parse {
@@ -185,6 +188,8 @@ static const struct ath12k_wmi_tlv_policy ath12k_wmi_tlv_policies[] = {
.min_len = sizeof(struct wmi_p2p_noa_event) },
[WMI_TAG_11D_NEW_COUNTRY_EVENT] = {
.min_len = sizeof(struct wmi_11d_new_cc_event) },
+ [WMI_TAG_PER_CHAIN_RSSI_STATS] = {
+ .min_len = sizeof(struct wmi_per_chain_rssi_stat_params) },
};
__le32 ath12k_wmi_tlv_hdr(u32 cmd, u32 len)
@@ -6462,6 +6467,8 @@ static int ath12k_pull_peer_sta_kickout_ev(struct ath12k_base *ab, struct sk_buf
}
arg->mac_addr = ev->peer_macaddr.addr;
+ arg->reason = le32_to_cpu(ev->reason);
+ arg->rssi = le32_to_cpu(ev->rssi);
kfree(tb);
return 0;
@@ -7298,8 +7305,10 @@ static void ath12k_scan_event(struct ath12k_base *ab, struct sk_buff *skb)
static void ath12k_peer_sta_kickout_event(struct ath12k_base *ab, struct sk_buff *skb)
{
struct wmi_peer_sta_kickout_arg arg = {};
+ struct ath12k_link_vif *arvif;
struct ieee80211_sta *sta;
struct ath12k_peer *peer;
+ unsigned int link_id;
struct ath12k *ar;
if (ath12k_pull_peer_sta_kickout_ev(ab, skb, &arg) != 0) {
@@ -7319,25 +7328,49 @@ static void ath12k_peer_sta_kickout_event(struct ath12k_base *ab, struct sk_buff
goto exit;
}
- ar = ath12k_mac_get_ar_by_vdev_id(ab, peer->vdev_id);
- if (!ar) {
+ arvif = ath12k_mac_get_arvif_by_vdev_id(ab, peer->vdev_id);
+ if (!arvif) {
ath12k_warn(ab, "invalid vdev id in peer sta kickout ev %d",
peer->vdev_id);
goto exit;
}
- sta = ieee80211_find_sta_by_ifaddr(ath12k_ar_to_hw(ar),
- arg.mac_addr, NULL);
+ ar = arvif->ar;
+
+ if (peer->mlo) {
+ sta = ieee80211_find_sta_by_link_addrs(ath12k_ar_to_hw(ar),
+ arg.mac_addr,
+ NULL, &link_id);
+ if (peer->link_id != link_id) {
+ ath12k_warn(ab,
+ "Spurious quick kickout for MLO STA %pM with invalid link_id, peer: %d, sta: %d\n",
+ arg.mac_addr, peer->link_id, link_id);
+ goto exit;
+ }
+ } else {
+ sta = ieee80211_find_sta_by_ifaddr(ath12k_ar_to_hw(ar),
+ arg.mac_addr, NULL);
+ }
if (!sta) {
- ath12k_warn(ab, "Spurious quick kickout for STA %pM\n",
- arg.mac_addr);
+ ath12k_warn(ab, "Spurious quick kickout for %sSTA %pM\n",
+ peer->mlo ? "MLO " : "", arg.mac_addr);
goto exit;
}
- ath12k_dbg(ab, ATH12K_DBG_WMI, "peer sta kickout event %pM",
- arg.mac_addr);
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "peer sta kickout event %pM reason: %d rssi: %d\n",
+ arg.mac_addr, arg.reason, arg.rssi);
- ieee80211_report_low_ack(sta, 10);
+ switch (arg.reason) {
+ case WMI_PEER_STA_KICKOUT_REASON_INACTIVITY:
+ if (arvif->ahvif->vif->type == NL80211_IFTYPE_STATION) {
+ ath12k_mac_handle_beacon_miss(ar, arvif);
+ break;
+ }
+ fallthrough;
+ default:
+ ieee80211_report_low_ack(sta, 10);
+ }
exit:
spin_unlock_bh(&ab->base_lock);
@@ -7346,6 +7379,7 @@ exit:
static void ath12k_roam_event(struct ath12k_base *ab, struct sk_buff *skb)
{
+ struct ath12k_link_vif *arvif;
struct wmi_roam_event roam_ev = {};
struct ath12k *ar;
u32 vdev_id;
@@ -7364,21 +7398,22 @@ static void ath12k_roam_event(struct ath12k_base *ab, struct sk_buff *skb)
"wmi roam event vdev %u reason %d rssi %d\n",
vdev_id, roam_reason, roam_ev.rssi);
- rcu_read_lock();
- ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
- if (!ar) {
+ guard(rcu)();
+ arvif = ath12k_mac_get_arvif_by_vdev_id(ab, vdev_id);
+ if (!arvif) {
ath12k_warn(ab, "invalid vdev id in roam ev %d", vdev_id);
- rcu_read_unlock();
return;
}
+ ar = arvif->ar;
+
if (roam_reason >= WMI_ROAM_REASON_MAX)
ath12k_warn(ab, "ignoring unknown roam event reason %d on vdev %i\n",
roam_reason, vdev_id);
switch (roam_reason) {
case WMI_ROAM_REASON_BEACON_MISS:
- ath12k_mac_handle_beacon_miss(ar, vdev_id);
+ ath12k_mac_handle_beacon_miss(ar, arvif);
break;
case WMI_ROAM_REASON_BETTER_AP:
case WMI_ROAM_REASON_LOW_RSSI:
@@ -7388,8 +7423,6 @@ static void ath12k_roam_event(struct ath12k_base *ab, struct sk_buff *skb)
roam_reason, vdev_id);
break;
}
-
- rcu_read_unlock();
}
static void ath12k_chan_info_event(struct ath12k_base *ab, struct sk_buff *skb)
@@ -8219,6 +8252,77 @@ exit:
return ret;
}
+static int ath12k_wmi_tlv_rssi_chain_parse(struct ath12k_base *ab,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ const struct wmi_rssi_stat_params *stats_rssi = ptr;
+ struct wmi_tlv_fw_stats_parse *parse = data;
+ const struct wmi_stats_event *ev = parse->ev;
+ struct ath12k_fw_stats *stats = parse->stats;
+ struct ath12k_link_vif *arvif;
+ struct ath12k_link_sta *arsta;
+ struct ieee80211_sta *sta;
+ struct ath12k_sta *ahsta;
+ struct ath12k *ar;
+ int vdev_id;
+ int j;
+
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch update stats ev");
+ return -EPROTO;
+ }
+
+ if (tag != WMI_TAG_RSSI_STATS)
+ return -EPROTO;
+
+ if (!stats)
+ return -EINVAL;
+
+ stats->pdev_id = le32_to_cpu(ev->pdev_id);
+ vdev_id = le32_to_cpu(stats_rssi->vdev_id);
+ guard(rcu)();
+ ar = ath12k_mac_get_ar_by_pdev_id(ab, stats->pdev_id);
+ if (!ar) {
+ ath12k_warn(ab, "invalid pdev id %d in rssi chain parse\n",
+ stats->pdev_id);
+ return -EPROTO;
+ }
+
+ arvif = ath12k_mac_get_arvif(ar, vdev_id);
+ if (!arvif) {
+ ath12k_warn(ab, "not found vif for vdev id %d\n", vdev_id);
+ return -EPROTO;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "stats bssid %pM vif %p\n",
+ arvif->bssid, arvif->ahvif->vif);
+
+ sta = ieee80211_find_sta_by_ifaddr(ath12k_ar_to_hw(ar),
+ arvif->bssid,
+ NULL);
+ if (!sta) {
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "not found station of bssid %pM for rssi chain\n",
+ arvif->bssid);
+ return -EPROTO;
+ }
+
+ ahsta = ath12k_sta_to_ahsta(sta);
+ arsta = &ahsta->deflink;
+
+ BUILD_BUG_ON(ARRAY_SIZE(arsta->chain_signal) >
+ ARRAY_SIZE(stats_rssi->rssi_avg_beacon));
+
+ for (j = 0; j < ARRAY_SIZE(arsta->chain_signal); j++)
+ arsta->chain_signal[j] = le32_to_cpu(stats_rssi->rssi_avg_beacon[j]);
+
+ stats->stats_id = WMI_REQUEST_RSSI_PER_CHAIN_STAT;
+
+ return 0;
+}
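The parser above is reached through ath12k_wmi_tlv_iter() while walking a WMI_TAG_ARRAY_STRUCT payload. The general shape of such a tag-length-value walk, sketched with a simplified 16-bit header (the real WMI TLV header layout differs):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tlv_hdr {
	uint16_t tag;
	uint16_t len;	/* payload length, excluding the header */
};

typedef int (*tlv_cb)(uint16_t tag, uint16_t len, const void *ptr);

static int tlv_iter(const uint8_t *buf, size_t size, tlv_cb cb)
{
	while (size >= sizeof(struct tlv_hdr)) {
		struct tlv_hdr hdr;

		memcpy(&hdr, buf, sizeof(hdr));
		buf += sizeof(hdr);
		size -= sizeof(hdr);
		if (hdr.len > size)
			return -1;	/* truncated TLV */
		if (cb(hdr.tag, hdr.len, buf))
			return -1;
		buf += hdr.len;
		size -= hdr.len;
	}
	return 0;
}

static int print_tlv(uint16_t tag, uint16_t len, const void *ptr)
{
	(void)ptr;
	printf("tag %u len %u\n", tag, len);
	return 0;
}

int main(void)
{
	uint8_t buf[] = { 1, 0, 2, 0, 0xaa, 0xbb, 7, 0, 0, 0 };

	return tlv_iter(buf, sizeof(buf), print_tlv);
}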
+
static int ath12k_wmi_tlv_fw_stats_parse(struct ath12k_base *ab,
u16 tag, u16 len,
const void *ptr, void *data)
@@ -8233,6 +8337,22 @@ static int ath12k_wmi_tlv_fw_stats_parse(struct ath12k_base *ab,
case WMI_TAG_ARRAY_BYTE:
ret = ath12k_wmi_tlv_fw_stats_data_parse(ab, parse, ptr, len);
break;
+ case WMI_TAG_PER_CHAIN_RSSI_STATS:
+ parse->rssi = ptr;
+ if (le32_to_cpu(parse->ev->stats_id) & WMI_REQUEST_RSSI_PER_CHAIN_STAT)
+ parse->rssi_num = le32_to_cpu(parse->rssi->num_per_chain_rssi);
+ break;
+ case WMI_TAG_ARRAY_STRUCT:
+ if (parse->rssi_num && !parse->chain_rssi_done) {
+ ret = ath12k_wmi_tlv_iter(ab, ptr, len,
+ ath12k_wmi_tlv_rssi_chain_parse,
+ parse);
+ if (ret)
+ return ret;
+
+ parse->chain_rssi_done = true;
+ }
+ break;
default:
break;
}
@@ -8346,6 +8466,12 @@ static void ath12k_update_stats_event(struct ath12k_base *ab, struct sk_buff *sk
goto complete;
}
+ /* Handle WMI_REQUEST_RSSI_PER_CHAIN_STAT status update */
+ if (stats.stats_id == WMI_REQUEST_RSSI_PER_CHAIN_STAT) {
+ complete(&ar->fw_stats_done);
+ goto complete;
+ }
+
/* Handle WMI_REQUEST_VDEV_STAT and WMI_REQUEST_BCN_STAT updates. */
ath12k_wmi_fw_stats_process(ar, &stats);
diff --git a/drivers/net/wireless/ath/ath12k/wmi.h b/drivers/net/wireless/ath/ath12k/wmi.h
index f3b0a6f57ec2..a8c3190e8ad9 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.h
+++ b/drivers/net/wireless/ath/ath12k/wmi.h
@@ -4548,12 +4548,27 @@ struct wmi_scan_event {
__le32 tsf_timestamp;
} __packed;
+enum wmi_peer_sta_kickout_reason {
+ WMI_PEER_STA_KICKOUT_REASON_UNSPECIFIED = 0,
+ WMI_PEER_STA_KICKOUT_REASON_XRETRY = 1,
+ WMI_PEER_STA_KICKOUT_REASON_INACTIVITY = 2,
+ WMI_PEER_STA_KICKOUT_REASON_IBSS_DISCONNECT = 3,
+ WMI_PEER_STA_KICKOUT_REASON_TDLS_DISCONNECT = 4,
+ WMI_PEER_STA_KICKOUT_REASON_SA_QUERY_TIMEOUT = 5,
+ WMI_PEER_STA_KICKOUT_REASON_ROAMING_EVENT = 6,
+ WMI_PEER_STA_KICKOUT_REASON_PMF_ERROR = 7,
+};
+
struct wmi_peer_sta_kickout_arg {
const u8 *mac_addr;
+ enum wmi_peer_sta_kickout_reason reason;
+ u32 rssi;
};
struct wmi_peer_sta_kickout_event {
struct ath12k_wmi_mac_addr_params peer_macaddr;
+ __le32 reason;
+ __le32 rssi;
} __packed;
#define WMI_ROAM_REASON_MASK GENMASK(3, 0)
@@ -5875,9 +5890,10 @@ struct wmi_stats_event {
} __packed;
enum wmi_stats_id {
- WMI_REQUEST_PDEV_STAT = BIT(2),
- WMI_REQUEST_VDEV_STAT = BIT(3),
- WMI_REQUEST_BCN_STAT = BIT(11),
+ WMI_REQUEST_PDEV_STAT = BIT(2),
+ WMI_REQUEST_VDEV_STAT = BIT(3),
+ WMI_REQUEST_RSSI_PER_CHAIN_STAT = BIT(8),
+ WMI_REQUEST_BCN_STAT = BIT(11),
};
struct wmi_request_stats_cmd {
@@ -5888,6 +5904,17 @@ struct wmi_request_stats_cmd {
__le32 pdev_id;
} __packed;
+struct wmi_rssi_stat_params {
+ __le32 vdev_id;
+ __le32 rssi_avg_beacon[WMI_MAX_CHAINS];
+ __le32 rssi_avg_data[WMI_MAX_CHAINS];
+ struct ath12k_wmi_mac_addr_params peer_macaddr;
+} __packed;
+
+struct wmi_per_chain_rssi_stat_params {
+ __le32 num_per_chain_rssi;
+} __packed;
+
#define WLAN_MAX_AC 4
#define MAX_TX_RATE_VALUES 10
diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c
index 908c4c8b7f82..6833430130f4 100644
--- a/drivers/net/wireless/ath/carl9170/rx.c
+++ b/drivers/net/wireless/ath/carl9170/rx.c
@@ -555,7 +555,7 @@ static void carl9170_ps_beacon(struct ar9170 *ar, void *data, unsigned int len)
/* Check whether the PHY can be turned off again. */
/* 1. What about buffered unicast traffic for our AID? */
- cam = ieee80211_check_tim(tim_ie, tim_len, ar->common.curaid);
+ cam = ieee80211_check_tim(tim_ie, tim_len, ar->common.curaid, false);
/* 2. Maybe the AP wants to send multicast/broadcast data? */
cam |= !!(tim_ie->bitmap_ctrl & 0x01);
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 7703a0933a14..7218fe70f3bc 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -2708,6 +2708,7 @@ static void wil_wiphy_init(struct wiphy *wiphy)
wiphy->n_cipher_suites = ARRAY_SIZE(wil_cipher_suites);
wiphy->mgmt_stypes = wil_mgmt_stypes;
wiphy->features |= NL80211_FEATURE_SK_TX_STATUS;
+ wiphy->bss_param_support = WIPHY_BSS_PARAM_AP_ISOLATE;
wiphy->n_vendor_commands = ARRAY_SIZE(wil_nl80211_vendor_commands);
wiphy->vendor_commands = wil_nl80211_vendor_commands;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index 8ab7d1e34a6e..6a3f187320fc 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -997,9 +997,9 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = {
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4356, WCC),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4359, WCC),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43751, WCC),
+ BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43752, WCC),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_4373, CYW),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_43012, CYW),
- BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_43752, CYW),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_89359, CYW),
CYW_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_43439, CYW),
{ /* end: all zeroes */ }
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 8af402555b5e..8afaffe31031 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -5958,6 +5958,26 @@ static int brcmf_cfg80211_del_pmk(struct wiphy *wiphy, struct net_device *dev,
return brcmf_set_pmk(ifp, NULL, 0);
}
+static int brcmf_cfg80211_change_bss(struct wiphy *wiphy, struct net_device *dev,
+ struct bss_parameters *params)
+{
+ struct brcmf_if *ifp = netdev_priv(dev);
+ int ret = 0;
+
+ /* In AP mode, the "ap_isolate" value represents
+ * 0 = allow low-level bridging of frames between associated stations
+ * 1 = restrict low-level bridging of frames to isolate associated stations
+ * -1 = do not change existing setting
+ */
+ if (params->ap_isolate >= 0) {
+ ret = brcmf_fil_iovar_int_set(ifp, "ap_isolate", params->ap_isolate);
+ if (ret < 0)
+ brcmf_err("ap_isolate iovar failed: ret=%d\n", ret);
+ }
+
+ return ret;
+}
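cfg80211 hands ap_isolate to change_bss as a tri-state, typically driven by hostapd's ap_isolate option. A sketch of the convention, where -1 preserves the current setting:

#include <stdio.h>

/* Tri-state convention: -1 means "do not change". */
static int apply_ap_isolate(int current, int requested)
{
	return requested >= 0 ? requested : current;
}

int main(void)
{
	int isolate = 0;

	isolate = apply_ap_isolate(isolate, 1);		/* enable isolation */
	isolate = apply_ap_isolate(isolate, -1);	/* no change */
	printf("ap_isolate=%d\n", isolate);
	return 0;
}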
+
static struct cfg80211_ops brcmf_cfg80211_ops = {
.add_virtual_intf = brcmf_cfg80211_add_iface,
.del_virtual_intf = brcmf_cfg80211_del_iface,
@@ -6005,6 +6025,7 @@ static struct cfg80211_ops brcmf_cfg80211_ops = {
.update_connect_params = brcmf_cfg80211_update_conn_params,
.set_pmk = brcmf_cfg80211_set_pmk,
.del_pmk = brcmf_cfg80211_del_pmk,
+ .change_bss = brcmf_cfg80211_change_bss,
};
struct cfg80211_ops *brcmf_cfg80211_get_ops(struct brcmf_mp_device *settings)
@@ -7659,6 +7680,8 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
BIT(NL80211_BSS_SELECT_ATTR_BAND_PREF) |
BIT(NL80211_BSS_SELECT_ATTR_RSSI_ADJUST);
+ wiphy->bss_param_support = WIPHY_BSS_PARAM_AP_ISOLATE;
+
wiphy->flags |= WIPHY_FLAG_NETNS_OK |
WIPHY_FLAG_PS_ON_BY_DEFAULT |
WIPHY_FLAG_HAVE_AP_SME |
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
index 9074ab49e806..4239f2b21e54 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
@@ -738,8 +738,8 @@ static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci)
case BRCM_CC_4364_CHIP_ID:
case CY_CC_4373_CHIP_ID:
return 0x160000;
- case CY_CC_43752_CHIP_ID:
case BRCM_CC_43751_CHIP_ID:
+ case BRCM_CC_43752_CHIP_ID:
case BRCM_CC_4377_CHIP_ID:
return 0x170000;
case BRCM_CC_4378_CHIP_ID:
@@ -1452,7 +1452,7 @@ bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
return (reg & CC_SR_CTL0_ENABLE_MASK) != 0;
case BRCM_CC_4359_CHIP_ID:
case BRCM_CC_43751_CHIP_ID:
- case CY_CC_43752_CHIP_ID:
+ case BRCM_CC_43752_CHIP_ID:
case CY_CC_43012_CHIP_ID:
addr = CORE_CC_REG(pmu->base, retention_ctl);
reg = chip->ops->read32(chip->ctx, addr);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
index 83f8ed7d00f9..ef79924fd8f4 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
@@ -554,12 +554,16 @@ static int brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx)
data = (u8 *)fw->data;
data_len = fw->size;
} else {
- if ((data = bcm47xx_nvram_get_contents(&data_len)))
+ data = bcm47xx_nvram_get_contents(&data_len);
+ if (data) {
free_bcm47xx_nvram = true;
- else if ((data = brcmf_fw_nvram_from_efi(&data_len)))
- kfree_nvram = true;
- else if (!(cur->flags & BRCMF_FW_REQF_OPTIONAL))
- goto fail;
+ } else {
+ data = brcmf_fw_nvram_from_efi(&data_len);
+ if (data)
+ kfree_nvram = true;
+ else if (!(cur->flags & BRCMF_FW_REQF_OPTIONAL))
+ goto fail;
+ }
}
if (data)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index 8a0bad5119a0..8cf9d7e7c3f7 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -655,10 +655,10 @@ static const struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = {
BRCMF_FW_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356),
BRCMF_FW_ENTRY(BRCM_CC_4359_CHIP_ID, 0xFFFFFFFF, 4359),
BRCMF_FW_ENTRY(BRCM_CC_43751_CHIP_ID, 0xFFFFFFFF, 43752),
+ BRCMF_FW_ENTRY(BRCM_CC_43752_CHIP_ID, 0xFFFFFFFF, 43752),
BRCMF_FW_ENTRY(CY_CC_4373_CHIP_ID, 0xFFFFFFFF, 4373),
BRCMF_FW_ENTRY(CY_CC_43012_CHIP_ID, 0xFFFFFFFF, 43012),
BRCMF_FW_ENTRY(CY_CC_43439_CHIP_ID, 0xFFFFFFFF, 43439),
- BRCMF_FW_ENTRY(CY_CC_43752_CHIP_ID, 0xFFFFFFFF, 43752)
};
#define TXCTL_CREDITS 2
@@ -3426,8 +3426,8 @@ err:
static bool brcmf_sdio_aos_no_decode(struct brcmf_sdio *bus)
{
if (bus->ci->chip == BRCM_CC_43751_CHIP_ID ||
- bus->ci->chip == CY_CC_43012_CHIP_ID ||
- bus->ci->chip == CY_CC_43752_CHIP_ID)
+ bus->ci->chip == BRCM_CC_43752_CHIP_ID ||
+ bus->ci->chip == CY_CC_43012_CHIP_ID)
return true;
else
return false;
@@ -4278,8 +4278,8 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
switch (sdiod->func1->device) {
case SDIO_DEVICE_ID_BROADCOM_43751:
+ case SDIO_DEVICE_ID_BROADCOM_43752:
case SDIO_DEVICE_ID_BROADCOM_CYPRESS_4373:
- case SDIO_DEVICE_ID_BROADCOM_CYPRESS_43752:
brcmf_dbg(INFO, "set F2 watermark to 0x%x*4 bytes\n",
CY_4373_F2_WATERMARK);
brcmf_sdiod_writeb(sdiod, SBSDIO_WATERMARK,
diff --git a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
index b39c5c1ee18b..df3b67ba4db2 100644
--- a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
@@ -60,7 +60,6 @@
#define CY_CC_4373_CHIP_ID 0x4373
#define CY_CC_43012_CHIP_ID 43012
#define CY_CC_43439_CHIP_ID 43439
-#define CY_CC_43752_CHIP_ID 43752
/* USB Device IDs */
#define BRCM_USB_43143_DEVICE_ID 0xbd1e
diff --git a/drivers/net/wireless/intel/iwlegacy/iwl-spectrum.h b/drivers/net/wireless/intel/iwlegacy/iwl-spectrum.h
index 1e8ab704dbfb..f00eb878b94b 100644
--- a/drivers/net/wireless/intel/iwlegacy/iwl-spectrum.h
+++ b/drivers/net/wireless/intel/iwlegacy/iwl-spectrum.h
@@ -50,28 +50,4 @@ struct ieee80211_measurement_params {
__le16 duration;
} __packed;
-struct ieee80211_info_element {
- u8 id;
- u8 len;
- u8 data[];
-} __packed;
-
-struct ieee80211_measurement_request {
- struct ieee80211_info_element ie;
- u8 token;
- u8 mode;
- u8 type;
- struct ieee80211_measurement_params params[];
-} __packed;
-
-struct ieee80211_measurement_report {
- struct ieee80211_info_element ie;
- u8 token;
- u8 mode;
- u8 type;
- union {
- struct ieee80211_basic_report basic[0];
- } u;
-} __packed;
-
#endif
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
index 9f543946b285..3e6206e739f6 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
@@ -9,11 +9,11 @@
#include "iwl-prph.h"
#include "fw/api/txq.h"
-/* Highest firmware API version supported */
-#define IWL_BZ_UCODE_API_MAX 102
+/* Highest firmware core release supported */
+#define IWL_BZ_UCODE_CORE_MAX 99
/* Lowest firmware API version supported */
-#define IWL_BZ_UCODE_API_MIN 98
+#define IWL_BZ_UCODE_API_MIN 100
/* Memory offsets and lengths */
#define IWL_BZ_SMEM_OFFSET 0x400000
@@ -75,7 +75,7 @@ static const struct iwl_family_base_params iwl_bz_base = {
},
},
.features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,
- .ucode_api_max = IWL_BZ_UCODE_API_MAX,
+ .ucode_api_max = ENCODE_CORE_AS_API(IWL_BZ_UCODE_CORE_MAX),
.ucode_api_min = IWL_BZ_UCODE_API_MIN,
};
@@ -101,8 +101,8 @@ const struct iwl_mac_cfg iwl_gl_mac_cfg = {
.low_latency_xtal = true,
};
-IWL_FW_AND_PNVM(IWL_BZ_A_FM_B_FW_PRE, IWL_BZ_UCODE_API_MAX);
-IWL_FW_AND_PNVM(IWL_BZ_A_FM_C_FW_PRE, IWL_BZ_UCODE_API_MAX);
-IWL_FW_AND_PNVM(IWL_BZ_A_FM4_B_FW_PRE, IWL_BZ_UCODE_API_MAX);
-IWL_FW_AND_PNVM(IWL_GL_B_FM_B_FW_PRE, IWL_BZ_UCODE_API_MAX);
-IWL_FW_AND_PNVM(IWL_GL_C_FM_C_FW_PRE, IWL_BZ_UCODE_API_MAX);
+IWL_CORE_FW(IWL_BZ_A_FM_B_FW_PRE, IWL_BZ_UCODE_CORE_MAX);
+IWL_CORE_FW(IWL_BZ_A_FM_C_FW_PRE, IWL_BZ_UCODE_CORE_MAX);
+IWL_CORE_FW(IWL_BZ_A_FM4_B_FW_PRE, IWL_BZ_UCODE_CORE_MAX);
+IWL_CORE_FW(IWL_GL_B_FM_B_FW_PRE, IWL_BZ_UCODE_CORE_MAX);
+IWL_CORE_FW(IWL_GL_C_FM_C_FW_PRE, IWL_BZ_UCODE_CORE_MAX);
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/dr.c b/drivers/net/wireless/intel/iwlwifi/cfg/dr.c
index 807f4e29d55a..e53a785686c8 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/dr.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/dr.c
@@ -8,11 +8,11 @@
#include "iwl-prph.h"
#include "fw/api/txq.h"
-/* Highest firmware API version supported */
-#define IWL_DR_UCODE_API_MAX 102
+/* Highest firmware core release supported */
+#define IWL_DR_UCODE_CORE_MAX 99
/* Lowest firmware API version supported */
-#define IWL_DR_UCODE_API_MIN 98
+#define IWL_DR_UCODE_API_MIN 100
/* Memory offsets and lengths */
#define IWL_DR_SMEM_OFFSET 0x400000
@@ -20,9 +20,6 @@
#define IWL_DR_A_PE_A_FW_PRE "iwlwifi-dr-a0-pe-a0"
-#define IWL_DR_A_PE_A_FW_MODULE_FIRMWARE(api) \
- IWL_DR_A_PE_A_FW_PRE "-" __stringify(api) ".ucode"
-
static const struct iwl_family_base_params iwl_dr_base = {
.num_of_queues = 512,
.max_tfd_queue_size = 65536,
@@ -73,7 +70,7 @@ static const struct iwl_family_base_params iwl_dr_base = {
},
},
.features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,
- .ucode_api_max = IWL_DR_UCODE_API_MAX,
+ .ucode_api_max = ENCODE_CORE_AS_API(IWL_DR_UCODE_CORE_MAX),
.ucode_api_min = IWL_DR_UCODE_API_MIN,
};
@@ -89,5 +86,5 @@ const struct iwl_mac_cfg iwl_dr_mac_cfg = {
.ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
};
-MODULE_FIRMWARE(IWL_DR_A_PE_A_FW_MODULE_FIRMWARE(IWL_DR_UCODE_API_MAX));
+IWL_CORE_FW(IWL_DR_A_PE_A_FW_PRE, IWL_DR_UCODE_CORE_MAX);
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/rf-gf.c b/drivers/net/wireless/intel/iwlwifi/cfg/rf-gf.c
index 7ff5170faaa9..c16cda087a7c 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/rf-gf.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/rf-gf.c
@@ -9,7 +9,7 @@
#define IWL_GF_UCODE_API_MAX 100
/* Lowest firmware API version supported */
-#define IWL_GF_UCODE_API_MIN 98
+#define IWL_GF_UCODE_API_MIN 100
#define IWL_SO_A_GF_A_FW_PRE "iwlwifi-so-a0-gf-a0"
#define IWL_TY_A_GF_A_FW_PRE "iwlwifi-ty-a0-gf-a0"
@@ -23,6 +23,18 @@
#define IWL_SC_A_GF_A_FW_PRE "iwlwifi-sc-a0-gf-a0"
#define IWL_SC_A_GF4_A_FW_PRE "iwlwifi-sc-a0-gf4-a0"
+#define IWL_BZ_A_GF_A_MODULE_FIRMWARE(api) \
+ IWL_BZ_A_GF_A_FW_PRE "-" __stringify(api) ".ucode"
+
+#define IWL_BZ_A_GF4_A_MODULE_FIRMWARE(api) \
+ IWL_BZ_A_GF4_A_FW_PRE "-" __stringify(api) ".ucode"
+
+#define IWL_SC_A_GF_A_MODULE_FIRMWARE(api) \
+ IWL_SC_A_GF_A_FW_PRE "-" __stringify(api) ".ucode"
+
+#define IWL_SC_A_GF4_A_MODULE_FIRMWARE(api) \
+ IWL_SC_A_GF4_A_FW_PRE "-" __stringify(api) ".ucode"
+
/* NVM versions */
#define IWL_GF_NVM_VERSION 0x0a1d
@@ -67,7 +79,7 @@ IWL_FW_AND_PNVM(IWL_MA_A_GF_A_FW_PRE, IWL_GF_UCODE_API_MAX);
IWL_FW_AND_PNVM(IWL_MA_B_GF_A_FW_PRE, IWL_GF_UCODE_API_MAX);
IWL_FW_AND_PNVM(IWL_MA_A_GF4_A_FW_PRE, IWL_GF_UCODE_API_MAX);
IWL_FW_AND_PNVM(IWL_MA_B_GF4_A_FW_PRE, IWL_GF_UCODE_API_MAX);
-IWL_FW_AND_PNVM(IWL_BZ_A_GF_A_FW_PRE, IWL_GF_UCODE_API_MAX);
-IWL_FW_AND_PNVM(IWL_BZ_A_GF4_A_FW_PRE, IWL_GF_UCODE_API_MAX);
-IWL_FW_AND_PNVM(IWL_SC_A_GF_A_FW_PRE, IWL_GF_UCODE_API_MAX);
-IWL_FW_AND_PNVM(IWL_SC_A_GF4_A_FW_PRE, IWL_GF_UCODE_API_MAX);
+MODULE_FIRMWARE(IWL_BZ_A_GF_A_MODULE_FIRMWARE(IWL_GF_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_BZ_A_GF4_A_MODULE_FIRMWARE(IWL_GF_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_SC_A_GF_A_MODULE_FIRMWARE(IWL_GF_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_SC_A_GF4_A_MODULE_FIRMWARE(IWL_GF_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/rf-hr.c b/drivers/net/wireless/intel/iwlwifi/cfg/rf-hr.c
index 9f408d276ce9..6cf187d92dbf 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/rf-hr.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/rf-hr.c
@@ -9,7 +9,7 @@
#define IWL_HR_UCODE_API_MAX 100
/* Lowest firmware API version supported */
-#define IWL_HR_UCODE_API_MIN 98
+#define IWL_HR_UCODE_API_MIN 100
#define IWL_QU_B_HR_B_FW_PRE "iwlwifi-Qu-b0-hr-b0"
#define IWL_QU_C_HR_B_FW_PRE "iwlwifi-Qu-c0-hr-b0"
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
index 6d4a3bce49b9..e9449b59114a 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
@@ -9,11 +9,11 @@
#include "iwl-prph.h"
#include "fw/api/txq.h"
-/* Highest firmware API version supported */
-#define IWL_SC_UCODE_API_MAX 102
+/* Highest firmware core release supported */
+#define IWL_SC_UCODE_CORE_MAX 99
/* Lowest firmware API version supported */
-#define IWL_SC_UCODE_API_MIN 98
+#define IWL_SC_UCODE_API_MIN 100
/* NVM versions */
#define IWL_SC_NVM_VERSION 0x0a1d
@@ -78,7 +78,7 @@ static const struct iwl_family_base_params iwl_sc_base = {
},
},
.features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,
- .ucode_api_max = IWL_SC_UCODE_API_MAX,
+ .ucode_api_max = ENCODE_CORE_AS_API(IWL_SC_UCODE_CORE_MAX),
.ucode_api_min = IWL_SC_UCODE_API_MIN,
};
@@ -94,8 +94,8 @@ const struct iwl_mac_cfg iwl_sc_mac_cfg = {
.ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
};
-IWL_FW_AND_PNVM(IWL_SC_A_FM_B_FW_PRE, IWL_SC_UCODE_API_MAX);
-IWL_FW_AND_PNVM(IWL_SC_A_FM_C_FW_PRE, IWL_SC_UCODE_API_MAX);
-IWL_FW_AND_PNVM(IWL_SC_A_WH_A_FW_PRE, IWL_SC_UCODE_API_MAX);
-IWL_FW_AND_PNVM(IWL_SC2_A_FM_C_FW_PRE, IWL_SC_UCODE_API_MAX);
-IWL_FW_AND_PNVM(IWL_SC2_A_WH_A_FW_PRE, IWL_SC_UCODE_API_MAX);
+IWL_CORE_FW(IWL_SC_A_FM_B_FW_PRE, IWL_SC_UCODE_CORE_MAX);
+IWL_CORE_FW(IWL_SC_A_FM_C_FW_PRE, IWL_SC_UCODE_CORE_MAX);
+IWL_CORE_FW(IWL_SC_A_WH_A_FW_PRE, IWL_SC_UCODE_CORE_MAX);
+IWL_CORE_FW(IWL_SC2_A_FM_C_FW_PRE, IWL_SC_UCODE_CORE_MAX);
+IWL_CORE_FW(IWL_SC2_A_WH_A_FW_PRE, IWL_SC_UCODE_CORE_MAX);
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c b/drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c
index 9f8cdb027839..d337ab543eb0 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c
@@ -766,7 +766,7 @@ static int iwl_init_otp_access(struct iwl_trans *trans)
{
int ret;
- ret = iwl_finish_nic_init(trans);
+ ret = iwl_trans_activate_nic(trans);
if (ret)
return ret;
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
index 0771a46bd552..a0a26ef482a5 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
@@ -378,7 +378,7 @@ static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_SET,
CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
- iwl_trans_d3_suspend(priv->trans, false, true);
+ iwl_trans_d3_suspend(priv->trans, true);
goto out;
@@ -422,7 +422,6 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
struct ieee80211_vif *vif;
u32 base;
int ret;
- enum iwl_d3_status d3_status;
struct error_table_start {
/* cf. struct iwl_error_event_table */
u32 valid;
@@ -451,15 +450,10 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
/* we'll clear ctx->vif during iwlagn_prepare_restart() */
vif = ctx->vif;
- ret = iwl_trans_d3_resume(priv->trans, &d3_status, false, true);
+ ret = iwl_trans_d3_resume(priv->trans, true);
if (ret)
goto out_unlock;
- if (d3_status != IWL_D3_STATUS_ALIVE) {
- IWL_INFO(priv, "Device was reset during suspend\n");
- goto out_unlock;
- }
-
/* uCode is no longer operating by itself */
iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_CLR,
CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/power.c b/drivers/net/wireless/intel/iwlwifi/dvm/power.c
index 6b42d6e5f30f..e7dbba7134f7 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/power.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/power.c
@@ -368,7 +368,7 @@ int iwl_power_update_mode(struct iwl_priv *priv, bool force)
/* initialize to default */
void iwl_power_initialize(struct iwl_priv *priv)
{
- priv->power_data.bus_pm = priv->trans->pm_support;
+ priv->power_data.bus_pm = iwl_trans_is_pm_supported(priv->trans);
priv->power_data.debug_sleep_level_override = -1;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
index 7ec22738b5d6..52edc19d8cdd 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
@@ -9,9 +9,9 @@
#include "acpi.h"
#include "fw/runtime.h"
-const guid_t iwl_guid = GUID_INIT(0xF21202BF, 0x8F78, 0x4DC6,
- 0xA5, 0xB3, 0x1F, 0x73,
- 0x8E, 0x28, 0x5A, 0xDE);
+static const guid_t iwl_guid = GUID_INIT(0xF21202BF, 0x8F78, 0x4DC6,
+ 0xA5, 0xB3, 0x1F, 0x73,
+ 0x8E, 0x28, 0x5A, 0xDE);
static const size_t acpi_dsm_size[DSM_FUNC_NUM_FUNCS] = {
[DSM_FUNC_QUERY] = sizeof(u32),
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
index 68d8fb5f6357..20bc6671f4eb 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
@@ -140,8 +140,6 @@ struct iwl_dsm_internal_product_reset_cmd {
struct iwl_fw_runtime;
-extern const guid_t iwl_guid;
-
union acpi_object *iwl_acpi_get_dsm_object(struct device *dev, int rev,
int func, union acpi_object *args,
const guid_t *guid);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
index 53445087e9cb..d3bed0216df4 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
@@ -367,6 +367,7 @@ enum iwl_wowlan_flags {
ENABLE_NBNS_FILTERING = BIT(2),
ENABLE_DHCP_FILTERING = BIT(3),
ENABLE_STORE_BEACON = BIT(4),
+ HAS_BEACON_PROTECTION = BIT(5),
};
/**
@@ -631,10 +632,65 @@ struct iwl_wowlan_gtk_status_v3 {
struct iwl_wowlan_all_rsc_tsc_v5 sc;
} __packed; /* WOWLAN_GTK_MATERIAL_VER_3 */
+/**
+ * enum iwl_wowlan_key_status - Status of security keys in WoWLAN notifications
+ * @IWL_WOWLAN_NOTIF_NO_KEY: No key is present; this entry should be ignored.
+ * @IWL_WOWLAN_STATUS_OLD_KEY: An old key exists; no rekey occurred, and only
+ * metadata is available.
+ * @IWL_WOWLAN_STATUS_NEW_KEY: A new key was created after a rekey; new key
+ * material is available.
+ */
+enum iwl_wowlan_key_status {
+ IWL_WOWLAN_NOTIF_NO_KEY = 0,
+ IWL_WOWLAN_STATUS_OLD_KEY = 1,
+ IWL_WOWLAN_STATUS_NEW_KEY = 2
+};
+
+/**
+ * struct iwl_wowlan_gtk_status - GTK status
+ * @key: GTK material
+ * @key_len: GTK length, if set to 0, the key is not available
+ * @key_flags: information about the key:
+ * bits[0:1]: key index assigned by the AP
+ * bits[2:6]: GTK index of the key in the internal DB
+ * bit[7]: Set iff this is the currently used GTK
+ * @key_status: key status, see &enum iwl_wowlan_key_status
+ * @reserved: padding
+ * @tkip_mic_key: TKIP RX MIC key
+ * @sc: RSC/TSC counters
+ */
+struct iwl_wowlan_gtk_status {
+ u8 key[WOWLAN_KEY_MAX_SIZE];
+ u8 key_len;
+ u8 key_flags;
+ u8 key_status;
+ u8 reserved;
+ u8 tkip_mic_key[IWL_MIC_KEY_SIZE];
+ struct iwl_wowlan_all_rsc_tsc_v5 sc;
+} __packed; /* WOWLAN_GTK_MATERIAL_VER_4 */
+
#define IWL_WOWLAN_GTK_IDX_MASK (BIT(0) | BIT(1))
#define IWL_WOWLAN_IGTK_BIGTK_IDX_MASK (BIT(0))
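Following the key_flags layout documented above (AP-assigned index in bits [0:1], internal-DB index in bits [2:6], "currently used" in bit 7), a small decode sketch; the mask names are illustrative, since only the index masks are defined here:

#include <stdio.h>

#define GTK_AP_IDX_MASK		0x03	/* bits [0:1] */
#define GTK_DB_IDX_MASK		0x7c	/* bits [2:6] */
#define GTK_CURRENT_BIT		0x80	/* bit 7 */

static void decode_key_flags(unsigned char flags)
{
	printf("ap key idx %u, db idx %u, current %u\n",
	       flags & GTK_AP_IDX_MASK,
	       (flags & GTK_DB_IDX_MASK) >> 2,
	       !!(flags & GTK_CURRENT_BIT));
}

int main(void)
{
	decode_key_flags(0x85);	/* ap idx 1, db idx 1, currently used */
	return 0;
}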
/**
+ * struct iwl_wowlan_igtk_status_v1 - IGTK status
+ * @key: IGTK material
+ * @ipn: the IGTK packet number (replay counter)
+ * @key_len: IGTK length, if set to 0, the key is not available
+ * @key_flags: information about the key:
+ * bits[0]: key index assigned by the AP (0: index 4, 1: index 5;
+ * for BIGTK, 0: index 6, 1: index 7)
+ * bits[1:5]: IGTK index of the key in the internal DB
+ * bit[6]: Set iff this is the currently used IGTK
+ */
+struct iwl_wowlan_igtk_status_v1 {
+ u8 key[WOWLAN_KEY_MAX_SIZE];
+ u8 ipn[6];
+ u8 key_len;
+ u8 key_flags;
+} __packed; /* WOWLAN_IGTK_MATERIAL_VER_1 */
+
+/**
* struct iwl_wowlan_igtk_status - IGTK status
* @key: IGTK material
* @ipn: the IGTK packet number (replay counter)
@@ -644,13 +700,17 @@ struct iwl_wowlan_gtk_status_v3 {
* (0: index 6, 1: index 7 with bigtk)
* bits[1:5]: IGTK index of the key in the internal DB
* bit[6]: Set iff this is the currently used IGTK
+ * @key_status: key status, see &enum iwl_wowlan_key_status
+ * @reserved: padding
*/
struct iwl_wowlan_igtk_status {
u8 key[WOWLAN_KEY_MAX_SIZE];
u8 ipn[6];
u8 key_len;
u8 key_flags;
-} __packed; /* WOWLAN_IGTK_MATERIAL_VER_1 */
+ u8 key_status;
+ u8 reserved[3];
+} __packed; /* WOWLAN_IGTK_MATERIAL_VER_2 */
/**
* struct iwl_wowlan_status_v6 - WoWLAN status
@@ -700,7 +760,7 @@ struct iwl_wowlan_status_v6 {
*/
struct iwl_wowlan_status_v7 {
struct iwl_wowlan_gtk_status_v2 gtk[WOWLAN_GTK_KEYS_NUM];
- struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM];
+ struct iwl_wowlan_igtk_status_v1 igtk[WOWLAN_IGTK_KEYS_NUM];
__le64 replay_ctr;
__le16 pattern_number;
__le16 non_qos_seq_ctr;
@@ -735,7 +795,7 @@ struct iwl_wowlan_status_v7 {
*/
struct iwl_wowlan_info_notif_v1 {
struct iwl_wowlan_gtk_status_v3 gtk[WOWLAN_GTK_KEYS_NUM];
- struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM];
+ struct iwl_wowlan_igtk_status_v1 igtk[WOWLAN_IGTK_KEYS_NUM];
__le64 replay_ctr;
__le16 pattern_number;
__le16 reserved1;
@@ -817,8 +877,8 @@ struct iwl_wowlan_mlo_gtk {
*/
struct iwl_wowlan_info_notif_v3 {
struct iwl_wowlan_gtk_status_v3 gtk[WOWLAN_GTK_KEYS_NUM];
- struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM];
- struct iwl_wowlan_igtk_status bigtk[WOWLAN_BIGTK_KEYS_NUM];
+ struct iwl_wowlan_igtk_status_v1 igtk[WOWLAN_IGTK_KEYS_NUM];
+ struct iwl_wowlan_igtk_status_v1 bigtk[WOWLAN_BIGTK_KEYS_NUM];
__le64 replay_ctr;
__le16 pattern_number;
__le16 reserved1;
@@ -833,6 +893,45 @@ struct iwl_wowlan_info_notif_v3 {
} __packed; /* WOWLAN_INFO_NTFY_API_S_VER_3 */
/**
+ * struct iwl_wowlan_info_notif_v5 - WoWLAN information notification
+ * @gtk: GTK data
+ * @igtk: IGTK data
+ * @bigtk: BIGTK data
+ * @replay_ctr: GTK rekey replay counter
+ * @pattern_number: number of the matched patterns
+ * @qos_seq_ctr: QoS sequence counters to use next
+ * @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason
+ * @num_of_gtk_rekeys: number of GTK rekeys
+ * @transmitted_ndps: number of transmitted neighbor discovery packets
+ * @received_beacons: number of received beacons
+ * @tid_tear_down: bit mask of tids whose BA sessions were closed
+ * in suspend state
+ * @station_id: station id
+ * @num_mlo_link_keys: number of &struct iwl_wowlan_mlo_gtk structs
+ * following this notif
+ * @tid_offloaded_tx: tid used by the firmware to transmit data packets
+ * while in wowlan
+ * @mlo_gtks: array of GTKs of size num_mlo_link_keys
+ */
+struct iwl_wowlan_info_notif_v5 {
+ struct iwl_wowlan_gtk_status_v3 gtk[WOWLAN_GTK_KEYS_NUM];
+ struct iwl_wowlan_igtk_status_v1 igtk[WOWLAN_IGTK_KEYS_NUM];
+ struct iwl_wowlan_igtk_status_v1 bigtk[WOWLAN_BIGTK_KEYS_NUM];
+ __le64 replay_ctr;
+ __le16 pattern_number;
+ __le16 qos_seq_ctr;
+ __le32 wakeup_reasons;
+ __le32 num_of_gtk_rekeys;
+ __le32 transmitted_ndps;
+ __le32 received_beacons;
+ u8 tid_tear_down;
+ u8 station_id;
+ u8 num_mlo_link_keys;
+ u8 tid_offloaded_tx;
+ struct iwl_wowlan_mlo_gtk mlo_gtks[];
+} __packed; /* WOWLAN_INFO_NTFY_API_S_VER_5 */
+
+/**
* struct iwl_wowlan_info_notif - WoWLAN information notification
* @gtk: GTK data
* @igtk: IGTK data
@@ -854,7 +953,7 @@ struct iwl_wowlan_info_notif_v3 {
* @mlo_gtks: array of GTKs of size num_mlo_link_keys
*/
struct iwl_wowlan_info_notif {
- struct iwl_wowlan_gtk_status_v3 gtk[WOWLAN_GTK_KEYS_NUM];
+ struct iwl_wowlan_gtk_status gtk[WOWLAN_GTK_KEYS_NUM];
struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM];
struct iwl_wowlan_igtk_status bigtk[WOWLAN_BIGTK_KEYS_NUM];
__le64 replay_ctr;
@@ -869,7 +968,7 @@ struct iwl_wowlan_info_notif {
u8 num_mlo_link_keys;
u8 tid_offloaded_tx;
struct iwl_wowlan_mlo_gtk mlo_gtks[];
-} __packed; /* WOWLAN_INFO_NTFY_API_S_VER_5 */
+} __packed; /* WOWLAN_INFO_NTFY_API_S_VER_6 */
/**
* struct iwl_wowlan_wake_pkt_notif - WoWLAN wake packet notification
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
index b9f559dac39f..f76cea6e9ec8 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
@@ -420,6 +420,8 @@ struct iwl_mac_config_cmd {
* eht_support set to true. No longer used since _VER_3 of this command.
* @LINK_CONTEXT_MODIFY_BANDWIDTH: Covers iwl_link_ctx_cfg_cmd::modify_bandwidth.
* Request RX OMI to the AP to modify bandwidth of this link.
+ * @LINK_CONTEXT_MODIFY_UHR_PARAMS: covers iwl_link_ctx_cfg_cmd::npca_params and
+ * iwl_link_ctx_cfg_cmd::prio_edca_params. Since _VER_7.
* @LINK_CONTEXT_MODIFY_ALL: set all above flags
*/
enum iwl_link_ctx_modify_flags {
@@ -432,6 +434,7 @@ enum iwl_link_ctx_modify_flags {
LINK_CONTEXT_MODIFY_BSS_COLOR_DISABLE = BIT(6),
LINK_CONTEXT_MODIFY_EHT_PARAMS = BIT(7),
LINK_CONTEXT_MODIFY_BANDWIDTH = BIT(8),
+ LINK_CONTEXT_MODIFY_UHR_PARAMS = BIT(9),
LINK_CONTEXT_MODIFY_ALL = 0xff,
}; /* LINK_CONTEXT_MODIFY_MASK_E_VER_1 */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h b/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h
index 2a1c2b0f19e4..bb801650a565 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h
@@ -20,7 +20,7 @@ enum iwl_prot_offload_subcmd_ids {
/**
* @WOWLAN_INFO_NOTIFICATION: Notification in
* &struct iwl_wowlan_info_notif_v1, iwl_wowlan_info_notif_v3,
- * or &struct iwl_wowlan_info_notif
+ * &struct iwl_wowlan_info_notif_v5 or &struct iwl_wowlan_info_notif
*/
WOWLAN_INFO_NOTIFICATION = 0xFD,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
index 786b3bf4b448..5eb8d10678fd 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
@@ -571,17 +571,16 @@ enum iwl_ppag_flags {
/**
* union iwl_ppag_table_cmd - union for all versions of PPAG command
* @v1: command version 1 structure.
- * @v2: command version from 2 to 6 are same structure as v2.
- * but has a different format of the flags bitmap
- * @v3: command version 7 structure.
+ * @v5: command version 5 structure.
+ * @v7: command version 7 structure.
* @v1.flags: values from &enum iwl_ppag_flags
* @v1.gain: table of antenna gain values per chain and sub-band
* @v1.reserved: reserved
- * @v2.flags: values from &enum iwl_ppag_flags
- * @v2.gain: table of antenna gain values per chain and sub-band
- * @v3.ppag_config_info: see @struct bios_value_u32
- * @v3.gain: table of antenna gain values per chain and sub-band
- * @v3.reserved: reserved
+ * @v5.flags: values from &enum iwl_ppag_flags
+ * @v5.gain: table of antenna gain values per chain and sub-band
+ * @v7.ppag_config_info: see @struct bios_value_u32
+ * @v7.gain: table of antenna gain values per chain and sub-band
+ * @v7.reserved: reserved
*/
union iwl_ppag_table_cmd {
struct {
@@ -593,30 +592,19 @@ union iwl_ppag_table_cmd {
__le32 flags;
s8 gain[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V2];
s8 reserved[2];
- } __packed v2; /* PER_PLAT_ANTENNA_GAIN_CMD_API_S_VER_2, VER3, VER4,
- * VER5, VER6
- */
+ } __packed v5; /* PER_PLAT_ANTENNA_GAIN_CMD_API_S_VER_5 */
struct {
struct bios_value_u32 ppag_config_info;
s8 gain[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V2];
s8 reserved[2];
- } __packed v3; /* PER_PLAT_ANTENNA_GAIN_CMD_API_S_VER_7 */
+ } __packed v7; /* PER_PLAT_ANTENNA_GAIN_CMD_API_S_VER_7 */
} __packed;
-#define IWL_PPAG_CMD_V4_MASK (IWL_PPAG_ETSI_MASK | IWL_PPAG_CHINA_MASK)
-#define IWL_PPAG_CMD_V5_MASK (IWL_PPAG_CMD_V4_MASK | \
+#define IWL_PPAG_CMD_V1_MASK (IWL_PPAG_ETSI_MASK | IWL_PPAG_CHINA_MASK)
+#define IWL_PPAG_CMD_V5_MASK (IWL_PPAG_CMD_V1_MASK | \
IWL_PPAG_ETSI_LPI_UHB_MASK | \
IWL_PPAG_USA_LPI_UHB_MASK)
-#define IWL_PPAG_CMD_V6_MASK (IWL_PPAG_CMD_V5_MASK | \
- IWL_PPAG_ETSI_VLP_UHB_MASK | \
- IWL_PPAG_ETSI_SP_UHB_MASK | \
- IWL_PPAG_USA_VLP_UHB_MASK | \
- IWL_PPAG_USA_SP_UHB_MASK | \
- IWL_PPAG_CANADA_LPI_UHB_MASK | \
- IWL_PPAG_CANADA_VLP_UHB_MASK | \
- IWL_PPAG_CANADA_SP_UHB_MASK)
-
#define MCC_TO_SAR_OFFSET_TABLE_ROW_SIZE 26
#define MCC_TO_SAR_OFFSET_TABLE_COL_SIZE 13
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
index 3222cbcbe1ab..9c464e7aba10 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
@@ -24,6 +24,8 @@
* for BPSK (MCS 0) with 2 spatial
* streams
* @IWL_TLC_MNG_CFG_FLAGS_EHT_EXTRA_LTF_MSK: enable support for EHT extra LTF
+ * @IWL_TLC_MNG_CFG_FLAGS_UHR_ELR_1_5_MBPS_MSK: support ELR 1.5 Mbps
+ * @IWL_TLC_MNG_CFG_FLAGS_UHR_ELR_3_MBPS_MSK: support ELR 3 Mbps
*/
enum iwl_tlc_mng_cfg_flags {
IWL_TLC_MNG_CFG_FLAGS_STBC_MSK = BIT(0),
@@ -32,6 +34,8 @@ enum iwl_tlc_mng_cfg_flags {
IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_1_MSK = BIT(3),
IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_2_MSK = BIT(4),
IWL_TLC_MNG_CFG_FLAGS_EHT_EXTRA_LTF_MSK = BIT(6),
+ IWL_TLC_MNG_CFG_FLAGS_UHR_ELR_1_5_MBPS_MSK = BIT(7),
+ IWL_TLC_MNG_CFG_FLAGS_UHR_ELR_3_MBPS_MSK = BIT(8),
};
/**
@@ -201,6 +205,37 @@ struct iwl_tlc_config_cmd_v4 {
} __packed; /* TLC_MNG_CONFIG_CMD_API_S_VER_4 */
/**
+ * struct iwl_tlc_config_cmd - TLC configuration
+ * @sta_id: station id
+ * @reserved1: reserved
+ * @max_ch_width: max supported channel width from &enum iwl_tlc_mng_cfg_cw
+ * @mode: &enum iwl_tlc_mng_cfg_mode
+ * @chains: bitmask of &enum iwl_tlc_mng_cfg_chains
+ * @sgi_ch_width_supp: bitmap of SGI support per channel width
+ * use BIT(&enum iwl_tlc_mng_cfg_cw)
+ * @flags: bitmask of &enum iwl_tlc_mng_cfg_flags
+ * @non_ht_rates: bitmap of supported legacy rates
+ * @ht_rates: bitmap of &enum iwl_tlc_mng_ht_rates, per <nss, channel-width>
+ * pair (0 - 80 MHz width and below, 1 - 160 MHz, 2 - 320 MHz).
+ * @max_mpdu_len: max MPDU length, in bytes
+ * @max_tx_op: max TXOP in microseconds for all ACs (BK, BE, VO, VI),
+ * set zero for no limit.
+ */
+struct iwl_tlc_config_cmd {
+ u8 sta_id;
+ u8 reserved1[3];
+ u8 max_ch_width;
+ u8 mode;
+ u8 chains;
+ u8 sgi_ch_width_supp;
+ __le16 flags;
+ __le16 non_ht_rates;
+ __le32 ht_rates[IWL_TLC_NSS_MAX][IWL_TLC_MCS_PER_BW_NUM_V4];
+ __le16 max_mpdu_len;
+ __le16 max_tx_op;
+} __packed; /* TLC_MNG_CONFIG_CMD_API_S_VER_5 */
+
+/**
* enum iwl_tlc_update_flags - updated fields
* @IWL_TLC_NOTIF_FLAG_RATE: last initial rate update
 * @IWL_TLC_NOTIF_FLAG_AMSDU: AMSDU parameters update
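For illustration, a sketch of filling the new v5 command; the field values are placeholders, not recommended settings:

/* Illustrative only: fill a v5 TLC config command for one station */
static void tlc_cmd_example(struct iwl_tlc_config_cmd *cmd, u8 sta_id)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->sta_id = sta_id;
	cmd->max_ch_width = 0;	/* an &enum iwl_tlc_mng_cfg_cw value */
	cmd->flags = cpu_to_le16(IWL_TLC_MNG_CFG_FLAGS_STBC_MSK |
				 IWL_TLC_MNG_CFG_FLAGS_UHR_ELR_1_5_MBPS_MSK);
	cmd->max_tx_op = cpu_to_le16(0);	/* zero means no TXOP limit */
}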
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index 2879be4b8fcb..2ce55859641c 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -830,7 +830,7 @@ iwl_fw_error_dump_file(struct iwl_fw_runtime *fwrt,
}
/* reading RXF/TXF sizes */
- if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) {
+ if (iwl_trans_is_fw_error(fwrt->trans)) {
fifo_len = iwl_fw_rxf_len(fwrt, mem_cfg);
fifo_len += iwl_fw_txf_len(fwrt, mem_cfg);
@@ -2393,7 +2393,7 @@ static u32 iwl_dump_ini_info(struct iwl_fw_runtime *fwrt,
struct iwl_fw_ini_dump_cfg_name *cfg_name;
u32 size = sizeof(*tlv) + sizeof(*dump);
u32 num_of_cfg_names = 0;
- u32 hw_type, is_cdb, is_jacket;
+ u32 hw_type, is_cdb;
list_for_each_entry(node, &fwrt->trans->dbg.debug_info_tlv_list, list) {
size += sizeof(*cfg_name);
@@ -2426,11 +2426,7 @@ static u32 iwl_dump_ini_info(struct iwl_fw_runtime *fwrt,
hw_type = CSR_HW_REV_TYPE(fwrt->trans->info.hw_rev);
is_cdb = CSR_HW_RFID_IS_CDB(fwrt->trans->info.hw_rf_id);
- is_jacket = !!(iwl_read_umac_prph(fwrt->trans, WFPM_OTP_CFG1_ADDR) &
- WFPM_OTP_CFG1_IS_JACKET_BIT);
-
- /* Use bits 12 and 13 to indicate jacket/CDB, respectively */
- hw_type |= (is_jacket | (is_cdb << 1)) << IWL_JACKET_CDB_SHIFT;
+ hw_type |= IWL_CDB_MASK(is_cdb);
dump->hw_type = cpu_to_le32(hw_type);
@@ -2478,36 +2474,6 @@ static u32 iwl_dump_ini_info(struct iwl_fw_runtime *fwrt,
return entry->size;
}
-static u32 iwl_dump_ini_file_name_info(struct iwl_fw_runtime *fwrt,
- struct list_head *list)
-{
- struct iwl_fw_ini_dump_entry *entry;
- struct iwl_dump_file_name_info *tlv;
- u32 len = strnlen(fwrt->trans->dbg.dump_file_name_ext,
- IWL_FW_INI_MAX_NAME);
-
- if (!fwrt->trans->dbg.dump_file_name_ext_valid)
- return 0;
-
- entry = vzalloc(sizeof(*entry) + sizeof(*tlv) + len);
- if (!entry)
- return 0;
-
- entry->size = sizeof(*tlv) + len;
-
- tlv = (void *)entry->data;
- tlv->type = cpu_to_le32(IWL_INI_DUMP_NAME_TYPE);
- tlv->len = cpu_to_le32(len);
- memcpy(tlv->data, fwrt->trans->dbg.dump_file_name_ext, len);
-
- /* add the dump file name extension tlv to the list */
- list_add_tail(&entry->list, list);
-
- fwrt->trans->dbg.dump_file_name_ext_valid = false;
-
- return entry->size;
-}
-
static const struct iwl_dump_ini_mem_ops iwl_dump_ini_region_ops[] = {
[IWL_FW_INI_REGION_INVALID] = {},
[IWL_FW_INI_REGION_INTERNAL_BUFFER] = {
@@ -2764,7 +2730,6 @@ static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
&iwl_dump_ini_region_ops[IWL_FW_INI_REGION_DRAM_IMR]);
if (size) {
- size += iwl_dump_ini_file_name_info(fwrt, list);
size += iwl_dump_ini_info(fwrt, trigger, list);
}
@@ -3151,7 +3116,7 @@ static void iwl_send_dbg_dump_complete_cmd(struct iwl_fw_runtime *fwrt,
.len[0] = sizeof(hcmd_data),
};
- if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status))
+ if (iwl_trans_is_fw_error(fwrt->trans))
return;
if (fw_has_capa(&fwrt->fw->ucode_capa,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dump.c b/drivers/net/wireless/intel/iwlwifi/fw/dump.c
index f633124979ab..ddd714cff2f4 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dump.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dump.c
@@ -14,13 +14,6 @@
#include "iwl-csr.h"
#include "pnvm.h"
-#define FW_ASSERT_LMAC_FATAL 0x70
-#define FW_ASSERT_LMAC2_FATAL 0x72
-#define FW_ASSERT_UMAC_FATAL 0x71
-#define UMAC_RT_NMI_LMAC2_FATAL 0x72
-#define RT_NMI_INTERRUPT_OTHER_LMAC_FATAL 0x73
-#define FW_ASSERT_NMI_UNKNOWN 0x84
-
/*
* Note: This structure is read from the device with IO accesses,
* and the reading already does the endian conversion. As it is
@@ -103,17 +96,6 @@ struct iwl_umac_error_event_table {
#define ERROR_START_OFFSET (1 * sizeof(u32))
#define ERROR_ELEM_SIZE (7 * sizeof(u32))
-static bool iwl_fwrt_if_errorid_other_cpu(u32 err_id)
-{
- err_id &= 0xFF;
-
- if ((err_id >= FW_ASSERT_LMAC_FATAL &&
- err_id <= RT_NMI_INTERRUPT_OTHER_LMAC_FATAL) ||
- err_id == FW_ASSERT_NMI_UNKNOWN)
- return true;
- return false;
-}
-
static void iwl_fwrt_dump_umac_error_log(struct iwl_fw_runtime *fwrt)
{
struct iwl_trans *trans = fwrt->trans;
@@ -131,13 +113,6 @@ static void iwl_fwrt_dump_umac_error_log(struct iwl_fw_runtime *fwrt)
if (table.valid)
fwrt->dump.umac_err_id = table.error_id;
- if (!iwl_fwrt_if_errorid_other_cpu(fwrt->dump.umac_err_id) &&
- !fwrt->trans->dbg.dump_file_name_ext_valid) {
- fwrt->trans->dbg.dump_file_name_ext_valid = true;
- snprintf(fwrt->trans->dbg.dump_file_name_ext, IWL_FW_INI_MAX_NAME,
- "0x%x", fwrt->dump.umac_err_id);
- }
-
if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
IWL_ERR(trans, "Start IWL Error Log Dump:\n");
IWL_ERR(trans, "Transport status: 0x%08lX, valid: %d\n",
@@ -203,7 +178,7 @@ static void iwl_fwrt_dump_lmac_error_log(struct iwl_fw_runtime *fwrt, u8 lmac_nu
if (err)
return;
- err = iwl_finish_nic_init(trans);
+ err = iwl_trans_activate_nic(trans);
if (err)
return;
}
@@ -213,13 +188,6 @@ static void iwl_fwrt_dump_lmac_error_log(struct iwl_fw_runtime *fwrt, u8 lmac_nu
if (table.valid)
fwrt->dump.lmac_err_id[lmac_num] = table.error_id;
- if (!iwl_fwrt_if_errorid_other_cpu(fwrt->dump.lmac_err_id[lmac_num]) &&
- !fwrt->trans->dbg.dump_file_name_ext_valid) {
- fwrt->trans->dbg.dump_file_name_ext_valid = true;
- snprintf(fwrt->trans->dbg.dump_file_name_ext, IWL_FW_INI_MAX_NAME,
- "0x%x", fwrt->dump.lmac_err_id[lmac_num]);
- }
-
if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
IWL_ERR(trans, "Start IWL Error Log Dump:\n");
IWL_ERR(trans, "Transport status: 0x%08lX, valid: %d\n",
@@ -305,16 +273,6 @@ static void iwl_fwrt_dump_tcm_error_log(struct iwl_fw_runtime *fwrt, int idx)
iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
- if (table.valid)
- fwrt->dump.tcm_err_id[idx] = table.error_id;
-
- if (!iwl_fwrt_if_errorid_other_cpu(fwrt->dump.tcm_err_id[idx]) &&
- !fwrt->trans->dbg.dump_file_name_ext_valid) {
- fwrt->trans->dbg.dump_file_name_ext_valid = true;
- snprintf(fwrt->trans->dbg.dump_file_name_ext, IWL_FW_INI_MAX_NAME,
- "0x%x", fwrt->dump.tcm_err_id[idx]);
- }
-
IWL_ERR(fwrt, "TCM%d status:\n", idx + 1);
IWL_ERR(fwrt, "0x%08X | error ID\n", table.error_id);
IWL_ERR(fwrt, "0x%08X | tcm branchlink2\n", table.blink2);
@@ -378,16 +336,6 @@ static void iwl_fwrt_dump_rcm_error_log(struct iwl_fw_runtime *fwrt, int idx)
iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
- if (table.valid)
- fwrt->dump.rcm_err_id[idx] = table.error_id;
-
- if (!iwl_fwrt_if_errorid_other_cpu(fwrt->dump.rcm_err_id[idx]) &&
- !fwrt->trans->dbg.dump_file_name_ext_valid) {
- fwrt->trans->dbg.dump_file_name_ext_valid = true;
- snprintf(fwrt->trans->dbg.dump_file_name_ext, IWL_FW_INI_MAX_NAME,
- "0x%x", fwrt->dump.rcm_err_id[idx]);
- }
-
IWL_ERR(fwrt, "RCM%d status:\n", idx + 1);
IWL_ERR(fwrt, "0x%08X | error ID\n", table.error_id);
IWL_ERR(fwrt, "0x%08X | rcm branchlink2\n", table.blink2);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
index cf41021d59ad..c2a73cc85eff 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
@@ -372,7 +372,8 @@ struct iwl_fw_ini_dump_cfg_name {
u8 cfg_name[IWL_FW_INI_MAX_CFG_NAME];
} __packed;
-#define IWL_JACKET_CDB_SHIFT 12
+#define IWL_CDB_MASK(val) ((val) << 13)
+
/* struct iwl_fw_ini_dump_info - ini dump information
* @version: dump version
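The new macro drops the jacket bit but keeps CDB at the same bit position the old (is_cdb << 1) << IWL_JACKET_CDB_SHIFT arithmetic produced. A short illustration, assuming hw_rev and is_cdb as in iwl_dump_ini_info():

/* Worked example: IWL_CDB_MASK(1) == BIT(13) == 0x2000 */
u32 hw_type = CSR_HW_REV_TYPE(hw_rev);	/* device type in the low bits */

hw_type |= IWL_CDB_MASK(is_cdb);	/* CDB indication at bit 13 */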
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
index 4d91ae065c8d..f297e82d63d2 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
@@ -237,11 +237,12 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
return -ENOENT;
}
-static int iwl_pnvm_get_from_fs(struct iwl_trans *trans, u8 **data, size_t *len)
+static u8 *iwl_pnvm_get_from_fs(struct iwl_trans *trans, size_t *len)
{
const struct firmware *pnvm;
char pnvm_name[MAX_PNVM_NAME];
size_t new_len;
+ u8 *data;
int ret;
iwl_pnvm_get_fs_name(trans, pnvm_name, sizeof(pnvm_name));
@@ -250,29 +251,73 @@ static int iwl_pnvm_get_from_fs(struct iwl_trans *trans, u8 **data, size_t *len)
if (ret) {
IWL_DEBUG_FW(trans, "PNVM file %s not found %d\n",
pnvm_name, ret);
- return ret;
+ return NULL;
}
new_len = pnvm->size;
- *data = kvmemdup(pnvm->data, pnvm->size, GFP_KERNEL);
+ data = kvmemdup(pnvm->data, pnvm->size, GFP_KERNEL);
release_firmware(pnvm);
- if (!*data)
- return -ENOMEM;
+ if (!data)
+ return NULL;
*len = new_len;
- return 0;
+ return data;
+}
+
+/**
+ * enum iwl_pnvm_source - different PNVM possible sources
+ *
+ * @IWL_PNVM_SOURCE_NONE: No PNVM.
+ * @IWL_PNVM_SOURCE_BIOS: PNVM should be read from BIOS.
+ * @IWL_PNVM_SOURCE_EXTERNAL: read .pnvm external file
+ * @IWL_PNVM_SOURCE_EMBEDDED: PNVM is embedded in the .ucode file.
+ */
+enum iwl_pnvm_source {
+ IWL_PNVM_SOURCE_NONE,
+ IWL_PNVM_SOURCE_BIOS,
+ IWL_PNVM_SOURCE_EXTERNAL,
+ IWL_PNVM_SOURCE_EMBEDDED
+};
+
+static enum iwl_pnvm_source iwl_select_pnvm_source(struct iwl_trans *trans,
+ bool intel_sku)
+{
+ /* Get PNVM from BIOS for non-Intel SKU */
+ if (!intel_sku)
+ return IWL_PNVM_SOURCE_BIOS;
+
+ /* Before those devices, PNVM didn't exist at all */
+ if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
+ return IWL_PNVM_SOURCE_NONE;
+
+ /* After those devices, we moved to embedded PNVM */
+ if (trans->mac_cfg->device_family > IWL_DEVICE_FAMILY_AX210)
+ return IWL_PNVM_SOURCE_EMBEDDED;
+
+ /* For IWL_DEVICE_FAMILY_AX210, depends on the CRF */
+ if (CSR_HW_RFID_TYPE(trans->info.hw_rf_id) == IWL_CFG_RF_TYPE_GF)
+ return IWL_PNVM_SOURCE_EXTERNAL;
+
+ return IWL_PNVM_SOURCE_NONE;
}
static const u8 *iwl_get_pnvm_image(struct iwl_trans *trans_p, size_t *len,
__le32 sku_id[3], const struct iwl_fw *fw)
{
struct pnvm_sku_package *package;
+ enum iwl_pnvm_source pnvm_src =
+ iwl_select_pnvm_source(trans_p, sku_id[2] == 0);
u8 *image = NULL;
- /* Get PNVM from BIOS for non-Intel SKU */
- if (sku_id[2]) {
+ IWL_DEBUG_FW(trans_p, "PNVM source %d\n", pnvm_src);
+
+ if (pnvm_src == IWL_PNVM_SOURCE_NONE)
+ return NULL;
+
+ if (pnvm_src == IWL_PNVM_SOURCE_BIOS) {
package = iwl_uefi_get_pnvm(trans_p, len);
if (!IS_ERR_OR_NULL(package)) {
if (*len >= sizeof(*package)) {
@@ -289,18 +334,26 @@ static const u8 *iwl_get_pnvm_image(struct iwl_trans *trans_p, size_t *len,
if (image)
return image;
}
+
+ /* PNVM doesn't exist in BIOS. Find the fallback source */
+ pnvm_src = iwl_select_pnvm_source(trans_p, true);
+ IWL_DEBUG_FW(trans_p, "PNVM in BIOS doesn't exist, try %d\n",
+ pnvm_src);
}
- if (fw->pnvm_data) {
- *len = fw->pnvm_size;
+ if (pnvm_src == IWL_PNVM_SOURCE_EXTERNAL) {
+ image = iwl_pnvm_get_from_fs(trans_p, len);
+ if (image)
+ return image;
+ }
+ if (pnvm_src == IWL_PNVM_SOURCE_EMBEDDED && fw->pnvm_data) {
+ *len = fw->pnvm_size;
return fw->pnvm_data;
}
- /* If it's not available, or for Intel SKU, try from the filesystem */
- if (iwl_pnvm_get_from_fs(trans_p, &image, len))
- return NULL;
- return image;
+ IWL_ERR(trans_p, "Couldn't get PNVM from required source: %d\n", pnvm_src);
+ return NULL;
}
static void
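Worked example of the selection above: an AX210 device with a GF RF on an Intel SKU (sku_id[2] == 0) resolves to IWL_PNVM_SOURCE_EXTERNAL, while a non-Intel SKU tries the BIOS first and, on failure, re-runs the selection with intel_sku forced to true. A sketch of that flow ('bios_image_found' is a hypothetical placeholder):

enum iwl_pnvm_source src = iwl_select_pnvm_source(trans, sku_id[2] == 0);

if (src == IWL_PNVM_SOURCE_BIOS && !bios_image_found)
	src = iwl_select_pnvm_source(trans, true);	/* fallback path */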
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
index 3d6d1a85bb51..e1f28b053253 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
@@ -59,11 +59,16 @@ static const struct dmi_system_id dmi_ppag_approved_list[] = {
DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
},
},
- { .ident = "ASUS",
+ { .ident = "ASUSTEK",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
},
},
+ { .ident = "ASUS",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUS"),
+ },
+ },
{ .ident = "GOOGLE-HP",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Google"),
@@ -141,11 +146,16 @@ static const struct dmi_system_id dmi_tas_approved_list[] = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
},
},
- { .ident = "ASUS",
+ { .ident = "ASUSTEK",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
},
},
+ { .ident = "ASUS",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUS"),
+ },
+ },
{ .ident = "GOOGLE-HP",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Google"),
@@ -305,6 +315,7 @@ static bool iwl_ppag_value_valid(struct iwl_fw_runtime *fwrt, int chain,
return true;
}
+/* Utility function for iwlmvm and iwlxvt */
int iwl_fill_ppag_table(struct iwl_fw_runtime *fwrt,
union iwl_ppag_table_cmd *cmd, int *cmd_size)
{
@@ -344,18 +355,18 @@ int iwl_fill_ppag_table(struct iwl_fw_runtime *fwrt,
num_sub_bands = IWL_NUM_SUB_BANDS_V1;
gain = cmd->v1.gain[0];
*cmd_size = sizeof(cmd->v1);
- cmd->v1.flags = cpu_to_le32(fwrt->ppag_flags);
+ cmd->v1.flags = cpu_to_le32(fwrt->ppag_flags & IWL_PPAG_CMD_V1_MASK);
if (fwrt->ppag_bios_rev >= 1) {
/* in this case FW supports revision 0 */
IWL_DEBUG_RADIO(fwrt,
"PPAG table rev is %d, send truncated table\n",
fwrt->ppag_bios_rev);
}
- } else if (cmd_ver >= 2 && cmd_ver <= 6) {
+ } else if (cmd_ver == 5) {
num_sub_bands = IWL_NUM_SUB_BANDS_V2;
- gain = cmd->v2.gain[0];
- *cmd_size = sizeof(cmd->v2);
- cmd->v2.flags = cpu_to_le32(fwrt->ppag_flags);
+ gain = cmd->v5.gain[0];
+ *cmd_size = sizeof(cmd->v5);
+ cmd->v5.flags = cpu_to_le32(fwrt->ppag_flags & IWL_PPAG_CMD_V5_MASK);
if (fwrt->ppag_bios_rev == 0) {
 /* in this case FW supports revisions 1, 2 or 3 */
IWL_DEBUG_RADIO(fwrt,
@@ -363,11 +374,11 @@ int iwl_fill_ppag_table(struct iwl_fw_runtime *fwrt,
}
} else if (cmd_ver == 7) {
num_sub_bands = IWL_NUM_SUB_BANDS_V2;
- gain = cmd->v3.gain[0];
- *cmd_size = sizeof(cmd->v3);
- cmd->v3.ppag_config_info.table_source = fwrt->ppag_bios_source;
- cmd->v3.ppag_config_info.table_revision = fwrt->ppag_bios_rev;
- cmd->v3.ppag_config_info.value = cpu_to_le32(fwrt->ppag_flags);
+ gain = cmd->v7.gain[0];
+ *cmd_size = sizeof(cmd->v7);
+ cmd->v7.ppag_config_info.table_source = fwrt->ppag_bios_source;
+ cmd->v7.ppag_config_info.table_revision = fwrt->ppag_bios_rev;
+ cmd->v7.ppag_config_info.value = cpu_to_le32(fwrt->ppag_flags);
} else {
IWL_DEBUG_RADIO(fwrt, "Unsupported PPAG command version\n");
return -EINVAL;
@@ -378,30 +389,22 @@ int iwl_fill_ppag_table(struct iwl_fw_runtime *fwrt,
"PPAG MODE bits were read from bios: %d\n",
fwrt->ppag_flags);
- if (cmd_ver == 6)
- cmd->v1.flags &= cpu_to_le32(IWL_PPAG_CMD_V6_MASK);
- else if (cmd_ver == 5)
- cmd->v1.flags &= cpu_to_le32(IWL_PPAG_CMD_V5_MASK);
- else if (cmd_ver < 5)
- cmd->v1.flags &= cpu_to_le32(IWL_PPAG_CMD_V4_MASK);
-
- if ((cmd_ver == 1 &&
- !fw_has_capa(&fwrt->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_PPAG_CHINA_BIOS_SUPPORT)) ||
- (cmd_ver == 2 && fwrt->ppag_bios_rev >= 2)) {
+ if (cmd_ver == 1 &&
+ !fw_has_capa(&fwrt->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_PPAG_CHINA_BIOS_SUPPORT)) {
cmd->v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK);
IWL_DEBUG_RADIO(fwrt, "masking ppag China bit\n");
} else {
IWL_DEBUG_RADIO(fwrt, "isn't masking ppag China bit\n");
}
- /* The 'flags' field is the same in v1 and v2 so we can just
+ /* The 'flags' field is the same in v1 and v5 so we can just
* use v1 to access it.
*/
IWL_DEBUG_RADIO(fwrt,
"PPAG MODE bits going to be sent: %d\n",
(cmd_ver < 7) ? le32_to_cpu(cmd->v1.flags) :
- le32_to_cpu(cmd->v3.ppag_config_info.value));
+ le32_to_cpu(cmd->v7.ppag_config_info.value));
for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
for (j = 0; j < num_sub_bands; j++) {
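For context, a hypothetical caller of iwl_fill_ppag_table(), modelled loosely on the iwlmvm/iwlxvt pattern the comment above mentions; 'fwrt' and the send step are assumed:

union iwl_ppag_table_cmd cmd = {};
int cmd_size, ret;

ret = iwl_fill_ppag_table(fwrt, &cmd, &cmd_size);
if (ret < 0)
	return ret;	/* no valid table or unsupported cmd version */

/* send only cmd_size bytes: the union is sized for the largest
 * variant, while the firmware expects the per-version layout
 */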
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
index a07c512b6ed4..735482e7adf5 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
@@ -12,7 +12,6 @@
#include "fw/api/phy.h"
#include "fw/api/config.h"
#include "fw/api/nvm-reg.h"
-#include "fw/img.h"
#include "iwl-trans.h"
#define BIOS_SAR_MAX_PROFILE_NUM 4
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index bd3bc2846cfa..806f9bcdf4f5 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -150,8 +150,6 @@ struct iwl_fw_runtime {
unsigned long non_collect_ts_start[IWL_FW_INI_TIME_POINT_NUM];
u32 *d3_debug_data;
u32 lmac_err_id[MAX_NUM_LMAC];
- u32 tcm_err_id[MAX_NUM_TCM];
- u32 rcm_err_id[MAX_NUM_RCM];
u32 umac_err_id;
struct iwl_txf_iter_data txf_iter_data;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
index 99a17b9323e9..4ae4d215e633 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
@@ -727,6 +727,8 @@ int iwl_uefi_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func,
struct uefi_cnv_var_general_cfg *data;
int ret = -EINVAL;
+ BUILD_BUG_ON(ARRAY_SIZE(data->functions) < DSM_FUNC_NUM_FUNCS);
+
/* Not supported function index */
if (func >= DSM_FUNC_NUM_FUNCS || func == 5)
return -EOPNOTSUPP;
@@ -742,11 +744,6 @@ int iwl_uefi_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func,
goto out;
}
- if (ARRAY_SIZE(data->functions) != UEFI_MAX_DSM_FUNCS) {
- IWL_DEBUG_RADIO(fwrt, "Invalid size of DSM functions array\n");
- goto out;
- }
-
if (!(data->functions[DSM_FUNC_QUERY] & BIT(func))) {
IWL_DEBUG_RADIO(fwrt, "DSM func %d not in 0x%x\n",
func, data->functions[DSM_FUNC_QUERY]);
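The runtime array-size check is replaced here by a compile-time assertion; BUILD_BUG_ON() only inspects types, so nothing is dereferenced. The same pattern in isolation ('struct example_cfg' is purely illustrative):

#include <linux/build_bug.h>
#include <linux/kernel.h>	/* ARRAY_SIZE() */

struct example_cfg {
	u32 functions[4];
};

static inline void example_cfg_check(struct example_cfg *data)
{
	/* sizeof-only expression: evaluated entirely at build time */
	BUILD_BUG_ON(ARRAY_SIZE(data->functions) < 4);
}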
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 30e5f5a5cd89..a607e7ab914b 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -11,6 +11,7 @@
#include <linux/netdevice.h>
#include <linux/ieee80211.h>
#include <linux/nl80211.h>
+#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include "iwl-csr.h"
#include "iwl-drv.h"
@@ -107,6 +108,9 @@ enum iwl_nvm_type {
MODULE_FIRMWARE(pfx "-" __stringify(api) ".ucode"); \
MODULE_FIRMWARE(pfx ".pnvm")
+#define IWL_CORE_FW(pfx, core) \
+ MODULE_FIRMWARE(pfx "-c" __stringify(core) ".ucode")
+
static inline u8 num_of_ant(u8 mask)
{
return !!((mask) & ANT_A) +
@@ -192,8 +196,8 @@ struct iwl_family_base_params {
u8 max_ll_items;
u8 led_compensation;
- u8 ucode_api_max;
- u8 ucode_api_min;
+ u16 ucode_api_max;
+ u16 ucode_api_min;
u32 mac_addr_from_csr:10;
u8 nvm_hw_section_num;
netdev_features_t features;
@@ -211,6 +215,34 @@ struct iwl_family_base_params {
};
/*
+ * FW is released as "core N release", and we used to have a
+ * gap of 3 between the API version and core number. Now the
+ * reported API version will be 1000 + core and we encode it
+ * in the filename as "c<core>".
+ */
+#define API_IS_CORE_START 1000
+#define API_TO_CORE_OFFS 3
+#define ENCODE_CORE_AS_API(core) (API_IS_CORE_START + (core))
+
+static inline bool iwl_api_is_core_number(int api)
+{
+ return api >= API_IS_CORE_START;
+}
+
+static inline int iwl_api_to_core(int api)
+{
+ if (iwl_api_is_core_number(api))
+ return api - API_IS_CORE_START;
+
+ return api - API_TO_CORE_OFFS;
+}
+
+#define FW_API_FMT "%s%d"
+#define FW_API_ARG(n) \
+ iwl_api_is_core_number(n) ? "c" : "", \
+ iwl_api_is_core_number(n) ? (n) - API_IS_CORE_START : (n)
+
+/*
* @stbc: support Tx STBC and 1*SS Rx STBC
* @ldpc: support Tx/Rx with LDPC
* @use_rts_for_aggregation: use rts/cts protection for HT traffic
@@ -422,8 +454,8 @@ struct iwl_rf_cfg {
u8 valid_tx_ant;
u8 valid_rx_ant;
u8 non_shared_ant;
- u8 ucode_api_max;
- u8 ucode_api_min;
+ u16 ucode_api_max;
+ u16 ucode_api_min;
u16 num_rbds;
};
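A worked example of the encoding above (the firmware prefix is hypothetical):

/* core 99 is reported as API 1000 + 99 = 1099 and lands in the
 * filename as "c99"; a legacy API number such as 101 maps to
 * core 98 (101 - 3)
 */
char name[64];

snprintf(name, sizeof(name), "%s-" FW_API_FMT ".ucode",
	 "iwlwifi-test", FW_API_ARG(ENCODE_CORE_AS_API(99)));
/* name == "iwlwifi-test-c99.ucode" */

snprintf(name, sizeof(name), "%s-" FW_API_FMT ".ucode",
	 "iwlwifi-test", FW_API_ARG(101));
/* name == "iwlwifi-test-101.ucode", i.e. core 98 */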
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 28aad975434b..607fcea6f4ef 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -337,10 +337,18 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
return -EINVAL;
}
+ if (CSR_HW_RFID_TYPE(drv->trans->info.hw_rf_id) == IWL_CFG_RF_TYPE_WH &&
+ CSR_HW_RFID_STEP(drv->trans->info.hw_rf_id) == SILICON_A_STEP) {
+ IWL_ERR(drv, "WH A step is not supported\n");
+ return -EINVAL;
+ }
+
fw_name_pre = iwl_drv_get_fwname_pre(drv->trans, _fw_name_pre);
if (first)
drv->fw_index = ucode_api_max;
+ else if (drv->fw_index == ENCODE_CORE_AS_API(99))
+ drv->fw_index = 101; /* last API-scheme number below core 99 */
else
drv->fw_index--;
@@ -348,13 +356,15 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
IWL_ERR(drv, "no suitable firmware found!\n");
if (ucode_api_min == ucode_api_max) {
- IWL_ERR(drv, "%s-%d is required\n", fw_name_pre,
- ucode_api_max);
+ IWL_ERR(drv, "%s-" FW_API_FMT " is required\n",
+ fw_name_pre, FW_API_ARG(ucode_api_max));
} else {
- IWL_ERR(drv, "minimum version required: %s-%d\n",
- fw_name_pre, ucode_api_min);
- IWL_ERR(drv, "maximum version supported: %s-%d\n",
- fw_name_pre, ucode_api_max);
+ IWL_ERR(drv,
+ "minimum version required: %s-" FW_API_FMT "\n",
+ fw_name_pre, FW_API_ARG(ucode_api_min));
+ IWL_ERR(drv,
+ "maximum version supported: %s-" FW_API_FMT "\n",
+ fw_name_pre, FW_API_ARG(ucode_api_max));
}
IWL_ERR(drv,
@@ -362,8 +372,9 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
return -ENOENT;
}
- snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s-%d.ucode",
- fw_name_pre, drv->fw_index);
+ snprintf(drv->firmware_name, sizeof(drv->firmware_name),
+ "%s-" FW_API_FMT ".ucode",
+ fw_name_pre, FW_API_ARG(drv->fw_index));
IWL_DEBUG_FW_INFO(drv, "attempting to load firmware '%s'\n",
drv->firmware_name);
@@ -1588,6 +1599,7 @@ static void _iwl_op_mode_stop(struct iwl_drv *drv)
*/
static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
{
+ unsigned int min_core, max_core, loaded_core;
struct iwl_drv *drv = context;
struct iwl_fw *fw = &drv->fw;
const struct iwl_ucode_header *ucode;
@@ -1650,11 +1662,24 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
* firmware filename ... but we don't check for that and only rely
* on the API version read from firmware header from here on forward
*/
- if (api_ver < api_min || api_ver > api_max) {
+
+ /*
+ * If a -cN.ucode file was loaded, core version == file version;
+ * otherwise core version == file version (API version) - 3.
+ */
+ if (iwl_api_is_core_number(drv->fw_index))
+ loaded_core = api_ver;
+ else
+ loaded_core = api_ver - API_TO_CORE_OFFS;
+
+ min_core = iwl_api_to_core(api_min);
+ max_core = iwl_api_to_core(api_max);
+
+ if (loaded_core < min_core || loaded_core > max_core) {
IWL_ERR(drv,
"Driver unable to support your firmware API. "
- "Driver supports v%u, firmware is v%u.\n",
- api_max, api_ver);
+ "Driver supports FW core %u..%u, firmware is %u.\n",
+ min_core, max_core, loaded_core);
goto try_again;
}
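To make the version fallback concrete, a sketch of the resulting search order under assumed bounds:

/*
 * Assuming a hypothetical ucode_api_max of ENCODE_CORE_AS_API(101)
 * and ucode_api_min of 99, the requests go:
 *
 *   -c101.ucode -> -c100.ucode -> -c99.ucode -> -101.ucode -> -100.ucode -> -99.ucode
 *
 * The jump from the core scheme (1099) to the legacy API scheme (101)
 * is the explicit else-if bridge added above.
 */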
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.c b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
index 5e483a55a4ba..b1944584c693 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
@@ -3,7 +3,6 @@
* Copyright (C) 2003-2014, 2018-2022, 2024-2025 Intel Corporation
* Copyright (C) 2015-2016 Intel Deutschland GmbH
*/
-#include <linux/delay.h>
#include <linux/device.h>
#include <linux/export.h>
@@ -13,6 +12,7 @@
#include "iwl-debug.h"
#include "iwl-prph.h"
#include "iwl-fh.h"
+#include "pcie/gen1_2/internal.h"
void iwl_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
@@ -160,7 +160,7 @@ int iwl_poll_prph_bit(struct iwl_trans *trans, u32 addr,
do {
if ((iwl_read_prph(trans, addr) & mask) == (bits & mask))
- return t;
+ return 0;
udelay(IWL_POLL_INTERVAL);
t += IWL_POLL_INTERVAL;
} while (t < timeout);
@@ -396,96 +396,11 @@ int iwl_dump_fh(struct iwl_trans *trans, char **buf)
return 0;
}
-#define IWL_HOST_MON_BLOCK_PEMON 0x00
-#define IWL_HOST_MON_BLOCK_HIPM 0x22
-
-#define IWL_HOST_MON_BLOCK_PEMON_VEC0 0x00
-#define IWL_HOST_MON_BLOCK_PEMON_VEC1 0x01
-#define IWL_HOST_MON_BLOCK_PEMON_WFPM 0x06
-
-static void iwl_dump_host_monitor_block(struct iwl_trans *trans,
- u32 block, u32 vec, u32 iter)
-{
- int i;
-
- IWL_ERR(trans, "Host monitor block 0x%x vector 0x%x\n", block, vec);
- iwl_write32(trans, CSR_MONITOR_CFG_REG, (block << 8) | vec);
- for (i = 0; i < iter; i++)
- IWL_ERR(trans, " value [iter %d]: 0x%08x\n",
- i, iwl_read32(trans, CSR_MONITOR_STATUS_REG));
-}
-
-static void iwl_dump_host_monitor(struct iwl_trans *trans)
-{
- switch (trans->mac_cfg->device_family) {
- case IWL_DEVICE_FAMILY_22000:
- case IWL_DEVICE_FAMILY_AX210:
- IWL_ERR(trans, "CSR_RESET = 0x%x\n",
- iwl_read32(trans, CSR_RESET));
- iwl_dump_host_monitor_block(trans, IWL_HOST_MON_BLOCK_PEMON,
- IWL_HOST_MON_BLOCK_PEMON_VEC0, 15);
- iwl_dump_host_monitor_block(trans, IWL_HOST_MON_BLOCK_PEMON,
- IWL_HOST_MON_BLOCK_PEMON_VEC1, 15);
- iwl_dump_host_monitor_block(trans, IWL_HOST_MON_BLOCK_PEMON,
- IWL_HOST_MON_BLOCK_PEMON_WFPM, 15);
- iwl_dump_host_monitor_block(trans, IWL_HOST_MON_BLOCK_HIPM,
- IWL_HOST_MON_BLOCK_PEMON_VEC0, 1);
- break;
- default:
- /* not supported yet */
- return;
- }
-}
-
-int iwl_finish_nic_init(struct iwl_trans *trans)
+int iwl_trans_activate_nic(struct iwl_trans *trans)
{
- const struct iwl_mac_cfg *mac_cfg = trans->mac_cfg;
- u32 poll_ready;
- int err;
-
- if (mac_cfg->bisr_workaround) {
- /* ensure the TOP FSM isn't still in previous reset */
- mdelay(2);
- }
-
- /*
- * Set "initialization complete" bit to move adapter from
- * D0U* --> D0A* (powered-up active) state.
- */
- if (mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
- iwl_set_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ |
- CSR_GP_CNTRL_REG_FLAG_MAC_INIT);
- poll_ready = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS;
- } else {
- iwl_set_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
- poll_ready = CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY;
- }
-
- if (mac_cfg->device_family == IWL_DEVICE_FAMILY_8000)
- udelay(2);
-
- /*
- * Wait for clock stabilization; once stabilized, access to
- * device-internal resources is supported, e.g. iwl_write_prph()
- * and accesses to uCode SRAM.
- */
- err = iwl_poll_bits(trans, CSR_GP_CNTRL, poll_ready, 25000);
- if (err < 0) {
- IWL_DEBUG_INFO(trans, "Failed to wake NIC\n");
-
- iwl_dump_host_monitor(trans);
- }
-
- if (mac_cfg->bisr_workaround) {
- /* ensure BISR shift has finished */
- udelay(200);
- }
-
- return err < 0 ? err : 0;
+ return iwl_pcie_gen1_2_activate_nic(trans);
}
-IWL_EXPORT_SYMBOL(iwl_finish_nic_init);
+IWL_EXPORT_SYMBOL(iwl_trans_activate_nic);
void iwl_trans_sync_nmi_with_addr(struct iwl_trans *trans, u32 inta_addr,
u32 sw_err_bit)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.h b/drivers/net/wireless/intel/iwlwifi/iwl-io.h
index 731cda1a4e66..5bcec239ffc4 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.h
@@ -57,7 +57,7 @@ void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs,
void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask);
void iwl_force_nmi(struct iwl_trans *trans);
-int iwl_finish_nic_init(struct iwl_trans *trans);
+int iwl_trans_activate_nic(struct iwl_trans *trans);
/* Error handling */
int iwl_dump_fh(struct iwl_trans *trans, char **buf);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index a67b9572aac3..23465e4c4b39 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -140,50 +140,6 @@ static struct ieee80211_rate iwl_cfg80211_rates[] = {
#define N_RATES_52 (N_RATES_24 - RATES_52_OFFS)
/**
- * enum iwl_nvm_channel_flags - channel flags in NVM
- * @NVM_CHANNEL_VALID: channel is usable for this SKU/geo
- * @NVM_CHANNEL_IBSS: usable as an IBSS channel and deprecated
- * when %IWL_NVM_SBANDS_FLAGS_LAR enabled.
- * @NVM_CHANNEL_ALLOW_20MHZ_ACTIVITY: active scanning allowed and
- * AP allowed only in 20 MHz. Valid only
- * when %IWL_NVM_SBANDS_FLAGS_LAR enabled.
- * @NVM_CHANNEL_ACTIVE: active scanning allowed and allows IBSS
- * when %IWL_NVM_SBANDS_FLAGS_LAR enabled.
- * @NVM_CHANNEL_RADAR: radar detection required
- * @NVM_CHANNEL_INDOOR_ONLY: only indoor use is allowed
- * @NVM_CHANNEL_GO_CONCURRENT: GO operation is allowed when connected to BSS
- * on same channel on 2.4 or same UNII band on 5.2
- * @NVM_CHANNEL_UNIFORM: uniform spreading required
- * @NVM_CHANNEL_20MHZ: 20 MHz channel okay
- * @NVM_CHANNEL_40MHZ: 40 MHz channel okay
- * @NVM_CHANNEL_80MHZ: 80 MHz channel okay
- * @NVM_CHANNEL_160MHZ: 160 MHz channel okay
- * @NVM_CHANNEL_DC_HIGH: DC HIGH required/allowed (?)
- * @NVM_CHANNEL_VLP: client support connection to UHB VLP AP
- * @NVM_CHANNEL_AFC: client support connection to UHB AFC AP
- * @NVM_CHANNEL_VLP_AP_NOT_ALLOWED: UHB VLP AP not allowed,
- * Valid only when %NVM_CHANNEL_VLP is enabled.
- */
-enum iwl_nvm_channel_flags {
- NVM_CHANNEL_VALID = BIT(0),
- NVM_CHANNEL_IBSS = BIT(1),
- NVM_CHANNEL_ALLOW_20MHZ_ACTIVITY = BIT(2),
- NVM_CHANNEL_ACTIVE = BIT(3),
- NVM_CHANNEL_RADAR = BIT(4),
- NVM_CHANNEL_INDOOR_ONLY = BIT(5),
- NVM_CHANNEL_GO_CONCURRENT = BIT(6),
- NVM_CHANNEL_UNIFORM = BIT(7),
- NVM_CHANNEL_20MHZ = BIT(8),
- NVM_CHANNEL_40MHZ = BIT(9),
- NVM_CHANNEL_80MHZ = BIT(10),
- NVM_CHANNEL_160MHZ = BIT(11),
- NVM_CHANNEL_DC_HIGH = BIT(12),
- NVM_CHANNEL_VLP = BIT(13),
- NVM_CHANNEL_AFC = BIT(14),
- NVM_CHANNEL_VLP_AP_NOT_ALLOWED = BIT(15),
-};
-
-/**
* enum iwl_reg_capa_flags_v1 - global flags applied for the whole regulatory
* domain.
* @REG_CAPA_V1_BF_CCD_LOW_BAND: Beam-forming or Cyclic Delay Diversity in the
@@ -282,30 +238,6 @@ enum iwl_reg_capa_flags_v4 {
*/
#define REG_CAPA_V4_RESP_VER 8
-/**
- * struct iwl_reg_capa - struct for global regulatory capabilities, Used for
- * handling the different APIs of reg_capa_flags.
- *
- * @allow_40mhz: 11n channel with a width of 40Mhz is allowed
- * for this regulatory domain.
- * @allow_80mhz: 11ac channel with a width of 80Mhz is allowed
- * for this regulatory domain (valid only in 5 and 6 Ghz).
- * @allow_160mhz: 11ac channel with a width of 160Mhz is allowed
- * for this regulatory domain (valid only in 5 and 6 Ghz).
- * @allow_320mhz: 11be channel with a width of 320Mhz is allowed
- * for this regulatory domain (valid only in 6 Ghz).
- * @disable_11ax: 11ax is forbidden for this regulatory domain.
- * @disable_11be: 11be is forbidden for this regulatory domain.
- */
-struct iwl_reg_capa {
- bool allow_40mhz;
- bool allow_80mhz;
- bool allow_160mhz;
- bool allow_320mhz;
- bool disable_11ax;
- bool disable_11be;
-};
-
static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level,
int chan, u32 flags)
{
@@ -1042,10 +974,6 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
iftype_data->he_cap.he_cap_elem.phy_cap_info[2] |=
IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO;
- if (fw_has_capa(&fw->ucode_capa, IWL_UCODE_TLV_CAPA_BROADCAST_TWT))
- iftype_data->he_cap.he_cap_elem.mac_cap_info[2] |=
- IEEE80211_HE_MAC_CAP2_BCAST_TWT;
-
if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_22000 &&
!is_ap) {
iftype_data->vendor_elems.data = iwl_vendor_caps;
@@ -1600,9 +1528,10 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_rf_cfg *cfg,
}
IWL_EXPORT_SYMBOL(iwl_parse_nvm_data);
-static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
- int ch_idx, u16 nvm_flags,
- struct iwl_reg_capa reg_capa)
+VISIBLE_IF_IWLWIFI_KUNIT
+u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
+ int ch_idx, u16 nvm_flags,
+ struct iwl_reg_capa reg_capa)
{
u32 flags = NL80211_RRF_NO_HT40;
@@ -1692,6 +1621,7 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
return flags;
}
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_nvm_get_regdom_bw_flags);
static struct iwl_reg_capa iwl_get_reg_capa(u32 flags, u8 resp_ver)
{
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
index 9ce9fa4e78fd..cbc92abf9f87 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
@@ -21,6 +21,80 @@ enum iwl_nvm_sbands_flags {
IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ = BIT(1),
};
+/**
+ * struct iwl_reg_capa - struct for global regulatory capabilities, used for
+ * handling the different APIs of reg_capa_flags.
+ *
+ * @allow_40mhz: 11n channel with a width of 40 MHz is allowed
+ * for this regulatory domain.
+ * @allow_80mhz: 11ac channel with a width of 80 MHz is allowed
+ * for this regulatory domain (valid only in 5 and 6 GHz).
+ * @allow_160mhz: 11ac channel with a width of 160 MHz is allowed
+ * for this regulatory domain (valid only in 5 and 6 GHz).
+ * @allow_320mhz: 11be channel with a width of 320 MHz is allowed
+ * for this regulatory domain (valid only in 6 GHz).
+ * @disable_11ax: 11ax is forbidden for this regulatory domain.
+ * @disable_11be: 11be is forbidden for this regulatory domain.
+ */
+struct iwl_reg_capa {
+ bool allow_40mhz;
+ bool allow_80mhz;
+ bool allow_160mhz;
+ bool allow_320mhz;
+ bool disable_11ax;
+ bool disable_11be;
+};
+
+/**
+ * enum iwl_nvm_channel_flags - channel flags in NVM
+ * @NVM_CHANNEL_VALID: channel is usable for this SKU/geo
+ * @NVM_CHANNEL_IBSS: usable as an IBSS channel and deprecated
+ * when %IWL_NVM_SBANDS_FLAGS_LAR enabled.
+ * @NVM_CHANNEL_ALLOW_20MHZ_ACTIVITY: active scanning allowed and
+ * AP allowed only in 20 MHz. Valid only
+ * when %IWL_NVM_SBANDS_FLAGS_LAR enabled.
+ * @NVM_CHANNEL_ACTIVE: active scanning allowed and allows IBSS
+ * when %IWL_NVM_SBANDS_FLAGS_LAR enabled.
+ * @NVM_CHANNEL_RADAR: radar detection required
+ * @NVM_CHANNEL_INDOOR_ONLY: only indoor use is allowed
+ * @NVM_CHANNEL_GO_CONCURRENT: GO operation is allowed when connected to BSS
+ * on same channel on 2.4 or same UNII band on 5.2
+ * @NVM_CHANNEL_UNIFORM: uniform spreading required
+ * @NVM_CHANNEL_20MHZ: 20 MHz channel okay
+ * @NVM_CHANNEL_40MHZ: 40 MHz channel okay
+ * @NVM_CHANNEL_80MHZ: 80 MHz channel okay
+ * @NVM_CHANNEL_160MHZ: 160 MHz channel okay
+ * @NVM_CHANNEL_DC_HIGH: DC HIGH required/allowed (?)
+ * @NVM_CHANNEL_VLP: client support connection to UHB VLP AP
+ * @NVM_CHANNEL_AFC: client support connection to UHB AFC AP
+ * @NVM_CHANNEL_VLP_AP_NOT_ALLOWED: UHB VLP AP not allowed,
+ * Valid only when %NVM_CHANNEL_VLP is enabled.
+ */
+enum iwl_nvm_channel_flags {
+ NVM_CHANNEL_VALID = BIT(0),
+ NVM_CHANNEL_IBSS = BIT(1),
+ NVM_CHANNEL_ALLOW_20MHZ_ACTIVITY = BIT(2),
+ NVM_CHANNEL_ACTIVE = BIT(3),
+ NVM_CHANNEL_RADAR = BIT(4),
+ NVM_CHANNEL_INDOOR_ONLY = BIT(5),
+ NVM_CHANNEL_GO_CONCURRENT = BIT(6),
+ NVM_CHANNEL_UNIFORM = BIT(7),
+ NVM_CHANNEL_20MHZ = BIT(8),
+ NVM_CHANNEL_40MHZ = BIT(9),
+ NVM_CHANNEL_80MHZ = BIT(10),
+ NVM_CHANNEL_160MHZ = BIT(11),
+ NVM_CHANNEL_DC_HIGH = BIT(12),
+ NVM_CHANNEL_VLP = BIT(13),
+ NVM_CHANNEL_AFC = BIT(14),
+ NVM_CHANNEL_VLP_AP_NOT_ALLOWED = BIT(15),
+};
+
+#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS)
+u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
+ int ch_idx, u16 nvm_flags,
+ struct iwl_reg_capa reg_capa);
+#endif
+
/*
* iwl_parse_nvm_data - parse NVM data and return values
*
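Since iwl_nvm_get_regdom_bw_flags() is now visible to KUnit builds (see the VISIBLE_IF_IWLWIFI_KUNIT change in iwl-nvm-parse.c above), a test can call it directly. A minimal sketch, under the assumption that a domain with allow_40mhz disabled keeps the NO_HT40 restriction; the channel list is illustrative:

#include <kunit/test.h>

static void regdom_bw_flags_test(struct kunit *test)
{
	/* illustrative channel list; contents are not meaningful here */
	static const u16 nvm_chan[] = { 1, 2, 3, 4 };
	struct iwl_reg_capa capa = {
		.allow_40mhz = false,
	};
	u32 flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, 0,
						NVM_CHANNEL_VALID, capa);

	/* assumption: with 40 MHz disallowed, NO_HT40 must survive */
	KUNIT_EXPECT_TRUE(test, flags & NL80211_RRF_NO_HT40);
}

static struct kunit_case regdom_cases[] = {
	KUNIT_CASE(regdom_bw_flags_test),
	{}
};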
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
index 3694b41d6621..5232f66c2d52 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
@@ -268,9 +268,7 @@ static void iwl_trans_restart_wk(struct work_struct *wk)
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
struct device *dev,
- const struct iwl_mac_cfg *mac_cfg,
- unsigned int txcmd_size,
- unsigned int txcmd_align)
+ const struct iwl_mac_cfg *mac_cfg)
{
struct iwl_trans *trans;
#ifdef CONFIG_LOCKDEP
@@ -292,22 +290,12 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
INIT_DELAYED_WORK(&trans->restart.wk, iwl_trans_restart_wk);
- snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
- "iwl_cmd_pool:%s", dev_name(trans->dev));
- trans->dev_cmd_pool =
- kmem_cache_create(trans->dev_cmd_pool_name,
- txcmd_size, txcmd_align,
- SLAB_HWCACHE_ALIGN, NULL);
- if (!trans->dev_cmd_pool)
- return NULL;
-
return trans;
}
void iwl_trans_free(struct iwl_trans *trans)
{
cancel_delayed_work_sync(&trans->restart.wk);
- kmem_cache_destroy(trans->dev_cmd_pool);
}
int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
@@ -318,9 +306,6 @@ int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
test_bit(STATUS_RFKILL_OPMODE, &trans->status)))
return -ERFKILL;
- if (unlikely(test_bit(STATUS_SUSPENDED, &trans->status)))
- return -EHOSTDOWN;
-
if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
return -EIO;
@@ -348,6 +333,19 @@ int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
}
IWL_EXPORT_SYMBOL(iwl_trans_send_cmd);
+struct iwl_device_tx_cmd *iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
+{
+ return iwl_pcie_gen1_2_alloc_tx_cmd(trans);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_alloc_tx_cmd);
+
+void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
+ struct iwl_device_tx_cmd *dev_cmd)
+{
+ iwl_pcie_gen1_2_free_tx_cmd(trans, dev_cmd);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_free_tx_cmd);
+
/* Comparator for struct iwl_hcmd_names.
* Used in the binary search over a list of host commands.
*
@@ -399,7 +397,7 @@ void iwl_trans_op_mode_enter(struct iwl_trans *trans,
WARN_ON_ONCE(!trans->conf.rx_mpdu_cmd);
- iwl_trans_pcie_op_mode_enter(trans);
+ iwl_pcie_gen1_2_op_mode_enter(trans);
}
IWL_EXPORT_SYMBOL(iwl_trans_op_mode_enter);
@@ -408,8 +406,6 @@ int iwl_trans_start_hw(struct iwl_trans *trans)
might_sleep();
clear_bit(STATUS_TRANS_RESET_IN_PROGRESS, &trans->status);
- /* opmode may not resume if it detects errors */
- clear_bit(STATUS_SUSPENDED, &trans->status);
return iwl_trans_pcie_start_hw(trans);
}
@@ -507,33 +503,19 @@ iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask,
sanitize_ops, sanitize_ctx);
}
-int iwl_trans_d3_suspend(struct iwl_trans *trans, bool test, bool reset)
+int iwl_trans_d3_suspend(struct iwl_trans *trans, bool reset)
{
- int err;
-
might_sleep();
- err = iwl_trans_pcie_d3_suspend(trans, test, reset);
-
- if (!err)
- set_bit(STATUS_SUSPENDED, &trans->status);
-
- return err;
+ return iwl_trans_pcie_d3_suspend(trans, reset);
}
IWL_EXPORT_SYMBOL(iwl_trans_d3_suspend);
-int iwl_trans_d3_resume(struct iwl_trans *trans, enum iwl_d3_status *status,
- bool test, bool reset)
+int iwl_trans_d3_resume(struct iwl_trans *trans, bool reset)
{
- int err;
-
might_sleep();
- err = iwl_trans_pcie_d3_resume(trans, status, test, reset);
-
- clear_bit(STATUS_SUSPENDED, &trans->status);
-
- return err;
+ return iwl_trans_pcie_d3_resume(trans, reset);
}
IWL_EXPORT_SYMBOL(iwl_trans_d3_resume);
@@ -825,3 +807,18 @@ void iwl_trans_set_reduce_power(struct iwl_trans *trans,
{
iwl_trans_pcie_ctx_info_v2_set_reduce_power(trans, capa);
}
+
+bool iwl_trans_is_pm_supported(struct iwl_trans *trans)
+{
+ if (WARN_ON(trans->mac_cfg->gen2))
+ return false;
+
+ return iwl_pcie_gen1_is_pm_supported(trans);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_is_pm_supported);
+
+bool iwl_trans_is_ltr_enabled(struct iwl_trans *trans)
+{
+ return iwl_pcie_gen1_2_is_ltr_enabled(trans);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_is_ltr_enabled);
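The wrappers keep the op-mode usage pattern unchanged even though the kmem cache now lives in the PCIe layer. A condensed sketch, assuming 'trans', 'skb' and 'queue' are in scope and the usual caller-frees-on-failure convention holds:

struct iwl_device_tx_cmd *dev_cmd;

dev_cmd = iwl_trans_alloc_tx_cmd(trans);
if (!dev_cmd)
	return -ENOMEM;

/* ... fill the command, then hand it to the transport ... */
if (iwl_trans_tx(trans, skb, dev_cmd, queue)) {
	/* on failure the caller still owns the command */
	iwl_trans_free_tx_cmd(trans, dev_cmd);
	return -EIO;
}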
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index d0e658801c2e..a0cc5d7745e8 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -275,16 +275,6 @@ static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
#define IWL_9000_MAX_RX_HW_QUEUES 1
/**
- * enum iwl_d3_status - WoWLAN image/device status
- * @IWL_D3_STATUS_ALIVE: firmware is still running after resume
- * @IWL_D3_STATUS_RESET: device was reset while suspended
- */
-enum iwl_d3_status {
- IWL_D3_STATUS_ALIVE,
- IWL_D3_STATUS_RESET,
-};
-
-/**
* enum iwl_trans_status: transport status flags
* @STATUS_SYNC_HCMD_ACTIVE: a SYNC command is being processed
* @STATUS_DEVICE_ENABLED: APM is enabled
@@ -294,16 +284,12 @@ enum iwl_d3_status {
* @STATUS_RFKILL_OPMODE: RF-kill state reported to opmode
* @STATUS_FW_ERROR: the fw is in error state
* @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation
- * @STATUS_SUPPRESS_CMD_ERROR_ONCE: suppress "FW error in SYNC CMD" once,
- * e.g. for testing
* @STATUS_IN_SW_RESET: device is undergoing reset, cleared by opmode
* via iwl_trans_finish_sw_reset()
* @STATUS_RESET_PENDING: reset worker was scheduled, but didn't dump
* the firmware state yet
* @STATUS_TRANS_RESET_IN_PROGRESS: reset is still in progress, don't
* attempt another reset yet
- * @STATUS_SUSPENDED: device is suspended, don't send commands that
- * aren't marked accordingly
*/
enum iwl_trans_status {
STATUS_SYNC_HCMD_ACTIVE,
@@ -314,11 +300,9 @@ enum iwl_trans_status {
STATUS_RFKILL_OPMODE,
STATUS_FW_ERROR,
STATUS_TRANS_DEAD,
- STATUS_SUPPRESS_CMD_ERROR_ONCE,
STATUS_IN_SW_RESET,
STATUS_RESET_PENDING,
STATUS_TRANS_RESET_IN_PROGRESS,
- STATUS_SUSPENDED,
};
static inline int
@@ -658,8 +642,6 @@ struct iwl_pc_data {
* @restart_required: indicates debug restart is required
* @last_tp_resetfw: last handling of reset during debug timepoint
* @imr_data: IMR debug data allocation
- * @dump_file_name_ext: dump file name extension
- * @dump_file_name_ext_valid: dump file name extension if valid or not
* @num_pc: number of program counter for cpu
* @pc_data: details of the program counter
* @yoyo_bin_loaded: tells if a yoyo debug file has been loaded
@@ -698,8 +680,6 @@ struct iwl_trans_debug {
bool restart_required;
u32 last_tp_resetfw;
struct iwl_imr_data imr_data;
- u8 dump_file_name_ext[IWL_FW_INI_MAX_NAME];
- bool dump_file_name_ext_valid;
u32 num_pc;
struct iwl_pc_data *pc_data;
bool yoyo_bin_loaded;
@@ -830,7 +810,6 @@ struct iwl_txq {
* @hw_rf_id: the device RF ID
* @hw_cnv_id: the device CNV ID
* @hw_crf_id: the device CRF ID
- * @hw_wfpm_id: the device wfpm ID
* @hw_id: the ID of the device / sub-device
* Bits 0:15 represent the sub-device ID
* Bits 16:31 represent the device ID.
@@ -846,7 +825,6 @@ struct iwl_trans_info {
u32 hw_rf_id;
u32 hw_crf_id;
u32 hw_cnv_id;
- u32 hw_wfpm_id;
u32 hw_id;
u8 pcie_link_speed;
u8 num_rxqs;
@@ -866,14 +844,11 @@ struct iwl_trans_info {
* @dev: pointer to struct device * that represents the device
* @info: device information for use by other layers
* @pnvm_loaded: indicates PNVM was loaded
- * @pm_support: set to true in start_hw if link pm is supported
- * @ltr_enabled: set to true if the LTR is enabled
+ * @suppress_cmd_error_once: suppress "FW error in SYNC CMD" once,
+ * e.g. for testing
* @fail_to_parse_pnvm_image: set to true if pnvm parsing failed
* @reduce_power_loaded: indicates reduced power section was loaded
* @failed_to_load_reduce_power_image: set to true if pnvm loading failed
- * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
- * The user should use iwl_trans_{alloc,free}_tx_cmd.
- * @dev_cmd_pool_name: name for the TX command allocation pool
* @dbgfs_dir: iwlwifi debugfs base dir for this device
* @sync_cmd_lockdep_map: lockdep map for checking sync commands
* @dbg: additional debug data, see &struct iwl_trans_debug
@@ -905,18 +880,13 @@ struct iwl_trans {
const struct iwl_trans_info info;
bool reduced_cap_sku;
bool step_urm;
+ bool suppress_cmd_error_once;
- bool pm_support;
- bool ltr_enabled;
u8 pnvm_loaded:1;
u8 fail_to_parse_pnvm_image:1;
u8 reduce_power_loaded:1;
u8 failed_to_load_reduce_power_image:1;
- /* The following fields are internal only */
- struct kmem_cache *dev_cmd_pool;
- char dev_cmd_pool_name[50];
-
struct dentry *dbgfs_dir;
#ifdef CONFIG_LOCKDEP
@@ -956,29 +926,21 @@ int iwl_trans_start_fw(struct iwl_trans *trans, const struct iwl_fw *fw,
void iwl_trans_stop_device(struct iwl_trans *trans);
-int iwl_trans_d3_suspend(struct iwl_trans *trans, bool test, bool reset);
+int iwl_trans_d3_suspend(struct iwl_trans *trans, bool reset);
-int iwl_trans_d3_resume(struct iwl_trans *trans, enum iwl_d3_status *status,
- bool test, bool reset);
+int iwl_trans_d3_resume(struct iwl_trans *trans, bool reset);
struct iwl_trans_dump_data *
iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask,
const struct iwl_dump_sanitize_ops *sanitize_ops,
void *sanitize_ctx);
-static inline struct iwl_device_tx_cmd *
-iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
-{
- return kmem_cache_zalloc(trans->dev_cmd_pool, GFP_ATOMIC);
-}
+struct iwl_device_tx_cmd *iwl_trans_alloc_tx_cmd(struct iwl_trans *trans);
int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
-static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
- struct iwl_device_tx_cmd *dev_cmd)
-{
- kmem_cache_free(trans->dev_cmd_pool, dev_cmd);
-}
+void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
+ struct iwl_device_tx_cmd *dev_cmd);
int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_tx_cmd *dev_cmd, int queue);
@@ -1205,9 +1167,7 @@ static inline void iwl_trans_finish_sw_reset(struct iwl_trans *trans)
*****************************************************/
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
struct device *dev,
- const struct iwl_mac_cfg *mac_cfg,
- unsigned int txcmd_size,
- unsigned int txcmd_align);
+ const struct iwl_mac_cfg *mac_cfg);
void iwl_trans_free(struct iwl_trans *trans);
static inline bool iwl_trans_is_hw_error_value(u32 val)
@@ -1230,11 +1190,6 @@ static inline u16 iwl_trans_get_num_rbds(struct iwl_trans *trans)
return result;
}
-static inline void iwl_trans_suppress_cmd_error_once(struct iwl_trans *trans)
-{
- set_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE, &trans->status);
-}
-
static inline bool iwl_trans_device_enabled(struct iwl_trans *trans)
{
return test_bit(STATUS_DEVICE_ENABLED, &trans->status);
@@ -1245,6 +1200,20 @@ static inline bool iwl_trans_is_dead(struct iwl_trans *trans)
return test_bit(STATUS_TRANS_DEAD, &trans->status);
}
+static inline bool iwl_trans_is_fw_error(struct iwl_trans *trans)
+{
+ return test_bit(STATUS_FW_ERROR, &trans->status);
+}
+
+/*
+ * This function notifies the transport layer of a firmware error; the
+ * recovery will be handled by the op mode.
+ */
+static inline void iwl_trans_notify_fw_error(struct iwl_trans *trans)
+{
+ trans->state = IWL_TRANS_NO_FW;
+ set_bit(STATUS_FW_ERROR, &trans->status);
+}
/*****************************************************
* PCIe handling
*****************************************************/
@@ -1289,4 +1258,8 @@ static inline u16 iwl_trans_get_device_id(struct iwl_trans *trans)
return u32_get_bits(trans->info.hw_id, GENMASK(31, 16));
}
+bool iwl_trans_is_pm_supported(struct iwl_trans *trans);
+
+bool iwl_trans_is_ltr_enabled(struct iwl_trans *trans);
+
#endif /* __iwl_trans_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mei/sap.h b/drivers/net/wireless/intel/iwlwifi/mei/sap.h
index ba1f75f739c2..f985ab90d41c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mei/sap.h
+++ b/drivers/net/wireless/intel/iwlwifi/mei/sap.h
@@ -300,13 +300,11 @@ enum iwl_sap_msg {
* @type: See &enum iwl_sap_msg.
* @len: The length of the message (header not included).
* @seq_num: For debug.
- * @payload: The payload of the message.
*/
struct iwl_sap_hdr {
__le16 type;
__le16 len;
__le32 seq_num;
- u8 payload[];
};
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/d3.c b/drivers/net/wireless/intel/iwlwifi/mld/d3.c
index ed0a0f76f1c5..1d4282a21f09 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/d3.c
@@ -11,6 +11,7 @@
#include "mcc.h"
#include "sta.h"
#include "mlo.h"
+#include "key.h"
#include "fw/api/d3.h"
#include "fw/api/offload.h"
@@ -40,8 +41,6 @@ enum iwl_mld_d3_notif {
struct iwl_mld_resume_key_iter_data {
struct iwl_mld *mld;
struct iwl_mld_wowlan_status *wowlan_status;
- u32 num_keys, gtk_cipher, igtk_cipher, bigtk_cipher;
- bool unhandled_cipher;
};
struct iwl_mld_suspend_key_iter_data {
@@ -71,6 +70,12 @@ struct iwl_mld_mcast_key_data {
};
+struct iwl_mld_wowlan_mlo_key {
+ u8 key[WOWLAN_KEY_MAX_SIZE];
+ u8 idx, type, link_id;
+ u8 pn[6];
+};
+
/**
* struct iwl_mld_wowlan_status - contains wowlan status data from
* all wowlan notifications
@@ -89,6 +94,8 @@ struct iwl_mld_mcast_key_data {
 * @bigtk: data of the last two GTKs used by the FW upon resume
* @ptk: last seq numbers per tid passed by the FW,
* holds both in tkip and aes formats
+ * @num_mlo_keys: number of &struct iwl_mld_wowlan_mlo_key structs
+ * @mlo_keys: array of MLO keys
*/
struct iwl_mld_wowlan_status {
u32 wakeup_reasons;
@@ -108,6 +115,9 @@ struct iwl_mld_wowlan_status {
struct ieee80211_key_seq tkip_seq[IWL_MAX_TID_COUNT];
} ptk;
+
+ int num_mlo_keys;
+ struct iwl_mld_wowlan_mlo_key mlo_keys[WOWLAN_MAX_MLO_KEYS];
};
#define NETDETECT_QUERY_BUF_LEN \
@@ -271,7 +281,7 @@ iwl_mld_convert_gtk_resume_seq(struct iwl_mld_mcast_key_data *gtk_data,
static void
iwl_mld_convert_gtk_resume_data(struct iwl_mld *mld,
struct iwl_mld_wowlan_status *wowlan_status,
- const struct iwl_wowlan_gtk_status_v3 *gtk_data,
+ const struct iwl_wowlan_gtk_status *gtk_data,
const struct iwl_wowlan_all_rsc_tsc_v5 *sc)
{
int status_idx = 0;
@@ -283,8 +293,9 @@ iwl_mld_convert_gtk_resume_data(struct iwl_mld *mld,
for (int notif_idx = 0; notif_idx < ARRAY_SIZE(wowlan_status->gtk);
notif_idx++) {
int rsc_idx;
+ u8 key_status = gtk_data[notif_idx].key_status;
- if (!(gtk_data[notif_idx].key_len))
+ if (!key_status)
continue;
wowlan_status->gtk[status_idx].len =
@@ -294,10 +305,6 @@ iwl_mld_convert_gtk_resume_data(struct iwl_mld *mld,
wowlan_status->gtk[status_idx].id =
wowlan_status->gtk[status_idx].flags &
IWL_WOWLAN_GTK_IDX_MASK;
- memcpy(wowlan_status->gtk[status_idx].key,
- gtk_data[notif_idx].key,
- sizeof(gtk_data[notif_idx].key));
-
/* The rsc for both gtk keys are stored in gtk[0]->sc->mcast_rsc
* The gtk ids can be any two numbers between 0 and 3,
* the id_map maps between the key id and the index in sc->mcast
@@ -307,13 +314,27 @@ iwl_mld_convert_gtk_resume_data(struct iwl_mld *mld,
iwl_mld_convert_gtk_resume_seq(&wowlan_status->gtk[status_idx],
sc, rsc_idx);
- /* if it's as long as the TKIP encryption key, copy MIC key */
- if (wowlan_status->gtk[status_idx].len ==
- NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY)
- memcpy(wowlan_status->gtk[status_idx].key +
- NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY,
- gtk_data[notif_idx].tkip_mic_key,
- sizeof(gtk_data[notif_idx].tkip_mic_key));
+ if (key_status == IWL_WOWLAN_STATUS_NEW_KEY) {
+ memcpy(wowlan_status->gtk[status_idx].key,
+ gtk_data[notif_idx].key,
+ sizeof(gtk_data[notif_idx].key));
+
+ /* if it's as long as the TKIP encryption key,
+ * copy MIC key
+ */
+ if (wowlan_status->gtk[status_idx].len ==
+ NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY)
+ memcpy(wowlan_status->gtk[status_idx].key +
+ NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY,
+ gtk_data[notif_idx].tkip_mic_key,
+ sizeof(gtk_data[notif_idx].tkip_mic_key));
+ } else {
+ /* If the key status is IWL_WOWLAN_STATUS_OLD_KEY, it
+ * indicates that no key material is present. Set the
+ * key length to 0 as an indication.
+ */
+ wowlan_status->gtk[status_idx].len = 0;
+ }
status_idx++;
}
}
@@ -360,11 +381,11 @@ static void
iwl_mld_convert_igtk_resume_data(struct iwl_mld_wowlan_status *wowlan_status,
const struct iwl_wowlan_igtk_status *igtk)
{
- BUILD_BUG_ON(sizeof(wowlan_status->igtk.key) < sizeof(igtk->key));
-
- if (!igtk->key_len)
+ if (!igtk->key_status)
return;
+ BUILD_BUG_ON(sizeof(wowlan_status->igtk.key) < sizeof(igtk->key));
+
wowlan_status->igtk.len = igtk->key_len;
wowlan_status->igtk.flags = igtk->key_flags;
wowlan_status->igtk.id =
@@ -372,7 +393,15 @@ iwl_mld_convert_igtk_resume_data(struct iwl_mld_wowlan_status *wowlan_status,
IWL_WOWLAN_IGTK_BIGTK_IDX_MASK) +
WOWLAN_IGTK_MIN_INDEX;
- memcpy(wowlan_status->igtk.key, igtk->key, sizeof(igtk->key));
+ if (igtk->key_status == IWL_WOWLAN_STATUS_NEW_KEY)
+ memcpy(wowlan_status->igtk.key, igtk->key, sizeof(igtk->key));
+ else
+ /* If the key status is IWL_WOWLAN_STATUS_OLD_KEY, it indicates
+ * that no key material is present. Set the key length to 0
+ * as an indication.
+ */
+ wowlan_status->igtk.len = 0;
+
iwl_mld_convert_mcast_ipn(&wowlan_status->igtk, igtk);
}
@@ -386,7 +415,7 @@ iwl_mld_convert_bigtk_resume_data(struct iwl_mld_wowlan_status *wowlan_status,
for (int notif_idx = 0; notif_idx < WOWLAN_BIGTK_KEYS_NUM;
notif_idx++) {
- if (!bigtk[notif_idx].key_len)
+ if (!bigtk[notif_idx].key_status)
continue;
wowlan_status->bigtk[status_idx].len = bigtk[notif_idx].key_len;
@@ -399,32 +428,218 @@ iwl_mld_convert_bigtk_resume_data(struct iwl_mld_wowlan_status *wowlan_status,
BUILD_BUG_ON(sizeof(wowlan_status->bigtk[status_idx].key) <
sizeof(bigtk[notif_idx].key));
- memcpy(wowlan_status->bigtk[status_idx].key,
- bigtk[notif_idx].key, sizeof(bigtk[notif_idx].key));
+ if (bigtk[notif_idx].key_status == IWL_WOWLAN_STATUS_NEW_KEY)
+ memcpy(wowlan_status->bigtk[status_idx].key,
+ bigtk[notif_idx].key,
+ sizeof(bigtk[notif_idx].key));
+ else
+ /* If the key status is IWL_WOWLAN_STATUS_OLD_KEY, it
+ * indicates that no key material is present. Set the
+ * key length to 0 as an indication.
+ */
+ wowlan_status->bigtk[status_idx].len = 0;
+
iwl_mld_convert_mcast_ipn(&wowlan_status->bigtk[status_idx],
&bigtk[notif_idx]);
status_idx++;
}
}
+static void
+iwl_mld_convert_mlo_keys(struct iwl_mld *mld,
+ const struct iwl_wowlan_info_notif *notif,
+ struct iwl_mld_wowlan_status *wowlan_status)
+{
+ if (!notif->num_mlo_link_keys)
+ return;
+
+ wowlan_status->num_mlo_keys = notif->num_mlo_link_keys;
+
+ if (IWL_FW_CHECK(mld, wowlan_status->num_mlo_keys > WOWLAN_MAX_MLO_KEYS,
+ "Too many MLO keys: %d, max %d\n",
+ wowlan_status->num_mlo_keys, WOWLAN_MAX_MLO_KEYS))
+ wowlan_status->num_mlo_keys = WOWLAN_MAX_MLO_KEYS;
+
+ for (int i = 0; i < wowlan_status->num_mlo_keys; i++) {
+ const struct iwl_wowlan_mlo_gtk *fw_mlo_key = &notif->mlo_gtks[i];
+ struct iwl_mld_wowlan_mlo_key *driver_mlo_key =
+ &wowlan_status->mlo_keys[i];
+ u16 flags = le16_to_cpu(fw_mlo_key->flags);
+
+ driver_mlo_key->link_id =
+ u16_get_bits(flags, WOWLAN_MLO_GTK_FLAG_LINK_ID_MSK);
+ driver_mlo_key->type =
+ u16_get_bits(flags, WOWLAN_MLO_GTK_FLAG_KEY_TYPE_MSK);
+ driver_mlo_key->idx =
+ u16_get_bits(flags, WOWLAN_MLO_GTK_FLAG_KEY_ID_MSK);
+
+ BUILD_BUG_ON(sizeof(driver_mlo_key->key) != sizeof(fw_mlo_key->key));
+ BUILD_BUG_ON(sizeof(driver_mlo_key->pn) != sizeof(fw_mlo_key->pn));
+
+ memcpy(driver_mlo_key->key, fw_mlo_key->key, sizeof(fw_mlo_key->key));
+ memcpy(driver_mlo_key->pn, fw_mlo_key->pn, sizeof(fw_mlo_key->pn));
+ }
+}
+
+static void
+iwl_mld_convert_wowlan_notif_v5(const struct iwl_wowlan_info_notif_v5 *notif_v5,
+ struct iwl_wowlan_info_notif *notif)
+{
+ /* Convert GTK from v3 to the new format */
+ BUILD_BUG_ON(ARRAY_SIZE(notif->gtk) != ARRAY_SIZE(notif_v5->gtk));
+
+ for (int i = 0; i < ARRAY_SIZE(notif_v5->gtk); i++) {
+ const struct iwl_wowlan_gtk_status_v3 *gtk_v3 = &notif_v5->gtk[i];
+ struct iwl_wowlan_gtk_status *gtk = &notif->gtk[i];
+
+ /* Copy key material and metadata */
+ BUILD_BUG_ON(sizeof(gtk->key) != sizeof(gtk_v3->key));
+ BUILD_BUG_ON(sizeof(gtk->tkip_mic_key) != sizeof(gtk_v3->tkip_mic_key));
+
+ memcpy(gtk->key, gtk_v3->key, sizeof(gtk_v3->key));
+
+ gtk->key_len = gtk_v3->key_len;
+ gtk->key_flags = gtk_v3->key_flags;
+
+ memcpy(gtk->tkip_mic_key, gtk_v3->tkip_mic_key,
+ sizeof(gtk_v3->tkip_mic_key));
+ gtk->sc = gtk_v3->sc;
+
+ /* Set key_status based on whether key material is present.
+ * In v5, a key is either invalid (should be skipped) or has
+ * both metadata and the key itself.
+ */
+ if (gtk_v3->key_len)
+ gtk->key_status = IWL_WOWLAN_STATUS_NEW_KEY;
+ }
+
+ /* Convert IGTK from v1 to the new format; only one IGTK is passed by the FW */
+ BUILD_BUG_ON(offsetof(struct iwl_wowlan_igtk_status, key_status) !=
+ sizeof(struct iwl_wowlan_igtk_status_v1));
+
+ memcpy(&notif->igtk[0], &notif_v5->igtk[0],
+ offsetof(struct iwl_wowlan_igtk_status, key_status));
+
+	/* Set key_status based on whether key material is present.
+	 * In v5, a key is either invalid (and should be skipped) or
+	 * carries both its metadata and the key material itself.
+	 */
+ if (notif_v5->igtk[0].key_len)
+ notif->igtk[0].key_status = IWL_WOWLAN_STATUS_NEW_KEY;
+
+ /* Convert BIGTK from v1 to the new format */
+ BUILD_BUG_ON(ARRAY_SIZE(notif->bigtk) != ARRAY_SIZE(notif_v5->bigtk));
+
+ for (int i = 0; i < ARRAY_SIZE(notif_v5->bigtk); i++) {
+ /* Copy everything until key_status */
+ memcpy(&notif->bigtk[i], &notif_v5->bigtk[i],
+ offsetof(struct iwl_wowlan_igtk_status, key_status));
+
+		/* Set key_status based on whether key material is present.
+		 * In v5, a key is either invalid (and should be skipped) or
+		 * carries both its metadata and the key material itself.
+		 */
+ if (notif_v5->bigtk[i].key_len)
+ notif->bigtk[i].key_status = IWL_WOWLAN_STATUS_NEW_KEY;
+ }
+
+ notif->replay_ctr = notif_v5->replay_ctr;
+ notif->pattern_number = notif_v5->pattern_number;
+ notif->qos_seq_ctr = notif_v5->qos_seq_ctr;
+ notif->wakeup_reasons = notif_v5->wakeup_reasons;
+ notif->num_of_gtk_rekeys = notif_v5->num_of_gtk_rekeys;
+ notif->transmitted_ndps = notif_v5->transmitted_ndps;
+ notif->received_beacons = notif_v5->received_beacons;
+ notif->tid_tear_down = notif_v5->tid_tear_down;
+ notif->station_id = notif_v5->station_id;
+ notif->num_mlo_link_keys = notif_v5->num_mlo_link_keys;
+ notif->tid_offloaded_tx = notif_v5->tid_offloaded_tx;
+
+ /* Copy MLO GTK keys */
+ if (notif_v5->num_mlo_link_keys) {
+ memcpy(notif->mlo_gtks, notif_v5->mlo_gtks,
+ notif_v5->num_mlo_link_keys * sizeof(struct iwl_wowlan_mlo_gtk));
+ }
+}
+
+static bool iwl_mld_validate_wowlan_notif_size(struct iwl_mld *mld,
+ u32 len,
+ u32 expected_len,
+ u8 num_mlo_keys,
+ int version)
+{
+ u32 len_with_mlo_keys;
+
+ if (IWL_FW_CHECK(mld, len < expected_len,
+ "Invalid wowlan_info_notif v%d (expected=%u got=%u)\n",
+ version, expected_len, len))
+ return false;
+
+ len_with_mlo_keys = expected_len +
+ (num_mlo_keys * sizeof(struct iwl_wowlan_mlo_gtk));
+
+ if (IWL_FW_CHECK(mld, len < len_with_mlo_keys,
+ "Invalid wowlan_info_notif v%d with MLO keys (expected=%u got=%u)\n",
+ version, len_with_mlo_keys, len))
+ return false;
+
+ return true;
+}
+
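
/*
 * A minimal illustrative sketch of the length rule that
 * iwl_mld_validate_wowlan_notif_size() enforces: the notification is a
 * fixed-size header followed by num_mlo_keys entries of the trailing
 * mlo_gtks[] flexible array, so the payload must be at least the sum
 * below. The helper name is hypothetical.
 */
static inline u32 wowlan_notif_min_len(u32 fixed_len, u8 num_mlo_keys)
{
	/* the same value struct_size() would compute for the trailing array */
	return fixed_len + num_mlo_keys * sizeof(struct iwl_wowlan_mlo_gtk);
}
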
static bool
iwl_mld_handle_wowlan_info_notif(struct iwl_mld *mld,
struct iwl_mld_wowlan_status *wowlan_status,
struct iwl_rx_packet *pkt)
{
- const struct iwl_wowlan_info_notif *notif = (void *)pkt->data;
- u32 expected_len, len = iwl_rx_packet_payload_len(pkt);
-
- expected_len = sizeof(*notif);
+ const struct iwl_wowlan_info_notif *notif;
+ struct iwl_wowlan_info_notif *converted_notif __free(kfree) = NULL;
+ u32 len = iwl_rx_packet_payload_len(pkt);
+ int wowlan_info_ver = iwl_fw_lookup_notif_ver(mld->fw,
+ PROT_OFFLOAD_GROUP,
+ WOWLAN_INFO_NOTIFICATION,
+ IWL_FW_CMD_VER_UNKNOWN);
+
+ if (wowlan_info_ver == 5) {
+ /* v5 format - validate before conversion */
+ const struct iwl_wowlan_info_notif_v5 *notif_v5 = (void *)pkt->data;
+
+ if (!iwl_mld_validate_wowlan_notif_size(mld, len,
+ sizeof(*notif_v5),
+ notif_v5->num_mlo_link_keys,
+ 5))
+ return true;
+
+ converted_notif = kzalloc(struct_size(converted_notif,
+ mlo_gtks,
+ notif_v5->num_mlo_link_keys),
+ GFP_ATOMIC);
+ if (!converted_notif) {
+ IWL_ERR(mld,
+ "Failed to allocate memory for converted wowlan_info_notif\n");
+ return true;
+ }
- if (IWL_FW_CHECK(mld, len < expected_len,
- "Invalid wowlan_info_notif (expected=%ud got=%ud)\n",
- expected_len, len))
+ iwl_mld_convert_wowlan_notif_v5(notif_v5,
+ converted_notif);
+ notif = converted_notif;
+ } else if (wowlan_info_ver == 6) {
+ notif = (void *)pkt->data;
+ if (!iwl_mld_validate_wowlan_notif_size(mld, len,
+ sizeof(*notif),
+ notif->num_mlo_link_keys,
+ 6))
+ return true;
+ } else {
+		/* older versions are not supported */
+ IWL_WARN(mld,
+ "Unsupported wowlan_info_notif version %d\n",
+ wowlan_info_ver);
return true;
+ }
if (IWL_FW_CHECK(mld, notif->tid_offloaded_tx != IWL_WOWLAN_OFFLOAD_TID,
"Invalid tid_offloaded_tx %d\n",
- wowlan_status->tid_offloaded_tx))
+ notif->tid_offloaded_tx))
return true;
iwl_mld_convert_gtk_resume_data(mld, wowlan_status, notif->gtk,
@@ -442,8 +657,10 @@ iwl_mld_handle_wowlan_info_notif(struct iwl_mld *mld,
wowlan_status->num_of_gtk_rekeys =
le32_to_cpu(notif->num_of_gtk_rekeys);
wowlan_status->wakeup_reasons = le32_to_cpu(notif->wakeup_reasons);
+
+ iwl_mld_convert_mlo_keys(mld, notif, wowlan_status);
+
return false;
- /* TODO: mlo_links (task=MLO)*/
}
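
/*
 * A minimal sketch of the scope-based cleanup pattern used by
 * converted_notif above: __free(kfree), provided via <linux/cleanup.h>
 * and <linux/slab.h>, runs kfree() automatically when the variable goes
 * out of scope, so each early return frees the buffer without an
 * explicit error-unwind label. The function name is hypothetical.
 */
static int scoped_alloc_example(size_t len)
{
	u8 *buf __free(kfree) = kzalloc(len, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	/* any return from here on frees buf automatically */
	return 0;
}
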
static bool
@@ -619,8 +836,8 @@ iwl_mld_set_key_rx_seq_tids(struct ieee80211_key_conf *key,
}
static void
-iwl_mld_set_key_rx_seq(struct ieee80211_key_conf *key,
- struct iwl_mld_mcast_key_data *key_data)
+iwl_mld_update_mcast_rx_seq(struct ieee80211_key_conf *key,
+ struct iwl_mld_mcast_key_data *key_data)
{
switch (key->cipher) {
case WLAN_CIPHER_SUITE_CCMP:
@@ -687,132 +904,53 @@ iwl_mld_resume_keys_iter(struct ieee80211_hw *hw,
struct iwl_mld_wowlan_status *wowlan_status = data->wowlan_status;
u8 status_idx;
- /* TODO: check key link id (task=MLO) */
- if (data->unhandled_cipher)
- return;
-
- switch (key->cipher) {
- case WLAN_CIPHER_SUITE_WEP40:
- case WLAN_CIPHER_SUITE_WEP104:
- /* ignore WEP completely, nothing to do */
- return;
- case WLAN_CIPHER_SUITE_CCMP:
- case WLAN_CIPHER_SUITE_GCMP:
- case WLAN_CIPHER_SUITE_GCMP_256:
- case WLAN_CIPHER_SUITE_TKIP:
+ if (key->keyidx >= 0 && key->keyidx <= 3) {
+ /* PTK */
if (sta) {
iwl_mld_update_ptk_rx_seq(data->mld, wowlan_status,
sta, key,
key->cipher ==
WLAN_CIPHER_SUITE_TKIP);
- return;
+ /* GTK */
+ } else {
+ status_idx = key->keyidx == wowlan_status->gtk[1].id;
+ iwl_mld_update_mcast_rx_seq(key,
+ &wowlan_status->gtk[status_idx]);
}
+ }
- if (WARN_ON(data->gtk_cipher &&
- data->gtk_cipher != key->cipher))
- return;
+ /* IGTK */
+ if (key->keyidx == 4 || key->keyidx == 5) {
+ if (key->keyidx == wowlan_status->igtk.id)
+ iwl_mld_update_mcast_rx_seq(key, &wowlan_status->igtk);
+ }
- data->gtk_cipher = key->cipher;
- status_idx = key->keyidx == wowlan_status->gtk[1].id;
- iwl_mld_set_key_rx_seq(key, &wowlan_status->gtk[status_idx]);
- break;
- case WLAN_CIPHER_SUITE_BIP_GMAC_128:
- case WLAN_CIPHER_SUITE_BIP_GMAC_256:
- case WLAN_CIPHER_SUITE_BIP_CMAC_256:
- case WLAN_CIPHER_SUITE_AES_CMAC:
- if (key->keyidx == 4 || key->keyidx == 5) {
- if (WARN_ON(data->igtk_cipher &&
- data->igtk_cipher != key->cipher))
- return;
-
- data->igtk_cipher = key->cipher;
- if (key->keyidx == wowlan_status->igtk.id)
- iwl_mld_set_key_rx_seq(key, &wowlan_status->igtk);
- }
- if (key->keyidx == 6 || key->keyidx == 7) {
- if (WARN_ON(data->bigtk_cipher &&
- data->bigtk_cipher != key->cipher))
- return;
-
- data->bigtk_cipher = key->cipher;
- status_idx = key->keyidx == wowlan_status->bigtk[1].id;
- iwl_mld_set_key_rx_seq(key, &wowlan_status->bigtk[status_idx]);
- }
- break;
- default:
- data->unhandled_cipher = true;
- return;
+ /* BIGTK */
+ if (key->keyidx == 6 || key->keyidx == 7) {
+ status_idx = key->keyidx == wowlan_status->bigtk[1].id;
+ iwl_mld_update_mcast_rx_seq(key,
+ &wowlan_status->bigtk[status_idx]);
}
- data->num_keys++;
}
static void
iwl_mld_add_mcast_rekey(struct ieee80211_vif *vif,
struct iwl_mld *mld,
struct iwl_mld_mcast_key_data *key_data,
- struct ieee80211_bss_conf *link_conf,
- u32 cipher)
+ struct ieee80211_bss_conf *link_conf)
{
struct ieee80211_key_conf *key_config;
- struct {
- struct ieee80211_key_conf conf;
- u8 key[WOWLAN_KEY_MAX_SIZE];
- } conf = {
- .conf.cipher = cipher,
- .conf.keyidx = key_data->id,
- };
int link_id = vif->active_links ? __ffs(vif->active_links) : -1;
- u8 key[WOWLAN_KEY_MAX_SIZE];
-
- BUILD_BUG_ON(WLAN_KEY_LEN_CCMP != WLAN_KEY_LEN_GCMP);
- BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_CCMP);
- BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_GCMP_256);
- BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_TKIP);
- BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_BIP_GMAC_128);
- BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_BIP_GMAC_256);
- BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_AES_CMAC);
- BUILD_BUG_ON(sizeof(conf.key) < sizeof(key_data->key));
if (!key_data->len)
return;
- switch (cipher) {
- case WLAN_CIPHER_SUITE_CCMP:
- case WLAN_CIPHER_SUITE_GCMP:
- conf.conf.keylen = WLAN_KEY_LEN_CCMP;
- break;
- case WLAN_CIPHER_SUITE_GCMP_256:
- conf.conf.keylen = WLAN_KEY_LEN_GCMP_256;
- break;
- case WLAN_CIPHER_SUITE_TKIP:
- conf.conf.keylen = WLAN_KEY_LEN_TKIP;
- break;
- case WLAN_CIPHER_SUITE_BIP_GMAC_128:
- conf.conf.keylen = WLAN_KEY_LEN_BIP_GMAC_128;
- break;
- case WLAN_CIPHER_SUITE_BIP_GMAC_256:
- conf.conf.keylen = WLAN_KEY_LEN_BIP_GMAC_256;
- break;
- case WLAN_CIPHER_SUITE_AES_CMAC:
- conf.conf.keylen = WLAN_KEY_LEN_AES_CMAC;
- break;
- case WLAN_CIPHER_SUITE_BIP_CMAC_256:
- conf.conf.keylen = WLAN_KEY_LEN_BIP_CMAC_256;
- break;
- default:
- WARN_ON(1);
- }
-
- memcpy(conf.conf.key, key_data->key, conf.conf.keylen);
-
- memcpy(key, key_data->key, sizeof(key_data->key));
-
- key_config = ieee80211_gtk_rekey_add(vif, key_data->id, key,
- sizeof(key), link_id);
+ key_config = ieee80211_gtk_rekey_add(vif, key_data->id, key_data->key,
+ sizeof(key_data->key), link_id);
if (IS_ERR(key_config))
return;
- iwl_mld_set_key_rx_seq(key_config, key_data);
+ iwl_mld_update_mcast_rx_seq(key_config, key_data);
/* The FW holds only one igtk so we keep track of the valid one */
if (key_config->keyidx == 4 || key_config->keyidx == 5) {
@@ -831,37 +969,78 @@ iwl_mld_add_mcast_rekey(struct ieee80211_vif *vif,
}
/* Also keep track of the new BIGTK */
- if ((key_config->keyidx == 6 || key_config->keyidx == 7) &&
- vif->type == NL80211_IFTYPE_STATION) {
- struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
-
- rcu_assign_pointer(mld_vif->bigtks[key_config->keyidx - 6], key_config);
- }
+ if (key_config->keyidx == 6 || key_config->keyidx == 7)
+ iwl_mld_track_bigtk(mld, vif, key_config, true);
}
static void
-iwl_mld_add_all_rekeys(struct ieee80211_vif *vif,
+iwl_mld_add_all_rekeys(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
struct iwl_mld_wowlan_status *wowlan_status,
- struct iwl_mld_resume_key_iter_data *key_iter_data,
struct ieee80211_bss_conf *link_conf)
{
int i;
for (i = 0; i < ARRAY_SIZE(wowlan_status->gtk); i++)
- iwl_mld_add_mcast_rekey(vif, key_iter_data->mld,
- &wowlan_status->gtk[i],
- link_conf,
- key_iter_data->gtk_cipher);
+ iwl_mld_add_mcast_rekey(vif, mld, &wowlan_status->gtk[i],
+ link_conf);
- iwl_mld_add_mcast_rekey(vif, key_iter_data->mld,
- &wowlan_status->igtk,
- link_conf, key_iter_data->igtk_cipher);
+ iwl_mld_add_mcast_rekey(vif, mld, &wowlan_status->igtk, link_conf);
for (i = 0; i < ARRAY_SIZE(wowlan_status->bigtk); i++)
- iwl_mld_add_mcast_rekey(vif, key_iter_data->mld,
- &wowlan_status->bigtk[i],
- link_conf,
- key_iter_data->bigtk_cipher);
+ iwl_mld_add_mcast_rekey(vif, mld, &wowlan_status->bigtk[i],
+ link_conf);
+}
+
+static void iwl_mld_mlo_rekey(struct iwl_mld *mld,
+ struct iwl_mld_wowlan_status *wowlan_status,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mld_old_mlo_keys *old_keys __free(kfree) = NULL;
+
+ IWL_DEBUG_WOWLAN(mld, "Num of MLO Keys: %d\n", wowlan_status->num_mlo_keys);
+
+ if (!wowlan_status->num_mlo_keys)
+ return;
+
+ for (int i = 0; i < wowlan_status->num_mlo_keys; i++) {
+ struct iwl_mld_wowlan_mlo_key *mlo_key = &wowlan_status->mlo_keys[i];
+ struct ieee80211_key_conf *key;
+ struct ieee80211_key_seq seq;
+ u8 link_id = mlo_key->link_id;
+
+ if (IWL_FW_CHECK(mld, mlo_key->link_id >= IEEE80211_MLD_MAX_NUM_LINKS ||
+ mlo_key->idx >= 8 ||
+ mlo_key->type >= WOWLAN_MLO_GTK_KEY_NUM_TYPES,
+ "Invalid MLO key link_id %d, idx %d, type %d\n",
+ mlo_key->link_id, mlo_key->idx, mlo_key->type))
+ continue;
+
+ if (!(vif->valid_links & BIT(link_id)) ||
+ (vif->active_links & BIT(link_id)))
+ continue;
+
+ IWL_DEBUG_WOWLAN(mld, "Add MLO key id %d, link id %d\n",
+ mlo_key->idx, link_id);
+
+ key = ieee80211_gtk_rekey_add(vif, mlo_key->idx, mlo_key->key,
+ sizeof(mlo_key->key), link_id);
+
+ if (IS_ERR(key))
+ continue;
+
+		/*
+		 * mac80211 expects the PN in big-endian order. Also note
+		 * that seq is a union of all cipher types (CCMP, GCMP,
+		 * CMAC, GMAC); they all have the same 6-byte pn field, so
+		 * it is enough to copy into ccmp.pn.
+		 */
+ for (int j = 5; j >= 0; j--)
+ seq.ccmp.pn[5 - j] = mlo_key->pn[j];
+
+ /* group keys are non-QoS and use TID 0 */
+ ieee80211_set_key_rx_seq(key, 0, &seq);
+ }
}
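
/*
 * A minimal sketch of the PN byte-order conversion above, assuming the
 * firmware reports the 6-byte packet number least-significant byte
 * first while mac80211's ieee80211_key_seq stores it big-endian. The
 * helper name is hypothetical.
 */
static void pn_le_to_key_seq(struct ieee80211_key_seq *seq,
			     const u8 pn_le[6])
{
	for (int i = 0; i < 6; i++)
		seq->ccmp.pn[5 - i] = pn_le[i];	/* reverse the byte order */
}
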
static bool
@@ -884,23 +1063,19 @@ iwl_mld_update_sec_keys(struct iwl_mld *mld,
ieee80211_iter_keys(mld->hw, vif, iwl_mld_resume_keys_iter,
&key_iter_data);
- if (key_iter_data.unhandled_cipher)
- return false;
-
- IWL_DEBUG_WOWLAN(mld,
- "Number of installed keys: %d, Number of rekeys: %d\n",
- key_iter_data.num_keys,
+ IWL_DEBUG_WOWLAN(mld, "Number of rekeys: %d\n",
wowlan_status->num_of_gtk_rekeys);
- if (!key_iter_data.num_keys || !wowlan_status->num_of_gtk_rekeys)
+ if (!wowlan_status->num_of_gtk_rekeys)
return true;
- iwl_mld_add_all_rekeys(vif, wowlan_status, &key_iter_data,
+ iwl_mld_add_all_rekeys(mld, vif, wowlan_status,
link_conf);
+ iwl_mld_mlo_rekey(mld, wowlan_status, vif);
+
ieee80211_gtk_rekey_notify(vif, link_conf->bssid,
(void *)&replay_ctr, GFP_KERNEL);
- /* TODO: MLO rekey (task=MLO) */
return true;
}
@@ -1179,7 +1354,6 @@ static int iwl_mld_wait_d3_notif(struct iwl_mld *mld,
WIDE_ID(PROT_OFFLOAD_GROUP, D3_END_NOTIFICATION)
};
struct iwl_notification_wait wait_d3_notif;
- enum iwl_d3_status d3_status;
int ret;
if (with_wowlan)
@@ -1195,14 +1369,10 @@ static int iwl_mld_wait_d3_notif(struct iwl_mld *mld,
iwl_mld_handle_d3_notif,
resume_data);
- ret = iwl_trans_d3_resume(mld->trans, &d3_status, false, false);
- if (ret || d3_status != IWL_D3_STATUS_ALIVE) {
- if (d3_status != IWL_D3_STATUS_ALIVE) {
- IWL_INFO(mld, "Device was reset during suspend\n");
- ret = -ENOENT;
- } else {
- IWL_ERR(mld, "Transport resume failed\n");
- }
+ ret = iwl_trans_d3_resume(mld->trans, false);
+ if (ret) {
+ /* Avoid sending commands if the FW is dead */
+ iwl_trans_notify_fw_error(mld->trans);
iwl_remove_notification(&mld->notif_wait, &wait_d3_notif);
return ret;
}
@@ -1236,16 +1406,11 @@ int iwl_mld_no_wowlan_suspend(struct iwl_mld *mld)
iwl_mld_low_latency_stop(mld);
- /* This will happen if iwl_mld_supsend failed with FW error */
- if (mld->trans->state == IWL_TRANS_NO_FW &&
- test_bit(STATUS_FW_ERROR, &mld->trans->status))
- return -ENODEV;
-
ret = iwl_mld_update_device_power(mld, true);
if (ret) {
IWL_ERR(mld,
"d3 suspend: couldn't send power_device %d\n", ret);
- goto out;
+ return ret;
}
ret = iwl_mld_send_cmd_pdu(mld, D3_CONFIG_CMD,
@@ -1253,24 +1418,20 @@ int iwl_mld_no_wowlan_suspend(struct iwl_mld *mld)
if (ret) {
IWL_ERR(mld,
"d3 suspend: couldn't send D3_CONFIG_CMD %d\n", ret);
- goto out;
+ return ret;
}
- ret = iwl_trans_d3_suspend(mld->trans, false, false);
+ ret = iwl_trans_d3_suspend(mld->trans, false);
if (ret) {
IWL_ERR(mld, "d3 suspend: trans_d3_suspend failed %d\n", ret);
+ /* We are going to stop the FW. Avoid sending commands in that flow */
+ iwl_trans_notify_fw_error(mld->trans);
} else {
/* Async notification might send hcmds, which is not allowed in suspend */
iwl_mld_cancel_async_notifications(mld);
mld->fw_status.in_d3 = true;
}
- out:
- if (ret) {
- mld->trans->state = IWL_TRANS_NO_FW;
- set_bit(STATUS_FW_ERROR, &mld->trans->status);
- }
-
return ret;
}
@@ -1290,15 +1451,12 @@ int iwl_mld_no_wowlan_resume(struct iwl_mld *mld)
iwl_fw_dbg_read_d3_debug_data(&mld->fwrt);
ret = iwl_mld_wait_d3_notif(mld, &resume_data, false);
+ if (ret)
+ return ret;
if (!ret && (resume_data.d3_end_flags & IWL_D0I3_RESET_REQUIRE))
return -ENODEV;
- if (ret) {
- mld->trans->state = IWL_TRANS_NO_FW;
- set_bit(STATUS_FW_ERROR, &mld->trans->status);
- return ret;
- }
iwl_mld_low_latency_restart(mld);
return iwl_mld_update_device_power(mld, false);
@@ -1530,7 +1688,8 @@ static void
iwl_mld_set_wowlan_config_cmd(struct iwl_mld *mld,
struct cfg80211_wowlan *wowlan,
struct iwl_wowlan_config_cmd *wowlan_config_cmd,
- struct ieee80211_sta *ap_sta)
+ struct ieee80211_sta *ap_sta,
+ struct ieee80211_bss_conf *link)
{
wowlan_config_cmd->is_11n_connection =
ap_sta->deflink.ht_cap.ht_supported;
@@ -1540,6 +1699,9 @@ iwl_mld_set_wowlan_config_cmd(struct iwl_mld *mld,
if (ap_sta->mfp)
wowlan_config_cmd->flags |= IS_11W_ASSOC;
+ if (iwl_mld_beacon_protection_enabled(mld, link))
+ wowlan_config_cmd->flags |= HAS_BEACON_PROTECTION;
+
if (wowlan->disconnect)
wowlan_config_cmd->wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS |
@@ -1737,7 +1899,7 @@ iwl_mld_wowlan_config(struct iwl_mld *mld, struct ieee80211_vif *bss_vif,
return ret;
iwl_mld_set_wowlan_config_cmd(mld, wowlan,
- &wowlan_config_cmd, ap_sta);
+ &wowlan_config_cmd, ap_sta, link_conf);
ret = iwl_mld_send_cmd_pdu(mld, WOWLAN_CONFIGURATION,
&wowlan_config_cmd);
if (ret)
@@ -1807,7 +1969,6 @@ int iwl_mld_wowlan_resume(struct iwl_mld *mld)
};
int link_id;
int ret;
- bool fw_err = false;
lockdep_assert_wiphy(mld->wiphy);
@@ -1850,7 +2011,6 @@ int iwl_mld_wowlan_resume(struct iwl_mld *mld)
ret = iwl_mld_wait_d3_notif(mld, &resume_data, true);
if (ret) {
IWL_ERR(mld, "Couldn't get the d3 notifs %d\n", ret);
- fw_err = true;
goto err;
}
@@ -1887,11 +2047,6 @@ int iwl_mld_wowlan_resume(struct iwl_mld *mld)
goto out;
err:
- if (fw_err) {
- mld->trans->state = IWL_TRANS_NO_FW;
- set_bit(STATUS_FW_ERROR, &mld->trans->status);
- }
-
mld->fw_status.in_hw_restart = true;
ret = 1;
out:
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c
index cc052b0aa53f..b9c9cd3f44e4 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c
@@ -86,7 +86,7 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mld *mld, char *buf,
if (count == 6 && !strcmp(buf, "nolog\n")) {
mld->fw_status.do_not_dump_once = true;
- iwl_trans_suppress_cmd_error_once(mld->trans);
+ mld->trans->suppress_cmd_error_once = true;
}
/* take the return value to make compiler happy - it will
@@ -1001,8 +1001,12 @@ void iwl_mld_add_link_debugfs(struct ieee80211_hw *hw,
	 * If not, this is a per-link dir of an MLO vif; add the iwlmld
	 * dir in it.
*/
- if (!mld_link_dir)
+ if (!mld_link_dir) {
mld_link_dir = debugfs_create_dir("iwlmld", dir);
+ } else {
+ /* Release the reference from debugfs_lookup */
+ dput(mld_link_dir);
+ }
}
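
/*
 * A short sketch of the reference-count rule behind the dput() added
 * above: debugfs_lookup() returns the dentry with an elevated reference
 * count, so a caller that only probes for existence must drop that
 * reference. The wrapper below is illustrative only.
 */
static void lookup_or_create_iwlmld_dir(struct dentry *dir)
{
	struct dentry *d = debugfs_lookup("iwlmld", dir);

	if (!d)
		d = debugfs_create_dir("iwlmld", dir);
	else
		dput(d);	/* release the lookup reference */
}
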
static ssize_t _iwl_dbgfs_fixed_rate_write(struct iwl_mld *mld, char *buf,
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/iface.c b/drivers/net/wireless/intel/iwlwifi/mld/iface.c
index 38993d65c052..ed379825a923 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/iface.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/iface.c
@@ -115,20 +115,12 @@ static bool iwl_mld_is_nic_ack_enabled(struct iwl_mld *mld,
static void iwl_mld_set_he_support(struct iwl_mld *mld,
struct ieee80211_vif *vif,
- struct iwl_mac_config_cmd *cmd,
- int cmd_ver)
+ struct iwl_mac_config_cmd *cmd)
{
- if (vif->type == NL80211_IFTYPE_AP) {
- if (cmd_ver == 2)
- cmd->wifi_gen_v2.he_ap_support = cpu_to_le16(1);
- else
- cmd->wifi_gen.he_ap_support = 1;
- } else {
- if (cmd_ver == 2)
- cmd->wifi_gen_v2.he_support = cpu_to_le16(1);
- else
- cmd->wifi_gen.he_support = 1;
- }
+ if (vif->type == NL80211_IFTYPE_AP)
+ cmd->wifi_gen.he_ap_support = 1;
+ else
+ cmd->wifi_gen.he_support = 1;
}
/* fill the common part for all interface types */
@@ -140,9 +132,6 @@ static void iwl_mld_mac_cmd_fill_common(struct iwl_mld *mld,
struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
struct ieee80211_bss_conf *link_conf;
unsigned int link_id;
- int cmd_ver = iwl_fw_lookup_cmd_ver(mld->fw,
- WIDE_ID(MAC_CONF_GROUP,
- MAC_CONFIG_CMD), 0);
lockdep_assert_wiphy(mld->wiphy);
@@ -169,11 +158,8 @@ static void iwl_mld_mac_cmd_fill_common(struct iwl_mld *mld,
* and enable both when we have MLO.
*/
if (ieee80211_vif_is_mld(vif)) {
- iwl_mld_set_he_support(mld, vif, cmd, cmd_ver);
- if (cmd_ver == 2)
- cmd->wifi_gen_v2.eht_support = cpu_to_le32(1);
- else
- cmd->wifi_gen.eht_support = 1;
+ iwl_mld_set_he_support(mld, vif, cmd);
+ cmd->wifi_gen.eht_support = 1;
return;
}
@@ -181,7 +167,7 @@ static void iwl_mld_mac_cmd_fill_common(struct iwl_mld *mld,
if (!link_conf->he_support)
continue;
- iwl_mld_set_he_support(mld, vif, cmd, cmd_ver);
+ iwl_mld_set_he_support(mld, vif, cmd);
/* EHT, if supported, was already set above */
break;
@@ -451,24 +437,21 @@ int iwl_mld_add_vif(struct iwl_mld *mld, struct ieee80211_vif *vif)
return ret;
}
-int iwl_mld_rm_vif(struct iwl_mld *mld, struct ieee80211_vif *vif)
+void iwl_mld_rm_vif(struct iwl_mld *mld, struct ieee80211_vif *vif)
{
struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
- int ret;
lockdep_assert_wiphy(mld->wiphy);
- ret = iwl_mld_mac_fw_action(mld, vif, FW_CTXT_ACTION_REMOVE);
+ iwl_mld_mac_fw_action(mld, vif, FW_CTXT_ACTION_REMOVE);
if (WARN_ON(mld_vif->fw_id >= ARRAY_SIZE(mld->fw_id_to_vif)))
- return -EINVAL;
+ return;
RCU_INIT_POINTER(mld->fw_id_to_vif[mld_vif->fw_id], NULL);
iwl_mld_cancel_notifications_of_object(mld, IWL_MLD_OBJECT_TYPE_VIF,
mld_vif->fw_id);
-
- return ret;
}
void iwl_mld_set_vif_associated(struct iwl_mld *mld,
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/iface.h b/drivers/net/wireless/intel/iwlwifi/mld/iface.h
index 05dcb63701b1..a3573d20f214 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/iface.h
+++ b/drivers/net/wireless/intel/iwlwifi/mld/iface.h
@@ -125,8 +125,6 @@ struct iwl_mld_emlsr {
* @ap_sta: pointer to AP sta, for easier access to it.
* Relevant only for STA vifs.
* @authorized: indicates the AP station was set to authorized
- * @bigtks: BIGTKs of the AP, for beacon protection.
- * Only valid for STA. (FIXME: needs to be per link)
* @num_associated_stas: number of associated STAs. Relevant only for AP mode.
* @ap_ibss_active: whether the AP/IBSS was started
* @cca_40mhz_workaround: When we are connected in 2.4 GHz and 40 MHz, and the
@@ -158,7 +156,6 @@ struct iwl_mld_vif {
struct iwl_mld_session_protect session_protect;
struct ieee80211_sta *ap_sta;
bool authorized;
- struct ieee80211_key_conf __rcu *bigtks[2];
u8 num_associated_stas;
bool ap_ibss_active;
enum iwl_mld_cca_40mhz_wa_status cca_40mhz_workaround;
@@ -227,7 +224,7 @@ void iwl_mld_cleanup_vif(void *data, u8 *mac, struct ieee80211_vif *vif);
int iwl_mld_mac_fw_action(struct iwl_mld *mld, struct ieee80211_vif *vif,
u32 action);
int iwl_mld_add_vif(struct iwl_mld *mld, struct ieee80211_vif *vif);
-int iwl_mld_rm_vif(struct iwl_mld *mld, struct ieee80211_vif *vif);
+void iwl_mld_rm_vif(struct iwl_mld *mld, struct ieee80211_vif *vif);
void iwl_mld_set_vif_associated(struct iwl_mld *mld,
struct ieee80211_vif *vif);
u8 iwl_mld_get_fw_bss_vifs_ids(struct iwl_mld *mld);
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/key.c b/drivers/net/wireless/intel/iwlwifi/mld/key.c
index 13462a5ad79a..04192c5f07ff 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/key.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/key.c
@@ -368,3 +368,41 @@ int iwl_mld_update_sta_keys(struct iwl_mld *mld,
&data);
return data.err;
}
+
+void iwl_mld_track_bigtk(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *key, bool add)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld_link *link;
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ if (WARN_ON(key->keyidx < 6 || key->keyidx > 7))
+ return;
+
+ if (WARN_ON(key->link_id < 0))
+ return;
+
+ link = iwl_mld_link_dereference_check(mld_vif, key->link_id);
+ if (WARN_ON(!link))
+ return;
+
+ if (add)
+ rcu_assign_pointer(link->bigtks[key->keyidx - 6], key);
+ else
+ RCU_INIT_POINTER(link->bigtks[key->keyidx - 6], NULL);
+}
+
+bool iwl_mld_beacon_protection_enabled(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link)
+{
+ struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
+
+ if (WARN_ON(!mld_link))
+ return false;
+
+ return rcu_access_pointer(mld_link->bigtks[0]) ||
+ rcu_access_pointer(mld_link->bigtks[1]);
+}
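
/*
 * A usage sketch of the two helpers above, mirroring the mac80211
 * set_key and WoWLAN-config call sites in this patch; the wrapper name
 * is hypothetical. Key indices 6 and 7 are the two BIGTK slots.
 */
static void example_track_bigtk(struct iwl_mld *mld,
				struct ieee80211_vif *vif,
				struct ieee80211_key_conf *key,
				enum set_key_cmd cmd)
{
	if (key->keyidx == 6 || key->keyidx == 7)
		iwl_mld_track_bigtk(mld, vif, key, cmd == SET_KEY);

	/* suspend path: advertise beacon protection only when a BIGTK is
	 * actually installed on the link, e.g.:
	 *
	 *	if (iwl_mld_beacon_protection_enabled(mld, link))
	 *		wowlan_config_cmd->flags |= HAS_BEACON_PROTECTION;
	 */
}
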
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/key.h b/drivers/net/wireless/intel/iwlwifi/mld/key.h
index a68ea48913be..5a9efdaa3b03 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/key.h
+++ b/drivers/net/wireless/intel/iwlwifi/mld/key.h
@@ -36,4 +36,11 @@ iwl_mld_cleanup_keys_iter(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
key->hw_key_idx = STA_KEY_IDX_INVALID;
}
+void iwl_mld_track_bigtk(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *key, bool add);
+
+bool iwl_mld_beacon_protection_enabled(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link);
+
#endif /* __iwl_mld_key_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/link.c b/drivers/net/wireless/intel/iwlwifi/mld/link.c
index 782fc41aa1c3..738f80fe0c50 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/link.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/link.c
@@ -532,7 +532,8 @@ void iwl_mld_handle_missed_beacon_notif(struct iwl_mld *mld,
le32_to_cpu(notif->consec_missed_beacons_other_link);
struct ieee80211_bss_conf *link_conf =
iwl_mld_fw_id_to_link_conf(mld, fw_link_id);
- u32 bss_param_ch_cnt_link_id;
+ struct ieee80211_bss_conf *other_link;
+ u32 bss_param_ch_cnt_link_id, other_link_fw_id;
struct ieee80211_vif *vif;
u8 link_id;
@@ -550,11 +551,6 @@ void iwl_mld_handle_missed_beacon_notif(struct iwl_mld *mld,
if (WARN_ON(!vif))
return;
- mld->trans->dbg.dump_file_name_ext_valid = true;
- snprintf(mld->trans->dbg.dump_file_name_ext, IWL_FW_INI_MAX_NAME,
- "LinkId_%d_MacType_%d", fw_link_id,
- iwl_mld_mac80211_iftype_to_fw(vif));
-
iwl_dbg_tlv_time_point(&mld->fwrt,
IWL_FW_INI_TIME_POINT_MISSED_BEACONS, &tp_data);
@@ -572,8 +568,11 @@ void iwl_mld_handle_missed_beacon_notif(struct iwl_mld *mld,
if (missed_bcon_since_rx > IWL_MLD_MISSED_BEACONS_THRESHOLD) {
ieee80211_cqm_beacon_loss_notify(vif, GFP_ATOMIC);
- /* try to switch links, no-op if we don't have MLO */
- iwl_mld_int_mlo_scan(mld, vif);
+		/* We are not in EMLSR and we can't hear the link; try to
+		 * switch to a better link. The EMLSR case is handled below.
+		 */
+ if (!iwl_mld_emlsr_active(vif))
+ iwl_mld_int_mlo_scan(mld, vif);
}
/* no more logic if we're not in EMLSR */
@@ -584,6 +583,17 @@ void iwl_mld_handle_missed_beacon_notif(struct iwl_mld *mld,
if (le32_to_cpu(notif->other_link_id) == FW_CTXT_ID_INVALID)
return;
+ other_link_fw_id = le32_to_cpu(notif->other_link_id);
+ other_link = iwl_mld_fw_id_to_link_conf(mld, other_link_fw_id);
+
+ if (IWL_FW_CHECK(mld, !other_link, "link doesn't exist for: %d\n",
+ other_link_fw_id))
+ return;
+
+ IWL_DEBUG_EHT(mld,
+ "missed bcn on the other link (link_id=%u): %u\n",
+ other_link->link_id, scnd_lnk_bcn_lost);
+
/* Exit EMLSR if we lost more than
	 * IWL_MLD_MISSED_BEACONS_EXIT_ESR_THRESH beacons on both links
* OR more than IWL_MLD_BCN_LOSS_EXIT_ESR_THRESH on current link.
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/link.h b/drivers/net/wireless/intel/iwlwifi/mld/link.h
index cad2c9426349..9e4da8e4de93 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/link.h
+++ b/drivers/net/wireless/intel/iwlwifi/mld/link.h
@@ -36,6 +36,7 @@ struct iwl_probe_resp_data {
* @he_ru_2mhz_block: 26-tone RU OFDMA transmissions should be blocked.
* @igtk: fw can only have one IGTK at a time, whereas mac80211 can have two.
* This tracks the one IGTK that currently exists in FW.
+ * @bigtks: BIGTKs of the AP. Only valid for STA mode.
* @bcast_sta: station used for broadcast packets. Used in AP, GO and IBSS.
* @mcast_sta: station used for multicast packets. Used in AP, GO and IBSS.
* @mon_sta: station used for TX injection in monitor interface.
@@ -59,6 +60,7 @@ struct iwl_mld_link {
struct ieee80211_chanctx_conf __rcu *chan_ctx;
bool he_ru_2mhz_block;
struct ieee80211_key_conf *igtk;
+ struct ieee80211_key_conf __rcu *bigtks[2];
);
/* And here fields that survive a fw restart */
struct iwl_mld_int_sta bcast_sta;
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c
index b0bd01914a91..5725104a53bf 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c
@@ -626,7 +626,7 @@ int iwl_mld_mac80211_add_interface(struct ieee80211_hw *hw,
IEEE80211_VIF_SUPPORTS_CQM_RSSI;
}
- if (vif->p2p || iwl_fw_lookup_cmd_ver(mld->fw, PHY_CONTEXT_CMD, 0) < 5)
+ if (vif->p2p)
vif->driver_flags |= IEEE80211_VIF_IGNORE_OFDMA_WIDER_BW;
/*
@@ -1966,13 +1966,8 @@ iwl_mld_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
iwl_fw_runtime_suspend(&mld->fwrt);
ret = iwl_mld_wowlan_suspend(mld, wowlan);
- if (ret) {
- if (ret < 0) {
- mld->trans->state = IWL_TRANS_NO_FW;
- set_bit(STATUS_FW_ERROR, &mld->trans->status);
- }
+ if (ret)
return 1;
- }
if (iwl_mld_no_wowlan_suspend(mld))
return 1;
@@ -2065,9 +2060,8 @@ static int iwl_mld_set_key_add(struct iwl_mld *mld,
return -EOPNOTSUPP;
}
- if (vif->type == NL80211_IFTYPE_STATION &&
- (keyidx == 6 || keyidx == 7))
- rcu_assign_pointer(mld_vif->bigtks[keyidx - 6], key);
+ if (keyidx == 6 || keyidx == 7)
+ iwl_mld_track_bigtk(mld, vif, key, true);
/* After exiting from RFKILL, hostapd configures GTK/ITGK before the
* AP is started, but those keys can't be sent to the FW before the
@@ -2116,9 +2110,8 @@ static void iwl_mld_set_key_remove(struct iwl_mld *mld,
sta ? iwl_mld_sta_from_mac80211(sta) : NULL;
int keyidx = key->keyidx;
- if (vif->type == NL80211_IFTYPE_STATION &&
- (keyidx == 6 || keyidx == 7))
- RCU_INIT_POINTER(mld_vif->bigtks[keyidx - 6], NULL);
+ if (keyidx == 6 || keyidx == 7)
+ iwl_mld_track_bigtk(mld, vif, key, false);
if (mld_sta && key->flags & IEEE80211_KEY_FLAG_PAIRWISE &&
(key->cipher == WLAN_CIPHER_SUITE_CCMP ||
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mld.c b/drivers/net/wireless/intel/iwlwifi/mld/mld.c
index 7b46ccc306ab..a6962256bdd1 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/mld.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/mld.c
@@ -147,6 +147,7 @@ iwl_mld_construct_fw_runtime(struct iwl_mld *mld, struct iwl_trans *trans,
*/
static const struct iwl_hcmd_names iwl_mld_legacy_names[] = {
HCMD_NAME(UCODE_ALIVE_NTFY),
+ HCMD_NAME(REPLY_ERROR),
HCMD_NAME(INIT_COMPLETE_NOTIF),
HCMD_NAME(PHY_CONTEXT_CMD),
HCMD_NAME(SCAN_CFG_CMD),
@@ -158,12 +159,14 @@ static const struct iwl_hcmd_names iwl_mld_legacy_names[] = {
HCMD_NAME(LEDS_CMD),
HCMD_NAME(WNM_80211V_TIMING_MEASUREMENT_NOTIFICATION),
HCMD_NAME(WNM_80211V_TIMING_MEASUREMENT_CONFIRM_NOTIFICATION),
+ HCMD_NAME(PHY_CONFIGURATION_CMD),
HCMD_NAME(SCAN_OFFLOAD_UPDATE_PROFILES_CMD),
HCMD_NAME(POWER_TABLE_CMD),
HCMD_NAME(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION),
HCMD_NAME(BEACON_NOTIFICATION),
HCMD_NAME(BEACON_TEMPLATE_CMD),
HCMD_NAME(TX_ANT_CONFIGURATION_CMD),
+ HCMD_NAME(BT_CONFIG),
HCMD_NAME(REDUCE_TX_POWER_CMD),
HCMD_NAME(MISSED_BEACONS_NOTIFICATION),
HCMD_NAME(MAC_PM_POWER_TABLE),
@@ -251,6 +254,7 @@ static const struct iwl_hcmd_names iwl_mld_data_path_names[] = {
HCMD_NAME(TLC_MNG_CONFIG_CMD),
HCMD_NAME(RX_BAID_ALLOCATION_CONFIG_CMD),
HCMD_NAME(SCD_QUEUE_CONFIG_CMD),
+ HCMD_NAME(SEC_KEY_CMD),
HCMD_NAME(ESR_MODE_NOTIF),
HCMD_NAME(MONITOR_NOTIF),
HCMD_NAME(TLC_MNG_UPDATE_NOTIF),
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mlo.c b/drivers/net/wireless/intel/iwlwifi/mld/mlo.c
index e57f5388fe77..241a6271d13d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/mlo.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/mlo.c
@@ -357,38 +357,26 @@ iwl_mld_vif_iter_emlsr_mode_notif(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
const struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
- enum iwl_mvm_fw_esr_recommendation action;
- const struct iwl_esr_mode_notif *notif = NULL;
-
- if (iwl_fw_lookup_notif_ver(mld_vif->mld->fw, DATA_PATH_GROUP,
- ESR_MODE_NOTIF, 0) > 1) {
- notif = (void *)data;
- action = le32_to_cpu(notif->action);
- } else {
- const struct iwl_esr_mode_notif_v1 *notif_v1 = (void *)data;
-
- action = le32_to_cpu(notif_v1->action);
- }
+ const struct iwl_esr_mode_notif *notif = (void *)data;
+ enum iwl_mvm_fw_esr_recommendation action = le32_to_cpu(notif->action);
if (!iwl_mld_vif_has_emlsr_cap(vif))
return;
switch (action) {
case ESR_RECOMMEND_LEAVE:
- if (notif)
- IWL_DEBUG_INFO(mld_vif->mld,
- "FW recommend leave reason = 0x%x\n",
- le32_to_cpu(notif->leave_reason_mask));
+ IWL_DEBUG_INFO(mld_vif->mld,
+ "FW recommend leave reason = 0x%x\n",
+ le32_to_cpu(notif->leave_reason_mask));
iwl_mld_exit_emlsr(mld_vif->mld, vif,
IWL_MLD_EMLSR_EXIT_FW_REQUEST,
iwl_mld_get_primary_link(vif));
break;
case ESR_FORCE_LEAVE:
- if (notif)
- IWL_DEBUG_INFO(mld_vif->mld,
- "FW force leave reason = 0x%x\n",
- le32_to_cpu(notif->leave_reason_mask));
+ IWL_DEBUG_INFO(mld_vif->mld,
+ "FW force leave reason = 0x%x\n",
+ le32_to_cpu(notif->leave_reason_mask));
fallthrough;
case ESR_RECOMMEND_ENTER:
default:
@@ -735,12 +723,6 @@ iwl_mld_set_link_sel_data(struct iwl_mld *mld,
u16 max_grade = 0;
unsigned long link_id;
- /*
- * TODO: don't select links that weren't discovered in the last scan
- * This requires mac80211 (or cfg80211) changes to forward/track when
- * a BSS was last updated. cfg80211 already tracks this information but
- * it is not exposed within the kernel.
- */
for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
struct ieee80211_bss_conf *link_conf =
link_conf_dereference_protected(vif, link_id);
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/notif.c b/drivers/net/wireless/intel/iwlwifi/mld/notif.c
index f17aeca4fae6..884973d0b344 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/notif.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/notif.c
@@ -333,7 +333,6 @@ CMD_VERSIONS(bt_coex_notif,
CMD_VERSIONS(beacon_notification,
CMD_VER_ENTRY(6, iwl_extended_beacon_notif))
CMD_VERSIONS(emlsr_mode_notif,
- CMD_VER_ENTRY(1, iwl_esr_mode_notif_v1)
CMD_VER_ENTRY(2, iwl_esr_mode_notif))
CMD_VERSIONS(emlsr_trans_fail_notif,
CMD_VER_ENTRY(1, iwl_esr_trans_fail_notif))
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/regulatory.c b/drivers/net/wireless/intel/iwlwifi/mld/regulatory.c
index 75d2f5cb23a7..40571125b3ab 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/regulatory.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/regulatory.c
@@ -163,18 +163,32 @@ int iwl_mld_init_sgom(struct iwl_mld *mld)
static int iwl_mld_ppag_send_cmd(struct iwl_mld *mld)
{
- union iwl_ppag_table_cmd cmd = {};
- int ret, len;
+ struct iwl_fw_runtime *fwrt = &mld->fwrt;
+ union iwl_ppag_table_cmd cmd = {
+ .v7.ppag_config_info.table_source = fwrt->ppag_bios_source,
+ .v7.ppag_config_info.table_revision = fwrt->ppag_bios_rev,
+ .v7.ppag_config_info.value = cpu_to_le32(fwrt->ppag_flags),
+ };
+ int ret;
- ret = iwl_fill_ppag_table(&mld->fwrt, &cmd, &len);
- /* Not supporting PPAG table is a valid scenario */
- if (ret < 0)
- return 0;
+ IWL_DEBUG_RADIO(fwrt,
+ "PPAG MODE bits going to be sent: %d\n",
+ fwrt->ppag_flags);
+
+ for (int chain = 0; chain < IWL_NUM_CHAIN_LIMITS; chain++) {
+ for (int subband = 0; subband < IWL_NUM_SUB_BANDS_V2; subband++) {
+ cmd.v7.gain[chain][subband] =
+ fwrt->ppag_chains[chain].subbands[subband];
+ IWL_DEBUG_RADIO(fwrt,
+ "PPAG table: chain[%d] band[%d]: gain = %d\n",
+ chain, subband, cmd.v7.gain[chain][subband]);
+ }
+ }
IWL_DEBUG_RADIO(mld, "Sending PER_PLATFORM_ANT_GAIN_CMD\n");
ret = iwl_mld_send_cmd_pdu(mld, WIDE_ID(PHY_OPS_GROUP,
PER_PLATFORM_ANT_GAIN_CMD),
- &cmd, len);
+ &cmd, sizeof(cmd.v7));
if (ret < 0)
IWL_ERR(mld, "failed to send PER_PLATFORM_ANT_GAIN_CMD (%d)\n",
ret);
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/roc.c b/drivers/net/wireless/intel/iwlwifi/mld/roc.c
index e85f45bce79a..4136c98030d0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/roc.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/roc.c
@@ -82,9 +82,6 @@ int iwl_mld_start_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct iwl_roc_req cmd = {
.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
};
- u8 ver = iwl_fw_lookup_cmd_ver(mld->fw,
- WIDE_ID(MAC_CONF_GROUP, ROC_CMD), 0);
- u16 cmd_len = ver < 6 ? sizeof(struct iwl_roc_req_v5) : sizeof(cmd);
enum iwl_roc_activity activity;
int ret = 0;
@@ -140,7 +137,7 @@ int iwl_mld_start_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
memcpy(cmd.node_addr, vif->addr, ETH_ALEN);
ret = iwl_mld_send_cmd_pdu(mld, WIDE_ID(MAC_CONF_GROUP, ROC_CMD),
- &cmd, cmd_len);
+ &cmd);
if (ret) {
IWL_ERR(mld, "Couldn't send the ROC_CMD\n");
return ret;
@@ -190,9 +187,6 @@ int iwl_mld_cancel_roc(struct ieee80211_hw *hw,
struct iwl_roc_req cmd = {
.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
};
- u8 ver = iwl_fw_lookup_cmd_ver(mld->fw,
- WIDE_ID(MAC_CONF_GROUP, ROC_CMD), 0);
- u16 cmd_len = ver < 6 ? sizeof(struct iwl_roc_req_v5) : sizeof(cmd);
int ret;
lockdep_assert_wiphy(mld->wiphy);
@@ -208,7 +202,7 @@ int iwl_mld_cancel_roc(struct ieee80211_hw *hw,
cmd.activity = cpu_to_le32(mld_vif->roc_activity);
ret = iwl_mld_send_cmd_pdu(mld, WIDE_ID(MAC_CONF_GROUP, ROC_CMD),
- &cmd, cmd_len);
+ &cmd);
if (ret)
IWL_ERR(mld, "Couldn't send the command to cancel the ROC\n");
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/rx.c b/drivers/net/wireless/intel/iwlwifi/mld/rx.c
index b6dedd1ecd4d..20d866dd92c2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/rx.c
@@ -1611,20 +1611,21 @@ iwl_mld_rx_with_sta(struct iwl_mld *mld, struct ieee80211_hdr *hdr,
return sta;
}
-#define KEY_IDX_LEN 2
-
static int iwl_mld_rx_mgmt_prot(struct ieee80211_sta *sta,
struct ieee80211_hdr *hdr,
struct ieee80211_rx_status *rx_status,
u32 mpdu_status,
u32 mpdu_len)
{
+ struct iwl_mld_link *link;
struct wireless_dev *wdev;
struct iwl_mld_sta *mld_sta;
struct iwl_mld_vif *mld_vif;
u8 keyidx;
struct ieee80211_key_conf *key;
const u8 *frame = (void *)hdr;
+ const u8 *mmie;
+ u8 link_id;
if ((mpdu_status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
IWL_RX_MPDU_STATUS_SEC_NONE)
@@ -1657,21 +1658,30 @@ static int iwl_mld_rx_mgmt_prot(struct ieee80211_sta *sta,
return 0;
}
+ link_id = rx_status->link_valid ? rx_status->link_id : 0;
+ link = rcu_dereference(mld_vif->link[link_id]);
+ if (WARN_ON_ONCE(!link))
+ return -1;
+
/* both keys will have the same cipher and MIC length, use
* whichever one is available
*/
- key = rcu_dereference(mld_vif->bigtks[0]);
+ key = rcu_dereference(link->bigtks[0]);
if (!key) {
- key = rcu_dereference(mld_vif->bigtks[1]);
+ key = rcu_dereference(link->bigtks[1]);
if (!key)
goto report;
}
- if (mpdu_len < key->icv_len + IEEE80211_GMAC_PN_LEN + KEY_IDX_LEN)
+ /* get the real key ID */
+ if (mpdu_len < key->icv_len)
goto report;
- /* get the real key ID */
- keyidx = frame[mpdu_len - key->icv_len - IEEE80211_GMAC_PN_LEN - KEY_IDX_LEN];
+ mmie = frame + (mpdu_len - key->icv_len);
+
+	/* key_id sits at the same offset in ieee80211_mmie_16 */
+	keyidx = le16_to_cpu(((const struct ieee80211_mmie *)mmie)->key_id);
+
/* and if that's the other key, look it up */
if (keyidx != key->keyidx) {
/* shouldn't happen since firmware checked, but be safe
@@ -1680,7 +1690,7 @@ static int iwl_mld_rx_mgmt_prot(struct ieee80211_sta *sta,
if (keyidx != 6 && keyidx != 7)
return -1;
- key = rcu_dereference(mld_vif->bigtks[keyidx - 6]);
+ key = rcu_dereference(link->bigtks[keyidx - 6]);
if (!key)
goto report;
}
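
/*
 * A sketch of the frame-tail layout relied on above, assuming mac80211
 * sets key->icv_len to the full MMIE size for BIP ciphers so that
 * (frame + mpdu_len - key->icv_len) points at the MMIE. The struct
 * below mirrors struct ieee80211_mmie; key_id sits at the same offset
 * in the 16-byte-MIC variant (struct ieee80211_mmie_16), which is what
 * makes the single cast safe.
 */
struct mmie_layout_sketch {
	u8 element_id;			/* WLAN_EID_MMIE */
	u8 length;
	__le16 key_id;			/* 6 or 7 */
	u8 sequence_number[6];		/* the IPN */
	u8 mic[8];			/* 16 bytes in ieee80211_mmie_16 */
};
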
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/scan.c b/drivers/net/wireless/intel/iwlwifi/mld/scan.c
index 62f97a18a16c..fd1022ddc912 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/scan.c
@@ -504,9 +504,7 @@ iwl_mld_scan_get_cmd_gen_flags2(struct iwl_mld *mld,
*/
if (scan_status == IWL_MLD_SCAN_REGULAR &&
ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_AP &&
- gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE &&
- iwl_fw_lookup_notif_ver(mld->fw, SCAN_GROUP,
- CHANNEL_SURVEY_NOTIF, 0) >= 1)
+ gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE)
flags |= IWL_UMAC_SCAN_GEN_FLAGS2_COLLECT_CHANNEL_STATS;
return flags;
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/sta.c b/drivers/net/wireless/intel/iwlwifi/mld/sta.c
index 8fb51209b4a6..5cdbfa29a202 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/sta.c
@@ -401,11 +401,9 @@ static u32 iwl_mld_get_htc_flags(struct ieee80211_link_sta *link_sta)
static int iwl_mld_send_sta_cmd(struct iwl_mld *mld,
const struct iwl_sta_cfg_cmd *cmd)
{
- u32 cmd_id = WIDE_ID(MAC_CONF_GROUP, STA_CONFIG_CMD);
- int cmd_len = iwl_fw_lookup_cmd_ver(mld->fw, cmd_id, 0) > 1 ?
- sizeof(*cmd) :
- sizeof(struct iwl_sta_cfg_cmd_v1);
- int ret = iwl_mld_send_cmd_pdu(mld, cmd_id, cmd, cmd_len);
+ int ret = iwl_mld_send_cmd_pdu(mld,
+ WIDE_ID(MAC_CONF_GROUP, STA_CONFIG_CMD),
+ cmd);
if (ret)
IWL_ERR(mld, "STA_CONFIG_CMD send failed, ret=0x%x\n", ret);
return ret;
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/stats.c b/drivers/net/wireless/intel/iwlwifi/mld/stats.c
index cbc64db5eab6..7b8709716324 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/stats.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/stats.c
@@ -379,11 +379,14 @@ static void iwl_mld_update_link_sig(struct ieee80211_vif *vif, int sig,
/* TODO: task=statistics handle CQM notifications */
- if (sig < IWL_MLD_LOW_RSSI_MLO_SCAN_THRESH)
- iwl_mld_int_mlo_scan(mld, vif);
-
- if (!iwl_mld_emlsr_active(vif))
+ if (!iwl_mld_emlsr_active(vif)) {
+		/* We're not in EMLSR and the signal is bad; try to switch
+		 * to a better link. The EMLSR case is handled below.
+		 */
+ if (sig < IWL_MLD_LOW_RSSI_MLO_SCAN_THRESH)
+ iwl_mld_int_mlo_scan(mld, vif);
return;
+ }
/* We are in EMLSR, check if we need to exit */
exit_emlsr_thresh =
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tlc.c b/drivers/net/wireless/intel/iwlwifi/mld/tlc.c
index a9ca92c0455e..0e172281b0c8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/tlc.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/tlc.c
@@ -157,9 +157,9 @@ iwl_mld_get_highest_fw_mcs(const struct ieee80211_sta_vht_cap *vht_cap,
static void
iwl_mld_fill_vht_rates(const struct ieee80211_link_sta *link_sta,
const struct ieee80211_sta_vht_cap *vht_cap,
- struct iwl_tlc_config_cmd_v4 *cmd)
+ struct iwl_tlc_config_cmd *cmd)
{
- u16 supp;
+ u32 supp;
int i, highest_mcs;
u8 max_nss = link_sta->rx_nss;
struct ieee80211_vht_cap ieee_vht_cap = {
@@ -182,7 +182,7 @@ iwl_mld_fill_vht_rates(const struct ieee80211_link_sta *link_sta,
if (link_sta->bandwidth == IEEE80211_STA_RX_BW_20)
supp &= ~BIT(IWL_TLC_MNG_HT_RATE_MCS9);
- cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_80] = cpu_to_le16(supp);
+ cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_80] = cpu_to_le32(supp);
/* Check if VHT extended NSS indicates that the bandwidth/NSS
* configuration is supported - only for MCS 0 since we already
* decoded the MCS bits anyway ourselves.
@@ -196,7 +196,7 @@ iwl_mld_fill_vht_rates(const struct ieee80211_link_sta *link_sta,
}
}
-static u16 iwl_mld_he_mac80211_mcs_to_fw_mcs(u16 mcs)
+static u32 iwl_mld_he_mac80211_mcs_to_fw_mcs(u16 mcs)
{
switch (mcs) {
case IEEE80211_HE_MCS_SUPPORT_0_7:
@@ -216,7 +216,7 @@ static u16 iwl_mld_he_mac80211_mcs_to_fw_mcs(u16 mcs)
static void
iwl_mld_fill_he_rates(const struct ieee80211_link_sta *link_sta,
const struct ieee80211_sta_he_cap *own_he_cap,
- struct iwl_tlc_config_cmd_v4 *cmd)
+ struct iwl_tlc_config_cmd *cmd)
{
const struct ieee80211_sta_he_cap *he_cap = &link_sta->he_cap;
u16 mcs_160 = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
@@ -245,7 +245,7 @@ iwl_mld_fill_he_rates(const struct ieee80211_link_sta *link_sta,
if (_mcs_80 > _tx_mcs_80)
_mcs_80 = _tx_mcs_80;
cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_80] =
- cpu_to_le16(iwl_mld_he_mac80211_mcs_to_fw_mcs(_mcs_80));
+ cpu_to_le32(iwl_mld_he_mac80211_mcs_to_fw_mcs(_mcs_80));
/* If one side doesn't support - mark both as not supporting */
if (_mcs_160 == IEEE80211_HE_MCS_NOT_SUPPORTED ||
@@ -256,19 +256,19 @@ iwl_mld_fill_he_rates(const struct ieee80211_link_sta *link_sta,
if (_mcs_160 > _tx_mcs_160)
_mcs_160 = _tx_mcs_160;
cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_160] =
- cpu_to_le16(iwl_mld_he_mac80211_mcs_to_fw_mcs(_mcs_160));
+ cpu_to_le32(iwl_mld_he_mac80211_mcs_to_fw_mcs(_mcs_160));
}
}
-static void iwl_mld_set_eht_mcs(__le16 ht_rates[][3],
+static void iwl_mld_set_eht_mcs(__le32 ht_rates[][3],
enum IWL_TLC_MCS_PER_BW bw,
- u8 max_nss, u16 mcs_msk)
+ u8 max_nss, u32 mcs_msk)
{
if (max_nss >= 2)
- ht_rates[IWL_TLC_NSS_2][bw] |= cpu_to_le16(mcs_msk);
+ ht_rates[IWL_TLC_NSS_2][bw] |= cpu_to_le32(mcs_msk);
if (max_nss >= 1)
- ht_rates[IWL_TLC_NSS_1][bw] |= cpu_to_le16(mcs_msk);
+ ht_rates[IWL_TLC_NSS_1][bw] |= cpu_to_le32(mcs_msk);
}
static const
@@ -307,7 +307,7 @@ iwl_mld_fill_eht_rates(struct ieee80211_vif *vif,
const struct ieee80211_link_sta *link_sta,
const struct ieee80211_sta_he_cap *own_he_cap,
const struct ieee80211_sta_eht_cap *own_eht_cap,
- struct iwl_tlc_config_cmd_v4 *cmd)
+ struct iwl_tlc_config_cmd *cmd)
{
/* peer RX mcs capa */
const struct ieee80211_eht_mcs_nss_supp *eht_rx_mcs =
@@ -405,7 +405,7 @@ iwl_mld_fill_supp_rates(struct iwl_mld *mld, struct ieee80211_vif *vif,
struct ieee80211_supported_band *sband,
const struct ieee80211_sta_he_cap *own_he_cap,
const struct ieee80211_sta_eht_cap *own_eht_cap,
- struct iwl_tlc_config_cmd_v4 *cmd)
+ struct iwl_tlc_config_cmd *cmd)
{
int i;
u16 non_ht_rates = 0;
@@ -435,7 +435,7 @@ iwl_mld_fill_supp_rates(struct iwl_mld *mld, struct ieee80211_vif *vif,
} else if (ht_cap->ht_supported) {
cmd->mode = IWL_TLC_MNG_MODE_HT;
cmd->ht_rates[IWL_TLC_NSS_1][IWL_TLC_MCS_PER_BW_80] =
- cpu_to_le16(ht_cap->mcs.rx_mask[0]);
+ cpu_to_le32(ht_cap->mcs.rx_mask[0]);
/* the station support only a single receive chain */
if (link_sta->smps_mode == IEEE80211_SMPS_STATIC)
@@ -443,10 +443,30 @@ iwl_mld_fill_supp_rates(struct iwl_mld *mld, struct ieee80211_vif *vif,
0;
else
cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_MCS_PER_BW_80] =
- cpu_to_le16(ht_cap->mcs.rx_mask[1]);
+ cpu_to_le32(ht_cap->mcs.rx_mask[1]);
}
}
+static void iwl_mld_convert_tlc_cmd_to_v4(struct iwl_tlc_config_cmd *cmd,
+ struct iwl_tlc_config_cmd_v4 *cmd_v4)
+{
+ /* Copy everything until ht_rates */
+ memcpy(cmd_v4, cmd, offsetof(struct iwl_tlc_config_cmd, ht_rates));
+
+ /* Convert ht_rates from __le32 to __le16 */
+ BUILD_BUG_ON(ARRAY_SIZE(cmd_v4->ht_rates) != ARRAY_SIZE(cmd->ht_rates));
+ BUILD_BUG_ON(ARRAY_SIZE(cmd_v4->ht_rates[0]) != ARRAY_SIZE(cmd->ht_rates[0]));
+
+ for (int nss = 0; nss < ARRAY_SIZE(cmd->ht_rates); nss++)
+ for (int bw = 0; bw < ARRAY_SIZE(cmd->ht_rates[nss]); bw++)
+ cmd_v4->ht_rates[nss][bw] =
+ cpu_to_le16(le32_to_cpu(cmd->ht_rates[nss][bw]));
+
+ /* Copy the rest */
+ cmd_v4->max_mpdu_len = cmd->max_mpdu_len;
+ cmd_v4->max_tx_op = cmd->max_tx_op;
+}
+
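
/*
 * A minimal compile-time guard for the assumption behind the prefix
 * memcpy() above, namely that v4 and v5 share an identical layout up to
 * the ht_rates member (v5 only widened the rate masks from __le16 to
 * __le32). The wrapper function is illustrative only.
 */
static inline void tlc_cmd_layout_check(void)
{
	BUILD_BUG_ON(offsetof(struct iwl_tlc_config_cmd_v4, ht_rates) !=
		     offsetof(struct iwl_tlc_config_cmd, ht_rates));
}
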
static void iwl_mld_send_tlc_cmd(struct iwl_mld *mld,
struct ieee80211_vif *vif,
struct ieee80211_link_sta *link_sta,
@@ -458,7 +478,7 @@ static void iwl_mld_send_tlc_cmd(struct iwl_mld *mld,
ieee80211_get_he_iftype_cap_vif(sband, vif);
const struct ieee80211_sta_eht_cap *own_eht_cap =
ieee80211_get_eht_iftype_cap_vif(sband, vif);
- struct iwl_tlc_config_cmd_v4 cmd = {
+ struct iwl_tlc_config_cmd cmd = {
/* For AP mode, use 20 MHz until the STA is authorized */
.max_ch_width = mld_sta->sta_state > IEEE80211_STA_ASSOC ?
iwl_mld_fw_bw_from_sta_bw(link_sta) :
@@ -470,6 +490,11 @@ static void iwl_mld_send_tlc_cmd(struct iwl_mld *mld,
.max_mpdu_len = cpu_to_le16(link_sta->agg.max_amsdu_len),
};
int fw_sta_id = iwl_mld_fw_sta_id_from_link_sta(mld, link_sta);
+ u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, TLC_MNG_CONFIG_CMD);
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mld->fw, cmd_id, 0);
+ struct iwl_tlc_config_cmd_v4 cmd_v4;
+ void *cmd_ptr;
+ u8 cmd_size;
int ret;
if (fw_sta_id < 0)
@@ -481,14 +506,26 @@ static void iwl_mld_send_tlc_cmd(struct iwl_mld *mld,
own_he_cap, own_eht_cap,
&cmd);
+ if (cmd_ver == 5) {
+ cmd_ptr = &cmd;
+ cmd_size = sizeof(cmd);
+ } else if (cmd_ver == 4) {
+ iwl_mld_convert_tlc_cmd_to_v4(&cmd, &cmd_v4);
+ cmd_ptr = &cmd_v4;
+ cmd_size = sizeof(cmd_v4);
+ } else {
+ IWL_ERR(mld, "Unsupported TLC config cmd version %d\n",
+ cmd_ver);
+ return;
+ }
+
IWL_DEBUG_RATE(mld,
"TLC CONFIG CMD, sta_id=%d, max_ch_width=%d, mode=%d\n",
cmd.sta_id, cmd.max_ch_width, cmd.mode);
/* Send async since this can be called within a RCU-read section */
- ret = iwl_mld_send_cmd_with_flags_pdu(mld, WIDE_ID(DATA_PATH_GROUP,
- TLC_MNG_CONFIG_CMD),
- CMD_ASYNC, &cmd);
+ ret = iwl_mld_send_cmd_with_flags_pdu(mld, cmd_id, CMD_ASYNC, cmd_ptr,
+ cmd_size);
if (ret)
IWL_ERR(mld, "Failed to send TLC cmd (%d)\n", ret);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
index 13cdc077d8d3..ca63f780140f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
@@ -253,93 +253,6 @@ static void iwl_mvm_bt_coex_tcm_based_ci(struct iwl_mvm *mvm,
swap(data->primary, data->secondary);
}
-/*
- * This function receives the LB link id and checks if eSR should be
- * enabled or disabled (due to BT coex)
- */
-bool
-iwl_mvm_bt_coex_calculate_esr_mode(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- s32 link_rssi,
- bool primary)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- bool have_wifi_loss_rate =
- iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
- BT_PROFILE_NOTIFICATION, 0) > 4 ||
- iwl_fw_lookup_notif_ver(mvm->fw, BT_COEX_GROUP,
- PROFILE_NOTIF, 0) >= 1;
- u8 wifi_loss_mid_high_rssi;
- u8 wifi_loss_low_rssi;
- u8 wifi_loss_rate;
-
- if (iwl_fw_lookup_notif_ver(mvm->fw, BT_COEX_GROUP,
- PROFILE_NOTIF, 0) >= 1) {
- /* For now, we consider 2.4 GHz band / ANT_A only */
- wifi_loss_mid_high_rssi =
- mvm->last_bt_wifi_loss.wifi_loss_mid_high_rssi[PHY_BAND_24][0];
- wifi_loss_low_rssi =
- mvm->last_bt_wifi_loss.wifi_loss_low_rssi[PHY_BAND_24][0];
- } else {
- wifi_loss_mid_high_rssi = mvm->last_bt_notif.wifi_loss_mid_high_rssi;
- wifi_loss_low_rssi = mvm->last_bt_notif.wifi_loss_low_rssi;
- }
-
- if (wifi_loss_low_rssi == BT_OFF)
- return true;
-
- if (primary)
- return false;
-
- /* The feature is not supported */
- if (!have_wifi_loss_rate)
- return true;
-
-
- /*
- * In case we don't know the RSSI - take the lower wifi loss,
- * so we will more likely enter eSR, and if RSSI is low -
- * we will get an update on this and exit eSR.
- */
- if (!link_rssi)
- wifi_loss_rate = wifi_loss_mid_high_rssi;
-
- else if (mvmvif->esr_active)
- /* RSSI needs to get really low to disable eSR... */
- wifi_loss_rate =
- link_rssi <= -IWL_MVM_BT_COEX_DISABLE_ESR_THRESH ?
- wifi_loss_low_rssi :
- wifi_loss_mid_high_rssi;
- else
- /* ...And really high before we enable it back */
- wifi_loss_rate =
- link_rssi <= -IWL_MVM_BT_COEX_ENABLE_ESR_THRESH ?
- wifi_loss_low_rssi :
- wifi_loss_mid_high_rssi;
-
- return wifi_loss_rate <= IWL_MVM_BT_COEX_WIFI_LOSS_THRESH;
-}
-
-void iwl_mvm_bt_coex_update_link_esr(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- int link_id)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm_vif_link_info *link = mvmvif->link[link_id];
-
- if (!ieee80211_vif_is_mld(vif) ||
- !iwl_mvm_vif_from_mac80211(vif)->authorized ||
- WARN_ON(!link))
- return;
-
- if (!iwl_mvm_bt_coex_calculate_esr_mode(mvm, vif,
- (s8)link->beacon_stats.avg_signal,
- link_id == iwl_mvm_get_primary_link(vif)))
- /* In case we decided to exit eSR - stay with the primary */
- iwl_mvm_exit_esr(mvm, vif, IWL_MVM_ESR_EXIT_COEX,
- iwl_mvm_get_primary_link(vif));
-}
-
static void iwl_mvm_bt_notif_per_link(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct iwl_bt_iterator_data *data,
@@ -385,8 +298,6 @@ static void iwl_mvm_bt_notif_per_link(struct iwl_mvm *mvm,
return;
}
- iwl_mvm_bt_coex_update_link_esr(mvm, vif, link_id);
-
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2))
min_ag_for_static_smps = BT_VERY_HIGH_TRAFFIC;
else
@@ -525,32 +436,6 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
iwl_mvm_bt_notif_per_link(mvm, vif, data, link_id);
}
-/* must be called under rcu_read_lock */
-static void iwl_mvm_bt_coex_notif_iterator(void *_data, u8 *mac,
- struct ieee80211_vif *vif)
-{
- struct iwl_mvm *mvm = _data;
- struct ieee80211_bss_conf *link_conf;
- unsigned int link_id;
-
- lockdep_assert_held(&mvm->mutex);
-
- if (vif->type != NL80211_IFTYPE_STATION)
- return;
-
- for_each_vif_active_link(vif, link_conf, link_id) {
- struct ieee80211_chanctx_conf *chanctx_conf =
- rcu_dereference_check(link_conf->chanctx_conf,
- lockdep_is_held(&mvm->mutex));
-
- if ((!chanctx_conf ||
- chanctx_conf->def.chan->band != NL80211_BAND_2GHZ))
- continue;
-
- iwl_mvm_bt_coex_update_link_esr(mvm, vif, link_id);
- }
-}
-
static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
{
struct iwl_bt_iterator_data data = {
@@ -654,22 +539,6 @@ void iwl_mvm_rx_bt_coex_old_notif(struct iwl_mvm *mvm,
iwl_mvm_bt_coex_notif_handle(mvm);
}
-void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb)
-{
- const struct iwl_rx_packet *pkt = rxb_addr(rxb);
- const struct iwl_bt_coex_profile_notif *notif = (const void *)pkt->data;
-
- lockdep_assert_held(&mvm->mutex);
-
- mvm->last_bt_wifi_loss = *notif;
-
- ieee80211_iterate_active_interfaces(mvm->hw,
- IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_bt_coex_notif_iterator,
- mvm);
-}
-
void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
enum ieee80211_rssi_event_data rssi_event)
{
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
index 776600ddaea6..17f94663c941 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
- * Copyright (C) 2013-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2013-2014, 2018-2025 Intel Corporation
* Copyright (C) 2015 Intel Deutschland GmbH
*/
#ifndef __MVM_CONSTANTS_H
@@ -11,15 +11,7 @@
#include "fw-api.h"
#define IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM 20
-#define IWL_MVM_BT_COEX_DISABLE_ESR_THRESH 69
-#define IWL_MVM_BT_COEX_ENABLE_ESR_THRESH 63
-#define IWL_MVM_BT_COEX_WIFI_LOSS_THRESH 0
#define IWL_MVM_TRIGGER_LINK_SEL_TIME_SEC 30
-#define IWL_MVM_TPT_COUNT_WINDOW_SEC 5
-#define IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH_2_LINKS 5
-#define IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH 15
-#define IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH_BSS_PARAM_CHANGED 11
-#define IWL_MVM_LOW_RSSI_MLO_SCAN_THRESH -72
#define IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT (100 * USEC_PER_MSEC)
#define IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT (100 * USEC_PER_MSEC)
@@ -129,14 +121,4 @@
#define IWL_MVM_MIN_BEACON_INTERVAL_TU 16
#define IWL_MVM_AUTO_EML_ENABLE true
-#define IWL_MVM_HIGH_RSSI_THRESH_20MHZ -67
-#define IWL_MVM_LOW_RSSI_THRESH_20MHZ -71
-#define IWL_MVM_HIGH_RSSI_THRESH_40MHZ -64
-#define IWL_MVM_LOW_RSSI_THRESH_40MHZ -67
-#define IWL_MVM_HIGH_RSSI_THRESH_80MHZ -61
-#define IWL_MVM_LOW_RSSI_THRESH_80MHZ -74
-#define IWL_MVM_HIGH_RSSI_THRESH_160MHZ -58
-#define IWL_MVM_LOW_RSSI_THRESH_160MHZ -61
-
-#define IWL_MVM_ENTER_ESR_TPT_THRESH 400
#endif /* __MVM_CONSTANTS_H */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 029c846a236f..07f1a84c274e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -928,6 +928,10 @@ iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
if (ap_sta->mfp)
wowlan_config_cmd->flags |= IS_11W_ASSOC;
+ if (rcu_access_pointer(mvmvif->bcn_prot.keys[0]) ||
+ rcu_access_pointer(mvmvif->bcn_prot.keys[1]))
+ wowlan_config_cmd->flags |= HAS_BEACON_PROTECTION;
+
if (iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_CONFIGURATION, 0) < 6) {
/* Query the last used seqno and set it */
int ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);
@@ -1238,15 +1242,14 @@ static void iwl_mvm_free_nd(struct iwl_mvm *mvm)
}
static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
- struct cfg80211_wowlan *wowlan,
- bool test)
+ struct cfg80211_wowlan *wowlan)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct ieee80211_vif *vif = NULL;
struct iwl_mvm_vif *mvmvif = NULL;
struct ieee80211_sta *ap_sta = NULL;
struct iwl_mvm_vif_link_info *mvm_link;
- struct iwl_d3_manager_config d3_cfg_cmd_data = {
+ struct iwl_d3_manager_config d3_cfg_cmd = {
/*
* Program the minimum sleep time to 10 seconds, as many
* platforms have issues processing a wakeup signal while
@@ -1254,23 +1257,14 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
*/
.min_sleep_time = cpu_to_le32(10 * 1000 * 1000),
};
- struct iwl_host_cmd d3_cfg_cmd = {
- .id = D3_CONFIG_CMD,
- .flags = CMD_WANT_SKB,
- .data[0] = &d3_cfg_cmd_data,
- .len[0] = sizeof(d3_cfg_cmd_data),
- };
int ret;
int len __maybe_unused;
bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
if (!wowlan) {
- /*
- * mac80211 shouldn't get here, but for D3 test
- * it doesn't warrant a warning
- */
- WARN_ON(!test);
+ /* mac80211 shouldn't get here */
+ WARN_ON(1);
return -EINVAL;
}
@@ -1278,10 +1272,6 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
if (IS_ERR_OR_NULL(vif))
return 1;
- ret = iwl_mvm_block_esr_sync(mvm, vif, IWL_MVM_ESR_BLOCKED_WOWLAN);
- if (ret)
- return ret;
-
mutex_lock(&mvm->mutex);
set_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
@@ -1351,7 +1341,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
#ifdef CONFIG_IWLWIFI_DEBUGFS
if (mvm->d3_wake_sysassert)
- d3_cfg_cmd_data.wakeup_flags |=
+ d3_cfg_cmd.wakeup_flags |=
cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR);
#endif
@@ -1364,21 +1354,14 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
iwl_fw_dbg_stop_restart_recording(&mvm->fwrt, NULL, true);
/* must be last -- this switches firmware state */
- ret = iwl_mvm_send_cmd(mvm, &d3_cfg_cmd);
+ ret = iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD, 0, sizeof(d3_cfg_cmd),
+ &d3_cfg_cmd);
if (ret)
goto out;
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- len = iwl_rx_packet_payload_len(d3_cfg_cmd.resp_pkt);
- if (len >= sizeof(u32)) {
- mvm->d3_test_pme_ptr =
- le32_to_cpup((__le32 *)d3_cfg_cmd.resp_pkt->data);
- }
-#endif
- iwl_free_resp(&d3_cfg_cmd);
clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
- ret = iwl_trans_d3_suspend(mvm->trans, test, !unified_image);
+ ret = iwl_trans_d3_suspend(mvm->trans, !unified_image);
out:
if (ret < 0) {
iwl_mvm_free_nd(mvm);
@@ -1401,7 +1384,7 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
iwl_fw_runtime_suspend(&mvm->fwrt);
mutex_unlock(&mvm->mutex);
- return __iwl_mvm_suspend(hw, wowlan, false);
+ return __iwl_mvm_suspend(hw, wowlan);
}
struct iwl_multicast_key_data {
@@ -1794,63 +1777,8 @@ static void iwl_mvm_set_key_rx_seq(struct ieee80211_key_conf *key,
struct iwl_mvm_d3_gtk_iter_data {
struct iwl_mvm *mvm;
struct iwl_wowlan_status_data *status;
- u32 gtk_cipher, igtk_cipher, bigtk_cipher;
- bool unhandled_cipher, igtk_support, bigtk_support;
- int num_keys;
};
-static void iwl_mvm_d3_find_last_keys(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key,
- void *_data)
-{
- struct iwl_mvm_d3_gtk_iter_data *data = _data;
- int link_id = vif->active_links ? __ffs(vif->active_links) : -1;
-
- if (link_id >= 0 && key->link_id >= 0 && link_id != key->link_id)
- return;
-
- if (data->unhandled_cipher)
- return;
-
- switch (key->cipher) {
- case WLAN_CIPHER_SUITE_WEP40:
- case WLAN_CIPHER_SUITE_WEP104:
- /* ignore WEP completely, nothing to do */
- return;
- case WLAN_CIPHER_SUITE_CCMP:
- case WLAN_CIPHER_SUITE_GCMP:
- case WLAN_CIPHER_SUITE_GCMP_256:
- case WLAN_CIPHER_SUITE_TKIP:
- /* we support these */
- data->gtk_cipher = key->cipher;
- break;
- case WLAN_CIPHER_SUITE_BIP_GMAC_128:
- case WLAN_CIPHER_SUITE_BIP_GMAC_256:
- case WLAN_CIPHER_SUITE_BIP_CMAC_256:
- case WLAN_CIPHER_SUITE_AES_CMAC:
- /* we support these */
- if (data->igtk_support &&
- (key->keyidx == 4 || key->keyidx == 5)) {
- data->igtk_cipher = key->cipher;
- } else if (data->bigtk_support &&
- (key->keyidx == 6 || key->keyidx == 7)) {
- data->bigtk_cipher = key->cipher;
- } else {
- data->unhandled_cipher = true;
- return;
- }
- break;
- default:
- /* everything else - disconnect from AP */
- data->unhandled_cipher = true;
- return;
- }
-
- data->num_keys++;
-}
-
static void
iwl_mvm_d3_set_igtk_bigtk_ipn(const struct iwl_multicast_key_data *key,
struct ieee80211_key_seq *seq, u32 cipher)
@@ -1896,9 +1824,6 @@ static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw,
if (link_id >= 0 && key->link_id >= 0 && link_id != key->link_id)
return;
- if (data->unhandled_cipher)
- return;
-
switch (key->cipher) {
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
@@ -1947,52 +1872,24 @@ static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw,
static bool iwl_mvm_gtk_rekey(struct iwl_wowlan_status_data *status,
struct ieee80211_vif *vif,
- struct iwl_mvm *mvm, u32 gtk_cipher)
+ struct iwl_mvm *mvm)
{
int i, j;
struct ieee80211_key_conf *key;
- DEFINE_RAW_FLEX(struct ieee80211_key_conf, conf, key,
- WOWLAN_KEY_MAX_SIZE);
int link_id = vif->active_links ? __ffs(vif->active_links) : -1;
- u8 key_data[WOWLAN_KEY_MAX_SIZE];
-
- conf->cipher = gtk_cipher;
-
- BUILD_BUG_ON(WLAN_KEY_LEN_CCMP != WLAN_KEY_LEN_GCMP);
- BUILD_BUG_ON(WOWLAN_KEY_MAX_SIZE < WLAN_KEY_LEN_CCMP);
- BUILD_BUG_ON(WOWLAN_KEY_MAX_SIZE < WLAN_KEY_LEN_GCMP_256);
- BUILD_BUG_ON(WOWLAN_KEY_MAX_SIZE < WLAN_KEY_LEN_TKIP);
- BUILD_BUG_ON(WOWLAN_KEY_MAX_SIZE < sizeof(status->gtk[0].key));
-
- switch (gtk_cipher) {
- case WLAN_CIPHER_SUITE_CCMP:
- case WLAN_CIPHER_SUITE_GCMP:
- conf->keylen = WLAN_KEY_LEN_CCMP;
- break;
- case WLAN_CIPHER_SUITE_GCMP_256:
- conf->keylen = WLAN_KEY_LEN_GCMP_256;
- break;
- case WLAN_CIPHER_SUITE_TKIP:
- conf->keylen = WLAN_KEY_LEN_TKIP;
- break;
- default:
- WARN_ON(1);
- }
for (i = 0; i < ARRAY_SIZE(status->gtk); i++) {
if (!status->gtk[i].len)
continue;
- conf->keyidx = status->gtk[i].id;
IWL_DEBUG_WOWLAN(mvm,
- "Received from FW GTK cipher %d, key index %d\n",
- conf->cipher, conf->keyidx);
- memcpy(conf->key, status->gtk[i].key,
- sizeof(status->gtk[i].key));
- memcpy(key_data, status->gtk[i].key, sizeof(status->gtk[i].key));
-
- key = ieee80211_gtk_rekey_add(vif, status->gtk[i].id, key_data,
- sizeof(key_data), link_id);
+ "Received from FW GTK: key index %d\n",
+ status->gtk[i].id);
+
+ key = ieee80211_gtk_rekey_add(vif, status->gtk[i].id,
+ status->gtk[i].key,
+ sizeof(status->gtk[i].key),
+ link_id);
if (IS_ERR(key)) {
/* the FW may also send the old keys */
if (PTR_ERR(key) == -EALREADY)
@@ -2015,53 +1912,26 @@ static bool iwl_mvm_gtk_rekey(struct iwl_wowlan_status_data *status,
static bool
iwl_mvm_d3_igtk_bigtk_rekey_add(struct iwl_wowlan_status_data *status,
- struct ieee80211_vif *vif, u32 cipher,
+ struct ieee80211_vif *vif,
struct iwl_multicast_key_data *key_data)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- DEFINE_RAW_FLEX(struct ieee80211_key_conf, conf, key,
- WOWLAN_KEY_MAX_SIZE);
struct ieee80211_key_conf *key_config;
struct ieee80211_key_seq seq;
int link_id = vif->active_links ? __ffs(vif->active_links) : -1;
- u8 key[WOWLAN_KEY_MAX_SIZE];
s8 keyidx = key_data->id;
- conf->cipher = cipher;
- conf->keyidx = keyidx;
-
if (!key_data->len)
return true;
- iwl_mvm_d3_set_igtk_bigtk_ipn(key_data, &seq, conf->cipher);
-
- switch (cipher) {
- case WLAN_CIPHER_SUITE_BIP_GMAC_128:
- conf->keylen = WLAN_KEY_LEN_BIP_GMAC_128;
- break;
- case WLAN_CIPHER_SUITE_BIP_GMAC_256:
- conf->keylen = WLAN_KEY_LEN_BIP_GMAC_256;
- break;
- case WLAN_CIPHER_SUITE_AES_CMAC:
- conf->keylen = WLAN_KEY_LEN_AES_CMAC;
- break;
- case WLAN_CIPHER_SUITE_BIP_CMAC_256:
- conf->keylen = WLAN_KEY_LEN_BIP_CMAC_256;
- break;
- default:
- WARN_ON(1);
- }
- BUILD_BUG_ON(WOWLAN_KEY_MAX_SIZE < sizeof(key_data->key));
- memcpy(conf->key, key_data->key, conf->keylen);
-
- memcpy(key, key_data->key, sizeof(key_data->key));
-
- key_config = ieee80211_gtk_rekey_add(vif, keyidx, key, sizeof(key),
- link_id);
+ key_config = ieee80211_gtk_rekey_add(vif, keyidx, key_data->key,
+ sizeof(key_data->key), link_id);
if (IS_ERR(key_config)) {
/* the FW may also send the old keys */
return PTR_ERR(key_config) == -EALREADY;
}
+
+ iwl_mvm_d3_set_igtk_bigtk_ipn(key_data, &seq, key_config->cipher);
ieee80211_set_key_rx_seq(key_config, 0, &seq);
if (keyidx == 4 || keyidx == 5) {
@@ -2115,27 +1985,6 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
if (!status || !vif->bss_conf.bssid)
return false;
-
- if (iwl_mvm_lookup_wowlan_status_ver(mvm) > 6 ||
- iwl_fw_lookup_notif_ver(mvm->fw, PROT_OFFLOAD_GROUP,
- WOWLAN_INFO_NOTIFICATION,
- 0))
- gtkdata.igtk_support = true;
-
- if (iwl_fw_lookup_notif_ver(mvm->fw, PROT_OFFLOAD_GROUP,
- WOWLAN_INFO_NOTIFICATION,
- 0) >= 3)
- gtkdata.bigtk_support = true;
-
- /* find last GTK that we used initially, if any */
- ieee80211_iter_keys(mvm->hw, vif,
- iwl_mvm_d3_find_last_keys, &gtkdata);
- /* not trying to keep connections with MFP/unhandled ciphers */
- if (gtkdata.unhandled_cipher)
- return false;
- if (!gtkdata.num_keys)
- goto out;
-
/*
* invalidate all other GTKs that might still exist and update
* the one that we used
@@ -2149,17 +1998,15 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
IWL_DEBUG_WOWLAN(mvm, "num of GTK rekeying %d\n",
status->num_of_gtk_rekeys);
- if (!iwl_mvm_gtk_rekey(status, vif, mvm, gtkdata.gtk_cipher))
+ if (!iwl_mvm_gtk_rekey(status, vif, mvm))
return false;
if (!iwl_mvm_d3_igtk_bigtk_rekey_add(status, vif,
- gtkdata.igtk_cipher,
&status->igtk))
return false;
for (i = 0; i < ARRAY_SIZE(status->bigtk); i++) {
if (!iwl_mvm_d3_igtk_bigtk_rekey_add(status, vif,
- gtkdata.bigtk_cipher,
&status->bigtk[i]))
return false;
}
@@ -2168,7 +2015,6 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
(void *)&replay_ctr, GFP_KERNEL);
}
-out:
if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP,
WOWLAN_GET_STATUSES,
IWL_FW_CMD_VER_UNKNOWN) < 10) {
@@ -2236,7 +2082,7 @@ static void iwl_mvm_convert_gtk_v3(struct iwl_wowlan_status_data *status,
}
static void iwl_mvm_convert_igtk(struct iwl_wowlan_status_data *status,
- struct iwl_wowlan_igtk_status *data)
+ struct iwl_wowlan_igtk_status_v1 *data)
{
int i;
@@ -2260,7 +2106,7 @@ static void iwl_mvm_convert_igtk(struct iwl_wowlan_status_data *status,
}
static void iwl_mvm_convert_bigtk(struct iwl_wowlan_status_data *status,
- const struct iwl_wowlan_igtk_status *data)
+ const struct iwl_wowlan_igtk_status_v1 *data)
{
int data_idx, status_idx = 0;
@@ -2291,7 +2137,7 @@ static void iwl_mvm_convert_bigtk(struct iwl_wowlan_status_data *status,
}
static void iwl_mvm_parse_wowlan_info_notif(struct iwl_mvm *mvm,
- struct iwl_wowlan_info_notif *data,
+ struct iwl_wowlan_info_notif_v5 *data,
struct iwl_wowlan_status_data *status,
u32 len)
{
@@ -2727,7 +2573,6 @@ enum iwl_d3_notif {
/* manage d3 resume data */
struct iwl_d3_data {
struct iwl_wowlan_status_data *status;
- bool test;
u32 d3_end_flags;
u32 notif_expected; /* bitmap - see &enum iwl_d3_notif */
u32 notif_received; /* bitmap - see &enum iwl_d3_notif */
@@ -2919,18 +2764,11 @@ iwl_mvm_choose_query_wakeup_reasons(struct iwl_mvm *mvm,
if (mvm->net_detect) {
iwl_mvm_query_netdetect_reasons(mvm, vif, d3_data);
- } else {
- bool keep = iwl_mvm_query_wakeup_reasons(mvm, vif,
- d3_data->status);
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- if (keep)
- mvm->keep_vif = vif;
-#endif
-
- return keep;
+ return false;
}
- return false;
+
+ return iwl_mvm_query_wakeup_reasons(mvm, vif,
+ d3_data->status);
}
#define IWL_WOWLAN_WAKEUP_REASON_HAS_WAKEUP_PKT (IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET | \
@@ -3069,7 +2907,7 @@ static bool iwl_mvm_wait_d3_notif(struct iwl_notif_wait_data *notif_wait,
iwl_mvm_parse_wowlan_info_notif_v3(mvm, notif,
d3_data->status, len);
} else if (wowlan_info_ver == 5) {
- struct iwl_wowlan_info_notif *notif =
+ struct iwl_wowlan_info_notif_v5 *notif =
(void *)pkt->data;
iwl_mvm_parse_wowlan_info_notif(mvm, notif,
@@ -3143,10 +2981,9 @@ static bool iwl_mvm_wait_d3_notif(struct iwl_notif_wait_data *notif_wait,
return d3_data->notif_received == d3_data->notif_expected;
}
-static int iwl_mvm_resume_firmware(struct iwl_mvm *mvm, bool test)
+static int iwl_mvm_resume_firmware(struct iwl_mvm *mvm)
{
int ret;
- enum iwl_d3_status d3_status;
struct iwl_host_cmd cmd = {
.id = D0I3_END_CMD,
.flags = CMD_WANT_SKB,
@@ -3154,15 +2991,10 @@ static int iwl_mvm_resume_firmware(struct iwl_mvm *mvm, bool test)
bool reset = fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
- ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test, !reset);
+ ret = iwl_trans_d3_resume(mvm->trans, !reset);
if (ret)
return ret;
- if (d3_status != IWL_D3_STATUS_ALIVE) {
- IWL_INFO(mvm, "Device was reset during suspend\n");
- return -ENOENT;
- }
-
/*
* We should trigger the resume flow using a command only for the 22000 family.
* AX210 and above don't need the command since they have
@@ -3207,7 +3039,7 @@ static int iwl_mvm_d3_notif_wait(struct iwl_mvm *mvm,
ARRAY_SIZE(d3_resume_notif),
iwl_mvm_wait_d3_notif, d3_data);
- ret = iwl_mvm_resume_firmware(mvm, d3_data->test);
+ ret = iwl_mvm_resume_firmware(mvm);
if (ret) {
iwl_remove_notification(&mvm->notif_wait, &wait_d3_notif);
return ret;
@@ -3227,13 +3059,12 @@ static inline bool iwl_mvm_d3_resume_notif_based(struct iwl_mvm *mvm)
D3_END_NOTIFICATION, 0);
}
-static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
+static int __iwl_mvm_resume(struct iwl_mvm *mvm)
{
struct ieee80211_vif *vif = NULL;
int ret = 1;
struct iwl_mvm_nd_results results = {};
struct iwl_d3_data d3_data = {
- .test = test,
.notif_expected =
IWL_D3_NOTIF_WOWLAN_INFO |
IWL_D3_NOTIF_D3_END_NOTIF,
@@ -3271,7 +3102,7 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
rt_status = iwl_mvm_check_rt_status(mvm, vif);
if (rt_status != FW_ALIVE) {
- set_bit(STATUS_FW_ERROR, &mvm->trans->status);
+ iwl_trans_notify_fw_error(mvm->trans);
if (rt_status == FW_ERROR) {
IWL_ERR(mvm, "FW Error occurred during suspend. Restarting.\n");
iwl_mvm_dump_nic_error_log(mvm);
@@ -3298,13 +3129,11 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
if (ret)
goto err;
} else {
- ret = iwl_mvm_resume_firmware(mvm, test);
+ ret = iwl_mvm_resume_firmware(mvm);
if (ret < 0)
goto err;
}
- iwl_mvm_unblock_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_WOWLAN);
-
/* when reset is required we can't send these following commands */
if (d3_data.d3_end_flags & IWL_D0I3_RESET_REQUIRE)
goto query_wakeup_reasons;
@@ -3346,7 +3175,7 @@ out:
kfree(d3_data.status);
iwl_mvm_free_nd(mvm);
- if (!d3_data.test && !mvm->net_detect)
+ if (!mvm->net_detect)
ieee80211_iterate_active_interfaces_mtx(mvm->hw,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_d3_disconnect_iter,
@@ -3385,7 +3214,7 @@ int iwl_mvm_resume(struct ieee80211_hw *hw)
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
int ret;
- ret = __iwl_mvm_resume(mvm, false);
+ ret = __iwl_mvm_resume(mvm);
iwl_mvm_resume_tcm(mvm);
@@ -3420,7 +3249,7 @@ void iwl_mvm_fast_suspend(struct iwl_mvm *mvm)
IWL_ERR(mvm,
"fast suspend: couldn't send D3_CONFIG_CMD %d\n", ret);
- ret = iwl_trans_d3_suspend(mvm->trans, false, false);
+ ret = iwl_trans_d3_suspend(mvm->trans, false);
if (ret)
IWL_ERR(mvm, "fast suspend: trans_d3_suspend failed %d\n", ret);
}
@@ -3443,7 +3272,7 @@ int iwl_mvm_fast_resume(struct iwl_mvm *mvm)
rt_status = iwl_mvm_check_rt_status(mvm, NULL);
if (rt_status != FW_ALIVE) {
- set_bit(STATUS_FW_ERROR, &mvm->trans->status);
+ iwl_trans_notify_fw_error(mvm->trans);
if (rt_status == FW_ERROR) {
IWL_ERR(mvm,
"iwl_mvm_check_rt_status failed, device is gone during suspend\n");
@@ -3455,7 +3284,6 @@ int iwl_mvm_fast_resume(struct iwl_mvm *mvm)
&iwl_dump_desc_assert,
false, 0);
}
- mvm->trans->state = IWL_TRANS_NO_FW;
ret = -ENODEV;
goto out;
@@ -3473,125 +3301,3 @@ out:
return ret;
}
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file)
-{
- struct iwl_mvm *mvm = inode->i_private;
- int err;
-
- if (mvm->d3_test_active)
- return -EBUSY;
-
- file->private_data = inode->i_private;
-
- iwl_mvm_pause_tcm(mvm, true);
-
- iwl_fw_runtime_suspend(&mvm->fwrt);
-
- /* start pseudo D3 */
- rtnl_lock();
- wiphy_lock(mvm->hw->wiphy);
- err = __iwl_mvm_suspend(mvm->hw, mvm->hw->wiphy->wowlan_config, true);
- wiphy_unlock(mvm->hw->wiphy);
- rtnl_unlock();
- if (err > 0)
- err = -EINVAL;
- if (err)
- return err;
-
- mvm->d3_test_active = true;
- mvm->keep_vif = NULL;
- return 0;
-}
-
-static ssize_t iwl_mvm_d3_test_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_mvm *mvm = file->private_data;
- unsigned long end = jiffies + 60 * HZ;
- u32 pme_asserted;
-
- while (true) {
- /* read pme_ptr if available */
- if (mvm->d3_test_pme_ptr) {
- pme_asserted = iwl_trans_read_mem32(mvm->trans,
- mvm->d3_test_pme_ptr);
- if (pme_asserted)
- break;
- }
-
- if (msleep_interruptible(100))
- break;
-
- if (time_is_before_jiffies(end)) {
- IWL_ERR(mvm,
- "ending pseudo-D3 with timeout after ~60 seconds\n");
- return -ETIMEDOUT;
- }
- }
-
- return 0;
-}
-
-static void iwl_mvm_d3_test_disconn_work_iter(void *_data, u8 *mac,
- struct ieee80211_vif *vif)
-{
- /* skip the one we keep connection on */
- if (_data == vif)
- return;
-
- if (vif->type == NL80211_IFTYPE_STATION)
- ieee80211_connection_loss(vif);
-}
-
-static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
-{
- struct iwl_mvm *mvm = inode->i_private;
- bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
-
- mvm->d3_test_active = false;
-
- iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt);
-
- rtnl_lock();
- wiphy_lock(mvm->hw->wiphy);
- __iwl_mvm_resume(mvm, true);
- wiphy_unlock(mvm->hw->wiphy);
- rtnl_unlock();
-
- iwl_mvm_resume_tcm(mvm);
-
- iwl_fw_runtime_resume(&mvm->fwrt);
-
- iwl_abort_notification_waits(&mvm->notif_wait);
- if (!unified_image) {
- int remaining_time = 10;
-
- ieee80211_restart_hw(mvm->hw);
-
- /* wait for restart and disconnect all interfaces */
- while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
- remaining_time > 0) {
- remaining_time--;
- msleep(1000);
- }
-
- if (remaining_time == 0)
- IWL_ERR(mvm, "Timed out waiting for HW restart!\n");
- }
-
- ieee80211_iterate_active_interfaces_atomic(
- mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_d3_test_disconn_work_iter, mvm->keep_vif);
-
- return 0;
-}
-
-const struct file_operations iwl_dbgfs_d3_test_ops = {
- .open = iwl_mvm_d3_test_open,
- .read = iwl_mvm_d3_test_read,
- .release = iwl_mvm_d3_test_release,
-};
-#endif
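With the pseudo-D3 test hook removed from this file, the D3_CONFIG_CMD response is no longer needed (it was only parsed to fetch d3_test_pme_ptr), so the suspend path above drops CMD_WANT_SKB in favor of the plain PDU helper. A condensed before/after, reconstructed from the hunk:

/* before: synchronous command, response kept just for the PME pointer */
struct iwl_host_cmd cmd = {
	.id	 = D3_CONFIG_CMD,
	.flags	 = CMD_WANT_SKB,
	.data[0] = &d3_cfg_cmd,
	.len[0]	 = sizeof(d3_cfg_cmd),
};

ret = iwl_mvm_send_cmd(mvm, &cmd);
/* ... read mvm->d3_test_pme_ptr from cmd.resp_pkt ... */
iwl_free_resp(&cmd);

/* after: fire-and-forget, no response handling required */
ret = iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD, 0,
			   sizeof(d3_cfg_cmd), &d3_cfg_cmd);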
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
index fbe4e4a50852..a56c352a459a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
@@ -762,96 +762,6 @@ static ssize_t iwl_dbgfs_max_tx_op_read(struct file *file,
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
-static ssize_t iwl_dbgfs_int_mlo_scan_write(struct ieee80211_vif *vif,
- char *buf, size_t count,
- loff_t *ppos)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm *mvm = mvmvif->mvm;
- u32 action;
- int ret;
-
- if (!vif->cfg.assoc || !ieee80211_vif_is_mld(vif))
- return -EINVAL;
-
- if (kstrtou32(buf, 0, &action))
- return -EINVAL;
-
- mutex_lock(&mvm->mutex);
-
- if (!action) {
- ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_INT_MLO, false);
- } else if (action == 1) {
- ret = iwl_mvm_int_mlo_scan(mvm, vif);
- } else {
- ret = -EINVAL;
- }
-
- mutex_unlock(&mvm->mutex);
-
- return ret ?: count;
-}
-
-static ssize_t iwl_dbgfs_esr_disable_reason_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ieee80211_vif *vif = file->private_data;
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm *mvm = mvmvif->mvm;
- unsigned long esr_mask;
- char *buf;
- int bufsz, pos, i;
- ssize_t rv;
-
- mutex_lock(&mvm->mutex);
- esr_mask = mvmvif->esr_disable_reason;
- mutex_unlock(&mvm->mutex);
-
- bufsz = hweight32(esr_mask) * 32 + 40;
- buf = kmalloc(bufsz, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- pos = scnprintf(buf, bufsz, "EMLSR state: '0x%lx'\nreasons:\n",
- esr_mask);
- for_each_set_bit(i, &esr_mask, BITS_PER_LONG)
- pos += scnprintf(buf + pos, bufsz - pos, " - %s\n",
- iwl_get_esr_state_string(BIT(i)));
-
- rv = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return rv;
-}
-
-static ssize_t iwl_dbgfs_esr_disable_reason_write(struct ieee80211_vif *vif,
- char *buf, size_t count,
- loff_t *ppos)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm *mvm = mvmvif->mvm;
- u32 reason;
- u8 block;
- int ret;
-
- ret = sscanf(buf, "%u %hhu", &reason, &block);
- if (ret < 0)
- return ret;
-
- if (hweight16(reason) != 1 || !(reason & IWL_MVM_BLOCK_ESR_REASONS))
- return -EINVAL;
-
- mutex_lock(&mvm->mutex);
- if (block)
- iwl_mvm_block_esr(mvm, vif, reason,
- iwl_mvm_get_primary_link(vif));
- else
- iwl_mvm_unblock_esr(mvm, vif, reason);
- mutex_unlock(&mvm->mutex);
-
- return count;
-}
-
#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
_MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
@@ -884,8 +794,6 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(rx_phyinfo, 10);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(quota_min, 32);
MVM_DEBUGFS_READ_FILE_OPS(os_device_timediff);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(max_tx_op, 10);
-MVM_DEBUGFS_WRITE_FILE_OPS(int_mlo_scan, 32);
-MVM_DEBUGFS_READ_WRITE_FILE_OPS(esr_disable_reason, 32);
void iwl_mvm_vif_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
@@ -916,8 +824,6 @@ void iwl_mvm_vif_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
MVM_DEBUGFS_ADD_FILE_VIF(max_tx_op, mvmvif->dbgfs_dir, 0600);
debugfs_create_bool("ftm_unprotected", 0200, mvmvif->dbgfs_dir,
&mvmvif->ftm_unprotected);
- MVM_DEBUGFS_ADD_FILE_VIF(int_mlo_scan, mvmvif->dbgfs_dir, 0200);
- MVM_DEBUGFS_ADD_FILE_VIF(esr_disable_reason, mvmvif->dbgfs_dir, 0600);
if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
mvmvif == mvm->bf_allowed_vif)
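The two EMLSR debugfs files go away with the rest of the EMLSR control code. For reference, per-vif knobs in this file follow the macro pattern still visible above; a minimal sketch with a hypothetical "my_knob" file (name and handler body are illustrative, not part of this patch):

static ssize_t iwl_dbgfs_my_knob_write(struct ieee80211_vif *vif,
				       char *buf, size_t count,
				       loff_t *ppos)
{
	/* parse buf, act on the vif, return count on success */
	return count;
}

MVM_DEBUGFS_WRITE_FILE_OPS(my_knob, 32);

/* then, inside iwl_mvm_vif_add_debugfs(): */
MVM_DEBUGFS_ADD_FILE_VIF(my_knob, mvmvif->dbgfs_dir, 0200);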
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index f0e184c8a81a..683c0ba5fb39 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -1134,7 +1134,7 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mvm *mvm, char *buf,
if (count == 6 && !strcmp(buf, "nolog\n")) {
set_bit(IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE, &mvm->status);
- iwl_trans_suppress_cmd_error_once(mvm->trans);
+ mvm->trans->suppress_cmd_error_once = true;
}
/* take the return value to make compiler happy - it will fail anyway */
@@ -2159,7 +2159,6 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm)
MVM_DEBUGFS_ADD_FILE(uapsd_noagg_bssids, mvm->debugfs_dir, S_IRUSR);
#ifdef CONFIG_PM_SLEEP
- MVM_DEBUGFS_ADD_FILE(d3_test, mvm->debugfs_dir, 0400);
debugfs_create_bool("d3_wake_sysassert", 0600, mvm->debugfs_dir,
&mvm->d3_wake_sysassert);
debugfs_create_u32("last_netdetect_scans", 0400, mvm->debugfs_dir,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index d931c6eaf12f..865f973f677d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -837,7 +837,7 @@ static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
.flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE),
};
- if (!mvm->trans->ltr_enabled)
+ if (!iwl_trans_is_ltr_enabled(mvm->trans))
return 0;
return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
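This follows the same direction as the iwl_trans_notify_fw_error() and suppress_cmd_error_once changes elsewhere in the series: op-mode code stops poking struct iwl_trans internals directly. A plausible shape for the accessor, assuming it is a thin wrapper (the real definition lives in the transport headers and may differ, e.g. if the field moved):

/* assumed shape only -- see the transport headers for the real definition */
static inline bool iwl_trans_is_ltr_enabled(struct iwl_trans *trans)
{
	return trans->ltr_enabled;
}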
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/link.c b/drivers/net/wireless/intel/iwlwifi/mvm/link.c
index 2269acc55c0e..738facceb240 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/link.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/link.c
@@ -5,50 +5,6 @@
#include "mvm.h"
#include "time-event.h"
-#define HANDLE_ESR_REASONS(HOW) \
- HOW(BLOCKED_PREVENTION) \
- HOW(BLOCKED_WOWLAN) \
- HOW(BLOCKED_TPT) \
- HOW(BLOCKED_FW) \
- HOW(BLOCKED_NON_BSS) \
- HOW(BLOCKED_ROC) \
- HOW(BLOCKED_TMP_NON_BSS) \
- HOW(EXIT_MISSED_BEACON) \
- HOW(EXIT_LOW_RSSI) \
- HOW(EXIT_COEX) \
- HOW(EXIT_BANDWIDTH) \
- HOW(EXIT_CSA) \
- HOW(EXIT_LINK_USAGE)
-
-static const char *const iwl_mvm_esr_states_names[] = {
-#define NAME_ENTRY(x) [ilog2(IWL_MVM_ESR_##x)] = #x,
- HANDLE_ESR_REASONS(NAME_ENTRY)
-};
-
-const char *iwl_get_esr_state_string(enum iwl_mvm_esr_state state)
-{
- int offs = ilog2(state);
-
- if (offs >= ARRAY_SIZE(iwl_mvm_esr_states_names) ||
- !iwl_mvm_esr_states_names[offs])
- return "UNKNOWN";
-
- return iwl_mvm_esr_states_names[offs];
-}
-
-static void iwl_mvm_print_esr_state(struct iwl_mvm *mvm, u32 mask)
-{
-#define NAME_FMT(x) "%s"
-#define NAME_PR(x) (mask & IWL_MVM_ESR_##x) ? "[" #x "]" : "",
- IWL_DEBUG_INFO(mvm,
- "EMLSR state = " HANDLE_ESR_REASONS(NAME_FMT)
- " (0x%x)\n",
- HANDLE_ESR_REASONS(NAME_PR)
- mask);
-#undef NAME_FMT
-#undef NAME_PR
-}
-
static int iwl_mvm_link_cmd_send(struct iwl_mvm *mvm,
struct iwl_link_config_cmd *cmd,
enum iwl_ctxt_action action)
@@ -114,65 +70,6 @@ int iwl_mvm_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return iwl_mvm_link_cmd_send(mvm, &cmd, FW_CTXT_ACTION_ADD);
}
-struct iwl_mvm_esr_iter_data {
- struct ieee80211_vif *vif;
- unsigned int link_id;
- bool lift_block;
-};
-
-static void iwl_mvm_esr_vif_iterator(void *_data, u8 *mac,
- struct ieee80211_vif *vif)
-{
- struct iwl_mvm_esr_iter_data *data = _data;
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- int link_id;
-
- if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_STATION)
- return;
-
- for_each_mvm_vif_valid_link(mvmvif, link_id) {
- struct iwl_mvm_vif_link_info *link_info =
- mvmvif->link[link_id];
- if (vif == data->vif && link_id == data->link_id)
- continue;
- if (link_info->active)
- data->lift_block = false;
- }
-}
-
-int iwl_mvm_esr_non_bss_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- unsigned int link_id, bool active)
-{
- /* An active link of a non-station vif blocks EMLSR. Upon activation
- * block EMLSR on the bss vif. Upon deactivation, check if this link
- * was the last active non-station link, and if so unblock the bss vif
- */
- struct ieee80211_vif *bss_vif = iwl_mvm_get_bss_vif(mvm);
- struct iwl_mvm_esr_iter_data data = {
- .vif = vif,
- .link_id = link_id,
- .lift_block = true,
- };
-
- if (IS_ERR_OR_NULL(bss_vif))
- return 0;
-
- if (active)
- return iwl_mvm_block_esr_sync(mvm, bss_vif,
- IWL_MVM_ESR_BLOCKED_NON_BSS);
-
- ieee80211_iterate_active_interfaces(mvm->hw,
- IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_esr_vif_iterator, &data);
- if (data.lift_block) {
- mutex_lock(&mvm->mutex);
- iwl_mvm_unblock_esr(mvm, bss_vif, IWL_MVM_ESR_BLOCKED_NON_BSS);
- mutex_unlock(&mvm->mutex);
- }
-
- return 0;
-}
-
int iwl_mvm_link_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf,
u32 changes, bool active)
@@ -388,452 +285,6 @@ int iwl_mvm_disable_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return ret;
}
-struct iwl_mvm_rssi_to_grade {
- s8 rssi[2];
- u16 grade;
-};
-
-#define RSSI_TO_GRADE_LINE(_lb, _hb_uhb, _grade) \
- { \
- .rssi = {_lb, _hb_uhb}, \
- .grade = _grade \
- }
-
-/*
- * This array must be sorted by increasing RSSI for proper functionality.
- * The grades are actually estimated throughput, represented as fixed-point
- * with a scale factor of 1/10.
- */
-static const struct iwl_mvm_rssi_to_grade rssi_to_grade_map[] = {
- RSSI_TO_GRADE_LINE(-85, -89, 177),
- RSSI_TO_GRADE_LINE(-83, -86, 344),
- RSSI_TO_GRADE_LINE(-82, -85, 516),
- RSSI_TO_GRADE_LINE(-80, -83, 688),
- RSSI_TO_GRADE_LINE(-77, -79, 1032),
- RSSI_TO_GRADE_LINE(-73, -76, 1376),
- RSSI_TO_GRADE_LINE(-70, -74, 1548),
- RSSI_TO_GRADE_LINE(-69, -72, 1750),
- RSSI_TO_GRADE_LINE(-65, -68, 2064),
- RSSI_TO_GRADE_LINE(-61, -66, 2294),
- RSSI_TO_GRADE_LINE(-58, -61, 2580),
- RSSI_TO_GRADE_LINE(-55, -58, 2868),
- RSSI_TO_GRADE_LINE(-46, -55, 3098),
- RSSI_TO_GRADE_LINE(-43, -54, 3442)
-};
-
-#define MAX_GRADE (rssi_to_grade_map[ARRAY_SIZE(rssi_to_grade_map) - 1].grade)
-
-#define DEFAULT_CHAN_LOAD_LB 30
-#define DEFAULT_CHAN_LOAD_HB 15
-#define DEFAULT_CHAN_LOAD_UHB 0
-
-/* Factors calculation is done with fixed-point with a scaling factor of 1/256 */
-#define SCALE_FACTOR 256
-
-/* Convert a percentage from [0,100] to [0,255] */
-#define NORMALIZE_PERCENT_TO_255(percentage) ((percentage) * SCALE_FACTOR / 100)
-
-static unsigned int
-iwl_mvm_get_puncturing_factor(const struct ieee80211_bss_conf *link_conf)
-{
- enum nl80211_chan_width chan_width =
- link_conf->chanreq.oper.width;
- int mhz = nl80211_chan_width_to_mhz(chan_width);
- unsigned int n_subchannels, n_punctured, puncturing_penalty;
-
- if (WARN_ONCE(mhz < 20 || mhz > 320,
- "Invalid channel width : (%d)\n", mhz))
- return SCALE_FACTOR;
-
- /* No puncturing, no penalty */
- if (mhz < 80)
- return SCALE_FACTOR;
-
- /* total number of subchannels */
- n_subchannels = mhz / 20;
- /* how many of these are punctured */
- n_punctured = hweight16(link_conf->chanreq.oper.punctured);
-
- puncturing_penalty = n_punctured * SCALE_FACTOR / n_subchannels;
- return SCALE_FACTOR - puncturing_penalty;
-}
-
-static unsigned int
-iwl_mvm_get_chan_load(struct ieee80211_bss_conf *link_conf)
-{
- struct ieee80211_vif *vif = link_conf->vif;
- struct iwl_mvm_vif_link_info *mvm_link =
- iwl_mvm_vif_from_mac80211(link_conf->vif)->link[link_conf->link_id];
- const struct element *bss_load_elem;
- const struct ieee80211_bss_load_elem *bss_load;
- enum nl80211_band band = link_conf->chanreq.oper.chan->band;
- const struct cfg80211_bss_ies *ies;
- unsigned int chan_load;
- u32 chan_load_by_us;
-
- rcu_read_lock();
- if (ieee80211_vif_link_active(vif, link_conf->link_id))
- ies = rcu_dereference(link_conf->bss->beacon_ies);
- else
- ies = rcu_dereference(link_conf->bss->ies);
-
- if (ies)
- bss_load_elem = cfg80211_find_elem(WLAN_EID_QBSS_LOAD,
- ies->data, ies->len);
- else
- bss_load_elem = NULL;
-
- /* If there is no BSS Load element, take the defaults */
- if (!bss_load_elem ||
- bss_load_elem->datalen != sizeof(*bss_load)) {
- rcu_read_unlock();
- switch (band) {
- case NL80211_BAND_2GHZ:
- chan_load = DEFAULT_CHAN_LOAD_LB;
- break;
- case NL80211_BAND_5GHZ:
- chan_load = DEFAULT_CHAN_LOAD_HB;
- break;
- case NL80211_BAND_6GHZ:
- chan_load = DEFAULT_CHAN_LOAD_UHB;
- break;
- default:
- chan_load = 0;
- break;
- }
- /* The defaults are given in percentage */
- return NORMALIZE_PERCENT_TO_255(chan_load);
- }
-
- bss_load = (const void *)bss_load_elem->data;
- /* Channel util is in range 0-255 */
- chan_load = bss_load->channel_util;
- rcu_read_unlock();
-
- if (!mvm_link || !mvm_link->active)
- return chan_load;
-
- if (WARN_ONCE(!mvm_link->phy_ctxt,
- "Active link (%u) without phy ctxt assigned!\n",
- link_conf->link_id))
- return chan_load;
-
- /* channel load by us is given in percentage */
- chan_load_by_us =
- NORMALIZE_PERCENT_TO_255(mvm_link->phy_ctxt->channel_load_by_us);
-
- /* Use only values from the firmware that can possibly be valid */
- if (chan_load_by_us <= chan_load)
- chan_load -= chan_load_by_us;
-
- return chan_load;
-}
-
-static unsigned int
-iwl_mvm_get_chan_load_factor(struct ieee80211_bss_conf *link_conf)
-{
- return SCALE_FACTOR - iwl_mvm_get_chan_load(link_conf);
-}
-
-/* This function calculates the grade of a link. Returns 0 on error */
-VISIBLE_IF_IWLWIFI_KUNIT
-unsigned int iwl_mvm_get_link_grade(struct ieee80211_bss_conf *link_conf)
-{
- enum nl80211_band band;
- int i, rssi_idx;
- s32 link_rssi;
- unsigned int grade = MAX_GRADE;
-
- if (WARN_ON_ONCE(!link_conf))
- return 0;
-
- band = link_conf->chanreq.oper.chan->band;
- if (WARN_ONCE(band != NL80211_BAND_2GHZ &&
- band != NL80211_BAND_5GHZ &&
- band != NL80211_BAND_6GHZ,
- "Invalid band (%u)\n", band))
- return 0;
-
- link_rssi = MBM_TO_DBM(link_conf->bss->signal);
- /*
- * For 6 GHz the RSSI of the beacons is lower than
- * the RSSI of the data.
- */
- if (band == NL80211_BAND_6GHZ)
- link_rssi += 4;
-
- rssi_idx = band == NL80211_BAND_2GHZ ? 0 : 1;
-
- /* No valid RSSI - take the lowest grade */
- if (!link_rssi)
- link_rssi = rssi_to_grade_map[0].rssi[rssi_idx];
-
- /* Get grade based on RSSI */
- for (i = 0; i < ARRAY_SIZE(rssi_to_grade_map); i++) {
- const struct iwl_mvm_rssi_to_grade *line =
- &rssi_to_grade_map[i];
-
- if (link_rssi > line->rssi[rssi_idx])
- continue;
- grade = line->grade;
- break;
- }
-
- /* apply the channel load and puncturing factors */
- grade = grade * iwl_mvm_get_chan_load_factor(link_conf) / SCALE_FACTOR;
- grade = grade * iwl_mvm_get_puncturing_factor(link_conf) / SCALE_FACTOR;
- return grade;
-}
-EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mvm_get_link_grade);
-
-static
-u8 iwl_mvm_set_link_selection_data(struct ieee80211_vif *vif,
- struct iwl_mvm_link_sel_data *data,
- unsigned long usable_links,
- u8 *best_link_idx)
-{
- u8 n_data = 0;
- u16 max_grade = 0;
- unsigned long link_id;
-
- /* TODO: don't select links that weren't discovered in the last scan */
- for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
- struct ieee80211_bss_conf *link_conf =
- link_conf_dereference_protected(vif, link_id);
-
- if (WARN_ON_ONCE(!link_conf))
- continue;
-
- data[n_data].link_id = link_id;
- data[n_data].chandef = &link_conf->chanreq.oper;
- data[n_data].signal = link_conf->bss->signal / 100;
- data[n_data].grade = iwl_mvm_get_link_grade(link_conf);
-
- if (data[n_data].grade > max_grade) {
- max_grade = data[n_data].grade;
- *best_link_idx = n_data;
- }
- n_data++;
- }
-
- return n_data;
-}
-
-struct iwl_mvm_bw_to_rssi_threshs {
- s8 low;
- s8 high;
-};
-
-#define BW_TO_RSSI_THRESHOLDS(_bw) \
- [IWL_PHY_CHANNEL_MODE ## _bw] = { \
- .low = IWL_MVM_LOW_RSSI_THRESH_##_bw##MHZ, \
- .high = IWL_MVM_HIGH_RSSI_THRESH_##_bw##MHZ \
- }
-
-s8 iwl_mvm_get_esr_rssi_thresh(struct iwl_mvm *mvm,
- const struct cfg80211_chan_def *chandef,
- bool low)
-{
- const struct iwl_mvm_bw_to_rssi_threshs bw_to_rssi_threshs_map[] = {
- BW_TO_RSSI_THRESHOLDS(20),
- BW_TO_RSSI_THRESHOLDS(40),
- BW_TO_RSSI_THRESHOLDS(80),
- BW_TO_RSSI_THRESHOLDS(160)
- /* 320 MHz has the same thresholds as 20 MHz */
- };
- const struct iwl_mvm_bw_to_rssi_threshs *threshs;
- u8 chan_width = iwl_mvm_get_channel_width(chandef);
-
- if (WARN_ON(chandef->chan->band != NL80211_BAND_2GHZ &&
- chandef->chan->band != NL80211_BAND_5GHZ &&
- chandef->chan->band != NL80211_BAND_6GHZ))
- return S8_MAX;
-
- /* 320 MHz (6 GHz only) always uses the 20 MHz thresholds */
- if (chan_width == IWL_PHY_CHANNEL_MODE320)
- chan_width = IWL_PHY_CHANNEL_MODE20;
-
- threshs = &bw_to_rssi_threshs_map[chan_width];
-
- return low ? threshs->low : threshs->high;
-}
-
-static u32
-iwl_mvm_esr_disallowed_with_link(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- const struct iwl_mvm_link_sel_data *link,
- bool primary)
-{
- struct wiphy *wiphy = mvm->hw->wiphy;
- struct ieee80211_bss_conf *conf;
- enum iwl_mvm_esr_state ret = 0;
- s8 thresh;
-
- conf = wiphy_dereference(wiphy, vif->link_conf[link->link_id]);
- if (WARN_ON_ONCE(!conf))
- return false;
-
- /* BT Coex affects eSR mode only if one of the links is on LB */
- if (link->chandef->chan->band == NL80211_BAND_2GHZ &&
- (!iwl_mvm_bt_coex_calculate_esr_mode(mvm, vif, link->signal,
- primary)))
- ret |= IWL_MVM_ESR_EXIT_COEX;
-
- thresh = iwl_mvm_get_esr_rssi_thresh(mvm, link->chandef,
- false);
-
- if (link->signal < thresh)
- ret |= IWL_MVM_ESR_EXIT_LOW_RSSI;
-
- if (conf->csa_active)
- ret |= IWL_MVM_ESR_EXIT_CSA;
-
- if (ret) {
- IWL_DEBUG_INFO(mvm,
- "Link %d is not allowed for esr\n",
- link->link_id);
- iwl_mvm_print_esr_state(mvm, ret);
- }
- return ret;
-}
-
-VISIBLE_IF_IWLWIFI_KUNIT
-bool iwl_mvm_mld_valid_link_pair(struct ieee80211_vif *vif,
- const struct iwl_mvm_link_sel_data *a,
- const struct iwl_mvm_link_sel_data *b)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm *mvm = mvmvif->mvm;
- enum iwl_mvm_esr_state ret = 0;
-
- /* Per-link considerations */
- if (iwl_mvm_esr_disallowed_with_link(mvm, vif, a, true) ||
- iwl_mvm_esr_disallowed_with_link(mvm, vif, b, false))
- return false;
-
- if (a->chandef->chan->band == b->chandef->chan->band ||
- a->chandef->width != b->chandef->width)
- ret |= IWL_MVM_ESR_EXIT_BANDWIDTH;
-
- if (ret) {
- IWL_DEBUG_INFO(mvm,
- "Links %d and %d are not a valid pair for EMLSR\n",
- a->link_id, b->link_id);
- iwl_mvm_print_esr_state(mvm, ret);
- return false;
- }
-
- return true;
-
-}
-EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mvm_mld_valid_link_pair);
-
-/*
- * Returns the combined eSR grade of two given links.
- * Returns 0 if eSR is not allowed with these 2 links.
- */
-static
-unsigned int iwl_mvm_get_esr_grade(struct ieee80211_vif *vif,
- const struct iwl_mvm_link_sel_data *a,
- const struct iwl_mvm_link_sel_data *b,
- u8 *primary_id)
-{
- struct ieee80211_bss_conf *primary_conf;
- struct wiphy *wiphy = ieee80211_vif_to_wdev(vif)->wiphy;
- unsigned int primary_load;
-
- lockdep_assert_wiphy(wiphy);
-
- /* a is always primary, b is always secondary */
- if (b->grade > a->grade)
- swap(a, b);
-
- *primary_id = a->link_id;
-
- if (!iwl_mvm_mld_valid_link_pair(vif, a, b))
- return 0;
-
- primary_conf = wiphy_dereference(wiphy, vif->link_conf[*primary_id]);
-
- if (WARN_ON_ONCE(!primary_conf))
- return 0;
-
- primary_load = iwl_mvm_get_chan_load(primary_conf);
-
- return a->grade +
- ((b->grade * primary_load) / SCALE_FACTOR);
-}
-
-void iwl_mvm_select_links(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
-{
- struct iwl_mvm_link_sel_data data[IEEE80211_MLD_MAX_NUM_LINKS];
- struct iwl_mvm_link_sel_data *best_link;
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- u32 max_active_links = iwl_mvm_max_active_links(mvm, vif);
- u16 usable_links = ieee80211_vif_usable_links(vif);
- u8 best, primary_link, best_in_pair, n_data;
- u16 max_esr_grade = 0, new_active_links;
-
- lockdep_assert_wiphy(mvm->hw->wiphy);
-
- if (!mvmvif->authorized || !ieee80211_vif_is_mld(vif))
- return;
-
- if (!IWL_MVM_AUTO_EML_ENABLE)
- return;
-
- /* The logic below is a simplified version that doesn't support more
- * than 2 links
- */
- WARN_ON_ONCE(max_active_links > 2);
-
- n_data = iwl_mvm_set_link_selection_data(vif, data, usable_links,
- &best);
-
- if (WARN(!n_data, "Couldn't find a valid grade for any link!\n"))
- return;
-
- best_link = &data[best];
- primary_link = best_link->link_id;
- new_active_links = BIT(best_link->link_id);
-
- /* eSR is unsupported or blocked, or there is only one usable link */
- if (max_active_links == 1 || !iwl_mvm_vif_has_esr_cap(mvm, vif) ||
- mvmvif->esr_disable_reason || n_data == 1)
- goto set_active;
-
- for (u8 a = 0; a < n_data; a++)
- for (u8 b = a + 1; b < n_data; b++) {
- u16 esr_grade = iwl_mvm_get_esr_grade(vif, &data[a],
- &data[b],
- &best_in_pair);
-
- if (esr_grade <= max_esr_grade)
- continue;
-
- max_esr_grade = esr_grade;
- primary_link = best_in_pair;
- new_active_links = BIT(data[a].link_id) |
- BIT(data[b].link_id);
- }
-
- /* No valid pair was found, go with the best link */
- if (hweight16(new_active_links) <= 1)
- goto set_active;
-
- /* For equal grade - prefer EMLSR */
- if (best_link->grade > max_esr_grade) {
- primary_link = best_link->link_id;
- new_active_links = BIT(best_link->link_id);
- }
-set_active:
- IWL_DEBUG_INFO(mvm, "Link selection result: 0x%x. Primary = %d\n",
- new_active_links, primary_link);
- ieee80211_set_active_links_async(vif, new_active_links);
- mvmvif->link_selection_res = new_active_links;
- mvmvif->link_selection_primary = primary_link;
-}
-
u8 iwl_mvm_get_primary_link(struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -856,266 +307,6 @@ u8 iwl_mvm_get_primary_link(struct ieee80211_vif *vif)
return __ffs(vif->active_links);
}
-/*
- * For non-MLO/single link, this will return the deflink/single active link,
- * respectively
- */
-u8 iwl_mvm_get_other_link(struct ieee80211_vif *vif, u8 link_id)
-{
- switch (hweight16(vif->active_links)) {
- case 0:
- return 0;
- default:
- WARN_ON(1);
- fallthrough;
- case 1:
- return __ffs(vif->active_links);
- case 2:
- return __ffs(vif->active_links & ~BIT(link_id));
- }
-}
-
-/* Reasons that can cause esr prevention */
-#define IWL_MVM_ESR_PREVENT_REASONS IWL_MVM_ESR_EXIT_MISSED_BEACON
-#define IWL_MVM_PREVENT_ESR_TIMEOUT (HZ * 400)
-#define IWL_MVM_ESR_PREVENT_SHORT (HZ * 300)
-#define IWL_MVM_ESR_PREVENT_LONG (HZ * 600)
-
-static bool iwl_mvm_check_esr_prevention(struct iwl_mvm *mvm,
- struct iwl_mvm_vif *mvmvif,
- enum iwl_mvm_esr_state reason)
-{
- bool timeout_expired = time_after(jiffies,
- mvmvif->last_esr_exit.ts +
- IWL_MVM_PREVENT_ESR_TIMEOUT);
- unsigned long delay;
-
- lockdep_assert_held(&mvm->mutex);
-
- /* Only handle reasons that can cause prevention */
- if (!(reason & IWL_MVM_ESR_PREVENT_REASONS))
- return false;
-
- /*
- * Reset the counter if more than 400 seconds have passed between one
- * exit and the other, or if we exited due to a different reason.
- * Will also reset the counter after the long prevention is done.
- */
- if (timeout_expired || mvmvif->last_esr_exit.reason != reason) {
- mvmvif->exit_same_reason_count = 1;
- return false;
- }
-
- mvmvif->exit_same_reason_count++;
- if (WARN_ON(mvmvif->exit_same_reason_count < 2 ||
- mvmvif->exit_same_reason_count > 3))
- return false;
-
- mvmvif->esr_disable_reason |= IWL_MVM_ESR_BLOCKED_PREVENTION;
-
- /*
- * For the second exit, use a short prevention, and for the third one,
- * use a long prevention.
- */
- delay = mvmvif->exit_same_reason_count == 2 ?
- IWL_MVM_ESR_PREVENT_SHORT :
- IWL_MVM_ESR_PREVENT_LONG;
-
- IWL_DEBUG_INFO(mvm,
- "Preventing EMLSR for %ld seconds due to %u exits with the reason = %s (0x%x)\n",
- delay / HZ, mvmvif->exit_same_reason_count,
- iwl_get_esr_state_string(reason), reason);
-
- wiphy_delayed_work_queue(mvm->hw->wiphy,
- &mvmvif->prevent_esr_done_wk, delay);
- return true;
-}
-
-#define IWL_MVM_TRIGGER_LINK_SEL_TIME (IWL_MVM_TRIGGER_LINK_SEL_TIME_SEC * HZ)
-
-/* API to exit eSR mode */
-void iwl_mvm_exit_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- enum iwl_mvm_esr_state reason,
- u8 link_to_keep)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- u16 new_active_links;
- bool prevented;
-
- lockdep_assert_held(&mvm->mutex);
-
- if (!IWL_MVM_AUTO_EML_ENABLE)
- return;
-
- /* Nothing to do */
- if (!mvmvif->esr_active)
- return;
-
- if (WARN_ON(!ieee80211_vif_is_mld(vif) || !mvmvif->authorized))
- return;
-
- if (WARN_ON(!(vif->active_links & BIT(link_to_keep))))
- link_to_keep = __ffs(vif->active_links);
-
- new_active_links = BIT(link_to_keep);
- IWL_DEBUG_INFO(mvm,
- "Exiting EMLSR. reason = %s (0x%x). Current active links=0x%x, new active links = 0x%x\n",
- iwl_get_esr_state_string(reason), reason,
- vif->active_links, new_active_links);
-
- ieee80211_set_active_links_async(vif, new_active_links);
-
- /* Prevent EMLSR if needed */
- prevented = iwl_mvm_check_esr_prevention(mvm, mvmvif, reason);
-
- /* Remember why and when we exited EMLSR */
- mvmvif->last_esr_exit.ts = jiffies;
- mvmvif->last_esr_exit.reason = reason;
-
- /*
- * If EMLSR is prevented now - don't try to get back to EMLSR.
- * If we exited due to a blocking event, we will try to get back to
- * EMLSR when the corresponding unblocking event happens.
- */
- if (prevented || reason & IWL_MVM_BLOCK_ESR_REASONS)
- return;
-
- /* If EMLSR is not blocked - try enabling it again in 30 seconds */
- wiphy_delayed_work_queue(mvm->hw->wiphy,
- &mvmvif->mlo_int_scan_wk,
- round_jiffies_relative(IWL_MVM_TRIGGER_LINK_SEL_TIME));
-}
-
-void iwl_mvm_block_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- enum iwl_mvm_esr_state reason,
- u8 link_to_keep)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-
- lockdep_assert_held(&mvm->mutex);
-
- if (!IWL_MVM_AUTO_EML_ENABLE)
- return;
-
- /* This should be called only with disable reasons */
- if (WARN_ON(!(reason & IWL_MVM_BLOCK_ESR_REASONS)))
- return;
-
- if (mvmvif->esr_disable_reason & reason)
- return;
-
- IWL_DEBUG_INFO(mvm,
- "Blocking EMLSR mode. reason = %s (0x%x)\n",
- iwl_get_esr_state_string(reason), reason);
-
- mvmvif->esr_disable_reason |= reason;
-
- iwl_mvm_print_esr_state(mvm, mvmvif->esr_disable_reason);
-
- iwl_mvm_exit_esr(mvm, vif, reason, link_to_keep);
-}
-
-int iwl_mvm_block_esr_sync(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- enum iwl_mvm_esr_state reason)
-{
- int primary_link = iwl_mvm_get_primary_link(vif);
- int ret;
-
- if (!IWL_MVM_AUTO_EML_ENABLE || !ieee80211_vif_is_mld(vif))
- return 0;
-
- /* This should be called only with blocking reasons */
- if (WARN_ON(!(reason & IWL_MVM_BLOCK_ESR_REASONS)))
- return 0;
-
- /* leave ESR immediately, not only async with iwl_mvm_block_esr() */
- ret = ieee80211_set_active_links(vif, BIT(primary_link));
- if (ret)
- return ret;
-
- mutex_lock(&mvm->mutex);
- /* only additionally block for consistency and to avoid concurrency */
- iwl_mvm_block_esr(mvm, vif, reason, primary_link);
- mutex_unlock(&mvm->mutex);
-
- return 0;
-}
-
-static void iwl_mvm_esr_unblocked(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- bool need_new_sel = time_after(jiffies, mvmvif->last_esr_exit.ts +
- IWL_MVM_TRIGGER_LINK_SEL_TIME);
-
- lockdep_assert_held(&mvm->mutex);
-
- if (!ieee80211_vif_is_mld(vif) || !mvmvif->authorized ||
- mvmvif->esr_active)
- return;
-
- IWL_DEBUG_INFO(mvm, "EMLSR is unblocked\n");
-
- /* If we exited due to an EXIT reason, and the exit was in less than
- * 30 seconds, then an MLO scan was already scheduled.
- */
- if (!need_new_sel &&
- !(mvmvif->last_esr_exit.reason & IWL_MVM_BLOCK_ESR_REASONS)) {
- IWL_DEBUG_INFO(mvm, "Wait for MLO scan\n");
- return;
- }
-
- /*
- * If EMLSR was blocked for more than 30 seconds, or the last link
- * selection decided to not enter EMLSR, trigger a new scan.
- */
- if (need_new_sel || hweight16(mvmvif->link_selection_res) < 2) {
- IWL_DEBUG_INFO(mvm, "Trigger MLO scan\n");
- wiphy_delayed_work_queue(mvm->hw->wiphy,
- &mvmvif->mlo_int_scan_wk, 0);
- /*
- * If EMLSR was blocked for less than 30 seconds, and the last link
- * selection decided to use EMLSR, activate EMLSR using the previous
- * link selection result.
- */
- } else {
- IWL_DEBUG_INFO(mvm,
- "Use the latest link selection result: 0x%x\n",
- mvmvif->link_selection_res);
- ieee80211_set_active_links_async(vif,
- mvmvif->link_selection_res);
- }
-}
-
-void iwl_mvm_unblock_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- enum iwl_mvm_esr_state reason)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-
- lockdep_assert_held(&mvm->mutex);
-
- if (!IWL_MVM_AUTO_EML_ENABLE)
- return;
-
- /* This should be called only with disable reasons */
- if (WARN_ON(!(reason & IWL_MVM_BLOCK_ESR_REASONS)))
- return;
-
- /* No Change */
- if (!(mvmvif->esr_disable_reason & reason))
- return;
-
- mvmvif->esr_disable_reason &= ~reason;
-
- IWL_DEBUG_INFO(mvm,
- "Unblocking EMLSR mode. reason = %s (0x%x)\n",
- iwl_get_esr_state_string(reason), reason);
- iwl_mvm_print_esr_state(mvm, mvmvif->esr_disable_reason);
-
- if (!mvmvif->esr_disable_reason)
- iwl_mvm_esr_unblocked(mvm, vif);
-}
-
void iwl_mvm_init_link(struct iwl_mvm_vif_link_info *link)
{
link->bcast_sta.sta_id = IWL_INVALID_STA;
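All of the deleted link-grade math ran in fixed point with SCALE_FACTOR = 256. A worked example with sample numbers, following the removed code above:

/* Worked example of the removed grade computation (sample values):
 * - 5 GHz link at -70 dBm: in rssi_to_grade_map, -70 is above the -72
 *   row but not above the -68 row, so the base grade is 2064.
 * - Channel load: no QBSS Load element on 5 GHz -> default 15%,
 *   normalized to 15 * 256 / 100 = 38; load factor = 256 - 38 = 218.
 * - Puncturing: 80 MHz with 1 of 4 subchannels punctured ->
 *   penalty = 1 * 256 / 4 = 64; puncturing factor = 256 - 64 = 192.
 * - Final grade = 2064 * 218 / 256 = 1757, then 1757 * 192 / 256 = 1317.
 * The combined eSR grade of a pair (a primary, b secondary) was then
 *   a->grade + b->grade * chan_load(a) / 256.
 */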
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 8805ab344895..9c9e0e1c6e1d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -1586,13 +1586,11 @@ iwl_mvm_handle_missed_beacons_notif(struct iwl_mvm *mvm,
u32 id = le32_to_cpu(mb->link_id);
union iwl_dbg_tlv_tp_data tp_data = { .fw_pkt = pkt };
u32 mac_type;
- int link_id;
u8 notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
MISSED_BEACONS_NOTIFICATION,
0);
u8 new_notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP,
MISSED_BEACONS_NOTIF, 0);
- struct ieee80211_bss_conf *bss_conf;
/* If the firmware uses the new notification (from MAC_CONF_GROUP),
* refer to that notification's version.
@@ -1617,16 +1615,10 @@ iwl_mvm_handle_missed_beacons_notif(struct iwl_mvm *mvm,
if (!vif)
return;
- bss_conf = &vif->bss_conf;
- link_id = bss_conf->link_id;
mac_type = iwl_mvm_get_mac_type(vif);
IWL_DEBUG_INFO(mvm, "missed beacon mac_type=%u,\n", mac_type);
- mvm->trans->dbg.dump_file_name_ext_valid = true;
- snprintf(mvm->trans->dbg.dump_file_name_ext, IWL_FW_INI_MAX_NAME,
- "MacId_%d_MacType_%d", id, mac_type);
-
rx_missed_bcon = le32_to_cpu(mb->consec_missed_beacons);
rx_missed_bcon_since_rx =
le32_to_cpu(mb->consec_missed_beacons_since_last_rx);
@@ -1644,41 +1636,11 @@ iwl_mvm_handle_missed_beacons_notif(struct iwl_mvm *mvm,
"missed_beacons:%d, missed_beacons_since_rx:%d\n",
rx_missed_bcon, rx_missed_bcon_since_rx);
}
- } else if (link_id >= 0 && hweight16(vif->active_links) > 1) {
- u32 bss_param_ch_cnt_link_id =
- bss_conf->bss_param_ch_cnt_link_id;
- u32 scnd_lnk_bcn_lost = 0;
-
- if (notif_ver >= 5 &&
- !IWL_FW_CHECK(mvm,
- le32_to_cpu(mb->other_link_id) == IWL_MVM_FW_LINK_ID_INVALID,
- "No data for other link id but we are in EMLSR active_links: 0x%x\n",
- vif->active_links))
- scnd_lnk_bcn_lost =
- le32_to_cpu(mb->consec_missed_beacons_other_link);
-
- /* Exit EMLSR if we lost more than
- * IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH_2_LINKS beacons on both links,
- * OR more than IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH on any link,
- * OR more than IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH_BSS_PARAM_CHANGED
- * on a link whose bss_param_ch_count has changed.
- */
- if ((rx_missed_bcon >= IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH_2_LINKS &&
- scnd_lnk_bcn_lost >= IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH_2_LINKS) ||
- rx_missed_bcon >= IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH ||
- (bss_param_ch_cnt_link_id != link_id &&
- rx_missed_bcon >= IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH_BSS_PARAM_CHANGED))
- iwl_mvm_exit_esr(mvm, vif,
- IWL_MVM_ESR_EXIT_MISSED_BEACON,
- iwl_mvm_get_primary_link(vif));
} else if (rx_missed_bcon_since_rx > IWL_MVM_MISSED_BEACONS_THRESHOLD) {
if (!iwl_mvm_has_new_tx_api(mvm))
ieee80211_beacon_loss(vif);
else
ieee80211_cqm_beacon_loss_notify(vif, GFP_ATOMIC);
-
- /* try to switch links, no-op if we don't have MLO */
- iwl_mvm_int_mlo_scan(mvm, vif);
}
iwl_dbg_tlv_time_point(&mvm->fwrt,
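The missed-beacon handler thus loses its EMLSR exit logic along with the dump-file-name annotation. Condensed, the deleted exit decision was the following (the numeric thresholds are the 5/15/11 constants removed from constants.h; the helper is a restatement for reference, not driver code):

static bool exit_esr_on_missed_beacons(u32 missed, u32 missed_other_link,
				       bool bss_param_ch_cnt_changed)
{
	return (missed >= 5 && missed_other_link >= 5) ||	/* both links */
	       missed >= 15 ||					/* any link */
	       (bss_param_ch_cnt_changed && missed >= 11);	/* BSS changed */
}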
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 4ad3d32683d8..44029ceb8f77 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -1425,12 +1425,6 @@ void iwl_mvm_mac_stop(struct ieee80211_hw *hw, bool suspend)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- /* Stop internal MLO scan, if running */
- mutex_lock(&mvm->mutex);
- iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_INT_MLO, false);
- mutex_unlock(&mvm->mutex);
-
- wiphy_work_cancel(mvm->hw->wiphy, &mvm->trig_link_selection_wk);
wiphy_work_flush(mvm->hw->wiphy, &mvm->async_handlers_wiphy_wk);
flush_work(&mvm->async_handlers_wk);
flush_work(&mvm->add_stream_wk);
@@ -1715,57 +1709,6 @@ static int iwl_mvm_alloc_bcast_mcast_sta(struct iwl_mvm *mvm,
IWL_STA_MULTICAST);
}
-static void iwl_mvm_prevent_esr_done_wk(struct wiphy *wiphy,
- struct wiphy_work *wk)
-{
- struct iwl_mvm_vif *mvmvif =
- container_of(wk, struct iwl_mvm_vif, prevent_esr_done_wk.work);
- struct iwl_mvm *mvm = mvmvif->mvm;
- struct ieee80211_vif *vif =
- container_of((void *)mvmvif, struct ieee80211_vif, drv_priv);
-
- guard(mvm)(mvm);
- iwl_mvm_unblock_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_PREVENTION);
-}
-
-static void iwl_mvm_mlo_int_scan_wk(struct wiphy *wiphy, struct wiphy_work *wk)
-{
- struct iwl_mvm_vif *mvmvif = container_of(wk, struct iwl_mvm_vif,
- mlo_int_scan_wk.work);
- struct ieee80211_vif *vif =
- container_of((void *)mvmvif, struct ieee80211_vif, drv_priv);
-
- guard(mvm)(mvmvif->mvm);
- iwl_mvm_int_mlo_scan(mvmvif->mvm, vif);
-}
-
-static void iwl_mvm_unblock_esr_tpt(struct wiphy *wiphy, struct wiphy_work *wk)
-{
- struct iwl_mvm_vif *mvmvif =
- container_of(wk, struct iwl_mvm_vif, unblock_esr_tpt_wk);
- struct iwl_mvm *mvm = mvmvif->mvm;
- struct ieee80211_vif *vif =
- container_of((void *)mvmvif, struct ieee80211_vif, drv_priv);
-
- guard(mvm)(mvm);
- iwl_mvm_unblock_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_TPT);
-}
-
-static void iwl_mvm_unblock_esr_tmp_non_bss(struct wiphy *wiphy,
- struct wiphy_work *wk)
-{
- struct iwl_mvm_vif *mvmvif =
- container_of(wk, struct iwl_mvm_vif,
- unblock_esr_tmp_non_bss_wk.work);
- struct iwl_mvm *mvm = mvmvif->mvm;
- struct ieee80211_vif *vif =
- container_of((void *)mvmvif, struct ieee80211_vif, drv_priv);
-
- mutex_lock(&mvm->mutex);
- iwl_mvm_unblock_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_TMP_NON_BSS);
- mutex_unlock(&mvm->mutex);
-}
-
void iwl_mvm_mac_init_mvmvif(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif)
{
lockdep_assert_held(&mvm->mutex);
@@ -1777,18 +1720,6 @@ void iwl_mvm_mac_init_mvmvif(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif)
INIT_DELAYED_WORK(&mvmvif->csa_work,
iwl_mvm_channel_switch_disconnect_wk);
-
- wiphy_delayed_work_init(&mvmvif->prevent_esr_done_wk,
- iwl_mvm_prevent_esr_done_wk);
-
- wiphy_delayed_work_init(&mvmvif->mlo_int_scan_wk,
- iwl_mvm_mlo_int_scan_wk);
-
- wiphy_work_init(&mvmvif->unblock_esr_tpt_wk,
- iwl_mvm_unblock_esr_tpt);
-
- wiphy_delayed_work_init(&mvmvif->unblock_esr_tmp_non_bss_wk,
- iwl_mvm_unblock_esr_tmp_non_bss);
}
static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
@@ -1926,16 +1857,6 @@ void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
flush_work(&mvm->roc_done_wk);
}
- wiphy_delayed_work_cancel(mvm->hw->wiphy,
- &mvmvif->prevent_esr_done_wk);
-
- wiphy_delayed_work_cancel(mvm->hw->wiphy,
- &mvmvif->mlo_int_scan_wk);
-
- wiphy_work_cancel(mvm->hw->wiphy, &mvmvif->unblock_esr_tpt_wk);
- wiphy_delayed_work_cancel(mvm->hw->wiphy,
- &mvmvif->unblock_esr_tmp_non_bss_wk);
-
cancel_delayed_work_sync(&mvmvif->csa_work);
}
@@ -4008,21 +3929,6 @@ iwl_mvm_sta_state_assoc_to_authorized(struct iwl_mvm *mvm,
callbacks->mac_ctxt_changed(mvm, vif, false);
iwl_mvm_mei_host_associated(mvm, vif, mvm_sta);
-
- memset(&mvmvif->last_esr_exit, 0,
- sizeof(mvmvif->last_esr_exit));
-
- iwl_mvm_block_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_TPT, 0);
-
- /* Block until the FW notif arrives */
- iwl_mvm_block_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_FW, 0);
-
- /* when client is authorized (AP station marked as such),
- * try to enable the best link(s).
- */
- if (vif->type == NL80211_IFTYPE_STATION &&
- !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
- iwl_mvm_select_links(mvm, vif);
}
mvm_sta->authorized = true;
@@ -4070,16 +3976,6 @@ iwl_mvm_sta_state_authorized_to_assoc(struct iwl_mvm *mvm,
/* disable beacon filtering */
iwl_mvm_disable_beacon_filter(mvm, vif);
-
- wiphy_delayed_work_cancel(mvm->hw->wiphy,
- &mvmvif->prevent_esr_done_wk);
-
- wiphy_delayed_work_cancel(mvm->hw->wiphy,
- &mvmvif->mlo_int_scan_wk);
-
- wiphy_work_cancel(mvm->hw->wiphy, &mvmvif->unblock_esr_tpt_wk);
- wiphy_delayed_work_cancel(mvm->hw->wiphy,
- &mvmvif->unblock_esr_tmp_non_bss_wk);
}
return 0;
@@ -4920,7 +4816,6 @@ int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const struct iwl_mvm_roc_ops *ops)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- struct ieee80211_vif *bss_vif = iwl_mvm_get_bss_vif(mvm);
u32 lmac_id;
int ret;
@@ -4933,13 +4828,6 @@ int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
*/
flush_work(&mvm->roc_done_wk);
- if (!IS_ERR_OR_NULL(bss_vif)) {
- ret = iwl_mvm_block_esr_sync(mvm, bss_vif,
- IWL_MVM_ESR_BLOCKED_ROC);
- if (ret)
- return ret;
- }
-
guard(mvm)(mvm);
switch (vif->type) {
@@ -5604,9 +5492,9 @@ static void iwl_mvm_csa_block_txqs(void *data, struct ieee80211_sta *sta)
}
#define IWL_MAX_CSA_BLOCK_TX 1500
-int iwl_mvm_pre_channel_switch(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- struct ieee80211_channel_switch *chsw)
+static int iwl_mvm_pre_channel_switch(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel_switch *chsw)
{
struct ieee80211_vif *csa_vif;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -5724,9 +5612,9 @@ int iwl_mvm_pre_channel_switch(struct iwl_mvm *mvm,
return ret;
}
-static int iwl_mvm_mac_pre_channel_switch(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_channel_switch *chsw)
+int iwl_mvm_mac_pre_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel_switch *chsw)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
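All of the removed EMLSR work items used the standard wiphy (delayed) work API, which explains the paired init/queue/cancel deletions in this file. A minimal lifecycle sketch; the handler body and the 30-second delay are illustrative only:

/* wiphy work lifecycle as used by the removed EMLSR works (sketch) */
static void my_wk_fn(struct wiphy *wiphy, struct wiphy_work *wk)
{
	struct iwl_mvm_vif *mvmvif =
		container_of(wk, struct iwl_mvm_vif, mlo_int_scan_wk.work);
	/* runs with the wiphy lock held; safe to touch vif state */
}

wiphy_delayed_work_init(&mvmvif->mlo_int_scan_wk, my_wk_fn);	/* at vif setup */
wiphy_delayed_work_queue(wiphy, &mvmvif->mlo_int_scan_wk,
			 30 * HZ);				/* on an event */
wiphy_delayed_work_cancel(wiphy, &mvmvif->mlo_int_scan_wk);	/* at teardown */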
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
index bf24f8cb673e..b1dca76b7141 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
@@ -340,20 +340,6 @@ static int iwl_mvm_mld_assign_vif_chanctx(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- /* update EMLSR mode */
- if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) {
- int ret;
-
- ret = iwl_mvm_esr_non_bss_link(mvm, vif, link_conf->link_id,
- true);
- /*
- * Don't activate this link if failed to exit EMLSR in
- * the BSS interface
- */
- if (ret)
- return ret;
- }
-
guard(mvm)(mvm);
return __iwl_mvm_mld_assign_vif_chanctx(mvm, vif, link_conf, ctx, false);
}
@@ -472,10 +458,6 @@ static void iwl_mvm_mld_unassign_vif_chanctx(struct ieee80211_hw *hw,
iwl_mvm_add_link(mvm, vif, link_conf);
}
mutex_unlock(&mvm->mutex);
-
- /* update EMLSR mode */
- if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION)
- iwl_mvm_esr_non_bss_link(mvm, vif, link_conf->link_id, false);
}
static void
@@ -684,25 +666,6 @@ static int iwl_mvm_mld_mac_sta_state(struct ieee80211_hw *hw,
&callbacks);
}
-static bool iwl_mvm_esr_bw_criteria(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *link_conf)
-{
- struct ieee80211_bss_conf *other_link;
- int link_id;
-
- /* Exit EMLSR if links don't have equal bandwidths */
- for_each_vif_active_link(vif, other_link, link_id) {
- if (link_id == link_conf->link_id)
- continue;
- if (link_conf->chanreq.oper.width ==
- other_link->chanreq.oper.width)
- return true;
- }
-
- return false;
-}
-
static void
iwl_mvm_mld_link_info_changed_station(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
@@ -737,14 +700,6 @@ iwl_mvm_mld_link_info_changed_station(struct iwl_mvm *mvm,
link_changes |= LINK_CONTEXT_MODIFY_HE_PARAMS;
}
- if ((changes & BSS_CHANGED_BANDWIDTH) &&
- ieee80211_vif_link_active(vif, link_conf->link_id) &&
- mvmvif->esr_active &&
- !iwl_mvm_esr_bw_criteria(mvm, vif, link_conf))
- iwl_mvm_exit_esr(mvm, vif,
- IWL_MVM_ESR_EXIT_BANDWIDTH,
- iwl_mvm_get_primary_link(vif));
-
/* if associated, maybe puncturing changed - we'll check later */
if (vif->cfg.assoc)
link_changes |= LINK_CONTEXT_MODIFY_EHT_PARAMS;
@@ -879,11 +834,6 @@ static void iwl_mvm_mld_vif_cfg_changed_station(struct iwl_mvm *mvm,
if (ret)
IWL_ERR(mvm, "failed to update power mode\n");
}
-
- if (changes & (BSS_CHANGED_MLD_VALID_LINKS | BSS_CHANGED_MLD_TTLM) &&
- ieee80211_vif_is_mld(vif) && mvmvif->authorized)
- wiphy_delayed_work_queue(mvm->hw->wiphy,
- &mvmvif->mlo_int_scan_wk, 0);
}
static void
@@ -1239,91 +1189,6 @@ iwl_mvm_mld_can_neg_ttlm(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
return NEG_TTLM_RES_ACCEPT;
}
-static int
-iwl_mvm_mld_mac_pre_channel_switch(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_channel_switch *chsw)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- int ret;
-
- mutex_lock(&mvm->mutex);
- if (mvmvif->esr_active) {
- u8 primary = iwl_mvm_get_primary_link(vif);
- int selected;
-
- /* prefer primary unless quiet CSA on it */
- if (chsw->link_id == primary && chsw->block_tx)
- selected = iwl_mvm_get_other_link(vif, primary);
- else
- selected = primary;
-
- /*
-		 * Remember to tell the firmware that this link can't TX.
-		 * Note that this logic seems to be unrelated to EMLSR, but
-		 * it is really needed only when EMLSR is active. When we
-		 * have a single link, the firmware will handle all this on
-		 * its own. In multi-link scenarios, we can learn about the
-		 * CSA from another link, and this logic is too complex for
-		 * the firmware to track.
-		 * Since we want to de-activate the link that got a CSA, we
-		 * need to tell the firmware not to send any frame on that
-		 * link, as the firmware may not be aware that the link is
-		 * under a CSA with mode=1 (no TX allowed).
- */
- if (chsw->block_tx && mvmvif->link[chsw->link_id])
- mvmvif->link[chsw->link_id]->csa_block_tx = true;
-
- iwl_mvm_exit_esr(mvm, vif, IWL_MVM_ESR_EXIT_CSA, selected);
- mutex_unlock(&mvm->mutex);
-
- /*
-		 * If we haven't kept the link that's doing the CSA active,
-		 * we don't need to do anything else - just return.
- */
- if (selected != chsw->link_id)
- return 0;
-
- mutex_lock(&mvm->mutex);
- }
-
- ret = iwl_mvm_pre_channel_switch(mvm, vif, chsw);
- mutex_unlock(&mvm->mutex);
-
- return ret;
-}
-
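For reference, the link-selection rule implemented by the function removed
above reduces to a one-liner: keep the primary link unless the CSA on it is
"quiet" (block_tx set), in which case keep the other link. A minimal
standalone sketch of that rule, helper name hypothetical:

	/* keep primary unless it has a quiet CSA (mode=1, no TX allowed) */
	static u8 pick_csa_surviving_link(u8 primary, u8 other,
					  u8 csa_link_id, bool block_tx)
	{
		return (csa_link_id == primary && block_tx) ? other : primary;
	}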
-#define IWL_MVM_MLD_UNBLOCK_ESR_NON_BSS_TIMEOUT (5 * HZ)
-
-static void iwl_mvm_mld_prep_add_interface(struct ieee80211_hw *hw,
- enum nl80211_iftype type)
-{
- struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- struct ieee80211_vif *bss_vif = iwl_mvm_get_bss_vif(mvm);
- struct iwl_mvm_vif *mvmvif;
- int ret;
-
- IWL_DEBUG_MAC80211(mvm, "prep_add_interface: type=%u\n",
- type);
-
- if (IS_ERR_OR_NULL(bss_vif) ||
- !(type == NL80211_IFTYPE_AP ||
- type == NL80211_IFTYPE_P2P_GO ||
- type == NL80211_IFTYPE_P2P_CLIENT))
- return;
-
- mvmvif = iwl_mvm_vif_from_mac80211(bss_vif);
- ret = iwl_mvm_block_esr_sync(mvm, bss_vif,
- IWL_MVM_ESR_BLOCKED_TMP_NON_BSS);
- if (ret)
- return;
-
- wiphy_delayed_work_queue(mvmvif->mvm->hw->wiphy,
- &mvmvif->unblock_esr_tmp_non_bss_wk,
- IWL_MVM_MLD_UNBLOCK_ESR_NON_BSS_TIMEOUT);
-}
-
const struct ieee80211_ops iwl_mvm_mld_hw_ops = {
.tx = iwl_mvm_mac_tx,
.wake_tx_queue = iwl_mvm_mac_wake_tx_queue,
@@ -1377,7 +1242,7 @@ const struct ieee80211_ops iwl_mvm_mld_hw_ops = {
.tx_last_beacon = iwl_mvm_tx_last_beacon,
.channel_switch = iwl_mvm_channel_switch,
- .pre_channel_switch = iwl_mvm_mld_mac_pre_channel_switch,
+ .pre_channel_switch = iwl_mvm_mac_pre_channel_switch,
.post_channel_switch = iwl_mvm_post_channel_switch,
.abort_channel_switch = iwl_mvm_abort_channel_switch,
.channel_switch_rx_beacon = iwl_mvm_channel_switch_rx_beacon,
@@ -1418,5 +1283,4 @@ const struct ieee80211_ops iwl_mvm_mld_hw_ops = {
.change_sta_links = iwl_mvm_mld_change_sta_links,
.can_activate_links = iwl_mvm_mld_can_activate_links,
.can_neg_ttlm = iwl_mvm_mld_can_neg_ttlm,
- .prep_add_interface = iwl_mvm_mld_prep_add_interface,
};
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
index e1010521c3ea..d9a2801636cf 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
@@ -852,8 +852,6 @@ int iwl_mvm_mld_rm_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_link_sta, link_id);
}
- kfree(mvm_sta->mpdu_counters);
- mvm_sta->mpdu_counters = NULL;
return ret;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index fdaeefa305e1..b515028adc8f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -348,68 +348,6 @@ struct iwl_mvm_vif_link_info {
};
/**
- * enum iwl_mvm_esr_state - defines the reasons for which EMLSR is exited
- * or blocked.
- * The low 16 bits are used for blocking reasons, and the 16 higher bits
- * are used for exit reasons.
- * For the blocking reasons - use iwl_mvm_(un)block_esr(), and for the exit
- * reasons - use iwl_mvm_exit_esr().
- *
- * Note: new reasons shall be added to HANDLE_ESR_REASONS as well (for logs)
- *
- * @IWL_MVM_ESR_BLOCKED_PREVENTION: Prevent EMLSR to avoid entering and exiting
- * in a loop.
- * @IWL_MVM_ESR_BLOCKED_WOWLAN: WOWLAN is preventing the enablement of EMLSR
- * @IWL_MVM_ESR_BLOCKED_TPT: block EMLSR when there is not enough traffic
- * @IWL_MVM_ESR_BLOCKED_FW: the FW didn't recommend EMLSR, or it forced an
- *	exit from EMLSR
- * @IWL_MVM_ESR_BLOCKED_NON_BSS: An active non-BSS interface's link is
- * preventing EMLSR
- * @IWL_MVM_ESR_BLOCKED_ROC: remain-on-channel is preventing EMLSR
- * @IWL_MVM_ESR_BLOCKED_TMP_NON_BSS: An expected active non-BSS interface's link
- * is preventing EMLSR. This is a temporary blocking that is set when there
- * is an indication that a non-BSS interface is to be added.
- * @IWL_MVM_ESR_EXIT_MISSED_BEACON: exited EMLSR due to missed beacons
- * @IWL_MVM_ESR_EXIT_LOW_RSSI: link is deactivated/not allowed for EMLSR
- * due to low RSSI.
- * @IWL_MVM_ESR_EXIT_COEX: link is deactivated/not allowed for EMLSR
- * due to BT Coex.
- * @IWL_MVM_ESR_EXIT_BANDWIDTH: the bandwidths of the primary and secondary
- *	links differ, preventing the enablement of EMLSR
- * @IWL_MVM_ESR_EXIT_CSA: CSA happened, so exit EMLSR
- * @IWL_MVM_ESR_EXIT_LINK_USAGE: Exit EMLSR due to low tpt on secondary link
- */
-enum iwl_mvm_esr_state {
- IWL_MVM_ESR_BLOCKED_PREVENTION = 0x1,
- IWL_MVM_ESR_BLOCKED_WOWLAN = 0x2,
- IWL_MVM_ESR_BLOCKED_TPT = 0x4,
- IWL_MVM_ESR_BLOCKED_FW = 0x8,
- IWL_MVM_ESR_BLOCKED_NON_BSS = 0x10,
- IWL_MVM_ESR_BLOCKED_ROC = 0x20,
- IWL_MVM_ESR_BLOCKED_TMP_NON_BSS = 0x40,
- IWL_MVM_ESR_EXIT_MISSED_BEACON = 0x10000,
- IWL_MVM_ESR_EXIT_LOW_RSSI = 0x20000,
- IWL_MVM_ESR_EXIT_COEX = 0x40000,
- IWL_MVM_ESR_EXIT_BANDWIDTH = 0x80000,
- IWL_MVM_ESR_EXIT_CSA = 0x100000,
- IWL_MVM_ESR_EXIT_LINK_USAGE = 0x200000,
-};
-
-#define IWL_MVM_BLOCK_ESR_REASONS 0xffff
-
-const char *iwl_get_esr_state_string(enum iwl_mvm_esr_state state);
-
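For reference, the low/high split documented above makes classifying a
reason a single mask test. A minimal standalone sketch relying only on the
0xffff split (helper name hypothetical):

	#include <stdbool.h>
	#include <stdint.h>

	#define IWL_MVM_BLOCK_ESR_REASONS 0xffff

	/* blocking reasons occupy the low 16 bits, exit reasons the high 16 */
	static bool esr_reason_is_blocking(uint32_t reason)
	{
		return reason & IWL_MVM_BLOCK_ESR_REASONS;
	}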
-/**
- * struct iwl_mvm_esr_exit - details of the last exit from EMLSR mode.
- * @reason: The reason for the last exit from EMLSR.
- * &iwl_mvm_prevent_esr_reasons. Will be 0 before exiting EMLSR.
- * @ts: the time stamp of the last time we exited EMLSR.
- */
-struct iwl_mvm_esr_exit {
- unsigned long ts;
- enum iwl_mvm_esr_state reason;
-};
-
-/**
* struct iwl_mvm_vif - data per Virtual Interface, it is a MAC context
* @mvm: pointer back to the mvm struct
* @id: between 0 and 3
@@ -443,7 +381,6 @@ struct iwl_mvm_esr_exit {
* @deflink: default link data for use in non-MLO
* @link: link data for each link in MLO
* @esr_active: indicates eSR mode is active
- * @esr_disable_reason: a bitmap of &enum iwl_mvm_esr_state
* @pm_enabled: indicates powersave is enabled
* @link_selection_res: bitmap of active links as it was decided in the last
* link selection. Valid only for a MLO vif after assoc. 0 if there wasn't
@@ -451,15 +388,6 @@ struct iwl_mvm_esr_exit {
* @link_selection_primary: primary link selected by link selection
* @primary_link: primary link in eSR. Valid only for an associated MLD vif,
* and in eSR mode. Valid only for a STA.
- * @last_esr_exit: Details of the last exit from EMLSR.
- * @exit_same_reason_count: The number of times we exited due to the specified
- * @last_esr_exit::reason, only counting exits due to
- * &IWL_MVM_ESR_PREVENT_REASONS.
- * @prevent_esr_done_wk: work that should be done when esr prevention ends.
- * @mlo_int_scan_wk: work for the internal MLO scan.
- * @unblock_esr_tpt_wk: work for unblocking EMLSR when tpt is high enough.
- * @unblock_esr_tmp_non_bss_wk: work for removing the
- * IWL_MVM_ESR_BLOCKED_TMP_NON_BSS blocking for EMLSR.
* @roc_activity: currently running ROC activity for this vif (or
* ROC_NUM_ACTIVITIES if no activity is running).
* @session_prot_connection_loss: the connection was lost due to session
@@ -515,7 +443,6 @@ struct iwl_mvm_vif {
u8 authorized:1;
bool ps_disabled;
- u32 esr_disable_reason;
u32 ap_beacon_time;
bool bf_enabled;
bool ba_enabled;
@@ -591,12 +518,6 @@ struct iwl_mvm_vif {
u16 link_selection_res;
u8 link_selection_primary;
u8 primary_link;
- struct iwl_mvm_esr_exit last_esr_exit;
- u8 exit_same_reason_count;
- struct wiphy_delayed_work prevent_esr_done_wk;
- struct wiphy_delayed_work mlo_int_scan_wk;
- struct wiphy_work unblock_esr_tpt_wk;
- struct wiphy_delayed_work unblock_esr_tmp_non_bss_wk;
struct iwl_mvm_vif_link_info deflink;
struct iwl_mvm_vif_link_info *link[IEEE80211_MLD_MAX_NUM_LINKS];
@@ -622,7 +543,6 @@ enum iwl_scan_status {
IWL_MVM_SCAN_REGULAR = BIT(0),
IWL_MVM_SCAN_SCHED = BIT(1),
IWL_MVM_SCAN_NETDETECT = BIT(2),
- IWL_MVM_SCAN_INT_MLO = BIT(3),
IWL_MVM_SCAN_STOPPING_REGULAR = BIT(8),
IWL_MVM_SCAN_STOPPING_SCHED = BIT(9),
@@ -635,8 +555,6 @@ enum iwl_scan_status {
IWL_MVM_SCAN_STOPPING_SCHED,
IWL_MVM_SCAN_NETDETECT_MASK = IWL_MVM_SCAN_NETDETECT |
IWL_MVM_SCAN_STOPPING_NETDETECT,
- IWL_MVM_SCAN_INT_MLO_MASK = IWL_MVM_SCAN_INT_MLO |
- IWL_MVM_SCAN_STOPPING_INT_MLO,
IWL_MVM_SCAN_STOPPING_MASK = 0xff << IWL_MVM_SCAN_STOPPING_SHIFT,
IWL_MVM_SCAN_MASK = 0xff,
@@ -1017,8 +935,6 @@ struct iwl_mvm {
/* For async rx handlers that require the wiphy lock */
struct wiphy_work async_handlers_wiphy_wk;
- struct wiphy_work trig_link_selection_wk;
-
struct work_struct roc_done_wk;
unsigned long init_status;
@@ -1203,20 +1119,13 @@ struct iwl_mvm {
u8 offload_tid;
#ifdef CONFIG_IWLWIFI_DEBUGFS
bool d3_wake_sysassert;
- bool d3_test_active;
- u32 d3_test_pme_ptr;
- struct ieee80211_vif *keep_vif;
u32 last_netdetect_scans; /* no. of scans in the last net-detect wake */
#endif
#endif
wait_queue_head_t rx_sync_waitq;
- /* BT-Coex - only one of those will be used */
- union {
- struct iwl_bt_coex_prof_old_notif last_bt_notif;
- struct iwl_bt_coex_profile_notif last_bt_wifi_loss;
- };
+ struct iwl_bt_coex_prof_old_notif last_bt_notif;
struct iwl_bt_coex_ci_cmd last_bt_ci_cmd;
u8 bt_tx_prio;
@@ -2099,9 +2008,7 @@ int iwl_mvm_remove_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
int iwl_mvm_disable_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf);
-void iwl_mvm_select_links(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
u8 iwl_mvm_get_primary_link(struct ieee80211_vif *vif);
-u8 iwl_mvm_get_other_link(struct ieee80211_vif *vif, u8 link_id);
struct iwl_mvm_link_sel_data {
u8 link_id;
@@ -2111,13 +2018,6 @@ struct iwl_mvm_link_sel_data {
};
#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS)
-unsigned int iwl_mvm_get_link_grade(struct ieee80211_bss_conf *link_conf);
-bool iwl_mvm_mld_valid_link_pair(struct ieee80211_vif *vif,
- const struct iwl_mvm_link_sel_data *a,
- const struct iwl_mvm_link_sel_data *b);
-
-s8 iwl_mvm_average_dbm_values(const struct iwl_umac_scan_channel_survey_notif *notif);
-
extern const struct iwl_hcmd_arr iwl_mvm_groups[];
extern const unsigned int iwl_mvm_groups_size;
#endif
@@ -2201,7 +2101,6 @@ int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify);
int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm);
void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm);
void iwl_mvm_scan_timeout_wk(struct work_struct *work);
-int iwl_mvm_int_mlo_scan(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
void iwl_mvm_rx_channel_survey_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
@@ -2327,8 +2226,6 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
int iwl_mvm_send_bt_init_conf(struct iwl_mvm *mvm);
void iwl_mvm_rx_bt_coex_old_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
-void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
enum ieee80211_rssi_event_data);
void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm);
@@ -2929,9 +2826,10 @@ void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
int iwl_mvm_tx_last_beacon(struct ieee80211_hw *hw);
void iwl_mvm_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_channel_switch *chsw);
-int iwl_mvm_pre_channel_switch(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- struct ieee80211_channel_switch *chsw);
+int iwl_mvm_mac_pre_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel_switch *chsw);
+
void iwl_mvm_abort_channel_switch(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf);
@@ -2988,30 +2886,6 @@ int iwl_mvm_roc_add_cmd(struct iwl_mvm *mvm,
/* EMLSR */
bool iwl_mvm_vif_has_esr_cap(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
-void iwl_mvm_block_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- enum iwl_mvm_esr_state reason,
- u8 link_to_keep);
-int iwl_mvm_block_esr_sync(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- enum iwl_mvm_esr_state reason);
-void iwl_mvm_unblock_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- enum iwl_mvm_esr_state reason);
-void iwl_mvm_exit_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- enum iwl_mvm_esr_state reason,
- u8 link_to_keep);
-s8 iwl_mvm_get_esr_rssi_thresh(struct iwl_mvm *mvm,
- const struct cfg80211_chan_def *chandef,
- bool low);
-void iwl_mvm_bt_coex_update_link_esr(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- int link_id);
-bool
-iwl_mvm_bt_coex_calculate_esr_mode(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- s32 link_rssi,
- bool primary);
-int iwl_mvm_esr_non_bss_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- unsigned int link_id, bool active);
-
void
iwl_mvm_send_ap_tx_power_constraint_cmd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index c7f08cde1f72..5ebd046371f5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -143,24 +143,6 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
}
-static void iwl_mvm_rx_esr_mode_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_esr_mode_notif *notif = (void *)pkt->data;
- struct ieee80211_vif *vif = iwl_mvm_get_bss_vif(mvm);
-
-	/* the FW recommendation is only for entering EMLSR */
- if (IS_ERR_OR_NULL(vif) || iwl_mvm_vif_from_mac80211(vif)->esr_active)
- return;
-
- if (le32_to_cpu(notif->action) == ESR_RECOMMEND_ENTER)
- iwl_mvm_unblock_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_FW);
- else
- iwl_mvm_block_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_FW,
- iwl_mvm_get_primary_link(vif));
-}
-
static void iwl_mvm_rx_monitor_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
@@ -345,9 +327,6 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_old_notif,
RX_HANDLER_ASYNC_LOCKED_WIPHY,
struct iwl_bt_coex_prof_old_notif),
- RX_HANDLER_GRP(BT_COEX_GROUP, PROFILE_NOTIF, iwl_mvm_rx_bt_coex_notif,
- RX_HANDLER_ASYNC_LOCKED_WIPHY,
- struct iwl_bt_coex_profile_notif),
RX_HANDLER_NO_SIZE(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif,
RX_HANDLER_ASYNC_LOCKED),
RX_HANDLER_NO_SIZE(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics,
@@ -457,11 +436,6 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER_ASYNC_UNLOCKED,
struct iwl_channel_switch_error_notif),
- RX_HANDLER_GRP(DATA_PATH_GROUP, ESR_MODE_NOTIF,
- iwl_mvm_rx_esr_mode_notif,
- RX_HANDLER_ASYNC_LOCKED_WIPHY,
- struct iwl_esr_mode_notif),
-
RX_HANDLER_GRP(DATA_PATH_GROUP, MONITOR_NOTIF,
iwl_mvm_rx_monitor_notif, RX_HANDLER_ASYNC_LOCKED,
struct iwl_datapath_monitor_notif),
@@ -661,7 +635,6 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
HCMD_NAME(CHEST_COLLECTOR_FILTER_CONFIG_CMD),
HCMD_NAME(SCD_QUEUE_CONFIG_CMD),
HCMD_NAME(SEC_KEY_CMD),
- HCMD_NAME(ESR_MODE_NOTIF),
HCMD_NAME(MONITOR_NOTIF),
HCMD_NAME(THERMAL_DUAL_CHAIN_REQUEST),
HCMD_NAME(BEACON_FILTER_IN_NOTIF),
@@ -1220,29 +1193,6 @@ static const struct iwl_mei_ops mei_ops = {
.nic_stolen = iwl_mvm_mei_nic_stolen,
};
-static void iwl_mvm_find_link_selection_vif(void *_data, u8 *mac,
- struct ieee80211_vif *vif)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-
- if (ieee80211_vif_is_mld(vif) && mvmvif->authorized)
- iwl_mvm_select_links(mvmvif->mvm, vif);
-}
-
-static void iwl_mvm_trig_link_selection(struct wiphy *wiphy,
- struct wiphy_work *wk)
-{
- struct iwl_mvm *mvm =
- container_of(wk, struct iwl_mvm, trig_link_selection_wk);
-
- mutex_lock(&mvm->mutex);
- ieee80211_iterate_active_interfaces(mvm->hw,
- IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_find_link_selection_vif,
- NULL);
- mutex_unlock(&mvm->mutex);
-}
-
static struct iwl_op_mode *
iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_rf_cfg *cfg,
const struct iwl_fw *fw, struct dentry *dbgfs_dir)
@@ -1411,9 +1361,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_rf_cfg *cfg,
wiphy_work_init(&mvm->async_handlers_wiphy_wk,
iwl_mvm_async_handlers_wiphy_wk);
- wiphy_work_init(&mvm->trig_link_selection_wk,
- iwl_mvm_trig_link_selection);
-
init_waitqueue_head(&mvm->rx_sync_waitq);
mvm->queue_sync_state = 0;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index 8fae0d41b119..8c1bb3a7ffca 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -563,7 +563,6 @@ static void iwl_mvm_update_link_sig(struct ieee80211_vif *vif, int sig,
int thold = bss_conf->cqm_rssi_thold;
int hyst = bss_conf->cqm_rssi_hyst;
int last_event;
- s8 exit_esr_thresh;
if (sig == 0) {
IWL_DEBUG_RX(mvm, "RSSI is 0 - skip signal based decision\n");
@@ -619,27 +618,6 @@ static void iwl_mvm_update_link_sig(struct ieee80211_vif *vif, int sig,
sig,
GFP_KERNEL);
}
-
- /* ESR recalculation */
- if (!vif->cfg.assoc || !ieee80211_vif_is_mld(vif))
- return;
-
- /* We're not in EMLSR and our signal is bad, try to switch link maybe */
-	/* We're not in EMLSR and our signal is bad - maybe try to switch links */
- iwl_mvm_int_mlo_scan(mvm, vif);
- return;
- }
-
- /* We are in EMLSR, check if we need to exit */
- exit_esr_thresh =
- iwl_mvm_get_esr_rssi_thresh(mvm,
- &bss_conf->chanreq.oper,
- true);
-
- if (sig < exit_esr_thresh)
- iwl_mvm_exit_esr(mvm, vif, IWL_MVM_ESR_EXIT_LOW_RSSI,
- iwl_mvm_get_other_link(vif,
- bss_conf->link_id));
}
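The two RSSI branches removed above amount to a small decision table; a
sketch of the control flow only, with the thresholds as named in the driver:

	/*
	 * if (!esr_active && sig < IWL_MVM_LOW_RSSI_MLO_SCAN_THRESH)
	 *	-> trigger an internal MLO scan to look for a better link
	 * else if (esr_active && sig < exit_esr_thresh)
	 *	-> exit EMLSR, keeping the other link
	 */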
static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
@@ -914,10 +892,6 @@ iwl_mvm_stat_iterator_all_links(struct iwl_mvm *mvm,
link_info->beacon_stats.avg_signal =
-le32_to_cpu(link_stats->beacon_average_energy);
- if (link_info->phy_ctxt &&
- link_info->phy_ctxt->channel->band == NL80211_BAND_2GHZ)
- iwl_mvm_bt_coex_update_link_esr(mvm, vif, link_id);
-
/* make sure that beacon statistics don't go backwards with TCM
* request to clear statistics
*/
@@ -956,111 +930,6 @@ iwl_mvm_stat_iterator_all_links(struct iwl_mvm *mvm,
}
}
-#define SEC_LINK_MIN_PERC 10
-#define SEC_LINK_MIN_TX 3000
-#define SEC_LINK_MIN_RX 400
-
-/* Accept a window up to ~20% short to avoid issues due to jitter */
-#define IWL_MVM_TPT_MIN_COUNT_WINDOW (IWL_MVM_TPT_COUNT_WINDOW_SEC * HZ * 4 / 5)
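To make the ~20% tolerance concrete, assuming the driver's usual 5-second
counting window (the HZ value below is picked only for the example):

	#define HZ 250				/* example value only */
	#define IWL_MVM_TPT_COUNT_WINDOW_SEC 5	/* assumed window length */
	#define IWL_MVM_TPT_MIN_COUNT_WINDOW \
		(IWL_MVM_TPT_COUNT_WINDOW_SEC * HZ * 4 / 5)
	/* => 4 * HZ: any window shorter than 80% of nominal is skipped */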
-
-static void iwl_mvm_update_esr_mode_tpt(struct iwl_mvm *mvm)
-{
- struct ieee80211_vif *bss_vif = iwl_mvm_get_bss_vif(mvm);
- struct iwl_mvm_vif *mvmvif;
- struct iwl_mvm_sta *mvmsta;
- unsigned long total_tx = 0, total_rx = 0;
- unsigned long sec_link_tx = 0, sec_link_rx = 0;
- u8 sec_link_tx_perc, sec_link_rx_perc;
- u8 sec_link;
- bool skip = false;
-
- lockdep_assert_held(&mvm->mutex);
-
- if (IS_ERR_OR_NULL(bss_vif))
- return;
-
- mvmvif = iwl_mvm_vif_from_mac80211(bss_vif);
-
- if (!mvmvif->esr_active || !mvmvif->ap_sta)
- return;
-
- mvmsta = iwl_mvm_sta_from_mac80211(mvmvif->ap_sta);
-	/* We only count for the AP sta in an MLO connection */
- if (!mvmsta->mpdu_counters)
- return;
-
- /* Get the FW ID of the secondary link */
- sec_link = iwl_mvm_get_other_link(bss_vif,
- iwl_mvm_get_primary_link(bss_vif));
- if (WARN_ON(!mvmvif->link[sec_link]))
- return;
- sec_link = mvmvif->link[sec_link]->fw_link_id;
-
- /* Sum up RX and TX MPDUs from the different queues/links */
- for (int q = 0; q < mvm->trans->info.num_rxqs; q++) {
- spin_lock_bh(&mvmsta->mpdu_counters[q].lock);
-
-		/* Link IDs that don't exist will contain 0 */
- for (int link = 0; link < IWL_FW_MAX_LINK_ID; link++) {
- total_tx += mvmsta->mpdu_counters[q].per_link[link].tx;
- total_rx += mvmsta->mpdu_counters[q].per_link[link].rx;
- }
-
- sec_link_tx += mvmsta->mpdu_counters[q].per_link[sec_link].tx;
- sec_link_rx += mvmsta->mpdu_counters[q].per_link[sec_link].rx;
-
- /*
- * In EMLSR we have statistics every 5 seconds, so we can reset
- * the counters upon every statistics notification.
- * The FW sends the notification regularly, but it will be
- * misaligned at the start. Skipping the measurement if it is
- * short will synchronize us.
- */
- if (jiffies - mvmsta->mpdu_counters[q].window_start <
- IWL_MVM_TPT_MIN_COUNT_WINDOW)
- skip = true;
- mvmsta->mpdu_counters[q].window_start = jiffies;
- memset(mvmsta->mpdu_counters[q].per_link, 0,
- sizeof(mvmsta->mpdu_counters[q].per_link));
-
- spin_unlock_bh(&mvmsta->mpdu_counters[q].lock);
- }
-
- if (skip) {
- IWL_DEBUG_INFO(mvm, "MPDU statistics window was short\n");
- return;
- }
-
- IWL_DEBUG_INFO(mvm, "total Tx MPDUs: %ld. total Rx MPDUs: %ld\n",
- total_tx, total_rx);
-
- /* If we don't have enough MPDUs - exit EMLSR */
- if (total_tx < IWL_MVM_ENTER_ESR_TPT_THRESH &&
- total_rx < IWL_MVM_ENTER_ESR_TPT_THRESH) {
- iwl_mvm_block_esr(mvm, bss_vif, IWL_MVM_ESR_BLOCKED_TPT,
- iwl_mvm_get_primary_link(bss_vif));
- return;
- }
-
- IWL_DEBUG_INFO(mvm, "Secondary Link %d: Tx MPDUs: %ld. Rx MPDUs: %ld\n",
- sec_link, sec_link_tx, sec_link_rx);
-
- /* Calculate the percentage of the secondary link TX/RX */
- sec_link_tx_perc = total_tx ? sec_link_tx * 100 / total_tx : 0;
- sec_link_rx_perc = total_rx ? sec_link_rx * 100 / total_rx : 0;
-
- /*
-	 * The TX/RX percentage is checked only if the total traffic exceeds
-	 * the required minimum. In addition, RX is checked only if the TX
-	 * check failed.
- */
- if ((total_tx > SEC_LINK_MIN_TX &&
- sec_link_tx_perc < SEC_LINK_MIN_PERC) ||
- (total_rx > SEC_LINK_MIN_RX &&
- sec_link_rx_perc < SEC_LINK_MIN_PERC))
- iwl_mvm_exit_esr(mvm, bss_vif, IWL_MVM_ESR_EXIT_LINK_USAGE,
- iwl_mvm_get_primary_link(bss_vif));
-}
-
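Worked numbers for the usage check removed above: total_tx = 4000 MPDUs
(above SEC_LINK_MIN_TX) with sec_link_tx = 300 gives 300 * 100 / 4000 = 7
percent, below SEC_LINK_MIN_PERC, so EMLSR would be exited with
IWL_MVM_ESR_EXIT_LINK_USAGE. As a standalone predicate (name hypothetical,
thresholds inlined from the #defines above):

	static bool sec_link_underused(unsigned long total_tx,
				       unsigned long sec_tx,
				       unsigned long total_rx,
				       unsigned long sec_rx)
	{
		unsigned int tx_perc = total_tx ? sec_tx * 100 / total_tx : 0;
		unsigned int rx_perc = total_rx ? sec_rx * 100 / total_rx : 0;

		/* RX is consulted only when the TX check did not trigger */
		return (total_tx > 3000 && tx_perc < 10) ||
		       (total_rx > 400 && rx_perc < 10);
	}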
void iwl_mvm_handle_rx_system_oper_stats(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
@@ -1088,8 +957,6 @@ void iwl_mvm_handle_rx_system_oper_stats(struct iwl_mvm *mvm,
ieee80211_iterate_stations_atomic(mvm->hw, iwl_mvm_stats_energy_iter,
average_energy);
iwl_mvm_handle_per_phy_stats(mvm, stats->per_phy);
-
- iwl_mvm_update_esr_mode_tpt(mvm);
}
void iwl_mvm_handle_rx_system_oper_part1_stats(struct iwl_mvm *mvm,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 62e76a79f621..d35c63a673b6 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -332,6 +332,7 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
struct ieee80211_key_conf *key;
u32 len = le16_to_cpu(desc->mpdu_len);
const u8 *frame = (void *)hdr;
+ const u8 *mmie;
if ((status & IWL_RX_MPDU_STATUS_SEC_MASK) == IWL_RX_MPDU_STATUS_SEC_NONE)
return 0;
@@ -375,11 +376,15 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
goto report;
}
- if (len < key->icv_len + IEEE80211_GMAC_PN_LEN + 2)
+ if (len < key->icv_len)
goto report;
/* get the real key ID */
- keyid = frame[len - key->icv_len - IEEE80211_GMAC_PN_LEN - 2];
+ mmie = frame + (len - key->icv_len);
+
+ /* the position of the key_id in ieee80211_mmie_16 is the same */
+ keyid = le16_to_cpu(((const struct ieee80211_mmie *) mmie)->key_id);
+
/* and if that's the other key, look it up */
if (keyid != key->keyidx) {
/*
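The cast above is safe because, per include/linux/ieee80211.h, the 8-byte
and 16-byte MIC variants of the MMIE share the same leading layout, so
key_id sits at the same offset in both (abridged):

	struct ieee80211_mmie {
		u8	element_id;
		u8	length;
		__le16	key_id;			/* same offset ... */
		u8	sequence_number[6];
		u8	mic[8];
	} __packed;

	struct ieee80211_mmie_16 {
		u8	element_id;
		u8	length;
		__le16	key_id;			/* ... in both variants */
		u8	sequence_number[6];
		u8	mic[16];
	} __packed;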
@@ -2099,7 +2104,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
struct ieee80211_sta *sta = NULL;
struct sk_buff *skb;
u8 crypt_len = 0;
- u8 sta_id = le32_get_bits(desc->status, IWL_RX_MPDU_STATUS_STA_ID);
size_t desc_size;
struct iwl_mvm_rx_phy_data phy_data = {};
u32 format;
@@ -2246,6 +2250,9 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
rcu_read_lock();
if (desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_SRC_STA_FOUND)) {
+ u8 sta_id = le32_get_bits(desc->status,
+ IWL_RX_MPDU_STATUS_STA_ID);
+
if (!WARN_ON_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations)) {
struct ieee80211_link_sta *link_sta;
@@ -2373,16 +2380,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
iwl_mvm_agg_rx_received(mvm, reorder_data, baid);
}
-
- if (ieee80211_is_data(hdr->frame_control)) {
- u8 sub_frame_idx = desc->amsdu_info &
- IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
-
- /* 0 means not an A-MSDU, and 1 means a new A-MSDU */
- if (!sub_frame_idx || sub_frame_idx == 1)
- iwl_mvm_count_mpdu(mvmsta, sta_id, 1, false,
- queue);
- }
}
/* management stuff on default queue */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 9ce1ce0dab34..b588f1dcf20d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -1392,8 +1392,6 @@ static u32 iwl_mvm_scan_umac_ooc_priority(int type)
{
if (type == IWL_MVM_SCAN_REGULAR)
return IWL_SCAN_PRIORITY_EXT_6;
- if (type == IWL_MVM_SCAN_INT_MLO)
- return IWL_SCAN_PRIORITY_EXT_4;
return IWL_SCAN_PRIORITY_EXT_2;
}
@@ -3220,7 +3218,6 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
struct iwl_umac_scan_complete *notif = (void *)pkt->data;
u32 uid = __le32_to_cpu(notif->uid);
bool aborted = (notif->status == IWL_SCAN_OFFLOAD_ABORTED);
- bool select_links = false;
mvm->mei_scan_filter.is_mei_limited_scan = false;
@@ -3267,13 +3264,6 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
} else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) {
ieee80211_sched_scan_stopped(mvm->hw);
mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
- } else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_INT_MLO) {
- IWL_DEBUG_SCAN(mvm, "Internal MLO scan completed\n");
- /*
-		 * Other scan types won't necessarily scan the MLD links'
-		 * channels. Therefore, only select links after a successful
-		 * internal scan.
- */
- select_links = notif->status == IWL_SCAN_OFFLOAD_COMPLETED;
}
mvm->scan_status &= ~mvm->scan_uid_status[uid];
@@ -3286,9 +3276,6 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
mvm->last_ebs_successful = false;
mvm->scan_uid_status[uid] = 0;
-
- if (select_links)
- wiphy_work_queue(mvm->hw->wiphy, &mvm->trig_link_selection_wk);
}
void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
@@ -3483,11 +3470,6 @@ void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm)
mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
mvm->scan_uid_status[uid] = 0;
}
- uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_INT_MLO);
- if (uid >= 0) {
- IWL_DEBUG_SCAN(mvm, "Internal MLO scan aborted\n");
- mvm->scan_uid_status[uid] = 0;
- }
uid = iwl_mvm_scan_uid_by_status(mvm,
IWL_MVM_SCAN_STOPPING_REGULAR);
@@ -3583,89 +3565,6 @@ out:
return ret;
}
-static int iwl_mvm_int_mlo_scan_start(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- struct ieee80211_channel **channels,
- size_t n_channels)
-{
- struct cfg80211_scan_request *req = NULL;
- struct ieee80211_scan_ies ies = {};
- size_t size, i;
- int ret;
-
- lockdep_assert_held(&mvm->mutex);
-
- IWL_DEBUG_SCAN(mvm, "Starting Internal MLO scan: n_channels=%zu\n",
- n_channels);
-
- if (!vif->cfg.assoc || !ieee80211_vif_is_mld(vif) ||
- hweight16(vif->valid_links) == 1)
- return -EINVAL;
-
- size = struct_size(req, channels, n_channels);
- req = kzalloc(size, GFP_KERNEL);
- if (!req)
- return -ENOMEM;
-
- /* set the requested channels */
- for (i = 0; i < n_channels; i++)
- req->channels[i] = channels[i];
-
- req->n_channels = n_channels;
-
- /* set the rates */
- for (i = 0; i < NUM_NL80211_BANDS; i++)
- if (mvm->hw->wiphy->bands[i])
- req->rates[i] =
- (1 << mvm->hw->wiphy->bands[i]->n_bitrates) - 1;
-
- req->wdev = ieee80211_vif_to_wdev(vif);
- req->wiphy = mvm->hw->wiphy;
- req->scan_start = jiffies;
- req->tsf_report_link_id = -1;
-
- ret = _iwl_mvm_single_scan_start(mvm, vif, req, &ies,
- IWL_MVM_SCAN_INT_MLO);
- kfree(req);
-
- IWL_DEBUG_SCAN(mvm, "Internal MLO scan: ret=%d\n", ret);
- return ret;
-}
-
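The allocation in the removed function uses the kernel's overflow-checked
flexible-array helper from <linux/overflow.h>; for reference, the pattern
is:

	/*
	 * struct_size(req, channels, n) evaluates to
	 * sizeof(*req) + n * sizeof(req->channels[0]), saturating to
	 * SIZE_MAX on overflow so kzalloc() fails instead of
	 * under-allocating.
	 */
	req = kzalloc(struct_size(req, channels, n_channels), GFP_KERNEL);
	if (!req)
		return -ENOMEM;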
-int iwl_mvm_int_mlo_scan(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
-{
- struct ieee80211_channel *channels[IEEE80211_MLD_MAX_NUM_LINKS];
- unsigned long usable_links = ieee80211_vif_usable_links(vif);
- size_t n_channels = 0;
- u8 link_id;
-
- lockdep_assert_held(&mvm->mutex);
-
- if (mvm->scan_status & IWL_MVM_SCAN_INT_MLO) {
- IWL_DEBUG_SCAN(mvm, "Internal MLO scan is already running\n");
- return -EBUSY;
- }
-
- rcu_read_lock();
-
- for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
- struct ieee80211_bss_conf *link_conf =
- rcu_dereference(vif->link_conf[link_id]);
-
- if (WARN_ON_ONCE(!link_conf))
- continue;
-
- channels[n_channels++] = link_conf->chanreq.oper.chan;
- }
-
- rcu_read_unlock();
-
- if (!n_channels)
- return -EINVAL;
-
- return iwl_mvm_int_mlo_scan_start(mvm, vif, channels, n_channels);
-}
-
static int iwl_mvm_chanidx_from_phy(struct iwl_mvm *mvm,
enum nl80211_band band,
u16 phy_chan_num)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 11c6b86db4ec..363232bb74fa 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -1835,18 +1835,6 @@ int iwl_mvm_sta_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant);
- /* MPDUs are counted only when EMLSR is possible */
- if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
- !sta->tdls && ieee80211_vif_is_mld(vif)) {
- mvm_sta->mpdu_counters =
- kcalloc(mvm->trans->info.num_rxqs,
- sizeof(*mvm_sta->mpdu_counters),
- GFP_KERNEL);
- if (mvm_sta->mpdu_counters)
- for (int q = 0; q < mvm->trans->info.num_rxqs; q++)
- spin_lock_init(&mvm_sta->mpdu_counters[q].lock);
- }
-
return 0;
}
@@ -4328,80 +4316,3 @@ void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm,
if (ret)
IWL_ERR(mvm, "Failed to cancel the channel switch\n");
}
-
-static int iwl_mvm_fw_sta_id_to_fw_link_id(struct iwl_mvm_vif *mvmvif,
- u8 fw_sta_id)
-{
- struct ieee80211_link_sta *link_sta =
- rcu_dereference(mvmvif->mvm->fw_id_to_link_sta[fw_sta_id]);
- struct iwl_mvm_vif_link_info *link;
-
- if (WARN_ON_ONCE(!link_sta))
- return -EINVAL;
-
- link = mvmvif->link[link_sta->link_id];
-
- if (WARN_ON_ONCE(!link))
- return -EINVAL;
-
- return link->fw_link_id;
-}
-
-#define IWL_MVM_TPT_COUNT_WINDOW (IWL_MVM_TPT_COUNT_WINDOW_SEC * HZ)
-
-void iwl_mvm_count_mpdu(struct iwl_mvm_sta *mvm_sta, u8 fw_sta_id, u32 count,
- bool tx, int queue)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvm_sta->vif);
- struct iwl_mvm *mvm = mvmvif->mvm;
- struct iwl_mvm_tpt_counter *queue_counter;
- struct iwl_mvm_mpdu_counter *link_counter;
- u32 total_mpdus = 0;
- int fw_link_id;
-
- /* Count only for a BSS sta, and only when EMLSR is possible */
- if (!mvm_sta->mpdu_counters)
- return;
-
- /* Map sta id to link id */
- fw_link_id = iwl_mvm_fw_sta_id_to_fw_link_id(mvmvif, fw_sta_id);
- if (fw_link_id < 0)
- return;
-
- queue_counter = &mvm_sta->mpdu_counters[queue];
- link_counter = &queue_counter->per_link[fw_link_id];
-
- spin_lock_bh(&queue_counter->lock);
-
- if (tx)
- link_counter->tx += count;
- else
- link_counter->rx += count;
-
- /*
-	 * When not in EMLSR, the window and the decision to enter EMLSR are
-	 * handled during counting; when in EMLSR, they are handled in the
-	 * statistics flow.
- */
- if (mvmvif->esr_active)
- goto out;
-
- if (time_is_before_jiffies(queue_counter->window_start +
- IWL_MVM_TPT_COUNT_WINDOW)) {
- memset(queue_counter->per_link, 0,
- sizeof(queue_counter->per_link));
- queue_counter->window_start = jiffies;
-
- IWL_DEBUG_INFO(mvm, "MPDU counters are cleared\n");
- }
-
- for (int i = 0; i < IWL_FW_MAX_LINK_ID; i++)
- total_mpdus += tx ? queue_counter->per_link[i].tx :
- queue_counter->per_link[i].rx;
-
- if (total_mpdus > IWL_MVM_ENTER_ESR_TPT_THRESH)
- wiphy_work_queue(mvmvif->mvm->hw->wiphy,
- &mvmvif->unblock_esr_tpt_wk);
-
-out:
- spin_unlock_bh(&queue_counter->lock);
-}
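The window test in the removed code relies on the wraparound-safe jiffies
helpers; for reference, <linux/jiffies.h> defines the one used above in
terms of time_after():

	/* true once @a is in the past, correct across jiffies wraparound */
	#define time_is_before_jiffies(a) time_after(jiffies, a)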
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index f6906061510b..c25edc7c1813 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -344,24 +344,6 @@ struct iwl_mvm_link_sta {
u8 avg_energy;
};
-struct iwl_mvm_mpdu_counter {
- u32 tx;
- u32 rx;
-};
-
-/**
- * struct iwl_mvm_tpt_counter - per-queue MPDU counter
- *
- * @lock: Needed to protect the counters when modified from statistics.
- * @per_link: per-link counters.
- * @window_start: timestamp of the counting-window start
- */
-struct iwl_mvm_tpt_counter {
- spinlock_t lock;
- struct iwl_mvm_mpdu_counter per_link[IWL_FW_MAX_LINK_ID];
- unsigned long window_start;
-} ____cacheline_aligned_in_smp;
-
/**
* struct iwl_mvm_sta - representation of a station in the driver
* @vif: the interface the station belongs to
@@ -409,7 +391,6 @@ struct iwl_mvm_tpt_counter {
* @link: per link sta entries. For non-MLO only link[0] holds data. For MLO,
* link[0] points to deflink and link[link_id] is allocated when new link
* sta is added.
- * @mpdu_counters: RX/TX MPDUs counters for each queue.
*
* When mac80211 creates a station it reserves some space (hw->sta_data_size)
* in the structure for use by driver. This structure is placed in that
@@ -449,8 +430,6 @@ struct iwl_mvm_sta {
struct iwl_mvm_link_sta deflink;
struct iwl_mvm_link_sta __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS];
-
- struct iwl_mvm_tpt_counter *mpdu_counters;
};
u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data);
@@ -533,9 +512,6 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
-void iwl_mvm_count_mpdu(struct iwl_mvm_sta *mvm_sta, u8 fw_sta_id, u32 count,
- bool tx, int queue);
-
/* AMPDU */
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, u16 ssn, bool start, u16 buf_size, u16 timeout);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile b/drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile
index bb33f4a06f1c..2267be4cfb44 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile
@@ -1,3 +1,3 @@
-iwlmvm-tests-y += module.o links.o hcmd.o
+iwlmvm-tests-y += module.o hcmd.o
obj-$(CONFIG_IWLWIFI_KUNIT_TESTS) += iwlmvm-tests.o
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tests/links.c b/drivers/net/wireless/intel/iwlwifi/mvm/tests/links.c
deleted file mode 100644
index d692f1813d44..000000000000
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tests/links.c
+++ /dev/null
@@ -1,433 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * KUnit tests for channel helper functions
- *
- * Copyright (C) 2024 Intel Corporation
- */
-#include <net/mac80211.h>
-#include "../mvm.h"
-#include <kunit/test.h>
-
-MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
-
-static struct wiphy wiphy = {
- .mtx = __MUTEX_INITIALIZER(wiphy.mtx),
-};
-
-static struct ieee80211_hw hw = {
- .wiphy = &wiphy,
-};
-
-static struct ieee80211_channel chan_5ghz = {
- .band = NL80211_BAND_5GHZ,
-};
-
-static struct ieee80211_channel chan_6ghz = {
- .band = NL80211_BAND_6GHZ,
-};
-
-static struct ieee80211_channel chan_2ghz = {
- .band = NL80211_BAND_2GHZ,
-};
-
-static struct cfg80211_chan_def chandef_a = {};
-
-static struct cfg80211_chan_def chandef_b = {};
-
-static struct iwl_mvm_phy_ctxt ctx = {};
-
-static struct iwl_mvm_vif_link_info mvm_link = {
- .phy_ctxt = &ctx,
- .active = true
-};
-
-static struct cfg80211_bss bss = {};
-
-static struct ieee80211_bss_conf link_conf = {.bss = &bss};
-
-static const struct iwl_fw_cmd_version entry = {
- .group = LEGACY_GROUP,
- .cmd = BT_PROFILE_NOTIFICATION,
- .notif_ver = 4
-};
-
-static struct iwl_fw fw = {
- .ucode_capa = {
- .n_cmd_versions = 1,
- .cmd_versions = &entry,
- },
-};
-
-static struct iwl_mvm mvm = {
- .hw = &hw,
- .fw = &fw,
-};
-
-static const struct link_grading_case {
- const char *desc;
- const struct cfg80211_chan_def chandef;
- s32 signal;
- s16 channel_util;
- int chan_load_by_us;
- unsigned int grade;
-} link_grading_cases[] = {
- {
- .desc = "UHB, RSSI below range, no factors",
- .chandef = {
- .chan = &chan_6ghz,
- .width = NL80211_CHAN_WIDTH_20,
- },
- .signal = -100,
- .grade = 177,
- },
- {
- .desc = "LB, RSSI in range, no factors",
- .chandef = {
- .chan = &chan_2ghz,
- .width = NL80211_CHAN_WIDTH_20,
- },
- .signal = -84,
- .grade = 344,
- },
- {
- .desc = "HB, RSSI above range, no factors",
- .chandef = {
- .chan = &chan_5ghz,
- .width = NL80211_CHAN_WIDTH_20,
- },
- .signal = -50,
- .grade = 3442,
- },
- {
- .desc = "HB, BSS Load IE (20 percent), inactive link, no puncturing factor",
- .chandef = {
- .chan = &chan_5ghz,
- .width = NL80211_CHAN_WIDTH_20,
- },
- .signal = -66,
- .channel_util = 51,
- .grade = 1836,
- },
- {
- .desc = "LB, BSS Load IE (20 percent), active link, chan_load_by_us=10 percent. No puncturing factor",
- .chandef = {
- .chan = &chan_2ghz,
- .width = NL80211_CHAN_WIDTH_20,
- },
- .signal = -61,
- .channel_util = 51,
- .chan_load_by_us = 10,
- .grade = 2061,
- },
- {
- .desc = "UHB, BSS Load IE (40 percent), active link, chan_load_by_us=50 (invalid) percent. No puncturing factor",
- .chandef = {
- .chan = &chan_6ghz,
- .width = NL80211_CHAN_WIDTH_20,
- },
- .signal = -66,
- .channel_util = 102,
- .chan_load_by_us = 50,
- .grade = 1552,
- },
- { .desc = "HB, 80 MHz, no channel load factor, punctured percentage 0",
- .chandef = {
- .chan = &chan_5ghz,
- .width = NL80211_CHAN_WIDTH_80,
- .punctured = 0x0000
- },
- .signal = -72,
- .grade = 1750,
- },
- { .desc = "HB, 160 MHz, no channel load factor, punctured percentage 25",
- .chandef = {
- .chan = &chan_5ghz,
- .width = NL80211_CHAN_WIDTH_160,
- .punctured = 0x3
- },
- .signal = -72,
- .grade = 1312,
- },
- { .desc = "UHB, 320 MHz, no channel load factor, punctured percentage 12.5 (2/16)",
- .chandef = {
- .chan = &chan_6ghz,
- .width = NL80211_CHAN_WIDTH_320,
- .punctured = 0x3
- },
- .signal = -72,
- .grade = 1806,
- },
- { .desc = "HB, 160 MHz, channel load 20, channel load by us 10, punctured percentage 25",
- .chandef = {
- .chan = &chan_5ghz,
- .width = NL80211_CHAN_WIDTH_160,
- .punctured = 0x3
- },
- .channel_util = 51,
- .chan_load_by_us = 10,
- .signal = -72,
- .grade = 1179,
- },
-};
-
-KUNIT_ARRAY_PARAM_DESC(link_grading, link_grading_cases, desc)
-
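For reference, KUNIT_ARRAY_PARAM_DESC() generates the parameter generator
consumed by KUNIT_CASE_PARAM() further down; roughly:

	/*
	 * expands to a link_grading_gen_params() helper that walks
	 * link_grading_cases[] and uses each entry's .desc member as the
	 * reported test-case description
	 */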
-static void setup_link_conf(struct kunit *test)
-{
- const struct link_grading_case *params = test->param_value;
- size_t vif_size = sizeof(struct ieee80211_vif) +
- sizeof(struct iwl_mvm_vif);
- struct ieee80211_vif *vif = kunit_kzalloc(test, vif_size, GFP_KERNEL);
- struct ieee80211_bss_load_elem *bss_load;
- struct element *element;
- size_t ies_size = sizeof(struct cfg80211_bss_ies) + sizeof(*bss_load) + sizeof(element);
- struct cfg80211_bss_ies *ies;
- struct iwl_mvm_vif *mvmvif;
-
- KUNIT_ASSERT_NOT_NULL(test, vif);
-
- mvmvif = iwl_mvm_vif_from_mac80211(vif);
- if (params->chan_load_by_us > 0) {
- ctx.channel_load_by_us = params->chan_load_by_us;
- mvmvif->link[0] = &mvm_link;
- }
-
- link_conf.vif = vif;
- link_conf.chanreq.oper = params->chandef;
- bss.signal = DBM_TO_MBM(params->signal);
-
- ies = kunit_kzalloc(test, ies_size, GFP_KERNEL);
- KUNIT_ASSERT_NOT_NULL(test, ies);
- ies->len = sizeof(*bss_load) + sizeof(struct element);
-
- element = (void *)ies->data;
- element->datalen = sizeof(*bss_load);
- element->id = 11;
-
- bss_load = (void *)element->data;
- bss_load->channel_util = params->channel_util;
-
- rcu_assign_pointer(bss.ies, ies);
- rcu_assign_pointer(bss.beacon_ies, ies);
-}
-
-static void test_link_grading(struct kunit *test)
-{
- const struct link_grading_case *params = test->param_value;
- unsigned int ret;
-
- setup_link_conf(test);
-
- rcu_read_lock();
- ret = iwl_mvm_get_link_grade(&link_conf);
- rcu_read_unlock();
-
- KUNIT_EXPECT_EQ(test, ret, params->grade);
-
- kunit_kfree(test, link_conf.vif);
- RCU_INIT_POINTER(bss.ies, NULL);
-}
-
-static struct kunit_case link_grading_test_cases[] = {
- KUNIT_CASE_PARAM(test_link_grading, link_grading_gen_params),
- {}
-};
-
-static struct kunit_suite link_grading = {
- .name = "iwlmvm-link-grading",
- .test_cases = link_grading_test_cases,
-};
-
-kunit_test_suite(link_grading);
-
-static const struct valid_link_pair_case {
- const char *desc;
- bool bt;
- struct ieee80211_channel *chan_a;
- struct ieee80211_channel *chan_b;
- enum nl80211_chan_width cw_a;
- enum nl80211_chan_width cw_b;
- s32 sig_a;
- s32 sig_b;
- bool csa_a;
- bool valid;
-} valid_link_pair_cases[] = {
- {
- .desc = "HB + UHB, valid.",
- .chan_a = &chan_6ghz,
- .chan_b = &chan_5ghz,
- .valid = true,
- },
- {
- .desc = "LB + HB, no BT.",
- .chan_a = &chan_2ghz,
- .chan_b = &chan_5ghz,
- .valid = true,
- },
- {
- .desc = "LB + HB, with BT.",
- .bt = true,
- .chan_a = &chan_2ghz,
- .chan_b = &chan_5ghz,
- .valid = false,
- },
- {
- .desc = "Same band",
- .chan_a = &chan_2ghz,
- .chan_b = &chan_2ghz,
- .valid = false,
- },
- {
- .desc = "RSSI: LB, 20 MHz, low",
- .chan_a = &chan_2ghz,
- .cw_a = NL80211_CHAN_WIDTH_20,
- .sig_a = -68,
- .chan_b = &chan_5ghz,
- .valid = false,
- },
- {
- .desc = "RSSI: UHB, 20 MHz, high",
- .chan_a = &chan_6ghz,
- .cw_a = NL80211_CHAN_WIDTH_20,
- .sig_a = -66,
- .chan_b = &chan_5ghz,
- .cw_b = NL80211_CHAN_WIDTH_20,
- .valid = true,
- },
- {
- .desc = "RSSI: UHB, 40 MHz, low",
- .chan_a = &chan_6ghz,
- .cw_a = NL80211_CHAN_WIDTH_40,
- .sig_a = -65,
- .chan_b = &chan_5ghz,
- .cw_b = NL80211_CHAN_WIDTH_40,
- .valid = false,
- },
- {
- .desc = "RSSI: UHB, 40 MHz, high",
- .chan_a = &chan_6ghz,
- .cw_a = NL80211_CHAN_WIDTH_40,
- .sig_a = -63,
- .chan_b = &chan_5ghz,
- .cw_b = NL80211_CHAN_WIDTH_40,
- .valid = true,
- },
- {
- .desc = "RSSI: UHB, 80 MHz, low",
- .chan_a = &chan_6ghz,
- .cw_a = NL80211_CHAN_WIDTH_80,
- .sig_a = -62,
- .chan_b = &chan_5ghz,
- .cw_b = NL80211_CHAN_WIDTH_80,
- .valid = false,
- },
- {
- .desc = "RSSI: UHB, 80 MHz, high",
- .chan_a = &chan_6ghz,
- .cw_a = NL80211_CHAN_WIDTH_80,
- .sig_a = -60,
- .chan_b = &chan_5ghz,
- .cw_b = NL80211_CHAN_WIDTH_80,
- .valid = true,
- },
- {
- .desc = "RSSI: UHB, 160 MHz, low",
- .chan_a = &chan_6ghz,
- .cw_a = NL80211_CHAN_WIDTH_160,
- .sig_a = -59,
- .chan_b = &chan_5ghz,
- .cw_b = NL80211_CHAN_WIDTH_160,
- .valid = false,
- },
- {
- .desc = "RSSI: HB, 160 MHz, high",
- .chan_a = &chan_6ghz,
- .cw_a = NL80211_CHAN_WIDTH_160,
- .sig_a = -5,
- .chan_b = &chan_5ghz,
- .cw_b = NL80211_CHAN_WIDTH_160,
- .valid = true,
- },
- {
- .desc = "CSA active",
- .chan_a = &chan_6ghz,
- .cw_a = NL80211_CHAN_WIDTH_160,
- .sig_a = -5,
- .chan_b = &chan_5ghz,
- .cw_b = NL80211_CHAN_WIDTH_160,
- .valid = false,
- /* same as previous entry with valid=true except for CSA */
- .csa_a = true,
- },
-};
-
-KUNIT_ARRAY_PARAM_DESC(valid_link_pair, valid_link_pair_cases, desc)
-
-static void test_valid_link_pair(struct kunit *test)
-{
- const struct valid_link_pair_case *params = test->param_value;
- size_t vif_size = sizeof(struct ieee80211_vif) +
- sizeof(struct iwl_mvm_vif);
- struct ieee80211_vif *vif = kunit_kzalloc(test, vif_size, GFP_KERNEL);
- struct iwl_trans *trans = kunit_kzalloc(test, sizeof(struct iwl_trans),
- GFP_KERNEL);
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm_link_sel_data link_a = {
- .chandef = &chandef_a,
- .link_id = 1,
- .signal = params->sig_a,
- };
- struct iwl_mvm_link_sel_data link_b = {
- .chandef = &chandef_b,
- .link_id = 5,
- .signal = params->sig_b,
- };
- struct ieee80211_bss_conf *conf;
- bool result;
-
- KUNIT_ASSERT_NOT_NULL(test, vif);
- KUNIT_ASSERT_NOT_NULL(test, trans);
-
- chandef_a.chan = params->chan_a;
- chandef_b.chan = params->chan_b;
-
- chandef_a.width = params->cw_a ?: NL80211_CHAN_WIDTH_20;
- chandef_b.width = params->cw_b ?: NL80211_CHAN_WIDTH_20;
-
- mvm.trans = trans;
-
- mvm.last_bt_notif.wifi_loss_low_rssi = params->bt;
- mvmvif->mvm = &mvm;
-
- conf = kunit_kzalloc(test, sizeof(*vif->link_conf[0]), GFP_KERNEL);
- KUNIT_ASSERT_NOT_NULL(test, conf);
- conf->chanreq.oper = chandef_a;
- conf->csa_active = params->csa_a;
- vif->link_conf[link_a.link_id] = (void __rcu *)conf;
-
- conf = kunit_kzalloc(test, sizeof(*vif->link_conf[0]), GFP_KERNEL);
- KUNIT_ASSERT_NOT_NULL(test, conf);
- conf->chanreq.oper = chandef_b;
- vif->link_conf[link_b.link_id] = (void __rcu *)conf;
-
- wiphy_lock(&wiphy);
- result = iwl_mvm_mld_valid_link_pair(vif, &link_a, &link_b);
- wiphy_unlock(&wiphy);
-
- KUNIT_EXPECT_EQ(test, result, params->valid);
-
- kunit_kfree(test, vif);
- kunit_kfree(test, trans);
-}
-
-static struct kunit_case valid_link_pair_test_cases[] = {
- KUNIT_CASE_PARAM(test_valid_link_pair, valid_link_pair_gen_params),
- {},
-};
-
-static struct kunit_suite valid_link_pair = {
- .name = "iwlmvm-valid-link-pair",
- .test_cases = valid_link_pair_test_cases,
-};
-
-kunit_test_suite(valid_link_pair);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index aa653782d6d7..0c9c2492d8a7 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -47,7 +47,6 @@ void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
static void iwl_mvm_cleanup_roc(struct iwl_mvm *mvm)
{
- struct ieee80211_vif *bss_vif = iwl_mvm_get_bss_vif(mvm);
struct ieee80211_vif *vif = mvm->p2p_device_vif;
lockdep_assert_held(&mvm->mutex);
@@ -125,8 +124,6 @@ static void iwl_mvm_cleanup_roc(struct iwl_mvm *mvm)
iwl_mvm_rm_aux_sta(mvm);
}
- if (!IS_ERR_OR_NULL(bss_vif))
- iwl_mvm_unblock_esr(mvm, bss_vif, IWL_MVM_ESR_BLOCKED_ROC);
mutex_unlock(&mvm->mutex);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 25d1a882a6a0..bb97837baeda 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -1769,9 +1769,6 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
IWL_DEBUG_TX_REPLY(mvm,
"Next reclaimed packet:%d\n",
next_reclaimed);
- if (tid < IWL_MAX_TID_COUNT)
- iwl_mvm_count_mpdu(mvmsta, sta_id, 1,
- true, 0);
} else {
IWL_DEBUG_TX_REPLY(mvm,
"NDP - don't update next_reclaimed\n");
@@ -2150,13 +2147,10 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
ba_res->tx_rate, false);
}
- if (mvmsta) {
+ if (mvmsta)
iwl_mvm_tx_airtime(mvm, mvmsta,
le32_to_cpu(ba_res->wireless_time));
- iwl_mvm_count_mpdu(mvmsta, sta_id,
- le16_to_cpu(ba_res->txed), true, 0);
- }
rcu_read_unlock();
return;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index 62da0132f383..22602c32faa5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -22,11 +22,6 @@ int iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd)
{
int ret;
-#if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP)
- if (WARN_ON(mvm->d3_test_active))
- return -EIO;
-#endif
-
/*
* Synchronous commands from this op-mode must hold
* the mutex, this ensures we don't try to send two
@@ -79,11 +74,6 @@ int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd,
lockdep_assert_held(&mvm->mutex);
-#if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP)
- if (WARN_ON(mvm->d3_test_active))
- return -EIO;
-#endif
-
/*
* Only synchronous commands can wait for status,
* we use WANT_SKB so the caller can't.
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 7e56e4ff7642..b21a4d8eb105 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -1179,16 +1179,11 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
static void iwl_pci_remove(struct pci_dev *pdev)
{
struct iwl_trans *trans = pci_get_drvdata(pdev);
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
if (!trans)
return;
- cancel_delayed_work_sync(&trans_pcie->me_recheck_wk);
-
- iwl_drv_stop(trans->drv);
-
- iwl_trans_pcie_free(trans);
+ iwl_pcie_gen1_2_remove(trans);
}
#ifdef CONFIG_PM_SLEEP
@@ -1260,7 +1255,7 @@ static int _iwl_pci_resume(struct device *device, bool restore)
* won't really know how to recover.
*/
iwl_pcie_prepare_card_hw(trans);
- iwl_finish_nic_init(trans);
+ iwl_trans_activate_nic(trans);
iwl_op_mode_device_powered_off(trans->op_mode);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/internal.h
index f48aeebb151c..207c56e338dd 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/internal.h
@@ -400,6 +400,11 @@ struct iwl_pcie_txqs {
* @me_recheck_wk: worker to recheck WiAMT/CSME presence
* @invalid_tx_cmd: invalid TX command buffer
* @wait_command_queue: wait queue for sync commands
+ * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
+ * The user should use iwl_trans_{alloc,free}_tx_cmd.
+ * @dev_cmd_pool_name: name for the TX command allocation pool
+ * @pm_support: set to true in start_hw if link pm is supported
+ * @ltr_enabled: set to true if the LTR is enabled
*/
struct iwl_trans_pcie {
struct iwl_rxq *rxq;
@@ -506,6 +511,12 @@ struct iwl_trans_pcie {
struct iwl_dma_ptr invalid_tx_cmd;
wait_queue_head_t wait_command_queue;
+
+ struct kmem_cache *dev_cmd_pool;
+ char dev_cmd_pool_name[50];
+
+ bool pm_support;
+ bool ltr_enabled;
};
static inline struct iwl_trans_pcie *
@@ -783,6 +794,23 @@ static inline u16 iwl_txq_gen1_tfd_tb_get_len(struct iwl_trans *trans,
return le16_to_cpu(tb->hi_n_len) >> 4;
}
+static inline struct iwl_device_tx_cmd *
+iwl_pcie_gen1_2_alloc_tx_cmd(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ return kmem_cache_zalloc(trans_pcie->dev_cmd_pool, GFP_ATOMIC);
+}
+
+static inline void
+iwl_pcie_gen1_2_free_tx_cmd(struct iwl_trans *trans,
+ struct iwl_device_tx_cmd *dev_cmd)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ kmem_cache_free(trans_pcie->dev_cmd_pool, dev_cmd);
+}
+
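The new pool members imply a matching setup step elsewhere in the series; a
sketch of the usual pattern (the size and flags below are assumptions, not
taken from this patch):

	snprintf(trans_pcie->dev_cmd_pool_name,
		 sizeof(trans_pcie->dev_cmd_pool_name),
		 "iwl_cmd_pool:%s", dev_name(trans->dev));
	trans_pcie->dev_cmd_pool =
		kmem_cache_create(trans_pcie->dev_cmd_pool_name,
				  sizeof(struct iwl_device_tx_cmd),
				  sizeof(void *), SLAB_HWCACHE_ALIGN, NULL);
	if (!trans_pcie->dev_cmd_pool)
		return -ENOMEM;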
void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
struct sk_buff_head *skbs, bool is_flush);
void iwl_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr);
@@ -818,6 +846,8 @@ static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
trans_pcie->fh_init_mask);
iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
trans_pcie->hw_init_mask);
+ trans_pcie->fh_mask = 0;
+ trans_pcie->hw_mask = 0;
}
IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}
@@ -1000,6 +1030,7 @@ static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
} else {
iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
trans_pcie->fh_init_mask);
+ trans_pcie->fh_mask = 0;
iwl_enable_hw_int_msk_msix(trans,
MSIX_HW_INT_CAUSES_REG_RF_KILL);
}
@@ -1047,7 +1078,7 @@ static inline void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) { }
void iwl_pcie_rx_allocator_work(struct work_struct *data);
/* common trans ops for all generations transports */
-void iwl_trans_pcie_op_mode_enter(struct iwl_trans *trans);
+void iwl_pcie_gen1_2_op_mode_enter(struct iwl_trans *trans);
int _iwl_trans_pcie_start_hw(struct iwl_trans *trans);
int iwl_trans_pcie_start_hw(struct iwl_trans *trans);
void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans);
@@ -1064,9 +1095,8 @@ iwl_trans_pcie_dump_data(struct iwl_trans *trans, u32 dump_mask,
const struct iwl_dump_sanitize_ops *sanitize_ops,
void *sanitize_ctx);
int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
- enum iwl_d3_status *status,
- bool test, bool reset);
-int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, bool reset);
+ bool reset);
+int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool reset);
void iwl_trans_pci_interrupts(struct iwl_trans *trans, bool enable);
void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans);
void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
@@ -1081,6 +1111,7 @@ int iwl_pci_gen1_2_probe(struct pci_dev *pdev,
const struct pci_device_id *ent,
const struct iwl_mac_cfg *mac_cfg,
u8 __iomem *hw_base, u32 hw_rev);
+void iwl_pcie_gen1_2_remove(struct iwl_trans *trans);
/* transport gen 1 exported functions */
void iwl_trans_pcie_fw_alive(struct iwl_trans *trans);
@@ -1105,6 +1136,7 @@ int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
struct iwl_dma_ptr *ptr, size_t size);
void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr);
void iwl_pcie_apply_destination(struct iwl_trans *trans);
+int iwl_pcie_gen1_2_activate_nic(struct iwl_trans *trans);
/* transport gen 2 exported functions */
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
@@ -1124,4 +1156,17 @@ int iwl_trans_pcie_copy_imr(struct iwl_trans *trans,
int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
struct iwl_trans_rxq_dma_data *data);
+static inline bool iwl_pcie_gen1_is_pm_supported(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ return trans_pcie->pm_support;
+}
+
+static inline bool iwl_pcie_gen1_2_is_ltr_enabled(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ return trans_pcie->ltr_enabled;
+}
#endif /* __iwl_trans_int_pcie_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans-gen2.c
index 1951be3a30b7..b15c5d486527 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans-gen2.c
@@ -47,7 +47,7 @@ int iwl_pcie_gen2_apm_init(struct iwl_trans *trans)
iwl_pcie_apm_config(trans);
- ret = iwl_finish_nic_init(trans);
+ ret = iwl_trans_activate_nic(trans);
if (ret)
return ret;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
index 327366bf87de..59307b5df441 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
@@ -25,13 +25,52 @@
#include "fw/dbg.h"
#include "fw/api/tx.h"
#include "fw/acpi.h"
-#include "fw/api/tx.h"
#include "mei/iwl-mei.h"
#include "internal.h"
#include "iwl-fh.h"
#include "pcie/iwl-context-info-v2.h"
#include "pcie/utils.h"
+#define IWL_HOST_MON_BLOCK_PEMON 0x00
+#define IWL_HOST_MON_BLOCK_HIPM 0x22
+
+#define IWL_HOST_MON_BLOCK_PEMON_VEC0 0x00
+#define IWL_HOST_MON_BLOCK_PEMON_VEC1 0x01
+#define IWL_HOST_MON_BLOCK_PEMON_WFPM 0x06
+
+static void iwl_dump_host_monitor_block(struct iwl_trans *trans,
+ u32 block, u32 vec, u32 iter)
+{
+ int i;
+
+ IWL_ERR(trans, "Host monitor block 0x%x vector 0x%x\n", block, vec);
+ iwl_write32(trans, CSR_MONITOR_CFG_REG, (block << 8) | vec);
+ for (i = 0; i < iter; i++)
+ IWL_ERR(trans, " value [iter %d]: 0x%08x\n",
+ i, iwl_read32(trans, CSR_MONITOR_STATUS_REG));
+}
+
+static void iwl_pcie_dump_host_monitor(struct iwl_trans *trans)
+{
+ switch (trans->mac_cfg->device_family) {
+ case IWL_DEVICE_FAMILY_22000:
+ case IWL_DEVICE_FAMILY_AX210:
+ IWL_ERR(trans, "CSR_RESET = 0x%x\n",
+ iwl_read32(trans, CSR_RESET));
+ iwl_dump_host_monitor_block(trans, IWL_HOST_MON_BLOCK_PEMON,
+ IWL_HOST_MON_BLOCK_PEMON_VEC0, 15);
+ iwl_dump_host_monitor_block(trans, IWL_HOST_MON_BLOCK_PEMON,
+ IWL_HOST_MON_BLOCK_PEMON_VEC1, 15);
+ iwl_dump_host_monitor_block(trans, IWL_HOST_MON_BLOCK_PEMON,
+ IWL_HOST_MON_BLOCK_PEMON_WFPM, 15);
+ iwl_dump_host_monitor_block(trans, IWL_HOST_MON_BLOCK_HIPM,
+ IWL_HOST_MON_BLOCK_PEMON_VEC0, 1);
+ break;
+ default:
+ return;
+ }
+}
+
/* extended range in FW SRAM */
#define IWL_FW_MEM_EXTENDED_START 0x40000
#define IWL_FW_MEM_EXTENDED_END 0x57FFF
@@ -175,13 +214,13 @@ void iwl_pcie_apm_config(struct iwl_trans *trans)
iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_DISABLED);
pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
- trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
+ trans_pcie->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
- trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
+ trans_pcie->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
IWL_DEBUG_POWER(trans, "L1 %sabled - LTR %sabled\n",
(lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
- trans->ltr_enabled ? "En" : "Dis");
+ trans_pcie->ltr_enabled ? "En" : "Dis");
}
/*
@@ -228,7 +267,7 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
if (trans->mac_cfg->base->pll_cfg)
iwl_set_bit(trans, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);
- ret = iwl_finish_nic_init(trans);
+ ret = iwl_trans_activate_nic(trans);
if (ret)
return ret;
@@ -301,7 +340,7 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
ret = iwl_trans_pcie_sw_reset(trans, true);
if (!ret)
- ret = iwl_finish_nic_init(trans);
+ ret = iwl_trans_activate_nic(trans);
if (WARN_ON(ret)) {
/* Release XTAL ON request */
@@ -1397,17 +1436,10 @@ void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state, bool from_irq)
}
static void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
- bool test, bool reset)
+ bool reset)
{
iwl_disable_interrupts(trans);
- /*
- * in testing mode, the host stays awake and the
- * hardware won't be reset (not even partially)
- */
- if (test)
- return;
-
iwl_pcie_disable_ict(trans);
iwl_pcie_synchronize_irqs(trans);
@@ -1478,7 +1510,7 @@ static int iwl_pcie_d3_handshake(struct iwl_trans *trans, bool suspend)
return ret;
}
-int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, bool reset)
+int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool reset)
{
int ret;
@@ -1491,26 +1523,18 @@ int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, bool reset)
if (ret)
return ret;
- iwl_pcie_d3_complete_suspend(trans, test, reset);
+ iwl_pcie_d3_complete_suspend(trans, reset);
return 0;
}
int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
- enum iwl_d3_status *status,
- bool test, bool reset)
+ bool reset)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 val;
int ret;
- if (test) {
- iwl_enable_interrupts(trans);
- *status = IWL_D3_STATUS_ALIVE;
- ret = 0;
- goto out;
- }
-
if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
iwl_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);
@@ -1518,9 +1542,12 @@ int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
iwl_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- ret = iwl_finish_nic_init(trans);
- if (ret)
+ ret = iwl_trans_activate_nic(trans);
+ if (ret) {
+ IWL_ERR(trans, "Failed to init nic upon resume. err = %d\n",
+ ret);
return ret;
+ }
/*
* Reconfigure IVAR table in case of MSIX or reset ict table in
@@ -1554,18 +1581,13 @@ int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
iwl_read_umac_prph(trans, WFPM_GP2));
val = iwl_read32(trans, CSR_RESET);
- if (val & CSR_RESET_REG_FLAG_NEVO_RESET)
- *status = IWL_D3_STATUS_RESET;
- else
- *status = IWL_D3_STATUS_ALIVE;
-
-out:
- if (*status == IWL_D3_STATUS_ALIVE)
- ret = iwl_pcie_d3_handshake(trans, false);
- else
+ if (val & CSR_RESET_REG_FLAG_NEVO_RESET) {
+ IWL_INFO(trans, "Device was reset during suspend\n");
trans->state = IWL_TRANS_NO_FW;
+ return -ENOENT;
+ }
- return ret;
+ return iwl_pcie_d3_handshake(trans, false);
}
static void
@@ -1744,7 +1766,7 @@ static int iwl_pcie_gen2_force_power_gating(struct iwl_trans *trans)
{
int ret;
- ret = iwl_finish_nic_init(trans);
+ ret = iwl_trans_activate_nic(trans);
if (ret < 0)
return ret;
@@ -1882,7 +1904,7 @@ void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr, u32 val)
iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
}
-void iwl_trans_pcie_op_mode_enter(struct iwl_trans *trans)
+void iwl_pcie_gen1_2_op_mode_enter(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -2000,6 +2022,7 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
free_percpu(trans_pcie->txqs.tso_hdr_page);
}
+ kmem_cache_destroy(trans_pcie->dev_cmd_pool);
iwl_trans_free(trans);
}
@@ -3516,7 +3539,7 @@ iwl_trans_pcie_dump_data(struct iwl_trans *trans, u32 dump_mask,
struct iwl_trans_dump_data *dump_data;
u32 len, num_rbs = 0, monitor_len = 0;
int i, ptr;
- bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) &&
+ bool dump_rbs = iwl_trans_is_fw_error(trans) &&
!trans->mac_cfg->mq_rx_supported &&
dump_mask & BIT(IWL_FW_ERROR_DUMP_RB);
@@ -3684,28 +3707,40 @@ void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
iwl_trans_sync_nmi_with_addr(trans, inta_addr, sw_err_bit);
}
-static int iwl_trans_pcie_set_txcmd_info(const struct iwl_mac_cfg *mac_cfg,
- unsigned int *txcmd_size,
- unsigned int *txcmd_align)
+static int iwl_trans_pcie_alloc_txcmd_pool(struct iwl_trans *trans)
{
- if (!mac_cfg->gen2) {
- *txcmd_size = sizeof(struct iwl_tx_cmd_v6);
- *txcmd_align = sizeof(void *);
- } else if (mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210) {
- *txcmd_size = sizeof(struct iwl_tx_cmd_v9);
- *txcmd_align = 64;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ unsigned int txcmd_size, txcmd_align;
+
+ if (!trans->mac_cfg->gen2) {
+ txcmd_size = sizeof(struct iwl_tx_cmd_v6);
+ txcmd_align = sizeof(void *);
+ } else if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210) {
+ txcmd_size = sizeof(struct iwl_tx_cmd_v9);
+ txcmd_align = 64;
} else {
- *txcmd_size = sizeof(struct iwl_tx_cmd);
- *txcmd_align = 128;
+ txcmd_size = sizeof(struct iwl_tx_cmd);
+ txcmd_align = 128;
}
- *txcmd_size += sizeof(struct iwl_cmd_header);
- *txcmd_size += 36; /* biggest possible 802.11 header */
+ txcmd_size += sizeof(struct iwl_cmd_header);
+ txcmd_size += 36; /* biggest possible 802.11 header */
/* Ensure device TX cmd cannot reach/cross a page boundary in gen2 */
- if (WARN_ON((mac_cfg->gen2 && *txcmd_size >= *txcmd_align)))
+ if (WARN_ON((trans->mac_cfg->gen2 && txcmd_size >= txcmd_align)))
return -EINVAL;
+ snprintf(trans_pcie->dev_cmd_pool_name,
+ sizeof(trans_pcie->dev_cmd_pool_name),
+ "iwl_cmd_pool:%s", dev_name(trans->dev));
+
+ trans_pcie->dev_cmd_pool =
+ kmem_cache_create(trans_pcie->dev_cmd_pool_name,
+ txcmd_size, txcmd_align,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!trans_pcie->dev_cmd_pool)
+ return -ENOMEM;
+
return 0;
}
@@ -3715,18 +3750,12 @@ iwl_trans_pcie_alloc(struct pci_dev *pdev,
struct iwl_trans_info *info, u8 __iomem *hw_base)
{
struct iwl_trans_pcie *trans_pcie, **priv;
- unsigned int txcmd_size, txcmd_align;
struct iwl_trans *trans;
unsigned int bc_tbl_n_entries;
int ret, addr_size;
- ret = iwl_trans_pcie_set_txcmd_info(mac_cfg, &txcmd_size,
- &txcmd_align);
- if (ret)
- return ERR_PTR(ret);
-
trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev,
- mac_cfg, txcmd_size, txcmd_align);
+ mac_cfg);
if (!trans)
return ERR_PTR(-ENOMEM);
@@ -3737,6 +3766,10 @@ iwl_trans_pcie_alloc(struct pci_dev *pdev,
/* Initialize the wait queue for commands */
init_waitqueue_head(&trans_pcie->wait_command_queue);
+ ret = iwl_trans_pcie_alloc_txcmd_pool(trans);
+ if (ret)
+ goto out_free_trans;
+
if (trans->mac_cfg->gen2) {
trans_pcie->txqs.tfd.addr_size = 64;
trans_pcie->txqs.tfd.max_tbs = IWL_TFH_NUM_TBS;
@@ -3756,7 +3789,7 @@ iwl_trans_pcie_alloc(struct pci_dev *pdev,
trans_pcie->txqs.tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
if (!trans_pcie->txqs.tso_hdr_page) {
ret = -ENOMEM;
- goto out_free_trans;
+ goto out_free_txcmd_pool;
}
if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
@@ -3906,6 +3939,8 @@ out_free_ndev:
free_netdev(trans_pcie->napi_dev);
out_free_tso:
free_percpu(trans_pcie->txqs.tso_hdr_page);
+out_free_txcmd_pool:
+ kmem_cache_destroy(trans_pcie->dev_cmd_pool);
out_free_trans:
iwl_trans_free(trans);
return ERR_PTR(ret);
@@ -3999,6 +4034,7 @@ static void get_crf_id(struct iwl_trans *iwl_trans,
/* Read cdb info (also contains the jacket info if needed in the future) */
hw_wfpm_id = iwl_read_umac_prph_no_grab(iwl_trans, WFPM_OTP_CFG1_ADDR);
+
IWL_INFO(iwl_trans, "Detected crf-id 0x%x, cnv-id 0x%x wfpm id 0x%x\n",
info->hw_crf_id, info->hw_cnv_id, hw_wfpm_id);
}
@@ -4014,10 +4050,8 @@ static int map_crf_id(struct iwl_trans *iwl_trans,
u32 val = info->hw_crf_id;
u32 step_id = REG_CRF_ID_STEP(val);
u32 slave_id = REG_CRF_ID_SLAVE(val);
- u32 jacket_id_cnv = REG_CRF_ID_SLAVE(info->hw_cnv_id);
u32 hw_wfpm_id = iwl_read_umac_prph_no_grab(iwl_trans,
WFPM_OTP_CFG1_ADDR);
- u32 jacket_id_wfpm = WFPM_OTP_CFG1_IS_JACKET(hw_wfpm_id);
u32 cdb_id_wfpm = WFPM_OTP_CFG1_IS_CDB(hw_wfpm_id);
/* Map between crf id to rf id */
@@ -4066,21 +4100,12 @@ static int map_crf_id(struct iwl_trans *iwl_trans,
IWL_INFO(iwl_trans, "Adding cdb to rf id\n");
}
- /* Set Jacket capabilities */
- if (jacket_id_wfpm || jacket_id_cnv) {
- info->hw_rf_id += BIT(29);
- IWL_INFO(iwl_trans, "Adding jacket to rf id\n");
- }
-
IWL_INFO(iwl_trans,
"Detected rf-type 0x%x step-id 0x%x slave-id 0x%x from crf id 0x%x\n",
REG_CRF_ID_TYPE(val), step_id, slave_id, info->hw_rf_id);
IWL_INFO(iwl_trans,
- "Detected cdb-id 0x%x jacket-id 0x%x from wfpm id 0x%x\n",
- cdb_id_wfpm, jacket_id_wfpm, hw_wfpm_id);
- IWL_INFO(iwl_trans, "Detected jacket-id 0x%x from cnvi id 0x%x\n",
- jacket_id_cnv, info->hw_cnv_id);
-
+ "Detected cdb-id 0x%x from wfpm id 0x%x\n",
+ cdb_id_wfpm, hw_wfpm_id);
out:
return ret;
}
@@ -4163,7 +4188,7 @@ int iwl_pci_gen1_2_probe(struct pci_dev *pdev,
*/
ret = iwl_pcie_prepare_card_hw(iwl_trans);
if (!ret) {
- ret = iwl_finish_nic_init(iwl_trans);
+ ret = iwl_trans_activate_nic(iwl_trans);
if (ret)
goto out_free_trans;
if (iwl_trans_grab_nic_access(iwl_trans)) {
@@ -4271,3 +4296,63 @@ out_free_trans:
iwl_trans_pcie_free(iwl_trans);
return ret;
}
+
+void iwl_pcie_gen1_2_remove(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ cancel_delayed_work_sync(&trans_pcie->me_recheck_wk);
+
+ iwl_drv_stop(trans->drv);
+
+ iwl_trans_pcie_free(trans);
+}
+
+int iwl_pcie_gen1_2_activate_nic(struct iwl_trans *trans)
+{
+ const struct iwl_mac_cfg *mac_cfg = trans->mac_cfg;
+ u32 poll_ready;
+ int err;
+
+ if (mac_cfg->bisr_workaround) {
+ /* ensure the TOP FSM isn't still in previous reset */
+ mdelay(2);
+ }
+
+ /*
+ * Set "initialization complete" bit to move adapter from
+ * D0U* --> D0A* (powered-up active) state.
+ */
+ if (mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
+ iwl_set_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ |
+ CSR_GP_CNTRL_REG_FLAG_MAC_INIT);
+ poll_ready = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS;
+ } else {
+ iwl_set_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+ poll_ready = CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY;
+ }
+
+ if (mac_cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ udelay(2);
+
+ /*
+ * Wait for clock stabilization; once stabilized, access to
+ * device-internal resources is supported, e.g. iwl_write_prph()
+ * and accesses to uCode SRAM.
+ */
+ err = iwl_poll_bits(trans, CSR_GP_CNTRL, poll_ready, 25000);
+ if (err < 0) {
+ IWL_DEBUG_INFO(trans, "Failed to wake NIC\n");
+
+ iwl_pcie_dump_host_monitor(trans);
+ }
+
+ if (mac_cfg->bisr_workaround) {
+ /* ensure BISR shift has finished */
+ udelay(200);
+ }
+
+ return err;
+}
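iwl_pcie_gen1_2_activate_nic() above encodes the D0U* --> D0A* wake handshake: set the family-specific "init done" bit in CSR_GP_CNTRL, then poll for the matching ready bit for up to 25 ms, dumping the host monitor registers purely as a debug aid on timeout. A condensed sketch of the contract, assuming iwl_poll_bits() returns 0 on success and a negative errno on timeout:

	/* Condensed wake contract; the bit names depend on the device family. */
	iwl_set_bit(trans, CSR_GP_CNTRL, init_done_bit);
	err = iwl_poll_bits(trans, CSR_GP_CNTRL, ready_bit, 25000);
	if (err < 0)
		iwl_pcie_dump_host_monitor(trans);	/* debug dump, no recovery */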
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c
index bb03dad4a300..6e85aa519e1b 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c
@@ -2602,8 +2602,9 @@ static int iwl_trans_pcie_send_hcmd_sync(struct iwl_trans *trans,
}
if (test_bit(STATUS_FW_ERROR, &trans->status)) {
- if (!test_and_clear_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE,
- &trans->status)) {
+ if (trans->suppress_cmd_error_once) {
+ trans->suppress_cmd_error_once = false;
+ } else {
IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
dump_stack();
}
diff --git a/drivers/net/wireless/intel/iwlwifi/tests/Makefile b/drivers/net/wireless/intel/iwlwifi/tests/Makefile
index 1b49241c578f..b996c45d43e7 100644
--- a/drivers/net/wireless/intel/iwlwifi/tests/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/tests/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
-iwlwifi-tests-y += module.o devinfo.o utils.o
+iwlwifi-tests-y += module.o devinfo.o utils.o nvm_parse.o
ccflags-y += -I$(src)/../
diff --git a/drivers/net/wireless/intel/iwlwifi/tests/nvm_parse.c b/drivers/net/wireless/intel/iwlwifi/tests/nvm_parse.c
new file mode 100644
index 000000000000..853911900bfd
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/tests/nvm_parse.c
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * KUnit tests for NVM parse
+ *
+ * Copyright (C) 2025 Intel Corporation
+ */
+#include <kunit/static_stub.h>
+#include <kunit/test.h>
+#include <iwl-nvm-parse.h>
+
+MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
+
+static const struct nvm_flag_case {
+ const char *desc;
+ u16 nvm_flags;
+ u32 reg_rule_flags;
+ u32 set_reg_rule_flags;
+ u32 clear_reg_rule_flags;
+} nvm_flag_cases[] = {
+ {
+ .desc = "Restricting VLP client and AP access",
+ .nvm_flags = 0,
+ .set_reg_rule_flags = NL80211_RRF_NO_6GHZ_VLP_CLIENT,
+ .clear_reg_rule_flags = NL80211_RRF_ALLOW_6GHZ_VLP_AP,
+ },
+ {
+ .desc = "Allow VLP client and AP access",
+ .nvm_flags = NVM_CHANNEL_VLP,
+ .set_reg_rule_flags = NL80211_RRF_ALLOW_6GHZ_VLP_AP,
+ .clear_reg_rule_flags = NL80211_RRF_NO_6GHZ_VLP_CLIENT,
+ },
+ {
+ .desc = "Allow VLP client access, while restricting AP access",
+ .nvm_flags = NVM_CHANNEL_VLP | NVM_CHANNEL_VLP_AP_NOT_ALLOWED,
+ .set_reg_rule_flags = 0,
+ .clear_reg_rule_flags = NL80211_RRF_ALLOW_6GHZ_VLP_AP |
+ NL80211_RRF_NO_6GHZ_VLP_CLIENT,
+ },
+};
+
+KUNIT_ARRAY_PARAM_DESC(nvm_flag, nvm_flag_cases, desc)
+
+static void test_nvm_flags(struct kunit *test)
+{
+ const struct nvm_flag_case *params = test->param_value;
+ struct iwl_reg_capa reg_capa = {};
+ u32 flags = 0;
+
+ flags = iwl_nvm_get_regdom_bw_flags(NULL, 0, params->nvm_flags,
+ reg_capa);
+
+ if ((params->set_reg_rule_flags & flags) != params->set_reg_rule_flags)
+ KUNIT_FAIL(test, "Expected set bits:0x%08x flags:0x%08x\n",
+ params->set_reg_rule_flags, flags);
+
+ if (params->clear_reg_rule_flags & flags)
+ KUNIT_FAIL(test, "Expected clear bits:0x%08x flags:0x%08x\n",
+ params->clear_reg_rule_flags, flags);
+}
+
+static struct kunit_case nvm_flags_test_cases[] = {
+ KUNIT_CASE_PARAM(test_nvm_flags,
+ nvm_flag_gen_params),
+ {},
+};
+
+static struct kunit_suite nvm_flags_suite = {
+ .name = "iwlwifi-nvm_flags",
+ .test_cases = nvm_flags_test_cases,
+};
+
+kunit_test_suite(nvm_flags_suite);
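Extending the parameterized suite only takes another entry in nvm_flag_cases[]; a hedged example (the expected flags assume NVM_CHANNEL_VLP_AP_NOT_ALLOWED is a no-op when NVM_CHANNEL_VLP is absent, which this diff does not verify):

	{
		/* Hypothetical extra case, not part of this change. */
		.desc = "Restrict VLP AP access without VLP support",
		.nvm_flags = NVM_CHANNEL_VLP_AP_NOT_ALLOWED,
		.set_reg_rule_flags = NL80211_RRF_NO_6GHZ_VLP_CLIENT,
		.clear_reg_rule_flags = NL80211_RRF_ALLOW_6GHZ_VLP_AP,
	},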
diff --git a/drivers/net/wireless/intersil/p54/txrx.c b/drivers/net/wireless/intersil/p54/txrx.c
index 2deb1bb54f24..1294a1d6528e 100644
--- a/drivers/net/wireless/intersil/p54/txrx.c
+++ b/drivers/net/wireless/intersil/p54/txrx.c
@@ -317,7 +317,7 @@ static void p54_pspoll_workaround(struct p54_common *priv, struct sk_buff *skb)
tim_len = tim[1];
tim_ie = (struct ieee80211_tim_ie *) &tim[2];
- new_psm = ieee80211_check_tim(tim_ie, tim_len, priv->aid);
+ new_psm = ieee80211_check_tim(tim_ie, tim_len, priv->aid, false);
if (new_psm != priv->powersave_override) {
priv->powersave_override = new_psm;
p54_set_ps(priv);
diff --git a/drivers/net/wireless/marvell/libertas/if_sdio.c b/drivers/net/wireless/marvell/libertas/if_sdio.c
index 524034699972..fc5318035822 100644
--- a/drivers/net/wireless/marvell/libertas/if_sdio.c
+++ b/drivers/net/wireless/marvell/libertas/if_sdio.c
@@ -1181,7 +1181,8 @@ static int if_sdio_probe(struct sdio_func *func,
spin_lock_init(&card->lock);
INIT_LIST_HEAD(&card->packets);
- card->workqueue = alloc_workqueue("libertas_sdio", WQ_MEM_RECLAIM, 0);
+ card->workqueue = alloc_workqueue("libertas_sdio",
+ WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
if (unlikely(!card->workqueue)) {
ret = -ENOMEM;
goto err_queue;
diff --git a/drivers/net/wireless/marvell/libertas/if_spi.c b/drivers/net/wireless/marvell/libertas/if_spi.c
index b722a6587fd3..8a2504a62840 100644
--- a/drivers/net/wireless/marvell/libertas/if_spi.c
+++ b/drivers/net/wireless/marvell/libertas/if_spi.c
@@ -1153,7 +1153,8 @@ static int if_spi_probe(struct spi_device *spi)
priv->fw_ready = 1;
/* Initialize interrupt handling stuff. */
- card->workqueue = alloc_workqueue("libertas_spi", WQ_MEM_RECLAIM, 0);
+ card->workqueue = alloc_workqueue("libertas_spi",
+ WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
if (!card->workqueue) {
err = -ENOMEM;
goto remove_card;
diff --git a/drivers/net/wireless/marvell/libertas_tf/main.c b/drivers/net/wireless/marvell/libertas_tf/main.c
index d1067874428f..fb20fe31cd36 100644
--- a/drivers/net/wireless/marvell/libertas_tf/main.c
+++ b/drivers/net/wireless/marvell/libertas_tf/main.c
@@ -708,7 +708,7 @@ EXPORT_SYMBOL_GPL(lbtf_bcn_sent);
static int __init lbtf_init_module(void)
{
lbtf_deb_enter(LBTF_DEB_MAIN);
- lbtf_wq = alloc_workqueue("libertastf", WQ_MEM_RECLAIM, 0);
+ lbtf_wq = alloc_workqueue("libertastf", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
if (lbtf_wq == NULL) {
printk(KERN_ERR "libertastf: couldn't create workqueue\n");
return -ENOMEM;
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index 4c8c7a5fdf23..be23a29e7de0 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -686,10 +686,9 @@ static void mwifiex_reg_notifier(struct wiphy *wiphy,
return;
}
- /* Don't send world or same regdom info to firmware */
- if (strncmp(request->alpha2, "00", 2) &&
- strncmp(request->alpha2, adapter->country_code,
- sizeof(request->alpha2))) {
+ /* Don't send same regdom info to firmware */
+ if (strncmp(request->alpha2, adapter->country_code,
+ sizeof(request->alpha2)) != 0) {
memcpy(adapter->country_code, request->alpha2,
sizeof(request->alpha2));
mwifiex_send_domain_info_cmd_fw(wiphy);
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index 1ec069bc8ea1..ff177b06f42d 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -494,6 +494,11 @@ static void mwifiex_free_adapter(struct mwifiex_adapter *adapter)
return;
}
+ if (adapter->rgpower_data) {
+ release_firmware(adapter->rgpower_data);
+ adapter->rgpower_data = NULL;
+ }
+
mwifiex_unregister(adapter);
pr_debug("info: %s: free adapter\n", __func__);
}
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index 9ac36bef980e..27559e2ddc31 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -982,6 +982,7 @@ struct mwifiex_adapter {
u8 country_code[IEEE80211_COUNTRY_STRING_LEN];
u16 max_mgmt_ie_index;
const struct firmware *cal_data;
+ const struct firmware *rgpower_data;
struct device_node *dt_node;
/* 11AC */
@@ -1579,6 +1580,8 @@ int mwifiex_11h_handle_event_chanswann(struct mwifiex_private *priv);
int mwifiex_dnld_dt_cfgdata(struct mwifiex_private *priv,
struct device_node *node, const char *prefix);
void mwifiex_dnld_txpwr_table(struct mwifiex_private *priv);
+int mwifiex_send_rgpower_table(struct mwifiex_private *priv, const u8 *data,
+ const size_t size);
extern const struct ethtool_ops mwifiex_ethtool_ops;
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
index c93281f5a47c..dcca71158fc6 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
@@ -1483,6 +1483,119 @@ int mwifiex_dnld_dt_cfgdata(struct mwifiex_private *priv,
return 0;
}
+static int mwifiex_rgpower_table_advance_to_content(u8 **pos, const u8 *data,
+ const size_t size)
+{
+ while (*pos - data < size) {
+ /* skip whitespace, NUL bytes and empty lines */
+ if (**pos == '\r' || **pos == '\n' || **pos == '\0' ||
+ isspace(**pos)) {
+ (*pos)++;
+ continue;
+ }
+ /* skip line comments */
+ if (**pos == '#') {
+ *pos = strchr(*pos, '\n');
+ if (!*pos)
+ return -EINVAL;
+ (*pos)++;
+ continue;
+ }
+ return 0;
+ }
+ return 0;
+}
+
+int mwifiex_send_rgpower_table(struct mwifiex_private *priv, const u8 *data,
+ const size_t size)
+{
+ int ret = 0;
+ bool start_raw = false;
+ u8 *ptr, *token, *pos = NULL;
+ u8 *_data __free(kfree) = NULL;
+ struct mwifiex_adapter *adapter = priv->adapter;
+ struct mwifiex_ds_misc_cmd *hostcmd __free(kfree) = NULL;
+
+ hostcmd = kzalloc(sizeof(*hostcmd), GFP_KERNEL);
+ if (!hostcmd)
+ return -ENOMEM;
+
+ _data = kmemdup(data, size, GFP_KERNEL);
+ if (!_data)
+ return -ENOMEM;
+
+ pos = _data;
+ ptr = hostcmd->cmd;
+ while ((pos - _data) < size) {
+ ret = mwifiex_rgpower_table_advance_to_content(&pos, _data, size);
+ if (ret) {
+ mwifiex_dbg(
+ adapter, ERROR,
+ "%s: failed to advance to content in rgpower table\n",
+ __func__);
+ return ret;
+ }
+
+ if (*pos == '}' && start_raw) {
+ hostcmd->len = get_unaligned_le16(&hostcmd->cmd[2]);
+ ret = mwifiex_send_cmd(priv, 0, 0, 0, hostcmd, false);
+ if (ret) {
+ mwifiex_dbg(adapter, ERROR,
+ "%s: failed to send hostcmd %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ memset(hostcmd->cmd, 0, MWIFIEX_SIZE_OF_CMD_BUFFER);
+ ptr = hostcmd->cmd;
+ start_raw = false;
+ pos++;
+ continue;
+ }
+
+ if (!start_raw) {
+ pos = strchr(pos, '=');
+ if (pos) {
+ pos = strchr(pos, '{');
+ if (pos) {
+ start_raw = true;
+ pos++;
+ continue;
+ }
+ }
+ mwifiex_dbg(adapter, ERROR,
+ "%s: syntax error in hostcmd\n", __func__);
+ return -EINVAL;
+ }
+
+ if (start_raw) {
+ while ((*pos != '\r' && *pos != '\n') &&
+ (token = strsep((char **)&pos, " "))) {
+ if (ptr - hostcmd->cmd >=
+ MWIFIEX_SIZE_OF_CMD_BUFFER) {
+ mwifiex_dbg(
+ adapter, ERROR,
+ "%s: hostcmd is larger than %d, aborting\n",
+ __func__, MWIFIEX_SIZE_OF_CMD_BUFFER);
+ return -ENOMEM;
+ }
+
+ ret = kstrtou8(token, 16, ptr);
+ if (ret < 0) {
+ mwifiex_dbg(
+ adapter, ERROR,
+ "%s: failed to parse hostcmd %d token: %s\n",
+ __func__, ret, token);
+ return ret;
+ }
+ ptr++;
+ }
+ }
+ }
+
+ return ret;
+}
+
/* This function prepares command of set_cfg_data. */
static int mwifiex_cmd_cfg_data(struct mwifiex_private *priv,
struct host_cmd_ds_command *cmd, void *data_buf)
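mwifiex_send_rgpower_table() above parses a plain-text table: '#' starts a line comment, each command is introduced by '<name>={', followed by space-separated hex bytes, and terminated by '}' at the start of a line; bytes 2-3 of each command carry its little-endian total length. A hedged sample of the accepted syntax (all values are illustrative, not a real power table):

	# rgpower_XX.bin - illustrative only
	rgpower_table={
	5f 01 0c 00 01 00 00 00 02 00 03 00
	}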
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
index f79589cafe57..ef6722ffdc74 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
@@ -180,7 +180,7 @@ int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
return mwifiex_update_bss_desc_with_ie(priv->adapter, bss_desc);
}
-void mwifiex_dnld_txpwr_table(struct mwifiex_private *priv)
+static void mwifiex_dnld_dt_txpwr_table(struct mwifiex_private *priv)
{
if (priv->adapter->dt_node) {
char txpwr[] = {"marvell,00_txpwrlimit"};
@@ -190,6 +190,62 @@ void mwifiex_dnld_txpwr_table(struct mwifiex_private *priv)
}
}
+static int mwifiex_request_rgpower_table(struct mwifiex_private *priv)
+{
+ struct mwifiex_802_11d_domain_reg *domain_info = &priv->adapter->domain_reg;
+ struct mwifiex_adapter *adapter = priv->adapter;
+ char rgpower_table_name[30];
+ char country_code[3];
+
+ strscpy(country_code, domain_info->country_code, sizeof(country_code));
+
+ /* The world regulatory domain "00" uses "WW" as its country code */
+ if (strncmp(country_code, "00", 2) == 0)
+ strscpy(country_code, "WW", sizeof(country_code));
+
+ snprintf(rgpower_table_name, sizeof(rgpower_table_name),
+ "nxp/rgpower_%s.bin", country_code);
+
+ mwifiex_dbg(adapter, INFO, "info: %s: requesting regulatory power table %s\n",
+ __func__, rgpower_table_name);
+
+ if (adapter->rgpower_data) {
+ release_firmware(adapter->rgpower_data);
+ adapter->rgpower_data = NULL;
+ }
+
+ if ((request_firmware(&adapter->rgpower_data, rgpower_table_name,
+ adapter->dev))) {
+ mwifiex_dbg(
+ adapter, INFO,
+ "info: %s: failed to request regulatory power table\n",
+ __func__);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int mwifiex_dnld_rgpower_table(struct mwifiex_private *priv)
+{
+ int ret;
+
+ ret = mwifiex_request_rgpower_table(priv);
+ if (ret)
+ return ret;
+
+ return mwifiex_send_rgpower_table(priv, priv->adapter->rgpower_data->data,
+ priv->adapter->rgpower_data->size);
+}
+
+void mwifiex_dnld_txpwr_table(struct mwifiex_private *priv)
+{
+ if (mwifiex_dnld_rgpower_table(priv) == 0)
+ return;
+
+ mwifiex_dnld_dt_txpwr_table(priv);
+}
+
static int mwifiex_process_country_ie(struct mwifiex_private *priv,
struct cfg80211_bss *bss)
{
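mwifiex_dnld_txpwr_table() now tries a per-country regulatory power table via the firmware loader first and falls back to the device-tree marvell,XX_txpwrlimit data only when that request fails. The expected firmware layout would look like this (paths are illustrative):

	/lib/firmware/nxp/rgpower_US.bin	# matched when the 802.11d country code is "US"
	/lib/firmware/nxp/rgpower_WW.bin	# world regulatory domain "00"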
diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c
index 07c386c7b4d0..936ab1ca9246 100644
--- a/drivers/net/wireless/mediatek/mt76/agg-rx.c
+++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c
@@ -173,6 +173,8 @@ void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames)
if (ackp == IEEE80211_QOS_CTL_ACK_POLICY_NOACK)
return;
+ if (wcid->def_wcid)
+ wcid = wcid->def_wcid;
tid = rcu_dereference(wcid->aggr[tidno]);
if (!tid)
return;
diff --git a/drivers/net/wireless/mediatek/mt76/channel.c b/drivers/net/wireless/mediatek/mt76/channel.c
index 77b75792eb48..130af1b254db 100644
--- a/drivers/net/wireless/mediatek/mt76/channel.c
+++ b/drivers/net/wireless/mediatek/mt76/channel.c
@@ -314,21 +314,24 @@ void mt76_put_vif_phy_link(struct mt76_phy *phy, struct ieee80211_vif *vif,
kfree(mlink);
}
-static void mt76_roc_complete(struct mt76_phy *phy)
+void mt76_roc_complete(struct mt76_phy *phy)
{
struct mt76_vif_link *mlink = phy->roc_link;
+ struct mt76_dev *dev = phy->dev;
if (!phy->roc_vif)
return;
if (mlink)
mlink->mvif->roc_phy = NULL;
- if (phy->main_chandef.chan)
+ if (phy->main_chandef.chan &&
+ !test_bit(MT76_MCU_RESET, &dev->phy.state))
mt76_set_channel(phy, &phy->main_chandef, false);
mt76_put_vif_phy_link(phy, phy->roc_vif, phy->roc_link);
phy->roc_vif = NULL;
phy->roc_link = NULL;
- ieee80211_remain_on_channel_expired(phy->hw);
+ if (!test_bit(MT76_MCU_RESET, &dev->phy.state))
+ ieee80211_remain_on_channel_expired(phy->hw);
}
void mt76_roc_complete_work(struct work_struct *work)
@@ -351,6 +354,7 @@ void mt76_abort_roc(struct mt76_phy *phy)
mt76_roc_complete(phy);
mutex_unlock(&dev->mutex);
}
+EXPORT_SYMBOL_GPL(mt76_abort_roc);
int mt76_remain_on_channel(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_channel *chan, int duration,
@@ -368,7 +372,8 @@ int mt76_remain_on_channel(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mutex_lock(&dev->mutex);
- if (phy->roc_vif || dev->scan.phy == phy) {
+ if (phy->roc_vif || dev->scan.phy == phy ||
+ test_bit(MT76_MCU_RESET, &dev->phy.state)) {
ret = -EBUSY;
goto out;
}
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 87f531297f85..1fa7de1d2c45 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -186,6 +186,37 @@ mt76_free_pending_rxwi(struct mt76_dev *dev)
EXPORT_SYMBOL_GPL(mt76_free_pending_rxwi);
static void
+mt76_dma_queue_magic_cnt_init(struct mt76_dev *dev, struct mt76_queue *q)
+{
+ if (!mt76_queue_is_wed_rro(q))
+ return;
+
+ q->magic_cnt = 0;
+ if (mt76_queue_is_wed_rro_ind(q)) {
+ struct mt76_wed_rro_desc *rro_desc;
+ u32 data1 = FIELD_PREP(RRO_IND_DATA1_MAGIC_CNT_MASK,
+ MT_DMA_WED_IND_CMD_CNT - 1);
+ int i;
+
+ rro_desc = (struct mt76_wed_rro_desc *)q->desc;
+ for (i = 0; i < q->ndesc; i++) {
+ struct mt76_wed_rro_ind *cmd;
+
+ cmd = (struct mt76_wed_rro_ind *)&rro_desc[i];
+ cmd->data1 = cpu_to_le32(data1);
+ }
+ } else if (mt76_queue_is_wed_rro_rxdmad_c(q)) {
+ struct mt76_rro_rxdmad_c *dmad = (void *)q->desc;
+ u32 data3 = FIELD_PREP(RRO_RXDMAD_DATA3_MAGIC_CNT_MASK,
+ MT_DMA_MAGIC_CNT - 1);
+ int i;
+
+ for (i = 0; i < q->ndesc; i++)
+ dmad[i].data3 = cpu_to_le32(data3);
+ }
+}
+
+static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
Q_WRITE(q, desc_base, q->desc_dma);
@@ -197,13 +228,14 @@ mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
q->tail = q->head;
}
-void __mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
- bool reset_idx)
+void mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
+ bool reset_idx)
{
if (!q || !q->ndesc)
return;
- if (!mt76_queue_is_wed_rro_ind(q)) {
+ if (!mt76_queue_is_wed_rro_ind(q) &&
+ !mt76_queue_is_wed_rro_rxdmad_c(q)) {
int i;
/* clear descriptors */
@@ -211,27 +243,26 @@ void __mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
}
+ mt76_dma_queue_magic_cnt_init(dev, q);
if (reset_idx) {
- Q_WRITE(q, cpu_idx, 0);
+ if (mt76_queue_is_emi(q))
+ *q->emi_cpu_idx = 0;
+ else
+ Q_WRITE(q, cpu_idx, 0);
Q_WRITE(q, dma_idx, 0);
}
mt76_dma_sync_idx(dev, q);
}
-void mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
-{
- __mt76_dma_queue_reset(dev, q, true);
-}
-
static int
mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
struct mt76_queue_buf *buf, void *data)
{
struct mt76_queue_entry *entry = &q->entry[q->head];
struct mt76_txwi_cache *txwi = NULL;
+ u32 buf1 = 0, ctrl, info = 0;
struct mt76_desc *desc;
int idx = q->head;
- u32 buf1 = 0, ctrl;
int rx_token;
if (mt76_queue_is_wed_rro_ind(q)) {
@@ -240,6 +271,9 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
rro_desc = (struct mt76_wed_rro_desc *)q->desc;
data = &rro_desc[q->head];
goto done;
+ } else if (mt76_queue_is_wed_rro_rxdmad_c(q)) {
+ data = &q->desc[q->head];
+ goto done;
}
desc = &q->desc[q->head];
@@ -248,7 +282,7 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
buf1 = FIELD_PREP(MT_DMA_CTL_SDP0_H, buf->addr >> 32);
#endif
- if (mt76_queue_is_wed_rx(q)) {
+ if (mt76_queue_is_wed_rx(q) || mt76_queue_is_wed_rro_data(q)) {
txwi = mt76_get_rxwi(dev);
if (!txwi)
return -ENOMEM;
@@ -261,12 +295,26 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
buf1 |= FIELD_PREP(MT_DMA_CTL_TOKEN, rx_token);
ctrl |= MT_DMA_CTL_TO_HOST;
+
+ txwi->qid = q - dev->q_rx;
+ }
+
+ if (mt76_queue_is_wed_rro_msdu_pg(q) &&
+ dev->drv->rx_rro_add_msdu_page) {
+ if (dev->drv->rx_rro_add_msdu_page(dev, q, buf->addr, data))
+ return -ENOMEM;
+ }
+
+ if (q->flags & MT_QFLAG_WED_RRO_EN) {
+ info |= FIELD_PREP(MT_DMA_MAGIC_MASK, q->magic_cnt);
+ if ((q->head + 1) == q->ndesc)
+ q->magic_cnt = (q->magic_cnt + 1) % MT_DMA_MAGIC_CNT;
}
WRITE_ONCE(desc->buf0, cpu_to_le32(buf->addr));
WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
- WRITE_ONCE(desc->info, 0);
+ WRITE_ONCE(desc->info, cpu_to_le32(info));
done:
entry->dma_addr[0] = buf->addr;
@@ -375,7 +423,10 @@ static void
mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
wmb();
- Q_WRITE(q, cpu_idx, q->head);
+ if (mt76_queue_is_emi(q))
+ *q->emi_cpu_idx = cpu_to_le16(q->head);
+ else
+ Q_WRITE(q, cpu_idx, q->head);
}
static void
@@ -419,15 +470,61 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
}
static void *
+mt76_dma_get_rxdmad_c_buf(struct mt76_dev *dev, struct mt76_queue *q,
+ int idx, int *len, bool *more)
+{
+ struct mt76_queue_entry *e = &q->entry[idx];
+ struct mt76_rro_rxdmad_c *dmad = e->buf;
+ u32 data1 = le32_to_cpu(dmad->data1);
+ u32 data2 = le32_to_cpu(dmad->data2);
+ struct mt76_txwi_cache *t;
+ u16 rx_token_id;
+ u8 ind_reason;
+ void *buf;
+
+ rx_token_id = FIELD_GET(RRO_RXDMAD_DATA2_RX_TOKEN_ID_MASK, data2);
+ t = mt76_rx_token_release(dev, rx_token_id);
+ if (!t)
+ return ERR_PTR(-EAGAIN);
+
+ q = &dev->q_rx[t->qid];
+ dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr,
+ SKB_WITH_OVERHEAD(q->buf_size),
+ page_pool_get_dma_dir(q->page_pool));
+
+ if (len)
+ *len = FIELD_GET(RRO_RXDMAD_DATA1_SDL0_MASK, data1);
+ if (more)
+ *more = !FIELD_GET(RRO_RXDMAD_DATA1_LS_MASK, data1);
+
+ buf = t->ptr;
+ ind_reason = FIELD_GET(RRO_RXDMAD_DATA2_IND_REASON_MASK, data2);
+ if (ind_reason == MT_DMA_WED_IND_REASON_REPEAT ||
+ ind_reason == MT_DMA_WED_IND_REASON_OLDPKT) {
+ mt76_put_page_pool_buf(buf, false);
+ buf = ERR_PTR(-EAGAIN);
+ }
+ t->ptr = NULL;
+ t->dma_addr = 0;
+
+ mt76_put_rxwi(dev, t);
+
+ return buf;
+}
+
+static void *
mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
- int *len, u32 *info, bool *more, bool *drop)
+ int *len, u32 *info, bool *more, bool *drop, bool flush)
{
struct mt76_queue_entry *e = &q->entry[idx];
struct mt76_desc *desc = &q->desc[idx];
u32 ctrl, desc_info, buf1;
void *buf = e->buf;
- if (mt76_queue_is_wed_rro_ind(q))
+ if (mt76_queue_is_wed_rro_rxdmad_c(q) && !flush)
+ buf = mt76_dma_get_rxdmad_c_buf(dev, q, idx, len, more);
+
+ if (mt76_queue_is_wed_rro(q))
goto done;
ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
@@ -482,20 +579,50 @@ mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
if (!q->queued)
return NULL;
- if (mt76_queue_is_wed_rro_data(q))
- return NULL;
+ if (mt76_queue_is_wed_rro_data(q) || mt76_queue_is_wed_rro_msdu_pg(q))
+ goto done;
+
+ if (mt76_queue_is_wed_rro_ind(q)) {
+ struct mt76_wed_rro_ind *cmd;
+ u8 magic_cnt;
+
+ if (flush)
+ goto done;
+
+ cmd = q->entry[idx].buf;
+ magic_cnt = FIELD_GET(RRO_IND_DATA1_MAGIC_CNT_MASK,
+ le32_to_cpu(cmd->data1));
+ if (magic_cnt != q->magic_cnt)
+ return NULL;
+
+ if (q->tail == q->ndesc - 1)
+ q->magic_cnt = (q->magic_cnt + 1) % MT_DMA_WED_IND_CMD_CNT;
+ } else if (mt76_queue_is_wed_rro_rxdmad_c(q)) {
+ struct mt76_rro_rxdmad_c *dmad;
+ u16 magic_cnt;
- if (!mt76_queue_is_wed_rro_ind(q)) {
+ if (flush)
+ goto done;
+
+ dmad = q->entry[idx].buf;
+ magic_cnt = FIELD_GET(RRO_RXDMAD_DATA3_MAGIC_CNT_MASK,
+ le32_to_cpu(dmad->data3));
+ if (magic_cnt != q->magic_cnt)
+ return NULL;
+
+ if (q->tail == q->ndesc - 1)
+ q->magic_cnt = (q->magic_cnt + 1) % MT_DMA_MAGIC_CNT;
+ } else {
if (flush)
q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE);
else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
return NULL;
}
-
+done:
q->tail = (q->tail + 1) % q->ndesc;
q->queued--;
- return mt76_dma_get_buf(dev, q, idx, len, info, more, drop);
+ return mt76_dma_get_buf(dev, q, idx, len, info, more, drop, flush);
}
static int
@@ -646,7 +773,8 @@ mt76_dma_rx_fill_buf(struct mt76_dev *dev, struct mt76_queue *q,
void *buf = NULL;
int offset;
- if (mt76_queue_is_wed_rro_ind(q))
+ if (mt76_queue_is_wed_rro_ind(q) ||
+ mt76_queue_is_wed_rro_rxdmad_c(q))
goto done;
buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
@@ -676,9 +804,6 @@ int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
{
int frames;
- if (!q->ndesc)
- return 0;
-
spin_lock_bh(&q->lock);
frames = mt76_dma_rx_fill_buf(dev, q, allow_direct);
spin_unlock_bh(&q->lock);
@@ -708,19 +833,7 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
if (!q->desc)
return -ENOMEM;
- if (mt76_queue_is_wed_rro_ind(q)) {
- struct mt76_wed_rro_desc *rro_desc;
- int i;
-
- rro_desc = (struct mt76_wed_rro_desc *)q->desc;
- for (i = 0; i < q->ndesc; i++) {
- struct mt76_wed_rro_ind *cmd;
-
- cmd = (struct mt76_wed_rro_ind *)&rro_desc[i];
- cmd->magic_cnt = MT_DMA_WED_IND_CMD_CNT - 1;
- }
- }
-
+ mt76_dma_queue_magic_cnt_init(dev, q);
size = q->ndesc * sizeof(*q->entry);
q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
if (!q->entry)
@@ -740,7 +853,10 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
return 0;
}
- mt76_dma_queue_reset(dev, q);
+ /* The HW-specific driver is expected to reset brand-new EMI queues
+ * itself, since it has to set the CPU index pointer first.
+ */
+ mt76_dma_queue_reset(dev, q, !mt76_queue_is_emi(q));
return 0;
}
@@ -783,7 +899,8 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
if (!q->ndesc)
return;
- if (!mt76_queue_is_wed_rro_ind(q)) {
+ if (!mt76_queue_is_wed_rro_ind(q) &&
+ !mt76_queue_is_wed_rro_rxdmad_c(q)) {
int i;
for (i = 0; i < q->ndesc; i++)
@@ -843,8 +960,9 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
bool allow_direct = !mt76_queue_is_wed_rx(q);
bool more;
- if (IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) &&
- mt76_queue_is_wed_tx_free(q)) {
+ if ((q->flags & MT_QFLAG_WED_RRO_EN) ||
+ (IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) &&
+ mt76_queue_is_wed_tx_free(q))) {
dma_idx = Q_READ(q, dma_idx);
check_ddone = true;
}
@@ -866,6 +984,20 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
if (!data)
break;
+ if (PTR_ERR(data) == -EAGAIN) {
+ done++;
+ continue;
+ }
+
+ if (mt76_queue_is_wed_rro_ind(q) && dev->drv->rx_rro_ind_process)
+ dev->drv->rx_rro_ind_process(dev, data);
+
+ if (mt76_queue_is_wed_rro(q) &&
+ !mt76_queue_is_wed_rro_rxdmad_c(q)) {
+ done++;
+ continue;
+ }
+
if (drop)
goto free_frag;
@@ -943,6 +1075,15 @@ int mt76_dma_rx_poll(struct napi_struct *napi, int budget)
}
EXPORT_SYMBOL_GPL(mt76_dma_rx_poll);
+static void
+mt76_dma_rx_queue_init(struct mt76_dev *dev, enum mt76_rxq_id qid,
+ int (*poll)(struct napi_struct *napi, int budget))
+{
+ netif_napi_add(dev->napi_dev, &dev->napi[qid], poll);
+ mt76_dma_rx_fill_buf(dev, &dev->q_rx[qid], false);
+ napi_enable(&dev->napi[qid]);
+}
+
static int
mt76_dma_init(struct mt76_dev *dev,
int (*poll)(struct napi_struct *napi, int budget))
@@ -975,9 +1116,10 @@ mt76_dma_init(struct mt76_dev *dev,
init_completion(&dev->mmio.wed_reset_complete);
mt76_for_each_q_rx(dev, i) {
- netif_napi_add(dev->napi_dev, &dev->napi[i], poll);
- mt76_dma_rx_fill_buf(dev, &dev->q_rx[i], false);
- napi_enable(&dev->napi[i]);
+ if (mt76_queue_is_wed_rro(&dev->q_rx[i]))
+ continue;
+
+ mt76_dma_rx_queue_init(dev, i, poll);
}
return 0;
@@ -990,6 +1132,7 @@ static const struct mt76_queue_ops mt76_dma_ops = {
.tx_queue_skb_raw = mt76_dma_tx_queue_skb_raw,
.tx_queue_skb = mt76_dma_tx_queue_skb,
.tx_cleanup = mt76_dma_tx_cleanup,
+ .rx_queue_init = mt76_dma_rx_queue_init,
.rx_cleanup = mt76_dma_rx_cleanup,
.rx_reset = mt76_dma_rx_reset,
.kick = mt76_dma_kick_queue,
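The RRO rings above replace the DMA-done bit with a generation ("magic") counter: the CPU stamps each refilled descriptor with the queue's current counter, and on dequeue a descriptor is only accepted when its stamp matches, with the counter advancing modulo MT_DMA_MAGIC_CNT on every ring wrap. A condensed consumer-side sketch mirroring mt76_dma_dequeue() above:

	/* Accept a rxdmad_c descriptor only if HW wrote the current generation. */
	u16 magic = FIELD_GET(RRO_RXDMAD_DATA3_MAGIC_CNT_MASK,
			      le32_to_cpu(dmad->data3));

	if (magic != q->magic_cnt)
		return NULL;			/* not yet produced by HW */

	if (q->tail == q->ndesc - 1)		/* wrap: expect next generation */
		q->magic_cnt = (q->magic_cnt + 1) % MT_DMA_MAGIC_CNT;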
diff --git a/drivers/net/wireless/mediatek/mt76/dma.h b/drivers/net/wireless/mediatek/mt76/dma.h
index e3ddc7a83757..17a80e1757fc 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.h
+++ b/drivers/net/wireless/mediatek/mt76/dma.h
@@ -31,7 +31,12 @@
#define MT_DMA_CTL_PN_CHK_FAIL BIT(13)
#define MT_DMA_CTL_VER_MASK BIT(7)
-#define MT_DMA_RRO_EN BIT(13)
+#define MT_DMA_SDP0 GENMASK(15, 0)
+#define MT_DMA_TOKEN_ID GENMASK(31, 16)
+#define MT_DMA_MAGIC_MASK GENMASK(31, 28)
+#define MT_DMA_RRO_EN BIT(13)
+
+#define MT_DMA_MAGIC_CNT 16
#define MT_DMA_WED_IND_CMD_CNT 8
#define MT_DMA_WED_IND_REASON GENMASK(15, 12)
@@ -53,6 +58,21 @@ struct mt76_wed_rro_desc {
__le32 buf1;
} __packed __aligned(4);
+/* data1 */
+#define RRO_RXDMAD_DATA1_LS_MASK BIT(30)
+#define RRO_RXDMAD_DATA1_SDL0_MASK GENMASK(29, 16)
+/* data2 */
+#define RRO_RXDMAD_DATA2_RX_TOKEN_ID_MASK GENMASK(31, 16)
+#define RRO_RXDMAD_DATA2_IND_REASON_MASK GENMASK(15, 12)
+/* data3 */
+#define RRO_RXDMAD_DATA3_MAGIC_CNT_MASK GENMASK(31, 28)
+struct mt76_rro_rxdmad_c {
+ __le32 data0;
+ __le32 data1;
+ __le32 data2;
+ __le32 data3;
+};
+
enum mt76_qsel {
MT_QSEL_MGMT,
MT_QSEL_HCCA,
@@ -81,14 +101,13 @@ void mt76_dma_attach(struct mt76_dev *dev);
void mt76_dma_cleanup(struct mt76_dev *dev);
int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
bool allow_direct);
-void __mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
- bool reset_idx);
-void mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q);
+void mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
+ bool reset_idx);
static inline void
mt76_dma_reset_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
- dev->queue_ops->reset_q(dev, q);
+ dev->queue_ops->reset_q(dev, q, true);
if (mtk_wed_device_active(&dev->mmio.wed))
mt76_wed_dma_setup(dev, q, true);
}
diff --git a/drivers/net/wireless/mediatek/mt76/eeprom.c b/drivers/net/wireless/mediatek/mt76/eeprom.c
index 443517d06c9f..a987c5e4eff6 100644
--- a/drivers/net/wireless/mediatek/mt76/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/eeprom.c
@@ -163,13 +163,16 @@ static int mt76_get_of_eeprom(struct mt76_dev *dev, void *eep, int len)
return mt76_get_of_data_from_nvmem(dev, eep, "eeprom", len);
}
-void
+int
mt76_eeprom_override(struct mt76_phy *phy)
{
struct mt76_dev *dev = phy->dev;
struct device_node *np = dev->dev->of_node;
+ int err;
- of_get_mac_address(np, phy->macaddr);
+ err = of_get_mac_address(np, phy->macaddr);
+ if (err == -EPROBE_DEFER)
+ return err;
if (!is_valid_ether_addr(phy->macaddr)) {
eth_random_addr(phy->macaddr);
@@ -177,6 +180,8 @@ mt76_eeprom_override(struct mt76_phy *phy)
"Invalid MAC address, using random address %pM\n",
phy->macaddr);
}
+
+ return 0;
}
EXPORT_SYMBOL_GPL(mt76_eeprom_override);
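mt76_eeprom_override() now propagates of_get_mac_address() failures so that -EPROBE_DEFER (a MAC address stored in an NVMEM cell that is not ready yet) can reach the driver's probe path; the callers updated below simply forward the error. A minimal caller sketch (the function name is hypothetical):

	/* Hypothetical probe-path caller; defer until the NVMEM MAC is ready. */
	static int example_eeprom_init(struct mt76_phy *mphy)
	{
		/* Typically returns 0, or -EPROBE_DEFER from the NVMEM MAC cell. */
		return mt76_eeprom_override(mphy);
	}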
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index 59adf3312617..5ceaf78c9ea0 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -824,6 +824,9 @@ static void mt76_reset_phy(struct mt76_phy *phy)
return;
INIT_LIST_HEAD(&phy->tx_list);
+ phy->num_sta = 0;
+ phy->chanctx = NULL;
+ mt76_roc_complete(phy);
}
void mt76_reset_device(struct mt76_dev *dev)
@@ -844,6 +847,8 @@ void mt76_reset_device(struct mt76_dev *dev)
}
rcu_read_unlock();
+ mt76_abort_scan(dev);
+
INIT_LIST_HEAD(&dev->wcid_list);
INIT_LIST_HEAD(&dev->sta_poll_list);
dev->vif_mask = 0;
@@ -1235,6 +1240,8 @@ mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
mstat = *((struct mt76_rx_status *)skb->cb);
memset(status, 0, sizeof(*status));
+ skb->priority = mstat.qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
+
status->flag = mstat.flag;
status->freq = mstat.freq;
status->enc_flags = mstat.enc_flags;
@@ -2060,3 +2067,55 @@ void mt76_vif_cleanup(struct mt76_dev *dev, struct ieee80211_vif *vif)
mt76_abort_roc(mvif->roc_phy);
}
EXPORT_SYMBOL_GPL(mt76_vif_cleanup);
+
+u16 mt76_select_links(struct ieee80211_vif *vif, int max_active_links)
+{
+ unsigned long usable_links = ieee80211_vif_usable_links(vif);
+ struct {
+ u8 link_id;
+ enum nl80211_band band;
+ } data[IEEE80211_MLD_MAX_NUM_LINKS];
+ unsigned int link_id;
+ int i, n_data = 0;
+ u16 sel_links = 0;
+
+ if (!ieee80211_vif_is_mld(vif))
+ return 0;
+
+ if (vif->active_links == usable_links)
+ return vif->active_links;
+
+ rcu_read_lock();
+ for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct ieee80211_bss_conf *link_conf;
+
+ link_conf = rcu_dereference(vif->link_conf[link_id]);
+ if (WARN_ON_ONCE(!link_conf))
+ continue;
+
+ data[n_data].link_id = link_id;
+ data[n_data].band = link_conf->chanreq.oper.chan->band;
+ n_data++;
+ }
+ rcu_read_unlock();
+
+ for (i = 0; i < n_data; i++) {
+ int j;
+
+ if (!(BIT(data[i].link_id) & vif->active_links))
+ continue;
+
+ sel_links = BIT(data[i].link_id);
+ for (j = 0; j < n_data; j++) {
+ if (data[i].band != data[j].band) {
+ sel_links |= BIT(data[j].link_id);
+ if (hweight16(sel_links) == max_active_links)
+ break;
+ }
+ }
+ break;
+ }
+
+ return sel_links;
+}
+EXPORT_SYMBOL_GPL(mt76_select_links);
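mt76_select_links() keeps the link that is already active and greedily adds usable links on other bands until max_active_links is reached. Worked example: with usable links 0 (2.4 GHz), 1 (5 GHz) and 2 (6 GHz), link 1 active and max_active_links = 2, the loop seeds sel_links with BIT(1), adds BIT(0) for the first different band and stops at the limit, returning BIT(0) | BIT(1).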
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index 127637454c82..e0d50b58cd01 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -33,6 +33,7 @@
#define MT_QFLAG_WED BIT(5)
#define MT_QFLAG_WED_RRO BIT(6)
#define MT_QFLAG_WED_RRO_EN BIT(7)
+#define MT_QFLAG_EMI_EN BIT(8)
#define __MT_WED_Q(_type, _n) (MT_QFLAG_WED | \
FIELD_PREP(MT_QFLAG_WED_TYPE, _type) | \
@@ -45,6 +46,7 @@
#define MT_WED_RRO_Q_DATA(_n) __MT_WED_RRO_Q(MT76_WED_RRO_Q_DATA, _n)
#define MT_WED_RRO_Q_MSDU_PG(_n) __MT_WED_RRO_Q(MT76_WED_RRO_Q_MSDU_PG, _n)
#define MT_WED_RRO_Q_IND __MT_WED_RRO_Q(MT76_WED_RRO_Q_IND, 0)
+#define MT_WED_RRO_Q_RXDMAD_C __MT_WED_RRO_Q(MT76_WED_RRO_Q_RXDMAD_C, 0)
struct mt76_dev;
struct mt76_phy;
@@ -71,6 +73,13 @@ enum mt76_wed_type {
MT76_WED_RRO_Q_DATA,
MT76_WED_RRO_Q_MSDU_PG,
MT76_WED_RRO_Q_IND,
+ MT76_WED_RRO_Q_RXDMAD_C,
+};
+
+enum mt76_hwrro_mode {
+ MT76_HWRRO_OFF,
+ MT76_HWRRO_V3,
+ MT76_HWRRO_V3_1,
};
struct mt76_bus_ops {
@@ -129,6 +138,7 @@ enum mt76_rxq_id {
MT_RXQ_TXFREE_BAND1,
MT_RXQ_TXFREE_BAND2,
MT_RXQ_RRO_IND,
+ MT_RXQ_RRO_RXDMAD_C,
__MT_RXQ_MAX
};
@@ -232,6 +242,9 @@ struct mt76_queue {
u8 buf_offset;
u16 flags;
+ u8 magic_cnt;
+
+ __le16 *emi_cpu_idx;
struct mtk_wed_device *wed;
u32 wed_regs;
@@ -286,11 +299,15 @@ struct mt76_queue_ops {
void (*tx_cleanup)(struct mt76_dev *dev, struct mt76_queue *q,
bool flush);
+ void (*rx_queue_init)(struct mt76_dev *dev, enum mt76_rxq_id qid,
+ int (*poll)(struct napi_struct *napi, int budget));
+
void (*rx_cleanup)(struct mt76_dev *dev, struct mt76_queue *q);
void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);
- void (*reset_q)(struct mt76_dev *dev, struct mt76_queue *q);
+ void (*reset_q)(struct mt76_dev *dev, struct mt76_queue *q,
+ bool reset_idx);
};
enum mt76_phy_type {
@@ -398,15 +415,16 @@ struct mt76_txq {
bool aggr;
};
+/* data0 */
+#define RRO_IND_DATA0_IND_REASON_MASK GENMASK(31, 28)
+#define RRO_IND_DATA0_START_SEQ_MASK GENMASK(27, 16)
+#define RRO_IND_DATA0_SEQ_ID_MASK GENMASK(11, 0)
+/* data1 */
+#define RRO_IND_DATA1_MAGIC_CNT_MASK GENMASK(31, 29)
+#define RRO_IND_DATA1_IND_COUNT_MASK GENMASK(12, 0)
struct mt76_wed_rro_ind {
- u32 se_id : 12;
- u32 rsv : 4;
- u32 start_sn : 12;
- u32 ind_reason : 4;
- u32 ind_cnt : 13;
- u32 win_sz : 3;
- u32 rsv2 : 13;
- u32 magic_cnt : 3;
+ __le32 data0;
+ __le32 data1;
};
struct mt76_txwi_cache {
@@ -417,6 +435,8 @@ struct mt76_txwi_cache {
struct sk_buff *skb;
void *ptr;
};
+
+ u8 qid;
};
struct mt76_rx_tid {
@@ -533,6 +553,10 @@ struct mt76_driver_ops {
void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);
+ void (*rx_rro_ind_process)(struct mt76_dev *dev, void *data);
+ int (*rx_rro_add_msdu_page)(struct mt76_dev *dev, struct mt76_queue *q,
+ dma_addr_t p, void *data);
+
void (*sta_ps)(struct mt76_dev *dev, struct ieee80211_sta *sta,
bool ps);
@@ -910,6 +934,7 @@ struct mt76_dev {
struct mt76_queue q_rx[__MT_RXQ_MAX];
const struct mt76_queue_ops *queue_ops;
int tx_dma_idx[4];
+ enum mt76_hwrro_mode hwrro_mode;
struct mt76_worker tx_worker;
struct napi_struct tx_napi;
@@ -1214,6 +1239,7 @@ static inline int mt76_wed_dma_setup(struct mt76_dev *dev, struct mt76_queue *q,
#define mt76_tx_queue_skb(dev, ...) (dev)->mt76.queue_ops->tx_queue_skb(&((dev)->mphy), __VA_ARGS__)
#define mt76_queue_rx_reset(dev, ...) (dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_tx_cleanup(dev, ...) (dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
+#define mt76_queue_rx_init(dev, ...) (dev)->mt76.queue_ops->rx_queue_init(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_rx_cleanup(dev, ...) (dev)->mt76.queue_ops->rx_cleanup(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_kick(dev, ...) (dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_reset(dev, ...) (dev)->mt76.queue_ops->reset_q(&((dev)->mt76), __VA_ARGS__)
@@ -1268,7 +1294,7 @@ void mt76_seq_puts_array(struct seq_file *file, const char *str,
s8 *val, int len);
int mt76_eeprom_init(struct mt76_dev *dev, int len);
-void mt76_eeprom_override(struct mt76_phy *phy);
+int mt76_eeprom_override(struct mt76_phy *phy);
int mt76_get_of_data_from_mtd(struct mt76_dev *dev, void *eep, int offset, int len);
int mt76_get_of_data_from_nvmem(struct mt76_dev *dev, void *eep,
const char *cell_name, int len);
@@ -1617,6 +1643,7 @@ int mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
void mt76_scan_work(struct work_struct *work);
void mt76_abort_scan(struct mt76_dev *dev);
void mt76_roc_complete_work(struct work_struct *work);
+void mt76_roc_complete(struct mt76_phy *phy);
void mt76_abort_roc(struct mt76_phy *phy);
struct mt76_vif_link *mt76_get_vif_phy_link(struct mt76_phy *phy,
struct ieee80211_vif *vif);
@@ -1782,21 +1809,34 @@ static inline bool mt76_queue_is_wed_rro_ind(struct mt76_queue *q)
FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_RRO_Q_IND;
}
+static inline bool mt76_queue_is_wed_rro_rxdmad_c(struct mt76_queue *q)
+{
+ return mt76_queue_is_wed_rro(q) &&
+ FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_RRO_Q_RXDMAD_C;
+}
+
static inline bool mt76_queue_is_wed_rro_data(struct mt76_queue *q)
{
return mt76_queue_is_wed_rro(q) &&
- (FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_RRO_Q_DATA ||
- FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_RRO_Q_MSDU_PG);
+ FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_RRO_Q_DATA;
}
-static inline bool mt76_queue_is_wed_rx(struct mt76_queue *q)
+static inline bool mt76_queue_is_wed_rro_msdu_pg(struct mt76_queue *q)
{
- if (!(q->flags & MT_QFLAG_WED))
- return false;
+ return mt76_queue_is_wed_rro(q) &&
+ FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) ==
+ MT76_WED_RRO_Q_MSDU_PG;
+}
- return FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX ||
- mt76_queue_is_wed_rro_ind(q) || mt76_queue_is_wed_rro_data(q);
+static inline bool mt76_queue_is_wed_rx(struct mt76_queue *q)
+{
+ return (q->flags & MT_QFLAG_WED) &&
+ FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX;
+}
+static inline bool mt76_queue_is_emi(struct mt76_queue *q)
+{
+ return q->flags & MT_QFLAG_EMI_EN;
}
struct mt76_txwi_cache *
@@ -1872,6 +1912,7 @@ mt76_vif_init(struct ieee80211_vif *vif, struct mt76_vif_data *mvif)
}
void mt76_vif_cleanup(struct mt76_dev *dev, struct ieee80211_vif *vif);
+u16 mt76_select_links(struct ieee80211_vif *vif, int max_active_links);
static inline struct mt76_vif_link *
mt76_vif_link(struct mt76_dev *dev, struct ieee80211_vif *vif, int link_id)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c
index f5a6b03bc61d..88382b537a33 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c
@@ -182,7 +182,6 @@ int mt7603_eeprom_init(struct mt7603_dev *dev)
dev->mphy.antenna_mask = 1;
dev->mphy.chainmask = dev->mphy.antenna_mask;
- mt76_eeprom_override(&dev->mphy);
- return 0;
+ return mt76_eeprom_override(&dev->mphy);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/soc.c b/drivers/net/wireless/mediatek/mt76/mt7603/soc.c
index 08590aa68356..1dd372372048 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/soc.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/soc.c
@@ -48,7 +48,7 @@ mt76_wmac_probe(struct platform_device *pdev)
return 0;
error:
- ieee80211_free_hw(mt76_hw(dev));
+ mt76_free_device(mdev);
return ret;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
index ccedea7e8a50..d4bc7e11e772 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
@@ -351,8 +351,6 @@ int mt7615_eeprom_init(struct mt7615_dev *dev, u32 addr)
memcpy(dev->mphy.macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR,
ETH_ALEN);
- mt76_eeprom_override(&dev->mphy);
-
- return 0;
+ return mt76_eeprom_override(&dev->mphy);
}
EXPORT_SYMBOL_GPL(mt7615_eeprom_init);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/init.c b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
index aae80005a3c1..3e7af3e58736 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
@@ -570,7 +570,10 @@ int mt7615_register_ext_phy(struct mt7615_dev *dev)
ETH_ALEN);
mphy->macaddr[0] |= 2;
mphy->macaddr[0] ^= BIT(7);
- mt76_eeprom_override(mphy);
+
+ ret = mt76_eeprom_override(mphy);
+ if (ret)
+ return ret;
/* second phy can only handle 5 GHz */
mphy->cap.has_5ghz = true;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h
index 1013cad57a7f..c5eaedca11e0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h
@@ -294,6 +294,13 @@ enum tx_frag_idx {
#define MT_TXP_BUF_LEN GENMASK(11, 0)
#define MT_TXP_DMA_ADDR_H GENMASK(15, 12)
+#define MT_TXP0_TOKEN_ID0 GENMASK(14, 0)
+#define MT_TXP0_TOKEN_ID0_VALID_MASK BIT(15)
+
+#define MT_TXP1_TID_ADDBA GENMASK(14, 12)
+#define MT_TXP3_ML0_MASK BIT(15)
+#define MT_TXP3_DMA_ADDR_H GENMASK(13, 12)
+
#define MT_TX_RATE_STBC BIT(14)
#define MT_TX_RATE_NSS GENMASK(13, 10)
#define MT_TX_RATE_MODE GENMASK(9, 6)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
index 16db0f2082d1..fc3e6728fcfb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
@@ -1662,6 +1662,31 @@ int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
return err;
}
+ if (enable && vif->bss_conf.bssid_indicator) {
+ struct {
+ struct {
+ u8 bss_idx;
+ u8 pad[3];
+ } __packed hdr;
+ struct bss_info_uni_mbssid mbssid;
+ } mbssid_req = {
+ .hdr = {
+ .bss_idx = mvif->idx,
+ },
+ .mbssid = {
+ .tag = cpu_to_le16(UNI_BSS_INFO_11V_MBSSID),
+ .len = cpu_to_le16(sizeof(struct bss_info_uni_mbssid)),
+ .max_indicator = vif->bss_conf.bssid_indicator,
+ .mbss_idx = vif->bss_conf.bssid_index,
+ },
+ };
+
+ err = mt76_mcu_send_msg(mdev, MCU_UNI_CMD(BSS_INFO_UPDATE),
+ &mbssid_req, sizeof(mbssid_req), true);
+ if (err < 0)
+ return err;
+ }
+
return mt76_connac_mcu_uni_set_chctx(phy, mvif, ctx);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_uni_add_bss);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
index 4de45a56812d..d4506b8b46fa 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
@@ -332,7 +332,11 @@ int mt76x0_eeprom_init(struct mt76x02_dev *dev)
memcpy(dev->mphy.macaddr, (u8 *)dev->mt76.eeprom.data + MT_EE_MAC_ADDR,
ETH_ALEN);
- mt76_eeprom_override(&dev->mphy);
+
+ err = mt76_eeprom_override(&dev->mphy);
+ if (err)
+ return err;
+
mt76x02_mac_setaddr(dev, dev->mphy.macaddr);
mt76x0_set_chip_cap(dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
index 156b16c17b2b..221805deb42f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
@@ -499,7 +499,9 @@ int mt76x2_eeprom_init(struct mt76x02_dev *dev)
mt76x02_eeprom_parse_hw_cap(dev);
mt76x2_eeprom_get_macaddr(dev);
- mt76_eeprom_override(&dev->mphy);
+ ret = mt76_eeprom_override(&dev->mphy);
+ if (ret)
+ return ret;
dev->mphy.macaddr[0] &= ~BIT(1);
return 0;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
index 0c62272fe7d0..009ef713f437 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
@@ -624,13 +624,13 @@ int mt7915_dma_reset(struct mt7915_dev *dev, bool force)
}
for (i = 0; i < __MT_MCUQ_MAX; i++)
- mt76_queue_reset(dev, dev->mt76.q_mcu[i]);
+ mt76_queue_reset(dev, dev->mt76.q_mcu[i], true);
mt76_for_each_q_rx(&dev->mt76, i) {
if (mt76_queue_is_wed_tx_free(&dev->mt76.q_rx[i]))
continue;
- mt76_queue_reset(dev, &dev->mt76.q_rx[i]);
+ mt76_queue_reset(dev, &dev->mt76.q_rx[i], true);
}
mt76_tx_status_check(&dev->mt76, true);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
index c0f3402d30bb..38dfd5de365c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
@@ -284,9 +284,7 @@ int mt7915_eeprom_init(struct mt7915_dev *dev)
memcpy(dev->mphy.macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR,
ETH_ALEN);
- mt76_eeprom_override(&dev->mphy);
-
- return 0;
+ return mt76_eeprom_override(&dev->mphy);
}
int mt7915_eeprom_get_target_power(struct mt7915_dev *dev,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
index 31aec0f40232..73611c9d26e1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
@@ -50,9 +50,9 @@ enum mt7915_eeprom_field {
#define MT_EE_CAL_GROUP_SIZE_7975 (54 * MT_EE_CAL_UNIT + 16)
#define MT_EE_CAL_GROUP_SIZE_7976 (94 * MT_EE_CAL_UNIT + 16)
#define MT_EE_CAL_GROUP_SIZE_7916_6G (94 * MT_EE_CAL_UNIT + 16)
+#define MT_EE_CAL_GROUP_SIZE_7981 (144 * MT_EE_CAL_UNIT + 16)
#define MT_EE_CAL_DPD_SIZE_V1 (54 * MT_EE_CAL_UNIT)
#define MT_EE_CAL_DPD_SIZE_V2 (300 * MT_EE_CAL_UNIT)
-#define MT_EE_CAL_DPD_SIZE_V2_7981 (102 * MT_EE_CAL_UNIT) /* no 6g dpd data */
#define MT_EE_WIFI_CONF0_TX_PATH GENMASK(2, 0)
#define MT_EE_WIFI_CONF0_RX_PATH GENMASK(5, 3)
@@ -180,6 +180,8 @@ mt7915_get_cal_group_size(struct mt7915_dev *dev)
val = FIELD_GET(MT_EE_WIFI_CONF0_BAND_SEL, val);
return (val == MT_EE_V2_BAND_SEL_6GHZ) ? MT_EE_CAL_GROUP_SIZE_7916_6G :
MT_EE_CAL_GROUP_SIZE_7916;
+ } else if (is_mt7981(&dev->mt76)) {
+ return MT_EE_CAL_GROUP_SIZE_7981;
} else if (mt7915_check_adie(dev, false)) {
return MT_EE_CAL_GROUP_SIZE_7976;
} else {
@@ -192,8 +194,6 @@ mt7915_get_cal_dpd_size(struct mt7915_dev *dev)
{
if (is_mt7915(&dev->mt76))
return MT_EE_CAL_DPD_SIZE_V1;
- else if (is_mt7981(&dev->mt76))
- return MT_EE_CAL_DPD_SIZE_V2_7981;
else
return MT_EE_CAL_DPD_SIZE_V2;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
index 3e30ca5155d2..5ea8b46e092e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
@@ -702,7 +702,9 @@ mt7915_register_ext_phy(struct mt7915_dev *dev, struct mt7915_phy *phy)
mphy->macaddr[0] |= 2;
mphy->macaddr[0] ^= BIT(7);
}
- mt76_eeprom_override(mphy);
+ ret = mt76_eeprom_override(mphy);
+ if (ret)
+ return ret;
/* init wiphy according to mphy and phy */
mt7915_init_wiphy(phy);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
index 2928e75b2397..c1fdd3c4f1ba 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
@@ -3052,30 +3052,15 @@ static int mt7915_dpd_freq_idx(struct mt7915_dev *dev, u16 freq, u8 bw)
/* 5G BW160 */
5250, 5570, 5815
};
- static const u16 freq_list_v2_7981[] = {
- /* 5G BW20 */
- 5180, 5200, 5220, 5240,
- 5260, 5280, 5300, 5320,
- 5500, 5520, 5540, 5560,
- 5580, 5600, 5620, 5640,
- 5660, 5680, 5700, 5720,
- 5745, 5765, 5785, 5805,
- 5825, 5845, 5865, 5885,
- /* 5G BW160 */
- 5250, 5570, 5815
- };
- const u16 *freq_list = freq_list_v1;
- int n_freqs = ARRAY_SIZE(freq_list_v1);
- int idx;
+ const u16 *freq_list;
+ int idx, n_freqs;
if (!is_mt7915(&dev->mt76)) {
- if (is_mt7981(&dev->mt76)) {
- freq_list = freq_list_v2_7981;
- n_freqs = ARRAY_SIZE(freq_list_v2_7981);
- } else {
- freq_list = freq_list_v2;
- n_freqs = ARRAY_SIZE(freq_list_v2);
- }
+ freq_list = freq_list_v2;
+ n_freqs = ARRAY_SIZE(freq_list_v2);
+ } else {
+ freq_list = freq_list_v1;
+ n_freqs = ARRAY_SIZE(freq_list_v1);
}
if (freq < 4000) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
index 4a82f8e4c118..36488aa6cc20 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
@@ -664,8 +664,8 @@ int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr,
MT_RXQ_WED_RING_BASE;
wed->wlan.wpdma_rx_glo = pci_resource_start(pci_dev, 0) +
MT_WPDMA_GLO_CFG;
- wed->wlan.wpdma_rx = pci_resource_start(pci_dev, 0) +
- MT_RXQ_WED_DATA_RING_BASE;
+ wed->wlan.wpdma_rx[0] = pci_resource_start(pci_dev, 0) +
+ MT_RXQ_WED_DATA_RING_BASE;
} else {
struct platform_device *plat_dev = pdev_ptr;
struct resource *res;
@@ -687,7 +687,7 @@ int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr,
wed->wlan.wpdma_tx = res->start + MT_TXQ_WED_RING_BASE;
wed->wlan.wpdma_txfree = res->start + MT_RXQ_WED_RING_BASE;
wed->wlan.wpdma_rx_glo = res->start + MT_WPDMA_GLO_CFG;
- wed->wlan.wpdma_rx = res->start + MT_RXQ_WED_DATA_RING_BASE;
+ wed->wlan.wpdma_rx[0] = res->start + MT_RXQ_WED_DATA_RING_BASE;
}
wed->wlan.nbuf = MT7915_HW_TOKEN_SIZE;
wed->wlan.tx_tbit[0] = is_mt7915(&dev->mt76) ? 4 : 30;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
index 14e17dc90256..b9098a7331b1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
@@ -189,7 +189,9 @@ static int __mt7921_init_hardware(struct mt792x_dev *dev)
if (ret)
goto out;
- mt76_eeprom_override(&dev->mphy);
+ ret = mt76_eeprom_override(&dev->mphy);
+ if (ret)
+ goto out;
ret = mt7921_mcu_set_eeprom(dev);
if (ret)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
index 5881040ac195..67383c41a319 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
@@ -135,6 +135,8 @@ mt7921_init_he_caps(struct mt792x_phy *phy, enum nl80211_band band,
if (is_mt7922(phy->mt76->dev)) {
he_cap_elem->phy_cap_info[0] |=
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
+ he_cap_elem->phy_cap_info[4] |=
+ IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4;
he_cap_elem->phy_cap_info[8] |=
IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
index fe9751851ff7..100bdba32ba5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
@@ -21,6 +21,9 @@ static const struct usb_device_id mt7921u_device_table[] = {
/* Netgear, Inc. [A8000,AXE3000] */
{ USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9060, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)MT7921_FIRMWARE_WM },
+ /* Netgear, Inc. A7500 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9065, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)MT7921_FIRMWARE_WM },
/* TP-Link TXE50UH */
{ USB_DEVICE_AND_INTERFACE_INFO(0x35bc, 0x0107, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)MT7921_FIRMWARE_WM },
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/init.c b/drivers/net/wireless/mediatek/mt76/mt7925/init.c
index 4249bad83c93..d7d5afe365ed 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/init.c
@@ -249,7 +249,9 @@ static int __mt7925_init_hardware(struct mt792x_dev *dev)
if (ret)
goto out;
- mt76_eeprom_override(&dev->mphy);
+ ret = mt76_eeprom_override(&dev->mphy);
+ if (ret)
+ goto out;
ret = mt7925_mcu_set_eeprom(dev);
if (ret)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mac.c b/drivers/net/wireless/mediatek/mt76/mt7925/mac.c
index b581ab9427f2..1e44e96f034e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mac.c
@@ -1300,7 +1300,6 @@ void mt7925_mac_reset_work(struct work_struct *work)
cancel_delayed_work_sync(&dev->mphy.mac_work);
cancel_delayed_work_sync(&pm->ps_work);
cancel_work_sync(&pm->wake_work);
- dev->sar_inited = false;
for (i = 0; i < 10; i++) {
mutex_lock(&dev->mt76.mutex);
@@ -1329,6 +1328,10 @@ void mt7925_mac_reset_work(struct work_struct *work)
IEEE80211_IFACE_ITER_RESUME_ALL,
mt7925_vif_connect_iter, NULL);
mt76_connac_power_save_sched(&dev->mt76.phy, pm);
+
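+	/* reload the default CLC ("00", indoor) after a full MAC reset */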
+ mt792x_mutex_acquire(dev);
+ mt7925_mcu_set_clc(dev, "00", ENVIRON_INDOOR);
+ mt792x_mutex_release(dev);
}
void mt7925_coredump_work(struct work_struct *work)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/main.c b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
index b0e053b15227..ac3d485a2f78 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
@@ -240,6 +240,7 @@ int mt7925_init_mlo_caps(struct mt792x_phy *phy)
{
struct wiphy *wiphy = phy->mt76->hw->wiphy;
static const u8 ext_capa_sta[] = {
+ [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT,
[7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
};
static struct wiphy_iftype_ext_capab ext_capab[] = {
@@ -310,7 +311,6 @@ void mt7925_set_stream_he_eht_caps(struct mt792x_phy *phy)
int __mt7925_start(struct mt792x_phy *phy)
{
struct mt76_phy *mphy = phy->mt76;
- struct mt792x_dev *dev = phy->dev;
int err;
err = mt7925_mcu_set_channel_domain(mphy);
@@ -321,13 +321,6 @@ int __mt7925_start(struct mt792x_phy *phy)
if (err)
return err;
- if (!dev->sar_inited) {
- err = mt7925_set_tx_sar_pwr(mphy->hw, NULL);
- if (err)
- return err;
- dev->sar_inited = true;
- }
-
mt792x_mac_reset_counters(phy);
set_bit(MT76_STATE_RUNNING, &mphy->state);
@@ -987,56 +980,6 @@ int mt7925_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
}
EXPORT_SYMBOL_GPL(mt7925_mac_sta_add);
-static u16
-mt7925_mac_select_links(struct mt76_dev *mdev, struct ieee80211_vif *vif)
-{
- unsigned long usable_links = ieee80211_vif_usable_links(vif);
- struct {
- u8 link_id;
- enum nl80211_band band;
- } data[IEEE80211_MLD_MAX_NUM_LINKS];
- u8 link_id, i, j, n_data = 0;
- u16 sel_links = 0;
-
- if (!ieee80211_vif_is_mld(vif))
- return 0;
-
- if (vif->active_links == usable_links)
- return vif->active_links;
-
- rcu_read_lock();
- for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
- struct ieee80211_bss_conf *link_conf =
- rcu_dereference(vif->link_conf[link_id]);
-
- if (WARN_ON_ONCE(!link_conf))
- continue;
-
- data[n_data].link_id = link_id;
- data[n_data].band = link_conf->chanreq.oper.chan->band;
- n_data++;
- }
- rcu_read_unlock();
-
- for (i = 0; i < n_data; i++) {
- if (!(BIT(data[i].link_id) & vif->active_links))
- continue;
-
- sel_links = BIT(data[i].link_id);
-
- for (j = 0; j < n_data; j++) {
- if (data[i].band != data[j].band) {
- sel_links |= BIT(data[j].link_id);
- break;
- }
- }
-
- break;
- }
-
- return sel_links;
-}
-
static void
mt7925_mac_set_links(struct mt76_dev *mdev, struct ieee80211_vif *vif)
{
@@ -1047,7 +990,7 @@ mt7925_mac_set_links(struct mt76_dev *mdev, struct ieee80211_vif *vif)
struct cfg80211_chan_def *chandef = &link_conf->chanreq.oper;
enum nl80211_band band = chandef->chan->band, secondary_band;
- u16 sel_links = mt7925_mac_select_links(mdev, vif);
+ u16 sel_links = mt76_select_links(vif, 2);
u8 secondary_link_id = __ffs(~BIT(mvif->deflink_id) & sel_links);
if (!ieee80211_vif_is_mld(vif) || hweight16(sel_links) < 2)
@@ -1731,13 +1674,7 @@ static int mt7925_set_sar_specs(struct ieee80211_hw *hw,
int err;
mt792x_mutex_acquire(dev);
- err = mt7925_mcu_set_clc(dev, dev->mt76.alpha2,
- dev->country_ie_env);
- if (err < 0)
- goto out;
-
err = mt7925_set_tx_sar_pwr(hw, sar);
-out:
mt792x_mutex_release(dev);
return err;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
index cd457be26523..8eda407e4135 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
@@ -759,7 +759,6 @@ static int mt7925_load_clc(struct mt792x_dev *dev, const char *fw_name)
}
}
- ret = mt7925_mcu_set_clc(dev, "00", ENVIRON_INDOOR);
out:
release_firmware(fw);
@@ -2622,6 +2621,25 @@ mt7925_mcu_bss_qos_tlv(struct sk_buff *skb, struct ieee80211_bss_conf *link_conf
}
static void
+mt7925_mcu_bss_mbssid_tlv(struct sk_buff *skb, struct ieee80211_bss_conf *link_conf,
+ bool enable)
+{
+ struct bss_info_uni_mbssid *mbssid;
+ struct tlv *tlv;
+
+ if (!enable && !link_conf->bssid_indicator)
+ return;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_11V_MBSSID,
+ sizeof(*mbssid));
+
+ mbssid = (struct bss_info_uni_mbssid *)tlv;
+ mbssid->max_indicator = link_conf->bssid_indicator;
+ mbssid->mbss_idx = link_conf->bssid_index;
+ mbssid->tx_bss_omac_idx = 0;
+}
+
+static void
mt7925_mcu_bss_he_tlv(struct sk_buff *skb, struct ieee80211_bss_conf *link_conf,
struct mt792x_phy *phy)
{
@@ -2787,8 +2805,10 @@ int mt7925_mcu_add_bss_info(struct mt792x_phy *phy,
mt7925_mcu_bss_color_tlv(skb, link_conf, enable);
}
- if (enable)
+ if (enable) {
mt7925_mcu_bss_rlm_tlv(skb, phy->mt76, link_conf, ctx);
+ mt7925_mcu_bss_mbssid_tlv(skb, link_conf, enable);
+ }
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_UNI_CMD(BSS_INFO_UPDATE), true);
@@ -3703,6 +3723,8 @@ out:
int mt7925_mcu_set_rate_txpower(struct mt76_phy *phy)
{
+ struct mt76_dev *mdev = phy->dev;
+ struct mt792x_dev *dev = mt792x_hw_dev(mdev->hw);
int err;
if (phy->cap.has_2ghz) {
@@ -3719,7 +3741,7 @@ int mt7925_mcu_set_rate_txpower(struct mt76_phy *phy)
return err;
}
- if (phy->cap.has_6ghz) {
+ if (phy->cap.has_6ghz && dev->phy.clc_chan_conf) {
err = mt7925_mcu_rate_txpower_band(phy,
NL80211_BAND_6GHZ);
if (err < 0)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
index 89dc30f7c6b7..8eb1fe1082d1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
@@ -529,7 +529,7 @@ restore_suspend:
return err;
}
-static int mt7925_pci_resume(struct device *device)
+static int _mt7925_pci_resume(struct device *device, bool restore)
{
struct pci_dev *pdev = to_pci_dev(device);
struct mt76_dev *mdev = pci_get_drvdata(pdev);
@@ -569,6 +569,9 @@ static int mt7925_pci_resume(struct device *device)
napi_schedule(&mdev->tx_napi);
local_bh_enable();
+ if (restore)
+ goto failed;
+
mt76_connac_mcu_set_hif_suspend(mdev, false, false);
ret = wait_event_timeout(dev->wait,
dev->hif_resumed, 3 * HZ);
@@ -585,7 +588,7 @@ static int mt7925_pci_resume(struct device *device)
failed:
pm->suspended = false;
- if (err < 0)
+ if (err < 0 || restore)
mt792x_reset(&dev->mt76);
return err;
@@ -596,7 +599,24 @@ static void mt7925_pci_shutdown(struct pci_dev *pdev)
mt7925_pci_remove(pdev);
}
-static DEFINE_SIMPLE_DEV_PM_OPS(mt7925_pm_ops, mt7925_pci_suspend, mt7925_pci_resume);
+static int mt7925_pci_resume(struct device *device)
+{
+ return _mt7925_pci_resume(device, false);
+}
+
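+/* Restore from hibernation: skip the HIF resume handshake and force a
+ * full chip reset instead.
+ */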
+static int mt7925_pci_restore(struct device *device)
+{
+ return _mt7925_pci_resume(device, true);
+}
+
+static const struct dev_pm_ops mt7925_pm_ops = {
+ .suspend = pm_sleep_ptr(mt7925_pci_suspend),
+ .resume = pm_sleep_ptr(mt7925_pci_resume),
+ .freeze = pm_sleep_ptr(mt7925_pci_suspend),
+ .thaw = pm_sleep_ptr(mt7925_pci_resume),
+ .poweroff = pm_sleep_ptr(mt7925_pci_suspend),
+ .restore = pm_sleep_ptr(mt7925_pci_restore),
+};
static struct pci_driver mt7925_pci_driver = {
.name = KBUILD_MODNAME,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/usb.c b/drivers/net/wireless/mediatek/mt76/mt7925/usb.c
index 4dfbc1b6cfdd..bf040f34e4b9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/usb.c
@@ -12,6 +12,9 @@
static const struct usb_device_id mt7925u_device_table[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x0e8d, 0x7925, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)MT7925_FIRMWARE_WM },
+ /* Netgear, Inc. A9000 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9072, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)MT7925_FIRMWARE_WM },
{ },
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x.h b/drivers/net/wireless/mediatek/mt76/mt792x.h
index 443d397d9961..f2c8b9e4aa0f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x.h
+++ b/drivers/net/wireless/mediatek/mt76/mt792x.h
@@ -234,7 +234,6 @@ struct mt792x_dev {
bool aspm_supported:1;
bool hif_idle:1;
bool hif_resumed:1;
- bool sar_inited:1;
bool regd_change:1;
wait_queue_head_t wait;
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_core.c b/drivers/net/wireless/mediatek/mt76/mt792x_core.c
index e3a703398b30..c0e56541a954 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_core.c
@@ -689,8 +689,11 @@ int mt792x_init_wiphy(struct ieee80211_hw *hw)
ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
ieee80211_hw_set(hw, CONNECTION_MONITOR);
ieee80211_hw_set(hw, NO_VIRTUAL_MONITOR);
+ ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID);
+ ieee80211_hw_set(hw, SUPPORTS_ONLY_HE_MULTI_BSSID);
+
if (is_mt7921(&dev->mt76))
ieee80211_hw_set(hw, CHANCTX_STA_CSA);
if (dev->pm.enable)
ieee80211_hw_set(hw, CONNECTION_MONITOR);
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_dma.c b/drivers/net/wireless/mediatek/mt76/mt792x_dma.c
index 6f9db782338e..69217ce91130 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_dma.c
@@ -181,13 +181,13 @@ mt792x_dma_reset(struct mt792x_dev *dev, bool force)
/* reset hw queues */
for (i = 0; i < __MT_TXQ_MAX; i++)
- mt76_queue_reset(dev, dev->mphy.q_tx[i]);
+ mt76_queue_reset(dev, dev->mphy.q_tx[i], true);
for (i = 0; i < __MT_MCUQ_MAX; i++)
- mt76_queue_reset(dev, dev->mt76.q_mcu[i]);
+ mt76_queue_reset(dev, dev->mt76.q_mcu[i], true);
mt76_for_each_q_rx(&dev->mt76, i)
- mt76_queue_reset(dev, &dev->mt76.q_rx[i]);
+ mt76_queue_reset(dev, &dev->mt76.q_rx[i], true);
mt76_tx_status_check(&dev->mt76, true);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/dma.c b/drivers/net/wireless/mediatek/mt76/mt7996/dma.c
index c8bef0b2a144..659015f93d32 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/dma.c
@@ -17,7 +17,7 @@ int mt7996_init_tx_queues(struct mt7996_phy *phy, int idx, int n_desc,
ring_base += MT_TXQ_ID(0) * MT_RING_SIZE;
idx -= MT_TXQ_ID(0);
- if (phy->mt76->band_idx == MT_BAND2)
+ if (wed == &dev->mt76.mmio.wed_hif2)
flags = MT_WED_Q_TX(0);
else
flags = MT_WED_Q_TX(idx);
@@ -83,36 +83,74 @@ static void mt7996_dma_config(struct mt7996_dev *dev)
break;
}
- if (dev->has_rro) {
+ if (mt7996_has_hwrro(dev)) {
/* band0 */
RXQ_CONFIG(MT_RXQ_RRO_BAND0, WFDMA0, MT_INT_RX_DONE_RRO_BAND0,
MT7996_RXQ_RRO_BAND0);
- RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND0, WFDMA0, MT_INT_RX_DONE_MSDU_PG_BAND0,
- MT7996_RXQ_MSDU_PG_BAND0);
- RXQ_CONFIG(MT_RXQ_TXFREE_BAND0, WFDMA0, MT_INT_RX_TXFREE_MAIN,
- MT7996_RXQ_TXFREE0);
- /* band1 */
- RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND1, WFDMA0, MT_INT_RX_DONE_MSDU_PG_BAND1,
- MT7996_RXQ_MSDU_PG_BAND1);
- /* band2 */
- RXQ_CONFIG(MT_RXQ_RRO_BAND2, WFDMA0, MT_INT_RX_DONE_RRO_BAND2,
- MT7996_RXQ_RRO_BAND2);
- RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND2, WFDMA0, MT_INT_RX_DONE_MSDU_PG_BAND2,
- MT7996_RXQ_MSDU_PG_BAND2);
- RXQ_CONFIG(MT_RXQ_TXFREE_BAND2, WFDMA0, MT_INT_RX_TXFREE_TRI,
- MT7996_RXQ_TXFREE2);
-
- RXQ_CONFIG(MT_RXQ_RRO_IND, WFDMA0, MT_INT_RX_DONE_RRO_IND,
- MT7996_RXQ_RRO_IND);
+ if (dev->mt76.hwrro_mode == MT76_HWRRO_V3)
+ RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND0, WFDMA0,
+ MT_INT_RX_DONE_MSDU_PG_BAND0,
+ MT7996_RXQ_MSDU_PG_BAND0);
+ if (is_mt7996(&dev->mt76)) {
+ RXQ_CONFIG(MT_RXQ_TXFREE_BAND0, WFDMA0,
+ MT_INT_RX_TXFREE_MAIN, MT7996_RXQ_TXFREE0);
+ /* band1 */
+ RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND1, WFDMA0,
+ MT_INT_RX_DONE_MSDU_PG_BAND1,
+ MT7996_RXQ_MSDU_PG_BAND1);
+ /* band2 */
+ RXQ_CONFIG(MT_RXQ_RRO_BAND2, WFDMA0,
+ MT_INT_RX_DONE_RRO_BAND2,
+ MT7996_RXQ_RRO_BAND2);
+ RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND2, WFDMA0,
+ MT_INT_RX_DONE_MSDU_PG_BAND2,
+ MT7996_RXQ_MSDU_PG_BAND2);
+ RXQ_CONFIG(MT_RXQ_TXFREE_BAND2, WFDMA0,
+ MT_INT_RX_TXFREE_TRI, MT7996_RXQ_TXFREE2);
+ } else {
+ RXQ_CONFIG(MT_RXQ_RRO_BAND1, WFDMA0,
+ MT_INT_RX_DONE_RRO_BAND1,
+ MT7996_RXQ_RRO_BAND1);
+ }
+
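+		/* HW RRO v3.1 uses the rxdmad_c ring instead of the ind cmd ring */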
+ if (dev->mt76.hwrro_mode == MT76_HWRRO_V3)
+ RXQ_CONFIG(MT_RXQ_RRO_IND, WFDMA0,
+ MT_INT_RX_DONE_RRO_IND,
+ MT7996_RXQ_RRO_IND);
+ else
+ RXQ_CONFIG(MT_RXQ_RRO_RXDMAD_C, WFDMA0,
+ MT_INT_RX_DONE_RRO_RXDMAD_C,
+ MT7996_RXQ_RRO_RXDMAD_C);
}
/* data tx queue */
- TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0, MT7996_TXQ_BAND0);
if (is_mt7996(&dev->mt76)) {
- TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1, MT7996_TXQ_BAND1);
- TXQ_CONFIG(2, WFDMA0, MT_INT_TX_DONE_BAND2, MT7996_TXQ_BAND2);
+ TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0, MT7996_TXQ_BAND0);
+ if (dev->hif2) {
+ /* default bn1:ring19 bn2:ring21 */
+ TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1,
+ MT7996_TXQ_BAND1);
+ TXQ_CONFIG(2, WFDMA0, MT_INT_TX_DONE_BAND2,
+ MT7996_TXQ_BAND2);
+ } else {
+ /* single pcie bn0/1:ring18 bn2:ring19 */
+ TXQ_CONFIG(2, WFDMA0, MT_INT_TX_DONE_BAND1,
+ MT7996_TXQ_BAND1);
+ }
} else {
- TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1, MT7996_TXQ_BAND1);
+ if (dev->hif2) {
+ /* bn0:ring18 bn1:ring21 */
+ TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0,
+ MT7996_TXQ_BAND0);
+ TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND2,
+ MT7996_TXQ_BAND2);
+ } else {
+ /* single pcie bn0:ring18 bn1:ring19 */
+ TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0,
+ MT7996_TXQ_BAND0);
+ TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1,
+ MT7996_TXQ_BAND1);
+ }
}
/* mcu tx queue */
@@ -166,11 +204,12 @@ static void __mt7996_dma_prefetch(struct mt7996_dev *dev, u32 ofs)
/* Rx TxFreeDone From MAC Rings */
val = is_mt7996(&dev->mt76) ? 4 : 8;
- if (is_mt7990(&dev->mt76) || (is_mt7996(&dev->mt76) && dev->has_rro))
+ if ((is_mt7996(&dev->mt76) && mt7996_has_hwrro(dev)) ||
+ is_mt7990(&dev->mt76))
mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_TXFREE_BAND0) + ofs, PREFETCH(val));
if (is_mt7990(&dev->mt76) && dev->hif2)
mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_TXFREE_BAND1) + ofs, PREFETCH(val));
- else if (is_mt7996(&dev->mt76) && dev->has_rro)
+ else if (is_mt7996(&dev->mt76) && mt7996_has_hwrro(dev))
mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_TXFREE_BAND2) + ofs, PREFETCH(val));
/* Rx Data Rings */
@@ -179,7 +218,7 @@ static void __mt7996_dma_prefetch(struct mt7996_dev *dev, u32 ofs)
mt76_wr(dev, MT_RXQ_EXT_CTRL(queue) + ofs, PREFETCH(0x10));
/* Rx RRO Rings */
- if (dev->has_rro) {
+ if (mt7996_has_hwrro(dev)) {
mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_RRO_BAND0) + ofs, PREFETCH(0x10));
queue = is_mt7996(&dev->mt76) ? MT_RXQ_RRO_BAND2 : MT_RXQ_RRO_BAND1;
mt76_wr(dev, MT_RXQ_EXT_CTRL(queue) + ofs, PREFETCH(0x10));
@@ -288,11 +327,14 @@ void mt7996_dma_start(struct mt7996_dev *dev, bool reset, bool wed_reset)
if (mt7996_band_valid(dev, MT_BAND0))
irq_mask |= MT_INT_BAND0_RX_DONE;
- if (mt7996_band_valid(dev, MT_BAND1))
+ if (mt7996_band_valid(dev, MT_BAND1)) {
irq_mask |= MT_INT_BAND1_RX_DONE;
+ if (is_mt7992(&dev->mt76) && dev->hif2)
+ irq_mask |= MT_INT_RX_TXFREE_BAND1_EXT;
+ }
if (mt7996_band_valid(dev, MT_BAND2))
- irq_mask |= MT_INT_BAND2_RX_DONE;
+ irq_mask |= MT_INT_BAND2_RX_DONE | MT_INT_TX_RX_DONE_EXT;
if (mtk_wed_device_active(wed) && wed_reset) {
u32 wed_irq_mask = irq_mask;
@@ -378,13 +420,48 @@ static void mt7996_dma_enable(struct mt7996_dev *dev, bool reset)
WF_WFDMA0_GLO_CFG_EXT1_TX_FCTRL_MODE);
mt76_set(dev, MT_WFDMA_HOST_CONFIG,
- MT_WFDMA_HOST_CONFIG_PDMA_BAND |
- MT_WFDMA_HOST_CONFIG_BAND2_PCIE1);
+ MT_WFDMA_HOST_CONFIG_PDMA_BAND);
+
+ mt76_clear(dev, MT_WFDMA_HOST_CONFIG,
+ MT_WFDMA_HOST_CONFIG_BAND0_PCIE1 |
+ MT_WFDMA_HOST_CONFIG_BAND1_PCIE1 |
+ MT_WFDMA_HOST_CONFIG_BAND2_PCIE1);
+
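+	/* route the highest band through PCIe1: band2 on MT7996, band1 otherwise */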
+ if (is_mt7996(&dev->mt76))
+ mt76_set(dev, MT_WFDMA_HOST_CONFIG,
+ MT_WFDMA_HOST_CONFIG_BAND2_PCIE1);
+ else
+ mt76_set(dev, MT_WFDMA_HOST_CONFIG,
+ MT_WFDMA_HOST_CONFIG_BAND1_PCIE1);
/* AXI read outstanding number */
mt76_rmw(dev, MT_WFDMA_AXI_R2A_CTRL,
MT_WFDMA_AXI_R2A_CTRL_OUTSTAND_MASK, 0x14);
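+	/* reduce the outstanding number when the hif2 PCIe link is
+	 * slower than Gen3 x2
+	 */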
+ if (dev->hif2->speed < PCIE_SPEED_5_0GT ||
+ (dev->hif2->speed == PCIE_SPEED_5_0GT &&
+ dev->hif2->width < PCIE_LNK_X2)) {
+ mt76_rmw(dev, WF_WFDMA0_GLO_CFG_EXT0 + hif1_ofs,
+ WF_WFDMA0_GLO_CFG_EXT0_OUTSTAND_MASK,
+ FIELD_PREP(WF_WFDMA0_GLO_CFG_EXT0_OUTSTAND_MASK,
+ 0x1));
+ mt76_rmw(dev, MT_WFDMA_AXI_R2A_CTRL2,
+ MT_WFDMA_AXI_R2A_CTRL2_OUTSTAND_MASK,
+ FIELD_PREP(MT_WFDMA_AXI_R2A_CTRL2_OUTSTAND_MASK,
+ 0x1));
+ } else if (dev->hif2->speed < PCIE_SPEED_8_0GT ||
+ (dev->hif2->speed == PCIE_SPEED_8_0GT &&
+ dev->hif2->width < PCIE_LNK_X2)) {
+ mt76_rmw(dev, WF_WFDMA0_GLO_CFG_EXT0 + hif1_ofs,
+ WF_WFDMA0_GLO_CFG_EXT0_OUTSTAND_MASK,
+ FIELD_PREP(WF_WFDMA0_GLO_CFG_EXT0_OUTSTAND_MASK,
+ 0x2));
+ mt76_rmw(dev, MT_WFDMA_AXI_R2A_CTRL2,
+ MT_WFDMA_AXI_R2A_CTRL2_OUTSTAND_MASK,
+ FIELD_PREP(MT_WFDMA_AXI_R2A_CTRL2_OUTSTAND_MASK,
+ 0x2));
+ }
+
/* WFDMA rx threshold */
mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_45_TH + hif1_ofs, 0xc000c);
mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_67_TH + hif1_ofs, 0x10008);
@@ -397,27 +474,58 @@ static void mt7996_dma_enable(struct mt7996_dev *dev, bool reset)
* so, redirect pcie0 rx ring3 interrupt to pcie1
*/
if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
- dev->has_rro)
+ mt7996_has_hwrro(dev)) {
+ u32 intr = is_mt7996(&dev->mt76) ?
+ MT_WFDMA0_RX_INT_SEL_RING6 :
+ (MT_WFDMA0_RX_INT_SEL_RING9 |
+ MT_WFDMA0_RX_INT_SEL_RING5);
+
mt76_set(dev, MT_WFDMA0_RX_INT_PCIE_SEL + hif1_ofs,
- MT_WFDMA0_RX_INT_SEL_RING6);
- else
+ intr);
+ } else {
mt76_set(dev, MT_WFDMA0_RX_INT_PCIE_SEL,
MT_WFDMA0_RX_INT_SEL_RING3);
+ }
}
mt7996_dma_start(dev, reset, true);
}
-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
int mt7996_dma_rro_init(struct mt7996_dev *dev)
{
struct mt76_dev *mdev = &dev->mt76;
u32 irq_mask;
int ret;
+ if (dev->mt76.hwrro_mode == MT76_HWRRO_V3_1) {
+ /* rxdmad_c */
+ mdev->q_rx[MT_RXQ_RRO_RXDMAD_C].flags = MT_WED_RRO_Q_RXDMAD_C;
+ if (mtk_wed_device_active(&mdev->mmio.wed))
+ mdev->q_rx[MT_RXQ_RRO_RXDMAD_C].wed = &mdev->mmio.wed;
+ else
+ mdev->q_rx[MT_RXQ_RRO_RXDMAD_C].flags |= MT_QFLAG_EMI_EN;
+ ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_RRO_RXDMAD_C],
+ MT_RXQ_ID(MT_RXQ_RRO_RXDMAD_C),
+ MT7996_RX_RING_SIZE,
+ MT7996_RX_BUF_SIZE,
+ MT_RXQ_RRO_AP_RING_BASE);
+ if (ret)
+ return ret;
+
+ /* We need to set the CPU index pointer before resetting the
+ * EMI queues.
+ */
+ mdev->q_rx[MT_RXQ_RRO_RXDMAD_C].emi_cpu_idx =
+ &dev->wed_rro.emi_rings_cpu.ptr->ring[0].idx;
+ mt76_queue_reset(dev, &mdev->q_rx[MT_RXQ_RRO_RXDMAD_C], true);
+ goto start_hw_rro;
+ }
+
/* ind cmd */
mdev->q_rx[MT_RXQ_RRO_IND].flags = MT_WED_RRO_Q_IND;
- mdev->q_rx[MT_RXQ_RRO_IND].wed = &mdev->mmio.wed;
+ if (mtk_wed_device_active(&mdev->mmio.wed) &&
+ mtk_wed_get_rx_capa(&mdev->mmio.wed))
+ mdev->q_rx[MT_RXQ_RRO_IND].wed = &mdev->mmio.wed;
ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_RRO_IND],
MT_RXQ_ID(MT_RXQ_RRO_IND),
MT7996_RX_RING_SIZE,
@@ -428,7 +536,9 @@ int mt7996_dma_rro_init(struct mt7996_dev *dev)
/* rx msdu page queue for band0 */
mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0].flags =
MT_WED_RRO_Q_MSDU_PG(0) | MT_QFLAG_WED_RRO_EN;
- mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0].wed = &mdev->mmio.wed;
+ if (mtk_wed_device_active(&mdev->mmio.wed) &&
+ mtk_wed_get_rx_capa(&mdev->mmio.wed))
+ mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0].wed = &mdev->mmio.wed;
ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0],
MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND0),
MT7996_RX_RING_SIZE,
@@ -437,11 +547,13 @@ int mt7996_dma_rro_init(struct mt7996_dev *dev)
if (ret)
return ret;
- if (mt7996_band_valid(dev, MT_BAND1)) {
+ if (mt7996_band_valid(dev, MT_BAND1) && is_mt7996(&dev->mt76)) {
/* rx msdu page queue for band1 */
mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1].flags =
MT_WED_RRO_Q_MSDU_PG(1) | MT_QFLAG_WED_RRO_EN;
- mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1].wed = &mdev->mmio.wed;
+ if (mtk_wed_device_active(&mdev->mmio.wed) &&
+ mtk_wed_get_rx_capa(&mdev->mmio.wed))
+ mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1].wed = &mdev->mmio.wed;
ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1],
MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND1),
MT7996_RX_RING_SIZE,
@@ -455,7 +567,9 @@ int mt7996_dma_rro_init(struct mt7996_dev *dev)
/* rx msdu page queue for band2 */
mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2].flags =
MT_WED_RRO_Q_MSDU_PG(2) | MT_QFLAG_WED_RRO_EN;
- mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2].wed = &mdev->mmio.wed;
+ if (mtk_wed_device_active(&mdev->mmio.wed) &&
+ mtk_wed_get_rx_capa(&mdev->mmio.wed))
+ mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2].wed = &mdev->mmio.wed;
ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2],
MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND2),
MT7996_RX_RING_SIZE,
@@ -465,15 +579,42 @@ int mt7996_dma_rro_init(struct mt7996_dev *dev)
return ret;
}
- irq_mask = mdev->mmio.irqmask | MT_INT_RRO_RX_DONE |
- MT_INT_TX_DONE_BAND2;
- mt76_wr(dev, MT_INT_MASK_CSR, irq_mask);
- mtk_wed_device_start_hw_rro(&mdev->mmio.wed, irq_mask, false);
- mt7996_irq_enable(dev, irq_mask);
+start_hw_rro:
+ if (mtk_wed_device_active(&mdev->mmio.wed)) {
+ irq_mask = mdev->mmio.irqmask |
+ MT_INT_TX_DONE_BAND2;
+
+ mt76_wr(dev, MT_INT_MASK_CSR, irq_mask);
+ mtk_wed_device_start_hw_rro(&mdev->mmio.wed, irq_mask, false);
+ mt7996_irq_enable(dev, irq_mask);
+ } else {
+ if (is_mt7996(&dev->mt76)) {
+ mt76_queue_rx_init(dev, MT_RXQ_MSDU_PAGE_BAND1,
+ mt76_dma_rx_poll);
+ mt76_queue_rx_init(dev, MT_RXQ_MSDU_PAGE_BAND2,
+ mt76_dma_rx_poll);
+ mt76_queue_rx_init(dev, MT_RXQ_RRO_BAND2,
+ mt76_dma_rx_poll);
+ } else {
+ mt76_queue_rx_init(dev, MT_RXQ_RRO_BAND1,
+ mt76_dma_rx_poll);
+ }
+
+ mt76_queue_rx_init(dev, MT_RXQ_RRO_BAND0, mt76_dma_rx_poll);
+ if (dev->mt76.hwrro_mode == MT76_HWRRO_V3_1) {
+ mt76_queue_rx_init(dev, MT_RXQ_RRO_RXDMAD_C,
+ mt76_dma_rx_poll);
+ } else {
+ mt76_queue_rx_init(dev, MT_RXQ_RRO_IND,
+ mt76_dma_rx_poll);
+ mt76_queue_rx_init(dev, MT_RXQ_MSDU_PAGE_BAND0,
+ mt76_dma_rx_poll);
+ }
+ mt7996_irq_enable(dev, MT_INT_RRO_RX_DONE);
+ }
return 0;
}
-#endif /* CONFIG_NET_MEDIATEK_SOC_WED */
int mt7996_dma_init(struct mt7996_dev *dev)
{
@@ -560,7 +701,9 @@ int mt7996_dma_init(struct mt7996_dev *dev)
return ret;
/* tx free notify event from WA for band0 */
- if (mtk_wed_device_active(wed) && !dev->has_rro) {
+ if (mtk_wed_device_active(wed) &&
+ ((is_mt7996(&dev->mt76) && !mt7996_has_hwrro(dev)) ||
+ (is_mt7992(&dev->mt76)))) {
dev->mt76.q_rx[MT_RXQ_MAIN_WA].flags = MT_WED_Q_TXFREE;
dev->mt76.q_rx[MT_RXQ_MAIN_WA].wed = wed;
}
@@ -615,7 +758,7 @@ int mt7996_dma_init(struct mt7996_dev *dev)
/* tx free notify event from WA for mt7996 band2
* use pcie0's rx ring3, but, redirect pcie0 rx ring3 interrupt to pcie1
*/
- if (mtk_wed_device_active(wed_hif2) && !dev->has_rro) {
+ if (mtk_wed_device_active(wed_hif2) && !mt7996_has_hwrro(dev)) {
dev->mt76.q_rx[MT_RXQ_BAND2_WA].flags = MT_WED_Q_TXFREE;
dev->mt76.q_rx[MT_RXQ_BAND2_WA].wed = wed_hif2;
}
@@ -630,6 +773,11 @@ int mt7996_dma_init(struct mt7996_dev *dev)
} else if (mt7996_band_valid(dev, MT_BAND1)) {
/* rx data queue for mt7992 band1 */
rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND1) + hif1_ofs;
+ if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed)) {
+ dev->mt76.q_rx[MT_RXQ_BAND1].flags = MT_WED_Q_RX(1);
+ dev->mt76.q_rx[MT_RXQ_BAND1].wed = wed;
+ }
+
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1],
MT_RXQ_ID(MT_RXQ_BAND1),
MT7996_RX_RING_SIZE,
@@ -641,6 +789,12 @@ int mt7996_dma_init(struct mt7996_dev *dev)
/* tx free notify event from WA for mt7992 band1 */
if (mt7996_has_wa(dev)) {
rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND1_WA) + hif1_ofs;
+ if (mtk_wed_device_active(wed_hif2)) {
+ dev->mt76.q_rx[MT_RXQ_BAND1_WA].flags =
+ MT_WED_Q_TXFREE;
+ dev->mt76.q_rx[MT_RXQ_BAND1_WA].wed = wed_hif2;
+ }
+
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1_WA],
MT_RXQ_ID(MT_RXQ_BAND1_WA),
MT7996_RX_MCU_RING_SIZE,
@@ -651,12 +805,12 @@ int mt7996_dma_init(struct mt7996_dev *dev)
}
}
- if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed) &&
- dev->has_rro) {
+ if (mt7996_has_hwrro(dev)) {
/* rx rro data queue for band0 */
dev->mt76.q_rx[MT_RXQ_RRO_BAND0].flags =
MT_WED_RRO_Q_DATA(0) | MT_QFLAG_WED_RRO_EN;
- dev->mt76.q_rx[MT_RXQ_RRO_BAND0].wed = wed;
+ if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed))
+ dev->mt76.q_rx[MT_RXQ_RRO_BAND0].wed = wed;
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_RRO_BAND0],
MT_RXQ_ID(MT_RXQ_RRO_BAND0),
MT7996_RX_RING_SIZE,
@@ -665,23 +819,44 @@ int mt7996_dma_init(struct mt7996_dev *dev)
if (ret)
return ret;
- /* tx free notify event from WA for band0 */
- dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].flags = MT_WED_Q_TXFREE;
- dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].wed = wed;
+ if (is_mt7992(&dev->mt76)) {
+ dev->mt76.q_rx[MT_RXQ_RRO_BAND1].flags =
+ MT_WED_RRO_Q_DATA(1) | MT_QFLAG_WED_RRO_EN;
+ if (mtk_wed_device_active(wed) &&
+ mtk_wed_get_rx_capa(wed))
+ dev->mt76.q_rx[MT_RXQ_RRO_BAND1].wed = wed;
+ ret = mt76_queue_alloc(dev,
+ &dev->mt76.q_rx[MT_RXQ_RRO_BAND1],
+ MT_RXQ_ID(MT_RXQ_RRO_BAND1),
+ MT7996_RX_RING_SIZE,
+ MT7996_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_RRO_BAND1) + hif1_ofs);
+ if (ret)
+ return ret;
+ } else {
+ if (mtk_wed_device_active(wed)) {
+ /* tx free notify event from WA for band0 */
+ dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].flags = MT_WED_Q_TXFREE;
+ dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].wed = wed;
+ }
- ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0],
- MT_RXQ_ID(MT_RXQ_TXFREE_BAND0),
- MT7996_RX_MCU_RING_SIZE,
- MT7996_RX_BUF_SIZE,
- MT_RXQ_RING_BASE(MT_RXQ_TXFREE_BAND0));
- if (ret)
- return ret;
+ ret = mt76_queue_alloc(dev,
+ &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0],
+ MT_RXQ_ID(MT_RXQ_TXFREE_BAND0),
+ MT7996_RX_MCU_RING_SIZE,
+ MT7996_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_TXFREE_BAND0));
+ if (ret)
+ return ret;
+ }
if (mt7996_band_valid(dev, MT_BAND2)) {
/* rx rro data queue for band2 */
dev->mt76.q_rx[MT_RXQ_RRO_BAND2].flags =
MT_WED_RRO_Q_DATA(1) | MT_QFLAG_WED_RRO_EN;
- dev->mt76.q_rx[MT_RXQ_RRO_BAND2].wed = wed;
+ if (mtk_wed_device_active(wed) &&
+ mtk_wed_get_rx_capa(wed))
+ dev->mt76.q_rx[MT_RXQ_RRO_BAND2].wed = wed;
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_RRO_BAND2],
MT_RXQ_ID(MT_RXQ_RRO_BAND2),
MT7996_RX_RING_SIZE,
@@ -752,6 +927,10 @@ void mt7996_dma_reset(struct mt7996_dev *dev, bool force)
mt76_tx_status_check(&dev->mt76, true);
+ if (mt7996_has_hwrro(dev) &&
+ !mtk_wed_device_active(&dev->mt76.mmio.wed))
+ mt7996_rro_msdu_page_map_free(dev);
+
/* reset wfsys */
if (force)
mt7996_wfsys_reset(dev);
@@ -775,21 +954,32 @@ void mt7996_dma_reset(struct mt7996_dev *dev, bool force)
}
for (i = 0; i < __MT_MCUQ_MAX; i++)
- mt76_queue_reset(dev, dev->mt76.q_mcu[i]);
+ mt76_queue_reset(dev, dev->mt76.q_mcu[i], true);
mt76_for_each_q_rx(&dev->mt76, i) {
- if (mtk_wed_device_active(&dev->mt76.mmio.wed))
- if (mt76_queue_is_wed_rro(&dev->mt76.q_rx[i]) ||
- mt76_queue_is_wed_tx_free(&dev->mt76.q_rx[i]))
- continue;
+ struct mt76_queue *q = &dev->mt76.q_rx[i];
- mt76_queue_reset(dev, &dev->mt76.q_rx[i]);
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
+ if (mt76_queue_is_wed_rro(q) ||
+ mt76_queue_is_wed_tx_free(q)) {
+ if (force && mt76_queue_is_wed_rro_data(q))
+ mt76_queue_reset(dev, q, false);
+ continue;
+ }
+ }
+ mt76_queue_reset(dev, q, true);
}
mt76_tx_status_check(&dev->mt76, true);
- mt76_for_each_q_rx(&dev->mt76, i)
+ mt76_for_each_q_rx(&dev->mt76, i) {
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed) && force &&
+ (mt76_queue_is_wed_rro_ind(&dev->mt76.q_rx[i]) ||
+ mt76_queue_is_wed_rro_msdu_pg(&dev->mt76.q_rx[i])))
+ continue;
+
mt76_queue_rx_reset(dev, i);
+ }
mt7996_dma_enable(dev, !force);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c
index 87c6192b6384..da3231c9aa11 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c
@@ -334,9 +334,8 @@ int mt7996_eeprom_init(struct mt7996_dev *dev)
return ret;
memcpy(dev->mphy.macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR, ETH_ALEN);
- mt76_eeprom_override(&dev->mphy);
- return 0;
+ return mt76_eeprom_override(&dev->mphy);
}
int mt7996_eeprom_get_target_power(struct mt7996_dev *dev,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/init.c b/drivers/net/wireless/mediatek/mt76/mt7996/init.c
index a9599c286328..5e95a36b42d1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/init.c
@@ -63,6 +63,33 @@ static const struct ieee80211_iface_combination if_comb = {
.beacon_int_min_gcd = 100,
};
+static const u8 if_types_ext_capa_ap[] = {
+ [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
+ [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT,
+ [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
+};
+
+static const struct wiphy_iftype_ext_capab iftypes_ext_capa[] = {
+ {
+ .iftype = NL80211_IFTYPE_AP,
+ .extended_capabilities = if_types_ext_capa_ap,
+ .extended_capabilities_mask = if_types_ext_capa_ap,
+ .extended_capabilities_len = sizeof(if_types_ext_capa_ap),
+ .eml_capabilities = IEEE80211_EML_CAP_EMLSR_SUPP,
+ .mld_capa_and_ops =
+ FIELD_PREP_CONST(IEEE80211_MLD_CAP_OP_MAX_SIMUL_LINKS,
+ MT7996_MAX_RADIOS - 1),
+ }, {
+ .iftype = NL80211_IFTYPE_STATION,
+ .extended_capabilities = if_types_ext_capa_ap,
+ .extended_capabilities_mask = if_types_ext_capa_ap,
+ .extended_capabilities_len = sizeof(if_types_ext_capa_ap),
+ .mld_capa_and_ops =
+ FIELD_PREP_CONST(IEEE80211_MLD_CAP_OP_MAX_SIMUL_LINKS,
+ MT7996_MAX_RADIOS - 1),
+ },
+};
+
static ssize_t mt7996_thermal_temp_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -383,6 +410,7 @@ mt7996_init_wiphy_band(struct ieee80211_hw *hw, struct mt7996_phy *phy)
phy->slottime = 9;
phy->beacon_rate = -1;
+ phy->rxfilter = MT_WF_RFCR_DROP_OTHER_UC;
if (phy->mt76->cap.has_2ghz) {
phy->mt76->sband_2g.sband.ht_cap.cap |=
@@ -463,8 +491,11 @@ mt7996_init_wiphy(struct ieee80211_hw *hw, struct mtk_wed_device *wed)
wiphy->radio = dev->radios;
wiphy->reg_notifier = mt7996_regd_notifier;
- wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+ wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH |
+ WIPHY_FLAG_SUPPORTS_MLO;
wiphy->mbssid_max_interfaces = 16;
+ wiphy->iftype_ext_capab = iftypes_ext_capa;
+ wiphy->num_iftype_ext_capab = ARRAY_SIZE(iftypes_ext_capa);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BSS_COLOR);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
@@ -575,19 +606,21 @@ void mt7996_mac_init(struct mt7996_dev *dev)
}
/* rro module init */
- if (is_mt7996(&dev->mt76))
- mt7996_mcu_set_rro(dev, UNI_RRO_SET_PLATFORM_TYPE, 2);
- else
+ if (dev->hif2)
mt7996_mcu_set_rro(dev, UNI_RRO_SET_PLATFORM_TYPE,
- dev->hif2 ? 7 : 0);
+ is_mt7996(&dev->mt76) ? 2 : 7);
+ else
+ mt7996_mcu_set_rro(dev, UNI_RRO_SET_PLATFORM_TYPE, 0);
- if (dev->has_rro) {
+ if (mt7996_has_hwrro(dev)) {
u16 timeout;
timeout = mt76_rr(dev, MT_HW_REV) == MT_HW_REV1 ? 512 : 128;
mt7996_mcu_set_rro(dev, UNI_RRO_SET_FLUSH_TIMEOUT, timeout);
- mt7996_mcu_set_rro(dev, UNI_RRO_SET_BYPASS_MODE, 1);
- mt7996_mcu_set_rro(dev, UNI_RRO_SET_TXFREE_PATH, 0);
+ mt7996_mcu_set_rro(dev, UNI_RRO_SET_BYPASS_MODE,
+ is_mt7996(&dev->mt76) ? 1 : 2);
+ mt7996_mcu_set_rro(dev, UNI_RRO_SET_TXFREE_PATH,
+ !is_mt7996(&dev->mt76));
} else {
mt7996_mcu_set_rro(dev, UNI_RRO_SET_BYPASS_MODE, 3);
mt7996_mcu_set_rro(dev, UNI_RRO_SET_TXFREE_PATH, 1);
@@ -634,7 +667,9 @@ static int mt7996_register_phy(struct mt7996_dev *dev, enum mt76_band_id band)
if (!mt7996_band_valid(dev, band))
return 0;
- if (is_mt7996(&dev->mt76) && band == MT_BAND2 && dev->hif2) {
+ if (dev->hif2 &&
+ ((is_mt7996(&dev->mt76) && band == MT_BAND2) ||
+ (is_mt7992(&dev->mt76) && band == MT_BAND1))) {
hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
wed = &dev->mt76.mmio.wed_hif2;
}
@@ -667,17 +702,26 @@ static int mt7996_register_phy(struct mt7996_dev *dev, enum mt76_band_id band)
if (band == MT_BAND2)
mphy->macaddr[0] ^= BIT(6);
}
- mt76_eeprom_override(mphy);
+ ret = mt76_eeprom_override(mphy);
+ if (ret)
+ goto error;
/* init wiphy according to mphy and phy */
mt7996_init_wiphy_band(mphy->hw, phy);
- ret = mt7996_init_tx_queues(mphy->priv,
- MT_TXQ_ID(band),
- MT7996_TX_RING_SIZE,
- MT_TXQ_RING_BASE(band) + hif1_ofs,
- wed);
- if (ret)
- goto error;
+
+ if (is_mt7996(&dev->mt76) && !dev->hif2 && band == MT_BAND1) {
+ int i;
+
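+		/* single-PCIe MT7996: band1 reuses band0's data TX ring */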
+ for (i = 0; i <= MT_TXQ_PSD; i++)
+ mphy->q_tx[i] = dev->mt76.phys[MT_BAND0]->q_tx[0];
+ } else {
+ ret = mt7996_init_tx_queues(mphy->priv, MT_TXQ_ID(band),
+ MT7996_TX_RING_SIZE,
+ MT_TXQ_RING_BASE(band) + hif1_ofs,
+ wed);
+ if (ret)
+ goto error;
+ }
ret = mt76_register_phy(mphy, true, mt76_rates,
ARRAY_SIZE(mt76_rates));
@@ -685,10 +729,9 @@ static int mt7996_register_phy(struct mt7996_dev *dev, enum mt76_band_id band)
goto error;
if (wed == &dev->mt76.mmio.wed_hif2 && mtk_wed_device_active(wed)) {
- u32 irq_mask = dev->mt76.mmio.irqmask | MT_INT_TX_DONE_BAND2;
-
- mt76_wr(dev, MT_INT1_MASK_CSR, irq_mask);
- mtk_wed_device_start(&dev->mt76.mmio.wed_hif2, irq_mask);
+ mt76_wr(dev, MT_INT_PCIE1_MASK_CSR, MT_INT_TX_RX_DONE_EXT);
+ mtk_wed_device_start(&dev->mt76.mmio.wed_hif2,
+ MT_INT_TX_RX_DONE_EXT);
}
return 0;
@@ -724,30 +767,151 @@ void mt7996_wfsys_reset(struct mt7996_dev *dev)
msleep(20);
}
-static int mt7996_wed_rro_init(struct mt7996_dev *dev)
+static void mt7996_rro_hw_init_v3(struct mt7996_dev *dev)
{
-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
+ u32 session_id;
+
+ if (dev->mt76.hwrro_mode == MT76_HWRRO_V3_1)
+ return;
+
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed)) {
+ wed->wlan.ind_cmd.win_size =
+ ffs(MT7996_RRO_WINDOW_MAX_LEN) - 6;
+ if (is_mt7996(&dev->mt76))
+ wed->wlan.ind_cmd.particular_sid =
+ MT7996_RRO_MAX_SESSION;
+ else
+ wed->wlan.ind_cmd.particular_sid = 1;
+ wed->wlan.ind_cmd.particular_se_phys =
+ dev->wed_rro.session.phy_addr;
+ wed->wlan.ind_cmd.se_group_nums = MT7996_RRO_ADDR_ELEM_LEN;
+ wed->wlan.ind_cmd.ack_sn_addr = MT_RRO_ACK_SN_CTRL;
+ }
+#endif /* CONFIG_NET_MEDIATEK_SOC_WED */
+
+ if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed)) {
+ mt76_wr(dev, MT_RRO_IND_CMD_SIGNATURE_BASE0, 0x15010e00);
+ mt76_set(dev, MT_RRO_IND_CMD_SIGNATURE_BASE1,
+ MT_RRO_IND_CMD_SIGNATURE_BASE1_EN);
+ } else {
+ mt76_wr(dev, MT_RRO_IND_CMD_SIGNATURE_BASE0, 0);
+ mt76_wr(dev, MT_RRO_IND_CMD_SIGNATURE_BASE1, 0);
+ }
+
+ /* particular session configuration: use max session idx + 1 as
+ * the particular session id
+ */
+ mt76_wr(dev, MT_RRO_PARTICULAR_CFG0, dev->wed_rro.session.phy_addr);
+
+ session_id = is_mt7996(&dev->mt76) ? MT7996_RRO_MAX_SESSION : 1;
+ mt76_wr(dev, MT_RRO_PARTICULAR_CFG1,
+ MT_RRO_PARTICULAR_CONFG_EN |
+ FIELD_PREP(MT_RRO_PARTICULAR_SID, session_id));
+}
+
+void mt7996_rro_hw_init(struct mt7996_dev *dev)
+{
u32 reg = MT_RRO_ADDR_ELEM_SEG_ADDR0;
+ int i;
+
+ if (!mt7996_has_hwrro(dev))
+ return;
+
+ INIT_LIST_HEAD(&dev->wed_rro.page_cache);
+ for (i = 0; i < ARRAY_SIZE(dev->wed_rro.page_map); i++)
+ INIT_LIST_HEAD(&dev->wed_rro.page_map[i]);
+
+ if (!is_mt7996(&dev->mt76)) {
+ reg = MT_RRO_MSDU_PG_SEG_ADDR0;
+
+ if (dev->mt76.hwrro_mode == MT76_HWRRO_V3_1) {
+ mt76_clear(dev, MT_RRO_3_0_EMU_CONF,
+ MT_RRO_3_0_EMU_CONF_EN_MASK);
+ mt76_set(dev, MT_RRO_3_1_GLOBAL_CONFIG,
+ MT_RRO_3_1_GLOBAL_CONFIG_RXDMAD_SEL);
+ if (!mtk_wed_device_active(&dev->mt76.mmio.wed)) {
+ mt76_set(dev, MT_RRO_3_1_GLOBAL_CONFIG,
+ MT_RRO_3_1_GLOBAL_CONFIG_RX_DIDX_WR_EN |
+ MT_RRO_3_1_GLOBAL_CONFIG_RX_CIDX_RD_EN);
+ mt76_wr(dev, MT_RRO_RX_RING_AP_CIDX_ADDR,
+ dev->wed_rro.emi_rings_cpu.phy_addr >> 4);
+ mt76_wr(dev, MT_RRO_RX_RING_AP_DIDX_ADDR,
+ dev->wed_rro.emi_rings_dma.phy_addr >> 4);
+ }
+ } else {
+ /* set emul 3.0 function */
+ mt76_wr(dev, MT_RRO_3_0_EMU_CONF,
+ MT_RRO_3_0_EMU_CONF_EN_MASK);
+
+ mt76_wr(dev, MT_RRO_ADDR_ARRAY_BASE0,
+ dev->wed_rro.addr_elem[0].phy_addr);
+ }
+
+ mt76_set(dev, MT_RRO_3_1_GLOBAL_CONFIG,
+ MT_RRO_3_1_GLOBAL_CONFIG_INTERLEAVE_EN);
+
+ /* setup MSDU page addresses */
+ for (i = 0; i < ARRAY_SIZE(dev->wed_rro.msdu_pg); i++) {
+ mt76_wr(dev, reg,
+ dev->wed_rro.msdu_pg[i].phy_addr >> 4);
+ reg += 4;
+ }
+ } else {
+ /* TODO: remove line after WM has set */
+ mt76_clear(dev, WF_RRO_AXI_MST_CFG,
+ WF_RRO_AXI_MST_CFG_DIDX_OK);
+
+ /* setup BA bitmap cache address */
+ mt76_wr(dev, MT_RRO_BA_BITMAP_BASE0,
+ dev->wed_rro.ba_bitmap[0].phy_addr);
+ mt76_wr(dev, MT_RRO_BA_BITMAP_BASE1, 0);
+ mt76_wr(dev, MT_RRO_BA_BITMAP_BASE_EXT0,
+ dev->wed_rro.ba_bitmap[1].phy_addr);
+ mt76_wr(dev, MT_RRO_BA_BITMAP_BASE_EXT1, 0);
+
+ /* Setup Address element address */
+ for (i = 0; i < ARRAY_SIZE(dev->wed_rro.addr_elem); i++) {
+ mt76_wr(dev, reg,
+ dev->wed_rro.addr_elem[i].phy_addr >> 4);
+ reg += 4;
+ }
+
+ /* Setup Address element address - separate address segment
+ * mode.
+ */
+ mt76_wr(dev, MT_RRO_ADDR_ARRAY_BASE1,
+ MT_RRO_ADDR_ARRAY_ELEM_ADDR_SEG_MODE);
+ }
+
+ mt7996_rro_hw_init_v3(dev);
+
+ /* interrupt enable */
+ mt76_wr(dev, MT_RRO_HOST_INT_ENA,
+ MT_RRO_HOST_INT_ENA_HOST_RRO_DONE_ENA);
+}
+
+static int mt7996_wed_rro_init(struct mt7996_dev *dev)
+{
+ u32 val = FIELD_PREP(WED_RRO_ADDR_SIGNATURE_MASK, 0xff);
struct mt7996_wed_rro_addr *addr;
void *ptr;
int i;
- if (!dev->has_rro)
+ if (!mt7996_has_hwrro(dev))
return 0;
- if (!mtk_wed_device_active(wed))
- return 0;
+ if (dev->mt76.hwrro_mode == MT76_HWRRO_V3) {
+ for (i = 0; i < ARRAY_SIZE(dev->wed_rro.ba_bitmap); i++) {
+ ptr = dmam_alloc_coherent(dev->mt76.dma_dev,
+ MT7996_RRO_BA_BITMAP_CR_SIZE,
+ &dev->wed_rro.ba_bitmap[i].phy_addr,
+ GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
- for (i = 0; i < ARRAY_SIZE(dev->wed_rro.ba_bitmap); i++) {
- ptr = dmam_alloc_coherent(dev->mt76.dma_dev,
- MT7996_RRO_BA_BITMAP_CR_SIZE,
- &dev->wed_rro.ba_bitmap[i].phy_addr,
- GFP_KERNEL);
- if (!ptr)
- return -ENOMEM;
-
- dev->wed_rro.ba_bitmap[i].ptr = ptr;
+ dev->wed_rro.ba_bitmap[i].ptr = ptr;
+ }
}
for (i = 0; i < ARRAY_SIZE(dev->wed_rro.addr_elem); i++) {
@@ -766,12 +930,53 @@ static int mt7996_wed_rro_init(struct mt7996_dev *dev)
addr = dev->wed_rro.addr_elem[i].ptr;
for (j = 0; j < MT7996_RRO_WINDOW_MAX_SIZE; j++) {
- addr->signature = 0xff;
+ addr->data = cpu_to_le32(val);
addr++;
}
- wed->wlan.ind_cmd.addr_elem_phys[i] =
- dev->wed_rro.addr_elem[i].phy_addr;
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
+ mtk_wed_get_rx_capa(&dev->mt76.mmio.wed)) {
+ struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
+
+ wed->wlan.ind_cmd.addr_elem_phys[i] =
+ dev->wed_rro.addr_elem[i].phy_addr;
+ }
+#endif /* CONFIG_NET_MEDIATEK_SOC_WED */
+ }
+
+ for (i = 0; i < ARRAY_SIZE(dev->wed_rro.msdu_pg); i++) {
+ ptr = dmam_alloc_coherent(dev->mt76.dma_dev,
+ MT7996_RRO_MSDU_PG_SIZE_PER_CR,
+ &dev->wed_rro.msdu_pg[i].phy_addr,
+ GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ dev->wed_rro.msdu_pg[i].ptr = ptr;
+
+ memset(dev->wed_rro.msdu_pg[i].ptr, 0,
+ MT7996_RRO_MSDU_PG_SIZE_PER_CR);
+ }
+
+ if (dev->mt76.hwrro_mode == MT76_HWRRO_V3_1) {
+ ptr = dmam_alloc_coherent(dev->mt76.dma_dev,
+ sizeof(*dev->wed_rro.emi_rings_cpu.ptr),
+ &dev->wed_rro.emi_rings_cpu.phy_addr,
+ GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ dev->wed_rro.emi_rings_cpu.ptr = ptr;
+
+ ptr = dmam_alloc_coherent(dev->mt76.dma_dev,
+ sizeof(*dev->wed_rro.emi_rings_dma.ptr),
+ &dev->wed_rro.emi_rings_dma.phy_addr,
+ GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ dev->wed_rro.emi_rings_dma.ptr = ptr;
}
ptr = dmam_alloc_coherent(dev->mt76.dma_dev,
@@ -784,69 +989,20 @@ static int mt7996_wed_rro_init(struct mt7996_dev *dev)
dev->wed_rro.session.ptr = ptr;
addr = dev->wed_rro.session.ptr;
for (i = 0; i < MT7996_RRO_WINDOW_MAX_LEN; i++) {
- addr->signature = 0xff;
+ addr->data = cpu_to_le32(val);
addr++;
}
- /* rro hw init */
- /* TODO: remove line after WM has set */
- mt76_clear(dev, WF_RRO_AXI_MST_CFG, WF_RRO_AXI_MST_CFG_DIDX_OK);
+ mt7996_rro_hw_init(dev);
- /* setup BA bitmap cache address */
- mt76_wr(dev, MT_RRO_BA_BITMAP_BASE0,
- dev->wed_rro.ba_bitmap[0].phy_addr);
- mt76_wr(dev, MT_RRO_BA_BITMAP_BASE1, 0);
- mt76_wr(dev, MT_RRO_BA_BITMAP_BASE_EXT0,
- dev->wed_rro.ba_bitmap[1].phy_addr);
- mt76_wr(dev, MT_RRO_BA_BITMAP_BASE_EXT1, 0);
-
- /* setup Address element address */
- for (i = 0; i < ARRAY_SIZE(dev->wed_rro.addr_elem); i++) {
- mt76_wr(dev, reg, dev->wed_rro.addr_elem[i].phy_addr >> 4);
- reg += 4;
- }
-
- /* setup Address element address - separate address segment mode */
- mt76_wr(dev, MT_RRO_ADDR_ARRAY_BASE1,
- MT_RRO_ADDR_ARRAY_ELEM_ADDR_SEG_MODE);
-
- wed->wlan.ind_cmd.win_size = ffs(MT7996_RRO_WINDOW_MAX_LEN) - 6;
- wed->wlan.ind_cmd.particular_sid = MT7996_RRO_MAX_SESSION;
- wed->wlan.ind_cmd.particular_se_phys = dev->wed_rro.session.phy_addr;
- wed->wlan.ind_cmd.se_group_nums = MT7996_RRO_ADDR_ELEM_LEN;
- wed->wlan.ind_cmd.ack_sn_addr = MT_RRO_ACK_SN_CTRL;
-
- mt76_wr(dev, MT_RRO_IND_CMD_SIGNATURE_BASE0, 0x15010e00);
- mt76_set(dev, MT_RRO_IND_CMD_SIGNATURE_BASE1,
- MT_RRO_IND_CMD_SIGNATURE_BASE1_EN);
-
- /* particular session configure */
- /* use max session idx + 1 as particular session id */
- mt76_wr(dev, MT_RRO_PARTICULAR_CFG0, dev->wed_rro.session.phy_addr);
- mt76_wr(dev, MT_RRO_PARTICULAR_CFG1,
- MT_RRO_PARTICULAR_CONFG_EN |
- FIELD_PREP(MT_RRO_PARTICULAR_SID, MT7996_RRO_MAX_SESSION));
-
- /* interrupt enable */
- mt76_wr(dev, MT_RRO_HOST_INT_ENA,
- MT_RRO_HOST_INT_ENA_HOST_RRO_DONE_ENA);
-
- /* rro ind cmd queue init */
return mt7996_dma_rro_init(dev);
-#else
- return 0;
-#endif
}
static void mt7996_wed_rro_free(struct mt7996_dev *dev)
{
-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
int i;
- if (!dev->has_rro)
- return;
-
- if (!mtk_wed_device_active(&dev->mt76.mmio.wed))
+ if (!mt7996_has_hwrro(dev))
return;
for (i = 0; i < ARRAY_SIZE(dev->wed_rro.ba_bitmap); i++) {
@@ -870,6 +1026,16 @@ static void mt7996_wed_rro_free(struct mt7996_dev *dev)
dev->wed_rro.addr_elem[i].phy_addr);
}
+ for (i = 0; i < ARRAY_SIZE(dev->wed_rro.msdu_pg); i++) {
+ if (!dev->wed_rro.msdu_pg[i].ptr)
+ continue;
+
+ dmam_free_coherent(dev->mt76.dma_dev,
+ MT7996_RRO_MSDU_PG_SIZE_PER_CR,
+ dev->wed_rro.msdu_pg[i].ptr,
+ dev->wed_rro.msdu_pg[i].phy_addr);
+ }
+
if (!dev->wed_rro.session.ptr)
return;
@@ -878,12 +1044,11 @@ static void mt7996_wed_rro_free(struct mt7996_dev *dev)
sizeof(struct mt7996_wed_rro_addr),
dev->wed_rro.session.ptr,
dev->wed_rro.session.phy_addr);
-#endif
}
static void mt7996_wed_rro_work(struct work_struct *work)
{
-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ u32 val = FIELD_PREP(WED_RRO_ADDR_SIGNATURE_MASK, 0xff);
struct mt7996_dev *dev;
LIST_HEAD(list);
@@ -920,13 +1085,12 @@ static void mt7996_wed_rro_work(struct work_struct *work)
MT7996_RRO_WINDOW_MAX_LEN;
reset:
elem = ptr + elem_id * sizeof(*elem);
- elem->signature = 0xff;
+ elem->data |= cpu_to_le32(val);
}
mt7996_mcu_wed_rro_reset_sessions(dev, e->id);
out:
kfree(e);
}
-#endif
}
static int mt7996_variant_type_init(struct mt7996_dev *dev)
@@ -1321,7 +1485,6 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band,
eht_cap->has_eht = true;
eht_cap_elem->mac_cap_info[0] =
- IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS |
IEEE80211_EHT_MAC_CAP0_OM_CONTROL |
u8_encode_bits(IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_11454,
IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_MASK);
@@ -1551,6 +1714,9 @@ void mt7996_unregister_device(struct mt7996_dev *dev)
mt7996_mcu_exit(dev);
mt7996_tx_token_put(dev);
mt7996_dma_cleanup(dev);
+ if (mt7996_has_hwrro(dev) &&
+ !mtk_wed_device_active(&dev->mt76.mmio.wed))
+ mt7996_rro_msdu_page_map_free(dev);
tasklet_disable(&dev->mt76.irq_tasklet);
mt76_free_device(&dev->mt76);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
index b3fcca9bbb95..9501def3e0e3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
@@ -229,7 +229,9 @@ static int mt7996_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
{
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
struct ethhdr *eth_hdr = (struct ethhdr *)(skb->data + hdr_gap);
- struct mt7996_sta *msta = (struct mt7996_sta *)status->wcid;
+ struct mt7996_sta_link *msta_link = (void *)status->wcid;
+ struct mt7996_sta *msta = msta_link->sta;
+ struct ieee80211_bss_conf *link_conf;
__le32 *rxd = (__le32 *)skb->data;
struct ieee80211_sta *sta;
struct ieee80211_vif *vif;
@@ -246,8 +248,11 @@ static int mt7996_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
if (!msta || !msta->vif)
return -EINVAL;
- sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
+ sta = wcid_to_sta(status->wcid);
vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
+ link_conf = rcu_dereference(vif->link_conf[msta_link->wcid.link_id]);
+ if (!link_conf)
+ return -EINVAL;
/* store the info from RXD and ethhdr to avoid being overridden */
frame_control = le32_get_bits(rxd[8], MT_RXD8_FRAME_CONTROL);
@@ -260,7 +265,7 @@ static int mt7996_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
switch (frame_control & (IEEE80211_FCTL_TODS |
IEEE80211_FCTL_FROMDS)) {
case 0:
- ether_addr_copy(hdr.addr3, vif->bss_conf.bssid);
+ ether_addr_copy(hdr.addr3, link_conf->bssid);
break;
case IEEE80211_FCTL_FROMDS:
ether_addr_copy(hdr.addr3, eth_hdr->h_source);
@@ -797,6 +802,9 @@ mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ) {
if (is_mt7990(&dev->mt76))
txwi[6] |= cpu_to_le32(FIELD_PREP(MT_TXD6_TID_ADDBA, tid));
+ else
+ txwi[7] |= cpu_to_le32(MT_TXD7_MAC_TXD);
+
tid = MT_TX_ADDBA;
} else if (ieee80211_is_mgmt(hdr->frame_control)) {
tid = MT_TX_NORMAL;
@@ -960,8 +968,9 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
val |= MT_TXD5_TX_STATUS_HOST;
txwi[5] = cpu_to_le32(val);
- val = MT_TXD6_DAS;
- if (q_idx >= MT_LMAC_ALTX0 && q_idx <= MT_LMAC_BCN0)
+ val = MT_TXD6_DAS | MT_TXD6_VTA;
+ if ((q_idx >= MT_LMAC_ALTX0 && q_idx <= MT_LMAC_BCN0) ||
+ skb->protocol == cpu_to_be16(ETH_P_PAE))
val |= MT_TXD6_DIS_MAT;
if (is_mt7996(&dev->mt76))
@@ -1029,10 +1038,10 @@ int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
struct ieee80211_key_conf *key = info->control.hw_key;
struct ieee80211_vif *vif = info->control.vif;
- struct mt76_connac_txp_common *txp;
struct mt76_txwi_cache *t;
int id, i, pid, nbuf = tx_info->nbuf - 1;
bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
+ __le32 *ptr = (__le32 *)txwi_ptr;
u8 *txwi = (u8 *)txwi_ptr;
if (unlikely(tx_info->skb->len <= ETH_HLEN))
@@ -1048,6 +1057,41 @@ int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
if (id < 0)
return id;
+ /* Since the rules of HW MLD address translation are not fully
+ * compatible with 802.11 EAPOL frames, we do the translation in
+ * software
+ */
+ if (tx_info->skb->protocol == cpu_to_be16(ETH_P_PAE) && sta->mlo) {
+ struct ieee80211_hdr *hdr = (void *)tx_info->skb->data;
+ struct ieee80211_bss_conf *link_conf;
+ struct ieee80211_link_sta *link_sta;
+
+ link_conf = rcu_dereference(vif->link_conf[wcid->link_id]);
+ if (!link_conf)
+ return -EINVAL;
+
+ link_sta = rcu_dereference(sta->link[wcid->link_id]);
+ if (!link_sta)
+ return -EINVAL;
+
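+		/* buf[1] carries the 802.11 header; sync it for CPU access
+		 * before rewriting the addresses
+		 */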
+ dma_sync_single_for_cpu(mdev->dma_dev, tx_info->buf[1].addr,
+ tx_info->buf[1].len, DMA_TO_DEVICE);
+
+ memcpy(hdr->addr1, link_sta->addr, ETH_ALEN);
+ memcpy(hdr->addr2, link_conf->addr, ETH_ALEN);
+ if (ieee80211_has_a4(hdr->frame_control)) {
+ memcpy(hdr->addr3, sta->addr, ETH_ALEN);
+ memcpy(hdr->addr4, vif->addr, ETH_ALEN);
+ } else if (ieee80211_has_tods(hdr->frame_control)) {
+ memcpy(hdr->addr3, sta->addr, ETH_ALEN);
+ } else if (ieee80211_has_fromds(hdr->frame_control)) {
+ memcpy(hdr->addr3, vif->addr, ETH_ALEN);
+ }
+
+ dma_sync_single_for_device(mdev->dma_dev, tx_info->buf[1].addr,
+ tx_info->buf[1].len, DMA_TO_DEVICE);
+ }
+
pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
memset(txwi_ptr, 0, MT_TXD_SIZE);
/* Transmit non qos data by 802.11 header and need to fill txd by host*/
@@ -1055,46 +1099,76 @@ int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
mt7996_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, key,
pid, qid, 0);
- txp = (struct mt76_connac_txp_common *)(txwi + MT_TXD_SIZE);
- for (i = 0; i < nbuf; i++) {
- u16 len;
+ /* MT7996 and MT7992 require the driver to provide the MAC TXP for
+ * ADDBA requests
+ */
+ if (le32_to_cpu(ptr[7]) & MT_TXD7_MAC_TXD) {
+ u32 val;
+
+ ptr = (__le32 *)(txwi + MT_TXD_SIZE);
+ memset((void *)ptr, 0, sizeof(struct mt76_connac_fw_txp));
+
+ val = FIELD_PREP(MT_TXP0_TOKEN_ID0, id) |
+ MT_TXP0_TOKEN_ID0_VALID_MASK;
+ ptr[0] = cpu_to_le32(val);
- len = FIELD_PREP(MT_TXP_BUF_LEN, tx_info->buf[i + 1].len);
+ val = FIELD_PREP(MT_TXP1_TID_ADDBA,
+ tx_info->skb->priority &
+ IEEE80211_QOS_CTL_TID_MASK);
+ ptr[1] = cpu_to_le32(val);
+ ptr[2] = cpu_to_le32(tx_info->buf[1].addr & 0xFFFFFFFF);
+
+ val = FIELD_PREP(MT_TXP_BUF_LEN, tx_info->buf[1].len) |
+ MT_TXP3_ML0_MASK;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- len |= FIELD_PREP(MT_TXP_DMA_ADDR_H,
- tx_info->buf[i + 1].addr >> 32);
+ val |= FIELD_PREP(MT_TXP3_DMA_ADDR_H,
+ tx_info->buf[1].addr >> 32);
#endif
+ ptr[3] = cpu_to_le32(val);
+ } else {
+ struct mt76_connac_txp_common *txp;
- txp->fw.buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
- txp->fw.len[i] = cpu_to_le16(len);
- }
- txp->fw.nbuf = nbuf;
+ txp = (struct mt76_connac_txp_common *)(txwi + MT_TXD_SIZE);
+ for (i = 0; i < nbuf; i++) {
+ u16 len;
- txp->fw.flags = cpu_to_le16(MT_CT_INFO_FROM_HOST);
+ len = FIELD_PREP(MT_TXP_BUF_LEN, tx_info->buf[i + 1].len);
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ len |= FIELD_PREP(MT_TXP_DMA_ADDR_H,
+ tx_info->buf[i + 1].addr >> 32);
+#endif
- if (!is_8023 || pid >= MT_PACKET_ID_FIRST)
- txp->fw.flags |= cpu_to_le16(MT_CT_INFO_APPLY_TXD);
+ txp->fw.buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
+ txp->fw.len[i] = cpu_to_le16(len);
+ }
+ txp->fw.nbuf = nbuf;
- if (!key)
- txp->fw.flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);
+ txp->fw.flags = cpu_to_le16(MT_CT_INFO_FROM_HOST);
- if (!is_8023 && mt7996_tx_use_mgmt(dev, tx_info->skb))
- txp->fw.flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);
+ if (!is_8023 || pid >= MT_PACKET_ID_FIRST)
+ txp->fw.flags |= cpu_to_le16(MT_CT_INFO_APPLY_TXD);
- if (vif) {
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- struct mt76_vif_link *mlink = NULL;
+ if (!key)
+ txp->fw.flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);
- if (wcid->offchannel)
- mlink = rcu_dereference(mvif->mt76.offchannel_link);
- if (!mlink)
- mlink = rcu_dereference(mvif->mt76.link[wcid->link_id]);
+ if (!is_8023 && mt7996_tx_use_mgmt(dev, tx_info->skb))
+ txp->fw.flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);
- txp->fw.bss_idx = mlink ? mlink->idx : mvif->deflink.mt76.idx;
- }
+ if (vif) {
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct mt76_vif_link *mlink = NULL;
+
+ if (wcid->offchannel)
+ mlink = rcu_dereference(mvif->mt76.offchannel_link);
+ if (!mlink)
+ mlink = rcu_dereference(mvif->mt76.link[wcid->link_id]);
- txp->fw.token = cpu_to_le16(id);
- txp->fw.rept_wds_wcid = cpu_to_le16(sta ? wcid->idx : 0xfff);
+ txp->fw.bss_idx = mlink ? mlink->idx : mvif->deflink.mt76.idx;
+ }
+
+ txp->fw.token = cpu_to_le16(id);
+ txp->fw.rept_wds_wcid = cpu_to_le16(sta ? wcid->idx : 0xfff);
+ }
tx_info->skb = NULL;
@@ -1183,8 +1257,14 @@ mt7996_txwi_free(struct mt7996_dev *dev, struct mt76_txwi_cache *t,
txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t);
if (link_sta) {
wcid_idx = wcid->idx;
- if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
- mt7996_tx_check_aggr(link_sta, wcid, t->skb);
+ if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE))) {
+ struct mt7996_sta *msta;
+
+ /* AMPDU state is stored in the primary link */
+ msta = (void *)link_sta->sta->drv_priv;
+ mt7996_tx_check_aggr(link_sta, &msta->deflink.wcid,
+ t->skb);
+ }
} else {
wcid_idx = le32_get_bits(txwi[9], MT_TXD9_WLAN_IDX);
}
@@ -1242,6 +1322,9 @@ mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len)
info = le32_to_cpu(*cur_info);
if (info & MT_TXFREE_INFO_PAIR) {
struct ieee80211_sta *sta;
+ unsigned long valid_links;
+ struct mt7996_sta *msta;
+ unsigned int id;
u16 idx;
idx = FIELD_GET(MT_TXFREE_INFO_WLAN_ID, info);
@@ -1256,7 +1339,21 @@ mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len)
if (!link_sta)
goto next;
- mt76_wcid_add_poll(&dev->mt76, wcid);
+ msta = (struct mt7996_sta *)sta->drv_priv;
+ valid_links = sta->valid_links ?: BIT(0);
+
+ /* For an MLD STA, add each link's wcid to the sta_poll_list */
+ for_each_set_bit(id, &valid_links,
+ IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct mt7996_sta_link *msta_link;
+
+ msta_link = rcu_dereference(msta->link[id]);
+ if (!msta_link)
+ continue;
+
+ mt76_wcid_add_poll(&dev->mt76,
+ &msta_link->wcid);
+ }
next:
/* ver 7 has a new DW with pair = 1, skip it */
if (ver == 7 && ((void *)(cur_info + 1) < end) &&
@@ -1574,6 +1671,363 @@ void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
}
}
+static struct mt7996_msdu_page *
+mt7996_msdu_page_get_from_cache(struct mt7996_dev *dev)
+{
+ struct mt7996_msdu_page *p = NULL;
+
+ spin_lock(&dev->wed_rro.lock);
+
+ if (!list_empty(&dev->wed_rro.page_cache)) {
+ p = list_first_entry(&dev->wed_rro.page_cache,
+ struct mt7996_msdu_page, list);
+ if (p)
+ list_del(&p->list);
+ }
+
+ spin_unlock(&dev->wed_rro.lock);
+
+ return p;
+}
+
+static struct mt7996_msdu_page *mt7996_msdu_page_get(struct mt7996_dev *dev)
+{
+ struct mt7996_msdu_page *p;
+
+ p = mt7996_msdu_page_get_from_cache(dev);
+ if (!p) {
+ p = kzalloc(L1_CACHE_ALIGN(sizeof(*p)), GFP_ATOMIC);
+ if (p)
+ INIT_LIST_HEAD(&p->list);
+ }
+
+ return p;
+}
+
+static void mt7996_msdu_page_put_to_cache(struct mt7996_dev *dev,
+ struct mt7996_msdu_page *p)
+{
+ if (p->buf) {
+ mt76_put_page_pool_buf(p->buf, false);
+ p->buf = NULL;
+ }
+
+ spin_lock(&dev->wed_rro.lock);
+ list_add(&p->list, &dev->wed_rro.page_cache);
+ spin_unlock(&dev->wed_rro.lock);
+}
+
+static void mt7996_msdu_page_free_cache(struct mt7996_dev *dev)
+{
+ while (true) {
+ struct mt7996_msdu_page *p;
+
+ p = mt7996_msdu_page_get_from_cache(dev);
+ if (!p)
+ break;
+
+ if (p->buf)
+ mt76_put_page_pool_buf(p->buf, false);
+
+ kfree(p);
+ }
+}
+
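+/* Editorial note: the page cache above recycles descriptors so the rx
+ * hot path can usually avoid the GFP_ATOMIC kzalloc() fallback in
+ * mt7996_msdu_page_get(); the cache is only drained on teardown via
+ * mt7996_msdu_page_free_cache().
+ */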
+static u32 mt7996_msdu_page_hash_from_addr(dma_addr_t dma_addr)
+{
+ u32 val = 0;
+ int i = 0;
+
+ while (dma_addr) {
+ val += (u32)((dma_addr & 0xff) + i) % MT7996_RRO_MSDU_PG_HASH_SIZE;
+ dma_addr >>= 8;
+ i += 13;
+ }
+
+ return val % MT7996_RRO_MSDU_PG_HASH_SIZE;
+}
+
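+/* Editorial worked example (assuming, purely for illustration, that
+ * MT7996_RRO_MSDU_PG_HASH_SIZE is 32): dma_addr 0x1a2b hashes byte
+ * 0x2b at i = 0 to (0x2b + 0) % 32 = 11 and byte 0x1a at i = 13 to
+ * (0x1a + 13) % 32 = 7, so val = 18 and the bucket is 18 % 32 = 18.
+ * The growing per-byte offset i spreads addresses that differ only in
+ * their upper bytes across buckets.
+ */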
+static struct mt7996_msdu_page *
+mt7996_rro_msdu_page_get(struct mt7996_dev *dev, dma_addr_t dma_addr)
+{
+ u32 hash = mt7996_msdu_page_hash_from_addr(dma_addr);
+ struct mt7996_msdu_page *p, *tmp, *addr = NULL;
+
+ spin_lock(&dev->wed_rro.lock);
+
+ list_for_each_entry_safe(p, tmp, &dev->wed_rro.page_map[hash],
+ list) {
+ if (p->dma_addr == dma_addr) {
+ list_del(&p->list);
+ addr = p;
+ break;
+ }
+ }
+
+ spin_unlock(&dev->wed_rro.lock);
+
+ return addr;
+}
+
+static void mt7996_rx_token_put(struct mt7996_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < dev->mt76.rx_token_size; i++) {
+ struct mt76_txwi_cache *t;
+
+ t = mt76_rx_token_release(&dev->mt76, i);
+ if (!t || !t->ptr)
+ continue;
+
+ mt76_put_page_pool_buf(t->ptr, false);
+ t->dma_addr = 0;
+ t->ptr = NULL;
+
+ mt76_put_rxwi(&dev->mt76, t);
+ }
+}
+
+void mt7996_rro_msdu_page_map_free(struct mt7996_dev *dev)
+{
+ struct mt7996_msdu_page *p, *tmp;
+ int i;
+
+ local_bh_disable();
+
+ for (i = 0; i < ARRAY_SIZE(dev->wed_rro.page_map); i++) {
+ list_for_each_entry_safe(p, tmp, &dev->wed_rro.page_map[i],
+ list) {
+ list_del_init(&p->list);
+ if (p->buf)
+ mt76_put_page_pool_buf(p->buf, false);
+ kfree(p);
+ }
+ }
+ mt7996_msdu_page_free_cache(dev);
+
+ local_bh_enable();
+
+ mt7996_rx_token_put(dev);
+}
+
+int mt7996_rro_msdu_page_add(struct mt76_dev *mdev, struct mt76_queue *q,
+ dma_addr_t dma_addr, void *data)
+{
+ struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
+ struct mt7996_msdu_page_info *pinfo = data;
+ struct mt7996_msdu_page *p;
+ u32 hash;
+
+ pinfo->data |= cpu_to_le32(FIELD_PREP(MSDU_PAGE_INFO_OWNER_MASK, 1));
+ p = mt7996_msdu_page_get(dev);
+ if (!p)
+ return -ENOMEM;
+
+ p->buf = data;
+ p->dma_addr = dma_addr;
+ p->q = q;
+
+ hash = mt7996_msdu_page_hash_from_addr(dma_addr);
+
+ spin_lock(&dev->wed_rro.lock);
+ list_add_tail(&p->list, &dev->wed_rro.page_map[hash]);
+ spin_unlock(&dev->wed_rro.lock);
+
+ return 0;
+}
+
+static struct mt7996_wed_rro_addr *
+mt7996_rro_addr_elem_get(struct mt7996_dev *dev, u16 session_id, u16 seq_num)
+{
+ u32 idx = 0;
+ void *addr;
+
+ if (session_id == MT7996_RRO_MAX_SESSION) {
+ addr = dev->wed_rro.session.ptr;
+ } else {
+ idx = session_id / MT7996_RRO_BA_BITMAP_SESSION_SIZE;
+ addr = dev->wed_rro.addr_elem[idx].ptr;
+
+ idx = session_id % MT7996_RRO_BA_BITMAP_SESSION_SIZE;
+ idx = idx * MT7996_RRO_WINDOW_MAX_LEN;
+ }
+ idx += seq_num % MT7996_RRO_WINDOW_MAX_LEN;
+
+ return addr + idx * sizeof(struct mt7996_wed_rro_addr);
+}
+
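+/* Editorial note: for a regular session the element returned above is
+ * addr_elem[session_id / MT7996_RRO_BA_BITMAP_SESSION_SIZE] at offset
+ * (session_id % MT7996_RRO_BA_BITMAP_SESSION_SIZE) *
+ * MT7996_RRO_WINDOW_MAX_LEN + seq_num % MT7996_RRO_WINDOW_MAX_LEN,
+ * i.e. each session owns a window of MT7996_RRO_WINDOW_MAX_LEN slots
+ * indexed by sequence number.
+ */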
+#define MT996_RRO_SN_MASK GENMASK(11, 0)
+
+void mt7996_rro_rx_process(struct mt76_dev *mdev, void *data)
+{
+ struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
+ struct mt76_wed_rro_ind *cmd = (struct mt76_wed_rro_ind *)data;
+ u32 cmd_data0 = le32_to_cpu(cmd->data0);
+ u32 cmd_data1 = le32_to_cpu(cmd->data1);
+ u8 ind_reason = FIELD_GET(RRO_IND_DATA0_IND_REASON_MASK, cmd_data0);
+ u16 start_seq = FIELD_GET(RRO_IND_DATA0_START_SEQ_MASK, cmd_data0);
+ u16 seq_id = FIELD_GET(RRO_IND_DATA0_SEQ_ID_MASK, cmd_data0);
+ u16 ind_count = FIELD_GET(RRO_IND_DATA1_IND_COUNT_MASK, cmd_data1);
+ struct mt7996_msdu_page_info *pinfo = NULL;
+ struct mt7996_msdu_page *p = NULL;
+ int i, seq_num = 0;
+
+ for (i = 0; i < ind_count; i++) {
+ struct mt7996_wed_rro_addr *e;
+ struct mt76_rx_status *status;
+ struct mt7996_rro_hif *rxd;
+ int j, len, qid, data_len;
+ struct mt76_txwi_cache *t;
+ dma_addr_t dma_addr = 0;
+ u16 rx_token_id, count;
+ struct mt76_queue *q;
+ struct sk_buff *skb;
+ u32 info = 0, data;
+ u8 signature;
+ void *buf;
+ bool ls;
+
+ seq_num = FIELD_GET(MT996_RRO_SN_MASK, start_seq + i);
+ e = mt7996_rro_addr_elem_get(dev, seq_id, seq_num);
+ data = le32_to_cpu(e->data);
+ signature = FIELD_GET(WED_RRO_ADDR_SIGNATURE_MASK, data);
+ if (signature != (seq_num / MT7996_RRO_WINDOW_MAX_LEN)) {
+ u32 val = FIELD_PREP(WED_RRO_ADDR_SIGNATURE_MASK,
+ 0xff);
+
+ e->data |= cpu_to_le32(val);
+ goto update_ack_seq_num;
+ }
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ dma_addr = FIELD_GET(WED_RRO_ADDR_HEAD_HIGH_MASK, data);
+ dma_addr <<= 32;
+#endif
+ dma_addr |= le32_to_cpu(e->head_low);
+
+ count = FIELD_GET(WED_RRO_ADDR_COUNT_MASK, data);
+ for (j = 0; j < count; j++) {
+ if (!p) {
+ p = mt7996_rro_msdu_page_get(dev, dma_addr);
+ if (!p)
+ continue;
+
+ dma_sync_single_for_cpu(mdev->dma_dev, p->dma_addr,
+ SKB_WITH_OVERHEAD(p->q->buf_size),
+ page_pool_get_dma_dir(p->q->page_pool));
+ pinfo = (struct mt7996_msdu_page_info *)p->buf;
+ }
+
+ rxd = &pinfo->rxd[j % MT7996_MAX_HIF_RXD_IN_PG];
+ len = FIELD_GET(RRO_HIF_DATA1_SDL_MASK,
+ le32_to_cpu(rxd->data1));
+
+ rx_token_id = FIELD_GET(RRO_HIF_DATA4_RX_TOKEN_ID_MASK,
+ le32_to_cpu(rxd->data4));
+ t = mt76_rx_token_release(mdev, rx_token_id);
+ if (!t)
+ goto next_page;
+
+ qid = t->qid;
+ buf = t->ptr;
+ q = &mdev->q_rx[qid];
+ dma_sync_single_for_cpu(mdev->dma_dev, t->dma_addr,
+ SKB_WITH_OVERHEAD(q->buf_size),
+ page_pool_get_dma_dir(q->page_pool));
+
+ t->dma_addr = 0;
+ t->ptr = NULL;
+ mt76_put_rxwi(mdev, t);
+ if (!buf)
+ goto next_page;
+
+ if (q->rx_head)
+ data_len = q->buf_size;
+ else
+ data_len = SKB_WITH_OVERHEAD(q->buf_size);
+
+ if (data_len < len + q->buf_offset) {
+ dev_kfree_skb(q->rx_head);
+ mt76_put_page_pool_buf(buf, false);
+ q->rx_head = NULL;
+ goto next_page;
+ }
+
+ ls = FIELD_GET(RRO_HIF_DATA1_LS_MASK,
+ le32_to_cpu(rxd->data1));
+ if (q->rx_head) {
+ /* TODO: Take into account non-linear skb. */
+ mt76_put_page_pool_buf(buf, false);
+ if (ls) {
+ dev_kfree_skb(q->rx_head);
+ q->rx_head = NULL;
+ }
+ goto next_page;
+ }
+
+ if (ls && !mt7996_rx_check(mdev, buf, len))
+ goto next_page;
+
+ skb = build_skb(buf, q->buf_size);
+ if (!skb)
+ goto next_page;
+
+ skb_reserve(skb, q->buf_offset);
+ skb_mark_for_recycle(skb);
+ __skb_put(skb, len);
+
+ if (ind_reason == 1 || ind_reason == 2) {
+ dev_kfree_skb(skb);
+ goto next_page;
+ }
+
+ if (!ls) {
+ q->rx_head = skb;
+ goto next_page;
+ }
+
+ status = (struct mt76_rx_status *)skb->cb;
+ if (seq_id != MT7996_RRO_MAX_SESSION)
+ status->aggr = true;
+
+ mt7996_queue_rx_skb(mdev, qid, skb, &info);
+next_page:
+ if ((j + 1) % MT7996_MAX_HIF_RXD_IN_PG == 0) {
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ dma_addr =
+ FIELD_GET(MSDU_PAGE_INFO_PG_HIGH_MASK,
+ le32_to_cpu(pinfo->data));
+ dma_addr <<= 32;
+ dma_addr |= le32_to_cpu(pinfo->pg_low);
+#else
+ dma_addr = le32_to_cpu(pinfo->pg_low);
+#endif
+ mt7996_msdu_page_put_to_cache(dev, p);
+ p = NULL;
+ }
+ }
+
+update_ack_seq_num:
+ if ((i + 1) % 4 == 0)
+ mt76_wr(dev, MT_RRO_ACK_SN_CTRL,
+ FIELD_PREP(MT_RRO_ACK_SN_CTRL_SESSION_MASK,
+ seq_id) |
+ FIELD_PREP(MT_RRO_ACK_SN_CTRL_SN_MASK,
+ seq_num));
+ if (p) {
+ mt7996_msdu_page_put_to_cache(dev, p);
+ p = NULL;
+ }
+ }
+
+ /* Update ack_seq_num for the remaining addr_elem entries */
+ if (i % 4)
+ mt76_wr(dev, MT_RRO_ACK_SN_CTRL,
+ FIELD_PREP(MT_RRO_ACK_SN_CTRL_SESSION_MASK, seq_id) |
+ FIELD_PREP(MT_RRO_ACK_SN_CTRL_SN_MASK, seq_num));
+}
+
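+/* Editorial note: the loop above pushes the acknowledged sequence
+ * number to MT_RRO_ACK_SN_CTRL once every four processed elements,
+ * with a final write for any remainder, so the hardware RRO window
+ * advances without one register write per element.
+ */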
void mt7996_mac_cca_stats_reset(struct mt7996_phy *phy)
{
struct mt7996_dev *dev = phy->dev;
@@ -1722,7 +2176,8 @@ mt7996_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
if (!link || link->phy != phy)
continue;
- mt7996_mcu_add_beacon(dev->mt76.hw, vif, link_conf);
+ mt7996_mcu_add_beacon(dev->mt76.hw, vif, link_conf,
+ link_conf->enable_beacon);
}
}
@@ -1766,13 +2221,10 @@ void mt7996_tx_token_put(struct mt7996_dev *dev)
static int
mt7996_mac_restart(struct mt7996_dev *dev)
{
- struct mt7996_phy *phy2, *phy3;
struct mt76_dev *mdev = &dev->mt76;
+ struct mt7996_phy *phy;
int i, ret;
- phy2 = mt7996_phy2(dev);
- phy3 = mt7996_phy3(dev);
-
if (dev->hif2) {
mt76_wr(dev, MT_INT1_MASK_CSR, 0x0);
mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0);
@@ -1784,20 +2236,14 @@ mt7996_mac_restart(struct mt7996_dev *dev)
mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0x0);
}
- set_bit(MT76_RESET, &dev->mphy.state);
set_bit(MT76_MCU_RESET, &dev->mphy.state);
+ mt7996_for_each_phy(dev, phy)
+ set_bit(MT76_RESET, &phy->mt76->state);
wake_up(&dev->mt76.mcu.wait);
- if (phy2)
- set_bit(MT76_RESET, &phy2->mt76->state);
- if (phy3)
- set_bit(MT76_RESET, &phy3->mt76->state);
/* lock/unlock all queues to ensure that no tx is pending */
- mt76_txq_schedule_all(&dev->mphy);
- if (phy2)
- mt76_txq_schedule_all(phy2->mt76);
- if (phy3)
- mt76_txq_schedule_all(phy3->mt76);
+ mt7996_for_each_phy(dev, phy)
+ mt76_txq_schedule_all(phy->mt76);
/* disable all tx/rx napi */
mt76_worker_disable(&dev->mt76.tx_worker);
@@ -1849,42 +2295,57 @@ mt7996_mac_restart(struct mt7996_dev *dev)
if (ret)
goto out;
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
+ mt7996_has_hwrro(dev)) {
+ u32 wed_irq_mask = dev->mt76.mmio.irqmask |
+ MT_INT_TX_DONE_BAND2;
+
+ mt7996_rro_hw_init(dev);
+ mt76_for_each_q_rx(&dev->mt76, i) {
+ if (mt76_queue_is_wed_rro_ind(&dev->mt76.q_rx[i]) ||
+ mt76_queue_is_wed_rro_msdu_pg(&dev->mt76.q_rx[i]))
+ mt76_queue_rx_reset(dev, i);
+ }
+
+ mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);
+ mtk_wed_device_start_hw_rro(&dev->mt76.mmio.wed, wed_irq_mask,
+ false);
+ mt7996_irq_enable(dev, wed_irq_mask);
+ mt7996_irq_disable(dev, 0);
+ }
+
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2)) {
+ mt76_wr(dev, MT_INT_PCIE1_MASK_CSR,
+ MT_INT_TX_RX_DONE_EXT);
+ mtk_wed_device_start(&dev->mt76.mmio.wed_hif2,
+ MT_INT_TX_RX_DONE_EXT);
+ }
+
/* set the necessary init items */
ret = mt7996_mcu_set_eeprom(dev);
if (ret)
goto out;
mt7996_mac_init(dev);
- mt7996_init_txpower(&dev->phy);
- mt7996_init_txpower(phy2);
- mt7996_init_txpower(phy3);
+ mt7996_for_each_phy(dev, phy)
+ mt7996_init_txpower(phy);
ret = mt7996_txbf_init(dev);
+ if (ret)
+ goto out;
- if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state)) {
- ret = mt7996_run(&dev->phy);
- if (ret)
- goto out;
- }
-
- if (phy2 && test_bit(MT76_STATE_RUNNING, &phy2->mt76->state)) {
- ret = mt7996_run(phy2);
- if (ret)
- goto out;
- }
+ mt7996_for_each_phy(dev, phy) {
+ if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
+ continue;
- if (phy3 && test_bit(MT76_STATE_RUNNING, &phy3->mt76->state)) {
- ret = mt7996_run(phy3);
+ ret = mt7996_run(&dev->phy);
if (ret)
goto out;
}
out:
/* reset done */
- clear_bit(MT76_RESET, &dev->mphy.state);
- if (phy2)
- clear_bit(MT76_RESET, &phy2->mt76->state);
- if (phy3)
- clear_bit(MT76_RESET, &phy3->mt76->state);
+ mt7996_for_each_phy(dev, phy)
+ clear_bit(MT76_RESET, &phy->mt76->state);
napi_enable(&dev->mt76.tx_napi);
local_bh_disable();
@@ -1896,74 +2357,121 @@ out:
}
static void
+mt7996_mac_reset_sta_iter(void *data, struct ieee80211_sta *sta)
+{
+ struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ struct mt7996_dev *dev = data;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(msta->link); i++) {
+ struct mt7996_sta_link *msta_link = NULL;
+
+ msta_link = rcu_replace_pointer(msta->link[i], msta_link,
+ lockdep_is_held(&dev->mt76.mutex));
+ if (!msta_link)
+ continue;
+
+ mt7996_mac_sta_deinit_link(dev, msta_link);
+
+ if (msta->deflink_id == i) {
+ msta->deflink_id = IEEE80211_LINK_UNSPECIFIED;
+ continue;
+ }
+
+ kfree_rcu(msta_link, rcu_head);
+ }
+}
+
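+/* Editorial note on the iterator above: the default link appears to be
+ * embedded in struct mt7996_sta (see the msta->deflink uses elsewhere
+ * in this patch), so only deflink_id is cleared for it, while the
+ * dynamically allocated links are released with kfree_rcu().
+ */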
+static void
+mt7996_mac_reset_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
+ struct mt76_vif_data *mvif = mlink->mvif;
+ struct mt7996_dev *dev = data;
+ int i;
+
+ rcu_read_lock();
+ for (i = 0; i < ARRAY_SIZE(mvif->link); i++) {
+ mlink = mt76_dereference(mvif->link[i], &dev->mt76);
+ if (!mlink || mlink == (struct mt76_vif_link *)vif->drv_priv)
+ continue;
+
+ rcu_assign_pointer(mvif->link[i], NULL);
+ kfree_rcu(mlink, rcu_head);
+ }
+ rcu_read_unlock();
+}
+
+static void
mt7996_mac_full_reset(struct mt7996_dev *dev)
{
- struct mt7996_phy *phy2, *phy3;
+ struct ieee80211_hw *hw = mt76_hw(dev);
+ struct mt7996_phy *phy;
+ LIST_HEAD(list);
int i;
- phy2 = mt7996_phy2(dev);
- phy3 = mt7996_phy3(dev);
dev->recovery.hw_full_reset = true;
wake_up(&dev->mt76.mcu.wait);
- ieee80211_stop_queues(mt76_hw(dev));
- if (phy2)
- ieee80211_stop_queues(phy2->mt76->hw);
- if (phy3)
- ieee80211_stop_queues(phy3->mt76->hw);
+ ieee80211_stop_queues(hw);
cancel_work_sync(&dev->wed_rro.work);
- cancel_delayed_work_sync(&dev->mphy.mac_work);
- if (phy2)
- cancel_delayed_work_sync(&phy2->mt76->mac_work);
- if (phy3)
- cancel_delayed_work_sync(&phy3->mt76->mac_work);
+ mt7996_for_each_phy(dev, phy)
+ cancel_delayed_work_sync(&phy->mt76->mac_work);
mutex_lock(&dev->mt76.mutex);
for (i = 0; i < 10; i++) {
if (!mt7996_mac_restart(dev))
break;
}
- mutex_unlock(&dev->mt76.mutex);
if (i == 10)
dev_err(dev->mt76.dev, "chip full reset failed\n");
- ieee80211_restart_hw(mt76_hw(dev));
- if (phy2)
- ieee80211_restart_hw(phy2->mt76->hw);
- if (phy3)
- ieee80211_restart_hw(phy3->mt76->hw);
+ mt7996_for_each_phy(dev, phy)
+ phy->omac_mask = 0;
- ieee80211_wake_queues(mt76_hw(dev));
- if (phy2)
- ieee80211_wake_queues(phy2->mt76->hw);
- if (phy3)
- ieee80211_wake_queues(phy3->mt76->hw);
+ ieee80211_iterate_stations_atomic(hw, mt7996_mac_reset_sta_iter, dev);
+ ieee80211_iterate_active_interfaces_atomic(hw,
+ IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER,
+ mt7996_mac_reset_vif_iter, dev);
+ mt76_reset_device(&dev->mt76);
+
+ INIT_LIST_HEAD(&dev->sta_rc_list);
+ INIT_LIST_HEAD(&dev->twt_list);
+ spin_lock_bh(&dev->wed_rro.lock);
+ list_splice_init(&dev->wed_rro.poll_list, &list);
+ spin_unlock_bh(&dev->wed_rro.lock);
+
+ while (!list_empty(&list)) {
+ struct mt7996_wed_rro_session_id *e;
+
+ e = list_first_entry(&list, struct mt7996_wed_rro_session_id,
+ list);
+ list_del_init(&e->list);
+ kfree(e);
+ }
+
+ i = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7996_WTBL_STA);
+ dev->mt76.global_wcid.idx = i;
dev->recovery.hw_full_reset = false;
- ieee80211_queue_delayed_work(mt76_hw(dev),
- &dev->mphy.mac_work,
- MT7996_WATCHDOG_TIME);
- if (phy2)
- ieee80211_queue_delayed_work(phy2->mt76->hw,
- &phy2->mt76->mac_work,
- MT7996_WATCHDOG_TIME);
- if (phy3)
- ieee80211_queue_delayed_work(phy3->mt76->hw,
- &phy3->mt76->mac_work,
- MT7996_WATCHDOG_TIME);
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ ieee80211_restart_hw(mt76_hw(dev));
}
void mt7996_mac_reset_work(struct work_struct *work)
{
- struct mt7996_phy *phy2, *phy3;
+ struct ieee80211_hw *hw;
struct mt7996_dev *dev;
+ struct mt7996_phy *phy;
int i;
dev = container_of(work, struct mt7996_dev, reset_work);
- phy2 = mt7996_phy2(dev);
- phy3 = mt7996_phy3(dev);
+ hw = mt76_hw(dev);
/* chip full reset */
if (dev->recovery.restart) {
@@ -1994,7 +2502,7 @@ void mt7996_mac_reset_work(struct work_struct *work)
return;
dev_info(dev->mt76.dev, "\n%s L1 SER recovery start.",
- wiphy_name(dev->mt76.hw->wiphy));
+ wiphy_name(hw->wiphy));
if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2))
mtk_wed_device_stop(&dev->mt76.mmio.wed_hif2);
@@ -2003,25 +2511,19 @@ void mt7996_mac_reset_work(struct work_struct *work)
mtk_wed_device_stop(&dev->mt76.mmio.wed);
ieee80211_stop_queues(mt76_hw(dev));
- if (phy2)
- ieee80211_stop_queues(phy2->mt76->hw);
- if (phy3)
- ieee80211_stop_queues(phy3->mt76->hw);
set_bit(MT76_RESET, &dev->mphy.state);
set_bit(MT76_MCU_RESET, &dev->mphy.state);
+ mt76_abort_scan(&dev->mt76);
wake_up(&dev->mt76.mcu.wait);
cancel_work_sync(&dev->wed_rro.work);
- cancel_delayed_work_sync(&dev->mphy.mac_work);
- if (phy2) {
- set_bit(MT76_RESET, &phy2->mt76->state);
- cancel_delayed_work_sync(&phy2->mt76->mac_work);
- }
- if (phy3) {
- set_bit(MT76_RESET, &phy3->mt76->state);
- cancel_delayed_work_sync(&phy3->mt76->mac_work);
+ mt7996_for_each_phy(dev, phy) {
+ mt76_abort_roc(phy->mt76);
+ set_bit(MT76_RESET, &phy->mt76->state);
+ cancel_delayed_work_sync(&phy->mt76->mac_work);
}
+
mt76_worker_disable(&dev->mt76.tx_worker);
mt76_for_each_q_rx(&dev->mt76, i) {
if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
@@ -2052,15 +2554,14 @@ void mt7996_mac_reset_work(struct work_struct *work)
/* enable DMA Tx/Rx and interrupt */
mt7996_dma_start(dev, false, false);
+ if (!is_mt7996(&dev->mt76) && dev->mt76.hwrro_mode == MT76_HWRRO_V3)
+ mt76_wr(dev, MT_RRO_3_0_EMU_CONF, MT_RRO_3_0_EMU_CONF_EN_MASK);
+
if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
- u32 wed_irq_mask = MT_INT_RRO_RX_DONE | MT_INT_TX_DONE_BAND2 |
+ u32 wed_irq_mask = MT_INT_TX_DONE_BAND2 |
dev->mt76.mmio.irqmask;
- if (mtk_wed_get_rx_capa(&dev->mt76.mmio.wed))
- wed_irq_mask &= ~MT_INT_RX_DONE_RRO_IND;
-
mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);
-
mtk_wed_device_start_hw_rro(&dev->mt76.mmio.wed, wed_irq_mask,
true);
mt7996_irq_enable(dev, wed_irq_mask);
@@ -2074,11 +2575,8 @@ void mt7996_mac_reset_work(struct work_struct *work)
}
clear_bit(MT76_MCU_RESET, &dev->mphy.state);
- clear_bit(MT76_RESET, &dev->mphy.state);
- if (phy2)
- clear_bit(MT76_RESET, &phy2->mt76->state);
- if (phy3)
- clear_bit(MT76_RESET, &phy3->mt76->state);
+ mt7996_for_each_phy(dev, phy)
+ clear_bit(MT76_RESET, &phy->mt76->state);
mt76_for_each_q_rx(&dev->mt76, i) {
if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
@@ -2100,25 +2598,14 @@ void mt7996_mac_reset_work(struct work_struct *work)
napi_schedule(&dev->mt76.tx_napi);
local_bh_enable();
- ieee80211_wake_queues(mt76_hw(dev));
- if (phy2)
- ieee80211_wake_queues(phy2->mt76->hw);
- if (phy3)
- ieee80211_wake_queues(phy3->mt76->hw);
+ ieee80211_wake_queues(hw);
mutex_unlock(&dev->mt76.mutex);
mt7996_update_beacons(dev);
- ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
- MT7996_WATCHDOG_TIME);
- if (phy2)
- ieee80211_queue_delayed_work(phy2->mt76->hw,
- &phy2->mt76->mac_work,
- MT7996_WATCHDOG_TIME);
- if (phy3)
- ieee80211_queue_delayed_work(phy3->mt76->hw,
- &phy3->mt76->mac_work,
+ mt7996_for_each_phy(dev, phy)
+ ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work,
MT7996_WATCHDOG_TIME);
dev_info(dev->mt76.dev, "\n%s L1 SER recovery completed.",
wiphy_name(dev->mt76.hw->wiphy));
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/main.c b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
index 84f731b387d2..581314368c5b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
@@ -138,6 +138,28 @@ static int get_omac_idx(enum nl80211_iftype type, u64 mask)
return -1;
}
+static int get_own_mld_idx(u64 mask, bool group_mld)
+{
+ u8 start = group_mld ? 0 : 16;
+ u8 end = group_mld ? 15 : 63;
+ int idx;
+
+ idx = get_free_idx(mask, start, end);
+ if (idx)
+ return idx - 1;
+
+ /* If no index is free in the 16-63 range, perform another lookup in
+ * the 0-15 range
+ */
+ if (!group_mld) {
+ idx = get_free_idx(mask, 0, 15);
+ if (idx)
+ return idx - 1;
+ }
+
+ return -EINVAL;
+}
+
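+/* Editorial sketch of the intended ID split: indices 0-15 serve group
+ * MLD IDs and 16-63 per-link own MLD IDs, with 0-15 reused as a
+ * fallback for the latter. get_free_idx() appears to return idx + 1 on
+ * success and 0 when the range is exhausted, hence the "idx - 1" above.
+ */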
static void
mt7996_init_bitrate_mask(struct ieee80211_vif *vif, struct mt7996_vif_link *mlink)
{
@@ -160,112 +182,105 @@ mt7996_init_bitrate_mask(struct ieee80211_vif *vif, struct mt7996_vif_link *mlin
static int
mt7996_set_hw_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key)
+ unsigned int link_id, struct ieee80211_key_conf *key)
{
struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ struct ieee80211_bss_conf *link_conf;
+ struct mt7996_sta_link *msta_link;
+ struct mt7996_vif_link *link;
int idx = key->keyidx;
- unsigned int link_id;
- unsigned long links;
+ u8 *wcid_keyidx;
+ bool is_bigtk;
+ int err;
- if (key->link_id >= 0)
- links = BIT(key->link_id);
- else if (sta && sta->valid_links)
- links = sta->valid_links;
- else if (vif->valid_links)
- links = vif->valid_links;
- else
- links = BIT(0);
+ link = mt7996_vif_link(dev, vif, link_id);
+ if (!link)
+ return 0;
- for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
- struct mt7996_sta_link *msta_link;
- struct mt7996_vif_link *link;
- u8 *wcid_keyidx;
- int err;
+ if (!mt7996_vif_link_phy(link))
+ return 0;
- link = mt7996_vif_link(dev, vif, link_id);
- if (!link)
- continue;
+ if (sta) {
+ struct mt7996_sta *msta;
- if (sta) {
- struct mt7996_sta *msta;
+ msta = (struct mt7996_sta *)sta->drv_priv;
+ msta_link = mt76_dereference(msta->link[link_id],
+ &dev->mt76);
+ if (!msta_link)
+ return 0;
- msta = (struct mt7996_sta *)sta->drv_priv;
- msta_link = mt76_dereference(msta->link[link_id],
- &dev->mt76);
- if (!msta_link)
- continue;
+ if (!msta_link->wcid.sta)
+ return -EOPNOTSUPP;
+ } else {
+ msta_link = &link->msta_link;
+ }
+ wcid_keyidx = &msta_link->wcid.hw_key_idx;
- if (!msta_link->wcid.sta)
- return -EOPNOTSUPP;
- } else {
- msta_link = &link->msta_link;
- }
- wcid_keyidx = &msta_link->wcid.hw_key_idx;
-
- switch (key->cipher) {
- case WLAN_CIPHER_SUITE_AES_CMAC:
- case WLAN_CIPHER_SUITE_BIP_CMAC_256:
- case WLAN_CIPHER_SUITE_BIP_GMAC_128:
- case WLAN_CIPHER_SUITE_BIP_GMAC_256:
- if (key->keyidx == 6 || key->keyidx == 7) {
- wcid_keyidx = &msta_link->wcid.hw_key_idx2;
- key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
- }
- break;
- default:
- break;
+ is_bigtk = key->keyidx == 6 || key->keyidx == 7;
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ if (is_bigtk) {
+ wcid_keyidx = &msta_link->wcid.hw_key_idx2;
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
}
+ break;
+ default:
+ break;
+ }
- if (cmd == SET_KEY && !sta && !link->mt76.cipher) {
- struct ieee80211_bss_conf *link_conf;
+ link_conf = link_conf_dereference_protected(vif, link_id);
+ if (!link_conf)
+ link_conf = &vif->bss_conf;
- link_conf = link_conf_dereference_protected(vif,
- link_id);
- if (!link_conf)
- link_conf = &vif->bss_conf;
+ if (cmd == SET_KEY && !sta && !link->mt76.cipher) {
+ link->mt76.cipher =
+ mt76_connac_mcu_get_cipher(key->cipher);
+ mt7996_mcu_add_bss_info(link->phy, vif, link_conf,
+ &link->mt76, msta_link, true);
+ }
- link->mt76.cipher =
- mt76_connac_mcu_get_cipher(key->cipher);
- mt7996_mcu_add_bss_info(link->phy, vif, link_conf,
- &link->mt76, msta_link, true);
- }
+ if (cmd == SET_KEY)
+ *wcid_keyidx = idx;
+ else if (idx == *wcid_keyidx)
+ *wcid_keyidx = -1;
- if (cmd == SET_KEY) {
- *wcid_keyidx = idx;
- } else {
- if (idx == *wcid_keyidx)
- *wcid_keyidx = -1;
- continue;
- }
+ if (cmd != SET_KEY && sta)
+ return 0;
- mt76_wcid_key_setup(&dev->mt76, &msta_link->wcid, key);
+ mt76_wcid_key_setup(&dev->mt76, &msta_link->wcid, key);
- if (key->keyidx == 6 || key->keyidx == 7) {
- err = mt7996_mcu_bcn_prot_enable(dev, link,
- msta_link, key);
- if (err)
- return err;
- }
+ err = mt7996_mcu_add_key(&dev->mt76, vif, key,
+ MCU_WMWA_UNI_CMD(STA_REC_UPDATE),
+ &msta_link->wcid, cmd);
- err = mt7996_mcu_add_key(&dev->mt76, vif, key,
- MCU_WMWA_UNI_CMD(STA_REC_UPDATE),
- &msta_link->wcid, cmd);
- if (err)
- return err;
+ /* remove and re-add the beacon to enable beacon protection */
+ if (cmd == SET_KEY && is_bigtk && link_conf->enable_beacon) {
+ mt7996_mcu_add_beacon(hw, vif, link_conf, false);
+ mt7996_mcu_add_beacon(hw, vif, link_conf, true);
}
- return 0;
+ return err;
}
+struct mt7996_key_iter_data {
+ enum set_key_cmd cmd;
+ unsigned int link_id;
+};
+
static void
mt7996_key_iter(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct ieee80211_key_conf *key,
void *data)
{
+ struct mt7996_key_iter_data *it = data;
+
if (sta)
return;
- WARN_ON(mt7996_set_hw_key(hw, SET_KEY, vif, NULL, key));
+ WARN_ON(mt7996_set_hw_key(hw, it->cmd, vif, NULL, it->link_id, key));
}
int mt7996_vif_link_add(struct mt76_phy *mphy, struct ieee80211_vif *vif,
@@ -278,8 +293,12 @@ int mt7996_vif_link_add(struct mt76_phy *mphy, struct ieee80211_vif *vif,
struct mt7996_phy *phy = mphy->priv;
struct mt7996_dev *dev = phy->dev;
u8 band_idx = phy->mt76->band_idx;
+ struct mt7996_key_iter_data it = {
+ .cmd = SET_KEY,
+ .link_id = link_conf->link_id,
+ };
struct mt76_txq *mtxq;
- int idx, ret;
+ int mld_idx, idx, ret;
mlink->idx = __ffs64(~dev->mt76.vif_mask);
if (mlink->idx >= mt7996_max_interface_num(dev))
@@ -289,6 +308,17 @@ int mt7996_vif_link_add(struct mt76_phy *mphy, struct ieee80211_vif *vif,
if (idx < 0)
return -ENOSPC;
+ if (!dev->mld_idx_mask) { /* first link in the group */
+ mvif->mld_group_idx = get_own_mld_idx(dev->mld_idx_mask, true);
+ mvif->mld_remap_idx = get_free_idx(dev->mld_remap_idx_mask,
+ 0, 15);
+ }
+
+ mld_idx = get_own_mld_idx(dev->mld_idx_mask, false);
+ if (mld_idx < 0)
+ return -ENOSPC;
+
+ link->mld_idx = mld_idx;
link->phy = phy;
mlink->omac_idx = idx;
mlink->band_idx = band_idx;
@@ -301,6 +331,11 @@ int mt7996_vif_link_add(struct mt76_phy *mphy, struct ieee80211_vif *vif,
return ret;
dev->mt76.vif_mask |= BIT_ULL(mlink->idx);
+ if (!dev->mld_idx_mask) {
+ dev->mld_idx_mask |= BIT_ULL(mvif->mld_group_idx);
+ dev->mld_remap_idx_mask |= BIT_ULL(mvif->mld_remap_idx);
+ }
+ dev->mld_idx_mask |= BIT_ULL(link->mld_idx);
phy->omac_mask |= BIT_ULL(mlink->omac_idx);
idx = MT7996_WTBL_RESERVED - mlink->idx;
@@ -339,7 +374,7 @@ int mt7996_vif_link_add(struct mt76_phy *mphy, struct ieee80211_vif *vif,
CONN_STATE_PORT_SECURE, true);
rcu_assign_pointer(dev->mt76.wcid[idx], &msta_link->wcid);
- ieee80211_iter_keys(mphy->hw, vif, mt7996_key_iter, NULL);
+ ieee80211_iter_keys(mphy->hw, vif, mt7996_key_iter, &it);
if (mvif->mt76.deflink_id == IEEE80211_LINK_UNSPECIFIED)
mvif->mt76.deflink_id = link_conf->link_id;
@@ -356,8 +391,14 @@ void mt7996_vif_link_remove(struct mt76_phy *mphy, struct ieee80211_vif *vif,
struct mt7996_sta_link *msta_link = &link->msta_link;
struct mt7996_phy *phy = mphy->priv;
struct mt7996_dev *dev = phy->dev;
+ struct mt7996_key_iter_data it = {
+ .cmd = SET_KEY,
+ .link_id = link_conf->link_id,
+ };
int idx = msta_link->wcid.idx;
+ ieee80211_iter_keys(mphy->hw, vif, mt7996_key_iter, &it);
+
mt7996_mcu_add_sta(dev, link_conf, NULL, link, NULL,
CONN_STATE_DISCONNECT, false);
mt7996_mcu_add_bss_info(phy, vif, link_conf, mlink, msta_link, false);
@@ -380,7 +421,13 @@ void mt7996_vif_link_remove(struct mt76_phy *mphy, struct ieee80211_vif *vif,
}
dev->mt76.vif_mask &= ~BIT_ULL(mlink->idx);
+ dev->mld_idx_mask &= ~BIT_ULL(link->mld_idx);
phy->omac_mask &= ~BIT_ULL(mlink->omac_idx);
+ if (!(dev->mld_idx_mask & ~BIT_ULL(mvif->mld_group_idx))) {
+ /* last link */
+ dev->mld_idx_mask &= ~BIT_ULL(mvif->mld_group_idx);
+ dev->mld_remap_idx_mask &= ~BIT_ULL(mvif->mld_remap_idx);
+ }
spin_lock_bh(&dev->mt76.sta_poll_lock);
if (!list_empty(&msta_link->wcid.poll_list))
@@ -551,8 +598,9 @@ static int mt7996_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_key_conf *key)
{
struct mt7996_dev *dev = mt7996_hw_dev(hw);
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- int err;
+ unsigned int link_id;
+ unsigned long links;
+ int err = 0;
/* The hardware does not support per-STA RX GTK, fallback
* to software mode for these.
@@ -586,11 +634,22 @@ static int mt7996_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
return -EOPNOTSUPP;
}
- if (!mt7996_vif_link_phy(&mvif->deflink))
- return 0; /* defer until after link add */
-
mutex_lock(&dev->mt76.mutex);
- err = mt7996_set_hw_key(hw, cmd, vif, sta, key);
+
+ if (key->link_id >= 0)
+ links = BIT(key->link_id);
+ else if (sta && sta->valid_links)
+ links = sta->valid_links;
+ else if (vif->valid_links)
+ links = vif->valid_links;
+ else
+ links = BIT(0);
+
+ for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ err = mt7996_set_hw_key(hw, cmd, vif, sta, link_id, key);
+ if (err)
+ break;
+ }
mutex_unlock(&dev->mt76.mutex);
return err;
@@ -850,7 +909,7 @@ mt7996_link_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
link->mt76.beacon_rates_idx =
mt7996_get_rates_table(phy, info, true, false);
- mt7996_mcu_add_beacon(hw, vif, info);
+ mt7996_mcu_add_beacon(hw, vif, info, info->enable_beacon);
}
if (changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
@@ -878,7 +937,7 @@ mt7996_channel_switch_beacon(struct ieee80211_hw *hw,
struct mt7996_dev *dev = mt7996_hw_dev(hw);
mutex_lock(&dev->mt76.mutex);
- mt7996_mcu_add_beacon(hw, vif, &vif->bss_conf);
+ mt7996_mcu_add_beacon(hw, vif, &vif->bss_conf, vif->bss_conf.enable_beacon);
mutex_unlock(&dev->mt76.mutex);
}
@@ -925,6 +984,7 @@ mt7996_mac_sta_init_link(struct mt7996_dev *dev,
msta_link->wcid.sta = 1;
msta_link->wcid.idx = idx;
msta_link->wcid.link_id = link_id;
+ msta_link->wcid.def_wcid = &msta->deflink.wcid;
ewma_avg_signal_init(&msta_link->avg_ack_signal);
ewma_signal_init(&msta_link->wcid.rssi);
@@ -941,18 +1001,9 @@ mt7996_mac_sta_init_link(struct mt7996_dev *dev,
return 0;
}
-static void
-mt7996_mac_sta_deinit_link(struct mt7996_dev *dev,
- struct mt7996_sta_link *msta_link)
+void mt7996_mac_sta_deinit_link(struct mt7996_dev *dev,
+ struct mt7996_sta_link *msta_link)
{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(msta_link->wcid.aggr); i++)
- mt76_rx_aggr_stop(&dev->mt76, &msta_link->wcid, i);
-
- mt7996_mac_wtbl_update(dev, msta_link->wcid.idx,
- MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
-
spin_lock_bh(&dev->mt76.sta_poll_lock);
if (!list_empty(&msta_link->wcid.poll_list))
list_del_init(&msta_link->wcid.poll_list);
@@ -982,6 +1033,9 @@ mt7996_mac_sta_remove_links(struct mt7996_dev *dev, struct ieee80211_vif *vif,
if (!msta_link)
continue;
+ mt7996_mac_wtbl_update(dev, msta_link->wcid.idx,
+ MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+
mt7996_mac_sta_deinit_link(dev, msta_link);
link = mt7996_vif_link(dev, vif, link_id);
if (!link)
@@ -1036,16 +1090,17 @@ mt7996_mac_sta_add_links(struct mt7996_dev *dev, struct ieee80211_vif *vif,
goto error_unlink;
}
- err = mt7996_mac_sta_init_link(dev, link_conf, link_sta, link,
- link_id);
- if (err)
- goto error_unlink;
-
mphy = mt76_vif_link_phy(&link->mt76);
if (!mphy) {
err = -EINVAL;
goto error_unlink;
}
+
+ err = mt7996_mac_sta_init_link(dev, link_conf, link_sta, link,
+ link_id);
+ if (err)
+ goto error_unlink;
+
mphy->num_sta++;
}
@@ -1179,6 +1234,24 @@ mt7996_mac_sta_remove(struct mt7996_dev *dev, struct ieee80211_vif *vif,
mutex_unlock(&dev->mt76.mutex);
}
+static void
+mt7996_set_active_links(struct ieee80211_vif *vif)
+{
+ u16 active_links;
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ if (!ieee80211_vif_is_mld(vif))
+ return;
+
+ active_links = mt76_select_links(vif, MT7996_MAX_RADIOS);
+ if (hweight16(active_links) < 2)
+ return;
+
+ ieee80211_set_active_links_async(vif, active_links);
+}
+
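+/* Editorial note: mt76_select_links() is assumed to pick up to
+ * MT7996_MAX_RADIOS candidate links for an MLD station vif; link
+ * activation is only requested when at least two links are usable,
+ * otherwise the current single link is kept.
+ */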
static int
mt7996_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, enum ieee80211_sta_state old_state,
@@ -1196,16 +1269,18 @@ mt7996_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mt7996_mac_sta_remove(dev, vif, sta);
if (old_state == IEEE80211_STA_AUTH &&
- new_state == IEEE80211_STA_ASSOC)
+ new_state == IEEE80211_STA_ASSOC) {
+ mt7996_set_active_links(vif);
ev = MT76_STA_EVENT_ASSOC;
- else if (old_state == IEEE80211_STA_ASSOC &&
- new_state == IEEE80211_STA_AUTHORIZED)
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTHORIZED) {
ev = MT76_STA_EVENT_AUTHORIZE;
- else if (old_state == IEEE80211_STA_ASSOC &&
- new_state == IEEE80211_STA_AUTH)
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTH) {
ev = MT76_STA_EVENT_DISASSOC;
- else
+ } else {
return 0;
+ }
return mt7996_mac_sta_event(dev, vif, sta, ev);
}
@@ -1214,28 +1289,61 @@ static void mt7996_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
struct sk_buff *skb)
{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ struct ieee80211_sta *sta = control->sta;
+ struct mt7996_sta *msta = sta ? (void *)sta->drv_priv : NULL;
struct mt76_phy *mphy = hw->priv;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_vif *vif = info->control.vif;
+ struct mt7996_vif *mvif = vif ? (void *)vif->drv_priv : NULL;
struct mt76_wcid *wcid = &dev->mt76.global_wcid;
u8 link_id = u32_get_bits(info->control.flags,
IEEE80211_TX_CTRL_MLO_LINK);
rcu_read_lock();
- if (vif) {
- struct mt7996_vif *mvif = (void *)vif->drv_priv;
+ /* Use primary link_id if the value from mac80211 is set to
+ * IEEE80211_LINK_UNSPECIFIED.
+ */
+ if (link_id == IEEE80211_LINK_UNSPECIFIED) {
+ if (msta)
+ link_id = msta->deflink_id;
+ else if (mvif)
+ link_id = mvif->mt76.deflink_id;
+ }
+
+ if (vif && ieee80211_vif_is_mld(vif)) {
+ struct ieee80211_bss_conf *link_conf;
+
+ if (msta) {
+ struct ieee80211_link_sta *link_sta;
+
+ link_sta = rcu_dereference(sta->link[link_id]);
+ if (!link_sta)
+ link_sta = rcu_dereference(sta->link[msta->deflink_id]);
+
+ if (link_sta) {
+ memcpy(hdr->addr1, link_sta->addr, ETH_ALEN);
+ if (ether_addr_equal(sta->addr, hdr->addr3))
+ memcpy(hdr->addr3, link_sta->addr, ETH_ALEN);
+ }
+ }
+
+ link_conf = rcu_dereference(vif->link_conf[link_id]);
+ if (link_conf) {
+ memcpy(hdr->addr2, link_conf->addr, ETH_ALEN);
+ if (ether_addr_equal(vif->addr, hdr->addr3))
+ memcpy(hdr->addr3, link_conf->addr, ETH_ALEN);
+ }
+ }
+
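+ /* Editorial note: mac80211 builds the 802.11 header with MLD-level
+ * addresses; the rewrites above swap in the chosen link's STA/BSS
+ * addresses before the frame enters the per-link hardware path.
+ */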
+ if (mvif) {
struct mt76_vif_link *mlink = &mvif->deflink.mt76;
if (link_id < IEEE80211_LINK_UNSPECIFIED)
mlink = rcu_dereference(mvif->mt76.link[link_id]);
- if (!mlink) {
- ieee80211_free_txskb(hw, skb);
- goto unlock;
- }
-
if (mlink->wcid)
wcid = mlink->wcid;
@@ -1254,8 +1362,7 @@ static void mt7996_tx(struct ieee80211_hw *hw,
goto unlock;
}
- if (control->sta && link_id < IEEE80211_LINK_UNSPECIFIED) {
- struct mt7996_sta *msta = (void *)control->sta->drv_priv;
+ if (msta && link_id < IEEE80211_LINK_UNSPECIFIED) {
struct mt7996_sta_link *msta_link;
msta_link = rcu_dereference(msta->link[link_id]);
@@ -1292,16 +1399,13 @@ static int
mt7996_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_ampdu_params *params)
{
- enum ieee80211_ampdu_mlme_action action = params->action;
struct mt7996_dev *dev = mt7996_hw_dev(hw);
struct ieee80211_sta *sta = params->sta;
struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
struct ieee80211_txq *txq = sta->txq[params->tid];
- struct ieee80211_link_sta *link_sta;
u16 tid = params->tid;
u16 ssn = params->ssn;
struct mt76_txq *mtxq;
- unsigned int link_id;
int ret = 0;
if (!txq)
@@ -1311,59 +1415,42 @@ mt7996_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mutex_lock(&dev->mt76.mutex);
- for_each_sta_active_link(vif, sta, link_sta, link_id) {
- struct mt7996_sta_link *msta_link;
- struct mt7996_vif_link *link;
-
- msta_link = mt76_dereference(msta->link[link_id], &dev->mt76);
- if (!msta_link)
- continue;
-
- link = mt7996_vif_link(dev, vif, link_id);
- if (!link)
- continue;
-
- switch (action) {
- case IEEE80211_AMPDU_RX_START:
- mt76_rx_aggr_start(&dev->mt76, &msta_link->wcid, tid,
- ssn, params->buf_size);
- ret = mt7996_mcu_add_rx_ba(dev, params, link, true);
- break;
- case IEEE80211_AMPDU_RX_STOP:
- mt76_rx_aggr_stop(&dev->mt76, &msta_link->wcid, tid);
- ret = mt7996_mcu_add_rx_ba(dev, params, link, false);
- break;
- case IEEE80211_AMPDU_TX_OPERATIONAL:
- mtxq->aggr = true;
- mtxq->send_bar = false;
- ret = mt7996_mcu_add_tx_ba(dev, params, link,
- msta_link, true);
- break;
- case IEEE80211_AMPDU_TX_STOP_FLUSH:
- case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
- mtxq->aggr = false;
- clear_bit(tid, &msta_link->wcid.ampdu_state);
- ret = mt7996_mcu_add_tx_ba(dev, params, link,
- msta_link, false);
- break;
- case IEEE80211_AMPDU_TX_START:
- set_bit(tid, &msta_link->wcid.ampdu_state);
- ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
- break;
- case IEEE80211_AMPDU_TX_STOP_CONT:
- mtxq->aggr = false;
- clear_bit(tid, &msta_link->wcid.ampdu_state);
- ret = mt7996_mcu_add_tx_ba(dev, params, link,
- msta_link, false);
- break;
- }
-
- if (ret)
- break;
- }
-
- if (action == IEEE80211_AMPDU_TX_STOP_CONT)
+ switch (params->action) {
+ case IEEE80211_AMPDU_RX_START:
+ /* Since packets belonging to the same TID can be split over
+ * multiple links, store the AMPDU state for reordering in the
+ * primary link
+ */
+ mt76_rx_aggr_start(&dev->mt76, &msta->deflink.wcid, tid,
+ ssn, params->buf_size);
+ ret = mt7996_mcu_add_rx_ba(dev, params, vif, true);
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ mt76_rx_aggr_stop(&dev->mt76, &msta->deflink.wcid, tid);
+ ret = mt7996_mcu_add_rx_ba(dev, params, vif, false);
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ mtxq->aggr = true;
+ mtxq->send_bar = false;
+ ret = mt7996_mcu_add_tx_ba(dev, params, vif, true);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ mtxq->aggr = false;
+ clear_bit(tid, &msta->deflink.wcid.ampdu_state);
+ ret = mt7996_mcu_add_tx_ba(dev, params, vif, false);
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ set_bit(tid, &msta->deflink.wcid.ampdu_state);
+ ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
+ break;
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ mtxq->aggr = false;
+ clear_bit(tid, &msta->deflink.wcid.ampdu_state);
+ ret = mt7996_mcu_add_tx_ba(dev, params, vif, false);
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ }
mutex_unlock(&dev->mt76.mutex);
@@ -1617,19 +1704,13 @@ static void mt7996_sta_statistics(struct ieee80211_hw *hw,
}
}
-static void mt7996_link_rate_ctrl_update(void *data, struct ieee80211_sta *sta)
+static void mt7996_link_rate_ctrl_update(void *data,
+ struct mt7996_sta_link *msta_link)
{
- struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ struct mt7996_sta *msta = msta_link->sta;
struct mt7996_dev *dev = msta->vif->deflink.phy->dev;
- struct mt7996_sta_link *msta_link;
u32 *changed = data;
- rcu_read_lock();
-
- msta_link = rcu_dereference(msta->link[msta->deflink_id]);
- if (!msta_link)
- goto out;
-
spin_lock_bh(&dev->mt76.sta_poll_lock);
msta_link->changed |= *changed;
@@ -1637,8 +1718,6 @@ static void mt7996_link_rate_ctrl_update(void *data, struct ieee80211_sta *sta)
list_add_tail(&msta_link->rc_list, &dev->sta_rc_list);
spin_unlock_bh(&dev->mt76.sta_poll_lock);
-out:
- rcu_read_unlock();
}
static void mt7996_link_sta_rc_update(struct ieee80211_hw *hw,
@@ -1646,11 +1725,32 @@ static void mt7996_link_sta_rc_update(struct ieee80211_hw *hw,
struct ieee80211_link_sta *link_sta,
u32 changed)
{
- struct mt7996_dev *dev = mt7996_hw_dev(hw);
struct ieee80211_sta *sta = link_sta->sta;
+ struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ struct mt7996_sta_link *msta_link;
- mt7996_link_rate_ctrl_update(&changed, sta);
- ieee80211_queue_work(hw, &dev->rc_work);
+ rcu_read_lock();
+
+ msta_link = rcu_dereference(msta->link[link_sta->link_id]);
+ if (msta_link) {
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+
+ mt7996_link_rate_ctrl_update(&changed, msta_link);
+ ieee80211_queue_work(hw, &dev->rc_work);
+ }
+
+ rcu_read_unlock();
+}
+
+static void mt7996_sta_rate_ctrl_update(void *data, struct ieee80211_sta *sta)
+{
+ struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ struct mt7996_sta_link *msta_link;
+ u32 *changed = data;
+
+ msta_link = rcu_dereference(msta->link[msta->deflink_id]);
+ if (msta_link)
+ mt7996_link_rate_ctrl_update(&changed, msta_link);
}
static int
@@ -1671,7 +1771,7 @@ mt7996_set_bitrate_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
* - multiple rates: if it's not in range format i.e. 0-{7,8,9} for VHT
* then multiple MCS setting (MCS 4,5,6) is not supported.
*/
- ieee80211_iterate_stations_atomic(hw, mt7996_link_rate_ctrl_update,
+ ieee80211_iterate_stations_atomic(hw, mt7996_sta_rate_ctrl_update,
&changed);
ieee80211_queue_work(hw, &dev->rc_work);
@@ -2072,9 +2172,7 @@ mt7996_net_fill_forward_path(struct ieee80211_hw *hw,
struct mt7996_dev *dev = mt7996_hw_dev(hw);
struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
struct mt7996_sta_link *msta_link;
- struct mt7996_vif_link *link;
struct mt76_vif_link *mlink;
- struct mt7996_phy *phy;
mlink = rcu_dereference(mvif->mt76.link[msta->deflink_id]);
if (!mlink)
@@ -2087,12 +2185,9 @@ mt7996_net_fill_forward_path(struct ieee80211_hw *hw,
if (!msta_link->wcid.sta || msta_link->wcid.idx > MT7996_WTBL_STA)
return -EIO;
- link = (struct mt7996_vif_link *)mlink;
- phy = mt7996_vif_link_phy(link);
- if (!phy)
- return -ENODEV;
-
- if (phy != &dev->phy && phy->mt76->band_idx == MT_BAND2)
+ if (dev->hif2 &&
+ ((is_mt7996(&dev->mt76) && msta_link->wcid.phy_idx == MT_BAND2) ||
+ (is_mt7992(&dev->mt76) && msta_link->wcid.phy_idx == MT_BAND1)))
wed = &dev->mt76.mmio.wed_hif2;
if (!mtk_wed_device_active(wed))
@@ -2105,7 +2200,11 @@ mt7996_net_fill_forward_path(struct ieee80211_hw *hw,
path->mtk_wdma.queue = 0;
path->mtk_wdma.wcid = msta_link->wcid.idx;
- path->mtk_wdma.amsdu = mtk_wed_is_amsdu_supported(wed);
+ if (ieee80211_hw_check(hw, SUPPORTS_AMSDU_IN_AMPDU) &&
+ mtk_wed_is_amsdu_supported(wed))
+ path->mtk_wdma.amsdu = msta_link->wcid.amsdu;
+ else
+ path->mtk_wdma.amsdu = 0;
ctx->dev = NULL;
return 0;
@@ -2121,6 +2220,19 @@ mt7996_change_vif_links(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
return 0;
}
+static void
+mt7996_reconfig_complete(struct ieee80211_hw *hw,
+ enum ieee80211_reconfig_type reconfig_type)
+{
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ struct mt7996_phy *phy;
+
+ ieee80211_wake_queues(hw);
+ mt7996_for_each_phy(dev, phy)
+ ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work,
+ MT7996_WATCHDOG_TIME);
+}
+
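+/* Editorial note: with the full-reset path now ending in
+ * ieee80211_restart_hw(), waking the queues and re-arming the per-phy
+ * watchdog work moves here, where it runs once mac80211 has completed
+ * reconfiguration.
+ */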
const struct ieee80211_ops mt7996_ops = {
.add_chanctx = mt76_add_chanctx,
.remove_chanctx = mt76_remove_chanctx,
@@ -2179,4 +2291,5 @@ const struct ieee80211_ops mt7996_ops = {
#endif
.change_vif_links = mt7996_change_vif_links,
.change_sta_links = mt7996_mac_sta_change_links,
+ .reconfig_complete = mt7996_reconfig_complete,
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
index 0be03eb3cf46..0347ee0c2dd7 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
@@ -242,6 +242,30 @@ mt7996_mcu_parse_response(struct mt76_dev *mdev, int cmd,
return ret;
}
+static void
+mt7996_mcu_set_timeout(struct mt76_dev *mdev, int cmd)
+{
+ mdev->mcu.timeout = 5 * HZ;
+
+ if (!(cmd & __MCU_CMD_FIELD_UNI))
+ return;
+
+ switch (FIELD_GET(__MCU_CMD_FIELD_ID, cmd)) {
+ case MCU_UNI_CMD_THERMAL:
+ case MCU_UNI_CMD_TWT:
+ case MCU_UNI_CMD_GET_MIB_INFO:
+ case MCU_UNI_CMD_STA_REC_UPDATE:
+ case MCU_UNI_CMD_BSS_INFO_UPDATE:
+ mdev->mcu.timeout = 2 * HZ;
+ return;
+ case MCU_UNI_CMD_EFUSE_CTRL:
+ mdev->mcu.timeout = 20 * HZ;
+ return;
+ default:
+ break;
+ }
+}
+
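+/* Editorial note: the default MCU timeout drops from the previous
+ * blanket 20 * HZ to 5 * HZ; frequent UNI commands (thermal, TWT, MIB,
+ * STA/BSS updates) use 2 * HZ so a stuck firmware is detected sooner,
+ * while EFUSE_CTRL keeps 20 * HZ, presumably because eFuse access can
+ * legitimately take longer.
+ */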
static int
mt7996_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
int cmd, int *wait_seq)
@@ -255,7 +279,7 @@ mt7996_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
u32 val;
u8 seq;
- mdev->mcu.timeout = 20 * HZ;
+ mt7996_mcu_set_timeout(mdev, cmd);
seq = ++dev->mt76.mcu.msg_seq & 0xf;
if (!seq)
@@ -660,7 +684,7 @@ mt7996_mcu_wed_rro_event(struct mt7996_dev *dev, struct sk_buff *skb)
{
struct mt7996_mcu_wed_rro_event *event = (void *)skb->data;
- if (!dev->has_rro)
+ if (!mt7996_has_hwrro(dev))
return;
skb_pull(skb, sizeof(struct mt7996_mcu_rxd) + 4);
@@ -899,17 +923,28 @@ mt7996_mcu_bss_txcmd_tlv(struct sk_buff *skb, bool en)
}
static void
-mt7996_mcu_bss_mld_tlv(struct sk_buff *skb, struct mt76_vif_link *mlink)
+mt7996_mcu_bss_mld_tlv(struct sk_buff *skb,
+ struct ieee80211_bss_conf *link_conf,
+ struct mt7996_vif_link *link)
{
+ struct ieee80211_vif *vif = link_conf->vif;
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
struct bss_mld_tlv *mld;
struct tlv *tlv;
tlv = mt7996_mcu_add_uni_tlv(skb, UNI_BSS_INFO_MLD, sizeof(*mld));
-
mld = (struct bss_mld_tlv *)tlv;
- mld->group_mld_id = 0xff;
- mld->own_mld_id = mlink->idx;
- mld->remap_idx = 0xff;
+ mld->own_mld_id = link->mld_idx;
+ mld->link_id = link_conf->link_id;
+
+ if (ieee80211_vif_is_mld(vif)) {
+ mld->group_mld_id = mvif->mld_group_idx;
+ mld->remap_idx = mvif->mld_remap_idx;
+ memcpy(mld->mac_addr, vif->addr, ETH_ALEN);
+ } else {
+ mld->group_mld_id = 0xff;
+ mld->remap_idx = 0xff;
+ }
}
static void
@@ -1108,6 +1143,8 @@ int mt7996_mcu_add_bss_info(struct mt7996_phy *phy, struct ieee80211_vif *vif,
goto out;
if (enable) {
+ struct mt7996_vif_link *link;
+
mt7996_mcu_bss_rfch_tlv(skb, phy);
mt7996_mcu_bss_bmc_tlv(skb, mlink, phy);
mt7996_mcu_bss_ra_tlv(skb, phy);
@@ -1118,7 +1155,8 @@ int mt7996_mcu_add_bss_info(struct mt7996_phy *phy, struct ieee80211_vif *vif,
mt7996_mcu_bss_he_tlv(skb, vif, link_conf, phy);
/* this tag is necessary regardless of whether the vif is MLD */
- mt7996_mcu_bss_mld_tlv(skb, mlink);
+ link = container_of(mlink, struct mt7996_vif_link, mt76);
+ mt7996_mcu_bss_mld_tlv(skb, link_conf, link);
}
mt7996_mcu_bss_mbssid_tlv(skb, link_conf, enable);
@@ -1149,9 +1187,8 @@ int mt7996_mcu_set_timing(struct mt7996_phy *phy, struct ieee80211_vif *vif,
static int
mt7996_mcu_sta_ba(struct mt7996_dev *dev, struct mt76_vif_link *mvif,
struct ieee80211_ampdu_params *params,
- bool enable, bool tx)
+ struct mt76_wcid *wcid, bool enable, bool tx)
{
- struct mt76_wcid *wcid = (struct mt76_wcid *)params->sta->drv_priv;
struct sta_rec_ba_uni *ba;
struct sk_buff *skb;
struct tlv *tlv;
@@ -1170,7 +1207,7 @@ mt7996_mcu_sta_ba(struct mt7996_dev *dev, struct mt76_vif_link *mvif,
ba->ba_en = enable << params->tid;
ba->amsdu = params->amsdu;
ba->tid = params->tid;
- ba->ba_rdd_rro = !tx && enable && dev->has_rro;
+ ba->ba_rdd_rro = !tx && enable && mt7996_has_hwrro(dev);
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_WMWA_UNI_CMD(STA_REC_UPDATE), true);
@@ -1179,20 +1216,67 @@ mt7996_mcu_sta_ba(struct mt7996_dev *dev, struct mt76_vif_link *mvif,
/** starec & wtbl **/
int mt7996_mcu_add_tx_ba(struct mt7996_dev *dev,
struct ieee80211_ampdu_params *params,
- struct mt7996_vif_link *link,
- struct mt7996_sta_link *msta_link, bool enable)
+ struct ieee80211_vif *vif, bool enable)
{
- if (enable && !params->amsdu)
- msta_link->wcid.amsdu = false;
+ struct ieee80211_sta *sta = params->sta;
+ struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ struct ieee80211_link_sta *link_sta;
+ unsigned int link_id;
+ int ret = 0;
+
+ for_each_sta_active_link(vif, sta, link_sta, link_id) {
+ struct mt7996_sta_link *msta_link;
+ struct mt7996_vif_link *link;
+
+ msta_link = mt76_dereference(msta->link[link_id], &dev->mt76);
+ if (!msta_link)
+ continue;
+
+ link = mt7996_vif_link(dev, vif, link_id);
+ if (!link)
+ continue;
+
+ if (enable && !params->amsdu)
+ msta_link->wcid.amsdu = false;
- return mt7996_mcu_sta_ba(dev, &link->mt76, params, enable, true);
+ ret = mt7996_mcu_sta_ba(dev, &link->mt76, params,
+ &msta_link->wcid, enable, true);
+ if (ret)
+ break;
+ }
+
+ return ret;
}
int mt7996_mcu_add_rx_ba(struct mt7996_dev *dev,
struct ieee80211_ampdu_params *params,
- struct mt7996_vif_link *link, bool enable)
+ struct ieee80211_vif *vif, bool enable)
{
- return mt7996_mcu_sta_ba(dev, &link->mt76, params, enable, false);
+ struct ieee80211_sta *sta = params->sta;
+ struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ struct ieee80211_link_sta *link_sta;
+ unsigned int link_id;
+ int ret = 0;
+
+ for_each_sta_active_link(vif, sta, link_sta, link_id) {
+ struct mt7996_sta_link *msta_link;
+ struct mt7996_vif_link *link;
+
+ msta_link = mt76_dereference(msta->link[link_id], &dev->mt76);
+ if (!msta_link)
+ continue;
+
+ link = mt7996_vif_link(dev, vif, link_id);
+ if (!link)
+ continue;
+
+ ret = mt7996_mcu_sta_ba(dev, &link->mt76, params,
+ &msta_link->wcid, enable, false);
+ if (ret)
+ break;
+ }
+
+ return ret;
}
static void
@@ -1753,19 +1837,6 @@ mt7996_mcu_sta_bfer_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
bf->mem_20m = bf->nrow < BF_MAT_ORDER ?
matrix[bf->nrow][bf->ncol] : 0;
}
-
- switch (link_sta->bandwidth) {
- case IEEE80211_STA_RX_BW_160:
- case IEEE80211_STA_RX_BW_80:
- bf->mem_total = bf->mem_20m * 2;
- break;
- case IEEE80211_STA_RX_BW_40:
- bf->mem_total = bf->mem_20m;
- break;
- case IEEE80211_STA_RX_BW_20:
- default:
- break;
- }
}
static void
@@ -2265,13 +2336,10 @@ error_unlock:
}
static int
-mt7996_mcu_add_group(struct mt7996_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+mt7996_mcu_add_group(struct mt7996_dev *dev, struct mt7996_vif_link *link,
+ struct mt76_wcid *wcid)
{
#define MT_STA_BSS_GROUP 1
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- struct mt7996_sta_link *msta_link;
- struct mt7996_sta *msta;
struct {
u8 __rsv1[4];
@@ -2286,13 +2354,10 @@ mt7996_mcu_add_group(struct mt7996_dev *dev, struct ieee80211_vif *vif,
.tag = cpu_to_le16(UNI_VOW_DRR_CTRL),
.len = cpu_to_le16(sizeof(req) - 4),
.action = cpu_to_le32(MT_STA_BSS_GROUP),
- .val = cpu_to_le32(mvif->deflink.mt76.idx % 16),
+ .val = cpu_to_le32(link->mt76.idx % 16),
+ .wlan_idx = cpu_to_le16(wcid->idx),
};
- msta = sta ? (struct mt7996_sta *)sta->drv_priv : NULL;
- msta_link = msta ? &msta->deflink : &mvif->deflink.msta_link;
- req.wlan_idx = cpu_to_le16(msta_link->wcid.idx);
-
return mt76_mcu_send_msg(&dev->mt76, MCU_WM_UNI_CMD(VOW), &req,
sizeof(req), true);
}
@@ -2432,7 +2497,7 @@ int mt7996_mcu_add_sta(struct mt7996_dev *dev,
}
}
- ret = mt7996_mcu_add_group(dev, link_conf->vif, sta);
+ ret = mt7996_mcu_add_group(dev, link, wcid);
if (ret) {
dev_kfree_skb(skb);
return ret;
@@ -2467,41 +2532,58 @@ mt7996_mcu_sta_key_tlv(struct mt76_wcid *wcid,
enum set_key_cmd cmd)
{
struct sta_rec_sec_uni *sec;
+ struct sec_key_uni *sec_key;
struct tlv *tlv;
+ u8 cipher;
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_KEY_V2, sizeof(*sec));
sec = (struct sta_rec_sec_uni *)tlv;
- sec->add = cmd;
-
- if (cmd == SET_KEY) {
- struct sec_key_uni *sec_key;
- u8 cipher;
-
- cipher = mt76_connac_mcu_get_cipher(key->cipher);
- if (cipher == MCU_CIPHER_NONE)
- return -EOPNOTSUPP;
-
- sec_key = &sec->key[0];
- sec_key->wlan_idx = cpu_to_le16(wcid->idx);
- sec_key->mgmt_prot = 0;
- sec_key->cipher_id = cipher;
- sec_key->cipher_len = sizeof(*sec_key);
- sec_key->key_id = key->keyidx;
- sec_key->key_len = key->keylen;
- sec_key->need_resp = 0;
- memcpy(sec_key->key, key->key, key->keylen);
-
- if (cipher == MCU_CIPHER_TKIP) {
- /* Rx/Tx MIC keys are swapped */
- memcpy(sec_key->key + 16, key->key + 24, 8);
- memcpy(sec_key->key + 24, key->key + 16, 8);
- }
+ sec->add = 0;
+ sec->n_cipher = 1;
+ sec_key = &sec->key[0];
+ sec_key->wlan_idx = cpu_to_le16(wcid->idx);
+ sec_key->key_id = key->keyidx;
- sec->n_cipher = 1;
- } else {
- sec->n_cipher = 0;
+ if (cmd != SET_KEY)
+ return 0;
+
+ cipher = mt76_connac_mcu_get_cipher(key->cipher);
+ if (cipher == MCU_CIPHER_NONE)
+ return -EOPNOTSUPP;
+
+ sec_key->mgmt_prot = 0;
+ sec_key->cipher_id = cipher;
+ sec_key->cipher_len = sizeof(*sec_key);
+ sec_key->key_len = key->keylen;
+ sec_key->need_resp = 0;
+ memcpy(sec_key->key, key->key, key->keylen);
+
+ if (cipher == MCU_CIPHER_TKIP) {
+ /* Rx/Tx MIC keys are swapped */
+ memcpy(sec_key->key + 16, key->key + 24, 8);
+ memcpy(sec_key->key + 24, key->key + 16, 8);
+ return 0;
}
+ if (sec_key->key_id != 6 && sec_key->key_id != 7)
+ return 0;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ sec_key->cipher_id = MCU_CIPHER_BCN_PROT_CMAC_128;
+ break;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ sec_key->cipher_id = MCU_CIPHER_BCN_PROT_GMAC_128;
+ break;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ sec_key->cipher_id = MCU_CIPHER_BCN_PROT_GMAC_256;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ sec_key->bcn_mode = BP_SW_MODE;
+
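+ /* Editorial note: beacon-protection (BIGTK, key_id 6/7) keys now go
+ * through this same STA_REC_KEY_V2 path, with cipher_id remapped to
+ * the MCU_CIPHER_BCN_PROT_* variants and bcn_mode set to BP_SW_MODE,
+ * replacing the dedicated UNI_BSS_INFO_BCN_PROT message removed below.
+ */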
return 0;
}
@@ -2519,99 +2601,12 @@ int mt7996_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
return PTR_ERR(skb);
ret = mt7996_mcu_sta_key_tlv(wcid, skb, key, cmd);
- if (ret)
- return ret;
-
- return mt76_mcu_skb_send_msg(dev, skb, mcu_cmd, true);
-}
-
-static int mt7996_mcu_get_pn(struct mt7996_dev *dev,
- struct mt7996_vif_link *link,
- struct mt7996_sta_link *msta_link, u8 *pn)
-{
-#define TSC_TYPE_BIGTK_PN 2
- struct sta_rec_pn_info *pn_info;
- struct sk_buff *skb, *rskb;
- struct tlv *tlv;
- int ret;
-
- skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &link->mt76,
- &msta_link->wcid);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_PN_INFO, sizeof(*pn_info));
- pn_info = (struct sta_rec_pn_info *)tlv;
-
- pn_info->tsc_type = TSC_TYPE_BIGTK_PN;
- ret = mt76_mcu_skb_send_and_get_msg(&dev->mt76, skb,
- MCU_WM_UNI_CMD_QUERY(STA_REC_UPDATE),
- true, &rskb);
- if (ret)
- return ret;
-
- skb_pull(rskb, 4);
-
- pn_info = (struct sta_rec_pn_info *)rskb->data;
- if (le16_to_cpu(pn_info->tag) == STA_REC_PN_INFO)
- memcpy(pn, pn_info->pn, 6);
-
- dev_kfree_skb(rskb);
- return 0;
-}
-
-int mt7996_mcu_bcn_prot_enable(struct mt7996_dev *dev,
- struct mt7996_vif_link *link,
- struct mt7996_sta_link *msta_link,
- struct ieee80211_key_conf *key)
-{
- struct mt7996_mcu_bcn_prot_tlv *bcn_prot;
- struct sk_buff *skb;
- struct tlv *tlv;
- u8 pn[6] = {};
- int len = sizeof(struct bss_req_hdr) +
- sizeof(struct mt7996_mcu_bcn_prot_tlv);
- int ret;
-
- skb = __mt7996_mcu_alloc_bss_req(&dev->mt76, &link->mt76, len);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_BCN_PROT, sizeof(*bcn_prot));
-
- bcn_prot = (struct mt7996_mcu_bcn_prot_tlv *)tlv;
-
- ret = mt7996_mcu_get_pn(dev, link, msta_link, pn);
if (ret) {
dev_kfree_skb(skb);
return ret;
}
- switch (key->cipher) {
- case WLAN_CIPHER_SUITE_AES_CMAC:
- bcn_prot->cipher_id = MCU_CIPHER_BCN_PROT_CMAC_128;
- break;
- case WLAN_CIPHER_SUITE_BIP_GMAC_128:
- bcn_prot->cipher_id = MCU_CIPHER_BCN_PROT_GMAC_128;
- break;
- case WLAN_CIPHER_SUITE_BIP_GMAC_256:
- bcn_prot->cipher_id = MCU_CIPHER_BCN_PROT_GMAC_256;
- break;
- case WLAN_CIPHER_SUITE_BIP_CMAC_256:
- default:
- dev_err(dev->mt76.dev, "Not supported Bigtk Cipher\n");
- dev_kfree_skb(skb);
- return -EOPNOTSUPP;
- }
-
- pn[0]++;
- memcpy(bcn_prot->pn, pn, 6);
- bcn_prot->enable = BP_SW_MODE;
- memcpy(bcn_prot->key, key->key, WLAN_MAX_KEY_LEN);
- bcn_prot->key_id = key->keyidx;
-
- return mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_WMWA_UNI_CMD(BSS_INFO_UPDATE), true);
+ return mt76_mcu_skb_send_msg(dev, skb, mcu_cmd, true);
}
int mt7996_mcu_add_dev_info(struct mt7996_phy *phy, struct ieee80211_vif *vif,
@@ -2752,7 +2747,7 @@ mt7996_mcu_beacon_cont(struct mt7996_dev *dev,
}
int mt7996_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *link_conf)
+ struct ieee80211_bss_conf *link_conf, bool enabled)
{
struct mt7996_dev *dev = mt7996_hw_dev(hw);
struct mt7996_vif_link *link = mt7996_vif_conf_link(dev, vif, link_conf);
@@ -2763,7 +2758,6 @@ int mt7996_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct tlv *tlv;
struct bss_bcn_content_tlv *bcn;
int len, extra_len = 0;
- bool enabled = link_conf->enable_beacon;
if (link_conf->nontransmitted)
return 0;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
index 130ea95626d5..c841da1c60e5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
@@ -351,17 +351,6 @@ enum {
BP_HW_MODE,
};
-struct mt7996_mcu_bcn_prot_tlv {
- __le16 tag;
- __le16 len;
- u8 pn[6];
- u8 enable;
- u8 cipher_id;
- u8 key[WLAN_MAX_KEY_LEN];
- u8 key_id;
- u8 __rsv[3];
-} __packed;
-
struct bss_ra_tlv {
__le16 tag;
__le16 len;
@@ -481,7 +470,8 @@ struct bss_mld_tlv {
u8 own_mld_id;
u8 mac_addr[ETH_ALEN];
u8 remap_idx;
- u8 __rsv[3];
+ u8 link_id;
+ u8 __rsv[2];
} __packed;
struct sta_rec_ht_uni {
@@ -530,6 +520,9 @@ struct sec_key_uni {
u8 key_len;
u8 need_resp;
u8 key[32];
+ u8 pn[6];
+ u8 bcn_mode;
+ u8 _rsv;
} __packed;
struct sta_rec_sec_uni {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
index 30b40f4a91be..d14b626ee511 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
@@ -459,14 +459,15 @@ int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr,
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
struct pci_dev *pci_dev = pdev_ptr;
- u32 hif1_ofs = 0;
+ u32 hif1_ofs;
if (!wed_enable)
return 0;
- dev->has_rro = true;
+ dev->mt76.hwrro_mode = is_mt7996(&dev->mt76) ? MT76_HWRRO_V3
+ : MT76_HWRRO_V3_1;
- hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
+ hif1_ofs = dev->hif2 ? MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0) : 0;
if (hif2)
wed = &dev->mt76.mmio.wed_hif2;
@@ -490,11 +491,18 @@ int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr,
wed->wlan.wpdma_tx = wed->wlan.phy_base + hif1_ofs +
MT_TXQ_RING_BASE(0) +
MT7996_TXQ_BAND2 * MT_RING_SIZE;
- if (dev->has_rro) {
- wed->wlan.wpdma_txfree = wed->wlan.phy_base + hif1_ofs +
- MT_RXQ_RING_BASE(0) +
- MT7996_RXQ_TXFREE2 * MT_RING_SIZE;
- wed->wlan.txfree_tbit = ffs(MT_INT_RX_TXFREE_EXT) - 1;
+ if (mt7996_has_hwrro(dev)) {
+ if (is_mt7996(&dev->mt76)) {
+ wed->wlan.txfree_tbit = ffs(MT_INT_RX_TXFREE_EXT) - 1;
+ wed->wlan.wpdma_txfree = wed->wlan.phy_base + hif1_ofs +
+ MT_RXQ_RING_BASE(0) +
+ MT7996_RXQ_TXFREE2 * MT_RING_SIZE;
+ } else {
+ wed->wlan.txfree_tbit = ffs(MT_INT_RX_TXFREE_BAND1_EXT) - 1;
+ wed->wlan.wpdma_txfree = wed->wlan.phy_base + hif1_ofs +
+ MT_RXQ_RING_BASE(0) +
+ MT7996_RXQ_MCU_WA_EXT * MT_RING_SIZE;
+ }
} else {
wed->wlan.wpdma_txfree = wed->wlan.phy_base + hif1_ofs +
MT_RXQ_RING_BASE(0) +
@@ -503,14 +511,14 @@ int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr,
}
wed->wlan.wpdma_rx_glo = wed->wlan.phy_base + hif1_ofs + MT_WFDMA0_GLO_CFG;
- wed->wlan.wpdma_rx = wed->wlan.phy_base + hif1_ofs +
- MT_RXQ_RING_BASE(MT7996_RXQ_BAND0) +
- MT7996_RXQ_BAND0 * MT_RING_SIZE;
+ wed->wlan.wpdma_rx[0] = wed->wlan.phy_base + hif1_ofs +
+ MT_RXQ_RING_BASE(MT7996_RXQ_BAND2) +
+ MT7996_RXQ_BAND2 * MT_RING_SIZE;
wed->wlan.id = MT7996_DEVICE_ID_2;
wed->wlan.tx_tbit[0] = ffs(MT_INT_TX_DONE_BAND2) - 1;
} else {
- wed->wlan.hw_rro = dev->has_rro; /* default on */
+ wed->wlan.hw_rro = mt7996_has_hwrro(dev);
wed->wlan.wpdma_int = wed->wlan.phy_base + MT_INT_SOURCE_CSR;
wed->wlan.wpdma_mask = wed->wlan.phy_base + MT_INT_MASK_CSR;
wed->wlan.wpdma_tx = wed->wlan.phy_base + MT_TXQ_RING_BASE(0) +
@@ -518,16 +526,26 @@ int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr,
wed->wlan.wpdma_rx_glo = wed->wlan.phy_base + MT_WFDMA0_GLO_CFG;
- wed->wlan.wpdma_rx = wed->wlan.phy_base +
- MT_RXQ_RING_BASE(MT7996_RXQ_BAND0) +
- MT7996_RXQ_BAND0 * MT_RING_SIZE;
+ wed->wlan.wpdma_rx[0] = wed->wlan.phy_base +
+ MT_RXQ_RING_BASE(MT7996_RXQ_BAND0) +
+ MT7996_RXQ_BAND0 * MT_RING_SIZE;
wed->wlan.wpdma_rx_rro[0] = wed->wlan.phy_base +
MT_RXQ_RING_BASE(MT7996_RXQ_RRO_BAND0) +
MT7996_RXQ_RRO_BAND0 * MT_RING_SIZE;
- wed->wlan.wpdma_rx_rro[1] = wed->wlan.phy_base + hif1_ofs +
- MT_RXQ_RING_BASE(MT7996_RXQ_RRO_BAND2) +
- MT7996_RXQ_RRO_BAND2 * MT_RING_SIZE;
+ if (is_mt7996(&dev->mt76)) {
+ wed->wlan.wpdma_rx_rro[1] = wed->wlan.phy_base + hif1_ofs +
+ MT_RXQ_RING_BASE(MT7996_RXQ_RRO_BAND2) +
+ MT7996_RXQ_RRO_BAND2 * MT_RING_SIZE;
+ } else {
+ wed->wlan.wpdma_rx_rro[1] = wed->wlan.phy_base + hif1_ofs +
+ MT_RXQ_RING_BASE(MT7996_RXQ_RRO_BAND1) +
+ MT7996_RXQ_RRO_BAND1 * MT_RING_SIZE;
+ wed->wlan.wpdma_rx[1] = wed->wlan.phy_base + hif1_ofs +
+ MT_RXQ_RING_BASE(MT7996_RXQ_BAND1) +
+ MT7996_RXQ_BAND1 * MT_RING_SIZE;
+ }
+
wed->wlan.wpdma_rx_pg = wed->wlan.phy_base +
MT_RXQ_RING_BASE(MT7996_RXQ_MSDU_PG_BAND0) +
MT7996_RXQ_MSDU_PG_BAND0 * MT_RING_SIZE;
@@ -537,10 +555,14 @@ int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr,
wed->wlan.rx_size = SKB_WITH_OVERHEAD(MT_RX_BUF_SIZE);
wed->wlan.rx_tbit[0] = ffs(MT_INT_RX_DONE_BAND0) - 1;
- wed->wlan.rx_tbit[1] = ffs(MT_INT_RX_DONE_BAND2) - 1;
-
wed->wlan.rro_rx_tbit[0] = ffs(MT_INT_RX_DONE_RRO_BAND0) - 1;
- wed->wlan.rro_rx_tbit[1] = ffs(MT_INT_RX_DONE_RRO_BAND2) - 1;
+ if (is_mt7996(&dev->mt76)) {
+ wed->wlan.rx_tbit[1] = ffs(MT_INT_RX_DONE_BAND2) - 1;
+ wed->wlan.rro_rx_tbit[1] = ffs(MT_INT_RX_DONE_RRO_BAND2) - 1;
+ } else {
+ wed->wlan.rx_tbit[1] = ffs(MT_INT_RX_DONE_BAND1) - 1;
+ wed->wlan.rro_rx_tbit[1] = ffs(MT_INT_RX_DONE_RRO_BAND1) - 1;
+ }
wed->wlan.rx_pg_tbit[0] = ffs(MT_INT_RX_DONE_MSDU_PG_BAND0) - 1;
wed->wlan.rx_pg_tbit[1] = ffs(MT_INT_RX_DONE_MSDU_PG_BAND1) - 1;
@@ -548,16 +570,27 @@ int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr,
wed->wlan.tx_tbit[0] = ffs(MT_INT_TX_DONE_BAND0) - 1;
wed->wlan.tx_tbit[1] = ffs(MT_INT_TX_DONE_BAND1) - 1;
- if (dev->has_rro) {
- wed->wlan.wpdma_txfree = wed->wlan.phy_base + MT_RXQ_RING_BASE(0) +
- MT7996_RXQ_TXFREE0 * MT_RING_SIZE;
- wed->wlan.txfree_tbit = ffs(MT_INT_RX_TXFREE_MAIN) - 1;
+ if (is_mt7996(&dev->mt76)) {
+ if (mt7996_has_hwrro(dev)) {
+ wed->wlan.wpdma_txfree = wed->wlan.phy_base +
+ MT_RXQ_RING_BASE(0) +
+ MT7996_RXQ_TXFREE0 * MT_RING_SIZE;
+ wed->wlan.txfree_tbit = ffs(MT_INT_RX_TXFREE_MAIN) - 1;
+ } else {
+ wed->wlan.wpdma_txfree = wed->wlan.phy_base +
+ MT_RXQ_RING_BASE(0) +
+ MT7996_RXQ_MCU_WA_MAIN * MT_RING_SIZE;
+ wed->wlan.txfree_tbit = ffs(MT_INT_RX_DONE_WA_MAIN) - 1;
+ }
} else {
wed->wlan.txfree_tbit = ffs(MT_INT_RX_DONE_WA_MAIN) - 1;
wed->wlan.wpdma_txfree = wed->wlan.phy_base + MT_RXQ_RING_BASE(0) +
MT7996_RXQ_MCU_WA_MAIN * MT_RING_SIZE;
}
dev->mt76.rx_token_size = MT7996_TOKEN_SIZE + wed->wlan.rx_npkt;
+
+ if (dev->hif2 && is_mt7992(&dev->mt76))
+ wed->wlan.id = 0x7992;
}
wed->wlan.nbuf = MT7996_HW_TOKEN_SIZE;
@@ -576,8 +609,10 @@ int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr,
wed->wlan.reset_complete = mt76_wed_reset_complete;
}
- if (mtk_wed_device_attach(wed))
+ if (mtk_wed_device_attach(wed)) {
+ dev->mt76.hwrro_mode = MT76_HWRRO_OFF;
return 0;
+ }
*irq = wed->irq;
dev->mt76.dma_dev = wed->dev;
@@ -690,12 +725,18 @@ static void mt7996_irq_tasklet(struct tasklet_struct *t)
dev->mt76.mmio.irqmask);
if (intr1 & MT_INT_RX_TXFREE_EXT)
napi_schedule(&dev->mt76.napi[MT_RXQ_TXFREE_BAND2]);
+
+ if (intr1 & MT_INT_RX_DONE_BAND2_EXT)
+ napi_schedule(&dev->mt76.napi[MT_RXQ_BAND2]);
+
+ if (intr1 & MT_INT_RX_TXFREE_BAND1_EXT)
+ napi_schedule(&dev->mt76.napi[MT_RXQ_BAND1_WA]);
}
if (mtk_wed_device_active(wed)) {
mtk_wed_device_irq_set_mask(wed, 0);
intr = mtk_wed_device_irq_get(wed, dev->mt76.mmio.irqmask);
- intr |= (intr1 & ~MT_INT_RX_TXFREE_EXT);
+ intr |= (intr1 & ~MT_INT_TX_RX_DONE_EXT);
} else {
mt76_wr(dev, MT_INT_MASK_CSR, 0);
if (dev->hif2)
@@ -781,6 +822,8 @@ struct mt7996_dev *mt7996_mmio_probe(struct device *pdev,
.rx_skb = mt7996_queue_rx_skb,
.rx_check = mt7996_rx_check,
.rx_poll_complete = mt7996_rx_poll_complete,
+ .rx_rro_ind_process = mt7996_rro_rx_process,
+ .rx_rro_add_msdu_page = mt7996_rro_msdu_page_add,
.update_survey = mt7996_update_channel,
.set_channel = mt7996_set_channel,
.vif_link_add = mt7996_vif_link_add,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
index 8509d508e1e1..8ec2acdb3319 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
@@ -112,6 +112,8 @@
#define MT7996_CRIT_TEMP 110
#define MT7996_MAX_TEMP 120
+#define MT7996_MAX_HIF_RXD_IN_PG 5
+#define MT7996_RRO_MSDU_PG_HASH_SIZE 127
#define MT7996_RRO_MAX_SESSION 1024
#define MT7996_RRO_WINDOW_MAX_LEN 1024
#define MT7996_RRO_ADDR_ELEM_LEN 128
@@ -128,6 +130,10 @@
#define MT7996_RX_MSDU_PAGE_SIZE (128 + \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+/* RRO 3.1 */
+#define MT7996_RRO_MSDU_PG_CR_CNT 8
+#define MT7996_RRO_MSDU_PG_SIZE_PER_CR 0x10000
+
struct mt7996_vif;
struct mt7996_sta;
struct mt7996_dfs_pulse;
@@ -178,7 +184,7 @@ enum mt7996_rxq_id {
MT7996_RXQ_BAND1 = 5, /* for mt7992 */
MT7996_RXQ_BAND2 = 5,
MT7996_RXQ_RRO_BAND0 = 8,
- MT7996_RXQ_RRO_BAND1 = 8,/* unused */
+ MT7996_RXQ_RRO_BAND1 = 9,
MT7996_RXQ_RRO_BAND2 = 6,
MT7996_RXQ_MSDU_PG_BAND0 = 10,
MT7996_RXQ_MSDU_PG_BAND1 = 11,
@@ -187,6 +193,7 @@ enum mt7996_rxq_id {
MT7996_RXQ_TXFREE1 = 9,
MT7996_RXQ_TXFREE2 = 7,
MT7996_RXQ_RRO_IND = 0,
+ MT7996_RXQ_RRO_RXDMAD_C = 0,
MT7990_RXQ_TXFREE0 = 6,
MT7990_RXQ_TXFREE1 = 7,
};
@@ -248,11 +255,16 @@ struct mt7996_vif_link {
struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS];
struct cfg80211_bitrate_mask bitrate_mask;
+
+ u8 mld_idx;
};
struct mt7996_vif {
struct mt7996_vif_link deflink; /* must be first */
struct mt76_vif_data mt76;
+
+ u8 mld_group_idx;
+ u8 mld_remap_idx;
};
/* crash-dump */
@@ -270,15 +282,17 @@ struct mt7996_hif {
struct device *dev;
void __iomem *regs;
int irq;
+
+ enum pci_bus_speed speed;
+ enum pcie_link_width width;
};
+#define WED_RRO_ADDR_SIGNATURE_MASK GENMASK(31, 24)
+#define WED_RRO_ADDR_COUNT_MASK GENMASK(14, 4)
+#define WED_RRO_ADDR_HEAD_HIGH_MASK GENMASK(3, 0)
struct mt7996_wed_rro_addr {
- u32 head_low;
- u32 head_high : 4;
- u32 count: 11;
- u32 oor: 1;
- u32 rsv : 8;
- u32 signature : 8;
+ __le32 head_low;
+ __le32 data;
};
struct mt7996_wed_rro_session_id {
@@ -286,6 +300,44 @@ struct mt7996_wed_rro_session_id {
u16 id;
};
+struct mt7996_msdu_page {
+ struct list_head list;
+
+ struct mt76_queue *q;
+ dma_addr_t dma_addr;
+ void *buf;
+};
+
+/* data1 */
+#define RRO_HIF_DATA1_LS_MASK BIT(30)
+#define RRO_HIF_DATA1_SDL_MASK GENMASK(29, 16)
+/* data4 */
+#define RRO_HIF_DATA4_RX_TOKEN_ID_MASK GENMASK(15, 0)
+struct mt7996_rro_hif {
+ __le32 data0;
+ __le32 data1;
+ __le32 data2;
+ __le32 data3;
+ __le32 data4;
+ __le32 data5;
+};
+
+#define MSDU_PAGE_INFO_OWNER_MASK BIT(31)
+#define MSDU_PAGE_INFO_PG_HIGH_MASK GENMASK(3, 0)
+struct mt7996_msdu_page_info {
+ struct mt7996_rro_hif rxd[MT7996_MAX_HIF_RXD_IN_PG];
+ __le32 pg_low;
+ __le32 data;
+};
+
+#define MT7996_MAX_RRO_RRS_RING 4
+struct mt7996_rro_queue_regs_emi {
+ struct {
+ __le16 idx;
+ __le16 rsv;
+ } ring[MT7996_MAX_RRO_RRS_RING];
+};
+
struct mt7996_phy {
struct mt76_phy *mt76;
struct mt7996_dev *dev;
@@ -337,6 +389,9 @@ struct mt7996_dev {
u32 q_int_mask[MT7996_MAX_QUEUE];
u32 q_wfdma_mask;
+ u64 mld_idx_mask;
+ u64 mld_remap_idx_mask;
+
const struct mt76_bus_ops *bus_ops;
struct mt7996_phy phy;
@@ -377,7 +432,6 @@ struct mt7996_dev {
bool flash_mode:1;
bool has_eht:1;
- bool has_rro:1;
struct {
struct {
@@ -392,10 +446,25 @@ struct mt7996_dev {
void *ptr;
dma_addr_t phy_addr;
} session;
+ struct {
+ void *ptr;
+ dma_addr_t phy_addr;
+ } msdu_pg[MT7996_RRO_MSDU_PG_CR_CNT];
+ struct {
+ struct mt7996_rro_queue_regs_emi *ptr;
+ dma_addr_t phy_addr;
+ } emi_rings_cpu;
+ struct {
+ struct mt7996_rro_queue_regs_emi *ptr;
+ dma_addr_t phy_addr;
+ } emi_rings_dma;
struct work_struct work;
struct list_head poll_list;
spinlock_t lock;
+
+ struct list_head page_cache;
+ struct list_head page_map[MT7996_RRO_MSDU_PG_HASH_SIZE];
} wed_rro;
bool ibf;
@@ -552,6 +621,7 @@ extern struct pci_driver mt7996_hif_driver;
struct mt7996_dev *mt7996_mmio_probe(struct device *pdev,
void __iomem *mem_base, u32 device_id);
+void mt7996_rro_hw_init(struct mt7996_dev *dev);
void mt7996_wfsys_reset(struct mt7996_dev *dev);
irqreturn_t mt7996_irq_handler(int irq, void *dev_instance);
u64 __mt7996_get_tsf(struct ieee80211_hw *hw, struct mt7996_vif_link *link);
@@ -604,16 +674,15 @@ int mt7996_mcu_teardown_mld_sta(struct mt7996_dev *dev,
struct mt7996_sta_link *msta_link);
int mt7996_mcu_add_tx_ba(struct mt7996_dev *dev,
struct ieee80211_ampdu_params *params,
- struct mt7996_vif_link *link,
- struct mt7996_sta_link *msta_link, bool enable);
+ struct ieee80211_vif *vif, bool enable);
int mt7996_mcu_add_rx_ba(struct mt7996_dev *dev,
struct ieee80211_ampdu_params *params,
- struct mt7996_vif_link *link, bool enable);
+ struct ieee80211_vif *vif, bool enable);
int mt7996_mcu_update_bss_color(struct mt7996_dev *dev,
struct mt76_vif_link *mlink,
struct cfg80211_he_bss_color *he_bss_color);
int mt7996_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *link_conf);
+ struct ieee80211_bss_conf *link_conf, bool enabled);
int mt7996_mcu_beacon_inband_discov(struct mt7996_dev *dev,
struct ieee80211_bss_conf *link_conf,
struct mt7996_vif_link *link, u32 changed);
@@ -669,6 +738,11 @@ int mt7996_mcu_get_all_sta_info(struct mt7996_phy *phy, u16 tag);
int mt7996_mcu_wed_rro_reset_sessions(struct mt7996_dev *dev, u16 id);
int mt7996_mcu_set_sniffer_mode(struct mt7996_phy *phy, bool enabled);
+static inline bool mt7996_has_hwrro(struct mt7996_dev *dev)
+{
+ return dev->mt76.hwrro_mode != MT76_HWRRO_OFF;
+}
+
static inline u8 mt7996_max_interface_num(struct mt7996_dev *dev)
{
return min(MT7996_MAX_INTERFACES * (1 + mt7996_band_valid(dev, MT_BAND1) +
@@ -743,6 +817,8 @@ void mt7996_mac_twt_teardown_flow(struct mt7996_dev *dev,
struct mt7996_vif_link *link,
struct mt7996_sta_link *msta_link,
u8 flowid);
+void mt7996_mac_sta_deinit_link(struct mt7996_dev *dev,
+ struct mt7996_sta_link *msta_link);
void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
struct ieee80211_twt_setup *twt);
@@ -753,6 +829,10 @@ int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
void mt7996_tx_token_put(struct mt7996_dev *dev);
void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb, u32 *info);
+void mt7996_rro_msdu_page_map_free(struct mt7996_dev *dev);
+int mt7996_rro_msdu_page_add(struct mt76_dev *mdev, struct mt76_queue *q,
+ dma_addr_t dma_addr, void *data);
+void mt7996_rro_rx_process(struct mt76_dev *mdev, void *data);
bool mt7996_rx_check(struct mt76_dev *mdev, void *data, int len);
void mt7996_stats_work(struct work_struct *work);
int mt76_dfs_start_rdd(struct mt7996_dev *dev, bool force);
@@ -787,8 +867,6 @@ u32 mt7996_wed_init_buf(void *ptr, dma_addr_t phys, int token_id);
int mt7996_mtk_init_debugfs(struct mt7996_phy *phy, struct dentry *dir);
#endif
-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
int mt7996_dma_rro_init(struct mt7996_dev *dev);
-#endif /* CONFIG_NET_MEDIATEK_SOC_WED */
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/pci.c b/drivers/net/wireless/mediatek/mt76/mt7996/pci.c
index 19e99bc1c6c4..3f49bbbba3b9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/pci.c
@@ -87,6 +87,7 @@ static int mt7996_pci_hif2_probe(struct pci_dev *pdev)
hif->dev = &pdev->dev;
hif->regs = pcim_iomap_table(pdev)[0];
hif->irq = pdev->irq;
+ pcie_bandwidth_available(pdev, NULL, &hif->speed, &hif->width);
spin_lock_bh(&hif_lock);
list_add(&hif->list, &hif_list);
spin_unlock_bh(&hif_lock);
@@ -137,6 +138,7 @@ static int mt7996_pci_probe(struct pci_dev *pdev,
mdev = &dev->mt76;
mt7996_wfsys_reset(dev);
hif2 = mt7996_pci_init_hif2(pdev);
+ dev->hif2 = hif2;
ret = mt7996_mmio_wed_init(dev, pdev, false, &irq);
if (ret < 0)
@@ -161,7 +163,6 @@ static int mt7996_pci_probe(struct pci_dev *pdev,
if (hif2) {
hif2_dev = container_of(hif2->dev, struct pci_dev, dev);
- dev->hif2 = hif2;
ret = mt7996_mmio_wed_init(dev, hif2_dev, true, &hif2_irq);
if (ret < 0)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/regs.h b/drivers/net/wireless/mediatek/mt76/mt7996/regs.h
index e942c0058731..0fa325f87fcd 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/regs.h
@@ -88,6 +88,8 @@ enum offs_rev {
#define MT_RRO_BA_BITMAP_BASE1 MT_RRO_TOP(0xC)
#define WF_RRO_AXI_MST_CFG MT_RRO_TOP(0xB8)
#define WF_RRO_AXI_MST_CFG_DIDX_OK BIT(12)
+
+#define MT_RRO_ADDR_ARRAY_BASE0 MT_RRO_TOP(0x30)
#define MT_RRO_ADDR_ARRAY_BASE1 MT_RRO_TOP(0x34)
#define MT_RRO_ADDR_ARRAY_ELEM_ADDR_SEG_MODE BIT(31)
@@ -108,6 +110,19 @@ enum offs_rev {
#define MT_RRO_ADDR_ELEM_SEG_ADDR0 MT_RRO_TOP(0x400)
+#define MT_RRO_3_0_EMU_CONF MT_RRO_TOP(0x600)
+#define MT_RRO_3_0_EMU_CONF_EN_MASK BIT(11)
+
+#define MT_RRO_3_1_GLOBAL_CONFIG MT_RRO_TOP(0x604)
+#define MT_RRO_3_1_GLOBAL_CONFIG_RXDMAD_SEL BIT(6)
+#define MT_RRO_3_1_GLOBAL_CONFIG_RX_CIDX_RD_EN BIT(3)
+#define MT_RRO_3_1_GLOBAL_CONFIG_RX_DIDX_WR_EN BIT(2)
+#define MT_RRO_3_1_GLOBAL_CONFIG_INTERLEAVE_EN BIT(0)
+
+#define MT_RRO_MSDU_PG_SEG_ADDR0 MT_RRO_TOP(0x620)
+#define MT_RRO_RX_RING_AP_CIDX_ADDR MT_RRO_TOP(0x6f0)
+#define MT_RRO_RX_RING_AP_DIDX_ADDR MT_RRO_TOP(0x6f4)
+
#define MT_RRO_ACK_SN_CTRL MT_RRO_TOP(0x50)
#define MT_RRO_ACK_SN_CTRL_SN_MASK GENMASK(27, 16)
#define MT_RRO_ACK_SN_CTRL_SESSION_MASK GENMASK(11, 0)
@@ -412,7 +427,9 @@ enum offs_rev {
#define MT_WFDMA0_RX_INT_PCIE_SEL MT_WFDMA0(0x154)
#define MT_WFDMA0_RX_INT_SEL_RING3 BIT(3)
+#define MT_WFDMA0_RX_INT_SEL_RING5 BIT(5)
#define MT_WFDMA0_RX_INT_SEL_RING6 BIT(6)
+#define MT_WFDMA0_RX_INT_SEL_RING9 BIT(9)
#define MT_WFDMA0_MCU_HOST_INT_ENA MT_WFDMA0(0x1f4)
@@ -430,6 +447,7 @@ enum offs_rev {
#define MT_WFDMA0_PAUSE_RX_Q_RRO_TH MT_WFDMA0(0x27c)
#define WF_WFDMA0_GLO_CFG_EXT0 MT_WFDMA0(0x2b0)
+#define WF_WFDMA0_GLO_CFG_EXT0_OUTSTAND_MASK GENMASK(27, 24)
#define WF_WFDMA0_GLO_CFG_EXT0_RX_WB_RXD BIT(18)
#define WF_WFDMA0_GLO_CFG_EXT0_WED_MERGE_MODE BIT(14)
@@ -451,6 +469,8 @@ enum offs_rev {
#define MT_WFDMA_HOST_CONFIG MT_WFDMA_EXT_CSR(0x30)
#define MT_WFDMA_HOST_CONFIG_PDMA_BAND BIT(0)
+#define MT_WFDMA_HOST_CONFIG_BAND0_PCIE1 BIT(20)
+#define MT_WFDMA_HOST_CONFIG_BAND1_PCIE1 BIT(21)
#define MT_WFDMA_HOST_CONFIG_BAND2_PCIE1 BIT(22)
#define MT_WFDMA_EXT_CSR_HIF_MISC MT_WFDMA_EXT_CSR(0x44)
@@ -459,6 +479,9 @@ enum offs_rev {
#define MT_WFDMA_AXI_R2A_CTRL MT_WFDMA_EXT_CSR(0x500)
#define MT_WFDMA_AXI_R2A_CTRL_OUTSTAND_MASK GENMASK(4, 0)
+#define MT_WFDMA_AXI_R2A_CTRL2 MT_WFDMA_EXT_CSR(0x508)
+#define MT_WFDMA_AXI_R2A_CTRL2_OUTSTAND_MASK GENMASK(31, 28)
+
#define MT_PCIE_RECOG_ID 0xd7090
#define MT_PCIE_RECOG_ID_MASK GENMASK(30, 0)
#define MT_PCIE_RECOG_ID_SEM BIT(31)
@@ -492,6 +515,8 @@ enum offs_rev {
#define MT_RXQ_RING_BASE(q) (MT_Q_BASE(__RXQ(q)) + 0x500)
#define MT_RXQ_RRO_IND_RING_BASE MT_RRO_TOP(0x40)
+#define MT_RXQ_RRO_AP_RING_BASE MT_RRO_TOP(0x650)
+
#define MT_MCUQ_EXT_CTRL(q) (MT_Q_BASE(q) + 0x600 + \
MT_MCUQ_ID(q) * 0x4)
#define MT_RXQ_EXT_CTRL(q) (MT_Q_BASE(__RXQ(q)) + 0x680 + \
@@ -514,7 +539,9 @@ enum offs_rev {
#define MT_INT_RX_DONE_WA_EXT BIT(3) /* for mt7992 */
#define MT_INT_RX_DONE_WA_TRI BIT(3)
#define MT_INT_RX_TXFREE_MAIN BIT(17)
+#define MT_INT_RX_TXFREE_BAND1 BIT(15)
#define MT_INT_RX_TXFREE_TRI BIT(15)
+#define MT_INT_RX_TXFREE_BAND1_EXT BIT(19) /* for mt7992 dual PCIe */
#define MT_INT_RX_TXFREE_BAND0_MT7990 BIT(14)
#define MT_INT_RX_TXFREE_BAND1_MT7990 BIT(15)
#define MT_INT_RX_DONE_BAND2_EXT BIT(23)
@@ -522,9 +549,10 @@ enum offs_rev {
#define MT_INT_MCU_CMD BIT(29)
#define MT_INT_RX_DONE_RRO_BAND0 BIT(16)
-#define MT_INT_RX_DONE_RRO_BAND1 BIT(16)
+#define MT_INT_RX_DONE_RRO_BAND1 BIT(17)
#define MT_INT_RX_DONE_RRO_BAND2 BIT(14)
#define MT_INT_RX_DONE_RRO_IND BIT(11)
+#define MT_INT_RX_DONE_RRO_RXDMAD_C BIT(11)
#define MT_INT_RX_DONE_MSDU_PG_BAND0 BIT(18)
#define MT_INT_RX_DONE_MSDU_PG_BAND1 BIT(19)
#define MT_INT_RX_DONE_MSDU_PG_BAND2 BIT(23)
@@ -552,6 +580,8 @@ enum offs_rev {
#define MT_INT_RRO_RX_DONE (MT_INT_RX(MT_RXQ_RRO_BAND0) | \
MT_INT_RX(MT_RXQ_RRO_BAND1) | \
MT_INT_RX(MT_RXQ_RRO_BAND2) | \
+ MT_INT_RX(MT_RXQ_RRO_IND) | \
+ MT_INT_RX(MT_RXQ_RRO_RXDMAD_C) | \
MT_INT_RX(MT_RXQ_MSDU_PAGE_BAND0) | \
MT_INT_RX(MT_RXQ_MSDU_PAGE_BAND1) | \
MT_INT_RX(MT_RXQ_MSDU_PAGE_BAND2))
diff --git a/drivers/net/wireless/mediatek/mt76/scan.c b/drivers/net/wireless/mediatek/mt76/scan.c
index 9b20ccbeb8cf..5a875aac410f 100644
--- a/drivers/net/wireless/mediatek/mt76/scan.c
+++ b/drivers/net/wireless/mediatek/mt76/scan.c
@@ -16,11 +16,13 @@ static void mt76_scan_complete(struct mt76_dev *dev, bool abort)
clear_bit(MT76_SCANNING, &phy->state);
- if (dev->scan.chan && phy->main_chandef.chan)
+ if (dev->scan.chan && phy->main_chandef.chan &&
+ !test_bit(MT76_MCU_RESET, &dev->phy.state))
mt76_set_channel(phy, &phy->main_chandef, false);
mt76_put_vif_phy_link(phy, dev->scan.vif, dev->scan.mlink);
memset(&dev->scan, 0, sizeof(dev->scan));
- ieee80211_scan_completed(phy->hw, &info);
+ if (!test_bit(MT76_MCU_RESET, &dev->phy.state))
+ ieee80211_scan_completed(phy->hw, &info);
}
void mt76_abort_scan(struct mt76_dev *dev)
@@ -28,6 +30,7 @@ void mt76_abort_scan(struct mt76_dev *dev)
cancel_delayed_work_sync(&dev->scan_work);
mt76_scan_complete(dev, true);
}
+EXPORT_SYMBOL_GPL(mt76_abort_scan);
static void
mt76_scan_send_probe(struct mt76_dev *dev, struct cfg80211_ssid *ssid)
@@ -112,9 +115,6 @@ void mt76_scan_work(struct work_struct *work)
local_bh_enable();
out:
- if (!duration)
- return;
-
if (dev->scan.chan)
duration = max_t(int, duration,
msecs_to_jiffies(req->duration +
@@ -139,7 +139,8 @@ int mt76_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mutex_lock(&dev->mutex);
- if (dev->scan.req || phy->roc_vif) {
+ if (dev->scan.req || phy->roc_vif ||
+ test_bit(MT76_MCU_RESET, &dev->phy.state)) {
ret = -EBUSY;
goto out;
}
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index 8ab5840fee57..b78ae6a34b65 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -618,7 +618,8 @@ mt76_txq_schedule_pending_wcid(struct mt76_phy *phy, struct mt76_wcid *wcid,
!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
!ieee80211_is_data(hdr->frame_control) &&
(!ieee80211_is_bufferable_mmpdu(skb) ||
- ieee80211_is_deauth(hdr->frame_control)))
+ ieee80211_is_deauth(hdr->frame_control) ||
+ head == &wcid->tx_offchannel))
qid = MT_TXQ_PSD;
q = phy->q_tx[qid];
diff --git a/drivers/net/wireless/mediatek/mt76/wed.c b/drivers/net/wireless/mediatek/mt76/wed.c
index 63f69e152b1c..907a8e43e72a 100644
--- a/drivers/net/wireless/mediatek/mt76/wed.c
+++ b/drivers/net/wireless/mediatek/mt76/wed.c
@@ -118,7 +118,7 @@ int mt76_wed_dma_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
case MT76_WED_Q_TXFREE:
/* WED txfree queue needs ring to be initialized before setup */
q->flags = 0;
- mt76_dma_queue_reset(dev, q);
+ mt76_dma_queue_reset(dev, q, true);
mt76_dma_rx_fill(dev, q, false);
ret = mtk_wed_device_txfree_ring_setup(q->wed, q->regs);
@@ -133,21 +133,21 @@ int mt76_wed_dma_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
break;
case MT76_WED_RRO_Q_DATA:
q->flags &= ~MT_QFLAG_WED;
- __mt76_dma_queue_reset(dev, q, false);
+ mt76_dma_queue_reset(dev, q, false);
mtk_wed_device_rro_rx_ring_setup(q->wed, ring, q->regs);
q->head = q->ndesc - 1;
q->queued = q->head;
break;
case MT76_WED_RRO_Q_MSDU_PG:
q->flags &= ~MT_QFLAG_WED;
- __mt76_dma_queue_reset(dev, q, false);
+ mt76_dma_queue_reset(dev, q, false);
mtk_wed_device_msdu_pg_rx_ring_setup(q->wed, ring, q->regs);
q->head = q->ndesc - 1;
q->queued = q->head;
break;
case MT76_WED_RRO_Q_IND:
q->flags &= ~MT_QFLAG_WED;
- mt76_dma_queue_reset(dev, q);
+ mt76_dma_queue_reset(dev, q, true);
mt76_dma_rx_fill(dev, q, false);
mtk_wed_device_ind_rx_ring_setup(q->wed, q->regs);
break;
diff --git a/drivers/net/wireless/microchip/wilc1000/cfg80211.c b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
index a395829ebadf..c39e7f313ea1 100644
--- a/drivers/net/wireless/microchip/wilc1000/cfg80211.c
+++ b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
@@ -794,12 +794,6 @@ static int get_station(struct wiphy *wiphy, struct net_device *dev,
return 0;
}
-static int change_bss(struct wiphy *wiphy, struct net_device *dev,
- struct bss_parameters *params)
-{
- return 0;
-}
-
static int set_wiphy_params(struct wiphy *wiphy, int radio_idx, u32 changed)
{
int ret = -EINVAL;
@@ -1709,7 +1703,6 @@ static const struct cfg80211_ops wilc_cfg80211_ops = {
.change_station = change_station,
.get_station = get_station,
.dump_station = dump_station,
- .change_bss = change_bss,
.set_wiphy_params = set_wiphy_params,
.external_auth = external_auth,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
index 7db29e90eb4f..f8a6f9c968a1 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
@@ -679,7 +679,7 @@ static void rt2x00lib_rxdone_check_ps(struct rt2x00_dev *rt2x00dev,
/* Check whenever the PHY can be turned off again. */
/* 1. What about buffered unicast traffic for our AID? */
- cam = ieee80211_check_tim(tim_ie, tim_len, rt2x00dev->aid);
+ cam = ieee80211_check_tim(tim_ie, tim_len, rt2x00dev->aid, false);
/* 2. Maybe the AP wants to send multicast/broadcast data? */
cam |= (tim_ie->bitmap_ctrl & 0x01);
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/core.c b/drivers/net/wireless/realtek/rtl8xxxu/core.c
index 831b5025c634..3ded5952729f 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/core.c
@@ -1901,6 +1901,27 @@ static void rtl8xxxu_dump_efuse(struct rtl8xxxu_priv *priv)
priv->efuse_wifi.raw, EFUSE_MAP_LEN, true);
}
+static ssize_t read_file_efuse(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct rtl8xxxu_priv *priv = file_inode(file)->i_private;
+
+ return simple_read_from_buffer(user_buf, count, ppos,
+ priv->efuse_wifi.raw, EFUSE_MAP_LEN);
+}
+
+static const struct debugfs_short_fops fops_efuse = {
+ .read = read_file_efuse,
+};
+
+static void rtl8xxxu_debugfs_init(struct rtl8xxxu_priv *priv)
+{
+ struct dentry *phydir;
+
+ phydir = debugfs_create_dir("rtl8xxxu", priv->hw->wiphy->debugfsdir);
+ debugfs_create_file("efuse", 0400, phydir, priv, &fops_efuse);
+}
+
void rtl8xxxu_reset_8051(struct rtl8xxxu_priv *priv)
{
u8 val8;
@@ -7815,7 +7836,8 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
untested = 0;
break;
case 0x2357:
- if (id->idProduct == 0x0109 || id->idProduct == 0x0135)
+ if (id->idProduct == 0x0109 || id->idProduct == 0x010c ||
+ id->idProduct == 0x0135)
untested = 0;
break;
case 0x0b05:
@@ -7974,6 +7996,7 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
}
rtl8xxxu_init_led(priv);
+ rtl8xxxu_debugfs_init(priv);
return 0;
@@ -8172,8 +8195,6 @@ static const struct usb_device_id dev_table[] = {
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x06f8, 0xe033, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
-{USB_DEVICE_AND_INTERFACE_INFO(0x07b8, 0x8188, 0xff, 0xff, 0xff),
- .driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x07b8, 0x8189, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9041, 0xff, 0xff, 0xff),
diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c
index 6241e4fed4f6..bcab12c3b4c1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/ps.c
+++ b/drivers/net/wireless/realtek/rtlwifi/ps.c
@@ -519,7 +519,7 @@ void rtl_swlps_beacon(struct ieee80211_hw *hw, void *data, unsigned int len)
/* 1. What about buffered unicast traffic for our AID? */
u_buffed = ieee80211_check_tim(tim_ie, tim_len,
- rtlpriv->mac80211.assoc_id);
+ rtlpriv->mac80211.assoc_id, false);
/* 2. Maybe the AP wants to send multicast/broadcast data? */
m_buffed = tim_ie->bitmap_ctrl & 0x01;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
index 00a6778df704..9480823af838 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
@@ -291,7 +291,6 @@ static const struct usb_device_id rtl8192c_usb_ids[] = {
{RTL_USB_DEVICE(0x050d, 0x1102, rtl92cu_hal_cfg)}, /*Belkin - Edimax*/
{RTL_USB_DEVICE(0x050d, 0x11f2, rtl92cu_hal_cfg)}, /*Belkin - ISY*/
{RTL_USB_DEVICE(0x06f8, 0xe033, rtl92cu_hal_cfg)}, /*Hercules - Edimax*/
- {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
{RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
{RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
{RTL_USB_DEVICE(0x0846, 0x9043, rtl92cu_hal_cfg)}, /*NG WNA1000Mv2*/
diff --git a/drivers/net/wireless/realtek/rtw88/led.c b/drivers/net/wireless/realtek/rtw88/led.c
index 25aa6cbaa728..4cc62e49d167 100644
--- a/drivers/net/wireless/realtek/rtw88/led.c
+++ b/drivers/net/wireless/realtek/rtw88/led.c
@@ -6,13 +6,17 @@
#include "debug.h"
#include "led.h"
-static int rtw_led_set_blocking(struct led_classdev *led,
- enum led_brightness brightness)
+static int rtw_led_set(struct led_classdev *led,
+ enum led_brightness brightness)
{
struct rtw_dev *rtwdev = container_of(led, struct rtw_dev, led_cdev);
+ mutex_lock(&rtwdev->mutex);
+
rtwdev->chip->ops->led_set(led, brightness);
+ mutex_unlock(&rtwdev->mutex);
+
return 0;
}
@@ -36,10 +40,7 @@ void rtw_led_init(struct rtw_dev *rtwdev)
if (!rtwdev->chip->ops->led_set)
return;
- if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE)
- led->brightness_set = rtwdev->chip->ops->led_set;
- else
- led->brightness_set_blocking = rtw_led_set_blocking;
+ led->brightness_set_blocking = rtw_led_set;
snprintf(rtwdev->led_name, sizeof(rtwdev->led_name),
"rtw88-%s", dev_name(rtwdev->dev));
diff --git a/drivers/net/wireless/realtek/rtw88/sdio.c b/drivers/net/wireless/realtek/rtw88/sdio.c
index cc2d4fef3587..99d7c629eac6 100644
--- a/drivers/net/wireless/realtek/rtw88/sdio.c
+++ b/drivers/net/wireless/realtek/rtw88/sdio.c
@@ -144,6 +144,10 @@ static u32 rtw_sdio_to_io_address(struct rtw_dev *rtwdev, u32 addr,
static bool rtw_sdio_use_direct_io(struct rtw_dev *rtwdev, u32 addr)
{
+ if (!test_bit(RTW_FLAG_POWERON, rtwdev->flags) &&
+ !rtw_sdio_is_bus_addr(addr))
+ return false;
+
return !rtw_sdio_is_sdio30_supported(rtwdev) ||
rtw_sdio_is_bus_addr(addr);
}
diff --git a/drivers/net/wireless/realtek/rtw89/chan.c b/drivers/net/wireless/realtek/rtw89/chan.c
index f7d1c5d3b92e..86f1b39a967f 100644
--- a/drivers/net/wireless/realtek/rtw89/chan.c
+++ b/drivers/net/wireless/realtek/rtw89/chan.c
@@ -281,6 +281,7 @@ void rtw89_entity_init(struct rtw89_dev *rtwdev)
{
struct rtw89_hal *hal = &rtwdev->hal;
struct rtw89_entity_mgnt *mgnt = &hal->entity_mgnt;
+ int i, j;
hal->entity_pause = false;
bitmap_zero(hal->entity_map, NUM_OF_RTW89_CHANCTX);
@@ -289,6 +290,11 @@ void rtw89_entity_init(struct rtw89_dev *rtwdev)
INIT_LIST_HEAD(&mgnt->active_list);
+ for (i = 0; i < RTW89_MAX_INTERFACE_NUM; i++) {
+ for (j = 0; j < __RTW89_MLD_MAX_LINK_NUM; j++)
+ mgnt->chanctx_tbl[i][j] = RTW89_CHANCTX_IDLE;
+ }
+
rtw89_config_default_chandef(rtwdev);
}
@@ -353,7 +359,7 @@ static void rtw89_normalize_link_chanctx(struct rtw89_dev *rtwdev,
const struct rtw89_chan *__rtw89_mgnt_chan_get(struct rtw89_dev *rtwdev,
const char *caller_message,
- u8 link_index)
+ u8 link_index, bool nullchk)
{
struct rtw89_hal *hal = &rtwdev->hal;
struct rtw89_entity_mgnt *mgnt = &hal->entity_mgnt;
@@ -400,6 +406,9 @@ const struct rtw89_chan *__rtw89_mgnt_chan_get(struct rtw89_dev *rtwdev,
return rtw89_chan_get(rtwdev, chanctx_idx);
dflt:
+ if (unlikely(nullchk))
+ return NULL;
+
rtw89_debug(rtwdev, RTW89_DBG_CHAN,
"%s (%s): prefetch NULL on link index %u\n",
__func__, caller_message ?: "", link_index);
diff --git a/drivers/net/wireless/realtek/rtw89/chan.h b/drivers/net/wireless/realtek/rtw89/chan.h
index b1175419f92b..5b22764d5329 100644
--- a/drivers/net/wireless/realtek/rtw89/chan.h
+++ b/drivers/net/wireless/realtek/rtw89/chan.h
@@ -180,10 +180,16 @@ void rtw89_chanctx_proceed(struct rtw89_dev *rtwdev,
const struct rtw89_chan *__rtw89_mgnt_chan_get(struct rtw89_dev *rtwdev,
const char *caller_message,
- u8 link_index);
+ u8 link_index, bool nullchk);
#define rtw89_mgnt_chan_get(rtwdev, link_index) \
- __rtw89_mgnt_chan_get(rtwdev, __func__, link_index)
+ __rtw89_mgnt_chan_get(rtwdev, __func__, link_index, false)
+
+static inline const struct rtw89_chan *
+rtw89_mgnt_chan_get_or_null(struct rtw89_dev *rtwdev, u8 link_index)
+{
+ return __rtw89_mgnt_chan_get(rtwdev, NULL, link_index, true);
+}
struct rtw89_mcc_links_info {
struct rtw89_vif_link *links[NUM_OF_RTW89_MCC_ROLES];
diff --git a/drivers/net/wireless/realtek/rtw89/coex.c b/drivers/net/wireless/realtek/rtw89/coex.c
index e4e6daf51a1b..0f7ae572ef91 100644
--- a/drivers/net/wireless/realtek/rtw89/coex.c
+++ b/drivers/net/wireless/realtek/rtw89/coex.c
@@ -93,7 +93,7 @@ static const struct rtw89_btc_fbtc_slot s_def[] = {
[CXST_E2G] = __DEF_FBTC_SLOT(5, 0xea5a5a5a, SLOT_MIX),
[CXST_E5G] = __DEF_FBTC_SLOT(5, 0xffffffff, SLOT_ISO),
[CXST_EBT] = __DEF_FBTC_SLOT(5, 0xe5555555, SLOT_MIX),
- [CXST_ENULL] = __DEF_FBTC_SLOT(5, 0xaaaaaaaa, SLOT_ISO),
+ [CXST_ENULL] = __DEF_FBTC_SLOT(5, 0x55555555, SLOT_MIX),
[CXST_WLK] = __DEF_FBTC_SLOT(250, 0xea5a5a5a, SLOT_MIX),
[CXST_W1FDD] = __DEF_FBTC_SLOT(50, 0xffffffff, SLOT_ISO),
[CXST_B1FDD] = __DEF_FBTC_SLOT(50, 0xffffdfff, SLOT_ISO),
@@ -4153,6 +4153,7 @@ void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type)
s_def[CXST_EBT].cxtbl, s_def[CXST_EBT].cxtype);
_slot_set_le(btc, CXST_ENULL, s_def[CXST_ENULL].dur,
s_def[CXST_ENULL].cxtbl, s_def[CXST_ENULL].cxtype);
+ _slot_set_dur(btc, CXST_EBT, dur_2);
break;
case BTC_CXP_OFFE_DEF2:
_slot_set(btc, CXST_E2G, 20, cxtbl[1], SLOT_ISO);
@@ -4162,6 +4163,7 @@ void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type)
s_def[CXST_EBT].cxtbl, s_def[CXST_EBT].cxtype);
_slot_set_le(btc, CXST_ENULL, s_def[CXST_ENULL].dur,
s_def[CXST_ENULL].cxtbl, s_def[CXST_ENULL].cxtype);
+ _slot_set_dur(btc, CXST_EBT, dur_2);
break;
case BTC_CXP_OFFE_2GBWMIXB:
if (a2dp->exist)
@@ -4170,6 +4172,7 @@ void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type)
_slot_set(btc, CXST_E2G, 5, tbl_w1, SLOT_MIX);
_slot_set_le(btc, CXST_EBT, cpu_to_le16(40),
s_def[CXST_EBT].cxtbl, s_def[CXST_EBT].cxtype);
+ _slot_set_dur(btc, CXST_EBT, dur_2);
break;
case BTC_CXP_OFFE_WL: /* for 4-way */
_slot_set(btc, CXST_E2G, 5, cxtbl[1], SLOT_MIX);
diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
index 57590f5577a3..917b2adede61 100644
--- a/drivers/net/wireless/realtek/rtw89/core.c
+++ b/drivers/net/wireless/realtek/rtw89/core.c
@@ -2,6 +2,7 @@
/* Copyright(c) 2019-2020 Realtek Corporation
*/
#include <linux/ip.h>
+#include <linux/sort.h>
#include <linux/udp.h>
#include "cam.h"
@@ -272,17 +273,18 @@ rtw89_get_6ghz_span(struct rtw89_dev *rtwdev, u32 center_freq)
return NULL;
}
-bool rtw89_ra_report_to_bitrate(struct rtw89_dev *rtwdev, u8 rpt_rate, u16 *bitrate)
+bool rtw89_legacy_rate_to_bitrate(struct rtw89_dev *rtwdev, u8 legacy_rate, u16 *bitrate)
{
- struct ieee80211_rate rate;
+ const struct ieee80211_rate *rate;
- if (unlikely(rpt_rate >= ARRAY_SIZE(rtw89_bitrates))) {
- rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "invalid rpt rate %d\n", rpt_rate);
+ if (unlikely(legacy_rate >= ARRAY_SIZE(rtw89_bitrates))) {
+ rtw89_debug(rtwdev, RTW89_DBG_UNEXP,
+ "invalid legacy rate %d\n", legacy_rate);
return false;
}
- rate = rtw89_bitrates[rpt_rate];
- *bitrate = rate.bitrate;
+ rate = &rtw89_bitrates[legacy_rate];
+ *bitrate = rate->bitrate;
return true;
}
@@ -697,6 +699,66 @@ static void rtw89_core_tx_update_llc_hdr(struct rtw89_dev *rtwdev,
desc_info->hdr_llc_len >>= 1; /* in unit of 2 bytes */
}
+u8 rtw89_core_get_ch_dma(struct rtw89_dev *rtwdev, u8 qsel)
+{
+ switch (qsel) {
+ default:
+ rtw89_warn(rtwdev, "Cannot map qsel to dma: %d\n", qsel);
+ fallthrough;
+ case RTW89_TX_QSEL_BE_0:
+ case RTW89_TX_QSEL_BE_1:
+ case RTW89_TX_QSEL_BE_2:
+ case RTW89_TX_QSEL_BE_3:
+ return RTW89_TXCH_ACH0;
+ case RTW89_TX_QSEL_BK_0:
+ case RTW89_TX_QSEL_BK_1:
+ case RTW89_TX_QSEL_BK_2:
+ case RTW89_TX_QSEL_BK_3:
+ return RTW89_TXCH_ACH1;
+ case RTW89_TX_QSEL_VI_0:
+ case RTW89_TX_QSEL_VI_1:
+ case RTW89_TX_QSEL_VI_2:
+ case RTW89_TX_QSEL_VI_3:
+ return RTW89_TXCH_ACH2;
+ case RTW89_TX_QSEL_VO_0:
+ case RTW89_TX_QSEL_VO_1:
+ case RTW89_TX_QSEL_VO_2:
+ case RTW89_TX_QSEL_VO_3:
+ return RTW89_TXCH_ACH3;
+ case RTW89_TX_QSEL_B0_MGMT:
+ return RTW89_TXCH_CH8;
+ case RTW89_TX_QSEL_B0_HI:
+ return RTW89_TXCH_CH9;
+ case RTW89_TX_QSEL_B1_MGMT:
+ return RTW89_TXCH_CH10;
+ case RTW89_TX_QSEL_B1_HI:
+ return RTW89_TXCH_CH11;
+ }
+}
+EXPORT_SYMBOL(rtw89_core_get_ch_dma);
+
+u8 rtw89_core_get_ch_dma_v1(struct rtw89_dev *rtwdev, u8 qsel)
+{
+ switch (qsel) {
+ default:
+ rtw89_warn(rtwdev, "Cannot map qsel to dma v1: %d\n", qsel);
+ fallthrough;
+ case RTW89_TX_QSEL_BE_0:
+ case RTW89_TX_QSEL_BK_0:
+ return RTW89_TXCH_ACH0;
+ case RTW89_TX_QSEL_VI_0:
+ case RTW89_TX_QSEL_VO_0:
+ return RTW89_TXCH_ACH2;
+ case RTW89_TX_QSEL_B0_MGMT:
+ case RTW89_TX_QSEL_B0_HI:
+ return RTW89_TXCH_CH8;
+ case RTW89_TX_QSEL_B1_MGMT:
+ case RTW89_TX_QSEL_B1_HI:
+ return RTW89_TXCH_CH10;
+ }
+}
+EXPORT_SYMBOL(rtw89_core_get_ch_dma_v1);
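Usage sketch based on the two tables above; rtw89_chip_get_ch_dma(), used in the hunks below, is assumed to dispatch to whichever mapping variant the chip registered:

u8 ch_dma = rtw89_chip_get_ch_dma(rtwdev, RTW89_TX_QSEL_B0_MGMT);
/* resolves to RTW89_TXCH_CH8 with either mapping above */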
+
static void
rtw89_core_tx_update_mgmt_info(struct rtw89_dev *rtwdev,
struct rtw89_core_tx_request *tx_req)
@@ -710,7 +772,7 @@ rtw89_core_tx_update_mgmt_info(struct rtw89_dev *rtwdev,
u8 qsel, ch_dma;
qsel = rtw89_core_get_qsel_mgmt(rtwdev, tx_req);
- ch_dma = rtw89_core_get_ch_dma(rtwdev, qsel);
+ ch_dma = rtw89_chip_get_ch_dma(rtwdev, qsel);
desc_info->qsel = qsel;
desc_info->ch_dma = ch_dma;
@@ -927,7 +989,7 @@ rtw89_core_tx_update_data_info(struct rtw89_dev *rtwdev,
tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
tid_indicate = rtw89_core_get_tid_indicate(rtwdev, tid);
qsel = desc_info->hiq ? RTW89_TX_QSEL_B0_HI : rtw89_core_get_qsel(rtwdev, tid);
- ch_dma = rtw89_core_get_ch_dma(rtwdev, qsel);
+ ch_dma = rtw89_chip_get_ch_dma(rtwdev, qsel);
desc_info->ch_dma = ch_dma;
desc_info->tid_indicate = tid_indicate;
@@ -1073,42 +1135,46 @@ rtw89_core_tx_update_desc_info(struct rtw89_dev *rtwdev,
}
}
+static void rtw89_tx_wait_work(struct wiphy *wiphy, struct wiphy_work *work)
+{
+ struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
+ tx_wait_work.work);
+
+ rtw89_tx_wait_list_clear(rtwdev);
+}
+
void rtw89_core_tx_kick_off(struct rtw89_dev *rtwdev, u8 qsel)
{
u8 ch_dma;
- ch_dma = rtw89_core_get_ch_dma(rtwdev, qsel);
+ ch_dma = rtw89_chip_get_ch_dma(rtwdev, qsel);
rtw89_hci_tx_kick_off(rtwdev, ch_dma);
}
int rtw89_core_tx_kick_off_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb,
- int qsel, unsigned int timeout)
+ struct rtw89_tx_wait_info *wait, int qsel,
+ unsigned int timeout)
{
- struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb);
- struct rtw89_tx_wait_info *wait;
unsigned long time_left;
int ret = 0;
- wait = kzalloc(sizeof(*wait), GFP_KERNEL);
- if (!wait) {
- rtw89_core_tx_kick_off(rtwdev, qsel);
- return 0;
- }
-
- init_completion(&wait->completion);
- rcu_assign_pointer(skb_data->wait, wait);
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
rtw89_core_tx_kick_off(rtwdev, qsel);
time_left = wait_for_completion_timeout(&wait->completion,
msecs_to_jiffies(timeout));
- if (time_left == 0)
- ret = -ETIMEDOUT;
- else if (!wait->tx_done)
- ret = -EAGAIN;
- rcu_assign_pointer(skb_data->wait, NULL);
- kfree_rcu(wait, rcu_head);
+ if (time_left == 0) {
+ ret = -ETIMEDOUT;
+ list_add_tail(&wait->list, &rtwdev->tx_waits);
+ wiphy_delayed_work_queue(rtwdev->hw->wiphy, &rtwdev->tx_wait_work,
+ RTW89_TX_WAIT_WORK_TIMEOUT);
+ } else {
+ if (!wait->tx_done)
+ ret = -EAGAIN;
+ rtw89_tx_wait_release(wait);
+ }
return ret;
}
@@ -1157,10 +1223,12 @@ int rtw89_h2c_tx(struct rtw89_dev *rtwdev,
static int rtw89_core_tx_write_link(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
struct rtw89_sta_link *rtwsta_link,
- struct sk_buff *skb, int *qsel, bool sw_mld)
+ struct sk_buff *skb, int *qsel, bool sw_mld,
+ struct rtw89_tx_wait_info *wait)
{
struct ieee80211_sta *sta = rtwsta_link_to_sta_safe(rtwsta_link);
struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
+ struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb);
struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
struct rtw89_core_tx_request tx_req = {};
int ret;
@@ -1177,6 +1245,8 @@ static int rtw89_core_tx_write_link(struct rtw89_dev *rtwdev,
rtw89_core_tx_update_desc_info(rtwdev, &tx_req);
rtw89_core_tx_wake(rtwdev, &tx_req);
+ rcu_assign_pointer(skb_data->wait, wait);
+
ret = rtw89_hci_tx_write(rtwdev, &tx_req);
if (ret) {
rtw89_err(rtwdev, "failed to transmit skb to HCI\n");
@@ -1213,7 +1283,8 @@ int rtw89_core_tx_write(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
}
}
- return rtw89_core_tx_write_link(rtwdev, rtwvif_link, rtwsta_link, skb, qsel, false);
+ return rtw89_core_tx_write_link(rtwdev, rtwvif_link, rtwsta_link, skb, qsel, false,
+ NULL);
}
static __le32 rtw89_build_txwd_body0(struct rtw89_tx_desc_info *desc_info)
@@ -1835,6 +1906,10 @@ static void rtw89_core_parse_phy_status_ie00(struct rtw89_dev *rtwdev,
tmp_rpl = le32_get_bits(ie->w0, RTW89_PHY_STS_IE00_W0_RPL);
phy_ppdu->rpl_avg = tmp_rpl >> 1;
+
+ if (!phy_ppdu->hdr_2_en)
+ phy_ppdu->rx_path_en =
+ le32_get_bits(ie->w3, RTW89_PHY_STS_IE00_W3_RX_PATH_EN);
}
static void rtw89_core_parse_phy_status_ie00_v2(struct rtw89_dev *rtwdev,
@@ -2221,6 +2296,435 @@ static void rtw89_vif_sync_bcn_tsf(struct rtw89_vif_link *rtwvif_link,
WRITE_ONCE(rtwvif_link->sync_bcn_tsf, le64_to_cpu(mgmt->u.beacon.timestamp));
}
+static u32 rtw89_bcn_calc_min_tbtt(struct rtw89_dev *rtwdev, u32 tbtt1, u32 tbtt2)
+{
+ struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track;
+ u32 close_bcn_intvl_th = bcn_track->close_bcn_intvl_th;
+ u32 tbtt_diff_th = bcn_track->tbtt_diff_th;
+
+ if (tbtt2 > tbtt1)
+ swap(tbtt1, tbtt2);
+
+ if (tbtt1 - tbtt2 > tbtt_diff_th)
+ return tbtt1;
+ else if (tbtt2 > close_bcn_intvl_th)
+ return tbtt2;
+ else if (tbtt1 > close_bcn_intvl_th)
+ return tbtt1;
+ else
+ return tbtt2;
+}
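The selection prefers the smaller of the two candidate TBTTs unless the pair straddles the beacon-interval boundary, in which case the sample near the end of the interval wins. A hedged worked example (threshold values illustrative):

/* With a 100 TU beacon interval, tbtt_diff_th ~= 87040 us (85%) and
 * close_bcn_intvl_th ~= 99328 us (97 TU). Samples of 1000 us and
 * 99500 us differ by more than tbtt_diff_th, so the pre-wrap sample
 * 99500 us is kept; otherwise the smaller sample would be returned.
 */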
+
+static void rtw89_bcn_cfg_tbtt_offset(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link)
+{
+ struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track;
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
+ u32 offset = bcn_track->tbtt_offset;
+
+ if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ const struct rtw89_port_reg *p = mac->port_base;
+ u32 bcnspc, val;
+
+ bcnspc = rtw89_read32_port_mask(rtwdev, rtwvif_link,
+ p->bcn_space, B_AX_BCN_SPACE_MASK);
+ val = bcnspc - (offset / 1024);
+ val = u32_encode_bits(val, B_AX_TBTT_SHIFT_OFST_MAG) |
+ B_AX_TBTT_SHIFT_OFST_SIGN;
+
+ rtw89_write16_port_mask(rtwdev, rtwvif_link, p->tbtt_shift,
+ B_AX_TBTT_SHIFT_OFST_MASK, val);
+
+ return;
+ }
+
+ rtw89_fw_h2c_tbtt_tuning(rtwdev, rtwvif_link, offset);
+}
+
+static void rtw89_bcn_update_tbtt_offset(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link)
+{
+ struct rtw89_beacon_stat *bcn_stat = &rtwdev->phystat.bcn_stat;
+ struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track;
+ u32 *tbtt_us = bcn_stat->tbtt_us;
+ u32 offset = tbtt_us[0];
+ u8 i;
+
+ for (i = 1; i < RTW89_BCN_TRACK_STAT_NR; i++)
+ offset = rtw89_bcn_calc_min_tbtt(rtwdev, tbtt_us[i], offset);
+
+ if (bcn_track->tbtt_offset == offset)
+ return;
+
+ bcn_track->tbtt_offset = offset;
+ rtw89_bcn_cfg_tbtt_offset(rtwdev, rtwvif_link);
+}
+
+static int cmp_u16(const void *a, const void *b)
+{
+ return *(const u16 *)a - *(const u16 *)b;
+}
+
+static u16 _rtw89_bcn_calc_drift(u16 tbtt, u16 offset, u16 beacon_int)
+{
+ if (tbtt < offset)
+ return beacon_int - offset + tbtt;
+
+ return tbtt - offset;
+}
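Drift is the distance of a TBTT sample from the configured offset, wrapped at the beacon interval so samples landing just before the offset don't blow up. A short worked example:

/* beacon_int = 100 TU, offset = 90 TU: a sample at tbtt = 5 TU lies
 * past the interval boundary, so the drift is 100 - 90 + 5 = 15 TU
 * instead of the underflowed 5 - 90.
 */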
+
+static void rtw89_bcn_calc_drift(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_beacon_stat *bcn_stat = &rtwdev->phystat.bcn_stat;
+ struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track;
+ u16 offset_tu = bcn_track->tbtt_offset / 1024;
+ u16 *tbtt_tu = bcn_stat->tbtt_tu;
+ u16 *drift = bcn_stat->drift;
+ u8 i;
+
+ bcn_stat->tbtt_tu_min = U16_MAX;
+ bcn_stat->tbtt_tu_max = 0;
+ for (i = 0; i < RTW89_BCN_TRACK_STAT_NR; i++) {
+ drift[i] = _rtw89_bcn_calc_drift(tbtt_tu[i], offset_tu,
+ bcn_track->beacon_int);
+
+ bcn_stat->tbtt_tu_min = min(bcn_stat->tbtt_tu_min, tbtt_tu[i]);
+ bcn_stat->tbtt_tu_max = max(bcn_stat->tbtt_tu_max, tbtt_tu[i]);
+ }
+
+ sort(drift, RTW89_BCN_TRACK_STAT_NR, sizeof(*drift), cmp_u16, NULL);
+}
+
+static void rtw89_bcn_calc_distribution(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_beacon_stat *bcn_stat = &rtwdev->phystat.bcn_stat;
+ struct rtw89_beacon_dist *bcn_dist = &bcn_stat->bcn_dist;
+ u16 lower_bound, upper_bound, outlier_count = 0;
+ u16 *drift = bcn_stat->drift;
+ u16 *bins = bcn_dist->bins;
+ u16 q1, q3, iqr, tmp;
+ u8 i;
+
+ BUILD_BUG_ON(RTW89_BCN_TRACK_STAT_NR % 4 != 0);
+
+ memset(bcn_dist, 0, sizeof(*bcn_dist));
+
+ bcn_dist->min = drift[0];
+ bcn_dist->max = drift[RTW89_BCN_TRACK_STAT_NR - 1];
+
+ tmp = RTW89_BCN_TRACK_STAT_NR / 4;
+ q1 = ((drift[tmp] + drift[tmp - 1]) * RTW89_BCN_TRACK_SCALE_FACTOR) / 2;
+
+ tmp = (RTW89_BCN_TRACK_STAT_NR * 3) / 4;
+ q3 = ((drift[tmp] + drift[tmp - 1]) * RTW89_BCN_TRACK_SCALE_FACTOR) / 2;
+
+ iqr = q3 - q1;
+ tmp = (3 * iqr) / 2;
+
+ if (bcn_dist->min <= 5)
+ lower_bound = bcn_dist->min;
+ else if (q1 > tmp)
+ lower_bound = (q1 - tmp) / RTW89_BCN_TRACK_SCALE_FACTOR;
+ else
+ lower_bound = 0;
+
+ upper_bound = (q3 + tmp) / RTW89_BCN_TRACK_SCALE_FACTOR;
+
+ for (i = 0; i < RTW89_BCN_TRACK_STAT_NR; i++) {
+ u16 tbtt = bcn_stat->tbtt_tu[i];
+ u16 min = bcn_stat->tbtt_tu_min;
+ u8 bin_idx;
+
+ /* histogram */
+ bin_idx = min((tbtt - min) / RTW89_BCN_TRACK_BIN_WIDTH,
+ RTW89_BCN_TRACK_MAX_BIN_NUM - 1);
+ bins[bin_idx]++;
+
+ /* boxplot outlier */
+ if (drift[i] < lower_bound || drift[i] > upper_bound)
+ outlier_count++;
+ }
+
+ bcn_dist->outlier_count = outlier_count;
+ bcn_dist->lower_bound = lower_bound;
+ bcn_dist->upper_bound = upper_bound;
+}
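The bounds are the standard boxplot fences q1 - 1.5 * iqr .. q3 + 1.5 * iqr, with the quartiles kept in fixed point (scaled by RTW89_BCN_TRACK_SCALE_FACTOR) so the midpoint of two samples survives integer division. A hedged walk-through, assuming a scale factor of 10 for illustration:

/* drift[N/4 - 1] = 8 TU and drift[N/4] = 9 TU give
 * q1 = (8 + 9) * 10 / 2 = 85; with q3 = 145, iqr = 60 and tmp = 90,
 * so lower_bound clamps to 0 (q1 <= tmp) and
 * upper_bound = (145 + 90) / 10 = 23 TU.
 */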
+
+static u8 rtw89_bcn_get_coverage(struct rtw89_dev *rtwdev, u16 threshold)
+{
+ struct rtw89_beacon_stat *bcn_stat = &rtwdev->phystat.bcn_stat;
+ int l = 0, r = RTW89_BCN_TRACK_STAT_NR - 1, m;
+ u16 *drift = bcn_stat->drift;
+ int index = -1;
+ u8 count = 0;
+
+ while (l <= r) {
+ m = l + (r - l) / 2;
+
+ if (drift[m] <= threshold) {
+ index = m;
+ l = m + 1;
+ } else {
+ r = m - 1;
+ }
+ }
+
+ count = (index == -1) ? 0 : (index + 1);
+
+ return (count * PERCENT) / RTW89_BCN_TRACK_STAT_NR;
+}
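The loop is an upper-bound binary search over the already-sorted drift array: it finds the last sample at or below the threshold and converts the count to a percentage. A hedged example (sample count and PERCENT == 100 assumed for illustration):

/* With RTW89_BCN_TRACK_STAT_NR == 20 and 18 sorted samples <= threshold,
 * index ends at 17, count = 18, and the function returns
 * (18 * 100) / 20 = 90.
 */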
+
+static u16 rtw89_bcn_get_histogram_bound(struct rtw89_dev *rtwdev, u8 target)
+{
+ struct rtw89_beacon_stat *bcn_stat = &rtwdev->phystat.bcn_stat;
+ struct rtw89_beacon_dist *bcn_dist = &bcn_stat->bcn_dist;
+ u16 tbtt_tu_max = bcn_stat->tbtt_tu_max;
+ u16 upper, lower = bcn_stat->tbtt_tu_min;
+ u8 i, count = 0;
+
+ for (i = 0; i < RTW89_BCN_TRACK_MAX_BIN_NUM; i++) {
+ upper = lower + RTW89_BCN_TRACK_BIN_WIDTH - 1;
+ if (i == RTW89_BCN_TRACK_MAX_BIN_NUM - 1)
+ upper = max(upper, tbtt_tu_max);
+
+ count += bcn_dist->bins[i];
+ if (count > target)
+ break;
+
+ lower = upper + 1;
+ }
+
+ return upper;
+}
+
+static u16 rtw89_bcn_get_rx_time(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan)
+{
+#define RTW89_SYMBOL_TIME_2GHZ 192
+#define RTW89_SYMBOL_TIME_5GHZ 20
+#define RTW89_SYMBOL_TIME_6GHZ 20
+ struct rtw89_pkt_stat *pkt_stat = &rtwdev->phystat.cur_pkt_stat;
+ u16 bitrate, val;
+
+ if (!rtw89_legacy_rate_to_bitrate(rtwdev, pkt_stat->beacon_rate, &bitrate))
+ return 0;
+
+ val = (pkt_stat->beacon_len * 8 * RTW89_BCN_TRACK_SCALE_FACTOR) / bitrate;
+
+ switch (chan->band_type) {
+ default:
+ case RTW89_BAND_2G:
+ val += RTW89_SYMBOL_TIME_2GHZ;
+ break;
+ case RTW89_BAND_5G:
+ val += RTW89_SYMBOL_TIME_5GHZ;
+ break;
+ case RTW89_BAND_6G:
+ val += RTW89_SYMBOL_TIME_6GHZ;
+ break;
+ }
+
+ /* convert to milliseconds */
+ return DIV_ROUND_UP(val, 1000);
+}
+
+static void rtw89_bcn_calc_timeout(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link)
+{
+#define RTW89_BCN_TRACK_EXTEND_TIMEOUT 5
+#define RTW89_BCN_TRACK_COVERAGE_TH 0 /* unit: TU */
+#define RTW89_BCN_TRACK_STRONG_RSSI 80
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
+ struct rtw89_pkt_stat *pkt_stat = &rtwdev->phystat.cur_pkt_stat;
+ struct rtw89_beacon_stat *bcn_stat = &rtwdev->phystat.bcn_stat;
+ struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track;
+ struct rtw89_beacon_dist *bcn_dist = &bcn_stat->bcn_dist;
+ u16 outlier_high_bcn_th = bcn_track->outlier_high_bcn_th;
+ u16 outlier_low_bcn_th = bcn_track->outlier_low_bcn_th;
+ u8 rssi = ewma_rssi_read(&rtwdev->phystat.bcn_rssi);
+ u16 target_bcn_th = bcn_track->target_bcn_th;
+ u16 low_bcn_th = bcn_track->low_bcn_th;
+ u16 med_bcn_th = bcn_track->med_bcn_th;
+ u16 beacon_int = bcn_track->beacon_int;
+ u16 bcn_timeout;
+
+ if (pkt_stat->beacon_nr < low_bcn_th) {
+ bcn_timeout = (RTW89_BCN_TRACK_TARGET_BCN * beacon_int) / PERCENT;
+ goto out;
+ }
+
+ if (bcn_dist->outlier_count >= outlier_high_bcn_th) {
+ bcn_timeout = bcn_dist->max;
+ goto out;
+ }
+
+ if (pkt_stat->beacon_nr < med_bcn_th) {
+ if (bcn_dist->outlier_count > outlier_low_bcn_th)
+ bcn_timeout = (bcn_dist->max + bcn_dist->upper_bound) / 2;
+ else
+ bcn_timeout = bcn_dist->upper_bound +
+ RTW89_BCN_TRACK_EXTEND_TIMEOUT;
+
+ goto out;
+ }
+
+ if (rssi >= RTW89_BCN_TRACK_STRONG_RSSI) {
+ if (rtw89_bcn_get_coverage(rtwdev, RTW89_BCN_TRACK_COVERAGE_TH) >= 90) {
+ /* ideal case */
+ bcn_timeout = 0;
+ } else {
+ u16 offset_tu = bcn_track->tbtt_offset / 1024;
+ u16 upper_bound;
+
+ upper_bound =
+ rtw89_bcn_get_histogram_bound(rtwdev, target_bcn_th);
+ bcn_timeout =
+ _rtw89_bcn_calc_drift(upper_bound, offset_tu, beacon_int);
+ }
+
+ goto out;
+ }
+
+ bcn_timeout = bcn_stat->drift[target_bcn_th];
+
+out:
+ bcn_track->bcn_timeout = bcn_timeout + rtw89_bcn_get_rx_time(rtwdev, chan);
+}
+
+static void rtw89_bcn_update_timeout(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link)
+{
+ rtw89_bcn_calc_drift(rtwdev);
+ rtw89_bcn_calc_distribution(rtwdev);
+ rtw89_bcn_calc_timeout(rtwdev, rtwvif_link);
+}
+
+static void rtw89_core_bcn_track(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track;
+ struct rtw89_vif_link *rtwvif_link;
+ struct rtw89_vif *rtwvif;
+ unsigned int link_id;
+
+ if (!RTW89_CHK_FW_FEATURE(BEACON_TRACKING, &rtwdev->fw))
+ return;
+
+ if (!rtwdev->lps_enabled)
+ return;
+
+ if (!bcn_track->is_data_ready)
+ return;
+
+ rtw89_for_each_rtwvif(rtwdev, rtwvif) {
+ rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) {
+ if (!(rtwvif_link->wifi_role == RTW89_WIFI_ROLE_STATION ||
+ rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT))
+ continue;
+
+ rtw89_bcn_update_tbtt_offset(rtwdev, rtwvif_link);
+ rtw89_bcn_update_timeout(rtwdev, rtwvif_link);
+ }
+ }
+}
+
+static bool rtw89_core_bcn_track_can_lps(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track;
+
+ if (!RTW89_CHK_FW_FEATURE(BEACON_TRACKING, &rtwdev->fw))
+ return true;
+
+ return bcn_track->is_data_ready;
+}
+
+static void rtw89_core_bcn_track_assoc(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link)
+{
+#define RTW89_BCN_TRACK_MED_BCN 70
+#define RTW89_BCN_TRACK_LOW_BCN 30
+#define RTW89_BCN_TRACK_OUTLIER_HIGH_BCN 30
+#define RTW89_BCN_TRACK_OUTLIER_LOW_BCN 20
+ struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track;
+ u32 period = jiffies_to_msecs(RTW89_TRACK_WORK_PERIOD);
+ struct ieee80211_bss_conf *bss_conf;
+ u32 beacons_in_period;
+ u32 bcn_intvl_us;
+ u16 beacon_int;
+ u8 dtim;
+
+ rcu_read_lock();
+ bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
+ beacon_int = bss_conf->beacon_int;
+ dtim = bss_conf->dtim_period;
+ rcu_read_unlock();
+
+ beacons_in_period = period / beacon_int / dtim;
+ bcn_intvl_us = ieee80211_tu_to_usec(beacon_int);
+
+ bcn_track->low_bcn_th =
+ (beacons_in_period * RTW89_BCN_TRACK_LOW_BCN) / PERCENT;
+ bcn_track->med_bcn_th =
+ (beacons_in_period * RTW89_BCN_TRACK_MED_BCN) / PERCENT;
+ bcn_track->outlier_low_bcn_th =
+ (RTW89_BCN_TRACK_STAT_NR * RTW89_BCN_TRACK_OUTLIER_LOW_BCN) / PERCENT;
+ bcn_track->outlier_high_bcn_th =
+ (RTW89_BCN_TRACK_STAT_NR * RTW89_BCN_TRACK_OUTLIER_HIGH_BCN) / PERCENT;
+ bcn_track->target_bcn_th =
+ (RTW89_BCN_TRACK_STAT_NR * RTW89_BCN_TRACK_TARGET_BCN) / PERCENT;
+
+ bcn_track->close_bcn_intvl_th = ieee80211_tu_to_usec(beacon_int - 3);
+ bcn_track->tbtt_diff_th = (bcn_intvl_us * 85) / PERCENT;
+ bcn_track->beacon_int = beacon_int;
+ bcn_track->dtim = dtim;
+}
+
+static void rtw89_core_bcn_track_reset(struct rtw89_dev *rtwdev)
+{
+ memset(&rtwdev->phystat.bcn_stat, 0, sizeof(rtwdev->phystat.bcn_stat));
+ memset(&rtwdev->bcn_track, 0, sizeof(rtwdev->bcn_track));
+}
+
+static void rtw89_vif_rx_bcn_stat(struct rtw89_dev *rtwdev,
+ struct ieee80211_bss_conf *bss_conf,
+ struct sk_buff *skb)
+{
+#define RTW89_APPEND_TSF_2GHZ 384
+#define RTW89_APPEND_TSF_5GHZ 52
+#define RTW89_APPEND_TSF_6GHZ 52
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
+ struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
+ struct rtw89_beacon_stat *bcn_stat = &rtwdev->phystat.bcn_stat;
+ struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track;
+ u32 bcn_intvl_us = ieee80211_tu_to_usec(bss_conf->beacon_int);
+ u64 tsf = le64_to_cpu(mgmt->u.beacon.timestamp);
+ u8 wp, num = bcn_stat->num;
+ u16 append;
+
+ if (!RTW89_CHK_FW_FEATURE(BEACON_TRACKING, &rtwdev->fw))
+ return;
+
+ switch (rx_status->band) {
+ default:
+ case NL80211_BAND_2GHZ:
+ append = RTW89_APPEND_TSF_2GHZ;
+ break;
+ case NL80211_BAND_5GHZ:
+ append = RTW89_APPEND_TSF_5GHZ;
+ break;
+ case NL80211_BAND_6GHZ:
+ append = RTW89_APPEND_TSF_6GHZ;
+ break;
+ }
+
+ wp = bcn_stat->wp;
+ div_u64_rem(tsf - append, bcn_intvl_us, &bcn_stat->tbtt_us[wp]);
+ bcn_stat->tbtt_tu[wp] = bcn_stat->tbtt_us[wp] / 1024;
+ bcn_stat->wp = (wp + 1) % RTW89_BCN_TRACK_STAT_NR;
+ bcn_stat->num = umin(num + 1, RTW89_BCN_TRACK_STAT_NR);
+ bcn_track->is_data_ready = bcn_stat->num == RTW89_BCN_TRACK_STAT_NR;
+}
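Each beacon's TBTT phase is recovered from the on-air timestamp: the MAC stamps the TSF a fixed latency after the start of the frame (the RTW89_APPEND_TSF_* constants), so subtracting that latency and taking the remainder modulo the beacon interval gives the TBTT offset within the interval. A worked example with illustrative values:

/* beacon_int = 100 TU -> bcn_intvl_us = 102400. On 5 GHz,
 * tsf = 10000052 us minus the 52 us append latency leaves 10000000,
 * and 10000000 % 102400 = 67200 us -> tbtt_tu = 67200 / 1024 = 65 TU.
 */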
+
static void rtw89_vif_rx_stats_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
@@ -2237,6 +2741,7 @@ static void rtw89_vif_rx_stats_iter(void *data, u8 *mac,
struct ieee80211_bss_conf *bss_conf;
struct rtw89_vif_link *rtwvif_link;
const u8 *bssid = iter_data->bssid;
+ const u8 *target_bssid;
if (rtwdev->scanning &&
(ieee80211_is_beacon(hdr->frame_control) ||
@@ -2258,7 +2763,10 @@ static void rtw89_vif_rx_stats_iter(void *data, u8 *mac,
goto out;
}
- if (!ether_addr_equal(bss_conf->bssid, bssid))
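+	/* Beacons of an MBSSID nontransmitted BSS carry the transmitter's
+	 * BSSID, so match against that instead.
+	 */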
+ target_bssid = ieee80211_is_beacon(hdr->frame_control) &&
+ bss_conf->nontransmitted ?
+ bss_conf->transmitter_bssid : bss_conf->bssid;
+ if (!ether_addr_equal(target_bssid, bssid))
goto out;
if (is_mld) {
@@ -2272,7 +2780,6 @@ static void rtw89_vif_rx_stats_iter(void *data, u8 *mac,
rtw89_vif_sync_bcn_tsf(rtwvif_link, hdr, skb->len);
rtw89_fw_h2c_rssi_offload(rtwdev, phy_ppdu);
}
- pkt_stat->beacon_nr++;
if (phy_ppdu) {
ewma_rssi_add(&rtwdev->phystat.bcn_rssi, phy_ppdu->rssi_avg);
@@ -2280,7 +2787,11 @@ static void rtw89_vif_rx_stats_iter(void *data, u8 *mac,
rtwvif_link->bcn_bw_idx = phy_ppdu->bw_idx;
}
+ pkt_stat->beacon_nr++;
pkt_stat->beacon_rate = desc_info->data_rate;
+ pkt_stat->beacon_len = skb->len;
+
+ rtw89_vif_rx_bcn_stat(rtwdev, bss_conf, skb);
}
if (!ether_addr_equal(bss_conf->addr, hdr->addr1))
@@ -3226,7 +3737,7 @@ static u32 rtw89_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev, u8 tid)
u8 qsel, ch_dma;
qsel = rtw89_core_get_qsel(rtwdev, tid);
- ch_dma = rtw89_core_get_ch_dma(rtwdev, qsel);
+ ch_dma = rtw89_chip_get_ch_dma(rtwdev, qsel);
return rtw89_hci_check_and_reclaim_tx_resource(rtwdev, ch_dma);
}
@@ -3411,6 +3922,7 @@ int rtw89_core_send_nullfunc(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rt
struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
int link_id = ieee80211_vif_is_mld(vif) ? rtwvif_link->link_id : -1;
struct rtw89_sta_link *rtwsta_link;
+ struct rtw89_tx_wait_info *wait;
struct ieee80211_sta *sta;
struct ieee80211_hdr *hdr;
struct rtw89_sta *rtwsta;
@@ -3420,6 +3932,12 @@ int rtw89_core_send_nullfunc(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rt
if (vif->type != NL80211_IFTYPE_STATION || !vif->cfg.assoc)
return 0;
+ wait = kzalloc(sizeof(*wait), GFP_KERNEL);
+ if (!wait)
+ return -ENOMEM;
+
+ init_completion(&wait->completion);
+
rcu_read_lock();
sta = ieee80211_find_sta(vif, vif->cfg.ap_addr);
if (!sta) {
@@ -3434,6 +3952,8 @@ int rtw89_core_send_nullfunc(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rt
goto out;
}
+ wait->skb = skb;
+
hdr = (struct ieee80211_hdr *)skb->data;
if (ps)
hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
@@ -3441,10 +3961,12 @@ int rtw89_core_send_nullfunc(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rt
rtwsta_link = rtwsta->links[rtwvif_link->link_id];
if (unlikely(!rtwsta_link)) {
ret = -ENOLINK;
+ dev_kfree_skb_any(skb);
goto out;
}
- ret = rtw89_core_tx_write_link(rtwdev, rtwvif_link, rtwsta_link, skb, &qsel, true);
+ ret = rtw89_core_tx_write_link(rtwdev, rtwvif_link, rtwsta_link, skb, &qsel, true,
+ wait);
if (ret) {
rtw89_warn(rtwdev, "nullfunc transmit failed: %d\n", ret);
dev_kfree_skb_any(skb);
@@ -3453,10 +3975,11 @@ int rtw89_core_send_nullfunc(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rt
rcu_read_unlock();
- return rtw89_core_tx_kick_off_and_wait(rtwdev, skb, qsel,
+ return rtw89_core_tx_kick_off_and_wait(rtwdev, skb, wait, qsel,
timeout);
out:
rcu_read_unlock();
+ kfree(wait);
return ret;
}
@@ -3697,6 +4220,9 @@ static void rtw89_enter_lps_track(struct rtw89_dev *rtwdev)
vif->type == NL80211_IFTYPE_P2P_CLIENT))
continue;
+ if (!rtw89_core_bcn_track_can_lps(rtwdev))
+ continue;
+
rtw89_enter_lps(rtwdev, rtwvif, true);
}
}
@@ -3883,6 +4409,7 @@ static void rtw89_track_work(struct wiphy *wiphy, struct wiphy_work *work)
rtw89_btc_ntfy_wl_sta(rtwdev);
}
rtw89_mac_bf_monitor_track(rtwdev);
+ rtw89_core_bcn_track(rtwdev);
rtw89_phy_stat_track(rtwdev);
rtw89_phy_env_monitor_track(rtwdev);
rtw89_phy_dig(rtwdev);
@@ -4129,8 +4656,10 @@ int rtw89_core_sta_link_disassoc(struct rtw89_dev *rtwdev,
rtw89_assoc_link_clr(rtwsta_link);
- if (vif->type == NL80211_IFTYPE_STATION)
+ if (vif->type == NL80211_IFTYPE_STATION) {
rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, rtwvif_link, false);
+ rtw89_core_bcn_track_reset(rtwdev);
+ }
if (rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT)
rtw89_p2p_noa_once_deinit(rtwvif_link);
@@ -4271,6 +4800,7 @@ int rtw89_core_sta_link_assoc(struct rtw89_dev *rtwdev,
BTC_ROLE_MSTS_STA_CONN_END);
rtw89_core_get_no_ul_ofdma_htc(rtwdev, &rtwsta_link->htc_template, chan);
rtw89_phy_ul_tb_assoc(rtwdev, rtwvif_link);
+ rtw89_core_bcn_track_assoc(rtwdev, rtwvif_link);
ret = rtw89_fw_h2c_general_pkt(rtwdev, rtwvif_link, rtwsta_link->mac_id);
if (ret) {
@@ -4829,39 +5359,96 @@ void rtw89_core_csa_beacon_work(struct wiphy *wiphy, struct wiphy_work *work)
}
}
-int rtw89_wait_for_cond(struct rtw89_wait_info *wait, unsigned int cond)
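+/*
+ * Two-phase wait: _prep() claims the condition slot and publishes an RCU
+ * response object; _eval() waits on it, then resets the slot. The RCU
+ * indirection lets rtw89_complete_cond() run safely even after a waiter
+ * has timed out and freed its response.
+ */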
+struct rtw89_wait_response *
+rtw89_wait_for_cond_prep(struct rtw89_wait_info *wait, unsigned int cond)
{
- struct completion *cmpl = &wait->completion;
- unsigned long time_left;
+ struct rtw89_wait_response *prep;
unsigned int cur;
+	/* -EPERM is used _iff_ the eval side must not make any changes */
+
cur = atomic_cmpxchg(&wait->cond, RTW89_WAIT_COND_IDLE, cond);
if (cur != RTW89_WAIT_COND_IDLE)
- return -EBUSY;
+ return ERR_PTR(-EPERM);
+
+ prep = kzalloc(sizeof(*prep), GFP_KERNEL);
+ if (!prep)
+ return ERR_PTR(-ENOMEM);
+
+ init_completion(&prep->completion);
+
+ rcu_assign_pointer(wait->resp, prep);
+
+ return prep;
+}
+
+int rtw89_wait_for_cond_eval(struct rtw89_wait_info *wait,
+ struct rtw89_wait_response *prep, int err)
+{
+ unsigned long time_left;
+
+ if (IS_ERR(prep)) {
+ err = err ?: PTR_ERR(prep);
+
+ /* special error case: no permission to reset anything */
+ if (PTR_ERR(prep) == -EPERM)
+ return err;
- time_left = wait_for_completion_timeout(cmpl, RTW89_WAIT_FOR_COND_TIMEOUT);
+ goto reset;
+ }
+
+ if (err)
+ goto cleanup;
+
+ time_left = wait_for_completion_timeout(&prep->completion,
+ RTW89_WAIT_FOR_COND_TIMEOUT);
if (time_left == 0) {
- atomic_set(&wait->cond, RTW89_WAIT_COND_IDLE);
- return -ETIMEDOUT;
+ err = -ETIMEDOUT;
+ goto cleanup;
}
+ wait->data = prep->data;
+
+cleanup:
+ rcu_assign_pointer(wait->resp, NULL);
+ kfree_rcu(prep, rcu_head);
+
+reset:
+ atomic_set(&wait->cond, RTW89_WAIT_COND_IDLE);
+
+ if (err)
+ return err;
+
if (wait->data.err)
return -EFAULT;
return 0;
}
+static void rtw89_complete_cond_resp(struct rtw89_wait_response *resp,
+ const struct rtw89_completion_data *data)
+{
+ resp->data = *data;
+ complete(&resp->completion);
+}
+
void rtw89_complete_cond(struct rtw89_wait_info *wait, unsigned int cond,
const struct rtw89_completion_data *data)
{
+ struct rtw89_wait_response *resp;
unsigned int cur;
+ guard(rcu)();
+
+ resp = rcu_dereference(wait->resp);
+ if (!resp)
+ return;
+
cur = atomic_cmpxchg(&wait->cond, cond, RTW89_WAIT_COND_IDLE);
if (cur != cond)
return;
- wait->data = *data;
- complete(&wait->completion);
+ rtw89_complete_cond_resp(resp, data);
}
void rtw89_core_ntfy_btc_event(struct rtw89_dev *rtwdev, enum rtw89_btc_hmsg event)
@@ -4908,6 +5495,8 @@ int rtw89_core_start(struct rtw89_dev *rtwdev)
{
int ret;
+ rtw89_phy_init_bb_afe(rtwdev);
+
ret = rtw89_mac_init(rtwdev);
if (ret) {
rtw89_err(rtwdev, "mac init fail, ret:%d\n", ret);
@@ -4952,6 +5541,7 @@ int rtw89_core_start(struct rtw89_dev *rtwdev)
rtw89_btc_ntfy_radio_state(rtwdev, BTC_RFCTRL_WL_ON);
rtw89_fw_h2c_fw_log(rtwdev, rtwdev->fw.log.enable);
rtw89_fw_h2c_init_ba_cam(rtwdev);
+ rtw89_tas_fw_timer_enable(rtwdev, true);
return 0;
}
@@ -4967,6 +5557,7 @@ void rtw89_core_stop(struct rtw89_dev *rtwdev)
if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags))
return;
+ rtw89_tas_fw_timer_enable(rtwdev, false);
rtw89_btc_ntfy_radio_state(rtwdev, BTC_RFCTRL_WL_OFF);
clear_bit(RTW89_FLAG_RUNNING, rtwdev->flags);
@@ -4978,6 +5569,7 @@ void rtw89_core_stop(struct rtw89_dev *rtwdev)
wiphy_work_cancel(wiphy, &btc->dhcp_notify_work);
wiphy_work_cancel(wiphy, &btc->icmp_notify_work);
cancel_delayed_work_sync(&rtwdev->txq_reinvoke_work);
+ wiphy_delayed_work_cancel(wiphy, &rtwdev->tx_wait_work);
wiphy_delayed_work_cancel(wiphy, &rtwdev->track_work);
wiphy_delayed_work_cancel(wiphy, &rtwdev->track_ps_work);
wiphy_delayed_work_cancel(wiphy, &rtwdev->chanctx_work);
@@ -5203,6 +5795,7 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
INIT_LIST_HEAD(&rtwdev->scan_info.pkt_list[band]);
}
INIT_LIST_HEAD(&rtwdev->scan_info.chan_list);
+ INIT_LIST_HEAD(&rtwdev->tx_waits);
INIT_WORK(&rtwdev->ba_work, rtw89_core_ba_work);
INIT_WORK(&rtwdev->txq_work, rtw89_core_txq_work);
INIT_DELAYED_WORK(&rtwdev->txq_reinvoke_work, rtw89_core_txq_reinvoke_work);
@@ -5214,6 +5807,7 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
wiphy_delayed_work_init(&rtwdev->coex_rfk_chk_work, rtw89_coex_rfk_chk_work);
wiphy_delayed_work_init(&rtwdev->cfo_track_work, rtw89_phy_cfo_track_work);
wiphy_delayed_work_init(&rtwdev->mcc_prepare_done_work, rtw89_mcc_prepare_done_work);
+ wiphy_delayed_work_init(&rtwdev->tx_wait_work, rtw89_tx_wait_work);
INIT_DELAYED_WORK(&rtwdev->forbid_ba_work, rtw89_forbid_ba_work);
wiphy_delayed_work_init(&rtwdev->antdiv_work, rtw89_phy_antdiv_work);
rtwdev->txq_wq = alloc_workqueue("rtw89_tx_wq", WQ_UNBOUND | WQ_HIGHPRI, 0);
@@ -5813,6 +6407,7 @@ int rtw89_core_register(struct rtw89_dev *rtwdev)
return ret;
}
+ rtw89_phy_dm_init_data(rtwdev);
rtw89_debugfs_init(rtwdev);
return 0;
@@ -5863,6 +6458,9 @@ struct rtw89_dev *rtw89_alloc_ieee80211_hw(struct device *device,
ops->cancel_remain_on_channel = NULL;
}
+ if (!chip->support_noise)
+ ops->get_survey = NULL;
+
driver_data_size = sizeof(struct rtw89_dev) + bus_data_size;
hw = ieee80211_alloc_hw(driver_data_size, ops);
if (!hw)
diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h
index 43e10278e14d..928c8c84c964 100644
--- a/drivers/net/wireless/realtek/rtw89/core.h
+++ b/drivers/net/wireless/realtek/rtw89/core.h
@@ -1011,6 +1011,7 @@ struct rtw89_port_reg {
u32 ptcl_dbg;
u32 ptcl_dbg_info;
u32 bcn_drop_all;
+ u32 bcn_psr_rpt;
u32 hiq_win[RTW89_PORT_NUM];
};
@@ -3506,9 +3507,12 @@ struct rtw89_phy_rate_pattern {
bool enable;
};
+#define RTW89_TX_WAIT_WORK_TIMEOUT msecs_to_jiffies(500)
struct rtw89_tx_wait_info {
struct rcu_head rcu_head;
+ struct list_head list;
struct completion completion;
+ struct sk_buff *skb;
bool tx_done;
};
@@ -3759,6 +3763,7 @@ struct rtw89_chip_ops {
void (*fill_txdesc_fwcmd)(struct rtw89_dev *rtwdev,
struct rtw89_tx_desc_info *desc_info,
void *txdesc);
+ u8 (*get_ch_dma)(struct rtw89_dev *rtwdev, u8 qsel);
int (*cfg_ctrl_path)(struct rtw89_dev *rtwdev, bool wl);
int (*mac_cfg_gnt)(struct rtw89_dev *rtwdev,
const struct rtw89_mac_ax_coex_gnt *gnt_cfg);
@@ -4363,6 +4368,9 @@ struct rtw89_chanctx_listener {
(struct rtw89_dev *rtwdev, enum rtw89_chanctx_state state);
};
+#define RTW89_NHM_TH_NUM 11
+#define RTW89_NHM_RPT_NUM 12
+
struct rtw89_chip_info {
enum rtw89_core_chip_id chip_id;
enum rtw89_chip_gen chip_gen;
@@ -4397,6 +4405,7 @@ struct rtw89_chip_info {
bool support_ant_gain;
bool support_tas;
bool support_sar_by_ant;
+ bool support_noise;
bool ul_tb_waveform_ctrl;
bool ul_tb_pwr_diff;
bool rx_freq_frome_ie;
@@ -4481,6 +4490,8 @@ struct rtw89_chip_info {
bool cfo_hw_comp;
const struct rtw89_reg_def *dcfo_comp;
u8 dcfo_comp_sft;
+ const struct rtw89_reg_def (*nhm_report)[RTW89_NHM_RPT_NUM];
+ const struct rtw89_reg_def (*nhm_th)[RTW89_NHM_TH_NUM];
const struct rtw89_imr_info *imr_info;
const struct rtw89_imr_table *imr_dmac_table;
const struct rtw89_imr_table *imr_cmac_table;
@@ -4542,17 +4553,23 @@ struct rtw89_completion_data {
u8 buf[RTW89_COMPLETION_BUF_SIZE];
};
+struct rtw89_wait_response {
+ struct rcu_head rcu_head;
+ struct completion completion;
+ struct rtw89_completion_data data;
+};
+
struct rtw89_wait_info {
atomic_t cond;
- struct completion completion;
struct rtw89_completion_data data;
+ struct rtw89_wait_response __rcu *resp;
};
#define RTW89_WAIT_FOR_COND_TIMEOUT msecs_to_jiffies(100)
static inline void rtw89_init_wait(struct rtw89_wait_info *wait)
{
- init_completion(&wait->completion);
+ rcu_assign_pointer(wait->resp, NULL);
atomic_set(&wait->cond, RTW89_WAIT_COND_IDLE);
}
@@ -4622,6 +4639,7 @@ enum rtw89_fw_feature {
RTW89_FW_FEATURE_SCAN_OFFLOAD_EXTRA_OP,
RTW89_FW_FEATURE_RFK_NTFY_MCC_V0,
RTW89_FW_FEATURE_LPS_DACK_BY_C2H_REG,
+ RTW89_FW_FEATURE_BEACON_TRACKING,
};
struct rtw89_fw_suit {
@@ -4681,6 +4699,7 @@ struct rtw89_fw_elm_info {
struct rtw89_fw_txpwr_track_cfg *txpwr_trk;
struct rtw89_phy_rfk_log_fmt *rfk_log_fmt;
const struct rtw89_regd_data *regd;
+ const struct rtw89_fw_element_hdr *afe;
};
enum rtw89_fw_mss_dev_type {
@@ -5074,9 +5093,36 @@ struct rtw89_pkt_drop_params {
struct rtw89_pkt_stat {
u16 beacon_nr;
u8 beacon_rate;
+ u32 beacon_len;
u32 rx_rate_cnt[RTW89_HW_RATE_NR];
};
+#define RTW89_BCN_TRACK_STAT_NR 32
+#define RTW89_BCN_TRACK_SCALE_FACTOR 10
+#define RTW89_BCN_TRACK_MAX_BIN_NUM 6
+#define RTW89_BCN_TRACK_BIN_WIDTH 5
+#define RTW89_BCN_TRACK_TARGET_BCN 80
+
+struct rtw89_beacon_dist {
+ u16 min;
+ u16 max;
+ u16 outlier_count;
+ u16 lower_bound;
+ u16 upper_bound;
+ u16 bins[RTW89_BCN_TRACK_MAX_BIN_NUM];
+};
+
+struct rtw89_beacon_stat {
+ u8 num;
+ u8 wp;
+ u16 tbtt_tu_min;
+ u16 tbtt_tu_max;
+ u16 drift[RTW89_BCN_TRACK_STAT_NR];
+ u32 tbtt_us[RTW89_BCN_TRACK_STAT_NR];
+ u16 tbtt_tu[RTW89_BCN_TRACK_STAT_NR];
+ struct rtw89_beacon_dist bcn_dist;
+};
+
DECLARE_EWMA(thermal, 4, 4);
struct rtw89_phy_stat {
@@ -5085,6 +5131,7 @@ struct rtw89_phy_stat {
struct ewma_rssi bcn_rssi;
struct rtw89_pkt_stat cur_pkt_stat;
struct rtw89_pkt_stat last_pkt_stat;
+ struct rtw89_beacon_stat bcn_stat;
};
enum rtw89_rfk_report_state {
@@ -5434,6 +5481,7 @@ enum rtw89_env_racing_lv {
struct rtw89_ccx_para_info {
enum rtw89_env_racing_lv rac_lv;
u16 mntr_time;
+ bool nhm_incld_cca;
u8 nhm_manual_th_ofst;
u8 nhm_manual_th0;
enum rtw89_ifs_clm_application ifs_clm_app;
@@ -5467,9 +5515,13 @@ enum rtw89_ccx_edcca_opt_bw_idx {
RTW89_CCX_EDCCA_BW20_7 = 7
};
-#define RTW89_NHM_TH_NUM 11
+struct rtw89_nhm_report {
+ struct list_head list;
+ struct ieee80211_channel *channel;
+ u8 noise;
+};
+
#define RTW89_FAHM_TH_NUM 11
-#define RTW89_NHM_RPT_NUM 12
#define RTW89_FAHM_RPT_NUM 12
#define RTW89_IFS_CLM_NUM 4
struct rtw89_env_monitor_info {
@@ -5503,6 +5555,13 @@ struct rtw89_env_monitor_info {
u16 ifs_clm_ofdm_fa_permil;
u32 ifs_clm_ifs_avg[RTW89_IFS_CLM_NUM];
u32 ifs_clm_cca_avg[RTW89_IFS_CLM_NUM];
+ bool nhm_include_cca;
+ u32 nhm_sum;
+ u32 nhm_mntr_time;
+ u16 nhm_result[RTW89_NHM_RPT_NUM];
+ u8 nhm_th[RTW89_NHM_RPT_NUM];
+ struct rtw89_nhm_report *nhm_his[RTW89_BAND_NUM];
+ struct list_head nhm_rpt_list;
};
enum rtw89_ser_rcvy_step {
@@ -5715,8 +5774,8 @@ struct rtw89_wow_gtk_info {
u8 kck[32];
u8 kek[32];
u8 tk1[16];
- u8 txmickey[8];
u8 rxmickey[8];
+ u8 txmickey[8];
__le32 igtk_keyid;
__le64 ipn;
u8 igtk[2][32];
@@ -5882,6 +5941,24 @@ struct rtw89_mlo_info {
struct rtw89_wait_info wait;
};
+struct rtw89_beacon_track_info {
+ bool is_data_ready;
+	u32 tbtt_offset; /* in microseconds */
+	u16 bcn_timeout; /* in milliseconds */
+
+ /* The following are constant and set at association. */
+ u8 dtim;
+ u16 beacon_int;
+ u16 low_bcn_th;
+ u16 med_bcn_th;
+ u16 high_bcn_th;
+ u16 target_bcn_th;
+ u16 outlier_low_bcn_th;
+ u16 outlier_high_bcn_th;
+ u32 close_bcn_intvl_th;
+ u32 tbtt_diff_th;
+};
+
struct rtw89_dev {
struct ieee80211_hw *hw;
struct device *dev;
@@ -5896,6 +5973,7 @@ struct rtw89_dev {
const struct rtw89_pci_info *pci_info;
const struct rtw89_rfe_parms *rfe_parms;
struct rtw89_hal hal;
+ struct rtw89_beacon_track_info bcn_track;
struct rtw89_mcc_info mcc;
struct rtw89_mlo_info mlo;
struct rtw89_mac_info mac;
@@ -5925,6 +6003,9 @@ struct rtw89_dev {
/* used to protect rpwm */
spinlock_t rpwm_lock;
+ struct list_head tx_waits;
+ struct wiphy_delayed_work tx_wait_work;
+
struct rtw89_cam_info cam_info;
struct sk_buff_head c2h_queue;
@@ -6181,6 +6262,26 @@ rtw89_assoc_link_rcu_dereference(struct rtw89_dev *rtwdev, u8 macid)
list_first_entry_or_null(&p->dlink_pool, typeof(*p->links_inst), dlink_schd); \
})
+static inline void rtw89_tx_wait_release(struct rtw89_tx_wait_info *wait)
+{
+ dev_kfree_skb_any(wait->skb);
+ kfree_rcu(wait, rcu_head);
+}
+
+static inline void rtw89_tx_wait_list_clear(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_tx_wait_info *wait, *tmp;
+
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
+
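+	/* Reap only the waits whose TX has already completed. */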
+ list_for_each_entry_safe(wait, tmp, &rtwdev->tx_waits, list) {
+ if (!completion_done(&wait->completion))
+ continue;
+ list_del(&wait->list);
+ rtw89_tx_wait_release(wait);
+ }
+}
+
static inline int rtw89_hci_tx_write(struct rtw89_dev *rtwdev,
struct rtw89_core_tx_request *tx_req)
{
@@ -6190,6 +6291,7 @@ static inline int rtw89_hci_tx_write(struct rtw89_dev *rtwdev,
static inline void rtw89_hci_reset(struct rtw89_dev *rtwdev)
{
rtwdev->hci.ops->reset(rtwdev);
+ rtw89_tx_wait_list_clear(rtwdev);
}
static inline int rtw89_hci_start(struct rtw89_dev *rtwdev)
@@ -6322,9 +6424,13 @@ static inline void rtw89_hci_clear(struct rtw89_dev *rtwdev, struct pci_dev *pde
static inline
struct rtw89_tx_skb_data *RTW89_TX_SKB_CB(struct sk_buff *skb)
{
+ /*
+	 * This should be used by/after rtw89_hci_tx_write() and before calling
+ * ieee80211_tx_info_clear_status().
+ */
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- return (struct rtw89_tx_skb_data *)info->status.status_driver_data;
+ return (struct rtw89_tx_skb_data *)info->driver_data;
}
static inline u8 rtw89_read8(struct rtw89_dev *rtwdev, u32 addr)
@@ -7131,6 +7237,14 @@ void rtw89_chip_fill_txdesc_fwcmd(struct rtw89_dev *rtwdev,
}
static inline
+u8 rtw89_chip_get_ch_dma(struct rtw89_dev *rtwdev, u8 qsel)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ return chip->ops->get_ch_dma(rtwdev, qsel);
+}
+
+static inline
void rtw89_chip_mac_cfg_gnt(struct rtw89_dev *rtwdev,
const struct rtw89_mac_ax_coex_gnt *gnt_cfg)
{
@@ -7258,11 +7372,12 @@ static inline struct sk_buff *rtw89_alloc_skb_for_rx(struct rtw89_dev *rtwdev,
return dev_alloc_skb(length);
}
-static inline void rtw89_core_tx_wait_complete(struct rtw89_dev *rtwdev,
+static inline bool rtw89_core_tx_wait_complete(struct rtw89_dev *rtwdev,
struct rtw89_tx_skb_data *skb_data,
bool tx_done)
{
struct rtw89_tx_wait_info *wait;
+ bool ret = false;
rcu_read_lock();
@@ -7270,11 +7385,14 @@ static inline void rtw89_core_tx_wait_complete(struct rtw89_dev *rtwdev,
if (!wait)
goto out;
+ ret = true;
wait->tx_done = tx_done;
- complete(&wait->completion);
+ /* Don't access skb anymore after completion */
+ complete_all(&wait->completion);
out:
rcu_read_unlock();
+ return ret;
}
static inline bool rtw89_is_mlo_1_1(struct rtw89_dev *rtwdev)
@@ -7358,7 +7476,8 @@ int rtw89_h2c_tx(struct rtw89_dev *rtwdev,
struct sk_buff *skb, bool fwdl);
void rtw89_core_tx_kick_off(struct rtw89_dev *rtwdev, u8 qsel);
int rtw89_core_tx_kick_off_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb,
- int qsel, unsigned int timeout);
+ struct rtw89_tx_wait_info *wait, int qsel,
+ unsigned int timeout);
void rtw89_core_fill_txdesc(struct rtw89_dev *rtwdev,
struct rtw89_tx_desc_info *desc_info,
void *txdesc);
@@ -7374,6 +7493,8 @@ void rtw89_core_fill_txdesc_fwcmd_v1(struct rtw89_dev *rtwdev,
void rtw89_core_fill_txdesc_fwcmd_v2(struct rtw89_dev *rtwdev,
struct rtw89_tx_desc_info *desc_info,
void *txdesc);
+u8 rtw89_core_get_ch_dma(struct rtw89_dev *rtwdev, u8 qsel);
+u8 rtw89_core_get_ch_dma_v1(struct rtw89_dev *rtwdev, u8 qsel);
void rtw89_core_rx(struct rtw89_dev *rtwdev,
struct rtw89_rx_desc_info *desc_info,
struct sk_buff *skb);
@@ -7454,13 +7575,18 @@ void rtw89_vif_type_mapping(struct rtw89_vif_link *rtwvif_link, bool assoc);
int rtw89_chip_info_setup(struct rtw89_dev *rtwdev);
void rtw89_chip_cfg_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link);
-bool rtw89_ra_report_to_bitrate(struct rtw89_dev *rtwdev, u8 rpt_rate, u16 *bitrate);
+bool rtw89_legacy_rate_to_bitrate(struct rtw89_dev *rtwdev, u8 legacy_rate, u16 *bitrate);
int rtw89_regd_setup(struct rtw89_dev *rtwdev);
int rtw89_regd_init_hint(struct rtw89_dev *rtwdev);
const char *rtw89_regd_get_string(enum rtw89_regulation_type regd);
void rtw89_traffic_stats_init(struct rtw89_dev *rtwdev,
struct rtw89_traffic_stats *stats);
-int rtw89_wait_for_cond(struct rtw89_wait_info *wait, unsigned int cond);
+struct rtw89_wait_response *
+rtw89_wait_for_cond_prep(struct rtw89_wait_info *wait, unsigned int cond)
+__acquires(rtw89_wait);
+int rtw89_wait_for_cond_eval(struct rtw89_wait_info *wait,
+ struct rtw89_wait_response *prep, int err)
+__releases(rtw89_wait);
void rtw89_complete_cond(struct rtw89_wait_info *wait, unsigned int cond,
const struct rtw89_completion_data *data);
int rtw89_core_start(struct rtw89_dev *rtwdev);
diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c
index afdfb9647fc1..3dc7981c510f 100644
--- a/drivers/net/wireless/realtek/rtw89/debug.c
+++ b/drivers/net/wireless/realtek/rtw89/debug.c
@@ -86,6 +86,7 @@ struct rtw89_debugfs {
struct rtw89_debugfs_priv stations;
struct rtw89_debugfs_priv disable_dm;
struct rtw89_debugfs_priv mlo_mode;
+ struct rtw89_debugfs_priv beacon_info;
};
struct rtw89_debugfs_iter_data {
@@ -3562,6 +3563,58 @@ static int rtw89_dbg_trigger_ctrl_error(struct rtw89_dev *rtwdev)
return 0;
}
+static int rtw89_dbg_trigger_mac_error_ax(struct rtw89_dev *rtwdev)
+{
+ u16 val16;
+ u8 val8;
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_CMAC_SEL);
+ if (ret)
+ return ret;
+
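+	/* Toggle TMAC, then briefly unmask the F2PCMD-empty error interrupt
+	 * so the latched condition raises a MAC error.
+	 */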
+ val8 = rtw89_read8(rtwdev, R_AX_CMAC_FUNC_EN);
+ rtw89_write8(rtwdev, R_AX_CMAC_FUNC_EN, val8 & ~B_AX_TMAC_EN);
+ mdelay(1);
+ rtw89_write8(rtwdev, R_AX_CMAC_FUNC_EN, val8);
+
+ val16 = rtw89_read16(rtwdev, R_AX_PTCL_IMR0);
+ rtw89_write16(rtwdev, R_AX_PTCL_IMR0, val16 | B_AX_F2PCMD_EMPTY_ERR_INT_EN);
+ rtw89_write16(rtwdev, R_AX_PTCL_IMR0, val16);
+
+ return 0;
+}
+
+static int rtw89_dbg_trigger_mac_error_be(struct rtw89_dev *rtwdev)
+{
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_CMAC_SEL);
+ if (ret)
+ return ret;
+
+ rtw89_write32_set(rtwdev, R_BE_CMAC_FW_TRIGGER_IDCT_ISR,
+ B_BE_CMAC_FW_TRIG_IDCT | B_BE_CMAC_FW_ERR_IDCT_IMR);
+
+ return 0;
+}
+
+static int rtw89_dbg_trigger_mac_error(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ rtw89_leave_ps_mode(rtwdev);
+
+ switch (chip->chip_gen) {
+ case RTW89_CHIP_AX:
+ return rtw89_dbg_trigger_mac_error_ax(rtwdev);
+ case RTW89_CHIP_BE:
+ return rtw89_dbg_trigger_mac_error_be(rtwdev);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static ssize_t
rtw89_debug_priv_fw_crash_get(struct rtw89_dev *rtwdev,
struct rtw89_debugfs_priv *debugfs_priv,
@@ -3577,6 +3630,7 @@ rtw89_debug_priv_fw_crash_get(struct rtw89_dev *rtwdev,
enum rtw89_dbg_crash_simulation_type {
RTW89_DBG_SIM_CPU_EXCEPTION = 1,
RTW89_DBG_SIM_CTRL_ERROR = 2,
+ RTW89_DBG_SIM_MAC_ERROR = 3,
};
static ssize_t
@@ -3585,6 +3639,7 @@ rtw89_debug_priv_fw_crash_set(struct rtw89_dev *rtwdev,
const char *buf, size_t count)
{
int (*sim)(struct rtw89_dev *rtwdev);
+ bool announce = true;
u8 crash_type;
int ret;
@@ -3603,11 +3658,19 @@ rtw89_debug_priv_fw_crash_set(struct rtw89_dev *rtwdev,
case RTW89_DBG_SIM_CTRL_ERROR:
sim = rtw89_dbg_trigger_ctrl_error;
break;
+ case RTW89_DBG_SIM_MAC_ERROR:
+ sim = rtw89_dbg_trigger_mac_error;
+
+ /* Driver SER flow won't get involved; only FW will. */
+ announce = false;
+ break;
default:
return -EINVAL;
}
- set_bit(RTW89_FLAG_CRASH_SIMULATING, rtwdev->flags);
+ if (announce)
+ set_bit(RTW89_FLAG_CRASH_SIMULATING, rtwdev->flags);
+
ret = sim(rtwdev);
if (ret)
@@ -4298,6 +4361,64 @@ rtw89_debug_priv_mlo_mode_set(struct rtw89_dev *rtwdev,
return count;
}
+static ssize_t
+rtw89_debug_priv_beacon_info_get(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ char *buf, size_t bufsz)
+{
+ struct rtw89_pkt_stat *pkt_stat = &rtwdev->phystat.last_pkt_stat;
+ struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track;
+ struct rtw89_beacon_stat *bcn_stat = &rtwdev->phystat.bcn_stat;
+ struct rtw89_beacon_dist *bcn_dist = &bcn_stat->bcn_dist;
+ u16 upper, lower = bcn_stat->tbtt_tu_min;
+ char *p = buf, *end = buf + bufsz;
+ u16 *drift = bcn_stat->drift;
+ u8 bcn_num = bcn_stat->num;
+ u8 count;
+ u8 i;
+
+ p += scnprintf(p, end - p, "[Beacon info]\n");
+ p += scnprintf(p, end - p, "count: %u\n", pkt_stat->beacon_nr);
+ p += scnprintf(p, end - p, "interval: %u\n", bcn_track->beacon_int);
+ p += scnprintf(p, end - p, "dtim: %u\n", bcn_track->dtim);
+ p += scnprintf(p, end - p, "raw rssi: %lu\n",
+ ewma_rssi_read(&rtwdev->phystat.bcn_rssi));
+ p += scnprintf(p, end - p, "hw rate: %u\n", pkt_stat->beacon_rate);
+ p += scnprintf(p, end - p, "length: %u\n", pkt_stat->beacon_len);
+
+ p += scnprintf(p, end - p, "\n[Distribution]\n");
+ p += scnprintf(p, end - p, "tbtt\n");
+ for (i = 0; i < RTW89_BCN_TRACK_MAX_BIN_NUM; i++) {
+ upper = lower + RTW89_BCN_TRACK_BIN_WIDTH - 1;
+ if (i == RTW89_BCN_TRACK_MAX_BIN_NUM - 1)
+ upper = max(upper, bcn_stat->tbtt_tu_max);
+
+ p += scnprintf(p, end - p, "%02u - %02u: %u\n",
+ lower, upper, bcn_dist->bins[i]);
+
+ lower = upper + 1;
+ }
+
+ p += scnprintf(p, end - p, "\ndrift\n");
+
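+	/* Print runs of identical drift samples as "<value>: <count>". */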
+ for (i = 0; i < bcn_num; i += count) {
+ count = 1;
+ while (i + count < bcn_num && drift[i] == drift[i + count])
+ count++;
+
+ p += scnprintf(p, end - p, "%u: %u\n", drift[i], count);
+ }
+ p += scnprintf(p, end - p, "\nlower bound: %u\n", bcn_dist->lower_bound);
+ p += scnprintf(p, end - p, "upper bound: %u\n", bcn_dist->upper_bound);
+ p += scnprintf(p, end - p, "outlier count: %u\n", bcn_dist->outlier_count);
+
+ p += scnprintf(p, end - p, "\n[Tracking]\n");
+ p += scnprintf(p, end - p, "tbtt offset: %u\n", bcn_track->tbtt_offset);
+ p += scnprintf(p, end - p, "bcn timeout: %u\n", bcn_track->bcn_timeout);
+
+ return p - buf;
+}
+
#define rtw89_debug_priv_get(name, opts...) \
{ \
.cb_read = rtw89_debug_priv_ ##name## _get, \
@@ -4356,6 +4477,7 @@ static const struct rtw89_debugfs rtw89_debugfs_templ = {
.stations = rtw89_debug_priv_get(stations, RLOCK),
.disable_dm = rtw89_debug_priv_set_and_get(disable_dm, RWLOCK),
.mlo_mode = rtw89_debug_priv_set_and_get(mlo_mode, RWLOCK),
+ .beacon_info = rtw89_debug_priv_get(beacon_info),
};
#define rtw89_debugfs_add(name, mode, fopname, parent) \
@@ -4401,6 +4523,7 @@ void rtw89_debugfs_add_sec1(struct rtw89_dev *rtwdev, struct dentry *debugfs_top
rtw89_debugfs_add_r(stations);
rtw89_debugfs_add_rw(disable_dm);
rtw89_debugfs_add_rw(mlo_mode);
+ rtw89_debugfs_add_r(beacon_info);
}
void rtw89_debugfs_init(struct rtw89_dev *rtwdev)
diff --git a/drivers/net/wireless/realtek/rtw89/debug.h b/drivers/net/wireless/realtek/rtw89/debug.h
index fc690f7c55dc..a364e7adb079 100644
--- a/drivers/net/wireless/realtek/rtw89/debug.h
+++ b/drivers/net/wireless/realtek/rtw89/debug.h
@@ -56,6 +56,7 @@ static inline void rtw89_debugfs_deinit(struct rtw89_dev *rtwdev) {}
#endif
#define rtw89_info(rtwdev, a...) dev_info((rtwdev)->dev, ##a)
+#define rtw89_info_once(rtwdev, a...) dev_info_once((rtwdev)->dev, ##a)
#define rtw89_warn(rtwdev, a...) dev_warn((rtwdev)->dev, ##a)
#define rtw89_err(rtwdev, a...) dev_err((rtwdev)->dev, ##a)
diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
index 16e59a4a486e..ab904a7def1b 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.c
+++ b/drivers/net/wireless/realtek/rtw89/fw.c
@@ -830,11 +830,13 @@ static const struct __fw_feat_cfg fw_feat_tbl[] = {
__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 127, 0, LPS_DACK_BY_C2H_REG),
__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 128, 0, CRASH_TRIGGER_TYPE_1),
__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 128, 0, SCAN_OFFLOAD_EXTRA_OP),
+ __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 128, 0, BEACON_TRACKING),
__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, NO_LPS_PG),
__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, TX_WAKE),
__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 90, 0, CRASH_TRIGGER_TYPE_0),
__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 91, 0, SCAN_OFFLOAD),
__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 110, 0, BEACON_FILTER),
+ __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 122, 0, BEACON_TRACKING),
__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 127, 0, SCAN_OFFLOAD_EXTRA_OP),
__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 127, 0, LPS_DACK_BY_C2H_REG),
__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 127, 0, CRASH_TRIGGER_TYPE_1),
@@ -846,6 +848,9 @@ static const struct __fw_feat_cfg fw_feat_tbl[] = {
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 56, 10, BEACON_FILTER),
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 80, 0, WOW_REASON_V1),
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 128, 0, BEACON_LOSS_COUNT_V1),
+ __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 128, 0, LPS_DACK_BY_C2H_REG),
+ __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 128, 0, CRASH_TRIGGER_TYPE_1),
+ __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 129, 1, BEACON_TRACKING),
__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 30, 0, CRASH_TRIGGER_TYPE_0),
__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 11, 0, MACID_PAUSE_SLEEP),
__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 35, 0, SCAN_OFFLOAD),
@@ -864,6 +869,7 @@ static const struct __fw_feat_cfg fw_feat_tbl[] = {
__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 71, 0, BEACON_LOSS_COUNT_V1),
__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 76, 0, LPS_DACK_BY_C2H_REG),
__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 79, 0, CRASH_TRIGGER_TYPE_1),
+ __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 80, 0, BEACON_TRACKING),
};
static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw,
@@ -1280,6 +1286,18 @@ int rtw89_recognize_regd_from_elm(struct rtw89_dev *rtwdev,
return 0;
}
+static
+int rtw89_build_afe_pwr_seq_from_elm(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_element_hdr *elm,
+ const union rtw89_fw_element_arg arg)
+{
+ struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
+
+ elm_info->afe = elm;
+
+ return 0;
+}
+
static const struct rtw89_fw_element_handler __fw_element_handlers[] = {
[RTW89_FW_ELEMENT_ID_BBMCU0] = {__rtw89_fw_recognize_from_elm,
{ .fw_type = RTW89_FW_BBMCU0 }, NULL},
@@ -1365,6 +1383,9 @@ static const struct rtw89_fw_element_handler __fw_element_handlers[] = {
[RTW89_FW_ELEMENT_ID_REGD] = {
rtw89_recognize_regd_from_elm, {}, "REGD",
},
+ [RTW89_FW_ELEMENT_ID_AFE_PWR_SEQ] = {
+ rtw89_build_afe_pwr_seq_from_elm, {}, "AFE",
+ },
};
int rtw89_fw_recognize_elements(struct rtw89_dev *rtwdev)
@@ -1537,7 +1558,7 @@ static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev,
struct rtw89_fw_hdr *fw_hdr;
struct sk_buff *skb;
u32 truncated;
- u32 ret = 0;
+ int ret;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
@@ -3990,6 +4011,93 @@ fail:
}
EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon_be);
+int rtw89_fw_h2c_tbtt_tuning(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link, u32 offset)
+{
+ struct rtw89_h2c_tbtt_tuning *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c tbtt tuning\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_tbtt_tuning *)skb->data;
+
+ h2c->w0 = le32_encode_bits(rtwvif_link->phy_idx, RTW89_H2C_TBTT_TUNING_W0_BAND) |
+ le32_encode_bits(rtwvif_link->port, RTW89_H2C_TBTT_TUNING_W0_PORT);
+ h2c->w1 = le32_encode_bits(offset, RTW89_H2C_TBTT_TUNING_W1_SHIFT);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_PS,
+ H2C_FUNC_TBTT_TUNING, 0, 0,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+int rtw89_fw_h2c_pwr_lvl(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link)
+{
+#define RTW89_BCN_TO_VAL_MIN 4
+#define RTW89_BCN_TO_VAL_MAX 64
+#define RTW89_DTIM_TO_VAL_MIN 7
+#define RTW89_DTIM_TO_VAL_MAX 15
+ struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track;
+ struct rtw89_h2c_pwr_lvl *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ u8 bcn_to_val;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c pwr lvl\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_pwr_lvl *)skb->data;
+
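+	/* Clamp the tracked beacon timeout into the range the FW accepts. */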
+ bcn_to_val = clamp_t(u8, bcn_track->bcn_timeout,
+ RTW89_BCN_TO_VAL_MIN, RTW89_BCN_TO_VAL_MAX);
+
+ h2c->w0 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_PWR_LVL_W0_MACID) |
+ le32_encode_bits(bcn_to_val, RTW89_H2C_PWR_LVL_W0_BCN_TO_VAL) |
+ le32_encode_bits(0, RTW89_H2C_PWR_LVL_W0_PS_LVL) |
+ le32_encode_bits(0, RTW89_H2C_PWR_LVL_W0_TRX_LVL) |
+ le32_encode_bits(RTW89_DTIM_TO_VAL_MIN,
+ RTW89_H2C_PWR_LVL_W0_DTIM_TO_VAL);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_PS,
+ H2C_FUNC_PS_POWER_LEVEL, 0, 0,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
struct rtw89_sta_link *rtwsta_link,
@@ -6580,6 +6688,40 @@ fail:
return ret;
}
+int rtw89_fw_h2c_rf_tas_trigger(struct rtw89_dev *rtwdev, bool enable)
+{
+ struct rtw89_h2c_rf_tas *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c RF TAS\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_rf_tas *)skb->data;
+
+ h2c->enable = cpu_to_le32(enable);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
+ H2C_FUNC_RFK_TAS_OFFLOAD, 0, 0, len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev,
u8 h2c_class, u8 h2c_func, u8 *buf, u16 len,
bool rack, bool dack)
@@ -6826,8 +6968,9 @@ static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev,
const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_fw_info *fw_info = &rtwdev->fw;
const u32 *c2h_reg = chip->c2h_regs;
- u32 ret, timeout;
+ u32 timeout;
u8 i, val;
+ int ret;
info->id = RTW89_FWCMD_C2HREG_FUNC_NULL;
@@ -6865,7 +7008,7 @@ int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev,
struct rtw89_mac_h2c_info *h2c_info,
struct rtw89_mac_c2h_info *c2h_info)
{
- u32 ret;
+ int ret;
if (h2c_info && h2c_info->id != RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE)
lockdep_assert_wiphy(rtwdev->hw->wiphy);
@@ -7123,7 +7266,6 @@ static void rtw89_pno_scan_add_chan_ax(struct rtw89_dev *rtwdev,
struct rtw89_pktofld_info *info;
u8 probe_count = 0;
- ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK;
ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS;
ch_info->bw = RTW89_SCAN_WIDTH;
ch_info->tx_pkt = true;
@@ -7264,7 +7406,6 @@ static void rtw89_pno_scan_add_chan_be(struct rtw89_dev *rtwdev, int chan_type,
struct rtw89_pktofld_info *info;
u8 probe_count = 0, i;
- ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK;
ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS;
ch_info->bw = RTW89_SCAN_WIDTH;
ch_info->tx_null = false;
@@ -8585,9 +8726,10 @@ int rtw89_fw_h2c_wow_gtk_ofld(struct rtw89_dev *rtwdev,
goto fail;
}
- /* not support TKIP yet */
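+	/* Enable TKIP offload only when a TX MIC key has been configured. */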
h2c->w0 = le32_encode_bits(enable, RTW89_H2C_WOW_GTK_OFLD_W0_EN) |
- le32_encode_bits(0, RTW89_H2C_WOW_GTK_OFLD_W0_TKIP_EN) |
+ le32_encode_bits(!!memchr_inv(gtk_info->txmickey, 0,
+ sizeof(gtk_info->txmickey)),
+ RTW89_H2C_WOW_GTK_OFLD_W0_TKIP_EN) |
le32_encode_bits(gtk_info->igtk_keyid ? 1 : 0,
RTW89_H2C_WOW_GTK_OFLD_W0_IEEE80211W_EN) |
le32_encode_bits(macid, RTW89_H2C_WOW_GTK_OFLD_W0_MAC_ID) |
@@ -8679,19 +8821,30 @@ int rtw89_fw_h2c_wow_request_aoac(struct rtw89_dev *rtwdev)
static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb,
struct rtw89_wait_info *wait, unsigned int cond)
{
- int ret;
+ struct rtw89_wait_response *prep;
+ int ret = 0;
+
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
+
+ prep = rtw89_wait_for_cond_prep(wait, cond);
+ if (IS_ERR(prep))
+ goto out;
ret = rtw89_h2c_tx(rtwdev, skb, false);
if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
dev_kfree_skb_any(skb);
- return -EBUSY;
+ ret = -EBUSY;
+ goto out;
}
- if (test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags))
- return 1;
+ if (test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags)) {
+ ret = 1;
+ goto out;
+ }
- return rtw89_wait_for_cond(wait, cond);
+out:
+ return rtw89_wait_for_cond_eval(wait, prep, ret);
}
#define H2C_ADD_MCC_LEN 16
diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h
index 3de29137b113..ddebf7972068 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.h
+++ b/drivers/net/wireless/realtek/rtw89/fw.h
@@ -1602,6 +1602,28 @@ struct rtw89_h2c_bcn_upd_be {
#define RTW89_H2C_BCN_UPD_BE_W7_ECSA_OFST GENMASK(30, 16)
#define RTW89_H2C_BCN_UPD_BE_W7_PROTECTION_KEY_ID BIT(31)
+struct rtw89_h2c_tbtt_tuning {
+ __le32 w0;
+ __le32 w1;
+} __packed;
+
+#define RTW89_H2C_TBTT_TUNING_W0_BAND GENMASK(3, 0)
+#define RTW89_H2C_TBTT_TUNING_W0_PORT GENMASK(7, 4)
+#define RTW89_H2C_TBTT_TUNING_W1_SHIFT GENMASK(31, 0)
+
+struct rtw89_h2c_pwr_lvl {
+ __le32 w0;
+ __le32 w1;
+} __packed;
+
+#define RTW89_H2C_PWR_LVL_W0_MACID GENMASK(7, 0)
+#define RTW89_H2C_PWR_LVL_W0_BCN_TO_VAL GENMASK(15, 8)
+#define RTW89_H2C_PWR_LVL_W0_PS_LVL GENMASK(19, 16)
+#define RTW89_H2C_PWR_LVL_W0_TRX_LVL GENMASK(23, 20)
+#define RTW89_H2C_PWR_LVL_W0_BCN_TO_LVL GENMASK(27, 24)
+#define RTW89_H2C_PWR_LVL_W0_DTIM_TO_VAL GENMASK(31, 28)
+#define RTW89_H2C_PWR_LVL_W1_MACID_EXT GENMASK(7, 0)
+
struct rtw89_h2c_role_maintain {
__le32 w0;
};
@@ -3962,6 +3984,7 @@ enum rtw89_fw_element_id {
RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_RU_2GHZ = 24,
RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_RU_5GHZ = 25,
RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_RU_6GHZ = 26,
+ RTW89_FW_ELEMENT_ID_AFE_PWR_SEQ = 27,
RTW89_FW_ELEMENT_ID_NUM,
};
@@ -4067,6 +4090,30 @@ struct rtw89_fw_txpwr_track_cfg {
BIT(RTW89_FW_TXPWR_TRK_TYPE_2G_CCK_A_N) | \
BIT(RTW89_FW_TXPWR_TRK_TYPE_2G_CCK_A_P))
+enum rtw89_fw_afe_action {
+ RTW89_FW_AFE_ACTION_WRITE = 0,
+ RTW89_FW_AFE_ACTION_DELAY = 1,
+ RTW89_FW_AFE_ACTION_POLL = 2,
+};
+
+enum rtw89_fw_afe_cat {
+ RTW89_FW_AFE_CAT_BB = 0,
+ RTW89_FW_AFE_CAT_BB1 = 1,
+ RTW89_FW_AFE_CAT_MAC = 2,
+ RTW89_FW_AFE_CAT_MAC1 = 3,
+ RTW89_FW_AFE_CAT_AFEDIG = 4,
+ RTW89_FW_AFE_CAT_AFEDIG1 = 5,
+};
+
+enum rtw89_fw_afe_class {
+ RTW89_FW_AFE_CLASS_P0 = 0,
+ RTW89_FW_AFE_CLASS_P1 = 1,
+ RTW89_FW_AFE_CLASS_P2 = 2,
+ RTW89_FW_AFE_CLASS_P3 = 3,
+ RTW89_FW_AFE_CLASS_P4 = 4,
+ RTW89_FW_AFE_CLASS_CMN = 5,
+};
+
struct rtw89_fw_element_hdr {
__le32 id; /* enum rtw89_fw_element_id */
__le32 size; /* exclude header size */
@@ -4104,6 +4151,17 @@ struct rtw89_fw_element_hdr {
u8 rsvd1[3];
__le16 offset[];
} __packed rfk_log_fmt;
+ struct {
+ u8 rsvd[8];
+ struct rtw89_phy_afe_info {
+ __le32 action; /* enum rtw89_fw_afe_action */
+ __le32 cat; /* enum rtw89_fw_afe_cat */
+ __le32 class; /* enum rtw89_fw_afe_class */
+ __le32 addr;
+ __le32 mask;
+ __le32 val;
+ } __packed infos[];
+ } __packed afe;
struct __rtw89_fw_txpwr_element txpwr;
struct __rtw89_fw_regd_element regd;
} __packed u;
@@ -4201,6 +4259,8 @@ enum rtw89_ps_h2c_func {
H2C_FUNC_MAC_LPS_PARM = 0x0,
H2C_FUNC_P2P_ACT = 0x1,
H2C_FUNC_IPS_CFG = 0x3,
+ H2C_FUNC_PS_POWER_LEVEL = 0x7,
+ H2C_FUNC_TBTT_TUNING = 0xA,
NUM_OF_RTW89_PS_H2C_FUNC,
};
@@ -4370,6 +4430,7 @@ enum rtw89_rfk_offload_h2c_func {
H2C_FUNC_RFK_DACK_OFFLOAD = 0x5,
H2C_FUNC_RFK_RXDCK_OFFLOAD = 0x6,
H2C_FUNC_RFK_PRE_NOTIFY = 0x8,
+ H2C_FUNC_RFK_TAS_OFFLOAD = 0x9,
};
struct rtw89_fw_h2c_rf_get_mccch {
@@ -4551,6 +4612,10 @@ struct rtw89_h2c_rf_rxdck_v0 {
u8 rxdck_dbg_en;
} __packed;
+struct rtw89_h2c_rf_tas {
+ __le32 enable;
+} __packed;
+
struct rtw89_h2c_rf_rxdck {
struct rtw89_h2c_rf_rxdck_v0 v0;
u8 is_chl_k;
@@ -4683,12 +4748,16 @@ struct rtw89_c2h_rfk_report {
u8 version;
} __packed;
-struct rtw89_c2h_rf_tas_info {
- struct rtw89_c2h_hdr hdr;
+struct rtw89_c2h_rf_tas_rpt_log {
__le32 cur_idx;
__le16 txpwr_history[20];
} __packed;
+struct rtw89_c2h_rf_tas_info {
+ struct rtw89_c2h_hdr hdr;
+ struct rtw89_c2h_rf_tas_rpt_log content;
+} __packed;
+
#define RTW89_FW_RSVD_PLE_SIZE 0x800
#define RTW89_FW_BACKTRACE_INFO_SIZE 8
@@ -4750,6 +4819,9 @@ int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link);
int rtw89_fw_h2c_update_beacon_be(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link);
+int rtw89_fw_h2c_tbtt_tuning(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link, u32 offset);
+int rtw89_fw_h2c_pwr_lvl(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link);
int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif_link *vif,
struct rtw89_sta_link *rtwsta_link, const u8 *scan_mac_addr);
int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev,
@@ -4826,6 +4898,7 @@ int rtw89_fw_h2c_rf_dack(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
const struct rtw89_chan *chan);
int rtw89_fw_h2c_rf_rxdck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
const struct rtw89_chan *chan, bool is_chl_k);
+int rtw89_fw_h2c_rf_tas_trigger(struct rtw89_dev *rtwdev, bool enable);
int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev,
u8 h2c_class, u8 h2c_func, u8 *buf, u16 len,
bool rack, bool dack);
diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c
index 5a5da9d9c0c5..fd11b8fb3c89 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.c
+++ b/drivers/net/wireless/realtek/rtw89/mac.c
@@ -9,6 +9,7 @@
#include "fw.h"
#include "mac.h"
#include "pci.h"
+#include "phy.h"
#include "ps.h"
#include "reg.h"
#include "util.h"
@@ -177,7 +178,7 @@ int rtw89_mac_dle_dfi_qempty_cfg(struct rtw89_dev *rtwdev,
struct rtw89_mac_dle_dfi_qempty *qempty)
{
struct rtw89_mac_dle_dfi_ctrl ctrl;
- u32 ret;
+ int ret;
ctrl.type = qempty->dle_type;
ctrl.target = DLE_DFI_TYPE_QEMPTY;
@@ -985,7 +986,7 @@ static int hfc_upd_ch_info(struct rtw89_dev *rtwdev, u8 ch)
struct rtw89_hfc_ch_info *info = param->ch_info;
const struct rtw89_hfc_ch_cfg *cfg = param->ch_cfg;
u32 val;
- u32 ret;
+ int ret;
ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL);
if (ret)
@@ -1176,8 +1177,8 @@ int rtw89_mac_hfc_init(struct rtw89_dev *rtwdev, bool reset, bool en, bool h2c_e
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
const struct rtw89_chip_info *chip = rtwdev->chip;
u32 dma_ch_mask = chip->dma_ch_mask;
+ int ret = 0;
u8 ch;
- u32 ret = 0;
if (reset)
ret = hfc_reset_param(rtwdev);
@@ -1193,7 +1194,7 @@ int rtw89_mac_hfc_init(struct rtw89_dev *rtwdev, bool reset, bool en, bool h2c_e
if (!en && h2c_en) {
mac->hfc_h2c_cfg(rtwdev);
mac->hfc_func_en(rtwdev, en, h2c_en);
- return ret;
+ return 0;
}
for (ch = RTW89_DMA_ACH0; ch < RTW89_DMA_H2C; ch++) {
@@ -2413,7 +2414,7 @@ static int addr_cam_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx)
static int scheduler_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx)
{
- u32 ret;
+ int ret;
u32 reg;
u32 val;
@@ -2954,7 +2955,7 @@ static int rtw89_mac_read_phycap(struct rtw89_dev *rtwdev,
struct rtw89_mac_h2c_info h2c_info = {};
enum rtw89_mac_c2h_type c2h_type;
u8 content_len;
- u32 ret;
+ int ret;
if (chip->chip_gen == RTW89_CHIP_AX)
content_len = 0;
@@ -3105,10 +3106,10 @@ int rtw89_mac_setup_phycap(struct rtw89_dev *rtwdev)
static int rtw89_hw_sch_tx_en_h2c(struct rtw89_dev *rtwdev, u8 band,
u16 tx_en_u16, u16 mask_u16)
{
- u32 ret;
struct rtw89_mac_c2h_info c2h_info = {0};
struct rtw89_mac_h2c_info h2c_info = {0};
struct rtw89_h2creg_sch_tx_en *sch_tx_en = &h2c_info.u.sch_tx_en;
+ int ret;
h2c_info.id = RTW89_FWCMD_H2CREG_FUNC_SCH_TX_EN;
h2c_info.content_len = sizeof(*sch_tx_en) - RTW89_H2CREG_HDR_LEN;
@@ -4197,6 +4198,7 @@ static const struct rtw89_port_reg rtw89_port_base_ax = {
.ptcl_dbg = R_AX_PTCL_DBG,
.ptcl_dbg_info = R_AX_PTCL_DBG_INFO,
.bcn_drop_all = R_AX_BCN_DROP_ALL0,
+ .bcn_psr_rpt = R_AX_BCN_PSR_RPT_P0,
.hiq_win = {R_AX_P0MB_HGQ_WINDOW_CFG_0, R_AX_PORT_HGQ_WINDOW_CFG,
R_AX_PORT_HGQ_WINDOW_CFG + 1, R_AX_PORT_HGQ_WINDOW_CFG + 2,
R_AX_PORT_HGQ_WINDOW_CFG + 3},
@@ -4649,25 +4651,28 @@ static void rtw89_mac_port_cfg_bcn_early(struct rtw89_dev *rtwdev,
BCN_ERLY_DEF);
}
-static void rtw89_mac_port_cfg_tbtt_shift(struct rtw89_dev *rtwdev,
- struct rtw89_vif_link *rtwvif_link)
+static void rtw89_mac_port_cfg_bcn_psr_rpt(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link)
{
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
const struct rtw89_port_reg *p = mac->port_base;
- u16 val;
+ struct ieee80211_bss_conf *bss_conf;
+ u8 bssid_index;
+ u32 reg;
- if (rtwdev->chip->chip_id != RTL8852C)
- return;
+ rcu_read_lock();
- if (rtwvif_link->wifi_role != RTW89_WIFI_ROLE_P2P_CLIENT &&
- rtwvif_link->wifi_role != RTW89_WIFI_ROLE_STATION)
- return;
+ bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
+ if (bss_conf->nontransmitted)
+ bssid_index = bss_conf->bssid_index;
+ else
+ bssid_index = 0;
- val = FIELD_PREP(B_AX_TBTT_SHIFT_OFST_MAG, 1) |
- B_AX_TBTT_SHIFT_OFST_SIGN;
+ rcu_read_unlock();
- rtw89_write16_port_mask(rtwdev, rtwvif_link, p->tbtt_shift,
- B_AX_TBTT_SHIFT_OFST_MASK, val);
+ reg = rtw89_mac_reg_by_idx(rtwdev, p->bcn_psr_rpt + rtwvif_link->port * 4,
+ rtwvif_link->mac_idx);
+ rtw89_write32_mask(rtwdev, reg, B_AX_BCAID_P0_MASK, bssid_index);
}
void rtw89_mac_port_tsf_sync(struct rtw89_dev *rtwdev,
@@ -4820,13 +4825,13 @@ int rtw89_mac_port_update(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvi
rtw89_mac_port_cfg_bcn_hold_time(rtwdev, rtwvif_link);
rtw89_mac_port_cfg_bcn_mask_area(rtwdev, rtwvif_link);
rtw89_mac_port_cfg_tbtt_early(rtwdev, rtwvif_link);
- rtw89_mac_port_cfg_tbtt_shift(rtwdev, rtwvif_link);
rtw89_mac_port_cfg_bss_color(rtwdev, rtwvif_link);
rtw89_mac_port_cfg_mbssid(rtwdev, rtwvif_link);
rtw89_mac_port_cfg_func_en(rtwdev, rtwvif_link, true);
rtw89_mac_port_tsf_resync_all(rtwdev);
fsleep(BCN_ERLY_SET_DLY);
rtw89_mac_port_cfg_bcn_early(rtwdev, rtwvif_link);
+ rtw89_mac_port_cfg_bcn_psr_rpt(rtwdev, rtwvif_link);
return 0;
}
@@ -5041,6 +5046,8 @@ rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb,
if (op_chan) {
rtw89_mac_enable_aps_bcn_by_chan(rtwdev, op_chan, false);
ieee80211_stop_queues(rtwdev->hw);
+ } else {
+ rtw89_phy_nhm_get_result(rtwdev, band, chan);
}
return;
case RTW89_SCAN_END_SCAN_NOTIFY:
@@ -5071,6 +5078,7 @@ rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb,
RTW89_CHANNEL_WIDTH_20);
rtw89_assign_entity_chan(rtwdev, rtwvif_link->chanctx_idx,
&new);
+ rtw89_phy_nhm_trigger(rtwdev);
}
break;
default:
@@ -5236,6 +5244,11 @@ rtw89_mac_c2h_bcn_cnt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
}
static void
+rtw89_mac_c2h_bcn_upd_done(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
+{
+}
+
+static void
rtw89_mac_c2h_pkt_ofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb_c2h,
u32 len)
{
@@ -5258,6 +5271,11 @@ rtw89_mac_c2h_pkt_ofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb_c2h,
}
static void
+rtw89_mac_c2h_bcn_resend(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
+{
+}
+
+static void
rtw89_mac_c2h_tx_duty_rpt(struct rtw89_dev *rtwdev, struct sk_buff *skb_c2h, u32 len)
{
struct rtw89_c2h_tx_duty_rpt *c2h =
@@ -5646,7 +5664,7 @@ void (* const rtw89_mac_c2h_ofld_handler[])(struct rtw89_dev *rtwdev,
[RTW89_MAC_C2H_FUNC_EFUSE_DUMP] = NULL,
[RTW89_MAC_C2H_FUNC_READ_RSP] = NULL,
[RTW89_MAC_C2H_FUNC_PKT_OFLD_RSP] = rtw89_mac_c2h_pkt_ofld_rsp,
- [RTW89_MAC_C2H_FUNC_BCN_RESEND] = NULL,
+ [RTW89_MAC_C2H_FUNC_BCN_RESEND] = rtw89_mac_c2h_bcn_resend,
[RTW89_MAC_C2H_FUNC_MACID_PAUSE] = rtw89_mac_c2h_macid_pause,
[RTW89_MAC_C2H_FUNC_SCANOFLD_RSP] = rtw89_mac_c2h_scanofld_rsp,
[RTW89_MAC_C2H_FUNC_TX_DUTY_RPT] = rtw89_mac_c2h_tx_duty_rpt,
@@ -5661,6 +5679,7 @@ void (* const rtw89_mac_c2h_info_handler[])(struct rtw89_dev *rtwdev,
[RTW89_MAC_C2H_FUNC_DONE_ACK] = rtw89_mac_c2h_done_ack,
[RTW89_MAC_C2H_FUNC_C2H_LOG] = rtw89_mac_c2h_log,
[RTW89_MAC_C2H_FUNC_BCN_CNT] = rtw89_mac_c2h_bcn_cnt,
+ [RTW89_MAC_C2H_FUNC_BCN_UPD_DONE] = rtw89_mac_c2h_bcn_upd_done,
};
static
@@ -5813,12 +5832,11 @@ void rtw89_mac_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
case RTW89_MAC_C2H_CLASS_ROLE:
return;
default:
- rtw89_info(rtwdev, "MAC c2h class %d not support\n", class);
- return;
+ break;
}
if (!handler) {
- rtw89_info(rtwdev, "MAC c2h class %d func %d not support\n", class,
- func);
+ rtw89_info_once(rtwdev, "MAC c2h class %d func %d not support\n",
+ class, func);
return;
}
handler(rtwdev, skb, len);
@@ -6720,7 +6738,7 @@ int rtw89_mac_set_hw_muedca_ctrl(struct rtw89_dev *rtwdev,
u8 mac_idx = rtwvif_link->mac_idx;
u16 set = mac->muedca_ctrl.mask;
u32 reg;
- u32 ret;
+ int ret;
ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
if (ret)
@@ -6862,7 +6880,7 @@ int rtw89_mac_cpu_io_rx(struct rtw89_dev *rtwdev, bool wow_enable)
{
struct rtw89_mac_h2c_info h2c_info = {};
struct rtw89_mac_c2h_info c2h_info = {};
- u32 ret;
+ int ret;
if (RTW89_CHK_FW_FEATURE(NO_WOW_CPU_IO_RX, &rtwdev->fw))
return 0;
diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h
index 241e89983c4a..25fe5e5c8a97 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.h
+++ b/drivers/net/wireless/realtek/rtw89/mac.h
@@ -419,6 +419,7 @@ enum rtw89_mac_c2h_info_func {
RTW89_MAC_C2H_FUNC_DONE_ACK,
RTW89_MAC_C2H_FUNC_C2H_LOG,
RTW89_MAC_C2H_FUNC_BCN_CNT,
+ RTW89_MAC_C2H_FUNC_BCN_UPD_DONE = 0x06,
RTW89_MAC_C2H_FUNC_INFO_MAX,
};
diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c
index c1ca6d741b32..7b04183a3a5d 100644
--- a/drivers/net/wireless/realtek/rtw89/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw89/mac80211.c
@@ -1837,6 +1837,40 @@ static void rtw89_set_rekey_data(struct ieee80211_hw *hw,
}
#endif
+static int rtw89_ops_get_survey(struct ieee80211_hw *hw, int idx,
+ struct survey_info *survey)
+{
+ struct ieee80211_conf *conf = &hw->conf;
+ struct rtw89_dev *rtwdev = hw->priv;
+ struct rtw89_bb_ctx *bb;
+
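+	/* Index 0 reports the current channel; subsequent indexes drain NHM
+	 * noise reports queued during scans.
+	 */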
+ if (idx == 0) {
+ survey->channel = conf->chandef.chan;
+ survey->filled = SURVEY_INFO_NOISE_DBM;
+ survey->noise = RTW89_NOISE_DEFAULT;
+
+ return 0;
+ }
+
+ rtw89_for_each_active_bb(rtwdev, bb) {
+ struct rtw89_env_monitor_info *env = &bb->env_monitor;
+ struct rtw89_nhm_report *rpt;
+
+ rpt = list_first_entry_or_null(&env->nhm_rpt_list, typeof(*rpt), list);
+ if (!rpt)
+ continue;
+
+ survey->filled = SURVEY_INFO_NOISE_DBM;
+ survey->noise = rpt->noise - MAX_RSSI;
+ survey->channel = rpt->channel;
+ list_del_init(&rpt->list);
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
static void rtw89_ops_rfkill_poll(struct ieee80211_hw *hw)
{
struct rtw89_dev *rtwdev = hw->priv;
@@ -1869,6 +1903,7 @@ const struct ieee80211_ops rtw89_ops = {
.sta_state = rtw89_ops_sta_state,
.set_key = rtw89_ops_set_key,
.ampdu_action = rtw89_ops_ampdu_action,
+ .get_survey = rtw89_ops_get_survey,
.set_rts_threshold = rtw89_ops_set_rts_threshold,
.sta_statistics = rtw89_ops_sta_statistics,
.flush = rtw89_ops_flush,
diff --git a/drivers/net/wireless/realtek/rtw89/mac_be.c b/drivers/net/wireless/realtek/rtw89/mac_be.c
index 0078080b3999..ef69672b6862 100644
--- a/drivers/net/wireless/realtek/rtw89/mac_be.c
+++ b/drivers/net/wireless/realtek/rtw89/mac_be.c
@@ -56,6 +56,7 @@ static const struct rtw89_port_reg rtw89_port_base_be = {
.ptcl_dbg = R_BE_PTCL_DBG,
.ptcl_dbg_info = R_BE_PTCL_DBG_INFO,
.bcn_drop_all = R_BE_BCN_DROP_ALL0,
+ .bcn_psr_rpt = R_BE_BCN_PSR_RPT_P0,
.hiq_win = {R_BE_P0MB_HGQ_WINDOW_CFG_0, R_BE_PORT_HGQ_WINDOW_CFG,
R_BE_PORT_HGQ_WINDOW_CFG + 1, R_BE_PORT_HGQ_WINDOW_CFG + 2,
R_BE_PORT_HGQ_WINDOW_CFG + 3},
diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c
index a669f2f843aa..0ee5f8579447 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.c
+++ b/drivers/net/wireless/realtek/rtw89/pci.c
@@ -134,7 +134,7 @@ static void rtw89_pci_release_fwcmd(struct rtw89_dev *rtwdev,
static void rtw89_pci_reclaim_tx_fwcmd(struct rtw89_dev *rtwdev,
struct rtw89_pci *rtwpci)
{
- struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12];
+ struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx.rings[RTW89_TXCH_CH12];
u32 cnt;
cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring);
@@ -440,7 +440,7 @@ static int rtw89_pci_poll_rxq_dma(struct rtw89_dev *rtwdev,
int countdown = rtwdev->napi_budget_countdown;
u32 cnt;
- rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RXQ];
+ rx_ring = &rtwpci->rx.rings[RTW89_RXCH_RXQ];
cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
if (!cnt)
@@ -464,7 +464,8 @@ static void rtw89_pci_tx_status(struct rtw89_dev *rtwdev,
struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb);
struct ieee80211_tx_info *info;
- rtw89_core_tx_wait_complete(rtwdev, skb_data, tx_status == RTW89_TX_DONE);
+ if (rtw89_core_tx_wait_complete(rtwdev, skb_data, tx_status == RTW89_TX_DONE))
+ return;
info = IEEE80211_SKB_CB(skb);
ieee80211_tx_info_clear_status(info);
@@ -568,31 +569,52 @@ static void rtw89_pci_release_txwd_skb(struct rtw89_dev *rtwdev,
rtw89_pci_enqueue_txwd(tx_ring, txwd);
}
-static void rtw89_pci_release_rpp(struct rtw89_dev *rtwdev,
- struct rtw89_pci_rpp_fmt *rpp)
+void rtw89_pci_parse_rpp(struct rtw89_dev *rtwdev, void *_rpp,
+ struct rtw89_pci_rpp_info *rpp_info)
+{
+ const struct rtw89_pci_rpp_fmt *rpp = _rpp;
+
+ rpp_info->seq = le32_get_bits(rpp->dword, RTW89_PCI_RPP_SEQ);
+ rpp_info->qsel = le32_get_bits(rpp->dword, RTW89_PCI_RPP_QSEL);
+ rpp_info->tx_status = le32_get_bits(rpp->dword, RTW89_PCI_RPP_TX_STATUS);
+ rpp_info->txch = rtw89_chip_get_ch_dma(rtwdev, rpp_info->qsel);
+}
+EXPORT_SYMBOL(rtw89_pci_parse_rpp);
+
+void rtw89_pci_parse_rpp_v1(struct rtw89_dev *rtwdev, void *_rpp,
+ struct rtw89_pci_rpp_info *rpp_info)
+{
+ const struct rtw89_pci_rpp_fmt_v1 *rpp = _rpp;
+
+ rpp_info->seq = le32_get_bits(rpp->w0, RTW89_PCI_RPP_W0_PCIE_SEQ_V1_MASK);
+ rpp_info->qsel = le32_get_bits(rpp->w1, RTW89_PCI_RPP_W1_QSEL_V1_MASK);
+ rpp_info->tx_status = le32_get_bits(rpp->w0, RTW89_PCI_RPP_W0_TX_STATUS_V1_MASK);
+ rpp_info->txch = le32_get_bits(rpp->w0, RTW89_PCI_RPP_W0_DMA_CH_MASK);
+}
+EXPORT_SYMBOL(rtw89_pci_parse_rpp_v1);
+
+static void rtw89_pci_release_rpp(struct rtw89_dev *rtwdev, void *rpp)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
- struct rtw89_pci_tx_ring *tx_ring;
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ struct rtw89_pci_rpp_info rpp_info = {};
struct rtw89_pci_tx_wd_ring *wd_ring;
+ struct rtw89_pci_tx_ring *tx_ring;
struct rtw89_pci_tx_wd *txwd;
- u16 seq;
- u8 qsel, tx_status, txch;
- seq = le32_get_bits(rpp->dword, RTW89_PCI_RPP_SEQ);
- qsel = le32_get_bits(rpp->dword, RTW89_PCI_RPP_QSEL);
- tx_status = le32_get_bits(rpp->dword, RTW89_PCI_RPP_TX_STATUS);
- txch = rtw89_core_get_ch_dma(rtwdev, qsel);
+ info->parse_rpp(rtwdev, rpp, &rpp_info);
- if (txch == RTW89_TXCH_CH12) {
+ if (rpp_info.txch == RTW89_TXCH_CH12) {
rtw89_warn(rtwdev, "should no fwcmd release report\n");
return;
}
- tx_ring = &rtwpci->tx_rings[txch];
+ tx_ring = &rtwpci->tx.rings[rpp_info.txch];
wd_ring = &tx_ring->wd_ring;
- txwd = &wd_ring->pages[seq];
+ txwd = &wd_ring->pages[rpp_info.seq];
- rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, seq, tx_status);
+ rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, rpp_info.seq,
+ rpp_info.tx_status);
}
static void rtw89_pci_release_pending_txwd_skb(struct rtw89_dev *rtwdev,
@@ -617,13 +639,14 @@ static u32 rtw89_pci_release_tx_skbs(struct rtw89_dev *rtwdev,
u32 max_cnt)
{
struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
- struct rtw89_pci_rx_info *rx_info;
- struct rtw89_pci_rpp_fmt *rpp;
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
struct rtw89_rx_desc_info desc_info = {};
+ struct rtw89_pci_rx_info *rx_info;
struct sk_buff *skb;
- u32 cnt = 0;
- u32 rpp_size = sizeof(struct rtw89_pci_rpp_fmt);
+ void *rpp;
u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info);
+ u32 rpp_size = info->rpp_fmt_size;
+ u32 cnt = 0;
u32 skb_idx;
u32 offset;
int ret;
@@ -649,7 +672,7 @@ static u32 rtw89_pci_release_tx_skbs(struct rtw89_dev *rtwdev,
/* first segment has RX desc */
offset = desc_info.offset + desc_info.rxd_len;
for (; offset + rpp_size <= rx_info->len; offset += rpp_size) {
- rpp = (struct rtw89_pci_rpp_fmt *)(skb->data + offset);
+ rpp = skb->data + offset;
rtw89_pci_release_rpp(rtwdev, rpp);
}
@@ -694,7 +717,7 @@ static int rtw89_pci_poll_rpq_dma(struct rtw89_dev *rtwdev,
u32 cnt;
int work_done;
- rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ];
+ rx_ring = &rtwpci->rx.rings[RTW89_RXCH_RPQ];
spin_lock_bh(&rtwpci->trx_lock);
@@ -724,7 +747,7 @@ static void rtw89_pci_isr_rxd_unavail(struct rtw89_dev *rtwdev,
int i;
for (i = 0; i < RTW89_RXCH_NUM; i++) {
- rx_ring = &rtwpci->rx_rings[i];
+ rx_ring = &rtwpci->rx.rings[i];
bd_ring = &rx_ring->bd_ring;
reg_idx = rtw89_read32(rtwdev, bd_ring->addr.idx);
@@ -797,6 +820,29 @@ void rtw89_pci_recognize_intrs_v2(struct rtw89_dev *rtwdev,
}
EXPORT_SYMBOL(rtw89_pci_recognize_intrs_v2);
+void rtw89_pci_recognize_intrs_v3(struct rtw89_dev *rtwdev,
+ struct rtw89_pci *rtwpci,
+ struct rtw89_pci_isrs *isrs)
+{
+ isrs->ind_isrs = rtw89_read32(rtwdev, R_BE_PCIE_HISR) & rtwpci->ind_intrs;
+ isrs->halt_c2h_isrs = isrs->ind_isrs & B_BE_HS0ISR_IND_INT ?
+ rtw89_read32(rtwdev, R_BE_HISR0) & rtwpci->halt_c2h_intrs : 0;
+ isrs->isrs[1] = rtw89_read32(rtwdev, R_BE_PCIE_DMA_ISR) & rtwpci->intrs[1];
+
+ /* isrs[0] is not used, so borrow it to store the RDU status and share
+ * the common flow in rtw89_pci_interrupt_threadfn().
+ */
+ isrs->isrs[0] = isrs->isrs[1] & (B_BE_PCIE_RDU_CH1_INT |
+ B_BE_PCIE_RDU_CH0_INT);
+
+ if (isrs->halt_c2h_isrs)
+ rtw89_write32(rtwdev, R_BE_HISR0, isrs->halt_c2h_isrs);
+ if (isrs->isrs[1])
+ rtw89_write32(rtwdev, R_BE_PCIE_DMA_ISR, isrs->isrs[1]);
+ rtw89_write32(rtwdev, R_BE_PCIE_HISR, isrs->ind_isrs);
+}
+EXPORT_SYMBOL(rtw89_pci_recognize_intrs_v3);
+
void rtw89_pci_enable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs);
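A note on the isrs[0] trick above: in the v3 layout both the RX-data and the RDU status bits live in the single R_BE_PCIE_DMA_ISR register read into isrs[1], while the shared rtw89_pci_interrupt_threadfn() (see the hunk below) still tests isrs[0] against isr_def->isr_rdu. Copying the two RDU bits into the otherwise-unused isrs[0] keeps the common flow working, and matches rtw89_pci_isr_be_v1.isr_rdu = B_BE_PCIE_RDU_CH1_INT | B_BE_PCIE_RDU_CH0_INT added in pci_be.c further below.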
@@ -844,6 +890,21 @@ void rtw89_pci_disable_intr_v2(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpc
}
EXPORT_SYMBOL(rtw89_pci_disable_intr_v2);
+void rtw89_pci_enable_intr_v3(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
+{
+ rtw89_write32(rtwdev, R_BE_HIMR0, rtwpci->halt_c2h_intrs);
+ rtw89_write32(rtwdev, R_BE_PCIE_DMA_IMR_0_V1, rtwpci->intrs[1]);
+ rtw89_write32(rtwdev, R_BE_PCIE_HIMR0, rtwpci->ind_intrs);
+}
+EXPORT_SYMBOL(rtw89_pci_enable_intr_v3);
+
+void rtw89_pci_disable_intr_v3(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
+{
+ rtw89_write32(rtwdev, R_BE_PCIE_HIMR0, 0);
+ rtw89_write32(rtwdev, R_BE_PCIE_DMA_IMR_0_V1, 0);
+}
+EXPORT_SYMBOL(rtw89_pci_disable_intr_v3);
+
static void rtw89_pci_ops_recovery_start(struct rtw89_dev *rtwdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
@@ -885,7 +946,7 @@ static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev)
struct rtw89_dev *rtwdev = dev;
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
const struct rtw89_pci_info *info = rtwdev->pci_info;
- const struct rtw89_pci_gen_def *gen_def = info->gen_def;
+ const struct rtw89_pci_isr_def *isr_def = info->isr_def;
struct rtw89_pci_isrs isrs;
unsigned long flags;
@@ -893,13 +954,13 @@ static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev)
rtw89_chip_recognize_intrs(rtwdev, rtwpci, &isrs);
spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
- if (unlikely(isrs.isrs[0] & gen_def->isr_rdu))
+ if (unlikely(isrs.isrs[0] & isr_def->isr_rdu))
rtw89_pci_isr_rxd_unavail(rtwdev, rtwpci);
- if (unlikely(isrs.halt_c2h_isrs & gen_def->isr_halt_c2h))
+ if (unlikely(isrs.halt_c2h_isrs & isr_def->isr_halt_c2h))
rtw89_ser_notify(rtwdev, rtw89_mac_get_err_status(rtwdev));
- if (unlikely(isrs.halt_c2h_isrs & gen_def->isr_wdt_timeout))
+ if (unlikely(isrs.halt_c2h_isrs & isr_def->isr_wdt_timeout))
rtw89_ser_notify(rtwdev, MAC_AX_ERR_L2_ERR_WDT_TIMEOUT_INT);
if (unlikely(rtwpci->under_recovery))
@@ -950,6 +1011,24 @@ exit:
return irqret;
}
+#define DEF_TXCHADDRS_TYPE3(gen, ch_idx, txch, v...) \
+ [RTW89_TXCH_##ch_idx] = { \
+ .num = R_##gen##_##txch##_TXBD_CFG, \
+ .idx = R_##gen##_##txch##_TXBD_IDX ##v, \
+ .bdram = 0, \
+ .desa_l = 0, \
+ .desa_h = 0, \
+ }
+
+#define DEF_TXCHADDRS_TYPE3_GRP_BASE(gen, ch_idx, txch, grp, v...) \
+ [RTW89_TXCH_##ch_idx] = { \
+ .num = R_##gen##_##txch##_TXBD_CFG, \
+ .idx = R_##gen##_##txch##_TXBD_IDX ##v, \
+ .bdram = 0, \
+ .desa_l = R_##gen##_##grp##_TXBD_DESA_L, \
+ .desa_h = R_##gen##_##grp##_TXBD_DESA_H, \
+ }
+
#define DEF_TXCHADDRS_TYPE2(gen, ch_idx, txch, v...) \
[RTW89_TXCH_##ch_idx] = { \
.num = R_##gen##_##txch##_TXBD_NUM ##v, \
@@ -977,6 +1056,22 @@ exit:
.desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \
}
+#define DEF_RXCHADDRS_TYPE3(gen, ch_idx, rxch, v...) \
+ [RTW89_RXCH_##ch_idx] = { \
+ .num = R_##gen##_RX_##rxch##_RXBD_CONFIG, \
+ .idx = R_##gen##_##ch_idx##0_RXBD_IDX ##v, \
+ .desa_l = 0, \
+ .desa_h = 0, \
+ }
+
+#define DEF_RXCHADDRS_TYPE3_GRP_BASE(gen, ch_idx, rxch, grp, v...) \
+ [RTW89_RXCH_##ch_idx] = { \
+ .num = R_##gen##_RX_##rxch##_RXBD_CONFIG, \
+ .idx = R_##gen##_##ch_idx##0_RXBD_IDX ##v, \
+ .desa_l = R_##gen##_##grp##_RXBD_DESA_L, \
+ .desa_h = R_##gen##_##grp##_RXBD_DESA_H, \
+ }
+
#define DEF_RXCHADDRS(gen, ch_idx, rxch, v...) \
[RTW89_RXCH_##ch_idx] = { \
.num = R_##gen##_##rxch##_RXBD_NUM ##v, \
@@ -1054,8 +1149,36 @@ const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_be = {
};
EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_be);
+const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_be_v1 = {
+ .tx = {
+ DEF_TXCHADDRS_TYPE3_GRP_BASE(BE, ACH0, CH0, ACQ, _V1),
+ /* no CH1 */
+ DEF_TXCHADDRS_TYPE3(BE, ACH2, CH2, _V1),
+ /* no CH3 */
+ DEF_TXCHADDRS_TYPE3(BE, ACH4, CH4, _V1),
+ /* no CH5 */
+ DEF_TXCHADDRS_TYPE3(BE, ACH6, CH6, _V1),
+ /* no CH7 */
+ DEF_TXCHADDRS_TYPE3_GRP_BASE(BE, CH8, CH8, NACQ, _V1),
+ /* no CH9 */
+ DEF_TXCHADDRS_TYPE3(BE, CH10, CH10, _V1),
+ /* no CH11 */
+ DEF_TXCHADDRS_TYPE3(BE, CH12, CH12, _V1),
+ },
+ .rx = {
+ DEF_RXCHADDRS_TYPE3_GRP_BASE(BE, RXQ, CH0, HOST0, _V1),
+ DEF_RXCHADDRS_TYPE3(BE, RPQ, CH1, _V1),
+ },
+};
+EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_be_v1);
+
+#undef DEF_TXCHADDRS_TYPE3
+#undef DEF_TXCHADDRS_TYPE3_GRP_BASE
+#undef DEF_TXCHADDRS_TYPE2
#undef DEF_TXCHADDRS_TYPE1
#undef DEF_TXCHADDRS
+#undef DEF_RXCHADDRS_TYPE3
+#undef DEF_RXCHADDRS_TYPE3_GRP_BASE
#undef DEF_RXCHADDRS
static int rtw89_pci_get_txch_addrs(struct rtw89_dev *rtwdev,
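For reference, the TYPE3 macros above paste tokens so that, for example, DEF_TXCHADDRS_TYPE3_GRP_BASE(BE, ACH0, CH0, ACQ, _V1) expands to:

	[RTW89_TXCH_ACH0] = {
		.num = R_BE_CH0_TXBD_CFG,
		.idx = R_BE_CH0_TXBD_IDX_V1,
		.bdram = 0,
		.desa_l = R_BE_ACQ_TXBD_DESA_L,
		.desa_h = R_BE_ACQ_TXBD_DESA_H,
	},

Only the first ring of each group (ACQ, NACQ, HOST0) carries a descriptor base address; the plain TYPE3 entries leave desa_l/desa_h zero, and those rings are located solely by the offset encoded into their *_TXBD_CFG register.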
@@ -1101,7 +1224,7 @@ static
u32 __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(struct rtw89_dev *rtwdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
- struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12];
+ struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx.rings[RTW89_TXCH_CH12];
u32 cnt;
spin_lock_bh(&rtwpci->trx_lock);
@@ -1117,7 +1240,7 @@ u32 __rtw89_pci_check_and_reclaim_tx_resource_noio(struct rtw89_dev *rtwdev,
u8 txch)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
- struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
+ struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx.rings[txch];
struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
u32 cnt;
@@ -1134,7 +1257,7 @@ static u32 __rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
u8 txch)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
- struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
+ struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx.rings[txch];
struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
const struct rtw89_chip_info *chip = rtwdev->chip;
u32 bd_cnt, wd_cnt, min_cnt = 0;
@@ -1142,7 +1265,7 @@ static u32 __rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
enum rtw89_debug_mask debug_mask;
u32 cnt;
- rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ];
+ rx_ring = &rtwpci->rx.rings[RTW89_RXCH_RPQ];
spin_lock_bh(&rtwpci->trx_lock);
bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
@@ -1227,7 +1350,7 @@ static void rtw89_pci_tx_bd_ring_update(struct rtw89_dev *rtwdev, struct rtw89_p
static void rtw89_pci_ops_tx_kick_off(struct rtw89_dev *rtwdev, u8 txch)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
- struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
+ struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx.rings[txch];
if (rtwdev->hci.paused) {
set_bit(txch, rtwpci->kick_map);
@@ -1247,7 +1370,7 @@ static void rtw89_pci_tx_kick_off_pending(struct rtw89_dev *rtwdev)
if (!test_and_clear_bit(txch, rtwpci->kick_map))
continue;
- tx_ring = &rtwpci->tx_rings[txch];
+ tx_ring = &rtwpci->tx.rings[txch];
__rtw89_pci_tx_kick_off(rtwdev, tx_ring);
}
}
@@ -1255,7 +1378,7 @@ static void rtw89_pci_tx_kick_off_pending(struct rtw89_dev *rtwdev)
static void __pci_flush_txch(struct rtw89_dev *rtwdev, u8 txch, bool drop)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
- struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
+ struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx.rings[txch];
struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
u32 cur_idx, cur_rp;
u8 i;
@@ -1371,7 +1494,6 @@ static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev,
struct pci_dev *pdev = rtwpci->pdev;
struct sk_buff *skb = tx_req->skb;
struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb);
- struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb);
bool en_wd_info = desc_info->en_wd_info;
u32 txwd_len;
u32 txwp_len;
@@ -1387,7 +1509,6 @@ static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev,
}
tx_data->dma = dma;
- rcu_assign_pointer(skb_data->wait, NULL);
txwp_len = sizeof(*txwp_info);
txwd_len = chip->txwd_body_size;
@@ -1521,7 +1642,7 @@ static int rtw89_pci_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_req
return -EINVAL;
}
- tx_ring = &rtwpci->tx_rings[txch];
+ tx_ring = &rtwpci->tx.rings[txch];
spin_lock_bh(&rtwpci->trx_lock);
n_avail_txbd = rtw89_pci_get_avail_txbd_num(tx_ring);
@@ -1607,6 +1728,41 @@ static void rtw89_pci_init_wp_16sel(struct rtw89_dev *rtwdev)
}
}
+static u16 rtw89_pci_enc_bd_cfg(struct rtw89_dev *rtwdev, u16 bd_num,
+ u32 dma_offset)
+{
+ u16 dma_offset_sel;
+ u16 num_sel;
+
+ /* B_BE_TX_NUM_SEL_MASK, B_BE_RX_NUM_SEL_MASK:
+ * 0 -> 0
+ * 1 -> 64 = 2^6
+ * 2 -> 128 = 2^7
+ * ...
+ * 7 -> 4096 = 2^12
+ */
+ num_sel = ilog2(bd_num) - 5;
+
+ if (hweight16(bd_num) != 1)
+ rtw89_warn(rtwdev, "bd_num %u is not power of 2\n", bd_num);
+
+ /* B_BE_TX_START_OFFSET_MASK, B_BE_RX_START_OFFSET_MASK:
+ * 0 -> 0 = 0 * 2^9
+ * 1 -> 512 = 1 * 2^9
+ * 2 -> 1024 = 2 * 2^9
+ * 3 -> 1536 = 3 * 2^9
+ * ...
+ * 255 -> 130560 = 255 * 2^9
+ */
+ dma_offset_sel = dma_offset >> 9;
+
+ if (dma_offset % 512)
+ rtw89_warn(rtwdev, "offset %u is not multiple of 512\n", dma_offset);
+
+ return u16_encode_bits(num_sel, B_BE_TX_NUM_SEL_MASK) |
+ u16_encode_bits(dma_offset_sel, B_BE_TX_START_OFFSET_MASK);
+}
+
static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
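To make the encoding concrete, below is a minimal userspace mirror of rtw89_pci_enc_bd_cfg() (illustrative only: the helper is hypothetical, the shift positions come from B_BE_TX_NUM_SEL_MASK = GENMASK(2, 0) and B_BE_TX_START_OFFSET_MASK = GENMASK(12, 4) defined in pci.h below, and the inputs are invented):

	#include <assert.h>

	/* Mirror of the kernel helper: bd_num must be a power of two and
	 * dma_offset a multiple of 512, as the rtw89_warn() checks expect.
	 */
	static unsigned int enc_bd_cfg(unsigned int bd_num, unsigned int dma_offset)
	{
		unsigned int num_sel = __builtin_ctz(bd_num) - 5; /* ilog2() of a power of 2 */
		unsigned int offset_sel = dma_offset >> 9;        /* 512-byte units */

		return (offset_sel << 4) | num_sel;
	}

	int main(void)
	{
		/* 256 descriptors placed 4096 bytes into the group pool:
		 * num_sel = ilog2(256) - 5 = 3, offset_sel = 4096 / 512 = 8 -> 0x83
		 */
		assert(enc_bd_cfg(256, 4096) == 0x83);
		return 0;
	}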
@@ -1616,10 +1772,12 @@ static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
struct rtw89_pci_rx_ring *rx_ring;
struct rtw89_pci_dma_ring *bd_ring;
const struct rtw89_pci_bd_ram *bd_ram;
+ dma_addr_t group_dma_base = 0;
+ u16 num_or_offset;
+ u32 addr_desa_l;
+ u32 addr_bdram;
u32 addr_num;
u32 addr_idx;
- u32 addr_bdram;
- u32 addr_desa_l;
u32 val32;
int i;
@@ -1627,7 +1785,7 @@ static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
if (info->tx_dma_ch_mask & BIT(i))
continue;
- tx_ring = &rtwpci->tx_rings[i];
+ tx_ring = &rtwpci->tx.rings[i];
bd_ring = &tx_ring->bd_ring;
bd_ram = bd_ram_table ? &bd_ram_table[i] : NULL;
addr_num = bd_ring->addr.num;
@@ -1636,7 +1794,18 @@ static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
bd_ring->wp = 0;
bd_ring->rp = 0;
- rtw89_write16(rtwdev, addr_num, bd_ring->len);
+ if (info->group_bd_addr) {
+ if (addr_desa_l)
+ group_dma_base = bd_ring->dma;
+
+ num_or_offset =
+ rtw89_pci_enc_bd_cfg(rtwdev, bd_ring->len,
+ bd_ring->dma - group_dma_base);
+ } else {
+ num_or_offset = bd_ring->len;
+ }
+ rtw89_write16(rtwdev, addr_num, num_or_offset);
+
if (addr_bdram && bd_ram) {
val32 = FIELD_PREP(BDRAM_SIDX_MASK, bd_ram->start_idx) |
FIELD_PREP(BDRAM_MAX_MASK, bd_ram->max_num) |
@@ -1644,12 +1813,14 @@ static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
rtw89_write32(rtwdev, addr_bdram, val32);
}
- rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
- rtw89_write32(rtwdev, addr_desa_l + 4, upper_32_bits(bd_ring->dma));
+ if (addr_desa_l) {
+ rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
+ rtw89_write32(rtwdev, addr_desa_l + 4, upper_32_bits(bd_ring->dma));
+ }
}
for (i = 0; i < RTW89_RXCH_NUM; i++) {
- rx_ring = &rtwpci->rx_rings[i];
+ rx_ring = &rtwpci->rx.rings[i];
bd_ring = &rx_ring->bd_ring;
addr_num = bd_ring->addr.num;
addr_idx = bd_ring->addr.idx;
@@ -1663,9 +1834,22 @@ static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
rx_ring->diliver_desc.ready = false;
rx_ring->target_rx_tag = 0;
- rtw89_write16(rtwdev, addr_num, bd_ring->len);
- rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
- rtw89_write32(rtwdev, addr_desa_l + 4, upper_32_bits(bd_ring->dma));
+ if (info->group_bd_addr) {
+ if (addr_desa_l)
+ group_dma_base = bd_ring->dma;
+
+ num_or_offset =
+ rtw89_pci_enc_bd_cfg(rtwdev, bd_ring->len,
+ bd_ring->dma - group_dma_base);
+ } else {
+ num_or_offset = bd_ring->len;
+ }
+ rtw89_write16(rtwdev, addr_num, num_or_offset);
+
+ if (addr_desa_l) {
+ rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
+ rtw89_write32(rtwdev, addr_desa_l + 4, upper_32_bits(bd_ring->dma));
+ }
if (info->rx_ring_eq_is_full)
rtw89_write16(rtwdev, addr_idx, bd_ring->wp);
@@ -1698,7 +1882,7 @@ void rtw89_pci_ops_reset(struct rtw89_dev *rtwdev)
skb_queue_len(&rtwpci->h2c_queue), true);
continue;
}
- rtw89_pci_release_tx_ring(rtwdev, &rtwpci->tx_rings[txch]);
+ rtw89_pci_release_tx_ring(rtwdev, &rtwpci->tx.rings[txch]);
}
spin_unlock_bh(&rtwpci->trx_lock);
}
@@ -1774,14 +1958,14 @@ void rtw89_pci_switch_bd_idx_addr(struct rtw89_dev *rtwdev, bool low_power)
return;
for (i = 0; i < RTW89_TXCH_NUM; i++) {
- tx_ring = &rtwpci->tx_rings[i];
+ tx_ring = &rtwpci->tx.rings[i];
tx_ring->bd_ring.addr.idx = low_power ?
bd_idx_addr->tx_bd_addrs[i] :
dma_addr_set->tx[i].idx;
}
for (i = 0; i < RTW89_RXCH_NUM; i++) {
- rx_ring = &rtwpci->rx_rings[i];
+ rx_ring = &rtwpci->rx.rings[i];
rx_ring->bd_ring.addr.idx = low_power ?
bd_idx_addr->rx_bd_addrs[i] :
dma_addr_set->rx[i].idx;
@@ -2725,7 +2909,7 @@ static int rtw89_pci_poll_rxdma_ch_idle_ax(struct rtw89_dev *rtwdev)
static int rtw89_pci_poll_dma_all_idle(struct rtw89_dev *rtwdev)
{
- u32 ret;
+ int ret;
ret = rtw89_pci_poll_txdma_ch_idle_ax(rtwdev);
if (ret) {
@@ -3211,15 +3395,6 @@ static void rtw89_pci_free_tx_ring(struct rtw89_dev *rtwdev,
struct pci_dev *pdev,
struct rtw89_pci_tx_ring *tx_ring)
{
- int ring_sz;
- u8 *head;
- dma_addr_t dma;
-
- head = tx_ring->bd_ring.head;
- dma = tx_ring->bd_ring.dma;
- ring_sz = tx_ring->bd_ring.desc_size * tx_ring->bd_ring.len;
- dma_free_coherent(&pdev->dev, ring_sz, head, dma);
-
tx_ring->bd_ring.head = NULL;
}
@@ -3227,6 +3402,7 @@ static void rtw89_pci_free_tx_rings(struct rtw89_dev *rtwdev,
struct pci_dev *pdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct rtw89_pci_dma_pool *bd_pool = &rtwpci->tx.bd_pool;
const struct rtw89_pci_info *info = rtwdev->pci_info;
struct rtw89_pci_tx_ring *tx_ring;
int i;
@@ -3234,10 +3410,12 @@ static void rtw89_pci_free_tx_rings(struct rtw89_dev *rtwdev,
for (i = 0; i < RTW89_TXCH_NUM; i++) {
if (info->tx_dma_ch_mask & BIT(i))
continue;
- tx_ring = &rtwpci->tx_rings[i];
+ tx_ring = &rtwpci->tx.rings[i];
rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring);
rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring);
}
+
+ dma_free_coherent(&pdev->dev, bd_pool->size, bd_pool->head, bd_pool->dma);
}
static void rtw89_pci_free_rx_ring(struct rtw89_dev *rtwdev,
@@ -3248,8 +3426,6 @@ static void rtw89_pci_free_rx_ring(struct rtw89_dev *rtwdev,
struct sk_buff *skb;
dma_addr_t dma;
u32 buf_sz;
- u8 *head;
- int ring_sz = rx_ring->bd_ring.desc_size * rx_ring->bd_ring.len;
int i;
buf_sz = rx_ring->buf_sz;
@@ -3265,10 +3441,6 @@ static void rtw89_pci_free_rx_ring(struct rtw89_dev *rtwdev,
rx_ring->buf[i] = NULL;
}
- head = rx_ring->bd_ring.head;
- dma = rx_ring->bd_ring.dma;
- dma_free_coherent(&pdev->dev, ring_sz, head, dma);
-
rx_ring->bd_ring.head = NULL;
}
@@ -3276,13 +3448,16 @@ static void rtw89_pci_free_rx_rings(struct rtw89_dev *rtwdev,
struct pci_dev *pdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct rtw89_pci_dma_pool *bd_pool = &rtwpci->rx.bd_pool;
struct rtw89_pci_rx_ring *rx_ring;
int i;
for (i = 0; i < RTW89_RXCH_NUM; i++) {
- rx_ring = &rtwpci->rx_rings[i];
+ rx_ring = &rtwpci->rx.rings[i];
rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring);
}
+
+ dma_free_coherent(&pdev->dev, bd_pool->size, bd_pool->head, bd_pool->dma);
}
static void rtw89_pci_free_trx_rings(struct rtw89_dev *rtwdev,
@@ -3374,12 +3549,10 @@ static int rtw89_pci_alloc_tx_ring(struct rtw89_dev *rtwdev,
struct pci_dev *pdev,
struct rtw89_pci_tx_ring *tx_ring,
u32 desc_size, u32 len,
- enum rtw89_tx_channel txch)
+ enum rtw89_tx_channel txch,
+ void *head, dma_addr_t dma)
{
const struct rtw89_pci_ch_dma_addr *txch_addr;
- int ring_sz = desc_size * len;
- u8 *head;
- dma_addr_t dma;
int ret;
ret = rtw89_pci_alloc_tx_wd_ring(rtwdev, pdev, tx_ring, txch);
@@ -3394,12 +3567,6 @@ static int rtw89_pci_alloc_tx_ring(struct rtw89_dev *rtwdev,
goto err_free_wd_ring;
}
- head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
- if (!head) {
- ret = -ENOMEM;
- goto err_free_wd_ring;
- }
-
INIT_LIST_HEAD(&tx_ring->busy_pages);
tx_ring->bd_ring.head = head;
tx_ring->bd_ring.dma = dma;
@@ -3422,25 +3589,48 @@ static int rtw89_pci_alloc_tx_rings(struct rtw89_dev *rtwdev,
struct pci_dev *pdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct rtw89_pci_dma_pool *bd_pool = &rtwpci->tx.bd_pool;
const struct rtw89_pci_info *info = rtwdev->pci_info;
struct rtw89_pci_tx_ring *tx_ring;
+ u32 i, tx_allocated;
+ dma_addr_t dma;
u32 desc_size;
+ u32 ring_sz;
+ u32 pool_sz;
+ u32 ch_num;
+ void *head;
u32 len;
- u32 i, tx_allocated;
int ret;
+ BUILD_BUG_ON(RTW89_PCI_TXBD_NUM_MAX % 16);
+
+ desc_size = sizeof(struct rtw89_pci_tx_bd_32);
+ len = RTW89_PCI_TXBD_NUM_MAX;
+ ch_num = RTW89_TXCH_NUM - hweight32(info->tx_dma_ch_mask);
+ ring_sz = desc_size * len;
+ pool_sz = ring_sz * ch_num;
+
+ head = dma_alloc_coherent(&pdev->dev, pool_sz, &dma, GFP_KERNEL);
+ if (!head)
+ return -ENOMEM;
+
+ bd_pool->head = head;
+ bd_pool->dma = dma;
+ bd_pool->size = pool_sz;
+
for (i = 0; i < RTW89_TXCH_NUM; i++) {
if (info->tx_dma_ch_mask & BIT(i))
continue;
- tx_ring = &rtwpci->tx_rings[i];
- desc_size = sizeof(struct rtw89_pci_tx_bd_32);
- len = RTW89_PCI_TXBD_NUM_MAX;
+ tx_ring = &rtwpci->tx.rings[i];
ret = rtw89_pci_alloc_tx_ring(rtwdev, pdev, tx_ring,
- desc_size, len, i);
+ desc_size, len, i, head, dma);
if (ret) {
rtw89_err(rtwdev, "failed to alloc tx ring %d\n", i);
goto err_free;
}
+
+ head += ring_sz;
+ dma += ring_sz;
}
return 0;
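A design note on the pooled allocation (an inference from the code, not stated elsewhere in the patch): one dma_alloc_coherent() area now backs all TX BD rings, carved out ring_sz bytes at a time as head and dma advance per channel. A contiguous pool is what the group_bd_addr scheme relies on: only the group-base rings program a full DMA address through desa_l/desa_h, and every other ring is addressed by its byte offset from that base, which rtw89_pci_enc_bd_cfg() encodes in 512-byte units (the comment there illustrates selectors up to 255, i.e. offsets up to 130560 bytes). It also lets the error path free the whole pool with a single dma_free_coherent() instead of one call per ring.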
@@ -3448,24 +3638,24 @@ static int rtw89_pci_alloc_tx_rings(struct rtw89_dev *rtwdev,
err_free:
tx_allocated = i;
for (i = 0; i < tx_allocated; i++) {
- tx_ring = &rtwpci->tx_rings[i];
+ tx_ring = &rtwpci->tx.rings[i];
rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring);
}
+ dma_free_coherent(&pdev->dev, bd_pool->size, bd_pool->head, bd_pool->dma);
+
return ret;
}
static int rtw89_pci_alloc_rx_ring(struct rtw89_dev *rtwdev,
struct pci_dev *pdev,
struct rtw89_pci_rx_ring *rx_ring,
- u32 desc_size, u32 len, u32 rxch)
+ u32 desc_size, u32 len, u32 rxch,
+ void *head, dma_addr_t dma)
{
const struct rtw89_pci_info *info = rtwdev->pci_info;
const struct rtw89_pci_ch_dma_addr *rxch_addr;
struct sk_buff *skb;
- u8 *head;
- dma_addr_t dma;
- int ring_sz = desc_size * len;
int buf_sz = RTW89_PCI_RX_BUF_SIZE;
int i, allocated;
int ret;
@@ -3476,12 +3666,6 @@ static int rtw89_pci_alloc_rx_ring(struct rtw89_dev *rtwdev,
return ret;
}
- head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
- if (!head) {
- ret = -ENOMEM;
- goto err;
- }
-
rx_ring->bd_ring.head = head;
rx_ring->bd_ring.dma = dma;
rx_ring->bd_ring.len = len;
@@ -3530,12 +3714,8 @@ err_free:
rx_ring->buf[i] = NULL;
}
- head = rx_ring->bd_ring.head;
- dma = rx_ring->bd_ring.dma;
- dma_free_coherent(&pdev->dev, ring_sz, head, dma);
-
rx_ring->bd_ring.head = NULL;
-err:
+
return ret;
}
@@ -3543,22 +3723,43 @@ static int rtw89_pci_alloc_rx_rings(struct rtw89_dev *rtwdev,
struct pci_dev *pdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct rtw89_pci_dma_pool *bd_pool = &rtwpci->rx.bd_pool;
struct rtw89_pci_rx_ring *rx_ring;
+ int i, rx_allocated;
+ dma_addr_t dma;
u32 desc_size;
+ u32 ring_sz;
+ u32 pool_sz;
+ void *head;
u32 len;
- int i, rx_allocated;
int ret;
+ desc_size = sizeof(struct rtw89_pci_rx_bd_32);
+ len = RTW89_PCI_RXBD_NUM_MAX;
+ ring_sz = desc_size * len;
+ pool_sz = ring_sz * RTW89_RXCH_NUM;
+
+ head = dma_alloc_coherent(&pdev->dev, pool_sz, &dma, GFP_KERNEL);
+ if (!head)
+ return -ENOMEM;
+
+ bd_pool->head = head;
+ bd_pool->dma = dma;
+ bd_pool->size = pool_sz;
+
for (i = 0; i < RTW89_RXCH_NUM; i++) {
- rx_ring = &rtwpci->rx_rings[i];
- desc_size = sizeof(struct rtw89_pci_rx_bd_32);
- len = RTW89_PCI_RXBD_NUM_MAX;
+ rx_ring = &rtwpci->rx.rings[i];
+
ret = rtw89_pci_alloc_rx_ring(rtwdev, pdev, rx_ring,
- desc_size, len, i);
+ desc_size, len, i,
+ head, dma);
if (ret) {
rtw89_err(rtwdev, "failed to alloc rx ring %d\n", i);
goto err_free;
}
+
+ head += ring_sz;
+ dma += ring_sz;
}
return 0;
@@ -3566,10 +3767,12 @@ static int rtw89_pci_alloc_rx_rings(struct rtw89_dev *rtwdev,
err_free:
rx_allocated = i;
for (i = 0; i < rx_allocated; i++) {
- rx_ring = &rtwpci->rx_rings[i];
+ rx_ring = &rtwpci->rx.rings[i];
rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring);
}
+ dma_free_coherent(&pdev->dev, bd_pool->size, bd_pool->head, bd_pool->dma);
+
return ret;
}
@@ -3776,6 +3979,40 @@ void rtw89_pci_config_intr_mask_v2(struct rtw89_dev *rtwdev)
}
EXPORT_SYMBOL(rtw89_pci_config_intr_mask_v2);
+static void rtw89_pci_recovery_intr_mask_v3(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+
+ rtwpci->ind_intrs = B_BE_HS0_IND_INT_EN0;
+ rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN;
+ rtwpci->intrs[0] = 0;
+ rtwpci->intrs[1] = 0;
+}
+
+static void rtw89_pci_default_intr_mask_v3(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+
+ rtwpci->ind_intrs = B_BE_HS0_IND_INT_EN0;
+ rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN;
+ rtwpci->intrs[0] = 0;
+ rtwpci->intrs[1] = B_BE_PCIE_RDU_CH1_IMR |
+ B_BE_PCIE_RDU_CH0_IMR |
+ B_BE_PCIE_RX_RX0P2_IMR0_V1 |
+ B_BE_PCIE_RX_RPQ0_IMR0_V1;
+}
+
+void rtw89_pci_config_intr_mask_v3(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+
+ if (rtwpci->under_recovery)
+ rtw89_pci_recovery_intr_mask_v3(rtwdev);
+ else
+ rtw89_pci_default_intr_mask_v3(rtwdev);
+}
+EXPORT_SYMBOL(rtw89_pci_config_intr_mask_v3);
+
static int rtw89_pci_request_irq(struct rtw89_dev *rtwdev,
struct pci_dev *pdev)
{
@@ -4158,7 +4395,7 @@ static int rtw89_pci_lv1rst_stop_dma_ax(struct rtw89_dev *rtwdev)
static int rtw89_pci_lv1rst_start_dma_ax(struct rtw89_dev *rtwdev)
{
- u32 ret;
+ int ret;
if (rtwdev->chip->chip_id == RTL8852C)
return 0;
@@ -4172,7 +4409,7 @@ static int rtw89_pci_lv1rst_start_dma_ax(struct rtw89_dev *rtwdev)
return ret;
rtw89_pci_ctrl_dma_all(rtwdev, true);
- return ret;
+ return 0;
}
static int rtw89_pci_ops_mac_lv1_recovery(struct rtw89_dev *rtwdev,
@@ -4228,18 +4465,18 @@ static int rtw89_pci_napi_poll(struct napi_struct *napi, int budget)
struct rtw89_dev *rtwdev = container_of(napi, struct rtw89_dev, napi);
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
const struct rtw89_pci_info *info = rtwdev->pci_info;
- const struct rtw89_pci_gen_def *gen_def = info->gen_def;
+ const struct rtw89_pci_isr_def *isr_def = info->isr_def;
unsigned long flags;
int work_done;
rtwdev->napi_budget_countdown = budget;
- rtw89_write32(rtwdev, gen_def->isr_clear_rpq.addr, gen_def->isr_clear_rpq.data);
+ rtw89_write32(rtwdev, isr_def->isr_clear_rpq.addr, isr_def->isr_clear_rpq.data);
work_done = rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown);
if (work_done == budget)
return budget;
- rtw89_write32(rtwdev, gen_def->isr_clear_rxq.addr, gen_def->isr_clear_rxq.data);
+ rtw89_write32(rtwdev, isr_def->isr_clear_rxq.addr, isr_def->isr_clear_rxq.data);
work_done += rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown);
if (work_done < budget && napi_complete_done(napi, work_done)) {
spin_lock_irqsave(&rtwpci->irq_lock, flags);
@@ -4394,14 +4631,17 @@ const struct pci_error_handlers rtw89_pci_err_handler = {
};
EXPORT_SYMBOL(rtw89_pci_err_handler);
-const struct rtw89_pci_gen_def rtw89_pci_gen_ax = {
+const struct rtw89_pci_isr_def rtw89_pci_isr_ax = {
.isr_rdu = B_AX_RDU_INT,
.isr_halt_c2h = B_AX_HALT_C2H_INT_EN,
.isr_wdt_timeout = B_AX_WDT_TIMEOUT_INT_EN,
.isr_clear_rpq = {R_AX_PCIE_HISR00, B_AX_RPQDMA_INT | B_AX_RPQBD_FULL_INT},
.isr_clear_rxq = {R_AX_PCIE_HISR00, B_AX_RXP1DMA_INT | B_AX_RXDMA_INT |
B_AX_RDU_INT},
+};
+EXPORT_SYMBOL(rtw89_pci_isr_ax);
+const struct rtw89_pci_gen_def rtw89_pci_gen_ax = {
.mac_pre_init = rtw89_pci_ops_mac_pre_init_ax,
.mac_pre_deinit = rtw89_pci_ops_mac_pre_deinit_ax,
.mac_post_init = rtw89_pci_ops_mac_post_init_ax,
diff --git a/drivers/net/wireless/realtek/rtw89/pci.h b/drivers/net/wireless/realtek/rtw89/pci.h
index 52f527069da6..cb05c83dfd56 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.h
+++ b/drivers/net/wireless/realtek/rtw89/pci.h
@@ -372,6 +372,14 @@
#define B_BE_HS0ISR_IND_INT BIT(0)
#define R_BE_PCIE_DMA_IMR_0_V1 0x30B8
+#define B_BE_PCIE_RDU_CH7_IMR BIT(31)
+#define B_BE_PCIE_RDU_CH6_IMR BIT(30)
+#define B_BE_PCIE_RDU_CH5_IMR BIT(29)
+#define B_BE_PCIE_RDU_CH4_IMR BIT(28)
+#define B_BE_PCIE_RDU_CH3_IMR BIT(27)
+#define B_BE_PCIE_RDU_CH2_IMR BIT(26)
+#define B_BE_PCIE_RDU_CH1_IMR BIT(25)
+#define B_BE_PCIE_RDU_CH0_IMR BIT(24)
#define B_BE_PCIE_RX_RX1P1_IMR0_V1 BIT(23)
#define B_BE_PCIE_RX_RX0P1_IMR0_V1 BIT(22)
#define B_BE_PCIE_RX_ROQ1_IMR0_V1 BIT(21)
@@ -397,6 +405,14 @@
#define B_BE_PCIE_TX_CH0_IMR0 BIT(0)
#define R_BE_PCIE_DMA_ISR 0x30BC
+#define B_BE_PCIE_RDU_CH7_INT BIT(31)
+#define B_BE_PCIE_RDU_CH6_INT BIT(30)
+#define B_BE_PCIE_RDU_CH5_INT BIT(29)
+#define B_BE_PCIE_RDU_CH4_INT BIT(28)
+#define B_BE_PCIE_RDU_CH3_INT BIT(27)
+#define B_BE_PCIE_RDU_CH2_INT BIT(26)
+#define B_BE_PCIE_RDU_CH1_INT BIT(25)
+#define B_BE_PCIE_RDU_CH0_INT BIT(24)
#define B_BE_PCIE_RX_RX1P1_ISR_V1 BIT(23)
#define B_BE_PCIE_RX_RX0P1_ISR_V1 BIT(22)
#define B_BE_PCIE_RX_ROQ1_ISR_V1 BIT(21)
@@ -426,9 +442,13 @@
#define B_BE_RDU_CH4_INT_IMR_V1 BIT(29)
#define B_BE_RDU_CH3_INT_IMR_V1 BIT(28)
#define B_BE_RDU_CH2_INT_IMR_V1 BIT(27)
+#define B_BE_RDU_CH1_INT_EN_V2 BIT(27)
#define B_BE_RDU_CH1_INT_IMR_V1 BIT(26)
+#define B_BE_RDU_CH0_INT_EN_V2 BIT(26)
#define B_BE_RDU_CH0_INT_IMR_V1 BIT(25)
+#define B_BE_RXDMA_STUCK_INT_EN_V2 BIT(25)
#define B_BE_RXDMA_STUCK_INT_EN_V1 BIT(24)
+#define B_BE_TXDMA_STUCK_INT_EN_V2 BIT(24)
#define B_BE_TXDMA_STUCK_INT_EN_V1 BIT(23)
#define B_BE_TXDMA_CH14_INT_EN_V1 BIT(22)
#define B_BE_TXDMA_CH13_INT_EN_V1 BIT(21)
@@ -459,9 +479,13 @@
#define B_BE_RDU_CH4_INT_V1 BIT(29)
#define B_BE_RDU_CH3_INT_V1 BIT(28)
#define B_BE_RDU_CH2_INT_V1 BIT(27)
+#define B_BE_RDU_CH1_INT_V2 BIT(27)
#define B_BE_RDU_CH1_INT_V1 BIT(26)
+#define B_BE_RDU_CH0_INT_V2 BIT(26)
#define B_BE_RDU_CH0_INT_V1 BIT(25)
+#define B_BE_RXDMA_STUCK_INT_V2 BIT(25)
#define B_BE_RXDMA_STUCK_INT_V1 BIT(24)
+#define B_BE_TXDMA_STUCK_INT_V2 BIT(24)
#define B_BE_TXDMA_STUCK_INT_V1 BIT(23)
#define B_BE_TXDMA_CH14_INT_V1 BIT(22)
#define B_BE_TXDMA_CH13_INT_V1 BIT(21)
@@ -784,9 +808,25 @@
#define R_BE_CH13_TXBD_NUM_V1 0xB04C
#define R_BE_CH14_TXBD_NUM_V1 0xB04E
+#define R_BE_CH0_TXBD_CFG 0xB030
+#define R_BE_CH2_TXBD_CFG 0xB034
+#define R_BE_CH4_TXBD_CFG 0xB038
+#define R_BE_CH6_TXBD_CFG 0xB03C
+#define R_BE_CH8_TXBD_CFG 0xB040
+#define R_BE_CH10_TXBD_CFG 0xB044
+#define R_BE_CH12_TXBD_CFG 0xB048
+#define B_BE_TX_FLAG BIT(14)
+#define B_BE_TX_START_OFFSET_MASK GENMASK(12, 4)
+#define B_BE_TX_NUM_SEL_MASK GENMASK(2, 0)
+
#define R_BE_RXQ0_RXBD_NUM_V1 0xB050
#define R_BE_RPQ0_RXBD_NUM_V1 0xB052
+#define R_BE_RX_CH0_RXBD_CONFIG 0xB050
+#define R_BE_RX_CH1_RXBD_CONFIG 0xB052
+#define B_BE_RX_START_OFFSET_MASK GENMASK(11, 4)
+#define B_BE_RX_NUM_SEL_MASK GENMASK(2, 0)
+
#define R_BE_CH0_TXBD_IDX_V1 0xB100
#define R_BE_CH1_TXBD_IDX_V1 0xB104
#define R_BE_CH2_TXBD_IDX_V1 0xB108
@@ -837,11 +877,25 @@
#define R_BE_CH14_TXBD_DESA_L_V1 0xB270
#define R_BE_CH14_TXBD_DESA_H_V1 0xB274
+#define R_BE_ACQ_TXBD_DESA_L 0xB200
+#define B_BE_TX_ACQ_DESA_L_MASK GENMASK(31, 3)
+#define R_BE_ACQ_TXBD_DESA_H 0xB204
+#define B_BE_TX_ACQ_DESA_H_MASK GENMASK(7, 0)
+#define R_BE_NACQ_TXBD_DESA_L 0xB240
+#define B_BE_TX_NACQ_DESA_L_MASK GENMASK(31, 3)
+#define R_BE_NACQ_TXBD_DESA_H 0xB244
+#define B_BE_TX_NACQ_DESA_H_MASK GENMASK(7, 0)
+
#define R_BE_RXQ0_RXBD_DESA_L_V1 0xB300
#define R_BE_RXQ0_RXBD_DESA_H_V1 0xB304
#define R_BE_RPQ0_RXBD_DESA_L_V1 0xB308
#define R_BE_RPQ0_RXBD_DESA_H_V1 0xB30C
+#define R_BE_HOST0_RXBD_DESA_L 0xB300
+#define B_BE_RX_HOST0_DESA_L_MASK GENMASK(31, 3)
+#define R_BE_HOST0_RXBD_DESA_H 0xB304
+#define B_BE_RX_HOST0_DESA_H_MASK GENMASK(7, 0)
+
#define R_BE_WP_ADDR_H_SEL0_3_V1 0xB420
#define R_BE_WP_ADDR_H_SEL4_7_V1 0xB424
#define R_BE_WP_ADDR_H_SEL8_11_V1 0xB428
@@ -1249,7 +1303,7 @@ struct rtw89_pci_bd_idx_addr {
};
struct rtw89_pci_ch_dma_addr {
- u32 num;
u32 num; /* also the offset register in the group_bd_addr design */
u32 idx;
u32 bdram;
u32 desa_l;
@@ -1267,13 +1321,15 @@ struct rtw89_pci_bd_ram {
u8 min_num;
};
-struct rtw89_pci_gen_def {
+struct rtw89_pci_isr_def {
u32 isr_rdu;
u32 isr_halt_c2h;
u32 isr_wdt_timeout;
struct rtw89_reg2_def isr_clear_rpq;
struct rtw89_reg2_def isr_clear_rxq;
+};
+struct rtw89_pci_gen_def {
int (*mac_pre_init)(struct rtw89_dev *rtwdev);
int (*mac_pre_deinit)(struct rtw89_dev *rtwdev);
int (*mac_post_init)(struct rtw89_dev *rtwdev);
@@ -1309,8 +1365,16 @@ struct rtw89_pci_ssid_quirk {
unsigned long bitmap; /* bitmap of rtw89_quirks */
};
+struct rtw89_pci_rpp_info {
+ u16 seq;
+ u8 qsel;
+ u8 tx_status;
+ u8 txch;
+};
+
struct rtw89_pci_info {
const struct rtw89_pci_gen_def *gen_def;
+ const struct rtw89_pci_isr_def *isr_def;
enum mac_ax_bd_trunc_mode txbd_trunc_mode;
enum mac_ax_bd_trunc_mode rxbd_trunc_mode;
enum mac_ax_rxbd_mode rxbd_mode;
@@ -1328,6 +1392,8 @@ struct rtw89_pci_info {
bool rx_ring_eq_is_full;
bool check_rx_tag;
bool no_rxbd_fs;
+ bool group_bd_addr;
+ u32 rpp_fmt_size;
u32 init_cfg_reg;
u32 txhci_en_bit;
@@ -1357,6 +1423,8 @@ struct rtw89_pci_info {
u32 (*fill_txaddr_info)(struct rtw89_dev *rtwdev,
void *txaddr_info_addr, u32 total_len,
dma_addr_t dma, u8 *add_info_nr);
+ void (*parse_rpp)(struct rtw89_dev *rtwdev, void *rpp,
+ struct rtw89_pci_rpp_info *rpp_info);
void (*config_intr_mask)(struct rtw89_dev *rtwdev);
void (*enable_intr)(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci);
void (*disable_intr)(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci);
@@ -1430,6 +1498,19 @@ struct rtw89_pci_rpp_fmt {
__le32 dword;
} __packed;
+#define RTW89_PCI_RPP_W0_MACID_V1_MASK GENMASK(9, 0)
+#define RTW89_PCI_RPP_W0_DMA_CH_MASK GENMASK(13, 10)
+#define RTW89_PCI_RPP_W0_TX_STATUS_V1_MASK GENMASK(16, 14)
+#define RTW89_PCI_RPP_W0_PCIE_SEQ_V1_MASK GENMASK(31, 17)
+#define RTW89_PCI_RPP_W1_QSEL_V1_MASK GENMASK(5, 0)
+#define RTW89_PCI_RPP_W1_TID_IND BIT(6)
+#define RTW89_PCI_RPP_W1_CHANGE_LINK BIT(7)
+
+struct rtw89_pci_rpp_fmt_v1 {
+ __le32 w0;
+ __le32 w1;
+} __packed;
+
struct rtw89_pci_rx_bd_32 {
__le16 buf_size;
__le16 opt;
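As a decoding sketch for the two-dword v1 layout (hypothetical field values, not from the patch; w0/w1 are assumed already converted from little endian):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t w0 = 0x000a0801, w1 = 0x00000004;

		uint16_t seq       = (w0 >> 17) & 0x7fff; /* RTW89_PCI_RPP_W0_PCIE_SEQ_V1_MASK */
		uint8_t  tx_status = (w0 >> 14) & 0x7;    /* RTW89_PCI_RPP_W0_TX_STATUS_V1_MASK */
		uint8_t  txch      = (w0 >> 10) & 0xf;    /* RTW89_PCI_RPP_W0_DMA_CH_MASK */
		uint16_t macid     = w0 & 0x3ff;          /* RTW89_PCI_RPP_W0_MACID_V1_MASK */
		uint8_t  qsel      = w1 & 0x3f;           /* RTW89_PCI_RPP_W1_QSEL_V1_MASK */

		assert(seq == 5 && tx_status == 0 && txch == 2 && macid == 1 && qsel == 4);
		return 0;
	}

The practical difference from the single-dword format is visible here: the DMA channel arrives directly in w0, so rtw89_pci_parse_rpp_v1() no longer derives txch from qsel via rtw89_chip_get_ch_dma().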
@@ -1468,6 +1549,12 @@ struct rtw89_pci_dma_ring {
u32 rp; /* hw idx */
};
+struct rtw89_pci_dma_pool {
+ void *head;
+ dma_addr_t dma;
+ u32 size;
+};
+
struct rtw89_pci_tx_wd_ring {
void *head;
dma_addr_t dma;
@@ -1497,6 +1584,11 @@ struct rtw89_pci_tx_ring {
u64 tx_mac_id_drop;
};
+struct rtw89_pci_tx_rings {
+ struct rtw89_pci_tx_ring rings[RTW89_TXCH_NUM];
+ struct rtw89_pci_dma_pool bd_pool;
+};
+
struct rtw89_pci_rx_ring {
struct rtw89_pci_dma_ring bd_ring;
struct sk_buff *buf[RTW89_PCI_RXBD_NUM_MAX];
@@ -1506,6 +1598,11 @@ struct rtw89_pci_rx_ring {
u32 target_rx_tag:13;
};
+struct rtw89_pci_rx_rings {
+ struct rtw89_pci_rx_ring rings[RTW89_RXCH_NUM];
+ struct rtw89_pci_dma_pool bd_pool;
+};
+
struct rtw89_pci_isrs {
u32 ind_isrs;
u32 halt_c2h_isrs;
@@ -1523,8 +1620,8 @@ struct rtw89_pci {
bool low_power;
bool under_recovery;
bool enable_dac;
- struct rtw89_pci_tx_ring tx_rings[RTW89_TXCH_NUM];
- struct rtw89_pci_rx_ring rx_rings[RTW89_RXCH_NUM];
+ struct rtw89_pci_tx_rings tx;
+ struct rtw89_pci_rx_rings rx;
struct sk_buff_head h2c_queue;
struct sk_buff_head h2c_release_queue;
DECLARE_BITMAP(kick_map, RTW89_TXCH_NUM);
@@ -1537,10 +1634,7 @@ struct rtw89_pci {
static inline struct rtw89_pci_rx_info *RTW89_PCI_RX_SKB_CB(struct sk_buff *skb)
{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-
- BUILD_BUG_ON(sizeof(struct rtw89_pci_tx_data) >
- sizeof(info->status.status_driver_data));
+ BUILD_BUG_ON(sizeof(struct rtw89_pci_rx_info) > sizeof(skb->cb));
return (struct rtw89_pci_rx_info *)skb->cb;
}
@@ -1571,6 +1665,10 @@ static inline struct rtw89_pci_tx_data *RTW89_PCI_TX_SKB_CB(struct sk_buff *skb)
{
struct rtw89_tx_skb_data *data = RTW89_TX_SKB_CB(skb);
+ BUILD_BUG_ON(sizeof(struct rtw89_tx_skb_data) +
+ sizeof(struct rtw89_pci_tx_data) >
+ sizeof_field(struct ieee80211_tx_info, driver_data));
+
return (struct rtw89_pci_tx_data *)data->hci_priv;
}
@@ -1626,8 +1724,12 @@ extern const struct pci_error_handlers rtw89_pci_err_handler;
extern const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set;
extern const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_v1;
extern const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_be;
+extern const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_be_v1;
extern const struct rtw89_pci_bd_ram rtw89_bd_ram_table_dual[RTW89_TXCH_NUM];
extern const struct rtw89_pci_bd_ram rtw89_bd_ram_table_single[RTW89_TXCH_NUM];
+extern const struct rtw89_pci_isr_def rtw89_pci_isr_ax;
+extern const struct rtw89_pci_isr_def rtw89_pci_isr_be;
+extern const struct rtw89_pci_isr_def rtw89_pci_isr_be_v1;
extern const struct rtw89_pci_gen_def rtw89_pci_gen_ax;
extern const struct rtw89_pci_gen_def rtw89_pci_gen_be;
@@ -1646,16 +1748,23 @@ u32 rtw89_pci_fill_txaddr_info(struct rtw89_dev *rtwdev,
u32 rtw89_pci_fill_txaddr_info_v1(struct rtw89_dev *rtwdev,
void *txaddr_info_addr, u32 total_len,
dma_addr_t dma, u8 *add_info_nr);
+void rtw89_pci_parse_rpp(struct rtw89_dev *rtwdev, void *_rpp,
+ struct rtw89_pci_rpp_info *rpp_info);
+void rtw89_pci_parse_rpp_v1(struct rtw89_dev *rtwdev, void *_rpp,
+ struct rtw89_pci_rpp_info *rpp_info);
void rtw89_pci_ctrl_dma_all(struct rtw89_dev *rtwdev, bool enable);
void rtw89_pci_config_intr_mask(struct rtw89_dev *rtwdev);
void rtw89_pci_config_intr_mask_v1(struct rtw89_dev *rtwdev);
void rtw89_pci_config_intr_mask_v2(struct rtw89_dev *rtwdev);
+void rtw89_pci_config_intr_mask_v3(struct rtw89_dev *rtwdev);
void rtw89_pci_enable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci);
void rtw89_pci_disable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci);
void rtw89_pci_enable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci);
void rtw89_pci_disable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci);
void rtw89_pci_enable_intr_v2(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci);
void rtw89_pci_disable_intr_v2(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci);
+void rtw89_pci_enable_intr_v3(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci);
+void rtw89_pci_disable_intr_v3(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci);
void rtw89_pci_recognize_intrs(struct rtw89_dev *rtwdev,
struct rtw89_pci *rtwpci,
struct rtw89_pci_isrs *isrs);
@@ -1665,6 +1774,9 @@ void rtw89_pci_recognize_intrs_v1(struct rtw89_dev *rtwdev,
void rtw89_pci_recognize_intrs_v2(struct rtw89_dev *rtwdev,
struct rtw89_pci *rtwpci,
struct rtw89_pci_isrs *isrs);
+void rtw89_pci_recognize_intrs_v3(struct rtw89_dev *rtwdev,
+ struct rtw89_pci *rtwpci,
+ struct rtw89_pci_isrs *isrs);
static inline
u32 rtw89_chip_fill_txaddr_info(struct rtw89_dev *rtwdev,
diff --git a/drivers/net/wireless/realtek/rtw89/pci_be.c b/drivers/net/wireless/realtek/rtw89/pci_be.c
index 12e6a0cbb889..e4590879b800 100644
--- a/drivers/net/wireless/realtek/rtw89/pci_be.c
+++ b/drivers/net/wireless/realtek/rtw89/pci_be.c
@@ -175,10 +175,10 @@ static void rtw89_pci_clr_idx_all_be(struct rtw89_dev *rtwdev)
rtw89_write32(rtwdev, R_BE_RXBD_RWPTR_CLR1_V1,
B_BE_CLR_RXQ0_IDX | B_BE_CLR_RPQ0_IDX);
- rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RXQ];
+ rx_ring = &rtwpci->rx.rings[RTW89_RXCH_RXQ];
rtw89_write16(rtwdev, R_BE_RXQ0_RXBD_IDX_V1, rx_ring->bd_ring.len - 1);
- rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ];
+ rx_ring = &rtwpci->rx.rings[RTW89_RXCH_RPQ];
rtw89_write16(rtwdev, R_BE_RPQ0_RXBD_IDX_V1, rx_ring->bd_ring.len - 1);
}
@@ -665,13 +665,25 @@ static int __maybe_unused rtw89_pci_resume_be(struct device *dev)
SIMPLE_DEV_PM_OPS(rtw89_pm_ops_be, rtw89_pci_suspend_be, rtw89_pci_resume_be);
EXPORT_SYMBOL(rtw89_pm_ops_be);
-const struct rtw89_pci_gen_def rtw89_pci_gen_be = {
+const struct rtw89_pci_isr_def rtw89_pci_isr_be = {
.isr_rdu = B_BE_RDU_CH1_INT_V1 | B_BE_RDU_CH0_INT_V1,
.isr_halt_c2h = B_BE_HALT_C2H_INT,
.isr_wdt_timeout = B_BE_WDT_TIMEOUT_INT,
.isr_clear_rpq = {R_BE_PCIE_DMA_ISR, B_BE_PCIE_RX_RPQ0_ISR_V1},
.isr_clear_rxq = {R_BE_PCIE_DMA_ISR, B_BE_PCIE_RX_RX0P2_ISR_V1},
+};
+EXPORT_SYMBOL(rtw89_pci_isr_be);
+const struct rtw89_pci_isr_def rtw89_pci_isr_be_v1 = {
+ .isr_rdu = B_BE_PCIE_RDU_CH1_INT | B_BE_PCIE_RDU_CH0_INT,
+ .isr_halt_c2h = B_BE_HALT_C2H_INT,
+ .isr_wdt_timeout = B_BE_WDT_TIMEOUT_INT,
+ .isr_clear_rpq = {R_BE_PCIE_DMA_ISR, B_BE_PCIE_RX_RPQ0_ISR_V1},
+ .isr_clear_rxq = {R_BE_PCIE_DMA_ISR, B_BE_PCIE_RX_RX0P2_ISR_V1},
+};
+EXPORT_SYMBOL(rtw89_pci_isr_be_v1);
+
+const struct rtw89_pci_gen_def rtw89_pci_gen_be = {
.mac_pre_init = rtw89_pci_ops_mac_pre_init_be,
.mac_pre_deinit = rtw89_pci_ops_mac_pre_deinit_be,
.mac_post_init = rtw89_pci_ops_mac_post_init_be,
diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c
index d607577b353c..ba7feadd7582 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.c
+++ b/drivers/net/wireless/realtek/rtw89/phy.c
@@ -1702,6 +1702,91 @@ void rtw89_phy_init_bb_reg(struct rtw89_dev *rtwdev)
rtw89_phy_bb_reset(rtwdev);
}
+void rtw89_phy_init_bb_afe(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
+ const struct rtw89_fw_element_hdr *afe_elm = elm_info->afe;
+ const struct rtw89_phy_afe_info *info;
+ u32 action, cat, class;
+ u32 addr, mask, val;
+ u32 poll, rpt;
+ u32 n, i;
+
+ if (!afe_elm)
+ return;
+
+ n = le32_to_cpu(afe_elm->size) / sizeof(*info);
+
+ for (i = 0; i < n; i++) {
+ info = &afe_elm->u.afe.infos[i];
+
+ class = le32_to_cpu(info->class);
+ switch (class) {
+ case RTW89_FW_AFE_CLASS_P0:
+ case RTW89_FW_AFE_CLASS_P1:
+ case RTW89_FW_AFE_CLASS_CMN:
+ /* Currently only two paths are supported */
+ break;
+ case RTW89_FW_AFE_CLASS_P2:
+ case RTW89_FW_AFE_CLASS_P3:
+ case RTW89_FW_AFE_CLASS_P4:
+ default:
+ rtw89_warn(rtwdev, "unexpected AFE class %u\n", class);
+ continue;
+ }
+
+ addr = le32_to_cpu(info->addr);
+ mask = le32_to_cpu(info->mask);
+ val = le32_to_cpu(info->val);
+ cat = le32_to_cpu(info->cat);
+ action = le32_to_cpu(info->action);
+
+ switch (action) {
+ case RTW89_FW_AFE_ACTION_WRITE:
+ switch (cat) {
+ case RTW89_FW_AFE_CAT_MAC:
+ case RTW89_FW_AFE_CAT_MAC1:
+ rtw89_write32_mask(rtwdev, addr, mask, val);
+ break;
+ case RTW89_FW_AFE_CAT_AFEDIG:
+ case RTW89_FW_AFE_CAT_AFEDIG1:
+ rtw89_write32_mask(rtwdev, addr, mask, val);
+ break;
+ case RTW89_FW_AFE_CAT_BB:
+ rtw89_phy_write32_idx(rtwdev, addr, mask, val, RTW89_PHY_0);
+ break;
+ case RTW89_FW_AFE_CAT_BB1:
+ rtw89_phy_write32_idx(rtwdev, addr, mask, val, RTW89_PHY_1);
+ break;
+ default:
+ rtw89_warn(rtwdev,
+ "unexpected AFE writing action %u\n", action);
+ break;
+ }
+ break;
+ case RTW89_FW_AFE_ACTION_POLL:
+ for (poll = 0; poll <= 10; poll++) {
+ /* For CAT_BB, AFE reads the register with mcu_offset 0,
+ * so both CAT_MAC and CAT_BB use the same method.
+ */
+ rpt = rtw89_read32_mask(rtwdev, addr, mask);
+ if (rpt == val)
+ goto poll_done;
+
+ fsleep(1);
+ }
+ rtw89_warn(rtwdev, "failed to poll AFE cat=%u addr=0x%x mask=0x%x\n",
+ cat, addr, mask);
+poll_done:
+ break;
+ case RTW89_FW_AFE_ACTION_DELAY:
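+ /* DELAY entries reuse the addr field as the sleep time passed to fsleep() */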
+ fsleep(addr);
+ break;
+ }
+ }
+}
+
static u32 rtw89_phy_nctl_poll(struct rtw89_dev *rtwdev)
{
rtw89_phy_write32(rtwdev, 0x8080, 0x4);
@@ -2943,7 +3028,7 @@ static void __rtw89_phy_c2h_ra_rpt_iter(struct rtw89_sta_link *rtwsta_link,
}
if (mode == RTW89_RA_RPT_MODE_LEGACY) {
- valid = rtw89_ra_report_to_bitrate(rtwdev, rate, &legacy_bitrate);
+ valid = rtw89_legacy_rate_to_bitrate(rtwdev, rate, &legacy_bitrate);
if (!valid)
return;
}
@@ -3087,6 +3172,34 @@ void (* const rtw89_phy_c2h_dm_handler[])(struct rtw89_dev *rtwdev,
[RTW89_PHY_C2H_DM_FUNC_FW_SCAN] = rtw89_phy_c2h_fw_scan_rpt,
};
+static
+void rtw89_phy_c2h_rfk_tas_pwr(struct rtw89_dev *rtwdev,
+ const struct rtw89_c2h_rf_tas_rpt_log *content)
+{
+ const enum rtw89_sar_sources src = rtwdev->sar.src;
+ struct rtw89_tas_info *tas = &rtwdev->tas;
+ u64 linear = 0;
+ u32 i, cur_idx;
+ s16 txpwr;
+
+ if (!tas->enable || src == RTW89_SAR_SOURCE_NONE)
+ return;
+
+ cur_idx = le32_to_cpu(content->cur_idx);
+ for (i = 0; i < cur_idx; i++) {
+ txpwr = le16_to_cpu(content->txpwr_history[i]);
+ linear += rtw89_db_quarter_to_linear(txpwr);
+
+ rtw89_debug(rtwdev, RTW89_DBG_SAR,
+ "tas: index: %u, txpwr: %d\n", i, txpwr);
+ }
+
+ if (cur_idx == 0)
+ tas->instant_txpwr = rtw89_db_to_linear(0);
+ else
+ tas->instant_txpwr = DIV_ROUND_DOWN_ULL(linear, cur_idx);
+}
+
static void rtw89_phy_c2h_rfk_rpt_log(struct rtw89_dev *rtwdev,
enum rtw89_phy_c2h_rfk_log_func func,
void *content, u16 len)
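Note that the averaging above deliberately happens in the linear domain: each history entry is a quarter-dB TX power converted by rtw89_db_quarter_to_linear() before summing, since dB values cannot be averaged directly; tas->instant_txpwr thus ends up as the mean linear power over cur_idx samples, or the linear value of 0 dB when the history is empty.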
@@ -3338,6 +3451,13 @@ static void rtw89_phy_c2h_rfk_rpt_log(struct rtw89_dev *rtwdev,
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt power_d[1] = %*ph\n",
(int)sizeof(txgapk->power_d[1]), txgapk->power_d[1]);
return;
+ case RTW89_PHY_C2H_RFK_LOG_FUNC_TAS_PWR:
+ if (len != sizeof(struct rtw89_c2h_rf_tas_rpt_log))
+ goto out;
+
+ rtw89_phy_c2h_rfk_tas_pwr(rtwdev, content);
+
+ return;
default:
break;
}
@@ -3390,9 +3510,6 @@ static void rtw89_phy_c2h_rfk_log(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
u16 chunk_len;
bool handled;
- if (!rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK))
- return;
-
log_ptr += sizeof(*c2h_hdr);
len -= sizeof(*c2h_hdr);
@@ -3469,6 +3586,13 @@ rtw89_phy_c2h_rfk_log_txgapk(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32
RTW89_PHY_C2H_RFK_LOG_FUNC_TXGAPK, "TXGAPK");
}
+static void
+rtw89_phy_c2h_rfk_log_tas_pwr(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
+{
+ rtw89_phy_c2h_rfk_log(rtwdev, c2h, len,
+ RTW89_PHY_C2H_RFK_LOG_FUNC_TAS_PWR, "TAS");
+}
+
static
void (* const rtw89_phy_c2h_rfk_log_handler[])(struct rtw89_dev *rtwdev,
struct sk_buff *c2h, u32 len) = {
@@ -3478,6 +3602,7 @@ void (* const rtw89_phy_c2h_rfk_log_handler[])(struct rtw89_dev *rtwdev,
[RTW89_PHY_C2H_RFK_LOG_FUNC_RXDCK] = rtw89_phy_c2h_rfk_log_rxdck,
[RTW89_PHY_C2H_RFK_LOG_FUNC_TSSI] = rtw89_phy_c2h_rfk_log_tssi,
[RTW89_PHY_C2H_RFK_LOG_FUNC_TXGAPK] = rtw89_phy_c2h_rfk_log_txgapk,
+ [RTW89_PHY_C2H_RFK_LOG_FUNC_TAS_PWR] = rtw89_phy_c2h_rfk_log_tas_pwr,
};
static
@@ -3540,39 +3665,19 @@ rtw89_phy_c2h_rfk_report_state(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u3
}
static void
-rtw89_phy_c2h_rfk_log_tas_pwr(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
+rtw89_phy_c2h_rfk_report_tas_pwr(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
- const struct rtw89_c2h_rf_tas_info *rf_tas =
+ const struct rtw89_c2h_rf_tas_info *report =
(const struct rtw89_c2h_rf_tas_info *)c2h->data;
- const enum rtw89_sar_sources src = rtwdev->sar.src;
- struct rtw89_tas_info *tas = &rtwdev->tas;
- u64 linear = 0;
- u32 i, cur_idx;
- s16 txpwr;
-
- if (!tas->enable || src == RTW89_SAR_SOURCE_NONE)
- return;
-
- cur_idx = le32_to_cpu(rf_tas->cur_idx);
- for (i = 0; i < cur_idx; i++) {
- txpwr = (s16)le16_to_cpu(rf_tas->txpwr_history[i]);
- linear += rtw89_db_quarter_to_linear(txpwr);
-
- rtw89_debug(rtwdev, RTW89_DBG_SAR,
- "tas: index: %u, txpwr: %d\n", i, txpwr);
- }
- if (cur_idx == 0)
- tas->instant_txpwr = rtw89_db_to_linear(0);
- else
- tas->instant_txpwr = DIV_ROUND_DOWN_ULL(linear, cur_idx);
+ rtw89_phy_c2h_rfk_tas_pwr(rtwdev, &report->content);
}
static
void (* const rtw89_phy_c2h_rfk_report_handler[])(struct rtw89_dev *rtwdev,
struct sk_buff *c2h, u32 len) = {
[RTW89_PHY_C2H_RFK_REPORT_FUNC_STATE] = rtw89_phy_c2h_rfk_report_state,
- [RTW89_PHY_C2H_RFK_LOG_TAS_PWR] = rtw89_phy_c2h_rfk_log_tas_pwr,
+ [RTW89_PHY_C2H_RFK_REPORT_FUNC_TAS_PWR] = rtw89_phy_c2h_rfk_report_tas_pwr,
};
bool rtw89_phy_c2h_chk_atomic(struct rtw89_dev *rtwdev, u8 class, u8 func)
@@ -3626,12 +3731,11 @@ void rtw89_phy_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
handler = rtw89_phy_c2h_dm_handler[func];
break;
default:
- rtw89_info(rtwdev, "PHY c2h class %d not support\n", class);
- return;
+ break;
}
if (!handler) {
- rtw89_info(rtwdev, "PHY c2h class %d func %d not support\n", class,
- func);
+ rtw89_info_once(rtwdev, "PHY c2h class %d func %d not support\n",
+ class, func);
return;
}
handler(rtwdev, skb, len);
@@ -5497,6 +5601,34 @@ static void rtw89_phy_ifs_clm_set_th_reg(struct rtw89_dev *rtwdev,
i + 1, env->ifs_clm_th_l[i], env->ifs_clm_th_h[i]);
}
+static void __rtw89_phy_nhm_setting_init(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb)
+{
+ const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
+ struct rtw89_env_monitor_info *env = &bb->env_monitor;
+ const struct rtw89_ccx_regs *ccx = phy->ccx;
+
+ env->nhm_include_cca = false;
+ env->nhm_mntr_time = 0;
+ env->nhm_sum = 0;
+
+ rtw89_phy_write32_idx_set(rtwdev, ccx->nhm_config, ccx->nhm_en_mask, bb->phy_idx);
+ rtw89_phy_write32_idx_set(rtwdev, ccx->nhm_method, ccx->nhm_pwr_method_msk,
+ bb->phy_idx);
+}
+
+void rtw89_phy_nhm_setting_init(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_bb_ctx *bb;
+
+ if (!chip->support_noise)
+ return;
+
+ rtw89_for_each_active_bb(rtwdev, bb)
+ __rtw89_phy_nhm_setting_init(rtwdev, bb);
+}
+
static void rtw89_phy_ifs_clm_setting_init(struct rtw89_dev *rtwdev,
struct rtw89_bb_ctx *bb)
{
@@ -5558,7 +5690,7 @@ static int rtw89_phy_ccx_racing_ctrl(struct rtw89_dev *rtwdev,
}
static void rtw89_phy_ccx_trigger(struct rtw89_dev *rtwdev,
- struct rtw89_bb_ctx *bb)
+ struct rtw89_bb_ctx *bb, u8 sel)
{
const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
struct rtw89_env_monitor_info *env = &bb->env_monitor;
@@ -5568,10 +5700,17 @@ static void rtw89_phy_ccx_trigger(struct rtw89_dev *rtwdev,
bb->phy_idx);
rtw89_phy_write32_idx(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 0,
bb->phy_idx);
+ if (sel & RTW89_PHY_ENV_MON_NHM)
+ rtw89_phy_write32_idx_clr(rtwdev, ccx->nhm_config,
+ ccx->nhm_en_mask, bb->phy_idx);
+
rtw89_phy_write32_idx(rtwdev, ccx->ifs_cnt_addr, ccx->ifs_clm_cnt_clear_mask, 1,
bb->phy_idx);
rtw89_phy_write32_idx(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 1,
bb->phy_idx);
+ if (sel & RTW89_PHY_ENV_MON_NHM)
+ rtw89_phy_write32_idx_set(rtwdev, ccx->nhm_config,
+ ccx->nhm_en_mask, bb->phy_idx);
env->ccx_ongoing = true;
}
@@ -5642,6 +5781,125 @@ static void rtw89_phy_ifs_clm_get_utility(struct rtw89_dev *rtwdev,
env->ifs_clm_cca_avg[i]);
}
+static u8 rtw89_nhm_weighted_avg(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb)
+{
+ struct rtw89_env_monitor_info *env = &bb->env_monitor;
+ u8 nhm_weight[RTW89_NHM_RPT_NUM];
+ u32 nhm_weighted_sum = 0;
+ u8 weight_zero;
+ u8 i;
+
+ if (env->nhm_sum == 0)
+ return 0;
+
+ weight_zero = clamp_t(u16, env->nhm_th[0] - RTW89_NHM_WEIGHT_OFFSET, 0, U8_MAX);
+
+ for (i = 0; i < RTW89_NHM_RPT_NUM; i++) {
+ if (i == 0)
+ nhm_weight[i] = weight_zero;
+ else if (i == (RTW89_NHM_RPT_NUM - 1))
+ nhm_weight[i] = env->nhm_th[i - 1] + RTW89_NHM_WEIGHT_OFFSET;
+ else
+ nhm_weight[i] = (env->nhm_th[i - 1] + env->nhm_th[i]) / 2;
+ }
+
+ if (rtwdev->chip->chip_id == RTL8852A || rtwdev->chip->chip_id == RTL8852B ||
+ rtwdev->chip->chip_id == RTL8852C) {
+ if (env->nhm_th[RTW89_NHM_TH_NUM - 1] == RTW89_NHM_WA_TH) {
+ nhm_weight[RTW89_NHM_RPT_NUM - 1] =
+ env->nhm_th[RTW89_NHM_TH_NUM - 2] +
+ RTW89_NHM_WEIGHT_OFFSET;
+ nhm_weight[RTW89_NHM_RPT_NUM - 2] =
+ nhm_weight[RTW89_NHM_RPT_NUM - 1];
+ }
+
+ env->nhm_result[0] += env->nhm_result[RTW89_NHM_RPT_NUM - 1];
+ env->nhm_result[RTW89_NHM_RPT_NUM - 1] = 0;
+ }
+
+ for (i = 0; i < RTW89_NHM_RPT_NUM; i++)
+ nhm_weighted_sum += env->nhm_result[i] * nhm_weight[i];
+
+ return (nhm_weighted_sum / env->nhm_sum) >> RTW89_NHM_TH_FACTOR;
+}
+
+static void __rtw89_phy_nhm_get_result(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb, enum rtw89_band hw_band,
+ u16 ch_hw_value)
+{
+ const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
+ struct rtw89_env_monitor_info *env = &bb->env_monitor;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_ccx_regs *ccx = phy->ccx;
+ struct ieee80211_supported_band *sband;
+ const struct rtw89_reg_def *nhm_rpt;
+ enum nl80211_band band;
+ u32 sum = 0;
+ u8 chan_idx;
+ u8 nhm_pwr;
+ u8 i;
+
+ if (!rtw89_phy_read32_idx(rtwdev, ccx->nhm, ccx->nhm_ready, bb->phy_idx)) {
+ rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "[NHM] Get NHM report Fail\n");
+ return;
+ }
+
+ for (i = 0; i < RTW89_NHM_RPT_NUM; i++) {
+ nhm_rpt = &(*chip->nhm_report)[i];
+
+ env->nhm_result[i] =
+ rtw89_phy_read32_idx(rtwdev, nhm_rpt->addr,
+ nhm_rpt->mask, bb->phy_idx);
+ sum += env->nhm_result[i];
+ }
+ env->nhm_sum = sum;
+ nhm_pwr = rtw89_nhm_weighted_avg(rtwdev, bb);
+
+ if (!ch_hw_value)
+ return;
+
+ band = rtw89_hw_to_nl80211_band(hw_band);
+ sband = rtwdev->hw->wiphy->bands[band];
+ if (!sband)
+ return;
+
+ for (chan_idx = 0; chan_idx < sband->n_channels; chan_idx++) {
+ struct ieee80211_channel *channel;
+ struct rtw89_nhm_report *rpt;
+ struct list_head *nhm_list;
+
+ channel = &sband->channels[chan_idx];
+ if (channel->hw_value != ch_hw_value)
+ continue;
+
+ rpt = &env->nhm_his[hw_band][chan_idx];
+ nhm_list = &env->nhm_rpt_list;
+
+ rpt->channel = channel;
+ rpt->noise = nhm_pwr;
+
+ if (list_empty(&rpt->list))
+ list_add_tail(&rpt->list, nhm_list);
+
+ return;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "[NHM] channel not found\n");
+}
+
+void rtw89_phy_nhm_get_result(struct rtw89_dev *rtwdev, enum rtw89_band hw_band,
+ u16 ch_hw_value)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_bb_ctx *bb;
+
+ if (!chip->support_noise)
+ return;
+
+ rtw89_for_each_active_bb(rtwdev, bb)
+ __rtw89_phy_nhm_get_result(rtwdev, bb, hw_band, ch_hw_value);
+}
+
static bool rtw89_phy_ifs_clm_get_result(struct rtw89_dev *rtwdev,
struct rtw89_bb_ctx *bb)
{
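A worked pass through the weighting above, assuming the non-8852 path and the nhm_th_11k table added further below: with RTW89_NHM_TH_FACTOR = 1 the programmed thresholds are half-dB values {36, 42, 48, 54, 60, 70, 80, 90, 100, 110, 120}, so nhm_weight[0] = 36 - 2 = 34, nhm_weight[11] = 120 + 2 = 122, and the middle weights are midpoints, e.g. nhm_weight[1] = (36 + 42) / 2 = 39. If every counted sample lands in report bin 1, the weighted sum divided by nhm_sum is 39 half-dB, and the function returns 39 >> 1 = 19.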
@@ -5742,6 +6000,107 @@ static bool rtw89_phy_ifs_clm_get_result(struct rtw89_dev *rtwdev,
return true;
}
+static void rtw89_phy_nhm_th_update(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb)
+{
+ struct rtw89_env_monitor_info *env = &bb->env_monitor;
+ static const u8 nhm_th_11k[RTW89_NHM_RPT_NUM] = {
+ 18, 21, 24, 27, 30, 35, 40, 45, 50, 55, 60, 0
+ };
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_reg_def *nhm_th;
+ u8 i;
+
+ for (i = 0; i < RTW89_NHM_RPT_NUM; i++)
+ env->nhm_th[i] = nhm_th_11k[i] << RTW89_NHM_TH_FACTOR;
+
+ if (chip->chip_id == RTL8852A || chip->chip_id == RTL8852B ||
+ chip->chip_id == RTL8852C)
+ env->nhm_th[RTW89_NHM_TH_NUM - 1] = RTW89_NHM_WA_TH;
+
+ for (i = 0; i < RTW89_NHM_TH_NUM; i++) {
+ nhm_th = &(*chip->nhm_th)[i];
+
+ rtw89_phy_write32_idx(rtwdev, nhm_th->addr, nhm_th->mask,
+ env->nhm_th[i], bb->phy_idx);
+ }
+}
+
+static int rtw89_phy_nhm_set(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb,
+ struct rtw89_ccx_para_info *para)
+{
+ const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
+ struct rtw89_env_monitor_info *env = &bb->env_monitor;
+ const struct rtw89_ccx_regs *ccx = phy->ccx;
+ u32 unit_idx = 0;
+ u32 period = 0;
+
+ if (para->mntr_time == 0) {
+ rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
+ "[NHM] MNTR_TIME is 0\n");
+ return -EINVAL;
+ }
+
+ if (rtw89_phy_ccx_racing_ctrl(rtwdev, bb, para->rac_lv))
+ return -EINVAL;
+
+ rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
+ "[NHM]nhm_incld_cca=%d, mntr_time=%d ms\n",
+ para->nhm_incld_cca, para->mntr_time);
+
+ if (para->mntr_time != env->nhm_mntr_time) {
+ rtw89_phy_ccx_ms_to_period_unit(rtwdev, para->mntr_time,
+ &period, &unit_idx);
+ rtw89_phy_write32_idx(rtwdev, ccx->nhm_config,
+ ccx->nhm_period_mask, period, bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, ccx->nhm_config,
+ ccx->nhm_unit_mask, unit_idx, bb->phy_idx);
+
+ env->nhm_mntr_time = para->mntr_time;
+ env->ccx_period = period;
+ env->ccx_unit_idx = unit_idx;
+ }
+
+ if (para->nhm_incld_cca != env->nhm_include_cca) {
+ rtw89_phy_write32_idx(rtwdev, ccx->nhm_config,
+ ccx->nhm_include_cca_mask, para->nhm_incld_cca,
+ bb->phy_idx);
+
+ env->nhm_include_cca = para->nhm_incld_cca;
+ }
+
+ rtw89_phy_nhm_th_update(rtwdev, bb);
+
+ return 0;
+}
+
+static void __rtw89_phy_nhm_trigger(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb)
+{
+ struct rtw89_ccx_para_info para = {
+ .mntr_time = RTW89_NHM_MNTR_TIME,
+ .rac_lv = RTW89_RAC_LV_1,
+ .nhm_incld_cca = true,
+ };
+
+ rtw89_phy_ccx_racing_release(rtwdev, bb);
+
+ rtw89_phy_nhm_set(rtwdev, bb, &para);
+ rtw89_phy_ccx_trigger(rtwdev, bb, RTW89_PHY_ENV_MON_NHM);
+}
+
+void rtw89_phy_nhm_trigger(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_bb_ctx *bb;
+
+ if (!chip->support_noise)
+ return;
+
+ rtw89_for_each_active_bb(rtwdev, bb)
+ __rtw89_phy_nhm_trigger(rtwdev, bb);
+}
+
static int rtw89_phy_ifs_clm_set(struct rtw89_dev *rtwdev,
struct rtw89_bb_ctx *bb,
struct rtw89_ccx_para_info *para)
@@ -5816,7 +6175,7 @@ static void __rtw89_phy_env_monitor_track(struct rtw89_dev *rtwdev,
if (rtw89_phy_ifs_clm_set(rtwdev, bb, &para) == 0)
chk_result |= RTW89_PHY_ENV_MON_IFS_CLM;
if (chk_result)
- rtw89_phy_ccx_trigger(rtwdev, bb);
+ rtw89_phy_ccx_trigger(rtwdev, bb, chk_result);
rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
"get_result=0x%x, chk_result:0x%x\n",
@@ -5930,8 +6289,6 @@ static void __rtw89_physts_parsing_init(struct rtw89_dev *rtwdev,
val |= BIT(RTW89_PHYSTS_IE13_DL_MU_DEF) |
BIT(RTW89_PHYSTS_IE01_CMN_OFDM);
} else if (i >= RTW89_CCK_PKT) {
- val |= BIT(RTW89_PHYSTS_IE09_FTR_0);
-
val &= ~(GENMASK(RTW89_PHYSTS_IE07_CMN_EXT_PATH_D,
RTW89_PHYSTS_IE04_CMN_EXT_PATH_A));
@@ -6910,6 +7267,7 @@ void rtw89_phy_dm_init(struct rtw89_dev *rtwdev)
rtw89_chip_bb_sethw(rtwdev);
rtw89_phy_env_monitor_init(rtwdev);
+ rtw89_phy_nhm_setting_init(rtwdev);
rtw89_physts_parsing_init(rtwdev);
rtw89_phy_dig_init(rtwdev);
rtw89_phy_cfo_init(rtwdev);
@@ -6935,6 +7293,43 @@ void rtw89_phy_dm_reinit(struct rtw89_dev *rtwdev)
rtw89_physts_parsing_init(rtwdev);
}
+static void __rtw89_phy_dm_init_data(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb)
+{
+ struct rtw89_env_monitor_info *env = &bb->env_monitor;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct ieee80211_supported_band *sband;
+ enum rtw89_band hw_band;
+ enum nl80211_band band;
+ u8 idx;
+
+ if (!chip->support_noise)
+ return;
+
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ sband = rtwdev->hw->wiphy->bands[band];
+ if (!sband)
+ continue;
+
+ hw_band = rtw89_nl80211_to_hw_band(band);
+ env->nhm_his[hw_band] =
+ devm_kcalloc(rtwdev->dev, sband->n_channels,
+ sizeof(*env->nhm_his[0]), GFP_KERNEL);
+
+ for (idx = 0; idx < sband->n_channels; idx++)
+ INIT_LIST_HEAD(&env->nhm_his[hw_band][idx].list);
+
+ INIT_LIST_HEAD(&env->nhm_rpt_list);
+ }
+}
+
+void rtw89_phy_dm_init_data(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_bb_ctx *bb;
+
+ rtw89_for_each_capab_bb(rtwdev, bb)
+ __rtw89_phy_dm_init_data(rtwdev, bb);
+}
+
void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link)
{
@@ -7590,6 +7985,15 @@ static const struct rtw89_ccx_regs rtw89_ccx_regs_ax = {
.ifs_total_addr = R_IFSCNT,
.ifs_cnt_done_mask = B_IFSCNT_DONE_MSK,
.ifs_total_mask = B_IFSCNT_TOTAL_CNT_MSK,
+ .nhm = R_NHM_AX,
+ .nhm_ready = B_NHM_READY_MSK,
+ .nhm_config = R_NHM_CFG,
+ .nhm_period_mask = B_NHM_PERIOD_MSK,
+ .nhm_unit_mask = B_NHM_COUNTER_MSK,
+ .nhm_include_cca_mask = B_NHM_INCLUDE_CCA_MSK,
+ .nhm_en_mask = B_NHM_EN_MSK,
+ .nhm_method = R_NHM_TH9,
+ .nhm_pwr_method_msk = B_NHM_PWDB_METHOD_MSK,
};
static const struct rtw89_physts_regs rtw89_physts_regs_ax = {
diff --git a/drivers/net/wireless/realtek/rtw89/phy.h b/drivers/net/wireless/realtek/rtw89/phy.h
index dc156376d951..9caacffd0af8 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.h
+++ b/drivers/net/wireless/realtek/rtw89/phy.h
@@ -149,13 +149,14 @@ enum rtw89_phy_c2h_rfk_log_func {
RTW89_PHY_C2H_RFK_LOG_FUNC_RXDCK = 3,
RTW89_PHY_C2H_RFK_LOG_FUNC_TSSI = 4,
RTW89_PHY_C2H_RFK_LOG_FUNC_TXGAPK = 5,
+ RTW89_PHY_C2H_RFK_LOG_FUNC_TAS_PWR = 9,
RTW89_PHY_C2H_RFK_LOG_FUNC_NUM,
};
enum rtw89_phy_c2h_rfk_report_func {
RTW89_PHY_C2H_RFK_REPORT_FUNC_STATE = 0,
- RTW89_PHY_C2H_RFK_LOG_TAS_PWR = 6,
+ RTW89_PHY_C2H_RFK_REPORT_FUNC_TAS_PWR = 6,
};
enum rtw89_phy_c2h_dm_func {
@@ -188,6 +189,12 @@ enum rtw89_env_monitor_result_level {
RTW89_PHY_ENV_MON_EDCCA_CLM = BIT(4),
};
+#define RTW89_NHM_WEIGHT_OFFSET 2
+#define RTW89_NHM_WA_TH (109 << 1)
+#define RTW89_NOISE_DEFAULT -96
+#define RTW89_NHM_MNTR_TIME 40
+#define RTW89_NHM_TH_FACTOR 1
+
#define CCX_US_BASE_RATIO 4
enum rtw89_ccx_unit {
RTW89_CCX_4_US = 0,
@@ -428,6 +435,15 @@ struct rtw89_ccx_regs {
u32 ifs_total_addr;
u32 ifs_cnt_done_mask;
u32 ifs_total_mask;
+ u32 nhm;
+ u32 nhm_ready;
+ u32 nhm_config;
+ u32 nhm_period_mask;
+ u32 nhm_unit_mask;
+ u32 nhm_include_cca_mask;
+ u32 nhm_en_mask;
+ u32 nhm_method;
+ u32 nhm_pwr_method_msk;
};
struct rtw89_physts_regs {
@@ -814,6 +830,7 @@ bool rtw89_phy_write_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
bool rtw89_phy_write_rf_v2(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
u32 addr, u32 mask, u32 data);
void rtw89_phy_init_bb_reg(struct rtw89_dev *rtwdev);
+void rtw89_phy_init_bb_afe(struct rtw89_dev *rtwdev);
void rtw89_phy_init_rf_reg(struct rtw89_dev *rtwdev, bool noio);
void rtw89_phy_config_rf_reg_v1(struct rtw89_dev *rtwdev,
const struct rtw89_reg2_def *reg,
@@ -821,6 +838,7 @@ void rtw89_phy_config_rf_reg_v1(struct rtw89_dev *rtwdev,
void *extra_data);
void rtw89_phy_dm_init(struct rtw89_dev *rtwdev);
void rtw89_phy_dm_reinit(struct rtw89_dev *rtwdev);
+void rtw89_phy_dm_init_data(struct rtw89_dev *rtwdev);
void rtw89_phy_write32_idx(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
u32 data, enum rtw89_phy_idx phy_idx);
void rtw89_phy_write32_idx_set(struct rtw89_dev *rtwdev, u32 addr, u32 bits,
@@ -1038,5 +1056,9 @@ enum rtw89_rf_path rtw89_phy_get_syn_sel(struct rtw89_dev *rtwdev,
u8 rtw89_rfk_chan_lookup(struct rtw89_dev *rtwdev,
const struct rtw89_rfk_chan_desc *desc, u8 desc_nr,
const struct rtw89_chan *target_chan);
+void rtw89_phy_nhm_setting_init(struct rtw89_dev *rtwdev);
+void rtw89_phy_nhm_get_result(struct rtw89_dev *rtwdev, enum rtw89_band hw_band,
+ u16 ch_hw_value);
+void rtw89_phy_nhm_trigger(struct rtw89_dev *rtwdev);
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/phy_be.c b/drivers/net/wireless/realtek/rtw89/phy_be.c
index d321cf1fc485..3316a38a62d0 100644
--- a/drivers/net/wireless/realtek/rtw89/phy_be.c
+++ b/drivers/net/wireless/realtek/rtw89/phy_be.c
@@ -63,6 +63,15 @@ static const struct rtw89_ccx_regs rtw89_ccx_regs_be = {
.ifs_total_addr = R_IFSCNT_V1,
.ifs_cnt_done_mask = B_IFSCNT_DONE_MSK,
.ifs_total_mask = B_IFSCNT_TOTAL_CNT_MSK,
+ .nhm = R_NHM_BE,
+ .nhm_ready = B_NHM_READY_BE_MSK,
+ .nhm_config = R_NHM_CFG,
+ .nhm_period_mask = B_NHM_PERIOD_MSK,
+ .nhm_unit_mask = B_NHM_COUNTER_MSK,
+ .nhm_include_cca_mask = B_NHM_INCLUDE_CCA_MSK,
+ .nhm_en_mask = B_NHM_EN_MSK,
+ .nhm_method = R_NHM_TH9,
+ .nhm_pwr_method_msk = B_NHM_PWDB_METHOD_MSK,
};
static const struct rtw89_physts_regs rtw89_physts_regs_be = {
diff --git a/drivers/net/wireless/realtek/rtw89/ps.c b/drivers/net/wireless/realtek/rtw89/ps.c
index 652f8fc81b79..cf58121eb541 100644
--- a/drivers/net/wireless/realtek/rtw89/ps.c
+++ b/drivers/net/wireless/realtek/rtw89/ps.c
@@ -119,6 +119,9 @@ static void __rtw89_enter_lps_link(struct rtw89_dev *rtwdev,
rtw89_btc_ntfy_radio_state(rtwdev, BTC_RFCTRL_FW_CTRL);
rtw89_fw_h2c_lps_parm(rtwdev, &lps_param);
+
+ if (RTW89_CHK_FW_FEATURE(BEACON_TRACKING, &rtwdev->fw))
+ rtw89_fw_h2c_pwr_lvl(rtwdev, rtwvif_link);
}
static void __rtw89_leave_lps(struct rtw89_dev *rtwdev,
diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h
index de81103a072f..ed1d958bc49e 100644
--- a/drivers/net/wireless/realtek/rtw89/reg.h
+++ b/drivers/net/wireless/realtek/rtw89/reg.h
@@ -3370,6 +3370,10 @@
#define B_AX_CSIPRT_HESU_AID_EN BIT(25)
#define B_AX_CSIPRT_VHTSU_AID_EN BIT(24)
+#define R_AX_BCN_PSR_RPT_P0 0xCE84
+#define R_AX_BCN_PSR_RPT_P0_C1 0xEE84
+#define B_AX_BCAID_P0_MASK GENMASK(10, 0)
+
#define R_AX_RX_STATE_MONITOR 0xCEF0
#define R_AX_RX_STATE_MONITOR_C1 0xEEF0
#define B_AX_RX_STATE_MONITOR_MASK GENMASK(31, 0)
@@ -6258,6 +6262,11 @@
#define B_BE_PTCL_TOP_ERR_IND BIT(1)
#define B_BE_SCHEDULE_TOP_ERR_IND BIT(0)
+#define R_BE_CMAC_FW_TRIGGER_IDCT_ISR 0x10168
+#define R_BE_CMAC_FW_TRIGGER_IDCT_ISR_C1 0x14168
+#define B_BE_CMAC_FW_ERR_IDCT_IMR BIT(31)
+#define B_BE_CMAC_FW_TRIG_IDCT BIT(0)
+
#define R_BE_SER_L0_DBG_CNT 0x10170
#define R_BE_SER_L0_DBG_CNT_C1 0x14170
#define B_BE_SER_L0_PHYINTF_CNT_MASK GENMASK(31, 24)
@@ -7494,6 +7503,10 @@
#define R_BE_DRV_INFO_OPTION_C1 0x15470
#define B_BE_DRV_INFO_PHYRPT_EN BIT(0)
+#define R_BE_BCN_PSR_RPT_P0 0x11484
+#define R_BE_BCN_PSR_RPT_P0_C1 0x15484
+#define B_BE_BCAID_P0_MASK GENMASK(10, 0)
+
#define R_BE_RX_ERR_ISR 0x114F4
#define R_BE_RX_ERR_ISR_C1 0x154F4
#define B_BE_RX_ERR_TRIG_ACT_TO BIT(9)
@@ -8092,6 +8105,26 @@
#define B_MEASUREMENT_TRIG_MSK BIT(2)
#define B_CCX_TRIG_OPT_MSK BIT(1)
#define B_CCX_EN_MSK BIT(0)
+#define R_NHM_CFG 0x0C08
+#define B_NHM_PERIOD_MSK GENMASK(15, 0)
+#define B_NHM_COUNTER_MSK GENMASK(17, 16)
+#define B_NHM_EN_MSK BIT(18)
+#define B_NHM_INCLUDE_CCA_MSK BIT(19)
+#define B_NHM_TH0_MSK GENMASK(31, 24)
+#define R_NHM_TH1 0x0C0C
+#define B_NHM_TH1_MSK GENMASK(7, 0)
+#define B_NHM_TH2_MSK GENMASK(15, 8)
+#define B_NHM_TH3_MSK GENMASK(23, 16)
+#define B_NHM_TH4_MSK GENMASK(31, 24)
+#define R_NHM_TH5 0x0C10
+#define B_NHM_TH5_MSK GENMASK(7, 0)
+#define B_NHM_TH6_MSK GENMASK(15, 8)
+#define B_NHM_TH7_MSK GENMASK(23, 16)
+#define B_NHM_TH8_MSK GENMASK(31, 24)
+#define R_NHM_TH9 0x0C14
+#define B_NHM_TH9_MSK GENMASK(7, 0)
+#define B_NHM_TH10_MSK GENMASK(15, 8)
+#define B_NHM_PWDB_METHOD_MSK GENMASK(17, 16)
#define R_FAHM 0x0C1C
#define B_RXTD_CKEN BIT(2)
#define R_IFS_COUNTER 0x0C28
@@ -8161,6 +8194,8 @@
#define R_BRK_ASYNC_RST_EN_1 0x0DC0
#define R_BRK_ASYNC_RST_EN_2 0x0DC4
#define R_BRK_ASYNC_RST_EN_3 0x0DC8
+#define R_NHM_BE 0x0EA4
+#define B_NHM_READY_BE_MSK BIT(16)
#define R_CTLTOP 0x1008
#define B_CTLTOP_ON BIT(23)
#define B_CTLTOP_VAL GENMASK(15, 12)
@@ -8216,6 +8251,26 @@
#define B_SWSI_R_BUSY_V1 BIT(25)
#define B_SWSI_R_DATA_DONE_V1 BIT(26)
#define R_TX_COUNTER 0x1A40
+#define R_NHM_CNT0 0x1A88
+#define B_NHM_CNT0_MSK GENMASK(15, 0)
+#define B_NHM_CNT1_MSK GENMASK(31, 16)
+#define R_NHM_CNT2 0x1A8C
+#define B_NHM_CNT2_MSK GENMASK(15, 0)
+#define B_NHM_CNT3_MSK GENMASK(31, 16)
+#define R_NHM_CNT4 0x1A90
+#define B_NHM_CNT4_MSK GENMASK(15, 0)
+#define B_NHM_CNT5_MSK GENMASK(31, 16)
+#define R_NHM_CNT6 0x1A94
+#define B_NHM_CNT6_MSK GENMASK(15, 0)
+#define B_NHM_CNT7_MSK GENMASK(31, 16)
+#define R_NHM_CNT8 0x1A98
+#define B_NHM_CNT8_MSK GENMASK(15, 0)
+#define B_NHM_CNT9_MSK GENMASK(31, 16)
+#define R_NHM_CNT10 0x1A9C
+#define B_NHM_CNT10_MSK GENMASK(15, 0)
+#define B_NHM_CNT11_MSK GENMASK(31, 16)
+#define R_NHM_AX 0x1AA4
+#define B_NHM_READY_MSK BIT(16)
#define R_IFS_CLM_TX_CNT 0x1ACC
#define R_IFS_CLM_TX_CNT_V1 0x0ECC
#define B_IFS_CLM_EDCCA_EXCLUDE_CCA_FA_MSK GENMASK(31, 16)
@@ -9126,6 +9181,7 @@
#define B_COEF_SEL_MDPD BIT(8)
#define B_COEF_SEL_MDPD_V1 GENMASK(9, 8)
#define B_COEF_SEL_EN BIT(31)
+#define R_CFIR_COEF 0x810c
#define R_CFIR_SYS 0x8120
#define R_IQK_RES 0x8124
#define B_IQK_RES_K BIT(28)
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b.c b/drivers/net/wireless/realtek/rtw89/rtw8851b.c
index 393df2b0dcae..edcbda124916 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851b.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851b.c
@@ -2537,6 +2537,7 @@ static const struct rtw89_chip_ops rtw8851b_chip_ops = {
.query_rxdesc = rtw89_core_query_rxdesc,
.fill_txdesc = rtw89_core_fill_txdesc,
.fill_txdesc_fwcmd = rtw89_core_fill_txdesc,
+ .get_ch_dma = rtw89_core_get_ch_dma,
.cfg_ctrl_path = rtw89_mac_cfg_ctrl_path,
.mac_cfg_gnt = rtw89_mac_cfg_gnt,
.stop_sch_tx = rtw89_mac_stop_sch_tx,
@@ -2628,6 +2629,7 @@ const struct rtw89_chip_info rtw8851b_chip_info = {
.support_ant_gain = false,
.support_tas = false,
.support_sar_by_ant = false,
+ .support_noise = false,
.ul_tb_waveform_ctrl = true,
.ul_tb_pwr_diff = false,
.rx_freq_frome_ie = true,
@@ -2689,6 +2691,8 @@ const struct rtw89_chip_info rtw8851b_chip_info = {
.cfo_hw_comp = true,
.dcfo_comp = &rtw8851b_dcfo_comp,
.dcfo_comp_sft = 12,
+ .nhm_report = NULL,
+ .nhm_th = NULL,
.imr_info = &rtw8851b_imr_info,
.imr_dmac_table = NULL,
.imr_cmac_table = NULL,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c
index 7a319a6c838a..84c46d2f4d85 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c
@@ -17,8 +17,9 @@
#define DPK_RF_REG_NUM_8851B 4
#define DPK_KSET_NUM 4
#define RTW8851B_RXK_GROUP_NR 4
-#define RTW8851B_RXK_GROUP_IDX_NR 2
-#define RTW8851B_TXK_GROUP_NR 1
+#define RTW8851B_RXK_GROUP_IDX_NR 4
+#define RTW8851B_A_TXK_GROUP_NR 2
+#define RTW8851B_G_TXK_GROUP_NR 1
#define RTW8851B_IQK_VER 0x14
#define RTW8851B_IQK_SS 1
#define RTW8851B_LOK_GRAM 10
@@ -114,19 +115,21 @@ static const u32 _tssi_de_mcs_10m[RF_PATH_NUM_8851B] = {0x5830};
static const u32 g_idxrxgain[RTW8851B_RXK_GROUP_NR] = {0x10e, 0x116, 0x28e, 0x296};
static const u32 g_idxattc2[RTW8851B_RXK_GROUP_NR] = {0x0, 0xf, 0x0, 0xf};
static const u32 g_idxrxagc[RTW8851B_RXK_GROUP_NR] = {0x0, 0x1, 0x2, 0x3};
-static const u32 a_idxrxgain[RTW8851B_RXK_GROUP_IDX_NR] = {0x10C, 0x28c};
-static const u32 a_idxattc2[RTW8851B_RXK_GROUP_IDX_NR] = {0xf, 0xf};
-static const u32 a_idxrxagc[RTW8851B_RXK_GROUP_IDX_NR] = {0x4, 0x6};
-static const u32 a_power_range[RTW8851B_TXK_GROUP_NR] = {0x0};
-static const u32 a_track_range[RTW8851B_TXK_GROUP_NR] = {0x6};
-static const u32 a_gain_bb[RTW8851B_TXK_GROUP_NR] = {0x0a};
-static const u32 a_itqt[RTW8851B_TXK_GROUP_NR] = {0x12};
-static const u32 g_power_range[RTW8851B_TXK_GROUP_NR] = {0x0};
-static const u32 g_track_range[RTW8851B_TXK_GROUP_NR] = {0x6};
-static const u32 g_gain_bb[RTW8851B_TXK_GROUP_NR] = {0x10};
-static const u32 g_itqt[RTW8851B_TXK_GROUP_NR] = {0x12};
-
-static const u32 rtw8851b_backup_bb_regs[] = {0xc0d4, 0xc0d8, 0xc0c4, 0xc0ec, 0xc0e8};
+static const u32 a_idxrxgain[RTW8851B_RXK_GROUP_IDX_NR] = {0x10C, 0x112, 0x28c, 0x292};
+static const u32 a_idxattc2[RTW8851B_RXK_GROUP_IDX_NR] = {0xf, 0xf, 0xf, 0xf};
+static const u32 a_idxrxagc[RTW8851B_RXK_GROUP_IDX_NR] = {0x4, 0x5, 0x6, 0x7};
+static const u32 a_power_range[RTW8851B_A_TXK_GROUP_NR] = {0x0, 0x0};
+static const u32 a_track_range[RTW8851B_A_TXK_GROUP_NR] = {0x7, 0x7};
+static const u32 a_gain_bb[RTW8851B_A_TXK_GROUP_NR] = {0x08, 0x0d};
+static const u32 a_itqt[RTW8851B_A_TXK_GROUP_NR] = {0x12, 0x12};
+static const u32 a_att_smxr[RTW8851B_A_TXK_GROUP_NR] = {0x0, 0x2};
+static const u32 g_power_range[RTW8851B_G_TXK_GROUP_NR] = {0x0};
+static const u32 g_track_range[RTW8851B_G_TXK_GROUP_NR] = {0x6};
+static const u32 g_gain_bb[RTW8851B_G_TXK_GROUP_NR] = {0x10};
+static const u32 g_itqt[RTW8851B_G_TXK_GROUP_NR] = {0x12};
+
+static const u32 rtw8851b_backup_bb_regs[] = {
+ 0xc0d4, 0xc0d8, 0xc0c4, 0xc0ec, 0xc0e8, 0x12a0, 0xc0f0};
static const u32 rtw8851b_backup_rf_regs[] = {
0xef, 0xde, 0x0, 0x1e, 0x2, 0x85, 0x90, 0x5};
@@ -139,17 +142,6 @@ static const u32 dpk_rf_reg[DPK_RF_REG_NUM_8851B] = {0xde, 0x8f, 0x5, 0x10005};
static void _set_ch(struct rtw89_dev *rtwdev, u32 val);
-static u8 _rxk_5ghz_group_from_idx(u8 idx)
-{
- /* There are four RXK groups (RTW8851B_RXK_GROUP_NR), but only group 0
- * and 2 are used in 5 GHz band, so reduce elements to 2.
- */
- if (idx < RTW8851B_RXK_GROUP_IDX_NR)
- return idx * 2;
-
- return 0;
-}
-
static u8 _kpath(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
return RF_A;
@@ -196,7 +188,7 @@ static void _txck_force(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
static void _rxck_force(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
bool force, enum adc_ck ck)
{
- static const u32 ck960_8851b[] = {0x8, 0x2, 0x2, 0x4, 0xf, 0xa, 0x93};
+ static const u32 ck960_8851b[] = {0x8, 0x2, 0x2, 0x4, 0xf, 0xa, 0x92};
static const u32 ck1920_8851b[] = {0x9, 0x0, 0x0, 0x3, 0xf, 0xa, 0x49};
const u32 *data;
@@ -800,7 +792,7 @@ static bool _iqk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
"[IQK]============ S%d ID_NBTXK ============\n", path);
rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x0);
rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT,
- 0x00b);
+ 0x11);
iqk_cmd = 0x408 | (1 << (4 + path));
break;
case ID_NBRXK:
@@ -818,7 +810,7 @@ static bool _iqk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, iqk_cmd + 1);
notready = _iqk_check_cal(rtwdev, path);
if (iqk_info->iqk_sram_en &&
- (ktype == ID_NBRXK || ktype == ID_RXK))
+ (ktype == ID_NBRXK || ktype == ID_RXK || ktype == ID_NBTXK))
_iqk_sram(rtwdev, path);
rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x0);
@@ -905,18 +897,27 @@ static bool _rxk_5g_group_sel(struct rtw89_dev *rtwdev,
bool kfail = false;
bool notready;
u32 rf_0;
- u8 idx;
+ u32 val;
u8 gp;
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
- for (idx = 0; idx < RTW8851B_RXK_GROUP_IDX_NR; idx++) {
- gp = _rxk_5ghz_group_from_idx(idx);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x1000);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RFREG_MASK, 0x4);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD0, RFREG_MASK, 0x17);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RFREG_MASK, 0x5);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD0, RFREG_MASK, 0x27);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x0);
+
+ val = rtw89_read_rf(rtwdev, RF_PATH_A, RR_RXA2, 0x20);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RR_MOD_MASK, 0xc);
+ for (gp = 0; gp < RTW8851B_RXK_GROUP_IDX_NR; gp++) {
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, gp = %x\n", path, gp);
- rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RR_MOD_RGM, a_idxrxgain[idx]);
- rtw89_write_rf(rtwdev, RF_PATH_A, RR_RXA2, RR_RXA2_ATT, a_idxattc2[idx]);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RR_MOD_RGM, a_idxrxgain[gp]);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_RXA2, RR_RXA2_ATT, a_idxattc2[gp]);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_RXA2, 0x20, 0x1);
rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1);
rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x0);
@@ -926,7 +927,7 @@ static bool _rxk_5g_group_sel(struct rtw89_dev *rtwdev,
fsleep(100);
rf_0 = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK);
rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI, rf_0);
- rtw89_phy_write32_mask(rtwdev, R_IQK_RXA, B_IQK_RXAGC, a_idxrxagc[idx]);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_RXA, B_IQK_RXAGC, a_idxrxagc[gp]);
rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x11);
notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXAGC);
@@ -959,6 +960,7 @@ static bool _rxk_5g_group_sel(struct rtw89_dev *rtwdev,
_iqk_sram(rtwdev, path);
if (kfail) {
+ rtw89_phy_write32_mask(rtwdev, R_IQK_RES, B_IQK_RES_RXCFIR, 0x0);
rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD,
iqk_info->nb_rxcfir[path] | 0x2);
iqk_info->is_wb_txiqk[path] = false;
@@ -968,6 +970,14 @@ static bool _rxk_5g_group_sel(struct rtw89_dev *rtwdev,
iqk_info->is_wb_txiqk[path] = true;
}
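+ /* restore the RXA2 bit saved at entry and re-program the RF LUT
+ * entries (LUTWA 0x4/0x5) written at the start of calibration
+ */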
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_RXA2, 0x20, val);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x1000);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RFREG_MASK, 0x4);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD0, RFREG_MASK, 0x37);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RFREG_MASK, 0x5);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD0, RFREG_MASK, 0x27);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x0);
+
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"[IQK]S%x, kfail = 0x%x, 0x8%x3c = 0x%x\n", path, kfail,
1 << path, iqk_info->nb_rxcfir[path]);
@@ -980,17 +990,26 @@ static bool _iqk_5g_nbrxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
bool kfail = false;
bool notready;
- u8 idx = 0x1;
+ u8 gp = 2;
u32 rf_0;
- u8 gp;
-
- gp = _rxk_5ghz_group_from_idx(idx);
+ u32 val;
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, gp = %x\n", path, gp);
- rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RR_MOD_RGM, a_idxrxgain[idx]);
- rtw89_write_rf(rtwdev, RF_PATH_A, RR_RXA2, RR_RXA2_ATT, a_idxattc2[idx]);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x1000);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RFREG_MASK, 0x4);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD0, RFREG_MASK, 0x17);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RFREG_MASK, 0x5);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD0, RFREG_MASK, 0x27);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x0);
+
+ val = rtw89_read_rf(rtwdev, RF_PATH_A, RR_RXA2, 0x20);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RR_MOD_MASK, 0xc);
+
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RR_MOD_RGM, a_idxrxgain[gp]);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_RXA2, RR_RXA2_ATT, a_idxattc2[gp]);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_RXA2, 0x20, 0x1);
rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1);
rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x0);
@@ -1000,7 +1019,7 @@ static bool _iqk_5g_nbrxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
fsleep(100);
rf_0 = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK);
rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI, rf_0);
- rtw89_phy_write32_mask(rtwdev, R_IQK_RXA, B_IQK_RXAGC, a_idxrxagc[idx]);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_RXA, B_IQK_RXAGC, a_idxrxagc[gp]);
rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x11);
notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXAGC);
@@ -1026,6 +1045,7 @@ static bool _iqk_5g_nbrxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG);
if (kfail) {
+ rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8), 0xf, 0x0);
rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
MASKDWORD, 0x40000002);
iqk_info->is_wb_rxiqk[path] = false;
@@ -1033,6 +1053,14 @@ static bool _iqk_5g_nbrxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
iqk_info->is_wb_rxiqk[path] = false;
}
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_RXA2, 0x20, val);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x1000);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RFREG_MASK, 0x4);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD0, RFREG_MASK, 0x37);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RFREG_MASK, 0x5);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD0, RFREG_MASK, 0x27);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x0);
+
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"[IQK]S%x, kfail = 0x%x, 0x8%x3c = 0x%x\n", path, kfail,
1 << path, iqk_info->nb_rxcfir[path]);
@@ -1149,6 +1177,7 @@ static void _iqk_rxclk_setting(struct rtw89_dev *rtwdev, u8 path)
static bool _txk_5g_group_sel(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx, u8 path)
{
+ static const u8 a_idx[RTW8851B_A_TXK_GROUP_NR] = {2, 3};
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
bool kfail = false;
bool notready;
@@ -1156,16 +1185,20 @@ static bool _txk_5g_group_sel(struct rtw89_dev *rtwdev,
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
- for (gp = 0x0; gp < RTW8851B_TXK_GROUP_NR; gp++) {
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_COEF, MASKDWORD, 0x33332222);
+
+ for (gp = 0x0; gp < RTW8851B_A_TXK_GROUP_NR; gp++) {
rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, a_power_range[gp]);
rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, a_track_range[gp]);
rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, a_gain_bb[gp]);
+ rtw89_write_rf(rtwdev, path, RR_BIASA, RR_BIASA_A, a_att_smxr[gp]);
rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1);
rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x1);
rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G2, 0x0);
- rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP, gp);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP, a_idx[gp]);
rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x11);
rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, MASKDWORD, a_itqt[gp]);
notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK);
@@ -1206,7 +1239,9 @@ static bool _txk_2g_group_sel(struct rtw89_dev *rtwdev,
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
- for (gp = 0x0; gp < RTW8851B_TXK_GROUP_NR; gp++) {
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_COEF, MASKDWORD, 0x0);
+
+ for (gp = 0x0; gp < RTW8851B_G_TXK_GROUP_NR; gp++) {
rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, g_power_range[gp]);
rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, g_track_range[gp]);
rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, g_gain_bb[gp]);
@@ -1249,29 +1284,29 @@ static bool _txk_2g_group_sel(struct rtw89_dev *rtwdev,
static bool _iqk_5g_nbtxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
u8 path)
{
+ static const u8 a_idx[RTW8851B_A_TXK_GROUP_NR] = {2, 3};
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
bool kfail = false;
bool notready;
- u8 gp;
+ u8 gp = 0;
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
- for (gp = 0x0; gp < RTW8851B_TXK_GROUP_NR; gp++) {
- rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, a_power_range[gp]);
- rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, a_track_range[gp]);
- rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, a_gain_bb[gp]);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, a_power_range[gp]);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, a_track_range[gp]);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, a_gain_bb[gp]);
+ rtw89_write_rf(rtwdev, path, RR_BIASA, RR_BIASA_A, a_att_smxr[gp]);
- rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1);
- rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x1);
- rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G2, 0x0);
- rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP, gp);
- rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
- rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, MASKDWORD, a_itqt[gp]);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G2, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP, a_idx[gp]);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, MASKDWORD, a_itqt[gp]);
- notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK);
- iqk_info->nb_txcfir[path] =
- rtw89_phy_read32_mask(rtwdev, R_TXIQC, MASKDWORD) | 0x2;
- }
+ notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK);
+ iqk_info->nb_txcfir[path] =
+ rtw89_phy_read32_mask(rtwdev, R_TXIQC, MASKDWORD) | 0x2;
if (!notready)
kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG);
@@ -1300,7 +1335,7 @@ static bool _iqk_2g_nbtxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
- for (gp = 0x0; gp < RTW8851B_TXK_GROUP_NR; gp++) {
+ for (gp = 0x0; gp < RTW8851B_G_TXK_GROUP_NR; gp++) {
rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, g_power_range[gp]);
rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, g_track_range[gp]);
rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, g_gain_bb[gp]);
@@ -1664,8 +1699,6 @@ static void _iqk_init(struct rtw89_dev *rtwdev)
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
u8 idx, path;
- rtw89_phy_write32_mask(rtwdev, R_IQKINF, MASKDWORD, 0x0);
-
if (iqk_info->is_iqk_init)
return;
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851be.c b/drivers/net/wireless/realtek/rtw89/rtw8851be.c
index 598730831707..ce59ac9f56ba 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851be.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851be.c
@@ -11,6 +11,7 @@
static const struct rtw89_pci_info rtw8851b_pci_info = {
.gen_def = &rtw89_pci_gen_ax,
+ .isr_def = &rtw89_pci_isr_ax,
.txbd_trunc_mode = MAC_AX_BD_TRUNC,
.rxbd_trunc_mode = MAC_AX_BD_TRUNC,
.rxbd_mode = MAC_AX_RXBD_PKT,
@@ -28,6 +29,8 @@ static const struct rtw89_pci_info rtw8851b_pci_info = {
.rx_ring_eq_is_full = false,
.check_rx_tag = false,
.no_rxbd_fs = false,
+ .group_bd_addr = false,
+ .rpp_fmt_size = sizeof(struct rtw89_pci_rpp_fmt),
.init_cfg_reg = R_AX_PCIE_INIT_CFG1,
.txhci_en_bit = B_AX_TXHCI_EN,
@@ -57,6 +60,7 @@ static const struct rtw89_pci_info rtw8851b_pci_info = {
.ltr_set = rtw89_pci_ltr_set,
.fill_txaddr_info = rtw89_pci_fill_txaddr_info,
+ .parse_rpp = rtw89_pci_parse_rpp,
.config_intr_mask = rtw89_pci_config_intr_mask,
.enable_intr = rtw89_pci_enable_intr,
.disable_intr = rtw89_pci_disable_intr,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851bu.c b/drivers/net/wireless/realtek/rtw89/rtw8851bu.c
index c3722547c6b0..04e1ab13b753 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851bu.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851bu.c
@@ -16,6 +16,9 @@ static const struct rtw89_driver_info rtw89_8851bu_info = {
static const struct usb_device_id rtw_8851bu_id_table[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x0bda, 0xb851, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&rtw89_8851bu_info },
+ /* D-Link AX9U rev. A1 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x332a, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&rtw89_8851bu_info },
/* TP-Link Archer TX10UB Nano */
{ USB_DEVICE_AND_INTERFACE_INFO(0x3625, 0x010b, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&rtw89_8851bu_info },
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
index 3bbe2a808844..232f4c1bee1b 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
@@ -426,6 +426,35 @@ static const struct rtw89_reg_def rtw8852a_dcfo_comp = {
R_DCFO_COMP_S0, B_DCFO_COMP_S0_MSK
};
+static const struct rtw89_reg_def rtw8852a_nhm_th[RTW89_NHM_TH_NUM] = {
+ {R_NHM_CFG, B_NHM_TH0_MSK},
+ {R_NHM_TH1, B_NHM_TH1_MSK},
+ {R_NHM_TH1, B_NHM_TH2_MSK},
+ {R_NHM_TH1, B_NHM_TH3_MSK},
+ {R_NHM_TH1, B_NHM_TH4_MSK},
+ {R_NHM_TH5, B_NHM_TH5_MSK},
+ {R_NHM_TH5, B_NHM_TH6_MSK},
+ {R_NHM_TH5, B_NHM_TH7_MSK},
+ {R_NHM_TH5, B_NHM_TH8_MSK},
+ {R_NHM_TH9, B_NHM_TH9_MSK},
+ {R_NHM_TH9, B_NHM_TH10_MSK},
+};
+
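+/* the 11 thresholds above split the measured power range into 12 bins;
+ * each counter below accumulates the samples observed in one bin
+ */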
+static const struct rtw89_reg_def rtw8852a_nhm_rpt[RTW89_NHM_RPT_NUM] = {
+ {R_NHM_CNT0, B_NHM_CNT0_MSK},
+ {R_NHM_CNT0, B_NHM_CNT1_MSK},
+ {R_NHM_CNT2, B_NHM_CNT2_MSK},
+ {R_NHM_CNT2, B_NHM_CNT3_MSK},
+ {R_NHM_CNT4, B_NHM_CNT4_MSK},
+ {R_NHM_CNT4, B_NHM_CNT5_MSK},
+ {R_NHM_CNT6, B_NHM_CNT6_MSK},
+ {R_NHM_CNT6, B_NHM_CNT7_MSK},
+ {R_NHM_CNT8, B_NHM_CNT8_MSK},
+ {R_NHM_CNT8, B_NHM_CNT9_MSK},
+ {R_NHM_CNT10, B_NHM_CNT10_MSK},
+ {R_NHM_CNT10, B_NHM_CNT11_MSK},
+};
+
static const struct rtw89_imr_info rtw8852a_imr_info = {
.wdrls_imr_set = B_AX_WDRLS_IMR_SET,
.wsec_imr_reg = R_AX_SEC_DEBUG,
@@ -2080,10 +2109,17 @@ static void rtw8852a_query_ppdu(struct rtw89_dev *rtwdev,
{
u8 path;
u8 *rx_power = phy_ppdu->rssi;
+ u8 raw;
+
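+ /* no signal in the PHY report: fall back to the averaged beacon
+ * RSSI for frames addressed to us, otherwise to the stronger of
+ * the two RX paths
+ */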
+ if (!status->signal) {
+ if (phy_ppdu->to_self)
+ raw = ewma_rssi_read(&rtwdev->phystat.bcn_rssi);
+ else
+ raw = max(rx_power[RF_PATH_A], rx_power[RF_PATH_B]);
+
+ status->signal = RTW89_RSSI_RAW_TO_DBM(raw);
+ }
- if (!status->signal)
- status->signal = RTW89_RSSI_RAW_TO_DBM(max(rx_power[RF_PATH_A],
- rx_power[RF_PATH_B]));
for (path = 0; path < rtwdev->chip->rf_path_num; path++) {
status->chains |= BIT(path);
status->chain_signal[path] = RTW89_RSSI_RAW_TO_DBM(rx_power[path]);
@@ -2142,6 +2178,7 @@ static const struct rtw89_chip_ops rtw8852a_chip_ops = {
.query_rxdesc = rtw89_core_query_rxdesc,
.fill_txdesc = rtw89_core_fill_txdesc,
.fill_txdesc_fwcmd = rtw89_core_fill_txdesc,
+ .get_ch_dma = rtw89_core_get_ch_dma,
.cfg_ctrl_path = rtw89_mac_cfg_ctrl_path,
.mac_cfg_gnt = rtw89_mac_cfg_gnt,
.stop_sch_tx = rtw89_mac_stop_sch_tx,
@@ -2220,6 +2257,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.support_ant_gain = false,
.support_tas = false,
.support_sar_by_ant = false,
+ .support_noise = true,
.ul_tb_waveform_ctrl = false,
.ul_tb_pwr_diff = false,
.rx_freq_frome_ie = true,
@@ -2282,6 +2320,8 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.cfo_hw_comp = false,
.dcfo_comp = &rtw8852a_dcfo_comp,
.dcfo_comp_sft = 10,
+ .nhm_report = &rtw8852a_nhm_rpt,
+ .nhm_th = &rtw8852a_nhm_th,
.imr_info = &rtw8852a_imr_info,
.imr_dmac_table = NULL,
.imr_cmac_table = NULL,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
index 90ffaf9f4f6a..9e05e831569d 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
@@ -11,6 +11,7 @@
static const struct rtw89_pci_info rtw8852a_pci_info = {
.gen_def = &rtw89_pci_gen_ax,
+ .isr_def = &rtw89_pci_isr_ax,
.txbd_trunc_mode = MAC_AX_BD_TRUNC,
.rxbd_trunc_mode = MAC_AX_BD_TRUNC,
.rxbd_mode = MAC_AX_RXBD_PKT,
@@ -28,6 +29,8 @@ static const struct rtw89_pci_info rtw8852a_pci_info = {
.rx_ring_eq_is_full = false,
.check_rx_tag = false,
.no_rxbd_fs = false,
+ .group_bd_addr = false,
+ .rpp_fmt_size = sizeof(struct rtw89_pci_rpp_fmt),
.init_cfg_reg = R_AX_PCIE_INIT_CFG1,
.txhci_en_bit = B_AX_TXHCI_EN,
@@ -55,6 +58,7 @@ static const struct rtw89_pci_info rtw8852a_pci_info = {
.ltr_set = rtw89_pci_ltr_set,
.fill_txaddr_info = rtw89_pci_fill_txaddr_info,
+ .parse_rpp = rtw89_pci_parse_rpp,
.config_intr_mask = rtw89_pci_config_intr_mask,
.enable_intr = rtw89_pci_enable_intr,
.disable_intr = rtw89_pci_disable_intr,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b.c b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
index 7ede07f7b1eb..0777e336aaa1 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
@@ -842,6 +842,7 @@ static const struct rtw89_chip_ops rtw8852b_chip_ops = {
.query_rxdesc = rtw89_core_query_rxdesc,
.fill_txdesc = rtw89_core_fill_txdesc,
.fill_txdesc_fwcmd = rtw89_core_fill_txdesc,
+ .get_ch_dma = rtw89_core_get_ch_dma,
.cfg_ctrl_path = rtw89_mac_cfg_ctrl_path,
.mac_cfg_gnt = rtw89_mac_cfg_gnt,
.stop_sch_tx = rtw89_mac_stop_sch_tx,
@@ -939,6 +940,7 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
.support_ant_gain = true,
.support_tas = false,
.support_sar_by_ant = true,
+ .support_noise = false,
.ul_tb_waveform_ctrl = true,
.ul_tb_pwr_diff = false,
.rx_freq_frome_ie = true,
@@ -1001,6 +1003,8 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
.cfo_hw_comp = true,
.dcfo_comp = &rtw8852b_dcfo_comp,
.dcfo_comp_sft = 10,
+ .nhm_report = NULL,
+ .nhm_th = NULL,
.imr_info = &rtw8852b_imr_info,
.imr_dmac_table = NULL,
.imr_cmac_table = NULL,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852be.c b/drivers/net/wireless/realtek/rtw89/rtw8852be.c
index b0726f590ca2..12db0d0be547 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852be.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852be.c
@@ -11,6 +11,7 @@
static const struct rtw89_pci_info rtw8852b_pci_info = {
.gen_def = &rtw89_pci_gen_ax,
+ .isr_def = &rtw89_pci_isr_ax,
.txbd_trunc_mode = MAC_AX_BD_TRUNC,
.rxbd_trunc_mode = MAC_AX_BD_TRUNC,
.rxbd_mode = MAC_AX_RXBD_PKT,
@@ -28,6 +29,8 @@ static const struct rtw89_pci_info rtw8852b_pci_info = {
.rx_ring_eq_is_full = false,
.check_rx_tag = false,
.no_rxbd_fs = false,
+ .group_bd_addr = false,
+ .rpp_fmt_size = sizeof(struct rtw89_pci_rpp_fmt),
.init_cfg_reg = R_AX_PCIE_INIT_CFG1,
.txhci_en_bit = B_AX_TXHCI_EN,
@@ -57,6 +60,7 @@ static const struct rtw89_pci_info rtw8852b_pci_info = {
.ltr_set = rtw89_pci_ltr_set,
.fill_txaddr_info = rtw89_pci_fill_txaddr_info,
+ .parse_rpp = rtw89_pci_parse_rpp,
.config_intr_mask = rtw89_pci_config_intr_mask,
.enable_intr = rtw89_pci_enable_intr,
.disable_intr = rtw89_pci_disable_intr,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt.c b/drivers/net/wireless/realtek/rtw89/rtw8852bt.c
index 9427823aca2f..b3a79ebc7e75 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852bt.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt.c
@@ -708,6 +708,7 @@ static const struct rtw89_chip_ops rtw8852bt_chip_ops = {
.query_rxdesc = rtw89_core_query_rxdesc,
.fill_txdesc = rtw89_core_fill_txdesc,
.fill_txdesc_fwcmd = rtw89_core_fill_txdesc,
+ .get_ch_dma = rtw89_core_get_ch_dma,
.cfg_ctrl_path = rtw89_mac_cfg_ctrl_path,
.mac_cfg_gnt = rtw89_mac_cfg_gnt,
.stop_sch_tx = rtw89_mac_stop_sch_tx,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c
index d0e299803225..961b26ba2d3c 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c
@@ -1799,22 +1799,14 @@ static void _dpk_onoff(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool o
{
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
u8 val, kidx = dpk->cur_idx[path];
- bool off_reverse;
+ val = dpk->is_dpk_enable && !off && dpk->bp[path][kidx].path_ok;
- if (off)
- off_reverse = false;
- else
- off_reverse = true;
-
- val = dpk->is_dpk_enable & off_reverse & dpk->bp[path][kidx].path_ok;
-
rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
BIT(24), val);
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s !!!\n", path,
- kidx, str_enable_disable(dpk->is_dpk_enable & off_reverse));
+ kidx, str_enable_disable(dpk->is_dpk_enable && !off));
}
static void _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
@@ -1883,8 +1875,8 @@ static void _dpk_information(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"[DPK] S%d[%d] (PHY%d): TSSI %s/ DBCC %s/ %s/ CH%d/ %s\n",
path, dpk->cur_idx[path], phy,
- rtwdev->is_tssi_mode[path] ? "on" : "off",
- rtwdev->dbcc_en ? "on" : "off",
+ str_on_off(rtwdev->is_tssi_mode[path]),
+ str_on_off(rtwdev->dbcc_en),
dpk->bp[path][kidx].band == 0 ? "2G" :
dpk->bp[path][kidx].band == 1 ? "5G" : "6G",
dpk->bp[path][kidx].ch,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bte.c b/drivers/net/wireless/realtek/rtw89/rtw8852bte.c
index a584c75b801d..8c995aa95325 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852bte.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852bte.c
@@ -17,6 +17,7 @@ static const struct rtw89_pci_ssid_quirk rtw8852bt_pci_ssid_quirks[] = {
static const struct rtw89_pci_info rtw8852bt_pci_info = {
.gen_def = &rtw89_pci_gen_ax,
+ .isr_def = &rtw89_pci_isr_ax,
.txbd_trunc_mode = MAC_AX_BD_TRUNC,
.rxbd_trunc_mode = MAC_AX_BD_TRUNC,
.rxbd_mode = MAC_AX_RXBD_PKT,
@@ -34,6 +35,8 @@ static const struct rtw89_pci_info rtw8852bt_pci_info = {
.rx_ring_eq_is_full = false,
.check_rx_tag = false,
.no_rxbd_fs = false,
+ .group_bd_addr = false,
+ .rpp_fmt_size = sizeof(struct rtw89_pci_rpp_fmt),
.init_cfg_reg = R_AX_PCIE_INIT_CFG1,
.txhci_en_bit = B_AX_TXHCI_EN,
@@ -63,6 +66,7 @@ static const struct rtw89_pci_info rtw8852bt_pci_info = {
.ltr_set = rtw89_pci_ltr_set,
.fill_txaddr_info = rtw89_pci_fill_txaddr_info,
+ .parse_rpp = rtw89_pci_parse_rpp,
.config_intr_mask = rtw89_pci_config_intr_mask,
.enable_intr = rtw89_pci_enable_intr,
.disable_intr = rtw89_pci_disable_intr,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bu.c b/drivers/net/wireless/realtek/rtw89/rtw8852bu.c
index b315cb997758..0694272f7ffa 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852bu.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852bu.c
@@ -30,6 +30,8 @@ static const struct usb_device_id rtw_8852bu_id_table[] = {
.driver_info = (kernel_ulong_t)&rtw89_8852bu_info },
{ USB_DEVICE_AND_INTERFACE_INFO(0x0db0, 0x6931, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&rtw89_8852bu_info },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3327, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&rtw89_8852bu_info },
{ USB_DEVICE_AND_INTERFACE_INFO(0x3574, 0x6121, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&rtw89_8852bu_info },
{ USB_DEVICE_AND_INTERFACE_INFO(0x35bc, 0x0100, 0xff, 0xff, 0xff),
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
index 88cf8ea13e7c..440801d63343 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
@@ -2962,6 +2962,7 @@ static const struct rtw89_chip_ops rtw8852c_chip_ops = {
.query_rxdesc = rtw89_core_query_rxdesc,
.fill_txdesc = rtw89_core_fill_txdesc_v1,
.fill_txdesc_fwcmd = rtw89_core_fill_txdesc_fwcmd_v1,
+ .get_ch_dma = rtw89_core_get_ch_dma,
.cfg_ctrl_path = rtw89_mac_cfg_ctrl_path_v1,
.mac_cfg_gnt = rtw89_mac_cfg_gnt_v1,
.stop_sch_tx = rtw89_mac_stop_sch_tx_v1,
@@ -3043,6 +3044,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.support_ant_gain = true,
.support_tas = true,
.support_sar_by_ant = true,
+ .support_noise = false,
.ul_tb_waveform_ctrl = false,
.ul_tb_pwr_diff = true,
.rx_freq_frome_ie = false,
@@ -3106,6 +3108,8 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.cfo_hw_comp = false,
.dcfo_comp = &rtw8852c_dcfo_comp,
.dcfo_comp_sft = 12,
+ .nhm_report = NULL,
+ .nhm_th = NULL,
.imr_info = &rtw8852c_imr_info,
.imr_dmac_table = NULL,
.imr_cmac_table = NULL,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
index db01d3966c27..150fed189414 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
@@ -20,6 +20,7 @@ static const struct rtw89_pci_bd_idx_addr rtw8852c_bd_idx_addr_low_power = {
static const struct rtw89_pci_info rtw8852c_pci_info = {
.gen_def = &rtw89_pci_gen_ax,
+ .isr_def = &rtw89_pci_isr_ax,
.txbd_trunc_mode = MAC_AX_BD_TRUNC,
.rxbd_trunc_mode = MAC_AX_BD_TRUNC,
.rxbd_mode = MAC_AX_RXBD_PKT,
@@ -37,6 +38,8 @@ static const struct rtw89_pci_info rtw8852c_pci_info = {
.rx_ring_eq_is_full = false,
.check_rx_tag = false,
.no_rxbd_fs = false,
+ .group_bd_addr = false,
+ .rpp_fmt_size = sizeof(struct rtw89_pci_rpp_fmt),
.init_cfg_reg = R_AX_HAXI_INIT_CFG1,
.txhci_en_bit = B_AX_TXHCI_EN_V1,
@@ -64,6 +67,7 @@ static const struct rtw89_pci_info rtw8852c_pci_info = {
.ltr_set = rtw89_pci_ltr_set_v1,
.fill_txaddr_info = rtw89_pci_fill_txaddr_info_v1,
+ .parse_rpp = rtw89_pci_parse_rpp,
.config_intr_mask = rtw89_pci_config_intr_mask_v1,
.enable_intr = rtw89_pci_enable_intr_v1,
.disable_intr = rtw89_pci_disable_intr_v1,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a.c b/drivers/net/wireless/realtek/rtw89/rtw8922a.c
index 36c641e3bc13..6aa19ad259ac 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8922a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922a.c
@@ -2765,6 +2765,10 @@ static int rtw8922a_mac_disable_bb_rf(struct rtw89_dev *rtwdev)
return 0;
}
+static const struct rtw89_chanctx_listener rtw8922a_chanctx_listener = {
+ .callbacks[RTW89_CHANCTX_CALLBACK_TAS] = rtw89_tas_chanctx_cb,
+};
+
#ifdef CONFIG_PM
static const struct wiphy_wowlan_support rtw_wowlan_stub_8922a = {
.flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT |
@@ -2817,6 +2821,7 @@ static const struct rtw89_chip_ops rtw8922a_chip_ops = {
.query_rxdesc = rtw89_core_query_rxdesc_v2,
.fill_txdesc = rtw89_core_fill_txdesc_v2,
.fill_txdesc_fwcmd = rtw89_core_fill_txdesc_fwcmd_v2,
+ .get_ch_dma = rtw89_core_get_ch_dma,
.cfg_ctrl_path = rtw89_mac_cfg_ctrl_path_v2,
.mac_cfg_gnt = rtw89_mac_cfg_gnt_v2,
.stop_sch_tx = rtw89_mac_stop_sch_tx_v2,
@@ -2875,6 +2880,7 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
.nctl_post_table = NULL,
.dflt_parms = NULL, /* load parm from fw */
.rfe_parms_conf = NULL, /* load parm from fw */
+ .chanctx_listener = &rtw8922a_chanctx_listener,
.txpwr_factor_bb = 3,
.txpwr_factor_rf = 2,
.txpwr_factor_mac = 1,
@@ -2894,8 +2900,9 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
BIT(NL80211_CHAN_WIDTH_160),
.support_unii4 = true,
.support_ant_gain = true,
- .support_tas = false,
+ .support_tas = true,
.support_sar_by_ant = true,
+ .support_noise = false,
.ul_tb_waveform_ctrl = false,
.ul_tb_pwr_diff = false,
.rx_freq_frome_ie = false,
@@ -2958,6 +2965,8 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
.cfo_hw_comp = true,
.dcfo_comp = NULL,
.dcfo_comp_sft = 0,
+ .nhm_report = NULL,
+ .nhm_th = NULL,
.imr_info = NULL,
.imr_dmac_table = &rtw8922a_imr_dmac_table,
.imr_cmac_table = &rtw8922a_imr_cmac_table,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922ae.c b/drivers/net/wireless/realtek/rtw89/rtw8922ae.c
index b730d79edd10..90c62b757c57 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8922ae.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922ae.c
@@ -17,6 +17,7 @@ static const struct rtw89_pci_ssid_quirk rtw8922a_pci_ssid_quirks[] = {
static const struct rtw89_pci_info rtw8922a_pci_info = {
.gen_def = &rtw89_pci_gen_be,
+ .isr_def = &rtw89_pci_isr_be,
.txbd_trunc_mode = MAC_AX_BD_TRUNC,
.rxbd_trunc_mode = MAC_AX_BD_TRUNC,
.rxbd_mode = MAC_AX_RXBD_PKT,
@@ -34,6 +35,8 @@ static const struct rtw89_pci_info rtw8922a_pci_info = {
.rx_ring_eq_is_full = true,
.check_rx_tag = true,
.no_rxbd_fs = true,
+ .group_bd_addr = false,
+ .rpp_fmt_size = sizeof(struct rtw89_pci_rpp_fmt),
.init_cfg_reg = R_BE_HAXI_INIT_CFG1,
.txhci_en_bit = B_BE_TXDMA_EN,
@@ -61,6 +64,7 @@ static const struct rtw89_pci_info rtw8922a_pci_info = {
.ltr_set = rtw89_pci_ltr_set_v2,
.fill_txaddr_info = rtw89_pci_fill_txaddr_info_v1,
+ .parse_rpp = rtw89_pci_parse_rpp,
.config_intr_mask = rtw89_pci_config_intr_mask_v2,
.enable_intr = rtw89_pci_enable_intr_v2,
.disable_intr = rtw89_pci_disable_intr_v2,
diff --git a/drivers/net/wireless/realtek/rtw89/sar.c b/drivers/net/wireless/realtek/rtw89/sar.c
index 7f568ffb3766..ef7feccccd5e 100644
--- a/drivers/net/wireless/realtek/rtw89/sar.c
+++ b/drivers/net/wireless/realtek/rtw89/sar.c
@@ -4,6 +4,7 @@
#include "acpi.h"
#include "debug.h"
+#include "fw.h"
#include "phy.h"
#include "reg.h"
#include "sar.h"
@@ -843,6 +844,20 @@ void rtw89_tas_chanctx_cb(struct rtw89_dev *rtwdev,
}
EXPORT_SYMBOL(rtw89_tas_chanctx_cb);
+void rtw89_tas_fw_timer_enable(struct rtw89_dev *rtwdev, bool enable)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_tas_info *tas = &rtwdev->tas;
+
+ if (!tas->enable)
+ return;
+
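+ /* the TAS timer H2C only applies to non-AX generations */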
+ if (chip->chip_gen == RTW89_CHIP_AX)
+ return;
+
+ rtw89_fw_h2c_rf_tas_trigger(rtwdev, enable);
+}
+
void rtw89_sar_init(struct rtw89_dev *rtwdev)
{
rtw89_set_sar_from_acpi(rtwdev);
diff --git a/drivers/net/wireless/realtek/rtw89/sar.h b/drivers/net/wireless/realtek/rtw89/sar.h
index 4b7f3d44f57b..60b3aec5b3be 100644
--- a/drivers/net/wireless/realtek/rtw89/sar.h
+++ b/drivers/net/wireless/realtek/rtw89/sar.h
@@ -37,6 +37,7 @@ void rtw89_tas_reset(struct rtw89_dev *rtwdev, bool force);
void rtw89_tas_scan(struct rtw89_dev *rtwdev, bool start);
void rtw89_tas_chanctx_cb(struct rtw89_dev *rtwdev,
enum rtw89_chanctx_state state);
+void rtw89_tas_fw_timer_enable(struct rtw89_dev *rtwdev, bool enable);
void rtw89_sar_init(struct rtw89_dev *rtwdev);
void rtw89_sar_track(struct rtw89_dev *rtwdev);
diff --git a/drivers/net/wireless/realtek/rtw89/ser.c b/drivers/net/wireless/realtek/rtw89/ser.c
index bb39fdbcba0d..f99e179f7ff9 100644
--- a/drivers/net/wireless/realtek/rtw89/ser.c
+++ b/drivers/net/wireless/realtek/rtw89/ser.c
@@ -205,7 +205,6 @@ static void rtw89_ser_hdl_work(struct work_struct *work)
static int ser_send_msg(struct rtw89_ser *ser, u8 event)
{
- struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
struct ser_msg *msg = NULL;
if (test_bit(RTW89_SER_DRV_STOP_RUN, ser->flags))
@@ -221,7 +220,7 @@ static int ser_send_msg(struct rtw89_ser *ser, u8 event)
list_add(&msg->list, &ser->msg_q);
spin_unlock_irq(&ser->msg_q_lock);
- ieee80211_queue_work(rtwdev->hw, &ser->ser_hdl_work);
+ schedule_work(&ser->ser_hdl_work);
return 0;
}
@@ -502,7 +501,9 @@ static void ser_reset_trx_st_hdl(struct rtw89_ser *ser, u8 evt)
}
drv_stop_rx(ser);
+ wiphy_lock(wiphy);
drv_trx_reset(ser);
+ wiphy_unlock(wiphy);
/* wait m3 */
hal_send_m2_event(ser);
diff --git a/drivers/net/wireless/realtek/rtw89/txrx.h b/drivers/net/wireless/realtek/rtw89/txrx.h
index ec01bfc363da..984c9fdbb018 100644
--- a/drivers/net/wireless/realtek/rtw89/txrx.h
+++ b/drivers/net/wireless/realtek/rtw89/txrx.h
@@ -572,6 +572,7 @@ struct rtw89_phy_sts_ie00 {
} __packed;
#define RTW89_PHY_STS_IE00_W0_RPL GENMASK(15, 7)
+#define RTW89_PHY_STS_IE00_W3_RX_PATH_EN GENMASK(31, 28)
struct rtw89_phy_sts_ie00_v2 {
__le32 w0;
@@ -732,43 +733,6 @@ rtw89_core_get_qsel_mgmt(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request
return RTW89_TX_QSEL_B0_MGMT;
}
-static inline u8 rtw89_core_get_ch_dma(struct rtw89_dev *rtwdev, u8 qsel)
-{
- switch (qsel) {
- default:
- rtw89_warn(rtwdev, "Cannot map qsel to dma: %d\n", qsel);
- fallthrough;
- case RTW89_TX_QSEL_BE_0:
- case RTW89_TX_QSEL_BE_1:
- case RTW89_TX_QSEL_BE_2:
- case RTW89_TX_QSEL_BE_3:
- return RTW89_TXCH_ACH0;
- case RTW89_TX_QSEL_BK_0:
- case RTW89_TX_QSEL_BK_1:
- case RTW89_TX_QSEL_BK_2:
- case RTW89_TX_QSEL_BK_3:
- return RTW89_TXCH_ACH1;
- case RTW89_TX_QSEL_VI_0:
- case RTW89_TX_QSEL_VI_1:
- case RTW89_TX_QSEL_VI_2:
- case RTW89_TX_QSEL_VI_3:
- return RTW89_TXCH_ACH2;
- case RTW89_TX_QSEL_VO_0:
- case RTW89_TX_QSEL_VO_1:
- case RTW89_TX_QSEL_VO_2:
- case RTW89_TX_QSEL_VO_3:
- return RTW89_TXCH_ACH3;
- case RTW89_TX_QSEL_B0_MGMT:
- return RTW89_TXCH_CH8;
- case RTW89_TX_QSEL_B0_HI:
- return RTW89_TXCH_CH9;
- case RTW89_TX_QSEL_B1_MGMT:
- return RTW89_TXCH_CH10;
- case RTW89_TX_QSEL_B1_HI:
- return RTW89_TXCH_CH11;
- }
-}
-
static inline u8 rtw89_core_get_tid_indicate(struct rtw89_dev *rtwdev, u8 tid)
{
switch (tid) {
diff --git a/drivers/net/wireless/realtek/rtw89/wow.c b/drivers/net/wireless/realtek/rtw89/wow.c
index 5bb7c1a42f1d..5faa51ad896a 100644
--- a/drivers/net/wireless/realtek/rtw89/wow.c
+++ b/drivers/net/wireless/realtek/rtw89/wow.c
@@ -99,13 +99,26 @@ static int rtw89_rx_pn_to_iv(struct rtw89_dev *rtwdev,
ieee80211_get_key_rx_seq(key, 0, &seq);
- /* seq.ccmp.pn[] is BE order array */
- pn = u64_encode_bits(seq.ccmp.pn[0], RTW89_KEY_PN_5) |
- u64_encode_bits(seq.ccmp.pn[1], RTW89_KEY_PN_4) |
- u64_encode_bits(seq.ccmp.pn[2], RTW89_KEY_PN_3) |
- u64_encode_bits(seq.ccmp.pn[3], RTW89_KEY_PN_2) |
- u64_encode_bits(seq.ccmp.pn[4], RTW89_KEY_PN_1) |
- u64_encode_bits(seq.ccmp.pn[5], RTW89_KEY_PN_0);
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
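+ /* the 48-bit TKIP TSC is iv32 in bits 47..16 and iv16 in bits
+ * 15..0 (RTW89_KEY_TKIP_PN_IV32/IV16)
+ */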
+ pn = u64_encode_bits(seq.tkip.iv32, RTW89_KEY_TKIP_PN_IV32) |
+ u64_encode_bits(seq.tkip.iv16, RTW89_KEY_TKIP_PN_IV16);
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ case WLAN_CIPHER_SUITE_GCMP_256:
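+ /* let beacon-tracking firmware know the current power level when
+ * entering LPS
+ */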
+ /* seq.ccmp.pn[] is BE order array */
+ pn = u64_encode_bits(seq.ccmp.pn[0], RTW89_KEY_PN_5) |
+ u64_encode_bits(seq.ccmp.pn[1], RTW89_KEY_PN_4) |
+ u64_encode_bits(seq.ccmp.pn[2], RTW89_KEY_PN_3) |
+ u64_encode_bits(seq.ccmp.pn[3], RTW89_KEY_PN_2) |
+ u64_encode_bits(seq.ccmp.pn[4], RTW89_KEY_PN_1) |
+ u64_encode_bits(seq.ccmp.pn[5], RTW89_KEY_PN_0);
+ break;
+ default:
+ return -EINVAL;
+ }
err = _pn_to_iv(rtwdev, key, iv, pn, key->keyidx);
if (err)
@@ -177,13 +190,26 @@ static int rtw89_rx_iv_to_pn(struct rtw89_dev *rtwdev,
if (err)
return err;
- /* seq.ccmp.pn[] is BE order array */
- seq.ccmp.pn[0] = u64_get_bits(pn, RTW89_KEY_PN_5);
- seq.ccmp.pn[1] = u64_get_bits(pn, RTW89_KEY_PN_4);
- seq.ccmp.pn[2] = u64_get_bits(pn, RTW89_KEY_PN_3);
- seq.ccmp.pn[3] = u64_get_bits(pn, RTW89_KEY_PN_2);
- seq.ccmp.pn[4] = u64_get_bits(pn, RTW89_KEY_PN_1);
- seq.ccmp.pn[5] = u64_get_bits(pn, RTW89_KEY_PN_0);
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ seq.tkip.iv32 = u64_get_bits(pn, RTW89_KEY_TKIP_PN_IV32);
+ seq.tkip.iv16 = u64_get_bits(pn, RTW89_KEY_TKIP_PN_IV16);
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ /* seq.ccmp.pn[] is BE order array */
+ seq.ccmp.pn[0] = u64_get_bits(pn, RTW89_KEY_PN_5);
+ seq.ccmp.pn[1] = u64_get_bits(pn, RTW89_KEY_PN_4);
+ seq.ccmp.pn[2] = u64_get_bits(pn, RTW89_KEY_PN_3);
+ seq.ccmp.pn[3] = u64_get_bits(pn, RTW89_KEY_PN_2);
+ seq.ccmp.pn[4] = u64_get_bits(pn, RTW89_KEY_PN_1);
+ seq.ccmp.pn[5] = u64_get_bits(pn, RTW89_KEY_PN_0);
+ break;
+ default:
+ return -EINVAL;
+ }
ieee80211_set_key_rx_seq(key, 0, &seq);
rtw89_debug(rtwdev, RTW89_DBG_WOW, "%s key %d iv-%*ph to pn-%*ph\n",
@@ -285,6 +311,11 @@ static void rtw89_wow_get_key_info_iter(struct ieee80211_hw *hw,
switch (key->cipher) {
case WLAN_CIPHER_SUITE_TKIP:
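+ /* a pairwise TKIP key additionally carries the TX MIC key */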
+ if (sta)
+ memcpy(gtk_info->txmickey,
+ key->key + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY,
+ sizeof(gtk_info->txmickey));
+ fallthrough;
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_CCMP_256:
@@ -348,10 +379,27 @@ static void rtw89_wow_set_key_info_iter(struct ieee80211_hw *hw,
struct rtw89_wow_aoac_report *aoac_rpt = &rtw_wow->aoac_rpt;
struct rtw89_set_key_info_iter_data *iter_data = data;
bool update_tx_key_info = iter_data->rx_ready;
+ u8 tmp[RTW89_MIC_KEY_LEN];
int ret;
switch (key->cipher) {
case WLAN_CIPHER_SUITE_TKIP:
+ /* TX MIC key and RX MIC key are stored in opposite order in FW,
+ * so swap them before sending to mac80211.
+ */
+ if (!sta && update_tx_key_info && aoac_rpt->rekey_ok &&
+ !iter_data->tkip_gtk_swapped) {
+ memcpy(tmp, &aoac_rpt->gtk[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
+ RTW89_MIC_KEY_LEN);
+ memcpy(&aoac_rpt->gtk[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
+ &aoac_rpt->gtk[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
+ RTW89_MIC_KEY_LEN);
+ memcpy(&aoac_rpt->gtk[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
+ tmp, RTW89_MIC_KEY_LEN);
+ iter_data->tkip_gtk_swapped = true;
+ }
+ fallthrough;
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_CCMP_256:
@@ -642,7 +690,8 @@ static void rtw89_wow_update_key_info(struct rtw89_dev *rtwdev, bool rx_ready)
struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
struct rtw89_wow_aoac_report *aoac_rpt = &rtw_wow->aoac_rpt;
struct rtw89_set_key_info_iter_data data = {.error = false,
- .rx_ready = rx_ready};
+ .rx_ready = rx_ready,
+ .tkip_gtk_swapped = false};
struct ieee80211_bss_conf *bss_conf;
struct ieee80211_key_conf *key;
diff --git a/drivers/net/wireless/realtek/rtw89/wow.h b/drivers/net/wireless/realtek/rtw89/wow.h
index 6606528d31c7..d2ba6cebc2a6 100644
--- a/drivers/net/wireless/realtek/rtw89/wow.h
+++ b/drivers/net/wireless/realtek/rtw89/wow.h
@@ -5,6 +5,9 @@
#ifndef __RTW89_WOW_H__
#define __RTW89_WOW_H__
+#define RTW89_KEY_TKIP_PN_IV16 GENMASK_ULL(15, 0)
+#define RTW89_KEY_TKIP_PN_IV32 GENMASK_ULL(47, 16)
+
#define RTW89_KEY_PN_0 GENMASK_ULL(7, 0)
#define RTW89_KEY_PN_1 GENMASK_ULL(15, 8)
#define RTW89_KEY_PN_2 GENMASK_ULL(23, 16)
@@ -25,6 +28,8 @@
#define RTW89_WOW_SYMBOL_CHK_PTK BIT(0)
#define RTW89_WOW_SYMBOL_CHK_GTK BIT(1)
+#define RTW89_MIC_KEY_LEN 8
+
enum rtw89_wake_reason {
RTW89_WOW_RSN_RX_PTK_REKEY = 0x1,
RTW89_WOW_RSN_RX_GTK_REKEY = 0x2,
@@ -73,6 +78,7 @@ struct rtw89_set_key_info_iter_data {
u32 igtk_cipher;
bool rx_ready;
bool error;
+ bool tkip_gtk_swapped;
};
static inline int rtw89_wow_get_sec_hdr_len(struct rtw89_dev *rtwdev)
diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c
index 3789d46d5614..9f856042a67a 100644
--- a/drivers/net/wireless/virtual/mac80211_hwsim.c
+++ b/drivers/net/wireless/virtual/mac80211_hwsim.c
@@ -645,6 +645,7 @@ static LIST_HEAD(hwsim_radios);
static struct rhashtable hwsim_radios_rht;
static int hwsim_radio_idx;
static int hwsim_radios_generation = 1;
+static u8 hwsim_nan_cluster_id[ETH_ALEN];
static struct platform_driver mac80211_hwsim_driver = {
.driver = {
@@ -670,7 +671,7 @@ struct mac80211_hwsim_data {
struct ieee80211_channel channels_s1g[ARRAY_SIZE(hwsim_channels_s1g)];
struct ieee80211_rate rates[ARRAY_SIZE(hwsim_rates)];
struct ieee80211_iface_combination if_combination;
- struct ieee80211_iface_limit if_limits[3];
+ struct ieee80211_iface_limit if_limits[4];
int n_if_limits;
struct ieee80211_iface_combination if_combination_radio;
@@ -679,7 +680,7 @@ struct mac80211_hwsim_data {
u32 ciphers[ARRAY_SIZE(hwsim_ciphers)];
- struct mac_address addresses[2];
+ struct mac_address addresses[3];
int channels, idx;
bool use_chanctx;
bool destroy_on_close;
@@ -752,6 +753,14 @@ struct mac80211_hwsim_data {
struct wireless_dev *pmsr_request_wdev;
struct mac80211_hwsim_link_data link_data[IEEE80211_MLD_MAX_NUM_LINKS];
+
+ struct ieee80211_vif *nan_device_vif;
+ u8 nan_bands;
+
+ enum nl80211_band nan_curr_dw_band;
+ struct hrtimer nan_timer;
+ bool notify_dw;
+ struct ieee80211_vif *nan_vif;
};
static const struct rhashtable_params hwsim_rht_params = {
@@ -926,6 +935,7 @@ static const struct nla_policy hwsim_genl_policy[HWSIM_ATTR_MAX + 1] = {
[HWSIM_ATTR_PMSR_SUPPORT] = NLA_POLICY_NESTED(hwsim_pmsr_capa_policy),
[HWSIM_ATTR_PMSR_RESULT] = NLA_POLICY_NESTED(hwsim_pmsr_peers_result_policy),
[HWSIM_ATTR_MULTI_RADIO] = { .type = NLA_FLAG },
+ [HWSIM_ATTR_SUPPORT_NAN_DEVICE] = { .type = NLA_FLAG },
};
#if IS_REACHABLE(CONFIG_VIRTIO)
@@ -1644,6 +1654,16 @@ static void mac80211_hwsim_tx_iter(void *_data, u8 *addr,
struct tx_iter_data *data = _data;
int i;
+ /* For NAN Device simulation purposes, assume that NAN is always
+ * on channel 6 or channel 149.
+ */
+ if (vif->type == NL80211_IFTYPE_NAN) {
+ data->receive = (data->channel &&
+ (data->channel->center_freq == 2437 ||
+ data->channel->center_freq == 5745));
+ return;
+ }
+
for (i = 0; i < ARRAY_SIZE(vif->link_conf); i++) {
struct ieee80211_bss_conf *conf;
struct ieee80211_chanctx_conf *chanctx;
@@ -1944,6 +1964,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
struct ieee80211_hdr *hdr = (void *)skb->data;
struct ieee80211_chanctx_conf *chanctx_conf;
struct ieee80211_channel *channel;
+ struct ieee80211_vif *vif = txi->control.vif;
bool ack;
enum nl80211_chan_width confbw = NL80211_CHAN_WIDTH_20_NOHT;
u32 _portid, i;
@@ -1954,7 +1975,23 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
return;
}
- if (!data->use_chanctx) {
+ if (vif && vif->type == NL80211_IFTYPE_NAN && !data->tmp_chan) {
+ /* For NAN Device simulation purposes, assume that NAN is always
+ * on channel 6 or channel 149, unless a ROC is in progress (for
+ * USD use cases).
+ */
+ if (data->nan_curr_dw_band == NL80211_BAND_2GHZ)
+ channel = ieee80211_get_channel(hw->wiphy, 2437);
+ else if (data->nan_curr_dw_band == NL80211_BAND_5GHZ)
+ channel = ieee80211_get_channel(hw->wiphy, 5745);
+ else
+ channel = NULL;
+
+ if (WARN_ON(!channel)) {
+ ieee80211_free_txskb(hw, skb);
+ return;
+ }
+ } else if (!data->use_chanctx) {
channel = data->channel;
confbw = data->bw;
} else if (txi->hw_queue == 4) {
@@ -1962,7 +1999,6 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
} else {
u8 link = u32_get_bits(IEEE80211_SKB_CB(skb)->control.flags,
IEEE80211_TX_CTRL_MLO_LINK);
- struct ieee80211_vif *vif = txi->control.vif;
struct ieee80211_link_sta *link_sta = NULL;
struct ieee80211_sta *sta = control->sta;
struct ieee80211_bss_conf *bss_conf;
@@ -3950,6 +3986,168 @@ out:
return err;
}
+static enum hrtimer_restart
+mac80211_hwsim_nan_dw_start(struct hrtimer *timer)
+{
+ struct mac80211_hwsim_data *data =
+ container_of(timer, struct mac80211_hwsim_data,
+ nan_timer);
+ struct ieee80211_hw *hw = data->hw;
+ u64 orig_tsf = mac80211_hwsim_get_tsf(hw, NULL), tsf = orig_tsf;
+ u32 dw_int = 512 * 1024;
+ u64 until_dw;
+
+ if (!data->nan_device_vif)
+ return HRTIMER_NORESTART;
+
+ if (data->nan_bands & BIT(NL80211_BAND_5GHZ)) {
+ if (data->nan_curr_dw_band == NL80211_BAND_2GHZ) {
+ dw_int = 128 * 1024;
+ data->nan_curr_dw_band = NL80211_BAND_5GHZ;
+ } else if (data->nan_curr_dw_band == NL80211_BAND_5GHZ) {
+ data->nan_curr_dw_band = NL80211_BAND_2GHZ;
+ }
+ }
+
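+ /* worked example: tsf = 0x90000 us with dw_int = 0x80000 us
+ * (512 TU) leaves a remainder of 0x10000, so the next DW starts
+ * in until_dw = 0x70000 us
+ */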
+ until_dw = dw_int - do_div(tsf, dw_int);
+
+ /* The timer might fire just before the actual DW; in that case,
+ * push the timeout out to the following DW.
+ */
+ if (until_dw < dw_int / 2)
+ until_dw += dw_int;
+
+ /* The do_div() call above modifies 'tsf' in place, so the print
+ * below uses the saved copy to show the original TSF.
+ */
+ wiphy_debug(hw->wiphy,
+ "%s: tsf=%llx, curr_dw_band=%u, next_dw=%llu\n",
+ __func__, orig_tsf, data->nan_curr_dw_band,
+ until_dw);
+
+ hrtimer_forward_now(&data->nan_timer,
+ ns_to_ktime(until_dw * NSEC_PER_USEC));
+
+ if (data->notify_dw) {
+ struct ieee80211_channel *ch;
+ struct wireless_dev *wdev =
+ ieee80211_vif_to_wdev(data->nan_device_vif);
+
+ if (data->nan_curr_dw_band == NL80211_BAND_5GHZ)
+ ch = ieee80211_get_channel(hw->wiphy, 5745);
+ else
+ ch = ieee80211_get_channel(hw->wiphy, 2437);
+
+ cfg80211_next_nan_dw_notif(wdev, ch, GFP_ATOMIC);
+ }
+
+ return HRTIMER_RESTART;
+}
+
+static int mac80211_hwsim_start_nan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_nan_conf *conf)
+{
+ struct mac80211_hwsim_data *data = hw->priv;
+ u64 tsf = mac80211_hwsim_get_tsf(hw, NULL);
+ u32 dw_int = 512 * 1024; /* 512 TU, matching the DW timer */
+ u64 until_dw = dw_int - do_div(tsf, dw_int);
+ struct wireless_dev *wdev = ieee80211_vif_to_wdev(vif);
+
+ if (vif->type != NL80211_IFTYPE_NAN)
+ return -EINVAL;
+
+ if (data->nan_device_vif)
+ return -EALREADY;
+
+ /* set this before starting the timer, as preemption might occur */
+ data->nan_device_vif = vif;
+ data->nan_bands = conf->bands;
+ data->nan_curr_dw_band = NL80211_BAND_2GHZ;
+
+ wiphy_debug(hw->wiphy, "nan_started, next_dw=%llu\n",
+ until_dw);
+
+ hrtimer_start(&data->nan_timer,
+ ns_to_ktime(until_dw * NSEC_PER_USEC),
+ HRTIMER_MODE_REL_SOFT);
+
+ if (conf->cluster_id && !is_zero_ether_addr(conf->cluster_id) &&
+ is_zero_ether_addr(hwsim_nan_cluster_id)) {
+ memcpy(hwsim_nan_cluster_id, conf->cluster_id, ETH_ALEN);
+ } else if (is_zero_ether_addr(hwsim_nan_cluster_id)) {
+ hwsim_nan_cluster_id[0] = 0x50;
+ hwsim_nan_cluster_id[1] = 0x6f;
+ hwsim_nan_cluster_id[2] = 0x9a;
+ hwsim_nan_cluster_id[3] = 0x01;
+ hwsim_nan_cluster_id[4] = get_random_u8();
+ hwsim_nan_cluster_id[5] = get_random_u8();
+ }
+
+ data->notify_dw = conf->enable_dw_notification;
+
+ cfg80211_nan_cluster_joined(wdev, hwsim_nan_cluster_id, true,
+ GFP_KERNEL);
+
+ return 0;
+}
+
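The fallback address built above lands in the Wi-Fi Aware cluster ID range, 50-6F-9A-01-00-00 through 50-6F-9A-01-FF-FF: a fixed four-byte prefix followed by two random bytes. Factored out, the generation would read (a sketch only; start_nan() open-codes it):

	/* sketch: random NAN cluster ID, 50-6F-9A-01-xx-yy */
	static void hwsim_random_cluster_id(u8 addr[ETH_ALEN])
	{
		static const u8 prefix[] = { 0x50, 0x6f, 0x9a, 0x01 };

		memcpy(addr, prefix, sizeof(prefix));
		addr[4] = get_random_u8();
		addr[5] = get_random_u8();
	}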
+static int mac80211_hwsim_stop_nan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mac80211_hwsim_data *data = hw->priv;
+ struct mac80211_hwsim_data *data2;
+ bool nan_cluster_running = false;
+
+ if (vif->type != NL80211_IFTYPE_NAN || !data->nan_device_vif ||
+ data->nan_device_vif != vif)
+ return -EINVAL;
+
+ hrtimer_cancel(&data->nan_timer);
+ data->nan_device_vif = NULL;
+
+ spin_lock(&hwsim_radio_lock);
+ list_for_each_entry(data2, &hwsim_radios, list) {
+ if (data2->nan_device_vif) {
+ nan_cluster_running = true;
+ break;
+ }
+ }
+ spin_unlock(&hwsim_radio_lock);
+
+ if (!nan_cluster_running)
+ memset(hwsim_nan_cluster_id, 0, ETH_ALEN);
+
+ return 0;
+}
+
+static int mac80211_hwsim_change_nan_config(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_nan_conf *conf,
+ u32 changes)
+{
+ struct mac80211_hwsim_data *data = hw->priv;
+
+ if (vif->type != NL80211_IFTYPE_NAN)
+ return -EINVAL;
+
+ if (!data->nan_device_vif)
+ return -EINVAL;
+
+ wiphy_debug(hw->wiphy, "nan_config_changed: changes=0x%x\n", changes);
+
+ /* Handle only the changes we care about for simulation purposes */
+ if (changes & CFG80211_NAN_CONF_CHANGED_BANDS) {
+ data->nan_bands = conf->bands;
+ data->nan_curr_dw_band = NL80211_BAND_2GHZ;
+ }
+
+ if (changes & CFG80211_NAN_CONF_CHANGED_CONFIG)
+ data->notify_dw = conf->enable_dw_notification;
+
+ return 0;
+}
+
#ifdef CONFIG_MAC80211_DEBUGFS
#define HWSIM_DEBUGFS_OPS \
.link_add_debugfs = mac80211_hwsim_link_add_debugfs,
@@ -3982,6 +4180,9 @@ out:
.get_et_strings = mac80211_hwsim_get_et_strings, \
.start_pmsr = mac80211_hwsim_start_pmsr, \
.abort_pmsr = mac80211_hwsim_abort_pmsr, \
+ .start_nan = mac80211_hwsim_start_nan, \
+ .stop_nan = mac80211_hwsim_stop_nan, \
+ .nan_change_conf = mac80211_hwsim_change_nan_config, \
HWSIM_DEBUGFS_OPS
#define HWSIM_NON_MLO_OPS \
@@ -4047,6 +4248,7 @@ struct hwsim_new_radio_params {
u8 n_ciphers;
bool mlo;
const struct cfg80211_pmsr_capabilities *pmsr_capa;
+ bool nan_device;
};
static void hwsim_mcast_config_msg(struct sk_buff *mcast_skb,
@@ -4127,6 +4329,11 @@ static int append_radio_msg(struct sk_buff *skb, int id,
return ret;
}
+ if (param->nan_device) {
+ ret = nla_put_flag(skb, HWSIM_ATTR_SUPPORT_NAN_DEVICE);
+ if (ret < 0)
+ return ret;
+ }
return 0;
}
@@ -5239,14 +5446,18 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
	/* Why is a second address needed here? */
memcpy(data->addresses[1].addr, addr, ETH_ALEN);
data->addresses[1].addr[0] |= 0x40;
- hw->wiphy->n_addresses = 2;
+ memcpy(data->addresses[2].addr, addr, ETH_ALEN);
+ data->addresses[2].addr[0] |= 0x50;
+
+ hw->wiphy->n_addresses = 3;
hw->wiphy->addresses = data->addresses;
/* possible address clash is checked at hash table insertion */
} else {
memcpy(data->addresses[0].addr, param->perm_addr, ETH_ALEN);
/* compatibility with automatically generated mac addr */
memcpy(data->addresses[1].addr, param->perm_addr, ETH_ALEN);
- hw->wiphy->n_addresses = 2;
+ memcpy(data->addresses[2].addr, param->perm_addr, ETH_ALEN);
+ hw->wiphy->n_addresses = 3;
hw->wiphy->addresses = data->addresses;
}
@@ -5283,6 +5494,30 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
n_limits++;
}
+ if (param->iftypes & BIT(NL80211_IFTYPE_NAN)) {
+ data->if_limits[n_limits].max = 1;
+ data->if_limits[n_limits].types = BIT(NL80211_IFTYPE_NAN);
+ n_limits++;
+
+ hw->wiphy->nan_supported_bands = BIT(NL80211_BAND_2GHZ) |
+ BIT(NL80211_BAND_5GHZ);
+
+ hw->wiphy->nan_capa.flags = WIPHY_NAN_FLAGS_CONFIGURABLE_SYNC |
+ WIPHY_NAN_FLAGS_USERSPACE_DE;
+ hw->wiphy->nan_capa.op_mode = NAN_OP_MODE_PHY_MODE_MASK |
+ NAN_OP_MODE_80P80MHZ |
+ NAN_OP_MODE_160MHZ;
+
+ hw->wiphy->nan_capa.n_antennas = 0x22;
+ hw->wiphy->nan_capa.max_channel_switch_time = 0;
+ hw->wiphy->nan_capa.dev_capabilities =
+ NAN_DEV_CAPA_EXT_KEY_ID_SUPPORTED |
+ NAN_DEV_CAPA_NDPE_SUPPORTED;
+
+ hrtimer_setup(&data->nan_timer, mac80211_hwsim_nan_dw_start,
+ CLOCK_MONOTONIC, HRTIMER_MODE_ABS_SOFT);
+ }
+
data->if_combination.radar_detect_widths =
BIT(NL80211_CHAN_WIDTH_5) |
BIT(NL80211_CHAN_WIDTH_10) |
@@ -5701,6 +5936,8 @@ static int mac80211_hwsim_get_radio(struct sk_buff *skb,
REGULATORY_STRICT_REG);
param.p2p_device = !!(data->hw->wiphy->interface_modes &
BIT(NL80211_IFTYPE_P2P_DEVICE));
+ param.nan_device = !!(data->hw->wiphy->interface_modes &
+ BIT(NL80211_IFTYPE_NAN));
param.use_chanctx = data->use_chanctx;
param.regd = data->regd;
param.channels = data->channels;
@@ -6119,6 +6356,7 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
param.reg_strict = info->attrs[HWSIM_ATTR_REG_STRICT_REG];
param.p2p_device = info->attrs[HWSIM_ATTR_SUPPORT_P2P_DEVICE];
+ param.nan_device = info->attrs[HWSIM_ATTR_SUPPORT_NAN_DEVICE];
param.channels = channels;
param.destroy_on_close =
info->attrs[HWSIM_ATTR_DESTROY_RADIO_ON_CLOSE];
@@ -6190,6 +6428,13 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
param.p2p_device = true;
}
+	/* keep the NAN flag and the NAN iftype bit in sync */
+ if (param.nan_device ||
+ param.iftypes & BIT(NL80211_IFTYPE_NAN)) {
+ param.iftypes |= BIT(NL80211_IFTYPE_NAN);
+ param.nan_device = true;
+ }
+
if (info->attrs[HWSIM_ATTR_CIPHER_SUPPORT]) {
u32 len = nla_len(info->attrs[HWSIM_ATTR_CIPHER_SUPPORT]);
@@ -6910,12 +7155,14 @@ static int __init init_mac80211_hwsim(void)
}
param.p2p_device = support_p2p_device;
+ param.nan_device = true;
param.mlo = mlo;
param.multi_radio = multi_radio;
param.use_chanctx = channels > 1 || mlo || multi_radio;
param.iftypes = HWSIM_IFTYPE_SUPPORT_MASK;
if (param.p2p_device)
param.iftypes |= BIT(NL80211_IFTYPE_P2P_DEVICE);
+ param.iftypes |= BIT(NL80211_IFTYPE_NAN);
err = mac80211_hwsim_new_radio(NULL, &param);
if (err < 0)
diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.h b/drivers/net/wireless/virtual/mac80211_hwsim.h
index fa157c883f7f..c2d06cf852a5 100644
--- a/drivers/net/wireless/virtual/mac80211_hwsim.h
+++ b/drivers/net/wireless/virtual/mac80211_hwsim.h
@@ -3,7 +3,7 @@
* mac80211_hwsim - software simulator of 802.11 radio(s) for mac80211
* Copyright (c) 2008, Jouni Malinen <j@w1.fi>
* Copyright (c) 2011, Javier Lopez <jlopex@gmail.com>
- * Copyright (C) 2020, 2022-2024 Intel Corporation
+ * Copyright (C) 2020, 2022-2025 Intel Corporation
*/
#ifndef __MAC80211_HWSIM_H
@@ -160,6 +160,7 @@ enum hwsim_commands {
* @HWSIM_ATTR_MULTI_RADIO: Register multiple wiphy radios (flag).
* Adds one radio for each band. Number of supported channels will be set for
* each radio instead of for the wiphy.
+ * @HWSIM_ATTR_SUPPORT_NAN_DEVICE: support NAN Device virtual interface (flag)
* @__HWSIM_ATTR_MAX: enum limit
*/
enum hwsim_attrs {
@@ -193,6 +194,7 @@ enum hwsim_attrs {
HWSIM_ATTR_PMSR_REQUEST,
HWSIM_ATTR_PMSR_RESULT,
HWSIM_ATTR_MULTI_RADIO,
+ HWSIM_ATTR_SUPPORT_NAN_DEVICE,
__HWSIM_ATTR_MAX,
};
#define HWSIM_ATTR_MAX (__HWSIM_ATTR_MAX - 1)
diff --git a/drivers/net/wwan/iosm/iosm_ipc_pcie.c b/drivers/net/wwan/iosm/iosm_ipc_pcie.c
index a066977af0be..08ff0d6ccfab 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_pcie.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_pcie.c
@@ -69,7 +69,7 @@ static int ipc_pcie_resources_request(struct iosm_pcie *ipc_pcie)
{
struct pci_dev *pci = ipc_pcie->pci;
u32 cap = 0;
- u32 ret;
+ int ret;
/* Reserved PCI I/O and memory resources.
* Mark all PCI regions associated with PCI device pci as
diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
index 6a7a26085fc7..2310493203d3 100644
--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
@@ -1085,7 +1085,8 @@ static void t7xx_dpmaif_bat_release_work(struct work_struct *work)
int t7xx_dpmaif_bat_rel_wq_alloc(struct dpmaif_ctrl *dpmaif_ctrl)
{
dpmaif_ctrl->bat_release_wq = alloc_workqueue("dpmaif_bat_release_work_queue",
- WQ_MEM_RECLAIM, 1);
+ WQ_MEM_RECLAIM | WQ_PERCPU,
+ 1);
if (!dpmaif_ctrl->bat_release_wq)
return -ENOMEM;
diff --git a/drivers/net/wwan/wwan_hwsim.c b/drivers/net/wwan/wwan_hwsim.c
index b02befd1b6fb..733688cd4607 100644
--- a/drivers/net/wwan/wwan_hwsim.c
+++ b/drivers/net/wwan/wwan_hwsim.c
@@ -509,7 +509,7 @@ static int __init wwan_hwsim_init(void)
if (wwan_hwsim_devsnum < 0 || wwan_hwsim_devsnum > 128)
return -EINVAL;
- wwan_wq = alloc_workqueue("wwan_wq", 0, 0);
+ wwan_wq = alloc_workqueue("wwan_wq", WQ_PERCPU, 0);
if (!wwan_wq)
return -ENOMEM;
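Both wwan hunks above add WQ_PERCPU to alloc_workqueue() calls, making the historically implicit per-CPU placement explicit (assumed here: part of the tree-wide transition toward unbound workqueues as the default). Callers that want the old behaviour now spell it out:

	/* explicitly per-CPU; behaviour otherwise unchanged */
	wq = alloc_workqueue("example_wq", WQ_PERCPU | WQ_MEM_RECLAIM, 0);
	if (!wq)
		return -ENOMEM;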
diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c
index 14661249c690..2b043a9f9533 100644
--- a/drivers/nfc/pn533/pn533.c
+++ b/drivers/nfc/pn533/pn533.c
@@ -1412,11 +1412,9 @@ static int pn533_autopoll_complete(struct pn533 *dev, void *arg,
if (dev->poll_mod_count != 0)
return rc;
goto stop_poll;
- } else if (rc < 0) {
- nfc_err(dev->dev,
- "Error %d when running autopoll\n", rc);
- goto stop_poll;
}
+ nfc_err(dev->dev, "Error %d when running autopoll\n", rc);
+ goto stop_poll;
}
nbtg = resp->data[0];
@@ -1505,11 +1503,9 @@ static int pn533_poll_complete(struct pn533 *dev, void *arg,
if (dev->poll_mod_count != 0)
return rc;
goto stop_poll;
- } else if (rc < 0) {
- nfc_err(dev->dev,
- "Error %d when running poll\n", rc);
- goto stop_poll;
}
+ nfc_err(dev->dev, "Error %d when running poll\n", rc);
+ goto stop_poll;
}
cur_mod = dev->poll_mod_active[dev->poll_mod_curr];
diff --git a/drivers/nfc/s3fwrn5/Kconfig b/drivers/nfc/s3fwrn5/Kconfig
index 8a6b1a79de25..96386b73fa2b 100644
--- a/drivers/nfc/s3fwrn5/Kconfig
+++ b/drivers/nfc/s3fwrn5/Kconfig
@@ -1,8 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config NFC_S3FWRN5
tristate
- select CRYPTO
- select CRYPTO_HASH
+ select CRYPTO_LIB_SHA1
help
Core driver for Samsung S3FWRN5 NFC chip. Contains core utilities
of chip. It's intended to be used by PHYs to avoid duplicating lots
diff --git a/drivers/nfc/s3fwrn5/firmware.c b/drivers/nfc/s3fwrn5/firmware.c
index 781cdbcac104..64d61b2a715a 100644
--- a/drivers/nfc/s3fwrn5/firmware.c
+++ b/drivers/nfc/s3fwrn5/firmware.c
@@ -8,7 +8,6 @@
#include <linux/completion.h>
#include <linux/firmware.h>
-#include <crypto/hash.h>
#include <crypto/sha1.h>
#include "s3fwrn5.h"
@@ -411,27 +410,13 @@ int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info)
struct device *dev = &fw_info->ndev->nfc_dev->dev;
struct s3fwrn5_fw_image *fw = &fw_info->fw;
u8 hash_data[SHA1_DIGEST_SIZE];
- struct crypto_shash *tfm;
u32 image_size, off;
int ret;
image_size = fw_info->sector_size * fw->image_sectors;
/* Compute SHA of firmware data */
-
- tfm = crypto_alloc_shash("sha1", 0, 0);
- if (IS_ERR(tfm)) {
- dev_err(dev, "Cannot allocate shash (code=%pe)\n", tfm);
- return PTR_ERR(tfm);
- }
-
- ret = crypto_shash_tfm_digest(tfm, fw->image, image_size, hash_data);
-
- crypto_free_shash(tfm);
- if (ret) {
- dev_err(dev, "Cannot compute hash (code=%d)\n", ret);
- return ret;
- }
+ sha1(fw->image, image_size, hash_data);
/* Firmware update process */
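The conversion above swaps the crypto_shash allocation, digest and free for the one-shot SHA-1 library helper pulled in by CRYPTO_LIB_SHA1; the helper takes a buffer and writes the digest, with no tfm and no error path. With data and len standing for any buffer:

	u8 digest[SHA1_DIGEST_SIZE];

	sha1(data, len, digest);	/* one-shot library hash */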
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 2a1aa32e6693..a933db961ed7 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1478,12 +1478,12 @@ static void btt_submit_bio(struct bio *bio)
bio_endio(bio);
}
-static int btt_getgeo(struct block_device *bd, struct hd_geometry *geo)
+static int btt_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
/* some standard values */
geo->heads = 1 << 6;
geo->sectors = 1 << 5;
- geo->cylinders = get_capacity(bd->bd_disk) >> 11;
+ geo->cylinders = get_capacity(disk) >> 11;
return 0;
}
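This helper and the matching nvme_getgeo() change further down report the same synthetic geometry: 64 heads x 32 sectors = 2048 = 2^11 sectors per cylinder, hence the right shift by 11. For example, a 1 GiB disk of 512-byte sectors has 2097152 sectors and reports 2097152 >> 11 = 1024 cylinders.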
diff --git a/drivers/nvme/common/auth.c b/drivers/nvme/common/auth.c
index 91e273b89fea..1f51fbebd9fa 100644
--- a/drivers/nvme/common/auth.c
+++ b/drivers/nvme/common/auth.c
@@ -684,6 +684,59 @@ out_free_enc:
EXPORT_SYMBOL_GPL(nvme_auth_generate_digest);
/**
+ * hkdf_expand_label - HKDF-Expand-Label (RFC 8446 section 7.1)
+ * @hmac_tfm: hash context keyed with pseudorandom key
+ * @label: ASCII label without "tls13 " prefix
+ * @labellen: length of @label
+ * @context: context bytes
+ * @contextlen: length of @context
+ * @okm: output keying material
+ * @okmlen: length of @okm
+ *
+ * Build the TLS 1.3 HkdfLabel structure and invoke hkdf_expand().
+ *
+ * Return: 0 on success with the output keying material stored in @okm,
+ * or a negative errno value otherwise.
+ */
+static int hkdf_expand_label(struct crypto_shash *hmac_tfm,
+ const u8 *label, unsigned int labellen,
+ const u8 *context, unsigned int contextlen,
+ u8 *okm, unsigned int okmlen)
+{
+ int err;
+ u8 *info;
+ unsigned int infolen;
+ const char *tls13_prefix = "tls13 ";
+ unsigned int prefixlen = strlen(tls13_prefix);
+
+ if (WARN_ON(labellen > (255 - prefixlen)))
+ return -EINVAL;
+ if (WARN_ON(contextlen > 255))
+ return -EINVAL;
+
+ infolen = 2 + (1 + prefixlen + labellen) + (1 + contextlen);
+ info = kzalloc(infolen, GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ /* HkdfLabel.Length */
+ put_unaligned_be16(okmlen, info);
+
+ /* HkdfLabel.Label */
+ info[2] = prefixlen + labellen;
+ memcpy(info + 3, tls13_prefix, prefixlen);
+ memcpy(info + 3 + prefixlen, label, labellen);
+
+ /* HkdfLabel.Context */
+ info[3 + prefixlen + labellen] = contextlen;
+ memcpy(info + 4 + prefixlen + labellen, context, contextlen);
+
+ err = hkdf_expand(hmac_tfm, info, infolen, okm, okmlen);
+ kfree_sensitive(info);
+ return err;
+}
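To make the buffer layout concrete, this is the info encoding the function builds, shown for okmlen = 32 and label = "nvme-tls-psk" (per RFC 8446 section 7.1; the context bytes are illustrative):

	/* HkdfLabel wire layout (sketch):
	 *
	 *   00 20                      Length        = okmlen, be16
	 *   12                         Label length  = 6 + 12 = 0x12
	 *   "tls13 nvme-tls-psk"       Label ("tls13 " prefix + label)
	 *   NN                         Context length
	 *   "<hmac_id> <psk digest>"   Context
	 */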
+
+/**
* nvme_auth_derive_tls_psk - Derive TLS PSK
* @hmac_id: Hash function identifier
* @psk: generated input PSK
@@ -715,10 +768,10 @@ int nvme_auth_derive_tls_psk(int hmac_id, u8 *psk, size_t psk_len,
{
struct crypto_shash *hmac_tfm;
const char *hmac_name;
- const char *psk_prefix = "tls13 nvme-tls-psk";
+ const char *label = "nvme-tls-psk";
static const char default_salt[HKDF_MAX_HASHLEN];
- size_t info_len, prk_len;
- char *info;
+ size_t prk_len;
+ const char *ctx;
unsigned char *prk, *tls_key;
int ret;
@@ -758,36 +811,29 @@ int nvme_auth_derive_tls_psk(int hmac_id, u8 *psk, size_t psk_len,
if (ret)
goto out_free_prk;
- /*
- * 2 additional bytes for the length field from HDKF-Expand-Label,
- * 2 additional bytes for the HMAC ID, and one byte for the space
- * separator.
- */
- info_len = strlen(psk_digest) + strlen(psk_prefix) + 5;
- info = kzalloc(info_len + 1, GFP_KERNEL);
- if (!info) {
+ ctx = kasprintf(GFP_KERNEL, "%02d %s", hmac_id, psk_digest);
+ if (!ctx) {
ret = -ENOMEM;
goto out_free_prk;
}
- put_unaligned_be16(psk_len, info);
- memcpy(info + 2, psk_prefix, strlen(psk_prefix));
- sprintf(info + 2 + strlen(psk_prefix), "%02d %s", hmac_id, psk_digest);
-
tls_key = kzalloc(psk_len, GFP_KERNEL);
if (!tls_key) {
ret = -ENOMEM;
- goto out_free_info;
+ goto out_free_ctx;
}
- ret = hkdf_expand(hmac_tfm, info, info_len, tls_key, psk_len);
+ ret = hkdf_expand_label(hmac_tfm,
+ label, strlen(label),
+ ctx, strlen(ctx),
+ tls_key, psk_len);
if (ret) {
kfree(tls_key);
- goto out_free_info;
+ goto out_free_ctx;
}
*ret_psk = tls_key;
-out_free_info:
- kfree(info);
+out_free_ctx:
+ kfree(ctx);
out_free_prk:
kfree(prk);
out_free_shash:
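With the hunk above applied, the derivation is a plain HKDF pipeline (a sketch of the flow, matching the code rather than introducing new API):

	/* prk     = HKDF-Extract(salt = zeroes, ikm = generated PSK)
	 * tls_psk = HKDF-Expand-Label(prk, "nvme-tls-psk",
	 *                             context = "<hmac_id> <psk digest>",
	 *                             L = psk_len)
	 */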
diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
index 1286c31320e6..f35d3f71d14f 100644
--- a/drivers/nvme/host/apple.c
+++ b/drivers/nvme/host/apple.c
@@ -35,7 +35,6 @@
#include "nvme.h"
#define APPLE_ANS_BOOT_TIMEOUT USEC_PER_SEC
-#define APPLE_ANS_MAX_QUEUE_DEPTH 64
#define APPLE_ANS_COPROC_CPU_CONTROL 0x44
#define APPLE_ANS_COPROC_CPU_CONTROL_RUN BIT(4)
@@ -75,6 +74,8 @@
#define APPLE_NVME_AQ_DEPTH 2
#define APPLE_NVME_AQ_MQ_TAG_DEPTH (APPLE_NVME_AQ_DEPTH - 1)
+#define APPLE_NVME_IOSQES 7
+
/*
* These can be higher, but we need to ensure that any command doesn't
* require an sg allocation that needs more than a page of data.
@@ -142,6 +143,7 @@ struct apple_nvme_queue {
u32 __iomem *sq_db;
u32 __iomem *cq_db;
+ u16 sq_tail;
u16 cq_head;
u8 cq_phase;
@@ -166,11 +168,17 @@ struct apple_nvme_iod {
struct scatterlist *sg;
};
+struct apple_nvme_hw {
+ bool has_lsq_nvmmu;
+ u32 max_queue_depth;
+};
+
struct apple_nvme {
struct device *dev;
void __iomem *mmio_coproc;
void __iomem *mmio_nvme;
+ const struct apple_nvme_hw *hw;
struct device **pd_dev;
struct device_link **pd_link;
@@ -215,10 +223,12 @@ static inline struct apple_nvme *queue_to_apple_nvme(struct apple_nvme_queue *q)
static unsigned int apple_nvme_queue_depth(struct apple_nvme_queue *q)
{
- if (q->is_adminq)
+ struct apple_nvme *anv = queue_to_apple_nvme(q);
+
+ if (q->is_adminq && anv->hw->has_lsq_nvmmu)
return APPLE_NVME_AQ_DEPTH;
- return APPLE_ANS_MAX_QUEUE_DEPTH;
+ return anv->hw->max_queue_depth;
}
static void apple_nvme_rtkit_crashed(void *cookie, const void *crashlog, size_t crashlog_size)
@@ -280,7 +290,28 @@ static void apple_nvmmu_inval(struct apple_nvme_queue *q, unsigned int tag)
"NVMMU TCB invalidation failed\n");
}
-static void apple_nvme_submit_cmd(struct apple_nvme_queue *q,
+static void apple_nvme_submit_cmd_t8015(struct apple_nvme_queue *q,
+ struct nvme_command *cmd)
+{
+ struct apple_nvme *anv = queue_to_apple_nvme(q);
+
+ spin_lock_irq(&anv->lock);
+
+ if (q->is_adminq)
+ memcpy(&q->sqes[q->sq_tail], cmd, sizeof(*cmd));
+ else
+ memcpy((void *)q->sqes + (q->sq_tail << APPLE_NVME_IOSQES),
+ cmd, sizeof(*cmd));
+
+ if (++q->sq_tail == anv->hw->max_queue_depth)
+ q->sq_tail = 0;
+
+ writel(q->sq_tail, q->sq_db);
+ spin_unlock_irq(&anv->lock);
+}
+
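On the pre-NVMMU controllers, the I/O submission queue keeps the native 128-byte entries, so the tail index is scaled by 1 << APPLE_NVME_IOSQES (2^7 = 128 bytes) rather than by sizeof(struct nvme_command) (64 bytes). The slot arithmetic in isolation (a sketch):

	/* sketch: byte offset of a slot in a 128-byte-entry SQ ring */
	static void *sqe_slot(void *sqes, u16 tail)
	{
		return sqes + ((size_t)tail << APPLE_NVME_IOSQES); /* * 128 */
	}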
+static void apple_nvme_submit_cmd_t8103(struct apple_nvme_queue *q,
struct nvme_command *cmd)
{
struct apple_nvme *anv = queue_to_apple_nvme(q);
@@ -590,7 +621,8 @@ static inline void apple_nvme_handle_cqe(struct apple_nvme_queue *q,
__u16 command_id = READ_ONCE(cqe->command_id);
struct request *req;
- apple_nvmmu_inval(q, command_id);
+ if (anv->hw->has_lsq_nvmmu)
+ apple_nvmmu_inval(q, command_id);
req = nvme_find_rq(apple_nvme_queue_tagset(anv, q), command_id);
if (unlikely(!req)) {
@@ -685,7 +717,7 @@ static int apple_nvme_create_cq(struct apple_nvme *anv)
c.create_cq.opcode = nvme_admin_create_cq;
c.create_cq.prp1 = cpu_to_le64(anv->ioq.cq_dma_addr);
c.create_cq.cqid = cpu_to_le16(1);
- c.create_cq.qsize = cpu_to_le16(APPLE_ANS_MAX_QUEUE_DEPTH - 1);
+ c.create_cq.qsize = cpu_to_le16(anv->hw->max_queue_depth - 1);
c.create_cq.cq_flags = cpu_to_le16(NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED);
c.create_cq.irq_vector = cpu_to_le16(0);
@@ -713,7 +745,7 @@ static int apple_nvme_create_sq(struct apple_nvme *anv)
c.create_sq.opcode = nvme_admin_create_sq;
c.create_sq.prp1 = cpu_to_le64(anv->ioq.sq_dma_addr);
c.create_sq.sqid = cpu_to_le16(1);
- c.create_sq.qsize = cpu_to_le16(APPLE_ANS_MAX_QUEUE_DEPTH - 1);
+ c.create_sq.qsize = cpu_to_le16(anv->hw->max_queue_depth - 1);
c.create_sq.sq_flags = cpu_to_le16(NVME_QUEUE_PHYS_CONTIG);
c.create_sq.cqid = cpu_to_le16(1);
@@ -765,7 +797,12 @@ static blk_status_t apple_nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
}
nvme_start_request(req);
- apple_nvme_submit_cmd(q, cmnd);
+
+ if (anv->hw->has_lsq_nvmmu)
+ apple_nvme_submit_cmd_t8103(q, cmnd);
+ else
+ apple_nvme_submit_cmd_t8015(q, cmnd);
+
return BLK_STS_OK;
out_free_cmd:
@@ -970,11 +1007,13 @@ static const struct blk_mq_ops apple_nvme_mq_ops = {
static void apple_nvme_init_queue(struct apple_nvme_queue *q)
{
unsigned int depth = apple_nvme_queue_depth(q);
+ struct apple_nvme *anv = queue_to_apple_nvme(q);
q->cq_head = 0;
q->cq_phase = 1;
- memset(q->tcbs, 0,
- APPLE_ANS_MAX_QUEUE_DEPTH * sizeof(struct apple_nvmmu_tcb));
+ if (anv->hw->has_lsq_nvmmu)
+ memset(q->tcbs, 0, anv->hw->max_queue_depth
+ * sizeof(struct apple_nvmmu_tcb));
memset(q->cqes, 0, depth * sizeof(struct nvme_completion));
WRITE_ONCE(q->enabled, true);
wmb(); /* ensure the first interrupt sees the initialization */
@@ -1069,49 +1108,55 @@ static void apple_nvme_reset_work(struct work_struct *work)
dma_set_max_seg_size(anv->dev, 0xffffffff);
- /*
- * Enable NVMMU and linear submission queues.
- * While we could keep those disabled and pretend this is slightly
- * more common NVMe controller we'd still need some quirks (e.g.
- * sq entries will be 128 bytes) and Apple might drop support for
- * that mode in the future.
- */
- writel(APPLE_ANS_LINEAR_SQ_EN,
- anv->mmio_nvme + APPLE_ANS_LINEAR_SQ_CTRL);
+ if (anv->hw->has_lsq_nvmmu) {
+ /*
+		 * Enable NVMMU and linear submission queues, which are
+		 * required since T6000.
+ */
+ writel(APPLE_ANS_LINEAR_SQ_EN,
+ anv->mmio_nvme + APPLE_ANS_LINEAR_SQ_CTRL);
- /* Allow as many pending command as possible for both queues */
- writel(APPLE_ANS_MAX_QUEUE_DEPTH | (APPLE_ANS_MAX_QUEUE_DEPTH << 16),
- anv->mmio_nvme + APPLE_ANS_MAX_PEND_CMDS_CTRL);
+		/* Allow as many pending commands as possible for both queues */
+ writel(anv->hw->max_queue_depth
+ | (anv->hw->max_queue_depth << 16), anv->mmio_nvme
+ + APPLE_ANS_MAX_PEND_CMDS_CTRL);
- /* Setup the NVMMU for the maximum admin and IO queue depth */
- writel(APPLE_ANS_MAX_QUEUE_DEPTH - 1,
- anv->mmio_nvme + APPLE_NVMMU_NUM_TCBS);
+ /* Setup the NVMMU for the maximum admin and IO queue depth */
+ writel(anv->hw->max_queue_depth - 1,
+ anv->mmio_nvme + APPLE_NVMMU_NUM_TCBS);
- /*
- * This is probably a chicken bit: without it all commands where any PRP
- * is set to zero (including those that don't use that field) fail and
- * the co-processor complains about "completed with err BAD_CMD-" or
- * a "NULL_PRP_PTR_ERR" in the syslog
- */
- writel(readl(anv->mmio_nvme + APPLE_ANS_UNKNOWN_CTRL) &
- ~APPLE_ANS_PRP_NULL_CHECK,
- anv->mmio_nvme + APPLE_ANS_UNKNOWN_CTRL);
+ /*
+ * This is probably a chicken bit: without it all commands
+ * where any PRP is set to zero (including those that don't use
+ * that field) fail and the co-processor complains about
+ * "completed with err BAD_CMD-" or a "NULL_PRP_PTR_ERR" in the
+ * syslog
+ */
+ writel(readl(anv->mmio_nvme + APPLE_ANS_UNKNOWN_CTRL) &
+ ~APPLE_ANS_PRP_NULL_CHECK,
+ anv->mmio_nvme + APPLE_ANS_UNKNOWN_CTRL);
+ }
/* Setup the admin queue */
- aqa = APPLE_NVME_AQ_DEPTH - 1;
+ if (anv->hw->has_lsq_nvmmu)
+ aqa = APPLE_NVME_AQ_DEPTH - 1;
+ else
+ aqa = anv->hw->max_queue_depth - 1;
aqa |= aqa << 16;
writel(aqa, anv->mmio_nvme + NVME_REG_AQA);
writeq(anv->adminq.sq_dma_addr, anv->mmio_nvme + NVME_REG_ASQ);
writeq(anv->adminq.cq_dma_addr, anv->mmio_nvme + NVME_REG_ACQ);
- /* Setup NVMMU for both queues */
- writeq(anv->adminq.tcb_dma_addr,
- anv->mmio_nvme + APPLE_NVMMU_ASQ_TCB_BASE);
- writeq(anv->ioq.tcb_dma_addr,
- anv->mmio_nvme + APPLE_NVMMU_IOSQ_TCB_BASE);
+ if (anv->hw->has_lsq_nvmmu) {
+ /* Setup NVMMU for both queues */
+ writeq(anv->adminq.tcb_dma_addr,
+ anv->mmio_nvme + APPLE_NVMMU_ASQ_TCB_BASE);
+ writeq(anv->ioq.tcb_dma_addr,
+ anv->mmio_nvme + APPLE_NVMMU_IOSQ_TCB_BASE);
+ }
anv->ctrl.sqsize =
- APPLE_ANS_MAX_QUEUE_DEPTH - 1; /* 0's based queue depth */
+ anv->hw->max_queue_depth - 1; /* 0's based queue depth */
anv->ctrl.cap = readq(anv->mmio_nvme + NVME_REG_CAP);
dev_dbg(anv->dev, "Enabling controller now");
@@ -1282,8 +1327,9 @@ static int apple_nvme_alloc_tagsets(struct apple_nvme *anv)
* both queues. The admin queue gets the first APPLE_NVME_AQ_DEPTH which
* must be marked as reserved in the IO queue.
*/
- anv->tagset.reserved_tags = APPLE_NVME_AQ_DEPTH;
- anv->tagset.queue_depth = APPLE_ANS_MAX_QUEUE_DEPTH - 1;
+ if (anv->hw->has_lsq_nvmmu)
+ anv->tagset.reserved_tags = APPLE_NVME_AQ_DEPTH;
+ anv->tagset.queue_depth = anv->hw->max_queue_depth - 1;
anv->tagset.timeout = NVME_IO_TIMEOUT;
anv->tagset.numa_node = NUMA_NO_NODE;
anv->tagset.cmd_size = sizeof(struct apple_nvme_iod);
@@ -1307,6 +1353,7 @@ static int apple_nvme_queue_alloc(struct apple_nvme *anv,
struct apple_nvme_queue *q)
{
unsigned int depth = apple_nvme_queue_depth(q);
+ size_t iosq_size;
q->cqes = dmam_alloc_coherent(anv->dev,
depth * sizeof(struct nvme_completion),
@@ -1314,22 +1361,28 @@ static int apple_nvme_queue_alloc(struct apple_nvme *anv,
if (!q->cqes)
return -ENOMEM;
- q->sqes = dmam_alloc_coherent(anv->dev,
- depth * sizeof(struct nvme_command),
+ if (anv->hw->has_lsq_nvmmu)
+ iosq_size = depth * sizeof(struct nvme_command);
+ else
+ iosq_size = depth << APPLE_NVME_IOSQES;
+
+ q->sqes = dmam_alloc_coherent(anv->dev, iosq_size,
&q->sq_dma_addr, GFP_KERNEL);
if (!q->sqes)
return -ENOMEM;
- /*
- * We need the maximum queue depth here because the NVMMU only has a
- * single depth configuration shared between both queues.
- */
- q->tcbs = dmam_alloc_coherent(anv->dev,
- APPLE_ANS_MAX_QUEUE_DEPTH *
- sizeof(struct apple_nvmmu_tcb),
- &q->tcb_dma_addr, GFP_KERNEL);
- if (!q->tcbs)
- return -ENOMEM;
+ if (anv->hw->has_lsq_nvmmu) {
+ /*
+ * We need the maximum queue depth here because the NVMMU only
+ * has a single depth configuration shared between both queues.
+ */
+ q->tcbs = dmam_alloc_coherent(anv->dev,
+ anv->hw->max_queue_depth *
+ sizeof(struct apple_nvmmu_tcb),
+ &q->tcb_dma_addr, GFP_KERNEL);
+ if (!q->tcbs)
+ return -ENOMEM;
+ }
/*
* initialize phase to make sure the allocated and empty memory
@@ -1413,6 +1466,12 @@ static struct apple_nvme *apple_nvme_alloc(struct platform_device *pdev)
anv->adminq.is_adminq = true;
platform_set_drvdata(pdev, anv);
+ anv->hw = of_device_get_match_data(&pdev->dev);
+ if (!anv->hw) {
+ ret = -ENODEV;
+ goto put_dev;
+ }
+
ret = apple_nvme_attach_genpd(anv);
if (ret < 0) {
dev_err_probe(dev, ret, "Failed to attach power domains");
@@ -1444,10 +1503,17 @@ static struct apple_nvme *apple_nvme_alloc(struct platform_device *pdev)
goto put_dev;
}
- anv->adminq.sq_db = anv->mmio_nvme + APPLE_ANS_LINEAR_ASQ_DB;
- anv->adminq.cq_db = anv->mmio_nvme + APPLE_ANS_ACQ_DB;
- anv->ioq.sq_db = anv->mmio_nvme + APPLE_ANS_LINEAR_IOSQ_DB;
- anv->ioq.cq_db = anv->mmio_nvme + APPLE_ANS_IOCQ_DB;
+ if (anv->hw->has_lsq_nvmmu) {
+ anv->adminq.sq_db = anv->mmio_nvme + APPLE_ANS_LINEAR_ASQ_DB;
+ anv->adminq.cq_db = anv->mmio_nvme + APPLE_ANS_ACQ_DB;
+ anv->ioq.sq_db = anv->mmio_nvme + APPLE_ANS_LINEAR_IOSQ_DB;
+ anv->ioq.cq_db = anv->mmio_nvme + APPLE_ANS_IOCQ_DB;
+ } else {
+ anv->adminq.sq_db = anv->mmio_nvme + NVME_REG_DBS;
+ anv->adminq.cq_db = anv->mmio_nvme + APPLE_ANS_ACQ_DB;
+ anv->ioq.sq_db = anv->mmio_nvme + NVME_REG_DBS + 8;
+ anv->ioq.cq_db = anv->mmio_nvme + APPLE_ANS_IOCQ_DB;
+ }
anv->sart = devm_apple_sart_get(dev);
if (IS_ERR(anv->sart)) {
@@ -1625,8 +1691,19 @@ static int apple_nvme_suspend(struct device *dev)
static DEFINE_SIMPLE_DEV_PM_OPS(apple_nvme_pm_ops, apple_nvme_suspend,
apple_nvme_resume);
+static const struct apple_nvme_hw apple_nvme_t8015_hw = {
+ .has_lsq_nvmmu = false,
+ .max_queue_depth = 16,
+};
+
+static const struct apple_nvme_hw apple_nvme_t8103_hw = {
+ .has_lsq_nvmmu = true,
+ .max_queue_depth = 64,
+};
+
static const struct of_device_id apple_nvme_of_match[] = {
- { .compatible = "apple,nvme-ans2" },
+ { .compatible = "apple,t8015-nvme-ans2", .data = &apple_nvme_t8015_hw },
+ { .compatible = "apple,nvme-ans2", .data = &apple_nvme_t8103_hw },
{},
};
MODULE_DEVICE_TABLE(of, apple_nvme_of_match);
diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c
index 201fc8809a62..012fcfc79a73 100644
--- a/drivers/nvme/host/auth.c
+++ b/drivers/nvme/host/auth.c
@@ -331,9 +331,10 @@ static int nvme_auth_set_dhchap_reply_data(struct nvme_ctrl *ctrl,
} else {
memset(chap->c2, 0, chap->hash_len);
}
- if (ctrl->opts->concat)
+ if (ctrl->opts->concat) {
chap->s2 = 0;
- else
+ chap->bi_directional = false;
+ } else
chap->s2 = nvme_auth_get_seqnum();
data->seqnum = cpu_to_le32(chap->s2);
if (chap->host_key_len) {
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 6b7493934535..fa4181d7de73 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1807,12 +1807,12 @@ static void nvme_release(struct gendisk *disk)
nvme_ns_release(disk->private_data);
}
-int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+int nvme_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
/* some standard values */
geo->heads = 1 << 6;
geo->sectors = 1 << 5;
- geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
+ geo->cylinders = get_capacity(disk) >> 11;
return 0;
}
@@ -3167,6 +3167,11 @@ static inline bool nvme_admin_ctrl(struct nvme_ctrl *ctrl)
return ctrl->cntrltype == NVME_CTRL_ADMIN;
}
+static inline bool nvme_is_io_ctrl(struct nvme_ctrl *ctrl)
+{
+ return !nvme_discovery_ctrl(ctrl) && !nvme_admin_ctrl(ctrl);
+}
+
static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
@@ -3369,7 +3374,7 @@ static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl)
else
ctrl->max_zeroes_sectors = 0;
- if (ctrl->subsys->subtype != NVME_NQN_NVME ||
+ if (!nvme_is_io_ctrl(ctrl) ||
!nvme_id_cns_ok(ctrl, NVME_ID_CNS_CS_CTRL) ||
test_bit(NVME_CTRL_SKIP_ID_CNS_CS, &ctrl->flags))
return 0;
@@ -3491,14 +3496,14 @@ static int nvme_check_ctrl_fabric_info(struct nvme_ctrl *ctrl, struct nvme_id_ct
return -EINVAL;
}
- if (!nvme_discovery_ctrl(ctrl) && ctrl->ioccsz < 4) {
+ if (nvme_is_io_ctrl(ctrl) && ctrl->ioccsz < 4) {
dev_err(ctrl->device,
"I/O queue command capsule supported size %d < 4\n",
ctrl->ioccsz);
return -EINVAL;
}
- if (!nvme_discovery_ctrl(ctrl) && ctrl->iorcsz < 1) {
+ if (nvme_is_io_ctrl(ctrl) && ctrl->iorcsz < 1) {
dev_err(ctrl->device,
"I/O queue response capsule supported size %d < 1\n",
ctrl->iorcsz);
@@ -4990,8 +4995,14 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl)
* checking that they started once before, hence are reconnecting back.
*/
if (test_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags) &&
- nvme_discovery_ctrl(ctrl))
+ nvme_discovery_ctrl(ctrl)) {
+ if (!ctrl->kato) {
+ nvme_stop_keep_alive(ctrl);
+ ctrl->kato = NVME_DEFAULT_KATO;
+ nvme_start_keep_alive(ctrl);
+ }
nvme_change_uevent(ctrl, "NVME_EVENT=rediscover");
+ }
if (ctrl->queue_count > 1) {
nvme_queue_scan(ctrl);
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 3e12d4683ac7..03987f497a5b 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -3032,11 +3032,17 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
++ctrl->ctrl.nr_reconnects;
- if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
+ spin_lock_irqsave(&ctrl->rport->lock, flags);
+ if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE) {
+ spin_unlock_irqrestore(&ctrl->rport->lock, flags);
return -ENODEV;
+ }
- if (nvme_fc_ctlr_active_on_rport(ctrl))
+ if (nvme_fc_ctlr_active_on_rport(ctrl)) {
+ spin_unlock_irqrestore(&ctrl->rport->lock, flags);
return -ENOTUNIQ;
+ }
+ spin_unlock_irqrestore(&ctrl->rport->lock, flags);
dev_info(ctrl->ctrl.device,
"NVME-FC{%d}: create association : host wwpn 0x%016llx "
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 6b3ac8ae3f34..c212fa952c0f 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -142,14 +142,9 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
ret = blk_rq_map_user_io(req, NULL, nvme_to_user_ptr(ubuffer),
bufflen, GFP_KERNEL, flags & NVME_IOCTL_VEC, 0,
0, rq_data_dir(req));
-
if (ret)
return ret;
- bio = req->bio;
- if (bdev)
- bio_set_dev(bio, bdev);
-
if (has_metadata) {
ret = blk_rq_integrity_map_user(req, meta_buffer, meta_len);
if (ret)
@@ -410,7 +405,7 @@ static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd,
if (pdu->bio)
blk_rq_unmap_user(pdu->bio);
- io_uring_cmd_done(ioucmd, pdu->status, pdu->result, issue_flags);
+ io_uring_cmd_done32(ioucmd, pdu->status, pdu->result, issue_flags);
}
static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index cfd2b5b90b91..102fae6a231c 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -936,7 +936,7 @@ int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd,
unsigned int issue_flags);
int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
struct nvme_id_ns **id);
-int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo);
+int nvme_getgeo(struct gendisk *disk, struct hd_geometry *geo);
int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags);
extern const struct attribute_group *nvme_ns_attr_groups[];
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 2c6d9506b172..c916176bd9f0 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -172,9 +172,7 @@ struct nvme_dev {
u32 last_ps;
bool hmb;
struct sg_table *hmb_sgt;
-
mempool_t *dmavec_mempool;
- mempool_t *iod_meta_mempool;
/* shadow doorbell buffer support: */
__le32 *dbbuf_dbs;
@@ -261,6 +259,9 @@ enum nvme_iod_flags {
/* single segment dma mapping */
IOD_SINGLE_SEGMENT = 1U << 2,
+
+ /* Metadata using non-coalesced MPTR */
+ IOD_SINGLE_META_SEGMENT = 1U << 5,
};
struct nvme_dma_vec {
@@ -284,7 +285,8 @@ struct nvme_iod {
unsigned int nr_dma_vecs;
dma_addr_t meta_dma;
- struct sg_table meta_sgt;
+ unsigned int meta_total_len;
+ struct dma_iova_state meta_dma_state;
struct nvme_sgl_desc *meta_descriptor;
};
@@ -641,6 +643,11 @@ static inline struct dma_pool *nvme_dma_pool(struct nvme_queue *nvmeq,
return nvmeq->descriptor_pools.large;
}
+static inline bool nvme_pci_cmd_use_meta_sgl(struct nvme_command *cmd)
+{
+ return (cmd->common.flags & NVME_CMD_SGL_ALL) == NVME_CMD_SGL_METASEG;
+}
+
static inline bool nvme_pci_cmd_use_sgl(struct nvme_command *cmd)
{
return cmd->common.flags &
@@ -690,25 +697,52 @@ static void nvme_free_prps(struct request *req)
mempool_free(iod->dma_vecs, nvmeq->dev->dmavec_mempool);
}
-static void nvme_free_sgls(struct request *req)
+static void nvme_free_sgls(struct request *req, struct nvme_sgl_desc *sge,
+ struct nvme_sgl_desc *sg_list)
{
- struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
+ enum dma_data_direction dir = rq_dma_dir(req);
+ unsigned int len = le32_to_cpu(sge->length);
struct device *dma_dev = nvmeq->dev->dev;
- dma_addr_t sqe_dma_addr = le64_to_cpu(iod->cmd.common.dptr.sgl.addr);
- unsigned int sqe_dma_len = le32_to_cpu(iod->cmd.common.dptr.sgl.length);
- struct nvme_sgl_desc *sg_list = iod->descriptors[0];
+ unsigned int i;
+
+ if (sge->type == (NVME_SGL_FMT_DATA_DESC << 4)) {
+ dma_unmap_page(dma_dev, le64_to_cpu(sge->addr), len, dir);
+ return;
+ }
+
+ for (i = 0; i < len / sizeof(*sg_list); i++)
+ dma_unmap_page(dma_dev, le64_to_cpu(sg_list[i].addr),
+ le32_to_cpu(sg_list[i].length), dir);
+}
+
+static void nvme_unmap_metadata(struct request *req)
+{
+ struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
enum dma_data_direction dir = rq_dma_dir(req);
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ struct device *dma_dev = nvmeq->dev->dev;
+ struct nvme_sgl_desc *sge = iod->meta_descriptor;
- if (iod->nr_descriptors) {
- unsigned int nr_entries = sqe_dma_len / sizeof(*sg_list), i;
+ if (iod->flags & IOD_SINGLE_META_SEGMENT) {
+ dma_unmap_page(dma_dev, iod->meta_dma,
+ rq_integrity_vec(req).bv_len,
+ rq_dma_dir(req));
+ return;
+ }
- for (i = 0; i < nr_entries; i++)
- dma_unmap_page(dma_dev, le64_to_cpu(sg_list[i].addr),
- le32_to_cpu(sg_list[i].length), dir);
- } else {
- dma_unmap_page(dma_dev, sqe_dma_addr, sqe_dma_len, dir);
+ if (!blk_rq_integrity_dma_unmap(req, dma_dev, &iod->meta_dma_state,
+ iod->meta_total_len)) {
+ if (nvme_pci_cmd_use_meta_sgl(&iod->cmd))
+ nvme_free_sgls(req, sge, &sge[1]);
+ else
+ dma_unmap_page(dma_dev, iod->meta_dma,
+ iod->meta_total_len, dir);
}
+
+ if (iod->meta_descriptor)
+ dma_pool_free(nvmeq->descriptor_pools.small,
+ iod->meta_descriptor, iod->meta_dma);
}
static void nvme_unmap_data(struct request *req)
@@ -727,7 +761,8 @@ static void nvme_unmap_data(struct request *req)
if (!blk_rq_dma_unmap(req, dma_dev, &iod->dma_state, iod->total_len)) {
if (nvme_pci_cmd_use_sgl(&iod->cmd))
- nvme_free_sgls(req);
+ nvme_free_sgls(req, iod->descriptors[0],
+ &iod->cmd.common.dptr.sgl);
else
nvme_free_prps(req);
}
@@ -1007,70 +1042,70 @@ static blk_status_t nvme_map_data(struct request *req)
return nvme_pci_setup_data_prp(req, &iter);
}
-static void nvme_pci_sgl_set_data_sg(struct nvme_sgl_desc *sge,
- struct scatterlist *sg)
-{
- sge->addr = cpu_to_le64(sg_dma_address(sg));
- sge->length = cpu_to_le32(sg_dma_len(sg));
- sge->type = NVME_SGL_FMT_DATA_DESC << 4;
-}
-
static blk_status_t nvme_pci_setup_meta_sgls(struct request *req)
{
struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
- struct nvme_dev *dev = nvmeq->dev;
+ unsigned int entries = req->nr_integrity_segments;
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ struct nvme_dev *dev = nvmeq->dev;
struct nvme_sgl_desc *sg_list;
- struct scatterlist *sgl, *sg;
- unsigned int entries;
+ struct blk_dma_iter iter;
dma_addr_t sgl_dma;
- int rc, i;
+ int i = 0;
- iod->meta_sgt.sgl = mempool_alloc(dev->iod_meta_mempool, GFP_ATOMIC);
- if (!iod->meta_sgt.sgl)
- return BLK_STS_RESOURCE;
+ if (!blk_rq_integrity_dma_map_iter_start(req, dev->dev,
+ &iod->meta_dma_state, &iter))
+ return iter.status;
- sg_init_table(iod->meta_sgt.sgl, req->nr_integrity_segments);
- iod->meta_sgt.orig_nents = blk_rq_map_integrity_sg(req,
- iod->meta_sgt.sgl);
- if (!iod->meta_sgt.orig_nents)
- goto out_free_sg;
+ if (blk_rq_dma_map_coalesce(&iod->meta_dma_state))
+ entries = 1;
- rc = dma_map_sgtable(dev->dev, &iod->meta_sgt, rq_dma_dir(req),
- DMA_ATTR_NO_WARN);
- if (rc)
- goto out_free_sg;
+ /*
+ * The NVMe MPTR descriptor has an implicit length that the host and
+ * device must agree on to avoid data/memory corruption. We trust the
+	 * device must agree on to avoid data/memory corruption. We trust that
+	 * the kernel allocated the buffer correctly based on the format's
+	 * parameters, so use the more efficient MPTR to avoid extra DMA pool
+	 * allocations for the SGL indirection.
+ * But for user commands, we don't necessarily know what they do, so
+ * the driver can't validate the metadata buffer size. The SGL
+ * descriptor provides an explicit length, so we're relying on that
+ * mechanism to catch any misunderstandings between the application and
+ * device.
+ */
+ if (entries == 1 && !(nvme_req(req)->flags & NVME_REQ_USERCMD)) {
+ iod->cmd.common.metadata = cpu_to_le64(iter.addr);
+ iod->meta_total_len = iter.len;
+ iod->meta_dma = iter.addr;
+ iod->meta_descriptor = NULL;
+ return BLK_STS_OK;
+ }
sg_list = dma_pool_alloc(nvmeq->descriptor_pools.small, GFP_ATOMIC,
&sgl_dma);
if (!sg_list)
- goto out_unmap_sg;
+ return BLK_STS_RESOURCE;
- entries = iod->meta_sgt.nents;
iod->meta_descriptor = sg_list;
iod->meta_dma = sgl_dma;
-
iod->cmd.common.flags = NVME_CMD_SGL_METASEG;
iod->cmd.common.metadata = cpu_to_le64(sgl_dma);
-
- sgl = iod->meta_sgt.sgl;
if (entries == 1) {
- nvme_pci_sgl_set_data_sg(sg_list, sgl);
+ iod->meta_total_len = iter.len;
+ nvme_pci_sgl_set_data(sg_list, &iter);
return BLK_STS_OK;
}
sgl_dma += sizeof(*sg_list);
- nvme_pci_sgl_set_seg(sg_list, sgl_dma, entries);
- for_each_sg(sgl, sg, entries, i)
- nvme_pci_sgl_set_data_sg(&sg_list[i + 1], sg);
-
- return BLK_STS_OK;
+ do {
+ nvme_pci_sgl_set_data(&sg_list[++i], &iter);
+ iod->meta_total_len += iter.len;
+ } while (blk_rq_integrity_dma_map_iter_next(req, dev->dev, &iter));
-out_unmap_sg:
- dma_unmap_sgtable(dev->dev, &iod->meta_sgt, rq_dma_dir(req), 0);
-out_free_sg:
- mempool_free(iod->meta_sgt.sgl, dev->iod_meta_mempool);
- return BLK_STS_RESOURCE;
+ nvme_pci_sgl_set_seg(sg_list, sgl_dma, i);
+ if (unlikely(iter.status))
+ nvme_unmap_metadata(req);
+ return iter.status;
}
static blk_status_t nvme_pci_setup_meta_mptr(struct request *req)
@@ -1083,6 +1118,7 @@ static blk_status_t nvme_pci_setup_meta_mptr(struct request *req)
if (dma_mapping_error(nvmeq->dev->dev, iod->meta_dma))
return BLK_STS_IOERR;
iod->cmd.common.metadata = cpu_to_le64(iod->meta_dma);
+ iod->flags |= IOD_SINGLE_META_SEGMENT;
return BLK_STS_OK;
}
@@ -1104,7 +1140,7 @@ static blk_status_t nvme_prep_rq(struct request *req)
iod->flags = 0;
iod->nr_descriptors = 0;
iod->total_len = 0;
- iod->meta_sgt.nents = 0;
+ iod->meta_total_len = 0;
ret = nvme_setup_cmd(req->q->queuedata, req);
if (ret)
@@ -1215,25 +1251,6 @@ static void nvme_queue_rqs(struct rq_list *rqlist)
*rqlist = requeue_list;
}
-static __always_inline void nvme_unmap_metadata(struct request *req)
-{
- struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
- struct nvme_dev *dev = nvmeq->dev;
-
- if (!iod->meta_sgt.nents) {
- dma_unmap_page(dev->dev, iod->meta_dma,
- rq_integrity_vec(req).bv_len,
- rq_dma_dir(req));
- return;
- }
-
- dma_pool_free(nvmeq->descriptor_pools.small, iod->meta_descriptor,
- iod->meta_dma);
- dma_unmap_sgtable(dev->dev, &iod->meta_sgt, rq_dma_dir(req), 0);
- mempool_free(iod->meta_sgt.sgl, dev->iod_meta_mempool);
-}
-
static __always_inline void nvme_pci_unmap_rq(struct request *req)
{
if (blk_integrity_rq(req))
@@ -3039,7 +3056,6 @@ static int nvme_disable_prepare_reset(struct nvme_dev *dev, bool shutdown)
static int nvme_pci_alloc_iod_mempool(struct nvme_dev *dev)
{
- size_t meta_size = sizeof(struct scatterlist) * (NVME_MAX_META_SEGS + 1);
size_t alloc_size = sizeof(struct nvme_dma_vec) * NVME_MAX_SEGS;
dev->dmavec_mempool = mempool_create_node(1,
@@ -3048,17 +3064,7 @@ static int nvme_pci_alloc_iod_mempool(struct nvme_dev *dev)
dev_to_node(dev->dev));
if (!dev->dmavec_mempool)
return -ENOMEM;
-
- dev->iod_meta_mempool = mempool_create_node(1,
- mempool_kmalloc, mempool_kfree,
- (void *)meta_size, GFP_KERNEL,
- dev_to_node(dev->dev));
- if (!dev->iod_meta_mempool)
- goto free;
return 0;
-free:
- mempool_destroy(dev->dmavec_mempool);
- return -ENOMEM;
}
static void nvme_free_tagset(struct nvme_dev *dev)
@@ -3324,10 +3330,12 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
* Exclude Samsung 990 Evo from NVME_QUIRK_SIMPLE_SUSPEND
* because of high power consumption (> 2 Watt) in s2idle
* sleep. Only some boards with Intel CPU are affected.
+	 * (Note for testing: the Samsung 990 Evo Plus has the same PCI ID)
*/
if (dmi_match(DMI_BOARD_NAME, "DN50Z-140HC-YD") ||
dmi_match(DMI_BOARD_NAME, "GMxPXxx") ||
dmi_match(DMI_BOARD_NAME, "GXxMRXx") ||
+ dmi_match(DMI_BOARD_NAME, "NS5X_NS7XAU") ||
dmi_match(DMI_BOARD_NAME, "PH4PG31") ||
dmi_match(DMI_BOARD_NAME, "PH4PRX1_PH6PRX1") ||
dmi_match(DMI_BOARD_NAME, "PH6PG01_PH6PG71"))
@@ -3508,7 +3516,6 @@ out_disable:
nvme_free_queues(dev, 0);
out_release_iod_mempool:
mempool_destroy(dev->dmavec_mempool);
- mempool_destroy(dev->iod_meta_mempool);
out_dev_unmap:
nvme_dev_unmap(dev);
out_uninit_ctrl:
@@ -3572,7 +3579,6 @@ static void nvme_remove(struct pci_dev *pdev)
nvme_dbbuf_dma_free(dev);
nvme_free_queues(dev, 0);
mempool_destroy(dev->dmavec_mempool);
- mempool_destroy(dev->iod_meta_mempool);
nvme_release_descriptor_pools(dev);
nvme_dev_unmap(dev);
nvme_uninit_ctrl(&dev->ctrl);
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index c0fe8cfb7229..1413788ca7d5 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -2250,6 +2250,9 @@ static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
if (error)
goto out_cleanup_tagset;
+ if (ctrl->opts->concat && !ctrl->tls_pskid)
+ return 0;
+
error = nvme_enable_ctrl(ctrl);
if (error)
goto out_stop_queue;
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 0dd7bd99afa3..5d7d483bfbe3 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -513,9 +513,6 @@ static int nvmet_p2pmem_ns_enable(struct nvmet_ns *ns)
return 0;
}
-/*
- * Note: ctrl->subsys->lock should be held when calling this function
- */
static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
struct nvmet_ns *ns)
{
@@ -523,6 +520,8 @@ static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
struct pci_dev *p2p_dev;
int ret;
+ lockdep_assert_held(&ctrl->subsys->lock);
+
if (!ctrl->p2p_client || !ns->use_p2pmem)
return;
@@ -1539,15 +1538,14 @@ bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn)
return false;
}
-/*
- * Note: ctrl->subsys->lock should be held when calling this function
- */
static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
struct device *p2p_client)
{
struct nvmet_ns *ns;
unsigned long idx;
+ lockdep_assert_held(&ctrl->subsys->lock);
+
if (!p2p_client)
return;
@@ -1557,14 +1555,13 @@ static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
nvmet_p2pmem_ns_add_p2p(ctrl, ns);
}
-/*
- * Note: ctrl->subsys->lock should be held when calling this function
- */
static void nvmet_release_p2p_ns_map(struct nvmet_ctrl *ctrl)
{
struct radix_tree_iter iter;
void __rcu **slot;
+ lockdep_assert_held(&ctrl->subsys->lock);
+
radix_tree_for_each_slot(slot, &ctrl->p2p_ns_map, &iter, 0)
pci_dev_put(radix_tree_deref_slot(slot));
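All three hunks in this file apply the same pattern: a "caller must hold the lock" comment becomes an enforced lockdep_assert_held(), which both documents the rule and makes violations visible on lockdep-enabled builds. The idiom in isolation:

	static void needs_subsys_lock(struct nvmet_ctrl *ctrl)
	{
		lockdep_assert_held(&ctrl->subsys->lock); /* splat if unheld */
		/* ... body that touches subsys state ... */
	}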
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index a9b18c051f5b..7d84527d5a43 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -54,6 +54,8 @@ struct nvmet_fc_ls_req_op { /* for an LS RQST XMT */
int ls_error;
struct list_head lsreq_list; /* tgtport->ls_req_list */
bool req_queued;
+
+ struct work_struct put_work;
};
@@ -111,8 +113,6 @@ struct nvmet_fc_tgtport {
struct nvmet_fc_port_entry *pe;
struct kref ref;
u32 max_sg_cnt;
-
- struct work_struct put_work;
};
struct nvmet_fc_port_entry {
@@ -235,12 +235,13 @@ static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
-static void nvmet_fc_put_tgtport_work(struct work_struct *work)
+static void nvmet_fc_put_lsop_work(struct work_struct *work)
{
- struct nvmet_fc_tgtport *tgtport =
- container_of(work, struct nvmet_fc_tgtport, put_work);
+ struct nvmet_fc_ls_req_op *lsop =
+ container_of(work, struct nvmet_fc_ls_req_op, put_work);
- nvmet_fc_tgtport_put(tgtport);
+ nvmet_fc_tgtport_put(lsop->tgtport);
+ kfree(lsop);
}
static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
@@ -367,7 +368,7 @@ __nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop)
DMA_BIDIRECTIONAL);
out_putwork:
- queue_work(nvmet_wq, &tgtport->put_work);
+ queue_work(nvmet_wq, &lsop->put_work);
}
static int
@@ -388,6 +389,7 @@ __nvmet_fc_send_ls_req(struct nvmet_fc_tgtport *tgtport,
lsreq->done = done;
lsop->req_queued = false;
INIT_LIST_HEAD(&lsop->lsreq_list);
+ INIT_WORK(&lsop->put_work, nvmet_fc_put_lsop_work);
lsreq->rqstdma = fc_dma_map_single(tgtport->dev, lsreq->rqstaddr,
lsreq->rqstlen + lsreq->rsplen,
@@ -447,8 +449,6 @@ nvmet_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
__nvmet_fc_finish_ls_req(lsop);
/* fc-nvme target doesn't care about success or failure of cmd */
-
- kfree(lsop);
}
/*
@@ -1075,6 +1075,14 @@ nvmet_fc_delete_assoc_work(struct work_struct *work)
static void
nvmet_fc_schedule_delete_assoc(struct nvmet_fc_tgt_assoc *assoc)
{
+ int terminating;
+
+ terminating = atomic_xchg(&assoc->terminating, 1);
+
+ /* if already terminating, do nothing */
+ if (terminating)
+ return;
+
nvmet_fc_tgtport_get(assoc->tgtport);
if (!queue_work(nvmet_wq, &assoc->del_work))
nvmet_fc_tgtport_put(assoc->tgtport);
@@ -1202,13 +1210,7 @@ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
{
struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
unsigned long flags;
- int i, terminating;
-
- terminating = atomic_xchg(&assoc->terminating, 1);
-
- /* if already terminating, do nothing */
- if (terminating)
- return;
+ int i;
spin_lock_irqsave(&tgtport->lock, flags);
list_del_rcu(&assoc->a_list);
@@ -1410,7 +1412,6 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
kref_init(&newrec->ref);
ida_init(&newrec->assoc_cnt);
newrec->max_sg_cnt = template->max_sgl_segments;
- INIT_WORK(&newrec->put_work, nvmet_fc_put_tgtport_work);
ret = nvmet_fc_alloc_ls_iodlist(newrec);
if (ret) {
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 257b497d515a..5dffcc5becae 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -496,13 +496,15 @@ fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport,
if (!targetport) {
/*
* The target port is gone. The target doesn't expect any
- * response anymore and the ->done call is not valid
- * because the resources have been freed by
- * nvmet_fc_free_pending_reqs.
+	 * response anymore, and thus lsreq can no longer be accessed.
*
* We end up here from delete association exchange:
* nvmet_fc_xmt_disconnect_assoc sends an async request.
+ *
+ * Return success because this is what LLDDs do; silently
+ * drop the response.
*/
+ lsrsp->done(lsrsp);
kmem_cache_free(lsreq_cache, tls_req);
return 0;
}
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index d2b690857e58..788ccb6ab287 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -674,13 +674,14 @@ err:
/**
* of_msi_xlate - map a MSI ID and find relevant MSI controller node
* @dev: device for which the mapping is to be done.
- * @msi_np: Pointer to store the MSI controller node
+ * @msi_np: Pointer to target MSI controller node
* @id_in: Device ID.
*
* Walk up the device hierarchy looking for devices with a "msi-map"
- * property. If found, apply the mapping to @id_in. @msi_np pointed
- * value must be NULL on entry, if an MSI controller is found @msi_np is
- * initialized to the MSI controller node with a reference held.
+ * property. If found, apply the mapping to @id_in.
+ * If @msi_np points to a non-NULL device node pointer, only entries targeting
+ * that node will be matched; if it points to a NULL value, it will receive the
+ * device node of the first matching target phandle, with a reference held.
*
* Returns: The mapped MSI id.
*/
@@ -701,22 +702,6 @@ u32 of_msi_xlate(struct device *dev, struct device_node **msi_np, u32 id_in)
}
/**
- * of_msi_map_id - Map a MSI ID for a device.
- * @dev: device for which the mapping is to be done.
- * @msi_np: device node of the expected msi controller.
- * @id_in: unmapped MSI ID for the device.
- *
- * Walk up the device hierarchy looking for devices with a "msi-map"
- * property. If found, apply the mapping to @id_in.
- *
- * Return: The mapped MSI ID.
- */
-u32 of_msi_map_id(struct device *dev, struct device_node *msi_np, u32 id_in)
-{
- return of_msi_xlate(dev, &msi_np, id_in);
-}
-
-/**
* of_msi_map_get_device_domain - Use msi-map to find the relevant MSI domain
* @dev: device for which the mapping is to be done.
* @id: Device ID.
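With of_msi_map_id() folded in, of_msi_xlate() covers both call patterns from the updated kernel-doc. A usage sketch (known_msi_np stands for a controller node the caller already holds; error handling elided):

	struct device_node *msi_np = NULL;
	u32 hwid;

	/* discover: *msi_np is NULL on entry, set on match with a ref held */
	hwid = of_msi_xlate(dev, &msi_np, rid);

	/* or filter: match only entries targeting a known controller */
	hwid = of_msi_xlate(dev, &known_msi_np, rid);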
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index edbd60501cf0..bba4f7daff8c 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -476,6 +476,16 @@ static unsigned long _read_bw(struct dev_pm_opp *opp, int index)
return opp->bandwidth[index].peak;
}
+static unsigned long _read_opp_key(struct dev_pm_opp *opp, int index,
+ struct dev_pm_opp_key *key)
+{
+ key->bw = opp->bandwidth ? opp->bandwidth[index].peak : 0;
+ key->freq = opp->rates[index];
+ key->level = opp->level;
+
+ return true;
+}
+
/* Generic comparison helpers */
static bool _compare_exact(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
unsigned long opp_key, unsigned long key)
@@ -509,6 +519,22 @@ static bool _compare_floor(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
return false;
}
+static bool _compare_opp_key_exact(struct dev_pm_opp **opp,
+ struct dev_pm_opp *temp_opp, struct dev_pm_opp_key *opp_key,
+ struct dev_pm_opp_key *key)
+{
+ bool level_match = (key->level == OPP_LEVEL_UNSET || opp_key->level == key->level);
+ bool freq_match = (key->freq == 0 || opp_key->freq == key->freq);
+ bool bw_match = (key->bw == 0 || opp_key->bw == key->bw);
+
+ if (freq_match && level_match && bw_match) {
+ *opp = temp_opp;
+ return true;
+ }
+
+ return false;
+}
+
/* Generic key finding helpers */
static struct dev_pm_opp *_opp_table_find_key(struct opp_table *opp_table,
unsigned long *key, int index, bool available,
@@ -541,6 +567,37 @@ static struct dev_pm_opp *_opp_table_find_key(struct opp_table *opp_table,
return opp;
}
+static struct dev_pm_opp *_opp_table_find_opp_key(struct opp_table *opp_table,
+ struct dev_pm_opp_key *key, bool available,
+ unsigned long (*read)(struct dev_pm_opp *opp, int index,
+ struct dev_pm_opp_key *key),
+ bool (*compare)(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
+ struct dev_pm_opp_key *opp_key, struct dev_pm_opp_key *key),
+ bool (*assert)(struct opp_table *opp_table, unsigned int index))
+{
+ struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+ struct dev_pm_opp_key temp_key;
+
+ /* Assert that the requirement is met */
+ if (!assert(opp_table, 0))
+ return ERR_PTR(-EINVAL);
+
+ guard(mutex)(&opp_table->lock);
+
+ list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
+ if (temp_opp->available == available) {
+ read(temp_opp, 0, &temp_key);
+ if (compare(&opp, temp_opp, &temp_key, key)) {
+ /* Increment the reference count of OPP */
+ dev_pm_opp_get(opp);
+ break;
+ }
+ }
+ }
+
+ return opp;
+}
+
static struct dev_pm_opp *
_find_key(struct device *dev, unsigned long *key, int index, bool available,
unsigned long (*read)(struct dev_pm_opp *opp, int index),
@@ -633,6 +690,48 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
/**
+ * dev_pm_opp_find_key_exact() - Search for an OPP with exact key set
+ * @dev: Device for which the OPP is being searched
+ * @key: OPP key set to match
+ * @available: true/false - match for available OPP
+ *
+ * Search for an exact match of the key set in the OPP table.
+ *
+ * Return: A matching opp on success, else ERR_PTR in case of error.
+ * Possible error values:
+ * EINVAL: for bad pointers
+ * ERANGE: no match found for search
+ * ENODEV: if device not found in list of registered devices
+ *
+ * Note: 'available' is a modifier for the search. If 'available' == true,
+ * then the match is for exact matching key and is available in the stored
+ * OPP table. If false, the match is for exact key which is not available.
+ *
+ * This provides a mechanism to enable an OPP which is not available currently
+ * or the opposite as well.
+ *
+ * The callers are required to call dev_pm_opp_put() for the returned OPP after
+ * use.
+ */
+struct dev_pm_opp *dev_pm_opp_find_key_exact(struct device *dev,
+ struct dev_pm_opp_key *key,
+ bool available)
+{
+ struct opp_table *opp_table __free(put_opp_table) = _find_opp_table(dev);
+
+ if (IS_ERR(opp_table)) {
+ dev_err(dev, "%s: OPP table not found (%ld)\n", __func__,
+ PTR_ERR(opp_table));
+ return ERR_CAST(opp_table);
+ }
+
+ return _opp_table_find_opp_key(opp_table, key, available,
+ _read_opp_key, _compare_opp_key_exact,
+ assert_single_clk);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_find_key_exact);
+
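Per _compare_opp_key_exact() above, a zero freq, a zero bw, or an OPP_LEVEL_UNSET level each act as a wildcard, so callers can match on any subset of the three keys. A usage sketch matching on frequency alone:

	struct dev_pm_opp_key key = {
		.freq  = 800000000,		/* 800 MHz */
		.bw    = 0,			/* don't care */
		.level = OPP_LEVEL_UNSET,	/* don't care */
	};
	struct dev_pm_opp *opp;

	opp = dev_pm_opp_find_key_exact(dev, &key, true);
	if (!IS_ERR(opp)) {
		/* ... use opp ... */
		dev_pm_opp_put(opp);
	}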
+/**
* dev_pm_opp_find_freq_exact_indexed() - Search for an exact freq for the
* clock corresponding to the index
* @dev: Device for which we do this operation
diff --git a/drivers/pci/controller/dwc/pcie-dw-rockchip.c b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
index b5f5eee5a50e..5d7f6f544942 100644
--- a/drivers/pci/controller/dwc/pcie-dw-rockchip.c
+++ b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
@@ -11,6 +11,7 @@
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/gpio/consumer.h>
+#include <linux/hw_bitfield.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/mfd/syscon.h>
@@ -29,18 +30,18 @@
* The upper 16 bits of PCIE_CLIENT_CONFIG are a write
* mask for the lower 16 bits.
*/
-#define HIWORD_UPDATE(mask, val) (((mask) << 16) | (val))
-#define HIWORD_UPDATE_BIT(val) HIWORD_UPDATE(val, val)
-#define HIWORD_DISABLE_BIT(val) HIWORD_UPDATE(val, ~val)
#define to_rockchip_pcie(x) dev_get_drvdata((x)->dev)
/* General Control Register */
#define PCIE_CLIENT_GENERAL_CON 0x0
-#define PCIE_CLIENT_RC_MODE HIWORD_UPDATE_BIT(0x40)
-#define PCIE_CLIENT_EP_MODE HIWORD_UPDATE(0xf0, 0x0)
-#define PCIE_CLIENT_ENABLE_LTSSM HIWORD_UPDATE_BIT(0xc)
-#define PCIE_CLIENT_DISABLE_LTSSM HIWORD_UPDATE(0x0c, 0x8)
+#define PCIE_CLIENT_MODE_MASK GENMASK(7, 4)
+#define PCIE_CLIENT_MODE_EP 0x0UL
+#define PCIE_CLIENT_MODE_RC 0x4UL
+#define PCIE_CLIENT_SET_MODE(x) FIELD_PREP_WM16(PCIE_CLIENT_MODE_MASK, (x))
+#define PCIE_CLIENT_LD_RQ_RST_GRT FIELD_PREP_WM16(BIT(3), 1)
+#define PCIE_CLIENT_ENABLE_LTSSM FIELD_PREP_WM16(BIT(2), 1)
+#define PCIE_CLIENT_DISABLE_LTSSM FIELD_PREP_WM16(BIT(2), 0)
/* Interrupt Status Register Related to Legacy Interrupt */
#define PCIE_CLIENT_INTR_STATUS_LEGACY 0x8
@@ -52,6 +53,11 @@
/* Interrupt Mask Register Related to Legacy Interrupt */
#define PCIE_CLIENT_INTR_MASK_LEGACY 0x1c
+#define PCIE_INTR_MASK GENMASK(7, 0)
+#define PCIE_INTR_CLAMP(_x) ((BIT((_x)) & PCIE_INTR_MASK))
+#define PCIE_INTR_LEGACY_MASK(x) (PCIE_INTR_CLAMP((x)) | \
+ (PCIE_INTR_CLAMP((x)) << 16))
+#define PCIE_INTR_LEGACY_UNMASK(x) (PCIE_INTR_CLAMP((x)) << 16)
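Worked expansion straight from the defines above, for INTx line 2:

	PCIE_INTR_LEGACY_MASK(2)   = BIT(2) | (BIT(2) << 16) = 0x00040004
	PCIE_INTR_LEGACY_UNMASK(2) = BIT(2) << 16            = 0x00040000

In both cases bit 18 write-enables bit 2 of the mask register; the low word then sets (mask) or leaves clear (unmask) that bit.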
/* Interrupt Mask Register Related to Miscellaneous Operation */
#define PCIE_CLIENT_INTR_MASK_MISC 0x24
@@ -116,14 +122,14 @@ static void rockchip_pcie_intx_handler(struct irq_desc *desc)
static void rockchip_intx_mask(struct irq_data *data)
{
rockchip_pcie_writel_apb(irq_data_get_irq_chip_data(data),
- HIWORD_UPDATE_BIT(BIT(data->hwirq)),
+ PCIE_INTR_LEGACY_MASK(data->hwirq),
PCIE_CLIENT_INTR_MASK_LEGACY);
};
static void rockchip_intx_unmask(struct irq_data *data)
{
rockchip_pcie_writel_apb(irq_data_get_irq_chip_data(data),
- HIWORD_DISABLE_BIT(BIT(data->hwirq)),
+ PCIE_INTR_LEGACY_UNMASK(data->hwirq),
PCIE_CLIENT_INTR_MASK_LEGACY);
};
@@ -489,7 +495,7 @@ static irqreturn_t rockchip_pcie_ep_sys_irq_thread(int irq, void *arg)
dev_dbg(dev, "hot reset or link-down reset\n");
dw_pcie_ep_linkdown(&pci->ep);
/* Stop delaying link training. */
- val = HIWORD_UPDATE_BIT(PCIE_LTSSM_APP_DLY2_DONE);
+ val = FIELD_PREP_WM16(PCIE_LTSSM_APP_DLY2_DONE, 1);
rockchip_pcie_writel_apb(rockchip, val,
PCIE_CLIENT_HOT_RESET_CTRL);
}
@@ -528,10 +534,11 @@ static int rockchip_pcie_configure_rc(struct platform_device *pdev,
}
/* LTSSM enable control mode */
- val = HIWORD_UPDATE_BIT(PCIE_LTSSM_ENABLE_ENHANCE);
+ val = FIELD_PREP_WM16(PCIE_LTSSM_ENABLE_ENHANCE, 1);
rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_HOT_RESET_CTRL);
- rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_RC_MODE,
+ rockchip_pcie_writel_apb(rockchip,
+ PCIE_CLIENT_SET_MODE(PCIE_CLIENT_MODE_RC),
PCIE_CLIENT_GENERAL_CON);
pp = &rockchip->pci.pp;
@@ -545,7 +552,7 @@ static int rockchip_pcie_configure_rc(struct platform_device *pdev,
}
/* unmask DLL up/down indicator */
- val = HIWORD_UPDATE(PCIE_RDLH_LINK_UP_CHGED, 0);
+ val = FIELD_PREP_WM16(PCIE_RDLH_LINK_UP_CHGED, 0);
rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_INTR_MASK_MISC);
return ret;
@@ -577,10 +584,12 @@ static int rockchip_pcie_configure_ep(struct platform_device *pdev,
* LTSSM enable control mode, and automatically delay link training on
* hot reset/link-down reset.
*/
- val = HIWORD_UPDATE_BIT(PCIE_LTSSM_ENABLE_ENHANCE | PCIE_LTSSM_APP_DLY2_EN);
+ val = FIELD_PREP_WM16(PCIE_LTSSM_ENABLE_ENHANCE, 1) |
+ FIELD_PREP_WM16(PCIE_LTSSM_APP_DLY2_EN, 1);
rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_HOT_RESET_CTRL);
- rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_EP_MODE,
+ rockchip_pcie_writel_apb(rockchip,
+ PCIE_CLIENT_SET_MODE(PCIE_CLIENT_MODE_EP),
PCIE_CLIENT_GENERAL_CON);
rockchip->pci.ep.ops = &rockchip_pcie_ep_ops;
@@ -604,7 +613,8 @@ static int rockchip_pcie_configure_ep(struct platform_device *pdev,
pci_epc_init_notify(rockchip->pci.ep.epc);
/* unmask DLL up/down indicator and hot reset/link-down reset */
- val = HIWORD_UPDATE(PCIE_RDLH_LINK_UP_CHGED | PCIE_LINK_REQ_RST_NOT_INT, 0);
+ val = FIELD_PREP_WM16(PCIE_RDLH_LINK_UP_CHGED, 0) |
+ FIELD_PREP_WM16(PCIE_LINK_REQ_RST_NOT_INT, 0);
rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_INTR_MASK_MISC);
return ret;
diff --git a/drivers/pci/controller/pcie-rockchip.h b/drivers/pci/controller/pcie-rockchip.h
index 72a2c045f6fe..3e82a69b9c00 100644
--- a/drivers/pci/controller/pcie-rockchip.h
+++ b/drivers/pci/controller/pcie-rockchip.h
@@ -12,6 +12,7 @@
#define _PCIE_ROCKCHIP_H
#include <linux/clk.h>
+#include <linux/hw_bitfield.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci-ecam.h>
@@ -21,10 +22,10 @@
* The upper 16 bits of PCIE_CLIENT_CONFIG are a write mask for the lower 16
* bits. This allows atomic updates of the register without locking.
*/
-#define HIWORD_UPDATE(mask, val) (((mask) << 16) | (val))
-#define HIWORD_UPDATE_BIT(val) HIWORD_UPDATE(val, val)
+#define HWORD_SET_BIT(val) (FIELD_PREP_WM16_CONST((val), 1))
+#define HWORD_CLR_BIT(val) (FIELD_PREP_WM16_CONST((val), 0))
-#define ENCODE_LANES(x) ((((x) >> 1) & 3) << 4)
+#define ENCODE_LANES(x) ((((x) >> 1) & 3))
#define MAX_LANE_NUM 4
#define MAX_REGION_LIMIT 32
#define MIN_EP_APERTURE 28
@@ -32,21 +33,21 @@
#define PCIE_CLIENT_BASE 0x0
#define PCIE_CLIENT_CONFIG (PCIE_CLIENT_BASE + 0x00)
-#define PCIE_CLIENT_CONF_ENABLE HIWORD_UPDATE_BIT(0x0001)
-#define PCIE_CLIENT_CONF_DISABLE HIWORD_UPDATE(0x0001, 0)
-#define PCIE_CLIENT_LINK_TRAIN_ENABLE HIWORD_UPDATE_BIT(0x0002)
-#define PCIE_CLIENT_LINK_TRAIN_DISABLE HIWORD_UPDATE(0x0002, 0)
-#define PCIE_CLIENT_ARI_ENABLE HIWORD_UPDATE_BIT(0x0008)
-#define PCIE_CLIENT_CONF_LANE_NUM(x) HIWORD_UPDATE(0x0030, ENCODE_LANES(x))
-#define PCIE_CLIENT_MODE_RC HIWORD_UPDATE_BIT(0x0040)
-#define PCIE_CLIENT_MODE_EP HIWORD_UPDATE(0x0040, 0)
-#define PCIE_CLIENT_GEN_SEL_1 HIWORD_UPDATE(0x0080, 0)
-#define PCIE_CLIENT_GEN_SEL_2 HIWORD_UPDATE_BIT(0x0080)
+#define PCIE_CLIENT_CONF_ENABLE HWORD_SET_BIT(0x0001)
+#define PCIE_CLIENT_CONF_DISABLE HWORD_CLR_BIT(0x0001)
+#define PCIE_CLIENT_LINK_TRAIN_ENABLE HWORD_SET_BIT(0x0002)
+#define PCIE_CLIENT_LINK_TRAIN_DISABLE HWORD_CLR_BIT(0x0002)
+#define PCIE_CLIENT_ARI_ENABLE HWORD_SET_BIT(0x0008)
+#define PCIE_CLIENT_CONF_LANE_NUM(x) FIELD_PREP_WM16(0x0030, ENCODE_LANES(x))
+#define PCIE_CLIENT_MODE_RC HWORD_SET_BIT(0x0040)
+#define PCIE_CLIENT_MODE_EP HWORD_CLR_BIT(0x0040)
+#define PCIE_CLIENT_GEN_SEL_1 HWORD_CLR_BIT(0x0080)
+#define PCIE_CLIENT_GEN_SEL_2 HWORD_SET_BIT(0x0080)
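Since the << 4 moved out of ENCODE_LANES() and into the mask handed to FIELD_PREP_WM16(), the lane-number encoding stays value-compatible with the old form. Under the same FIELD_PREP_WM16 assumption as above, for a 4-lane link:

	PCIE_CLIENT_CONF_LANE_NUM(4)
		= FIELD_PREP_WM16(0x0030, ENCODE_LANES(4))	/* ENCODE_LANES(4) == 0x2 */
		= (0x30 << 16) | (0x2 << 4)
		= 0x00300020	/* identical to the old HIWORD_UPDATE result */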
#define PCIE_CLIENT_LEGACY_INT_CTRL (PCIE_CLIENT_BASE + 0x0c)
-#define PCIE_CLIENT_INT_IN_ASSERT HIWORD_UPDATE_BIT(0x0002)
-#define PCIE_CLIENT_INT_IN_DEASSERT HIWORD_UPDATE(0x0002, 0)
-#define PCIE_CLIENT_INT_PEND_ST_PEND HIWORD_UPDATE_BIT(0x0001)
-#define PCIE_CLIENT_INT_PEND_ST_NORMAL HIWORD_UPDATE(0x0001, 0)
+#define PCIE_CLIENT_INT_IN_ASSERT HWORD_SET_BIT(0x0002)
+#define PCIE_CLIENT_INT_IN_DEASSERT HWORD_CLR_BIT(0x0002)
+#define PCIE_CLIENT_INT_PEND_ST_PEND HWORD_SET_BIT(0x0001)
+#define PCIE_CLIENT_INT_PEND_ST_NORMAL HWORD_CLR_BIT(0x0001)
#define PCIE_CLIENT_SIDE_BAND_STATUS (PCIE_CLIENT_BASE + 0x20)
#define PCIE_CLIENT_PHY_ST BIT(12)
#define PCIE_CLIENT_DEBUG_OUT_0 (PCIE_CLIENT_BASE + 0x3c)
diff --git a/drivers/pci/msi/irqdomain.c b/drivers/pci/msi/irqdomain.c
index dfb61f152702..ce741ed9dc3f 100644
--- a/drivers/pci/msi/irqdomain.c
+++ b/drivers/pci/msi/irqdomain.c
@@ -459,7 +459,7 @@ u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev)
pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);
of_node = irq_domain_get_of_node(domain);
- rid = of_node ? of_msi_map_id(&pdev->dev, of_node, rid) :
+ rid = of_node ? of_msi_xlate(&pdev->dev, &of_node, rid) :
iort_msi_map_id(&pdev->dev, rid);
return rid;
diff --git a/drivers/pci/vgaarb.c b/drivers/pci/vgaarb.c
index 78748e8d2dba..b58f94ee4891 100644
--- a/drivers/pci/vgaarb.c
+++ b/drivers/pci/vgaarb.c
@@ -556,34 +556,13 @@ EXPORT_SYMBOL(vga_put);
static bool vga_is_firmware_default(struct pci_dev *pdev)
{
-#if defined(CONFIG_X86)
- u64 base = screen_info.lfb_base;
- u64 size = screen_info.lfb_size;
- struct resource *r;
- u64 limit;
+#ifdef CONFIG_SCREEN_INFO
+ struct screen_info *si = &screen_info;
- /* Select the device owning the boot framebuffer if there is one */
-
- if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE)
- base |= (u64)screen_info.ext_lfb_base << 32;
-
- limit = base + size;
-
- /* Does firmware framebuffer belong to us? */
- pci_dev_for_each_resource(pdev, r) {
- if (resource_type(r) != IORESOURCE_MEM)
- continue;
-
- if (!r->start || !r->end)
- continue;
-
- if (base < r->start || limit >= r->end)
- continue;
-
- return true;
- }
-#endif
+ return pdev == screen_info_pci_dev(si);
+#else
return false;
+#endif
}
static bool vga_arb_integrated_gpu(struct device *dev)
diff --git a/drivers/phy/rockchip/phy-rockchip-emmc.c b/drivers/phy/rockchip/phy-rockchip-emmc.c
index 20023f6eb994..5187983c58e5 100644
--- a/drivers/phy/rockchip/phy-rockchip-emmc.c
+++ b/drivers/phy/rockchip/phy-rockchip-emmc.c
@@ -8,6 +8,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/hw_bitfield.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -21,7 +22,7 @@
 * only if BIT(x + 16) is set to 1 can BIT(x) be written.
*/
#define HIWORD_UPDATE(val, mask, shift) \
- ((val) << (shift) | (mask) << ((shift) + 16))
+ (FIELD_PREP_WM16((mask) << (shift), (val)))
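A quick sanity check of the rewritten wrapper against the old shift form (same FIELD_PREP_WM16 assumption as above), for val = 0x3, mask = 0x7, shift = 4:

	old: (0x3 << 4) | (0x7 << (4 + 16))	= 0x00700030
	new: FIELD_PREP_WM16(0x7 << 4, 0x3)	= 0x00700030

Both yield the same write-masked value, so the conversion is behavior-preserving.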
/* Register definition */
#define GRF_EMMCPHY_CON0 0x0
diff --git a/drivers/phy/rockchip/phy-rockchip-pcie.c b/drivers/phy/rockchip/phy-rockchip-pcie.c
index 4e2dfd01adf2..126306c01454 100644
--- a/drivers/phy/rockchip/phy-rockchip-pcie.c
+++ b/drivers/phy/rockchip/phy-rockchip-pcie.c
@@ -8,6 +8,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/hw_bitfield.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
@@ -18,22 +19,13 @@
#include <linux/regmap.h>
#include <linux/reset.h>
-/*
- * The higher 16-bit of this register is used for write protection
- * only if BIT(x + 16) set to 1 the BIT(x) can be written.
- */
-#define HIWORD_UPDATE(val, mask, shift) \
- ((val) << (shift) | (mask) << ((shift) + 16))
#define PHY_MAX_LANE_NUM 4
-#define PHY_CFG_DATA_SHIFT 7
-#define PHY_CFG_ADDR_SHIFT 1
-#define PHY_CFG_DATA_MASK 0xf
-#define PHY_CFG_ADDR_MASK 0x3f
+#define PHY_CFG_DATA_MASK GENMASK(10, 7)
+#define PHY_CFG_ADDR_MASK GENMASK(6, 1)
#define PHY_CFG_WR_ENABLE 1
#define PHY_CFG_WR_DISABLE 0
-#define PHY_CFG_WR_SHIFT 0
-#define PHY_CFG_WR_MASK 1
+#define PHY_CFG_WR_MASK BIT(0)
#define PHY_CFG_PLL_LOCK 0x10
#define PHY_CFG_CLK_TEST 0x10
#define PHY_CFG_CLK_SCC 0x12
@@ -48,11 +40,7 @@
#define PHY_LANE_RX_DET_SHIFT 11
#define PHY_LANE_RX_DET_TH 0x1
#define PHY_LANE_IDLE_OFF 0x1
-#define PHY_LANE_IDLE_MASK 0x1
-#define PHY_LANE_IDLE_A_SHIFT 3
-#define PHY_LANE_IDLE_B_SHIFT 4
-#define PHY_LANE_IDLE_C_SHIFT 5
-#define PHY_LANE_IDLE_D_SHIFT 6
+#define PHY_LANE_IDLE_MASK BIT(3)
struct rockchip_pcie_data {
unsigned int pcie_conf;
@@ -99,22 +87,14 @@ static inline void phy_wr_cfg(struct rockchip_pcie_phy *rk_phy,
u32 addr, u32 data)
{
regmap_write(rk_phy->reg_base, rk_phy->phy_data->pcie_conf,
- HIWORD_UPDATE(data,
- PHY_CFG_DATA_MASK,
- PHY_CFG_DATA_SHIFT) |
- HIWORD_UPDATE(addr,
- PHY_CFG_ADDR_MASK,
- PHY_CFG_ADDR_SHIFT));
+ FIELD_PREP_WM16(PHY_CFG_DATA_MASK, data) |
+ FIELD_PREP_WM16(PHY_CFG_ADDR_MASK, addr));
udelay(1);
regmap_write(rk_phy->reg_base, rk_phy->phy_data->pcie_conf,
- HIWORD_UPDATE(PHY_CFG_WR_ENABLE,
- PHY_CFG_WR_MASK,
- PHY_CFG_WR_SHIFT));
+ FIELD_PREP_WM16(PHY_CFG_WR_MASK, PHY_CFG_WR_ENABLE));
udelay(1);
regmap_write(rk_phy->reg_base, rk_phy->phy_data->pcie_conf,
- HIWORD_UPDATE(PHY_CFG_WR_DISABLE,
- PHY_CFG_WR_MASK,
- PHY_CFG_WR_SHIFT));
+ FIELD_PREP_WM16(PHY_CFG_WR_MASK, PHY_CFG_WR_DISABLE));
}
static int rockchip_pcie_phy_power_off(struct phy *phy)
@@ -125,11 +105,9 @@ static int rockchip_pcie_phy_power_off(struct phy *phy)
guard(mutex)(&rk_phy->pcie_mutex);
- regmap_write(rk_phy->reg_base,
- rk_phy->phy_data->pcie_laneoff,
- HIWORD_UPDATE(PHY_LANE_IDLE_OFF,
- PHY_LANE_IDLE_MASK,
- PHY_LANE_IDLE_A_SHIFT + inst->index));
+ regmap_write(rk_phy->reg_base, rk_phy->phy_data->pcie_laneoff,
+ FIELD_PREP_WM16(PHY_LANE_IDLE_MASK,
+ PHY_LANE_IDLE_OFF) << inst->index);
if (--rk_phy->pwr_cnt) {
return 0;
@@ -139,11 +117,9 @@ static int rockchip_pcie_phy_power_off(struct phy *phy)
if (err) {
dev_err(&phy->dev, "assert phy_rst err %d\n", err);
rk_phy->pwr_cnt++;
- regmap_write(rk_phy->reg_base,
- rk_phy->phy_data->pcie_laneoff,
- HIWORD_UPDATE(!PHY_LANE_IDLE_OFF,
- PHY_LANE_IDLE_MASK,
- PHY_LANE_IDLE_A_SHIFT + inst->index));
+ regmap_write(rk_phy->reg_base, rk_phy->phy_data->pcie_laneoff,
+ FIELD_PREP_WM16(PHY_LANE_IDLE_MASK,
+ !PHY_LANE_IDLE_OFF) << inst->index);
return err;
}
@@ -159,11 +135,9 @@ static int rockchip_pcie_phy_power_on(struct phy *phy)
guard(mutex)(&rk_phy->pcie_mutex);
- regmap_write(rk_phy->reg_base,
- rk_phy->phy_data->pcie_laneoff,
- HIWORD_UPDATE(!PHY_LANE_IDLE_OFF,
- PHY_LANE_IDLE_MASK,
- PHY_LANE_IDLE_A_SHIFT + inst->index));
+ regmap_write(rk_phy->reg_base, rk_phy->phy_data->pcie_laneoff,
+ FIELD_PREP_WM16(PHY_LANE_IDLE_MASK,
+ !PHY_LANE_IDLE_OFF) << inst->index);
if (rk_phy->pwr_cnt++) {
return 0;
@@ -177,9 +151,7 @@ static int rockchip_pcie_phy_power_on(struct phy *phy)
}
regmap_write(rk_phy->reg_base, rk_phy->phy_data->pcie_conf,
- HIWORD_UPDATE(PHY_CFG_PLL_LOCK,
- PHY_CFG_ADDR_MASK,
- PHY_CFG_ADDR_SHIFT));
+ FIELD_PREP_WM16(PHY_CFG_ADDR_MASK, PHY_CFG_PLL_LOCK));
/*
* No documented timeout value for phy operation below,
@@ -210,9 +182,7 @@ static int rockchip_pcie_phy_power_on(struct phy *phy)
}
regmap_write(rk_phy->reg_base, rk_phy->phy_data->pcie_conf,
- HIWORD_UPDATE(PHY_CFG_PLL_LOCK,
- PHY_CFG_ADDR_MASK,
- PHY_CFG_ADDR_SHIFT));
+ FIELD_PREP_WM16(PHY_CFG_ADDR_MASK, PHY_CFG_PLL_LOCK));
err = regmap_read_poll_timeout(rk_phy->reg_base,
rk_phy->phy_data->pcie_status,
diff --git a/drivers/phy/rockchip/phy-rockchip-samsung-dcphy.c b/drivers/phy/rockchip/phy-rockchip-samsung-dcphy.c
index 28a052e17366..4508a3147272 100644
--- a/drivers/phy/rockchip/phy-rockchip-samsung-dcphy.c
+++ b/drivers/phy/rockchip/phy-rockchip-samsung-dcphy.c
@@ -8,6 +8,7 @@
#include <dt-bindings/phy/phy.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
+#include <linux/hw_bitfield.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
@@ -20,12 +21,6 @@
#include <linux/regmap.h>
#include <linux/reset.h>
-#define FIELD_PREP_HIWORD(_mask, _val) \
- ( \
- FIELD_PREP((_mask), (_val)) | \
- ((_mask) << 16) \
- )
-
#define BIAS_CON0 0x0000
#define I_RES_CNTL_MASK GENMASK(6, 4)
#define I_RES_CNTL(x) FIELD_PREP(I_RES_CNTL_MASK, x)
@@ -252,8 +247,8 @@
/* MIPI_CDPHY_GRF registers */
#define MIPI_DCPHY_GRF_CON0 0x0000
-#define S_CPHY_MODE FIELD_PREP_HIWORD(BIT(3), 1)
-#define M_CPHY_MODE FIELD_PREP_HIWORD(BIT(0), 1)
+#define S_CPHY_MODE FIELD_PREP_WM16(BIT(3), 1)
+#define M_CPHY_MODE FIELD_PREP_WM16(BIT(0), 1)
enum hs_drv_res_ohm {
STRENGTH_30_OHM = 0x8,
diff --git a/drivers/phy/rockchip/phy-rockchip-usb.c b/drivers/phy/rockchip/phy-rockchip-usb.c
index 666a896c8f0a..c3c30df29c3e 100644
--- a/drivers/phy/rockchip/phy-rockchip-usb.c
+++ b/drivers/phy/rockchip/phy-rockchip-usb.c
@@ -8,6 +8,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/hw_bitfield.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -24,9 +25,6 @@
static int enable_usb_uart;
-#define HIWORD_UPDATE(val, mask) \
- ((val) | (mask) << 16)
-
#define UOC_CON0 0x00
#define UOC_CON0_SIDDQ BIT(13)
#define UOC_CON0_DISABLE BIT(4)
@@ -38,10 +36,10 @@ static int enable_usb_uart;
#define UOC_CON3 0x0c
/* bits present on rk3188 and rk3288 phys */
#define UOC_CON3_UTMI_TERMSEL_FULLSPEED BIT(5)
-#define UOC_CON3_UTMI_XCVRSEELCT_FSTRANSC (1 << 3)
-#define UOC_CON3_UTMI_XCVRSEELCT_MASK (3 << 3)
-#define UOC_CON3_UTMI_OPMODE_NODRIVING (1 << 1)
-#define UOC_CON3_UTMI_OPMODE_MASK (3 << 1)
+#define UOC_CON3_UTMI_XCVRSEELCT_FSTRANSC 1UL
+#define UOC_CON3_UTMI_XCVRSEELCT_MASK GENMASK(4, 3)
+#define UOC_CON3_UTMI_OPMODE_NODRIVING 1UL
+#define UOC_CON3_UTMI_OPMODE_MASK GENMASK(2, 1)
#define UOC_CON3_UTMI_SUSPENDN BIT(0)
struct rockchip_usb_phys {
@@ -79,7 +77,7 @@ struct rockchip_usb_phy {
static int rockchip_usb_phy_power(struct rockchip_usb_phy *phy,
bool siddq)
{
- u32 val = HIWORD_UPDATE(siddq ? UOC_CON0_SIDDQ : 0, UOC_CON0_SIDDQ);
+ u32 val = FIELD_PREP_WM16(UOC_CON0_SIDDQ, siddq);
return regmap_write(phy->base->reg_base, phy->reg_offset, val);
}
@@ -332,29 +330,24 @@ static int __init rockchip_init_usb_uart_common(struct regmap *grf,
* but were not present in the original code.
* Also disable the analog phy components to save power.
*/
- val = HIWORD_UPDATE(UOC_CON0_COMMON_ON_N
- | UOC_CON0_DISABLE
- | UOC_CON0_SIDDQ,
- UOC_CON0_COMMON_ON_N
- | UOC_CON0_DISABLE
- | UOC_CON0_SIDDQ);
+ val = FIELD_PREP_WM16(UOC_CON0_COMMON_ON_N, 1) |
+ FIELD_PREP_WM16(UOC_CON0_DISABLE, 1) |
+ FIELD_PREP_WM16(UOC_CON0_SIDDQ, 1);
ret = regmap_write(grf, regoffs + UOC_CON0, val);
if (ret)
return ret;
- val = HIWORD_UPDATE(UOC_CON2_SOFT_CON_SEL,
- UOC_CON2_SOFT_CON_SEL);
+ val = FIELD_PREP_WM16(UOC_CON2_SOFT_CON_SEL, 1);
ret = regmap_write(grf, regoffs + UOC_CON2, val);
if (ret)
return ret;
- val = HIWORD_UPDATE(UOC_CON3_UTMI_OPMODE_NODRIVING
- | UOC_CON3_UTMI_XCVRSEELCT_FSTRANSC
- | UOC_CON3_UTMI_TERMSEL_FULLSPEED,
- UOC_CON3_UTMI_SUSPENDN
- | UOC_CON3_UTMI_OPMODE_MASK
- | UOC_CON3_UTMI_XCVRSEELCT_MASK
- | UOC_CON3_UTMI_TERMSEL_FULLSPEED);
+ val = FIELD_PREP_WM16(UOC_CON3_UTMI_SUSPENDN, 0) |
+ FIELD_PREP_WM16(UOC_CON3_UTMI_OPMODE_MASK,
+ UOC_CON3_UTMI_OPMODE_NODRIVING) |
+ FIELD_PREP_WM16(UOC_CON3_UTMI_XCVRSEELCT_MASK,
+ UOC_CON3_UTMI_XCVRSEELCT_FSTRANSC) |
+ FIELD_PREP_WM16(UOC_CON3_UTMI_TERMSEL_FULLSPEED, 1);
 ret = regmap_write(grf, regoffs + UOC_CON3, val);
if (ret)
return ret;
@@ -380,10 +373,8 @@ static int __init rk3188_init_usb_uart(struct regmap *grf,
if (ret)
return ret;
- val = HIWORD_UPDATE(RK3188_UOC0_CON0_BYPASSSEL
- | RK3188_UOC0_CON0_BYPASSDMEN,
- RK3188_UOC0_CON0_BYPASSSEL
- | RK3188_UOC0_CON0_BYPASSDMEN);
+ val = FIELD_PREP_WM16(RK3188_UOC0_CON0_BYPASSSEL, 1) |
+ FIELD_PREP_WM16(RK3188_UOC0_CON0_BYPASSDMEN, 1);
ret = regmap_write(grf, RK3188_UOC0_CON0, val);
if (ret)
return ret;
@@ -430,10 +421,8 @@ static int __init rk3288_init_usb_uart(struct regmap *grf,
if (ret)
return ret;
- val = HIWORD_UPDATE(RK3288_UOC0_CON3_BYPASSSEL
- | RK3288_UOC0_CON3_BYPASSDMEN,
- RK3288_UOC0_CON3_BYPASSSEL
- | RK3288_UOC0_CON3_BYPASSDMEN);
+ val = FIELD_PREP_WM16(RK3288_UOC0_CON3_BYPASSSEL, 1) |
+ FIELD_PREP_WM16(RK3288_UOC0_CON3_BYPASSDMEN, 1);
ret = regmap_write(grf, RK3288_UOC0_CON3, val);
if (ret)
return ret;
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 0f37aeb5e004..4f8507ebbdac 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -211,6 +211,8 @@ config PINCTRL_EIC7700
depends on ARCH_ESWIN || COMPILE_TEST
select PINMUX
select GENERIC_PINCONF
+ select REGULATOR
+ select REGULATOR_FIXED_VOLTAGE
help
	  This driver adds support for the pin controller in ESWIN's EIC7700 SoC,
	  which supports pin multiplexing, pin configuration, and rgmii voltage
@@ -562,7 +564,7 @@ config PINCTRL_STMFX
interrupt-controller.
config PINCTRL_SX150X
- bool "Semtech SX150x I2C GPIO expander pinctrl driver"
+ tristate "Semtech SX150x I2C GPIO expander pinctrl driver"
depends on I2C=y
select PINMUX
select PINCONF
@@ -612,6 +614,25 @@ config PINCTRL_TH1520
This driver is needed for RISC-V development boards like
the BeagleV Ahead and the LicheePi 4A.
+config PINCTRL_UPBOARD
+ tristate "AAeon UP board FPGA pin controller"
+ depends on MFD_UPBOARD_FPGA
+ select PINMUX
+ select GENERIC_PINCTRL_GROUPS
+ select GENERIC_PINMUX_FUNCTIONS
+ select GPIOLIB
+ select GPIO_AGGREGATOR
+ help
+ Pin controller for the FPGA GPIO lines on UP boards. Due to the
+ hardware layout, the driver controls the FPGA pins in tandem with
+ their corresponding Intel SoC GPIOs.
+
+ Currently supported:
+ - UP Squared
+
+ To compile this driver as a module, choose M here: the module
+ will be called pinctrl-upboard.
+
config PINCTRL_ZYNQ
bool "Pinctrl driver for Xilinx Zynq"
depends on ARCH_ZYNQ || COMPILE_TEST
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index 65aa432fc97e..e0cfb9b7c99b 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -60,6 +60,7 @@ obj-$(CONFIG_PINCTRL_SX150X) += pinctrl-sx150x.o
obj-$(CONFIG_PINCTRL_TB10X) += pinctrl-tb10x.o
obj-$(CONFIG_PINCTRL_TPS6594) += pinctrl-tps6594.o
obj-$(CONFIG_PINCTRL_TH1520) += pinctrl-th1520.o
+obj-$(CONFIG_PINCTRL_UPBOARD) += pinctrl-upboard.o
obj-$(CONFIG_PINCTRL_ZYNQMP) += pinctrl-zynqmp.o
obj-$(CONFIG_PINCTRL_ZYNQ) += pinctrl-zynq.o
diff --git a/drivers/pinctrl/bcm/Kconfig b/drivers/pinctrl/bcm/Kconfig
index 35b51ce4298e..096d0778427e 100644
--- a/drivers/pinctrl/bcm/Kconfig
+++ b/drivers/pinctrl/bcm/Kconfig
@@ -106,6 +106,18 @@ config PINCTRL_BCM63268
help
Say Y here to enable the Broadcom BCM63268 GPIO driver.
+config PINCTRL_BRCMSTB
+ tristate "Broadcom STB product line pin controller driver"
+ depends on OF && (ARCH_BCM2835 || ARCH_BRCMSTB || COMPILE_TEST)
+ select PINMUX
+ select PINCONF
+ select GENERIC_PINCONF
+ help
+ Enable pin muxing and configuration functionality
+ for Broadcom STB product line chipsets.
+
+source "drivers/pinctrl/bcm/Kconfig.stb"
+
config PINCTRL_IPROC_GPIO
bool "Broadcom iProc GPIO (with PINCONF) driver"
depends on OF_GPIO && (ARCH_BCM_IPROC || COMPILE_TEST)
diff --git a/drivers/pinctrl/bcm/Kconfig.stb b/drivers/pinctrl/bcm/Kconfig.stb
new file mode 100644
index 000000000000..c1ba361e1d78
--- /dev/null
+++ b/drivers/pinctrl/bcm/Kconfig.stb
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+if PINCTRL_BRCMSTB
+
+config PINCTRL_BCM2712
+ tristate "BCM2712 SoC pin controller driver"
+ help
+	  Driver for the BCM2712 integrated pin controller,
+	  as found on the Raspberry Pi 5.
+
+endif
diff --git a/drivers/pinctrl/bcm/Makefile b/drivers/pinctrl/bcm/Makefile
index 82b868ec1471..482d769b1a81 100644
--- a/drivers/pinctrl/bcm/Makefile
+++ b/drivers/pinctrl/bcm/Makefile
@@ -11,6 +11,8 @@ obj-$(CONFIG_PINCTRL_BCM6358) += pinctrl-bcm6358.o
obj-$(CONFIG_PINCTRL_BCM6362) += pinctrl-bcm6362.o
obj-$(CONFIG_PINCTRL_BCM6368) += pinctrl-bcm6368.o
obj-$(CONFIG_PINCTRL_BCM63268) += pinctrl-bcm63268.o
+obj-$(CONFIG_PINCTRL_BRCMSTB) += pinctrl-brcmstb.o
+obj-$(CONFIG_PINCTRL_BCM2712) += pinctrl-brcmstb-bcm2712.o
obj-$(CONFIG_PINCTRL_IPROC_GPIO) += pinctrl-iproc-gpio.o
obj-$(CONFIG_PINCTRL_CYGNUS_MUX) += pinctrl-cygnus-mux.o
obj-$(CONFIG_PINCTRL_NS) += pinctrl-ns.o
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index 7dbf079739bc..c165674c5b4d 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -1023,7 +1023,7 @@ static int bcm2835_pinconf_get(struct pinctrl_dev *pctldev,
/* No way to read back bias config in HW */
switch (param) {
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
if (fsel != BCM2835_FSEL_GPIO_OUT)
return -EINVAL;
@@ -1091,7 +1091,7 @@ static int bcm2835_pinconf_set(struct pinctrl_dev *pctldev,
break;
/* Set output-high or output-low */
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
bcm2835_gpio_set_bit(pc, arg ? GPSET0 : GPCLR0, pin);
break;
@@ -1202,7 +1202,7 @@ static int bcm2711_pinconf_set(struct pinctrl_dev *pctldev,
break;
/* Set output-high or output-low */
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
bcm2835_gpio_set_bit(pc, arg ? GPSET0 : GPCLR0, pin);
break;
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm6358.c b/drivers/pinctrl/bcm/pinctrl-bcm6358.c
index 891de49d76e7..4c8cd65fc31e 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm6358.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm6358.c
@@ -343,10 +343,8 @@ static int bcm6358_pinctrl_probe(struct platform_device *pdev)
pc = platform_get_drvdata(pdev);
priv->overlays = devm_regmap_field_alloc(dev, pc->regs, overlays);
- if (IS_ERR(priv->overlays))
- return PTR_ERR(priv->overlays);
- return 0;
+ return PTR_ERR_OR_ZERO(priv->overlays);
}
static const struct of_device_id bcm6358_pinctrl_match[] = {
diff --git a/drivers/pinctrl/bcm/pinctrl-brcmstb-bcm2712.c b/drivers/pinctrl/bcm/pinctrl-brcmstb-bcm2712.c
new file mode 100644
index 000000000000..752b78e2c0d8
--- /dev/null
+++ b/drivers/pinctrl/bcm/pinctrl-brcmstb-bcm2712.c
@@ -0,0 +1,747 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for Broadcom brcmstb GPIO units (pinctrl only)
+ *
+ * Copyright (C) 2024-2025 Ivan T. Ivanov, Andrea della Porta
+ * Copyright (C) 2021-3 Raspberry Pi Ltd.
+ * Copyright (C) 2012 Chris Boot, Simon Arlott, Stephen Warren
+ *
+ * Based heavily on the BCM2835 GPIO & pinctrl driver, which was inspired by:
+ * pinctrl-nomadik.c, please see original file for copyright information
+ * pinctrl-tegra.c, please see original file for copyright information
+ */
+
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/of.h>
+#include "pinctrl-brcmstb.h"
+
+#define BRCMSTB_FSEL_COUNT 8
+#define BRCMSTB_FSEL_MASK 0xf
+
+#define BRCMSTB_PIN(i, f1, f2, f3, f4, f5, f6, f7, f8) \
+ [i] = { \
+ .funcs = (u8[]) { \
+ func_##f1, \
+ func_##f2, \
+ func_##f3, \
+ func_##f4, \
+ func_##f5, \
+ func_##f6, \
+ func_##f7, \
+ func_##f8, \
+ }, \
+ .n_funcs = BRCMSTB_FSEL_COUNT, \
+ .func_mask = BRCMSTB_FSEL_MASK, \
+ }
+
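As a concrete expansion, the first AON entry further below,

	BRCMSTB_PIN(0, ir_in, vc_spi0, vc_uart3, vc_i2c3, te0, vc_i2c0, _, _)

produces .funcs = { func_ir_in, func_vc_spi0, func_vc_uart3, func_vc_i2c3, func_te0, func_vc_i2c0, func__, func__ }, so a raw fsel value of 1 read from the mux register resolves to func_vc_spi0 (see brcmstb_pinctrl_fsel_get() in pinctrl-brcmstb.c).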
+enum bcm2712_funcs {
+ func_gpio,
+ func_alt1,
+ func_alt2,
+ func_alt3,
+ func_alt4,
+ func_alt5,
+ func_alt6,
+ func_alt7,
+ func_alt8,
+ func_aon_cpu_standbyb,
+ func_aon_fp_4sec_resetb,
+ func_aon_gpclk,
+ func_aon_pwm,
+ func_arm_jtag,
+ func_aud_fs_clk0,
+ func_avs_pmu_bsc,
+ func_bsc_m0,
+ func_bsc_m1,
+ func_bsc_m2,
+ func_bsc_m3,
+ func_clk_observe,
+ func_ctl_hdmi_5v,
+ func_enet0,
+ func_enet0_mii,
+ func_enet0_rgmii,
+ func_ext_sc_clk,
+ func_fl0,
+ func_fl1,
+ func_gpclk0,
+ func_gpclk1,
+ func_gpclk2,
+ func_hdmi_tx0_auto_i2c,
+ func_hdmi_tx0_bsc,
+ func_hdmi_tx1_auto_i2c,
+ func_hdmi_tx1_bsc,
+ func_i2s_in,
+ func_i2s_out,
+ func_ir_in,
+ func_mtsif,
+ func_mtsif_alt,
+ func_mtsif_alt1,
+ func_pdm,
+ func_pkt,
+ func_pm_led_out,
+ func_sc0,
+ func_sd0,
+ func_sd2,
+ func_sd_card_a,
+ func_sd_card_b,
+ func_sd_card_c,
+ func_sd_card_d,
+ func_sd_card_e,
+ func_sd_card_f,
+ func_sd_card_g,
+ func_spdif_out,
+ func_spi_m,
+ func_spi_s,
+ func_sr_edm_sense,
+ func_te0,
+ func_te1,
+ func_tsio,
+ func_uart0,
+ func_uart1,
+ func_uart2,
+ func_usb_pwr,
+ func_usb_vbus,
+ func_uui,
+ func_vc_i2c0,
+ func_vc_i2c3,
+ func_vc_i2c4,
+ func_vc_i2c5,
+ func_vc_i2csl,
+ func_vc_pcm,
+ func_vc_pwm0,
+ func_vc_pwm1,
+ func_vc_spi0,
+ func_vc_spi3,
+ func_vc_spi4,
+ func_vc_spi5,
+ func_vc_uart0,
+ func_vc_uart2,
+ func_vc_uart3,
+ func_vc_uart4,
+ func__,
+ func_count = func__
+};
+
+static const struct pin_regs bcm2712_c0_gpio_pin_regs[] = {
+ GPIO_REGS(0, 0, 0, 7, 7),
+ GPIO_REGS(1, 0, 1, 7, 8),
+ GPIO_REGS(2, 0, 2, 7, 9),
+ GPIO_REGS(3, 0, 3, 7, 10),
+ GPIO_REGS(4, 0, 4, 7, 11),
+ GPIO_REGS(5, 0, 5, 7, 12),
+ GPIO_REGS(6, 0, 6, 7, 13),
+ GPIO_REGS(7, 0, 7, 7, 14),
+ GPIO_REGS(8, 1, 0, 8, 0),
+ GPIO_REGS(9, 1, 1, 8, 1),
+ GPIO_REGS(10, 1, 2, 8, 2),
+ GPIO_REGS(11, 1, 3, 8, 3),
+ GPIO_REGS(12, 1, 4, 8, 4),
+ GPIO_REGS(13, 1, 5, 8, 5),
+ GPIO_REGS(14, 1, 6, 8, 6),
+ GPIO_REGS(15, 1, 7, 8, 7),
+ GPIO_REGS(16, 2, 0, 8, 8),
+ GPIO_REGS(17, 2, 1, 8, 9),
+ GPIO_REGS(18, 2, 2, 8, 10),
+ GPIO_REGS(19, 2, 3, 8, 11),
+ GPIO_REGS(20, 2, 4, 8, 12),
+ GPIO_REGS(21, 2, 5, 8, 13),
+ GPIO_REGS(22, 2, 6, 8, 14),
+ GPIO_REGS(23, 2, 7, 9, 0),
+ GPIO_REGS(24, 3, 0, 9, 1),
+ GPIO_REGS(25, 3, 1, 9, 2),
+ GPIO_REGS(26, 3, 2, 9, 3),
+ GPIO_REGS(27, 3, 3, 9, 4),
+ GPIO_REGS(28, 3, 4, 9, 5),
+ GPIO_REGS(29, 3, 5, 9, 6),
+ GPIO_REGS(30, 3, 6, 9, 7),
+ GPIO_REGS(31, 3, 7, 9, 8),
+ GPIO_REGS(32, 4, 0, 9, 9),
+ GPIO_REGS(33, 4, 1, 9, 10),
+ GPIO_REGS(34, 4, 2, 9, 11),
+ GPIO_REGS(35, 4, 3, 9, 12),
+ GPIO_REGS(36, 4, 4, 9, 13),
+ GPIO_REGS(37, 4, 5, 9, 14),
+ GPIO_REGS(38, 4, 6, 10, 0),
+ GPIO_REGS(39, 4, 7, 10, 1),
+ GPIO_REGS(40, 5, 0, 10, 2),
+ GPIO_REGS(41, 5, 1, 10, 3),
+ GPIO_REGS(42, 5, 2, 10, 4),
+ GPIO_REGS(43, 5, 3, 10, 5),
+ GPIO_REGS(44, 5, 4, 10, 6),
+ GPIO_REGS(45, 5, 5, 10, 7),
+ GPIO_REGS(46, 5, 6, 10, 8),
+ GPIO_REGS(47, 5, 7, 10, 9),
+ GPIO_REGS(48, 6, 0, 10, 10),
+ GPIO_REGS(49, 6, 1, 10, 11),
+ GPIO_REGS(50, 6, 2, 10, 12),
+ GPIO_REGS(51, 6, 3, 10, 13),
+ GPIO_REGS(52, 6, 4, 10, 14),
+ GPIO_REGS(53, 6, 5, 11, 0),
+ EMMC_REGS(54, 11, 1), /* EMMC_CMD */
+ EMMC_REGS(55, 11, 2), /* EMMC_DS */
+ EMMC_REGS(56, 11, 3), /* EMMC_CLK */
+ EMMC_REGS(57, 11, 4), /* EMMC_DAT0 */
+ EMMC_REGS(58, 11, 5), /* EMMC_DAT1 */
+ EMMC_REGS(59, 11, 6), /* EMMC_DAT2 */
+ EMMC_REGS(60, 11, 7), /* EMMC_DAT3 */
+ EMMC_REGS(61, 11, 8), /* EMMC_DAT4 */
+ EMMC_REGS(62, 11, 9), /* EMMC_DAT5 */
+ EMMC_REGS(63, 11, 10), /* EMMC_DAT6 */
+ EMMC_REGS(64, 11, 11), /* EMMC_DAT7 */
+};
+
+static struct pin_regs bcm2712_c0_aon_gpio_pin_regs[] = {
+ AON_GPIO_REGS(0, 3, 0, 6, 10),
+ AON_GPIO_REGS(1, 3, 1, 6, 11),
+ AON_GPIO_REGS(2, 3, 2, 6, 12),
+ AON_GPIO_REGS(3, 3, 3, 6, 13),
+ AON_GPIO_REGS(4, 3, 4, 6, 14),
+ AON_GPIO_REGS(5, 3, 5, 7, 0),
+ AON_GPIO_REGS(6, 3, 6, 7, 1),
+ AON_GPIO_REGS(7, 3, 7, 7, 2),
+ AON_GPIO_REGS(8, 4, 0, 7, 3),
+ AON_GPIO_REGS(9, 4, 1, 7, 4),
+ AON_GPIO_REGS(10, 4, 2, 7, 5),
+ AON_GPIO_REGS(11, 4, 3, 7, 6),
+ AON_GPIO_REGS(12, 4, 4, 7, 7),
+ AON_GPIO_REGS(13, 4, 5, 7, 8),
+ AON_GPIO_REGS(14, 4, 6, 7, 9),
+ AON_GPIO_REGS(15, 4, 7, 7, 10),
+ AON_GPIO_REGS(16, 5, 0, 7, 11),
+ AON_SGPIO_REGS(0, 0, 0),
+ AON_SGPIO_REGS(1, 0, 1),
+ AON_SGPIO_REGS(2, 0, 2),
+ AON_SGPIO_REGS(3, 0, 3),
+ AON_SGPIO_REGS(4, 1, 0),
+ AON_SGPIO_REGS(5, 2, 0),
+};
+
+static const struct pinctrl_pin_desc bcm2712_c0_gpio_pins[] = {
+ GPIO_PIN(0),
+ GPIO_PIN(1),
+ GPIO_PIN(2),
+ GPIO_PIN(3),
+ GPIO_PIN(4),
+ GPIO_PIN(5),
+ GPIO_PIN(6),
+ GPIO_PIN(7),
+ GPIO_PIN(8),
+ GPIO_PIN(9),
+ GPIO_PIN(10),
+ GPIO_PIN(11),
+ GPIO_PIN(12),
+ GPIO_PIN(13),
+ GPIO_PIN(14),
+ GPIO_PIN(15),
+ GPIO_PIN(16),
+ GPIO_PIN(17),
+ GPIO_PIN(18),
+ GPIO_PIN(19),
+ GPIO_PIN(20),
+ GPIO_PIN(21),
+ GPIO_PIN(22),
+ GPIO_PIN(23),
+ GPIO_PIN(24),
+ GPIO_PIN(25),
+ GPIO_PIN(26),
+ GPIO_PIN(27),
+ GPIO_PIN(28),
+ GPIO_PIN(29),
+ GPIO_PIN(30),
+ GPIO_PIN(31),
+ GPIO_PIN(32),
+ GPIO_PIN(33),
+ GPIO_PIN(34),
+ GPIO_PIN(35),
+ GPIO_PIN(36),
+ GPIO_PIN(37),
+ GPIO_PIN(38),
+ GPIO_PIN(39),
+ GPIO_PIN(40),
+ GPIO_PIN(41),
+ GPIO_PIN(42),
+ GPIO_PIN(43),
+ GPIO_PIN(44),
+ GPIO_PIN(45),
+ GPIO_PIN(46),
+ GPIO_PIN(47),
+ GPIO_PIN(48),
+ GPIO_PIN(49),
+ GPIO_PIN(50),
+ GPIO_PIN(51),
+ GPIO_PIN(52),
+ GPIO_PIN(53),
+ PINCTRL_PIN(54, "emmc_cmd"),
+ PINCTRL_PIN(55, "emmc_ds"),
+ PINCTRL_PIN(56, "emmc_clk"),
+ PINCTRL_PIN(57, "emmc_dat0"),
+ PINCTRL_PIN(58, "emmc_dat1"),
+ PINCTRL_PIN(59, "emmc_dat2"),
+ PINCTRL_PIN(60, "emmc_dat3"),
+ PINCTRL_PIN(61, "emmc_dat4"),
+ PINCTRL_PIN(62, "emmc_dat5"),
+ PINCTRL_PIN(63, "emmc_dat6"),
+ PINCTRL_PIN(64, "emmc_dat7"),
+};
+
+static struct pinctrl_pin_desc bcm2712_c0_aon_gpio_pins[] = {
+ AON_GPIO_PIN(0), AON_GPIO_PIN(1), AON_GPIO_PIN(2), AON_GPIO_PIN(3),
+ AON_GPIO_PIN(4), AON_GPIO_PIN(5), AON_GPIO_PIN(6), AON_GPIO_PIN(7),
+ AON_GPIO_PIN(8), AON_GPIO_PIN(9), AON_GPIO_PIN(10), AON_GPIO_PIN(11),
+ AON_GPIO_PIN(12), AON_GPIO_PIN(13), AON_GPIO_PIN(14), AON_GPIO_PIN(15),
+ AON_GPIO_PIN(16), AON_SGPIO_PIN(0), AON_SGPIO_PIN(1), AON_SGPIO_PIN(2),
+ AON_SGPIO_PIN(3), AON_SGPIO_PIN(4), AON_SGPIO_PIN(5),
+};
+
+static const struct pin_regs bcm2712_d0_gpio_pin_regs[] = {
+ GPIO_REGS(1, 0, 0, 4, 5),
+ GPIO_REGS(2, 0, 1, 4, 6),
+ GPIO_REGS(3, 0, 2, 4, 7),
+ GPIO_REGS(4, 0, 3, 4, 8),
+ GPIO_REGS(10, 0, 4, 4, 9),
+ GPIO_REGS(11, 0, 5, 4, 10),
+ GPIO_REGS(12, 0, 6, 4, 11),
+ GPIO_REGS(13, 0, 7, 4, 12),
+ GPIO_REGS(14, 1, 0, 4, 13),
+ GPIO_REGS(15, 1, 1, 4, 14),
+ GPIO_REGS(18, 1, 2, 5, 0),
+ GPIO_REGS(19, 1, 3, 5, 1),
+ GPIO_REGS(20, 1, 4, 5, 2),
+ GPIO_REGS(21, 1, 5, 5, 3),
+ GPIO_REGS(22, 1, 6, 5, 4),
+ GPIO_REGS(23, 1, 7, 5, 5),
+ GPIO_REGS(24, 2, 0, 5, 6),
+ GPIO_REGS(25, 2, 1, 5, 7),
+ GPIO_REGS(26, 2, 2, 5, 8),
+ GPIO_REGS(27, 2, 3, 5, 9),
+ GPIO_REGS(28, 2, 4, 5, 10),
+ GPIO_REGS(29, 2, 5, 5, 11),
+ GPIO_REGS(30, 2, 6, 5, 12),
+ GPIO_REGS(31, 2, 7, 5, 13),
+ GPIO_REGS(32, 3, 0, 5, 14),
+ GPIO_REGS(33, 3, 1, 6, 0),
+ GPIO_REGS(34, 3, 2, 6, 1),
+ GPIO_REGS(35, 3, 3, 6, 2),
+ EMMC_REGS(36, 6, 3), /* EMMC_CMD */
+ EMMC_REGS(37, 6, 4), /* EMMC_DS */
+ EMMC_REGS(38, 6, 5), /* EMMC_CLK */
+ EMMC_REGS(39, 6, 6), /* EMMC_DAT0 */
+ EMMC_REGS(40, 6, 7), /* EMMC_DAT1 */
+ EMMC_REGS(41, 6, 8), /* EMMC_DAT2 */
+ EMMC_REGS(42, 6, 9), /* EMMC_DAT3 */
+ EMMC_REGS(43, 6, 10), /* EMMC_DAT4 */
+ EMMC_REGS(44, 6, 11), /* EMMC_DAT5 */
+ EMMC_REGS(45, 6, 12), /* EMMC_DAT6 */
+ EMMC_REGS(46, 6, 13), /* EMMC_DAT7 */
+};
+
+static struct pin_regs bcm2712_d0_aon_gpio_pin_regs[] = {
+ AON_GPIO_REGS(0, 3, 0, 5, 9),
+ AON_GPIO_REGS(1, 3, 1, 5, 10),
+ AON_GPIO_REGS(2, 3, 2, 5, 11),
+ AON_GPIO_REGS(3, 3, 3, 5, 12),
+ AON_GPIO_REGS(4, 3, 4, 5, 13),
+ AON_GPIO_REGS(5, 3, 5, 5, 14),
+ AON_GPIO_REGS(6, 3, 6, 6, 0),
+ AON_GPIO_REGS(8, 3, 7, 6, 1),
+ AON_GPIO_REGS(9, 4, 0, 6, 2),
+ AON_GPIO_REGS(12, 4, 1, 6, 3),
+ AON_GPIO_REGS(13, 4, 2, 6, 4),
+ AON_GPIO_REGS(14, 4, 3, 6, 5),
+ AON_SGPIO_REGS(0, 0, 0),
+ AON_SGPIO_REGS(1, 0, 1),
+ AON_SGPIO_REGS(2, 0, 2),
+ AON_SGPIO_REGS(3, 0, 3),
+ AON_SGPIO_REGS(4, 1, 0),
+ AON_SGPIO_REGS(5, 2, 0),
+};
+
+static const struct pinctrl_pin_desc bcm2712_d0_gpio_pins[] = {
+ GPIO_PIN(1),
+ GPIO_PIN(2),
+ GPIO_PIN(3),
+ GPIO_PIN(4),
+ GPIO_PIN(10),
+ GPIO_PIN(11),
+ GPIO_PIN(12),
+ GPIO_PIN(13),
+ GPIO_PIN(14),
+ GPIO_PIN(15),
+ GPIO_PIN(18),
+ GPIO_PIN(19),
+ GPIO_PIN(20),
+ GPIO_PIN(21),
+ GPIO_PIN(22),
+ GPIO_PIN(23),
+ GPIO_PIN(24),
+ GPIO_PIN(25),
+ GPIO_PIN(26),
+ GPIO_PIN(27),
+ GPIO_PIN(28),
+ GPIO_PIN(29),
+ GPIO_PIN(30),
+ GPIO_PIN(31),
+ GPIO_PIN(32),
+ GPIO_PIN(33),
+ GPIO_PIN(34),
+ GPIO_PIN(35),
+ PINCTRL_PIN(36, "emmc_cmd"),
+ PINCTRL_PIN(37, "emmc_ds"),
+ PINCTRL_PIN(38, "emmc_clk"),
+ PINCTRL_PIN(39, "emmc_dat0"),
+ PINCTRL_PIN(40, "emmc_dat1"),
+ PINCTRL_PIN(41, "emmc_dat2"),
+ PINCTRL_PIN(42, "emmc_dat3"),
+ PINCTRL_PIN(43, "emmc_dat4"),
+ PINCTRL_PIN(44, "emmc_dat5"),
+ PINCTRL_PIN(45, "emmc_dat6"),
+ PINCTRL_PIN(46, "emmc_dat7"),
+};
+
+static struct pinctrl_pin_desc bcm2712_d0_aon_gpio_pins[] = {
+ AON_GPIO_PIN(0), AON_GPIO_PIN(1), AON_GPIO_PIN(2), AON_GPIO_PIN(3),
+ AON_GPIO_PIN(4), AON_GPIO_PIN(5), AON_GPIO_PIN(6), AON_GPIO_PIN(8),
+ AON_GPIO_PIN(9), AON_GPIO_PIN(12), AON_GPIO_PIN(13), AON_GPIO_PIN(14),
+ AON_SGPIO_PIN(0), AON_SGPIO_PIN(1), AON_SGPIO_PIN(2),
+ AON_SGPIO_PIN(3), AON_SGPIO_PIN(4), AON_SGPIO_PIN(5),
+};
+
+static const char * const bcm2712_func_names[] = {
+ BRCMSTB_FUNC(gpio),
+ BRCMSTB_FUNC(alt1),
+ BRCMSTB_FUNC(alt2),
+ BRCMSTB_FUNC(alt3),
+ BRCMSTB_FUNC(alt4),
+ BRCMSTB_FUNC(alt5),
+ BRCMSTB_FUNC(alt6),
+ BRCMSTB_FUNC(alt7),
+ BRCMSTB_FUNC(alt8),
+ BRCMSTB_FUNC(aon_cpu_standbyb),
+ BRCMSTB_FUNC(aon_fp_4sec_resetb),
+ BRCMSTB_FUNC(aon_gpclk),
+ BRCMSTB_FUNC(aon_pwm),
+ BRCMSTB_FUNC(arm_jtag),
+ BRCMSTB_FUNC(aud_fs_clk0),
+ BRCMSTB_FUNC(avs_pmu_bsc),
+ BRCMSTB_FUNC(bsc_m0),
+ BRCMSTB_FUNC(bsc_m1),
+ BRCMSTB_FUNC(bsc_m2),
+ BRCMSTB_FUNC(bsc_m3),
+ BRCMSTB_FUNC(clk_observe),
+ BRCMSTB_FUNC(ctl_hdmi_5v),
+ BRCMSTB_FUNC(enet0),
+ BRCMSTB_FUNC(enet0_mii),
+ BRCMSTB_FUNC(enet0_rgmii),
+ BRCMSTB_FUNC(ext_sc_clk),
+ BRCMSTB_FUNC(fl0),
+ BRCMSTB_FUNC(fl1),
+ BRCMSTB_FUNC(gpclk0),
+ BRCMSTB_FUNC(gpclk1),
+ BRCMSTB_FUNC(gpclk2),
+ BRCMSTB_FUNC(hdmi_tx0_auto_i2c),
+ BRCMSTB_FUNC(hdmi_tx0_bsc),
+ BRCMSTB_FUNC(hdmi_tx1_auto_i2c),
+ BRCMSTB_FUNC(hdmi_tx1_bsc),
+ BRCMSTB_FUNC(i2s_in),
+ BRCMSTB_FUNC(i2s_out),
+ BRCMSTB_FUNC(ir_in),
+ BRCMSTB_FUNC(mtsif),
+ BRCMSTB_FUNC(mtsif_alt),
+ BRCMSTB_FUNC(mtsif_alt1),
+ BRCMSTB_FUNC(pdm),
+ BRCMSTB_FUNC(pkt),
+ BRCMSTB_FUNC(pm_led_out),
+ BRCMSTB_FUNC(sc0),
+ BRCMSTB_FUNC(sd0),
+ BRCMSTB_FUNC(sd2),
+ BRCMSTB_FUNC(sd_card_a),
+ BRCMSTB_FUNC(sd_card_b),
+ BRCMSTB_FUNC(sd_card_c),
+ BRCMSTB_FUNC(sd_card_d),
+ BRCMSTB_FUNC(sd_card_e),
+ BRCMSTB_FUNC(sd_card_f),
+ BRCMSTB_FUNC(sd_card_g),
+ BRCMSTB_FUNC(spdif_out),
+ BRCMSTB_FUNC(spi_m),
+ BRCMSTB_FUNC(spi_s),
+ BRCMSTB_FUNC(sr_edm_sense),
+ BRCMSTB_FUNC(te0),
+ BRCMSTB_FUNC(te1),
+ BRCMSTB_FUNC(tsio),
+ BRCMSTB_FUNC(uart0),
+ BRCMSTB_FUNC(uart1),
+ BRCMSTB_FUNC(uart2),
+ BRCMSTB_FUNC(usb_pwr),
+ BRCMSTB_FUNC(usb_vbus),
+ BRCMSTB_FUNC(uui),
+ BRCMSTB_FUNC(vc_i2c0),
+ BRCMSTB_FUNC(vc_i2c3),
+ BRCMSTB_FUNC(vc_i2c4),
+ BRCMSTB_FUNC(vc_i2c5),
+ BRCMSTB_FUNC(vc_i2csl),
+ BRCMSTB_FUNC(vc_pcm),
+ BRCMSTB_FUNC(vc_pwm0),
+ BRCMSTB_FUNC(vc_pwm1),
+ BRCMSTB_FUNC(vc_spi0),
+ BRCMSTB_FUNC(vc_spi3),
+ BRCMSTB_FUNC(vc_spi4),
+ BRCMSTB_FUNC(vc_spi5),
+ BRCMSTB_FUNC(vc_uart0),
+ BRCMSTB_FUNC(vc_uart2),
+ BRCMSTB_FUNC(vc_uart3),
+ BRCMSTB_FUNC(vc_uart4),
+};
+
+static const struct brcmstb_pin_funcs bcm2712_c0_aon_gpio_pin_funcs[] = {
+ BRCMSTB_PIN(0, ir_in, vc_spi0, vc_uart3, vc_i2c3, te0, vc_i2c0, _, _),
+ BRCMSTB_PIN(1, vc_pwm0, vc_spi0, vc_uart3, vc_i2c3, te1, aon_pwm, vc_i2c0, vc_pwm1),
+ BRCMSTB_PIN(2, vc_pwm0, vc_spi0, vc_uart3, ctl_hdmi_5v, fl0, aon_pwm, ir_in, vc_pwm1),
+ BRCMSTB_PIN(3, ir_in, vc_spi0, vc_uart3, aon_fp_4sec_resetb, fl1, sd_card_g, aon_gpclk, _),
+ BRCMSTB_PIN(4, gpclk0, vc_spi0, vc_i2csl, aon_gpclk, pm_led_out, aon_pwm, sd_card_g, vc_pwm0),
+ BRCMSTB_PIN(5, gpclk1, ir_in, vc_i2csl, clk_observe, aon_pwm, sd_card_g, vc_pwm0, _),
+ BRCMSTB_PIN(6, uart1, vc_uart4, gpclk2, ctl_hdmi_5v, vc_uart0, vc_spi3, _, _),
+ BRCMSTB_PIN(7, uart1, vc_uart4, gpclk0, aon_pwm, vc_uart0, vc_spi3, _, _),
+ BRCMSTB_PIN(8, uart1, vc_uart4, vc_i2csl, ctl_hdmi_5v, vc_uart0, vc_spi3, _, _),
+ BRCMSTB_PIN(9, uart1, vc_uart4, vc_i2csl, aon_pwm, vc_uart0, vc_spi3, _, _),
+ BRCMSTB_PIN(10, tsio, ctl_hdmi_5v, sc0, spdif_out, vc_spi5, usb_pwr, aon_gpclk, sd_card_f),
+ BRCMSTB_PIN(11, tsio, uart0, sc0, aud_fs_clk0, vc_spi5, usb_vbus, vc_uart2, sd_card_f),
+ BRCMSTB_PIN(12, tsio, uart0, vc_uart0, tsio, vc_spi5, usb_pwr, vc_uart2, sd_card_f),
+ BRCMSTB_PIN(13, bsc_m1, uart0, vc_uart0, uui, vc_spi5, arm_jtag, vc_uart2, vc_i2c3),
+ BRCMSTB_PIN(14, bsc_m1, uart0, vc_uart0, uui, vc_spi5, arm_jtag, vc_uart2, vc_i2c3),
+ BRCMSTB_PIN(15, ir_in, aon_fp_4sec_resetb, vc_uart0, pm_led_out, ctl_hdmi_5v, aon_pwm, aon_gpclk, _),
+ BRCMSTB_PIN(16, aon_cpu_standbyb, gpclk0, pm_led_out, ctl_hdmi_5v, vc_pwm0, usb_pwr, aud_fs_clk0, _),
+};
+
+static const struct brcmstb_pin_funcs bcm2712_c0_gpio_pin_funcs[] = {
+ BRCMSTB_PIN(0, bsc_m3, vc_i2c0, gpclk0, enet0, vc_pwm1, vc_spi0, ir_in, _),
+ BRCMSTB_PIN(1, bsc_m3, vc_i2c0, gpclk1, enet0, vc_pwm1, sr_edm_sense, vc_spi0, vc_uart3),
+ BRCMSTB_PIN(2, pdm, i2s_in, gpclk2, vc_spi4, pkt, vc_spi0, vc_uart3, _),
+ BRCMSTB_PIN(3, pdm, i2s_in, vc_spi4, pkt, vc_spi0, vc_uart3, _, _),
+ BRCMSTB_PIN(4, pdm, i2s_in, arm_jtag, vc_spi4, pkt, vc_spi0, vc_uart3, _),
+ BRCMSTB_PIN(5, pdm, vc_i2c3, arm_jtag, sd_card_e, vc_spi4, pkt, vc_pcm, vc_i2c5),
+ BRCMSTB_PIN(6, pdm, vc_i2c3, arm_jtag, sd_card_e, vc_spi4, pkt, vc_pcm, vc_i2c5),
+ BRCMSTB_PIN(7, i2s_out, spdif_out, arm_jtag, sd_card_e, vc_i2c3, enet0_rgmii, vc_pcm, vc_spi4),
+ BRCMSTB_PIN(8, i2s_out, aud_fs_clk0, arm_jtag, sd_card_e, vc_i2c3, enet0_mii, vc_pcm, vc_spi4),
+ BRCMSTB_PIN(9, i2s_out, aud_fs_clk0, arm_jtag, sd_card_e, enet0_mii, sd_card_c, vc_spi4, _),
+ BRCMSTB_PIN(10, bsc_m3, mtsif_alt1, i2s_in, i2s_out, vc_spi5, enet0_mii, sd_card_c, vc_spi4),
+ BRCMSTB_PIN(11, bsc_m3, mtsif_alt1, i2s_in, i2s_out, vc_spi5, enet0_mii, sd_card_c, vc_spi4),
+ BRCMSTB_PIN(12, spi_s, mtsif_alt1, i2s_in, i2s_out, vc_spi5, vc_i2csl, sd0, sd_card_d),
+ BRCMSTB_PIN(13, spi_s, mtsif_alt1, i2s_out, usb_vbus, vc_spi5, vc_i2csl, sd0, sd_card_d),
+ BRCMSTB_PIN(14, spi_s, vc_i2csl, enet0_rgmii, arm_jtag, vc_spi5, vc_pwm0, vc_i2c4, sd_card_d),
+ BRCMSTB_PIN(15, spi_s, vc_i2csl, vc_spi3, arm_jtag, vc_pwm0, vc_i2c4, gpclk0, _),
+ BRCMSTB_PIN(16, sd_card_b, i2s_out, vc_spi3, i2s_in, sd0, enet0_rgmii, gpclk1, _),
+ BRCMSTB_PIN(17, sd_card_b, i2s_out, vc_spi3, i2s_in, ext_sc_clk, sd0, enet0_rgmii, gpclk2),
+ BRCMSTB_PIN(18, sd_card_b, i2s_out, vc_spi3, i2s_in, sd0, enet0_rgmii, vc_pwm1, _),
+ BRCMSTB_PIN(19, sd_card_b, usb_pwr, vc_spi3, pkt, spdif_out, sd0, ir_in, vc_pwm1),
+ BRCMSTB_PIN(20, sd_card_b, uui, vc_uart0, arm_jtag, uart2, usb_pwr, vc_pcm, vc_uart4),
+ BRCMSTB_PIN(21, usb_pwr, uui, vc_uart0, arm_jtag, uart2, sd_card_b, vc_pcm, vc_uart4),
+ BRCMSTB_PIN(22, usb_pwr, enet0, vc_uart0, mtsif, uart2, usb_vbus, vc_pcm, vc_i2c5),
+ BRCMSTB_PIN(23, usb_vbus, enet0, vc_uart0, mtsif, uart2, i2s_out, vc_pcm, vc_i2c5),
+ BRCMSTB_PIN(24, mtsif, pkt, uart0, enet0_rgmii, enet0_rgmii, vc_i2c4, vc_uart3, _),
+ BRCMSTB_PIN(25, mtsif, pkt, sc0, uart0, enet0_rgmii, enet0_rgmii, vc_i2c4, vc_uart3),
+ BRCMSTB_PIN(26, mtsif, pkt, sc0, uart0, enet0_rgmii, vc_uart4, vc_spi5, _),
+ BRCMSTB_PIN(27, mtsif, pkt, sc0, uart0, enet0_rgmii, vc_uart4, vc_spi5, _),
+ BRCMSTB_PIN(28, mtsif, pkt, sc0, enet0_rgmii, vc_uart4, vc_spi5, _, _),
+ BRCMSTB_PIN(29, mtsif, pkt, sc0, enet0_rgmii, vc_uart4, vc_spi5, _, _),
+ BRCMSTB_PIN(30, mtsif, pkt, sc0, sd2, enet0_rgmii, gpclk0, vc_pwm0, _),
+ BRCMSTB_PIN(31, mtsif, pkt, sc0, sd2, enet0_rgmii, vc_spi3, vc_pwm0, _),
+ BRCMSTB_PIN(32, mtsif, pkt, sc0, sd2, enet0_rgmii, vc_spi3, vc_uart3, _),
+ BRCMSTB_PIN(33, mtsif, pkt, sd2, enet0_rgmii, vc_spi3, vc_uart3, _, _),
+ BRCMSTB_PIN(34, mtsif, pkt, ext_sc_clk, sd2, enet0_rgmii, vc_spi3, vc_i2c5, _),
+ BRCMSTB_PIN(35, mtsif, pkt, sd2, enet0_rgmii, vc_spi3, vc_i2c5, _, _),
+ BRCMSTB_PIN(36, sd0, mtsif, sc0, i2s_in, vc_uart3, vc_uart2, _, _),
+ BRCMSTB_PIN(37, sd0, mtsif, sc0, vc_spi0, i2s_in, vc_uart3, vc_uart2, _),
+ BRCMSTB_PIN(38, sd0, mtsif_alt, sc0, vc_spi0, i2s_in, vc_uart3, vc_uart2, _),
+ BRCMSTB_PIN(39, sd0, mtsif_alt, sc0, vc_spi0, vc_uart3, vc_uart2, _, _),
+ BRCMSTB_PIN(40, sd0, mtsif_alt, sc0, vc_spi0, bsc_m3, _, _, _),
+ BRCMSTB_PIN(41, sd0, mtsif_alt, sc0, vc_spi0, bsc_m3, _, _, _),
+ BRCMSTB_PIN(42, vc_spi0, mtsif_alt, vc_i2c0, sd_card_a, mtsif_alt1, arm_jtag, pdm, spi_m),
+ BRCMSTB_PIN(43, vc_spi0, mtsif_alt, vc_i2c0, sd_card_a, mtsif_alt1, arm_jtag, pdm, spi_m),
+ BRCMSTB_PIN(44, vc_spi0, mtsif_alt, enet0, sd_card_a, mtsif_alt1, arm_jtag, pdm, spi_m),
+ BRCMSTB_PIN(45, vc_spi0, mtsif_alt, enet0, sd_card_a, mtsif_alt1, arm_jtag, pdm, spi_m),
+ BRCMSTB_PIN(46, vc_spi0, mtsif_alt, sd_card_a, mtsif_alt1, arm_jtag, pdm, spi_m, _),
+ BRCMSTB_PIN(47, enet0, mtsif_alt, i2s_out, mtsif_alt1, arm_jtag, _, _, _),
+ BRCMSTB_PIN(48, sc0, usb_pwr, spdif_out, mtsif, _, _, _, _),
+ BRCMSTB_PIN(49, sc0, usb_pwr, aud_fs_clk0, mtsif, _, _, _, _),
+ BRCMSTB_PIN(50, sc0, usb_vbus, sc0, _, _, _, _, _),
+ BRCMSTB_PIN(51, sc0, enet0, sc0, sr_edm_sense, _, _, _, _),
+ BRCMSTB_PIN(52, sc0, enet0, vc_pwm1, _, _, _, _, _),
+ BRCMSTB_PIN(53, sc0, enet0_rgmii, ext_sc_clk, _, _, _, _, _),
+};
+
+static const struct brcmstb_pin_funcs bcm2712_d0_aon_gpio_pin_funcs[] = {
+ BRCMSTB_PIN(0, ir_in, vc_spi0, vc_uart0, vc_i2c3, uart0, vc_i2c0, _, _),
+ BRCMSTB_PIN(1, vc_pwm0, vc_spi0, vc_uart0, vc_i2c3, uart0, aon_pwm, vc_i2c0, vc_pwm1),
+ BRCMSTB_PIN(2, vc_pwm0, vc_spi0, vc_uart0, ctl_hdmi_5v, uart0, aon_pwm, ir_in, vc_pwm1),
+ BRCMSTB_PIN(3, ir_in, vc_spi0, vc_uart0, uart0, sd_card_g, aon_gpclk, _, _),
+ BRCMSTB_PIN(4, gpclk0, vc_spi0, pm_led_out, aon_pwm, sd_card_g, vc_pwm0, _, _),
+ BRCMSTB_PIN(5, gpclk1, ir_in, aon_pwm, sd_card_g, vc_pwm0, _, _, _),
+ BRCMSTB_PIN(6, uart1, vc_uart2, ctl_hdmi_5v, gpclk2, vc_spi3, _, _, _),
+ BRCMSTB_PIN(7, _, _, _, _, _, _, _, _), /* non-existent on D0 silicon */
+ BRCMSTB_PIN(8, uart1, vc_uart2, ctl_hdmi_5v, vc_spi0, vc_spi3, _, _, _),
+ BRCMSTB_PIN(9, uart1, vc_uart2, vc_uart0, aon_pwm, vc_spi0, vc_uart2, vc_spi3, _),
+ BRCMSTB_PIN(10, _, _, _, _, _, _, _, _), /* non-existent on D0 silicon */
+ BRCMSTB_PIN(11, _, _, _, _, _, _, _, _), /* non-existent on D0 silicon */
+ BRCMSTB_PIN(12, uart1, vc_uart2, vc_uart0, vc_spi0, usb_pwr, vc_uart2, vc_spi3, _),
+ BRCMSTB_PIN(13, bsc_m1, vc_uart0, uui, vc_spi0, arm_jtag, vc_uart2, vc_i2c3, _),
+ BRCMSTB_PIN(14, bsc_m1, aon_gpclk, vc_uart0, uui, vc_spi0, arm_jtag, vc_uart2, vc_i2c3),
+};
+
+static const struct brcmstb_pin_funcs bcm2712_d0_gpio_pin_funcs[] = {
+ BRCMSTB_PIN(1, vc_i2c0, usb_pwr, gpclk0, sd_card_e, vc_spi3, sr_edm_sense, vc_spi0, vc_uart0),
+ BRCMSTB_PIN(2, vc_i2c0, usb_pwr, gpclk1, sd_card_e, vc_spi3, clk_observe, vc_spi0, vc_uart0),
+ BRCMSTB_PIN(3, vc_i2c3, usb_vbus, gpclk2, sd_card_e, vc_spi3, vc_spi0, vc_uart0, _),
+ BRCMSTB_PIN(4, vc_i2c3, vc_pwm1, vc_spi3, sd_card_e, vc_spi3, vc_spi0, vc_uart0, _),
+ BRCMSTB_PIN(10, bsc_m3, vc_pwm1, vc_spi3, sd_card_e, vc_spi3, gpclk0, _, _),
+ BRCMSTB_PIN(11, bsc_m3, vc_spi3, clk_observe, sd_card_c, gpclk1, _, _, _),
+ BRCMSTB_PIN(12, spi_s, vc_spi3, sd_card_c, sd_card_d, _, _, _, _),
+ BRCMSTB_PIN(13, spi_s, vc_spi3, sd_card_c, sd_card_d, _, _, _, _),
+ BRCMSTB_PIN(14, spi_s, uui, arm_jtag, vc_pwm0, vc_i2c0, sd_card_d, _, _),
+ BRCMSTB_PIN(15, spi_s, uui, arm_jtag, vc_pwm0, vc_i2c0, gpclk0, _, _),
+ BRCMSTB_PIN(18, sd_card_f, vc_pwm1, _, _, _, _, _, _),
+ BRCMSTB_PIN(19, sd_card_f, usb_pwr, vc_pwm1, _, _, _, _, _),
+ BRCMSTB_PIN(20, vc_i2c3, uui, vc_uart0, arm_jtag, vc_uart2, _, _, _),
+ BRCMSTB_PIN(21, vc_i2c3, uui, vc_uart0, arm_jtag, vc_uart2, _, _, _),
+ BRCMSTB_PIN(22, sd_card_f, vc_uart0, vc_i2c3, _, _, _, _, _),
+ BRCMSTB_PIN(23, vc_uart0, vc_i2c3, _, _, _, _, _, _),
+ BRCMSTB_PIN(24, sd_card_b, vc_spi0, arm_jtag, uart0, usb_pwr, vc_uart2, vc_uart0, _),
+ BRCMSTB_PIN(25, sd_card_b, vc_spi0, arm_jtag, uart0, usb_pwr, vc_uart2, vc_uart0, _),
+ BRCMSTB_PIN(26, sd_card_b, vc_spi0, arm_jtag, uart0, usb_vbus, vc_uart2, vc_spi0, _),
+ BRCMSTB_PIN(27, sd_card_b, vc_spi0, arm_jtag, uart0, vc_uart2, vc_spi0, _, _),
+ BRCMSTB_PIN(28, sd_card_b, vc_spi0, arm_jtag, vc_i2c0, vc_spi0, _, _, _),
+ BRCMSTB_PIN(29, arm_jtag, vc_i2c0, vc_spi0, _, _, _, _, _),
+ BRCMSTB_PIN(30, sd2, gpclk0, vc_pwm0, _, _, _, _, _),
+ BRCMSTB_PIN(31, sd2, vc_spi3, vc_pwm0, _, _, _, _, _),
+ BRCMSTB_PIN(32, sd2, vc_spi3, vc_uart3, _, _, _, _, _),
+ BRCMSTB_PIN(33, sd2, vc_spi3, vc_uart3, _, _, _, _, _),
+ BRCMSTB_PIN(34, sd2, vc_spi3, vc_i2c5, _, _, _, _, _),
+ BRCMSTB_PIN(35, sd2, vc_spi3, vc_i2c5, _, _, _, _, _),
+};
+
+static const struct pinctrl_desc bcm2712_c0_pinctrl_desc = {
+ .name = "pinctrl-bcm2712",
+ .pins = bcm2712_c0_gpio_pins,
+ .npins = ARRAY_SIZE(bcm2712_c0_gpio_pins),
+};
+
+static const struct pinctrl_desc bcm2712_c0_aon_pinctrl_desc = {
+ .name = "aon-pinctrl-bcm2712",
+ .pins = bcm2712_c0_aon_gpio_pins,
+ .npins = ARRAY_SIZE(bcm2712_c0_aon_gpio_pins),
+};
+
+static const struct pinctrl_desc bcm2712_d0_pinctrl_desc = {
+ .name = "pinctrl-bcm2712",
+ .pins = bcm2712_d0_gpio_pins,
+ .npins = ARRAY_SIZE(bcm2712_d0_gpio_pins),
+};
+
+static const struct pinctrl_desc bcm2712_d0_aon_pinctrl_desc = {
+ .name = "aon-pinctrl-bcm2712",
+ .pins = bcm2712_d0_aon_gpio_pins,
+ .npins = ARRAY_SIZE(bcm2712_d0_aon_gpio_pins),
+};
+
+static const struct pinctrl_gpio_range bcm2712_c0_pinctrl_gpio_range = {
+ .name = "pinctrl-bcm2712",
+ .npins = ARRAY_SIZE(bcm2712_c0_gpio_pins),
+};
+
+static const struct pinctrl_gpio_range bcm2712_c0_aon_pinctrl_gpio_range = {
+ .name = "aon-pinctrl-bcm2712",
+ .npins = ARRAY_SIZE(bcm2712_c0_aon_gpio_pins),
+};
+
+static const struct pinctrl_gpio_range bcm2712_d0_pinctrl_gpio_range = {
+ .name = "pinctrl-bcm2712",
+ .npins = ARRAY_SIZE(bcm2712_d0_gpio_pins),
+};
+
+static const struct pinctrl_gpio_range bcm2712_d0_aon_pinctrl_gpio_range = {
+ .name = "aon-pinctrl-bcm2712",
+ .npins = ARRAY_SIZE(bcm2712_d0_aon_gpio_pins),
+};
+
+static const struct brcmstb_pdata bcm2712_c0_pdata = {
+ .pctl_desc = &bcm2712_c0_pinctrl_desc,
+ .gpio_range = &bcm2712_c0_pinctrl_gpio_range,
+ .pin_regs = bcm2712_c0_gpio_pin_regs,
+ .pin_funcs = bcm2712_c0_gpio_pin_funcs,
+ .func_count = func_count,
+ .func_gpio = func_gpio,
+ .func_names = bcm2712_func_names,
+};
+
+static const struct brcmstb_pdata bcm2712_c0_aon_pdata = {
+ .pctl_desc = &bcm2712_c0_aon_pinctrl_desc,
+ .gpio_range = &bcm2712_c0_aon_pinctrl_gpio_range,
+ .pin_regs = bcm2712_c0_aon_gpio_pin_regs,
+ .pin_funcs = bcm2712_c0_aon_gpio_pin_funcs,
+ .func_count = func_count,
+ .func_gpio = func_gpio,
+ .func_names = bcm2712_func_names,
+};
+
+static const struct brcmstb_pdata bcm2712_d0_pdata = {
+ .pctl_desc = &bcm2712_d0_pinctrl_desc,
+ .gpio_range = &bcm2712_d0_pinctrl_gpio_range,
+ .pin_regs = bcm2712_d0_gpio_pin_regs,
+ .pin_funcs = bcm2712_d0_gpio_pin_funcs,
+ .func_count = func_count,
+ .func_gpio = func_gpio,
+ .func_names = bcm2712_func_names,
+};
+
+static const struct brcmstb_pdata bcm2712_d0_aon_pdata = {
+ .pctl_desc = &bcm2712_d0_aon_pinctrl_desc,
+ .gpio_range = &bcm2712_d0_aon_pinctrl_gpio_range,
+ .pin_regs = bcm2712_d0_aon_gpio_pin_regs,
+ .pin_funcs = bcm2712_d0_aon_gpio_pin_funcs,
+ .func_count = func_count,
+ .func_gpio = func_gpio,
+ .func_names = bcm2712_func_names,
+};
+
+static int bcm2712_pinctrl_probe(struct platform_device *pdev)
+{
+ return brcmstb_pinctrl_probe(pdev);
+}
+
+static const struct of_device_id bcm2712_pinctrl_match[] = {
+ {
+ .compatible = "brcm,bcm2712c0-pinctrl",
+ .data = &bcm2712_c0_pdata
+ },
+ {
+ .compatible = "brcm,bcm2712c0-aon-pinctrl",
+ .data = &bcm2712_c0_aon_pdata
+ },
+
+ {
+ .compatible = "brcm,bcm2712d0-pinctrl",
+ .data = &bcm2712_d0_pdata
+ },
+ {
+ .compatible = "brcm,bcm2712d0-aon-pinctrl",
+ .data = &bcm2712_d0_aon_pdata
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, bcm2712_pinctrl_match);
+
+static struct platform_driver bcm2712_pinctrl_driver = {
+ .probe = bcm2712_pinctrl_probe,
+ .driver = {
+ .name = "pinctrl-bcm2712",
+ .of_match_table = bcm2712_pinctrl_match,
+ .suppress_bind_attrs = true,
+ },
+};
+module_platform_driver(bcm2712_pinctrl_driver);
+
+MODULE_AUTHOR("Phil Elwell");
+MODULE_AUTHOR("Jonathan Bell");
+MODULE_AUTHOR("Ivan T. Ivanov");
+MODULE_AUTHOR("Andrea della Porta");
+MODULE_DESCRIPTION("Broadcom BCM2712 pinctrl driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/bcm/pinctrl-brcmstb.c b/drivers/pinctrl/bcm/pinctrl-brcmstb.c
new file mode 100644
index 000000000000..f46b27155c3c
--- /dev/null
+++ b/drivers/pinctrl/bcm/pinctrl-brcmstb.c
@@ -0,0 +1,442 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for Broadcom brcmstb GPIO units (pinctrl only)
+ *
+ * Copyright (C) 2024-2025 Ivan T. Ivanov, Andrea della Porta
+ * Copyright (C) 2021-3 Raspberry Pi Ltd.
+ * Copyright (C) 2012 Chris Boot, Simon Arlott, Stephen Warren
+ *
+ * Based heavily on the BCM2835 GPIO & pinctrl driver, which was inspired by:
+ * pinctrl-nomadik.c, please see original file for copyright information
+ * pinctrl-tegra.c, please see original file for copyright information
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/cleanup.h>
+
+#include "pinctrl-brcmstb.h"
+
+#define BRCMSTB_PULL_NONE 0
+#define BRCMSTB_PULL_DOWN 1
+#define BRCMSTB_PULL_UP 2
+#define BRCMSTB_PULL_MASK 0x3
+
+#define BIT_TO_REG(b) (((b) >> 5) << 2)
+#define BIT_TO_SHIFT(b) ((b) & 0x1f)
+
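A worked expansion of the two addressing helpers above, e.g. for bit index 37:

	BIT_TO_REG(37)   = (37 >> 5) << 2 = 0x4	/* second 32-bit register */
	BIT_TO_SHIFT(37) = 37 & 0x1f      = 5	/* shift within that register */

so a 2-bit pull field for that pin occupies bits [6:5] of the register at offset 0x4.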
+struct brcmstb_pinctrl {
+ struct device *dev;
+ void __iomem *base;
+ struct pinctrl_dev *pctl_dev;
+ struct pinctrl_desc pctl_desc;
+ const struct pin_regs *pin_regs;
+ const struct brcmstb_pin_funcs *pin_funcs;
+ const char * const *func_names;
+ unsigned int func_count;
+ unsigned int func_gpio;
+ const char *const *gpio_groups;
+ struct pinctrl_gpio_range gpio_range;
+ /* Protect FSEL registers */
+ spinlock_t fsel_lock;
+};
+
+static unsigned int brcmstb_pinctrl_fsel_get(struct brcmstb_pinctrl *pc,
+ unsigned int pin)
+{
+ u32 bit = pc->pin_regs[pin].mux_bit;
+ unsigned int func;
+ int fsel;
+ u32 val;
+
+ if (!bit)
+ return pc->func_gpio;
+
+ bit &= ~MUX_BIT_VALID;
+
+ val = readl(pc->base + BIT_TO_REG(bit));
+ fsel = (val >> BIT_TO_SHIFT(bit)) & pc->pin_funcs[pin].func_mask;
+ func = pc->pin_funcs[pin].funcs[fsel];
+
+ if (func >= pc->func_count)
+ func = fsel;
+
+ dev_dbg(pc->dev, "get %04x: %08x (%u => %s)\n",
+ BIT_TO_REG(bit), val, pin,
+ pc->func_names[func]);
+
+ return func;
+}
+
+static int brcmstb_pinctrl_fsel_set(struct brcmstb_pinctrl *pc,
+ unsigned int pin, unsigned int func)
+{
+ u32 bit = pc->pin_regs[pin].mux_bit, val, fsel_mask;
+ const u8 *pin_funcs;
+ int fsel;
+ int cur;
+ int i;
+
+ if (!bit || func >= pc->func_count)
+ return -EINVAL;
+
+ bit &= ~MUX_BIT_VALID;
+
+ fsel = pc->pin_funcs[pin].n_funcs + 1;
+ fsel_mask = pc->pin_funcs[pin].func_mask;
+
+ if (func >= fsel) {
+ /* Convert to an fsel number */
+ pin_funcs = pc->pin_funcs[pin].funcs;
+ for (i = 1; i < fsel; i++) {
+ if (pin_funcs[i - 1] == func) {
+ fsel = i;
+ break;
+ }
+ }
+ } else {
+ fsel = func;
+ }
+
+ if (fsel >= pc->pin_funcs[pin].n_funcs + 1)
+ return -EINVAL;
+
+ guard(spinlock_irqsave)(&pc->fsel_lock);
+
+ val = readl(pc->base + BIT_TO_REG(bit));
+ cur = (val >> BIT_TO_SHIFT(bit)) & fsel_mask;
+
+ dev_dbg(pc->dev, "read %04x: %08x (%u => %s)\n",
+ BIT_TO_REG(bit), val, pin,
+ pc->func_names[cur]);
+
+ if (cur != fsel) {
+ val &= ~(fsel_mask << BIT_TO_SHIFT(bit));
+ val |= fsel << BIT_TO_SHIFT(bit);
+
+ dev_dbg(pc->dev, "write %04x: %08x (%u <= %s)\n",
+ BIT_TO_REG(bit), val, pin,
+ pc->func_names[fsel]);
+ writel(val, pc->base + BIT_TO_REG(bit));
+ }
+
+ return 0;
+}
+
+static int brcmstb_pctl_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct brcmstb_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+
+ return pc->pctl_desc.npins;
+}
+
+static const char *brcmstb_pctl_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct brcmstb_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+
+ return pc->gpio_groups[selector];
+}
+
+static int brcmstb_pctl_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const unsigned int **pins,
+ unsigned int *num_pins)
+{
+ struct brcmstb_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+
+ *pins = &pc->pctl_desc.pins[selector].number;
+ *num_pins = 1;
+
+ return 0;
+}
+
+static void brcmstb_pctl_pin_dbg_show(struct pinctrl_dev *pctldev,
+ struct seq_file *s, unsigned int offset)
+{
+ struct brcmstb_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+ unsigned int fsel = brcmstb_pinctrl_fsel_get(pc, offset);
+ const char *fname = pc->func_names[fsel];
+
+ seq_printf(s, "function %s", fname);
+}
+
+static void brcmstb_pctl_dt_free_map(struct pinctrl_dev *pctldev,
+ struct pinctrl_map *maps,
+ unsigned int num_maps)
+{
+ int i;
+
+ for (i = 0; i < num_maps; i++)
+ if (maps[i].type == PIN_MAP_TYPE_CONFIGS_PIN)
+ kfree(maps[i].data.configs.configs);
+
+ kfree(maps);
+}
+
+static const struct pinctrl_ops brcmstb_pctl_ops = {
+ .get_groups_count = brcmstb_pctl_get_groups_count,
+ .get_group_name = brcmstb_pctl_get_group_name,
+ .get_group_pins = brcmstb_pctl_get_group_pins,
+ .pin_dbg_show = brcmstb_pctl_pin_dbg_show,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_all,
+ .dt_free_map = brcmstb_pctl_dt_free_map,
+};
+
+static int brcmstb_pmx_free(struct pinctrl_dev *pctldev, unsigned int offset)
+{
+ struct brcmstb_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+
+ /* disable by setting to GPIO */
+ return brcmstb_pinctrl_fsel_set(pc, offset, pc->func_gpio);
+}
+
+static int brcmstb_pmx_get_functions_count(struct pinctrl_dev *pctldev)
+{
+ struct brcmstb_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+
+ return pc->func_count;
+}
+
+static const char *brcmstb_pmx_get_function_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct brcmstb_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+
+ return (selector < pc->func_count) ? pc->func_names[selector] : NULL;
+}
+
+static int brcmstb_pmx_get_function_groups(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const char *const **groups,
+ unsigned *const num_groups)
+{
+ struct brcmstb_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+
+ *groups = pc->gpio_groups;
+ *num_groups = pc->pctl_desc.npins;
+
+ return 0;
+}
+
+static int brcmstb_pmx_set(struct pinctrl_dev *pctldev,
+ unsigned int func_selector,
+ unsigned int group_selector)
+{
+ struct brcmstb_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+ const struct pinctrl_desc *pctldesc = &pc->pctl_desc;
+ const struct pinctrl_pin_desc *pindesc;
+
+ if (group_selector >= pctldesc->npins)
+ return -EINVAL;
+
+ pindesc = &pctldesc->pins[group_selector];
+ return brcmstb_pinctrl_fsel_set(pc, pindesc->number, func_selector);
+}
+
+static int brcmstb_pmx_gpio_request_enable(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int pin)
+{
+ struct brcmstb_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+
+ return brcmstb_pinctrl_fsel_set(pc, pin, pc->func_gpio);
+}
+
+static void brcmstb_pmx_gpio_disable_free(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int offset)
+{
+ struct brcmstb_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+
+ /* disable by setting to GPIO */
+ (void)brcmstb_pinctrl_fsel_set(pc, offset, pc->func_gpio);
+}
+
+static bool brcmstb_pmx_function_is_gpio(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct brcmstb_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+
+ return pc->func_gpio == selector;
+}
+
+static const struct pinmux_ops brcmstb_pmx_ops = {
+ .free = brcmstb_pmx_free,
+ .get_functions_count = brcmstb_pmx_get_functions_count,
+ .get_function_name = brcmstb_pmx_get_function_name,
+ .get_function_groups = brcmstb_pmx_get_function_groups,
+ .set_mux = brcmstb_pmx_set,
+ .gpio_request_enable = brcmstb_pmx_gpio_request_enable,
+ .gpio_disable_free = brcmstb_pmx_gpio_disable_free,
+ .function_is_gpio = brcmstb_pmx_function_is_gpio,
+ .strict = true,
+};
+
+static unsigned int brcmstb_pull_config_get(struct brcmstb_pinctrl *pc,
+ unsigned int pin)
+{
+ u32 bit = pc->pin_regs[pin].pad_bit, val;
+
+ if (bit == PAD_BIT_INVALID)
+ return BRCMSTB_PULL_NONE;
+
+ val = readl(pc->base + BIT_TO_REG(bit));
+ return (val >> BIT_TO_SHIFT(bit)) & BRCMSTB_PULL_MASK;
+}
+
+static int brcmstb_pull_config_set(struct brcmstb_pinctrl *pc,
+ unsigned int pin, unsigned int arg)
+{
+ u32 bit = pc->pin_regs[pin].pad_bit, val;
+
+ if (bit == PAD_BIT_INVALID) {
+ dev_warn(pc->dev, "Can't set pulls for %s\n",
+ pc->gpio_groups[pin]);
+ return -EINVAL;
+ }
+
+ guard(spinlock_irqsave)(&pc->fsel_lock);
+
+ val = readl(pc->base + BIT_TO_REG(bit));
+ val &= ~(BRCMSTB_PULL_MASK << BIT_TO_SHIFT(bit));
+ val |= (arg << BIT_TO_SHIFT(bit));
+ writel(val, pc->base + BIT_TO_REG(bit));
+
+ return 0;
+}
+
+static int brcmstb_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *config)
+{
+ struct brcmstb_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+ enum pin_config_param param = pinconf_to_config_param(*config);
+ u32 arg;
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ arg = (brcmstb_pull_config_get(pc, pin) == BRCMSTB_PULL_NONE);
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ arg = (brcmstb_pull_config_get(pc, pin) == BRCMSTB_PULL_DOWN);
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ arg = (brcmstb_pull_config_get(pc, pin) == BRCMSTB_PULL_UP);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ *config = pinconf_to_config_packed(param, arg);
+
+ return 0;
+}
+
+static int brcmstb_pinconf_set(struct pinctrl_dev *pctldev,
+ unsigned int pin, unsigned long *configs,
+ unsigned int num_configs)
+{
+ struct brcmstb_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+ enum pin_config_param param;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ ret = brcmstb_pull_config_set(pc, pin, BRCMSTB_PULL_NONE);
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ ret = brcmstb_pull_config_set(pc, pin, BRCMSTB_PULL_DOWN);
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ ret = brcmstb_pull_config_set(pc, pin, BRCMSTB_PULL_UP);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ /* don't let a later config mask an earlier failure */
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct pinconf_ops brcmstb_pinconf_ops = {
+ .is_generic = true,
+ .pin_config_get = brcmstb_pinconf_get,
+ .pin_config_set = brcmstb_pinconf_set,
+};
+
+int brcmstb_pinctrl_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ const struct brcmstb_pdata *pdata;
+ struct brcmstb_pinctrl *pc;
+ const char **names;
+ int num_pins, i;
+
+ pdata = of_device_get_match_data(dev);
+
+ pc = devm_kzalloc(dev, sizeof(*pc), GFP_KERNEL);
+ if (!pc)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, pc);
+ pc->dev = dev;
+ spin_lock_init(&pc->fsel_lock);
+
+ pc->base = devm_of_iomap(dev, np, 0, NULL);
+ if (IS_ERR(pc->base))
+ return dev_err_probe(dev, PTR_ERR(pc->base),
+ "Could not get IO memory\n");
+
+ pc->pctl_desc = *pdata->pctl_desc;
+ pc->pctl_desc.pctlops = &brcmstb_pctl_ops;
+ pc->pctl_desc.pmxops = &brcmstb_pmx_ops;
+ pc->pctl_desc.confops = &brcmstb_pinconf_ops;
+ pc->pctl_desc.owner = THIS_MODULE;
+ num_pins = pc->pctl_desc.npins;
+ names = devm_kmalloc_array(dev, num_pins, sizeof(const char *),
+ GFP_KERNEL);
+ if (!names)
+ return -ENOMEM;
+
+ for (i = 0; i < num_pins; i++)
+ names[i] = pc->pctl_desc.pins[i].name;
+
+ pc->gpio_groups = names;
+ pc->pin_regs = pdata->pin_regs;
+ pc->pin_funcs = pdata->pin_funcs;
+ pc->func_count = pdata->func_count;
+ pc->func_names = pdata->func_names;
+
+ pc->pctl_dev = devm_pinctrl_register(dev, &pc->pctl_desc, pc);
+ if (IS_ERR(pc->pctl_dev))
+ return dev_err_probe(dev, PTR_ERR(pc->pctl_dev),
+ "Failed to register pinctrl device\n");
+
+ pc->gpio_range = *pdata->gpio_range;
+ pinctrl_add_gpio_range(pc->pctl_dev, &pc->gpio_range);
+
+ return 0;
+}
+EXPORT_SYMBOL(brcmstb_pinctrl_probe);
+
+MODULE_AUTHOR("Phil Elwell");
+MODULE_AUTHOR("Jonathan Bell");
+MODULE_AUTHOR("Ivan T. Ivanov");
+MODULE_AUTHOR("Andrea della Porta");
+MODULE_DESCRIPTION("Broadcom brcmstb pinctrl driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/bcm/pinctrl-brcmstb.h b/drivers/pinctrl/bcm/pinctrl-brcmstb.h
new file mode 100644
index 000000000000..c3459103e056
--- /dev/null
+++ b/drivers/pinctrl/bcm/pinctrl-brcmstb.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Header for Broadcom brcmstb GPIO based drivers
+ *
+ * Copyright (C) 2024-2025 Ivan T. Ivanov, Andrea della Porta
+ * Copyright (C) 2021-3 Raspberry Pi Ltd.
+ * Copyright (C) 2012 Chris Boot, Simon Arlott, Stephen Warren
+ *
+ * Based heavily on the BCM2835 GPIO & pinctrl driver, which was inspired by:
+ * pinctrl-nomadik.c, please see original file for copyright information
+ * pinctrl-tegra.c, please see original file for copyright information
+ */
+
+#ifndef __PINCTRL_BRCMSTB_H__
+#define __PINCTRL_BRCMSTB_H__
+
+#include <linux/types.h>
+#include <linux/platform_device.h>
+
+#define BRCMSTB_FUNC(f) \
+ [func_##f] = #f
+
+#define MUX_BIT_VALID 0x8000
+#define PAD_BIT_INVALID 0xffff
+
+#define MUX_BIT(muxreg, muxshift) \
+ (MUX_BIT_VALID + ((muxreg) << 5) + ((muxshift) << 2))
+#define PAD_BIT(padreg, padshift) \
+ (((padreg) << 5) + ((padshift) << 1))
+
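+/*
+ * MUX_BIT() and PAD_BIT() pack a register index (bits 14:5) and a bit
+ * offset within that 32-bit register (bits 4:0) into one u16: mux fields
+ * are 4 bits wide, hence the << 2, and pad/pull fields are 2 bits wide,
+ * hence the << 1. Bit 15 (MUX_BIT_VALID) marks a usable mux descriptor
+ * and an all-ones value (PAD_BIT_INVALID) a pin with no pad control.
+ * The BIT_TO_REG()/BIT_TO_SHIFT() helpers in the C file are assumed to
+ * reverse this packing, e.g. MUX_BIT(2, 3) selects bits 15:12 of the
+ * third 32-bit mux register.
+ */
+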
+#define GPIO_REGS(n, muxreg, muxshift, padreg, padshift) \
+ [n] = { MUX_BIT(muxreg, muxshift), PAD_BIT(padreg, padshift) }
+
+#define EMMC_REGS(n, padreg, padshift) \
+ [n] = { 0, PAD_BIT(padreg, padshift) }
+
+#define AON_GPIO_REGS(n, muxreg, muxshift, padreg, padshift) \
+ GPIO_REGS(n, muxreg, muxshift, padreg, padshift)
+
+#define AON_SGPIO_REGS(n, muxreg, muxshift) \
+ [(n) + 32] = { MUX_BIT(muxreg, muxshift), PAD_BIT_INVALID }
+
+#define GPIO_PIN(n) PINCTRL_PIN(n, "gpio" #n)
+/*
+ * AON pins are in the Always-On power domain. AON SGPIOs are also 'Safe',
+ * i.e. 5V tolerant (necessary for the HDMI I2C pins), and can be driven
+ * while the power is off.
+ */
+#define AON_GPIO_PIN(n) PINCTRL_PIN(n, "aon_gpio" #n)
+#define AON_SGPIO_PIN(n) PINCTRL_PIN((n) + 32, "aon_sgpio" #n)
+
+struct pin_regs {
+ u16 mux_bit;
+ u16 pad_bit;
+};
+
+/**
+ * struct brcmstb_pin_funcs - primary and alternate functions selectable
+ * on a pin
+ * @func_mask: mask of the valid function-selector bits in the mux
+ * registers
+ * @funcs: array of function identifiers
+ * @n_funcs: number of entries in @funcs
+ */
+struct brcmstb_pin_funcs {
+ const u32 func_mask;
+ const u8 *funcs;
+ const unsigned int n_funcs;
+};
+
+/**
+ * struct brcmstb_pdata - specific data for a pinctrl chip implementation
+ * @pctl_desc: pin controller descriptor for this implementation
+ * @gpio_range: range of GPIOs served by this controller
+ * @pin_regs: array of register descriptors for each pin
+ * @pin_funcs: array of the assignable functions for each pin
+ * @func_count: total number of functions
+ * @func_gpio: which function number is GPIO (usually 0)
+ * @func_names: an array listing all function names
+ */
+struct brcmstb_pdata {
+ const struct pinctrl_desc *pctl_desc;
+ const struct pinctrl_gpio_range *gpio_range;
+ const struct pin_regs *pin_regs;
+ const struct brcmstb_pin_funcs *pin_funcs;
+ const unsigned int func_count;
+ const unsigned int func_gpio;
+ const char * const *func_names;
+};
+
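+/*
+ * Sketch of the glue a chip-specific driver provides (identifiers are
+ * illustrative, not taken from a real SoC):
+ *
+ *	static const struct pin_regs chip_pin_regs[] = {
+ *		GPIO_REGS(0, 0, 0, 4, 0),
+ *		GPIO_REGS(1, 0, 1, 4, 1),
+ *	};
+ *
+ *	static const struct brcmstb_pdata chip_pdata = {
+ *		.pctl_desc = &chip_pctl_desc,
+ *		.gpio_range = &chip_gpio_range,
+ *		.pin_regs = chip_pin_regs,
+ *		.pin_funcs = chip_pin_funcs,
+ *		.func_count = func_count,
+ *		.func_gpio = func_gpio,
+ *		.func_names = chip_func_names,
+ *	};
+ */
+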
+int brcmstb_pinctrl_probe(struct platform_device *pdev);
+
+#endif
diff --git a/drivers/pinctrl/cirrus/pinctrl-madera-core.c b/drivers/pinctrl/cirrus/pinctrl-madera-core.c
index d19ef13224cc..1d9481b17091 100644
--- a/drivers/pinctrl/cirrus/pinctrl-madera-core.c
+++ b/drivers/pinctrl/cirrus/pinctrl-madera-core.c
@@ -804,7 +804,7 @@ static int madera_pin_conf_get(struct pinctrl_dev *pctldev, unsigned int pin,
if (conf[0] & MADERA_GP1_IP_CFG_MASK)
result = 1;
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
if ((conf[1] & MADERA_GP1_DIR_MASK) &&
(conf[0] & MADERA_GP1_LVL_MASK))
result = 1;
@@ -902,7 +902,7 @@ static int madera_pin_conf_set(struct pinctrl_dev *pctldev, unsigned int pin,
mask[1] |= MADERA_GP1_DIR_MASK;
conf[1] |= MADERA_GP1_DIR;
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
val = pinconf_to_config_argument(*configs);
mask[0] |= MADERA_GP1_LVL_MASK;
if (val)
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
index 18de31328540..731c58ad43ee 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
@@ -245,7 +245,7 @@ static int imx_pmx_set(struct pinctrl_dev *pctldev, unsigned selector,
{
struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
const struct imx_pinctrl_soc_info *info = ipctl->info;
- struct function_desc *func;
+ const struct function_desc *func;
struct group_desc *grp;
struct imx_pin *pin;
unsigned int npins;
@@ -266,7 +266,7 @@ static int imx_pmx_set(struct pinctrl_dev *pctldev, unsigned selector,
npins = grp->grp.npins;
dev_dbg(ipctl->dev, "enable function %s group %s\n",
- func->func.name, grp->grp.name);
+ func->func->name, grp->grp.name);
for (i = 0; i < npins; i++) {
/*
@@ -580,33 +580,38 @@ static int imx_pinctrl_parse_functions(struct device_node *np,
u32 index)
{
struct pinctrl_dev *pctl = ipctl->pctl;
- struct function_desc *func;
+ struct pinfunction *func;
struct group_desc *grp;
const char **group_names;
+ int ret;
u32 i;
dev_dbg(pctl->dev, "parse function(%d): %pOFn\n", index, np);
- func = pinmux_generic_get_function(pctl, index);
+ func = devm_kzalloc(ipctl->dev, sizeof(*func), GFP_KERNEL);
if (!func)
- return -EINVAL;
+ return -ENOMEM;
/* Initialise function */
- func->func.name = np->name;
- func->func.ngroups = of_get_child_count(np);
- if (func->func.ngroups == 0) {
+ func->name = np->name;
+ func->ngroups = of_get_child_count(np);
+ if (func->ngroups == 0) {
dev_info(ipctl->dev, "no groups defined in %pOF\n", np);
return -EINVAL;
}
- group_names = devm_kcalloc(ipctl->dev, func->func.ngroups,
- sizeof(*func->func.groups), GFP_KERNEL);
+ group_names = devm_kcalloc(ipctl->dev, func->ngroups,
+ sizeof(*func->groups), GFP_KERNEL);
if (!group_names)
return -ENOMEM;
i = 0;
for_each_child_of_node_scoped(np, child)
group_names[i++] = child->name;
- func->func.groups = group_names;
+ func->groups = group_names;
+
+ ret = pinmux_generic_add_pinfunction(pctl, func, NULL);
+ if (ret < 0)
+ return ret;
i = 0;
for_each_child_of_node_scoped(np, child) {
@@ -615,6 +620,10 @@ static int imx_pinctrl_parse_functions(struct device_node *np,
return -ENOMEM;
mutex_lock(&ipctl->mutex);
+ /*
+ * FIXME: This should use pinctrl_generic_add_group() and not
+ * access the private radix tree directly.
+ */
radix_tree_insert(&pctl->pin_group_tree,
ipctl->group_index++, grp);
mutex_unlock(&ipctl->mutex);
@@ -669,20 +678,6 @@ static int imx_pinctrl_probe_dt(struct platform_device *pdev,
}
}
- for (i = 0; i < nfuncs; i++) {
- struct function_desc *function;
-
- function = devm_kzalloc(&pdev->dev, sizeof(*function),
- GFP_KERNEL);
- if (!function)
- return -ENOMEM;
-
- mutex_lock(&ipctl->mutex);
- radix_tree_insert(&pctl->pin_function_tree, i, function);
- mutex_unlock(&ipctl->mutex);
- }
- pctl->num_functions = nfuncs;
-
ipctl->group_index = 0;
if (flat_funcs) {
pctl->num_groups = of_get_child_count(np);
diff --git a/drivers/pinctrl/mediatek/pinctrl-airoha.c b/drivers/pinctrl/mediatek/pinctrl-airoha.c
index b405dfa20891..f1cf2578fe42 100644
--- a/drivers/pinctrl/mediatek/pinctrl-airoha.c
+++ b/drivers/pinctrl/mediatek/pinctrl-airoha.c
@@ -35,13 +35,8 @@
#define PINCTRL_FUNC_DESC(id) \
{ \
- .desc = { \
- .func = { \
- .name = #id, \
- .groups = id##_groups, \
- .ngroups = ARRAY_SIZE(id##_groups), \
- } \
- }, \
+ .desc = PINCTRL_PINFUNCTION(#id, id##_groups, \
+ ARRAY_SIZE(id##_groups)), \
.groups = id##_func_group, \
.group_size = ARRAY_SIZE(id##_func_group), \
}
@@ -337,7 +332,7 @@ struct airoha_pinctrl_func_group {
};
struct airoha_pinctrl_func {
- const struct function_desc desc;
+ const struct pinfunction desc;
const struct airoha_pinctrl_func_group *groups;
u8 group_size;
};
@@ -2451,7 +2446,7 @@ static int airoha_pinmux_set_mux(struct pinctrl_dev *pctrl_dev,
{
struct airoha_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
const struct airoha_pinctrl_func *func;
- struct function_desc *desc;
+ const struct function_desc *desc;
struct group_desc *grp;
int i;
@@ -2464,7 +2459,7 @@ static int airoha_pinmux_set_mux(struct pinctrl_dev *pctrl_dev,
return -EINVAL;
dev_dbg(pctrl_dev->dev, "enable function %s group %s\n",
- desc->func.name, grp->grp.name);
+ desc->func->name, grp->grp.name);
func = desc->data;
for (i = 0; i < func->group_size; i++) {
@@ -2773,7 +2768,7 @@ static int airoha_pinconf_set(struct pinctrl_dev *pctrl_dev,
break;
case PIN_CONFIG_OUTPUT_ENABLE:
case PIN_CONFIG_INPUT_ENABLE:
- case PIN_CONFIG_OUTPUT: {
+ case PIN_CONFIG_LEVEL: {
bool input = param == PIN_CONFIG_INPUT_ENABLE;
int err;
@@ -2782,7 +2777,7 @@ static int airoha_pinconf_set(struct pinctrl_dev *pctrl_dev,
if (err)
return err;
- if (param == PIN_CONFIG_OUTPUT) {
+ if (param == PIN_CONFIG_LEVEL) {
err = airoha_pinconf_set_pin_value(pctrl_dev,
pin, !!arg);
if (err)
@@ -2911,11 +2906,11 @@ static int airoha_pinctrl_probe(struct platform_device *pdev)
func = &airoha_pinctrl_funcs[i];
err = pinmux_generic_add_pinfunction(pinctrl->ctrl,
- &func->desc.func,
+ &func->desc,
(void *)func);
if (err < 0) {
dev_err(dev, "Failed to register function %s\n",
- func->desc.func.name);
+ func->desc.name);
return err;
}
}
diff --git a/drivers/pinctrl/mediatek/pinctrl-moore.c b/drivers/pinctrl/mediatek/pinctrl-moore.c
index 6e4f6c07a509..70f608347a5f 100644
--- a/drivers/pinctrl/mediatek/pinctrl-moore.c
+++ b/drivers/pinctrl/mediatek/pinctrl-moore.c
@@ -43,7 +43,7 @@ static int mtk_pinmux_set_mux(struct pinctrl_dev *pctldev,
unsigned int selector, unsigned int group)
{
struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev);
- struct function_desc *func;
+ const struct function_desc *func;
struct group_desc *grp;
int i, err;
@@ -56,7 +56,7 @@ static int mtk_pinmux_set_mux(struct pinctrl_dev *pctldev,
return -EINVAL;
dev_dbg(pctldev->dev, "enable function %s group %s\n",
- func->func.name, grp->grp.name);
+ func->func->name, grp->grp.name);
for (i = 0; i < grp->grp.npins; i++) {
const struct mtk_pin_desc *desc;
@@ -332,7 +332,7 @@ static int mtk_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
goto err;
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DIR,
MTK_OUTPUT);
if (err)
@@ -622,11 +622,9 @@ static int mtk_build_functions(struct mtk_pinctrl *hw)
int i, err;
for (i = 0; i < hw->soc->nfuncs ; i++) {
- const struct function_desc *function = hw->soc->funcs + i;
- const struct pinfunction *func = &function->func;
+ const struct pinfunction *func = hw->soc->funcs + i;
- err = pinmux_generic_add_pinfunction(hw->pctrl, func,
- function->data);
+ err = pinmux_generic_add_pinfunction(hw->pctrl, func, NULL);
if (err < 0) {
dev_err(hw->dev, "Failed to register function %s\n",
func->name);
diff --git a/drivers/pinctrl/mediatek/pinctrl-moore.h b/drivers/pinctrl/mediatek/pinctrl-moore.h
index 229d19561e22..fe1f087cacd0 100644
--- a/drivers/pinctrl/mediatek/pinctrl-moore.h
+++ b/drivers/pinctrl/mediatek/pinctrl-moore.h
@@ -43,11 +43,8 @@
.data = id##_funcs, \
}
-#define PINCTRL_PIN_FUNCTION(_name_, id) \
- { \
- .func = PINCTRL_PINFUNCTION(_name_, id##_groups, ARRAY_SIZE(id##_groups)), \
- .data = NULL, \
- }
+#define PINCTRL_PIN_FUNCTION(_name_, id) \
+ PINCTRL_PINFUNCTION(_name_, id##_groups, ARRAY_SIZE(id##_groups))
int mtk_moore_pinctrl_probe(struct platform_device *pdev,
const struct mtk_pin_soc *soc);
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7622.c b/drivers/pinctrl/mediatek/pinctrl-mt7622.c
index 2dc101991066..d5777889448a 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt7622.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt7622.c
@@ -822,7 +822,7 @@ static const char *mt7622_uart_groups[] = { "uart0_0_tx_rx",
"uart4_2_rts_cts",};
static const char *mt7622_wdt_groups[] = { "watchdog", };
-static const struct function_desc mt7622_functions[] = {
+static const struct pinfunction mt7622_functions[] = {
PINCTRL_PIN_FUNCTION("antsel", mt7622_antsel),
PINCTRL_PIN_FUNCTION("emmc", mt7622_emmc),
PINCTRL_PIN_FUNCTION("eth", mt7622_ethernet),
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7623.c b/drivers/pinctrl/mediatek/pinctrl-mt7623.c
index 3e59eada2825..69c06c2c0e21 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt7623.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt7623.c
@@ -1340,7 +1340,7 @@ static const char *mt7623_uart_groups[] = { "uart0_0_txd_rxd",
"uart3_rts_cts", };
static const char *mt7623_wdt_groups[] = { "watchdog_0", "watchdog_1", };
-static const struct function_desc mt7623_functions[] = {
+static const struct pinfunction mt7623_functions[] = {
PINCTRL_PIN_FUNCTION("audck", mt7623_aud_clk),
PINCTRL_PIN_FUNCTION("disp", mt7623_disp_pwm),
PINCTRL_PIN_FUNCTION("eth", mt7623_ethernet),
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7629.c b/drivers/pinctrl/mediatek/pinctrl-mt7629.c
index 98142e8c9801..cc0694881ac9 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt7629.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt7629.c
@@ -384,7 +384,7 @@ static const char *mt7629_wdt_groups[] = { "watchdog", };
static const char *mt7629_wifi_groups[] = { "wf0_5g", "wf0_2g", };
static const char *mt7629_flash_groups[] = { "snfi", "spi_nor" };
-static const struct function_desc mt7629_functions[] = {
+static const struct pinfunction mt7629_functions[] = {
PINCTRL_PIN_FUNCTION("eth", mt7629_ethernet),
PINCTRL_PIN_FUNCTION("i2c", mt7629_i2c),
PINCTRL_PIN_FUNCTION("led", mt7629_led),
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7981.c b/drivers/pinctrl/mediatek/pinctrl-mt7981.c
index 83092be5b614..6216c2e057f6 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt7981.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt7981.c
@@ -977,7 +977,7 @@ static const char *mt7981_ethernet_groups[] = { "smi_mdc_mdio", "gbe_ext_mdc_mdi
"wf0_mode1", "wf0_mode3", "mt7531_int", };
static const char *mt7981_ant_groups[] = { "ant_sel", };
-static const struct function_desc mt7981_functions[] = {
+static const struct pinfunction mt7981_functions[] = {
PINCTRL_PIN_FUNCTION("wa_aice", mt7981_wa_aice),
PINCTRL_PIN_FUNCTION("dfd", mt7981_dfd),
PINCTRL_PIN_FUNCTION("jtag", mt7981_jtag),
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7986.c b/drivers/pinctrl/mediatek/pinctrl-mt7986.c
index 5816b5fdb7ca..2a762ade9c35 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt7986.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt7986.c
@@ -878,7 +878,7 @@ static const char *mt7986_uart_groups[] = {
static const char *mt7986_wdt_groups[] = { "watchdog", };
static const char *mt7986_wf_groups[] = { "wf_2g", "wf_5g", "wf_dbdc", };
-static const struct function_desc mt7986_functions[] = {
+static const struct pinfunction mt7986_functions[] = {
PINCTRL_PIN_FUNCTION("audio", mt7986_audio),
PINCTRL_PIN_FUNCTION("emmc", mt7986_emmc),
PINCTRL_PIN_FUNCTION("eth", mt7986_ethernet),
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7988.c b/drivers/pinctrl/mediatek/pinctrl-mt7988.c
index 68b4097792b8..9569e8c0cec1 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt7988.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt7988.c
@@ -1464,33 +1464,23 @@ static const char * const mt7988_usb_groups[] = {
"drv_vbus_p1",
};
-static const struct function_desc mt7988_functions[] = {
- { { "audio", mt7988_audio_groups, ARRAY_SIZE(mt7988_audio_groups) },
- NULL },
- { { "jtag", mt7988_jtag_groups, ARRAY_SIZE(mt7988_jtag_groups) },
- NULL },
- { { "int_usxgmii", mt7988_int_usxgmii_groups,
- ARRAY_SIZE(mt7988_int_usxgmii_groups) },
- NULL },
- { { "pwm", mt7988_pwm_groups, ARRAY_SIZE(mt7988_pwm_groups) }, NULL },
- { { "dfd", mt7988_dfd_groups, ARRAY_SIZE(mt7988_dfd_groups) }, NULL },
- { { "i2c", mt7988_i2c_groups, ARRAY_SIZE(mt7988_i2c_groups) }, NULL },
- { { "eth", mt7988_ethernet_groups, ARRAY_SIZE(mt7988_ethernet_groups) },
- NULL },
- { { "pcie", mt7988_pcie_groups, ARRAY_SIZE(mt7988_pcie_groups) },
- NULL },
- { { "pmic", mt7988_pmic_groups, ARRAY_SIZE(mt7988_pmic_groups) },
- NULL },
- { { "watchdog", mt7988_wdt_groups, ARRAY_SIZE(mt7988_wdt_groups) },
- NULL },
- { { "spi", mt7988_spi_groups, ARRAY_SIZE(mt7988_spi_groups) }, NULL },
- { { "flash", mt7988_flash_groups, ARRAY_SIZE(mt7988_flash_groups) },
- NULL },
- { { "uart", mt7988_uart_groups, ARRAY_SIZE(mt7988_uart_groups) },
- NULL },
- { { "udi", mt7988_udi_groups, ARRAY_SIZE(mt7988_udi_groups) }, NULL },
- { { "usb", mt7988_usb_groups, ARRAY_SIZE(mt7988_usb_groups) }, NULL },
- { { "led", mt7988_led_groups, ARRAY_SIZE(mt7988_led_groups) }, NULL },
+static const struct pinfunction mt7988_functions[] = {
+ PINCTRL_PIN_FUNCTION("audio", mt7988_audio),
+ PINCTRL_PIN_FUNCTION("jtag", mt7988_jtag),
+ PINCTRL_PIN_FUNCTION("int_usxgmii", mt7988_int_usxgmii),
+ PINCTRL_PIN_FUNCTION("pwm", mt7988_pwm),
+ PINCTRL_PIN_FUNCTION("dfd", mt7988_dfd),
+ PINCTRL_PIN_FUNCTION("i2c", mt7988_i2c),
+ PINCTRL_PIN_FUNCTION("eth", mt7988_ethernet),
+ PINCTRL_PIN_FUNCTION("pcie", mt7988_pcie),
+ PINCTRL_PIN_FUNCTION("pmic", mt7988_pmic),
+ PINCTRL_PIN_FUNCTION("watchdog", mt7988_wdt),
+ PINCTRL_PIN_FUNCTION("spi", mt7988_spi),
+ PINCTRL_PIN_FUNCTION("flash", mt7988_flash),
+ PINCTRL_PIN_FUNCTION("uart", mt7988_uart),
+ PINCTRL_PIN_FUNCTION("udi", mt7988_udi),
+ PINCTRL_PIN_FUNCTION("usb", mt7988_usb),
+ PINCTRL_PIN_FUNCTION("led", mt7988_led),
};
static const struct mtk_eint_hw mt7988_eint_hw = {
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h
index 36d2898037dd..fa7c0ed49346 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h
@@ -238,7 +238,7 @@ struct mtk_pin_soc {
unsigned int npins;
const struct group_desc *grps;
unsigned int ngrps;
- const struct function_desc *funcs;
+ const struct pinfunction *funcs;
unsigned int nfuncs;
const struct mtk_eint_regs *eint_regs;
const struct mtk_eint_hw *eint_hw;
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index d10306024111..d6a46fe0cda8 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -384,7 +384,7 @@ static int mtk_pconf_parse_conf(struct pinctrl_dev *pctldev,
mtk_pmx_gpio_set_direction(pctldev, NULL, pin, true);
ret = mtk_pconf_set_ies_smt(pctl, pin, arg, param);
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
mtk_gpio_set(pctl->chip, pin, arg);
ret = mtk_pmx_gpio_set_direction(pctldev, NULL, pin, false);
break;
diff --git a/drivers/pinctrl/mediatek/pinctrl-paris.c b/drivers/pinctrl/mediatek/pinctrl-paris.c
index 3e714554789d..6bf37d8085fa 100644
--- a/drivers/pinctrl/mediatek/pinctrl-paris.c
+++ b/drivers/pinctrl/mediatek/pinctrl-paris.c
@@ -169,7 +169,7 @@ static int mtk_pinconf_get(struct pinctrl_dev *pctldev,
if (!ret)
err = -EINVAL;
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DIR, &ret);
if (err)
break;
@@ -292,7 +292,7 @@ static int mtk_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
/* regard all non-zero value as enable */
err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_SR, !!arg);
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DO,
arg);
if (err)
diff --git a/drivers/pinctrl/meson/pinctrl-amlogic-a4.c b/drivers/pinctrl/meson/pinctrl-amlogic-a4.c
index 6132710aff68..d9e3a8d5932a 100644
--- a/drivers/pinctrl/meson/pinctrl-amlogic-a4.c
+++ b/drivers/pinctrl/meson/pinctrl-amlogic-a4.c
@@ -422,7 +422,7 @@ static int aml_pinconf_get(struct pinctrl_dev *pcdev, unsigned int pin,
return -EINVAL;
arg = 1;
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
ret = aml_pinconf_get_output(info, pin);
if (ret <= 0)
return -EINVAL;
@@ -568,7 +568,7 @@ static int aml_pinconf_set(struct pinctrl_dev *pcdev, unsigned int pin,
switch (param) {
case PIN_CONFIG_DRIVE_STRENGTH_UA:
case PIN_CONFIG_OUTPUT_ENABLE:
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
arg = pinconf_to_config_argument(configs[i]);
break;
@@ -592,7 +592,7 @@ static int aml_pinconf_set(struct pinctrl_dev *pcdev, unsigned int pin,
case PIN_CONFIG_OUTPUT_ENABLE:
ret = aml_pinconf_set_output(info, pin, arg);
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
ret = aml_pinconf_set_output_drive(info, pin, arg);
break;
default:
diff --git a/drivers/pinctrl/meson/pinctrl-meson-g12a.c b/drivers/pinctrl/meson/pinctrl-meson-g12a.c
index 8b9130c6e170..117e72b4ffcb 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-g12a.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-g12a.c
@@ -442,6 +442,8 @@ static const unsigned int tdm_c_dout1_z_pins[] = { GPIOZ_3 };
static const unsigned int tdm_c_dout2_z_pins[] = { GPIOZ_4 };
static const unsigned int tdm_c_dout3_z_pins[] = { GPIOZ_5 };
+static const unsigned int pcie_clkreqn_pins[] = { GPIOC_7 };
+
static const struct meson_pmx_group meson_g12a_periphs_groups[] = {
GPIO_GROUP(GPIOZ_0),
GPIO_GROUP(GPIOZ_1),
@@ -721,6 +723,7 @@ static const struct meson_pmx_group meson_g12a_periphs_groups[] = {
GROUP(pdm_din2_c, 4),
GROUP(pdm_din3_c, 4),
GROUP(pdm_dclk_c, 4),
+ GROUP(pcie_clkreqn, 1),
/* bank GPIOH */
GROUP(spi1_mosi, 3),
@@ -1183,6 +1186,10 @@ static const char * const tdm_c_groups[] = {
"tdm_c_dout2_z", "tdm_c_dout3_z",
};
+static const char * const pcie_clkreqn_groups[] = {
+ "pcie_clkreqn"
+};
+
static const char * const gpio_aobus_groups[] = {
"GPIOAO_0", "GPIOAO_1", "GPIOAO_2", "GPIOAO_3", "GPIOAO_4",
"GPIOAO_5", "GPIOAO_6", "GPIOAO_7", "GPIOAO_8", "GPIOAO_9",
@@ -1309,6 +1316,7 @@ static const struct meson_pmx_func meson_g12a_periphs_functions[] = {
FUNCTION(tdm_a),
FUNCTION(tdm_b),
FUNCTION(tdm_c),
+ FUNCTION(pcie_clkreqn),
};
static const struct meson_pmx_func meson_g12a_aobus_functions[] = {
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxl.c b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
index 9171de657f97..a75762e4d264 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxl.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
@@ -187,6 +187,9 @@ static const unsigned int i2c_sda_c_pins[] = { GPIODV_28 };
static const unsigned int i2c_sck_c_dv19_pins[] = { GPIODV_19 };
static const unsigned int i2c_sda_c_dv18_pins[] = { GPIODV_18 };
+static const unsigned int i2c_sck_d_pins[] = { GPIOX_11 };
+static const unsigned int i2c_sda_d_pins[] = { GPIOX_10 };
+
static const unsigned int eth_mdio_pins[] = { GPIOZ_0 };
static const unsigned int eth_mdc_pins[] = { GPIOZ_1 };
static const unsigned int eth_clk_rx_clk_pins[] = { GPIOZ_2 };
@@ -411,6 +414,8 @@ static const struct meson_pmx_group meson_gxl_periphs_groups[] = {
GPIO_GROUP(GPIO_TEST_N),
/* Bank X */
+ GROUP(i2c_sda_d, 5, 5),
+ GROUP(i2c_sck_d, 5, 4),
GROUP(sdio_d0, 5, 31),
GROUP(sdio_d1, 5, 30),
GROUP(sdio_d2, 5, 29),
@@ -651,6 +656,10 @@ static const char * const i2c_c_groups[] = {
"i2c_sck_c", "i2c_sda_c", "i2c_sda_c_dv18", "i2c_sck_c_dv19",
};
+static const char * const i2c_d_groups[] = {
+ "i2c_sck_d", "i2c_sda_d",
+};
+
static const char * const eth_groups[] = {
"eth_mdio", "eth_mdc", "eth_clk_rx_clk", "eth_rx_dv",
"eth_rxd0", "eth_rxd1", "eth_rxd2", "eth_rxd3",
@@ -777,6 +786,7 @@ static const struct meson_pmx_func meson_gxl_periphs_functions[] = {
FUNCTION(i2c_a),
FUNCTION(i2c_b),
FUNCTION(i2c_c),
+ FUNCTION(i2c_d),
FUNCTION(eth),
FUNCTION(pwm_a),
FUNCTION(pwm_b),
diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
index 277e9c40490d..18295b15ecd9 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.c
+++ b/drivers/pinctrl/meson/pinctrl-meson.c
@@ -360,7 +360,7 @@ static int meson_pinconf_set(struct pinctrl_dev *pcdev, unsigned int pin,
switch (param) {
case PIN_CONFIG_DRIVE_STRENGTH_UA:
case PIN_CONFIG_OUTPUT_ENABLE:
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
arg = pinconf_to_config_argument(configs[i]);
break;
@@ -384,7 +384,7 @@ static int meson_pinconf_set(struct pinctrl_dev *pcdev, unsigned int pin,
case PIN_CONFIG_OUTPUT_ENABLE:
ret = meson_pinconf_set_output(pc, pin, arg);
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
ret = meson_pinconf_set_output_drive(pc, pin, arg);
break;
default:
@@ -502,7 +502,7 @@ static int meson_pinconf_get(struct pinctrl_dev *pcdev, unsigned int pin,
return -EINVAL;
arg = 1;
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
ret = meson_pinconf_get_output(pc, pin);
if (ret <= 0)
return -EINVAL;
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
index 881df5e08f61..81dfbd5e7f07 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
@@ -420,7 +420,8 @@ static int armada_37xx_gpio_direction_output(struct gpio_chip *chip,
struct armada_37xx_pinctrl *info = gpiochip_get_data(chip);
unsigned int en_offset = offset;
unsigned int reg = OUTPUT_VAL;
- unsigned int mask, val, ret;
+ unsigned int mask, val;
+ int ret;
armada_37xx_update_reg(&reg, &offset);
mask = BIT(offset);
@@ -634,8 +635,9 @@ static int armada_37xx_edge_both_irq_swap_pol(struct armada_37xx_pinctrl *info,
{
u32 reg_idx = pin_idx / GPIO_PER_REG;
u32 bit_num = pin_idx % GPIO_PER_REG;
- u32 p, l, ret;
unsigned long flags;
+ u32 p, l;
+ int ret;
regmap_read(info->regmap, INPUT_VAL + 4*reg_idx, &l);
diff --git a/drivers/pinctrl/nomadik/pinctrl-abx500.c b/drivers/pinctrl/nomadik/pinctrl-abx500.c
index 7b5f94d8cb23..fc7ebeda8440 100644
--- a/drivers/pinctrl/nomadik/pinctrl-abx500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-abx500.c
@@ -860,8 +860,8 @@ static int abx500_pin_config_set(struct pinctrl_dev *pctldev,
dev_dbg(chip->parent, "pin %d [%#lx]: %s %s\n",
pin, configs[i],
- (param == PIN_CONFIG_OUTPUT) ? "output " : "input",
- (param == PIN_CONFIG_OUTPUT) ?
+ (param == PIN_CONFIG_LEVEL) ? "output " : "input",
+ (param == PIN_CONFIG_LEVEL) ?
str_high_low(argument) :
(argument ? "pull up" : "pull down"));
@@ -907,7 +907,7 @@ static int abx500_pin_config_set(struct pinctrl_dev *pctldev,
ret = abx500_gpio_direction_input(chip, offset);
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
ret = abx500_gpio_direction_output(chip, offset,
argument);
break;
diff --git a/drivers/pinctrl/nuvoton/pinctrl-ma35.c b/drivers/pinctrl/nuvoton/pinctrl-ma35.c
index 54652bfbe6ac..cdad01d68a37 100644
--- a/drivers/pinctrl/nuvoton/pinctrl-ma35.c
+++ b/drivers/pinctrl/nuvoton/pinctrl-ma35.c
@@ -1038,7 +1038,8 @@ static int ma35_pinctrl_parse_functions(struct fwnode_handle *fwnode, struct ma3
struct group_desc *grp;
static u32 grp_index;
const char **groups;
- u32 ret, i = 0;
+ u32 i = 0;
+ int ret;
dev_dbg(npctl->dev, "parse function(%d): %s\n", index, np->name);
diff --git a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
index 10765f19b48b..13ed87d5d30c 100644
--- a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
+++ b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
@@ -1700,13 +1700,13 @@ static int npcm7xx_config_get(struct pinctrl_dev *pctldev, unsigned int pin,
else if (param == PIN_CONFIG_BIAS_PULL_DOWN)
rc = (!pu && pd);
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
case PIN_CONFIG_INPUT_ENABLE:
ie = ioread32(bank->base + NPCM7XX_GP_N_IEM) & pinmask;
oe = ioread32(bank->base + NPCM7XX_GP_N_OE) & pinmask;
if (param == PIN_CONFIG_INPUT_ENABLE)
rc = (ie && !oe);
- else if (param == PIN_CONFIG_OUTPUT)
+ else if (param == PIN_CONFIG_LEVEL)
rc = (!ie && oe);
break;
case PIN_CONFIG_DRIVE_PUSH_PULL:
@@ -1765,7 +1765,7 @@ static int npcm7xx_config_set_one(struct npcm7xx_pinctrl *npcm,
iowrite32(gpio, bank->base + NPCM7XX_GP_N_OEC);
bank->direction_input(&bank->chip.gc, pin % bank->chip.gc.ngpio);
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
bank->direction_output(&bank->chip.gc, pin % bank->chip.gc.ngpio, arg);
iowrite32(gpio, bank->base + NPCM7XX_GP_N_OES);
break;
@@ -1836,7 +1836,7 @@ static int npcm7xx_gpio_of(struct npcm7xx_pinctrl *pctrl)
if (!pctrl->gpio_bank[id].base)
return -EINVAL;
- config = (typeof(config)){
+ config = (struct gpio_generic_chip_config) {
.dev = dev,
.sz = 4,
.dat = pctrl->gpio_bank[id].base + NPCM7XX_GP_N_DIN,
diff --git a/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c b/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c
index 1005b464a469..0aae1a253459 100644
--- a/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c
+++ b/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c
@@ -2187,13 +2187,13 @@ static int npcm8xx_config_get(struct pinctrl_dev *pctldev, unsigned int pin,
else if (param == PIN_CONFIG_BIAS_PULL_DOWN)
rc = !pu && pd;
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
case PIN_CONFIG_INPUT_ENABLE:
ie = ioread32(bank->base + NPCM8XX_GP_N_IEM) & pinmask;
oe = ioread32(bank->base + NPCM8XX_GP_N_OE) & pinmask;
if (param == PIN_CONFIG_INPUT_ENABLE)
rc = (ie && !oe);
- else if (param == PIN_CONFIG_OUTPUT)
+ else if (param == PIN_CONFIG_LEVEL)
rc = (!ie && oe);
break;
case PIN_CONFIG_DRIVE_PUSH_PULL:
@@ -2251,7 +2251,7 @@ static int npcm8xx_config_set_one(struct npcm8xx_pinctrl *npcm,
iowrite32(gpio, bank->base + NPCM8XX_GP_N_OEC);
bank->direction_input(&bank->chip.gc, pin % bank->chip.gc.ngpio);
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
bank->direction_output(&bank->chip.gc, pin % bank->chip.gc.ngpio, arg);
iowrite32(gpio, bank->base + NPCM8XX_GP_N_OES);
break;
@@ -2329,7 +2329,7 @@ static int npcm8xx_gpio_fw(struct npcm8xx_pinctrl *pctrl)
if (!pctrl->gpio_bank[id].base)
return dev_err_probe(dev, -ENXIO, "fwnode_iomap id %d failed\n", id);
- config = (typeof(config)){
+ config = (struct gpio_generic_chip_config) {
.dev = dev,
.sz = 4,
.dat = pctrl->gpio_bank[id].base + NPCM8XX_GP_N_DIN,
diff --git a/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c b/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c
index c575949e42e6..d624a4d302a8 100644
--- a/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c
+++ b/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c
@@ -1064,7 +1064,7 @@ static int wpcm450_gpio_register(struct platform_device *pdev,
flags = GPIO_GENERIC_NO_OUTPUT;
}
- config = (typeof(config)){
+ config = (struct gpio_generic_chip_config) {
.dev = dev,
.sz = 4,
.dat = dat,
diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c
index d67838afb085..5de6ff62c69b 100644
--- a/drivers/pinctrl/pinconf-generic.c
+++ b/drivers/pinctrl/pinconf-generic.c
@@ -48,7 +48,7 @@ static const struct pin_config_item conf_items[] = {
PCONFDUMP(PIN_CONFIG_INPUT_SCHMITT_ENABLE, "input schmitt enabled", NULL, false),
PCONFDUMP(PIN_CONFIG_MODE_LOW_POWER, "pin low power", "mode", true),
PCONFDUMP(PIN_CONFIG_OUTPUT_ENABLE, "output enabled", NULL, false),
- PCONFDUMP(PIN_CONFIG_OUTPUT, "pin output", "level", true),
+ PCONFDUMP(PIN_CONFIG_LEVEL, "pin output", "level", true),
PCONFDUMP(PIN_CONFIG_OUTPUT_IMPEDANCE_OHMS, "output impedance", "ohms", true),
PCONFDUMP(PIN_CONFIG_POWER_SOURCE, "pin power source", "selector", true),
PCONFDUMP(PIN_CONFIG_SLEEP_HARDWARE_STATE, "sleep hardware state", NULL, false),
@@ -183,9 +183,9 @@ static const struct pinconf_generic_params dt_params[] = {
{ "low-power-enable", PIN_CONFIG_MODE_LOW_POWER, 1 },
{ "output-disable", PIN_CONFIG_OUTPUT_ENABLE, 0 },
{ "output-enable", PIN_CONFIG_OUTPUT_ENABLE, 1 },
- { "output-high", PIN_CONFIG_OUTPUT, 1, },
+ { "output-high", PIN_CONFIG_LEVEL, 1, },
{ "output-impedance-ohms", PIN_CONFIG_OUTPUT_IMPEDANCE_OHMS, 0 },
- { "output-low", PIN_CONFIG_OUTPUT, 0, },
+ { "output-low", PIN_CONFIG_LEVEL, 0, },
{ "power-source", PIN_CONFIG_POWER_SOURCE, 0 },
{ "sleep-hardware-state", PIN_CONFIG_SLEEP_HARDWARE_STATE, 0 },
{ "slew-rate", PIN_CONFIG_SLEW_RATE, 0 },
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 09a5425d54ba..2dac5c71eb00 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -383,14 +383,15 @@ static void amd_gpio_irq_enable(struct irq_data *d)
unsigned long flags;
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
- gpiochip_enable_irq(gc, d->hwirq);
+ gpiochip_enable_irq(gc, hwirq);
raw_spin_lock_irqsave(&gpio_dev->lock, flags);
- pin_reg = readl(gpio_dev->base + (d->hwirq)*4);
+ pin_reg = readl(gpio_dev->base + hwirq * 4);
pin_reg |= BIT(INTERRUPT_ENABLE_OFF);
pin_reg |= BIT(INTERRUPT_MASK_OFF);
- writel(pin_reg, gpio_dev->base + (d->hwirq)*4);
+ writel(pin_reg, gpio_dev->base + hwirq * 4);
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
}
@@ -400,15 +401,16 @@ static void amd_gpio_irq_disable(struct irq_data *d)
unsigned long flags;
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
raw_spin_lock_irqsave(&gpio_dev->lock, flags);
- pin_reg = readl(gpio_dev->base + (d->hwirq)*4);
+ pin_reg = readl(gpio_dev->base + hwirq * 4);
pin_reg &= ~BIT(INTERRUPT_ENABLE_OFF);
pin_reg &= ~BIT(INTERRUPT_MASK_OFF);
- writel(pin_reg, gpio_dev->base + (d->hwirq)*4);
+ writel(pin_reg, gpio_dev->base + hwirq * 4);
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
- gpiochip_disable_irq(gc, d->hwirq);
+ gpiochip_disable_irq(gc, hwirq);
}
static void amd_gpio_irq_mask(struct irq_data *d)
@@ -417,11 +419,12 @@ static void amd_gpio_irq_mask(struct irq_data *d)
unsigned long flags;
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
raw_spin_lock_irqsave(&gpio_dev->lock, flags);
- pin_reg = readl(gpio_dev->base + (d->hwirq)*4);
+ pin_reg = readl(gpio_dev->base + hwirq * 4);
pin_reg &= ~BIT(INTERRUPT_MASK_OFF);
- writel(pin_reg, gpio_dev->base + (d->hwirq)*4);
+ writel(pin_reg, gpio_dev->base + hwirq * 4);
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
}
@@ -431,11 +434,12 @@ static void amd_gpio_irq_unmask(struct irq_data *d)
unsigned long flags;
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
raw_spin_lock_irqsave(&gpio_dev->lock, flags);
- pin_reg = readl(gpio_dev->base + (d->hwirq)*4);
+ pin_reg = readl(gpio_dev->base + hwirq * 4);
pin_reg |= BIT(INTERRUPT_MASK_OFF);
- writel(pin_reg, gpio_dev->base + (d->hwirq)*4);
+ writel(pin_reg, gpio_dev->base + hwirq * 4);
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
}
@@ -446,17 +450,21 @@ static int amd_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
u32 wake_mask = BIT(WAKE_CNTRL_OFF_S0I3) | BIT(WAKE_CNTRL_OFF_S3);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
int err;
+ pm_pr_dbg("Setting wake for GPIO %lu to %s\n",
+ hwirq, str_enable_disable(on));
+
raw_spin_lock_irqsave(&gpio_dev->lock, flags);
- pin_reg = readl(gpio_dev->base + (d->hwirq)*4);
+ pin_reg = readl(gpio_dev->base + hwirq * 4);
if (on)
pin_reg |= wake_mask;
else
pin_reg &= ~wake_mask;
- writel(pin_reg, gpio_dev->base + (d->hwirq)*4);
+ writel(pin_reg, gpio_dev->base + hwirq * 4);
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
if (on)
@@ -492,9 +500,10 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
unsigned long flags;
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
raw_spin_lock_irqsave(&gpio_dev->lock, flags);
- pin_reg = readl(gpio_dev->base + (d->hwirq)*4);
+ pin_reg = readl(gpio_dev->base + hwirq * 4);
switch (type & IRQ_TYPE_SENSE_MASK) {
case IRQ_TYPE_EDGE_RISING:
@@ -560,10 +569,10 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
pin_reg_irq_en = pin_reg;
pin_reg_irq_en |= mask;
pin_reg_irq_en &= ~BIT(INTERRUPT_MASK_OFF);
- writel(pin_reg_irq_en, gpio_dev->base + (d->hwirq)*4);
- while ((readl(gpio_dev->base + (d->hwirq)*4) & mask) != mask)
+ writel(pin_reg_irq_en, gpio_dev->base + hwirq * 4);
+ while ((readl(gpio_dev->base + hwirq * 4) & mask) != mask)
continue;
- writel(pin_reg, gpio_dev->base + (d->hwirq)*4);
+ writel(pin_reg, gpio_dev->base + hwirq * 4);
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
return ret;
diff --git a/drivers/pinctrl/pinctrl-apple-gpio.c b/drivers/pinctrl/pinctrl-apple-gpio.c
index a09daa72bfe4..e1a7bc8cf765 100644
--- a/drivers/pinctrl/pinctrl-apple-gpio.c
+++ b/drivers/pinctrl/pinctrl-apple-gpio.c
@@ -515,6 +515,7 @@ static int apple_gpio_pinctrl_probe(struct platform_device *pdev)
}
static const struct of_device_id apple_gpio_pinctrl_of_match[] = {
+ { .compatible = "apple,t8103-pinctrl", },
{ .compatible = "apple,pinctrl", },
{ }
};
diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
index 35ea3414cb96..ec5351fc282e 100644
--- a/drivers/pinctrl/pinctrl-at91-pio4.c
+++ b/drivers/pinctrl/pinctrl-at91-pio4.c
@@ -862,7 +862,7 @@ static int atmel_conf_pin_config_group_set(struct pinctrl_dev *pctldev,
conf |= ATMEL_PIO_IFSCEN_MASK;
}
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
conf |= ATMEL_PIO_DIR_MASK;
bank = ATMEL_PIO_BANK(pin_id);
pin = ATMEL_PIO_LINE(pin_id);
diff --git a/drivers/pinctrl/pinctrl-aw9523.c b/drivers/pinctrl/pinctrl-aw9523.c
index 890b83fddea3..479553a79216 100644
--- a/drivers/pinctrl/pinctrl-aw9523.c
+++ b/drivers/pinctrl/pinctrl-aw9523.c
@@ -215,7 +215,7 @@ static int aw9523_pcfg_param_to_reg(enum pin_config_param pcp, int pin, u8 *r)
case PIN_CONFIG_OUTPUT_ENABLE:
reg = AW9523_REG_CONF_STATE(pin);
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
reg = AW9523_REG_OUT_STATE(pin);
break;
default:
@@ -249,7 +249,7 @@ static int aw9523_pconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
switch (param) {
case PIN_CONFIG_BIAS_PULL_UP:
case PIN_CONFIG_INPUT_ENABLE:
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
val &= BIT(regbit);
break;
case PIN_CONFIG_BIAS_PULL_DOWN:
@@ -301,7 +301,7 @@ static int aw9523_pconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
goto end;
switch (param) {
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
/* First, enable pin output */
rc = regmap_update_bits(awi->regmap,
AW9523_REG_CONF_STATE(pin),
diff --git a/drivers/pinctrl/pinctrl-cy8c95x0.c b/drivers/pinctrl/pinctrl-cy8c95x0.c
index cf7f80497fde..a4b04bf6d081 100644
--- a/drivers/pinctrl/pinctrl-cy8c95x0.c
+++ b/drivers/pinctrl/pinctrl-cy8c95x0.c
@@ -808,7 +808,7 @@ static int cy8c95x0_gpio_get_pincfg(struct cy8c95x0_pinctrl *chip,
case PIN_CONFIG_MODE_PWM:
reg = CY8C95X0_SELPWM;
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
reg = CY8C95X0_OUTPUT;
break;
case PIN_CONFIG_OUTPUT_ENABLE:
diff --git a/drivers/pinctrl/pinctrl-eic7700.c b/drivers/pinctrl/pinctrl-eic7700.c
index 4874b5532343..ffcd0ec5c2dc 100644
--- a/drivers/pinctrl/pinctrl-eic7700.c
+++ b/drivers/pinctrl/pinctrl-eic7700.c
@@ -634,7 +634,7 @@ static int eic7700_pinctrl_probe(struct platform_device *pdev)
return PTR_ERR(pc->base);
regulator = devm_regulator_get(dev, "vrgmii");
- if (IS_ERR_OR_NULL(regulator)) {
+ if (IS_ERR(regulator)) {
return dev_err_probe(dev, PTR_ERR(regulator),
"failed to get vrgmii regulator\n");
}
diff --git a/drivers/pinctrl/pinctrl-equilibrium.c b/drivers/pinctrl/pinctrl-equilibrium.c
index 210044185679..2d04829b29c9 100644
--- a/drivers/pinctrl/pinctrl-equilibrium.c
+++ b/drivers/pinctrl/pinctrl-equilibrium.c
@@ -241,7 +241,7 @@ static int gpiolib_reg(struct eqbr_pinctrl_drv_data *drvdata)
}
raw_spin_lock_init(&gctrl->lock);
- config = (typeof(config)){
+ config = (struct gpio_generic_chip_config) {
.dev = dev,
.sz = gctrl->bank->nr_pins / 8,
.dat = gctrl->membase + GPIO_IN,
@@ -325,7 +325,7 @@ static int eqbr_pinmux_set_mux(struct pinctrl_dev *pctldev,
unsigned int selector, unsigned int group)
{
struct eqbr_pinctrl_drv_data *pctl = pinctrl_dev_get_drvdata(pctldev);
- struct function_desc *func;
+ const struct function_desc *func;
struct group_desc *grp;
unsigned int *pinmux;
int i;
@@ -445,7 +445,7 @@ static int eqbr_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
}
raw_spin_unlock_irqrestore(&pctl->lock, flags);
*config = pinconf_to_config_packed(param, val);
-;
+
return 0;
}
diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
index 2900513467fa..c7f14546de05 100644
--- a/drivers/pinctrl/pinctrl-ingenic.c
+++ b/drivers/pinctrl/pinctrl-ingenic.c
@@ -96,11 +96,8 @@
.data = (void *)func, \
}
-#define INGENIC_PIN_FUNCTION(_name_, id) \
- { \
- .func = PINCTRL_PINFUNCTION(_name_, id##_groups, ARRAY_SIZE(id##_groups)), \
- .data = NULL, \
- }
+#define INGENIC_PIN_FUNCTION(_name_, id) \
+ PINCTRL_PINFUNCTION(_name_, id##_groups, ARRAY_SIZE(id##_groups))
enum jz_version {
ID_JZ4730,
@@ -128,7 +125,7 @@ struct ingenic_chip_info {
const struct group_desc *groups;
unsigned int num_groups;
- const struct function_desc *functions;
+ const struct pinfunction *functions;
unsigned int num_functions;
const u32 *pull_ups, *pull_downs;
@@ -263,7 +260,7 @@ static const char *jz4730_pwm1_groups[] = { "pwm1", };
static const char *jz4730_mii_groups[] = { "mii", };
static const char *jz4730_i2s_groups[] = { "i2s-data", "i2s-master", "i2s-slave", };
-static const struct function_desc jz4730_functions[] = {
+static const struct pinfunction jz4730_functions[] = {
INGENIC_PIN_FUNCTION("mmc", jz4730_mmc),
INGENIC_PIN_FUNCTION("uart0", jz4730_uart0),
INGENIC_PIN_FUNCTION("uart1", jz4730_uart1),
@@ -370,7 +367,7 @@ static const char *jz4740_pwm5_groups[] = { "pwm5", };
static const char *jz4740_pwm6_groups[] = { "pwm6", };
static const char *jz4740_pwm7_groups[] = { "pwm7", };
-static const struct function_desc jz4740_functions[] = {
+static const struct pinfunction jz4740_functions[] = {
INGENIC_PIN_FUNCTION("mmc", jz4740_mmc),
INGENIC_PIN_FUNCTION("uart0", jz4740_uart0),
INGENIC_PIN_FUNCTION("uart1", jz4740_uart1),
@@ -474,7 +471,7 @@ static const char *jz4725b_pwm3_groups[] = { "pwm3", };
static const char *jz4725b_pwm4_groups[] = { "pwm4", };
static const char *jz4725b_pwm5_groups[] = { "pwm5", };
-static const struct function_desc jz4725b_functions[] = {
+static const struct pinfunction jz4725b_functions[] = {
INGENIC_PIN_FUNCTION("mmc0", jz4725b_mmc0),
INGENIC_PIN_FUNCTION("mmc1", jz4725b_mmc1),
INGENIC_PIN_FUNCTION("uart", jz4725b_uart),
@@ -606,7 +603,7 @@ static const char *jz4750_pwm3_groups[] = { "pwm3", };
static const char *jz4750_pwm4_groups[] = { "pwm4", };
static const char *jz4750_pwm5_groups[] = { "pwm5", };
-static const struct function_desc jz4750_functions[] = {
+static const struct pinfunction jz4750_functions[] = {
INGENIC_PIN_FUNCTION("uart0", jz4750_uart0),
INGENIC_PIN_FUNCTION("uart1", jz4750_uart1),
INGENIC_PIN_FUNCTION("uart2", jz4750_uart2),
@@ -771,7 +768,7 @@ static const char *jz4755_pwm3_groups[] = { "pwm3", };
static const char *jz4755_pwm4_groups[] = { "pwm4", };
static const char *jz4755_pwm5_groups[] = { "pwm5", };
-static const struct function_desc jz4755_functions[] = {
+static const struct pinfunction jz4755_functions[] = {
INGENIC_PIN_FUNCTION("uart0", jz4755_uart0),
INGENIC_PIN_FUNCTION("uart1", jz4755_uart1),
INGENIC_PIN_FUNCTION("uart2", jz4755_uart2),
@@ -1106,7 +1103,7 @@ static const char *jz4760_pwm6_groups[] = { "pwm6", };
static const char *jz4760_pwm7_groups[] = { "pwm7", };
static const char *jz4760_otg_groups[] = { "otg-vbus", };
-static const struct function_desc jz4760_functions[] = {
+static const struct pinfunction jz4760_functions[] = {
INGENIC_PIN_FUNCTION("uart0", jz4760_uart0),
INGENIC_PIN_FUNCTION("uart1", jz4760_uart1),
INGENIC_PIN_FUNCTION("uart2", jz4760_uart2),
@@ -1444,7 +1441,7 @@ static const char *jz4770_pwm6_groups[] = { "pwm6", };
static const char *jz4770_pwm7_groups[] = { "pwm7", };
static const char *jz4770_mac_groups[] = { "mac-rmii", "mac-mii", };
-static const struct function_desc jz4770_functions[] = {
+static const struct pinfunction jz4770_functions[] = {
INGENIC_PIN_FUNCTION("uart0", jz4770_uart0),
INGENIC_PIN_FUNCTION("uart1", jz4770_uart1),
INGENIC_PIN_FUNCTION("uart2", jz4770_uart2),
@@ -1723,7 +1720,7 @@ static const char *jz4775_mac_groups[] = {
};
static const char *jz4775_otg_groups[] = { "otg-vbus", };
-static const struct function_desc jz4775_functions[] = {
+static const struct pinfunction jz4775_functions[] = {
INGENIC_PIN_FUNCTION("uart0", jz4775_uart0),
INGENIC_PIN_FUNCTION("uart1", jz4775_uart1),
INGENIC_PIN_FUNCTION("uart2", jz4775_uart2),
@@ -1976,7 +1973,7 @@ static const char *jz4780_dmic_groups[] = { "dmic", };
static const char *jz4780_cim_groups[] = { "cim-data", };
static const char *jz4780_hdmi_ddc_groups[] = { "hdmi-ddc", };
-static const struct function_desc jz4780_functions[] = {
+static const struct pinfunction jz4780_functions[] = {
INGENIC_PIN_FUNCTION("uart0", jz4770_uart0),
INGENIC_PIN_FUNCTION("uart1", jz4770_uart1),
INGENIC_PIN_FUNCTION("uart2", jz4780_uart2),
@@ -2211,7 +2208,7 @@ static const char *x1000_pwm3_groups[] = { "pwm3", };
static const char *x1000_pwm4_groups[] = { "pwm4", };
static const char *x1000_mac_groups[] = { "mac", };
-static const struct function_desc x1000_functions[] = {
+static const struct pinfunction x1000_functions[] = {
INGENIC_PIN_FUNCTION("uart0", x1000_uart0),
INGENIC_PIN_FUNCTION("uart1", x1000_uart1),
INGENIC_PIN_FUNCTION("uart2", x1000_uart2),
@@ -2341,7 +2338,7 @@ static const char *x1500_pwm2_groups[] = { "pwm2", };
static const char *x1500_pwm3_groups[] = { "pwm3", };
static const char *x1500_pwm4_groups[] = { "pwm4", };
-static const struct function_desc x1500_functions[] = {
+static const struct pinfunction x1500_functions[] = {
INGENIC_PIN_FUNCTION("uart0", x1500_uart0),
INGENIC_PIN_FUNCTION("uart1", x1500_uart1),
INGENIC_PIN_FUNCTION("uart2", x1500_uart2),
@@ -2562,7 +2559,7 @@ static const char * const x1600_pwm7_groups[] = { "pwm7-b10", "pwm7-b21", };
static const char * const x1600_mac_groups[] = { "mac", };
-static const struct function_desc x1600_functions[] = {
+static const struct pinfunction x1600_functions[] = {
INGENIC_PIN_FUNCTION("uart0", x1600_uart0),
INGENIC_PIN_FUNCTION("uart1", x1600_uart1),
INGENIC_PIN_FUNCTION("uart2", x1600_uart2),
@@ -2779,7 +2776,7 @@ static const char *x1830_pwm6_groups[] = { "pwm6-c-17", "pwm6-c-27", };
static const char *x1830_pwm7_groups[] = { "pwm7-c-18", "pwm7-c-28", };
static const char *x1830_mac_groups[] = { "mac", };
-static const struct function_desc x1830_functions[] = {
+static const struct pinfunction x1830_functions[] = {
INGENIC_PIN_FUNCTION("uart0", x1830_uart0),
INGENIC_PIN_FUNCTION("uart1", x1830_uart1),
INGENIC_PIN_FUNCTION("sfc", x1830_sfc),
@@ -3225,7 +3222,7 @@ static const char *x2000_mac0_groups[] = { "mac0-rmii", "mac0-rgmii", };
static const char *x2000_mac1_groups[] = { "mac1-rmii", "mac1-rgmii", };
static const char *x2000_otg_groups[] = { "otg-vbus", };
-static const struct function_desc x2000_functions[] = {
+static const struct pinfunction x2000_functions[] = {
INGENIC_PIN_FUNCTION("uart0", x2000_uart0),
INGENIC_PIN_FUNCTION("uart1", x2000_uart1),
INGENIC_PIN_FUNCTION("uart2", x2000_uart2),
@@ -3449,7 +3446,7 @@ static const struct group_desc x2100_groups[] = {
static const char *x2100_mac_groups[] = { "mac", };
-static const struct function_desc x2100_functions[] = {
+static const struct pinfunction x2100_functions[] = {
INGENIC_PIN_FUNCTION("uart0", x2000_uart0),
INGENIC_PIN_FUNCTION("uart1", x2000_uart1),
INGENIC_PIN_FUNCTION("uart2", x2000_uart2),
@@ -4003,7 +4000,7 @@ static int ingenic_pinmux_set_mux(struct pinctrl_dev *pctldev,
unsigned int selector, unsigned int group)
{
struct ingenic_pinctrl *jzpc = pinctrl_dev_get_drvdata(pctldev);
- struct function_desc *func;
+ const struct function_desc *func;
struct group_desc *grp;
unsigned int i;
uintptr_t mode;
@@ -4018,7 +4015,7 @@ static int ingenic_pinmux_set_mux(struct pinctrl_dev *pctldev,
return -EINVAL;
dev_dbg(pctldev->dev, "enable function %s group %s\n",
- func->func.name, grp->grp.name);
+ func->func->name, grp->grp.name);
mode = (uintptr_t)grp->data;
if (mode <= 3) {
@@ -4267,7 +4264,7 @@ static int ingenic_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
case PIN_CONFIG_BIAS_PULL_UP:
case PIN_CONFIG_BIAS_PULL_DOWN:
case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
case PIN_CONFIG_SLEW_RATE:
continue;
default:
@@ -4308,7 +4305,7 @@ static int ingenic_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
ingenic_set_schmitt_trigger(jzpc, pin, arg);
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
ret = pinctrl_gpio_direction_output(jzpc->gc,
pin - jzpc->gc->base);
if (ret)
@@ -4571,11 +4568,9 @@ static int __init ingenic_pinctrl_probe(struct platform_device *pdev)
}
for (i = 0; i < chip_info->num_functions; i++) {
- const struct function_desc *function = &chip_info->functions[i];
- const struct pinfunction *func = &function->func;
+ const struct pinfunction *func = &chip_info->functions[i];
- err = pinmux_generic_add_pinfunction(jzpc->pctl, func,
- function->data);
+ err = pinmux_generic_add_pinfunction(jzpc->pctl, func, NULL);
if (err < 0) {
dev_err(dev, "Failed to register function %s\n", func->name);
return err;
diff --git a/drivers/pinctrl/pinctrl-k210.c b/drivers/pinctrl/pinctrl-k210.c
index 66c04120c29d..ddd6d6bfd513 100644
--- a/drivers/pinctrl/pinctrl-k210.c
+++ b/drivers/pinctrl/pinctrl-k210.c
@@ -551,7 +551,7 @@ static int k210_pinconf_set_param(struct pinctrl_dev *pctldev,
else
val &= ~K210_PC_ST;
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
k210_pinmux_set_pin_function(pctldev, pin, K210_PCF_CONSTANT);
val = readl(&pdata->fpioa->pins[pin]);
val |= K210_PC_MODE_OUT;
diff --git a/drivers/pinctrl/pinctrl-keembay.c b/drivers/pinctrl/pinctrl-keembay.c
index 60cf017498b3..3241d3ae6219 100644
--- a/drivers/pinctrl/pinctrl-keembay.c
+++ b/drivers/pinctrl/pinctrl-keembay.c
@@ -135,6 +135,11 @@ struct keembay_pin_soc {
const struct pinctrl_pin_desc *pins;
};
+struct keembay_pinfunction {
+ struct pinfunction func;
+ u8 mux_mode;
+};
+
static const struct pinctrl_pin_desc keembay_pins[] = {
KEEMBAY_PIN_DESC(0, "GPIO0",
KEEMBAY_MUX(0x0, "I2S0_M0"),
@@ -930,7 +935,7 @@ static int keembay_set_mux(struct pinctrl_dev *pctldev, unsigned int fun_sel,
unsigned int grp_sel)
{
struct keembay_pinctrl *kpc = pinctrl_dev_get_drvdata(pctldev);
- struct function_desc *func;
+ const struct function_desc *func;
struct group_desc *grp;
unsigned int val;
u8 pin_mode;
@@ -1556,13 +1561,13 @@ static int keembay_pinctrl_reg(struct keembay_pinctrl *kpc, struct device *dev)
}
static int keembay_add_functions(struct keembay_pinctrl *kpc,
- struct function_desc *functions)
+ struct keembay_pinfunction *functions)
{
unsigned int i;
/* Assign the groups for each function */
for (i = 0; i < kpc->nfuncs; i++) {
- struct function_desc *func = &functions[i];
+ struct keembay_pinfunction *func = &functions[i];
const char **group_names;
unsigned int grp_idx = 0;
int j;
@@ -1588,14 +1593,14 @@ static int keembay_add_functions(struct keembay_pinctrl *kpc,
/* Add all functions */
for (i = 0; i < kpc->nfuncs; i++)
pinmux_generic_add_pinfunction(kpc->pctrl, &functions[i].func,
- functions[i].data);
+ &functions[i].mux_mode);
return 0;
}
static int keembay_build_functions(struct keembay_pinctrl *kpc)
{
- struct function_desc *keembay_funcs, *new_funcs;
+ struct keembay_pinfunction *keembay_funcs, *new_funcs;
int i;
/*
@@ -1603,7 +1608,8 @@ static int keembay_build_functions(struct keembay_pinctrl *kpc)
* being part of 8 (hw maximum) globally unique muxes.
*/
kpc->nfuncs = 0;
- keembay_funcs = kcalloc(kpc->npins * 8, sizeof(*keembay_funcs), GFP_KERNEL);
+ keembay_funcs = devm_kcalloc(kpc->dev, kpc->npins * 8,
+ sizeof(*keembay_funcs), GFP_KERNEL);
if (!keembay_funcs)
return -ENOMEM;
@@ -1613,7 +1619,7 @@ static int keembay_build_functions(struct keembay_pinctrl *kpc)
struct keembay_mux_desc *mux;
for (mux = pdesc->drv_data; mux->name; mux++) {
- struct function_desc *fdesc;
+ struct keembay_pinfunction *fdesc;
/* Check if we already have function for this mux */
for (fdesc = keembay_funcs; fdesc->func.name; fdesc++) {
@@ -1627,18 +1633,18 @@ static int keembay_build_functions(struct keembay_pinctrl *kpc)
if (!fdesc->func.name) {
fdesc->func.name = mux->name;
fdesc->func.ngroups = 1;
- fdesc->data = &mux->mode;
+ fdesc->mux_mode = mux->mode;
kpc->nfuncs++;
}
}
}
/* Reallocate memory based on actual number of functions */
- new_funcs = krealloc(keembay_funcs, kpc->nfuncs * sizeof(*new_funcs), GFP_KERNEL);
- if (!new_funcs) {
- kfree(keembay_funcs);
+ new_funcs = devm_krealloc_array(kpc->dev, keembay_funcs,
+ kpc->nfuncs, sizeof(*new_funcs),
+ GFP_KERNEL);
+ if (!new_funcs)
return -ENOMEM;
- }
return keembay_add_functions(kpc, new_funcs);
}
diff --git a/drivers/pinctrl/pinctrl-microchip-sgpio.c b/drivers/pinctrl/pinctrl-microchip-sgpio.c
index 6191e5c13815..b6363f3cdce9 100644
--- a/drivers/pinctrl/pinctrl-microchip-sgpio.c
+++ b/drivers/pinctrl/pinctrl-microchip-sgpio.c
@@ -371,7 +371,7 @@ static int sgpio_pinconf_get(struct pinctrl_dev *pctldev,
val = !bank->is_input;
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
if (bank->is_input)
return -EINVAL;
val = sgpio_output_get(priv, &addr);
@@ -402,7 +402,7 @@ static int sgpio_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
arg = pinconf_to_config_argument(configs[cfg]);
switch (param) {
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
if (bank->is_input)
return -EINVAL;
err = sgpio_output_set(priv, &addr, arg);
@@ -824,7 +824,7 @@ static int microchip_sgpio_register_bank(struct device *dev,
pctl_desc->confops = &sgpio_confops;
pctl_desc->owner = THIS_MODULE;
- pins = devm_kzalloc(dev, sizeof(*pins)*ngpios, GFP_KERNEL);
+ pins = devm_kcalloc(dev, ngpios, sizeof(*pins), GFP_KERNEL);
if (!pins)
return -ENOMEM;
diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c
index b82bf83fed25..70da3f37567a 100644
--- a/drivers/pinctrl/pinctrl-ocelot.c
+++ b/drivers/pinctrl/pinctrl-ocelot.c
@@ -1656,7 +1656,7 @@ static int ocelot_pinconf_get(struct pinctrl_dev *pctldev,
return err;
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
err = regmap_read(info->map, REG(OCELOT_GPIO_OUT, info, pin),
&val);
if (err)
@@ -1735,7 +1735,7 @@ static int ocelot_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
case PIN_CONFIG_OUTPUT_ENABLE:
case PIN_CONFIG_INPUT_ENABLE:
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
p = pin % 32;
if (arg)
regmap_write(info->map,
diff --git a/drivers/pinctrl/pinctrl-pic32.c b/drivers/pinctrl/pinctrl-pic32.c
index 37c2bf752154..e8b481e87c77 100644
--- a/drivers/pinctrl/pinctrl-pic32.c
+++ b/drivers/pinctrl/pinctrl-pic32.c
@@ -1905,7 +1905,7 @@ static int pic32_pinconf_get(struct pinctrl_dev *pctldev, unsigned pin,
case PIN_CONFIG_INPUT_ENABLE:
arg = !!(readl(bank->reg_base + TRIS_REG) & mask);
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
arg = !(readl(bank->reg_base + TRIS_REG) & mask);
break;
default:
@@ -1960,7 +1960,7 @@ static int pic32_pinconf_set(struct pinctrl_dev *pctldev, unsigned pin,
case PIN_CONFIG_INPUT_ENABLE:
pic32_gpio_direction_input(&bank->gpio_chip, offset);
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
pic32_gpio_direction_output(&bank->gpio_chip,
offset, arg);
break;
diff --git a/drivers/pinctrl/pinctrl-rk805.c b/drivers/pinctrl/pinctrl-rk805.c
index 3acf770316c1..22f576337faa 100644
--- a/drivers/pinctrl/pinctrl-rk805.c
+++ b/drivers/pinctrl/pinctrl-rk805.c
@@ -541,7 +541,7 @@ static int rk805_pinconf_get(struct pinctrl_dev *pctldev,
u32 arg = 0;
switch (param) {
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
case PIN_CONFIG_INPUT_ENABLE:
arg = rk805_gpio_get(&pci->gpio_chip, pin);
break;
@@ -568,7 +568,7 @@ static int rk805_pinconf_set(struct pinctrl_dev *pctldev,
arg = pinconf_to_config_argument(configs[i]);
switch (param) {
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
rk805_gpio_set(&pci->gpio_chip, pin, arg);
rk805_pmx_gpio_set_direction(pctldev, NULL, pin, false);
break;
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index 930c454e0cec..7a68a6237649 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -3272,7 +3272,7 @@ static int rockchip_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
param = pinconf_to_config_param(configs[i]);
arg = pinconf_to_config_argument(configs[i]);
- if (param == PIN_CONFIG_OUTPUT || param == PIN_CONFIG_INPUT_ENABLE) {
+ if (param == PIN_CONFIG_LEVEL || param == PIN_CONFIG_INPUT_ENABLE) {
/*
* Check for gpio driver not being probed yet.
* The lock makes sure that either gpio-probe has completed
@@ -3313,7 +3313,7 @@ static int rockchip_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
if (rc)
return rc;
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
rc = rockchip_set_mux(bank, pin - bank->pin_base,
RK_FUNC_GPIO);
if (rc != RK_FUNC_GPIO)
@@ -3392,7 +3392,7 @@ static int rockchip_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
arg = 1;
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
rc = rockchip_get_mux(bank, pin - bank->pin_base);
if (rc != RK_FUNC_GPIO)
return -EINVAL;
diff --git a/drivers/pinctrl/pinctrl-rp1.c b/drivers/pinctrl/pinctrl-rp1.c
index dadafc935dbb..ffc2f0b460a6 100644
--- a/drivers/pinctrl/pinctrl-rp1.c
+++ b/drivers/pinctrl/pinctrl-rp1.c
@@ -1440,7 +1440,7 @@ static int rp1_pinconf_set(struct pinctrl_dev *pctldev, unsigned int offset,
rp1_output_enable(pin, arg);
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
rp1_set_value(pin, arg);
rp1_set_dir(pin, RP1_DIR_OUTPUT);
rp1_set_fsel(pin, RP1_FSEL_GPIO);
@@ -1623,12 +1623,94 @@ MODULE_DEVICE_TABLE(of, rp1_pinctrl_match);
static struct rp1_pinctrl rp1_pinctrl_data = {};
-static const struct regmap_config rp1_pinctrl_regmap_cfg = {
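+/*
+ * Readable register windows for each GPIO bank. Declaring them in a
+ * regmap access table keeps reads (e.g. the regmap debugfs dump) away
+ * from unmapped offsets.
+ */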
+static const struct regmap_range rp1_gpio_reg_ranges[] = {
+ /* BANK 0 */
+ regmap_reg_range(0x2004, 0x20dc),
+ regmap_reg_range(0x3004, 0x30dc),
+ regmap_reg_range(0x0004, 0x00dc),
+ regmap_reg_range(0x0124, 0x0124),
+ regmap_reg_range(0x211c, 0x211c),
+ regmap_reg_range(0x311c, 0x311c),
+ /* BANK 1 */
+ regmap_reg_range(0x6004, 0x602c),
+ regmap_reg_range(0x7004, 0x702c),
+ regmap_reg_range(0x4004, 0x402c),
+ regmap_reg_range(0x4124, 0x4124),
+ regmap_reg_range(0x611c, 0x611c),
+ regmap_reg_range(0x711c, 0x711c),
+ /* BANK 2 */
+ regmap_reg_range(0xa004, 0xa09c),
+ regmap_reg_range(0xb004, 0xb09c),
+ regmap_reg_range(0x8004, 0x809c),
+ regmap_reg_range(0x8124, 0x8124),
+ regmap_reg_range(0xa11c, 0xa11c),
+ regmap_reg_range(0xb11c, 0xb11c),
+};
+
+static const struct regmap_range rp1_rio_reg_ranges[] = {
+ /* BANK 0 */
+ regmap_reg_range(0x2000, 0x2004),
+ regmap_reg_range(0x3000, 0x3004),
+ regmap_reg_range(0x0004, 0x0008),
+ /* BANK 1 */
+ regmap_reg_range(0x6000, 0x6004),
+ regmap_reg_range(0x7000, 0x7004),
+ regmap_reg_range(0x4004, 0x4008),
+ /* BANK 2 */
+ regmap_reg_range(0xa000, 0xa004),
+ regmap_reg_range(0xb000, 0xb004),
+ regmap_reg_range(0x8004, 0x8008),
+};
+
+static const struct regmap_range rp1_pads_reg_ranges[] = {
+ /* BANK 0 */
+ regmap_reg_range(0x0004, 0x0070),
+ /* BANK 1 */
+ regmap_reg_range(0x4004, 0x4018),
+ /* BANK 2 */
+ regmap_reg_range(0x8004, 0x8050),
+};
+
+static const struct regmap_access_table rp1_gpio_reg_table = {
+ .yes_ranges = rp1_gpio_reg_ranges,
+ .n_yes_ranges = ARRAY_SIZE(rp1_gpio_reg_ranges),
+};
+
+static const struct regmap_access_table rp1_rio_reg_table = {
+ .yes_ranges = rp1_rio_reg_ranges,
+ .n_yes_ranges = ARRAY_SIZE(rp1_rio_reg_ranges),
+};
+
+static const struct regmap_access_table rp1_pads_reg_table = {
+ .yes_ranges = rp1_pads_reg_ranges,
+ .n_yes_ranges = ARRAY_SIZE(rp1_pads_reg_ranges),
+};
+
+static const struct regmap_config rp1_pinctrl_gpio_regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .rd_table = &rp1_gpio_reg_table,
+ .name = "rp1-gpio",
+ .max_register = 0xb11c,
+};
+
+static const struct regmap_config rp1_pinctrl_rio_regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .rd_table = &rp1_rio_reg_table,
+ .name = "rp1-rio",
+ .max_register = 0xb004,
+};
+
+static const struct regmap_config rp1_pinctrl_pads_regmap_cfg = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
- .fast_io = true,
- .name = "rp1-pinctrl",
+ .rd_table = &rp1_pads_reg_table,
+ .name = "rp1-pads",
+ .max_register = 0x8050,
};
static int rp1_gen_regfield(struct device *dev,
@@ -1685,17 +1767,17 @@ static int rp1_pinctrl_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(pc->pads_base), "could not get PADS IO memory\n");
gpio_regmap = devm_regmap_init_mmio(dev, pc->gpio_base,
- &rp1_pinctrl_regmap_cfg);
+ &rp1_pinctrl_gpio_regmap_cfg);
if (IS_ERR(gpio_regmap))
return dev_err_probe(dev, PTR_ERR(gpio_regmap), "could not init GPIO regmap\n");
rio_regmap = devm_regmap_init_mmio(dev, pc->rio_base,
- &rp1_pinctrl_regmap_cfg);
+ &rp1_pinctrl_rio_regmap_cfg);
if (IS_ERR(rio_regmap))
return dev_err_probe(dev, PTR_ERR(rio_regmap), "could not init RIO regmap\n");
pads_regmap = devm_regmap_init_mmio(dev, pc->pads_base,
- &rp1_pinctrl_regmap_cfg);
+ &rp1_pinctrl_pads_regmap_cfg);
if (IS_ERR(pads_regmap))
return dev_err_probe(dev, PTR_ERR(pads_regmap), "could not init PADS regmap\n");
diff --git a/drivers/pinctrl/pinctrl-scmi.c b/drivers/pinctrl/pinctrl-scmi.c
index 383681041e4c..d14528b9aa31 100644
--- a/drivers/pinctrl/pinctrl-scmi.c
+++ b/drivers/pinctrl/pinctrl-scmi.c
@@ -253,7 +253,7 @@ static int pinctrl_scmi_map_pinconf_type(enum pin_config_param param,
case PIN_CONFIG_MODE_LOW_POWER:
*type = SCMI_PIN_LOW_POWER_MODE;
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
*type = SCMI_PIN_OUTPUT_VALUE;
break;
case PIN_CONFIG_OUTPUT_ENABLE:
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index 5cda6201b60f..6d580aa282ec 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -336,7 +336,7 @@ static int pcs_get_function(struct pinctrl_dev *pctldev, unsigned pin,
struct pcs_device *pcs = pinctrl_dev_get_drvdata(pctldev);
struct pin_desc *pdesc = pin_desc_get(pctldev, pin);
const struct pinctrl_setting_mux *setting;
- struct function_desc *function;
+ const struct function_desc *function;
unsigned fselector;
/* If pin is not described in DTS & enabled, mux_setting is NULL. */
@@ -360,7 +360,7 @@ static int pcs_set_mux(struct pinctrl_dev *pctldev, unsigned fselector,
unsigned group)
{
struct pcs_device *pcs;
- struct function_desc *function;
+ const struct function_desc *function;
struct pcs_function *func;
int i;
@@ -589,8 +589,10 @@ static int pcs_pinconf_set(struct pinctrl_dev *pctldev,
/* 4 parameters */
case PIN_CONFIG_BIAS_PULL_DOWN:
case PIN_CONFIG_BIAS_PULL_UP:
- if (arg)
+ if (arg) {
pcs_pinconf_clear_bias(pctldev, pin);
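+ /* pcs_pinconf_clear_bias() just wrote the register, so re-read it */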
+ data = pcs->read(pcs->base + offset);
+ }
fallthrough;
case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
data &= ~func->conf[i].mask;
diff --git a/drivers/pinctrl/pinctrl-stmfx.c b/drivers/pinctrl/pinctrl-stmfx.c
index c89b99003b71..03ee13844b50 100644
--- a/drivers/pinctrl/pinctrl-stmfx.c
+++ b/drivers/pinctrl/pinctrl-stmfx.c
@@ -267,7 +267,7 @@ static int stmfx_pinconf_get(struct pinctrl_dev *pctldev,
if ((!dir && !type) || (dir && type))
arg = 1;
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
if (dir)
return -EINVAL;
@@ -334,7 +334,7 @@ static int stmfx_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
if (ret)
return ret;
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
ret = stmfx_gpio_direction_output(&pctl->gpio_chip,
pin, arg);
if (ret)
diff --git a/drivers/pinctrl/pinctrl-sx150x.c b/drivers/pinctrl/pinctrl-sx150x.c
index 53cf8168b274..1d6760ffe809 100644
--- a/drivers/pinctrl/pinctrl-sx150x.c
+++ b/drivers/pinctrl/pinctrl-sx150x.c
@@ -611,7 +611,7 @@ static int sx150x_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
if (sx150x_pin_is_oscio(pctl, pin)) {
switch (param) {
case PIN_CONFIG_DRIVE_PUSH_PULL:
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
ret = regmap_read(pctl->regmap,
pctl->data->pri.x789.reg_clock,
&data);
@@ -705,7 +705,7 @@ static int sx150x_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
}
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
ret = sx150x_gpio_get_direction(&pctl->gpio, pin);
if (ret < 0)
return ret;
@@ -744,7 +744,7 @@ static int sx150x_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
arg = pinconf_to_config_argument(configs[i]);
if (sx150x_pin_is_oscio(pctl, pin)) {
- if (param == PIN_CONFIG_OUTPUT) {
+ if (param == PIN_CONFIG_LEVEL) {
ret = sx150x_gpio_direction_output(&pctl->gpio,
pin, arg);
if (ret < 0)
@@ -816,7 +816,7 @@ static int sx150x_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
ret = sx150x_gpio_direction_output(&pctl->gpio,
pin, arg);
if (ret < 0)
@@ -863,6 +863,7 @@ static const struct of_device_id sx150x_of_match[] = {
{ .compatible = "semtech,sx1509q", .data = &sx1509q_device_data },
{},
};
+MODULE_DEVICE_TABLE(of, sx150x_of_match);
static int sx150x_reset(struct sx150x_pinctrl *pctl)
{
@@ -1266,3 +1267,6 @@ static int __init sx150x_init(void)
return i2c_add_driver(&sx150x_driver);
}
subsys_initcall(sx150x_init);
+
+MODULE_DESCRIPTION("Semtech SX150x I2C GPIO expander pinctrl driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/pinctrl-upboard.c b/drivers/pinctrl/pinctrl-upboard.c
new file mode 100644
index 000000000000..f8c8b9d84990
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-upboard.c
@@ -0,0 +1,1070 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * UP board pin control driver.
+ *
+ * Copyright (C) 2025 Bootlin
+ *
+ * Author: Thomas Richard <thomas.richard@bootlin.com>
+ */
+
+#include <linux/array_size.h>
+#include <linux/container_of.h>
+#include <linux/device.h>
+#include <linux/dmi.h>
+#include <linux/err.h>
+#include <linux/gpio/forwarder.h>
+#include <linux/mfd/upboard-fpga.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/seq_file.h>
+#include <linux/stddef.h>
+#include <linux/string_choices.h>
+#include <linux/types.h>
+
+#include <linux/pinctrl/consumer.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+
+#include <linux/gpio/driver.h>
+#include <linux/gpio/consumer.h>
+
+#include "core.h"
+#include "pinmux.h"
+
+enum upboard_pin_mode {
+ UPBOARD_PIN_MODE_FUNCTION,
+ UPBOARD_PIN_MODE_GPIO_IN,
+ UPBOARD_PIN_MODE_GPIO_OUT,
+ UPBOARD_PIN_MODE_DISABLED,
+};
+
+struct upboard_pin {
+ struct regmap_field *funcbit;
+ struct regmap_field *enbit;
+ struct regmap_field *dirbit;
+};
+
+struct upboard_pingroup {
+ struct pingroup grp;
+ enum upboard_pin_mode mode;
+ const enum upboard_pin_mode *modes;
+};
+
+struct upboard_pinctrl_data {
+ const struct upboard_pingroup *groups;
+ size_t ngroups;
+ const struct pinfunction *funcs;
+ size_t nfuncs;
+ const unsigned int *pin_header;
+ size_t ngpio;
+};
+
+struct upboard_pinctrl {
+ struct device *dev;
+ struct pinctrl_dev *pctldev;
+ const struct upboard_pinctrl_data *pctrl_data;
+ struct gpio_pin_range pin_range;
+ struct upboard_pin *pins;
+};
+
+struct upboard_pinctrl_map {
+ const struct pinctrl_map *maps;
+ size_t nmaps;
+};
+
+enum upboard_func0_fpgabit {
+ UPBOARD_FUNC_I2C0_EN = 8,
+ UPBOARD_FUNC_I2C1_EN = 9,
+ UPBOARD_FUNC_CEC0_EN = 12,
+ UPBOARD_FUNC_ADC0_EN = 14,
+};
+
+static const struct reg_field upboard_i2c0_reg =
+ REG_FIELD(UPBOARD_REG_FUNC_EN0, UPBOARD_FUNC_I2C0_EN, UPBOARD_FUNC_I2C0_EN);
+
+static const struct reg_field upboard_i2c1_reg =
+ REG_FIELD(UPBOARD_REG_FUNC_EN0, UPBOARD_FUNC_I2C1_EN, UPBOARD_FUNC_I2C1_EN);
+
+static const struct reg_field upboard_adc0_reg =
+ REG_FIELD(UPBOARD_REG_FUNC_EN0, UPBOARD_FUNC_ADC0_EN, UPBOARD_FUNC_ADC0_EN);
+
+#define UPBOARD_UP_BIT_TO_PIN(bit) UPBOARD_UP_BIT_##bit
+
+#define UPBOARD_UP_PIN_NAME(id) \
+ { \
+ .number = UPBOARD_UP_BIT_##id, \
+ .name = #id, \
+ }
+
+#define UPBOARD_UP_PIN_MUX(bit, data) \
+ { \
+ .number = UPBOARD_UP_BIT_##bit, \
+ .name = "PINMUX_"#bit, \
+ .drv_data = (void *)(data), \
+ }
+
+#define UPBOARD_UP_PIN_FUNC(id, data) \
+ { \
+ .number = UPBOARD_UP_BIT_##id, \
+ .name = #id, \
+ .drv_data = (void *)(data), \
+ }
+
+enum upboard_up_fpgabit {
+ UPBOARD_UP_BIT_I2C1_SDA,
+ UPBOARD_UP_BIT_I2C1_SCL,
+ UPBOARD_UP_BIT_ADC0,
+ UPBOARD_UP_BIT_UART1_RTS,
+ UPBOARD_UP_BIT_GPIO27,
+ UPBOARD_UP_BIT_GPIO22,
+ UPBOARD_UP_BIT_SPI_MOSI,
+ UPBOARD_UP_BIT_SPI_MISO,
+ UPBOARD_UP_BIT_SPI_CLK,
+ UPBOARD_UP_BIT_I2C0_SDA,
+ UPBOARD_UP_BIT_GPIO5,
+ UPBOARD_UP_BIT_GPIO6,
+ UPBOARD_UP_BIT_PWM1,
+ UPBOARD_UP_BIT_I2S_FRM,
+ UPBOARD_UP_BIT_GPIO26,
+ UPBOARD_UP_BIT_UART1_TX,
+ UPBOARD_UP_BIT_UART1_RX,
+ UPBOARD_UP_BIT_I2S_CLK,
+ UPBOARD_UP_BIT_GPIO23,
+ UPBOARD_UP_BIT_GPIO24,
+ UPBOARD_UP_BIT_GPIO25,
+ UPBOARD_UP_BIT_SPI_CS0,
+ UPBOARD_UP_BIT_SPI_CS1,
+ UPBOARD_UP_BIT_I2C0_SCL,
+ UPBOARD_UP_BIT_PWM0,
+ UPBOARD_UP_BIT_UART1_CTS,
+ UPBOARD_UP_BIT_I2S_DIN,
+ UPBOARD_UP_BIT_I2S_DOUT,
+};
+
+static const struct pinctrl_pin_desc upboard_up_pins[] = {
+ UPBOARD_UP_PIN_FUNC(I2C1_SDA, &upboard_i2c1_reg),
+ UPBOARD_UP_PIN_FUNC(I2C1_SCL, &upboard_i2c1_reg),
+ UPBOARD_UP_PIN_FUNC(ADC0, &upboard_adc0_reg),
+ UPBOARD_UP_PIN_NAME(UART1_RTS),
+ UPBOARD_UP_PIN_NAME(GPIO27),
+ UPBOARD_UP_PIN_NAME(GPIO22),
+ UPBOARD_UP_PIN_NAME(SPI_MOSI),
+ UPBOARD_UP_PIN_NAME(SPI_MISO),
+ UPBOARD_UP_PIN_NAME(SPI_CLK),
+ UPBOARD_UP_PIN_FUNC(I2C0_SDA, &upboard_i2c0_reg),
+ UPBOARD_UP_PIN_NAME(GPIO5),
+ UPBOARD_UP_PIN_NAME(GPIO6),
+ UPBOARD_UP_PIN_NAME(PWM1),
+ UPBOARD_UP_PIN_NAME(I2S_FRM),
+ UPBOARD_UP_PIN_NAME(GPIO26),
+ UPBOARD_UP_PIN_NAME(UART1_TX),
+ UPBOARD_UP_PIN_NAME(UART1_RX),
+ UPBOARD_UP_PIN_NAME(I2S_CLK),
+ UPBOARD_UP_PIN_NAME(GPIO23),
+ UPBOARD_UP_PIN_NAME(GPIO24),
+ UPBOARD_UP_PIN_NAME(GPIO25),
+ UPBOARD_UP_PIN_NAME(SPI_CS0),
+ UPBOARD_UP_PIN_NAME(SPI_CS1),
+ UPBOARD_UP_PIN_FUNC(I2C0_SCL, &upboard_i2c0_reg),
+ UPBOARD_UP_PIN_NAME(PWM0),
+ UPBOARD_UP_PIN_NAME(UART1_CTS),
+ UPBOARD_UP_PIN_NAME(I2S_DIN),
+ UPBOARD_UP_PIN_NAME(I2S_DOUT),
+};
+
+static const unsigned int upboard_up_pin_header[] = {
+ UPBOARD_UP_BIT_TO_PIN(I2C0_SDA),
+ UPBOARD_UP_BIT_TO_PIN(I2C0_SCL),
+ UPBOARD_UP_BIT_TO_PIN(I2C1_SDA),
+ UPBOARD_UP_BIT_TO_PIN(I2C1_SCL),
+ UPBOARD_UP_BIT_TO_PIN(ADC0),
+ UPBOARD_UP_BIT_TO_PIN(GPIO5),
+ UPBOARD_UP_BIT_TO_PIN(GPIO6),
+ UPBOARD_UP_BIT_TO_PIN(SPI_CS1),
+ UPBOARD_UP_BIT_TO_PIN(SPI_CS0),
+ UPBOARD_UP_BIT_TO_PIN(SPI_MISO),
+ UPBOARD_UP_BIT_TO_PIN(SPI_MOSI),
+ UPBOARD_UP_BIT_TO_PIN(SPI_CLK),
+ UPBOARD_UP_BIT_TO_PIN(PWM0),
+ UPBOARD_UP_BIT_TO_PIN(PWM1),
+ UPBOARD_UP_BIT_TO_PIN(UART1_TX),
+ UPBOARD_UP_BIT_TO_PIN(UART1_RX),
+ UPBOARD_UP_BIT_TO_PIN(UART1_CTS),
+ UPBOARD_UP_BIT_TO_PIN(UART1_RTS),
+ UPBOARD_UP_BIT_TO_PIN(I2S_CLK),
+ UPBOARD_UP_BIT_TO_PIN(I2S_FRM),
+ UPBOARD_UP_BIT_TO_PIN(I2S_DIN),
+ UPBOARD_UP_BIT_TO_PIN(I2S_DOUT),
+ UPBOARD_UP_BIT_TO_PIN(GPIO22),
+ UPBOARD_UP_BIT_TO_PIN(GPIO23),
+ UPBOARD_UP_BIT_TO_PIN(GPIO24),
+ UPBOARD_UP_BIT_TO_PIN(GPIO25),
+ UPBOARD_UP_BIT_TO_PIN(GPIO26),
+ UPBOARD_UP_BIT_TO_PIN(GPIO27),
+};
+
+static const unsigned int upboard_up_uart1_pins[] = {
+ UPBOARD_UP_BIT_TO_PIN(UART1_TX),
+ UPBOARD_UP_BIT_TO_PIN(UART1_RX),
+ UPBOARD_UP_BIT_TO_PIN(UART1_RTS),
+ UPBOARD_UP_BIT_TO_PIN(UART1_CTS),
+};
+
+static const enum upboard_pin_mode upboard_up_uart1_modes[] = {
+ UPBOARD_PIN_MODE_GPIO_OUT,
+ UPBOARD_PIN_MODE_GPIO_IN,
+ UPBOARD_PIN_MODE_GPIO_OUT,
+ UPBOARD_PIN_MODE_GPIO_IN,
+};
+
+static_assert(ARRAY_SIZE(upboard_up_uart1_modes) == ARRAY_SIZE(upboard_up_uart1_pins));
+
+static const unsigned int upboard_up_i2c0_pins[] = {
+ UPBOARD_UP_BIT_TO_PIN(I2C0_SCL),
+ UPBOARD_UP_BIT_TO_PIN(I2C0_SDA),
+};
+
+static const unsigned int upboard_up_i2c1_pins[] = {
+ UPBOARD_UP_BIT_TO_PIN(I2C1_SCL),
+ UPBOARD_UP_BIT_TO_PIN(I2C1_SDA),
+};
+
+static const unsigned int upboard_up_spi2_pins[] = {
+ UPBOARD_UP_BIT_TO_PIN(SPI_MOSI),
+ UPBOARD_UP_BIT_TO_PIN(SPI_MISO),
+ UPBOARD_UP_BIT_TO_PIN(SPI_CLK),
+ UPBOARD_UP_BIT_TO_PIN(SPI_CS0),
+ UPBOARD_UP_BIT_TO_PIN(SPI_CS1),
+};
+
+static const enum upboard_pin_mode upboard_up_spi2_modes[] = {
+ UPBOARD_PIN_MODE_GPIO_OUT,
+ UPBOARD_PIN_MODE_GPIO_IN,
+ UPBOARD_PIN_MODE_GPIO_OUT,
+ UPBOARD_PIN_MODE_GPIO_OUT,
+ UPBOARD_PIN_MODE_GPIO_OUT,
+};
+
+static_assert(ARRAY_SIZE(upboard_up_spi2_modes) == ARRAY_SIZE(upboard_up_spi2_pins));
+
+static const unsigned int upboard_up_i2s0_pins[] = {
+ UPBOARD_UP_BIT_TO_PIN(I2S_FRM),
+ UPBOARD_UP_BIT_TO_PIN(I2S_CLK),
+ UPBOARD_UP_BIT_TO_PIN(I2S_DIN),
+ UPBOARD_UP_BIT_TO_PIN(I2S_DOUT),
+};
+
+static const enum upboard_pin_mode upboard_up_i2s0_modes[] = {
+ UPBOARD_PIN_MODE_GPIO_OUT,
+ UPBOARD_PIN_MODE_GPIO_OUT,
+ UPBOARD_PIN_MODE_GPIO_IN,
+ UPBOARD_PIN_MODE_GPIO_OUT,
+};
+
+static_assert(ARRAY_SIZE(upboard_up_i2s0_pins) == ARRAY_SIZE(upboard_up_i2s0_modes));
+
+static const unsigned int upboard_up_pwm0_pins[] = {
+ UPBOARD_UP_BIT_TO_PIN(PWM0),
+};
+
+static const unsigned int upboard_up_pwm1_pins[] = {
+ UPBOARD_UP_BIT_TO_PIN(PWM1),
+};
+
+static const unsigned int upboard_up_adc0_pins[] = {
+ UPBOARD_UP_BIT_TO_PIN(ADC0),
+};
+
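+/*
+ * A group carries either one mode shared by all of its pins or a per-pin
+ * mode array; __builtin_types_compatible_p() routes the macro argument to
+ * the right member at compile time.
+ */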
+#define UPBOARD_PINGROUP(n, p, m) \
+{ \
+ .grp = PINCTRL_PINGROUP(n, p, ARRAY_SIZE(p)), \
+ .mode = __builtin_choose_expr( \
+ __builtin_types_compatible_p(typeof(m), const enum upboard_pin_mode *), \
+ 0, m), \
+ .modes = __builtin_choose_expr( \
+ __builtin_types_compatible_p(typeof(m), const enum upboard_pin_mode *), \
+ m, NULL), \
+}
+
+static const struct upboard_pingroup upboard_up_pin_groups[] = {
+ UPBOARD_PINGROUP("uart1_grp", upboard_up_uart1_pins, &upboard_up_uart1_modes[0]),
+ UPBOARD_PINGROUP("i2c0_grp", upboard_up_i2c0_pins, UPBOARD_PIN_MODE_GPIO_OUT),
+ UPBOARD_PINGROUP("i2c1_grp", upboard_up_i2c1_pins, UPBOARD_PIN_MODE_GPIO_OUT),
+ UPBOARD_PINGROUP("spi2_grp", upboard_up_spi2_pins, &upboard_up_spi2_modes[0]),
+ UPBOARD_PINGROUP("i2s0_grp", upboard_up_i2s0_pins, &upboard_up_i2s0_modes[0]),
+ UPBOARD_PINGROUP("pwm0_grp", upboard_up_pwm0_pins, UPBOARD_PIN_MODE_GPIO_OUT),
+ UPBOARD_PINGROUP("pwm1_grp", upboard_up_pwm1_pins, UPBOARD_PIN_MODE_GPIO_OUT),
+ UPBOARD_PINGROUP("adc0_grp", upboard_up_adc0_pins, UPBOARD_PIN_MODE_GPIO_IN),
+};
+
+static const char * const upboard_up_uart1_groups[] = { "uart1_grp" };
+static const char * const upboard_up_i2c0_groups[] = { "i2c0_grp" };
+static const char * const upboard_up_i2c1_groups[] = { "i2c1_grp" };
+static const char * const upboard_up_spi2_groups[] = { "spi2_grp" };
+static const char * const upboard_up_i2s0_groups[] = { "i2s0_grp" };
+static const char * const upboard_up_pwm0_groups[] = { "pwm0_grp" };
+static const char * const upboard_up_pwm1_groups[] = { "pwm1_grp" };
+static const char * const upboard_up_adc0_groups[] = { "adc0_grp" };
+
+#define UPBOARD_FUNCTION(func, groups) PINCTRL_PINFUNCTION(func, groups, ARRAY_SIZE(groups))
+
+static const struct pinfunction upboard_up_pin_functions[] = {
+ UPBOARD_FUNCTION("uart1", upboard_up_uart1_groups),
+ UPBOARD_FUNCTION("i2c0", upboard_up_i2c0_groups),
+ UPBOARD_FUNCTION("i2c1", upboard_up_i2c1_groups),
+ UPBOARD_FUNCTION("spi2", upboard_up_spi2_groups),
+ UPBOARD_FUNCTION("i2s0", upboard_up_i2s0_groups),
+ UPBOARD_FUNCTION("pwm0", upboard_up_pwm0_groups),
+ UPBOARD_FUNCTION("pwm1", upboard_up_pwm1_groups),
+ UPBOARD_FUNCTION("adc0", upboard_up_adc0_groups),
+};
+
+static const struct upboard_pinctrl_data upboard_up_pinctrl_data = {
+ .groups = &upboard_up_pin_groups[0],
+ .ngroups = ARRAY_SIZE(upboard_up_pin_groups),
+ .funcs = &upboard_up_pin_functions[0],
+ .nfuncs = ARRAY_SIZE(upboard_up_pin_functions),
+ .pin_header = &upboard_up_pin_header[0],
+ .ngpio = ARRAY_SIZE(upboard_up_pin_header),
+};
+
+#define UPBOARD_UP2_BIT_TO_PIN(bit) UPBOARD_UP2_BIT_##bit
+
+#define UPBOARD_UP2_PIN_NAME(id) \
+ { \
+ .number = UPBOARD_UP2_BIT_##id, \
+ .name = #id, \
+ }
+
+#define UPBOARD_UP2_PIN_MUX(bit, data) \
+ { \
+ .number = UPBOARD_UP2_BIT_##bit, \
+ .name = "PINMUX_"#bit, \
+ .drv_data = (void *)(data), \
+ }
+
+#define UPBOARD_UP2_PIN_FUNC(id, data) \
+ { \
+ .number = UPBOARD_UP2_BIT_##id, \
+ .name = #id, \
+ .drv_data = (void *)(data), \
+ }
+
+enum upboard_up2_fpgabit {
+ UPBOARD_UP2_BIT_UART1_TXD,
+ UPBOARD_UP2_BIT_UART1_RXD,
+ UPBOARD_UP2_BIT_UART1_RTS,
+ UPBOARD_UP2_BIT_UART1_CTS,
+ UPBOARD_UP2_BIT_GPIO3_ADC0,
+ UPBOARD_UP2_BIT_GPIO5_ADC2,
+ UPBOARD_UP2_BIT_GPIO6_ADC3,
+ UPBOARD_UP2_BIT_GPIO11,
+ UPBOARD_UP2_BIT_EXHAT_LVDS1n,
+ UPBOARD_UP2_BIT_EXHAT_LVDS1p,
+ UPBOARD_UP2_BIT_SPI2_TXD,
+ UPBOARD_UP2_BIT_SPI2_RXD,
+ UPBOARD_UP2_BIT_SPI2_FS1,
+ UPBOARD_UP2_BIT_SPI2_FS0,
+ UPBOARD_UP2_BIT_SPI2_CLK,
+ UPBOARD_UP2_BIT_SPI1_TXD,
+ UPBOARD_UP2_BIT_SPI1_RXD,
+ UPBOARD_UP2_BIT_SPI1_FS1,
+ UPBOARD_UP2_BIT_SPI1_FS0,
+ UPBOARD_UP2_BIT_SPI1_CLK,
+ UPBOARD_UP2_BIT_I2C0_SCL,
+ UPBOARD_UP2_BIT_I2C0_SDA,
+ UPBOARD_UP2_BIT_I2C1_SCL,
+ UPBOARD_UP2_BIT_I2C1_SDA,
+ UPBOARD_UP2_BIT_PWM1,
+ UPBOARD_UP2_BIT_PWM0,
+ UPBOARD_UP2_BIT_EXHAT_LVDS0n,
+ UPBOARD_UP2_BIT_EXHAT_LVDS0p,
+ UPBOARD_UP2_BIT_GPIO24,
+ UPBOARD_UP2_BIT_GPIO10,
+ UPBOARD_UP2_BIT_GPIO2,
+ UPBOARD_UP2_BIT_GPIO1,
+ UPBOARD_UP2_BIT_EXHAT_LVDS3n,
+ UPBOARD_UP2_BIT_EXHAT_LVDS3p,
+ UPBOARD_UP2_BIT_EXHAT_LVDS4n,
+ UPBOARD_UP2_BIT_EXHAT_LVDS4p,
+ UPBOARD_UP2_BIT_EXHAT_LVDS5n,
+ UPBOARD_UP2_BIT_EXHAT_LVDS5p,
+ UPBOARD_UP2_BIT_I2S_SDO,
+ UPBOARD_UP2_BIT_I2S_SDI,
+ UPBOARD_UP2_BIT_I2S_WS_SYNC,
+ UPBOARD_UP2_BIT_I2S_BCLK,
+ UPBOARD_UP2_BIT_EXHAT_LVDS6n,
+ UPBOARD_UP2_BIT_EXHAT_LVDS6p,
+ UPBOARD_UP2_BIT_EXHAT_LVDS7n,
+ UPBOARD_UP2_BIT_EXHAT_LVDS7p,
+ UPBOARD_UP2_BIT_EXHAT_LVDS2n,
+ UPBOARD_UP2_BIT_EXHAT_LVDS2p,
+};
+
+static const struct pinctrl_pin_desc upboard_up2_pins[] = {
+ UPBOARD_UP2_PIN_NAME(UART1_TXD),
+ UPBOARD_UP2_PIN_NAME(UART1_RXD),
+ UPBOARD_UP2_PIN_NAME(UART1_RTS),
+ UPBOARD_UP2_PIN_NAME(UART1_CTS),
+ UPBOARD_UP2_PIN_NAME(GPIO3_ADC0),
+ UPBOARD_UP2_PIN_NAME(GPIO5_ADC2),
+ UPBOARD_UP2_PIN_NAME(GPIO6_ADC3),
+ UPBOARD_UP2_PIN_NAME(GPIO11),
+ UPBOARD_UP2_PIN_NAME(EXHAT_LVDS1n),
+ UPBOARD_UP2_PIN_NAME(EXHAT_LVDS1p),
+ UPBOARD_UP2_PIN_NAME(SPI2_TXD),
+ UPBOARD_UP2_PIN_NAME(SPI2_RXD),
+ UPBOARD_UP2_PIN_NAME(SPI2_FS1),
+ UPBOARD_UP2_PIN_NAME(SPI2_FS0),
+ UPBOARD_UP2_PIN_NAME(SPI2_CLK),
+ UPBOARD_UP2_PIN_NAME(SPI1_TXD),
+ UPBOARD_UP2_PIN_NAME(SPI1_RXD),
+ UPBOARD_UP2_PIN_NAME(SPI1_FS1),
+ UPBOARD_UP2_PIN_NAME(SPI1_FS0),
+ UPBOARD_UP2_PIN_NAME(SPI1_CLK),
+ UPBOARD_UP2_PIN_MUX(I2C0_SCL, &upboard_i2c0_reg),
+ UPBOARD_UP2_PIN_MUX(I2C0_SDA, &upboard_i2c0_reg),
+ UPBOARD_UP2_PIN_MUX(I2C1_SCL, &upboard_i2c1_reg),
+ UPBOARD_UP2_PIN_MUX(I2C1_SDA, &upboard_i2c1_reg),
+ UPBOARD_UP2_PIN_NAME(PWM1),
+ UPBOARD_UP2_PIN_NAME(PWM0),
+ UPBOARD_UP2_PIN_NAME(EXHAT_LVDS0n),
+ UPBOARD_UP2_PIN_NAME(EXHAT_LVDS0p),
+ UPBOARD_UP2_PIN_MUX(GPIO24, &upboard_i2c0_reg),
+ UPBOARD_UP2_PIN_MUX(GPIO10, &upboard_i2c0_reg),
+ UPBOARD_UP2_PIN_MUX(GPIO2, &upboard_i2c1_reg),
+ UPBOARD_UP2_PIN_MUX(GPIO1, &upboard_i2c1_reg),
+ UPBOARD_UP2_PIN_NAME(EXHAT_LVDS3n),
+ UPBOARD_UP2_PIN_NAME(EXHAT_LVDS3p),
+ UPBOARD_UP2_PIN_NAME(EXHAT_LVDS4n),
+ UPBOARD_UP2_PIN_NAME(EXHAT_LVDS4p),
+ UPBOARD_UP2_PIN_NAME(EXHAT_LVDS5n),
+ UPBOARD_UP2_PIN_NAME(EXHAT_LVDS5p),
+ UPBOARD_UP2_PIN_NAME(I2S_SDO),
+ UPBOARD_UP2_PIN_NAME(I2S_SDI),
+ UPBOARD_UP2_PIN_NAME(I2S_WS_SYNC),
+ UPBOARD_UP2_PIN_NAME(I2S_BCLK),
+ UPBOARD_UP2_PIN_NAME(EXHAT_LVDS6n),
+ UPBOARD_UP2_PIN_NAME(EXHAT_LVDS6p),
+ UPBOARD_UP2_PIN_NAME(EXHAT_LVDS7n),
+ UPBOARD_UP2_PIN_NAME(EXHAT_LVDS7p),
+ UPBOARD_UP2_PIN_NAME(EXHAT_LVDS2n),
+ UPBOARD_UP2_PIN_NAME(EXHAT_LVDS2p),
+};
+
+static const unsigned int upboard_up2_pin_header[] = {
+ UPBOARD_UP2_BIT_TO_PIN(GPIO10),
+ UPBOARD_UP2_BIT_TO_PIN(GPIO24),
+ UPBOARD_UP2_BIT_TO_PIN(GPIO1),
+ UPBOARD_UP2_BIT_TO_PIN(GPIO2),
+ UPBOARD_UP2_BIT_TO_PIN(GPIO3_ADC0),
+ UPBOARD_UP2_BIT_TO_PIN(GPIO11),
+ UPBOARD_UP2_BIT_TO_PIN(SPI2_CLK),
+ UPBOARD_UP2_BIT_TO_PIN(SPI1_FS1),
+ UPBOARD_UP2_BIT_TO_PIN(SPI1_FS0),
+ UPBOARD_UP2_BIT_TO_PIN(SPI1_RXD),
+ UPBOARD_UP2_BIT_TO_PIN(SPI1_TXD),
+ UPBOARD_UP2_BIT_TO_PIN(SPI1_CLK),
+ UPBOARD_UP2_BIT_TO_PIN(PWM0),
+ UPBOARD_UP2_BIT_TO_PIN(PWM1),
+ UPBOARD_UP2_BIT_TO_PIN(UART1_TXD),
+ UPBOARD_UP2_BIT_TO_PIN(UART1_RXD),
+ UPBOARD_UP2_BIT_TO_PIN(UART1_CTS),
+ UPBOARD_UP2_BIT_TO_PIN(UART1_RTS),
+ UPBOARD_UP2_BIT_TO_PIN(I2S_BCLK),
+ UPBOARD_UP2_BIT_TO_PIN(I2S_WS_SYNC),
+ UPBOARD_UP2_BIT_TO_PIN(I2S_SDI),
+ UPBOARD_UP2_BIT_TO_PIN(I2S_SDO),
+ UPBOARD_UP2_BIT_TO_PIN(GPIO6_ADC3),
+ UPBOARD_UP2_BIT_TO_PIN(SPI2_FS1),
+ UPBOARD_UP2_BIT_TO_PIN(SPI2_RXD),
+ UPBOARD_UP2_BIT_TO_PIN(SPI2_TXD),
+ UPBOARD_UP2_BIT_TO_PIN(SPI2_FS0),
+ UPBOARD_UP2_BIT_TO_PIN(GPIO5_ADC2),
+};
+
+static const unsigned int upboard_up2_uart1_pins[] = {
+ UPBOARD_UP2_BIT_TO_PIN(UART1_TXD),
+ UPBOARD_UP2_BIT_TO_PIN(UART1_RXD),
+ UPBOARD_UP2_BIT_TO_PIN(UART1_RTS),
+ UPBOARD_UP2_BIT_TO_PIN(UART1_CTS),
+};
+
+static const enum upboard_pin_mode upboard_up2_uart1_modes[] = {
+ UPBOARD_PIN_MODE_GPIO_OUT,
+ UPBOARD_PIN_MODE_GPIO_IN,
+ UPBOARD_PIN_MODE_GPIO_OUT,
+ UPBOARD_PIN_MODE_GPIO_IN,
+};
+
+static_assert(ARRAY_SIZE(upboard_up2_uart1_modes) == ARRAY_SIZE(upboard_up2_uart1_pins));
+
+static const unsigned int upboard_up2_i2c0_pins[] = {
+ UPBOARD_UP2_BIT_TO_PIN(I2C0_SCL),
+ UPBOARD_UP2_BIT_TO_PIN(I2C0_SDA),
+ UPBOARD_UP2_BIT_TO_PIN(GPIO24),
+ UPBOARD_UP2_BIT_TO_PIN(GPIO10),
+};
+
+static const unsigned int upboard_up2_i2c1_pins[] = {
+ UPBOARD_UP2_BIT_TO_PIN(I2C1_SCL),
+ UPBOARD_UP2_BIT_TO_PIN(I2C1_SDA),
+ UPBOARD_UP2_BIT_TO_PIN(GPIO2),
+ UPBOARD_UP2_BIT_TO_PIN(GPIO1),
+};
+
+static const unsigned int upboard_up2_spi1_pins[] = {
+ UPBOARD_UP2_BIT_TO_PIN(SPI1_TXD),
+ UPBOARD_UP2_BIT_TO_PIN(SPI1_RXD),
+ UPBOARD_UP2_BIT_TO_PIN(SPI1_FS1),
+ UPBOARD_UP2_BIT_TO_PIN(SPI1_FS0),
+ UPBOARD_UP2_BIT_TO_PIN(SPI1_CLK),
+};
+
+static const unsigned int upboard_up2_spi2_pins[] = {
+ UPBOARD_UP2_BIT_TO_PIN(SPI2_TXD),
+ UPBOARD_UP2_BIT_TO_PIN(SPI2_RXD),
+ UPBOARD_UP2_BIT_TO_PIN(SPI2_FS1),
+ UPBOARD_UP2_BIT_TO_PIN(SPI2_FS0),
+ UPBOARD_UP2_BIT_TO_PIN(SPI2_CLK),
+};
+
+static const enum upboard_pin_mode upboard_up2_spi_modes[] = {
+ UPBOARD_PIN_MODE_GPIO_OUT,
+ UPBOARD_PIN_MODE_GPIO_IN,
+ UPBOARD_PIN_MODE_GPIO_OUT,
+ UPBOARD_PIN_MODE_GPIO_OUT,
+ UPBOARD_PIN_MODE_GPIO_OUT,
+};
+
+static_assert(ARRAY_SIZE(upboard_up2_spi_modes) == ARRAY_SIZE(upboard_up2_spi1_pins));
+
+static_assert(ARRAY_SIZE(upboard_up2_spi_modes) == ARRAY_SIZE(upboard_up2_spi2_pins));
+
+static const unsigned int upboard_up2_i2s0_pins[] = {
+ UPBOARD_UP2_BIT_TO_PIN(I2S_BCLK),
+ UPBOARD_UP2_BIT_TO_PIN(I2S_WS_SYNC),
+ UPBOARD_UP2_BIT_TO_PIN(I2S_SDI),
+ UPBOARD_UP2_BIT_TO_PIN(I2S_SDO),
+};
+
+static const enum upboard_pin_mode upboard_up2_i2s0_modes[] = {
+ UPBOARD_PIN_MODE_GPIO_OUT,
+ UPBOARD_PIN_MODE_GPIO_OUT,
+ UPBOARD_PIN_MODE_GPIO_IN,
+ UPBOARD_PIN_MODE_GPIO_OUT,
+};
+
+static_assert(ARRAY_SIZE(upboard_up2_i2s0_modes) == ARRAY_SIZE(upboard_up2_i2s0_pins));
+
+static const unsigned int upboard_up2_pwm0_pins[] = {
+ UPBOARD_UP2_BIT_TO_PIN(PWM0),
+};
+
+static const unsigned int upboard_up2_pwm1_pins[] = {
+ UPBOARD_UP2_BIT_TO_PIN(PWM1),
+};
+
+static const unsigned int upboard_up2_adc0_pins[] = {
+ UPBOARD_UP2_BIT_TO_PIN(GPIO3_ADC0),
+};
+
+static const unsigned int upboard_up2_adc2_pins[] = {
+ UPBOARD_UP2_BIT_TO_PIN(GPIO5_ADC2),
+};
+
+static const unsigned int upboard_up2_adc3_pins[] = {
+ UPBOARD_UP2_BIT_TO_PIN(GPIO6_ADC3),
+};
+
+static const struct upboard_pingroup upboard_up2_pin_groups[] = {
+ UPBOARD_PINGROUP("uart1_grp", upboard_up2_uart1_pins, &upboard_up2_uart1_modes[0]),
+ UPBOARD_PINGROUP("i2c0_grp", upboard_up2_i2c0_pins, UPBOARD_PIN_MODE_FUNCTION),
+ UPBOARD_PINGROUP("i2c1_grp", upboard_up2_i2c1_pins, UPBOARD_PIN_MODE_FUNCTION),
+ UPBOARD_PINGROUP("spi1_grp", upboard_up2_spi1_pins, &upboard_up2_spi_modes[0]),
+ UPBOARD_PINGROUP("spi2_grp", upboard_up2_spi2_pins, &upboard_up2_spi_modes[0]),
+ UPBOARD_PINGROUP("i2s0_grp", upboard_up2_i2s0_pins, &upboard_up2_i2s0_modes[0]),
+ UPBOARD_PINGROUP("pwm0_grp", upboard_up2_pwm0_pins, UPBOARD_PIN_MODE_GPIO_OUT),
+ UPBOARD_PINGROUP("pwm1_grp", upboard_up2_pwm1_pins, UPBOARD_PIN_MODE_GPIO_OUT),
+ UPBOARD_PINGROUP("adc0_grp", upboard_up2_adc0_pins, UPBOARD_PIN_MODE_GPIO_IN),
+ UPBOARD_PINGROUP("adc2_grp", upboard_up2_adc2_pins, UPBOARD_PIN_MODE_GPIO_IN),
+ UPBOARD_PINGROUP("adc3_grp", upboard_up2_adc3_pins, UPBOARD_PIN_MODE_GPIO_IN),
+};
+
+static const char * const upboard_up2_uart1_groups[] = { "uart1_grp" };
+static const char * const upboard_up2_i2c0_groups[] = { "i2c0_grp" };
+static const char * const upboard_up2_i2c1_groups[] = { "i2c1_grp" };
+static const char * const upboard_up2_spi1_groups[] = { "spi1_grp" };
+static const char * const upboard_up2_spi2_groups[] = { "spi2_grp" };
+static const char * const upboard_up2_i2s0_groups[] = { "i2s0_grp" };
+static const char * const upboard_up2_pwm0_groups[] = { "pwm0_grp" };
+static const char * const upboard_up2_pwm1_groups[] = { "pwm1_grp" };
+static const char * const upboard_up2_adc0_groups[] = { "adc0_grp" };
+static const char * const upboard_up2_adc2_groups[] = { "adc2_grp" };
+static const char * const upboard_up2_adc3_groups[] = { "adc3_grp" };
+
+static const struct pinfunction upboard_up2_pin_functions[] = {
+ UPBOARD_FUNCTION("uart1", upboard_up2_uart1_groups),
+ UPBOARD_FUNCTION("i2c0", upboard_up2_i2c0_groups),
+ UPBOARD_FUNCTION("i2c1", upboard_up2_i2c1_groups),
+ UPBOARD_FUNCTION("spi1", upboard_up2_spi1_groups),
+ UPBOARD_FUNCTION("spi2", upboard_up2_spi2_groups),
+ UPBOARD_FUNCTION("i2s0", upboard_up2_i2s0_groups),
+ UPBOARD_FUNCTION("pwm0", upboard_up2_pwm0_groups),
+ UPBOARD_FUNCTION("pwm1", upboard_up2_pwm1_groups),
+ UPBOARD_FUNCTION("adc0", upboard_up2_adc0_groups),
+ UPBOARD_FUNCTION("adc2", upboard_up2_adc2_groups),
+ UPBOARD_FUNCTION("adc3", upboard_up2_adc3_groups),
+};
+
+static const struct upboard_pinctrl_data upboard_up2_pinctrl_data = {
+ .groups = &upboard_up2_pin_groups[0],
+ .ngroups = ARRAY_SIZE(upboard_up2_pin_groups),
+ .funcs = &upboard_up2_pin_functions[0],
+ .nfuncs = ARRAY_SIZE(upboard_up2_pin_functions),
+ .pin_header = &upboard_up2_pin_header[0],
+ .ngpio = ARRAY_SIZE(upboard_up2_pin_header),
+};
+
+static int upboard_pinctrl_set_function(struct pinctrl_dev *pctldev, unsigned int offset)
+{
+ struct upboard_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct upboard_pin *p = &pctrl->pins[offset];
+ int ret;
+
+ if (!p->funcbit)
+ return -EPERM;
+
+ ret = regmap_field_write(p->enbit, 0);
+ if (ret)
+ return ret;
+
+ return regmap_field_write(p->funcbit, 1);
+}
+
+static int upboard_pinctrl_gpio_commit_enable(struct pinctrl_dev *pctldev, unsigned int offset)
+{
+ struct upboard_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct upboard_pin *p = &pctrl->pins[offset];
+ int ret;
+
+ if (p->funcbit) {
+ ret = regmap_field_write(p->funcbit, 0);
+ if (ret)
+ return ret;
+ }
+
+ return regmap_field_write(p->enbit, 1);
+}
+
+static int upboard_pinctrl_gpio_request_enable(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int offset)
+{
+ return upboard_pinctrl_gpio_commit_enable(pctldev, offset);
+}
+
+static void upboard_pinctrl_gpio_commit_disable(struct pinctrl_dev *pctldev, unsigned int offset)
+{
+ struct upboard_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct upboard_pin *p = &pctrl->pins[offset];
+
+ regmap_field_write(p->enbit, 0);
+}
+
+static void upboard_pinctrl_gpio_disable_free(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range, unsigned int offset)
+{
+ upboard_pinctrl_gpio_commit_disable(pctldev, offset);
+}
+
+static int upboard_pinctrl_gpio_commit_direction(struct pinctrl_dev *pctldev, unsigned int offset,
+ bool input)
+{
+ struct upboard_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct upboard_pin *p = &pctrl->pins[offset];
+
+ return regmap_field_write(p->dirbit, input);
+}
+
+static int upboard_pinctrl_gpio_set_direction(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int offset, bool input)
+{
+ return upboard_pinctrl_gpio_commit_direction(pctldev, offset, input);
+}
+
+static int upboard_pinctrl_set_mux(struct pinctrl_dev *pctldev, unsigned int func_selector,
+ unsigned int group_selector)
+{
+ struct upboard_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ const struct upboard_pinctrl_data *pctrl_data = pctrl->pctrl_data;
+ const struct upboard_pingroup *upgroups = pctrl_data->groups;
+ struct group_desc *grp;
+ unsigned int mode, i;
+ int ret;
+
+ grp = pinctrl_generic_get_group(pctldev, group_selector);
+ if (!grp)
+ return -EINVAL;
+
+ for (i = 0; i < grp->grp.npins; i++) {
+ mode = upgroups[group_selector].modes ?
+        upgroups[group_selector].modes[i] : upgroups[group_selector].mode;
+ if (mode == UPBOARD_PIN_MODE_FUNCTION) {
+ ret = upboard_pinctrl_set_function(pctldev, grp->grp.pins[i]);
+ if (ret)
+ return ret;
+
+ continue;
+ }
+
+ ret = upboard_pinctrl_gpio_commit_enable(pctldev, grp->grp.pins[i]);
+ if (ret)
+ return ret;
+
+ ret = upboard_pinctrl_gpio_commit_direction(pctldev, grp->grp.pins[i],
+ mode == UPBOARD_PIN_MODE_GPIO_IN);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct pinmux_ops upboard_pinmux_ops = {
+ .get_functions_count = pinmux_generic_get_function_count,
+ .get_function_name = pinmux_generic_get_function_name,
+ .get_function_groups = pinmux_generic_get_function_groups,
+ .set_mux = upboard_pinctrl_set_mux,
+ .gpio_request_enable = upboard_pinctrl_gpio_request_enable,
+ .gpio_disable_free = upboard_pinctrl_gpio_disable_free,
+ .gpio_set_direction = upboard_pinctrl_gpio_set_direction,
+};
+
+static int upboard_pinctrl_pin_get_mode(struct pinctrl_dev *pctldev, unsigned int pin)
+{
+ struct upboard_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct upboard_pin *p = &pctrl->pins[pin];
+ unsigned int val;
+ int ret;
+
+ if (p->funcbit) {
+ ret = regmap_field_read(p->funcbit, &val);
+ if (ret)
+ return ret;
+ if (val)
+ return UPBOARD_PIN_MODE_FUNCTION;
+ }
+
+ ret = regmap_field_read(p->enbit, &val);
+ if (ret)
+ return ret;
+ if (!val)
+ return UPBOARD_PIN_MODE_DISABLED;
+
+ ret = regmap_field_read(p->dirbit, &val);
+ if (ret)
+ return ret;
+
+ return val ? UPBOARD_PIN_MODE_GPIO_IN : UPBOARD_PIN_MODE_GPIO_OUT;
+}
+
+static void upboard_pinctrl_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
+ unsigned int offset)
+{
+ int ret;
+
+ ret = upboard_pinctrl_pin_get_mode(pctldev, offset);
+ if (ret == UPBOARD_PIN_MODE_FUNCTION)
+ seq_puts(s, "mode function ");
+ else if (ret == UPBOARD_PIN_MODE_DISABLED)
+ seq_puts(s, "HIGH-Z ");
+ else if (ret < 0)
+ seq_puts(s, "N/A ");
+ else
+ seq_printf(s, "GPIO (%s) ", str_input_output(ret == UPBOARD_PIN_MODE_GPIO_IN));
+}
+
+static const struct pinctrl_ops upboard_pinctrl_ops = {
+ .get_groups_count = pinctrl_generic_get_group_count,
+ .get_group_name = pinctrl_generic_get_group_name,
+ .get_group_pins = pinctrl_generic_get_group_pins,
+ .pin_dbg_show = upboard_pinctrl_dbg_show,
+};
+
+static int upboard_gpio_request(struct gpio_chip *gc, unsigned int offset)
+{
+ struct gpiochip_fwd *fwd = gpiochip_get_data(gc);
+ struct upboard_pinctrl *pctrl = gpiochip_fwd_get_data(fwd);
+ unsigned int pin = pctrl->pctrl_data->pin_header[offset];
+ struct gpio_desc *desc;
+ int ret;
+
+ ret = pinctrl_gpio_request(gc, offset);
+ if (ret)
+ return ret;
+
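+ /* Look up the SoC GPIO wired behind this header pin and attach it to the forwarder */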
+ desc = gpiod_get_index(pctrl->dev, "external", pin, 0);
+ if (IS_ERR(desc)) {
+ pinctrl_gpio_free(gc, offset);
+ return PTR_ERR(desc);
+ }
+
+ return gpiochip_fwd_desc_add(fwd, desc, offset);
+}
+
+static void upboard_gpio_free(struct gpio_chip *gc, unsigned int offset)
+{
+ struct gpiochip_fwd *fwd = gpiochip_get_data(gc);
+
+ gpiochip_fwd_desc_free(fwd, offset);
+ pinctrl_gpio_free(gc, offset);
+}
+
+static int upboard_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
+{
+ struct gpiochip_fwd *fwd = gpiochip_get_data(gc);
+ struct upboard_pinctrl *pctrl = gpiochip_fwd_get_data(fwd);
+ unsigned int pin = pctrl->pctrl_data->pin_header[offset];
+ int mode;
+
+ /* Pins in function mode or high-Z report the input direction */
+ mode = upboard_pinctrl_pin_get_mode(pctrl->pctldev, pin);
+ if (mode < 0)
+ return mode;
+
+ if (mode == UPBOARD_PIN_MODE_GPIO_OUT)
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
+}
+
+static int upboard_gpio_direction_input(struct gpio_chip *gc, unsigned int offset)
+{
+ struct gpiochip_fwd *fwd = gpiochip_get_data(gc);
+ int ret;
+
+ ret = pinctrl_gpio_direction_input(gc, offset);
+ if (ret)
+ return ret;
+
+ return gpiochip_fwd_gpio_direction_input(fwd, offset);
+}
+
+static int upboard_gpio_direction_output(struct gpio_chip *gc, unsigned int offset, int value)
+{
+ struct gpiochip_fwd *fwd = gpiochip_get_data(gc);
+ int ret;
+
+ ret = pinctrl_gpio_direction_output(gc, offset);
+ if (ret)
+ return ret;
+
+ return gpiochip_fwd_gpio_direction_output(fwd, offset, value);
+}
+
+static int upboard_pinctrl_register_groups(struct upboard_pinctrl *pctrl)
+{
+ const struct upboard_pingroup *groups = pctrl->pctrl_data->groups;
+ size_t ngroups = pctrl->pctrl_data->ngroups;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < ngroups; i++) {
+ ret = pinctrl_generic_add_group(pctrl->pctldev, groups[i].grp.name,
+ groups[i].grp.pins, groups[i].grp.npins, pctrl);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int upboard_pinctrl_register_functions(struct upboard_pinctrl *pctrl)
+{
+ const struct pinfunction *funcs = pctrl->pctrl_data->funcs;
+ size_t nfuncs = pctrl->pctrl_data->nfuncs;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < nfuncs; i++) {
+ ret = pinmux_generic_add_function(pctrl->pctldev, funcs[i].name,
+ funcs[i].groups, funcs[i].ngroups, NULL);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
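+/* Board-specific SoC pinmux defaults, selected through DMI matching at probe time */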
+static const struct pinctrl_map pinctrl_map_apl01[] = {
+ PIN_MAP_MUX_GROUP_DEFAULT("upboard-pinctrl", "INT3452:00", "pwm0_grp", "pwm0"),
+ PIN_MAP_MUX_GROUP_DEFAULT("upboard-pinctrl", "INT3452:00", "pwm1_grp", "pwm1"),
+ PIN_MAP_MUX_GROUP_DEFAULT("upboard-pinctrl", "INT3452:00", "uart1_grp", "uart1"),
+ PIN_MAP_MUX_GROUP_DEFAULT("upboard-pinctrl", "INT3452:02", "i2c0_grp", "i2c0"),
+ PIN_MAP_MUX_GROUP_DEFAULT("upboard-pinctrl", "INT3452:02", "i2c1_grp", "i2c1"),
+ PIN_MAP_MUX_GROUP_DEFAULT("upboard-pinctrl", "INT3452:01", "ssp0_grp", "ssp0"),
+};
+
+static const struct upboard_pinctrl_map upboard_pinctrl_map_apl01 = {
+ .maps = &pinctrl_map_apl01[0],
+ .nmaps = ARRAY_SIZE(pinctrl_map_apl01),
+};
+
+static const struct dmi_system_id dmi_platform_info[] = {
+ {
+ /* UP Squared */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AAEON"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "UP-APL01"),
+ },
+ .driver_data = (void *)&upboard_pinctrl_map_apl01,
+ },
+ { }
+};
+
+static int upboard_pinctrl_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct upboard_fpga *fpga = dev_get_drvdata(dev->parent);
+ const struct upboard_pinctrl_map *board_map;
+ const struct dmi_system_id *dmi_id;
+ struct pinctrl_desc *pctldesc;
+ struct upboard_pinctrl *pctrl;
+ struct upboard_pin *pins;
+ struct gpiochip_fwd *fwd;
+ struct pinctrl *pinctrl;
+ struct gpio_chip *chip;
+ unsigned int i;
+ int ret;
+
+ pctldesc = devm_kzalloc(dev, sizeof(*pctldesc), GFP_KERNEL);
+ if (!pctldesc)
+ return -ENOMEM;
+
+ pctrl = devm_kzalloc(dev, sizeof(*pctrl), GFP_KERNEL);
+ if (!pctrl)
+ return -ENOMEM;
+
+ switch (fpga->fpga_data->type) {
+ case UPBOARD_UP_FPGA:
+ pctldesc->pins = upboard_up_pins;
+ pctldesc->npins = ARRAY_SIZE(upboard_up_pins);
+ pctrl->pctrl_data = &upboard_up_pinctrl_data;
+ break;
+ case UPBOARD_UP2_FPGA:
+ pctldesc->pins = upboard_up2_pins;
+ pctldesc->npins = ARRAY_SIZE(upboard_up2_pins);
+ pctrl->pctrl_data = &upboard_up2_pinctrl_data;
+ break;
+ default:
+ return dev_err_probe(dev, -ENODEV, "Unsupported device type %d\n",
+ fpga->fpga_data->type);
+ }
+
+ dmi_id = dmi_first_match(dmi_platform_info);
+ if (!dmi_id)
+ return dev_err_probe(dev, -ENODEV, "Unsupported board\n");
+
+ board_map = (const struct upboard_pinctrl_map *)dmi_id->driver_data;
+
+ pctldesc->name = dev_name(dev);
+ pctldesc->owner = THIS_MODULE;
+ pctldesc->pctlops = &upboard_pinctrl_ops;
+ pctldesc->pmxops = &upboard_pinmux_ops;
+
+ pctrl->dev = dev;
+
+ pins = devm_kcalloc(dev, pctldesc->npins, sizeof(*pins), GFP_KERNEL);
+ if (!pins)
+ return -ENOMEM;
+
+ /* Initialize pins: each FPGA enable/direction register covers UPBOARD_REGISTER_SIZE pins */
+ for (i = 0; i < pctldesc->npins; i++) {
+ const struct pinctrl_pin_desc *pin_desc = &pctldesc->pins[i];
+ unsigned int regoff = pin_desc->number / UPBOARD_REGISTER_SIZE;
+ unsigned int lsb = pin_desc->number % UPBOARD_REGISTER_SIZE;
+ struct reg_field * const fld_func = pin_desc->drv_data;
+ struct upboard_pin *pin = &pins[i];
+ struct reg_field fldconf = {};
+
+ if (fld_func) {
+ pin->funcbit = devm_regmap_field_alloc(dev, fpga->regmap, *fld_func);
+ if (IS_ERR(pin->funcbit))
+ return PTR_ERR(pin->funcbit);
+ }
+
+ fldconf.reg = UPBOARD_REG_GPIO_EN0 + regoff;
+ fldconf.lsb = lsb;
+ fldconf.msb = lsb;
+ pin->enbit = devm_regmap_field_alloc(dev, fpga->regmap, fldconf);
+ if (IS_ERR(pin->enbit))
+ return PTR_ERR(pin->enbit);
+
+ fldconf.reg = UPBOARD_REG_GPIO_DIR0 + regoff;
+ fldconf.lsb = lsb;
+ fldconf.msb = lsb;
+ pin->dirbit = devm_regmap_field_alloc(dev, fpga->regmap, fldconf);
+ if (IS_ERR(pin->dirbit))
+ return PTR_ERR(pin->dirbit);
+ }
+
+ pctrl->pins = pins;
+
+ ret = devm_pinctrl_register_and_init(dev, pctldesc, pctrl, &pctrl->pctldev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to register pinctrl\n");
+
+ ret = upboard_pinctrl_register_groups(pctrl);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to register groups\n");
+
+ ret = upboard_pinctrl_register_functions(pctrl);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to register functions\n");
+
+ ret = devm_pinctrl_register_mappings(dev, board_map->maps, board_map->nmaps);
+ if (ret)
+ return ret;
+
+ pinctrl = devm_pinctrl_get_select_default(dev);
+ if (IS_ERR(pinctrl))
+ return dev_err_probe(dev, PTR_ERR(pinctrl), "Failed to select pinctrl\n");
+
+ ret = pinctrl_enable(pctrl->pctldev);
+ if (ret)
+ return ret;
+
+ fwd = devm_gpiochip_fwd_alloc(dev, pctrl->pctrl_data->ngpio);
+ if (IS_ERR(fwd))
+ return dev_err_probe(dev, PTR_ERR(fwd), "Failed to allocate the gpiochip forwarder\n");
+
+ chip = gpiochip_fwd_get_gpiochip(fwd);
+ chip->request = upboard_gpio_request;
+ chip->free = upboard_gpio_free;
+ chip->get_direction = upboard_gpio_get_direction;
+ chip->direction_output = upboard_gpio_direction_output;
+ chip->direction_input = upboard_gpio_direction_input;
+
+ ret = gpiochip_fwd_register(fwd, pctrl);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to register the gpiochip forwarder\n");
+
+ return gpiochip_add_sparse_pin_range(chip, dev_name(dev), 0, pctrl->pctrl_data->pin_header,
+ pctrl->pctrl_data->ngpio);
+}
+
+static struct platform_driver upboard_pinctrl_driver = {
+ .driver = {
+ .name = "upboard-pinctrl",
+ },
+ .probe = upboard_pinctrl_probe,
+};
+module_platform_driver(upboard_pinctrl_driver);
+
+MODULE_AUTHOR("Thomas Richard <thomas.richard@bootlin.com");
+MODULE_DESCRIPTION("UP Board HAT pin controller driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:upboard-pinctrl");
+MODULE_IMPORT_NS("GPIO_FORWARDER");
diff --git a/drivers/pinctrl/pinctrl-zynqmp.c b/drivers/pinctrl/pinctrl-zynqmp.c
index fddf0fef4b13..585fe1661e17 100644
--- a/drivers/pinctrl/pinctrl-zynqmp.c
+++ b/drivers/pinctrl/pinctrl-zynqmp.c
@@ -919,7 +919,7 @@ static int versal_pinctrl_prepare_pin_desc(struct device *dev,
if (ret)
return ret;
- pins = devm_kzalloc(dev, sizeof(*pins) * *npins, GFP_KERNEL);
+ pins = devm_kcalloc(dev, *npins, sizeof(*pins), GFP_KERNEL);
if (!pins)
return -ENOMEM;
diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c
index 79814758a084..3a8dd184ba3d 100644
--- a/drivers/pinctrl/pinmux.c
+++ b/drivers/pinctrl/pinmux.c
@@ -89,13 +89,20 @@ bool pinmux_can_be_used_for_gpio(struct pinctrl_dev *pctldev, unsigned int pin)
{
struct pin_desc *desc = pin_desc_get(pctldev, pin);
const struct pinmux_ops *ops = pctldev->desc->pmxops;
+ const struct pinctrl_setting_mux *mux_setting;
+ bool func_is_gpio = false;
/* Can't inspect pin, assume it can be used */
if (!desc || !ops)
return true;
+ mux_setting = desc->mux_setting;
+
guard(mutex)(&desc->mux_lock);
- if (ops->strict && desc->mux_usecount)
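+ /* A pin whose muxed function is flagged as GPIO stays usable as GPIO even in strict mode */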
+ if (mux_setting && ops->function_is_gpio)
+ func_is_gpio = ops->function_is_gpio(pctldev, mux_setting->func);
+
+ if (ops->strict && desc->mux_usecount && !func_is_gpio)
return false;
return !(ops->strict && !!desc->gpio_owner);
@@ -116,7 +123,9 @@ static int pin_request(struct pinctrl_dev *pctldev,
{
struct pin_desc *desc;
const struct pinmux_ops *ops = pctldev->desc->pmxops;
+ const struct pinctrl_setting_mux *mux_setting;
int status = -EINVAL;
+ bool gpio_ok = false;
desc = pin_desc_get(pctldev, pin);
if (desc == NULL) {
@@ -126,11 +135,21 @@ static int pin_request(struct pinctrl_dev *pctldev,
goto out;
}
+ mux_setting = desc->mux_setting;
+
dev_dbg(pctldev->dev, "request pin %d (%s) for %s\n",
pin, desc->name, owner);
scoped_guard(mutex, &desc->mux_lock) {
- if ((!gpio_range || ops->strict) &&
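+ /* Without an active mux setting the pin cannot conflict; otherwise ask the driver whether the muxed function is GPIO-capable */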
+ if (mux_setting) {
+ if (ops->function_is_gpio)
+ gpio_ok = ops->function_is_gpio(pctldev,
+ mux_setting->func);
+ } else {
+ gpio_ok = true;
+ }
+
+ if ((!gpio_range || ops->strict) && !gpio_ok &&
desc->mux_usecount && strcmp(desc->mux_owner, owner)) {
dev_err(pctldev->dev,
"pin %s already requested by %s; cannot claim for %s\n",
@@ -138,7 +157,7 @@ static int pin_request(struct pinctrl_dev *pctldev,
goto out;
}
- if ((gpio_range || ops->strict) && desc->gpio_owner) {
+ if ((gpio_range || ops->strict) && !gpio_ok && desc->gpio_owner) {
dev_err(pctldev->dev,
"pin %s already requested by %s; cannot claim for %s\n",
desc->name, desc->gpio_owner, owner);
@@ -337,7 +356,7 @@ static int pinmux_func_name_to_selector(struct pinctrl_dev *pctldev,
while (selector < nfuncs) {
const char *fname = ops->get_function_name(pctldev, selector);
- if (!strcmp(function, fname))
+ if (fname && !strcmp(function, fname))
return selector;
selector++;
@@ -810,7 +829,7 @@ pinmux_generic_get_function_name(struct pinctrl_dev *pctldev,
if (!function)
return NULL;
- return function->func.name;
+ return function->func->name;
}
EXPORT_SYMBOL_GPL(pinmux_generic_get_function_name);
@@ -835,8 +854,8 @@ int pinmux_generic_get_function_groups(struct pinctrl_dev *pctldev,
__func__, selector);
return -EINVAL;
}
- *groups = function->func.groups;
- *ngroups = function->func.ngroups;
+ *groups = function->func->groups;
+ *ngroups = function->func->ngroups;
return 0;
}
@@ -847,8 +866,8 @@ EXPORT_SYMBOL_GPL(pinmux_generic_get_function_groups);
* @pctldev: pin controller device
* @selector: function number
*/
-struct function_desc *pinmux_generic_get_function(struct pinctrl_dev *pctldev,
- unsigned int selector)
+const struct function_desc *
+pinmux_generic_get_function(struct pinctrl_dev *pctldev, unsigned int selector)
{
struct function_desc *function;
@@ -862,6 +881,27 @@ struct function_desc *pinmux_generic_get_function(struct pinctrl_dev *pctldev,
EXPORT_SYMBOL_GPL(pinmux_generic_get_function);
/**
+ * pinmux_generic_function_is_gpio() - returns true if given function is a GPIO
+ * @pctldev: pin controller device
+ * @selector: function number
+ *
+ * Returns:
+ * True if given function is a GPIO, false otherwise.
+ */
+bool pinmux_generic_function_is_gpio(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct function_desc *function;
+
+ function = radix_tree_lookup(&pctldev->pin_function_tree, selector);
+ if (!function)
+ return false;
+
+ return function->func->flags & PINFUNCTION_FLAG_GPIO;
+}
+EXPORT_SYMBOL_GPL(pinmux_generic_function_is_gpio);
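+
+/*
+ * Hypothetical usage sketch (not taken from an in-tree driver): a pin
+ * controller marks its GPIO function with PINFUNCTION_FLAG_GPIO and
+ * wires this helper into its pinmux_ops:
+ *
+ *	static const struct pinmux_ops foo_pmxops = {
+ *		.set_mux = foo_set_mux,
+ *		.function_is_gpio = pinmux_generic_function_is_gpio,
+ *	};
+ */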
+
+/**
* pinmux_generic_add_function() - adds a function group
* @pctldev: pin controller device
* @name: name of the function
@@ -903,7 +943,17 @@ int pinmux_generic_add_pinfunction(struct pinctrl_dev *pctldev,
if (!function)
return -ENOMEM;
- function->func = *func;
+ /*
+ * FIXME: It's generally a bad idea to use devres in subsystem core
+ * code - managed interfaces are aimed at drivers - but pinctrl already
+ * uses it all over the place so it's a larger piece of technical debt
+ * to fix.
+ */
+ function->func = devm_kmemdup_const(pctldev->dev, func,
+ sizeof(*func), GFP_KERNEL);
+ if (!function->func)
+ return -ENOMEM;
+
function->data = data;
error = radix_tree_insert(&pctldev->pin_function_tree, selector, function);
diff --git a/drivers/pinctrl/pinmux.h b/drivers/pinctrl/pinmux.h
index bdb5be1a636e..4e826c1a5246 100644
--- a/drivers/pinctrl/pinmux.h
+++ b/drivers/pinctrl/pinmux.h
@@ -137,7 +137,7 @@ static inline void pinmux_init_device_debugfs(struct dentry *devroot,
* @data: pin controller driver specific data
*/
struct function_desc {
- struct pinfunction func;
+ const struct pinfunction *func;
void *data;
};
@@ -152,8 +152,8 @@ int pinmux_generic_get_function_groups(struct pinctrl_dev *pctldev,
const char * const **groups,
unsigned int * const ngroups);
-struct function_desc *pinmux_generic_get_function(struct pinctrl_dev *pctldev,
- unsigned int selector);
+const struct function_desc *
+pinmux_generic_get_function(struct pinctrl_dev *pctldev, unsigned int selector);
int pinmux_generic_add_function(struct pinctrl_dev *pctldev,
const char *name,
@@ -169,6 +169,9 @@ int pinmux_generic_remove_function(struct pinctrl_dev *pctldev,
void pinmux_generic_free_functions(struct pinctrl_dev *pctldev);
+bool pinmux_generic_function_is_gpio(struct pinctrl_dev *pctldev,
+ unsigned int selector);
+
#else
static inline void pinmux_generic_free_functions(struct pinctrl_dev *pctldev)
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index dd9bbe8f3e11..c480e8b78503 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -8,6 +8,7 @@ config PINCTRL_MSM
depends on OF
select QCOM_SCM
select PINMUX
+ select GENERIC_PINMUX_FUNCTIONS
select PINCONF
select GENERIC_PINCONF
select GPIOLIB_IRQCHIP
@@ -68,6 +69,16 @@ config PINCTRL_SC7280_LPASS_LPI
Qualcomm Technologies Inc LPASS (Low Power Audio SubSystem) LPI
(Low Power Island) found on the Qualcomm Technologies Inc SC7280 platform.
+config PINCTRL_SDM660_LPASS_LPI
+ tristate "Qualcomm Technologies Inc SDM660 LPASS LPI pin controller driver"
+ depends on GPIOLIB
+ depends on ARM64 || COMPILE_TEST
+ depends on PINCTRL_LPASS_LPI
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm Technologies Inc LPASS (Low Power Audio SubSystem) LPI
+ (Low Power Island) found on the Qualcomm Technologies Inc SDM660 platform.
+
config PINCTRL_SM4250_LPASS_LPI
tristate "Qualcomm Technologies Inc SM4250 LPASS LPI pin controller driver"
depends on ARM64 || COMPILE_TEST
diff --git a/drivers/pinctrl/qcom/Kconfig.msm b/drivers/pinctrl/qcom/Kconfig.msm
index 6dad942b00a3..69a5b47adedc 100644
--- a/drivers/pinctrl/qcom/Kconfig.msm
+++ b/drivers/pinctrl/qcom/Kconfig.msm
@@ -15,6 +15,16 @@ config PINCTRL_APQ8084
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
Qualcomm TLMM block found in the Qualcomm APQ8084 platform.
+config PINCTRL_GLYMUR
+ tristate "Qualcomm Technologies Inc Glymur pin controller driver"
+ depends on ARM64 || COMPILE_TEST
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm Technologies Inc Top Level Mode Multiplexer block (TLMM)
+ block found on the Qualcomm Technologies Inc Glymur platform.
+ Say Y here to compile statically, or M here to compile it as a module.
+ If unsure, say N.
+
config PINCTRL_IPQ4019
tristate "Qualcomm IPQ4019 pin controller driver"
depends on ARM || COMPILE_TEST
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index 2acff520a285..567d3051e760 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -3,6 +3,7 @@
obj-$(CONFIG_PINCTRL_MSM) += pinctrl-msm.o
obj-$(CONFIG_PINCTRL_APQ8064) += pinctrl-apq8064.o
obj-$(CONFIG_PINCTRL_APQ8084) += pinctrl-apq8084.o
+obj-$(CONFIG_PINCTRL_GLYMUR) += pinctrl-glymur.o
obj-$(CONFIG_PINCTRL_IPQ4019) += pinctrl-ipq4019.o
obj-$(CONFIG_PINCTRL_IPQ5018) += pinctrl-ipq5018.o
obj-$(CONFIG_PINCTRL_IPQ8064) += pinctrl-ipq8064.o
@@ -44,6 +45,7 @@ obj-$(CONFIG_PINCTRL_SC7280_LPASS_LPI) += pinctrl-sc7280-lpass-lpi.o
obj-$(CONFIG_PINCTRL_SC8180X) += pinctrl-sc8180x.o
obj-$(CONFIG_PINCTRL_SC8280XP) += pinctrl-sc8280xp.o
obj-$(CONFIG_PINCTRL_SDM660) += pinctrl-sdm660.o
+obj-$(CONFIG_PINCTRL_SDM660_LPASS_LPI) += pinctrl-sdm660-lpass-lpi.o
obj-$(CONFIG_PINCTRL_SDM670) += pinctrl-sdm670.o
obj-$(CONFIG_PINCTRL_SDM845) += pinctrl-sdm845.o
obj-$(CONFIG_PINCTRL_SDX55) += pinctrl-sdx55.o
diff --git a/drivers/pinctrl/qcom/pinctrl-glymur.c b/drivers/pinctrl/qcom/pinctrl-glymur.c
new file mode 100644
index 000000000000..9913f98e9531
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-glymur.c
@@ -0,0 +1,1777 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-msm.h"
+
+#define REG_SIZE 0x1000
+#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11) \
+ { \
+ .grp = PINCTRL_PINGROUP("gpio" #id, \
+ gpio##id##_pins, \
+ ARRAY_SIZE(gpio##id##_pins)), \
+ .ctl_reg = REG_SIZE * id, \
+ .io_reg = 0x4 + REG_SIZE * id, \
+ .intr_cfg_reg = 0x8 + REG_SIZE * id, \
+ .intr_status_reg = 0xc + REG_SIZE * id, \
+ .intr_target_reg = 0x8 + REG_SIZE * id, \
+ .mux_bit = 2, \
+ .pull_bit = 0, \
+ .drv_bit = 6, \
+ .egpio_enable = 12, \
+ .egpio_present = 11, \
+ .oe_bit = 9, \
+ .in_bit = 0, \
+ .out_bit = 1, \
+ .intr_enable_bit = 0, \
+ .intr_status_bit = 0, \
+ .intr_target_bit = 5, \
+ .intr_target_kpss_val = 3, \
+ .intr_raw_status_bit = 4, \
+ .intr_polarity_bit = 1, \
+ .intr_detection_bit = 2, \
+ .intr_detection_width = 2, \
+ .funcs = (int[]){ \
+ msm_mux_gpio, /* gpio mode */ \
+ msm_mux_##f1, \
+ msm_mux_##f2, \
+ msm_mux_##f3, \
+ msm_mux_##f4, \
+ msm_mux_##f5, \
+ msm_mux_##f6, \
+ msm_mux_##f7, \
+ msm_mux_##f8, \
+ msm_mux_##f9, \
+ msm_mux_##f10, \
+ msm_mux_##f11 /* egpio mode */ \
+ }, \
+ .nfuncs = 12, \
+ }
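+
+/*
+ * Each GPIO owns a REG_SIZE (0x1000) register window, so for an
+ * illustrative gpio5 the macro above resolves to ctl_reg = 0x5000,
+ * io_reg = 0x5004, intr_cfg_reg = 0x5008 and intr_status_reg = 0x500c.
+ * funcs[0] is always plain GPIO mode and funcs[11] the egpio mode,
+ * matching the .egpio_* bits here and .egpio_func set further below.
+ */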
+
+#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv) \
+ { \
+ .grp = PINCTRL_PINGROUP(#pg_name, \
+ pg_name##_pins, \
+ ARRAY_SIZE(pg_name##_pins)), \
+ .ctl_reg = ctl, \
+ .io_reg = 0, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = pull, \
+ .drv_bit = drv, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = -1, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+
+#define UFS_RESET(pg_name, ctl, io) \
+ { \
+ .grp = PINCTRL_PINGROUP(#pg_name, \
+ pg_name##_pins, \
+ ARRAY_SIZE(pg_name##_pins)), \
+ .ctl_reg = ctl, \
+ .io_reg = io, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = 3, \
+ .drv_bit = 0, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = 0, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
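+
+/*
+ * The SDC and UFS_RESET pad groups above are fixed-function: they have
+ * no mux and cannot raise interrupts, so the unsupported fields carry
+ * a -1 sentinel and only pull/drive strength (plus the output bit for
+ * ufs_reset) stay programmable, at absolute ctl/io offsets supplied by
+ * the caller rather than offsets derived from a GPIO id.
+ */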
+
+static const struct pinctrl_pin_desc glymur_pins[] = {
+ PINCTRL_PIN(0, "GPIO_0"),
+ PINCTRL_PIN(1, "GPIO_1"),
+ PINCTRL_PIN(2, "GPIO_2"),
+ PINCTRL_PIN(3, "GPIO_3"),
+ PINCTRL_PIN(4, "GPIO_4"),
+ PINCTRL_PIN(5, "GPIO_5"),
+ PINCTRL_PIN(6, "GPIO_6"),
+ PINCTRL_PIN(7, "GPIO_7"),
+ PINCTRL_PIN(8, "GPIO_8"),
+ PINCTRL_PIN(9, "GPIO_9"),
+ PINCTRL_PIN(10, "GPIO_10"),
+ PINCTRL_PIN(11, "GPIO_11"),
+ PINCTRL_PIN(12, "GPIO_12"),
+ PINCTRL_PIN(13, "GPIO_13"),
+ PINCTRL_PIN(14, "GPIO_14"),
+ PINCTRL_PIN(15, "GPIO_15"),
+ PINCTRL_PIN(16, "GPIO_16"),
+ PINCTRL_PIN(17, "GPIO_17"),
+ PINCTRL_PIN(18, "GPIO_18"),
+ PINCTRL_PIN(19, "GPIO_19"),
+ PINCTRL_PIN(20, "GPIO_20"),
+ PINCTRL_PIN(21, "GPIO_21"),
+ PINCTRL_PIN(22, "GPIO_22"),
+ PINCTRL_PIN(23, "GPIO_23"),
+ PINCTRL_PIN(24, "GPIO_24"),
+ PINCTRL_PIN(25, "GPIO_25"),
+ PINCTRL_PIN(26, "GPIO_26"),
+ PINCTRL_PIN(27, "GPIO_27"),
+ PINCTRL_PIN(28, "GPIO_28"),
+ PINCTRL_PIN(29, "GPIO_29"),
+ PINCTRL_PIN(30, "GPIO_30"),
+ PINCTRL_PIN(31, "GPIO_31"),
+ PINCTRL_PIN(32, "GPIO_32"),
+ PINCTRL_PIN(33, "GPIO_33"),
+ PINCTRL_PIN(34, "GPIO_34"),
+ PINCTRL_PIN(35, "GPIO_35"),
+ PINCTRL_PIN(36, "GPIO_36"),
+ PINCTRL_PIN(37, "GPIO_37"),
+ PINCTRL_PIN(38, "GPIO_38"),
+ PINCTRL_PIN(39, "GPIO_39"),
+ PINCTRL_PIN(40, "GPIO_40"),
+ PINCTRL_PIN(41, "GPIO_41"),
+ PINCTRL_PIN(42, "GPIO_42"),
+ PINCTRL_PIN(43, "GPIO_43"),
+ PINCTRL_PIN(44, "GPIO_44"),
+ PINCTRL_PIN(45, "GPIO_45"),
+ PINCTRL_PIN(46, "GPIO_46"),
+ PINCTRL_PIN(47, "GPIO_47"),
+ PINCTRL_PIN(48, "GPIO_48"),
+ PINCTRL_PIN(49, "GPIO_49"),
+ PINCTRL_PIN(50, "GPIO_50"),
+ PINCTRL_PIN(51, "GPIO_51"),
+ PINCTRL_PIN(52, "GPIO_52"),
+ PINCTRL_PIN(53, "GPIO_53"),
+ PINCTRL_PIN(54, "GPIO_54"),
+ PINCTRL_PIN(55, "GPIO_55"),
+ PINCTRL_PIN(56, "GPIO_56"),
+ PINCTRL_PIN(57, "GPIO_57"),
+ PINCTRL_PIN(58, "GPIO_58"),
+ PINCTRL_PIN(59, "GPIO_59"),
+ PINCTRL_PIN(60, "GPIO_60"),
+ PINCTRL_PIN(61, "GPIO_61"),
+ PINCTRL_PIN(62, "GPIO_62"),
+ PINCTRL_PIN(63, "GPIO_63"),
+ PINCTRL_PIN(64, "GPIO_64"),
+ PINCTRL_PIN(65, "GPIO_65"),
+ PINCTRL_PIN(66, "GPIO_66"),
+ PINCTRL_PIN(67, "GPIO_67"),
+ PINCTRL_PIN(68, "GPIO_68"),
+ PINCTRL_PIN(69, "GPIO_69"),
+ PINCTRL_PIN(70, "GPIO_70"),
+ PINCTRL_PIN(71, "GPIO_71"),
+ PINCTRL_PIN(72, "GPIO_72"),
+ PINCTRL_PIN(73, "GPIO_73"),
+ PINCTRL_PIN(74, "GPIO_74"),
+ PINCTRL_PIN(75, "GPIO_75"),
+ PINCTRL_PIN(76, "GPIO_76"),
+ PINCTRL_PIN(77, "GPIO_77"),
+ PINCTRL_PIN(78, "GPIO_78"),
+ PINCTRL_PIN(79, "GPIO_79"),
+ PINCTRL_PIN(80, "GPIO_80"),
+ PINCTRL_PIN(81, "GPIO_81"),
+ PINCTRL_PIN(82, "GPIO_82"),
+ PINCTRL_PIN(83, "GPIO_83"),
+ PINCTRL_PIN(84, "GPIO_84"),
+ PINCTRL_PIN(85, "GPIO_85"),
+ PINCTRL_PIN(86, "GPIO_86"),
+ PINCTRL_PIN(87, "GPIO_87"),
+ PINCTRL_PIN(88, "GPIO_88"),
+ PINCTRL_PIN(89, "GPIO_89"),
+ PINCTRL_PIN(90, "GPIO_90"),
+ PINCTRL_PIN(91, "GPIO_91"),
+ PINCTRL_PIN(92, "GPIO_92"),
+ PINCTRL_PIN(93, "GPIO_93"),
+ PINCTRL_PIN(94, "GPIO_94"),
+ PINCTRL_PIN(95, "GPIO_95"),
+ PINCTRL_PIN(96, "GPIO_96"),
+ PINCTRL_PIN(97, "GPIO_97"),
+ PINCTRL_PIN(98, "GPIO_98"),
+ PINCTRL_PIN(99, "GPIO_99"),
+ PINCTRL_PIN(100, "GPIO_100"),
+ PINCTRL_PIN(101, "GPIO_101"),
+ PINCTRL_PIN(102, "GPIO_102"),
+ PINCTRL_PIN(103, "GPIO_103"),
+ PINCTRL_PIN(104, "GPIO_104"),
+ PINCTRL_PIN(105, "GPIO_105"),
+ PINCTRL_PIN(106, "GPIO_106"),
+ PINCTRL_PIN(107, "GPIO_107"),
+ PINCTRL_PIN(108, "GPIO_108"),
+ PINCTRL_PIN(109, "GPIO_109"),
+ PINCTRL_PIN(110, "GPIO_110"),
+ PINCTRL_PIN(111, "GPIO_111"),
+ PINCTRL_PIN(112, "GPIO_112"),
+ PINCTRL_PIN(113, "GPIO_113"),
+ PINCTRL_PIN(114, "GPIO_114"),
+ PINCTRL_PIN(115, "GPIO_115"),
+ PINCTRL_PIN(116, "GPIO_116"),
+ PINCTRL_PIN(117, "GPIO_117"),
+ PINCTRL_PIN(118, "GPIO_118"),
+ PINCTRL_PIN(119, "GPIO_119"),
+ PINCTRL_PIN(120, "GPIO_120"),
+ PINCTRL_PIN(121, "GPIO_121"),
+ PINCTRL_PIN(122, "GPIO_122"),
+ PINCTRL_PIN(123, "GPIO_123"),
+ PINCTRL_PIN(124, "GPIO_124"),
+ PINCTRL_PIN(125, "GPIO_125"),
+ PINCTRL_PIN(126, "GPIO_126"),
+ PINCTRL_PIN(127, "GPIO_127"),
+ PINCTRL_PIN(128, "GPIO_128"),
+ PINCTRL_PIN(129, "GPIO_129"),
+ PINCTRL_PIN(130, "GPIO_130"),
+ PINCTRL_PIN(131, "GPIO_131"),
+ PINCTRL_PIN(132, "GPIO_132"),
+ PINCTRL_PIN(133, "GPIO_133"),
+ PINCTRL_PIN(134, "GPIO_134"),
+ PINCTRL_PIN(135, "GPIO_135"),
+ PINCTRL_PIN(136, "GPIO_136"),
+ PINCTRL_PIN(137, "GPIO_137"),
+ PINCTRL_PIN(138, "GPIO_138"),
+ PINCTRL_PIN(139, "GPIO_139"),
+ PINCTRL_PIN(140, "GPIO_140"),
+ PINCTRL_PIN(141, "GPIO_141"),
+ PINCTRL_PIN(142, "GPIO_142"),
+ PINCTRL_PIN(143, "GPIO_143"),
+ PINCTRL_PIN(144, "GPIO_144"),
+ PINCTRL_PIN(145, "GPIO_145"),
+ PINCTRL_PIN(146, "GPIO_146"),
+ PINCTRL_PIN(147, "GPIO_147"),
+ PINCTRL_PIN(148, "GPIO_148"),
+ PINCTRL_PIN(149, "GPIO_149"),
+ PINCTRL_PIN(150, "GPIO_150"),
+ PINCTRL_PIN(151, "GPIO_151"),
+ PINCTRL_PIN(152, "GPIO_152"),
+ PINCTRL_PIN(153, "GPIO_153"),
+ PINCTRL_PIN(154, "GPIO_154"),
+ PINCTRL_PIN(155, "GPIO_155"),
+ PINCTRL_PIN(156, "GPIO_156"),
+ PINCTRL_PIN(157, "GPIO_157"),
+ PINCTRL_PIN(158, "GPIO_158"),
+ PINCTRL_PIN(159, "GPIO_159"),
+ PINCTRL_PIN(160, "GPIO_160"),
+ PINCTRL_PIN(161, "GPIO_161"),
+ PINCTRL_PIN(162, "GPIO_162"),
+ PINCTRL_PIN(163, "GPIO_163"),
+ PINCTRL_PIN(164, "GPIO_164"),
+ PINCTRL_PIN(165, "GPIO_165"),
+ PINCTRL_PIN(166, "GPIO_166"),
+ PINCTRL_PIN(167, "GPIO_167"),
+ PINCTRL_PIN(168, "GPIO_168"),
+ PINCTRL_PIN(169, "GPIO_169"),
+ PINCTRL_PIN(170, "GPIO_170"),
+ PINCTRL_PIN(171, "GPIO_171"),
+ PINCTRL_PIN(172, "GPIO_172"),
+ PINCTRL_PIN(173, "GPIO_173"),
+ PINCTRL_PIN(174, "GPIO_174"),
+ PINCTRL_PIN(175, "GPIO_175"),
+ PINCTRL_PIN(176, "GPIO_176"),
+ PINCTRL_PIN(177, "GPIO_177"),
+ PINCTRL_PIN(178, "GPIO_178"),
+ PINCTRL_PIN(179, "GPIO_179"),
+ PINCTRL_PIN(180, "GPIO_180"),
+ PINCTRL_PIN(181, "GPIO_181"),
+ PINCTRL_PIN(182, "GPIO_182"),
+ PINCTRL_PIN(183, "GPIO_183"),
+ PINCTRL_PIN(184, "GPIO_184"),
+ PINCTRL_PIN(185, "GPIO_185"),
+ PINCTRL_PIN(186, "GPIO_186"),
+ PINCTRL_PIN(187, "GPIO_187"),
+ PINCTRL_PIN(188, "GPIO_188"),
+ PINCTRL_PIN(189, "GPIO_189"),
+ PINCTRL_PIN(190, "GPIO_190"),
+ PINCTRL_PIN(191, "GPIO_191"),
+ PINCTRL_PIN(192, "GPIO_192"),
+ PINCTRL_PIN(193, "GPIO_193"),
+ PINCTRL_PIN(194, "GPIO_194"),
+ PINCTRL_PIN(195, "GPIO_195"),
+ PINCTRL_PIN(196, "GPIO_196"),
+ PINCTRL_PIN(197, "GPIO_197"),
+ PINCTRL_PIN(198, "GPIO_198"),
+ PINCTRL_PIN(199, "GPIO_199"),
+ PINCTRL_PIN(200, "GPIO_200"),
+ PINCTRL_PIN(201, "GPIO_201"),
+ PINCTRL_PIN(202, "GPIO_202"),
+ PINCTRL_PIN(203, "GPIO_203"),
+ PINCTRL_PIN(204, "GPIO_204"),
+ PINCTRL_PIN(205, "GPIO_205"),
+ PINCTRL_PIN(206, "GPIO_206"),
+ PINCTRL_PIN(207, "GPIO_207"),
+ PINCTRL_PIN(208, "GPIO_208"),
+ PINCTRL_PIN(209, "GPIO_209"),
+ PINCTRL_PIN(210, "GPIO_210"),
+ PINCTRL_PIN(211, "GPIO_211"),
+ PINCTRL_PIN(212, "GPIO_212"),
+ PINCTRL_PIN(213, "GPIO_213"),
+ PINCTRL_PIN(214, "GPIO_214"),
+ PINCTRL_PIN(215, "GPIO_215"),
+ PINCTRL_PIN(216, "GPIO_216"),
+ PINCTRL_PIN(217, "GPIO_217"),
+ PINCTRL_PIN(218, "GPIO_218"),
+ PINCTRL_PIN(219, "GPIO_219"),
+ PINCTRL_PIN(220, "GPIO_220"),
+ PINCTRL_PIN(221, "GPIO_221"),
+ PINCTRL_PIN(222, "GPIO_222"),
+ PINCTRL_PIN(223, "GPIO_223"),
+ PINCTRL_PIN(224, "GPIO_224"),
+ PINCTRL_PIN(225, "GPIO_225"),
+ PINCTRL_PIN(226, "GPIO_226"),
+ PINCTRL_PIN(227, "GPIO_227"),
+ PINCTRL_PIN(228, "GPIO_228"),
+ PINCTRL_PIN(229, "GPIO_229"),
+ PINCTRL_PIN(230, "GPIO_230"),
+ PINCTRL_PIN(231, "GPIO_231"),
+ PINCTRL_PIN(232, "GPIO_232"),
+ PINCTRL_PIN(233, "GPIO_233"),
+ PINCTRL_PIN(234, "GPIO_234"),
+ PINCTRL_PIN(235, "GPIO_235"),
+ PINCTRL_PIN(236, "GPIO_236"),
+ PINCTRL_PIN(237, "GPIO_237"),
+ PINCTRL_PIN(238, "GPIO_238"),
+ PINCTRL_PIN(239, "GPIO_239"),
+ PINCTRL_PIN(240, "GPIO_240"),
+ PINCTRL_PIN(241, "GPIO_241"),
+ PINCTRL_PIN(242, "GPIO_242"),
+ PINCTRL_PIN(243, "GPIO_243"),
+ PINCTRL_PIN(244, "GPIO_244"),
+ PINCTRL_PIN(245, "GPIO_245"),
+ PINCTRL_PIN(246, "GPIO_246"),
+ PINCTRL_PIN(247, "GPIO_247"),
+ PINCTRL_PIN(248, "GPIO_248"),
+ PINCTRL_PIN(249, "GPIO_249"),
+};
+
+#define DECLARE_MSM_GPIO_PINS(pin) \
+ static const unsigned int gpio##pin##_pins[] = { pin }
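+/* e.g. DECLARE_MSM_GPIO_PINS(0) expands to:
+ *	static const unsigned int gpio0_pins[] = { 0 };
+ */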
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+DECLARE_MSM_GPIO_PINS(88);
+DECLARE_MSM_GPIO_PINS(89);
+DECLARE_MSM_GPIO_PINS(90);
+DECLARE_MSM_GPIO_PINS(91);
+DECLARE_MSM_GPIO_PINS(92);
+DECLARE_MSM_GPIO_PINS(93);
+DECLARE_MSM_GPIO_PINS(94);
+DECLARE_MSM_GPIO_PINS(95);
+DECLARE_MSM_GPIO_PINS(96);
+DECLARE_MSM_GPIO_PINS(97);
+DECLARE_MSM_GPIO_PINS(98);
+DECLARE_MSM_GPIO_PINS(99);
+DECLARE_MSM_GPIO_PINS(100);
+DECLARE_MSM_GPIO_PINS(101);
+DECLARE_MSM_GPIO_PINS(102);
+DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(104);
+DECLARE_MSM_GPIO_PINS(105);
+DECLARE_MSM_GPIO_PINS(106);
+DECLARE_MSM_GPIO_PINS(107);
+DECLARE_MSM_GPIO_PINS(108);
+DECLARE_MSM_GPIO_PINS(109);
+DECLARE_MSM_GPIO_PINS(110);
+DECLARE_MSM_GPIO_PINS(111);
+DECLARE_MSM_GPIO_PINS(112);
+DECLARE_MSM_GPIO_PINS(113);
+DECLARE_MSM_GPIO_PINS(114);
+DECLARE_MSM_GPIO_PINS(115);
+DECLARE_MSM_GPIO_PINS(116);
+DECLARE_MSM_GPIO_PINS(117);
+DECLARE_MSM_GPIO_PINS(118);
+DECLARE_MSM_GPIO_PINS(119);
+DECLARE_MSM_GPIO_PINS(120);
+DECLARE_MSM_GPIO_PINS(121);
+DECLARE_MSM_GPIO_PINS(122);
+DECLARE_MSM_GPIO_PINS(123);
+DECLARE_MSM_GPIO_PINS(124);
+DECLARE_MSM_GPIO_PINS(125);
+DECLARE_MSM_GPIO_PINS(126);
+DECLARE_MSM_GPIO_PINS(127);
+DECLARE_MSM_GPIO_PINS(128);
+DECLARE_MSM_GPIO_PINS(129);
+DECLARE_MSM_GPIO_PINS(130);
+DECLARE_MSM_GPIO_PINS(131);
+DECLARE_MSM_GPIO_PINS(132);
+DECLARE_MSM_GPIO_PINS(133);
+DECLARE_MSM_GPIO_PINS(134);
+DECLARE_MSM_GPIO_PINS(135);
+DECLARE_MSM_GPIO_PINS(136);
+DECLARE_MSM_GPIO_PINS(137);
+DECLARE_MSM_GPIO_PINS(138);
+DECLARE_MSM_GPIO_PINS(139);
+DECLARE_MSM_GPIO_PINS(140);
+DECLARE_MSM_GPIO_PINS(141);
+DECLARE_MSM_GPIO_PINS(142);
+DECLARE_MSM_GPIO_PINS(143);
+DECLARE_MSM_GPIO_PINS(144);
+DECLARE_MSM_GPIO_PINS(145);
+DECLARE_MSM_GPIO_PINS(146);
+DECLARE_MSM_GPIO_PINS(147);
+DECLARE_MSM_GPIO_PINS(148);
+DECLARE_MSM_GPIO_PINS(149);
+DECLARE_MSM_GPIO_PINS(150);
+DECLARE_MSM_GPIO_PINS(151);
+DECLARE_MSM_GPIO_PINS(152);
+DECLARE_MSM_GPIO_PINS(153);
+DECLARE_MSM_GPIO_PINS(154);
+DECLARE_MSM_GPIO_PINS(155);
+DECLARE_MSM_GPIO_PINS(156);
+DECLARE_MSM_GPIO_PINS(157);
+DECLARE_MSM_GPIO_PINS(158);
+DECLARE_MSM_GPIO_PINS(159);
+DECLARE_MSM_GPIO_PINS(160);
+DECLARE_MSM_GPIO_PINS(161);
+DECLARE_MSM_GPIO_PINS(162);
+DECLARE_MSM_GPIO_PINS(163);
+DECLARE_MSM_GPIO_PINS(164);
+DECLARE_MSM_GPIO_PINS(165);
+DECLARE_MSM_GPIO_PINS(166);
+DECLARE_MSM_GPIO_PINS(167);
+DECLARE_MSM_GPIO_PINS(168);
+DECLARE_MSM_GPIO_PINS(169);
+DECLARE_MSM_GPIO_PINS(170);
+DECLARE_MSM_GPIO_PINS(171);
+DECLARE_MSM_GPIO_PINS(172);
+DECLARE_MSM_GPIO_PINS(173);
+DECLARE_MSM_GPIO_PINS(174);
+DECLARE_MSM_GPIO_PINS(175);
+DECLARE_MSM_GPIO_PINS(176);
+DECLARE_MSM_GPIO_PINS(177);
+DECLARE_MSM_GPIO_PINS(178);
+DECLARE_MSM_GPIO_PINS(179);
+DECLARE_MSM_GPIO_PINS(180);
+DECLARE_MSM_GPIO_PINS(181);
+DECLARE_MSM_GPIO_PINS(182);
+DECLARE_MSM_GPIO_PINS(183);
+DECLARE_MSM_GPIO_PINS(184);
+DECLARE_MSM_GPIO_PINS(185);
+DECLARE_MSM_GPIO_PINS(186);
+DECLARE_MSM_GPIO_PINS(187);
+DECLARE_MSM_GPIO_PINS(188);
+DECLARE_MSM_GPIO_PINS(189);
+DECLARE_MSM_GPIO_PINS(190);
+DECLARE_MSM_GPIO_PINS(191);
+DECLARE_MSM_GPIO_PINS(192);
+DECLARE_MSM_GPIO_PINS(193);
+DECLARE_MSM_GPIO_PINS(194);
+DECLARE_MSM_GPIO_PINS(195);
+DECLARE_MSM_GPIO_PINS(196);
+DECLARE_MSM_GPIO_PINS(197);
+DECLARE_MSM_GPIO_PINS(198);
+DECLARE_MSM_GPIO_PINS(199);
+DECLARE_MSM_GPIO_PINS(200);
+DECLARE_MSM_GPIO_PINS(201);
+DECLARE_MSM_GPIO_PINS(202);
+DECLARE_MSM_GPIO_PINS(203);
+DECLARE_MSM_GPIO_PINS(204);
+DECLARE_MSM_GPIO_PINS(205);
+DECLARE_MSM_GPIO_PINS(206);
+DECLARE_MSM_GPIO_PINS(207);
+DECLARE_MSM_GPIO_PINS(208);
+DECLARE_MSM_GPIO_PINS(209);
+DECLARE_MSM_GPIO_PINS(210);
+DECLARE_MSM_GPIO_PINS(211);
+DECLARE_MSM_GPIO_PINS(212);
+DECLARE_MSM_GPIO_PINS(213);
+DECLARE_MSM_GPIO_PINS(214);
+DECLARE_MSM_GPIO_PINS(215);
+DECLARE_MSM_GPIO_PINS(216);
+DECLARE_MSM_GPIO_PINS(217);
+DECLARE_MSM_GPIO_PINS(218);
+DECLARE_MSM_GPIO_PINS(219);
+DECLARE_MSM_GPIO_PINS(220);
+DECLARE_MSM_GPIO_PINS(221);
+DECLARE_MSM_GPIO_PINS(222);
+DECLARE_MSM_GPIO_PINS(223);
+DECLARE_MSM_GPIO_PINS(224);
+DECLARE_MSM_GPIO_PINS(225);
+DECLARE_MSM_GPIO_PINS(226);
+DECLARE_MSM_GPIO_PINS(227);
+DECLARE_MSM_GPIO_PINS(228);
+DECLARE_MSM_GPIO_PINS(229);
+DECLARE_MSM_GPIO_PINS(230);
+DECLARE_MSM_GPIO_PINS(231);
+DECLARE_MSM_GPIO_PINS(232);
+DECLARE_MSM_GPIO_PINS(233);
+DECLARE_MSM_GPIO_PINS(234);
+DECLARE_MSM_GPIO_PINS(235);
+DECLARE_MSM_GPIO_PINS(236);
+DECLARE_MSM_GPIO_PINS(237);
+DECLARE_MSM_GPIO_PINS(238);
+DECLARE_MSM_GPIO_PINS(239);
+DECLARE_MSM_GPIO_PINS(240);
+DECLARE_MSM_GPIO_PINS(241);
+DECLARE_MSM_GPIO_PINS(242);
+DECLARE_MSM_GPIO_PINS(243);
+DECLARE_MSM_GPIO_PINS(244);
+DECLARE_MSM_GPIO_PINS(245);
+DECLARE_MSM_GPIO_PINS(246);
+DECLARE_MSM_GPIO_PINS(247);
+DECLARE_MSM_GPIO_PINS(248);
+DECLARE_MSM_GPIO_PINS(249);
+
+static const unsigned int ufs_reset_pins[] = { 250 };
+static const unsigned int sdc2_clk_pins[] = { 251 };
+static const unsigned int sdc2_cmd_pins[] = { 252 };
+static const unsigned int sdc2_data_pins[] = { 253 };
+
+enum glymur_functions {
+ msm_mux_gpio,
+ msm_mux_resout_gpio_n,
+ msm_mux_aoss_cti,
+ msm_mux_asc_cci,
+ msm_mux_atest_char,
+ msm_mux_atest_usb,
+ msm_mux_audio_ext_mclk0,
+ msm_mux_audio_ext_mclk1,
+ msm_mux_audio_ref_clk,
+ msm_mux_cam_asc_mclk4,
+ msm_mux_cam_mclk,
+ msm_mux_cci_async_in,
+ msm_mux_cci_i2c_scl,
+ msm_mux_cci_i2c_sda,
+ msm_mux_cci_timer,
+ msm_mux_cmu_rng,
+ msm_mux_cri_trng,
+ msm_mux_dbg_out_clk,
+ msm_mux_ddr_bist_complete,
+ msm_mux_ddr_bist_fail,
+ msm_mux_ddr_bist_start,
+ msm_mux_ddr_bist_stop,
+ msm_mux_ddr_pxi,
+ msm_mux_edp0_hot,
+ msm_mux_edp0_lcd,
+ msm_mux_edp1_lcd,
+ msm_mux_egpio,
+ msm_mux_eusb_ac_en,
+ msm_mux_gcc_gp1,
+ msm_mux_gcc_gp2,
+ msm_mux_gcc_gp3,
+ msm_mux_host2wlan_sol,
+ msm_mux_i2c0_s_scl,
+ msm_mux_i2c0_s_sda,
+ msm_mux_i2s0_data,
+ msm_mux_i2s0_sck,
+ msm_mux_i2s0_ws,
+ msm_mux_i2s1_data,
+ msm_mux_i2s1_sck,
+ msm_mux_i2s1_ws,
+ msm_mux_ibi_i3c,
+ msm_mux_jitter_bist,
+ msm_mux_mdp_vsync_out,
+ msm_mux_mdp_vsync_e,
+ msm_mux_mdp_vsync_p,
+ msm_mux_mdp_vsync_s,
+ msm_mux_pcie3a_clk,
+ msm_mux_pcie3a_rst_n,
+ msm_mux_pcie3b_clk,
+ msm_mux_pcie4_clk_req_n,
+ msm_mux_pcie5_clk_req_n,
+ msm_mux_pcie6_clk_req_n,
+ msm_mux_phase_flag,
+ msm_mux_pll_bist_sync,
+ msm_mux_pll_clk_aux,
+ msm_mux_pmc_oca_n,
+ msm_mux_pmc_uva_n,
+ msm_mux_prng_rosc,
+ msm_mux_qdss_cti,
+ msm_mux_qdss_gpio,
+ msm_mux_qspi0,
+ msm_mux_qup0_se0,
+ msm_mux_qup0_se1,
+ msm_mux_qup0_se2,
+ msm_mux_qup0_se3,
+ msm_mux_qup0_se4,
+ msm_mux_qup0_se5,
+ msm_mux_qup0_se6,
+ msm_mux_qup0_se7,
+ msm_mux_qup1_se0,
+ msm_mux_qup1_se1,
+ msm_mux_qup1_se2,
+ msm_mux_qup1_se3,
+ msm_mux_qup1_se4,
+ msm_mux_qup1_se5,
+ msm_mux_qup1_se6,
+ msm_mux_qup1_se7,
+ msm_mux_qup2_se0,
+ msm_mux_qup2_se1,
+ msm_mux_qup2_se2,
+ msm_mux_qup2_se3,
+ msm_mux_qup2_se4,
+ msm_mux_qup2_se5,
+ msm_mux_qup2_se6,
+ msm_mux_qup2_se7,
+ msm_mux_qup3_se0,
+ msm_mux_qup3_se1,
+ msm_mux_sd_write_protect,
+ msm_mux_sdc4_clk,
+ msm_mux_sdc4_cmd,
+ msm_mux_sdc4_data,
+ msm_mux_smb_acok_n,
+ msm_mux_sys_throttle,
+ msm_mux_tb_trig_sdc2,
+ msm_mux_tb_trig_sdc4,
+ msm_mux_tmess_prng,
+ msm_mux_tsense_pwm,
+ msm_mux_tsense_therm,
+ msm_mux_usb0_dp,
+ msm_mux_usb0_phy_ps,
+ msm_mux_usb0_sbrx,
+ msm_mux_usb0_sbtx,
+ msm_mux_usb0_tmu,
+ msm_mux_usb1_dbg,
+ msm_mux_usb1_dp,
+ msm_mux_usb1_phy_ps,
+ msm_mux_usb1_sbrx,
+ msm_mux_usb1_sbtx,
+ msm_mux_usb1_tmu,
+ msm_mux_usb2_dp,
+ msm_mux_usb2_phy_ps,
+ msm_mux_usb2_sbrx,
+ msm_mux_usb2_sbtx,
+ msm_mux_usb2_tmu,
+ msm_mux_vsense_trigger_mirnat,
+ msm_mux_wcn_sw,
+ msm_mux_wcn_sw_ctrl,
+ msm_mux__,
+};
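+
+/*
+ * A bare "_" argument in the PINGROUP() tables below token-pastes into
+ * msm_mux__, the placeholder entry above, marking a function slot that
+ * is unused on that pin.
+ */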
+
+static const char *const gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5",
+ "gpio6", "gpio7", "gpio8", "gpio9", "gpio10", "gpio11",
+ "gpio12", "gpio13", "gpio14", "gpio15", "gpio16", "gpio17",
+ "gpio18", "gpio19", "gpio20", "gpio21", "gpio22", "gpio23",
+ "gpio24", "gpio25", "gpio26", "gpio27", "gpio28", "gpio29",
+ "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+ "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41",
+ "gpio42", "gpio43", "gpio44", "gpio45", "gpio46", "gpio47",
+ "gpio48", "gpio49", "gpio50", "gpio51", "gpio52", "gpio53",
+ "gpio54", "gpio55", "gpio56", "gpio57", "gpio58", "gpio59",
+ "gpio60", "gpio61", "gpio62", "gpio63", "gpio64", "gpio65",
+ "gpio66", "gpio67", "gpio68", "gpio69", "gpio70", "gpio71",
+ "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+ "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83",
+ "gpio84", "gpio85", "gpio86", "gpio87", "gpio88", "gpio89",
+ "gpio90", "gpio91", "gpio92", "gpio93", "gpio94", "gpio95",
+ "gpio96", "gpio97", "gpio98", "gpio99", "gpio100", "gpio101",
+ "gpio102", "gpio103", "gpio104", "gpio105", "gpio106", "gpio107",
+ "gpio108", "gpio109", "gpio110", "gpio111", "gpio112", "gpio113",
+ "gpio114", "gpio115", "gpio116", "gpio117", "gpio118", "gpio119",
+ "gpio120", "gpio121", "gpio122", "gpio123", "gpio124", "gpio125",
+ "gpio126", "gpio127", "gpio128", "gpio129", "gpio130", "gpio131",
+ "gpio132", "gpio133", "gpio134", "gpio135", "gpio136", "gpio137",
+ "gpio138", "gpio139", "gpio140", "gpio141", "gpio142", "gpio143",
+ "gpio144", "gpio145", "gpio146", "gpio147", "gpio148", "gpio149",
+ "gpio150", "gpio151", "gpio152", "gpio153", "gpio154", "gpio155",
+ "gpio156", "gpio157", "gpio158", "gpio159", "gpio160", "gpio161",
+ "gpio162", "gpio163", "gpio164", "gpio165", "gpio166", "gpio167",
+ "gpio168", "gpio169", "gpio170", "gpio171", "gpio172", "gpio173",
+ "gpio174", "gpio175", "gpio176", "gpio177", "gpio178", "gpio179",
+ "gpio180", "gpio181", "gpio182", "gpio183", "gpio184", "gpio185",
+ "gpio186", "gpio187", "gpio188", "gpio189", "gpio190", "gpio191",
+ "gpio192", "gpio193", "gpio194", "gpio195", "gpio196", "gpio197",
+ "gpio198", "gpio199", "gpio200", "gpio201", "gpio202", "gpio203",
+ "gpio204", "gpio205", "gpio206", "gpio207", "gpio208", "gpio209",
+ "gpio210", "gpio211", "gpio212", "gpio213", "gpio214", "gpio215",
+ "gpio216", "gpio217", "gpio218", "gpio219", "gpio220", "gpio221",
+ "gpio222", "gpio223", "gpio224", "gpio225", "gpio226", "gpio227",
+ "gpio228", "gpio229", "gpio230", "gpio231", "gpio232", "gpio233",
+ "gpio234", "gpio235", "gpio236", "gpio237", "gpio238", "gpio239",
+ "gpio240", "gpio241", "gpio242", "gpio243", "gpio244", "gpio245",
+ "gpio246", "gpio247", "gpio248", "gpio249",
+};
+
+static const char *const resout_gpio_n_groups[] = {
+ "gpio160",
+};
+
+static const char *const aoss_cti_groups[] = {
+ "gpio60",
+ "gpio61",
+ "gpio62",
+ "gpio63",
+};
+
+static const char *const asc_cci_groups[] = {
+ "gpio235",
+ "gpio236",
+};
+
+static const char *const atest_char_groups[] = {
+ "gpio172", "gpio184", "gpio188", "gpio164",
+ "gpio163",
+};
+
+static const char *const atest_usb_groups[] = {
+ "gpio39", "gpio40", "gpio41", "gpio38",
+ "gpio44", "gpio45", "gpio42", "gpio43",
+ "gpio49", "gpio50", "gpio51", "gpio48",
+ "gpio54", "gpio55", "gpio52", "gpio53",
+ "gpio65", "gpio66", "gpio46", "gpio47",
+ "gpio72", "gpio73", "gpio80", "gpio81",
+};
+
+static const char *const audio_ext_mclk0_groups[] = {
+ "gpio134",
+};
+
+static const char *const audio_ext_mclk1_groups[] = {
+ "gpio142",
+};
+
+static const char *const audio_ref_clk_groups[] = {
+ "gpio142",
+};
+
+static const char *const cam_asc_mclk4_groups[] = {
+ "gpio100",
+};
+
+static const char *const cam_mclk_groups[] = {
+ "gpio96",
+ "gpio97",
+ "gpio98",
+ "gpio99",
+};
+
+static const char *const cci_async_in_groups[] = {
+ "gpio113", "gpio112", "gpio111",
+};
+
+static const char *const cci_i2c_scl_groups[] = {
+ "gpio102", "gpio104", "gpio106",
+};
+
+static const char *const cci_i2c_sda_groups[] = {
+ "gpio101", "gpio103", "gpio105",
+};
+
+static const char *const cci_timer_groups[] = {
+ "gpio109", "gpio110", "gpio111", "gpio112",
+ "gpio113",
+};
+
+static const char *const cmu_rng_groups[] = {
+ "gpio48", "gpio47", "gpio46", "gpio45",
+};
+
+static const char *const cri_trng_groups[] = {
+ "gpio173",
+};
+
+static const char *const dbg_out_clk_groups[] = {
+ "gpio51",
+};
+
+static const char *const ddr_bist_complete_groups[] = {
+ "gpio57",
+};
+
+static const char *const ddr_bist_fail_groups[] = {
+ "gpio56",
+};
+
+static const char *const ddr_bist_start_groups[] = {
+ "gpio54",
+};
+
+static const char *const ddr_bist_stop_groups[] = {
+ "gpio55",
+};
+
+static const char *const ddr_pxi_groups[] = {
+ "gpio38", "gpio39", "gpio40", "gpio41",
+ "gpio72", "gpio73", "gpio80", "gpio81",
+ "gpio42", "gpio43", "gpio44", "gpio45",
+ "gpio46", "gpio47", "gpio48", "gpio49",
+ "gpio50", "gpio51", "gpio52", "gpio53",
+ "gpio54", "gpio55", "gpio65", "gpio66",
+};
+
+static const char *const edp0_hot_groups[] = {
+ "gpio119",
+};
+
+static const char *const edp0_lcd_groups[] = {
+ "gpio120",
+};
+
+static const char *const edp1_lcd_groups[] = {
+ "gpio115",
+ "gpio119",
+};
+
+static const char *const egpio_groups[] = {
+ "gpio192", "gpio193", "gpio194", "gpio195", "gpio196", "gpio197",
+ "gpio198", "gpio199", "gpio200", "gpio201", "gpio202", "gpio203",
+ "gpio204", "gpio205", "gpio206", "gpio207", "gpio208", "gpio209",
+ "gpio210", "gpio211", "gpio212", "gpio213", "gpio214", "gpio215",
+ "gpio216", "gpio217", "gpio218", "gpio219", "gpio220", "gpio221",
+ "gpio222", "gpio223", "gpio224", "gpio225", "gpio226", "gpio227",
+ "gpio228", "gpio229", "gpio230", "gpio231", "gpio232", "gpio233",
+ "gpio234", "gpio235", "gpio236", "gpio237", "gpio238", "gpio239",
+ "gpio240", "gpio241", "gpio242", "gpio243", "gpio244",
+};
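+
+/*
+ * gpio192..gpio244 are eGPIO pads; selecting function slot 11 (see the
+ * .egpio_* bits in PINGROUP() above) hands the pad over between the
+ * TLMM and a low-power island subsystem. This note reflects the usual
+ * Qualcomm eGPIO arrangement, not Glymur-specific documentation.
+ */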
+
+static const char *const eusb_ac_en_groups[] = {
+ "gpio168", "gpio177", "gpio186", "gpio69",
+ "gpio187", "gpio178",
+};
+
+static const char *const gcc_gp1_groups[] = {
+ "gpio71",
+ "gpio72",
+};
+
+static const char *const gcc_gp2_groups[] = {
+ "gpio64",
+ "gpio73",
+};
+
+static const char *const gcc_gp3_groups[] = {
+ "gpio74",
+ "gpio82",
+};
+
+static const char *const host2wlan_sol_groups[] = {
+ "gpio118",
+};
+
+static const char *const i2c0_s_scl_groups[] = {
+ "gpio7",
+};
+
+static const char *const i2c0_s_sda_groups[] = {
+ "gpio6",
+};
+
+static const char *const i2s0_data_groups[] = {
+ "gpio136", "gpio137",
+};
+
+static const char *const i2s0_sck_groups[] = {
+ "gpio135",
+};
+
+static const char *const i2s0_ws_groups[] = {
+ "gpio138",
+};
+
+static const char *const i2s1_data_groups[] = {
+ "gpio140", "gpio142",
+};
+
+static const char *const i2s1_sck_groups[] = {
+ "gpio139",
+};
+
+static const char *const i2s1_ws_groups[] = {
+ "gpio141",
+};
+
+static const char *const ibi_i3c_groups[] = {
+ "gpio0", "gpio1", "gpio4", "gpio5", "gpio32", "gpio33",
+ "gpio36", "gpio37", "gpio64", "gpio65", "gpio68", "gpio69",
+};
+
+static const char *const jitter_bist_groups[] = {
+ "gpio52",
+};
+
+static const char *const mdp_vsync_out_groups[] = {
+ "gpio114", "gpio114", "gpio115", "gpio115",
+ "gpio109", "gpio110", "gpio111", "gpio112",
+ "gpio113",
+};
+
+static const char *const mdp_vsync_e_groups[] = {
+ "gpio106",
+};
+
+static const char *const mdp_vsync_p_groups[] = {
+ "gpio98",
+};
+
+static const char *const mdp_vsync_s_groups[] = {
+ "gpio105",
+};
+
+static const char *const pcie3a_clk_groups[] = {
+ "gpio144",
+};
+
+static const char *const pcie3a_rst_n_groups[] = {
+ "gpio143",
+};
+
+static const char *const pcie3b_clk_groups[] = {
+ "gpio156",
+};
+
+static const char *const pcie4_clk_req_n_groups[] = {
+ "gpio147",
+};
+
+static const char *const pcie5_clk_req_n_groups[] = {
+ "gpio153",
+};
+
+static const char *const pcie6_clk_req_n_groups[] = {
+ "gpio150",
+};
+
+static const char *const phase_flag_groups[] = {
+ "gpio6", "gpio7", "gpio16", "gpio17",
+ "gpio18", "gpio19", "gpio20", "gpio21",
+ "gpio22", "gpio23", "gpio24", "gpio25",
+ "gpio8", "gpio26", "gpio27", "gpio163",
+ "gpio164", "gpio188", "gpio184", "gpio172",
+ "gpio186", "gpio173", "gpio76", "gpio9",
+ "gpio77", "gpio78", "gpio10", "gpio11",
+ "gpio12", "gpio13", "gpio14", "gpio15",
+};
+
+static const char *const pll_bist_sync_groups[] = {
+ "gpio28",
+};
+
+static const char *const pll_clk_aux_groups[] = {
+ "gpio35",
+};
+
+static const char *const pmc_oca_n_groups[] = {
+ "gpio249",
+};
+
+static const char *const pmc_uva_n_groups[] = {
+ "gpio248",
+};
+
+static const char *const prng_rosc_groups[] = {
+ "gpio186", "gpio188", "gpio164", "gpio163",
+};
+
+static const char *const qdss_cti_groups[] = {
+ "gpio18", "gpio19", "gpio23", "gpio27",
+ "gpio161", "gpio162", "gpio215", "gpio217",
+};
+
+static const char *const qdss_gpio_groups[] = {
+ "gpio104", "gpio151", "gpio227", "gpio228",
+ "gpio96", "gpio219", "gpio97", "gpio220",
+ "gpio108", "gpio231", "gpio109", "gpio232",
+ "gpio110", "gpio233", "gpio111", "gpio234",
+ "gpio112", "gpio235", "gpio113", "gpio236",
+ "gpio149", "gpio221", "gpio99", "gpio222",
+ "gpio100", "gpio223", "gpio101", "gpio224",
+ "gpio102", "gpio225", "gpio103", "gpio226",
+ "gpio152", "gpio237", "gpio107", "gpio238",
+};
+
+static const char *const qspi0_groups[] = {
+ "gpio127", "gpio132", "gpio133", "gpio128",
+ "gpio129", "gpio130", "gpio131",
+};
+
+static const char *const qup0_se0_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3",
+};
+
+static const char *const qup0_se1_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7",
+};
+
+static const char *const qup0_se2_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio11",
+ "gpio17", "gpio18", "gpio19",
+};
+
+static const char *const qup0_se3_groups[] = {
+ "gpio12", "gpio13", "gpio14", "gpio15",
+ "gpio21", "gpio22", "gpio23",
+};
+
+static const char *const qup0_se4_groups[] = {
+ "gpio16", "gpio17", "gpio18", "gpio19",
+};
+
+static const char *const qup0_se5_groups[] = {
+ "gpio20", "gpio21", "gpio22", "gpio23",
+};
+
+static const char *const qup0_se6_groups[] = {
+ "gpio6", "gpio7", "gpio4", "gpio5",
+};
+
+static const char *const qup0_se7_groups[] = {
+ "gpio14", "gpio15", "gpio12", "gpio13",
+};
+
+static const char *const qup1_se0_groups[] = {
+ "gpio32", "gpio33", "gpio34", "gpio35",
+};
+
+static const char *const qup1_se1_groups[] = {
+ "gpio36", "gpio37", "gpio38", "gpio39",
+};
+
+static const char *const qup1_se2_groups[] = {
+ "gpio40", "gpio41", "gpio42", "gpio43",
+ "gpio49", "gpio50", "gpio51",
+};
+
+static const char *const qup1_se3_groups[] = {
+ "gpio44", "gpio45", "gpio46", "gpio47",
+ "gpio33", "gpio34", "gpio35",
+};
+
+static const char *const qup1_se4_groups[] = {
+ "gpio48", "gpio49", "gpio50", "gpio51",
+};
+
+static const char *const qup1_se5_groups[] = {
+ "gpio52", "gpio53", "gpio54", "gpio55",
+};
+
+static const char *const qup1_se6_groups[] = {
+ "gpio56", "gpio57", "gpio58", "gpio59",
+};
+
+static const char *const qup1_se7_groups[] = {
+ "gpio54", "gpio55", "gpio52", "gpio53",
+};
+
+static const char *const qup2_se0_groups[] = {
+ "gpio64", "gpio65", "gpio66", "gpio67",
+};
+
+static const char *const qup2_se1_groups[] = {
+ "gpio68", "gpio69", "gpio70", "gpio71",
+};
+
+static const char *const qup2_se2_groups[] = {
+ "gpio72", "gpio73", "gpio74", "gpio75",
+ "gpio81", "gpio82", "gpio83",
+};
+
+static const char *const qup2_se3_groups[] = {
+ "gpio76", "gpio77", "gpio78", "gpio79",
+ "gpio65", "gpio66", "gpio67",
+};
+
+static const char *const qup2_se4_groups[] = {
+ "gpio80", "gpio81", "gpio82", "gpio83",
+};
+
+static const char *const qup2_se5_groups[] = {
+ "gpio84", "gpio85", "gpio86", "gpio87",
+};
+
+static const char *const qup2_se6_groups[] = {
+ "gpio88", "gpio89", "gpio90", "gpio91",
+};
+
+static const char *const qup2_se7_groups[] = {
+ "gpio80", "gpio81", "gpio82", "gpio83",
+};
+
+static const char *const qup3_se0_groups[] = {
+ "gpio128", "gpio129", "gpio127", "gpio132",
+ "gpio130", "gpio131", "gpio133", "gpio247",
+};
+
+static const char *const qup3_se1_groups[] = {
+ "gpio40", "gpio41", "gpio42", "gpio43",
+ "gpio49", "gpio50", "gpio51", "gpio48",
+};
+
+static const char *const sd_write_protect_groups[] = {
+ "gpio162",
+};
+
+static const char *const sdc4_clk_groups[] = {
+ "gpio127",
+};
+
+static const char *const sdc4_cmd_groups[] = {
+ "gpio132",
+};
+
+static const char *const sdc4_data_groups[] = {
+ "gpio128",
+ "gpio129",
+ "gpio130",
+ "gpio131",
+};
+
+static const char *const smb_acok_n_groups[] = {
+ "gpio245",
+};
+
+static const char *const sys_throttle_groups[] = {
+ "gpio39",
+ "gpio94",
+};
+
+static const char *const tb_trig_sdc2_groups[] = {
+ "gpio137",
+};
+
+static const char *const tb_trig_sdc4_groups[] = {
+ "gpio133",
+};
+
+static const char *const tmess_prng_groups[] = {
+ "gpio92", "gpio93", "gpio94", "gpio95",
+};
+
+static const char *const tsense_pwm_groups[] = {
+ "gpio28", "gpio29", "gpio30", "gpio31",
+ "gpio34", "gpio138", "gpio139", "gpio140",
+};
+
+static const char *const tsense_therm_groups[] = {
+ "gpio141",
+};
+
+static const char *const usb0_dp_groups[] = {
+ "gpio122",
+};
+
+static const char *const usb0_phy_ps_groups[] = {
+ "gpio121",
+};
+
+static const char *const usb0_sbrx_groups[] = {
+ "gpio163",
+};
+
+static const char *const usb0_sbtx_groups[] = {
+ "gpio164",
+ "gpio165",
+};
+
+static const char *const usb0_tmu_groups[] = {
+ "gpio98",
+};
+
+static const char *const usb1_dbg_groups[] = {
+ "gpio105",
+ "gpio106",
+};
+
+static const char *const usb1_dp_groups[] = {
+ "gpio124",
+};
+
+static const char *const usb1_phy_ps_groups[] = {
+ "gpio123",
+};
+
+static const char *const usb1_sbrx_groups[] = {
+ "gpio172",
+};
+
+static const char *const usb1_sbtx_groups[] = {
+ "gpio173",
+ "gpio174",
+};
+
+static const char *const usb1_tmu_groups[] = {
+ "gpio98",
+};
+
+static const char *const usb2_dp_groups[] = {
+ "gpio126",
+};
+
+static const char *const usb2_phy_ps_groups[] = {
+ "gpio125",
+};
+
+static const char *const usb2_sbrx_groups[] = {
+ "gpio181",
+};
+
+static const char *const usb2_sbtx_groups[] = {
+ "gpio182",
+ "gpio183",
+};
+
+static const char *const usb2_tmu_groups[] = {
+ "gpio98",
+};
+
+static const char *const vsense_trigger_mirnat_groups[] = {
+ "gpio38",
+};
+
+static const char *const wcn_sw_groups[] = {
+ "gpio221",
+};
+
+static const char *const wcn_sw_ctrl_groups[] = {
+ "gpio214",
+};
+
+static const struct pinfunction glymur_functions[] = {
+ MSM_PIN_FUNCTION(gpio),
+ MSM_PIN_FUNCTION(resout_gpio_n),
+ MSM_PIN_FUNCTION(aoss_cti),
+ MSM_PIN_FUNCTION(asc_cci),
+ MSM_PIN_FUNCTION(atest_char),
+ MSM_PIN_FUNCTION(atest_usb),
+ MSM_PIN_FUNCTION(audio_ext_mclk0),
+ MSM_PIN_FUNCTION(audio_ext_mclk1),
+ MSM_PIN_FUNCTION(audio_ref_clk),
+ MSM_PIN_FUNCTION(cam_asc_mclk4),
+ MSM_PIN_FUNCTION(cam_mclk),
+ MSM_PIN_FUNCTION(cci_async_in),
+ MSM_PIN_FUNCTION(cci_i2c_scl),
+ MSM_PIN_FUNCTION(cci_i2c_sda),
+ MSM_PIN_FUNCTION(cci_timer),
+ MSM_PIN_FUNCTION(cmu_rng),
+ MSM_PIN_FUNCTION(cri_trng),
+ MSM_PIN_FUNCTION(dbg_out_clk),
+ MSM_PIN_FUNCTION(ddr_bist_complete),
+ MSM_PIN_FUNCTION(ddr_bist_fail),
+ MSM_PIN_FUNCTION(ddr_bist_start),
+ MSM_PIN_FUNCTION(ddr_bist_stop),
+ MSM_PIN_FUNCTION(ddr_pxi),
+ MSM_PIN_FUNCTION(edp0_hot),
+ MSM_PIN_FUNCTION(edp0_lcd),
+ MSM_PIN_FUNCTION(edp1_lcd),
+ MSM_PIN_FUNCTION(egpio),
+ MSM_PIN_FUNCTION(eusb_ac_en),
+ MSM_PIN_FUNCTION(gcc_gp1),
+ MSM_PIN_FUNCTION(gcc_gp2),
+ MSM_PIN_FUNCTION(gcc_gp3),
+ MSM_PIN_FUNCTION(host2wlan_sol),
+ MSM_PIN_FUNCTION(i2c0_s_scl),
+ MSM_PIN_FUNCTION(i2c0_s_sda),
+ MSM_PIN_FUNCTION(i2s0_data),
+ MSM_PIN_FUNCTION(i2s0_sck),
+ MSM_PIN_FUNCTION(i2s0_ws),
+ MSM_PIN_FUNCTION(i2s1_data),
+ MSM_PIN_FUNCTION(i2s1_sck),
+ MSM_PIN_FUNCTION(i2s1_ws),
+ MSM_PIN_FUNCTION(ibi_i3c),
+ MSM_PIN_FUNCTION(jitter_bist),
+ MSM_PIN_FUNCTION(mdp_vsync_out),
+ MSM_PIN_FUNCTION(mdp_vsync_e),
+ MSM_PIN_FUNCTION(mdp_vsync_p),
+ MSM_PIN_FUNCTION(mdp_vsync_s),
+ MSM_PIN_FUNCTION(pcie3a_clk),
+ MSM_PIN_FUNCTION(pcie3a_rst_n),
+ MSM_PIN_FUNCTION(pcie3b_clk),
+ MSM_PIN_FUNCTION(pcie4_clk_req_n),
+ MSM_PIN_FUNCTION(pcie5_clk_req_n),
+ MSM_PIN_FUNCTION(pcie6_clk_req_n),
+ MSM_PIN_FUNCTION(phase_flag),
+ MSM_PIN_FUNCTION(pll_bist_sync),
+ MSM_PIN_FUNCTION(pll_clk_aux),
+ MSM_PIN_FUNCTION(pmc_oca_n),
+ MSM_PIN_FUNCTION(pmc_uva_n),
+ MSM_PIN_FUNCTION(prng_rosc),
+ MSM_PIN_FUNCTION(qdss_cti),
+ MSM_PIN_FUNCTION(qdss_gpio),
+ MSM_PIN_FUNCTION(qspi0),
+ MSM_PIN_FUNCTION(qup0_se0),
+ MSM_PIN_FUNCTION(qup0_se1),
+ MSM_PIN_FUNCTION(qup0_se2),
+ MSM_PIN_FUNCTION(qup0_se3),
+ MSM_PIN_FUNCTION(qup0_se4),
+ MSM_PIN_FUNCTION(qup0_se5),
+ MSM_PIN_FUNCTION(qup0_se6),
+ MSM_PIN_FUNCTION(qup0_se7),
+ MSM_PIN_FUNCTION(qup1_se0),
+ MSM_PIN_FUNCTION(qup1_se1),
+ MSM_PIN_FUNCTION(qup1_se2),
+ MSM_PIN_FUNCTION(qup1_se3),
+ MSM_PIN_FUNCTION(qup1_se4),
+ MSM_PIN_FUNCTION(qup1_se5),
+ MSM_PIN_FUNCTION(qup1_se6),
+ MSM_PIN_FUNCTION(qup1_se7),
+ MSM_PIN_FUNCTION(qup2_se0),
+ MSM_PIN_FUNCTION(qup2_se1),
+ MSM_PIN_FUNCTION(qup2_se2),
+ MSM_PIN_FUNCTION(qup2_se3),
+ MSM_PIN_FUNCTION(qup2_se4),
+ MSM_PIN_FUNCTION(qup2_se5),
+ MSM_PIN_FUNCTION(qup2_se6),
+ MSM_PIN_FUNCTION(qup2_se7),
+ MSM_PIN_FUNCTION(qup3_se0),
+ MSM_PIN_FUNCTION(qup3_se1),
+ MSM_PIN_FUNCTION(sd_write_protect),
+ MSM_PIN_FUNCTION(sdc4_clk),
+ MSM_PIN_FUNCTION(sdc4_cmd),
+ MSM_PIN_FUNCTION(sdc4_data),
+ MSM_PIN_FUNCTION(smb_acok_n),
+ MSM_PIN_FUNCTION(sys_throttle),
+ MSM_PIN_FUNCTION(tb_trig_sdc2),
+ MSM_PIN_FUNCTION(tb_trig_sdc4),
+ MSM_PIN_FUNCTION(tmess_prng),
+ MSM_PIN_FUNCTION(tsense_pwm),
+ MSM_PIN_FUNCTION(tsense_therm),
+ MSM_PIN_FUNCTION(usb0_dp),
+ MSM_PIN_FUNCTION(usb0_phy_ps),
+ MSM_PIN_FUNCTION(usb0_sbrx),
+ MSM_PIN_FUNCTION(usb0_sbtx),
+ MSM_PIN_FUNCTION(usb0_tmu),
+ MSM_PIN_FUNCTION(usb1_dbg),
+ MSM_PIN_FUNCTION(usb1_dp),
+ MSM_PIN_FUNCTION(usb1_phy_ps),
+ MSM_PIN_FUNCTION(usb1_sbrx),
+ MSM_PIN_FUNCTION(usb1_sbtx),
+ MSM_PIN_FUNCTION(usb1_tmu),
+ MSM_PIN_FUNCTION(usb2_dp),
+ MSM_PIN_FUNCTION(usb2_phy_ps),
+ MSM_PIN_FUNCTION(usb2_sbrx),
+ MSM_PIN_FUNCTION(usb2_sbtx),
+ MSM_PIN_FUNCTION(usb2_tmu),
+ MSM_PIN_FUNCTION(vsense_trigger_mirnat),
+ MSM_PIN_FUNCTION(wcn_sw),
+ MSM_PIN_FUNCTION(wcn_sw_ctrl),
+};
+
+static const struct msm_pingroup glymur_groups[] = {
+ [0] = PINGROUP(0, qup0_se0, ibi_i3c, _, _, _, _, _, _, _, _, _),
+ [1] = PINGROUP(1, qup0_se0, ibi_i3c, _, _, _, _, _, _, _, _, _),
+ [2] = PINGROUP(2, qup0_se0, _, _, _, _, _, _, _, _, _, _),
+ [3] = PINGROUP(3, qup0_se0, _, _, _, _, _, _, _, _, _, _),
+ [4] = PINGROUP(4, qup0_se1, qup0_se6, ibi_i3c, _, _, _, _, _, _, _, _),
+ [5] = PINGROUP(5, qup0_se1, qup0_se6, ibi_i3c, _, _, _, _, _, _, _, _),
+ [6] = PINGROUP(6, qup0_se1, qup0_se6, i2c0_s_sda, phase_flag, _, _, _, _, _, _, _),
+ [7] = PINGROUP(7, qup0_se1, qup0_se6, i2c0_s_scl, phase_flag, _, _, _, _, _, _, _),
+ [8] = PINGROUP(8, qup0_se2, phase_flag, _, _, _, _, _, _, _, _, _),
+ [9] = PINGROUP(9, qup0_se2, phase_flag, _, _, _, _, _, _, _, _, _),
+ [10] = PINGROUP(10, qup0_se2, phase_flag, _, _, _, _, _, _, _, _, _),
+ [11] = PINGROUP(11, qup0_se2, phase_flag, _, _, _, _, _, _, _, _, _),
+ [12] = PINGROUP(12, qup0_se3, qup0_se7, phase_flag, _, _, _, _, _, _, _, _),
+ [13] = PINGROUP(13, qup0_se3, qup0_se7, phase_flag, _, _, _, _, _, _, _, _),
+ [14] = PINGROUP(14, qup0_se3, qup0_se7, phase_flag, _, _, _, _, _, _, _, _),
+ [15] = PINGROUP(15, qup0_se3, qup0_se7, phase_flag, _, _, _, _, _, _, _, _),
+ [16] = PINGROUP(16, qup0_se4, phase_flag, _, _, _, _, _, _, _, _, _),
+ [17] = PINGROUP(17, qup0_se4, qup0_se2, phase_flag, _, _, _, _, _, _, _, _),
+ [18] = PINGROUP(18, qup0_se4, qup0_se2, phase_flag, _, qdss_cti, _, _, _, _, _, _),
+ [19] = PINGROUP(19, qup0_se4, qup0_se2, phase_flag, _, qdss_cti, _, _, _, _, _, _),
+ [20] = PINGROUP(20, qup0_se5, _, phase_flag, _, _, _, _, _, _, _, _),
+ [21] = PINGROUP(21, qup0_se5, qup0_se3, _, phase_flag, _, _, _, _, _, _, _),
+ [22] = PINGROUP(22, qup0_se5, qup0_se3, _, phase_flag, _, _, _, _, _, _, _),
+ [23] = PINGROUP(23, qup0_se5, qup0_se3, phase_flag, _, qdss_cti, _, _, _, _, _, _),
+ [24] = PINGROUP(24, phase_flag, _, _, _, _, _, _, _, _, _, _),
+ [25] = PINGROUP(25, phase_flag, _, _, _, _, _, _, _, _, _, _),
+ [26] = PINGROUP(26, phase_flag, _, _, _, _, _, _, _, _, _, _),
+ [27] = PINGROUP(27, phase_flag, _, qdss_cti, _, _, _, _, _, _, _, _),
+ [28] = PINGROUP(28, pll_bist_sync, tsense_pwm, _, _, _, _, _, _, _, _, _),
+ [29] = PINGROUP(29, tsense_pwm, _, _, _, _, _, _, _, _, _, _),
+ [30] = PINGROUP(30, tsense_pwm, _, _, _, _, _, _, _, _, _, _),
+ [31] = PINGROUP(31, tsense_pwm, _, _, _, _, _, _, _, _, _, _),
+ [32] = PINGROUP(32, qup1_se0, ibi_i3c, _, _, _, _, _, _, _, _, _),
+ [33] = PINGROUP(33, qup1_se0, ibi_i3c, qup1_se3, _, _, _, _, _, _, _, _),
+ [34] = PINGROUP(34, qup1_se0, qup1_se3, tsense_pwm, _, _, _, _, _, _, _, _),
+ [35] = PINGROUP(35, qup1_se0, qup1_se3, pll_clk_aux, _, _, _, _, _, _, _, _),
+ [36] = PINGROUP(36, qup1_se1, ibi_i3c, _, _, _, _, _, _, _, _, _),
+ [37] = PINGROUP(37, qup1_se1, ibi_i3c, _, _, _, _, _, _, _, _, _),
+ [38] = PINGROUP(38, qup1_se1, atest_usb, ddr_pxi, vsense_trigger_mirnat, _, _, _, _,
+ _, _, _),
+ [39] = PINGROUP(39, qup1_se1, sys_throttle, _, atest_usb, ddr_pxi, _, _, _, _, _, _),
+ [40] = PINGROUP(40, qup1_se2, qup3_se1, _, atest_usb, ddr_pxi, _, _, _, _, _, _),
+ [41] = PINGROUP(41, qup1_se2, qup3_se1, qup3_se0, atest_usb, ddr_pxi, _, _, _, _,
+ _, _),
+ [42] = PINGROUP(42, qup1_se2, qup3_se1, qup0_se1, atest_usb, ddr_pxi, _, _, _, _,
+ _, _),
+ [43] = PINGROUP(43, qup1_se2, qup3_se1, _, atest_usb, ddr_pxi, _, _, _, _, _, _),
+ [44] = PINGROUP(44, qup1_se3, _, atest_usb, ddr_pxi, _, _, _, _, _, _, _),
+ [45] = PINGROUP(45, qup1_se3, cmu_rng, _, atest_usb, ddr_pxi, _, _, _, _, _, _),
+ [46] = PINGROUP(46, qup1_se3, cmu_rng, _, atest_usb, ddr_pxi, _, _, _, _, _, _),
+ [47] = PINGROUP(47, qup1_se3, cmu_rng, _, atest_usb, ddr_pxi, _, _, _, _, _, _),
+ [48] = PINGROUP(48, qup1_se4, qup3_se1, cmu_rng, _, atest_usb, ddr_pxi, _, _, _,
+ _, _),
+ [49] = PINGROUP(49, qup1_se4, qup1_se2, qup3_se1, _, atest_usb, ddr_pxi, _, _,
+ _, _, _),
+ [50] = PINGROUP(50, qup1_se4, qup1_se2, qup3_se1, _, atest_usb, ddr_pxi, _, _,
+ _, _, _),
+ [51] = PINGROUP(51, qup1_se4, qup1_se2, qup3_se1, dbg_out_clk, atest_usb,
+ ddr_pxi, _, _, _, _, _),
+ [52] = PINGROUP(52, qup1_se5, qup1_se7, jitter_bist, atest_usb, ddr_pxi, _, _, _,
+ _, _, _),
+ [53] = PINGROUP(53, qup1_se5, qup1_se7, _, atest_usb, ddr_pxi, _, _, _, _, _, _),
+ [54] = PINGROUP(54, qup1_se5, qup1_se7, ddr_bist_start, atest_usb, ddr_pxi, _, _,
+ _, _, _, _),
+ [55] = PINGROUP(55, qup1_se5, qup1_se7, ddr_bist_stop, atest_usb, ddr_pxi, _, _,
+ _, _, _, _),
+ [56] = PINGROUP(56, qup1_se6, ddr_bist_fail, _, _, _, _, _, _, _, _, _),
+ [57] = PINGROUP(57, qup1_se6, ddr_bist_complete, _, _, _, _, _, _, _, _, _),
+ [58] = PINGROUP(58, qup1_se6, _, _, _, _, _, _, _, _, _, _),
+ [59] = PINGROUP(59, qup1_se6, _, _, _, _, _, _, _, _, _, _),
+ [60] = PINGROUP(60, aoss_cti, _, _, _, _, _, _, _, _, _, _),
+ [61] = PINGROUP(61, aoss_cti, _, _, _, _, _, _, _, _, _, _),
+ [62] = PINGROUP(62, aoss_cti, _, _, _, _, _, _, _, _, _, _),
+ [63] = PINGROUP(63, aoss_cti, _, _, _, _, _, _, _, _, _, _),
+ [64] = PINGROUP(64, qup2_se0, ibi_i3c, gcc_gp2, _, _, _, _, _, _, _, _),
+ [65] = PINGROUP(65, qup2_se0, qup2_se3, ibi_i3c, atest_usb, ddr_pxi, _, _, _, _,
+ _, _),
+ [66] = PINGROUP(66, qup2_se0, qup2_se3, atest_usb, ddr_pxi, _, _, _, _, _, _, _),
+ [67] = PINGROUP(67, qup2_se0, qup2_se3, _, _, _, _, _, _, _, _, _),
+ [68] = PINGROUP(68, qup2_se1, ibi_i3c, _, _, _, _, _, _, _, _, _),
+ [69] = PINGROUP(69, qup2_se1, ibi_i3c, _, _, _, _, _, _, _, _, _),
+ [70] = PINGROUP(70, qup2_se1, _, _, _, _, _, _, _, _, _, _),
+ [71] = PINGROUP(71, qup2_se1, gcc_gp1, _, _, _, _, _, _, _, _, _),
+ [72] = PINGROUP(72, qup2_se2, gcc_gp1, atest_usb, ddr_pxi, _, _, _, _, _, _, _),
+ [73] = PINGROUP(73, qup2_se2, gcc_gp2, atest_usb, ddr_pxi, _, _, _, _, _, _, _),
+ [74] = PINGROUP(74, qup2_se2, gcc_gp3, _, _, _, _, _, _, _, _, _),
+ [75] = PINGROUP(75, qup2_se2, _, _, _, _, _, _, _, _, _, _),
+ [76] = PINGROUP(76, qup2_se3, phase_flag, _, _, _, _, _, _, _, _, _),
+ [77] = PINGROUP(77, qup2_se3, phase_flag, _, _, _, _, _, _, _, _, _),
+ [78] = PINGROUP(78, qup2_se3, phase_flag, _, _, _, _, _, _, _, _, _),
+ [79] = PINGROUP(79, qup2_se3, _, _, _, _, _, _, _, _, _, _),
+ [80] = PINGROUP(80, qup2_se4, qup2_se7, atest_usb, ddr_pxi, _, _, _, _, _, _, _),
+ [81] = PINGROUP(81, qup2_se4, qup2_se2, qup2_se7, atest_usb, ddr_pxi, _, _, _,
+ _, _, _),
+ [82] = PINGROUP(82, qup2_se4, qup2_se2, qup2_se7, gcc_gp3, _, _, _, _, _, _, _),
+ [83] = PINGROUP(83, qup2_se4, qup2_se2, qup2_se7, _, _, _, _, _, _, _, _),
+ [84] = PINGROUP(84, qup2_se5, _, _, _, _, _, _, _, _, _, _),
+ [85] = PINGROUP(85, qup2_se5, _, _, _, _, _, _, _, _, _, _),
+ [86] = PINGROUP(86, qup2_se5, _, _, _, _, _, _, _, _, _, _),
+ [87] = PINGROUP(87, qup2_se5, _, _, _, _, _, _, _, _, _, _),
+ [88] = PINGROUP(88, qup2_se6, _, _, _, _, _, _, _, _, _, _),
+ [89] = PINGROUP(89, qup2_se6, _, _, _, _, _, _, _, _, _, _),
+ [90] = PINGROUP(90, qup2_se6, _, _, _, _, _, _, _, _, _, _),
+ [91] = PINGROUP(91, qup2_se6, _, _, _, _, _, _, _, _, _, _),
+ [92] = PINGROUP(92, tmess_prng, _, _, _, _, _, _, _, _, _, _),
+ [93] = PINGROUP(93, tmess_prng, _, _, _, _, _, _, _, _, _, _),
+ [94] = PINGROUP(94, sys_throttle, tmess_prng, _, _, _, _, _, _, _, _, _),
+ [95] = PINGROUP(95, tmess_prng, _, _, _, _, _, _, _, _, _, _),
+ [96] = PINGROUP(96, cam_mclk, qdss_gpio, _, _, _, _, _, _, _, _, _),
+ [97] = PINGROUP(97, cam_mclk, qdss_gpio, _, _, _, _, _, _, _, _, _),
+ [98] = PINGROUP(98, cam_mclk, mdp_vsync_p, usb0_tmu, usb1_tmu, usb2_tmu, _, _, _, _, _, _),
+ [99] = PINGROUP(99, cam_mclk, qdss_gpio, _, _, _, _, _, _, _, _, _),
+ [100] = PINGROUP(100, cam_asc_mclk4, qdss_gpio, _, _, _, _, _, _, _, _, _),
+ [101] = PINGROUP(101, cci_i2c_sda, qdss_gpio, _, _, _, _, _, _, _, _, _),
+ [102] = PINGROUP(102, cci_i2c_scl, qdss_gpio, _, _, _, _, _, _, _, _, _),
+ [103] = PINGROUP(103, cci_i2c_sda, qdss_gpio, _, _, _, _, _, _, _, _, _),
+ [104] = PINGROUP(104, cci_i2c_scl, qdss_gpio, _, _, _, _, _, _, _, _, _),
+ [105] = PINGROUP(105, cci_i2c_sda, mdp_vsync_s, usb1_dbg, _, _, _, _, _, _, _, _),
+ [106] = PINGROUP(106, cci_i2c_scl, mdp_vsync_e, usb1_dbg, _, _, _, _, _, _, _, _),
+ [107] = PINGROUP(107, qdss_gpio, _, _, _, _, _, _, _, _, _, _),
+ [108] = PINGROUP(108, qdss_gpio, _, _, _, _, _, _, _, _, _, _),
+ [109] = PINGROUP(109, cci_timer, mdp_vsync_out, qdss_gpio, _, _, _, _, _, _, _, _),
+ [110] = PINGROUP(110, cci_timer, mdp_vsync_out, qdss_gpio, _, _, _, _, _, _, _, _),
+ [111] = PINGROUP(111, cci_timer, cci_async_in, mdp_vsync_out, qdss_gpio, _, _, _, _,
+ _, _, _),
+ [112] = PINGROUP(112, cci_timer, cci_async_in, mdp_vsync_out, qdss_gpio, _, _, _, _,
+ _, _, _),
+ [113] = PINGROUP(113, cci_timer, cci_async_in, mdp_vsync_out, qdss_gpio, _, _, _, _,
+ _, _, _),
+ [114] = PINGROUP(114, mdp_vsync_out, mdp_vsync_out, _, _, _, _, _, _, _, _, _),
+ [115] = PINGROUP(115, mdp_vsync_out, mdp_vsync_out, edp1_lcd, _, _, _, _, _, _, _, _),
+ [116] = PINGROUP(116, _, _, _, _, _, _, _, _, _, _, _),
+ [117] = PINGROUP(117, _, _, _, _, _, _, _, _, _, _, _),
+ [118] = PINGROUP(118, host2wlan_sol, _, _, _, _, _, _, _, _, _, _),
+ [119] = PINGROUP(119, edp0_hot, edp1_lcd, _, _, _, _, _, _, _, _, _),
+ [120] = PINGROUP(120, edp0_lcd, _, _, _, _, _, _, _, _, _, _),
+ [121] = PINGROUP(121, usb0_phy_ps, _, _, _, _, _, _, _, _, _, _),
+ [122] = PINGROUP(122, usb0_dp, _, _, _, _, _, _, _, _, _, _),
+ [123] = PINGROUP(123, usb1_phy_ps, _, _, _, _, _, _, _, _, _, _),
+ [124] = PINGROUP(124, usb1_dp, _, _, _, _, _, _, _, _, _, _),
+ [125] = PINGROUP(125, usb2_phy_ps, _, _, _, _, _, _, _, _, _, _),
+ [126] = PINGROUP(126, usb2_dp, _, _, _, _, _, _, _, _, _, _),
+ [127] = PINGROUP(127, qspi0, sdc4_clk, qup3_se0, _, _, _, _, _, _, _, _),
+ [128] = PINGROUP(128, qspi0, sdc4_data, qup3_se0, _, _, _, _, _, _, _, _),
+ [129] = PINGROUP(129, qspi0, sdc4_data, qup3_se0, _, _, _, _, _, _, _, _),
+ [130] = PINGROUP(130, qspi0, sdc4_data, qup3_se0, _, _, _, _, _, _, _, _),
+ [131] = PINGROUP(131, qspi0, sdc4_data, qup3_se0, _, _, _, _, _, _, _, _),
+ [132] = PINGROUP(132, qspi0, sdc4_cmd, qup3_se0, _, _, _, _, _, _, _, _),
+ [133] = PINGROUP(133, qspi0, tb_trig_sdc4, qup3_se0, _, _, _, _, _, _, _, _),
+ [134] = PINGROUP(134, audio_ext_mclk0, _, _, _, _, _, _, _, _, _, _),
+ [135] = PINGROUP(135, i2s0_sck, _, _, _, _, _, _, _, _, _, _),
+ [136] = PINGROUP(136, i2s0_data, _, _, _, _, _, _, _, _, _, _),
+ [137] = PINGROUP(137, i2s0_data, tb_trig_sdc2, _, _, _, _, _, _, _, _, _),
+ [138] = PINGROUP(138, i2s0_ws, tsense_pwm, _, _, _, _, _, _, _, _, _),
+ [139] = PINGROUP(139, i2s1_sck, tsense_pwm, _, _, _, _, _, _, _, _, _),
+ [140] = PINGROUP(140, i2s1_data, tsense_pwm, _, _, _, _, _, _, _, _, _),
+ [141] = PINGROUP(141, i2s1_ws, tsense_therm, _, _, _, _, _, _, _, _, _),
+ [142] = PINGROUP(142, i2s1_data, audio_ext_mclk1, audio_ref_clk, _, _, _, _, _, _, _, _),
+ [143] = PINGROUP(143, pcie3a_rst_n, _, _, _, _, _, _, _, _, _, _),
+ [144] = PINGROUP(144, pcie3a_clk, _, _, _, _, _, _, _, _, _, _),
+ [145] = PINGROUP(145, _, _, _, _, _, _, _, _, _, _, _),
+ [146] = PINGROUP(146, _, _, _, _, _, _, _, _, _, _, _),
+ [147] = PINGROUP(147, pcie4_clk_req_n, _, _, _, _, _, _, _, _, _, _),
+ [148] = PINGROUP(148, _, _, _, _, _, _, _, _, _, _, _),
+ [149] = PINGROUP(149, qdss_gpio, _, _, _, _, _, _, _, _, _, _),
+ [150] = PINGROUP(150, pcie6_clk_req_n, _, _, _, _, _, _, _, _, _, _),
+ [151] = PINGROUP(151, qdss_gpio, _, _, _, _, _, _, _, _, _, _),
+ [152] = PINGROUP(152, qdss_gpio, _, _, _, _, _, _, _, _, _, _),
+ [153] = PINGROUP(153, pcie5_clk_req_n, _, _, _, _, _, _, _, _, _, _),
+ [154] = PINGROUP(154, _, _, _, _, _, _, _, _, _, _, _),
+ [155] = PINGROUP(155, _, _, _, _, _, _, _, _, _, _, _),
+ [156] = PINGROUP(156, pcie3b_clk, _, _, _, _, _, _, _, _, _, _),
+ [157] = PINGROUP(157, _, _, _, _, _, _, _, _, _, _, _),
+ [158] = PINGROUP(158, _, _, _, _, _, _, _, _, _, _, _),
+ [159] = PINGROUP(159, _, _, _, _, _, _, _, _, _, _, _),
+ [160] = PINGROUP(160, resout_gpio_n, _, _, _, _, _, _, _, _, _, _),
+ [161] = PINGROUP(161, qdss_cti, _, _, _, _, _, _, _, _, _, _),
+ [162] = PINGROUP(162, sd_write_protect, qdss_cti, _, _, _, _, _, _, _, _, _),
+ [163] = PINGROUP(163, usb0_sbrx, prng_rosc, phase_flag, _, atest_char, _, _, _,
+ _, _, _),
+ [164] = PINGROUP(164, usb0_sbtx, prng_rosc, phase_flag, _, atest_char, _, _, _, _, _,
+ _),
+ [165] = PINGROUP(165, usb0_sbtx, _, _, _, _, _, _, _, _, _, _),
+ [166] = PINGROUP(166, _, _, _, _, _, _, _, _, _, _, _),
+ [167] = PINGROUP(167, _, _, _, _, _, _, _, _, _, _, _),
+ [168] = PINGROUP(168, eusb_ac_en, _, _, _, _, _, _, _, _, _, _),
+ [169] = PINGROUP(169, eusb_ac_en, _, _, _, _, _, _, _, _, _, _),
+ [170] = PINGROUP(170, _, _, _, _, _, _, _, _, _, _, _),
+ [171] = PINGROUP(171, _, _, _, _, _, _, _, _, _, _, _),
+ [172] = PINGROUP(172, usb1_sbrx, phase_flag, _, atest_char, _, _, _, _, _, _, _),
+ [173] = PINGROUP(173, usb1_sbtx, cri_trng, phase_flag, _, _, _, _, _, _, _, _),
+ [174] = PINGROUP(174, usb1_sbtx, _, _, _, _, _, _, _, _, _, _),
+ [175] = PINGROUP(175, _, _, _, _, _, _, _, _, _, _, _),
+ [176] = PINGROUP(176, _, _, _, _, _, _, _, _, _, _, _),
+ [177] = PINGROUP(177, eusb_ac_en, _, _, _, _, _, _, _, _, _, _),
+ [178] = PINGROUP(178, eusb_ac_en, _, _, _, _, _, _, _, _, _, _),
+ [179] = PINGROUP(179, _, _, _, _, _, _, _, _, _, _, _),
+ [180] = PINGROUP(180, _, _, _, _, _, _, _, _, _, _, _),
+ [181] = PINGROUP(181, usb2_sbrx, _, _, _, _, _, _, _, _, _, _),
+ [182] = PINGROUP(182, usb2_sbtx, _, _, _, _, _, _, _, _, _, _),
+ [183] = PINGROUP(183, usb2_sbtx, _, _, _, _, _, _, _, _, _, _),
+ [184] = PINGROUP(184, phase_flag, _, atest_char, _, _, _, _, _, _, _, _),
+ [185] = PINGROUP(185, _, _, _, _, _, _, _, _, _, _, _),
+ [186] = PINGROUP(186, eusb_ac_en, prng_rosc, phase_flag, _, _, _, _, _, _, _, _),
+ [187] = PINGROUP(187, eusb_ac_en, _, _, _, _, _, _, _, _, _, _),
+ [188] = PINGROUP(188, prng_rosc, phase_flag, _, atest_char, _, _, _, _, _, _, _),
+ [189] = PINGROUP(189, _, _, _, _, _, _, _, _, _, _, _),
+ [190] = PINGROUP(190, _, _, _, _, _, _, _, _, _, _, _),
+ [191] = PINGROUP(191, _, _, _, _, _, _, _, _, _, _, _),
+ [192] = PINGROUP(192, _, _, _, _, _, _, _, _, _, _, egpio),
+ [193] = PINGROUP(193, _, _, _, _, _, _, _, _, _, _, egpio),
+ [194] = PINGROUP(194, _, _, _, _, _, _, _, _, _, _, egpio),
+ [195] = PINGROUP(195, _, _, _, _, _, _, _, _, _, _, egpio),
+ [196] = PINGROUP(196, _, _, _, _, _, _, _, _, _, _, egpio),
+ [197] = PINGROUP(197, _, _, _, _, _, _, _, _, _, _, egpio),
+ [198] = PINGROUP(198, _, _, _, _, _, _, _, _, _, _, egpio),
+ [199] = PINGROUP(199, _, _, _, _, _, _, _, _, _, _, egpio),
+ [200] = PINGROUP(200, _, _, _, _, _, _, _, _, _, _, egpio),
+ [201] = PINGROUP(201, _, _, _, _, _, _, _, _, _, _, egpio),
+ [202] = PINGROUP(202, _, _, _, _, _, _, _, _, _, _, egpio),
+ [203] = PINGROUP(203, _, _, _, _, _, _, _, _, _, _, egpio),
+ [204] = PINGROUP(204, _, _, _, _, _, _, _, _, _, _, egpio),
+ [205] = PINGROUP(205, _, _, _, _, _, _, _, _, _, _, egpio),
+ [206] = PINGROUP(206, _, _, _, _, _, _, _, _, _, _, egpio),
+ [207] = PINGROUP(207, _, _, _, _, _, _, _, _, _, _, egpio),
+ [208] = PINGROUP(208, _, _, _, _, _, _, _, _, _, _, egpio),
+ [209] = PINGROUP(209, _, _, _, _, _, _, _, _, _, _, egpio),
+ [210] = PINGROUP(210, _, _, _, _, _, _, _, _, _, _, egpio),
+ [211] = PINGROUP(211, _, _, _, _, _, _, _, _, _, _, egpio),
+ [212] = PINGROUP(212, _, _, _, _, _, _, _, _, _, _, egpio),
+ [213] = PINGROUP(213, _, _, _, _, _, _, _, _, _, _, egpio),
+ [214] = PINGROUP(214, wcn_sw_ctrl, _, _, _, _, _, _, _, _, _, egpio),
+ [215] = PINGROUP(215, _, qdss_cti, _, _, _, _, _, _, _, _, egpio),
+ [216] = PINGROUP(216, _, _, _, _, _, _, _, _, _, _, egpio),
+ [217] = PINGROUP(217, _, qdss_cti, _, _, _, _, _, _, _, _, egpio),
+ [218] = PINGROUP(218, _, _, _, _, _, _, _, _, _, _, egpio),
+ [219] = PINGROUP(219, _, qdss_gpio, _, _, _, _, _, _, _, _, egpio),
+ [220] = PINGROUP(220, _, qdss_gpio, _, _, _, _, _, _, _, _, egpio),
+ [221] = PINGROUP(221, wcn_sw, _, qdss_gpio, _, _, _, _, _, _, _, egpio),
+ [222] = PINGROUP(222, _, qdss_gpio, _, _, _, _, _, _, _, _, egpio),
+ [223] = PINGROUP(223, _, qdss_gpio, _, _, _, _, _, _, _, _, egpio),
+ [224] = PINGROUP(224, _, qdss_gpio, _, _, _, _, _, _, _, _, egpio),
+ [225] = PINGROUP(225, _, qdss_gpio, _, _, _, _, _, _, _, _, egpio),
+ [226] = PINGROUP(226, _, qdss_gpio, _, _, _, _, _, _, _, _, egpio),
+ [227] = PINGROUP(227, _, qdss_gpio, _, _, _, _, _, _, _, _, egpio),
+ [228] = PINGROUP(228, _, qdss_gpio, _, _, _, _, _, _, _, _, egpio),
+ [229] = PINGROUP(229, _, _, _, _, _, _, _, _, _, _, egpio),
+ [230] = PINGROUP(230, _, _, _, _, _, _, _, _, _, _, egpio),
+ [231] = PINGROUP(231, qdss_gpio, _, _, _, _, _, _, _, _, _, egpio),
+ [232] = PINGROUP(232, qdss_gpio, _, _, _, _, _, _, _, _, _, egpio),
+ [233] = PINGROUP(233, qdss_gpio, _, _, _, _, _, _, _, _, _, egpio),
+ [234] = PINGROUP(234, qdss_gpio, _, _, _, _, _, _, _, _, _, egpio),
+ [235] = PINGROUP(235, asc_cci, qdss_gpio, _, _, _, _, _, _, _, _, egpio),
+ [236] = PINGROUP(236, asc_cci, qdss_gpio, _, _, _, _, _, _, _, _, egpio),
+ [237] = PINGROUP(237, qdss_gpio, _, _, _, _, _, _, _, _, _, egpio),
+ [238] = PINGROUP(238, qdss_gpio, _, _, _, _, _, _, _, _, _, egpio),
+ [239] = PINGROUP(239, _, _, _, _, _, _, _, _, _, _, egpio),
+ [240] = PINGROUP(240, _, _, _, _, _, _, _, _, _, _, egpio),
+ [241] = PINGROUP(241, _, _, _, _, _, _, _, _, _, _, egpio),
+ [242] = PINGROUP(242, _, _, _, _, _, _, _, _, _, _, egpio),
+ [243] = PINGROUP(243, _, _, _, _, _, _, _, _, _, _, egpio),
+ [244] = PINGROUP(244, _, _, _, _, _, _, _, _, _, _, egpio),
+ [245] = PINGROUP(245, smb_acok_n, _, _, _, _, _, _, _, _, _, _),
+ [246] = PINGROUP(246, _, _, _, _, _, _, _, _, _, _, _),
+ [247] = PINGROUP(247, qup3_se0, _, _, _, _, _, _, _, _, _, _),
+ [248] = PINGROUP(248, pmc_uva_n, _, _, _, _, _, _, _, _, _, _),
+ [249] = PINGROUP(249, pmc_oca_n, _, _, _, _, _, _, _, _, _, _),
+ [250] = UFS_RESET(ufs_reset, 0x104004, 0x105000),
+ [251] = SDC_QDSD_PINGROUP(sdc2_clk, 0xff000, 14, 6),
+ [252] = SDC_QDSD_PINGROUP(sdc2_cmd, 0xff000, 11, 3),
+ [253] = SDC_QDSD_PINGROUP(sdc2_data, 0xff000, 9, 0),
+};
+
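+/*
+ * Wake-capable pins and their PDC interrupt lines, expressed as
+ * { TLMM GPIO, PDC wakeirq } pairs and consumed by the common
+ * pinctrl-msm wakeup plumbing.
+ */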
+static const struct msm_gpio_wakeirq_map glymur_pdc_map[] = {
+ { 0, 116 }, { 2, 114 }, { 3, 115 }, { 4, 175 }, { 5, 176 },
+ { 7, 111 }, { 11, 129 }, { 13, 130 }, { 15, 112 }, { 19, 113 },
+ { 23, 187 }, { 27, 188 }, { 28, 121 }, { 29, 122 }, { 30, 136 },
+ { 31, 203 }, { 32, 189 }, { 34, 174 }, { 35, 190 }, { 36, 191 },
+ { 39, 124 }, { 43, 192 }, { 47, 193 }, { 51, 123 }, { 53, 133 },
+ { 55, 125 }, { 59, 131 }, { 64, 134 }, { 65, 150 }, { 66, 186 },
+ { 67, 132 }, { 68, 195 }, { 71, 135 }, { 75, 196 }, { 79, 197 },
+ { 83, 198 }, { 84, 181 }, { 85, 199 }, { 87, 200 }, { 91, 201 },
+ { 92, 182 }, { 93, 183 }, { 94, 184 }, { 95, 185 }, { 98, 202 },
+ { 105, 157 }, { 113, 128 }, { 121, 117 }, { 123, 118 }, { 125, 119 },
+ { 129, 120 }, { 131, 126 }, { 132, 160 }, { 133, 194 }, { 134, 127 },
+ { 141, 137 }, { 143, 159 }, { 144, 138 }, { 145, 139 }, { 147, 140 },
+ { 148, 141 }, { 150, 146 }, { 151, 147 }, { 153, 148 }, { 154, 144 },
+ { 156, 149 }, { 157, 151 }, { 163, 142 }, { 172, 143 }, { 181, 145 },
+ { 193, 161 }, { 196, 152 }, { 203, 177 }, { 208, 178 }, { 215, 162 },
+ { 217, 153 }, { 220, 154 }, { 221, 155 }, { 228, 179 }, { 230, 180 },
+ { 232, 206 }, { 234, 172 }, { 235, 173 }, { 242, 158 }, { 244, 156 },
+};
+
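+/*
+ * ngpios counts gpio0..gpio249 plus ufs_reset (index 250); the sdc2
+ * pads that follow are configuration-only and sit outside the GPIO
+ * range. egpio_func = 11 selects the last funcs[] slot populated by
+ * PINGROUP() above.
+ */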
+static const struct msm_pinctrl_soc_data glymur_tlmm = {
+ .pins = glymur_pins,
+ .npins = ARRAY_SIZE(glymur_pins),
+ .functions = glymur_functions,
+ .nfunctions = ARRAY_SIZE(glymur_functions),
+ .groups = glymur_groups,
+ .ngroups = ARRAY_SIZE(glymur_groups),
+ .ngpios = 251,
+ .wakeirq_map = glymur_pdc_map,
+ .nwakeirq_map = ARRAY_SIZE(glymur_pdc_map),
+ .egpio_func = 11,
+};
+
+static const struct of_device_id glymur_tlmm_of_match[] = {
+ { .compatible = "qcom,glymur-tlmm", .data = &glymur_tlmm },
+ { }
+};
+
+static int glymur_tlmm_probe(struct platform_device *pdev)
+{
+ return msm_pinctrl_probe(pdev, &glymur_tlmm);
+}
+
+static struct platform_driver glymur_tlmm_driver = {
+ .driver = {
+ .name = "glymur-tlmm",
+ .of_match_table = glymur_tlmm_of_match,
+ },
+ .probe = glymur_tlmm_probe,
+};
+
+static int __init glymur_tlmm_init(void)
+{
+ return platform_driver_register(&glymur_tlmm_driver);
+}
+arch_initcall(glymur_tlmm_init);
+
+static void __exit glymur_tlmm_exit(void)
+{
+ platform_driver_unregister(&glymur_tlmm_driver);
+}
+module_exit(glymur_tlmm_exit);
+
+MODULE_DESCRIPTION("QTI GLYMUR TLMM driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(of, glymur_tlmm_of_match);
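Reviewer note, not part of the patch: glymur_pdc_map above pairs a TLMM GPIO number with the PDC interrupt line that can wake the system on its behalf, and egpio_func = 11 points at the last of the eleven function slots in the PINGROUP rows, which is where "egpio" sits. A minimal sketch of how such a map is consumed — the helper below is hypothetical; the real resolution happens in the shared pinctrl-msm code:

/* Hypothetical lookup, assuming the gpio/wakeirq field names of
 * struct msm_gpio_wakeirq_map.
 */
static int glymur_gpio_to_wakeirq(unsigned int gpio)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(glymur_pdc_map); i++)
		if (glymur_pdc_map[i].gpio == gpio)
			return glymur_pdc_map[i].wakeirq;

	return -ENOENT;
}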
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq5018.c b/drivers/pinctrl/qcom/pinctrl-ipq5018.c
index 10b99d5d8a11..cbf34854f882 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq5018.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq5018.c
@@ -630,7 +630,7 @@ static const struct pinfunction ipq5018_functions[] = {
MSM_PIN_FUNCTION(eud_gpio),
MSM_PIN_FUNCTION(gcc_plltest),
MSM_PIN_FUNCTION(gcc_tlmm),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(led0),
MSM_PIN_FUNCTION(led2),
MSM_PIN_FUNCTION(mac0),
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq5332.c b/drivers/pinctrl/qcom/pinctrl-ipq5332.c
index 1ac2fc09c119..239cbe75f198 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq5332.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq5332.c
@@ -692,7 +692,7 @@ static const struct pinfunction ipq5332_functions[] = {
MSM_PIN_FUNCTION(dbg_out),
MSM_PIN_FUNCTION(gcc_plltest),
MSM_PIN_FUNCTION(gcc_tlmm),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(lock_det),
MSM_PIN_FUNCTION(mac0),
MSM_PIN_FUNCTION(mac1),
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq5424.c b/drivers/pinctrl/qcom/pinctrl-ipq5424.c
index 7ff1f8acc1a3..67b452a033d6 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq5424.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq5424.c
@@ -641,7 +641,7 @@ static const struct pinfunction ipq5424_functions[] = {
MSM_PIN_FUNCTION(dbg_out),
MSM_PIN_FUNCTION(gcc_plltest),
MSM_PIN_FUNCTION(gcc_tlmm),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(i2c0_scl),
MSM_PIN_FUNCTION(i2c0_sda),
MSM_PIN_FUNCTION(i2c1_scl),
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq6018.c b/drivers/pinctrl/qcom/pinctrl-ipq6018.c
index a4ba980252e1..be177fb0a92d 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq6018.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq6018.c
@@ -891,7 +891,7 @@ static const struct pinfunction ipq6018_functions[] = {
MSM_PIN_FUNCTION(dbg_out),
MSM_PIN_FUNCTION(gcc_plltest),
MSM_PIN_FUNCTION(gcc_tlmm),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(lpass_aud),
MSM_PIN_FUNCTION(lpass_aud0),
MSM_PIN_FUNCTION(lpass_aud1),
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq8074.c b/drivers/pinctrl/qcom/pinctrl-ipq8074.c
index 482f13282fc2..e94de9083314 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq8074.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq8074.c
@@ -838,7 +838,7 @@ static const struct pinfunction ipq8074_functions[] = {
MSM_PIN_FUNCTION(dbg_out),
MSM_PIN_FUNCTION(gcc_plltest),
MSM_PIN_FUNCTION(gcc_tlmm),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(ldo_en),
MSM_PIN_FUNCTION(ldo_update),
MSM_PIN_FUNCTION(led0),
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq9574.c b/drivers/pinctrl/qcom/pinctrl-ipq9574.c
index 89c05d8eb550..3ed093ea8eb9 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq9574.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq9574.c
@@ -651,7 +651,7 @@ static const struct pinfunction ipq9574_functions[] = {
MSM_PIN_FUNCTION(dwc_ddrphy),
MSM_PIN_FUNCTION(gcc_plltest),
MSM_PIN_FUNCTION(gcc_tlmm),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(mac),
MSM_PIN_FUNCTION(mdc),
MSM_PIN_FUNCTION(mdio),
diff --git a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
index 54c77e0b96e9..1c97ec44aa5f 100644
--- a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
+++ b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
@@ -41,13 +41,27 @@ struct lpi_pinctrl {
static int lpi_gpio_read(struct lpi_pinctrl *state, unsigned int pin,
unsigned int addr)
{
- return ioread32(state->tlmm_base + LPI_TLMM_REG_OFFSET * pin + addr);
+ u32 pin_offset;
+
+ if (state->data->flags & LPI_FLAG_USE_PREDEFINED_PIN_OFFSET)
+ pin_offset = state->data->groups[pin].pin_offset;
+ else
+ pin_offset = LPI_TLMM_REG_OFFSET * pin;
+
+ return ioread32(state->tlmm_base + pin_offset + addr);
}
static int lpi_gpio_write(struct lpi_pinctrl *state, unsigned int pin,
unsigned int addr, unsigned int val)
{
- iowrite32(val, state->tlmm_base + LPI_TLMM_REG_OFFSET * pin + addr);
+ u32 pin_offset;
+
+ if (state->data->flags & LPI_FLAG_USE_PREDEFINED_PIN_OFFSET)
+ pin_offset = state->data->groups[pin].pin_offset;
+ else
+ pin_offset = LPI_TLMM_REG_OFFSET * pin;
+
+ iowrite32(val, state->tlmm_base + pin_offset + addr);
return 0;
}
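Both accessors now pick between two addressing schemes; a minimal sketch of the shared logic, factored into one hypothetical helper (the patch deliberately open-codes it in both functions):

/* Hypothetical helper, not part of the patch. */
static u32 lpi_pin_base(struct lpi_pinctrl *state, unsigned int pin)
{
	/* SoCs with irregular pad spacing publish a per-pin offset... */
	if (state->data->flags & LPI_FLAG_USE_PREDEFINED_PIN_OFFSET)
		return state->data->groups[pin].pin_offset;

	/* ...all other SoCs keep the fixed per-pin stride. */
	return LPI_TLMM_REG_OFFSET * pin;
}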
@@ -174,7 +188,7 @@ static int lpi_config_get(struct pinctrl_dev *pctldev,
arg = 1;
break;
case PIN_CONFIG_INPUT_ENABLE:
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
if (is_out)
arg = 1;
break;
@@ -252,7 +266,7 @@ static int lpi_config_set(struct pinctrl_dev *pctldev, unsigned int group,
case PIN_CONFIG_INPUT_ENABLE:
output_enabled = false;
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
output_enabled = true;
value = arg;
break;
@@ -314,7 +328,7 @@ static int lpi_gpio_direction_output(struct gpio_chip *chip,
struct lpi_pinctrl *state = gpiochip_get_data(chip);
unsigned long config;
- config = pinconf_to_config_packed(PIN_CONFIG_OUTPUT, val);
+ config = pinconf_to_config_packed(PIN_CONFIG_LEVEL, val);
return lpi_config_set(state->ctrl, pin, &config, 1);
}
@@ -332,7 +346,7 @@ static int lpi_gpio_set(struct gpio_chip *chip, unsigned int pin, int value)
struct lpi_pinctrl *state = gpiochip_get_data(chip);
unsigned long config;
- config = pinconf_to_config_packed(PIN_CONFIG_OUTPUT, value);
+ config = pinconf_to_config_packed(PIN_CONFIG_LEVEL, value);
return lpi_config_set(state->ctrl, pin, &config, 1);
}
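The PIN_CONFIG_OUTPUT -> PIN_CONFIG_LEVEL substitutions in this file (and in the spmi/ssbi and renesas drivers further down) are mechanical; assuming PIN_CONFIG_LEVEL is the generic pinconf successor to PIN_CONFIG_OUTPUT, the packed argument still carries the output level:

	/* drive a pin high via pinconf, exactly as before the rename */
	unsigned long config = pinconf_to_config_packed(PIN_CONFIG_LEVEL, 1);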
diff --git a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.h b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.h
index a9b2f65c1ebe..f48368492861 100644
--- a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.h
+++ b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.h
@@ -55,6 +55,22 @@ struct pinctrl_pin_desc;
LPI_MUX_##f4, \
}, \
.nfuncs = 5, \
+ .pin_offset = 0, \
+ }
+
+#define LPI_PINGROUP_OFFSET(id, soff, f1, f2, f3, f4, poff) \
+ { \
+ .pin = id, \
+ .slew_offset = soff, \
+ .funcs = (int[]){ \
+ LPI_MUX_gpio, \
+ LPI_MUX_##f1, \
+ LPI_MUX_##f2, \
+ LPI_MUX_##f3, \
+ LPI_MUX_##f4, \
+ }, \
+ .nfuncs = 5, \
+ .pin_offset = poff, \
}
/*
@@ -62,6 +78,7 @@ struct pinctrl_pin_desc;
* pin configuration.
*/
#define LPI_FLAG_SLEW_RATE_SAME_REG BIT(0)
+#define LPI_FLAG_USE_PREDEFINED_PIN_OFFSET BIT(1)
struct lpi_pingroup {
unsigned int pin;
@@ -69,6 +86,7 @@ struct lpi_pingroup {
int slew_offset;
unsigned int *funcs;
unsigned int nfuncs;
+ unsigned int pin_offset;
};
struct lpi_function {
diff --git a/drivers/pinctrl/qcom/pinctrl-mdm9607.c b/drivers/pinctrl/qcom/pinctrl-mdm9607.c
index 3e18ba124fed..cef330547ce7 100644
--- a/drivers/pinctrl/qcom/pinctrl-mdm9607.c
+++ b/drivers/pinctrl/qcom/pinctrl-mdm9607.c
@@ -861,7 +861,7 @@ static const struct pinfunction mdm9607_functions[] = {
MSM_PIN_FUNCTION(gcc_plltest),
MSM_PIN_FUNCTION(gcc_tlmm),
MSM_PIN_FUNCTION(gmac_mdio),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(gsm0_tx),
MSM_PIN_FUNCTION(lcd_rst),
MSM_PIN_FUNCTION(ldo_en),
diff --git a/drivers/pinctrl/qcom/pinctrl-mdm9615.c b/drivers/pinctrl/qcom/pinctrl-mdm9615.c
index bea1ca3d1b7f..729fe3d7e14e 100644
--- a/drivers/pinctrl/qcom/pinctrl-mdm9615.c
+++ b/drivers/pinctrl/qcom/pinctrl-mdm9615.c
@@ -313,7 +313,7 @@ static const char * const cdc_mclk_groups[] = {
};
static const struct pinfunction mdm9615_functions[] = {
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(gsbi2_i2c),
MSM_PIN_FUNCTION(gsbi3),
MSM_PIN_FUNCTION(gsbi4),
diff --git a/drivers/pinctrl/qcom/pinctrl-milos.c b/drivers/pinctrl/qcom/pinctrl-milos.c
index d11a7bbcd733..19abd5233a2c 100644
--- a/drivers/pinctrl/qcom/pinctrl-milos.c
+++ b/drivers/pinctrl/qcom/pinctrl-milos.c
@@ -974,7 +974,7 @@ static const char *const wcn_sw_ctrl_groups[] = {
};
static const struct pinfunction milos_functions[] = {
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(aoss_cti),
MSM_PIN_FUNCTION(atest_char),
MSM_PIN_FUNCTION(atest_usb),
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 83eb075b6bfa..67525d542c5b 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -31,6 +31,7 @@
#include "../core.h"
#include "../pinconf.h"
#include "../pinctrl-utils.h"
+#include "../pinmux.h"
#include "pinctrl-msm.h"
@@ -150,33 +151,6 @@ static int msm_pinmux_request(struct pinctrl_dev *pctldev, unsigned offset)
return gpiochip_line_is_valid(chip, offset) ? 0 : -EINVAL;
}
-static int msm_get_functions_count(struct pinctrl_dev *pctldev)
-{
- struct msm_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
-
- return pctrl->soc->nfunctions;
-}
-
-static const char *msm_get_function_name(struct pinctrl_dev *pctldev,
- unsigned function)
-{
- struct msm_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
-
- return pctrl->soc->functions[function].name;
-}
-
-static int msm_get_function_groups(struct pinctrl_dev *pctldev,
- unsigned function,
- const char * const **groups,
- unsigned * const num_groups)
-{
- struct msm_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
-
- *groups = pctrl->soc->functions[function].groups;
- *num_groups = pctrl->soc->functions[function].ngroups;
- return 0;
-}
-
static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev,
unsigned function,
unsigned group)
@@ -288,11 +262,13 @@ static int msm_pinmux_request_gpio(struct pinctrl_dev *pctldev,
static const struct pinmux_ops msm_pinmux_ops = {
.request = msm_pinmux_request,
- .get_functions_count = msm_get_functions_count,
- .get_function_name = msm_get_function_name,
- .get_function_groups = msm_get_function_groups,
+ .get_functions_count = pinmux_generic_get_function_count,
+ .get_function_name = pinmux_generic_get_function_name,
+ .get_function_groups = pinmux_generic_get_function_groups,
+ .function_is_gpio = pinmux_generic_function_is_gpio,
.gpio_request_enable = msm_pinmux_request_gpio,
.set_mux = msm_pinmux_set_mux,
+ .strict = true,
};
static int msm_config_reg(struct msm_pinctrl *pctrl,
@@ -319,7 +295,7 @@ static int msm_config_reg(struct msm_pinctrl *pctrl,
*bit = g->drv_bit;
*mask = 7;
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
case PIN_CONFIG_INPUT_ENABLE:
case PIN_CONFIG_OUTPUT_ENABLE:
*bit = g->oe_bit;
@@ -409,7 +385,7 @@ static int msm_config_group_get(struct pinctrl_dev *pctldev,
case PIN_CONFIG_DRIVE_STRENGTH:
arg = msm_regval_to_drive(arg);
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
/* Pin is not output */
if (!arg)
return -EINVAL;
@@ -488,7 +464,7 @@ static int msm_config_group_set(struct pinctrl_dev *pctldev,
else
arg = (arg / 2) - 1;
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
/* set output value */
raw_spin_lock_irqsave(&pctrl->lock, flags);
val = msm_readl_io(pctrl, g);
@@ -1552,6 +1528,7 @@ EXPORT_SYMBOL(msm_pinctrl_dev_pm_ops);
int msm_pinctrl_probe(struct platform_device *pdev,
const struct msm_pinctrl_soc_data *soc_data)
{
+ const struct pinfunction *func;
struct msm_pinctrl *pctrl;
struct resource *res;
int ret;
@@ -1606,6 +1583,14 @@ int msm_pinctrl_probe(struct platform_device *pdev,
return PTR_ERR(pctrl->pctrl);
}
+ for (i = 0; i < soc_data->nfunctions; i++) {
+ func = &soc_data->functions[i];
+
+ ret = pinmux_generic_add_pinfunction(pctrl->pctrl, func, NULL);
+ if (ret < 0)
+ return ret;
+ }
+
ret = msm_gpio_init(pctrl);
if (ret)
return ret;
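Two behavioural notes on this conversion, hedged since they rest on the generic pinmux core: registering every function through pinmux_generic_add_pinfunction() at probe time is what allows the three hand-rolled msm_get_* callbacks to be dropped in favour of their pinmux_generic_* counterparts, and .strict = true makes the core enforce exclusive pin ownership between pinmux and gpiolib — the new function_is_gpio hook, fed by the MSM_GPIO_PIN_FUNCTION markers below, tells it which functions still count as GPIO.

/* Sketch of the strict check (assumption, simplified): once set_mux()
 * has given a pin to a non-GPIO function such as "qup0", a later
 * gpio_request() on it fails with -EBUSY; muxing "gpio" or "egpio"
 * remains allowed because function_is_gpio() reports those as GPIO.
 */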
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.h b/drivers/pinctrl/qcom/pinctrl-msm.h
index d7dc0947bb16..4625fa5320a9 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.h
+++ b/drivers/pinctrl/qcom/pinctrl-msm.h
@@ -29,6 +29,11 @@ struct pinctrl_pin_desc;
fname##_groups, \
ARRAY_SIZE(fname##_groups))
+#define MSM_GPIO_PIN_FUNCTION(fname) \
+ [msm_mux_##fname] = PINCTRL_GPIO_PINFUNCTION(#fname, \
+ fname##_groups, \
+ ARRAY_SIZE(fname##_groups))
+
#define QCA_PIN_FUNCTION(fname) \
[qca_mux_##fname] = PINCTRL_PINFUNCTION(#fname, \
fname##_groups, \
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8226.c b/drivers/pinctrl/qcom/pinctrl-msm8226.c
index f9a957347340..a81aa092ef12 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8226.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8226.c
@@ -483,7 +483,7 @@ static const struct pinfunction msm8226_functions[] = {
MSM_PIN_FUNCTION(cci_i2c0),
MSM_PIN_FUNCTION(gp0_clk),
MSM_PIN_FUNCTION(gp1_clk),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(sdc3),
MSM_PIN_FUNCTION(wlan),
};
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8660.c b/drivers/pinctrl/qcom/pinctrl-msm8660.c
index 4dbc19ffd80e..5ded00396cd9 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8660.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8660.c
@@ -714,7 +714,7 @@ static const char * const ebi2_groups[] = {
};
static const struct pinfunction msm8660_functions[] = {
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(cam_mclk),
MSM_PIN_FUNCTION(dsub),
MSM_PIN_FUNCTION(ext_gps),
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8909.c b/drivers/pinctrl/qcom/pinctrl-msm8909.c
index 0aa4f77b774f..544a52fb8f3d 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8909.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8909.c
@@ -696,7 +696,7 @@ static const struct pinfunction msm8909_functions[] = {
MSM_PIN_FUNCTION(gcc_gp3_clk_a),
MSM_PIN_FUNCTION(gcc_gp3_clk_b),
MSM_PIN_FUNCTION(gcc_plltest),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(gsm0_tx),
MSM_PIN_FUNCTION(ldo_en),
MSM_PIN_FUNCTION(ldo_update),
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8916.c b/drivers/pinctrl/qcom/pinctrl-msm8916.c
index 0dfc6dd33d58..b1b6934bb4b6 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8916.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8916.c
@@ -743,7 +743,7 @@ static const struct pinfunction msm8916_functions[] = {
MSM_PIN_FUNCTION(gcc_gp2_clk_b),
MSM_PIN_FUNCTION(gcc_gp3_clk_a),
MSM_PIN_FUNCTION(gcc_gp3_clk_b),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(gsm0_tx0),
MSM_PIN_FUNCTION(gsm0_tx1),
MSM_PIN_FUNCTION(gsm1_tx0),
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8917.c b/drivers/pinctrl/qcom/pinctrl-msm8917.c
index 2e1a94ab18b2..f23d92d6615b 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8917.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8917.c
@@ -1302,7 +1302,7 @@ static const struct pinfunction msm8917_functions[] = {
MSM_PIN_FUNCTION(gcc_gp3_clk_b),
MSM_PIN_FUNCTION(gcc_plltest),
MSM_PIN_FUNCTION(gcc_tlmm),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(gsm0_tx),
MSM_PIN_FUNCTION(key_focus),
MSM_PIN_FUNCTION(key_snapshot),
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8953.c b/drivers/pinctrl/qcom/pinctrl-msm8953.c
index 956383341a7a..67db062fdf56 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8953.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8953.c
@@ -1533,7 +1533,7 @@ static const struct pinfunction msm8953_functions[] = {
MSM_PIN_FUNCTION(gcc_gp3_clk_b),
MSM_PIN_FUNCTION(gcc_plltest),
MSM_PIN_FUNCTION(gcc_tlmm),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(gsm0_tx),
MSM_PIN_FUNCTION(gsm1_tx),
MSM_PIN_FUNCTION(gyro_int),
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8960.c b/drivers/pinctrl/qcom/pinctrl-msm8960.c
index a937ea867de7..2fb15208aba0 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8960.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8960.c
@@ -974,7 +974,7 @@ static const struct pinfunction msm8960_functions[] = {
MSM_PIN_FUNCTION(gp_pdm_1b),
MSM_PIN_FUNCTION(gp_pdm_2a),
MSM_PIN_FUNCTION(gp_pdm_2b),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(gsbi1),
MSM_PIN_FUNCTION(gsbi1_spi_cs1_n),
MSM_PIN_FUNCTION(gsbi1_spi_cs2a_n),
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8976.c b/drivers/pinctrl/qcom/pinctrl-msm8976.c
index 3bcb03387781..345539b9e696 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8976.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8976.c
@@ -812,7 +812,7 @@ static const char * const ss_switch_groups[] = {
};
static const struct pinfunction msm8976_functions[] = {
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(blsp_spi1),
MSM_PIN_FUNCTION(smb_int),
MSM_PIN_FUNCTION(blsp_i2c1),
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8994.c b/drivers/pinctrl/qcom/pinctrl-msm8994.c
index 7a3b6cbccb68..94e042d1f4b2 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8994.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8994.c
@@ -1071,7 +1071,7 @@ static const struct pinfunction msm8994_functions[] = {
MSM_PIN_FUNCTION(uim2),
MSM_PIN_FUNCTION(uim3),
MSM_PIN_FUNCTION(uim4),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
};
static const struct msm_pingroup msm8994_groups[] = {
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8996.c b/drivers/pinctrl/qcom/pinctrl-msm8996.c
index d86d83106d3b..e5b55693d023 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8996.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8996.c
@@ -1532,7 +1532,7 @@ static const struct pinfunction msm8996_functions[] = {
MSM_PIN_FUNCTION(gcc_gp2_clk_b),
MSM_PIN_FUNCTION(gcc_gp3_clk_a),
MSM_PIN_FUNCTION(gcc_gp3_clk_b),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(gsm_tx),
MSM_PIN_FUNCTION(hdmi_cec),
MSM_PIN_FUNCTION(hdmi_ddc),
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8998.c b/drivers/pinctrl/qcom/pinctrl-msm8998.c
index 1daee815888f..b727593af34a 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8998.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8998.c
@@ -1160,7 +1160,7 @@ static const char * const mss_lte_groups[] = {
};
static const struct pinfunction msm8998_functions[] = {
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(adsp_ext),
MSM_PIN_FUNCTION(agera_pll),
MSM_PIN_FUNCTION(atest_char),
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8x74.c b/drivers/pinctrl/qcom/pinctrl-msm8x74.c
index 8253aa25775b..202bec003e96 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8x74.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8x74.c
@@ -778,7 +778,7 @@ static const char * const slimbus_groups[] = { "gpio70", "gpio71" };
static const char * const hsic_ctl_groups[] = { "hsic_strobe", "hsic_data" };
static const struct pinfunction msm8x74_functions[] = {
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(cci_i2c0),
MSM_PIN_FUNCTION(cci_i2c1),
MSM_PIN_FUNCTION(uim1),
diff --git a/drivers/pinctrl/qcom/pinctrl-qcm2290.c b/drivers/pinctrl/qcom/pinctrl-qcm2290.c
index eeeec6434f6a..38200957451e 100644
--- a/drivers/pinctrl/qcom/pinctrl-qcm2290.c
+++ b/drivers/pinctrl/qcom/pinctrl-qcm2290.c
@@ -870,11 +870,11 @@ static const struct pinfunction qcm2290_functions[] = {
MSM_PIN_FUNCTION(ddr_pxi1),
MSM_PIN_FUNCTION(ddr_pxi2),
MSM_PIN_FUNCTION(ddr_pxi3),
- MSM_PIN_FUNCTION(egpio),
+ MSM_GPIO_PIN_FUNCTION(egpio),
MSM_PIN_FUNCTION(gcc_gp1),
MSM_PIN_FUNCTION(gcc_gp2),
MSM_PIN_FUNCTION(gcc_gp3),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(gp_pdm0),
MSM_PIN_FUNCTION(gp_pdm1),
MSM_PIN_FUNCTION(gp_pdm2),
diff --git a/drivers/pinctrl/qcom/pinctrl-qcs404.c b/drivers/pinctrl/qcom/pinctrl-qcs404.c
index 54e3b4435349..0b8db2c7e58a 100644
--- a/drivers/pinctrl/qcom/pinctrl-qcs404.c
+++ b/drivers/pinctrl/qcom/pinctrl-qcs404.c
@@ -1296,7 +1296,7 @@ static const char * const i2s_3_ws_a_groups[] = {
};
static const struct pinfunction qcs404_functions[] = {
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(hdmi_tx),
MSM_PIN_FUNCTION(hdmi_ddc),
MSM_PIN_FUNCTION(blsp_uart_tx_a2),
diff --git a/drivers/pinctrl/qcom/pinctrl-qcs615.c b/drivers/pinctrl/qcom/pinctrl-qcs615.c
index 2a943bc46a62..4dfa820d4e77 100644
--- a/drivers/pinctrl/qcom/pinctrl-qcs615.c
+++ b/drivers/pinctrl/qcom/pinctrl-qcs615.c
@@ -819,7 +819,7 @@ static const char *const wsa_data_groups[] = {
};
static const struct pinfunction qcs615_functions[] = {
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(adsp_ext),
MSM_PIN_FUNCTION(agera_pll),
MSM_PIN_FUNCTION(aoss_cti),
diff --git a/drivers/pinctrl/qcom/pinctrl-qcs8300.c b/drivers/pinctrl/qcom/pinctrl-qcs8300.c
index d6437e26392b..f1af1a620684 100644
--- a/drivers/pinctrl/qcom/pinctrl-qcs8300.c
+++ b/drivers/pinctrl/qcom/pinctrl-qcs8300.c
@@ -929,7 +929,7 @@ static const char *const vsense_trigger_groups[] = {
};
static const struct pinfunction qcs8300_functions[] = {
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(aoss_cti),
MSM_PIN_FUNCTION(atest_char),
MSM_PIN_FUNCTION(atest_usb2),
@@ -949,7 +949,7 @@ static const struct pinfunction qcs8300_functions[] = {
MSM_PIN_FUNCTION(edp0_hot),
MSM_PIN_FUNCTION(edp0_lcd),
MSM_PIN_FUNCTION(edp1_lcd),
- MSM_PIN_FUNCTION(egpio),
+ MSM_GPIO_PIN_FUNCTION(egpio),
MSM_PIN_FUNCTION(emac0_mcg0),
MSM_PIN_FUNCTION(emac0_mcg1),
MSM_PIN_FUNCTION(emac0_mcg2),
diff --git a/drivers/pinctrl/qcom/pinctrl-qdu1000.c b/drivers/pinctrl/qcom/pinctrl-qdu1000.c
index eacb89fa3888..7c535698a780 100644
--- a/drivers/pinctrl/qcom/pinctrl-qdu1000.c
+++ b/drivers/pinctrl/qcom/pinctrl-qdu1000.c
@@ -904,7 +904,7 @@ static const char * const vsense_trigger_groups[] = {
};
static const struct pinfunction qdu1000_functions[] = {
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(cmo_pri),
MSM_PIN_FUNCTION(si5518_int),
MSM_PIN_FUNCTION(atest_char),
diff --git a/drivers/pinctrl/qcom/pinctrl-sa8775p.c b/drivers/pinctrl/qcom/pinctrl-sa8775p.c
index 1b62eb3e6620..53f28b9c49ba 100644
--- a/drivers/pinctrl/qcom/pinctrl-sa8775p.c
+++ b/drivers/pinctrl/qcom/pinctrl-sa8775p.c
@@ -1181,7 +1181,7 @@ static const char * const vsense_trigger_groups[] = {
};
static const struct pinfunction sa8775p_functions[] = {
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(atest_char),
MSM_PIN_FUNCTION(atest_usb2),
MSM_PIN_FUNCTION(audio_ref),
@@ -1217,7 +1217,7 @@ static const struct pinfunction sa8775p_functions[] = {
MSM_PIN_FUNCTION(edp2_lcd),
MSM_PIN_FUNCTION(edp3_hot),
MSM_PIN_FUNCTION(edp3_lcd),
- MSM_PIN_FUNCTION(egpio),
+ MSM_GPIO_PIN_FUNCTION(egpio),
MSM_PIN_FUNCTION(emac0_mcg0),
MSM_PIN_FUNCTION(emac0_mcg1),
MSM_PIN_FUNCTION(emac0_mcg2),
diff --git a/drivers/pinctrl/qcom/pinctrl-sar2130p.c b/drivers/pinctrl/qcom/pinctrl-sar2130p.c
index 3dd1b5e5cfee..4a53f4ee2041 100644
--- a/drivers/pinctrl/qcom/pinctrl-sar2130p.c
+++ b/drivers/pinctrl/qcom/pinctrl-sar2130p.c
@@ -1128,7 +1128,7 @@ static const char * const vsense_trigger_groups[] = {
};
static const struct pinfunction sar2130p_functions[] = {
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(qup0),
MSM_PIN_FUNCTION(ibi_i3c),
MSM_PIN_FUNCTION(jitter_bist),
diff --git a/drivers/pinctrl/qcom/pinctrl-sc7180.c b/drivers/pinctrl/qcom/pinctrl-sc7180.c
index c43fe10b71ad..3eae51472b13 100644
--- a/drivers/pinctrl/qcom/pinctrl-sc7180.c
+++ b/drivers/pinctrl/qcom/pinctrl-sc7180.c
@@ -903,7 +903,7 @@ static const struct pinfunction sc7180_functions[] = {
MSM_PIN_FUNCTION(gcc_gp1),
MSM_PIN_FUNCTION(gcc_gp2),
MSM_PIN_FUNCTION(gcc_gp3),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(gp_pdm0),
MSM_PIN_FUNCTION(gp_pdm1),
MSM_PIN_FUNCTION(gp_pdm2),
diff --git a/drivers/pinctrl/qcom/pinctrl-sc7280.c b/drivers/pinctrl/qcom/pinctrl-sc7280.c
index 1b070e9d41f5..44e09608aad0 100644
--- a/drivers/pinctrl/qcom/pinctrl-sc7280.c
+++ b/drivers/pinctrl/qcom/pinctrl-sc7280.c
@@ -1153,11 +1153,11 @@ static const struct pinfunction sc7280_functions[] = {
MSM_PIN_FUNCTION(dp_lcd),
MSM_PIN_FUNCTION(edp_hot),
MSM_PIN_FUNCTION(edp_lcd),
- MSM_PIN_FUNCTION(egpio),
+ MSM_GPIO_PIN_FUNCTION(egpio),
MSM_PIN_FUNCTION(gcc_gp1),
MSM_PIN_FUNCTION(gcc_gp2),
MSM_PIN_FUNCTION(gcc_gp3),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(host2wlan_sol),
MSM_PIN_FUNCTION(ibi_i3c),
MSM_PIN_FUNCTION(jitter_bist),
diff --git a/drivers/pinctrl/qcom/pinctrl-sc8180x.c b/drivers/pinctrl/qcom/pinctrl-sc8180x.c
index 26dd165d1543..d9f9e3dd9dd1 100644
--- a/drivers/pinctrl/qcom/pinctrl-sc8180x.c
+++ b/drivers/pinctrl/qcom/pinctrl-sc8180x.c
@@ -1272,7 +1272,7 @@ static const struct pinfunction sc8180x_functions[] = {
MSM_PIN_FUNCTION(gcc_gp3),
MSM_PIN_FUNCTION(gcc_gp4),
MSM_PIN_FUNCTION(gcc_gp5),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(gps),
MSM_PIN_FUNCTION(grfc),
MSM_PIN_FUNCTION(hs1_mi2s),
@@ -1634,7 +1634,7 @@ static int sc8180x_pinctrl_add_tile_resources(struct platform_device *pdev)
return 0;
/* Allocate for new resources */
- nres = devm_kzalloc(&pdev->dev, sizeof(*nres) * nres_num, GFP_KERNEL);
+ nres = devm_kcalloc(&pdev->dev, nres_num, sizeof(*nres), GFP_KERNEL);
if (!nres)
return -ENOMEM;
diff --git a/drivers/pinctrl/qcom/pinctrl-sc8280xp.c b/drivers/pinctrl/qcom/pinctrl-sc8280xp.c
index 6ccd7e5648d4..cf8297e8b8f8 100644
--- a/drivers/pinctrl/qcom/pinctrl-sc8280xp.c
+++ b/drivers/pinctrl/qcom/pinctrl-sc8280xp.c
@@ -1506,7 +1506,7 @@ static const struct pinfunction sc8280xp_functions[] = {
MSM_PIN_FUNCTION(edp2_lcd),
MSM_PIN_FUNCTION(edp3_lcd),
MSM_PIN_FUNCTION(edp_hot),
- MSM_PIN_FUNCTION(egpio),
+ MSM_GPIO_PIN_FUNCTION(egpio),
MSM_PIN_FUNCTION(emac0_dll),
MSM_PIN_FUNCTION(emac0_mcg0),
MSM_PIN_FUNCTION(emac0_mcg1),
@@ -1527,7 +1527,7 @@ static const struct pinfunction sc8280xp_functions[] = {
MSM_PIN_FUNCTION(gcc_gp3),
MSM_PIN_FUNCTION(gcc_gp4),
MSM_PIN_FUNCTION(gcc_gp5),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(hs1_mi2s),
MSM_PIN_FUNCTION(hs2_mi2s),
MSM_PIN_FUNCTION(hs3_mi2s),
diff --git a/drivers/pinctrl/qcom/pinctrl-sdm660-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-sdm660-lpass-lpi.c
new file mode 100644
index 000000000000..d93af5f0e8d3
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-sdm660-lpass-lpi.c
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * This driver is solely based on the limited information in downstream code.
+ * Any verification with schematics would be greatly appreciated.
+ *
+ * Copyright (c) 2023, Richard Acayan. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-lpass-lpi.h"
+
+enum lpass_lpi_functions {
+ LPI_MUX_comp_rx,
+ LPI_MUX_dmic1_clk,
+ LPI_MUX_dmic1_data,
+ LPI_MUX_dmic2_clk,
+ LPI_MUX_dmic2_data,
+ LPI_MUX_mclk0,
+ LPI_MUX_pdm_tx,
+ LPI_MUX_pdm_clk,
+ LPI_MUX_pdm_rx,
+ LPI_MUX_pdm_sync,
+
+ LPI_MUX_gpio,
+ LPI_MUX__,
+};
+
+static const struct pinctrl_pin_desc sdm660_lpi_pinctrl_pins[] = {
+ PINCTRL_PIN(0, "gpio0"),
+ PINCTRL_PIN(1, "gpio1"),
+ PINCTRL_PIN(2, "gpio2"),
+ PINCTRL_PIN(3, "gpio3"),
+ PINCTRL_PIN(4, "gpio4"),
+ PINCTRL_PIN(5, "gpio5"),
+ PINCTRL_PIN(6, "gpio6"),
+ PINCTRL_PIN(7, "gpio7"),
+ PINCTRL_PIN(8, "gpio8"),
+ PINCTRL_PIN(9, "gpio9"),
+ PINCTRL_PIN(10, "gpio10"),
+ PINCTRL_PIN(11, "gpio11"),
+ PINCTRL_PIN(12, "gpio12"),
+ PINCTRL_PIN(13, "gpio13"),
+ PINCTRL_PIN(14, "gpio14"),
+ PINCTRL_PIN(15, "gpio15"),
+ PINCTRL_PIN(16, "gpio16"),
+ PINCTRL_PIN(17, "gpio17"),
+ PINCTRL_PIN(18, "gpio18"),
+ PINCTRL_PIN(19, "gpio19"),
+ PINCTRL_PIN(20, "gpio20"),
+ PINCTRL_PIN(21, "gpio21"),
+ PINCTRL_PIN(22, "gpio22"),
+ PINCTRL_PIN(23, "gpio23"),
+ PINCTRL_PIN(24, "gpio24"),
+ PINCTRL_PIN(25, "gpio25"),
+ PINCTRL_PIN(26, "gpio26"),
+ PINCTRL_PIN(27, "gpio27"),
+ PINCTRL_PIN(28, "gpio28"),
+ PINCTRL_PIN(29, "gpio29"),
+ PINCTRL_PIN(30, "gpio30"),
+ PINCTRL_PIN(31, "gpio31"),
+};
+
+static const char * const comp_rx_groups[] = { "gpio22", "gpio24" };
+static const char * const dmic1_clk_groups[] = { "gpio26" };
+static const char * const dmic1_data_groups[] = { "gpio27" };
+static const char * const dmic2_clk_groups[] = { "gpio28" };
+static const char * const dmic2_data_groups[] = { "gpio29" };
+static const char * const mclk0_groups[] = { "gpio18" };
+static const char * const pdm_tx_groups[] = { "gpio20" };
+static const char * const pdm_clk_groups[] = { "gpio18" };
+static const char * const pdm_rx_groups[] = { "gpio21", "gpio23", "gpio25" };
+static const char * const pdm_sync_groups[] = { "gpio19" };
+
+static const struct lpi_pingroup sdm660_lpi_pinctrl_groups[] = {
+ LPI_PINGROUP_OFFSET(0, LPI_NO_SLEW, _, _, _, _, 0x0000),
+ LPI_PINGROUP_OFFSET(1, LPI_NO_SLEW, _, _, _, _, 0x1000),
+ LPI_PINGROUP_OFFSET(2, LPI_NO_SLEW, _, _, _, _, 0x2000),
+ LPI_PINGROUP_OFFSET(3, LPI_NO_SLEW, _, _, _, _, 0x2010),
+ LPI_PINGROUP_OFFSET(4, LPI_NO_SLEW, _, _, _, _, 0x3000),
+ LPI_PINGROUP_OFFSET(5, LPI_NO_SLEW, _, _, _, _, 0x3010),
+ LPI_PINGROUP_OFFSET(6, LPI_NO_SLEW, _, _, _, _, 0x4000),
+ LPI_PINGROUP_OFFSET(7, LPI_NO_SLEW, _, _, _, _, 0x4010),
+ LPI_PINGROUP_OFFSET(8, LPI_NO_SLEW, _, _, _, _, 0x5000),
+ LPI_PINGROUP_OFFSET(9, LPI_NO_SLEW, _, _, _, _, 0x5010),
+ LPI_PINGROUP_OFFSET(10, LPI_NO_SLEW, _, _, _, _, 0x5020),
+ LPI_PINGROUP_OFFSET(11, LPI_NO_SLEW, _, _, _, _, 0x5030),
+ LPI_PINGROUP_OFFSET(12, LPI_NO_SLEW, _, _, _, _, 0x6000),
+ LPI_PINGROUP_OFFSET(13, LPI_NO_SLEW, _, _, _, _, 0x6010),
+ LPI_PINGROUP_OFFSET(14, LPI_NO_SLEW, _, _, _, _, 0x7000),
+ LPI_PINGROUP_OFFSET(15, LPI_NO_SLEW, _, _, _, _, 0x7010),
+ LPI_PINGROUP_OFFSET(16, LPI_NO_SLEW, _, _, _, _, 0x5040),
+ LPI_PINGROUP_OFFSET(17, LPI_NO_SLEW, _, _, _, _, 0x5050),
+
+ LPI_PINGROUP_OFFSET(18, LPI_NO_SLEW, pdm_clk, mclk0, _, _, 0x8000),
+ LPI_PINGROUP_OFFSET(19, LPI_NO_SLEW, pdm_sync, _, _, _, 0x8010),
+ LPI_PINGROUP_OFFSET(20, LPI_NO_SLEW, pdm_tx, _, _, _, 0x8020),
+ LPI_PINGROUP_OFFSET(21, LPI_NO_SLEW, pdm_rx, _, _, _, 0x8030),
+ LPI_PINGROUP_OFFSET(22, LPI_NO_SLEW, comp_rx, _, _, _, 0x8040),
+ LPI_PINGROUP_OFFSET(23, LPI_NO_SLEW, pdm_rx, _, _, _, 0x8050),
+ LPI_PINGROUP_OFFSET(24, LPI_NO_SLEW, comp_rx, _, _, _, 0x8060),
+ LPI_PINGROUP_OFFSET(25, LPI_NO_SLEW, pdm_rx, _, _, _, 0x8070),
+ LPI_PINGROUP_OFFSET(26, LPI_NO_SLEW, dmic1_clk, _, _, _, 0x9000),
+ LPI_PINGROUP_OFFSET(27, LPI_NO_SLEW, dmic1_data, _, _, _, 0x9010),
+ LPI_PINGROUP_OFFSET(28, LPI_NO_SLEW, dmic2_clk, _, _, _, 0xa000),
+ LPI_PINGROUP_OFFSET(29, LPI_NO_SLEW, dmic2_data, _, _, _, 0xa010),
+
+ LPI_PINGROUP_OFFSET(30, LPI_NO_SLEW, _, _, _, _, 0xb000),
+ LPI_PINGROUP_OFFSET(31, LPI_NO_SLEW, _, _, _, _, 0xb010),
+};
+
+static const struct lpi_function sdm660_lpi_pinctrl_functions[] = {
+ LPI_FUNCTION(comp_rx),
+ LPI_FUNCTION(dmic1_clk),
+ LPI_FUNCTION(dmic1_data),
+ LPI_FUNCTION(dmic2_clk),
+ LPI_FUNCTION(dmic2_data),
+ LPI_FUNCTION(mclk0),
+ LPI_FUNCTION(pdm_tx),
+ LPI_FUNCTION(pdm_clk),
+ LPI_FUNCTION(pdm_rx),
+ LPI_FUNCTION(pdm_sync),
+};
+
+static const struct lpi_pinctrl_variant_data sdm660_lpi_pinctrl_data = {
+ .pins = sdm660_lpi_pinctrl_pins,
+ .npins = ARRAY_SIZE(sdm660_lpi_pinctrl_pins),
+ .groups = sdm660_lpi_pinctrl_groups,
+ .ngroups = ARRAY_SIZE(sdm660_lpi_pinctrl_groups),
+ .functions = sdm660_lpi_pinctrl_functions,
+ .nfunctions = ARRAY_SIZE(sdm660_lpi_pinctrl_functions),
+ .flags = LPI_FLAG_SLEW_RATE_SAME_REG | LPI_FLAG_USE_PREDEFINED_PIN_OFFSET,
+};
+
+static const struct of_device_id sdm660_lpi_pinctrl_of_match[] = {
+ {
+ .compatible = "qcom,sdm660-lpass-lpi-pinctrl",
+ .data = &sdm660_lpi_pinctrl_data,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sdm660_lpi_pinctrl_of_match);
+
+static struct platform_driver sdm660_lpi_pinctrl_driver = {
+ .driver = {
+ .name = "qcom-sdm660-lpass-lpi-pinctrl",
+ .of_match_table = sdm660_lpi_pinctrl_of_match,
+ },
+ .probe = lpi_pinctrl_probe,
+ .remove = lpi_pinctrl_remove,
+};
+module_platform_driver(sdm660_lpi_pinctrl_driver);
+
+MODULE_AUTHOR("Richard Acayan <mailingradian@gmail.com>");
+MODULE_DESCRIPTION("QTI SDM660 LPI GPIO pin control driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/qcom/pinctrl-sdm660.c b/drivers/pinctrl/qcom/pinctrl-sdm660.c
index 1a78288f1bc8..687d986de75c 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdm660.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdm660.c
@@ -1157,7 +1157,7 @@ static const struct pinfunction sdm660_functions[] = {
MSM_PIN_FUNCTION(gcc_gp1),
MSM_PIN_FUNCTION(gcc_gp2),
MSM_PIN_FUNCTION(gcc_gp3),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(gps_tx_a),
MSM_PIN_FUNCTION(gps_tx_b),
MSM_PIN_FUNCTION(gps_tx_c),
diff --git a/drivers/pinctrl/qcom/pinctrl-sdm670.c b/drivers/pinctrl/qcom/pinctrl-sdm670.c
index 0fe1fa94cd6d..486b72edf7b4 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdm670.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdm670.c
@@ -991,7 +991,7 @@ static const char * const mss_lte_groups[] = {
};
static const struct pinfunction sdm670_functions[] = {
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(adsp_ext),
MSM_PIN_FUNCTION(agera_pll),
MSM_PIN_FUNCTION(atest_char),
diff --git a/drivers/pinctrl/qcom/pinctrl-sdm845.c b/drivers/pinctrl/qcom/pinctrl-sdm845.c
index 0446e291aa48..4cf8575797a0 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdm845.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdm845.c
@@ -976,7 +976,7 @@ static const char * const tsif1_sync_groups[] = {
};
static const struct pinfunction sdm845_functions[] = {
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(adsp_ext),
MSM_PIN_FUNCTION(agera_pll),
MSM_PIN_FUNCTION(atest_char),
diff --git a/drivers/pinctrl/qcom/pinctrl-sdx55.c b/drivers/pinctrl/qcom/pinctrl-sdx55.c
index 2c17bf889146..79a7010b73f1 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdx55.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdx55.c
@@ -796,7 +796,7 @@ static const struct pinfunction sdx55_functions[] = {
MSM_PIN_FUNCTION(gcc_gp2),
MSM_PIN_FUNCTION(gcc_gp3),
MSM_PIN_FUNCTION(gcc_plltest),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(i2s_mclk),
MSM_PIN_FUNCTION(jitter_bist),
MSM_PIN_FUNCTION(ldo_en),
diff --git a/drivers/pinctrl/qcom/pinctrl-sdx65.c b/drivers/pinctrl/qcom/pinctrl-sdx65.c
index 85b5c0206dbd..cc8a99a6a91e 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdx65.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdx65.c
@@ -732,7 +732,7 @@ static const struct pinfunction sdx65_functions[] = {
MSM_PIN_FUNCTION(gcc_gp2),
MSM_PIN_FUNCTION(gcc_gp3),
MSM_PIN_FUNCTION(gcc_plltest),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(i2s_mclk),
MSM_PIN_FUNCTION(jitter_bist),
MSM_PIN_FUNCTION(ldo_en),
diff --git a/drivers/pinctrl/qcom/pinctrl-sdx75.c b/drivers/pinctrl/qcom/pinctrl-sdx75.c
index ab13a3a57a83..4078d83d818c 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdx75.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdx75.c
@@ -852,7 +852,7 @@ static const struct pinfunction sdx75_functions[] = {
MSM_PIN_FUNCTION(gcc_gp2_clk),
MSM_PIN_FUNCTION(gcc_gp3_clk),
MSM_PIN_FUNCTION(gcc_plltest),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(i2s_mclk),
MSM_PIN_FUNCTION(jitter_bist),
MSM_PIN_FUNCTION(ldo_en),
diff --git a/drivers/pinctrl/qcom/pinctrl-sm4450.c b/drivers/pinctrl/qcom/pinctrl-sm4450.c
index 1ecdf1ab4f27..d51e271e3361 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm4450.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm4450.c
@@ -722,7 +722,7 @@ static const char * const wlan1_adc_dtest1_groups[] = {
};
static const struct pinfunction sm4450_functions[] = {
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(atest_char),
MSM_PIN_FUNCTION(atest_usb0),
MSM_PIN_FUNCTION(audio_ref_clk),
diff --git a/drivers/pinctrl/qcom/pinctrl-sm6115.c b/drivers/pinctrl/qcom/pinctrl-sm6115.c
index c273efa43996..06700685ea2a 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm6115.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm6115.c
@@ -687,7 +687,7 @@ static const struct pinfunction sm6115_functions[] = {
MSM_PIN_FUNCTION(gcc_gp1),
MSM_PIN_FUNCTION(gcc_gp2),
MSM_PIN_FUNCTION(gcc_gp3),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(gp_pdm0),
MSM_PIN_FUNCTION(gp_pdm1),
MSM_PIN_FUNCTION(gp_pdm2),
diff --git a/drivers/pinctrl/qcom/pinctrl-sm6125.c b/drivers/pinctrl/qcom/pinctrl-sm6125.c
index 5092f20e0c1b..5d3d1e402345 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm6125.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm6125.c
@@ -943,7 +943,7 @@ static const char * const dmic1_data_groups[] = {
static const struct pinfunction sm6125_functions[] = {
MSM_PIN_FUNCTION(qup00),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(qdss),
MSM_PIN_FUNCTION(qup01),
MSM_PIN_FUNCTION(qup02),
diff --git a/drivers/pinctrl/qcom/pinctrl-sm6350.c b/drivers/pinctrl/qcom/pinctrl-sm6350.c
index ba4686c86c54..220fb582cac9 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm6350.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm6350.c
@@ -1048,7 +1048,7 @@ static const struct pinfunction sm6350_functions[] = {
MSM_PIN_FUNCTION(gp_pdm0),
MSM_PIN_FUNCTION(gp_pdm1),
MSM_PIN_FUNCTION(gp_pdm2),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(gps_tx),
MSM_PIN_FUNCTION(ibi_i3c),
MSM_PIN_FUNCTION(jitter_bist),
diff --git a/drivers/pinctrl/qcom/pinctrl-sm6375.c b/drivers/pinctrl/qcom/pinctrl-sm6375.c
index 49031571e65e..08b8ef6efaf0 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm6375.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm6375.c
@@ -1172,7 +1172,7 @@ static const struct pinfunction sm6375_functions[] = {
MSM_PIN_FUNCTION(gp_pdm0),
MSM_PIN_FUNCTION(gp_pdm1),
MSM_PIN_FUNCTION(gp_pdm2),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(gps_tx),
MSM_PIN_FUNCTION(ibi_i3c),
MSM_PIN_FUNCTION(jitter_bist),
diff --git a/drivers/pinctrl/qcom/pinctrl-sm7150.c b/drivers/pinctrl/qcom/pinctrl-sm7150.c
index 6e89966cd70e..78dd8153a4d4 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm7150.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm7150.c
@@ -960,7 +960,7 @@ static const char * const wsa_data_groups[] = {
};
static const struct pinfunction sm7150_functions[] = {
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(adsp_ext),
MSM_PIN_FUNCTION(agera_pll),
MSM_PIN_FUNCTION(aoss_cti),
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8150.c b/drivers/pinctrl/qcom/pinctrl-sm8150.c
index 794ed99463f7..ad861cd66958 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8150.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8150.c
@@ -1217,7 +1217,7 @@ static const struct pinfunction sm8150_functions[] = {
MSM_PIN_FUNCTION(gcc_gp1),
MSM_PIN_FUNCTION(gcc_gp2),
MSM_PIN_FUNCTION(gcc_gp3),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(hs1_mi2s),
MSM_PIN_FUNCTION(hs2_mi2s),
MSM_PIN_FUNCTION(hs3_mi2s),
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8250.c b/drivers/pinctrl/qcom/pinctrl-sm8250.c
index fb6f005d64f5..f05361f3100d 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8250.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8250.c
@@ -49,6 +49,8 @@ enum {
.mux_bit = 2, \
.pull_bit = 0, \
.drv_bit = 6, \
+ .egpio_enable = 12, \
+ .egpio_present = 11, \
.oe_bit = 9, \
.in_bit = 0, \
.out_bit = 1, \
@@ -511,6 +513,7 @@ enum sm8250_functions {
msm_mux_ddr_pxi2,
msm_mux_ddr_pxi3,
msm_mux_dp_hot,
msm_mux_dp_lcd,
+ msm_mux_egpio,
msm_mux_gcc_gp1,
msm_mux_gcc_gp2,
@@ -830,6 +833,14 @@ static const char * const gpio_groups[] = {
"gpio171", "gpio172", "gpio173", "gpio174", "gpio175", "gpio176",
"gpio177", "gpio178", "gpio179",
};
+static const char * const egpio_groups[] = {
+ "gpio146", "gpio147", "gpio148", "gpio149", "gpio150", "gpio151",
+ "gpio152", "gpio153", "gpio154", "gpio155", "gpio156", "gpio157",
+ "gpio158", "gpio159", "gpio160", "gpio161", "gpio162", "gpio163",
+ "gpio164", "gpio165", "gpio166", "gpio167", "gpio168", "gpio169",
+ "gpio170", "gpio171", "gpio172", "gpio173", "gpio174", "gpio175",
+ "gpio176", "gpio177", "gpio178", "gpio179",
+};
static const char * const qdss_cti_groups[] = {
"gpio0", "gpio2", "gpio2", "gpio44", "gpio45", "gpio46", "gpio92",
"gpio93",
@@ -1018,10 +1029,11 @@ static const struct pinfunction sm8250_functions[] = {
MSM_PIN_FUNCTION(ddr_pxi3),
MSM_PIN_FUNCTION(dp_hot),
MSM_PIN_FUNCTION(dp_lcd),
+ MSM_PIN_FUNCTION(egpio),
MSM_PIN_FUNCTION(gcc_gp1),
MSM_PIN_FUNCTION(gcc_gp2),
MSM_PIN_FUNCTION(gcc_gp3),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(ibi_i3c),
MSM_PIN_FUNCTION(jitter_bist),
MSM_PIN_FUNCTION(lpass_slimbus),
@@ -1265,40 +1277,40 @@ static const struct msm_pingroup sm8250_groups[] = {
[143] = PINGROUP(143, WEST, lpass_slimbus, mi2s1_data0, ddr_bist, _, _, _, _, _, _),
[144] = PINGROUP(144, WEST, lpass_slimbus, mi2s1_data1, ddr_bist, _, _, _, _, _, _),
[145] = PINGROUP(145, WEST, lpass_slimbus, mi2s1_ws, _, _, _, _, _, _, _),
- [146] = PINGROUP(146, WEST, _, _, _, _, _, _, _, _, _),
- [147] = PINGROUP(147, WEST, _, _, _, _, _, _, _, _, _),
- [148] = PINGROUP(148, WEST, _, _, _, _, _, _, _, _, _),
- [149] = PINGROUP(149, WEST, _, _, _, _, _, _, _, _, _),
- [150] = PINGROUP(150, WEST, _, _, _, _, _, _, _, _, _),
- [151] = PINGROUP(151, WEST, _, _, _, _, _, _, _, _, _),
- [152] = PINGROUP(152, WEST, _, _, _, _, _, _, _, _, _),
- [153] = PINGROUP(153, WEST, _, _, _, _, _, _, _, _, _),
- [154] = PINGROUP(154, WEST, _, _, _, _, _, _, _, _, _),
- [155] = PINGROUP(155, WEST, _, _, _, _, _, _, _, _, _),
- [156] = PINGROUP(156, WEST, _, _, _, _, _, _, _, _, _),
- [157] = PINGROUP(157, WEST, _, _, _, _, _, _, _, _, _),
- [158] = PINGROUP(158, WEST, _, _, _, _, _, _, _, _, _),
- [159] = PINGROUP(159, WEST, cri_trng0, _, _, _, _, _, _, _, _),
- [160] = PINGROUP(160, WEST, cri_trng1, qdss_gpio, _, _, _, _, _, _, _),
- [161] = PINGROUP(161, WEST, cri_trng, qdss_gpio, _, _, _, _, _, _, _),
- [162] = PINGROUP(162, WEST, sp_cmu, qdss_gpio, _, _, _, _, _, _, _),
- [163] = PINGROUP(163, WEST, prng_rosc, qdss_gpio, _, _, _, _, _, _, _),
- [164] = PINGROUP(164, WEST, qdss_gpio, _, _, _, _, _, _, _, _),
- [165] = PINGROUP(165, WEST, qdss_gpio, _, _, _, _, _, _, _, _),
- [166] = PINGROUP(166, WEST, qdss_gpio, _, _, _, _, _, _, _, _),
- [167] = PINGROUP(167, WEST, qdss_gpio, _, _, _, _, _, _, _, _),
- [168] = PINGROUP(168, WEST, qdss_gpio, _, _, _, _, _, _, _, _),
- [169] = PINGROUP(169, WEST, qdss_gpio, _, _, _, _, _, _, _, _),
- [170] = PINGROUP(170, WEST, qdss_gpio, _, _, _, _, _, _, _, _),
- [171] = PINGROUP(171, WEST, qdss_gpio, _, _, _, _, _, _, _, _),
- [172] = PINGROUP(172, WEST, qdss_gpio, _, _, _, _, _, _, _, _),
- [173] = PINGROUP(173, WEST, qdss_gpio, _, _, _, _, _, _, _, _),
- [174] = PINGROUP(174, WEST, qdss_gpio, _, _, _, _, _, _, _, _),
- [175] = PINGROUP(175, WEST, qdss_gpio, _, _, _, _, _, _, _, _),
- [176] = PINGROUP(176, WEST, qdss_gpio, _, _, _, _, _, _, _, _),
- [177] = PINGROUP(177, WEST, qdss_gpio, _, _, _, _, _, _, _, _),
- [178] = PINGROUP(178, WEST, _, _, _, _, _, _, _, _, _),
- [179] = PINGROUP(179, WEST, _, _, _, _, _, _, _, _, _),
+ [146] = PINGROUP(146, WEST, _, _, _, _, _, _, _, _, egpio),
+ [147] = PINGROUP(147, WEST, _, _, _, _, _, _, _, _, egpio),
+ [148] = PINGROUP(148, WEST, _, _, _, _, _, _, _, _, egpio),
+ [149] = PINGROUP(149, WEST, _, _, _, _, _, _, _, _, egpio),
+ [150] = PINGROUP(150, WEST, _, _, _, _, _, _, _, _, egpio),
+ [151] = PINGROUP(151, WEST, _, _, _, _, _, _, _, _, egpio),
+ [152] = PINGROUP(152, WEST, _, _, _, _, _, _, _, _, egpio),
+ [153] = PINGROUP(153, WEST, _, _, _, _, _, _, _, _, egpio),
+ [154] = PINGROUP(154, WEST, _, _, _, _, _, _, _, _, egpio),
+ [155] = PINGROUP(155, WEST, _, _, _, _, _, _, _, _, egpio),
+ [156] = PINGROUP(156, WEST, _, _, _, _, _, _, _, _, egpio),
+ [157] = PINGROUP(157, WEST, _, _, _, _, _, _, _, _, egpio),
+ [158] = PINGROUP(158, WEST, _, _, _, _, _, _, _, _, egpio),
+ [159] = PINGROUP(159, WEST, cri_trng0, _, _, _, _, _, _, _, egpio),
+ [160] = PINGROUP(160, WEST, cri_trng1, qdss_gpio, _, _, _, _, _, _, egpio),
+ [161] = PINGROUP(161, WEST, cri_trng, qdss_gpio, _, _, _, _, _, _, egpio),
+ [162] = PINGROUP(162, WEST, sp_cmu, qdss_gpio, _, _, _, _, _, _, egpio),
+ [163] = PINGROUP(163, WEST, prng_rosc, qdss_gpio, _, _, _, _, _, _, egpio),
+ [164] = PINGROUP(164, WEST, qdss_gpio, _, _, _, _, _, _, _, egpio),
+ [165] = PINGROUP(165, WEST, qdss_gpio, _, _, _, _, _, _, _, egpio),
+ [166] = PINGROUP(166, WEST, qdss_gpio, _, _, _, _, _, _, _, egpio),
+ [167] = PINGROUP(167, WEST, qdss_gpio, _, _, _, _, _, _, _, egpio),
+ [168] = PINGROUP(168, WEST, qdss_gpio, _, _, _, _, _, _, _, egpio),
+ [169] = PINGROUP(169, WEST, qdss_gpio, _, _, _, _, _, _, _, egpio),
+ [170] = PINGROUP(170, WEST, qdss_gpio, _, _, _, _, _, _, _, egpio),
+ [171] = PINGROUP(171, WEST, qdss_gpio, _, _, _, _, _, _, _, egpio),
+ [172] = PINGROUP(172, WEST, qdss_gpio, _, _, _, _, _, _, _, egpio),
+ [173] = PINGROUP(173, WEST, qdss_gpio, _, _, _, _, _, _, _, egpio),
+ [174] = PINGROUP(174, WEST, qdss_gpio, _, _, _, _, _, _, _, egpio),
+ [175] = PINGROUP(175, WEST, qdss_gpio, _, _, _, _, _, _, _, egpio),
+ [176] = PINGROUP(176, WEST, qdss_gpio, _, _, _, _, _, _, _, egpio),
+ [177] = PINGROUP(177, WEST, qdss_gpio, _, _, _, _, _, _, _, egpio),
+ [178] = PINGROUP(178, WEST, _, _, _, _, _, _, _, _, egpio),
+ [179] = PINGROUP(179, WEST, _, _, _, _, _, _, _, _, egpio),
[180] = UFS_RESET(ufs_reset, 0xb8000),
[181] = SDC_PINGROUP(sdc2_clk, 0xb7000, 14, 6),
[182] = SDC_PINGROUP(sdc2_cmd, 0xb7000, 11, 3),
@@ -1333,6 +1345,7 @@ static const struct msm_pinctrl_soc_data sm8250_pinctrl = {
.ntiles = ARRAY_SIZE(sm8250_tiles),
.wakeirq_map = sm8250_pdc_map,
.nwakeirq_map = ARRAY_SIZE(sm8250_pdc_map),
+ .egpio_func = 9,
};
static int sm8250_pinctrl_probe(struct platform_device *pdev)
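A hedged note on the sm8250 eGPIO wiring: egpio_present and egpio_enable name per-group bits in the ctl register, and egpio_func = 9 matches "egpio" occupying the last of the nine function slots in the PINGROUP rows above.

/* Sketch (assumption, simplified): in msm_pinmux_set_mux() the shared
 * code tests BIT(g->egpio_present) and then flips BIT(g->egpio_enable)
 * so the pad is routed either to this TLMM or to the island controller,
 * depending on whether the requested function equals soc->egpio_func.
 */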
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8350.c b/drivers/pinctrl/qcom/pinctrl-sm8350.c
index c8a3f39ce6f1..99949b552021 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8350.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8350.c
@@ -1267,7 +1267,7 @@ static const struct pinfunction sm8350_functions[] = {
MSM_PIN_FUNCTION(gcc_gp1),
MSM_PIN_FUNCTION(gcc_gp2),
MSM_PIN_FUNCTION(gcc_gp3),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(ibi_i3c),
MSM_PIN_FUNCTION(jitter_bist),
MSM_PIN_FUNCTION(lpass_slimbus),
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8450.c b/drivers/pinctrl/qcom/pinctrl-sm8450.c
index f2e52d5a0f93..9889fc5dc2cd 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8450.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8450.c
@@ -1269,7 +1269,7 @@ static const char * const vsense_trigger_groups[] = {
};
static const struct pinfunction sm8450_functions[] = {
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(aon_cam),
MSM_PIN_FUNCTION(atest_char),
MSM_PIN_FUNCTION(atest_usb),
@@ -1291,7 +1291,7 @@ static const struct pinfunction sm8450_functions[] = {
MSM_PIN_FUNCTION(ddr_pxi2),
MSM_PIN_FUNCTION(ddr_pxi3),
MSM_PIN_FUNCTION(dp_hot),
- MSM_PIN_FUNCTION(egpio),
+ MSM_GPIO_PIN_FUNCTION(egpio),
MSM_PIN_FUNCTION(gcc_gp1),
MSM_PIN_FUNCTION(gcc_gp2),
MSM_PIN_FUNCTION(gcc_gp3),
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8550.c b/drivers/pinctrl/qcom/pinctrl-sm8550.c
index 1b4496cb39eb..10a62031fdfd 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8550.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8550.c
@@ -1340,7 +1340,7 @@ static const char *const vsense_trigger_mirnat_groups[] = {
};
static const struct pinfunction sm8550_functions[] = {
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(aon_cci),
MSM_PIN_FUNCTION(aoss_cti),
MSM_PIN_FUNCTION(atest_char),
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8650.c b/drivers/pinctrl/qcom/pinctrl-sm8650.c
index 449a0077f4b1..e2ae03800206 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8650.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8650.c
@@ -1328,7 +1328,7 @@ static const char *const vsense_trigger_mirnat_groups[] = {
};
static const struct pinfunction sm8650_functions[] = {
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(aoss_cti),
MSM_PIN_FUNCTION(atest_char),
MSM_PIN_FUNCTION(atest_usb),
@@ -1359,7 +1359,7 @@ static const struct pinfunction sm8650_functions[] = {
MSM_PIN_FUNCTION(ddr_pxi3),
MSM_PIN_FUNCTION(do_not),
MSM_PIN_FUNCTION(dp_hot),
- MSM_PIN_FUNCTION(egpio),
+ MSM_GPIO_PIN_FUNCTION(egpio),
MSM_PIN_FUNCTION(gcc_gp1),
MSM_PIN_FUNCTION(gcc_gp2),
MSM_PIN_FUNCTION(gcc_gp3),
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8750.c b/drivers/pinctrl/qcom/pinctrl-sm8750.c
index 8516693d1db5..6f92f176edd4 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8750.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8750.c
@@ -1290,7 +1290,7 @@ static const char *const wcn_sw_ctrl_groups[] = {
};
static const struct pinfunction sm8750_functions[] = {
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(aoss_cti),
MSM_PIN_FUNCTION(atest_char),
MSM_PIN_FUNCTION(atest_usb),
@@ -1319,7 +1319,7 @@ static const struct pinfunction sm8750_functions[] = {
MSM_PIN_FUNCTION(ddr_pxi2),
MSM_PIN_FUNCTION(ddr_pxi3),
MSM_PIN_FUNCTION(dp_hot),
- MSM_PIN_FUNCTION(egpio),
+ MSM_GPIO_PIN_FUNCTION(egpio),
MSM_PIN_FUNCTION(gcc_gp1),
MSM_PIN_FUNCTION(gcc_gp2),
MSM_PIN_FUNCTION(gcc_gp3),
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index b7b15874e488..485b68cc93f8 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -438,7 +438,7 @@ static int pmic_gpio_config_get(struct pinctrl_dev *pctldev,
case PIN_CONFIG_OUTPUT_ENABLE:
arg = pad->output_enabled;
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
arg = pad->out_value;
break;
case PMIC_GPIO_CONF_PULL_UP:
@@ -530,7 +530,7 @@ static int pmic_gpio_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
case PIN_CONFIG_OUTPUT_ENABLE:
pad->output_enabled = arg ? true : false;
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
pad->output_enabled = true;
pad->out_value = arg;
break;
@@ -737,7 +737,7 @@ static int pmic_gpio_direction_output(struct gpio_chip *chip,
struct pmic_gpio_state *state = gpiochip_get_data(chip);
unsigned long config;
- config = pinconf_to_config_packed(PIN_CONFIG_OUTPUT, val);
+ config = pinconf_to_config_packed(PIN_CONFIG_LEVEL, val);
return pmic_gpio_config_set(state->ctrl, pin, &config, 1);
}
@@ -769,7 +769,7 @@ static int pmic_gpio_set(struct gpio_chip *chip, unsigned int pin, int value)
struct pmic_gpio_state *state = gpiochip_get_data(chip);
unsigned long config;
- config = pinconf_to_config_packed(PIN_CONFIG_OUTPUT, value);
+ config = pinconf_to_config_packed(PIN_CONFIG_LEVEL, value);
return pmic_gpio_config_set(state->ctrl, pin, &config, 1);
}
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
index 22d76b1013a3..64f8024a865c 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
@@ -370,7 +370,7 @@ static int pmic_mpp_config_get(struct pinctrl_dev *pctldev,
return -EINVAL;
arg = 1;
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
arg = pad->out_value;
break;
case PMIC_MPP_CONF_DTEST_SELECTOR:
@@ -447,7 +447,7 @@ static int pmic_mpp_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
case PIN_CONFIG_INPUT_ENABLE:
pad->input_enabled = arg ? true : false;
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
pad->output_enabled = true;
pad->out_value = arg;
break;
@@ -576,7 +576,7 @@ static int pmic_mpp_direction_output(struct gpio_chip *chip,
struct pmic_mpp_state *state = gpiochip_get_data(chip);
unsigned long config;
- config = pinconf_to_config_packed(PIN_CONFIG_OUTPUT, val);
+ config = pinconf_to_config_packed(PIN_CONFIG_LEVEL, val);
return pmic_mpp_config_set(state->ctrl, pin, &config, 1);
}
@@ -605,7 +605,7 @@ static int pmic_mpp_set(struct gpio_chip *chip, unsigned int pin, int value)
struct pmic_mpp_state *state = gpiochip_get_data(chip);
unsigned long config;
- config = pinconf_to_config_packed(PIN_CONFIG_OUTPUT, value);
+ config = pinconf_to_config_packed(PIN_CONFIG_LEVEL, value);
return pmic_mpp_config_set(state->ctrl, pin, &config, 1);
}
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
index fb37b1c1acb4..5c966d51eda7 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
@@ -282,7 +282,7 @@ static int pm8xxx_pin_config_get(struct pinctrl_dev *pctldev,
return -EINVAL;
arg = 1;
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
if (pin->mode & PM8XXX_GPIO_MODE_OUTPUT)
arg = pin->output_value;
else
@@ -364,7 +364,7 @@ static int pm8xxx_pin_config_set(struct pinctrl_dev *pctldev,
pin->mode = PM8XXX_GPIO_MODE_INPUT;
banks |= BIT(0) | BIT(1);
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
pin->mode = PM8XXX_GPIO_MODE_OUTPUT;
pin->output_value = !!arg;
banks |= BIT(0) | BIT(1);
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
index 6103849af042..7970fa6e1557 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
@@ -337,7 +337,7 @@ static int pm8xxx_pin_config_get(struct pinctrl_dev *pctldev,
case PIN_CONFIG_INPUT_ENABLE:
arg = pin->input;
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
arg = pin->output_value;
break;
case PIN_CONFIG_POWER_SOURCE:
@@ -392,7 +392,7 @@ static int pm8xxx_pin_config_set(struct pinctrl_dev *pctldev,
case PIN_CONFIG_INPUT_ENABLE:
pin->input = true;
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
pin->output = true;
pin->output_value = !!arg;
break;
diff --git a/drivers/pinctrl/qcom/pinctrl-x1e80100.c b/drivers/pinctrl/qcom/pinctrl-x1e80100.c
index d4b215f34c39..bb36f40b19fa 100644
--- a/drivers/pinctrl/qcom/pinctrl-x1e80100.c
+++ b/drivers/pinctrl/qcom/pinctrl-x1e80100.c
@@ -1407,7 +1407,7 @@ static const char * const vsense_trigger_groups[] = {
};
static const struct pinfunction x1e80100_functions[] = {
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(RESOUT_GPIO),
MSM_PIN_FUNCTION(aon_cci),
MSM_PIN_FUNCTION(aoss_cti),
diff --git a/drivers/pinctrl/renesas/Kconfig b/drivers/pinctrl/renesas/Kconfig
index 99ae34a56871..8cbd79a13414 100644
--- a/drivers/pinctrl/renesas/Kconfig
+++ b/drivers/pinctrl/renesas/Kconfig
@@ -44,6 +44,8 @@ config PINCTRL_RENESAS
select PINCTRL_RZG2L if ARCH_R9A09G047
select PINCTRL_RZG2L if ARCH_R9A09G056
select PINCTRL_RZG2L if ARCH_R9A09G057
+ select PINCTRL_RZT2H if ARCH_R9A09G077
+ select PINCTRL_RZT2H if ARCH_R9A09G087
select PINCTRL_PFC_SH7203 if CPU_SUBTYPE_SH7203
select PINCTRL_PFC_SH7264 if CPU_SUBTYPE_SH7264
select PINCTRL_PFC_SH7269 if CPU_SUBTYPE_SH7269
@@ -302,6 +304,17 @@ config PINCTRL_RZN1
help
This selects pinctrl driver for Renesas RZ/N1 devices.
+config PINCTRL_RZT2H
+ bool "pin control support for RZ/N2H and RZ/T2H" if COMPILE_TEST
+ depends on 64BIT && OF
+ select GPIOLIB
+ select GENERIC_PINCTRL_GROUPS
+ select GENERIC_PINMUX_FUNCTIONS
+ select GENERIC_PINCONF
+ help
+ This selects the GPIO and pin control driver for Renesas RZ/N2H
+ and RZ/T2H platforms.
+
config PINCTRL_RZV2M
bool "pin control support for RZ/V2M" if COMPILE_TEST
depends on OF
diff --git a/drivers/pinctrl/renesas/Makefile b/drivers/pinctrl/renesas/Makefile
index 2ba623e04bf8..1c5144a1c4b8 100644
--- a/drivers/pinctrl/renesas/Makefile
+++ b/drivers/pinctrl/renesas/Makefile
@@ -50,6 +50,7 @@ obj-$(CONFIG_PINCTRL_RZA1) += pinctrl-rza1.o
obj-$(CONFIG_PINCTRL_RZA2) += pinctrl-rza2.o
obj-$(CONFIG_PINCTRL_RZG2L) += pinctrl-rzg2l.o
obj-$(CONFIG_PINCTRL_RZN1) += pinctrl-rzn1.o
+obj-$(CONFIG_PINCTRL_RZT2H) += pinctrl-rzt2h.o
obj-$(CONFIG_PINCTRL_RZV2M) += pinctrl-rzv2m.o
ifeq ($(CONFIG_COMPILE_TEST),y)
diff --git a/drivers/pinctrl/renesas/pfc-r8a779g0.c b/drivers/pinctrl/renesas/pfc-r8a779g0.c
index cae3e6553499..218c5eff9b67 100644
--- a/drivers/pinctrl/renesas/pfc-r8a779g0.c
+++ b/drivers/pinctrl/renesas/pfc-r8a779g0.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * R8A779A0 processor support - PFC hardware block.
+ * R8A779G0 processor support - PFC hardware block.
*
* Copyright (C) 2021 Renesas Electronics Corp.
*
diff --git a/drivers/pinctrl/renesas/pinctrl-rza1.c b/drivers/pinctrl/renesas/pinctrl-rza1.c
index 23812116ef42..f24e5915cbe4 100644
--- a/drivers/pinctrl/renesas/pinctrl-rza1.c
+++ b/drivers/pinctrl/renesas/pinctrl-rza1.c
@@ -933,7 +933,7 @@ static int rza1_parse_pinmux_node(struct rza1_pinctrl *rza1_pctl,
case PIN_CONFIG_INPUT_ENABLE:
pinmux_flags |= MUX_FLAGS_SWIO_INPUT;
break;
- case PIN_CONFIG_OUTPUT: /* for DT backwards compatibility */
+ case PIN_CONFIG_LEVEL: /* for DT backwards compatibility */
case PIN_CONFIG_OUTPUT_ENABLE:
pinmux_flags |= MUX_FLAGS_SWIO_OUTPUT;
break;
@@ -1120,7 +1120,7 @@ static int rza1_set_mux(struct pinctrl_dev *pctldev, unsigned int selector,
{
struct rza1_pinctrl *rza1_pctl = pinctrl_dev_get_drvdata(pctldev);
struct rza1_mux_conf *mux_confs;
- struct function_desc *func;
+ const struct function_desc *func;
struct group_desc *grp;
int i;
diff --git a/drivers/pinctrl/renesas/pinctrl-rza2.c b/drivers/pinctrl/renesas/pinctrl-rza2.c
index b78b5b4ec5af..29a9db197599 100644
--- a/drivers/pinctrl/renesas/pinctrl-rza2.c
+++ b/drivers/pinctrl/renesas/pinctrl-rza2.c
@@ -442,7 +442,7 @@ static int rza2_set_mux(struct pinctrl_dev *pctldev, unsigned int selector,
unsigned int group)
{
struct rza2_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
- struct function_desc *func;
+ const struct function_desc *func;
unsigned int i, *psel_val;
struct group_desc *grp;
diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
index c52263c2a7b0..f524af6f586f 100644
--- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
@@ -146,8 +146,6 @@
#define SD_CH(off, ch) ((off) + (ch) * 4)
#define ETH_POC(off, ch) ((off) + (ch) * 4)
#define QSPI (0x3008)
-#define ETH_MODE (0x3018)
-#define PFC_OEN (0x3C40) /* known on RZ/V2H(P) only */
#define PVDD_2500 2 /* I/O domain voltage 2.5V */
#define PVDD_1800 1 /* I/O domain voltage <= 1.8V */
@@ -221,11 +219,13 @@ static const struct pin_config_item renesas_rzv2h_conf_items[] = {
* @pwpr: PWPR register offset
* @sd_ch: SD_CH register offset
* @eth_poc: ETH_POC register offset
+ * @oen: OEN register offset
*/
struct rzg2l_register_offsets {
u16 pwpr;
u16 sd_ch;
u16 eth_poc;
+ u16 oen;
};
/**
@@ -254,6 +254,7 @@ enum rzg2l_iolh_index {
* @iolh_groupb_oi: IOLH group B output impedance specific values
* @tint_start_index: the start index for the TINT interrupts
* @drive_strength_ua: drive strength in uA is supported (otherwise mA is supported)
+ * @oen_pwpr_lock: flag indicating if the OEN register is locked by PWPR
* @func_base: base number for port function (see register PFC)
* @oen_max_pin: the maximum pin number supporting output enable
* @oen_max_port: the maximum port number supporting output enable
@@ -266,6 +267,7 @@ struct rzg2l_hwcfg {
u16 iolh_groupb_oi[4];
u16 tint_start_index;
bool drive_strength_ua;
+ bool oen_pwpr_lock;
u8 func_base;
u8 oen_max_pin;
u8 oen_max_port;
@@ -295,8 +297,7 @@ struct rzg2l_pinctrl_data {
#endif
void (*pwpr_pfc_lock_unlock)(struct rzg2l_pinctrl *pctrl, bool lock);
void (*pmc_writeb)(struct rzg2l_pinctrl *pctrl, u8 val, u16 offset);
- u32 (*oen_read)(struct rzg2l_pinctrl *pctrl, unsigned int _pin);
- int (*oen_write)(struct rzg2l_pinctrl *pctrl, unsigned int _pin, u8 oen);
+ int (*pin_to_oen_bit)(struct rzg2l_pinctrl *pctrl, unsigned int _pin);
int (*hw_to_bias_param)(unsigned int val);
int (*bias_param_to_hw)(enum pin_config_param param);
};
@@ -320,9 +321,10 @@ struct rzg2l_pinctrl_pin_settings {
* @iolh: IOLH registers cache
* @pupd: PUPD registers cache
* @ien: IEN registers cache
+ * @smt: SMT registers cache
* @sd_ch: SD_CH registers cache
* @eth_poc: ETH_POC registers cache
- * @eth_mode: ETH_MODE register cache
+ * @oen: Output Enable register cache
* @qspi: QSPI registers cache
*/
struct rzg2l_pinctrl_reg_cache {
@@ -333,9 +335,10 @@ struct rzg2l_pinctrl_reg_cache {
u32 *iolh[2];
u32 *ien[2];
u32 *pupd[2];
+ u32 *smt;
u8 sd_ch[2];
u8 eth_poc[2];
- u8 eth_mode;
+ u8 oen;
u8 qspi;
};
@@ -394,6 +397,14 @@ static const u64 r9a09g047_variable_pin_cfg[] = {
RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PA, 5, RZV2H_MPXED_PIN_FUNCS),
RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PA, 6, RZV2H_MPXED_PIN_FUNCS),
RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PA, 7, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PB, 0, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PB, 1, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_OEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PB, 2, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PB, 3, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PB, 4, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PB, 5, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PB, 6, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PB, 7, RZV2H_MPXED_PIN_FUNCS),
RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PD, 0, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PD, 1, RZV2H_MPXED_PIN_FUNCS),
RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PD, 2, RZV2H_MPXED_PIN_FUNCS),
@@ -402,6 +413,14 @@ static const u64 r9a09g047_variable_pin_cfg[] = {
RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PD, 5, RZV2H_MPXED_PIN_FUNCS),
RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PD, 6, RZV2H_MPXED_PIN_FUNCS),
RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PD, 7, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PE, 0, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PE, 1, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_OEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PE, 2, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PE, 3, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PE, 4, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PE, 5, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PE, 6, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PE, 7, RZV2H_MPXED_PIN_FUNCS),
RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PG, 0, RZV2H_MPXED_PIN_FUNCS),
RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PG, 1, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PG, 2, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
@@ -421,6 +440,14 @@ static const u64 r9a09g047_variable_pin_cfg[] = {
RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PJ, 2, RZV2H_MPXED_PIN_FUNCS),
RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PJ, 3, RZV2H_MPXED_PIN_FUNCS),
RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PJ, 4, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PL, 0, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_OEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PL, 1, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_OEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PL, 2, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_OEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PL, 3, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PL, 4, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_OEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PL, 5, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PL, 6, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PL, 7, RZV2H_MPXED_PIN_FUNCS),
};
static const u64 r9a09g057_variable_pin_cfg[] = {
@@ -549,7 +576,7 @@ static int rzg2l_pinctrl_set_mux(struct pinctrl_dev *pctldev,
{
struct rzg2l_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
const struct rzg2l_hwcfg *hwcfg = pctrl->data->hwcfg;
- struct function_desc *func;
+ const struct function_desc *func;
unsigned int i, *psel_val;
struct group_desc *group;
const unsigned int *pins;
@@ -1065,34 +1092,48 @@ static int rzg2l_pin_to_oen_bit(struct rzg2l_pinctrl *pctrl, unsigned int _pin)
return -EINVAL;
}
-static u32 rzg2l_read_oen(struct rzg2l_pinctrl *pctrl, unsigned int _pin)
+static int rzg2l_read_oen(struct rzg2l_pinctrl *pctrl, unsigned int _pin)
{
int bit;
- bit = rzg2l_pin_to_oen_bit(pctrl, _pin);
+ if (!pctrl->data->pin_to_oen_bit)
+ return -EOPNOTSUPP;
+
+ bit = pctrl->data->pin_to_oen_bit(pctrl, _pin);
if (bit < 0)
- return 0;
+ return -EINVAL;
- return !(readb(pctrl->base + ETH_MODE) & BIT(bit));
+ return !(readb(pctrl->base + pctrl->data->hwcfg->regs.oen) & BIT(bit));
}
static int rzg2l_write_oen(struct rzg2l_pinctrl *pctrl, unsigned int _pin, u8 oen)
{
+ const struct rzg2l_register_offsets *regs = &pctrl->data->hwcfg->regs;
+ u16 oen_offset = regs->oen;
unsigned long flags;
+ u8 val, pwpr;
int bit;
- u8 val;
- bit = rzg2l_pin_to_oen_bit(pctrl, _pin);
+ if (!pctrl->data->pin_to_oen_bit)
+ return -EOPNOTSUPP;
+
+ bit = pctrl->data->pin_to_oen_bit(pctrl, _pin);
if (bit < 0)
- return bit;
+ return -EINVAL;
spin_lock_irqsave(&pctrl->lock, flags);
- val = readb(pctrl->base + ETH_MODE);
+ val = readb(pctrl->base + oen_offset);
if (oen)
val &= ~BIT(bit);
else
val |= BIT(bit);
- writeb(val, pctrl->base + ETH_MODE);
+ if (pctrl->data->hwcfg->oen_pwpr_lock) {
+ pwpr = readb(pctrl->base + regs->pwpr);
+ writeb(pwpr | PWPR_REGWE_B, pctrl->base + regs->pwpr);
+ }
+ writeb(val, pctrl->base + oen_offset);
+ if (pctrl->data->hwcfg->oen_pwpr_lock)
+ writeb(pwpr & ~PWPR_REGWE_B, pctrl->base + regs->pwpr);
spin_unlock_irqrestore(&pctrl->lock, flags);
return 0;
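Note the inverted sense of the OEN bits: a cleared bit enables the output driver, so rzg2l_write_oen() clears BIT(bit) when oen is set. On parts whose hwcfg sets oen_pwpr_lock, the OEN register is additionally write-protected, and the access is bracketed by PWPR_REGWE_B. A sketch of that guarded-write pattern, reusing the names from the function above:

	/* PWPR-guarded write: open the window, write, re-lock. */
	pwpr = readb(base + regs->pwpr);
	writeb(pwpr | PWPR_REGWE_B, base + regs->pwpr);
	writeb(val, base + oen_offset);
	writeb(pwpr & ~PWPR_REGWE_B, base + regs->pwpr);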
@@ -1118,39 +1159,6 @@ static int rzg3s_pin_to_oen_bit(struct rzg2l_pinctrl *pctrl, unsigned int _pin)
return bit;
}
-static u32 rzg3s_oen_read(struct rzg2l_pinctrl *pctrl, unsigned int _pin)
-{
- int bit;
-
- bit = rzg3s_pin_to_oen_bit(pctrl, _pin);
- if (bit < 0)
- return bit;
-
- return !(readb(pctrl->base + ETH_MODE) & BIT(bit));
-}
-
-static int rzg3s_oen_write(struct rzg2l_pinctrl *pctrl, unsigned int _pin, u8 oen)
-{
- unsigned long flags;
- int bit;
- u8 val;
-
- bit = rzg3s_pin_to_oen_bit(pctrl, _pin);
- if (bit < 0)
- return bit;
-
- spin_lock_irqsave(&pctrl->lock, flags);
- val = readb(pctrl->base + ETH_MODE);
- if (oen)
- val &= ~BIT(bit);
- else
- val |= BIT(bit);
- writeb(val, pctrl->base + ETH_MODE);
- spin_unlock_irqrestore(&pctrl->lock, flags);
-
- return 0;
-}
-
static int rzg2l_hw_to_bias_param(unsigned int bias)
{
switch (bias) {
@@ -1216,55 +1224,37 @@ static int rzv2h_bias_param_to_hw(enum pin_config_param param)
return -EINVAL;
}
-static u8 rzv2h_pin_to_oen_bit(struct rzg2l_pinctrl *pctrl, unsigned int _pin)
+static int rzg2l_pin_names_to_oen_bit(struct rzg2l_pinctrl *pctrl, unsigned int _pin,
+ const char * const pin_names[], unsigned int count)
{
- static const char * const pin_names[] = { "ET0_TXC_TXCLK", "ET1_TXC_TXCLK",
- "XSPI0_RESET0N", "XSPI0_CS0N",
- "XSPI0_CKN", "XSPI0_CKP" };
const struct pinctrl_pin_desc *pin_desc = &pctrl->desc.pins[_pin];
unsigned int i;
- for (i = 0; i < ARRAY_SIZE(pin_names); i++) {
+ for (i = 0; i < count; i++) {
if (!strcmp(pin_desc->name, pin_names[i]))
return i;
}
- /* Should not happen. */
- return 0;
+ return -EINVAL;
}
-static u32 rzv2h_oen_read(struct rzg2l_pinctrl *pctrl, unsigned int _pin)
+static int rzv2h_pin_to_oen_bit(struct rzg2l_pinctrl *pctrl, unsigned int _pin)
{
- u8 bit;
-
- bit = rzv2h_pin_to_oen_bit(pctrl, _pin);
+ static const char * const pin_names[] = {
+ "ET0_TXC_TXCLK", "ET1_TXC_TXCLK", "XSPI0_RESET0N",
+ "XSPI0_CS0N", "XSPI0_CKN", "XSPI0_CKP"
+ };
- return !(readb(pctrl->base + PFC_OEN) & BIT(bit));
+ return rzg2l_pin_names_to_oen_bit(pctrl, _pin, pin_names, ARRAY_SIZE(pin_names));
}
-static int rzv2h_oen_write(struct rzg2l_pinctrl *pctrl, unsigned int _pin, u8 oen)
+static int rzg3e_pin_to_oen_bit(struct rzg2l_pinctrl *pctrl, unsigned int _pin)
{
- const struct rzg2l_hwcfg *hwcfg = pctrl->data->hwcfg;
- const struct rzg2l_register_offsets *regs = &hwcfg->regs;
- unsigned long flags;
- u8 val, bit;
- u8 pwpr;
-
- bit = rzv2h_pin_to_oen_bit(pctrl, _pin);
- spin_lock_irqsave(&pctrl->lock, flags);
- val = readb(pctrl->base + PFC_OEN);
- if (oen)
- val &= ~BIT(bit);
- else
- val |= BIT(bit);
-
- pwpr = readb(pctrl->base + regs->pwpr);
- writeb(pwpr | PWPR_REGWE_B, pctrl->base + regs->pwpr);
- writeb(val, pctrl->base + PFC_OEN);
- writeb(pwpr & ~PWPR_REGWE_B, pctrl->base + regs->pwpr);
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ static const char * const pin_names[] = {
+ "PB1", "PE1", "PL4", "PL1", "PL2", "PL0"
+ };
- return 0;
+ return rzg2l_pin_names_to_oen_bit(pctrl, _pin, pin_names, ARRAY_SIZE(pin_names));
}
static int rzg2l_pinctrl_pinconf_get(struct pinctrl_dev *pctldev,
@@ -1308,11 +1298,10 @@ static int rzg2l_pinctrl_pinconf_get(struct pinctrl_dev *pctldev,
case PIN_CONFIG_OUTPUT_ENABLE:
if (!(cfg & PIN_CFG_OEN))
return -EINVAL;
- if (!pctrl->data->oen_read)
- return -EOPNOTSUPP;
- arg = pctrl->data->oen_read(pctrl, _pin);
- if (!arg)
- return -EINVAL;
+ ret = rzg2l_read_oen(pctrl, _pin);
+ if (ret < 0)
+ return ret;
+ arg = ret;
break;
case PIN_CONFIG_POWER_SOURCE:
@@ -1471,9 +1460,7 @@ static int rzg2l_pinctrl_pinconf_set(struct pinctrl_dev *pctldev,
case PIN_CONFIG_OUTPUT_ENABLE:
if (!(cfg & PIN_CFG_OEN))
return -EINVAL;
- if (!pctrl->data->oen_write)
- return -EOPNOTSUPP;
- ret = pctrl->data->oen_write(pctrl, _pin, !!arg);
+ ret = rzg2l_write_oen(pctrl, _pin, !!arg);
if (ret)
return ret;
break;
@@ -2058,17 +2045,17 @@ static const u64 r9a09g047_gpio_configs[] = {
RZG2L_GPIO_PORT_PACK(6, 0x28, RZV2H_MPXED_PIN_FUNCS), /* P8 */
0x0,
RZG2L_GPIO_PORT_PACK_VARIABLE(8, 0x2a), /* PA */
- RZG2L_GPIO_PORT_PACK(8, 0x2b, RZV2H_MPXED_PIN_FUNCS), /* PB */
+ RZG2L_GPIO_PORT_PACK_VARIABLE(8, 0x2b), /* PB */
RZG2L_GPIO_PORT_PACK(3, 0x2c, RZV2H_MPXED_PIN_FUNCS), /* PC */
RZG2L_GPIO_PORT_PACK_VARIABLE(8, 0x2d), /* PD */
- RZG2L_GPIO_PORT_PACK(8, 0x2e, RZV2H_MPXED_PIN_FUNCS), /* PE */
+ RZG2L_GPIO_PORT_PACK_VARIABLE(8, 0x2e), /* PE */
RZG2L_GPIO_PORT_PACK(3, 0x2f, RZV2H_MPXED_PIN_FUNCS), /* PF */
RZG2L_GPIO_PORT_PACK_VARIABLE(8, 0x30), /* PG */
RZG2L_GPIO_PORT_PACK_VARIABLE(6, 0x31), /* PH */
0x0,
RZG2L_GPIO_PORT_PACK_VARIABLE(5, 0x33), /* PJ */
RZG2L_GPIO_PORT_PACK(4, 0x34, RZV2H_MPXED_PIN_FUNCS), /* PK */
- RZG2L_GPIO_PORT_PACK(8, 0x35, RZV2H_MPXED_PIN_FUNCS), /* PL */
+ RZG2L_GPIO_PORT_PACK_VARIABLE(8, 0x35), /* PL */
RZG2L_GPIO_PORT_PACK(8, 0x36, RZV2H_MPXED_PIN_FUNCS), /* PM */
0x0,
0x0,
@@ -2719,6 +2706,10 @@ static int rzg2l_pinctrl_reg_cache_alloc(struct rzg2l_pinctrl *pctrl)
if (!cache->pfc)
return -ENOMEM;
+ cache->smt = devm_kcalloc(pctrl->dev, nports, sizeof(*cache->smt), GFP_KERNEL);
+ if (!cache->smt)
+ return -ENOMEM;
+
for (u8 i = 0; i < 2; i++) {
u32 n_dedicated_pins = pctrl->data->n_dedicated_pins;
@@ -2980,7 +2971,7 @@ static void rzg2l_pinctrl_pm_setup_regs(struct rzg2l_pinctrl *pctrl, bool suspen
struct rzg2l_pinctrl_reg_cache *cache = pctrl->cache;
for (u32 port = 0; port < nports; port++) {
- bool has_iolh, has_ien, has_pupd;
+ bool has_iolh, has_ien, has_pupd, has_smt;
u32 off, caps;
u8 pincnt;
u64 cfg;
@@ -2993,6 +2984,7 @@ static void rzg2l_pinctrl_pm_setup_regs(struct rzg2l_pinctrl *pctrl, bool suspen
has_iolh = !!(caps & (PIN_CFG_IOLH_A | PIN_CFG_IOLH_B | PIN_CFG_IOLH_C));
has_ien = !!(caps & PIN_CFG_IEN);
has_pupd = !!(caps & PIN_CFG_PUPD);
+ has_smt = !!(caps & PIN_CFG_SMT);
if (suspend)
RZG2L_PCTRL_REG_ACCESS32(suspend, pctrl->base + PFC(off), cache->pfc[port]);
@@ -3031,6 +3023,9 @@ static void rzg2l_pinctrl_pm_setup_regs(struct rzg2l_pinctrl *pctrl, bool suspen
cache->ien[1][port]);
}
}
+
+ if (has_smt)
+ RZG2L_PCTRL_REG_ACCESS32(suspend, pctrl->base + SMT(off), cache->smt[port]);
}
}
@@ -3164,7 +3159,7 @@ static int rzg2l_pinctrl_suspend_noirq(struct device *dev)
}
cache->qspi = readb(pctrl->base + QSPI);
- cache->eth_mode = readb(pctrl->base + ETH_MODE);
+ cache->oen = readb(pctrl->base + pctrl->data->hwcfg->regs.oen);
if (!atomic_read(&pctrl->wakeup_path))
clk_disable_unprepare(pctrl->clk);
@@ -3180,6 +3175,8 @@ static int rzg2l_pinctrl_resume_noirq(struct device *dev)
const struct rzg2l_hwcfg *hwcfg = pctrl->data->hwcfg;
const struct rzg2l_register_offsets *regs = &hwcfg->regs;
struct rzg2l_pinctrl_reg_cache *cache = pctrl->cache;
+ unsigned long flags;
+ u8 pwpr;
int ret;
if (!atomic_read(&pctrl->wakeup_path)) {
@@ -3189,7 +3186,16 @@ static int rzg2l_pinctrl_resume_noirq(struct device *dev)
}
writeb(cache->qspi, pctrl->base + QSPI);
- writeb(cache->eth_mode, pctrl->base + ETH_MODE);
+ if (pctrl->data->hwcfg->oen_pwpr_lock) {
+ spin_lock_irqsave(&pctrl->lock, flags);
+ pwpr = readb(pctrl->base + regs->pwpr);
+ writeb(pwpr | PWPR_REGWE_B, pctrl->base + regs->pwpr);
+ }
+ writeb(cache->oen, pctrl->base + pctrl->data->hwcfg->regs.oen);
+ if (pctrl->data->hwcfg->oen_pwpr_lock) {
+ writeb(pwpr & ~PWPR_REGWE_B, pctrl->base + regs->pwpr);
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+ }
for (u8 i = 0; i < 2; i++) {
if (regs->sd_ch)
writeb(cache->sd_ch[i], pctrl->base + SD_CH(regs->sd_ch, i));
@@ -3241,6 +3247,7 @@ static const struct rzg2l_hwcfg rzg2l_hwcfg = {
.pwpr = 0x3014,
.sd_ch = 0x3000,
.eth_poc = 0x300c,
+ .oen = 0x3018,
},
.iolh_groupa_ua = {
/* 3v3 power source */
@@ -3256,6 +3263,7 @@ static const struct rzg2l_hwcfg rzg3s_hwcfg = {
.pwpr = 0x3000,
.sd_ch = 0x3004,
.eth_poc = 0x3010,
+ .oen = 0x3018,
},
.iolh_groupa_ua = {
/* 1v8 power source */
@@ -3287,8 +3295,10 @@ static const struct rzg2l_hwcfg rzg3s_hwcfg = {
static const struct rzg2l_hwcfg rzv2h_hwcfg = {
.regs = {
.pwpr = 0x3c04,
+ .oen = 0x3c40,
},
.tint_start_index = 17,
+ .oen_pwpr_lock = true,
};
static struct rzg2l_pinctrl_data r9a07g043_data = {
@@ -3305,8 +3315,7 @@ static struct rzg2l_pinctrl_data r9a07g043_data = {
#endif
.pwpr_pfc_lock_unlock = &rzg2l_pwpr_pfc_lock_unlock,
.pmc_writeb = &rzg2l_pmc_writeb,
- .oen_read = &rzg2l_read_oen,
- .oen_write = &rzg2l_write_oen,
+ .pin_to_oen_bit = &rzg2l_pin_to_oen_bit,
.hw_to_bias_param = &rzg2l_hw_to_bias_param,
.bias_param_to_hw = &rzg2l_bias_param_to_hw,
};
@@ -3322,8 +3331,7 @@ static struct rzg2l_pinctrl_data r9a07g044_data = {
.hwcfg = &rzg2l_hwcfg,
.pwpr_pfc_lock_unlock = &rzg2l_pwpr_pfc_lock_unlock,
.pmc_writeb = &rzg2l_pmc_writeb,
- .oen_read = &rzg2l_read_oen,
- .oen_write = &rzg2l_write_oen,
+ .pin_to_oen_bit = &rzg2l_pin_to_oen_bit,
.hw_to_bias_param = &rzg2l_hw_to_bias_param,
.bias_param_to_hw = &rzg2l_bias_param_to_hw,
};
@@ -3338,8 +3346,7 @@ static struct rzg2l_pinctrl_data r9a08g045_data = {
.hwcfg = &rzg3s_hwcfg,
.pwpr_pfc_lock_unlock = &rzg2l_pwpr_pfc_lock_unlock,
.pmc_writeb = &rzg2l_pmc_writeb,
- .oen_read = &rzg3s_oen_read,
- .oen_write = &rzg3s_oen_write,
+ .pin_to_oen_bit = &rzg3s_pin_to_oen_bit,
.hw_to_bias_param = &rzg2l_hw_to_bias_param,
.bias_param_to_hw = &rzg2l_bias_param_to_hw,
};
@@ -3361,8 +3368,7 @@ static struct rzg2l_pinctrl_data r9a09g047_data = {
#endif
.pwpr_pfc_lock_unlock = &rzv2h_pwpr_pfc_lock_unlock,
.pmc_writeb = &rzv2h_pmc_writeb,
- .oen_read = &rzv2h_oen_read,
- .oen_write = &rzv2h_oen_write,
+ .pin_to_oen_bit = &rzg3e_pin_to_oen_bit,
.hw_to_bias_param = &rzv2h_hw_to_bias_param,
.bias_param_to_hw = &rzv2h_bias_param_to_hw,
};
@@ -3384,8 +3390,7 @@ static struct rzg2l_pinctrl_data r9a09g056_data = {
#endif
.pwpr_pfc_lock_unlock = &rzv2h_pwpr_pfc_lock_unlock,
.pmc_writeb = &rzv2h_pmc_writeb,
- .oen_read = &rzv2h_oen_read,
- .oen_write = &rzv2h_oen_write,
+ .pin_to_oen_bit = &rzv2h_pin_to_oen_bit,
.hw_to_bias_param = &rzv2h_hw_to_bias_param,
.bias_param_to_hw = &rzv2h_bias_param_to_hw,
};
@@ -3408,8 +3413,7 @@ static struct rzg2l_pinctrl_data r9a09g057_data = {
#endif
.pwpr_pfc_lock_unlock = &rzv2h_pwpr_pfc_lock_unlock,
.pmc_writeb = &rzv2h_pmc_writeb,
- .oen_read = &rzv2h_oen_read,
- .oen_write = &rzv2h_oen_write,
+ .pin_to_oen_bit = &rzv2h_pin_to_oen_bit,
.hw_to_bias_param = &rzv2h_hw_to_bias_param,
.bias_param_to_hw = &rzv2h_bias_param_to_hw,
};
diff --git a/drivers/pinctrl/renesas/pinctrl-rzt2h.c b/drivers/pinctrl/renesas/pinctrl-rzt2h.c
new file mode 100644
index 000000000000..3872638f5ebb
--- /dev/null
+++ b/drivers/pinctrl/renesas/pinctrl-rzt2h.c
@@ -0,0 +1,813 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RZ/T2H Pin Control and GPIO driver core
+ *
+ * Based on drivers/pinctrl/renesas/pinctrl-rzg2l.c
+ *
+ * Copyright (C) 2025 Renesas Electronics Corporation.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/clk.h>
+#include <linux/gpio/driver.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include <linux/pinctrl/consumer.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+
+#include <dt-bindings/pinctrl/renesas,r9a09g077-pinctrl.h>
+
+#include "../core.h"
+#include "../pinconf.h"
+#include "../pinmux.h"
+
+#define DRV_NAME "pinctrl-rzt2h"
+
+#define P(m) (0x001 * (m))
+#define PM(m) (0x200 + 2 * (m))
+#define PMC(m) (0x400 + (m))
+#define PFC(m) (0x600 + 8 * (m))
+#define PIN(m) (0x800 + (m))
+#define RSELP(m) (0xc00 + (m))
+
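+/*
+ * Each pin has a two-bit mode field in the PM register: 0 selects
+ * "Non-use" (Hi-Z input protection), while PM_INPUT and PM_OUTPUT
+ * enable the input and output buffers respectively.
+ */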
+#define PM_MASK GENMASK(1, 0)
+#define PM_PIN_MASK(pin) (PM_MASK << ((pin) * 2))
+#define PM_INPUT BIT(0)
+#define PM_OUTPUT BIT(1)
+
+#define PFC_MASK GENMASK_ULL(5, 0)
+#define PFC_PIN_MASK(pin) (PFC_MASK << ((pin) * 8))
+
+/*
+ * Use 16 lower bits [15:0] for pin identifier
+ * Use 8 higher bits [23:16] for pin mux function
+ */
+#define MUX_PIN_ID_MASK GENMASK(15, 0)
+#define MUX_FUNC_MASK GENMASK(23, 16)
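+/* e.g. the pinmux cell 0x00030008 requests mux function 3 on global pin 8 */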
+
+#define RZT2H_PIN_ID_TO_PORT(id) ((id) / RZT2H_PINS_PER_PORT)
+#define RZT2H_PIN_ID_TO_PIN(id) ((id) % RZT2H_PINS_PER_PORT)
+
+#define RZT2H_MAX_SAFETY_PORTS 12
+
+struct rzt2h_pinctrl_data {
+ unsigned int n_port_pins;
+ const u8 *port_pin_configs;
+ unsigned int n_ports;
+};
+
+struct rzt2h_pinctrl {
+ struct pinctrl_dev *pctl;
+ struct pinctrl_desc desc;
+ struct pinctrl_pin_desc *pins;
+ const struct rzt2h_pinctrl_data *data;
+ void __iomem *base0, *base1;
+ struct device *dev;
+ struct gpio_chip gpio_chip;
+ struct pinctrl_gpio_range gpio_range;
+ spinlock_t lock; /* lock read/write registers */
+ struct mutex mutex; /* serialize adding groups and functions */
+ bool safety_port_enabled;
+};
+
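+/*
+ * Ports 0..RZT2H_MAX_SAFETY_PORTS sit in the optional safety ("srs")
+ * region behind base1; all higher ports use the "nsr" region at base0.
+ */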
+#define RZT2H_GET_BASE(pctrl, port) \
+ ((port) > RZT2H_MAX_SAFETY_PORTS ? (pctrl)->base0 : (pctrl)->base1)
+
+#define RZT2H_PINCTRL_REG_ACCESS(size, type) \
+static inline void rzt2h_pinctrl_write##size(struct rzt2h_pinctrl *pctrl, u8 port, \
+ type val, unsigned int offset) \
+{ \
+ write##size(val, RZT2H_GET_BASE(pctrl, port) + offset); \
+} \
+static inline type rzt2h_pinctrl_read##size(struct rzt2h_pinctrl *pctrl, u8 port, \
+ unsigned int offset) \
+{ \
+ return read##size(RZT2H_GET_BASE(pctrl, port) + offset); \
+}
+
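+/* Instantiate the rzt2h_pinctrl_{read,write}{b,w,q}() register accessors. */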
+RZT2H_PINCTRL_REG_ACCESS(b, u8)
+RZT2H_PINCTRL_REG_ACCESS(w, u16)
+RZT2H_PINCTRL_REG_ACCESS(q, u64)
+
+static int rzt2h_validate_pin(struct rzt2h_pinctrl *pctrl, unsigned int offset)
+{
+ u8 port = RZT2H_PIN_ID_TO_PORT(offset);
+ u8 pin = RZT2H_PIN_ID_TO_PIN(offset);
+ u8 pincfg;
+
+ if (offset >= pctrl->data->n_port_pins || port >= pctrl->data->n_ports)
+ return -EINVAL;
+
+ if (!pctrl->safety_port_enabled && port <= RZT2H_MAX_SAFETY_PORTS)
+ return -EINVAL;
+
+ pincfg = pctrl->data->port_pin_configs[port];
+ return (pincfg & BIT(pin)) ? 0 : -EINVAL;
+}
+
+static void rzt2h_pinctrl_set_pfc_mode(struct rzt2h_pinctrl *pctrl,
+ u8 port, u8 pin, u8 func)
+{
+ u64 reg64;
+ u16 reg16;
+
+ guard(spinlock_irqsave)(&pctrl->lock);
+
+ /* Set pin to 'Non-use (Hi-Z input protection)' */
+ reg16 = rzt2h_pinctrl_readw(pctrl, port, PM(port));
+ reg16 &= ~PM_PIN_MASK(pin);
+ rzt2h_pinctrl_writew(pctrl, port, reg16, PM(port));
+
+ /* Temporarily switch to GPIO mode with PMC register */
+ reg16 = rzt2h_pinctrl_readb(pctrl, port, PMC(port));
+ rzt2h_pinctrl_writeb(pctrl, port, reg16 & ~BIT(pin), PMC(port));
+
+ /* Select Pin function mode with PFC register */
+ reg64 = rzt2h_pinctrl_readq(pctrl, port, PFC(port));
+ reg64 &= ~PFC_PIN_MASK(pin);
+ rzt2h_pinctrl_writeq(pctrl, port, reg64 | ((u64)func << (pin * 8)), PFC(port));
+
+ /* Switch to Peripheral pin function with PMC register */
+ reg16 = rzt2h_pinctrl_readb(pctrl, port, PMC(port));
+ rzt2h_pinctrl_writeb(pctrl, port, reg16 | BIT(pin), PMC(port));
+}
+
+static int rzt2h_pinctrl_set_mux(struct pinctrl_dev *pctldev,
+ unsigned int func_selector,
+ unsigned int group_selector)
+{
+ struct rzt2h_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ const struct function_desc *func;
+ struct group_desc *group;
+ const unsigned int *pins;
+ unsigned int i;
+ u8 *psel_val;
+ int ret;
+
+ func = pinmux_generic_get_function(pctldev, func_selector);
+ if (!func)
+ return -EINVAL;
+
+ group = pinctrl_generic_get_group(pctldev, group_selector);
+ if (!group)
+ return -EINVAL;
+
+ psel_val = func->data;
+ pins = group->grp.pins;
+
+ for (i = 0; i < group->grp.npins; i++) {
+ dev_dbg(pctrl->dev, "port:%u pin:%u PSEL:%u\n",
+ RZT2H_PIN_ID_TO_PORT(pins[i]), RZT2H_PIN_ID_TO_PIN(pins[i]),
+ psel_val[i]);
+ ret = rzt2h_validate_pin(pctrl, pins[i]);
+ if (ret)
+ return ret;
+
+ rzt2h_pinctrl_set_pfc_mode(pctrl, RZT2H_PIN_ID_TO_PORT(pins[i]),
+ RZT2H_PIN_ID_TO_PIN(pins[i]), psel_val[i]);
+ }
+
+ return 0;
+}
+
+static int rzt2h_map_add_config(struct pinctrl_map *map,
+ const char *group_or_pin,
+ enum pinctrl_map_type type,
+ unsigned long *configs,
+ unsigned int num_configs)
+{
+ unsigned long *cfgs;
+
+ cfgs = kmemdup_array(configs, num_configs, sizeof(*cfgs), GFP_KERNEL);
+ if (!cfgs)
+ return -ENOMEM;
+
+ map->type = type;
+ map->data.configs.group_or_pin = group_or_pin;
+ map->data.configs.configs = cfgs;
+ map->data.configs.num_configs = num_configs;
+
+ return 0;
+}
+
+static int rzt2h_dt_subnode_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *np,
+ struct device_node *parent,
+ struct pinctrl_map **map,
+ unsigned int *num_maps,
+ unsigned int *index)
+{
+ struct rzt2h_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct pinctrl_map *maps = *map;
+ unsigned int nmaps = *num_maps;
+ unsigned long *configs = NULL;
+ unsigned int num_pinmux = 0;
+ unsigned int idx = *index;
+ unsigned int num_pins, i;
+ unsigned int num_configs;
+ struct property *pinmux;
+ struct property *prop;
+ int ret, gsel, fsel;
+ const char **pin_fn;
+ unsigned int *pins;
+ const char *name;
+ const char *pin;
+ u8 *psel_val;
+
+ pinmux = of_find_property(np, "pinmux", NULL);
+ if (pinmux)
+ num_pinmux = pinmux->length / sizeof(u32);
+
+ ret = of_property_count_strings(np, "pins");
+ if (ret == -EINVAL) {
+ num_pins = 0;
+ } else if (ret < 0) {
+ dev_err(pctrl->dev, "Invalid pins list in DT\n");
+ return ret;
+ } else {
+ num_pins = ret;
+ }
+
+ if (!num_pinmux && !num_pins)
+ return 0;
+
+ if (num_pinmux && num_pins) {
+ dev_err(pctrl->dev,
+ "DT node must contain either a pinmux or a pins property, not both\n");
+ return -EINVAL;
+ }
+
+ ret = pinconf_generic_parse_dt_config(np, pctldev, &configs, &num_configs);
+ if (ret < 0)
+ return ret;
+
+ if (num_pins && !num_configs) {
+ dev_err(pctrl->dev, "DT node must contain a config\n");
+ ret = -ENODEV;
+ goto done;
+ }
+
+ if (num_pinmux) {
+ nmaps += 1;
+ if (num_configs)
+ nmaps += 1;
+ }
+
+ if (num_pins)
+ nmaps += num_pins;
+
+ maps = krealloc_array(maps, nmaps, sizeof(*maps), GFP_KERNEL);
+ if (!maps) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ *map = maps;
+ *num_maps = nmaps;
+ if (num_pins) {
+ of_property_for_each_string(np, "pins", prop, pin) {
+ ret = rzt2h_map_add_config(&maps[idx], pin,
+ PIN_MAP_TYPE_CONFIGS_PIN,
+ configs, num_configs);
+ if (ret < 0)
+ goto done;
+
+ idx++;
+ }
+ ret = 0;
+ goto done;
+ }
+
+ pins = devm_kcalloc(pctrl->dev, num_pinmux, sizeof(*pins), GFP_KERNEL);
+ psel_val = devm_kcalloc(pctrl->dev, num_pinmux, sizeof(*psel_val),
+ GFP_KERNEL);
+ pin_fn = devm_kzalloc(pctrl->dev, sizeof(*pin_fn), GFP_KERNEL);
+ if (!pins || !psel_val || !pin_fn) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ /* Collect pin locations and mux settings from DT properties */
+ for (i = 0; i < num_pinmux; ++i) {
+ u32 value;
+
+ ret = of_property_read_u32_index(np, "pinmux", i, &value);
+ if (ret)
+ goto done;
+ pins[i] = FIELD_GET(MUX_PIN_ID_MASK, value);
+ psel_val[i] = FIELD_GET(MUX_FUNC_MASK, value);
+ }
+
+ if (parent) {
+ name = devm_kasprintf(pctrl->dev, GFP_KERNEL, "%pOFn.%pOFn",
+ parent, np);
+ if (!name) {
+ ret = -ENOMEM;
+ goto done;
+ }
+ } else {
+ name = np->name;
+ }
+
+ if (num_configs) {
+ ret = rzt2h_map_add_config(&maps[idx], name,
+ PIN_MAP_TYPE_CONFIGS_GROUP,
+ configs, num_configs);
+ if (ret < 0)
+ goto done;
+
+ idx++;
+ }
+
+ scoped_guard(mutex, &pctrl->mutex) {
+ /* Register a single pin group listing all the pins we read from DT */
+ gsel = pinctrl_generic_add_group(pctldev, name, pins, num_pinmux, NULL);
+ if (gsel < 0) {
+ ret = gsel;
+ goto done;
+ }
+
+ /*
+ * Register a single group function whose 'data' is the array of PSEL
+ * register values read from DT.
+ */
+ pin_fn[0] = name;
+ fsel = pinmux_generic_add_function(pctldev, name, pin_fn, 1, psel_val);
+ if (fsel < 0) {
+ ret = fsel;
+ goto remove_group;
+ }
+ }
+
+ maps[idx].type = PIN_MAP_TYPE_MUX_GROUP;
+ maps[idx].data.mux.group = name;
+ maps[idx].data.mux.function = name;
+ idx++;
+
+ dev_dbg(pctrl->dev, "Parsed %pOF with %d pins\n", np, num_pinmux);
+ ret = 0;
+ goto done;
+
+remove_group:
+ pinctrl_generic_remove_group(pctldev, gsel);
+done:
+ *index = idx;
+ kfree(configs);
+ return ret;
+}
+
+static void rzt2h_dt_free_map(struct pinctrl_dev *pctldev,
+ struct pinctrl_map *map,
+ unsigned int num_maps)
+{
+ unsigned int i;
+
+ if (!map)
+ return;
+
+ for (i = 0; i < num_maps; ++i) {
+ if (map[i].type == PIN_MAP_TYPE_CONFIGS_GROUP ||
+ map[i].type == PIN_MAP_TYPE_CONFIGS_PIN)
+ kfree(map[i].data.configs.configs);
+ }
+ kfree(map);
+}
+
+static int rzt2h_dt_node_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *np,
+ struct pinctrl_map **map,
+ unsigned int *num_maps)
+{
+ struct rzt2h_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ unsigned int index;
+ int ret;
+
+ *map = NULL;
+ *num_maps = 0;
+ index = 0;
+
+ for_each_child_of_node_scoped(np, child) {
+ ret = rzt2h_dt_subnode_to_map(pctldev, child, np, map,
+ num_maps, &index);
+ if (ret < 0)
+ goto done;
+ }
+
+ if (*num_maps == 0) {
+ ret = rzt2h_dt_subnode_to_map(pctldev, np, NULL, map,
+ num_maps, &index);
+ if (ret < 0)
+ goto done;
+ }
+
+ if (*num_maps)
+ return 0;
+
+ dev_err(pctrl->dev, "no mapping found in node %pOF\n", np);
+ ret = -EINVAL;
+
+done:
+ rzt2h_dt_free_map(pctldev, *map, *num_maps);
+ return ret;
+}
+
+static const struct pinctrl_ops rzt2h_pinctrl_pctlops = {
+ .get_groups_count = pinctrl_generic_get_group_count,
+ .get_group_name = pinctrl_generic_get_group_name,
+ .get_group_pins = pinctrl_generic_get_group_pins,
+ .dt_node_to_map = rzt2h_dt_node_to_map,
+ .dt_free_map = rzt2h_dt_free_map,
+};
+
+static const struct pinmux_ops rzt2h_pinctrl_pmxops = {
+ .get_functions_count = pinmux_generic_get_function_count,
+ .get_function_name = pinmux_generic_get_function_name,
+ .get_function_groups = pinmux_generic_get_function_groups,
+ .set_mux = rzt2h_pinctrl_set_mux,
+ .strict = true,
+};
+
+static int rzt2h_gpio_request(struct gpio_chip *chip, unsigned int offset)
+{
+ struct rzt2h_pinctrl *pctrl = gpiochip_get_data(chip);
+ u8 port = RZT2H_PIN_ID_TO_PORT(offset);
+ u8 bit = RZT2H_PIN_ID_TO_PIN(offset);
+ int ret;
+ u8 reg;
+
+ ret = rzt2h_validate_pin(pctrl, offset);
+ if (ret)
+ return ret;
+
+ ret = pinctrl_gpio_request(chip, offset);
+ if (ret)
+ return ret;
+
+ guard(spinlock_irqsave)(&pctrl->lock);
+
+ /* Select GPIO mode in PMC Register */
+ reg = rzt2h_pinctrl_readb(pctrl, port, PMC(port));
+ reg &= ~BIT(bit);
+ rzt2h_pinctrl_writeb(pctrl, port, reg, PMC(port));
+
+ return 0;
+}
+
+static void rzt2h_gpio_set_direction(struct rzt2h_pinctrl *pctrl, u32 port,
+ u8 bit, bool output)
+{
+ u16 reg;
+
+ guard(spinlock_irqsave)(&pctrl->lock);
+
+ reg = rzt2h_pinctrl_readw(pctrl, port, PM(port));
+ reg &= ~PM_PIN_MASK(bit);
+
+ reg |= (output ? PM_OUTPUT : PM_INPUT) << (bit * 2);
+ rzt2h_pinctrl_writew(pctrl, port, reg, PM(port));
+}
+
+static int rzt2h_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
+{
+ struct rzt2h_pinctrl *pctrl = gpiochip_get_data(chip);
+ u8 port = RZT2H_PIN_ID_TO_PORT(offset);
+ u8 bit = RZT2H_PIN_ID_TO_PIN(offset);
+ u16 reg;
+ int ret;
+
+ ret = rzt2h_validate_pin(pctrl, offset);
+ if (ret)
+ return ret;
+
+ if (rzt2h_pinctrl_readb(pctrl, port, PMC(port)) & BIT(bit))
+ return -EINVAL;
+
+ reg = rzt2h_pinctrl_readw(pctrl, port, PM(port));
+ reg = (reg >> (bit * 2)) & PM_MASK;
+ if (reg & PM_OUTPUT)
+ return GPIO_LINE_DIRECTION_OUT;
+ if (reg & PM_INPUT)
+ return GPIO_LINE_DIRECTION_IN;
+
+ return -EINVAL;
+}
+
+static int rzt2h_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ struct rzt2h_pinctrl *pctrl = gpiochip_get_data(chip);
+ u8 port = RZT2H_PIN_ID_TO_PORT(offset);
+ u8 bit = RZT2H_PIN_ID_TO_PIN(offset);
+ u8 reg;
+
+ guard(spinlock_irqsave)(&pctrl->lock);
+
+ reg = rzt2h_pinctrl_readb(pctrl, port, P(port));
+ if (value)
+ rzt2h_pinctrl_writeb(pctrl, port, reg | BIT(bit), P(port));
+ else
+ rzt2h_pinctrl_writeb(pctrl, port, reg & ~BIT(bit), P(port));
+
+ return 0;
+}
+
+static int rzt2h_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct rzt2h_pinctrl *pctrl = gpiochip_get_data(chip);
+ u8 port = RZT2H_PIN_ID_TO_PORT(offset);
+ u8 bit = RZT2H_PIN_ID_TO_PIN(offset);
+ u16 reg;
+
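+ /* Inputs sample the PIN state register; outputs read back the P latch. */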
+ reg = rzt2h_pinctrl_readw(pctrl, port, PM(port));
+ reg = (reg >> (bit * 2)) & PM_MASK;
+ if (reg & PM_INPUT)
+ return !!(rzt2h_pinctrl_readb(pctrl, port, PIN(port)) & BIT(bit));
+ if (reg & PM_OUTPUT)
+ return !!(rzt2h_pinctrl_readb(pctrl, port, P(port)) & BIT(bit));
+
+ return -EINVAL;
+}
+
+static int rzt2h_gpio_direction_input(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct rzt2h_pinctrl *pctrl = gpiochip_get_data(chip);
+ u8 port = RZT2H_PIN_ID_TO_PORT(offset);
+ u8 bit = RZT2H_PIN_ID_TO_PIN(offset);
+
+ rzt2h_gpio_set_direction(pctrl, port, bit, false);
+
+ return 0;
+}
+
+static int rzt2h_gpio_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ struct rzt2h_pinctrl *pctrl = gpiochip_get_data(chip);
+ u8 port = RZT2H_PIN_ID_TO_PORT(offset);
+ u8 bit = RZT2H_PIN_ID_TO_PIN(offset);
+
+ rzt2h_gpio_set(chip, offset, value);
+ rzt2h_gpio_set_direction(pctrl, port, bit, true);
+
+ return 0;
+}
+
+static void rzt2h_gpio_free(struct gpio_chip *chip, unsigned int offset)
+{
+ pinctrl_gpio_free(chip, offset);
+
+ /*
+ * Set the GPIO as an input to ensure that the next GPIO request won't
+ * drive the GPIO pin as an output.
+ */
+ rzt2h_gpio_direction_input(chip, offset);
+}
+
+static const char * const rzt2h_gpio_names[] = {
+ "P00_0", "P00_1", "P00_2", "P00_3", "P00_4", "P00_5", "P00_6", "P00_7",
+ "P01_0", "P01_1", "P01_2", "P01_3", "P01_4", "P01_5", "P01_6", "P01_7",
+ "P02_0", "P02_1", "P02_2", "P02_3", "P02_4", "P02_5", "P02_6", "P02_7",
+ "P03_0", "P03_1", "P03_2", "P03_3", "P03_4", "P03_5", "P03_6", "P03_7",
+ "P04_0", "P04_1", "P04_2", "P04_3", "P04_4", "P04_5", "P04_6", "P04_7",
+ "P05_0", "P05_1", "P05_2", "P05_3", "P05_4", "P05_5", "P05_6", "P05_7",
+ "P06_0", "P06_1", "P06_2", "P06_3", "P06_4", "P06_5", "P06_6", "P06_7",
+ "P07_0", "P07_1", "P07_2", "P07_3", "P07_4", "P07_5", "P07_6", "P07_7",
+ "P08_0", "P08_1", "P08_2", "P08_3", "P08_4", "P08_5", "P08_6", "P08_7",
+ "P09_0", "P09_1", "P09_2", "P09_3", "P09_4", "P09_5", "P09_6", "P09_7",
+ "P10_0", "P10_1", "P10_2", "P10_3", "P10_4", "P10_5", "P10_6", "P10_7",
+ "P11_0", "P11_1", "P11_2", "P11_3", "P11_4", "P11_5", "P11_6", "P11_7",
+ "P12_0", "P12_1", "P12_2", "P12_3", "P12_4", "P12_5", "P12_6", "P12_7",
+ "P13_0", "P13_1", "P13_2", "P13_3", "P13_4", "P13_5", "P13_6", "P13_7",
+ "P14_0", "P14_1", "P14_2", "P14_3", "P14_4", "P14_5", "P14_6", "P14_7",
+ "P15_0", "P15_1", "P15_2", "P15_3", "P15_4", "P15_5", "P15_6", "P15_7",
+ "P16_0", "P16_1", "P16_2", "P16_3", "P16_4", "P16_5", "P16_6", "P16_7",
+ "P17_0", "P17_1", "P17_2", "P17_3", "P17_4", "P17_5", "P17_6", "P17_7",
+ "P18_0", "P18_1", "P18_2", "P18_3", "P18_4", "P18_5", "P18_6", "P18_7",
+ "P19_0", "P19_1", "P19_2", "P19_3", "P19_4", "P19_5", "P19_6", "P19_7",
+ "P20_0", "P20_1", "P20_2", "P20_3", "P20_4", "P20_5", "P20_6", "P20_7",
+ "P21_0", "P21_1", "P21_2", "P21_3", "P21_4", "P21_5", "P21_6", "P21_7",
+ "P22_0", "P22_1", "P22_2", "P22_3", "P22_4", "P22_5", "P22_6", "P22_7",
+ "P23_0", "P23_1", "P23_2", "P23_3", "P23_4", "P23_5", "P23_6", "P23_7",
+ "P24_0", "P24_1", "P24_2", "P24_3", "P24_4", "P24_5", "P24_6", "P24_7",
+ "P25_0", "P25_1", "P25_2", "P25_3", "P25_4", "P25_5", "P25_6", "P25_7",
+ "P26_0", "P26_1", "P26_2", "P26_3", "P26_4", "P26_5", "P26_6", "P26_7",
+ "P27_0", "P27_1", "P27_2", "P27_3", "P27_4", "P27_5", "P27_6", "P27_7",
+ "P28_0", "P28_1", "P28_2", "P28_3", "P28_4", "P28_5", "P28_6", "P28_7",
+ "P29_0", "P29_1", "P29_2", "P29_3", "P29_4", "P29_5", "P29_6", "P29_7",
+ "P30_0", "P30_1", "P30_2", "P30_3", "P30_4", "P30_5", "P30_6", "P30_7",
+ "P31_0", "P31_1", "P31_2", "P31_3", "P31_4", "P31_5", "P31_6", "P31_7",
+ "P32_0", "P32_1", "P32_2", "P32_3", "P32_4", "P32_5", "P32_6", "P32_7",
+ "P33_0", "P33_1", "P33_2", "P33_3", "P33_4", "P33_5", "P33_6", "P33_7",
+ "P34_0", "P34_1", "P34_2", "P34_3", "P34_4", "P34_5", "P34_6", "P34_7",
+ "P35_0", "P35_1", "P35_2", "P35_3", "P35_4", "P35_5", "P35_6", "P35_7",
+};
+
+static int rzt2h_gpio_register(struct rzt2h_pinctrl *pctrl)
+{
+ struct pinctrl_gpio_range *range = &pctrl->gpio_range;
+ struct gpio_chip *chip = &pctrl->gpio_chip;
+ struct device *dev = pctrl->dev;
+ struct of_phandle_args of_args;
+ int ret;
+
+ ret = of_parse_phandle_with_fixed_args(dev->of_node, "gpio-ranges", 3, 0, &of_args);
+ if (ret)
+ return dev_err_probe(dev, ret, "Unable to parse gpio-ranges\n");
+
+ if (of_args.args[0] != 0 || of_args.args[1] != 0 ||
+ of_args.args[2] != pctrl->data->n_port_pins)
+ return dev_err_probe(dev, -EINVAL,
+ "gpio-ranges does not match the selected SoC\n");
+
+ chip->base = -1;
+ chip->parent = dev;
+ chip->owner = THIS_MODULE;
+ chip->ngpio = of_args.args[2];
+ chip->names = rzt2h_gpio_names;
+ chip->request = rzt2h_gpio_request;
+ chip->free = rzt2h_gpio_free;
+ chip->get_direction = rzt2h_gpio_get_direction;
+ chip->direction_input = rzt2h_gpio_direction_input;
+ chip->direction_output = rzt2h_gpio_direction_output;
+ chip->get = rzt2h_gpio_get;
+ chip->set = rzt2h_gpio_set;
+ chip->label = dev_name(dev);
+
+ range->id = 0;
+ range->pin_base = 0;
+ range->base = 0;
+ range->npins = chip->ngpio;
+ range->name = chip->label;
+ range->gc = chip;
+
+ ret = devm_gpiochip_add_data(dev, chip, pctrl);
+ if (ret)
+ return dev_err_probe(dev, ret, "gpiochip registration failed\n");
+
+ return ret;
+}
+
+static int rzt2h_pinctrl_register(struct rzt2h_pinctrl *pctrl)
+{
+ struct pinctrl_desc *desc = &pctrl->desc;
+ struct device *dev = pctrl->dev;
+ struct pinctrl_pin_desc *pins;
+ unsigned int i, j;
+ int ret;
+
+ desc->name = DRV_NAME;
+ desc->npins = pctrl->data->n_port_pins;
+ desc->pctlops = &rzt2h_pinctrl_pctlops;
+ desc->pmxops = &rzt2h_pinctrl_pmxops;
+ desc->owner = THIS_MODULE;
+
+ pins = devm_kcalloc(dev, desc->npins, sizeof(*pins), GFP_KERNEL);
+ if (!pins)
+ return -ENOMEM;
+
+ pctrl->pins = pins;
+ desc->pins = pins;
+
+ for (i = 0, j = 0; i < pctrl->data->n_port_pins; i++) {
+ pins[i].number = i;
+ pins[i].name = rzt2h_gpio_names[i];
+ if (i && !(i % RZT2H_PINS_PER_PORT))
+ j++;
+ }
+
+ ret = devm_pinctrl_register_and_init(dev, desc, pctrl, &pctrl->pctl);
+ if (ret)
+ return dev_err_probe(dev, ret, "pinctrl registration failed\n");
+
+ ret = pinctrl_enable(pctrl->pctl);
+ if (ret)
+ return dev_err_probe(dev, ret, "pinctrl enable failed\n");
+
+ return rzt2h_gpio_register(pctrl);
+}
+
+static int rzt2h_pinctrl_cfg_regions(struct platform_device *pdev,
+ struct rzt2h_pinctrl *pctrl)
+{
+ struct resource *res;
+
+ pctrl->base0 = devm_platform_ioremap_resource_byname(pdev, "nsr");
+ if (IS_ERR(pctrl->base0))
+ return PTR_ERR(pctrl->base0);
+
+ /*
+ * Open-coded instead of using devm_platform_ioremap_resource_byname()
+ * because the "srs" region is optional.
+ */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "srs");
+ if (res) {
+ u8 port;
+
+ pctrl->base1 = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pctrl->base1))
+ return PTR_ERR(pctrl->base1);
+
+ pctrl->safety_port_enabled = true;
+
+ /* Configure to select safety region 0x812c0xxx */
+ for (port = 0; port <= RZT2H_MAX_SAFETY_PORTS; port++)
+ writeb(0x0, pctrl->base1 + RSELP(port));
+ }
+
+ return 0;
+}
+
+static int rzt2h_pinctrl_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rzt2h_pinctrl *pctrl;
+ int ret;
+
+ pctrl = devm_kzalloc(dev, sizeof(*pctrl), GFP_KERNEL);
+ if (!pctrl)
+ return -ENOMEM;
+
+ pctrl->dev = dev;
+ pctrl->data = of_device_get_match_data(dev);
+
+ ret = rzt2h_pinctrl_cfg_regions(pdev, pctrl);
+ if (ret)
+ return ret;
+
+ spin_lock_init(&pctrl->lock);
+ mutex_init(&pctrl->mutex);
+ platform_set_drvdata(pdev, pctrl);
+
+ return rzt2h_pinctrl_register(pctrl);
+}
+
+static const u8 r9a09g077_gpio_configs[] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f,
+};
+
+static const u8 r9a09g087_gpio_configs[] = {
+ 0x1f, 0xff, 0xff, 0x1f, 0x00, 0xfe, 0xff, 0x00, 0x7e, 0xf0, 0xff, 0x01,
+ 0xff, 0xff, 0xff, 0x00, 0xe0, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x01,
+ 0xe0, 0xff, 0xff, 0x7f, 0x00, 0xfe, 0xff, 0x7f, 0x00, 0xfc, 0x7f,
+};
+
+static struct rzt2h_pinctrl_data r9a09g077_data = {
+ .n_port_pins = ARRAY_SIZE(r9a09g077_gpio_configs) * RZT2H_PINS_PER_PORT,
+ .port_pin_configs = r9a09g077_gpio_configs,
+ .n_ports = ARRAY_SIZE(r9a09g077_gpio_configs),
+};
+
+static struct rzt2h_pinctrl_data r9a09g087_data = {
+ .n_port_pins = ARRAY_SIZE(r9a09g087_gpio_configs) * RZT2H_PINS_PER_PORT,
+ .port_pin_configs = r9a09g087_gpio_configs,
+ .n_ports = ARRAY_SIZE(r9a09g087_gpio_configs),
+};
+
+static const struct of_device_id rzt2h_pinctrl_of_table[] = {
+ {
+ .compatible = "renesas,r9a09g077-pinctrl",
+ .data = &r9a09g077_data,
+ },
+ {
+ .compatible = "renesas,r9a09g087-pinctrl",
+ .data = &r9a09g087_data,
+ },
+ { /* sentinel */ }
+};
+
+static struct platform_driver rzt2h_pinctrl_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = of_match_ptr(rzt2h_pinctrl_of_table),
+ .suppress_bind_attrs = true,
+ },
+ .probe = rzt2h_pinctrl_probe,
+};
+
+static int __init rzt2h_pinctrl_init(void)
+{
+ return platform_driver_register(&rzt2h_pinctrl_driver);
+}
+core_initcall(rzt2h_pinctrl_init);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Thierry Bultel <thierry.bultel.yh@bp.renesas.com>");
+MODULE_AUTHOR("Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>");
+MODULE_DESCRIPTION("Pin and GPIO controller driver for the RZ/T2H family");
diff --git a/drivers/pinctrl/renesas/pinctrl-rzv2m.c b/drivers/pinctrl/renesas/pinctrl-rzv2m.c
index daaa986d994d..dce68f93d2d5 100644
--- a/drivers/pinctrl/renesas/pinctrl-rzv2m.c
+++ b/drivers/pinctrl/renesas/pinctrl-rzv2m.c
@@ -162,7 +162,7 @@ static int rzv2m_pinctrl_set_mux(struct pinctrl_dev *pctldev,
unsigned int group_selector)
{
struct rzv2m_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
- struct function_desc *func;
+ const struct function_desc *func;
unsigned int i, *psel_val;
struct group_desc *group;
const unsigned int *pins;
diff --git a/drivers/pinctrl/renesas/pinctrl.c b/drivers/pinctrl/renesas/pinctrl.c
index 29d16c9c1bd1..3a742f74ecd1 100644
--- a/drivers/pinctrl/renesas/pinctrl.c
+++ b/drivers/pinctrl/renesas/pinctrl.c
@@ -726,7 +726,8 @@ static int sh_pfc_pinconf_group_set(struct pinctrl_dev *pctldev, unsigned group,
struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
const unsigned int *pins;
unsigned int num_pins;
- unsigned int i, ret;
+ unsigned int i;
+ int ret;
pins = pmx->pfc->info->groups[group].pins;
num_pins = pmx->pfc->info->groups[group].nr_pins;
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
index 5fe7c4b9f7bd..323487dfa8c2 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
@@ -76,6 +76,15 @@ static const struct samsung_pin_bank_type exynos8895_bank_type_off = {
.reg_offset = { 0x00, 0x04, 0x08, 0x0c, 0x10, 0x14, },
};
+/*
+ * Bank type for non-alive banks. Bit fields:
+ * CON: 4, DAT: 1, PUD: 4, DRV: 4
+ */
+static const struct samsung_pin_bank_type artpec_bank_type_off = {
+ .fld_width = { 4, 1, 4, 4, },
+ .reg_offset = { 0x00, 0x04, 0x08, 0x0c, },
+};
+
/* Pad retention control code for accessing PMU regmap */
static atomic_t exynos_shared_retention_refcnt;
@@ -1816,3 +1825,44 @@ const struct samsung_pinctrl_of_match_data gs101_of_data __initconst = {
.ctrl = gs101_pin_ctrl,
.num_ctrl = ARRAY_SIZE(gs101_pin_ctrl),
};
+
+/* pin banks of artpec8 pin-controller (FSYS0) */
+static const struct samsung_pin_bank_data artpec8_pin_banks0[] __initconst = {
+ ARTPEC_PIN_BANK_EINTG(5, 0x000, "gpf0", 0x00),
+ ARTPEC_PIN_BANK_EINTG(4, 0x020, "gpf1", 0x04),
+ ARTPEC_PIN_BANK_EINTG(8, 0x040, "gpf2", 0x08),
+ ARTPEC_PIN_BANK_EINTG(4, 0x060, "gpf3", 0x0c),
+ ARTPEC_PIN_BANK_EINTG(7, 0x080, "gpf4", 0x10),
+ ARTPEC_PIN_BANK_EINTG(8, 0x0a0, "gpe0", 0x14),
+ ARTPEC_PIN_BANK_EINTG(8, 0x0c0, "gpe1", 0x18),
+ ARTPEC_PIN_BANK_EINTG(6, 0x0e0, "gpe2", 0x1c),
+ ARTPEC_PIN_BANK_EINTG(8, 0x100, "gps0", 0x20),
+ ARTPEC_PIN_BANK_EINTG(8, 0x120, "gps1", 0x24),
+};
+
+/* pin banks of artpec8 pin-controller (PERIC) */
+static const struct samsung_pin_bank_data artpec8_pin_banks1[] __initconst = {
+ ARTPEC_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
+ ARTPEC_PIN_BANK_EINTG(8, 0x020, "gpa1", 0x04),
+ ARTPEC_PIN_BANK_EINTG(8, 0x040, "gpa2", 0x08),
+ ARTPEC_PIN_BANK_EINTG(2, 0x060, "gpk0", 0x0c),
+};
+
+static const struct samsung_pin_ctrl artpec8_pin_ctrl[] __initconst = {
+ {
+ /* pin-controller instance 0 FSYS data */
+ .pin_banks = artpec8_pin_banks0,
+ .nr_banks = ARRAY_SIZE(artpec8_pin_banks0),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ }, {
+ /* pin-controller instance 1 PERIC data */
+ .pin_banks = artpec8_pin_banks1,
+ .nr_banks = ARRAY_SIZE(artpec8_pin_banks1),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ },
+};
+
+const struct samsung_pinctrl_of_match_data artpec8_of_data __initconst = {
+ .ctrl = artpec8_pin_ctrl,
+ .num_ctrl = ARRAY_SIZE(artpec8_pin_ctrl),
+};
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.h b/drivers/pinctrl/samsung/pinctrl-exynos.h
index 362dc533186f..c9c38f8988dd 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.h
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.h
@@ -236,6 +236,16 @@
.name = id \
}
+#define ARTPEC_PIN_BANK_EINTG(pins, reg, id, offs) \
+ { \
+ .type = &artpec_bank_type_off, \
+ .pctl_offset = reg, \
+ .nr_pins = pins, \
+ .eint_type = EINT_TYPE_GPIO, \
+ .eint_offset = offs, \
+ .name = id \
+ }
+
/**
* struct exynos_weint_data: irq specific data for all the wakeup interrupts
* generated by the external wakeup interrupt controller.
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index 24745e1d78ce..c099195fc464 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -1482,6 +1482,8 @@ static const struct of_device_id samsung_pinctrl_dt_match[] = {
.data = &s5pv210_of_data },
#endif
#ifdef CONFIG_PINCTRL_EXYNOS_ARM64
+ { .compatible = "axis,artpec8-pinctrl",
+ .data = &artpec8_of_data },
{ .compatible = "google,gs101-pinctrl",
.data = &gs101_of_data },
{ .compatible = "samsung,exynos2200-pinctrl",
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.h b/drivers/pinctrl/samsung/pinctrl-samsung.h
index 1cabcbe1401a..3e8ef91d94a3 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.h
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.h
@@ -381,6 +381,7 @@ struct samsung_pmx_func {
};
/* list of all exported SoC specific data */
+extern const struct samsung_pinctrl_of_match_data artpec8_of_data;
extern const struct samsung_pinctrl_of_match_data exynos2200_of_data;
extern const struct samsung_pinctrl_of_match_data exynos3250_of_data;
extern const struct samsung_pinctrl_of_match_data exynos4210_of_data;
@@ -402,10 +403,6 @@ extern const struct samsung_pinctrl_of_match_data exynosautov920_of_data;
extern const struct samsung_pinctrl_of_match_data fsd_of_data;
extern const struct samsung_pinctrl_of_match_data gs101_of_data;
extern const struct samsung_pinctrl_of_match_data s3c64xx_of_data;
-extern const struct samsung_pinctrl_of_match_data s3c2412_of_data;
-extern const struct samsung_pinctrl_of_match_data s3c2416_of_data;
-extern const struct samsung_pinctrl_of_match_data s3c2440_of_data;
-extern const struct samsung_pinctrl_of_match_data s3c2450_of_data;
extern const struct samsung_pinctrl_of_match_data s5pv210_of_data;
#endif /* __PINCTRL_SAMSUNG_H */
diff --git a/drivers/pinctrl/spacemit/pinctrl-k1.c b/drivers/pinctrl/spacemit/pinctrl-k1.c
index 9996b1c4a07e..33af9b5791c1 100644
--- a/drivers/pinctrl/spacemit/pinctrl-k1.c
+++ b/drivers/pinctrl/spacemit/pinctrl-k1.c
@@ -707,7 +707,7 @@ static void spacemit_pinconf_dbg_show(struct pinctrl_dev *pctldev,
spacemit_get_drive_strength_mA(IO_TYPE_1V8, tmp),
spacemit_get_drive_strength_mA(IO_TYPE_3V3, tmp));
- seq_printf(seq, ", register (0x%04x)\n", value);
+ seq_printf(seq, ", register (0x%04x)", value);
}
static const struct pinconf_ops spacemit_pinconf_ops = {
@@ -847,7 +847,7 @@ static const struct pinctrl_pin_desc k1_pin_desc[] = {
PINCTRL_PIN(67, "GPIO_67"),
PINCTRL_PIN(68, "GPIO_68"),
PINCTRL_PIN(69, "GPIO_69"),
- PINCTRL_PIN(70, "GPIO_70/PRI_DTI"),
+ PINCTRL_PIN(70, "GPIO_70/PRI_TDI"),
PINCTRL_PIN(71, "GPIO_71/PRI_TMS"),
PINCTRL_PIN(72, "GPIO_72/PRI_TCK"),
PINCTRL_PIN(73, "GPIO_73/PRI_TDO"),
diff --git a/drivers/pinctrl/sprd/pinctrl-sprd.c b/drivers/pinctrl/sprd/pinctrl-sprd.c
index c4a1d99dfed0..16cf9d15f247 100644
--- a/drivers/pinctrl/sprd/pinctrl-sprd.c
+++ b/drivers/pinctrl/sprd/pinctrl-sprd.c
@@ -258,8 +258,7 @@ static int sprd_dt_node_to_map(struct pinctrl_dev *pctldev,
grp = sprd_pinctrl_find_group_by_name(pctl, np->name);
if (!grp) {
- dev_err(pctl->dev, "unable to find group for node %s\n",
- of_node_full_name(np));
+ dev_err(pctl->dev, "unable to find group for node %pOF\n", np);
return -EINVAL;
}
@@ -276,16 +275,14 @@ static int sprd_dt_node_to_map(struct pinctrl_dev *pctldev,
if (ret < 0) {
if (ret != -EINVAL)
dev_err(pctl->dev,
- "%s: could not parse property function\n",
- of_node_full_name(np));
+ "%pOF: could not parse property function\n", np);
function = NULL;
}
ret = pinconf_generic_parse_dt_config(np, pctldev, &configs,
&num_configs);
if (ret < 0) {
- dev_err(pctl->dev, "%s: could not parse node property\n",
- of_node_full_name(np));
+ dev_err(pctl->dev, "%pOF: could not parse node property\n", np);
return ret;
}
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32-hdp.c b/drivers/pinctrl/stm32/pinctrl-stm32-hdp.c
index 971959a75b0c..0b1dff01e04c 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32-hdp.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32-hdp.c
@@ -576,7 +576,7 @@ static const struct pinmux_ops stm32_hdp_pinmux_ops = {
.gpio_set_direction = NULL,
};
-static struct pinctrl_desc stm32_hdp_pdesc = {
+static const struct pinctrl_desc stm32_hdp_pdesc = {
.name = DRIVER_NAME,
.pins = stm32_hdp_pins,
.npins = ARRAY_SIZE(stm32_hdp_pins),
@@ -642,7 +642,7 @@ static int stm32_hdp_probe(struct platform_device *pdev)
hdp->gpio_chip.gc.can_sleep = true;
hdp->gpio_chip.gc.names = stm32_hdp_pins_group;
- config = (typeof(config)){
+ config = (struct gpio_generic_chip_config) {
.dev = dev,
.sz = 4,
.dat = hdp->base + HDP_GPOVAL,
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index 823c8fe758e2..3ebb468de830 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -1236,7 +1236,7 @@ static int stm32_pconf_parse_conf(struct pinctrl_dev *pctldev,
case PIN_CONFIG_BIAS_PULL_DOWN:
ret = stm32_pconf_set_bias(bank, offset, 2);
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
__stm32_gpio_set(bank, offset, arg);
ret = stm32_pmx_gpio_set_direction(pctldev, range, pin, false);
break;
diff --git a/drivers/pinctrl/sunplus/sppctl.c b/drivers/pinctrl/sunplus/sppctl.c
index 3e924aa86cc2..fabe7efaa837 100644
--- a/drivers/pinctrl/sunplus/sppctl.c
+++ b/drivers/pinctrl/sunplus/sppctl.c
@@ -488,7 +488,7 @@ static int sppctl_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
case PIN_CONFIG_INPUT_ENABLE:
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
return sppctl_gpio_direction_output(chip, offset, 0);
case PIN_CONFIG_PERSIST_STATE:
@@ -580,7 +580,7 @@ static int sppctl_pin_config_get(struct pinctrl_dev *pctldev, unsigned int pin,
arg = 0;
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
if (!sppctl_first_get(&pctl->spp_gchip->chip, pin))
return -EINVAL;
if (!sppctl_master_get(&pctl->spp_gchip->chip, pin))
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi-dt.c b/drivers/pinctrl/sunxi/pinctrl-sunxi-dt.c
index 4e34b0cd3b73..50a16f3bd131 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi-dt.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi-dt.c
@@ -103,7 +103,7 @@ static struct sunxi_desc_pin *init_pins_table(struct device *dev,
return ERR_PTR(-EINVAL);
}
- pins = devm_kzalloc(dev, desc->npins * sizeof(*pins), GFP_KERNEL);
+ pins = devm_kcalloc(dev, desc->npins, sizeof(*pins), GFP_KERNEL);
if (!pins)
return ERR_PTR(-ENOMEM);
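Replacing devm_kzalloc(dev, n * size, ...) with devm_kcalloc(dev, n, size, ...) matters because the latter checks the multiplication for overflow instead of silently allocating a short buffer. Roughly, in terms of the generic helpers from linux/overflow.h (a sketch, not this driver's code):

	size_t bytes;

	/* kcalloc-style size check: refuse an n * size that would wrap */
	if (check_mul_overflow(n, size, &bytes))
		return NULL;
	return devm_kzalloc(dev, bytes, GFP_KERNEL);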
@@ -199,7 +199,7 @@ static int prepare_function_table(struct device *dev, struct device_node *pnode,
* Allocate the memory needed for the functions in one table.
* We later use pointers into this table to mark each pin.
*/
- func = devm_kzalloc(dev, num_funcs * sizeof(*func), GFP_KERNEL);
+ func = devm_kcalloc(dev, num_funcs, sizeof(*func), GFP_KERNEL);
if (!func)
return -ENOMEM;
@@ -274,8 +274,7 @@ static void fill_pin_function(struct device *dev, struct device_node *node,
if (!strcmp(pins[pin].pin.name, name))
break;
if (pin == npins) {
- dev_warn(dev, "%s: cannot find pin %s\n",
- of_node_full_name(node), name);
+ dev_warn(dev, "%pOF: cannot find pin %s\n", node, name);
index++;
continue;
}
@@ -283,8 +282,8 @@ static void fill_pin_function(struct device *dev, struct device_node *node,
/* Read the associated mux value. */
muxval = sunxi_pinctrl_dt_read_pinmux(node, index);
if (muxval == INVALID_MUX) {
- dev_warn(dev, "%s: invalid mux value for pin %s\n",
- of_node_full_name(node), name);
+ dev_warn(dev, "%pOF: invalid mux value for pin %s\n",
+ node, name);
index++;
continue;
}
diff --git a/drivers/pinctrl/tegra/Kconfig b/drivers/pinctrl/tegra/Kconfig
index 4e87d19323ba..660d101ea367 100644
--- a/drivers/pinctrl/tegra/Kconfig
+++ b/drivers/pinctrl/tegra/Kconfig
@@ -24,6 +24,10 @@ config PINCTRL_TEGRA210
bool
select PINCTRL_TEGRA
+config PINCTRL_TEGRA186
+ bool
+ select PINCTRL_TEGRA
+
config PINCTRL_TEGRA194
bool
select PINCTRL_TEGRA
diff --git a/drivers/pinctrl/tegra/Makefile b/drivers/pinctrl/tegra/Makefile
index a93973701d4c..82176526549e 100644
--- a/drivers/pinctrl/tegra/Makefile
+++ b/drivers/pinctrl/tegra/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_PINCTRL_TEGRA30) += pinctrl-tegra30.o
obj-$(CONFIG_PINCTRL_TEGRA114) += pinctrl-tegra114.o
obj-$(CONFIG_PINCTRL_TEGRA124) += pinctrl-tegra124.o
obj-$(CONFIG_PINCTRL_TEGRA210) += pinctrl-tegra210.o
+obj-$(CONFIG_PINCTRL_TEGRA186) += pinctrl-tegra186.o
obj-$(CONFIG_PINCTRL_TEGRA194) += pinctrl-tegra194.o
obj-$(CONFIG_PINCTRL_TEGRA234) += pinctrl-tegra234.o
obj-$(CONFIG_PINCTRL_TEGRA_XUSB) += pinctrl-tegra-xusb.o
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra186.c b/drivers/pinctrl/tegra/pinctrl-tegra186.c
new file mode 100644
index 000000000000..4a1d6476af9b
--- /dev/null
+++ b/drivers/pinctrl/tegra/pinctrl-tegra186.c
@@ -0,0 +1,1979 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Pinctrl data for the NVIDIA Tegra186 pinmux
+ *
+ * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+
+#include "pinctrl-tegra.h"
+
+/* Define unique IDs for the pins */
+enum {
+ TEGRA_PIN_PEX_L0_RST_N_PA0,
+ TEGRA_PIN_PEX_L0_CLKREQ_N_PA1,
+ TEGRA_PIN_PEX_WAKE_N_PA2,
+ TEGRA_PIN_PEX_L1_RST_N_PA3,
+ TEGRA_PIN_PEX_L1_CLKREQ_N_PA4,
+ TEGRA_PIN_PEX_L2_RST_N_PA5,
+ TEGRA_PIN_PEX_L2_CLKREQ_N_PA6,
+ TEGRA_PIN_UART4_TX_PB0,
+ TEGRA_PIN_UART4_RX_PB1,
+ TEGRA_PIN_UART4_RTS_PB2,
+ TEGRA_PIN_UART4_CTS_PB3,
+ TEGRA_PIN_GPIO_WAN1_PB4,
+ TEGRA_PIN_GPIO_WAN2_PB5,
+ TEGRA_PIN_GPIO_WAN3_PB6,
+ TEGRA_PIN_GPIO_WAN4_PC0,
+ TEGRA_PIN_DAP2_SCLK_PC1,
+ TEGRA_PIN_DAP2_DOUT_PC2,
+ TEGRA_PIN_DAP2_DIN_PC3,
+ TEGRA_PIN_DAP2_FS_PC4,
+ TEGRA_PIN_GEN1_I2C_SCL_PC5,
+ TEGRA_PIN_GEN1_I2C_SDA_PC6,
+ TEGRA_PIN_SDMMC1_CLK_PD0,
+ TEGRA_PIN_SDMMC1_CMD_PD1,
+ TEGRA_PIN_SDMMC1_DAT0_PD2,
+ TEGRA_PIN_SDMMC1_DAT1_PD3,
+ TEGRA_PIN_SDMMC1_DAT2_PD4,
+ TEGRA_PIN_SDMMC1_DAT3_PD5,
+ TEGRA_PIN_EQOS_TXC_PE0,
+ TEGRA_PIN_EQOS_TD0_PE1,
+ TEGRA_PIN_EQOS_TD1_PE2,
+ TEGRA_PIN_EQOS_TD2_PE3,
+ TEGRA_PIN_EQOS_TD3_PE4,
+ TEGRA_PIN_EQOS_TX_CTL_PE5,
+ TEGRA_PIN_EQOS_RD0_PE6,
+ TEGRA_PIN_EQOS_RD1_PE7,
+ TEGRA_PIN_EQOS_RD2_PF0,
+ TEGRA_PIN_EQOS_RD3_PF1,
+ TEGRA_PIN_EQOS_RX_CTL_PF2,
+ TEGRA_PIN_EQOS_RXC_PF3,
+ TEGRA_PIN_EQOS_MDIO_PF4,
+ TEGRA_PIN_EQOS_MDC_PF5,
+ TEGRA_PIN_SDMMC3_CLK_PG0,
+ TEGRA_PIN_SDMMC3_CMD_PG1,
+ TEGRA_PIN_SDMMC3_DAT0_PG2,
+ TEGRA_PIN_SDMMC3_DAT1_PG3,
+ TEGRA_PIN_SDMMC3_DAT2_PG4,
+ TEGRA_PIN_SDMMC3_DAT3_PG5,
+ TEGRA_PIN_GPIO_WAN5_PH0,
+ TEGRA_PIN_GPIO_WAN6_PH1,
+ TEGRA_PIN_GPIO_WAN7_PH2,
+ TEGRA_PIN_GPIO_WAN8_PH3,
+ TEGRA_PIN_BCPU_PWR_REQ_PH4,
+ TEGRA_PIN_MCPU_PWR_REQ_PH5,
+ TEGRA_PIN_GPU_PWR_REQ_PH6,
+ TEGRA_PIN_GPIO_PQ0_PI0,
+ TEGRA_PIN_GPIO_PQ1_PI1,
+ TEGRA_PIN_GPIO_PQ2_PI2,
+ TEGRA_PIN_GPIO_PQ3_PI3,
+ TEGRA_PIN_GPIO_PQ4_PI4,
+ TEGRA_PIN_GPIO_PQ5_PI5,
+ TEGRA_PIN_GPIO_PQ6_PI6,
+ TEGRA_PIN_GPIO_PQ7_PI7,
+ TEGRA_PIN_DAP1_SCLK_PJ0,
+ TEGRA_PIN_DAP1_DOUT_PJ1,
+ TEGRA_PIN_DAP1_DIN_PJ2,
+ TEGRA_PIN_DAP1_FS_PJ3,
+ TEGRA_PIN_AUD_MCLK_PJ4,
+ TEGRA_PIN_GPIO_AUD0_PJ5,
+ TEGRA_PIN_GPIO_AUD1_PJ6,
+ TEGRA_PIN_GPIO_AUD2_PJ7,
+ TEGRA_PIN_GPIO_AUD3_PK0,
+ TEGRA_PIN_GEN7_I2C_SCL_PL0,
+ TEGRA_PIN_GEN7_I2C_SDA_PL1,
+ TEGRA_PIN_GEN9_I2C_SCL_PL2,
+ TEGRA_PIN_GEN9_I2C_SDA_PL3,
+ TEGRA_PIN_USB_VBUS_EN0_PL4,
+ TEGRA_PIN_USB_VBUS_EN1_PL5,
+ TEGRA_PIN_GP_PWM6_PL6,
+ TEGRA_PIN_GP_PWM7_PL7,
+ TEGRA_PIN_DMIC1_DAT_PM0,
+ TEGRA_PIN_DMIC1_CLK_PM1,
+ TEGRA_PIN_DMIC2_DAT_PM2,
+ TEGRA_PIN_DMIC2_CLK_PM3,
+ TEGRA_PIN_DMIC4_DAT_PM4,
+ TEGRA_PIN_DMIC4_CLK_PM5,
+ TEGRA_PIN_GPIO_CAM1_PN0,
+ TEGRA_PIN_GPIO_CAM2_PN1,
+ TEGRA_PIN_GPIO_CAM3_PN2,
+ TEGRA_PIN_GPIO_CAM4_PN3,
+ TEGRA_PIN_GPIO_CAM5_PN4,
+ TEGRA_PIN_GPIO_CAM6_PN5,
+ TEGRA_PIN_GPIO_CAM7_PN6,
+ TEGRA_PIN_EXTPERIPH1_CLK_PO0,
+ TEGRA_PIN_EXTPERIPH2_CLK_PO1,
+ TEGRA_PIN_CAM_I2C_SCL_PO2,
+ TEGRA_PIN_CAM_I2C_SDA_PO3,
+ TEGRA_PIN_DP_AUX_CH0_HPD_PP0,
+ TEGRA_PIN_DP_AUX_CH1_HPD_PP1,
+ TEGRA_PIN_HDMI_CEC_PP2,
+ TEGRA_PIN_GPIO_EDP0_PP3,
+ TEGRA_PIN_GPIO_EDP1_PP4,
+ TEGRA_PIN_GPIO_EDP2_PP5,
+ TEGRA_PIN_GPIO_EDP3_PP6,
+ TEGRA_PIN_DIRECTDC1_CLK_PQ0,
+ TEGRA_PIN_DIRECTDC1_IN_PQ1,
+ TEGRA_PIN_DIRECTDC1_OUT0_PQ2,
+ TEGRA_PIN_DIRECTDC1_OUT1_PQ3,
+ TEGRA_PIN_DIRECTDC1_OUT2_PQ4,
+ TEGRA_PIN_DIRECTDC1_OUT3_PQ5,
+ TEGRA_PIN_QSPI_SCK_PR0,
+ TEGRA_PIN_QSPI_IO0_PR1,
+ TEGRA_PIN_QSPI_IO1_PR2,
+ TEGRA_PIN_QSPI_IO2_PR3,
+ TEGRA_PIN_QSPI_IO3_PR4,
+ TEGRA_PIN_QSPI_CS_N_PR5,
+ TEGRA_PIN_UART1_TX_PT0,
+ TEGRA_PIN_UART1_RX_PT1,
+ TEGRA_PIN_UART1_RTS_PT2,
+ TEGRA_PIN_UART1_CTS_PT3,
+ TEGRA_PIN_UART2_TX_PX0,
+ TEGRA_PIN_UART2_RX_PX1,
+ TEGRA_PIN_UART2_RTS_PX2,
+ TEGRA_PIN_UART2_CTS_PX3,
+ TEGRA_PIN_UART5_TX_PX4,
+ TEGRA_PIN_UART5_RX_PX5,
+ TEGRA_PIN_UART5_RTS_PX6,
+ TEGRA_PIN_UART5_CTS_PX7,
+ TEGRA_PIN_GPIO_MDM1_PY0,
+ TEGRA_PIN_GPIO_MDM2_PY1,
+ TEGRA_PIN_GPIO_MDM3_PY2,
+ TEGRA_PIN_GPIO_MDM4_PY3,
+ TEGRA_PIN_GPIO_MDM5_PY4,
+ TEGRA_PIN_GPIO_MDM6_PY5,
+ TEGRA_PIN_GPIO_MDM7_PY6,
+ TEGRA_PIN_UFS0_REF_CLK_PBB0,
+ TEGRA_PIN_UFS0_RST_PBB1,
+ TEGRA_PIN_DAP4_SCLK_PCC0,
+ TEGRA_PIN_DAP4_DOUT_PCC1,
+ TEGRA_PIN_DAP4_DIN_PCC2,
+ TEGRA_PIN_DAP4_FS_PCC3,
+ TEGRA_PIN_DIRECTDC_COMP,
+ TEGRA_PIN_SDMMC1_COMP,
+ TEGRA_PIN_EQOS_COMP,
+ TEGRA_PIN_SDMMC3_COMP,
+ TEGRA_PIN_QSPI_COMP,
+};
+
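+/*
+ * Pins below belong to the AON (always-on) pin controller instance. Note
+ * that this enum restarts numbering at zero, so these IDs index a separate
+ * pin descriptor table from the main one above.
+ */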
+enum {
+ TEGRA_PIN_PWR_I2C_SCL_PS0,
+ TEGRA_PIN_PWR_I2C_SDA_PS1,
+ TEGRA_PIN_BATT_OC_PS2,
+ TEGRA_PIN_SAFE_STATE_PS3,
+ TEGRA_PIN_VCOMP_ALERT_PS4,
+ TEGRA_PIN_GPIO_DIS0_PU0,
+ TEGRA_PIN_GPIO_DIS1_PU1,
+ TEGRA_PIN_GPIO_DIS2_PU2,
+ TEGRA_PIN_GPIO_DIS3_PU3,
+ TEGRA_PIN_GPIO_DIS4_PU4,
+ TEGRA_PIN_GPIO_DIS5_PU5,
+ TEGRA_PIN_GPIO_SEN0_PV0,
+ TEGRA_PIN_GPIO_SEN1_PV1,
+ TEGRA_PIN_GPIO_SEN2_PV2,
+ TEGRA_PIN_GPIO_SEN3_PV3,
+ TEGRA_PIN_GPIO_SEN4_PV4,
+ TEGRA_PIN_GPIO_SEN5_PV5,
+ TEGRA_PIN_GPIO_SEN6_PV6,
+ TEGRA_PIN_GPIO_SEN7_PV7,
+ TEGRA_PIN_GEN8_I2C_SCL_PW0,
+ TEGRA_PIN_GEN8_I2C_SDA_PW1,
+ TEGRA_PIN_UART3_TX_PW2,
+ TEGRA_PIN_UART3_RX_PW3,
+ TEGRA_PIN_UART3_RTS_PW4,
+ TEGRA_PIN_UART3_CTS_PW5,
+ TEGRA_PIN_UART7_TX_PW6,
+ TEGRA_PIN_UART7_RX_PW7,
+ TEGRA_PIN_CAN1_DOUT_PZ0,
+ TEGRA_PIN_CAN1_DIN_PZ1,
+ TEGRA_PIN_CAN0_DOUT_PZ2,
+ TEGRA_PIN_CAN0_DIN_PZ3,
+ TEGRA_PIN_CAN_GPIO0_PAA0,
+ TEGRA_PIN_CAN_GPIO1_PAA1,
+ TEGRA_PIN_CAN_GPIO2_PAA2,
+ TEGRA_PIN_CAN_GPIO3_PAA3,
+ TEGRA_PIN_CAN_GPIO4_PAA4,
+ TEGRA_PIN_CAN_GPIO5_PAA5,
+ TEGRA_PIN_CAN_GPIO6_PAA6,
+ TEGRA_PIN_CAN_GPIO7_PAA7,
+ TEGRA_PIN_GPIO_SEN8_PEE0,
+ TEGRA_PIN_GPIO_SEN9_PEE1,
+ TEGRA_PIN_TOUCH_CLK_PEE2,
+ TEGRA_PIN_POWER_ON_PFF0,
+ TEGRA_PIN_GPIO_SW1_PFF1,
+ TEGRA_PIN_GPIO_SW2_PFF2,
+ TEGRA_PIN_GPIO_SW3_PFF3,
+ TEGRA_PIN_GPIO_SW4_PFF4,
+ TEGRA_PIN_SHUTDOWN,
+ TEGRA_PIN_PMU_INT,
+ TEGRA_PIN_SOC_PWR_REQ,
+ TEGRA_PIN_CLK_32K_IN,
+};
+
+/* Table of pin descriptors */
+static const struct pinctrl_pin_desc tegra186_pins[] = {
+ PINCTRL_PIN(TEGRA_PIN_PEX_L0_RST_N_PA0, "PEX_L0_RST_N_PA0"),
+ PINCTRL_PIN(TEGRA_PIN_PEX_L0_CLKREQ_N_PA1, "PEX_L0_CLKREQ_N_PA1"),
+ PINCTRL_PIN(TEGRA_PIN_PEX_WAKE_N_PA2, "PEX_WAKE_N_PA2"),
+ PINCTRL_PIN(TEGRA_PIN_PEX_L1_RST_N_PA3, "PEX_L1_RST_N_PA3"),
+ PINCTRL_PIN(TEGRA_PIN_PEX_L1_CLKREQ_N_PA4, "PEX_L1_CLKREQ_N_PA4"),
+ PINCTRL_PIN(TEGRA_PIN_PEX_L2_RST_N_PA5, "PEX_L2_RST_N_PA5"),
+ PINCTRL_PIN(TEGRA_PIN_PEX_L2_CLKREQ_N_PA6, "PEX_L2_CLKREQ_N_PA6"),
+ PINCTRL_PIN(TEGRA_PIN_UART4_TX_PB0, "UART4_TX_PB0"),
+ PINCTRL_PIN(TEGRA_PIN_UART4_RX_PB1, "UART4_RX_PB1"),
+ PINCTRL_PIN(TEGRA_PIN_UART4_RTS_PB2, "UART4_RTS_PB2"),
+ PINCTRL_PIN(TEGRA_PIN_UART4_CTS_PB3, "UART4_CTS_PB3"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_WAN1_PB4, "GPIO_WAN1_PB4"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_WAN2_PB5, "GPIO_WAN2_PB5"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_WAN3_PB6, "GPIO_WAN3_PB6"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_WAN4_PC0, "GPIO_WAN4_PC0"),
+ PINCTRL_PIN(TEGRA_PIN_DAP2_SCLK_PC1, "DAP2_SCLK_PC1"),
+ PINCTRL_PIN(TEGRA_PIN_DAP2_DOUT_PC2, "DAP2_DOUT_PC2"),
+ PINCTRL_PIN(TEGRA_PIN_DAP2_DIN_PC3, "DAP2_DIN_PC3"),
+ PINCTRL_PIN(TEGRA_PIN_DAP2_FS_PC4, "DAP2_FS_PC4"),
+ PINCTRL_PIN(TEGRA_PIN_GEN1_I2C_SCL_PC5, "GEN1_I2C_SCL_PC5"),
+ PINCTRL_PIN(TEGRA_PIN_GEN1_I2C_SDA_PC6, "GEN1_I2C_SDA_PC6"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC1_CLK_PD0, "SDMMC1_CLK_PD0"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC1_CMD_PD1, "SDMMC1_CMD_PD1"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC1_DAT0_PD2, "SDMMC1_DAT0_PD2"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC1_DAT1_PD3, "SDMMC1_DAT1_PD3"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC1_DAT2_PD4, "SDMMC1_DAT2_PD4"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC1_DAT3_PD5, "SDMMC1_DAT3_PD5"),
+ PINCTRL_PIN(TEGRA_PIN_EQOS_TXC_PE0, "EQOS_TXC_PE0"),
+ PINCTRL_PIN(TEGRA_PIN_EQOS_TD0_PE1, "EQOS_TD0_PE1"),
+ PINCTRL_PIN(TEGRA_PIN_EQOS_TD1_PE2, "EQOS_TD1_PE2"),
+ PINCTRL_PIN(TEGRA_PIN_EQOS_TD2_PE3, "EQOS_TD2_PE3"),
+ PINCTRL_PIN(TEGRA_PIN_EQOS_TD3_PE4, "EQOS_TD3_PE4"),
+ PINCTRL_PIN(TEGRA_PIN_EQOS_TX_CTL_PE5, "EQOS_TX_CTL_PE5"),
+ PINCTRL_PIN(TEGRA_PIN_EQOS_RD0_PE6, "EQOS_RD0_PE6"),
+ PINCTRL_PIN(TEGRA_PIN_EQOS_RD1_PE7, "EQOS_RD1_PE7"),
+ PINCTRL_PIN(TEGRA_PIN_EQOS_RD2_PF0, "EQOS_RD2_PF0"),
+ PINCTRL_PIN(TEGRA_PIN_EQOS_RD3_PF1, "EQOS_RD3_PF1"),
+ PINCTRL_PIN(TEGRA_PIN_EQOS_RX_CTL_PF2, "EQOS_RX_CTL_PF2"),
+ PINCTRL_PIN(TEGRA_PIN_EQOS_RXC_PF3, "EQOS_RXC_PF3"),
+ PINCTRL_PIN(TEGRA_PIN_EQOS_MDIO_PF4, "EQOS_MDIO_PF4"),
+ PINCTRL_PIN(TEGRA_PIN_EQOS_MDC_PF5, "EQOS_MDC_PF5"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC3_CLK_PG0, "SDMMC3_CLK_PG0"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC3_CMD_PG1, "SDMMC3_CMD_PG1"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC3_DAT0_PG2, "SDMMC3_DAT0_PG2"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC3_DAT1_PG3, "SDMMC3_DAT1_PG3"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC3_DAT2_PG4, "SDMMC3_DAT2_PG4"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC3_DAT3_PG5, "SDMMC3_DAT3_PG5"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_WAN5_PH0, "GPIO_WAN5_PH0"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_WAN6_PH1, "GPIO_WAN6_PH1"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_WAN7_PH2, "GPIO_WAN7_PH2"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_WAN8_PH3, "GPIO_WAN8_PH3"),
+ PINCTRL_PIN(TEGRA_PIN_BCPU_PWR_REQ_PH4, "BCPU_PWR_REQ_PH4"),
+ PINCTRL_PIN(TEGRA_PIN_MCPU_PWR_REQ_PH5, "MCPU_PWR_REQ_PH5"),
+ PINCTRL_PIN(TEGRA_PIN_GPU_PWR_REQ_PH6, "GPU_PWR_REQ_PH6"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_PQ0_PI0, "GPIO_PQ0_PI0"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_PQ1_PI1, "GPIO_PQ1_PI1"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_PQ2_PI2, "GPIO_PQ2_PI2"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_PQ3_PI3, "GPIO_PQ3_PI3"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_PQ4_PI4, "GPIO_PQ4_PI4"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_PQ5_PI5, "GPIO_PQ5_PI5"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_PQ6_PI6, "GPIO_PQ6_PI6"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_PQ7_PI7, "GPIO_PQ7_PI7"),
+ PINCTRL_PIN(TEGRA_PIN_DAP1_SCLK_PJ0, "DAP1_SCLK_PJ0"),
+ PINCTRL_PIN(TEGRA_PIN_DAP1_DOUT_PJ1, "DAP1_DOUT_PJ1"),
+ PINCTRL_PIN(TEGRA_PIN_DAP1_DIN_PJ2, "DAP1_DIN_PJ2"),
+ PINCTRL_PIN(TEGRA_PIN_DAP1_FS_PJ3, "DAP1_FS_PJ3"),
+ PINCTRL_PIN(TEGRA_PIN_AUD_MCLK_PJ4, "AUD_MCLK_PJ4"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_AUD0_PJ5, "GPIO_AUD0_PJ5"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_AUD1_PJ6, "GPIO_AUD1_PJ6"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_AUD2_PJ7, "GPIO_AUD2_PJ7"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_AUD3_PK0, "GPIO_AUD3_PK0"),
+ PINCTRL_PIN(TEGRA_PIN_GEN7_I2C_SCL_PL0, "GEN7_I2C_SCL_PL0"),
+ PINCTRL_PIN(TEGRA_PIN_GEN7_I2C_SDA_PL1, "GEN7_I2C_SDA_PL1"),
+ PINCTRL_PIN(TEGRA_PIN_GEN9_I2C_SCL_PL2, "GEN9_I2C_SCL_PL2"),
+ PINCTRL_PIN(TEGRA_PIN_GEN9_I2C_SDA_PL3, "GEN9_I2C_SDA_PL3"),
+ PINCTRL_PIN(TEGRA_PIN_USB_VBUS_EN0_PL4, "USB_VBUS_EN0_PL4"),
+ PINCTRL_PIN(TEGRA_PIN_USB_VBUS_EN1_PL5, "USB_VBUS_EN1_PL5"),
+ PINCTRL_PIN(TEGRA_PIN_GP_PWM6_PL6, "GP_PWM6_PL6"),
+ PINCTRL_PIN(TEGRA_PIN_GP_PWM7_PL7, "GP_PWM7_PL7"),
+ PINCTRL_PIN(TEGRA_PIN_DMIC1_DAT_PM0, "DMIC1_DAT_PM0"),
+ PINCTRL_PIN(TEGRA_PIN_DMIC1_CLK_PM1, "DMIC1_CLK_PM1"),
+ PINCTRL_PIN(TEGRA_PIN_DMIC2_DAT_PM2, "DMIC2_DAT_PM2"),
+ PINCTRL_PIN(TEGRA_PIN_DMIC2_CLK_PM3, "DMIC2_CLK_PM3"),
+ PINCTRL_PIN(TEGRA_PIN_DMIC4_DAT_PM4, "DMIC4_DAT_PM4"),
+ PINCTRL_PIN(TEGRA_PIN_DMIC4_CLK_PM5, "DMIC4_CLK_PM5"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_CAM1_PN0, "GPIO_CAM1_PN0"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_CAM2_PN1, "GPIO_CAM2_PN1"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_CAM3_PN2, "GPIO_CAM3_PN2"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_CAM4_PN3, "GPIO_CAM4_PN3"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_CAM5_PN4, "GPIO_CAM6_PN5"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_CAM6_PN5, "GPIO_CAM6_PN5"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_CAM7_PN6, "GPIO_CAM7_PN6"),
+ PINCTRL_PIN(TEGRA_PIN_EXTPERIPH1_CLK_PO0, "EXTPERIPH1_CLK_PO0"),
+ PINCTRL_PIN(TEGRA_PIN_EXTPERIPH2_CLK_PO1, "EXTPERIPH2_CLK_PO1"),
+ PINCTRL_PIN(TEGRA_PIN_CAM_I2C_SCL_PO2, "CAM_I2C_SCL_PO2"),
+ PINCTRL_PIN(TEGRA_PIN_CAM_I2C_SDA_PO3, "CAM_I2C_SDA_PO3"),
+ PINCTRL_PIN(TEGRA_PIN_DP_AUX_CH0_HPD_PP0, "DP_AUX_CH0_HPD_PP0"),
+ PINCTRL_PIN(TEGRA_PIN_DP_AUX_CH1_HPD_PP1, "DP_AUX_CH1_HPD_PP1"),
+ PINCTRL_PIN(TEGRA_PIN_HDMI_CEC_PP2, "HDMI_CEC_PP2"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_EDP0_PP3, "GPIO_EDP0_PP3"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_EDP1_PP4, "GPIO_EDP1_PP4"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_EDP2_PP5, "GPIO_EDP2_PP5"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_EDP3_PP6, "GPIO_EDP3_PP6"),
+ PINCTRL_PIN(TEGRA_PIN_DIRECTDC1_CLK_PQ0, "DIRECTDC1_CLK_PQ0"),
+ PINCTRL_PIN(TEGRA_PIN_DIRECTDC1_IN_PQ1, "DIRECTDC1_IN_PQ1"),
+ PINCTRL_PIN(TEGRA_PIN_DIRECTDC1_OUT0_PQ2, "DIRECTDC1_OUT0_PQ2"),
+ PINCTRL_PIN(TEGRA_PIN_DIRECTDC1_OUT1_PQ3, "DIRECTDC1_OUT1_PQ3"),
+ PINCTRL_PIN(TEGRA_PIN_DIRECTDC1_OUT2_PQ4, "DIRECTDC1_OUT2_PQ4"),
+ PINCTRL_PIN(TEGRA_PIN_DIRECTDC1_OUT3_PQ5, "DIRECTDC1_OUT3_PQ5"),
+ PINCTRL_PIN(TEGRA_PIN_QSPI_SCK_PR0, "QSPI_SCK_PR0"),
+ PINCTRL_PIN(TEGRA_PIN_QSPI_IO0_PR1, "QSPI_IO0_PR1"),
+ PINCTRL_PIN(TEGRA_PIN_QSPI_IO1_PR2, "QSPI_IO1_PR2"),
+ PINCTRL_PIN(TEGRA_PIN_QSPI_IO2_PR3, "QSPI_IO2_PR3"),
+ PINCTRL_PIN(TEGRA_PIN_QSPI_IO3_PR4, "QSPI_IO3_PR4"),
+ PINCTRL_PIN(TEGRA_PIN_QSPI_CS_N_PR5, "QSPI_CS_N_PR5"),
+ PINCTRL_PIN(TEGRA_PIN_UART1_TX_PT0, "UART1_TX_PT0"),
+ PINCTRL_PIN(TEGRA_PIN_UART1_RX_PT1, "UART1_RX_PT1"),
+ PINCTRL_PIN(TEGRA_PIN_UART1_RTS_PT2, "UART1_RTS_PT2"),
+ PINCTRL_PIN(TEGRA_PIN_UART1_CTS_PT3, "UART1_CTS_PT3"),
+ PINCTRL_PIN(TEGRA_PIN_UART2_TX_PX0, "UART2_TX_PX0"),
+ PINCTRL_PIN(TEGRA_PIN_UART2_RX_PX1, "UART2_RX_PX1"),
+ PINCTRL_PIN(TEGRA_PIN_UART2_RTS_PX2, "UART2_RTS_PX2"),
+ PINCTRL_PIN(TEGRA_PIN_UART2_CTS_PX3, "UART2_CTS_PX3"),
+ PINCTRL_PIN(TEGRA_PIN_UART5_TX_PX4, "UART5_TX_PX4"),
+ PINCTRL_PIN(TEGRA_PIN_UART5_RX_PX5, "UART5_RX_PX5"),
+ PINCTRL_PIN(TEGRA_PIN_UART5_RTS_PX6, "UART5_RTS_PX6"),
+ PINCTRL_PIN(TEGRA_PIN_UART5_CTS_PX7, "UART5_CTS_PX7"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_MDM1_PY0, "GPIO_MDM1_PY0"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_MDM2_PY1, "GPIO_MDM2_PY1"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_MDM3_PY2, "GPIO_MDM3_PY2"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_MDM4_PY3, "GPIO_MDM4_PY3"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_MDM5_PY4, "GPIO_MDM5_PY4"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_MDM6_PY5, "GPIO_MDM6_PY5"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_MDM7_PY6, "GPIO_MDM7_PY6"),
+ PINCTRL_PIN(TEGRA_PIN_UFS0_REF_CLK_PBB0, "UFS0_REF_CLK_PBB0"),
+ PINCTRL_PIN(TEGRA_PIN_UFS0_RST_PBB1, "UFS0_RST_PBB1"),
+ PINCTRL_PIN(TEGRA_PIN_DAP4_SCLK_PCC0, "DAP4_SCLK_PCC0"),
+ PINCTRL_PIN(TEGRA_PIN_DAP4_DOUT_PCC1, "DAP4_DOUT_PCC1"),
+ PINCTRL_PIN(TEGRA_PIN_DAP4_DIN_PCC2, "DAP4_DIN_PCC2"),
+ PINCTRL_PIN(TEGRA_PIN_DAP4_FS_PCC3, "DAP4_FS_PCC3"),
+ PINCTRL_PIN(TEGRA_PIN_DIRECTDC_COMP, "DIRECTDC_COMP"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC1_COMP, "SDMMC1_COMP"),
+ PINCTRL_PIN(TEGRA_PIN_EQOS_COMP, "EQOS_COMP"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC3_COMP, "SDMMC3_COMP"),
+ PINCTRL_PIN(TEGRA_PIN_QSPI_COMP, "QSPI_COMP"),
+};
+
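+/* Pin membership for each group; every mappable Tegra186 group contains a single pin. */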
+static const unsigned int pex_l0_rst_n_pa0_pins[] = {
+ TEGRA_PIN_PEX_L0_RST_N_PA0,
+};
+
+static const unsigned int pex_l0_clkreq_n_pa1_pins[] = {
+ TEGRA_PIN_PEX_L0_CLKREQ_N_PA1,
+};
+
+static const unsigned int pex_wake_n_pa2_pins[] = {
+ TEGRA_PIN_PEX_WAKE_N_PA2,
+};
+
+static const unsigned int pex_l1_rst_n_pa3_pins[] = {
+ TEGRA_PIN_PEX_L1_RST_N_PA3,
+};
+
+static const unsigned int pex_l1_clkreq_n_pa4_pins[] = {
+ TEGRA_PIN_PEX_L1_CLKREQ_N_PA4,
+};
+
+static const unsigned int pex_l2_rst_n_pa5_pins[] = {
+ TEGRA_PIN_PEX_L2_RST_N_PA5,
+};
+
+static const unsigned int pex_l2_clkreq_n_pa6_pins[] = {
+ TEGRA_PIN_PEX_L2_CLKREQ_N_PA6,
+};
+
+static const unsigned int uart4_tx_pb0_pins[] = {
+ TEGRA_PIN_UART4_TX_PB0,
+};
+
+static const unsigned int uart4_rx_pb1_pins[] = {
+ TEGRA_PIN_UART4_RX_PB1,
+};
+
+static const unsigned int uart4_rts_pb2_pins[] = {
+ TEGRA_PIN_UART4_RTS_PB2,
+};
+
+static const unsigned int uart4_cts_pb3_pins[] = {
+ TEGRA_PIN_UART4_CTS_PB3,
+};
+
+static const unsigned int gpio_wan1_pb4_pins[] = {
+ TEGRA_PIN_GPIO_WAN1_PB4,
+};
+
+static const unsigned int gpio_wan2_pb5_pins[] = {
+ TEGRA_PIN_GPIO_WAN2_PB5,
+};
+
+static const unsigned int gpio_wan3_pb6_pins[] = {
+ TEGRA_PIN_GPIO_WAN3_PB6,
+};
+
+static const unsigned int gpio_wan4_pc0_pins[] = {
+ TEGRA_PIN_GPIO_WAN4_PC0,
+};
+
+static const unsigned int dap2_sclk_pc1_pins[] = {
+ TEGRA_PIN_DAP2_SCLK_PC1,
+};
+
+static const unsigned int dap2_dout_pc2_pins[] = {
+ TEGRA_PIN_DAP2_DOUT_PC2,
+};
+
+static const unsigned int dap2_din_pc3_pins[] = {
+ TEGRA_PIN_DAP2_DIN_PC3,
+};
+
+static const unsigned int dap2_fs_pc4_pins[] = {
+ TEGRA_PIN_DAP2_FS_PC4,
+};
+
+static const unsigned int gen1_i2c_scl_pc5_pins[] = {
+ TEGRA_PIN_GEN1_I2C_SCL_PC5,
+};
+
+static const unsigned int gen1_i2c_sda_pc6_pins[] = {
+ TEGRA_PIN_GEN1_I2C_SDA_PC6,
+};
+
+static const unsigned int sdmmc1_clk_pd0_pins[] = {
+ TEGRA_PIN_SDMMC1_CLK_PD0,
+};
+
+static const unsigned int sdmmc1_cmd_pd1_pins[] = {
+ TEGRA_PIN_SDMMC1_CMD_PD1,
+};
+
+static const unsigned int sdmmc1_dat0_pd2_pins[] = {
+ TEGRA_PIN_SDMMC1_DAT0_PD2,
+};
+
+static const unsigned int sdmmc1_dat1_pd3_pins[] = {
+ TEGRA_PIN_SDMMC1_DAT1_PD3,
+};
+
+static const unsigned int sdmmc1_dat2_pd4_pins[] = {
+ TEGRA_PIN_SDMMC1_DAT2_PD4,
+};
+
+static const unsigned int sdmmc1_dat3_pd5_pins[] = {
+ TEGRA_PIN_SDMMC1_DAT3_PD5,
+};
+
+static const unsigned int eqos_txc_pe0_pins[] = {
+ TEGRA_PIN_EQOS_TXC_PE0,
+};
+
+static const unsigned int eqos_td0_pe1_pins[] = {
+ TEGRA_PIN_EQOS_TD0_PE1,
+};
+
+static const unsigned int eqos_td1_pe2_pins[] = {
+ TEGRA_PIN_EQOS_TD1_PE2,
+};
+
+static const unsigned int eqos_td2_pe3_pins[] = {
+ TEGRA_PIN_EQOS_TD2_PE3,
+};
+
+static const unsigned int eqos_td3_pe4_pins[] = {
+ TEGRA_PIN_EQOS_TD3_PE4,
+};
+
+static const unsigned int eqos_tx_ctl_pe5_pins[] = {
+ TEGRA_PIN_EQOS_TX_CTL_PE5,
+};
+
+static const unsigned int eqos_rd0_pe6_pins[] = {
+ TEGRA_PIN_EQOS_RD0_PE6,
+};
+
+static const unsigned int eqos_rd1_pe7_pins[] = {
+ TEGRA_PIN_EQOS_RD1_PE7,
+};
+
+static const unsigned int eqos_rd2_pf0_pins[] = {
+ TEGRA_PIN_EQOS_RD2_PF0,
+};
+
+static const unsigned int eqos_rd3_pf1_pins[] = {
+ TEGRA_PIN_EQOS_RD3_PF1,
+};
+
+static const unsigned int eqos_rx_ctl_pf2_pins[] = {
+ TEGRA_PIN_EQOS_RX_CTL_PF2,
+};
+
+static const unsigned int eqos_rxc_pf3_pins[] = {
+ TEGRA_PIN_EQOS_RXC_PF3,
+};
+
+static const unsigned int eqos_mdio_pf4_pins[] = {
+ TEGRA_PIN_EQOS_MDIO_PF4,
+};
+
+static const unsigned int eqos_mdc_pf5_pins[] = {
+ TEGRA_PIN_EQOS_MDC_PF5,
+};
+
+static const unsigned int sdmmc3_clk_pg0_pins[] = {
+ TEGRA_PIN_SDMMC3_CLK_PG0,
+};
+
+static const unsigned int sdmmc3_cmd_pg1_pins[] = {
+ TEGRA_PIN_SDMMC3_CMD_PG1,
+};
+
+static const unsigned int sdmmc3_dat0_pg2_pins[] = {
+ TEGRA_PIN_SDMMC3_DAT0_PG2,
+};
+
+static const unsigned int sdmmc3_dat1_pg3_pins[] = {
+ TEGRA_PIN_SDMMC3_DAT1_PG3,
+};
+
+static const unsigned int sdmmc3_dat2_pg4_pins[] = {
+ TEGRA_PIN_SDMMC3_DAT2_PG4,
+};
+
+static const unsigned int sdmmc3_dat3_pg5_pins[] = {
+ TEGRA_PIN_SDMMC3_DAT3_PG5,
+};
+
+static const unsigned int gpio_wan5_ph0_pins[] = {
+ TEGRA_PIN_GPIO_WAN5_PH0,
+};
+
+static const unsigned int gpio_wan6_ph1_pins[] = {
+ TEGRA_PIN_GPIO_WAN6_PH1,
+};
+
+static const unsigned int gpio_wan7_ph2_pins[] = {
+ TEGRA_PIN_GPIO_WAN7_PH2,
+};
+
+static const unsigned int gpio_wan8_ph3_pins[] = {
+ TEGRA_PIN_GPIO_WAN8_PH3,
+};
+
+static const unsigned int bcpu_pwr_req_ph4_pins[] = {
+ TEGRA_PIN_BCPU_PWR_REQ_PH4,
+};
+
+static const unsigned int mcpu_pwr_req_ph5_pins[] = {
+ TEGRA_PIN_MCPU_PWR_REQ_PH5,
+};
+
+static const unsigned int gpu_pwr_req_ph6_pins[] = {
+ TEGRA_PIN_GPU_PWR_REQ_PH6,
+};
+
+static const unsigned int gpio_pq0_pi0_pins[] = {
+ TEGRA_PIN_GPIO_PQ0_PI0,
+};
+
+static const unsigned int gpio_pq1_pi1_pins[] = {
+ TEGRA_PIN_GPIO_PQ1_PI1,
+};
+
+static const unsigned int gpio_pq2_pi2_pins[] = {
+ TEGRA_PIN_GPIO_PQ2_PI2,
+};
+
+static const unsigned int gpio_pq3_pi3_pins[] = {
+ TEGRA_PIN_GPIO_PQ3_PI3,
+};
+
+static const unsigned int gpio_pq4_pi4_pins[] = {
+ TEGRA_PIN_GPIO_PQ4_PI4,
+};
+
+static const unsigned int gpio_pq5_pi5_pins[] = {
+ TEGRA_PIN_GPIO_PQ5_PI5,
+};
+
+static const unsigned int gpio_pq6_pi6_pins[] = {
+ TEGRA_PIN_GPIO_PQ6_PI6,
+};
+
+static const unsigned int gpio_pq7_pi7_pins[] = {
+ TEGRA_PIN_GPIO_PQ7_PI7,
+};
+
+static const unsigned int dap1_sclk_pj0_pins[] = {
+ TEGRA_PIN_DAP1_SCLK_PJ0,
+};
+
+static const unsigned int dap1_dout_pj1_pins[] = {
+ TEGRA_PIN_DAP1_DOUT_PJ1,
+};
+
+static const unsigned int dap1_din_pj2_pins[] = {
+ TEGRA_PIN_DAP1_DIN_PJ2,
+};
+
+static const unsigned int dap1_fs_pj3_pins[] = {
+ TEGRA_PIN_DAP1_FS_PJ3,
+};
+
+static const unsigned int aud_mclk_pj4_pins[] = {
+ TEGRA_PIN_AUD_MCLK_PJ4,
+};
+
+static const unsigned int gpio_aud0_pj5_pins[] = {
+ TEGRA_PIN_GPIO_AUD0_PJ5,
+};
+
+static const unsigned int gpio_aud1_pj6_pins[] = {
+ TEGRA_PIN_GPIO_AUD1_PJ6,
+};
+
+static const unsigned int gpio_aud2_pj7_pins[] = {
+ TEGRA_PIN_GPIO_AUD2_PJ7,
+};
+
+static const unsigned int gpio_aud3_pk0_pins[] = {
+ TEGRA_PIN_GPIO_AUD3_PK0,
+};
+
+static const unsigned int gen7_i2c_scl_pl0_pins[] = {
+ TEGRA_PIN_GEN7_I2C_SCL_PL0,
+};
+
+static const unsigned int gen7_i2c_sda_pl1_pins[] = {
+ TEGRA_PIN_GEN7_I2C_SDA_PL1,
+};
+
+static const unsigned int gen9_i2c_scl_pl2_pins[] = {
+ TEGRA_PIN_GEN9_I2C_SCL_PL2,
+};
+
+static const unsigned int gen9_i2c_sda_pl3_pins[] = {
+ TEGRA_PIN_GEN9_I2C_SDA_PL3,
+};
+
+static const unsigned int usb_vbus_en0_pl4_pins[] = {
+ TEGRA_PIN_USB_VBUS_EN0_PL4,
+};
+
+static const unsigned int usb_vbus_en1_pl5_pins[] = {
+ TEGRA_PIN_USB_VBUS_EN1_PL5,
+};
+
+static const unsigned int gp_pwm6_pl6_pins[] = {
+ TEGRA_PIN_GP_PWM6_PL6,
+};
+
+static const unsigned int gp_pwm7_pl7_pins[] = {
+ TEGRA_PIN_GP_PWM7_PL7,
+};
+
+static const unsigned int dmic1_dat_pm0_pins[] = {
+ TEGRA_PIN_DMIC1_DAT_PM0,
+};
+
+static const unsigned int dmic1_clk_pm1_pins[] = {
+ TEGRA_PIN_DMIC1_CLK_PM1,
+};
+
+static const unsigned int dmic2_dat_pm2_pins[] = {
+ TEGRA_PIN_DMIC2_DAT_PM2,
+};
+
+static const unsigned int dmic2_clk_pm3_pins[] = {
+ TEGRA_PIN_DMIC2_CLK_PM3,
+};
+
+static const unsigned int dmic4_dat_pm4_pins[] = {
+ TEGRA_PIN_DMIC4_DAT_PM4,
+};
+
+static const unsigned int dmic4_clk_pm5_pins[] = {
+ TEGRA_PIN_DMIC4_CLK_PM5,
+};
+
+static const unsigned int gpio_cam1_pn0_pins[] = {
+ TEGRA_PIN_GPIO_CAM1_PN0,
+};
+
+static const unsigned int gpio_cam2_pn1_pins[] = {
+ TEGRA_PIN_GPIO_CAM2_PN1,
+};
+
+static const unsigned int gpio_cam3_pn2_pins[] = {
+ TEGRA_PIN_GPIO_CAM3_PN2,
+};
+
+static const unsigned int gpio_cam4_pn3_pins[] = {
+ TEGRA_PIN_GPIO_CAM4_PN3,
+};
+
+static const unsigned int gpio_cam5_pn4_pins[] = {
+ TEGRA_PIN_GPIO_CAM5_PN4,
+};
+
+static const unsigned int gpio_cam6_pn5_pins[] = {
+ TEGRA_PIN_GPIO_CAM6_PN5,
+};
+
+static const unsigned int gpio_cam7_pn6_pins[] = {
+ TEGRA_PIN_GPIO_CAM7_PN6,
+};
+
+static const unsigned int extperiph1_clk_po0_pins[] = {
+ TEGRA_PIN_EXTPERIPH1_CLK_PO0,
+};
+
+static const unsigned int extperiph2_clk_po1_pins[] = {
+ TEGRA_PIN_EXTPERIPH2_CLK_PO1,
+};
+
+static const unsigned int cam_i2c_scl_po2_pins[] = {
+ TEGRA_PIN_CAM_I2C_SCL_PO2,
+};
+
+static const unsigned int cam_i2c_sda_po3_pins[] = {
+ TEGRA_PIN_CAM_I2C_SDA_PO3,
+};
+
+static const unsigned int dp_aux_ch0_hpd_pp0_pins[] = {
+ TEGRA_PIN_DP_AUX_CH0_HPD_PP0,
+};
+
+static const unsigned int dp_aux_ch1_hpd_pp1_pins[] = {
+ TEGRA_PIN_DP_AUX_CH1_HPD_PP1,
+};
+
+static const unsigned int hdmi_cec_pp2_pins[] = {
+ TEGRA_PIN_HDMI_CEC_PP2,
+};
+
+static const unsigned int gpio_edp0_pp3_pins[] = {
+ TEGRA_PIN_GPIO_EDP0_PP3,
+};
+
+static const unsigned int gpio_edp1_pp4_pins[] = {
+ TEGRA_PIN_GPIO_EDP1_PP4,
+};
+
+static const unsigned int gpio_edp2_pp5_pins[] = {
+ TEGRA_PIN_GPIO_EDP2_PP5,
+};
+
+static const unsigned int gpio_edp3_pp6_pins[] = {
+ TEGRA_PIN_GPIO_EDP3_PP6,
+};
+
+static const unsigned int directdc1_clk_pq0_pins[] = {
+ TEGRA_PIN_DIRECTDC1_CLK_PQ0,
+};
+
+static const unsigned int directdc1_in_pq1_pins[] = {
+ TEGRA_PIN_DIRECTDC1_IN_PQ1,
+};
+
+static const unsigned int directdc1_out0_pq2_pins[] = {
+ TEGRA_PIN_DIRECTDC1_OUT0_PQ2,
+};
+
+static const unsigned int directdc1_out1_pq3_pins[] = {
+ TEGRA_PIN_DIRECTDC1_OUT1_PQ3,
+};
+
+static const unsigned int directdc1_out2_pq4_pins[] = {
+ TEGRA_PIN_DIRECTDC1_OUT2_PQ4,
+};
+
+static const unsigned int directdc1_out3_pq5_pins[] = {
+ TEGRA_PIN_DIRECTDC1_OUT3_PQ5,
+};
+
+static const unsigned int qspi_sck_pr0_pins[] = {
+ TEGRA_PIN_QSPI_SCK_PR0,
+};
+
+static const unsigned int qspi_io0_pr1_pins[] = {
+ TEGRA_PIN_QSPI_IO0_PR1,
+};
+
+static const unsigned int qspi_io1_pr2_pins[] = {
+ TEGRA_PIN_QSPI_IO1_PR2,
+};
+
+static const unsigned int qspi_io2_pr3_pins[] = {
+ TEGRA_PIN_QSPI_IO2_PR3,
+};
+
+static const unsigned int qspi_io3_pr4_pins[] = {
+ TEGRA_PIN_QSPI_IO3_PR4,
+};
+
+static const unsigned int qspi_cs_n_pr5_pins[] = {
+ TEGRA_PIN_QSPI_CS_N_PR5,
+};
+
+static const unsigned int pwr_i2c_scl_ps0_pins[] = {
+ TEGRA_PIN_PWR_I2C_SCL_PS0,
+};
+
+static const unsigned int pwr_i2c_sda_ps1_pins[] = {
+ TEGRA_PIN_PWR_I2C_SDA_PS1,
+};
+
+static const unsigned int batt_oc_ps2_pins[] = {
+ TEGRA_PIN_BATT_OC_PS2,
+};
+
+static const unsigned int safe_state_ps3_pins[] = {
+ TEGRA_PIN_SAFE_STATE_PS3,
+};
+
+static const unsigned int vcomp_alert_ps4_pins[] = {
+ TEGRA_PIN_VCOMP_ALERT_PS4,
+};
+
+static const unsigned int uart1_tx_pt0_pins[] = {
+ TEGRA_PIN_UART1_TX_PT0,
+};
+
+static const unsigned int uart1_rx_pt1_pins[] = {
+ TEGRA_PIN_UART1_RX_PT1,
+};
+
+static const unsigned int uart1_rts_pt2_pins[] = {
+ TEGRA_PIN_UART1_RTS_PT2,
+};
+
+static const unsigned int uart1_cts_pt3_pins[] = {
+ TEGRA_PIN_UART1_CTS_PT3,
+};
+
+static const unsigned int gpio_dis0_pu0_pins[] = {
+ TEGRA_PIN_GPIO_DIS0_PU0,
+};
+
+static const unsigned int gpio_dis1_pu1_pins[] = {
+ TEGRA_PIN_GPIO_DIS1_PU1,
+};
+
+static const unsigned int gpio_dis2_pu2_pins[] = {
+ TEGRA_PIN_GPIO_DIS2_PU2,
+};
+
+static const unsigned int gpio_dis3_pu3_pins[] = {
+ TEGRA_PIN_GPIO_DIS3_PU3,
+};
+
+static const unsigned int gpio_dis4_pu4_pins[] = {
+ TEGRA_PIN_GPIO_DIS4_PU4,
+};
+
+static const unsigned int gpio_dis5_pu5_pins[] = {
+ TEGRA_PIN_GPIO_DIS5_PU5,
+};
+
+static const unsigned int gpio_sen0_pv0_pins[] = {
+ TEGRA_PIN_GPIO_SEN0_PV0,
+};
+
+static const unsigned int gpio_sen1_pv1_pins[] = {
+ TEGRA_PIN_GPIO_SEN1_PV1,
+};
+
+static const unsigned int gpio_sen2_pv2_pins[] = {
+ TEGRA_PIN_GPIO_SEN2_PV2,
+};
+
+static const unsigned int gpio_sen3_pv3_pins[] = {
+ TEGRA_PIN_GPIO_SEN3_PV3,
+};
+
+static const unsigned int gpio_sen4_pv4_pins[] = {
+ TEGRA_PIN_GPIO_SEN4_PV4,
+};
+
+static const unsigned int gpio_sen5_pv5_pins[] = {
+ TEGRA_PIN_GPIO_SEN5_PV5,
+};
+
+static const unsigned int gpio_sen6_pv6_pins[] = {
+ TEGRA_PIN_GPIO_SEN6_PV6,
+};
+
+static const unsigned int gpio_sen7_pv7_pins[] = {
+ TEGRA_PIN_GPIO_SEN7_PV7,
+};
+
+static const unsigned int gen8_i2c_scl_pw0_pins[] = {
+ TEGRA_PIN_GEN8_I2C_SCL_PW0,
+};
+
+static const unsigned int gen8_i2c_sda_pw1_pins[] = {
+ TEGRA_PIN_GEN8_I2C_SDA_PW1,
+};
+
+static const unsigned int uart3_tx_pw2_pins[] = {
+ TEGRA_PIN_UART3_TX_PW2,
+};
+
+static const unsigned int uart3_rx_pw3_pins[] = {
+ TEGRA_PIN_UART3_RX_PW3,
+};
+
+static const unsigned int uart3_rts_pw4_pins[] = {
+ TEGRA_PIN_UART3_RTS_PW4,
+};
+
+static const unsigned int uart3_cts_pw5_pins[] = {
+ TEGRA_PIN_UART3_CTS_PW5,
+};
+
+static const unsigned int uart7_tx_pw6_pins[] = {
+ TEGRA_PIN_UART7_TX_PW6,
+};
+
+static const unsigned int uart7_rx_pw7_pins[] = {
+ TEGRA_PIN_UART7_RX_PW7,
+};
+
+static const unsigned int uart2_tx_px0_pins[] = {
+ TEGRA_PIN_UART2_TX_PX0,
+};
+
+static const unsigned int uart2_rx_px1_pins[] = {
+ TEGRA_PIN_UART2_RX_PX1,
+};
+
+static const unsigned int uart2_rts_px2_pins[] = {
+ TEGRA_PIN_UART2_RTS_PX2,
+};
+
+static const unsigned int uart2_cts_px3_pins[] = {
+ TEGRA_PIN_UART2_CTS_PX3,
+};
+
+static const unsigned int uart5_tx_px4_pins[] = {
+ TEGRA_PIN_UART5_TX_PX4,
+};
+
+static const unsigned int uart5_rx_px5_pins[] = {
+ TEGRA_PIN_UART5_RX_PX5,
+};
+
+static const unsigned int uart5_rts_px6_pins[] = {
+ TEGRA_PIN_UART5_RTS_PX6,
+};
+
+static const unsigned int uart5_cts_px7_pins[] = {
+ TEGRA_PIN_UART5_CTS_PX7,
+};
+
+static const unsigned int gpio_mdm1_py0_pins[] = {
+ TEGRA_PIN_GPIO_MDM1_PY0,
+};
+
+static const unsigned int gpio_mdm2_py1_pins[] = {
+ TEGRA_PIN_GPIO_MDM2_PY1,
+};
+
+static const unsigned int gpio_mdm3_py2_pins[] = {
+ TEGRA_PIN_GPIO_MDM3_PY2,
+};
+
+static const unsigned int gpio_mdm4_py3_pins[] = {
+ TEGRA_PIN_GPIO_MDM4_PY3,
+};
+
+static const unsigned int gpio_mdm5_py4_pins[] = {
+ TEGRA_PIN_GPIO_MDM5_PY4,
+};
+
+static const unsigned int gpio_mdm6_py5_pins[] = {
+ TEGRA_PIN_GPIO_MDM6_PY5,
+};
+
+static const unsigned int gpio_mdm7_py6_pins[] = {
+ TEGRA_PIN_GPIO_MDM7_PY6,
+};
+
+static const unsigned int can1_dout_pz0_pins[] = {
+ TEGRA_PIN_CAN1_DOUT_PZ0,
+};
+
+static const unsigned int can1_din_pz1_pins[] = {
+ TEGRA_PIN_CAN1_DIN_PZ1,
+};
+
+static const unsigned int can0_dout_pz2_pins[] = {
+ TEGRA_PIN_CAN0_DOUT_PZ2,
+};
+
+static const unsigned int can0_din_pz3_pins[] = {
+ TEGRA_PIN_CAN0_DIN_PZ3,
+};
+
+static const unsigned int can_gpio0_paa0_pins[] = {
+ TEGRA_PIN_CAN_GPIO0_PAA0,
+};
+
+static const unsigned int can_gpio1_paa1_pins[] = {
+ TEGRA_PIN_CAN_GPIO1_PAA1,
+};
+
+static const unsigned int can_gpio2_paa2_pins[] = {
+ TEGRA_PIN_CAN_GPIO2_PAA2,
+};
+
+static const unsigned int can_gpio3_paa3_pins[] = {
+ TEGRA_PIN_CAN_GPIO3_PAA3,
+};
+
+static const unsigned int can_gpio4_paa4_pins[] = {
+ TEGRA_PIN_CAN_GPIO4_PAA4,
+};
+
+static const unsigned int can_gpio5_paa5_pins[] = {
+ TEGRA_PIN_CAN_GPIO5_PAA5,
+};
+
+static const unsigned int can_gpio6_paa6_pins[] = {
+ TEGRA_PIN_CAN_GPIO6_PAA6,
+};
+
+static const unsigned int can_gpio7_paa7_pins[] = {
+ TEGRA_PIN_CAN_GPIO7_PAA7,
+};
+
+static const unsigned int ufs0_ref_clk_pbb0_pins[] = {
+ TEGRA_PIN_UFS0_REF_CLK_PBB0,
+};
+
+static const unsigned int ufs0_rst_pbb1_pins[] = {
+ TEGRA_PIN_UFS0_RST_PBB1,
+};
+
+static const unsigned int dap4_sclk_pcc0_pins[] = {
+ TEGRA_PIN_DAP4_SCLK_PCC0,
+};
+
+static const unsigned int dap4_dout_pcc1_pins[] = {
+ TEGRA_PIN_DAP4_DOUT_PCC1,
+};
+
+static const unsigned int dap4_din_pcc2_pins[] = {
+ TEGRA_PIN_DAP4_DIN_PCC2,
+};
+
+static const unsigned int dap4_fs_pcc3_pins[] = {
+ TEGRA_PIN_DAP4_FS_PCC3,
+};
+
+static const unsigned int gpio_sen8_pee0_pins[] = {
+ TEGRA_PIN_GPIO_SEN8_PEE0,
+};
+
+static const unsigned int gpio_sen9_pee1_pins[] = {
+ TEGRA_PIN_GPIO_SEN9_PEE1,
+};
+
+static const unsigned int touch_clk_pee2_pins[] = {
+ TEGRA_PIN_TOUCH_CLK_PEE2,
+};
+
+static const unsigned int power_on_pff0_pins[] = {
+ TEGRA_PIN_POWER_ON_PFF0,
+};
+
+static const unsigned int gpio_sw1_pff1_pins[] = {
+ TEGRA_PIN_GPIO_SW1_PFF1,
+};
+
+static const unsigned int gpio_sw2_pff2_pins[] = {
+ TEGRA_PIN_GPIO_SW2_PFF2,
+};
+
+static const unsigned int gpio_sw3_pff3_pins[] = {
+ TEGRA_PIN_GPIO_SW3_PFF3,
+};
+
+static const unsigned int gpio_sw4_pff4_pins[] = {
+ TEGRA_PIN_GPIO_SW4_PFF4,
+};
+
+static const unsigned int directdc_comp_pins[] = {
+ TEGRA_PIN_DIRECTDC_COMP,
+};
+
+static const unsigned int sdmmc1_comp_pins[] = {
+ TEGRA_PIN_SDMMC1_COMP,
+};
+
+static const unsigned int eqos_comp_pins[] = {
+ TEGRA_PIN_EQOS_COMP,
+};
+
+static const unsigned int sdmmc3_comp_pins[] = {
+ TEGRA_PIN_SDMMC3_COMP,
+};
+
+static const unsigned int qspi_comp_pins[] = {
+ TEGRA_PIN_QSPI_COMP,
+};
+
+static const unsigned int shutdown_pins[] = {
+ TEGRA_PIN_SHUTDOWN,
+};
+
+static const unsigned int pmu_int_pins[] = {
+ TEGRA_PIN_PMU_INT,
+};
+
+static const unsigned int soc_pwr_req_pins[] = {
+ TEGRA_PIN_SOC_PWR_REQ,
+};
+
+static const unsigned int clk_32k_in_pins[] = {
+ TEGRA_PIN_CLK_32K_IN,
+};
+
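+/*
+ * The SDMMC4 pads are register-only on Tegra186 (no mappable pins); the
+ * empty arrays keep the pg_name##_pins references in PINGROUP() valid.
+ */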
+static const unsigned int sdmmc4_clk_pins[] = {};
+
+static const unsigned int sdmmc4_cmd_pins[] = {};
+
+static const unsigned int sdmmc4_dqs_pins[] = {};
+
+static const unsigned int sdmmc4_dat7_pins[] = {};
+
+static const unsigned int sdmmc4_dat6_pins[] = {};
+
+static const unsigned int sdmmc4_dat5_pins[] = {};
+
+static const unsigned int sdmmc4_dat4_pins[] = {};
+
+static const unsigned int sdmmc4_dat3_pins[] = {};
+
+static const unsigned int sdmmc4_dat2_pins[] = {};
+
+static const unsigned int sdmmc4_dat1_pins[] = {};
+
+static const unsigned int sdmmc4_dat0_pins[] = {};
+
+/* Define a unique ID for each function */
+enum tegra_mux_dt {
+ TEGRA_MUX_RSVD0,
+ TEGRA_MUX_RSVD1,
+ TEGRA_MUX_RSVD2,
+ TEGRA_MUX_RSVD3,
+ TEGRA_MUX_TOUCH,
+ TEGRA_MUX_UARTC,
+ TEGRA_MUX_I2C8,
+ TEGRA_MUX_UARTG,
+ TEGRA_MUX_SPI2,
+ TEGRA_MUX_GP,
+ TEGRA_MUX_DCA,
+ TEGRA_MUX_WDT,
+ TEGRA_MUX_I2C2,
+ TEGRA_MUX_CAN1,
+ TEGRA_MUX_CAN0,
+ TEGRA_MUX_DMIC3,
+ TEGRA_MUX_DMIC5,
+ TEGRA_MUX_GPIO,
+ TEGRA_MUX_DSPK1,
+ TEGRA_MUX_DSPK0,
+ TEGRA_MUX_SPDIF,
+ TEGRA_MUX_AUD,
+ TEGRA_MUX_I2S1,
+ TEGRA_MUX_DMIC1,
+ TEGRA_MUX_DMIC2,
+ TEGRA_MUX_I2S3,
+ TEGRA_MUX_DMIC4,
+ TEGRA_MUX_I2S4,
+ TEGRA_MUX_EXTPERIPH2,
+ TEGRA_MUX_EXTPERIPH1,
+ TEGRA_MUX_I2C3,
+ TEGRA_MUX_VGP1,
+ TEGRA_MUX_VGP2,
+ TEGRA_MUX_VGP3,
+ TEGRA_MUX_VGP4,
+ TEGRA_MUX_VGP5,
+ TEGRA_MUX_VGP6,
+ TEGRA_MUX_EXTPERIPH3,
+ TEGRA_MUX_EXTPERIPH4,
+ TEGRA_MUX_SPI4,
+ TEGRA_MUX_I2S2,
+ TEGRA_MUX_UARTD,
+ TEGRA_MUX_I2C1,
+ TEGRA_MUX_UARTA,
+ TEGRA_MUX_DIRECTDC1,
+ TEGRA_MUX_DIRECTDC,
+ TEGRA_MUX_IQC0,
+ TEGRA_MUX_IQC1,
+ TEGRA_MUX_I2S6,
+ TEGRA_MUX_DTV,
+ TEGRA_MUX_UARTF,
+ TEGRA_MUX_SDMMC3,
+ TEGRA_MUX_SDMMC4,
+ TEGRA_MUX_SDMMC1,
+ TEGRA_MUX_DP,
+ TEGRA_MUX_HDMI,
+ TEGRA_MUX_PE2,
+ TEGRA_MUX_SATA,
+ TEGRA_MUX_PE,
+ TEGRA_MUX_PE1,
+ TEGRA_MUX_PE0,
+ TEGRA_MUX_SOC,
+ TEGRA_MUX_EQOS,
+ TEGRA_MUX_SDMMC2,
+ TEGRA_MUX_QSPI,
+ TEGRA_MUX_SCE,
+ TEGRA_MUX_I2C5,
+ TEGRA_MUX_DISPLAYA,
+ TEGRA_MUX_DISPLAYB,
+ TEGRA_MUX_DCC,
+ TEGRA_MUX_DCB,
+ TEGRA_MUX_SPI1,
+ TEGRA_MUX_UARTB,
+ TEGRA_MUX_UARTE,
+ TEGRA_MUX_SPI3,
+ TEGRA_MUX_NV,
+ TEGRA_MUX_CCLA,
+ TEGRA_MUX_I2C7,
+ TEGRA_MUX_I2C9,
+ TEGRA_MUX_I2S5,
+ TEGRA_MUX_USB,
+ TEGRA_MUX_UFS0,
+};
+
+/* Table of function names; must be kept in the same order as enum tegra_mux_dt above */
+#define TEGRA_PIN_FUNCTION(lid) #lid
+
+static const char * const tegra186_functions[] = {
+ TEGRA_PIN_FUNCTION(rsvd0),
+ TEGRA_PIN_FUNCTION(rsvd1),
+ TEGRA_PIN_FUNCTION(rsvd2),
+ TEGRA_PIN_FUNCTION(rsvd3),
+ TEGRA_PIN_FUNCTION(touch),
+ TEGRA_PIN_FUNCTION(uartc),
+ TEGRA_PIN_FUNCTION(i2c8),
+ TEGRA_PIN_FUNCTION(uartg),
+ TEGRA_PIN_FUNCTION(spi2),
+ TEGRA_PIN_FUNCTION(gp),
+ TEGRA_PIN_FUNCTION(dca),
+ TEGRA_PIN_FUNCTION(wdt),
+ TEGRA_PIN_FUNCTION(i2c2),
+ TEGRA_PIN_FUNCTION(can1),
+ TEGRA_PIN_FUNCTION(can0),
+ TEGRA_PIN_FUNCTION(dmic3),
+ TEGRA_PIN_FUNCTION(dmic5),
+ TEGRA_PIN_FUNCTION(gpio),
+ TEGRA_PIN_FUNCTION(dspk1),
+ TEGRA_PIN_FUNCTION(dspk0),
+ TEGRA_PIN_FUNCTION(spdif),
+ TEGRA_PIN_FUNCTION(aud),
+ TEGRA_PIN_FUNCTION(i2s1),
+ TEGRA_PIN_FUNCTION(dmic1),
+ TEGRA_PIN_FUNCTION(dmic2),
+ TEGRA_PIN_FUNCTION(i2s3),
+ TEGRA_PIN_FUNCTION(dmic4),
+ TEGRA_PIN_FUNCTION(i2s4),
+ TEGRA_PIN_FUNCTION(extperiph2),
+ TEGRA_PIN_FUNCTION(extperiph1),
+ TEGRA_PIN_FUNCTION(i2c3),
+ TEGRA_PIN_FUNCTION(vgp1),
+ TEGRA_PIN_FUNCTION(vgp2),
+ TEGRA_PIN_FUNCTION(vgp3),
+ TEGRA_PIN_FUNCTION(vgp4),
+ TEGRA_PIN_FUNCTION(vgp5),
+ TEGRA_PIN_FUNCTION(vgp6),
+ TEGRA_PIN_FUNCTION(extperiph3),
+ TEGRA_PIN_FUNCTION(extperiph4),
+ TEGRA_PIN_FUNCTION(spi4),
+ TEGRA_PIN_FUNCTION(i2s2),
+ TEGRA_PIN_FUNCTION(uartd),
+ TEGRA_PIN_FUNCTION(i2c1),
+ TEGRA_PIN_FUNCTION(uarta),
+ TEGRA_PIN_FUNCTION(directdc1),
+ TEGRA_PIN_FUNCTION(directdc),
+ TEGRA_PIN_FUNCTION(iqc0),
+ TEGRA_PIN_FUNCTION(iqc1),
+ TEGRA_PIN_FUNCTION(i2s6),
+ TEGRA_PIN_FUNCTION(dtv),
+ TEGRA_PIN_FUNCTION(uartf),
+ TEGRA_PIN_FUNCTION(sdmmc3),
+ TEGRA_PIN_FUNCTION(sdmmc4),
+ TEGRA_PIN_FUNCTION(sdmmc1),
+ TEGRA_PIN_FUNCTION(dp),
+ TEGRA_PIN_FUNCTION(hdmi),
+ TEGRA_PIN_FUNCTION(pe2),
+ TEGRA_PIN_FUNCTION(sata),
+ TEGRA_PIN_FUNCTION(pe),
+ TEGRA_PIN_FUNCTION(pe1),
+ TEGRA_PIN_FUNCTION(pe0),
+ TEGRA_PIN_FUNCTION(soc),
+ TEGRA_PIN_FUNCTION(eqos),
+ TEGRA_PIN_FUNCTION(sdmmc2),
+ TEGRA_PIN_FUNCTION(qspi),
+ TEGRA_PIN_FUNCTION(sce),
+ TEGRA_PIN_FUNCTION(i2c5),
+ TEGRA_PIN_FUNCTION(displaya),
+ TEGRA_PIN_FUNCTION(displayb),
+ TEGRA_PIN_FUNCTION(dcc),
+ TEGRA_PIN_FUNCTION(dcb),
+ TEGRA_PIN_FUNCTION(spi1),
+ TEGRA_PIN_FUNCTION(uartb),
+ TEGRA_PIN_FUNCTION(uarte),
+ TEGRA_PIN_FUNCTION(spi3),
+ TEGRA_PIN_FUNCTION(nv),
+ TEGRA_PIN_FUNCTION(ccla),
+ TEGRA_PIN_FUNCTION(i2c7),
+ TEGRA_PIN_FUNCTION(i2c9),
+ TEGRA_PIN_FUNCTION(i2s5),
+ TEGRA_PIN_FUNCTION(usb),
+ TEGRA_PIN_FUNCTION(ufs0),
+};
+
+#define PINGROUP_REG_Y(r) ((r))
+#define PINGROUP_REG_N(r) -1
+
+#define DRV_PINGROUP_Y(r) ((r))
+#define DRV_PINGROUP_N(r) -1
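+
+/* The _Y() variants return the given register offset; the _N() variants return -1 to mark the register as absent. */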
+
+#define DRV_PINGROUP_ENTRY_N(pg_name) \
+ .drv_reg = -1, \
+ .drv_bank = -1, \
+ .drvdn_bit = -1, \
+ .drvdn_width = -1, \
+ .drvup_bit = -1, \
+ .drvup_width = -1, \
+ .slwr_bit = -1, \
+ .slwr_width = -1, \
+ .slwf_bit = -1, \
+ .slwf_width = -1
+
+#define DRV_PINGROUP_ENTRY_Y(r, drvdn_b, drvdn_w, drvup_b, \
+ drvup_w, slwr_b, slwr_w, slwf_b, \
+ slwf_w, bank) \
+ .drv_reg = ((r)), \
+ .drv_bank = bank, \
+ .drvdn_bit = drvdn_b, \
+ .drvdn_width = drvdn_w, \
+ .drvup_bit = drvup_b, \
+ .drvup_width = drvup_w, \
+ .slwr_bit = slwr_b, \
+ .slwr_width = slwr_w, \
+ .slwf_bit = slwf_b, \
+ .slwf_width = slwf_w
+
+#define PIN_PINGROUP_ENTRY_N(pg_name) \
+ .mux_reg = -1, \
+ .pupd_reg = -1, \
+ .tri_reg = -1, \
+ .einput_bit = -1, \
+ .e_io_hv_bit = -1, \
+ .odrain_bit = -1, \
+ .lock_bit = -1, \
+ .parked_bit = -1, \
+ .lpmd_bit = -1, \
+ .drvtype_bit = -1, \
+ .lpdr_bit = -1, \
+ .pbias_buf_bit = -1, \
+ .preemp_bit = -1, \
+ .rfu_in_bit = -1
+
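+/*
+ * All pins share one control register layout: mux select at bit 0,
+ * pull-up/down at bit 2, tristate at bit 4 and drvtype at bit 13; the
+ * remaining bit positions vary per pin and are passed in as arguments.
+ */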
+#define PIN_PINGROUP_ENTRY_Y(r, bank, pupd, e_io_hv, e_lpbk, e_input, \
+ e_lpdr, e_pbias_buf, gpio_sfio_sel, \
+ e_od, schmitt_b, drvtype, epreemp, \
+ io_reset, rfu_in) \
+ .mux_reg = PINGROUP_REG_Y(r), \
+ .lpmd_bit = -1, \
+ .lock_bit = -1, \
+ .hsm_bit = -1, \
+ .mux_bank = bank, \
+ .mux_bit = 0, \
+ .pupd_reg = PINGROUP_REG_##pupd(r), \
+ .pupd_bank = bank, \
+ .pupd_bit = 2, \
+ .tri_reg = PINGROUP_REG_Y(r), \
+ .tri_bank = bank, \
+ .tri_bit = 4, \
+ .einput_bit = e_input, \
+ .sfsel_bit = gpio_sfio_sel, \
+ .odrain_bit = e_od, \
+ .schmitt_bit = schmitt_b, \
+ .drvtype_bit = 13, \
+ .lpdr_bit = e_lpdr,
+
+/* Main drive pin groups */
+#define drive_gpio_aud3_pk0 DRV_PINGROUP_ENTRY_Y(0x1004, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gpio_aud2_pj7 DRV_PINGROUP_ENTRY_Y(0x100c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gpio_aud1_pj6 DRV_PINGROUP_ENTRY_Y(0x1014, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gpio_aud0_pj5 DRV_PINGROUP_ENTRY_Y(0x101c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_aud_mclk_pj4 DRV_PINGROUP_ENTRY_Y(0x1024, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_dap1_fs_pj3 DRV_PINGROUP_ENTRY_Y(0x102c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_dap1_din_pj2 DRV_PINGROUP_ENTRY_Y(0x1034, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_dap1_dout_pj1 DRV_PINGROUP_ENTRY_Y(0x103c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_dap1_sclk_pj0 DRV_PINGROUP_ENTRY_Y(0x1044, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_dmic1_clk_pm1 DRV_PINGROUP_ENTRY_Y(0x2004, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_dmic1_dat_pm0 DRV_PINGROUP_ENTRY_Y(0x200c, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_dmic2_dat_pm2 DRV_PINGROUP_ENTRY_Y(0x2014, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_dmic2_clk_pm3 DRV_PINGROUP_ENTRY_Y(0x201c, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_dmic4_dat_pm4 DRV_PINGROUP_ENTRY_Y(0x2024, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_dmic4_clk_pm5 DRV_PINGROUP_ENTRY_Y(0x202c, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_dap4_fs_pcc3 DRV_PINGROUP_ENTRY_Y(0x2034, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_dap4_din_pcc2 DRV_PINGROUP_ENTRY_Y(0x203c, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_dap4_dout_pcc1 DRV_PINGROUP_ENTRY_Y(0x2044, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_dap4_sclk_pcc0 DRV_PINGROUP_ENTRY_Y(0x204c, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_extperiph2_clk_po1 DRV_PINGROUP_ENTRY_Y(0x0004, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_extperiph1_clk_po0 DRV_PINGROUP_ENTRY_Y(0x000c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_cam_i2c_sda_po3 DRV_PINGROUP_ENTRY_Y(0x0014, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_cam_i2c_scl_po2 DRV_PINGROUP_ENTRY_Y(0x001c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gpio_cam1_pn0 DRV_PINGROUP_ENTRY_Y(0x0024, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gpio_cam2_pn1 DRV_PINGROUP_ENTRY_Y(0x002c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gpio_cam3_pn2 DRV_PINGROUP_ENTRY_Y(0x0034, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gpio_cam4_pn3 DRV_PINGROUP_ENTRY_Y(0x003c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gpio_cam5_pn4 DRV_PINGROUP_ENTRY_Y(0x0044, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gpio_cam6_pn5 DRV_PINGROUP_ENTRY_Y(0x004c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gpio_cam7_pn6 DRV_PINGROUP_ENTRY_Y(0x0054, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_dap2_din_pc3 DRV_PINGROUP_ENTRY_Y(0x4004, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_dap2_dout_pc2 DRV_PINGROUP_ENTRY_Y(0x400c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_dap2_fs_pc4 DRV_PINGROUP_ENTRY_Y(0x4014, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_dap2_sclk_pc1 DRV_PINGROUP_ENTRY_Y(0x401c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_uart4_cts_pb3 DRV_PINGROUP_ENTRY_Y(0x4024, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_uart4_rts_pb2 DRV_PINGROUP_ENTRY_Y(0x402c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_uart4_rx_pb1 DRV_PINGROUP_ENTRY_Y(0x4034, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_uart4_tx_pb0 DRV_PINGROUP_ENTRY_Y(0x403c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gpio_wan4_pc0 DRV_PINGROUP_ENTRY_Y(0x4044, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gpio_wan3_pb6 DRV_PINGROUP_ENTRY_Y(0x404c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gpio_wan2_pb5 DRV_PINGROUP_ENTRY_Y(0x4054, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gpio_wan1_pb4 DRV_PINGROUP_ENTRY_Y(0x405c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gen1_i2c_scl_pc5 DRV_PINGROUP_ENTRY_Y(0x4064, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gen1_i2c_sda_pc6 DRV_PINGROUP_ENTRY_Y(0x406c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_uart1_cts_pt3 DRV_PINGROUP_ENTRY_Y(0x5004, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_uart1_rts_pt2 DRV_PINGROUP_ENTRY_Y(0x500c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_uart1_rx_pt1 DRV_PINGROUP_ENTRY_Y(0x5014, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_uart1_tx_pt0 DRV_PINGROUP_ENTRY_Y(0x501c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_directdc1_out3_pq5 DRV_PINGROUP_ENTRY_Y(0x502c, 12, 9, 24, 8, -1, -1, -1, -1, 0)
+#define drive_directdc1_out2_pq4 DRV_PINGROUP_ENTRY_Y(0x5034, 12, 9, 24, 8, -1, -1, -1, -1, 0)
+#define drive_directdc1_out1_pq3 DRV_PINGROUP_ENTRY_Y(0x503c, 12, 9, 24, 8, -1, -1, -1, -1, 0)
+#define drive_directdc1_out0_pq2 DRV_PINGROUP_ENTRY_Y(0x5044, 12, 9, 24, 8, -1, -1, -1, -1, 0)
+#define drive_directdc1_in_pq1 DRV_PINGROUP_ENTRY_Y(0x504c, 12, 9, 24, 8, -1, -1, -1, -1, 0)
+#define drive_directdc1_clk_pq0 DRV_PINGROUP_ENTRY_Y(0x5054, 12, 9, 24, 8, -1, -1, -1, -1, 0)
+#define drive_gpio_pq0_pi0 DRV_PINGROUP_ENTRY_Y(0x3004, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_gpio_pq1_pi1 DRV_PINGROUP_ENTRY_Y(0x300c, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_gpio_pq2_pi2 DRV_PINGROUP_ENTRY_Y(0x3014, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_gpio_pq3_pi3 DRV_PINGROUP_ENTRY_Y(0x301c, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_gpio_pq4_pi4 DRV_PINGROUP_ENTRY_Y(0x3024, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_gpio_pq5_pi5 DRV_PINGROUP_ENTRY_Y(0x302c, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_gpio_pq6_pi6 DRV_PINGROUP_ENTRY_Y(0x3034, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_gpio_pq7_pi7 DRV_PINGROUP_ENTRY_Y(0x303c, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_gpio_edp2_pp5 DRV_PINGROUP_ENTRY_Y(0x10004, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gpio_edp3_pp6 DRV_PINGROUP_ENTRY_Y(0x1000c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gpio_edp0_pp3 DRV_PINGROUP_ENTRY_Y(0x10014, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gpio_edp1_pp4 DRV_PINGROUP_ENTRY_Y(0x1001c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_dp_aux_ch0_hpd_pp0 DRV_PINGROUP_ENTRY_Y(0x10024, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_dp_aux_ch1_hpd_pp1 DRV_PINGROUP_ENTRY_Y(0x1002c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_hdmi_cec_pp2 DRV_PINGROUP_ENTRY_Y(0x10034, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_pex_l2_clkreq_n_pa6 DRV_PINGROUP_ENTRY_Y(0x7004, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_pex_wake_n_pa2 DRV_PINGROUP_ENTRY_Y(0x700c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_pex_l1_clkreq_n_pa4 DRV_PINGROUP_ENTRY_Y(0x7014, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_pex_l1_rst_n_pa3 DRV_PINGROUP_ENTRY_Y(0x701c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_pex_l0_clkreq_n_pa1 DRV_PINGROUP_ENTRY_Y(0x7024, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_pex_l0_rst_n_pa0 DRV_PINGROUP_ENTRY_Y(0x702c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_pex_l2_rst_n_pa5 DRV_PINGROUP_ENTRY_Y(0x7034, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_sdmmc1_clk_pd0 DRV_PINGROUP_ENTRY_Y(0x8004, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_sdmmc1_cmd_pd1 DRV_PINGROUP_ENTRY_Y(0x800c, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_sdmmc1_dat3_pd5 DRV_PINGROUP_ENTRY_Y(0x8018, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_sdmmc1_dat2_pd4 DRV_PINGROUP_ENTRY_Y(0x8020, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_sdmmc1_dat1_pd3 DRV_PINGROUP_ENTRY_Y(0x8028, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_sdmmc1_dat0_pd2 DRV_PINGROUP_ENTRY_Y(0x8030, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_eqos_td3_pe4 DRV_PINGROUP_ENTRY_Y(0x9004, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_eqos_td2_pe3 DRV_PINGROUP_ENTRY_Y(0x900c, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_eqos_td1_pe2 DRV_PINGROUP_ENTRY_Y(0x9014, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_eqos_td0_pe1 DRV_PINGROUP_ENTRY_Y(0x901c, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_eqos_rd3_pf1 DRV_PINGROUP_ENTRY_Y(0x9024, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_eqos_rd2_pf0 DRV_PINGROUP_ENTRY_Y(0x902c, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_eqos_rd1_pe7 DRV_PINGROUP_ENTRY_Y(0x9034, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_eqos_mdio_pf4 DRV_PINGROUP_ENTRY_Y(0x903c, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_eqos_rd0_pe6 DRV_PINGROUP_ENTRY_Y(0x9044, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_eqos_mdc_pf5 DRV_PINGROUP_ENTRY_Y(0x904c, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_eqos_txc_pe0 DRV_PINGROUP_ENTRY_Y(0x9058, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_eqos_rxc_pf3 DRV_PINGROUP_ENTRY_Y(0x9060, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_eqos_tx_ctl_pe5 DRV_PINGROUP_ENTRY_Y(0x9068, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_eqos_rx_ctl_pf2 DRV_PINGROUP_ENTRY_Y(0x9070, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_sdmmc3_dat3_pg5 DRV_PINGROUP_ENTRY_Y(0xa004, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_sdmmc3_dat2_pg4 DRV_PINGROUP_ENTRY_Y(0xa00c, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_sdmmc3_dat1_pg3 DRV_PINGROUP_ENTRY_Y(0xa014, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_sdmmc3_dat0_pg2 DRV_PINGROUP_ENTRY_Y(0xa01c, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_sdmmc3_cmd_pg1 DRV_PINGROUP_ENTRY_Y(0xa028, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_sdmmc3_clk_pg0 DRV_PINGROUP_ENTRY_Y(0xa030, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_qspi_io3_pr4 DRV_PINGROUP_ENTRY_Y(0xb004, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_qspi_io2_pr3 DRV_PINGROUP_ENTRY_Y(0xb00c, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_qspi_io1_pr2 DRV_PINGROUP_ENTRY_Y(0xb014, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_qspi_io0_pr1 DRV_PINGROUP_ENTRY_Y(0xb01c, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_qspi_sck_pr0 DRV_PINGROUP_ENTRY_Y(0xb024, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_qspi_cs_n_pr5 DRV_PINGROUP_ENTRY_Y(0xb02c, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_gpio_wan8_ph3 DRV_PINGROUP_ENTRY_Y(0xd004, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gpio_wan7_ph2 DRV_PINGROUP_ENTRY_Y(0xd00c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gpio_wan6_ph1 DRV_PINGROUP_ENTRY_Y(0xd014, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gpio_wan5_ph0 DRV_PINGROUP_ENTRY_Y(0xd01c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_uart2_tx_px0 DRV_PINGROUP_ENTRY_Y(0xd024, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_uart2_rx_px1 DRV_PINGROUP_ENTRY_Y(0xd02c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_uart2_rts_px2 DRV_PINGROUP_ENTRY_Y(0xd034, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_uart2_cts_px3 DRV_PINGROUP_ENTRY_Y(0xd03c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_uart5_rx_px5 DRV_PINGROUP_ENTRY_Y(0xd044, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_uart5_tx_px4 DRV_PINGROUP_ENTRY_Y(0xd04c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_uart5_rts_px6 DRV_PINGROUP_ENTRY_Y(0xd054, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_uart5_cts_px7 DRV_PINGROUP_ENTRY_Y(0xd05c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gpio_mdm1_py0 DRV_PINGROUP_ENTRY_Y(0xd064, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gpio_mdm2_py1 DRV_PINGROUP_ENTRY_Y(0xd06c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gpio_mdm3_py2 DRV_PINGROUP_ENTRY_Y(0xd074, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gpio_mdm4_py3 DRV_PINGROUP_ENTRY_Y(0xd07c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gpio_mdm5_py4 DRV_PINGROUP_ENTRY_Y(0xd084, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gpio_mdm6_py5 DRV_PINGROUP_ENTRY_Y(0xd08c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gpio_mdm7_py6 DRV_PINGROUP_ENTRY_Y(0xd094, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_bcpu_pwr_req_ph4 DRV_PINGROUP_ENTRY_Y(0xd09c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_mcpu_pwr_req_ph5 DRV_PINGROUP_ENTRY_Y(0xd0a4, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gpu_pwr_req_ph6 DRV_PINGROUP_ENTRY_Y(0xd0ac, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gen7_i2c_scl_pl0 DRV_PINGROUP_ENTRY_Y(0xd0b4, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gen7_i2c_sda_pl1 DRV_PINGROUP_ENTRY_Y(0xd0bc, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gen9_i2c_sda_pl3 DRV_PINGROUP_ENTRY_Y(0xd0c4, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gen9_i2c_scl_pl2 DRV_PINGROUP_ENTRY_Y(0xd0cc, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_usb_vbus_en0_pl4 DRV_PINGROUP_ENTRY_Y(0xd0d4, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_usb_vbus_en1_pl5 DRV_PINGROUP_ENTRY_Y(0xd0dc, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gp_pwm7_pl7 DRV_PINGROUP_ENTRY_Y(0xd0e4, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gp_pwm6_pl6 DRV_PINGROUP_ENTRY_Y(0xd0ec, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_ufs0_rst_pbb1 DRV_PINGROUP_ENTRY_Y(0x11004, 12, 9, 24, 8, -1, -1, -1, -1, 0)
+#define drive_ufs0_ref_clk_pbb0 DRV_PINGROUP_ENTRY_Y(0x1100c, 12, 9, 24, 8, -1, -1, -1, -1, 0)
+
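+/* The pad groups below expose no drive-strength registers. */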
+#define drive_directdc_comp DRV_PINGROUP_ENTRY_N(no_entry)
+#define drive_sdmmc1_comp DRV_PINGROUP_ENTRY_N(no_entry)
+#define drive_eqos_comp DRV_PINGROUP_ENTRY_N(no_entry)
+#define drive_sdmmc3_comp DRV_PINGROUP_ENTRY_N(no_entry)
+#define drive_sdmmc4_clk DRV_PINGROUP_ENTRY_N(no_entry)
+#define drive_sdmmc4_cmd DRV_PINGROUP_ENTRY_N(no_entry)
+#define drive_sdmmc4_dqs DRV_PINGROUP_ENTRY_N(no_entry)
+#define drive_sdmmc4_dat7 DRV_PINGROUP_ENTRY_N(no_entry)
+#define drive_sdmmc4_dat6 DRV_PINGROUP_ENTRY_N(no_entry)
+#define drive_sdmmc4_dat5 DRV_PINGROUP_ENTRY_N(no_entry)
+#define drive_sdmmc4_dat4 DRV_PINGROUP_ENTRY_N(no_entry)
+#define drive_sdmmc4_dat3 DRV_PINGROUP_ENTRY_N(no_entry)
+#define drive_sdmmc4_dat2 DRV_PINGROUP_ENTRY_N(no_entry)
+#define drive_sdmmc4_dat1 DRV_PINGROUP_ENTRY_N(no_entry)
+#define drive_sdmmc4_dat0 DRV_PINGROUP_ENTRY_N(no_entry)
+#define drive_qspi_comp DRV_PINGROUP_ENTRY_N(no_entry)
+
+/* AON drive pin groups */
+#define drive_touch_clk_pee2 DRV_PINGROUP_ENTRY_Y(0x2004, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_uart3_cts_pw5 DRV_PINGROUP_ENTRY_Y(0x200c, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_uart3_rts_pw4 DRV_PINGROUP_ENTRY_Y(0x2014, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_uart3_rx_pw3 DRV_PINGROUP_ENTRY_Y(0x201c, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_uart3_tx_pw2 DRV_PINGROUP_ENTRY_Y(0x2024, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_gen8_i2c_sda_pw1 DRV_PINGROUP_ENTRY_Y(0x202c, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_gen8_i2c_scl_pw0 DRV_PINGROUP_ENTRY_Y(0x2034, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_uart7_rx_pw7 DRV_PINGROUP_ENTRY_Y(0x203c, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_uart7_tx_pw6 DRV_PINGROUP_ENTRY_Y(0x2044, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_gpio_sen0_pv0 DRV_PINGROUP_ENTRY_Y(0x204c, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_gpio_sen1_pv1 DRV_PINGROUP_ENTRY_Y(0x2054, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_gpio_sen2_pv2 DRV_PINGROUP_ENTRY_Y(0x205c, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_gpio_sen3_pv3 DRV_PINGROUP_ENTRY_Y(0x2064, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_gpio_sen4_pv4 DRV_PINGROUP_ENTRY_Y(0x206c, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_gpio_sen5_pv5 DRV_PINGROUP_ENTRY_Y(0x2074, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_gpio_sen6_pv6 DRV_PINGROUP_ENTRY_Y(0x207c, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_gpio_sen7_pv7 DRV_PINGROUP_ENTRY_Y(0x2084, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_gpio_sen8_pee0 DRV_PINGROUP_ENTRY_Y(0x208c, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_gpio_sen9_pee1 DRV_PINGROUP_ENTRY_Y(0x2094, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_can_gpio7_paa7 DRV_PINGROUP_ENTRY_Y(0x3004, -1, -1, -1, -1, 28, 2, 30, 2, 1)
+#define drive_can1_dout_pz0 DRV_PINGROUP_ENTRY_Y(0x300c, -1, -1, -1, -1, 28, 2, 30, 2, 1)
+#define drive_can1_din_pz1 DRV_PINGROUP_ENTRY_Y(0x3014, -1, -1, -1, -1, 28, 2, 30, 2, 1)
+#define drive_can0_dout_pz2 DRV_PINGROUP_ENTRY_Y(0x301c, -1, -1, -1, -1, 28, 2, 30, 2, 1)
+#define drive_can0_din_pz3 DRV_PINGROUP_ENTRY_Y(0x3024, -1, -1, -1, -1, 28, 2, 30, 2, 1)
+#define drive_can_gpio0_paa0 DRV_PINGROUP_ENTRY_Y(0x302c, -1, -1, -1, -1, 28, 2, 30, 2, 1)
+#define drive_can_gpio1_paa1 DRV_PINGROUP_ENTRY_Y(0x3034, -1, -1, -1, -1, 28, 2, 30, 2, 1)
+#define drive_can_gpio2_paa2 DRV_PINGROUP_ENTRY_Y(0x303c, -1, -1, -1, -1, 28, 2, 30, 2, 1)
+#define drive_can_gpio3_paa3 DRV_PINGROUP_ENTRY_Y(0x3044, -1, -1, -1, -1, 28, 2, 30, 2, 1)
+#define drive_can_gpio4_paa4 DRV_PINGROUP_ENTRY_Y(0x304c, -1, -1, -1, -1, 28, 2, 30, 2, 1)
+#define drive_can_gpio5_paa5 DRV_PINGROUP_ENTRY_Y(0x3054, -1, -1, -1, -1, 28, 2, 30, 2, 1)
+#define drive_can_gpio6_paa6 DRV_PINGROUP_ENTRY_Y(0x305c, -1, -1, -1, -1, 28, 2, 30, 2, 1)
+#define drive_gpio_sw1_pff1 DRV_PINGROUP_ENTRY_Y(0x1004, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_gpio_sw2_pff2 DRV_PINGROUP_ENTRY_Y(0x100c, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_gpio_sw3_pff3 DRV_PINGROUP_ENTRY_Y(0x1014, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_gpio_sw4_pff4 DRV_PINGROUP_ENTRY_Y(0x101c, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_shutdown DRV_PINGROUP_ENTRY_Y(0x1024, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_pmu_int DRV_PINGROUP_ENTRY_Y(0x102c, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_safe_state_ps3 DRV_PINGROUP_ENTRY_Y(0x1034, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_vcomp_alert_ps4 DRV_PINGROUP_ENTRY_Y(0x103c, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_soc_pwr_req DRV_PINGROUP_ENTRY_Y(0x1044, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_batt_oc_ps2 DRV_PINGROUP_ENTRY_Y(0x104c, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_clk_32k_in DRV_PINGROUP_ENTRY_Y(0x1054, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_power_on_pff0 DRV_PINGROUP_ENTRY_Y(0x105c, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_pwr_i2c_scl_ps0 DRV_PINGROUP_ENTRY_Y(0x1064, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_pwr_i2c_sda_ps1 DRV_PINGROUP_ENTRY_Y(0x106c, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_gpio_dis0_pu0 DRV_PINGROUP_ENTRY_Y(0x1084, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_gpio_dis1_pu1 DRV_PINGROUP_ENTRY_Y(0x108c, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_gpio_dis2_pu2 DRV_PINGROUP_ENTRY_Y(0x1094, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_gpio_dis3_pu3 DRV_PINGROUP_ENTRY_Y(0x109c, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_gpio_dis4_pu4 DRV_PINGROUP_ENTRY_Y(0x10a4, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_gpio_dis5_pu5 DRV_PINGROUP_ENTRY_Y(0x10ac, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+
+#define PINGROUP(pg_name, f0, f1, f2, f3, r, bank, pupd, e_io_hv, e_lpbk, e_input, e_lpdr, e_pbias_buf, \
+ gpio_sfio_sel, e_od, schmitt_b, drvtype, epreemp, io_reset, rfu_in) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = ARRAY_SIZE(pg_name##_pins), \
+ .funcs = { \
+ TEGRA_MUX_##f0, \
+ TEGRA_MUX_##f1, \
+ TEGRA_MUX_##f2, \
+ TEGRA_MUX_##f3, \
+ }, \
+ PIN_PINGROUP_ENTRY_Y(r, bank, pupd, e_io_hv, e_lpbk, \
+ e_input, e_lpdr, e_pbias_buf, \
+ gpio_sfio_sel, e_od, \
+ schmitt_b, drvtype, \
+ epreemp, io_reset, \
+ rfu_in) \
+ drive_##pg_name, \
+ }
+
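+/*
+ * One entry per pin group: the group name, its four selectable functions
+ * and the control register data. Every group listed here needs a matching
+ * drive_<name> define above.
+ */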
+static const struct tegra_pingroup tegra186_groups[] = {
+ PINGROUP(gpio_aud3_pk0, RSVD0, DSPK1, SPDIF, RSVD3, 0x1000, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_aud2_pj7, RSVD0, DSPK1, SPDIF, RSVD3, 0x1008, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_aud1_pj6, RSVD0, RSVD1, RSVD2, RSVD3, 0x1010, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_aud0_pj5, RSVD0, RSVD1, RSVD2, RSVD3, 0x1018, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(aud_mclk_pj4, AUD, RSVD1, RSVD2, RSVD3, 0x1020, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(dap1_fs_pj3, I2S1, RSVD1, RSVD2, RSVD3, 0x1028, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(dap1_din_pj2, I2S1, RSVD1, RSVD2, RSVD3, 0x1030, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(dap1_dout_pj1, I2S1, RSVD1, RSVD2, RSVD3, 0x1038, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(dap1_sclk_pj0, I2S1, RSVD1, RSVD2, RSVD3, 0x1040, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(dmic1_clk_pm1, DMIC1, I2S3, RSVD2, RSVD3, 0x2000, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(dmic1_dat_pm0, DMIC1, I2S3, RSVD2, RSVD3, 0x2008, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(dmic2_dat_pm2, DMIC2, I2S3, RSVD2, RSVD3, 0x2010, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(dmic2_clk_pm3, DMIC2, I2S3, RSVD2, RSVD3, 0x2018, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(dmic4_dat_pm4, DMIC4, DSPK0, RSVD2, RSVD3, 0x2020, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(dmic4_clk_pm5, DMIC4, DSPK0, RSVD2, RSVD3, 0x2028, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(dap4_fs_pcc3, I2S4, RSVD1, RSVD2, RSVD3, 0x2030, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(dap4_din_pcc2, I2S4, RSVD1, RSVD2, RSVD3, 0x2038, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(dap4_dout_pcc1, I2S4, RSVD1, RSVD2, RSVD3, 0x2040, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(dap4_sclk_pcc0, I2S4, RSVD1, RSVD2, RSVD3, 0x2048, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(extperiph2_clk_po1, EXTPERIPH2, RSVD1, RSVD2, RSVD3, 0x0000, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(extperiph1_clk_po0, EXTPERIPH1, RSVD1, RSVD2, RSVD3, 0x0008, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(cam_i2c_sda_po3, I2C3, RSVD1, RSVD2, RSVD3, 0x0010, 0, Y, 5, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(cam_i2c_scl_po2, I2C3, RSVD1, RSVD2, RSVD3, 0x0018, 0, Y, 5, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_cam1_pn0, VGP1, RSVD1, RSVD2, RSVD3, 0x0020, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_cam2_pn1, VGP2, EXTPERIPH3, RSVD2, RSVD3, 0x0028, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_cam3_pn2, VGP3, EXTPERIPH4, RSVD2, RSVD3, 0x0030, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_cam4_pn3, VGP4, SPI4, RSVD2, RSVD3, 0x0038, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_cam5_pn4, VGP5, SPI4, RSVD2, RSVD3, 0x0040, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_cam6_pn5, VGP6, SPI4, RSVD2, RSVD3, 0x0048, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_cam7_pn6, RSVD0, SPI4, RSVD2, RSVD3, 0x0050, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(dap2_din_pc3, I2S2, RSVD1, RSVD2, RSVD3, 0x4000, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(dap2_dout_pc2, I2S2, RSVD1, RSVD2, RSVD3, 0x4008, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(dap2_fs_pc4, I2S2, RSVD1, RSVD2, RSVD3, 0x4010, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(dap2_sclk_pc1, I2S2, RSVD1, RSVD2, RSVD3, 0x4018, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(uart4_cts_pb3, UARTD, RSVD1, RSVD2, RSVD3, 0x4020, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(uart4_rts_pb2, UARTD, RSVD1, RSVD2, RSVD3, 0x4028, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(uart4_rx_pb1, UARTD, RSVD1, RSVD2, RSVD3, 0x4030, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(uart4_tx_pb0, UARTD, RSVD1, RSVD2, RSVD3, 0x4038, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_wan4_pc0, RSVD0, RSVD1, RSVD2, RSVD3, 0x4040, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_wan3_pb6, RSVD0, RSVD1, RSVD2, RSVD3, 0x4048, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_wan2_pb5, RSVD0, RSVD1, RSVD2, RSVD3, 0x4050, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_wan1_pb4, RSVD0, RSVD1, RSVD2, RSVD3, 0x4058, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gen1_i2c_scl_pc5, I2C1, RSVD1, RSVD2, RSVD3, 0x4060, 0, Y, 5, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gen1_i2c_sda_pc6, I2C1, RSVD1, RSVD2, RSVD3, 0x4068, 0, Y, 5, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(uart1_cts_pt3, UARTA, RSVD1, RSVD2, RSVD3, 0x5000, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(uart1_rts_pt2, UARTA, RSVD1, RSVD2, RSVD3, 0x5008, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(uart1_rx_pt1, UARTA, RSVD1, RSVD2, RSVD3, 0x5010, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(uart1_tx_pt0, UARTA, RSVD1, RSVD2, RSVD3, 0x5018, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(directdc1_out3_pq5, DIRECTDC1, RSVD1, RSVD2, RSVD3, 0x5028, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, 15, 17, Y),
+ PINGROUP(directdc1_out2_pq4, DIRECTDC1, RSVD1, RSVD2, RSVD3, 0x5030, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, 15, 17, Y),
+ PINGROUP(directdc1_out1_pq3, DIRECTDC1, RSVD1, RSVD2, RSVD3, 0x5038, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, 15, 17, Y),
+ PINGROUP(directdc1_out0_pq2, DIRECTDC1, RSVD1, RSVD2, RSVD3, 0x5040, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, 15, 17, Y),
+ PINGROUP(directdc1_in_pq1, DIRECTDC1, RSVD1, RSVD2, RSVD3, 0x5048, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, 15, 17, Y),
+ PINGROUP(directdc1_clk_pq0, DIRECTDC1, RSVD1, RSVD2, RSVD3, 0x5050, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, 15, 17, Y),
+ PINGROUP(directdc_comp, DIRECTDC, RSVD1, RSVD2, RSVD3, 0x5058, 0, Y, -1, -1, -1, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(gpio_pq0_pi0, RSVD0, IQC0, I2S6, RSVD3, 0x3000, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(gpio_pq1_pi1, RSVD0, IQC0, I2S6, RSVD3, 0x3008, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(gpio_pq2_pi2, RSVD0, IQC0, I2S6, RSVD3, 0x3010, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(gpio_pq3_pi3, RSVD0, IQC0, I2S6, RSVD3, 0x3018, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(gpio_pq4_pi4, RSVD0, IQC1, DTV, RSVD3, 0x3020, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(gpio_pq5_pi5, RSVD0, IQC1, DTV, RSVD3, 0x3028, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(gpio_pq6_pi6, RSVD0, IQC1, DTV, RSVD3, 0x3030, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(gpio_pq7_pi7, RSVD0, IQC1, DTV, RSVD3, 0x3038, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(gpio_edp2_pp5, RSVD0, UARTF, SDMMC3, RSVD3, 0x10000, 0, Y, 5, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_edp3_pp6, RSVD0, UARTF, SDMMC1, RSVD3, 0x10008, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_edp0_pp3, RSVD0, UARTF, SDMMC3, RSVD3, 0x10010, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_edp1_pp4, RSVD0, UARTF, SDMMC1, RSVD3, 0x10018, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(dp_aux_ch0_hpd_pp0, DP, RSVD1, RSVD2, RSVD3, 0x10020, 0, Y, 5, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(dp_aux_ch1_hpd_pp1, DP, RSVD1, RSVD2, RSVD3, 0x10028, 0, Y, 5, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(hdmi_cec_pp2, HDMI, RSVD1, RSVD2, RSVD3, 0x10030, 0, Y, 5, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(pex_l2_clkreq_n_pa6, PE2, GP, SATA, RSVD3, 0x7000, 0, Y, 5, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(pex_wake_n_pa2, PE, RSVD1, RSVD2, RSVD3, 0x7008, 0, Y, 5, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(pex_l1_clkreq_n_pa4, PE1, RSVD1, RSVD2, RSVD3, 0x7010, 0, Y, 5, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(pex_l1_rst_n_pa3, PE1, RSVD1, RSVD2, RSVD3, 0x7018, 0, Y, 5, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(pex_l0_clkreq_n_pa1, PE0, RSVD1, RSVD2, RSVD3, 0x7020, 0, Y, 5, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(pex_l0_rst_n_pa0, PE0, RSVD1, RSVD2, RSVD3, 0x7028, 0, Y, 5, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(pex_l2_rst_n_pa5, PE2, SOC, SATA, RSVD3, 0x7030, 0, Y, 5, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(sdmmc1_clk_pd0, SDMMC1, RSVD1, RSVD2, RSVD3, 0x8000, 0, Y, 5, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(sdmmc1_cmd_pd1, SDMMC1, RSVD1, RSVD2, RSVD3, 0x8008, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(sdmmc1_comp, SDMMC1, RSVD1, RSVD2, RSVD3, 0x8010, 0, Y, -1, -1, 6, -1, -1, -1, -1, -1, N, -1, -1, N),
+ PINGROUP(sdmmc1_dat3_pd5, SDMMC1, RSVD1, RSVD2, RSVD3, 0x8014, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(sdmmc1_dat2_pd4, SDMMC1, RSVD1, RSVD2, RSVD3, 0x801c, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(sdmmc1_dat1_pd3, SDMMC1, RSVD1, RSVD2, RSVD3, 0x8024, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(sdmmc1_dat0_pd2, SDMMC1, RSVD1, RSVD2, RSVD3, 0x802c, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(eqos_td3_pe4, EQOS, SDMMC2, RSVD2, RSVD3, 0x9000, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(eqos_td2_pe3, EQOS, SDMMC2, RSVD2, RSVD3, 0x9008, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(eqos_td1_pe2, EQOS, SDMMC2, RSVD2, RSVD3, 0x9010, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(eqos_td0_pe1, EQOS, SDMMC2, RSVD2, RSVD3, 0x9018, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(eqos_rd3_pf1, EQOS, SDMMC2, RSVD2, RSVD3, 0x9020, 0, Y, -1, 5, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(eqos_rd2_pf0, EQOS, SDMMC2, RSVD2, RSVD3, 0x9028, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(eqos_rd1_pe7, EQOS, SDMMC2, RSVD2, RSVD3, 0x9030, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(eqos_mdio_pf4, EQOS, SOC, RSVD2, RSVD3, 0x9038, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(eqos_rd0_pe6, EQOS, SDMMC2, RSVD2, RSVD3, 0x9040, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(eqos_mdc_pf5, EQOS, RSVD1, RSVD2, RSVD3, 0x9048, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(eqos_comp, EQOS, SDMMC2, RSVD2, RSVD3, 0x9050, 0, Y, -1, -1, -1, -1, -1, -1, -1, -1, N, -1, -1, N),
+ PINGROUP(eqos_txc_pe0, EQOS, SDMMC2, RSVD2, RSVD3, 0x9054, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(eqos_rxc_pf3, EQOS, SDMMC2, RSVD2, RSVD3, 0x905c, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(eqos_tx_ctl_pe5, EQOS, SDMMC2, RSVD2, RSVD3, 0x9064, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(eqos_rx_ctl_pf2, EQOS, SDMMC2, RSVD2, RSVD3, 0x906c, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(sdmmc3_dat3_pg5, SDMMC3, RSVD1, RSVD2, RSVD3, 0xa000, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(sdmmc3_dat2_pg4, SDMMC3, RSVD1, RSVD2, RSVD3, 0xa008, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(sdmmc3_dat1_pg3, SDMMC3, RSVD1, RSVD2, RSVD3, 0xa010, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(sdmmc3_dat0_pg2, SDMMC3, RSVD1, RSVD2, RSVD3, 0xa018, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(sdmmc3_comp, SDMMC3, RSVD1, RSVD2, RSVD3, 0xa020, 0, Y, -1, -1, -1, -1, -1, -1, -1, -1, N, -1, -1, N),
+ PINGROUP(sdmmc3_cmd_pg1, SDMMC3, RSVD1, RSVD2, RSVD3, 0xa024, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(sdmmc3_clk_pg0, SDMMC3, RSVD1, RSVD1, RSVD3, 0xa02c, 0, Y, -1, 5, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(sdmmc4_clk, SDMMC4, RSVD1, RSVD2, RSVD3, 0x6004, 0, Y, -1, 5, 6, -1, 9, -1, -1, 12, Y, -1, -1, Y),
+ PINGROUP(sdmmc4_cmd, SDMMC4, RSVD1, RSVD2, RSVD3, 0x6008, 0, Y, -1, -1, 6, -1, 9, -1, -1, 12, Y, -1, -1, Y),
+ PINGROUP(sdmmc4_dqs, SDMMC4, RSVD1, RSVD2, RSVD3, 0x600c, 0, Y, -1, -1, 6, -1, 9, -1, -1, 12, Y, -1, -1, Y),
+ PINGROUP(sdmmc4_dat7, SDMMC4, RSVD1, RSVD2, RSVD3, 0x6010, 0, Y, -1, -1, 6, -1, 9, -1, -1, 12, Y, -1, -1, Y),
+ PINGROUP(sdmmc4_dat6, SDMMC4, RSVD1, RSVD2, RSVD3, 0x6014, 0, Y, -1, -1, 6, -1, 9, -1, -1, 12, Y, -1, -1, Y),
+ PINGROUP(sdmmc4_dat5, SDMMC4, RSVD1, RSVD2, RSVD3, 0x6018, 0, Y, -1, -1, 6, -1, 9, -1, -1, 12, Y, -1, -1, Y),
+ PINGROUP(sdmmc4_dat4, SDMMC4, RSVD1, RSVD2, RSVD3, 0x601c, 0, Y, -1, -1, 6, -1, 9, -1, -1, 12, Y, -1, -1, Y),
+ PINGROUP(sdmmc4_dat3, SDMMC4, RSVD1, RSVD2, RSVD3, 0x6020, 0, Y, -1, -1, 6, -1, 9, -1, -1, 12, Y, -1, -1, Y),
+ PINGROUP(sdmmc4_dat2, SDMMC4, RSVD1, RSVD2, RSVD3, 0x6024, 0, Y, -1, -1, 6, -1, 9, -1, -1, 12, Y, -1, -1, Y),
+ PINGROUP(sdmmc4_dat1, SDMMC4, RSVD1, RSVD2, RSVD3, 0x6028, 0, Y, -1, -1, 6, -1, 9, -1, -1, 12, Y, -1, -1, Y),
+ PINGROUP(sdmmc4_dat0, SDMMC4, RSVD1, RSVD2, RSVD3, 0x602c, 0, Y, -1, -1, 6, -1, 9, -1, -1, 12, Y, -1, -1, Y),
+ PINGROUP(qspi_io3_pr4, QSPI, RSVD1, RSVD2, RSVD3, 0xB000, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, 15, 17, Y),
+ PINGROUP(qspi_io2_pr3, QSPI, RSVD1, RSVD2, RSVD3, 0xB008, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, 15, 17, Y),
+ PINGROUP(qspi_io1_pr2, QSPI, RSVD1, RSVD2, RSVD3, 0xB010, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, 15, 17, Y),
+ PINGROUP(qspi_io0_pr1, QSPI, RSVD1, RSVD2, RSVD3, 0xB018, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, 15, 17, Y),
+ PINGROUP(qspi_sck_pr0, QSPI, RSVD1, RSVD2, RSVD3, 0xB020, 0, Y, -1, 5, 6, -1, 9, 10, -1, 12, Y, 15, 17, Y),
+ PINGROUP(qspi_cs_n_pr5, QSPI, RSVD1, RSVD2, RSVD3, 0xB028, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, 15, 17, Y),
+ PINGROUP(qspi_comp, QSPI, RSVD1, RSVD2, RSVD3, 0xB030, 0, Y, -1, -1, -1, -1, -1, -1, -1, -1, Y, -1, -1, Y),
+ PINGROUP(gpio_wan8_ph3, RSVD0, RSVD1, SPI1, RSVD3, 0xd000, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_wan7_ph2, RSVD0, RSVD1, SPI1, RSVD3, 0xd008, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_wan6_ph1, RSVD0, RSVD1, SPI1, RSVD3, 0xd010, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_wan5_ph0, RSVD0, RSVD1, SPI1, RSVD3, 0xd018, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(uart2_tx_px0, UARTB, RSVD1, RSVD2, RSVD3, 0xd020, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(uart2_rx_px1, UARTB, RSVD1, RSVD2, RSVD3, 0xd028, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(uart2_rts_px2, UARTB, RSVD1, RSVD2, RSVD3, 0xd030, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(uart2_cts_px3, UARTB, RSVD1, RSVD2, RSVD3, 0xd038, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(uart5_rx_px5, UARTE, SPI3, GP, RSVD3, 0xd040, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(uart5_tx_px4, UARTE, SPI3, NV, RSVD3, 0xd048, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(uart5_rts_px6, UARTE, SPI3, RSVD2, RSVD3, 0xd050, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(uart5_cts_px7, UARTE, SPI3, RSVD2, RSVD3, 0xd058, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_mdm1_py0, RSVD0, RSVD1, RSVD2, RSVD3, 0xd060, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_mdm2_py1, RSVD0, RSVD1, RSVD2, RSVD3, 0xd068, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_mdm3_py2, RSVD0, RSVD1, RSVD2, RSVD3, 0xd070, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_mdm4_py3, RSVD0, SPI1, CCLA, RSVD3, 0xd078, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_mdm5_py4, RSVD0, SPI1, RSVD2, RSVD3, 0xd080, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_mdm6_py5, SOC, RSVD1, RSVD2, RSVD3, 0xd088, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_mdm7_py6, RSVD0, RSVD1, RSVD2, RSVD3, 0xd090, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(bcpu_pwr_req_ph4, RSVD0, RSVD1, RSVD2, RSVD3, 0xd098, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(mcpu_pwr_req_ph5, RSVD0, RSVD1, RSVD2, RSVD3, 0xd0a0, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpu_pwr_req_ph6, RSVD0, RSVD1, RSVD2, RSVD3, 0xd0a8, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gen7_i2c_scl_pl0, I2C7, I2S5, RSVD2, RSVD3, 0xd0b0, 0, Y, 5, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gen7_i2c_sda_pl1, I2C7, I2S5, RSVD2, RSVD3, 0xd0b8, 0, Y, 5, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gen9_i2c_sda_pl3, I2C9, I2S5, RSVD2, RSVD3, 0xd0c0, 0, Y, 5, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gen9_i2c_scl_pl2, I2C9, I2S5, RSVD2, RSVD3, 0xd0c8, 0, Y, 5, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(usb_vbus_en0_pl4, USB, RSVD1, RSVD2, RSVD3, 0xd0d0, 0, Y, 5, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(usb_vbus_en1_pl5, USB, RSVD1, RSVD2, RSVD3, 0xd0d8, 0, Y, 5, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gp_pwm7_pl7, GP, RSVD1, RSVD2, RSVD3, 0xd0e0, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gp_pwm6_pl6, GP, RSVD1, RSVD2, RSVD3, 0xd0e8, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(ufs0_rst_pbb1, UFS0, RSVD1, RSVD2, RSVD3, 0x11000, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, 15, 17, Y),
+ PINGROUP(ufs0_ref_clk_pbb0, UFS0, RSVD1, RSVD2, RSVD3, 0x11008, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, 15, 17, Y),
+};
+
+static const struct tegra_pinctrl_soc_data tegra186_pinctrl = {
+ .pins = tegra186_pins,
+ .npins = ARRAY_SIZE(tegra186_pins),
+ .functions = tegra186_functions,
+ .nfunctions = ARRAY_SIZE(tegra186_functions),
+ .groups = tegra186_groups,
+ .ngroups = ARRAY_SIZE(tegra186_groups),
+ .hsm_in_mux = false,
+ .schmitt_in_mux = true,
+ .drvtype_in_mux = true,
+ .sfsel_in_mux = true,
+};
+
+static const struct pinctrl_pin_desc tegra186_aon_pins[] = {
+ PINCTRL_PIN(TEGRA_PIN_PWR_I2C_SCL_PS0, "PWR_I2C_SCL_PS0"),
+ PINCTRL_PIN(TEGRA_PIN_PWR_I2C_SDA_PS1, "PWR_I2C_SDA_PS1"),
+ PINCTRL_PIN(TEGRA_PIN_BATT_OC_PS2, "BATT_OC_PS2"),
+ PINCTRL_PIN(TEGRA_PIN_SAFE_STATE_PS3, "SAFE_STATE_PS3"),
+ PINCTRL_PIN(TEGRA_PIN_VCOMP_ALERT_PS4, "VCOMP_ALERT_PS4"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_DIS0_PU0, "GPIO_DIS0_PU0"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_DIS1_PU1, "GPIO_DIS1_PU1"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_DIS2_PU2, "GPIO_DIS2_PU2"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_DIS3_PU3, "GPIO_DIS3_PU3"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_DIS4_PU4, "GPIO_DIS4_PU4"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_DIS5_PU5, "GPIO_DIS5_PU5"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_SEN0_PV0, "GPIO_SEN0_PV0"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_SEN1_PV1, "GPIO_SEN1_PV1"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_SEN2_PV2, "GPIO_SEN2_PV2"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_SEN3_PV3, "GPIO_SEN3_PV3"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_SEN4_PV4, "GPIO_SEN4_PV4"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_SEN5_PV5, "GPIO_SEN5_PV5"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_SEN6_PV6, "GPIO_SEN6_PV6"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_SEN7_PV7, "GPIO_SEN7_PV7"),
+ PINCTRL_PIN(TEGRA_PIN_GEN8_I2C_SCL_PW0, "GEN8_I2C_SCL_PW0"),
+ PINCTRL_PIN(TEGRA_PIN_GEN8_I2C_SDA_PW1, "GEN8_I2C_SDA_PW1"),
+ PINCTRL_PIN(TEGRA_PIN_UART3_TX_PW2, "UART3_TX_PW2"),
+ PINCTRL_PIN(TEGRA_PIN_UART3_RX_PW3, "UART3_RX_PW3"),
+ PINCTRL_PIN(TEGRA_PIN_UART3_RTS_PW4, "UART3_RTS_PW4"),
+ PINCTRL_PIN(TEGRA_PIN_UART3_CTS_PW5, "UART3_CTS_PW5"),
+ PINCTRL_PIN(TEGRA_PIN_UART7_TX_PW6, "UART7_TX_PW6"),
+ PINCTRL_PIN(TEGRA_PIN_UART7_RX_PW7, "UART7_RX_PW7"),
+ PINCTRL_PIN(TEGRA_PIN_CAN1_DOUT_PZ0, "CAN1_DOUT_PZ0"),
+ PINCTRL_PIN(TEGRA_PIN_CAN1_DIN_PZ1, "CAN1_DIN_PZ1"),
+ PINCTRL_PIN(TEGRA_PIN_CAN0_DOUT_PZ2, "CAN0_DOUT_PZ2"),
+ PINCTRL_PIN(TEGRA_PIN_CAN0_DIN_PZ3, "CAN0_DIN_PZ3"),
+ PINCTRL_PIN(TEGRA_PIN_CAN_GPIO0_PAA0, "CAN_GPIO0_PAA0"),
+ PINCTRL_PIN(TEGRA_PIN_CAN_GPIO1_PAA1, "CAN_GPIO1_PAA1"),
+ PINCTRL_PIN(TEGRA_PIN_CAN_GPIO2_PAA2, "CAN_GPIO2_PAA2"),
+ PINCTRL_PIN(TEGRA_PIN_CAN_GPIO3_PAA3, "CAN_GPIO3_PAA3"),
+ PINCTRL_PIN(TEGRA_PIN_CAN_GPIO4_PAA4, "CAN_GPIO4_PAA4"),
+ PINCTRL_PIN(TEGRA_PIN_CAN_GPIO5_PAA5, "CAN_GPIO5_PAA5"),
+ PINCTRL_PIN(TEGRA_PIN_CAN_GPIO6_PAA6, "CAN_GPIO6_PAA6"),
+ PINCTRL_PIN(TEGRA_PIN_CAN_GPIO7_PAA7, "CAN_GPIO7_PAA7"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_SEN8_PEE0, "GPIO_SEN8_PEE0"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_SEN9_PEE1, "GPIO_SEN9_PEE1"),
+ PINCTRL_PIN(TEGRA_PIN_TOUCH_CLK_PEE2, "TOUCH_CLK_PEE2"),
+ PINCTRL_PIN(TEGRA_PIN_POWER_ON_PFF0, "POWER_ON_PFF0"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_SW1_PFF1, "GPIO_SW1_PFF1"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_SW2_PFF2, "GPIO_SW2_PFF2"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_SW3_PFF3, "GPIO_SW3_PFF3"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_SW4_PFF4, "GPIO_SW4_PFF4"),
+ PINCTRL_PIN(TEGRA_PIN_SHUTDOWN, "SHUTDOWN"),
+ PINCTRL_PIN(TEGRA_PIN_PMU_INT, "PMU_INT"),
+ PINCTRL_PIN(TEGRA_PIN_SOC_PWR_REQ, "SOC_PWR_REQ"),
+ PINCTRL_PIN(TEGRA_PIN_CLK_32K_IN, "CLK_32K_IN"),
+};
+
+static const struct tegra_pingroup tegra186_aon_groups[] = {
+ PINGROUP(touch_clk_pee2, TOUCH, RSVD1, RSVD2, RSVD3, 0x2000, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(uart3_cts_pw5, UARTC, RSVD1, RSVD2, RSVD3, 0x2008, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(uart3_rts_pw4, UARTC, RSVD1, RSVD2, RSVD3, 0x2010, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(uart3_rx_pw3, UARTC, RSVD1, RSVD2, RSVD3, 0x2018, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(uart3_tx_pw2, UARTC, RSVD1, RSVD2, RSVD3, 0x2020, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gen8_i2c_sda_pw1, I2C8, RSVD1, RSVD2, RSVD3, 0x2028, 0, Y, 5, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gen8_i2c_scl_pw0, I2C8, RSVD1, RSVD2, RSVD3, 0x2030, 0, Y, 5, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(uart7_rx_pw7, UARTG, RSVD1, RSVD2, RSVD3, 0x2038, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(uart7_tx_pw6, UARTG, RSVD1, RSVD2, RSVD3, 0x2040, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_sen0_pv0, RSVD0, RSVD1, RSVD2, RSVD3, 0x2048, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_sen1_pv1, SPI2, RSVD1, RSVD2, RSVD3, 0x2050, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_sen2_pv2, SPI2, RSVD1, RSVD2, RSVD3, 0x2058, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_sen3_pv3, SPI2, RSVD1, RSVD2, RSVD3, 0x2060, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_sen4_pv4, SPI2, RSVD1, RSVD2, RSVD3, 0x2068, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_sen5_pv5, RSVD0, RSVD1, RSVD2, RSVD3, 0x2070, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_sen6_pv6, RSVD0, GP, RSVD2, RSVD3, 0x2078, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_sen7_pv7, RSVD0, WDT, RSVD2, RSVD3, 0x2080, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_sen8_pee0, RSVD0, I2C2, RSVD2, RSVD3, 0x2088, 0, Y, 5, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_sen9_pee1, RSVD0, I2C2, RSVD2, RSVD3, 0x2090, 0, Y, 5, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(can_gpio7_paa7, RSVD0, WDT, RSVD2, RSVD3, 0x3000, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(can1_dout_pz0, CAN1, RSVD1, RSVD2, RSVD3, 0x3008, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(can1_din_pz1, CAN1, RSVD1, RSVD2, RSVD3, 0x3010, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(can0_dout_pz2, CAN0, RSVD1, RSVD2, RSVD3, 0x3018, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(can0_din_pz3, CAN0, RSVD1, RSVD2, RSVD3, 0x3020, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(can_gpio0_paa0, RSVD0, DMIC3, DMIC5, RSVD3, 0x3028, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(can_gpio1_paa1, RSVD0, DMIC3, DMIC5, RSVD3, 0x3030, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(can_gpio2_paa2, GPIO, RSVD1, RSVD2, RSVD3, 0x3038, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(can_gpio3_paa3, RSVD0, RSVD1, RSVD2, RSVD3, 0x3040, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(can_gpio4_paa4, RSVD0, RSVD1, RSVD2, RSVD3, 0x3048, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(can_gpio5_paa5, RSVD0, RSVD1, RSVD2, RSVD3, 0x3050, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(can_gpio6_paa6, RSVD0, RSVD1, RSVD2, RSVD3, 0x3058, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(gpio_sw1_pff1, RSVD0, RSVD1, RSVD2, RSVD3, 0x1000, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_sw2_pff2, RSVD0, RSVD1, RSVD2, RSVD3, 0x1008, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_sw3_pff3, RSVD0, RSVD1, RSVD2, RSVD3, 0x1010, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_sw4_pff4, RSVD0, RSVD1, RSVD2, RSVD3, 0x1018, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(shutdown, RSVD0, RSVD1, RSVD2, RSVD3, 0x1020, 0, Y, -1, -1, 6, 8, -1, -1, -1, 12, N, -1, -1, N),
+ PINGROUP(pmu_int, RSVD0, RSVD1, RSVD2, RSVD3, 0x1028, 0, Y, -1, -1, 6, 8, -1, -1, -1, 12, N, -1, -1, N),
+ PINGROUP(safe_state_ps3, SCE, RSVD1, RSVD2, RSVD3, 0x1030, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(vcomp_alert_ps4, SOC, RSVD1, RSVD2, RSVD3, 0x1038, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(soc_pwr_req, RSVD0, RSVD1, RSVD2, RSVD3, 0x1040, 0, Y, -1, -1, 6, 8, -1, -1, -1, 12, N, -1, -1, N),
+ PINGROUP(batt_oc_ps2, SOC, RSVD1, RSVD2, RSVD3, 0x1048, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(clk_32k_in, RSVD0, RSVD1, RSVD2, RSVD3, 0x1050, 0, Y, -1, -1, 6, 8, -1, -1, -1, -1, N, -1, -1, N),
+ PINGROUP(power_on_pff0, RSVD0, RSVD1, RSVD2, RSVD3, 0x1058, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(pwr_i2c_scl_ps0, I2C5, RSVD1, RSVD2, RSVD3, 0x1060, 0, Y, 5, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(pwr_i2c_sda_ps1, I2C5, RSVD1, RSVD2, RSVD3, 0x1068, 0, Y, 5, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_dis0_pu0, RSVD0, GP, DCB, DCC, 0x1080, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_dis1_pu1, RSVD0, RSVD1, DISPLAYA, RSVD3, 0x1088, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_dis2_pu2, RSVD0, GP, DCA, RSVD3, 0x1090, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_dis3_pu3, RSVD0, RSVD1, DISPLAYB, DCC, 0x1098, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_dis4_pu4, RSVD0, SOC, DCA, RSVD3, 0x10a0, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_dis5_pu5, RSVD0, GP, DCC, DCB, 0x10a8, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+};
+
+static const struct tegra_pinctrl_soc_data tegra186_pinctrl_aon = {
+ .pins = tegra186_aon_pins,
+ .npins = ARRAY_SIZE(tegra186_aon_pins),
+ .functions = tegra186_functions,
+ .nfunctions = ARRAY_SIZE(tegra186_functions),
+ .groups = tegra186_aon_groups,
+ .ngroups = ARRAY_SIZE(tegra186_aon_groups),
+ .hsm_in_mux = false,
+ .schmitt_in_mux = true,
+ .drvtype_in_mux = true,
+ .sfsel_in_mux = true,
+};
+
+static int tegra186_pinctrl_probe(struct platform_device *pdev)
+{
+ const struct tegra_pinctrl_soc_data *soc = of_device_get_match_data(&pdev->dev);
+
+ return tegra_pinctrl_probe(pdev, soc);
+}
+
+static const struct of_device_id tegra186_pinctrl_of_match[] = {
+ { .compatible = "nvidia,tegra186-pinmux", .data = &tegra186_pinctrl },
+ { .compatible = "nvidia,tegra186-pinmux-aon", .data = &tegra186_pinctrl_aon },
+ { },
+};
+
+static struct platform_driver tegra186_pinctrl_driver = {
+ .driver = {
+ .name = "tegra186-pinctrl",
+ .of_match_table = tegra186_pinctrl_of_match,
+ },
+ .probe = tegra186_pinctrl_probe,
+};
+
+static int __init tegra186_pinctrl_init(void)
+{
+ return platform_driver_register(&tegra186_pinctrl_driver);
+}
+arch_initcall(tegra186_pinctrl_init);
diff --git a/drivers/pmdomain/apple/pmgr-pwrstate.c b/drivers/pmdomain/apple/pmgr-pwrstate.c
index 9467235110f4..82c33cf727a8 100644
--- a/drivers/pmdomain/apple/pmgr-pwrstate.c
+++ b/drivers/pmdomain/apple/pmgr-pwrstate.c
@@ -306,6 +306,7 @@ err_remove:
}
static const struct of_device_id apple_pmgr_ps_of_match[] = {
+ { .compatible = "apple,t8103-pmgr-pwrstate" },
{ .compatible = "apple,pmgr-pwrstate" },
{}
};
diff --git a/drivers/pnp/isapnp/core.c b/drivers/pnp/isapnp/core.c
index d2ff76e74a05..219f96f2aaaf 100644
--- a/drivers/pnp/isapnp/core.c
+++ b/drivers/pnp/isapnp/core.c
@@ -27,6 +27,7 @@
#include <linux/init.h>
#include <linux/isapnp.h>
#include <linux/mutex.h>
+#include <linux/string_choices.h>
#include <asm/io.h>
#include "../base.h"
@@ -1037,7 +1038,7 @@ static int __init isapnp_init(void)
if (cards)
printk(KERN_INFO
"isapnp: %i Plug & Play card%s detected total\n", cards,
- cards > 1 ? "s" : "");
+ str_plural(cards));
else
printk(KERN_INFO "isapnp: No Plug & Play card found\n");
diff --git a/drivers/power/supply/88pm860x_charger.c b/drivers/power/supply/88pm860x_charger.c
index 2b9fcb7e71d7..8d99c6ff72ed 100644
--- a/drivers/power/supply/88pm860x_charger.c
+++ b/drivers/power/supply/88pm860x_charger.c
@@ -284,8 +284,8 @@ static int set_charging_fsm(struct pm860x_charger_info *info)
{
struct power_supply *psy;
union power_supply_propval data;
- unsigned char fsm_state[][16] = { "init", "discharge", "precharge",
- "fastcharge",
+ static const unsigned char fsm_state[][16] = {
+ "init", "discharge", "precharge", "fastcharge",
};
int ret;
int vbatt;
@@ -313,7 +313,7 @@ static int set_charging_fsm(struct pm860x_charger_info *info)
dev_dbg(info->dev, "Entering FSM:%s, Charger:%s, Battery:%s, "
"Allowed:%d\n",
- &fsm_state[info->state][0],
+ fsm_state[info->state],
(info->online) ? "online" : "N/A",
(info->present) ? "present" : "N/A", info->allowed);
dev_dbg(info->dev, "set_charging_fsm:vbatt:%d(mV)\n", vbatt);
@@ -385,7 +385,7 @@ static int set_charging_fsm(struct pm860x_charger_info *info)
}
dev_dbg(info->dev,
"Out FSM:%s, Charger:%s, Battery:%s, Allowed:%d\n",
- &fsm_state[info->state][0],
+ fsm_state[info->state],
(info->online) ? "online" : "N/A",
(info->present) ? "present" : "N/A", info->allowed);
mutex_unlock(&info->lock);
diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
index 11893c50c5d2..dca4be23ee70 100644
--- a/drivers/power/supply/Kconfig
+++ b/drivers/power/supply/Kconfig
@@ -35,6 +35,9 @@ config APM_POWER
Say Y here to enable support APM status emulation using
battery class devices.
+config ADC_BATTERY_HELPER
+ tristate
+
config GENERIC_ADC_BATTERY
tristate "Generic battery support using IIO"
depends on IIO
@@ -244,6 +247,18 @@ config BATTERY_INGENIC
This driver can also be built as a module. If so, the module will be
called ingenic-battery.
+config BATTERY_INTEL_DC_TI
+ tristate "Intel Bay / Cherry Trail Dollar Cove TI battery driver"
+ depends on INTEL_SOC_PMIC_CHTDC_TI && INTEL_DC_TI_ADC && IIO && ACPI
+ select ADC_BATTERY_HELPER
+ help
+ Choose this option if you want to monitor battery status on Intel
+ Bay Trail / Cherry Trail tablets using the Dollar Cove TI PMIC's
+	  coulomb-counter as a fuel-gauge.
+
+ To compile this driver as a module, choose M here: the module will be
+ called intel_dc_ti_battery.
+
config BATTERY_IPAQ_MICRO
tristate "iPAQ Atmel Micro ASIC battery driver"
depends on MFD_IPAQ_MICRO
@@ -1050,6 +1065,7 @@ config CHARGER_SURFACE
config BATTERY_UG3105
tristate "uPI uG3105 battery monitor driver"
depends on I2C
+ select ADC_BATTERY_HELPER
help
Battery monitor driver for the uPI uG3105 battery monitor.
diff --git a/drivers/power/supply/Makefile b/drivers/power/supply/Makefile
index a7d91244bb82..99a820d38197 100644
--- a/drivers/power/supply/Makefile
+++ b/drivers/power/supply/Makefile
@@ -7,6 +7,7 @@ power_supply-$(CONFIG_LEDS_TRIGGERS) += power_supply_leds.o
obj-$(CONFIG_POWER_SUPPLY) += power_supply.o
obj-$(CONFIG_POWER_SUPPLY_HWMON) += power_supply_hwmon.o
+obj-$(CONFIG_ADC_BATTERY_HELPER) += adc-battery-helper.o
obj-$(CONFIG_GENERIC_ADC_BATTERY) += generic-adc-battery.o
obj-$(CONFIG_APM_POWER) += apm_power.o
@@ -41,6 +42,7 @@ obj-$(CONFIG_BATTERY_OLPC) += olpc_battery.o
obj-$(CONFIG_BATTERY_SAMSUNG_SDI) += samsung-sdi-battery.o
obj-$(CONFIG_BATTERY_COLLIE) += collie_battery.o
obj-$(CONFIG_BATTERY_INGENIC) += ingenic-battery.o
+obj-$(CONFIG_BATTERY_INTEL_DC_TI) += intel_dc_ti_battery.o
obj-$(CONFIG_BATTERY_IPAQ_MICRO) += ipaq_micro_battery.o
obj-$(CONFIG_BATTERY_WM97XX) += wm97xx_battery.o
obj-$(CONFIG_BATTERY_SBS) += sbs-battery.o
diff --git a/drivers/power/supply/ab8500_btemp.c b/drivers/power/supply/ab8500_btemp.c
index b00c84fbc33c..e5202a7b6209 100644
--- a/drivers/power/supply/ab8500_btemp.c
+++ b/drivers/power/supply/ab8500_btemp.c
@@ -667,7 +667,8 @@ static int ab8500_btemp_bind(struct device *dev, struct device *master,
/* Create a work queue for the btemp */
di->btemp_wq =
- alloc_workqueue("ab8500_btemp_wq", WQ_MEM_RECLAIM, 0);
+ alloc_workqueue("ab8500_btemp_wq", WQ_MEM_RECLAIM | WQ_PERCPU,
+ 0);
if (di->btemp_wq == NULL) {
dev_err(dev, "failed to create work queue\n");
return -ENOMEM;
diff --git a/drivers/power/supply/adc-battery-helper.c b/drivers/power/supply/adc-battery-helper.c
new file mode 100644
index 000000000000..6e0f5b6d73d7
--- /dev/null
+++ b/drivers/power/supply/adc-battery-helper.c
@@ -0,0 +1,327 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Helper for batteries with accurate current and voltage measurement, but
+ * without temperature measurement or without a "resistance-temp-table".
+ *
+ * Some fuel-gauges are not full-featured autonomous fuel-gauges.
+ * These fuel-gauges offer accurate current and voltage measurements but
+ * their coulomb-counters are intended to work together with an always-on
+ * micro-controller monitoring the fuel-gauge.
+ *
+ * This adc-battery-helper code offers open-circuit-voltage (OCV) estimation
+ * and, through that, capacity estimation for devices where such
+ * limited-functionality fuel-gauges are exposed directly to Linux.
+ *
+ * This helper requires the hardware to provide accurate battery current_now
+ * and voltage_now measurements; the helper then provides the following
+ * properties on top of those readings:
+ *
+ * POWER_SUPPLY_PROP_STATUS
+ * POWER_SUPPLY_PROP_VOLTAGE_OCV
+ * POWER_SUPPLY_PROP_VOLTAGE_NOW
+ * POWER_SUPPLY_PROP_CURRENT_NOW
+ * POWER_SUPPLY_PROP_CAPACITY
+ *
+ * As well as, optionally, the following properties, assuming an always-present
+ * system-scope battery, allowing direct use of adc_battery_helper_get_property()
+ * in this common case:
+ * POWER_SUPPLY_PROP_PRESENT
+ * POWER_SUPPLY_PROP_SCOPE
+ *
+ * Using this helper is as simple as (a minimal sketch follows this comment):
+ *
+ * 1. Embed a struct adc_battery_helper; this MUST be the first member of
+ *    the battery driver's data struct.
+ * 2. Use adc_battery_helper_properties[] or add the above properties to
+ *    the list of properties in power_supply_desc.
+ * 3. Call adc_battery_helper_init() after registering the power_supply and
+ *    before returning from the probe() function.
+ * 4. Use adc_battery_helper_get_property() as the power-supply's get_property()
+ *    method, or call it for the above properties.
+ * 5. Use adc_battery_helper_external_power_changed() as the power-supply's
+ *    external_power_changed() method or call it from that method.
+ * 6. Use adc_battery_helper_[suspend|resume]() as suspend-resume methods or
+ *    call them from the driver's suspend-resume methods.
+ *
+ * The provided get_voltage_and_current_now() method will be called by this
+ * helper at adc_battery_helper_init() time and periodically after that.
+ *
+ * Copyright (c) 2021-2025 Hans de Goede <hansg@kernel.org>
+ */
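+
+/*
+ * Minimal usage sketch (hypothetical driver; the "my_bat" names and the
+ * my_bat_get_volt_curr() getter are made up for illustration):
+ *
+ *	struct my_bat {
+ *		struct adc_battery_helper helper;	(MUST be first)
+ *		struct regmap *regmap;
+ *	};
+ *
+ *	static int my_bat_get_volt_curr(struct power_supply *psy,
+ *					int *volt_uv, int *curr_ua)
+ *	{
+ *		struct my_bat *bat = power_supply_get_drvdata(psy);
+ *
+ *		(sample voltage and current back-to-back via bat->regmap)
+ *		return 0;
+ *	}
+ *
+ * and in probe(), after registering the power-supply:
+ *
+ *	ret = adc_battery_helper_init(&bat->helper, psy,
+ *				      my_bat_get_volt_curr, NULL);
+ */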
+
+#include <linux/cleanup.h>
+#include <linux/devm-helpers.h>
+#include <linux/gpio/consumer.h>
+#include <linux/mutex.h>
+#include <linux/power_supply.h>
+#include <linux/workqueue.h>
+
+#include "adc-battery-helper.h"
+
+#define MOV_AVG_WINDOW_SIZE ADC_BAT_HELPER_MOV_AVG_WINDOW_SIZE
+#define INIT_POLL_TIME (5 * HZ)
+#define POLL_TIME (30 * HZ)
+#define SETTLE_TIME (1 * HZ)
+
+#define INIT_POLL_COUNT 30
+
+#define CURR_HYST_UA 65000
+
+#define LOW_BAT_UV 3700000
+#define FULL_BAT_HYST_UV 38000
+
+#define AMBIENT_TEMP_CELSIUS 25
+
+static int adc_battery_helper_get_status(struct adc_battery_helper *help)
+{
+ int full_uv =
+ help->psy->battery_info->constant_charge_voltage_max_uv - FULL_BAT_HYST_UV;
+
+ if (help->curr_ua > CURR_HYST_UA)
+ return POWER_SUPPLY_STATUS_CHARGING;
+
+ if (help->curr_ua < -CURR_HYST_UA)
+ return POWER_SUPPLY_STATUS_DISCHARGING;
+
+ if (help->supplied) {
+ bool full;
+
+ if (help->charge_finished)
+ full = gpiod_get_value_cansleep(help->charge_finished);
+ else
+ full = help->ocv_avg_uv > full_uv;
+
+ if (full)
+ return POWER_SUPPLY_STATUS_FULL;
+ }
+
+ return POWER_SUPPLY_STATUS_NOT_CHARGING;
+}
+
+static void adc_battery_helper_work(struct work_struct *work)
+{
+ struct adc_battery_helper *help = container_of(work, struct adc_battery_helper,
+ work.work);
+ int i, curr_diff_ua, volt_diff_uv, res_mohm, ret, win_size;
+ struct device *dev = help->psy->dev.parent;
+ int volt_uv, prev_volt_uv = help->volt_uv;
+ int curr_ua, prev_curr_ua = help->curr_ua;
+ bool prev_supplied = help->supplied;
+ int prev_status = help->status;
+
+ guard(mutex)(&help->lock);
+
+ ret = help->get_voltage_and_current_now(help->psy, &volt_uv, &curr_ua);
+ if (ret)
+ goto out;
+
+ help->volt_uv = volt_uv;
+ help->curr_ua = curr_ua;
+
+ help->ocv_uv[help->ocv_avg_index] =
+ help->volt_uv - help->curr_ua * help->intern_res_avg_mohm / 1000;
+ dev_dbg(dev, "volt-now: %d, curr-now: %d, volt-ocv: %d\n",
+ help->volt_uv, help->curr_ua, help->ocv_uv[help->ocv_avg_index]);
+ help->ocv_avg_index = (help->ocv_avg_index + 1) % MOV_AVG_WINDOW_SIZE;
+ help->poll_count++;
+
+ help->ocv_avg_uv = 0;
+ win_size = min(help->poll_count, MOV_AVG_WINDOW_SIZE);
+ for (i = 0; i < win_size; i++)
+ help->ocv_avg_uv += help->ocv_uv[i];
+ help->ocv_avg_uv /= win_size;
+
+ help->supplied = power_supply_am_i_supplied(help->psy);
+ help->status = adc_battery_helper_get_status(help);
+ if (help->status == POWER_SUPPLY_STATUS_FULL)
+ help->capacity = 100;
+ else
+ help->capacity = power_supply_batinfo_ocv2cap(help->psy->battery_info,
+ help->ocv_avg_uv,
+ AMBIENT_TEMP_CELSIUS);
+
+ /*
+ * Skip internal resistance calc on charger [un]plug and
+ * when the battery is almost empty (voltage low).
+ */
+ if (help->supplied != prev_supplied ||
+ help->volt_uv < LOW_BAT_UV ||
+ help->poll_count < 2)
+ goto out;
+
+ /*
+ * Assuming that the OCV voltage does not change significantly
+ * between 2 polls, then we can calculate the internal resistance
+ * on a significant current change by attributing all voltage
+ * change between the 2 readings to the internal resistance.
+ */
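+	/*
+	 * Worked example with hypothetical numbers: a current step from
+	 * -200000 uA to -1200000 uA (curr_diff_ua = 1000000) together with
+	 * a 100000 uV voltage drop gives
+	 * res_mohm = 100000 * 1000 / 1000000 = 100 mOhm.
+	 */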
+ curr_diff_ua = abs(help->curr_ua - prev_curr_ua);
+ if (curr_diff_ua < CURR_HYST_UA)
+ goto out;
+
+ volt_diff_uv = abs(help->volt_uv - prev_volt_uv);
+ res_mohm = volt_diff_uv * 1000 / curr_diff_ua;
+
+ if ((res_mohm < (help->intern_res_avg_mohm * 2 / 3)) ||
+ (res_mohm > (help->intern_res_avg_mohm * 4 / 3))) {
+ dev_dbg(dev, "Ignoring outlier internal resistance %d mOhm\n", res_mohm);
+ goto out;
+ }
+
+ dev_dbg(dev, "Internal resistance %d mOhm\n", res_mohm);
+
+ help->intern_res_mohm[help->intern_res_avg_index] = res_mohm;
+ help->intern_res_avg_index = (help->intern_res_avg_index + 1) % MOV_AVG_WINDOW_SIZE;
+ help->intern_res_poll_count++;
+
+ help->intern_res_avg_mohm = 0;
+ win_size = min(help->intern_res_poll_count, MOV_AVG_WINDOW_SIZE);
+ for (i = 0; i < win_size; i++)
+ help->intern_res_avg_mohm += help->intern_res_mohm[i];
+ help->intern_res_avg_mohm /= win_size;
+
+out:
+ queue_delayed_work(system_percpu_wq, &help->work,
+ (help->poll_count <= INIT_POLL_COUNT) ?
+ INIT_POLL_TIME : POLL_TIME);
+
+ if (help->status != prev_status)
+ power_supply_changed(help->psy);
+}
+
+const enum power_supply_property adc_battery_helper_properties[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_VOLTAGE_OCV,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_SCOPE,
+};
+EXPORT_SYMBOL_GPL(adc_battery_helper_properties);
+
+static_assert(ARRAY_SIZE(adc_battery_helper_properties) ==
+ ADC_HELPER_NUM_PROPERTIES);
+
+int adc_battery_helper_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct adc_battery_helper *help = power_supply_get_drvdata(psy);
+ int dummy, ret = 0;
+
+ /*
+ * Avoid racing with adc_battery_helper_work() while it is updating
+ * variables and avoid calling get_voltage_and_current_now() reentrantly.
+ */
+ guard(mutex)(&help->lock);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = help->status;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ ret = help->get_voltage_and_current_now(psy, &val->intval, &dummy);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_OCV:
+ val->intval = help->ocv_avg_uv;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ ret = help->get_voltage_and_current_now(psy, &dummy, &val->intval);
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ val->intval = help->capacity;
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = 1;
+ break;
+ case POWER_SUPPLY_PROP_SCOPE:
+ val->intval = POWER_SUPPLY_SCOPE_SYSTEM;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adc_battery_helper_get_property);
+
+void adc_battery_helper_external_power_changed(struct power_supply *psy)
+{
+ struct adc_battery_helper *help = power_supply_get_drvdata(psy);
+
+ dev_dbg(help->psy->dev.parent, "external power changed\n");
+ mod_delayed_work(system_percpu_wq, &help->work, SETTLE_TIME);
+}
+EXPORT_SYMBOL_GPL(adc_battery_helper_external_power_changed);
+
+static void adc_battery_helper_start_work(struct adc_battery_helper *help)
+{
+ help->poll_count = 0;
+ help->ocv_avg_index = 0;
+
+ queue_delayed_work(system_percpu_wq, &help->work, 0);
+ flush_delayed_work(&help->work);
+}
+
+int adc_battery_helper_init(struct adc_battery_helper *help, struct power_supply *psy,
+ adc_battery_helper_get_func get_voltage_and_current_now,
+ struct gpio_desc *charge_finished_gpio)
+{
+ struct device *dev = psy->dev.parent;
+ int ret;
+
+ help->psy = psy;
+ help->get_voltage_and_current_now = get_voltage_and_current_now;
+ help->charge_finished = charge_finished_gpio;
+
+ ret = devm_mutex_init(dev, &help->lock);
+ if (ret)
+ return ret;
+
+ ret = devm_delayed_work_autocancel(dev, &help->work, adc_battery_helper_work);
+ if (ret)
+ return ret;
+
+ if (!help->psy->battery_info ||
+ help->psy->battery_info->factory_internal_resistance_uohm == -EINVAL ||
+ help->psy->battery_info->constant_charge_voltage_max_uv == -EINVAL ||
+ !psy->battery_info->ocv_table[0]) {
+		dev_err(dev, "required battery-info properties are missing\n");
+ return -ENODEV;
+ }
+
+ /* Use provided internal resistance as start point (in milli-ohm) */
+ help->intern_res_avg_mohm =
+ help->psy->battery_info->factory_internal_resistance_uohm / 1000;
+ /* Also add it to the internal resistance moving average window */
+ help->intern_res_mohm[0] = help->intern_res_avg_mohm;
+ help->intern_res_avg_index = 1;
+ help->intern_res_poll_count = 1;
+
+ adc_battery_helper_start_work(help);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adc_battery_helper_init);
+
+int adc_battery_helper_suspend(struct device *dev)
+{
+ struct adc_battery_helper *help = dev_get_drvdata(dev);
+
+ cancel_delayed_work_sync(&help->work);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adc_battery_helper_suspend);
+
+int adc_battery_helper_resume(struct device *dev)
+{
+ struct adc_battery_helper *help = dev_get_drvdata(dev);
+
+ adc_battery_helper_start_work(help);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adc_battery_helper_resume);
+
+MODULE_AUTHOR("Hans de Goede <hansg@kernel.org>");
+MODULE_DESCRIPTION("ADC battery capacity estimation helper");
+MODULE_LICENSE("GPL");
diff --git a/drivers/power/supply/adc-battery-helper.h b/drivers/power/supply/adc-battery-helper.h
new file mode 100644
index 000000000000..4e42181c8983
--- /dev/null
+++ b/drivers/power/supply/adc-battery-helper.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Helper for batteries with accurate current and voltage measurement, but
+ * without temperature measurement or without a "resistance-temp-table".
+ * Copyright (c) 2021-2025 Hans de Goede <hansg@kernel.org>
+ */
+
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+
+#define ADC_BAT_HELPER_MOV_AVG_WINDOW_SIZE 8
+
+struct power_supply;
+struct gpio_desc;
+
+/*
+ * The adc battery helper code needs voltage- and current-now to be sampled as
+ * close to each other (in sample-time) as possible. A single getter function is
+ * used to allow the battery driver to handle this in the best way possible.
+ */
+typedef int (*adc_battery_helper_get_func)(struct power_supply *psy, int *volt, int *curr);
+
+struct adc_battery_helper {
+ struct power_supply *psy;
+ struct gpio_desc *charge_finished;
+ struct delayed_work work;
+ struct mutex lock;
+ adc_battery_helper_get_func get_voltage_and_current_now;
+ int ocv_uv[ADC_BAT_HELPER_MOV_AVG_WINDOW_SIZE]; /* micro-volt */
+ int intern_res_mohm[ADC_BAT_HELPER_MOV_AVG_WINDOW_SIZE]; /* milli-ohm */
+ int poll_count;
+ int ocv_avg_index;
+ int ocv_avg_uv; /* micro-volt */
+ int intern_res_poll_count;
+ int intern_res_avg_index;
+ int intern_res_avg_mohm; /* milli-ohm */
+ int volt_uv; /* micro-volt */
+ int curr_ua; /* micro-ampere */
+ int capacity; /* percent */
+ int status;
+ bool supplied;
+};
+
+extern const enum power_supply_property adc_battery_helper_properties[];
+/*
+ * ARRAY_SIZE() cannot be used on the extern array above, so the count must
+ * be spelled out here; it is asserted in adc-battery-helper.c.
+ */
+#define ADC_HELPER_NUM_PROPERTIES 7
+
+int adc_battery_helper_init(struct adc_battery_helper *help, struct power_supply *psy,
+ adc_battery_helper_get_func get_voltage_and_current_now,
+ struct gpio_desc *charge_finished_gpio);
+/*
+ * The below functions can be directly used as power-supply / suspend-resume
+ * callbacks. They cast the power_supply_get_drvdata() / dev_get_drvdata() data
+ * directly to struct adc_battery_helper. Therefore struct adc_battery_helper
+ * MUST be the first member of the battery driver's data struct.
+ */
+int adc_battery_helper_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val);
+void adc_battery_helper_external_power_changed(struct power_supply *psy);
+int adc_battery_helper_suspend(struct device *dev);
+int adc_battery_helper_resume(struct device *dev);
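+
+/*
+ * Example (a sketch, not mandated by this header) of wiring the
+ * suspend/resume helpers into a driver's pm_ops:
+ *
+ *	static DEFINE_SIMPLE_DEV_PM_OPS(my_bat_pm_ops,
+ *					adc_battery_helper_suspend,
+ *					adc_battery_helper_resume);
+ *
+ * where "my_bat_pm_ops" is a made-up name; this works because both helpers
+ * only expect dev_get_drvdata() to return a struct whose first member is
+ * the struct adc_battery_helper.
+ */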
diff --git a/drivers/power/supply/bq2415x_charger.c b/drivers/power/supply/bq2415x_charger.c
index 917c26ee56bc..b50a28b9dd38 100644
--- a/drivers/power/supply/bq2415x_charger.c
+++ b/drivers/power/supply/bq2415x_charger.c
@@ -842,7 +842,7 @@ static int bq2415x_notifier_call(struct notifier_block *nb,
if (bq->automode < 1)
return NOTIFY_OK;
- mod_delayed_work(system_wq, &bq->work, 0);
+ mod_delayed_work(system_percpu_wq, &bq->work, 0);
return NOTIFY_OK;
}
@@ -1516,7 +1516,7 @@ static int bq2415x_power_supply_init(struct bq2415x_device *bq)
ret = bq2415x_detect_revision(bq);
if (ret < 0)
- strcpy(revstr, "unknown");
+ strscpy(revstr, "unknown", sizeof(revstr));
else
sprintf(revstr, "1.%d", ret);
diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c
index e1510c7fdab3..ed0ceae8d90b 100644
--- a/drivers/power/supply/bq24190_charger.c
+++ b/drivers/power/supply/bq24190_charger.c
@@ -1467,7 +1467,7 @@ static void bq24190_charger_external_power_changed(struct power_supply *psy)
* too low default 500mA iinlim. Delay setting the input-current-limit
* for 300ms to avoid this.
*/
- queue_delayed_work(system_wq, &bdi->input_current_limit_work,
+ queue_delayed_work(system_percpu_wq, &bdi->input_current_limit_work,
msecs_to_jiffies(300));
}
diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
index ad2d9ecf32a5..19445e39651c 100644
--- a/drivers/power/supply/bq27xxx_battery.c
+++ b/drivers/power/supply/bq27xxx_battery.c
@@ -1127,7 +1127,7 @@ static int poll_interval_param_set(const char *val, const struct kernel_param *k
mutex_lock(&bq27xxx_list_lock);
list_for_each_entry(di, &bq27xxx_battery_devices, list)
- mod_delayed_work(system_wq, &di->work, 0);
+ mod_delayed_work(system_percpu_wq, &di->work, 0);
mutex_unlock(&bq27xxx_list_lock);
return ret;
@@ -1945,7 +1945,7 @@ static void bq27xxx_battery_update_unlocked(struct bq27xxx_device_info *di)
di->last_update = jiffies;
if (!di->removed && poll_interval > 0)
- mod_delayed_work(system_wq, &di->work, poll_interval * HZ);
+ mod_delayed_work(system_percpu_wq, &di->work, poll_interval * HZ);
}
void bq27xxx_battery_update(struct bq27xxx_device_info *di)
@@ -2221,14 +2221,7 @@ static void bq27xxx_external_power_changed(struct power_supply *psy)
struct bq27xxx_device_info *di = power_supply_get_drvdata(psy);
/* After charger plug in/out wait 0.5s for things to stabilize */
- mod_delayed_work(system_wq, &di->work, HZ / 2);
-}
-
-static void bq27xxx_battery_mutex_destroy(void *data)
-{
- struct mutex *lock = data;
-
- mutex_destroy(lock);
+ mod_delayed_work(system_percpu_wq, &di->work, HZ / 2);
}
int bq27xxx_battery_setup(struct bq27xxx_device_info *di)
@@ -2242,9 +2235,7 @@ int bq27xxx_battery_setup(struct bq27xxx_device_info *di)
int ret;
INIT_DELAYED_WORK(&di->work, bq27xxx_battery_poll);
- mutex_init(&di->lock);
- ret = devm_add_action_or_reset(di->dev, bq27xxx_battery_mutex_destroy,
- &di->lock);
+ ret = devm_mutex_init(di->dev, &di->lock);
if (ret)
return ret;
diff --git a/drivers/power/supply/cw2015_battery.c b/drivers/power/supply/cw2015_battery.c
index f63c3c410451..2263d5d3448f 100644
--- a/drivers/power/supply/cw2015_battery.c
+++ b/drivers/power/supply/cw2015_battery.c
@@ -506,10 +506,7 @@ static int cw_battery_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_CHARGE_FULL:
case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
- if (cw_bat->battery->charge_full_design_uah > 0)
- val->intval = cw_bat->battery->charge_full_design_uah;
- else
- val->intval = 0;
+ val->intval = max(cw_bat->battery->charge_full_design_uah, 0);
break;
case POWER_SUPPLY_PROP_CHARGE_NOW:
@@ -702,8 +699,7 @@ static int cw_bat_probe(struct i2c_client *client)
if (!cw_bat->battery_workqueue)
return -ENOMEM;
- devm_delayed_work_autocancel(&client->dev,
- &cw_bat->battery_delay_work, cw_bat_work);
+ devm_delayed_work_autocancel(&client->dev, &cw_bat->battery_delay_work, cw_bat_work);
queue_delayed_work(cw_bat->battery_workqueue,
&cw_bat->battery_delay_work, msecs_to_jiffies(10));
return 0;
diff --git a/drivers/power/supply/gpio-charger.c b/drivers/power/supply/gpio-charger.c
index 1b2da9b5fb65..2504190eba82 100644
--- a/drivers/power/supply/gpio-charger.c
+++ b/drivers/power/supply/gpio-charger.c
@@ -79,7 +79,8 @@ static int set_charge_current_limit(struct gpio_charger *gpio_charger, int val)
for (i = 0; i < ndescs; i++) {
bool val = (mapping.gpiodata >> i) & 1;
- gpiod_set_value_cansleep(gpios[ndescs-i-1], val);
+
+ gpiod_set_value_cansleep(gpios[ndescs - i - 1], val);
}
gpio_charger->charge_current_limit = mapping.limit_ua;
@@ -226,14 +227,14 @@ static int init_charge_current_limit(struct device *dev,
gpio_charger->current_limit_map_size = len / 2;
len = device_property_read_u32_array(dev, "charge-current-limit-mapping",
- (u32*) gpio_charger->current_limit_map, len);
+ (u32 *) gpio_charger->current_limit_map, len);
if (len < 0)
return len;
set_def_limit = !device_property_read_u32(dev,
"charge-current-limit-default-microamp",
&def_limit);
- for (i=0; i < gpio_charger->current_limit_map_size; i++) {
+ for (i = 0; i < gpio_charger->current_limit_map_size; i++) {
if (gpio_charger->current_limit_map[i].limit_ua > cur_limit) {
dev_err(dev, "charge-current-limit-mapping not sorted by current in descending order\n");
return -EINVAL;
diff --git a/drivers/power/supply/intel_dc_ti_battery.c b/drivers/power/supply/intel_dc_ti_battery.c
new file mode 100644
index 000000000000..56b0c92e9d28
--- /dev/null
+++ b/drivers/power/supply/intel_dc_ti_battery.c
@@ -0,0 +1,389 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Battery driver for the coulomb-counter of the Intel Dollar Cove TI PMIC
+ *
+ * Note the Intel Dollar Cove TI PMIC coulomb-counter is not a full-featured
+ * autonomous fuel-gauge. It is intended to work together with an always-on
+ * micro-controller monitoring it.
+ *
+ * Since Linux does not monitor coulomb-counter changes while the device
+ * is off or suspended, voltage-based capacity estimation from
+ * the adc-battery-helper code is used.
+ *
+ * Copyright (C) 2024 Hans de Goede <hansg@kernel.org>
+ *
+ * Register definitions and calibration code were taken from
+ * kernel/drivers/platform/x86/dc_ti_cc.c from the Acer A1-840 Android kernel
+ * which has the following copyright header:
+ *
+ * Copyright (C) 2014 Intel Corporation
+ * Author: Ramakrishna Pallala <ramakrishna.pallala@intel.com>
+ *
+ * dc_ti_cc.c is part of the Acer A1-840 Android kernel source-code archive
+ * named: "App. Guide_Acer_20151221_A_A.zip"
+ * which is distributed by Acer from the Acer A1-840 support page:
+ * https://www.acer.com/us-en/support/product-support/A1-840/downloads
+ */
+
+#include <linux/acpi.h>
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/cleanup.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/iio/consumer.h>
+#include <linux/mfd/intel_soc_pmic.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/power_supply.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/timekeeping.h>
+
+#include "adc-battery-helper.h"
+
+#define DC_TI_PMIC_VERSION_REG 0x00
+#define PMIC_VERSION_A0 0xC0
+#define PMIC_VERSION_A1 0xC1
+
+#define DC_TI_CC_CNTL_REG 0x60
+#define CC_CNTL_CC_CTR_EN BIT(0)
+#define CC_CNTL_CC_CLR_EN BIT(1)
+#define CC_CNTL_CC_CAL_EN BIT(2)
+#define CC_CNTL_CC_OFFSET_EN BIT(3)
+#define CC_CNTL_SMPL_INTVL GENMASK(5, 4)
+#define CC_CNTL_SMPL_INTVL_15MS FIELD_PREP(CC_CNTL_SMPL_INTVL, 0)
+#define CC_CNTL_SMPL_INTVL_62MS FIELD_PREP(CC_CNTL_SMPL_INTVL, 1)
+#define CC_CNTL_SMPL_INTVL_125MS FIELD_PREP(CC_CNTL_SMPL_INTVL, 2)
+#define CC_CNTL_SMPL_INTVL_250MS FIELD_PREP(CC_CNTL_SMPL_INTVL, 3)
+
+#define DC_TI_SMPL_CTR0_REG 0x69
+#define DC_TI_SMPL_CTR1_REG 0x68
+#define DC_TI_SMPL_CTR2_REG 0x67
+
+#define DC_TI_CC_OFFSET_HI_REG 0x61
+#define CC_OFFSET_HI_MASK 0x3F
+#define DC_TI_CC_OFFSET_LO_REG 0x62
+
+#define DC_TI_SW_OFFSET_REG 0x6C
+
+#define DC_TI_CC_ACC3_REG 0x63
+#define DC_TI_CC_ACC2_REG 0x64
+#define DC_TI_CC_ACC1_REG 0x65
+#define DC_TI_CC_ACC0_REG 0x66
+
+#define DC_TI_CC_INTG1_REG 0x6A
+#define DC_TI_CC_INTG1_MASK 0x3F
+#define DC_TI_CC_INTG0_REG 0x6B
+
+#define DC_TI_EEPROM_ACCESS_CONTROL 0x88
+#define EEPROM_UNLOCK 0xDA
+#define EEPROM_LOCK 0x00
+
+#define DC_TI_EEPROM_CC_GAIN_REG 0xF4
+#define CC_TRIM_REVISION GENMASK(3, 0)
+#define CC_GAIN_CORRECTION GENMASK(7, 4)
+
+#define PMIC_VERSION_A0_TRIM_REV 3
+#define PMIC_VERSION_A1_MIN_TRIM_REV 1
+
+#define DC_TI_EEPROM_CC_OFFSET_REG 0xFD
+
+#define DC_TI_EEPROM_CTRL 0xFE
+#define EEPROM_BANK0_SEL 0x01
+#define EEPROM_BANK1_SEL 0x02
+
+#define SMPL_INTVL_US 15000
+#define SMPL_INTVL_MS (SMPL_INTVL_US / USEC_PER_MSEC)
+#define CALIBRATION_TIME_US (10 * SMPL_INTVL_US)
+#define SLEEP_SLACK_US 2500
+
+/* CC gain correction is in 0.0025 increments */
+#define CC_GAIN_STEP 25
+#define CC_GAIN_DIV 10000
+
+/* CC offset is in 0.5 units per 250ms (default sample interval) */
+#define CC_OFFSET_DIV 2
+#define CC_OFFSET_SMPL_INTVL_MS 250
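+/*
+ * Example with hypothetical numbers: cc_offset = 10 over smpl_ctr = 10
+ * samples at the 15 ms interval subtracts 10 * 10 * 15 / (250 * 2) = 3
+ * units from the accumulator before the gain correction is applied.
+ */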
+
+/* CC accumulator scale is 366.2 uC (microcoulomb) per unit */
+#define CC_ACC_TO_UA(acc, smpl_ctr) \
+ ((acc) * (3662 * MSEC_PER_SEC / 10) / ((smpl_ctr) * SMPL_INTVL_MS))
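+/*
+ * Worked example with hypothetical numbers: acc = 41 units over
+ * smpl_ctr = 10 samples (150 ms at the 15 ms interval) represents
+ * 41 * 366.2 uC = ~15014 uC in 0.15 s, i.e.
+ * 41 * (3662 * 1000 / 10) / (10 * 15) = ~100094 uA (~100 mA).
+ */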
+
+#define DEV_NAME "chtdc_ti_battery"
+
+struct dc_ti_battery_chip {
+	/* Must be the first member, see the adc-battery-helper documentation */
+ struct adc_battery_helper helper;
+ struct device *dev;
+ struct regmap *regmap;
+ struct iio_channel *vbat_channel;
+ struct power_supply *psy;
+ int cc_gain;
+ int cc_offset;
+};
+
+static int dc_ti_battery_get_voltage_and_current_now(struct power_supply *psy, int *volt, int *curr)
+{
+ struct dc_ti_battery_chip *chip = power_supply_get_drvdata(psy);
+ s64 cnt_start_usec, now_usec, sleep_usec;
+ unsigned int reg_val;
+ s32 acc, smpl_ctr;
+ int ret;
+
+ /*
+ * Enable coulomb-counter before reading Vbat from ADC, so that the CC
+ * samples are from the same time period as the Vbat reading.
+ */
+ ret = regmap_write(chip->regmap, DC_TI_CC_CNTL_REG,
+ CC_CNTL_SMPL_INTVL_15MS | CC_CNTL_CC_OFFSET_EN | CC_CNTL_CC_CTR_EN);
+ if (ret)
+ goto out_err;
+
+ cnt_start_usec = ktime_get_ns() / NSEC_PER_USEC;
+
+	/* Read Vbat, convert IIO mV to power-supply uV */
+ ret = iio_read_channel_processed_scale(chip->vbat_channel, volt, 1000);
+ if (ret < 0)
+ goto out_err;
+
+ /* Sleep at least 3 sample-times + slack to get 3+ CC samples */
+ now_usec = ktime_get_ns() / NSEC_PER_USEC;
+ sleep_usec = 3 * SMPL_INTVL_US + SLEEP_SLACK_US - (now_usec - cnt_start_usec);
+ if (sleep_usec > 0 && sleep_usec < 1000000)
+ usleep_range(sleep_usec, sleep_usec + SLEEP_SLACK_US);
+
+ /*
+ * The PMIC latches the coulomb- and sample-counters upon reading the
+ * CC_ACC0 register. Reading multiple registers at once is not supported.
+ *
+ * Step 1: Read CC_ACC0 - CC_ACC3
+ */
+ ret = regmap_read(chip->regmap, DC_TI_CC_ACC0_REG, &reg_val);
+ if (ret)
+ goto out_err;
+
+ acc = reg_val;
+
+ ret = regmap_read(chip->regmap, DC_TI_CC_ACC1_REG, &reg_val);
+ if (ret)
+ goto out_err;
+
+ acc |= reg_val << 8;
+
+ ret = regmap_read(chip->regmap, DC_TI_CC_ACC2_REG, &reg_val);
+ if (ret)
+ goto out_err;
+
+ acc |= reg_val << 16;
+
+ ret = regmap_read(chip->regmap, DC_TI_CC_ACC3_REG, &reg_val);
+ if (ret)
+ goto out_err;
+
+ acc |= reg_val << 24;
+
+ /* Step 2: Read SMPL_CTR0 - SMPL_CTR2 */
+ ret = regmap_read(chip->regmap, DC_TI_SMPL_CTR0_REG, &reg_val);
+ if (ret)
+ goto out_err;
+
+ smpl_ctr = reg_val;
+
+ ret = regmap_read(chip->regmap, DC_TI_SMPL_CTR1_REG, &reg_val);
+ if (ret)
+ goto out_err;
+
+ smpl_ctr |= reg_val << 8;
+
+ ret = regmap_read(chip->regmap, DC_TI_SMPL_CTR2_REG, &reg_val);
+ if (ret)
+ goto out_err;
+
+ smpl_ctr |= reg_val << 16;
+
+	/* Disable the coulomb-counter again */
+ ret = regmap_write(chip->regmap, DC_TI_CC_CNTL_REG,
+ CC_CNTL_SMPL_INTVL_15MS | CC_CNTL_CC_OFFSET_EN);
+ if (ret)
+ goto out_err;
+
+ /* Apply calibration */
+ acc -= chip->cc_offset * smpl_ctr * SMPL_INTVL_MS /
+ (CC_OFFSET_SMPL_INTVL_MS * CC_OFFSET_DIV);
+ acc = acc * (CC_GAIN_DIV - chip->cc_gain * CC_GAIN_STEP) / CC_GAIN_DIV;
+ *curr = CC_ACC_TO_UA(acc, smpl_ctr);
+
+ return 0;
+
+out_err:
+ dev_err(chip->dev, "IO-error %d communicating with PMIC\n", ret);
+ return ret;
+}
+
+static const struct power_supply_desc dc_ti_battery_psy_desc = {
+ .name = "intel_dc_ti_battery",
+ .type = POWER_SUPPLY_TYPE_BATTERY,
+ .get_property = adc_battery_helper_get_property,
+ .external_power_changed = adc_battery_helper_external_power_changed,
+ .properties = adc_battery_helper_properties,
+ .num_properties = ADC_HELPER_NUM_PROPERTIES,
+};
+
+static int dc_ti_battery_hw_init(struct dc_ti_battery_chip *chip)
+{
+ u8 pmic_version, cc_trim_rev;
+ unsigned int reg_val;
+ int ret;
+
+ /* Set sample rate to 15 ms and calibrate the coulomb-counter */
+ ret = regmap_write(chip->regmap, DC_TI_CC_CNTL_REG,
+ CC_CNTL_SMPL_INTVL_15MS | CC_CNTL_CC_OFFSET_EN |
+ CC_CNTL_CC_CAL_EN | CC_CNTL_CC_CTR_EN);
+ if (ret)
+ goto out;
+
+ fsleep(CALIBRATION_TIME_US);
+
+	/* Disable coulomb-counter, it is only used while getting the current */
+ ret = regmap_write(chip->regmap, DC_TI_CC_CNTL_REG,
+ CC_CNTL_SMPL_INTVL_15MS | CC_CNTL_CC_OFFSET_EN);
+ if (ret)
+ goto out;
+
+ ret = regmap_read(chip->regmap, DC_TI_PMIC_VERSION_REG, &reg_val);
+ if (ret)
+ goto out;
+
+ pmic_version = reg_val;
+
+ /*
+	 * As per the PMIC vendor (TI), the calibration offset and gain error
+	 * values are stored in EEPROM Bank 0 and Bank 1 of the PMIC.
+	 * Read the stored offset and gain corrections and apply them to the
+	 * raw coulomb-counter value.
+ */
+
+ /* Unlock the EEPROM Access */
+ ret = regmap_write(chip->regmap, DC_TI_EEPROM_ACCESS_CONTROL, EEPROM_UNLOCK);
+ if (ret)
+ goto out;
+
+ /* Select Bank 1 to read CC GAIN Err correction */
+ ret = regmap_write(chip->regmap, DC_TI_EEPROM_CTRL, EEPROM_BANK1_SEL);
+ if (ret)
+ goto out;
+
+ ret = regmap_read(chip->regmap, DC_TI_EEPROM_CC_GAIN_REG, &reg_val);
+ if (ret)
+ goto out;
+
+ cc_trim_rev = FIELD_GET(CC_TRIM_REVISION, reg_val);
+
+ dev_dbg(chip->dev, "pmic-ver 0x%02x trim-rev %d\n", pmic_version, cc_trim_rev);
+
+ if (!(pmic_version == PMIC_VERSION_A0 && cc_trim_rev == PMIC_VERSION_A0_TRIM_REV) &&
+ !(pmic_version == PMIC_VERSION_A1 && cc_trim_rev >= PMIC_VERSION_A1_MIN_TRIM_REV)) {
+ dev_dbg(chip->dev, "unsupported trim-revision, using uncalibrated CC values\n");
+ goto out_relock;
+ }
+
+ chip->cc_gain = 1 - (int)FIELD_GET(CC_GAIN_CORRECTION, reg_val);
+
+ /* Select Bank 0 to read CC OFFSET Correction */
+ ret = regmap_write(chip->regmap, DC_TI_EEPROM_CTRL, EEPROM_BANK0_SEL);
+ if (ret)
+ goto out_relock;
+
+ ret = regmap_read(chip->regmap, DC_TI_EEPROM_CC_OFFSET_REG, &reg_val);
+ if (ret)
+ goto out_relock;
+
+ chip->cc_offset = (s8)reg_val;
+
+ dev_dbg(chip->dev, "cc-offset %d cc-gain %d\n", chip->cc_offset, chip->cc_gain);
+
+out_relock:
+ /* Re-lock the EEPROM Access */
+ regmap_write(chip->regmap, DC_TI_EEPROM_ACCESS_CONTROL, EEPROM_LOCK);
+out:
+ if (ret)
+ dev_err(chip->dev, "IO-error %d initializing PMIC\n", ret);
+
+ return ret;
+}
+
+static int dc_ti_battery_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct intel_soc_pmic *pmic = dev_get_drvdata(dev->parent);
+ struct power_supply_config psy_cfg = {};
+ struct fwnode_reference_args args;
+ struct gpio_desc *charge_finished;
+ struct dc_ti_battery_chip *chip;
+ int ret;
+
+ /* On most devices with a Dollar Cove TI the battery is handled by ACPI */
+ if (!acpi_quirk_skip_acpi_ac_and_battery())
+ return -ENODEV;
+
+ /* ACPI glue code adds a "monitored-battery" fwnode, wait for this */
+ ret = fwnode_property_get_reference_args(dev_fwnode(dev), "monitored-battery",
+ NULL, 0, 0, &args);
+ if (ret) {
+ dev_dbg(dev, "fwnode_property_get_ref() ret %d\n", ret);
+ return dev_err_probe(dev, -EPROBE_DEFER, "Waiting for monitored-battery fwnode\n");
+ }
+
+ fwnode_handle_put(args.fwnode);
+
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->dev = dev;
+ chip->regmap = pmic->regmap;
+
+ chip->vbat_channel = devm_iio_channel_get(dev, "VBAT");
+ if (IS_ERR(chip->vbat_channel)) {
+ dev_dbg(dev, "devm_iio_channel_get() ret %ld\n", PTR_ERR(chip->vbat_channel));
+ return dev_err_probe(dev, -EPROBE_DEFER, "Waiting for VBAT IIO channel\n");
+ }
+
+ charge_finished = devm_gpiod_get_optional(dev, "charged", GPIOD_IN);
+ if (IS_ERR(charge_finished))
+ return dev_err_probe(dev, PTR_ERR(charge_finished), "Getting charged GPIO\n");
+
+ ret = dc_ti_battery_hw_init(chip);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, chip);
+
+ psy_cfg.drv_data = chip;
+ chip->psy = devm_power_supply_register(dev, &dc_ti_battery_psy_desc, &psy_cfg);
+ if (IS_ERR(chip->psy))
+ return PTR_ERR(chip->psy);
+
+ return adc_battery_helper_init(&chip->helper, chip->psy,
+ dc_ti_battery_get_voltage_and_current_now,
+ charge_finished);
+}
+
+static DEFINE_RUNTIME_DEV_PM_OPS(dc_ti_battery_pm_ops, adc_battery_helper_suspend,
+ adc_battery_helper_resume, NULL);
+
+static struct platform_driver dc_ti_battery_driver = {
+ .driver = {
+ .name = DEV_NAME,
+ .pm = pm_sleep_ptr(&dc_ti_battery_pm_ops),
+ },
+ .probe = dc_ti_battery_probe,
+};
+module_platform_driver(dc_ti_battery_driver);
+
+MODULE_ALIAS("platform:" DEV_NAME);
+MODULE_AUTHOR("Hans de Goede <hansg@kernel.org>");
+MODULE_DESCRIPTION("Intel Dollar Cove (TI) battery driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/power/supply/ipaq_micro_battery.c b/drivers/power/supply/ipaq_micro_battery.c
index 7e0568a5353f..ff8573a5ca6d 100644
--- a/drivers/power/supply/ipaq_micro_battery.c
+++ b/drivers/power/supply/ipaq_micro_battery.c
@@ -232,7 +232,8 @@ static int micro_batt_probe(struct platform_device *pdev)
return -ENOMEM;
mb->micro = dev_get_drvdata(pdev->dev.parent);
- mb->wq = alloc_workqueue("ipaq-battery-wq", WQ_MEM_RECLAIM, 0);
+ mb->wq = alloc_workqueue("ipaq-battery-wq",
+ WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!mb->wq)
return -ENOMEM;
diff --git a/drivers/power/supply/max77705_charger.c b/drivers/power/supply/max77705_charger.c
index 329b430d0e50..b1a227bf72e2 100644
--- a/drivers/power/supply/max77705_charger.c
+++ b/drivers/power/supply/max77705_charger.c
@@ -40,31 +40,30 @@ static enum power_supply_property max77705_charger_props[] = {
POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
};
-static int max77705_chgin_irq(void *irq_drv_data)
+static irqreturn_t max77705_chgin_irq(int irq, void *irq_drv_data)
{
- struct max77705_charger_data *charger = irq_drv_data;
+ struct max77705_charger_data *chg = irq_drv_data;
- queue_work(charger->wqueue, &charger->chgin_work);
+ queue_work(chg->wqueue, &chg->chgin_work);
- return 0;
+ return IRQ_HANDLED;
}
static const struct regmap_irq max77705_charger_irqs[] = {
- { .mask = MAX77705_BYP_IM, },
- { .mask = MAX77705_INP_LIMIT_IM, },
- { .mask = MAX77705_BATP_IM, },
- { .mask = MAX77705_BAT_IM, },
- { .mask = MAX77705_CHG_IM, },
- { .mask = MAX77705_WCIN_IM, },
- { .mask = MAX77705_CHGIN_IM, },
- { .mask = MAX77705_AICL_IM, },
+ REGMAP_IRQ_REG_LINE(MAX77705_BYP_I, BITS_PER_BYTE),
+ REGMAP_IRQ_REG_LINE(MAX77705_INP_LIMIT_I, BITS_PER_BYTE),
+ REGMAP_IRQ_REG_LINE(MAX77705_BATP_I, BITS_PER_BYTE),
+ REGMAP_IRQ_REG_LINE(MAX77705_BAT_I, BITS_PER_BYTE),
+ REGMAP_IRQ_REG_LINE(MAX77705_CHG_I, BITS_PER_BYTE),
+ REGMAP_IRQ_REG_LINE(MAX77705_WCIN_I, BITS_PER_BYTE),
+ REGMAP_IRQ_REG_LINE(MAX77705_CHGIN_I, BITS_PER_BYTE),
+ REGMAP_IRQ_REG_LINE(MAX77705_AICL_I, BITS_PER_BYTE),
};
static struct regmap_irq_chip max77705_charger_irq_chip = {
.name = "max77705-charger",
.status_base = MAX77705_CHG_REG_INT,
.mask_base = MAX77705_CHG_REG_INT_MASK,
- .handle_post_irq = max77705_chgin_irq,
.num_regs = 1,
.irqs = max77705_charger_irqs,
.num_irqs = ARRAY_SIZE(max77705_charger_irqs),
@@ -74,8 +73,7 @@ static int max77705_charger_enable(struct max77705_charger_data *chg)
{
int rv;
- rv = regmap_update_bits(chg->regmap, MAX77705_CHG_REG_CNFG_09,
- MAX77705_CHG_EN_MASK, MAX77705_CHG_EN_MASK);
+ rv = regmap_field_write(chg->rfield[MAX77705_CHG_EN], 1);
if (rv)
dev_err(chg->dev, "unable to enable the charger: %d\n", rv);
@@ -87,10 +85,7 @@ static void max77705_charger_disable(void *data)
struct max77705_charger_data *chg = data;
int rv;
- rv = regmap_update_bits(chg->regmap,
- MAX77705_CHG_REG_CNFG_09,
- MAX77705_CHG_EN_MASK,
- MAX77705_CHG_DISABLE);
+ rv = regmap_field_write(chg->rfield[MAX77705_CHG_EN], MAX77705_CHG_DISABLE);
if (rv)
dev_err(chg->dev, "unable to disable the charger: %d\n", rv);
}
@@ -109,19 +104,30 @@ static int max77705_get_online(struct regmap *regmap, int *val)
return 0;
}
-static int max77705_check_battery(struct max77705_charger_data *charger, int *val)
+static int max77705_set_integer(struct max77705_charger_data *chg, enum max77705_field_idx fidx,
+ unsigned int clamp_min, unsigned int clamp_max,
+ unsigned int div, int val)
+{
+ unsigned int regval;
+
+ regval = clamp_val(val, clamp_min, clamp_max) / div;
+
+ return regmap_field_write(chg->rfield[fidx], regval);
+}
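+
+/*
+ * Illustrative use: max77705_set_integer(chg, MAX77705_CHG_CHGIN_LIM,
+ * MAX77705_CURRENT_CHGIN_MIN, MAX77705_CURRENT_CHGIN_MAX,
+ * MAX77705_CURRENT_CHGIN_STEP, uA) clamps a requested µA value into the
+ * supported range and converts it to register steps; see
+ * max77705_set_property() below for the two users.
+ */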
+
+static int max77705_check_battery(struct max77705_charger_data *chg, int *val)
{
unsigned int reg_data;
unsigned int reg_data2;
- struct regmap *regmap = charger->regmap;
+ struct regmap *regmap = chg->regmap;
regmap_read(regmap, MAX77705_CHG_REG_INT_OK, &reg_data);
- dev_dbg(charger->dev, "CHG_INT_OK(0x%x)\n", reg_data);
+ dev_dbg(chg->dev, "CHG_INT_OK(0x%x)\n", reg_data);
regmap_read(regmap, MAX77705_CHG_REG_DETAILS_00, &reg_data2);
- dev_dbg(charger->dev, "CHG_DETAILS00(0x%x)\n", reg_data2);
+ dev_dbg(chg->dev, "CHG_DETAILS00(0x%x)\n", reg_data2);
if ((reg_data & MAX77705_BATP_OK) || !(reg_data2 & MAX77705_BATP_DTLS))
*val = true;
@@ -131,13 +137,13 @@ static int max77705_check_battery(struct max77705_charger_data *charger, int *va
return 0;
}
-static int max77705_get_charge_type(struct max77705_charger_data *charger, int *val)
+static int max77705_get_charge_type(struct max77705_charger_data *chg, int *val)
{
- struct regmap *regmap = charger->regmap;
- unsigned int reg_data;
+ struct regmap *regmap = chg->regmap;
+ unsigned int reg_data, chg_en;
- regmap_read(regmap, MAX77705_CHG_REG_CNFG_09, &reg_data);
- if (!MAX77705_CHARGER_CHG_CHARGING(reg_data)) {
+ regmap_field_read(chg->rfield[MAX77705_CHG_EN], &chg_en);
+ if (!chg_en) {
*val = POWER_SUPPLY_CHARGE_TYPE_NONE;
return 0;
}
@@ -159,13 +165,13 @@ static int max77705_get_charge_type(struct max77705_charger_data *charger, int *
return 0;
}
-static int max77705_get_status(struct max77705_charger_data *charger, int *val)
+static int max77705_get_status(struct max77705_charger_data *chg, int *val)
{
- struct regmap *regmap = charger->regmap;
- unsigned int reg_data;
+ struct regmap *regmap = chg->regmap;
+ unsigned int reg_data, chg_en;
- regmap_read(regmap, MAX77705_CHG_REG_CNFG_09, &reg_data);
- if (!MAX77705_CHARGER_CHG_CHARGING(reg_data)) {
+ regmap_field_read(chg->rfield[MAX77705_CHG_EN], &chg_en);
+ if (!chg_en) {
*val = POWER_SUPPLY_CHARGE_TYPE_NONE;
return 0;
}
@@ -234,10 +240,10 @@ static int max77705_get_vbus_state(struct regmap *regmap, int *value)
return 0;
}
-static int max77705_get_battery_health(struct max77705_charger_data *charger,
+static int max77705_get_battery_health(struct max77705_charger_data *chg,
int *value)
{
- struct regmap *regmap = charger->regmap;
+ struct regmap *regmap = chg->regmap;
unsigned int bat_dtls;
regmap_read(regmap, MAX77705_CHG_REG_DETAILS_01, &bat_dtls);
@@ -245,16 +251,16 @@ static int max77705_get_battery_health(struct max77705_charger_data *charger,
switch (bat_dtls) {
case MAX77705_BATTERY_NOBAT:
- dev_dbg(charger->dev, "%s: No battery and the charger is suspended\n",
+ dev_dbg(chg->dev, "%s: No battery and the chg is suspended\n",
__func__);
*value = POWER_SUPPLY_HEALTH_NO_BATTERY;
break;
case MAX77705_BATTERY_PREQUALIFICATION:
- dev_dbg(charger->dev, "%s: battery is okay but its voltage is low(~VPQLB)\n",
+ dev_dbg(chg->dev, "%s: battery is okay but its voltage is low(~VPQLB)\n",
__func__);
break;
case MAX77705_BATTERY_DEAD:
- dev_dbg(charger->dev, "%s: battery dead\n", __func__);
+ dev_dbg(chg->dev, "%s: battery dead\n", __func__);
*value = POWER_SUPPLY_HEALTH_DEAD;
break;
case MAX77705_BATTERY_GOOD:
@@ -262,11 +268,11 @@ static int max77705_get_battery_health(struct max77705_charger_data *charger,
*value = POWER_SUPPLY_HEALTH_GOOD;
break;
case MAX77705_BATTERY_OVERVOLTAGE:
- dev_dbg(charger->dev, "%s: battery ovp\n", __func__);
+ dev_dbg(chg->dev, "%s: battery ovp\n", __func__);
*value = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
break;
default:
- dev_dbg(charger->dev, "%s: battery unknown\n", __func__);
+ dev_dbg(chg->dev, "%s: battery unknown\n", __func__);
*value = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
break;
}
@@ -274,9 +280,9 @@ static int max77705_get_battery_health(struct max77705_charger_data *charger,
return 0;
}
-static int max77705_get_health(struct max77705_charger_data *charger, int *val)
+static int max77705_get_health(struct max77705_charger_data *chg, int *val)
{
- struct regmap *regmap = charger->regmap;
+ struct regmap *regmap = chg->regmap;
int ret, is_online = 0;
ret = max77705_get_online(regmap, &is_online);
@@ -287,24 +293,19 @@ static int max77705_get_health(struct max77705_charger_data *charger, int *val)
if (ret || (*val != POWER_SUPPLY_HEALTH_GOOD))
return ret;
}
- return max77705_get_battery_health(charger, val);
+ return max77705_get_battery_health(chg, val);
}
-static int max77705_get_input_current(struct max77705_charger_data *charger,
+static int max77705_get_input_current(struct max77705_charger_data *chg,
int *val)
{
unsigned int reg_data;
int get_current = 0;
- struct regmap *regmap = charger->regmap;
-
- regmap_read(regmap, MAX77705_CHG_REG_CNFG_09, &reg_data);
- reg_data &= MAX77705_CHG_CHGIN_LIM_MASK;
+ regmap_field_read(chg->rfield[MAX77705_CHG_CHGIN_LIM], &reg_data);
if (reg_data <= 3)
get_current = MAX77705_CURRENT_CHGIN_MIN;
- else if (reg_data >= MAX77705_CHG_CHGIN_LIM_MASK)
- get_current = MAX77705_CURRENT_CHGIN_MAX;
else
get_current = (reg_data + 1) * MAX77705_CURRENT_CHGIN_STEP;
@@ -313,26 +314,23 @@ static int max77705_get_input_current(struct max77705_charger_data *charger,
return 0;
}
-static int max77705_get_charge_current(struct max77705_charger_data *charger,
+static int max77705_get_charge_current(struct max77705_charger_data *chg,
int *val)
{
unsigned int reg_data;
- struct regmap *regmap = charger->regmap;
- regmap_read(regmap, MAX77705_CHG_REG_CNFG_02, &reg_data);
- reg_data &= MAX77705_CHG_CC;
+ regmap_field_read(chg->rfield[MAX77705_CHG_CC_LIM], &reg_data);
*val = reg_data <= 0x2 ? MAX77705_CURRENT_CHGIN_MIN : reg_data * MAX77705_CURRENT_CHG_STEP;
return 0;
}
-static int max77705_set_float_voltage(struct max77705_charger_data *charger,
+static int max77705_set_float_voltage(struct max77705_charger_data *chg,
int float_voltage)
{
int float_voltage_mv;
unsigned int reg_data = 0;
- struct regmap *regmap = charger->regmap;
float_voltage_mv = float_voltage / 1000;
reg_data = float_voltage_mv <= 4000 ? 0x0 :
@@ -340,20 +338,16 @@ static int max77705_set_float_voltage(struct max77705_charger_data *charger,
(float_voltage_mv <= 4200) ? (float_voltage_mv - 4000) / 50 :
(((float_voltage_mv - 4200) / 10) + 0x04);
- return regmap_update_bits(regmap, MAX77705_CHG_REG_CNFG_04,
- MAX77705_CHG_CV_PRM_MASK,
- (reg_data << MAX77705_CHG_CV_PRM_SHIFT));
+ return regmap_field_write(chg->rfield[MAX77705_CHG_CV_PRM], reg_data);
}
-static int max77705_get_float_voltage(struct max77705_charger_data *charger,
+static int max77705_get_float_voltage(struct max77705_charger_data *chg,
int *val)
{
unsigned int reg_data = 0;
int voltage_mv;
- struct regmap *regmap = charger->regmap;
- regmap_read(regmap, MAX77705_CHG_REG_CNFG_04, &reg_data);
- reg_data &= MAX77705_CHG_PRM_MASK;
+ regmap_field_read(chg->rfield[MAX77705_CHG_CV_PRM], &reg_data);
voltage_mv = reg_data <= 0x04 ? reg_data * 50 + 4000 :
(reg_data - 4) * 10 + 4200;
*val = voltage_mv * 1000;
@@ -365,28 +359,28 @@ static int max77705_chg_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
{
- struct max77705_charger_data *charger = power_supply_get_drvdata(psy);
- struct regmap *regmap = charger->regmap;
+ struct max77705_charger_data *chg = power_supply_get_drvdata(psy);
+ struct regmap *regmap = chg->regmap;
switch (psp) {
case POWER_SUPPLY_PROP_ONLINE:
return max77705_get_online(regmap, &val->intval);
case POWER_SUPPLY_PROP_PRESENT:
- return max77705_check_battery(charger, &val->intval);
+ return max77705_check_battery(chg, &val->intval);
case POWER_SUPPLY_PROP_STATUS:
- return max77705_get_status(charger, &val->intval);
+ return max77705_get_status(chg, &val->intval);
case POWER_SUPPLY_PROP_CHARGE_TYPE:
- return max77705_get_charge_type(charger, &val->intval);
+ return max77705_get_charge_type(chg, &val->intval);
case POWER_SUPPLY_PROP_HEALTH:
- return max77705_get_health(charger, &val->intval);
+ return max77705_get_health(chg, &val->intval);
case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
- return max77705_get_input_current(charger, &val->intval);
+ return max77705_get_input_current(chg, &val->intval);
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
- return max77705_get_charge_current(charger, &val->intval);
+ return max77705_get_charge_current(chg, &val->intval);
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
- return max77705_get_float_voltage(charger, &val->intval);
+ return max77705_get_float_voltage(chg, &val->intval);
case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
- val->intval = charger->bat_info->voltage_max_design_uv;
+ val->intval = chg->bat_info->voltage_max_design_uv;
break;
case POWER_SUPPLY_PROP_MODEL_NAME:
val->strval = max77705_charger_model;
@@ -400,74 +394,131 @@ static int max77705_chg_get_property(struct power_supply *psy,
return 0;
}
+static int max77705_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct max77705_charger_data *chg = power_supply_get_drvdata(psy);
+ int err = 0;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ err = max77705_set_integer(chg, MAX77705_CHG_CC_LIM,
+ MAX77705_CURRENT_CHGIN_MIN,
+ MAX77705_CURRENT_CHGIN_MAX,
+ MAX77705_CURRENT_CHG_STEP,
+ val->intval);
+ break;
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ err = max77705_set_integer(chg, MAX77705_CHG_CHGIN_LIM,
+ MAX77705_CURRENT_CHGIN_MIN,
+ MAX77705_CURRENT_CHGIN_MAX,
+ MAX77705_CURRENT_CHGIN_STEP,
+ val->intval);
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static int max77705_property_is_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ return true;
+ default:
+ return false;
+ }
+}
+
static const struct power_supply_desc max77705_charger_psy_desc = {
.name = "max77705-charger",
- .type = POWER_SUPPLY_TYPE_USB,
+ .type = POWER_SUPPLY_TYPE_USB,
.properties = max77705_charger_props,
+ .property_is_writeable = max77705_property_is_writeable,
.num_properties = ARRAY_SIZE(max77705_charger_props),
.get_property = max77705_chg_get_property,
+ .set_property = max77705_set_property,
};
static void max77705_chgin_isr_work(struct work_struct *work)
{
- struct max77705_charger_data *charger =
+ struct max77705_charger_data *chg =
container_of(work, struct max77705_charger_data, chgin_work);
- power_supply_changed(charger->psy_chg);
+ power_supply_changed(chg->psy_chg);
}
-static void max77705_charger_initialize(struct max77705_charger_data *chg)
+static int max77705_charger_initialize(struct max77705_charger_data *chg)
{
- u8 reg_data;
struct power_supply_battery_info *info;
struct regmap *regmap = chg->regmap;
+ int err;
- if (power_supply_get_battery_info(chg->psy_chg, &info) < 0)
- return;
+ err = power_supply_get_battery_info(chg->psy_chg, &info);
+ if (err)
+ return dev_err_probe(chg->dev, err, "error on getting battery info");
chg->bat_info = info;
/* unlock charger setting protect */
/* slowest LX slope */
- reg_data = MAX77705_CHGPROT_MASK | MAX77705_SLOWEST_LX_SLOPE;
- regmap_update_bits(regmap, MAX77705_CHG_REG_CNFG_06, reg_data,
- reg_data);
+ err = regmap_field_write(chg->rfield[MAX77705_CHGPROT], MAX77705_CHGPROT_UNLOCKED);
+ if (err)
+ goto err;
+
+ err = regmap_field_write(chg->rfield[MAX77705_LX_SLOPE], MAX77705_SLOWEST_LX_SLOPE);
+ if (err)
+ goto err;
/* fast charge timer disable */
/* restart threshold disable */
/* pre-qual charge disable */
- reg_data = (MAX77705_FCHGTIME_DISABLE << MAX77705_FCHGTIME_SHIFT) |
- (MAX77705_CHG_RSTRT_DISABLE << MAX77705_CHG_RSTRT_SHIFT) |
- (MAX77705_CHG_PQEN_DISABLE << MAX77705_PQEN_SHIFT);
- regmap_update_bits(regmap, MAX77705_CHG_REG_CNFG_01,
- (MAX77705_FCHGTIME_MASK |
- MAX77705_CHG_RSTRT_MASK |
- MAX77705_PQEN_MASK),
- reg_data);
-
- /* OTG off(UNO on), boost off */
- regmap_update_bits(regmap, MAX77705_CHG_REG_CNFG_00,
- MAX77705_OTG_CTRL, 0);
+ err = regmap_field_write(chg->rfield[MAX77705_FCHGTIME], MAX77705_FCHGTIME_DISABLE);
+ if (err)
+ goto err;
+
+ err = regmap_field_write(chg->rfield[MAX77705_CHG_RSTRT], MAX77705_CHG_RSTRT_DISABLE);
+ if (err)
+ goto err;
+
+ err = regmap_field_write(chg->rfield[MAX77705_CHG_PQEN], MAX77705_CHG_PQEN_DISABLE);
+ if (err)
+ goto err;
+
+ err = regmap_field_write(chg->rfield[MAX77705_MODE],
+ MAX77705_CHG_MASK | MAX77705_BUCK_MASK);
+ if (err)
+ goto err;
/* charge current 450mA(default) */
/* otg current limit 900mA */
- regmap_update_bits(regmap, MAX77705_CHG_REG_CNFG_02,
- MAX77705_OTG_ILIM_MASK,
- MAX77705_OTG_ILIM_900 << MAX77705_OTG_ILIM_SHIFT);
+ err = regmap_field_write(chg->rfield[MAX77705_OTG_ILIM], MAX77705_OTG_ILIM_900);
+ if (err)
+ goto err;
/* BAT to SYS OCP 4.80A */
- regmap_update_bits(regmap, MAX77705_CHG_REG_CNFG_05,
- MAX77705_REG_B2SOVRC_MASK,
- MAX77705_B2SOVRC_4_8A << MAX77705_REG_B2SOVRC_SHIFT);
+ err = regmap_field_write(chg->rfield[MAX77705_REG_B2SOVRC], MAX77705_B2SOVRC_4_8A);
+ if (err)
+ goto err;
+
/* top off current 150mA */
/* top off timer 30min */
- reg_data = (MAX77705_TO_ITH_150MA << MAX77705_TO_ITH_SHIFT) |
- (MAX77705_TO_TIME_30M << MAX77705_TO_TIME_SHIFT) |
- (MAX77705_SYS_TRACK_DISABLE << MAX77705_SYS_TRACK_DIS_SHIFT);
- regmap_update_bits(regmap, MAX77705_CHG_REG_CNFG_03,
- (MAX77705_TO_ITH_MASK |
- MAX77705_TO_TIME_MASK |
- MAX77705_SYS_TRACK_DIS_MASK), reg_data);
+ err = regmap_field_write(chg->rfield[MAX77705_TO], MAX77705_TO_ITH_150MA);
+ if (err)
+ goto err;
+
+ err = regmap_field_write(chg->rfield[MAX77705_TO_TIME], MAX77705_TO_TIME_30M);
+ if (err)
+ goto err;
+
+ err = regmap_field_write(chg->rfield[MAX77705_SYS_TRACK], MAX77705_SYS_TRACK_DISABLE);
+ if (err)
+ goto err;
/* cv voltage 4.2V or 4.35V */
/* MINVSYS 3.6V(default) */
@@ -478,28 +529,38 @@ static void max77705_charger_initialize(struct max77705_charger_data *chg)
max77705_set_float_voltage(chg, info->voltage_max_design_uv);
}
- regmap_update_bits(regmap, MAX77705_CHG_REG_CNFG_12,
- MAX77705_VCHGIN_REG_MASK, MAX77705_VCHGIN_4_5);
- regmap_update_bits(regmap, MAX77705_CHG_REG_CNFG_12,
- MAX77705_WCIN_REG_MASK, MAX77705_WCIN_4_5);
+ err = regmap_field_write(chg->rfield[MAX77705_VCHGIN], MAX77705_VCHGIN_4_5);
+ if (err)
+ goto err;
+
+ err = regmap_field_write(chg->rfield[MAX77705_WCIN], MAX77705_WCIN_4_5);
+ if (err)
+ goto err;
/* Watchdog timer */
regmap_update_bits(regmap, MAX77705_CHG_REG_CNFG_00,
MAX77705_WDTEN_MASK, 0);
- /* Active Discharge Enable */
- regmap_update_bits(regmap, MAX77705_PMIC_REG_MAINCTRL1, 1, 1);
-
/* VBYPSET=5.0V */
- regmap_update_bits(regmap, MAX77705_CHG_REG_CNFG_11, MAX77705_VBYPSET_MASK, 0);
+ err = regmap_field_write(chg->rfield[MAX77705_VBYPSET], 0);
+ if (err)
+ goto err;
/* Switching Frequency : 1.5MHz */
- regmap_update_bits(regmap, MAX77705_CHG_REG_CNFG_08, MAX77705_REG_FSW_MASK,
- (MAX77705_CHG_FSW_1_5MHz << MAX77705_REG_FSW_SHIFT));
+ err = regmap_field_write(chg->rfield[MAX77705_REG_FSW], MAX77705_CHG_FSW_1_5MHz);
+ if (err)
+ goto err;
/* Auto skip mode */
- regmap_update_bits(regmap, MAX77705_CHG_REG_CNFG_12, MAX77705_REG_DISKIP_MASK,
- (MAX77705_AUTO_SKIP << MAX77705_REG_DISKIP_SHIFT));
+ err = regmap_field_write(chg->rfield[MAX77705_REG_DISKIP], MAX77705_AUTO_SKIP);
+ if (err)
+ goto err;
+
+ return 0;
+
+err:
+ return dev_err_probe(chg->dev, err, "error while configuring");
+
}
static int max77705_charger_probe(struct i2c_client *i2c)
@@ -523,11 +584,13 @@ static int max77705_charger_probe(struct i2c_client *i2c)
if (IS_ERR(chg->regmap))
return PTR_ERR(chg->regmap);
- ret = regmap_update_bits(chg->regmap,
- MAX77705_CHG_REG_INT_MASK,
- MAX77705_CHGIN_IM, 0);
- if (ret)
- return ret;
+ for (int i = 0; i < MAX77705_N_REGMAP_FIELDS; i++) {
+ chg->rfield[i] = devm_regmap_field_alloc(dev, chg->regmap,
+ max77705_reg_field[i]);
+ if (IS_ERR(chg->rfield[i]))
+ return dev_err_probe(dev, PTR_ERR(chg->rfield[i]),
+ "cannot allocate regmap field\n");
+ }
pscfg.fwnode = dev_fwnode(dev);
pscfg.drv_data = chg;
@@ -538,7 +601,7 @@ static int max77705_charger_probe(struct i2c_client *i2c)
max77705_charger_irq_chip.irq_drv_data = chg;
ret = devm_regmap_add_irq_chip(chg->dev, chg->regmap, i2c->irq,
- IRQF_ONESHOT | IRQF_SHARED, 0,
+ IRQF_ONESHOT, 0,
&max77705_charger_irq_chip,
&irq_data);
if (ret)
@@ -546,7 +609,7 @@ static int max77705_charger_probe(struct i2c_client *i2c)
chg->wqueue = create_singlethread_workqueue(dev_name(dev));
if (!chg->wqueue)
- return dev_err_probe(dev, -ENOMEM, "failed to create workqueue\n");
+ return -ENOMEM;
ret = devm_work_autocancel(dev, &chg->chgin_work, max77705_chgin_isr_work);
if (ret) {
@@ -554,7 +617,20 @@ static int max77705_charger_probe(struct i2c_client *i2c)
goto destroy_wq;
}
- max77705_charger_initialize(chg);
+ ret = max77705_charger_initialize(chg);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to initialize charger IC\n");
+ goto destroy_wq;
+ }
+
+ ret = devm_request_threaded_irq(dev, regmap_irq_get_virq(irq_data, MAX77705_CHGIN_I),
+ NULL, max77705_chgin_irq,
+ IRQF_TRIGGER_NONE,
+ "chgin-irq", chg);
+ if (ret) {
+ dev_err_probe(dev, ret, "Failed to Request chgin IRQ\n");
+ goto destroy_wq;
+ }
ret = max77705_charger_enable(chg);
if (ret) {
diff --git a/drivers/power/supply/max77976_charger.c b/drivers/power/supply/max77976_charger.c
index e6fe68cebc32..3d6ff4005533 100644
--- a/drivers/power/supply/max77976_charger.c
+++ b/drivers/power/supply/max77976_charger.c
@@ -292,10 +292,10 @@ static int max77976_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_ONLINE:
err = max77976_get_online(chg, &val->intval);
break;
- case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX:
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
val->intval = MAX77976_CHG_CC_MAX;
break;
- case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
err = max77976_get_integer(chg, CHG_CC,
MAX77976_CHG_CC_MIN,
MAX77976_CHG_CC_MAX,
@@ -330,7 +330,7 @@ static int max77976_set_property(struct power_supply *psy,
int err = 0;
switch (psp) {
- case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
err = max77976_set_integer(chg, CHG_CC,
MAX77976_CHG_CC_MIN,
MAX77976_CHG_CC_MAX,
@@ -355,7 +355,7 @@ static int max77976_property_is_writeable(struct power_supply *psy,
enum power_supply_property psp)
{
switch (psp) {
- case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
return true;
default:
@@ -368,8 +368,8 @@ static enum power_supply_property max77976_psy_props[] = {
POWER_SUPPLY_PROP_CHARGE_TYPE,
POWER_SUPPLY_PROP_HEALTH,
POWER_SUPPLY_PROP_ONLINE,
- POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT,
- POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_MANUFACTURER,
diff --git a/drivers/power/supply/mt6370-charger.c b/drivers/power/supply/mt6370-charger.c
index 98579998b300..e6db961d5818 100644
--- a/drivers/power/supply/mt6370-charger.c
+++ b/drivers/power/supply/mt6370-charger.c
@@ -761,13 +761,6 @@ static int mt6370_chg_init_psy(struct mt6370_priv *priv)
return PTR_ERR_OR_ZERO(priv->psy);
}
-static void mt6370_chg_destroy_attach_lock(void *data)
-{
- struct mutex *attach_lock = data;
-
- mutex_destroy(attach_lock);
-}
-
static void mt6370_chg_destroy_wq(void *data)
{
struct workqueue_struct *wq = data;
@@ -894,22 +887,19 @@ static int mt6370_chg_probe(struct platform_device *pdev)
if (ret)
return dev_err_probe(dev, ret, "Failed to init psy\n");
- mutex_init(&priv->attach_lock);
- ret = devm_add_action_or_reset(dev, mt6370_chg_destroy_attach_lock,
- &priv->attach_lock);
+ ret = devm_mutex_init(dev, &priv->attach_lock);
if (ret)
- return dev_err_probe(dev, ret, "Failed to init attach lock\n");
+ return ret;
priv->attach = MT6370_ATTACH_STAT_DETACH;
priv->wq = create_singlethread_workqueue(dev_name(priv->dev));
if (!priv->wq)
- return dev_err_probe(dev, -ENOMEM,
- "Failed to create workqueue\n");
+ return -ENOMEM;
ret = devm_add_action_or_reset(dev, mt6370_chg_destroy_wq, priv->wq);
if (ret)
- return dev_err_probe(dev, ret, "Failed to init wq\n");
+ return ret;
ret = devm_work_autocancel(dev, &priv->bc12_work, mt6370_chg_bc12_work_func);
if (ret)
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index 18e5e84a81c6..198405f7126f 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -223,6 +223,8 @@ static struct power_supply_attr power_supply_attrs[] __ro_after_init = {
POWER_SUPPLY_ATTR(MANUFACTURE_YEAR),
POWER_SUPPLY_ATTR(MANUFACTURE_MONTH),
POWER_SUPPLY_ATTR(MANUFACTURE_DAY),
+ POWER_SUPPLY_ATTR(INTERNAL_RESISTANCE),
+ POWER_SUPPLY_ATTR(STATE_OF_HEALTH),
/* Properties of type `const char *' */
POWER_SUPPLY_ATTR(MODEL_NAME),
POWER_SUPPLY_ATTR(MANUFACTURER),
diff --git a/drivers/power/supply/qcom_battmgr.c b/drivers/power/supply/qcom_battmgr.c
index 99808ea9851f..3c2837ef3461 100644
--- a/drivers/power/supply/qcom_battmgr.c
+++ b/drivers/power/supply/qcom_battmgr.c
@@ -2,10 +2,12 @@
/*
* Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2022, Linaro Ltd
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include <linux/auxiliary_bus.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/nvmem-consumer.h>
#include <linux/of_device.h>
#include <linux/power_supply.h>
#include <linux/property.h>
@@ -18,8 +20,10 @@
#define BATTMGR_STRING_LEN 128
enum qcom_battmgr_variant {
- QCOM_BATTMGR_SM8350,
QCOM_BATTMGR_SC8280XP,
+ QCOM_BATTMGR_SM8350,
+ QCOM_BATTMGR_SM8550,
+ QCOM_BATTMGR_X1E80100,
};
#define BATTMGR_BAT_STATUS 0x1
@@ -30,8 +34,9 @@ enum qcom_battmgr_variant {
#define NOTIF_BAT_PROPERTY 0x30
#define NOTIF_USB_PROPERTY 0x32
#define NOTIF_WLS_PROPERTY 0x34
-#define NOTIF_BAT_INFO 0x81
#define NOTIF_BAT_STATUS 0x80
+#define NOTIF_BAT_INFO 0x81
+#define NOTIF_BAT_CHARGING_STATE 0x83
#define BATTMGR_BAT_INFO 0x9
@@ -65,6 +70,9 @@ enum qcom_battmgr_variant {
#define BATT_RESISTANCE 21
#define BATT_POWER_NOW 22
#define BATT_POWER_AVG 23
+#define BATT_CHG_CTRL_EN 24
+#define BATT_CHG_CTRL_START_THR 25
+#define BATT_CHG_CTRL_END_THR 26
#define BATTMGR_USB_PROPERTY_GET 0x32
#define BATTMGR_USB_PROPERTY_SET 0x33
@@ -89,6 +97,13 @@ enum qcom_battmgr_variant {
#define WLS_TYPE 5
#define WLS_BOOST_EN 6
+#define BATTMGR_CHG_CTRL_LIMIT_EN 0x48
+#define CHARGE_CTRL_START_THR_MIN 50
+#define CHARGE_CTRL_START_THR_MAX 95
+#define CHARGE_CTRL_END_THR_MIN 55
+#define CHARGE_CTRL_END_THR_MAX 100
+#define CHARGE_CTRL_DELTA_SOC 5
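+
+/*
+ * The BATTMGR_CHG_CTRL_LIMIT_EN request below carries an end-of-charge
+ * target_soc plus a delta_soc; the firmware presumably stops charging at
+ * target_soc and resumes at target_soc - delta_soc, so the start
+ * threshold is always end - delta.
+ */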
+
struct qcom_battmgr_enable_request {
struct pmic_glink_hdr hdr;
__le32 battery_id;
@@ -123,6 +138,13 @@ struct qcom_battmgr_discharge_time_request {
__le32 reserved;
};
+struct qcom_battmgr_charge_ctrl_request {
+ struct pmic_glink_hdr hdr;
+ __le32 enable;
+ __le32 target_soc;
+ __le32 delta_soc;
+};
+
struct qcom_battmgr_message {
struct pmic_glink_hdr hdr;
union {
@@ -235,6 +257,8 @@ struct qcom_battmgr_info {
unsigned int capacity_warning;
unsigned int cycle_count;
unsigned int charge_count;
+ unsigned int charge_ctrl_start;
+ unsigned int charge_ctrl_end;
char model_number[BATTMGR_STRING_LEN];
char serial_number[BATTMGR_STRING_LEN];
char oem_info[BATTMGR_STRING_LEN];
@@ -254,6 +278,8 @@ struct qcom_battmgr_status {
unsigned int voltage_now;
unsigned int voltage_ocv;
unsigned int temperature;
+ unsigned int resistance;
+ unsigned int soh_percent;
unsigned int discharge_time;
unsigned int charge_time;
@@ -418,7 +444,11 @@ static const u8 sm8350_bat_prop_map[] = {
[POWER_SUPPLY_PROP_MODEL_NAME] = BATT_MODEL_NAME,
[POWER_SUPPLY_PROP_TIME_TO_FULL_AVG] = BATT_TTF_AVG,
[POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG] = BATT_TTE_AVG,
+ [POWER_SUPPLY_PROP_INTERNAL_RESISTANCE] = BATT_RESISTANCE,
+ [POWER_SUPPLY_PROP_STATE_OF_HEALTH] = BATT_SOH,
[POWER_SUPPLY_PROP_POWER_NOW] = BATT_POWER_NOW,
+ [POWER_SUPPLY_PROP_CHARGE_CONTROL_START_THRESHOLD] = BATT_CHG_CTRL_START_THR,
+ [POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD] = BATT_CHG_CTRL_END_THR,
};
static int qcom_battmgr_bat_sm8350_update(struct qcom_battmgr *battmgr,
@@ -489,7 +519,8 @@ static int qcom_battmgr_bat_get_property(struct power_supply *psy,
if (!battmgr->service_up)
return -EAGAIN;
- if (battmgr->variant == QCOM_BATTMGR_SC8280XP)
+ if (battmgr->variant == QCOM_BATTMGR_SC8280XP ||
+ battmgr->variant == QCOM_BATTMGR_X1E80100)
ret = qcom_battmgr_bat_sc8280xp_update(battmgr, psp);
else
ret = qcom_battmgr_bat_sm8350_update(battmgr, psp);
@@ -584,12 +615,24 @@ static int qcom_battmgr_bat_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_TEMP:
val->intval = battmgr->status.temperature;
break;
+ case POWER_SUPPLY_PROP_INTERNAL_RESISTANCE:
+ val->intval = battmgr->status.resistance;
+ break;
+ case POWER_SUPPLY_PROP_STATE_OF_HEALTH:
+ val->intval = battmgr->status.soh_percent;
+ break;
case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG:
val->intval = battmgr->status.discharge_time;
break;
case POWER_SUPPLY_PROP_TIME_TO_FULL_AVG:
val->intval = battmgr->status.charge_time;
break;
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_START_THRESHOLD:
+ val->intval = battmgr->info.charge_ctrl_start;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD:
+ val->intval = battmgr->info.charge_ctrl_end;
+ break;
case POWER_SUPPLY_PROP_MANUFACTURE_YEAR:
val->intval = battmgr->info.year;
break;
@@ -615,6 +658,149 @@ static int qcom_battmgr_bat_get_property(struct power_supply *psy,
return 0;
}
+static int qcom_battmgr_set_charge_control(struct qcom_battmgr *battmgr,
+ u32 target_soc, u32 delta_soc)
+{
+ struct qcom_battmgr_charge_ctrl_request request = {
+ .hdr.owner = cpu_to_le32(PMIC_GLINK_OWNER_BATTMGR),
+ .hdr.type = cpu_to_le32(PMIC_GLINK_REQ_RESP),
+ .hdr.opcode = cpu_to_le32(BATTMGR_CHG_CTRL_LIMIT_EN),
+ .enable = cpu_to_le32(1),
+ .target_soc = cpu_to_le32(target_soc),
+ .delta_soc = cpu_to_le32(delta_soc),
+ };
+
+ return qcom_battmgr_request(battmgr, &request, sizeof(request));
+}
+
+static int qcom_battmgr_set_charge_start_threshold(struct qcom_battmgr *battmgr, int start_soc)
+{
+ u32 target_soc, delta_soc;
+ int ret;
+
+ if (start_soc < CHARGE_CTRL_START_THR_MIN ||
+ start_soc > CHARGE_CTRL_START_THR_MAX) {
+ dev_err(battmgr->dev, "charge control start threshold exceed range: [%u - %u]\n",
+ CHARGE_CTRL_START_THR_MIN, CHARGE_CTRL_START_THR_MAX);
+ return -EINVAL;
+ }
+
+ /*
+ * If the new start threshold is larger than the old end threshold,
+ * move the end threshold one step (DELTA_SOC) after the new start
+ * threshold.
+ */
+ if (start_soc > battmgr->info.charge_ctrl_end) {
+ target_soc = start_soc + CHARGE_CTRL_DELTA_SOC;
+ target_soc = min_t(u32, target_soc, CHARGE_CTRL_END_THR_MAX);
+ delta_soc = target_soc - start_soc;
+ delta_soc = min_t(u32, delta_soc, CHARGE_CTRL_DELTA_SOC);
+ } else {
+ target_soc = battmgr->info.charge_ctrl_end;
+ delta_soc = battmgr->info.charge_ctrl_end - start_soc;
+ }
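+
+	/*
+	 * Worked example: start_soc = 90 with a stored end threshold of 80
+	 * takes the branch above: target_soc = min(90 + 5, 100) = 95 and
+	 * delta_soc = 5, i.e. charging stops at 95% and resumes at 90%.
+	 */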
+
+ mutex_lock(&battmgr->lock);
+ ret = qcom_battmgr_set_charge_control(battmgr, target_soc, delta_soc);
+ mutex_unlock(&battmgr->lock);
+ if (!ret) {
+ battmgr->info.charge_ctrl_start = start_soc;
+ battmgr->info.charge_ctrl_end = target_soc;
+ }
+
+	return ret;
+}
+
+static int qcom_battmgr_set_charge_end_threshold(struct qcom_battmgr *battmgr, int end_soc)
+{
+ u32 delta_soc = CHARGE_CTRL_DELTA_SOC;
+ int ret;
+
+ if (end_soc < CHARGE_CTRL_END_THR_MIN ||
+ end_soc > CHARGE_CTRL_END_THR_MAX) {
+ dev_err(battmgr->dev, "charge control end threshold exceed range: [%u - %u]\n",
+ CHARGE_CTRL_END_THR_MIN, CHARGE_CTRL_END_THR_MAX);
+ return -EINVAL;
+ }
+
+ if (battmgr->info.charge_ctrl_start && end_soc > battmgr->info.charge_ctrl_start)
+ delta_soc = end_soc - battmgr->info.charge_ctrl_start;
+
+ mutex_lock(&battmgr->lock);
+ ret = qcom_battmgr_set_charge_control(battmgr, end_soc, delta_soc);
+ mutex_unlock(&battmgr->lock);
+ if (!ret) {
+ battmgr->info.charge_ctrl_start = end_soc - delta_soc;
+ battmgr->info.charge_ctrl_end = end_soc;
+ }
+
+	return ret;
+}
+
+static int qcom_battmgr_charge_control_thresholds_init(struct qcom_battmgr *battmgr)
+{
+ int ret;
+ u8 en, end_soc, start_soc, delta_soc;
+
+ ret = nvmem_cell_read_u8(battmgr->dev->parent, "charge_limit_en", &en);
+ if (!ret && en != 0) {
+ ret = nvmem_cell_read_u8(battmgr->dev->parent, "charge_limit_end", &end_soc);
+ if (ret < 0)
+ return ret;
+
+ ret = nvmem_cell_read_u8(battmgr->dev->parent, "charge_limit_delta", &delta_soc);
+ if (ret < 0)
+ return ret;
+
+ if (delta_soc >= end_soc)
+ return -EINVAL;
+
+ start_soc = end_soc - delta_soc;
+		end_soc = clamp_val(end_soc, CHARGE_CTRL_END_THR_MIN, CHARGE_CTRL_END_THR_MAX);
+		start_soc = clamp_val(start_soc, CHARGE_CTRL_START_THR_MIN, CHARGE_CTRL_START_THR_MAX);
+
+ battmgr->info.charge_ctrl_start = start_soc;
+ battmgr->info.charge_ctrl_end = end_soc;
+ }
+
+ return 0;
+}
+
+static int qcom_battmgr_bat_is_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_START_THRESHOLD:
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static int qcom_battmgr_bat_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *pval)
+{
+ struct qcom_battmgr *battmgr = power_supply_get_drvdata(psy);
+
+ if (!battmgr->service_up)
+ return -EAGAIN;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_START_THRESHOLD:
+ return qcom_battmgr_set_charge_start_threshold(battmgr, pval->intval);
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD:
+ return qcom_battmgr_set_charge_end_threshold(battmgr, pval->intval);
+ default:
+ return -EINVAL;
+ }
+}
+
static const enum power_supply_property sc8280xp_bat_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_PRESENT,
@@ -649,6 +835,43 @@ static const struct power_supply_desc sc8280xp_bat_psy_desc = {
.get_property = qcom_battmgr_bat_get_property,
};
+static const enum power_supply_property x1e80100_bat_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_CYCLE_COUNT,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_POWER_NOW,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_EMPTY,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN,
+ POWER_SUPPLY_PROP_ENERGY_FULL,
+ POWER_SUPPLY_PROP_ENERGY_EMPTY,
+ POWER_SUPPLY_PROP_ENERGY_NOW,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_MANUFACTURE_YEAR,
+ POWER_SUPPLY_PROP_MANUFACTURE_MONTH,
+ POWER_SUPPLY_PROP_MANUFACTURE_DAY,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+ POWER_SUPPLY_PROP_SERIAL_NUMBER,
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_START_THRESHOLD,
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD,
+};
+
+static const struct power_supply_desc x1e80100_bat_psy_desc = {
+ .name = "qcom-battmgr-bat",
+ .type = POWER_SUPPLY_TYPE_BATTERY,
+ .properties = x1e80100_bat_props,
+ .num_properties = ARRAY_SIZE(x1e80100_bat_props),
+ .get_property = qcom_battmgr_bat_get_property,
+ .set_property = qcom_battmgr_bat_set_property,
+ .property_is_writeable = qcom_battmgr_bat_is_writeable,
+};
+
static const enum power_supply_property sm8350_bat_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_HEALTH,
@@ -668,6 +891,8 @@ static const enum power_supply_property sm8350_bat_props[] = {
POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_TIME_TO_FULL_AVG,
POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG,
+ POWER_SUPPLY_PROP_INTERNAL_RESISTANCE,
+ POWER_SUPPLY_PROP_STATE_OF_HEALTH,
POWER_SUPPLY_PROP_POWER_NOW,
};
@@ -679,6 +904,42 @@ static const struct power_supply_desc sm8350_bat_psy_desc = {
.get_property = qcom_battmgr_bat_get_property,
};
+static const enum power_supply_property sm8550_bat_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_CHARGE_TYPE,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_VOLTAGE_OCV,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_CHARGE_COUNTER,
+ POWER_SUPPLY_PROP_CYCLE_COUNT,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_TIME_TO_FULL_AVG,
+ POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG,
+ POWER_SUPPLY_PROP_INTERNAL_RESISTANCE,
+ POWER_SUPPLY_PROP_STATE_OF_HEALTH,
+ POWER_SUPPLY_PROP_POWER_NOW,
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_START_THRESHOLD,
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD,
+};
+
+static const struct power_supply_desc sm8550_bat_psy_desc = {
+ .name = "qcom-battmgr-bat",
+ .type = POWER_SUPPLY_TYPE_BATTERY,
+ .properties = sm8550_bat_props,
+ .num_properties = ARRAY_SIZE(sm8550_bat_props),
+ .get_property = qcom_battmgr_bat_get_property,
+ .set_property = qcom_battmgr_bat_set_property,
+ .property_is_writeable = qcom_battmgr_bat_is_writeable,
+};
+
static int qcom_battmgr_ac_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
@@ -754,7 +1015,8 @@ static int qcom_battmgr_usb_get_property(struct power_supply *psy,
if (!battmgr->service_up)
return -EAGAIN;
- if (battmgr->variant == QCOM_BATTMGR_SC8280XP)
+ if (battmgr->variant == QCOM_BATTMGR_SC8280XP ||
+ battmgr->variant == QCOM_BATTMGR_X1E80100)
ret = qcom_battmgr_bat_sc8280xp_update(battmgr, psp);
else
ret = qcom_battmgr_usb_sm8350_update(battmgr, psp);
@@ -876,7 +1138,8 @@ static int qcom_battmgr_wls_get_property(struct power_supply *psy,
if (!battmgr->service_up)
return -EAGAIN;
- if (battmgr->variant == QCOM_BATTMGR_SC8280XP)
+ if (battmgr->variant == QCOM_BATTMGR_SC8280XP ||
+ battmgr->variant == QCOM_BATTMGR_X1E80100)
ret = qcom_battmgr_bat_sc8280xp_update(battmgr, psp);
else
ret = qcom_battmgr_wls_sm8350_update(battmgr, psp);
@@ -947,12 +1210,14 @@ static void qcom_battmgr_notification(struct qcom_battmgr *battmgr,
}
notification = le32_to_cpu(msg->notification);
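+	/*
+	 * Only the low byte identifies the notification; the upper bytes
+	 * presumably carry extra payload on newer firmware, so mask them off.
+	 */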
+ notification &= 0xff;
switch (notification) {
case NOTIF_BAT_INFO:
battmgr->info.valid = false;
fallthrough;
case NOTIF_BAT_STATUS:
case NOTIF_BAT_PROPERTY:
+ case NOTIF_BAT_CHARGING_STATE:
power_supply_changed(battmgr->bat_psy);
break;
case NOTIF_USB_PROPERTY:
@@ -982,7 +1247,8 @@ static void qcom_battmgr_sc8280xp_strcpy(char *dest, const char *src)
static unsigned int qcom_battmgr_sc8280xp_parse_technology(const char *chemistry)
{
- if (!strncmp(chemistry, "LIO", BATTMGR_CHEMISTRY_LEN))
+ if ((!strncmp(chemistry, "LIO", BATTMGR_CHEMISTRY_LEN)) ||
+ (!strncmp(chemistry, "OOI", BATTMGR_CHEMISTRY_LEN)))
return POWER_SUPPLY_TECHNOLOGY_LION;
if (!strncmp(chemistry, "LIP", BATTMGR_CHEMISTRY_LEN))
return POWER_SUPPLY_TECHNOLOGY_LIPO;
@@ -1095,6 +1361,9 @@ static void qcom_battmgr_sc8280xp_callback(struct qcom_battmgr *battmgr,
case BATTMGR_BAT_CHARGE_TIME:
battmgr->status.charge_time = le32_to_cpu(resp->time);
break;
+ case BATTMGR_CHG_CTRL_LIMIT_EN:
+ battmgr->error = 0;
+ break;
default:
dev_warn(battmgr->dev, "unknown message %#x\n", opcode);
break;
@@ -1159,6 +1428,9 @@ static void qcom_battmgr_sm8350_callback(struct qcom_battmgr *battmgr,
case BATT_CAPACITY:
battmgr->status.percent = le32_to_cpu(resp->intval.value) / 100;
break;
+ case BATT_SOH:
+ battmgr->status.soh_percent = le32_to_cpu(resp->intval.value);
+ break;
case BATT_VOLT_OCV:
battmgr->status.voltage_ocv = le32_to_cpu(resp->intval.value);
break;
@@ -1199,9 +1471,18 @@ static void qcom_battmgr_sm8350_callback(struct qcom_battmgr *battmgr,
case BATT_TTE_AVG:
battmgr->status.discharge_time = le32_to_cpu(resp->intval.value);
break;
+ case BATT_RESISTANCE:
+ battmgr->status.resistance = le32_to_cpu(resp->intval.value);
+ break;
case BATT_POWER_NOW:
battmgr->status.power_now = le32_to_cpu(resp->intval.value);
break;
+ case BATT_CHG_CTRL_START_THR:
+ battmgr->info.charge_ctrl_start = le32_to_cpu(resp->intval.value);
+ break;
+ case BATT_CHG_CTRL_END_THR:
+ battmgr->info.charge_ctrl_end = le32_to_cpu(resp->intval.value);
+ break;
default:
dev_warn(battmgr->dev, "unknown property %#x\n", property);
break;
@@ -1284,6 +1565,7 @@ static void qcom_battmgr_sm8350_callback(struct qcom_battmgr *battmgr,
}
break;
case BATTMGR_REQUEST_NOTIFICATION:
+ case BATTMGR_CHG_CTRL_LIMIT_EN:
battmgr->error = 0;
break;
default:
@@ -1303,7 +1585,8 @@ static void qcom_battmgr_callback(const void *data, size_t len, void *priv)
if (opcode == BATTMGR_NOTIFICATION)
qcom_battmgr_notification(battmgr, data, len);
- else if (battmgr->variant == QCOM_BATTMGR_SC8280XP)
+ else if (battmgr->variant == QCOM_BATTMGR_SC8280XP ||
+ battmgr->variant == QCOM_BATTMGR_X1E80100)
qcom_battmgr_sc8280xp_callback(battmgr, data, len);
else
qcom_battmgr_sm8350_callback(battmgr, data, len);
@@ -1339,7 +1622,8 @@ static void qcom_battmgr_pdr_notify(void *priv, int state)
static const struct of_device_id qcom_battmgr_of_variants[] = {
{ .compatible = "qcom,sc8180x-pmic-glink", .data = (void *)QCOM_BATTMGR_SC8280XP },
{ .compatible = "qcom,sc8280xp-pmic-glink", .data = (void *)QCOM_BATTMGR_SC8280XP },
- { .compatible = "qcom,x1e80100-pmic-glink", .data = (void *)QCOM_BATTMGR_SC8280XP },
+ { .compatible = "qcom,sm8550-pmic-glink", .data = (void *)QCOM_BATTMGR_SM8550 },
+ { .compatible = "qcom,x1e80100-pmic-glink", .data = (void *)QCOM_BATTMGR_X1E80100 },
/* Unmatched devices falls back to QCOM_BATTMGR_SM8350 */
{}
};
@@ -1349,11 +1633,13 @@ static char *qcom_battmgr_battery[] = { "battery" };
static int qcom_battmgr_probe(struct auxiliary_device *adev,
const struct auxiliary_device_id *id)
{
+ const struct power_supply_desc *psy_desc;
struct power_supply_config psy_cfg_supply = {};
struct power_supply_config psy_cfg = {};
const struct of_device_id *match;
struct qcom_battmgr *battmgr;
struct device *dev = &adev->dev;
+ int ret;
battmgr = devm_kzalloc(dev, sizeof(*battmgr), GFP_KERNEL);
if (!battmgr)
@@ -1379,8 +1665,19 @@ static int qcom_battmgr_probe(struct auxiliary_device *adev,
else
battmgr->variant = QCOM_BATTMGR_SM8350;
- if (battmgr->variant == QCOM_BATTMGR_SC8280XP) {
- battmgr->bat_psy = devm_power_supply_register(dev, &sc8280xp_bat_psy_desc, &psy_cfg);
+ ret = qcom_battmgr_charge_control_thresholds_init(battmgr);
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "failed to init battery charge control thresholds\n");
+
+ if (battmgr->variant == QCOM_BATTMGR_SC8280XP ||
+ battmgr->variant == QCOM_BATTMGR_X1E80100) {
+ if (battmgr->variant == QCOM_BATTMGR_X1E80100)
+ psy_desc = &x1e80100_bat_psy_desc;
+ else
+ psy_desc = &sc8280xp_bat_psy_desc;
+
+ battmgr->bat_psy = devm_power_supply_register(dev, psy_desc, &psy_cfg);
if (IS_ERR(battmgr->bat_psy))
return dev_err_probe(dev, PTR_ERR(battmgr->bat_psy),
"failed to register battery power supply\n");
@@ -1400,7 +1697,12 @@ static int qcom_battmgr_probe(struct auxiliary_device *adev,
return dev_err_probe(dev, PTR_ERR(battmgr->wls_psy),
"failed to register wireless charing power supply\n");
} else {
- battmgr->bat_psy = devm_power_supply_register(dev, &sm8350_bat_psy_desc, &psy_cfg);
+ if (battmgr->variant == QCOM_BATTMGR_SM8550)
+ psy_desc = &sm8550_bat_psy_desc;
+ else
+ psy_desc = &sm8350_bat_psy_desc;
+
+ battmgr->bat_psy = devm_power_supply_register(dev, psy_desc, &psy_cfg);
if (IS_ERR(battmgr->bat_psy))
return dev_err_probe(dev, PTR_ERR(battmgr->bat_psy),
"failed to register battery power supply\n");
diff --git a/drivers/power/supply/rk817_charger.c b/drivers/power/supply/rk817_charger.c
index 1251022eb052..9436c6bbf51f 100644
--- a/drivers/power/supply/rk817_charger.c
+++ b/drivers/power/supply/rk817_charger.c
@@ -1046,7 +1046,7 @@ static void rk817_charging_monitor(struct work_struct *work)
rk817_read_props(charger);
/* Run every 8 seconds like the BSP driver did. */
- queue_delayed_work(system_wq, &charger->work, msecs_to_jiffies(8000));
+ queue_delayed_work(system_percpu_wq, &charger->work, msecs_to_jiffies(8000));
}
static void rk817_cleanup_node(void *data)
@@ -1206,7 +1206,7 @@ static int rk817_charger_probe(struct platform_device *pdev)
return ret;
/* Force the first update immediately. */
- mod_delayed_work(system_wq, &charger->work, 0);
+ mod_delayed_work(system_percpu_wq, &charger->work, 0);
return 0;
}
@@ -1226,7 +1226,7 @@ static int __maybe_unused rk817_resume(struct device *dev)
struct rk817_charger *charger = dev_get_drvdata(dev);
/* force an immediate update */
- mod_delayed_work(system_wq, &charger->work, 0);
+ mod_delayed_work(system_percpu_wq, &charger->work, 0);
return 0;
}
diff --git a/drivers/power/supply/rt9467-charger.c b/drivers/power/supply/rt9467-charger.c
index e9aba9ad393c..fe773dd8b404 100644
--- a/drivers/power/supply/rt9467-charger.c
+++ b/drivers/power/supply/rt9467-charger.c
@@ -633,7 +633,9 @@ out:
static const enum power_supply_property rt9467_chg_properties[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_CURRENT_MAX,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
@@ -656,6 +658,8 @@ static int rt9467_psy_get_property(struct power_supply *psy,
return rt9467_psy_get_status(data, &val->intval);
case POWER_SUPPLY_PROP_ONLINE:
return regmap_field_read(data->rm_field[F_PWR_RDY], &val->intval);
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ return rt9467_get_adc(data, RT9467_ADC_VBUS_DIV5, &val->intval);
case POWER_SUPPLY_PROP_CURRENT_MAX:
mutex_lock(&data->attach_lock);
if (data->psy_usb_type == POWER_SUPPLY_USB_TYPE_UNKNOWN ||
@@ -665,6 +669,8 @@ static int rt9467_psy_get_property(struct power_supply *psy,
val->intval = 1500000;
mutex_unlock(&data->attach_lock);
return 0;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ return rt9467_get_adc(data, RT9467_ADC_IBUS, &val->intval);
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
mutex_lock(&data->ichg_ieoc_lock);
val->intval = data->ichg_ua;
@@ -1141,27 +1147,6 @@ static int rt9467_reset_chip(struct rt9467_chg_data *data)
return regmap_field_write(data->rm_field[F_RST], 1);
}
-static void rt9467_chg_destroy_adc_lock(void *data)
-{
- struct mutex *adc_lock = data;
-
- mutex_destroy(adc_lock);
-}
-
-static void rt9467_chg_destroy_attach_lock(void *data)
-{
- struct mutex *attach_lock = data;
-
- mutex_destroy(attach_lock);
-}
-
-static void rt9467_chg_destroy_ichg_ieoc_lock(void *data)
-{
- struct mutex *ichg_ieoc_lock = data;
-
- mutex_destroy(ichg_ieoc_lock);
-}
-
static void rt9467_chg_complete_aicl_done(void *data)
{
struct completion *aicl_done = data;
@@ -1214,29 +1199,23 @@ static int rt9467_charger_probe(struct i2c_client *i2c)
if (ret)
return dev_err_probe(dev, ret, "Failed to add irq chip\n");
- mutex_init(&data->adc_lock);
- ret = devm_add_action_or_reset(dev, rt9467_chg_destroy_adc_lock,
- &data->adc_lock);
+ ret = devm_mutex_init(dev, &data->adc_lock);
if (ret)
- return dev_err_probe(dev, ret, "Failed to init ADC lock\n");
+ return ret;
- mutex_init(&data->attach_lock);
- ret = devm_add_action_or_reset(dev, rt9467_chg_destroy_attach_lock,
- &data->attach_lock);
+ ret = devm_mutex_init(dev, &data->attach_lock);
if (ret)
- return dev_err_probe(dev, ret, "Failed to init attach lock\n");
+ return ret;
- mutex_init(&data->ichg_ieoc_lock);
- ret = devm_add_action_or_reset(dev, rt9467_chg_destroy_ichg_ieoc_lock,
- &data->ichg_ieoc_lock);
+ ret = devm_mutex_init(dev, &data->ichg_ieoc_lock);
if (ret)
- return dev_err_probe(dev, ret, "Failed to init ICHG/IEOC lock\n");
+ return ret;
init_completion(&data->aicl_done);
ret = devm_add_action_or_reset(dev, rt9467_chg_complete_aicl_done,
&data->aicl_done);
if (ret)
- return dev_err_probe(dev, ret, "Failed to init AICL done completion\n");
+ return ret;
ret = rt9467_do_charger_init(data);
if (ret)
diff --git a/drivers/power/supply/rx51_battery.c b/drivers/power/supply/rx51_battery.c
index 7cdcd415e868..b0220ec2d926 100644
--- a/drivers/power/supply/rx51_battery.c
+++ b/drivers/power/supply/rx51_battery.c
@@ -116,7 +116,7 @@ static int rx51_battery_read_temperature(struct rx51_device_info *di)
int mid = (max + min) / 2;
if (rx51_temp_table2[mid] <= raw)
min = mid;
- else if (rx51_temp_table2[mid] > raw)
+ else
max = mid;
if (rx51_temp_table2[mid] == raw)
break;
diff --git a/drivers/power/supply/sbs-charger.c b/drivers/power/supply/sbs-charger.c
index 27764123b929..7d5e67620580 100644
--- a/drivers/power/supply/sbs-charger.c
+++ b/drivers/power/supply/sbs-charger.c
@@ -154,8 +154,7 @@ static const struct regmap_config sbs_regmap = {
.val_format_endian = REGMAP_ENDIAN_LITTLE, /* since based on SMBus */
};
-static const struct power_supply_desc sbs_desc = {
- .name = "sbs-charger",
+static const struct power_supply_desc sbs_default_desc = {
.type = POWER_SUPPLY_TYPE_MAINS,
.properties = sbs_properties,
.num_properties = ARRAY_SIZE(sbs_properties),
@@ -165,9 +164,20 @@ static const struct power_supply_desc sbs_desc = {
static int sbs_probe(struct i2c_client *client)
{
struct power_supply_config psy_cfg = {};
+ struct power_supply_desc *sbs_desc;
struct sbs_info *chip;
int ret, val;
+ sbs_desc = devm_kmemdup(&client->dev, &sbs_default_desc,
+ sizeof(*sbs_desc), GFP_KERNEL);
+ if (!sbs_desc)
+ return -ENOMEM;
+
+ sbs_desc->name = devm_kasprintf(&client->dev, GFP_KERNEL, "sbs-%s",
+ dev_name(&client->dev));
+ if (!sbs_desc->name)
+ return -ENOMEM;
+
chip = devm_kzalloc(&client->dev, sizeof(struct sbs_info), GFP_KERNEL);
if (!chip)
return -ENOMEM;
@@ -191,7 +201,7 @@ static int sbs_probe(struct i2c_client *client)
return dev_err_probe(&client->dev, ret, "Failed to get device status\n");
chip->last_state = val;
- chip->power_supply = devm_power_supply_register(&client->dev, &sbs_desc, &psy_cfg);
+ chip->power_supply = devm_power_supply_register(&client->dev, sbs_desc, &psy_cfg);
if (IS_ERR(chip->power_supply))
return dev_err_probe(&client->dev, PTR_ERR(chip->power_supply),
"Failed to register power supply\n");
diff --git a/drivers/power/supply/sbs-manager.c b/drivers/power/supply/sbs-manager.c
index 869729dfcd66..6fe526222f7f 100644
--- a/drivers/power/supply/sbs-manager.c
+++ b/drivers/power/supply/sbs-manager.c
@@ -348,7 +348,7 @@ static int sbsm_probe(struct i2c_client *client)
data->muxc = i2c_mux_alloc(adapter, dev, SBSM_MAX_BATS, 0,
I2C_MUX_LOCKED, &sbsm_select, NULL);
if (!data->muxc)
- return dev_err_probe(dev, -ENOMEM, "failed to alloc i2c mux\n");
+ return -ENOMEM;
data->muxc->priv = data;
ret = devm_add_action_or_reset(dev, sbsm_del_mux_adapter, data);
diff --git a/drivers/power/supply/ucs1002_power.c b/drivers/power/supply/ucs1002_power.c
index d32a7633f9e7..fe94435340de 100644
--- a/drivers/power/supply/ucs1002_power.c
+++ b/drivers/power/supply/ucs1002_power.c
@@ -493,7 +493,7 @@ static irqreturn_t ucs1002_alert_irq(int irq, void *data)
{
struct ucs1002_info *info = data;
- mod_delayed_work(system_wq, &info->health_poll, 0);
+ mod_delayed_work(system_percpu_wq, &info->health_poll, 0);
return IRQ_HANDLED;
}
diff --git a/drivers/power/supply/ug3105_battery.c b/drivers/power/supply/ug3105_battery.c
index e8a1de7cade0..210e0f9aa5e0 100644
--- a/drivers/power/supply/ug3105_battery.c
+++ b/drivers/power/supply/ug3105_battery.c
@@ -10,7 +10,22 @@
* is off or suspended, the coulomb counter is not used atm.
*
* Possible improvements:
- * 1. Activate commented out total_coulomb_count code
+ * 1. Add coulomb counter reading, e.g. something like this:
+ * Read + reset coulomb counter every 10 polls (every 300 seconds)
+ *
+ * if ((chip->poll_count % 10) == 0) {
+ * val = ug3105_read_word(chip->client, UG3105_REG_COULOMB_CNT);
+ * if (val < 0)
+ * goto out;
+ *
+ * i2c_smbus_write_byte_data(chip->client, UG3105_REG_CTRL1,
+ * UG3105_CTRL1_RESET_COULOMB_CNT);
+ *
+ * chip->total_coulomb_count += (s16)val;
+ * dev_dbg(&chip->client->dev, "coulomb count %d total %d\n",
+ * (s16)val, chip->total_coulomb_count);
+ * }
+ *
* 2. Reset total_coulomb_count val to 0 when the battery is as good as empty
* and remember that we did this (and clear the flag for this on susp/resume)
* 3. When the battery is full check if the flag that we set total_coulomb_count
@@ -31,24 +46,16 @@
* has shown that an estimated 7404mWh increase of the battery's energy results
* in a total_coulomb_count increase of 3277 units with a 5 milli-ohm sense R.
*
- * Copyright (C) 2021 Hans de Goede <hdegoede@redhat.com>
+ * Copyright (C) 2021 - 2025 Hans de Goede <hansg@kernel.org>
*/
-#include <linux/devm-helpers.h>
#include <linux/module.h>
-#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/mod_devicetable.h>
#include <linux/power_supply.h>
-#include <linux/workqueue.h>
-
-#define UG3105_MOV_AVG_WINDOW 8
-#define UG3105_INIT_POLL_TIME (5 * HZ)
-#define UG3105_POLL_TIME (30 * HZ)
-#define UG3105_SETTLE_TIME (1 * HZ)
-#define UG3105_INIT_POLL_COUNT 30
+#include "adc-battery-helper.h"
#define UG3105_REG_MODE 0x00
#define UG3105_REG_CTRL1 0x01
@@ -61,34 +68,13 @@
#define UG3105_CTRL1_RESET_COULOMB_CNT 0x03
-#define UG3105_CURR_HYST_UA 65000
-
-#define UG3105_LOW_BAT_UV 3700000
-#define UG3105_FULL_BAT_HYST_UV 38000
-
-#define AMBIENT_TEMP_CELCIUS 25
-
struct ug3105_chip {
+	/* Must be the first member, see the adc-battery-helper documentation */
+ struct adc_battery_helper helper;
struct i2c_client *client;
struct power_supply *psy;
- struct delayed_work work;
- struct mutex lock;
- int ocv[UG3105_MOV_AVG_WINDOW]; /* micro-volt */
- int intern_res[UG3105_MOV_AVG_WINDOW]; /* milli-ohm */
- int poll_count;
- int ocv_avg_index;
- int ocv_avg; /* micro-volt */
- int intern_res_poll_count;
- int intern_res_avg_index;
- int intern_res_avg; /* milli-ohm */
- int volt; /* micro-volt */
- int curr; /* micro-ampere */
- int total_coulomb_count;
int uv_per_unit;
int ua_per_unit;
- int status;
- int capacity;
- bool supplied;
};
static int ug3105_read_word(struct i2c_client *client, u8 reg)
@@ -102,230 +88,43 @@ static int ug3105_read_word(struct i2c_client *client, u8 reg)
return val;
}
-static int ug3105_get_status(struct ug3105_chip *chip)
-{
- int full = chip->psy->battery_info->constant_charge_voltage_max_uv -
- UG3105_FULL_BAT_HYST_UV;
-
- if (chip->curr > UG3105_CURR_HYST_UA)
- return POWER_SUPPLY_STATUS_CHARGING;
-
- if (chip->curr < -UG3105_CURR_HYST_UA)
- return POWER_SUPPLY_STATUS_DISCHARGING;
-
- if (chip->supplied && chip->ocv_avg > full)
- return POWER_SUPPLY_STATUS_FULL;
-
- return POWER_SUPPLY_STATUS_NOT_CHARGING;
-}
-
-static void ug3105_work(struct work_struct *work)
-{
- struct ug3105_chip *chip = container_of(work, struct ug3105_chip,
- work.work);
- int i, val, curr_diff, volt_diff, res, win_size;
- bool prev_supplied = chip->supplied;
- int prev_status = chip->status;
- int prev_volt = chip->volt;
- int prev_curr = chip->curr;
- struct power_supply *psy;
-
- mutex_lock(&chip->lock);
-
- psy = chip->psy;
- if (!psy)
- goto out;
-
- val = ug3105_read_word(chip->client, UG3105_REG_BAT_VOLT);
- if (val < 0)
- goto out;
- chip->volt = val * chip->uv_per_unit;
-
- val = ug3105_read_word(chip->client, UG3105_REG_BAT_CURR);
- if (val < 0)
- goto out;
- chip->curr = (s16)val * chip->ua_per_unit;
-
- chip->ocv[chip->ocv_avg_index] =
- chip->volt - chip->curr * chip->intern_res_avg / 1000;
- chip->ocv_avg_index = (chip->ocv_avg_index + 1) % UG3105_MOV_AVG_WINDOW;
- chip->poll_count++;
-
- /*
- * See possible improvements comment above.
- *
- * Read + reset coulomb counter every 10 polls (every 300 seconds)
- * if ((chip->poll_count % 10) == 0) {
- * val = ug3105_read_word(chip->client, UG3105_REG_COULOMB_CNT);
- * if (val < 0)
- * goto out;
- *
- * i2c_smbus_write_byte_data(chip->client, UG3105_REG_CTRL1,
- * UG3105_CTRL1_RESET_COULOMB_CNT);
- *
- * chip->total_coulomb_count += (s16)val;
- * dev_dbg(&chip->client->dev, "coulomb count %d total %d\n",
- * (s16)val, chip->total_coulomb_count);
- * }
- */
-
- chip->ocv_avg = 0;
- win_size = min(chip->poll_count, UG3105_MOV_AVG_WINDOW);
- for (i = 0; i < win_size; i++)
- chip->ocv_avg += chip->ocv[i];
- chip->ocv_avg /= win_size;
-
- chip->supplied = power_supply_am_i_supplied(psy);
- chip->status = ug3105_get_status(chip);
- if (chip->status == POWER_SUPPLY_STATUS_FULL)
- chip->capacity = 100;
- else
- chip->capacity = power_supply_batinfo_ocv2cap(chip->psy->battery_info,
- chip->ocv_avg,
- AMBIENT_TEMP_CELCIUS);
-
- /*
- * Skip internal resistance calc on charger [un]plug and
- * when the battery is almost empty (voltage low).
- */
- if (chip->supplied != prev_supplied ||
- chip->volt < UG3105_LOW_BAT_UV ||
- chip->poll_count < 2)
- goto out;
-
- /*
- * Assuming that the OCV voltage does not change significantly
- * between 2 polls, then we can calculate the internal resistance
- * on a significant current change by attributing all voltage
- * change between the 2 readings to the internal resistance.
- */
- curr_diff = abs(chip->curr - prev_curr);
- if (curr_diff < UG3105_CURR_HYST_UA)
- goto out;
-
- volt_diff = abs(chip->volt - prev_volt);
- res = volt_diff * 1000 / curr_diff;
-
- if ((res < (chip->intern_res_avg * 2 / 3)) ||
- (res > (chip->intern_res_avg * 4 / 3))) {
- dev_dbg(&chip->client->dev, "Ignoring outlier internal resistance %d mOhm\n", res);
- goto out;
- }
-
- dev_dbg(&chip->client->dev, "Internal resistance %d mOhm\n", res);
-
- chip->intern_res[chip->intern_res_avg_index] = res;
- chip->intern_res_avg_index = (chip->intern_res_avg_index + 1) % UG3105_MOV_AVG_WINDOW;
- chip->intern_res_poll_count++;
-
- chip->intern_res_avg = 0;
- win_size = min(chip->intern_res_poll_count, UG3105_MOV_AVG_WINDOW);
- for (i = 0; i < win_size; i++)
- chip->intern_res_avg += chip->intern_res[i];
- chip->intern_res_avg /= win_size;
-
-out:
- mutex_unlock(&chip->lock);
-
- queue_delayed_work(system_wq, &chip->work,
- (chip->poll_count <= UG3105_INIT_POLL_COUNT) ?
- UG3105_INIT_POLL_TIME : UG3105_POLL_TIME);
-
- if (chip->status != prev_status && psy)
- power_supply_changed(psy);
-}
-
-static enum power_supply_property ug3105_battery_props[] = {
- POWER_SUPPLY_PROP_STATUS,
- POWER_SUPPLY_PROP_PRESENT,
- POWER_SUPPLY_PROP_SCOPE,
- POWER_SUPPLY_PROP_VOLTAGE_NOW,
- POWER_SUPPLY_PROP_VOLTAGE_OCV,
- POWER_SUPPLY_PROP_CURRENT_NOW,
- POWER_SUPPLY_PROP_CAPACITY,
-};
-
-static int ug3105_get_property(struct power_supply *psy,
- enum power_supply_property psp,
- union power_supply_propval *val)
+static int ug3105_get_voltage_and_current_now(struct power_supply *psy, int *volt, int *curr)
{
struct ug3105_chip *chip = power_supply_get_drvdata(psy);
- int ret = 0;
-
- mutex_lock(&chip->lock);
+ int ret;
- if (!chip->psy) {
- ret = -EAGAIN;
- goto out;
- }
+ ret = ug3105_read_word(chip->client, UG3105_REG_BAT_VOLT);
+ if (ret < 0)
+ return ret;
- switch (psp) {
- case POWER_SUPPLY_PROP_STATUS:
- val->intval = chip->status;
- break;
- case POWER_SUPPLY_PROP_PRESENT:
- val->intval = 1;
- break;
- case POWER_SUPPLY_PROP_SCOPE:
- val->intval = POWER_SUPPLY_SCOPE_SYSTEM;
- break;
- case POWER_SUPPLY_PROP_VOLTAGE_NOW:
- ret = ug3105_read_word(chip->client, UG3105_REG_BAT_VOLT);
- if (ret < 0)
- break;
- val->intval = ret * chip->uv_per_unit;
- ret = 0;
- break;
- case POWER_SUPPLY_PROP_VOLTAGE_OCV:
- val->intval = chip->ocv_avg;
- break;
- case POWER_SUPPLY_PROP_CURRENT_NOW:
- ret = ug3105_read_word(chip->client, UG3105_REG_BAT_CURR);
- if (ret < 0)
- break;
- val->intval = (s16)ret * chip->ua_per_unit;
- ret = 0;
- break;
- case POWER_SUPPLY_PROP_CAPACITY:
- val->intval = chip->capacity;
- break;
- default:
- ret = -EINVAL;
- }
+ *volt = ret * chip->uv_per_unit;
-out:
- mutex_unlock(&chip->lock);
- return ret;
-}
-
-static void ug3105_external_power_changed(struct power_supply *psy)
-{
- struct ug3105_chip *chip = power_supply_get_drvdata(psy);
+ ret = ug3105_read_word(chip->client, UG3105_REG_BAT_CURR);
+ if (ret < 0)
+ return ret;
- dev_dbg(&chip->client->dev, "external power changed\n");
- mod_delayed_work(system_wq, &chip->work, UG3105_SETTLE_TIME);
+ *curr = (s16)ret * chip->ua_per_unit;
+ return 0;
}
static const struct power_supply_desc ug3105_psy_desc = {
.name = "ug3105_battery",
.type = POWER_SUPPLY_TYPE_BATTERY,
- .get_property = ug3105_get_property,
- .external_power_changed = ug3105_external_power_changed,
- .properties = ug3105_battery_props,
- .num_properties = ARRAY_SIZE(ug3105_battery_props),
+ .get_property = adc_battery_helper_get_property,
+ .external_power_changed = adc_battery_helper_external_power_changed,
+ .properties = adc_battery_helper_properties,
+ .num_properties = ADC_HELPER_NUM_PROPERTIES,
};
-static void ug3105_init(struct ug3105_chip *chip)
+static void ug3105_start(struct i2c_client *client)
+{
+ i2c_smbus_write_byte_data(client, UG3105_REG_MODE, UG3105_MODE_RUN);
+ i2c_smbus_write_byte_data(client, UG3105_REG_CTRL1, UG3105_CTRL1_RESET_COULOMB_CNT);
+}
+
+static void ug3105_stop(struct i2c_client *client)
{
- chip->poll_count = 0;
- chip->ocv_avg_index = 0;
- chip->total_coulomb_count = 0;
- i2c_smbus_write_byte_data(chip->client, UG3105_REG_MODE,
- UG3105_MODE_RUN);
- i2c_smbus_write_byte_data(chip->client, UG3105_REG_CTRL1,
- UG3105_CTRL1_RESET_COULOMB_CNT);
- queue_delayed_work(system_wq, &chip->work, 0);
- flush_delayed_work(&chip->work);
+ i2c_smbus_write_byte_data(client, UG3105_REG_MODE, UG3105_MODE_STANDBY);
}
static int ug3105_probe(struct i2c_client *client)
@@ -333,7 +132,6 @@ static int ug3105_probe(struct i2c_client *client)
struct power_supply_config psy_cfg = {};
struct device *dev = &client->dev;
u32 curr_sense_res_uohm = 10000;
- struct power_supply *psy;
struct ug3105_chip *chip;
int ret;
@@ -342,23 +140,8 @@ static int ug3105_probe(struct i2c_client *client)
return -ENOMEM;
chip->client = client;
- mutex_init(&chip->lock);
- ret = devm_delayed_work_autocancel(dev, &chip->work, ug3105_work);
- if (ret)
- return ret;
- psy_cfg.drv_data = chip;
- psy = devm_power_supply_register(dev, &ug3105_psy_desc, &psy_cfg);
- if (IS_ERR(psy))
- return PTR_ERR(psy);
-
- if (!psy->battery_info ||
- psy->battery_info->factory_internal_resistance_uohm == -EINVAL ||
- psy->battery_info->constant_charge_voltage_max_uv == -EINVAL ||
- !psy->battery_info->ocv_table[0]) {
- dev_err(dev, "error required properties are missing\n");
- return -ENODEV;
- }
+ ug3105_start(client);
device_property_read_u32(dev, "upisemi,rsns-microohm", &curr_sense_res_uohm);
@@ -366,35 +149,36 @@ static int ug3105_probe(struct i2c_client *client)
* DAC maximum is 4.5V divided by 65536 steps + an unknown factor of 10
* coming from somewhere for some reason (verified with a volt-meter).
*/
- chip->uv_per_unit = 45000000/65536;
+ chip->uv_per_unit = 45000000 / 65536;
/* Datasheet says 8.1 uV per unit for the current ADC */
chip->ua_per_unit = 8100000 / curr_sense_res_uohm;
- /* Use provided internal resistance as start point (in milli-ohm) */
- chip->intern_res_avg = psy->battery_info->factory_internal_resistance_uohm / 1000;
- /* Also add it to the internal resistance moving average window */
- chip->intern_res[0] = chip->intern_res_avg;
- chip->intern_res_avg_index = 1;
- chip->intern_res_poll_count = 1;
-
- mutex_lock(&chip->lock);
- chip->psy = psy;
- mutex_unlock(&chip->lock);
+ psy_cfg.drv_data = chip;
+ chip->psy = devm_power_supply_register(dev, &ug3105_psy_desc, &psy_cfg);
+ if (IS_ERR(chip->psy)) {
+ ret = PTR_ERR(chip->psy);
+ goto stop;
+ }
- ug3105_init(chip);
+ ret = adc_battery_helper_init(&chip->helper, chip->psy,
+ ug3105_get_voltage_and_current_now, NULL);
+ if (ret)
+ goto stop;
i2c_set_clientdata(client, chip);
return 0;
+
+stop:
+ ug3105_stop(client);
+ return ret;
}
static int __maybe_unused ug3105_suspend(struct device *dev)
{
struct ug3105_chip *chip = dev_get_drvdata(dev);
- cancel_delayed_work_sync(&chip->work);
- i2c_smbus_write_byte_data(chip->client, UG3105_REG_MODE,
- UG3105_MODE_STANDBY);
-
+ adc_battery_helper_suspend(dev);
+ ug3105_stop(chip->client);
return 0;
}
@@ -402,8 +186,8 @@ static int __maybe_unused ug3105_resume(struct device *dev)
{
struct ug3105_chip *chip = dev_get_drvdata(dev);
- ug3105_init(chip);
-
+ ug3105_start(chip->client);
+ adc_battery_helper_resume(dev);
return 0;
}
@@ -422,10 +206,12 @@ static struct i2c_driver ug3105_i2c_driver = {
.pm = &ug3105_pm_ops,
},
.probe = ug3105_probe,
+ .remove = ug3105_stop,
+ .shutdown = ug3105_stop,
.id_table = ug3105_id,
};
module_i2c_driver(ug3105_i2c_driver);
-MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com");
+MODULE_AUTHOR("Hans de Goede <hansg@kernel.org");
MODULE_DESCRIPTION("uPI uG3105 battery monitor driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/powercap/idle_inject.c b/drivers/powercap/idle_inject.c
index 5ad7cc438068..a25eb2018acd 100644
--- a/drivers/powercap/idle_inject.c
+++ b/drivers/powercap/idle_inject.c
@@ -133,7 +133,7 @@ static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
duration_us = READ_ONCE(ii_dev->run_duration_us);
duration_us += READ_ONCE(ii_dev->idle_duration_us);
- hrtimer_forward_now(timer, ns_to_ktime(duration_us * NSEC_PER_USEC));
+ hrtimer_forward_now(timer, us_to_ktime(duration_us));
return HRTIMER_RESTART;
}
@@ -232,8 +232,7 @@ int idle_inject_start(struct idle_inject_device *ii_dev)
idle_inject_wakeup(ii_dev);
hrtimer_start(&ii_dev->timer,
- ns_to_ktime((idle_duration_us + run_duration_us) *
- NSEC_PER_USEC),
+ us_to_ktime(idle_duration_us + run_duration_us),
HRTIMER_MODE_REL);
return 0;
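
Both idle_inject hunks are equivalent rewrites: us_to_ktime() in linux/ktime.h is the microsecond analogue of ns_to_ktime(), so the manual NSEC_PER_USEC multiplication can be dropped. A one-line illustration of the equivalence:

#include <linux/ktime.h>

/* us_to_ktime(d) yields d * NSEC_PER_USEC nanoseconds, so this returns
 * exactly what the removed ns_to_ktime() expression produced.
 */
static inline ktime_t idle_inject_window(u64 run_us, u64 idle_us)
{
	return us_to_ktime(run_us + idle_us);
}
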
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index 204278eb215e..5f8ea34d11d6 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -67,7 +67,7 @@ config PTP_1588_CLOCK_QORIQ
packets using the SO_TIMESTAMPING API.
To compile this driver as a module, choose M here: the module
- will be called ptp-qoriq.
+ will be called ptp_qoriq.
comment "Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks."
depends on PHYLIB=n || NETWORK_PHY_TIMESTAMPING=n
@@ -252,4 +252,15 @@ config PTP_S390
driver provides the raw clock value without the delta to
userspace. That way userspace programs like chrony could steer
the kernel clock.
+
+config PTP_NETC_V4_TIMER
+ tristate "NXP NETC V4 Timer PTP Driver"
+ depends on PTP_1588_CLOCK
+ depends on PCI_MSI
+ help
+	  This driver adds support for using the NXP NETC V4 Timer as a PTP
+	  clock. The clock is used by the ENETC V4 or NETC V4 switch for PTP
+	  time synchronization. It also supports periodic output signals
+	  (e.g. PPS) and external trigger timestamping.
+
endmenu
diff --git a/drivers/ptp/Makefile b/drivers/ptp/Makefile
index 25f846fe48c9..bdc47e284f14 100644
--- a/drivers/ptp/Makefile
+++ b/drivers/ptp/Makefile
@@ -12,9 +12,7 @@ obj-$(CONFIG_PTP_1588_CLOCK_INES) += ptp_ines.o
obj-$(CONFIG_PTP_1588_CLOCK_PCH) += ptp_pch.o
obj-$(CONFIG_PTP_1588_CLOCK_KVM) += ptp_kvm.o
obj-$(CONFIG_PTP_1588_CLOCK_VMCLOCK) += ptp_vmclock.o
-obj-$(CONFIG_PTP_1588_CLOCK_QORIQ) += ptp-qoriq.o
-ptp-qoriq-y += ptp_qoriq.o
-ptp-qoriq-$(CONFIG_DEBUG_FS) += ptp_qoriq_debugfs.o
+obj-$(CONFIG_PTP_1588_CLOCK_QORIQ) += ptp_qoriq.o
obj-$(CONFIG_PTP_1588_CLOCK_IDTCM) += ptp_clockmatrix.o
obj-$(CONFIG_PTP_1588_CLOCK_FC3W) += ptp_fc3.o
obj-$(CONFIG_PTP_1588_CLOCK_IDT82P33) += ptp_idt82p33.o
@@ -23,3 +21,4 @@ obj-$(CONFIG_PTP_1588_CLOCK_VMW) += ptp_vmw.o
obj-$(CONFIG_PTP_1588_CLOCK_OCP) += ptp_ocp.o
obj-$(CONFIG_PTP_DFL_TOD) += ptp_dfl_tod.o
obj-$(CONFIG_PTP_S390) += ptp_s390.o
+obj-$(CONFIG_PTP_NETC_V4_TIMER) += ptp_netc.o
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index 4ca5a464a46a..8106eb617c8c 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -47,6 +47,26 @@ static int ptp_disable_pinfunc(struct ptp_clock_info *ops,
return err;
}
+void ptp_disable_all_events(struct ptp_clock *ptp)
+{
+ struct ptp_clock_info *info = ptp->info;
+ unsigned int i;
+
+ mutex_lock(&ptp->pincfg_mux);
+ /* Disable any pins that may raise EXTTS events */
+ for (i = 0; i < info->n_pins; i++)
+ if (info->pin_config[i].func == PTP_PF_EXTTS)
+ ptp_disable_pinfunc(info, info->pin_config[i].func,
+ info->pin_config[i].chan);
+
+ /* Disable the PPS event if the driver has PPS support */
+ if (info->pps) {
+ struct ptp_clock_request req = { .type = PTP_CLK_REQ_PPS };
+ info->enable(info, &req, 0);
+ }
+ mutex_unlock(&ptp->pincfg_mux);
+}
+
int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin,
enum ptp_pin_function func, unsigned int chan)
{
@@ -91,12 +111,18 @@ int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin,
return -EOPNOTSUPP;
}
- /* Disable whatever function was previously assigned. */
+ /* Disable whichever pin was previously assigned to this function and
+ * channel.
+ */
if (pin1) {
ptp_disable_pinfunc(info, func, chan);
pin1->func = PTP_PF_NONE;
pin1->chan = 0;
}
+
+ /* Disable whatever function was previously assigned to the requested
+ * pin.
+ */
ptp_disable_pinfunc(info, pin2->func, pin2->chan);
pin2->func = func;
pin2->chan = chan;
@@ -285,17 +311,21 @@ static long ptp_enable_pps(struct ptp_clock *ptp, bool enable)
return ops->enable(ops, &req, enable);
}
-static long ptp_sys_offset_precise(struct ptp_clock *ptp, void __user *arg)
+typedef int (*ptp_crosststamp_fn)(struct ptp_clock_info *,
+ struct system_device_crosststamp *);
+
+static long ptp_sys_offset_precise(struct ptp_clock *ptp, void __user *arg,
+ ptp_crosststamp_fn crosststamp_fn)
{
struct ptp_sys_offset_precise precise_offset;
struct system_device_crosststamp xtstamp;
struct timespec64 ts;
int err;
- if (!ptp->info->getcrosststamp)
+ if (!crosststamp_fn)
return -EOPNOTSUPP;
- err = ptp->info->getcrosststamp(ptp->info, &xtstamp);
+ err = crosststamp_fn(ptp->info, &xtstamp);
if (err)
return err;
@@ -313,12 +343,17 @@ static long ptp_sys_offset_precise(struct ptp_clock *ptp, void __user *arg)
return copy_to_user(arg, &precise_offset, sizeof(precise_offset)) ? -EFAULT : 0;
}
-static long ptp_sys_offset_extended(struct ptp_clock *ptp, void __user *arg)
+typedef int (*ptp_gettimex_fn)(struct ptp_clock_info *,
+ struct timespec64 *,
+ struct ptp_system_timestamp *);
+
+static long ptp_sys_offset_extended(struct ptp_clock *ptp, void __user *arg,
+ ptp_gettimex_fn gettimex_fn)
{
struct ptp_sys_offset_extended *extoff __free(kfree) = NULL;
struct ptp_system_timestamp sts;
- if (!ptp->info->gettimex64)
+ if (!gettimex_fn)
return -EOPNOTSUPP;
extoff = memdup_user(arg, sizeof(*extoff));
@@ -346,7 +381,7 @@ static long ptp_sys_offset_extended(struct ptp_clock *ptp, void __user *arg)
struct timespec64 ts;
int err;
- err = ptp->info->gettimex64(ptp->info, &ts, &sts);
+ err = gettimex_fn(ptp->info, &ts, &sts);
if (err)
return err;
@@ -497,11 +532,13 @@ long ptp_ioctl(struct posix_clock_context *pccontext, unsigned int cmd,
case PTP_SYS_OFFSET_PRECISE:
case PTP_SYS_OFFSET_PRECISE2:
- return ptp_sys_offset_precise(ptp, argptr);
+ return ptp_sys_offset_precise(ptp, argptr,
+ ptp->info->getcrosststamp);
case PTP_SYS_OFFSET_EXTENDED:
case PTP_SYS_OFFSET_EXTENDED2:
- return ptp_sys_offset_extended(ptp, argptr);
+ return ptp_sys_offset_extended(ptp, argptr,
+ ptp->info->gettimex64);
case PTP_SYS_OFFSET:
case PTP_SYS_OFFSET2:
@@ -523,6 +560,13 @@ long ptp_ioctl(struct posix_clock_context *pccontext, unsigned int cmd,
case PTP_MASK_EN_SINGLE:
return ptp_mask_en_single(pccontext->private_clkdata, argptr);
+ case PTP_SYS_OFFSET_PRECISE_CYCLES:
+ return ptp_sys_offset_precise(ptp, argptr,
+ ptp->info->getcrosscycles);
+
+ case PTP_SYS_OFFSET_EXTENDED_CYCLES:
+ return ptp_sys_offset_extended(ptp, argptr,
+ ptp->info->getcyclesx64);
default:
return -ENOTTY;
}
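
The new PTP_SYS_OFFSET_PRECISE_CYCLES and PTP_SYS_OFFSET_EXTENDED_CYCLES cases reuse the existing handlers, only swapping in the driver's free-running cycle callbacks (getcrosscycles/getcyclesx64). A hedged userspace sketch of the extended variant, assuming a uapi linux/ptp_clock.h that already defines the new ioctl number:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_sys_offset_extended req = { .n_samples = 5 };
	int fd = open("/dev/ptp0", O_RDWR);

	if (fd < 0 || ioctl(fd, PTP_SYS_OFFSET_EXTENDED_CYCLES, &req) < 0)
		return 1;

	/* ts[i][0] and ts[i][2] bracket the read with system time;
	 * ts[i][1] is the PHC's free-running cycle counter reading.
	 */
	for (unsigned int i = 0; i < req.n_samples; i++)
		printf("%lld.%09u\n", (long long)req.ts[i][1].sec,
		       req.ts[i][1].nsec);
	return 0;
}
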
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index 1cc06b7cb17e..ef020599b771 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/posix-clock.h>
#include <linux/pps_kernel.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
@@ -100,6 +101,9 @@ static int ptp_clock_settime(struct posix_clock *pc, const struct timespec64 *tp
return -EBUSY;
}
+ if (!timespec64_valid_settod(tp))
+ return -EINVAL;
+
return ptp->info->settime64(ptp->info, tp);
}
@@ -130,7 +134,7 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx)
ops = ptp->info;
if (tx->modes & ADJ_SETOFFSET) {
- struct timespec64 ts;
+ struct timespec64 ts, ts2;
ktime_t kt;
s64 delta;
@@ -143,6 +147,14 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx)
if ((unsigned long) ts.tv_nsec >= NSEC_PER_SEC)
return -EINVAL;
+ /* Make sure the offset is valid */
+ err = ptp_clock_gettime(pc, &ts2);
+ if (err)
+ return err;
+ ts2 = timespec64_add(ts2, ts);
+ if (!timespec64_valid_settod(&ts2))
+ return -EINVAL;
+
kt = timespec64_to_ktime(ts);
delta = ktime_to_ns(kt);
err = ops->adjtime(ops, delta);
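
For reference, the path that reaches the new ADJ_SETOFFSET validation is clock_adjtime(2) on a PHC; a sketch modeled on the kernel's testptp.c, with the usual FD_TO_CLOCKID encoding copied from there:

#define _GNU_SOURCE
#include <time.h>
#include <sys/timex.h>

#define FD_TO_CLOCKID(fd)	((~(clockid_t)(fd) << 3) | 3)

static int phc_step(int fd, long long offset_ns)
{
	struct timex tx = { .modes = ADJ_SETOFFSET | ADJ_NANO };

	tx.time.tv_sec = offset_ns / 1000000000LL;
	tx.time.tv_usec = offset_ns % 1000000000LL; /* ns when ADJ_NANO is set */
	if (tx.time.tv_usec < 0) {
		tx.time.tv_sec -= 1;
		tx.time.tv_usec += 1000000000;
	}

	/* Returns -1 with errno EINVAL if the resulting time is not settable */
	return clock_adjtime(FD_TO_CLOCKID(fd), &tx);
}
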
@@ -236,6 +248,69 @@ static void ptp_aux_kworker(struct kthread_work *work)
kthread_queue_delayed_work(ptp->kworker, &ptp->aux_work, delay);
}
+static ssize_t ptp_n_perout_loopback_read(struct file *filep,
+ char __user *buffer,
+ size_t count, loff_t *pos)
+{
+ struct ptp_clock *ptp = filep->private_data;
+ char buf[12] = {};
+
+ snprintf(buf, sizeof(buf), "%d\n", ptp->info->n_per_lp);
+
+ return simple_read_from_buffer(buffer, count, pos, buf, strlen(buf));
+}
+
+static const struct file_operations ptp_n_perout_loopback_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = ptp_n_perout_loopback_read,
+};
+
+static ssize_t ptp_perout_loopback_write(struct file *filep,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct ptp_clock *ptp = filep->private_data;
+ struct ptp_clock_info *ops = ptp->info;
+ unsigned int index, enable;
+ int len, cnt, err;
+ char buf[32] = {};
+
+ if (*ppos || !count)
+ return -EINVAL;
+
+ if (count >= sizeof(buf))
+ return -ENOSPC;
+
+ len = simple_write_to_buffer(buf, sizeof(buf) - 1,
+ ppos, buffer, count);
+ if (len < 0)
+ return len;
+
+ buf[len] = '\0';
+ cnt = sscanf(buf, "%u %u", &index, &enable);
+ if (cnt != 2)
+ return -EINVAL;
+
+ if (index >= ops->n_per_lp)
+ return -EINVAL;
+
+ if (enable != 0 && enable != 1)
+ return -EINVAL;
+
+ err = ops->perout_loopback(ops, index, enable);
+ if (err)
+ return err;
+
+ return count;
+}
+
+static const struct file_operations ptp_perout_loopback_ops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .write = ptp_perout_loopback_write,
+};
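
The write handler above parses an "<index> <enable>" pair, so writing "0 1" enables loopback on the first periodic output. A small userspace sketch; the path assumes a mounted debugfs and clock index 0:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/ptp0/perout_loopback", "w");

	if (!f)
		return 1;
	/* "<index> <enable>": loop periodic output 0 back internally */
	fprintf(f, "0 1\n");
	return fclose(f) ? 1 : 0;
}
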
+
/* public interface */
struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
@@ -377,6 +452,12 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
/* Debugfs initialization */
snprintf(debugfsname, sizeof(debugfsname), "ptp%d", ptp->index);
ptp->debugfs_root = debugfs_create_dir(debugfsname, NULL);
+ if (info->n_per_lp > 0 && info->perout_loopback) {
+ debugfs_create_file("n_perout_loopback", 0400, ptp->debugfs_root,
+ ptp, &ptp_n_perout_loopback_fops);
+ debugfs_create_file("perout_loopback", 0200, ptp->debugfs_root,
+ ptp, &ptp_perout_loopback_ops);
+ }
return ptp;
@@ -417,9 +498,21 @@ int ptp_clock_unregister(struct ptp_clock *ptp)
device_for_each_child(&ptp->dev, NULL, unregister_vclock);
}
+	/* Take a reference on the device so that posix_clock_unregister()
+	 * does not do the final put and free the structure(s).
+	 */
+ get_device(&ptp->dev);
+
+ /* Wake up any userspace waiting for an event. */
ptp->defunct = 1;
wake_up_interruptible(&ptp->tsev_wq);
+ /* Tear down the POSIX clock, which removes the user interface. */
+ posix_clock_unregister(&ptp->clock);
+
+ /* Disable all sources of event generation. */
+ ptp_disable_all_events(ptp);
+
if (ptp->kworker) {
kthread_cancel_delayed_work_sync(&ptp->aux_work);
kthread_destroy_worker(ptp->kworker);
@@ -429,7 +522,8 @@ int ptp_clock_unregister(struct ptp_clock *ptp)
if (ptp->pps_source)
pps_unregister_source(ptp->pps_source);
- posix_clock_unregister(&ptp->clock);
+ /* The final put, normally here, will invoke ptp_clock_release(). */
+ put_device(&ptp->dev);
return 0;
}
@@ -477,6 +571,58 @@ int ptp_clock_index(struct ptp_clock *ptp)
}
EXPORT_SYMBOL(ptp_clock_index);
+static int ptp_clock_of_node_match(struct device *dev, const void *data)
+{
+ const struct device_node *parent_np = data;
+
+ return (dev->parent && dev_of_node(dev->parent) == parent_np);
+}
+
+int ptp_clock_index_by_of_node(struct device_node *np)
+{
+ struct ptp_clock *ptp;
+ struct device *dev;
+ int phc_index;
+
+ dev = class_find_device(&ptp_class, NULL, np,
+ ptp_clock_of_node_match);
+ if (!dev)
+ return -1;
+
+ ptp = dev_get_drvdata(dev);
+ phc_index = ptp_clock_index(ptp);
+ put_device(dev);
+
+ return phc_index;
+}
+EXPORT_SYMBOL_GPL(ptp_clock_index_by_of_node);
+
+static int ptp_clock_dev_match(struct device *dev, const void *data)
+{
+ const struct device *parent = data;
+
+ return dev->parent == parent;
+}
+
+int ptp_clock_index_by_dev(struct device *parent)
+{
+ struct ptp_clock *ptp;
+ struct device *dev;
+ int phc_index;
+
+ dev = class_find_device(&ptp_class, NULL, parent,
+ ptp_clock_dev_match);
+ if (!dev)
+ return -1;
+
+ ptp = dev_get_drvdata(dev);
+ phc_index = ptp_clock_index(ptp);
+ put_device(dev);
+
+ return phc_index;
+}
+EXPORT_SYMBOL_GPL(ptp_clock_index_by_dev);
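
These two lookup helpers let a MAC driver report its PHC index without holding a ptp_clock pointer. A hedged sketch of a consumer resolving the index from a hypothetical "ptp-timer" phandle; only ptp_clock_index_by_of_node() comes from this patch, the rest is illustrative:

#include <linux/of.h>
#include <linux/ptp_clock_kernel.h>

static int foo_mac_get_phc_index(struct device_node *mac_np)
{
	struct device_node *tmr_np;
	int phc_index;

	tmr_np = of_parse_phandle(mac_np, "ptp-timer", 0);
	if (!tmr_np)
		return -1;

	/* Returns -1 when no clock for that node is registered yet */
	phc_index = ptp_clock_index_by_of_node(tmr_np);
	of_node_put(tmr_np);

	return phc_index;
}
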
+
int ptp_find_pin(struct ptp_clock *ptp,
enum ptp_pin_function func, unsigned int chan)
{
diff --git a/drivers/ptp/ptp_clockmatrix.c b/drivers/ptp/ptp_clockmatrix.c
index b8d4df8c6da2..59cd6bbb33f3 100644
--- a/drivers/ptp/ptp_clockmatrix.c
+++ b/drivers/ptp/ptp_clockmatrix.c
@@ -1161,7 +1161,7 @@ static int set_pll_output_mask(struct idtcm *idtcm, u16 addr, u8 val)
SET_U16_MSB(idtcm->channel[3].output_mask, val);
break;
default:
- err = -EFAULT; /* Bad address */;
+ err = -EFAULT; /* Bad address */
break;
}
diff --git a/drivers/ptp/ptp_netc.c b/drivers/ptp/ptp_netc.c
new file mode 100644
index 000000000000..94e952ee6990
--- /dev/null
+++ b/drivers/ptp/ptp_netc.c
@@ -0,0 +1,1043 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * NXP NETC V4 Timer driver
+ * Copyright 2025 NXP
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/fsl/netc_global.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/pci.h>
+#include <linux/ptp_clock_kernel.h>
+
+#define NETC_TMR_PCI_VENDOR_NXP 0x1131
+
+#define NETC_TMR_CTRL 0x0080
+#define TMR_CTRL_CK_SEL GENMASK(1, 0)
+#define TMR_CTRL_TE BIT(2)
+#define TMR_ETEP(i) BIT(8 + (i))
+#define TMR_COMP_MODE BIT(15)
+#define TMR_CTRL_TCLK_PERIOD GENMASK(25, 16)
+#define TMR_CTRL_PPL(i) BIT(27 - (i))
+#define TMR_CTRL_FS BIT(28)
+
+#define NETC_TMR_TEVENT 0x0084
+#define TMR_TEVNET_PPEN(i) BIT(7 - (i))
+#define TMR_TEVENT_PPEN_ALL GENMASK(7, 5)
+#define TMR_TEVENT_ALMEN(i) BIT(16 + (i))
+#define TMR_TEVENT_ETS_THREN(i) BIT(20 + (i))
+#define TMR_TEVENT_ETSEN(i) BIT(24 + (i))
+#define TMR_TEVENT_ETS_OVEN(i) BIT(28 + (i))
+#define TMR_TEVENT_ETS(i) (TMR_TEVENT_ETS_THREN(i) | \
+ TMR_TEVENT_ETSEN(i) | \
+ TMR_TEVENT_ETS_OVEN(i))
+
+#define NETC_TMR_TEMASK 0x0088
+#define NETC_TMR_STAT 0x0094
+#define TMR_STAT_ETS_VLD(i) BIT(24 + (i))
+
+#define NETC_TMR_CNT_L 0x0098
+#define NETC_TMR_CNT_H 0x009c
+#define NETC_TMR_ADD 0x00a0
+#define NETC_TMR_PRSC 0x00a8
+#define NETC_TMR_ECTRL 0x00ac
+#define NETC_TMR_OFF_L 0x00b0
+#define NETC_TMR_OFF_H 0x00b4
+
+/* i = 0, 1; i indicates the index of TMR_ALARM */
+#define NETC_TMR_ALARM_L(i) (0x00b8 + (i) * 8)
+#define NETC_TMR_ALARM_H(i) (0x00bc + (i) * 8)
+
+/* i = 0, 1, 2. i indicates the index of TMR_FIPER. */
+#define NETC_TMR_FIPER(i) (0x00d0 + (i) * 4)
+
+#define NETC_TMR_FIPER_CTRL 0x00dc
+#define FIPER_CTRL_DIS(i) (BIT(7) << (i) * 8)
+#define FIPER_CTRL_PG(i) (BIT(6) << (i) * 8)
+#define FIPER_CTRL_FS_ALARM(i) (BIT(5) << (i) * 8)
+#define FIPER_CTRL_PW(i) (GENMASK(4, 0) << (i) * 8)
+#define FIPER_CTRL_SET_PW(i, v) (((v) & GENMASK(4, 0)) << 8 * (i))
+
+/* i = 0, 1; i indicates the index of TMR_ETTS */
+#define NETC_TMR_ETTS_L(i) (0x00e0 + (i) * 8)
+#define NETC_TMR_ETTS_H(i) (0x00e4 + (i) * 8)
+#define NETC_TMR_CUR_TIME_L 0x00f0
+#define NETC_TMR_CUR_TIME_H 0x00f4
+
+#define NETC_TMR_REGS_BAR 0
+#define NETC_GLOBAL_OFFSET 0x10000
+#define NETC_GLOBAL_IPBRR0 0xbf8
+#define IPBRR0_IP_REV GENMASK(15, 0)
+#define NETC_REV_4_1 0x0401
+
+#define NETC_TMR_FIPER_NUM 3
+#define NETC_TMR_INVALID_CHANNEL NETC_TMR_FIPER_NUM
+#define NETC_TMR_DEFAULT_PRSC 2
+#define NETC_TMR_DEFAULT_ALARM GENMASK_ULL(63, 0)
+#define NETC_TMR_DEFAULT_FIPER GENMASK(31, 0)
+#define NETC_TMR_FIPER_MAX_PW GENMASK(4, 0)
+#define NETC_TMR_ALARM_NUM 2
+#define NETC_TMR_DEFAULT_ETTF_THR 7
+
+/* 1588 timer reference clock source select */
+#define NETC_TMR_CCM_TIMER1 0 /* enet_timer1_clk_root, from CCM */
+#define NETC_TMR_SYSTEM_CLK 1 /* enet_clk_root/2, from CCM */
+#define NETC_TMR_EXT_OSC 2 /* tmr_1588_clk, from IO pins */
+
+#define NETC_TMR_SYSCLK_333M 333333333U
+
+enum netc_pp_type {
+ NETC_PP_PPS = 1,
+ NETC_PP_PEROUT,
+};
+
+struct netc_pp {
+ enum netc_pp_type type;
+ bool enabled;
+ int alarm_id;
+ u32 period; /* pulse period, ns */
+ u64 stime; /* start time, ns */
+};
+
+struct netc_timer {
+ void __iomem *base;
+ struct pci_dev *pdev;
+ spinlock_t lock; /* Prevent concurrent access to registers */
+
+ struct ptp_clock *clock;
+ struct ptp_clock_info caps;
+ u32 clk_select;
+ u32 clk_freq;
+ u32 oclk_prsc;
+	/* High 32 bits are the integer part, low 32 bits the fractional part */
+ u64 period;
+
+ int irq;
+ char irq_name[24];
+ int revision;
+ u32 tmr_emask;
+ u8 pps_channel;
+ u8 fs_alarm_num;
+ u8 fs_alarm_bitmap;
+ struct netc_pp pp[NETC_TMR_FIPER_NUM]; /* periodic pulse */
+};
+
+#define netc_timer_rd(p, o) netc_read((p)->base + (o))
+#define netc_timer_wr(p, o, v) netc_write((p)->base + (o), v)
+#define ptp_to_netc_timer(ptp) container_of((ptp), struct netc_timer, caps)
+
+static const char *const timer_clk_src[] = {
+ "ccm",
+ "ext"
+};
+
+static void netc_timer_cnt_write(struct netc_timer *priv, u64 ns)
+{
+ u32 tmr_cnt_h = upper_32_bits(ns);
+ u32 tmr_cnt_l = lower_32_bits(ns);
+
+	/* A write to the TMR_CNT_L register copies the written value
+	 * into the shadow TMR_CNT_L register, and a write to TMR_CNT_H
+	 * does the same for the shadow TMR_CNT_H register. The contents
+	 * of the shadow registers are copied into the TMR_CNT_L and
+	 * TMR_CNT_H registers following a write to the TMR_CNT_H
+	 * register, so the user must write to the TMR_CNT_L register
+	 * first. Other H/L register pairs behave the same way.
+	 */
+ netc_timer_wr(priv, NETC_TMR_CNT_L, tmr_cnt_l);
+ netc_timer_wr(priv, NETC_TMR_CNT_H, tmr_cnt_h);
+}
+
+static u64 netc_timer_offset_read(struct netc_timer *priv)
+{
+ u32 tmr_off_l, tmr_off_h;
+ u64 offset;
+
+ tmr_off_l = netc_timer_rd(priv, NETC_TMR_OFF_L);
+ tmr_off_h = netc_timer_rd(priv, NETC_TMR_OFF_H);
+ offset = (((u64)tmr_off_h) << 32) | tmr_off_l;
+
+ return offset;
+}
+
+static void netc_timer_offset_write(struct netc_timer *priv, u64 offset)
+{
+ u32 tmr_off_h = upper_32_bits(offset);
+ u32 tmr_off_l = lower_32_bits(offset);
+
+ netc_timer_wr(priv, NETC_TMR_OFF_L, tmr_off_l);
+ netc_timer_wr(priv, NETC_TMR_OFF_H, tmr_off_h);
+}
+
+static u64 netc_timer_cur_time_read(struct netc_timer *priv)
+{
+ u32 time_h, time_l;
+ u64 ns;
+
+	/* The user should read NETC_TMR_CUR_TIME_L first to get the
+	 * correct current time.
+	 */
+ time_l = netc_timer_rd(priv, NETC_TMR_CUR_TIME_L);
+ time_h = netc_timer_rd(priv, NETC_TMR_CUR_TIME_H);
+ ns = (u64)time_h << 32 | time_l;
+
+ return ns;
+}
+
+static void netc_timer_alarm_write(struct netc_timer *priv,
+ u64 alarm, int index)
+{
+ u32 alarm_h = upper_32_bits(alarm);
+ u32 alarm_l = lower_32_bits(alarm);
+
+ netc_timer_wr(priv, NETC_TMR_ALARM_L(index), alarm_l);
+ netc_timer_wr(priv, NETC_TMR_ALARM_H(index), alarm_h);
+}
+
+static u32 netc_timer_get_integral_period(struct netc_timer *priv)
+{
+ u32 tmr_ctrl, integral_period;
+
+ tmr_ctrl = netc_timer_rd(priv, NETC_TMR_CTRL);
+ integral_period = FIELD_GET(TMR_CTRL_TCLK_PERIOD, tmr_ctrl);
+
+ return integral_period;
+}
+
+static u32 netc_timer_calculate_fiper_pw(struct netc_timer *priv,
+ u32 fiper)
+{
+ u64 divisor, pulse_width;
+
+ /* Set the FIPER pulse width to half FIPER interval by default.
+ * pulse_width = (fiper / 2) / TMR_GCLK_period,
+ * TMR_GCLK_period = NSEC_PER_SEC / TMR_GCLK_freq,
+ * TMR_GCLK_freq = (clk_freq / oclk_prsc) Hz,
+ * so pulse_width = fiper * clk_freq / (2 * NSEC_PER_SEC * oclk_prsc).
+ */
+ divisor = mul_u32_u32(2 * NSEC_PER_SEC, priv->oclk_prsc);
+ pulse_width = div64_u64(mul_u32_u32(fiper, priv->clk_freq), divisor);
+
+	/* The FIPER_PW field only has 5 bits; clamp the value here, a
+	 * wider pulse would require updating oclk_prsc.
+	 */
+ if (pulse_width > NETC_TMR_FIPER_MAX_PW)
+ pulse_width = NETC_TMR_FIPER_MAX_PW;
+
+ return pulse_width;
+}
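
Plugging the driver defaults into the formula above shows why the clamp matters: with clk_freq = 333333333 Hz and oclk_prsc = 2, a ~1 s FIPER gives fiper * clk_freq / (2 * NSEC_PER_SEC * oclk_prsc) ≈ 83,333,333, far over the 5-bit maximum of 31, so the pulse is clamped to 31 TMR_GCLK cycles of ~6 ns each, roughly 186 ns. A standalone check of that arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t fiper = 1000000000ULL;		/* ~1 s interval, in ns */
	uint64_t clk_freq = 333333333ULL;	/* default system clock */
	uint64_t oclk_prsc = 2;			/* default prescaler */
	uint64_t pw = fiper * clk_freq / (2 * 1000000000ULL * oclk_prsc);

	/* Prints 83333333; the 5-bit hardware field clamps this to 31 */
	printf("%llu\n", (unsigned long long)pw);
	return 0;
}
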
+
+static void netc_timer_set_pps_alarm(struct netc_timer *priv, int channel,
+ u32 integral_period)
+{
+ struct netc_pp *pp = &priv->pp[channel];
+ u64 alarm;
+
+ /* Get the alarm value */
+ alarm = netc_timer_cur_time_read(priv) + NSEC_PER_MSEC;
+ alarm = roundup_u64(alarm, NSEC_PER_SEC);
+ alarm = roundup_u64(alarm, integral_period);
+
+ netc_timer_alarm_write(priv, alarm, pp->alarm_id);
+}
+
+static void netc_timer_set_perout_alarm(struct netc_timer *priv, int channel,
+ u32 integral_period)
+{
+ u64 cur_time = netc_timer_cur_time_read(priv);
+ struct netc_pp *pp = &priv->pp[channel];
+ u64 alarm, delta, min_time;
+ u32 period = pp->period;
+ u64 stime = pp->stime;
+
+ min_time = cur_time + NSEC_PER_MSEC + period;
+ if (stime < min_time) {
+ delta = min_time - stime;
+ stime += roundup_u64(delta, period);
+ }
+
+ alarm = roundup_u64(stime - period, integral_period);
+ netc_timer_alarm_write(priv, alarm, pp->alarm_id);
+}
+
+static int netc_timer_get_alarm_id(struct netc_timer *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->fs_alarm_num; i++) {
+ if (!(priv->fs_alarm_bitmap & BIT(i))) {
+ priv->fs_alarm_bitmap |= BIT(i);
+ break;
+ }
+ }
+
+ return i;
+}
+
+static u64 netc_timer_get_gclk_period(struct netc_timer *priv)
+{
+ /* TMR_GCLK_freq = (clk_freq / oclk_prsc) Hz.
+ * TMR_GCLK_period = NSEC_PER_SEC / TMR_GCLK_freq.
+ * TMR_GCLK_period = (NSEC_PER_SEC * oclk_prsc) / clk_freq
+ */
+
+ return div_u64(mul_u32_u32(NSEC_PER_SEC, priv->oclk_prsc),
+ priv->clk_freq);
+}
+
+static void netc_timer_enable_periodic_pulse(struct netc_timer *priv,
+ u8 channel)
+{
+ u32 fiper_pw, fiper, fiper_ctrl, integral_period;
+ struct netc_pp *pp = &priv->pp[channel];
+ int alarm_id = pp->alarm_id;
+
+ integral_period = netc_timer_get_integral_period(priv);
+	/* Set to the desired FIPER interval in ns minus TCLK_PERIOD */
+ fiper = pp->period - integral_period;
+ fiper_pw = netc_timer_calculate_fiper_pw(priv, fiper);
+
+ fiper_ctrl = netc_timer_rd(priv, NETC_TMR_FIPER_CTRL);
+ fiper_ctrl &= ~(FIPER_CTRL_DIS(channel) | FIPER_CTRL_PW(channel) |
+ FIPER_CTRL_FS_ALARM(channel));
+ fiper_ctrl |= FIPER_CTRL_SET_PW(channel, fiper_pw);
+ fiper_ctrl |= alarm_id ? FIPER_CTRL_FS_ALARM(channel) : 0;
+
+ priv->tmr_emask |= TMR_TEVENT_ALMEN(alarm_id);
+
+ if (pp->type == NETC_PP_PPS) {
+		priv->tmr_emask |= TMR_TEVENT_PPEN(channel);
+ netc_timer_set_pps_alarm(priv, channel, integral_period);
+ } else {
+ netc_timer_set_perout_alarm(priv, channel, integral_period);
+ }
+
+ netc_timer_wr(priv, NETC_TMR_TEMASK, priv->tmr_emask);
+ netc_timer_wr(priv, NETC_TMR_FIPER(channel), fiper);
+ netc_timer_wr(priv, NETC_TMR_FIPER_CTRL, fiper_ctrl);
+}
+
+static void netc_timer_disable_periodic_pulse(struct netc_timer *priv,
+ u8 channel)
+{
+ struct netc_pp *pp = &priv->pp[channel];
+ int alarm_id = pp->alarm_id;
+ u32 fiper_ctrl;
+
+ if (!pp->enabled)
+ return;
+
+	priv->tmr_emask &= ~(TMR_TEVENT_PPEN(channel) |
+ TMR_TEVENT_ALMEN(alarm_id));
+
+ fiper_ctrl = netc_timer_rd(priv, NETC_TMR_FIPER_CTRL);
+ fiper_ctrl |= FIPER_CTRL_DIS(channel);
+
+ netc_timer_alarm_write(priv, NETC_TMR_DEFAULT_ALARM, alarm_id);
+ netc_timer_wr(priv, NETC_TMR_TEMASK, priv->tmr_emask);
+ netc_timer_wr(priv, NETC_TMR_FIPER(channel), NETC_TMR_DEFAULT_FIPER);
+ netc_timer_wr(priv, NETC_TMR_FIPER_CTRL, fiper_ctrl);
+}
+
+static u8 netc_timer_select_pps_channel(struct netc_timer *priv)
+{
+ int i;
+
+ for (i = 0; i < NETC_TMR_FIPER_NUM; i++) {
+ if (!priv->pp[i].enabled)
+ return i;
+ }
+
+ return NETC_TMR_INVALID_CHANNEL;
+}
+
+/* Note that users should not use this API to output a PPS signal on
+ * external pins, because PTP_CLK_REQ_PPS triggers an internal PPS event
+ * for input into the kernel PPS subsystem. See:
+ * https://lore.kernel.org/r/20201117213826.18235-1-a.fatoum@pengutronix.de
+ */
+static int netc_timer_enable_pps(struct netc_timer *priv,
+ struct ptp_clock_request *rq, int on)
+{
+ struct device *dev = &priv->pdev->dev;
+ unsigned long flags;
+ struct netc_pp *pp;
+ int err = 0;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ if (on) {
+ int alarm_id;
+ u8 channel;
+
+ if (priv->pps_channel < NETC_TMR_FIPER_NUM) {
+ channel = priv->pps_channel;
+ } else {
+ channel = netc_timer_select_pps_channel(priv);
+ if (channel == NETC_TMR_INVALID_CHANNEL) {
+ dev_err(dev, "No available FIPERs\n");
+ err = -EBUSY;
+ goto unlock_spinlock;
+ }
+ }
+
+ pp = &priv->pp[channel];
+ if (pp->enabled)
+ goto unlock_spinlock;
+
+ alarm_id = netc_timer_get_alarm_id(priv);
+ if (alarm_id == priv->fs_alarm_num) {
+ dev_err(dev, "No available ALARMs\n");
+ err = -EBUSY;
+ goto unlock_spinlock;
+ }
+
+ pp->enabled = true;
+ pp->type = NETC_PP_PPS;
+ pp->alarm_id = alarm_id;
+ pp->period = NSEC_PER_SEC;
+ priv->pps_channel = channel;
+
+ netc_timer_enable_periodic_pulse(priv, channel);
+ } else {
+ /* pps_channel is invalid if PPS is not enabled, so no
+ * processing is needed.
+ */
+ if (priv->pps_channel >= NETC_TMR_FIPER_NUM)
+ goto unlock_spinlock;
+
+ netc_timer_disable_periodic_pulse(priv, priv->pps_channel);
+ pp = &priv->pp[priv->pps_channel];
+ priv->fs_alarm_bitmap &= ~BIT(pp->alarm_id);
+ memset(pp, 0, sizeof(*pp));
+ priv->pps_channel = NETC_TMR_INVALID_CHANNEL;
+ }
+
+unlock_spinlock:
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return err;
+}
+
+static int net_timer_enable_perout(struct netc_timer *priv,
+ struct ptp_clock_request *rq, int on)
+{
+ struct device *dev = &priv->pdev->dev;
+ u32 channel = rq->perout.index;
+ unsigned long flags;
+ struct netc_pp *pp;
+ int err = 0;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ pp = &priv->pp[channel];
+ if (pp->type == NETC_PP_PPS) {
+ dev_err(dev, "FIPER%u is being used for PPS\n", channel);
+ err = -EBUSY;
+ goto unlock_spinlock;
+ }
+
+ if (on) {
+ u64 period_ns, gclk_period, max_period, min_period;
+ struct timespec64 period, stime;
+ u32 integral_period;
+ int alarm_id;
+
+ period.tv_sec = rq->perout.period.sec;
+ period.tv_nsec = rq->perout.period.nsec;
+ period_ns = timespec64_to_ns(&period);
+
+ integral_period = netc_timer_get_integral_period(priv);
+ max_period = (u64)NETC_TMR_DEFAULT_FIPER + integral_period;
+ gclk_period = netc_timer_get_gclk_period(priv);
+ min_period = gclk_period * 4 + integral_period;
+ if (period_ns > max_period || period_ns < min_period) {
+ dev_err(dev, "The period range is %llu ~ %llu\n",
+ min_period, max_period);
+ err = -EINVAL;
+ goto unlock_spinlock;
+ }
+
+ if (pp->enabled) {
+ alarm_id = pp->alarm_id;
+ } else {
+ alarm_id = netc_timer_get_alarm_id(priv);
+ if (alarm_id == priv->fs_alarm_num) {
+ dev_err(dev, "No available ALARMs\n");
+ err = -EBUSY;
+ goto unlock_spinlock;
+ }
+
+ pp->type = NETC_PP_PEROUT;
+ pp->enabled = true;
+ pp->alarm_id = alarm_id;
+ }
+
+ stime.tv_sec = rq->perout.start.sec;
+ stime.tv_nsec = rq->perout.start.nsec;
+ pp->stime = timespec64_to_ns(&stime);
+ pp->period = period_ns;
+
+ netc_timer_enable_periodic_pulse(priv, channel);
+ } else {
+ netc_timer_disable_periodic_pulse(priv, channel);
+ priv->fs_alarm_bitmap &= ~BIT(pp->alarm_id);
+ memset(pp, 0, sizeof(*pp));
+ }
+
+unlock_spinlock:
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return err;
+}
+
+static void netc_timer_handle_etts_event(struct netc_timer *priv, int index,
+ bool update_event)
+{
+ struct ptp_clock_event event;
+ u32 etts_l = 0, etts_h = 0;
+
+ while (netc_timer_rd(priv, NETC_TMR_STAT) & TMR_STAT_ETS_VLD(index)) {
+ etts_l = netc_timer_rd(priv, NETC_TMR_ETTS_L(index));
+ etts_h = netc_timer_rd(priv, NETC_TMR_ETTS_H(index));
+ }
+
+ /* Invalid time stamp */
+ if (!etts_l && !etts_h)
+ return;
+
+ if (update_event) {
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = index;
+ event.timestamp = (u64)etts_h << 32;
+ event.timestamp |= etts_l;
+ ptp_clock_event(priv->clock, &event);
+ }
+}
+
+static int netc_timer_enable_extts(struct netc_timer *priv,
+ struct ptp_clock_request *rq, int on)
+{
+ int index = rq->extts.index;
+ unsigned long flags;
+ u32 tmr_ctrl;
+
+ /* Reject requests to enable time stamping on both edges */
+ if ((rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
+ return -EOPNOTSUPP;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ netc_timer_handle_etts_event(priv, rq->extts.index, false);
+ if (on) {
+ tmr_ctrl = netc_timer_rd(priv, NETC_TMR_CTRL);
+ if (rq->extts.flags & PTP_FALLING_EDGE)
+ tmr_ctrl |= TMR_ETEP(index);
+ else
+ tmr_ctrl &= ~TMR_ETEP(index);
+
+ netc_timer_wr(priv, NETC_TMR_CTRL, tmr_ctrl);
+ priv->tmr_emask |= TMR_TEVENT_ETS(index);
+ } else {
+ priv->tmr_emask &= ~TMR_TEVENT_ETS(index);
+ }
+
+ netc_timer_wr(priv, NETC_TMR_TEMASK, priv->tmr_emask);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+static void netc_timer_disable_fiper(struct netc_timer *priv)
+{
+ u32 fiper_ctrl = netc_timer_rd(priv, NETC_TMR_FIPER_CTRL);
+ int i;
+
+ for (i = 0; i < NETC_TMR_FIPER_NUM; i++) {
+ if (!priv->pp[i].enabled)
+ continue;
+
+ fiper_ctrl |= FIPER_CTRL_DIS(i);
+ netc_timer_wr(priv, NETC_TMR_FIPER(i), NETC_TMR_DEFAULT_FIPER);
+ }
+
+ netc_timer_wr(priv, NETC_TMR_FIPER_CTRL, fiper_ctrl);
+}
+
+static void netc_timer_enable_fiper(struct netc_timer *priv)
+{
+ u32 integral_period = netc_timer_get_integral_period(priv);
+ u32 fiper_ctrl = netc_timer_rd(priv, NETC_TMR_FIPER_CTRL);
+ int i;
+
+ for (i = 0; i < NETC_TMR_FIPER_NUM; i++) {
+ struct netc_pp *pp = &priv->pp[i];
+ u32 fiper;
+
+ if (!pp->enabled)
+ continue;
+
+ fiper_ctrl &= ~FIPER_CTRL_DIS(i);
+
+ if (pp->type == NETC_PP_PPS)
+ netc_timer_set_pps_alarm(priv, i, integral_period);
+ else if (pp->type == NETC_PP_PEROUT)
+ netc_timer_set_perout_alarm(priv, i, integral_period);
+
+ fiper = pp->period - integral_period;
+ netc_timer_wr(priv, NETC_TMR_FIPER(i), fiper);
+ }
+
+ netc_timer_wr(priv, NETC_TMR_FIPER_CTRL, fiper_ctrl);
+}
+
+static int netc_timer_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct netc_timer *priv = ptp_to_netc_timer(ptp);
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_PPS:
+ return netc_timer_enable_pps(priv, rq, on);
+ case PTP_CLK_REQ_PEROUT:
+ return net_timer_enable_perout(priv, rq, on);
+ case PTP_CLK_REQ_EXTTS:
+ return netc_timer_enable_extts(priv, rq, on);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int netc_timer_perout_loopback(struct ptp_clock_info *ptp,
+ unsigned int index, int on)
+{
+ struct netc_timer *priv = ptp_to_netc_timer(ptp);
+ unsigned long flags;
+ u32 tmr_ctrl;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ tmr_ctrl = netc_timer_rd(priv, NETC_TMR_CTRL);
+ if (on)
+ tmr_ctrl |= TMR_CTRL_PPL(index);
+ else
+ tmr_ctrl &= ~TMR_CTRL_PPL(index);
+
+ netc_timer_wr(priv, NETC_TMR_CTRL, tmr_ctrl);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+static void netc_timer_adjust_period(struct netc_timer *priv, u64 period)
+{
+ u32 fractional_period = lower_32_bits(period);
+ u32 integral_period = upper_32_bits(period);
+ u32 tmr_ctrl, old_tmr_ctrl;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ old_tmr_ctrl = netc_timer_rd(priv, NETC_TMR_CTRL);
+ tmr_ctrl = u32_replace_bits(old_tmr_ctrl, integral_period,
+ TMR_CTRL_TCLK_PERIOD);
+ if (tmr_ctrl != old_tmr_ctrl) {
+ netc_timer_disable_fiper(priv);
+ netc_timer_wr(priv, NETC_TMR_CTRL, tmr_ctrl);
+ netc_timer_enable_fiper(priv);
+ }
+
+ netc_timer_wr(priv, NETC_TMR_ADD, fractional_period);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static int netc_timer_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct netc_timer *priv = ptp_to_netc_timer(ptp);
+ u64 new_period;
+
+ new_period = adjust_by_scaled_ppm(priv->period, scaled_ppm);
+ netc_timer_adjust_period(priv, new_period);
+
+ return 0;
+}
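
adjust_by_scaled_ppm() treats scaled_ppm as parts-per-million with a 16-bit fractional part, so 65536 means +1 ppm and the new period is period * (1 + ppm / 10^6). A rough userspace approximation of that scaling; the kernel helper performs the multiplication at full 64x64-bit precision:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t period = 3ULL << 32;	/* nominal 3 ns in 32.32 format */
	long scaled_ppm = 65536;	/* +1 ppm */
	uint64_t adj = period / 1000000 * (uint64_t)scaled_ppm / 65536;

	/* The period grows by roughly one part per million */
	printf("%llu -> %llu\n", (unsigned long long)period,
	       (unsigned long long)(period + adj));
	return 0;
}
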
+
+static int netc_timer_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct netc_timer *priv = ptp_to_netc_timer(ptp);
+ unsigned long flags;
+ s64 tmr_off;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ netc_timer_disable_fiper(priv);
+
+	/* Adjust TMR_OFF instead of TMR_CNT because the timer counter
+	 * keeps increasing while TMR_CNT is read and written back,
+	 * which would introduce latency.
+	 */
+ tmr_off = netc_timer_offset_read(priv);
+ tmr_off += delta;
+ netc_timer_offset_write(priv, tmr_off);
+
+ netc_timer_enable_fiper(priv);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+static int netc_timer_gettimex64(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct netc_timer *priv = ptp_to_netc_timer(ptp);
+ unsigned long flags;
+ u64 ns;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ ptp_read_system_prets(sts);
+ ns = netc_timer_cur_time_read(priv);
+ ptp_read_system_postts(sts);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ *ts = ns_to_timespec64(ns);
+
+ return 0;
+}
+
+static int netc_timer_settime64(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct netc_timer *priv = ptp_to_netc_timer(ptp);
+ u64 ns = timespec64_to_ns(ts);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ netc_timer_disable_fiper(priv);
+ netc_timer_offset_write(priv, 0);
+ netc_timer_cnt_write(priv, ns);
+ netc_timer_enable_fiper(priv);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+static const struct ptp_clock_info netc_timer_ptp_caps = {
+ .owner = THIS_MODULE,
+ .name = "NETC Timer PTP clock",
+ .max_adj = 500000000,
+ .n_pins = 0,
+ .n_alarm = 2,
+ .pps = 1,
+ .n_per_out = 3,
+ .n_ext_ts = 2,
+ .n_per_lp = 2,
+ .supported_extts_flags = PTP_RISING_EDGE | PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS,
+ .adjfine = netc_timer_adjfine,
+ .adjtime = netc_timer_adjtime,
+ .gettimex64 = netc_timer_gettimex64,
+ .settime64 = netc_timer_settime64,
+ .enable = netc_timer_enable,
+ .perout_loopback = netc_timer_perout_loopback,
+};
+
+static void netc_timer_init(struct netc_timer *priv)
+{
+ u32 fractional_period = lower_32_bits(priv->period);
+ u32 integral_period = upper_32_bits(priv->period);
+ u32 tmr_ctrl, fiper_ctrl;
+ struct timespec64 now;
+ u64 ns;
+ int i;
+
+	/* Software must enable the timer first, and the selected clock
+	 * must be active; otherwise the registers in the timer clock
+	 * domain are not accessible.
+	 */
+ tmr_ctrl = FIELD_PREP(TMR_CTRL_CK_SEL, priv->clk_select) |
+ TMR_CTRL_TE | TMR_CTRL_FS;
+ netc_timer_wr(priv, NETC_TMR_CTRL, tmr_ctrl);
+ netc_timer_wr(priv, NETC_TMR_PRSC, priv->oclk_prsc);
+
+ /* Disable FIPER by default */
+ fiper_ctrl = netc_timer_rd(priv, NETC_TMR_FIPER_CTRL);
+ for (i = 0; i < NETC_TMR_FIPER_NUM; i++) {
+ fiper_ctrl |= FIPER_CTRL_DIS(i);
+ fiper_ctrl &= ~FIPER_CTRL_PG(i);
+ }
+ netc_timer_wr(priv, NETC_TMR_FIPER_CTRL, fiper_ctrl);
+ netc_timer_wr(priv, NETC_TMR_ECTRL, NETC_TMR_DEFAULT_ETTF_THR);
+
+ ktime_get_real_ts64(&now);
+ ns = timespec64_to_ns(&now);
+ netc_timer_cnt_write(priv, ns);
+
+	/* Allow atomic writes to TCLK_PERIOD and TMR_ADD: an update to
+	 * TCLK_PERIOD does not take effect until TMR_ADD is written.
+	 */
+ tmr_ctrl |= FIELD_PREP(TMR_CTRL_TCLK_PERIOD, integral_period) |
+ TMR_COMP_MODE;
+ netc_timer_wr(priv, NETC_TMR_CTRL, tmr_ctrl);
+ netc_timer_wr(priv, NETC_TMR_ADD, fractional_period);
+}
+
+static int netc_timer_pci_probe(struct pci_dev *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct netc_timer *priv;
+ int err;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ pcie_flr(pdev);
+ err = pci_enable_device_mem(pdev);
+ if (err)
+ return dev_err_probe(dev, err, "Failed to enable device\n");
+
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ err = pci_request_mem_regions(pdev, KBUILD_MODNAME);
+ if (err) {
+ dev_err(dev, "pci_request_regions() failed, err:%pe\n",
+ ERR_PTR(err));
+ goto disable_dev;
+ }
+
+ pci_set_master(pdev);
+
+ priv->pdev = pdev;
+ priv->base = pci_ioremap_bar(pdev, NETC_TMR_REGS_BAR);
+ if (!priv->base) {
+ err = -ENOMEM;
+ goto release_mem_regions;
+ }
+
+ pci_set_drvdata(pdev, priv);
+
+ return 0;
+
+release_mem_regions:
+ pci_release_mem_regions(pdev);
+disable_dev:
+ pci_disable_device(pdev);
+
+ return err;
+}
+
+static void netc_timer_pci_remove(struct pci_dev *pdev)
+{
+ struct netc_timer *priv = pci_get_drvdata(pdev);
+
+ iounmap(priv->base);
+ pci_release_mem_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+static int netc_timer_get_reference_clk_source(struct netc_timer *priv)
+{
+ struct device *dev = &priv->pdev->dev;
+ struct clk *clk;
+ int i;
+
+ /* Select NETC system clock as the reference clock by default */
+ priv->clk_select = NETC_TMR_SYSTEM_CLK;
+ priv->clk_freq = NETC_TMR_SYSCLK_333M;
+
+	/* Update the reference clock source if a clock is specified
+	 * in the DT node.
+	 */
+ for (i = 0; i < ARRAY_SIZE(timer_clk_src); i++) {
+ clk = devm_clk_get_optional_enabled(dev, timer_clk_src[i]);
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk),
+ "Failed to enable clock\n");
+
+ if (clk) {
+ priv->clk_freq = clk_get_rate(clk);
+ priv->clk_select = i ? NETC_TMR_EXT_OSC :
+ NETC_TMR_CCM_TIMER1;
+ break;
+ }
+ }
+
+	/* The period is a 64-bit number: the high 32 bits are the
+	 * integer part and the low 32 bits are the fractional part.
+	 * To get the desired 32-bit fixed-point format, multiply the
+	 * numerator of the fraction by 2^32.
+	 */
+ priv->period = div_u64((u64)NSEC_PER_SEC << 32, priv->clk_freq);
+
+ return 0;
+}
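
A worked example of the 32.32 split: for the default 333333333 Hz clock the true period is 3.000000003 ns, so div_u64((u64)NSEC_PER_SEC << 32, clk_freq) yields integral part 3 (programmed into TCLK_PERIOD) and fractional part 12 (accumulated by TMR_ADD), keeping the counter accurate over time. Standalone arithmetic mirroring that computation:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t clk_freq = 333333333ULL;
	uint64_t period = (1000000000ULL << 32) / clk_freq;

	/* Prints "3 + 12/2^32": the sub-ns remainder is not dropped */
	printf("%llu + %llu/2^32\n",
	       (unsigned long long)(period >> 32),
	       (unsigned long long)(period & 0xffffffffULL));
	return 0;
}
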
+
+static int netc_timer_parse_dt(struct netc_timer *priv)
+{
+ return netc_timer_get_reference_clk_source(priv);
+}
+
+static irqreturn_t netc_timer_isr(int irq, void *data)
+{
+ struct netc_timer *priv = data;
+ struct ptp_clock_event event;
+ u32 tmr_event;
+
+ spin_lock(&priv->lock);
+
+ tmr_event = netc_timer_rd(priv, NETC_TMR_TEVENT);
+ tmr_event &= priv->tmr_emask;
+	/* Clear interrupt status */
+ netc_timer_wr(priv, NETC_TMR_TEVENT, tmr_event);
+
+ if (tmr_event & TMR_TEVENT_ALMEN(0))
+ netc_timer_alarm_write(priv, NETC_TMR_DEFAULT_ALARM, 0);
+
+ if (tmr_event & TMR_TEVENT_ALMEN(1))
+ netc_timer_alarm_write(priv, NETC_TMR_DEFAULT_ALARM, 1);
+
+ if (tmr_event & TMR_TEVENT_PPEN_ALL) {
+ event.type = PTP_CLOCK_PPS;
+ ptp_clock_event(priv->clock, &event);
+ }
+
+ if (tmr_event & TMR_TEVENT_ETS(0))
+ netc_timer_handle_etts_event(priv, 0, true);
+
+ if (tmr_event & TMR_TEVENT_ETS(1))
+ netc_timer_handle_etts_event(priv, 1, true);
+
+ spin_unlock(&priv->lock);
+
+ return IRQ_HANDLED;
+}
+
+static int netc_timer_init_msix_irq(struct netc_timer *priv)
+{
+ struct pci_dev *pdev = priv->pdev;
+ int err, n;
+
+ n = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX);
+ if (n != 1) {
+ err = (n < 0) ? n : -EPERM;
+ dev_err(&pdev->dev, "pci_alloc_irq_vectors() failed\n");
+ return err;
+ }
+
+ priv->irq = pci_irq_vector(pdev, 0);
+ err = request_irq(priv->irq, netc_timer_isr, 0, priv->irq_name, priv);
+ if (err) {
+ dev_err(&pdev->dev, "request_irq() failed\n");
+ pci_free_irq_vectors(pdev);
+
+ return err;
+ }
+
+ return 0;
+}
+
+static void netc_timer_free_msix_irq(struct netc_timer *priv)
+{
+ struct pci_dev *pdev = priv->pdev;
+
+ disable_irq(priv->irq);
+ free_irq(priv->irq, priv);
+ pci_free_irq_vectors(pdev);
+}
+
+static int netc_timer_get_global_ip_rev(struct netc_timer *priv)
+{
+ u32 val;
+
+ val = netc_timer_rd(priv, NETC_GLOBAL_OFFSET + NETC_GLOBAL_IPBRR0);
+
+ return val & IPBRR0_IP_REV;
+}
+
+static int netc_timer_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ struct netc_timer *priv;
+ int err;
+
+ err = netc_timer_pci_probe(pdev);
+ if (err)
+ return err;
+
+ priv = pci_get_drvdata(pdev);
+ priv->revision = netc_timer_get_global_ip_rev(priv);
+ if (priv->revision == NETC_REV_4_1)
+ priv->fs_alarm_num = 1;
+ else
+ priv->fs_alarm_num = NETC_TMR_ALARM_NUM;
+
+ err = netc_timer_parse_dt(priv);
+ if (err)
+ goto timer_pci_remove;
+
+ priv->caps = netc_timer_ptp_caps;
+ priv->oclk_prsc = NETC_TMR_DEFAULT_PRSC;
+ priv->pps_channel = NETC_TMR_INVALID_CHANNEL;
+ spin_lock_init(&priv->lock);
+ snprintf(priv->irq_name, sizeof(priv->irq_name), "ptp-netc %s",
+ pci_name(pdev));
+
+ err = netc_timer_init_msix_irq(priv);
+ if (err)
+ goto timer_pci_remove;
+
+ netc_timer_init(priv);
+ priv->clock = ptp_clock_register(&priv->caps, dev);
+ if (IS_ERR(priv->clock)) {
+ err = PTR_ERR(priv->clock);
+ goto free_msix_irq;
+ }
+
+ return 0;
+
+free_msix_irq:
+ netc_timer_free_msix_irq(priv);
+timer_pci_remove:
+ netc_timer_pci_remove(pdev);
+
+ return err;
+}
+
+static void netc_timer_remove(struct pci_dev *pdev)
+{
+ struct netc_timer *priv = pci_get_drvdata(pdev);
+
+ netc_timer_wr(priv, NETC_TMR_TEMASK, 0);
+ netc_timer_wr(priv, NETC_TMR_CTRL, 0);
+ ptp_clock_unregister(priv->clock);
+ netc_timer_free_msix_irq(priv);
+ netc_timer_pci_remove(pdev);
+}
+
+static const struct pci_device_id netc_timer_id_table[] = {
+ { PCI_DEVICE(NETC_TMR_PCI_VENDOR_NXP, 0xee02) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, netc_timer_id_table);
+
+static struct pci_driver netc_timer_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = netc_timer_id_table,
+ .probe = netc_timer_probe,
+ .remove = netc_timer_remove,
+};
+module_pci_driver(netc_timer_driver);
+
+MODULE_DESCRIPTION("NXP NETC Timer PTP Driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
index 4e1286ce05c9..794ec6e71990 100644
--- a/drivers/ptp/ptp_ocp.c
+++ b/drivers/ptp/ptp_ocp.c
@@ -1485,6 +1485,8 @@ static const struct ptp_clock_info ptp_ocp_clock_info = {
.pps = true,
.n_ext_ts = 6,
.n_per_out = 5,
+ .supported_extts_flags = PTP_STRICT_FLAGS | PTP_RISING_EDGE,
+ .supported_perout_flags = PTP_PEROUT_DUTY_CYCLE | PTP_PEROUT_PHASE,
};
static void
@@ -2095,10 +2097,6 @@ ptp_ocp_signal_from_perout(struct ptp_ocp *bp, int gen,
{
struct ptp_ocp_signal s = { };
- if (req->flags & ~(PTP_PEROUT_DUTY_CYCLE |
- PTP_PEROUT_PHASE))
- return -EOPNOTSUPP;
-
s.polarity = bp->signal[gen].polarity;
s.period = ktime_set(req->period.sec, req->period.nsec);
if (!s.period)
diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
index b352df4cd3f9..db4039d642b4 100644
--- a/drivers/ptp/ptp_private.h
+++ b/drivers/ptp/ptp_private.h
@@ -22,6 +22,7 @@
#define PTP_MAX_TIMESTAMPS 128
#define PTP_BUF_TIMESTAMPS 30
#define PTP_DEFAULT_MAX_VCLOCKS 20
+#define PTP_MAX_VCLOCKS_LIMIT (KMALLOC_MAX_SIZE/(sizeof(int)))
#define PTP_MAX_CHANNELS 2048
enum {
@@ -141,6 +142,8 @@ extern const struct class ptp_class;
* see ptp_chardev.c
*/
+void ptp_disable_all_events(struct ptp_clock *ptp);
+
/* caller must hold pincfg_mux */
int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin,
enum ptp_pin_function func, unsigned int chan);
diff --git a/drivers/ptp/ptp_qoriq.c b/drivers/ptp/ptp_qoriq.c
index 4d488c1f1941..8da995e36aeb 100644
--- a/drivers/ptp/ptp_qoriq.c
+++ b/drivers/ptp/ptp_qoriq.c
@@ -465,6 +465,25 @@ static int ptp_qoriq_auto_config(struct ptp_qoriq *ptp_qoriq,
return 0;
}
+static int ptp_qoriq_perout_loopback(struct ptp_clock_info *ptp,
+ unsigned int index, int on)
+{
+ struct ptp_qoriq *ptp_qoriq = container_of(ptp, struct ptp_qoriq, caps);
+ struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
+ u32 loopback_bit = index ? PP2L : PP1L;
+ u32 tmr_ctrl;
+
+ tmr_ctrl = ptp_qoriq->read(&regs->ctrl_regs->tmr_ctrl);
+ if (on)
+ tmr_ctrl |= loopback_bit;
+ else
+ tmr_ctrl &= ~loopback_bit;
+
+ ptp_qoriq->write(&regs->ctrl_regs->tmr_ctrl, tmr_ctrl);
+
+ return 0;
+}
+
int ptp_qoriq_init(struct ptp_qoriq *ptp_qoriq, void __iomem *base,
const struct ptp_clock_info *caps)
{
@@ -479,6 +498,8 @@ int ptp_qoriq_init(struct ptp_qoriq *ptp_qoriq, void __iomem *base,
ptp_qoriq->base = base;
ptp_qoriq->caps = *caps;
+ ptp_qoriq->caps.n_per_lp = 2;
+ ptp_qoriq->caps.perout_loopback = ptp_qoriq_perout_loopback;
if (of_property_read_u32(node, "fsl,cksel", &ptp_qoriq->cksel))
ptp_qoriq->cksel = DEFAULT_CKSEL;
@@ -568,7 +589,7 @@ int ptp_qoriq_init(struct ptp_qoriq *ptp_qoriq, void __iomem *base,
return PTR_ERR(ptp_qoriq->clock);
ptp_qoriq->phc_index = ptp_clock_index(ptp_qoriq->clock);
- ptp_qoriq_create_debugfs(ptp_qoriq);
+
return 0;
}
EXPORT_SYMBOL_GPL(ptp_qoriq_init);
@@ -580,7 +601,6 @@ void ptp_qoriq_free(struct ptp_qoriq *ptp_qoriq)
ptp_qoriq->write(&regs->ctrl_regs->tmr_temask, 0);
ptp_qoriq->write(&regs->ctrl_regs->tmr_ctrl, 0);
- ptp_qoriq_remove_debugfs(ptp_qoriq);
ptp_clock_unregister(ptp_qoriq->clock);
iounmap(ptp_qoriq->base);
free_irq(ptp_qoriq->irq, ptp_qoriq);
diff --git a/drivers/ptp/ptp_qoriq_debugfs.c b/drivers/ptp/ptp_qoriq_debugfs.c
deleted file mode 100644
index e8dddcedf288..000000000000
--- a/drivers/ptp/ptp_qoriq_debugfs.c
+++ /dev/null
@@ -1,101 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/* Copyright 2019 NXP
- */
-#include <linux/device.h>
-#include <linux/debugfs.h>
-#include <linux/fsl/ptp_qoriq.h>
-
-static int ptp_qoriq_fiper1_lpbk_get(void *data, u64 *val)
-{
- struct ptp_qoriq *ptp_qoriq = data;
- struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
- u32 ctrl;
-
- ctrl = ptp_qoriq->read(&regs->ctrl_regs->tmr_ctrl);
- *val = ctrl & PP1L ? 1 : 0;
-
- return 0;
-}
-
-static int ptp_qoriq_fiper1_lpbk_set(void *data, u64 val)
-{
- struct ptp_qoriq *ptp_qoriq = data;
- struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
- u32 ctrl;
-
- ctrl = ptp_qoriq->read(&regs->ctrl_regs->tmr_ctrl);
- if (val == 0)
- ctrl &= ~PP1L;
- else
- ctrl |= PP1L;
-
- ptp_qoriq->write(&regs->ctrl_regs->tmr_ctrl, ctrl);
- return 0;
-}
-
-DEFINE_DEBUGFS_ATTRIBUTE(ptp_qoriq_fiper1_fops, ptp_qoriq_fiper1_lpbk_get,
- ptp_qoriq_fiper1_lpbk_set, "%llu\n");
-
-static int ptp_qoriq_fiper2_lpbk_get(void *data, u64 *val)
-{
- struct ptp_qoriq *ptp_qoriq = data;
- struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
- u32 ctrl;
-
- ctrl = ptp_qoriq->read(&regs->ctrl_regs->tmr_ctrl);
- *val = ctrl & PP2L ? 1 : 0;
-
- return 0;
-}
-
-static int ptp_qoriq_fiper2_lpbk_set(void *data, u64 val)
-{
- struct ptp_qoriq *ptp_qoriq = data;
- struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
- u32 ctrl;
-
- ctrl = ptp_qoriq->read(&regs->ctrl_regs->tmr_ctrl);
- if (val == 0)
- ctrl &= ~PP2L;
- else
- ctrl |= PP2L;
-
- ptp_qoriq->write(&regs->ctrl_regs->tmr_ctrl, ctrl);
- return 0;
-}
-
-DEFINE_DEBUGFS_ATTRIBUTE(ptp_qoriq_fiper2_fops, ptp_qoriq_fiper2_lpbk_get,
- ptp_qoriq_fiper2_lpbk_set, "%llu\n");
-
-void ptp_qoriq_create_debugfs(struct ptp_qoriq *ptp_qoriq)
-{
- struct dentry *root;
-
- root = debugfs_create_dir(dev_name(ptp_qoriq->dev), NULL);
- if (IS_ERR(root))
- return;
- if (!root)
- goto err_root;
-
- ptp_qoriq->debugfs_root = root;
-
- if (!debugfs_create_file_unsafe("fiper1-loopback", 0600, root,
- ptp_qoriq, &ptp_qoriq_fiper1_fops))
- goto err_node;
- if (!debugfs_create_file_unsafe("fiper2-loopback", 0600, root,
- ptp_qoriq, &ptp_qoriq_fiper2_fops))
- goto err_node;
- return;
-
-err_node:
- debugfs_remove_recursive(root);
- ptp_qoriq->debugfs_root = NULL;
-err_root:
- dev_err(ptp_qoriq->dev, "failed to initialize debugfs\n");
-}
-
-void ptp_qoriq_remove_debugfs(struct ptp_qoriq *ptp_qoriq)
-{
- debugfs_remove_recursive(ptp_qoriq->debugfs_root);
- ptp_qoriq->debugfs_root = NULL;
-}
diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
index 6b1b8f57cd95..200eaf500696 100644
--- a/drivers/ptp/ptp_sysfs.c
+++ b/drivers/ptp/ptp_sysfs.c
@@ -284,7 +284,7 @@ static ssize_t max_vclocks_store(struct device *dev,
size_t size;
u32 max;
- if (kstrtou32(buf, 0, &max) || max == 0)
+ if (kstrtou32(buf, 0, &max) || max == 0 || max > PTP_MAX_VCLOCKS_LIMIT)
return -EINVAL;
if (max == ptp->max_vclocks)
diff --git a/drivers/remoteproc/qcom_q6v5_adsp.c b/drivers/remoteproc/qcom_q6v5_adsp.c
index 94af77baa7a1..e98b7e03162c 100644
--- a/drivers/remoteproc/qcom_q6v5_adsp.c
+++ b/drivers/remoteproc/qcom_q6v5_adsp.c
@@ -317,7 +317,7 @@ static int adsp_load(struct rproc *rproc, const struct firmware *fw)
struct qcom_adsp *adsp = rproc->priv;
int ret;
- ret = qcom_mdt_load_no_init(adsp->dev, fw, rproc->firmware, 0,
+ ret = qcom_mdt_load_no_init(adsp->dev, fw, rproc->firmware,
adsp->mem_region, adsp->mem_phys,
adsp->mem_size, &adsp->mem_reloc);
if (ret)
diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c
index 02e29171cbbe..55a7da801183 100644
--- a/drivers/remoteproc/qcom_q6v5_pas.c
+++ b/drivers/remoteproc/qcom_q6v5_pas.c
@@ -242,9 +242,8 @@ static int qcom_pas_load(struct rproc *rproc, const struct firmware *fw)
goto release_dtb_firmware;
ret = qcom_mdt_load_no_init(pas->dev, pas->dtb_firmware, pas->dtb_firmware_name,
- pas->dtb_pas_id, pas->dtb_mem_region,
- pas->dtb_mem_phys, pas->dtb_mem_size,
- &pas->dtb_mem_reloc);
+ pas->dtb_mem_region, pas->dtb_mem_phys,
+ pas->dtb_mem_size, &pas->dtb_mem_reloc);
if (ret)
goto release_dtb_metadata;
}
@@ -307,7 +306,7 @@ static int qcom_pas_start(struct rproc *rproc)
if (ret)
goto disable_px_supply;
- ret = qcom_mdt_load_no_init(pas->dev, pas->firmware, rproc->firmware, pas->pas_id,
+ ret = qcom_mdt_load_no_init(pas->dev, pas->firmware, rproc->firmware,
pas->mem_region, pas->mem_phys, pas->mem_size,
&pas->mem_reloc);
if (ret)
diff --git a/drivers/remoteproc/qcom_q6v5_wcss.c b/drivers/remoteproc/qcom_q6v5_wcss.c
index 93648734a2f2..07c88623f597 100644
--- a/drivers/remoteproc/qcom_q6v5_wcss.c
+++ b/drivers/remoteproc/qcom_q6v5_wcss.c
@@ -757,7 +757,7 @@ static int q6v5_wcss_load(struct rproc *rproc, const struct firmware *fw)
int ret;
ret = qcom_mdt_load_no_init(wcss->dev, fw, rproc->firmware,
- 0, wcss->mem_region, wcss->mem_phys,
+ wcss->mem_region, wcss->mem_phys,
wcss->mem_size, &wcss->mem_reloc);
if (ret)
return ret;
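
All three call sites in this series drop the pas_id argument, so the implied prototype of qcom_mdt_load_no_init() is now the following. This is a hedged reconstruction from the call sites; parameter names are assumed, and the authoritative copy lives in include/linux/soc/qcom/mdt_loader.h:

/* Prototype implied by the updated callers above. */
int qcom_mdt_load_no_init(struct device *dev, const struct firmware *fw,
			  const char *fw_name, void *mem_region,
			  phys_addr_t mem_phys, size_t mem_size,
			  phys_addr_t *reloc_base);
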
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index 635eef469ab7..78b7078478d4 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -22,6 +22,13 @@ config RESET_A10SR
This option enables support for the external reset functions for
peripheral PHYs on the Altera Arria10 System Resource Chip.
+config RESET_ASPEED
+ tristate "ASPEED Reset Driver"
+ depends on ARCH_ASPEED || COMPILE_TEST
+ select AUXILIARY_BUS
+ help
+	  This option enables support for the reset controller on the
+	  ASPEED AST2700 SoC.
REPLACE-MARKER
+
config RESET_ATH79
bool "AR71xx Reset Driver" if COMPILE_TEST
default ATH79
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index a917d2522e8d..f7934f9fb90b 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -6,6 +6,7 @@ obj-y += starfive/
obj-y += sti/
obj-y += tegra/
obj-$(CONFIG_RESET_A10SR) += reset-a10sr.o
+obj-$(CONFIG_RESET_ASPEED) += reset-aspeed.o
obj-$(CONFIG_RESET_ATH79) += reset-ath79.o
obj-$(CONFIG_RESET_AXS10X) += reset-axs10x.o
obj-$(CONFIG_RESET_BCM6345) += reset-bcm6345.o
diff --git a/drivers/reset/reset-aspeed.c b/drivers/reset/reset-aspeed.c
new file mode 100644
index 000000000000..dd2f860a69d7
--- /dev/null
+++ b/drivers/reset/reset-aspeed.c
@@ -0,0 +1,253 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2024 ASPEED Technology Inc.
+ */
+
+#include <linux/auxiliary_bus.h>
+#include <linux/cleanup.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/reset-controller.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/reset/aspeed,ast2700-scu.h>
+
+#define SCU0_RESET_CTRL1 0x200
+#define SCU0_RESET_CTRL2 0x220
+#define SCU1_RESET_CTRL1 0x200
+#define SCU1_RESET_CTRL2 0x220
+#define SCU1_PCIE3_CTRL 0x908
+
+struct ast2700_reset_signal {
+ bool dedicated_clr; /* dedicated reset clr offset */
+ u32 offset, bit;
+};
+
+struct aspeed_reset_info {
+ unsigned int nr_resets;
+ const struct ast2700_reset_signal *signal;
+};
+
+struct aspeed_reset {
+ struct reset_controller_dev rcdev;
+ struct aspeed_reset_info *info;
+ spinlock_t lock; /* Protect read-modify-write cycle */
+ void __iomem *base;
+};
+
+static const struct ast2700_reset_signal ast2700_reset0_signals[] = {
+ [SCU0_RESET_SDRAM] = { true, SCU0_RESET_CTRL1, BIT(0) },
+ [SCU0_RESET_DDRPHY] = { true, SCU0_RESET_CTRL1, BIT(1) },
+ [SCU0_RESET_RSA] = { true, SCU0_RESET_CTRL1, BIT(2) },
+ [SCU0_RESET_SHA3] = { true, SCU0_RESET_CTRL1, BIT(3) },
+ [SCU0_RESET_HACE] = { true, SCU0_RESET_CTRL1, BIT(4) },
+ [SCU0_RESET_SOC] = { true, SCU0_RESET_CTRL1, BIT(5) },
+ [SCU0_RESET_VIDEO] = { true, SCU0_RESET_CTRL1, BIT(6) },
+ [SCU0_RESET_2D] = { true, SCU0_RESET_CTRL1, BIT(7) },
+ [SCU0_RESET_PCIS] = { true, SCU0_RESET_CTRL1, BIT(8) },
+ [SCU0_RESET_RVAS0] = { true, SCU0_RESET_CTRL1, BIT(9) },
+ [SCU0_RESET_RVAS1] = { true, SCU0_RESET_CTRL1, BIT(10) },
+ [SCU0_RESET_SM3] = { true, SCU0_RESET_CTRL1, BIT(11) },
+ [SCU0_RESET_SM4] = { true, SCU0_RESET_CTRL1, BIT(12) },
+ [SCU0_RESET_CRT0] = { true, SCU0_RESET_CTRL1, BIT(13) },
+ [SCU0_RESET_ECC] = { true, SCU0_RESET_CTRL1, BIT(14) },
+ [SCU0_RESET_DP_PCI] = { true, SCU0_RESET_CTRL1, BIT(15) },
+ [SCU0_RESET_UFS] = { true, SCU0_RESET_CTRL1, BIT(16) },
+ [SCU0_RESET_EMMC] = { true, SCU0_RESET_CTRL1, BIT(17) },
+ [SCU0_RESET_PCIE1RST] = { true, SCU0_RESET_CTRL1, BIT(18) },
+ [SCU0_RESET_PCIE1RSTOE] = { true, SCU0_RESET_CTRL1, BIT(19) },
+ [SCU0_RESET_PCIE0RST] = { true, SCU0_RESET_CTRL1, BIT(20) },
+ [SCU0_RESET_PCIE0RSTOE] = { true, SCU0_RESET_CTRL1, BIT(21) },
+ [SCU0_RESET_JTAG] = { true, SCU0_RESET_CTRL1, BIT(22) },
+ [SCU0_RESET_MCTP0] = { true, SCU0_RESET_CTRL1, BIT(23) },
+ [SCU0_RESET_MCTP1] = { true, SCU0_RESET_CTRL1, BIT(24) },
+ [SCU0_RESET_XDMA0] = { true, SCU0_RESET_CTRL1, BIT(25) },
+ [SCU0_RESET_XDMA1] = { true, SCU0_RESET_CTRL1, BIT(26) },
+ [SCU0_RESET_H2X1] = { true, SCU0_RESET_CTRL1, BIT(27) },
+ [SCU0_RESET_DP] = { true, SCU0_RESET_CTRL1, BIT(28) },
+ [SCU0_RESET_DP_MCU] = { true, SCU0_RESET_CTRL1, BIT(29) },
+ [SCU0_RESET_SSP] = { true, SCU0_RESET_CTRL1, BIT(30) },
+ [SCU0_RESET_H2X0] = { true, SCU0_RESET_CTRL1, BIT(31) },
+ [SCU0_RESET_PORTA_VHUB] = { true, SCU0_RESET_CTRL2, BIT(0) },
+ [SCU0_RESET_PORTA_PHY3] = { true, SCU0_RESET_CTRL2, BIT(1) },
+ [SCU0_RESET_PORTA_XHCI] = { true, SCU0_RESET_CTRL2, BIT(2) },
+ [SCU0_RESET_PORTB_VHUB] = { true, SCU0_RESET_CTRL2, BIT(3) },
+ [SCU0_RESET_PORTB_PHY3] = { true, SCU0_RESET_CTRL2, BIT(4) },
+ [SCU0_RESET_PORTB_XHCI] = { true, SCU0_RESET_CTRL2, BIT(5) },
+ [SCU0_RESET_PORTA_VHUB_EHCI] = { true, SCU0_RESET_CTRL2, BIT(6) },
+ [SCU0_RESET_PORTB_VHUB_EHCI] = { true, SCU0_RESET_CTRL2, BIT(7) },
+ [SCU0_RESET_UHCI] = { true, SCU0_RESET_CTRL2, BIT(8) },
+ [SCU0_RESET_TSP] = { true, SCU0_RESET_CTRL2, BIT(9) },
+ [SCU0_RESET_E2M0] = { true, SCU0_RESET_CTRL2, BIT(10) },
+ [SCU0_RESET_E2M1] = { true, SCU0_RESET_CTRL2, BIT(11) },
+ [SCU0_RESET_VLINK] = { true, SCU0_RESET_CTRL2, BIT(12) },
+};
+
+static const struct ast2700_reset_signal ast2700_reset1_signals[] = {
+ [SCU1_RESET_LPC0] = { true, SCU1_RESET_CTRL1, BIT(0) },
+ [SCU1_RESET_LPC1] = { true, SCU1_RESET_CTRL1, BIT(1) },
+ [SCU1_RESET_MII] = { true, SCU1_RESET_CTRL1, BIT(2) },
+ [SCU1_RESET_PECI] = { true, SCU1_RESET_CTRL1, BIT(3) },
+ [SCU1_RESET_PWM] = { true, SCU1_RESET_CTRL1, BIT(4) },
+ [SCU1_RESET_MAC0] = { true, SCU1_RESET_CTRL1, BIT(5) },
+ [SCU1_RESET_MAC1] = { true, SCU1_RESET_CTRL1, BIT(6) },
+ [SCU1_RESET_MAC2] = { true, SCU1_RESET_CTRL1, BIT(7) },
+ [SCU1_RESET_ADC] = { true, SCU1_RESET_CTRL1, BIT(8) },
+ [SCU1_RESET_SD] = { true, SCU1_RESET_CTRL1, BIT(9) },
+ [SCU1_RESET_ESPI0] = { true, SCU1_RESET_CTRL1, BIT(10) },
+ [SCU1_RESET_ESPI1] = { true, SCU1_RESET_CTRL1, BIT(11) },
+ [SCU1_RESET_JTAG1] = { true, SCU1_RESET_CTRL1, BIT(12) },
+ [SCU1_RESET_SPI0] = { true, SCU1_RESET_CTRL1, BIT(13) },
+ [SCU1_RESET_SPI1] = { true, SCU1_RESET_CTRL1, BIT(14) },
+ [SCU1_RESET_SPI2] = { true, SCU1_RESET_CTRL1, BIT(15) },
+ [SCU1_RESET_I3C0] = { true, SCU1_RESET_CTRL1, BIT(16) },
+ [SCU1_RESET_I3C1] = { true, SCU1_RESET_CTRL1, BIT(17) },
+ [SCU1_RESET_I3C2] = { true, SCU1_RESET_CTRL1, BIT(18) },
+ [SCU1_RESET_I3C3] = { true, SCU1_RESET_CTRL1, BIT(19) },
+ [SCU1_RESET_I3C4] = { true, SCU1_RESET_CTRL1, BIT(20) },
+ [SCU1_RESET_I3C5] = { true, SCU1_RESET_CTRL1, BIT(21) },
+ [SCU1_RESET_I3C6] = { true, SCU1_RESET_CTRL1, BIT(22) },
+ [SCU1_RESET_I3C7] = { true, SCU1_RESET_CTRL1, BIT(23) },
+ [SCU1_RESET_I3C8] = { true, SCU1_RESET_CTRL1, BIT(24) },
+ [SCU1_RESET_I3C9] = { true, SCU1_RESET_CTRL1, BIT(25) },
+ [SCU1_RESET_I3C10] = { true, SCU1_RESET_CTRL1, BIT(26) },
+ [SCU1_RESET_I3C11] = { true, SCU1_RESET_CTRL1, BIT(27) },
+ [SCU1_RESET_I3C12] = { true, SCU1_RESET_CTRL1, BIT(28) },
+ [SCU1_RESET_I3C13] = { true, SCU1_RESET_CTRL1, BIT(29) },
+ [SCU1_RESET_I3C14] = { true, SCU1_RESET_CTRL1, BIT(30) },
+ [SCU1_RESET_I3C15] = { true, SCU1_RESET_CTRL1, BIT(31) },
+ [SCU1_RESET_MCU0] = { true, SCU1_RESET_CTRL2, BIT(0) },
+ [SCU1_RESET_MCU1] = { true, SCU1_RESET_CTRL2, BIT(1) },
+ [SCU1_RESET_H2A_SPI1] = { true, SCU1_RESET_CTRL2, BIT(2) },
+ [SCU1_RESET_H2A_SPI2] = { true, SCU1_RESET_CTRL2, BIT(3) },
+ [SCU1_RESET_UART0] = { true, SCU1_RESET_CTRL2, BIT(4) },
+ [SCU1_RESET_UART1] = { true, SCU1_RESET_CTRL2, BIT(5) },
+ [SCU1_RESET_UART2] = { true, SCU1_RESET_CTRL2, BIT(6) },
+ [SCU1_RESET_UART3] = { true, SCU1_RESET_CTRL2, BIT(7) },
+ [SCU1_RESET_I2C_FILTER] = { true, SCU1_RESET_CTRL2, BIT(8) },
+ [SCU1_RESET_CALIPTRA] = { true, SCU1_RESET_CTRL2, BIT(9) },
+ [SCU1_RESET_XDMA] = { true, SCU1_RESET_CTRL2, BIT(10) },
+ [SCU1_RESET_FSI] = { true, SCU1_RESET_CTRL2, BIT(12) },
+ [SCU1_RESET_CAN] = { true, SCU1_RESET_CTRL2, BIT(13) },
+ [SCU1_RESET_MCTP] = { true, SCU1_RESET_CTRL2, BIT(14) },
+ [SCU1_RESET_I2C] = { true, SCU1_RESET_CTRL2, BIT(15) },
+ [SCU1_RESET_UART6] = { true, SCU1_RESET_CTRL2, BIT(16) },
+ [SCU1_RESET_UART7] = { true, SCU1_RESET_CTRL2, BIT(17) },
+ [SCU1_RESET_UART8] = { true, SCU1_RESET_CTRL2, BIT(18) },
+ [SCU1_RESET_UART9] = { true, SCU1_RESET_CTRL2, BIT(19) },
+ [SCU1_RESET_LTPI0] = { true, SCU1_RESET_CTRL2, BIT(20) },
+ [SCU1_RESET_VGAL] = { true, SCU1_RESET_CTRL2, BIT(21) },
+ [SCU1_RESET_LTPI1] = { true, SCU1_RESET_CTRL2, BIT(22) },
+ [SCU1_RESET_ACE] = { true, SCU1_RESET_CTRL2, BIT(23) },
+ [SCU1_RESET_E2M] = { true, SCU1_RESET_CTRL2, BIT(24) },
+ [SCU1_RESET_UHCI] = { true, SCU1_RESET_CTRL2, BIT(25) },
+ [SCU1_RESET_PORTC_USB2UART] = { true, SCU1_RESET_CTRL2, BIT(26) },
+ [SCU1_RESET_PORTC_VHUB_EHCI] = { true, SCU1_RESET_CTRL2, BIT(27) },
+ [SCU1_RESET_PORTD_USB2UART] = { true, SCU1_RESET_CTRL2, BIT(28) },
+ [SCU1_RESET_PORTD_VHUB_EHCI] = { true, SCU1_RESET_CTRL2, BIT(29) },
+ [SCU1_RESET_H2X] = { true, SCU1_RESET_CTRL2, BIT(30) },
+ [SCU1_RESET_I3CDMA] = { true, SCU1_RESET_CTRL2, BIT(31) },
+ [SCU1_RESET_PCIE2RST] = { false, SCU1_PCIE3_CTRL, BIT(0) },
+};
+
+static inline struct aspeed_reset *to_aspeed_reset(struct reset_controller_dev *rcdev)
+{
+ return container_of(rcdev, struct aspeed_reset, rcdev);
+}
+
+static int aspeed_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ struct aspeed_reset *rc = to_aspeed_reset(rcdev);
+ void __iomem *reg_offset = rc->base + rc->info->signal[id].offset;
+
+ if (rc->info->signal[id].dedicated_clr) {
+ writel(rc->info->signal[id].bit, reg_offset);
+ } else {
+ guard(spinlock_irqsave)(&rc->lock);
+ writel(readl(reg_offset) & ~rc->info->signal[id].bit, reg_offset);
+ }
+
+ return 0;
+}
+
+static int aspeed_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ struct aspeed_reset *rc = to_aspeed_reset(rcdev);
+ void __iomem *reg_offset = rc->base + rc->info->signal[id].offset;
+
+ if (rc->info->signal[id].dedicated_clr) {
+ writel(rc->info->signal[id].bit, reg_offset + 0x04);
+ } else {
+ guard(spinlock_irqsave)(&rc->lock);
+ writel(readl(reg_offset) | rc->info->signal[id].bit, reg_offset);
+ }
+
+ return 0;
+}
+
+static int aspeed_reset_status(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ struct aspeed_reset *rc = to_aspeed_reset(rcdev);
+ void __iomem *reg_offset = rc->base + rc->info->signal[id].offset;
+
+ return (readl(reg_offset) & rc->info->signal[id].bit) ? 1 : 0;
+}
+
+static const struct reset_control_ops aspeed_reset_ops = {
+ .assert = aspeed_reset_assert,
+ .deassert = aspeed_reset_deassert,
+ .status = aspeed_reset_status,
+};
+
+static int aspeed_reset_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct aspeed_reset *reset;
+ struct device *dev = &adev->dev;
+
+ reset = devm_kzalloc(dev, sizeof(*reset), GFP_KERNEL);
+ if (!reset)
+ return -ENOMEM;
+
+ spin_lock_init(&reset->lock);
+
+ reset->info = (struct aspeed_reset_info *)id->driver_data;
+ reset->rcdev.owner = THIS_MODULE;
+ reset->rcdev.nr_resets = reset->info->nr_resets;
+ reset->rcdev.ops = &aspeed_reset_ops;
+ reset->rcdev.of_node = dev->parent->of_node;
+ reset->rcdev.dev = dev;
+ reset->rcdev.of_reset_n_cells = 1;
+ reset->base = (void __iomem *)adev->dev.platform_data;
+
+ return devm_reset_controller_register(dev, &reset->rcdev);
+}
+
+static const struct aspeed_reset_info ast2700_reset0_info = {
+ .nr_resets = ARRAY_SIZE(ast2700_reset0_signals),
+ .signal = ast2700_reset0_signals,
+};
+
+static const struct aspeed_reset_info ast2700_reset1_info = {
+ .nr_resets = ARRAY_SIZE(ast2700_reset1_signals),
+ .signal = ast2700_reset1_signals,
+};
+
+static const struct auxiliary_device_id aspeed_reset_ids[] = {
+ { .name = "clk_ast2700.reset0", .driver_data = (kernel_ulong_t)&ast2700_reset0_info },
+ { .name = "clk_ast2700.reset1", .driver_data = (kernel_ulong_t)&ast2700_reset1_info },
+ { }
+};
+MODULE_DEVICE_TABLE(auxiliary, aspeed_reset_ids);
+
+static struct auxiliary_driver aspeed_reset_driver = {
+ .probe = aspeed_reset_probe,
+ .id_table = aspeed_reset_ids,
+};
+
+module_auxiliary_driver(aspeed_reset_driver);
+
+MODULE_AUTHOR("Ryan Chen <ryan_chen@aspeedtech.com>");
+MODULE_DESCRIPTION("ASPEED SoC Reset Controller Driver");
+MODULE_LICENSE("GPL");
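
For context, a peripheral driver picks these reset lines up through the generic reset API; the one-cell specifier matches of_reset_n_cells = 1 set in the probe above. A minimal consumer sketch, assuming a device node that references one of the SCU reset IDs (the hold time is an assumption, not taken from the datasheet):

#include <linux/delay.h>
#include <linux/reset.h>

static int demo_reset_cycle(struct device *dev)
{
	struct reset_control *rst;

	rst = devm_reset_control_get_exclusive(dev, NULL);
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	reset_control_assert(rst);		/* write-to-set register      */
	usleep_range(10, 20);			/* assumed hold time          */
	return reset_control_deassert(rst);	/* write-to-clear at +0x04    */
}
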
diff --git a/drivers/reset/reset-bcm6345.c b/drivers/reset/reset-bcm6345.c
index aa9353439e70..56518f7bfbb3 100644
--- a/drivers/reset/reset-bcm6345.c
+++ b/drivers/reset/reset-bcm6345.c
@@ -119,6 +119,7 @@ static int bcm6345_reset_probe(struct platform_device *pdev)
static const struct of_device_id bcm6345_reset_of_match[] = {
{ .compatible = "brcm,bcm6345-reset" },
+ { .compatible = "brcm,bcm63xx-ephy-ctrl" },
{ /* sentinel */ },
};
diff --git a/drivers/reset/reset-intel-gw.c b/drivers/reset/reset-intel-gw.c
index a5a01388ae7f..a5ce3350cb5e 100644
--- a/drivers/reset/reset-intel-gw.c
+++ b/drivers/reset/reset-intel-gw.c
@@ -40,7 +40,6 @@ static const struct regmap_config intel_rcu_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
- .fast_io = true,
};
/*
diff --git a/drivers/reset/reset-qcom-pdc.c b/drivers/reset/reset-qcom-pdc.c
index dce1fc1a68ad..ae2b5aba7a59 100644
--- a/drivers/reset/reset-qcom-pdc.c
+++ b/drivers/reset/reset-qcom-pdc.c
@@ -36,7 +36,6 @@ static const struct regmap_config pdc_regmap_config = {
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x20000,
- .fast_io = true,
};
static const struct qcom_pdc_reset_map sdm845_pdc_resets[] = {
diff --git a/drivers/reset/reset-th1520.c b/drivers/reset/reset-th1520.c
index 7874f0693e1b..14d964a9c6b6 100644
--- a/drivers/reset/reset-th1520.c
+++ b/drivers/reset/reset-th1520.c
@@ -14,10 +14,20 @@
/* register offset in VOSYS_REGMAP */
#define TH1520_GPU_RST_CFG 0x0
#define TH1520_GPU_RST_CFG_MASK GENMASK(1, 0)
+#define TH1520_DPU_RST_CFG 0x4
+#define TH1520_DSI0_RST_CFG 0x8
+#define TH1520_DSI1_RST_CFG 0xc
+#define TH1520_HDMI_RST_CFG 0x14
/* register values */
#define TH1520_GPU_SW_GPU_RST BIT(0)
#define TH1520_GPU_SW_CLKGEN_RST BIT(1)
+#define TH1520_DPU_SW_DPU_HRST BIT(0)
+#define TH1520_DPU_SW_DPU_ARST BIT(1)
+#define TH1520_DPU_SW_DPU_CRST BIT(2)
+#define TH1520_DSI_SW_DSI_PRST BIT(0)
+#define TH1520_HDMI_SW_MAIN_RST BIT(0)
+#define TH1520_HDMI_SW_PRST BIT(1)
struct th1520_reset_priv {
struct reset_controller_dev rcdev;
@@ -37,7 +47,35 @@ static const struct th1520_reset_map th1520_resets[] = {
[TH1520_RESET_ID_GPU_CLKGEN] = {
.bit = TH1520_GPU_SW_CLKGEN_RST,
.reg = TH1520_GPU_RST_CFG,
- }
+ },
+ [TH1520_RESET_ID_DPU_AHB] = {
+ .bit = TH1520_DPU_SW_DPU_HRST,
+ .reg = TH1520_DPU_RST_CFG,
+ },
+ [TH1520_RESET_ID_DPU_AXI] = {
+ .bit = TH1520_DPU_SW_DPU_ARST,
+ .reg = TH1520_DPU_RST_CFG,
+ },
+ [TH1520_RESET_ID_DPU_CORE] = {
+ .bit = TH1520_DPU_SW_DPU_CRST,
+ .reg = TH1520_DPU_RST_CFG,
+ },
+ [TH1520_RESET_ID_DSI0_APB] = {
+ .bit = TH1520_DSI_SW_DSI_PRST,
+ .reg = TH1520_DSI0_RST_CFG,
+ },
+ [TH1520_RESET_ID_DSI1_APB] = {
+ .bit = TH1520_DSI_SW_DSI_PRST,
+ .reg = TH1520_DSI1_RST_CFG,
+ },
+ [TH1520_RESET_ID_HDMI] = {
+ .bit = TH1520_HDMI_SW_MAIN_RST,
+ .reg = TH1520_HDMI_RST_CFG,
+ },
+ [TH1520_RESET_ID_HDMI_APB] = {
+ .bit = TH1520_HDMI_SW_PRST,
+ .reg = TH1520_HDMI_RST_CFG,
+ },
};
static inline struct th1520_reset_priv *
@@ -78,7 +116,6 @@ static const struct regmap_config th1520_reset_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
- .fast_io = true,
};
static int th1520_reset_probe(struct platform_device *pdev)
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 506a947d00a5..7765e40f7cea 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -334,6 +334,11 @@ static int dasd_state_basic_to_ready(struct dasd_device *device)
lim.max_dev_sectors = device->discipline->max_sectors(block);
lim.max_hw_sectors = lim.max_dev_sectors;
lim.logical_block_size = block->bp_block;
+ /*
+ * Set dma_alignment to the logical block size minus 1 so that
+ * the block layer enforces properly aligned user buffers.
+ */
+ lim.dma_alignment = lim.logical_block_size - 1;
if (device->discipline->has_discard) {
unsigned int max_bytes;
@@ -3114,12 +3119,14 @@ static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx,
PTR_ERR(cqr) == -ENOMEM ||
PTR_ERR(cqr) == -EAGAIN) {
rc = BLK_STS_RESOURCE;
- goto out;
+ } else if (PTR_ERR(cqr) == -EINVAL) {
+ rc = BLK_STS_INVAL;
+ } else {
+ DBF_DEV_EVENT(DBF_ERR, basedev,
+ "CCW creation failed (rc=%ld) on request %p",
+ PTR_ERR(cqr), req);
+ rc = BLK_STS_IOERR;
}
- DBF_DEV_EVENT(DBF_ERR, basedev,
- "CCW creation failed (rc=%ld) on request %p",
- PTR_ERR(cqr), req);
- rc = BLK_STS_IOERR;
goto out;
}
/*
@@ -3317,11 +3324,11 @@ static void dasd_release(struct gendisk *disk)
/*
* Return disk geometry.
*/
-static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+static int dasd_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
struct dasd_device *base;
- base = dasd_device_from_gendisk(bdev->bd_disk);
+ base = dasd_device_from_gendisk(disk);
if (!base)
return -ENODEV;
@@ -3331,7 +3338,8 @@ static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return -EINVAL;
}
base->discipline->fill_geometry(base->block, geo);
- geo->start = get_start_sect(bdev) >> base->block->s2b_shift;
+ /* geo->start has already been set by the caller */
+ geo->start >>= base->block->s2b_shift;
dasd_put_device(base);
return 0;
}
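
A worked example of the dma_alignment change above, assuming a 4 KiB hard-sector DASD (a sketch; the enforcement itself happens in the block layer):

/* Sketch: the mask the block layer checks user buffers against. */
static bool dasd_buf_aligned(unsigned long addr, unsigned int lbs)
{
	unsigned int dma_alignment = lbs - 1;	/* 4096 -> 0x0fff */

	return (addr & dma_alignment) == 0;	/* true iff lbs-aligned */
}
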
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index 2b43f6f28362..0fd700c5745a 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -81,8 +81,7 @@ config CCWGROUP
config ISM
tristate "Support for ISM vPCI Adapter"
- depends on PCI
- imply SMC
+ depends on PCI && DIBS
default n
help
Select this option if you want to use the Internal Shared Memory
diff --git a/drivers/s390/net/ism.h b/drivers/s390/net/ism.h
index 047fa6101555..08d17956cb36 100644
--- a/drivers/s390/net/ism.h
+++ b/drivers/s390/net/ism.h
@@ -5,11 +5,13 @@
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/pci.h>
-#include <linux/ism.h>
-#include <net/smc.h>
+#include <linux/dibs.h>
#include <asm/pci_insn.h>
#define UTIL_STR_LEN 16
+#define ISM_ERROR 0xFFFF
+
+#define ISM_NR_DMBS 1920
/*
* Do not use the first word of the DMB bits to ensure 8 byte aligned access.
@@ -32,6 +34,23 @@
#define ISM_UNREG_SBA 0x11
#define ISM_UNREG_IEQ 0x12
+enum ism_event_type {
+ ISM_EVENT_BUF = 0x00,
+ ISM_EVENT_DEV = 0x01,
+ ISM_EVENT_SWR = 0x02
+};
+
+enum ism_event_code {
+ ISM_BUF_DMB_UNREGISTERED = 0x04,
+ ISM_BUF_USING_ISM_DEV_DISABLED = 0x08,
+ ISM_BUF_OWNING_ISM_DEV_IN_ERR_STATE = 0x02,
+ ISM_BUF_USING_ISM_DEV_IN_ERR_STATE = 0x03,
+ ISM_BUF_VLAN_MISMATCH_WITH_OWNER = 0x05,
+ ISM_BUF_VLAN_MISMATCH_WITH_USER = 0x06,
+ ISM_DEV_GID_DISABLED = 0x07,
+ ISM_DEV_GID_ERR_STATE = 0x01
+};
+
struct ism_req_hdr {
u32 cmd;
u16 : 16;
@@ -65,6 +84,15 @@ union ism_reg_ieq {
} response;
} __aligned(16);
+/* ISM-vPCI devices provide 64-bit GIDs.
+ * Map them to ISM UUID GIDs like this:
+ * _________________________________________
+ * | 64-bit ISM-vPCI GID | 00000000_00000000 |
+ * -----------------------------------------
+ * This will be interpreted as a UUID variant that is reserved
+ * for NCS backward compatibility, so it will not collide with
+ * proper UUIDs.
+ */
union ism_read_gid {
struct {
struct ism_req_hdr hdr;
@@ -174,6 +202,14 @@ struct ism_eq_header {
u64 : 64;
};
+struct ism_event {
+ u32 type;
+ u32 code;
+ u64 tok;
+ u64 time;
+ u64 info;
+};
+
struct ism_eq {
struct ism_eq_header header;
struct ism_event entry[15];
@@ -188,6 +224,19 @@ struct ism_sba {
u16 dmbe_mask[ISM_NR_DMBS];
};
+struct ism_dev {
+ spinlock_t cmd_lock; /* serializes cmds */
+ struct dibs_dev *dibs;
+ struct pci_dev *pdev;
+ struct ism_sba *sba;
+ dma_addr_t sba_dma_addr;
+ DECLARE_BITMAP(sba_bitmap, ISM_NR_DMBS);
+
+ struct ism_eq *ieq;
+ dma_addr_t ieq_dma_addr;
+ int ieq_idx;
+};
+
#define ISM_CREATE_REQ(dmb, idx, sf, offset) \
((dmb) | (idx) << 24 | (sf) << 23 | (offset))
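
The mapping in the comment block above reduces to placing the 64-bit ISM-vPCI GID in the first eight bytes of a zeroed uuid_t, which is also what ism_read_local_gid() does in the next file. A hedged helper sketch (the helper name is illustrative):

#include <linux/string.h>
#include <linux/uuid.h>

/* Sketch: 64-bit ISM-vPCI GID -> dibs uuid_t GID, low 8 bytes used. */
static void ism_vpci_gid_to_uuid(u64 vpci_gid, uuid_t *gid)
{
	memset(gid, 0, sizeof(*gid));
	memcpy(gid, &vpci_gid, sizeof(vpci_gid));
}
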
diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
index 6cd60b174315..f84aa2e676e9 100644
--- a/drivers/s390/net/ism_drv.c
+++ b/drivers/s390/net/ism_drv.c
@@ -31,101 +31,6 @@ MODULE_DEVICE_TABLE(pci, ism_device_table);
static debug_info_t *ism_debug_info;
-#define NO_CLIENT 0xff /* must be >= MAX_CLIENTS */
-static struct ism_client *clients[MAX_CLIENTS]; /* use an array rather than */
- /* a list for fast mapping */
-static u8 max_client;
-static DEFINE_MUTEX(clients_lock);
-static bool ism_v2_capable;
-struct ism_dev_list {
- struct list_head list;
- struct mutex mutex; /* protects ism device list */
-};
-
-static struct ism_dev_list ism_dev_list = {
- .list = LIST_HEAD_INIT(ism_dev_list.list),
- .mutex = __MUTEX_INITIALIZER(ism_dev_list.mutex),
-};
-
-static void ism_setup_forwarding(struct ism_client *client, struct ism_dev *ism)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ism->lock, flags);
- ism->subs[client->id] = client;
- spin_unlock_irqrestore(&ism->lock, flags);
-}
-
-int ism_register_client(struct ism_client *client)
-{
- struct ism_dev *ism;
- int i, rc = -ENOSPC;
-
- mutex_lock(&ism_dev_list.mutex);
- mutex_lock(&clients_lock);
- for (i = 0; i < MAX_CLIENTS; ++i) {
- if (!clients[i]) {
- clients[i] = client;
- client->id = i;
- if (i == max_client)
- max_client++;
- rc = 0;
- break;
- }
- }
- mutex_unlock(&clients_lock);
-
- if (i < MAX_CLIENTS) {
- /* initialize with all devices that we got so far */
- list_for_each_entry(ism, &ism_dev_list.list, list) {
- ism->priv[i] = NULL;
- client->add(ism);
- ism_setup_forwarding(client, ism);
- }
- }
- mutex_unlock(&ism_dev_list.mutex);
-
- return rc;
-}
-EXPORT_SYMBOL_GPL(ism_register_client);
-
-int ism_unregister_client(struct ism_client *client)
-{
- struct ism_dev *ism;
- unsigned long flags;
- int rc = 0;
-
- mutex_lock(&ism_dev_list.mutex);
- list_for_each_entry(ism, &ism_dev_list.list, list) {
- spin_lock_irqsave(&ism->lock, flags);
- /* Stop forwarding IRQs and events */
- ism->subs[client->id] = NULL;
- for (int i = 0; i < ISM_NR_DMBS; ++i) {
- if (ism->sba_client_arr[i] == client->id) {
- WARN(1, "%s: attempt to unregister '%s' with registered dmb(s)\n",
- __func__, client->name);
- rc = -EBUSY;
- goto err_reg_dmb;
- }
- }
- spin_unlock_irqrestore(&ism->lock, flags);
- }
- mutex_unlock(&ism_dev_list.mutex);
-
- mutex_lock(&clients_lock);
- clients[client->id] = NULL;
- if (client->id + 1 == max_client)
- max_client--;
- mutex_unlock(&clients_lock);
- return rc;
-
-err_reg_dmb:
- spin_unlock_irqrestore(&ism->lock, flags);
- mutex_unlock(&ism_dev_list.mutex);
- return rc;
-}
-EXPORT_SYMBOL_GPL(ism_unregister_client);
-
static int ism_cmd(struct ism_dev *ism, void *cmd)
{
struct ism_req_hdr *req = cmd;
@@ -273,8 +178,9 @@ static int unregister_ieq(struct ism_dev *ism)
return 0;
}
-static int ism_read_local_gid(struct ism_dev *ism)
+static int ism_read_local_gid(struct dibs_dev *dibs)
{
+ struct ism_dev *ism = dibs->drv_priv;
union ism_read_gid cmd;
int ret;
@@ -286,20 +192,43 @@ static int ism_read_local_gid(struct ism_dev *ism)
if (ret)
goto out;
- ism->local_gid = cmd.response.gid;
+ memset(&dibs->gid, 0, sizeof(dibs->gid));
+ memcpy(&dibs->gid, &cmd.response.gid, sizeof(cmd.response.gid));
out:
return ret;
}
-static void ism_free_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
+static int ism_query_rgid(struct dibs_dev *dibs, const uuid_t *rgid,
+ u32 vid_valid, u32 vid)
{
- clear_bit(dmb->sba_idx, ism->sba_bitmap);
+ struct ism_dev *ism = dibs->drv_priv;
+ union ism_query_rgid cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.request.hdr.cmd = ISM_QUERY_RGID;
+ cmd.request.hdr.len = sizeof(cmd.request);
+
+ memcpy(&cmd.request.rgid, rgid, sizeof(cmd.request.rgid));
+ cmd.request.vlan_valid = vid_valid;
+ cmd.request.vlan_id = vid;
+
+ return ism_cmd(ism, &cmd);
+}
+
+static int ism_max_dmbs(void)
+{
+ return ISM_NR_DMBS;
+}
+
+static void ism_free_dmb(struct ism_dev *ism, struct dibs_dmb *dmb)
+{
+ clear_bit(dmb->idx, ism->sba_bitmap);
dma_unmap_page(&ism->pdev->dev, dmb->dma_addr, dmb->dmb_len,
DMA_FROM_DEVICE);
folio_put(virt_to_folio(dmb->cpu_addr));
}
-static int ism_alloc_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
+static int ism_alloc_dmb(struct ism_dev *ism, struct dibs_dmb *dmb)
{
struct folio *folio;
unsigned long bit;
@@ -308,16 +237,16 @@ static int ism_alloc_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
if (PAGE_ALIGN(dmb->dmb_len) > dma_get_max_seg_size(&ism->pdev->dev))
return -EINVAL;
- if (!dmb->sba_idx) {
+ if (!dmb->idx) {
bit = find_next_zero_bit(ism->sba_bitmap, ISM_NR_DMBS,
ISM_DMB_BIT_OFFSET);
if (bit == ISM_NR_DMBS)
return -ENOSPC;
- dmb->sba_idx = bit;
+ dmb->idx = bit;
}
- if (dmb->sba_idx < ISM_DMB_BIT_OFFSET ||
- test_and_set_bit(dmb->sba_idx, ism->sba_bitmap))
+ if (dmb->idx < ISM_DMB_BIT_OFFSET ||
+ test_and_set_bit(dmb->idx, ism->sba_bitmap))
return -EINVAL;
folio = folio_alloc(GFP_KERNEL | __GFP_NOWARN | __GFP_NOMEMALLOC |
@@ -342,13 +271,14 @@ static int ism_alloc_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
out_free:
kfree(dmb->cpu_addr);
out_bit:
- clear_bit(dmb->sba_idx, ism->sba_bitmap);
+ clear_bit(dmb->idx, ism->sba_bitmap);
return rc;
}
-int ism_register_dmb(struct ism_dev *ism, struct ism_dmb *dmb,
- struct ism_client *client)
+static int ism_register_dmb(struct dibs_dev *dibs, struct dibs_dmb *dmb,
+ struct dibs_client *client)
{
+ struct ism_dev *ism = dibs->drv_priv;
union ism_reg_dmb cmd;
unsigned long flags;
int ret;
@@ -363,10 +293,10 @@ int ism_register_dmb(struct ism_dev *ism, struct ism_dmb *dmb,
cmd.request.dmb = dmb->dma_addr;
cmd.request.dmb_len = dmb->dmb_len;
- cmd.request.sba_idx = dmb->sba_idx;
+ cmd.request.sba_idx = dmb->idx;
cmd.request.vlan_valid = dmb->vlan_valid;
cmd.request.vlan_id = dmb->vlan_id;
- cmd.request.rgid = dmb->rgid;
+ memcpy(&cmd.request.rgid, &dmb->rgid, sizeof(u64));
ret = ism_cmd(ism, &cmd);
if (ret) {
@@ -374,16 +304,16 @@ int ism_register_dmb(struct ism_dev *ism, struct ism_dmb *dmb,
goto out;
}
dmb->dmb_tok = cmd.response.dmb_tok;
- spin_lock_irqsave(&ism->lock, flags);
- ism->sba_client_arr[dmb->sba_idx - ISM_DMB_BIT_OFFSET] = client->id;
- spin_unlock_irqrestore(&ism->lock, flags);
+ spin_lock_irqsave(&dibs->lock, flags);
+ dibs->dmb_clientid_arr[dmb->idx - ISM_DMB_BIT_OFFSET] = client->id;
+ spin_unlock_irqrestore(&dibs->lock, flags);
out:
return ret;
}
-EXPORT_SYMBOL_GPL(ism_register_dmb);
-int ism_unregister_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
+static int ism_unregister_dmb(struct dibs_dev *dibs, struct dibs_dmb *dmb)
{
+ struct ism_dev *ism = dibs->drv_priv;
union ism_unreg_dmb cmd;
unsigned long flags;
int ret;
@@ -394,9 +324,9 @@ int ism_unregister_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
cmd.request.dmb_tok = dmb->dmb_tok;
- spin_lock_irqsave(&ism->lock, flags);
- ism->sba_client_arr[dmb->sba_idx - ISM_DMB_BIT_OFFSET] = NO_CLIENT;
- spin_unlock_irqrestore(&ism->lock, flags);
+ spin_lock_irqsave(&dibs->lock, flags);
+ dibs->dmb_clientid_arr[dmb->idx - ISM_DMB_BIT_OFFSET] = NO_DIBS_CLIENT;
+ spin_unlock_irqrestore(&dibs->lock, flags);
ret = ism_cmd(ism, &cmd);
if (ret && ret != ISM_ERROR)
@@ -406,10 +336,10 @@ int ism_unregister_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
out:
return ret;
}
-EXPORT_SYMBOL_GPL(ism_unregister_dmb);
-static int ism_add_vlan_id(struct ism_dev *ism, u64 vlan_id)
+static int ism_add_vlan_id(struct dibs_dev *dibs, u64 vlan_id)
{
+ struct ism_dev *ism = dibs->drv_priv;
union ism_set_vlan_id cmd;
memset(&cmd, 0, sizeof(cmd));
@@ -421,8 +351,9 @@ static int ism_add_vlan_id(struct ism_dev *ism, u64 vlan_id)
return ism_cmd(ism, &cmd);
}
-static int ism_del_vlan_id(struct ism_dev *ism, u64 vlan_id)
+static int ism_del_vlan_id(struct dibs_dev *dibs, u64 vlan_id)
{
+ struct ism_dev *ism = dibs->drv_priv;
union ism_set_vlan_id cmd;
memset(&cmd, 0, sizeof(cmd));
@@ -434,15 +365,35 @@ static int ism_del_vlan_id(struct ism_dev *ism, u64 vlan_id)
return ism_cmd(ism, &cmd);
}
+static int ism_signal_ieq(struct dibs_dev *dibs, const uuid_t *rgid,
+ u32 trigger_irq, u32 event_code, u64 info)
+{
+ struct ism_dev *ism = dibs->drv_priv;
+ union ism_sig_ieq cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.request.hdr.cmd = ISM_SIGNAL_IEQ;
+ cmd.request.hdr.len = sizeof(cmd.request);
+
+ memcpy(&cmd.request.rgid, rgid, sizeof(cmd.request.rgid));
+ cmd.request.trigger_irq = trigger_irq;
+ cmd.request.event_code = event_code;
+ cmd.request.info = info;
+
+ return ism_cmd(ism, &cmd);
+}
+
static unsigned int max_bytes(unsigned int start, unsigned int len,
unsigned int boundary)
{
return min(boundary - (start & (boundary - 1)), len);
}
-int ism_move(struct ism_dev *ism, u64 dmb_tok, unsigned int idx, bool sf,
- unsigned int offset, void *data, unsigned int size)
+static int ism_move(struct dibs_dev *dibs, u64 dmb_tok, unsigned int idx,
+ bool sf, unsigned int offset, void *data,
+ unsigned int size)
{
+ struct ism_dev *ism = dibs->drv_priv;
unsigned int bytes;
u64 dmb_req;
int ret;
@@ -463,24 +414,79 @@ int ism_move(struct ism_dev *ism, u64 dmb_tok, unsigned int idx, bool sf,
return 0;
}
-EXPORT_SYMBOL_GPL(ism_move);
+
+static u16 ism_get_chid(struct dibs_dev *dibs)
+{
+ struct ism_dev *ism = dibs->drv_priv;
+
+ if (!ism || !ism->pdev)
+ return 0;
+
+ return to_zpci(ism->pdev)->pchid;
+}
+
+static int ism_match_event_type(u32 s390_event_type)
+{
+ switch (s390_event_type) {
+ case ISM_EVENT_BUF:
+ return DIBS_BUF_EVENT;
+ case ISM_EVENT_DEV:
+ return DIBS_DEV_EVENT;
+ case ISM_EVENT_SWR:
+ return DIBS_SW_EVENT;
+ default:
+ return DIBS_OTHER_TYPE;
+ }
+}
+
+static int ism_match_event_subtype(u32 s390_event_subtype)
+{
+ switch (s390_event_subtype) {
+ case ISM_BUF_DMB_UNREGISTERED:
+ return DIBS_BUF_UNREGISTERED;
+ case ISM_DEV_GID_DISABLED:
+ return DIBS_DEV_DISABLED;
+ case ISM_DEV_GID_ERR_STATE:
+ return DIBS_DEV_ERR_STATE;
+ default:
+ return DIBS_OTHER_SUBTYPE;
+ }
+}
static void ism_handle_event(struct ism_dev *ism)
{
+ struct dibs_dev *dibs = ism->dibs;
+ struct dibs_event event;
struct ism_event *entry;
- struct ism_client *clt;
+ struct dibs_client *clt;
int i;
while ((ism->ieq_idx + 1) != READ_ONCE(ism->ieq->header.idx)) {
- if (++(ism->ieq_idx) == ARRAY_SIZE(ism->ieq->entry))
+ if (++ism->ieq_idx == ARRAY_SIZE(ism->ieq->entry))
ism->ieq_idx = 0;
entry = &ism->ieq->entry[ism->ieq_idx];
debug_event(ism_debug_info, 2, entry, sizeof(*entry));
- for (i = 0; i < max_client; ++i) {
- clt = ism->subs[i];
+ memset(&event, 0, sizeof(event));
+ event.type = ism_match_event_type(entry->type);
+ if (event.type == DIBS_SW_EVENT)
+ event.subtype = entry->code;
+ else
+ event.subtype = ism_match_event_subtype(entry->code);
+ event.time = entry->time;
+ event.data = entry->info;
+ switch (event.type) {
+ case DIBS_BUF_EVENT:
+ event.buffer_tok = entry->tok;
+ break;
+ case DIBS_DEV_EVENT:
+ case DIBS_SW_EVENT:
+ memcpy(&event.gid, &entry->tok, sizeof(u64));
+ }
+ for (i = 0; i < MAX_DIBS_CLIENTS; ++i) {
+ clt = dibs->subs[i];
if (clt)
- clt->handle_event(ism, entry);
+ clt->ops->handle_event(dibs, &event);
}
}
}
@@ -489,14 +495,17 @@ static irqreturn_t ism_handle_irq(int irq, void *data)
{
struct ism_dev *ism = data;
unsigned long bit, end;
+ struct dibs_dev *dibs;
unsigned long *bv;
u16 dmbemask;
u8 client_id;
+ dibs = ism->dibs;
+
bv = (void *) &ism->sba->dmb_bits[ISM_DMB_WORD_OFFSET];
end = sizeof(ism->sba->dmb_bits) * BITS_PER_BYTE - ISM_DMB_BIT_OFFSET;
- spin_lock(&ism->lock);
+ spin_lock(&dibs->lock);
ism->sba->s = 0;
barrier();
for (bit = 0;;) {
@@ -508,10 +517,13 @@ static irqreturn_t ism_handle_irq(int irq, void *data)
dmbemask = ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET];
ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0;
barrier();
- client_id = ism->sba_client_arr[bit];
- if (unlikely(client_id == NO_CLIENT || !ism->subs[client_id]))
+ client_id = dibs->dmb_clientid_arr[bit];
+ if (unlikely(client_id == NO_DIBS_CLIENT ||
+ !dibs->subs[client_id]))
continue;
- ism->subs[client_id]->handle_irq(ism, bit + ISM_DMB_BIT_OFFSET, dmbemask);
+ dibs->subs[client_id]->ops->handle_irq(dibs,
+ bit + ISM_DMB_BIT_OFFSET,
+ dmbemask);
}
if (ism->sba->e) {
@@ -519,28 +531,35 @@ static irqreturn_t ism_handle_irq(int irq, void *data)
barrier();
ism_handle_event(ism);
}
- spin_unlock(&ism->lock);
+ spin_unlock(&dibs->lock);
return IRQ_HANDLED;
}
+static const struct dibs_dev_ops ism_ops = {
+ .get_fabric_id = ism_get_chid,
+ .query_remote_gid = ism_query_rgid,
+ .max_dmbs = ism_max_dmbs,
+ .register_dmb = ism_register_dmb,
+ .unregister_dmb = ism_unregister_dmb,
+ .move_data = ism_move,
+ .add_vlan_id = ism_add_vlan_id,
+ .del_vlan_id = ism_del_vlan_id,
+ .signal_event = ism_signal_ieq,
+};
+
static int ism_dev_init(struct ism_dev *ism)
{
struct pci_dev *pdev = ism->pdev;
- int i, ret;
+ int ret;
ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
if (ret <= 0)
goto out;
- ism->sba_client_arr = kzalloc(ISM_NR_DMBS, GFP_KERNEL);
- if (!ism->sba_client_arr)
- goto free_vectors;
- memset(ism->sba_client_arr, NO_CLIENT, ISM_NR_DMBS);
-
ret = request_irq(pci_irq_vector(pdev, 0), ism_handle_irq, 0,
pci_name(pdev), ism);
if (ret)
- goto free_client_arr;
+ goto free_vectors;
ret = register_sba(ism);
if (ret)
@@ -550,57 +569,33 @@ static int ism_dev_init(struct ism_dev *ism)
if (ret)
goto unreg_sba;
- ret = ism_read_local_gid(ism);
- if (ret)
- goto unreg_ieq;
-
- if (!ism_add_vlan_id(ism, ISM_RESERVED_VLANID))
- /* hardware is V2 capable */
- ism_v2_capable = true;
- else
- ism_v2_capable = false;
-
- mutex_lock(&ism_dev_list.mutex);
- mutex_lock(&clients_lock);
- for (i = 0; i < max_client; ++i) {
- if (clients[i]) {
- clients[i]->add(ism);
- ism_setup_forwarding(clients[i], ism);
- }
- }
- mutex_unlock(&clients_lock);
-
- list_add(&ism->list, &ism_dev_list.list);
- mutex_unlock(&ism_dev_list.mutex);
-
query_info(ism);
return 0;
-unreg_ieq:
- unregister_ieq(ism);
unreg_sba:
unregister_sba(ism);
free_irq:
free_irq(pci_irq_vector(pdev, 0), ism);
-free_client_arr:
- kfree(ism->sba_client_arr);
free_vectors:
pci_free_irq_vectors(pdev);
out:
return ret;
}
-static void ism_dev_release(struct device *dev)
+static void ism_dev_exit(struct ism_dev *ism)
{
- struct ism_dev *ism;
-
- ism = container_of(dev, struct ism_dev, dev);
+ struct pci_dev *pdev = ism->pdev;
- kfree(ism);
+ unregister_ieq(ism);
+ unregister_sba(ism);
+ free_irq(pci_irq_vector(pdev, 0), ism);
+ pci_free_irq_vectors(pdev);
}
static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
+ struct dibs_dev *dibs;
+ struct zpci_dev *zdev;
struct ism_dev *ism;
int ret;
@@ -608,21 +603,13 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (!ism)
return -ENOMEM;
- spin_lock_init(&ism->lock);
spin_lock_init(&ism->cmd_lock);
dev_set_drvdata(&pdev->dev, ism);
ism->pdev = pdev;
- ism->dev.parent = &pdev->dev;
- ism->dev.release = ism_dev_release;
- device_initialize(&ism->dev);
- dev_set_name(&ism->dev, "%s", dev_name(&pdev->dev));
- ret = device_add(&ism->dev);
- if (ret)
- goto err_dev;
ret = pci_enable_device_mem(pdev);
if (ret)
- goto err;
+ goto err_dev;
ret = pci_request_mem_regions(pdev, DRV_NAME);
if (ret)
@@ -636,66 +623,69 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dma_set_max_seg_size(&pdev->dev, SZ_1M);
pci_set_master(pdev);
+ dibs = dibs_dev_alloc();
+ if (!dibs) {
+ ret = -ENOMEM;
+ goto err_resource;
+ }
+ /* set this up before we enable interrupts */
+ ism->dibs = dibs;
+ dibs->drv_priv = ism;
+ dibs->ops = &ism_ops;
+
+ /* Enable the ism device; any interrupts and events are ignored
+ * until dibs_dev_add() makes the device known to its clients.
+ */
ret = ism_dev_init(ism);
if (ret)
- goto err_resource;
+ goto err_dibs;
+
+ /* after ism_dev_init() we can issue ism commands to read the local gid */
+ ret = ism_read_local_gid(dibs);
+ if (ret)
+ goto err_ism;
+
+ dibs->dev.parent = &pdev->dev;
+
+ zdev = to_zpci(pdev);
+ dev_set_name(&dibs->dev, "ism%x", zdev->uid ? zdev->uid : zdev->fid);
+
+ ret = dibs_dev_add(dibs);
+ if (ret)
+ goto err_ism;
return 0;
+err_ism:
+ ism_dev_exit(ism);
+err_dibs:
+ /* pairs with dibs_dev_alloc() */
+ put_device(&dibs->dev);
err_resource:
pci_release_mem_regions(pdev);
err_disable:
pci_disable_device(pdev);
-err:
- device_del(&ism->dev);
err_dev:
dev_set_drvdata(&pdev->dev, NULL);
- put_device(&ism->dev);
+ kfree(ism);
return ret;
}
-static void ism_dev_exit(struct ism_dev *ism)
-{
- struct pci_dev *pdev = ism->pdev;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&ism->lock, flags);
- for (i = 0; i < max_client; ++i)
- ism->subs[i] = NULL;
- spin_unlock_irqrestore(&ism->lock, flags);
-
- mutex_lock(&ism_dev_list.mutex);
- mutex_lock(&clients_lock);
- for (i = 0; i < max_client; ++i) {
- if (clients[i])
- clients[i]->remove(ism);
- }
- mutex_unlock(&clients_lock);
-
- if (ism_v2_capable)
- ism_del_vlan_id(ism, ISM_RESERVED_VLANID);
- unregister_ieq(ism);
- unregister_sba(ism);
- free_irq(pci_irq_vector(pdev, 0), ism);
- kfree(ism->sba_client_arr);
- pci_free_irq_vectors(pdev);
- list_del_init(&ism->list);
- mutex_unlock(&ism_dev_list.mutex);
-}
-
static void ism_remove(struct pci_dev *pdev)
{
struct ism_dev *ism = dev_get_drvdata(&pdev->dev);
+ struct dibs_dev *dibs = ism->dibs;
+ dibs_dev_del(dibs);
ism_dev_exit(ism);
+ /* pairs with dibs_dev_alloc() */
+ put_device(&dibs->dev);
pci_release_mem_regions(pdev);
pci_disable_device(pdev);
- device_del(&ism->dev);
dev_set_drvdata(&pdev->dev, NULL);
- put_device(&ism->dev);
+ kfree(ism);
}
static struct pci_driver ism_driver = {
@@ -713,8 +703,6 @@ static int __init ism_init(void)
if (!ism_debug_info)
return -ENODEV;
- memset(clients, 0, sizeof(clients));
- max_client = 0;
debug_register_view(ism_debug_info, &debug_hex_ascii_view);
ret = pci_register_driver(&ism_driver);
if (ret)
@@ -731,150 +719,3 @@ static void __exit ism_exit(void)
module_init(ism_init);
module_exit(ism_exit);
-
-/*************************** SMC-D Implementation *****************************/
-
-#if IS_ENABLED(CONFIG_SMC)
-static int ism_query_rgid(struct ism_dev *ism, u64 rgid, u32 vid_valid,
- u32 vid)
-{
- union ism_query_rgid cmd;
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.request.hdr.cmd = ISM_QUERY_RGID;
- cmd.request.hdr.len = sizeof(cmd.request);
-
- cmd.request.rgid = rgid;
- cmd.request.vlan_valid = vid_valid;
- cmd.request.vlan_id = vid;
-
- return ism_cmd(ism, &cmd);
-}
-
-static int smcd_query_rgid(struct smcd_dev *smcd, struct smcd_gid *rgid,
- u32 vid_valid, u32 vid)
-{
- return ism_query_rgid(smcd->priv, rgid->gid, vid_valid, vid);
-}
-
-static int smcd_register_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb,
- void *client)
-{
- return ism_register_dmb(smcd->priv, (struct ism_dmb *)dmb, client);
-}
-
-static int smcd_unregister_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
-{
- return ism_unregister_dmb(smcd->priv, (struct ism_dmb *)dmb);
-}
-
-static int smcd_add_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
-{
- return ism_add_vlan_id(smcd->priv, vlan_id);
-}
-
-static int smcd_del_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
-{
- return ism_del_vlan_id(smcd->priv, vlan_id);
-}
-
-static int smcd_set_vlan_required(struct smcd_dev *smcd)
-{
- return ism_cmd_simple(smcd->priv, ISM_SET_VLAN);
-}
-
-static int smcd_reset_vlan_required(struct smcd_dev *smcd)
-{
- return ism_cmd_simple(smcd->priv, ISM_RESET_VLAN);
-}
-
-static int ism_signal_ieq(struct ism_dev *ism, u64 rgid, u32 trigger_irq,
- u32 event_code, u64 info)
-{
- union ism_sig_ieq cmd;
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.request.hdr.cmd = ISM_SIGNAL_IEQ;
- cmd.request.hdr.len = sizeof(cmd.request);
-
- cmd.request.rgid = rgid;
- cmd.request.trigger_irq = trigger_irq;
- cmd.request.event_code = event_code;
- cmd.request.info = info;
-
- return ism_cmd(ism, &cmd);
-}
-
-static int smcd_signal_ieq(struct smcd_dev *smcd, struct smcd_gid *rgid,
- u32 trigger_irq, u32 event_code, u64 info)
-{
- return ism_signal_ieq(smcd->priv, rgid->gid,
- trigger_irq, event_code, info);
-}
-
-static int smcd_move(struct smcd_dev *smcd, u64 dmb_tok, unsigned int idx,
- bool sf, unsigned int offset, void *data,
- unsigned int size)
-{
- return ism_move(smcd->priv, dmb_tok, idx, sf, offset, data, size);
-}
-
-static int smcd_supports_v2(void)
-{
- return ism_v2_capable;
-}
-
-static u64 ism_get_local_gid(struct ism_dev *ism)
-{
- return ism->local_gid;
-}
-
-static void smcd_get_local_gid(struct smcd_dev *smcd,
- struct smcd_gid *smcd_gid)
-{
- smcd_gid->gid = ism_get_local_gid(smcd->priv);
- smcd_gid->gid_ext = 0;
-}
-
-static u16 ism_get_chid(struct ism_dev *ism)
-{
- if (!ism || !ism->pdev)
- return 0;
-
- return to_zpci(ism->pdev)->pchid;
-}
-
-static u16 smcd_get_chid(struct smcd_dev *smcd)
-{
- return ism_get_chid(smcd->priv);
-}
-
-static inline struct device *smcd_get_dev(struct smcd_dev *dev)
-{
- struct ism_dev *ism = dev->priv;
-
- return &ism->dev;
-}
-
-static const struct smcd_ops ism_ops = {
- .query_remote_gid = smcd_query_rgid,
- .register_dmb = smcd_register_dmb,
- .unregister_dmb = smcd_unregister_dmb,
- .add_vlan_id = smcd_add_vlan_id,
- .del_vlan_id = smcd_del_vlan_id,
- .set_vlan_required = smcd_set_vlan_required,
- .reset_vlan_required = smcd_reset_vlan_required,
- .signal_event = smcd_signal_ieq,
- .move_data = smcd_move,
- .supports_v2 = smcd_supports_v2,
- .get_local_gid = smcd_get_local_gid,
- .get_chid = smcd_get_chid,
- .get_dev = smcd_get_dev,
-};
-
-const struct smcd_ops *ism_get_smcd_ops(void)
-{
- return &ism_ops;
-}
-EXPORT_SYMBOL_GPL(ism_get_smcd_ops);
-#endif
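
The client array and event forwarding removed above now live in the dibs layer; devices reach clients through dibs->subs[client->id]->ops. A minimal client sketch inferred from those call sites; the ops struct layout and the registration entry point are assumptions modeled on the removed ism_register_client():

static void demo_handle_irq(struct dibs_dev *dibs, unsigned int idx,
			    u16 dmbemask)
{
	/* DMB idx was signaled; dmbemask carries the per-DMBE bits */
}

static void demo_handle_event(struct dibs_dev *dibs,
			      struct dibs_event *event)
{
	/* event->type: DIBS_BUF_EVENT / DIBS_DEV_EVENT / DIBS_SW_EVENT */
}

static const struct dibs_client_ops demo_ops = {
	.handle_irq = demo_handle_irq,
	.handle_event = demo_handle_event,
};

static struct dibs_client demo_client = {
	.name = "demo",
	.ops = &demo_ops,
};
/* assumed registration entry point: dibs_register_client(&demo_client); */
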
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 883d4a12a172..a377a6f6900a 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -1695,7 +1695,7 @@ out:
} /* End twa_reset_sequence() */
/* This function returns unit geometry in cylinders/heads/sectors */
-static int twa_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[])
+static int twa_scsi_biosparam(struct scsi_device *sdev, struct gendisk *unused, sector_t capacity, int geom[])
{
int heads, sectors, cylinders;
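
The same mechanical conversion repeats through the rest of this series: the SCSI bios_param method and the scsi_bios_ptable()/scsi_partsize()/scsicam_bios_param() helpers now take the gendisk instead of a block_device. A hedged sketch of the converted shape (geometry values are the conventional 64/32 fallback, not taken from any one driver):

static int demo_biosparam(struct scsi_device *sdev, struct gendisk *disk,
			  sector_t capacity, int geom[])
{
	/* prefer the geometry recorded in the partition table */
	if (scsi_partsize(disk, capacity, geom))
		return 0;

	geom[0] = 64;					/* heads     */
	geom[1] = 32;					/* sectors   */
	geom[2] = (unsigned long)capacity / (64 * 32);	/* cylinders */
	return 0;
}
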
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index 8d4174c7107e..e319be7d369c 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -1404,7 +1404,7 @@ out:
} /* End twl_reset_device_extension() */
/* This function returns unit geometry in cylinders/heads/sectors */
-static int twl_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[])
+static int twl_scsi_biosparam(struct scsi_device *sdev, struct gendisk *unused, sector_t capacity, int geom[])
{
int heads, sectors;
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index 89bd56f78ef9..0306a228c702 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -1340,7 +1340,7 @@ static int tw_reset_device_extension(TW_Device_Extension *tw_dev)
} /* End tw_reset_device_extension() */
/* This function returns unit geometry in cylinders/heads/sectors */
-static int tw_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev,
+static int tw_scsi_biosparam(struct scsi_device *sdev, struct gendisk *unused,
sector_t capacity, int geom[])
{
int heads, sectors, cylinders;
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index 1f100270cd38..a86d780d1ba4 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -3240,7 +3240,7 @@ static int blogic_resetadapter(struct blogic_adapter *adapter, bool hard_reset)
the BIOS, and a warning may be displayed.
*/
-static int blogic_diskparam(struct scsi_device *sdev, struct block_device *dev,
+static int blogic_diskparam(struct scsi_device *sdev, struct gendisk *disk,
sector_t capacity, int *params)
{
struct blogic_adapter *adapter =
@@ -3261,7 +3261,7 @@ static int blogic_diskparam(struct scsi_device *sdev, struct block_device *dev,
diskparam->sectors = 32;
}
diskparam->cylinders = (unsigned long) capacity / (diskparam->heads * diskparam->sectors);
- buf = scsi_bios_ptable(dev);
+ buf = scsi_bios_ptable(disk);
if (buf == NULL)
return 0;
/*
@@ -3715,7 +3715,6 @@ static void __exit blogic_exit(void)
__setup("BusLogic=", blogic_setup);
-#ifdef MODULE
/*static const struct pci_device_id blogic_pci_tbl[] = {
{ PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
@@ -3725,13 +3724,12 @@ __setup("BusLogic=", blogic_setup);
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ }
};*/
-static const struct pci_device_id blogic_pci_tbl[] = {
+static const struct pci_device_id blogic_pci_tbl[] __maybe_unused = {
{PCI_DEVICE(PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER)},
{PCI_DEVICE(PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC)},
{PCI_DEVICE(PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_FLASHPOINT)},
{0, },
};
-#endif
MODULE_DEVICE_TABLE(pci, blogic_pci_tbl);
module_init(blogic_init);
diff --git a/drivers/scsi/BusLogic.h b/drivers/scsi/BusLogic.h
index 61bf26d4fc10..79de815e33b0 100644
--- a/drivers/scsi/BusLogic.h
+++ b/drivers/scsi/BusLogic.h
@@ -1273,7 +1273,7 @@ static inline void blogic_incszbucket(unsigned int *cmdsz_buckets,
static const char *blogic_drvr_info(struct Scsi_Host *);
static int blogic_qcmd(struct Scsi_Host *h, struct scsi_cmnd *);
-static int blogic_diskparam(struct scsi_device *, struct block_device *, sector_t, int *);
+static int blogic_diskparam(struct scsi_device *, struct gendisk *, sector_t, int *);
static int blogic_sdev_configure(struct scsi_device *,
struct queue_limits *lim);
static void blogic_qcompleted_ccb(struct blogic_ccb *);
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 4b12e6dd8f07..ea66196ef7c7 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -273,7 +273,7 @@ struct aac_driver_ident* aac_get_driver_ident(int devtype)
/**
* aac_biosparm - return BIOS parameters for disk
* @sdev: The scsi device corresponding to the disk
- * @bdev: the block device corresponding to the disk
+ * @disk: the gendisk corresponding to the disk
* @capacity: the sector capacity of the disk
* @geom: geometry block to fill in
*
@@ -292,7 +292,7 @@ struct aac_driver_ident* aac_get_driver_ident(int devtype)
* be displayed.
*/
-static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev,
+static int aac_biosparm(struct scsi_device *sdev, struct gendisk *disk,
sector_t capacity, int *geom)
{
struct diskparm *param = (struct diskparm *)geom;
@@ -324,7 +324,7 @@ static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev,
* entry whose end_head matches one of the standard geometry
* translations ( 64/32, 128/32, 255/63 ).
*/
- buf = scsi_bios_ptable(bdev);
+ buf = scsi_bios_ptable(disk);
if (!buf)
return 0;
if (*(__le16 *)(buf + 0x40) == cpu_to_le16(MSDOS_LABEL_MAGIC)) {
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 3a2c336307c0..063e1b5818d3 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -7096,7 +7096,7 @@ static int advansys_reset(struct scsi_cmnd *scp)
* ip[2]: cylinders
*/
static int
-advansys_biosparam(struct scsi_device *sdev, struct block_device *bdev,
+advansys_biosparam(struct scsi_device *sdev, struct gendisk *unused,
sector_t capacity, int ip[])
{
struct asc_board *boardp = shost_priv(sdev->host);
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index e94c0a19c435..182aa80ec4c6 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -1246,7 +1246,7 @@ int aha152x_host_reset_host(struct Scsi_Host *shpnt)
* Return the "logical geometry"
*
*/
-static int aha152x_biosparam(struct scsi_device *sdev, struct block_device *bdev,
+static int aha152x_biosparam(struct scsi_device *sdev, struct gendisk *disk,
sector_t capacity, int *info_array)
{
struct Scsi_Host *shpnt = sdev->host;
@@ -1261,7 +1261,7 @@ static int aha152x_biosparam(struct scsi_device *sdev, struct block_device *bdev
int info[3];
/* try to figure out the geometry from the partition table */
- if (scsicam_bios_param(bdev, capacity, info) < 0 ||
+ if (scsicam_bios_param(disk, capacity, info) < 0 ||
!((info[0] == 64 && info[1] == 32) || (info[0] == 255 && info[1] == 63))) {
if (EXT_TRANS) {
printk(KERN_NOTICE
diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c
index 389499d3e00a..371e8300f029 100644
--- a/drivers/scsi/aha1542.c
+++ b/drivers/scsi/aha1542.c
@@ -992,7 +992,7 @@ static int aha1542_host_reset(struct scsi_cmnd *cmd)
}
static int aha1542_biosparam(struct scsi_device *sdev,
- struct block_device *bdev, sector_t capacity, int geom[])
+ struct gendisk *unused, sector_t capacity, int geom[])
{
struct aha1542_hostdata *aha1542 = shost_priv(sdev->host);
diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c
index be7ebbbb9ba8..b234621f6b37 100644
--- a/drivers/scsi/aha1740.c
+++ b/drivers/scsi/aha1740.c
@@ -510,7 +510,7 @@ static void aha1740_getconfig(unsigned int base, unsigned int *irq_level,
}
static int aha1740_biosparam(struct scsi_device *sdev,
- struct block_device *dev,
+ struct gendisk *unused,
sector_t capacity, int* ip)
{
int size = capacity;
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 17dfc3c72110..c3d1b9dd24ae 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -720,7 +720,7 @@ ahd_linux_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
* Return the disk geometry for the given SCSI device.
*/
static int
-ahd_linux_biosparam(struct scsi_device *sdev, struct block_device *bdev,
+ahd_linux_biosparam(struct scsi_device *sdev, struct gendisk *disk,
sector_t capacity, int geom[])
{
int heads;
@@ -731,7 +731,7 @@ ahd_linux_biosparam(struct scsi_device *sdev, struct block_device *bdev,
ahd = *((struct ahd_softc **)sdev->host->hostdata);
- if (scsi_partsize(bdev, capacity, geom))
+ if (scsi_partsize(disk, capacity, geom))
return 0;
heads = 64;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index cebf8c5d0caf..8b2b98666d61 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -683,7 +683,7 @@ ahc_linux_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
* Return the disk geometry for the given SCSI device.
*/
static int
-ahc_linux_biosparam(struct scsi_device *sdev, struct block_device *bdev,
+ahc_linux_biosparam(struct scsi_device *sdev, struct gendisk *disk,
sector_t capacity, int geom[])
{
int heads;
@@ -696,7 +696,7 @@ ahc_linux_biosparam(struct scsi_device *sdev, struct block_device *bdev,
ahc = *((struct ahc_softc **)sdev->host->hostdata);
channel = sdev_channel(sdev);
- if (scsi_partsize(bdev, capacity, geom))
+ if (scsi_partsize(disk, capacity, geom))
return 0;
heads = 64;
diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c
index 4bfd03724ad6..b26a468ddc98 100644
--- a/drivers/scsi/aic94xx/aic94xx_task.c
+++ b/drivers/scsi/aic94xx/aic94xx_task.c
@@ -488,7 +488,6 @@ static int asd_build_ssp_ascb(struct asd_ascb *ascb, struct sas_task *task,
scb->ssp_task.conn_handle = cpu_to_le16(
(u16)(unsigned long)dev->lldd_dev);
scb->ssp_task.data_dir = data_dir_flags[task->data_dir];
- scb->ssp_task.retry_count = scb->ssp_task.retry_count;
ascb->tasklet_complete = asd_task_tasklet_complete;
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index fb57343a97bd..f0c5a30ce51b 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -112,7 +112,7 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb);
static int arcmsr_abort(struct scsi_cmnd *);
static int arcmsr_bus_reset(struct scsi_cmnd *);
static int arcmsr_bios_param(struct scsi_device *sdev,
- struct block_device *bdev, sector_t capacity, int *info);
+ struct gendisk *disk, sector_t capacity, int *info);
static int arcmsr_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static int arcmsr_probe(struct pci_dev *pdev,
const struct pci_device_id *id);
@@ -377,11 +377,11 @@ static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id)
}
static int arcmsr_bios_param(struct scsi_device *sdev,
- struct block_device *bdev, sector_t capacity, int *geom)
+ struct gendisk *disk, sector_t capacity, int *geom)
{
int heads, sectors, cylinders, total_capacity;
- if (scsi_partsize(bdev, capacity, geom))
+ if (scsi_partsize(disk, capacity, geom))
return 0;
total_capacity = capacity;
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index 401242912855..df6f40b51deb 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -1692,7 +1692,7 @@ static int atp870u_show_info(struct seq_file *m, struct Scsi_Host *HBAptr)
}
-static int atp870u_biosparam(struct scsi_device *disk, struct block_device *dev,
+static int atp870u_biosparam(struct scsi_device *disk, struct gendisk *unused,
sector_t capacity, int *ip)
{
int heads, sectors, cylinders;
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index a99a101b95ef..2559df8baa05 100644
--- a/drivers/scsi/bfa/bfa_core.c
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -1282,7 +1282,6 @@ bfa_iocfc_cfgrsp(struct bfa_s *bfa)
struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
struct bfa_iocfc_fwcfg_s *fwcfg = &cfgrsp->fwcfg;
- fwcfg->num_cqs = fwcfg->num_cqs;
fwcfg->num_ioim_reqs = be16_to_cpu(fwcfg->num_ioim_reqs);
fwcfg->num_fwtio_reqs = be16_to_cpu(fwcfg->num_fwtio_reqs);
fwcfg->num_tskim_reqs = be16_to_cpu(fwcfg->num_tskim_reqs);
diff --git a/drivers/scsi/csiostor/csio_wr.c b/drivers/scsi/csiostor/csio_wr.c
index a516df019c22..010a1df37f15 100644
--- a/drivers/scsi/csiostor/csio_wr.c
+++ b/drivers/scsi/csiostor/csio_wr.c
@@ -960,7 +960,7 @@ csio_wr_copy_to_wrp(void *data_buf, struct csio_wr_pair *wrp,
memcpy((uint8_t *) wrp->addr1 + wr_off, data_buf, nbytes);
data_len -= nbytes;
- /* Write the remaining data from the begining of circular buffer */
+ /* Write the remaining data from the beginning of circular buffer */
if (data_len) {
CSIO_DB_ASSERT(data_len <= wrp->size2);
CSIO_DB_ASSERT(wrp->addr2 != NULL);
@@ -1224,7 +1224,7 @@ csio_wr_process_iq(struct csio_hw *hw, struct csio_q *q,
/*
* We need to re-arm SGE interrupts in case we got a stray interrupt,
- * especially in msix mode. With INTx, this may be a common occurence.
+ * especially in msix mode. With INTx, this may be a common occurrence.
*/
if (unlikely(!q->inc_idx)) {
CSIO_INC_STATS(q, n_stray_comp);
diff --git a/drivers/scsi/fdomain.c b/drivers/scsi/fdomain.c
index 504c4e0c5d17..c0b2a980db34 100644
--- a/drivers/scsi/fdomain.c
+++ b/drivers/scsi/fdomain.c
@@ -469,10 +469,10 @@ static int fdomain_host_reset(struct scsi_cmnd *cmd)
}
static int fdomain_biosparam(struct scsi_device *sdev,
- struct block_device *bdev, sector_t capacity,
+ struct gendisk *disk, sector_t capacity,
int geom[])
{
- unsigned char *p = scsi_bios_ptable(bdev);
+ unsigned char *p = scsi_bios_ptable(disk);
if (p && p[65] == 0xaa && p[64] == 0x55 /* Partition table valid */
&& p[4]) { /* Partition type */
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index d1a4cc69d408..30a9c6612651 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -876,7 +876,7 @@ static int hisi_sas_dev_found(struct domain_device *device)
device->lldd_dev = sas_dev;
hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
- if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
+ if (dev_parent_is_expander(device)) {
int phy_no;
phy_no = sas_find_attached_phy_id(&parent_dev->ex_dev, device);
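This and the following hisi_sas, isci, and libsas hunks replace the open-coded parent checks with a dev_parent_is_expander() helper whose definition is not part of this excerpt. A plausible reconstruction, treated here as an assumption (the sas_check_parent_topology hunk further below makes the equivalence explicit):

/* Hypothetical reconstruction; the real helper presumably sits in
 * include/scsi/libsas.h next to dev_is_expander(). */
static inline bool dev_parent_is_expander(struct domain_device *dev)
{
	return dev->parent && dev_is_expander(dev->parent->dev_type);
}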
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 4431698a5d78..f3516a0611dd 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -925,7 +925,6 @@ static void setup_itct_v2_hw(struct hisi_hba *hisi_hba,
struct device *dev = hisi_hba->dev;
u64 qw0, device_id = sas_dev->device_id;
struct hisi_sas_itct *itct = &hisi_hba->itct[device_id];
- struct domain_device *parent_dev = device->parent;
struct asd_sas_port *sas_port = device->port;
struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
u64 sas_addr;
@@ -942,7 +941,7 @@ static void setup_itct_v2_hw(struct hisi_hba *hisi_hba,
break;
case SAS_SATA_DEV:
case SAS_SATA_PENDING:
- if (parent_dev && dev_is_expander(parent_dev->dev_type))
+ if (dev_parent_is_expander(device))
qw0 = HISI_SAS_DEV_TYPE_STP << ITCT_HDR_DEV_TYPE_OFF;
else
qw0 = HISI_SAS_DEV_TYPE_SATA << ITCT_HDR_DEV_TYPE_OFF;
@@ -2494,7 +2493,6 @@ static void prep_ata_v2_hw(struct hisi_hba *hisi_hba,
{
struct sas_task *task = slot->task;
struct domain_device *device = task->dev;
- struct domain_device *parent_dev = device->parent;
struct hisi_sas_device *sas_dev = device->lldd_dev;
struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
struct asd_sas_port *sas_port = device->port;
@@ -2509,7 +2507,7 @@ static void prep_ata_v2_hw(struct hisi_hba *hisi_hba,
/* create header */
/* dw0 */
dw0 = port->id << CMD_HDR_PORT_OFF;
- if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
+ if (dev_parent_is_expander(device)) {
dw0 |= 3 << CMD_HDR_CMD_OFF;
} else {
phy_id = device->phy->identify.phy_identifier;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 2f3d61abab3a..2f9e01717ef3 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -874,7 +874,6 @@ static void setup_itct_v3_hw(struct hisi_hba *hisi_hba,
struct device *dev = hisi_hba->dev;
u64 qw0, device_id = sas_dev->device_id;
struct hisi_sas_itct *itct = &hisi_hba->itct[device_id];
- struct domain_device *parent_dev = device->parent;
struct asd_sas_port *sas_port = device->port;
struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
u64 sas_addr;
@@ -891,7 +890,7 @@ static void setup_itct_v3_hw(struct hisi_hba *hisi_hba,
break;
case SAS_SATA_DEV:
case SAS_SATA_PENDING:
- if (parent_dev && dev_is_expander(parent_dev->dev_type))
+ if (dev_parent_is_expander(device))
qw0 = HISI_SAS_DEV_TYPE_STP << ITCT_HDR_DEV_TYPE_OFF;
else
qw0 = HISI_SAS_DEV_TYPE_SATA << ITCT_HDR_DEV_TYPE_OFF;
@@ -1476,7 +1475,6 @@ static void prep_ata_v3_hw(struct hisi_hba *hisi_hba,
{
struct sas_task *task = slot->task;
struct domain_device *device = task->dev;
- struct domain_device *parent_dev = device->parent;
struct hisi_sas_device *sas_dev = device->lldd_dev;
struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
struct asd_sas_port *sas_port = device->port;
@@ -1487,7 +1485,7 @@ static void prep_ata_v3_hw(struct hisi_hba *hisi_hba,
u32 dw1 = 0, dw2 = 0;
hdr->dw0 = cpu_to_le32(port->id << CMD_HDR_PORT_OFF);
- if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
+ if (dev_parent_is_expander(device)) {
hdr->dw0 |= cpu_to_le32(3 << CMD_HDR_CMD_OFF);
} else {
phy_id = device->phy->identify.phy_identifier;
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index c73a71ac3c29..3654b12c5d5a 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -2662,10 +2662,8 @@ static void complete_scsi_command(struct CommandList *cp)
case CMD_TARGET_STATUS:
cmd->result |= ei->ScsiStatus;
/* copy the sense data */
- if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
- sense_data_size = SCSI_SENSE_BUFFERSIZE;
- else
- sense_data_size = sizeof(ei->SenseInfo);
+ sense_data_size = min_t(unsigned long, SCSI_SENSE_BUFFERSIZE,
+ sizeof(ei->SenseInfo));
if (ei->SenseLen < sense_data_size)
sense_data_size = ei->SenseLen;
memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
@@ -3628,10 +3626,7 @@ static bool hpsa_vpd_page_supported(struct ctlr_info *h,
if (rc != 0)
goto exit_unsupported;
pages = buf[3];
- if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
- bufsize = pages + HPSA_VPD_HEADER_SZ;
- else
- bufsize = 255;
+ bufsize = min(pages + HPSA_VPD_HEADER_SZ, 255);
/* Get the whole VPD page list */
rc = hpsa_scsi_do_inquiry(h, scsi3addr,
@@ -6407,18 +6402,14 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h,
return -EINVAL;
}
if (iocommand->buf_size > 0) {
- buff = kmalloc(iocommand->buf_size, GFP_KERNEL);
- if (buff == NULL)
- return -ENOMEM;
if (iocommand->Request.Type.Direction & XFER_WRITE) {
- /* Copy the data into the buffer we created */
- if (copy_from_user(buff, iocommand->buf,
- iocommand->buf_size)) {
- rc = -EFAULT;
- goto out_kfree;
- }
+ buff = memdup_user(iocommand->buf, iocommand->buf_size);
+ if (IS_ERR(buff))
+ return PTR_ERR(buff);
} else {
- memset(buff, 0, iocommand->buf_size);
+ buff = kzalloc(iocommand->buf_size, GFP_KERNEL);
+ if (!buff)
+ return -ENOMEM;
}
}
c = cmd_alloc(h);
@@ -6478,7 +6469,6 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h,
}
out:
cmd_free(h, c);
-out_kfree:
kfree(buff);
return rc;
}
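This conversion, and the big-passthru one in the next hunk, settle on a single pattern: memdup_user() when user data flows toward the controller, kzalloc() when the buffer merely collects a response. A condensed sketch with a hypothetical helper name, highlighting the asymmetric failure conventions callers must honor:

#include <linux/slab.h>
#include <linux/string.h>	/* memdup_user() */
#include <linux/err.h>

static void *passthru_setup_buf(const void __user *ubuf, size_t len,
				bool write_to_device)
{
	if (write_to_device)
		/* Allocates and copies in one step; ERR_PTR() on failure. */
		return memdup_user(ubuf, len);

	/* Read direction: hand the controller a zeroed buffer.
	 * Note the different failure style: NULL, not ERR_PTR(). */
	return kzalloc(len, GFP_KERNEL);
}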
@@ -6522,18 +6512,21 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h,
while (left) {
sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
buff_size[sg_used] = sz;
- buff[sg_used] = kmalloc(sz, GFP_KERNEL);
- if (buff[sg_used] == NULL) {
- status = -ENOMEM;
- goto cleanup1;
- }
+
if (ioc->Request.Type.Direction & XFER_WRITE) {
- if (copy_from_user(buff[sg_used], data_ptr, sz)) {
- status = -EFAULT;
+ buff[sg_used] = memdup_user(data_ptr, sz);
+ if (IS_ERR(buff[sg_used])) {
+ status = PTR_ERR(buff[sg_used]);
goto cleanup1;
}
- } else
- memset(buff[sg_used], 0, sz);
+ } else {
+ buff[sg_used] = kzalloc(sz, GFP_KERNEL);
+ if (!buff[sg_used]) {
+ status = -ENOMEM;
+ goto cleanup1;
+ }
+ }
+
left -= sz;
data_ptr += sz;
sg_used++;
@@ -7632,8 +7625,8 @@ static void hpsa_free_cfgtables(struct ctlr_info *h)
}
/* Find and map CISS config table and transfer table
-+ * several items must be unmapped (freed) later
-+ * */
+ * several items must be unmapped (freed) later
+ */
static int hpsa_find_cfgtables(struct ctlr_info *h)
{
u64 cfg_offset;
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c
index 0821cf994b98..5c602c057798 100644
--- a/drivers/scsi/imm.c
+++ b/drivers/scsi/imm.c
@@ -954,7 +954,7 @@ static DEF_SCSI_QCMD(imm_queuecommand)
* be done in sd.c. Even if it gets fixed there, this will still
* work.
*/
-static int imm_biosparam(struct scsi_device *sdev, struct block_device *dev,
+static int imm_biosparam(struct scsi_device *sdev, struct gendisk *unused,
sector_t capacity, int ip[])
{
ip[0] = 0x40;
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index 8648bd965287..ed34ad92c807 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -2645,7 +2645,7 @@ static int i91u_bus_reset(struct scsi_cmnd * cmnd)
/**
* i91u_biosparam - return the logical geometry
* @sdev: SCSI device
- * @dev: Matching block device
+ * @unused: Matching gendisk
* @capacity: Sector size of drive
* @info_array: Return space for BIOS geometry
*
@@ -2655,7 +2655,7 @@ static int i91u_bus_reset(struct scsi_cmnd * cmnd)
* FIXME: limited to 2^32 sector devices.
*/
-static int i91u_biosparam(struct scsi_device *sdev, struct block_device *dev,
+static int i91u_biosparam(struct scsi_device *sdev, struct gendisk *unused,
sector_t capacity, int *info_array)
{
struct initio_host *host; /* Point to Host adapter control block */
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index d06b79f03538..44214884deaf 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -4281,11 +4281,11 @@ static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
}
if (ioa_cfg->sis64)
- ioa_data = vmalloc(array_size(IPR_FMT3_MAX_NUM_DUMP_PAGES,
- sizeof(__be32 *)));
+ ioa_data = vmalloc_array(IPR_FMT3_MAX_NUM_DUMP_PAGES,
+ sizeof(__be32 *));
else
- ioa_data = vmalloc(array_size(IPR_FMT2_MAX_NUM_DUMP_PAGES,
- sizeof(__be32 *)));
+ ioa_data = vmalloc_array(IPR_FMT2_MAX_NUM_DUMP_PAGES,
+ sizeof(__be32 *));
if (!ioa_data) {
ipr_err("Dump memory allocation failed\n");
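vmalloc_array() folds the multiplication-overflow check that array_size() performed into the allocator itself. A short equivalence sketch, assuming a caller-supplied count n:

#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/overflow.h>

static void *dump_pages_alloc(size_t n)
{
	/* Before: vmalloc(array_size(n, sizeof(__be32 *))), where
	 * array_size() saturates to SIZE_MAX on overflow so vmalloc()
	 * fails. After: vmalloc_array() performs the same check and
	 * returns NULL directly. */
	return vmalloc_array(n, sizeof(__be32 *));
}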
@@ -4644,10 +4644,10 @@ ATTRIBUTE_GROUPS(ipr_dev);
/**
* ipr_biosparam - Return the HSC mapping
- * @sdev: scsi device struct
- * @block_device: block device pointer
+ * @sdev: scsi device struct
+ * @unused: gendisk pointer
* @capacity: capacity of the device
- * @parm: Array containing returned HSC values.
+ * @parm: Array containing returned HSC values.
*
* This function generates the HSC parms that fdisk uses.
* We want to make sure we return something that places partitions
@@ -4657,7 +4657,7 @@ ATTRIBUTE_GROUPS(ipr_dev);
* 0 on success
**/
static int ipr_biosparam(struct scsi_device *sdev,
- struct block_device *block_device,
+ struct gendisk *unused,
sector_t capacity, int *parm)
{
int heads, sectors;
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index 94adb6ac02a4..3393a288fd23 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -1123,7 +1123,7 @@ static DEF_SCSI_QCMD(ips_queue)
/* Set bios geometry for the controller */
/* */
/****************************************************************************/
-static int ips_biosparam(struct scsi_device *sdev, struct block_device *bdev,
+static int ips_biosparam(struct scsi_device *sdev, struct gendisk *unused,
sector_t capacity, int geom[])
{
ips_ha_t *ha = (ips_ha_t *) sdev->host->hostdata;
diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
index 8ac932ec4444..30a4d4a580e9 100644
--- a/drivers/scsi/ips.h
+++ b/drivers/scsi/ips.h
@@ -398,7 +398,7 @@
/*
* Scsi_Host Template
*/
- static int ips_biosparam(struct scsi_device *sdev, struct block_device *bdev,
+ static int ips_biosparam(struct scsi_device *sdev, struct gendisk *unused,
sector_t capacity, int geom[]);
static int ips_sdev_configure(struct scsi_device *SDptr,
struct queue_limits *lim);
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
index 82deb6a83a8c..4c7462965ea1 100644
--- a/drivers/scsi/isci/remote_device.c
+++ b/drivers/scsi/isci/remote_device.c
@@ -1434,7 +1434,7 @@ static enum sci_status isci_remote_device_construct(struct isci_port *iport,
struct domain_device *dev = idev->domain_dev;
enum sci_status status;
- if (dev->parent && dev_is_expander(dev->parent->dev_type))
+ if (dev_parent_is_expander(dev))
status = sci_remote_device_ea_construct(iport, idev);
else
status = sci_remote_device_da_construct(iport, idev);
diff --git a/drivers/scsi/libfc/fc_encode.h b/drivers/scsi/libfc/fc_encode.h
index 02e31db31d68..e046091a549a 100644
--- a/drivers/scsi/libfc/fc_encode.h
+++ b/drivers/scsi/libfc/fc_encode.h
@@ -356,7 +356,7 @@ static inline int fc_ct_ms_fill(struct fc_lport *lport,
put_unaligned_be16(len, &entry->len);
snprintf((char *)&entry->value,
FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN,
- "%s v%s",
+ "%.62s v%.62s",
init_utsname()->sysname,
init_utsname()->release);
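Bounding each %s with an explicit precision keeps an overlong utsname string from consuming the whole fixed-size FDMI attribute and lets the compiler prove the snprintf() cannot truncate surprisingly; the value 62 is presumably sized against the 64-byte attribute field. A standalone illustration under that assumption:

#include <linux/kernel.h>
#include <linux/utsname.h>

static void fill_osnameversion(char *dst, size_t len)
{
	/* len is FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN (64) at the call
	 * site above; each operand is capped independently of the
	 * overall buffer bound. */
	snprintf(dst, len, "%.62s v%.62s",
		 init_utsname()->sysname, init_utsname()->release);
}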
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 869b5d4db44c..d953225f6cc2 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -1313,10 +1313,7 @@ static int sas_check_parent_topology(struct domain_device *child)
int i;
int res = 0;
- if (!child->parent)
- return 0;
-
- if (!dev_is_expander(child->parent->dev_type))
+ if (!dev_parent_is_expander(child))
return 0;
parent_ex = &child->parent->ex_dev;
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 928723c90b75..ffa5b49aaf08 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -845,7 +845,7 @@ int sas_change_queue_depth(struct scsi_device *sdev, int depth)
EXPORT_SYMBOL_GPL(sas_change_queue_depth);
int sas_bios_param(struct scsi_device *scsi_dev,
- struct block_device *bdev,
+ struct gendisk *unused,
sector_t capacity, int *hsc)
{
hsc[0] = 255;
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index fe4fb67eb50c..224edacf2d8e 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -661,15 +661,12 @@ struct lpfc_vport {
uint32_t num_disc_nodes; /* in addition to hba_state */
uint32_t gidft_inp; /* cnt of outstanding GID_FTs */
- uint32_t fc_nlp_cnt; /* outstanding NODELIST requests */
uint32_t fc_rscn_id_cnt; /* count of RSCNs payloads in list */
uint32_t fc_rscn_flush; /* flag use of fc_rscn_id_list */
struct lpfc_dmabuf *fc_rscn_id_list[FC_MAX_HOLD_RSCN];
struct lpfc_name fc_nodename; /* fc nodename */
struct lpfc_name fc_portname; /* fc portname */
- struct lpfc_work_evt disc_timeout_evt;
-
struct timer_list fc_disctmo; /* Discovery rescue timer */
uint8_t fc_ns_retry; /* retries for fabric nameserver */
uint32_t fc_prli_sent; /* cntr for outstanding PRLIs */
@@ -744,12 +741,6 @@ struct lpfc_vport {
struct lpfc_vmid_priority_info vmid_priority;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- struct dentry *debug_disc_trc;
- struct dentry *debug_nodelist;
- struct dentry *debug_nvmestat;
- struct dentry *debug_scsistat;
- struct dentry *debug_ioktime;
- struct dentry *debug_hdwqstat;
struct dentry *vport_debugfs_root;
struct lpfc_debugfs_trc *disc_trc;
atomic_t disc_trc_cnt;
@@ -767,7 +758,6 @@ struct lpfc_vport {
/* There is a single nvme instance per vport. */
struct nvme_fc_local_port *localport;
uint8_t nvmei_support; /* driver supports NVME Initiator */
- uint32_t last_fcp_wqidx;
uint32_t rcv_flogi_cnt; /* How many unsol FLOGIs ACK'd. */
};
@@ -1060,8 +1050,6 @@ struct lpfc_hba {
struct lpfc_dmabuf hbqslimp;
- uint16_t pci_cfg_value;
-
uint8_t fc_linkspeed; /* Link speed after last READ_LA */
uint32_t fc_eventTag; /* event tag for link attention */
@@ -1088,7 +1076,6 @@ struct lpfc_hba {
struct lpfc_stats fc_stat;
- struct lpfc_nodelist fc_fcpnodev; /* nodelist entry for no device */
uint32_t nport_event_cnt; /* timestamp for nlplist entry */
uint8_t wwnn[8];
@@ -1229,9 +1216,6 @@ struct lpfc_hba {
uint32_t hbq_count; /* Count of configured HBQs */
struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */
- atomic_t fcp_qidx; /* next FCP WQ (RR Policy) */
- atomic_t nvme_qidx; /* next NVME WQ (RR Policy) */
-
phys_addr_t pci_bar0_map; /* Physical address for PCI BAR0 */
phys_addr_t pci_bar1_map; /* Physical address for PCI BAR1 */
phys_addr_t pci_bar2_map; /* Physical address for PCI BAR2 */
@@ -1348,30 +1332,9 @@ struct lpfc_hba {
unsigned long last_ramp_down_time;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
struct dentry *hba_debugfs_root;
- atomic_t debugfs_vport_count;
- struct dentry *debug_multixri_pools;
- struct dentry *debug_hbqinfo;
- struct dentry *debug_dumpHostSlim;
- struct dentry *debug_dumpHBASlim;
- struct dentry *debug_InjErrLBA; /* LBA to inject errors at */
- struct dentry *debug_InjErrNPortID; /* NPortID to inject errors at */
- struct dentry *debug_InjErrWWPN; /* WWPN to inject errors at */
- struct dentry *debug_writeGuard; /* inject write guard_tag errors */
- struct dentry *debug_writeApp; /* inject write app_tag errors */
- struct dentry *debug_writeRef; /* inject write ref_tag errors */
- struct dentry *debug_readGuard; /* inject read guard_tag errors */
- struct dentry *debug_readApp; /* inject read app_tag errors */
- struct dentry *debug_readRef; /* inject read ref_tag errors */
-
- struct dentry *debug_nvmeio_trc;
+ unsigned int debugfs_vport_count;
+
struct lpfc_debugfs_nvmeio_trc *nvmeio_trc;
- struct dentry *debug_hdwqinfo;
-#ifdef LPFC_HDWQ_LOCK_STAT
- struct dentry *debug_lockstat;
-#endif
- struct dentry *debug_cgn_buffer;
- struct dentry *debug_rx_monitor;
- struct dentry *debug_ras_log;
atomic_t nvmeio_trc_cnt;
uint32_t nvmeio_trc_size;
uint32_t nvmeio_trc_output_idx;
@@ -1388,19 +1351,10 @@ struct lpfc_hba {
sector_t lpfc_injerr_lba;
#define LPFC_INJERR_LBA_OFF (sector_t)(-1)
- struct dentry *debug_slow_ring_trc;
struct lpfc_debugfs_trc *slow_ring_trc;
atomic_t slow_ring_trc_cnt;
/* iDiag debugfs sub-directory */
struct dentry *idiag_root;
- struct dentry *idiag_pci_cfg;
- struct dentry *idiag_bar_acc;
- struct dentry *idiag_que_info;
- struct dentry *idiag_que_acc;
- struct dentry *idiag_drb_acc;
- struct dentry *idiag_ctl_acc;
- struct dentry *idiag_mbx_acc;
- struct dentry *idiag_ext_acc;
uint8_t lpfc_idiag_last_eq;
#endif
uint16_t nvmeio_trc_on;
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 7c4d7bb3a56f..92b5b2dbe847 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -2373,93 +2373,117 @@ out:
static ssize_t
lpfc_debugfs_dif_err_read(struct file *file, char __user *buf,
- size_t nbytes, loff_t *ppos)
+ size_t nbytes, loff_t *ppos)
{
struct lpfc_hba *phba = file->private_data;
int kind = debugfs_get_aux_num(file);
- char cbuf[32];
- uint64_t tmp = 0;
+ char cbuf[32] = {0};
int cnt = 0;
- if (kind == writeGuard)
- cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wgrd_cnt);
- else if (kind == writeApp)
- cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wapp_cnt);
- else if (kind == writeRef)
- cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wref_cnt);
- else if (kind == readGuard)
- cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rgrd_cnt);
- else if (kind == readApp)
- cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rapp_cnt);
- else if (kind == readRef)
- cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rref_cnt);
- else if (kind == InjErrNPortID)
- cnt = scnprintf(cbuf, 32, "0x%06x\n",
+ switch (kind) {
+ case writeGuard:
+ cnt = scnprintf(cbuf, sizeof(cbuf), "%u\n",
+ phba->lpfc_injerr_wgrd_cnt);
+ break;
+ case writeApp:
+ cnt = scnprintf(cbuf, sizeof(cbuf), "%u\n",
+ phba->lpfc_injerr_wapp_cnt);
+ break;
+ case writeRef:
+ cnt = scnprintf(cbuf, sizeof(cbuf), "%u\n",
+ phba->lpfc_injerr_wref_cnt);
+ break;
+ case readGuard:
+ cnt = scnprintf(cbuf, sizeof(cbuf), "%u\n",
+ phba->lpfc_injerr_rgrd_cnt);
+ break;
+ case readApp:
+ cnt = scnprintf(cbuf, sizeof(cbuf), "%u\n",
+ phba->lpfc_injerr_rapp_cnt);
+ break;
+ case readRef:
+ cnt = scnprintf(cbuf, sizeof(cbuf), "%u\n",
+ phba->lpfc_injerr_rref_cnt);
+ break;
+ case InjErrNPortID:
+ cnt = scnprintf(cbuf, sizeof(cbuf), "0x%06x\n",
phba->lpfc_injerr_nportid);
- else if (kind == InjErrWWPN) {
- memcpy(&tmp, &phba->lpfc_injerr_wwpn, sizeof(struct lpfc_name));
- tmp = cpu_to_be64(tmp);
- cnt = scnprintf(cbuf, 32, "0x%016llx\n", tmp);
- } else if (kind == InjErrLBA) {
- if (phba->lpfc_injerr_lba == (sector_t)(-1))
- cnt = scnprintf(cbuf, 32, "off\n");
+ break;
+ case InjErrWWPN:
+ cnt = scnprintf(cbuf, sizeof(cbuf), "0x%016llx\n",
+ be64_to_cpu(phba->lpfc_injerr_wwpn.u.wwn_be));
+ break;
+ case InjErrLBA:
+ if (phba->lpfc_injerr_lba == LPFC_INJERR_LBA_OFF)
+ cnt = scnprintf(cbuf, sizeof(cbuf), "off\n");
else
- cnt = scnprintf(cbuf, 32, "0x%llx\n",
- (uint64_t) phba->lpfc_injerr_lba);
- } else
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0547 Unknown debugfs error injection entry\n");
+ cnt = scnprintf(cbuf, sizeof(cbuf), "0x%llx\n",
+ (uint64_t)phba->lpfc_injerr_lba);
+ break;
+ default:
+ lpfc_log_msg(phba, KERN_WARNING, LOG_INIT,
+ "0547 Unknown debugfs error injection entry\n");
+ break;
+ }
return simple_read_from_buffer(buf, nbytes, ppos, &cbuf, cnt);
}
static ssize_t
lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf,
- size_t nbytes, loff_t *ppos)
+ size_t nbytes, loff_t *ppos)
{
struct lpfc_hba *phba = file->private_data;
int kind = debugfs_get_aux_num(file);
- char dstbuf[33];
- uint64_t tmp = 0;
- int size;
+ char dstbuf[33] = {0};
+ unsigned long long tmp;
+ unsigned long size;
- memset(dstbuf, 0, 33);
- size = (nbytes < 32) ? nbytes : 32;
+ size = (nbytes < (sizeof(dstbuf) - 1)) ? nbytes : (sizeof(dstbuf) - 1);
if (copy_from_user(dstbuf, buf, size))
return -EFAULT;
- if (kind == InjErrLBA) {
- if ((dstbuf[0] == 'o') && (dstbuf[1] == 'f') &&
- (dstbuf[2] == 'f'))
- tmp = (uint64_t)(-1);
+ if (kstrtoull(dstbuf, 0, &tmp)) {
+ if (kind != InjErrLBA || !strstr(dstbuf, "off"))
+ return -EINVAL;
}
- if ((tmp == 0) && (kstrtoull(dstbuf, 0, &tmp)))
- return -EINVAL;
-
- if (kind == writeGuard)
+ switch (kind) {
+ case writeGuard:
phba->lpfc_injerr_wgrd_cnt = (uint32_t)tmp;
- else if (kind == writeApp)
+ break;
+ case writeApp:
phba->lpfc_injerr_wapp_cnt = (uint32_t)tmp;
- else if (kind == writeRef)
+ break;
+ case writeRef:
phba->lpfc_injerr_wref_cnt = (uint32_t)tmp;
- else if (kind == readGuard)
+ break;
+ case readGuard:
phba->lpfc_injerr_rgrd_cnt = (uint32_t)tmp;
- else if (kind == readApp)
+ break;
+ case readApp:
phba->lpfc_injerr_rapp_cnt = (uint32_t)tmp;
- else if (kind == readRef)
+ break;
+ case readRef:
phba->lpfc_injerr_rref_cnt = (uint32_t)tmp;
- else if (kind == InjErrLBA)
- phba->lpfc_injerr_lba = (sector_t)tmp;
- else if (kind == InjErrNPortID)
+ break;
+ case InjErrLBA:
+ if (strstr(dstbuf, "off"))
+ phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
+ else
+ phba->lpfc_injerr_lba = (sector_t)tmp;
+ break;
+ case InjErrNPortID:
phba->lpfc_injerr_nportid = (uint32_t)(tmp & Mask_DID);
- else if (kind == InjErrWWPN) {
- tmp = cpu_to_be64(tmp);
- memcpy(&phba->lpfc_injerr_wwpn, &tmp, sizeof(struct lpfc_name));
- } else
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0548 Unknown debugfs error injection entry\n");
-
+ break;
+ case InjErrWWPN:
+ phba->lpfc_injerr_wwpn.u.wwn_be = cpu_to_be64(tmp);
+ break;
+ default:
+ lpfc_log_msg(phba, KERN_WARNING, LOG_INIT,
+ "0548 Unknown debugfs error injection entry\n");
+ break;
+ }
return nbytes;
}
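The rewritten handlers recover the injection kind with debugfs_get_aux_num(), the read-side counterpart of the debugfs_create_file_aux_num() calls later in this patch; stashing the kind in the inode is what lets all the per-kind struct dentry pointers be deleted. A minimal sketch of the pairing:

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/module.h>

enum { kind_first, kind_second };

static ssize_t kind_read(struct file *file, char __user *buf,
			 size_t nbytes, loff_t *ppos)
{
	char cbuf[16];
	/* The kind was recorded at creation time; no lookup needed. */
	int cnt = scnprintf(cbuf, sizeof(cbuf), "%d\n",
			    debugfs_get_aux_num(file));

	return simple_read_from_buffer(buf, nbytes, ppos, cbuf, cnt);
}

static const struct file_operations kind_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.read	= kind_read,
};

/* At init time:
 *	debugfs_create_file_aux_num("first", 0444, parent, NULL,
 *				    kind_first, &kind_fops);
 */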
@@ -5728,7 +5752,7 @@ static const struct file_operations lpfc_debugfs_op_slow_ring_trc = {
};
static struct dentry *lpfc_debugfs_root = NULL;
-static atomic_t lpfc_debugfs_hba_count;
+static unsigned int lpfc_debugfs_hba_count;
/*
* File operations for the iDiag debugfs
@@ -6050,7 +6074,12 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
/* Setup lpfc root directory */
if (!lpfc_debugfs_root) {
lpfc_debugfs_root = debugfs_create_dir("lpfc", NULL);
- atomic_set(&lpfc_debugfs_hba_count, 0);
+ lpfc_debugfs_hba_count = 0;
+ if (IS_ERR(lpfc_debugfs_root)) {
+ lpfc_vlog_msg(vport, KERN_WARNING, LOG_INIT,
+ "0527 Cannot create debugfs lpfc\n");
+ return;
+ }
}
if (!lpfc_debugfs_start_time)
lpfc_debugfs_start_time = jiffies;
@@ -6061,150 +6090,96 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
pport_setup = true;
phba->hba_debugfs_root =
debugfs_create_dir(name, lpfc_debugfs_root);
- atomic_inc(&lpfc_debugfs_hba_count);
- atomic_set(&phba->debugfs_vport_count, 0);
+ phba->debugfs_vport_count = 0;
+ if (IS_ERR(phba->hba_debugfs_root)) {
+ lpfc_vlog_msg(vport, KERN_WARNING, LOG_INIT,
+ "0528 Cannot create debugfs %s\n", name);
+ return;
+ }
+ lpfc_debugfs_hba_count++;
/* Multi-XRI pools */
- snprintf(name, sizeof(name), "multixripools");
- phba->debug_multixri_pools =
- debugfs_create_file(name, S_IFREG | 0644,
- phba->hba_debugfs_root,
- phba,
- &lpfc_debugfs_op_multixripools);
- if (IS_ERR(phba->debug_multixri_pools)) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0527 Cannot create debugfs multixripools\n");
- goto debug_failed;
- }
+ debugfs_create_file("multixripools", 0644,
+ phba->hba_debugfs_root, phba,
+ &lpfc_debugfs_op_multixripools);
/* Congestion Info Buffer */
- scnprintf(name, sizeof(name), "cgn_buffer");
- phba->debug_cgn_buffer =
- debugfs_create_file(name, S_IFREG | 0644,
- phba->hba_debugfs_root,
- phba, &lpfc_cgn_buffer_op);
- if (IS_ERR(phba->debug_cgn_buffer)) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "6527 Cannot create debugfs "
- "cgn_buffer\n");
- goto debug_failed;
- }
+ debugfs_create_file("cgn_buffer", 0644, phba->hba_debugfs_root,
+ phba, &lpfc_cgn_buffer_op);
/* RX Monitor */
- scnprintf(name, sizeof(name), "rx_monitor");
- phba->debug_rx_monitor =
- debugfs_create_file(name, S_IFREG | 0644,
- phba->hba_debugfs_root,
- phba, &lpfc_rx_monitor_op);
- if (IS_ERR(phba->debug_rx_monitor)) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "6528 Cannot create debugfs "
- "rx_monitor\n");
- goto debug_failed;
- }
+ debugfs_create_file("rx_monitor", 0644, phba->hba_debugfs_root,
+ phba, &lpfc_rx_monitor_op);
/* RAS log */
- snprintf(name, sizeof(name), "ras_log");
- phba->debug_ras_log =
- debugfs_create_file(name, 0644,
- phba->hba_debugfs_root,
- phba, &lpfc_debugfs_ras_log);
- if (IS_ERR(phba->debug_ras_log)) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "6148 Cannot create debugfs"
- " ras_log\n");
- goto debug_failed;
- }
+ debugfs_create_file("ras_log", 0644, phba->hba_debugfs_root,
+ phba, &lpfc_debugfs_ras_log);
/* Setup hbqinfo */
- snprintf(name, sizeof(name), "hbqinfo");
- phba->debug_hbqinfo =
- debugfs_create_file(name, S_IFREG | 0644,
- phba->hba_debugfs_root,
- phba, &lpfc_debugfs_op_hbqinfo);
+ debugfs_create_file("hbqinfo", 0644, phba->hba_debugfs_root,
+ phba, &lpfc_debugfs_op_hbqinfo);
#ifdef LPFC_HDWQ_LOCK_STAT
/* Setup lockstat */
- snprintf(name, sizeof(name), "lockstat");
- phba->debug_lockstat =
- debugfs_create_file(name, S_IFREG | 0644,
- phba->hba_debugfs_root,
- phba, &lpfc_debugfs_op_lockstat);
- if (IS_ERR(phba->debug_lockstat)) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "4610 Can't create debugfs lockstat\n");
- goto debug_failed;
- }
+ debugfs_create_file("lockstat", 0644, phba->hba_debugfs_root,
+ phba, &lpfc_debugfs_op_lockstat);
#endif
-
- /* Setup dumpHBASlim */
if (phba->sli_rev < LPFC_SLI_REV4) {
- snprintf(name, sizeof(name), "dumpHBASlim");
- phba->debug_dumpHBASlim =
- debugfs_create_file(name,
- S_IFREG|S_IRUGO|S_IWUSR,
- phba->hba_debugfs_root,
- phba, &lpfc_debugfs_op_dumpHBASlim);
- } else
- phba->debug_dumpHBASlim = NULL;
+ /* Setup dumpHBASlim */
+ debugfs_create_file("dumpHBASlim", 0644,
+ phba->hba_debugfs_root, phba,
+ &lpfc_debugfs_op_dumpHBASlim);
+ }
- /* Setup dumpHostSlim */
if (phba->sli_rev < LPFC_SLI_REV4) {
- snprintf(name, sizeof(name), "dumpHostSlim");
- phba->debug_dumpHostSlim =
- debugfs_create_file(name,
- S_IFREG|S_IRUGO|S_IWUSR,
- phba->hba_debugfs_root,
- phba, &lpfc_debugfs_op_dumpHostSlim);
- } else
- phba->debug_dumpHostSlim = NULL;
+ /* Setup dumpHostSlim */
+ debugfs_create_file("dumpHostSlim", 0644,
+ phba->hba_debugfs_root, phba,
+ &lpfc_debugfs_op_dumpHostSlim);
+ }
/* Setup DIF Error Injections */
- phba->debug_InjErrLBA =
- debugfs_create_file_aux_num("InjErrLBA", 0644,
- phba->hba_debugfs_root,
- phba, InjErrLBA, &lpfc_debugfs_op_dif_err);
+ debugfs_create_file_aux_num("InjErrLBA", 0644,
+ phba->hba_debugfs_root, phba,
+ InjErrLBA,
+ &lpfc_debugfs_op_dif_err);
phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
- phba->debug_InjErrNPortID =
- debugfs_create_file_aux_num("InjErrNPortID", 0644,
- phba->hba_debugfs_root,
- phba, InjErrNPortID, &lpfc_debugfs_op_dif_err);
-
- phba->debug_InjErrWWPN =
- debugfs_create_file_aux_num("InjErrWWPN", 0644,
- phba->hba_debugfs_root,
- phba, InjErrWWPN, &lpfc_debugfs_op_dif_err);
-
- phba->debug_writeGuard =
- debugfs_create_file_aux_num("writeGuardInjErr", 0644,
- phba->hba_debugfs_root,
- phba, writeGuard, &lpfc_debugfs_op_dif_err);
-
- phba->debug_writeApp =
- debugfs_create_file_aux_num("writeAppInjErr", 0644,
- phba->hba_debugfs_root,
- phba, writeApp, &lpfc_debugfs_op_dif_err);
-
- phba->debug_writeRef =
- debugfs_create_file_aux_num("writeRefInjErr", 0644,
- phba->hba_debugfs_root,
- phba, writeRef, &lpfc_debugfs_op_dif_err);
-
- phba->debug_readGuard =
- debugfs_create_file_aux_num("readGuardInjErr", 0644,
- phba->hba_debugfs_root,
- phba, readGuard, &lpfc_debugfs_op_dif_err);
-
- phba->debug_readApp =
- debugfs_create_file_aux_num("readAppInjErr", 0644,
- phba->hba_debugfs_root,
- phba, readApp, &lpfc_debugfs_op_dif_err);
-
- phba->debug_readRef =
- debugfs_create_file_aux_num("readRefInjErr", 0644,
- phba->hba_debugfs_root,
- phba, readRef, &lpfc_debugfs_op_dif_err);
+ debugfs_create_file_aux_num("InjErrNPortID", 0644,
+ phba->hba_debugfs_root, phba,
+ InjErrNPortID,
+ &lpfc_debugfs_op_dif_err);
+
+ debugfs_create_file_aux_num("InjErrWWPN", 0644,
+ phba->hba_debugfs_root, phba,
+ InjErrWWPN,
+ &lpfc_debugfs_op_dif_err);
+
+ debugfs_create_file_aux_num("writeGuardInjErr", 0644,
+ phba->hba_debugfs_root, phba,
+ writeGuard,
+ &lpfc_debugfs_op_dif_err);
+
+ debugfs_create_file_aux_num("writeAppInjErr", 0644,
+ phba->hba_debugfs_root, phba,
+ writeApp, &lpfc_debugfs_op_dif_err);
+
+ debugfs_create_file_aux_num("writeRefInjErr", 0644,
+ phba->hba_debugfs_root, phba,
+ writeRef, &lpfc_debugfs_op_dif_err);
+
+ debugfs_create_file_aux_num("readGuardInjErr", 0644,
+ phba->hba_debugfs_root, phba,
+ readGuard,
+ &lpfc_debugfs_op_dif_err);
+
+ debugfs_create_file_aux_num("readAppInjErr", 0644,
+ phba->hba_debugfs_root, phba,
+ readApp, &lpfc_debugfs_op_dif_err);
+
+ debugfs_create_file_aux_num("readRefInjErr", 0644,
+ phba->hba_debugfs_root, phba,
+ readRef, &lpfc_debugfs_op_dif_err);
/* Setup slow ring trace */
if (lpfc_debugfs_max_slow_ring_trc) {
@@ -6224,11 +6199,9 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
}
}
- snprintf(name, sizeof(name), "slow_ring_trace");
- phba->debug_slow_ring_trc =
- debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
- phba->hba_debugfs_root,
- phba, &lpfc_debugfs_op_slow_ring_trc);
+ debugfs_create_file("slow_ring_trace", 0644,
+ phba->hba_debugfs_root, phba,
+ &lpfc_debugfs_op_slow_ring_trc);
if (!phba->slow_ring_trc) {
phba->slow_ring_trc = kcalloc(
lpfc_debugfs_max_slow_ring_trc,
@@ -6238,16 +6211,13 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"0416 Cannot create debugfs "
"slow_ring buffer\n");
- goto debug_failed;
+ goto out;
}
atomic_set(&phba->slow_ring_trc_cnt, 0);
}
- snprintf(name, sizeof(name), "nvmeio_trc");
- phba->debug_nvmeio_trc =
- debugfs_create_file(name, 0644,
- phba->hba_debugfs_root,
- phba, &lpfc_debugfs_op_nvmeio_trc);
+ debugfs_create_file("nvmeio_trc", 0644, phba->hba_debugfs_root,
+ phba, &lpfc_debugfs_op_nvmeio_trc);
atomic_set(&phba->nvmeio_trc_cnt, 0);
if (lpfc_debugfs_max_nvmeio_trc) {
@@ -6293,7 +6263,12 @@ nvmeio_off:
if (!vport->vport_debugfs_root) {
vport->vport_debugfs_root =
debugfs_create_dir(name, phba->hba_debugfs_root);
- atomic_inc(&phba->debugfs_vport_count);
+ if (IS_ERR(vport->vport_debugfs_root)) {
+ lpfc_vlog_msg(vport, KERN_WARNING, LOG_INIT,
+ "0529 Cannot create debugfs %s\n", name);
+ return;
+ }
+ phba->debugfs_vport_count++;
}
if (lpfc_debugfs_max_disc_trc) {
@@ -6320,54 +6295,27 @@ nvmeio_off:
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"0418 Cannot create debugfs disc trace "
"buffer\n");
- goto debug_failed;
+ goto out;
}
atomic_set(&vport->disc_trc_cnt, 0);
- snprintf(name, sizeof(name), "discovery_trace");
- vport->debug_disc_trc =
- debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
- vport->vport_debugfs_root,
- vport, &lpfc_debugfs_op_disc_trc);
- snprintf(name, sizeof(name), "nodelist");
- vport->debug_nodelist =
- debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
- vport->vport_debugfs_root,
- vport, &lpfc_debugfs_op_nodelist);
-
- snprintf(name, sizeof(name), "nvmestat");
- vport->debug_nvmestat =
- debugfs_create_file(name, 0644,
- vport->vport_debugfs_root,
- vport, &lpfc_debugfs_op_nvmestat);
-
- snprintf(name, sizeof(name), "scsistat");
- vport->debug_scsistat =
- debugfs_create_file(name, 0644,
- vport->vport_debugfs_root,
- vport, &lpfc_debugfs_op_scsistat);
- if (IS_ERR(vport->debug_scsistat)) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "4611 Cannot create debugfs scsistat\n");
- goto debug_failed;
- }
+ debugfs_create_file("discovery_trace", 0644, vport->vport_debugfs_root,
+ vport, &lpfc_debugfs_op_disc_trc);
- snprintf(name, sizeof(name), "ioktime");
- vport->debug_ioktime =
- debugfs_create_file(name, 0644,
- vport->vport_debugfs_root,
- vport, &lpfc_debugfs_op_ioktime);
- if (IS_ERR(vport->debug_ioktime)) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0815 Cannot create debugfs ioktime\n");
- goto debug_failed;
- }
+ debugfs_create_file("nodelist", 0644, vport->vport_debugfs_root, vport,
+ &lpfc_debugfs_op_nodelist);
+
+ debugfs_create_file("nvmestat", 0644, vport->vport_debugfs_root, vport,
+ &lpfc_debugfs_op_nvmestat);
- snprintf(name, sizeof(name), "hdwqstat");
- vport->debug_hdwqstat =
- debugfs_create_file(name, 0644,
- vport->vport_debugfs_root,
- vport, &lpfc_debugfs_op_hdwqstat);
+ debugfs_create_file("scsistat", 0644, vport->vport_debugfs_root, vport,
+ &lpfc_debugfs_op_scsistat);
+
+ debugfs_create_file("ioktime", 0644, vport->vport_debugfs_root, vport,
+ &lpfc_debugfs_op_ioktime);
+
+ debugfs_create_file("hdwqstat", 0644, vport->vport_debugfs_root, vport,
+ &lpfc_debugfs_op_hdwqstat);
/*
* The following section is for additional directories/files for the
@@ -6375,93 +6323,58 @@ nvmeio_off:
*/
if (!pport_setup)
- goto debug_failed;
+ return;
/*
* iDiag debugfs root entry points for SLI4 device only
*/
if (phba->sli_rev < LPFC_SLI_REV4)
- goto debug_failed;
+ return;
- snprintf(name, sizeof(name), "iDiag");
if (!phba->idiag_root) {
phba->idiag_root =
- debugfs_create_dir(name, phba->hba_debugfs_root);
+ debugfs_create_dir("iDiag", phba->hba_debugfs_root);
/* Initialize iDiag data structure */
memset(&idiag, 0, sizeof(idiag));
}
/* iDiag read PCI config space */
- snprintf(name, sizeof(name), "pciCfg");
- if (!phba->idiag_pci_cfg) {
- phba->idiag_pci_cfg =
- debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
- phba->idiag_root, phba, &lpfc_idiag_op_pciCfg);
- idiag.offset.last_rd = 0;
- }
+ debugfs_create_file("pciCfg", 0644, phba->idiag_root, phba,
+ &lpfc_idiag_op_pciCfg);
+ idiag.offset.last_rd = 0;
/* iDiag PCI BAR access */
- snprintf(name, sizeof(name), "barAcc");
- if (!phba->idiag_bar_acc) {
- phba->idiag_bar_acc =
- debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
- phba->idiag_root, phba, &lpfc_idiag_op_barAcc);
- idiag.offset.last_rd = 0;
- }
+ debugfs_create_file("barAcc", 0644, phba->idiag_root, phba,
+ &lpfc_idiag_op_barAcc);
+ idiag.offset.last_rd = 0;
/* iDiag get PCI function queue information */
- snprintf(name, sizeof(name), "queInfo");
- if (!phba->idiag_que_info) {
- phba->idiag_que_info =
- debugfs_create_file(name, S_IFREG|S_IRUGO,
- phba->idiag_root, phba, &lpfc_idiag_op_queInfo);
- }
+ debugfs_create_file("queInfo", 0444, phba->idiag_root, phba,
+ &lpfc_idiag_op_queInfo);
/* iDiag access PCI function queue */
- snprintf(name, sizeof(name), "queAcc");
- if (!phba->idiag_que_acc) {
- phba->idiag_que_acc =
- debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
- phba->idiag_root, phba, &lpfc_idiag_op_queAcc);
- }
+ debugfs_create_file("queAcc", 0644, phba->idiag_root, phba,
+ &lpfc_idiag_op_queAcc);
/* iDiag access PCI function doorbell registers */
- snprintf(name, sizeof(name), "drbAcc");
- if (!phba->idiag_drb_acc) {
- phba->idiag_drb_acc =
- debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
- phba->idiag_root, phba, &lpfc_idiag_op_drbAcc);
- }
+ debugfs_create_file("drbAcc", 0644, phba->idiag_root, phba,
+ &lpfc_idiag_op_drbAcc);
/* iDiag access PCI function control registers */
- snprintf(name, sizeof(name), "ctlAcc");
- if (!phba->idiag_ctl_acc) {
- phba->idiag_ctl_acc =
- debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
- phba->idiag_root, phba, &lpfc_idiag_op_ctlAcc);
- }
+ debugfs_create_file("ctlAcc", 0644, phba->idiag_root, phba,
+ &lpfc_idiag_op_ctlAcc);
/* iDiag access mbox commands */
- snprintf(name, sizeof(name), "mbxAcc");
- if (!phba->idiag_mbx_acc) {
- phba->idiag_mbx_acc =
- debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
- phba->idiag_root, phba, &lpfc_idiag_op_mbxAcc);
- }
+ debugfs_create_file("mbxAcc", 0644, phba->idiag_root, phba,
+ &lpfc_idiag_op_mbxAcc);
/* iDiag extents access commands */
if (phba->sli4_hba.extents_in_use) {
- snprintf(name, sizeof(name), "extAcc");
- if (!phba->idiag_ext_acc) {
- phba->idiag_ext_acc =
- debugfs_create_file(name,
- S_IFREG|S_IRUGO|S_IWUSR,
- phba->idiag_root, phba,
- &lpfc_idiag_op_extAcc);
- }
+ debugfs_create_file("extAcc", 0644, phba->idiag_root, phba,
+ &lpfc_idiag_op_extAcc);
}
-
-debug_failed:
+out:
+ /* alloc'ed items are kfree'd in lpfc_debugfs_terminate */
return;
#endif
}
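The wholesale removal of IS_ERR() checks after debugfs_create_file() follows the usual debugfs convention that per-file creation results need not be inspected: every helper tolerates an ERR_PTR parent and quietly degrades. Only directory creation is still validated here, since the driver keys its hba/vport accounting off those dentries. A sketch of the convention, under that reading:

#include <linux/debugfs.h>
#include <linux/err.h>

static void example_debugfs_setup(struct dentry **root, void *data,
				  const struct file_operations *fops)
{
	*root = debugfs_create_dir("example", NULL);
	if (IS_ERR(*root))
		return;		/* directory checked once, as above */

	/* Return value deliberately ignored: with an ERR_PTR parent
	 * this becomes a quiet no-op, so no per-file bookkeeping or
	 * unwinding is required. */
	debugfs_create_file("stats", 0444, *root, data, fops);
}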
@@ -6486,145 +6399,26 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
kfree(vport->disc_trc);
vport->disc_trc = NULL;
- debugfs_remove(vport->debug_disc_trc); /* discovery_trace */
- vport->debug_disc_trc = NULL;
-
- debugfs_remove(vport->debug_nodelist); /* nodelist */
- vport->debug_nodelist = NULL;
-
- debugfs_remove(vport->debug_nvmestat); /* nvmestat */
- vport->debug_nvmestat = NULL;
-
- debugfs_remove(vport->debug_scsistat); /* scsistat */
- vport->debug_scsistat = NULL;
-
- debugfs_remove(vport->debug_ioktime); /* ioktime */
- vport->debug_ioktime = NULL;
-
- debugfs_remove(vport->debug_hdwqstat); /* hdwqstat */
- vport->debug_hdwqstat = NULL;
-
if (vport->vport_debugfs_root) {
debugfs_remove(vport->vport_debugfs_root); /* vportX */
vport->vport_debugfs_root = NULL;
- atomic_dec(&phba->debugfs_vport_count);
+ phba->debugfs_vport_count--;
}
- if (atomic_read(&phba->debugfs_vport_count) == 0) {
-
- debugfs_remove(phba->debug_multixri_pools); /* multixripools*/
- phba->debug_multixri_pools = NULL;
-
- debugfs_remove(phba->debug_hbqinfo); /* hbqinfo */
- phba->debug_hbqinfo = NULL;
-
- debugfs_remove(phba->debug_cgn_buffer);
- phba->debug_cgn_buffer = NULL;
-
- debugfs_remove(phba->debug_rx_monitor);
- phba->debug_rx_monitor = NULL;
-
- debugfs_remove(phba->debug_ras_log);
- phba->debug_ras_log = NULL;
-
-#ifdef LPFC_HDWQ_LOCK_STAT
- debugfs_remove(phba->debug_lockstat); /* lockstat */
- phba->debug_lockstat = NULL;
-#endif
- debugfs_remove(phba->debug_dumpHBASlim); /* HBASlim */
- phba->debug_dumpHBASlim = NULL;
-
- debugfs_remove(phba->debug_dumpHostSlim); /* HostSlim */
- phba->debug_dumpHostSlim = NULL;
-
- debugfs_remove(phba->debug_InjErrLBA); /* InjErrLBA */
- phba->debug_InjErrLBA = NULL;
-
- debugfs_remove(phba->debug_InjErrNPortID);
- phba->debug_InjErrNPortID = NULL;
-
- debugfs_remove(phba->debug_InjErrWWPN); /* InjErrWWPN */
- phba->debug_InjErrWWPN = NULL;
-
- debugfs_remove(phba->debug_writeGuard); /* writeGuard */
- phba->debug_writeGuard = NULL;
-
- debugfs_remove(phba->debug_writeApp); /* writeApp */
- phba->debug_writeApp = NULL;
-
- debugfs_remove(phba->debug_writeRef); /* writeRef */
- phba->debug_writeRef = NULL;
-
- debugfs_remove(phba->debug_readGuard); /* readGuard */
- phba->debug_readGuard = NULL;
-
- debugfs_remove(phba->debug_readApp); /* readApp */
- phba->debug_readApp = NULL;
-
- debugfs_remove(phba->debug_readRef); /* readRef */
- phba->debug_readRef = NULL;
-
+ if (!phba->debugfs_vport_count) {
kfree(phba->slow_ring_trc);
phba->slow_ring_trc = NULL;
- /* slow_ring_trace */
- debugfs_remove(phba->debug_slow_ring_trc);
- phba->debug_slow_ring_trc = NULL;
-
- debugfs_remove(phba->debug_nvmeio_trc);
- phba->debug_nvmeio_trc = NULL;
-
kfree(phba->nvmeio_trc);
phba->nvmeio_trc = NULL;
- /*
- * iDiag release
- */
- if (phba->sli_rev == LPFC_SLI_REV4) {
- /* iDiag extAcc */
- debugfs_remove(phba->idiag_ext_acc);
- phba->idiag_ext_acc = NULL;
-
- /* iDiag mbxAcc */
- debugfs_remove(phba->idiag_mbx_acc);
- phba->idiag_mbx_acc = NULL;
-
- /* iDiag ctlAcc */
- debugfs_remove(phba->idiag_ctl_acc);
- phba->idiag_ctl_acc = NULL;
-
- /* iDiag drbAcc */
- debugfs_remove(phba->idiag_drb_acc);
- phba->idiag_drb_acc = NULL;
-
- /* iDiag queAcc */
- debugfs_remove(phba->idiag_que_acc);
- phba->idiag_que_acc = NULL;
-
- /* iDiag queInfo */
- debugfs_remove(phba->idiag_que_info);
- phba->idiag_que_info = NULL;
-
- /* iDiag barAcc */
- debugfs_remove(phba->idiag_bar_acc);
- phba->idiag_bar_acc = NULL;
-
- /* iDiag pciCfg */
- debugfs_remove(phba->idiag_pci_cfg);
- phba->idiag_pci_cfg = NULL;
-
- /* Finally remove the iDiag debugfs root */
- debugfs_remove(phba->idiag_root);
- phba->idiag_root = NULL;
- }
-
if (phba->hba_debugfs_root) {
debugfs_remove(phba->hba_debugfs_root); /* fnX */
phba->hba_debugfs_root = NULL;
- atomic_dec(&lpfc_debugfs_hba_count);
+ lpfc_debugfs_hba_count--;
}
- if (atomic_read(&lpfc_debugfs_hba_count) == 0) {
+ if (!lpfc_debugfs_hba_count) {
debugfs_remove(lpfc_debugfs_root); /* lpfc */
lpfc_debugfs_root = NULL;
}
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index f319f3af0400..a1464f8ac331 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2007-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -44,6 +44,9 @@
/* hbqinfo output buffer size */
#define LPFC_HBQINFO_SIZE 8192
+/* hdwqinfo output buffer size */
+#define LPFC_HDWQINFO_SIZE 8192
+
/* nvmestat output buffer size */
#define LPFC_NVMESTAT_SIZE 8192
#define LPFC_IOKTIME_SIZE 8192
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index fca81e0c7c2e..b71db7d7d747 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -3762,7 +3762,7 @@ lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
memset(prdf, 0, cmdsize);
prdf->rdf.fpin_cmd = ELS_RDF;
prdf->rdf.desc_len = cpu_to_be32(sizeof(struct lpfc_els_rdf_req) -
- sizeof(struct fc_els_rdf));
+ sizeof(struct fc_els_rdf_hdr));
prdf->reg_d1.reg_desc.desc_tag = cpu_to_be32(ELS_DTAG_FPIN_REGISTER);
prdf->reg_d1.reg_desc.desc_len = cpu_to_be32(
FC_TLV_DESC_LENGTH_FROM_SZ(prdf->reg_d1));
@@ -5339,12 +5339,12 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
ulp_status, ulp_word4, did);
/* ELS response tag <ulpIoTag> completes */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
- "0110 ELS response tag x%x completes "
+ "0110 ELS response tag x%x completes fc_flag x%lx"
"Data: x%x x%x x%x x%x x%lx x%x x%x x%x %p %p\n",
- iotag, ulp_status, ulp_word4, tmo,
+ iotag, vport->fc_flag, ulp_status, ulp_word4, tmo,
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
ndlp->nlp_rpi, kref_read(&ndlp->kref), mbox, ndlp);
- if (mbox) {
+ if (mbox && !test_bit(FC_PT2PT, &vport->fc_flag)) {
if (ulp_status == 0 &&
test_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag)) {
if (!lpfc_unreg_rpi(vport, ndlp) &&
@@ -5403,6 +5403,10 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
out_free_mbox:
lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
+ } else if (mbox && test_bit(FC_PT2PT, &vport->fc_flag) &&
+ test_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag)) {
+ lpfc_mbx_cmpl_reg_login(phba, mbox);
+ clear_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag);
}
out:
if (ndlp && shost) {
@@ -11259,6 +11263,11 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
"0126 FDISC cmpl status: x%x/x%x)\n",
ulp_status, ulp_word4);
+
+ /* drop initial reference */
+ if (!test_and_set_bit(NLP_DROPPED, &ndlp->nlp_flag))
+ lpfc_nlp_put(ndlp);
+
goto fdisc_failed;
}
@@ -12008,7 +12017,11 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
sglq_entry->state = SGL_FREED;
spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock,
iflag);
-
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_SLI |
+ LOG_DISCOVERY | LOG_NODE,
+ "0732 ELS XRI ABORT on Node: ndlp=x%px "
+ "xri=x%x\n",
+ ndlp, xri);
if (ndlp) {
lpfc_set_rrq_active(phba, ndlp,
sglq_entry->sli4_lxritag,
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 32298285ea5e..3bc0efa7453e 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -366,6 +366,7 @@ struct lpfc_name {
} s;
uint8_t wwn[8];
uint64_t name __packed __aligned(4);
+ __be64 wwn_be __packed __aligned(4);
} u;
};
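The new __be64 view of the name is what allows the debugfs hunks earlier in this patch to replace the memcpy() plus cpu_to_be64() round trip with a single aligned load. A sketch of both directions, assuming a struct lpfc_name as laid out above:

#include <linux/types.h>
#include <asm/byteorder.h>

static u64 lpfc_name_to_host(const struct lpfc_name *n)
{
	/* One big-endian load replaces the old memcpy() dance. */
	return be64_to_cpu(n->u.wwn_be);
}

static void lpfc_name_from_host(struct lpfc_name *n, u64 wwpn)
{
	n->u.wwn_be = cpu_to_be64(wwpn);
}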
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index bc709786e6af..a7f7ed86d2b0 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -4909,18 +4909,18 @@ struct send_frame_wqe {
#define ELS_RDF_REG_TAG_CNT 4
struct lpfc_els_rdf_reg_desc {
- struct fc_df_desc_fpin_reg reg_desc; /* descriptor header */
+ struct fc_df_desc_fpin_reg_hdr reg_desc; /* descriptor header */
__be32 desc_tags[ELS_RDF_REG_TAG_CNT];
/* tags in reg_desc */
};
struct lpfc_els_rdf_req {
- struct fc_els_rdf rdf; /* hdr up to descriptors */
+ struct fc_els_rdf_hdr rdf; /* hdr up to descriptors */
struct lpfc_els_rdf_reg_desc reg_d1; /* 1st descriptor */
};
struct lpfc_els_rdf_rsp {
- struct fc_els_rdf_resp rdf_resp; /* hdr up to descriptors */
+ struct fc_els_rdf_resp_hdr rdf_resp; /* hdr up to descriptors */
struct lpfc_els_rdf_reg_desc reg_d1; /* 1st descriptor */
};
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 4081d2a358ee..0ca7429d86b8 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -3057,13 +3057,6 @@ lpfc_cleanup(struct lpfc_vport *vport)
lpfc_vmid_vport_cleanup(vport);
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
- if (vport->port_type != LPFC_PHYSICAL_PORT &&
- ndlp->nlp_DID == Fabric_DID) {
- /* Just free up ndlp with Fabric_DID for vports */
- lpfc_nlp_put(ndlp);
- continue;
- }
-
if (ndlp->nlp_DID == Fabric_Cntl_DID &&
ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
lpfc_nlp_put(ndlp);
@@ -8300,10 +8293,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt,
phba->cfg_nvme_seg_cnt);
- if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
- i = phba->cfg_sg_dma_buf_size;
- else
- i = SLI4_PAGE_SIZE;
+ i = min(phba->cfg_sg_dma_buf_size, SLI4_PAGE_SIZE);
phba->lpfc_sg_dma_buf_pool =
dma_pool_create("lpfc_sg_dma_buf_pool",
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index a596b80d03d4..1e5ef93e67e3 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -326,8 +326,14 @@ lpfc_defer_plogi_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *login_mbox)
/* Now that REG_RPI completed successfully,
* we can now proceed with sending the PLOGI ACC.
*/
- rc = lpfc_els_rsp_acc(login_mbox->vport, ELS_CMD_PLOGI,
- save_iocb, ndlp, NULL);
+ if (test_bit(FC_PT2PT, &ndlp->vport->fc_flag)) {
+ rc = lpfc_els_rsp_acc(login_mbox->vport, ELS_CMD_PLOGI,
+ save_iocb, ndlp, login_mbox);
+ } else {
+ rc = lpfc_els_rsp_acc(login_mbox->vport, ELS_CMD_PLOGI,
+ save_iocb, ndlp, NULL);
+ }
+
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"4576 PLOGI ACC fails pt2pt discovery: "
@@ -335,9 +341,16 @@ lpfc_defer_plogi_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *login_mbox)
}
}
- /* Now process the REG_RPI cmpl */
- lpfc_mbx_cmpl_reg_login(phba, login_mbox);
- clear_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag);
+ /* If this is a fabric topology, complete the reg_rpi and prli now.
+ * For Pt2Pt, the reg_rpi and PRLI are deferred until after the LS_ACC
+ * completes. This ensures, in Pt2Pt, that the PLOGI LS_ACC is sent
+ * before the PRLI.
+ */
+ if (!test_bit(FC_PT2PT, &ndlp->vport->fc_flag)) {
+ /* Now process the REG_RPI cmpl */
+ lpfc_mbx_cmpl_reg_login(phba, login_mbox);
+ clear_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag);
+ }
kfree(save_iocb);
}
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index a6647dd360d1..e6f632521cff 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -1234,12 +1234,8 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
if ((phba->cfg_nvme_enable_fb) &&
test_bit(NLP_FIRSTBURST, &pnode->nlp_flag)) {
req_len = lpfc_ncmd->nvmeCmd->payload_length;
- if (req_len < pnode->nvme_fb_size)
- wqe->fcp_iwrite.initial_xfer_len =
- req_len;
- else
- wqe->fcp_iwrite.initial_xfer_len =
- pnode->nvme_fb_size;
+ wqe->fcp_iwrite.initial_xfer_len = min(req_len,
+ pnode->nvme_fb_size);
} else {
wqe->fcp_iwrite.initial_xfer_len = 0;
}
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 508ceeecf2d9..6d9d8c196936 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -5935,7 +5935,7 @@ lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct fc_rport *rport)
/**
* lpfc_reset_flush_io_context -
* @vport: The virtual port (scsi_host) for the flush context
- * @tgt_id: If aborting by Target contect - specifies the target id
+ * @tgt_id: If aborting by Target context - specifies the target id
* @lun_id: If aborting by Lun context - specifies the lun id
* @context: specifies the context level to flush at.
*
@@ -6109,8 +6109,14 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
spin_unlock_irqrestore(&pnode->lock, flags);
}
- lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
- LPFC_CTX_TGT);
+ status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
+ LPFC_CTX_TGT);
+ if (status != SUCCESS) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+ "0726 Target Reset flush status x%x\n",
+ status);
+ return status;
+ }
return FAST_IO_FAIL;
}
@@ -6202,7 +6208,7 @@ lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
int rc, ret = SUCCESS;
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
- "3172 SCSI layer issued Host Reset Data:\n");
+ "3172 SCSI layer issued Host Reset\n");
lpfc_offline_prep(phba, LPFC_MBX_WAIT);
lpfc_offline(phba);
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index a8fbdf7119d8..7ea7c4245c69 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -8820,7 +8820,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
if (unlikely(rc)) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0381 Error %d during queue setup.\n", rc);
- goto out_stop_timers;
+ goto out_destroy_queue;
}
/* Initialize the driver internal SLI layer lists. */
lpfc_sli4_setup(phba);
@@ -9103,7 +9103,6 @@ out_free_iocblist:
lpfc_free_iocb_list(phba);
out_destroy_queue:
lpfc_sli4_queue_destroy(phba);
-out_stop_timers:
lpfc_stop_hba_timers(phba);
out_free_mbox:
mempool_free(mboxq, phba->mbox_mem_pool);
@@ -12439,19 +12438,11 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
}
/*
- * If we're unloading, don't abort iocb on the ELS ring, but change
- * the callback so that nothing happens when it finishes.
+ * Always abort the outstanding WQE and set the IA bit correctly
+ * for the context. This is necessary for correctly removing
+ * outstanding ndlp reference counts when the CQE completes with
+ * the XB bit set.
*/
- if (test_bit(FC_UNLOADING, &vport->load_flag) &&
- pring->ringno == LPFC_ELS_RING) {
- if (cmdiocb->cmd_flag & LPFC_IO_FABRIC)
- cmdiocb->fabric_cmd_cmpl = lpfc_ignore_els_cmpl;
- else
- cmdiocb->cmd_cmpl = lpfc_ignore_els_cmpl;
- return retval;
- }
-
- /* issue ABTS for this IOCB based on iotag */
abtsiocbp = __lpfc_sli_get_iocbq(phba);
if (abtsiocbp == NULL)
return IOCB_NORESOURCE;
@@ -21373,7 +21364,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
struct lpfc_sglq *sglq;
struct lpfc_sli_ring *pring;
unsigned long iflags;
- uint32_t ret = 0;
+ int ret = 0;
/* NVME_LS and NVME_LS ABTS requests. */
if (pwqe->cmd_flag & LPFC_IO_NVME_LS) {
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 9ee3a3a4ec4d..31c3c5abdca6 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "14.4.0.10"
+#define LPFC_DRIVER_VERSION "14.4.0.11"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 2006094af418..a00622c0c526 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -2780,7 +2780,7 @@ static inline void mega_create_proc_entry(int index, struct proc_dir_entry *pare
* Return the disk geometry for a particular disk
*/
static int
-megaraid_biosparam(struct scsi_device *sdev, struct block_device *bdev,
+megaraid_biosparam(struct scsi_device *sdev, struct gendisk *disk,
sector_t capacity, int geom[])
{
adapter_t *adapter;
@@ -2813,7 +2813,7 @@ megaraid_biosparam(struct scsi_device *sdev, struct block_device *bdev,
geom[2] = cylinders;
}
else {
- if (scsi_partsize(bdev, capacity, geom))
+ if (scsi_partsize(disk, capacity, geom))
return 0;
dev_info(&adapter->dev->dev,
diff --git a/drivers/scsi/megaraid.h b/drivers/scsi/megaraid.h
index 013fbfb911b9..d6bfd26a8843 100644
--- a/drivers/scsi/megaraid.h
+++ b/drivers/scsi/megaraid.h
@@ -975,7 +975,7 @@ static void mega_free_scb(adapter_t *, scb_t *);
static int megaraid_abort(struct scsi_cmnd *);
static int megaraid_reset(struct scsi_cmnd *);
static int megaraid_abort_and_reset(adapter_t *, struct scsi_cmnd *, int);
-static int megaraid_biosparam(struct scsi_device *, struct block_device *,
+static int megaraid_biosparam(struct scsi_device *, struct gendisk *,
sector_t, int []);
static int mega_build_sglist (adapter_t *adapter, scb_t *scb,
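The megaraid hunks are part of the tree-wide conversion of the SCSI host template ->bios_param() hook (and scsi_partsize()) from struct block_device to struct gendisk. A hedged sketch of a converted callback, assuming only the post-conversion prototypes visible in this diff; the 255/63 fallback geometry is illustrative:

/* Assumed headers: <linux/blkdev.h>, <scsi/scsi_device.h>, <scsi/scsicam.h> */
static int example_bios_param(struct scsi_device *sdev, struct gendisk *disk,
			      sector_t capacity, int geom[])
{
	/* Prefer the geometry implied by the on-disk partition table. */
	if (scsi_partsize(disk, capacity, geom))
		return 0;

	/* Fall back to a synthetic 255 heads / 63 sectors mapping. */
	geom[0] = 255;
	geom[1] = 63;
	sector_div(capacity, 255 * 63);
	geom[2] = (int)capacity;	/* cylinders */
	return 0;
}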
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 615e06fd4ee8..abbbc4b36cd1 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -3137,12 +3137,12 @@ static int megasas_reset_target(struct scsi_cmnd *scmd)
/**
* megasas_bios_param - Returns disk geometry for a disk
* @sdev: device handle
- * @bdev: block device
+ * @unused: gendisk
* @capacity: drive capacity
* @geom: geometry parameters
*/
static int
-megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
+megasas_bios_param(struct scsi_device *sdev, struct gendisk *unused,
sector_t capacity, int geom[])
{
int heads;
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h b/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
index 96401eb7e231..8c8bfbbdd34e 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
@@ -322,6 +322,9 @@ struct mpi3_man6_gpio_entry {
#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_TRIGGER_MASK (0x01)
#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_TRIGGER_EDGE (0x00)
#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_TRIGGER_LEVEL (0x01)
+#define MPI3_MAN6_GPIO_OVER_TEMP_PARAM1_LEVEL_WARNING (0x00)
+#define MPI3_MAN6_GPIO_OVER_TEMP_PARAM1_LEVEL_CRITICAL (0x01)
+#define MPI3_MAN6_GPIO_OVER_TEMP_PARAM1_LEVEL_FATAL (0x02)
#define MPI3_MAN6_GPIO_PORT_GREEN_PARAM1_PHY_STATUS_ALL_UP (0x00)
#define MPI3_MAN6_GPIO_PORT_GREEN_PARAM1_PHY_STATUS_ONE_OR_MORE_UP (0x01)
#define MPI3_MAN6_GPIO_CABLE_MGMT_PARAM1_INTERFACE_MODULE_PRESENT (0x00)
@@ -1250,6 +1253,37 @@ struct mpi3_io_unit_page17 {
__le32 current_key[];
};
#define MPI3_IOUNIT17_PAGEVERSION (0x00)
+struct mpi3_io_unit_page18 {
+ struct mpi3_config_page_header header;
+ u8 flags;
+ u8 poll_interval;
+ __le16 reserved0a;
+ __le32 reserved0c;
+};
+
+#define MPI3_IOUNIT18_PAGEVERSION (0x00)
+#define MPI3_IOUNIT18_FLAGS_DIRECTATTACHED_ENABLE (0x01)
+#define MPI3_IOUNIT18_POLLINTERVAL_DISABLE (0x00)
+#ifndef MPI3_IOUNIT19_DEVICE_MAX
+#define MPI3_IOUNIT19_DEVICE_MAX (1)
+#endif
+struct mpi3_iounit19_device {
+ __le16 temperature;
+ __le16 dev_handle;
+ __le16 persistent_id;
+ __le16 reserved06;
+};
+
+#define MPI3_IOUNIT19_DEVICE_TEMPERATURE_UNAVAILABLE (0x8000)
+struct mpi3_io_unit_page19 {
+ struct mpi3_config_page_header header;
+ __le16 num_devices;
+ __le16 reserved0a;
+ __le32 reserved0c;
+ struct mpi3_iounit19_device device[MPI3_IOUNIT19_DEVICE_MAX];
+};
+
+#define MPI3_IOUNIT19_PAGEVERSION (0x00)
struct mpi3_ioc_page0 {
struct mpi3_config_page_header header;
__le32 reserved08;
@@ -2356,7 +2390,9 @@ struct mpi3_device0_vd_format {
__le16 io_throttle_group;
__le16 io_throttle_group_low;
__le16 io_throttle_group_high;
- __le32 reserved0c;
+ u8 vd_abort_to;
+ u8 vd_reset_to;
+ __le16 reserved0e;
};
#define MPI3_DEVICE0_VD_STATE_OFFLINE (0x00)
#define MPI3_DEVICE0_VD_STATE_PARTIALLY_DEGRADED (0x01)
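The new IO Unit page 19 reports per-device temperatures. MPI headers declare device[] with a DEVICE_MAX of 1, but pages are normally read into a buffer sized from the controller-reported num_devices, so walking that many entries is assumed valid here. A sketch of a consumer, using only the structures and defines added in this hunk (the pr_info() is a stand-in for whatever a driver would do with the reading):

static void example_report_temps(const struct mpi3_io_unit_page19 *pg19)
{
	u16 num = le16_to_cpu(pg19->num_devices);
	u16 i;

	for (i = 0; i < num; i++) {
		const struct mpi3_iounit19_device *d = &pg19->device[i];
		u16 temp = le16_to_cpu(d->temperature);

		if (temp == MPI3_IOUNIT19_DEVICE_TEMPERATURE_UNAVAILABLE)
			continue;	/* no reading for this device */

		pr_info("dev_handle 0x%04x: temperature %u\n",
			le16_to_cpu(d->dev_handle), temp);
	}
}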
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_pci.h b/drivers/scsi/mpi3mr/mpi/mpi30_pci.h
index 7c15e5851ce4..4eeb11c3c73e 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_pci.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_pci.h
@@ -9,9 +9,11 @@
#define MPI3_NVME_ENCAP_CMD_MAX (1)
#endif
#define MPI3_NVME_FLAGS_FORCE_ADMIN_ERR_REPLY_MASK (0x0002)
+#define MPI3_NVME_FLAGS_FORCE_ADMIN_ERR_REPLY_SHIFT (1)
#define MPI3_NVME_FLAGS_FORCE_ADMIN_ERR_REPLY_FAIL_ONLY (0x0000)
#define MPI3_NVME_FLAGS_FORCE_ADMIN_ERR_REPLY_ALL (0x0002)
#define MPI3_NVME_FLAGS_SUBMISSIONQ_MASK (0x0001)
+#define MPI3_NVME_FLAGS_SUBMISSIONQ_SHIFT (0)
#define MPI3_NVME_FLAGS_SUBMISSIONQ_IO (0x0000)
#define MPI3_NVME_FLAGS_SUBMISSIONQ_ADMIN (0x0001)
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_sas.h b/drivers/scsi/mpi3mr/mpi/mpi30_sas.h
index 4a93c67d335f..190b06508b00 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_sas.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_sas.h
@@ -11,6 +11,7 @@
#define MPI3_SAS_DEVICE_INFO_STP_INITIATOR (0x00000010)
#define MPI3_SAS_DEVICE_INFO_SMP_INITIATOR (0x00000008)
#define MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK (0x00000007)
+#define MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_SHIFT (0)
#define MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_NO_DEVICE (0x00000000)
#define MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_END_DEVICE (0x00000001)
#define MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_EXPANDER (0x00000002)
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_transport.h b/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
index 5c522e2531c3..28ab2efb3baa 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
@@ -18,7 +18,7 @@ union mpi3_version_union {
#define MPI3_VERSION_MAJOR (3)
#define MPI3_VERSION_MINOR (0)
-#define MPI3_VERSION_UNIT (35)
+#define MPI3_VERSION_UNIT (37)
#define MPI3_VERSION_DEV (0)
#define MPI3_DEVHANDLE_INVALID (0xffff)
struct mpi3_sysif_oper_queue_indexes {
diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h
index 8d4ef49e04d1..6742684e2990 100644
--- a/drivers/scsi/mpi3mr/mpi3mr.h
+++ b/drivers/scsi/mpi3mr/mpi3mr.h
@@ -56,8 +56,8 @@ extern struct list_head mrioc_list;
extern int prot_mask;
extern atomic64_t event_counter;
-#define MPI3MR_DRIVER_VERSION "8.14.0.5.50"
-#define MPI3MR_DRIVER_RELDATE "27-June-2025"
+#define MPI3MR_DRIVER_VERSION "8.15.0.5.50"
+#define MPI3MR_DRIVER_RELDATE "12-August-2025"
#define MPI3MR_DRIVER_NAME "mpi3mr"
#define MPI3MR_DRIVER_LICENSE "GPL"
@@ -697,6 +697,8 @@ struct tgt_dev_vd {
u16 tg_id;
u32 tg_high;
u32 tg_low;
+ u8 abort_to;
+ u8 reset_to;
struct mpi3mr_throttle_group_info *tg;
};
@@ -738,6 +740,8 @@ enum mpi3mr_dev_state {
* @wwid: World wide ID
* @enclosure_logical_id: Enclosure logical identifier
* @dev_spec: Device type specific information
+ * @abort_to: Timeout for abort TM
+ * @reset_to: Timeout for Target/LUN reset TM
* @ref_count: Reference count
* @state: device state
*/
diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
index 0152d31d430a..8fe6e0bf342e 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
@@ -2353,6 +2353,8 @@ static int mpi3mr_create_op_queues(struct mpi3mr_ioc *mrioc)
{
int retval = 0;
u16 num_queues = 0, i = 0, msix_count_op_q = 1;
+ u32 ioc_status;
+ enum mpi3mr_iocstate ioc_state;
num_queues = min_t(int, mrioc->facts.max_op_reply_q,
mrioc->facts.max_op_req_q);
@@ -2408,6 +2410,14 @@ static int mpi3mr_create_op_queues(struct mpi3mr_ioc *mrioc)
retval = -1;
goto out_failed;
}
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ ioc_state = mpi3mr_get_iocstate(mrioc);
+ if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
+ ioc_state != MRIOC_STATE_READY) {
+ mpi3mr_print_fault_info(mrioc);
+ retval = -1;
+ goto out_failed;
+ }
mrioc->num_op_reply_q = mrioc->num_op_req_q = i;
ioc_info(mrioc,
"successfully created %d operational queue pairs(default/polled) queue = (%d/%d)\n",
@@ -5420,6 +5430,7 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
mpi3mr_reset_rc_name(reset_reason));
mrioc->device_refresh_on = 0;
+ scsi_block_requests(mrioc->shost);
mrioc->reset_in_progress = 1;
mrioc->stop_bsgs = 1;
mrioc->prev_reset_result = -1;
@@ -5528,6 +5539,7 @@ out:
if (!retval) {
mrioc->diagsave_timeout = 0;
mrioc->reset_in_progress = 0;
+ scsi_unblock_requests(mrioc->shost);
mrioc->pel_abort_requested = 0;
if (mrioc->pel_enabled) {
mrioc->pel_cmds.retry_count = 0;
@@ -5552,6 +5564,7 @@ out:
mrioc->device_refresh_on = 0;
mrioc->unrecoverable = 1;
mrioc->reset_in_progress = 0;
+ scsi_unblock_requests(mrioc->shost);
mrioc->stop_bsgs = 0;
retval = -1;
mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
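The mpi3mr_fw.c hunks add a post-queue-creation check for reset history / non-ready IOC state, and bracket the soft-reset handler with scsi_block_requests()/scsi_unblock_requests() so no new I/O is dispatched mid-reset, with the unblock taken on both the success and the unrecoverable exit paths. The shape of that pairing, sketched with a stand-in for the controller-specific sequence:

static int do_controller_reset(void) { return 0; }	/* stand-in */

static int example_soft_reset(struct Scsi_Host *shost)
{
	int rc;

	scsi_block_requests(shost);	/* park new I/O at the SCSI layer */
	rc = do_controller_reset();
	scsi_unblock_requests(shost);	/* on success and failure alike */
	return rc;
}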
diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
index e467b56949e9..b88633e1efe2 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
@@ -1308,6 +1308,12 @@ static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
if (vdinf->vd_state == MPI3_DEVICE0_VD_STATE_OFFLINE)
tgtdev->is_hidden = 1;
tgtdev->non_stl = 1;
+ tgtdev->dev_spec.vd_inf.reset_to =
+ max_t(u8, vdinf->vd_reset_to,
+ MPI3MR_INTADMCMD_TIMEOUT);
+ tgtdev->dev_spec.vd_inf.abort_to =
+ max_t(u8, vdinf->vd_abort_to,
+ MPI3MR_INTADMCMD_TIMEOUT);
tgtdev->dev_spec.vd_inf.tg_id = vdinf_io_throttle_group;
tgtdev->dev_spec.vd_inf.tg_high =
le16_to_cpu(vdinf->io_throttle_group_high) * 2048;
@@ -2049,8 +2055,8 @@ static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc,
if (!fwevt->process_evt)
goto evt_ack;
- dprint_event_bh(mrioc, "processing event(0x%02x) in the bottom half handler\n",
- fwevt->event_id);
+ dprint_event_bh(mrioc, "processing event(0x%02x) -(0x%08x) in the bottom half handler\n",
+ fwevt->event_id, fwevt->evt_ctx);
switch (fwevt->event_id) {
case MPI3_EVENT_DEVICE_ADDED:
@@ -2866,12 +2872,14 @@ static void mpi3mr_preparereset_evt_th(struct mpi3mr_ioc *mrioc,
"prepare for reset event top half with rc=start\n");
if (mrioc->prepare_for_reset)
return;
+ scsi_block_requests(mrioc->shost);
mrioc->prepare_for_reset = 1;
mrioc->prepare_for_reset_timeout_counter = 0;
} else if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_ABORT) {
dprint_event_th(mrioc,
"prepare for reset top half with rc=abort\n");
mrioc->prepare_for_reset = 0;
+ scsi_unblock_requests(mrioc->shost);
mrioc->prepare_for_reset_timeout_counter = 0;
}
if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK)
@@ -3076,8 +3084,8 @@ void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc,
}
if (process_evt_bh || ack_req) {
dprint_event_th(mrioc,
- "scheduling bottom half handler for event(0x%02x),ack_required=%d\n",
- evt_type, ack_req);
+ "scheduling bottom half handler for event(0x%02x) - (0x%08x), ack_required=%d\n",
+ evt_type, le32_to_cpu(event_reply->event_context), ack_req);
sz = event_reply->event_data_length * 4;
fwevt = mpi3mr_alloc_fwevt(sz);
if (!fwevt) {
@@ -3915,11 +3923,13 @@ int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type,
if (scsi_tgt_priv_data)
atomic_inc(&scsi_tgt_priv_data->block_io);
- if (tgtdev && (tgtdev->dev_type == MPI3_DEVICE_DEVFORM_PCIE)) {
- if (cmd_priv && tgtdev->dev_spec.pcie_inf.abort_to)
- timeout = tgtdev->dev_spec.pcie_inf.abort_to;
- else if (!cmd_priv && tgtdev->dev_spec.pcie_inf.reset_to)
- timeout = tgtdev->dev_spec.pcie_inf.reset_to;
+ if (tgtdev) {
+ if (tgtdev->dev_type == MPI3_DEVICE_DEVFORM_PCIE)
+ timeout = cmd_priv ? tgtdev->dev_spec.pcie_inf.abort_to
+ : tgtdev->dev_spec.pcie_inf.reset_to;
+ else if (tgtdev->dev_type == MPI3_DEVICE_DEVFORM_VD)
+ timeout = cmd_priv ? tgtdev->dev_spec.vd_inf.abort_to
+ : tgtdev->dev_spec.vd_inf.reset_to;
}
init_completion(&drv_cmd->done);
@@ -4031,7 +4041,7 @@ out:
/**
* mpi3mr_bios_param - BIOS param callback
* @sdev: SCSI device reference
- * @bdev: Block device reference
+ * @unused: gendisk reference
* @capacity: Capacity in logical sectors
* @params: Parameter array
*
@@ -4040,7 +4050,7 @@ out:
* Return: 0 always
*/
static int mpi3mr_bios_param(struct scsi_device *sdev,
- struct block_device *bdev, sector_t capacity, int params[])
+ struct gendisk *unused, sector_t capacity, int params[])
{
int heads;
int sectors;
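The issue_tm hunk above extends the per-device task-management timeout selection from PCIe devices to virtual disks, using the vd_abort_to/vd_reset_to values read from Device page 0 (clamped to at least MPI3MR_INTADMCMD_TIMEOUT at update time). A condensed sketch of the selection, assuming the tgt_dev/dev_spec layout shown earlier in this diff:

static u8 example_tm_timeout(struct mpi3mr_tgt_dev *tgtdev, bool is_abort,
			     u8 default_to)
{
	if (!tgtdev)
		return default_to;

	switch (tgtdev->dev_type) {
	case MPI3_DEVICE_DEVFORM_PCIE:
		return is_abort ? tgtdev->dev_spec.pcie_inf.abort_to
				: tgtdev->dev_spec.pcie_inf.reset_to;
	case MPI3_DEVICE_DEVFORM_VD:
		return is_abort ? tgtdev->dev_spec.vd_inf.abort_to
				: tgtdev->dev_spec.vd_inf.reset_to;
	default:
		return default_to;
	}
}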
diff --git a/drivers/scsi/mpi3mr/mpi3mr_transport.c b/drivers/scsi/mpi3mr/mpi3mr_transport.c
index c8d6ced5640e..d70f002d6487 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_transport.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_transport.c
@@ -413,9 +413,11 @@ static void mpi3mr_remove_device_by_sas_address(struct mpi3mr_ioc *mrioc,
sas_address, hba_port);
if (tgtdev) {
if (!list_empty(&tgtdev->list)) {
- list_del_init(&tgtdev->list);
was_on_tgtdev_list = 1;
- mpi3mr_tgtdev_put(tgtdev);
+ if (tgtdev->state == MPI3MR_DEV_REMOVE_HS_STARTED) {
+ list_del_init(&tgtdev->list);
+ mpi3mr_tgtdev_put(tgtdev);
+ }
}
}
spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
@@ -2079,6 +2081,8 @@ int mpi3mr_expander_add(struct mpi3mr_ioc *mrioc, u16 handle)
link_rate = (expander_pg1.negotiated_link_rate &
MPI3_SAS_NEG_LINK_RATE_LOGICAL_MASK) >>
MPI3_SAS_NEG_LINK_RATE_LOGICAL_SHIFT;
+ if (link_rate < MPI3_SAS_NEG_LINK_RATE_1_5)
+ link_rate = MPI3_SAS_NEG_LINK_RATE_1_5;
mpi3mr_update_links(mrioc, sas_address_parent,
handle, i, link_rate, hba_port);
}
@@ -2388,6 +2392,9 @@ int mpi3mr_report_tgtdev_to_sas_transport(struct mpi3mr_ioc *mrioc,
link_rate = mpi3mr_get_sas_negotiated_logical_linkrate(mrioc, tgtdev);
+ if (link_rate < MPI3_SAS_NEG_LINK_RATE_1_5)
+ link_rate = MPI3_SAS_NEG_LINK_RATE_1_5;
+
mpi3mr_update_links(mrioc, sas_address_parent, tgtdev->dev_handle,
parent_phy_number, link_rate, hba_port);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index bd3efa5b46c7..0d652db8fe24 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -1420,7 +1420,13 @@ _base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
loginfo = le32_to_cpu(mpi_reply->IOCLogInfo);
- _base_sas_log_info(ioc, loginfo);
+ if (ioc->logging_level & MPT_DEBUG_REPLY)
+ _base_sas_log_info(ioc, loginfo);
+ else {
+ if (!((ioc_status & MPI2_IOCSTATUS_MASK) &
+ MPI2_IOCSTATUS_CONFIG_INVALID_PAGE))
+ _base_sas_log_info(ioc, loginfo);
+ }
}
if (ioc_status || loginfo) {
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 939141cde3ca..e6a6f21d309b 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -77,8 +77,8 @@
#define MPT3SAS_DRIVER_NAME "mpt3sas"
#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION "52.100.00.00"
-#define MPT3SAS_MAJOR_VERSION 52
+#define MPT3SAS_DRIVER_VERSION "54.100.00.00"
+#define MPT3SAS_MAJOR_VERSION 54
#define MPT3SAS_MINOR_VERSION 100
#define MPT3SAS_BUILD_VERSION 00
#define MPT3SAS_RELEASE_VERSION 00
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 967af259118e..7092d0debef3 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -2754,7 +2754,7 @@ scsih_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
/**
* scsih_bios_param - fetch head, sector, cylinder info for a disk
* @sdev: scsi device struct
- * @bdev: pointer to block device context
+ * @unused: pointer to gendisk
* @capacity: device size (in 512 byte sectors)
* @params: three element array to place output:
* params[0] number of heads (max 255)
@@ -2762,7 +2762,7 @@ scsih_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
* params[2] number of cylinders
*/
static int
-scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
+scsih_bios_param(struct scsi_device *sdev, struct gendisk *unused,
sector_t capacity, int params[])
{
int heads;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index dc74ebc6405a..f3400d01cc2a 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -166,6 +166,9 @@ _transport_convert_phy_link_rate(u8 link_rate)
case MPI25_SAS_NEG_LINK_RATE_12_0:
rc = SAS_LINK_RATE_12_0_GBPS;
break;
+ case MPI26_SAS_NEG_LINK_RATE_22_5:
+ rc = SAS_LINK_RATE_22_5_GBPS;
+ break;
case MPI2_SAS_NEG_LINK_RATE_PHY_DISABLED:
rc = SAS_PHY_DISABLED;
break;
@@ -987,11 +990,9 @@ mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
list_for_each_entry_safe(mpt3sas_phy, next_phy,
&mpt3sas_port->phy_list, port_siblings) {
if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
- dev_printk(KERN_INFO, &mpt3sas_port->port->dev,
- "remove: sas_addr(0x%016llx), phy(%d)\n",
- (unsigned long long)
- mpt3sas_port->remote_identify.sas_address,
- mpt3sas_phy->phy_id);
+ ioc_info(ioc, "remove: sas_addr(0x%016llx), phy(%d)\n",
+ (unsigned long long) mpt3sas_port->remote_identify.sas_address,
+ mpt3sas_phy->phy_id);
mpt3sas_phy->phy_belongs_to_port = 0;
if (!ioc->remove_host)
sas_port_delete_phy(mpt3sas_port->port,
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 15b3d9d55a4b..f2e7997d5b9d 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -1175,7 +1175,7 @@ static int mvs_dev_found_notify(struct domain_device *dev, int lock)
mvi_device->dev_type = dev->dev_type;
mvi_device->mvi_info = mvi;
mvi_device->sas_device = dev;
- if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
+ if (dev_parent_is_expander(dev)) {
int phy_id;
phy_id = sas_find_attached_phy_id(&parent_dev->ex_dev, dev);
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index 96549e7f5705..bdc2f2f17753 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -2142,7 +2142,7 @@ static enum scsi_timeout_action mvumi_timed_out(struct scsi_cmnd *scmd)
}
static int
-mvumi_bios_param(struct scsi_device *sdev, struct block_device *bdev,
+mvumi_bios_param(struct scsi_device *sdev, struct gendisk *unused,
sector_t capacity, int geom[])
{
int heads, sectors;
diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c
index 486db5b2f05d..b8453c0333dc 100644
--- a/drivers/scsi/myrb.c
+++ b/drivers/scsi/myrb.c
@@ -1745,7 +1745,7 @@ static void myrb_sdev_destroy(struct scsi_device *sdev)
kfree(sdev->hostdata);
}
-static int myrb_biosparam(struct scsi_device *sdev, struct block_device *bdev,
+static int myrb_biosparam(struct scsi_device *sdev, struct gendisk *unused,
sector_t capacity, int geom[])
{
struct myrb_hba *cb = shost_priv(sdev->host);
diff --git a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c
index 95af3bb03834..a58abd796603 100644
--- a/drivers/scsi/myrs.c
+++ b/drivers/scsi/myrs.c
@@ -498,14 +498,14 @@ static bool myrs_enable_mmio_mbox(struct myrs_hba *cs,
/* Temporary dma mapping, used only in the scope of this function */
mbox = dma_alloc_coherent(&pdev->dev, sizeof(union myrs_cmd_mbox),
&mbox_addr, GFP_KERNEL);
- if (dma_mapping_error(&pdev->dev, mbox_addr))
+ if (!mbox)
return false;
/* These are the base addresses for the command memory mailbox array */
cs->cmd_mbox_size = MYRS_MAX_CMD_MBOX * sizeof(union myrs_cmd_mbox);
cmd_mbox = dma_alloc_coherent(&pdev->dev, cs->cmd_mbox_size,
&cs->cmd_mbox_addr, GFP_KERNEL);
- if (dma_mapping_error(&pdev->dev, cs->cmd_mbox_addr)) {
+ if (!cmd_mbox) {
dev_err(&pdev->dev, "Failed to map command mailbox\n");
goto out_free;
}
@@ -520,7 +520,7 @@ static bool myrs_enable_mmio_mbox(struct myrs_hba *cs,
cs->stat_mbox_size = MYRS_MAX_STAT_MBOX * sizeof(struct myrs_stat_mbox);
stat_mbox = dma_alloc_coherent(&pdev->dev, cs->stat_mbox_size,
&cs->stat_mbox_addr, GFP_KERNEL);
- if (dma_mapping_error(&pdev->dev, cs->stat_mbox_addr)) {
+ if (!stat_mbox) {
dev_err(&pdev->dev, "Failed to map status mailbox\n");
goto out_free;
}
@@ -533,7 +533,7 @@ static bool myrs_enable_mmio_mbox(struct myrs_hba *cs,
cs->fwstat_buf = dma_alloc_coherent(&pdev->dev,
sizeof(struct myrs_fwstat),
&cs->fwstat_addr, GFP_KERNEL);
- if (dma_mapping_error(&pdev->dev, cs->fwstat_addr)) {
+ if (!cs->fwstat_buf) {
dev_err(&pdev->dev, "Failed to map firmware health buffer\n");
cs->fwstat_buf = NULL;
goto out_free;
diff --git a/drivers/scsi/pcmcia/sym53c500_cs.c b/drivers/scsi/pcmcia/sym53c500_cs.c
index 278c78d066c4..a3b505240351 100644
--- a/drivers/scsi/pcmcia/sym53c500_cs.c
+++ b/drivers/scsi/pcmcia/sym53c500_cs.c
@@ -597,7 +597,7 @@ SYM53C500_host_reset(struct scsi_cmnd *SCpnt)
static int
SYM53C500_biosparm(struct scsi_device *disk,
- struct block_device *dev,
+ struct gendisk *unused,
sector_t capacity, int *info_array)
{
int size;
diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c
index 7618f9cc9986..cbfda8c04e95 100644
--- a/drivers/scsi/pm8001/pm8001_ctl.c
+++ b/drivers/scsi/pm8001/pm8001_ctl.c
@@ -534,23 +534,25 @@ static ssize_t pm8001_ctl_iop_log_show(struct device *cdev,
char *str = buf;
u32 read_size =
pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_size / 1024;
- static u32 start, end, count;
u32 max_read_times = 32;
u32 max_count = (read_size * 1024) / (max_read_times * 4);
u32 *temp = (u32 *)pm8001_ha->memoryMap.region[IOP].virt_ptr;
- if ((count % max_count) == 0) {
- start = 0;
- end = max_read_times;
- count = 0;
+ mutex_lock(&pm8001_ha->iop_log_lock);
+
+ if ((pm8001_ha->iop_log_count % max_count) == 0) {
+ pm8001_ha->iop_log_start = 0;
+ pm8001_ha->iop_log_end = max_read_times;
+ pm8001_ha->iop_log_count = 0;
} else {
- start = end;
- end = end + max_read_times;
+ pm8001_ha->iop_log_start = pm8001_ha->iop_log_end;
+ pm8001_ha->iop_log_end = pm8001_ha->iop_log_end + max_read_times;
}
- for (; start < end; start++)
- str += sprintf(str, "%08x ", *(temp+start));
- count++;
+ for (; pm8001_ha->iop_log_start < pm8001_ha->iop_log_end; pm8001_ha->iop_log_start++)
+ str += sprintf(str, "%08x ", *(temp+pm8001_ha->iop_log_start));
+ pm8001_ha->iop_log_count++;
+ mutex_unlock(&pm8001_ha->iop_log_lock);
return str - buf;
}
static DEVICE_ATTR(iop_log, S_IRUGO, pm8001_ctl_iop_log_show, NULL);
@@ -680,7 +682,7 @@ static int pm8001_set_nvmd(struct pm8001_hba_info *pm8001_ha)
struct pm8001_ioctl_payload *payload;
DECLARE_COMPLETION_ONSTACK(completion);
u8 *ioctlbuffer;
- u32 ret;
+ int ret;
u32 length = 1024 * 5 + sizeof(*payload) - 1;
if (pm8001_ha->fw_image->size > 4096) {
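The iop_log_show hunk above removes function-static cursors (start/end/count) that were shared by every HBA and raced between concurrent sysfs reads, moving them into pm8001_hba_info under a new mutex (initialized in pm8001_pci_alloc further down). A minimal model of the fix with illustrative names; read_log_word() stands in for the IOP memory read:

#include <linux/mutex.h>
#include <linux/kernel.h>

struct example_hba {
	struct mutex log_lock;
	u32 log_start, log_end, log_count;
};

static u32 read_log_word(struct example_hba *hba, u32 idx);	/* stand-in */

static ssize_t example_log_show(struct example_hba *hba, char *buf,
				u32 chunk, u32 max_count)
{
	char *str = buf;

	mutex_lock(&hba->log_lock);	/* serialize readers, protect cursors */
	if ((hba->log_count % max_count) == 0) {
		hba->log_start = 0;
		hba->log_end = chunk;
		hba->log_count = 0;
	} else {
		hba->log_start = hba->log_end;
		hba->log_end += chunk;
	}
	for (; hba->log_start < hba->log_end; hba->log_start++)
		str += sprintf(str, "%08x ",
			       read_log_word(hba, hba->log_start));
	hba->log_count++;
	mutex_unlock(&hba->log_lock);

	return str - buf;
}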
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 42a4eeac24c9..8005995a317c 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -2163,8 +2163,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
/* Print sas address of IO failed device */
if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
(status != IO_UNDERFLOW)) {
- if (!((t->dev->parent) &&
- (dev_is_expander(t->dev->parent->dev_type)))) {
+ if (!dev_parent_is_expander(t->dev)) {
for (i = 0, j = 4; j <= 7 && i <= 3; i++, j++)
sata_addr_low[i] = pm8001_ha->sas_addr[j];
for (i = 0, j = 0; j <= 3 && i <= 3; i++, j++)
@@ -4168,7 +4167,6 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
u16 firstBurstSize = 0;
u16 ITNT = 2000;
struct domain_device *dev = pm8001_dev->sas_device;
- struct domain_device *parent_dev = dev->parent;
struct pm8001_port *port = dev->port->lldd_port;
memset(&payload, 0, sizeof(payload));
@@ -4186,10 +4184,9 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
dev_is_expander(pm8001_dev->dev_type))
stp_sspsmp_sata = 0x01; /*ssp or smp*/
}
- if (parent_dev && dev_is_expander(parent_dev->dev_type))
- phy_id = parent_dev->ex_dev.ex_phy->phy_id;
- else
- phy_id = pm8001_dev->attached_phy;
+
+ phy_id = pm80xx_get_local_phy_id(dev);
+
opc = OPC_INB_REG_DEV;
linkrate = (pm8001_dev->sas_device->linkrate < dev->port->linkrate) ?
pm8001_dev->sas_device->linkrate : dev->port->linkrate;
diff --git a/drivers/scsi/pm8001/pm8001_hwi.h b/drivers/scsi/pm8001/pm8001_hwi.h
index fc2127dcb58d..f1ce8df082b0 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.h
+++ b/drivers/scsi/pm8001/pm8001_hwi.h
@@ -339,8 +339,10 @@ struct ssp_completion_resp {
__le32 status;
__le32 param;
__le32 ssptag_rescv_rescpad;
+
+ /* Must be last --ends in a flexible-array member. */
struct ssp_response_iu ssp_resp_iu;
- __le32 residual_count;
+ /* __le32 residual_count; */
} __attribute__((packed, aligned(4)));
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 599410bcdfea..8ff4b89ff81e 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -552,6 +552,7 @@ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev,
pm8001_ha->id = pm8001_id++;
pm8001_ha->logging_level = logging_level;
pm8001_ha->non_fatal_count = 0;
+ mutex_init(&pm8001_ha->iop_log_lock);
if (link_rate >= 1 && link_rate <= 15)
pm8001_ha->link_rate = (link_rate << 8);
else {
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index f7067878b34f..6a8d35aea93a 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -130,6 +130,16 @@ static void pm80xx_get_tag_opcodes(struct sas_task *task, int *ata_op,
}
}
+u32 pm80xx_get_local_phy_id(struct domain_device *dev)
+{
+ struct pm8001_device *pm8001_dev = dev->lldd_dev;
+
+ if (dev_parent_is_expander(dev))
+ return dev->parent->ex_dev.ex_phy->phy_id;
+
+ return pm8001_dev->attached_phy;
+}
+
void pm80xx_show_pending_commands(struct pm8001_hba_info *pm8001_ha,
struct pm8001_device *target_pm8001_dev)
{
@@ -477,7 +487,7 @@ int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags)
struct pm8001_device *pm8001_dev = dev->lldd_dev;
bool internal_abort = sas_is_internal_abort(task);
struct pm8001_hba_info *pm8001_ha;
- struct pm8001_port *port = NULL;
+ struct pm8001_port *port;
struct pm8001_ccb_info *ccb;
unsigned long flags;
u32 n_elem = 0;
@@ -502,8 +512,7 @@ int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags)
spin_lock_irqsave(&pm8001_ha->lock, flags);
- pm8001_dev = dev->lldd_dev;
- port = pm8001_ha->phy[pm8001_dev->attached_phy].port;
+ port = dev->port->lldd_port;
if (!internal_abort &&
(DEV_IS_GONE(pm8001_dev) || !port || !port->port_attached)) {
@@ -701,7 +710,7 @@ static int pm8001_dev_found_notify(struct domain_device *dev)
dev->lldd_dev = pm8001_device;
pm8001_device->dev_type = dev->dev_type;
pm8001_device->dcompletion = &completion;
- if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
+ if (dev_parent_is_expander(dev)) {
int phy_id;
phy_id = sas_find_attached_phy_id(&parent_dev->ex_dev, dev);
@@ -766,7 +775,16 @@ static void pm8001_dev_gone_notify(struct domain_device *dev)
spin_lock_irqsave(&pm8001_ha->lock, flags);
}
PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id);
- pm8001_ha->phy[pm8001_dev->attached_phy].phy_attached = 0;
+
+ /*
+ * The phy array only contains local phys. Thus, we cannot clear
+ * phy_attached for a device behind an expander.
+ */
+ if (!dev_parent_is_expander(dev)) {
+ u32 phy_id = pm80xx_get_local_phy_id(dev);
+
+ pm8001_ha->phy[phy_id].phy_attached = 0;
+ }
pm8001_free_dev(pm8001_dev);
} else {
pm8001_dbg(pm8001_ha, DISC, "Found dev has gone.\n");
@@ -1048,7 +1066,7 @@ int pm8001_abort_task(struct sas_task *task)
struct pm8001_hba_info *pm8001_ha;
struct pm8001_device *pm8001_dev;
int rc = TMF_RESP_FUNC_FAILED, ret;
- u32 phy_id, port_id;
+ u32 port_id;
struct sas_task_slow slow_task;
if (!task->lldd_task || !task->dev)
@@ -1057,7 +1075,6 @@ int pm8001_abort_task(struct sas_task *task)
dev = task->dev;
pm8001_dev = dev->lldd_dev;
pm8001_ha = pm8001_find_ha_by_dev(dev);
- phy_id = pm8001_dev->attached_phy;
if (PM8001_CHIP_DISP->fatal_errors(pm8001_ha)) {
// If the controller is seeing fatal errors
@@ -1089,7 +1106,8 @@ int pm8001_abort_task(struct sas_task *task)
if (pm8001_ha->chip_id == chip_8006) {
DECLARE_COMPLETION_ONSTACK(completion_reset);
DECLARE_COMPLETION_ONSTACK(completion);
- struct pm8001_phy *phy = pm8001_ha->phy + phy_id;
+ u32 phy_id = pm80xx_get_local_phy_id(dev);
+ struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
port_id = phy->port->port_id;
/* 1. Set Device state as Recovery */
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 334485bb2c12..b63b6ffcaaf5 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -547,6 +547,10 @@ struct pm8001_hba_info {
u32 ci_offset;
u32 pi_offset;
u32 max_memcnt;
+ u32 iop_log_start;
+ u32 iop_log_end;
+ u32 iop_log_count;
+ struct mutex iop_log_lock;
};
struct pm8001_work {
@@ -798,6 +802,7 @@ void pm8001_setds_completion(struct domain_device *dev);
void pm8001_tmf_aborted(struct sas_task *task);
void pm80xx_show_pending_commands(struct pm8001_hba_info *pm8001_ha,
struct pm8001_device *dev);
+u32 pm80xx_get_local_phy_id(struct domain_device *dev);
#endif
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index c1bae995a412..31960b72c1e9 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -2340,8 +2340,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha,
/* Print sas address of IO failed device */
if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
(status != IO_UNDERFLOW)) {
- if (!((t->dev->parent) &&
- (dev_is_expander(t->dev->parent->dev_type)))) {
+ if (!dev_parent_is_expander(t->dev)) {
for (i = 0, j = 4; i <= 3 && j <= 7; i++, j++)
sata_addr_low[i] = pm8001_ha->sas_addr[j];
for (i = 0, j = 0; i <= 3 && j <= 3; i++, j++)
@@ -4780,7 +4779,6 @@ static int pm80xx_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
u16 firstBurstSize = 0;
u16 ITNT = 2000;
struct domain_device *dev = pm8001_dev->sas_device;
- struct domain_device *parent_dev = dev->parent;
struct pm8001_port *port = dev->port->lldd_port;
memset(&payload, 0, sizeof(payload));
@@ -4799,10 +4797,8 @@ static int pm80xx_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
dev_is_expander(pm8001_dev->dev_type))
stp_sspsmp_sata = 0x01; /*ssp or smp*/
}
- if (parent_dev && dev_is_expander(parent_dev->dev_type))
- phy_id = parent_dev->ex_dev.ex_phy->phy_id;
- else
- phy_id = pm8001_dev->attached_phy;
+
+ phy_id = pm80xx_get_local_phy_id(dev);
opc = OPC_INB_REG_DEV;
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h
index eb8fd37b2066..d8a63b7fed6a 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.h
+++ b/drivers/scsi/pm8001/pm80xx_hwi.h
@@ -558,8 +558,10 @@ struct ssp_completion_resp {
__le32 status;
__le32 param;
__le32 ssptag_rescv_rescpad;
+
+ /* Must be last --ends in a flexible-array member. */
struct ssp_response_iu ssp_resp_iu;
- __le32 residual_count;
+ /* __le32 residual_count; */
} __attribute__((packed, aligned(4)));
#define SSP_RESCV_BIT 0x00010000
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index 1ed3171f1797..ea682f3044b6 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -845,7 +845,7 @@ static DEF_SCSI_QCMD(ppa_queuecommand)
* be done in sd.c. Even if it gets fixed there, this will still
* work.
*/
-static int ppa_biosparam(struct scsi_device *sdev, struct block_device *dev,
+static int ppa_biosparam(struct scsi_device *sdev, struct gendisk *unused,
sector_t capacity, int ip[])
{
ip[0] = 0x40;
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 6af018f1ca22..ef841f643171 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -1023,7 +1023,7 @@ qla1280_eh_adapter_reset(struct scsi_cmnd *cmd)
}
static int
-qla1280_biosparam(struct scsi_device *sdev, struct block_device *bdev,
+qla1280_biosparam(struct scsi_device *sdev, struct gendisk *unused,
sector_t capacity, int geom[])
{
int heads, sectors, cylinders;
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 10431a67d202..ccfc2d26dd37 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -3106,8 +3106,8 @@ static bool qla_bsg_found(struct qla_qpair *qpair, struct bsg_job *bsg_job)
switch (rval) {
case QLA_SUCCESS:
/* Wait for the command completion. */
- ratov_j = ha->r_a_tov / 10 * 4 * 1000;
- ratov_j = msecs_to_jiffies(ratov_j);
+ ratov_j = ha->r_a_tov / 10 * 4;
+ ratov_j = secs_to_jiffies(ratov_j);
if (!wait_for_completion_timeout(&comp, ratov_j)) {
ql_log(ql_log_info, vha, 0x7089,
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index cb95b7b12051..604e66bead1e 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -4890,9 +4890,7 @@ struct purex_item {
struct purex_item *pkt);
atomic_t in_use;
uint16_t size;
- struct {
- uint8_t iocb[64];
- } iocb;
+ uint8_t iocb[] __counted_by(size);
};
#include "qla_edif.h"
@@ -5101,7 +5099,6 @@ typedef struct scsi_qla_host {
struct list_head head;
spinlock_t lock;
} purex_list;
- struct purex_item default_item;
struct name_list_extended gnl;
/* Count of active session/fcport */
@@ -5130,6 +5127,11 @@ typedef struct scsi_qla_host {
#define DPORT_DIAG_IN_PROGRESS BIT_0
#define DPORT_DIAG_CHIP_RESET_IN_PROGRESS BIT_1
uint16_t dport_status;
+
+ /* Must be last --ends in a flexible-array member. */
+ TRAILING_OVERLAP(struct purex_item, default_item, iocb,
+ uint8_t __default_item_iocb[QLA_DEFAULT_PAYLOAD_SIZE];
+ );
} scsi_qla_host_t;
struct qla27xx_image_status {
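The qla_def.h hunks convert purex_item's fixed 64-byte iocb into a flexible array annotated __counted_by(size), and keep the embedded default_item's fixed payload via the TRAILING_OVERLAP() helper so the flex-array struct can still sit inside scsi_qla_host. The core pattern, sketched with illustrative names (not the qla2xxx types):

#include <linux/overflow.h>
#include <linux/slab.h>

struct example_item {
	u16 size;
	u8 payload[] __counted_by(size);	/* bounds checked by FORTIFY/UBSAN */
};

static struct example_item *example_alloc(u16 size)
{
	/* struct_size() = header plus 'size' array elements, overflow-checked */
	struct example_item *it =
		kzalloc(struct_size(it, payload, size), GFP_ATOMIC);

	if (it)
		it->size = size;	/* must be set before payload accesses */
	return it;
}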
diff --git a/drivers/scsi/qla2xxx/qla_edif.c b/drivers/scsi/qla2xxx/qla_edif.c
index 91bbd3b75bff..ccd4485087a1 100644
--- a/drivers/scsi/qla2xxx/qla_edif.c
+++ b/drivers/scsi/qla2xxx/qla_edif.c
@@ -1798,7 +1798,7 @@ retry:
switch (rval) {
case QLA_SUCCESS:
break;
- case EAGAIN:
+ case -EAGAIN:
msleep(EDIF_MSLEEP_INTERVAL);
cnt++;
if (cnt < EDIF_RETRY_COUNT)
@@ -3649,7 +3649,7 @@ retry:
p->e.extra_rx_xchg_address, p->e.extra_control_flags,
sp->handle, sp->remap.req.len, bsg_job);
break;
- case EAGAIN:
+ case -EAGAIN:
msleep(EDIF_MSLEEP_INTERVAL);
cnt++;
if (cnt < EDIF_RETRY_COUNT)
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index be211ff22acb..6a2e1c7fd125 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -2059,11 +2059,11 @@ static void qla_marker_sp_done(srb_t *sp, int res)
int cnt = 5; \
do { \
if (_chip_gen != sp->vha->hw->chip_reset || _login_gen != sp->fcport->login_gen) {\
- _rval = EINVAL; \
+ _rval = -EINVAL; \
break; \
} \
_rval = qla2x00_start_sp(_sp); \
- if (_rval == EAGAIN) \
+ if (_rval == -EAGAIN) \
msleep(1); \
else \
break; \
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index c4c6b5c6658c..4559b490614d 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1077,17 +1077,17 @@ static struct purex_item *
qla24xx_alloc_purex_item(scsi_qla_host_t *vha, uint16_t size)
{
struct purex_item *item = NULL;
- uint8_t item_hdr_size = sizeof(*item);
if (size > QLA_DEFAULT_PAYLOAD_SIZE) {
- item = kzalloc(item_hdr_size +
- (size - QLA_DEFAULT_PAYLOAD_SIZE), GFP_ATOMIC);
+ item = kzalloc(struct_size(item, iocb, size), GFP_ATOMIC);
} else {
if (atomic_inc_return(&vha->default_item.in_use) == 1) {
item = &vha->default_item;
goto initialize_purex_header;
} else {
- item = kzalloc(item_hdr_size, GFP_ATOMIC);
+ item = kzalloc(
+ struct_size(item, iocb, QLA_DEFAULT_PAYLOAD_SIZE),
+ GFP_ATOMIC);
}
}
if (!item) {
@@ -1127,17 +1127,16 @@ qla24xx_queue_purex_item(scsi_qla_host_t *vha, struct purex_item *pkt,
* @vha: SCSI driver HA context
* @pkt: ELS packet
*/
-static struct purex_item
-*qla24xx_copy_std_pkt(struct scsi_qla_host *vha, void *pkt)
+static struct purex_item *
+qla24xx_copy_std_pkt(struct scsi_qla_host *vha, void *pkt)
{
struct purex_item *item;
- item = qla24xx_alloc_purex_item(vha,
- QLA_DEFAULT_PAYLOAD_SIZE);
+ item = qla24xx_alloc_purex_item(vha, QLA_DEFAULT_PAYLOAD_SIZE);
if (!item)
return item;
- memcpy(&item->iocb, pkt, sizeof(item->iocb));
+ memcpy(&item->iocb, pkt, QLA_DEFAULT_PAYLOAD_SIZE);
return item;
}
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 8ee2e337c9e1..065f9bcca26f 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -419,7 +419,7 @@ retry:
switch (rval) {
case QLA_SUCCESS:
break;
- case EAGAIN:
+ case -EAGAIN:
msleep(PURLS_MSLEEP_INTERVAL);
cnt++;
if (cnt < PURLS_RETRY_COUNT)
@@ -1308,7 +1308,7 @@ void qla2xxx_process_purls_iocb(void **pkt, struct rsp_que **rsp)
ql_dbg(ql_dbg_unsol, vha, 0x2121,
"PURLS OP[%01x] size %d xchg addr 0x%x portid %06x\n",
- item->iocb.iocb[3], item->size, uctx->exchange_address,
+ item->iocb[3], item->size, uctx->exchange_address,
fcport->d_id.b24);
/* +48 0 1 2 3 4 5 6 7 8 9 A B C D E F
* ----- -----------------------------------------------
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index d4b484c0fd9d..98a5c105fdfd 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1291,8 +1291,8 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
"Abort command mbx cmd=%p, rval=%x.\n", cmd, rval);
/* Wait for the command completion. */
- ratov_j = ha->r_a_tov/10 * 4 * 1000;
- ratov_j = msecs_to_jiffies(ratov_j);
+ ratov_j = ha->r_a_tov / 10 * 4;
+ ratov_j = secs_to_jiffies(ratov_j);
switch (rval) {
case QLA_SUCCESS:
if (!wait_for_completion_timeout(&comp, ratov_j)) {
@@ -1806,8 +1806,8 @@ static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
rval = ha->isp_ops->abort_command(sp);
/* Wait for command completion. */
ret_cmd = false;
- ratov_j = ha->r_a_tov/10 * 4 * 1000;
- ratov_j = msecs_to_jiffies(ratov_j);
+ ratov_j = ha->r_a_tov / 10 * 4;
+ ratov_j = secs_to_jiffies(ratov_j);
switch (rval) {
case QLA_SUCCESS:
if (wait_for_completion_timeout(&comp, ratov_j)) {
@@ -6459,9 +6459,10 @@ dealloc:
void
qla24xx_free_purex_item(struct purex_item *item)
{
- if (item == &item->vha->default_item)
+ if (item == &item->vha->default_item) {
memset(&item->vha->default_item, 0, sizeof(struct purex_item));
- else
+ memset(&item->vha->__default_item_iocb, 0, QLA_DEFAULT_PAYLOAD_SIZE);
+ } else
kfree(item);
}
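The qla_bsg.c and qla_os.c hunks compute the R_A_TOV-based completion wait in whole seconds and convert with secs_to_jiffies() rather than multiplying by 1000 for msecs_to_jiffies(); the result is the same, minus the open-coded scaling. A one-function sketch, assuming r_a_tov is held in tenths of a second as the /10 in this diff suggests:

static unsigned long example_abort_wait_jiffies(unsigned int r_a_tov)
{
	/* wait 4 x R_A_TOV; r_a_tov is in tenths of a second here */
	return secs_to_jiffies(r_a_tov / 10 * 4);
}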
diff --git a/drivers/scsi/qlogicfas408.c b/drivers/scsi/qlogicfas408.c
index 3e065d5fc80c..1ce469b7db99 100644
--- a/drivers/scsi/qlogicfas408.c
+++ b/drivers/scsi/qlogicfas408.c
@@ -492,7 +492,7 @@ DEF_SCSI_QCMD(qlogicfas408_queuecommand)
* Return bios parameters
*/
-int qlogicfas408_biosparam(struct scsi_device *disk, struct block_device *dev,
+int qlogicfas408_biosparam(struct scsi_device *disk, struct gendisk *unused,
sector_t capacity, int ip[])
{
/* This should mimic the DOS Qlogic driver's behavior exactly */
diff --git a/drivers/scsi/qlogicfas408.h b/drivers/scsi/qlogicfas408.h
index a971db11d293..83ef86c71f2f 100644
--- a/drivers/scsi/qlogicfas408.h
+++ b/drivers/scsi/qlogicfas408.h
@@ -106,7 +106,7 @@ struct qlogicfas408_priv {
irqreturn_t qlogicfas408_ihandl(int irq, void *dev_id);
int qlogicfas408_queuecommand(struct Scsi_Host *h, struct scsi_cmnd * cmd);
int qlogicfas408_biosparam(struct scsi_device * disk,
- struct block_device *dev,
+ struct gendisk *unused,
sector_t capacity, int ip[]);
int qlogicfas408_abort(struct scsi_cmnd * cmd);
extern int qlogicfas408_host_reset(struct scsi_cmnd *cmd);
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 353cb60e1abe..b2ab97be5db3 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -1155,14 +1155,9 @@ static ssize_t sdebug_error_write(struct file *file, const char __user *ubuf,
struct sdebug_err_inject *inject;
struct scsi_device *sdev = (struct scsi_device *)file->f_inode->i_private;
- buf = kzalloc(count + 1, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- if (copy_from_user(buf, ubuf, count)) {
- kfree(buf);
- return -EFAULT;
- }
+ buf = memdup_user_nul(ubuf, count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
if (buf[0] == '-')
return sdebug_err_remove(sdev, buf, count);
@@ -8805,8 +8800,8 @@ static int sdebug_add_store(void)
/* Logical Block Provisioning */
if (scsi_debug_lbp()) {
map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
- sip->map_storep = vmalloc(array_size(sizeof(long),
- BITS_TO_LONGS(map_size)));
+ sip->map_storep = vcalloc(BITS_TO_LONGS(map_size),
+ sizeof(long));
pr_info("%lu provisioning blocks\n", map_size);
@@ -8815,8 +8810,6 @@ static int sdebug_add_store(void)
goto err;
}
- bitmap_zero(sip->map_storep, map_size);
-
/* Map first 1KB for partition table */
if (sdebug_num_parts)
map_region(sip, 0, 2);
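The scsi_debug hunks replace the open-coded kzalloc()/copy_from_user() pair with memdup_user_nul(), which allocates count + 1 bytes, copies from userspace, and NUL-terminates, and swap vmalloc() + bitmap_zero() for vcalloc(). The memdup_user_nul() pattern in isolation, with a hypothetical write handler:

#include <linux/string.h>
#include <linux/err.h>
#include <linux/slab.h>

static ssize_t example_debugfs_write(const char __user *ubuf, size_t count)
{
	char *buf = memdup_user_nul(ubuf, count);

	if (IS_ERR(buf))
		return PTR_ERR(buf);	/* -ENOMEM or -EFAULT */

	/* ... parse buf as a NUL-terminated C string ... */
	kfree(buf);
	return count;
}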
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 0c65ecfedfbd..d7e42293b864 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -3148,8 +3148,7 @@ void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
/* Offset starting from the beginning of first page in this sg-entry */
*offset = *offset - len_complete + sg->offset;
- /* Assumption: contiguous pages can be accessed as "page + i" */
- page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
+ page = sg_page(sg) + (*offset >> PAGE_SHIFT);
*offset &= ~PAGE_MASK;
/* Bytes in this sg-entry from *offset to the end of the page */
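This scsi_lib.c hunk, like the sg.c one further below, drops the nth_page() helper as part of its kernel-wide removal: pages backing a single sg entry are physically contiguous, so their struct pages are adjacent in the memmap and plain pointer arithmetic suffices. Sketched as a helper:

static struct page *example_sg_offset_page(struct scatterlist *sg,
					   unsigned int offset)
{
	/* struct pages of one contiguous sg entry are adjacent, so this
	 * replaces the old nth_page(sg_page(sg), offset >> PAGE_SHIFT) */
	return sg_page(sg) + (offset >> PAGE_SHIFT);
}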
diff --git a/drivers/scsi/scsicam.c b/drivers/scsi/scsicam.c
index 19e6c3852d50..887de505bcf9 100644
--- a/drivers/scsi/scsicam.c
+++ b/drivers/scsi/scsicam.c
@@ -30,9 +30,9 @@
* starting at offset %0x1be.
* Returns: partition table in kmalloc(GFP_KERNEL) memory, or NULL on error.
*/
-unsigned char *scsi_bios_ptable(struct block_device *dev)
+unsigned char *scsi_bios_ptable(struct gendisk *dev)
{
- struct address_space *mapping = bdev_whole(dev)->bd_mapping;
+ struct address_space *mapping = dev->part0->bd_mapping;
unsigned char *res = NULL;
struct folio *folio;
@@ -48,7 +48,7 @@ EXPORT_SYMBOL(scsi_bios_ptable);
/**
* scsi_partsize - Parse cylinders/heads/sectors from PC partition table
- * @bdev: block device to parse
+ * @disk: gendisk of the disk to parse
* @capacity: size of the disk in sectors
* @geom: output in form of [hds, cylinders, sectors]
*
@@ -57,7 +57,7 @@ EXPORT_SYMBOL(scsi_bios_ptable);
*
* Returns: %false on failure, %true on success.
*/
-bool scsi_partsize(struct block_device *bdev, sector_t capacity, int geom[3])
+bool scsi_partsize(struct gendisk *disk, sector_t capacity, int geom[3])
{
int cyl, ext_cyl, end_head, end_cyl, end_sector;
unsigned int logical_end, physical_end, ext_physical_end;
@@ -65,7 +65,7 @@ bool scsi_partsize(struct block_device *bdev, sector_t capacity, int geom[3])
void *buf;
int ret = false;
- buf = scsi_bios_ptable(bdev);
+ buf = scsi_bios_ptable(disk);
if (!buf)
return false;
@@ -205,7 +205,7 @@ static int setsize(unsigned long capacity, unsigned int *cyls, unsigned int *hds
/**
* scsicam_bios_param - Determine geometry of a disk in cylinders/heads/sectors.
- * @bdev: which device
+ * @disk: which device
* @capacity: size of the disk in sectors
* @ip: return value: ip[0]=heads, ip[1]=sectors, ip[2]=cylinders
*
@@ -215,13 +215,13 @@ static int setsize(unsigned long capacity, unsigned int *cyls, unsigned int *hds
*
* Returns : -1 on failure, 0 on success.
*/
-int scsicam_bios_param(struct block_device *bdev, sector_t capacity, int *ip)
+int scsicam_bios_param(struct gendisk *disk, sector_t capacity, int *ip)
{
u64 capacity64 = capacity; /* Suppress gcc warning */
int ret = 0;
/* try to infer mapping from partition table */
- if (scsi_partsize(bdev, capacity, ip))
+ if (scsi_partsize(disk, capacity, ip))
return 0;
if (capacity64 < (1ULL << 32)) {
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 5b8668accf8e..0252d3f6bed1 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -106,7 +106,7 @@ static void sd_config_discard(struct scsi_disk *sdkp, struct queue_limits *lim,
unsigned int mode);
static void sd_config_write_same(struct scsi_disk *sdkp,
struct queue_limits *lim);
-static int sd_revalidate_disk(struct gendisk *);
+static void sd_revalidate_disk(struct gendisk *);
static void sd_unlock_native_capacity(struct gendisk *disk);
static void sd_shutdown(struct device *);
static void scsi_disk_release(struct device *cdev);
@@ -1599,9 +1599,9 @@ static void sd_release(struct gendisk *disk)
scsi_device_put(sdev);
}
-static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+static int sd_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
- struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
+ struct scsi_disk *sdkp = scsi_disk(disk);
struct scsi_device *sdp = sdkp->device;
struct Scsi_Host *host = sdp->host;
sector_t capacity = logical_to_sectors(sdp, sdkp->capacity);
@@ -1614,9 +1614,9 @@ static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
/* override with calculated, extended default, or driver values */
if (host->hostt->bios_param)
- host->hostt->bios_param(sdp, bdev, capacity, diskinfo);
+ host->hostt->bios_param(sdp, disk, capacity, diskinfo);
else
- scsicam_bios_param(bdev, capacity, diskinfo);
+ scsicam_bios_param(disk, capacity, diskinfo);
geo->heads = diskinfo[0];
geo->sectors = diskinfo[1];
@@ -3691,13 +3691,13 @@ static void sd_read_block_zero(struct scsi_disk *sdkp)
* performs disk spin up, read_capacity, etc.
* @disk: struct gendisk we care about
**/
-static int sd_revalidate_disk(struct gendisk *disk)
+static void sd_revalidate_disk(struct gendisk *disk)
{
struct scsi_disk *sdkp = scsi_disk(disk);
struct scsi_device *sdp = sdkp->device;
sector_t old_capacity = sdkp->capacity;
- struct queue_limits lim;
- unsigned char *buffer;
+ struct queue_limits *lim = NULL;
+ unsigned char *buffer = NULL;
unsigned int dev_max;
int err;
@@ -3709,25 +3709,26 @@ static int sd_revalidate_disk(struct gendisk *disk)
* of the other niceties.
*/
if (!scsi_device_online(sdp))
- goto out;
+ return;
+
+ lim = kmalloc(sizeof(*lim), GFP_KERNEL);
+ if (!lim)
+ return;
buffer = kmalloc(SD_BUF_SIZE, GFP_KERNEL);
- if (!buffer) {
- sd_printk(KERN_WARNING, sdkp, "sd_revalidate_disk: Memory "
- "allocation failure.\n");
+ if (!buffer)
goto out;
- }
sd_spinup_disk(sdkp);
- lim = queue_limits_start_update(sdkp->disk->queue);
+ *lim = queue_limits_start_update(sdkp->disk->queue);
/*
* Without media there is no reason to ask; moreover, some devices
* react badly if we do.
*/
if (sdkp->media_present) {
- sd_read_capacity(sdkp, &lim, buffer);
+ sd_read_capacity(sdkp, lim, buffer);
/*
* Some USB/UAS devices return generic values for mode pages
* until the media has been accessed. Trigger a READ operation
@@ -3741,17 +3742,17 @@ static int sd_revalidate_disk(struct gendisk *disk)
* cause this to be updated correctly and any device which
* doesn't support it should be treated as rotational.
*/
- lim.features |= (BLK_FEAT_ROTATIONAL | BLK_FEAT_ADD_RANDOM);
+ lim->features |= (BLK_FEAT_ROTATIONAL | BLK_FEAT_ADD_RANDOM);
if (scsi_device_supports_vpd(sdp)) {
sd_read_block_provisioning(sdkp);
- sd_read_block_limits(sdkp, &lim);
+ sd_read_block_limits(sdkp, lim);
sd_read_block_limits_ext(sdkp);
- sd_read_block_characteristics(sdkp, &lim);
- sd_zbc_read_zones(sdkp, &lim, buffer);
+ sd_read_block_characteristics(sdkp, lim);
+ sd_zbc_read_zones(sdkp, lim, buffer);
}
- sd_config_discard(sdkp, &lim, sd_discard_mode(sdkp));
+ sd_config_discard(sdkp, lim, sd_discard_mode(sdkp));
sd_print_capacity(sdkp, old_capacity);
@@ -3761,47 +3762,46 @@ static int sd_revalidate_disk(struct gendisk *disk)
sd_read_app_tag_own(sdkp, buffer);
sd_read_write_same(sdkp, buffer);
sd_read_security(sdkp, buffer);
- sd_config_protection(sdkp, &lim);
+ sd_config_protection(sdkp, lim);
}
/*
* We now have all cache related info, determine how we deal
* with flush requests.
*/
- sd_set_flush_flag(sdkp, &lim);
+ sd_set_flush_flag(sdkp, lim);
/* Initial block count limit based on CDB TRANSFER LENGTH field size. */
dev_max = sdp->use_16_for_rw ? SD_MAX_XFER_BLOCKS : SD_DEF_XFER_BLOCKS;
/* Some devices report a maximum block count for READ/WRITE requests. */
dev_max = min_not_zero(dev_max, sdkp->max_xfer_blocks);
- lim.max_dev_sectors = logical_to_sectors(sdp, dev_max);
+ lim->max_dev_sectors = logical_to_sectors(sdp, dev_max);
if (sd_validate_min_xfer_size(sdkp))
- lim.io_min = logical_to_bytes(sdp, sdkp->min_xfer_blocks);
+ lim->io_min = logical_to_bytes(sdp, sdkp->min_xfer_blocks);
else
- lim.io_min = 0;
+ lim->io_min = 0;
/*
* Limit default to SCSI host optimal sector limit if set. There may be
* an impact on performance for when the size of a request exceeds this
* host limit.
*/
- lim.io_opt = sdp->host->opt_sectors << SECTOR_SHIFT;
+ lim->io_opt = sdp->host->opt_sectors << SECTOR_SHIFT;
if (sd_validate_opt_xfer_size(sdkp, dev_max)) {
- lim.io_opt = min_not_zero(lim.io_opt,
+ lim->io_opt = min_not_zero(lim->io_opt,
logical_to_bytes(sdp, sdkp->opt_xfer_blocks));
}
sdkp->first_scan = 0;
set_capacity_and_notify(disk, logical_to_sectors(sdp, sdkp->capacity));
- sd_config_write_same(sdkp, &lim);
- kfree(buffer);
+ sd_config_write_same(sdkp, lim);
- err = queue_limits_commit_update_frozen(sdkp->disk->queue, &lim);
+ err = queue_limits_commit_update_frozen(sdkp->disk->queue, lim);
if (err)
- return err;
+ goto out;
/*
* Query concurrent positioning ranges after
@@ -3820,7 +3820,9 @@ static int sd_revalidate_disk(struct gendisk *disk)
set_capacity_and_notify(disk, 0);
out:
- return 0;
+ kfree(buffer);
+ kfree(lim);
+
}
/**
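The sd.c hunks make sd_revalidate_disk() return void and move the large struct queue_limits (plus the VPD buffer) off the stack, releasing both through a single out: label. The shape of that conversion, sketched under the assumption that the probing body is unchanged:

static void example_revalidate(struct gendisk *disk)
{
	struct queue_limits *lim;
	unsigned char *buffer;

	lim = kmalloc(sizeof(*lim), GFP_KERNEL);	/* too big for the stack */
	if (!lim)
		return;
	buffer = kmalloc(SD_BUF_SIZE, GFP_KERNEL);
	if (!buffer)
		goto out;

	/* ... probe the device and fill *lim ... */

out:
	kfree(buffer);	/* kfree(NULL) is a no-op, so one exit path suffices */
	kfree(lim);
}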
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 3c02a5f7b5f3..4c62c597c7be 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1235,8 +1235,7 @@ sg_vma_fault(struct vm_fault *vmf)
len = vma->vm_end - sa;
len = (len < length) ? len : length;
if (offset < len) {
- struct page *page = nth_page(rsv_schp->pages[k],
- offset >> PAGE_SHIFT);
+ struct page *page = rsv_schp->pages[k] + (offset >> PAGE_SHIFT);
get_page(page); /* increment page count */
vmf->page = page;
return 0; /* success */
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 125944941601..03c97e60d36f 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -20,6 +20,7 @@
#include <linux/reboot.h>
#include <linux/cciss_ioctl.h>
#include <linux/crash_dump.h>
+#include <linux/string.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
@@ -6774,17 +6775,15 @@ static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
}
if (iocommand.buf_size > 0) {
- kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
- if (!kernel_buffer)
- return -ENOMEM;
if (iocommand.Request.Type.Direction & XFER_WRITE) {
- if (copy_from_user(kernel_buffer, iocommand.buf,
- iocommand.buf_size)) {
- rc = -EFAULT;
- goto out;
- }
+ kernel_buffer = memdup_user(iocommand.buf,
+ iocommand.buf_size);
+ if (IS_ERR(kernel_buffer))
+ return PTR_ERR(kernel_buffer);
} else {
- memset(kernel_buffer, 0, iocommand.buf_size);
+ kernel_buffer = kzalloc(iocommand.buf_size, GFP_KERNEL);
+ if (!kernel_buffer)
+ return -ENOMEM;
}
}
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 63ed7f9aaa93..d8ad02c29320 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -1457,7 +1457,7 @@ static void stex_reset_work(struct work_struct *work)
}
static int stex_biosparam(struct scsi_device *sdev,
- struct block_device *bdev, sector_t capacity, int geom[])
+ struct gendisk *unused, sector_t capacity, int geom[])
{
int heads = 255, sectors = 63;
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index d9e59204a9c3..567f9cd29102 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1615,7 +1615,7 @@ static int storvsc_sdev_configure(struct scsi_device *sdevice,
return 0;
}
-static int storvsc_get_chs(struct scsi_device *sdev, struct block_device * bdev,
+static int storvsc_get_chs(struct scsi_device *sdev, struct gendisk *unused,
sector_t capacity, int *info)
{
sector_t nsect = capacity;
@@ -1941,8 +1941,8 @@ static int storvsc_probe(struct hv_device *device,
int num_present_cpus = num_present_cpus();
struct Scsi_Host *host;
struct hv_host_device *host_dev;
- bool dev_is_ide = ((dev_id->driver_data == IDE_GUID) ? true : false);
- bool is_fc = ((dev_id->driver_data == SFC_GUID) ? true : false);
+ bool dev_is_ide = dev_id->driver_data == IDE_GUID;
+ bool is_fc = dev_id->driver_data == SFC_GUID;
int target = 0;
struct storvsc_device *stor_device;
int max_sub_channels = 0;
diff --git a/drivers/scsi/wd719x.c b/drivers/scsi/wd719x.c
index 5a380eecfc75..0c9987828774 100644
--- a/drivers/scsi/wd719x.c
+++ b/drivers/scsi/wd719x.c
@@ -544,7 +544,7 @@ static int wd719x_host_reset(struct scsi_cmnd *cmd)
return wd719x_chip_init(wd) == 0 ? SUCCESS : FAILED;
}
-static int wd719x_biosparam(struct scsi_device *sdev, struct block_device *bdev,
+static int wd719x_biosparam(struct scsi_device *sdev, struct gendisk *unused,
sector_t capacity, int geom[])
{
if (capacity >= 0x200000) {
diff --git a/drivers/soc/apple/Kconfig b/drivers/soc/apple/Kconfig
index 6388cbe1e56b..ad6736889231 100644
--- a/drivers/soc/apple/Kconfig
+++ b/drivers/soc/apple/Kconfig
@@ -8,7 +8,6 @@ config APPLE_MAILBOX
tristate "Apple SoC mailboxes"
depends on PM
depends on ARCH_APPLE || (64BIT && COMPILE_TEST)
- default ARCH_APPLE
help
Apple SoCs have various co-processors required for certain
peripherals to work (NVMe, display controller, etc.). This
@@ -21,7 +20,6 @@ config APPLE_RTKIT
tristate "Apple RTKit co-processor IPC protocol"
depends on APPLE_MAILBOX
depends on ARCH_APPLE || COMPILE_TEST
- default ARCH_APPLE
help
Apple SoCs such as the M1 come with various co-processors running
their proprietary RTKit operating system. This option enables support
@@ -33,7 +31,6 @@ config APPLE_RTKIT
config APPLE_SART
tristate "Apple SART DMA address filter"
depends on ARCH_APPLE || COMPILE_TEST
- default ARCH_APPLE
help
Apple SART is a simple DMA address filter used on Apple SoCs such
as the M1. It is usually required for the NVMe coprocessor which does
diff --git a/drivers/soc/apple/mailbox.c b/drivers/soc/apple/mailbox.c
index 49a0955e82d6..8f29108dc69a 100644
--- a/drivers/soc/apple/mailbox.c
+++ b/drivers/soc/apple/mailbox.c
@@ -47,6 +47,9 @@
#define APPLE_ASC_MBOX_I2A_RECV0 0x830
#define APPLE_ASC_MBOX_I2A_RECV1 0x838
+#define APPLE_T8015_MBOX_A2I_CONTROL 0x108
+#define APPLE_T8015_MBOX_I2A_CONTROL 0x10c
+
#define APPLE_M3_MBOX_CONTROL_FULL BIT(16)
#define APPLE_M3_MBOX_CONTROL_EMPTY BIT(17)
@@ -382,6 +385,21 @@ static int apple_mbox_probe(struct platform_device *pdev)
return 0;
}
+static const struct apple_mbox_hw apple_mbox_t8015_hw = {
+ .control_full = APPLE_ASC_MBOX_CONTROL_FULL,
+ .control_empty = APPLE_ASC_MBOX_CONTROL_EMPTY,
+
+ .a2i_control = APPLE_T8015_MBOX_A2I_CONTROL,
+ .a2i_send0 = APPLE_ASC_MBOX_A2I_SEND0,
+ .a2i_send1 = APPLE_ASC_MBOX_A2I_SEND1,
+
+ .i2a_control = APPLE_T8015_MBOX_I2A_CONTROL,
+ .i2a_recv0 = APPLE_ASC_MBOX_I2A_RECV0,
+ .i2a_recv1 = APPLE_ASC_MBOX_I2A_RECV1,
+
+ .has_irq_controls = false,
+};
+
static const struct apple_mbox_hw apple_mbox_asc_hw = {
.control_full = APPLE_ASC_MBOX_CONTROL_FULL,
.control_empty = APPLE_ASC_MBOX_CONTROL_EMPTY,
@@ -418,6 +436,7 @@ static const struct apple_mbox_hw apple_mbox_m3_hw = {
static const struct of_device_id apple_mbox_of_match[] = {
{ .compatible = "apple,asc-mailbox-v4", .data = &apple_mbox_asc_hw },
+ { .compatible = "apple,t8015-asc-mailbox", .data = &apple_mbox_t8015_hw },
{ .compatible = "apple,m3-mailbox-v2", .data = &apple_mbox_m3_hw },
{}
};
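The mailbox hunk adds a T8015 register layout selected through the of_device_id .data pointer, the same data-driven variant dispatch the existing asc/m3 entries use. A sketch of the probe-side lookup, assuming the apple_mbox_hw type from this file:

/* Assumed header: <linux/of_device.h> */
static int example_probe(struct platform_device *pdev)
{
	const struct apple_mbox_hw *hw = of_device_get_match_data(&pdev->dev);

	if (!hw)
		return -ENODEV;
	/* ... program hw->a2i_control / hw->i2a_control for this SoC ... */
	return 0;
}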
diff --git a/drivers/soc/apple/sart.c b/drivers/soc/apple/sart.c
index afa111736899..4ff1942b82a7 100644
--- a/drivers/soc/apple/sart.c
+++ b/drivers/soc/apple/sart.c
@@ -25,8 +25,17 @@
#define APPLE_SART_MAX_ENTRIES 16
-/* This is probably a bitfield but the exact meaning of each bit is unknown. */
-#define APPLE_SART_FLAGS_ALLOW 0xff
+/* SARTv0 registers */
+#define APPLE_SART0_CONFIG(idx) (0x00 + 4 * (idx))
+#define APPLE_SART0_CONFIG_FLAGS GENMASK(28, 24)
+#define APPLE_SART0_CONFIG_SIZE GENMASK(18, 0)
+#define APPLE_SART0_CONFIG_SIZE_SHIFT 12
+#define APPLE_SART0_CONFIG_SIZE_MAX GENMASK(18, 0)
+
+#define APPLE_SART0_PADDR(idx) (0x40 + 4 * (idx))
+#define APPLE_SART0_PADDR_SHIFT 12
+
+#define APPLE_SART0_FLAGS_ALLOW 0xf
/* SARTv2 registers */
#define APPLE_SART2_CONFIG(idx) (0x00 + 4 * (idx))
@@ -38,6 +47,8 @@
#define APPLE_SART2_PADDR(idx) (0x40 + 4 * (idx))
#define APPLE_SART2_PADDR_SHIFT 12
+#define APPLE_SART2_FLAGS_ALLOW 0xff
+
/* SARTv3 registers */
#define APPLE_SART3_CONFIG(idx) (0x00 + 4 * (idx))
@@ -48,11 +59,15 @@
#define APPLE_SART3_SIZE_SHIFT 12
#define APPLE_SART3_SIZE_MAX GENMASK(29, 0)
+#define APPLE_SART3_FLAGS_ALLOW 0xff
+
struct apple_sart_ops {
void (*get_entry)(struct apple_sart *sart, int index, u8 *flags,
phys_addr_t *paddr, size_t *size);
void (*set_entry)(struct apple_sart *sart, int index, u8 flags,
phys_addr_t paddr_shifted, size_t size_shifted);
+ /* This is probably a bitfield but the exact meaning of each bit is unknown. */
+ unsigned int flags_allow;
unsigned int size_shift;
unsigned int paddr_shift;
size_t size_max;
@@ -68,6 +83,39 @@ struct apple_sart {
unsigned long used_entries;
};
+static void sart0_get_entry(struct apple_sart *sart, int index, u8 *flags,
+ phys_addr_t *paddr, size_t *size)
+{
+ u32 cfg = readl(sart->regs + APPLE_SART0_CONFIG(index));
+ phys_addr_t paddr_ = readl(sart->regs + APPLE_SART0_PADDR(index));
+ size_t size_ = FIELD_GET(APPLE_SART0_CONFIG_SIZE, cfg);
+
+ *flags = FIELD_GET(APPLE_SART0_CONFIG_FLAGS, cfg);
+ *size = size_ << APPLE_SART0_CONFIG_SIZE_SHIFT;
+ *paddr = paddr_ << APPLE_SART0_PADDR_SHIFT;
+}
+
+static void sart0_set_entry(struct apple_sart *sart, int index, u8 flags,
+ phys_addr_t paddr_shifted, size_t size_shifted)
+{
+ u32 cfg;
+
+ cfg = FIELD_PREP(APPLE_SART0_CONFIG_FLAGS, flags);
+ cfg |= FIELD_PREP(APPLE_SART0_CONFIG_SIZE, size_shifted);
+
+ writel(paddr_shifted, sart->regs + APPLE_SART0_PADDR(index));
+ writel(cfg, sart->regs + APPLE_SART0_CONFIG(index));
+}
+
+static struct apple_sart_ops sart_ops_v0 = {
+ .get_entry = sart0_get_entry,
+ .set_entry = sart0_set_entry,
+ .flags_allow = APPLE_SART0_FLAGS_ALLOW,
+ .size_shift = APPLE_SART0_CONFIG_SIZE_SHIFT,
+ .paddr_shift = APPLE_SART0_PADDR_SHIFT,
+ .size_max = APPLE_SART0_CONFIG_SIZE_MAX,
+};
+
static void sart2_get_entry(struct apple_sart *sart, int index, u8 *flags,
phys_addr_t *paddr, size_t *size)
{
@@ -95,6 +143,7 @@ static void sart2_set_entry(struct apple_sart *sart, int index, u8 flags,
static struct apple_sart_ops sart_ops_v2 = {
.get_entry = sart2_get_entry,
.set_entry = sart2_set_entry,
+ .flags_allow = APPLE_SART2_FLAGS_ALLOW,
.size_shift = APPLE_SART2_CONFIG_SIZE_SHIFT,
.paddr_shift = APPLE_SART2_PADDR_SHIFT,
.size_max = APPLE_SART2_CONFIG_SIZE_MAX,
@@ -122,6 +171,7 @@ static void sart3_set_entry(struct apple_sart *sart, int index, u8 flags,
static struct apple_sart_ops sart_ops_v3 = {
.get_entry = sart3_get_entry,
.set_entry = sart3_set_entry,
+ .flags_allow = APPLE_SART3_FLAGS_ALLOW,
.size_shift = APPLE_SART3_SIZE_SHIFT,
.paddr_shift = APPLE_SART3_PADDR_SHIFT,
.size_max = APPLE_SART3_SIZE_MAX,
@@ -233,7 +283,7 @@ int apple_sart_add_allowed_region(struct apple_sart *sart, phys_addr_t paddr,
if (test_and_set_bit(i, &sart->used_entries))
continue;
- ret = sart_set_entry(sart, i, APPLE_SART_FLAGS_ALLOW, paddr,
+ ret = sart_set_entry(sart, i, sart->ops->flags_allow, paddr,
size);
if (ret) {
dev_dbg(sart->dev,
@@ -314,6 +364,10 @@ static const struct of_device_id apple_sart_of_match[] = {
.compatible = "apple,t8103-sart",
.data = &sart_ops_v2,
},
+ {
+ .compatible = "apple,t8015-sart",
+ .data = &sart_ops_v0,
+ },
{}
};
MODULE_DEVICE_TABLE(of, apple_sart_of_match);
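
For reference, a hedged sketch of the dispatch this refactor enables: callers shift paddr/size by the version-specific amounts and pass the version's allow flags, so only the ops table differs between SARTv0, v2 and v3. The helper name is illustrative; the ops/regs member layout is assumed from the hunk.

    /* Illustrative only: version-independent SART entry programming. */
    static int sart_set_entry_sketch(struct apple_sart *sart, int index,
                                     phys_addr_t paddr, size_t size)
    {
            if (size & ((1UL << sart->ops->size_shift) - 1))
                    return -EINVAL; /* not aligned to this version's granule */

            sart->ops->set_entry(sart, index, sart->ops->flags_allow,
                                 paddr >> sart->ops->paddr_shift,
                                 size >> sart->ops->size_shift);
            return 0;
    }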
diff --git a/drivers/soc/aspeed/aspeed-lpc-ctrl.c b/drivers/soc/aspeed/aspeed-lpc-ctrl.c
index ee58151bd69e..b7dbb12bd095 100644
--- a/drivers/soc/aspeed/aspeed-lpc-ctrl.c
+++ b/drivers/soc/aspeed/aspeed-lpc-ctrl.c
@@ -10,6 +10,7 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/regmap.h>
@@ -254,17 +255,8 @@ static int aspeed_lpc_ctrl_probe(struct platform_device *pdev)
dev_set_drvdata(&pdev->dev, lpc_ctrl);
/* If memory-region is described in device tree then store */
- node = of_parse_phandle(dev->of_node, "memory-region", 0);
- if (!node) {
- dev_dbg(dev, "Didn't find reserved memory\n");
- } else {
- rc = of_address_to_resource(node, 0, &resm);
- of_node_put(node);
- if (rc) {
- dev_err(dev, "Couldn't address to resource for reserved memory\n");
- return -ENXIO;
- }
-
+ rc = of_reserved_mem_region_to_resource(dev->of_node, 0, &resm);
+ if (!rc) {
lpc_ctrl->mem_size = resource_size(&resm);
lpc_ctrl->mem_base = resm.start;
diff --git a/drivers/soc/aspeed/aspeed-p2a-ctrl.c b/drivers/soc/aspeed/aspeed-p2a-ctrl.c
index 6cc943744e12..3be2e1b1085b 100644
--- a/drivers/soc/aspeed/aspeed-p2a-ctrl.c
+++ b/drivers/soc/aspeed/aspeed-p2a-ctrl.c
@@ -19,7 +19,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
-#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
@@ -334,7 +334,6 @@ static int aspeed_p2a_ctrl_probe(struct platform_device *pdev)
struct aspeed_p2a_ctrl *misc_ctrl;
struct device *dev;
struct resource resm;
- struct device_node *node;
int rc = 0;
dev = &pdev->dev;
@@ -346,15 +345,8 @@ static int aspeed_p2a_ctrl_probe(struct platform_device *pdev)
mutex_init(&misc_ctrl->tracking);
/* optional. */
- node = of_parse_phandle(dev->of_node, "memory-region", 0);
- if (node) {
- rc = of_address_to_resource(node, 0, &resm);
- of_node_put(node);
- if (rc) {
- dev_err(dev, "Couldn't address to resource for reserved memory\n");
- return -ENODEV;
- }
-
+ rc = of_reserved_mem_region_to_resource(dev->of_node, 0, &resm);
+ if (!rc) {
misc_ctrl->mem_size = resource_size(&resm);
misc_ctrl->mem_base = resm.start;
}
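
Both Aspeed conversions rely on of_reserved_mem_region_to_resource() wrapping the former of_parse_phandle()/of_address_to_resource()/of_node_put() sequence in a single call. A minimal sketch of the resulting pattern, with locals abbreviated from the hunks:

    /* Optional reserved-memory region: absence is simply skipped. */
    rc = of_reserved_mem_region_to_resource(dev->of_node, 0, &resm);
    if (!rc) {
            mem_size = resource_size(&resm);
            mem_base = resm.start;
    }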
diff --git a/drivers/soc/aspeed/aspeed-socinfo.c b/drivers/soc/aspeed/aspeed-socinfo.c
index 3f759121dc00..67e9ac3d08ec 100644
--- a/drivers/soc/aspeed/aspeed-socinfo.c
+++ b/drivers/soc/aspeed/aspeed-socinfo.c
@@ -27,6 +27,10 @@ static struct {
{ "AST2620", 0x05010203 },
{ "AST2605", 0x05030103 },
{ "AST2625", 0x05030403 },
+ /* AST2700 */
+ { "AST2750", 0x06000003 },
+ { "AST2700", 0x06000103 },
+ { "AST2720", 0x06000203 },
};
static const char *siliconid_to_name(u32 siliconid)
diff --git a/drivers/soc/fsl/qbman/qman_test_stash.c b/drivers/soc/fsl/qbman/qman_test_stash.c
index f4d3c2146f4f..6f7597950aa3 100644
--- a/drivers/soc/fsl/qbman/qman_test_stash.c
+++ b/drivers/soc/fsl/qbman/qman_test_stash.c
@@ -103,7 +103,7 @@ static int on_all_cpus(int (*fn)(void))
{
int cpu;
- for_each_cpu(cpu, cpu_online_mask) {
+ for_each_online_cpu(cpu) {
struct bstrap bstrap = {
.fn = fn,
.started = ATOMIC_INIT(0)
diff --git a/drivers/soc/fsl/qe/gpio.c b/drivers/soc/fsl/qe/gpio.c
index 8df1e8fa86a5..c54154b404df 100644
--- a/drivers/soc/fsl/qe/gpio.c
+++ b/drivers/soc/fsl/qe/gpio.c
@@ -12,18 +12,19 @@
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/gpio/legacy-of-mm-gpiochip.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
#include <linux/slab.h>
#include <linux/export.h>
-#include <linux/property.h>
+#include <linux/platform_device.h>
#include <soc/fsl/qe/qe.h>
+#define PIN_MASK(gpio) (1UL << (QE_PIO_PINS - 1 - (gpio)))
+
struct qe_gpio_chip {
- struct of_mm_gpio_chip mm_gc;
+ struct gpio_chip gc;
+ void __iomem *regs;
spinlock_t lock;
/* shadowed data register to clear/set bits safely */
@@ -33,11 +34,9 @@ struct qe_gpio_chip {
struct qe_pio_regs saved_regs;
};
-static void qe_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
+static void qe_gpio_save_regs(struct qe_gpio_chip *qe_gc)
{
- struct qe_gpio_chip *qe_gc =
- container_of(mm_gc, struct qe_gpio_chip, mm_gc);
- struct qe_pio_regs __iomem *regs = mm_gc->regs;
+ struct qe_pio_regs __iomem *regs = qe_gc->regs;
qe_gc->cpdata = ioread32be(&regs->cpdata);
qe_gc->saved_regs.cpdata = qe_gc->cpdata;
@@ -50,20 +49,19 @@ static void qe_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
static int qe_gpio_get(struct gpio_chip *gc, unsigned int gpio)
{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
- struct qe_pio_regs __iomem *regs = mm_gc->regs;
- u32 pin_mask = 1 << (QE_PIO_PINS - 1 - gpio);
+ struct qe_gpio_chip *qe_gc = gpiochip_get_data(gc);
+ struct qe_pio_regs __iomem *regs = qe_gc->regs;
+ u32 pin_mask = PIN_MASK(gpio);
return !!(ioread32be(&regs->cpdata) & pin_mask);
}
static int qe_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct qe_gpio_chip *qe_gc = gpiochip_get_data(gc);
- struct qe_pio_regs __iomem *regs = mm_gc->regs;
+ struct qe_pio_regs __iomem *regs = qe_gc->regs;
unsigned long flags;
- u32 pin_mask = 1 << (QE_PIO_PINS - 1 - gpio);
+ u32 pin_mask = PIN_MASK(gpio);
spin_lock_irqsave(&qe_gc->lock, flags);
@@ -82,9 +80,8 @@ static int qe_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
static int qe_gpio_set_multiple(struct gpio_chip *gc,
unsigned long *mask, unsigned long *bits)
{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct qe_gpio_chip *qe_gc = gpiochip_get_data(gc);
- struct qe_pio_regs __iomem *regs = mm_gc->regs;
+ struct qe_pio_regs __iomem *regs = qe_gc->regs;
unsigned long flags;
int i;
@@ -95,9 +92,9 @@ static int qe_gpio_set_multiple(struct gpio_chip *gc,
break;
if (__test_and_clear_bit(i, mask)) {
if (test_bit(i, bits))
- qe_gc->cpdata |= (1U << (QE_PIO_PINS - 1 - i));
+ qe_gc->cpdata |= PIN_MASK(i);
else
- qe_gc->cpdata &= ~(1U << (QE_PIO_PINS - 1 - i));
+ qe_gc->cpdata &= ~PIN_MASK(i);
}
}
@@ -110,13 +107,12 @@ static int qe_gpio_set_multiple(struct gpio_chip *gc,
static int qe_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct qe_gpio_chip *qe_gc = gpiochip_get_data(gc);
unsigned long flags;
spin_lock_irqsave(&qe_gc->lock, flags);
- __par_io_config_pin(mm_gc->regs, gpio, QE_PIO_DIR_IN, 0, 0, 0);
+ __par_io_config_pin(qe_gc->regs, gpio, QE_PIO_DIR_IN, 0, 0, 0);
spin_unlock_irqrestore(&qe_gc->lock, flags);
@@ -125,7 +121,6 @@ static int qe_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
static int qe_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct qe_gpio_chip *qe_gc = gpiochip_get_data(gc);
unsigned long flags;
@@ -133,7 +128,7 @@ static int qe_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
spin_lock_irqsave(&qe_gc->lock, flags);
- __par_io_config_pin(mm_gc->regs, gpio, QE_PIO_DIR_OUT, 0, 0, 0);
+ __par_io_config_pin(qe_gc->regs, gpio, QE_PIO_DIR_OUT, 0, 0, 0);
spin_unlock_irqrestore(&qe_gc->lock, flags);
@@ -239,7 +234,7 @@ EXPORT_SYMBOL(qe_pin_free);
void qe_pin_set_dedicated(struct qe_pin *qe_pin)
{
struct qe_gpio_chip *qe_gc = qe_pin->controller;
- struct qe_pio_regs __iomem *regs = qe_gc->mm_gc.regs;
+ struct qe_pio_regs __iomem *regs = qe_gc->regs;
struct qe_pio_regs *sregs = &qe_gc->saved_regs;
int pin = qe_pin->num;
u32 mask1 = 1 << (QE_PIO_PINS - (pin + 1));
@@ -268,7 +263,6 @@ void qe_pin_set_dedicated(struct qe_pin *qe_pin)
iowrite32be(qe_gc->cpdata, &regs->cpdata);
qe_clrsetbits_be32(&regs->cpodr, mask1, sregs->cpodr & mask1);
-
spin_unlock_irqrestore(&qe_gc->lock, flags);
}
EXPORT_SYMBOL(qe_pin_set_dedicated);
@@ -283,7 +277,7 @@ EXPORT_SYMBOL(qe_pin_set_dedicated);
void qe_pin_set_gpio(struct qe_pin *qe_pin)
{
struct qe_gpio_chip *qe_gc = qe_pin->controller;
- struct qe_pio_regs __iomem *regs = qe_gc->mm_gc.regs;
+ struct qe_pio_regs __iomem *regs = qe_gc->regs;
unsigned long flags;
spin_lock_irqsave(&qe_gc->lock, flags);
@@ -295,45 +289,62 @@ void qe_pin_set_gpio(struct qe_pin *qe_pin)
}
EXPORT_SYMBOL(qe_pin_set_gpio);
-static int __init qe_add_gpiochips(void)
+static int qe_gpio_probe(struct platform_device *ofdev)
{
- struct device_node *np;
-
- for_each_compatible_node(np, NULL, "fsl,mpc8323-qe-pario-bank") {
- int ret;
- struct qe_gpio_chip *qe_gc;
- struct of_mm_gpio_chip *mm_gc;
- struct gpio_chip *gc;
-
- qe_gc = kzalloc(sizeof(*qe_gc), GFP_KERNEL);
- if (!qe_gc) {
- ret = -ENOMEM;
- goto err;
- }
+ struct device *dev = &ofdev->dev;
+ struct device_node *np = dev->of_node;
+ struct qe_gpio_chip *qe_gc;
+ struct gpio_chip *gc;
- spin_lock_init(&qe_gc->lock);
-
- mm_gc = &qe_gc->mm_gc;
- gc = &mm_gc->gc;
-
- mm_gc->save_regs = qe_gpio_save_regs;
- gc->ngpio = QE_PIO_PINS;
- gc->direction_input = qe_gpio_dir_in;
- gc->direction_output = qe_gpio_dir_out;
- gc->get = qe_gpio_get;
- gc->set = qe_gpio_set;
- gc->set_multiple = qe_gpio_set_multiple;
-
- ret = of_mm_gpiochip_add_data(np, mm_gc, qe_gc);
- if (ret)
- goto err;
- continue;
-err:
- pr_err("%pOF: registration failed with status %d\n",
- np, ret);
- kfree(qe_gc);
- /* try others anyway */
- }
- return 0;
+ qe_gc = devm_kzalloc(dev, sizeof(*qe_gc), GFP_KERNEL);
+ if (!qe_gc)
+ return -ENOMEM;
+
+ spin_lock_init(&qe_gc->lock);
+
+ gc = &qe_gc->gc;
+
+ gc->base = -1;
+ gc->ngpio = QE_PIO_PINS;
+ gc->direction_input = qe_gpio_dir_in;
+ gc->direction_output = qe_gpio_dir_out;
+ gc->get = qe_gpio_get;
+ gc->set = qe_gpio_set;
+ gc->set_multiple = qe_gpio_set_multiple;
+ gc->parent = dev;
+ gc->owner = THIS_MODULE;
+
+ gc->label = devm_kasprintf(dev, GFP_KERNEL, "%pOF", np);
+ if (!gc->label)
+ return -ENOMEM;
+
+ qe_gc->regs = devm_of_iomap(dev, np, 0, NULL);
+ if (IS_ERR(qe_gc->regs))
+ return PTR_ERR(qe_gc->regs);
+
+ qe_gpio_save_regs(qe_gc);
+
+ return devm_gpiochip_add_data(dev, gc, qe_gc);
+}
+
+static const struct of_device_id qe_gpio_match[] = {
+ {
+ .compatible = "fsl,mpc8323-qe-pario-bank",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, qe_gpio_match);
+
+static struct platform_driver qe_gpio_driver = {
+ .probe = qe_gpio_probe,
+ .driver = {
+ .name = "qe-gpio",
+ .of_match_table = qe_gpio_match,
+ },
+};
+
+static int __init qe_gpio_init(void)
+{
+ return platform_driver_register(&qe_gpio_driver);
}
-arch_initcall(qe_add_gpiochips);
+arch_initcall(qe_gpio_init);
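
The new PIN_MASK() macro encodes the QE's big-endian pin numbering, where pin 0 drives the most significant bit. A quick worked check, assuming QE_PIO_PINS is 32 as defined in soc/fsl/qe/qe.h:

    /* PIN_MASK(gpio) = 1UL << (QE_PIO_PINS - 1 - (gpio)) */
    static_assert(PIN_MASK(0)  == 0x80000000UL); /* pin 0  -> MSB */
    static_assert(PIN_MASK(31) == 0x00000001UL); /* pin 31 -> LSB */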
diff --git a/drivers/soc/fsl/qe/qmc.c b/drivers/soc/fsl/qe/qmc.c
index 36c0ccc06151..da5ea6d35618 100644
--- a/drivers/soc/fsl/qe/qmc.c
+++ b/drivers/soc/fsl/qe/qmc.c
@@ -461,9 +461,16 @@ int qmc_chan_write_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
ctrl = qmc_read16(&bd->cbd_sc);
if (ctrl & (QMC_BD_TX_R | QMC_BD_TX_UB)) {
- /* We are full ... */
- ret = -EBUSY;
- goto end;
+ if (!(ctrl & (QMC_BD_TX_R | QMC_BD_TX_I)) && bd == chan->txbd_done) {
+ if (ctrl & QMC_BD_TX_W)
+ chan->txbd_done = chan->txbds;
+ else
+ chan->txbd_done++;
+ } else {
+ /* We are full ... */
+ ret = -EBUSY;
+ goto end;
+ }
}
qmc_write16(&bd->cbd_datlen, length);
@@ -475,6 +482,10 @@ int qmc_chan_write_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
/* Activate the descriptor */
ctrl |= (QMC_BD_TX_R | QMC_BD_TX_UB);
+ if (complete)
+ ctrl |= QMC_BD_TX_I;
+ else
+ ctrl &= ~QMC_BD_TX_I;
wmb(); /* Be sure to flush the descriptor before control update */
qmc_write16(&bd->cbd_sc, ctrl);
@@ -569,9 +580,16 @@ int qmc_chan_read_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
ctrl = qmc_read16(&bd->cbd_sc);
if (ctrl & (QMC_BD_RX_E | QMC_BD_RX_UB)) {
- /* We are full ... */
- ret = -EBUSY;
- goto end;
+ if (!(ctrl & (QMC_BD_RX_E | QMC_BD_RX_I)) && bd == chan->rxbd_done) {
+ if (ctrl & QMC_BD_RX_W)
+ chan->rxbd_done = chan->rxbds;
+ else
+ chan->rxbd_done++;
+ } else {
+ /* We are full ... */
+ ret = -EBUSY;
+ goto end;
+ }
}
qmc_write16(&bd->cbd_datlen, 0); /* data length is updated by the QMC */
@@ -587,6 +605,10 @@ int qmc_chan_read_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
/* Activate the descriptor */
ctrl |= (QMC_BD_RX_E | QMC_BD_RX_UB);
+ if (complete)
+ ctrl |= QMC_BD_RX_I;
+ else
+ ctrl &= ~QMC_BD_RX_I;
wmb(); /* Be sure to flush data before descriptor activation */
qmc_write16(&bd->cbd_sc, ctrl);
@@ -1482,19 +1504,19 @@ static int qmc_setup_chan(struct qmc *qmc, struct qmc_chan *chan)
/* Init Rx BDs and set Wrap bit on last descriptor */
BUILD_BUG_ON(QMC_NB_RXBDS == 0);
- val = QMC_BD_RX_I;
for (i = 0; i < QMC_NB_RXBDS; i++) {
bd = chan->rxbds + i;
- qmc_write16(&bd->cbd_sc, val);
+ qmc_write16(&bd->cbd_sc, 0);
}
bd = chan->rxbds + QMC_NB_RXBDS - 1;
- qmc_write16(&bd->cbd_sc, val | QMC_BD_RX_W);
+ qmc_write16(&bd->cbd_sc, QMC_BD_RX_W);
/* Init Tx BDs and set Wrap bit on last descriptor */
BUILD_BUG_ON(QMC_NB_TXBDS == 0);
- val = QMC_BD_TX_I;
if (chan->mode == QMC_HDLC)
- val |= QMC_BD_TX_L | QMC_BD_TX_TC;
+ val = QMC_BD_TX_L | QMC_BD_TX_TC;
+ else
+ val = 0;
for (i = 0; i < QMC_NB_TXBDS; i++) {
bd = chan->txbds + i;
qmc_write16(&bd->cbd_sc, val);
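
A condensed sketch of the reclaim condition both qmc.c hunks add: a descriptor that still looks busy (UB set) may be recycled in-line, rather than returning -EBUSY, provided the hardware has released it (R/E clear), no completion interrupt is owed (I clear), and it is the descriptor the done-pointer expects next. The helper below is illustrative; only the flag macros come from the driver.

    /* Illustrative only: may this busy-looking TX descriptor be recycled? */
    static bool qmc_txbd_reclaimable(u16 ctrl, void __iomem *bd, void __iomem *done)
    {
            return !(ctrl & (QMC_BD_TX_R | QMC_BD_TX_I)) && bd == done;
    }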
diff --git a/drivers/soc/hisilicon/kunpeng_hccs.c b/drivers/soc/hisilicon/kunpeng_hccs.c
index 65ff45fdcac7..006fec47ea10 100644
--- a/drivers/soc/hisilicon/kunpeng_hccs.c
+++ b/drivers/soc/hisilicon/kunpeng_hccs.c
@@ -1464,7 +1464,7 @@ static ssize_t dec_lane_of_type_store(struct kobject *kobj, struct kobj_attribut
goto out;
if (!all_in_idle) {
ret = -EBUSY;
- dev_err(hdev->dev, "please don't decrese lanes on high load with %s, ret = %d.\n",
+ dev_err(hdev->dev, "please don't decrease lanes on high load with %s, ret = %d.\n",
hccs_port_type_to_name(hdev, port_type), ret);
goto out;
}
diff --git a/drivers/soc/mediatek/mtk-svs.c b/drivers/soc/mediatek/mtk-svs.c
index 7c349a94b45c..f45537546553 100644
--- a/drivers/soc/mediatek/mtk-svs.c
+++ b/drivers/soc/mediatek/mtk-svs.c
@@ -2165,10 +2165,18 @@ static struct device *svs_add_device_link(struct svs_platform *svsp,
return dev;
}
+static void svs_put_device(void *_dev)
+{
+ struct device *dev = _dev;
+
+ put_device(dev);
+}
+
static int svs_mt8192_platform_probe(struct svs_platform *svsp)
{
struct device *dev;
u32 idx;
+ int ret;
svsp->rst = devm_reset_control_get_optional(svsp->dev, "svs_rst");
if (IS_ERR(svsp->rst))
@@ -2179,6 +2187,7 @@ static int svs_mt8192_platform_probe(struct svs_platform *svsp)
if (IS_ERR(dev))
return dev_err_probe(svsp->dev, PTR_ERR(dev),
"failed to get lvts device\n");
+ put_device(dev);
for (idx = 0; idx < svsp->bank_max; idx++) {
struct svs_bank *svsb = &svsp->banks[idx];
@@ -2188,6 +2197,7 @@ static int svs_mt8192_platform_probe(struct svs_platform *svsp)
case SVSB_SWID_CPU_LITTLE:
case SVSB_SWID_CPU_BIG:
svsb->opp_dev = get_cpu_device(bdata->cpu_id);
+ get_device(svsb->opp_dev);
break;
case SVSB_SWID_CCI:
svsb->opp_dev = svs_add_device_link(svsp, "cci");
@@ -2207,6 +2217,11 @@ static int svs_mt8192_platform_probe(struct svs_platform *svsp)
return dev_err_probe(svsp->dev, PTR_ERR(svsb->opp_dev),
"failed to get OPP device for bank %d\n",
idx);
+
+ ret = devm_add_action_or_reset(svsp->dev, svs_put_device,
+ svsb->opp_dev);
+ if (ret)
+ return ret;
}
return 0;
@@ -2216,11 +2231,13 @@ static int svs_mt8183_platform_probe(struct svs_platform *svsp)
{
struct device *dev;
u32 idx;
+ int ret;
dev = svs_add_device_link(svsp, "thermal-sensor");
if (IS_ERR(dev))
return dev_err_probe(svsp->dev, PTR_ERR(dev),
"failed to get thermal device\n");
+ put_device(dev);
for (idx = 0; idx < svsp->bank_max; idx++) {
struct svs_bank *svsb = &svsp->banks[idx];
@@ -2230,6 +2247,7 @@ static int svs_mt8183_platform_probe(struct svs_platform *svsp)
case SVSB_SWID_CPU_LITTLE:
case SVSB_SWID_CPU_BIG:
svsb->opp_dev = get_cpu_device(bdata->cpu_id);
+ get_device(svsb->opp_dev);
break;
case SVSB_SWID_CCI:
svsb->opp_dev = svs_add_device_link(svsp, "cci");
@@ -2246,6 +2264,11 @@ static int svs_mt8183_platform_probe(struct svs_platform *svsp)
return dev_err_probe(svsp->dev, PTR_ERR(svsb->opp_dev),
"failed to get OPP device for bank %d\n",
idx);
+
+ ret = devm_add_action_or_reset(svsp->dev, svs_put_device,
+ svsb->opp_dev);
+ if (ret)
+ return ret;
}
return 0;
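
The pattern added to both mtk-svs probe paths pairs each taken device reference with a device-managed release, so error paths and driver unbind drop the reference automatically. A minimal sketch of the idiom, using the names from the hunks:

    get_device(svsb->opp_dev);
    ret = devm_add_action_or_reset(svsp->dev, svs_put_device, svsb->opp_dev);
    if (ret)
            return ret; /* on failure the action has already run put_device() */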
diff --git a/drivers/soc/qcom/icc-bwmon.c b/drivers/soc/qcom/icc-bwmon.c
index 3dfa448bf8cf..597f9025e422 100644
--- a/drivers/soc/qcom/icc-bwmon.c
+++ b/drivers/soc/qcom/icc-bwmon.c
@@ -656,6 +656,9 @@ static irqreturn_t bwmon_intr_thread(int irq, void *dev_id)
if (IS_ERR(target_opp) && PTR_ERR(target_opp) == -ERANGE)
target_opp = dev_pm_opp_find_bw_floor(bwmon->dev, &bw_kbps, 0);
+ if (IS_ERR(target_opp))
+ return IRQ_HANDLED;
+
bwmon->target_kbps = bw_kbps;
bw_kbps--;
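
The added IS_ERR() guard matters because both OPP lookups can fail; a sketch of the resulting control flow, assuming the ceil lookup that precedes this hunk in bwmon_intr_thread():

    target_opp = dev_pm_opp_find_bw_ceil(bwmon->dev, &bw_kbps, 0);
    if (IS_ERR(target_opp) && PTR_ERR(target_opp) == -ERANGE)
            target_opp = dev_pm_opp_find_bw_floor(bwmon->dev, &bw_kbps, 0);
    if (IS_ERR(target_opp))
            return IRQ_HANDLED; /* no usable OPP: ack the IRQ, keep the old vote */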
diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
index 192edc3f64dc..857ead56b37d 100644
--- a/drivers/soc/qcom/llcc-qcom.c
+++ b/drivers/soc/qcom/llcc-qcom.c
@@ -4409,7 +4409,6 @@ static struct regmap *qcom_llcc_init_mmio(struct platform_device *pdev, u8 index
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
- .fast_io = true,
};
base = devm_platform_ioremap_resource(pdev, index);
diff --git a/drivers/soc/qcom/mdt_loader.c b/drivers/soc/qcom/mdt_loader.c
index 5710ac0c07a8..a5c80d4fcc36 100644
--- a/drivers/soc/qcom/mdt_loader.c
+++ b/drivers/soc/qcom/mdt_loader.c
@@ -304,7 +304,7 @@ out:
}
EXPORT_SYMBOL_GPL(qcom_mdt_pas_init);
-static bool qcom_mdt_bins_are_split(const struct firmware *fw, const char *fw_name)
+static bool qcom_mdt_bins_are_split(const struct firmware *fw)
{
const struct elf32_phdr *phdrs;
const struct elf32_hdr *ehdr;
@@ -333,9 +333,9 @@ static bool qcom_mdt_bins_are_split(const struct firmware *fw, const char *fw_na
}
static int __qcom_mdt_load(struct device *dev, const struct firmware *fw,
- const char *fw_name, int pas_id, void *mem_region,
+ const char *fw_name, void *mem_region,
phys_addr_t mem_phys, size_t mem_size,
- phys_addr_t *reloc_base, bool pas_init)
+ phys_addr_t *reloc_base)
{
const struct elf32_phdr *phdrs;
const struct elf32_phdr *phdr;
@@ -355,7 +355,7 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw,
if (!mdt_header_valid(fw))
return -EINVAL;
- is_split = qcom_mdt_bins_are_split(fw, fw_name);
+ is_split = qcom_mdt_bins_are_split(fw);
ehdr = (struct elf32_hdr *)fw->data;
phdrs = (struct elf32_phdr *)(fw->data + ehdr->e_phoff);
@@ -460,8 +460,8 @@ int qcom_mdt_load(struct device *dev, const struct firmware *fw,
if (ret)
return ret;
- return __qcom_mdt_load(dev, fw, firmware, pas_id, mem_region, mem_phys,
- mem_size, reloc_base, true);
+ return __qcom_mdt_load(dev, fw, firmware, mem_region, mem_phys,
+ mem_size, reloc_base);
}
EXPORT_SYMBOL_GPL(qcom_mdt_load);
@@ -470,7 +470,6 @@ EXPORT_SYMBOL_GPL(qcom_mdt_load);
* @dev: device handle to associate resources with
* @fw: firmware object for the mdt file
* @firmware: name of the firmware, for construction of segment file names
- * @pas_id: PAS identifier
* @mem_region: allocated memory region to load firmware into
* @mem_phys: physical address of allocated memory region
* @mem_size: size of the allocated memory region
@@ -479,12 +478,11 @@ EXPORT_SYMBOL_GPL(qcom_mdt_load);
* Returns 0 on success, negative errno otherwise.
*/
int qcom_mdt_load_no_init(struct device *dev, const struct firmware *fw,
- const char *firmware, int pas_id,
- void *mem_region, phys_addr_t mem_phys,
+ const char *firmware, void *mem_region, phys_addr_t mem_phys,
size_t mem_size, phys_addr_t *reloc_base)
{
- return __qcom_mdt_load(dev, fw, firmware, pas_id, mem_region, mem_phys,
- mem_size, reloc_base, false);
+ return __qcom_mdt_load(dev, fw, firmware, mem_region, mem_phys,
+ mem_size, reloc_base);
}
EXPORT_SYMBOL_GPL(qcom_mdt_load_no_init);
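
Since qcom_mdt_load_no_init() drops its pas_id parameter here, any remaining caller needs the seven-argument form. A hedged call-site sketch (variable names illustrative):

    ret = qcom_mdt_load_no_init(dev, fw, fw_name, mem_region,
                                mem_phys, mem_size, &reloc_base);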
diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c
index 3c3b796333a6..cd1779b6a91a 100644
--- a/drivers/soc/qcom/qcom-geni-se.c
+++ b/drivers/soc/qcom/qcom-geni-se.c
@@ -1,11 +1,16 @@
// SPDX-License-Identifier: GPL-2.0
-// Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
/* Disable MMIO tracing to prevent excessive logging of unwanted MMIO traces */
#define __DISABLE_TRACE_MMIO__
#include <linux/acpi.h>
+#include <linux/bitfield.h>
#include <linux/clk.h>
+#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
@@ -110,22 +115,94 @@ struct geni_se_desc {
static const char * const icc_path_names[] = {"qup-core", "qup-config",
"qup-memory"};
-#define QUP_HW_VER_REG 0x4
+static const char * const protocol_name[] = { "None", "SPI", "UART", "I2C", "I3C", "SPI SLAVE" };
+
+/**
+ * struct se_fw_hdr - Serial Engine firmware configuration header
+ *
+ * This structure defines the SE firmware header, which together with the
+ * firmware payload is stored in individual ELF segments.
+ *
+ * @magic: Set to 'SEFW'.
+ * @version: Structure version number.
+ * @core_version: QUPV3 hardware version.
+ * @serial_protocol: Serial protocol type, as encoded in GENI_FW_REVISION.
+ * @fw_version: Firmware version, from GENI_FW_REVISION.
+ * @cfg_version: Configuration version, from GENI_INIT_CFG_REVISION.
+ * @fw_size_in_items: Number of 32-bit words in GENI_FW_RAM.
+ * @fw_offset: Byte offset to GENI_FW_RAM array.
+ * @cfg_size_in_items: Number of GENI_FW_CFG index/value pairs.
+ * @cfg_idx_offset: Byte offset to GENI_FW_CFG index array.
+ * @cfg_val_offset: Byte offset to GENI_FW_CFG values array.
+ */
+struct se_fw_hdr {
+ __le32 magic;
+ __le32 version;
+ __le32 core_version;
+ __le16 serial_protocol;
+ __le16 fw_version;
+ __le16 cfg_version;
+ __le16 fw_size_in_items;
+ __le16 fw_offset;
+ __le16 cfg_size_in_items;
+ __le16 cfg_idx_offset;
+ __le16 cfg_val_offset;
+};
+
+/* Magic numbers */
+#define SE_MAGIC_NUM 0x57464553
+
+#define MAX_GENI_CFG_RAMn_CNT 455
+
+#define MI_PBT_NON_PAGED_SEGMENT 0x0
+#define MI_PBT_HASH_SEGMENT 0x2
+#define MI_PBT_NOTUSED_SEGMENT 0x3
+#define MI_PBT_SHARED_SEGMENT 0x4
+
+#define MI_PBT_FLAG_PAGE_MODE BIT(20)
+#define MI_PBT_FLAG_SEGMENT_TYPE GENMASK(26, 24)
+#define MI_PBT_FLAG_ACCESS_TYPE GENMASK(23, 21)
+
+#define MI_PBT_PAGE_MODE_VALUE(x) FIELD_GET(MI_PBT_FLAG_PAGE_MODE, x)
+
+#define MI_PBT_SEGMENT_TYPE_VALUE(x) FIELD_GET(MI_PBT_FLAG_SEGMENT_TYPE, x)
+
+#define MI_PBT_ACCESS_TYPE_VALUE(x) FIELD_GET(MI_PBT_FLAG_ACCESS_TYPE, x)
+
+#define M_COMMON_GENI_M_IRQ_EN (GENMASK(6, 1) | \
+ M_IO_DATA_DEASSERT_EN | \
+ M_IO_DATA_ASSERT_EN | M_RX_FIFO_RD_ERR_EN | \
+ M_RX_FIFO_WR_ERR_EN | M_TX_FIFO_RD_ERR_EN | \
+ M_TX_FIFO_WR_ERR_EN)
+
+/* Common QUPV3 registers */
+#define QUPV3_HW_VER_REG 0x4
+#define QUPV3_SE_AHB_M_CFG 0x118
+#define QUPV3_COMMON_CFG 0x120
+#define QUPV3_COMMON_CGC_CTRL 0x21c
+
+/* QUPV3_COMMON_CFG fields */
+#define FAST_SWITCH_TO_HIGH_DISABLE BIT(0)
+
+/* QUPV3_SE_AHB_M_CFG fields */
+#define AHB_M_CLK_CGC_ON BIT(0)
+
+/* QUPV3_COMMON_CGC_CTRL fields */
+#define COMMON_CSR_SLV_CLK_CGC_ON BIT(0)
/* Common SE registers */
-#define GENI_INIT_CFG_REVISION 0x0
-#define GENI_S_INIT_CFG_REVISION 0x4
-#define GENI_OUTPUT_CTRL 0x24
-#define GENI_CGC_CTRL 0x28
-#define GENI_CLK_CTRL_RO 0x60
-#define GENI_FW_S_REVISION_RO 0x6c
+#define SE_GENI_INIT_CFG_REVISION 0x0
+#define SE_GENI_S_INIT_CFG_REVISION 0x4
+#define SE_GENI_CGC_CTRL 0x28
+#define SE_GENI_CLK_CTRL_RO 0x60
+#define SE_GENI_FW_S_REVISION_RO 0x6c
+#define SE_GENI_CFG_REG0 0x100
#define SE_GENI_BYTE_GRAN 0x254
#define SE_GENI_TX_PACKING_CFG0 0x260
#define SE_GENI_TX_PACKING_CFG1 0x264
#define SE_GENI_RX_PACKING_CFG0 0x284
#define SE_GENI_RX_PACKING_CFG1 0x288
-#define SE_GENI_M_GP_LENGTH 0x910
-#define SE_GENI_S_GP_LENGTH 0x914
+#define SE_GENI_S_IRQ_ENABLE 0x644
#define SE_DMA_TX_PTR_L 0xc30
#define SE_DMA_TX_PTR_H 0xc34
#define SE_DMA_TX_ATTR 0xc38
@@ -142,12 +219,20 @@ static const char * const icc_path_names[] = {"qup-core", "qup-config",
#define SE_DMA_RX_IRQ_EN 0xd48
#define SE_DMA_RX_IRQ_EN_SET 0xd4c
#define SE_DMA_RX_IRQ_EN_CLR 0xd50
-#define SE_DMA_RX_LEN_IN 0xd54
#define SE_DMA_RX_MAX_BURST 0xd5c
#define SE_DMA_RX_FLUSH 0xd60
#define SE_GSI_EVENT_EN 0xe18
#define SE_IRQ_EN 0xe1c
#define SE_DMA_GENERAL_CFG 0xe30
+#define SE_GENI_FW_REVISION 0x1000
+#define SE_GENI_S_FW_REVISION 0x1004
+#define SE_GENI_CFG_RAMN 0x1010
+#define SE_GENI_CLK_CTRL 0x2000
+#define SE_DMA_IF_EN 0x2004
+#define SE_FIFO_IF_DISABLE 0x2008
+
+/* GENI_FW_REVISION_RO fields */
+#define FW_REV_VERSION_MSK GENMASK(7, 0)
/* GENI_OUTPUT_CTRL fields */
#define DEFAULT_IO_OUTPUT_CTRL_MSK GENMASK(6, 0)
@@ -179,13 +264,22 @@ static const char * const icc_path_names[] = {"qup-core", "qup-config",
/* SE_DMA_GENERAL_CFG */
#define DMA_RX_CLK_CGC_ON BIT(0)
#define DMA_TX_CLK_CGC_ON BIT(1)
-#define DMA_AHB_SLV_CFG_ON BIT(2)
+#define DMA_AHB_SLV_CLK_CGC_ON BIT(2)
#define AHB_SEC_SLV_CLK_CGC_ON BIT(3)
#define DUMMY_RX_NON_BUFFERABLE BIT(4)
#define RX_DMA_ZERO_PADDING_EN BIT(5)
#define RX_DMA_IRQ_DELAY_MSK GENMASK(8, 6)
#define RX_DMA_IRQ_DELAY_SHFT 6
+/* GENI_CLK_CTRL fields */
+#define SER_CLK_SEL BIT(0)
+
+/* GENI_DMA_IF_EN fields */
+#define DMA_IF_EN BIT(0)
+
+#define geni_setbits32(_addr, _v) writel(readl(_addr) | (_v), _addr)
+#define geni_clrbits32(_addr, _v) writel(readl(_addr) & ~(_v), _addr)
+
/**
* geni_se_get_qup_hw_version() - Read the QUP wrapper Hardware version
* @se: Pointer to the corresponding serial engine.
@@ -196,7 +290,7 @@ u32 geni_se_get_qup_hw_version(struct geni_se *se)
{
struct geni_wrapper *wrapper = se->wrapper;
- return readl_relaxed(wrapper->base + QUP_HW_VER_REG);
+ return readl_relaxed(wrapper->base + QUPV3_HW_VER_REG);
}
EXPORT_SYMBOL_GPL(geni_se_get_qup_hw_version);
@@ -220,12 +314,12 @@ static void geni_se_io_init(void __iomem *base)
{
u32 val;
- val = readl_relaxed(base + GENI_CGC_CTRL);
+ val = readl_relaxed(base + SE_GENI_CGC_CTRL);
val |= DEFAULT_CGC_EN;
- writel_relaxed(val, base + GENI_CGC_CTRL);
+ writel_relaxed(val, base + SE_GENI_CGC_CTRL);
val = readl_relaxed(base + SE_DMA_GENERAL_CFG);
- val |= AHB_SEC_SLV_CLK_CGC_ON | DMA_AHB_SLV_CFG_ON;
+ val |= AHB_SEC_SLV_CLK_CGC_ON | DMA_AHB_SLV_CLK_CGC_ON;
val |= DMA_TX_CLK_CGC_ON | DMA_RX_CLK_CGC_ON;
writel_relaxed(val, base + SE_DMA_GENERAL_CFG);
@@ -658,9 +752,12 @@ int geni_se_clk_freq_match(struct geni_se *se, unsigned long req_freq,
}
EXPORT_SYMBOL_GPL(geni_se_clk_freq_match);
-#define GENI_SE_DMA_DONE_EN BIT(0)
-#define GENI_SE_DMA_EOT_EN BIT(1)
-#define GENI_SE_DMA_AHB_ERR_EN BIT(2)
+#define GENI_SE_DMA_DONE_EN BIT(0)
+#define GENI_SE_DMA_EOT_EN BIT(1)
+#define GENI_SE_DMA_AHB_ERR_EN BIT(2)
+#define GENI_SE_DMA_RESET_DONE_EN BIT(3)
+#define GENI_SE_DMA_FLUSH_DONE BIT(4)
+
#define GENI_SE_DMA_EOT_BUF BIT(0)
/**
@@ -891,6 +988,377 @@ int geni_icc_disable(struct geni_se *se)
}
EXPORT_SYMBOL_GPL(geni_icc_disable);
+/**
+ * geni_find_protocol_fw() - Locate and validate SE firmware for a protocol.
+ * @dev: Pointer to the device structure.
+ * @fw: Pointer to the firmware image.
+ * @protocol: Expected serial engine protocol type.
+ *
+ * Identifies the appropriate firmware image or configuration required for a
+ * specific communication protocol instance running on a Qualcomm GENI
+ * controller.
+ *
+ * Return: pointer to a valid 'struct se_fw_hdr' if found, or NULL otherwise.
+ */
+static struct se_fw_hdr *geni_find_protocol_fw(struct device *dev, const struct firmware *fw,
+ enum geni_se_protocol_type protocol)
+{
+ const struct elf32_hdr *ehdr;
+ const struct elf32_phdr *phdrs;
+ const struct elf32_phdr *phdr;
+ struct se_fw_hdr *sefw;
+ u32 fw_end, cfg_idx_end, cfg_val_end;
+ u16 fw_size;
+ int i;
+
+ if (!fw || fw->size < sizeof(struct elf32_hdr))
+ return NULL;
+
+ ehdr = (const struct elf32_hdr *)fw->data;
+ phdrs = (const struct elf32_phdr *)(fw->data + ehdr->e_phoff);
+
+ /*
+ * The firmware is expected to have at least two program headers (segments).
+ * One for metadata and the other for the actual protocol-specific firmware.
+ */
+ if (ehdr->e_phnum < 2) {
+ dev_err(dev, "Invalid firmware: less than 2 program headers\n");
+ return NULL;
+ }
+
+ for (i = 0; i < ehdr->e_phnum; i++) {
+ phdr = &phdrs[i];
+
+ if (fw->size < phdr->p_offset + phdr->p_filesz) {
+ dev_err(dev, "Firmware size (%zu) < expected offset (%u) + size (%u)\n",
+ fw->size, phdr->p_offset, phdr->p_filesz);
+ return NULL;
+ }
+
+ if (phdr->p_type != PT_LOAD || !phdr->p_memsz)
+ continue;
+
+ if (MI_PBT_PAGE_MODE_VALUE(phdr->p_flags) != MI_PBT_NON_PAGED_SEGMENT ||
+ MI_PBT_SEGMENT_TYPE_VALUE(phdr->p_flags) == MI_PBT_HASH_SEGMENT ||
+ MI_PBT_ACCESS_TYPE_VALUE(phdr->p_flags) == MI_PBT_NOTUSED_SEGMENT ||
+ MI_PBT_ACCESS_TYPE_VALUE(phdr->p_flags) == MI_PBT_SHARED_SEGMENT)
+ continue;
+
+ if (phdr->p_filesz < sizeof(struct se_fw_hdr))
+ continue;
+
+ sefw = (struct se_fw_hdr *)(fw->data + phdr->p_offset);
+ fw_size = le16_to_cpu(sefw->fw_size_in_items);
+ fw_end = le16_to_cpu(sefw->fw_offset) + fw_size * sizeof(u32);
+ cfg_idx_end = le16_to_cpu(sefw->cfg_idx_offset) +
+ le16_to_cpu(sefw->cfg_size_in_items) * sizeof(u8);
+ cfg_val_end = le16_to_cpu(sefw->cfg_val_offset) +
+ le16_to_cpu(sefw->cfg_size_in_items) * sizeof(u32);
+
+ if (le32_to_cpu(sefw->magic) != SE_MAGIC_NUM || le32_to_cpu(sefw->version) != 1)
+ continue;
+
+ if (le32_to_cpu(sefw->serial_protocol) != protocol)
+ continue;
+
+ if (fw_size % 2 != 0) {
+ fw_size++;
+ sefw->fw_size_in_items = cpu_to_le16(fw_size);
+ }
+
+ if (fw_size >= MAX_GENI_CFG_RAMn_CNT) {
+ dev_err(dev,
+ "Firmware size (%u) exceeds max allowed RAMn count (%u)\n",
+ fw_size, MAX_GENI_CFG_RAMn_CNT);
+ continue;
+ }
+
+ if (fw_end > phdr->p_filesz || cfg_idx_end > phdr->p_filesz ||
+ cfg_val_end > phdr->p_filesz) {
+ dev_err(dev, "Truncated or corrupt SE FW segment found at index %d\n", i);
+ continue;
+ }
+
+ return sefw;
+ }
+
+ dev_err(dev, "Failed to get %s protocol firmware\n", protocol_name[protocol]);
+ return NULL;
+}
+
+/**
+ * geni_configure_xfer_mode() - Set the transfer mode.
+ * @se: Pointer to the concerned serial engine.
+ * @mode: SE data transfer mode.
+ *
+ * Set the transfer mode to either FIFO or DMA according to the mode specified
+ * by the protocol driver.
+ *
+ * Return: 0 if successful, otherwise return an error value.
+ */
+static int geni_configure_xfer_mode(struct geni_se *se, enum geni_se_xfer_mode mode)
+{
+ /* Configure SE FIFO, DMA or GSI mode. */
+ switch (mode) {
+ case GENI_GPI_DMA:
+ geni_setbits32(se->base + SE_GENI_DMA_MODE_EN, GENI_DMA_MODE_EN);
+ writel(0x0, se->base + SE_IRQ_EN);
+ writel(DMA_RX_EVENT_EN | DMA_TX_EVENT_EN | GENI_M_EVENT_EN | GENI_S_EVENT_EN,
+ se->base + SE_GSI_EVENT_EN);
+ break;
+
+ case GENI_SE_FIFO:
+ geni_clrbits32(se->base + SE_GENI_DMA_MODE_EN, GENI_DMA_MODE_EN);
+ writel(DMA_RX_IRQ_EN | DMA_TX_IRQ_EN | GENI_M_IRQ_EN | GENI_S_IRQ_EN,
+ se->base + SE_IRQ_EN);
+ writel(0x0, se->base + SE_GSI_EVENT_EN);
+ break;
+
+ case GENI_SE_DMA:
+ geni_setbits32(se->base + SE_GENI_DMA_MODE_EN, GENI_DMA_MODE_EN);
+ writel(DMA_RX_IRQ_EN | DMA_TX_IRQ_EN | GENI_M_IRQ_EN | GENI_S_IRQ_EN,
+ se->base + SE_IRQ_EN);
+ writel(0x0, se->base + SE_GSI_EVENT_EN);
+ break;
+
+ default:
+ dev_err(se->dev, "Invalid geni-se transfer mode: %d\n", mode);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * geni_enable_interrupts() - Enable interrupts.
+ * @se: Pointer to the concerned serial engine.
+ *
+ * Enable the required interrupts during the firmware load process.
+ */
+static void geni_enable_interrupts(struct geni_se *se)
+{
+ u32 val;
+
+ /* Enable required interrupts. */
+ writel(M_COMMON_GENI_M_IRQ_EN, se->base + SE_GENI_M_IRQ_EN);
+
+ val = S_CMD_OVERRUN_EN | S_ILLEGAL_CMD_EN | S_CMD_CANCEL_EN | S_CMD_ABORT_EN |
+ S_GP_IRQ_0_EN | S_GP_IRQ_1_EN | S_GP_IRQ_2_EN | S_GP_IRQ_3_EN |
+ S_RX_FIFO_WR_ERR_EN | S_RX_FIFO_RD_ERR_EN;
+ writel(val, se->base + SE_GENI_S_IRQ_ENABLE);
+
+ /* DMA mode configuration. */
+ val = GENI_SE_DMA_RESET_DONE_EN | GENI_SE_DMA_AHB_ERR_EN | GENI_SE_DMA_DONE_EN;
+ writel(val, se->base + SE_DMA_TX_IRQ_EN_SET);
+ val = GENI_SE_DMA_FLUSH_DONE | GENI_SE_DMA_RESET_DONE_EN | GENI_SE_DMA_AHB_ERR_EN |
+ GENI_SE_DMA_DONE_EN;
+ writel(val, se->base + SE_DMA_RX_IRQ_EN_SET);
+}
+
+/**
+ * geni_write_fw_revision() - Write the firmware revision.
+ * @se: Pointer to the concerned serial engine.
+ * @serial_protocol: serial protocol type.
+ * @fw_version: QUP firmware version.
+ *
+ * Write the firmware revision and protocol into the respective register.
+ */
+static void geni_write_fw_revision(struct geni_se *se, u16 serial_protocol, u16 fw_version)
+{
+ u32 reg;
+
+ reg = FIELD_PREP(FW_REV_PROTOCOL_MSK, serial_protocol);
+ reg |= FIELD_PREP(FW_REV_VERSION_MSK, fw_version);
+
+ writel(reg, se->base + SE_GENI_FW_REVISION);
+ writel(reg, se->base + SE_GENI_S_FW_REVISION);
+}
+
+/**
+ * geni_load_se_fw() - Load Serial Engine specific firmware.
+ * @se: Pointer to the concerned serial engine.
+ * @fw: Pointer to the firmware structure.
+ * @mode: SE data transfer mode.
+ * @protocol: Protocol type to be used with the SE (e.g., UART, SPI, I2C).
+ *
+ * Load the protocol firmware into the IRAM of the Serial Engine.
+ *
+ * Return: 0 if successful, otherwise return an error value.
+ */
+static int geni_load_se_fw(struct geni_se *se, const struct firmware *fw,
+ enum geni_se_xfer_mode mode, enum geni_se_protocol_type protocol)
+{
+ const u32 *fw_data, *cfg_val_arr;
+ const u8 *cfg_idx_arr;
+ u32 i, reg_value;
+ int ret;
+ struct se_fw_hdr *hdr;
+
+ hdr = geni_find_protocol_fw(se->dev, fw, protocol);
+ if (!hdr)
+ return -EINVAL;
+
+ fw_data = (const u32 *)((u8 *)hdr + le16_to_cpu(hdr->fw_offset));
+ cfg_idx_arr = (const u8 *)hdr + le16_to_cpu(hdr->cfg_idx_offset);
+ cfg_val_arr = (const u32 *)((u8 *)hdr + le16_to_cpu(hdr->cfg_val_offset));
+
+ ret = geni_icc_set_bw(se);
+ if (ret)
+ return ret;
+
+ ret = geni_icc_enable(se);
+ if (ret)
+ return ret;
+
+ ret = geni_se_resources_on(se);
+ if (ret)
+ goto out_icc_disable;
+
+ /*
+ * Disable high-priority interrupts until all currently executing
+ * low-priority interrupts have been fully handled.
+ */
+ geni_setbits32(se->wrapper->base + QUPV3_COMMON_CFG, FAST_SWITCH_TO_HIGH_DISABLE);
+
+ /* Set AHB_M_CLK_CGC_ON to let hardware control the SE wrapper CGC clock. */
+ geni_setbits32(se->wrapper->base + QUPV3_SE_AHB_M_CFG, AHB_M_CLK_CGC_ON);
+
+ /* Let hardware control the common CGC. */
+ geni_setbits32(se->wrapper->base + QUPV3_COMMON_CGC_CTRL, COMMON_CSR_SLV_CLK_CGC_ON);
+
+ /*
+ * Clear GENI_OUTPUT_CTRL so no output lines are driven while the
+ * firmware is being programmed; the default output mask is restored
+ * once configuration completes.
+ */
+ writel(0x0, se->base + GENI_OUTPUT_CTRL);
+
+ /* Set SCLK and HCLK to program RAM */
+ geni_setbits32(se->base + SE_GENI_CGC_CTRL, PROG_RAM_SCLK_OFF | PROG_RAM_HCLK_OFF);
+ writel(0x0, se->base + SE_GENI_CLK_CTRL);
+ geni_clrbits32(se->base + SE_GENI_CGC_CTRL, PROG_RAM_SCLK_OFF | PROG_RAM_HCLK_OFF);
+
+ /* Enable required clocks for DMA CSR, TX and RX. */
+ reg_value = AHB_SEC_SLV_CLK_CGC_ON | DMA_AHB_SLV_CLK_CGC_ON |
+ DMA_TX_CLK_CGC_ON | DMA_RX_CLK_CGC_ON;
+ geni_setbits32(se->base + SE_DMA_GENERAL_CFG, reg_value);
+
+ /* Let hardware control CGC by default. */
+ writel(DEFAULT_CGC_EN, se->base + SE_GENI_CGC_CTRL);
+
+ /* Set version of the configuration register part of firmware. */
+ writel(le16_to_cpu(hdr->cfg_version), se->base + SE_GENI_INIT_CFG_REVISION);
+ writel(le16_to_cpu(hdr->cfg_version), se->base + SE_GENI_S_INIT_CFG_REVISION);
+
+ /* Configure GENI primitive table. */
+ for (i = 0; i < le16_to_cpu(hdr->cfg_size_in_items); i++)
+ writel(cfg_val_arr[i],
+ se->base + SE_GENI_CFG_REG0 + (cfg_idx_arr[i] * sizeof(u32)));
+
+ /* Configure the threshold at which the RX_RFR_WATERMARK condition asserts. */
+ reg_value = geni_se_get_rx_fifo_depth(se);
+ writel(reg_value - 2, se->base + SE_GENI_RX_RFR_WATERMARK_REG);
+
+ /* Restore the default IO output lines now that programming is done. */
+ geni_setbits32(se->base + GENI_OUTPUT_CTRL, DEFAULT_IO_OUTPUT_CTRL_MSK);
+
+ ret = geni_configure_xfer_mode(se, mode);
+ if (ret)
+ goto out_resources_off;
+
+ geni_enable_interrupts(se);
+
+ geni_write_fw_revision(se, le16_to_cpu(hdr->serial_protocol), le16_to_cpu(hdr->fw_version));
+
+ /* Program RAM address space. */
+ memcpy_toio(se->base + SE_GENI_CFG_RAMN, fw_data,
+ le16_to_cpu(hdr->fw_size_in_items) * sizeof(u32));
+
+ /* Put default values on GENI's output pads. */
+ writel_relaxed(0x1, se->base + GENI_FORCE_DEFAULT_REG);
+
+ /* Toggle SCLK/HCLK from high to low to finalize RAM programming and apply config. */
+ geni_setbits32(se->base + SE_GENI_CGC_CTRL, PROG_RAM_SCLK_OFF | PROG_RAM_HCLK_OFF);
+ geni_setbits32(se->base + SE_GENI_CLK_CTRL, SER_CLK_SEL);
+ geni_clrbits32(se->base + SE_GENI_CGC_CTRL, PROG_RAM_SCLK_OFF | PROG_RAM_HCLK_OFF);
+
+ /* Enable the serial engine DMA interface. */
+ geni_setbits32(se->base + SE_DMA_IF_EN, DMA_IF_EN);
+
+ /* Enable or disable the FIFO interface of the serial engine. */
+ if (mode == GENI_SE_FIFO)
+ geni_clrbits32(se->base + SE_FIFO_IF_DISABLE, FIFO_IF_DISABLE);
+ else
+ geni_setbits32(se->base + SE_FIFO_IF_DISABLE, FIFO_IF_DISABLE);
+
+out_resources_off:
+ geni_se_resources_off(se);
+
+out_icc_disable:
+ geni_icc_disable(se);
+ return ret;
+}
+
+/**
+ * geni_load_se_firmware() - Load firmware for SE based on protocol
+ * @se: Pointer to the concerned serial engine.
+ * @protocol: Protocol type to be used with the SE (e.g., UART, SPI, I2C).
+ *
+ * Retrieves the firmware name from device properties and sets the transfer mode
+ * (FIFO or GSI DMA) based on device tree configuration. Enforces FIFO mode for
+ * UART protocol due to lack of GSI DMA support. Requests the firmware and loads
+ * it into the SE.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int geni_load_se_firmware(struct geni_se *se, enum geni_se_protocol_type protocol)
+{
+ const char *fw_name;
+ const struct firmware *fw;
+ enum geni_se_xfer_mode mode = GENI_SE_FIFO;
+ int ret;
+
+ if (protocol >= ARRAY_SIZE(protocol_name)) {
+ dev_err(se->dev, "Invalid geni-se protocol: %d", protocol);
+ return -EINVAL;
+ }
+
+ ret = device_property_read_string(se->wrapper->dev, "firmware-name", &fw_name);
+ if (ret) {
+ dev_err(se->dev, "Failed to read firmware-name property: %d\n", ret);
+ return -EINVAL;
+ }
+
+ if (of_property_read_bool(se->dev->of_node, "qcom,enable-gsi-dma"))
+ mode = GENI_GPI_DMA;
+
+ /* GSI mode is not supported by the UART driver, so force FIFO mode. */
+ if (protocol == GENI_SE_UART)
+ mode = GENI_SE_FIFO;
+
+ ret = request_firmware(&fw, fw_name, se->dev);
+ if (ret) {
+ if (ret == -ENOENT)
+ return -EPROBE_DEFER;
+
+ dev_err(se->dev, "Failed to request firmware '%s' for protocol %d: ret: %d\n",
+ fw_name, protocol, ret);
+ return ret;
+ }
+
+ ret = geni_load_se_fw(se, fw, mode, protocol);
+ release_firmware(fw);
+
+ if (ret) {
+ dev_err(se->dev, "Failed to load SE firmware for protocol %d: ret: %d\n",
+ protocol, ret);
+ return ret;
+ }
+
+ dev_dbg(se->dev, "Firmware load for %s protocol is successful for xfer mode: %d\n",
+ protocol_name[protocol], mode);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(geni_load_se_firmware);
+
static int geni_se_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
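
A hedged sketch of how a GENI protocol driver might invoke the new loader from its probe path; only geni_load_se_firmware() and the -EPROBE_DEFER behaviour come from the hunk, while the surrounding fragment and the GENI_SE_SPI enum value are assumptions:

    ret = geni_load_se_firmware(se, GENI_SE_SPI);
    if (ret == -EPROBE_DEFER)
            return ret; /* firmware image not yet available */
    if (ret)
            return dev_err_probe(se->dev, ret, "SE firmware load failed\n");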
diff --git a/drivers/soc/qcom/qcom_pd_mapper.c b/drivers/soc/qcom/qcom_pd_mapper.c
index 3abea241b1c4..6384f271953d 100644
--- a/drivers/soc/qcom/qcom_pd_mapper.c
+++ b/drivers/soc/qcom/qcom_pd_mapper.c
@@ -584,6 +584,7 @@ static const struct of_device_id qcom_pdm_domains[] __maybe_unused = {
{ .compatible = "qcom,sm8450", .data = sm8350_domains, },
{ .compatible = "qcom,sm8550", .data = sm8550_domains, },
{ .compatible = "qcom,sm8650", .data = sm8550_domains, },
+ { .compatible = "qcom,sm8750", .data = sm8550_domains, },
{ .compatible = "qcom,x1e80100", .data = x1e80100_domains, },
{ .compatible = "qcom,x1p42100", .data = x1e80100_domains, },
{},
diff --git a/drivers/soc/qcom/ramp_controller.c b/drivers/soc/qcom/ramp_controller.c
index 349bdfbc61ef..15782bed2925 100644
--- a/drivers/soc/qcom/ramp_controller.c
+++ b/drivers/soc/qcom/ramp_controller.c
@@ -229,7 +229,6 @@ static const struct regmap_config qrc_regmap_config = {
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x68,
- .fast_io = true,
};
static const struct reg_sequence msm8976_cfg_dfs_sid[] = {
diff --git a/drivers/soc/qcom/rpm_master_stats.c b/drivers/soc/qcom/rpm_master_stats.c
index 49e4f9457279..c7788337e164 100644
--- a/drivers/soc/qcom/rpm_master_stats.c
+++ b/drivers/soc/qcom/rpm_master_stats.c
@@ -78,7 +78,7 @@ static int master_stats_probe(struct platform_device *pdev)
if (count < 0)
return count;
- data = devm_kzalloc(dev, count * sizeof(*data), GFP_KERNEL);
+ data = devm_kcalloc(dev, count, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c
index fdab2b1067db..c6f7d5c9c493 100644
--- a/drivers/soc/qcom/rpmh-rsc.c
+++ b/drivers/soc/qcom/rpmh-rsc.c
@@ -453,13 +453,10 @@ static irqreturn_t tcs_tx_done(int irq, void *p)
trace_rpmh_tx_done(drv, i, req);
- /*
- * If wake tcs was re-purposed for sending active
- * votes, clear AMC trigger & enable modes and
+ /* Clear AMC trigger & enable modes and
* disable interrupt for this TCS
*/
- if (!drv->tcs[ACTIVE_TCS].num_tcs)
- __tcs_set_trigger(drv, i, false);
+ __tcs_set_trigger(drv, i, false);
skip:
/* Reclaim the TCS */
write_tcs_reg(drv, drv->regs[RSC_DRV_CMD_ENABLE], i, 0);
diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
index cf425930539e..c4c45f15dca4 100644
--- a/drivers/soc/qcom/smem.c
+++ b/drivers/soc/qcom/smem.c
@@ -898,7 +898,7 @@ static u32 qcom_smem_get_item_count(struct qcom_smem *smem)
if (IS_ERR_OR_NULL(ptable))
return SMEM_ITEM_COUNT;
- info = (struct smem_info *)&ptable->entry[ptable->num_entries];
+ info = (struct smem_info *)&ptable->entry[le32_to_cpu(ptable->num_entries)];
if (memcmp(info->magic, SMEM_INFO_MAGIC, sizeof(info->magic)))
return SMEM_ITEM_COUNT;
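
The smem fix converts the little-endian on-disk entry count before using it as an array index. A small sketch of why, assuming num_entries is __le32 like the other partition-table fields:

    /*
     * On big-endian hosts, indexing with the raw __le32 value would point
     * far past the entry array; convert first.
     */
    u32 n = le32_to_cpu(ptable->num_entries);
    info = (struct smem_info *)&ptable->entry[n];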
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
index 719b7f4f376f..340a1ff7e92b 100644
--- a/drivers/soc/renesas/Kconfig
+++ b/drivers/soc/renesas/Kconfig
@@ -39,6 +39,10 @@ config ARCH_RCAR_GEN4
bool
select ARCH_RCAR_GEN3
+config ARCH_RCAR_GEN5
+ bool
+ select ARCH_RCAR_GEN4
+
config ARCH_RMOBILE
bool
select PM
@@ -348,6 +352,14 @@ config ARCH_R8A779H0
help
This enables support for the Renesas R-Car V4M SoC.
+config ARCH_R8A78000
+ bool "ARM64 Platform support for R8A78000 (R-Car X5H)"
+ default y if ARCH_RENESAS
+ default ARCH_RENESAS
+ select ARCH_RCAR_GEN5
+ help
+ This enables support for the Renesas R-Car X5H SoC.
+
config ARCH_R9A07G043
bool "ARM64 Platform support for R9A07G043U (RZ/G2UL)"
default y if ARCH_RENESAS
@@ -449,6 +461,7 @@ config RST_RCAR
config SYSC_RZ
bool "System controller for RZ SoCs" if COMPILE_TEST
+ select MFD_SYSCON
config SYSC_R9A08G045
bool "Renesas System controller support for R9A08G045 (RZ/G3S)" if COMPILE_TEST
diff --git a/drivers/soc/renesas/r9a08g045-sysc.c b/drivers/soc/renesas/r9a08g045-sysc.c
index f4db1431e036..0504d4e68761 100644
--- a/drivers/soc/renesas/r9a08g045-sysc.c
+++ b/drivers/soc/renesas/r9a08g045-sysc.c
@@ -20,4 +20,5 @@ static const struct rz_sysc_soc_id_init_data rzg3s_sysc_soc_id_init_data __initc
const struct rz_sysc_init_data rzg3s_sysc_init_data __initconst = {
.soc_id_init_data = &rzg3s_sysc_soc_id_init_data,
+ .max_register = 0xe20,
};
diff --git a/drivers/soc/renesas/r9a09g047-sys.c b/drivers/soc/renesas/r9a09g047-sys.c
index cd2eb7782cfe..2e8426c03050 100644
--- a/drivers/soc/renesas/r9a09g047-sys.c
+++ b/drivers/soc/renesas/r9a09g047-sys.c
@@ -64,4 +64,5 @@ static const struct rz_sysc_soc_id_init_data rzg3e_sys_soc_id_init_data __initco
const struct rz_sysc_init_data rzg3e_sys_init_data = {
.soc_id_init_data = &rzg3e_sys_soc_id_init_data,
+ .max_register = 0x170c,
};
diff --git a/drivers/soc/renesas/r9a09g057-sys.c b/drivers/soc/renesas/r9a09g057-sys.c
index 4c21cc29edbc..e3390e7c7fe5 100644
--- a/drivers/soc/renesas/r9a09g057-sys.c
+++ b/drivers/soc/renesas/r9a09g057-sys.c
@@ -64,4 +64,5 @@ static const struct rz_sysc_soc_id_init_data rzv2h_sys_soc_id_init_data __initco
const struct rz_sysc_init_data rzv2h_sys_init_data = {
.soc_id_init_data = &rzv2h_sys_soc_id_init_data,
+ .max_register = 0x170c,
};
diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c
index df2b38417b80..1eb52356b996 100644
--- a/drivers/soc/renesas/renesas-soc.c
+++ b/drivers/soc/renesas/renesas-soc.c
@@ -36,6 +36,10 @@ static const struct renesas_family fam_rcar_gen4 __initconst __maybe_unused = {
.name = "R-Car Gen4",
};
+static const struct renesas_family fam_rcar_gen5 __initconst __maybe_unused = {
+ .name = "R-Car Gen5",
+};
+
static const struct renesas_family fam_rmobile __initconst __maybe_unused = {
.name = "R-Mobile",
.reg = 0xe600101c, /* CCCR (Common Chip Code Register) */
@@ -266,6 +270,11 @@ static const struct renesas_soc soc_rcar_v4m __initconst __maybe_unused = {
.id = 0x5d,
};
+static const struct renesas_soc soc_rcar_x5h __initconst __maybe_unused = {
+ .family = &fam_rcar_gen5,
+ .id = 0x60,
+};
+
static const struct renesas_soc soc_shmobile_ag5 __initconst __maybe_unused = {
.family = &fam_shmobile,
.id = 0x37,
@@ -378,6 +387,9 @@ static const struct of_device_id renesas_socs[] __initconst __maybe_unused = {
#ifdef CONFIG_ARCH_R8A779H0
{ .compatible = "renesas,r8a779h0", .data = &soc_rcar_v4m },
#endif
+#ifdef CONFIG_ARCH_R8A78000
+ { .compatible = "renesas,r8a78000", .data = &soc_rcar_x5h },
+#endif
#ifdef CONFIG_ARCH_R9A07G043
#ifdef CONFIG_RISCV
{ .compatible = "renesas,r9a07g043", .data = &soc_rz_five },
diff --git a/drivers/soc/renesas/rz-sysc.c b/drivers/soc/renesas/rz-sysc.c
index ffa65fb4dade..9f79e299e6f4 100644
--- a/drivers/soc/renesas/rz-sysc.c
+++ b/drivers/soc/renesas/rz-sysc.c
@@ -5,9 +5,13 @@
* Copyright (C) 2024 Renesas Electronics Corp.
*/
+#include <linux/cleanup.h>
#include <linux/io.h>
+#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
#include <linux/sys_soc.h>
#include "rz-sysc.h"
@@ -100,14 +104,23 @@ MODULE_DEVICE_TABLE(of, rz_sysc_match);
static int rz_sysc_probe(struct platform_device *pdev)
{
+ const struct rz_sysc_init_data *data;
const struct of_device_id *match;
struct device *dev = &pdev->dev;
+ struct regmap *regmap;
struct rz_sysc *sysc;
+ int ret;
+
+ struct regmap_config *regmap_cfg __free(kfree) = kzalloc(sizeof(*regmap_cfg), GFP_KERNEL);
+ if (!regmap_cfg)
+ return -ENOMEM;
match = of_match_node(rz_sysc_match, dev->of_node);
if (!match)
return -ENODEV;
+ data = match->data;
+
sysc = devm_kzalloc(dev, sizeof(*sysc), GFP_KERNEL);
if (!sysc)
return -ENOMEM;
@@ -117,7 +130,22 @@ static int rz_sysc_probe(struct platform_device *pdev)
return PTR_ERR(sysc->base);
sysc->dev = dev;
- return rz_sysc_soc_init(sysc, match);
+ ret = rz_sysc_soc_init(sysc, match);
+ if (ret)
+ return ret;
+
+ regmap_cfg->name = "rz_sysc_regs";
+ regmap_cfg->reg_bits = 32;
+ regmap_cfg->reg_stride = 4;
+ regmap_cfg->val_bits = 32;
+ regmap_cfg->fast_io = true;
+ regmap_cfg->max_register = data->max_register;
+
+ regmap = devm_regmap_init_mmio(dev, sysc->base, regmap_cfg);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ return of_syscon_register_regmap(dev->of_node, regmap);
}
static struct platform_driver rz_sysc_driver = {
diff --git a/drivers/soc/renesas/rz-sysc.h b/drivers/soc/renesas/rz-sysc.h
index 56bc047a1bff..8eec355d5d56 100644
--- a/drivers/soc/renesas/rz-sysc.h
+++ b/drivers/soc/renesas/rz-sysc.h
@@ -34,9 +34,11 @@ struct rz_sysc_soc_id_init_data {
/**
* struct rz_sysc_init_data - RZ SYSC initialization data
* @soc_id_init_data: RZ SYSC SoC ID initialization data
+ * @max_register: Maximum SYSC register offset to be used by the regmap config
*/
struct rz_sysc_init_data {
const struct rz_sysc_soc_id_init_data *soc_id_init_data;
+ u32 max_register;
};
extern const struct rz_sysc_init_data rzg3e_sys_init_data;
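
With max_register now supplied per SoC, rz_sysc_probe() builds a bounds-checked MMIO regmap and registers it as a syscon. A hedged consumer-side sketch; the phandle property name is hypothetical:

    /* Illustrative only: another driver looking up the SYSC regmap. */
    struct regmap *sysc;

    sysc = syscon_regmap_lookup_by_phandle(dev->of_node, "renesas,sysc");
    if (IS_ERR(sysc))
            return PTR_ERR(sysc);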
diff --git a/drivers/soc/rockchip/grf.c b/drivers/soc/rockchip/grf.c
index 1eab4bb0eacf..344870da7675 100644
--- a/drivers/soc/rockchip/grf.c
+++ b/drivers/soc/rockchip/grf.c
@@ -6,13 +6,12 @@
*/
#include <linux/err.h>
+#include <linux/hw_bitfield.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
-#define HIWORD_UPDATE(val, mask, shift) \
- ((val) << (shift) | (mask) << ((shift) + 16))
struct rockchip_grf_value {
const char *desc;
@@ -32,7 +31,7 @@ static const struct rockchip_grf_value rk3036_defaults[] __initconst = {
* Disable auto jtag/sdmmc switching that causes issues with the
* clock-framework and the mmc controllers making them unreliable.
*/
- { "jtag switching", RK3036_GRF_SOC_CON0, HIWORD_UPDATE(0, 1, 11) },
+ { "jtag switching", RK3036_GRF_SOC_CON0, FIELD_PREP_WM16_CONST(BIT(11), 0) },
};
static const struct rockchip_grf_info rk3036_grf __initconst = {
@@ -44,8 +43,8 @@ static const struct rockchip_grf_info rk3036_grf __initconst = {
#define RK3128_GRF_SOC_CON1 0x144
static const struct rockchip_grf_value rk3128_defaults[] __initconst = {
- { "jtag switching", RK3128_GRF_SOC_CON0, HIWORD_UPDATE(0, 1, 8) },
- { "vpu main clock", RK3128_GRF_SOC_CON1, HIWORD_UPDATE(0, 1, 10) },
+ { "jtag switching", RK3128_GRF_SOC_CON0, FIELD_PREP_WM16_CONST(BIT(8), 0) },
+ { "vpu main clock", RK3128_GRF_SOC_CON1, FIELD_PREP_WM16_CONST(BIT(10), 0) },
};
static const struct rockchip_grf_info rk3128_grf __initconst = {
@@ -56,7 +55,7 @@ static const struct rockchip_grf_info rk3128_grf __initconst = {
#define RK3228_GRF_SOC_CON6 0x418
static const struct rockchip_grf_value rk3228_defaults[] __initconst = {
- { "jtag switching", RK3228_GRF_SOC_CON6, HIWORD_UPDATE(0, 1, 8) },
+ { "jtag switching", RK3228_GRF_SOC_CON6, FIELD_PREP_WM16_CONST(BIT(8), 0) },
};
static const struct rockchip_grf_info rk3228_grf __initconst = {
@@ -68,8 +67,8 @@ static const struct rockchip_grf_info rk3228_grf __initconst = {
#define RK3288_GRF_SOC_CON2 0x24c
static const struct rockchip_grf_value rk3288_defaults[] __initconst = {
- { "jtag switching", RK3288_GRF_SOC_CON0, HIWORD_UPDATE(0, 1, 12) },
- { "pwm select", RK3288_GRF_SOC_CON2, HIWORD_UPDATE(1, 1, 0) },
+ { "jtag switching", RK3288_GRF_SOC_CON0, FIELD_PREP_WM16_CONST(BIT(12), 0) },
+ { "pwm select", RK3288_GRF_SOC_CON2, FIELD_PREP_WM16_CONST(BIT(0), 1) },
};
static const struct rockchip_grf_info rk3288_grf __initconst = {
@@ -80,7 +79,7 @@ static const struct rockchip_grf_info rk3288_grf __initconst = {
#define RK3328_GRF_SOC_CON4 0x410
static const struct rockchip_grf_value rk3328_defaults[] __initconst = {
- { "jtag switching", RK3328_GRF_SOC_CON4, HIWORD_UPDATE(0, 1, 12) },
+ { "jtag switching", RK3328_GRF_SOC_CON4, FIELD_PREP_WM16_CONST(BIT(12), 0) },
};
static const struct rockchip_grf_info rk3328_grf __initconst = {
@@ -91,7 +90,7 @@ static const struct rockchip_grf_info rk3328_grf __initconst = {
#define RK3368_GRF_SOC_CON15 0x43c
static const struct rockchip_grf_value rk3368_defaults[] __initconst = {
- { "jtag switching", RK3368_GRF_SOC_CON15, HIWORD_UPDATE(0, 1, 13) },
+ { "jtag switching", RK3368_GRF_SOC_CON15, FIELD_PREP_WM16_CONST(BIT(13), 0) },
};
static const struct rockchip_grf_info rk3368_grf __initconst = {
@@ -102,7 +101,7 @@ static const struct rockchip_grf_info rk3368_grf __initconst = {
#define RK3399_GRF_SOC_CON7 0xe21c
static const struct rockchip_grf_value rk3399_defaults[] __initconst = {
- { "jtag switching", RK3399_GRF_SOC_CON7, HIWORD_UPDATE(0, 1, 12) },
+ { "jtag switching", RK3399_GRF_SOC_CON7, FIELD_PREP_WM16_CONST(BIT(12), 0) },
};
static const struct rockchip_grf_info rk3399_grf __initconst = {
@@ -113,9 +112,9 @@ static const struct rockchip_grf_info rk3399_grf __initconst = {
#define RK3566_GRF_USB3OTG0_CON1 0x0104
static const struct rockchip_grf_value rk3566_defaults[] __initconst = {
- { "usb3otg port switch", RK3566_GRF_USB3OTG0_CON1, HIWORD_UPDATE(0, 1, 12) },
- { "usb3otg clock switch", RK3566_GRF_USB3OTG0_CON1, HIWORD_UPDATE(1, 1, 7) },
- { "usb3otg disable usb3", RK3566_GRF_USB3OTG0_CON1, HIWORD_UPDATE(1, 1, 0) },
+ { "usb3otg port switch", RK3566_GRF_USB3OTG0_CON1, FIELD_PREP_WM16_CONST(BIT(12), 0) },
+ { "usb3otg clock switch", RK3566_GRF_USB3OTG0_CON1, FIELD_PREP_WM16_CONST(BIT(7), 1) },
+ { "usb3otg disable usb3", RK3566_GRF_USB3OTG0_CON1, FIELD_PREP_WM16_CONST(BIT(0), 1) },
};
static const struct rockchip_grf_info rk3566_pipegrf __initconst = {
@@ -126,8 +125,8 @@ static const struct rockchip_grf_info rk3566_pipegrf __initconst = {
#define RK3576_SYSGRF_SOC_CON1 0x0004
static const struct rockchip_grf_value rk3576_defaults_sys_grf[] __initconst = {
- { "i3c0 weakpull", RK3576_SYSGRF_SOC_CON1, HIWORD_UPDATE(3, 3, 6) },
- { "i3c1 weakpull", RK3576_SYSGRF_SOC_CON1, HIWORD_UPDATE(3, 3, 8) },
+ { "i3c0 weakpull", RK3576_SYSGRF_SOC_CON1, FIELD_PREP_WM16_CONST(GENMASK(7, 6), 3) },
+ { "i3c1 weakpull", RK3576_SYSGRF_SOC_CON1, FIELD_PREP_WM16_CONST(GENMASK(9, 8), 3) },
};
static const struct rockchip_grf_info rk3576_sysgrf __initconst = {
@@ -138,7 +137,7 @@ static const struct rockchip_grf_info rk3576_sysgrf __initconst = {
#define RK3576_IOCGRF_MISC_CON 0x04F0
static const struct rockchip_grf_value rk3576_defaults_ioc_grf[] __initconst = {
- { "jtag switching", RK3576_IOCGRF_MISC_CON, HIWORD_UPDATE(0, 1, 1) },
+ { "jtag switching", RK3576_IOCGRF_MISC_CON, FIELD_PREP_WM16_CONST(BIT(1), 0) },
};
static const struct rockchip_grf_info rk3576_iocgrf __initconst = {
@@ -149,7 +148,7 @@ static const struct rockchip_grf_info rk3576_iocgrf __initconst = {
#define RK3588_GRF_SOC_CON6 0x0318
static const struct rockchip_grf_value rk3588_defaults[] __initconst = {
- { "jtag switching", RK3588_GRF_SOC_CON6, HIWORD_UPDATE(0, 1, 14) },
+ { "jtag switching", RK3588_GRF_SOC_CON6, FIELD_PREP_WM16_CONST(BIT(14), 0) },
};
static const struct rockchip_grf_info rk3588_sysgrf __initconst = {
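
FIELD_PREP_WM16_CONST() produces the same write-mask ("hiword") encoding the removed macro did: the field value in the low half-word and its mask in the high half-word, so a single write updates only the named bits. A worked check for the rk3036 entry, under that assumption:

    /* Old: HIWORD_UPDATE(0, 1, 11) == (0 << 11) | (1 << (11 + 16)) == 0x08000000 */
    static_assert(FIELD_PREP_WM16_CONST(BIT(11), 0) == 0x08000000);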
diff --git a/drivers/soc/samsung/exynos-pmu.c b/drivers/soc/samsung/exynos-pmu.c
index a77288f49d24..22c50ca2aa79 100644
--- a/drivers/soc/samsung/exynos-pmu.c
+++ b/drivers/soc/samsung/exynos-pmu.c
@@ -7,7 +7,9 @@
#include <linux/array_size.h>
#include <linux/arm-smccc.h>
+#include <linux/bitmap.h>
#include <linux/cpuhotplug.h>
+#include <linux/cpu_pm.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/mfd/core.h>
@@ -15,6 +17,7 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
+#include <linux/reboot.h>
#include <linux/regmap.h>
#include <linux/soc/samsung/exynos-regs-pmu.h>
@@ -35,6 +38,15 @@ struct exynos_pmu_context {
const struct exynos_pmu_data *pmu_data;
struct regmap *pmureg;
struct regmap *pmuintrgen;
+ /*
+ * Serialization lock for CPU hot plug and cpuidle ACPM hint
+ * programming. Also protects in_cpuhp, sys_insuspend & sys_inreboot
+ * flags.
+ */
+ raw_spinlock_t cpupm_lock;
+ unsigned long *in_cpuhp;
+ bool sys_insuspend;
+ bool sys_inreboot;
};
void __iomem *pmu_base_addr;
@@ -221,6 +233,15 @@ static const struct regmap_config regmap_smccfg = {
.reg_read = tensor_sec_reg_read,
.reg_write = tensor_sec_reg_write,
.reg_update_bits = tensor_sec_update_bits,
+ .use_raw_spinlock = true,
+};
+
+static const struct regmap_config regmap_pmu_intr = {
+ .name = "pmu_intr_gen",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .use_raw_spinlock = true,
};
static const struct exynos_pmu_data gs101_pmu_data = {
@@ -330,13 +351,19 @@ struct regmap *exynos_get_pmu_regmap_by_phandle(struct device_node *np,
EXPORT_SYMBOL_GPL(exynos_get_pmu_regmap_by_phandle);
/*
- * CPU_INFORM register hint values which are used by
- * EL3 firmware (el3mon).
+ * CPU_INFORM register "hint" values are required to be programmed in addition to
+ * the standard PSCI calls to have functional CPU hotplug and CPU idle states.
+ * This is required to workaround limitations in the el3mon/ACPM firmware.
*/
#define CPU_INFORM_CLEAR 0
#define CPU_INFORM_C2 1
-static int gs101_cpuhp_pmu_online(unsigned int cpu)
+/*
+ * Functions with the __gs101_cpu_pmu_ prefix are common code shared by the
+ * CPU PM notifiers (CPUIdle) and the CPU hotplug callbacks. They must be
+ * called with IRQs disabled and cpupm_lock held.
+ */
+static int __gs101_cpu_pmu_online(unsigned int cpu)
{
unsigned int cpuhint = smp_processor_id();
u32 reg, mask;
@@ -358,10 +385,48 @@ static int gs101_cpuhp_pmu_online(unsigned int cpu)
return 0;
}
-static int gs101_cpuhp_pmu_offline(unsigned int cpu)
+/* Called from CPU PM notifier (CPUIdle code path) with IRQs disabled */
+static int gs101_cpu_pmu_online(void)
+{
+ int cpu;
+
+ raw_spin_lock(&pmu_context->cpupm_lock);
+
+ if (pmu_context->sys_inreboot) {
+ raw_spin_unlock(&pmu_context->cpupm_lock);
+ return NOTIFY_OK;
+ }
+
+ cpu = smp_processor_id();
+ __gs101_cpu_pmu_online(cpu);
+ raw_spin_unlock(&pmu_context->cpupm_lock);
+
+ return NOTIFY_OK;
+}
+
+/* Called from CPU hot plug callback with IRQs enabled */
+static int gs101_cpuhp_pmu_online(unsigned int cpu)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&pmu_context->cpupm_lock, flags);
+
+ __gs101_cpu_pmu_online(cpu);
+ /*
+ * Mark this CPU as having finished hotplug. This means the CPU
+ * can now enter the C2 idle state.
+ */
+ clear_bit(cpu, pmu_context->in_cpuhp);
+ raw_spin_unlock_irqrestore(&pmu_context->cpupm_lock, flags);
+
+ return 0;
+}
+
+/* Common function shared by both CPU hot plug and CPUIdle */
+static int __gs101_cpu_pmu_offline(unsigned int cpu)
{
- u32 reg, mask;
unsigned int cpuhint = smp_processor_id();
+ u32 reg, mask;
/* set cpu inform hint */
regmap_write(pmu_context->pmureg, GS101_CPU_INFORM(cpuhint),
@@ -379,6 +444,165 @@ static int gs101_cpuhp_pmu_offline(unsigned int cpu)
regmap_read(pmu_context->pmuintrgen, GS101_GRP1_INTR_BID_UPEND, &reg);
regmap_write(pmu_context->pmuintrgen, GS101_GRP1_INTR_BID_CLEAR,
reg & mask);
+
+ return 0;
+}
+
+/* Called from CPU PM notifier (CPUIdle code path) with IRQs disabled */
+static int gs101_cpu_pmu_offline(void)
+{
+ int cpu;
+
+ raw_spin_lock(&pmu_context->cpupm_lock);
+ cpu = smp_processor_id();
+
+ if (test_bit(cpu, pmu_context->in_cpuhp)) {
+ raw_spin_unlock(&pmu_context->cpupm_lock);
+ return NOTIFY_BAD;
+ }
+
+ /* Ignore CPU_PM_ENTER event in reboot or suspend sequence. */
+ if (pmu_context->sys_insuspend || pmu_context->sys_inreboot) {
+ raw_spin_unlock(&pmu_context->cpupm_lock);
+ return NOTIFY_OK;
+ }
+
+ __gs101_cpu_pmu_offline(cpu);
+ raw_spin_unlock(&pmu_context->cpupm_lock);
+
+ return NOTIFY_OK;
+}
+
+/* Called from CPU hot plug callback with IRQs enabled */
+static int gs101_cpuhp_pmu_offline(unsigned int cpu)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&pmu_context->cpupm_lock, flags);
+ /*
+ * Mark this CPU as entering hotplug. So as not to confuse ACPM,
+ * a CPU entering hotplug must not enter the C2 idle state.
+ */
+ set_bit(cpu, pmu_context->in_cpuhp);
+ __gs101_cpu_pmu_offline(cpu);
+
+ raw_spin_unlock_irqrestore(&pmu_context->cpupm_lock, flags);
+
+ return 0;
+}
+
+static int gs101_cpu_pm_notify_callback(struct notifier_block *self,
+ unsigned long action, void *v)
+{
+ switch (action) {
+ case CPU_PM_ENTER:
+ return gs101_cpu_pmu_offline();
+
+ case CPU_PM_EXIT:
+ return gs101_cpu_pmu_online();
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block gs101_cpu_pm_notifier = {
+ .notifier_call = gs101_cpu_pm_notify_callback,
+ /*
+ * We want to be called first, as the ACPM hint and handshake are what
+ * put the CPU into C2.
+ */
+ .priority = INT_MAX
+};
+
+static int exynos_cpupm_reboot_notifier(struct notifier_block *nb,
+ unsigned long event, void *v)
+{
+ unsigned long flags;
+
+ switch (event) {
+ case SYS_POWER_OFF:
+ case SYS_RESTART:
+ raw_spin_lock_irqsave(&pmu_context->cpupm_lock, flags);
+ pmu_context->sys_inreboot = true;
+ raw_spin_unlock_irqrestore(&pmu_context->cpupm_lock, flags);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block exynos_cpupm_reboot_nb = {
+ .priority = INT_MAX,
+ .notifier_call = exynos_cpupm_reboot_notifier,
+};
+
+static int setup_cpuhp_and_cpuidle(struct device *dev)
+{
+ struct device_node *intr_gen_node;
+ struct resource intrgen_res;
+ void __iomem *virt_addr;
+ int ret, cpu;
+
+ intr_gen_node = of_parse_phandle(dev->of_node,
+ "google,pmu-intr-gen-syscon", 0);
+ if (!intr_gen_node) {
+ /*
+ * To maintain support for older DTs that didn't specify the syscon
+ * phandle, just issue a warning rather than failing to probe.
+ */
+ dev_warn(dev, "pmu-intr-gen syscon unavailable\n");
+ return 0;
+ }
+
+ /*
+ * To avoid lockdep issues (CPU PM notifiers use raw spinlocks), create
+ * an MMIO regmap for pmu-intr-gen that uses raw spinlocks instead of
+ * the syscon-provided regmap.
+ */
+ ret = of_address_to_resource(intr_gen_node, 0, &intrgen_res);
+ of_node_put(intr_gen_node);
+ if (ret)
+ return ret;
+
+ virt_addr = devm_ioremap(dev, intrgen_res.start,
+ resource_size(&intrgen_res));
+ if (!virt_addr)
+ return -ENOMEM;
+
+ pmu_context->pmuintrgen = devm_regmap_init_mmio(dev, virt_addr,
+ &regmap_pmu_intr);
+ if (IS_ERR(pmu_context->pmuintrgen)) {
+ dev_err(dev, "failed to initialize pmu-intr-gen regmap\n");
+ return PTR_ERR(pmu_context->pmuintrgen);
+ }
+
+ /* register custom mmio regmap with syscon */
+ ret = of_syscon_register_regmap(intr_gen_node,
+ pmu_context->pmuintrgen);
+ if (ret)
+ return ret;
+
+ pmu_context->in_cpuhp = devm_bitmap_zalloc(dev, num_possible_cpus(),
+ GFP_KERNEL);
+ if (!pmu_context->in_cpuhp)
+ return -ENOMEM;
+
+ raw_spin_lock_init(&pmu_context->cpupm_lock);
+ pmu_context->sys_inreboot = false;
+ pmu_context->sys_insuspend = false;
+
+ /* mark all currently online CPUs as powered on in the PMU */
+ for_each_online_cpu(cpu)
+ gs101_cpuhp_pmu_online(cpu);
+
+ /* register CPU hotplug callbacks */
+ cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "soc/exynos-pmu:prepare",
+ gs101_cpuhp_pmu_online, NULL);
+
+ cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "soc/exynos-pmu:online",
+ NULL, gs101_cpuhp_pmu_offline);
+
+ /* register CPU PM notifiers for cpuidle */
+ cpu_pm_register_notifier(&gs101_cpu_pm_notifier);
+ register_reboot_notifier(&exynos_cpupm_reboot_nb);
return 0;
}
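
As an aside, the dynamic hotplug states requested above allocate a state number; a minimal sketch of the pattern with hypothetical names, noting that the cpuhp_setup_state() return values ought to be checked (and kept, if the states are ever to be removed):

#include <linux/cpuhotplug.h>

static int example_online(unsigned int cpu)
{
	return 0;	/* per-CPU bring-up work */
}

static int example_offline(unsigned int cpu)
{
	return 0;	/* per-CPU tear-down work */
}

static int example_setup(void)
{
	/* CPUHP_AP_ONLINE_DYN allocates a state; a negative value is an errno */
	int state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
				      example_online, example_offline);

	return state < 0 ? state : 0;
}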
@@ -435,23 +659,9 @@ static int exynos_pmu_probe(struct platform_device *pdev)
pmu_context->dev = dev;
if (pmu_context->pmu_data && pmu_context->pmu_data->pmu_cpuhp) {
- pmu_context->pmuintrgen = syscon_regmap_lookup_by_phandle(dev->of_node,
- "google,pmu-intr-gen-syscon");
- if (IS_ERR(pmu_context->pmuintrgen)) {
- /*
- * To maintain support for older DTs that didn't specify syscon phandle
- * just issue a warning rather than fail to probe.
- */
- dev_warn(&pdev->dev, "pmu-intr-gen syscon unavailable\n");
- } else {
- cpuhp_setup_state(CPUHP_BP_PREPARE_DYN,
- "soc/exynos-pmu:prepare",
- gs101_cpuhp_pmu_online, NULL);
-
- cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
- "soc/exynos-pmu:online",
- NULL, gs101_cpuhp_pmu_offline);
- }
+ ret = setup_cpuhp_and_cpuidle(dev);
+ if (ret)
+ return ret;
}
if (pmu_context->pmu_data && pmu_context->pmu_data->pmu_init)
@@ -471,10 +681,32 @@ static int exynos_pmu_probe(struct platform_device *pdev)
return 0;
}
+static int exynos_cpupm_suspend_noirq(struct device *dev)
+{
+ raw_spin_lock(&pmu_context->cpupm_lock);
+ pmu_context->sys_insuspend = true;
+ raw_spin_unlock(&pmu_context->cpupm_lock);
+ return 0;
+}
+
+static int exynos_cpupm_resume_noirq(struct device *dev)
+{
+ raw_spin_lock(&pmu_context->cpupm_lock);
+ pmu_context->sys_insuspend = false;
+ raw_spin_unlock(&pmu_context->cpupm_lock);
+ return 0;
+}
+
+static const struct dev_pm_ops cpupm_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(exynos_cpupm_suspend_noirq,
+ exynos_cpupm_resume_noirq)
+};
+
static struct platform_driver exynos_pmu_driver = {
.driver = {
.name = "exynos-pmu",
.of_match_table = exynos_pmu_of_device_ids,
+ .pm = pm_sleep_ptr(&cpupm_pm_ops),
},
.probe = exynos_pmu_probe,
};
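
For reference, the CPU PM notifier machinery used by the gs101 code above boils down to a small pattern; a minimal sketch with hypothetical names:

#include <linux/cpu_pm.h>
#include <linux/limits.h>
#include <linux/notifier.h>

static int example_cpu_pm_notify(struct notifier_block *self,
				 unsigned long action, void *v)
{
	switch (action) {
	case CPU_PM_ENTER:
		/*
		 * Runs on the idling CPU with IRQs disabled; returning
		 * NOTIFY_BAD here vetoes the low-power entry.
		 */
		return NOTIFY_OK;
	case CPU_PM_EXIT:
		return NOTIFY_OK;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_pm_nb = {
	.notifier_call = example_cpu_pm_notify,
	.priority = INT_MAX,	/* run before other CPU PM notifiers */
};

/* registration: cpu_pm_register_notifier(&example_cpu_pm_nb); */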
diff --git a/drivers/soc/sunxi/sunxi_sram.c b/drivers/soc/sunxi/sunxi_sram.c
index 2781a091a6a6..446b9fc1f175 100644
--- a/drivers/soc/sunxi/sunxi_sram.c
+++ b/drivers/soc/sunxi/sunxi_sram.c
@@ -12,6 +12,7 @@
#include <linux/debugfs.h>
#include <linux/io.h>
+#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -310,6 +311,10 @@ static const struct sunxi_sramc_variant sun50i_h616_sramc_variant = {
.has_ths_offset = true,
};
+static const struct sunxi_sramc_variant sun55i_a523_sramc_variant = {
+ .num_emac_clocks = 2,
+};
+
#define SUNXI_SRAM_THS_OFFSET_REG 0x0
#define SUNXI_SRAM_EMAC_CLOCK_REG 0x30
#define SUNXI_SYS_LDO_CTRL_REG 0x150
@@ -363,6 +368,7 @@ static int __init sunxi_sram_probe(struct platform_device *pdev)
const struct sunxi_sramc_variant *variant;
struct device *dev = &pdev->dev;
struct regmap *regmap;
+ int ret;
sram_dev = &pdev->dev;
@@ -380,6 +386,10 @@ static int __init sunxi_sram_probe(struct platform_device *pdev)
regmap = devm_regmap_init_mmio(dev, base, &sunxi_sram_regmap_config);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
+
+ ret = of_syscon_register_regmap(dev->of_node, regmap);
+ if (ret)
+ return ret;
}
of_platform_populate(dev->of_node, NULL, NULL, dev);
@@ -430,6 +440,10 @@ static const struct of_device_id sunxi_sram_dt_match[] = {
.compatible = "allwinner,sun50i-h616-system-control",
.data = &sun50i_h616_sramc_variant,
},
+ {
+ .compatible = "allwinner,sun55i-a523-system-control",
+ .data = &sun55i_a523_sramc_variant,
+ },
{ },
};
MODULE_DEVICE_TABLE(of, sunxi_sram_dt_match);
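
The of_syscon_register_regmap() call added above follows a common pattern: the driver creates its own MMIO regmap (so it controls the regmap_config) and then publishes it as the node's syscon provider, so that later syscon_regmap_lookup_by_phandle() users receive this instance. A hedged sketch:

#include <linux/mfd/syscon.h>
#include <linux/regmap.h>

static int example_publish_syscon(struct device *dev, void __iomem *base,
				  const struct regmap_config *cfg)
{
	struct regmap *map = devm_regmap_init_mmio(dev, base, cfg);

	if (IS_ERR(map))
		return PTR_ERR(map);

	/* make this regmap the one syscon lookups on dev->of_node return */
	return of_syscon_register_regmap(dev->of_node, map);
}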
diff --git a/drivers/soc/tegra/Kconfig b/drivers/soc/tegra/Kconfig
index 9392c2c43cc8..c0fc54c3cd35 100644
--- a/drivers/soc/tegra/Kconfig
+++ b/drivers/soc/tegra/Kconfig
@@ -96,6 +96,7 @@ config ARCH_TEGRA_210_SOC
config ARCH_TEGRA_186_SOC
bool "NVIDIA Tegra186 SoC"
depends on !CPU_BIG_ENDIAN
+ select PINCTRL_TEGRA186
select MAILBOX
select SOC_TEGRA_PMC
help
diff --git a/drivers/soc/tegra/fuse/fuse-tegra30.c b/drivers/soc/tegra/fuse/fuse-tegra30.c
index e24ab5f7d2bf..524fa1b0cd3d 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra30.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra30.c
@@ -117,6 +117,124 @@ const struct tegra_fuse_soc tegra30_fuse_soc = {
#endif
#ifdef CONFIG_ARCH_TEGRA_114_SOC
+static const struct nvmem_cell_info tegra114_fuse_cells[] = {
+ {
+ .name = "tsensor-cpu1",
+ .offset = 0x084,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "tsensor-cpu2",
+ .offset = 0x088,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "tsensor-common",
+ .offset = 0x08c,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "tsensor-cpu0",
+ .offset = 0x098,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "xusb-pad-calibration",
+ .offset = 0x0f0,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "tsensor-cpu3",
+ .offset = 0x12c,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "tsensor-gpu",
+ .offset = 0x154,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "tsensor-mem0",
+ .offset = 0x158,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "tsensor-mem1",
+ .offset = 0x15c,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "tsensor-pllx",
+ .offset = 0x160,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ },
+};
+
+static const struct nvmem_cell_lookup tegra114_fuse_lookups[] = {
+ {
+ .nvmem_name = "fuse",
+ .cell_name = "xusb-pad-calibration",
+ .dev_id = "7009f000.padctl",
+ .con_id = "calibration",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-common",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "common",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-cpu0",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "cpu0",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-cpu1",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "cpu1",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-cpu2",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "cpu2",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-cpu3",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "cpu3",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-mem0",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "mem0",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-mem1",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "mem1",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-gpu",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "gpu",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-pllx",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "pllx",
+ },
+};
+
static const struct tegra_fuse_info tegra114_fuse_info = {
.read = tegra30_fuse_read,
.size = 0x2a0,
@@ -127,6 +245,10 @@ const struct tegra_fuse_soc tegra114_fuse_soc = {
.init = tegra30_fuse_init,
.speedo_init = tegra114_init_speedo_data,
.info = &tegra114_fuse_info,
+ .lookups = tegra114_fuse_lookups,
+ .num_lookups = ARRAY_SIZE(tegra114_fuse_lookups),
+ .cells = tegra114_fuse_cells,
+ .num_cells = ARRAY_SIZE(tegra114_fuse_cells),
.soc_attr_group = &tegra_soc_attr_group,
.clk_suspend_on = false,
};
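
To illustrate how the lookup table above is consumed: a device matching dev_id resolves a cell by its con_id through the usual nvmem consumer API. A sketch with illustrative names and trimmed error handling:

#include <linux/nvmem-consumer.h>
#include <linux/slab.h>
#include <linux/string.h>

static int example_read_tsensor_fuse(struct device *dev, u32 *value)
{
	struct nvmem_cell *cell;
	size_t len;
	void *buf;

	cell = nvmem_cell_get(dev, "cpu0");	/* matched via con_id above */
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);	/* kmalloc'd copy of the cell */
	nvmem_cell_put(cell);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	if (len >= sizeof(*value))
		memcpy(value, buf, sizeof(*value));
	kfree(buf);
	return 0;
}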
diff --git a/drivers/soc/ti/k3-socinfo.c b/drivers/soc/ti/k3-socinfo.c
index d716be113c84..50c170a995f9 100644
--- a/drivers/soc/ti/k3-socinfo.c
+++ b/drivers/soc/ti/k3-socinfo.c
@@ -66,6 +66,10 @@ static const char * const j721e_rev_string_map[] = {
"1.0", "1.1", "2.0",
};
+static const char * const am62lx_rev_string_map[] = {
+ "1.0", "1.1",
+};
+
static int
k3_chipinfo_partno_to_names(unsigned int partno,
struct soc_device_attribute *soc_dev_attr)
@@ -92,6 +96,12 @@ k3_chipinfo_variant_to_sr(unsigned int partno, unsigned int variant,
soc_dev_attr->revision = kasprintf(GFP_KERNEL, "SR%s",
j721e_rev_string_map[variant]);
break;
+ case JTAG_ID_PARTNO_AM62LX:
+ if (variant >= ARRAY_SIZE(am62lx_rev_string_map))
+ goto err_unknown_variant;
+ soc_dev_attr->revision = kasprintf(GFP_KERNEL, "SR%s",
+ am62lx_rev_string_map[variant]);
+ break;
default:
variant++;
soc_dev_attr->revision = kasprintf(GFP_KERNEL, "SR%x.0",
diff --git a/drivers/soc/ti/pruss.c b/drivers/soc/ti/pruss.c
index d7634bf5413a..038576805bfa 100644
--- a/drivers/soc/ti/pruss.c
+++ b/drivers/soc/ti/pruss.c
@@ -449,7 +449,7 @@ static int pruss_of_setup_memories(struct device *dev, struct pruss *pruss)
pruss->mem_regions[i].pa = res.start;
pruss->mem_regions[i].size = resource_size(&res);
- dev_dbg(dev, "memory %8s: pa %pa size 0x%zx va %pK\n",
+ dev_dbg(dev, "memory %8s: pa %pa size 0x%zx va %p\n",
mem_names[i], &pruss->mem_regions[i].pa,
pruss->mem_regions[i].size, pruss->mem_regions[i].va);
}
diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
index 4fd5cac799c5..55c1db816534 100644
--- a/drivers/soundwire/bus.c
+++ b/drivers/soundwire/bus.c
@@ -1360,6 +1360,18 @@ int sdw_slave_get_scale_index(struct sdw_slave *slave, u8 *base)
}
EXPORT_SYMBOL(sdw_slave_get_scale_index);
+int sdw_slave_get_current_bank(struct sdw_slave *slave)
+{
+ int tmp;
+
+ tmp = sdw_read(slave, SDW_SCP_CTRL);
+ if (tmp < 0)
+ return tmp;
+
+ return FIELD_GET(SDW_SCP_STAT_CURR_BANK, tmp);
+}
+EXPORT_SYMBOL_GPL(sdw_slave_get_current_bank);
+
static int sdw_slave_set_frequency(struct sdw_slave *slave)
{
int scale_index;
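
A plausible use of the new helper in a peripheral driver is selecting the inactive bank before a bank switch; a sketch under the assumption that callers want the bank not currently in use:

#include <linux/soundwire/sdw.h>

static int example_pick_inactive_bank(struct sdw_slave *slave)
{
	int bank = sdw_slave_get_current_bank(slave);	/* 0 or 1, or -errno */

	if (bank < 0)
		return bank;

	return !bank;	/* program the bank that is not currently active */
}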
diff --git a/drivers/soundwire/slave.c b/drivers/soundwire/slave.c
index d2d99555ec5a..3d4d00188c26 100644
--- a/drivers/soundwire/slave.c
+++ b/drivers/soundwire/slave.c
@@ -273,4 +273,10 @@ int sdw_of_find_slaves(struct sdw_bus *bus)
return 0;
}
+struct device *of_sdw_find_device_by_node(struct device_node *np)
+{
+ return bus_find_device_by_of_node(&sdw_bus_type, np);
+}
+EXPORT_SYMBOL_GPL(of_sdw_find_device_by_node);
+
MODULE_IMPORT_NS("SND_SOC_SDCA");
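
The new export resolves a DT node to the matching SoundWire device; the returned struct device carries a reference that the caller must drop. A hedged usage sketch:

#include <linux/soundwire/sdw.h>

static struct sdw_slave *example_find_slave(struct device_node *np)
{
	struct device *dev = of_sdw_find_device_by_node(np);

	if (!dev)
		return NULL;

	/* caller is responsible for put_device(dev) when done */
	return dev_to_sdw_dev(dev);
}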
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index e8a39e304c7e..4d8f00c850c1 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -164,7 +164,7 @@ config SPI_ASPEED_SMC
config SPI_ATMEL
tristate "Atmel SPI Controller"
- depends on ARCH_AT91 || COMPILE_TEST
+ depends on ARCH_MICROCHIP || COMPILE_TEST
depends on OF
help
This selects a driver for the Atmel SPI Controller, present on
diff --git a/drivers/spi/spi-apple.c b/drivers/spi/spi-apple.c
index 6273352a2b28..2fee7057ecc9 100644
--- a/drivers/spi/spi-apple.c
+++ b/drivers/spi/spi-apple.c
@@ -511,6 +511,7 @@ static int apple_spi_probe(struct platform_device *pdev)
}
static const struct of_device_id apple_spi_of_match[] = {
+ { .compatible = "apple,t8103-spi", },
{ .compatible = "apple,spi", },
{}
};
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index 768d7482102a..a0d8d3425c6c 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -671,6 +671,12 @@ static int spi_geni_init(struct spi_geni_master *mas)
goto out_pm;
}
spi_slv_setup(mas);
+ } else if (proto == GENI_SE_INVALID_PROTO) {
+ ret = geni_load_se_firmware(se, GENI_SE_SPI);
+ if (ret) {
+ dev_err(mas->dev, "spi master firmware load failed ret: %d\n", ret);
+ goto out_pm;
+ }
} else if (proto != GENI_SE_SPI) {
dev_err(mas->dev, "Invalid proto %d\n", proto);
goto out_pm;
diff --git a/drivers/staging/media/atomisp/i2c/Kconfig b/drivers/staging/media/atomisp/i2c/Kconfig
index ef2094c22347..7d447ba7a7e1 100644
--- a/drivers/staging/media/atomisp/i2c/Kconfig
+++ b/drivers/staging/media/atomisp/i2c/Kconfig
@@ -26,12 +26,3 @@ config VIDEO_ATOMISP_GC2235
GC2235 is a 2M raw sensor.
It currently only works with the atomisp driver.
-
-config VIDEO_ATOMISP_GC0310
- tristate "GC0310 sensor support"
- depends on ACPI
- depends on I2C && VIDEO_DEV
- select V4L2_CCI_I2C
- help
- This is a Video4Linux2 sensor-level driver for the Galaxycore
- GC0310 0.3MP sensor.
diff --git a/drivers/staging/media/atomisp/i2c/Makefile b/drivers/staging/media/atomisp/i2c/Makefile
index e1637417e5c5..161aa542ef34 100644
--- a/drivers/staging/media/atomisp/i2c/Makefile
+++ b/drivers/staging/media/atomisp/i2c/Makefile
@@ -5,4 +5,3 @@
obj-$(CONFIG_VIDEO_ATOMISP_GC2235) += atomisp-gc2235.o
obj-$(CONFIG_VIDEO_ATOMISP_OV2722) += atomisp-ov2722.o
-obj-$(CONFIG_VIDEO_ATOMISP_GC0310) += atomisp-gc0310.o
diff --git a/drivers/staging/media/atomisp/pci/atomisp_subdev.c b/drivers/staging/media/atomisp/pci/atomisp_subdev.c
index 22c0ae0b1b7b..3d56ca83ecb7 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_subdev.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_subdev.c
@@ -161,13 +161,6 @@ static int isp_subdev_subscribe_event(struct v4l2_subdev *sd,
return v4l2_event_subscribe(fh, sub, 16, NULL);
}
-static int isp_subdev_unsubscribe_event(struct v4l2_subdev *sd,
- struct v4l2_fh *fh,
- struct v4l2_event_subscription *sub)
-{
- return v4l2_event_unsubscribe(fh, sub);
-}
-
/*
* isp_subdev_enum_mbus_code - Handle pixel format enumeration
* @sd: pointer to v4l2 subdev structure
@@ -575,7 +568,7 @@ static int isp_subdev_set_format(struct v4l2_subdev *sd,
static const struct v4l2_subdev_core_ops isp_subdev_v4l2_core_ops = {
.ioctl = isp_subdev_ioctl,
.subscribe_event = isp_subdev_subscribe_event,
- .unsubscribe_event = isp_subdev_unsubscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
};
/* V4L2 subdev pad operations */
diff --git a/drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c b/drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c
index bda35614c862..0f0d16f4ce7c 100644
--- a/drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c
+++ b/drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c
@@ -497,7 +497,7 @@ void ia_css_bufq_dump_queue_info(void)
for (i = 0; i < SH_CSS_MAX_SP_THREADS; i++) {
for (j = 0; j < SH_CSS_MAX_NUM_QUEUES; j++) {
snprintf(prefix, BUFQ_DUMP_FILE_NAME_PREFIX_SIZE,
- "host2sp_buffer_queue[%u][%u]", i, j);
+ "host2sp_buffer_queue[%d][%d]", i, j);
bufq_dump_queue_info(prefix,
&css_queues.host2sp_buffer_queue_handles[i][j]);
}
@@ -505,7 +505,7 @@ void ia_css_bufq_dump_queue_info(void)
for (i = 0; i < SH_CSS_MAX_NUM_QUEUES; i++) {
snprintf(prefix, BUFQ_DUMP_FILE_NAME_PREFIX_SIZE,
- "sp2host_buffer_queue[%u]", i);
+ "sp2host_buffer_queue[%d]", i);
bufq_dump_queue_info(prefix,
&css_queues.sp2host_buffer_queue_handles[i]);
}
diff --git a/drivers/staging/media/imx/imx-media-csc-scaler.c b/drivers/staging/media/imx/imx-media-csc-scaler.c
index 19fd31cb9bb0..1869c5792ecb 100644
--- a/drivers/staging/media/imx/imx-media-csc-scaler.c
+++ b/drivers/staging/media/imx/imx-media-csc-scaler.c
@@ -23,8 +23,6 @@
#include "imx-media.h"
-#define fh_to_ctx(__fh) container_of(__fh, struct ipu_csc_scaler_ctx, fh)
-
#define IMX_CSC_SCALER_NAME "imx-csc-scaler"
enum {
@@ -66,6 +64,11 @@ struct ipu_csc_scaler_ctx {
unsigned int sequence;
};
+static inline struct ipu_csc_scaler_ctx *file_to_ctx(struct file *filp)
+{
+ return container_of(file_to_v4l2_fh(filp), struct ipu_csc_scaler_ctx, fh);
+}
+
static struct ipu_csc_scaler_q_data *get_q_data(struct ipu_csc_scaler_ctx *ctx,
enum v4l2_buf_type type)
{
@@ -179,7 +182,7 @@ static int ipu_csc_scaler_enum_fmt(struct file *file, void *fh,
static int ipu_csc_scaler_g_fmt(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct ipu_csc_scaler_ctx *ctx = fh_to_ctx(priv);
+ struct ipu_csc_scaler_ctx *ctx = file_to_ctx(file);
struct ipu_csc_scaler_q_data *q_data;
q_data = get_q_data(ctx, f->type);
@@ -192,7 +195,7 @@ static int ipu_csc_scaler_g_fmt(struct file *file, void *priv,
static int ipu_csc_scaler_try_fmt(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct ipu_csc_scaler_ctx *ctx = fh_to_ctx(priv);
+ struct ipu_csc_scaler_ctx *ctx = file_to_ctx(file);
struct ipu_csc_scaler_q_data *q_data = get_q_data(ctx, f->type);
struct ipu_image test_in, test_out;
enum v4l2_field field;
@@ -240,8 +243,8 @@ static int ipu_csc_scaler_try_fmt(struct file *file, void *priv,
static int ipu_csc_scaler_s_fmt(struct file *file, void *priv,
struct v4l2_format *f)
{
+ struct ipu_csc_scaler_ctx *ctx = file_to_ctx(file);
struct ipu_csc_scaler_q_data *q_data;
- struct ipu_csc_scaler_ctx *ctx = fh_to_ctx(priv);
struct vb2_queue *vq;
int ret;
@@ -296,7 +299,7 @@ static int ipu_csc_scaler_s_fmt(struct file *file, void *priv,
static int ipu_csc_scaler_g_selection(struct file *file, void *priv,
struct v4l2_selection *s)
{
- struct ipu_csc_scaler_ctx *ctx = fh_to_ctx(priv);
+ struct ipu_csc_scaler_ctx *ctx = file_to_ctx(file);
struct ipu_csc_scaler_q_data *q_data;
switch (s->target) {
@@ -334,7 +337,7 @@ static int ipu_csc_scaler_g_selection(struct file *file, void *priv,
static int ipu_csc_scaler_s_selection(struct file *file, void *priv,
struct v4l2_selection *s)
{
- struct ipu_csc_scaler_ctx *ctx = fh_to_ctx(priv);
+ struct ipu_csc_scaler_ctx *ctx = file_to_ctx(file);
struct ipu_csc_scaler_q_data *q_data;
switch (s->target) {
@@ -760,8 +763,7 @@ static int ipu_csc_scaler_open(struct file *file)
ctx->rot_mode = IPU_ROTATE_NONE;
v4l2_fh_init(&ctx->fh, video_devdata(file));
- file->private_data = &ctx->fh;
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
ctx->priv = priv;
ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(priv->m2m_dev, ctx,
@@ -788,7 +790,7 @@ static int ipu_csc_scaler_open(struct file *file)
err_ctrls:
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
err_ctx:
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
kfree(ctx);
return ret;
@@ -797,13 +799,13 @@ err_ctx:
static int ipu_csc_scaler_release(struct file *file)
{
struct ipu_csc_scaler_priv *priv = video_drvdata(file);
- struct ipu_csc_scaler_ctx *ctx = fh_to_ctx(file->private_data);
+ struct ipu_csc_scaler_ctx *ctx = file_to_ctx(file);
dev_dbg(priv->dev, "Releasing instance %p\n", ctx);
v4l2_ctrl_handler_free(&ctx->ctrl_hdlr);
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
kfree(ctx);
diff --git a/drivers/staging/media/imx/imx-media-csi.c b/drivers/staging/media/imx/imx-media-csi.c
index a7cd3ef95fc3..fd7e37d803e7 100644
--- a/drivers/staging/media/imx/imx-media-csi.c
+++ b/drivers/staging/media/imx/imx-media-csi.c
@@ -1751,12 +1751,6 @@ static int csi_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
return v4l2_event_subscribe(fh, sub, 0, NULL);
}
-static int csi_unsubscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
- struct v4l2_event_subscription *sub)
-{
- return v4l2_event_unsubscribe(fh, sub);
-}
-
static int csi_registered(struct v4l2_subdev *sd)
{
struct csi_priv *priv = v4l2_get_subdevdata(sd);
@@ -1872,7 +1866,7 @@ static const struct media_entity_operations csi_entity_ops = {
static const struct v4l2_subdev_core_ops csi_core_ops = {
.subscribe_event = csi_subscribe_event,
- .unsubscribe_event = csi_unsubscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
};
static const struct v4l2_subdev_video_ops csi_video_ops = {
diff --git a/drivers/staging/media/ipu3/ipu3-css.c b/drivers/staging/media/ipu3/ipu3-css.c
index 1b0a59b78949..777cac1c27bf 100644
--- a/drivers/staging/media/ipu3/ipu3-css.c
+++ b/drivers/staging/media/ipu3/ipu3-css.c
@@ -4,6 +4,7 @@
#include <linux/device.h>
#include <linux/iopoll.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include "ipu3.h"
#include "ipu3-css.h"
@@ -226,7 +227,7 @@ int imgu_css_set_powerup(struct device *dev, void __iomem *base,
state = readl(base + IMGU_REG_STATE);
dev_dbg(dev, "CSS pm_ctrl 0x%x state 0x%x (power %s)\n",
- pm_ctrl, state, state & IMGU_STATE_POWER_DOWN ? "down" : "up");
+ pm_ctrl, state, str_down_up(state & IMGU_STATE_POWER_DOWN));
/* Power up CSS using wrapper */
if (state & IMGU_STATE_POWER_DOWN) {
diff --git a/drivers/staging/media/ipu3/ipu3-v4l2.c b/drivers/staging/media/ipu3/ipu3-v4l2.c
index ad6095bf717d..2f6041d342f4 100644
--- a/drivers/staging/media/ipu3/ipu3-v4l2.c
+++ b/drivers/staging/media/ipu3/ipu3-v4l2.c
@@ -3,6 +3,7 @@
#include <linux/module.h>
#include <linux/pm_runtime.h>
+#include <linux/string_choices.h>
#include <media/v4l2-event.h>
#include <media/v4l2-ioctl.h>
@@ -287,7 +288,7 @@ static int imgu_link_setup(struct media_entity *entity,
WARN_ON(pad >= IMGU_NODE_NUM);
dev_dbg(&imgu->pci_dev->dev, "pipe %u pad %u is %s", pipe, pad,
- flags & MEDIA_LNK_FL_ENABLED ? "enabled" : "disabled");
+ str_enabled_disabled(flags & MEDIA_LNK_FL_ENABLED));
imgu_pipe = &imgu->imgu_pipe[pipe];
imgu_pipe->nodes[pad].enabled = flags & MEDIA_LNK_FL_ENABLED;
@@ -302,7 +303,7 @@ static int imgu_link_setup(struct media_entity *entity,
__clear_bit(pipe, imgu->css.enabled_pipes);
dev_dbg(&imgu->pci_dev->dev, "pipe %u is %s", pipe,
- flags & MEDIA_LNK_FL_ENABLED ? "enabled" : "disabled");
+ str_enabled_disabled(flags & MEDIA_LNK_FL_ENABLED));
return 0;
}
diff --git a/drivers/staging/media/ipu7/ipu7-isys-csi2.c b/drivers/staging/media/ipu7/ipu7-isys-csi2.c
index 9c16ae9a0e5b..4023db4a6466 100644
--- a/drivers/staging/media/ipu7/ipu7-isys-csi2.c
+++ b/drivers/staging/media/ipu7/ipu7-isys-csi2.c
@@ -77,7 +77,7 @@ static int csi2_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
case V4L2_EVENT_FRAME_SYNC:
return v4l2_event_subscribe(fh, sub, 10, NULL);
case V4L2_EVENT_CTRL:
- return v4l2_ctrl_subscribe_event(fh, sub);
+ return v4l2_ctrl_subdev_subscribe_event(sd, fh, sub);
default:
return -EINVAL;
}
diff --git a/drivers/staging/media/ipu7/ipu7-isys-queue.c b/drivers/staging/media/ipu7/ipu7-isys-queue.c
index 7046c29141f8..434d9d9c7158 100644
--- a/drivers/staging/media/ipu7/ipu7-isys-queue.c
+++ b/drivers/staging/media/ipu7/ipu7-isys-queue.c
@@ -442,14 +442,13 @@ static int ipu7_isys_link_fmt_validate(struct ipu7_isys_queue *aq)
media_pad_remote_pad_first(av->vdev.entity.pads);
struct v4l2_mbus_framefmt format;
struct v4l2_subdev *sd;
- u32 r_stream, code;
+ u32 r_stream = 0, code;
int ret;
if (!remote_pad)
return -ENOTCONN;
sd = media_entity_to_v4l2_subdev(remote_pad->entity);
- r_stream = ipu7_isys_get_src_stream_by_src_pad(sd, remote_pad->index);
ret = ipu7_isys_get_stream_pad_fmt(sd, remote_pad->index, r_stream,
&format);
diff --git a/drivers/staging/media/ipu7/ipu7-isys-subdev.c b/drivers/staging/media/ipu7/ipu7-isys-subdev.c
index 98b6ef6a2f21..67a776033d5b 100644
--- a/drivers/staging/media/ipu7/ipu7-isys-subdev.c
+++ b/drivers/staging/media/ipu7/ipu7-isys-subdev.c
@@ -194,13 +194,22 @@ static int subdev_set_routing(struct v4l2_subdev *sd,
.code = MEDIA_BUS_FMT_SGRBG10_1X10,
.field = V4L2_FIELD_NONE,
};
+ struct v4l2_subdev_route *route;
int ret;
ret = v4l2_subdev_routing_validate(sd, routing,
- V4L2_SUBDEV_ROUTING_ONLY_1_TO_1);
+ V4L2_SUBDEV_ROUTING_ONLY_1_TO_1 |
+ V4L2_SUBDEV_ROUTING_NO_SOURCE_MULTIPLEXING);
if (ret)
return ret;
+ /*
+ * The device doesn't support source multiplexing, so set all source
+ * streams to 0 to simplify stream handling throughout the driver.
+ */
+ for_each_active_route(routing, route)
+ route->source_stream = 0;
+
return v4l2_subdev_set_routing_with_fmt(sd, state, routing, &fmt);
}
@@ -222,30 +231,6 @@ int ipu7_isys_get_stream_pad_fmt(struct v4l2_subdev *sd, u32 pad, u32 stream,
return fmt ? 0 : -EINVAL;
}
-u32 ipu7_isys_get_src_stream_by_src_pad(struct v4l2_subdev *sd, u32 pad)
-{
- struct v4l2_subdev_state *state;
- struct v4l2_subdev_route *routes;
- u32 source_stream = 0;
- unsigned int i;
-
- state = v4l2_subdev_lock_and_get_active_state(sd);
- if (!state)
- return 0;
-
- routes = state->routing.routes;
- for (i = 0; i < state->routing.num_routes; i++) {
- if (routes[i].source_pad == pad) {
- source_stream = routes[i].source_stream;
- break;
- }
- }
-
- v4l2_subdev_unlock_state(state);
-
- return source_stream;
-}
-
static int ipu7_isys_subdev_init_state(struct v4l2_subdev *sd,
struct v4l2_subdev_state *state)
{
diff --git a/drivers/staging/media/ipu7/ipu7-isys-subdev.h b/drivers/staging/media/ipu7/ipu7-isys-subdev.h
index 1057ec39ae39..faa50031cf24 100644
--- a/drivers/staging/media/ipu7/ipu7-isys-subdev.h
+++ b/drivers/staging/media/ipu7/ipu7-isys-subdev.h
@@ -37,7 +37,6 @@ int ipu7_isys_subdev_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_state *state,
struct v4l2_subdev_mbus_code_enum
*code);
-u32 ipu7_isys_get_src_stream_by_src_pad(struct v4l2_subdev *sd, u32 pad);
int ipu7_isys_get_stream_pad_fmt(struct v4l2_subdev *sd, u32 pad, u32 stream,
struct v4l2_mbus_framefmt *format);
int ipu7_isys_subdev_set_routing(struct v4l2_subdev *sd,
diff --git a/drivers/staging/media/ipu7/ipu7-isys-video.c b/drivers/staging/media/ipu7/ipu7-isys-video.c
index 8756da3a8fb0..1a7c8a91fffb 100644
--- a/drivers/staging/media/ipu7/ipu7-isys-video.c
+++ b/drivers/staging/media/ipu7/ipu7-isys-video.c
@@ -291,7 +291,7 @@ static int link_validate(struct media_link *link)
struct v4l2_mbus_framefmt *s_fmt;
struct v4l2_subdev *s_sd;
struct media_pad *s_pad;
- u32 s_stream, code;
+ u32 s_stream = 0, code;
int ret = -EPIPE;
if (!link->source->entity)
@@ -307,7 +307,6 @@ static int link_validate(struct media_link *link)
link->sink->entity->name);
s_pad = media_pad_remote_pad_first(&av->pad);
- s_stream = ipu7_isys_get_src_stream_by_src_pad(s_sd, s_pad->index);
v4l2_subdev_lock_state(s_state);
@@ -370,10 +369,9 @@ static int ipu7_isys_fw_pin_cfg(struct ipu7_isys_video *av,
struct device *dev = &isys->adev->auxdev.dev;
struct v4l2_mbus_framefmt fmt;
int output_pins;
- u32 src_stream;
+ u32 src_stream = 0;
int ret;
- src_stream = ipu7_isys_get_src_stream_by_src_pad(sd, src_pad->index);
ret = ipu7_isys_get_stream_pad_fmt(sd, src_pad->index, src_stream,
&fmt);
if (ret < 0) {
@@ -781,32 +779,6 @@ ipu7_isys_query_stream_by_source(struct ipu7_isys *isys, int source, u8 vc)
return stream;
}
-static u32 get_remote_pad_stream(struct media_pad *r_pad)
-{
- struct v4l2_subdev_state *state;
- struct v4l2_subdev *sd;
- u32 stream_id = 0;
- unsigned int i;
-
- sd = media_entity_to_v4l2_subdev(r_pad->entity);
- state = v4l2_subdev_lock_and_get_active_state(sd);
- if (!state)
- return 0;
-
- for (i = 0; i < state->stream_configs.num_configs; i++) {
- struct v4l2_subdev_stream_config *cfg =
- &state->stream_configs.configs[i];
- if (cfg->pad == r_pad->index) {
- stream_id = cfg->stream;
- break;
- }
- }
-
- v4l2_subdev_unlock_state(state);
-
- return stream_id;
-}
-
int ipu7_isys_video_set_streaming(struct ipu7_isys_video *av, int state,
struct ipu7_isys_buffer_list *bl)
{
@@ -814,7 +786,7 @@ int ipu7_isys_video_set_streaming(struct ipu7_isys_video *av, int state,
struct device *dev = &av->isys->adev->auxdev.dev;
struct media_pad *r_pad;
struct v4l2_subdev *sd;
- u32 r_stream;
+ u32 r_stream = 0;
int ret = 0;
dev_dbg(dev, "set stream: %d\n", state);
@@ -824,7 +796,6 @@ int ipu7_isys_video_set_streaming(struct ipu7_isys_video *av, int state,
sd = &stream->asd->sd;
r_pad = media_pad_remote_pad_first(&av->pad);
- r_stream = get_remote_pad_stream(r_pad);
if (!state) {
stop_streaming_firmware(av);
@@ -946,6 +917,7 @@ void ipu7_isys_fw_close(struct ipu7_isys *isys)
ipu7_fw_isys_close(isys);
mutex_unlock(&isys->mutex);
+ pm_runtime_put(&isys->adev->auxdev.dev);
}
int ipu7_isys_setup_video(struct ipu7_isys_video *av,
@@ -1082,7 +1054,6 @@ int ipu7_isys_video_init(struct ipu7_isys_video *av)
__ipu_isys_vidioc_try_fmt_vid_cap(av, &format);
av->pix_fmt = format.fmt.pix;
- set_bit(V4L2_FL_USES_V4L2_FH, &av->vdev.flags);
video_set_drvdata(&av->vdev, av);
ret = video_register_device(&av->vdev, VFL_TYPE_VIDEO, -1);
diff --git a/drivers/staging/media/ipu7/ipu7.c b/drivers/staging/media/ipu7/ipu7.c
index 1b4f01db13ca..5cddc09c72bf 100644
--- a/drivers/staging/media/ipu7/ipu7.c
+++ b/drivers/staging/media/ipu7/ipu7.c
@@ -21,7 +21,6 @@
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
-#include <linux/version.h>
#include <media/ipu-bridge.h>
@@ -2248,20 +2247,13 @@ void ipu7_dump_fw_error_log(const struct ipu7_bus_device *adev)
}
EXPORT_SYMBOL_NS_GPL(ipu7_dump_fw_error_log, "INTEL_IPU7");
-static int ipu7_pci_config_setup(struct pci_dev *dev)
+static void ipu7_pci_config_setup(struct pci_dev *dev)
{
u16 pci_command;
- int ret;
pci_read_config_word(dev, PCI_COMMAND, &pci_command);
pci_command |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
pci_write_config_word(dev, PCI_COMMAND, pci_command);
-
- ret = pci_enable_msi(dev);
- if (ret)
- dev_err(&dev->dev, "Failed to enable msi (%d)\n", ret);
-
- return ret;
}
static int ipu7_map_fw_code_region(struct ipu7_bus_device *sys,
@@ -2435,7 +2427,6 @@ static int ipu7_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (!isp)
return -ENOMEM;
- dev_set_name(dev, "intel-ipu7");
isp->pdev = pdev;
INIT_LIST_HEAD(&isp->devices);
@@ -2510,13 +2501,15 @@ static int ipu7_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dma_set_max_seg_size(dev, UINT_MAX);
- ret = ipu7_pci_config_setup(pdev);
- if (ret)
- return ret;
+ ipu7_pci_config_setup(pdev);
+
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to alloc irq vector\n");
ret = ipu_buttress_init(isp);
if (ret)
- return ret;
+ goto pci_irq_free;
dev_info(dev, "firmware cpd file: %s\n", isp->cpd_fw_name);
@@ -2632,6 +2625,8 @@ out_ipu_bus_del_devices:
release_firmware(isp->cpd_fw);
buttress_exit:
ipu_buttress_exit(isp);
+pci_irq_free:
+ pci_free_irq_vectors(pdev);
return ret;
}
@@ -2648,6 +2643,9 @@ static void ipu7_pci_remove(struct pci_dev *pdev)
if (!IS_ERR_OR_NULL(isp->fw_code_region))
vfree(isp->fw_code_region);
+ ipu7_mmu_cleanup(isp->isys->mmu);
+ ipu7_mmu_cleanup(isp->psys->mmu);
+
ipu7_bus_del_devices(pdev);
pm_runtime_forbid(&pdev->dev);
@@ -2656,9 +2654,6 @@ static void ipu7_pci_remove(struct pci_dev *pdev)
ipu_buttress_exit(isp);
release_firmware(isp->cpd_fw);
-
- ipu7_mmu_cleanup(isp->psys->mmu);
- ipu7_mmu_cleanup(isp->isys->mmu);
}
static void ipu7_pci_reset_prepare(struct pci_dev *pdev)
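
The MSI conversion above follows the standard modern pattern: pci_alloc_irq_vectors() replaces pci_enable_msi(), pairs with pci_free_irq_vectors() on the error/remove paths, and pci_irq_vector() maps vector numbers to Linux IRQs. A minimal sketch:

#include <linux/interrupt.h>
#include <linux/pci.h>

static int example_setup_irq(struct pci_dev *pdev, irq_handler_t handler)
{
	int nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);

	if (nvec < 0)
		return nvec;

	/* vector 0 of the allocated range, translated to a Linux IRQ */
	return request_irq(pci_irq_vector(pdev, 0), handler, 0,
			   "example", pdev);
}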
diff --git a/drivers/staging/media/meson/vdec/vdec.c b/drivers/staging/media/meson/vdec/vdec.c
index 6d34a482492e..49e497a32973 100644
--- a/drivers/staging/media/meson/vdec/vdec.c
+++ b/drivers/staging/media/meson/vdec/vdec.c
@@ -558,8 +558,7 @@ vdec_try_fmt_common(struct amvdec_session *sess, u32 size,
static int vdec_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
{
- struct amvdec_session *sess =
- container_of(file->private_data, struct amvdec_session, fh);
+ struct amvdec_session *sess = file_to_amvdec_session(file);
vdec_try_fmt_common(sess, sess->core->platform->num_formats, f);
@@ -568,8 +567,7 @@ static int vdec_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
static int vdec_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
{
- struct amvdec_session *sess =
- container_of(file->private_data, struct amvdec_session, fh);
+ struct amvdec_session *sess = file_to_amvdec_session(file);
struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
@@ -596,8 +594,7 @@ static int vdec_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
static int vdec_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
{
- struct amvdec_session *sess =
- container_of(file->private_data, struct amvdec_session, fh);
+ struct amvdec_session *sess = file_to_amvdec_session(file);
struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
u32 num_formats = sess->core->platform->num_formats;
const struct amvdec_format *fmt_out;
@@ -658,8 +655,7 @@ static int vdec_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
static int vdec_enum_fmt(struct file *file, void *fh, struct v4l2_fmtdesc *f)
{
- struct amvdec_session *sess =
- container_of(file->private_data, struct amvdec_session, fh);
+ struct amvdec_session *sess = file_to_amvdec_session(file);
const struct vdec_platform *platform = sess->core->platform;
const struct amvdec_format *fmt_out;
@@ -688,8 +684,7 @@ static int vdec_enum_fmt(struct file *file, void *fh, struct v4l2_fmtdesc *f)
static int vdec_enum_framesizes(struct file *file, void *fh,
struct v4l2_frmsizeenum *fsize)
{
- struct amvdec_session *sess =
- container_of(file->private_data, struct amvdec_session, fh);
+ struct amvdec_session *sess = file_to_amvdec_session(file);
const struct amvdec_format *formats = sess->core->platform->formats;
const struct amvdec_format *fmt;
u32 num_formats = sess->core->platform->num_formats;
@@ -713,8 +708,7 @@ static int vdec_enum_framesizes(struct file *file, void *fh,
static int
vdec_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd)
{
- struct amvdec_session *sess =
- container_of(file->private_data, struct amvdec_session, fh);
+ struct amvdec_session *sess = file_to_amvdec_session(file);
struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops;
struct device *dev = sess->core->dev;
int ret;
@@ -773,8 +767,7 @@ static int vdec_subscribe_event(struct v4l2_fh *fh,
static int vdec_g_pixelaspect(struct file *file, void *fh, int type,
struct v4l2_fract *f)
{
- struct amvdec_session *sess =
- container_of(file->private_data, struct amvdec_session, fh);
+ struct amvdec_session *sess = file_to_amvdec_session(file);
if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
return -EINVAL;
@@ -915,9 +908,8 @@ static int vdec_open(struct file *file)
v4l2_fh_init(&sess->fh, core->vdev_dec);
sess->fh.ctrl_handler = &sess->ctrl_handler;
- v4l2_fh_add(&sess->fh);
+ v4l2_fh_add(&sess->fh, file);
sess->fh.m2m_ctx = sess->m2m_ctx;
- file->private_data = &sess->fh;
return 0;
@@ -930,12 +922,11 @@ err_free_sess:
static int vdec_close(struct file *file)
{
- struct amvdec_session *sess =
- container_of(file->private_data, struct amvdec_session, fh);
+ struct amvdec_session *sess = file_to_amvdec_session(file);
v4l2_m2m_ctx_release(sess->m2m_ctx);
v4l2_m2m_release(sess->m2m_dev);
- v4l2_fh_del(&sess->fh);
+ v4l2_fh_del(&sess->fh, file);
v4l2_fh_exit(&sess->fh);
mutex_destroy(&sess->lock);
diff --git a/drivers/staging/media/meson/vdec/vdec.h b/drivers/staging/media/meson/vdec/vdec.h
index 258685177700..7a5d8e871d70 100644
--- a/drivers/staging/media/meson/vdec/vdec.h
+++ b/drivers/staging/media/meson/vdec/vdec.h
@@ -282,6 +282,11 @@ struct amvdec_session {
void *priv;
};
+static inline struct amvdec_session *file_to_amvdec_session(struct file *filp)
+{
+ return container_of(file_to_v4l2_fh(filp), struct amvdec_session, fh);
+}
+
u32 amvdec_get_output_size(struct amvdec_session *sess);
#endif
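
The conversions in this series all follow the same shape: the V4L2 core now manages file->private_data via v4l2_fh_add(fh, file)/v4l2_fh_del(fh, file), and drivers recover their context with file_to_v4l2_fh() plus container_of(). A generic sketch, assuming the context embeds a struct v4l2_fh:

#include <linux/container_of.h>
#include <media/v4l2-fh.h>

struct example_ctx {
	struct v4l2_fh fh;	/* must be embedded for container_of() */
	/* driver-private state ... */
};

static inline struct example_ctx *file_to_example_ctx(struct file *filp)
{
	return container_of(file_to_v4l2_fh(filp), struct example_ctx, fh);
}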
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.c b/drivers/staging/media/sunxi/cedrus/cedrus.c
index 52a9588462ce..bff42ea1871f 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.c
@@ -366,7 +366,6 @@ static int cedrus_open(struct file *file)
}
v4l2_fh_init(&ctx->fh, video_devdata(file));
- file->private_data = &ctx->fh;
ctx->dev = dev;
ctx->bit_depth = 8;
@@ -383,7 +382,7 @@ static int cedrus_open(struct file *file)
if (ret)
goto err_m2m_release;
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
mutex_unlock(&dev->dev_mutex);
@@ -401,12 +400,11 @@ err_free:
static int cedrus_release(struct file *file)
{
struct cedrus_dev *dev = video_drvdata(file);
- struct cedrus_ctx *ctx = container_of(file->private_data,
- struct cedrus_ctx, fh);
+ struct cedrus_ctx *ctx = cedrus_file2ctx(file);
mutex_lock(&dev->dev_mutex);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
v4l2_ctrl_handler_free(&ctx->hdl);
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.h b/drivers/staging/media/sunxi/cedrus/cedrus.h
index 522c184e2afc..c4b1217c14b6 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus.h
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.h
@@ -155,6 +155,11 @@ struct cedrus_ctx {
} codec;
};
+static inline struct cedrus_ctx *cedrus_file2ctx(struct file *file)
+{
+ return container_of(file_to_v4l2_fh(file), struct cedrus_ctx, fh);
+}
+
struct cedrus_dec_ops {
void (*irq_clear)(struct cedrus_ctx *ctx);
void (*irq_disable)(struct cedrus_ctx *ctx);
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_video.c b/drivers/staging/media/sunxi/cedrus/cedrus_video.c
index 9fae2c7493d0..ad4ec3490775 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_video.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_video.c
@@ -83,11 +83,6 @@ static struct cedrus_format cedrus_formats[] = {
#define CEDRUS_FORMATS_COUNT ARRAY_SIZE(cedrus_formats)
-static inline struct cedrus_ctx *cedrus_file2ctx(struct file *file)
-{
- return container_of(file->private_data, struct cedrus_ctx, fh);
-}
-
static struct cedrus_format *cedrus_find_format(struct cedrus_ctx *ctx,
u32 pixelformat, u32 directions)
{
diff --git a/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_capture.c b/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_capture.c
index 24899f41dc1c..e7b99cee63d6 100644
--- a/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_capture.c
+++ b/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_capture.c
@@ -419,7 +419,7 @@ static void sun6i_isp_capture_format_prepare(struct v4l2_format *format)
pix_format->xfer_func = V4L2_XFER_FUNC_DEFAULT;
}
-static int sun6i_isp_capture_querycap(struct file *file, void *private,
+static int sun6i_isp_capture_querycap(struct file *file, void *priv,
struct v4l2_capability *capability)
{
struct sun6i_isp_device *isp_dev = video_drvdata(file);
@@ -433,7 +433,7 @@ static int sun6i_isp_capture_querycap(struct file *file, void *private,
return 0;
}
-static int sun6i_isp_capture_enum_fmt(struct file *file, void *private,
+static int sun6i_isp_capture_enum_fmt(struct file *file, void *priv,
struct v4l2_fmtdesc *fmtdesc)
{
u32 index = fmtdesc->index;
@@ -446,7 +446,7 @@ static int sun6i_isp_capture_enum_fmt(struct file *file, void *private,
return 0;
}
-static int sun6i_isp_capture_g_fmt(struct file *file, void *private,
+static int sun6i_isp_capture_g_fmt(struct file *file, void *priv,
struct v4l2_format *format)
{
struct sun6i_isp_device *isp_dev = video_drvdata(file);
@@ -456,7 +456,7 @@ static int sun6i_isp_capture_g_fmt(struct file *file, void *private,
return 0;
}
-static int sun6i_isp_capture_s_fmt(struct file *file, void *private,
+static int sun6i_isp_capture_s_fmt(struct file *file, void *priv,
struct v4l2_format *format)
{
struct sun6i_isp_device *isp_dev = video_drvdata(file);
@@ -471,7 +471,7 @@ static int sun6i_isp_capture_s_fmt(struct file *file, void *private,
return 0;
}
-static int sun6i_isp_capture_try_fmt(struct file *file, void *private,
+static int sun6i_isp_capture_try_fmt(struct file *file, void *priv,
struct v4l2_format *format)
{
sun6i_isp_capture_format_prepare(format);
@@ -479,7 +479,7 @@ static int sun6i_isp_capture_try_fmt(struct file *file, void *private,
return 0;
}
-static int sun6i_isp_capture_enum_input(struct file *file, void *private,
+static int sun6i_isp_capture_enum_input(struct file *file, void *priv,
struct v4l2_input *input)
{
if (input->index != 0)
@@ -491,7 +491,7 @@ static int sun6i_isp_capture_enum_input(struct file *file, void *private,
return 0;
}
-static int sun6i_isp_capture_g_input(struct file *file, void *private,
+static int sun6i_isp_capture_g_input(struct file *file, void *priv,
unsigned int *index)
{
*index = 0;
@@ -499,7 +499,7 @@ static int sun6i_isp_capture_g_input(struct file *file, void *private,
return 0;
}
-static int sun6i_isp_capture_s_input(struct file *file, void *private,
+static int sun6i_isp_capture_s_input(struct file *file, void *priv,
unsigned int index)
{
if (index != 0)
diff --git a/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_params.c b/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_params.c
index 3d95ed0b023e..77c2d06c0436 100644
--- a/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_params.c
+++ b/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_params.c
@@ -383,7 +383,7 @@ static const struct vb2_ops sun6i_isp_params_queue_ops = {
/* Video Device */
-static int sun6i_isp_params_querycap(struct file *file, void *private,
+static int sun6i_isp_params_querycap(struct file *file, void *priv,
struct v4l2_capability *capability)
{
struct sun6i_isp_device *isp_dev = video_drvdata(file);
@@ -397,7 +397,7 @@ static int sun6i_isp_params_querycap(struct file *file, void *private,
return 0;
}
-static int sun6i_isp_params_enum_fmt(struct file *file, void *private,
+static int sun6i_isp_params_enum_fmt(struct file *file, void *priv,
struct v4l2_fmtdesc *fmtdesc)
{
struct sun6i_isp_device *isp_dev = video_drvdata(file);
@@ -412,7 +412,7 @@ static int sun6i_isp_params_enum_fmt(struct file *file, void *private,
return 0;
}
-static int sun6i_isp_params_g_fmt(struct file *file, void *private,
+static int sun6i_isp_params_g_fmt(struct file *file, void *priv,
struct v4l2_format *format)
{
struct sun6i_isp_device *isp_dev = video_drvdata(file);
diff --git a/drivers/staging/media/tegra-video/tegra20.c b/drivers/staging/media/tegra-video/tegra20.c
index 7b8f8f810b35..1473f1b1f203 100644
--- a/drivers/staging/media/tegra-video/tegra20.c
+++ b/drivers/staging/media/tegra-video/tegra20.c
@@ -42,7 +42,7 @@
#define VI_INPUT_BT656 BIT(25)
#define VI_INPUT_YUV_INPUT_FORMAT_SFT 8 /* bits [9:8] */
#define VI_INPUT_YUV_INPUT_FORMAT_UYVY (0 << VI_INPUT_YUV_INPUT_FORMAT_SFT)
-#define VI_INPUT_YUV_INPUT_FORMAT_VYUY (1 << VI_INPUT_YUV_INPUT_FORMAT_SFT)
+#define VI_INPUT_YUV_INPUT_FORMAT_VYUY BIT(VI_INPUT_YUV_INPUT_FORMAT_SFT)
#define VI_INPUT_YUV_INPUT_FORMAT_YUYV (2 << VI_INPUT_YUV_INPUT_FORMAT_SFT)
#define VI_INPUT_YUV_INPUT_FORMAT_YVYU (3 << VI_INPUT_YUV_INPUT_FORMAT_SFT)
#define VI_INPUT_INPUT_FORMAT_SFT 2 /* bits [5:2] */
@@ -73,7 +73,7 @@
#define VI_OUTPUT_H_DIRECTION BIT(19)
#define VI_OUTPUT_YUV_OUTPUT_FORMAT_SFT 17
#define VI_OUTPUT_YUV_OUTPUT_FORMAT_UYVY (0 << VI_OUTPUT_YUV_OUTPUT_FORMAT_SFT)
-#define VI_OUTPUT_YUV_OUTPUT_FORMAT_VYUY (1 << VI_OUTPUT_YUV_OUTPUT_FORMAT_SFT)
+#define VI_OUTPUT_YUV_OUTPUT_FORMAT_VYUY BIT(VI_OUTPUT_YUV_OUTPUT_FORMAT_SFT)
#define VI_OUTPUT_YUV_OUTPUT_FORMAT_YUYV (2 << VI_OUTPUT_YUV_OUTPUT_FORMAT_SFT)
#define VI_OUTPUT_YUV_OUTPUT_FORMAT_YVYU (3 << VI_OUTPUT_YUV_OUTPUT_FORMAT_SFT)
#define VI_OUTPUT_OUTPUT_BYTE_SWAP BIT(16)
diff --git a/drivers/staging/most/video/video.c b/drivers/staging/most/video/video.c
index 2b3cdb1ce140..32f71d9a9cf7 100644
--- a/drivers/staging/most/video/video.c
+++ b/drivers/staging/most/video/video.c
@@ -52,6 +52,11 @@ struct comp_fh {
u32 offs;
};
+static inline struct comp_fh *to_comp_fh(struct file *filp)
+{
+ return container_of(file_to_v4l2_fh(filp), struct comp_fh, fh);
+}
+
static LIST_HEAD(video_devices);
static DEFINE_SPINLOCK(list_lock);
@@ -91,9 +96,7 @@ static int comp_vdev_open(struct file *filp)
fh->mdev = mdev;
v4l2_fh_init(&fh->fh, vdev);
- filp->private_data = fh;
-
- v4l2_fh_add(&fh->fh);
+ v4l2_fh_add(&fh->fh, filp);
ret = most_start_channel(mdev->iface, mdev->ch_idx, &comp);
if (ret) {
@@ -104,7 +107,7 @@ static int comp_vdev_open(struct file *filp)
return 0;
err_rm:
- v4l2_fh_del(&fh->fh);
+ v4l2_fh_del(&fh->fh, filp);
v4l2_fh_exit(&fh->fh);
err_dec:
@@ -115,7 +118,7 @@ err_dec:
static int comp_vdev_close(struct file *filp)
{
- struct comp_fh *fh = filp->private_data;
+ struct comp_fh *fh = to_comp_fh(filp);
struct most_video_dev *mdev = fh->mdev;
struct mbo *mbo, *tmp;
@@ -140,7 +143,7 @@ static int comp_vdev_close(struct file *filp)
most_stop_channel(mdev->iface, mdev->ch_idx, &comp);
mdev->mute = false;
- v4l2_fh_del(&fh->fh);
+ v4l2_fh_del(&fh->fh, filp);
v4l2_fh_exit(&fh->fh);
atomic_dec(&mdev->access_ref);
@@ -151,7 +154,7 @@ static int comp_vdev_close(struct file *filp)
static ssize_t comp_vdev_read(struct file *filp, char __user *buf,
size_t count, loff_t *pos)
{
- struct comp_fh *fh = filp->private_data;
+ struct comp_fh *fh = to_comp_fh(filp);
struct most_video_dev *mdev = fh->mdev;
int ret = 0;
@@ -200,7 +203,7 @@ static ssize_t comp_vdev_read(struct file *filp, char __user *buf,
static __poll_t comp_vdev_poll(struct file *filp, poll_table *wait)
{
- struct comp_fh *fh = filp->private_data;
+ struct comp_fh *fh = to_comp_fh(filp);
struct most_video_dev *mdev = fh->mdev;
__poll_t mask = 0;
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index 261f8dbdc382..0ba240e634a1 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -346,8 +346,7 @@ netdev_tx_t cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
* The skbuff will be reused without ever being freed. We must
* cleanup a bunch of core things.
*/
- dst_release(skb_dst(skb));
- skb_dst_set(skb, NULL);
+ skb_dst_drop(skb);
skb_ext_reset(skb);
nf_reset_ct(skb);
skb_reset_redirect(skb);
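
The skb_dst_drop() conversion is intended to be behavior-preserving here; for the refcounted-dst case this path handles, the helper is roughly the open-coded pair it replaces:

#include <linux/skbuff.h>
#include <net/dst.h>

/* rough equivalent of skb_dst_drop() for a refcounted dst */
static inline void example_dst_drop(struct sk_buff *skb)
{
	dst_release(skb_dst(skb));
	skb_dst_set(skb, NULL);
}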
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
index ac3d085808e9..315bab373729 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
@@ -2441,13 +2441,6 @@ exit:
return ret;
}
-static int cfg80211_rtw_change_bss(struct wiphy *wiphy,
- struct net_device *ndev,
- struct bss_parameters *params)
-{
- return 0;
-}
-
void rtw_cfg80211_rx_action(struct adapter *adapter, u8 *frame, uint frame_len, const char *msg)
{
s32 freq;
@@ -2704,7 +2697,6 @@ static struct cfg80211_ops rtw_cfg80211_ops = {
.del_station = cfg80211_rtw_del_station,
.change_station = cfg80211_rtw_change_station,
.dump_station = cfg80211_rtw_dump_station,
- .change_bss = cfg80211_rtw_change_bss,
.mgmt_tx = cfg80211_rtw_mgmt_tx,
};
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 88db94f382bb..efe8cdb20060 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -665,7 +665,7 @@ static ssize_t lio_target_nacl_cmdsn_depth_store(struct config_item *item,
}
acl_ci = &se_nacl->acl_group.cg_item;
if (!acl_ci) {
- pr_err("Unable to locatel acl_ci\n");
+ pr_err("Unable to locate acl_ci\n");
return -EINVAL;
}
tpg_ci = &acl_ci->ci_parent->ci_group->cg_item;
@@ -684,7 +684,7 @@ static ssize_t lio_target_nacl_cmdsn_depth_store(struct config_item *item,
ret = core_tpg_set_initiator_node_queue_depth(se_nacl, cmdsn_depth);
- pr_debug("LIO_Target_ConfigFS: %s/%s Set CmdSN Window: %u for"
+ pr_debug("LIO_Target_ConfigFS: %s/%s Set CmdSN Window: %u for "
"InitiatorName: %s\n", config_item_name(wwn_ci),
config_item_name(tpg_ci), cmdsn_depth,
config_item_name(acl_ci));
@@ -1131,7 +1131,7 @@ static void lio_target_tiqn_deltpg(struct se_portal_group *se_tpg)
/* End items for lio_target_tiqn_cit */
-/* Start LIO-Target TIQN struct contig_item lio_target_cit */
+/* Start LIO-Target TIQN struct config_item lio_target_cit */
static ssize_t lio_target_wwn_lio_version_show(struct config_item *item,
char *page)
diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c
index f60b156ede12..620de3910599 100644
--- a/drivers/target/iscsi/iscsi_target_tmr.c
+++ b/drivers/target/iscsi/iscsi_target_tmr.c
@@ -112,7 +112,8 @@ u8 iscsit_tmr_task_reassign(
struct iscsi_tmr_req *tmr_req = cmd->tmr_req;
struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
struct iscsi_tm *hdr = (struct iscsi_tm *) buf;
- u64 ret, ref_lun;
+ u64 ref_lun;
+ int ret;
pr_debug("Got TASK_REASSIGN TMR ITT: 0x%08x,"
" RefTaskTag: 0x%08x, ExpDataSN: 0x%08x, CID: %hu\n",
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index f991cf759836..db4e09042469 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -861,7 +861,7 @@ new_bio:
bio = bio_kmalloc(nr_vecs, GFP_KERNEL);
if (!bio)
goto fail;
- bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs,
+ bio_init_inline(bio, NULL, nr_vecs,
rw ? REQ_OP_WRITE : REQ_OP_READ);
bio->bi_end_io = pscsi_bi_endio;
diff --git a/drivers/tee/Kconfig b/drivers/tee/Kconfig
index 61b507c18780..98c3ad083940 100644
--- a/drivers/tee/Kconfig
+++ b/drivers/tee/Kconfig
@@ -3,8 +3,7 @@
menuconfig TEE
tristate "Trusted Execution Environment support"
depends on HAVE_ARM_SMCCC || COMPILE_TEST || CPU_SUP_AMD
- select CRYPTO
- select CRYPTO_SHA1
+ select CRYPTO_LIB_SHA1
select DMA_SHARED_BUFFER
select GENERIC_ALLOCATOR
help
@@ -13,8 +12,14 @@ menuconfig TEE
if TEE
+config TEE_DMABUF_HEAPS
+ bool
+ depends on HAS_DMA && DMABUF_HEAPS
+ default y
+
source "drivers/tee/optee/Kconfig"
source "drivers/tee/amdtee/Kconfig"
source "drivers/tee/tstee/Kconfig"
+source "drivers/tee/qcomtee/Kconfig"
endif
diff --git a/drivers/tee/Makefile b/drivers/tee/Makefile
index 5488cba30bd2..3239b91dee96 100644
--- a/drivers/tee/Makefile
+++ b/drivers/tee/Makefile
@@ -1,8 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_TEE) += tee.o
tee-objs += tee_core.o
+tee-objs += tee_heap.o
tee-objs += tee_shm.o
tee-objs += tee_shm_pool.o
obj-$(CONFIG_OPTEE) += optee/
obj-$(CONFIG_AMDTEE) += amdtee/
obj-$(CONFIG_ARM_TSTEE) += tstee/
+obj-$(CONFIG_QCOMTEE) += qcomtee/
diff --git a/drivers/tee/optee/Kconfig b/drivers/tee/optee/Kconfig
index 7bb7990d0b07..50d2051f7f20 100644
--- a/drivers/tee/optee/Kconfig
+++ b/drivers/tee/optee/Kconfig
@@ -25,3 +25,8 @@ config OPTEE_INSECURE_LOAD_IMAGE
Additional documentation on kernel security risks are at
Documentation/tee/op-tee.rst.
+
+config OPTEE_STATIC_PROTMEM_POOL
+ bool
+ depends on HAS_IOMEM && TEE_DMABUF_HEAPS
+ default y
diff --git a/drivers/tee/optee/Makefile b/drivers/tee/optee/Makefile
index a6eff388d300..ad7049c1c107 100644
--- a/drivers/tee/optee/Makefile
+++ b/drivers/tee/optee/Makefile
@@ -4,6 +4,7 @@ optee-objs += core.o
optee-objs += call.o
optee-objs += notif.o
optee-objs += rpc.o
+optee-objs += protmem.o
optee-objs += supp.o
optee-objs += device.o
optee-objs += smc_abi.o
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
index c75fddc83576..5b62139714ce 100644
--- a/drivers/tee/optee/core.c
+++ b/drivers/tee/optee/core.c
@@ -56,6 +56,13 @@ int optee_rpmb_intf_rdev(struct notifier_block *intf, unsigned long action,
return 0;
}
+int optee_set_dma_mask(struct optee *optee, u_int pa_width)
+{
+ u64 mask = DMA_BIT_MASK(min(64, pa_width));
+
+ return dma_coerce_mask_and_coherent(&optee->teedev->dev, mask);
+}
+
static void optee_bus_scan(struct work_struct *work)
{
WARN_ON(optee_enumerate_devices(PTA_CMD_GET_DEVICES_SUPP));
@@ -72,7 +79,7 @@ static ssize_t rpmb_routing_model_show(struct device *dev,
else
s = "user";
- return scnprintf(buf, PAGE_SIZE, "%s\n", s);
+ return sysfs_emit(buf, "%s\n", s);
}
static DEVICE_ATTR_RO(rpmb_routing_model);
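
On the optee_set_dma_mask() helper: the min(64, pa_width) clamp keeps DMA_BIT_MASK() well-defined for firmware-reported widths of 64 or more, since the macro special-cases n == 64 to avoid an undefined 64-bit shift. A sketch of the same idea in isolation:

#include <linux/dma-mapping.h>
#include <linux/minmax.h>

static int example_set_mask(struct device *dev, unsigned int pa_width)
{
	/* DMA_BIT_MASK(64) is defined as ~0ULL, so the clamp is safe */
	return dma_coerce_mask_and_coherent(dev,
					    DMA_BIT_MASK(min(64U, pa_width)));
}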
diff --git a/drivers/tee/optee/ffa_abi.c b/drivers/tee/optee/ffa_abi.c
index a963eed70c1d..bf8390789ecf 100644
--- a/drivers/tee/optee/ffa_abi.c
+++ b/drivers/tee/optee/ffa_abi.c
@@ -649,6 +649,124 @@ static int optee_ffa_do_call_with_arg(struct tee_context *ctx,
return optee_ffa_yielding_call(ctx, &data, rpc_arg, system_thread);
}
+static int do_call_lend_protmem(struct optee *optee, u64 cookie, u32 use_case)
+{
+ struct optee_shm_arg_entry *entry;
+ struct optee_msg_arg *msg_arg;
+ struct tee_shm *shm;
+ u_int offs;
+ int rc;
+
+ msg_arg = optee_get_msg_arg(optee->ctx, 1, &entry, &shm, &offs);
+ if (IS_ERR(msg_arg))
+ return PTR_ERR(msg_arg);
+
+ msg_arg->cmd = OPTEE_MSG_CMD_ASSIGN_PROTMEM;
+ msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
+ msg_arg->params[0].u.value.a = cookie;
+ msg_arg->params[0].u.value.b = use_case;
+
+ rc = optee->ops->do_call_with_arg(optee->ctx, shm, offs, false);
+ if (rc)
+ goto out;
+ if (msg_arg->ret != TEEC_SUCCESS) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+out:
+ optee_free_msg_arg(optee->ctx, entry, offs);
+ return rc;
+}
+
+static int optee_ffa_lend_protmem(struct optee *optee, struct tee_shm *protmem,
+ u32 *mem_attrs, unsigned int ma_count,
+ u32 use_case)
+{
+ struct ffa_device *ffa_dev = optee->ffa.ffa_dev;
+ const struct ffa_mem_ops *mem_ops = ffa_dev->ops->mem_ops;
+ const struct ffa_msg_ops *msg_ops = ffa_dev->ops->msg_ops;
+ struct ffa_send_direct_data data;
+ struct ffa_mem_region_attributes *mem_attr;
+ struct ffa_mem_ops_args args = {
+ .use_txbuf = true,
+ .tag = use_case,
+ };
+ struct page *page;
+ struct scatterlist sgl;
+ unsigned int n;
+ int rc;
+
+	mem_attr = kcalloc(ma_count, sizeof(*mem_attr), GFP_KERNEL);
+	if (!mem_attr)
+		return -ENOMEM;
+ for (n = 0; n < ma_count; n++) {
+ mem_attr[n].receiver = mem_attrs[n] & U16_MAX;
+ mem_attr[n].attrs = mem_attrs[n] >> 16;
+ }
+ args.attrs = mem_attr;
+ args.nattrs = ma_count;
+
+ page = phys_to_page(protmem->paddr);
+ sg_init_table(&sgl, 1);
+ sg_set_page(&sgl, page, protmem->size, 0);
+
+ args.sg = &sgl;
+ rc = mem_ops->memory_lend(&args);
+ kfree(mem_attr);
+ if (rc)
+ return rc;
+
+ rc = do_call_lend_protmem(optee, args.g_handle, use_case);
+ if (rc)
+ goto err_reclaim;
+
+ rc = optee_shm_add_ffa_handle(optee, protmem, args.g_handle);
+ if (rc)
+ goto err_unreg;
+
+ protmem->sec_world_id = args.g_handle;
+
+ return 0;
+
+err_unreg:
+ data = (struct ffa_send_direct_data){
+ .data0 = OPTEE_FFA_RELEASE_PROTMEM,
+ .data1 = (u32)args.g_handle,
+ .data2 = (u32)(args.g_handle >> 32),
+ };
+ msg_ops->sync_send_receive(ffa_dev, &data);
+err_reclaim:
+ mem_ops->memory_reclaim(args.g_handle, 0);
+ return rc;
+}
+
+static int optee_ffa_reclaim_protmem(struct optee *optee,
+ struct tee_shm *protmem)
+{
+ struct ffa_device *ffa_dev = optee->ffa.ffa_dev;
+ const struct ffa_msg_ops *msg_ops = ffa_dev->ops->msg_ops;
+ const struct ffa_mem_ops *mem_ops = ffa_dev->ops->mem_ops;
+ u64 global_handle = protmem->sec_world_id;
+ struct ffa_send_direct_data data = {
+ .data0 = OPTEE_FFA_RELEASE_PROTMEM,
+ .data1 = (u32)global_handle,
+ .data2 = (u32)(global_handle >> 32)
+ };
+ int rc;
+
+ optee_shm_rem_ffa_handle(optee, global_handle);
+ protmem->sec_world_id = 0;
+
+ rc = msg_ops->sync_send_receive(ffa_dev, &data);
+ if (rc)
+ pr_err("Release SHM id 0x%llx rc %d\n", global_handle, rc);
+
+ rc = mem_ops->memory_reclaim(global_handle, 0);
+ if (rc)
+ pr_err("mem_reclaim: 0x%llx %d", global_handle, rc);
+
+ return rc;
+}
+
/*
* 6. Driver initialization
*
@@ -819,6 +937,8 @@ static const struct optee_ops optee_ffa_ops = {
.do_call_with_arg = optee_ffa_do_call_with_arg,
.to_msg_param = optee_ffa_to_msg_param,
.from_msg_param = optee_ffa_from_msg_param,
+ .lend_protmem = optee_ffa_lend_protmem,
+ .reclaim_protmem = optee_ffa_reclaim_protmem,
};
static void optee_ffa_remove(struct ffa_device *ffa_dev)
@@ -891,6 +1011,25 @@ err:
return rc;
}
+static int optee_ffa_protmem_pool_init(struct optee *optee, u32 sec_caps)
+{
+ enum tee_dma_heap_id id = TEE_DMA_HEAP_SECURE_VIDEO_PLAY;
+ struct tee_protmem_pool *pool;
+ int rc = 0;
+
+ if (sec_caps & OPTEE_FFA_SEC_CAP_PROTMEM) {
+ pool = optee_protmem_alloc_dyn_pool(optee, id);
+ if (IS_ERR(pool))
+ return PTR_ERR(pool);
+
+ rc = tee_device_register_dma_heap(optee->teedev, id, pool);
+ if (rc)
+ pool->ops->destroy_pool(pool);
+ }
+
+ return rc;
+}
+
static int optee_ffa_probe(struct ffa_device *ffa_dev)
{
const struct ffa_notifier_ops *notif_ops;
@@ -941,7 +1080,7 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
optee);
if (IS_ERR(teedev)) {
rc = PTR_ERR(teedev);
- goto err_free_pool;
+ goto err_free_shm_pool;
}
optee->teedev = teedev;
@@ -988,6 +1127,9 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
rc);
}
+ if (optee_ffa_protmem_pool_init(optee, sec_caps))
+ pr_info("Protected memory service not available\n");
+
rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES);
if (rc)
goto err_unregister_devices;
@@ -1018,7 +1160,7 @@ err_unreg_supp_teedev:
tee_device_unregister(optee->supp_teedev);
err_unreg_teedev:
tee_device_unregister(optee->teedev);
-err_free_pool:
+err_free_shm_pool:
tee_shm_pool_free(pool);
err_free_optee:
kfree(optee);
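
optee_ffa_lend_protmem() above unpacks each u32 in mem_attrs into an
FF-A endpoint/permission pair: the receiver endpoint ID sits in bits
[15:0] and the memory attributes in bits [31:16]. A sketch of packing
one such word on the producer side (endpoint ID and permission value
are illustrative, not taken from this patch):

	u16 endpoint_id = 0x8001;	/* hypothetical SP endpoint ID */
	u16 perms = 0x2;		/* hypothetical RW attribute encoding */
	u32 word = ((u32)perms << 16) | endpoint_id;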
diff --git a/drivers/tee/optee/optee_ffa.h b/drivers/tee/optee/optee_ffa.h
index 257735ae5b56..cc257e7956a3 100644
--- a/drivers/tee/optee/optee_ffa.h
+++ b/drivers/tee/optee/optee_ffa.h
@@ -81,7 +81,7 @@
* as the second MSG arg struct for
* OPTEE_FFA_YIELDING_CALL_WITH_ARG.
* Bit[31:8]: Reserved (MBZ)
- * w5: Bitfield of secure world capabilities OPTEE_FFA_SEC_CAP_* below,
+ * w5: Bitfield of OP-TEE capabilities OPTEE_FFA_SEC_CAP_*
* w6: The maximum secure world notification number
* w7: Not used (MBZ)
*/
@@ -94,6 +94,8 @@
#define OPTEE_FFA_SEC_CAP_ASYNC_NOTIF BIT(1)
/* OP-TEE supports probing for RPMB device if needed */
#define OPTEE_FFA_SEC_CAP_RPMB_PROBE BIT(2)
+/* OP-TEE supports Protected Memory for secure data path */
+#define OPTEE_FFA_SEC_CAP_PROTMEM BIT(3)
#define OPTEE_FFA_EXCHANGE_CAPABILITIES OPTEE_FFA_BLOCKING_CALL(2)
@@ -108,7 +110,7 @@
*
* Return register usage:
* w3: Error code, 0 on success
- * w4-w7: Note used (MBZ)
+ * w4-w7: Not used (MBZ)
*/
#define OPTEE_FFA_UNREGISTER_SHM OPTEE_FFA_BLOCKING_CALL(3)
@@ -119,16 +121,31 @@
* Call register usage:
* w3: Service ID, OPTEE_FFA_ENABLE_ASYNC_NOTIF
* w4: Notification value to request bottom half processing, should be
- * less than OPTEE_FFA_MAX_ASYNC_NOTIF_VALUE.
+ * less than OPTEE_FFA_MAX_ASYNC_NOTIF_VALUE
* w5-w7: Not used (MBZ)
*
* Return register usage:
* w3: Error code, 0 on success
- * w4-w7: Note used (MBZ)
+ * w4-w7: Not used (MBZ)
*/
#define OPTEE_FFA_ENABLE_ASYNC_NOTIF OPTEE_FFA_BLOCKING_CALL(5)
-#define OPTEE_FFA_MAX_ASYNC_NOTIF_VALUE 64
+#define OPTEE_FFA_MAX_ASYNC_NOTIF_VALUE 64
+
+/*
+ * Release protected memory
+ *
+ * Call register usage:
+ * w3:    Service ID, OPTEE_FFA_RELEASE_PROTMEM
+ * w4:    Shared memory handle, lower bits
+ * w5:    Shared memory handle, higher bits
+ * w6-w7: Not used (MBZ)
+ *
+ * Return register usage:
+ * w3:    Error code, 0 on success
+ * w4-w7: Not used (MBZ)
+ */
+#define OPTEE_FFA_RELEASE_PROTMEM OPTEE_FFA_BLOCKING_CALL(8)
/*
* Call with struct optee_msg_arg as argument in the supplied shared memory
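
The 64-bit FF-A global handle is always split across two 32-bit
registers here, low half in w4 and high half in w5. The casts used in
ffa_abi.c are equivalent to the helper form:

	/* Same packing as (u32)handle / (u32)(handle >> 32) above. */
	data.data1 = lower_32_bits(global_handle);
	data.data2 = upper_32_bits(global_handle);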
diff --git a/drivers/tee/optee/optee_msg.h b/drivers/tee/optee/optee_msg.h
index e8840a82b983..838e1d4a22f0 100644
--- a/drivers/tee/optee/optee_msg.h
+++ b/drivers/tee/optee/optee_msg.h
@@ -133,13 +133,13 @@ struct optee_msg_param_rmem {
};
/**
- * struct optee_msg_param_fmem - ffa memory reference parameter
+ * struct optee_msg_param_fmem - FF-A memory reference parameter
* @offs_lower: Lower bits of offset into shared memory reference
* @offs_upper: Upper bits of offset into shared memory reference
* @internal_offs: Internal offset into the first page of shared memory
* reference
* @size: Size of the buffer
- * @global_id: Global identifier of Shared memory
+ * @global_id: Global identifier of the shared memory
*/
struct optee_msg_param_fmem {
u32 offs_low;
@@ -165,7 +165,7 @@ struct optee_msg_param_value {
* @attr: attributes
* @tmem: parameter by temporary memory reference
* @rmem: parameter by registered memory reference
- * @fmem: parameter by ffa registered memory reference
+ * @fmem: parameter by FF-A registered memory reference
* @value: parameter by opaque value
* @octets: parameter by octet string
*
@@ -297,6 +297,18 @@ struct optee_msg_arg {
#define OPTEE_MSG_FUNCID_GET_OS_REVISION 0x0001
/*
+ * Values used in OPTEE_MSG_CMD_LEND_PROTMEM below
+ * OPTEE_MSG_PROTMEM_RESERVED Reserved
+ * OPTEE_MSG_PROTMEM_SECURE_VIDEO_PLAY Secure Video Playback
+ * OPTEE_MSG_PROTMEM_TRUSTED_UI		Trusted UI
+ * OPTEE_MSG_PROTMEM_SECURE_VIDEO_RECORD Secure Video Recording
+ */
+#define OPTEE_MSG_PROTMEM_RESERVED 0
+#define OPTEE_MSG_PROTMEM_SECURE_VIDEO_PLAY 1
+#define OPTEE_MSG_PROTMEM_TRUSTED_UI 2
+#define OPTEE_MSG_PROTMEM_SECURE_VIDEO_RECORD 3
+
+/*
* Do a secure call with struct optee_msg_arg as argument
* The OPTEE_MSG_CMD_* below defines what goes in struct optee_msg_arg::cmd
*
@@ -337,15 +349,63 @@ struct optee_msg_arg {
* OPTEE_MSG_CMD_STOP_ASYNC_NOTIF informs secure world that from now is
* normal world unable to process asynchronous notifications. Typically
* used when the driver is shut down.
+ *
+ * OPTEE_MSG_CMD_LEND_PROTMEM lends protected memory. The passed normal
+ * physical memory is protected from normal world access. The memory
+ * should be unmapped prior to this call since it becomes inaccessible
+ * during the request.
+ * Parameters are passed as:
+ * [in] param[0].attr OPTEE_MSG_ATTR_TYPE_VALUE_INPUT
+ * [in] param[0].u.value.a OPTEE_MSG_PROTMEM_* defined above
+ * [in] param[1].attr OPTEE_MSG_ATTR_TYPE_TMEM_INPUT
+ * [in] param[1].u.tmem.buf_ptr physical address
+ * [in] param[1].u.tmem.size size
+ * [in] param[1].u.tmem.shm_ref holds protected memory reference
+ *
+ * OPTEE_MSG_CMD_RECLAIM_PROTMEM reclaims a previously lent protected
+ * memory reference. The physical memory is accessible by the normal world
+ * after this function has returned and can be mapped again. The information
+ * is passed as:
+ * [in] param[0].attr OPTEE_MSG_ATTR_TYPE_VALUE_INPUT
+ * [in] param[0].u.value.a holds protected memory cookie
+ *
+ * OPTEE_MSG_CMD_GET_PROTMEM_CONFIG get configuration for a specific
+ * protected memory use case. Parameters are passed as:
+ * [in] param[0].attr OPTEE_MSG_ATTR_TYPE_VALUE_INOUT
+ * [in] param[0].value.a OPTEE_MSG_PROTMEM_*
+ * [in] param[1].attr OPTEE_MSG_ATTR_TYPE_{R,F}MEM_OUTPUT
+ * [in] param[1].u.{r,f}mem Buffer or NULL
+ * [in] param[1].u.{r,f}mem.size Provided size of buffer or 0 for query
+ * Output for the protected use case:
+ * [out] param[0].value.a Minimal size of protected memory
+ * [out] param[0].value.b Required alignment of size and start of
+ * protected memory
+ * [out] param[0].value.c PA width, max 64
+ * [out] param[1].{r,f}mem.size Size of output data
+ * [out] param[1].{r,f}mem If non-NULL, contains an array of
+ * uint32_t memory attributes that must be
+ * included when lending memory for this
+ * use case
+ *
+ * OPTEE_MSG_CMD_ASSIGN_PROTMEM assigns a use case to protected memory
+ * previously lent using the FFA_LEND framework ABI. Parameters are passed
+ * as:
+ * [in] param[0].attr OPTEE_MSG_ATTR_TYPE_VALUE_INPUT
+ * [in] param[0].u.value.a holds protected memory cookie
+ * [in] param[0].u.value.b OPTEE_MSG_PROTMEM_* defined above
*/
-#define OPTEE_MSG_CMD_OPEN_SESSION 0
-#define OPTEE_MSG_CMD_INVOKE_COMMAND 1
-#define OPTEE_MSG_CMD_CLOSE_SESSION 2
-#define OPTEE_MSG_CMD_CANCEL 3
-#define OPTEE_MSG_CMD_REGISTER_SHM 4
-#define OPTEE_MSG_CMD_UNREGISTER_SHM 5
-#define OPTEE_MSG_CMD_DO_BOTTOM_HALF 6
-#define OPTEE_MSG_CMD_STOP_ASYNC_NOTIF 7
-#define OPTEE_MSG_FUNCID_CALL_WITH_ARG 0x0004
+#define OPTEE_MSG_CMD_OPEN_SESSION 0
+#define OPTEE_MSG_CMD_INVOKE_COMMAND 1
+#define OPTEE_MSG_CMD_CLOSE_SESSION 2
+#define OPTEE_MSG_CMD_CANCEL 3
+#define OPTEE_MSG_CMD_REGISTER_SHM 4
+#define OPTEE_MSG_CMD_UNREGISTER_SHM 5
+#define OPTEE_MSG_CMD_DO_BOTTOM_HALF 6
+#define OPTEE_MSG_CMD_STOP_ASYNC_NOTIF 7
+#define OPTEE_MSG_CMD_LEND_PROTMEM 8
+#define OPTEE_MSG_CMD_RECLAIM_PROTMEM 9
+#define OPTEE_MSG_CMD_GET_PROTMEM_CONFIG 10
+#define OPTEE_MSG_CMD_ASSIGN_PROTMEM 11
+#define OPTEE_MSG_FUNCID_CALL_WITH_ARG 0x0004
#endif /* _OPTEE_MSG_H */
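
Elsewhere in this series the tee_dma_heap_id is passed straight through
as the OPTEE_MSG_PROTMEM_* use case, relying on the two encodings
lining up. Written out as an explicit mapping, the intent is roughly
(a sketch, assuming only the secure video playback ID is wired up):

	static u32 heap_to_use_case(enum tee_dma_heap_id id)
	{
		switch (id) {
		case TEE_DMA_HEAP_SECURE_VIDEO_PLAY:
			return OPTEE_MSG_PROTMEM_SECURE_VIDEO_PLAY;
		default:
			return OPTEE_MSG_PROTMEM_RESERVED;
		}
	}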
diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h
index 9526087f0e68..db9ea673fbca 100644
--- a/drivers/tee/optee/optee_private.h
+++ b/drivers/tee/optee/optee_private.h
@@ -176,9 +176,14 @@ struct optee;
* @do_call_with_arg: enters OP-TEE in secure world
* @to_msg_param: converts from struct tee_param to OPTEE_MSG parameters
* @from_msg_param: converts from OPTEE_MSG parameters to struct tee_param
+ * @lend_protmem:	lends physically contiguous memory as protected
+ *			memory, inaccessible by the kernel
+ * @reclaim_protmem:	reclaims protected memory previously lent with
+ *			@lend_protmem() and makes it accessible by the
+ *			kernel again
*
* These OPs are only supposed to be used internally in the OP-TEE driver
- * as a way of abstracting the different methogs of entering OP-TEE in
+ * as a way of abstracting the different methods of entering OP-TEE in
* secure world.
*/
struct optee_ops {
@@ -191,6 +196,10 @@ struct optee_ops {
int (*from_msg_param)(struct optee *optee, struct tee_param *params,
size_t num_params,
const struct optee_msg_param *msg_params);
+ int (*lend_protmem)(struct optee *optee, struct tee_shm *protmem,
+ u32 *mem_attr, unsigned int ma_count,
+ u32 use_case);
+ int (*reclaim_protmem)(struct optee *optee, struct tee_shm *protmem);
};
/**
@@ -274,6 +283,8 @@ struct optee_call_ctx {
extern struct blocking_notifier_head optee_rpmb_intf_added;
+int optee_set_dma_mask(struct optee *optee, u_int pa_width);
+
int optee_notif_init(struct optee *optee, u_int max_key);
void optee_notif_uninit(struct optee *optee);
int optee_notif_wait(struct optee *optee, u_int key, u32 timeout);
@@ -285,6 +296,8 @@ u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
void optee_supp_init(struct optee_supp *supp);
void optee_supp_uninit(struct optee_supp *supp);
void optee_supp_release(struct optee_supp *supp);
+struct tee_protmem_pool *optee_protmem_alloc_dyn_pool(struct optee *optee,
+ enum tee_dma_heap_id id);
int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params,
struct tee_param *param);
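
Since both the SMC and FF-A ABIs supply the lend_protmem/reclaim_protmem
pair, the pool code in protmem.c stays ABI-agnostic and reduces to
indirect calls of the form:

	rc = optee->ops->lend_protmem(optee, protmem, mem_attrs,
				      ma_count, use_case);
	...
	optee->ops->reclaim_protmem(optee, protmem);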
diff --git a/drivers/tee/optee/optee_smc.h b/drivers/tee/optee/optee_smc.h
index 879426300821..accf76a99288 100644
--- a/drivers/tee/optee/optee_smc.h
+++ b/drivers/tee/optee/optee_smc.h
@@ -264,7 +264,6 @@ struct optee_smc_get_shm_config_result {
#define OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM BIT(0)
/* Secure world can communicate via previously unregistered shared memory */
#define OPTEE_SMC_SEC_CAP_UNREGISTERED_SHM BIT(1)
-
/*
* Secure world supports commands "register/unregister shared memory",
* secure world accepts command buffers located in any parts of non-secure RAM
@@ -280,6 +279,10 @@ struct optee_smc_get_shm_config_result {
#define OPTEE_SMC_SEC_CAP_RPC_ARG BIT(6)
/* Secure world supports probing for RPMB device if needed */
#define OPTEE_SMC_SEC_CAP_RPMB_PROBE BIT(7)
+/* Secure world supports protected memory */
+#define OPTEE_SMC_SEC_CAP_PROTMEM BIT(8)
+/* Secure world supports dynamic protected memory */
+#define OPTEE_SMC_SEC_CAP_DYNAMIC_PROTMEM BIT(9)
#define OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES 9
#define OPTEE_SMC_EXCHANGE_CAPABILITIES \
@@ -451,6 +454,38 @@ struct optee_smc_disable_shm_cache_result {
/* See OPTEE_SMC_CALL_WITH_REGD_ARG above */
#define OPTEE_SMC_FUNCID_CALL_WITH_REGD_ARG 19
+/*
+ * Get protected memory config
+ *
+ * Returns the protected memory config.
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC_GET_PROTMEM_CONFIG
+ * a2-6 Not used, must be zero
+ * a7 Hypervisor Client ID register
+ *
+ * Have config return register usage:
+ * a0 OPTEE_SMC_RETURN_OK
+ * a1 Physical address of start of protected memory
+ * a2 Size of protected memory
+ * a3 PA width, max 64
+ * a4-7 Preserved
+ *
+ * Not available register usage:
+ * a0 OPTEE_SMC_RETURN_ENOTAVAIL
+ * a1-3 Not used
+ * a4-7 Preserved
+ */
+#define OPTEE_SMC_FUNCID_GET_PROTMEM_CONFIG 20
+#define OPTEE_SMC_GET_PROTMEM_CONFIG \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_PROTMEM_CONFIG)
+
+struct optee_smc_get_protmem_config_result {
+ unsigned long status;
+ unsigned long start;
+ unsigned long size;
+ unsigned long pa_width;
+};
/*
* Resume from RPC (for example after processing a foreign interrupt)
diff --git a/drivers/tee/optee/protmem.c b/drivers/tee/optee/protmem.c
new file mode 100644
index 000000000000..2eba48d5ac73
--- /dev/null
+++ b/drivers/tee/optee/protmem.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025, Linaro Limited
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/errno.h>
+#include <linux/genalloc.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/tee_core.h>
+#include <linux/types.h>
+#include "optee_private.h"
+
+struct optee_protmem_dyn_pool {
+ struct tee_protmem_pool pool;
+ struct gen_pool *gen_pool;
+ struct optee *optee;
+ size_t page_count;
+ u32 *mem_attrs;
+ u_int mem_attr_count;
+ refcount_t refcount;
+ u32 use_case;
+ struct tee_shm *protmem;
+ /* Protects when initializing and tearing down this struct */
+ struct mutex mutex;
+};
+
+static struct optee_protmem_dyn_pool *
+to_protmem_dyn_pool(struct tee_protmem_pool *pool)
+{
+ return container_of(pool, struct optee_protmem_dyn_pool, pool);
+}
+
+static int init_dyn_protmem(struct optee_protmem_dyn_pool *rp)
+{
+ int rc;
+
+ rp->protmem = tee_shm_alloc_dma_mem(rp->optee->ctx, rp->page_count);
+ if (IS_ERR(rp->protmem)) {
+ rc = PTR_ERR(rp->protmem);
+ goto err_null_protmem;
+ }
+
+ /*
+ * TODO unmap the memory range since the physical memory will
+	 * become inaccessible after the lend_protmem() call.
+ *
+ * If the platform supports a hypervisor at EL2, it will unmap the
+ * intermediate physical memory for us and stop cache pre-fetch of
+ * the memory.
+ */
+ rc = rp->optee->ops->lend_protmem(rp->optee, rp->protmem,
+ rp->mem_attrs,
+ rp->mem_attr_count, rp->use_case);
+ if (rc)
+ goto err_put_shm;
+ rp->protmem->flags |= TEE_SHM_DYNAMIC;
+
+ rp->gen_pool = gen_pool_create(PAGE_SHIFT, -1);
+ if (!rp->gen_pool) {
+ rc = -ENOMEM;
+ goto err_reclaim;
+ }
+
+ rc = gen_pool_add(rp->gen_pool, rp->protmem->paddr,
+ rp->protmem->size, -1);
+ if (rc)
+ goto err_free_pool;
+
+ refcount_set(&rp->refcount, 1);
+ return 0;
+
+err_free_pool:
+ gen_pool_destroy(rp->gen_pool);
+ rp->gen_pool = NULL;
+err_reclaim:
+ rp->optee->ops->reclaim_protmem(rp->optee, rp->protmem);
+err_put_shm:
+ tee_shm_put(rp->protmem);
+err_null_protmem:
+ rp->protmem = NULL;
+ return rc;
+}
+
+static int get_dyn_protmem(struct optee_protmem_dyn_pool *rp)
+{
+ int rc = 0;
+
+ if (!refcount_inc_not_zero(&rp->refcount)) {
+ mutex_lock(&rp->mutex);
+ if (rp->gen_pool) {
+ /*
+ * Another thread has already initialized the pool
+ * before us, or the pool was just about to be torn
+ * down. Either way we only need to increase the
+ * refcount and we're done.
+ */
+ refcount_inc(&rp->refcount);
+ } else {
+ rc = init_dyn_protmem(rp);
+ }
+ mutex_unlock(&rp->mutex);
+ }
+
+ return rc;
+}
+
+static void release_dyn_protmem(struct optee_protmem_dyn_pool *rp)
+{
+ gen_pool_destroy(rp->gen_pool);
+ rp->gen_pool = NULL;
+
+ rp->optee->ops->reclaim_protmem(rp->optee, rp->protmem);
+ rp->protmem->flags &= ~TEE_SHM_DYNAMIC;
+
+ WARN(refcount_read(&rp->protmem->refcount) != 1, "Unexpected refcount");
+ tee_shm_put(rp->protmem);
+ rp->protmem = NULL;
+}
+
+static void put_dyn_protmem(struct optee_protmem_dyn_pool *rp)
+{
+ if (refcount_dec_and_test(&rp->refcount)) {
+ mutex_lock(&rp->mutex);
+ if (rp->gen_pool)
+ release_dyn_protmem(rp);
+ mutex_unlock(&rp->mutex);
+ }
+}
+
+static int protmem_pool_op_dyn_alloc(struct tee_protmem_pool *pool,
+ struct sg_table *sgt, size_t size,
+ size_t *offs)
+{
+ struct optee_protmem_dyn_pool *rp = to_protmem_dyn_pool(pool);
+ size_t sz = ALIGN(size, PAGE_SIZE);
+ phys_addr_t pa;
+ int rc;
+
+ rc = get_dyn_protmem(rp);
+ if (rc)
+ return rc;
+
+ pa = gen_pool_alloc(rp->gen_pool, sz);
+ if (!pa) {
+ rc = -ENOMEM;
+ goto err_put;
+ }
+
+ rc = sg_alloc_table(sgt, 1, GFP_KERNEL);
+ if (rc)
+ goto err_free;
+
+ sg_set_page(sgt->sgl, phys_to_page(pa), size, 0);
+ *offs = pa - rp->protmem->paddr;
+
+ return 0;
+err_free:
+ gen_pool_free(rp->gen_pool, pa, size);
+err_put:
+ put_dyn_protmem(rp);
+
+ return rc;
+}
+
+static void protmem_pool_op_dyn_free(struct tee_protmem_pool *pool,
+ struct sg_table *sgt)
+{
+ struct optee_protmem_dyn_pool *rp = to_protmem_dyn_pool(pool);
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sgtable_sg(sgt, sg, i)
+ gen_pool_free(rp->gen_pool, sg_phys(sg), sg->length);
+ sg_free_table(sgt);
+ put_dyn_protmem(rp);
+}
+
+static int protmem_pool_op_dyn_update_shm(struct tee_protmem_pool *pool,
+ struct sg_table *sgt, size_t offs,
+ struct tee_shm *shm,
+ struct tee_shm **parent_shm)
+{
+ struct optee_protmem_dyn_pool *rp = to_protmem_dyn_pool(pool);
+
+ *parent_shm = rp->protmem;
+
+ return 0;
+}
+
+static void pool_op_dyn_destroy_pool(struct tee_protmem_pool *pool)
+{
+ struct optee_protmem_dyn_pool *rp = to_protmem_dyn_pool(pool);
+
+ mutex_destroy(&rp->mutex);
+ kfree(rp);
+}
+
+static struct tee_protmem_pool_ops protmem_pool_ops_dyn = {
+ .alloc = protmem_pool_op_dyn_alloc,
+ .free = protmem_pool_op_dyn_free,
+ .update_shm = protmem_pool_op_dyn_update_shm,
+ .destroy_pool = pool_op_dyn_destroy_pool,
+};
+
+static int get_protmem_config(struct optee *optee, u32 use_case,
+ size_t *min_size, u_int *pa_width,
+ u32 *mem_attrs, u_int *ma_count)
+{
+ struct tee_param params[2] = {
+ [0] = {
+ .attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT,
+ .u.value.a = use_case,
+ },
+ [1] = {
+ .attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT,
+ },
+ };
+ struct optee_shm_arg_entry *entry;
+ struct tee_shm *shm_param = NULL;
+ struct optee_msg_arg *msg_arg;
+ struct tee_shm *shm;
+ u_int offs;
+ int rc;
+
+ if (mem_attrs && *ma_count) {
+ params[1].u.memref.size = *ma_count * sizeof(*mem_attrs);
+ shm_param = tee_shm_alloc_priv_buf(optee->ctx,
+ params[1].u.memref.size);
+ if (IS_ERR(shm_param))
+ return PTR_ERR(shm_param);
+ params[1].u.memref.shm = shm_param;
+ }
+
+ msg_arg = optee_get_msg_arg(optee->ctx, ARRAY_SIZE(params), &entry,
+ &shm, &offs);
+ if (IS_ERR(msg_arg)) {
+ rc = PTR_ERR(msg_arg);
+ goto out_free_shm;
+ }
+ msg_arg->cmd = OPTEE_MSG_CMD_GET_PROTMEM_CONFIG;
+
+ rc = optee->ops->to_msg_param(optee, msg_arg->params,
+ ARRAY_SIZE(params), params);
+ if (rc)
+ goto out_free_msg;
+
+ rc = optee->ops->do_call_with_arg(optee->ctx, shm, offs, false);
+ if (rc)
+ goto out_free_msg;
+ if (msg_arg->ret && msg_arg->ret != TEEC_ERROR_SHORT_BUFFER) {
+ rc = -EINVAL;
+ goto out_free_msg;
+ }
+
+ rc = optee->ops->from_msg_param(optee, params, ARRAY_SIZE(params),
+ msg_arg->params);
+ if (rc)
+ goto out_free_msg;
+
+ if (!msg_arg->ret && mem_attrs &&
+ *ma_count < params[1].u.memref.size / sizeof(*mem_attrs)) {
+ rc = -EINVAL;
+ goto out_free_msg;
+ }
+
+ *min_size = params[0].u.value.a;
+ *pa_width = params[0].u.value.c;
+ *ma_count = params[1].u.memref.size / sizeof(*mem_attrs);
+
+ if (msg_arg->ret == TEEC_ERROR_SHORT_BUFFER) {
+ rc = -ENOSPC;
+ goto out_free_msg;
+ }
+
+ if (mem_attrs)
+ memcpy(mem_attrs, tee_shm_get_va(shm_param, 0),
+ params[1].u.memref.size);
+
+out_free_msg:
+ optee_free_msg_arg(optee->ctx, entry, offs);
+out_free_shm:
+ if (shm_param)
+ tee_shm_free(shm_param);
+ return rc;
+}
+
+struct tee_protmem_pool *optee_protmem_alloc_dyn_pool(struct optee *optee,
+ enum tee_dma_heap_id id)
+{
+ struct optee_protmem_dyn_pool *rp;
+ size_t min_size;
+ u_int pa_width;
+ int rc;
+
+ rp = kzalloc(sizeof(*rp), GFP_KERNEL);
+ if (!rp)
+ return ERR_PTR(-ENOMEM);
+ rp->use_case = id;
+
+ rc = get_protmem_config(optee, id, &min_size, &pa_width, NULL,
+ &rp->mem_attr_count);
+ if (rc) {
+ if (rc != -ENOSPC)
+ goto err;
+ rp->mem_attrs = kcalloc(rp->mem_attr_count,
+ sizeof(*rp->mem_attrs), GFP_KERNEL);
+ if (!rp->mem_attrs) {
+ rc = -ENOMEM;
+ goto err;
+ }
+ rc = get_protmem_config(optee, id, &min_size, &pa_width,
+ rp->mem_attrs, &rp->mem_attr_count);
+ if (rc)
+ goto err_kfree_eps;
+ }
+
+ rc = optee_set_dma_mask(optee, pa_width);
+ if (rc)
+ goto err_kfree_eps;
+
+ rp->pool.ops = &protmem_pool_ops_dyn;
+ rp->optee = optee;
+ rp->page_count = min_size / PAGE_SIZE;
+ mutex_init(&rp->mutex);
+
+ return &rp->pool;
+
+err_kfree_eps:
+ kfree(rp->mem_attrs);
+err:
+ kfree(rp);
+ return ERR_PTR(rc);
+}
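
get_dyn_protmem()/put_dyn_protmem() above implement a lazily created,
refcounted singleton: the carveout is lent to the secure world on first
use and reclaimed when the last allocation goes away. The pattern,
reduced to generic placeholder names (create_resource/destroy_resource
stand in for init_dyn_protmem/release_dyn_protmem):

	/* Acquire: fast path takes a reference; slow path materializes. */
	if (!refcount_inc_not_zero(&p->refcount)) {
		mutex_lock(&p->mutex);
		if (p->resource)		/* lost a race: just take a ref */
			refcount_inc(&p->refcount);
		else
			rc = create_resource(p); /* sets refcount back to 1 */
		mutex_unlock(&p->mutex);
	}

	/* Release: the last put tears the resource down under the mutex. */
	if (refcount_dec_and_test(&p->refcount)) {
		mutex_lock(&p->mutex);
		if (p->resource)
			destroy_resource(p);
		mutex_unlock(&p->mutex);
	}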
diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c
index 26f8f7bbbe56..0be663fcd52b 100644
--- a/drivers/tee/optee/smc_abi.c
+++ b/drivers/tee/optee/smc_abi.c
@@ -965,6 +965,70 @@ static int optee_smc_do_call_with_arg(struct tee_context *ctx,
return rc;
}
+static int optee_smc_lend_protmem(struct optee *optee, struct tee_shm *protmem,
+ u32 *mem_attrs, unsigned int ma_count,
+ u32 use_case)
+{
+ struct optee_shm_arg_entry *entry;
+ struct optee_msg_arg *msg_arg;
+ struct tee_shm *shm;
+ u_int offs;
+ int rc;
+
+ msg_arg = optee_get_msg_arg(optee->ctx, 2, &entry, &shm, &offs);
+ if (IS_ERR(msg_arg))
+ return PTR_ERR(msg_arg);
+
+ msg_arg->cmd = OPTEE_MSG_CMD_LEND_PROTMEM;
+ msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
+ msg_arg->params[0].u.value.a = use_case;
+ msg_arg->params[1].attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT;
+ msg_arg->params[1].u.tmem.buf_ptr = protmem->paddr;
+ msg_arg->params[1].u.tmem.size = protmem->size;
+ msg_arg->params[1].u.tmem.shm_ref = (u_long)protmem;
+
+ rc = optee->ops->do_call_with_arg(optee->ctx, shm, offs, false);
+ if (rc)
+ goto out;
+ if (msg_arg->ret != TEEC_SUCCESS) {
+ rc = -EINVAL;
+ goto out;
+ }
+ protmem->sec_world_id = (u_long)protmem;
+
+out:
+ optee_free_msg_arg(optee->ctx, entry, offs);
+ return rc;
+}
+
+static int optee_smc_reclaim_protmem(struct optee *optee,
+ struct tee_shm *protmem)
+{
+ struct optee_shm_arg_entry *entry;
+ struct optee_msg_arg *msg_arg;
+ struct tee_shm *shm;
+ u_int offs;
+ int rc;
+
+ msg_arg = optee_get_msg_arg(optee->ctx, 1, &entry, &shm, &offs);
+ if (IS_ERR(msg_arg))
+ return PTR_ERR(msg_arg);
+
+ msg_arg->cmd = OPTEE_MSG_CMD_RECLAIM_PROTMEM;
+ msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
+ msg_arg->params[0].u.rmem.shm_ref = (u_long)protmem;
+
+ rc = optee->ops->do_call_with_arg(optee->ctx, shm, offs, false);
+ if (rc)
+ goto out;
+ if (msg_arg->ret != TEEC_SUCCESS)
+ rc = -EINVAL;
+
+out:
+ optee_free_msg_arg(optee->ctx, entry, offs);
+ return rc;
+}
+
/*
* 5. Asynchronous notification
*/
@@ -1216,6 +1280,8 @@ static const struct optee_ops optee_ops = {
.do_call_with_arg = optee_smc_do_call_with_arg,
.to_msg_param = optee_to_msg_param,
.from_msg_param = optee_from_msg_param,
+ .lend_protmem = optee_smc_lend_protmem,
+ .reclaim_protmem = optee_smc_reclaim_protmem,
};
static int enable_async_notif(optee_invoke_fn *invoke_fn)
@@ -1583,6 +1649,74 @@ static inline int optee_load_fw(struct platform_device *pdev,
}
#endif
+static struct tee_protmem_pool *static_protmem_pool_init(struct optee *optee)
+{
+#if IS_ENABLED(CONFIG_OPTEE_STATIC_PROTMEM_POOL)
+ union {
+ struct arm_smccc_res smccc;
+ struct optee_smc_get_protmem_config_result result;
+ } res;
+ struct tee_protmem_pool *pool;
+ void *p;
+ int rc;
+
+ optee->smc.invoke_fn(OPTEE_SMC_GET_PROTMEM_CONFIG, 0, 0, 0, 0,
+ 0, 0, 0, &res.smccc);
+ if (res.result.status != OPTEE_SMC_RETURN_OK)
+ return ERR_PTR(-EINVAL);
+
+ rc = optee_set_dma_mask(optee, res.result.pa_width);
+ if (rc)
+ return ERR_PTR(rc);
+
+ /*
+ * Map the memory as uncached to make sure the kernel can work with
+ * __pfn_to_page() and friends since that's needed when passing the
+ * protected DMA-buf to a device. The memory should otherwise not
+ * be touched by the kernel since it's likely to cause an external
+ * abort due to the protection status.
+ */
+ p = devm_memremap(&optee->teedev->dev, res.result.start,
+ res.result.size, MEMREMAP_WC);
+ if (IS_ERR(p))
+ return p;
+
+ pool = tee_protmem_static_pool_alloc(res.result.start, res.result.size);
+ if (IS_ERR(pool))
+ devm_memunmap(&optee->teedev->dev, p);
+
+ return pool;
+#else
+ return ERR_PTR(-EINVAL);
+#endif
+}
+
+static int optee_protmem_pool_init(struct optee *optee)
+{
+ bool protm = optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_PROTMEM;
+ bool dyn_protm = optee->smc.sec_caps &
+ OPTEE_SMC_SEC_CAP_DYNAMIC_PROTMEM;
+ enum tee_dma_heap_id heap_id = TEE_DMA_HEAP_SECURE_VIDEO_PLAY;
+ struct tee_protmem_pool *pool = ERR_PTR(-EINVAL);
+ int rc = -EINVAL;
+
+ if (!protm && !dyn_protm)
+ return 0;
+
+ if (protm)
+ pool = static_protmem_pool_init(optee);
+ if (dyn_protm && IS_ERR(pool))
+ pool = optee_protmem_alloc_dyn_pool(optee, heap_id);
+ if (IS_ERR(pool))
+ return PTR_ERR(pool);
+
+ rc = tee_device_register_dma_heap(optee->teedev, heap_id, pool);
+ if (rc)
+ pool->ops->destroy_pool(pool);
+
+ return rc;
+}
+
static int optee_probe(struct platform_device *pdev)
{
optee_invoke_fn *invoke_fn;
@@ -1678,7 +1812,7 @@ static int optee_probe(struct platform_device *pdev)
optee = kzalloc(sizeof(*optee), GFP_KERNEL);
if (!optee) {
rc = -ENOMEM;
- goto err_free_pool;
+ goto err_free_shm_pool;
}
optee->ops = &optee_ops;
@@ -1751,6 +1885,9 @@ static int optee_probe(struct platform_device *pdev)
pr_info("Asynchronous notifications enabled\n");
}
+ if (optee_protmem_pool_init(optee))
+ pr_info("Protected memory service not available\n");
+
/*
* Ensure that there are no pre-existing shm objects before enabling
* the shm cache so that there's no chance of receiving an invalid
@@ -1802,7 +1939,7 @@ err_unreg_teedev:
tee_device_unregister(optee->teedev);
err_free_optee:
kfree(optee);
-err_free_pool:
+err_free_shm_pool:
tee_shm_pool_free(pool);
if (memremaped_shm)
memunmap(memremaped_shm);
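
static_protmem_pool_init() reads the SMC result through a union of
struct arm_smccc_res and struct optee_smc_get_protmem_config_result,
which works because both are four unsigned longs, so a0..a3 alias
status/start/size/pa_width in order. A compile-time assertion one could
add to document that invariant (illustrative, not in the patch):

	BUILD_BUG_ON(sizeof(struct optee_smc_get_protmem_config_result) !=
		     sizeof(struct arm_smccc_res));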
diff --git a/drivers/tee/qcomtee/Kconfig b/drivers/tee/qcomtee/Kconfig
new file mode 100644
index 000000000000..927686abceb1
--- /dev/null
+++ b/drivers/tee/qcomtee/Kconfig
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Qualcomm Trusted Execution Environment Configuration
+config QCOMTEE
+ tristate "Qualcomm TEE Support"
+ depends on !CPU_BIG_ENDIAN
+ select QCOM_SCM
+ select QCOM_TZMEM_MODE_SHMBRIDGE
+ help
+ This option enables the Qualcomm Trusted Execution Environment (QTEE)
+ driver. It provides an API to access services offered by QTEE and
+ its loaded Trusted Applications (TAs). Additionally, it facilitates
+ the export of userspace services provided by supplicants to QTEE.
diff --git a/drivers/tee/qcomtee/Makefile b/drivers/tee/qcomtee/Makefile
new file mode 100644
index 000000000000..7c466c9f32af
--- /dev/null
+++ b/drivers/tee/qcomtee/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_QCOMTEE) += qcomtee.o
+qcomtee-objs += async.o
+qcomtee-objs += call.o
+qcomtee-objs += core.o
+qcomtee-objs += mem_obj.o
+qcomtee-objs += primordial_obj.o
+qcomtee-objs += shm.o
+qcomtee-objs += user_obj.o
diff --git a/drivers/tee/qcomtee/async.c b/drivers/tee/qcomtee/async.c
new file mode 100644
index 000000000000..31bff4309e67
--- /dev/null
+++ b/drivers/tee/qcomtee/async.c
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "qcomtee.h"
+
+#define QCOMTEE_ASYNC_VERSION_1_0 0x00010000U /* Maj: 0x0001, Min: 0x0000. */
+#define QCOMTEE_ASYNC_VERSION_1_1 0x00010001U /* Maj: 0x0001, Min: 0x0001. */
+#define QCOMTEE_ASYNC_VERSION_1_2 0x00010002U /* Maj: 0x0001, Min: 0x0002. */
+#define QCOMTEE_ASYNC_VERSION_CURRENT QCOMTEE_ASYNC_VERSION_1_2
+
+#define QCOMTEE_ASYNC_VERSION_MAJOR(n) upper_16_bits(n)
+#define QCOMTEE_ASYNC_VERSION_MINOR(n) lower_16_bits(n)
+
+#define QCOMTEE_ASYNC_VERSION_CURRENT_MAJOR \
+ QCOMTEE_ASYNC_VERSION_MAJOR(QCOMTEE_ASYNC_VERSION_CURRENT)
+#define QCOMTEE_ASYNC_VERSION_CURRENT_MINOR \
+ QCOMTEE_ASYNC_VERSION_MINOR(QCOMTEE_ASYNC_VERSION_CURRENT)
+
+/**
+ * struct qcomtee_async_msg_hdr - Asynchronous message header format.
+ * @version: current async protocol version of the remote endpoint.
+ * @op: async operation.
+ *
+ * @version specifies the endpoint's (QTEE or driver) supported async protocol.
+ * For example, if QTEE sets @version to %QCOMTEE_ASYNC_VERSION_1_1, QTEE
+ * handles operations supported in %QCOMTEE_ASYNC_VERSION_1_1 or
+ * %QCOMTEE_ASYNC_VERSION_1_0. @op determines the message format.
+ */
+struct qcomtee_async_msg_hdr {
+ u32 version;
+ u32 op;
+};
+
+/* Size of an empty async message. */
+#define QCOMTEE_ASYNC_MSG_ZERO sizeof(struct qcomtee_async_msg_hdr)
+
+/**
+ * struct qcomtee_async_release_msg - Release asynchronous message.
+ * @hdr: message header as &struct qcomtee_async_msg_hdr.
+ * @counts: number of objects in @object_ids.
+ * @object_ids: array of object IDs that should be released.
+ *
+ * Available in Maj = 0x0001, Min >= 0x0000.
+ */
+struct qcomtee_async_release_msg {
+ struct qcomtee_async_msg_hdr hdr;
+ u32 counts;
+ u32 object_ids[] __counted_by(counts);
+};
+
+/**
+ * qcomtee_get_async_buffer() - Get the start of the asynchronous message.
+ * @oic: context used for the current invocation.
+ * @async_buffer: return buffer to extract from or fill in async messages.
+ *
+ * If @oic is used for direct object invocation, the whole outbound buffer
+ * is available for the async message. If @oic is used for a callback request,
+ * the tail of the outbound buffer (after the callback request message) is
+ * available for the async message.
+ *
+ * The start of the async buffer is aligned, see qcomtee_msg_offset_align().
+ */
+static void qcomtee_get_async_buffer(struct qcomtee_object_invoke_ctx *oic,
+ struct qcomtee_buffer *async_buffer)
+{
+ struct qcomtee_msg_callback *msg;
+ unsigned int offset;
+ int i;
+
+ if (!(oic->flags & QCOMTEE_OIC_FLAG_BUSY)) {
+ /* The outbound buffer is empty. Using the whole buffer. */
+ offset = 0;
+ } else {
+ msg = (struct qcomtee_msg_callback *)oic->out_msg.addr;
+
+ /* Start offset in a message for buffer arguments. */
+ offset = qcomtee_msg_buffer_args(struct qcomtee_msg_callback,
+ qcomtee_msg_args(msg));
+
+ /* Add size of IB arguments. */
+ qcomtee_msg_for_each_input_buffer(i, msg)
+ offset += qcomtee_msg_offset_align(msg->args[i].b.size);
+
+ /* Add size of OB arguments. */
+ qcomtee_msg_for_each_output_buffer(i, msg)
+ offset += qcomtee_msg_offset_align(msg->args[i].b.size);
+ }
+
+ async_buffer->addr = oic->out_msg.addr + offset;
+ async_buffer->size = oic->out_msg.size - offset;
+}
+
+/**
+ * async_release() - Process QTEE async release requests.
+ * @oic: context used for the current invocation.
+ * @msg: async message for object release.
+ * @size: size of the async buffer available.
+ *
+ * Return: Size of the outbound buffer used when processing @msg.
+ */
+static size_t async_release(struct qcomtee_object_invoke_ctx *oic,
+ struct qcomtee_async_msg_hdr *async_msg,
+ size_t size)
+{
+ struct qcomtee_async_release_msg *msg;
+ struct qcomtee_object *object;
+ int i;
+
+ msg = (struct qcomtee_async_release_msg *)async_msg;
+
+ for (i = 0; i < msg->counts; i++) {
+ object = qcomtee_idx_erase(oic, msg->object_ids[i]);
+ qcomtee_object_put(object);
+ }
+
+ return struct_size(msg, object_ids, msg->counts);
+}
+
+/**
+ * qcomtee_fetch_async_reqs() - Fetch and process asynchronous messages.
+ * @oic: context used for the current invocation.
+ *
+ * Calls handlers to process the requested operations in the async message.
+ * Currently, only supports async release requests.
+ */
+void qcomtee_fetch_async_reqs(struct qcomtee_object_invoke_ctx *oic)
+{
+ struct qcomtee_async_msg_hdr *async_msg;
+ struct qcomtee_buffer async_buffer;
+ size_t consumed, used = 0;
+ u16 major_ver;
+
+ qcomtee_get_async_buffer(oic, &async_buffer);
+
+ while (async_buffer.size - used > QCOMTEE_ASYNC_MSG_ZERO) {
+ async_msg = (struct qcomtee_async_msg_hdr *)(async_buffer.addr +
+ used);
+ /*
+ * QTEE assumes that the unused space of the async buffer is
+ * zeroed; so if version is zero, the buffer is unused.
+ */
+ if (async_msg->version == 0)
+ goto out;
+
+ major_ver = QCOMTEE_ASYNC_VERSION_MAJOR(async_msg->version);
+ /* Major version mismatch is a compatibility break. */
+ if (major_ver != QCOMTEE_ASYNC_VERSION_CURRENT_MAJOR) {
+ pr_err("Async message version mismatch (%u != %u)\n",
+ major_ver, QCOMTEE_ASYNC_VERSION_CURRENT_MAJOR);
+
+ goto out;
+ }
+
+ switch (async_msg->op) {
+ case QCOMTEE_MSG_OBJECT_OP_RELEASE:
+ consumed = async_release(oic, async_msg,
+ async_buffer.size - used);
+ break;
+ default:
+ pr_err("Unsupported async message %u\n", async_msg->op);
+ goto out;
+ }
+
+ /* Supported operation but unable to parse the message. */
+ if (!consumed) {
+ pr_err("Unable to parse async message for op %u\n",
+ async_msg->op);
+ goto out;
+ }
+
+ /* Next async message. */
+ used += qcomtee_msg_offset_align(consumed);
+ }
+
+out:
+ /* Reset the async buffer so async requests do not loop to QTEE. */
+ memzero_explicit(async_buffer.addr, async_buffer.size);
+}
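
With the structs above, one release message with n object IDs occupies
the header plus the ID array, and the walk in qcomtee_fetch_async_reqs()
advances in aligned steps until it hits a zero version word. The
per-message bookkeeping is effectively:

	/* Bytes consumed by a release message carrying n object IDs ... */
	size_t consumed = sizeof(struct qcomtee_async_release_msg) +
			  n * sizeof(u32);
	/* ... rounded up to the next aligned message boundary. */
	used += qcomtee_msg_offset_align(consumed);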
diff --git a/drivers/tee/qcomtee/call.c b/drivers/tee/qcomtee/call.c
new file mode 100644
index 000000000000..cc17a48d0ab7
--- /dev/null
+++ b/drivers/tee/qcomtee/call.c
@@ -0,0 +1,820 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/slab.h>
+#include <linux/tee.h>
+#include <linux/platform_device.h>
+#include <linux/xarray.h>
+
+#include "qcomtee.h"
+
+static int find_qtee_object(struct qcomtee_object **object, unsigned long id,
+ struct qcomtee_context_data *ctxdata)
+{
+ int err = 0;
+
+ guard(rcu)();
+ /* Object release is RCU protected. */
+ *object = idr_find(&ctxdata->qtee_objects_idr, id);
+ if (!qcomtee_object_get(*object))
+ err = -EINVAL;
+
+ return err;
+}
+
+static void del_qtee_object(unsigned long id,
+ struct qcomtee_context_data *ctxdata)
+{
+ struct qcomtee_object *object;
+
+ scoped_guard(mutex, &ctxdata->qtee_lock)
+ object = idr_remove(&ctxdata->qtee_objects_idr, id);
+
+ qcomtee_object_put(object);
+}
+
+/**
+ * qcomtee_context_add_qtee_object() - Add a QTEE object to the context.
+ * @param: TEE parameter representing @object.
+ * @object: QTEE object.
+ * @ctx: context to add the object.
+ *
+ * It assumes @object is %QCOMTEE_OBJECT_TYPE_TEE and the caller has already
+ * issued qcomtee_object_get() for @object.
+ *
+ * Return: On success, returns 0; on failure, returns < 0.
+ */
+int qcomtee_context_add_qtee_object(struct tee_param *param,
+ struct qcomtee_object *object,
+ struct tee_context *ctx)
+{
+ int ret;
+ struct qcomtee_context_data *ctxdata = ctx->data;
+
+ scoped_guard(mutex, &ctxdata->qtee_lock)
+ ret = idr_alloc(&ctxdata->qtee_objects_idr, object, 0, 0,
+ GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+
+ param->u.objref.id = ret;
+ /* QTEE Object: QCOMTEE_OBJREF_FLAG_TEE set. */
+ param->u.objref.flags = QCOMTEE_OBJREF_FLAG_TEE;
+
+ return 0;
+}
+
+/* Retrieve the QTEE object added with qcomtee_context_add_qtee_object(). */
+int qcomtee_context_find_qtee_object(struct qcomtee_object **object,
+ struct tee_param *param,
+ struct tee_context *ctx)
+{
+ struct qcomtee_context_data *ctxdata = ctx->data;
+
+ return find_qtee_object(object, param->u.objref.id, ctxdata);
+}
+
+/**
+ * qcomtee_context_del_qtee_object() - Delete a QTEE object from the context.
+ * @param: TEE parameter representing @object.
+ * @ctx: context for deleting the object.
+ *
+ * The @param has been initialized by qcomtee_context_add_qtee_object().
+ */
+void qcomtee_context_del_qtee_object(struct tee_param *param,
+ struct tee_context *ctx)
+{
+ struct qcomtee_context_data *ctxdata = ctx->data;
+ /* 'qtee_objects_idr' stores QTEE objects only. */
+ if (param->u.objref.flags & QCOMTEE_OBJREF_FLAG_TEE)
+ del_qtee_object(param->u.objref.id, ctxdata);
+}
+
+/**
+ * qcomtee_objref_to_arg() - Convert OBJREF parameter to QTEE argument.
+ * @arg: QTEE argument.
+ * @param: TEE parameter.
+ * @ctx: context in which the conversion should happen.
+ *
+ * It assumes @param is an OBJREF.
+ * It does not set @arg.type; the caller should initialize it to a correct
+ * &enum qcomtee_arg_type value. It gets the object's refcount in @arg;
+ * the caller should manage to put it afterward.
+ *
+ * Return: On success, returns 0; on failure, returns < 0.
+ */
+int qcomtee_objref_to_arg(struct qcomtee_arg *arg, struct tee_param *param,
+ struct tee_context *ctx)
+{
+ int err = -EINVAL;
+
+ arg->o = NULL_QCOMTEE_OBJECT;
+ /* param is a NULL object: */
+ if (param->u.objref.id == TEE_OBJREF_NULL)
+ return 0;
+
+ /* param is a callback object: */
+ if (param->u.objref.flags & QCOMTEE_OBJREF_FLAG_USER)
+ err = qcomtee_user_param_to_object(&arg->o, param, ctx);
+ /* param is a QTEE object: */
+ else if (param->u.objref.flags & QCOMTEE_OBJREF_FLAG_TEE)
+ err = qcomtee_context_find_qtee_object(&arg->o, param, ctx);
+ /* param is a memory object: */
+ else if (param->u.objref.flags & QCOMTEE_OBJREF_FLAG_MEM)
+ err = qcomtee_memobj_param_to_object(&arg->o, param, ctx);
+
+ /*
+ * For callback objects, call qcomtee_object_get() to keep a temporary
+ * copy for the driver, as these objects are released asynchronously
+ * and may disappear even before returning from QTEE.
+ *
+ * - For direct object invocations, the matching put is called in
+ * qcomtee_object_invoke() when parsing the QTEE response.
+ * - For callback responses, put is called in qcomtee_user_object_notify()
+ * after QTEE has received its copies.
+ */
+
+ if (!err && (typeof_qcomtee_object(arg->o) == QCOMTEE_OBJECT_TYPE_CB))
+ qcomtee_object_get(arg->o);
+
+ return err;
+}
+
+/**
+ * qcomtee_objref_from_arg() - Convert QTEE argument to OBJREF param.
+ * @param: TEE parameter.
+ * @arg: QTEE argument.
+ * @ctx: context in which the conversion should happen.
+ *
+ * It assumes @arg is of %QCOMTEE_ARG_TYPE_IO or %QCOMTEE_ARG_TYPE_OO.
+ * It does not set @param.attr; the caller should initialize it to a
+ * correct type.
+ *
+ * Return: On success, returns 0; on failure, returns < 0.
+ */
+int qcomtee_objref_from_arg(struct tee_param *param, struct qcomtee_arg *arg,
+ struct tee_context *ctx)
+{
+ struct qcomtee_object *object = arg->o;
+
+ switch (typeof_qcomtee_object(object)) {
+ case QCOMTEE_OBJECT_TYPE_NULL:
+ param->u.objref.id = TEE_OBJREF_NULL;
+
+ return 0;
+ case QCOMTEE_OBJECT_TYPE_CB:
+ /* object is a callback object: */
+ if (is_qcomtee_user_object(object))
+ return qcomtee_user_param_from_object(param, object,
+ ctx);
+ /* object is a memory object: */
+ else if (is_qcomtee_memobj_object(object))
+ return qcomtee_memobj_param_from_object(param, object,
+ ctx);
+
+ break;
+ case QCOMTEE_OBJECT_TYPE_TEE:
+ return qcomtee_context_add_qtee_object(param, object, ctx);
+
+ case QCOMTEE_OBJECT_TYPE_ROOT:
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * qcomtee_params_to_args() - Convert TEE parameters to QTEE arguments.
+ * @u: QTEE arguments.
+ * @params: TEE parameters.
+ * @num_params: number of elements in the parameter array.
+ * @ctx: context in which the conversion should happen.
+ *
+ * It assumes @u has at least @num_params + 1 entries and has been initialized
+ * with %QCOMTEE_ARG_TYPE_INV as &struct qcomtee_arg.type.
+ *
+ * Return: On success, returns 0; on failure, returns < 0.
+ */
+static int qcomtee_params_to_args(struct qcomtee_arg *u,
+ struct tee_param *params, int num_params,
+ struct tee_context *ctx)
+{
+ int i;
+
+ for (i = 0; i < num_params; i++) {
+ switch (params[i].attr) {
+ case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_OUTPUT:
+ u[i].flags = QCOMTEE_ARG_FLAGS_UADDR;
+ u[i].b.uaddr = params[i].u.ubuf.uaddr;
+ u[i].b.size = params[i].u.ubuf.size;
+
+ if (params[i].attr ==
+ TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INPUT)
+ u[i].type = QCOMTEE_ARG_TYPE_IB;
+ else /* TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_OUTPUT */
+ u[i].type = QCOMTEE_ARG_TYPE_OB;
+
+ break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INPUT:
+ u[i].type = QCOMTEE_ARG_TYPE_IO;
+ if (qcomtee_objref_to_arg(&u[i], &params[i], ctx))
+ goto out_failed;
+
+ break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_OUTPUT:
+ u[i].type = QCOMTEE_ARG_TYPE_OO;
+ u[i].o = NULL_QCOMTEE_OBJECT;
+ break;
+ default:
+ goto out_failed;
+ }
+ }
+
+ return 0;
+
+out_failed:
+ /* Undo qcomtee_objref_to_arg(). */
+ for (i--; i >= 0; i--) {
+ if (u[i].type != QCOMTEE_ARG_TYPE_IO)
+ continue;
+
+ qcomtee_user_object_set_notify(u[i].o, false);
+ /* See docs for qcomtee_objref_to_arg() for double put. */
+ if (typeof_qcomtee_object(u[i].o) == QCOMTEE_OBJECT_TYPE_CB)
+ qcomtee_object_put(u[i].o);
+
+ qcomtee_object_put(u[i].o);
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * qcomtee_params_from_args() - Convert QTEE arguments to TEE parameters.
+ * @params: TEE parameters.
+ * @u: QTEE arguments.
+ * @num_params: number of elements in the parameter array.
+ * @ctx: context in which the conversion should happen.
+ *
+ * @u should have already been initialized by qcomtee_params_to_args().
+ * This also represents the end of a QTEE invocation that started with
+ * qcomtee_params_to_args() by releasing %QCOMTEE_ARG_TYPE_IO objects.
+ *
+ * Return: On success, returns 0; on failure, returns < 0.
+ */
+static int qcomtee_params_from_args(struct tee_param *params,
+ struct qcomtee_arg *u, int num_params,
+ struct tee_context *ctx)
+{
+ int i, np;
+
+ qcomtee_arg_for_each(np, u) {
+ switch (u[np].type) {
+ case QCOMTEE_ARG_TYPE_OB:
+ /* TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_OUTPUT */
+ params[np].u.ubuf.size = u[np].b.size;
+
+ break;
+ case QCOMTEE_ARG_TYPE_IO:
+			/* TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INPUT */
+ qcomtee_object_put(u[np].o);
+
+ break;
+ case QCOMTEE_ARG_TYPE_OO:
+ /* TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_OUTPUT */
+ if (qcomtee_objref_from_arg(&params[np], &u[np], ctx))
+ goto out_failed;
+
+ break;
+ case QCOMTEE_ARG_TYPE_IB:
+ default:
+ break;
+ }
+ }
+
+ return 0;
+
+out_failed:
+ /* Undo qcomtee_objref_from_arg(). */
+ for (i = 0; i < np; i++) {
+ if (params[i].attr == TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_OUTPUT)
+ qcomtee_context_del_qtee_object(&params[i], ctx);
+ }
+
+ /* Release any IO and OO objects not processed. */
+ for (; u[i].type && i < num_params; i++) {
+ if (u[i].type == QCOMTEE_ARG_TYPE_OO ||
+ u[i].type == QCOMTEE_ARG_TYPE_IO)
+ qcomtee_object_put(u[i].o);
+ }
+
+ return -EINVAL;
+}
+
+/* TEE Device Ops. */
+
+static int qcomtee_params_check(struct tee_param *params, int num_params)
+{
+ int io = 0, oo = 0, ib = 0, ob = 0;
+ int i;
+
+ /* QTEE can accept 64 arguments. */
+ if (num_params > QCOMTEE_ARGS_MAX)
+ return -EINVAL;
+
+ /* Supported parameter types. */
+ for (i = 0; i < num_params; i++) {
+ switch (params[i].attr) {
+ case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INPUT:
+ ib++;
+ break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_OUTPUT:
+ ob++;
+ break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INPUT:
+ io++;
+ break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_OUTPUT:
+ oo++;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+	/* QTEE can accept 16 arguments of each supported type. */
+ if (io > QCOMTEE_ARGS_PER_TYPE || oo > QCOMTEE_ARGS_PER_TYPE ||
+ ib > QCOMTEE_ARGS_PER_TYPE || ob > QCOMTEE_ARGS_PER_TYPE)
+ return -EINVAL;
+
+ return 0;
+}
+
+/* Check if an operation on ROOT_QCOMTEE_OBJECT from userspace is permitted. */
+static int qcomtee_root_object_check(u32 op, struct tee_param *params,
+ int num_params)
+{
+ /* Some privileged operations recognized by QTEE. */
+ if (op == QCOMTEE_ROOT_OP_NOTIFY_DOMAIN_CHANGE ||
+ op == QCOMTEE_ROOT_OP_ADCI_ACCEPT ||
+ op == QCOMTEE_ROOT_OP_ADCI_SHUTDOWN)
+ return -EINVAL;
+
+ /*
+	 * QCOMTEE_ROOT_OP_REG_WITH_CREDENTIALS registers a client with QTEE
+	 * by passing a credential object as the input OBJREF. A
+	 * TEE_OBJREF_NULL credential object represents a privileged client
+	 * and may only be used by the kernel.
+ */
+ if (op == QCOMTEE_ROOT_OP_REG_WITH_CREDENTIALS && num_params == 2) {
+ if (params[0].attr == TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INPUT &&
+ params[1].attr == TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_OUTPUT) {
+ if (params[0].u.objref.id == TEE_OBJREF_NULL)
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * qcomtee_object_invoke() - Invoke a QTEE object.
+ * @ctx: TEE context.
+ * @arg: ioctl arguments.
+ * @params: parameters for the object.
+ *
+ * Return: On success, returns 0; on failure, returns < 0.
+ */
+static int qcomtee_object_invoke(struct tee_context *ctx,
+ struct tee_ioctl_object_invoke_arg *arg,
+ struct tee_param *params)
+{
+ struct qcomtee_object_invoke_ctx *oic __free(kfree) = NULL;
+ struct qcomtee_context_data *ctxdata = ctx->data;
+ struct qcomtee_arg *u __free(kfree) = NULL;
+ struct qcomtee_object *object;
+ int i, ret, result;
+
+ if (qcomtee_params_check(params, arg->num_params))
+ return -EINVAL;
+
+ /* First, handle reserved operations: */
+ if (arg->op == QCOMTEE_MSG_OBJECT_OP_RELEASE) {
+ del_qtee_object(arg->id, ctxdata);
+
+ return 0;
+ }
+
+ /* Otherwise, invoke a QTEE object: */
+ oic = qcomtee_object_invoke_ctx_alloc(ctx);
+ if (!oic)
+ return -ENOMEM;
+
+ /* +1 for ending QCOMTEE_ARG_TYPE_INV. */
+ u = kcalloc(arg->num_params + 1, sizeof(*u), GFP_KERNEL);
+ if (!u)
+ return -ENOMEM;
+
+ /* Get an object to invoke. */
+ if (arg->id == TEE_OBJREF_NULL) {
+ /* Use ROOT if TEE_OBJREF_NULL is invoked. */
+ if (qcomtee_root_object_check(arg->op, params, arg->num_params))
+ return -EINVAL;
+
+ object = ROOT_QCOMTEE_OBJECT;
+ } else if (find_qtee_object(&object, arg->id, ctxdata)) {
+ return -EINVAL;
+ }
+
+ ret = qcomtee_params_to_args(u, params, arg->num_params, ctx);
+ if (ret)
+ goto out;
+
+ ret = qcomtee_object_do_invoke(oic, object, arg->op, u, &result);
+ if (ret) {
+ qcomtee_arg_for_each_input_object(i, u) {
+ qcomtee_user_object_set_notify(u[i].o, false);
+ qcomtee_object_put(u[i].o);
+ }
+
+ goto out;
+ }
+
+	/* Parse QTEE response and put driver's object copies: */
+
+ if (!result) {
+ /* Assume service is UNAVAIL if unable to process the result. */
+ if (qcomtee_params_from_args(params, u, arg->num_params, ctx))
+ result = QCOMTEE_MSG_ERROR_UNAVAIL;
+ } else {
+ /*
+ * qcomtee_params_to_args() gets a copy of IO for the driver to
+ * make sure they do not get released while in the middle of
+ * invocation. On success (!result), qcomtee_params_from_args()
+ * puts them; Otherwise, put them here.
+ */
+ qcomtee_arg_for_each_input_object(i, u)
+ qcomtee_object_put(u[i].o);
+ }
+
+ arg->ret = result;
+out:
+ qcomtee_object_put(object);
+
+ return ret;
+}
+
+/**
+ * qcomtee_supp_recv() - Wait for a request for the supplicant.
+ * @ctx: TEE context.
+ * @op: requested operation on the object.
+ * @num_params: number of elements in the parameter array.
+ * @params: parameters for @op.
+ *
+ * The first parameter is a meta %TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT.
+ * On input, it provides a user buffer. This buffer is used for parameters of
+ * type %TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INPUT in qcomtee_cb_params_from_args().
+ * On output, the object ID and request ID are stored in the meta parameter.
+ *
+ * @num_params is updated to the number of parameters that actually exist
+ * in @params on return.
+ *
+ * Return: On success, returns 0; on failure, returns < 0.
+ */
+static int qcomtee_supp_recv(struct tee_context *ctx, u32 *op, u32 *num_params,
+ struct tee_param *params)
+{
+ struct qcomtee_user_object_request_data data;
+ void __user *uaddr;
+ size_t ubuf_size;
+ int i, ret;
+
+ if (!*num_params)
+ return -EINVAL;
+
+ /* First parameter should be an INOUT + meta parameter. */
+ if (params->attr !=
+ (TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT | TEE_IOCTL_PARAM_ATTR_META))
+ return -EINVAL;
+
+	/* All other parameters must have the "none" attribute type. */
+ for (i = 1; i < *num_params; i++)
+ if (params[i].attr)
+ return -EINVAL;
+
+ if (!IS_ALIGNED(params->u.value.a, 8))
+ return -EINVAL;
+
+ /* User buffer and size from meta parameter. */
+ uaddr = u64_to_user_ptr(params->u.value.a);
+ ubuf_size = params->u.value.b;
+ /* Process TEE parameters. +/-1 to ignore the meta parameter. */
+ ret = qcomtee_user_object_select(ctx, params + 1, *num_params - 1,
+ uaddr, ubuf_size, &data);
+ if (ret)
+ return ret;
+
+ params->u.value.a = data.object_id;
+ params->u.value.b = data.id;
+ params->u.value.c = 0;
+ *op = data.op;
+ *num_params = data.np + 1;
+
+ return 0;
+}
+
+/**
+ * qcomtee_supp_send() - Submit a response for a request.
+ * @ctx: TEE context.
+ * @errno: return value for the request.
+ * @num_params: number of elements in the parameter array.
+ * @params: returned parameters.
+ *
+ * The first parameter is a meta %TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT.
+ * It specifies the request ID this response belongs to.
+ *
+ * Return: On success, returns 0; on failure, returns < 0.
+ */
+static int qcomtee_supp_send(struct tee_context *ctx, u32 errno, u32 num_params,
+ struct tee_param *params)
+{
+ int req_id;
+
+ if (!num_params)
+ return -EINVAL;
+
+ /* First parameter should be an OUTPUT + meta parameter. */
+ if (params->attr != (TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT |
+ TEE_IOCTL_PARAM_ATTR_META))
+ return -EINVAL;
+
+ req_id = params->u.value.a;
+ /* Process TEE parameters. +/-1 to ignore the meta parameter. */
+ return qcomtee_user_object_submit(ctx, params + 1, num_params - 1,
+ req_id, errno);
+}
+
+static int qcomtee_open(struct tee_context *ctx)
+{
+ struct qcomtee_context_data *ctxdata __free(kfree) = NULL;
+
+ ctxdata = kzalloc(sizeof(*ctxdata), GFP_KERNEL);
+ if (!ctxdata)
+ return -ENOMEM;
+
+ /*
+ * In the QTEE driver, the same context is used to refcount resources
+ * shared by QTEE. For example, teedev_ctx_get() is called for any
+ * instance of callback objects (see qcomtee_user_param_to_object()).
+ *
+ * Maintain a copy of teedev for QTEE as it serves as a direct user of
+ * this context. The teedev will be released in the context's release().
+ *
+ * tee_device_unregister() will remain blocked until all contexts
+ * are released. This includes contexts owned by the user, which are
+ * closed by teedev_close_context(), as well as those owned by QTEE
+ * closed by teedev_ctx_put() in object's release().
+ */
+ if (!tee_device_get(ctx->teedev))
+ return -EINVAL;
+
+ idr_init(&ctxdata->qtee_objects_idr);
+ mutex_init(&ctxdata->qtee_lock);
+ idr_init(&ctxdata->reqs_idr);
+ INIT_LIST_HEAD(&ctxdata->reqs_list);
+ mutex_init(&ctxdata->reqs_lock);
+ init_completion(&ctxdata->req_c);
+
+ ctx->data = no_free_ptr(ctxdata);
+
+ return 0;
+}
+
+/* Gets called when the user closes the device. */
+static void qcomtee_close_context(struct tee_context *ctx)
+{
+ struct qcomtee_context_data *ctxdata = ctx->data;
+ struct qcomtee_object *object;
+ int id;
+
+ /* Process QUEUED or PROCESSING requests. */
+ qcomtee_requests_destroy(ctxdata);
+ /* Release QTEE objects. */
+ idr_for_each_entry(&ctxdata->qtee_objects_idr, object, id)
+ qcomtee_object_put(object);
+}
+
+/* Gets called when the final reference to the context goes away. */
+static void qcomtee_release(struct tee_context *ctx)
+{
+ struct qcomtee_context_data *ctxdata = ctx->data;
+
+ idr_destroy(&ctxdata->qtee_objects_idr);
+ idr_destroy(&ctxdata->reqs_idr);
+ kfree(ctxdata);
+
+ /* There is nothing shared in this context with QTEE. */
+ tee_device_put(ctx->teedev);
+}
+
+static void qcomtee_get_version(struct tee_device *teedev,
+ struct tee_ioctl_version_data *vers)
+{
+ struct tee_ioctl_version_data v = {
+ .impl_id = TEE_IMPL_ID_QTEE,
+ .gen_caps = TEE_GEN_CAP_OBJREF,
+ };
+
+ *vers = v;
+}
+
+/**
+ * qcomtee_get_qtee_feature_list() - Query QTEE features versions.
+ * @ctx: TEE context.
+ * @id: ID of the feature to query.
+ * @version: version of the feature.
+ *
+ * Used to query the version of features supported by QTEE.
+ */
+static void qcomtee_get_qtee_feature_list(struct tee_context *ctx, u32 id,
+ u32 *version)
+{
+ struct qcomtee_object_invoke_ctx *oic __free(kfree);
+ struct qcomtee_object *client_env, *service;
+ struct qcomtee_arg u[3] = { 0 };
+ int result;
+
+ oic = qcomtee_object_invoke_ctx_alloc(ctx);
+ if (!oic)
+ return;
+
+ client_env = qcomtee_object_get_client_env(oic);
+ if (client_env == NULL_QCOMTEE_OBJECT)
+ return;
+
+	/* Get "FeatureVersions Service" object. */
+ service = qcomtee_object_get_service(oic, client_env,
+ QCOMTEE_FEATURE_VER_UID);
+ if (service == NULL_QCOMTEE_OBJECT)
+ goto out_failed;
+
+ /* IB: Feature to query. */
+ u[0].b.addr = &id;
+ u[0].b.size = sizeof(id);
+ u[0].type = QCOMTEE_ARG_TYPE_IB;
+
+ /* OB: Version returned. */
+ u[1].b.addr = version;
+ u[1].b.size = sizeof(*version);
+ u[1].type = QCOMTEE_ARG_TYPE_OB;
+
+ qcomtee_object_do_invoke(oic, service, QCOMTEE_FEATURE_VER_OP_GET, u,
+ &result);
+
+out_failed:
+ qcomtee_object_put(service);
+ qcomtee_object_put(client_env);
+}
+
+static const struct tee_driver_ops qcomtee_ops = {
+ .get_version = qcomtee_get_version,
+ .open = qcomtee_open,
+ .close_context = qcomtee_close_context,
+ .release = qcomtee_release,
+ .object_invoke_func = qcomtee_object_invoke,
+ .supp_recv = qcomtee_supp_recv,
+ .supp_send = qcomtee_supp_send,
+};
+
+static const struct tee_desc qcomtee_desc = {
+ .name = "qcomtee",
+ .ops = &qcomtee_ops,
+ .owner = THIS_MODULE,
+};
+
+static int qcomtee_probe(struct platform_device *pdev)
+{
+ struct workqueue_struct *async_wq;
+ struct tee_device *teedev;
+ struct tee_shm_pool *pool;
+ struct tee_context *ctx;
+ struct qcomtee *qcomtee;
+ int err;
+
+ qcomtee = kzalloc(sizeof(*qcomtee), GFP_KERNEL);
+ if (!qcomtee)
+ return -ENOMEM;
+
+ pool = qcomtee_shm_pool_alloc();
+ if (IS_ERR(pool)) {
+ err = PTR_ERR(pool);
+
+ goto err_free_qcomtee;
+ }
+
+ teedev = tee_device_alloc(&qcomtee_desc, NULL, pool, qcomtee);
+ if (IS_ERR(teedev)) {
+ err = PTR_ERR(teedev);
+
+ goto err_pool_destroy;
+ }
+
+ qcomtee->teedev = teedev;
+ qcomtee->pool = pool;
+ err = tee_device_register(qcomtee->teedev);
+ if (err)
+ goto err_unreg_teedev;
+
+ platform_set_drvdata(pdev, qcomtee);
+ /* Start async wq. */
+ async_wq = alloc_ordered_workqueue("qcomtee_wq", 0);
+ if (!async_wq) {
+ err = -ENOMEM;
+
+ goto err_unreg_teedev;
+ }
+
+ qcomtee->wq = async_wq;
+ /* Driver context used for async operations of teedev. */
+ ctx = teedev_open(qcomtee->teedev);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+
+ goto err_dest_wq;
+ }
+
+ qcomtee->ctx = ctx;
+ /* Init Object table. */
+ qcomtee->xa_last_id = 0;
+ xa_init_flags(&qcomtee->xa_local_objects, XA_FLAGS_ALLOC);
+ /* Get QTEE version. */
+ qcomtee_get_qtee_feature_list(qcomtee->ctx,
+ QCOMTEE_FEATURE_VER_OP_GET_QTEE_ID,
+ &qcomtee->qtee_version);
+
+ pr_info("QTEE version %u.%u.%u\n",
+ QTEE_VERSION_GET_MAJOR(qcomtee->qtee_version),
+ QTEE_VERSION_GET_MINOR(qcomtee->qtee_version),
+ QTEE_VERSION_GET_PATCH(qcomtee->qtee_version));
+
+ return 0;
+
+err_dest_wq:
+ destroy_workqueue(qcomtee->wq);
+err_unreg_teedev:
+ tee_device_unregister(qcomtee->teedev);
+err_pool_destroy:
+ tee_shm_pool_free(pool);
+err_free_qcomtee:
+ kfree(qcomtee);
+
+ return err;
+}
+
+/**
+ * qcomtee_remove() - Device Removal Routine.
+ * @pdev: platform device information struct.
+ *
+ * It is called by the platform subsystem to alert the driver that it should
+ * release the device.
+ *
+ * QTEE does not provide an API to inform it about a callback object going away.
+ * However, when releasing QTEE objects, any callback object sent to QTEE
+ * previously would be released by QTEE as part of the object release.
+ */
+static void qcomtee_remove(struct platform_device *pdev)
+{
+ struct qcomtee *qcomtee = platform_get_drvdata(pdev);
+
+ teedev_close_context(qcomtee->ctx);
+ /* Wait for RELEASE operations to be processed for QTEE objects. */
+ tee_device_unregister(qcomtee->teedev);
+ destroy_workqueue(qcomtee->wq);
+ tee_shm_pool_free(qcomtee->pool);
+ kfree(qcomtee);
+}
+
+static const struct platform_device_id qcomtee_ids[] = { { "qcomtee", 0 }, {} };
+MODULE_DEVICE_TABLE(platform, qcomtee_ids);
+
+static struct platform_driver qcomtee_platform_driver = {
+ .probe = qcomtee_probe,
+ .remove = qcomtee_remove,
+ .driver = {
+ .name = "qcomtee",
+ },
+ .id_table = qcomtee_ids,
+};
+
+module_platform_driver(qcomtee_platform_driver);
+
+MODULE_AUTHOR("Qualcomm");
+MODULE_DESCRIPTION("QTEE driver");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL");
diff --git a/drivers/tee/qcomtee/core.c b/drivers/tee/qcomtee/core.c
new file mode 100644
index 000000000000..783acc59cfa9
--- /dev/null
+++ b/drivers/tee/qcomtee/core.c
@@ -0,0 +1,915 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/firmware/qcom/qcom_scm.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/xarray.h>
+
+#include "qcomtee.h"
+
+/* QTEE root object. */
+struct qcomtee_object qcomtee_object_root = {
+ .name = "root",
+ .object_type = QCOMTEE_OBJECT_TYPE_ROOT,
+ .info.qtee_id = QCOMTEE_MSG_OBJECT_ROOT,
+};
+
+/* Next argument of type @type after index @i. */
+int qcomtee_next_arg_type(struct qcomtee_arg *u, int i,
+ enum qcomtee_arg_type type)
+{
+ while (u[i].type != QCOMTEE_ARG_TYPE_INV && u[i].type != type)
+ i++;
+ return i;
+}
+
+/*
+ * QTEE expects IDs with QCOMTEE_MSG_OBJECT_NS_BIT set for objects of
+ * QCOMTEE_OBJECT_TYPE_CB type. The first ID with QCOMTEE_MSG_OBJECT_NS_BIT
+ * set is reserved for the primordial object.
+ */
+#define QCOMTEE_OBJECT_PRIMORDIAL (QCOMTEE_MSG_OBJECT_NS_BIT)
+#define QCOMTEE_OBJECT_ID_START (QCOMTEE_OBJECT_PRIMORDIAL + 1)
+#define QCOMTEE_OBJECT_ID_END (U32_MAX)
+
+#define QCOMTEE_OBJECT_SET(p, type, ...) \
+ __QCOMTEE_OBJECT_SET(p, type, ##__VA_ARGS__, 0UL)
+#define __QCOMTEE_OBJECT_SET(p, type, optr, ...) \
+ do { \
+ (p)->object_type = (type); \
+ (p)->info.qtee_id = (unsigned long)(optr); \
+ } while (0)
+
+static struct qcomtee_object *
+qcomtee_qtee_object_alloc(struct qcomtee_object_invoke_ctx *oic,
+ unsigned int object_id)
+{
+ struct qcomtee *qcomtee = tee_get_drvdata(oic->ctx->teedev);
+ struct qcomtee_object *object;
+
+ object = kzalloc(sizeof(*object), GFP_KERNEL);
+ if (!object)
+ return NULL_QCOMTEE_OBJECT;
+
+ /* If allocation fails, the name defaults to "no-name". */
+ object->name = kasprintf(GFP_KERNEL, "qcomtee-%u", object_id);
+ QCOMTEE_OBJECT_SET(object, QCOMTEE_OBJECT_TYPE_TEE, object_id);
+ kref_init(&object->refcount);
+ /* A QTEE object requires a context for async operations. */
+ object->info.qcomtee_async_ctx = qcomtee->ctx;
+ teedev_ctx_get(object->info.qcomtee_async_ctx);
+
+ return object;
+}
+
+static void qcomtee_qtee_object_free(struct qcomtee_object *object)
+{
+ /* See qcomtee_qtee_object_alloc(). */
+ teedev_ctx_put(object->info.qcomtee_async_ctx);
+
+ kfree(object->name);
+ kfree(object);
+}
+
+static void qcomtee_do_release_qtee_object(struct work_struct *work)
+{
+ struct qcomtee_object *object;
+ struct qcomtee *qcomtee;
+ int ret, result;
+
+ /* RELEASE does not require any argument. */
+ struct qcomtee_arg args[] = { { .type = QCOMTEE_ARG_TYPE_INV } };
+
+ object = container_of(work, struct qcomtee_object, work);
+ qcomtee = tee_get_drvdata(object->info.qcomtee_async_ctx->teedev);
+ /* Get the TEE context used for asynchronous operations. */
+ qcomtee->oic.ctx = object->info.qcomtee_async_ctx;
+
+ ret = qcomtee_object_do_invoke_internal(&qcomtee->oic, object,
+ QCOMTEE_MSG_OBJECT_OP_RELEASE,
+ args, &result);
+
+ /* Retry the release unless QTEE is unavailable (-ENODEV). */
+ if (ret && ret != -ENODEV) {
+ queue_work(qcomtee->wq, &object->work);
+ } else {
+ if (ret || result)
+ pr_err("%s release failed, ret = %d (%x)\n",
+ qcomtee_object_name(object), ret, result);
+ qcomtee_qtee_object_free(object);
+ }
+}
+
+static void qcomtee_release_qtee_object(struct qcomtee_object *object)
+{
+ struct qcomtee *qcomtee =
+ tee_get_drvdata(object->info.qcomtee_async_ctx->teedev);
+
+ INIT_WORK(&object->work, qcomtee_do_release_qtee_object);
+ queue_work(qcomtee->wq, &object->work);
+}
+
+static void qcomtee_object_release(struct kref *refcount)
+{
+ struct qcomtee_object *object;
+ const char *name;
+
+ object = container_of(refcount, struct qcomtee_object, refcount);
+
+ /*
+ * qcomtee_object_get() is called in a RCU read lock. synchronize_rcu()
+ * to avoid releasing the object while it is being accessed in
+ * qcomtee_object_get().
+ */
+ synchronize_rcu();
+
+ switch (typeof_qcomtee_object(object)) {
+ case QCOMTEE_OBJECT_TYPE_TEE:
+ qcomtee_release_qtee_object(object);
+
+ break;
+ case QCOMTEE_OBJECT_TYPE_CB:
+ name = object->name;
+
+ if (object->ops->release)
+ object->ops->release(object);
+
+ kfree_const(name);
+
+ break;
+ case QCOMTEE_OBJECT_TYPE_ROOT:
+ case QCOMTEE_OBJECT_TYPE_NULL:
+ default:
+ break;
+ }
+}
+
+/**
+ * qcomtee_object_get() - Increase the object's reference count.
+ * @object: object to increase the reference count.
+ *
+ * Context: The caller should hold RCU read lock.
+ */
+int qcomtee_object_get(struct qcomtee_object *object)
+{
+ if (object != &qcomtee_primordial_object &&
+ object != NULL_QCOMTEE_OBJECT &&
+ object != ROOT_QCOMTEE_OBJECT)
+ return kref_get_unless_zero(&object->refcount);
+
+ return 0;
+}
+
+/**
+ * qcomtee_object_put() - Decrease the object's reference count.
+ * @object: object to decrease the reference count.
+ */
+void qcomtee_object_put(struct qcomtee_object *object)
+{
+ if (object != &qcomtee_primordial_object &&
+ object != NULL_QCOMTEE_OBJECT &&
+ object != ROOT_QCOMTEE_OBJECT)
+ kref_put(&object->refcount, qcomtee_object_release);
+}
+
+static int qcomtee_idx_alloc(struct qcomtee_object_invoke_ctx *oic, u32 *idx,
+ struct qcomtee_object *object)
+{
+ struct qcomtee *qcomtee = tee_get_drvdata(oic->ctx->teedev);
+
+ /* Every ID allocated here has QCOMTEE_MSG_OBJECT_NS_BIT set. */
+ return xa_alloc_cyclic(&qcomtee->xa_local_objects, idx, object,
+ XA_LIMIT(QCOMTEE_OBJECT_ID_START,
+ QCOMTEE_OBJECT_ID_END),
+ &qcomtee->xa_last_id, GFP_KERNEL);
+}
+
+struct qcomtee_object *qcomtee_idx_erase(struct qcomtee_object_invoke_ctx *oic,
+ u32 idx)
+{
+ struct qcomtee *qcomtee = tee_get_drvdata(oic->ctx->teedev);
+
+ if (idx < QCOMTEE_OBJECT_ID_START || idx > QCOMTEE_OBJECT_ID_END)
+ return NULL_QCOMTEE_OBJECT;
+
+ return xa_erase(&qcomtee->xa_local_objects, idx);
+}
+
+/**
+ * qcomtee_object_id_get() - Get an ID for an object to send to QTEE.
+ * @oic: context to use for the invocation.
+ * @object: object to assign an ID.
+ * @object_id: object ID.
+ *
+ * Called on the path to QTEE to construct the message; see
+ * qcomtee_prepare_msg() and qcomtee_update_msg().
+ *
+ * Return: On success, returns 0; on failure, returns < 0.
+ */
+static int qcomtee_object_id_get(struct qcomtee_object_invoke_ctx *oic,
+ struct qcomtee_object *object,
+ unsigned int *object_id)
+{
+ u32 idx;
+
+ switch (typeof_qcomtee_object(object)) {
+ case QCOMTEE_OBJECT_TYPE_CB:
+ if (qcomtee_idx_alloc(oic, &idx, object) < 0)
+ return -ENOSPC;
+
+ *object_id = idx;
+
+ break;
+ case QCOMTEE_OBJECT_TYPE_ROOT:
+ case QCOMTEE_OBJECT_TYPE_TEE:
+ *object_id = object->info.qtee_id;
+
+ break;
+ case QCOMTEE_OBJECT_TYPE_NULL:
+ *object_id = QCOMTEE_MSG_OBJECT_NULL;
+
+ break;
+ }
+
+ return 0;
+}
+
+/* Release object ID assigned in qcomtee_object_id_get. */
+static void qcomtee_object_id_put(struct qcomtee_object_invoke_ctx *oic,
+ unsigned int object_id)
+{
+ qcomtee_idx_erase(oic, object_id);
+}
+
+/**
+ * qcomtee_local_object_get() - Get the object referenced by the ID.
+ * @oic: context to use for the invocation.
+ * @object_id: object ID.
+ *
+ * It is called on the path from QTEE.
+ * It is called on behalf of QTEE to obtain an instance of an object
+ * for a given ID. It increases the object's reference count on success.
+ *
+ * Return: On error, returns %NULL_QCOMTEE_OBJECT.
+ * On success, returns the object.
+ */
+static struct qcomtee_object *
+qcomtee_local_object_get(struct qcomtee_object_invoke_ctx *oic,
+ unsigned int object_id)
+{
+ struct qcomtee *qcomtee = tee_get_drvdata(oic->ctx->teedev);
+ struct qcomtee_object *object;
+
+ if (object_id == QCOMTEE_OBJECT_PRIMORDIAL)
+ return &qcomtee_primordial_object;
+
+ guard(rcu)();
+ object = xa_load(&qcomtee->xa_local_objects, object_id);
+ /* qcomtee_object_get() already handles %NULL_QCOMTEE_OBJECT. */
+ qcomtee_object_get(object);
+
+ return object;
+}
+
+/**
+ * qcomtee_object_user_init() - Initialize an object for the user.
+ * @object: object to initialize.
+ * @ot: type of object as &enum qcomtee_object_type.
+ * @ops: instance of callbacks.
+ * @fmt: name assigned to the object.
+ *
+ * Return: On success, returns 0; on failure, returns < 0.
+ */
+int qcomtee_object_user_init(struct qcomtee_object *object,
+ enum qcomtee_object_type ot,
+ struct qcomtee_object_operations *ops,
+ const char *fmt, ...)
+{
+ va_list ap;
+ int ret;
+
+ kref_init(&object->refcount);
+ QCOMTEE_OBJECT_SET(object, QCOMTEE_OBJECT_TYPE_NULL);
+
+ va_start(ap, fmt);
+ switch (ot) {
+ case QCOMTEE_OBJECT_TYPE_NULL:
+ ret = 0;
+
+ break;
+ case QCOMTEE_OBJECT_TYPE_CB:
+ object->ops = ops;
+ if (!object->ops->dispatch)
+ return -EINVAL;
+
+ /* If allocation fails, the name defaults to "no-name". */
+ object->name = kvasprintf_const(GFP_KERNEL, fmt, ap);
+ QCOMTEE_OBJECT_SET(object, QCOMTEE_OBJECT_TYPE_CB);
+
+ ret = 0;
+ break;
+ case QCOMTEE_OBJECT_TYPE_ROOT:
+ case QCOMTEE_OBJECT_TYPE_TEE:
+ default:
+ ret = -EINVAL;
+ }
+ va_end(ap);
+
+ return ret;
+}
+
+/**
+ * qcomtee_object_type() - Returns the type of object represented by an ID.
+ * @object_id: object ID for the object.
+ *
+ * Similar to typeof_qcomtee_object(), but instead of receiving an object as
+ * an argument, it receives an object ID. It is used internally on the return
+ * path from QTEE.
+ *
+ * Return: Returns the type of object referenced by @object_id.
+ */
+static enum qcomtee_object_type qcomtee_object_type(unsigned int object_id)
+{
+ if (object_id == QCOMTEE_MSG_OBJECT_NULL)
+ return QCOMTEE_OBJECT_TYPE_NULL;
+
+ if (object_id & QCOMTEE_MSG_OBJECT_NS_BIT)
+ return QCOMTEE_OBJECT_TYPE_CB;
+
+ return QCOMTEE_OBJECT_TYPE_TEE;
+}
+
+/**
+ * qcomtee_object_qtee_init() - Initialize an object for QTEE.
+ * @oic: context to use for the invocation.
+ * @object: object returned.
+ * @object_id: object ID received from QTEE.
+ *
+ * Return: On failure, returns < 0 and sets @object to %NULL_QCOMTEE_OBJECT.
+ * On success, returns 0.
+ */
+static int qcomtee_object_qtee_init(struct qcomtee_object_invoke_ctx *oic,
+ struct qcomtee_object **object,
+ unsigned int object_id)
+{
+ int ret = 0;
+
+ switch (qcomtee_object_type(object_id)) {
+ case QCOMTEE_OBJECT_TYPE_NULL:
+ *object = NULL_QCOMTEE_OBJECT;
+
+ break;
+ case QCOMTEE_OBJECT_TYPE_CB:
+ *object = qcomtee_local_object_get(oic, object_id);
+ if (*object == NULL_QCOMTEE_OBJECT)
+ ret = -EINVAL;
+
+ break;
+
+ default: /* QCOMTEE_OBJECT_TYPE_TEE */
+ *object = qcomtee_qtee_object_alloc(oic, object_id);
+ if (*object == NULL_QCOMTEE_OBJECT)
+ ret = -ENOMEM;
+
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * ''Marshaling API''
+ * qcomtee_prepare_msg - Prepare the inbound buffer for sending to QTEE
+ * qcomtee_update_args - Parse the QTEE response in the inbound buffer
+ * qcomtee_prepare_args - Parse the QTEE request from the outbound buffer
+ * qcomtee_update_msg - Update the outbound buffer with the response for QTEE
+ */
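+
+/*
+ * Illustrative sketch of the flow implemented in
+ * qcomtee_object_do_invoke_internal(), for one invocation that triggers a
+ * single callback request:
+ *
+ *	qcomtee_prepare_msg()	(kernel fills the inbound buffer)
+ *	  invoke SMC		(QTEE answers QCOMTEE_RESULT_INBOUND_REQ_NEEDED)
+ *	qcomtee_prepare_args()	(kernel parses the outbound buffer)
+ *	  dispatch()		(kernel serves the callback request)
+ *	qcomtee_update_msg()	(kernel writes the response for QTEE)
+ *	  resume SMC		(QTEE returns the final result)
+ *	qcomtee_update_args()	(kernel parses the QTEE response)
+ */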
+
+static int qcomtee_prepare_msg(struct qcomtee_object_invoke_ctx *oic,
+ struct qcomtee_object *object, u32 op,
+ struct qcomtee_arg *u)
+{
+ struct qcomtee_msg_object_invoke *msg;
+ unsigned int object_id;
+ int i, ib, ob, io, oo;
+ size_t offset;
+
+ /* Use the input message buffer in 'oic'. */
+ msg = oic->in_msg.addr;
+
+ /* Start offset in a message for buffer arguments. */
+ offset = qcomtee_msg_buffer_args(struct qcomtee_msg_object_invoke,
+ qcomtee_args_len(u));
+
+ /* Get the ID of the object being invoked. */
+ if (qcomtee_object_id_get(oic, object, &object_id))
+ return -ENOSPC;
+
+ ib = 0;
+ qcomtee_arg_for_each_input_buffer(i, u) {
+ void *msgptr; /* Address of buffer payload: */
+ /* Overflow already checked in qcomtee_msg_buffers_alloc(). */
+ msg->args[ib].b.offset = offset;
+ msg->args[ib].b.size = u[i].b.size;
+
+ msgptr = qcomtee_msg_offset_to_ptr(msg, offset);
+ /* Copy from a userspace or a kernel client buffer. */
+ if (!(u[i].flags & QCOMTEE_ARG_FLAGS_UADDR))
+ memcpy(msgptr, u[i].b.addr, u[i].b.size);
+ else if (copy_from_user(msgptr, u[i].b.uaddr, u[i].b.size))
+ return -EINVAL;
+
+ offset += qcomtee_msg_offset_align(u[i].b.size);
+ ib++;
+ }
+
+ ob = ib;
+ qcomtee_arg_for_each_output_buffer(i, u) {
+ /* Overflow already checked in qcomtee_msg_buffers_alloc(). */
+ msg->args[ob].b.offset = offset;
+ msg->args[ob].b.size = u[i].b.size;
+
+ offset += qcomtee_msg_offset_align(u[i].b.size);
+ ob++;
+ }
+
+ io = ob;
+ qcomtee_arg_for_each_input_object(i, u) {
+ if (qcomtee_object_id_get(oic, u[i].o, &msg->args[io].o)) {
+ qcomtee_object_id_put(oic, object_id);
+ for (io--; io >= ob; io--)
+ qcomtee_object_id_put(oic, msg->args[io].o);
+
+ return -ENOSPC;
+ }
+
+ io++;
+ }
+
+ oo = io;
+ qcomtee_arg_for_each_output_object(i, u)
+ oo++;
+
+ /* Set object, operation, and argument counts. */
+ qcomtee_msg_init(msg, object_id, op, ib, ob, io, oo);
+
+ return 0;
+}
+
+/**
+ * qcomtee_update_args() - Parse the QTEE response in the inbound buffer.
+ * @u: array of arguments for the invocation.
+ * @oic: context to use for the invocation.
+ *
+ * @u must be the same as the one used in qcomtee_prepare_msg() when
+ * initializing the inbound buffer.
+ *
+ * On failure, it continues processing the QTEE message. The caller should
+ * do the necessary cleanup, including calling qcomtee_object_put()
+ * on the output objects.
+ *
+ * Return: On success, returns 0; on failure, returns < 0.
+ */
+static int qcomtee_update_args(struct qcomtee_arg *u,
+ struct qcomtee_object_invoke_ctx *oic)
+{
+ struct qcomtee_msg_object_invoke *msg;
+ int i, ib, ob, io, oo;
+ int ret = 0;
+
+ /* Use the input message buffer in 'oic'. */
+ msg = oic->in_msg.addr;
+
+ ib = 0;
+ qcomtee_arg_for_each_input_buffer(i, u)
+ ib++;
+
+ ob = ib;
+ qcomtee_arg_for_each_output_buffer(i, u) {
+ void *msgptr; /* Address of buffer payload: */
+ /* QTEE can override the size to a smaller value. */
+ u[i].b.size = msg->args[ob].b.size;
+
+ msgptr = qcomtee_msg_offset_to_ptr(msg, msg->args[ob].b.offset);
+ /* Copy to a userspace or a kernel client buffer. */
+ if (!(u[i].flags & QCOMTEE_ARG_FLAGS_UADDR))
+ memcpy(u[i].b.addr, msgptr, u[i].b.size);
+ else if (copy_to_user(u[i].b.uaddr, msgptr, u[i].b.size))
+ ret = -EINVAL;
+
+ ob++;
+ }
+
+ io = ob;
+ qcomtee_arg_for_each_input_object(i, u)
+ io++;
+
+ oo = io;
+ qcomtee_arg_for_each_output_object(i, u) {
+ if (qcomtee_object_qtee_init(oic, &u[i].o, msg->args[oo].o))
+ ret = -EINVAL;
+
+ oo++;
+ }
+
+ return ret;
+}
+
+/**
+ * qcomtee_prepare_args() - Parse the QTEE request from the outbound buffer.
+ * @oic: context to use for the invocation.
+ *
+ * It initializes &qcomtee_object_invoke_ctx->u based on the QTEE request in
+ * the outbound buffer. It sets %QCOMTEE_ARG_TYPE_INV at the end of the array.
+ *
+ * On failure, it continues processing the QTEE message. The caller should
+ * do the necessary cleanup, including calling qcomtee_object_put()
+ * on the input objects.
+ *
+ * Return: On success, returns 0; on failure, returns < 0.
+ */
+static int qcomtee_prepare_args(struct qcomtee_object_invoke_ctx *oic)
+{
+ struct qcomtee_msg_callback *msg;
+ int i, ret = 0;
+
+ /* Use the output message buffer in 'oic'. */
+ msg = oic->out_msg.addr;
+
+ qcomtee_msg_for_each_input_buffer(i, msg) {
+ oic->u[i].b.addr =
+ qcomtee_msg_offset_to_ptr(msg, msg->args[i].b.offset);
+ oic->u[i].b.size = msg->args[i].b.size;
+ oic->u[i].type = QCOMTEE_ARG_TYPE_IB;
+ }
+
+ qcomtee_msg_for_each_output_buffer(i, msg) {
+ oic->u[i].b.addr =
+ qcomtee_msg_offset_to_ptr(msg, msg->args[i].b.offset);
+ oic->u[i].b.size = msg->args[i].b.size;
+ oic->u[i].type = QCOMTEE_ARG_TYPE_OB;
+ }
+
+ qcomtee_msg_for_each_input_object(i, msg) {
+ if (qcomtee_object_qtee_init(oic, &oic->u[i].o, msg->args[i].o))
+ ret = -EINVAL;
+
+ oic->u[i].type = QCOMTEE_ARG_TYPE_IO;
+ }
+
+ qcomtee_msg_for_each_output_object(i, msg)
+ oic->u[i].type = QCOMTEE_ARG_TYPE_OO;
+
+ /* End of Arguments. */
+ oic->u[i].type = QCOMTEE_ARG_TYPE_INV;
+
+ return ret;
+}
+
+static int qcomtee_update_msg(struct qcomtee_object_invoke_ctx *oic)
+{
+ struct qcomtee_msg_callback *msg;
+ int i, ib, ob, io, oo;
+
+ /* Use the output message buffer in 'oic'. */
+ msg = oic->out_msg.addr;
+
+ ib = 0;
+ qcomtee_arg_for_each_input_buffer(i, oic->u)
+ ib++;
+
+ ob = ib;
+ qcomtee_arg_for_each_output_buffer(i, oic->u) {
+ /* Only reduce size; never increase it. */
+ if (msg->args[ob].b.size < oic->u[i].b.size)
+ return -EINVAL;
+
+ msg->args[ob].b.size = oic->u[i].b.size;
+ ob++;
+ }
+
+ io = ob;
+ qcomtee_arg_for_each_input_object(i, oic->u)
+ io++;
+
+ oo = io;
+ qcomtee_arg_for_each_output_object(i, oic->u) {
+ if (qcomtee_object_id_get(oic, oic->u[i].o, &msg->args[oo].o)) {
+ for (oo--; oo >= io; oo--)
+ qcomtee_object_id_put(oic, msg->args[oo].o);
+
+ return -ENOSPC;
+ }
+
+ oo++;
+ }
+
+ return 0;
+}
+
+/* Invoke a callback object. */
+static void qcomtee_cb_object_invoke(struct qcomtee_object_invoke_ctx *oic,
+ struct qcomtee_msg_callback *msg)
+{
+ int i, errno;
+ u32 op;
+
+ /* Get the object being invoked. */
+ unsigned int object_id = msg->cxt;
+ struct qcomtee_object *object;
+
+ /* QTEE cannot invoke a NULL object or objects it hosts. */
+ if (qcomtee_object_type(object_id) == QCOMTEE_OBJECT_TYPE_NULL ||
+ qcomtee_object_type(object_id) == QCOMTEE_OBJECT_TYPE_TEE) {
+ errno = -EINVAL;
+ goto out;
+ }
+
+ object = qcomtee_local_object_get(oic, object_id);
+ if (object == NULL_QCOMTEE_OBJECT) {
+ errno = -EINVAL;
+ goto out;
+ }
+
+ oic->object = object;
+
+ /* Filter bits used by transport. */
+ op = msg->op & QCOMTEE_MSG_OBJECT_OP_MASK;
+
+ switch (op) {
+ case QCOMTEE_MSG_OBJECT_OP_RELEASE:
+ qcomtee_object_id_put(oic, object_id);
+ qcomtee_object_put(object);
+ errno = 0;
+
+ break;
+ case QCOMTEE_MSG_OBJECT_OP_RETAIN:
+ qcomtee_object_get(object);
+ errno = 0;
+
+ break;
+ default:
+ errno = qcomtee_prepare_args(oic);
+ if (errno) {
+ /* Release any object that arrived as input. */
+ qcomtee_arg_for_each_input_object(i, oic->u)
+ qcomtee_object_put(oic->u[i].o);
+
+ break;
+ }
+
+ errno = object->ops->dispatch(oic, object, op, oic->u);
+ if (!errno) {
+ /* On success, notify at the appropriate time. */
+ oic->flags |= QCOMTEE_OIC_FLAG_NOTIFY;
+ }
+ }
+
+out:
+
+ oic->errno = errno;
+}
+
+static int
+qcomtee_object_invoke_ctx_invoke(struct qcomtee_object_invoke_ctx *oic,
+ int *result, u64 *res_type)
+{
+ phys_addr_t out_msg_paddr;
+ phys_addr_t in_msg_paddr;
+ int ret;
+ u64 res;
+
+ tee_shm_get_pa(oic->out_shm, 0, &out_msg_paddr);
+ tee_shm_get_pa(oic->in_shm, 0, &in_msg_paddr);
+ if (!(oic->flags & QCOMTEE_OIC_FLAG_BUSY))
+ ret = qcom_scm_qtee_invoke_smc(in_msg_paddr, oic->in_msg.size,
+ out_msg_paddr, oic->out_msg.size,
+ &res, res_type);
+ else
+ ret = qcom_scm_qtee_callback_response(out_msg_paddr,
+ oic->out_msg.size,
+ &res, res_type);
+
+ if (ret)
+ pr_err("QTEE returned with %d.\n", ret);
+ else
+ *result = (int)res;
+
+ return ret;
+}
+
+/**
+ * qcomtee_qtee_objects_put() - Put the callback objects in the argument array.
+ * @u: array of arguments.
+ *
+ * When qcomtee_object_do_invoke_internal() is successfully invoked,
+ * QTEE takes ownership of the callback objects. If the invocation fails,
+ * qcomtee_object_do_invoke_internal() calls qcomtee_qtee_objects_put()
+ * to mimic the release of callback objects by QTEE.
+ */
+static void qcomtee_qtee_objects_put(struct qcomtee_arg *u)
+{
+ int i;
+
+ qcomtee_arg_for_each_input_object(i, u) {
+ if (typeof_qcomtee_object(u[i].o) == QCOMTEE_OBJECT_TYPE_CB)
+ qcomtee_object_put(u[i].o);
+ }
+}
+
+/**
+ * qcomtee_object_do_invoke_internal() - Submit an invocation for an object.
+ * @oic: context to use for the current invocation.
+ * @object: object being invoked.
+ * @op: requested operation on the object.
+ * @u: array of arguments for the current invocation.
+ * @result: result returned from QTEE.
+ *
+ * The caller is responsible for keeping track of the refcount for each
+ * object, including @object. On return, the caller loses ownership of all
+ * input objects of type %QCOMTEE_OBJECT_TYPE_CB.
+ *
+ * Return: On success, returns 0; on failure, returns < 0.
+ */
+int qcomtee_object_do_invoke_internal(struct qcomtee_object_invoke_ctx *oic,
+ struct qcomtee_object *object, u32 op,
+ struct qcomtee_arg *u, int *result)
+{
+ struct qcomtee_msg_callback *cb_msg;
+ struct qcomtee_object *qto;
+ int i, ret, errno;
+ u64 res_type;
+
+ /* Allocate inbound and outbound buffers. */
+ ret = qcomtee_msg_buffers_alloc(oic, u);
+ if (ret) {
+ qcomtee_qtee_objects_put(u);
+
+ return ret;
+ }
+
+ ret = qcomtee_prepare_msg(oic, object, op, u);
+ if (ret) {
+ qcomtee_qtee_objects_put(u);
+
+ goto out;
+ }
+
+ /* Use the output message buffer in 'oic'. */
+ cb_msg = oic->out_msg.addr;
+
+ while (1) {
+ if (oic->flags & QCOMTEE_OIC_FLAG_BUSY) {
+ errno = oic->errno;
+ if (!errno)
+ errno = qcomtee_update_msg(oic);
+ qcomtee_msg_set_result(cb_msg, errno);
+ }
+
+ /* Invoke the remote object. */
+ ret = qcomtee_object_invoke_ctx_invoke(oic, result, &res_type);
+ /* Returned from a callback object's result submission: */
+ if (oic->flags & QCOMTEE_OIC_FLAG_BUSY) {
+ qto = oic->object;
+ if (qto) {
+ if (oic->flags & QCOMTEE_OIC_FLAG_NOTIFY) {
+ if (qto->ops->notify)
+ qto->ops->notify(oic, qto,
+ errno || ret);
+ }
+
+ /* Get is in qcomtee_cb_object_invoke(). */
+ qcomtee_object_put(qto);
+ }
+
+ oic->object = NULL_QCOMTEE_OBJECT;
+ oic->flags &= ~(QCOMTEE_OIC_FLAG_BUSY |
+ QCOMTEE_OIC_FLAG_NOTIFY);
+ }
+
+ if (ret) {
+ /*
+ * Unable to finish the invocation.
+ * If QCOMTEE_OIC_FLAG_SHARED is not set, put
+ * QCOMTEE_OBJECT_TYPE_CB input objects.
+ */
+ if (!(oic->flags & QCOMTEE_OIC_FLAG_SHARED))
+ qcomtee_qtee_objects_put(u);
+ else
+ ret = -ENODEV;
+
+ goto out;
+
+ } else {
+ /*
+ * QTEE obtained ownership of QCOMTEE_OBJECT_TYPE_CB
+ * input objects in 'u'. On further failure, QTEE is
+ * responsible for releasing them.
+ */
+ oic->flags |= QCOMTEE_OIC_FLAG_SHARED;
+ }
+
+ /* Is it a callback request? */
+ if (res_type != QCOMTEE_RESULT_INBOUND_REQ_NEEDED) {
+ /*
+ * Parse results. If failed, assume the service
+ * was unavailable (i.e. QCOMTEE_MSG_ERROR_UNAVAIL)
+ * and put output objects to initiate cleanup.
+ */
+ if (!*result && qcomtee_update_args(u, oic)) {
+ *result = QCOMTEE_MSG_ERROR_UNAVAIL;
+ qcomtee_arg_for_each_output_object(i, u)
+ qcomtee_object_put(u[i].o);
+ }
+
+ break;
+
+ } else {
+ oic->flags |= QCOMTEE_OIC_FLAG_BUSY;
+ qcomtee_fetch_async_reqs(oic);
+ qcomtee_cb_object_invoke(oic, cb_msg);
+ }
+ }
+
+ qcomtee_fetch_async_reqs(oic);
+out:
+ qcomtee_msg_buffers_free(oic);
+
+ return ret;
+}
+
+int qcomtee_object_do_invoke(struct qcomtee_object_invoke_ctx *oic,
+ struct qcomtee_object *object, u32 op,
+ struct qcomtee_arg *u, int *result)
+{
+ /* The user cannot set bits used by the transport. */
+ if (op & ~QCOMTEE_MSG_OBJECT_OP_MASK)
+ return -EINVAL;
+
+ /* User can only invoke QTEE hosted objects. */
+ if (typeof_qcomtee_object(object) != QCOMTEE_OBJECT_TYPE_TEE &&
+ typeof_qcomtee_object(object) != QCOMTEE_OBJECT_TYPE_ROOT)
+ return -EINVAL;
+
+ /* User cannot directly issue these operations to QTEE. */
+ if (op == QCOMTEE_MSG_OBJECT_OP_RELEASE ||
+ op == QCOMTEE_MSG_OBJECT_OP_RETAIN)
+ return -EINVAL;
+
+ return qcomtee_object_do_invoke_internal(oic, object, op, u, result);
+}
+
+/**
+ * qcomtee_object_get_client_env() - Get a privileged client env. object.
+ * @oic: context to use for the current invocation.
+ *
+ * The caller should call qcomtee_object_put() on the returned object
+ * to release it.
+ *
+ * Return: On error, returns %NULL_QCOMTEE_OBJECT.
+ * On success, returns the object.
+ */
+struct qcomtee_object *
+qcomtee_object_get_client_env(struct qcomtee_object_invoke_ctx *oic)
+{
+ struct qcomtee_arg u[3] = { 0 };
+ int ret, result;
+
+ u[0].o = NULL_QCOMTEE_OBJECT;
+ u[0].type = QCOMTEE_ARG_TYPE_IO;
+ u[1].type = QCOMTEE_ARG_TYPE_OO;
+ ret = qcomtee_object_do_invoke(oic, ROOT_QCOMTEE_OBJECT,
+ QCOMTEE_ROOT_OP_REG_WITH_CREDENTIALS, u,
+ &result);
+ if (ret || result)
+ return NULL_QCOMTEE_OBJECT;
+
+ return u[1].o;
+}
+
+struct qcomtee_object *
+qcomtee_object_get_service(struct qcomtee_object_invoke_ctx *oic,
+ struct qcomtee_object *client_env, u32 uid)
+{
+ struct qcomtee_arg u[3] = { 0 };
+ int ret, result;
+
+ u[0].b.addr = &uid;
+ u[0].b.size = sizeof(uid);
+ u[0].type = QCOMTEE_ARG_TYPE_IB;
+ u[1].type = QCOMTEE_ARG_TYPE_OO;
+ ret = qcomtee_object_do_invoke(oic, client_env, QCOMTEE_CLIENT_ENV_OPEN,
+ u, &result);
+
+ if (ret || result)
+ return NULL_QCOMTEE_OBJECT;
+
+ return u[1].o;
+}
diff --git a/drivers/tee/qcomtee/mem_obj.c b/drivers/tee/qcomtee/mem_obj.c
new file mode 100644
index 000000000000..228a3e30a31b
--- /dev/null
+++ b/drivers/tee/qcomtee/mem_obj.c
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/firmware/qcom/qcom_scm.h>
+#include <linux/mm.h>
+
+#include "qcomtee.h"
+
+/**
+ * DOC: Memory and Mapping Objects
+ *
+ * QTEE uses memory objects for memory sharing with Linux.
+ * A memory object can be a standard dma_buf or a contiguous memory range,
+ * e.g., tee_shm. A memory object should support one operation: map. When
+ * invoked by QTEE, a mapping object is generated. A mapping object supports
+ * one operation: unmap.
+ *
+ * (1) To map a memory object, QTEE invokes the primordial object with
+ * %QCOMTEE_OBJECT_OP_MAP_REGION operation; see
+ * qcomtee_primordial_obj_dispatch().
+ * (2) To unmap a memory object, QTEE releases the mapping object which
+ * calls qcomtee_mem_object_release().
+ *
+ * The map operation is implemented in the primordial object as a privileged
+ * operation instead of qcomtee_mem_object_dispatch(). Otherwise, on
+ * platforms without shm_bridge, a user can trick QTEE into writing to the
+ * kernel memory by passing a user object as a memory object and returning a
+ * random physical address as the result of the mapping request.
+ */
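+
+/*
+ * Illustrative sketch (error handling elided): wrapping a userspace OBJREF
+ * parameter carrying %QCOMTEE_OBJREF_FLAG_MEM in a memory object for QTEE:
+ *
+ *	struct qcomtee_object *object;
+ *
+ *	qcomtee_memobj_param_to_object(&object, param, ctx);
+ *	(pass 'object' to QTEE as a QCOMTEE_ARG_TYPE_IO argument; when QTEE
+ *	 maps it with %QCOMTEE_OBJECT_OP_MAP_REGION, qcomtee_mem_object_map()
+ *	 creates the mapping object that QTEE releases to unmap)
+ */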
+
+struct qcomtee_mem_object {
+ struct qcomtee_object object;
+ struct tee_shm *shm;
+ /* QTEE requires these fields to be page aligned. */
+ phys_addr_t paddr; /* Physical address of range. */
+ size_t size; /* Size of the range. */
+};
+
+#define to_qcomtee_mem_object(o) \
+ container_of((o), struct qcomtee_mem_object, object)
+
+static struct qcomtee_object_operations qcomtee_mem_object_ops;
+
+/* Is it a memory object using tee_shm? */
+int is_qcomtee_memobj_object(struct qcomtee_object *object)
+{
+ return object != NULL_QCOMTEE_OBJECT &&
+ typeof_qcomtee_object(object) == QCOMTEE_OBJECT_TYPE_CB &&
+ object->ops == &qcomtee_mem_object_ops;
+}
+
+static int qcomtee_mem_object_dispatch(struct qcomtee_object_invoke_ctx *oic,
+ struct qcomtee_object *object, u32 op,
+ struct qcomtee_arg *args)
+{
+ return -EINVAL;
+}
+
+static void qcomtee_mem_object_release(struct qcomtee_object *object)
+{
+ struct qcomtee_mem_object *mem_object = to_qcomtee_mem_object(object);
+
+ /* Matching get is in qcomtee_memobj_param_to_object(). */
+ tee_shm_put(mem_object->shm);
+ kfree(mem_object);
+}
+
+static struct qcomtee_object_operations qcomtee_mem_object_ops = {
+ .release = qcomtee_mem_object_release,
+ .dispatch = qcomtee_mem_object_dispatch,
+};
+
+/**
+ * qcomtee_memobj_param_to_object() - OBJREF parameter to &struct qcomtee_object.
+ * @object: object returned.
+ * @param: TEE parameter.
+ * @ctx: context in which the conversion should happen.
+ *
+ * @param is an OBJREF with %QCOMTEE_OBJREF_FLAG_MEM flags.
+ *
+ * Return: On success, returns 0; on failure, returns < 0.
+ */
+int qcomtee_memobj_param_to_object(struct qcomtee_object **object,
+ struct tee_param *param,
+ struct tee_context *ctx)
+{
+ struct qcomtee_mem_object *mem_object __free(kfree) = NULL;
+ struct tee_shm *shm;
+ int err;
+
+ mem_object = kzalloc(sizeof(*mem_object), GFP_KERNEL);
+ if (!mem_object)
+ return -ENOMEM;
+
+ shm = tee_shm_get_from_id(ctx, param->u.objref.id);
+ if (IS_ERR(shm))
+ return PTR_ERR(shm);
+
+ /* mem-object wrapping the memref. */
+ err = qcomtee_object_user_init(&mem_object->object,
+ QCOMTEE_OBJECT_TYPE_CB,
+ &qcomtee_mem_object_ops, "tee-shm-%d",
+ shm->id);
+ if (err) {
+ tee_shm_put(shm);
+
+ return err;
+ }
+
+ mem_object->paddr = shm->paddr;
+ mem_object->size = shm->size;
+ mem_object->shm = shm;
+
+ *object = &no_free_ptr(mem_object)->object;
+
+ return 0;
+}
+
+/* Reverse what qcomtee_memobj_param_to_object() does. */
+int qcomtee_memobj_param_from_object(struct tee_param *param,
+ struct qcomtee_object *object,
+ struct tee_context *ctx)
+{
+ struct qcomtee_mem_object *mem_object;
+
+ mem_object = to_qcomtee_mem_object(object);
+ /* Ensure the memobj is used in the same context it originated from. */
+ if (mem_object->shm->ctx != ctx)
+ return -EINVAL;
+
+ param->u.objref.id = mem_object->shm->id;
+ param->u.objref.flags = QCOMTEE_OBJREF_FLAG_MEM;
+
+ /* Passing shm->id to userspace; drop the reference. */
+ qcomtee_object_put(object);
+
+ return 0;
+}
+
+/**
+ * qcomtee_mem_object_map() - Map a memory object.
+ * @object: memory object.
+ * @map_object: created mapping object.
+ * @mem_paddr: physical address of the memory.
+ * @mem_size: size of the memory.
+ * @perms: QTEE access permissions.
+ *
+ * Return: On success, returns 0; on failure, returns < 0.
+ */
+int qcomtee_mem_object_map(struct qcomtee_object *object,
+ struct qcomtee_object **map_object, u64 *mem_paddr,
+ u64 *mem_size, u32 *perms)
+{
+ struct qcomtee_mem_object *mem_object = to_qcomtee_mem_object(object);
+
+ /* Reuses the memory object as a mapping object by re-sharing it. */
+ qcomtee_object_get(&mem_object->object);
+
+ *map_object = &mem_object->object;
+ *mem_paddr = mem_object->paddr;
+ *mem_size = mem_object->size;
+ *perms = QCOM_SCM_PERM_RW;
+
+ return 0;
+}
diff --git a/drivers/tee/qcomtee/primordial_obj.c b/drivers/tee/qcomtee/primordial_obj.c
new file mode 100644
index 000000000000..b6f811e83b11
--- /dev/null
+++ b/drivers/tee/qcomtee/primordial_obj.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include <linux/delay.h>
+#include "qcomtee.h"
+
+/**
+ * DOC: Primordial Object
+ *
+ * After boot, the kernel provides a static object of type
+ * %QCOMTEE_OBJECT_TYPE_CB called the primordial object. This object is used
+ * for native kernel services or privileged operations.
+ *
+ * We support:
+ * - %QCOMTEE_OBJECT_OP_MAP_REGION to map a memory object and return mapping
+ * object and mapping information (see qcomtee_mem_object_map()).
+ * - %QCOMTEE_OBJECT_OP_YIELD to yield by the thread running in QTEE.
+ * - %QCOMTEE_OBJECT_OP_SLEEP to wait for a period of time.
+ */
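+
+/*
+ * Illustrative sketch: a %QCOMTEE_OBJECT_OP_SLEEP request from QTEE carries
+ * one input buffer holding the delay in milliseconds. After
+ * qcomtee_prepare_args(), the dispatcher below sees:
+ *
+ *	args[0].type == QCOMTEE_ARG_TYPE_IB
+ *	args[0].b.addr		(payload in the outbound buffer)
+ *	args[0].b.size >= sizeof(u32)
+ */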
+
+#define QCOMTEE_OBJECT_OP_MAP_REGION 0
+#define QCOMTEE_OBJECT_OP_YIELD 1
+#define QCOMTEE_OBJECT_OP_SLEEP 2
+
+/* Mapping information format as expected by QTEE. */
+struct qcomtee_mapping_info {
+ u64 paddr;
+ u64 len;
+ u32 perms;
+} __packed;
+
+static int
+qcomtee_primordial_obj_dispatch(struct qcomtee_object_invoke_ctx *oic,
+ struct qcomtee_object *primordial_object_unused,
+ u32 op, struct qcomtee_arg *args)
+{
+ struct qcomtee_mapping_info *map_info;
+ struct qcomtee_object *mem_object;
+ struct qcomtee_object *map_object;
+ int err = 0;
+
+ switch (op) {
+ case QCOMTEE_OBJECT_OP_YIELD:
+ cond_resched();
+ /* No output object. */
+ oic->data = NULL;
+
+ break;
+ case QCOMTEE_OBJECT_OP_SLEEP:
+ /* Check that the message format matches the QCOMTEE_OBJECT_OP_SLEEP op. */
+ if (qcomtee_args_len(args) != 1 ||
+ args[0].type != QCOMTEE_ARG_TYPE_IB ||
+ args[0].b.size < sizeof(u32))
+ return -EINVAL;
+
+ msleep(*(u32 *)(args[0].b.addr));
+ /* No output object. */
+ oic->data = NULL;
+
+ break;
+ case QCOMTEE_OBJECT_OP_MAP_REGION:
+ if (qcomtee_args_len(args) != 3 ||
+ args[0].type != QCOMTEE_ARG_TYPE_OB ||
+ args[1].type != QCOMTEE_ARG_TYPE_IO ||
+ args[2].type != QCOMTEE_ARG_TYPE_OO ||
+ args[0].b.size < sizeof(struct qcomtee_mapping_info))
+ return -EINVAL;
+
+ map_info = args[0].b.addr;
+ mem_object = args[1].o;
+
+ qcomtee_mem_object_map(mem_object, &map_object,
+ &map_info->paddr, &map_info->len,
+ &map_info->perms);
+
+ args[2].o = map_object;
+ /* One output object; pass it for cleanup to notify. */
+ oic->data = map_object;
+
+ qcomtee_object_put(mem_object);
+
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+/* Called after submitting the callback response. */
+static void qcomtee_primordial_obj_notify(struct qcomtee_object_invoke_ctx *oic,
+ struct qcomtee_object *unused,
+ int err)
+{
+ struct qcomtee_object *object = oic->data;
+
+ /* If err, QTEE did not obtain the mapping object; drop it. */
+ if (object && err)
+ qcomtee_object_put(object);
+}
+
+static struct qcomtee_object_operations qcomtee_primordial_obj_ops = {
+ .dispatch = qcomtee_primordial_obj_dispatch,
+ .notify = qcomtee_primordial_obj_notify,
+};
+
+struct qcomtee_object qcomtee_primordial_object = {
+ .name = "primordial",
+ .object_type = QCOMTEE_OBJECT_TYPE_CB,
+ .ops = &qcomtee_primordial_obj_ops
+};
diff --git a/drivers/tee/qcomtee/qcomtee.h b/drivers/tee/qcomtee/qcomtee.h
new file mode 100644
index 000000000000..f39bf63fd1c2
--- /dev/null
+++ b/drivers/tee/qcomtee/qcomtee.h
@@ -0,0 +1,185 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef QCOMTEE_H
+#define QCOMTEE_H
+
+#include <linux/kobject.h>
+#include <linux/tee_core.h>
+
+#include "qcomtee_msg.h"
+#include "qcomtee_object.h"
+
+/* Flags relating to object reference. */
+#define QCOMTEE_OBJREF_FLAG_TEE BIT(0)
+#define QCOMTEE_OBJREF_FLAG_USER BIT(1)
+#define QCOMTEE_OBJREF_FLAG_MEM BIT(2)
+
+/**
+ * struct qcomtee - Main service struct.
+ * @teedev: client device.
+ * @pool: shared memory pool.
+ * @ctx: driver private context.
+ * @oic: context to use for the current driver invocation.
+ * @wq: workqueue for QTEE async operations.
+ * @xa_local_objects: array of objects exported to QTEE.
+ * @xa_last_id: next ID to allocate.
+ * @qtee_version: QTEE version.
+ */
+struct qcomtee {
+ struct tee_device *teedev;
+ struct tee_shm_pool *pool;
+ struct tee_context *ctx;
+ struct qcomtee_object_invoke_ctx oic;
+ struct workqueue_struct *wq;
+ struct xarray xa_local_objects;
+ u32 xa_last_id;
+ u32 qtee_version;
+};
+
+void qcomtee_fetch_async_reqs(struct qcomtee_object_invoke_ctx *oic);
+struct qcomtee_object *qcomtee_idx_erase(struct qcomtee_object_invoke_ctx *oic,
+ u32 idx);
+
+struct tee_shm_pool *qcomtee_shm_pool_alloc(void);
+void qcomtee_msg_buffers_free(struct qcomtee_object_invoke_ctx *oic);
+int qcomtee_msg_buffers_alloc(struct qcomtee_object_invoke_ctx *oic,
+ struct qcomtee_arg *u);
+
+/**
+ * qcomtee_object_do_invoke_internal() - Submit an invocation for an object.
+ * @oic: context to use for the current invocation.
+ * @object: object being invoked.
+ * @op: requested operation on the object.
+ * @u: array of arguments for the current invocation.
+ * @result: result returned from QTEE.
+ *
+ * The caller is responsible for keeping track of the refcount for each
+ * object, including @object. On return, the caller loses ownership of all
+ * input objects of type %QCOMTEE_OBJECT_TYPE_CB.
+ *
+ * Return: On success, returns 0; on failure, returns < 0.
+ */
+int qcomtee_object_do_invoke_internal(struct qcomtee_object_invoke_ctx *oic,
+ struct qcomtee_object *object, u32 op,
+ struct qcomtee_arg *u, int *result);
+
+/**
+ * struct qcomtee_context_data - Clients' or supplicants' context.
+ * @qtee_objects_idr: QTEE objects in this context.
+ * @qtee_lock: mutex for @qtee_objects_idr.
+ * @reqs_idr: requests in this context that hold an ID.
+ * @reqs_list: FIFO for requests in PROCESSING or QUEUED state.
+ * @reqs_lock: mutex for @reqs_idr, @reqs_list and request states.
+ * @req_c: completion used when the supplicant is waiting for requests.
+ * @released: state of this context.
+ */
+struct qcomtee_context_data {
+ struct idr qtee_objects_idr;
+ /* Synchronize access to @qtee_objects_idr. */
+ struct mutex qtee_lock;
+
+ struct idr reqs_idr;
+ struct list_head reqs_list;
+ /* Synchronize access to @reqs_idr, @reqs_list and request states. */
+ struct mutex reqs_lock;
+
+ struct completion req_c;
+
+ bool released;
+};
+
+int qcomtee_context_add_qtee_object(struct tee_param *param,
+ struct qcomtee_object *object,
+ struct tee_context *ctx);
+int qcomtee_context_find_qtee_object(struct qcomtee_object **object,
+ struct tee_param *param,
+ struct tee_context *ctx);
+void qcomtee_context_del_qtee_object(struct tee_param *param,
+ struct tee_context *ctx);
+
+int qcomtee_objref_to_arg(struct qcomtee_arg *arg, struct tee_param *param,
+ struct tee_context *ctx);
+int qcomtee_objref_from_arg(struct tee_param *param, struct qcomtee_arg *arg,
+ struct tee_context *ctx);
+
+/* OBJECTS: */
+
+/* (1) User Object API. */
+
+int is_qcomtee_user_object(struct qcomtee_object *object);
+void qcomtee_user_object_set_notify(struct qcomtee_object *object, bool notify);
+void qcomtee_requests_destroy(struct qcomtee_context_data *ctxdata);
+int qcomtee_user_param_to_object(struct qcomtee_object **object,
+ struct tee_param *param,
+ struct tee_context *ctx);
+int qcomtee_user_param_from_object(struct tee_param *param,
+ struct qcomtee_object *object,
+ struct tee_context *ctx);
+
+/**
+ * struct qcomtee_user_object_request_data - Data for user object request.
+ * @id: ID assigned to the request.
+ * @object_id: Object ID being invoked by QTEE.
+ * @op: Requested operation on object.
+ * @np: Number of parameters in the request.
+ */
+struct qcomtee_user_object_request_data {
+ int id;
+ u64 object_id;
+ u32 op;
+ int np;
+};
+
+int qcomtee_user_object_select(struct tee_context *ctx,
+ struct tee_param *params, int num_params,
+ void __user *uaddr, size_t size,
+ struct qcomtee_user_object_request_data *data);
+int qcomtee_user_object_submit(struct tee_context *ctx,
+ struct tee_param *params, int num_params,
+ int req_id, int errno);
+
+/* (2) Primordial Object. */
+extern struct qcomtee_object qcomtee_primordial_object;
+
+/* (3) Memory Object API. */
+
+/* Is it a memory object using tee_shm? */
+int is_qcomtee_memobj_object(struct qcomtee_object *object);
+
+/**
+ * qcomtee_memobj_param_to_object() - OBJREF parameter to &struct qcomtee_object.
+ * @object: object returned.
+ * @param: TEE parameter.
+ * @ctx: context in which the conversion should happen.
+ *
+ * @param is an OBJREF with %QCOMTEE_OBJREF_FLAG_MEM flags.
+ *
+ * Return: On success, returns 0; on failure, returns < 0.
+ */
+int qcomtee_memobj_param_to_object(struct qcomtee_object **object,
+ struct tee_param *param,
+ struct tee_context *ctx);
+
+/* Reverse what qcomtee_memobj_param_to_object() does. */
+int qcomtee_memobj_param_from_object(struct tee_param *param,
+ struct qcomtee_object *object,
+ struct tee_context *ctx);
+
+/**
+ * qcomtee_mem_object_map() - Map a memory object.
+ * @object: memory object.
+ * @map_object: created mapping object.
+ * @mem_paddr: physical address of the memory.
+ * @mem_size: size of the memory.
+ * @perms: QTEE access permissions.
+ *
+ * Return: On success, returns 0; on failure, returns < 0.
+ */
+int qcomtee_mem_object_map(struct qcomtee_object *object,
+ struct qcomtee_object **map_object, u64 *mem_paddr,
+ u64 *mem_size, u32 *perms);
+
+#endif /* QCOMTEE_H */
diff --git a/drivers/tee/qcomtee/qcomtee_msg.h b/drivers/tee/qcomtee/qcomtee_msg.h
new file mode 100644
index 000000000000..878f70178a5b
--- /dev/null
+++ b/drivers/tee/qcomtee/qcomtee_msg.h
@@ -0,0 +1,304 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef QCOMTEE_MSG_H
+#define QCOMTEE_MSG_H
+
+#include <linux/bitfield.h>
+
+/**
+ * DOC: ''Qualcomm TEE'' (QTEE) Transport Message
+ *
+ * There are two buffers shared with QTEE: inbound and outbound buffers.
+ * The inbound buffer is used for direct object invocation, and the outbound
+ * buffer is used to make a request from QTEE to the kernel; i.e., a callback
+ * request.
+ *
+ * The unused tail of the outbound buffer is also used for sending and
+ * receiving asynchronous messages. An asynchronous message is independent of
+ * the current object invocation (i.e., contents of the inbound buffer) or
+ * callback request (i.e., the head of the outbound buffer); see
+ * qcomtee_get_async_buffer(). It is used by endpoints (QTEE or kernel) as an
+ * optimization to reduce the number of context switches between the secure and
+ * non-secure worlds.
+ *
+ * For instance, QTEE never sends an explicit callback request to release an
+ * object in the kernel. Instead, it sends asynchronous release messages in the
+ * outbound buffer when QTEE returns from the previous direct object invocation,
+ * or appends asynchronous release messages after the current callback request.
+ *
+ * QTEE supports two types of arguments in a message: buffer and object
+ * arguments. Depending on the direction of data flow, they could be input
+ * buffer (IB) to QTEE, output buffer (OB) from QTEE, input object (IO) to QTEE,
+ * or output object (OO) from QTEE. Object arguments hold object IDs. Buffer
+ * arguments hold (offset, size) pairs into the inbound or outbound buffers.
+ *
+ * QTEE holds an object table for objects it hosts and exposes to the kernel.
+ * An object ID is an index to the object table in QTEE.
+ *
+ * For the direct object invocation message format in the inbound buffer, see
+ * &struct qcomtee_msg_object_invoke. For the callback request message format
+ * in the outbound buffer, see &struct qcomtee_msg_callback. For the message
+ * format for asynchronous messages in the outbound buffer, see
+ * &struct qcomtee_async_msg_hdr.
+ */
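+
+/*
+ * Illustrative inbound buffer layout for an invocation with one input (IB)
+ * and one output (OB) buffer argument (counts = 0x0011):
+ *
+ *	+---------------------------------+ 0
+ *	| qcomtee_msg_object_invoke       | cxt, op, counts
+ *	|   args[0] = { offset0, size0 }  | IB
+ *	|   args[1] = { offset1, size1 }  | OB
+ *	+---------------------------------+ offset0 (64-bit aligned)
+ *	| IB payload (copied by kernel)   |
+ *	+---------------------------------+ offset1 (64-bit aligned)
+ *	| OB payload (filled by QTEE)     |
+ *	+---------------------------------+
+ */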
+
+/**
+ * define QCOMTEE_MSG_OBJECT_NS_BIT - Non-secure bit
+ *
+ * Object ID is a globally unique 32-bit number. IDs referencing objects
+ * in the kernel should have %QCOMTEE_MSG_OBJECT_NS_BIT set.
+ */
+#define QCOMTEE_MSG_OBJECT_NS_BIT BIT(31)
+
+/* Static object IDs recognized by QTEE. */
+#define QCOMTEE_MSG_OBJECT_NULL (0U)
+#define QCOMTEE_MSG_OBJECT_ROOT (1U)
+
+/* Definitions from QTEE as part of the transport protocol. */
+
+/* qcomtee_msg_arg is an argument as recognized by QTEE. */
+union qcomtee_msg_arg {
+ struct {
+ u32 offset;
+ u32 size;
+ } b;
+ u32 o;
+};
+
+/* IB and OB payloads in QTEE messages should be at 64-bit boundaries. */
+#define qcomtee_msg_offset_align(o) ALIGN((o), sizeof(u64))
+
+/* Operations for objects are 32-bit. Transport uses the upper 16 bits. */
+#define QCOMTEE_MSG_OBJECT_OP_MASK GENMASK(15, 0)
+
+/* Reserved Operation IDs sent to QTEE: */
+/* QCOMTEE_MSG_OBJECT_OP_RELEASE - Reduces the refcount and releases the object.
+ * QCOMTEE_MSG_OBJECT_OP_RETAIN - Increases the refcount.
+ *
+ * These operation IDs are valid for all objects.
+ */
+
+#define QCOMTEE_MSG_OBJECT_OP_RELEASE (QCOMTEE_MSG_OBJECT_OP_MASK - 0)
+#define QCOMTEE_MSG_OBJECT_OP_RETAIN (QCOMTEE_MSG_OBJECT_OP_MASK - 1)
+
+/* Subset of operations supported by QTEE root object. */
+
+#define QCOMTEE_ROOT_OP_REG_WITH_CREDENTIALS 5
+#define QCOMTEE_ROOT_OP_NOTIFY_DOMAIN_CHANGE 4
+#define QCOMTEE_ROOT_OP_ADCI_ACCEPT 8
+#define QCOMTEE_ROOT_OP_ADCI_SHUTDOWN 9
+
+/* Subset of operations supported by client_env object. */
+
+#define QCOMTEE_CLIENT_ENV_OPEN 0
+
+/* List of available QTEE service UIDs and subset of operations. */
+
+#define QCOMTEE_FEATURE_VER_UID 2033
+#define QCOMTEE_FEATURE_VER_OP_GET 0
+/* Get QTEE version number. */
+#define QCOMTEE_FEATURE_VER_OP_GET_QTEE_ID 10
+#define QTEE_VERSION_GET_MAJOR(x) (((x) >> 22) & 0xffU)
+#define QTEE_VERSION_GET_MINOR(x) (((x) >> 12) & 0xffU)
+#define QTEE_VERSION_GET_PATCH(x) ((x) >> 0 & 0xfffU)
+
+/* Response types as returned from qcomtee_object_invoke_ctx_invoke(). */
+
+/* The message contains a callback request. */
+#define QCOMTEE_RESULT_INBOUND_REQ_NEEDED 3
+
+/**
+ * struct qcomtee_msg_object_invoke - Direct object invocation message.
+ * @cxt: object ID hosted in QTEE.
+ * @op: operation for the object.
+ * @counts: number of different types of arguments in @args.
+ * @args: array of arguments.
+ *
+ * @counts consists of 4 * 4-bit fields. Bits 0 - 3 represent the number of
+ * input buffers, bits 4 - 7 represent the number of output buffers,
+ * bits 8 - 11 represent the number of input objects, and bits 12 - 15
+ * represent the number of output objects. The remaining bits should be zero.
+ *
+ * 15 12 11 8 7 4 3 0
+ * +----------------+----------------+----------------+----------------+
+ * | #OO objects | #IO objects | #OB buffers | #IB buffers |
+ * +----------------+----------------+----------------+----------------+
+ *
+ * The maximum number of arguments of each type is defined by
+ * %QCOMTEE_ARGS_PER_TYPE.
+ */
+struct qcomtee_msg_object_invoke {
+ u32 cxt;
+ u32 op;
+ u32 counts;
+ union qcomtee_msg_arg args[];
+};
+
+/* Bit masks for the four 4-bit nibbles holding the counts. */
+#define QCOMTEE_MASK_IB GENMASK(3, 0)
+#define QCOMTEE_MASK_OB GENMASK(7, 4)
+#define QCOMTEE_MASK_IO GENMASK(11, 8)
+#define QCOMTEE_MASK_OO GENMASK(15, 12)
+
+/**
+ * struct qcomtee_msg_callback - Callback request message.
+ * @result: result of operation @op on the object referenced by @cxt.
+ * @cxt: object ID hosted in the kernel.
+ * @op: operation for the object.
+ * @counts: number of different types of arguments in @args.
+ * @args: array of arguments.
+ *
+ * For details of @counts, see &qcomtee_msg_object_invoke.counts.
+ */
+struct qcomtee_msg_callback {
+ u32 result;
+ u32 cxt;
+ u32 op;
+ u32 counts;
+ union qcomtee_msg_arg args[];
+};
+
+/* Offset in the message for the beginning of the buffer argument's contents. */
+#define qcomtee_msg_buffer_args(t, n) \
+ qcomtee_msg_offset_align(struct_size_t(t, args, n))
+/* Pointer to the beginning of a buffer argument's content at an offset. */
+#define qcomtee_msg_offset_to_ptr(m, off) ((void *)&((char *)(m))[(off)])
+
+/* Some helpers to manage msg.counts. */
+
+static inline unsigned int qcomtee_msg_num_ib(u32 counts)
+{
+ return FIELD_GET(QCOMTEE_MASK_IB, counts);
+}
+
+static inline unsigned int qcomtee_msg_num_ob(u32 counts)
+{
+ return FIELD_GET(QCOMTEE_MASK_OB, counts);
+}
+
+static inline unsigned int qcomtee_msg_num_io(u32 counts)
+{
+ return FIELD_GET(QCOMTEE_MASK_IO, counts);
+}
+
+static inline unsigned int qcomtee_msg_num_oo(u32 counts)
+{
+ return FIELD_GET(QCOMTEE_MASK_OO, counts);
+}
+
+static inline unsigned int qcomtee_msg_idx_ib(u32 counts)
+{
+ return 0;
+}
+
+static inline unsigned int qcomtee_msg_idx_ob(u32 counts)
+{
+ return qcomtee_msg_num_ib(counts);
+}
+
+static inline unsigned int qcomtee_msg_idx_io(u32 counts)
+{
+ return qcomtee_msg_idx_ob(counts) + qcomtee_msg_num_ob(counts);
+}
+
+static inline unsigned int qcomtee_msg_idx_oo(u32 counts)
+{
+ return qcomtee_msg_idx_io(counts) + qcomtee_msg_num_io(counts);
+}
+
+#define qcomtee_msg_for_each(i, first, num) \
+ for ((i) = (first); (i) < (first) + (num); (i)++)
+
+#define qcomtee_msg_for_each_input_buffer(i, m) \
+ qcomtee_msg_for_each(i, qcomtee_msg_idx_ib((m)->counts), \
+ qcomtee_msg_num_ib((m)->counts))
+
+#define qcomtee_msg_for_each_output_buffer(i, m) \
+ qcomtee_msg_for_each(i, qcomtee_msg_idx_ob((m)->counts), \
+ qcomtee_msg_num_ob((m)->counts))
+
+#define qcomtee_msg_for_each_input_object(i, m) \
+ qcomtee_msg_for_each(i, qcomtee_msg_idx_io((m)->counts), \
+ qcomtee_msg_num_io((m)->counts))
+
+#define qcomtee_msg_for_each_output_object(i, m) \
+ qcomtee_msg_for_each(i, qcomtee_msg_idx_oo((m)->counts), \
+ qcomtee_msg_num_oo((m)->counts))
+
+/* Total number of arguments in a message. */
+#define qcomtee_msg_args(m) \
+ (qcomtee_msg_idx_oo((m)->counts) + qcomtee_msg_num_oo((m)->counts))
+
+static inline void qcomtee_msg_init(struct qcomtee_msg_object_invoke *msg,
+ u32 cxt, u32 op, int in_buffer,
+ int out_buffer, int in_object,
+ int out_object)
+{
+ u32 counts = 0;
+
+ counts |= (in_buffer & 0xfU);
+ counts |= ((out_buffer - in_buffer) & 0xfU) << 4;
+ counts |= ((in_object - out_buffer) & 0xfU) << 8;
+ counts |= ((out_object - in_object) & 0xfU) << 12;
+
+ msg->cxt = cxt;
+ msg->op = op;
+ msg->counts = counts;
+}
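+
+/*
+ * qcomtee_msg_init() receives the running indices built in
+ * qcomtee_prepare_msg(), each one past the end of its argument group, so the
+ * per-type counts are the differences. For example (illustrative), a message
+ * with 2 IB, 1 OB, and 1 OO arguments gives ib = 2, ob = 3, io = 3, oo = 4:
+ *
+ *	IB nibble: ib      = 2
+ *	OB nibble: ob - ib = 1
+ *	IO nibble: io - ob = 0
+ *	OO nibble: oo - io = 1	->  counts = 0x1012
+ */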
+
+/* Generic error codes. */
+#define QCOMTEE_MSG_OK 0 /* non-specific success code. */
+#define QCOMTEE_MSG_ERROR 1 /* non-specific error. */
+#define QCOMTEE_MSG_ERROR_INVALID 2 /* unsupported/unrecognized request. */
+#define QCOMTEE_MSG_ERROR_SIZE_IN 3 /* supplied buffer/string too large. */
+#define QCOMTEE_MSG_ERROR_SIZE_OUT 4 /* supplied output buffer too small. */
+#define QCOMTEE_MSG_ERROR_USERBASE 10 /* start of user-defined error range. */
+
+/* Transport layer error codes. */
+#define QCOMTEE_MSG_ERROR_DEFUNCT -90 /* object no longer exists. */
+#define QCOMTEE_MSG_ERROR_ABORT -91 /* calling thread must exit. */
+#define QCOMTEE_MSG_ERROR_BADOBJ -92 /* invalid object context. */
+#define QCOMTEE_MSG_ERROR_NOSLOTS -93 /* caller's object table full. */
+#define QCOMTEE_MSG_ERROR_MAXARGS -94 /* too many args. */
+#define QCOMTEE_MSG_ERROR_MAXDATA -95 /* buffers too large. */
+#define QCOMTEE_MSG_ERROR_UNAVAIL -96 /* the request could not be processed. */
+#define QCOMTEE_MSG_ERROR_KMEM -97 /* kernel out of memory. */
+#define QCOMTEE_MSG_ERROR_REMOTE -98 /* local method sent to remote object. */
+#define QCOMTEE_MSG_ERROR_BUSY -99 /* object is busy. */
+#define QCOMTEE_MSG_ERROR_TIMEOUT -103 /* callback object invocation timed out. */
+
+static inline void qcomtee_msg_set_result(struct qcomtee_msg_callback *cb_msg,
+ int err)
+{
+ if (!err) {
+ cb_msg->result = QCOMTEE_MSG_OK;
+ } else if (err < 0) {
+ /* If err < 0, then it is a transport error. */
+ switch (err) {
+ case -ENOMEM:
+ cb_msg->result = QCOMTEE_MSG_ERROR_KMEM;
+ break;
+ case -ENODEV:
+ cb_msg->result = QCOMTEE_MSG_ERROR_DEFUNCT;
+ break;
+ case -ENOSPC:
+ case -EBUSY:
+ cb_msg->result = QCOMTEE_MSG_ERROR_BUSY;
+ break;
+ case -EBADF:
+ case -EINVAL:
+ cb_msg->result = QCOMTEE_MSG_ERROR_UNAVAIL;
+ break;
+ default:
+ cb_msg->result = QCOMTEE_MSG_ERROR;
+ }
+ } else {
+ /* If err > 0, it is a user-defined error; pass it as is. */
+ cb_msg->result = err;
+ }
+}
+
+#endif /* QCOMTEE_MSG_H */
diff --git a/drivers/tee/qcomtee/qcomtee_object.h b/drivers/tee/qcomtee/qcomtee_object.h
new file mode 100644
index 000000000000..5221449be7db
--- /dev/null
+++ b/drivers/tee/qcomtee/qcomtee_object.h
@@ -0,0 +1,316 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef QCOMTEE_OBJECT_H
+#define QCOMTEE_OBJECT_H
+
+#include <linux/completion.h>
+#include <linux/kref.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+struct qcomtee_object;
+
+/**
+ * DOC: Overview
+ *
+ * qcomtee_object provides object refcounting, ID allocation for objects hosted
+ * in the kernel, and necessary message marshaling for Qualcomm TEE (QTEE).
+ *
+ * To invoke an object in QTEE, the user calls qcomtee_object_do_invoke()
+ * while passing an instance of &struct qcomtee_object and the requested
+ * operation + arguments.
+ *
+ * After boot, QTEE provides a static object %ROOT_QCOMTEE_OBJECT (type of
+ * %QCOMTEE_OBJECT_TYPE_ROOT). The root object is invoked to pass the user's
+ * credentials and obtain other instances of &struct qcomtee_object (type of
+ * %QCOMTEE_OBJECT_TYPE_TEE) that represent services and TAs in QTEE;
+ * see &enum qcomtee_object_type.
+ *
+ * The objects received from QTEE are refcounted. So the owner of these objects
+ * can issue qcomtee_object_get() to increase the refcount and pass objects
+ * to other clients, or issue qcomtee_object_put() to decrease the refcount
+ * and release the resources in QTEE.
+ *
+ * The kernel can host services accessible to QTEE. A driver should embed
+ * an instance of &struct qcomtee_object in the struct it wants to export to
+ * QTEE (this is called a callback object). It issues qcomtee_object_user_init()
+ * to set the dispatch() operation for the callback object and set its type
+ * to %QCOMTEE_OBJECT_TYPE_CB.
+ *
+ * core.c holds an object table for callback objects. An object ID is assigned
+ * to each callback object, which is an index to the object table. QTEE uses
+ * these IDs to reference or invoke callback objects.
+ *
+ * If QTEE invokes a callback object in the kernel, the dispatch() operation is
+ * called in the context of the thread that originally called
+ * qcomtee_object_do_invoke().
+ */
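+
+/*
+ * Minimal callback object sketch (illustrative; the 'my_*' names are
+ * hypothetical). The dispatch() operation is mandatory:
+ *
+ *	struct my_service {
+ *		struct qcomtee_object object;
+ *	};
+ *
+ *	static int my_dispatch(struct qcomtee_object_invoke_ctx *oic,
+ *			       struct qcomtee_object *object, u32 op,
+ *			       struct qcomtee_arg *args)
+ *	{
+ *		(handle 'op' and fill output arguments)
+ *		return 0;
+ *	}
+ *
+ *	static struct qcomtee_object_operations my_ops = {
+ *		.dispatch = my_dispatch,
+ *	};
+ *
+ *	err = qcomtee_object_user_init(&svc->object, QCOMTEE_OBJECT_TYPE_CB,
+ *				       &my_ops, "my-service");
+ */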
+
+/**
+ * enum qcomtee_object_type - Object types.
+ * @QCOMTEE_OBJECT_TYPE_TEE: object hosted on QTEE.
+ * @QCOMTEE_OBJECT_TYPE_CB: object hosted on kernel.
+ * @QCOMTEE_OBJECT_TYPE_ROOT: 'primordial' object.
+ * @QCOMTEE_OBJECT_TYPE_NULL: NULL object.
+ *
+ * The primordial object is used for bootstrapping the IPC connection between
+ * the kernel and QTEE. It is invoked by the kernel when it wants to get a
+ * 'client env'.
+ */
+enum qcomtee_object_type {
+ QCOMTEE_OBJECT_TYPE_TEE,
+ QCOMTEE_OBJECT_TYPE_CB,
+ QCOMTEE_OBJECT_TYPE_ROOT,
+ QCOMTEE_OBJECT_TYPE_NULL,
+};
+
+/**
+ * enum qcomtee_arg_type - Type of QTEE argument.
+ * @QCOMTEE_ARG_TYPE_INV: invalid type.
+ * @QCOMTEE_ARG_TYPE_OB: output buffer (OB).
+ * @QCOMTEE_ARG_TYPE_OO: output object (OO).
+ * @QCOMTEE_ARG_TYPE_IB: input buffer (IB).
+ * @QCOMTEE_ARG_TYPE_IO: input object (IO).
+ *
+ * Use the invalid type to specify the end of the argument array.
+ */
+enum qcomtee_arg_type {
+ QCOMTEE_ARG_TYPE_INV = 0,
+ QCOMTEE_ARG_TYPE_OB,
+ QCOMTEE_ARG_TYPE_OO,
+ QCOMTEE_ARG_TYPE_IB,
+ QCOMTEE_ARG_TYPE_IO,
+ QCOMTEE_ARG_TYPE_NR,
+};
+
+/**
+ * define QCOMTEE_ARGS_PER_TYPE - Maximum arguments of a specific type.
+ *
+ * The QTEE transport protocol limits the maximum number of arguments of
+ * a specific type (i.e., IB, OB, IO, and OO).
+ */
+#define QCOMTEE_ARGS_PER_TYPE 16
+
+/* Maximum arguments that can fit in a QTEE message, ignoring the type. */
+#define QCOMTEE_ARGS_MAX (QCOMTEE_ARGS_PER_TYPE * (QCOMTEE_ARG_TYPE_NR - 1))
+
+struct qcomtee_buffer {
+ union {
+ void *addr;
+ void __user *uaddr;
+ };
+ size_t size;
+};
+
+/**
+ * struct qcomtee_arg - Argument for QTEE object invocation.
+ * @type: type of argument as &enum qcomtee_arg_type.
+ * @flags: extra flags.
+ * @b: address and size if the type of argument is a buffer.
+ * @o: object instance if the type of argument is an object.
+ *
+ * &qcomtee_arg.flags only accepts %QCOMTEE_ARG_FLAGS_UADDR for now, which
+ * states that &qcomtee_arg.b contains a userspace address in uaddr.
+ */
+struct qcomtee_arg {
+ enum qcomtee_arg_type type;
+/* 'b.uaddr' holds a __user address. */
+#define QCOMTEE_ARG_FLAGS_UADDR BIT(0)
+ unsigned int flags;
+ union {
+ struct qcomtee_buffer b;
+ struct qcomtee_object *o;
+ };
+};
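+
+/*
+ * For example (a sketch; 'user_ptr' and 'user_len' are hypothetical), an
+ * input buffer argument referring directly to a userspace buffer:
+ *
+ *	struct qcomtee_arg arg = { .type = QCOMTEE_ARG_TYPE_IB };
+ *
+ *	arg.flags = QCOMTEE_ARG_FLAGS_UADDR;
+ *	arg.b.uaddr = user_ptr;
+ *	arg.b.size = user_len;
+ */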
+
+static inline int qcomtee_args_len(struct qcomtee_arg *args)
+{
+ int i = 0;
+
+ while (args[i].type != QCOMTEE_ARG_TYPE_INV)
+ i++;
+ return i;
+}
+
+/* Context is busy (callback is in progress). */
+#define QCOMTEE_OIC_FLAG_BUSY BIT(1)
+/* Context needs to notify the current object. */
+#define QCOMTEE_OIC_FLAG_NOTIFY BIT(2)
+/* Context has shared state with QTEE. */
+#define QCOMTEE_OIC_FLAG_SHARED BIT(3)
+
+/**
+ * struct qcomtee_object_invoke_ctx - QTEE context for object invocation.
+ * @ctx: TEE context for this invocation.
+ * @flags: flags for the invocation context.
+ * @errno: error code for the invocation.
+ * @object: current object invoked in this callback context.
+ * @u: array of arguments for the current invocation (+1 for ending arg).
+ * @in_msg: inbound buffer shared with QTEE.
+ * @out_msg: outbound buffer shared with QTEE.
+ * @in_shm: TEE shm allocated for inbound buffer.
+ * @out_shm: TEE shm allocated for outbound buffer.
+ * @data: extra data attached to this context.
+ */
+struct qcomtee_object_invoke_ctx {
+ struct tee_context *ctx;
+ unsigned long flags;
+ int errno;
+
+ struct qcomtee_object *object;
+ struct qcomtee_arg u[QCOMTEE_ARGS_MAX + 1];
+
+ struct qcomtee_buffer in_msg;
+ struct qcomtee_buffer out_msg;
+ struct tee_shm *in_shm;
+ struct tee_shm *out_shm;
+
+ void *data;
+};
+
+static inline struct qcomtee_object_invoke_ctx *
+qcomtee_object_invoke_ctx_alloc(struct tee_context *ctx)
+{
+ struct qcomtee_object_invoke_ctx *oic;
+
+ oic = kzalloc(sizeof(*oic), GFP_KERNEL);
+ if (oic)
+ oic->ctx = ctx;
+ return oic;
+}
+
+/**
+ * qcomtee_object_do_invoke() - Submit an invocation for an object.
+ * @oic: context to use for the current invocation.
+ * @object: object being invoked.
+ * @op: requested operation on the object.
+ * @u: array of arguments for the current invocation.
+ * @result: result returned from QTEE.
+ *
+ * The caller is responsible for keeping track of the refcount for each object,
+ * including @object. On return, the caller loses ownership of all input
+ * objects of type %QCOMTEE_OBJECT_TYPE_CB.
+ *
+ * @object can be of %QCOMTEE_OBJECT_TYPE_ROOT or %QCOMTEE_OBJECT_TYPE_TEE.
+ *
+ * Return: On success, returns 0; on failure, returns < 0.
+ */
+int qcomtee_object_do_invoke(struct qcomtee_object_invoke_ctx *oic,
+ struct qcomtee_object *object, u32 op,
+ struct qcomtee_arg *u, int *result);
+
+/**
+ * struct qcomtee_object_operations - Callback object operations.
+ * @release: release the object if QTEE is not using it.
+ * @dispatch: dispatch the operation requested by QTEE.
+ * @notify: report the status of any pending response submitted by @dispatch.
+ */
+struct qcomtee_object_operations {
+ void (*release)(struct qcomtee_object *object);
+ int (*dispatch)(struct qcomtee_object_invoke_ctx *oic,
+ struct qcomtee_object *object, u32 op,
+ struct qcomtee_arg *args);
+ void (*notify)(struct qcomtee_object_invoke_ctx *oic,
+ struct qcomtee_object *object, int err);
+};
+
+/**
+ * struct qcomtee_object - QTEE or kernel object.
+ * @name: object name.
+ * @refcount: reference counter.
+ * @object_type: object type as &enum qcomtee_object_type.
+ * @info: extra information for the object.
+ * @ops: callback operations for objects of type %QCOMTEE_OBJECT_TYPE_CB.
+ * @work: work for async operations on the object.
+ *
+ * @work is used for releasing objects of %QCOMTEE_OBJECT_TYPE_TEE type.
+ */
+struct qcomtee_object {
+ const char *name;
+ struct kref refcount;
+
+ enum qcomtee_object_type object_type;
+ struct object_info {
+ unsigned long qtee_id;
+ /* TEE context for QTEE object async requests. */
+ struct tee_context *qcomtee_async_ctx;
+ } info;
+
+ struct qcomtee_object_operations *ops;
+ struct work_struct work;
+};
+
+/* Static instances of qcomtee_object objects. */
+#define NULL_QCOMTEE_OBJECT ((struct qcomtee_object *)(0))
+extern struct qcomtee_object qcomtee_object_root;
+#define ROOT_QCOMTEE_OBJECT (&qcomtee_object_root)
+
+static inline enum qcomtee_object_type
+typeof_qcomtee_object(struct qcomtee_object *object)
+{
+ if (object == NULL_QCOMTEE_OBJECT)
+ return QCOMTEE_OBJECT_TYPE_NULL;
+ return object->object_type;
+}
+
+static inline const char *qcomtee_object_name(struct qcomtee_object *object)
+{
+ if (object == NULL_QCOMTEE_OBJECT)
+ return "null";
+
+ if (!object->name)
+ return "no-name";
+ return object->name;
+}
+
+/**
+ * qcomtee_object_user_init() - Initialize an object for the user.
+ * @object: object to initialize.
+ * @ot: type of object as &enum qcomtee_object_type.
+ * @ops: instance of callbacks.
+ * @fmt: name assigned to the object.
+ *
+ * Return: On success, returns 0; on failure, returns < 0.
+ */
+int qcomtee_object_user_init(struct qcomtee_object *object,
+ enum qcomtee_object_type ot,
+ struct qcomtee_object_operations *ops,
+ const char *fmt, ...) __printf(4, 5);
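+
+/*
+ * As a sketch (the struct, callbacks, and name format are hypothetical),
+ * a driver exporting a service to QTEE embeds the object and initializes
+ * it as a callback object:
+ *
+ *	struct my_service {
+ *		struct qcomtee_object object;
+ *	};
+ *
+ *	static struct qcomtee_object_operations my_ops = {
+ *		.release = my_release,
+ *		.dispatch = my_dispatch,
+ *	};
+ *
+ *	err = qcomtee_object_user_init(&svc->object, QCOMTEE_OBJECT_TYPE_CB,
+ *				       &my_ops, "my-service-%d", id);
+ */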
+
+/* Object release is RCU protected. */
+int qcomtee_object_get(struct qcomtee_object *object);
+void qcomtee_object_put(struct qcomtee_object *object);
+
+#define qcomtee_arg_for_each(i, args) \
+ for (i = 0; args[i].type != QCOMTEE_ARG_TYPE_INV; i++)
+
+/* Next argument of type @type after index @i. */
+int qcomtee_next_arg_type(struct qcomtee_arg *u, int i,
+ enum qcomtee_arg_type type);
+
+/* Iterate over arguments of a given type. */
+#define qcomtee_arg_for_each_type(i, args, at) \
+ for (i = qcomtee_next_arg_type(args, 0, at); \
+ args[i].type != QCOMTEE_ARG_TYPE_INV; \
+ i = qcomtee_next_arg_type(args, i + 1, at))
+
+#define qcomtee_arg_for_each_input_buffer(i, args) \
+ qcomtee_arg_for_each_type(i, args, QCOMTEE_ARG_TYPE_IB)
+#define qcomtee_arg_for_each_output_buffer(i, args) \
+ qcomtee_arg_for_each_type(i, args, QCOMTEE_ARG_TYPE_OB)
+#define qcomtee_arg_for_each_input_object(i, args) \
+ qcomtee_arg_for_each_type(i, args, QCOMTEE_ARG_TYPE_IO)
+#define qcomtee_arg_for_each_output_object(i, args) \
+ qcomtee_arg_for_each_type(i, args, QCOMTEE_ARG_TYPE_OO)
+
+struct qcomtee_object *
+qcomtee_object_get_client_env(struct qcomtee_object_invoke_ctx *oic);
+
+struct qcomtee_object *
+qcomtee_object_get_service(struct qcomtee_object_invoke_ctx *oic,
+ struct qcomtee_object *client_env, u32 uid);
+
+#endif /* QCOMTEE_OBJECT_H */
diff --git a/drivers/tee/qcomtee/shm.c b/drivers/tee/qcomtee/shm.c
new file mode 100644
index 000000000000..580bd25f98ed
--- /dev/null
+++ b/drivers/tee/qcomtee/shm.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/firmware/qcom/qcom_tzmem.h>
+#include <linux/mm.h>
+
+#include "qcomtee.h"
+
+/**
+ * define MAX_OUTBOUND_BUFFER_SIZE - Maximum size of the outbound buffer.
+ *
+ * The size of the outbound buffer depends on QTEE callback requests.
+ */
+#define MAX_OUTBOUND_BUFFER_SIZE SZ_4K
+
+/**
+ * define MAX_INBOUND_BUFFER_SIZE - Maximum size of the inbound buffer.
+ *
+ * The size of the inbound buffer depends on the user's requests,
+ * specifically the number of IB and OB arguments. If an invocation
+ * requires a size larger than %MAX_INBOUND_BUFFER_SIZE, the user should
+ * consider using another form of shared memory with QTEE.
+ */
+#define MAX_INBOUND_BUFFER_SIZE SZ_4M
+
+/**
+ * qcomtee_msg_buffers_alloc() - Allocate inbound and outbound buffers.
+ * @oic: context to use for the current invocation.
+ * @u: array of arguments for the current invocation.
+ *
+ * It calculates the size of the inbound buffer based on the arguments
+ * in @u and allocates both buffers from the teedev pool.
+ *
+ * Return: On success, returns 0. On error, returns < 0.
+ */
+int qcomtee_msg_buffers_alloc(struct qcomtee_object_invoke_ctx *oic,
+ struct qcomtee_arg *u)
+{
+ struct tee_context *ctx = oic->ctx;
+ struct tee_shm *shm;
+ size_t size;
+ int i;
+
+ /* Start offset in a message for buffer arguments. */
+ size = qcomtee_msg_buffer_args(struct qcomtee_msg_object_invoke,
+ qcomtee_args_len(u));
+ if (size > MAX_INBOUND_BUFFER_SIZE)
+ return -EINVAL;
+
+ /* Add size of IB arguments. */
+ qcomtee_arg_for_each_input_buffer(i, u) {
+ size = size_add(size, qcomtee_msg_offset_align(u[i].b.size));
+ if (size > MAX_INBOUND_BUFFER_SIZE)
+ return -EINVAL;
+ }
+
+ /* Add size of OB arguments. */
+ qcomtee_arg_for_each_output_buffer(i, u) {
+ size = size_add(size, qcomtee_msg_offset_align(u[i].b.size));
+ if (size > MAX_INBOUND_BUFFER_SIZE)
+ return -EINVAL;
+ }
+
+ shm = tee_shm_alloc_priv_buf(ctx, size);
+ if (IS_ERR(shm))
+ return PTR_ERR(shm);
+
+ /* Allocate inbound buffer. */
+ oic->in_shm = shm;
+ shm = tee_shm_alloc_priv_buf(ctx, MAX_OUTBOUND_BUFFER_SIZE);
+ if (IS_ERR(shm)) {
+ tee_shm_free(oic->in_shm);
+
+ return PTR_ERR(shm);
+ }
+ /* Allocate outbound buffer. */
+ oic->out_shm = shm;
+
+ oic->in_msg.addr = tee_shm_get_va(oic->in_shm, 0);
+ oic->in_msg.size = tee_shm_get_size(oic->in_shm);
+ oic->out_msg.addr = tee_shm_get_va(oic->out_shm, 0);
+ oic->out_msg.size = tee_shm_get_size(oic->out_shm);
+ /* QTEE assumes unused buffers are zeroed. */
+ memzero_explicit(oic->in_msg.addr, oic->in_msg.size);
+ memzero_explicit(oic->out_msg.addr, oic->out_msg.size);
+
+ return 0;
+}
+
+void qcomtee_msg_buffers_free(struct qcomtee_object_invoke_ctx *oic)
+{
+ tee_shm_free(oic->in_shm);
+ tee_shm_free(oic->out_shm);
+}
+
+/* Dynamic shared memory pool based on tee_dyn_shm_alloc_helper(). */
+
+static int qcomtee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
+ struct page **pages, size_t num_pages,
+ unsigned long start)
+{
+ return qcom_tzmem_shm_bridge_create(shm->paddr, shm->size,
+ &shm->sec_world_id);
+}
+
+static int qcomtee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
+{
+ qcom_tzmem_shm_bridge_delete(shm->sec_world_id);
+
+ return 0;
+}
+
+static int pool_op_alloc(struct tee_shm_pool *pool, struct tee_shm *shm,
+ size_t size, size_t align)
+{
+ return tee_dyn_shm_alloc_helper(shm, size, align, qcomtee_shm_register);
+}
+
+static void pool_op_free(struct tee_shm_pool *pool, struct tee_shm *shm)
+{
+ tee_dyn_shm_free_helper(shm, qcomtee_shm_unregister);
+}
+
+static void pool_op_destroy_pool(struct tee_shm_pool *pool)
+{
+ kfree(pool);
+}
+
+static const struct tee_shm_pool_ops pool_ops = {
+ .alloc = pool_op_alloc,
+ .free = pool_op_free,
+ .destroy_pool = pool_op_destroy_pool,
+};
+
+struct tee_shm_pool *qcomtee_shm_pool_alloc(void)
+{
+ struct tee_shm_pool *pool;
+
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return ERR_PTR(-ENOMEM);
+
+ pool->ops = &pool_ops;
+
+ return pool;
+}
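+
+/*
+ * Illustrative use (a sketch; 'qcomtee_desc' and 'qcomtee' are hypothetical
+ * names): the driver probe path hands this pool to the TEE subsystem:
+ *
+ *	pool = qcomtee_shm_pool_alloc();
+ *	if (IS_ERR(pool))
+ *		return PTR_ERR(pool);
+ *	teedev = tee_device_alloc(&qcomtee_desc, NULL, pool, qcomtee);
+ */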
diff --git a/drivers/tee/qcomtee/user_obj.c b/drivers/tee/qcomtee/user_obj.c
new file mode 100644
index 000000000000..0139905f2684
--- /dev/null
+++ b/drivers/tee/qcomtee/user_obj.c
@@ -0,0 +1,692 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include "qcomtee.h"
+
+/**
+ * DOC: User Objects aka Supplicants
+ *
+ * Any userspace process with access to the TEE device file can behave as a
+ * supplicant by creating a user object. Any TEE parameter of type OBJREF with
+ * %QCOMTEE_OBJREF_FLAG_USER flag set is considered a user object.
+ *
+ * A supplicant uses qcomtee_user_object_select() (i.e. TEE_IOC_SUPPL_RECV) to
+ * receive a QTEE user object request and qcomtee_user_object_submit()
+ * (i.e. TEE_IOC_SUPPL_SEND) to submit a response. QTEE expects to receive the
+ * response, including the OB and OO arguments, in a specific order in the
+ * message; parameters submitted with qcomtee_user_object_submit() should
+ * maintain this order.
+ */
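+
+/*
+ * Roughly, a supplicant loop looks like this (a sketch only; marshalling
+ * of the argument struct behind 'buf', a struct tee_ioctl_buf_data, is
+ * elided):
+ *
+ *	for (;;) {
+ *		ioctl(fd, TEE_IOC_SUPPL_RECV, &buf);
+ *		... handle (object_id, op) and fill OB/OO parameters ...
+ *		ioctl(fd, TEE_IOC_SUPPL_SEND, &buf);
+ *	}
+ */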
+
+/**
+ * struct qcomtee_user_object - User object.
+ * @object: &struct qcomtee_object representing the user object.
+ * @ctx: context for which the user object is defined.
+ * @object_id: object ID in @ctx.
+ * @notify: notify on release.
+ *
+ * Any object managed in userspace is represented by this struct.
+ * If @notify is set, a notification message is sent back to userspace
+ * upon release.
+ */
+struct qcomtee_user_object {
+ struct qcomtee_object object;
+ struct tee_context *ctx;
+ u64 object_id;
+ bool notify;
+};
+
+#define to_qcomtee_user_object(o) \
+ container_of((o), struct qcomtee_user_object, object)
+
+static struct qcomtee_object_operations qcomtee_user_object_ops;
+
+/* Is it a user object? */
+int is_qcomtee_user_object(struct qcomtee_object *object)
+{
+ return object != NULL_QCOMTEE_OBJECT &&
+ typeof_qcomtee_object(object) == QCOMTEE_OBJECT_TYPE_CB &&
+ object->ops == &qcomtee_user_object_ops;
+}
+
+/* Set the user object's 'notify on release' flag. */
+void qcomtee_user_object_set_notify(struct qcomtee_object *object, bool notify)
+{
+ if (is_qcomtee_user_object(object))
+ to_qcomtee_user_object(object)->notify = notify;
+}
+
+/* Supplicant Requests: */
+
+/**
+ * enum qcomtee_req_state - Current state of request.
+ * @QCOMTEE_REQ_QUEUED: Request is waiting for supplicant.
+ * @QCOMTEE_REQ_PROCESSING: Request has been picked by the supplicant.
+ * @QCOMTEE_REQ_PROCESSED: Response has been submitted for the request.
+ */
+enum qcomtee_req_state {
+ QCOMTEE_REQ_QUEUED = 1,
+ QCOMTEE_REQ_PROCESSING,
+ QCOMTEE_REQ_PROCESSED,
+};
+
+/* User requests sent to supplicants. */
+struct qcomtee_ureq {
+ enum qcomtee_req_state state;
+
+ /* User Request: */
+ int req_id;
+ u64 object_id;
+ u32 op;
+ struct qcomtee_arg *args;
+ int errno;
+
+ struct list_head node;
+ struct completion c; /* Completion for whoever waits. */
+};
+
+/*
+ * Placeholder for a PROCESSING request in qcomtee_context.reqs_idr.
+ *
+ * If the thread that called qcomtee_object_do_invoke() dies and the supplicant
+ * is processing the request, replace the entry in qcomtee_context.reqs_idr
+ * with empty_ureq. This ensures that (1) the req_id remains busy and is not
+ * reused, and (2) the supplicant fails to submit the response and performs
+ * the necessary rollback.
+ */
+static struct qcomtee_ureq empty_ureq = { .state = QCOMTEE_REQ_PROCESSING };
+
+/* Enqueue a user request for a context and assign a request ID. */
+static int ureq_enqueue(struct qcomtee_context_data *ctxdata,
+ struct qcomtee_ureq *ureq)
+{
+ int ret;
+
+ guard(mutex)(&ctxdata->reqs_lock);
+ /* Supplicant is dying. */
+ if (ctxdata->released)
+ return -ENODEV;
+
+ /* Allocate an ID and queue the request. */
+ ret = idr_alloc(&ctxdata->reqs_idr, ureq, 0, 0, GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+
+ ureq->req_id = ret;
+ ureq->state = QCOMTEE_REQ_QUEUED;
+ list_add_tail(&ureq->node, &ctxdata->reqs_list);
+
+ return 0;
+}
+
+/**
+ * ureq_dequeue() - Dequeue a user request from a context.
+ * @ctxdata: context data for a context to dequeue the request.
+ * @req_id: ID of the request to be dequeued.
+ *
+ * It dequeues a user request and releases its request ID.
+ *
+ * Context: The caller should hold &qcomtee_context_data->reqs_lock.
+ * Return: Returns the user request associated with this ID; otherwise, NULL.
+ */
+static struct qcomtee_ureq *ureq_dequeue(struct qcomtee_context_data *ctxdata,
+ int req_id)
+{
+ struct qcomtee_ureq *ureq;
+
+ ureq = idr_remove(&ctxdata->reqs_idr, req_id);
+ if (ureq == &empty_ureq || !ureq)
+ return NULL;
+
+ list_del(&ureq->node);
+
+ return ureq;
+}
+
+/**
+ * ureq_select() - Select the next request in a context.
+ * @ctxdata: context data for a context to pop a request.
+ * @ubuf_size: size of the available buffer for UBUF parameters.
+ * @num_params: number of entries for the TEE parameter array.
+ *
+ * It checks that @num_params is large enough to fit the next request's
+ * arguments and that @ubuf_size is large enough to fit its IB buffer
+ * arguments.
+ *
+ * Context: The caller should hold &qcomtee_context_data->reqs_lock.
+ * Return: Returns the next queued request, or NULL if none is queued;
+ * on failure, returns an ERR_PTR.
+ */
+static struct qcomtee_ureq *ureq_select(struct qcomtee_context_data *ctxdata,
+ size_t ubuf_size, int num_params)
+{
+ struct qcomtee_ureq *req, *ureq = NULL;
+ struct qcomtee_arg *u;
+ int i;
+
+ /* Find the first queued request. */
+ list_for_each_entry(req, &ctxdata->reqs_list, node) {
+ if (req->state == QCOMTEE_REQ_QUEUED) {
+ ureq = req;
+ break;
+ }
+ }
+
+ if (!ureq)
+ return NULL;
+
+ u = ureq->args;
+ /* (1) Are there enough TEE parameters? */
+ if (num_params < qcomtee_args_len(u))
+ return ERR_PTR(-EINVAL);
+ /* (2) Is there enough space to pass input buffers? */
+ qcomtee_arg_for_each_input_buffer(i, u) {
+ ubuf_size = size_sub(ubuf_size, u[i].b.size);
+ if (ubuf_size == SIZE_MAX)
+ return ERR_PTR(-EINVAL);
+
+ ubuf_size = round_down(ubuf_size, 8);
+ }
+
+ return ureq;
+}
+
+/* Gets called when the user closes the device. */
+void qcomtee_requests_destroy(struct qcomtee_context_data *ctxdata)
+{
+ struct qcomtee_ureq *req, *ureq;
+
+ guard(mutex)(&ctxdata->reqs_lock);
+ /* So ureq_enqueue() refuses new requests from QTEE. */
+ ctxdata->released = true;
+ /* ureqs in reqs_list are in QUEUED or PROCESSING (!= empty_ureq) state. */
+ list_for_each_entry_safe(ureq, req, &ctxdata->reqs_list, node) {
+ ureq_dequeue(ctxdata, ureq->req_id);
+
+ if (ureq->op != QCOMTEE_MSG_OBJECT_OP_RELEASE) {
+ ureq->state = QCOMTEE_REQ_PROCESSED;
+ ureq->errno = -ENODEV;
+
+ complete(&ureq->c);
+ } else {
+ kfree(ureq);
+ }
+ }
+}
+
+/* User Object API. */
+
+/* User object dispatcher. */
+static int qcomtee_user_object_dispatch(struct qcomtee_object_invoke_ctx *oic,
+ struct qcomtee_object *object, u32 op,
+ struct qcomtee_arg *args)
+{
+ struct qcomtee_user_object *uo = to_qcomtee_user_object(object);
+ struct qcomtee_context_data *ctxdata = uo->ctx->data;
+ struct qcomtee_ureq *ureq __free(kfree) = NULL;
+ int errno;
+
+ ureq = kzalloc(sizeof(*ureq), GFP_KERNEL);
+ if (!ureq)
+ return -ENOMEM;
+
+ init_completion(&ureq->c);
+ ureq->object_id = uo->object_id;
+ ureq->op = op;
+ ureq->args = args;
+
+ /* Queue the request. */
+ if (ureq_enqueue(ctxdata, ureq))
+ return -ENODEV;
+ /* Wake up the supplicant to process it. */
+ complete(&ctxdata->req_c);
+
+ /*
+ * Wait for the supplicant to process the request. Wait as KILLABLE
+ * in case the supplicant and the invoke thread both run in the same
+ * process, the supplicant crashes, or the shutdown sequence kills the
+ * supplicant first; otherwise, the wait would block indefinitely.
+ *
+ * If the supplicant processes long-running requests, also use
+ * TASK_FREEZABLE to allow the device to safely suspend if needed.
+ */
+ if (!wait_for_completion_state(&ureq->c,
+ TASK_KILLABLE | TASK_FREEZABLE)) {
+ errno = ureq->errno;
+ if (!errno)
+ oic->data = no_free_ptr(ureq);
+ } else {
+ enum qcomtee_req_state prev_state;
+
+ errno = -ENODEV;
+
+ scoped_guard(mutex, &ctxdata->reqs_lock) {
+ prev_state = ureq->state;
+ /* Replace with empty_ureq to keep req_id reserved. */
+ if (prev_state == QCOMTEE_REQ_PROCESSING) {
+ list_del(&ureq->node);
+ idr_replace(&ctxdata->reqs_idr,
+ &empty_ureq, ureq->req_id);
+
+ /* Remove it, as the supplicant has never seen this request. */
+ } else if (prev_state == QCOMTEE_REQ_QUEUED) {
+ ureq_dequeue(ctxdata, ureq->req_id);
+ }
+ }
+
+ /* Supplicant did some work, do not discard it. */
+ if (prev_state == QCOMTEE_REQ_PROCESSED) {
+ errno = ureq->errno;
+ if (!errno)
+ oic->data = no_free_ptr(ureq);
+ }
+ }
+
+ return errno;
+}
+
+/* Gets called after submitting the dispatcher response. */
+static void qcomtee_user_object_notify(struct qcomtee_object_invoke_ctx *oic,
+ struct qcomtee_object *unused_object,
+ int err)
+{
+ struct qcomtee_ureq *ureq = oic->data;
+ struct qcomtee_arg *u = ureq->args;
+ int i;
+
+ /*
+ * If err, there was a transport issue and QTEE did not receive the
+ * dispatcher's response. Release the callback object created for
+ * QTEE, in addition to the copies of objects kept for the drivers.
+ */
+ qcomtee_arg_for_each_output_object(i, u) {
+ if (err &&
+ (typeof_qcomtee_object(u[i].o) == QCOMTEE_OBJECT_TYPE_CB))
+ qcomtee_object_put(u[i].o);
+ qcomtee_object_put(u[i].o);
+ }
+
+ kfree(ureq);
+}
+
+static void qcomtee_user_object_release(struct qcomtee_object *object)
+{
+ struct qcomtee_user_object *uo = to_qcomtee_user_object(object);
+ struct qcomtee_context_data *ctxdata = uo->ctx->data;
+ struct qcomtee_ureq *ureq;
+
+ /* RELEASE does not require any argument. */
+ static struct qcomtee_arg args[] = { { .type = QCOMTEE_ARG_TYPE_INV } };
+
+ if (!uo->notify)
+ goto out_no_notify;
+
+ ureq = kzalloc(sizeof(*ureq), GFP_KERNEL);
+ if (!ureq)
+ goto out_no_notify;
+
+ /* QUEUE a release request: */
+ ureq->object_id = uo->object_id;
+ ureq->op = QCOMTEE_MSG_OBJECT_OP_RELEASE;
+ ureq->args = args;
+ if (ureq_enqueue(ctxdata, ureq)) {
+ kfree(ureq);
+ /* Ignore the notification if it cannot be queued. */
+ goto out_no_notify;
+ }
+
+ complete(&ctxdata->req_c);
+
+out_no_notify:
+ teedev_ctx_put(uo->ctx);
+ kfree(uo);
+}
+
+static struct qcomtee_object_operations qcomtee_user_object_ops = {
+ .release = qcomtee_user_object_release,
+ .notify = qcomtee_user_object_notify,
+ .dispatch = qcomtee_user_object_dispatch,
+};
+
+/**
+ * qcomtee_user_param_to_object() - OBJREF parameter to &struct qcomtee_object.
+ * @object: object returned.
+ * @param: TEE parameter.
+ * @ctx: context in which the conversion should happen.
+ *
+ * @param is an OBJREF with %QCOMTEE_OBJREF_FLAG_USER flags.
+ *
+ * Return: On success, returns 0; on failure, returns < 0.
+ */
+int qcomtee_user_param_to_object(struct qcomtee_object **object,
+ struct tee_param *param,
+ struct tee_context *ctx)
+{
+ struct qcomtee_user_object *user_object __free(kfree) = NULL;
+ int err;
+
+ user_object = kzalloc(sizeof(*user_object), GFP_KERNEL);
+ if (!user_object)
+ return -ENOMEM;
+
+ user_object->ctx = ctx;
+ user_object->object_id = param->u.objref.id;
+ /* By default, always notify userspace upon release. */
+ user_object->notify = true;
+ err = qcomtee_object_user_init(&user_object->object,
+ QCOMTEE_OBJECT_TYPE_CB,
+ &qcomtee_user_object_ops, "uo-%llu",
+ param->u.objref.id);
+ if (err)
+ return err;
+ /* Matching teedev_ctx_put() is in qcomtee_user_object_release(). */
+ teedev_ctx_get(ctx);
+
+ *object = &no_free_ptr(user_object)->object;
+
+ return 0;
+}
+
+/* Reverse what qcomtee_user_param_to_object() does. */
+int qcomtee_user_param_from_object(struct tee_param *param,
+ struct qcomtee_object *object,
+ struct tee_context *ctx)
+{
+ struct qcomtee_user_object *uo;
+
+ uo = to_qcomtee_user_object(object);
+ /* Ensure the object is in the same context as the caller. */
+ if (uo->ctx != ctx)
+ return -EINVAL;
+
+ param->u.objref.id = uo->object_id;
+ param->u.objref.flags = QCOMTEE_OBJREF_FLAG_USER;
+
+ /* User objects are valid in userspace; do not keep a copy. */
+ qcomtee_object_put(object);
+
+ return 0;
+}
+
+/**
+ * qcomtee_cb_params_from_args() - Convert QTEE arguments to TEE parameters.
+ * @params: TEE parameters.
+ * @u: QTEE arguments.
+ * @num_params: number of elements in the parameter array.
+ * @ubuf_addr: user buffer for arguments of type %QCOMTEE_ARG_TYPE_IB.
+ * @ubuf_size: size of the user buffer.
+ * @ctx: context in which the conversion should happen.
+ *
+ * It expects @params to have enough entries for @u. Entries in @params are
+ * initialized to %TEE_IOCTL_PARAM_ATTR_TYPE_NONE.
+ *
+ * Return: On success, returns the number of input parameters;
+ * on failure, returns < 0.
+ */
+static int qcomtee_cb_params_from_args(struct tee_param *params,
+ struct qcomtee_arg *u, int num_params,
+ void __user *ubuf_addr, size_t ubuf_size,
+ struct tee_context *ctx)
+{
+ int i, np;
+ void __user *uaddr;
+
+ qcomtee_arg_for_each(i, u) {
+ switch (u[i].type) {
+ case QCOMTEE_ARG_TYPE_IB:
+ params[i].attr = TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INPUT;
+
+ /* Underflow already checked in ureq_select(). */
+ ubuf_size = round_down(ubuf_size - u[i].b.size, 8);
+ uaddr = (void __user *)(ubuf_addr + ubuf_size);
+
+ params[i].u.ubuf.uaddr = uaddr;
+ params[i].u.ubuf.size = u[i].b.size;
+ if (copy_to_user(params[i].u.ubuf.uaddr, u[i].b.addr,
+ u[i].b.size))
+ goto out_failed;
+
+ break;
+ case QCOMTEE_ARG_TYPE_OB:
+ params[i].attr = TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_OUTPUT;
+ /* Let the user know the maximum size QTEE expects. */
+ params[i].u.ubuf.size = u[i].b.size;
+
+ break;
+ case QCOMTEE_ARG_TYPE_IO:
+ params[i].attr = TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INPUT;
+ if (qcomtee_objref_from_arg(&params[i], &u[i], ctx))
+ goto out_failed;
+
+ break;
+ case QCOMTEE_ARG_TYPE_OO:
+ params[i].attr =
+ TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_OUTPUT;
+
+ break;
+ default: /* Never get here! */
+ goto out_failed;
+ }
+ }
+
+ return i;
+
+out_failed:
+ /* Undo qcomtee_objref_from_arg(). */
+ for (np = i; np >= 0; np--) {
+ if (params[np].attr == TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INPUT)
+ qcomtee_context_del_qtee_object(&params[np], ctx);
+ }
+
+ /* Release any IO objects not processed. */
+ for (; u[i].type; i++) {
+ if (u[i].type == QCOMTEE_ARG_TYPE_IO)
+ qcomtee_object_put(u[i].o);
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * qcomtee_cb_params_to_args() - Convert TEE parameters to QTEE arguments.
+ * @u: QTEE arguments.
+ * @params: TEE parameters.
+ * @num_params: number of elements in the parameter array.
+ * @ctx: context in which the conversion should happen.
+ *
+ * Return: On success, returns 0; on failure, returns < 0.
+ */
+static int qcomtee_cb_params_to_args(struct qcomtee_arg *u,
+ struct tee_param *params, int num_params,
+ struct tee_context *ctx)
+{
+ int i;
+
+ qcomtee_arg_for_each(i, u) {
+ switch (u[i].type) {
+ case QCOMTEE_ARG_TYPE_IB:
+ if (params[i].attr !=
+ TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INPUT)
+ goto out_failed;
+
+ break;
+ case QCOMTEE_ARG_TYPE_OB:
+ if (params[i].attr !=
+ TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_OUTPUT)
+ goto out_failed;
+
+ /* The client cannot send more data than requested. */
+ if (params[i].u.ubuf.size > u[i].b.size)
+ goto out_failed;
+
+ if (copy_from_user(u[i].b.addr, params[i].u.ubuf.uaddr,
+ params[i].u.ubuf.size))
+ goto out_failed;
+
+ u[i].b.size = params[i].u.ubuf.size;
+
+ break;
+ case QCOMTEE_ARG_TYPE_IO:
+ if (params[i].attr !=
+ TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INPUT)
+ goto out_failed;
+
+ break;
+ case QCOMTEE_ARG_TYPE_OO:
+ if (params[i].attr !=
+ TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_OUTPUT)
+ goto out_failed;
+
+ if (qcomtee_objref_to_arg(&u[i], &params[i], ctx))
+ goto out_failed;
+
+ break;
+ default: /* Never get here! */
+ goto out_failed;
+ }
+ }
+
+ return 0;
+
+out_failed:
+ /* Undo qcomtee_objref_to_arg(). */
+ for (i--; i >= 0; i--) {
+ if (u[i].type != QCOMTEE_ARG_TYPE_OO)
+ continue;
+
+ qcomtee_user_object_set_notify(u[i].o, false);
+ if (typeof_qcomtee_object(u[i].o) == QCOMTEE_OBJECT_TYPE_CB)
+ qcomtee_object_put(u[i].o);
+
+ qcomtee_object_put(u[i].o);
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * qcomtee_user_object_select() - Select a request for a user object.
+ * @ctx: context to look for a user object.
+ * @params: parameters for @op.
+ * @num_params: number of elements in the parameter array.
+ * @uaddr: user buffer for output UBUF parameters.
+ * @size: size of user buffer @uaddr.
+ * @data: information for the selected request.
+ *
+ * @params is filled along with @data for the selected request.
+ *
+ * Return: On success, returns 0; on failure, returns < 0.
+ */
+int qcomtee_user_object_select(struct tee_context *ctx,
+ struct tee_param *params, int num_params,
+ void __user *uaddr, size_t size,
+ struct qcomtee_user_object_request_data *data)
+{
+ struct qcomtee_context_data *ctxdata = ctx->data;
+ struct qcomtee_ureq *ureq;
+ int ret;
+
+ /*
+ * Hold the reqs_lock not only for ureq_select() and updating the ureq
+ * state to PROCESSING, but for the entire duration of ureq access.
+ * This prevents qcomtee_user_object_dispatch() from freeing ureq
+ * while it is still in use if the client dies.
+ */
+
+ while (1) {
+ scoped_guard(mutex, &ctxdata->reqs_lock) {
+ ureq = ureq_select(ctxdata, size, num_params);
+ if (!ureq)
+ goto wait_for_request;
+
+ if (IS_ERR(ureq))
+ return PTR_ERR(ureq);
+
+ /* Processing the request 'QUEUED -> PROCESSING'. */
+ ureq->state = QCOMTEE_REQ_PROCESSING;
+ /* Prepare the user request: */
+ data->id = ureq->req_id;
+ data->object_id = ureq->object_id;
+ data->op = ureq->op;
+ ret = qcomtee_cb_params_from_args(params, ureq->args,
+ num_params, uaddr,
+ size, ctx);
+ if (ret >= 0)
+ goto done_request;
+
+ /* Something is wrong with the request: */
+ ureq_dequeue(ctxdata, data->id);
+ /* Send error to QTEE. */
+ ureq->state = QCOMTEE_REQ_PROCESSED;
+ ureq->errno = ret;
+
+ complete(&ureq->c);
+ }
+
+ continue;
+wait_for_request:
+ /* Wait for a new QUEUED request. */
+ if (wait_for_completion_interruptible(&ctxdata->req_c))
+ return -ERESTARTSYS;
+ }
+
+done_request:
+ /* No one is waiting for the response. */
+ if (data->op == QCOMTEE_MSG_OBJECT_OP_RELEASE) {
+ scoped_guard(mutex, &ctxdata->reqs_lock)
+ ureq_dequeue(ctxdata, data->id);
+ kfree(ureq);
+ }
+
+ data->np = ret;
+
+ return 0;
+}
+
+/**
+ * qcomtee_user_object_submit() - Submit a response for a user object.
+ * @ctx: context to look for a user object.
+ * @params: returned parameters.
+ * @num_params: number of elements in the parameter array.
+ * @req_id: request ID for the response.
+ * @errno: result of user object invocation.
+ *
+ * Return: On success, returns 0; on failure, returns < 0.
+ */
+int qcomtee_user_object_submit(struct tee_context *ctx,
+ struct tee_param *params, int num_params,
+ int req_id, int errno)
+{
+ struct qcomtee_context_data *ctxdata = ctx->data;
+ struct qcomtee_ureq *ureq;
+
+ /* See comments for reqs_lock in qcomtee_user_object_select(). */
+ guard(mutex)(&ctxdata->reqs_lock);
+
+ ureq = ureq_dequeue(ctxdata, req_id);
+ if (!ureq)
+ return -EINVAL;
+
+ ureq->state = QCOMTEE_REQ_PROCESSED;
+
+ if (!errno)
+ ureq->errno = qcomtee_cb_params_to_args(ureq->args, params,
+ num_params, ctx);
+ else
+ ureq->errno = errno;
+ /* Return errno if qcomtee_cb_params_to_args() failed; otherwise 0. */
+ if (!errno && ureq->errno)
+ errno = ureq->errno;
+ else
+ errno = 0;
+
+ /* Send result to QTEE. */
+ complete(&ureq->c);
+
+ return errno;
+}
diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c
index acc7998758ad..d65d47cc154e 100644
--- a/drivers/tee/tee_core.c
+++ b/drivers/tee/tee_core.c
@@ -14,7 +14,6 @@
#include <linux/slab.h>
#include <linux/tee_core.h>
#include <linux/uaccess.h>
-#include <crypto/hash.h>
#include <crypto/sha1.h>
#include "tee_private.h"
@@ -80,6 +79,7 @@ void teedev_ctx_get(struct tee_context *ctx)
kref_get(&ctx->refcount);
}
+EXPORT_SYMBOL_GPL(teedev_ctx_get);
static void teedev_ctx_release(struct kref *ref)
{
@@ -97,11 +97,15 @@ void teedev_ctx_put(struct tee_context *ctx)
kref_put(&ctx->refcount, teedev_ctx_release);
}
+EXPORT_SYMBOL_GPL(teedev_ctx_put);
void teedev_close_context(struct tee_context *ctx)
{
struct tee_device *teedev = ctx->teedev;
+ if (teedev->desc->ops->close_context)
+ teedev->desc->ops->close_context(ctx);
+
teedev_ctx_put(ctx);
tee_device_put(teedev);
}
@@ -142,58 +146,22 @@ static int tee_release(struct inode *inode, struct file *filp)
* This implements section (for SHA-1):
* 4.3. Algorithm for Creating a Name-Based UUID
*/
-static int uuid_v5(uuid_t *uuid, const uuid_t *ns, const void *name,
- size_t size)
+static void uuid_v5(uuid_t *uuid, const uuid_t *ns, const void *name,
+ size_t size)
{
unsigned char hash[SHA1_DIGEST_SIZE];
- struct crypto_shash *shash = NULL;
- struct shash_desc *desc = NULL;
- int rc;
-
- shash = crypto_alloc_shash("sha1", 0, 0);
- if (IS_ERR(shash)) {
- rc = PTR_ERR(shash);
- pr_err("shash(sha1) allocation failed\n");
- return rc;
- }
-
- desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(shash),
- GFP_KERNEL);
- if (!desc) {
- rc = -ENOMEM;
- goto out_free_shash;
- }
-
- desc->tfm = shash;
-
- rc = crypto_shash_init(desc);
- if (rc < 0)
- goto out_free_desc;
+ struct sha1_ctx ctx;
- rc = crypto_shash_update(desc, (const u8 *)ns, sizeof(*ns));
- if (rc < 0)
- goto out_free_desc;
-
- rc = crypto_shash_update(desc, (const u8 *)name, size);
- if (rc < 0)
- goto out_free_desc;
-
- rc = crypto_shash_final(desc, hash);
- if (rc < 0)
- goto out_free_desc;
+ sha1_init(&ctx);
+ sha1_update(&ctx, (const u8 *)ns, sizeof(*ns));
+ sha1_update(&ctx, (const u8 *)name, size);
+ sha1_final(&ctx, hash);
memcpy(uuid->b, hash, UUID_SIZE);
/* Tag for version 5 */
uuid->b[6] = (hash[6] & 0x0F) | 0x50;
uuid->b[8] = (hash[8] & 0x3F) | 0x80;
-
-out_free_desc:
- kfree(desc);
-
-out_free_shash:
- crypto_free_shash(shash);
- return rc;
}
int tee_session_calc_client_uuid(uuid_t *uuid, u32 connection_method,
@@ -203,7 +171,7 @@ int tee_session_calc_client_uuid(uuid_t *uuid, u32 connection_method,
kgid_t grp = INVALID_GID;
char *name = NULL;
int name_len;
- int rc;
+ int rc = 0;
if (connection_method == TEE_IOCTL_LOGIN_PUBLIC ||
connection_method == TEE_IOCTL_LOGIN_REE_KERNEL) {
@@ -260,7 +228,7 @@ int tee_session_calc_client_uuid(uuid_t *uuid, u32 connection_method,
goto out_free_name;
}
- rc = uuid_v5(uuid, &tee_client_uuid_ns, name, name_len);
+ uuid_v5(uuid, &tee_client_uuid_ns, name, name_len);
out_free_name:
kfree(name);
@@ -354,6 +322,113 @@ tee_ioctl_shm_register(struct tee_context *ctx,
return ret;
}
+static int
+tee_ioctl_shm_register_fd(struct tee_context *ctx,
+ struct tee_ioctl_shm_register_fd_data __user *udata)
+{
+ struct tee_ioctl_shm_register_fd_data data;
+ struct tee_shm *shm;
+ long ret;
+
+ if (copy_from_user(&data, udata, sizeof(data)))
+ return -EFAULT;
+
+ /* Currently no input flags are supported */
+ if (data.flags)
+ return -EINVAL;
+
+ shm = tee_shm_register_fd(ctx, data.fd);
+ if (IS_ERR(shm))
+ return -EINVAL;
+
+ data.id = shm->id;
+ data.flags = shm->flags;
+ data.size = shm->size;
+
+ if (copy_to_user(udata, &data, sizeof(data)))
+ ret = -EFAULT;
+ else
+ ret = tee_shm_get_fd(shm);
+
+ /*
+ * When user space closes the file descriptor, the shared memory
+ * is freed; if tee_shm_get_fd() failed, it is freed
+ * immediately.
+ */
+ tee_shm_put(shm);
+ return ret;
+}
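+
+/*
+ * Userspace sketch (hypothetical file descriptors): registering a DMA-buf
+ * fd as TEE shared memory; on success the ioctl returns a new fd for the
+ * tee_shm object:
+ *
+ *	struct tee_ioctl_shm_register_fd_data data = { .fd = dmabuf_fd };
+ *
+ *	shm_fd = ioctl(tee_fd, TEE_IOC_SHM_REGISTER_FD, &data);
+ */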
+
+static int param_from_user_memref(struct tee_context *ctx,
+ struct tee_param_memref *memref,
+ struct tee_ioctl_param *ip)
+{
+ struct tee_shm *shm;
+ size_t offs = 0;
+
+ /*
+ * If a NULL pointer is passed to a TA in the TEE,
+ * the ip->c IOCTL parameter is set to TEE_MEMREF_NULL
+ * indicating a NULL memory reference.
+ */
+ if (ip->c != TEE_MEMREF_NULL) {
+ /*
+ * If we fail to get a pointer to a shared
+ * memory object (and increase the ref count)
+ * from an identifier, we return an error. All
+ * pointers that have been added to params have
+ * an increased ref count. It's the caller's
+ * responsibility to call tee_shm_put() on all
+ * resolved pointers.
+ */
+ shm = tee_shm_get_from_id(ctx, ip->c);
+ if (IS_ERR(shm))
+ return PTR_ERR(shm);
+
+ /*
+ * Ensure offset + size does not overflow
+ * offset and does not overflow the size of
+ * the referred shared memory object.
+ */
+ if ((ip->a + ip->b) < ip->a ||
+ (ip->a + ip->b) > shm->size) {
+ tee_shm_put(shm);
+ return -EINVAL;
+ }
+
+ if (shm->flags & TEE_SHM_DMA_BUF) {
+ struct tee_shm_dmabuf_ref *ref;
+
+ ref = container_of(shm, struct tee_shm_dmabuf_ref, shm);
+ if (ref->parent_shm) {
+ /*
+ * The shm already holds one reference to
+ * ref->parent_shm, so its refcount cannot
+ * drop to zero here. We take another
+ * reference since this shm will be used in
+ * the parameter list instead of the shm we
+ * got with tee_shm_get_from_id() above.
+ */
+ refcount_inc(&ref->parent_shm->refcount);
+ tee_shm_put(shm);
+ shm = ref->parent_shm;
+ offs = ref->offset;
+ }
+ }
+ } else if (ctx->cap_memref_null) {
+ /* Pass NULL pointer to OP-TEE */
+ shm = NULL;
+ } else {
+ return -EINVAL;
+ }
+
+ memref->shm_offs = ip->a + offs;
+ memref->size = ip->b;
+ memref->shm = shm;
+
+ return 0;
+}
+
static int params_from_user(struct tee_context *ctx, struct tee_param *params,
size_t num_params,
struct tee_ioctl_param __user *uparams)
@@ -361,8 +436,8 @@ static int params_from_user(struct tee_context *ctx, struct tee_param *params,
size_t n;
for (n = 0; n < num_params; n++) {
- struct tee_shm *shm;
struct tee_ioctl_param ip;
+ int rc;
if (copy_from_user(&ip, uparams + n, sizeof(ip)))
return -EFAULT;
@@ -375,6 +450,7 @@ static int params_from_user(struct tee_context *ctx, struct tee_param *params,
switch (ip.attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_OUTPUT:
break;
case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
@@ -382,48 +458,29 @@ static int params_from_user(struct tee_context *ctx, struct tee_param *params,
params[n].u.value.b = ip.b;
params[n].u.value.c = ip.c;
break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INOUT:
+ params[n].u.ubuf.uaddr = u64_to_user_ptr(ip.a);
+ params[n].u.ubuf.size = ip.b;
+
+ if (!access_ok(params[n].u.ubuf.uaddr,
+ params[n].u.ubuf.size))
+ return -EFAULT;
+
+ break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INOUT:
+ params[n].u.objref.id = ip.a;
+ params[n].u.objref.flags = ip.b;
+ break;
case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
- /*
- * If a NULL pointer is passed to a TA in the TEE,
- * the ip.c IOCTL parameters is set to TEE_MEMREF_NULL
- * indicating a NULL memory reference.
- */
- if (ip.c != TEE_MEMREF_NULL) {
- /*
- * If we fail to get a pointer to a shared
- * memory object (and increase the ref count)
- * from an identifier we return an error. All
- * pointers that has been added in params have
- * an increased ref count. It's the callers
- * responibility to do tee_shm_put() on all
- * resolved pointers.
- */
- shm = tee_shm_get_from_id(ctx, ip.c);
- if (IS_ERR(shm))
- return PTR_ERR(shm);
-
- /*
- * Ensure offset + size does not overflow
- * offset and does not overflow the size of
- * the referred shared memory object.
- */
- if ((ip.a + ip.b) < ip.a ||
- (ip.a + ip.b) > shm->size) {
- tee_shm_put(shm);
- return -EINVAL;
- }
- } else if (ctx->cap_memref_null) {
- /* Pass NULL pointer to OP-TEE */
- shm = NULL;
- } else {
- return -EINVAL;
- }
-
- params[n].u.memref.shm_offs = ip.a;
- params[n].u.memref.size = ip.b;
- params[n].u.memref.shm = shm;
+ rc = param_from_user_memref(ctx, &params[n].u.memref,
+ &ip);
+ if (rc)
+ return rc;
break;
default:
/* Unknown attribute */
@@ -450,6 +507,17 @@ static int params_to_user(struct tee_ioctl_param __user *uparams,
put_user(p->u.value.c, &up->c))
return -EFAULT;
break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INOUT:
+ if (put_user((u64)p->u.ubuf.size, &up->b))
+ return -EFAULT;
+ break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INOUT:
+ if (put_user(p->u.objref.id, &up->a) ||
+ put_user(p->u.objref.flags, &up->b))
+ return -EFAULT;
+ break;
case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
if (put_user((u64)p->u.memref.size, &up->b))
@@ -602,6 +670,66 @@ out:
return rc;
}
+static int tee_ioctl_object_invoke(struct tee_context *ctx,
+ struct tee_ioctl_buf_data __user *ubuf)
+{
+ int rc;
+ size_t n;
+ struct tee_ioctl_buf_data buf;
+ struct tee_ioctl_object_invoke_arg __user *uarg;
+ struct tee_ioctl_object_invoke_arg arg;
+ struct tee_ioctl_param __user *uparams = NULL;
+ struct tee_param *params = NULL;
+
+ if (!ctx->teedev->desc->ops->object_invoke_func)
+ return -EINVAL;
+
+ if (copy_from_user(&buf, ubuf, sizeof(buf)))
+ return -EFAULT;
+
+ if (buf.buf_len > TEE_MAX_ARG_SIZE ||
+ buf.buf_len < sizeof(struct tee_ioctl_object_invoke_arg))
+ return -EINVAL;
+
+ uarg = u64_to_user_ptr(buf.buf_ptr);
+ if (copy_from_user(&arg, uarg, sizeof(arg)))
+ return -EFAULT;
+
+ if (sizeof(arg) + TEE_IOCTL_PARAM_SIZE(arg.num_params) != buf.buf_len)
+ return -EINVAL;
+
+ if (arg.num_params) {
+ params = kcalloc(arg.num_params, sizeof(struct tee_param),
+ GFP_KERNEL);
+ if (!params)
+ return -ENOMEM;
+ uparams = uarg->params;
+ rc = params_from_user(ctx, params, arg.num_params, uparams);
+ if (rc)
+ goto out;
+ }
+
+ rc = ctx->teedev->desc->ops->object_invoke_func(ctx, &arg, params);
+ if (rc)
+ goto out;
+
+ if (put_user(arg.ret, &uarg->ret)) {
+ rc = -EFAULT;
+ goto out;
+ }
+ rc = params_to_user(uparams, arg.num_params, params);
+out:
+ if (params) {
+ /* Decrease ref count for all valid shared memory pointers */
+ for (n = 0; n < arg.num_params; n++)
+ if (tee_param_is_memref(params + n) &&
+ params[n].u.memref.shm)
+ tee_shm_put(params[n].u.memref.shm);
+ kfree(params);
+ }
+ return rc;
+}
+
static int tee_ioctl_cancel(struct tee_context *ctx,
struct tee_ioctl_cancel_arg __user *uarg)
{
@@ -650,6 +778,19 @@ static int params_to_supp(struct tee_context *ctx,
ip.b = p->u.value.b;
ip.c = p->u.value.c;
break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INOUT:
+ ip.a = (__force unsigned long)p->u.ubuf.uaddr;
+ ip.b = p->u.ubuf.size;
+ ip.c = 0;
+ break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INOUT:
+ ip.a = p->u.objref.id;
+ ip.b = p->u.objref.flags;
+ ip.c = 0;
+ break;
case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
@@ -752,6 +893,21 @@ static int params_from_supp(struct tee_param *params, size_t num_params,
p->u.value.b = ip.b;
p->u.value.c = ip.c;
break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INOUT:
+ p->u.ubuf.uaddr = u64_to_user_ptr(ip.a);
+ p->u.ubuf.size = ip.b;
+
+ if (!access_ok(params[n].u.ubuf.uaddr,
+ params[n].u.ubuf.size))
+ return -EFAULT;
+
+ break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INOUT:
+ p->u.objref.id = ip.a;
+ p->u.objref.flags = ip.b;
+ break;
case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
/*
@@ -828,10 +984,14 @@ static long tee_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return tee_ioctl_shm_alloc(ctx, uarg);
case TEE_IOC_SHM_REGISTER:
return tee_ioctl_shm_register(ctx, uarg);
+ case TEE_IOC_SHM_REGISTER_FD:
+ return tee_ioctl_shm_register_fd(ctx, uarg);
case TEE_IOC_OPEN_SESSION:
return tee_ioctl_open_session(ctx, uarg);
case TEE_IOC_INVOKE:
return tee_ioctl_invoke(ctx, uarg);
+ case TEE_IOC_OBJECT_INVOKE:
+ return tee_ioctl_object_invoke(ctx, uarg);
case TEE_IOC_CANCEL:
return tee_ioctl_cancel(ctx, uarg);
case TEE_IOC_CLOSE_SESSION:
@@ -889,7 +1049,7 @@ struct tee_device *tee_device_alloc(const struct tee_desc *teedesc,
if (!teedesc || !teedesc->name || !teedesc->ops ||
!teedesc->ops->get_version || !teedesc->ops->open ||
- !teedesc->ops->release || !pool)
+ !teedesc->ops->release)
return ERR_PTR(-EINVAL);
teedev = kzalloc(sizeof(*teedev), GFP_KERNEL);
@@ -977,7 +1137,7 @@ static ssize_t implementation_id_show(struct device *dev,
struct tee_ioctl_version_data vers;
teedev->desc->ops->get_version(teedev, &vers);
- return scnprintf(buf, PAGE_SIZE, "%d\n", vers.impl_id);
+ return sysfs_emit(buf, "%d\n", vers.impl_id);
}
static DEVICE_ATTR_RO(implementation_id);
@@ -1038,6 +1198,7 @@ void tee_device_put(struct tee_device *teedev)
}
mutex_unlock(&teedev->mutex);
}
+EXPORT_SYMBOL_GPL(tee_device_put);
bool tee_device_get(struct tee_device *teedev)
{
@@ -1050,6 +1211,7 @@ bool tee_device_get(struct tee_device *teedev)
mutex_unlock(&teedev->mutex);
return true;
}
+EXPORT_SYMBOL_GPL(tee_device_get);
/**
* tee_device_unregister() - Removes a TEE device
@@ -1064,6 +1226,8 @@ void tee_device_unregister(struct tee_device *teedev)
if (!teedev)
return;
+ tee_device_put_all_dma_heaps(teedev);
+
if (teedev->flags & TEE_DEVICE_FLAG_REGISTERED)
cdev_device_del(&teedev->cdev, &teedev->dev);
@@ -1287,3 +1451,5 @@ MODULE_AUTHOR("Linaro");
MODULE_DESCRIPTION("TEE Driver");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS("DMA_BUF");
+MODULE_IMPORT_NS("DMA_BUF_HEAP");
diff --git a/drivers/tee/tee_heap.c b/drivers/tee/tee_heap.c
new file mode 100644
index 000000000000..d8d7735cdffb
--- /dev/null
+++ b/drivers/tee/tee_heap.c
@@ -0,0 +1,500 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025, Linaro Limited
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/dma-heap.h>
+#include <linux/genalloc.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/tee_core.h>
+#include <linux/xarray.h>
+
+#include "tee_private.h"
+
+struct tee_dma_heap {
+ struct dma_heap *heap;
+ enum tee_dma_heap_id id;
+ struct kref kref;
+ struct tee_protmem_pool *pool;
+ struct tee_device *teedev;
+ bool shutting_down;
+ /* Protects pool, teedev, and shutting_down above */
+ struct mutex mu;
+};
+
+struct tee_heap_buffer {
+ struct tee_dma_heap *heap;
+ size_t size;
+ size_t offs;
+ struct sg_table table;
+};
+
+struct tee_heap_attachment {
+ struct sg_table table;
+ struct device *dev;
+};
+
+struct tee_protmem_static_pool {
+ struct tee_protmem_pool pool;
+ struct gen_pool *gen_pool;
+ phys_addr_t pa_base;
+};
+
+#if IS_ENABLED(CONFIG_TEE_DMABUF_HEAPS)
+static DEFINE_XARRAY_ALLOC(tee_dma_heap);
+
+static void tee_heap_release(struct kref *kref)
+{
+ struct tee_dma_heap *h = container_of(kref, struct tee_dma_heap, kref);
+
+ h->pool->ops->destroy_pool(h->pool);
+ tee_device_put(h->teedev);
+ h->pool = NULL;
+ h->teedev = NULL;
+}
+
+static void put_tee_heap(struct tee_dma_heap *h)
+{
+ kref_put(&h->kref, tee_heap_release);
+}
+
+static void get_tee_heap(struct tee_dma_heap *h)
+{
+ kref_get(&h->kref);
+}
+
+static int copy_sg_table(struct sg_table *dst, struct sg_table *src)
+{
+ struct scatterlist *dst_sg;
+ struct scatterlist *src_sg;
+ int ret;
+ int i;
+
+ ret = sg_alloc_table(dst, src->orig_nents, GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ dst_sg = dst->sgl;
+ for_each_sgtable_sg(src, src_sg, i) {
+ sg_set_page(dst_sg, sg_page(src_sg), src_sg->length,
+ src_sg->offset);
+ dst_sg = sg_next(dst_sg);
+ }
+
+ return 0;
+}
+
+static int tee_heap_attach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attachment)
+{
+ struct tee_heap_buffer *buf = dmabuf->priv;
+ struct tee_heap_attachment *a;
+ int ret;
+
+ a = kzalloc(sizeof(*a), GFP_KERNEL);
+ if (!a)
+ return -ENOMEM;
+
+ ret = copy_sg_table(&a->table, &buf->table);
+ if (ret) {
+ kfree(a);
+ return ret;
+ }
+
+ a->dev = attachment->dev;
+ attachment->priv = a;
+
+ return 0;
+}
+
+static void tee_heap_detach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attachment)
+{
+ struct tee_heap_attachment *a = attachment->priv;
+
+ sg_free_table(&a->table);
+ kfree(a);
+}
+
+static struct sg_table *
+tee_heap_map_dma_buf(struct dma_buf_attachment *attachment,
+ enum dma_data_direction direction)
+{
+ struct tee_heap_attachment *a = attachment->priv;
+ int ret;
+
+ ret = dma_map_sgtable(attachment->dev, &a->table, direction,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return &a->table;
+}
+
+static void tee_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
+ struct sg_table *table,
+ enum dma_data_direction direction)
+{
+ struct tee_heap_attachment *a = attachment->priv;
+
+ WARN_ON(&a->table != table);
+
+ dma_unmap_sgtable(attachment->dev, table, direction,
+ DMA_ATTR_SKIP_CPU_SYNC);
+}
+
+static void tee_heap_buf_free(struct dma_buf *dmabuf)
+{
+ struct tee_heap_buffer *buf = dmabuf->priv;
+
+ buf->heap->pool->ops->free(buf->heap->pool, &buf->table);
+ mutex_lock(&buf->heap->mu);
+ put_tee_heap(buf->heap);
+ mutex_unlock(&buf->heap->mu);
+ kfree(buf);
+}
+
+static const struct dma_buf_ops tee_heap_buf_ops = {
+ .attach = tee_heap_attach,
+ .detach = tee_heap_detach,
+ .map_dma_buf = tee_heap_map_dma_buf,
+ .unmap_dma_buf = tee_heap_unmap_dma_buf,
+ .release = tee_heap_buf_free,
+};
+
+static struct dma_buf *tee_dma_heap_alloc(struct dma_heap *heap,
+ unsigned long len, u32 fd_flags,
+ u64 heap_flags)
+{
+ struct tee_dma_heap *h = dma_heap_get_drvdata(heap);
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+ struct tee_device *teedev = NULL;
+ struct tee_heap_buffer *buf;
+ struct tee_protmem_pool *pool;
+ struct dma_buf *dmabuf;
+ int rc;
+
+ mutex_lock(&h->mu);
+ if (h->teedev) {
+ teedev = h->teedev;
+ pool = h->pool;
+ get_tee_heap(h);
+ }
+ mutex_unlock(&h->mu);
+
+ if (!teedev)
+ return ERR_PTR(-EINVAL);
+
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf) {
+ dmabuf = ERR_PTR(-ENOMEM);
+ goto err;
+ }
+ buf->size = len;
+ buf->heap = h;
+
+ rc = pool->ops->alloc(pool, &buf->table, len, &buf->offs);
+ if (rc) {
+ dmabuf = ERR_PTR(rc);
+ goto err_kfree;
+ }
+
+ exp_info.ops = &tee_heap_buf_ops;
+ exp_info.size = len;
+ exp_info.priv = buf;
+ exp_info.flags = fd_flags;
+ dmabuf = dma_buf_export(&exp_info);
+ if (IS_ERR(dmabuf))
+ goto err_protmem_free;
+
+ return dmabuf;
+
+err_protmem_free:
+ pool->ops->free(pool, &buf->table);
+err_kfree:
+ kfree(buf);
+err:
+ mutex_lock(&h->mu);
+ put_tee_heap(h);
+ mutex_unlock(&h->mu);
+ return dmabuf;
+}
+
+static const struct dma_heap_ops tee_dma_heap_ops = {
+ .allocate = tee_dma_heap_alloc,
+};
+
+static const char *heap_id_2_name(enum tee_dma_heap_id id)
+{
+ switch (id) {
+ case TEE_DMA_HEAP_SECURE_VIDEO_PLAY:
+ return "protected,secure-video";
+ case TEE_DMA_HEAP_TRUSTED_UI:
+ return "protected,trusted-ui";
+ case TEE_DMA_HEAP_SECURE_VIDEO_RECORD:
+ return "protected,secure-video-record";
+ default:
+ return NULL;
+ }
+}
+
+static int alloc_dma_heap(struct tee_device *teedev, enum tee_dma_heap_id id,
+ struct tee_protmem_pool *pool)
+{
+ struct dma_heap_export_info exp_info = {
+ .ops = &tee_dma_heap_ops,
+ .name = heap_id_2_name(id),
+ };
+ struct tee_dma_heap *h;
+ int rc;
+
+ if (!exp_info.name)
+ return -EINVAL;
+
+ if (xa_reserve(&tee_dma_heap, id, GFP_KERNEL)) {
+ if (!xa_load(&tee_dma_heap, id))
+ return -EEXIST;
+ return -ENOMEM;
+ }
+
+ h = kzalloc(sizeof(*h), GFP_KERNEL);
+ if (!h)
+ return -ENOMEM;
+ h->id = id;
+ kref_init(&h->kref);
+ h->teedev = teedev;
+ h->pool = pool;
+ mutex_init(&h->mu);
+
+ exp_info.priv = h;
+ h->heap = dma_heap_add(&exp_info);
+ if (IS_ERR(h->heap)) {
+ rc = PTR_ERR(h->heap);
+ kfree(h);
+
+ return rc;
+ }
+
+ /* "can't fail" due to the call to xa_reserve() above */
+ return WARN_ON(xa_is_err(xa_store(&tee_dma_heap, id, h, GFP_KERNEL)));
+}
+
+int tee_device_register_dma_heap(struct tee_device *teedev,
+ enum tee_dma_heap_id id,
+ struct tee_protmem_pool *pool)
+{
+ struct tee_dma_heap *h;
+ int rc;
+
+ if (!tee_device_get(teedev))
+ return -EINVAL;
+
+ h = xa_load(&tee_dma_heap, id);
+ if (h) {
+ mutex_lock(&h->mu);
+ if (h->teedev) {
+ rc = -EBUSY;
+ } else {
+ kref_init(&h->kref);
+ h->shutting_down = false;
+ h->teedev = teedev;
+ h->pool = pool;
+ rc = 0;
+ }
+ mutex_unlock(&h->mu);
+ } else {
+ rc = alloc_dma_heap(teedev, id, pool);
+ }
+
+ if (rc) {
+ tee_device_put(teedev);
+ dev_err(&teedev->dev, "can't register DMA heap id %d (%s)\n",
+ id, heap_id_2_name(id));
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tee_device_register_dma_heap);
+
+void tee_device_put_all_dma_heaps(struct tee_device *teedev)
+{
+ struct tee_dma_heap *h;
+ u_long i;
+
+ xa_for_each(&tee_dma_heap, i, h) {
+ if (h) {
+ mutex_lock(&h->mu);
+ if (h->teedev == teedev && !h->shutting_down) {
+ h->shutting_down = true;
+ put_tee_heap(h);
+ }
+ mutex_unlock(&h->mu);
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(tee_device_put_all_dma_heaps);
+
+int tee_heap_update_from_dma_buf(struct tee_device *teedev,
+ struct dma_buf *dmabuf, size_t *offset,
+ struct tee_shm *shm,
+ struct tee_shm **parent_shm)
+{
+ struct tee_heap_buffer *buf;
+ int rc;
+
+ /* The DMA-buf must be from our heap */
+ if (dmabuf->ops != &tee_heap_buf_ops)
+ return -EINVAL;
+
+ buf = dmabuf->priv;
+ /* The buffer must be from the same teedev */
+ if (buf->heap->teedev != teedev)
+ return -EINVAL;
+
+ shm->size = buf->size;
+
+ rc = buf->heap->pool->ops->update_shm(buf->heap->pool, &buf->table,
+ buf->offs, shm, parent_shm);
+ if (!rc && *parent_shm)
+ *offset = buf->offs;
+
+ return rc;
+}
+#else
+int tee_device_register_dma_heap(struct tee_device *teedev __always_unused,
+ enum tee_dma_heap_id id __always_unused,
+ struct tee_protmem_pool *pool __always_unused)
+{
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(tee_device_register_dma_heap);
+
+void
+tee_device_put_all_dma_heaps(struct tee_device *teedev __always_unused)
+{
+}
+EXPORT_SYMBOL_GPL(tee_device_put_all_dma_heaps);
+
+int tee_heap_update_from_dma_buf(struct tee_device *teedev __always_unused,
+ struct dma_buf *dmabuf __always_unused,
+ size_t *offset __always_unused,
+ struct tee_shm *shm __always_unused,
+ struct tee_shm **parent_shm __always_unused)
+{
+ return -EINVAL;
+}
+#endif
+
+static struct tee_protmem_static_pool *
+to_protmem_static_pool(struct tee_protmem_pool *pool)
+{
+ return container_of(pool, struct tee_protmem_static_pool, pool);
+}
+
+static int protmem_pool_op_static_alloc(struct tee_protmem_pool *pool,
+ struct sg_table *sgt, size_t size,
+ size_t *offs)
+{
+ struct tee_protmem_static_pool *stp = to_protmem_static_pool(pool);
+ phys_addr_t pa;
+ int ret;
+
+ pa = gen_pool_alloc(stp->gen_pool, size);
+ if (!pa)
+ return -ENOMEM;
+
+ ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+ if (ret) {
+ gen_pool_free(stp->gen_pool, pa, size);
+ return ret;
+ }
+
+ sg_set_page(sgt->sgl, phys_to_page(pa), size, 0);
+ *offs = pa - stp->pa_base;
+
+ return 0;
+}
+
+static void protmem_pool_op_static_free(struct tee_protmem_pool *pool,
+ struct sg_table *sgt)
+{
+ struct tee_protmem_static_pool *stp = to_protmem_static_pool(pool);
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sgtable_sg(sgt, sg, i)
+ gen_pool_free(stp->gen_pool, sg_phys(sg), sg->length);
+ sg_free_table(sgt);
+}
+
+static int protmem_pool_op_static_update_shm(struct tee_protmem_pool *pool,
+ struct sg_table *sgt, size_t offs,
+ struct tee_shm *shm,
+ struct tee_shm **parent_shm)
+{
+ struct tee_protmem_static_pool *stp = to_protmem_static_pool(pool);
+
+ shm->paddr = stp->pa_base + offs;
+ *parent_shm = NULL;
+
+ return 0;
+}
+
+static void protmem_pool_op_static_destroy_pool(struct tee_protmem_pool *pool)
+{
+ struct tee_protmem_static_pool *stp = to_protmem_static_pool(pool);
+
+ gen_pool_destroy(stp->gen_pool);
+ kfree(stp);
+}
+
+static struct tee_protmem_pool_ops protmem_pool_ops_static = {
+ .alloc = protmem_pool_op_static_alloc,
+ .free = protmem_pool_op_static_free,
+ .update_shm = protmem_pool_op_static_update_shm,
+ .destroy_pool = protmem_pool_op_static_destroy_pool,
+};
+
+struct tee_protmem_pool *tee_protmem_static_pool_alloc(phys_addr_t paddr,
+ size_t size)
+{
+ const size_t page_mask = PAGE_SIZE - 1;
+ struct tee_protmem_static_pool *stp;
+ int rc;
+
+ /* Check it's page aligned */
+ if ((paddr | size) & page_mask)
+ return ERR_PTR(-EINVAL);
+
+ if (!pfn_valid(PHYS_PFN(paddr)))
+ return ERR_PTR(-EINVAL);
+
+ stp = kzalloc(sizeof(*stp), GFP_KERNEL);
+ if (!stp)
+ return ERR_PTR(-ENOMEM);
+
+ stp->gen_pool = gen_pool_create(PAGE_SHIFT, -1);
+ if (!stp->gen_pool) {
+ rc = -ENOMEM;
+ goto err_free;
+ }
+
+ rc = gen_pool_add(stp->gen_pool, paddr, size, -1);
+ if (rc)
+ goto err_free_pool;
+
+ stp->pool.ops = &protmem_pool_ops_static;
+ stp->pa_base = paddr;
+ return &stp->pool;
+
+err_free_pool:
+ gen_pool_destroy(stp->gen_pool);
+err_free:
+ kfree(stp);
+
+ return ERR_PTR(rc);
+}
+EXPORT_SYMBOL_GPL(tee_protmem_static_pool_alloc);
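+
+/*
+ * Illustrative use (a sketch; the carveout base 'pa_base' is hypothetical):
+ * a TEE driver with a page-aligned, firmware-reserved carveout can expose
+ * it as a protected DMA heap:
+ *
+ *	pool = tee_protmem_static_pool_alloc(pa_base, SZ_16M);
+ *	if (IS_ERR(pool))
+ *		return PTR_ERR(pool);
+ *	rc = tee_device_register_dma_heap(teedev,
+ *					  TEE_DMA_HEAP_SECURE_VIDEO_PLAY,
+ *					  pool);
+ */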
diff --git a/drivers/tee/tee_private.h b/drivers/tee/tee_private.h
index 9bc50605227c..6bde688bfcb1 100644
--- a/drivers/tee/tee_private.h
+++ b/drivers/tee/tee_private.h
@@ -8,20 +8,28 @@
#include <linux/cdev.h>
#include <linux/completion.h>
#include <linux/device.h>
+#include <linux/dma-buf.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/types.h>
-int tee_shm_get_fd(struct tee_shm *shm);
-
-bool tee_device_get(struct tee_device *teedev);
-void tee_device_put(struct tee_device *teedev);
+/* extra references appended to shm object for registered shared memory */
+struct tee_shm_dmabuf_ref {
+ struct tee_shm shm;
+ size_t offset;
+ struct dma_buf *dmabuf;
+ struct tee_shm *parent_shm;
+};
-void teedev_ctx_get(struct tee_context *ctx);
-void teedev_ctx_put(struct tee_context *ctx);
+int tee_shm_get_fd(struct tee_shm *shm);
struct tee_shm *tee_shm_alloc_user_buf(struct tee_context *ctx, size_t size);
struct tee_shm *tee_shm_register_user_buf(struct tee_context *ctx,
unsigned long addr, size_t length);
+int tee_heap_update_from_dma_buf(struct tee_device *teedev,
+ struct dma_buf *dmabuf, size_t *offset,
+ struct tee_shm *shm,
+ struct tee_shm **parent_shm);
+
#endif /*TEE_PRIVATE_H*/
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
index 2a7d253d9c55..4a47de4bb2e5 100644
--- a/drivers/tee/tee_shm.c
+++ b/drivers/tee/tee_shm.c
@@ -4,6 +4,9 @@
*/
#include <linux/anon_inodes.h>
#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-mapping.h>
+#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/mm.h>
@@ -12,9 +15,14 @@
#include <linux/tee_core.h>
#include <linux/uaccess.h>
#include <linux/uio.h>
-#include <linux/highmem.h>
#include "tee_private.h"
+struct tee_shm_dma_mem {
+ struct tee_shm shm;
+ dma_addr_t dma_addr;
+ struct page *page;
+};
+
static void shm_put_kernel_pages(struct page **pages, size_t page_count)
{
size_t n;
@@ -45,7 +53,24 @@ static void release_registered_pages(struct tee_shm *shm)
static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm)
{
- if (shm->flags & TEE_SHM_POOL) {
+ void *p = shm;
+
+ if (shm->flags & TEE_SHM_DMA_MEM) {
+#if IS_ENABLED(CONFIG_TEE_DMABUF_HEAPS)
+ struct tee_shm_dma_mem *dma_mem;
+
+ dma_mem = container_of(shm, struct tee_shm_dma_mem, shm);
+ p = dma_mem;
+ dma_free_pages(&teedev->dev, shm->size, dma_mem->page,
+ dma_mem->dma_addr, DMA_BIDIRECTIONAL);
+#endif
+ } else if (shm->flags & TEE_SHM_DMA_BUF) {
+ struct tee_shm_dmabuf_ref *ref;
+
+ ref = container_of(shm, struct tee_shm_dmabuf_ref, shm);
+ p = ref;
+ dma_buf_put(ref->dmabuf);
+ } else if (shm->flags & TEE_SHM_POOL) {
teedev->pool->ops->free(teedev->pool, shm);
} else if (shm->flags & TEE_SHM_DYNAMIC) {
int rc = teedev->desc->ops->shm_unregister(shm->ctx, shm);
@@ -59,7 +84,7 @@ static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm)
teedev_ctx_put(shm->ctx);
- kfree(shm);
+ kfree(p);
tee_device_put(teedev);
}
@@ -169,7 +194,7 @@ struct tee_shm *tee_shm_alloc_user_buf(struct tee_context *ctx, size_t size)
* tee_client_invoke_func(). The memory allocated is later freed with a
* call to tee_shm_free().
*
- * @returns a pointer to 'struct tee_shm'
+ * @returns a pointer to 'struct tee_shm' on success, and ERR_PTR on failure
*/
struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size)
{
@@ -179,6 +204,62 @@ struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size)
}
EXPORT_SYMBOL_GPL(tee_shm_alloc_kernel_buf);
+struct tee_shm *tee_shm_register_fd(struct tee_context *ctx, int fd)
+{
+ struct tee_shm_dmabuf_ref *ref;
+ int rc;
+
+ if (!tee_device_get(ctx->teedev))
+ return ERR_PTR(-EINVAL);
+
+ teedev_ctx_get(ctx);
+
+ ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+ if (!ref) {
+ rc = -ENOMEM;
+ goto err_put_tee;
+ }
+
+ refcount_set(&ref->shm.refcount, 1);
+ ref->shm.ctx = ctx;
+ ref->shm.id = -1;
+ ref->shm.flags = TEE_SHM_DMA_BUF;
+
+ ref->dmabuf = dma_buf_get(fd);
+ if (IS_ERR(ref->dmabuf)) {
+ rc = PTR_ERR(ref->dmabuf);
+ goto err_kfree_ref;
+ }
+
+ rc = tee_heap_update_from_dma_buf(ctx->teedev, ref->dmabuf,
+ &ref->offset, &ref->shm,
+ &ref->parent_shm);
+ if (rc)
+ goto err_put_dmabuf;
+
+ mutex_lock(&ref->shm.ctx->teedev->mutex);
+ ref->shm.id = idr_alloc(&ref->shm.ctx->teedev->idr, &ref->shm,
+ 1, 0, GFP_KERNEL);
+ mutex_unlock(&ref->shm.ctx->teedev->mutex);
+ if (ref->shm.id < 0) {
+ rc = ref->shm.id;
+ goto err_put_dmabuf;
+ }
+
+ return &ref->shm;
+
+err_put_dmabuf:
+ dma_buf_put(ref->dmabuf);
+err_kfree_ref:
+ kfree(ref);
+err_put_tee:
+ teedev_ctx_put(ctx);
+ tee_device_put(ctx->teedev);
+
+ return ERR_PTR(rc);
+}
+EXPORT_SYMBOL_GPL(tee_shm_register_fd);
+
/**
* tee_shm_alloc_priv_buf() - Allocate shared memory for a privately shared
* kernel buffer
@@ -203,6 +284,71 @@ struct tee_shm *tee_shm_alloc_priv_buf(struct tee_context *ctx, size_t size)
}
EXPORT_SYMBOL_GPL(tee_shm_alloc_priv_buf);
+#if IS_ENABLED(CONFIG_TEE_DMABUF_HEAPS)
+/**
+ * tee_shm_alloc_dma_mem() - Allocate DMA memory as shared memory object
+ * @ctx: Context that allocates the shared memory
+ * @page_count: Number of pages
+ *
+ * The allocated memory is expected to be lent (made inaccessible to the
+ * kernel) to the TEE while it's used and returned (accessible to the
+ * kernel again) before it's freed.
+ *
+ * This function should normally only be used internally in the TEE
+ * drivers.
+ *
+ * @returns a pointer to 'struct tee_shm' on success, and ERR_PTR on failure
+ */
+struct tee_shm *tee_shm_alloc_dma_mem(struct tee_context *ctx,
+ size_t page_count)
+{
+ struct tee_device *teedev = ctx->teedev;
+ struct tee_shm_dma_mem *dma_mem;
+ dma_addr_t dma_addr;
+ struct page *page;
+
+ if (!tee_device_get(teedev))
+ return ERR_PTR(-EINVAL);
+
+ page = dma_alloc_pages(&teedev->dev, page_count * PAGE_SIZE,
+ &dma_addr, DMA_BIDIRECTIONAL, GFP_KERNEL);
+ if (!page)
+ goto err_put_teedev;
+
+ dma_mem = kzalloc(sizeof(*dma_mem), GFP_KERNEL);
+ if (!dma_mem)
+ goto err_free_pages;
+
+ refcount_set(&dma_mem->shm.refcount, 1);
+ dma_mem->shm.ctx = ctx;
+ dma_mem->shm.paddr = page_to_phys(page);
+ dma_mem->dma_addr = dma_addr;
+ dma_mem->page = page;
+ dma_mem->shm.size = page_count * PAGE_SIZE;
+ dma_mem->shm.flags = TEE_SHM_DMA_MEM;
+
+ teedev_ctx_get(ctx);
+
+ return &dma_mem->shm;
+
+err_free_pages:
+ dma_free_pages(&teedev->dev, page_count * PAGE_SIZE, page, dma_addr,
+ DMA_BIDIRECTIONAL);
+err_put_teedev:
+ tee_device_put(teedev);
+
+ return ERR_PTR(-ENOMEM);
+}
+EXPORT_SYMBOL_GPL(tee_shm_alloc_dma_mem);
+#else
+struct tee_shm *tee_shm_alloc_dma_mem(struct tee_context *ctx,
+ size_t page_count)
+{
+ return ERR_PTR(-EINVAL);
+}
+EXPORT_SYMBOL_GPL(tee_shm_alloc_dma_mem);
+#endif
+
int tee_dyn_shm_alloc_helper(struct tee_shm *shm, size_t size, size_t align,
int (*shm_register)(struct tee_context *ctx,
struct tee_shm *shm,
@@ -321,6 +467,14 @@ register_shm_helper(struct tee_context *ctx, struct iov_iter *iter, u32 flags,
if (unlikely(len <= 0)) {
ret = len ? ERR_PTR(len) : ERR_PTR(-ENOMEM);
goto err_free_shm_pages;
+ } else if (DIV_ROUND_UP(len + off, PAGE_SIZE) != num_pages) {
+ /*
+ * If we only got a few pages, update to release the
+ * correct amount below.
+ */
+ shm->num_pages = len / PAGE_SIZE;
+ ret = ERR_PTR(-ENOMEM);
+ goto err_put_shm_pages;
}
/*
@@ -444,6 +598,9 @@ static int tee_shm_fop_mmap(struct file *filp, struct vm_area_struct *vma)
/* Refuse sharing shared memory provided by application */
if (shm->flags & TEE_SHM_USER_MAPPED)
return -EINVAL;
+ /* Refuse sharing registered dma-bufs with the application */
+ if (shm->flags & TEE_SHM_DMA_BUF)
+ return -EINVAL;
/* check for overflowing the buffer's size */
if (vma->vm_pgoff + vma_pages(vma) > shm->size >> PAGE_SHIFT)
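
A minimal sketch of the new tee_shm_register_fd() path (the dma-buf fd is
assumed to arrive from user space through a driver-specific ioctl):

static int my_wrap_dmabuf(struct tee_context *ctx, int fd)
{
	struct tee_shm *shm;

	shm = tee_shm_register_fd(ctx, fd);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	/* shm now carries paddr/offset resolved from the dma-buf heap */

	tee_shm_put(shm);	/* drop the reference taken above */
	return 0;
}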
diff --git a/drivers/thermal/gov_step_wise.c b/drivers/thermal/gov_step_wise.c
index d1bb59f1dfbd..ea277c466d8d 100644
--- a/drivers/thermal/gov_step_wise.c
+++ b/drivers/thermal/gov_step_wise.c
@@ -20,11 +20,13 @@
* If the temperature is higher than a trip point,
* a. if the trend is THERMAL_TREND_RAISING, use higher cooling
* state for this trip point
- * b. if the trend is THERMAL_TREND_DROPPING, do nothing
+ * b. if the trend is THERMAL_TREND_DROPPING, use a lower cooling state
+ * for this trip point, but keep the cooling state above the applicable
+ * minimum
* If the temperature is lower than a trip point,
* a. if the trend is THERMAL_TREND_RAISING, do nothing
- * b. if the trend is THERMAL_TREND_DROPPING, use lower cooling
- * state for this trip point, if the cooling state already
+ * b. if the trend is THERMAL_TREND_DROPPING, use the minimum applicable
+ * cooling state for this trip point, or if the cooling state already
* equals lower limit, deactivate the thermal instance
*/
static unsigned long get_target_state(struct thermal_instance *instance,
@@ -51,6 +53,17 @@ static unsigned long get_target_state(struct thermal_instance *instance,
if (throttle) {
if (trend == THERMAL_TREND_RAISING)
return clamp(cur_state + 1, instance->lower, instance->upper);
+
+ /*
+ * If the zone temperature is falling, the cooling level can
+ * be reduced, but it should still be above the lower state of
+ * the given thermal instance to pull the temperature further
+ * down.
+ */
+ if (trend == THERMAL_TREND_DROPPING)
+ return clamp(cur_state - 1,
+ min(instance->lower + 1, instance->upper),
+ instance->upper);
} else if (trend == THERMAL_TREND_DROPPING) {
if (cur_state <= instance->lower)
return THERMAL_NO_TARGET;
@@ -69,16 +82,14 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz,
const struct thermal_trip_desc *td,
int trip_threshold)
{
+ bool throttle = tz->temperature >= trip_threshold;
const struct thermal_trip *trip = &td->trip;
enum thermal_trend trend = get_tz_trend(tz, trip);
int trip_id = thermal_zone_trip_id(tz, trip);
struct thermal_instance *instance;
- bool throttle = false;
- if (tz->temperature >= trip_threshold) {
- throttle = true;
+ if (throttle)
trace_thermal_zone_trip(tz, trip_id, trip->type);
- }
dev_dbg(&tz->device, "Trip%d[type=%d,temp=%d]:trend=%d,throttle=%d\n",
trip_id, trip->type, trip_threshold, trend, throttle);
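
To make the clamp arithmetic above concrete, here is a stand-alone model
of the throttle path (not driver code; lower/upper mimic a thermal
instance limited to states 1..5):

#include <stdio.h>

#define min(a, b)		((a) < (b) ? (a) : (b))
#define clamp(v, lo, hi)	((v) < (lo) ? (lo) : (v) > (hi) ? (hi) : (v))

int main(void)
{
	unsigned long lower = 1, upper = 5, cur_state = 4;

	/* raising trend: one step up, capped at the upper limit */
	printf("raising  -> %lu\n", clamp(cur_state + 1, lower, upper));

	/*
	 * dropping trend while still throttling: one step down, but never
	 * below lower + 1 so cooling keeps pulling the temperature down
	 */
	printf("dropping -> %lu\n",
	       clamp(cur_state - 1, min(lower + 1, upper), upper));

	return 0;	/* prints 5 and 3 */
}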
diff --git a/drivers/thermal/intel/int340x_thermal/Kconfig b/drivers/thermal/intel/int340x_thermal/Kconfig
index 4c699f0896b5..4ced7bdcd62c 100644
--- a/drivers/thermal/intel/int340x_thermal/Kconfig
+++ b/drivers/thermal/intel/int340x_thermal/Kconfig
@@ -12,6 +12,7 @@ config INT340X_THERMAL
select ACPI_THERMAL_LIB
select INTEL_SOC_DTS_IOSF_CORE
select INTEL_TCC
+ select ACPI_PLATFORM_PROFILE
select PROC_THERMAL_MMIO_RAPL if POWERCAP
help
Newer laptops and tablets that use ACPI may have thermal sensors and
diff --git a/drivers/thermal/intel/int340x_thermal/Makefile b/drivers/thermal/intel/int340x_thermal/Makefile
index 184318d1792b..436be34b21a9 100644
--- a/drivers/thermal/intel/int340x_thermal/Makefile
+++ b/drivers/thermal/intel/int340x_thermal/Makefile
@@ -14,5 +14,6 @@ obj-$(CONFIG_INT340X_THERMAL) += processor_thermal_mbox.o
obj-$(CONFIG_INT340X_THERMAL) += processor_thermal_wt_req.o
obj-$(CONFIG_INT340X_THERMAL) += processor_thermal_wt_hint.o
obj-$(CONFIG_INT340X_THERMAL) += processor_thermal_power_floor.o
+obj-$(CONFIG_INT340X_THERMAL) += processor_thermal_soc_slider.o
obj-$(CONFIG_INT3406_THERMAL) += int3406_thermal.o
obj-$(CONFIG_ACPI_THERMAL_REL) += acpi_thermal_rel.o
diff --git a/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c b/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c
index cb149bcdd7d5..ce5d53be108b 100644
--- a/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c
+++ b/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c
@@ -220,9 +220,6 @@ static int acpi_parse_psvt(acpi_handle handle, int *psvt_count, struct psvt **ps
int i, result = 0;
struct psvt *psvts;
- if (!acpi_has_method(handle, "PSVT"))
- return -ENODEV;
-
status = acpi_evaluate_object(handle, "PSVT", NULL, &buffer);
if (ACPI_FAILURE(status))
return -ENODEV;
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
index 29fcece48cad..48e7849d4816 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
@@ -338,10 +338,17 @@ static int tcc_offset_save = -1;
int proc_thermal_suspend(struct device *dev)
{
+ struct proc_thermal_device *proc_dev;
+
tcc_offset_save = intel_tcc_get_offset(-1);
if (tcc_offset_save < 0)
dev_warn(dev, "failed to save offset (%d)\n", tcc_offset_save);
+ proc_dev = dev_get_drvdata(dev);
+
+ if (proc_dev->mmio_feature_mask & PROC_THERMAL_FEATURE_SOC_POWER_SLIDER)
+ proc_thermal_soc_power_slider_suspend(proc_dev);
+
return 0;
}
EXPORT_SYMBOL_GPL(proc_thermal_suspend);
@@ -357,6 +364,9 @@ int proc_thermal_resume(struct device *dev)
if (tcc_offset_save >= 0)
intel_tcc_set_offset(-1, tcc_offset_save);
+ if (proc_dev->mmio_feature_mask & PROC_THERMAL_FEATURE_SOC_POWER_SLIDER)
+ proc_thermal_soc_power_slider_resume(proc_dev);
+
return 0;
}
EXPORT_SYMBOL_GPL(proc_thermal_resume);
@@ -432,8 +442,18 @@ int proc_thermal_mmio_add(struct pci_dev *pdev,
}
}
+ if (feature_mask & PROC_THERMAL_FEATURE_SOC_POWER_SLIDER) {
+ ret = proc_thermal_soc_power_slider_add(pdev, proc_priv);
+ if (ret) {
+ dev_info(&pdev->dev, "failed to add soc power efficiency slider\n");
+ goto err_rem_wlt;
+ }
+ }
+
return 0;
+err_rem_wlt:
+ proc_thermal_wt_hint_remove(pdev);
err_rem_rfim:
proc_thermal_rfim_remove(pdev);
err_rem_ptc:
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h
index 49398794124a..30760475102f 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h
@@ -69,6 +69,7 @@ struct rapl_mmio_regs {
#define PROC_THERMAL_FEATURE_POWER_FLOOR 0x40
#define PROC_THERMAL_FEATURE_MSI_SUPPORT 0x80
#define PROC_THERMAL_FEATURE_PTC 0x100
+#define PROC_THERMAL_FEATURE_SOC_POWER_SLIDER 0x200
#if IS_ENABLED(CONFIG_PROC_THERMAL_MMIO_RAPL)
int proc_thermal_rapl_add(struct pci_dev *pdev, struct proc_thermal_device *proc_priv);
@@ -127,4 +128,9 @@ int proc_thermal_mmio_add(struct pci_dev *pdev,
void proc_thermal_mmio_remove(struct pci_dev *pdev, struct proc_thermal_device *proc_priv);
int proc_thermal_ptc_add(struct pci_dev *pdev, struct proc_thermal_device *proc_priv);
void proc_thermal_ptc_remove(struct pci_dev *pdev);
+
+int proc_thermal_soc_power_slider_add(struct pci_dev *pdev, struct proc_thermal_device *proc_priv);
+void proc_thermal_soc_power_slider_suspend(struct proc_thermal_device *proc_priv);
+void proc_thermal_soc_power_slider_resume(struct proc_thermal_device *proc_priv);
+
#endif
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
index d4d7e8e147d2..e2471768d355 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
@@ -498,7 +498,8 @@ static const struct pci_device_id proc_thermal_pci_ids[] = {
{ PCI_DEVICE_DATA(INTEL, PTL_THERMAL, PROC_THERMAL_FEATURE_RAPL |
PROC_THERMAL_FEATURE_DLVR | PROC_THERMAL_FEATURE_DVFS |
PROC_THERMAL_FEATURE_MSI_SUPPORT | PROC_THERMAL_FEATURE_WT_HINT |
- PROC_THERMAL_FEATURE_POWER_FLOOR | PROC_THERMAL_FEATURE_PTC) },
+ PROC_THERMAL_FEATURE_POWER_FLOOR | PROC_THERMAL_FEATURE_PTC |
+ PROC_THERMAL_FEATURE_SOC_POWER_SLIDER) },
{ PCI_DEVICE_DATA(INTEL, WCL_THERMAL, PROC_THERMAL_FEATURE_MSI_SUPPORT |
PROC_THERMAL_FEATURE_RAPL | PROC_THERMAL_FEATURE_DLVR |
PROC_THERMAL_FEATURE_DVFS | PROC_THERMAL_FEATURE_WT_HINT |
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_soc_slider.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_soc_slider.c
new file mode 100644
index 000000000000..49ff3bae7271
--- /dev/null
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_soc_slider.c
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Processor Thermal Device Interface for Reading and Writing
+ * SoC Power Slider Values from User Space.
+ *
+ * Operation:
+ * The SOC_EFFICIENCY_SLIDER_0_0_0_MCHBAR register is accessed via
+ * memory-mapped I/O (MMIO) at offset 0x5B38.
+ * Although this register is 64 bits wide, only bits 7:0 are used,
+ * and the other bits remain unchanged.
+ *
+ * Bit definitions:
+ *
+ * Bits 2:0 (Slider value):
+ * The SoC optimizer slider value indicates the system-wide energy performance
+ * hint. The slider has no specific units and ranges from 0 (highest
+ * performance) to 6 (highest energy efficiency). A value of 7 is reserved.
+ * Bit 3: Reserved
+ * Bits 6:4 (Offset):
+ * Offset allows the SoC to automatically switch slider position in range
+ * [slider value (bits 2:0) + offset] to improve power efficiency based on
+ * internal SoC algorithms.
+ * Bit 7 (Enable):
+ * If this bit is set, the SoC Optimization sliders will be processed by the
+ * SoC firmware.
+ *
+ * Copyright (c) 2025, Intel Corporation.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/pci.h>
+#include <linux/platform_profile.h>
+#include "processor_thermal_device.h"
+
+#define SOC_POWER_SLIDER_OFFSET 0x5B38
+
+enum power_slider_preference {
+ SOC_POWER_SLIDER_PERFORMANCE,
+ SOC_POWER_SLIDER_BALANCE,
+ SOC_POWER_SLIDER_POWERSAVE,
+};
+
+#define SOC_SLIDER_VALUE_MINIMUM 0x00
+#define SOC_SLIDER_VALUE_BALANCE 0x03
+#define SOC_SLIDER_VALUE_MAXIMUM 0x06
+
+#define SLIDER_MASK GENMASK_ULL(2, 0)
+#define SLIDER_ENABLE_BIT 7
+
+static u8 slider_values[] = {
+ [SOC_POWER_SLIDER_PERFORMANCE] = SOC_SLIDER_VALUE_MINIMUM,
+ [SOC_POWER_SLIDER_BALANCE] = SOC_SLIDER_VALUE_BALANCE,
+ [SOC_POWER_SLIDER_POWERSAVE] = SOC_SLIDER_VALUE_MAXIMUM,
+};
+
+/* Lock to protect module param updates */
+static DEFINE_MUTEX(slider_param_lock);
+
+static int slider_balanced_param = SOC_SLIDER_VALUE_BALANCE;
+
+static int slider_def_balance_set(const char *arg, const struct kernel_param *kp)
+{
+ u8 slider_val;
+ int ret;
+
+ guard(mutex)(&slider_param_lock);
+
+ ret = kstrtou8(arg, 16, &slider_val);
+ if (!ret) {
+ if (slider_val <= slider_values[SOC_POWER_SLIDER_PERFORMANCE] ||
+ slider_val >= slider_values[SOC_POWER_SLIDER_POWERSAVE])
+ return -EINVAL;
+
+ slider_balanced_param = slider_val;
+ }
+
+ return ret;
+}
+
+static int slider_def_balance_get(char *buf, const struct kernel_param *kp)
+{
+ guard(mutex)(&slider_param_lock);
+ return sysfs_emit(buf, "%02x\n", slider_values[SOC_POWER_SLIDER_BALANCE]);
+}
+
+static const struct kernel_param_ops slider_def_balance_ops = {
+ .set = slider_def_balance_set,
+ .get = slider_def_balance_get,
+};
+
+module_param_cb(slider_balance, &slider_def_balance_ops, NULL, 0644);
+MODULE_PARM_DESC(slider_balance, "Set slider default value for balance");
+
+static u8 slider_offset;
+
+static int slider_def_offset_set(const char *arg, const struct kernel_param *kp)
+{
+ u8 offset;
+ int ret;
+
+ guard(mutex)(&slider_param_lock);
+
+ ret = kstrtou8(arg, 16, &offset);
+ if (!ret) {
+ if (offset > SOC_SLIDER_VALUE_MAXIMUM)
+ return -EINVAL;
+
+ slider_offset = offset;
+ }
+
+ return ret;
+}
+
+static int slider_def_offset_get(char *buf, const struct kernel_param *kp)
+{
+ guard(mutex)(&slider_param_lock);
+ return sysfs_emit(buf, "%02x\n", slider_offset);
+}
+
+static const struct kernel_param_ops slider_offset_ops = {
+ .set = slider_def_offset_set,
+ .get = slider_def_offset_get,
+};
+
+/*
+ * To enhance power efficiency dynamically, the firmware can optionally
+ * auto-adjust the slider value based on the current workload. This
+ * adjustment is controlled by the "slider_offset" module parameter.
+ * The offset permits the firmware to increase the slider value
+ * up to and including "SoC slider + slider offset".
+ */
+module_param_cb(slider_offset, &slider_offset_ops, NULL, 0644);
+MODULE_PARM_DESC(slider_offset, "Set slider offset");
+
+/* Convert from platform power profile option to SoC slider value */
+static int convert_profile_to_power_slider(enum platform_profile_option profile)
+{
+ switch (profile) {
+ case PLATFORM_PROFILE_LOW_POWER:
+ return slider_values[SOC_POWER_SLIDER_POWERSAVE];
+ case PLATFORM_PROFILE_BALANCED:
+ return slider_values[SOC_POWER_SLIDER_BALANCE];
+ case PLATFORM_PROFILE_PERFORMANCE:
+ return slider_values[SOC_POWER_SLIDER_PERFORMANCE];
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+/* Convert to platform power profile option from SoC slider values */
+static int convert_power_slider_to_profile(u8 slider)
+{
+ if (slider == slider_values[SOC_POWER_SLIDER_PERFORMANCE])
+ return PLATFORM_PROFILE_PERFORMANCE;
+ if (slider == slider_values[SOC_POWER_SLIDER_BALANCE])
+ return PLATFORM_PROFILE_BALANCED;
+ if (slider == slider_values[SOC_POWER_SLIDER_POWERSAVE])
+ return PLATFORM_PROFILE_LOW_POWER;
+
+ return -EOPNOTSUPP;
+}
+
+static inline u64 read_soc_slider(struct proc_thermal_device *proc_priv)
+{
+ return readq(proc_priv->mmio_base + SOC_POWER_SLIDER_OFFSET);
+}
+
+static inline void write_soc_slider(struct proc_thermal_device *proc_priv, u64 val)
+{
+ writeq(val, proc_priv->mmio_base + SOC_POWER_SLIDER_OFFSET);
+}
+
+#define SLIDER_OFFSET_MASK GENMASK_ULL(6, 4)
+
+static void set_soc_power_profile(struct proc_thermal_device *proc_priv, int slider)
+{
+ u64 val;
+
+ val = read_soc_slider(proc_priv);
+ val &= ~SLIDER_MASK;
+ val |= FIELD_PREP(SLIDER_MASK, slider) | BIT(SLIDER_ENABLE_BIT);
+
+ /* Set the slider offset from module params */
+ val &= ~SLIDER_OFFSET_MASK;
+ val |= FIELD_PREP(SLIDER_OFFSET_MASK, slider_offset);
+
+ write_soc_slider(proc_priv, val);
+}
+
+/* profile get/set callbacks are called with a profile lock, so no need for local locks */
+
+static int power_slider_platform_profile_set(struct device *dev,
+ enum platform_profile_option profile)
+{
+ struct proc_thermal_device *proc_priv;
+ int slider;
+
+ proc_priv = dev_get_drvdata(dev);
+ if (!proc_priv)
+ return -EOPNOTSUPP;
+
+ guard(mutex)(&slider_param_lock);
+
+ slider_values[SOC_POWER_SLIDER_BALANCE] = slider_balanced_param;
+
+ slider = convert_profile_to_power_slider(profile);
+ if (slider < 0)
+ return slider;
+
+ set_soc_power_profile(proc_priv, slider);
+
+ return 0;
+}
+
+static int power_slider_platform_profile_get(struct device *dev,
+ enum platform_profile_option *profile)
+{
+ struct proc_thermal_device *proc_priv;
+ int slider, ret;
+ u64 val;
+
+ proc_priv = dev_get_drvdata(dev);
+ if (!proc_priv)
+ return -EOPNOTSUPP;
+
+ val = read_soc_slider(proc_priv);
+ slider = FIELD_GET(SLIDER_MASK, val);
+
+ ret = convert_power_slider_to_profile(slider);
+ if (ret < 0)
+ return ret;
+
+ *profile = ret;
+
+ return 0;
+}
+
+static int power_slider_platform_profile_probe(void *drvdata, unsigned long *choices)
+{
+ set_bit(PLATFORM_PROFILE_LOW_POWER, choices);
+ set_bit(PLATFORM_PROFILE_BALANCED, choices);
+ set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
+
+ return 0;
+}
+
+static const struct platform_profile_ops power_slider_platform_profile_ops = {
+ .probe = power_slider_platform_profile_probe,
+ .profile_get = power_slider_platform_profile_get,
+ .profile_set = power_slider_platform_profile_set,
+};
+
+int proc_thermal_soc_power_slider_add(struct pci_dev *pdev, struct proc_thermal_device *proc_priv)
+{
+ struct device *ppdev;
+
+ set_soc_power_profile(proc_priv, slider_values[SOC_POWER_SLIDER_BALANCE]);
+
+ ppdev = devm_platform_profile_register(&pdev->dev, "SoC Power Slider", proc_priv,
+ &power_slider_platform_profile_ops);
+
+ return PTR_ERR_OR_ZERO(ppdev);
+}
+EXPORT_SYMBOL_NS_GPL(proc_thermal_soc_power_slider_add, "INT340X_THERMAL");
+
+static u64 soc_slider_save;
+
+void proc_thermal_soc_power_slider_suspend(struct proc_thermal_device *proc_priv)
+{
+ soc_slider_save = read_soc_slider(proc_priv);
+}
+EXPORT_SYMBOL_NS_GPL(proc_thermal_soc_power_slider_suspend, "INT340X_THERMAL");
+
+void proc_thermal_soc_power_slider_resume(struct proc_thermal_device *proc_priv)
+{
+ write_soc_slider(proc_priv, soc_slider_save);
+}
+EXPORT_SYMBOL_NS_GPL(proc_thermal_soc_power_slider_resume, "INT340X_THERMAL");
+
+MODULE_IMPORT_NS("INT340X_THERMAL");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Processor Thermal Power Slider Interface");
diff --git a/drivers/thermal/k3_j72xx_bandgap.c b/drivers/thermal/k3_j72xx_bandgap.c
index a36289e61315..d9ec3bf19496 100644
--- a/drivers/thermal/k3_j72xx_bandgap.c
+++ b/drivers/thermal/k3_j72xx_bandgap.c
@@ -20,6 +20,8 @@
#include <linux/delay.h>
#include <linux/slab.h>
+#include "thermal_hwmon.h"
+
#define K3_VTM_DEVINFO_PWR0_OFFSET 0x4
#define K3_VTM_DEVINFO_PWR0_TEMPSENS_CT_MASK 0xf0
#define K3_VTM_TMPSENS0_CTRL_OFFSET 0x300
@@ -513,6 +515,8 @@ static int k3_j72xx_bandgap_probe(struct platform_device *pdev)
ret = PTR_ERR(ti_thermal);
goto err_free_ref_table;
}
+
+ devm_thermal_add_hwmon_sysfs(bgp->dev, ti_thermal);
}
platform_set_drvdata(pdev, bgp);
diff --git a/drivers/thermal/mediatek/lvts_thermal.c b/drivers/thermal/mediatek/lvts_thermal.c
index f4d1e66d7db9..ab55b20cda47 100644
--- a/drivers/thermal/mediatek/lvts_thermal.c
+++ b/drivers/thermal/mediatek/lvts_thermal.c
@@ -639,7 +639,7 @@ static int lvts_sensor_init(struct device *dev, struct lvts_ctrl *lvts_ctrl,
lvts_sensor[i].low_thresh = INT_MIN;
lvts_sensor[i].high_thresh = INT_MIN;
- };
+ }
lvts_ctrl->valid_sensor_mask = lvts_ctrl_data->valid_sensor_mask;
diff --git a/drivers/thermal/qcom/Kconfig b/drivers/thermal/qcom/Kconfig
index 2c7f3f9a26eb..a6bb01082ec6 100644
--- a/drivers/thermal/qcom/Kconfig
+++ b/drivers/thermal/qcom/Kconfig
@@ -34,7 +34,8 @@ config QCOM_SPMI_TEMP_ALARM
config QCOM_LMH
tristate "Qualcomm Limits Management Hardware"
- depends on ARCH_QCOM && QCOM_SCM
+ depends on ARCH_QCOM || COMPILE_TEST
+ select QCOM_SCM
help
This enables initialization of Qualcomm limits management
hardware(LMh). LMh allows for hardware-enforced mitigation for cpus based on
diff --git a/drivers/thermal/qcom/lmh.c b/drivers/thermal/qcom/lmh.c
index 75eaa9a68ab8..ddadcfada513 100644
--- a/drivers/thermal/qcom/lmh.c
+++ b/drivers/thermal/qcom/lmh.c
@@ -5,6 +5,8 @@
*/
#include <linux/module.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdesc.h>
#include <linux/irqdomain.h>
#include <linux/err.h>
#include <linux/platform_device.h>
@@ -204,7 +206,7 @@ static int lmh_probe(struct platform_device *pdev)
ret = qcom_scm_lmh_dcvsh(LMH_SUB_FN_THERMAL, LMH_TH_LOW_THRESHOLD, temp_low,
LMH_NODE_DCVS, node_id, 0);
if (ret) {
- dev_err(dev, "Error setting thermal ARM threshold%d\n", ret);
+ dev_err(dev, "Error setting thermal LOW threshold%d\n", ret);
return ret;
}
diff --git a/drivers/thermal/renesas/Kconfig b/drivers/thermal/renesas/Kconfig
index dcf5fc5ae08e..c762c1c30d5a 100644
--- a/drivers/thermal/renesas/Kconfig
+++ b/drivers/thermal/renesas/Kconfig
@@ -10,13 +10,13 @@ config RCAR_THERMAL
thermal framework.
config RCAR_GEN3_THERMAL
- tristate "Renesas R-Car Gen3 and RZ/G2 thermal driver"
+ tristate "Renesas R-Car Gen3/Gen4 and RZ/G2 thermal driver"
depends on ARCH_RENESAS || COMPILE_TEST
depends on HAS_IOMEM
depends on OF
help
- Enable this to plug the R-Car Gen3 or RZ/G2 thermal sensor driver into
- the Linux thermal framework.
+ Enable this to plug the R-Car Gen3/Gen4 or RZ/G2 thermal sensor
+ driver into the Linux thermal framework.
config RZG2L_THERMAL
tristate "Renesas RZ/G2L thermal driver"
@@ -26,3 +26,18 @@ config RZG2L_THERMAL
help
Enable this to plug the RZ/G2L thermal sensor driver into the Linux
thermal framework.
+
+config RZG3S_THERMAL
+ tristate "Renesas RZ/G3S thermal driver"
+ depends on ARCH_R9A08G045 || COMPILE_TEST
+ depends on OF && IIO && RZG2L_ADC
+ help
+ Enable this to plug the RZ/G3S thermal sensor driver into the Linux
+ thermal framework.
+
+config RZG3E_THERMAL
+ tristate "Renesas RZ/G3E thermal driver"
+ depends on ARCH_RENESAS || COMPILE_TEST
+ help
+ Enable this to plug the RZ/G3E thermal sensor driver into the Linux
+ thermal framework.
diff --git a/drivers/thermal/renesas/Makefile b/drivers/thermal/renesas/Makefile
index bf9cb3cb94d6..0ea592247572 100644
--- a/drivers/thermal/renesas/Makefile
+++ b/drivers/thermal/renesas/Makefile
@@ -3,3 +3,6 @@
obj-$(CONFIG_RCAR_GEN3_THERMAL) += rcar_gen3_thermal.o
obj-$(CONFIG_RCAR_THERMAL) += rcar_thermal.o
obj-$(CONFIG_RZG2L_THERMAL) += rzg2l_thermal.o
+obj-$(CONFIG_RZG3E_THERMAL) += rzg3e_thermal.o
+obj-$(CONFIG_RZG3S_THERMAL) += rzg3s_thermal.o
diff --git a/drivers/thermal/renesas/rcar_gen3_thermal.c b/drivers/thermal/renesas/rcar_gen3_thermal.c
index 24a702ee4c1f..3223de238d01 100644
--- a/drivers/thermal/renesas/rcar_gen3_thermal.c
+++ b/drivers/thermal/renesas/rcar_gen3_thermal.c
@@ -73,11 +73,17 @@ struct rcar_gen3_thermal_fuse_info {
u32 mask;
};
+struct rcar_gen3_thermal_fuse_default {
+ u32 ptat[3];
+ u32 thcodes[TSC_MAX_NUM][3];
+};
+
struct rcar_thermal_info {
int scale;
int adj_below;
int adj_above;
const struct rcar_gen3_thermal_fuse_info *fuses;
+ const struct rcar_gen3_thermal_fuse_default *fuse_defaults;
};
struct equation_set_coef {
@@ -165,7 +171,7 @@ static int rcar_gen3_thermal_get_temp(struct thermal_zone_device *tz, int *temp)
const struct equation_set_coef *coef;
int adj, decicelsius, reg, thcode;
- /* Read register and convert to mili Celsius */
+ /* Read register and convert to millidegree Celsius */
reg = rcar_gen3_thermal_read(tsc, REG_GEN3_TEMP) & CTEMP_MASK;
if (reg < tsc->thcode[1]) {
@@ -289,6 +295,7 @@ static void rcar_gen3_thermal_fetch_fuses(struct rcar_gen3_thermal_priv *priv)
static bool rcar_gen3_thermal_read_fuses(struct rcar_gen3_thermal_priv *priv)
{
+ const struct rcar_gen3_thermal_fuse_default *fuse_defaults = priv->info->fuse_defaults;
unsigned int i;
u32 thscp;
@@ -297,24 +304,16 @@ static bool rcar_gen3_thermal_read_fuses(struct rcar_gen3_thermal_priv *priv)
if (!priv->info->fuses ||
(thscp & THSCP_COR_PARA_VLD) != THSCP_COR_PARA_VLD) {
/* Default THCODE values in case FUSEs are not set. */
- static const int thcodes[TSC_MAX_NUM][3] = {
- { 3397, 2800, 2221 },
- { 3393, 2795, 2216 },
- { 3389, 2805, 2237 },
- { 3415, 2694, 2195 },
- { 3356, 2724, 2244 },
- };
-
- priv->ptat[0] = 2631;
- priv->ptat[1] = 1509;
- priv->ptat[2] = 435;
+ priv->ptat[0] = fuse_defaults->ptat[0];
+ priv->ptat[1] = fuse_defaults->ptat[1];
+ priv->ptat[2] = fuse_defaults->ptat[2];
for (i = 0; i < priv->num_tscs; i++) {
struct rcar_gen3_thermal_tsc *tsc = priv->tscs[i];
- tsc->thcode[0] = thcodes[i][0];
- tsc->thcode[1] = thcodes[i][1];
- tsc->thcode[2] = thcodes[i][2];
+ tsc->thcode[0] = fuse_defaults->thcodes[i][0];
+ tsc->thcode[1] = fuse_defaults->thcodes[i][1];
+ tsc->thcode[2] = fuse_defaults->thcodes[i][2];
}
return false;
@@ -361,11 +360,33 @@ static const struct rcar_gen3_thermal_fuse_info rcar_gen3_thermal_fuse_info_gen4
.mask = GEN4_FUSE_MASK,
};
+static const struct rcar_gen3_thermal_fuse_default rcar_gen3_thermal_fuse_default_info_gen3 = {
+ .ptat = { 2631, 1509, 435 },
+ .thcodes = {
+ { 3397, 2800, 2221 },
+ { 3393, 2795, 2216 },
+ { 3389, 2805, 2237 },
+ { 3415, 2694, 2195 },
+ { 3356, 2724, 2244 },
+ },
+};
+
+static const struct rcar_gen3_thermal_fuse_default rcar_gen3_thermal_fuse_default_info_gen4 = {
+ .ptat = { 3274, 2164, 985 },
+ .thcodes = { /* All four THS units share the same trimming */
+ { 3218, 2617, 1980 },
+ { 3218, 2617, 1980 },
+ { 3218, 2617, 1980 },
+ { 3218, 2617, 1980 },
+ }
+};
+
static const struct rcar_thermal_info rcar_m3w_thermal_info = {
.scale = 157,
.adj_below = -41,
.adj_above = 116,
.fuses = &rcar_gen3_thermal_fuse_info_gen3,
+ .fuse_defaults = &rcar_gen3_thermal_fuse_default_info_gen3,
};
static const struct rcar_thermal_info rcar_gen3_thermal_info = {
@@ -373,6 +394,15 @@ static const struct rcar_thermal_info rcar_gen3_thermal_info = {
.adj_below = -41,
.adj_above = 126,
.fuses = &rcar_gen3_thermal_fuse_info_gen3,
+ .fuse_defaults = &rcar_gen3_thermal_fuse_default_info_gen3,
+};
+
+static const struct rcar_thermal_info rcar_s4_thermal_info = {
+ .scale = 167,
+ .adj_below = -41,
+ .adj_above = 126,
+ .fuses = &rcar_gen3_thermal_fuse_info_gen4,
+ .fuse_defaults = &rcar_gen3_thermal_fuse_default_info_gen3,
};
static const struct rcar_thermal_info rcar_gen4_thermal_info = {
@@ -380,6 +410,7 @@ static const struct rcar_thermal_info rcar_gen4_thermal_info = {
.adj_below = -41,
.adj_above = 126,
.fuses = &rcar_gen3_thermal_fuse_info_gen4,
+ .fuse_defaults = &rcar_gen3_thermal_fuse_default_info_gen4,
};
static const struct of_device_id rcar_gen3_thermal_dt_ids[] = {
@@ -421,7 +452,7 @@ static const struct of_device_id rcar_gen3_thermal_dt_ids[] = {
},
{
.compatible = "renesas,r8a779f0-thermal",
- .data = &rcar_gen4_thermal_info,
+ .data = &rcar_s4_thermal_info,
},
{
.compatible = "renesas,r8a779g0-thermal",
diff --git a/drivers/thermal/renesas/rzg3e_thermal.c b/drivers/thermal/renesas/rzg3e_thermal.c
new file mode 100644
index 000000000000..e66d73ca6752
--- /dev/null
+++ b/drivers/thermal/renesas/rzg3e_thermal.c
@@ -0,0 +1,547 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RZ/G3E TSU Temperature Sensor Unit
+ *
+ * Copyright (C) 2025 Renesas Electronics Corporation
+ */
+#include <linux/clk.h>
+#include <linux/cleanup.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/thermal.h>
+#include <linux/units.h>
+
+#include "../thermal_hwmon.h"
+
+/* TSU Register offsets and bits */
+#define TSU_SSUSR 0x00
+#define TSU_SSUSR_EN_TS BIT(0)
+#define TSU_SSUSR_ADC_PD_TS BIT(1)
+#define TSU_SSUSR_SOC_TS_EN BIT(2)
+
+#define TSU_STRGR 0x04
+#define TSU_STRGR_ADST BIT(0)
+
+#define TSU_SOSR1 0x08
+#define TSU_SOSR1_ADCT_8 0x03
+#define TSU_SOSR1_ADCS BIT(4)
+#define TSU_SOSR1_OUTSEL BIT(9)
+
+#define TSU_SCRR 0x10
+#define TSU_SCRR_OUT12BIT_TS GENMASK(11, 0)
+
+#define TSU_SSR 0x14
+#define TSU_SSR_CONV BIT(0)
+
+#define TSU_CMSR 0x18
+#define TSU_CMSR_CMPEN BIT(0)
+
+#define TSU_LLSR 0x1C
+#define TSU_ULSR 0x20
+
+#define TSU_SISR 0x30
+#define TSU_SISR_ADF BIT(0)
+#define TSU_SISR_CMPF BIT(1)
+
+#define TSU_SIER 0x34
+#define TSU_SIER_CMPIE BIT(1)
+
+#define TSU_SICR 0x38
+#define TSU_SICR_ADCLR BIT(0)
+#define TSU_SICR_CMPCLR BIT(1)
+
+/* Temperature calculation constants from datasheet */
+#define TSU_TEMP_D (-41)
+#define TSU_TEMP_E 126
+#define TSU_CODE_MAX 0xFFF
+
+/* Timing specifications from datasheet */
+#define TSU_POWERUP_TIME_US 120 /* 120T at 1MHz sensor clock per datasheet */
+#define TSU_CONV_TIME_US 50 /* Per sample conversion time */
+#define TSU_POLL_DELAY_US 10 /* Polling interval */
+#define TSU_MIN_CLOCK_RATE 24000000 /* TSU_PCLK minimum 24MHz */
+
+/**
+ * struct rzg3e_thermal_priv - RZ/G3E TSU private data
+ * @base: TSU register base
+ * @dev: device pointer
+ * @syscon: regmap for calibration values
+ * @zone: thermal zone device
+ * @rstc: reset control
+ * @trmval0: calibration value 0 (b)
+ * @trmval1: calibration value 1 (c)
+ * @trim_offset: offset for trim registers in syscon
+ * @lock: protects hardware access during conversions
+ */
+struct rzg3e_thermal_priv {
+ void __iomem *base;
+ struct device *dev;
+ struct regmap *syscon;
+ struct thermal_zone_device *zone;
+ struct reset_control *rstc;
+ u16 trmval0;
+ u16 trmval1;
+ u32 trim_offset;
+ struct mutex lock;
+};
+
+static int rzg3e_thermal_power_on(struct rzg3e_thermal_priv *priv)
+{
+ u32 val;
+ int ret;
+
+ /* Clear any pending interrupts */
+ writel(TSU_SICR_ADCLR | TSU_SICR_CMPCLR, priv->base + TSU_SICR);
+
+ /* Disable all interrupts during setup */
+ writel(0, priv->base + TSU_SIER);
+
+ /*
+ * Power-on sequence per datasheet 7.11.9.1:
+ * SOC_TS_EN must be set at the same time as, or before, EN_TS and ADC_PD_TS
+ */
+ val = TSU_SSUSR_SOC_TS_EN | TSU_SSUSR_EN_TS;
+ writel(val, priv->base + TSU_SSUSR);
+
+ /* Wait for sensor stabilization per datasheet 7.11.7.1 */
+ usleep_range(TSU_POWERUP_TIME_US, TSU_POWERUP_TIME_US + 10);
+
+ /* Configure for average mode with 8 samples */
+ val = TSU_SOSR1_OUTSEL | TSU_SOSR1_ADCT_8;
+ writel(val, priv->base + TSU_SOSR1);
+
+ /* Ensure we're in single scan mode (default) */
+ val = readl(priv->base + TSU_SOSR1);
+ if (val & TSU_SOSR1_ADCS) {
+ dev_err(priv->dev, "Invalid scan mode setting\n");
+ return -EINVAL;
+ }
+
+ /* Wait for any ongoing conversion to complete */
+ ret = readl_poll_timeout(priv->base + TSU_SSR, val,
+ !(val & TSU_SSR_CONV),
+ TSU_POLL_DELAY_US,
+ USEC_PER_MSEC);
+ if (ret) {
+ dev_err(priv->dev, "Timeout waiting for conversion\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void rzg3e_thermal_power_off(struct rzg3e_thermal_priv *priv)
+{
+ /* Disable all interrupts */
+ writel(0, priv->base + TSU_SIER);
+
+ /* Clear pending interrupts */
+ writel(TSU_SICR_ADCLR | TSU_SICR_CMPCLR, priv->base + TSU_SICR);
+
+ /* Power down sequence per datasheet */
+ writel(TSU_SSUSR_ADC_PD_TS, priv->base + TSU_SSUSR);
+}
+
+/*
+ * Convert 12-bit sensor code to temperature in millicelsius
+ * Formula from datasheet 7.11.7.8:
+ * T(°C) = ((e - d) / (c - b)) * (a - b) + d
+ * where: a = sensor code, b = trmval0, c = trmval1, d = -41, e = 126
+ */
+static int rzg3e_thermal_code_to_temp(struct rzg3e_thermal_priv *priv, u16 code)
+{
+ int temp_e_mc = TSU_TEMP_E * MILLIDEGREE_PER_DEGREE;
+ int temp_d_mc = TSU_TEMP_D * MILLIDEGREE_PER_DEGREE;
+ s64 numerator, denominator;
+ int temp_mc;
+
+ numerator = (temp_e_mc - temp_d_mc) * (s64)(code - priv->trmval0);
+ denominator = priv->trmval1 - priv->trmval0;
+
+ temp_mc = div64_s64(numerator, denominator) + temp_d_mc;
+
+ return clamp(temp_mc, temp_d_mc, temp_e_mc);
+}
+
+/*
+ * Convert temperature in millicelsius to 12-bit sensor code
+ * Formula from datasheet 7.11.7.9 (inverse of above)
+ */
+static u16 rzg3e_thermal_temp_to_code(struct rzg3e_thermal_priv *priv, int temp_mc)
+{
+ int temp_e_mc = TSU_TEMP_E * MILLIDEGREE_PER_DEGREE;
+ int temp_d_mc = TSU_TEMP_D * MILLIDEGREE_PER_DEGREE;
+ s64 numerator, denominator;
+ s64 code;
+
+ numerator = (temp_mc - temp_d_mc) * (priv->trmval1 - priv->trmval0);
+ denominator = temp_e_mc - temp_d_mc;
+
+ code = div64_s64(numerator, denominator) + priv->trmval0;
+
+ return clamp_val(code, 0, TSU_CODE_MAX);
+}
+
+static int rzg3e_thermal_get_temp(struct thermal_zone_device *tz, int *temp)
+{
+ struct rzg3e_thermal_priv *priv = thermal_zone_device_priv(tz);
+ u32 status, code;
+ int ret, timeout;
+
+ ret = pm_runtime_resume_and_get(priv->dev);
+ if (ret < 0)
+ return ret;
+
+ guard(mutex)(&priv->lock);
+
+ /* Clear any previous conversion status */
+ writel(TSU_SICR_ADCLR, priv->base + TSU_SICR);
+
+ /* Start single conversion */
+ writel(TSU_STRGR_ADST, priv->base + TSU_STRGR);
+
+ /* Wait for conversion completion - 8 samples at ~50us each */
+ timeout = TSU_CONV_TIME_US * 8 * 2; /* Double for margin */
+ ret = readl_poll_timeout(priv->base + TSU_SISR, status,
+ status & TSU_SISR_ADF,
+ TSU_POLL_DELAY_US, timeout);
+ if (ret) {
+ dev_err(priv->dev, "Conversion timeout (status=0x%08x)\n", status);
+ goto out;
+ }
+
+ /* Read the averaged result and clear the complete flag */
+ code = readl(priv->base + TSU_SCRR) & TSU_SCRR_OUT12BIT_TS;
+ writel(TSU_SICR_ADCLR, priv->base + TSU_SICR);
+
+ /* Convert to temperature */
+ *temp = rzg3e_thermal_code_to_temp(priv, code);
+
+ dev_dbg(priv->dev, "temp=%d mC (%d.%03d°C), code=0x%03x\n",
+ *temp, *temp / 1000, abs(*temp) % 1000, code);
+
+out:
+ pm_runtime_mark_last_busy(priv->dev);
+ pm_runtime_put_autosuspend(priv->dev);
+ return ret;
+}
+
+static int rzg3e_thermal_set_trips(struct thermal_zone_device *tz,
+ int low, int high)
+{
+ struct rzg3e_thermal_priv *priv = thermal_zone_device_priv(tz);
+ u16 low_code, high_code;
+ u32 val;
+ int ret;
+
+ /* Hardware requires low < high */
+ if (low >= high)
+ return -EINVAL;
+
+ ret = pm_runtime_resume_and_get(priv->dev);
+ if (ret < 0)
+ return ret;
+
+ guard(mutex)(&priv->lock);
+
+ /* Convert temperatures to codes */
+ low_code = rzg3e_thermal_temp_to_code(priv, low);
+ high_code = rzg3e_thermal_temp_to_code(priv, high);
+
+ dev_dbg(priv->dev, "set_trips: low=%d high=%d (codes: 0x%03x/0x%03x)\n",
+ low, high, low_code, high_code);
+
+ /* Disable comparison during reconfiguration */
+ writel(0, priv->base + TSU_SIER);
+ writel(0, priv->base + TSU_CMSR);
+
+ /* Clear any pending comparison interrupts */
+ writel(TSU_SICR_CMPCLR, priv->base + TSU_SICR);
+
+ /* Set trip points */
+ writel(low_code, priv->base + TSU_LLSR);
+ writel(high_code, priv->base + TSU_ULSR);
+
+ /*
+ * Ensure OUTSEL is set for comparison per datasheet 7.11.7.4
+ * Comparison uses averaged data
+ */
+ val = readl(priv->base + TSU_SOSR1);
+ val |= TSU_SOSR1_OUTSEL;
+ writel(val, priv->base + TSU_SOSR1);
+
+ /* Enable comparison with "out of range" mode (CMPCOND=0) */
+ writel(TSU_CMSR_CMPEN, priv->base + TSU_CMSR);
+
+ /* Unmask compare IRQ and start a conversion to evaluate window */
+ writel(TSU_SIER_CMPIE, priv->base + TSU_SIER);
+ writel(TSU_STRGR_ADST, priv->base + TSU_STRGR);
+
+ pm_runtime_mark_last_busy(priv->dev);
+ pm_runtime_put_autosuspend(priv->dev);
+
+ return 0;
+}
+
+static irqreturn_t rzg3e_thermal_irq_thread(int irq, void *data)
+{
+ struct rzg3e_thermal_priv *priv = data;
+
+ dev_dbg(priv->dev, "Temperature threshold crossed\n");
+
+ /* Notify thermal framework to re-evaluate trip points */
+ thermal_zone_device_update(priv->zone, THERMAL_TRIP_VIOLATED);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rzg3e_thermal_irq(int irq, void *data)
+{
+ struct rzg3e_thermal_priv *priv = data;
+ u32 status;
+
+ status = readl(priv->base + TSU_SISR);
+
+ /* Check if comparison interrupt occurred */
+ if (status & TSU_SISR_CMPF) {
+ /* Clear irq flag and disable interrupt until reconfigured */
+ writel(TSU_SICR_CMPCLR, priv->base + TSU_SICR);
+ writel(0, priv->base + TSU_SIER);
+
+ return IRQ_WAKE_THREAD;
+ }
+
+ return IRQ_NONE;
+}
+
+static const struct thermal_zone_device_ops rzg3e_tz_ops = {
+ .get_temp = rzg3e_thermal_get_temp,
+ .set_trips = rzg3e_thermal_set_trips,
+};
+
+static int rzg3e_thermal_get_calibration(struct rzg3e_thermal_priv *priv)
+{
+ u32 val;
+ int ret;
+
+ /* Read calibration values from syscon */
+ ret = regmap_read(priv->syscon, priv->trim_offset, &val);
+ if (ret)
+ return ret;
+ priv->trmval0 = val & GENMASK(11, 0);
+
+ ret = regmap_read(priv->syscon, priv->trim_offset + 4, &val);
+ if (ret)
+ return ret;
+ priv->trmval1 = val & GENMASK(11, 0);
+
+ /* Validate calibration data */
+ if (!priv->trmval0 || !priv->trmval1 ||
+ priv->trmval0 == priv->trmval1 ||
+ priv->trmval0 == 0xFFF || priv->trmval1 == 0xFFF) {
+ dev_err(priv->dev, "Invalid calibration: b=0x%03x, c=0x%03x\n",
+ priv->trmval0, priv->trmval1);
+ return -EINVAL;
+ }
+
+ dev_dbg(priv->dev, "Calibration: b=0x%03x (%u), c=0x%03x (%u)\n",
+ priv->trmval0, priv->trmval0, priv->trmval1, priv->trmval1);
+
+ return 0;
+}
+
+static int rzg3e_thermal_parse_dt(struct rzg3e_thermal_priv *priv)
+{
+ struct device_node *np = priv->dev->of_node;
+ u32 offset;
+
+ priv->syscon = syscon_regmap_lookup_by_phandle_args(np, "renesas,tsu-trim", 1, &offset);
+ if (IS_ERR(priv->syscon))
+ return dev_err_probe(priv->dev, PTR_ERR(priv->syscon),
+ "Failed to parse renesas,tsu-trim\n");
+
+ priv->trim_offset = offset;
+ return 0;
+}
+
+static int rzg3e_thermal_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rzg3e_thermal_priv *priv;
+ struct clk *clk;
+ int irq, ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+ ret = devm_mutex_init(dev, &priv->lock);
+ if (ret)
+ return ret;
+ platform_set_drvdata(pdev, priv);
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ /* Parse device tree for trim register info */
+ ret = rzg3e_thermal_parse_dt(priv);
+ if (ret)
+ return ret;
+
+ /* Get clock to verify frequency - clock is managed by power domain */
+ clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk),
+ "Failed to get clock\n");
+
+ if (clk_get_rate(clk) < TSU_MIN_CLOCK_RATE)
+ return dev_err_probe(dev, -EINVAL,
+ "Clock rate %lu Hz too low (min %u Hz)\n",
+ clk_get_rate(clk), TSU_MIN_CLOCK_RATE);
+
+ priv->rstc = devm_reset_control_get_exclusive_deasserted(dev, NULL);
+ if (IS_ERR(priv->rstc))
+ return dev_err_probe(dev, PTR_ERR(priv->rstc),
+ "Failed to get/deassert reset control\n");
+
+ /* Get calibration data */
+ ret = rzg3e_thermal_get_calibration(priv);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to get valid calibration data\n");
+
+ /* Get comparison interrupt */
+ irq = platform_get_irq_byname(pdev, "adcmpi");
+ if (irq < 0)
+ return irq;
+
+ /* Enable runtime PM */
+ pm_runtime_set_autosuspend_delay(dev, 1000);
+ pm_runtime_use_autosuspend(dev);
+ devm_pm_runtime_enable(dev);
+
+ /* Initial hardware setup */
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Runtime resume failed\n");
+
+ /* Register thermal zone - this will trigger DT parsing */
+ priv->zone = devm_thermal_of_zone_register(dev, 0, priv, &rzg3e_tz_ops);
+ if (IS_ERR(priv->zone)) {
+ ret = PTR_ERR(priv->zone);
+ dev_err(dev, "Failed to register thermal zone: %d\n", ret);
+ goto err_pm_put;
+ }
+
+ /* Request threaded IRQ for comparison interrupt */
+ ret = devm_request_threaded_irq(dev, irq, rzg3e_thermal_irq,
+ rzg3e_thermal_irq_thread,
+ IRQF_ONESHOT, "rzg3e_thermal", priv);
+ if (ret) {
+ dev_err(dev, "Failed to request IRQ: %d\n", ret);
+ goto err_pm_put;
+ }
+
+ /* Add hwmon sysfs interface */
+ ret = devm_thermal_add_hwmon_sysfs(dev, priv->zone);
+ if (ret)
+ dev_warn(dev, "Failed to add hwmon sysfs attributes\n");
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ dev_info(dev, "RZ/G3E thermal sensor registered\n");
+
+ return 0;
+
+err_pm_put:
+ pm_runtime_put_sync(dev);
+ return ret;
+}
+
+static int rzg3e_thermal_runtime_suspend(struct device *dev)
+{
+ struct rzg3e_thermal_priv *priv = dev_get_drvdata(dev);
+
+ rzg3e_thermal_power_off(priv);
+ return 0;
+}
+
+static int rzg3e_thermal_runtime_resume(struct device *dev)
+{
+ struct rzg3e_thermal_priv *priv = dev_get_drvdata(dev);
+
+ return rzg3e_thermal_power_on(priv);
+}
+
+static int rzg3e_thermal_suspend(struct device *dev)
+{
+ struct rzg3e_thermal_priv *priv = dev_get_drvdata(dev);
+
+ /* If device is active, power it off */
+ if (pm_runtime_active(dev))
+ rzg3e_thermal_power_off(priv);
+
+ /* Assert reset to ensure clean state after resume */
+ reset_control_assert(priv->rstc);
+
+ return 0;
+}
+
+static int rzg3e_thermal_resume(struct device *dev)
+{
+ struct rzg3e_thermal_priv *priv = dev_get_drvdata(dev);
+ int ret;
+
+ /* Deassert reset */
+ ret = reset_control_deassert(priv->rstc);
+ if (ret) {
+ dev_err(dev, "Failed to deassert reset: %d\n", ret);
+ return ret;
+ }
+
+ /* If device was active before suspend, power it back on */
+ if (pm_runtime_active(dev))
+ return rzg3e_thermal_power_on(priv);
+
+ return 0;
+}
+
+static const struct dev_pm_ops rzg3e_thermal_pm_ops = {
+ RUNTIME_PM_OPS(rzg3e_thermal_runtime_suspend,
+ rzg3e_thermal_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(rzg3e_thermal_suspend, rzg3e_thermal_resume)
+};
+
+static const struct of_device_id rzg3e_thermal_dt_ids[] = {
+ { .compatible = "renesas,r9a09g047-tsu" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, rzg3e_thermal_dt_ids);
+
+static struct platform_driver rzg3e_thermal_driver = {
+ .driver = {
+ .name = "rzg3e_thermal",
+ .of_match_table = rzg3e_thermal_dt_ids,
+ .pm = pm_ptr(&rzg3e_thermal_pm_ops),
+ },
+ .probe = rzg3e_thermal_probe,
+};
+module_platform_driver(rzg3e_thermal_driver);
+
+MODULE_DESCRIPTION("Renesas RZ/G3E TSU Thermal Sensor Driver");
+MODULE_AUTHOR("John Madieu <john.madieu.xa@bp.renesas.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/thermal/renesas/rzg3s_thermal.c b/drivers/thermal/renesas/rzg3s_thermal.c
new file mode 100644
index 000000000000..e25e36c99a88
--- /dev/null
+++ b/drivers/thermal/renesas/rzg3s_thermal.c
@@ -0,0 +1,272 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RZ/G3S TSU Thermal Sensor Driver
+ *
+ * Copyright (C) 2024 Renesas Electronics Corporation
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/iio/consumer.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/thermal.h>
+#include <linux/units.h>
+
+#include "../thermal_hwmon.h"
+
+#define TSU_SM 0x0
+#define TSU_SM_EN BIT(0)
+#define TSU_SM_OE BIT(1)
+#define OTPTSUTRIM_REG(n) (0x18 + (n) * 0x4)
+#define OTPTSUTRIM_EN_MASK BIT(31)
+#define OTPTSUTRIM_MASK GENMASK(11, 0)
+
+#define TSU_READ_STEPS 8
+
+/* Default calibration values used when FUSE values are missing. */
+#define SW_CALIB0_VAL 1297
+#define SW_CALIB1_VAL 751
+
+#define MCELSIUS(temp) ((temp) * MILLIDEGREE_PER_DEGREE)
+
+/**
+ * struct rzg3s_thermal_priv - RZ/G3S thermal private data structure
+ * @base: TSU base address
+ * @dev: device pointer
+ * @tz: thermal zone pointer
+ * @rstc: reset control
+ * @channel: IIO channel to read the TSU
+ * @mode: current device mode
+ * @calib0: calibration value
+ * @calib1: calibration value
+ */
+struct rzg3s_thermal_priv {
+ void __iomem *base;
+ struct device *dev;
+ struct thermal_zone_device *tz;
+ struct reset_control *rstc;
+ struct iio_channel *channel;
+ enum thermal_device_mode mode;
+ u16 calib0;
+ u16 calib1;
+};
+
+static int rzg3s_thermal_get_temp(struct thermal_zone_device *tz, int *temp)
+{
+ struct rzg3s_thermal_priv *priv = thermal_zone_device_priv(tz);
+ int ts_code_ave = 0;
+
+ if (priv->mode != THERMAL_DEVICE_ENABLED)
+ return -EAGAIN;
+
+ for (u8 i = 0; i < TSU_READ_STEPS; i++) {
+ int ret, val;
+
+ ret = iio_read_channel_raw(priv->channel, &val);
+ if (ret < 0)
+ return ret;
+
+ ts_code_ave += val;
+ /*
+ * According to the HW manual (Rev.1.10, section 40.4.4 Procedure for Measuring
+ * the Temperature) we need to wait here at least 3 us.
+ */
+ usleep_range(5, 10);
+ }
+
+ ts_code_ave = DIV_ROUND_CLOSEST(MCELSIUS(ts_code_ave), TSU_READ_STEPS);
+
+ /*
+ * According to the HW manual (Rev.1.10, section 40.4.4 Procedure for Measuring the
+ * Temperature) the computation formula is as follows:
+ *
+ * Tj = (ts_code_ave - priv->calib1) * 165 / (priv->calib0 - priv->calib1) - 40
+ *
+ * Convert everything to millidegrees Celsius before applying the formula to
+ * losing precision.
+ */
+
+ *temp = div_s64((s64)(ts_code_ave - MCELSIUS(priv->calib1)) * MCELSIUS(165),
+ MCELSIUS(priv->calib0 - priv->calib1)) - MCELSIUS(40);
+
+ /* Report it in millidegrees Celsius, rounded up to the nearest 0.5 degrees Celsius. */
+ *temp = roundup(*temp, 500);
+
+ return 0;
+}
+
+static void rzg3s_thermal_set_mode(struct rzg3s_thermal_priv *priv,
+ enum thermal_device_mode mode)
+{
+ struct device *dev = priv->dev;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return;
+
+ if (mode == THERMAL_DEVICE_DISABLED) {
+ writel(0, priv->base + TSU_SM);
+ } else {
+ writel(TSU_SM_EN, priv->base + TSU_SM);
+ /*
+ * According to the HW manual (Rev.1.10, section 40.4.1 Procedure for
+ * Starting the TSU) we need to wait here 30us or more.
+ */
+ usleep_range(30, 40);
+
+ writel(TSU_SM_OE | TSU_SM_EN, priv->base + TSU_SM);
+ /*
+ * According to the HW manual (Rev.1.10, section 40.4.1 Procedure for
+ * Starting the TSU) we need to wait here 50us or more.
+ */
+ usleep_range(50, 60);
+ }
+
+ pm_runtime_put_autosuspend(dev);
+}
+
+static int rzg3s_thermal_change_mode(struct thermal_zone_device *tz,
+ enum thermal_device_mode mode)
+{
+ struct rzg3s_thermal_priv *priv = thermal_zone_device_priv(tz);
+
+ if (priv->mode == mode)
+ return 0;
+
+ rzg3s_thermal_set_mode(priv, mode);
+ priv->mode = mode;
+
+ return 0;
+}
+
+static const struct thermal_zone_device_ops rzg3s_tz_of_ops = {
+ .get_temp = rzg3s_thermal_get_temp,
+ .change_mode = rzg3s_thermal_change_mode,
+};
+
+static int rzg3s_thermal_read_calib(struct rzg3s_thermal_priv *priv)
+{
+ struct device *dev = priv->dev;
+ u32 val;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
+
+ val = readl(priv->base + OTPTSUTRIM_REG(0));
+ if (val & OTPTSUTRIM_EN_MASK)
+ priv->calib0 = FIELD_GET(OTPTSUTRIM_MASK, val);
+ else
+ priv->calib0 = SW_CALIB0_VAL;
+
+ val = readl(priv->base + OTPTSUTRIM_REG(1));
+ if (val & OTPTSUTRIM_EN_MASK)
+ priv->calib1 = FIELD_GET(OTPTSUTRIM_MASK, val);
+ else
+ priv->calib1 = SW_CALIB1_VAL;
+
+ pm_runtime_put_autosuspend(dev);
+
+ return 0;
+}
+
+static int rzg3s_thermal_probe(struct platform_device *pdev)
+{
+ struct rzg3s_thermal_priv *priv;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ priv->channel = devm_iio_channel_get(dev, "tsu");
+ if (IS_ERR(priv->channel))
+ return dev_err_probe(dev, PTR_ERR(priv->channel), "Failed to get IIO channel!\n");
+
+ priv->rstc = devm_reset_control_get_exclusive_deasserted(dev, NULL);
+ if (IS_ERR(priv->rstc))
+ return dev_err_probe(dev, PTR_ERR(priv->rstc), "Failed to get reset!\n");
+
+ priv->dev = dev;
+ priv->mode = THERMAL_DEVICE_DISABLED;
+ platform_set_drvdata(pdev, priv);
+
+ pm_runtime_set_autosuspend_delay(dev, 300);
+ pm_runtime_use_autosuspend(dev);
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enable runtime PM!\n");
+
+ ret = rzg3s_thermal_read_calib(priv);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to read calibration data!\n");
+
+ priv->tz = devm_thermal_of_zone_register(dev, 0, priv, &rzg3s_tz_of_ops);
+ if (IS_ERR(priv->tz))
+ return dev_err_probe(dev, PTR_ERR(priv->tz), "Failed to register thermal zone!\n");
+
+ ret = devm_thermal_add_hwmon_sysfs(dev, priv->tz);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add hwmon sysfs!\n");
+
+ return 0;
+}
+
+static int rzg3s_thermal_suspend(struct device *dev)
+{
+ struct rzg3s_thermal_priv *priv = dev_get_drvdata(dev);
+
+ rzg3s_thermal_set_mode(priv, THERMAL_DEVICE_DISABLED);
+
+ return reset_control_assert(priv->rstc);
+}
+
+static int rzg3s_thermal_resume(struct device *dev)
+{
+ struct rzg3s_thermal_priv *priv = dev_get_drvdata(dev);
+ int ret;
+
+ ret = reset_control_deassert(priv->rstc);
+ if (ret)
+ return ret;
+
+ if (priv->mode != THERMAL_DEVICE_DISABLED)
+ rzg3s_thermal_set_mode(priv, priv->mode);
+
+ return 0;
+}
+
+static const struct dev_pm_ops rzg3s_thermal_pm_ops = {
+ SYSTEM_SLEEP_PM_OPS(rzg3s_thermal_suspend, rzg3s_thermal_resume)
+};
+
+static const struct of_device_id rzg3s_thermal_dt_ids[] = {
+ { .compatible = "renesas,r9a08g045-tsu" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, rzg3s_thermal_dt_ids);
+
+static struct platform_driver rzg3s_thermal_driver = {
+ .driver = {
+ .name = "rzg3s-thermal",
+ .of_match_table = rzg3s_thermal_dt_ids,
+ .pm = pm_ptr(&rzg3s_thermal_pm_ops),
+ },
+ .probe = rzg3s_thermal_probe,
+};
+module_platform_driver(rzg3s_thermal_driver);
+
+MODULE_DESCRIPTION("Renesas RZ/G3S Thermal Sensor Unit Driver");
+MODULE_AUTHOR("Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c
index 3beff9b6fac3..c49ddf70f86e 100644
--- a/drivers/thermal/rockchip_thermal.c
+++ b/drivers/thermal/rockchip_thermal.c
@@ -74,6 +74,7 @@ struct chip_tsadc_table {
* @tshut_temp: the hardware-controlled shutdown temperature value, with no trim
* @tshut_mode: the hardware-controlled shutdown mode (0:CRU 1:GPIO)
* @tshut_polarity: the hardware-controlled active polarity (0:LOW 1:HIGH)
+ * @grf_required: true if a GRF is required for proper functionality
* @initialize: SoC special initialize tsadc controller method
* @irq_ack: clear the interrupt
* @control: enable/disable method for the tsadc controller
@@ -97,6 +98,9 @@ struct rockchip_tsadc_chip {
enum tshut_mode tshut_mode;
enum tshut_polarity tshut_polarity;
+ /* GRF availability */
+ bool grf_required;
+
/* Chip-wide methods */
void (*initialize)(struct regmap *grf,
void __iomem *reg, enum tshut_polarity p);
@@ -1098,10 +1102,9 @@ static const struct rockchip_tsadc_chip px30_tsadc_data = {
/* cpu, gpu */
.chn_offset = 0,
.chn_num = 2, /* 2 channels for tsadc */
-
+ .grf_required = true,
.tshut_mode = TSHUT_MODE_CRU, /* default TSHUT via CRU */
.tshut_temp = 95000,
-
.initialize = rk_tsadcv4_initialize,
.irq_ack = rk_tsadcv3_irq_ack,
.control = rk_tsadcv3_control,
@@ -1109,7 +1112,6 @@ static const struct rockchip_tsadc_chip px30_tsadc_data = {
.set_alarm_temp = rk_tsadcv2_alarm_temp,
.set_tshut_temp = rk_tsadcv2_tshut_temp,
.set_tshut_mode = rk_tsadcv2_tshut_mode,
-
.table = {
.id = rk3328_code_table,
.length = ARRAY_SIZE(rk3328_code_table),
@@ -1122,11 +1124,10 @@ static const struct rockchip_tsadc_chip rv1108_tsadc_data = {
/* cpu */
.chn_offset = 0,
.chn_num = 1, /* one channel for tsadc */
-
+ .grf_required = false,
.tshut_mode = TSHUT_MODE_GPIO, /* default TSHUT via GPIO give PMIC */
.tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */
.tshut_temp = 95000,
-
.initialize = rk_tsadcv2_initialize,
.irq_ack = rk_tsadcv3_irq_ack,
.control = rk_tsadcv3_control,
@@ -1134,7 +1135,6 @@ static const struct rockchip_tsadc_chip rv1108_tsadc_data = {
.set_alarm_temp = rk_tsadcv2_alarm_temp,
.set_tshut_temp = rk_tsadcv2_tshut_temp,
.set_tshut_mode = rk_tsadcv2_tshut_mode,
-
.table = {
.id = rv1108_table,
.length = ARRAY_SIZE(rv1108_table),
@@ -1147,11 +1147,10 @@ static const struct rockchip_tsadc_chip rk3228_tsadc_data = {
/* cpu */
.chn_offset = 0,
.chn_num = 1, /* one channel for tsadc */
-
+ .grf_required = false,
.tshut_mode = TSHUT_MODE_GPIO, /* default TSHUT via GPIO give PMIC */
.tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */
.tshut_temp = 95000,
-
.initialize = rk_tsadcv2_initialize,
.irq_ack = rk_tsadcv3_irq_ack,
.control = rk_tsadcv3_control,
@@ -1159,7 +1158,6 @@ static const struct rockchip_tsadc_chip rk3228_tsadc_data = {
.set_alarm_temp = rk_tsadcv2_alarm_temp,
.set_tshut_temp = rk_tsadcv2_tshut_temp,
.set_tshut_mode = rk_tsadcv2_tshut_mode,
-
.table = {
.id = rk3228_code_table,
.length = ARRAY_SIZE(rk3228_code_table),
@@ -1172,11 +1170,10 @@ static const struct rockchip_tsadc_chip rk3288_tsadc_data = {
/* cpu, gpu */
.chn_offset = 1,
.chn_num = 2, /* two channels for tsadc */
-
+ .grf_required = false,
.tshut_mode = TSHUT_MODE_GPIO, /* default TSHUT via GPIO give PMIC */
.tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */
.tshut_temp = 95000,
-
.initialize = rk_tsadcv2_initialize,
.irq_ack = rk_tsadcv2_irq_ack,
.control = rk_tsadcv2_control,
@@ -1184,7 +1181,6 @@ static const struct rockchip_tsadc_chip rk3288_tsadc_data = {
.set_alarm_temp = rk_tsadcv2_alarm_temp,
.set_tshut_temp = rk_tsadcv2_tshut_temp,
.set_tshut_mode = rk_tsadcv2_tshut_mode,
-
.table = {
.id = rk3288_code_table,
.length = ARRAY_SIZE(rk3288_code_table),
@@ -1197,10 +1193,9 @@ static const struct rockchip_tsadc_chip rk3328_tsadc_data = {
/* cpu */
.chn_offset = 0,
.chn_num = 1, /* one channels for tsadc */
-
+ .grf_required = false,
.tshut_mode = TSHUT_MODE_CRU, /* default TSHUT via CRU */
.tshut_temp = 95000,
-
.initialize = rk_tsadcv2_initialize,
.irq_ack = rk_tsadcv3_irq_ack,
.control = rk_tsadcv3_control,
@@ -1208,7 +1203,6 @@ static const struct rockchip_tsadc_chip rk3328_tsadc_data = {
.set_alarm_temp = rk_tsadcv2_alarm_temp,
.set_tshut_temp = rk_tsadcv2_tshut_temp,
.set_tshut_mode = rk_tsadcv2_tshut_mode,
-
.table = {
.id = rk3328_code_table,
.length = ARRAY_SIZE(rk3328_code_table),
@@ -1221,11 +1215,10 @@ static const struct rockchip_tsadc_chip rk3366_tsadc_data = {
/* cpu, gpu */
.chn_offset = 0,
.chn_num = 2, /* two channels for tsadc */
-
+ .grf_required = true,
.tshut_mode = TSHUT_MODE_GPIO, /* default TSHUT via GPIO give PMIC */
.tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */
.tshut_temp = 95000,
-
.initialize = rk_tsadcv3_initialize,
.irq_ack = rk_tsadcv3_irq_ack,
.control = rk_tsadcv3_control,
@@ -1233,7 +1226,6 @@ static const struct rockchip_tsadc_chip rk3366_tsadc_data = {
.set_alarm_temp = rk_tsadcv2_alarm_temp,
.set_tshut_temp = rk_tsadcv2_tshut_temp,
.set_tshut_mode = rk_tsadcv2_tshut_mode,
-
.table = {
.id = rk3228_code_table,
.length = ARRAY_SIZE(rk3228_code_table),
@@ -1246,11 +1238,10 @@ static const struct rockchip_tsadc_chip rk3368_tsadc_data = {
/* cpu, gpu */
.chn_offset = 0,
.chn_num = 2, /* two channels for tsadc */
-
+ .grf_required = false,
.tshut_mode = TSHUT_MODE_GPIO, /* default TSHUT via GPIO give PMIC */
.tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */
.tshut_temp = 95000,
-
.initialize = rk_tsadcv2_initialize,
.irq_ack = rk_tsadcv2_irq_ack,
.control = rk_tsadcv2_control,
@@ -1258,7 +1249,6 @@ static const struct rockchip_tsadc_chip rk3368_tsadc_data = {
.set_alarm_temp = rk_tsadcv2_alarm_temp,
.set_tshut_temp = rk_tsadcv2_tshut_temp,
.set_tshut_mode = rk_tsadcv2_tshut_mode,
-
.table = {
.id = rk3368_code_table,
.length = ARRAY_SIZE(rk3368_code_table),
@@ -1271,11 +1261,10 @@ static const struct rockchip_tsadc_chip rk3399_tsadc_data = {
/* cpu, gpu */
.chn_offset = 0,
.chn_num = 2, /* two channels for tsadc */
-
+ .grf_required = true,
.tshut_mode = TSHUT_MODE_GPIO, /* default TSHUT via GPIO give PMIC */
.tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */
.tshut_temp = 95000,
-
.initialize = rk_tsadcv3_initialize,
.irq_ack = rk_tsadcv3_irq_ack,
.control = rk_tsadcv3_control,
@@ -1283,7 +1272,6 @@ static const struct rockchip_tsadc_chip rk3399_tsadc_data = {
.set_alarm_temp = rk_tsadcv2_alarm_temp,
.set_tshut_temp = rk_tsadcv2_tshut_temp,
.set_tshut_mode = rk_tsadcv2_tshut_mode,
-
.table = {
.id = rk3399_code_table,
.length = ARRAY_SIZE(rk3399_code_table),
@@ -1296,11 +1284,10 @@ static const struct rockchip_tsadc_chip rk3568_tsadc_data = {
/* cpu, gpu */
.chn_offset = 0,
.chn_num = 2, /* two channels for tsadc */
-
+ .grf_required = true,
.tshut_mode = TSHUT_MODE_GPIO, /* default TSHUT via GPIO give PMIC */
.tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */
.tshut_temp = 95000,
-
.initialize = rk_tsadcv7_initialize,
.irq_ack = rk_tsadcv3_irq_ack,
.control = rk_tsadcv3_control,
@@ -1308,7 +1295,6 @@ static const struct rockchip_tsadc_chip rk3568_tsadc_data = {
.set_alarm_temp = rk_tsadcv2_alarm_temp,
.set_tshut_temp = rk_tsadcv2_tshut_temp,
.set_tshut_mode = rk_tsadcv2_tshut_mode,
-
.table = {
.id = rk3568_code_table,
.length = ARRAY_SIZE(rk3568_code_table),
@@ -1321,6 +1307,7 @@ static const struct rockchip_tsadc_chip rk3576_tsadc_data = {
/* top, big_core, little_core, ddr, npu, gpu */
.chn_offset = 0,
.chn_num = 6, /* six channels for tsadc */
+ .grf_required = false,
.tshut_mode = TSHUT_MODE_GPIO, /* default TSHUT via GPIO give PMIC */
.tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */
.tshut_temp = 95000,
@@ -1345,6 +1332,7 @@ static const struct rockchip_tsadc_chip rk3588_tsadc_data = {
/* top, big_core0, big_core1, little_core, center, gpu, npu */
.chn_offset = 0,
.chn_num = 7, /* seven channels for tsadc */
+ .grf_required = false,
.tshut_mode = TSHUT_MODE_GPIO, /* default TSHUT via GPIO give PMIC */
.tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */
.tshut_temp = 95000,
@@ -1621,12 +1609,10 @@ static int rockchip_configure_from_dt(struct device *dev,
return -EINVAL;
}
- /* The tsadc wont to handle the error in here since some SoCs didn't
- * need this property.
- */
thermal->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
- if (IS_ERR(thermal->grf))
- dev_warn(dev, "Missing rockchip,grf property\n");
+ if (IS_ERR(thermal->grf) && thermal->chip->grf_required)
+ return dev_err_probe(dev, PTR_ERR(thermal->grf),
+ "Missing rockchip,grf property\n");
rockchip_get_trim_configuration(dev, np, thermal);
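
The effect of grf_required above: the syscon lookup failure is now fatal only on SoCs whose match data declares the GRF mandatory (px30, rk3366, rk3399, rk3568), while the others keep working without one instead of emitting a blanket warning. Reduced to its essentials, the shape of a conditionally required resource lookup looks like this (a sketch, not the literal driver code):

	struct regmap *grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");

	if (IS_ERR(grf) && chip->grf_required)
		/* dev_err_probe() logs the failure (silently for
		 * -EPROBE_DEFER) and passes the error code through */
		return dev_err_probe(dev, PTR_ERR(grf),
				     "Missing rockchip,grf property\n");
	/* otherwise the GRF is optional; users must still check IS_ERR(grf) */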
diff --git a/drivers/thermal/tegra/Makefile b/drivers/thermal/tegra/Makefile
index eb27d194c583..9b3e91f7fb97 100644
--- a/drivers/thermal/tegra/Makefile
+++ b/drivers/thermal/tegra/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_TEGRA_BPMP_THERMAL) += tegra-bpmp-thermal.o
obj-$(CONFIG_TEGRA30_TSENSOR) += tegra30-tsensor.o
tegra-soctherm-y := soctherm.o soctherm-fuse.o
+tegra-soctherm-$(CONFIG_ARCH_TEGRA_114_SOC) += tegra114-soctherm.o
tegra-soctherm-$(CONFIG_ARCH_TEGRA_124_SOC) += tegra124-soctherm.o
tegra-soctherm-$(CONFIG_ARCH_TEGRA_132_SOC) += tegra132-soctherm.o
tegra-soctherm-$(CONFIG_ARCH_TEGRA_210_SOC) += tegra210-soctherm.o
diff --git a/drivers/thermal/tegra/soctherm-fuse.c b/drivers/thermal/tegra/soctherm-fuse.c
index 190f95280e0b..8d37cd8c9122 100644
--- a/drivers/thermal/tegra/soctherm-fuse.c
+++ b/drivers/thermal/tegra/soctherm-fuse.c
@@ -9,15 +9,12 @@
#include "soctherm.h"
-#define NOMINAL_CALIB_FT 105
#define NOMINAL_CALIB_CP 25
#define FUSE_TSENSOR_CALIB_CP_TS_BASE_MASK 0x1fff
#define FUSE_TSENSOR_CALIB_FT_TS_BASE_MASK (0x1fff << 13)
#define FUSE_TSENSOR_CALIB_FT_TS_BASE_SHIFT 13
-#define FUSE_TSENSOR_COMMON 0x180
-
/*
* Tegra210: Layout of bits in FUSE_TSENSOR_COMMON:
* 3 2 1 0
@@ -26,7 +23,7 @@
* | BASE_FT | BASE_CP | SHFT_FT | SHIFT_CP |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
- * Tegra12x, etc:
+ * Tegra124:
* In chips prior to Tegra210, this fuse was incorrectly sized as 26 bits,
* and didn't hold SHIFT_CP in [31:26]. Therefore these missing six bits
* were obtained via the FUSE_SPARE_REALIGNMENT_REG register [5:0].
@@ -44,6 +41,13 @@
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* |---------------------------------------------------| SHIFT_CP |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * Tegra114: Layout of bits in FUSE_TSENSOR_COMMON aka FUSE_VSENSOR_CALIB:
+ * 3 2 1 0
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | SHFT_FT | BASE_FT | SHIFT_CP | BASE_CP |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*/
#define CALIB_COEFFICIENT 1000000LL
@@ -77,7 +81,7 @@ int tegra_calc_shared_calib(const struct tegra_soctherm_fuse *tfuse,
s32 shifted_cp, shifted_ft;
int err;
- err = tegra_fuse_readl(FUSE_TSENSOR_COMMON, &val);
+ err = tegra_fuse_readl(tfuse->fuse_common_reg, &val);
if (err)
return err;
@@ -96,10 +100,12 @@ int tegra_calc_shared_calib(const struct tegra_soctherm_fuse *tfuse,
return err;
}
+ shifted_cp = (val & tfuse->fuse_shift_cp_mask) >>
+ tfuse->fuse_shift_cp_shift;
- shifted_cp = sign_extend32(val, 5);
+ shifted_cp = sign_extend32(shifted_cp, 5);
shared->actual_temp_cp = 2 * NOMINAL_CALIB_CP + shifted_cp;
- shared->actual_temp_ft = 2 * NOMINAL_CALIB_FT + shifted_ft;
+ shared->actual_temp_ft = 2 * tfuse->nominal_calib_ft + shifted_ft;
return 0;
}
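
For reference, the CP shift extracted above is a 6-bit two's-complement fuse field, so after masking and shifting it down, sign_extend32(x, 5) promotes bit 5 to the sign bit (the FT shift is the analogous 5-bit case with index 4). A standalone illustration of the helper's arithmetic, using the same expression the kernel inline expands to:

	#include <stdint.h>
	#include <stdio.h>

	/* mirror of the kernel's sign_extend32(value, index) */
	static int32_t sign_extend32(uint32_t value, int index)
	{
		uint8_t shift = 31 - index;

		return (int32_t)(value << shift) >> shift;
	}

	int main(void)
	{
		/* for a 6-bit field: 0x1f = +31, 0x20 = -32, 0x3f = -1 */
		printf("%d %d %d\n",
		       sign_extend32(0x1f, 5),
		       sign_extend32(0x20, 5),
		       sign_extend32(0x3f, 5));
		return 0;
	}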
diff --git a/drivers/thermal/tegra/soctherm.c b/drivers/thermal/tegra/soctherm.c
index 53a5c649f4b1..5d26b52beaba 100644
--- a/drivers/thermal/tegra/soctherm.c
+++ b/drivers/thermal/tegra/soctherm.c
@@ -31,6 +31,7 @@
#include <linux/reset.h>
#include <linux/thermal.h>
+#include <dt-bindings/thermal/tegra114-soctherm.h>
#include <dt-bindings/thermal/tegra124-soctherm.h>
#include "../thermal_core.h"
@@ -357,6 +358,12 @@ struct soctherm_oc_irq_chip_data {
static struct soctherm_oc_irq_chip_data soc_irq_cdata;
+/* Ensure that TEGRA114_* and TEGRA124_* counterparts are equal */
+static_assert(TEGRA114_SOCTHERM_SENSOR_CPU == TEGRA124_SOCTHERM_SENSOR_CPU);
+static_assert(TEGRA114_SOCTHERM_SENSOR_MEM == TEGRA124_SOCTHERM_SENSOR_MEM);
+static_assert(TEGRA114_SOCTHERM_SENSOR_GPU == TEGRA124_SOCTHERM_SENSOR_GPU);
+static_assert(TEGRA114_SOCTHERM_SENSOR_PLLX == TEGRA124_SOCTHERM_SENSOR_PLLX);
+
/**
* ccroc_writel() - writes a value to a CCROC register
* @ts: pointer to a struct tegra_soctherm
@@ -2045,6 +2052,12 @@ static void soctherm_init(struct platform_device *pdev)
}
static const struct of_device_id tegra_soctherm_of_match[] = {
+#ifdef CONFIG_ARCH_TEGRA_114_SOC
+ {
+ .compatible = "nvidia,tegra114-soctherm",
+ .data = &tegra114_soctherm,
+ },
+#endif
#ifdef CONFIG_ARCH_TEGRA_124_SOC
{
.compatible = "nvidia,tegra124-soctherm",
diff --git a/drivers/thermal/tegra/soctherm.h b/drivers/thermal/tegra/soctherm.h
index 70501e73d586..aa4af9268b05 100644
--- a/drivers/thermal/tegra/soctherm.h
+++ b/drivers/thermal/tegra/soctherm.h
@@ -56,6 +56,9 @@
#define SENSOR_TEMP2_MEM_TEMP_MASK (0xffff << 16)
#define SENSOR_TEMP2_PLLX_TEMP_MASK 0xffff
+#define FUSE_VSENSOR_CALIB 0x08c
+#define FUSE_TSENSOR_COMMON 0x180
+
/**
* struct tegra_tsensor_group - SOC_THERM sensor group data
* @name: short name of the temperature sensor group
@@ -109,9 +112,11 @@ struct tsensor_group_thermtrips {
struct tegra_soctherm_fuse {
u32 fuse_base_cp_mask, fuse_base_cp_shift;
+ u32 fuse_shift_cp_mask, fuse_shift_cp_shift;
u32 fuse_base_ft_mask, fuse_base_ft_shift;
u32 fuse_shift_ft_mask, fuse_shift_ft_shift;
- u32 fuse_spare_realignment;
+ u32 fuse_common_reg, fuse_spare_realignment;
+ u32 nominal_calib_ft;
};
struct tsensor_shared_calib {
@@ -137,6 +142,10 @@ int tegra_calc_tsensor_calib(const struct tegra_tsensor *sensor,
const struct tsensor_shared_calib *shared,
u32 *calib);
+#ifdef CONFIG_ARCH_TEGRA_114_SOC
+extern const struct tegra_soctherm_soc tegra114_soctherm;
+#endif
+
#ifdef CONFIG_ARCH_TEGRA_124_SOC
extern const struct tegra_soctherm_soc tegra124_soctherm;
#endif
diff --git a/drivers/thermal/tegra/tegra114-soctherm.c b/drivers/thermal/tegra/tegra114-soctherm.c
new file mode 100644
index 000000000000..688104f28052
--- /dev/null
+++ b/drivers/thermal/tegra/tegra114-soctherm.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2024, Svyatoslav Ryhel <clamor95@gmail.com>
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/thermal/tegra114-soctherm.h>
+
+#include "soctherm.h"
+
+#define TEGRA114_THERMTRIP_ANY_EN_MASK (0x1 << 28)
+#define TEGRA114_THERMTRIP_MEM_EN_MASK (0x1 << 27)
+#define TEGRA114_THERMTRIP_GPU_EN_MASK (0x1 << 26)
+#define TEGRA114_THERMTRIP_CPU_EN_MASK (0x1 << 25)
+#define TEGRA114_THERMTRIP_TSENSE_EN_MASK (0x1 << 24)
+#define TEGRA114_THERMTRIP_GPUMEM_THRESH_MASK (0xff << 16)
+#define TEGRA114_THERMTRIP_CPU_THRESH_MASK (0xff << 8)
+#define TEGRA114_THERMTRIP_TSENSE_THRESH_MASK 0xff
+
+#define TEGRA114_THERMCTL_LVL0_UP_THRESH_MASK (0xff << 17)
+#define TEGRA114_THERMCTL_LVL0_DN_THRESH_MASK (0xff << 9)
+
+#define TEGRA114_THRESH_GRAIN 1000
+#define TEGRA114_BPTT 8
+
+static const struct tegra_tsensor_configuration tegra114_tsensor_config = {
+ .tall = 16300,
+ .tiddq_en = 1,
+ .ten_count = 1,
+ .tsample = 163,
+ .tsample_ate = 655,
+};
+
+static const struct tegra_tsensor_group tegra114_tsensor_group_cpu = {
+ .id = TEGRA114_SOCTHERM_SENSOR_CPU,
+ .name = "cpu",
+ .sensor_temp_offset = SENSOR_TEMP1,
+ .sensor_temp_mask = SENSOR_TEMP1_CPU_TEMP_MASK,
+ .pdiv = 10,
+ .pdiv_ate = 10,
+ .pdiv_mask = SENSOR_PDIV_CPU_MASK,
+ .pllx_hotspot_diff = 6,
+ .pllx_hotspot_mask = SENSOR_HOTSPOT_CPU_MASK,
+ .thermtrip_any_en_mask = TEGRA114_THERMTRIP_ANY_EN_MASK,
+ .thermtrip_enable_mask = TEGRA114_THERMTRIP_CPU_EN_MASK,
+ .thermtrip_threshold_mask = TEGRA114_THERMTRIP_CPU_THRESH_MASK,
+ .thermctl_isr_mask = THERM_IRQ_CPU_MASK,
+ .thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_CPU,
+ .thermctl_lvl0_up_thresh_mask = TEGRA114_THERMCTL_LVL0_UP_THRESH_MASK,
+ .thermctl_lvl0_dn_thresh_mask = TEGRA114_THERMCTL_LVL0_DN_THRESH_MASK,
+};
+
+static const struct tegra_tsensor_group tegra114_tsensor_group_gpu = {
+ .id = TEGRA114_SOCTHERM_SENSOR_GPU,
+ .name = "gpu",
+ .sensor_temp_offset = SENSOR_TEMP1,
+ .sensor_temp_mask = SENSOR_TEMP1_GPU_TEMP_MASK,
+ .pdiv = 10,
+ .pdiv_ate = 10,
+ .pdiv_mask = SENSOR_PDIV_GPU_MASK,
+ .pllx_hotspot_diff = 6,
+ .pllx_hotspot_mask = SENSOR_HOTSPOT_GPU_MASK,
+ .thermtrip_any_en_mask = TEGRA114_THERMTRIP_ANY_EN_MASK,
+ .thermtrip_enable_mask = TEGRA114_THERMTRIP_GPU_EN_MASK,
+ .thermtrip_threshold_mask = TEGRA114_THERMTRIP_GPUMEM_THRESH_MASK,
+ .thermctl_isr_mask = THERM_IRQ_GPU_MASK,
+ .thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_GPU,
+ .thermctl_lvl0_up_thresh_mask = TEGRA114_THERMCTL_LVL0_UP_THRESH_MASK,
+ .thermctl_lvl0_dn_thresh_mask = TEGRA114_THERMCTL_LVL0_DN_THRESH_MASK,
+};
+
+static const struct tegra_tsensor_group tegra114_tsensor_group_pll = {
+ .id = TEGRA114_SOCTHERM_SENSOR_PLLX,
+ .name = "pll",
+ .sensor_temp_offset = SENSOR_TEMP2,
+ .sensor_temp_mask = SENSOR_TEMP2_PLLX_TEMP_MASK,
+ .pdiv = 10,
+ .pdiv_ate = 10,
+ .pdiv_mask = SENSOR_PDIV_PLLX_MASK,
+ .thermtrip_any_en_mask = TEGRA114_THERMTRIP_ANY_EN_MASK,
+ .thermtrip_enable_mask = TEGRA114_THERMTRIP_TSENSE_EN_MASK,
+ .thermtrip_threshold_mask = TEGRA114_THERMTRIP_TSENSE_THRESH_MASK,
+ .thermctl_isr_mask = THERM_IRQ_TSENSE_MASK,
+ .thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_TSENSE,
+ .thermctl_lvl0_up_thresh_mask = TEGRA114_THERMCTL_LVL0_UP_THRESH_MASK,
+ .thermctl_lvl0_dn_thresh_mask = TEGRA114_THERMCTL_LVL0_DN_THRESH_MASK,
+};
+
+static const struct tegra_tsensor_group tegra114_tsensor_group_mem = {
+ .id = TEGRA114_SOCTHERM_SENSOR_MEM,
+ .name = "mem",
+ .sensor_temp_offset = SENSOR_TEMP2,
+ .sensor_temp_mask = SENSOR_TEMP2_MEM_TEMP_MASK,
+ .pdiv = 10,
+ .pdiv_ate = 10,
+ .pdiv_mask = SENSOR_PDIV_MEM_MASK,
+ .pllx_hotspot_diff = 0,
+ .pllx_hotspot_mask = SENSOR_HOTSPOT_MEM_MASK,
+ .thermtrip_any_en_mask = TEGRA114_THERMTRIP_ANY_EN_MASK,
+ .thermtrip_enable_mask = TEGRA114_THERMTRIP_MEM_EN_MASK,
+ .thermtrip_threshold_mask = TEGRA114_THERMTRIP_GPUMEM_THRESH_MASK,
+ .thermctl_isr_mask = THERM_IRQ_MEM_MASK,
+ .thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_MEM,
+ .thermctl_lvl0_up_thresh_mask = TEGRA114_THERMCTL_LVL0_UP_THRESH_MASK,
+ .thermctl_lvl0_dn_thresh_mask = TEGRA114_THERMCTL_LVL0_DN_THRESH_MASK,
+};
+
+static const struct tegra_tsensor_group *tegra114_tsensor_groups[] = {
+ &tegra114_tsensor_group_cpu,
+ &tegra114_tsensor_group_gpu,
+ &tegra114_tsensor_group_pll,
+ &tegra114_tsensor_group_mem,
+};
+
+static const struct tegra_tsensor tegra114_tsensors[] = {
+ {
+ .name = "cpu0",
+ .base = 0xc0,
+ .config = &tegra114_tsensor_config,
+ .calib_fuse_offset = 0x098,
+ .fuse_corr_alpha = 1196400,
+ .fuse_corr_beta = -13600000,
+ .group = &tegra114_tsensor_group_cpu,
+ }, {
+ .name = "cpu1",
+ .base = 0xe0,
+ .config = &tegra114_tsensor_config,
+ .calib_fuse_offset = 0x084,
+ .fuse_corr_alpha = 1196400,
+ .fuse_corr_beta = -13600000,
+ .group = &tegra114_tsensor_group_cpu,
+ }, {
+ .name = "cpu2",
+ .base = 0x100,
+ .config = &tegra114_tsensor_config,
+ .calib_fuse_offset = 0x088,
+ .fuse_corr_alpha = 1196400,
+ .fuse_corr_beta = -13600000,
+ .group = &tegra114_tsensor_group_cpu,
+ }, {
+ .name = "cpu3",
+ .base = 0x120,
+ .config = &tegra114_tsensor_config,
+ .calib_fuse_offset = 0x12c,
+ .fuse_corr_alpha = 1196400,
+ .fuse_corr_beta = -13600000,
+ .group = &tegra114_tsensor_group_cpu,
+ }, {
+ .name = "mem0",
+ .base = 0x140,
+ .config = &tegra114_tsensor_config,
+ .calib_fuse_offset = 0x158,
+ .fuse_corr_alpha = 1000000,
+ .fuse_corr_beta = 0,
+ .group = &tegra114_tsensor_group_mem,
+ }, {
+ .name = "mem1",
+ .base = 0x160,
+ .config = &tegra114_tsensor_config,
+ .calib_fuse_offset = 0x15c,
+ .fuse_corr_alpha = 1000000,
+ .fuse_corr_beta = 0,
+ .group = &tegra114_tsensor_group_mem,
+ }, {
+ .name = "gpu",
+ .base = 0x180,
+ .config = &tegra114_tsensor_config,
+ .calib_fuse_offset = 0x154,
+ .fuse_corr_alpha = 1124500,
+ .fuse_corr_beta = -9793100,
+ .group = &tegra114_tsensor_group_gpu,
+ }, {
+ .name = "pllx",
+ .base = 0x1a0,
+ .config = &tegra114_tsensor_config,
+ .calib_fuse_offset = 0x160,
+ .fuse_corr_alpha = 1224200,
+ .fuse_corr_beta = -14665000,
+ .group = &tegra114_tsensor_group_pll,
+ },
+};
+
+static const struct tegra_soctherm_fuse tegra114_soctherm_fuse = {
+ .fuse_base_cp_mask = 0x3ff,
+ .fuse_base_cp_shift = 0,
+ .fuse_shift_cp_mask = 0x3f << 10,
+ .fuse_shift_cp_shift = 10,
+ .fuse_base_ft_mask = 0x7ff << 16,
+ .fuse_base_ft_shift = 16,
+ .fuse_shift_ft_mask = 0x1f << 27,
+ .fuse_shift_ft_shift = 27,
+ .fuse_common_reg = FUSE_VSENSOR_CALIB,
+ .fuse_spare_realignment = 0,
+ .nominal_calib_ft = 90,
+};
+
+const struct tegra_soctherm_soc tegra114_soctherm = {
+ .tsensors = tegra114_tsensors,
+ .num_tsensors = ARRAY_SIZE(tegra114_tsensors),
+ .ttgs = tegra114_tsensor_groups,
+ .num_ttgs = ARRAY_SIZE(tegra114_tsensor_groups),
+ .tfuse = &tegra114_soctherm_fuse,
+ .thresh_grain = TEGRA114_THRESH_GRAIN,
+ .bptt = TEGRA114_BPTT,
+ .use_ccroc = false,
+};
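
Putting tegra114_soctherm_fuse together with the layout comment added to soctherm-fuse.c: the single FUSE_VSENSOR_CALIB word carries BASE_CP in bits [9:0], SHIFT_CP in [15:10], BASE_FT in [26:16] and SHFT_FT in [31:27], and the shared calibration points come out as actual_temp_cp = 2 * 25 + SHIFT_CP and actual_temp_ft = 2 * 90 + SHFT_FT (both shifts sign-extended, the 2 * factor keeping the values in half-degree steps), with Tegra114 using a 90 °C FT nominal where Tegra124 onward use 105 °C. A quick standalone decode of the field arithmetic (the fuse value is made up for illustration):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t val = 0x0bad1234;	/* hypothetical fuse word */

		uint32_t base_cp  = val & 0x3ffu;		 /* [9:0]   */
		uint32_t shift_cp = (val & (0x3fu << 10)) >> 10; /* [15:10] */
		uint32_t base_ft  = (val & (0x7ffu << 16)) >> 16;/* [26:16] */
		uint32_t shift_ft = (val & (0x1fu << 27)) >> 27; /* [31:27] */

		printf("base_cp=%u shift_cp=%u base_ft=%u shift_ft=%u\n",
		       base_cp, shift_cp, base_ft, shift_ft);
		return 0;
	}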
diff --git a/drivers/thermal/tegra/tegra124-soctherm.c b/drivers/thermal/tegra/tegra124-soctherm.c
index 20ad27f4d1a1..d86acff1b234 100644
--- a/drivers/thermal/tegra/tegra124-soctherm.c
+++ b/drivers/thermal/tegra/tegra124-soctherm.c
@@ -200,11 +200,15 @@ static const struct tegra_tsensor tegra124_tsensors[] = {
static const struct tegra_soctherm_fuse tegra124_soctherm_fuse = {
.fuse_base_cp_mask = 0x3ff,
.fuse_base_cp_shift = 0,
+ .fuse_shift_cp_mask = 0x3f,
+ .fuse_shift_cp_shift = 0,
.fuse_base_ft_mask = 0x7ff << 10,
.fuse_base_ft_shift = 10,
.fuse_shift_ft_mask = 0x1f << 21,
.fuse_shift_ft_shift = 21,
+ .fuse_common_reg = FUSE_TSENSOR_COMMON,
.fuse_spare_realignment = 0x1fc,
+ .nominal_calib_ft = 105,
};
const struct tegra_soctherm_soc tegra124_soctherm = {
diff --git a/drivers/thermal/tegra/tegra132-soctherm.c b/drivers/thermal/tegra/tegra132-soctherm.c
index b76308fdad9e..64c0363b9717 100644
--- a/drivers/thermal/tegra/tegra132-soctherm.c
+++ b/drivers/thermal/tegra/tegra132-soctherm.c
@@ -200,11 +200,15 @@ static struct tegra_tsensor tegra132_tsensors[] = {
static const struct tegra_soctherm_fuse tegra132_soctherm_fuse = {
.fuse_base_cp_mask = 0x3ff,
.fuse_base_cp_shift = 0,
+ .fuse_shift_cp_mask = 0x3f,
+ .fuse_shift_cp_shift = 0,
.fuse_base_ft_mask = 0x7ff << 10,
.fuse_base_ft_shift = 10,
.fuse_shift_ft_mask = 0x1f << 21,
.fuse_shift_ft_shift = 21,
+ .fuse_common_reg = FUSE_TSENSOR_COMMON,
.fuse_spare_realignment = 0x1fc,
+ .nominal_calib_ft = 105,
};
const struct tegra_soctherm_soc tegra132_soctherm = {
diff --git a/drivers/thermal/tegra/tegra210-soctherm.c b/drivers/thermal/tegra/tegra210-soctherm.c
index d0ff793f18c5..f6e1493f0202 100644
--- a/drivers/thermal/tegra/tegra210-soctherm.c
+++ b/drivers/thermal/tegra/tegra210-soctherm.c
@@ -201,11 +201,15 @@ static const struct tegra_tsensor tegra210_tsensors[] = {
static const struct tegra_soctherm_fuse tegra210_soctherm_fuse = {
.fuse_base_cp_mask = 0x3ff << 11,
.fuse_base_cp_shift = 11,
+ .fuse_shift_cp_mask = 0x3f,
+ .fuse_shift_cp_shift = 0,
.fuse_base_ft_mask = 0x7ff << 21,
.fuse_base_ft_shift = 21,
.fuse_shift_ft_mask = 0x1f << 6,
.fuse_shift_ft_shift = 6,
+ .fuse_common_reg = FUSE_TSENSOR_COMMON,
.fuse_spare_realignment = 0,
+ .nominal_calib_ft = 105,
};
static struct tsensor_group_thermtrips tegra210_tsensor_thermtrips[] = {
diff --git a/drivers/thermal/testing/zone.c b/drivers/thermal/testing/zone.c
index 4257d813d572..c12c405225bb 100644
--- a/drivers/thermal/testing/zone.c
+++ b/drivers/thermal/testing/zone.c
@@ -184,15 +184,14 @@ static void tt_add_tz_work_fn(struct work_struct *work)
int tt_add_tz(void)
{
- struct tt_thermal_zone *tt_zone __free(kfree);
- struct tt_work *tt_work __free(kfree) = NULL;
int ret;
- tt_zone = kzalloc(sizeof(*tt_zone), GFP_KERNEL);
+ struct tt_thermal_zone *tt_zone __free(kfree) = kzalloc(sizeof(*tt_zone),
+ GFP_KERNEL);
if (!tt_zone)
return -ENOMEM;
- tt_work = kzalloc(sizeof(*tt_work), GFP_KERNEL);
+ struct tt_work *tt_work __free(kfree) = kzalloc(sizeof(*tt_work), GFP_KERNEL);
if (!tt_work)
return -ENOMEM;
@@ -237,7 +236,6 @@ static void tt_zone_unregister_tz(struct tt_thermal_zone *tt_zone)
int tt_del_tz(const char *arg)
{
- struct tt_work *tt_work __free(kfree) = NULL;
struct tt_thermal_zone *tt_zone, *aux;
int ret;
int id;
@@ -246,7 +244,7 @@ int tt_del_tz(const char *arg)
if (ret != 1)
return -EINVAL;
- tt_work = kzalloc(sizeof(*tt_work), GFP_KERNEL);
+ struct tt_work *tt_work __free(kfree) = kzalloc(sizeof(*tt_work), GFP_KERNEL);
if (!tt_work)
return -ENOMEM;
@@ -330,20 +328,17 @@ static void tt_zone_add_trip_work_fn(struct work_struct *work)
int tt_zone_add_trip(const char *arg)
{
- struct tt_thermal_zone *tt_zone __free(put_tt_zone) = NULL;
- struct tt_trip *tt_trip __free(kfree) = NULL;
- struct tt_work *tt_work __free(kfree);
int id;
- tt_work = kzalloc(sizeof(*tt_work), GFP_KERNEL);
+ struct tt_work *tt_work __free(kfree) = kzalloc(sizeof(*tt_work), GFP_KERNEL);
if (!tt_work)
return -ENOMEM;
- tt_trip = kzalloc(sizeof(*tt_trip), GFP_KERNEL);
+ struct tt_trip *tt_trip __free(kfree) = kzalloc(sizeof(*tt_trip), GFP_KERNEL);
if (!tt_trip)
return -ENOMEM;
- tt_zone = tt_get_tt_zone(arg);
+ struct tt_thermal_zone *tt_zone __free(put_tt_zone) = tt_get_tt_zone(arg);
if (IS_ERR(tt_zone))
return PTR_ERR(tt_zone);
@@ -387,7 +382,6 @@ static const struct thermal_zone_device_ops tt_zone_ops = {
static int tt_zone_register_tz(struct tt_thermal_zone *tt_zone)
{
- struct thermal_trip *trips __free(kfree) = NULL;
struct thermal_zone_device *tz;
struct tt_trip *tt_trip;
int i;
@@ -397,7 +391,8 @@ static int tt_zone_register_tz(struct tt_thermal_zone *tt_zone)
if (tt_zone->tz)
return -EINVAL;
- trips = kcalloc(tt_zone->num_trips, sizeof(*trips), GFP_KERNEL);
+ struct thermal_trip *trips __free(kfree) = kcalloc(tt_zone->num_trips,
+ sizeof(*trips), GFP_KERNEL);
if (!trips)
return -ENOMEM;
@@ -421,9 +416,7 @@ static int tt_zone_register_tz(struct tt_thermal_zone *tt_zone)
int tt_zone_reg(const char *arg)
{
- struct tt_thermal_zone *tt_zone __free(put_tt_zone);
-
- tt_zone = tt_get_tt_zone(arg);
+ struct tt_thermal_zone *tt_zone __free(put_tt_zone) = tt_get_tt_zone(arg);
if (IS_ERR(tt_zone))
return PTR_ERR(tt_zone);
@@ -432,9 +425,7 @@ int tt_zone_reg(const char *arg)
int tt_zone_unreg(const char *arg)
{
- struct tt_thermal_zone *tt_zone __free(put_tt_zone);
-
- tt_zone = tt_get_tt_zone(arg);
+ struct tt_thermal_zone *tt_zone __free(put_tt_zone) = tt_get_tt_zone(arg);
if (IS_ERR(tt_zone))
return PTR_ERR(tt_zone);
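
All of the zone.c changes above are one idiom: a pointer annotated with __free() must be initialized at its declaration, because any early return taken between a bare declaration and a later assignment would hand an uninitialized pointer to kfree() when the variable leaves scope. A minimal sketch of the pattern from linux/cleanup.h (the tt_work name is reused for illustration only):

	#include <linux/cleanup.h>
	#include <linux/slab.h>

	static int tt_example(void)
	{
		/* allocation fused with the declaration: every exit path from
		 * this scope now runs kfree(w) automatically */
		struct tt_work *w __free(kfree) = kzalloc(sizeof(*w), GFP_KERNEL);

		if (!w)
			return -ENOMEM;

		/* if ownership must outlive the scope, it is handed off with
		 * no_free_ptr(w), which nulls w and skips the cleanup */
		return 0;
	}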
diff --git a/drivers/thermal/thermal-generic-adc.c b/drivers/thermal/thermal-generic-adc.c
index ee3d0aa31406..7c844589b153 100644
--- a/drivers/thermal/thermal-generic-adc.c
+++ b/drivers/thermal/thermal-generic-adc.c
@@ -7,6 +7,7 @@
* Author: Laxman Dewangan <ldewangan@nvidia.com>
*/
#include <linux/iio/consumer.h>
+#include <linux/iio/iio.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -73,6 +74,58 @@ static const struct thermal_zone_device_ops gadc_thermal_ops = {
.get_temp = gadc_thermal_get_temp,
};
+static const struct iio_chan_spec gadc_thermal_iio_channels[] = {
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ }
+};
+
+static int gadc_thermal_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct gadc_thermal_info *gtinfo = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_PROCESSED:
+ ret = gadc_thermal_get_temp(gtinfo->tz_dev, val);
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info gadc_thermal_iio_info = {
+ .read_raw = gadc_thermal_read_raw,
+};
+
+static int gadc_iio_register(struct device *dev, struct gadc_thermal_info *gti)
+{
+ struct gadc_thermal_info *gtinfo;
+ struct iio_dev *indio_dev;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*gtinfo));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ gtinfo = iio_priv(indio_dev);
+ memcpy(gtinfo, gti, sizeof(*gtinfo));
+
+ indio_dev->name = dev_name(dev);
+ indio_dev->info = &gadc_thermal_iio_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = gadc_thermal_iio_channels;
+ indio_dev->num_channels = ARRAY_SIZE(gadc_thermal_iio_channels);
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
static int gadc_thermal_read_linear_lookup_table(struct device *dev,
struct gadc_thermal_info *gti)
{
@@ -153,7 +206,7 @@ static int gadc_thermal_probe(struct platform_device *pdev)
devm_thermal_add_hwmon_sysfs(dev, gti->tz_dev);
- return 0;
+ return gadc_iio_register(&pdev->dev, gti);
}
static const struct of_device_id of_adc_thermal_match[] = {
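
The IIO side added above simply re-exports the thermal zone's already-converted reading: a single IIO_TEMP channel with only IIO_CHAN_INFO_PROCESSED set, served by gadc_thermal_get_temp() in millidegrees Celsius, so no raw/scale pair is involved. With INDIO_DIRECT_MODE this typically surfaces as an in_temp_input attribute under /sys/bus/iio/devices/. A hypothetical userspace read (the device index depends on probe order):

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/bus/iio/devices/iio:device0/in_temp_input", "r");
		long temp_mdegc;

		if (!f)
			return 1;
		if (fscanf(f, "%ld", &temp_mdegc) == 1)
			printf("%.3f degC\n", temp_mdegc / 1000.0);
		fclose(f);
		return 0;
	}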
diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c
index 0ecccd4d8556..64cc3ab949fe 100644
--- a/drivers/thermal/thermal_hwmon.c
+++ b/drivers/thermal/thermal_hwmon.c
@@ -96,7 +96,7 @@ thermal_hwmon_lookup_by_type(const struct thermal_zone_device *tz)
mutex_lock(&thermal_hwmon_list_lock);
list_for_each_entry(hwmon, &thermal_hwmon_list, node) {
- strcpy(type, tz->type);
+ strscpy(type, tz->type);
strreplace(type, '-', '_');
if (!strcmp(hwmon->type, type)) {
mutex_unlock(&thermal_hwmon_list_lock);
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 44427415a80d..724ad4f3cbee 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -128,7 +128,7 @@ config SERIAL_SB1250_DUART_CONSOLE
config SERIAL_ATMEL
bool "AT91 on-chip serial port support"
depends on COMMON_CLK
- depends on ARCH_AT91 || ARCH_LAN969X || COMPILE_TEST
+ depends on ARCH_MICROCHIP || COMPILE_TEST
select SERIAL_CORE
select SERIAL_MCTRL_GPIO if GPIOLIB
select MFD_AT91_USART
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index 32ec632fd080..3f5b87c4cc54 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -1200,7 +1200,13 @@ static int qcom_geni_serial_port_setup(struct uart_port *uport)
int ret;
proto = geni_se_read_proto(&port->se);
- if (proto != GENI_SE_UART) {
+ if (proto == GENI_SE_INVALID_PROTO) {
+ ret = geni_load_se_firmware(&port->se, GENI_SE_UART);
+ if (ret) {
+ dev_err(uport->dev, "UART firmware load failed ret: %d\n", ret);
+ return ret;
+ }
+ } else if (proto != GENI_SE_UART) {
dev_err(uport->dev, "Invalid FW loaded, proto: %d\n", proto);
return -ENXIO;
}
diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
index cc88aaa106da..c9bdd4140fd0 100644
--- a/drivers/ufs/core/ufs-mcq.c
+++ b/drivers/ufs/core/ufs-mcq.c
@@ -29,6 +29,10 @@
#define MCQ_ENTRY_SIZE_IN_DWORD 8
#define CQE_UCD_BA GENMASK_ULL(63, 7)
+#define UFSHCD_ENABLE_MCQ_INTRS (UTP_TASK_REQ_COMPL |\
+ UFSHCD_ERROR_MASK |\
+ MCQ_CQ_EVENT_STATUS)
+
/* Max mcq register polling time in microseconds */
#define MCQ_POLL_US 500000
@@ -355,9 +359,16 @@ EXPORT_SYMBOL_GPL(ufshcd_mcq_poll_cqe_lock);
void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
{
struct ufs_hw_queue *hwq;
+ u32 intrs;
u16 qsize;
int i;
+ /* Enable required interrupts */
+ intrs = UFSHCD_ENABLE_MCQ_INTRS;
+ if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_INTR)
+ intrs &= ~MCQ_CQ_EVENT_STATUS;
+ ufshcd_enable_intr(hba, intrs);
+
for (i = 0; i < hba->nr_hw_queues; i++) {
hwq = &hba->uhq[i];
hwq->id = i;
diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c
index 4bd7d491e3c5..0086816b27cd 100644
--- a/drivers/ufs/core/ufs-sysfs.c
+++ b/drivers/ufs/core/ufs-sysfs.c
@@ -512,6 +512,8 @@ static ssize_t pm_qos_enable_show(struct device *dev,
{
struct ufs_hba *hba = dev_get_drvdata(dev);
+ guard(mutex)(&hba->pm_qos_mutex);
+
return sysfs_emit(buf, "%d\n", hba->pm_qos_enabled);
}
diff --git a/drivers/ufs/core/ufs_trace.h b/drivers/ufs/core/ufs_trace.h
index caa32e23ffa5..584c2b5c6ad9 100644
--- a/drivers/ufs/core/ufs_trace.h
+++ b/drivers/ufs/core/ufs_trace.h
@@ -11,6 +11,7 @@
#include <ufs/ufs.h>
#include <linux/tracepoint.h>
+#include "ufs_trace_types.h"
#define str_opcode(opcode) \
__print_symbolic(opcode, \
diff --git a/drivers/ufs/core/ufs_trace_types.h b/drivers/ufs/core/ufs_trace_types.h
new file mode 100644
index 000000000000..f2d5ad1d92b9
--- /dev/null
+++ b/drivers/ufs/core/ufs_trace_types.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _UFS_TRACE_TYPES_H_
+#define _UFS_TRACE_TYPES_H_
+
+enum ufs_trace_str_t {
+ UFS_CMD_SEND,
+ UFS_CMD_COMP,
+ UFS_DEV_COMP,
+ UFS_QUERY_SEND,
+ UFS_QUERY_COMP,
+ UFS_QUERY_ERR,
+ UFS_TM_SEND,
+ UFS_TM_COMP,
+ UFS_TM_ERR
+};
+
+enum ufs_trace_tsf_t {
+ UFS_TSF_CDB,
+ UFS_TSF_OSF,
+ UFS_TSF_TM_INPUT,
+ UFS_TSF_TM_OUTPUT
+};
+
+#endif /* _UFS_TRACE_TYPES_H_ */
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 9a43102b2b21..d9632d7c5f01 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -45,11 +45,6 @@
UTP_TASK_REQ_COMPL |\
UFSHCD_ERROR_MASK)
-#define UFSHCD_ENABLE_MCQ_INTRS (UTP_TASK_REQ_COMPL |\
- UFSHCD_ERROR_MASK |\
- MCQ_CQ_EVENT_STATUS)
-
-
/* UIC command timeout, unit: ms */
enum {
UIC_CMD_TIMEOUT_DEFAULT = 500,
@@ -316,6 +311,9 @@ static const struct ufs_dev_quirk ufs_fixups[] = {
{ .wmanufacturerid = UFS_VENDOR_TOSHIBA,
.model = "THGLF2G9D8KBADG",
.quirk = UFS_DEVICE_QUIRK_PA_TACTIVATE },
+ { .wmanufacturerid = UFS_VENDOR_TOSHIBA,
+ .model = "THGJFJT1E45BATP",
+ .quirk = UFS_DEVICE_QUIRK_NO_TIMESTAMP_SUPPORT },
{}
};
@@ -369,7 +367,7 @@ EXPORT_SYMBOL_GPL(ufshcd_disable_irq);
* @hba: per adapter instance
* @intrs: interrupt bits
*/
-static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
+void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
{
u32 old_val = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
u32 new_val = old_val | intrs;
@@ -606,10 +604,12 @@ void ufshcd_print_tr(struct ufs_hba *hba, int tag, bool pr_prdt)
lrbp = &hba->lrb[tag];
- dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
- tag, div_u64(lrbp->issue_time_stamp_local_clock, 1000));
- dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
- tag, div_u64(lrbp->compl_time_stamp_local_clock, 1000));
+ if (hba->monitor.enabled) {
+ dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n", tag,
+ div_u64(lrbp->issue_time_stamp_local_clock, 1000));
+ dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n", tag,
+ div_u64(lrbp->compl_time_stamp_local_clock, 1000));
+ }
dev_err(hba->dev,
"UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
tag, (u64)lrbp->utrd_dma_addr);
@@ -1045,6 +1045,7 @@ EXPORT_SYMBOL_GPL(ufshcd_is_hba_active);
*/
void ufshcd_pm_qos_init(struct ufs_hba *hba)
{
+ guard(mutex)(&hba->pm_qos_mutex);
if (hba->pm_qos_enabled)
return;
@@ -1061,6 +1062,8 @@ void ufshcd_pm_qos_init(struct ufs_hba *hba)
*/
void ufshcd_pm_qos_exit(struct ufs_hba *hba)
{
+ guard(mutex)(&hba->pm_qos_mutex);
+
if (!hba->pm_qos_enabled)
return;
@@ -1075,6 +1078,8 @@ void ufshcd_pm_qos_exit(struct ufs_hba *hba)
*/
static void ufshcd_pm_qos_update(struct ufs_hba *hba, bool on)
{
+ guard(mutex)(&hba->pm_qos_mutex);
+
if (!hba->pm_qos_enabled)
return;
@@ -2230,11 +2235,13 @@ static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
{
bool queue_resume_work = false;
- ktime_t curr_t = ktime_get();
+ ktime_t curr_t;
if (!ufshcd_is_clkscaling_supported(hba))
return;
+ curr_t = ktime_get();
+
guard(spinlock_irqsave)(&hba->clk_scaling.lock);
if (!hba->clk_scaling.active_reqs++)
@@ -2354,10 +2361,12 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag,
struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
unsigned long flags;
- lrbp->issue_time_stamp = ktime_get();
- lrbp->issue_time_stamp_local_clock = local_clock();
- lrbp->compl_time_stamp = ktime_set(0, 0);
- lrbp->compl_time_stamp_local_clock = 0;
+ if (hba->monitor.enabled) {
+ lrbp->issue_time_stamp = ktime_get();
+ lrbp->issue_time_stamp_local_clock = local_clock();
+ lrbp->compl_time_stamp = ktime_set(0, 0);
+ lrbp->compl_time_stamp_local_clock = 0;
+ }
ufshcd_add_command_trace(hba, task_tag, UFS_CMD_SEND);
if (lrbp->cmd)
ufshcd_clk_scaling_start_busy(hba);
@@ -5622,8 +5631,10 @@ void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
enum utp_ocs ocs;
lrbp = &hba->lrb[task_tag];
- lrbp->compl_time_stamp = ktime_get();
- lrbp->compl_time_stamp_local_clock = local_clock();
+ if (hba->monitor.enabled) {
+ lrbp->compl_time_stamp = ktime_get();
+ lrbp->compl_time_stamp_local_clock = local_clock();
+ }
cmd = lrbp->cmd;
if (cmd) {
if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
@@ -6457,13 +6468,14 @@ void ufshcd_schedule_eh_work(struct ufs_hba *hba)
}
}
-static void ufshcd_force_error_recovery(struct ufs_hba *hba)
+void ufshcd_force_error_recovery(struct ufs_hba *hba)
{
spin_lock_irq(hba->host->host_lock);
hba->force_reset = true;
ufshcd_schedule_eh_work(hba);
spin_unlock_irq(hba->host->host_lock);
}
+EXPORT_SYMBOL_GPL(ufshcd_force_error_recovery);
static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
{
@@ -8786,7 +8798,8 @@ static void ufshcd_set_timestamp_attr(struct ufs_hba *hba)
struct ufs_dev_info *dev_info = &hba->dev_info;
struct utp_upiu_query_v4_0 *upiu_data;
- if (dev_info->wspecversion < 0x400)
+ if (dev_info->wspecversion < 0x400 ||
+ hba->dev_quirks & UFS_DEVICE_QUIRK_NO_TIMESTAMP_SUPPORT)
return;
ufshcd_dev_man_lock(hba);
@@ -8913,16 +8926,11 @@ err:
static void ufshcd_config_mcq(struct ufs_hba *hba)
{
int ret;
- u32 intrs;
ret = ufshcd_mcq_vops_config_esi(hba);
hba->mcq_esi_enabled = !ret;
dev_info(hba->dev, "ESI %sconfigured\n", ret ? "is not " : "");
- intrs = UFSHCD_ENABLE_MCQ_INTRS;
- if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_INTR)
- intrs &= ~MCQ_CQ_EVENT_STATUS;
- ufshcd_enable_intr(hba, intrs);
ufshcd_mcq_make_queues_operational(hba);
ufshcd_mcq_config_mac(hba, hba->nutrs);
@@ -10756,6 +10764,10 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
mutex_init(&hba->ee_ctrl_mutex);
mutex_init(&hba->wb_mutex);
+
+ /* Initialize mutex for PM QoS request synchronization */
+ mutex_init(&hba->pm_qos_mutex);
+
init_rwsem(&hba->clk_scaling_lock);
ufshcd_init_clk_gating(hba);
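
The new pm_qos_mutex is taken with guard(mutex)(...) from linux/cleanup.h: the lock is acquired at the declaration and released automatically on every path out of the enclosing scope, which is why the early returns in the pm_qos helpers above need no unlock labels. The shape of the pattern (hypothetical foo type):

	#include <linux/cleanup.h>
	#include <linux/mutex.h>

	struct foo {
		struct mutex lock;
		bool enabled;
	};

	static void foo_enable(struct foo *f)
	{
		guard(mutex)(&f->lock);	/* mutex_lock() happens here... */

		if (f->enabled)
			return;		/* ...mutex_unlock() here... */

		f->enabled = true;
	}				/* ...and here on the normal path */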
diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c
index f0adcd9dd553..70d195179eba 100644
--- a/drivers/ufs/host/ufs-exynos.c
+++ b/drivers/ufs/host/ufs-exynos.c
@@ -776,7 +776,7 @@ static void exynos_ufs_config_sync_pattern_mask(struct exynos_ufs *ufs,
u32 mask, sync_len;
enum {
SYNC_LEN_G1 = 80 * 1000, /* 80us */
- SYNC_LEN_G2 = 40 * 1000, /* 44us */
+ SYNC_LEN_G2 = 40 * 1000, /* 40us */
SYNC_LEN_G3 = 20 * 1000, /* 20us */
};
int i;
@@ -1896,6 +1896,13 @@ static int fsd_ufs_pre_pwr_change(struct exynos_ufs *ufs,
return 0;
}
+static int fsd_ufs_suspend(struct exynos_ufs *ufs)
+{
+ exynos_ufs_gate_clks(ufs);
+ hci_writel(ufs, 0, HCI_GPIO_OUT);
+ return 0;
+}
+
static inline u32 get_mclk_period_unipro_18(struct exynos_ufs *ufs)
{
return (16 * 1000 * 1000000UL / ufs->mclk_rate);
@@ -2162,6 +2169,7 @@ static const struct exynos_ufs_drv_data fsd_ufs_drvs = {
.pre_link = fsd_ufs_pre_link,
.post_link = fsd_ufs_post_link,
.pre_pwr_change = fsd_ufs_pre_pwr_change,
+ .suspend = fsd_ufs_suspend,
};
static const struct exynos_ufs_drv_data gs101_ufs_drvs = {
diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c
index f902ce08c95a..758a393a9de1 100644
--- a/drivers/ufs/host/ufs-mediatek.c
+++ b/drivers/ufs/host/ufs-mediatek.c
@@ -29,6 +29,7 @@
#include "ufs-mediatek-sip.h"
static int ufs_mtk_config_mcq(struct ufs_hba *hba, bool irq);
+static void _ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up);
#define CREATE_TRACE_POINTS
#include "ufs-mediatek-trace.h"
@@ -415,7 +416,7 @@ static void ufs_mtk_dbg_sel(struct ufs_hba *hba)
}
}
-static void ufs_mtk_wait_idle_state(struct ufs_hba *hba,
+static int ufs_mtk_wait_idle_state(struct ufs_hba *hba,
unsigned long retry_ms)
{
u64 timeout, time_checked;
@@ -451,8 +452,12 @@ static void ufs_mtk_wait_idle_state(struct ufs_hba *hba,
break;
} while (time_checked < timeout);
- if (wait_idle && sm != VS_HCE_BASE)
+ if (wait_idle && sm != VS_HCE_BASE) {
dev_info(hba->dev, "wait idle tmo: 0x%x\n", val);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
}
static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
@@ -798,8 +803,14 @@ static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
clk_pwr_off = true;
}
- if (clk_pwr_off)
+ if (clk_pwr_off) {
ufs_mtk_pwr_ctrl(hba, false);
+ } else {
+ dev_warn(hba->dev, "Clock is not turned off, hba->ahit = 0x%x, AHIT = 0x%x\n",
+ hba->ahit,
+ ufshcd_readl(hba,
+ REG_AUTO_HIBERNATE_IDLE_TIMER));
+ }
ufs_mtk_mcq_disable_irq(hba);
} else if (on && status == POST_CHANGE) {
ufs_mtk_pwr_ctrl(hba, true);
@@ -1018,7 +1029,7 @@ static int ufs_mtk_vreg_fix_vcc(struct ufs_hba *hba)
struct arm_smccc_res res;
int err, ver;
- if (hba->vreg_info.vcc)
+ if (info->vcc)
return 0;
if (of_property_read_bool(np, "mediatek,ufs-vcc-by-num")) {
@@ -1075,6 +1086,80 @@ static void ufs_mtk_vreg_fix_vccqx(struct ufs_hba *hba)
}
}
+static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
+{
+ unsigned long flags;
+ u32 ah_ms = 10;
+ u32 ah_scale, ah_timer;
+ u32 scale_us[] = {1, 10, 100, 1000, 10000, 100000};
+
+ if (ufshcd_is_clkgating_allowed(hba)) {
+ if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit) {
+ ah_scale = FIELD_GET(UFSHCI_AHIBERN8_SCALE_MASK,
+ hba->ahit);
+ ah_timer = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK,
+ hba->ahit);
+ if (ah_scale <= 5)
+ ah_ms = ah_timer * scale_us[ah_scale] / 1000;
+ }
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->clk_gating.delay_ms = max(ah_ms, 10U);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ }
+}
+
+/* Convert microseconds to Auto-Hibernate Idle Timer register value */
+static u32 ufs_mtk_us_to_ahit(unsigned int timer)
+{
+ unsigned int scale;
+
+ for (scale = 0; timer > UFSHCI_AHIBERN8_TIMER_MASK; ++scale)
+ timer /= UFSHCI_AHIBERN8_SCALE_FACTOR;
+
+ return FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, timer) |
+ FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, scale);
+}
+
+static void ufs_mtk_fix_ahit(struct ufs_hba *hba)
+{
+ unsigned int us;
+
+ if (ufshcd_is_auto_hibern8_supported(hba)) {
+ switch (hba->dev_info.wmanufacturerid) {
+ case UFS_VENDOR_SAMSUNG:
+ /* configure auto-hibern8 timer to 3.5 ms */
+ us = 3500;
+ break;
+
+ case UFS_VENDOR_MICRON:
+ /* configure auto-hibern8 timer to 2 ms */
+ us = 2000;
+ break;
+
+ default:
+ /* configure auto-hibern8 timer to 1 ms */
+ us = 1000;
+ break;
+ }
+
+ hba->ahit = ufs_mtk_us_to_ahit(us);
+ }
+
+ ufs_mtk_setup_clk_gating(hba);
+}
+
+static void ufs_mtk_fix_clock_scaling(struct ufs_hba *hba)
+{
+ /* If the UFS version is below 4.0, clock scaling is not necessary */
+ if ((hba->dev_info.wspecversion < 0x0400) &&
+ ufs_mtk_is_clk_scale_ready(hba)) {
+ hba->caps &= ~UFSHCD_CAP_CLK_SCALING;
+
+ _ufs_mtk_clk_scale(hba, false);
+ }
+}
+
static void ufs_mtk_init_mcq_irq(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
@@ -1240,6 +1325,10 @@ static bool ufs_mtk_pmc_via_fastauto(struct ufs_hba *hba,
dev_req_params->gear_rx < UFS_HS_G4)
return false;
+ if (dev_req_params->pwr_tx == SLOW_MODE ||
+ dev_req_params->pwr_rx == SLOW_MODE)
+ return false;
+
return true;
}
@@ -1255,6 +1344,10 @@ static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
host_params.hs_rx_gear = UFS_HS_G5;
host_params.hs_tx_gear = UFS_HS_G5;
+ if (dev_max_params->pwr_rx == SLOW_MODE ||
+ dev_max_params->pwr_tx == SLOW_MODE)
+ host_params.desired_working_mode = UFS_PWM_MODE;
+
ret = ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params);
if (ret) {
pr_info("%s: failed to determine capabilities\n",
@@ -1278,6 +1371,28 @@ static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXHSADAPTTYPE),
PA_NO_ADAPT);
+ if (!(hba->quirks & UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING)) {
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
+ DL_FC0ProtectionTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
+ DL_TC0ReplayTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
+ DL_AFC0ReqTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
+ DL_FC1ProtectionTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
+ DL_TC1ReplayTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
+ DL_AFC1ReqTimeOutVal_Default);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
+ DL_FC0ProtectionTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
+ DL_TC0ReplayTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
+ DL_AFC0ReqTimeOutVal_Default);
+ }
+
ret = ufshcd_uic_change_pwr_mode(hba,
FASTAUTO_MODE << 4 | FASTAUTO_MODE);
@@ -1287,10 +1402,59 @@ static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
}
}
- if (host->hw_ver.major >= 3) {
+ /* if already configured to the requested pwr_mode, skip adapt */
+ if (dev_req_params->gear_rx == hba->pwr_info.gear_rx &&
+ dev_req_params->gear_tx == hba->pwr_info.gear_tx &&
+ dev_req_params->lane_rx == hba->pwr_info.lane_rx &&
+ dev_req_params->lane_tx == hba->pwr_info.lane_tx &&
+ dev_req_params->pwr_rx == hba->pwr_info.pwr_rx &&
+ dev_req_params->pwr_tx == hba->pwr_info.pwr_tx &&
+ dev_req_params->hs_rate == hba->pwr_info.hs_rate) {
+ return ret;
+ }
+
+ if (dev_req_params->pwr_rx == FAST_MODE ||
+ dev_req_params->pwr_rx == FASTAUTO_MODE) {
+ if (host->hw_ver.major >= 3) {
+ ret = ufshcd_dme_configure_adapt(hba,
+ dev_req_params->gear_tx,
+ PA_INITIAL_ADAPT);
+ } else {
+ ret = ufshcd_dme_configure_adapt(hba,
+ dev_req_params->gear_tx,
+ PA_NO_ADAPT);
+ }
+ } else {
ret = ufshcd_dme_configure_adapt(hba,
- dev_req_params->gear_tx,
- PA_INITIAL_ADAPT);
+ dev_req_params->gear_tx,
+ PA_NO_ADAPT);
+ }
+
+ return ret;
+}
+
+static int ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba)
+{
+ int ret;
+
+ /* disable auto-hibern8 */
+ ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER);
+
+ /* wait for the host to return to the idle state once auto-hibern8 is off */
+ ret = ufs_mtk_wait_idle_state(hba, 5);
+ if (ret)
+ goto out;
+
+ ret = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
+
+out:
+ if (ret) {
+ dev_warn(hba->dev, "exit h8 state fail, ret=%d\n", ret);
+
+ ufshcd_force_error_recovery(hba);
+
+ /* trigger error handler and break suspend */
+ ret = -EBUSY;
}
return ret;
@@ -1302,13 +1466,20 @@ static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
struct ufs_pa_layer_attr *dev_req_params)
{
int ret = 0;
+ static u32 reg;
switch (stage) {
case PRE_CHANGE:
+ if (ufshcd_is_auto_hibern8_supported(hba)) {
+ reg = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER);
+ ufs_mtk_auto_hibern8_disable(hba);
+ }
ret = ufs_mtk_pre_pwr_change(hba, dev_max_params,
dev_req_params);
break;
case POST_CHANGE:
+ if (ufshcd_is_auto_hibern8_supported(hba))
+ ufshcd_writel(hba, reg, REG_AUTO_HIBERNATE_IDLE_TIMER);
break;
default:
ret = -EINVAL;
@@ -1342,6 +1513,7 @@ static int ufs_mtk_pre_link(struct ufs_hba *hba)
{
int ret;
u32 tmp;
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
ufs_mtk_get_controller_version(hba);
@@ -1367,34 +1539,33 @@ static int ufs_mtk_pre_link(struct ufs_hba *hba)
ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
+ /* Enable the 1144 functions setting */
+ if (host->ip_ver == IP_VER_MT6989) {
+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_DEBUGOMC), &tmp);
+ if (ret)
+ return ret;
+
+ tmp |= 0x10;
+ ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_DEBUGOMC), tmp);
+ }
+
return ret;
}
-static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
+static void ufs_mtk_post_link(struct ufs_hba *hba)
{
- u32 ah_ms;
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ u32 tmp;
- if (ufshcd_is_clkgating_allowed(hba)) {
- if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit)
- ah_ms = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK,
- hba->ahit);
- else
- ah_ms = 10;
- ufshcd_clkgate_delay_set(hba->dev, ah_ms + 5);
+ /* fix device PA_INIT no adapt */
+ if (host->ip_ver >= IP_VER_MT6899) {
+ ufshcd_dme_get(hba, UIC_ARG_MIB(VS_DEBUGOMC), &tmp);
+ tmp |= 0x100;
+ ufshcd_dme_set(hba, UIC_ARG_MIB(VS_DEBUGOMC), tmp);
}
-}
-static void ufs_mtk_post_link(struct ufs_hba *hba)
-{
/* enable unipro clock gating feature */
ufs_mtk_cfg_unipro_cg(hba, true);
-
- /* will be configured during probe hba */
- if (ufshcd_is_auto_hibern8_supported(hba))
- hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
- FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
-
- ufs_mtk_setup_clk_gating(hba);
}
static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
@@ -1421,11 +1592,11 @@ static int ufs_mtk_device_reset(struct ufs_hba *hba)
{
struct arm_smccc_res res;
- /* disable hba before device reset */
- ufshcd_hba_stop(hba);
-
ufs_mtk_device_reset_ctrl(0, res);
+ /* disable hba in middle of device reset */
+ ufshcd_hba_stop(hba);
+
/*
* The reset signal is active low. UFS devices shall detect
* more than or equal to 1us of positive or negative RST_n
@@ -1462,7 +1633,11 @@ static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
return err;
/* Check link state to make sure exit h8 success */
- ufs_mtk_wait_idle_state(hba, 5);
+ err = ufs_mtk_wait_idle_state(hba, 5);
+ if (err) {
+ dev_warn(hba->dev, "wait idle fail, err=%d\n", err);
+ return err;
+ }
err = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
if (err) {
dev_warn(hba->dev, "exit h8 state fail, err=%d\n", err);
@@ -1507,6 +1682,9 @@ static void ufs_mtk_vccqx_set_lpm(struct ufs_hba *hba, bool lpm)
{
struct ufs_vreg *vccqx = NULL;
+ if (!hba->vreg_info.vccq && !hba->vreg_info.vccq2)
+ return;
+
if (hba->vreg_info.vccq)
vccqx = hba->vreg_info.vccq;
else
@@ -1561,21 +1739,6 @@ static void ufs_mtk_dev_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
}
}
-static void ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba)
-{
- int ret;
-
- /* disable auto-hibern8 */
- ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER);
-
- /* wait host return to idle state when auto-hibern8 off */
- ufs_mtk_wait_idle_state(hba, 5);
-
- ret = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
- if (ret)
- dev_warn(hba->dev, "exit h8 state fail, ret=%d\n", ret);
-}
-
static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
enum ufs_notify_change_status status)
{
@@ -1584,7 +1747,7 @@ static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
if (status == PRE_CHANGE) {
if (ufshcd_is_auto_hibern8_supported(hba))
- ufs_mtk_auto_hibern8_disable(hba);
+ return ufs_mtk_auto_hibern8_disable(hba);
return 0;
}
@@ -1642,8 +1805,21 @@ static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
}
return 0;
+
fail:
- return ufshcd_link_recovery(hba);
+ /*
+ * Check if the platform (parent) device has resumed, and ensure that
+ * power, clock, and MTCMOS are all turned on.
+ */
+ err = ufshcd_link_recovery(hba);
+ if (err) {
+ dev_err(hba->dev, "Device PM: req=%d, status:%d, err:%d\n",
+ hba->dev->power.request,
+ hba->dev->power.runtime_status,
+ hba->dev->power.runtime_error);
+ }
+
+ return 0; /* Cannot return a failure, otherwise, the I/O will hang. */
}
static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
@@ -1726,6 +1902,8 @@ static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
ufs_mtk_vreg_fix_vcc(hba);
ufs_mtk_vreg_fix_vccqx(hba);
+ ufs_mtk_fix_ahit(hba);
+ ufs_mtk_fix_clock_scaling(hba);
}
static void ufs_mtk_event_notify(struct ufs_hba *hba,
@@ -2012,6 +2190,7 @@ static int ufs_mtk_config_mcq_irq(struct ufs_hba *hba)
return ret;
}
}
+ host->is_mcq_intr_enabled = true;
return 0;
}
@@ -2095,10 +2274,12 @@ static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
static int ufs_mtk_probe(struct platform_device *pdev)
{
int err;
- struct device *dev = &pdev->dev;
- struct device_node *reset_node;
- struct platform_device *reset_pdev;
+ struct device *dev = &pdev->dev, *phy_dev = NULL;
+ struct device_node *reset_node, *phy_node = NULL;
+ struct platform_device *reset_pdev, *phy_pdev = NULL;
struct device_link *link;
+ struct ufs_hba *hba;
+ struct ufs_mtk_host *host;
reset_node = of_find_compatible_node(NULL, NULL,
"ti,syscon-reset");
@@ -2125,13 +2306,51 @@ static int ufs_mtk_probe(struct platform_device *pdev)
}
skip_reset:
+ /* find phy node */
+ phy_node = of_parse_phandle(dev->of_node, "phys", 0);
+
+ if (phy_node) {
+ phy_pdev = of_find_device_by_node(phy_node);
+ if (!phy_pdev)
+ goto skip_phy;
+ phy_dev = &phy_pdev->dev;
+
+ pm_runtime_set_active(phy_dev);
+ pm_runtime_enable(phy_dev);
+ pm_runtime_get_sync(phy_dev);
+
+ put_device(phy_dev);
+ dev_info(dev, "phys node found\n");
+ } else {
+ dev_notice(dev, "phys node not found\n");
+ }
+
+skip_phy:
/* perform generic probe */
err = ufshcd_pltfrm_init(pdev, &ufs_hba_mtk_vops);
-
-out:
- if (err)
+ if (err) {
dev_err(dev, "probe failed %d\n", err);
+ goto out;
+ }
+
+ hba = platform_get_drvdata(pdev);
+ if (!hba)
+ goto out;
+
+ if (phy_node && phy_dev) {
+ host = ufshcd_get_variant(hba);
+ host->phy_dev = phy_dev;
+ }
+
+ /*
+ * Because the default power setting of VSx (the upper layer of
+ * VCCQ/VCCQ2) is HWLP, we need to prevent VCCQ/VCCQ2 from
+ * entering LPM.
+ */
+ ufs_mtk_dev_vreg_set_lpm(hba, false);
+out:
+ of_node_put(phy_node);
of_node_put(reset_node);
return err;
}
@@ -2156,27 +2375,38 @@ static int ufs_mtk_system_suspend(struct device *dev)
ret = ufshcd_system_suspend(dev);
if (ret)
- return ret;
+ goto out;
+
+ if (pm_runtime_suspended(hba->dev))
+ goto out;
ufs_mtk_dev_vreg_set_lpm(hba, true);
if (ufs_mtk_is_rtff_mtcmos(hba))
ufs_mtk_mtcmos_ctrl(false, res);
- return 0;
+out:
+ return ret;
}
static int ufs_mtk_system_resume(struct device *dev)
{
+ int ret = 0;
struct ufs_hba *hba = dev_get_drvdata(dev);
struct arm_smccc_res res;
- ufs_mtk_dev_vreg_set_lpm(hba, false);
+ if (pm_runtime_suspended(hba->dev))
+ goto out;
if (ufs_mtk_is_rtff_mtcmos(hba))
ufs_mtk_mtcmos_ctrl(true, res);
- return ufshcd_system_resume(dev);
+ ufs_mtk_dev_vreg_set_lpm(hba, false);
+
+out:
+ ret = ufshcd_system_resume(dev);
+
+ return ret;
}
#endif
@@ -2184,6 +2414,7 @@ static int ufs_mtk_system_resume(struct device *dev)
static int ufs_mtk_runtime_suspend(struct device *dev)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
struct arm_smccc_res res;
int ret = 0;
@@ -2196,17 +2427,24 @@ static int ufs_mtk_runtime_suspend(struct device *dev)
if (ufs_mtk_is_rtff_mtcmos(hba))
ufs_mtk_mtcmos_ctrl(false, res);
+ if (host->phy_dev)
+ pm_runtime_put_sync(host->phy_dev);
+
return 0;
}
static int ufs_mtk_runtime_resume(struct device *dev)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
struct arm_smccc_res res;
if (ufs_mtk_is_rtff_mtcmos(hba))
ufs_mtk_mtcmos_ctrl(true, res);
+ if (host->phy_dev)
+ pm_runtime_get_sync(host->phy_dev);
+
ufs_mtk_dev_vreg_set_lpm(hba, false);
return ufshcd_runtime_resume(dev);
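
For the auto-hibernate math above: AHIT packs a 10-bit TIMER field with a power-of-ten SCALE field (UFSHCI_AHIBERN8_SCALE_FACTOR is 10), so ufs_mtk_us_to_ahit() divides the microsecond target by ten until it fits in TIMER, and ufs_mtk_setup_clk_gating() reverses the encoding via its scale_us[] table to size the clock-gating delay. A standalone round-trip of the encoding (field width as assumed by the driver):

	#include <stdio.h>

	#define TIMER_MAX	0x3ffu	/* 10-bit AHIT timer field */
	#define SCALE_FACTOR	10

	int main(void)
	{
		unsigned int us = 3500, timer = us, scale = 0, back, i;

		while (timer > TIMER_MAX) {	/* same loop as the driver */
			timer /= SCALE_FACTOR;
			scale++;
		}
		for (back = timer, i = 0; i < scale; i++)
			back *= SCALE_FACTOR;

		/* 3500 us encodes as timer=350, scale=1 (350 * 10^1 us) */
		printf("timer=%u scale=%u -> %u us\n", timer, scale, back);
		return 0;
	}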
diff --git a/drivers/ufs/host/ufs-mediatek.h b/drivers/ufs/host/ufs-mediatek.h
index e46dc5fa209d..dfbf78bd8664 100644
--- a/drivers/ufs/host/ufs-mediatek.h
+++ b/drivers/ufs/host/ufs-mediatek.h
@@ -193,6 +193,7 @@ struct ufs_mtk_host {
bool is_mcq_intr_enabled;
int mcq_nr_intr;
struct ufs_mtk_mcq_intr_info mcq_intr_info[UFSHCD_MAX_Q_NR];
+ struct device *phy_dev;
};
/* MTK delay of autosuspend: 500 ms */
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 9574fdc2bb0f..3e83dc51d538 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -38,6 +38,9 @@
#define DEEMPHASIS_3_5_dB 0x04
#define NO_DEEMPHASIS 0x0
+#define UFS_ICE_SYNC_RST_SEL BIT(3)
+#define UFS_ICE_SYNC_RST_SW BIT(4)
+
enum {
TSTBUS_UAWM,
TSTBUS_UARM,
@@ -494,12 +497,8 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
* If the HS-G5 PHY gear is used, update host_params->hs_rate to Rate-A,
* so that the subsequent power mode change shall stick to Rate-A.
*/
- if (host->hw_ver.major == 0x5) {
- if (host->phy_gear == UFS_HS_G5)
- host_params->hs_rate = PA_HS_MODE_A;
- else
- host_params->hs_rate = PA_HS_MODE_B;
- }
+ if (host->hw_ver.major == 0x5 && host->phy_gear == UFS_HS_G5)
+ host_params->hs_rate = PA_HS_MODE_A;
mode = host_params->hs_rate == PA_HS_MODE_B ? PHY_MODE_UFS_HS_B : PHY_MODE_UFS_HS_A;
@@ -751,11 +750,29 @@ static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
int err;
+ u32 reg_val;
err = ufs_qcom_enable_lane_clks(host);
if (err)
return err;
+ if ((!ufs_qcom_is_link_active(hba)) &&
+ host->hw_ver.major == 5 &&
+ host->hw_ver.minor == 0 &&
+ host->hw_ver.step == 0) {
+ ufshcd_writel(hba, UFS_ICE_SYNC_RST_SEL | UFS_ICE_SYNC_RST_SW, UFS_MEM_ICE_CFG);
+ reg_val = ufshcd_readl(hba, UFS_MEM_ICE_CFG);
+ reg_val &= ~(UFS_ICE_SYNC_RST_SEL | UFS_ICE_SYNC_RST_SW);
+ /*
+ * HW documentation doesn't recommend any delay between the
+ * reset set and clear. But we are enforcing an arbitrary delay
+ * to give flops enough time to settle in.
+ */
+ usleep_range(50, 100);
+ ufshcd_writel(hba, reg_val, UFS_MEM_ICE_CFG);
+ ufshcd_readl(hba, UFS_MEM_ICE_CFG);
+ }
+
return ufs_qcom_ice_resume(host);
}
@@ -1096,6 +1113,18 @@ static void ufs_qcom_set_phy_gear(struct ufs_qcom_host *host)
}
}
+static void ufs_qcom_parse_gear_limits(struct ufs_hba *hba)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ struct ufs_host_params *host_params = &host->host_params;
+ u32 hs_gear_old = host_params->hs_tx_gear;
+
+ ufshcd_parse_gear_limits(hba, host_params);
+ if (host_params->hs_tx_gear != hs_gear_old)
+ host->phy_gear = host_params->hs_tx_gear;
+}
+
static void ufs_qcom_set_host_params(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
@@ -1162,6 +1191,13 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
case PRE_CHANGE:
if (on) {
ufs_qcom_icc_update_bw(host);
+ if (ufs_qcom_is_link_hibern8(hba)) {
+ err = ufs_qcom_enable_lane_clks(host);
+ if (err) {
+ dev_err(hba->dev, "enable lane clks failed, ret=%d\n", err);
+ return err;
+ }
+ }
} else {
if (!ufs_qcom_is_link_active(hba)) {
/* disable device ref_clk */
@@ -1187,6 +1223,9 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
if (ufshcd_is_hs_mode(&hba->pwr_info))
ufs_qcom_dev_ref_clk_ctrl(host, true);
} else {
+ if (ufs_qcom_is_link_hibern8(hba))
+ ufs_qcom_disable_lane_clks(host);
+
ufs_qcom_icc_set_bw(host, ufs_qcom_bw_table[MODE_MIN][0][0].mem_bw,
ufs_qcom_bw_table[MODE_MIN][0][0].cfg_bw);
}
@@ -1337,6 +1376,7 @@ static int ufs_qcom_init(struct ufs_hba *hba)
ufs_qcom_advertise_quirks(hba);
ufs_qcom_set_host_params(hba);
ufs_qcom_set_phy_gear(host);
+ ufs_qcom_parse_gear_limits(hba);
err = ufs_qcom_ice_init(host);
if (err)
@@ -1742,7 +1782,7 @@ static void ufs_qcom_dump_testbus(struct ufs_hba *hba)
}
static int ufs_qcom_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
- const char *prefix, enum ufshcd_res id)
+ const char *prefix, void __iomem *base)
{
u32 *regs __free(kfree) = NULL;
size_t pos;
@@ -1755,7 +1795,7 @@ static int ufs_qcom_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
return -ENOMEM;
for (pos = 0; pos < len; pos += 4)
- regs[pos / 4] = readl(hba->res[id].base + offset + pos);
+ regs[pos / 4] = readl(base + offset + pos);
print_hex_dump(KERN_ERR, prefix,
len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,
@@ -1766,30 +1806,34 @@ static int ufs_qcom_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
static void ufs_qcom_dump_mcq_hci_regs(struct ufs_hba *hba)
{
+ struct ufshcd_mcq_opr_info_t *opr = &hba->mcq_opr[0];
+ void __iomem *mcq_vs_base = hba->mcq_base + UFS_MEM_VS_BASE;
+
struct dump_info {
+ void __iomem *base;
size_t offset;
size_t len;
const char *prefix;
- enum ufshcd_res id;
};
struct dump_info mcq_dumps[] = {
- {0x0, 256 * 4, "MCQ HCI-0 ", RES_MCQ},
- {0x400, 256 * 4, "MCQ HCI-1 ", RES_MCQ},
- {0x0, 5 * 4, "MCQ VS-0 ", RES_MCQ_VS},
- {0x0, 256 * 4, "MCQ SQD-0 ", RES_MCQ_SQD},
- {0x400, 256 * 4, "MCQ SQD-1 ", RES_MCQ_SQD},
- {0x800, 256 * 4, "MCQ SQD-2 ", RES_MCQ_SQD},
- {0xc00, 256 * 4, "MCQ SQD-3 ", RES_MCQ_SQD},
- {0x1000, 256 * 4, "MCQ SQD-4 ", RES_MCQ_SQD},
- {0x1400, 256 * 4, "MCQ SQD-5 ", RES_MCQ_SQD},
- {0x1800, 256 * 4, "MCQ SQD-6 ", RES_MCQ_SQD},
- {0x1c00, 256 * 4, "MCQ SQD-7 ", RES_MCQ_SQD},
+ {hba->mcq_base, 0x0, 256 * 4, "MCQ HCI-0 "},
+ {hba->mcq_base, 0x400, 256 * 4, "MCQ HCI-1 "},
+ {mcq_vs_base, 0x0, 5 * 4, "MCQ VS-0 "},
+ {opr->base, 0x0, 256 * 4, "MCQ SQD-0 "},
+ {opr->base, 0x400, 256 * 4, "MCQ SQD-1 "},
+ {opr->base, 0x800, 256 * 4, "MCQ SQD-2 "},
+ {opr->base, 0xc00, 256 * 4, "MCQ SQD-3 "},
+ {opr->base, 0x1000, 256 * 4, "MCQ SQD-4 "},
+ {opr->base, 0x1400, 256 * 4, "MCQ SQD-5 "},
+ {opr->base, 0x1800, 256 * 4, "MCQ SQD-6 "},
+ {opr->base, 0x1c00, 256 * 4, "MCQ SQD-7 "},
};
for (int i = 0; i < ARRAY_SIZE(mcq_dumps); i++) {
ufs_qcom_dump_regs(hba, mcq_dumps[i].offset, mcq_dumps[i].len,
- mcq_dumps[i].prefix, mcq_dumps[i].id);
+ mcq_dumps[i].prefix, mcq_dumps[i].base);
cond_resched();
}
}
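With the base pointer passed in explicitly, the dump helper is no longer tied to the removed resource table and can target any ioremapped region. A hypothetical call against the standard HCI space:

/* e.g. dump the first four standard HCI registers (illustrative only): */
ufs_qcom_dump_regs(hba, 0x0, 4 * 4, "UFS HCI ", hba->mmio_base);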
@@ -1910,116 +1954,68 @@ static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
hba->clk_scaling.suspend_on_no_request = true;
}
-/* Resources */
-static const struct ufshcd_res_info ufs_res_info[RES_MAX] = {
- {.name = "ufs_mem",},
- {.name = "mcq",},
- /* Submission Queue DAO */
- {.name = "mcq_sqd",},
- /* Submission Queue Interrupt Status */
- {.name = "mcq_sqis",},
- /* Completion Queue DAO */
- {.name = "mcq_cqd",},
- /* Completion Queue Interrupt Status */
- {.name = "mcq_cqis",},
- /* MCQ vendor specific */
- {.name = "mcq_vs",},
-};
-
static int ufs_qcom_mcq_config_resource(struct ufs_hba *hba)
{
struct platform_device *pdev = to_platform_device(hba->dev);
- struct ufshcd_res_info *res;
- struct resource *res_mem, *res_mcq;
- int i, ret;
-
- memcpy(hba->res, ufs_res_info, sizeof(ufs_res_info));
-
- for (i = 0; i < RES_MAX; i++) {
- res = &hba->res[i];
- res->resource = platform_get_resource_byname(pdev,
- IORESOURCE_MEM,
- res->name);
- if (!res->resource) {
- dev_info(hba->dev, "Resource %s not provided\n", res->name);
- if (i == RES_UFS)
- return -ENODEV;
- continue;
- } else if (i == RES_UFS) {
- res_mem = res->resource;
- res->base = hba->mmio_base;
- continue;
- }
+ struct resource *res;
- res->base = devm_ioremap_resource(hba->dev, res->resource);
- if (IS_ERR(res->base)) {
- dev_err(hba->dev, "Failed to map res %s, err=%d\n",
- res->name, (int)PTR_ERR(res->base));
- ret = PTR_ERR(res->base);
- res->base = NULL;
- return ret;
- }
+ /* Map the MCQ configuration region */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mcq");
+ if (!res) {
+ dev_err(hba->dev, "MCQ resource not found in device tree\n");
+ return -ENODEV;
}
- /* MCQ resource provided in DT */
- res = &hba->res[RES_MCQ];
- /* Bail if MCQ resource is provided */
- if (res->base)
- goto out;
-
- /* Explicitly allocate MCQ resource from ufs_mem */
- res_mcq = devm_kzalloc(hba->dev, sizeof(*res_mcq), GFP_KERNEL);
- if (!res_mcq)
- return -ENOMEM;
-
- res_mcq->start = res_mem->start +
- MCQ_SQATTR_OFFSET(hba->mcq_capabilities);
- res_mcq->end = res_mcq->start + hba->nr_hw_queues * MCQ_QCFG_SIZE - 1;
- res_mcq->flags = res_mem->flags;
- res_mcq->name = "mcq";
-
- ret = insert_resource(&iomem_resource, res_mcq);
- if (ret) {
- dev_err(hba->dev, "Failed to insert MCQ resource, err=%d\n",
- ret);
- return ret;
- }
-
- res->base = devm_ioremap_resource(hba->dev, res_mcq);
- if (IS_ERR(res->base)) {
- dev_err(hba->dev, "MCQ registers mapping failed, err=%d\n",
- (int)PTR_ERR(res->base));
- ret = PTR_ERR(res->base);
- goto ioremap_err;
+ hba->mcq_base = devm_ioremap_resource(hba->dev, res);
+ if (IS_ERR(hba->mcq_base)) {
+ dev_err(hba->dev, "Failed to map MCQ region: %ld\n",
+ PTR_ERR(hba->mcq_base));
+ return PTR_ERR(hba->mcq_base);
}
-out:
- hba->mcq_base = res->base;
return 0;
-ioremap_err:
- res->base = NULL;
- remove_resource(res_mcq);
- return ret;
}
static int ufs_qcom_op_runtime_config(struct ufs_hba *hba)
{
- struct ufshcd_res_info *mem_res, *sqdao_res;
struct ufshcd_mcq_opr_info_t *opr;
int i;
+ u32 doorbell_offsets[OPR_MAX];
- mem_res = &hba->res[RES_UFS];
- sqdao_res = &hba->res[RES_MCQ_SQD];
+ /*
+ * Configure doorbell address offsets in MCQ configuration registers.
+ * These values are offsets relative to mmio_base (UFS_HCI_BASE).
+ *
+ * Memory layout:
+ * - mmio_base = UFS_HCI_BASE
+ * - mcq_base = MCQ_CONFIG_BASE = mmio_base + (UFS_QCOM_MCQCAP_QCFGPTR * 0x200)
+ * - Doorbell registers are at: mmio_base + (UFS_QCOM_MCQCAP_QCFGPTR * 0x200) +
+ *   UFS_QCOM_MCQ_SQD_OFFSET, which is also:
+ *   mcq_base + UFS_QCOM_MCQ_SQD_OFFSET
+ */
- if (!mem_res->base || !sqdao_res->base)
- return -EINVAL;
+ doorbell_offsets[OPR_SQD] = UFS_QCOM_SQD_ADDR_OFFSET;
+ doorbell_offsets[OPR_SQIS] = UFS_QCOM_SQIS_ADDR_OFFSET;
+ doorbell_offsets[OPR_CQD] = UFS_QCOM_CQD_ADDR_OFFSET;
+ doorbell_offsets[OPR_CQIS] = UFS_QCOM_CQIS_ADDR_OFFSET;
+ /*
+ * Configure MCQ operation registers.
+ *
+ * The doorbell registers are physically located within the MCQ region:
+ * - doorbell_physical_addr = mmio_base + doorbell_offset
+ * - doorbell_physical_addr = mcq_base + (doorbell_offset - MCQ_CONFIG_OFFSET)
+ */
for (i = 0; i < OPR_MAX; i++) {
opr = &hba->mcq_opr[i];
- opr->offset = sqdao_res->resource->start -
- mem_res->resource->start + 0x40 * i;
- opr->stride = 0x100;
- opr->base = sqdao_res->base + 0x40 * i;
+ opr->offset = doorbell_offsets[i]; /* Offset relative to mmio_base */
+ opr->stride = UFS_QCOM_MCQ_STRIDE; /* 256 bytes between queues */
+
+ /*
+ * Calculate the actual doorbell base address within MCQ region:
+ * base = mcq_base + (doorbell_offset - MCQ_CONFIG_OFFSET)
+ */
+ opr->base = hba->mcq_base + (opr->offset - UFS_QCOM_MCQ_CONFIG_OFFSET);
}
return 0;
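The pointer arithmetic in the loop is easiest to verify with concrete numbers; a worked example for i == OPR_SQD, using the constants added to ufs-qcom.h further down:

/* opr->offset = UFS_QCOM_SQD_ADDR_OFFSET
 *             = 0x1C000 + 0x5000 = 0x21000      (relative to mmio_base)
 * opr->base   = mcq_base + (0x21000 - 0x1C000)
 *             = mcq_base + 0x5000
 *
 * Both name the same register, since
 * mcq_base == mmio_base + UFS_QCOM_MCQ_CONFIG_OFFSET (0x1C000).
 */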
@@ -2034,12 +2030,8 @@ static int ufs_qcom_get_hba_mac(struct ufs_hba *hba)
static int ufs_qcom_get_outstanding_cqs(struct ufs_hba *hba,
unsigned long *ocqs)
{
- struct ufshcd_res_info *mcq_vs_res = &hba->res[RES_MCQ_VS];
-
- if (!mcq_vs_res->base)
- return -EINVAL;
-
- *ocqs = readl(mcq_vs_res->base + UFS_MEM_CQIS_VS);
+ /* Read from MCQ vendor-specific register in MCQ region */
+ *ocqs = readl(hba->mcq_base + UFS_MEM_CQIS_VS);
return 0;
}
diff --git a/drivers/ufs/host/ufs-qcom.h b/drivers/ufs/host/ufs-qcom.h
index e0e129af7c16..380d02333d38 100644
--- a/drivers/ufs/host/ufs-qcom.h
+++ b/drivers/ufs/host/ufs-qcom.h
@@ -33,6 +33,28 @@
#define DL_VS_CLK_CFG_MASK GENMASK(9, 0)
#define DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN BIT(9)
+/* Qualcomm MCQ Configuration */
+#define UFS_QCOM_MCQCAP_QCFGPTR 224 /* 0xE0 */
+#define UFS_QCOM_MCQ_CONFIG_OFFSET (UFS_QCOM_MCQCAP_QCFGPTR * 0x200) /* 0x1C000 */
+
+/* Doorbell offsets within MCQ region (relative to MCQ_CONFIG_BASE) */
+#define UFS_QCOM_MCQ_SQD_OFFSET 0x5000
+#define UFS_QCOM_MCQ_CQD_OFFSET 0x5080
+#define UFS_QCOM_MCQ_SQIS_OFFSET 0x5040
+#define UFS_QCOM_MCQ_CQIS_OFFSET 0x50C0
+#define UFS_QCOM_MCQ_STRIDE 0x100
+
+/* Calculated doorbell address offsets (relative to mmio_base) */
+#define UFS_QCOM_SQD_ADDR_OFFSET (UFS_QCOM_MCQ_CONFIG_OFFSET + UFS_QCOM_MCQ_SQD_OFFSET)
+#define UFS_QCOM_CQD_ADDR_OFFSET (UFS_QCOM_MCQ_CONFIG_OFFSET + UFS_QCOM_MCQ_CQD_OFFSET)
+#define UFS_QCOM_SQIS_ADDR_OFFSET (UFS_QCOM_MCQ_CONFIG_OFFSET + UFS_QCOM_MCQ_SQIS_OFFSET)
+#define UFS_QCOM_CQIS_ADDR_OFFSET (UFS_QCOM_MCQ_CONFIG_OFFSET + UFS_QCOM_MCQ_CQIS_OFFSET)
+#define REG_UFS_MCQ_STRIDE UFS_QCOM_MCQ_STRIDE
+
+/* MCQ Vendor specific address offsets (relative to MCQ_CONFIG_BASE) */
+#define UFS_MEM_VS_BASE 0x4000
+#define UFS_MEM_CQIS_VS 0x4008
+
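Since the derived offsets are plain compile-time arithmetic, they can be sanity-checked with static_assert(). An illustrative check, not part of the patch:

static_assert(UFS_QCOM_MCQ_CONFIG_OFFSET == 0x1C000);
static_assert(UFS_QCOM_SQD_ADDR_OFFSET  == 0x21000);
static_assert(UFS_QCOM_SQIS_ADDR_OFFSET == 0x21040);
static_assert(UFS_QCOM_CQD_ADDR_OFFSET  == 0x21080);
static_assert(UFS_QCOM_CQIS_ADDR_OFFSET == 0x210C0);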
/* QCOM UFS host controller vendor specific registers */
enum {
REG_UFS_SYS1CLK_1US = 0xC0,
@@ -60,7 +82,7 @@ enum {
UFS_AH8_CFG = 0xFC,
UFS_RD_REG_MCQ = 0xD00,
-
+ UFS_MEM_ICE_CFG = 0x2600,
REG_UFS_MEM_ICE_CONFIG = 0x260C,
REG_UFS_MEM_ICE_NUM_CORE = 0x2664,
@@ -95,10 +117,6 @@ enum {
REG_UFS_SW_H8_EXIT_CNT = 0x2710,
};
-enum {
- UFS_MEM_CQIS_VS = 0x8,
-};
-
#define UFS_CNTLR_2_x_x_VEN_REGS_OFFSET(x) (0x000 + x)
#define UFS_CNTLR_3_x_x_VEN_REGS_OFFSET(x) (0x400 + x)
diff --git a/drivers/ufs/host/ufshcd-pltfrm.c b/drivers/ufs/host/ufshcd-pltfrm.c
index ffe5d1d2b215..c2dafb583cf5 100644
--- a/drivers/ufs/host/ufshcd-pltfrm.c
+++ b/drivers/ufs/host/ufshcd-pltfrm.c
@@ -430,6 +430,39 @@ int ufshcd_negotiate_pwr_params(const struct ufs_host_params *host_params,
}
EXPORT_SYMBOL_GPL(ufshcd_negotiate_pwr_params);
+/**
+ * ufshcd_parse_gear_limits - Parse DT-based gear and rate limits for UFS
+ * @hba: Pointer to UFS host bus adapter instance
+ * @host_params: Pointer to UFS host parameters structure to be updated
+ *
+ * This function reads optional device tree properties to apply
+ * platform-specific constraints.
+ *
+ * "limit-hs-gear": Specifies the max HS gear.
+ * "limit-gear-rate": Specifies the max High-Speed rate.
+ */
+void ufshcd_parse_gear_limits(struct ufs_hba *hba, struct ufs_host_params *host_params)
+{
+ struct device_node *np = hba->dev->of_node;
+ u32 hs_gear;
+ const char *hs_rate;
+
+ if (!of_property_read_u32(np, "limit-hs-gear", &hs_gear)) {
+ host_params->hs_tx_gear = hs_gear;
+ host_params->hs_rx_gear = hs_gear;
+ }
+
+ if (!of_property_read_string(np, "limit-gear-rate", &hs_rate)) {
+ if (!strcmp(hs_rate, "rate-a"))
+ host_params->hs_rate = PA_HS_MODE_A;
+ else if (!strcmp(hs_rate, "rate-b"))
+ host_params->hs_rate = PA_HS_MODE_B;
+ else
+ dev_warn(hba->dev, "Invalid rate: %s\n", hs_rate);
+ }
+}
+EXPORT_SYMBOL_GPL(ufshcd_parse_gear_limits);
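A usage sketch with hypothetical property values. Absent properties leave the defaults untouched, since of_property_read_u32()/of_property_read_string() return non-zero when a property is missing; present ones clamp:

/* Assuming the controller node carries (hypothetical values):
 *
 *	limit-hs-gear = <4>;
 *	limit-gear-rate = "rate-b";
 */
ufshcd_parse_gear_limits(hba, host_params);
/* host_params->hs_tx_gear == 4, ->hs_rx_gear == 4, ->hs_rate == PA_HS_MODE_B */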
+
void ufshcd_init_host_params(struct ufs_host_params *host_params)
{
*host_params = (struct ufs_host_params){
diff --git a/drivers/ufs/host/ufshcd-pltfrm.h b/drivers/ufs/host/ufshcd-pltfrm.h
index 3017f8e8f93c..0a18a8aed94d 100644
--- a/drivers/ufs/host/ufshcd-pltfrm.h
+++ b/drivers/ufs/host/ufshcd-pltfrm.h
@@ -29,6 +29,7 @@ int ufshcd_negotiate_pwr_params(const struct ufs_host_params *host_params,
const struct ufs_pa_layer_attr *dev_max,
struct ufs_pa_layer_attr *agreed_pwr);
void ufshcd_init_host_params(struct ufs_host_params *host_params);
+void ufshcd_parse_gear_limits(struct ufs_hba *hba, struct ufs_host_params *host_params);
int ufshcd_pltfrm_init(struct platform_device *pdev,
const struct ufs_hba_variant_ops *vops);
void ufshcd_pltfrm_remove(struct platform_device *pdev);
diff --git a/drivers/usb/gadget/function/uvc.h b/drivers/usb/gadget/function/uvc.h
index 6f44dd732315..9e79cbe50715 100644
--- a/drivers/usb/gadget/function/uvc.h
+++ b/drivers/usb/gadget/function/uvc.h
@@ -196,6 +196,11 @@ struct uvc_file_handle {
#define to_uvc_file_handle(handle) \
container_of(handle, struct uvc_file_handle, vfh)
+static inline struct uvc_file_handle *file_to_uvc_file_handle(struct file *filp)
+{
+ return container_of(file_to_v4l2_fh(filp), struct uvc_file_handle, vfh);
+}
+
/* ------------------------------------------------------------------------
* Functions
*/
diff --git a/drivers/usb/gadget/function/uvc_v4l2.c b/drivers/usb/gadget/function/uvc_v4l2.c
index fc9a8d31a1e9..fd4b998ccd16 100644
--- a/drivers/usb/gadget/function/uvc_v4l2.c
+++ b/drivers/usb/gadget/function/uvc_v4l2.c
@@ -672,10 +672,9 @@ uvc_v4l2_open(struct file *file)
return -ENOMEM;
v4l2_fh_init(&handle->vfh, vdev);
- v4l2_fh_add(&handle->vfh);
+ v4l2_fh_add(&handle->vfh, file);
handle->device = &uvc->video;
- file->private_data = &handle->vfh;
return 0;
}
@@ -685,7 +684,7 @@ uvc_v4l2_release(struct file *file)
{
struct video_device *vdev = video_devdata(file);
struct uvc_device *uvc = video_get_drvdata(vdev);
- struct uvc_file_handle *handle = to_uvc_file_handle(file->private_data);
+ struct uvc_file_handle *handle = file_to_uvc_file_handle(file);
struct uvc_video *video = handle->device;
mutex_lock(&video->mutex);
@@ -693,8 +692,7 @@ uvc_v4l2_release(struct file *file)
uvc_v4l2_disable(uvc);
mutex_unlock(&video->mutex);
- file->private_data = NULL;
- v4l2_fh_del(&handle->vfh);
+ v4l2_fh_del(&handle->vfh, file);
v4l2_fh_exit(&handle->vfh);
kfree(handle);
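Reading the two uvc hunks together: the v4l2 core now appears to own the file->private_data link itself, so drivers register and unregister the fh against the struct file and fetch it back via file_to_v4l2_fh(), which the new file_to_uvc_file_handle() helper above wraps. The presumed core API, as the call sites imply:

/* Presumed updated v4l2 core prototypes (inferred from the call sites): */
void v4l2_fh_add(struct v4l2_fh *fh, struct file *filp);
void v4l2_fh_del(struct v4l2_fh *fh, struct file *filp);
struct v4l2_fh *file_to_v4l2_fh(struct file *filp);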
diff --git a/drivers/vfio/pci/pds/lm.c b/drivers/vfio/pci/pds/lm.c
index f2673d395236..4d70c833fa32 100644
--- a/drivers/vfio/pci/pds/lm.c
+++ b/drivers/vfio/pci/pds/lm.c
@@ -151,8 +151,7 @@ static struct page *pds_vfio_get_file_page(struct pds_vfio_lm_file *lm_file,
lm_file->last_offset_sg = sg;
lm_file->sg_last_entry += i;
lm_file->last_offset = cur_offset;
- return nth_page(sg_page(sg),
- (offset - cur_offset) / PAGE_SIZE);
+ return sg_page(sg) + (offset - cur_offset) / PAGE_SIZE;
}
cur_offset += sg->length;
}
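This hunk and the identical one in virtio/migrate.c below drop nth_page(), presumably as part of the treewide nth_page() removal: pages covered by a single scatterlist entry are physically contiguous, and once every memory model keeps neighbouring struct pages adjacent, plain pointer arithmetic suffices. A sketch of the equivalence, with a hypothetical helper name:

static struct page *sg_nth_page(struct scatterlist *sg, unsigned long n)
{
	/* was: nth_page(sg_page(sg), n); the two only differed on
	 * SPARSEMEM-without-VMEMMAP layouts, which are gone */
	return sg_page(sg) + n;
}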
diff --git a/drivers/vfio/pci/virtio/migrate.c b/drivers/vfio/pci/virtio/migrate.c
index ba92bb4e9af9..7dd0ac866461 100644
--- a/drivers/vfio/pci/virtio/migrate.c
+++ b/drivers/vfio/pci/virtio/migrate.c
@@ -53,8 +53,7 @@ virtiovf_get_migration_page(struct virtiovf_data_buffer *buf,
buf->last_offset_sg = sg;
buf->sg_last_entry += i;
buf->last_offset = cur_offset;
- return nth_page(sg_page(sg),
- (offset - cur_offset) / PAGE_SIZE);
+ return sg_page(sg) + (offset - cur_offset) / PAGE_SIZE;
}
cur_offset += sg->length;
}
diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
index 9f27c3f6091b..1778eff7ab00 100644
--- a/drivers/vhost/vringh.c
+++ b/drivers/vhost/vringh.c
@@ -1161,6 +1161,7 @@ static inline int copy_to_iotlb(const struct vringh *vrh, void *dst,
struct iov_iter iter;
u64 translated;
int ret;
+ size_t size;
ret = iotlb_translate(vrh, (u64)(uintptr_t)dst,
len - total_translated, &translated,
@@ -1178,9 +1179,9 @@ static inline int copy_to_iotlb(const struct vringh *vrh, void *dst,
translated);
}
- ret = copy_to_iter(src, translated, &iter);
- if (ret < 0)
- return ret;
+ size = copy_to_iter(src, translated, &iter);
+ if (size != translated)
+ return -EFAULT;
src += translated;
dst += translated;
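The fix matters because copy_to_iter() returns a size_t count of bytes copied, never a negative errno, so the old `ret < 0` check could not fire; the only failure signal is a short copy. The corrected pattern:

size_t copied = copy_to_iter(src, translated, &iter);
if (copied != translated)	/* short copy: the iov faulted part-way */
	return -EFAULT;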
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index 5940e2eb9231..96cc9b389246 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -279,14 +279,7 @@ static int fbcon_get_rotate(struct fb_info *info)
static bool fbcon_skip_panic(struct fb_info *info)
{
-/* panic_cpu is not exported, and can't be used if built as module. Use
- * oops_in_progress instead, but non-fatal oops won't be printed.
- */
-#if defined(MODULE)
- return (info->skip_panic && unlikely(oops_in_progress));
-#else
- return (info->skip_panic && unlikely(atomic_read(&panic_cpu) != PANIC_CPU_INVALID));
-#endif
+ return (info->skip_panic && unlikely(panic_in_progress()));
}
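panic_in_progress() has evidently become usable from modules, which lets the #if defined(MODULE) split go. Judging by the branch removed above, it presumably wraps the same check:

/* Presumed shape of the helper (matching the removed #else branch): */
static inline bool panic_in_progress(void)
{
	return unlikely(atomic_read(&panic_cpu) != PANIC_CPU_INVALID);
}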
static inline bool fbcon_is_active(struct vc_data *vc, struct fb_info *info)
diff --git a/drivers/video/screen_info_generic.c b/drivers/video/screen_info_generic.c
index 900e9386eceb..763adbba71cb 100644
--- a/drivers/video/screen_info_generic.c
+++ b/drivers/video/screen_info_generic.c
@@ -5,6 +5,8 @@
#include <linux/screen_info.h>
#include <linux/string.h>
+#include <video/pixel_format.h>
+
static void resource_init_named(struct resource *r,
resource_size_t start, resource_size_t size,
const char *name, unsigned int flags)
@@ -180,3 +182,56 @@ u32 __screen_info_lfb_bits_per_pixel(const struct screen_info *si)
return bits_per_pixel;
}
EXPORT_SYMBOL(__screen_info_lfb_bits_per_pixel);
+
+static int __screen_info_lfb_pixel_format(const struct screen_info *si, struct pixel_format *f)
+{
+ u32 bits_per_pixel = __screen_info_lfb_bits_per_pixel(si);
+
+ if (bits_per_pixel > U8_MAX)
+ return -EINVAL;
+
+ f->bits_per_pixel = bits_per_pixel;
+
+ if (si->lfb_depth > 8) {
+ f->indexed = false;
+ f->alpha.offset = 0;
+ f->alpha.length = 0;
+ f->red.offset = si->red_pos;
+ f->red.length = si->red_size;
+ f->green.offset = si->green_pos;
+ f->green.length = si->green_size;
+ f->blue.offset = si->blue_pos;
+ f->blue.length = si->blue_size;
+ } else {
+ f->indexed = true;
+ f->index.offset = 0;
+ f->index.length = si->lfb_depth;
+ }
+
+ return 0;
+}
+
+/**
+ * screen_info_pixel_format - Returns the screen-info format as pixel-format description
+ *
+ * @si: the screen_info
+ * @f: pointer to return pixel-format description
+ *
+ * Returns:
+ * 0 on success, or a negative errno code otherwise.
+ */
+int screen_info_pixel_format(const struct screen_info *si, struct pixel_format *f)
+{
+ unsigned int type = screen_info_video_type(si);
+
+ /* TODO: Add support for additional types as needed. */
+ switch (type) {
+ case VIDEO_TYPE_VLFB:
+ case VIDEO_TYPE_EFI:
+ return __screen_info_lfb_pixel_format(si, f);
+ }
+
+ /* not supported */
+ return -EINVAL;
+}
+EXPORT_SYMBOL(screen_info_pixel_format);
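A usage sketch under stated assumptions: a hypothetical 32-bit XRGB EFI framebuffer, with the video type carried in orig_video_isVGA as screen_info convention has it, and bits_per_pixel presumably rounded up to 32 by __screen_info_lfb_bits_per_pixel():

struct pixel_format f;
struct screen_info si = {
	.orig_video_isVGA = VIDEO_TYPE_EFI,	/* assumption: type lives here */
	.lfb_depth  = 24,			/* > 8, so not indexed */
	.red_pos    = 16, .red_size   = 8,
	.green_pos  = 8,  .green_size = 8,
	.blue_pos   = 0,  .blue_size  = 8,
};

if (!screen_info_pixel_format(&si, &f)) {
	/* f.red == {16, 8}, f.green == {8, 8}, f.blue == {0, 8},
	 * f.alpha.length == 0, f.indexed == false */
}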
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index e299e18346a3..7f3fd72678eb 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -488,8 +488,7 @@ static unsigned long return_free_pages_to_mm(struct virtio_balloon *vb,
page = balloon_page_pop(&vb->free_page_list);
if (!page)
break;
- free_pages((unsigned long)page_address(page),
- VIRTIO_BALLOON_HINT_BLOCK_ORDER);
+ __free_pages(page, VIRTIO_BALLOON_HINT_BLOCK_ORDER);
}
vb->num_free_page_blocks -= num_returned;
spin_unlock_irq(&vb->free_page_list_lock);
@@ -719,8 +718,7 @@ static int get_free_page_and_send(struct virtio_balloon *vb)
if (vq->num_free > 1) {
err = virtqueue_add_inbuf(vq, &sg, 1, p, GFP_KERNEL);
if (unlikely(err)) {
- free_pages((unsigned long)p,
- VIRTIO_BALLOON_HINT_BLOCK_ORDER);
+ __free_pages(page, VIRTIO_BALLOON_HINT_BLOCK_ORDER);
return err;
}
virtqueue_kick(vq);
@@ -733,7 +731,7 @@ static int get_free_page_and_send(struct virtio_balloon *vb)
* The vq has no available entry to add this page block, so
* just free it.
*/
- free_pages((unsigned long)p, VIRTIO_BALLOON_HINT_BLOCK_ORDER);
+ __free_pages(page, VIRTIO_BALLOON_HINT_BLOCK_ORDER);
}
return 0;
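For a page with a kernel mapping, free_pages((unsigned long)page_address(page), order) and __free_pages(page, order) free the same block; going through the struct page directly skips the virtual-address round-trip and also covers pages without a linear mapping. A hypothetical helper spelling out the swap:

static void free_hint_block(struct page *page)
{
	/* was: free_pages((unsigned long)page_address(page),
	 *		   VIRTIO_BALLOON_HINT_BLOCK_ORDER); */
	__free_pages(page, VIRTIO_BALLOON_HINT_BLOCK_ORDER);
}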
@@ -875,7 +873,7 @@ static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info,
balloon_page_finalize(page);
put_page(page); /* balloon reference */
- return MIGRATEPAGE_SUCCESS;
+ return 0;
}
#endif /* CONFIG_BALLOON_COMPACTION */
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index f5062061c408..c147145a6593 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -378,7 +378,7 @@ static int vring_map_one_sg(const struct vring_virtqueue *vq, struct scatterlist
* is initialized by the hardware. Explicitly check/unpoison it
* depending on the direction.
*/
- kmsan_handle_dma(sg_page(sg), sg->offset, sg->length, direction);
+ kmsan_handle_dma(sg_phys(sg), sg->length, direction);
*addr = (dma_addr_t)sg_phys(sg);
return 0;
}
@@ -3157,7 +3157,7 @@ dma_addr_t virtqueue_dma_map_single_attrs(struct virtqueue *_vq, void *ptr,
struct vring_virtqueue *vq = to_vvq(_vq);
if (!vq->use_dma_api) {
- kmsan_handle_dma(virt_to_page(ptr), offset_in_page(ptr), size, dir);
+ kmsan_handle_dma(virt_to_phys(ptr), size, dir);
return (dma_addr_t)virt_to_phys(ptr);
}
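Both updated call sites hand KMSAN a physical address rather than a (page, offset) pair, so the hook's signature has presumably changed accordingly:

/* Presumed updated prototype, as the call sites above imply: */
void kmsan_handle_dma(phys_addr_t phys, size_t size,
		      enum dma_data_direction dir);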
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 24f485827e03..f9a35ed266ec 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -138,6 +138,7 @@ config XEN_GNTDEV
depends on XEN
default m
select MMU_NOTIFIER
+ select FIND_NORMAL_PAGE
help
Allows userspace processes to use grants.
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 91ba5078c9d9..2c960f187f7c 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -318,6 +318,7 @@ static int find_grant_ptes(pte_t *pte, unsigned long addr, void *data)
BUG_ON(pgnr >= map->count);
pte_maddr = arbitrary_virt_to_machine(pte).maddr;
+ /* Note: this will perform a pte_mkspecial() through the hypercall. */
gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, flags,
map->grants[pgnr].ref,
map->grants[pgnr].domid);
@@ -525,7 +526,7 @@ static void gntdev_vma_close(struct vm_area_struct *vma)
gntdev_put_map(priv, map);
}
-static struct page *gntdev_vma_find_special_page(struct vm_area_struct *vma,
+static struct page *gntdev_vma_find_normal_page(struct vm_area_struct *vma,
unsigned long addr)
{
struct gntdev_grant_map *map = vma->vm_private_data;
@@ -536,7 +537,7 @@ static struct page *gntdev_vma_find_special_page(struct vm_area_struct *vma,
static const struct vm_operations_struct gntdev_vmops = {
.open = gntdev_vma_open,
.close = gntdev_vma_close,
- .find_special_page = gntdev_vma_find_special_page,
+ .find_normal_page = gntdev_vma_find_normal_page,
};
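Tying the gntdev hunks together: the grant PTEs are made special via the hypercall (see the comment added above), so page-table walkers need a VMA hook that hands back the real backing page; the hook appears renamed from find_special_page to find_normal_page, gated behind the FIND_NORMAL_PAGE Kconfig symbol selected in the Kconfig hunk. Its presumed shape:

/* Presumed member in struct vm_operations_struct: */
struct page *(*find_normal_page)(struct vm_area_struct *vma,
				 unsigned long addr);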
/* ------------------------------------------------------------------ */
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index da1a7d3d377c..dd7747a2de87 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -392,6 +392,25 @@ xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
}
}
+static dma_addr_t xen_swiotlb_direct_map_resource(struct device *dev,
+ phys_addr_t paddr,
+ size_t size,
+ enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ dma_addr_t dma_addr = paddr;
+
+ if (unlikely(!dma_capable(dev, dma_addr, size, false))) {
+ dev_err_once(dev,
+ "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
+ &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
+ WARN_ON_ONCE(1);
+ return DMA_MAPPING_ERROR;
+ }
+
+ return dma_addr;
+}
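The new function is the identity mapping plus dma_capable() check that dma_direct_map_resource() performs, presumably copied locally so xen-swiotlb stops referencing the dma-direct internal; resource (MMIO) addresses pass through untranslated and never bounce. It is reached through the generic API, e.g.:

/* Hypothetical caller mapping an MMIO resource for peer-to-peer DMA: */
dma_addr_t addr = dma_map_resource(dev, res->start, resource_size(res),
				   DMA_BIDIRECTIONAL, 0);
if (dma_mapping_error(dev, addr))
	return -EIO;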
+
/*
* Return whether the given device DMA address mask can be supported
* properly. For example, if your device can only drive the low 24-bits
@@ -426,5 +445,5 @@ const struct dma_map_ops xen_swiotlb_dma_ops = {
.alloc_pages_op = dma_common_alloc_pages,
.free_pages = dma_common_free_pages,
.max_mapping_size = swiotlb_max_mapping_size,
- .map_resource = dma_direct_map_resource,
+ .map_resource = xen_swiotlb_direct_map_resource,
};